Chromium Code Reviews

Unified diff: src/mips/code-stubs-mips.cc

Issue 10557002: MIPS: Share optimized code for closures. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 8 years, 6 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 69 matching lines...)

   __ bind(&call_builtin);
   __ push(a0);
   __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
 }


 void FastNewClosureStub::Generate(MacroAssembler* masm) {
   // Create a new closure from the given function info in new
   // space. Set the context to the current context in cp.
+  Counters* counters = masm->isolate()->counters();
+
   Label gc;

   // Pop the function info from the stack.
   __ pop(a3);

   // Attempt to allocate new JSFunction in new space.
   __ AllocateInNewSpace(JSFunction::kSize,
                         v0,
                         a1,
                         a2,
                         &gc,
                         TAG_OBJECT);

+  __ IncrementCounter(counters->fast_new_closure_total(), 1, t2, t3);
+
   int map_index = (language_mode_ == CLASSIC_MODE)
       ? Context::FUNCTION_MAP_INDEX
       : Context::STRICT_MODE_FUNCTION_MAP_INDEX;

   // Compute the function map in the current global context and set that
   // as the map of the allocated object.
   __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
-  __ lw(a2, MemOperand(a2, Context::SlotOffset(map_index)));
-  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+  __ lw(t1, MemOperand(a2, Context::SlotOffset(map_index)));
+  __ sw(t1, FieldMemOperand(v0, HeapObject::kMapOffset));

   // Initialize the rest of the function. We don't have to update the
   // write barrier because the allocated object is in new space.
   __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
-  __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
-  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+  __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
   __ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
   __ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset));
-  __ sw(a2, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
+  __ sw(t1, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
   __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
   __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset));
   __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset));
-  __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));

   // Initialize the code pointer in the function to be the one
   // found in the shared function info object.
+  // But first check if there is an optimized version for our context.
+  Label check_optimized;
+  Label install_unoptimized;
+  if (FLAG_cache_optimized_code) {
+    __ lw(a1,
+          FieldMemOperand(a3, SharedFunctionInfo::kOptimizedCodeMapOffset));
+    __ And(at, a1, a1);
+    __ Branch(&check_optimized, ne, at, Operand(zero_reg));
+  }
+  __ bind(&install_unoptimized);
+  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+  __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
   __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
   __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));

   // Return result. The argument function info has been popped already.
   __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
   __ Ret();

+  __ bind(&check_optimized);
+
+  __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, t2, t3);
+
+  // a2 holds the global context, a1 points to the fixed array of 3-element
+  // entries (global context, optimized code, literals).
+  // The optimized code map must never be empty, so check the first entry.
+  Label install_optimized;
+  // Speculatively move the code object into t0.
+  __ lw(t0, FieldMemOperand(a1, FixedArray::kHeaderSize + kPointerSize));
+  __ lw(t1, FieldMemOperand(a1, FixedArray::kHeaderSize));
+  __ Branch(&install_optimized, eq, a2, Operand(t1));
+  __ Branch(&install_unoptimized);
Michael Starzinger 2012/06/18 12:41:38 It seems that this branch unconditionally installs
+
+  // Iterate through the rest of the map backwards. t0 holds an index as a Smi.
+  Label loop;
+  __ lw(t0, FieldMemOperand(a1, FixedArray::kLengthOffset));
+  __ bind(&loop);
+  // Do not double check the first entry.
+
+  __ Branch(&install_unoptimized, eq, t0,
+            Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+  __ Subu(t0, t0, Operand(
+      Smi::FromInt(SharedFunctionInfo::kEntryLength)));  // Skip an entry.
+  __ Addu(t1, a1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(at, t0, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t1, t1, Operand(at));
+  __ lw(t1, MemOperand(t1));
+  __ Branch(&loop, ne, a2, Operand(t1));
+  // Hit: fetch the optimized code.
+  __ Addu(t1, a1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(at, t0, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t1, t1, Operand(at));
+  __ Addu(t1, t1, Operand(kPointerSize));
+  __ lw(t0, MemOperand(t1));
+
+  __ bind(&install_optimized);
+  __ IncrementCounter(counters->fast_new_closure_install_optimized(),
+                      1, t2, t3);
+
+  // TODO(fschneider): Idea: store proper code pointers in the map and either
+  // unmangle them on marking or do nothing as the whole map is discarded on
+  // major GC anyway.
+  __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ sw(t0, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
+
+  // Now link the function into the context's list of optimized functions.
+  __ lw(t0, ContextOperand(a2, Context::OPTIMIZED_FUNCTIONS_LIST));
+
+  __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
+  // No need for a write barrier as the JSFunction (v0) is in new space.
+
+  __ sw(v0, ContextOperand(a2, Context::OPTIMIZED_FUNCTIONS_LIST));
+  // Store the JSFunction (v0) into t0 before issuing the write barrier, as
+  // it clobbers all the registers passed.
+  __ mov(t0, v0);
+  __ RecordWriteContextSlot(
+      a2,
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
+      t0,
+      a1,
+      kRAHasNotBeenSaved,
+      kDontSaveFPRegs);
+
+  // Return result. The argument function info has been popped already.
+  __ Ret();
+
   // Create a new closure through the slower runtime call.
   __ bind(&gc);
   __ LoadRoot(t0, Heap::kFalseValueRootIndex);
   __ Push(cp, a3, t0);
   __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
 }


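For readers who do not read MIPS assembly fluently, the new fast path above boils down to a lookup in the optimized code map stored on the SharedFunctionInfo. The following is a minimal C++ sketch of that lookup under assumed names (CodeMapEntry and LookupOptimizedCode are illustrative only; the real map is a flat FixedArray indexed in steps of SharedFunctionInfo::kEntryLength). It is not V8 source:

#include <cstddef>
#include <vector>

// Illustrative stand-ins; these names are not V8 types.
struct Context;
struct Code;
struct LiteralsArray;

// One 3-element entry of the optimized code map
// (SharedFunctionInfo::kEntryLength == 3).
struct CodeMapEntry {
  Context* global_context;
  Code* optimized_code;
  LiteralsArray* literals;
};

// What the check_optimized / loop section computes: the first entry is
// checked up front, the remaining entries are scanned backwards, and the
// search key is the current global context (kept in a2 by the stub).
Code* LookupOptimizedCode(const std::vector<CodeMapEntry>& map,
                          Context* global_context) {
  if (!map.empty() && map.front().global_context == global_context) {
    return map.front().optimized_code;   // install_optimized, fast hit
  }
  for (std::size_t i = map.size(); i > 1; --i) {
    if (map[i - 1].global_context == global_context) {
      return map[i - 1].optimized_code;  // install_optimized via the loop
    }
  }
  return nullptr;                        // install_unoptimized
}

On a hit the stub installs the cached optimized code and links the new JSFunction into the context's OPTIMIZED_FUNCTIONS_LIST. That last store writes the new-space function into a context slot that may live in old space, which is why it is followed by RecordWriteContextSlot, unlike the earlier stores into the freshly allocated (new-space) function.
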
 void FastNewContextStub::Generate(MacroAssembler* masm) {
   // Try to allocate the context in new space.
(...skipping 7227 matching lines...)
   // ElementsTransitionGenerator::GenerateMapChangeElementTransition
   // and ElementsTransitionGenerator::GenerateSmiToDouble
   // and ElementsTransitionGenerator::GenerateDoubleToObject
   { REG(a2), REG(a3), REG(t5), EMIT_REMEMBERED_SET },
   { REG(a2), REG(a3), REG(t5), OMIT_REMEMBERED_SET },
   // ElementsTransitionGenerator::GenerateDoubleToObject
   { REG(t2), REG(a2), REG(a0), EMIT_REMEMBERED_SET },
   { REG(a2), REG(t2), REG(t5), EMIT_REMEMBERED_SET },
   // StoreArrayLiteralElementStub::Generate
   { REG(t1), REG(a0), REG(t2), EMIT_REMEMBERED_SET },
+  // FastNewClosureStub::Generate
+  { REG(a2), REG(t0), REG(a1), EMIT_REMEMBERED_SET },
   // Null termination.
   { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
 };

 #undef REG


 bool RecordWriteStub::IsPregenerated() {
   for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
        !entry->object.is(no_reg);
(...skipping 289 matching lines...)
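The new kAheadOfTime row above exists because RecordWriteStub::IsPregenerated() only reports true for register combinations listed in that table, and only listed stubs are generated ahead of time. The RecordWriteContextSlot call added in FastNewClosureStub::Generate appears to use object a2 (the context), value t0 (the new closure), and address/scratch a1, hence the { a2, t0, a1, EMIT_REMEMBERED_SET } entry. Below is a simplified, self-contained sketch of that lookup (string register names instead of the real Register type; an assumption for illustration, not V8 source):

#include <cstring>

// Simplified stand-in for AheadOfTimeWriteBarrierStubList: register names as
// strings instead of Register values, and no RememberedSetAction field.
struct Entry {
  const char* object;
  const char* value;
  const char* address;
};

// Null-terminated table mirroring kAheadOfTime in code-stubs-mips.cc.
static const Entry kAheadOfTime[] = {
  { "t1", "a0", "t2" },   // StoreArrayLiteralElementStub::Generate
  { "a2", "t0", "a1" },   // FastNewClosureStub::Generate (the new row)
  { nullptr, nullptr, nullptr }
};

// Sketch of the IsPregenerated() walk: a write-barrier stub counts as
// pregenerated only if its exact register combination is in the table.
static bool IsPregenerated(const char* object, const char* value,
                           const char* address) {
  for (const Entry* entry = kAheadOfTime; entry->object != nullptr; ++entry) {
    if (std::strcmp(entry->object, object) == 0 &&
        std::strcmp(entry->value, value) == 0 &&
        std::strcmp(entry->address, address) == 0) {
      return true;
    }
  }
  return false;
}

The real check presumably also matches the remembered-set action and FP-save mode, but the table-scan structure is the same as in the loop shown in the context lines above.
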
   __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);
 }


 #undef __

 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_MIPS