Chromium Code Reviews

Unified Diff: src/arm/code-stubs-arm.cc

Issue 10103035: Share optimized code for closures. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 8 years, 6 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 67 matching lines...)
 
   __ bind(&call_builtin);
   __ push(r0);
   __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
 }
 
 
 void FastNewClosureStub::Generate(MacroAssembler* masm) {
   // Create a new closure from the given function info in new
   // space. Set the context to the current context in cp.
+  Counters* counters = masm->isolate()->counters();
+
   Label gc;
 
   // Pop the function info from the stack.
   __ pop(r3);
 
   // Attempt to allocate new JSFunction in new space.
   __ AllocateInNewSpace(JSFunction::kSize,
                         r0,
                         r1,
                         r2,
                         &gc,
                         TAG_OBJECT);
 
+  __ IncrementCounter(counters->fast_new_closure_total(), 1, r6, r7);
+
   int map_index = (language_mode_ == CLASSIC_MODE)
       ? Context::FUNCTION_MAP_INDEX
       : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
 
   // Compute the function map in the current global context and set that
   // as the map of the allocated object.
   __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
-  __ ldr(r2, MemOperand(r2, Context::SlotOffset(map_index)));
-  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+  __ ldr(r5, MemOperand(r2, Context::SlotOffset(map_index)));
+  __ str(r5, FieldMemOperand(r0, HeapObject::kMapOffset));
 
   // Initialize the rest of the function. We don't have to update the
   // write barrier because the allocated object is in new space.
   __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
-  __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
-  __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+  __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
   __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
   __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
-  __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
+  __ str(r5, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
   __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
   __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
   __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
-  __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
 
   // Initialize the code pointer in the function to be the one
   // found in the shared function info object.
+  // But first check if there is an optimized version for our context.
+  Label check_optimized;
+  Label install_unoptimized;
+  if (FLAG_cache_optimized_code) {
+    __ ldr(r1,
+           FieldMemOperand(r3, SharedFunctionInfo::kOptimizedCodeMapOffset));
+    __ tst(r1, r1);
+    __ b(ne, &check_optimized);
+  }
+  __ bind(&install_unoptimized);
+  __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+  __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
   __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
   __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
 
   // Return result. The argument function info has been popped already.
   __ Ret();
 
+  __ bind(&check_optimized);
+
+  __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, r6, r7);
+
+  // r2 holds the global context; r1 points to a fixed array of 3-element
+  // entries (global context, optimized code, literals).
+  // The optimized code map must never be empty, so check the first entry.
+  Label install_optimized;
+  // Speculatively move the code object into r4.
+  __ ldr(r4, FieldMemOperand(r1, FixedArray::kHeaderSize + kPointerSize));
+  __ ldr(r5, FieldMemOperand(r1, FixedArray::kHeaderSize));
+  __ cmp(r2, r5);
+  __ b(eq, &install_optimized);
+  __ b(&install_unoptimized);
+
+  // Iterate through the rest of the map backwards. r4 holds an index as a Smi.
+  Label loop;
+  __ ldr(r4, FieldMemOperand(r1, FixedArray::kLengthOffset));
+  __ bind(&loop);
+  // Do not double-check the first entry.
+
+  __ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+  __ b(eq, &install_unoptimized);
+  __ sub(r4, r4, Operand(
+      Smi::FromInt(SharedFunctionInfo::kEntryLength)));  // Skip an entry.
+  __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ ldr(r5, MemOperand(r5));
+  __ cmp(r2, r5);
+  __ b(ne, &loop);
+  // Hit: fetch the optimized code.
+  __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ add(r5, r5, Operand(kPointerSize));
+  __ ldr(r4, MemOperand(r5));
+
+  __ bind(&install_optimized);
+  __ IncrementCounter(counters->fast_new_closure_install_optimized(),
+                      1, r6, r7);
+
+  // TODO(fschneider): Idea: store proper code pointers in the map and either
+  // unmangle them on marking or do nothing as the whole map is discarded on
+  // major GC anyway.
+  __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ str(r4, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
+
+  // Now link the function into the context's list of optimized functions.
+  __ ldr(r4, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
+
+  __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
+  // No write barrier needed, as the JSFunction (r0) is in new space.
+
+  __ str(r0, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
+  // Copy the JSFunction (r0) into r4 before issuing the write barrier,
+  // as it clobbers all the registers passed.
+  __ mov(r4, r0);
+  __ RecordWriteContextSlot(
+      r2,
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
+      r4,
+      r1,
+      kLRHasNotBeenSaved,
+      kDontSaveFPRegs);
+
+  // Return result. The argument function info has been popped already.
+  __ Ret();
+
   // Create a new closure through the slower runtime call.
   __ bind(&gc);
   __ LoadRoot(r4, Heap::kFalseValueRootIndex);
   __ Push(cp, r3, r4);
   __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
 }
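
For review context, the optimized-code-map lookup added above can be summarized in plain C++. The following is a minimal sketch with hypothetical stand-in types: the real stub walks a V8 FixedArray of tagged pointers and keeps the index as a Smi in r4, forming the byte offset with r4 << (kPointerSizeLog2 - kSmiTagSize). The sketch follows the commented intent of the patch; note that in this patch set the unconditional __ b(&install_unoptimized) after the first-entry check would keep the backward scan from ever being reached.

#include <cstddef>

// Hypothetical stand-in for V8's tagged heap values.
typedef void* Object;

// Each entry in the optimized code map is a 3-element record:
// (global context, optimized code, literals).
static const int kEntryLength = 3;

// Returns the cached optimized code for global_context, or NULL when the
// closure should fall back to the shared (unoptimized) code.
Object LookupOptimizedCode(Object* map, int length, Object global_context) {
  // The map is never empty, so the first entry is checked unconditionally;
  // the stub speculatively loads its code slot into r4 before comparing.
  if (map[0] == global_context) return map[1];
  // Scan the remaining entries backwards from the end of the array,
  // stopping before index kEntryLength so entry 0 is not checked twice.
  for (int i = length - kEntryLength; i >= kEntryLength; i -= kEntryLength) {
    if (map[i] == global_context) return map[i + 1];
  }
  return NULL;  // No code cached for this context.
}

Scanning from the end presumably favors the most recently cached context; kEntryLength here plays the role of SharedFunctionInfo::kEntryLength in the patch.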
 
 
 void FastNewContextStub::Generate(MacroAssembler* masm) {
   // Try to allocate the context in new space.
(...skipping 6980 matching lines...)
   // ElementsTransitionGenerator::GenerateMapChangeElementTransition
   // and ElementsTransitionGenerator::GenerateSmiToDouble
   // and ElementsTransitionGenerator::GenerateDoubleToObject
   { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET },
   { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET },
   // ElementsTransitionGenerator::GenerateDoubleToObject
   { REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET },
   { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET },
   // StoreArrayLiteralElementStub::Generate
   { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
+  // FastNewClosureStub::Generate
+  { REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET },
   // Null termination.
   { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
 };
 
 #undef REG
 
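The new table entry mirrors the RecordWriteContextSlot call added in FastNewClosureStub::Generate: object r2 (the global context), value r4 (the new closure), and scratch/address r1. Stubs listed here are pregenerated ahead of time, and IsPregenerated() below walks the null-terminated table to recognize such register triples. A minimal sketch of that kind of scan, using hypothetical stand-in types rather than V8's Register and stub classes:

// Stand-in for one row of a kAheadOfTime-style table; -1 plays the role
// of no_reg in the sentinel entry.
struct StubListEntry {
  int object;
  int value;
  int address;
};

// Returns true if the (object, value, address) triple appears in the
// null-terminated table.
bool IsPregeneratedTriple(const StubListEntry* table,
                          int object, int value, int address) {
  // Walk until the sentinel entry whose object register is no_reg (-1).
  for (const StubListEntry* entry = table; entry->object != -1; ++entry) {
    if (entry->object == object &&
        entry->value == value &&
        entry->address == address) {
      return true;
    }
  }
  return false;
}

The sentinel row with no_reg registers serves the same purpose as the -1 sentinel in this sketch: the walk stops without needing a separate length field.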
 bool RecordWriteStub::IsPregenerated() {
   for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
        !entry->object.is(no_reg);
        entry++) {
(...skipping 285 matching lines...)
     __ StoreNumberToDoubleElements(r0, r3, r1, r5, r6, r7, r9, r2,
                                    &slow_elements);
     __ Ret();
   }
 
 #undef __
 
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_ARM
