Chromium Code Reviews

Unified Diff: src/x64/code-stubs-x64.cc

Issue 10103035: Share optimized code for closures. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: added x64 and ARM ports (created 8 years, 7 months ago)
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 44 matching lines...)
   __ pop(rcx);  // Pop return address.
   __ push(rax);
   __ push(rcx);  // Push return address.
   __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
 }
 
 
 void FastNewClosureStub::Generate(MacroAssembler* masm) {
   // Create a new closure from the given function info in new
   // space. Set the context to the current context in rsi.
+  Counters* counters = masm->isolate()->counters();
+
   Label gc;
   __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
 
+  __ IncrementCounter(counters->fast_new_closure_total(), 1);
+
   // Get the function info from the stack.
   __ movq(rdx, Operand(rsp, 1 * kPointerSize));
 
   int map_index = (language_mode_ == CLASSIC_MODE)
       ? Context::FUNCTION_MAP_INDEX
       : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
 
   // Compute the function map in the current global context and set that
   // as the map of the allocated object.
   __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
-  __ movq(rcx, Operand(rcx, Context::SlotOffset(map_index)));
-  __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx);
+  __ movq(rbx, Operand(rcx, Context::SlotOffset(map_index)));
+  __ movq(FieldOperand(rax, JSObject::kMapOffset), rbx);
 
   // Initialize the rest of the function. We don't have to update the
   // write barrier because the allocated object is in new space.
   __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
-  __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
+  __ LoadRoot(r8, Heap::kTheHoleValueRootIndex);
   __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
   __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
   __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
-  __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), rcx);
+  __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), r8);
   __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
   __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
   __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
-  __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), rdi);
 
   // Initialize the code pointer in the function to be the one
   // found in the shared function info object.
+  // But first check if there is an optimized version for our context.
+  Label check_optimized;
+  Label install_unoptimized;
+  if (FLAG_cache_optimized_code) {
+    __ movq(rbx,
+            FieldOperand(rdx, SharedFunctionInfo::kOptimizedCodeMapOffset));
+    __ testq(rbx, rbx);
+    __ j(not_zero, &check_optimized, Label::kNear);
+  }
+  __ bind(&install_unoptimized);
+  __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset),
+          rdi);  // Initialize with undefined.
   __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
   __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
   __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
 
+  // Return and remove the on-stack parameter.
+  __ ret(1 * kPointerSize);
+
+  __ bind(&check_optimized);
+
+  __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1);
+
+  // rcx holds the global context, rbx points to a fixed array of 3-element
+  // entries (global context, optimized code, literals).
+  // The optimized code map must never be empty, so check the first entry.
+  Label install_optimized;
+  const int kEntryLength = 3;
+  // Speculatively move the code object into rdx.
+  __ movq(rdx, FieldOperand(rbx, FixedArray::kHeaderSize + kPointerSize));
+  __ cmpq(rcx, FieldOperand(rbx, FixedArray::kHeaderSize));
+  __ j(equal, &install_optimized);
+
+  // Iterate through the rest of the map backwards. rdx holds an index.
+  Label loop;
+  Label restore;
+  __ movq(rdx, FieldOperand(rbx, FixedArray::kLengthOffset));
+  __ SmiToInteger32(rdx, rdx);
+  __ bind(&loop);
+  // Do not double check first entry.
+  __ cmpq(rdx, Immediate(kEntryLength));
+  __ j(equal, &restore);
+  __ subq(rdx, Immediate(kEntryLength));  // Skip an entry.
+  __ cmpq(rcx, FieldOperand(rbx,
+                            rdx,
+                            times_pointer_size,
+                            FixedArray::kHeaderSize));
+  __ j(not_equal, &loop, Label::kNear);
+  // Hit: fetch the optimized code.
+  __ movq(rdx, FieldOperand(rbx,
+                            rdx,
+                            times_pointer_size,
+                            FixedArray::kHeaderSize + 1 * kPointerSize));
+
+  __ bind(&install_optimized);
+  __ IncrementCounter(counters->fast_new_closure_install_optimized(), 1);
+
+  // TODO(fschneider): Idea: store proper code pointers in the map and either
+  // unmangle them on marking or do nothing as the whole map is discarded on
+  // major GC anyway.
+  __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
+  __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
+
+  // Now link the function into the list of optimized functions.
+  __ movq(rdx, ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST));
+
+  __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), rdx);
+  // No need for a write barrier as the JSFunction (rax) is in new space.
+
+  __ movq(ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST), rax);
+  // Store the JSFunction (rax) into rdx before issuing the write barrier, as
+  // it clobbers all the registers passed.
+  __ movq(rdx, rax);
+  __ RecordWriteContextSlot(
+      rcx,
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
+      rdx,
+      rbx,
+      kDontSaveFPRegs);
 
   // Return and remove the on-stack parameter.
   __ ret(1 * kPointerSize);
 
+  __ bind(&restore);
+  __ movq(rdx, Operand(rsp, 1 * kPointerSize));
+  __ jmp(&install_unoptimized);
+
   // Create a new closure through the slower runtime call.
   __ bind(&gc);
   __ pop(rcx);  // Temporarily remove return address.
   __ pop(rdx);
   __ push(rsi);
   __ push(rdx);
   __ PushRoot(Heap::kFalseValueRootIndex);
   __ push(rcx);  // Restore return address.
   __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
 }
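The fast path added above consults the SharedFunctionInfo's optimized code map, a FixedArray of 3-element entries (global context, optimized code, literals): it speculatively checks the first entry, otherwise walks the remaining entries backwards, and falls through to installing the unoptimized code when nothing matches the current global context. Below is a minimal standalone C++ sketch of that lookup; CodeMapEntry and FindOptimizedCode are hypothetical names and the heap types are stand-ins, not the real V8 classes.

// Standalone sketch only: models the stub's lookup over the optimized code
// map; it is not V8 code and the types/names are invented for illustration.
#include <vector>

struct Context;  // stand-in for a global context object
struct Code;     // stand-in for a compiled code object
struct Object;   // stand-in for a literals array

struct CodeMapEntry {
  Context* global_context;  // key: the context the optimized code was made for
  Code* code;               // value: the optimized code object
  Object* literals;         // value: the literals array for that context
};

// Returns the optimized code for |global_context|, or nullptr if the caller
// should install the unoptimized code from the SharedFunctionInfo instead.
Code* FindOptimizedCode(const std::vector<CodeMapEntry>& map,
                        Context* global_context) {
  if (map.empty()) return nullptr;  // no map: take the unoptimized path
  // Fast path: check the first entry, mirroring the speculative load above.
  if (map.front().global_context == global_context) return map.front().code;
  // Slow path: walk the remaining entries backwards, never re-checking entry 0.
  for (size_t i = map.size(); i > 1; --i) {
    const CodeMapEntry& entry = map[i - 1];
    if (entry.global_context == global_context) return entry.code;
  }
  return nullptr;
}

On a hit the stub also links the new JSFunction into the context's OPTIMIZED_FUNCTIONS_LIST, which is why it needs the RecordWriteContextSlot call and the write-barrier registration further down in this file.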
(...skipping 5883 matching lines...)
   // and ElementsTransitionGenerator::GenerateDoubleToObject
   { REG(rdx), REG(rbx), REG(rdi), EMIT_REMEMBERED_SET},
   { REG(rdx), REG(rbx), REG(rdi), OMIT_REMEMBERED_SET},
   // ElementsTransitionGenerator::GenerateSmiOnlyToDouble
   // and ElementsTransitionGenerator::GenerateDoubleToObject
   { REG(rdx), REG(r11), REG(r15), EMIT_REMEMBERED_SET},
   // ElementsTransitionGenerator::GenerateDoubleToObject
   { REG(r11), REG(rax), REG(r15), EMIT_REMEMBERED_SET},
   // StoreArrayLiteralElementStub::Generate
   { REG(rbx), REG(rax), REG(rcx), EMIT_REMEMBERED_SET},
+  // FastNewClosureStub::Generate
+  { REG(rcx), REG(rdx), REG(rbx), EMIT_REMEMBERED_SET},
   // Null termination.
   { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
 };
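The new { rcx, rdx, rbx } entry registers the RecordWriteContextSlot call made by FastNewClosureStub::Generate, so that RecordWriteStub::IsPregenerated recognizes that register combination when it scans this null-terminated list. A rough standalone sketch of such a scan follows; Register, Entry, and the function signature are stand-ins, not V8's declarations, and the real check may compare additional stub state.

// Sketch only: models a null-terminated lookup like RecordWriteStub::IsPregenerated.
struct Register {
  int code;
  bool is(const Register& other) const { return code == other.code; }
};

const Register no_reg = { -1 };

struct Entry {
  Register object;
  Register value;
  Register address;
};

// Returns true if the (object, value, address) triple matches one of the
// ahead-of-time (pregenerated) write-barrier stub entries.
bool IsPregenerated(const Entry* ahead_of_time,
                    Register object, Register value, Register address) {
  for (const Entry* entry = ahead_of_time; !entry->object.is(no_reg); entry++) {
    if (entry->object.is(object) &&
        entry->value.is(value) &&
        entry->address.is(address)) {
      return true;
    }
  }
  return false;
}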
 
 #undef REG
 
 bool RecordWriteStub::IsPregenerated() {
   for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
        !entry->object.is(no_reg);
        entry++) {
(...skipping 308 matching lines...)
                                  xmm0,
                                  &slow_elements);
   __ ret(0);
 }
 
 #undef __
 
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_X64