OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 44 matching lines...)
55 __ pop(rcx); // Pop return address. | 55 __ pop(rcx); // Pop return address. |
56 __ push(rax); | 56 __ push(rax); |
57 __ push(rcx); // Push return address. | 57 __ push(rcx); // Push return address. |
58 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION); | 58 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION); |
59 } | 59 } |
60 | 60 |
61 | 61 |
62 void FastNewClosureStub::Generate(MacroAssembler* masm) { | 62 void FastNewClosureStub::Generate(MacroAssembler* masm) { |
63 // Create a new closure from the given function info in new | 63 // Create a new closure from the given function info in new |
64 // space. Set the context to the current context in rsi. | 64 // space. Set the context to the current context in rsi. |
| 65 Counters* counters = masm->isolate()->counters(); |
| 66 |
65 Label gc; | 67 Label gc; |
66 __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT); | 68 __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT); |
67 | 69 |
| 70 __ IncrementCounter(counters->fast_new_closure_total(), 1); |
| 71 |
68 // Get the function info from the stack. | 72 // Get the function info from the stack. |
69 __ movq(rdx, Operand(rsp, 1 * kPointerSize)); | 73 __ movq(rdx, Operand(rsp, 1 * kPointerSize)); |
70 | 74 |
71 int map_index = (language_mode_ == CLASSIC_MODE) | 75 int map_index = (language_mode_ == CLASSIC_MODE) |
72 ? Context::FUNCTION_MAP_INDEX | 76 ? Context::FUNCTION_MAP_INDEX |
73 : Context::STRICT_MODE_FUNCTION_MAP_INDEX; | 77 : Context::STRICT_MODE_FUNCTION_MAP_INDEX; |
74 | 78 |
75 // Compute the function map in the current global context and set that | 79 // Compute the function map in the current global context and set that |
76 // as the map of the allocated object. | 80 // as the map of the allocated object. |
77 __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX))); | 81 __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX))); |
78 __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset)); | 82 __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset)); |
79 __ movq(rcx, Operand(rcx, Context::SlotOffset(map_index))); | 83 __ movq(rbx, Operand(rcx, Context::SlotOffset(map_index))); |
80 __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx); | 84 __ movq(FieldOperand(rax, JSObject::kMapOffset), rbx); |
81 | 85 |
82 // Initialize the rest of the function. We don't have to update the | 86 // Initialize the rest of the function. We don't have to update the |
83 // write barrier because the allocated object is in new space. | 87 // write barrier because the allocated object is in new space. |
84 __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex); | 88 __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex); |
85 __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex); | 89 __ LoadRoot(r8, Heap::kTheHoleValueRootIndex); |
86 __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex); | 90 __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex); |
87 __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx); | 91 __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx); |
88 __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx); | 92 __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx); |
89 __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), rcx); | 93 __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), r8); |
90 __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx); | 94 __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx); |
91 __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi); | 95 __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi); |
92 __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx); | 96 __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx); |
93 __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), rdi); | |
94 | 97 |
95 // Initialize the code pointer in the function to be the one | 98 // Initialize the code pointer in the function to be the one |
96 // found in the shared function info object. | 99 // found in the shared function info object. |
| 100 // But first check if there is an optimized version for our context. |
| 101 Label check_optimized; |
| 102 Label install_unoptimized; |
| 103 if (FLAG_cache_optimized_code) { |
| 104 __ movq(rbx, |
| 105 FieldOperand(rdx, SharedFunctionInfo::kOptimizedCodeMapOffset)); |
| 106 __ testq(rbx, rbx); |
| 107 __ j(not_zero, &check_optimized, Label::kNear); |
| 108 } |
| 109 __ bind(&install_unoptimized); |
| 110 __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), |
| 111 rdi); // Initialize with undefined. |
97 __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset)); | 112 __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset)); |
98 __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize)); | 113 __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize)); |
99 __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx); | 114 __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx); |
100 | 115 |
| 116 // Return and remove the on-stack parameter. |
| 117 __ ret(1 * kPointerSize); |
| 118 |
| 119 __ bind(&check_optimized); |
| 120 |
| 121 __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1); |
| 122 |
| 123 // rcx holds global context, rbx points to fixed array of 3-element entries |
| 124 // (global context, optimized code, literals). |
| 125 // The optimized code map must never be empty, so check the first entry. |
| 126 Label install_optimized; |
| 127 // Speculatively move code object into rdx. |
| 128 __ movq(rdx, FieldOperand(rbx, FixedArray::kHeaderSize + kPointerSize)); |
| 129 __ cmpq(rcx, FieldOperand(rbx, FixedArray::kHeaderSize)); |
| 130 __ j(equal, &install_optimized); |
| 131 |
| 132 // Iterate through the rest of the map backwards. rdx holds an index. |
| 133 Label loop; |
| 134 Label restore; |
| 135 __ movq(rdx, FieldOperand(rbx, FixedArray::kLengthOffset)); |
| 136 __ SmiToInteger32(rdx, rdx); |
| 137 __ bind(&loop); |
| 138 // Do not double-check the first entry. |
| 139 __ cmpq(rdx, Immediate(SharedFunctionInfo::kEntryLength)); |
| 140 __ j(equal, &restore); |
| 141 __ subq(rdx, Immediate(SharedFunctionInfo::kEntryLength)); // Skip an entry. |
| 142 __ cmpq(rcx, FieldOperand(rbx, |
| 143 rdx, |
| 144 times_pointer_size, |
| 145 FixedArray::kHeaderSize)); |
| 146 __ j(not_equal, &loop, Label::kNear); |
| 147 // Hit: fetch the optimized code. |
| 148 __ movq(rdx, FieldOperand(rbx, |
| 149 rdx, |
| 150 times_pointer_size, |
| 151 FixedArray::kHeaderSize + 1 * kPointerSize)); |
| 152 |
| 153 __ bind(&install_optimized); |
| 154 __ IncrementCounter(counters->fast_new_closure_install_optimized(), 1); |
| 155 |
| 156 // TODO(fschneider): Idea: store proper code pointers in the map and either |
| 157 // unmangle them on marking or do nothing as the whole map is discarded on |
| 158 // major GC anyway. |
| 159 __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize)); |
| 160 __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx); |
| 161 |
| 162 // Now link the function into the list of optimized functions. |
| 163 __ movq(rdx, ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST)); |
| 164 |
| 165 __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), rdx); |
| 166 // No need for write barrier as JSFunction (rax) is in the new space. |
| 167 |
| 168 __ movq(ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST), rax); |
| 169 // Store JSFunction (rax) into rdx before issuing write barrier as |
| 170 // it clobbers all the registers passed. |
| 171 __ movq(rdx, rax); |
| 172 __ RecordWriteContextSlot( |
| 173 rcx, |
| 174 Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST), |
| 175 rdx, |
| 176 rbx, |
| 177 kDontSaveFPRegs); |
101 | 178 |
102 // Return and remove the on-stack parameter. | 179 // Return and remove the on-stack parameter. |
103 __ ret(1 * kPointerSize); | 180 __ ret(1 * kPointerSize); |
104 | 181 |
| 182 __ bind(&restore); |
| 183 __ movq(rdx, Operand(rsp, 1 * kPointerSize)); |
| 184 __ jmp(&install_unoptimized); |
| 185 |
105 // Create a new closure through the slower runtime call. | 186 // Create a new closure through the slower runtime call. |
106 __ bind(&gc); | 187 __ bind(&gc); |
107 __ pop(rcx); // Temporarily remove return address. | 188 __ pop(rcx); // Temporarily remove return address. |
108 __ pop(rdx); | 189 __ pop(rdx); |
109 __ push(rsi); | 190 __ push(rsi); |
110 __ push(rdx); | 191 __ push(rdx); |
111 __ PushRoot(Heap::kFalseValueRootIndex); | 192 __ PushRoot(Heap::kFalseValueRootIndex); |
112 __ push(rcx); // Restore return address. | 193 __ push(rcx); // Restore return address. |
113 __ TailCallRuntime(Runtime::kNewClosure, 3, 1); | 194 __ TailCallRuntime(Runtime::kNewClosure, 3, 1); |
114 } | 195 } |
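For reference, the cache lookup that the new stub code performs over the SharedFunctionInfo's optimized code map can be sketched in C++ as below. This is a minimal, self-contained illustration under stated assumptions: the Object typedef, the std::vector stand-in for the FixedArray, and the LookupOptimizedCode helper are hypothetical names, not V8 declarations; only the entry layout (kEntryLength consecutive slots holding global context, optimized code, literals) and the backwards scan are taken from the stub above.

#include <cstddef>
#include <vector>

namespace sketch {

typedef void* Object;                  // Stand-in for a tagged V8 pointer (hypothetical).
static const size_t kEntryLength = 3;  // [global context, optimized code, literals]

// Returns the cached optimized code for global_context, or NULL on a miss,
// in which case the stub installs the unoptimized code instead.
inline Object LookupOptimizedCode(const std::vector<Object>& code_map,
                                  Object global_context) {
  // The map is never empty, so the first entry is checked unconditionally.
  if (code_map[0] == global_context) return code_map[1];
  // Walk the remaining entries backwards, stopping before entry 0,
  // which was already checked above.
  for (size_t i = code_map.size(); i != kEntryLength; ) {
    i -= kEntryLength;
    if (code_map[i] == global_context) return code_map[i + 1];
  }
  return NULL;
}

}  // namespace sketch

A hit corresponds to the install_optimized path above (install the cached code entry and link the closure into the context's OPTIMIZED_FUNCTIONS_LIST); a miss corresponds to the restore / install_unoptimized path.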
(...skipping 5892 matching lines...)
6007 // and ElementsTransitionGenerator::GenerateDoubleToObject | 6088 // and ElementsTransitionGenerator::GenerateDoubleToObject |
6008 { REG(rdx), REG(rbx), REG(rdi), EMIT_REMEMBERED_SET}, | 6089 { REG(rdx), REG(rbx), REG(rdi), EMIT_REMEMBERED_SET}, |
6009 { REG(rdx), REG(rbx), REG(rdi), OMIT_REMEMBERED_SET}, | 6090 { REG(rdx), REG(rbx), REG(rdi), OMIT_REMEMBERED_SET}, |
6010 // ElementsTransitionGenerator::GenerateSmiToDouble | 6091 // ElementsTransitionGenerator::GenerateSmiToDouble |
6011 // and ElementsTransitionGenerator::GenerateDoubleToObject | 6092 // and ElementsTransitionGenerator::GenerateDoubleToObject |
6012 { REG(rdx), REG(r11), REG(r15), EMIT_REMEMBERED_SET}, | 6093 { REG(rdx), REG(r11), REG(r15), EMIT_REMEMBERED_SET}, |
6013 // ElementsTransitionGenerator::GenerateDoubleToObject | 6094 // ElementsTransitionGenerator::GenerateDoubleToObject |
6014 { REG(r11), REG(rax), REG(r15), EMIT_REMEMBERED_SET}, | 6095 { REG(r11), REG(rax), REG(r15), EMIT_REMEMBERED_SET}, |
6015 // StoreArrayLiteralElementStub::Generate | 6096 // StoreArrayLiteralElementStub::Generate |
6016 { REG(rbx), REG(rax), REG(rcx), EMIT_REMEMBERED_SET}, | 6097 { REG(rbx), REG(rax), REG(rcx), EMIT_REMEMBERED_SET}, |
| 6098 // FastNewClosureStub::Generate |
| 6099 { REG(rcx), REG(rdx), REG(rbx), EMIT_REMEMBERED_SET}, |
6017 // Null termination. | 6100 // Null termination. |
6018 { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET} | 6101 { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET} |
6019 }; | 6102 }; |
6020 | 6103 |
6021 #undef REG | 6104 #undef REG |
6022 | 6105 |
6023 bool RecordWriteStub::IsPregenerated() { | 6106 bool RecordWriteStub::IsPregenerated() { |
6024 for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; | 6107 for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; |
6025 !entry->object.is(no_reg); | 6108 !entry->object.is(no_reg); |
6026 entry++) { | 6109 entry++) { |
(...skipping 308 matching lines...)
6335 xmm0, | 6418 xmm0, |
6336 &slow_elements); | 6419 &slow_elements); |
6337 __ ret(0); | 6420 __ ret(0); |
6338 } | 6421 } |
6339 | 6422 |
6340 #undef __ | 6423 #undef __ |
6341 | 6424 |
6342 } } // namespace v8::internal | 6425 } } // namespace v8::internal |
6343 | 6426 |
6344 #endif // V8_TARGET_ARCH_X64 | 6427 #endif // V8_TARGET_ARCH_X64 |