Chromium Code Reviews

Unified Diff: src/arm/code-stubs-arm.cc

Issue 10878047: Revert to code state of 3.13.1 plus r12350 (Closed)
Base URL: https://v8.googlecode.com/svn/trunk
Patch Set: Created 8 years, 3 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 88 matching lines...)
                         r2,
                         &gc,
                         TAG_OBJECT);
 
   __ IncrementCounter(counters->fast_new_closure_total(), 1, r6, r7);
 
   int map_index = (language_mode_ == CLASSIC_MODE)
       ? Context::FUNCTION_MAP_INDEX
       : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
 
-  // Compute the function map in the current native context and set that
+  // Compute the function map in the current global context and set that
   // as the map of the allocated object.
-  __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
+  __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
   __ ldr(r5, MemOperand(r2, Context::SlotOffset(map_index)));
   __ str(r5, FieldMemOperand(r0, HeapObject::kMapOffset));
 
   // Initialize the rest of the function. We don't have to update the
   // write barrier because the allocated object is in new space.
   __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
   __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
   __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
   __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
   __ str(r5, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
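
Note (sketch): the loads in this hunk chase pointers from the current context to the
function map. A minimal C++ illustration of that chain, using invented stand-in types
rather than V8's real classes and offset arithmetic:

    // Stand-in types for the sketch; real V8 uses tagged pointers plus
    // Context::SlotOffset/FieldMemOperand offset computations.
    struct GlobalObject;
    struct Context {
      GlobalObject* global_object;  // what the GLOBAL_INDEX slot holds (1st ldr)
      void*         slots[64];      // further slots, including map_index
    };
    struct GlobalObject {
      Context* global_context;      // what kGlobalContextOffset points at (2nd ldr)
    };

    // cp -> global object -> global context -> function map (3rd ldr).
    void* LookupFunctionMap(Context* cp, int map_index) {
      return cp->global_object->global_context->slots[map_index];
    }
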
(...skipping 19 matching lines...)
   __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
 
   // Return result. The argument function info has been popped already.
   __ Ret();
 
   __ bind(&check_optimized);
 
   __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, r6, r7);
 
-  // r2 holds native context, r1 points to fixed array of 3-element entries
-  // (native context, optimized code, literals).
+  // r2 holds global context, r1 points to fixed array of 3-element entries
+  // (global context, optimized code, literals).
   // The optimized code map must never be empty, so check the first elements.
   Label install_optimized;
   // Speculatively move code object into r4.
   __ ldr(r4, FieldMemOperand(r1, FixedArray::kHeaderSize + kPointerSize));
   __ ldr(r5, FieldMemOperand(r1, FixedArray::kHeaderSize));
   __ cmp(r2, r5);
   __ b(eq, &install_optimized);
 
   // Iterate through the rest of map backwards. r4 holds an index as a Smi.
   Label loop;
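
Note (sketch): the optimized code map probed here is a fixed array of
(context, code, literals) triples; the stub checks the first entry and then walks the
remaining entries backwards. A hedged C++ equivalent of the lookup, with invented types:

    struct CodeMapEntry {
      void* context;   // global context the code was optimized for
      void* code;      // optimized code object
      void* literals;  // literals array
    };

    // Linear probe; the iteration order differs from the assembly (first
    // entry, then backwards), but the result is the same: the code object
    // cached for a matching context, if any.
    void* FindOptimizedCode(const CodeMapEntry* map, int count, void* ctx) {
      for (int i = 0; i < count; ++i) {
        if (map[i].context == ctx) return map[i].code;
      }
      return nullptr;  // caller falls back to the unoptimized path
    }
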
(...skipping 71 matching lines...)
   // Load the function from the stack.
   __ ldr(r3, MemOperand(sp, 0));
 
   // Set up the object header.
   __ LoadRoot(r1, Heap::kFunctionContextMapRootIndex);
   __ mov(r2, Operand(Smi::FromInt(length)));
   __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
   __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
 
   // Set up the fixed slots, copy the global object from the previous context.
-  __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ mov(r1, Operand(Smi::FromInt(0)));
   __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
   __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
   __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
-  __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
 
   // Initialize the rest of the slots to undefined.
   __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
   for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
     __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
   }
 
   // Remove the on-stack argument and return.
   __ mov(cp, r0);
   __ pop();
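
Note (sketch): the stores above lay out the fixed slots of the new function context.
An illustrative C++ rendering; the slot indices are assumptions for the sketch, not
V8's actual constant values:

    enum ContextSlot {
      CLOSURE_INDEX = 0,   // the function that owns this context
      PREVIOUS_INDEX,      // link to the enclosing context
      EXTENSION_INDEX,     // Smi 0 here: no extension object
      GLOBAL_INDEX,        // global object, copied from the previous context
      MIN_CONTEXT_SLOTS    // first of the variable slots
    };

    void InitFunctionContext(void** ctx, int length, void* closure,
                             void** prev, void* undefined_value) {
      ctx[CLOSURE_INDEX]   = closure;
      ctx[PREVIOUS_INDEX]  = prev;
      ctx[EXTENSION_INDEX] = nullptr;             // Smi::FromInt(0) in the stub
      ctx[GLOBAL_INDEX]    = prev[GLOBAL_INDEX];  // copy the global object
      for (int i = MIN_CONTEXT_SLOTS; i < length; ++i) {
        ctx[i] = undefined_value;                 // rest starts as undefined
      }
    }
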
(...skipping 22 matching lines...)
 
   // Load the serialized scope info from the stack.
   __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
 
   // Set up the object header.
   __ LoadRoot(r2, Heap::kBlockContextMapRootIndex);
   __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
   __ mov(r2, Operand(Smi::FromInt(length)));
   __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
 
-  // If this block context is nested in the native context we get a smi
+  // If this block context is nested in the global context we get a smi
   // sentinel instead of a function. The block context should get the
-  // canonical empty function of the native context as its closure which
+  // canonical empty function of the global context as its closure which
   // we still have to look up.
   Label after_sentinel;
   __ JumpIfNotSmi(r3, &after_sentinel);
   if (FLAG_debug_code) {
     const char* message = "Expected 0 as a Smi sentinel";
     __ cmp(r3, Operand::Zero());
     __ Assert(eq, message);
   }
   __ ldr(r3, GlobalObjectOperand());
-  __ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
+  __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
   __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
   __ bind(&after_sentinel);
 
   // Set up the fixed slots, copy the global object from the previous context.
-  __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+  __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
   __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
   __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
   __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
-  __ str(r2, ContextOperand(r0, Context::GLOBAL_OBJECT_INDEX));
+  __ str(r2, ContextOperand(r0, Context::GLOBAL_INDEX));
 
   // Initialize the rest of the slots to the hole value.
   __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
   for (int i = 0; i < slots_; i++) {
     __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS));
   }
 
   // Remove the on-stack argument and return.
   __ mov(cp, r0);
   __ add(sp, sp, Operand(2 * kPointerSize));
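
Note (sketch): the Smi-sentinel branch above means a block context nested directly in
the global context carries a Smi 0 where its closure would be, and the stub substitutes
the global context's canonical empty function. A small, self-contained C++ illustration
with an invented Smi check:

    #include <cassert>
    #include <cstdint>

    // Invented tag check for the sketch: V8 Smis have a clear low bit.
    static bool IsSmi(intptr_t value) { return (value & 1) == 0; }

    // closure_slot: the value found in r3; empty_function: the global
    // context's canonical empty function, looked up as in the diff.
    intptr_t ResolveBlockContextClosure(intptr_t closure_slot,
                                        intptr_t empty_function) {
      if (IsSmi(closure_slot)) {     // JumpIfNotSmi falls through here
        assert(closure_slot == 0);   // "Expected 0 as a Smi sentinel"
        return empty_function;
      }
      return closure_slot;           // already a real closure
    }
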
(...skipping 4175 matching lines...)
   __ add(r9, r9, Operand(FixedArray::kHeaderSize));
 
   // 3. Arguments object.
   __ add(r9, r9, Operand(Heap::kArgumentsObjectSize));
 
   // Do the allocation of all three objects in one go.
   __ AllocateInNewSpace(r9, r0, r3, r4, &runtime, TAG_OBJECT);
 
   // r0 = address of new object(s) (tagged)
   // r2 = argument count (tagged)
-  // Get the arguments boilerplate from the current native context into r4.
+  // Get the arguments boilerplate from the current (global) context into r4.
   const int kNormalOffset =
       Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
   const int kAliasedOffset =
       Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
 
-  __ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
+  __ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
   __ cmp(r1, Operand::Zero());
   __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
   __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
 
   // r0 = address of new object (tagged)
   // r1 = mapped parameter count (tagged)
   // r2 = argument count (tagged)
   // r4 = address of boilerplate object (tagged)
   // Copy the JS object part.
   for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
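
Note (sketch): the cmp plus the two predicated ldr instructions above form a branchless
select between the normal and aliased arguments boilerplates. The same choice in plain
C++; the slot indices are parameters here, standing in for the SlotOffset constants:

    #include <cstdint>

    // Exactly one of the two loads retires, depending on the flags set by
    // cmp r1, #0; the C++ conditional expresses the same selection.
    intptr_t SelectArgumentsBoilerplate(const intptr_t* global_context,
                                        intptr_t mapped_param_count,
                                        int normal_slot, int aliased_slot) {
      return mapped_param_count == 0
                 ? global_context[normal_slot]    // ldr r4, [...], eq
                 : global_context[aliased_slot];  // ldr r4, [...], ne
    }
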
(...skipping 152 matching lines...)
 
   // Do the allocation of both objects in one go.
   __ AllocateInNewSpace(r1,
                         r0,
                         r2,
                         r3,
                         &runtime,
                         static_cast<AllocationFlags>(TAG_OBJECT |
                                                      SIZE_IN_WORDS));
 
-  // Get the arguments boilerplate from the current native context.
-  __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
+  // Get the arguments boilerplate from the current (global) context.
+  __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
   __ ldr(r4, MemOperand(r4, Context::SlotOffset(
       Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
 
   // Copy the JS object part.
   __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize);
 
   // Get the length (smi tagged) and set that as an in-object property too.
   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
   __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
   __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize +
(...skipping 494 matching lines...)
                         &slowcase,
                         static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
   // r0: Start of allocated area, object-tagged.
   // r1: Number of elements in array, as smi.
   // r5: Number of elements, untagged.
 
   // Set JSArray map to global.regexp_result_map().
   // Set empty properties FixedArray.
   // Set elements to point to FixedArray allocated right after the JSArray.
   // Interleave operations for better latency.
-  __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+  __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
   __ add(r3, r0, Operand(JSRegExpResult::kSize));
   __ mov(r4, Operand(factory->empty_fixed_array()));
-  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
+  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
   __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
   __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
   __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
   __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
 
   // Set input, index and length fields from arguments.
   __ ldr(r1, MemOperand(sp, kPointerSize * 0));
   __ ldr(r2, MemOperand(sp, kPointerSize * 1));
   __ ldr(r6, MemOperand(sp, kPointerSize * 2));
   __ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
(...skipping 85 matching lines...)
   // function stub.
   if (ReceiverMightBeImplicit()) {
     Label call;
     // Get the receiver from the stack.
     // function, receiver [, arguments]
     __ ldr(r4, MemOperand(sp, argc_ * kPointerSize));
     // Call as function is indicated with the hole.
     __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
     __ b(ne, &call);
     // Patch the receiver on the stack with the global receiver object.
-    __ ldr(r3,
-           MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+    __ ldr(r3, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
     __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalReceiverOffset));
     __ str(r3, MemOperand(sp, argc_ * kPointerSize));
     __ bind(&call);
   }
 
   // Check that the function is really a JavaScript function.
   // r1: pushed function (to be verified)
   __ JumpIfSmi(r1, &non_function);
   // Get the map of the function object.
   __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
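
Note (sketch): when the stub may be invoked with an implicit receiver, the receiver slot
on the stack holds the hole sentinel and gets patched with the global receiver object
before the call proceeds. A C++ rendering with stand-in parameters:

    #include <cstddef>

    // stack[argc] is the receiver slot; the_hole and global_receiver stand
    // in for the corresponding V8 root and GlobalObject field.
    void PatchImplicitReceiver(void** stack, size_t argc,
                               void* the_hole, void* global_receiver) {
      if (stack[argc] == the_hole) {    // CompareRoot(r4, kTheHoleValueRootIndex)
        stack[argc] = global_receiver;  // str r3, [sp, argc * kPointerSize]
      }
    }
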
(...skipping 2285 matching lines...)
 
   __ Pop(lr, r5, r1);
   __ Ret();
 }
 
 #undef __
 
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_ARM
