Chromium Code Reviews

Unified diff: src/mips/code-stubs-mips.cc

Issue 10878047: Revert to code state of 3.13.1 plus r12350 (Closed)
Base URL: https://v8.googlecode.com/svn/trunk
Patch Set: Created 8 years, 4 months ago
@@ -1,10 +1,10 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 // * Redistributions of source code must retain the above copyright
 // notice, this list of conditions and the following disclaimer.
 // * Redistributions in binary form must reproduce the above
 // copyright notice, this list of conditions and the following
 // disclaimer in the documentation and/or other materials provided
@@ -101,24 +101,24 @@
 a2,
 &gc,
 TAG_OBJECT);

 __ IncrementCounter(counters->fast_new_closure_total(), 1, t2, t3);

 int map_index = (language_mode_ == CLASSIC_MODE)
 ? Context::FUNCTION_MAP_INDEX
 : Context::STRICT_MODE_FUNCTION_MAP_INDEX;

-// Compute the function map in the current native context and set that
+// Compute the function map in the current global context and set that
 // as the map of the allocated object.
-__ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-__ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
+__ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+__ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
 __ lw(t1, MemOperand(a2, Context::SlotOffset(map_index)));
 __ sw(t1, FieldMemOperand(v0, HeapObject::kMapOffset));

 // Initialize the rest of the function. We don't have to update the
 // write barrier because the allocated object is in new space.
 __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
 __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
 __ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
 __ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset));
 __ sw(t1, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
@@ -144,22 +144,22 @@
 __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));

 // Return result. The argument function info has been popped already.
 __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
 __ Ret();

 __ bind(&check_optimized);

 __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, t2, t3);

-// a2 holds native context, a1 points to fixed array of 3-element entries
-// (native context, optimized code, literals).
+// a2 holds global context, a1 points to fixed array of 3-element entries
+// (global context, optimized code, literals).
 // The optimized code map must never be empty, so check the first elements.
 Label install_optimized;
 // Speculatively move code object into t0.
 __ lw(t0, FieldMemOperand(a1, FixedArray::kHeaderSize + kPointerSize));
 __ lw(t1, FieldMemOperand(a1, FixedArray::kHeaderSize));
 __ Branch(&install_optimized, eq, a2, Operand(t1));

 // Iterate through the rest of map backwards. t0 holds an index as a Smi.
 Label loop;
 __ lw(t0, FieldMemOperand(a1, FixedArray::kLengthOffset));
@@ -237,26 +237,26 @@
 // Load the function from the stack.
 __ lw(a3, MemOperand(sp, 0));

 // Set up the object header.
 __ LoadRoot(a1, Heap::kFunctionContextMapRootIndex);
 __ li(a2, Operand(Smi::FromInt(length)));
 __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
 __ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));

 // Set up the fixed slots, copy the global object from the previous context.
-__ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+__ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
 __ li(a1, Operand(Smi::FromInt(0)));
 __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
 __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
 __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
-__ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+__ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));

 // Initialize the rest of the slots to undefined.
 __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
 for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
 __ sw(a1, MemOperand(v0, Context::SlotOffset(i)));
 }

 // Remove the on-stack argument and return.
 __ mov(cp, v0);
 __ DropAndRet(1);
@@ -284,41 +284,41 @@

 // Load the serialized scope info from the stack.
 __ lw(a1, MemOperand(sp, 1 * kPointerSize));

 // Set up the object header.
 __ LoadRoot(a2, Heap::kBlockContextMapRootIndex);
 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
 __ li(a2, Operand(Smi::FromInt(length)));
 __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));

-// If this block context is nested in the native context we get a smi
+// If this block context is nested in the global context we get a smi
 // sentinel instead of a function. The block context should get the
-// canonical empty function of the native context as its closure which
+// canonical empty function of the global context as its closure which
 // we still have to look up.
 Label after_sentinel;
 __ JumpIfNotSmi(a3, &after_sentinel);
 if (FLAG_debug_code) {
 const char* message = "Expected 0 as a Smi sentinel";
 __ Assert(eq, message, a3, Operand(zero_reg));
 }
 __ lw(a3, GlobalObjectOperand());
-__ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
+__ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset));
 __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
 __ bind(&after_sentinel);

 // Set up the fixed slots, copy the global object from the previous context.
-__ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+__ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
 __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
 __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
 __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
-__ sw(a2, ContextOperand(v0, Context::GLOBAL_OBJECT_INDEX));
+__ sw(a2, ContextOperand(v0, Context::GLOBAL_INDEX));

 // Initialize the rest of the slots to the hole value.
 __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
 for (int i = 0; i < slots_; i++) {
 __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS));
 }

 // Remove the on-stack argument and return.
 __ mov(cp, v0);
 __ DropAndRet(2);
@@ -4639,28 +4639,28 @@
 __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));

 // 3. Arguments object.
 __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));

 // Do the allocation of all three objects in one go.
 __ AllocateInNewSpace(t5, v0, a3, t0, &runtime, TAG_OBJECT);

 // v0 = address of new object(s) (tagged)
 // a2 = argument count (tagged)
-// Get the arguments boilerplate from the current native context into t0.
+// Get the arguments boilerplate from the current (global) context into t0.
 const int kNormalOffset =
 Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
 const int kAliasedOffset =
 Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);

-__ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-__ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
+__ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+__ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
 Label skip2_ne, skip2_eq;
 __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
 __ lw(t0, MemOperand(t0, kNormalOffset));
 __ bind(&skip2_ne);

 __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
 __ lw(t0, MemOperand(t0, kAliasedOffset));
 __ bind(&skip2_eq);

 // v0 = address of new object (tagged)
@@ -4834,23 +4834,23 @@

 // Do the allocation of both objects in one go.
 __ AllocateInNewSpace(a1,
 v0,
 a2,
 a3,
 &runtime,
 static_cast<AllocationFlags>(TAG_OBJECT |
 SIZE_IN_WORDS));

-// Get the arguments boilerplate from the current native context.
-__ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-__ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
+// Get the arguments boilerplate from the current (global) context.
+__ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+__ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
 __ lw(t0, MemOperand(t0, Context::SlotOffset(
 Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));

 // Copy the JS object part.
 __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);

 // Get the length (smi tagged) and set that as an in-object property too.
 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
 __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
@@ -5369,24 +5369,24 @@
 &slowcase,
 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
 // v0: Start of allocated area, object-tagged.
 // a1: Number of elements in array, as smi.
 // t1: Number of elements, untagged.

 // Set JSArray map to global.regexp_result_map().
 // Set empty properties FixedArray.
 // Set elements to point to FixedArray allocated right after the JSArray.
 // Interleave operations for better latency.
-__ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+__ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
 __ Addu(a3, v0, Operand(JSRegExpResult::kSize));
 __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
-__ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
+__ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
 __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
 __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));

 // Set input, index and length fields from arguments.
 __ lw(a1, MemOperand(sp, kPointerSize * 0));
 __ lw(a2, MemOperand(sp, kPointerSize * 1));
 __ lw(t2, MemOperand(sp, kPointerSize * 2));
 __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
@@ -5481,22 +5481,21 @@
 // function stub.
 if (ReceiverMightBeImplicit()) {
 Label call;
 // Get the receiver from the stack.
 // function, receiver [, arguments]
 __ lw(t0, MemOperand(sp, argc_ * kPointerSize));
 // Call as function is indicated with the hole.
 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
 __ Branch(&call, ne, t0, Operand(at));
 // Patch the receiver on the stack with the global receiver object.
-__ lw(a3,
-MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+__ lw(a3, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
 __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalReceiverOffset));
 __ sw(a3, MemOperand(sp, argc_ * kPointerSize));
 __ bind(&call);
 }

 // Check that the function is really a JavaScript function.
 // a1: pushed function (to be verified)
 __ JumpIfSmi(a1, &non_function);
 // Get the map of the function object.
 __ GetObjectType(a1, a3, a3);
@@ -7825,10 +7824,10 @@
 __ Pop(ra, t1, a1);
 __ Ret();
 }


 #undef __

 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_MIPS
