OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 63 matching lines...)
74 __ Subu(s1, s1, kPointerSize); | 74 __ Subu(s1, s1, kPointerSize); |
75 __ JumpToExternalReference(ExternalReference(id, masm->isolate())); | 75 __ JumpToExternalReference(ExternalReference(id, masm->isolate())); |
76 } | 76 } |
77 | 77 |
78 | 78 |
79 // Load the built-in InternalArray function from the current context. | 79 // Load the built-in InternalArray function from the current context. |
80 static void GenerateLoadInternalArrayFunction(MacroAssembler* masm, | 80 static void GenerateLoadInternalArrayFunction(MacroAssembler* masm, |
81 Register result) { | 81 Register result) { |
82 // Load the native context. | 82 // Load the native context. |
83 | 83 |
84 __ lw(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); | 84 __ lw(result, |
| 85 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
85 __ lw(result, | 86 __ lw(result, |
86 FieldMemOperand(result, GlobalObject::kNativeContextOffset)); | 87 FieldMemOperand(result, GlobalObject::kNativeContextOffset)); |
87 // Load the InternalArray function from the native context. | 88 // Load the InternalArray function from the native context. |
88 __ lw(result, | 89 __ lw(result, |
89 MemOperand(result, | 90 MemOperand(result, |
90 Context::SlotOffset( | 91 Context::SlotOffset( |
91 Context::INTERNAL_ARRAY_FUNCTION_INDEX))); | 92 Context::INTERNAL_ARRAY_FUNCTION_INDEX))); |
92 } | 93 } |
93 | 94 |
94 | 95 |
95 // Load the built-in Array function from the current context. | 96 // Load the built-in Array function from the current context. |
96 static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) { | 97 static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) { |
97 // Load the native context. | 98 // Load the native context. |
98 | 99 |
99 __ lw(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); | 100 __ lw(result, |
| 101 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
100 __ lw(result, | 102 __ lw(result, |
101 FieldMemOperand(result, GlobalObject::kNativeContextOffset)); | 103 FieldMemOperand(result, GlobalObject::kNativeContextOffset)); |
102 // Load the Array function from the native context. | 104 // Load the Array function from the native context. |
103 __ lw(result, | 105 __ lw(result, |
104 MemOperand(result, | 106 MemOperand(result, |
105 Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX))); | 107 Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX))); |
106 } | 108 } |
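
Note: for readers following the GLOBAL_INDEX -> GLOBAL_OBJECT_INDEX rename, both loaders above perform the same three dependent loads: context slot -> global object -> native context -> builtin function slot. Below is a minimal plain-C++ sketch of that chain; the struct layouts, slot index values, and the LoadInternalArrayFunction helper are hypothetical stand-ins for the real v8::internal classes, with heap-object tagging abstracted away.

#include <cstddef>

// All layouts and index values here are illustrative assumptions; the real
// v8::internal classes are tagged heap objects with different offsets.
struct JSFunction {};

struct NativeContext {
  static const size_t kInternalArrayFunctionIndex = 10;  // assumed value
  static const size_t kArrayFunctionIndex = 11;          // assumed value
  void* slots[64];  // models Context::SlotOffset(index) indexing
};

struct GlobalObject {
  NativeContext* native_context;  // models GlobalObject::kNativeContextOffset
};

struct Context {
  static const size_t kGlobalObjectIndex = 3;  // GLOBAL_OBJECT_INDEX, assumed
  void* slots[64];
};

// Mirrors GenerateLoadInternalArrayFunction: three dependent loads, one per
// `lw` above (context slot -> global object field -> native context slot).
inline JSFunction* LoadInternalArrayFunction(Context* cp) {
  GlobalObject* global =
      static_cast<GlobalObject*>(cp->slots[Context::kGlobalObjectIndex]);
  NativeContext* native = global->native_context;
  return static_cast<JSFunction*>(
      native->slots[NativeContext::kInternalArrayFunctionIndex]);
}

GenerateLoadArrayFunction is identical except for the final slot index.
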
107 | 109 |
108 | 110 |
109 // Allocate an empty JSArray. The allocated array is put into the result | 111 // Allocate an empty JSArray. The allocated array is put into the result |
(...skipping 1313 matching lines...)
1423 __ sll(at, a0, kPointerSizeLog2); | 1425 __ sll(at, a0, kPointerSizeLog2); |
1424 __ addu(at, sp, at); | 1426 __ addu(at, sp, at); |
1425 __ lw(a1, MemOperand(at)); | 1427 __ lw(a1, MemOperand(at)); |
1426 __ li(t0, Operand(0, RelocInfo::NONE)); | 1428 __ li(t0, Operand(0, RelocInfo::NONE)); |
1427 __ Branch(&patch_receiver); | 1429 __ Branch(&patch_receiver); |
1428 | 1430 |
1429 // Use the global receiver object from the called function as the | 1431 // Use the global receiver object from the called function as the |
1430 // receiver. | 1432 // receiver. |
1431 __ bind(&use_global_receiver); | 1433 __ bind(&use_global_receiver); |
1432 const int kGlobalIndex = | 1434 const int kGlobalIndex = |
1433 Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize; | 1435 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize; |
1434 __ lw(a2, FieldMemOperand(cp, kGlobalIndex)); | 1436 __ lw(a2, FieldMemOperand(cp, kGlobalIndex)); |
1435 __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset)); | 1437 __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset)); |
1436 __ lw(a2, FieldMemOperand(a2, kGlobalIndex)); | 1438 __ lw(a2, FieldMemOperand(a2, kGlobalIndex)); |
1437 __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset)); | 1439 __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset)); |
1438 | 1440 |
1439 __ bind(&patch_receiver); | 1441 __ bind(&patch_receiver); |
1440 __ sll(at, a0, kPointerSizeLog2); | 1442 __ sll(at, a0, kPointerSizeLog2); |
1441 __ addu(a3, sp, at); | 1443 __ addu(a3, sp, at); |
1442 __ sw(a2, MemOperand(a3, -kPointerSize)); | 1444 __ sw(a2, MemOperand(a3, -kPointerSize)); |
1443 | 1445 |
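
Note: the use_global_receiver block above (and the matching block in the apply trampoline further down) resolves the receiver with four loads: current context -> its global object -> the native context -> that context's global object -> its global receiver proxy, which then overwrites the receiver slot on the stack. A minimal sketch under the same hypothetical-layout assumptions as before, with tagging and FieldMemOperand's tag adjustment abstracted away:

// Hypothetical layouts; one field read below corresponds to each `lw` above.
struct JSReceiver {};
struct NativeContext;

struct GlobalObject {
  NativeContext* native_context;  // GlobalObject::kNativeContextOffset
  JSReceiver* global_receiver;    // GlobalObject::kGlobalReceiverOffset
};

struct NativeContext {
  GlobalObject* global_object;    // slot at kGlobalIndex
};

struct Context {
  GlobalObject* global_object;    // slot at kGlobalIndex
};

// cp -> current global object -> native context -> native context's global
// object -> its global receiver proxy, which becomes the patched receiver.
inline JSReceiver* GlobalReceiver(Context* cp) {
  GlobalObject* current_global = cp->global_object;        // lw a2, cp[...]
  NativeContext* native = current_global->native_context;  // lw a2, [a2+...]
  GlobalObject* native_global = native->global_object;     // lw a2, [a2+...]
  return native_global->global_receiver;                   // lw a2, [a2+...]
}
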
(...skipping 172 matching lines...)
1616 // a0: receiver | 1618 // a0: receiver |
1617 __ bind(&call_to_object); | 1619 __ bind(&call_to_object); |
1618 __ push(a0); | 1620 __ push(a0); |
1619 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); | 1621 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); |
1620 __ mov(a0, v0); // Put object in a0 to match other paths to push_receiver. | 1622 __ mov(a0, v0); // Put object in a0 to match other paths to push_receiver. |
1621 __ Branch(&push_receiver); | 1623 __ Branch(&push_receiver); |
1622 | 1624 |
1623 // Use the current global receiver object as the receiver. | 1625 // Use the current global receiver object as the receiver. |
1624 __ bind(&use_global_receiver); | 1626 __ bind(&use_global_receiver); |
1625 const int kGlobalOffset = | 1627 const int kGlobalOffset = |
1626 Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize; | 1628 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize; |
1627 __ lw(a0, FieldMemOperand(cp, kGlobalOffset)); | 1629 __ lw(a0, FieldMemOperand(cp, kGlobalOffset)); |
1628 __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset)); | 1630 __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset)); |
1629 __ lw(a0, FieldMemOperand(a0, kGlobalOffset)); | 1631 __ lw(a0, FieldMemOperand(a0, kGlobalOffset)); |
1630 __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset)); | 1632 __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset)); |
1631 | 1633 |
1632 // Push the receiver. | 1634 // Push the receiver. |
1633 // a0: receiver | 1635 // a0: receiver |
1634 __ bind(&push_receiver); | 1636 __ bind(&push_receiver); |
1635 __ push(a0); | 1637 __ push(a0); |
1636 | 1638 |
(...skipping 197 matching lines...)
1834 __ bind(&dont_adapt_arguments); | 1836 __ bind(&dont_adapt_arguments); |
1835 __ Jump(a3); | 1837 __ Jump(a3); |
1836 } | 1838 } |
1837 | 1839 |
1838 | 1840 |
1839 #undef __ | 1841 #undef __ |
1840 | 1842 |
1841 } } // namespace v8::internal | 1843 } } // namespace v8::internal |
1842 | 1844 |
1843 #endif // V8_TARGET_ARCH_MIPS | 1845 #endif // V8_TARGET_ARCH_MIPS |