OLD | NEW |
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" // NOLINT | 5 #include "vm/globals.h" // NOLINT |
6 #if defined(TARGET_ARCH_ARM) | 6 #if defined(TARGET_ARCH_ARM) |
7 | 7 |
8 #include "vm/assembler.h" | 8 #include "vm/assembler.h" |
9 #include "vm/cpu.h" | 9 #include "vm/cpu.h" |
10 #include "vm/longjump.h" | 10 #include "vm/longjump.h" |
(...skipping 1516 matching lines...) |
1527 | 1527 |
1528 | 1528 |
1529 intptr_t Assembler::FindImmediate(int32_t imm) { | 1529 intptr_t Assembler::FindImmediate(int32_t imm) { |
1530 return object_pool_wrapper_.FindImmediate(imm); | 1530 return object_pool_wrapper_.FindImmediate(imm); |
1531 } | 1531 } |
1532 | 1532 |
1533 | 1533 |
1534 // Uses a code sequence that can easily be decoded. | 1534 // Uses a code sequence that can easily be decoded. |
1535 void Assembler::LoadWordFromPoolOffset(Register rd, | 1535 void Assembler::LoadWordFromPoolOffset(Register rd, |
1536 int32_t offset, | 1536 int32_t offset, |
| 1537 Register pp, |
1537 Condition cond) { | 1538 Condition cond) { |
1538 ASSERT(constant_pool_allowed()); | 1539 ASSERT((pp != PP) || constant_pool_allowed()); |
1539 ASSERT(rd != PP); | 1540 ASSERT(rd != pp); |
1540 int32_t offset_mask = 0; | 1541 int32_t offset_mask = 0; |
1541 if (Address::CanHoldLoadOffset(kWord, offset, &offset_mask)) { | 1542 if (Address::CanHoldLoadOffset(kWord, offset, &offset_mask)) { |
1542 ldr(rd, Address(PP, offset), cond); | 1543 ldr(rd, Address(pp, offset), cond); |
1543 } else { | 1544 } else { |
1544 int32_t offset_hi = offset & ~offset_mask; // signed | 1545 int32_t offset_hi = offset & ~offset_mask; // signed |
1545 uint32_t offset_lo = offset & offset_mask; // unsigned | 1546 uint32_t offset_lo = offset & offset_mask; // unsigned |
1546 // Inline a simplified version of AddImmediate(rd, PP, offset_hi). | 1547 // Inline a simplified version of AddImmediate(rd, pp, offset_hi). |
1547 Operand o; | 1548 Operand o; |
1548 if (Operand::CanHold(offset_hi, &o)) { | 1549 if (Operand::CanHold(offset_hi, &o)) { |
1549 add(rd, PP, o, cond); | 1550 add(rd, pp, o, cond); |
1550 } else { | 1551 } else { |
1551 LoadImmediate(rd, offset_hi, cond); | 1552 LoadImmediate(rd, offset_hi, cond); |
1552 add(rd, PP, Operand(rd), cond); | 1553 add(rd, pp, Operand(rd), cond); |
1553 } | 1554 } |
1554 ldr(rd, Address(rd, offset_lo), cond); | 1555 ldr(rd, Address(rd, offset_lo), cond); |
1555 } | 1556 } |
1556 } | 1557 } |
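
The else-branch above splits a pool offset that is too large for a single LDR
into a high part that an ADD immediate can encode plus a low part that fits the
LDR displacement. A minimal standalone sketch of that arithmetic, assuming the
12-bit offset field of an ARM-mode word load (in the real code the mask comes
from Address::CanHoldLoadOffset):

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  const int32_t kLdrOffsetMask = 0xfff;          // 12-bit LDR displacement.
  int32_t offset = 0x2abc;                       // Too large for one LDR.
  int32_t offset_hi = offset & ~kLdrOffsetMask;  // 0x2000 (signed).
  uint32_t offset_lo = offset & kLdrOffsetMask;  // 0xabc (unsigned).
  assert(offset_hi + static_cast<int32_t>(offset_lo) == offset);
  // Emitted pair: add rd, pp, #0x2000 ; ldr rd, [rd, #0xabc]
  printf("hi=0x%x lo=0x%x\n", offset_hi, offset_lo);
  return 0;
}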
1557 | 1558 |
| 1559 void Assembler::CheckCodePointer() { |
| 1560 #ifdef DEBUG |
| 1561 Label cid_ok, instructions_ok; |
| 1562 Push(R0); |
| 1563 Push(IP); |
| 1564 CompareClassId(CODE_REG, kCodeCid, R0); |
| 1565 b(&cid_ok, EQ); |
| 1566 bkpt(0); |
| 1567 Bind(&cid_ok); |
1558 | 1568 |
1559 void Assembler::LoadPoolPointer() { | 1569 const intptr_t offset = CodeSize() + Instr::kPCReadOffset + |
1560 const intptr_t object_pool_pc_dist = | 1570 Instructions::HeaderSize() - kHeapObjectTag; |
1561 Instructions::HeaderSize() - Instructions::object_pool_offset() + | 1571 mov(R0, Operand(PC)); |
1562 CodeSize() + Instr::kPCReadOffset; | 1572 AddImmediate(R0, R0, -offset); |
1563 LoadFromOffset(kWord, PP, PC, -object_pool_pc_dist); | 1573 ldr(IP, FieldAddress(CODE_REG, Code::saved_instructions_offset())); |
1564 set_constant_pool_allowed(true); | 1574 cmp(R0, Operand(IP)); |
| 1575 b(&instructions_ok, EQ); |
| 1576 bkpt(1); |
| 1577 Bind(&instructions_ok); |
| 1578 Pop(IP); |
| 1579 Pop(R0); |
| 1580 #endif |
1565 } | 1581 } |
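
The offset computed in CheckCodePointer relies on the ARM convention that
reading PC yields the address of the current instruction plus 8
(Instr::kPCReadOffset). A standalone sketch of the arithmetic, assuming
kHeapObjectTag = 1 and an illustrative value for Instructions::HeaderSize():

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t kPCReadOffset = 8;    // Reading PC yields insn address + 8.
  const intptr_t kHeapObjectTag = 1;   // Tagged heap pointers are odd.
  const intptr_t kHeaderSize = 16;     // Instructions::HeaderSize() stand-in.

  intptr_t instructions = 0x10000;     // Untagged Instructions object start.
  intptr_t payload = instructions + kHeaderSize;  // First generated byte.
  intptr_t code_size = 0x40;           // CodeSize() at the mov(R0, PC).
  intptr_t pc_read = payload + code_size + kPCReadOffset;  // What R0 receives.

  intptr_t offset = code_size + kPCReadOffset + kHeaderSize - kHeapObjectTag;
  // R0 - offset must equal the tagged pointer CheckCodePointer compares with.
  assert(pc_read - offset == instructions + kHeapObjectTag);
  return 0;
}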
1566 | 1582 |
1567 | 1583 |
| 1584 void Assembler::RestoreCodePointer() { |
| 1585 ldr(CODE_REG, Address(FP, kPcMarkerSlotFromFp * kWordSize)); |
| 1586 CheckCodePointer(); |
| 1587 } |
| 1588 |
| 1589 |
| 1590 void Assembler::LoadPoolPointer(Register reg) { |
| 1591 // Load new pool pointer. |
| 1592 CheckCodePointer(); |
| 1593 ldr(reg, FieldAddress(CODE_REG, Code::object_pool_offset())); |
| 1594 set_constant_pool_allowed(reg == PP); |
| 1595 } |
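
LoadPoolPointer reads the pool slot through a FieldAddress, which folds the
heap-pointer tag into the displacement so no explicit untagging instruction is
needed. A tiny sketch of that convention (field_offset is illustrative;
kHeapObjectTag = 1 is the VM's tagging scheme):

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t kHeapObjectTag = 1;
  intptr_t object = 0x2000;                   // Untagged object address.
  intptr_t tagged = object + kHeapObjectTag;  // What CODE_REG actually holds.
  intptr_t field_offset = 24;                 // Illustrative field offset.
  // FieldAddress(base, off) addresses base + off - kHeapObjectTag, so the
  // tag cancels and the load hits the real field:
  assert(tagged + field_offset - kHeapObjectTag == object + field_offset);
  return 0;
}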
| 1596 |
| 1597 |
1568 void Assembler::LoadIsolate(Register rd) { | 1598 void Assembler::LoadIsolate(Register rd) { |
1569 ldr(rd, Address(THR, Thread::isolate_offset())); | 1599 ldr(rd, Address(THR, Thread::isolate_offset())); |
1570 } | 1600 } |
1571 | 1601 |
1572 | 1602 |
1573 void Assembler::LoadObjectHelper(Register rd, | 1603 void Assembler::LoadObjectHelper(Register rd, |
1574 const Object& object, | 1604 const Object& object, |
1575 Condition cond, | 1605 Condition cond, |
1576 bool is_unique) { | 1606 bool is_unique, |
| 1607 Register pp) { |
1577 // Load common VM constants from the thread. This also works in places where | 1608 // Load common VM constants from the thread. This also works in places where |
1578 // no constant pool is set up (e.g. intrinsic code). | 1609 // no constant pool is set up (e.g. intrinsic code). |
1579 if (Thread::CanLoadFromThread(object)) { | 1610 if (Thread::CanLoadFromThread(object)) { |
1580 ldr(rd, Address(THR, Thread::OffsetFromThread(object)), cond); | 1611 ldr(rd, Address(THR, Thread::OffsetFromThread(object)), cond); |
1581 return; | 1612 return; |
1582 } | 1613 } |
1583 // Smis and VM heap objects are never relocated; do not use object pool. | 1614 // Smis and VM heap objects are never relocated; do not use object pool. |
1584 if (object.IsSmi()) { | 1615 if (object.IsSmi()) { |
1585 LoadImmediate(rd, reinterpret_cast<int32_t>(object.raw()), cond); | 1616 LoadImmediate(rd, reinterpret_cast<int32_t>(object.raw()), cond); |
1586 } else if (object.InVMHeap() || !constant_pool_allowed()) { | 1617 } else if (object.InVMHeap() || !constant_pool_allowed()) { |
1587 ASSERT(FLAG_allow_absolute_addresses); | 1618 ASSERT(FLAG_allow_absolute_addresses); |
1588 // Make sure that class CallPattern is able to decode this load immediate. | 1619 // Make sure that class CallPattern is able to decode this load immediate. |
1589 const int32_t object_raw = reinterpret_cast<int32_t>(object.raw()); | 1620 const int32_t object_raw = reinterpret_cast<int32_t>(object.raw()); |
1590 LoadImmediate(rd, object_raw, cond); | 1621 LoadImmediate(rd, object_raw, cond); |
1591 } else { | 1622 } else { |
1592 // Make sure that class CallPattern is able to decode this load from the | 1623 // Make sure that class CallPattern is able to decode this load from the |
1593 // object pool. | 1624 // object pool. |
1594 const int32_t offset = ObjectPool::element_offset( | 1625 const int32_t offset = ObjectPool::element_offset( |
1595 is_unique ? object_pool_wrapper_.AddObject(object) | 1626 is_unique ? object_pool_wrapper_.AddObject(object) |
1596 : object_pool_wrapper_.FindObject(object)); | 1627 : object_pool_wrapper_.FindObject(object)); |
1597 LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, cond); | 1628 LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, PP, cond); |
1598 } | 1629 } |
1599 } | 1630 } |
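
LoadObjectHelper, restated: it prefers a THR-relative load, then an inline
immediate for values that never move, and only falls back to a PP-relative
pool slot. A sketch of the same decision, with plain booleans standing in for
the VM predicates (Thread::CanLoadFromThread, Object::IsSmi, Object::InVMHeap,
constant_pool_allowed):

enum LoadKind { kFromThread, kAsDecodableImmediate, kFromObjectPool };

LoadKind ChooseLoadKind(bool can_load_from_thread, bool is_smi,
                        bool in_vm_heap, bool pool_allowed) {
  if (can_load_from_thread) return kFromThread;  // THR-relative slot.
  if (is_smi) return kAsDecodableImmediate;      // Smis are never relocated.
  if (in_vm_heap || !pool_allowed) return kAsDecodableImmediate;
  return kFromObjectPool;                        // PP-relative pool slot.
}

int main() {
  // A Smi with no thread slot loads as an immediate:
  return ChooseLoadKind(false, true, false, true) == kAsDecodableImmediate
             ? 0 : 1;
}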
1600 | 1631 |
1601 | 1632 |
1602 void Assembler::LoadObject(Register rd, const Object& object, Condition cond) { | 1633 void Assembler::LoadObject(Register rd, const Object& object, Condition cond) { |
1603 LoadObjectHelper(rd, object, cond, false); | 1634 LoadObjectHelper(rd, object, cond, /* is_unique = */ false, PP); |
1604 } | 1635 } |
1605 | 1636 |
1606 | 1637 |
1607 void Assembler::LoadUniqueObject(Register rd, | 1638 void Assembler::LoadUniqueObject(Register rd, |
1608 const Object& object, | 1639 const Object& object, |
1609 Condition cond) { | 1640 Condition cond) { |
1610 LoadObjectHelper(rd, object, cond, true); | 1641 LoadObjectHelper(rd, object, cond, /* is_unique = */ true, PP); |
1611 } | 1642 } |
1612 | 1643 |
1613 | 1644 |
1614 void Assembler::LoadExternalLabel(Register rd, | 1645 void Assembler::LoadExternalLabel(Register rd, |
1615 const ExternalLabel* label, | 1646 const ExternalLabel* label, |
1616 Patchability patchable, | 1647 Patchability patchable, |
1617 Condition cond) { | 1648 Condition cond) { |
1618 const int32_t offset = ObjectPool::element_offset( | 1649 const int32_t offset = ObjectPool::element_offset( |
1619 object_pool_wrapper_.FindExternalLabel(label, patchable)); | 1650 object_pool_wrapper_.FindExternalLabel(label, patchable)); |
1620 LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, cond); | 1651 LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, PP, cond); |
1621 } | 1652 } |
1622 | 1653 |
1623 | 1654 |
| 1655 void Assembler::LoadFunctionFromCalleePool(Register dst, |
| 1656 const Function& function, |
| 1657 Register new_pp) { |
| 1658 const int32_t offset = |
| 1659 ObjectPool::element_offset(object_pool_wrapper_.FindObject(function)); |
| 1660 LoadWordFromPoolOffset(dst, offset - kHeapObjectTag, new_pp, AL); |
| 1661 } |
| 1662 |
| 1663 |
1624 void Assembler::LoadNativeEntry(Register rd, | 1664 void Assembler::LoadNativeEntry(Register rd, |
1625 const ExternalLabel* label, | 1665 const ExternalLabel* label, |
1626 Patchability patchable, | 1666 Patchability patchable, |
1627 Condition cond) { | 1667 Condition cond) { |
1628 const int32_t offset = ObjectPool::element_offset( | 1668 const int32_t offset = ObjectPool::element_offset( |
1629 object_pool_wrapper_.FindNativeEntry(label, patchable)); | 1669 object_pool_wrapper_.FindNativeEntry(label, patchable)); |
1630 LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, cond); | 1670 LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, PP, cond); |
1631 } | 1671 } |
1632 | 1672 |
1633 | 1673 |
1634 void Assembler::PushObject(const Object& object) { | 1674 void Assembler::PushObject(const Object& object) { |
1635 LoadObject(IP, object); | 1675 LoadObject(IP, object); |
1636 Push(IP); | 1676 Push(IP); |
1637 } | 1677 } |
1638 | 1678 |
1639 | 1679 |
1640 void Assembler::CompareObject(Register rn, const Object& object) { | 1680 void Assembler::CompareObject(Register rn, const Object& object) { |
(...skipping 176 matching lines...) |
1817 bool can_value_be_smi) { | 1857 bool can_value_be_smi) { |
1818 ASSERT(object != value); | 1858 ASSERT(object != value); |
1819 VerifiedWrite(dest, value, kHeapObjectOrSmi); | 1859 VerifiedWrite(dest, value, kHeapObjectOrSmi); |
1820 Label done; | 1860 Label done; |
1821 if (can_value_be_smi) { | 1861 if (can_value_be_smi) { |
1822 StoreIntoObjectFilter(object, value, &done); | 1862 StoreIntoObjectFilter(object, value, &done); |
1823 } else { | 1863 } else { |
1824 StoreIntoObjectFilterNoSmi(object, value, &done); | 1864 StoreIntoObjectFilterNoSmi(object, value, &done); |
1825 } | 1865 } |
1826 // A store buffer update is required. | 1866 // A store buffer update is required. |
1827 RegList regs = (1 << LR); | 1867 RegList regs = (1 << CODE_REG) | (1 << LR); |
1828 if (value != R0) { | 1868 if (value != R0) { |
1829 regs |= (1 << R0); // Preserve R0. | 1869 regs |= (1 << R0); // Preserve R0. |
1830 } | 1870 } |
1831 PushList(regs); | 1871 PushList(regs); |
1832 if (object != R0) { | 1872 if (object != R0) { |
1833 mov(R0, Operand(object)); | 1873 mov(R0, Operand(object)); |
1834 } | 1874 } |
| 1875 ldr(CODE_REG, Address(THR, Thread::update_store_buffer_code_offset())); |
1835 ldr(LR, Address(THR, Thread::update_store_buffer_entry_point_offset())); | 1876 ldr(LR, Address(THR, Thread::update_store_buffer_entry_point_offset())); |
1836 blx(LR); | 1877 blx(LR); |
1837 PopList(regs); | 1878 PopList(regs); |
1838 Bind(&done); | 1879 Bind(&done); |
1839 } | 1880 } |
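
The RegList above is an ARM-style register bitmask, so preserving one more
register is a single OR. A standalone sketch of how the mask is built (R0 =
bit 0, LR = R14 = bit 14; CODE_REG = R10 is inferred from the frame-layout
comment later in this file):

#include <cstdint>
#include <cstdio>

int main() {
  const int R0 = 0, CODE_REG = 10, LR = 14;
  uint32_t regs = (1u << CODE_REG) | (1u << LR);
  int value_reg = 3;                 // Pretend 'value' lives in R3.
  if (value_reg != R0) {
    regs |= (1u << R0);              // The helper call may clobber R0.
  }
  printf("push mask: 0x%04x\n", regs);  // Bits 0, 10, 14 -> 0x4401.
  return 0;
}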
1840 | 1881 |
1841 | 1882 |
1842 void Assembler::StoreIntoObjectOffset(Register object, | 1883 void Assembler::StoreIntoObjectOffset(Register object, |
1843 int32_t offset, | 1884 int32_t offset, |
1844 Register value, | 1885 Register value, |
(...skipping 838 matching lines...) |
2683 void Assembler::Vdivqs(QRegister qd, QRegister qn, QRegister qm) { | 2724 void Assembler::Vdivqs(QRegister qd, QRegister qn, QRegister qm) { |
2684 ASSERT(qd != QTMP); | 2725 ASSERT(qd != QTMP); |
2685 ASSERT(qn != QTMP); | 2726 ASSERT(qn != QTMP); |
2686 ASSERT(qm != QTMP); | 2727 ASSERT(qm != QTMP); |
2687 | 2728 |
2688 Vreciprocalqs(qd, qm); | 2729 Vreciprocalqs(qd, qm); |
2689 vmulqs(qd, qn, qd); | 2730 vmulqs(qd, qn, qd); |
2690 } | 2731 } |
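
NEON has no packed single-precision divide, so Vdivqs computes qn / qm as
qn * (1/qm): VRECPE gives a rough reciprocal estimate and Vreciprocalqs
refines it with Newton-Raphson steps (a VRECPS step computes 2 - d*x). A
scalar sketch of that refinement:

#include <cstdio>

float ReciprocalNR(float d, float x, int steps) {
  for (int i = 0; i < steps; ++i) {
    x = x * (2.0f - d * x);  // One VRECPS-style Newton-Raphson step.
  }
  return x;
}

int main() {
  // Even a crude initial estimate converges quadratically:
  printf("1/3 ~ %.7f\n", ReciprocalNR(3.0f, 0.3f, 3));
  return 0;
}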
2691 | 2732 |
2692 | 2733 |
2693 void Assembler::Branch(const StubEntry& stub_entry, Condition cond) { | 2734 void Assembler::Branch(const StubEntry& stub_entry, |
2694 // Address is never patched. | 2735 Patchability patchable, |
2695 LoadImmediate(IP, stub_entry.label().address(), cond); | 2736 Register pp, |
| 2737 Condition cond) { |
| 2738 const Code& target_code = Code::Handle(stub_entry.code()); |
| 2739 const int32_t offset = ObjectPool::element_offset( |
| 2740 object_pool_wrapper_.FindObject(target_code, patchable)); |
| 2741 LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag, pp, cond); |
| 2742 ldr(IP, FieldAddress(CODE_REG, Code::entry_point_offset()), cond); |
2696 bx(IP, cond); | 2743 bx(IP, cond); |
2697 } | 2744 } |
2698 | 2745 |
2699 | 2746 |
2700 void Assembler::BranchPatchable(const StubEntry& stub_entry) { | 2747 void Assembler::BranchLink(const Code& target, Patchability patchable) { |
2701 // Use a fixed size code sequence, since a function prologue may be patched | 2748 // Make sure that class CallPattern is able to patch the label referred |
2702 // with this branch sequence. | 2749 // to by this code sequence. |
2703 // Unlike BranchLinkPatchable, BranchPatchable requires an instruction | 2750 // For added code robustness, use 'blx lr' in a patchable sequence and |
2704 // cache flush upon patching. | 2751 // use 'blx ip' in a non-patchable sequence (see other BranchLink flavors). |
2705 LoadPatchableImmediate(IP, stub_entry.label().address()); | 2752 const int32_t offset = ObjectPool::element_offset( |
2706 bx(IP); | 2753 object_pool_wrapper_.FindObject(target, patchable)); |
| 2754 LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag, PP, AL); |
| 2755 ldr(LR, FieldAddress(CODE_REG, Code::entry_point_offset())); |
| 2756 blx(LR); // Use blx instruction so that the return branch prediction works. |
| 2757 } |
| 2758 |
| 2759 |
| 2760 void Assembler::BranchLink(const StubEntry& stub_entry, |
| 2761 Patchability patchable) { |
| 2762 const Code& code = Code::Handle(stub_entry.code()); |
| 2763 BranchLink(code, patchable); |
| 2764 } |
| 2765 |
| 2766 |
| 2767 void Assembler::BranchLinkPatchable(const Code& target) { |
| 2768 BranchLink(target, kPatchable); |
2707 } | 2769 } |
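
The new Branch/BranchLink sequences all share one shape: fetch the target Code
object from the object pool, load its entry point, then branch. Modeled in
plain C++ (a sketch only; the pool layout and field names stand in for the
VM's ObjectPool and Code::entry_point_offset()):

#include <vector>

struct FakeCode {
  void (*entry_point)();
};

void CallViaPool(const std::vector<FakeCode*>& pool, int index) {
  FakeCode* target = pool[index];          // ldr CODE_REG, [pp, #offset]
  void (*entry)() = target->entry_point;   // ldr lr/ip, [CODE_REG, #entry]
  entry();                                 // blx lr / bx ip
}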
2708 | 2770 |
2709 | 2771 |
2710 void Assembler::BranchLink(const ExternalLabel* label) { | 2772 void Assembler::BranchLink(const ExternalLabel* label) { |
2711 LoadImmediate(LR, label->address()); // Target address is never patched. | 2773 LoadImmediate(LR, label->address()); // Target address is never patched. |
2712 blx(LR); // Use blx instruction so that the return branch prediction works. | 2774 blx(LR); // Use blx instruction so that the return branch prediction works. |
2713 } | 2775 } |
2714 | 2776 |
2715 | 2777 |
2716 void Assembler::BranchLink(const ExternalLabel* label, Patchability patchable) { | 2778 void Assembler::BranchLinkPatchable(const StubEntry& stub_entry) { |
2717 // Make sure that class CallPattern is able to patch the label referred | 2779 BranchLinkPatchable(Code::Handle(stub_entry.code())); |
2718 // to by this code sequence. | |
2719 // For added code robustness, use 'blx lr' in a patchable sequence and | |
2720 // use 'blx ip' in a non-patchable sequence (see other BranchLink flavors). | |
2721 const int32_t offset = ObjectPool::element_offset( | |
2722 object_pool_wrapper_.FindExternalLabel(label, patchable)); | |
2723 LoadWordFromPoolOffset(LR, offset - kHeapObjectTag, AL); | |
2724 blx(LR); // Use blx instruction so that the return branch prediction works. | |
2725 } | 2780 } |
2726 | 2781 |
2727 | 2782 |
2728 void Assembler::BranchLink(const StubEntry& stub_entry, | |
2729 Patchability patchable) { | |
2730 BranchLink(&stub_entry.label(), patchable); | |
2731 } | |
2732 | |
2733 | |
2734 void Assembler::BranchLinkPatchable(const StubEntry& stub_entry) { | |
2735 BranchLink(&stub_entry.label(), kPatchable); | |
2736 } | |
2737 | |
2738 | |
2739 void Assembler::BranchLinkOffset(Register base, int32_t offset) { | 2783 void Assembler::BranchLinkOffset(Register base, int32_t offset) { |
2740 ASSERT(base != PC); | 2784 ASSERT(base != PC); |
2741 ASSERT(base != IP); | 2785 ASSERT(base != IP); |
2742 LoadFromOffset(kWord, IP, base, offset); | 2786 LoadFromOffset(kWord, IP, base, offset); |
2743 blx(IP); // Use blx instruction so that the return branch prediction works. | 2787 blx(IP); // Use blx instruction so that the return branch prediction works. |
2744 } | 2788 } |
2745 | 2789 |
2746 | 2790 |
2747 void Assembler::LoadPatchableImmediate( | 2791 void Assembler::LoadPatchableImmediate( |
2748 Register rd, int32_t value, Condition cond) { | 2792 Register rd, int32_t value, Condition cond) { |
(...skipping 17 matching lines...) |
2766 } | 2810 } |
2767 } | 2811 } |
2768 | 2812 |
2769 | 2813 |
2770 void Assembler::LoadDecodableImmediate( | 2814 void Assembler::LoadDecodableImmediate( |
2771 Register rd, int32_t value, Condition cond) { | 2815 Register rd, int32_t value, Condition cond) { |
2772 const ARMVersion version = TargetCPUFeatures::arm_version(); | 2816 const ARMVersion version = TargetCPUFeatures::arm_version(); |
2773 if ((version == ARMv5TE) || (version == ARMv6)) { | 2817 if ((version == ARMv5TE) || (version == ARMv6)) { |
2774 if (constant_pool_allowed()) { | 2818 if (constant_pool_allowed()) { |
2775 const int32_t offset = Array::element_offset(FindImmediate(value)); | 2819 const int32_t offset = Array::element_offset(FindImmediate(value)); |
2776 LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, cond); | 2820 LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, PP, cond); |
2777 } else { | 2821 } else { |
2778 LoadPatchableImmediate(rd, value, cond); | 2822 LoadPatchableImmediate(rd, value, cond); |
2779 } | 2823 } |
2780 } else { | 2824 } else { |
2781 ASSERT(version == ARMv7); | 2825 ASSERT(version == ARMv7); |
2782 movw(rd, Utils::Low16Bits(value), cond); | 2826 movw(rd, Utils::Low16Bits(value), cond); |
2783 const uint16_t value_high = Utils::High16Bits(value); | 2827 const uint16_t value_high = Utils::High16Bits(value); |
2784 if (value_high != 0) { | 2828 if (value_high != 0) { |
2785 movt(rd, value_high, cond); | 2829 movt(rd, value_high, cond); |
2786 } | 2830 } |
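
On ARMv7 the movw/movt path above synthesizes any 32-bit constant in at most
two instructions, dropping the movt when the high half is zero. A standalone
sketch of the split (mirroring Utils::Low16Bits and Utils::High16Bits):

#include <cassert>
#include <cstdint>

int main() {
  const int32_t value = static_cast<int32_t>(0xDEADBEEFu);
  const uint16_t lo = static_cast<uint16_t>(value & 0xFFFF);  // movw operand.
  const uint16_t hi =
      static_cast<uint16_t>(static_cast<uint32_t>(value) >> 16);  // movt.
  assert(static_cast<int32_t>((static_cast<uint32_t>(hi) << 16) | lo) ==
         value);
  // When hi == 0, the movt is omitted, as in the code above.
  return 0;
}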
(...skipping 514 matching lines...) |
3301 } | 3345 } |
3302 | 3346 |
3303 | 3347 |
3304 void Assembler::CallRuntime(const RuntimeEntry& entry, | 3348 void Assembler::CallRuntime(const RuntimeEntry& entry, |
3305 intptr_t argument_count) { | 3349 intptr_t argument_count) { |
3306 entry.Call(this, argument_count); | 3350 entry.Call(this, argument_count); |
3307 } | 3351 } |
3308 | 3352 |
3309 | 3353 |
3310 void Assembler::EnterDartFrame(intptr_t frame_size) { | 3354 void Assembler::EnterDartFrame(intptr_t frame_size) { |
| 3355 CheckCodePointer(); |
3311 ASSERT(!constant_pool_allowed()); | 3356 ASSERT(!constant_pool_allowed()); |
3312 const intptr_t offset = CodeSize(); | |
3313 | 3357 |
3314 // Save PC in frame for fast identification of corresponding code. | 3358 // Registers are pushed in descending order: R9 | R10 | R11 | R14. |
3315 // Note that callee-saved registers can be added to the register list. | 3359 EnterFrame((1 << PP) | (1 << CODE_REG) | (1 << FP) | (1 << LR), 0); |
3316 EnterFrame((1 << PP) | (1 << FP) | (1 << LR) | (1 << PC), 0); | |
3317 | |
3318 if (offset != 0) { | |
3319 // Adjust saved PC for any intrinsic code that could have been generated | |
3320 // before a frame is created. Use PP as temp register. | |
3321 ldr(PP, Address(FP, 2 * kWordSize)); | |
3322 AddImmediate(PP, PP, -offset); | |
3323 str(PP, Address(FP, 2 * kWordSize)); | |
3324 } | |
3325 | 3360 |
3326 // Setup pool pointer for this dart function. | 3361 // Setup pool pointer for this dart function. |
3327 LoadPoolPointer(); | 3362 LoadPoolPointer(); |
3328 | 3363 |
3329 // Reserve space for locals. | 3364 // Reserve space for locals. |
3330 AddImmediate(SP, -frame_size); | 3365 AddImmediate(SP, -frame_size); |
3331 } | 3366 } |
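
The push order matters because ARM's STMDB (push-multiple) stores the
highest-numbered register at the highest address, which fixes each register's
FP-relative slot. A sketch reproducing the layout named in the comment above
(register numbers R9/R10/R11/R14 taken from that comment):

#include <cstdint>
#include <cstdio>

int main() {
  const char* names[16] = {};
  names[9] = "PP"; names[10] = "CODE_REG";  // Numbers per the comment above.
  names[11] = "FP"; names[14] = "LR";
  const uint32_t mask = (1u << 9) | (1u << 10) | (1u << 11) | (1u << 14);
  // STMDB stores the highest-numbered register nearest the old SP:
  int slot = 0;
  for (int r = 15; r >= 0; --r) {
    if (mask & (1u << r)) {
      printf("[old_sp - %d] = %s\n", ++slot * 4, names[r]);
    }
  }
  return 0;
}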
3332 | 3367 |
3333 | 3368 |
3334 // On entry to a function compiled for OSR, the caller's frame pointer, the | 3369 // On entry to a function compiled for OSR, the caller's frame pointer, the |
3335 // stack locals, and any copied parameters are already in place. The frame | 3370 // stack locals, and any copied parameters are already in place. The frame |
3336 // pointer is already set up. The PC marker is not correct for the | 3371 // pointer is already set up. The PC marker is not correct for the |
3337 // optimized function and there may be extra space for spill slots to | 3372 // optimized function and there may be extra space for spill slots to |
3338 // allocate. We must also set up the pool pointer for the function. | 3373 // allocate. We must also set up the pool pointer for the function. |
3339 void Assembler::EnterOsrFrame(intptr_t extra_size) { | 3374 void Assembler::EnterOsrFrame(intptr_t extra_size) { |
3340 ASSERT(!constant_pool_allowed()); | 3375 ASSERT(!constant_pool_allowed()); |
3341 // mov(IP, Operand(PC)) loads PC + Instr::kPCReadOffset (8). This may be | |
3342 // different from EntryPointToPcMarkerOffset(). | |
3343 const intptr_t offset = | |
3344 CodeSize() + Instr::kPCReadOffset - EntryPointToPcMarkerOffset(); | |
3345 | |
3346 Comment("EnterOsrFrame"); | 3376 Comment("EnterOsrFrame"); |
3347 mov(IP, Operand(PC)); | 3377 RestoreCodePointer(); |
3348 | |
3349 AddImmediate(IP, -offset); | |
3350 str(IP, Address(FP, kPcMarkerSlotFromFp * kWordSize)); | |
3351 | |
3352 // Setup pool pointer for this dart function. | |
3353 LoadPoolPointer(); | 3378 LoadPoolPointer(); |
3354 | 3379 |
3355 AddImmediate(SP, -extra_size); | 3380 AddImmediate(SP, -extra_size); |
3356 } | 3381 } |
3357 | 3382 |
3358 | 3383 |
3359 void Assembler::LeaveDartFrame() { | 3384 void Assembler::LeaveDartFrame(RestorePP restore_pp) { |
3360 set_constant_pool_allowed(false); | 3385 if (restore_pp == kRestoreCallerPP) { |
3361 LeaveFrame((1 << PP) | (1 << FP) | (1 << LR)); | 3386 ldr(PP, Address(FP, kSavedCallerPpSlotFromFp * kWordSize)); |
3362 // Adjust SP for PC pushed in EnterDartFrame. | 3387 set_constant_pool_allowed(false); |
3363 AddImmediate(SP, kWordSize); | 3388 } |
| 3389 Drop(2); // Drop saved PP, PC marker. |
| 3390 LeaveFrame((1 << FP) | (1 << LR)); |
3364 } | 3391 } |
3365 | 3392 |
3366 | 3393 |
3367 void Assembler::EnterStubFrame() { | 3394 void Assembler::EnterStubFrame() { |
3368 set_constant_pool_allowed(false); | 3395 EnterDartFrame(0); |
3369 // Push 0 as saved PC for stub frames. | |
3370 mov(IP, Operand(LR)); | |
3371 mov(LR, Operand(0)); | |
3372 RegList regs = (1 << PP) | (1 << FP) | (1 << IP) | (1 << LR); | |
3373 EnterFrame(regs, 0); | |
3374 // Setup pool pointer for this stub. | |
3375 LoadPoolPointer(); | |
3376 } | 3396 } |
3377 | 3397 |
3378 | 3398 |
3379 void Assembler::LeaveStubFrame() { | 3399 void Assembler::LeaveStubFrame() { |
3380 LeaveDartFrame(); | 3400 LeaveDartFrame(); |
3381 } | 3401 } |
3382 | 3402 |
3383 | 3403 |
3384 void Assembler::LoadAllocationStatsAddress(Register dest, | 3404 void Assembler::LoadAllocationStatsAddress(Register dest, |
3385 intptr_t cid, | 3405 intptr_t cid, |
(...skipping 280 matching lines...) |
3666 | 3686 |
3667 | 3687 |
3668 const char* Assembler::FpuRegisterName(FpuRegister reg) { | 3688 const char* Assembler::FpuRegisterName(FpuRegister reg) { |
3669 ASSERT((0 <= reg) && (reg < kNumberOfFpuRegisters)); | 3689 ASSERT((0 <= reg) && (reg < kNumberOfFpuRegisters)); |
3670 return fpu_reg_names[reg]; | 3690 return fpu_reg_names[reg]; |
3671 } | 3691 } |
3672 | 3692 |
3673 } // namespace dart | 3693 } // namespace dart |
3674 | 3694 |
3675 #endif // defined TARGET_ARCH_ARM | 3695 #endif // defined TARGET_ARCH_ARM |