OLD | NEW |
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
2 // All Rights Reserved. | 2 // All Rights Reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions | 5 // modification, are permitted provided that the following conditions |
6 // are met: | 6 // are met: |
7 // | 7 // |
8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
10 // | 10 // |
(...skipping 31 matching lines...)
42 #include "serialize.h" | 42 #include "serialize.h" |
43 | 43 |
44 namespace v8 { | 44 namespace v8 { |
45 namespace internal { | 45 namespace internal { |
46 | 46 |
47 #ifdef DEBUG | 47 #ifdef DEBUG |
48 bool CpuFeatures::initialized_ = false; | 48 bool CpuFeatures::initialized_ = false; |
49 #endif | 49 #endif |
50 unsigned CpuFeatures::supported_ = 0; | 50 unsigned CpuFeatures::supported_ = 0; |
51 unsigned CpuFeatures::found_by_runtime_probing_only_ = 0; | 51 unsigned CpuFeatures::found_by_runtime_probing_only_ = 0; |
| 52 unsigned CpuFeatures::cache_line_size_ = 64; |
52 | 53 |
53 | 54 |
54 ExternalReference ExternalReference::cpu_features() { | 55 ExternalReference ExternalReference::cpu_features() { |
55 ASSERT(CpuFeatures::initialized_); | 56 ASSERT(CpuFeatures::initialized_); |
56 return ExternalReference(&CpuFeatures::supported_); | 57 return ExternalReference(&CpuFeatures::supported_); |
57 } | 58 } |
58 | 59 |
59 // Get the CPU features enabled by the build. For cross compilation the | 60 // Get the CPU features enabled by the build. For cross compilation the |
60 // preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS | 61 // preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS |
61 // can be defined to enable ARMv7 and VFPv3 instructions when building the | 62 // can be defined to enable ARMv7 and VFPv3 instructions when building the |
(...skipping 55 matching lines...)
117 } | 118 } |
118 | 119 |
119 #ifndef __arm__ | 120 #ifndef __arm__ |
120 // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is | 121 // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is |
121 // enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6. | 122 // enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6. |
122 if (FLAG_enable_vfp3) { | 123 if (FLAG_enable_vfp3) { |
123 supported_ |= | 124 supported_ |= |
124 static_cast<uint64_t>(1) << VFP3 | | 125 static_cast<uint64_t>(1) << VFP3 | |
125 static_cast<uint64_t>(1) << ARMv7; | 126 static_cast<uint64_t>(1) << ARMv7; |
126 } | 127 } |
| 128 if (FLAG_enable_neon) { |
| 129 supported_ |= 1u << NEON; |
| 130 } |
127 // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled | 131 // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled |
128 if (FLAG_enable_armv7) { | 132 if (FLAG_enable_armv7) { |
129 supported_ |= static_cast<uint64_t>(1) << ARMv7; | 133 supported_ |= static_cast<uint64_t>(1) << ARMv7; |
130 } | 134 } |
131 | 135 |
132 if (FLAG_enable_sudiv) { | 136 if (FLAG_enable_sudiv) { |
133 supported_ |= static_cast<uint64_t>(1) << SUDIV; | 137 supported_ |= static_cast<uint64_t>(1) << SUDIV; |
134 } | 138 } |
135 | 139 |
136 if (FLAG_enable_movw_movt) { | 140 if (FLAG_enable_movw_movt) { |
(...skipping 12 matching lines...)
149 // Probe for additional features not already known to be available. | 153 // Probe for additional features not already known to be available. |
150 if (!IsSupported(VFP3) && FLAG_enable_vfp3 && OS::ArmCpuHasFeature(VFP3)) { | 154 if (!IsSupported(VFP3) && FLAG_enable_vfp3 && OS::ArmCpuHasFeature(VFP3)) { |
151 // This implementation also sets the VFP flags if runtime | 155 // This implementation also sets the VFP flags if runtime |
152 // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI | 156 // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI |
153 // 0406B, page A1-6. | 157 // 0406B, page A1-6. |
154 found_by_runtime_probing_only_ |= | 158 found_by_runtime_probing_only_ |= |
155 static_cast<uint64_t>(1) << VFP3 | | 159 static_cast<uint64_t>(1) << VFP3 | |
156 static_cast<uint64_t>(1) << ARMv7; | 160 static_cast<uint64_t>(1) << ARMv7; |
157 } | 161 } |
158 | 162 |
| 163 if (!IsSupported(NEON) && FLAG_enable_neon && OS::ArmCpuHasFeature(NEON)) { |
| 164 found_by_runtime_probing_only_ |= 1u << NEON; |
| 165 } |
| 166 |
159 if (!IsSupported(ARMv7) && FLAG_enable_armv7 && OS::ArmCpuHasFeature(ARMv7)) { | 167 if (!IsSupported(ARMv7) && FLAG_enable_armv7 && OS::ArmCpuHasFeature(ARMv7)) { |
160 found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << ARMv7; | 168 found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << ARMv7; |
161 } | 169 } |
162 | 170 |
163 if (!IsSupported(SUDIV) && FLAG_enable_sudiv && OS::ArmCpuHasFeature(SUDIV)) { | 171 if (!IsSupported(SUDIV) && FLAG_enable_sudiv && OS::ArmCpuHasFeature(SUDIV)) { |
164 found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << SUDIV; | 172 found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << SUDIV; |
165 } | 173 } |
166 | 174 |
167 if (!IsSupported(UNALIGNED_ACCESSES) && FLAG_enable_unaligned_accesses | 175 if (!IsSupported(UNALIGNED_ACCESSES) && FLAG_enable_unaligned_accesses |
168 && OS::ArmCpuHasFeature(ARMv7)) { | 176 && OS::ArmCpuHasFeature(ARMv7)) { |
169 found_by_runtime_probing_only_ |= | 177 found_by_runtime_probing_only_ |= |
170 static_cast<uint64_t>(1) << UNALIGNED_ACCESSES; | 178 static_cast<uint64_t>(1) << UNALIGNED_ACCESSES; |
171 } | 179 } |
172 | 180 |
173 if (OS::GetCpuImplementer() == QUALCOMM_IMPLEMENTER && | 181 CpuImplementer implementer = OS::GetCpuImplementer(); |
| 182 if (implementer == QUALCOMM_IMPLEMENTER && |
174 FLAG_enable_movw_movt && OS::ArmCpuHasFeature(ARMv7)) { | 183 FLAG_enable_movw_movt && OS::ArmCpuHasFeature(ARMv7)) { |
175 found_by_runtime_probing_only_ |= | 184 found_by_runtime_probing_only_ |= |
176 static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS; | 185 static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS; |
177 } | 186 } |
178 | 187 |
| 188 CpuPart part = OS::GetCpuPart(implementer); |
| 189 if ((part == CORTEX_A9) || (part == CORTEX_A5)) { |
| 190 cache_line_size_ = 32; |
| 191 } |
| 192 |
179 if (!IsSupported(VFP32DREGS) && FLAG_enable_32dregs | 193 if (!IsSupported(VFP32DREGS) && FLAG_enable_32dregs |
180 && OS::ArmCpuHasFeature(VFP32DREGS)) { | 194 && OS::ArmCpuHasFeature(VFP32DREGS)) { |
181 found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP32DREGS; | 195 found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP32DREGS; |
182 } | 196 } |
183 | 197 |
184 supported_ |= found_by_runtime_probing_only_; | 198 supported_ |= found_by_runtime_probing_only_; |
185 #endif | 199 #endif |
186 | 200 |
187 // Assert that VFP3 implies ARMv7. | 201 // Assert that VFP3 implies ARMv7. |
188 ASSERT(!IsSupported(VFP3) || IsSupported(ARMv7)); | 202 ASSERT(!IsSupported(VFP3) || IsSupported(ARMv7)); |
(...skipping 50 matching lines...)
239 | 253 |
240 #endif // __arm__ | 254 #endif // __arm__ |
241 | 255 |
242 printf("target%s %s%s%s %s\n", | 256 printf("target%s %s%s%s %s\n", |
243 arm_test, arm_arch, arm_fpu, arm_thumb, arm_float_abi); | 257 arm_test, arm_arch, arm_fpu, arm_thumb, arm_float_abi); |
244 } | 258 } |
245 | 259 |
246 | 260 |
247 void CpuFeatures::PrintFeatures() { | 261 void CpuFeatures::PrintFeatures() { |
248 printf( | 262 printf( |
249 "ARMv7=%d VFP3=%d VFP32DREGS=%d SUDIV=%d UNALIGNED_ACCESSES=%d " | 263 "ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d UNALIGNED_ACCESSES=%d " |
250 "MOVW_MOVT_IMMEDIATE_LOADS=%d", | 264 "MOVW_MOVT_IMMEDIATE_LOADS=%d", |
251 CpuFeatures::IsSupported(ARMv7), | 265 CpuFeatures::IsSupported(ARMv7), |
252 CpuFeatures::IsSupported(VFP3), | 266 CpuFeatures::IsSupported(VFP3), |
253 CpuFeatures::IsSupported(VFP32DREGS), | 267 CpuFeatures::IsSupported(VFP32DREGS), |
| 268 CpuFeatures::IsSupported(NEON), |
254 CpuFeatures::IsSupported(SUDIV), | 269 CpuFeatures::IsSupported(SUDIV), |
255 CpuFeatures::IsSupported(UNALIGNED_ACCESSES), | 270 CpuFeatures::IsSupported(UNALIGNED_ACCESSES), |
256 CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS)); | 271 CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS)); |
257 #ifdef __arm__ | 272 #ifdef __arm__ |
258 bool eabi_hardfloat = OS::ArmUsingHardFloat(); | 273 bool eabi_hardfloat = OS::ArmUsingHardFloat(); |
259 #elif USE_EABI_HARDFLOAT | 274 #elif USE_EABI_HARDFLOAT |
260 bool eabi_hardfloat = true; | 275 bool eabi_hardfloat = true; |
261 #else | 276 #else |
262 bool eabi_hardfloat = false; | 277 bool eabi_hardfloat = false; |
263 #endif | 278 #endif |
(...skipping 105 matching lines...)
369 ShiftOp shift_op, int shift_imm, AddrMode am) { | 384 ShiftOp shift_op, int shift_imm, AddrMode am) { |
370 ASSERT(is_uint5(shift_imm)); | 385 ASSERT(is_uint5(shift_imm)); |
371 rn_ = rn; | 386 rn_ = rn; |
372 rm_ = rm; | 387 rm_ = rm; |
373 shift_op_ = shift_op; | 388 shift_op_ = shift_op; |
374 shift_imm_ = shift_imm & 31; | 389 shift_imm_ = shift_imm & 31; |
375 am_ = am; | 390 am_ = am; |
376 } | 391 } |
377 | 392 |
378 | 393 |
| 394 NeonMemOperand::NeonMemOperand(Register rn, AddrMode am, int align) { |
| 395 ASSERT((am == Offset) || (am == PostIndex)); |
| 396 rn_ = rn; |
| 397 rm_ = (am == Offset) ? pc : sp; |
| 398 switch (align) { |
| 399 case 0: |
| 400 align_ = 0; |
| 401 break; |
| 402 case 64: |
| 403 align_ = 1; |
| 404 break; |
| 405 case 128: |
| 406 align_ = 2; |
| 407 break; |
| 408 case 256: |
| 409 align_ = 3; |
| 410 break; |
| 411 default: |
| 412 UNREACHABLE(); |
| 413 align_ = 0; |
| 414 break; |
| 415 } |
| 416 } |
| 417 |
| 418 |
| 419 NeonMemOperand::NeonMemOperand(Register rn, Register rm, int align) { |
| 420 rn_ = rn; |
| 421 rm_ = rm; |
| 422 switch (align) { |
| 423 case 0: |
| 424 align_ = 0; |
| 425 break; |
| 426 case 64: |
| 427 align_ = 1; |
| 428 break; |
| 429 case 128: |
| 430 align_ = 2; |
| 431 break; |
| 432 case 256: |
| 433 align_ = 3; |
| 434 break; |
| 435 default: |
| 436 UNREACHABLE(); |
| 437 align_ = 0; |
| 438 break; |
| 439 } |
| 440 } |
| 441 |
| 442 |
| 443 NeonListOperand::NeonListOperand(DoubleRegister base, int registers_count) { |
| 444 base_ = base; |
| 445 switch (registers_count) { |
| 446 case 1: |
| 447 type_ = nlt_1; |
| 448 break; |
| 449 case 2: |
| 450 type_ = nlt_2; |
| 451 break; |
| 452 case 3: |
| 453 type_ = nlt_3; |
| 454 break; |
| 455 case 4: |
| 456 type_ = nlt_4; |
| 457 break; |
| 458 default: |
| 459 UNREACHABLE(); |
| 460 type_ = nlt_1; |
| 461 break; |
| 462 } |
| 463 } |
| 464 |
| 465 |
379 // ----------------------------------------------------------------------------- | 466 // ----------------------------------------------------------------------------- |
380 // Specific instructions, constants, and masks. | 467 // Specific instructions, constants, and masks. |
381 | 468 |
382 // add(sp, sp, 4) instruction (aka Pop()) | 469 // add(sp, sp, 4) instruction (aka Pop()) |
383 const Instr kPopInstruction = | 470 const Instr kPopInstruction = |
384 al | PostIndex | 4 | LeaveCC | I | kRegister_sp_Code * B16 | | 471 al | PostIndex | 4 | LeaveCC | I | kRegister_sp_Code * B16 | |
385 kRegister_sp_Code * B12; | 472 kRegister_sp_Code * B12; |
386 // str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r)) | 473 // str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r)) |
387 // register r is not encoded. | 474 // register r is not encoded. |
388 const Instr kPushRegPattern = | 475 const Instr kPushRegPattern = |
(...skipping 1147 matching lines...)
1536 ASSERT(CpuFeatures::IsSupported(ARMv7)); | 1623 ASSERT(CpuFeatures::IsSupported(ARMv7)); |
1537 ASSERT(!dst.is(pc) && !src.is(pc)); | 1624 ASSERT(!dst.is(pc) && !src.is(pc)); |
1538 ASSERT((lsb >= 0) && (lsb <= 31)); | 1625 ASSERT((lsb >= 0) && (lsb <= 31)); |
1539 ASSERT((width >= 1) && (width <= (32 - lsb))); | 1626 ASSERT((width >= 1) && (width <= (32 - lsb))); |
1540 int msb = lsb + width - 1; | 1627 int msb = lsb + width - 1; |
1541 emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | | 1628 emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | |
1542 src.code()); | 1629 src.code()); |
1543 } | 1630 } |
1544 | 1631 |
1545 | 1632 |
| 1633 void Assembler::pkhbt(Register dst, |
| 1634 Register src1, |
| 1635 const Operand& src2, |
| 1636 Condition cond) { |
| 1637 // Instruction details available in ARM DDI 0406C.b, A8.8.125. |
| 1638 // cond(31-28) | 01101000(27-20) | Rn(19-16) | |
| 1639 // Rd(15-12) | imm5(11-7) | 0(6) | 01(5-4) | Rm(3-0) |
| 1640 ASSERT(!dst.is(pc)); |
| 1641 ASSERT(!src1.is(pc)); |
| 1642 ASSERT(!src2.rm().is(pc)); |
| 1643 ASSERT(!src2.rm().is(no_reg)); |
| 1644 ASSERT(src2.rs().is(no_reg)); |
| 1645 ASSERT((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31)); |
| 1646 ASSERT(src2.shift_op() == LSL); |
| 1647 emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 | |
| 1648 src2.shift_imm_*B7 | B4 | src2.rm().code()); |
| 1649 } |
| 1650 |
| 1651 |
| 1652 void Assembler::pkhtb(Register dst, |
| 1653 Register src1, |
| 1654 const Operand& src2, |
| 1655 Condition cond) { |
| 1656 // Instruction details available in ARM DDI 0406C.b, A8.8.125. |
| 1657 // cond(31-28) | 01101000(27-20) | Rn(19-16) | |
| 1658 // Rd(15-12) | imm5(11-7) | 1(6) | 01(5-4) | Rm(3-0) |
| 1659 ASSERT(!dst.is(pc)); |
| 1660 ASSERT(!src1.is(pc)); |
| 1661 ASSERT(!src2.rm().is(pc)); |
| 1662 ASSERT(!src2.rm().is(no_reg)); |
| 1663 ASSERT(src2.rs().is(no_reg)); |
| 1664 ASSERT((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32)); |
| 1665 ASSERT(src2.shift_op() == ASR); |
| 1666 int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_; |
| 1667 emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 | |
| 1668 asr*B7 | B6 | B4 | src2.rm().code()); |
| 1669 } |
| 1670 |
| 1671 |
| 1672 void Assembler::uxtb(Register dst, |
| 1673 const Operand& src, |
| 1674 Condition cond) { |
| 1675 // Instruction details available in ARM DDI 0406C.b, A8.8.274. |
| 1676 // cond(31-28) | 01101110(27-20) | 1111(19-16) | |
| 1677 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0) |
| 1678 ASSERT(!dst.is(pc)); |
| 1679 ASSERT(!src.rm().is(pc)); |
| 1680 ASSERT(!src.rm().is(no_reg)); |
| 1681 ASSERT(src.rs().is(no_reg)); |
| 1682 ASSERT((src.shift_imm_ == 0) || |
| 1683 (src.shift_imm_ == 8) || |
| 1684 (src.shift_imm_ == 16) || |
| 1685 (src.shift_imm_ == 24)); |
| 1686 ASSERT(src.shift_op() == ROR); |
| 1687 emit(cond | 0x6E*B20 | 0xF*B16 | dst.code()*B12 | |
| 1688 ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code()); |
| 1689 } |
| 1690 |
| 1691 |
| 1692 void Assembler::uxtab(Register dst, |
| 1693 Register src1, |
| 1694 const Operand& src2, |
| 1695 Condition cond) { |
| 1696 // Instruction details available in ARM DDI 0406C.b, A8.8.271. |
| 1697 // cond(31-28) | 01101110(27-20) | Rn(19-16) | |
| 1698 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0) |
| 1699 ASSERT(!dst.is(pc)); |
| 1700 ASSERT(!src1.is(pc)); |
| 1701 ASSERT(!src2.rm().is(pc)); |
| 1702 ASSERT(!src2.rm().is(no_reg)); |
| 1703 ASSERT(src2.rs().is(no_reg)); |
| 1704 ASSERT((src2.shift_imm_ == 0) || |
| 1705 (src2.shift_imm_ == 8) || |
| 1706 (src2.shift_imm_ == 16) || |
| 1707 (src2.shift_imm_ == 24)); |
| 1708 ASSERT(src2.shift_op() == ROR); |
| 1709 emit(cond | 0x6E*B20 | src1.code()*B16 | dst.code()*B12 | |
| 1710 ((src2.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src2.rm().code()); |
| 1711 } |
| 1712 |
| 1713 |
| 1714 void Assembler::uxtb16(Register dst, |
| 1715 const Operand& src, |
| 1716 Condition cond) { |
| 1717 // Instruction details available in ARM DDI 0406C.b, A8.8.275. |
| 1718 // cond(31-28) | 01101100(27-20) | 1111(19-16) | |
| 1719 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0) |
| 1720 ASSERT(!dst.is(pc)); |
| 1721 ASSERT(!src.rm().is(pc)); |
| 1722 ASSERT(!src.rm().is(no_reg)); |
| 1723 ASSERT(src.rs().is(no_reg)); |
| 1724 ASSERT((src.shift_imm_ == 0) || |
| 1725 (src.shift_imm_ == 8) || |
| 1726 (src.shift_imm_ == 16) || |
| 1727 (src.shift_imm_ == 24)); |
| 1728 ASSERT(src.shift_op() == ROR); |
| 1729 emit(cond | 0x6C*B20 | 0xF*B16 | dst.code()*B12 | |
| 1730 ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code()); |
| 1731 } |
| 1732 |
| 1733 |
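The new pack and zero-extend emitters above only encode bits; their semantics are compact enough to model directly. A minimal C++ sketch of what the emitted PKHBT/PKHTB/UXTB/UXTAB/UXTB16 instructions compute (an illustrative model only, not part of the patch; condition codes and the asr #32 special case are left out):

#include <stdint.h>

// Illustrative models of the ARMv6 media instructions emitted above.
static inline uint32_t Ror32(uint32_t x, int r) {  // r in {0, 8, 16, 24}
  return r == 0 ? x : (x >> r) | (x << (32 - r));
}

// pkhbt rd, rn, rm, lsl #sh: low halfword from rn, high halfword from rm << sh.
uint32_t PkhbtModel(uint32_t rn, uint32_t rm, int sh) {
  return (rn & 0x0000FFFF) | ((rm << sh) & 0xFFFF0000);
}

// pkhtb rd, rn, rm, asr #sh (1 <= sh <= 31 in this model):
// high halfword from rn, low halfword from rm >> sh (arithmetic shift).
uint32_t PkhtbModel(uint32_t rn, uint32_t rm, int sh) {
  uint32_t low = static_cast<uint32_t>(static_cast<int32_t>(rm) >> sh) & 0x0000FFFF;
  return (rn & 0xFFFF0000) | low;
}

// uxtb rd, rm, ror #rot: zero-extend the low byte of the rotated source.
uint32_t UxtbModel(uint32_t rm, int rot) { return Ror32(rm, rot) & 0xFF; }

// uxtab rd, rn, rm, ror #rot: uxtb plus accumulate into rn.
uint32_t UxtabModel(uint32_t rn, uint32_t rm, int rot) {
  return rn + (Ror32(rm, rot) & 0xFF);
}

// uxtb16 rd, rm, ror #rot: zero-extend bytes 0 and 2 into halfwords 0 and 1.
uint32_t Uxtb16Model(uint32_t rm, int rot) { return Ror32(rm, rot) & 0x00FF00FF; }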
1546 // Status register access instructions. | 1734 // Status register access instructions. |
1547 void Assembler::mrs(Register dst, SRegister s, Condition cond) { | 1735 void Assembler::mrs(Register dst, SRegister s, Condition cond) { |
1548 ASSERT(!dst.is(pc)); | 1736 ASSERT(!dst.is(pc)); |
1549 emit(cond | B24 | s | 15*B16 | dst.code()*B12); | 1737 emit(cond | B24 | s | 15*B16 | dst.code()*B12); |
1550 } | 1738 } |
1551 | 1739 |
1552 | 1740 |
1553 void Assembler::msr(SRegisterFieldMask fields, const Operand& src, | 1741 void Assembler::msr(SRegisterFieldMask fields, const Operand& src, |
1554 Condition cond) { | 1742 Condition cond) { |
1555 ASSERT(fields >= B16 && fields < B20); // at least one field set | 1743 ASSERT(fields >= B16 && fields < B20); // at least one field set |
(...skipping 77 matching lines...)
1633 void Assembler::strd(Register src1, Register src2, | 1821 void Assembler::strd(Register src1, Register src2, |
1634 const MemOperand& dst, Condition cond) { | 1822 const MemOperand& dst, Condition cond) { |
1635 ASSERT(dst.rm().is(no_reg)); | 1823 ASSERT(dst.rm().is(no_reg)); |
1636 ASSERT(!src1.is(lr)); // r14. | 1824 ASSERT(!src1.is(lr)); // r14. |
1637 ASSERT_EQ(0, src1.code() % 2); | 1825 ASSERT_EQ(0, src1.code() % 2); |
1638 ASSERT_EQ(src1.code() + 1, src2.code()); | 1826 ASSERT_EQ(src1.code() + 1, src2.code()); |
1639 ASSERT(IsEnabled(ARMv7)); | 1827 ASSERT(IsEnabled(ARMv7)); |
1640 addrmod3(cond | B7 | B6 | B5 | B4, src1, dst); | 1828 addrmod3(cond | B7 | B6 | B5 | B4, src1, dst); |
1641 } | 1829 } |
1642 | 1830 |
| 1831 // Preload instructions. |
| 1832 void Assembler::pld(const MemOperand& address) { |
| 1833 ASSERT(address.rm().is(no_reg)); |
| 1834 ASSERT(address.am() == Offset); |
| 1835 int U = B23; |
| 1836 int offset = address.offset(); |
| 1837 if (offset < 0) { |
| 1838 offset = -offset; |
| 1839 U = 0; |
| 1840 } |
| 1841 ASSERT(offset < 4096); |
| 1842 emit(kSpecialCondition | B26 | B24 | U | B22 | B20 | address.rn().code()*B16 | |
| 1843 0xf*B12 | offset); |
| 1844 } |
| 1845 |
| 1846 |
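The pld() emitter above accepts only an immediate-offset operand (no register offset, no writeback), matching the A1 PLD encoding; callers are expected to issue it ahead of loads they know are coming. A minimal sketch of how generated code might prefetch a cache line ahead in a copy loop, assuming the probed cache_line_size_ is exposed through an accessor such as CpuFeatures::cache_line_size() (that accessor name, and the helper itself, are assumptions for illustration, not part of this file):

// Hypothetical generator fragment using the new pld() emitter.
void EmitPrefetchingLoad(Assembler* assm, Register src, Register dst) {
  int stride = CpuFeatures::cache_line_size();    // 32 or 64 bytes, per the probe above
  assm->pld(MemOperand(src, stride));             // hint: the next cache line will be read
  assm->ldr(dst, MemOperand(src, 4, PostIndex));  // load the current word, src += 4
}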
1643 // Load/Store multiple instructions. | 1847 // Load/Store multiple instructions. |
1644 void Assembler::ldm(BlockAddrMode am, | 1848 void Assembler::ldm(BlockAddrMode am, |
1645 Register base, | 1849 Register base, |
1646 RegList dst, | 1850 RegList dst, |
1647 Condition cond) { | 1851 Condition cond) { |
1648 // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable. | 1852 // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable. |
1649 ASSERT(base.is(sp) || (dst & sp.bit()) == 0); | 1853 ASSERT(base.is(sp) || (dst & sp.bit()) == 0); |
1650 | 1854 |
1651 addrmod4(cond | B27 | am | L, base, dst); | 1855 addrmod4(cond | B27 | am | L, base, dst); |
1652 | 1856 |
(...skipping 1041 matching lines...)
2694 // Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0) | 2898 // Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0) |
2695 int vd, d; | 2899 int vd, d; |
2696 dst.split_code(&vd, &d); | 2900 dst.split_code(&vd, &d); |
2697 int vm, m; | 2901 int vm, m; |
2698 src.split_code(&vm, &m); | 2902 src.split_code(&vm, &m); |
2699 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | 0x3*B6 | | 2903 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | 0x3*B6 | |
2700 m*B5 | vm); | 2904 m*B5 | vm); |
2701 } | 2905 } |
2702 | 2906 |
2703 | 2907 |
| 2908 // Support for NEON. |
| 2909 |
| 2910 void Assembler::vld1(NeonSize size, |
| 2911 const NeonListOperand& dst, |
| 2912 const NeonMemOperand& src) { |
| 2913 // Instruction details available in ARM DDI 0406C.b, A8.8.320. |
| 2914 // 1111(31-28) | 01000(27-23) | D(22) | 10(21-20) | Rn(19-16) | |
| 2915 // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0) |
| 2916 ASSERT(CpuFeatures::IsSupported(NEON)); |
| 2917 int vd, d; |
| 2918 dst.base().split_code(&vd, &d); |
| 2919 emit(0xFU*B28 | 4*B24 | d*B22 | 2*B20 | src.rn().code()*B16 | vd*B12 | |
| 2920 dst.type()*B8 | size*B6 | src.align()*B4 | src.rm().code()); |
| 2921 } |
| 2922 |
| 2923 |
| 2924 void Assembler::vst1(NeonSize size, |
| 2925 const NeonListOperand& src, |
| 2926 const NeonMemOperand& dst) { |
| 2927 // Instruction details available in ARM DDI 0406C.b, A8.8.404. |
| 2928 // 1111(31-28) | 01000(27-23) | D(22) | 00(21-20) | Rn(19-16) | |
| 2929 // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0) |
| 2930 ASSERT(CpuFeatures::IsSupported(NEON)); |
| 2931 int vd, d; |
| 2932 src.base().split_code(&vd, &d); |
| 2933 emit(0xFU*B28 | 4*B24 | d*B22 | dst.rn().code()*B16 | vd*B12 | src.type()*B8 | |
| 2934 size*B6 | dst.align()*B4 | dst.rm().code()); |
| 2935 } |
| 2936 |
| 2937 |
| 2938 void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) { |
| 2939 // Instruction details available in ARM DDI 0406C.b, A8.8.346. |
| 2940 // 1111(31-28) | 001(27-25) | U(24) | 1(23) | D(22) | imm3(21-19) | |
| 2941 // 000(18-16) | Vd(15-12) | 101000(11-6) | M(5) | 1(4) | Vm(3-0) |
| 2942 ASSERT(CpuFeatures::IsSupported(NEON)); |
| 2943 int vd, d; |
| 2944 dst.split_code(&vd, &d); |
| 2945 int vm, m; |
| 2946 src.split_code(&vm, &m); |
| 2947 emit(0xFU*B28 | B25 | (dt & NeonDataTypeUMask) | B23 | d*B22 | |
| 2948 (dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm); |
| 2949 } |
| 2950 |
| 2951 |
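The vld1/vst1 encodings use the Rm field to select writeback: Rm = pc (0b1111) means no writeback and Rm = sp (0b1101) means post-increment by the transfer size, which is why NeonMemOperand maps Offset to pc and PostIndex to sp. A minimal usage sketch tying the new operands and emitters together, copying 16 bytes with post-increment (a hypothetical generator fragment, not code from this patch):

// Hypothetical fragment: copy one q-register's worth (16 bytes) of data.
void EmitCopy16Bytes(Assembler* assm, Register src, Register dst) {
  if (CpuFeatures::IsSupported(NEON)) {
    NeonListOperand regs(d0, 2);  // the register pair d0,d1
    assm->vld1(Neon8, regs, NeonMemOperand(src, PostIndex, 0));  // load,  src += 16
    assm->vst1(Neon8, regs, NeonMemOperand(dst, PostIndex, 0));  // store, dst += 16
  }
}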
2704 // Pseudo instructions. | 2952 // Pseudo instructions. |
2705 void Assembler::nop(int type) { | 2953 void Assembler::nop(int type) { |
2706 // ARMv6{K/T2} and v7 have an actual NOP instruction but it serializes | 2954 // ARMv6{K/T2} and v7 have an actual NOP instruction but it serializes |
2707 // some of the CPU's pipeline and has to issue. Older ARM chips simply used | 2955 // some of the CPU's pipeline and has to issue. Older ARM chips simply used |
2708 // MOV Rx, Rx as NOP and it performs better even in newer CPUs. | 2956 // MOV Rx, Rx as NOP and it performs better even in newer CPUs. |
2709 // We therefore use MOV Rx, Rx, even on newer CPUs, and use Rx to encode | 2957 // We therefore use MOV Rx, Rx, even on newer CPUs, and use Rx to encode |
2710 // a type. | 2958 // a type. |
2711 ASSERT(0 <= type && type <= 14); // mov pc, pc isn't a nop. | 2959 ASSERT(0 <= type && type <= 14); // mov pc, pc isn't a nop. |
2712 emit(al | 13*B21 | type*B12 | type); | 2960 emit(al | 13*B21 | type*B12 | type); |
2713 } | 2961 } |
(...skipping 388 matching lines...)
3102 | 3350 |
3103 // Since a constant pool was just emitted, move the check offset forward by | 3351 // Since a constant pool was just emitted, move the check offset forward by |
3104 // the standard interval. | 3352 // the standard interval. |
3105 next_buffer_check_ = pc_offset() + kCheckPoolInterval; | 3353 next_buffer_check_ = pc_offset() + kCheckPoolInterval; |
3106 } | 3354 } |
3107 | 3355 |
3108 | 3356 |
3109 } } // namespace v8::internal | 3357 } } // namespace v8::internal |
3110 | 3358 |
3111 #endif // V8_TARGET_ARCH_ARM | 3359 #endif // V8_TARGET_ARCH_ARM |