Index: src/arm/assembler-thumb16.cc
diff --git a/src/arm/assembler-thumb16.cc b/src/arm/assembler-thumb16.cc
new file mode 100644
index 0000000000000000000000000000000000000000..345db53f7a6717b75e9e68744d6dda8840ce569d
--- /dev/null
+++ b/src/arm/assembler-thumb16.cc
@@ -0,0 +1,237 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_ARM)
+
+#include "arm/assembler-arm-inl.h"
+#include "serialize.h"
+
+namespace v8 {
+namespace internal {
+
+// A6.2.1: Shift (immediate), add, subtract, move, and compare;
+// bits [15:14] = 00, 5-bit opcode in bits [13:9].
+Instr16 Assembler::thumb16_mode1(ThumbMode1Opcode16 op) {
+  return (op*B9);
+}
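+// For illustration (per the ARMv7-A ARM, DDI 0406, Table A6-2):
+// "lsls r0, r1, #2" encodes as 00000:imm5:Rm:Rd =
+// 00000:00010:001:000 = 0x0088.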
+
+
+// A6.2.2: Data-processing (register); bits [15:10] = 010000,
+// 4-bit opcode in bits [9:6].
+Instr16 Assembler::thumb16_mode2(ThumbMode2Opcode16 op) {
+  return (B14 | op*B6);
+}
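+// For illustration: "ands r0, r1" is AND (register) T1 ->
+// 010000:0000:Rm:Rdn = 0100 0000 0000 1000 = 0x4008.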
+
+
+// A6.2.3: Special data instructions and branch and exchange;
+// bits [15:10] = 010001, 4-bit opcode in bits [9:6].
+Instr16 Assembler::thumb16_mode3(ThumbMode3Opcode16 op) {
+  return (B14 | B10 | op*B6);
+}
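+// For illustration: "bx lr" is BX T1 -> 010001:110:Rm(4):000, i.e.
+// 0100 0111 0111 0000 = 0x4770, with Rm supplied by thumb16_anyreg_encoding.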
+
+
+// A6.2.4: Load/store single data item; opA (bits [15:12]) = 0101 -> _1,
+// opB in bits [11:9].
+Instr16 Assembler::thumb16_mode4_1(ThumbMode4_1Opcode16 opB) {
+  return (B14 | B12 | opB*B9);
+}
+
+
+// A6.2.4: Load/store single data item; opA = 0110 -> _2.
+Instr16 Assembler::thumb16_mode4_2(ThumbMode4_2Opcode16 opB) {
+  return (B14 | B13 | opB*B9);
+}
+
+
+// A6.2.4: Load/store single data item; opA = 0111 -> _3.
+Instr16 Assembler::thumb16_mode4_3(ThumbMode4_3Opcode16 opB) {
+  return (B14 | B13 | B12 | opB*B9);
+}
+
+
+// A6.2.4: Load/store single data item; opA = 1000 -> _4.
+Instr16 Assembler::thumb16_mode4_4(ThumbMode4_4Opcode16 opB) {
+  return (B15 | opB*B9);
+}
+
+
+// A6.2.4: Load/store single data item; opA = 1001 -> _5.
+Instr16 Assembler::thumb16_mode4_5(ThumbMode4_5Opcode16 opB) {
+  return (B15 | B12 | opB*B9);
+}
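+// For illustration: "ldr r0, [r1, #4]" is LDR (immediate) T1 ->
+// 0110:1:imm5:Rn:Rt with imm5 = offset/4, i.e. thumb16_mode4_2(opB) |
+// thumb16_2lowreg_imm5_encoding(r0, r1, 1) = 0x6848 (opB value assumed).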
+
+
+// Two low registers (r0-r7): Rm in bits [5:3], Rd in bits [2:0].
+Instr16 Assembler::thumb16_2lowreg_encoding(Register rd,
+                                            const Operand& x) {
+  return (x.rm_.code()*B3 | rd.code());
+}
+
+
+// Two low registers (r0-r7): Rs in bits [5:3], Rd in bits [2:0].
+Instr16 Assembler::thumb16_2lowreg_encoding(Register rd,
+                                            Register rs) {
+  return (rs.code()*B3 | rd.code());
+}
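+// For illustration: the low byte of "ands r0, r1" (thumb16_mode2 above) is
+// thumb16_2lowreg_encoding(r0, Operand(r1)) = 0x08.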
+
+
+// imm3 with two low registers: imm3 in bits [8:6], Rn in [5:3], Rd in [2:0].
+Instr16 Assembler::thumb16_2lowreg_imm3_encoding(Register rd,
+                                                 Register rn,
+                                                 const Operand& x) {
+  ASSERT(!x.rm_.is_valid());  // x is an immediate.
+  ASSERT(is_uint3(x.imm32_));
+  uint16_t imm3 = x.imm32_ & 7;
+  return (imm3*B6 | rn.code()*B3 | rd.code());
+}
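+// For illustration: "adds r0, r1, #3" is ADD (immediate) T1 ->
+// 0001110:imm3:Rn:Rd; the low bits are
+// thumb16_2lowreg_imm3_encoding(r0, r1, Operand(3)) = 0xc8 (full word 0x1cc8).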
+
+
+// imm5 with two low registers: imm5 in bits [10:6], Rn in [5:3], Rd in [2:0].
+Instr16 Assembler::thumb16_2lowreg_imm5_encoding(Register rd,
+                                                 Register rn,
+                                                 uint32_t offset) {
+  ASSERT(is_uint5(offset));
+  uint16_t imm5 = offset & 0x1f;
+  return (imm5*B6 | rn.code()*B3 | rd.code());
+}
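+// Note: the caller is expected to pass the already-scaled field value, e.g.
+// the word offset (byte_offset / 4) for LDR/STR (immediate) T1.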
+
+
+Instr16 Assembler::thumb16_2lowreg_imm5_encoding(Register rd,
+                                                 Register rn,
+                                                 const Operand& x) {
+  ASSERT(!x.rs_.is_valid());  // Shift amount is an immediate.
+  ASSERT(is_uint5(x.shift_imm_));
+  uint16_t imm5 = x.shift_imm_ & 0x1f;
+  return (imm5*B6 | rn.code()*B3 | rd.code());
+}
+
+
+// Three low registers (r0-r7): Rm in bits [8:6], Rn in [5:3], Rd in [2:0].
+Instr16 Assembler::thumb16_3lowreg_encoding(Register rd,
+                                            const MemOperand& x) {
+  ASSERT(x.rm_.is_valid());  // x has a register offset.
+  return (x.rm_.code()*B6 | x.rn_.code()*B3 | rd.code());
+}
+
+
+Instr16 Assembler::thumb16_3lowreg_encoding(Register rd,
+                                            Register rn,
+                                            const Operand& x) {
+  ASSERT(x.rm_.is_valid());  // x is a register.
+  return (x.rm_.code()*B6 | rn.code()*B3 | rd.code());
+}
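+// For illustration: "adds r0, r1, r2" is ADD (register) T1 -> 0001100:Rm:Rn:Rd;
+// the low bits are thumb16_3lowreg_encoding(r0, r1, Operand(r2)) = 0x88
+// (full word 0x1888).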
+
+
+// One register, possibly high (r0-r15): 4-bit Rm in bits [6:3].
+Instr16 Assembler::thumb16_anyreg_encoding(const Operand& x) {
+  return (x.rm_.code()*B3);
+}
+
+
+// One register, possibly high (r0-r15): 4-bit Rm in bits [6:3].
+Instr16 Assembler::thumb16_anyreg_encoding(const Register rm) {
+  return (rm.code()*B3);
+}
+
+
+// Two registers, either possibly high (r0-r15): Rd is split, with its high
+// bit (D) in bit 7 and its low three bits in [2:0]; 4-bit Rm in bits [6:3].
+Instr16 Assembler::thumb16_2anyreg_encoding(Register rd, const Operand& x) {
+  uint16_t d = rd.code() >> 3;
+  return (d*B7 | x.rm_.code()*B3 | (rd.code() & 7));
+}
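+// For illustration: "mov r8, r0" is MOV (register) T1 -> 01000110:D:Rm:Rd;
+// the low byte is thumb16_2anyreg_encoding(r8, Operand(r0)) = 0x80
+// (full word 0x4680).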
+
+
+// Low register with imm8: Rd in bits [10:8], imm8 in bits [7:0].
+Instr16 Assembler::thumb16_lowreg_imm8_encoding(Register rd, uint32_t offset) {
+  ASSERT(is_uint8(offset));
+  uint16_t imm8 = offset & 0xff;
+  return (rd.code()*B8 | imm8);
+}
+
+
+Instr16 Assembler::thumb16_lowreg_imm8_encoding(Register rd, const Operand& x) {
+  ASSERT(!x.rm_.is_valid());  // x is an immediate.
+  ASSERT(is_uint8(x.imm32_));
+  uint16_t imm8 = x.imm32_ & 0xff;
+  return (rd.code()*B8 | imm8);
+}
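+// For illustration: "movs r0, #1" is MOV (immediate) T1 -> 00100:Rd:imm8;
+// the low bits are thumb16_lowreg_imm8_encoding(r0, Operand(1)) = 0x01
+// (full word 0x2001).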
+
+
+// Low registers (r0-r7) fit the 3-bit register fields used by most 16-bit
+// Thumb encodings.
+bool Assembler::is_low_reg(Register reg) {
+  return is_uint3(reg.code());
+}
+
+
+bool Assembler::are_low_reg(Register reg1, Register reg2) {
+  return is_uint3(reg1.code()) && is_uint3(reg2.code());
+}
+
+
+Instr16 Assembler::thumb16_instr_at(Address addr) {
+  return Memory::int16_at(addr);
+}
+
+
+void Assembler::thumb16_instr_at_put(int pos, Instr16 instr) {
+  *reinterpret_cast<Instr16*>(buffer_ + pos) = instr;
+}
+
+} }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
+