OLD | NEW |
---|---|
1 //===- subzero/src/IceTargetLoweringARM32.cpp - ARM32 lowering ------------===// | 1 //===- subzero/src/IceTargetLoweringARM32.cpp - ARM32 lowering ------------===// |
2 // | 2 // |
3 // The Subzero Code Generator | 3 // The Subzero Code Generator |
4 // | 4 // |
5 // This file is distributed under the University of Illinois Open Source | 5 // This file is distributed under the University of Illinois Open Source |
6 // License. See LICENSE.TXT for details. | 6 // License. See LICENSE.TXT for details. |
7 // | 7 // |
8 //===----------------------------------------------------------------------===// | 8 //===----------------------------------------------------------------------===// |
9 /// | 9 /// |
10 /// \file | 10 /// \file |
(...skipping 724 matching lines...)
735 for (SizeT i = 0; i < CalleeSaves.size(); ++i) { | 735 for (SizeT i = 0; i < CalleeSaves.size(); ++i) { |
736 if (RegARM32::isI64RegisterPair(i)) { | 736 if (RegARM32::isI64RegisterPair(i)) { |
737 // We don't save register pairs explicitly. Instead, we rely on the code | 737 // We don't save register pairs explicitly. Instead, we rely on the code |
738 // fake-defing/fake-using each register in the pair. | 738 // fake-defing/fake-using each register in the pair. |
739 continue; | 739 continue; |
740 } | 740 } |
741 if (CalleeSaves[i] && RegsUsed[i]) { | 741 if (CalleeSaves[i] && RegsUsed[i]) { |
742 // TODO(jvoung): do separate vpush for each floating point register | 742 // TODO(jvoung): do separate vpush for each floating point register |
743 // segment and += 4, or 8 depending on type. | 743 // segment and += 4, or 8 depending on type. |
744 ++NumCallee; | 744 ++NumCallee; |
745 PreservedRegsSizeBytes += 4; | 745 Variable *PhysicalRegister = getPhysicalRegister(i); |
746 PreservedRegsSizeBytes += | |
747 typeWidthInBytesOnStack(PhysicalRegister->getType()); | |
746 GPRsToPreserve.push_back(getPhysicalRegister(i)); | 748 GPRsToPreserve.push_back(getPhysicalRegister(i)); |
747 } | 749 } |
748 } | 750 } |
749 Ctx->statsUpdateRegistersSaved(NumCallee); | 751 Ctx->statsUpdateRegistersSaved(NumCallee); |
750 if (!GPRsToPreserve.empty()) | 752 if (!GPRsToPreserve.empty()) |
751 _push(GPRsToPreserve); | 753 _push(GPRsToPreserve); |
752 | 754 |
753 // Generate "mov FP, SP" if needed. | 755 // Generate "mov FP, SP" if needed. |
754 if (UsesFramePointer) { | 756 if (UsesFramePointer) { |
755 Variable *FP = getPhysicalRegister(RegARM32::Reg_fp); | 757 Variable *FP = getPhysicalRegister(RegARM32::Reg_fp); |
(...skipping 865 matching lines...)
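The prologue hunk above stops assuming every callee-saved register occupies 4 bytes on the stack and instead asks for the register's stack width. A minimal standalone sketch of that accounting, using hypothetical type names rather than Subzero's API:

```cpp
#include <cstdint>
#include <vector>

enum class RegType { I32, F32, F64 }; // hypothetical register classes, not Subzero's

// Stand-in for typeWidthInBytesOnStack(): stack slots stay word-aligned, so a
// 64-bit register costs 8 bytes while 32-bit registers cost 4.
static uint32_t widthOnStack(RegType Ty) {
  return Ty == RegType::F64 ? 8 : 4;
}

static uint32_t preservedRegsSizeBytes(const std::vector<RegType> &Saved) {
  uint32_t Bytes = 0;
  for (RegType Ty : Saved)
    Bytes += widthOnStack(Ty); // the diff replaces a hard-coded "+= 4" with this
  return Bytes;
}
```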
1621 // Dest->getType() is a non-i64 scalar. | 1623 // Dest->getType() is a non-i64 scalar. |
1622 Variable *Src0R = legalizeToReg(Src0); | 1624 Variable *Src0R = legalizeToReg(Src0); |
1623 Variable *T = makeReg(Dest->getType()); | 1625 Variable *T = makeReg(Dest->getType()); |
1624 // Handle div/rem separately. They require a non-legalized Src1 to inspect | 1626 // Handle div/rem separately. They require a non-legalized Src1 to inspect |
1625 // whether or not Src1 is a non-zero constant. Once legalized it is more | 1627 // whether or not Src1 is a non-zero constant. Once legalized it is more |
1626 // difficult to determine (constant may be moved to a register). | 1628 // difficult to determine (constant may be moved to a register). |
1627 switch (Inst->getOp()) { | 1629 switch (Inst->getOp()) { |
1628 default: | 1630 default: |
1629 break; | 1631 break; |
1630 case InstArithmetic::Udiv: { | 1632 case InstArithmetic::Udiv: { |
1631 constexpr bool IsRemainder = false; | 1633 constexpr bool NotRemainder = false; |
1632 lowerIDivRem(Dest, T, Src0R, Src1, &TargetARM32::_uxt, &TargetARM32::_udiv, | 1634 lowerIDivRem(Dest, T, Src0R, Src1, &TargetARM32::_uxt, &TargetARM32::_udiv, |
1633 H_udiv_i32, IsRemainder); | 1635 H_udiv_i32, NotRemainder); |
1634 return; | 1636 return; |
1635 } | 1637 } |
1636 case InstArithmetic::Sdiv: { | 1638 case InstArithmetic::Sdiv: { |
1637 constexpr bool IsRemainder = false; | 1639 constexpr bool NotRemainder = false; |
1638 lowerIDivRem(Dest, T, Src0R, Src1, &TargetARM32::_sxt, &TargetARM32::_sdiv, | 1640 lowerIDivRem(Dest, T, Src0R, Src1, &TargetARM32::_sxt, &TargetARM32::_sdiv, |
1639 H_sdiv_i32, IsRemainder); | 1641 H_sdiv_i32, NotRemainder); |
1640 return; | 1642 return; |
1641 } | 1643 } |
1642 case InstArithmetic::Urem: { | 1644 case InstArithmetic::Urem: { |
1643 constexpr bool IsRemainder = true; | 1645 constexpr bool IsRemainder = true; |
1644 lowerIDivRem(Dest, T, Src0R, Src1, &TargetARM32::_uxt, &TargetARM32::_udiv, | 1646 lowerIDivRem(Dest, T, Src0R, Src1, &TargetARM32::_uxt, &TargetARM32::_udiv, |
1645 H_urem_i32, IsRemainder); | 1647 H_urem_i32, IsRemainder); |
1646 return; | 1648 return; |
1647 } | 1649 } |
1648 case InstArithmetic::Srem: { | 1650 case InstArithmetic::Srem: { |
1649 constexpr bool IsRemainder = true; | 1651 constexpr bool IsRemainder = true; |
(...skipping 73 matching lines...)
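The comment in this hunk explains why Src1 stays non-legalized for div/rem: only a divisor known to be a non-zero constant lets the lowering skip the divide-by-zero check, and that fact is hidden once the constant has been moved into a register. A small sketch of that test, with a hypothetical operand model in place of Subzero's classes:

```cpp
#include <cstdint>
#include <optional>

// Hypothetical operand model (not Subzero's Operand class): a value that may
// or may not be a known integer constant.
struct DivisorInfo { std::optional<int32_t> ConstValue; };

// The trap check can only be elided when the divisor is a constant that is
// provably non-zero.
static bool needsZeroCheck(const DivisorInfo &Divisor) {
  return !(Divisor.ConstValue.has_value() && *Divisor.ConstValue != 0);
}
```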
1723 Variable *Src1R = legalizeToReg(Src1RF); | 1725 Variable *Src1R = legalizeToReg(Src1RF); |
1724 _mul(T, Src0R, Src1R); | 1726 _mul(T, Src0R, Src1R); |
1725 _mov(Dest, T); | 1727 _mov(Dest, T); |
1726 return; | 1728 return; |
1727 } | 1729 } |
1728 case InstArithmetic::Shl: | 1730 case InstArithmetic::Shl: |
1729 _lsl(T, Src0R, Src1RF); | 1731 _lsl(T, Src0R, Src1RF); |
1730 _mov(Dest, T); | 1732 _mov(Dest, T); |
1731 return; | 1733 return; |
1732 case InstArithmetic::Lshr: | 1734 case InstArithmetic::Lshr: |
1735 if (Dest->getType() != IceType_i32) { | |
1736 _uxt(Src0R, Src0R); | |
1737 } | |
1733 _lsr(T, Src0R, Src1RF); | 1738 _lsr(T, Src0R, Src1RF); |
1734 _mov(Dest, T); | 1739 _mov(Dest, T); |
1735 return; | 1740 return; |
1736 case InstArithmetic::Ashr: | 1741 case InstArithmetic::Ashr: |
1742 if (Dest->getType() != IceType_i32) { | |
1743 _sxt(Src0R, Src0R); | |
1744 } | |
1737 _asr(T, Src0R, Src1RF); | 1745 _asr(T, Src0R, Src1RF); |
1738 _mov(Dest, T); | 1746 _mov(Dest, T); |
1739 return; | 1747 return; |
1740 case InstArithmetic::Udiv: | 1748 case InstArithmetic::Udiv: |
1741 case InstArithmetic::Sdiv: | 1749 case InstArithmetic::Sdiv: |
1742 case InstArithmetic::Urem: | 1750 case InstArithmetic::Urem: |
1743 case InstArithmetic::Srem: | 1751 case InstArithmetic::Srem: |
1744 llvm_unreachable("Integer div/rem should have been handled earlier."); | 1752 llvm_unreachable("Integer div/rem should have been handled earlier."); |
1745 return; | 1753 return; |
1746 case InstArithmetic::Fadd: | 1754 case InstArithmetic::Fadd: |
(...skipping 50 matching lines...)
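The new _uxt/_sxt calls before the right shifts exist because an i8/i16 value lives in a 32-bit register whose upper bits are not guaranteed to be clean; a 32-bit LSR or ASR only produces the right narrow result after the value is zero- or sign-extended. A self-contained illustration of the semantics (plain C++, not the lowering code itself):

```cpp
#include <cassert>
#include <cstdint>

static uint32_t lshr_i8(uint32_t RegVal, uint32_t Amt) {
  uint32_t Zext = RegVal & 0xFF; // uxtb: clear stale upper bits first
  return Zext >> Amt;            // now a 32-bit lsr gives the i8 result
}

static int32_t ashr_i8(uint32_t RegVal, uint32_t Amt) {
  int32_t Sext = static_cast<int8_t>(RegVal & 0xFF); // sxtb: replicate the sign bit
  return Sext >> Amt;                                // 32-bit asr gives the i8 result
}

int main() {
  // 0xFFFFFF80 holds the i8 value 0x80 (-128) with garbage upper bits.
  assert(lshr_i8(0xFFFFFF80u, 1) == 0x40); // logical shift of 0x80
  assert(ashr_i8(0xFFFFFF80u, 1) == -64);  // arithmetic shift of -128
}
```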
1797 | 1805 |
1798 void TargetARM32::lowerBr(const InstBr *Inst) { | 1806 void TargetARM32::lowerBr(const InstBr *Inst) { |
1799 if (Inst->isUnconditional()) { | 1807 if (Inst->isUnconditional()) { |
1800 _br(Inst->getTargetUnconditional()); | 1808 _br(Inst->getTargetUnconditional()); |
1801 return; | 1809 return; |
1802 } | 1810 } |
1803 Operand *Cond = Inst->getCondition(); | 1811 Operand *Cond = Inst->getCondition(); |
1804 // TODO(jvoung): Handle folding opportunities. | 1812 // TODO(jvoung): Handle folding opportunities. |
1805 | 1813 |
1806 Variable *Src0R = legalizeToReg(Cond); | 1814 Variable *Src0R = legalizeToReg(Cond); |
1815 assert(Src0R->mustHaveReg()); | |
1816 if (Src0R->getType() != IceType_i32) | |
Jim Stichnoth (2015/10/15 23:34:21):
Won't it always be the case that Src0R->getType() …
John (2015/11/05 20:25:13):
maybe, but better safe than sorry. I left an assert…
1817 _uxt(Src0R, Src0R); | |
1807 Constant *Zero = Ctx->getConstantZero(IceType_i32); | 1818 Constant *Zero = Ctx->getConstantZero(IceType_i32); |
1808 _cmp(Src0R, Zero); | 1819 _cmp(Src0R, Zero); |
1809 _br(Inst->getTargetTrue(), Inst->getTargetFalse(), CondARM32::NE); | 1820 _br(Inst->getTargetTrue(), Inst->getTargetFalse(), CondARM32::NE); |
1810 } | 1821 } |
1811 | 1822 |
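A conceptual model of the branch sequence lowerBr now emits (extend, compare against zero, branch if not equal); the masking below merely stands in for the added _uxt and shows why stale upper bits in the condition register cannot affect the branch:

```cpp
#include <cstdint>

static bool branchTaken(uint32_t CondReg) {
  uint32_t Cond = CondReg & 1; // the added _uxt: keep only the defined bit(s)
  return Cond != 0;            // cmp Cond, #0 ; bne target_true
}
```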
1812 void TargetARM32::lowerCall(const InstCall *Instr) { | 1823 void TargetARM32::lowerCall(const InstCall *Instr) { |
1813 MaybeLeafFunc = false; | 1824 MaybeLeafFunc = false; |
1814 NeedsStackAlignment = true; | 1825 NeedsStackAlignment = true; |
1815 | 1826 |
1816 // Assign arguments to registers and stack. Also reserve stack. | 1827 // Assign arguments to registers and stack. Also reserve stack. |
(...skipping 474 matching lines...)
2291 case IceType_i64: { | 2302 case IceType_i64: { |
2292 // t0, t1 <- src0 | 2303 // t0, t1 <- src0 |
2293 // dest[31..0] = t0 | 2304 // dest[31..0] = t0 |
2294 // dest[63..32] = t1 | 2305 // dest[63..32] = t1 |
2295 assert(Src0->getType() == IceType_f64); | 2306 assert(Src0->getType() == IceType_f64); |
2296 auto *T = llvm::cast<Variable64On32>(Func->makeVariable(IceType_i64)); | 2307 auto *T = llvm::cast<Variable64On32>(Func->makeVariable(IceType_i64)); |
2297 T->initHiLo(Func); | 2308 T->initHiLo(Func); |
2298 configureBitcastTemporary(T); | 2309 configureBitcastTemporary(T); |
2299 Variable *Src0R = legalizeToReg(Src0); | 2310 Variable *Src0R = legalizeToReg(Src0); |
2300 _mov(T, Src0R); | 2311 _mov(T, Src0R); |
2312 Context.insert(InstFakeUse::create(Func, T->getHi())); | |
2313 Context.insert(InstFakeUse::create(Func, T->getLo())); | |
2301 lowerAssign(InstAssign::create(Func, Dest, T)); | 2314 lowerAssign(InstAssign::create(Func, Dest, T)); |
2302 break; | 2315 break; |
2303 } | 2316 } |
2304 case IceType_f64: { | 2317 case IceType_f64: { |
2305 // T0 <- lo(src) | 2318 // T0 <- lo(src) |
2306 // T1 <- hi(src) | 2319 // T1 <- hi(src) |
2307 // vmov T2, T0, T1 | 2320 // vmov T2, T0, T1 |
2308 // Dest <- T2 | 2321 // Dest <- T2 |
2309 assert(Src0->getType() == IceType_i64); | 2322 assert(Src0->getType() == IceType_i64); |
2310 Variable *T = makeReg(DestType); | 2323 Variable *T = makeReg(DestType); |
(...skipping 930 matching lines...)
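For the f64 -> i64 direction, the diff adds fake uses of the temporary's hi and lo halves so the register allocator keeps both alive until the final assignment reads them. The bit-level effect being modeled is just splitting the double's bits into two 32-bit words, as in this standalone sketch:

```cpp
#include <cstdint>
#include <cstring>

struct HiLo { uint32_t Lo, Hi; };

// Reinterpret the double's bits (no numeric conversion) and split them into
// the lo/hi words a vmov into a core register pair would produce.
static HiLo bitcastF64ToHalves(double D) {
  uint64_t Bits;
  std::memcpy(&Bits, &D, sizeof Bits);
  return {static_cast<uint32_t>(Bits), static_cast<uint32_t>(Bits >> 32)};
}
```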
3241 Type Ty = Load->getDest()->getType(); | 3254 Type Ty = Load->getDest()->getType(); |
3242 Operand *Src0 = formMemoryOperand(Load->getSourceAddress(), Ty); | 3255 Operand *Src0 = formMemoryOperand(Load->getSourceAddress(), Ty); |
3243 Variable *DestLoad = Load->getDest(); | 3256 Variable *DestLoad = Load->getDest(); |
3244 | 3257 |
3245 // TODO(jvoung): handled folding opportunities. Sign and zero extension can | 3258 // TODO(jvoung): handled folding opportunities. Sign and zero extension can |
3246 // be folded into a load. | 3259 // be folded into a load. |
3247 InstAssign *Assign = InstAssign::create(Func, DestLoad, Src0); | 3260 InstAssign *Assign = InstAssign::create(Func, DestLoad, Src0); |
3248 lowerAssign(Assign); | 3261 lowerAssign(Assign); |
3249 } | 3262 } |
3250 | 3263 |
3251 void TargetARM32::doAddressOptLoad() { | 3264 void TargetARM32::doAddressOptLoad() {} |
3252 UnimplementedError(Func->getContext()->getFlags()); | |
3253 } | |
3254 | 3265 |
3255 void TargetARM32::randomlyInsertNop(float Probability, | 3266 void TargetARM32::randomlyInsertNop(float Probability, |
3256 RandomNumberGenerator &RNG) { | 3267 RandomNumberGenerator &RNG) { |
3257 RandomNumberGeneratorWrapper RNGW(RNG); | 3268 RandomNumberGeneratorWrapper RNGW(RNG); |
3258 if (RNGW.getTrueWithProbability(Probability)) { | 3269 if (RNGW.getTrueWithProbability(Probability)) { |
3259 UnimplementedError(Func->getContext()->getFlags()); | 3270 UnimplementedError(Func->getContext()->getFlags()); |
3260 } | 3271 } |
3261 } | 3272 } |
3262 | 3273 |
3263 void TargetARM32::lowerPhi(const InstPhi * /*Inst*/) { | 3274 void TargetARM32::lowerPhi(const InstPhi * /*Inst*/) { |
(...skipping 50 matching lines...)
3314 Variable *T = makeReg(DestTy); | 3325 Variable *T = makeReg(DestTy); |
3315 Context.insert(InstFakeDef::create(Func, T)); | 3326 Context.insert(InstFakeDef::create(Func, T)); |
3316 _mov(Dest, T); | 3327 _mov(Dest, T); |
3317 UnimplementedError(Func->getContext()->getFlags()); | 3328 UnimplementedError(Func->getContext()->getFlags()); |
3318 return; | 3329 return; |
3319 } | 3330 } |
3320 // TODO(jvoung): handle folding opportunities. | 3331 // TODO(jvoung): handle folding opportunities. |
3321 // cmp cond, #0; mov t, SrcF; mov_cond t, SrcT; mov dest, t | 3332 // cmp cond, #0; mov t, SrcF; mov_cond t, SrcT; mov dest, t |
3322 Variable *CmpOpnd0 = legalizeToReg(Condition); | 3333 Variable *CmpOpnd0 = legalizeToReg(Condition); |
3323 Operand *CmpOpnd1 = Ctx->getConstantZero(IceType_i32); | 3334 Operand *CmpOpnd1 = Ctx->getConstantZero(IceType_i32); |
3335 if (CmpOpnd0->getType() != IceType_i32) | |
Jim Stichnoth (2015/10/15 23:34:21):
Same comment as above -- isn't the type always Ice…
John (2015/11/05 20:25:13):
Done.
3336 _uxt(CmpOpnd0, CmpOpnd0); | |
3324 _cmp(CmpOpnd0, CmpOpnd1); | 3337 _cmp(CmpOpnd0, CmpOpnd1); |
3325 static constexpr CondARM32::Cond Cond = CondARM32::NE; | 3338 static constexpr CondARM32::Cond Cond = CondARM32::NE; |
3326 if (DestTy == IceType_i64) { | 3339 if (DestTy == IceType_i64) { |
3327 SrcT = legalizeUndef(SrcT); | 3340 SrcT = legalizeUndef(SrcT); |
3328 SrcF = legalizeUndef(SrcF); | 3341 SrcF = legalizeUndef(SrcF); |
3329 // Set the low portion. | 3342 // Set the low portion. |
3330 Variable *DestLo = llvm::cast<Variable>(loOperand(Dest)); | 3343 Variable *DestLo = llvm::cast<Variable>(loOperand(Dest)); |
3331 Operand *SrcFLo = legalize(loOperand(SrcF), Legal_Reg | Legal_Flex); | 3344 Operand *SrcFLo = legalize(loOperand(SrcF), Legal_Reg | Legal_Flex); |
3332 Variable *TLo = makeReg(SrcFLo->getType()); | 3345 Variable *TLo = makeReg(SrcFLo->getType()); |
3333 _mov(TLo, SrcFLo); | 3346 _mov(TLo, SrcFLo); |
(...skipping 43 matching lines...)
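The commented select pattern (cmp cond, #0; mov t, SrcF; mov_cond t, SrcT; mov dest, t) amounts to starting from the false value and conditionally overwriting it. A conceptual C++ model, with the masking again standing in for the added _uxt of the condition:

```cpp
#include <cstdint>

static int32_t selectLowered(uint32_t CondReg, int32_t SrcT, int32_t SrcF) {
  uint32_t Cond = CondReg & 1; // normalize the i1 condition (the added _uxt)
  int32_t T = SrcF;            // mov t, SrcF
  if (Cond != 0)               // cmp Cond, #0
    T = SrcT;                  //   movne t, SrcT
  return T;                    // mov dest, t
}
```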
3377 Variable *ValueHi = legalizeToReg(hiOperand(Value)); | 3390 Variable *ValueHi = legalizeToReg(hiOperand(Value)); |
3378 Variable *ValueLo = legalizeToReg(loOperand(Value)); | 3391 Variable *ValueLo = legalizeToReg(loOperand(Value)); |
3379 _str(ValueHi, llvm::cast<OperandARM32Mem>(hiOperand(NewAddr))); | 3392 _str(ValueHi, llvm::cast<OperandARM32Mem>(hiOperand(NewAddr))); |
3380 _str(ValueLo, llvm::cast<OperandARM32Mem>(loOperand(NewAddr))); | 3393 _str(ValueLo, llvm::cast<OperandARM32Mem>(loOperand(NewAddr))); |
3381 } else { | 3394 } else { |
3382 Variable *ValueR = legalizeToReg(Value); | 3395 Variable *ValueR = legalizeToReg(Value); |
3383 _str(ValueR, NewAddr); | 3396 _str(ValueR, NewAddr); |
3384 } | 3397 } |
3385 } | 3398 } |
3386 | 3399 |
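The i64 path in lowerStore splits the value into its 32-bit halves and emits two str instructions. A little-endian model of that split, assuming the lo word sits at the lower address:

```cpp
#include <cstdint>
#include <cstring>

// On ARM32 a 64-bit value is carried as two 32-bit halves, so the store
// becomes two word stores.
static void storeI64(uint8_t *Addr, uint32_t Lo, uint32_t Hi) {
  std::memcpy(Addr, &Lo, 4);     // str ValueLo, [addr]
  std::memcpy(Addr + 4, &Hi, 4); // str ValueHi, [addr, #4]
}
```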
3387 void TargetARM32::doAddressOptStore() { | 3400 void TargetARM32::doAddressOptStore() {} |
3388 UnimplementedError(Func->getContext()->getFlags()); | |
3389 } | |
3390 | 3401 |
3391 void TargetARM32::lowerSwitch(const InstSwitch *Inst) { | 3402 void TargetARM32::lowerSwitch(const InstSwitch *Inst) { |
3392 // This implements the most naive possible lowering. | 3403 // This implements the most naive possible lowering. |
3393 // cmp a,val[0]; jeq label[0]; cmp a,val[1]; jeq label[1]; ... jmp default | 3404 // cmp a,val[0]; jeq label[0]; cmp a,val[1]; jeq label[1]; ... jmp default |
3394 Operand *Src0 = Inst->getComparison(); | 3405 Operand *Src0 = Inst->getComparison(); |
3395 SizeT NumCases = Inst->getNumCases(); | 3406 SizeT NumCases = Inst->getNumCases(); |
3396 if (Src0->getType() == IceType_i64) { | 3407 if (Src0->getType() == IceType_i64) { |
3397 Src0 = legalizeUndef(Src0); | 3408 Src0 = legalizeUndef(Src0); |
3398 Variable *Src0Lo = legalizeToReg(loOperand(Src0)); | 3409 Variable *Src0Lo = legalizeToReg(loOperand(Src0)); |
3399 Variable *Src0Hi = legalizeToReg(hiOperand(Src0)); | 3410 Variable *Src0Hi = legalizeToReg(hiOperand(Src0)); |
3400 for (SizeT I = 0; I < NumCases; ++I) { | 3411 for (SizeT I = 0; I < NumCases; ++I) { |
3401 Operand *ValueLo = Ctx->getConstantInt32(Inst->getValue(I)); | 3412 Operand *ValueLo = Ctx->getConstantInt32(Inst->getValue(I)); |
3402 Operand *ValueHi = Ctx->getConstantInt32(Inst->getValue(I) >> 32); | 3413 Operand *ValueHi = Ctx->getConstantInt32(Inst->getValue(I) >> 32); |
3403 ValueLo = legalize(ValueLo, Legal_Reg | Legal_Flex); | 3414 ValueLo = legalize(ValueLo, Legal_Reg | Legal_Flex); |
3404 ValueHi = legalize(ValueHi, Legal_Reg | Legal_Flex); | 3415 ValueHi = legalize(ValueHi, Legal_Reg | Legal_Flex); |
3405 _cmp(Src0Lo, ValueLo); | 3416 _cmp(Src0Lo, ValueLo); |
3406 _cmp(Src0Hi, ValueHi, CondARM32::EQ); | 3417 _cmp(Src0Hi, ValueHi, CondARM32::EQ); |
3407 _br(Inst->getLabel(I), CondARM32::EQ); | 3418 _br(Inst->getLabel(I), CondARM32::EQ); |
3408 } | 3419 } |
3409 _br(Inst->getLabelDefault()); | 3420 _br(Inst->getLabelDefault()); |
3410 return; | 3421 return; |
3411 } | 3422 } |
3412 | 3423 |
3413 // 32 bit integer | |
3414 Variable *Src0Var = legalizeToReg(Src0); | 3424 Variable *Src0Var = legalizeToReg(Src0); |
3425 // If Src0 is not an i32, we left shift it -- see the icmp lowering for the | |
3426 // reason. | |
3427 assert(Src0Var->mustHaveReg()); | |
3428 const size_t ShiftAmt = 32 - getScalarIntBitWidth(Src0->getType()); | |
3429 assert(ShiftAmt < 32); | |
3430 if (ShiftAmt > 0) { | |
3431 Operand *ShiftConst = Ctx->getConstantInt32(ShiftAmt); | |
3432 Variable *T = makeReg(IceType_i32); | |
3433 _lsl(T, Src0Var, ShiftConst); | |
3434 Src0Var = T; | |
3435 } | |
3436 | |
3415 for (SizeT I = 0; I < NumCases; ++I) { | 3437 for (SizeT I = 0; I < NumCases; ++I) { |
3416 Operand *Value = Ctx->getConstantInt32(Inst->getValue(I)); | 3438 Operand *Value = Ctx->getConstantInt32(Inst->getValue(I) << ShiftAmt); |
3417 Value = legalize(Value, Legal_Reg | Legal_Flex); | 3439 Value = legalize(Value, Legal_Reg | Legal_Flex); |
3418 _cmp(Src0Var, Value); | 3440 _cmp(Src0Var, Value); |
3419 _br(Inst->getLabel(I), CondARM32::EQ); | 3441 _br(Inst->getLabel(I), CondARM32::EQ); |
3420 } | 3442 } |
3421 _br(Inst->getLabelDefault()); | 3443 _br(Inst->getLabelDefault()); |
3422 } | 3444 } |
3423 | 3445 |
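The new narrow-type handling in lowerSwitch shifts both the switch operand and every case constant left by 32 minus the operand's bit width, so whatever garbage sits in the operand register's upper bits falls off the top before the 32-bit compares. A standalone model of that trick:

```cpp
#include <cassert>
#include <cstdint>

static bool caseMatches(uint32_t OperandReg, uint32_t CaseVal, uint32_t BitWidth) {
  const uint32_t ShiftAmt = 32 - BitWidth; // 0 for i32, 24 for i8, etc.
  return (OperandReg << ShiftAmt) == (CaseVal << ShiftAmt); // lsl + cmp
}

int main() {
  // i8 operand 0x7F sitting in a register with garbage upper bits.
  assert(caseMatches(0xABCDEF7Fu, 0x7F, 8));
  assert(!caseMatches(0xABCDEF7Fu, 0x80, 8));
}
```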
3424 void TargetARM32::lowerUnreachable(const InstUnreachable * /*Inst*/) { | 3446 void TargetARM32::lowerUnreachable(const InstUnreachable * /*Inst*/) { |
3425 _trap(); | 3447 _trap(); |
3426 } | 3448 } |
(...skipping 502 matching lines...)
3929 << ".eabi_attribute 68, 1 @ Tag_Virtualization_use\n"; | 3951 << ".eabi_attribute 68, 1 @ Tag_Virtualization_use\n"; |
3930 if (CPUFeatures.hasFeature(TargetARM32Features::HWDivArm)) { | 3952 if (CPUFeatures.hasFeature(TargetARM32Features::HWDivArm)) { |
3931 Str << ".eabi_attribute 44, 2 @ Tag_DIV_use\n"; | 3953 Str << ".eabi_attribute 44, 2 @ Tag_DIV_use\n"; |
3932 } | 3954 } |
3933 // Technically R9 is used for TLS with Sandboxing, and we reserve it. | 3955 // Technically R9 is used for TLS with Sandboxing, and we reserve it. |
3934 // However, for compatibility with current NaCl LLVM, don't claim that. | 3956 // However, for compatibility with current NaCl LLVM, don't claim that. |
3935 Str << ".eabi_attribute 14, 3 @ Tag_ABI_PCS_R9_use: Not used\n"; | 3957 Str << ".eabi_attribute 14, 3 @ Tag_ABI_PCS_R9_use: Not used\n"; |
3936 } | 3958 } |
3937 | 3959 |
3938 } // end of namespace Ice | 3960 } // end of namespace Ice |