| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 690 matching lines...) |
| 701 int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize; | 701 int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize; |
| 702 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize; | 702 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize; |
| 703 return MemOperand(sp, doubles_size + register_offset); | 703 return MemOperand(sp, doubles_size + register_offset); |
| 704 } | 704 } |
| 705 | 705 |
| 706 | 706 |
| 707 void MacroAssembler::Ldrd(Register dst1, Register dst2, | 707 void MacroAssembler::Ldrd(Register dst1, Register dst2, |
| 708 const MemOperand& src, Condition cond) { | 708 const MemOperand& src, Condition cond) { |
| 709 ASSERT(src.rm().is(no_reg)); | 709 ASSERT(src.rm().is(no_reg)); |
| 710 ASSERT(!dst1.is(lr)); // r14. | 710 ASSERT(!dst1.is(lr)); // r14. |
| 711 ASSERT_EQ(0, dst1.code() % 2); | |
| 712 ASSERT_EQ(dst1.code() + 1, dst2.code()); | |
| 713 | 711 |
| 714 // V8 does not use this addressing mode, so the fallback code | 712 // V8 does not use this addressing mode, so the fallback code |
| 715 // below doesn't support it yet. | 713 // below doesn't support it yet. |
| 716 ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex)); | 714 ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex)); |
| 717 | 715 |
| 718 // Generate two ldr instructions if ldrd is not available. | 716 // Generate two ldr instructions if ldrd is not available. |
| 719 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) { | 717 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() && |
| | 718 (dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) { |
| 720 CpuFeatureScope scope(this, ARMv7); | 719 CpuFeatureScope scope(this, ARMv7); |
| 721 ldrd(dst1, dst2, src, cond); | 720 ldrd(dst1, dst2, src, cond); |
| 722 } else { | 721 } else { |
| 723 if ((src.am() == Offset) || (src.am() == NegOffset)) { | 722 if ((src.am() == Offset) || (src.am() == NegOffset)) { |
| 724 MemOperand src2(src); | 723 MemOperand src2(src); |
| 725 src2.set_offset(src2.offset() + 4); | 724 src2.set_offset(src2.offset() + 4); |
| 726 if (dst1.is(src.rn())) { | 725 if (dst1.is(src.rn())) { |
| 727 ldr(dst2, src2, cond); | 726 ldr(dst2, src2, cond); |
| 728 ldr(dst1, src, cond); | 727 ldr(dst1, src, cond); |
| 729 } else { | 728 } else { |
| (...skipping 13 matching lines...) |
| 743 } | 742 } |
| 744 } | 743 } |
| 745 } | 744 } |
| 746 } | 745 } |
| 747 | 746 |
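The change above folds the even/odd register-pair requirement into the runtime feature test instead of asserting it: if the destination pair cannot be encoded by ldrd, the two-ldr fallback is taken even on ARMv7. A minimal standalone sketch of the encoding constraint (hypothetical helper in plain C++, not part of this CL):

    // ARM-mode LDRD/STRD (register pair form) requires an even-numbered first
    // register and the next consecutive register as its partner, e.g. {r0, r1}.
    bool IsEncodableLdrdPair(int first_code, int second_code) {
      return (first_code % 2 == 0) && (second_code == first_code + 1);
    }

Strd below gets the same restructuring.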
| 748 | 747 |
| 749 void MacroAssembler::Strd(Register src1, Register src2, | 748 void MacroAssembler::Strd(Register src1, Register src2, |
| 750 const MemOperand& dst, Condition cond) { | 749 const MemOperand& dst, Condition cond) { |
| 751 ASSERT(dst.rm().is(no_reg)); | 750 ASSERT(dst.rm().is(no_reg)); |
| 752 ASSERT(!src1.is(lr)); // r14. | 751 ASSERT(!src1.is(lr)); // r14. |
| 753 ASSERT_EQ(0, src1.code() % 2); | |
| 754 ASSERT_EQ(src1.code() + 1, src2.code()); | |
| 755 | 752 |
| 756 // V8 does not use this addressing mode, so the fallback code | 753 // V8 does not use this addressing mode, so the fallback code |
| 757 // below doesn't support it yet. | 754 // below doesn't support it yet. |
| 758 ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex)); | 755 ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex)); |
| 759 | 756 |
| 760 // Generate two str instructions if strd is not available. | 757 // Generate two str instructions if strd is not available. |
| 761 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) { | 758 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() && |
| | 759 (src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) { |
| 762 CpuFeatureScope scope(this, ARMv7); | 760 CpuFeatureScope scope(this, ARMv7); |
| 763 strd(src1, src2, dst, cond); | 761 strd(src1, src2, dst, cond); |
| 764 } else { | 762 } else { |
| 765 MemOperand dst2(dst); | 763 MemOperand dst2(dst); |
| 766 if ((dst.am() == Offset) || (dst.am() == NegOffset)) { | 764 if ((dst.am() == Offset) || (dst.am() == NegOffset)) { |
| 767 dst2.set_offset(dst2.offset() + 4); | 765 dst2.set_offset(dst2.offset() + 4); |
| 768 str(src1, dst, cond); | 766 str(src1, dst, cond); |
| 769 str(src2, dst2, cond); | 767 str(src2, dst2, cond); |
| 770 } else { // PostIndex or NegPostIndex. | 768 } else { // PostIndex or NegPostIndex. |
| 771 ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex)); | 769 ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex)); |
| (...skipping 1720 matching lines...) |
| 2492 sub(result, result, Operand(1), SetCC); | 2490 sub(result, result, Operand(1), SetCC); |
| 2493 // If result is still negative, go to done, result fetched. | 2491 // If result is still negative, go to done, result fetched. |
| 2494 // Else, we had an overflow and we fall through to exception. | 2492 // Else, we had an overflow and we fall through to exception. |
| 2495 b(mi, done); | 2493 b(mi, done); |
| 2496 bind(&exception); | 2494 bind(&exception); |
| 2497 } | 2495 } |
| 2498 | 2496 |
| 2499 | 2497 |
| 2500 void MacroAssembler::ECMAConvertNumberToInt32(Register source, | 2498 void MacroAssembler::ECMAConvertNumberToInt32(Register source, |
| 2501 Register result, | 2499 Register result, |
| | 2500 Register input_low, |
| | 2501 Register input_high, |
| 2502 Register scratch, | 2502 Register scratch, |
| 2503 Register input_high, | |
| 2504 Register input_low, | |
| 2505 DwVfpRegister double_scratch1, | 2503 DwVfpRegister double_scratch1, |
| 2506 DwVfpRegister double_scratch2) { | 2504 DwVfpRegister double_scratch2) { |
| 2507 if (CpuFeatures::IsSupported(VFP2)) { | 2505 if (CpuFeatures::IsSupported(VFP2)) { |
| 2508 CpuFeatureScope scope(this, VFP2); | 2506 CpuFeatureScope scope(this, VFP2); |
| 2509 vldr(double_scratch1, FieldMemOperand(source, HeapNumber::kValueOffset)); | 2507 vldr(double_scratch1, FieldMemOperand(source, HeapNumber::kValueOffset)); |
| 2510 ECMAToInt32VFP(result, double_scratch1, double_scratch2, | 2508 ECMAToInt32VFP(result, double_scratch1, double_scratch2, |
| 2511 scratch, input_high, input_low); | 2509 scratch, input_high, input_low); |
| 2512 } else { | 2510 } else { |
| 2513 Ldrd(input_low, input_high, | 2511 Ldrd(input_low, input_high, |
| 2514 FieldMemOperand(source, HeapNumber::kValueOffset)); | 2512 FieldMemOperand(source, HeapNumber::kValueOffset)); |
| (...skipping 56 matching lines...) |
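In the non-VFP2 path, Ldrd pulls the heap number's raw IEEE-754 payload into two core registers; on the little-endian ARM targets assumed here, the low word of a double sits at the lower address, hence Ldrd(input_low, input_high, ...). A minimal sketch of that view of a double (plain C++, illustrative only; SplitDouble is a hypothetical name):

    #include <cstdint>
    #include <cstring>

    // Split a double into the two 32-bit words the integer-only path works on.
    void SplitDouble(double value, uint32_t* input_low, uint32_t* input_high) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));         // reinterpret safely
      *input_low = static_cast<uint32_t>(bits);         // low mantissa word
      *input_high = static_cast<uint32_t>(bits >> 32);  // sign, exponent, top mantissa
    }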
| 2571 ASSERT(!result.is(input_high)); | 2569 ASSERT(!result.is(input_high)); |
| 2572 ASSERT(!result.is(input_low)); | 2570 ASSERT(!result.is(input_low)); |
| 2573 ASSERT(!scratch.is(input_high)); | 2571 ASSERT(!scratch.is(input_high)); |
| 2574 ASSERT(!scratch.is(input_low)); | 2572 ASSERT(!scratch.is(input_low)); |
| 2575 ASSERT(!input_high.is(input_low)); | 2573 ASSERT(!input_high.is(input_low)); |
| 2576 | 2574 |
| 2577 Label both, out_of_range, negate, done; | 2575 Label both, out_of_range, negate, done; |
| 2578 | 2576 |
| 2579 Ubfx(scratch, input_high, | 2577 Ubfx(scratch, input_high, |
| 2580 HeapNumber::kExponentShift, HeapNumber::kExponentBits); | 2578 HeapNumber::kExponentShift, HeapNumber::kExponentBits); |
| 2581 // Load scratch with exponent - 1. This is faster than loading | 2579 // Load scratch with exponent. |
| 2582 // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value. | 2580 sub(scratch, scratch, Operand(HeapNumber::kExponentBias)); |
| 2583 sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1)); | |
| 2584 // If exponent is negative, 0 < input < 1, the result is 0. | 2581 // If exponent is negative, 0 < input < 1, the result is 0. |
| 2585 // If exponent is greater than or equal to 84, the 32 least significant | 2582 // If exponent is greater than or equal to 84, the 32 least significant |
| 2586 // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits), | 2583 // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits), |
| 2587 // the result is 0. | 2584 // the result is 0. |
| 2588 // This test also catches NaN and infinities, which also return 0. | 2585 // This test also catches NaN and infinities, which also return 0. |
| 2589 // Compare exponent with 84 (compare exponent - 1 with 83). | 2586 cmp(scratch, Operand(84)); |
| 2590 cmp(scratch, Operand(83)); | |
| 2591 // We do an unsigned comparison so negative numbers are treated as big | 2587 // We do an unsigned comparison so negative numbers are treated as big |
| 2592 // positive numbers and the two tests above are done in one test. | 2588 // positive numbers and the two tests above are done in one test. |
| 2593 b(hs, &out_of_range); | 2589 b(hs, &out_of_range); |
| 2594 | 2590 |
| 2595 // Load scratch with 20 - exponent (load with 19 - (exponent - 1)). | 2591 // Load scratch with 20 - exponent. |
| 2596 rsb(scratch, scratch, Operand(19), SetCC); | 2592 rsb(scratch, scratch, Operand(20), SetCC); |
| 2597 b(mi, &both); | 2593 b(mi, &both); |
| 2598 | 2594 |
| | 2595 // Test 0 and -0. |
| | 2596 bic(result, input_high, Operand(HeapNumber::kSignMask)); |
| | 2597 orr(result, result, Operand(input_low), SetCC); |
| | 2598 b(eq, &done); |
| 2599 // 0 <= exponent <= 20, shift only input_high. | 2599 // 0 <= exponent <= 20, shift only input_high. |
| 2600 // Scratch contains: 20 - exponent. | 2600 // Scratch contains: 20 - exponent. |
| 2601 Ubfx(result, input_high, | 2601 Ubfx(result, input_high, |
| 2602 0, HeapNumber::kMantissaBitsInTopWord); | 2602 0, HeapNumber::kMantissaBitsInTopWord); |
| 2603 // Set the implicit 1 before the mantissa part in input_high. | 2603 // Set the implicit 1 before the mantissa part in input_high. |
| 2604 orr(result, result, Operand(1 << HeapNumber::kMantissaBitsInTopWord)); | 2604 orr(result, result, Operand(1 << HeapNumber::kMantissaBitsInTopWord)); |
| 2605 mov(result, Operand(result, LSR, scratch)); | 2605 mov(result, Operand(result, LSR, scratch)); |
| 2606 b(&negate); | 2606 b(&negate); |
| 2607 | 2607 |
| 2608 bind(&both); | 2608 bind(&both); |
| (...skipping 1340 matching lines...) |
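Two things change in the sequence above. First, scratch now holds the plain unbiased exponent: the old code loaded exponent - 1 only because kExponentBias + 1 = 1024 is encodable as an ARM immediate while 1023 is not, so the comparison constants shift accordingly (84 instead of 83, rsb against 20 instead of 19). Second, an explicit test for 0 and -0 is added on the small-exponent path: clearing the sign bit of input_high and OR-ing in input_low sets the Z flag exactly for +/-0, with result already holding 0 for the branch to done. For 0 <= exponent <= 20 only the high word matters: the 20 stored mantissa bits plus the implicit leading 1 are shifted right by 20 - exponent. A minimal sketch of that branch (plain C++, illustrative only; TruncateSmallExponent is a hypothetical name, and the exponent range is assumed to hold):

    #include <cstdint>
    #include <cstring>

    // Truncate |input| toward zero when its unbiased exponent is in [0, 20].
    // Mirrors the Ubfx / orr(implicit 1) / LSR(20 - exponent) sequence above.
    uint32_t TruncateSmallExponent(double input) {
      uint64_t bits;
      std::memcpy(&bits, &input, sizeof(bits));
      uint32_t input_high = static_cast<uint32_t>(bits >> 32);
      int exponent = static_cast<int>((input_high >> 20) & 0x7FF) - 1023;
      uint32_t mantissa_top = input_high & 0xFFFFFu;          // top 20 mantissa bits
      return (mantissa_top | (1u << 20)) >> (20 - exponent);  // caller applies the sign
    }

For example, 5.0 is 1.25 * 2^2: the stored top mantissa bits are 0x40000, OR-ing in the implicit 1 gives 0x140000, and shifting right by 20 - 2 = 18 yields 5.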
| 3949 void CodePatcher::EmitCondition(Condition cond) { | 3949 void CodePatcher::EmitCondition(Condition cond) { |
| 3950 Instr instr = Assembler::instr_at(masm_.pc_); | 3950 Instr instr = Assembler::instr_at(masm_.pc_); |
| 3951 instr = (instr & ~kCondMask) | cond; | 3951 instr = (instr & ~kCondMask) | cond; |
| 3952 masm_.emit(instr); | 3952 masm_.emit(instr); |
| 3953 } | 3953 } |
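EmitCondition rewrites only the condition field of the instruction already sitting at the patch position; on ARM the condition code occupies the top nibble (bits 31:28) of every instruction, which is what kCondMask selects. A minimal sketch of the bit manipulation (plain C++; the mask value is assumed from the ARM encoding):

    #include <cstdint>

    const uint32_t kCondMask = 0xF0000000u;  // bits 31:28, assumed per ARM encoding

    // Replace the condition field of an encoded instruction, leaving the rest intact.
    uint32_t PatchCondition(uint32_t instr, uint32_t cond_bits) {
      return (instr & ~kCondMask) | cond_bits;
    }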
| 3954 | 3954 |
| 3955 | 3955 |
| 3956 } } // namespace v8::internal | 3956 } } // namespace v8::internal |
| 3957 | 3957 |
| 3958 #endif // V8_TARGET_ARCH_ARM | 3958 #endif // V8_TARGET_ARCH_ARM |