OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 758 matching lines...)
769 } else { // PostIndex or NegPostIndex. | 769 } else { // PostIndex or NegPostIndex. |
770 ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex)); | 770 ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex)); |
771 dst2.set_offset(dst2.offset() - 4); | 771 dst2.set_offset(dst2.offset() - 4); |
772 str(src1, MemOperand(dst.rn(), 4, PostIndex), cond); | 772 str(src1, MemOperand(dst.rn(), 4, PostIndex), cond); |
773 str(src2, dst2, cond); | 773 str(src2, dst2, cond); |
774 } | 774 } |
775 } | 775 } |
776 } | 776 } |
777 | 777 |
778 | 778 |
779 void MacroAssembler::ClearFPSCRBits(const uint32_t bits_to_clear, | |
780 const Register scratch, | |
781 const Condition cond) { | |
782 vmrs(scratch, cond); | |
783 bic(scratch, scratch, Operand(bits_to_clear), LeaveCC, cond); | |
784 vmsr(scratch, cond); | |
785 } | |
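The helper removed on the left wrapped a read-modify-write of the FPSCR: vmrs reads the status register into a scratch register, bic clears the requested bits, and vmsr writes the value back. A minimal C++ sketch of the bit-clearing step, purely illustrative and not code from this change:

#include <cstdint>

// What the bic in the removed helper computes on the value read from FPSCR:
// keep every status bit except the ones being cleared.
uint32_t ClearStatusBits(uint32_t fpscr, uint32_t bits_to_clear) {
  return fpscr & ~bits_to_clear;
}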
786 | |
787 | |
788 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1, | 779 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1, |
789 const DwVfpRegister src2, | 780 const DwVfpRegister src2, |
790 const Condition cond) { | 781 const Condition cond) { |
791 // Compare and move FPSCR flags to the normal condition flags. | 782 // Compare and move FPSCR flags to the normal condition flags. |
792 VFPCompareAndLoadFlags(src1, src2, pc, cond); | 783 VFPCompareAndLoadFlags(src1, src2, pc, cond); |
793 } | 784 } |
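Passing pc as the destination of the flag move is the ARM idiom for vmrs APSR_nzcv, FPSCR, which copies the VFP compare result into the integer condition flags so ordinary conditional branches can use it. A rough C++ model of the four outcomes such a compare can produce, offered only as an illustration rather than V8 code:

#include <cmath>

// Model of the state the NZCV flags encode after vcmp + vmrs APSR_nzcv:
// equal, less, greater, or unordered when either operand is NaN.
enum class VfpCompareResult { kEqual, kLess, kGreater, kUnordered };

VfpCompareResult ModelVfpCompare(double a, double b) {
  if (std::isnan(a) || std::isnan(b)) return VfpCompareResult::kUnordered;
  if (a == b) return VfpCompareResult::kEqual;
  return (a < b) ? VfpCompareResult::kLess : VfpCompareResult::kGreater;
}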
794 | 785 |
795 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1, | 786 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1, |
796 const double src2, | 787 const double src2, |
797 const Condition cond) { | 788 const Condition cond) { |
(...skipping 1887 matching lines...)
2685 ASSERT(!input_high.is(result)); | 2676 ASSERT(!input_high.is(result)); |
2686 ASSERT(!input_low.is(result)); | 2677 ASSERT(!input_low.is(result)); |
2687 ASSERT(!input_low.is(input_high)); | 2678 ASSERT(!input_low.is(input_high)); |
2688 ASSERT(!scratch.is(result) && | 2679 ASSERT(!scratch.is(result) && |
2689 !scratch.is(input_high) && | 2680 !scratch.is(input_high) && |
2690 !scratch.is(input_low)); | 2681 !scratch.is(input_low)); |
2691 ASSERT(!double_input.is(double_scratch)); | 2682 ASSERT(!double_input.is(double_scratch)); |
2692 | 2683 |
2693 Label done; | 2684 Label done; |
2694 | 2685 |
2695 // Test for values that can be exactly represented as a signed 32-bit integer. | 2686 // Test if the value can be exactly represented as a signed integer. |
2696 TryFastDoubleToInt32(result, double_input, double_scratch, &done); | 2687 vcvt_s32_f64(double_scratch.low(), double_input); |
| 2688 vmov(result, double_scratch.low()); |
| 2689 vcvt_f64_s32(double_scratch, double_scratch.low()); |
| 2690 // Note: this comparison is cheaper than reading the FPSCR exception bits. |
| 2691 VFPCompareAndSetFlags(double_input, double_scratch); |
| 2692 b(eq, &done); |
| 2693 |
| 2694 // Check the exception flags. If they are not set, we are done. |
| 2695 // If they are set, it could be because of the conversion above, or because |
| 2696 // they were set before this code. |
| 2697 vmrs(scratch); |
| 2698 tst(scratch, Operand(kVFPOverflowExceptionBit | |
| 2699 kVFPUnderflowExceptionBit | |
| 2700 kVFPInvalidOpExceptionBit)); |
| 2701 b(eq, &done); |
2697 | 2702 |
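The added fast path converts to a signed 32-bit integer, converts back, and compares against the original: an exact round trip means the value was already representable, so the exception-flag handling below can be skipped. A plain C++ sketch of the same check (RoundTripsAsInt32 is an illustrative name, not a routine in this file):

#include <cstdint>

// Mirrors the vcvt_s32_f64 / vcvt_f64_s32 / vcmp sequence above, assuming
// plain C++ rather than generated ARM code.
bool RoundTripsAsInt32(double input, int32_t* result) {
  // Reject NaN and out-of-range values up front; the hardware conversion
  // saturates, but the C++ cast would be undefined behaviour for them.
  if (!(input >= INT32_MIN && input <= INT32_MAX)) return false;
  int32_t truncated = static_cast<int32_t>(input);  // truncates toward zero
  *result = truncated;
  // Exact iff converting back reproduces the original double.
  return static_cast<double>(truncated) == input;
}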
2698 // Clear cumulative exception flags. | 2703 // Clear cumulative exception flags. |
2699 ClearFPSCRBits(kVFPExceptionMask, scratch); | 2704 bic(scratch, scratch, Operand(kVFPExceptionMask)); |
| 2705 vmsr(scratch); |
2700 // Try a conversion to a signed integer. | 2706 // Try a conversion to a signed integer. |
2701 vcvt_s32_f64(double_scratch.low(), double_input); | 2707 vcvt_s32_f64(double_scratch.low(), double_input); |
2702 vmov(result, double_scratch.low()); | 2708 // Retrieve the FPSCR. |
2703 // Retrieve he FPSCR. | |
2704 vmrs(scratch); | 2709 vmrs(scratch); |
2705 // Check for overflow and NaNs. | 2710 // Check for overflow and NaNs. |
2706 tst(scratch, Operand(kVFPOverflowExceptionBit | | 2711 tst(scratch, Operand(kVFPOverflowExceptionBit | |
2707 kVFPUnderflowExceptionBit | | 2712 kVFPUnderflowExceptionBit | |
2708 kVFPInvalidOpExceptionBit)); | 2713 kVFPInvalidOpExceptionBit)); |
2709 // If we had no exceptions we are done. | 2714 // If we had no exceptions we are done. |
2710 b(eq, &done); | 2715 b(eq, &done); |
2711 | 2716 |
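When the round trip is not exact, the code checks the cumulative exception bits, clears them, retries the conversion, and reads the FPSCR again; only overflow, underflow, and invalid-operation matter here. A sketch of the flag test, where the bit positions are assumptions matching the documented FPSCR layout rather than constants taken from this change:

#include <cstdint>

// Assumed FPSCR cumulative-exception bit positions (IOC, OFC, UFC); the real
// kVFP*ExceptionBit constants are defined elsewhere in the ARM port.
constexpr uint32_t kInvalidOpBit = 1u << 0;
constexpr uint32_t kOverflowBit  = 1u << 2;
constexpr uint32_t kUnderflowBit = 1u << 3;

// Mirrors the tst / b(eq, &done) pair: the converted result is trusted only
// when none of the interesting exception bits are set.
bool ConversionRaisedNoExceptions(uint32_t fpscr) {
  return (fpscr & (kInvalidOpBit | kOverflowBit | kUnderflowBit)) == 0;
}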
2712 // Load the double value and perform a manual truncation. | 2717 // Load the double value and perform a manual truncation. |
2713 vmov(input_low, input_high, double_input); | 2718 vmov(input_low, input_high, double_input); |
(...skipping 1271 matching lines...)
3985 void CodePatcher::EmitCondition(Condition cond) { | 3990 void CodePatcher::EmitCondition(Condition cond) { |
3986 Instr instr = Assembler::instr_at(masm_.pc_); | 3991 Instr instr = Assembler::instr_at(masm_.pc_); |
3987 instr = (instr & ~kCondMask) | cond; | 3992 instr = (instr & ~kCondMask) | cond; |
3988 masm_.emit(instr); | 3993 masm_.emit(instr); |
3989 } | 3994 } |
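For context on EmitCondition: conditional ARM instructions keep their condition code in the top four bits of the instruction word, so patching a condition is a mask-and-or on that word. A standalone sketch, with kCondMask assumed to be the usual bits [31:28] mask:

#include <cstdint>

using Instr = uint32_t;

// Assumption: the condition field occupies bits [31:28], as on ARM; condition
// values such as eq are already encoded shifted into that field.
constexpr Instr kCondMask = 0xF0000000u;

// The same transformation EmitCondition applies before re-emitting the word.
Instr PatchCondition(Instr instr, Instr cond) {
  return (instr & ~kCondMask) | cond;
}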
3990 | 3995 |
3991 | 3996 |
3992 } } // namespace v8::internal | 3997 } } // namespace v8::internal |
3993 | 3998 |
3994 #endif // V8_TARGET_ARCH_ARM | 3999 #endif // V8_TARGET_ARCH_ARM |