| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 925 matching lines...) |
| 936 | 936 |
| 937 Label not_special, done; | 937 Label not_special, done; |
| 938 // Move sign bit from source to destination. This works because the sign | 938 // Move sign bit from source to destination. This works because the sign |
| 939 // bit in the exponent word of the double has the same position and polarity | 939 // bit in the exponent word of the double has the same position and polarity |
| 940 // as the 2's complement sign bit in a Smi. | 940 // as the 2's complement sign bit in a Smi. |
| 941 ASSERT(kBinary32SignMask == 0x80000000u); | 941 ASSERT(kBinary32SignMask == 0x80000000u); |
| 942 | 942 |
| 943 __ And(fval, ival, Operand(kBinary32SignMask)); | 943 __ And(fval, ival, Operand(kBinary32SignMask)); |
| 944 // Negate value if it is negative. | 944 // Negate value if it is negative. |
| 945 __ subu(scratch1, zero_reg, ival); | 945 __ subu(scratch1, zero_reg, ival); |
| 946 __ movn(ival, scratch1, fval); | 946 __ Movn(ival, scratch1, fval); |
| 947 | 947 |
| 948 // We have -1, 0 or 1, which we treat specially. Register ival contains | 948 // We have -1, 0 or 1, which we treat specially. Register ival contains |
| 949 // absolute value: it is either equal to 1 (special case of -1 and 1), | 949 // absolute value: it is either equal to 1 (special case of -1 and 1), |
| 950 // greater than 1 (not a special case) or less than 1 (special case of 0). | 950 // greater than 1 (not a special case) or less than 1 (special case of 0). |
| 951 __ Branch(&not_special, gt, ival, Operand(1)); | 951 __ Branch(&not_special, gt, ival, Operand(1)); |
| 952 | 952 |
| 953 // For 1 or -1 we need to or in the 0 exponent (biased). | 953 // For 1 or -1 we need to or in the 0 exponent (biased). |
| 954 static const uint32_t exponent_word_for_1 = | 954 static const uint32_t exponent_word_for_1 = |
| 955 kBinary32ExponentBias << kBinary32ExponentShift; | 955 kBinary32ExponentBias << kBinary32ExponentShift; |
| 956 | 956 |
| 957 __ Xor(scratch1, ival, Operand(1)); | 957 __ Xor(scratch1, ival, Operand(1)); |
| 958 __ li(scratch2, exponent_word_for_1); | 958 __ li(scratch2, exponent_word_for_1); |
| 959 __ or_(scratch2, fval, scratch2); | 959 __ or_(scratch2, fval, scratch2); |
| 960 __ movz(fval, scratch2, scratch1); // Only if ival is equal to 1. | 960 __ Movz(fval, scratch2, scratch1); // Only if ival is equal to 1. |
| 961 __ Branch(&done); | 961 __ Branch(&done); |
| 962 | 962 |
| 963 __ bind(&not_special); | 963 __ bind(&not_special); |
| 964 // Count leading zeros. | 964 // Count leading zeros. |
| 965 // Gets the wrong answer for 0, but we already checked for that case above. | 965 // Gets the wrong answer for 0, but we already checked for that case above. |
| 966 Register zeros = scratch2; | 966 Register zeros = scratch2; |
| 967 __ clz(zeros, ival); | 967 __ Clz(zeros, ival); |
| 968 | 968 |
| 969 // Compute exponent and or it into the exponent register. | 969 // Compute exponent and or it into the exponent register. |
| 970 __ li(scratch1, (kBitsPerInt - 1) + kBinary32ExponentBias); | 970 __ li(scratch1, (kBitsPerInt - 1) + kBinary32ExponentBias); |
| 971 __ subu(scratch1, scratch1, zeros); | 971 __ subu(scratch1, scratch1, zeros); |
| 972 | 972 |
| 973 __ sll(scratch1, scratch1, kBinary32ExponentShift); | 973 __ sll(scratch1, scratch1, kBinary32ExponentShift); |
| 974 __ or_(fval, fval, scratch1); | 974 __ or_(fval, fval, scratch1); |
| 975 | 975 |
| 976 // Shift up the source chopping the top bit off. | 976 // Shift up the source chopping the top bit off. |
| 977 __ Addu(zeros, zeros, Operand(1)); | 977 __ Addu(zeros, zeros, Operand(1)); |
| (...skipping 2650 matching lines...) |
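The hunk above is the software (non-FPU) path that packs a signed integer into an IEEE-754 binary32 bit pattern: copy the sign, take the absolute value with a conditional move, special-case 0 and ±1, use `Clz` to find the leading bit, derive the biased exponent, and shift the remaining bits into the mantissa field. Below is a minimal stand-alone C++ sketch of the same bit manipulation; the constants mirror `kBinary32SignMask` (0x80000000), `kBinary32ExponentBias` (127) and `kBinary32ExponentShift` (23), and the final mantissa step falls inside the skipped lines, so it is reconstructed from context rather than transcribed from the stub.

```cpp
// Sketch only: build the binary32 bit pattern for a 32-bit integer using
// integer operations, the way the stub does it without an FPU.
#include <stdint.h>

static uint32_t Int32ToBinary32Bits(int32_t ival) {
  const uint32_t kSignMask = 0x80000000u;   // kBinary32SignMask.
  const int kExponentBias = 127;            // kBinary32ExponentBias.
  const int kExponentShift = 23;            // kBinary32ExponentShift.
  const int kBitsPerInt = 32;

  uint32_t fval = (uint32_t)ival & kSignMask;                    // Sign bit.
  uint32_t abs = fval ? (0u - (uint32_t)ival) : (uint32_t)ival;  // |ival|.

  if (abs <= 1) {
    // 0 keeps the all-zero pattern; 1 and -1 just need the biased 0 exponent.
    if (abs == 1) fval |= (uint32_t)kExponentBias << kExponentShift;
    return fval;
  }

  // Count leading zeros (abs > 1 here, so the count is well defined).
  int zeros = __builtin_clz(abs);  // GCC/Clang builtin, standing in for Clz.
  uint32_t exponent = (uint32_t)((kBitsPerInt - 1) - zeros + kExponentBias);
  fval |= exponent << kExponentShift;

  // Shift up to chop off the implicit leading 1, then move the remaining
  // bits down into the 23-bit mantissa field (low bits are truncated;
  // rounding is not modeled here).
  uint32_t mantissa = (abs << (zeros + 1)) >> (kBitsPerInt - kExponentShift);
  return fval | mantissa;
}
```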
| 3628 | 3628 |
| 3629 // Extract exponent to t5. | 3629 // Extract exponent to t5. |
| 3630 __ srl(t5, value, kBinary32MantissaBits); | 3630 __ srl(t5, value, kBinary32MantissaBits); |
| 3631 __ And(t5, t5, Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); | 3631 __ And(t5, t5, Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); |
| 3632 | 3632 |
| 3633 Label exponent_rebiased; | 3633 Label exponent_rebiased; |
| 3634 __ Branch(&exponent_rebiased, eq, t5, Operand(zero_reg)); | 3634 __ Branch(&exponent_rebiased, eq, t5, Operand(zero_reg)); |
| 3635 | 3635 |
| 3636 __ li(t0, 0x7ff); | 3636 __ li(t0, 0x7ff); |
| 3637 __ Xor(t1, t5, Operand(0xFF)); | 3637 __ Xor(t1, t5, Operand(0xFF)); |
| 3638 __ movz(t5, t0, t1); // Set t5 to 0x7ff only if t5 is equal to 0xff. | 3638 __ Movz(t5, t0, t1); // Set t5 to 0x7ff only if t5 is equal to 0xff. |
| 3639 __ Branch(&exponent_rebiased, eq, t0, Operand(0xff)); | 3639 __ Branch(&exponent_rebiased, eq, t0, Operand(0xff)); |
| 3640 | 3640 |
| 3641 // Rebias exponent. | 3641 // Rebias exponent. |
| 3642 __ Addu(t5, | 3642 __ Addu(t5, |
| 3643 t5, | 3643 t5, |
| 3644 Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias)); | 3644 Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias)); |
| 3645 | 3645 |
| 3646 __ bind(&exponent_rebiased); | 3646 __ bind(&exponent_rebiased); |
| 3647 __ And(a2, value, Operand(kBinary32SignMask)); | 3647 __ And(a2, value, Operand(kBinary32SignMask)); |
| 3648 value = no_reg; | 3648 value = no_reg; |
| (...skipping 273 matching lines...) |
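The hunk above widens a binary32 value loaded from an external float array into a HeapNumber (binary64). The only non-trivial part is the exponent: zero stays zero (zero or denormal), the all-ones value 0xFF becomes the all-ones binary64 exponent 0x7FF (infinity or NaN), and everything else is rebiased from 127 to 1023. A hedged C++ sketch of that mapping, not taken from the stub:

```cpp
// Sketch only: exponent mapping used when widening binary32 to binary64.
#include <stdint.h>

static uint32_t RebiasBinary32Exponent(uint32_t exp32) {
  const uint32_t kBinary32ExponentBias = 127;
  const uint32_t kBinary64ExponentBias = 1023;  // HeapNumber::kExponentBias.
  if (exp32 == 0) return 0;         // Zero or denormal: exponent stays zero.
  if (exp32 == 0xFF) return 0x7FF;  // Infinity or NaN: saturate to all ones.
  return exp32 - kBinary32ExponentBias + kBinary64ExponentBias;
}
```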
| 3922 kBitsPerInt - kMantissaInHiWordShift; | 3922 kBitsPerInt - kMantissaInHiWordShift; |
| 3923 | 3923 |
| 3924 // Test for all special exponent values: zeros, subnormal numbers, NaNs | 3924 // Test for all special exponent values: zeros, subnormal numbers, NaNs |
| 3925 // and infinities. All these should be converted to 0. | 3925 // and infinities. All these should be converted to 0. |
| 3926 __ li(t5, HeapNumber::kExponentMask); | 3926 __ li(t5, HeapNumber::kExponentMask); |
| 3927 __ and_(t6, t3, t5); | 3927 __ and_(t6, t3, t5); |
| 3928 __ Branch(&nan_or_infinity_or_zero, eq, t6, Operand(zero_reg)); | 3928 __ Branch(&nan_or_infinity_or_zero, eq, t6, Operand(zero_reg)); |
| 3929 | 3929 |
| 3930 __ xor_(t1, t6, t5); | 3930 __ xor_(t1, t6, t5); |
| 3931 __ li(t2, kBinary32ExponentMask); | 3931 __ li(t2, kBinary32ExponentMask); |
| 3932 __ movz(t6, t2, t1); // Only if t6 is equal to t5. | 3932 __ Movz(t6, t2, t1); // Only if t6 is equal to t5. |
| 3933 __ Branch(&nan_or_infinity_or_zero, eq, t6, Operand(t5)); | 3933 __ Branch(&nan_or_infinity_or_zero, eq, t6, Operand(t5)); |
| 3934 | 3934 |
| 3935 // Rebias exponent. | 3935 // Rebias exponent. |
| 3936 __ srl(t6, t6, HeapNumber::kExponentShift); | 3936 __ srl(t6, t6, HeapNumber::kExponentShift); |
| 3937 __ Addu(t6, | 3937 __ Addu(t6, |
| 3938 t6, | 3938 t6, |
| 3939 Operand(kBinary32ExponentBias - HeapNumber::kExponentBias)); | 3939 Operand(kBinary32ExponentBias - HeapNumber::kExponentBias)); |
| 3940 | 3940 |
| 3941 __ li(t1, Operand(kBinary32MaxExponent)); | 3941 __ li(t1, Operand(kBinary32MaxExponent)); |
| 3942 __ Slt(t1, t1, t6); | 3942 __ Slt(t1, t1, t6); |
| 3943 __ And(t2, t3, Operand(HeapNumber::kSignMask)); | 3943 __ And(t2, t3, Operand(HeapNumber::kSignMask)); |
| 3944 __ Or(t2, t2, Operand(kBinary32ExponentMask)); | 3944 __ Or(t2, t2, Operand(kBinary32ExponentMask)); |
| 3945 __ movn(t3, t2, t1); // Only if t6 is gt kBinary32MaxExponent. | 3945 __ Movn(t3, t2, t1); // Only if t6 is gt kBinary32MaxExponent. |
| 3946 __ Branch(&done, gt, t6, Operand(kBinary32MaxExponent)); | 3946 __ Branch(&done, gt, t6, Operand(kBinary32MaxExponent)); |
| 3947 | 3947 |
| 3948 __ Slt(t1, t6, Operand(kBinary32MinExponent)); | 3948 __ Slt(t1, t6, Operand(kBinary32MinExponent)); |
| 3949 __ And(t2, t3, Operand(HeapNumber::kSignMask)); | 3949 __ And(t2, t3, Operand(HeapNumber::kSignMask)); |
| 3950 __ movn(t3, t2, t1); // Only if t6 is lt kBinary32MinExponent. | 3950 __ Movn(t3, t2, t1); // Only if t6 is lt kBinary32MinExponent. |
| 3951 __ Branch(&done, lt, t6, Operand(kBinary32MinExponent)); | 3951 __ Branch(&done, lt, t6, Operand(kBinary32MinExponent)); |
| 3952 | 3952 |
| 3953 __ And(t7, t3, Operand(HeapNumber::kSignMask)); | 3953 __ And(t7, t3, Operand(HeapNumber::kSignMask)); |
| 3954 __ And(t3, t3, Operand(HeapNumber::kMantissaMask)); | 3954 __ And(t3, t3, Operand(HeapNumber::kMantissaMask)); |
| 3955 __ sll(t3, t3, kMantissaInHiWordShift); | 3955 __ sll(t3, t3, kMantissaInHiWordShift); |
| 3956 __ or_(t7, t7, t3); | 3956 __ or_(t7, t7, t3); |
| 3957 __ srl(t4, t4, kMantissaInLoWordShift); | 3957 __ srl(t4, t4, kMantissaInLoWordShift); |
| 3958 __ or_(t7, t7, t4); | 3958 __ or_(t7, t7, t4); |
| 3959 __ sll(t6, t6, kBinary32ExponentShift); | 3959 __ sll(t6, t6, kBinary32ExponentShift); |
| 3960 __ or_(t3, t7, t6); | 3960 __ or_(t3, t7, t6); |
| (...skipping 29 matching lines...) |
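The store path above narrows a HeapNumber (binary64, split across a high and a low word) to binary32 before writing it to an external float array: rebias the exponent, saturate overflow to a signed infinity, flush underflow to a signed zero, and reassemble the 23 mantissa bits from 20 bits of the high word plus 3 bits of the low word. The C++ sketch below covers only the normal-number path; the separate nan_or_infinity_or_zero path and rounding are not modeled, and the 0xFE/0x01 limits are assumed values for kBinary32MaxExponent and kBinary32MinExponent.

```cpp
// Sketch only: narrow the raw bits of a binary64 (hi word + lo word) to a
// binary32 pattern, normal numbers only, truncating instead of rounding.
#include <stdint.h>

static uint32_t NarrowDoubleBitsToBinary32(uint32_t hi, uint32_t lo) {
  const uint32_t kSignMask = 0x80000000u;    // HeapNumber::kSignMask.
  const uint32_t kExpMask64 = 0x7FF00000u;   // HeapNumber::kExponentMask.
  const uint32_t kMantMask64 = 0x000FFFFFu;  // HeapNumber::kMantissaMask.
  const uint32_t kExpMask32 = 0x7F800000u;   // kBinary32ExponentMask.
  const int kExpShift64 = 20;                // HeapNumber::kExponentShift.
  const int kExpShift32 = 23;                // kBinary32ExponentShift.
  const int kMantissaInHiWordShift = 3;      // 23 - 20.
  const int kMantissaInLoWordShift = 32 - kMantissaInHiWordShift;

  uint32_t sign = hi & kSignMask;
  uint32_t exp = (hi & kExpMask64) >> kExpShift64;
  if (exp == 0 || exp == 0x7FF) return sign;  // Handled elsewhere in the stub.

  int rebiased = (int)exp - 1023 + 127;
  if (rebiased > 0xFE) return sign | kExpMask32;  // Overflow: +/- infinity.
  if (rebiased < 1) return sign;                  // Underflow: +/- zero.

  // 20 mantissa bits come from the high word, the remaining 3 from the top
  // of the low word.
  uint32_t mantissa = ((hi & kMantMask64) << kMantissaInHiWordShift) |
                      (lo >> kMantissaInLoWordShift);
  return sign | ((uint32_t)rebiased << kExpShift32) | mantissa;
}
```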
| 3990 bool is_signed_type = IsElementTypeSigned(elements_kind); | 3990 bool is_signed_type = IsElementTypeSigned(elements_kind); |
| 3991 int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt; | 3991 int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt; |
| 3992 int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000; | 3992 int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000; |
| 3993 | 3993 |
| 3994 Label done, sign; | 3994 Label done, sign; |
| 3995 | 3995 |
| 3996 // Test for all special exponent values: zeros, subnormal numbers, NaNs | 3996 // Test for all special exponent values: zeros, subnormal numbers, NaNs |
| 3997 // and infinities. All these should be converted to 0. | 3997 // and infinities. All these should be converted to 0. |
| 3998 __ li(t5, HeapNumber::kExponentMask); | 3998 __ li(t5, HeapNumber::kExponentMask); |
| 3999 __ and_(t6, t3, t5); | 3999 __ and_(t6, t3, t5); |
| 4000 __ movz(t3, zero_reg, t6); // Only if t6 is equal to zero. | 4000 __ Movz(t3, zero_reg, t6); // Only if t6 is equal to zero. |
| 4001 __ Branch(&done, eq, t6, Operand(zero_reg)); | 4001 __ Branch(&done, eq, t6, Operand(zero_reg)); |
| 4002 | 4002 |
| 4003 __ xor_(t2, t6, t5); | 4003 __ xor_(t2, t6, t5); |
| 4004 __ movz(t3, zero_reg, t2); // Only if t6 is equal to t5. | 4004 __ Movz(t3, zero_reg, t2); // Only if t6 is equal to t5. |
| 4005 __ Branch(&done, eq, t6, Operand(t5)); | 4005 __ Branch(&done, eq, t6, Operand(t5)); |
| 4006 | 4006 |
| 4007 // Unbias exponent. | 4007 // Unbias exponent. |
| 4008 __ srl(t6, t6, HeapNumber::kExponentShift); | 4008 __ srl(t6, t6, HeapNumber::kExponentShift); |
| 4009 __ Subu(t6, t6, Operand(HeapNumber::kExponentBias)); | 4009 __ Subu(t6, t6, Operand(HeapNumber::kExponentBias)); |
| 4010 // If exponent is negative then result is 0. | 4010 // If exponent is negative then result is 0. |
| 4011 __ slt(t2, t6, zero_reg); | 4011 __ slt(t2, t6, zero_reg); |
| 4012 __ movn(t3, zero_reg, t2); // Only if exponent is negative. | 4012 __ Movn(t3, zero_reg, t2); // Only if exponent is negative. |
| 4013 __ Branch(&done, lt, t6, Operand(zero_reg)); | 4013 __ Branch(&done, lt, t6, Operand(zero_reg)); |
| 4014 | 4014 |
| 4015 // If exponent is too big then result is minimal value. | 4015 // If exponent is too big then result is minimal value. |
| 4016 __ slti(t1, t6, meaningfull_bits - 1); | 4016 __ slti(t1, t6, meaningfull_bits - 1); |
| 4017 __ li(t2, min_value); | 4017 __ li(t2, min_value); |
| 4018 __ movz(t3, t2, t1); // Only if t6 is ge meaningfull_bits - 1. | 4018 __ Movz(t3, t2, t1); // Only if t6 is ge meaningfull_bits - 1. |
| 4019 __ Branch(&done, ge, t6, Operand(meaningfull_bits - 1)); | 4019 __ Branch(&done, ge, t6, Operand(meaningfull_bits - 1)); |
| 4020 | 4020 |
| 4021 __ And(t5, t3, Operand(HeapNumber::kSignMask)); | 4021 __ And(t5, t3, Operand(HeapNumber::kSignMask)); |
| 4022 __ And(t3, t3, Operand(HeapNumber::kMantissaMask)); | 4022 __ And(t3, t3, Operand(HeapNumber::kMantissaMask)); |
| 4023 __ Or(t3, t3, Operand(1u << HeapNumber::kMantissaBitsInTopWord)); | 4023 __ Or(t3, t3, Operand(1u << HeapNumber::kMantissaBitsInTopWord)); |
| 4024 | 4024 |
| 4025 __ li(t9, HeapNumber::kMantissaBitsInTopWord); | 4025 __ li(t9, HeapNumber::kMantissaBitsInTopWord); |
| 4026 __ subu(t6, t9, t6); | 4026 __ subu(t6, t9, t6); |
| 4027 __ slt(t1, t6, zero_reg); | 4027 __ slt(t1, t6, zero_reg); |
| 4028 __ srlv(t2, t3, t6); | 4028 __ srlv(t2, t3, t6); |
| 4029 __ movz(t3, t2, t1); // Only if t6 is positive. | 4029 __ Movz(t3, t2, t1); // Only if t6 is positive. |
| 4030 __ Branch(&sign, ge, t6, Operand(zero_reg)); | 4030 __ Branch(&sign, ge, t6, Operand(zero_reg)); |
| 4031 | 4031 |
| 4032 __ subu(t6, zero_reg, t6); | 4032 __ subu(t6, zero_reg, t6); |
| 4033 __ sllv(t3, t3, t6); | 4033 __ sllv(t3, t3, t6); |
| 4034 __ li(t9, meaningfull_bits); | 4034 __ li(t9, meaningfull_bits); |
| 4035 __ subu(t6, t9, t6); | 4035 __ subu(t6, t9, t6); |
| 4036 __ srlv(t4, t4, t6); | 4036 __ srlv(t4, t4, t6); |
| 4037 __ or_(t3, t3, t4); | 4037 __ or_(t3, t3, t4); |
| 4038 | 4038 |
| 4039 __ bind(&sign); | 4039 __ bind(&sign); |
| 4040 __ subu(t2, t3, zero_reg); | 4040 __ subu(t2, t3, zero_reg); |
| 4041 __ movz(t3, t2, t5); // Only if t5 is zero. | 4041 __ Movz(t3, t2, t5); // Only if t5 is zero. |
| 4042 | 4042 |
| 4043 __ bind(&done); | 4043 __ bind(&done); |
| 4044 | 4044 |
| 4045 // Result is in t3. | 4045 // Result is in t3. |
| 4046 // This switch block should be exactly the same as above (FPU mode). | 4046 // This switch block should be exactly the same as above (FPU mode). |
| 4047 switch (elements_kind) { | 4047 switch (elements_kind) { |
| 4048 case EXTERNAL_BYTE_ELEMENTS: | 4048 case EXTERNAL_BYTE_ELEMENTS: |
| 4049 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: | 4049 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: |
| 4050 __ srl(t8, key, 1); | 4050 __ srl(t8, key, 1); |
| 4051 __ addu(t8, a3, t8); | 4051 __ addu(t8, a3, t8); |
| (...skipping 469 matching lines...) |
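Without an FPU, the hunk above truncates a HeapNumber to an integer before it is stored into an external int array: special exponents (zero, denormal, NaN, infinity) give 0, a negative unbiased exponent gives 0, a too-large exponent gives the element kind's minimal value, and otherwise the integer bits are shifted out of the two mantissa words and the sign is applied. The C++ sketch below follows the register sequence above for the shift arithmetic, but its final sign step implements the intended negation rather than mirroring the stub instruction by instruction, so treat it as an illustration under those assumptions.

```cpp
// Sketch only: FPU-less truncation of a binary64 (hi word + lo word) to an
// integer, matching the stub's overflow convention (min_value for signed
// element kinds, 0 for unsigned ones).
#include <stdint.h>

static uint32_t TruncateDoubleBits(uint32_t hi, uint32_t lo, bool is_signed) {
  const int kExponentShift = 20;             // HeapNumber::kExponentShift.
  const int kExponentBias = 1023;            // HeapNumber::kExponentBias.
  const int kMantissaBitsInTopWord = 20;
  const int kBitsPerInt = 32;
  const int meaningful_bits = is_signed ? kBitsPerInt - 1 : kBitsPerInt;
  const uint32_t min_value = is_signed ? 0x80000000u : 0u;

  uint32_t biased = (hi >> kExponentShift) & 0x7FF;
  if (biased == 0 || biased == 0x7FF) return 0;   // Zero/denormal/NaN/Inf.

  int exponent = (int)biased - kExponentBias;
  if (exponent < 0) return 0;                     // |value| < 1.
  if (exponent >= meaningful_bits - 1) return min_value;  // Out of range.

  // Integer bits = implicit leading 1 plus the mantissa, shifted so that the
  // binary point lands after the last integer bit.
  uint32_t top = (hi & 0x000FFFFFu) | (1u << kMantissaBitsInTopWord);
  uint32_t result;
  if (exponent <= kMantissaBitsInTopWord) {
    result = top >> (kMantissaBitsInTopWord - exponent);
  } else {
    int up = exponent - kMantissaBitsInTopWord;
    result = (top << up) | (lo >> (meaningful_bits - up));
  }
  // Apply the sign of the original double.
  return (hi & 0x80000000u) ? (0u - result) : result;
}
```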
| 4521 __ Jump(ic_slow, RelocInfo::CODE_TARGET); | 4521 __ Jump(ic_slow, RelocInfo::CODE_TARGET); |
| 4522 } | 4522 } |
| 4523 } | 4523 } |
| 4524 | 4524 |
| 4525 | 4525 |
| 4526 #undef __ | 4526 #undef __ |
| 4527 | 4527 |
| 4528 } } // namespace v8::internal | 4528 } } // namespace v8::internal |
| 4529 | 4529 |
| 4530 #endif // V8_TARGET_ARCH_MIPS | 4530 #endif // V8_TARGET_ARCH_MIPS |