OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 717 matching lines...)
728 __ add(r0, sp, Operand(1 * kPointerSize)); | 728 __ add(r0, sp, Operand(1 * kPointerSize)); |
729 // v8::Arguments::implicit_args_ | 729 // v8::Arguments::implicit_args_ |
730 __ str(r2, MemOperand(r0, 0 * kPointerSize)); | 730 __ str(r2, MemOperand(r0, 0 * kPointerSize)); |
731 // v8::Arguments::values_ | 731 // v8::Arguments::values_ |
732 __ add(ip, r2, Operand(argc * kPointerSize)); | 732 __ add(ip, r2, Operand(argc * kPointerSize)); |
733 __ str(ip, MemOperand(r0, 1 * kPointerSize)); | 733 __ str(ip, MemOperand(r0, 1 * kPointerSize)); |
734 // v8::Arguments::length_ = argc | 734 // v8::Arguments::length_ = argc |
735 __ mov(ip, Operand(argc)); | 735 __ mov(ip, Operand(argc)); |
736 __ str(ip, MemOperand(r0, 2 * kPointerSize)); | 736 __ str(ip, MemOperand(r0, 2 * kPointerSize)); |
737 // v8::Arguments::is_construct_call = 0 | 737 // v8::Arguments::is_construct_call = 0 |
738 __ mov(ip, Operand(0)); | 738 __ mov(ip, Operand::Zero()); |
739 __ str(ip, MemOperand(r0, 3 * kPointerSize)); | 739 __ str(ip, MemOperand(r0, 3 * kPointerSize)); |
740 | 740 |
741 const int kStackUnwindSpace = argc + kFastApiCallArguments + 1; | 741 const int kStackUnwindSpace = argc + kFastApiCallArguments + 1; |
742 Address function_address = v8::ToCData<Address>(api_call_info->callback()); | 742 Address function_address = v8::ToCData<Address>(api_call_info->callback()); |
743 ApiFunction fun(function_address); | 743 ApiFunction fun(function_address); |
744 ExternalReference ref = ExternalReference(&fun, | 744 ExternalReference ref = ExternalReference(&fun, |
745 ExternalReference::DIRECT_API_CALL, | 745 ExternalReference::DIRECT_API_CALL, |
746 masm->isolate()); | 746 masm->isolate()); |
747 AllowExternalCallThatCantCauseGC scope(masm); | 747 AllowExternalCallThatCantCauseGC scope(masm); |
748 | 748 |
(...skipping 252 matching lines...)
1001 __ vstr(s0, scratch1, 0); | 1001 __ vstr(s0, scratch1, 0); |
1002 } else { | 1002 } else { |
1003 Label not_special, done; | 1003 Label not_special, done; |
1004 // Move sign bit from source to destination. This works because the sign | 1004 // Move sign bit from source to destination. This works because the sign |
1005 // bit in the exponent word of the double has the same position and polarity | 1005 // bit in the exponent word of the double has the same position and polarity |
1006 // as the 2's complement sign bit in a Smi. | 1006 // as the 2's complement sign bit in a Smi. |
1007 ASSERT(kBinary32SignMask == 0x80000000u); | 1007 ASSERT(kBinary32SignMask == 0x80000000u); |
1008 | 1008 |
1009 __ and_(fval, ival, Operand(kBinary32SignMask), SetCC); | 1009 __ and_(fval, ival, Operand(kBinary32SignMask), SetCC); |
1010 // Negate value if it is negative. | 1010 // Negate value if it is negative. |
1011 __ rsb(ival, ival, Operand(0, RelocInfo::NONE32), LeaveCC, ne); | 1011 __ rsb(ival, ival, Operand::Zero(), LeaveCC, ne); |
1012 | 1012 |
1013 // We have -1, 0 or 1, which we treat specially. Register ival contains | 1013 // We have -1, 0 or 1, which we treat specially. Register ival contains |
1014 // absolute value: it is either equal to 1 (special case of -1 and 1), | 1014 // absolute value: it is either equal to 1 (special case of -1 and 1), |
1015 // greater than 1 (not a special case) or less than 1 (special case of 0). | 1015 // greater than 1 (not a special case) or less than 1 (special case of 0). |
1016 __ cmp(ival, Operand(1)); | 1016 __ cmp(ival, Operand(1)); |
1017 __ b(gt, &not_special); | 1017 __ b(gt, &not_special); |
1018 | 1018 |
1019 // For 1 or -1 we need to or in the 0 exponent (biased). | 1019 // For 1 or -1 we need to or in the 0 exponent (biased). |
1020 static const uint32_t exponent_word_for_1 = | 1020 static const uint32_t exponent_word_for_1 = |
1021 kBinary32ExponentBias << kBinary32ExponentShift; | 1021 kBinary32ExponentBias << kBinary32ExponentShift; |
(...skipping 1212 matching lines...)
2234 // Move the result back to general purpose register r0. | 2234 // Move the result back to general purpose register r0. |
2235 __ vmov(r0, s0); | 2235 __ vmov(r0, s0); |
2236 // Check if the result fits into a smi. | 2236 // Check if the result fits into a smi. |
2237 __ add(r1, r0, Operand(0x40000000), SetCC); | 2237 __ add(r1, r0, Operand(0x40000000), SetCC); |
2238 __ b(&wont_fit_smi, mi); | 2238 __ b(&wont_fit_smi, mi); |
2239 // Tag the result. | 2239 // Tag the result. |
2240 STATIC_ASSERT(kSmiTag == 0); | 2240 STATIC_ASSERT(kSmiTag == 0); |
2241 __ mov(r0, Operand(r0, LSL, kSmiTagSize)); | 2241 __ mov(r0, Operand(r0, LSL, kSmiTagSize)); |
2242 | 2242 |
2243 // Check for -0. | 2243 // Check for -0. |
2244 __ cmp(r0, Operand(0, RelocInfo::NONE32)); | 2244 __ cmp(r0, Operand::Zero()); |
2245 __ b(&restore_fpscr_and_return, ne); | 2245 __ b(&restore_fpscr_and_return, ne); |
2246 // r5 already holds the HeapNumber exponent. | 2246 // r5 already holds the HeapNumber exponent. |
2247 __ tst(r5, Operand(HeapNumber::kSignMask)); | 2247 __ tst(r5, Operand(HeapNumber::kSignMask)); |
2248 // If our HeapNumber is negative it was -0, so load its address and return. | 2248 // If our HeapNumber is negative it was -0, so load its address and return. |
2249 // Else r0 is loaded with 0, so we can also just return. | 2249 // Else r0 is loaded with 0, so we can also just return. |
2250 __ ldr(r0, MemOperand(sp, 0 * kPointerSize), ne); | 2250 __ ldr(r0, MemOperand(sp, 0 * kPointerSize), ne); |
2251 | 2251 |
2252 __ bind(&restore_fpscr_and_return); | 2252 __ bind(&restore_fpscr_and_return); |
2253 // Restore FPSCR and return. | 2253 // Restore FPSCR and return. |
2254 __ vmsr(r3); | 2254 __ vmsr(r3); |
(...skipping 1682 matching lines...)
3937 bool is_signed_type = IsElementTypeSigned(elements_kind); | 3937 bool is_signed_type = IsElementTypeSigned(elements_kind); |
3938 int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt; | 3938 int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt; |
3939 int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000; | 3939 int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000; |
3940 | 3940 |
3941 Label done, sign; | 3941 Label done, sign; |
3942 | 3942 |
3943 // Test for all special exponent values: zeros, subnormal numbers, NaNs | 3943 // Test for all special exponent values: zeros, subnormal numbers, NaNs |
3944 // and infinities. All these should be converted to 0. | 3944 // and infinities. All these should be converted to 0. |
3945 __ mov(r7, Operand(HeapNumber::kExponentMask)); | 3945 __ mov(r7, Operand(HeapNumber::kExponentMask)); |
3946 __ and_(r9, r5, Operand(r7), SetCC); | 3946 __ and_(r9, r5, Operand(r7), SetCC); |
3947 __ mov(r5, Operand(0, RelocInfo::NONE32), LeaveCC, eq); | 3947 __ mov(r5, Operand::Zero(), LeaveCC, eq); |
3948 __ b(eq, &done); | 3948 __ b(eq, &done); |
3949 | 3949 |
3950 __ teq(r9, Operand(r7)); | 3950 __ teq(r9, Operand(r7)); |
3951 __ mov(r5, Operand(0, RelocInfo::NONE32), LeaveCC, eq); | 3951 __ mov(r5, Operand::Zero(), LeaveCC, eq); |
3952 __ b(eq, &done); | 3952 __ b(eq, &done); |
3953 | 3953 |
3954 // Unbias exponent. | 3954 // Unbias exponent. |
3955 __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); | 3955 __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); |
3956 __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC); | 3956 __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC); |
3957 // If exponent is negative then result is 0. | 3957 // If exponent is negative then result is 0. |
3958 __ mov(r5, Operand(0, RelocInfo::NONE32), LeaveCC, mi); | 3958 __ mov(r5, Operand::Zero(), LeaveCC, mi); |
3959 __ b(mi, &done); | 3959 __ b(mi, &done); |
3960 | 3960 |
3961 // If exponent is too big then result is minimal value. | 3961 // If exponent is too big then result is minimal value. |
3962 __ cmp(r9, Operand(meaningfull_bits - 1)); | 3962 __ cmp(r9, Operand(meaningfull_bits - 1)); |
3963 __ mov(r5, Operand(min_value), LeaveCC, ge); | 3963 __ mov(r5, Operand(min_value), LeaveCC, ge); |
3964 __ b(ge, &done); | 3964 __ b(ge, &done); |
3965 | 3965 |
3966 __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC); | 3966 __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC); |
3967 __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); | 3967 __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); |
3968 __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord)); | 3968 __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord)); |
3969 | 3969 |
3970 __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); | 3970 __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); |
3971 __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl); | 3971 __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl); |
3972 __ b(pl, &sign); | 3972 __ b(pl, &sign); |
3973 | 3973 |
3974 __ rsb(r9, r9, Operand(0, RelocInfo::NONE32)); | 3974 __ rsb(r9, r9, Operand::Zero()); |
3975 __ mov(r5, Operand(r5, LSL, r9)); | 3975 __ mov(r5, Operand(r5, LSL, r9)); |
3976 __ rsb(r9, r9, Operand(meaningfull_bits)); | 3976 __ rsb(r9, r9, Operand(meaningfull_bits)); |
3977 __ orr(r5, r5, Operand(r6, LSR, r9)); | 3977 __ orr(r5, r5, Operand(r6, LSR, r9)); |
3978 | 3978 |
3979 __ bind(&sign); | 3979 __ bind(&sign); |
3980 __ teq(r7, Operand(0, RelocInfo::NONE32)); | 3980 __ teq(r7, Operand::Zero()); |
3981 __ rsb(r5, r5, Operand(0, RelocInfo::NONE32), LeaveCC, ne); | 3981 __ rsb(r5, r5, Operand::Zero(), LeaveCC, ne); |
3982 | 3982 |
3983 __ bind(&done); | 3983 __ bind(&done); |
3984 switch (elements_kind) { | 3984 switch (elements_kind) { |
3985 case EXTERNAL_BYTE_ELEMENTS: | 3985 case EXTERNAL_BYTE_ELEMENTS: |
3986 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: | 3986 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: |
3987 __ strb(r5, MemOperand(r3, key, LSR, 1)); | 3987 __ strb(r5, MemOperand(r3, key, LSR, 1)); |
3988 break; | 3988 break; |
3989 case EXTERNAL_SHORT_ELEMENTS: | 3989 case EXTERNAL_SHORT_ELEMENTS: |
3990 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: | 3990 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: |
3991 __ strh(r5, MemOperand(r3, key, LSL, 0)); | 3991 __ strh(r5, MemOperand(r3, key, LSL, 0)); |
(...skipping 380 matching lines...)
4372 __ Jump(ic_slow, RelocInfo::CODE_TARGET); | 4372 __ Jump(ic_slow, RelocInfo::CODE_TARGET); |
4373 } | 4373 } |
4374 } | 4374 } |
4375 | 4375 |
4376 | 4376 |
4377 #undef __ | 4377 #undef __ |
4378 | 4378 |
4379 } } // namespace v8::internal | 4379 } } // namespace v8::internal |
4380 | 4380 |
4381 #endif // V8_TARGET_ARCH_ARM | 4381 #endif // V8_TARGET_ARCH_ARM |
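
The single pattern this patch applies throughout the file is replacing explicit zero immediates, written either as Operand(0) or Operand(0, RelocInfo::NONE32), with Operand::Zero(). Below is only a minimal, self-contained sketch of what such a convenience factory can look like; the RelocInfo stand-in and the member layout are simplified assumptions, not the real declarations from the V8 ARM assembler header:

#include <cstdint>

// Sketch only: simplified stand-ins for the real V8 types.
struct RelocInfo {
  enum Mode { NONE32 };
};

class Operand {
 public:
  // Immediate operand; the relocation mode defaults to "none".
  explicit Operand(int32_t immediate,
                   RelocInfo::Mode rmode = RelocInfo::NONE32)
      : immediate_(immediate), rmode_(rmode) {}

  // Convenience factory for a zero immediate with no relocation info,
  // letting call sites write Operand::Zero() instead of spelling out
  // Operand(0, RelocInfo::NONE32).
  static Operand Zero() { return Operand(static_cast<int32_t>(0)); }

 private:
  int32_t immediate_;
  RelocInfo::Mode rmode_;
};

With a factory like this in place, call sites read __ mov(ip, Operand::Zero()) rather than __ mov(ip, Operand(0)) or __ mov(r5, Operand(0, RelocInfo::NONE32), LeaveCC, eq), as in the hunks above.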