Chromium Code Reviews

Unified Diff: src/arm/stub-cache-arm.cc

Issue 11191029: Use VLDR instead of VMOVs from GPR when a 64-bit double can't be encoded as a VMOV immediate. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 8 years, 2 months ago
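
The headline change: when a 64-bit double constant cannot be encoded as a VMOV immediate (VFPv3 encodes only a limited set of doubles directly in the instruction), the patch loads it with a single VLDR from the constant pool instead of assembling it from two general-purpose registers. A minimal sketch of the two code shapes in V8's ARM macro-assembler style (register choices and the offset label are illustrative, not taken from the patch):

  // Before: build the double from its two 32-bit halves via GPRs.
  __ mov(ip, Operand(lo_word));         // low word of the double
  __ mov(scratch, Operand(hi_word));    // high word of the double
  __ vmov(d0, ip, scratch);             // two core-to-VFP transfers

  // After: a single pc-relative load of a pooled 64-bit literal.
  __ vldr(d0, MemOperand(pc, offset));  // offset of the emitted constant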
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
(...skipping 712 matching lines...)
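
Besides the VLDR change, every hunk shown in this file applies one mechanical cleanup: immediate zeros previously spelled `Operand(0)` or `Operand(0, RelocInfo::NONE)` become `Operand::Zero()`, starting with the hunk below. A plausible shape for that helper on the ARM `Operand` class (my paraphrase, not the exact assembler-arm.h source):

  // Sketch: a static factory on Operand, paraphrased from assembler-arm.h.
  static Operand Zero() {
    return Operand(static_cast<int32_t>(0));  // immediate 0, default reloc info
  }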
  __ add(r0, sp, Operand(1 * kPointerSize));
  // v8::Arguments::implicit_args_
  __ str(r2, MemOperand(r0, 0 * kPointerSize));
  // v8::Arguments::values_
  __ add(ip, r2, Operand(argc * kPointerSize));
  __ str(ip, MemOperand(r0, 1 * kPointerSize));
  // v8::Arguments::length_ = argc
  __ mov(ip, Operand(argc));
  __ str(ip, MemOperand(r0, 2 * kPointerSize));
  // v8::Arguments::is_construct_call = 0
-  __ mov(ip, Operand(0));
+  __ mov(ip, Operand::Zero());
  __ str(ip, MemOperand(r0, 3 * kPointerSize));

  const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
  Address function_address = v8::ToCData<Address>(api_call_info->callback());
  ApiFunction fun(function_address);
  ExternalReference ref = ExternalReference(&fun,
                                            ExternalReference::DIRECT_API_CALL,
                                            masm->isolate());
  AllowExternalCallThatCantCauseGC scope(masm);

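The four stores above assemble a v8::Arguments block directly on the stack, leaving r0 pointing at it for the API call. The layout they build, with field names taken from the comments (the struct itself is my illustration of the layout, not the v8.h definition):

  struct ArgumentsLayout {        // r0 = sp + 1 * kPointerSize points here
    Object** implicit_args_;      // r0 + 0 * kPointerSize  <- r2
    Object** values_;             // r0 + 1 * kPointerSize  <- r2 + argc slots
    int length_;                  // r0 + 2 * kPointerSize  <- argc
    int is_construct_call_;       // r0 + 3 * kPointerSize  <- 0
  };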
(...skipping 252 matching lines...)
    __ vstr(s0, scratch1, 0);
  } else {
    Label not_special, done;
    // Move sign bit from source to destination. This works because the sign
    // bit in the exponent word of the double has the same position and polarity
    // as the 2's complement sign bit in a Smi.
    ASSERT(kBinary32SignMask == 0x80000000u);

    __ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
    // Negate value if it is negative.
-    __ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+    __ rsb(ival, ival, Operand::Zero(), LeaveCC, ne);

    // We have -1, 0 or 1, which we treat specially. Register ival contains
    // absolute value: it is either equal to 1 (special case of -1 and 1),
    // greater than 1 (not a special case) or less than 1 (special case of 0).
    __ cmp(ival, Operand(1));
    __ b(gt, &not_special);

    // For 1 or -1 we need to or in the 0 exponent (biased).
    static const uint32_t exponent_word_for_1 =
        kBinary32ExponentBias << kBinary32ExponentShift;
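
The special cases exist because -1, 0 and 1 have trivial IEEE-754 single-precision encodings, so no general int-to-float conversion is needed: 0 is all-zero bits, and ±1 is just the biased zero exponent (127 << 23) plus the sign bit already captured in fval. A standalone check of that constant (standard C++, my illustration):

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  int main() {
    const uint32_t kBinary32ExponentBias = 127;   // standard single precision
    const uint32_t kBinary32ExponentShift = 23;
    // The "exponent word for 1" the code above ORs into fval: 0x3F800000.
    const uint32_t exponent_word_for_1 =
        kBinary32ExponentBias << kBinary32ExponentShift;
    float one;
    std::memcpy(&one, &exponent_word_for_1, sizeof one);
    std::printf("%f\n", one);  // 1.000000; with the sign bit set it is -1.0f
    return 0;
  }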
(...skipping 48 matching lines...)
      meaningful_bits - HeapNumber::kMantissaBitsInTopWord;

  const int mantissa_shift_for_lo_word =
      kBitsPerInt - mantissa_shift_for_hi_word;

  __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
  if (mantissa_shift_for_hi_word > 0) {
    __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
    __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
  } else {
-    __ mov(loword, Operand(0, RelocInfo::NONE));
+    __ mov(loword, Operand::Zero());
    __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
  }

  // If least significant bit of biased exponent was not 1 it was corrupted
  // by most significant bit of mantissa so we should fix that.
  if (!(biased_exponent & 1)) {
    __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
  }
}

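The shift arithmetic above targets the standard IEEE-754 double layout split across two 32-bit words: the hiword carries the sign bit, the 11 exponent bits starting at kExponentShift (bit 20), and the top 20 mantissa bits; the loword carries the remaining 32 mantissa bits. A standalone illustration of that split (my sketch, not V8 code):

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  int main() {
    double d = 5.0;  // 1.25 * 2^2, so biased exponent = 1023 + 2 = 0x401
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof bits);
    const uint32_t loword = static_cast<uint32_t>(bits);
    const uint32_t hiword = static_cast<uint32_t>(bits >> 32);
    // Prints hiword=0x40140000 loword=0x00000000: exponent 0x401 at bit 20,
    // top mantissa bits 0x40000 (the .25), nothing left for the low word.
    std::printf("hiword=0x%08X loword=0x%08X\n", hiword, loword);
    return 0;
  }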
(...skipping 1142 matching lines...)
  // Move the result back to general purpose register r0.
  __ vmov(r0, s0);
  // Check if the result fits into a smi.
  __ add(r1, r0, Operand(0x40000000), SetCC);
  __ b(&wont_fit_smi, mi);
  // Tag the result.
  STATIC_ASSERT(kSmiTag == 0);
  __ mov(r0, Operand(r0, LSL, kSmiTagSize));

  // Check for -0.
-  __ cmp(r0, Operand(0, RelocInfo::NONE));
+  __ cmp(r0, Operand::Zero());
  __ b(&restore_fpscr_and_return, ne);
  // r5 already holds the HeapNumber exponent.
  __ tst(r5, Operand(HeapNumber::kSignMask));
  // If our HeapNumber is negative it was -0, so load its address and return.
  // Else r0 is loaded with 0, so we can also just return.
  __ ldr(r0, MemOperand(sp, 0 * kPointerSize), ne);

  __ bind(&restore_fpscr_and_return);
  // Restore FPSCR and return.
  __ vmsr(r3);
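
The add-with-0x40000000 test above is the standard smi-range check on 32-bit V8: a value fits a 31-bit smi exactly when the addition leaves the sign bit clear, so the single branch on mi catches overflow in both directions. A portable restatement (my illustration):

  #include <cstdint>
  #include <cstdio>

  static bool FitsSmi(int32_t v) {
    // Smis cover [-2^30, 2^30 - 1]; adding 0x40000000 maps that range onto
    // [0x00000000, 0x7FFFFFFF]. Anything outside sets the sign bit, which
    // is exactly the "mi" condition the stub branches on.
    const uint32_t sum = static_cast<uint32_t>(v) + 0x40000000u;
    return (sum & 0x80000000u) == 0;
  }

  int main() {
    std::printf("%d %d %d %d\n",
                FitsSmi(0x3FFFFFFF), FitsSmi(-0x40000000),  // 1 1: in range
                FitsSmi(0x40000000), FitsSmi(INT32_MIN));   // 0 0: out of range
    return 0;
  }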
(...skipping 2002 matching lines...)
  bool is_signed_type = IsElementTypeSigned(elements_kind);
  int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
  int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;

  Label done, sign;

  // Test for all special exponent values: zeros, subnormal numbers, NaNs
  // and infinities. All these should be converted to 0.
  __ mov(r7, Operand(HeapNumber::kExponentMask));
  __ and_(r9, r5, Operand(r7), SetCC);
-  __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
+  __ mov(r5, Operand::Zero(), LeaveCC, eq);
  __ b(eq, &done);

  __ teq(r9, Operand(r7));
-  __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
+  __ mov(r5, Operand::Zero(), LeaveCC, eq);
  __ b(eq, &done);

  // Unbias exponent.
  __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
  __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
  // If exponent is negative then result is 0.
-  __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi);
+  __ mov(r5, Operand::Zero(), LeaveCC, mi);
  __ b(mi, &done);

  // If exponent is too big then result is minimal value.
  __ cmp(r9, Operand(meaningfull_bits - 1));
  __ mov(r5, Operand(min_value), LeaveCC, ge);
  __ b(ge, &done);

  __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
  __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
  __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));

  __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
  __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
  __ b(pl, &sign);

-  __ rsb(r9, r9, Operand(0, RelocInfo::NONE));
+  __ rsb(r9, r9, Operand::Zero());
  __ mov(r5, Operand(r5, LSL, r9));
  __ rsb(r9, r9, Operand(meaningfull_bits));
  __ orr(r5, r5, Operand(r6, LSR, r9));

  __ bind(&sign);
-  __ teq(r7, Operand(0, RelocInfo::NONE));
-  __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+  __ teq(r7, Operand::Zero());
+  __ rsb(r5, r5, Operand::Zero(), LeaveCC, ne);

  __ bind(&done);
  switch (elements_kind) {
    case EXTERNAL_BYTE_ELEMENTS:
    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
      __ strb(r5, MemOperand(r3, key, LSR, 1));
      break;
    case EXTERNAL_SHORT_ELEMENTS:
    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
      __ strh(r5, MemOperand(r3, key, LSL, 0));
(...skipping 471 matching lines...)
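
Before the mantissa shifting in the hunk above, the exponent tests implement a fixed policy for the value being stored. A portable restatement of just that classification (`hi` is the upper word of the HeapNumber's double; constants are the standard IEEE-754 layout; the helper is my sketch, not V8 code):

  #include <cstdint>
  #include <cstdio>

  // Returns true when a special case already decides the stored value;
  // otherwise the stub goes on to shift the mantissa into place.
  static bool SpecialCaseResult(uint32_t hi, bool is_signed, int32_t* out) {
    const int meaningful_bits = is_signed ? 31 : 32;
    const int32_t min_value = is_signed ? INT32_MIN : 0;
    const uint32_t kExponentMask = 0x7FF00000u;  // HeapNumber::kExponentMask
    const uint32_t exponent_bits = hi & kExponentMask;
    if (exponent_bits == 0) { *out = 0; return true; }              // 0/subnormal
    if (exponent_bits == kExponentMask) { *out = 0; return true; }  // NaN/inf
    const int exponent = static_cast<int>(exponent_bits >> 20) - 1023;
    if (exponent < 0) { *out = 0; return true; }           // |value| < 1 -> 0
    if (exponent >= meaningful_bits - 1) { *out = min_value; return true; }
    return false;
  }

  int main() {
    int32_t out;
    // +inf (hi word 0x7FF00000) is special and stores 0; 2.0 (0x40000000) is not.
    std::printf("%d %d\n", SpecialCaseResult(0x7FF00000u, true, &out), out);  // 1 0
    std::printf("%d\n", SpecialCaseResult(0x40000000u, true, &out));          // 0
    return 0;
  }

In the switch that stores the result, note that key arrives as a smi (index × 2): byte stores untag it with LSR 1, while 16-bit stores use it as-is (LSL 0), since the smi tag doubles as the ×2 element scaling.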
    __ Jump(ic_slow, RelocInfo::CODE_TARGET);
  }
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM
