Chromium Code Reviews

Side by Side Diff: src/arm/code-stubs-arm.cc

Issue 11191029: Use VLDR instead of VMOVs from GPR when a 64-bit double can't be encoded as a VMOV immediate. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 8 years, 2 months ago
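At a glance, the change replaces the old way of materializing a 64-bit double constant that cannot be encoded as a VMOV immediate: instead of building the two 32-bit halves in general-purpose registers and transferring them with VMOVs, the assembler emits a single VLDR from the constant pool. Most of the hunks visible in this file are the accompanying cleanup that drops RelocInfo::NONE from integer operands, mostly by switching Operand(0, RelocInfo::NONE) to the Operand::Zero() helper. The host-side sketch below only illustrates the idea; the register names and pool-offset wording are assumptions for exposition, not code from the patch.

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  double value = 1.000123;  // arbitrary example; not encodable as a VMOV immediate
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  // Old path: both 32-bit halves had to be materialized in general-purpose
  // registers and transferred with "vmov dN, rLo, rHi".
  uint32_t lo = static_cast<uint32_t>(bits);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);
  std::printf("lo word = 0x%08x, hi word = 0x%08x\n",
              static_cast<unsigned>(lo), static_cast<unsigned>(hi));
  // New path: the eight bytes sit in the constant pool and are fetched with a
  // single "vldr dN, [pc, #offset]", no GPR staging required.
  return 0;
}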
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 533 matching lines...)
544 544
545 Label not_special; 545 Label not_special;
546 // Convert from Smi to integer. 546 // Convert from Smi to integer.
547 __ mov(source_, Operand(source_, ASR, kSmiTagSize)); 547 __ mov(source_, Operand(source_, ASR, kSmiTagSize));
548 // Move sign bit from source to destination. This works because the sign bit 548 // Move sign bit from source to destination. This works because the sign bit
549 // in the exponent word of the double has the same position and polarity as 549 // in the exponent word of the double has the same position and polarity as
550 // the 2's complement sign bit in a Smi. 550 // the 2's complement sign bit in a Smi.
551 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); 551 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
552 __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC); 552 __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
553 // Subtract from 0 if source was negative. 553 // Subtract from 0 if source was negative.
554 __ rsb(source_, source_, Operand(0, RelocInfo::NONE), LeaveCC, ne); 554 __ rsb(source_, source_, Operand::Zero(), LeaveCC, ne);
555 555
556 // We have -1, 0 or 1, which we treat specially. Register source_ contains 556 // We have -1, 0 or 1, which we treat specially. Register source_ contains
557 // absolute value: it is either equal to 1 (special case of -1 and 1), 557 // absolute value: it is either equal to 1 (special case of -1 and 1),
558 // greater than 1 (not a special case) or less than 1 (special case of 0). 558 // greater than 1 (not a special case) or less than 1 (special case of 0).
559 __ cmp(source_, Operand(1)); 559 __ cmp(source_, Operand(1));
560 __ b(gt, &not_special); 560 __ b(gt, &not_special);
561 561
562 // For 1 or -1 we need to or in the 0 exponent (biased to 1023). 562 // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
563 const uint32_t exponent_word_for_1 = 563 const uint32_t exponent_word_for_1 =
564 HeapNumber::kExponentBias << HeapNumber::kExponentShift; 564 HeapNumber::kExponentBias << HeapNumber::kExponentShift;
565 __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq); 565 __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
566 // 1, 0 and -1 all have 0 for the second word. 566 // 1, 0 and -1 all have 0 for the second word.
567 __ mov(mantissa, Operand(0, RelocInfo::NONE)); 567 __ mov(mantissa, Operand::Zero());
568 __ Ret(); 568 __ Ret();
569 569
570 __ bind(&not_special); 570 __ bind(&not_special);
571 // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5. 571 // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5.
572 // Gets the wrong answer for 0, but we already checked for that case above. 572 // Gets the wrong answer for 0, but we already checked for that case above.
573 __ CountLeadingZeros(zeros_, source_, mantissa); 573 __ CountLeadingZeros(zeros_, source_, mantissa);
574 // Compute exponent and or it into the exponent register. 574 // Compute exponent and or it into the exponent register.
575 // We use mantissa as a scratch register here. Use a fudge factor to 575 // We use mantissa as a scratch register here. Use a fudge factor to
576 // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts 576 // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts
577 // that fit in the ARM's constant field. 577 // that fit in the ARM's constant field.
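The constant built just above is an ordinary IEEE-754 top-word value: for 1.0 the biased exponent is kExponentBias, shifted kExponentShift bits up in the top word. A minimal host-side check, with the 1023 bias and 20-bit shift restated for the sketch rather than pulled from the v8 headers:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  const uint32_t kExponentBias = 1023;   // IEEE-754 double exponent bias
  const uint32_t kExponentShift = 20;    // exponent position in the top word
  const uint32_t exponent_word_for_1 = kExponentBias << kExponentShift;

  double one = 1.0;
  uint64_t bits;
  std::memcpy(&bits, &one, sizeof(bits));
  assert(static_cast<uint32_t>(bits >> 32) == exponent_word_for_1);  // 0x3ff00000
  assert(static_cast<uint32_t>(bits) == 0);  // the mantissa word of 1.0 is zero
  return 0;
}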
(...skipping 529 matching lines...)
1107 __ cmp(the_int_, Operand(0x80000000u)); 1107 __ cmp(the_int_, Operand(0x80000000u));
1108 __ b(eq, &max_negative_int); 1108 __ b(eq, &max_negative_int);
1109 // Set up the correct exponent in scratch_. All non-Smi int32s have the same. 1109 // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
1110 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). 1110 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
1111 uint32_t non_smi_exponent = 1111 uint32_t non_smi_exponent =
1112 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; 1112 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
1113 __ mov(scratch_, Operand(non_smi_exponent)); 1113 __ mov(scratch_, Operand(non_smi_exponent));
1114 // Set the sign bit in scratch_ if the value was negative. 1114 // Set the sign bit in scratch_ if the value was negative.
1115 __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs); 1115 __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
1116 // Subtract from 0 if the value was negative. 1116 // Subtract from 0 if the value was negative.
1117 __ rsb(the_int_, the_int_, Operand(0, RelocInfo::NONE), LeaveCC, cs); 1117 __ rsb(the_int_, the_int_, Operand::Zero(), LeaveCC, cs);
1118 // We should be masking the implicit first digit of the mantissa away here, 1118 // We should be masking the implicit first digit of the mantissa away here,
1119 // but it just ends up combining harmlessly with the last digit of the 1119 // but it just ends up combining harmlessly with the last digit of the
1120 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get 1120 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
1121 // the most significant 1 to hit the last bit of the 12 bit sign and exponent. 1121 // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
1122 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0); 1122 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
1123 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; 1123 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
1124 __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance)); 1124 __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
1125 __ str(scratch_, FieldMemOperand(the_heap_number_, 1125 __ str(scratch_, FieldMemOperand(the_heap_number_,
1126 HeapNumber::kExponentOffset)); 1126 HeapNumber::kExponentOffset));
1127 __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance)); 1127 __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
1128 __ str(scratch_, FieldMemOperand(the_heap_number_, 1128 __ str(scratch_, FieldMemOperand(the_heap_number_,
1129 HeapNumber::kMantissaOffset)); 1129 HeapNumber::kMantissaOffset));
1130 __ Ret(); 1130 __ Ret();
1131 1131
1132 __ bind(&max_negative_int); 1132 __ bind(&max_negative_int);
1133 // The max negative int32 is stored as a positive number in the mantissa of 1133 // The max negative int32 is stored as a positive number in the mantissa of
1134 // a double because it uses a sign bit instead of using two's complement. 1134 // a double because it uses a sign bit instead of using two's complement.
1135 // The actual mantissa bits stored are all 0 because the implicit most 1135 // The actual mantissa bits stored are all 0 because the implicit most
1136 // significant 1 bit is not stored. 1136 // significant 1 bit is not stored.
1137 non_smi_exponent += 1 << HeapNumber::kExponentShift; 1137 non_smi_exponent += 1 << HeapNumber::kExponentShift;
1138 __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent)); 1138 __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
1139 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset)); 1139 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
1140 __ mov(ip, Operand(0, RelocInfo::NONE)); 1140 __ mov(ip, Operand::Zero());
1141 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset)); 1141 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
1142 __ Ret(); 1142 __ Ret();
1143 } 1143 }
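The two exponents this stub builds can likewise be confirmed on the host. A sketch assuming the standard IEEE-754 layout, with the bias, shift, and sign-mask constants restated here rather than taken from the v8 headers:

#include <cassert>
#include <cstdint>
#include <cstring>

static uint32_t TopWord(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return static_cast<uint32_t>(bits >> 32);
}

int main() {
  const uint32_t kExponentBias = 1023, kExponentShift = 20, kSignMask = 0x80000000u;
  // An ordinary non-Smi int32 such as 2^30 is 1.0 * 2^30: exponent is bias + 30.
  const uint32_t non_smi_exponent = (kExponentBias + 30) << kExponentShift;
  assert(TopWord(1073741824.0) == non_smi_exponent);              // 0x41d00000
  // The max negative int32, -2^31, needs the sign bit and exponent bias + 31.
  const uint32_t max_negative = kSignMask | ((kExponentBias + 31) << kExponentShift);
  assert(TopWord(-2147483648.0) == max_negative);                 // 0xc1e00000
  return 0;
}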
1144 1144
1145 1145
1146 // Handle the case where the lhs and rhs are the same object. 1146 // Handle the case where the lhs and rhs are the same object.
1147 // Equality is almost reflexive (everything but NaN), so this is a test 1147 // Equality is almost reflexive (everything but NaN), so this is a test
1148 // for "identity and not NaN". 1148 // for "identity and not NaN".
1149 static void EmitIdenticalObjectComparison(MacroAssembler* masm, 1149 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
1150 Label* slow, 1150 Label* slow,
(...skipping 202 matching lines...)
1353 lhs_exponent, 1353 lhs_exponent,
1354 HeapNumber::kExponentShift, 1354 HeapNumber::kExponentShift,
1355 HeapNumber::kExponentBits); 1355 HeapNumber::kExponentBits);
1356 // NaNs have all-one exponents so they sign extend to -1. 1356 // NaNs have all-one exponents so they sign extend to -1.
1357 __ cmp(r4, Operand(-1)); 1357 __ cmp(r4, Operand(-1));
1358 __ b(ne, lhs_not_nan); 1358 __ b(ne, lhs_not_nan);
1359 __ mov(r4, 1359 __ mov(r4,
1360 Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), 1360 Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
1361 SetCC); 1361 SetCC);
1362 __ b(ne, &one_is_nan); 1362 __ b(ne, &one_is_nan);
1363 __ cmp(lhs_mantissa, Operand(0, RelocInfo::NONE)); 1363 __ cmp(lhs_mantissa, Operand::Zero());
1364 __ b(ne, &one_is_nan); 1364 __ b(ne, &one_is_nan);
1365 1365
1366 __ bind(lhs_not_nan); 1366 __ bind(lhs_not_nan);
1367 __ Sbfx(r4, 1367 __ Sbfx(r4,
1368 rhs_exponent, 1368 rhs_exponent,
1369 HeapNumber::kExponentShift, 1369 HeapNumber::kExponentShift,
1370 HeapNumber::kExponentBits); 1370 HeapNumber::kExponentBits);
1371 // NaNs have all-one exponents so they sign extend to -1. 1371 // NaNs have all-one exponents so they sign extend to -1.
1372 __ cmp(r4, Operand(-1)); 1372 __ cmp(r4, Operand(-1));
1373 __ b(ne, &neither_is_nan); 1373 __ b(ne, &neither_is_nan);
1374 __ mov(r4, 1374 __ mov(r4,
1375 Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), 1375 Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
1376 SetCC); 1376 SetCC);
1377 __ b(ne, &one_is_nan); 1377 __ b(ne, &one_is_nan);
1378 __ cmp(rhs_mantissa, Operand(0, RelocInfo::NONE)); 1378 __ cmp(rhs_mantissa, Operand::Zero());
1379 __ b(eq, &neither_is_nan); 1379 __ b(eq, &neither_is_nan);
1380 1380
1381 __ bind(&one_is_nan); 1381 __ bind(&one_is_nan);
1382 // NaN comparisons always fail. 1382 // NaN comparisons always fail.
1383 // Load whatever we need in r0 to make the comparison fail. 1383 // Load whatever we need in r0 to make the comparison fail.
1384 if (cond == lt || cond == le) { 1384 if (cond == lt || cond == le) {
1385 __ mov(r0, Operand(GREATER)); 1385 __ mov(r0, Operand(GREATER));
1386 } else { 1386 } else {
1387 __ mov(r0, Operand(LESS)); 1387 __ mov(r0, Operand(LESS));
1388 } 1388 }
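For readers not fluent in the Sbfx idiom above: the 11 exponent bits are extracted sign-extended, so an all-ones (NaN/Infinity) exponent reads as -1, and a nonzero mantissa then separates NaN from Infinity. A portable restatement of the test, a sketch that assumes the usual 20-bit top-word mantissa split:

#include <cstdint>

// True iff the two 32-bit words of a double encode a NaN.
bool IsNaNWords(uint32_t exponent_word, uint32_t mantissa_word) {
  // The stub sign-extends the exponent field (Sbfx) and compares against -1;
  // masking and comparing against 0x7ff is the same test written portably.
  bool exponent_all_ones = ((exponent_word >> 20) & 0x7FF) == 0x7FF;
  bool mantissa_nonzero =
      (exponent_word & 0x000FFFFF) != 0 || mantissa_word != 0;
  return exponent_all_ones && mantissa_nonzero;
}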
(...skipping 485 matching lines...)
1874 __ JumpIfSmi(tos_, &patch); 1874 __ JumpIfSmi(tos_, &patch);
1875 } 1875 }
1876 1876
1877 if (types_.NeedsMap()) { 1877 if (types_.NeedsMap()) {
1878 __ ldr(map, FieldMemOperand(tos_, HeapObject::kMapOffset)); 1878 __ ldr(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
1879 1879
1880 if (types_.CanBeUndetectable()) { 1880 if (types_.CanBeUndetectable()) {
1881 __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset)); 1881 __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
1882 __ tst(ip, Operand(1 << Map::kIsUndetectable)); 1882 __ tst(ip, Operand(1 << Map::kIsUndetectable));
1883 // Undetectable -> false. 1883 // Undetectable -> false.
1884 __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne); 1884 __ mov(tos_, Operand::Zero(), LeaveCC, ne);
1885 __ Ret(ne); 1885 __ Ret(ne);
1886 } 1886 }
1887 } 1887 }
1888 1888
1889 if (types_.Contains(SPEC_OBJECT)) { 1889 if (types_.Contains(SPEC_OBJECT)) {
1890 // Spec object -> true. 1890 // Spec object -> true.
1891 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE); 1891 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
1892 // tos_ contains the correct non-zero return value already. 1892 // tos_ contains the correct non-zero return value already.
1893 __ Ret(ge); 1893 __ Ret(ge);
1894 } 1894 }
(...skipping 12 matching lines...)
1907 __ b(ne, &not_heap_number); 1907 __ b(ne, &not_heap_number);
1908 1908
1909 if (CpuFeatures::IsSupported(VFP2)) { 1909 if (CpuFeatures::IsSupported(VFP2)) {
1910 CpuFeatures::Scope scope(VFP2); 1910 CpuFeatures::Scope scope(VFP2);
1911 1911
1912 __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset)); 1912 __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
1913 __ VFPCompareAndSetFlags(d1, 0.0); 1913 __ VFPCompareAndSetFlags(d1, 0.0);
1914 // "tos_" is a register, and contains a non zero value by default. 1914 // "tos_" is a register, and contains a non zero value by default.
1915 // Hence we only need to overwrite "tos_" with zero to return false for 1915 // Hence we only need to overwrite "tos_" with zero to return false for
1916 // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true. 1916 // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
1917 __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq); // for FP_ZERO 1917 __ mov(tos_, Operand::Zero(), LeaveCC, eq); // for FP_ZERO
1918 __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for FP_NAN 1918 __ mov(tos_, Operand::Zero(), LeaveCC, vs); // for FP_NAN
1919 } else { 1919 } else {
1920 Label done, not_nan, not_zero; 1920 Label done, not_nan, not_zero;
1921 __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset)); 1921 __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
1922 // -0 maps to false: 1922 // -0 maps to false:
1923 __ bic( 1923 __ bic(
1924 temp, temp, Operand(HeapNumber::kSignMask, RelocInfo::NONE), SetCC); 1924 temp, temp, Operand(HeapNumber::kSignMask), SetCC);
1925 __ b(ne, &not_zero); 1925 __ b(ne, &not_zero);
1926 // If exponent word is zero then the answer depends on the mantissa word. 1926 // If exponent word is zero then the answer depends on the mantissa word.
1927 __ ldr(tos_, FieldMemOperand(tos_, HeapNumber::kMantissaOffset)); 1927 __ ldr(tos_, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
1928 __ jmp(&done); 1928 __ jmp(&done);
1929 1929
1930 // Check for NaN. 1930 // Check for NaN.
1931 __ bind(&not_zero); 1931 __ bind(&not_zero);
1932 // We already zeroed the sign bit, now shift out the mantissa so we only 1932 // We already zeroed the sign bit, now shift out the mantissa so we only
1933 // have the exponent left. 1933 // have the exponent left.
1934 __ mov(temp, Operand(temp, LSR, HeapNumber::kMantissaBitsInTopWord)); 1934 __ mov(temp, Operand(temp, LSR, HeapNumber::kMantissaBitsInTopWord));
1935 unsigned int shifted_exponent_mask = 1935 unsigned int shifted_exponent_mask =
1936 HeapNumber::kExponentMask >> HeapNumber::kMantissaBitsInTopWord; 1936 HeapNumber::kExponentMask >> HeapNumber::kMantissaBitsInTopWord;
1937 __ cmp(temp, Operand(shifted_exponent_mask, RelocInfo::NONE)); 1937 __ cmp(temp, Operand(shifted_exponent_mask));
1938 __ b(ne, &not_nan); // If exponent is not 0x7ff then it can't be a NaN. 1938 __ b(ne, &not_nan); // If exponent is not 0x7ff then it can't be a NaN.
1939 1939
1940 // Reload exponent word. 1940 // Reload exponent word.
1941 __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset)); 1941 __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
1942 __ tst(temp, Operand(HeapNumber::kMantissaMask, RelocInfo::NONE)); 1942 __ tst(temp, Operand(HeapNumber::kMantissaMask));
1943 // If mantissa is not zero then we have a NaN, so return 0. 1943 // If mantissa is not zero then we have a NaN, so return 0.
1944 __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne); 1944 __ mov(tos_, Operand::Zero(), LeaveCC, ne);
1945 __ b(ne, &done); 1945 __ b(ne, &done);
1946 1946
1947 // Load mantissa word. 1947 // Load mantissa word.
1948 __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kMantissaOffset)); 1948 __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
1949 __ cmp(temp, Operand(0, RelocInfo::NONE)); 1949 __ cmp(temp, Operand::Zero());
1950 // If mantissa is not zero then we have a NaN, so return 0. 1950 // If mantissa is not zero then we have a NaN, so return 0.
1951 __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne); 1951 __ mov(tos_, Operand::Zero(), LeaveCC, ne);
1952 __ b(ne, &done); 1952 __ b(ne, &done);
1953 1953
1954 __ bind(&not_nan); 1954 __ bind(&not_nan);
1955 __ mov(tos_, Operand(1, RelocInfo::NONE)); 1955 __ mov(tos_, Operand(1));
1956 __ bind(&done); 1956 __ bind(&done);
1957 } 1957 }
1958 __ Ret(); 1958 __ Ret();
1959 __ bind(&not_heap_number); 1959 __ bind(&not_heap_number);
1960 } 1960 }
1961 1961
1962 __ bind(&patch); 1962 __ bind(&patch);
1963 GenerateTypeTransition(masm); 1963 GenerateTypeTransition(masm);
1964 } 1964 }
1965 1965
1966 1966
1967 void ToBooleanStub::CheckOddball(MacroAssembler* masm, 1967 void ToBooleanStub::CheckOddball(MacroAssembler* masm,
1968 Type type, 1968 Type type,
1969 Heap::RootListIndex value, 1969 Heap::RootListIndex value,
1970 bool result) { 1970 bool result) {
1971 if (types_.Contains(type)) { 1971 if (types_.Contains(type)) {
1972 // If we see an expected oddball, return its ToBoolean value tos_. 1972 // If we see an expected oddball, return its ToBoolean value tos_.
1973 __ LoadRoot(ip, value); 1973 __ LoadRoot(ip, value);
1974 __ cmp(tos_, ip); 1974 __ cmp(tos_, ip);
1975 // The value of a root is never NULL, so we can avoid loading a non-null 1975 // The value of a root is never NULL, so we can avoid loading a non-null
1976 // value into tos_ when we want to return 'true'. 1976 // value into tos_ when we want to return 'true'.
1977 if (!result) { 1977 if (!result) {
1978 __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq); 1978 __ mov(tos_, Operand::Zero(), LeaveCC, eq);
1979 } 1979 }
1980 __ Ret(eq); 1980 __ Ret(eq);
1981 } 1981 }
1982 } 1982 }
1983 1983
1984 1984
1985 void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) { 1985 void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
1986 if (!tos_.is(r3)) { 1986 if (!tos_.is(r3)) {
1987 __ mov(r3, Operand(tos_)); 1987 __ mov(r3, Operand(tos_));
1988 } 1988 }
(...skipping 124 matching lines...)
2113 void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm, 2113 void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
2114 Label* non_smi, 2114 Label* non_smi,
2115 Label* slow) { 2115 Label* slow) {
2116 __ JumpIfNotSmi(r0, non_smi); 2116 __ JumpIfNotSmi(r0, non_smi);
2117 2117
2118 // The result of negating zero or the smallest negative smi is not a smi. 2118 // The result of negating zero or the smallest negative smi is not a smi.
2119 __ bic(ip, r0, Operand(0x80000000), SetCC); 2119 __ bic(ip, r0, Operand(0x80000000), SetCC);
2120 __ b(eq, slow); 2120 __ b(eq, slow);
2121 2121
2122 // Return '0 - value'. 2122 // Return '0 - value'.
2123 __ rsb(r0, r0, Operand(0, RelocInfo::NONE)); 2123 __ rsb(r0, r0, Operand::Zero());
2124 __ Ret(); 2124 __ Ret();
2125 } 2125 }
2126 2126
2127 2127
2128 void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm, 2128 void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
2129 Label* non_smi) { 2129 Label* non_smi) {
2130 __ JumpIfNotSmi(r0, non_smi); 2130 __ JumpIfNotSmi(r0, non_smi);
2131 2131
2132 // Flip bits and revert inverted smi-tag. 2132 // Flip bits and revert inverted smi-tag.
2133 __ mvn(r0, Operand(r0)); 2133 __ mvn(r0, Operand(r0));
(...skipping 295 matching lines...)
2429 // Do multiplication 2429 // Do multiplication
2430 // scratch1 = lower 32 bits of ip * left. 2430 // scratch1 = lower 32 bits of ip * left.
2431 // scratch2 = higher 32 bits of ip * left. 2431 // scratch2 = higher 32 bits of ip * left.
2432 __ smull(scratch1, scratch2, left, ip); 2432 __ smull(scratch1, scratch2, left, ip);
2433 // Check for overflowing the smi range - no overflow if higher 33 bits of 2433 // Check for overflowing the smi range - no overflow if higher 33 bits of
2434 // the result are identical. 2434 // the result are identical.
2435 __ mov(ip, Operand(scratch1, ASR, 31)); 2435 __ mov(ip, Operand(scratch1, ASR, 31));
2436 __ cmp(ip, Operand(scratch2)); 2436 __ cmp(ip, Operand(scratch2));
2437 __ b(ne, &not_smi_result); 2437 __ b(ne, &not_smi_result);
2438 // Go slow on zero result to handle -0. 2438 // Go slow on zero result to handle -0.
2439 __ cmp(scratch1, Operand(0)); 2439 __ cmp(scratch1, Operand::Zero());
2440 __ mov(right, Operand(scratch1), LeaveCC, ne); 2440 __ mov(right, Operand(scratch1), LeaveCC, ne);
2441 __ Ret(ne); 2441 __ Ret(ne);
2442 // We need -0 if we were multiplying a negative number with 0 to get 0. 2442 // We need -0 if we were multiplying a negative number with 0 to get 0.
2443 // We know one of them was zero. 2443 // We know one of them was zero.
2444 __ add(scratch2, right, Operand(left), SetCC); 2444 __ add(scratch2, right, Operand(left), SetCC);
2445 __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl); 2445 __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
2446 __ Ret(pl); // Return smi 0 if the non-zero one was positive. 2446 __ Ret(pl); // Return smi 0 if the non-zero one was positive.
2447 // We fall through here if we multiplied a negative number with 0, because 2447 // We fall through here if we multiplied a negative number with 0, because
2448 // that would mean we should produce -0. 2448 // that would mean we should produce -0.
2449 break; 2449 break;
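The overflow test above relies on smull producing the full 64-bit product: the result fits in 32 signed bits exactly when the high word equals the sign extension of the low word, the "higher 33 bits identical" condition from the comment. A one-function sketch of the same check in plain C++ (illustrative; the right shift mirrors the ASR):

#include <cstdint>

// True iff left * right fits in a signed 32-bit value (the smi-result check).
bool ProductFitsInInt32(int32_t left, int32_t right) {
  int64_t product = static_cast<int64_t>(left) * right;  // what smull computes
  int32_t lo = static_cast<int32_t>(product);            // scratch1: low 32 bits
  int32_t hi = static_cast<int32_t>(product >> 32);      // scratch2: high 32 bits
  return hi == (lo >> 31);                               // top 33 bits identical
}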
(...skipping 929 matching lines...)
3379 Isolate* isolate = masm->isolate(); 3379 Isolate* isolate = masm->isolate();
3380 ExternalReference cache_array = 3380 ExternalReference cache_array =
3381 ExternalReference::transcendental_cache_array_address(isolate); 3381 ExternalReference::transcendental_cache_array_address(isolate);
3382 __ mov(cache_entry, Operand(cache_array)); 3382 __ mov(cache_entry, Operand(cache_array));
3383 // cache_entry points to cache array. 3383 // cache_entry points to cache array.
3384 int cache_array_index 3384 int cache_array_index
3385 = type_ * sizeof(isolate->transcendental_cache()->caches_[0]); 3385 = type_ * sizeof(isolate->transcendental_cache()->caches_[0]);
3386 __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index)); 3386 __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index));
3387 // r0 points to the cache for the type type_. 3387 // r0 points to the cache for the type type_.
3388 // If NULL, the cache hasn't been initialized yet, so go through runtime. 3388 // If NULL, the cache hasn't been initialized yet, so go through runtime.
3389 __ cmp(cache_entry, Operand(0, RelocInfo::NONE)); 3389 __ cmp(cache_entry, Operand::Zero());
3390 __ b(eq, &invalid_cache); 3390 __ b(eq, &invalid_cache);
3391 3391
3392 #ifdef DEBUG 3392 #ifdef DEBUG
3393 // Check that the layout of cache elements matches expectations. 3393 // Check that the layout of cache elements matches expectations.
3394 { TranscendentalCache::SubCache::Element test_elem[2]; 3394 { TranscendentalCache::SubCache::Element test_elem[2];
3395 char* elem_start = reinterpret_cast<char*>(&test_elem[0]); 3395 char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
3396 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]); 3396 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
3397 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0])); 3397 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
3398 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); 3398 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
3399 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); 3399 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
(...skipping 290 matching lines...)
3690 if (exponent_type_ == INTEGER) { 3690 if (exponent_type_ == INTEGER) {
3691 __ mov(scratch, exponent); 3691 __ mov(scratch, exponent);
3692 } else { 3692 } else {
3693 // Exponent has previously been stored into scratch as untagged integer. 3693 // Exponent has previously been stored into scratch as untagged integer.
3694 __ mov(exponent, scratch); 3694 __ mov(exponent, scratch);
3695 } 3695 }
3696 __ vmov(double_scratch, double_base); // Back up base. 3696 __ vmov(double_scratch, double_base); // Back up base.
3697 __ vmov(double_result, 1.0, scratch2); 3697 __ vmov(double_result, 1.0, scratch2);
3698 3698
3699 // Get absolute value of exponent. 3699 // Get absolute value of exponent.
3700 __ cmp(scratch, Operand(0)); 3700 __ cmp(scratch, Operand::Zero());
3701 __ mov(scratch2, Operand(0), LeaveCC, mi); 3701 __ mov(scratch2, Operand::Zero(), LeaveCC, mi);
3702 __ sub(scratch, scratch2, scratch, LeaveCC, mi); 3702 __ sub(scratch, scratch2, scratch, LeaveCC, mi);
3703 3703
3704 Label while_true; 3704 Label while_true;
3705 __ bind(&while_true); 3705 __ bind(&while_true);
3706 __ mov(scratch, Operand(scratch, ASR, 1), SetCC); 3706 __ mov(scratch, Operand(scratch, ASR, 1), SetCC);
3707 __ vmul(double_result, double_result, double_scratch, cs); 3707 __ vmul(double_result, double_result, double_scratch, cs);
3708 __ vmul(double_scratch, double_scratch, double_scratch, ne); 3708 __ vmul(double_scratch, double_scratch, double_scratch, ne);
3709 __ b(ne, &while_true); 3709 __ b(ne, &while_true);
3710 3710
3711 __ cmp(exponent, Operand(0)); 3711 __ cmp(exponent, Operand::Zero());
3712 __ b(ge, &done); 3712 __ b(ge, &done);
3713 __ vmov(double_scratch, 1.0, scratch); 3713 __ vmov(double_scratch, 1.0, scratch);
3714 __ vdiv(double_result, double_scratch, double_result); 3714 __ vdiv(double_result, double_scratch, double_result);
3715 // Test whether result is zero. Bail out to check for subnormal result. 3715 // Test whether result is zero. Bail out to check for subnormal result.
3716 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases. 3716 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
3717 __ VFPCompareAndSetFlags(double_result, 0.0); 3717 __ VFPCompareAndSetFlags(double_result, 0.0);
3718 __ b(ne, &done); 3718 __ b(ne, &done);
3719 // double_exponent may not contain the exponent value if the input was a 3719 // double_exponent may not contain the exponent value if the input was a
3720 // smi. We set it with exponent value before bailing out. 3720 // smi. We set it with exponent value before bailing out.
3721 __ vmov(single_scratch, exponent); 3721 __ vmov(single_scratch, exponent);
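The loop above is binary exponentiation driven by the carry flag: each ASR shifts the next exponent bit into the carry, the result is multiplied by the current base when that bit is set, and the base is squared while bits remain; a negative exponent is handled afterwards by taking the reciprocal, with the subnormal bailout described in the comments. A plain C++ sketch of the same scheme (illustrative only, not the stub):

#include <cstdint>

// Square-and-multiply for an integer exponent, mirroring the stub's loop.
double IntegerPow(double base, int32_t exponent) {
  uint32_t bits = exponent < 0 ? 0u - static_cast<uint32_t>(exponent)
                               : static_cast<uint32_t>(exponent);
  double result = 1.0;
  double running = base;                  // double_scratch: the backed-up base
  while (bits != 0) {
    if (bits & 1u) result *= running;     // carry set after the ASR
    bits >>= 1u;
    if (bits != 0) running *= running;    // square only while bits remain
  }
  // Negative exponent: 1/x^|y|; the stub bails out to the full pow path when
  // this underflows to zero, since subnormals make (1/x)^y and x^-y differ.
  if (exponent < 0) result = 1.0 / result;
  return result;
}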
(...skipping 253 matching lines...)
3975 &throw_termination_exception, 3975 &throw_termination_exception,
3976 &throw_out_of_memory_exception, 3976 &throw_out_of_memory_exception,
3977 true, 3977 true,
3978 true); 3978 true);
3979 3979
3980 __ bind(&throw_out_of_memory_exception); 3980 __ bind(&throw_out_of_memory_exception);
3981 // Set external caught exception to false. 3981 // Set external caught exception to false.
3982 Isolate* isolate = masm->isolate(); 3982 Isolate* isolate = masm->isolate();
3983 ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress, 3983 ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
3984 isolate); 3984 isolate);
3985 __ mov(r0, Operand(false, RelocInfo::NONE)); 3985 __ mov(r0, Operand(false));
3986 __ mov(r2, Operand(external_caught)); 3986 __ mov(r2, Operand(external_caught));
3987 __ str(r0, MemOperand(r2)); 3987 __ str(r0, MemOperand(r2));
3988 3988
3989 // Set pending exception and r0 to out of memory exception. 3989 // Set pending exception and r0 to out of memory exception.
3990 Failure* out_of_memory = Failure::OutOfMemoryException(); 3990 Failure* out_of_memory = Failure::OutOfMemoryException();
3991 __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory))); 3991 __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
3992 __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress, 3992 __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
3993 isolate))); 3993 isolate)));
3994 __ str(r0, MemOperand(r2)); 3994 __ str(r0, MemOperand(r2));
3995 // Fall through to the next label. 3995 // Fall through to the next label.
(...skipping 661 matching lines...)
4657 __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); 4657 __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4658 __ str(r1, MemOperand(sp, 0)); 4658 __ str(r1, MemOperand(sp, 0));
4659 __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize)); 4659 __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
4660 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); 4660 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
4661 __ str(r3, MemOperand(sp, 1 * kPointerSize)); 4661 __ str(r3, MemOperand(sp, 1 * kPointerSize));
4662 4662
4663 // Try the new space allocation. Start out with computing the size 4663 // Try the new space allocation. Start out with computing the size
4664 // of the arguments object and the elements array in words. 4664 // of the arguments object and the elements array in words.
4665 Label add_arguments_object; 4665 Label add_arguments_object;
4666 __ bind(&try_allocate); 4666 __ bind(&try_allocate);
4667 __ cmp(r1, Operand(0, RelocInfo::NONE)); 4667 __ cmp(r1, Operand::Zero());
4668 __ b(eq, &add_arguments_object); 4668 __ b(eq, &add_arguments_object);
4669 __ mov(r1, Operand(r1, LSR, kSmiTagSize)); 4669 __ mov(r1, Operand(r1, LSR, kSmiTagSize));
4670 __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize)); 4670 __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
4671 __ bind(&add_arguments_object); 4671 __ bind(&add_arguments_object);
4672 __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize)); 4672 __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
4673 4673
4674 // Do the allocation of both objects in one go. 4674 // Do the allocation of both objects in one go.
4675 __ AllocateInNewSpace(r1, 4675 __ AllocateInNewSpace(r1,
4676 r0, 4676 r0,
4677 r2, 4677 r2,
(...skipping 12 matching lines...)
4690 __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize); 4690 __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize);
4691 4691
4692 // Get the length (smi tagged) and set that as an in-object property too. 4692 // Get the length (smi tagged) and set that as an in-object property too.
4693 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); 4693 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
4694 __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); 4694 __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
4695 __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + 4695 __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize +
4696 Heap::kArgumentsLengthIndex * kPointerSize)); 4696 Heap::kArgumentsLengthIndex * kPointerSize));
4697 4697
4698 // If there are no actual arguments, we're done. 4698 // If there are no actual arguments, we're done.
4699 Label done; 4699 Label done;
4700 __ cmp(r1, Operand(0, RelocInfo::NONE)); 4700 __ cmp(r1, Operand::Zero());
4701 __ b(eq, &done); 4701 __ b(eq, &done);
4702 4702
4703 // Get the parameters pointer from the stack. 4703 // Get the parameters pointer from the stack.
4704 __ ldr(r2, MemOperand(sp, 1 * kPointerSize)); 4704 __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
4705 4705
4706 // Set up the elements pointer in the allocated arguments object and 4706 // Set up the elements pointer in the allocated arguments object and
4707 // initialize the header in the elements fixed array. 4707 // initialize the header in the elements fixed array.
4708 __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict)); 4708 __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict));
4709 __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset)); 4709 __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
4710 __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex); 4710 __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
4711 __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset)); 4711 __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
4712 __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset)); 4712 __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
4713 // Untag the length for the loop. 4713 // Untag the length for the loop.
4714 __ mov(r1, Operand(r1, LSR, kSmiTagSize)); 4714 __ mov(r1, Operand(r1, LSR, kSmiTagSize));
4715 4715
4716 // Copy the fixed array slots. 4716 // Copy the fixed array slots.
4717 Label loop; 4717 Label loop;
4718 // Set up r4 to point to the first array slot. 4718 // Set up r4 to point to the first array slot.
4719 __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); 4719 __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4720 __ bind(&loop); 4720 __ bind(&loop);
4721 // Pre-decrement r2 with kPointerSize on each iteration. 4721 // Pre-decrement r2 with kPointerSize on each iteration.
4722 // Pre-decrement in order to skip receiver. 4722 // Pre-decrement in order to skip receiver.
4723 __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex)); 4723 __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
4724 // Post-increment r4 with kPointerSize on each iteration. 4724 // Post-increment r4 with kPointerSize on each iteration.
4725 __ str(r3, MemOperand(r4, kPointerSize, PostIndex)); 4725 __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
4726 __ sub(r1, r1, Operand(1)); 4726 __ sub(r1, r1, Operand(1));
4727 __ cmp(r1, Operand(0, RelocInfo::NONE)); 4727 __ cmp(r1, Operand::Zero());
4728 __ b(ne, &loop); 4728 __ b(ne, &loop);
4729 4729
4730 // Return and remove the on-stack parameters. 4730 // Return and remove the on-stack parameters.
4731 __ bind(&done); 4731 __ bind(&done);
4732 __ add(sp, sp, Operand(3 * kPointerSize)); 4732 __ add(sp, sp, Operand(3 * kPointerSize));
4733 __ Ret(); 4733 __ Ret();
4734 4734
4735 // Do the runtime call to allocate the arguments object. 4735 // Do the runtime call to allocate the arguments object.
4736 __ bind(&runtime); 4736 __ bind(&runtime);
4737 __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1); 4737 __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
(...skipping 31 matching lines...)
4769 Register last_match_info_elements = r6; 4769 Register last_match_info_elements = r6;
4770 4770
4771 // Ensure that a RegExp stack is allocated. 4771 // Ensure that a RegExp stack is allocated.
4772 Isolate* isolate = masm->isolate(); 4772 Isolate* isolate = masm->isolate();
4773 ExternalReference address_of_regexp_stack_memory_address = 4773 ExternalReference address_of_regexp_stack_memory_address =
4774 ExternalReference::address_of_regexp_stack_memory_address(isolate); 4774 ExternalReference::address_of_regexp_stack_memory_address(isolate);
4775 ExternalReference address_of_regexp_stack_memory_size = 4775 ExternalReference address_of_regexp_stack_memory_size =
4776 ExternalReference::address_of_regexp_stack_memory_size(isolate); 4776 ExternalReference::address_of_regexp_stack_memory_size(isolate);
4777 __ mov(r0, Operand(address_of_regexp_stack_memory_size)); 4777 __ mov(r0, Operand(address_of_regexp_stack_memory_size));
4778 __ ldr(r0, MemOperand(r0, 0)); 4778 __ ldr(r0, MemOperand(r0, 0));
4779 __ cmp(r0, Operand(0)); 4779 __ cmp(r0, Operand::Zero());
4780 __ b(eq, &runtime); 4780 __ b(eq, &runtime);
4781 4781
4782 // Check that the first argument is a JSRegExp object. 4782 // Check that the first argument is a JSRegExp object.
4783 __ ldr(r0, MemOperand(sp, kJSRegExpOffset)); 4783 __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
4784 STATIC_ASSERT(kSmiTag == 0); 4784 STATIC_ASSERT(kSmiTag == 0);
4785 __ JumpIfSmi(r0, &runtime); 4785 __ JumpIfSmi(r0, &runtime);
4786 __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE); 4786 __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
4787 __ b(ne, &runtime); 4787 __ b(ne, &runtime);
4788 4788
4789 // Check that the RegExp has been compiled (data contains a fixed array). 4789 // Check that the RegExp has been compiled (data contains a fixed array).
(...skipping 61 matching lines...)
4851 __ b(ne, &runtime); 4851 __ b(ne, &runtime);
4852 // Check that the last match info has space for the capture registers and the 4852 // Check that the last match info has space for the capture registers and the
4853 // additional information. 4853 // additional information.
4854 __ ldr(r0, 4854 __ ldr(r0,
4855 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset)); 4855 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
4856 __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead)); 4856 __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
4857 __ cmp(r2, Operand(r0, ASR, kSmiTagSize)); 4857 __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
4858 __ b(gt, &runtime); 4858 __ b(gt, &runtime);
4859 4859
4860 // Reset offset for possibly sliced string. 4860 // Reset offset for possibly sliced string.
4861 __ mov(r9, Operand(0)); 4861 __ mov(r9, Operand::Zero());
4862 // subject: Subject string 4862 // subject: Subject string
4863 // regexp_data: RegExp data (FixedArray) 4863 // regexp_data: RegExp data (FixedArray)
4864 // Check the representation and encoding of the subject string. 4864 // Check the representation and encoding of the subject string.
4865 Label seq_string; 4865 Label seq_string;
4866 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); 4866 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
4867 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); 4867 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
4868 // First check for flat string. None of the following string type tests will 4868 // First check for flat string. None of the following string type tests will
4869 // succeed if subject is not a string or a short external string. 4869 // succeed if subject is not a string or a short external string.
4870 __ and_(r1, 4870 __ and_(r1,
4871 r0, 4871 r0,
(...skipping 100 matching lines...)
4972 // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area. 4972 // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
4973 __ mov(r0, Operand(address_of_regexp_stack_memory_address)); 4973 __ mov(r0, Operand(address_of_regexp_stack_memory_address));
4974 __ ldr(r0, MemOperand(r0, 0)); 4974 __ ldr(r0, MemOperand(r0, 0));
4975 __ mov(r2, Operand(address_of_regexp_stack_memory_size)); 4975 __ mov(r2, Operand(address_of_regexp_stack_memory_size));
4976 __ ldr(r2, MemOperand(r2, 0)); 4976 __ ldr(r2, MemOperand(r2, 0));
4977 __ add(r0, r0, Operand(r2)); 4977 __ add(r0, r0, Operand(r2));
4978 __ str(r0, MemOperand(sp, 3 * kPointerSize)); 4978 __ str(r0, MemOperand(sp, 3 * kPointerSize));
4979 4979
4980 // Argument 6: Set the number of capture registers to zero to force global 4980 // Argument 6: Set the number of capture registers to zero to force global
4981 // regexps to behave as non-global. This does not affect non-global regexps. 4981 // regexps to behave as non-global. This does not affect non-global regexps.
4982 __ mov(r0, Operand(0)); 4982 __ mov(r0, Operand::Zero());
4983 __ str(r0, MemOperand(sp, 2 * kPointerSize)); 4983 __ str(r0, MemOperand(sp, 2 * kPointerSize));
4984 4984
4985 // Argument 5 (sp[4]): static offsets vector buffer. 4985 // Argument 5 (sp[4]): static offsets vector buffer.
4986 __ mov(r0, 4986 __ mov(r0,
4987 Operand(ExternalReference::address_of_static_offsets_vector(isolate))); 4987 Operand(ExternalReference::address_of_static_offsets_vector(isolate)));
4988 __ str(r0, MemOperand(sp, 1 * kPointerSize)); 4988 __ str(r0, MemOperand(sp, 1 * kPointerSize));
4989 4989
4990 // For arguments 4 and 3 get string length, calculate start of string data and 4990 // For arguments 4 and 3 get string length, calculate start of string data and
4991 // calculate the shift of the index (0 for ASCII and 1 for two byte). 4991 // calculate the shift of the index (0 for ASCII and 1 for two byte).
4992 __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag)); 4992 __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
(...skipping 234 matching lines...)
5227 __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset)); 5227 __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
5228 // Fill contents of fixed-array with undefined. 5228 // Fill contents of fixed-array with undefined.
5229 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); 5229 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
5230 __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); 5230 __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5231 // Fill fixed array elements with undefined. 5231 // Fill fixed array elements with undefined.
5232 // r0: JSArray, tagged. 5232 // r0: JSArray, tagged.
5233 // r2: undefined. 5233 // r2: undefined.
5234 // r3: Start of elements in FixedArray. 5234 // r3: Start of elements in FixedArray.
5235 // r5: Number of elements to fill. 5235 // r5: Number of elements to fill.
5236 Label loop; 5236 Label loop;
5237 __ cmp(r5, Operand(0)); 5237 __ cmp(r5, Operand::Zero());
5238 __ bind(&loop); 5238 __ bind(&loop);
5239 __ b(le, &done); // Jump if r5 is negative or zero. 5239 __ b(le, &done); // Jump if r5 is negative or zero.
5240 __ sub(r5, r5, Operand(1), SetCC); 5240 __ sub(r5, r5, Operand(1), SetCC);
5241 __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2)); 5241 __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
5242 __ jmp(&loop); 5242 __ jmp(&loop);
5243 5243
5244 __ bind(&done); 5244 __ bind(&done);
5245 __ add(sp, sp, Operand(3 * kPointerSize)); 5245 __ add(sp, sp, Operand(3 * kPointerSize));
5246 __ Ret(); 5246 __ Ret();
5247 5247
(...skipping 105 matching lines...)
5353 // object (undefined) so no write barrier is needed. 5353 // object (undefined) so no write barrier is needed.
5354 ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()), 5354 ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
5355 masm->isolate()->heap()->undefined_value()); 5355 masm->isolate()->heap()->undefined_value());
5356 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); 5356 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5357 __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset)); 5357 __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
5358 } 5358 }
5359 // Check for function proxy. 5359 // Check for function proxy.
5360 __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE)); 5360 __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
5361 __ b(ne, &non_function); 5361 __ b(ne, &non_function);
5362 __ push(r1); // put proxy as additional argument 5362 __ push(r1); // put proxy as additional argument
5363 __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE)); 5363 __ mov(r0, Operand(argc_ + 1));
5364 __ mov(r2, Operand(0, RelocInfo::NONE)); 5364 __ mov(r2, Operand::Zero());
5365 __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY); 5365 __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
5366 __ SetCallKind(r5, CALL_AS_METHOD); 5366 __ SetCallKind(r5, CALL_AS_METHOD);
5367 { 5367 {
5368 Handle<Code> adaptor = 5368 Handle<Code> adaptor =
5369 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); 5369 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
5370 __ Jump(adaptor, RelocInfo::CODE_TARGET); 5370 __ Jump(adaptor, RelocInfo::CODE_TARGET);
5371 } 5371 }
5372 5372
5373 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead 5373 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
5374 // of the original receiver from the call site). 5374 // of the original receiver from the call site).
5375 __ bind(&non_function); 5375 __ bind(&non_function);
5376 __ str(r1, MemOperand(sp, argc_ * kPointerSize)); 5376 __ str(r1, MemOperand(sp, argc_ * kPointerSize));
5377 __ mov(r0, Operand(argc_)); // Set up the number of arguments. 5377 __ mov(r0, Operand(argc_)); // Set up the number of arguments.
5378 __ mov(r2, Operand(0, RelocInfo::NONE)); 5378 __ mov(r2, Operand::Zero());
5379 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION); 5379 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
5380 __ SetCallKind(r5, CALL_AS_METHOD); 5380 __ SetCallKind(r5, CALL_AS_METHOD);
5381 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), 5381 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
5382 RelocInfo::CODE_TARGET); 5382 RelocInfo::CODE_TARGET);
5383 } 5383 }
5384 5384
5385 5385
5386 void CallConstructStub::Generate(MacroAssembler* masm) { 5386 void CallConstructStub::Generate(MacroAssembler* masm) {
5387 // r0 : number of arguments 5387 // r0 : number of arguments
5388 // r1 : the function to call 5388 // r1 : the function to call
(...skipping 22 matching lines...)
5411 __ bind(&slow); 5411 __ bind(&slow);
5412 __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE)); 5412 __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
5413 __ b(ne, &non_function_call); 5413 __ b(ne, &non_function_call);
5414 __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR); 5414 __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
5415 __ jmp(&do_call); 5415 __ jmp(&do_call);
5416 5416
5417 __ bind(&non_function_call); 5417 __ bind(&non_function_call);
5418 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); 5418 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
5419 __ bind(&do_call); 5419 __ bind(&do_call);
5420 // Set expected number of arguments to zero (not changing r0). 5420 // Set expected number of arguments to zero (not changing r0).
5421 __ mov(r2, Operand(0, RelocInfo::NONE)); 5421 __ mov(r2, Operand::Zero());
5422 __ SetCallKind(r5, CALL_AS_METHOD); 5422 __ SetCallKind(r5, CALL_AS_METHOD);
5423 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), 5423 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
5424 RelocInfo::CODE_TARGET); 5424 RelocInfo::CODE_TARGET);
5425 } 5425 }
5426 5426
5427 5427
5428 // Unfortunately you have to run without snapshots to see most of these 5428 // Unfortunately you have to run without snapshots to see most of these
5429 // names in the profile since most compare stubs end up in the snapshot. 5429 // names in the profile since most compare stubs end up in the snapshot.
5430 void CompareStub::PrintName(StringStream* stream) { 5430 void CompareStub::PrintName(StringStream* stream) {
5431 ASSERT((lhs_.is(r0) && rhs_.is(r1)) || 5431 ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
(...skipping 190 matching lines...)
5622 Register count, 5622 Register count,
5623 Register scratch, 5623 Register scratch,
5624 bool ascii) { 5624 bool ascii) {
5625 Label loop; 5625 Label loop;
5626 Label done; 5626 Label done;
5627 // This loop just copies one character at a time, as it is only used for very 5627 // This loop just copies one character at a time, as it is only used for very
5628 // short strings. 5628 // short strings.
5629 if (!ascii) { 5629 if (!ascii) {
5630 __ add(count, count, Operand(count), SetCC); 5630 __ add(count, count, Operand(count), SetCC);
5631 } else { 5631 } else {
5632 __ cmp(count, Operand(0, RelocInfo::NONE)); 5632 __ cmp(count, Operand::Zero());
5633 } 5633 }
5634 __ b(eq, &done); 5634 __ b(eq, &done);
5635 5635
5636 __ bind(&loop); 5636 __ bind(&loop);
5637 __ ldrb(scratch, MemOperand(src, 1, PostIndex)); 5637 __ ldrb(scratch, MemOperand(src, 1, PostIndex));
5638 // Perform sub between load and dependent store to get the load time to 5638 // Perform sub between load and dependent store to get the load time to
5639 // complete. 5639 // complete.
5640 __ sub(count, count, Operand(1), SetCC); 5640 __ sub(count, count, Operand(1), SetCC);
5641 __ strb(scratch, MemOperand(dest, 1, PostIndex)); 5641 __ strb(scratch, MemOperand(dest, 1, PostIndex));
5642 // last iteration. 5642 // last iteration.
(...skipping 34 matching lines...)
5677 // Ensure that reading an entire aligned word containing the last character 5677 // Ensure that reading an entire aligned word containing the last character
5678 // of a string will not read outside the allocated area (because we pad up 5678 // of a string will not read outside the allocated area (because we pad up
5679 // to kObjectAlignment). 5679 // to kObjectAlignment).
5680 STATIC_ASSERT(kObjectAlignment >= kReadAlignment); 5680 STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
5681 // Assumes word reads and writes are little endian. 5681 // Assumes word reads and writes are little endian.
5682 // Nothing to do for zero characters. 5682 // Nothing to do for zero characters.
5683 Label done; 5683 Label done;
5684 if (!ascii) { 5684 if (!ascii) {
5685 __ add(count, count, Operand(count), SetCC); 5685 __ add(count, count, Operand(count), SetCC);
5686 } else { 5686 } else {
5687 __ cmp(count, Operand(0, RelocInfo::NONE)); 5687 __ cmp(count, Operand::Zero());
5688 } 5688 }
5689 __ b(eq, &done); 5689 __ b(eq, &done);
5690 5690
5691 // Assume that you cannot read (or write) unaligned. 5691 // Assume that you cannot read (or write) unaligned.
5692 Label byte_loop; 5692 Label byte_loop;
5693 // Must copy at least eight bytes, otherwise just do it one byte at a time. 5693 // Must copy at least eight bytes, otherwise just do it one byte at a time.
5694 __ cmp(count, Operand(8)); 5694 __ cmp(count, Operand(8));
5695 __ add(count, dest, Operand(count)); 5695 __ add(count, dest, Operand(count));
5696 Register limit = count; // Read until src equals this. 5696 Register limit = count; // Read until src equals this.
5697 __ b(lt, &byte_loop); 5697 __ b(lt, &byte_loop);
(...skipping 497 matching lines...)
6195 __ cmp(length, scratch2); 6195 __ cmp(length, scratch2);
6196 __ b(eq, &check_zero_length); 6196 __ b(eq, &check_zero_length);
6197 __ bind(&strings_not_equal); 6197 __ bind(&strings_not_equal);
6198 __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL))); 6198 __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL)));
6199 __ Ret(); 6199 __ Ret();
6200 6200
6201 // Check if the length is zero. 6201 // Check if the length is zero.
6202 Label compare_chars; 6202 Label compare_chars;
6203 __ bind(&check_zero_length); 6203 __ bind(&check_zero_length);
6204 STATIC_ASSERT(kSmiTag == 0); 6204 STATIC_ASSERT(kSmiTag == 0);
6205 __ cmp(length, Operand(0)); 6205 __ cmp(length, Operand::Zero());
6206 __ b(ne, &compare_chars); 6206 __ b(ne, &compare_chars);
6207 __ mov(r0, Operand(Smi::FromInt(EQUAL))); 6207 __ mov(r0, Operand(Smi::FromInt(EQUAL)));
6208 __ Ret(); 6208 __ Ret();
6209 6209
6210 // Compare characters. 6210 // Compare characters.
6211 __ bind(&compare_chars); 6211 __ bind(&compare_chars);
6212 GenerateAsciiCharsCompareLoop(masm, 6212 GenerateAsciiCharsCompareLoop(masm,
6213 left, right, length, scratch2, scratch3, 6213 left, right, length, scratch2, scratch3,
6214 &strings_not_equal); 6214 &strings_not_equal);
6215 6215
(...skipping 12 matching lines...)
6228 Register scratch4) { 6228 Register scratch4) {
6229 Label result_not_equal, compare_lengths; 6229 Label result_not_equal, compare_lengths;
6230 // Find minimum length and length difference. 6230 // Find minimum length and length difference.
6231 __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset)); 6231 __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
6232 __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset)); 6232 __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
6233 __ sub(scratch3, scratch1, Operand(scratch2), SetCC); 6233 __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
6234 Register length_delta = scratch3; 6234 Register length_delta = scratch3;
6235 __ mov(scratch1, scratch2, LeaveCC, gt); 6235 __ mov(scratch1, scratch2, LeaveCC, gt);
6236 Register min_length = scratch1; 6236 Register min_length = scratch1;
6237 STATIC_ASSERT(kSmiTag == 0); 6237 STATIC_ASSERT(kSmiTag == 0);
6238 __ cmp(min_length, Operand(0)); 6238 __ cmp(min_length, Operand::Zero());
6239 __ b(eq, &compare_lengths); 6239 __ b(eq, &compare_lengths);
6240 6240
6241 // Compare loop. 6241 // Compare loop.
6242 GenerateAsciiCharsCompareLoop(masm, 6242 GenerateAsciiCharsCompareLoop(masm,
6243 left, right, min_length, scratch2, scratch4, 6243 left, right, min_length, scratch2, scratch4,
6244 &result_not_equal); 6244 &result_not_equal);
6245 6245
6246 // Compare lengths - strings up to min-length are equal. 6246 // Compare lengths - strings up to min-length are equal.
6247 __ bind(&compare_lengths); 6247 __ bind(&compare_lengths);
6248 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); 6248 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
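The strategy in this hunk is the usual flat-string comparison: compare the shared prefix character by character, and only when the prefixes are equal let the length difference decide. A compact sketch in plain C++ (illustrative, using std::string in place of the flat ASCII sequences):

#include <cstring>
#include <string>

// -1, 0, 1 playing the role of the stub's LESS/EQUAL/GREATER smi results.
int CompareFlatStrings(const std::string& left, const std::string& right) {
  size_t min_length = left.size() < right.size() ? left.size() : right.size();
  int prefix = std::memcmp(left.data(), right.data(), min_length);
  if (prefix != 0) return prefix < 0 ? -1 : 1;        // characters decide first
  if (left.size() == right.size()) return 0;          // identical contents
  return left.size() < right.size() ? -1 : 1;         // then the length delta
}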
(...skipping 777 matching lines...)
7026 7026
7027 const int spill_mask = 7027 const int spill_mask =
7028 (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() | 7028 (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
7029 r2.bit() | r1.bit() | r0.bit()); 7029 r2.bit() | r1.bit() | r0.bit());
7030 7030
7031 __ stm(db_w, sp, spill_mask); 7031 __ stm(db_w, sp, spill_mask);
7032 __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); 7032 __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
7033 __ mov(r1, Operand(Handle<String>(name))); 7033 __ mov(r1, Operand(Handle<String>(name)));
7034 StringDictionaryLookupStub stub(NEGATIVE_LOOKUP); 7034 StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
7035 __ CallStub(&stub); 7035 __ CallStub(&stub);
7036 __ cmp(r0, Operand(0)); 7036 __ cmp(r0, Operand::Zero());
7037 __ ldm(ia_w, sp, spill_mask); 7037 __ ldm(ia_w, sp, spill_mask);
7038 7038
7039 __ b(eq, done); 7039 __ b(eq, done);
7040 __ b(ne, miss); 7040 __ b(ne, miss);
7041 } 7041 }
7042 7042
7043 7043
7044 // Probe the string dictionary in the |elements| register. Jump to the 7044 // Probe the string dictionary in the |elements| register. Jump to the
7045 // |done| label if a property with the given name is found. Jump to 7045 // |done| label if a property with the given name is found. Jump to
7046 // the |miss| label otherwise. 7046 // the |miss| label otherwise.
(...skipping 55 matching lines...)
7102 if (name.is(r0)) { 7102 if (name.is(r0)) {
7103 ASSERT(!elements.is(r1)); 7103 ASSERT(!elements.is(r1));
7104 __ Move(r1, name); 7104 __ Move(r1, name);
7105 __ Move(r0, elements); 7105 __ Move(r0, elements);
7106 } else { 7106 } else {
7107 __ Move(r0, elements); 7107 __ Move(r0, elements);
7108 __ Move(r1, name); 7108 __ Move(r1, name);
7109 } 7109 }
7110 StringDictionaryLookupStub stub(POSITIVE_LOOKUP); 7110 StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
7111 __ CallStub(&stub); 7111 __ CallStub(&stub);
7112 __ cmp(r0, Operand(0)); 7112 __ cmp(r0, Operand::Zero());
7113 __ mov(scratch2, Operand(r2)); 7113 __ mov(scratch2, Operand(r2));
7114 __ ldm(ia_w, sp, spill_mask); 7114 __ ldm(ia_w, sp, spill_mask);
7115 7115
7116 __ b(ne, done); 7116 __ b(ne, done);
7117 __ b(eq, miss); 7117 __ b(eq, miss);
7118 } 7118 }
7119 7119
7120 7120
7121 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { 7121 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
7122 // This stub overrides SometimesSetsUpAFrame() to return false. That means 7122 // This stub overrides SometimesSetsUpAFrame() to return false. That means
(...skipping 500 matching lines...)
7623 7623
7624 __ Pop(lr, r5, r1); 7624 __ Pop(lr, r5, r1);
7625 __ Ret(); 7625 __ Ret();
7626 } 7626 }
7627 7627
7628 #undef __ 7628 #undef __
7629 7629
7630 } } // namespace v8::internal 7630 } } // namespace v8::internal
7631 7631
7632 #endif // V8_TARGET_ARCH_ARM 7632 #endif // V8_TARGET_ARCH_ARM