Chromium Code Reviews

Side by Side Diff: src/arm/code-stubs-arm.cc

Issue 11745030: ARM: generate integer zero in a uniform manner. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 7 years, 11 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 542 matching lines...)
553 553
554 Label not_special; 554 Label not_special;
555 // Convert from Smi to integer. 555 // Convert from Smi to integer.
556 __ mov(source_, Operand(source_, ASR, kSmiTagSize)); 556 __ mov(source_, Operand(source_, ASR, kSmiTagSize));
557 // Move sign bit from source to destination. This works because the sign bit 557 // Move sign bit from source to destination. This works because the sign bit
558 // in the exponent word of the double has the same position and polarity as 558 // in the exponent word of the double has the same position and polarity as
559 // the 2's complement sign bit in a Smi. 559 // the 2's complement sign bit in a Smi.
560 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); 560 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
561 __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC); 561 __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
562 // Subtract from 0 if source was negative. 562 // Subtract from 0 if source was negative.
563 __ rsb(source_, source_, Operand(0, RelocInfo::NONE32), LeaveCC, ne); 563 __ rsb(source_, source_, Operand::Zero(), LeaveCC, ne);
564 564
565 // We have -1, 0 or 1, which we treat specially. Register source_ contains 565 // We have -1, 0 or 1, which we treat specially. Register source_ contains
566 // absolute value: it is either equal to 1 (special case of -1 and 1), 566 // absolute value: it is either equal to 1 (special case of -1 and 1),
567 // greater than 1 (not a special case) or less than 1 (special case of 0). 567 // greater than 1 (not a special case) or less than 1 (special case of 0).
568 __ cmp(source_, Operand(1)); 568 __ cmp(source_, Operand(1));
569 __ b(gt, &not_special); 569 __ b(gt, &not_special);
570 570
571 // For 1 or -1 we need to or in the 0 exponent (biased to 1023). 571 // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
572 const uint32_t exponent_word_for_1 = 572 const uint32_t exponent_word_for_1 =
573 HeapNumber::kExponentBias << HeapNumber::kExponentShift; 573 HeapNumber::kExponentBias << HeapNumber::kExponentShift;
574 __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq); 574 __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
575 // 1, 0 and -1 all have 0 for the second word. 575 // 1, 0 and -1 all have 0 for the second word.
576 __ mov(mantissa, Operand(0, RelocInfo::NONE32)); 576 __ mov(mantissa, Operand::Zero());
577 __ Ret(); 577 __ Ret();
578 578
579 __ bind(&not_special); 579 __ bind(&not_special);
580 // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5. 580 // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5.
581 // Gets the wrong answer for 0, but we already checked for that case above. 581 // Gets the wrong answer for 0, but we already checked for that case above.
582 __ CountLeadingZeros(zeros_, source_, mantissa); 582 __ CountLeadingZeros(zeros_, source_, mantissa);
583 // Compute exponent and or it into the exponent register. 583 // Compute exponent and or it into the exponent register.
584 // We use mantissa as a scratch register here. Use a fudge factor to 584 // We use mantissa as a scratch register here. Use a fudge factor to
585 // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts 585 // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts
586 // that fit in the ARM's constant field. 586 // that fit in the ARM's constant field.
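For reference, the sign-bit move and the -1/0/1 special-casing above are easy to check on the host. A minimal C++ sketch of the same bit manipulation, assuming IEEE-754 doubles and the V8 constants kSignMask == 0x80000000u, kExponentBias == 1023 and kExponentShift == 20 (SmiToExponentWord is a hypothetical name for this sketch):

#include <cstdint>
#include <cstdio>

const uint32_t kSignMask = 0x80000000u;  // sign bit of the double's high word
const int kExponentBias = 1023;
const int kExponentShift = 20;           // exponent position in the high word

// High (sign + exponent) word for the special cases -1, 0 and 1; the
// mantissa word is 0 for all three.
uint32_t SmiToExponentWord(int32_t value) {
  // The 2's-complement sign bit has the same position and polarity as the
  // double's sign bit, so a plain AND moves it across.
  uint32_t exponent = static_cast<uint32_t>(value) & kSignMask;
  if (value < 0) value = -value;         // the conditional rsb from 0
  if (value == 1) {                      // covers both -1 and 1
    exponent |= static_cast<uint32_t>(kExponentBias) << kExponentShift;
  }
  return exponent;
}

int main() {
  // Expect bff00000 00000000 3ff00000: the high words of -1.0, 0.0 and 1.0.
  std::printf("%08x %08x %08x\n", SmiToExponentWord(-1),
              SmiToExponentWord(0), SmiToExponentWord(1));
}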
(...skipping 547 matching lines...)
1134 __ cmp(the_int_, Operand(0x80000000u)); 1134 __ cmp(the_int_, Operand(0x80000000u));
1135 __ b(eq, &max_negative_int); 1135 __ b(eq, &max_negative_int);
1136 // Set up the correct exponent in scratch_. All non-Smi int32s have the same exponent. 1136 // Set up the correct exponent in scratch_. All non-Smi int32s have the same exponent.
1137 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). 1137 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
1138 uint32_t non_smi_exponent = 1138 uint32_t non_smi_exponent =
1139 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; 1139 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
1140 __ mov(scratch_, Operand(non_smi_exponent)); 1140 __ mov(scratch_, Operand(non_smi_exponent));
1141 // Set the sign bit in scratch_ if the value was negative. 1141 // Set the sign bit in scratch_ if the value was negative.
1142 __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs); 1142 __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
1143 // Subtract from 0 if the value was negative. 1143 // Subtract from 0 if the value was negative.
1144 __ rsb(the_int_, the_int_, Operand(0, RelocInfo::NONE32), LeaveCC, cs); 1144 __ rsb(the_int_, the_int_, Operand::Zero(), LeaveCC, cs);
1145 // We should be masking the implicit first digit of the mantissa away here, 1145 // We should be masking the implicit first digit of the mantissa away here,
1146 // but it just ends up combining harmlessly with the last digit of the 1146 // but it just ends up combining harmlessly with the last digit of the
1147 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get 1147 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
1148 // the most significant 1 to hit the last bit of the 12 bit sign and exponent. 1148 // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
1149 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0); 1149 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
1150 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; 1150 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
1151 __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance)); 1151 __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
1152 __ str(scratch_, FieldMemOperand(the_heap_number_, 1152 __ str(scratch_, FieldMemOperand(the_heap_number_,
1153 HeapNumber::kExponentOffset)); 1153 HeapNumber::kExponentOffset));
1154 __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance)); 1154 __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
1155 __ str(scratch_, FieldMemOperand(the_heap_number_, 1155 __ str(scratch_, FieldMemOperand(the_heap_number_,
1156 HeapNumber::kMantissaOffset)); 1156 HeapNumber::kMantissaOffset));
1157 __ Ret(); 1157 __ Ret();
1158 1158
1159 __ bind(&max_negative_int); 1159 __ bind(&max_negative_int);
1160 // The max negative int32 is stored as a positive number in the mantissa of 1160 // The max negative int32 is stored as a positive number in the mantissa of
1161 // a double because it uses a sign bit instead of using two's complement. 1161 // a double because it uses a sign bit instead of using two's complement.
1162 // The actual mantissa bits stored are all 0 because the implicit most 1162 // The actual mantissa bits stored are all 0 because the implicit most
1163 // significant 1 bit is not stored. 1163 // significant 1 bit is not stored.
1164 non_smi_exponent += 1 << HeapNumber::kExponentShift; 1164 non_smi_exponent += 1 << HeapNumber::kExponentShift;
1165 __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent)); 1165 __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
1166 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset)); 1166 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
1167 __ mov(ip, Operand(0, RelocInfo::NONE32)); 1167 __ mov(ip, Operand::Zero());
1168 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset)); 1168 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
1169 __ Ret(); 1169 __ Ret();
1170 } 1170 }
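The encoding performed by this stub (biased exponent 30 for the general case, shift by kNonMantissaBitsInTopWord - 2, and the 0x80000000 special case) can be mirrored in portable C++. A sketch assuming little-endian IEEE-754 doubles; WriteInt32Bits is a hypothetical name:

#include <cstdint>
#include <cstdio>
#include <cstring>

double WriteInt32Bits(uint32_t value) {  // value holds a non-Smi int32
  // All such ints are 1.xxx * 2^30, so the biased exponent is 1023 + 30.
  uint32_t exponent = (1023u + 30u) << 20;
  if (value == 0x80000000u) {
    // Max negative int: -1.0 * 2^31, i.e. sign bit, exponent 1023 + 31 and
    // an all-zero stored mantissa.
    uint64_t bits =
        static_cast<uint64_t>(0x80000000u | ((1023u + 31u) << 20)) << 32;
    double d;
    std::memcpy(&d, &bits, sizeof d);
    return d;
  }
  if (static_cast<int32_t>(value) < 0) {
    exponent |= 0x80000000u;             // set the sign bit
    value = 0u - value;                  // the conditional rsb
  }
  const int shift = 12 - 2;  // kNonMantissaBitsInTopWord - 2; the implicit
                             // leading 1 lands on an exponent bit that is
                             // already 1, so the OR below is harmless.
  uint64_t hi = exponent | (value >> shift);
  uint64_t lo = (static_cast<uint64_t>(value) << (32 - shift)) & 0xFFFFFFFFu;
  uint64_t bits = (hi << 32) | lo;
  double d;
  std::memcpy(&d, &bits, sizeof d);
  return d;
}

int main() {
  // Expect 1073741824.0 -1073741825.0 -2147483648.0.
  std::printf("%.1f %.1f %.1f\n", WriteInt32Bits(0x40000000u),
              WriteInt32Bits(static_cast<uint32_t>(-1073741825)),
              WriteInt32Bits(0x80000000u));
}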
1171 1171
1172 1172
1173 // Handle the case where the lhs and rhs are the same object. 1173 // Handle the case where the lhs and rhs are the same object.
1174 // Equality is almost reflexive (everything but NaN), so this is a test 1174 // Equality is almost reflexive (everything but NaN), so this is a test
1175 // for "identity and not NaN". 1175 // for "identity and not NaN".
1176 static void EmitIdenticalObjectComparison(MacroAssembler* masm, 1176 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
1177 Label* slow, 1177 Label* slow,
(...skipping 195 matching lines...)
1373 lhs_exponent, 1373 lhs_exponent,
1374 HeapNumber::kExponentShift, 1374 HeapNumber::kExponentShift,
1375 HeapNumber::kExponentBits); 1375 HeapNumber::kExponentBits);
1376 // NaNs have all-one exponents so they sign extend to -1. 1376 // NaNs have all-one exponents so they sign extend to -1.
1377 __ cmp(r4, Operand(-1)); 1377 __ cmp(r4, Operand(-1));
1378 __ b(ne, lhs_not_nan); 1378 __ b(ne, lhs_not_nan);
1379 __ mov(r4, 1379 __ mov(r4,
1380 Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), 1380 Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
1381 SetCC); 1381 SetCC);
1382 __ b(ne, &one_is_nan); 1382 __ b(ne, &one_is_nan);
1383 __ cmp(lhs_mantissa, Operand(0, RelocInfo::NONE32)); 1383 __ cmp(lhs_mantissa, Operand::Zero());
1384 __ b(ne, &one_is_nan); 1384 __ b(ne, &one_is_nan);
1385 1385
1386 __ bind(lhs_not_nan); 1386 __ bind(lhs_not_nan);
1387 __ Sbfx(r4, 1387 __ Sbfx(r4,
1388 rhs_exponent, 1388 rhs_exponent,
1389 HeapNumber::kExponentShift, 1389 HeapNumber::kExponentShift,
1390 HeapNumber::kExponentBits); 1390 HeapNumber::kExponentBits);
1391 // NaNs have all-one exponents so they sign extend to -1. 1391 // NaNs have all-one exponents so they sign extend to -1.
1392 __ cmp(r4, Operand(-1)); 1392 __ cmp(r4, Operand(-1));
1393 __ b(ne, &neither_is_nan); 1393 __ b(ne, &neither_is_nan);
1394 __ mov(r4, 1394 __ mov(r4,
1395 Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), 1395 Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
1396 SetCC); 1396 SetCC);
1397 __ b(ne, &one_is_nan); 1397 __ b(ne, &one_is_nan);
1398 __ cmp(rhs_mantissa, Operand(0, RelocInfo::NONE32)); 1398 __ cmp(rhs_mantissa, Operand::Zero());
1399 __ b(eq, &neither_is_nan); 1399 __ b(eq, &neither_is_nan);
1400 1400
1401 __ bind(&one_is_nan); 1401 __ bind(&one_is_nan);
1402 // NaN comparisons always fail. 1402 // NaN comparisons always fail.
1403 // Load whatever we need in r0 to make the comparison fail. 1403 // Load whatever we need in r0 to make the comparison fail.
1404 if (cond == lt || cond == le) { 1404 if (cond == lt || cond == le) {
1405 __ mov(r0, Operand(GREATER)); 1405 __ mov(r0, Operand(GREATER));
1406 } else { 1406 } else {
1407 __ mov(r0, Operand(LESS)); 1407 __ mov(r0, Operand(LESS));
1408 } 1408 }
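The NaN test above leans on the fact that an all-ones exponent field sign-extends to -1 under Sbfx. The same check in portable C++, given the two words of a double (IsNaNWords is a hypothetical name):

#include <cstdint>

// A double is NaN iff its 11 exponent bits are all ones and at least one
// mantissa bit is set, in either word.
bool IsNaNWords(uint32_t exponent_word, uint32_t mantissa_word) {
  // Signed bitfield extract of bits 20..30, like Sbfx: shift the sign bit
  // out, then arithmetic-shift the 11-bit field back down. An all-ones
  // field reads as -1. (Arithmetic >> on signed values is what mainstream
  // compilers do, though it is only guaranteed from C++20 on.)
  int32_t exponent = static_cast<int32_t>(exponent_word << 1) >> 21;
  if (exponent != -1) return false;
  // kNonMantissaBitsInTopWord == 12: shifting sign and exponent out leaves
  // the top word's mantissa bits.
  uint32_t top_mantissa = exponent_word << 12;
  return (top_mantissa | mantissa_word) != 0;
}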
(...skipping 506 matching lines...)
1915 __ JumpIfSmi(tos_, &patch); 1915 __ JumpIfSmi(tos_, &patch);
1916 } 1916 }
1917 1917
1918 if (types_.NeedsMap()) { 1918 if (types_.NeedsMap()) {
1919 __ ldr(map, FieldMemOperand(tos_, HeapObject::kMapOffset)); 1919 __ ldr(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
1920 1920
1921 if (types_.CanBeUndetectable()) { 1921 if (types_.CanBeUndetectable()) {
1922 __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset)); 1922 __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
1923 __ tst(ip, Operand(1 << Map::kIsUndetectable)); 1923 __ tst(ip, Operand(1 << Map::kIsUndetectable));
1924 // Undetectable -> false. 1924 // Undetectable -> false.
1925 __ mov(tos_, Operand(0, RelocInfo::NONE32), LeaveCC, ne); 1925 __ mov(tos_, Operand::Zero(), LeaveCC, ne);
1926 __ Ret(ne); 1926 __ Ret(ne);
1927 } 1927 }
1928 } 1928 }
1929 1929
1930 if (types_.Contains(SPEC_OBJECT)) { 1930 if (types_.Contains(SPEC_OBJECT)) {
1931 // Spec object -> true. 1931 // Spec object -> true.
1932 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE); 1932 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
1933 // tos_ contains the correct non-zero return value already. 1933 // tos_ contains the correct non-zero return value already.
1934 __ Ret(ge); 1934 __ Ret(ge);
1935 } 1935 }
(...skipping 12 matching lines...)
1948 __ b(ne, &not_heap_number); 1948 __ b(ne, &not_heap_number);
1949 1949
1950 if (CpuFeatures::IsSupported(VFP2)) { 1950 if (CpuFeatures::IsSupported(VFP2)) {
1951 CpuFeatures::Scope scope(VFP2); 1951 CpuFeatures::Scope scope(VFP2);
1952 1952
1953 __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset)); 1953 __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
1954 __ VFPCompareAndSetFlags(d1, 0.0); 1954 __ VFPCompareAndSetFlags(d1, 0.0);
1955 // "tos_" is a register, and contains a non zero value by default. 1955 // "tos_" is a register, and contains a non zero value by default.
1956 // Hence we only need to overwrite "tos_" with zero to return false for 1956 // Hence we only need to overwrite "tos_" with zero to return false for
1957 // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true. 1957 // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
1958 __ mov(tos_, Operand(0, RelocInfo::NONE32), LeaveCC, eq); // for FP_ZERO 1958 __ mov(tos_, Operand::Zero(), LeaveCC, eq); // for FP_ZERO
1959 __ mov(tos_, Operand(0, RelocInfo::NONE32), LeaveCC, vs); // for FP_NAN 1959 __ mov(tos_, Operand::Zero(), LeaveCC, vs); // for FP_NAN
1960 } else { 1960 } else {
1961 Label done, not_nan, not_zero; 1961 Label done, not_nan, not_zero;
1962 __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset)); 1962 __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
1963 // -0 maps to false: 1963 // -0 maps to false:
1964 __ bic( 1964 __ bic(
1965 temp, temp, Operand(HeapNumber::kSignMask, RelocInfo::NONE32), SetCC); 1965 temp, temp, Operand(HeapNumber::kSignMask, RelocInfo::NONE32), SetCC);
1966 __ b(ne, &not_zero); 1966 __ b(ne, &not_zero);
1967 // If exponent word is zero then the answer depends on the mantissa word. 1967 // If exponent word is zero then the answer depends on the mantissa word.
1968 __ ldr(tos_, FieldMemOperand(tos_, HeapNumber::kMantissaOffset)); 1968 __ ldr(tos_, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
1969 __ jmp(&done); 1969 __ jmp(&done);
1970 1970
1971 // Check for NaN. 1971 // Check for NaN.
1972 __ bind(&not_zero); 1972 __ bind(&not_zero);
1973 // We already zeroed the sign bit, now shift out the mantissa so we only 1973 // We already zeroed the sign bit, now shift out the mantissa so we only
1974 // have the exponent left. 1974 // have the exponent left.
1975 __ mov(temp, Operand(temp, LSR, HeapNumber::kMantissaBitsInTopWord)); 1975 __ mov(temp, Operand(temp, LSR, HeapNumber::kMantissaBitsInTopWord));
1976 unsigned int shifted_exponent_mask = 1976 unsigned int shifted_exponent_mask =
1977 HeapNumber::kExponentMask >> HeapNumber::kMantissaBitsInTopWord; 1977 HeapNumber::kExponentMask >> HeapNumber::kMantissaBitsInTopWord;
1978 __ cmp(temp, Operand(shifted_exponent_mask, RelocInfo::NONE32)); 1978 __ cmp(temp, Operand(shifted_exponent_mask, RelocInfo::NONE32));
1979 __ b(ne, &not_nan); // If exponent is not 0x7ff then it can't be a NaN. 1979 __ b(ne, &not_nan); // If exponent is not 0x7ff then it can't be a NaN.
1980 1980
1981 // Reload exponent word. 1981 // Reload exponent word.
1982 __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset)); 1982 __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
1983 __ tst(temp, Operand(HeapNumber::kMantissaMask, RelocInfo::NONE32)); 1983 __ tst(temp, Operand(HeapNumber::kMantissaMask, RelocInfo::NONE32));
1984 // If mantissa is not zero then we have a NaN, so return 0. 1984 // If mantissa is not zero then we have a NaN, so return 0.
1985 __ mov(tos_, Operand(0, RelocInfo::NONE32), LeaveCC, ne); 1985 __ mov(tos_, Operand::Zero(), LeaveCC, ne);
1986 __ b(ne, &done); 1986 __ b(ne, &done);
1987 1987
1988 // Load mantissa word. 1988 // Load mantissa word.
1989 __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kMantissaOffset)); 1989 __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
1990 __ cmp(temp, Operand(0, RelocInfo::NONE32)); 1990 __ cmp(temp, Operand::Zero());
1991 // If mantissa is not zero then we have a NaN, so return 0. 1991 // If mantissa is not zero then we have a NaN, so return 0.
1992 __ mov(tos_, Operand(0, RelocInfo::NONE32), LeaveCC, ne); 1992 __ mov(tos_, Operand::Zero(), LeaveCC, ne);
1993 __ b(ne, &done); 1993 __ b(ne, &done);
1994 1994
1995 __ bind(&not_nan); 1995 __ bind(&not_nan);
1996 __ mov(tos_, Operand(1, RelocInfo::NONE32)); 1996 __ mov(tos_, Operand(1, RelocInfo::NONE32));
1997 __ bind(&done); 1997 __ bind(&done);
1998 } 1998 }
1999 __ Ret(); 1999 __ Ret();
2000 __ bind(&not_heap_number); 2000 __ bind(&not_heap_number);
2001 } 2001 }
2002 2002
2003 __ bind(&patch); 2003 __ bind(&patch);
2004 GenerateTypeTransition(masm); 2004 GenerateTypeTransition(masm);
2005 } 2005 }
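Both branches of the heap-number check above compute the same predicate: a heap number converts to false iff it is +0, -0 or NaN. A host-side sketch of the non-VFP word tests, operating on the raw 64 bits of the double (HeapNumberToBooleanBits is a hypothetical name):

#include <cstdint>

bool HeapNumberToBooleanBits(uint64_t bits) {
  bits &= ~(1ull << 63);        // clear the sign bit, so -0 behaves like +0
  if (bits == 0) return false;  // +0 and -0 map to false
  uint32_t exponent = static_cast<uint32_t>(bits >> 52) & 0x7ff;
  uint64_t mantissa = bits & ((1ull << 52) - 1);
  if (exponent == 0x7ff && mantissa != 0) return false;  // NaN maps to false
  return true;                  // everything else maps to true
}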
2006 2006
2007 2007
2008 void ToBooleanStub::CheckOddball(MacroAssembler* masm, 2008 void ToBooleanStub::CheckOddball(MacroAssembler* masm,
2009 Type type, 2009 Type type,
2010 Heap::RootListIndex value, 2010 Heap::RootListIndex value,
2011 bool result) { 2011 bool result) {
2012 if (types_.Contains(type)) { 2012 if (types_.Contains(type)) {
2013 // If we see an expected oddball, return its ToBoolean value in tos_. 2013 // If we see an expected oddball, return its ToBoolean value in tos_.
2014 __ LoadRoot(ip, value); 2014 __ LoadRoot(ip, value);
2015 __ cmp(tos_, ip); 2015 __ cmp(tos_, ip);
2016 // The value of a root is never NULL, so we can avoid loading a non-null 2016 // The value of a root is never NULL, so we can avoid loading a non-null
2017 // value into tos_ when we want to return 'true'. 2017 // value into tos_ when we want to return 'true'.
2018 if (!result) { 2018 if (!result) {
2019 __ mov(tos_, Operand(0, RelocInfo::NONE32), LeaveCC, eq); 2019 __ mov(tos_, Operand::Zero(), LeaveCC, eq);
2020 } 2020 }
2021 __ Ret(eq); 2021 __ Ret(eq);
2022 } 2022 }
2023 } 2023 }
2024 2024
2025 2025
2026 void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) { 2026 void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
2027 if (!tos_.is(r3)) { 2027 if (!tos_.is(r3)) {
2028 __ mov(r3, Operand(tos_)); 2028 __ mov(r3, Operand(tos_));
2029 } 2029 }
(...skipping 124 matching lines...)
2154 void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm, 2154 void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
2155 Label* non_smi, 2155 Label* non_smi,
2156 Label* slow) { 2156 Label* slow) {
2157 __ JumpIfNotSmi(r0, non_smi); 2157 __ JumpIfNotSmi(r0, non_smi);
2158 2158
2159 // The result of negating zero or the smallest negative smi is not a smi. 2159 // The result of negating zero or the smallest negative smi is not a smi.
2160 __ bic(ip, r0, Operand(0x80000000), SetCC); 2160 __ bic(ip, r0, Operand(0x80000000), SetCC);
2161 __ b(eq, slow); 2161 __ b(eq, slow);
2162 2162
2163 // Return '0 - value'. 2163 // Return '0 - value'.
2164 __ rsb(r0, r0, Operand(0, RelocInfo::NONE32)); 2164 __ rsb(r0, r0, Operand::Zero());
2165 __ Ret(); 2165 __ Ret();
2166 } 2166 }
2167 2167
2168 2168
2169 void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm, 2169 void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
2170 Label* non_smi) { 2170 Label* non_smi) {
2171 __ JumpIfNotSmi(r0, non_smi); 2171 __ JumpIfNotSmi(r0, non_smi);
2172 2172
2173 // Flip bits and revert inverted smi-tag. 2173 // Flip bits and revert inverted smi-tag.
2174 __ mvn(r0, Operand(r0)); 2174 __ mvn(r0, Operand(r0));
(...skipping 247 matching lines...)
2422 // Do multiplication 2422 // Do multiplication
2423 // scratch1 = lower 32 bits of ip * left. 2423 // scratch1 = lower 32 bits of ip * left.
2424 // scratch2 = higher 32 bits of ip * left. 2424 // scratch2 = higher 32 bits of ip * left.
2425 __ smull(scratch1, scratch2, left, ip); 2425 __ smull(scratch1, scratch2, left, ip);
2426 // Check for overflowing the smi range - no overflow if higher 33 bits of 2426 // Check for overflowing the smi range - no overflow if higher 33 bits of
2427 // the result are identical. 2427 // the result are identical.
2428 __ mov(ip, Operand(scratch1, ASR, 31)); 2428 __ mov(ip, Operand(scratch1, ASR, 31));
2429 __ cmp(ip, Operand(scratch2)); 2429 __ cmp(ip, Operand(scratch2));
2430 __ b(ne, &not_smi_result); 2430 __ b(ne, &not_smi_result);
2431 // Go slow on zero result to handle -0. 2431 // Go slow on zero result to handle -0.
2432 __ cmp(scratch1, Operand(0)); 2432 __ cmp(scratch1, Operand::Zero());
2433 __ mov(right, Operand(scratch1), LeaveCC, ne); 2433 __ mov(right, Operand(scratch1), LeaveCC, ne);
2434 __ Ret(ne); 2434 __ Ret(ne);
2435 // We need -0 if we were multiplying a negative number with 0 to get 0. 2435 // We need -0 if we were multiplying a negative number with 0 to get 0.
2436 // We know one of them was zero. 2436 // We know one of them was zero.
2437 __ add(scratch2, right, Operand(left), SetCC); 2437 __ add(scratch2, right, Operand(left), SetCC);
2438 __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl); 2438 __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
2439 __ Ret(pl); // Return smi 0 if the non-zero one was positive. 2439 __ Ret(pl); // Return smi 0 if the non-zero one was positive.
2440 // We fall through here if we multiplied a negative number with 0, because 2440 // We fall through here if we multiplied a negative number with 0, because
2441 // that would mean we should produce -0. 2441 // that would mean we should produce -0.
2442 break; 2442 break;
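The MUL fast path can be phrased as plain arithmetic: smull yields the full 64-bit product, overflow is detected by comparing the high word against the sign extension of the low word, and a zero product only stays a smi when the non-zero factor was not negative. A sketch on untagged 32-bit values (SmiMul is a hypothetical name):

#include <cstdint>

bool SmiMul(int32_t left, int32_t right, int32_t* result) {
  int64_t product = static_cast<int64_t>(left) * right;  // what smull computes
  int32_t lo = static_cast<int32_t>(product);
  int32_t hi = static_cast<int32_t>(product >> 32);
  // No overflow iff the top 33 bits agree, i.e. hi equals the sign
  // extension of lo (assuming arithmetic >> on signed values).
  if (hi != (lo >> 31)) return false;
  if (lo == 0) {
    // One factor was zero, so their sum is the other factor; if it is
    // negative the result should be -0, which is not a smi.
    if (left + right < 0) return false;
  }
  *result = lo;
  return true;
}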
2443 case Token::DIV: { 2443 case Token::DIV: {
2444 Label div_with_sdiv; 2444 Label div_with_sdiv;
2445 2445
2446 // Check for 0 divisor. 2446 // Check for 0 divisor.
2447 __ cmp(right, Operand(0)); 2447 __ cmp(right, Operand::Zero());
2448 __ b(eq, &not_smi_result); 2448 __ b(eq, &not_smi_result);
2449 2449
2450 // Check for power of two on the right hand side. 2450 // Check for power of two on the right hand side.
2451 __ sub(scratch1, right, Operand(1)); 2451 __ sub(scratch1, right, Operand(1));
2452 __ tst(scratch1, right); 2452 __ tst(scratch1, right);
2453 if (CpuFeatures::IsSupported(SUDIV)) { 2453 if (CpuFeatures::IsSupported(SUDIV)) {
2454 __ b(ne, &div_with_sdiv); 2454 __ b(ne, &div_with_sdiv);
2455 // Check for no remainder. 2455 // Check for no remainder.
2456 __ tst(left, scratch1); 2456 __ tst(left, scratch1);
2457 __ b(ne, &not_smi_result); 2457 __ b(ne, &not_smi_result);
2458 // Check for positive left hand side. 2458 // Check for positive left hand side.
2459 __ cmp(left, Operand(0)); 2459 __ cmp(left, Operand::Zero());
2460 __ b(mi, &div_with_sdiv); 2460 __ b(mi, &div_with_sdiv);
2461 } else { 2461 } else {
2462 __ b(ne, &not_smi_result); 2462 __ b(ne, &not_smi_result);
2463 // Check for positive and no remainder. 2463 // Check for positive and no remainder.
2464 __ orr(scratch2, scratch1, Operand(0x80000000u)); 2464 __ orr(scratch2, scratch1, Operand(0x80000000u));
2465 __ tst(left, scratch2); 2465 __ tst(left, scratch2);
2466 __ b(ne, &not_smi_result); 2466 __ b(ne, &not_smi_result);
2467 } 2467 }
2468 2468
2469 // Perform division by shifting. 2469 // Perform division by shifting.
2470 __ CountLeadingZeros(scratch1, scratch1, scratch2); 2470 __ CountLeadingZeros(scratch1, scratch1, scratch2);
2471 __ rsb(scratch1, scratch1, Operand(31)); 2471 __ rsb(scratch1, scratch1, Operand(31));
2472 __ mov(right, Operand(left, LSR, scratch1)); 2472 __ mov(right, Operand(left, LSR, scratch1));
2473 __ Ret(); 2473 __ Ret();
2474 2474
2475 if (CpuFeatures::IsSupported(SUDIV)) { 2475 if (CpuFeatures::IsSupported(SUDIV)) {
2476 Label result_not_zero; 2476 Label result_not_zero;
2477 2477
2478 __ bind(&div_with_sdiv); 2478 __ bind(&div_with_sdiv);
2479 // Do division. 2479 // Do division.
2480 __ sdiv(scratch1, left, right); 2480 __ sdiv(scratch1, left, right);
2481 // Check that the remainder is zero. 2481 // Check that the remainder is zero.
2482 __ mls(scratch2, scratch1, right, left); 2482 __ mls(scratch2, scratch1, right, left);
2483 __ cmp(scratch2, Operand(0)); 2483 __ cmp(scratch2, Operand::Zero());
2484 __ b(ne, &not_smi_result); 2484 __ b(ne, &not_smi_result);
2485 // Check for negative zero result. 2485 // Check for negative zero result.
2486 __ cmp(scratch1, Operand(0)); 2486 __ cmp(scratch1, Operand::Zero());
2487 __ b(ne, &result_not_zero); 2487 __ b(ne, &result_not_zero);
2488 __ cmp(right, Operand(0)); 2488 __ cmp(right, Operand::Zero());
2489 __ b(lt, &not_smi_result); 2489 __ b(lt, &not_smi_result);
2490 __ bind(&result_not_zero); 2490 __ bind(&result_not_zero);
2491 // Check for the corner case of dividing the most negative smi by -1. 2491 // Check for the corner case of dividing the most negative smi by -1.
2492 __ cmp(scratch1, Operand(0x40000000)); 2492 __ cmp(scratch1, Operand(0x40000000));
2493 __ b(eq, &not_smi_result); 2493 __ b(eq, &not_smi_result);
2494 // Tag and return the result. 2494 // Tag and return the result.
2495 __ SmiTag(right, scratch1); 2495 __ SmiTag(right, scratch1);
2496 __ Ret(); 2496 __ Ret();
2497 } 2497 }
2498 break; 2498 break;
2499 } 2499 }
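The sdiv branch of the DIV case bails to the slow path whenever the mathematical result is not a smi. An equivalent host-side sketch on untagged values (SmiDiv is a hypothetical name; the INT32_MIN guard only avoids C++ undefined behaviour and cannot trigger for smi payloads):

#include <cstdint>

bool SmiDiv(int32_t left, int32_t right, int32_t* result) {
  if (right == 0) return false;                        // division by zero
  if (left == INT32_MIN && right == -1) return false;  // UB guard, see above
  int32_t quotient = left / right;                     // sdiv
  if (left - quotient * right != 0) return false;      // mls: remainder not 0
  if (quotient == 0 && right < 0) return false;        // 0 / negative is -0
  if (quotient == 0x40000000) return false;  // -2^30 / -1 leaves the smi range
  *result = quotient;
  return true;
}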
2500 case Token::MOD: { 2500 case Token::MOD: {
2501 Label modulo_with_sdiv; 2501 Label modulo_with_sdiv;
2502 2502
2503 if (CpuFeatures::IsSupported(SUDIV)) { 2503 if (CpuFeatures::IsSupported(SUDIV)) {
2504 // Check for x % 0. 2504 // Check for x % 0.
2505 __ cmp(right, Operand(0)); 2505 __ cmp(right, Operand::Zero());
2506 __ b(eq, &not_smi_result); 2506 __ b(eq, &not_smi_result);
2507 2507
2508 // Check for two positive smis. 2508 // Check for two positive smis.
2509 __ orr(scratch1, left, Operand(right)); 2509 __ orr(scratch1, left, Operand(right));
2510 __ tst(scratch1, Operand(0x80000000u)); 2510 __ tst(scratch1, Operand(0x80000000u));
2511 __ b(ne, &modulo_with_sdiv); 2511 __ b(ne, &modulo_with_sdiv);
2512 2512
2513 // Check for power of two on the right hand side. 2513 // Check for power of two on the right hand side.
2514 __ sub(scratch1, right, Operand(1)); 2514 __ sub(scratch1, right, Operand(1));
2515 __ tst(scratch1, right); 2515 __ tst(scratch1, right);
(...skipping 12 matching lines...)
2528 __ and_(right, left, Operand(scratch1)); 2528 __ and_(right, left, Operand(scratch1));
2529 __ Ret(); 2529 __ Ret();
2530 2530
2531 if (CpuFeatures::IsSupported(SUDIV)) { 2531 if (CpuFeatures::IsSupported(SUDIV)) {
2532 __ bind(&modulo_with_sdiv); 2532 __ bind(&modulo_with_sdiv);
2533 __ mov(scratch2, right); 2533 __ mov(scratch2, right);
2534 // Perform modulus with sdiv and mls. 2534 // Perform modulus with sdiv and mls.
2535 __ sdiv(scratch1, left, right); 2535 __ sdiv(scratch1, left, right);
2536 __ mls(right, scratch1, right, left); 2536 __ mls(right, scratch1, right, left);
2537 // Return if the result is not 0. 2537 // Return if the result is not 0.
2538 __ cmp(right, Operand(0)); 2538 __ cmp(right, Operand::Zero());
2539 __ Ret(ne); 2539 __ Ret(ne);
2540 // The result is 0, check for -0 case. 2540 // The result is 0, check for -0 case.
2541 __ cmp(left, Operand(0)); 2541 __ cmp(left, Operand::Zero());
2542 __ Ret(pl); 2542 __ Ret(pl);
2543 // This is a -0 case, restore the value of right. 2543 // This is a -0 case, restore the value of right.
2544 __ mov(right, scratch2); 2544 __ mov(right, scratch2);
2545 // We fall through here to not_smi_result to produce -0. 2545 // We fall through here to not_smi_result to produce -0.
2546 } 2546 }
2547 break; 2547 break;
2548 } 2548 }
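The sdiv/mls modulus path, likewise as host C++ (SmiMod is a hypothetical name; the right == -1 guard avoids C++ undefined behaviour for INT32_MIN and cannot trigger for smi payloads):

#include <cstdint>

bool SmiMod(int32_t left, int32_t right, int32_t* result) {
  if (right == 0) return false;        // x % 0: go slow
  if (right == -1) {                   // UB guard; left % -1 is always 0
    if (left < 0) return false;        // the result would be -0
    *result = 0;
    return true;
  }
  int32_t remainder = left - (left / right) * right;  // sdiv, then mls
  if (remainder != 0) {                // a non-zero remainder is returned as-is
    *result = remainder;
    return true;
  }
  // Remainder is 0: with a negative left the result should be -0, which is
  // not a smi, so fall back to the slow path.
  if (left < 0) return false;
  *result = 0;
  return true;
}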
2549 case Token::BIT_OR: 2549 case Token::BIT_OR:
2550 __ orr(right, left, Operand(right)); 2550 __ orr(right, left, Operand(right));
2551 __ Ret(); 2551 __ Ret();
(...skipping 919 matching lines...)
3471 Isolate* isolate = masm->isolate(); 3471 Isolate* isolate = masm->isolate();
3472 ExternalReference cache_array = 3472 ExternalReference cache_array =
3473 ExternalReference::transcendental_cache_array_address(isolate); 3473 ExternalReference::transcendental_cache_array_address(isolate);
3474 __ mov(cache_entry, Operand(cache_array)); 3474 __ mov(cache_entry, Operand(cache_array));
3475 // cache_entry points to cache array. 3475 // cache_entry points to cache array.
3476 int cache_array_index 3476 int cache_array_index
3477 = type_ * sizeof(isolate->transcendental_cache()->caches_[0]); 3477 = type_ * sizeof(isolate->transcendental_cache()->caches_[0]);
3478 __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index)); 3478 __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index));
3479 // r0 points to the cache for the type type_. 3479 // r0 points to the cache for the type type_.
3480 // If NULL, the cache hasn't been initialized yet, so go through runtime. 3480 // If NULL, the cache hasn't been initialized yet, so go through runtime.
3481 __ cmp(cache_entry, Operand(0, RelocInfo::NONE32)); 3481 __ cmp(cache_entry, Operand::Zero());
3482 __ b(eq, &invalid_cache); 3482 __ b(eq, &invalid_cache);
3483 3483
3484 #ifdef DEBUG 3484 #ifdef DEBUG
3485 // Check that the layout of cache elements matches expectations. 3485 // Check that the layout of cache elements matches expectations.
3486 { TranscendentalCache::SubCache::Element test_elem[2]; 3486 { TranscendentalCache::SubCache::Element test_elem[2];
3487 char* elem_start = reinterpret_cast<char*>(&test_elem[0]); 3487 char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
3488 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]); 3488 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
3489 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0])); 3489 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
3490 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); 3490 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
3491 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); 3491 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
(...skipping 290 matching lines...)
3782 if (exponent_type_ == INTEGER) { 3782 if (exponent_type_ == INTEGER) {
3783 __ mov(scratch, exponent); 3783 __ mov(scratch, exponent);
3784 } else { 3784 } else {
3785 // Exponent has previously been stored into scratch as untagged integer. 3785 // Exponent has previously been stored into scratch as untagged integer.
3786 __ mov(exponent, scratch); 3786 __ mov(exponent, scratch);
3787 } 3787 }
3788 __ vmov(double_scratch, double_base); // Back up base. 3788 __ vmov(double_scratch, double_base); // Back up base.
3789 __ vmov(double_result, 1.0, scratch2); 3789 __ vmov(double_result, 1.0, scratch2);
3790 3790
3791 // Get absolute value of exponent. 3791 // Get absolute value of exponent.
3792 __ cmp(scratch, Operand(0)); 3792 __ cmp(scratch, Operand::Zero());
3793 __ mov(scratch2, Operand(0), LeaveCC, mi); 3793 __ mov(scratch2, Operand::Zero(), LeaveCC, mi);
3794 __ sub(scratch, scratch2, scratch, LeaveCC, mi); 3794 __ sub(scratch, scratch2, scratch, LeaveCC, mi);
3795 3795
3796 Label while_true; 3796 Label while_true;
3797 __ bind(&while_true); 3797 __ bind(&while_true);
3798 __ mov(scratch, Operand(scratch, ASR, 1), SetCC); 3798 __ mov(scratch, Operand(scratch, ASR, 1), SetCC);
3799 __ vmul(double_result, double_result, double_scratch, cs); 3799 __ vmul(double_result, double_result, double_scratch, cs);
3800 __ vmul(double_scratch, double_scratch, double_scratch, ne); 3800 __ vmul(double_scratch, double_scratch, double_scratch, ne);
3801 __ b(ne, &while_true); 3801 __ b(ne, &while_true);
3802 3802
3803 __ cmp(exponent, Operand(0)); 3803 __ cmp(exponent, Operand::Zero());
3804 __ b(ge, &done); 3804 __ b(ge, &done);
3805 __ vmov(double_scratch, 1.0, scratch); 3805 __ vmov(double_scratch, 1.0, scratch);
3806 __ vdiv(double_result, double_scratch, double_result); 3806 __ vdiv(double_result, double_scratch, double_result);
3807 // Test whether result is zero. Bail out to check for subnormal result. 3807 // Test whether result is zero. Bail out to check for subnormal result.
3808 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases. 3808 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
3809 __ VFPCompareAndSetFlags(double_result, 0.0); 3809 __ VFPCompareAndSetFlags(double_result, 0.0);
3810 __ b(ne, &done); 3810 __ b(ne, &done);
3811 // double_exponent may not contain the exponent value if the input was a 3811 // double_exponent may not contain the exponent value if the input was a
3812 // smi. We set it to the exponent value before bailing out. 3812 // smi. We set it to the exponent value before bailing out.
3813 __ vmov(single_scratch, exponent); 3813 __ vmov(single_scratch, exponent);
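The while_true loop above is binary exponentiation: each ASR shifts the low exponent bit into the carry flag, a set carry multiplies the result by the current square, and the square is re-squared while bits remain. A scalar sketch of the same control flow (PowBySquaring is a hypothetical name):

#include <cstdio>

double PowBySquaring(double base, int exponent) {
  double result = 1.0;
  double square = base;              // double_scratch backs up the base
  unsigned bits = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                               : static_cast<unsigned>(exponent);
  while (bits != 0) {
    if (bits & 1) result *= square;  // vmul(double_result, ...) on carry set
    square *= square;                // vmul(double_scratch, ...) while ne
    bits >>= 1;                      // mov(scratch, ASR 1, SetCC)
  }
  // Negative exponents divide at the end; as the comment above notes,
  // subnormal results still need the slow path because x^-y == (1/x)^y
  // does not hold for them.
  if (exponent < 0) result = 1.0 / result;
  return result;
}

int main() {
  // Expect 1024.0000 0.2500.
  std::printf("%.4f %.4f\n", PowBySquaring(2.0, 10), PowBySquaring(2.0, -2));
}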
(...skipping 952 matching lines...)
4766 __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); 4766 __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4767 __ str(r1, MemOperand(sp, 0)); 4767 __ str(r1, MemOperand(sp, 0));
4768 __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize)); 4768 __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
4769 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); 4769 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
4770 __ str(r3, MemOperand(sp, 1 * kPointerSize)); 4770 __ str(r3, MemOperand(sp, 1 * kPointerSize));
4771 4771
4772 // Try the new space allocation. Start out with computing the size 4772 // Try the new space allocation. Start out with computing the size
4773 // of the arguments object and the elements array in words. 4773 // of the arguments object and the elements array in words.
4774 Label add_arguments_object; 4774 Label add_arguments_object;
4775 __ bind(&try_allocate); 4775 __ bind(&try_allocate);
4776 __ cmp(r1, Operand(0, RelocInfo::NONE32)); 4776 __ cmp(r1, Operand::Zero());
4777 __ b(eq, &add_arguments_object); 4777 __ b(eq, &add_arguments_object);
4778 __ mov(r1, Operand(r1, LSR, kSmiTagSize)); 4778 __ mov(r1, Operand(r1, LSR, kSmiTagSize));
4779 __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize)); 4779 __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
4780 __ bind(&add_arguments_object); 4780 __ bind(&add_arguments_object);
4781 __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize)); 4781 __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
4782 4782
4783 // Do the allocation of both objects in one go. 4783 // Do the allocation of both objects in one go.
4784 __ AllocateInNewSpace(r1, 4784 __ AllocateInNewSpace(r1,
4785 r0, 4785 r0,
4786 r2, 4786 r2,
(...skipping 12 matching lines...)
4799 __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize); 4799 __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize);
4800 4800
4801 // Get the length (smi tagged) and set that as an in-object property too. 4801 // Get the length (smi tagged) and set that as an in-object property too.
4802 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); 4802 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
4803 __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); 4803 __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
4804 __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + 4804 __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize +
4805 Heap::kArgumentsLengthIndex * kPointerSize)); 4805 Heap::kArgumentsLengthIndex * kPointerSize));
4806 4806
4807 // If there are no actual arguments, we're done. 4807 // If there are no actual arguments, we're done.
4808 Label done; 4808 Label done;
4809 __ cmp(r1, Operand(0, RelocInfo::NONE32)); 4809 __ cmp(r1, Operand::Zero());
4810 __ b(eq, &done); 4810 __ b(eq, &done);
4811 4811
4812 // Get the parameters pointer from the stack. 4812 // Get the parameters pointer from the stack.
4813 __ ldr(r2, MemOperand(sp, 1 * kPointerSize)); 4813 __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
4814 4814
4815 // Set up the elements pointer in the allocated arguments object and 4815 // Set up the elements pointer in the allocated arguments object and
4816 // initialize the header in the elements fixed array. 4816 // initialize the header in the elements fixed array.
4817 __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict)); 4817 __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict));
4818 __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset)); 4818 __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
4819 __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex); 4819 __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
4820 __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset)); 4820 __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
4821 __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset)); 4821 __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
4822 // Untag the length for the loop. 4822 // Untag the length for the loop.
4823 __ mov(r1, Operand(r1, LSR, kSmiTagSize)); 4823 __ mov(r1, Operand(r1, LSR, kSmiTagSize));
4824 4824
4825 // Copy the fixed array slots. 4825 // Copy the fixed array slots.
4826 Label loop; 4826 Label loop;
4827 // Set up r4 to point to the first array slot. 4827 // Set up r4 to point to the first array slot.
4828 __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); 4828 __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4829 __ bind(&loop); 4829 __ bind(&loop);
4830 // Pre-decrement r2 with kPointerSize on each iteration. 4830 // Pre-decrement r2 with kPointerSize on each iteration.
4831 // Pre-decrement in order to skip receiver. 4831 // Pre-decrement in order to skip receiver.
4832 __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex)); 4832 __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
4833 // Post-increment r4 with kPointerSize on each iteration. 4833 // Post-increment r4 with kPointerSize on each iteration.
4834 __ str(r3, MemOperand(r4, kPointerSize, PostIndex)); 4834 __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
4835 __ sub(r1, r1, Operand(1)); 4835 __ sub(r1, r1, Operand(1));
4836 __ cmp(r1, Operand(0, RelocInfo::NONE32)); 4836 __ cmp(r1, Operand::Zero());
4837 __ b(ne, &loop); 4837 __ b(ne, &loop);
4838 4838
4839 // Return and remove the on-stack parameters. 4839 // Return and remove the on-stack parameters.
4840 __ bind(&done); 4840 __ bind(&done);
4841 __ add(sp, sp, Operand(3 * kPointerSize)); 4841 __ add(sp, sp, Operand(3 * kPointerSize));
4842 __ Ret(); 4842 __ Ret();
4843 4843
4844 // Do the runtime call to allocate the arguments object. 4844 // Do the runtime call to allocate the arguments object.
4845 __ bind(&runtime); 4845 __ bind(&runtime);
4846 __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1); 4846 __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
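The copy loop in this stub walks the two arrays in opposite directions: the NegPreIndex load steps downwards from the parameters pointer, skipping the receiver slot first, while the PostIndex store walks upwards through the fixed array. An equivalent C++ sketch, assuming src starts at the receiver's slot (CopyArgumentSlots is a hypothetical name):

#include <cstddef>
#include <cstdint>

void CopyArgumentSlots(const uintptr_t* src, uintptr_t* dest, size_t count) {
  while (count-- > 0) {
    // ldr with NegPreIndex: pre-decrement, then load (skips the receiver on
    // the first iteration); str with PostIndex: store, then post-increment.
    *dest++ = *--src;
  }
}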
(...skipping 31 matching lines...)
4878 Register last_match_info_elements = r6; 4878 Register last_match_info_elements = r6;
4879 4879
4880 // Ensure that a RegExp stack is allocated. 4880 // Ensure that a RegExp stack is allocated.
4881 Isolate* isolate = masm->isolate(); 4881 Isolate* isolate = masm->isolate();
4882 ExternalReference address_of_regexp_stack_memory_address = 4882 ExternalReference address_of_regexp_stack_memory_address =
4883 ExternalReference::address_of_regexp_stack_memory_address(isolate); 4883 ExternalReference::address_of_regexp_stack_memory_address(isolate);
4884 ExternalReference address_of_regexp_stack_memory_size = 4884 ExternalReference address_of_regexp_stack_memory_size =
4885 ExternalReference::address_of_regexp_stack_memory_size(isolate); 4885 ExternalReference::address_of_regexp_stack_memory_size(isolate);
4886 __ mov(r0, Operand(address_of_regexp_stack_memory_size)); 4886 __ mov(r0, Operand(address_of_regexp_stack_memory_size));
4887 __ ldr(r0, MemOperand(r0, 0)); 4887 __ ldr(r0, MemOperand(r0, 0));
4888 __ cmp(r0, Operand(0)); 4888 __ cmp(r0, Operand::Zero());
4889 __ b(eq, &runtime); 4889 __ b(eq, &runtime);
4890 4890
4891 // Check that the first argument is a JSRegExp object. 4891 // Check that the first argument is a JSRegExp object.
4892 __ ldr(r0, MemOperand(sp, kJSRegExpOffset)); 4892 __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
4893 STATIC_ASSERT(kSmiTag == 0); 4893 STATIC_ASSERT(kSmiTag == 0);
4894 __ JumpIfSmi(r0, &runtime); 4894 __ JumpIfSmi(r0, &runtime);
4895 __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE); 4895 __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
4896 __ b(ne, &runtime); 4896 __ b(ne, &runtime);
4897 4897
4898 // Check that the RegExp has been compiled (data contains a fixed array). 4898 // Check that the RegExp has been compiled (data contains a fixed array).
(...skipping 61 matching lines...)
4960 __ b(ne, &runtime); 4960 __ b(ne, &runtime);
4961 // Check that the last match info has space for the capture registers and the 4961 // Check that the last match info has space for the capture registers and the
4962 // additional information. 4962 // additional information.
4963 __ ldr(r0, 4963 __ ldr(r0,
4964 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset)); 4964 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
4965 __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead)); 4965 __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
4966 __ cmp(r2, Operand(r0, ASR, kSmiTagSize)); 4966 __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
4967 __ b(gt, &runtime); 4967 __ b(gt, &runtime);
4968 4968
4969 // Reset offset for possibly sliced string. 4969 // Reset offset for possibly sliced string.
4970 __ mov(r9, Operand(0)); 4970 __ mov(r9, Operand::Zero());
4971 // subject: Subject string 4971 // subject: Subject string
4972 // regexp_data: RegExp data (FixedArray) 4972 // regexp_data: RegExp data (FixedArray)
4973 // Check the representation and encoding of the subject string. 4973 // Check the representation and encoding of the subject string.
4974 Label seq_string; 4974 Label seq_string;
4975 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); 4975 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
4976 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); 4976 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
4977 // First check for flat string. None of the following string type tests will 4977 // First check for flat string. None of the following string type tests will
4978 // succeed if subject is not a string or a short external string. 4978 // succeed if subject is not a string or a short external string.
4979 __ and_(r1, 4979 __ and_(r1,
4980 r0, 4980 r0,
(...skipping 100 matching lines...)
5081 // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area. 5081 // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
5082 __ mov(r0, Operand(address_of_regexp_stack_memory_address)); 5082 __ mov(r0, Operand(address_of_regexp_stack_memory_address));
5083 __ ldr(r0, MemOperand(r0, 0)); 5083 __ ldr(r0, MemOperand(r0, 0));
5084 __ mov(r2, Operand(address_of_regexp_stack_memory_size)); 5084 __ mov(r2, Operand(address_of_regexp_stack_memory_size));
5085 __ ldr(r2, MemOperand(r2, 0)); 5085 __ ldr(r2, MemOperand(r2, 0));
5086 __ add(r0, r0, Operand(r2)); 5086 __ add(r0, r0, Operand(r2));
5087 __ str(r0, MemOperand(sp, 3 * kPointerSize)); 5087 __ str(r0, MemOperand(sp, 3 * kPointerSize));
5088 5088
5089 // Argument 6: Set the number of capture registers to zero to force global 5089 // Argument 6: Set the number of capture registers to zero to force global
5090 // regexps to behave as non-global. This does not affect non-global regexps. 5090 // regexps to behave as non-global. This does not affect non-global regexps.
5091 __ mov(r0, Operand(0)); 5091 __ mov(r0, Operand::Zero());
5092 __ str(r0, MemOperand(sp, 2 * kPointerSize)); 5092 __ str(r0, MemOperand(sp, 2 * kPointerSize));
5093 5093
5094 // Argument 5 (sp[4]): static offsets vector buffer. 5094 // Argument 5 (sp[4]): static offsets vector buffer.
5095 __ mov(r0, 5095 __ mov(r0,
5096 Operand(ExternalReference::address_of_static_offsets_vector(isolate))); 5096 Operand(ExternalReference::address_of_static_offsets_vector(isolate)));
5097 __ str(r0, MemOperand(sp, 1 * kPointerSize)); 5097 __ str(r0, MemOperand(sp, 1 * kPointerSize));
5098 5098
5099 // For arguments 4 and 3 get string length, calculate start of string data and 5099 // For arguments 4 and 3 get string length, calculate start of string data and
5100 // calculate the shift of the index (0 for ASCII and 1 for two byte). 5100 // calculate the shift of the index (0 for ASCII and 1 for two byte).
5101 __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag)); 5101 __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
(...skipping 234 matching lines...)
5336 __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset)); 5336 __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
5337 // Fill contents of fixed-array with undefined. 5337 // Fill contents of fixed-array with undefined.
5338 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); 5338 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
5339 __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); 5339 __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5340 // Fill fixed array elements with undefined. 5340 // Fill fixed array elements with undefined.
5341 // r0: JSArray, tagged. 5341 // r0: JSArray, tagged.
5342 // r2: undefined. 5342 // r2: undefined.
5343 // r3: Start of elements in FixedArray. 5343 // r3: Start of elements in FixedArray.
5344 // r5: Number of elements to fill. 5344 // r5: Number of elements to fill.
5345 Label loop; 5345 Label loop;
5346 __ cmp(r5, Operand(0)); 5346 __ cmp(r5, Operand::Zero());
5347 __ bind(&loop); 5347 __ bind(&loop);
5348 __ b(le, &done); // Jump if r5 is negative or zero. 5348 __ b(le, &done); // Jump if r5 is negative or zero.
5349 __ sub(r5, r5, Operand(1), SetCC); 5349 __ sub(r5, r5, Operand(1), SetCC);
5350 __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2)); 5350 __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
5351 __ jmp(&loop); 5351 __ jmp(&loop);
5352 5352
5353 __ bind(&done); 5353 __ bind(&done);
5354 __ add(sp, sp, Operand(3 * kPointerSize)); 5354 __ add(sp, sp, Operand(3 * kPointerSize));
5355 __ Ret(); 5355 __ Ret();
5356 5356
(...skipping 106 matching lines...)
5463 ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()), 5463 ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
5464 masm->isolate()->heap()->undefined_value()); 5464 masm->isolate()->heap()->undefined_value());
5465 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); 5465 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5466 __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset)); 5466 __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
5467 } 5467 }
5468 // Check for function proxy. 5468 // Check for function proxy.
5469 __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE)); 5469 __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
5470 __ b(ne, &non_function); 5470 __ b(ne, &non_function);
5471 __ push(r1); // put proxy as additional argument 5471 __ push(r1); // put proxy as additional argument
5472 __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE32)); 5472 __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE32));
5473 __ mov(r2, Operand(0, RelocInfo::NONE32)); 5473 __ mov(r2, Operand::Zero());
5474 __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY); 5474 __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
5475 __ SetCallKind(r5, CALL_AS_METHOD); 5475 __ SetCallKind(r5, CALL_AS_METHOD);
5476 { 5476 {
5477 Handle<Code> adaptor = 5477 Handle<Code> adaptor =
5478 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); 5478 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
5479 __ Jump(adaptor, RelocInfo::CODE_TARGET); 5479 __ Jump(adaptor, RelocInfo::CODE_TARGET);
5480 } 5480 }
5481 5481
5482 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead 5482 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
5483 // of the original receiver from the call site). 5483 // of the original receiver from the call site).
5484 __ bind(&non_function); 5484 __ bind(&non_function);
5485 __ str(r1, MemOperand(sp, argc_ * kPointerSize)); 5485 __ str(r1, MemOperand(sp, argc_ * kPointerSize));
5486 __ mov(r0, Operand(argc_)); // Set up the number of arguments. 5486 __ mov(r0, Operand(argc_)); // Set up the number of arguments.
5487 __ mov(r2, Operand(0, RelocInfo::NONE32)); 5487 __ mov(r2, Operand::Zero());
5488 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION); 5488 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
5489 __ SetCallKind(r5, CALL_AS_METHOD); 5489 __ SetCallKind(r5, CALL_AS_METHOD);
5490 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), 5490 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
5491 RelocInfo::CODE_TARGET); 5491 RelocInfo::CODE_TARGET);
5492 } 5492 }
5493 5493
5494 5494
5495 void CallConstructStub::Generate(MacroAssembler* masm) { 5495 void CallConstructStub::Generate(MacroAssembler* masm) {
5496 // r0 : number of arguments 5496 // r0 : number of arguments
5497 // r1 : the function to call 5497 // r1 : the function to call
(...skipping 22 matching lines...)
5520 __ bind(&slow); 5520 __ bind(&slow);
5521 __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE)); 5521 __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
5522 __ b(ne, &non_function_call); 5522 __ b(ne, &non_function_call);
5523 __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR); 5523 __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
5524 __ jmp(&do_call); 5524 __ jmp(&do_call);
5525 5525
5526 __ bind(&non_function_call); 5526 __ bind(&non_function_call);
5527 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); 5527 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
5528 __ bind(&do_call); 5528 __ bind(&do_call);
5529 // Set expected number of arguments to zero (not changing r0). 5529 // Set expected number of arguments to zero (not changing r0).
5530 __ mov(r2, Operand(0, RelocInfo::NONE32)); 5530 __ mov(r2, Operand::Zero());
5531 __ SetCallKind(r5, CALL_AS_METHOD); 5531 __ SetCallKind(r5, CALL_AS_METHOD);
5532 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), 5532 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
5533 RelocInfo::CODE_TARGET); 5533 RelocInfo::CODE_TARGET);
5534 } 5534 }
5535 5535
5536 5536
5537 // StringCharCodeAtGenerator 5537 // StringCharCodeAtGenerator
5538 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { 5538 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
5539 Label flat_string; 5539 Label flat_string;
5540 Label ascii_string; 5540 Label ascii_string;
(...skipping 148 matching lines...)
5689 Register count, 5689 Register count,
5690 Register scratch, 5690 Register scratch,
5691 bool ascii) { 5691 bool ascii) {
5692 Label loop; 5692 Label loop;
5693 Label done; 5693 Label done;
5694 // This loop just copies one character at a time, as it is only used for very 5694 // This loop just copies one character at a time, as it is only used for very
5695 // short strings. 5695 // short strings.
5696 if (!ascii) { 5696 if (!ascii) {
5697 __ add(count, count, Operand(count), SetCC); 5697 __ add(count, count, Operand(count), SetCC);
5698 } else { 5698 } else {
5699 __ cmp(count, Operand(0, RelocInfo::NONE32)); 5699 __ cmp(count, Operand::Zero());
5700 } 5700 }
5701 __ b(eq, &done); 5701 __ b(eq, &done);
5702 5702
5703 __ bind(&loop); 5703 __ bind(&loop);
5704 __ ldrb(scratch, MemOperand(src, 1, PostIndex)); 5704 __ ldrb(scratch, MemOperand(src, 1, PostIndex));
5705 // Perform sub between load and dependent store to get the load time to 5705 // Perform sub between load and dependent store to get the load time to
5706 // complete. 5706 // complete.
5707 __ sub(count, count, Operand(1), SetCC); 5707 __ sub(count, count, Operand(1), SetCC);
5708 __ strb(scratch, MemOperand(dest, 1, PostIndex)); 5708 __ strb(scratch, MemOperand(dest, 1, PostIndex));
5709 // last iteration. 5709 // last iteration.
(...skipping 34 matching lines...)
5744 // Ensure that reading an entire aligned word containing the last character 5744 // Ensure that reading an entire aligned word containing the last character
5745 // of a string will not read outside the allocated area (because we pad up 5745 // of a string will not read outside the allocated area (because we pad up
5746 // to kObjectAlignment). 5746 // to kObjectAlignment).
5747 STATIC_ASSERT(kObjectAlignment >= kReadAlignment); 5747 STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
5748 // Assumes word reads and writes are little endian. 5748 // Assumes word reads and writes are little endian.
5749 // Nothing to do for zero characters. 5749 // Nothing to do for zero characters.
5750 Label done; 5750 Label done;
5751 if (!ascii) { 5751 if (!ascii) {
5752 __ add(count, count, Operand(count), SetCC); 5752 __ add(count, count, Operand(count), SetCC);
5753 } else { 5753 } else {
5754 __ cmp(count, Operand(0, RelocInfo::NONE32)); 5754 __ cmp(count, Operand::Zero());
5755 } 5755 }
5756 __ b(eq, &done); 5756 __ b(eq, &done);
5757 5757
5758 // Assume that you cannot read (or write) unaligned. 5758 // Assume that you cannot read (or write) unaligned.
5759 Label byte_loop; 5759 Label byte_loop;
5760 // Must copy at least eight bytes, otherwise just do it one byte at a time. 5760 // Must copy at least eight bytes, otherwise just do it one byte at a time.
5761 __ cmp(count, Operand(8)); 5761 __ cmp(count, Operand(8));
5762 __ add(count, dest, Operand(count)); 5762 __ add(count, dest, Operand(count));
5763 Register limit = count; // Read until src equals this. 5763 Register limit = count; // Read until src equals this.
5764 __ b(lt, &byte_loop); 5764 __ b(lt, &byte_loop);
(...skipping 502 matching lines...)
6267 __ cmp(length, scratch2); 6267 __ cmp(length, scratch2);
6268 __ b(eq, &check_zero_length); 6268 __ b(eq, &check_zero_length);
6269 __ bind(&strings_not_equal); 6269 __ bind(&strings_not_equal);
6270 __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL))); 6270 __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL)));
6271 __ Ret(); 6271 __ Ret();
6272 6272
6273 // Check if the length is zero. 6273 // Check if the length is zero.
6274 Label compare_chars; 6274 Label compare_chars;
6275 __ bind(&check_zero_length); 6275 __ bind(&check_zero_length);
6276 STATIC_ASSERT(kSmiTag == 0); 6276 STATIC_ASSERT(kSmiTag == 0);
6277 __ cmp(length, Operand(0)); 6277 __ cmp(length, Operand::Zero());
6278 __ b(ne, &compare_chars); 6278 __ b(ne, &compare_chars);
6279 __ mov(r0, Operand(Smi::FromInt(EQUAL))); 6279 __ mov(r0, Operand(Smi::FromInt(EQUAL)));
6280 __ Ret(); 6280 __ Ret();
6281 6281
6282 // Compare characters. 6282 // Compare characters.
6283 __ bind(&compare_chars); 6283 __ bind(&compare_chars);
6284 GenerateAsciiCharsCompareLoop(masm, 6284 GenerateAsciiCharsCompareLoop(masm,
6285 left, right, length, scratch2, scratch3, 6285 left, right, length, scratch2, scratch3,
6286 &strings_not_equal); 6286 &strings_not_equal);
6287 6287
(...skipping 12 matching lines...)
6300 Register scratch4) { 6300 Register scratch4) {
6301 Label result_not_equal, compare_lengths; 6301 Label result_not_equal, compare_lengths;
6302 // Find minimum length and length difference. 6302 // Find minimum length and length difference.
6303 __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset)); 6303 __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
6304 __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset)); 6304 __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
6305 __ sub(scratch3, scratch1, Operand(scratch2), SetCC); 6305 __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
6306 Register length_delta = scratch3; 6306 Register length_delta = scratch3;
6307 __ mov(scratch1, scratch2, LeaveCC, gt); 6307 __ mov(scratch1, scratch2, LeaveCC, gt);
6308 Register min_length = scratch1; 6308 Register min_length = scratch1;
6309 STATIC_ASSERT(kSmiTag == 0); 6309 STATIC_ASSERT(kSmiTag == 0);
6310 __ cmp(min_length, Operand(0)); 6310 __ cmp(min_length, Operand::Zero());
6311 __ b(eq, &compare_lengths); 6311 __ b(eq, &compare_lengths);
6312 6312
6313 // Compare loop. 6313 // Compare loop.
6314 GenerateAsciiCharsCompareLoop(masm, 6314 GenerateAsciiCharsCompareLoop(masm,
6315 left, right, min_length, scratch2, scratch4, 6315 left, right, min_length, scratch2, scratch4,
6316 &result_not_equal); 6316 &result_not_equal);
6317 6317
6318 // Compare lengths - strings up to min-length are equal. 6318 // Compare lengths - strings up to min-length are equal.
6319 __ bind(&compare_lengths); 6319 __ bind(&compare_lengths);
6320 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); 6320 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
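The flow above reduces to: compare characters up to the shorter length, and if those all match, let the length difference decide. A host-side sketch returning -1/0/1 for LESS/EQUAL/GREATER (CompareFlatAscii is a hypothetical name):

#include <algorithm>
#include <cstddef>
#include <cstring>

int CompareFlatAscii(const char* left, size_t left_len,
                     const char* right, size_t right_len) {
  size_t min_length = std::min(left_len, right_len);
  int diff = std::memcmp(left, right, min_length);  // the character loop
  if (diff != 0) return diff < 0 ? -1 : 1;          // result_not_equal
  if (left_len == right_len) return 0;              // EQUAL
  return left_len < right_len ? -1 : 1;             // length delta decides
}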
(...skipping 796 matching lines...)
7117 7117
7118 const int spill_mask = 7118 const int spill_mask =
7119 (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() | 7119 (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
7120 r2.bit() | r1.bit() | r0.bit()); 7120 r2.bit() | r1.bit() | r0.bit());
7121 7121
7122 __ stm(db_w, sp, spill_mask); 7122 __ stm(db_w, sp, spill_mask);
7123 __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); 7123 __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
7124 __ mov(r1, Operand(Handle<String>(name))); 7124 __ mov(r1, Operand(Handle<String>(name)));
7125 StringDictionaryLookupStub stub(NEGATIVE_LOOKUP); 7125 StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
7126 __ CallStub(&stub); 7126 __ CallStub(&stub);
7127 __ cmp(r0, Operand(0)); 7127 __ cmp(r0, Operand::Zero());
7128 __ ldm(ia_w, sp, spill_mask); 7128 __ ldm(ia_w, sp, spill_mask);
7129 7129
7130 __ b(eq, done); 7130 __ b(eq, done);
7131 __ b(ne, miss); 7131 __ b(ne, miss);
7132 } 7132 }
7133 7133
7134 7134
7135 // Probe the string dictionary in the |elements| register. Jump to the 7135 // Probe the string dictionary in the |elements| register. Jump to the
7136 // |done| label if a property with the given name is found. Jump to 7136 // |done| label if a property with the given name is found. Jump to
7137 // the |miss| label otherwise. 7137 // the |miss| label otherwise.
(...skipping 55 matching lines...)
7193 if (name.is(r0)) { 7193 if (name.is(r0)) {
7194 ASSERT(!elements.is(r1)); 7194 ASSERT(!elements.is(r1));
7195 __ Move(r1, name); 7195 __ Move(r1, name);
7196 __ Move(r0, elements); 7196 __ Move(r0, elements);
7197 } else { 7197 } else {
7198 __ Move(r0, elements); 7198 __ Move(r0, elements);
7199 __ Move(r1, name); 7199 __ Move(r1, name);
7200 } 7200 }
7201 StringDictionaryLookupStub stub(POSITIVE_LOOKUP); 7201 StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
7202 __ CallStub(&stub); 7202 __ CallStub(&stub);
7203 __ cmp(r0, Operand(0)); 7203 __ cmp(r0, Operand::Zero());
7204 __ mov(scratch2, Operand(r2)); 7204 __ mov(scratch2, Operand(r2));
7205 __ ldm(ia_w, sp, spill_mask); 7205 __ ldm(ia_w, sp, spill_mask);
7206 7206
7207 __ b(ne, done); 7207 __ b(ne, done);
7208 __ b(eq, miss); 7208 __ b(eq, miss);
7209 } 7209 }
7210 7210
7211 7211
7212 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { 7212 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
7213 // This stub overrides SometimesSetsUpAFrame() to return false. That means 7213 // This stub overrides SometimesSetsUpAFrame() to return false. That means
(...skipping 496 matching lines...)
7710 7710
7711 __ Pop(lr, r5, r1); 7711 __ Pop(lr, r5, r1);
7712 __ Ret(); 7712 __ Ret();
7713 } 7713 }
7714 7714
7715 #undef __ 7715 #undef __
7716 7716
7717 } } // namespace v8::internal 7717 } } // namespace v8::internal
7718 7718
7719 #endif // V8_TARGET_ARCH_ARM 7719 #endif // V8_TARGET_ARCH_ARM