Chromium Code Reviews

Diff: src/arm/code-stubs-arm.cc

Issue 11744020: Rename RelocInfo::NONE to RelocInfo::NONE32. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 7 years, 11 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
(...skipping 542 matching lines...)
553
554   Label not_special;
555   // Convert from Smi to integer.
556   __ mov(source_, Operand(source_, ASR, kSmiTagSize));
557   // Move sign bit from source to destination. This works because the sign bit
558   // in the exponent word of the double has the same position and polarity as
559   // the 2's complement sign bit in a Smi.
560   STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
561   __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
562   // Subtract from 0 if source was negative.
563 - __ rsb(source_, source_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
563 + __ rsb(source_, source_, Operand(0, RelocInfo::NONE32), LeaveCC, ne);
564
565   // We have -1, 0 or 1, which we treat specially. Register source_ contains
566   // absolute value: it is either equal to 1 (special case of -1 and 1),
567   // greater than 1 (not a special case) or less than 1 (special case of 0).
568   __ cmp(source_, Operand(1));
569   __ b(gt, &not_special);
570
571   // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
572   const uint32_t exponent_word_for_1 =
573       HeapNumber::kExponentBias << HeapNumber::kExponentShift;
574   __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
575   // 1, 0 and -1 all have 0 for the second word.
576 - __ mov(mantissa, Operand(0, RelocInfo::NONE));
576 + __ mov(mantissa, Operand(0, RelocInfo::NONE32));
577   __ Ret();
578
579   __ bind(&not_special);
580   // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5.
581   // Gets the wrong answer for 0, but we already checked for that case above.
582   __ CountLeadingZeros(zeros_, source_, mantissa);
583   // Compute exponent and or it into the exponent register.
584   // We use mantissa as a scratch register here. Use a fudge factor to
585   // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts
586   // that fit in the ARM's constant field.
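The sign-bit trick in this hunk relies on the IEEE-754 layout of a double's high word. Below is a standalone C++ sketch, not part of this patch, that checks the two claims made in the comments above on a host machine: the sign bit of the high word matches the two's complement sign bit of a negative 32-bit integer, and 1.0 encodes as the bare exponent bias with an all-zero second word. The value kExponentShift = 20 is an assumption copied from HeapNumber's high-word layout.

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  const uint32_t kSignMask = 0x80000000u;  // same constant the stub asserts

  double minus_one = -1.0;
  uint64_t bits;
  std::memcpy(&bits, &minus_one, sizeof(bits));
  uint32_t high_word = static_cast<uint32_t>(bits >> 32);

  // The double's sign bit has the same position and polarity as the
  // two's complement sign bit of a negative 32-bit integer (e.g. a Smi).
  int32_t smi_value = -1;
  assert((high_word & kSignMask) ==
         (static_cast<uint32_t>(smi_value) & kSignMask));

  // For 1.0 the mantissa is all zeros and the exponent is the bias (1023),
  // which is what the stub ORs in as exponent_word_for_1.
  double one = 1.0;
  std::memcpy(&bits, &one, sizeof(bits));
  assert((bits >> 32) == (1023u << 20));  // assumes kExponentShift == 20
  assert((bits & 0xFFFFFFFFu) == 0);      // second word is zero
  return 0;
}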
(...skipping 547 matching lines...)
1134   __ cmp(the_int_, Operand(0x80000000u));
1135   __ b(eq, &max_negative_int);
1136   // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
1137   // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
1138   uint32_t non_smi_exponent =
1139       (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
1140   __ mov(scratch_, Operand(non_smi_exponent));
1141   // Set the sign bit in scratch_ if the value was negative.
1142   __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
1143   // Subtract from 0 if the value was negative.
1144 - __ rsb(the_int_, the_int_, Operand(0, RelocInfo::NONE), LeaveCC, cs);
1144 + __ rsb(the_int_, the_int_, Operand(0, RelocInfo::NONE32), LeaveCC, cs);
1145   // We should be masking the implicit first digit of the mantissa away here,
1146   // but it just ends up combining harmlessly with the last digit of the
1147   // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
1148   // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
1149   ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
1150   const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
1151   __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
1152   __ str(scratch_, FieldMemOperand(the_heap_number_,
1153                                    HeapNumber::kExponentOffset));
1154   __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
1155   __ str(scratch_, FieldMemOperand(the_heap_number_,
1156                                    HeapNumber::kMantissaOffset));
1157   __ Ret();
1158
1159   __ bind(&max_negative_int);
1160   // The max negative int32 is stored as a positive number in the mantissa of
1161   // a double because it uses a sign bit instead of using two's complement.
1162   // The actual mantissa bits stored are all 0 because the implicit most
1163   // significant 1 bit is not stored.
1164   non_smi_exponent += 1 << HeapNumber::kExponentShift;
1165   __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
1166   __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
1167 - __ mov(ip, Operand(0, RelocInfo::NONE));
1167 + __ mov(ip, Operand(0, RelocInfo::NONE32));
1168   __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
1169   __ Ret();
1170 }
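A host-side restatement of the exponent arithmetic in this stub, not part of the patch: the smallest positive int32 that cannot be a Smi is 2^30, which carries a biased exponent of 1023 + 30, while the max-negative-int path produces an exponent one higher with an all-zero stored mantissa. A sketch checking those two boundary cases:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  uint64_t bits;

  double d = static_cast<double>(1 << 30);  // smallest positive non-Smi int32
  std::memcpy(&bits, &d, sizeof(bits));
  assert((bits >> 52) == 1023 + 30);  // biased exponent in the top 11 bits

  d = static_cast<double>(INT32_MIN);  // the max_negative_int case
  std::memcpy(&bits, &d, sizeof(bits));
  assert((bits >> 63) == 1);                    // sign bit set
  assert(((bits >> 52) & 0x7FF) == 1023 + 31);  // exponent bumped by one
  assert((bits & ((1ull << 52) - 1)) == 0);     // implicit 1 is not stored
  return 0;
}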
1171
1172
1173 // Handle the case where the lhs and rhs are the same object.
1174 // Equality is almost reflexive (everything but NaN), so this is a test
1175 // for "identity and not NaN".
1176 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
1177                                           Label* slow,
(...skipping 195 matching lines...)
1373           lhs_exponent,
1374           HeapNumber::kExponentShift,
1375           HeapNumber::kExponentBits);
1376   // NaNs have all-one exponents so they sign extend to -1.
1377   __ cmp(r4, Operand(-1));
1378   __ b(ne, lhs_not_nan);
1379   __ mov(r4,
1380          Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
1381          SetCC);
1382   __ b(ne, &one_is_nan);
1383 - __ cmp(lhs_mantissa, Operand(0, RelocInfo::NONE));
1383 + __ cmp(lhs_mantissa, Operand(0, RelocInfo::NONE32));
1384   __ b(ne, &one_is_nan);
1385
1386   __ bind(lhs_not_nan);
1387   __ Sbfx(r4,
1388           rhs_exponent,
1389           HeapNumber::kExponentShift,
1390           HeapNumber::kExponentBits);
1391   // NaNs have all-one exponents so they sign extend to -1.
1392   __ cmp(r4, Operand(-1));
1393   __ b(ne, &neither_is_nan);
1394   __ mov(r4,
1395          Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
1396          SetCC);
1397   __ b(ne, &one_is_nan);
1398 - __ cmp(rhs_mantissa, Operand(0, RelocInfo::NONE));
1398 + __ cmp(rhs_mantissa, Operand(0, RelocInfo::NONE32));
1399   __ b(eq, &neither_is_nan);
1400
1401   __ bind(&one_is_nan);
1402   // NaN comparisons always fail.
1403   // Load whatever we need in r0 to make the comparison fail.
1404   if (cond == lt || cond == le) {
1405     __ mov(r0, Operand(GREATER));
1406   } else {
1407     __ mov(r0, Operand(LESS));
1408   }
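The NaN test above extracts the exponent with Sbfx, so an all-ones exponent field sign-extends to -1, and then checks that at least one mantissa bit (split across the top word and the low word) is set. The same predicate in portable C++, as a sketch rather than part of the patch:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <limits>

// A double is NaN iff its 11 exponent bits are all ones and its mantissa
// bits are not all zero; an all-zero mantissa means infinity instead.
static bool IsNaNBits(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  uint32_t exponent = static_cast<uint32_t>(bits >> 52) & 0x7FF;
  uint64_t mantissa = bits & ((1ull << 52) - 1);
  return exponent == 0x7FF && mantissa != 0;
}

int main() {
  assert(IsNaNBits(std::numeric_limits<double>::quiet_NaN()));
  assert(!IsNaNBits(std::numeric_limits<double>::infinity()));
  assert(!IsNaNBits(1.0));
  assert(!IsNaNBits(0.0));
  return 0;
}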
(...skipping 506 matching lines...)
1915     __ JumpIfSmi(tos_, &patch);
1916   }
1917
1918   if (types_.NeedsMap()) {
1919     __ ldr(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
1920
1921     if (types_.CanBeUndetectable()) {
1922       __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
1923       __ tst(ip, Operand(1 << Map::kIsUndetectable));
1924       // Undetectable -> false.
1925 -     __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
1925 +     __ mov(tos_, Operand(0, RelocInfo::NONE32), LeaveCC, ne);
1926       __ Ret(ne);
1927     }
1928   }
1929
1930   if (types_.Contains(SPEC_OBJECT)) {
1931     // Spec object -> true.
1932     __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
1933     // tos_ contains the correct non-zero return value already.
1934     __ Ret(ge);
1935   }
(...skipping 12 matching lines...)
1948     __ b(ne, &not_heap_number);
1949
1950     if (CpuFeatures::IsSupported(VFP2)) {
1951       CpuFeatures::Scope scope(VFP2);
1952
1953       __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
1954       __ VFPCompareAndSetFlags(d1, 0.0);
1955       // "tos_" is a register, and contains a non zero value by default.
1956       // Hence we only need to overwrite "tos_" with zero to return false for
1957       // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
1958 -     __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq);  // for FP_ZERO
1958 +     __ mov(tos_, Operand(0, RelocInfo::NONE32), LeaveCC, eq);  // for FP_ZERO
1959 -     __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs);  // for FP_NAN
1959 +     __ mov(tos_, Operand(0, RelocInfo::NONE32), LeaveCC, vs);  // for FP_NAN
1960     } else {
1961       Label done, not_nan, not_zero;
1962       __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
1963       // -0 maps to false:
1964       __ bic(
1965 -         temp, temp, Operand(HeapNumber::kSignMask, RelocInfo::NONE), SetCC);
1965 +         temp, temp, Operand(HeapNumber::kSignMask, RelocInfo::NONE32), SetCC);
1966       __ b(ne, &not_zero);
1967       // If exponent word is zero then the answer depends on the mantissa word.
1968       __ ldr(tos_, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
1969       __ jmp(&done);
1970
1971       // Check for NaN.
1972       __ bind(&not_zero);
1973       // We already zeroed the sign bit, now shift out the mantissa so we only
1974       // have the exponent left.
1975       __ mov(temp, Operand(temp, LSR, HeapNumber::kMantissaBitsInTopWord));
1976       unsigned int shifted_exponent_mask =
1977           HeapNumber::kExponentMask >> HeapNumber::kMantissaBitsInTopWord;
1978 -     __ cmp(temp, Operand(shifted_exponent_mask, RelocInfo::NONE));
1978 +     __ cmp(temp, Operand(shifted_exponent_mask, RelocInfo::NONE32));
1979       __ b(ne, &not_nan);  // If exponent is not 0x7ff then it can't be a NaN.
1980
1981       // Reload exponent word.
1982       __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
1983 -     __ tst(temp, Operand(HeapNumber::kMantissaMask, RelocInfo::NONE));
1983 +     __ tst(temp, Operand(HeapNumber::kMantissaMask, RelocInfo::NONE32));
1984       // If mantissa is not zero then we have a NaN, so return 0.
1985 -     __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
1985 +     __ mov(tos_, Operand(0, RelocInfo::NONE32), LeaveCC, ne);
1986       __ b(ne, &done);
1987
1988       // Load mantissa word.
1989       __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
1990 -     __ cmp(temp, Operand(0, RelocInfo::NONE));
1990 +     __ cmp(temp, Operand(0, RelocInfo::NONE32));
1991       // If mantissa is not zero then we have a NaN, so return 0.
1992 -     __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
1992 +     __ mov(tos_, Operand(0, RelocInfo::NONE32), LeaveCC, ne);
1993       __ b(ne, &done);
1994
1995       __ bind(&not_nan);
1996 -     __ mov(tos_, Operand(1, RelocInfo::NONE));
1996 +     __ mov(tos_, Operand(1, RelocInfo::NONE32));
1997       __ bind(&done);
1998     }
1999     __ Ret();
2000     __ bind(&not_heap_number);
2001   }
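Both the VFP2 path and the integer path above implement the same ECMAScript rule: a heap number converts to false exactly when it is +0, -0, or NaN. A portable restatement of that predicate, offered as a sketch rather than part of the patch:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <limits>

static bool ToBooleanOfDouble(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  // Clear the sign bit, like the bic above: +0 and -0 both become zero.
  bool is_zero = (bits & ~(1ull << 63)) == 0;
  uint32_t exponent = static_cast<uint32_t>(bits >> 52) & 0x7FF;
  bool is_nan = exponent == 0x7FF && (bits & ((1ull << 52) - 1)) != 0;
  return !is_zero && !is_nan;
}

int main() {
  assert(!ToBooleanOfDouble(0.0));
  assert(!ToBooleanOfDouble(-0.0));
  assert(!ToBooleanOfDouble(std::numeric_limits<double>::quiet_NaN()));
  assert(ToBooleanOfDouble(1.0));
  assert(ToBooleanOfDouble(std::numeric_limits<double>::infinity()));
  return 0;
}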
2006
2007
2008 void ToBooleanStub::CheckOddball(MacroAssembler* masm,
2009                                  Type type,
2010                                  Heap::RootListIndex value,
2011                                  bool result) {
2012   if (types_.Contains(type)) {
2013     // If we see an expected oddball, return its ToBoolean value in tos_.
2014     __ LoadRoot(ip, value);
2015     __ cmp(tos_, ip);
2016     // The value of a root is never NULL, so we can avoid loading a non-null
2017     // value into tos_ when we want to return 'true'.
2018     if (!result) {
2019 -     __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq);
2019 +     __ mov(tos_, Operand(0, RelocInfo::NONE32), LeaveCC, eq);
2020     }
2021     __ Ret(eq);
2022   }
2023 }
2024
2025
2026 void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
2027   if (!tos_.is(r3)) {
2028     __ mov(r3, Operand(tos_));
2029   }
(...skipping 124 matching lines...)
2154 void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
2155                                      Label* non_smi,
2156                                      Label* slow) {
2157   __ JumpIfNotSmi(r0, non_smi);
2158
2159   // The result of negating zero or the smallest negative smi is not a smi.
2160   __ bic(ip, r0, Operand(0x80000000), SetCC);
2161   __ b(eq, slow);
2162
2163   // Return '0 - value'.
2164 - __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
2164 + __ rsb(r0, r0, Operand(0, RelocInfo::NONE32));
2165   __ Ret();
2166 }
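The bic/eq guard works because, with a zero one-bit smi tag, a smi n is encoded as n << 1, and clearing bit 31 of the encoding yields zero for exactly the two inputs whose negation is not a smi: 0 (whose negation is -0, which needs a heap number) and -2^30 (whose negation exceeds the smi maximum). A standalone sketch; kSmiMin here assumes the 31-bit smi range of the 32-bit ports:

#include <cassert>
#include <cstdint>

int main() {
  const int32_t kSmiMin = -(1 << 30);
  uint32_t encoded_zero = static_cast<uint32_t>(0) << 1;        // 0x00000000
  uint32_t encoded_min = static_cast<uint32_t>(kSmiMin) << 1;   // 0x80000000
  assert((encoded_zero & ~0x80000000u) == 0);
  assert((encoded_min & ~0x80000000u) == 0);
  // Any other smi keeps at least one bit below bit 31 after the mask.
  for (int32_t n = -4; n <= 4; ++n) {
    if (n == 0) continue;
    uint32_t encoded = static_cast<uint32_t>(n) << 1;
    assert((encoded & ~0x80000000u) != 0);
  }
  return 0;
}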
2167
2168
2169 void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
2170                                         Label* non_smi) {
2171   __ JumpIfNotSmi(r0, non_smi);
2172
2173   // Flip bits and revert inverted smi-tag.
2174   __ mvn(r0, Operand(r0));
(...skipping 1296 matching lines...)
3471   Isolate* isolate = masm->isolate();
3472   ExternalReference cache_array =
3473       ExternalReference::transcendental_cache_array_address(isolate);
3474   __ mov(cache_entry, Operand(cache_array));
3475   // cache_entry points to cache array.
3476   int cache_array_index
3477       = type_ * sizeof(isolate->transcendental_cache()->caches_[0]);
3478   __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index));
3479   // r0 points to the cache for the type type_.
3480   // If NULL, the cache hasn't been initialized yet, so go through runtime.
3481 - __ cmp(cache_entry, Operand(0, RelocInfo::NONE));
3481 + __ cmp(cache_entry, Operand(0, RelocInfo::NONE32));
3482   __ b(eq, &invalid_cache);
3483
3484 #ifdef DEBUG
3485   // Check that the layout of cache elements matches expectations.
3486   { TranscendentalCache::SubCache::Element test_elem[2];
3487     char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
3488     char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
3489     char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
3490     char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
3491     char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
(...skipping 592 matching lines...)
4084                &throw_termination_exception,
4085                &throw_out_of_memory_exception,
4086                true,
4087                true);
4088
4089   __ bind(&throw_out_of_memory_exception);
4090   // Set external caught exception to false.
4091   Isolate* isolate = masm->isolate();
4092   ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
4093                                     isolate);
4094 - __ mov(r0, Operand(false, RelocInfo::NONE));
4094 + __ mov(r0, Operand(false, RelocInfo::NONE32));
4095   __ mov(r2, Operand(external_caught));
4096   __ str(r0, MemOperand(r2));
4097
4098   // Set pending exception and r0 to out of memory exception.
4099   Failure* out_of_memory = Failure::OutOfMemoryException();
4100   __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
4101   __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4102                                        isolate)));
4103   __ str(r0, MemOperand(r2));
4104   // Fall through to the next label.
(...skipping 661 matching lines...)
4766   __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4767   __ str(r1, MemOperand(sp, 0));
4768   __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
4769   __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
4770   __ str(r3, MemOperand(sp, 1 * kPointerSize));
4771
4772   // Try the new space allocation. Start out with computing the size
4773   // of the arguments object and the elements array in words.
4774   Label add_arguments_object;
4775   __ bind(&try_allocate);
4776 - __ cmp(r1, Operand(0, RelocInfo::NONE));
4776 + __ cmp(r1, Operand(0, RelocInfo::NONE32));
4777   __ b(eq, &add_arguments_object);
4778   __ mov(r1, Operand(r1, LSR, kSmiTagSize));
4779   __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
4780   __ bind(&add_arguments_object);
4781   __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
4782
4783   // Do the allocation of both objects in one go.
4784   __ AllocateInNewSpace(r1,
4785                         r0,
4786                         r2,
(...skipping 12 matching lines...)
4799   __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize);
4800
4801   // Get the length (smi tagged) and set that as an in-object property too.
4802   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
4803   __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
4804   __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize +
4805                              Heap::kArgumentsLengthIndex * kPointerSize));
4806
4807   // If there are no actual arguments, we're done.
4808   Label done;
4809 - __ cmp(r1, Operand(0, RelocInfo::NONE));
4809 + __ cmp(r1, Operand(0, RelocInfo::NONE32));
4810   __ b(eq, &done);
4811
4812   // Get the parameters pointer from the stack.
4813   __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
4814
4815   // Set up the elements pointer in the allocated arguments object and
4816   // initialize the header in the elements fixed array.
4817   __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict));
4818   __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
4819   __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
4820   __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
4821   __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
4822   // Untag the length for the loop.
4823   __ mov(r1, Operand(r1, LSR, kSmiTagSize));
4824
4825   // Copy the fixed array slots.
4826   Label loop;
4827   // Set up r4 to point to the first array slot.
4828   __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4829   __ bind(&loop);
4830   // Pre-decrement r2 with kPointerSize on each iteration.
4831   // Pre-decrement in order to skip receiver.
4832   __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
4833   // Post-increment r4 with kPointerSize on each iteration.
4834   __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
4835   __ sub(r1, r1, Operand(1));
4836 - __ cmp(r1, Operand(0, RelocInfo::NONE));
4836 + __ cmp(r1, Operand(0, RelocInfo::NONE32));
4837   __ b(ne, &loop);
4838
4839   // Return and remove the on-stack parameters.
4840   __ bind(&done);
4841   __ add(sp, sp, Operand(3 * kPointerSize));
4842   __ Ret();
4843
4844   // Do the runtime call to allocate the arguments object.
4845   __ bind(&runtime);
4846   __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
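The try_allocate block computes the allocation size in words: the fixed-size arguments object, plus, when there are arguments, one word per argument and the elements array header; the elements array is skipped entirely when empty. A sketch of the same arithmetic; the two word counts below are illustrative stand-ins, not the real Heap/FixedArray constants:

#include <cstdio>

int main() {
  const int kFixedArrayHeaderWords = 2;       // map + length (assumption)
  const int kArgumentsObjectStrictWords = 4;  // hypothetical object size

  const int argcs[] = {0, 1, 3};
  for (int i = 0; i < 3; ++i) {
    int argc = argcs[i];
    int words = kArgumentsObjectStrictWords;
    if (argc != 0) words += argc + kFixedArrayHeaderWords;
    std::printf("argc=%d -> %d words\n", argc, words);
  }
  return 0;
}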
(...skipping 615 matching lines...)
5462   // object (undefined) so no write barrier is needed.
5463   ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
5464             masm->isolate()->heap()->undefined_value());
5465   __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5466   __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
5467   }
5468   // Check for function proxy.
5469   __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
5470   __ b(ne, &non_function);
5471   __ push(r1);  // put proxy as additional argument
5472 - __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE));
5472 + __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE32));
5473 - __ mov(r2, Operand(0, RelocInfo::NONE));
5473 + __ mov(r2, Operand(0, RelocInfo::NONE32));
5474   __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
5475   __ SetCallKind(r5, CALL_AS_METHOD);
5476   {
5477     Handle<Code> adaptor =
5478         masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
5479     __ Jump(adaptor, RelocInfo::CODE_TARGET);
5480   }
5481
5482   // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
5483   // of the original receiver from the call site).
5484   __ bind(&non_function);
5485   __ str(r1, MemOperand(sp, argc_ * kPointerSize));
5486   __ mov(r0, Operand(argc_));  // Set up the number of arguments.
5487 - __ mov(r2, Operand(0, RelocInfo::NONE));
5487 + __ mov(r2, Operand(0, RelocInfo::NONE32));
5488   __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
5489   __ SetCallKind(r5, CALL_AS_METHOD);
5490   __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
5491           RelocInfo::CODE_TARGET);
5492 }
5493
5494
5495 void CallConstructStub::Generate(MacroAssembler* masm) {
5496   // r0 : number of arguments
5497   // r1 : the function to call
(...skipping 22 matching lines...)
5520   __ bind(&slow);
5521   __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
5522   __ b(ne, &non_function_call);
5523   __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
5524   __ jmp(&do_call);
5525
5526   __ bind(&non_function_call);
5527   __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
5528   __ bind(&do_call);
5529   // Set expected number of arguments to zero (not changing r0).
5530 - __ mov(r2, Operand(0, RelocInfo::NONE));
5530 + __ mov(r2, Operand(0, RelocInfo::NONE32));
5531   __ SetCallKind(r5, CALL_AS_METHOD);
5532   __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
5533           RelocInfo::CODE_TARGET);
5534 }
5535
5536
5537 // StringCharCodeAtGenerator
5538 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
5539   Label flat_string;
5540   Label ascii_string;
(...skipping 148 matching lines...)
5689                                           Register count,
5690                                           Register scratch,
5691                                           bool ascii) {
5692   Label loop;
5693   Label done;
5694   // This loop just copies one character at a time, as it is only used for very
5695   // short strings.
5696   if (!ascii) {
5697     __ add(count, count, Operand(count), SetCC);
5698   } else {
5699 -   __ cmp(count, Operand(0, RelocInfo::NONE));
5699 +   __ cmp(count, Operand(0, RelocInfo::NONE32));
5700   }
5701   __ b(eq, &done);
5702
5703   __ bind(&loop);
5704   __ ldrb(scratch, MemOperand(src, 1, PostIndex));
5705   // Perform sub between load and dependent store to get the load time to
5706   // complete.
5707   __ sub(count, count, Operand(1), SetCC);
5708   __ strb(scratch, MemOperand(dest, 1, PostIndex));
5709   // last iteration.
(...skipping 34 matching lines...)
5744   // Ensure that reading an entire aligned word containing the last character
5745   // of a string will not read outside the allocated area (because we pad up
5746   // to kObjectAlignment).
5747   STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
5748   // Assumes word reads and writes are little endian.
5749   // Nothing to do for zero characters.
5750   Label done;
5751   if (!ascii) {
5752     __ add(count, count, Operand(count), SetCC);
5753   } else {
5754 -   __ cmp(count, Operand(0, RelocInfo::NONE));
5754 +   __ cmp(count, Operand(0, RelocInfo::NONE32));
5755   }
5756   __ b(eq, &done);
5757
5758   // Assume that you cannot read (or write) unaligned.
5759   Label byte_loop;
5760   // Must copy at least eight bytes, otherwise just do it one byte at a time.
5761   __ cmp(count, Operand(8));
5762   __ add(count, dest, Operand(count));
5763   Register limit = count;  // Read until src equals this.
5764   __ b(lt, &byte_loop);
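In both copy routines, the add-with-itself doubles the character count into a byte count for two-byte strings while also setting the condition flags, so a single eq branch covers the zero-length case for either encoding. A minimal sketch of that idiom:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t count = 5;  // characters in a two-byte string
  bool ascii = false;
  if (!ascii) {
    count += count;    // __ add(count, count, Operand(count), SetCC)
  }
  // SetCC (or the explicit cmp in the ascii case) leaves the Z flag set
  // exactly when there is nothing to copy.
  bool zero_flag = (count == 0);
  assert(count == 10);  // bytes to copy
  assert(!zero_flag);
  return 0;
}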
(...skipping 1945 matching lines...)
7710
7711   __ Pop(lr, r5, r1);
7712   __ Ret();
7713 }
7714
7715 #undef __
7716
7717 } }  // namespace v8::internal
7718
7719 #endif  // V8_TARGET_ARCH_ARM