| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 23 matching lines...) |
| 34 #include "regexp-macro-assembler.h" | 34 #include "regexp-macro-assembler.h" |
| 35 | 35 |
| 36 namespace v8 { | 36 namespace v8 { |
| 37 namespace internal { | 37 namespace internal { |
| 38 | 38 |
| 39 | 39 |
| 40 #define __ ACCESS_MASM(masm) | 40 #define __ ACCESS_MASM(masm) |
| 41 | 41 |
| 42 static void EmitIdenticalObjectComparison(MacroAssembler* masm, | 42 static void EmitIdenticalObjectComparison(MacroAssembler* masm, |
| 43 Label* slow, | 43 Label* slow, |
| 44 Condition cond, | 44 Condition cond); |
| 45 bool never_nan_nan); | |
| 46 static void EmitSmiNonsmiComparison(MacroAssembler* masm, | 45 static void EmitSmiNonsmiComparison(MacroAssembler* masm, |
| 47 Register lhs, | 46 Register lhs, |
| 48 Register rhs, | 47 Register rhs, |
| 49 Label* lhs_not_nan, | 48 Label* lhs_not_nan, |
| 50 Label* slow, | 49 Label* slow, |
| 51 bool strict); | 50 bool strict); |
| 52 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond); | 51 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond); |
| 53 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, | 52 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, |
| 54 Register lhs, | 53 Register lhs, |
| 55 Register rhs); | 54 Register rhs); |
| (...skipping 564 matching lines...) |
| 620 __ Call(stub1.GetCode()); | 619 __ Call(stub1.GetCode()); |
| 621 // Write Smi from r1 to r1 and r0 in double format. | 620 // Write Smi from r1 to r1 and r0 in double format. |
| 622 __ mov(scratch1, Operand(r1)); | 621 __ mov(scratch1, Operand(r1)); |
| 623 ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2); | 622 ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2); |
| 624 __ Call(stub2.GetCode()); | 623 __ Call(stub2.GetCode()); |
| 625 __ pop(lr); | 624 __ pop(lr); |
| 626 } | 625 } |
| 627 } | 626 } |
| 628 | 627 |
| 629 | 628 |
| 630 void FloatingPointHelper::LoadOperands( | |
| 631 MacroAssembler* masm, | |
| 632 FloatingPointHelper::Destination destination, | |
| 633 Register heap_number_map, | |
| 634 Register scratch1, | |
| 635 Register scratch2, | |
| 636 Label* slow) { | |
| 637 | |
| 638 // Load right operand (r0) to d6 or r2/r3. | |
| 639 LoadNumber(masm, destination, | |
| 640 r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow); | |
| 641 | |
| 642 // Load left operand (r1) to d7 or r0/r1. | |
| 643 LoadNumber(masm, destination, | |
| 644 r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow); | |
| 645 } | |
| 646 | |
| 647 | |
| 648 void FloatingPointHelper::LoadNumber(MacroAssembler* masm, | 629 void FloatingPointHelper::LoadNumber(MacroAssembler* masm, |
| 649 Destination destination, | 630 Destination destination, |
| 650 Register object, | 631 Register object, |
| 651 DwVfpRegister dst, | 632 DwVfpRegister dst, |
| 652 Register dst1, | 633 Register dst1, |
| 653 Register dst2, | 634 Register dst2, |
| 654 Register heap_number_map, | 635 Register heap_number_map, |
| 655 Register scratch1, | 636 Register scratch1, |
| 656 Register scratch2, | 637 Register scratch2, |
| 657 Label* not_number) { | 638 Label* not_number) { |
| (...skipping 245 matching lines...) |
| 903 Register scratch3, | 884 Register scratch3, |
| 904 DwVfpRegister double_scratch0, | 885 DwVfpRegister double_scratch0, |
| 905 DwVfpRegister double_scratch1, | 886 DwVfpRegister double_scratch1, |
| 906 Label* not_int32) { | 887 Label* not_int32) { |
| 907 ASSERT(!dst.is(object)); | 888 ASSERT(!dst.is(object)); |
| 908 ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object)); | 889 ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object)); |
| 909 ASSERT(!scratch1.is(scratch2) && | 890 ASSERT(!scratch1.is(scratch2) && |
| 910 !scratch1.is(scratch3) && | 891 !scratch1.is(scratch3) && |
| 911 !scratch2.is(scratch3)); | 892 !scratch2.is(scratch3)); |
| 912 | 893 |
| 913 Label done; | 894 Label done, maybe_undefined; |
| 914 | 895 |
| 915 __ UntagAndJumpIfSmi(dst, object, &done); | 896 __ UntagAndJumpIfSmi(dst, object, &done); |
| 916 | 897 |
| 917 __ AssertRootValue(heap_number_map, | 898 __ AssertRootValue(heap_number_map, |
| 918 Heap::kHeapNumberMapRootIndex, | 899 Heap::kHeapNumberMapRootIndex, |
| 919 "HeapNumberMap register clobbered."); | 900 "HeapNumberMap register clobbered."); |
| 920 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); | 901 |
| 902 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined); |
| 921 | 903 |
| 922 // Object is a heap number. | 904 // Object is a heap number. |
| 923 // Convert the floating point value to a 32-bit integer. | 905 // Convert the floating point value to a 32-bit integer. |
| 924 if (CpuFeatures::IsSupported(VFP2)) { | 906 if (CpuFeatures::IsSupported(VFP2)) { |
| 925 CpuFeatures::Scope scope(VFP2); | 907 CpuFeatures::Scope scope(VFP2); |
| 926 | 908 |
| 927 // Load the double value. | 909 // Load the double value. |
| 928 __ sub(scratch1, object, Operand(kHeapObjectTag)); | 910 __ sub(scratch1, object, Operand(kHeapObjectTag)); |
| 929 __ vldr(double_scratch0, scratch1, HeapNumber::kValueOffset); | 911 __ vldr(double_scratch0, scratch1, HeapNumber::kValueOffset); |
| 930 | 912 |
| (...skipping 26 matching lines...) |
| 957 // Shift back the higher bits of the mantissa. | 939 // Shift back the higher bits of the mantissa. |
| 958 __ mov(dst, Operand(dst, LSR, scratch3)); | 940 __ mov(dst, Operand(dst, LSR, scratch3)); |
| 959 // Set the implicit first bit. | 941 // Set the implicit first bit. |
| 960 __ rsb(scratch3, scratch3, Operand(32)); | 942 __ rsb(scratch3, scratch3, Operand(32)); |
| 961 __ orr(dst, dst, Operand(scratch2, LSL, scratch3)); | 943 __ orr(dst, dst, Operand(scratch2, LSL, scratch3)); |
| 962 // Set the sign. | 944 // Set the sign. |
| 963 __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); | 945 __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); |
| 964 __ tst(scratch1, Operand(HeapNumber::kSignMask)); | 946 __ tst(scratch1, Operand(HeapNumber::kSignMask)); |
| 965 __ rsb(dst, dst, Operand::Zero(), LeaveCC, mi); | 947 __ rsb(dst, dst, Operand::Zero(), LeaveCC, mi); |
| 966 } | 948 } |
| 949 __ b(&done); |
| 950 |
| 951 __ bind(&maybe_undefined); |
| 952 __ CompareRoot(object, Heap::kUndefinedValueRootIndex); |
| 953 __ b(ne, not_int32); |
| 954 // |undefined| is truncated to 0. |
| 955 __ mov(dst, Operand(Smi::FromInt(0))); |
| 956 // Fall through. |
| 967 | 957 |
| 968 __ bind(&done); | 958 __ bind(&done); |
| 969 } | 959 } |
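
As an aside for readers tracing the non-VFP2 branch of this conversion: the Sbfx/mov/orr/rsb sequence is a manual double-to-int32 truncation. Below is a minimal standalone C++ sketch of the same idea, not V8's actual helper; the field constants simply follow the standard IEEE-754 double layout that HeapNumber::kExponentShift/kExponentBits describe.

```cpp
#include <cstdint>
#include <cstring>

// Hypothetical sketch: truncate a double to int32 using only integer ops,
// mirroring the non-VFP2 path above (extract exponent, shift mantissa,
// re-apply the implicit leading bit and the sign).
int32_t TruncateDoubleToInt32(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));  // Raw IEEE-754 bit pattern.
  int32_t exponent = static_cast<int32_t>((bits >> 52) & 0x7FF) - 1023;
  if (exponent < 0) return 0;  // |value| < 1 truncates to 0.
  if (exponent > 30) {
    // Out of safe int32 range; the stub above jumps to not_int32 instead
    // (INT32_MIN would need a special case that this sketch omits).
    return 0;
  }
  // Re-insert the implicit leading 1, then shift the mantissa into place.
  uint64_t mantissa = (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
  int32_t result = static_cast<int32_t>(mantissa >> (52 - exponent));
  // Apply the sign, as the stub does with rsb under the 'mi' condition.
  return (bits >> 63) ? -result : result;
}
```
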
| 970 | 960 |
| 971 | 961 |
| 972 void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, | 962 void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, |
| 973 Register src1, | 963 Register src1, |
| 974 Register src2, | 964 Register src2, |
| 975 Register dst, | 965 Register dst, |
| 976 Register scratch, | 966 Register scratch, |
| (...skipping 164 matching lines...) |
| 1141 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset)); | 1131 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset)); |
| 1142 __ Ret(); | 1132 __ Ret(); |
| 1143 } | 1133 } |
| 1144 | 1134 |
| 1145 | 1135 |
| 1146 // Handle the case where the lhs and rhs are the same object. | 1136 // Handle the case where the lhs and rhs are the same object. |
| 1147 // Equality is almost reflexive (everything but NaN), so this is a test | 1137 // Equality is almost reflexive (everything but NaN), so this is a test |
| 1148 // for "identity and not NaN". | 1138 // for "identity and not NaN". |
| 1149 static void EmitIdenticalObjectComparison(MacroAssembler* masm, | 1139 static void EmitIdenticalObjectComparison(MacroAssembler* masm, |
| 1150 Label* slow, | 1140 Label* slow, |
| 1151 Condition cond, | 1141 Condition cond) { |
| 1152 bool never_nan_nan) { | |
| 1153 Label not_identical; | 1142 Label not_identical; |
| 1154 Label heap_number, return_equal; | 1143 Label heap_number, return_equal; |
| 1155 __ cmp(r0, r1); | 1144 __ cmp(r0, r1); |
| 1156 __ b(ne, &not_identical); | 1145 __ b(ne, &not_identical); |
| 1157 | 1146 |
| 1158 // The two objects are identical. If we know that one of them isn't NaN then | 1147 // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(), |
| 1159 // we now know they test equal. | 1148 // so we do the second best thing - test it ourselves. |
| 1160 if (cond != eq || !never_nan_nan) { | 1149 // They are both equal and they are not both Smis so both of them are not |
| 1161 // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(), | 1150 // Smis. If it's not a heap number, then return equal. |
| 1162 // so we do the second best thing - test it ourselves. | 1151 if (cond == lt || cond == gt) { |
| 1163 // They are both equal and they are not both Smis so both of them are not | 1152 __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE); |
| 1164 // Smis. If it's not a heap number, then return equal. | 1153 __ b(ge, slow); |
| 1165 if (cond == lt || cond == gt) { | 1154 } else { |
| 1166 __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE); | 1155 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); |
| 1156 __ b(eq, &heap_number); |
| 1157 // Comparing JS objects with <=, >= is complicated. |
| 1158 if (cond != eq) { |
| 1159 __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE)); |
| 1167 __ b(ge, slow); | 1160 __ b(ge, slow); |
| 1168 } else { | 1161 // Normally here we fall through to return_equal, but undefined is |
| 1169 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); | 1162 // special: (undefined == undefined) == true, but |
| 1170 __ b(eq, &heap_number); | 1163 // (undefined <= undefined) == false! See ECMAScript 11.8.5. |
| 1171 // Comparing JS objects with <=, >= is complicated. | 1164 if (cond == le || cond == ge) { |
| 1172 if (cond != eq) { | 1165 __ cmp(r4, Operand(ODDBALL_TYPE)); |
| 1173 __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE)); | 1166 __ b(ne, &return_equal); |
| 1174 __ b(ge, slow); | 1167 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); |
| 1175 // Normally here we fall through to return_equal, but undefined is | 1168 __ cmp(r0, r2); |
| 1176 // special: (undefined == undefined) == true, but | 1169 __ b(ne, &return_equal); |
| 1177 // (undefined <= undefined) == false! See ECMAScript 11.8.5. | 1170 if (cond == le) { |
| 1178 if (cond == le || cond == ge) { | 1171 // undefined <= undefined should fail. |
| 1179 __ cmp(r4, Operand(ODDBALL_TYPE)); | 1172 __ mov(r0, Operand(GREATER)); |
| 1180 __ b(ne, &return_equal); | 1173 } else { |
| 1181 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); | 1174 // undefined >= undefined should fail. |
| 1182 __ cmp(r0, r2); | 1175 __ mov(r0, Operand(LESS)); |
| 1183 __ b(ne, &return_equal); | |
| 1184 if (cond == le) { | |
| 1185 // undefined <= undefined should fail. | |
| 1186 __ mov(r0, Operand(GREATER)); | |
| 1187 } else { | |
| 1188 // undefined >= undefined should fail. | |
| 1189 __ mov(r0, Operand(LESS)); | |
| 1190 } | |
| 1191 __ Ret(); | |
| 1192 } | 1176 } |
| 1177 __ Ret(); |
| 1193 } | 1178 } |
| 1194 } | 1179 } |
| 1195 } | 1180 } |
| 1196 | 1181 |
| 1197 __ bind(&return_equal); | 1182 __ bind(&return_equal); |
| 1198 if (cond == lt) { | 1183 if (cond == lt) { |
| 1199 __ mov(r0, Operand(GREATER)); // Things aren't less than themselves. | 1184 __ mov(r0, Operand(GREATER)); // Things aren't less than themselves. |
| 1200 } else if (cond == gt) { | 1185 } else if (cond == gt) { |
| 1201 __ mov(r0, Operand(LESS)); // Things aren't greater than themselves. | 1186 __ mov(r0, Operand(LESS)); // Things aren't greater than themselves. |
| 1202 } else { | 1187 } else { |
| 1203 __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves. | 1188 __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves. |
| 1204 } | 1189 } |
| 1205 __ Ret(); | 1190 __ Ret(); |
| 1206 | 1191 |
| 1207 if (cond != eq || !never_nan_nan) { | 1192 // For less and greater we don't have to check for NaN since the result of |
| 1208 // For less and greater we don't have to check for NaN since the result of | 1193 // x < x is false regardless. For the others here is some code to check |
| 1209 // x < x is false regardless. For the others here is some code to check | 1194 // for NaN. |
| 1210 // for NaN. | 1195 if (cond != lt && cond != gt) { |
| 1211 if (cond != lt && cond != gt) { | 1196 __ bind(&heap_number); |
| 1212 __ bind(&heap_number); | 1197 // It is a heap number, so return non-equal if it's NaN and equal if it's |
| 1213 // It is a heap number, so return non-equal if it's NaN and equal if it's | 1198 // not NaN. |
| 1214 // not NaN. | |
| 1215 | 1199 |
| 1216 // The representation of NaN values has all exponent bits (52..62) set, | 1200 // The representation of NaN values has all exponent bits (52..62) set, |
| 1217 // and not all mantissa bits (0..51) clear. | 1201 // and not all mantissa bits (0..51) clear. |
| 1218 // Read top bits of double representation (second word of value). | 1202 // Read top bits of double representation (second word of value). |
| 1219 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | 1203 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); |
| 1220 // Test that exponent bits are all set. | 1204 // Test that exponent bits are all set. |
| 1221 __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); | 1205 __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); |
| 1222 // NaNs have all-one exponents so they sign extend to -1. | 1206 // NaNs have all-one exponents so they sign extend to -1. |
| 1223 __ cmp(r3, Operand(-1)); | 1207 __ cmp(r3, Operand(-1)); |
| 1224 __ b(ne, &return_equal); | 1208 __ b(ne, &return_equal); |
| 1225 | 1209 |
| 1226 // Shift out flag and all exponent bits, retaining only mantissa. | 1210 // Shift out flag and all exponent bits, retaining only mantissa. |
| 1227 __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord)); | 1211 __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord)); |
| 1228 // Or with all low-bits of mantissa. | 1212 // Or with all low-bits of mantissa. |
| 1229 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); | 1213 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); |
| 1230 __ orr(r0, r3, Operand(r2), SetCC); | 1214 __ orr(r0, r3, Operand(r2), SetCC); |
| 1231 // For equal we already have the right value in r0: Return zero (equal) | 1215 // For equal we already have the right value in r0: Return zero (equal) |
| 1232 // if all bits in mantissa are zero (it's an Infinity) and non-zero if | 1216 // if all bits in mantissa are zero (it's an Infinity) and non-zero if |
| 1233 // not (it's a NaN). For <= and >= we need to load r0 with the failing | 1217 // not (it's a NaN). For <= and >= we need to load r0 with the failing |
| 1234 // value if it's a NaN. | 1218 // value if it's a NaN. |
| 1235 if (cond != eq) { | 1219 if (cond != eq) { |
| 1236 // All-zero means Infinity means equal. | 1220 // All-zero means Infinity means equal. |
| 1237 __ Ret(eq); | 1221 __ Ret(eq); |
| 1238 if (cond == le) { | 1222 if (cond == le) { |
| 1239 __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail. | 1223 __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail. |
| 1240 } else { | 1224 } else { |
| 1241 __ mov(r0, Operand(LESS)); // NaN >= NaN should fail. | 1225 __ mov(r0, Operand(LESS)); // NaN >= NaN should fail. |
| 1242 } | |
| 1243 } | 1226 } |
| 1244 __ Ret(); | |
| 1245 } | 1227 } |
| 1246 // No fall through here. | 1228 __ Ret(); |
| 1247 } | 1229 } |
| 1230 // No fall through here. |
| 1248 | 1231 |
| 1249 __ bind(&not_identical); | 1232 __ bind(&not_identical); |
| 1250 } | 1233 } |
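
The exponent/mantissa test in the heap_number block above is the classic bit-pattern NaN check. A minimal C++ sketch of the predicate, assuming the standard IEEE-754 double layout (an illustration, not V8 code):

```cpp
#include <cstdint>
#include <cstring>

// A double is NaN iff its exponent bits (52..62) are all ones and its
// mantissa bits (0..51) are not all zero; an all-ones exponent with a
// zero mantissa is an Infinity, which compares equal to itself.
bool IsNaNBitPattern(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  bool exponent_all_ones = ((bits >> 52) & 0x7FF) == 0x7FF;
  bool mantissa_nonzero = (bits & ((uint64_t{1} << 52) - 1)) != 0;
  return exponent_all_ones && mantissa_nonzero;
}
```
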
| 1251 | 1234 |
| 1252 | 1235 |
| 1253 // See comment at call site. | 1236 // See comment at call site. |
| 1254 static void EmitSmiNonsmiComparison(MacroAssembler* masm, | 1237 static void EmitSmiNonsmiComparison(MacroAssembler* masm, |
| 1255 Register lhs, | 1238 Register lhs, |
| 1256 Register rhs, | 1239 Register rhs, |
| 1257 Label* lhs_not_nan, | 1240 Label* lhs_not_nan, |
| (...skipping 413 matching lines...) |
| 1671 GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime); | 1654 GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime); |
| 1672 __ add(sp, sp, Operand(1 * kPointerSize)); | 1655 __ add(sp, sp, Operand(1 * kPointerSize)); |
| 1673 __ Ret(); | 1656 __ Ret(); |
| 1674 | 1657 |
| 1675 __ bind(&runtime); | 1658 __ bind(&runtime); |
| 1676 // Handle number to string in the runtime system if not found in the cache. | 1659 // Handle number to string in the runtime system if not found in the cache. |
| 1677 __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1); | 1660 __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1); |
| 1678 } | 1661 } |
| 1679 | 1662 |
| 1680 | 1663 |
| 1681 // On entry lhs_ and rhs_ are the values to be compared. | 1664 static void ICCompareStub_CheckInputType(MacroAssembler* masm, |
| 1665 Register input, |
| 1666 Register scratch, |
| 1667 CompareIC::State expected, |
| 1668 Label* fail) { |
| 1669 Label ok; |
| 1670 if (expected == CompareIC::SMI) { |
| 1671 __ JumpIfNotSmi(input, fail); |
| 1672 } else if (expected == CompareIC::HEAP_NUMBER) { |
| 1673 __ JumpIfSmi(input, &ok); |
| 1674 __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail, |
| 1675 DONT_DO_SMI_CHECK); |
| 1676 } |
| 1677 // We could be strict about symbol/string here, but as long as |
| 1678 // hydrogen doesn't care, the stub doesn't have to care either. |
| 1679 __ bind(&ok); |
| 1680 } |
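
The guard added here re-checks that both inputs still match the state this CompareIC stub was specialized for, bailing to the miss handler (which re-patches the IC) otherwise. A rough restatement of the predicate in plain C++, with hypothetical names:

```cpp
// Hypothetical restatement of ICCompareStub_CheckInputType: true means the
// input is still covered by the state the stub was compiled for.
enum class CompareState { SMI, HEAP_NUMBER, GENERIC };

bool InputMatchesState(bool is_smi, bool is_heap_number,
                       CompareState expected) {
  switch (expected) {
    case CompareState::SMI:
      return is_smi;                    // Only smis stay on the fast path.
    case CompareState::HEAP_NUMBER:
      return is_smi || is_heap_number;  // A smi is fine; it converts.
    default:
      return true;                      // Generic handles anything.
  }
}
```
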
| 1681 |
| 1682 |
| 1683 // On entry r1 and r2 are the values to be compared. |
| 1682 // On exit r0 is 0, positive or negative to indicate the result of | 1684 // On exit r0 is 0, positive or negative to indicate the result of |
| 1683 // the comparison. | 1685 // the comparison. |
| 1684 void CompareStub::Generate(MacroAssembler* masm) { | 1686 void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { |
| 1685 ASSERT((lhs_.is(r0) && rhs_.is(r1)) || | 1687 Register lhs = r1; |
| 1686 (lhs_.is(r1) && rhs_.is(r0))); | 1688 Register rhs = r0; |
| 1689 Condition cc = GetCondition(); |
| 1690 |
| 1691 Label miss; |
| 1692 ICCompareStub_CheckInputType(masm, lhs, r2, left_, &miss); |
| 1693 ICCompareStub_CheckInputType(masm, rhs, r3, right_, &miss); |
| 1687 | 1694 |
| 1688 Label slow; // Call builtin. | 1695 Label slow; // Call builtin. |
| 1689 Label not_smis, both_loaded_as_doubles, lhs_not_nan; | 1696 Label not_smis, both_loaded_as_doubles, lhs_not_nan; |
| 1690 | 1697 |
| 1691 if (include_smi_compare_) { | 1698 Label not_two_smis, smi_done; |
| 1692 Label not_two_smis, smi_done; | 1699 __ orr(r2, r1, r0); |
| 1693 __ orr(r2, r1, r0); | 1700 __ JumpIfNotSmi(r2, &not_two_smis); |
| 1694 __ JumpIfNotSmi(r2, &not_two_smis); | 1701 __ mov(r1, Operand(r1, ASR, 1)); |
| 1695 __ mov(r1, Operand(r1, ASR, 1)); | 1702 __ sub(r0, r1, Operand(r0, ASR, 1)); |
| 1696 __ sub(r0, r1, Operand(r0, ASR, 1)); | 1703 __ Ret(); |
| 1697 __ Ret(); | 1704 __ bind(&not_two_smis); |
| 1698 __ bind(&not_two_smis); | |
| 1699 } else if (FLAG_debug_code) { | |
| 1700 __ orr(r2, r1, r0); | |
| 1701 __ tst(r2, Operand(kSmiTagMask)); | |
| 1702 __ Assert(ne, "CompareStub: unexpected smi operands."); | |
| 1703 } | |
| 1704 | 1705 |
| 1705 // NOTICE! This code is only reached after a smi-fast-case check, so | 1706 // NOTICE! This code is only reached after a smi-fast-case check, so |
| 1706 // it is certain that at least one operand isn't a smi. | 1707 // it is certain that at least one operand isn't a smi. |
| 1707 | 1708 |
| 1708 // Handle the case where the objects are identical. Either returns the answer | 1709 // Handle the case where the objects are identical. Either returns the answer |
| 1709 // or goes to slow. Only falls through if the objects were not identical. | 1710 // or goes to slow. Only falls through if the objects were not identical. |
| 1710 EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_); | 1711 EmitIdenticalObjectComparison(masm, &slow, cc); |
| 1711 | 1712 |
| 1712 // If either is a Smi (we know that not both are), then they can only | 1713 // If either is a Smi (we know that not both are), then they can only |
| 1713 // be strictly equal if the other is a HeapNumber. | 1714 // be strictly equal if the other is a HeapNumber. |
| 1714 STATIC_ASSERT(kSmiTag == 0); | 1715 STATIC_ASSERT(kSmiTag == 0); |
| 1715 ASSERT_EQ(0, Smi::FromInt(0)); | 1716 ASSERT_EQ(0, Smi::FromInt(0)); |
| 1716 __ and_(r2, lhs_, Operand(rhs_)); | 1717 __ and_(r2, lhs, Operand(rhs)); |
| 1717 __ JumpIfNotSmi(r2, &not_smis); | 1718 __ JumpIfNotSmi(r2, &not_smis); |
| 1718 // One operand is a smi. EmitSmiNonsmiComparison generates code that can: | 1719 // One operand is a smi. EmitSmiNonsmiComparison generates code that can: |
| 1719 // 1) Return the answer. | 1720 // 1) Return the answer. |
| 1720 // 2) Go to slow. | 1721 // 2) Go to slow. |
| 1721 // 3) Fall through to both_loaded_as_doubles. | 1722 // 3) Fall through to both_loaded_as_doubles. |
| 1722 // 4) Jump to lhs_not_nan. | 1723 // 4) Jump to lhs_not_nan. |
| 1723 // In cases 3 and 4 we have found out we were dealing with a number-number | 1724 // In cases 3 and 4 we have found out we were dealing with a number-number |
| 1724 // comparison. If VFP3 is supported the double values of the numbers have | 1725 // comparison. If VFP3 is supported the double values of the numbers have |
| 1725 // been loaded into d7 and d6. Otherwise, the double values have been loaded | 1726 // been loaded into d7 and d6. Otherwise, the double values have been loaded |
| 1726 // into r0, r1, r2, and r3. | 1727 // into r0, r1, r2, and r3. |
| 1727 EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_); | 1728 EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict()); |
| 1728 | 1729 |
| 1729 __ bind(&both_loaded_as_doubles); | 1730 __ bind(&both_loaded_as_doubles); |
| 1730 // The arguments have been converted to doubles and stored in d6 and d7, if | 1731 // The arguments have been converted to doubles and stored in d6 and d7, if |
| 1731 // VFP3 is supported, or in r0, r1, r2, and r3. | 1732 // VFP3 is supported, or in r0, r1, r2, and r3. |
| 1732 Isolate* isolate = masm->isolate(); | 1733 Isolate* isolate = masm->isolate(); |
| 1733 if (CpuFeatures::IsSupported(VFP2)) { | 1734 if (CpuFeatures::IsSupported(VFP2)) { |
| 1734 __ bind(&lhs_not_nan); | 1735 __ bind(&lhs_not_nan); |
| 1735 CpuFeatures::Scope scope(VFP2); | 1736 CpuFeatures::Scope scope(VFP2); |
| 1736 Label no_nan; | 1737 Label no_nan; |
| 1737 // ARMv7 VFP3 instructions to implement double precision comparison. | 1738 // ARMv7 VFP3 instructions to implement double precision comparison. |
| 1738 __ VFPCompareAndSetFlags(d7, d6); | 1739 __ VFPCompareAndSetFlags(d7, d6); |
| 1739 Label nan; | 1740 Label nan; |
| 1740 __ b(vs, &nan); | 1741 __ b(vs, &nan); |
| 1741 __ mov(r0, Operand(EQUAL), LeaveCC, eq); | 1742 __ mov(r0, Operand(EQUAL), LeaveCC, eq); |
| 1742 __ mov(r0, Operand(LESS), LeaveCC, lt); | 1743 __ mov(r0, Operand(LESS), LeaveCC, lt); |
| 1743 __ mov(r0, Operand(GREATER), LeaveCC, gt); | 1744 __ mov(r0, Operand(GREATER), LeaveCC, gt); |
| 1744 __ Ret(); | 1745 __ Ret(); |
| 1745 | 1746 |
| 1746 __ bind(&nan); | 1747 __ bind(&nan); |
| 1747 // If one of the sides was a NaN then the v flag is set. Load r0 with | 1748 // If one of the sides was a NaN then the v flag is set. Load r0 with |
| 1748 // whatever it takes to make the comparison fail, since comparisons with NaN | 1749 // whatever it takes to make the comparison fail, since comparisons with NaN |
| 1749 // always fail. | 1750 // always fail. |
| 1750 if (cc_ == lt || cc_ == le) { | 1751 if (cc == lt || cc == le) { |
| 1751 __ mov(r0, Operand(GREATER)); | 1752 __ mov(r0, Operand(GREATER)); |
| 1752 } else { | 1753 } else { |
| 1753 __ mov(r0, Operand(LESS)); | 1754 __ mov(r0, Operand(LESS)); |
| 1754 } | 1755 } |
| 1755 __ Ret(); | 1756 __ Ret(); |
| 1756 } else { | 1757 } else { |
| 1757 // Checks for NaN in the doubles we have loaded. Can return the answer or | 1758 // Checks for NaN in the doubles we have loaded. Can return the answer or |
| 1758 // fall through if neither is a NaN. Also binds lhs_not_nan. | 1759 // fall through if neither is a NaN. Also binds lhs_not_nan. |
| 1759 EmitNanCheck(masm, &lhs_not_nan, cc_); | 1760 EmitNanCheck(masm, &lhs_not_nan, cc); |
| 1760 // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the | 1761 // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the |
| 1761 // answer. Never falls through. | 1762 // answer. Never falls through. |
| 1762 EmitTwoNonNanDoubleComparison(masm, cc_); | 1763 EmitTwoNonNanDoubleComparison(masm, cc); |
| 1763 } | 1764 } |
| 1764 | 1765 |
| 1765 __ bind(&not_smis); | 1766 __ bind(&not_smis); |
| 1766 // At this point we know we are dealing with two different objects, | 1767 // At this point we know we are dealing with two different objects, |
| 1767 // and neither of them is a Smi. The objects are in rhs_ and lhs_. | 1768 // and neither of them is a Smi. The objects are in rhs_ and lhs_. |
| 1768 if (strict_) { | 1769 if (strict()) { |
| 1769 // This returns non-equal for some object types, or falls through if it | 1770 // This returns non-equal for some object types, or falls through if it |
| 1770 // was not lucky. | 1771 // was not lucky. |
| 1771 EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_); | 1772 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs); |
| 1772 } | 1773 } |
| 1773 | 1774 |
| 1774 Label check_for_symbols; | 1775 Label check_for_symbols; |
| 1775 Label flat_string_check; | 1776 Label flat_string_check; |
| 1776 // Check for heap-number-heap-number comparison. Can jump to slow case, | 1777 // Check for heap-number-heap-number comparison. Can jump to slow case, |
| 1777 // or load both doubles into r0, r1, r2, r3 and jump to the code that handles | 1778 // or load both doubles into r0, r1, r2, r3 and jump to the code that handles |
| 1778 // that case. If the inputs are not doubles then jumps to check_for_symbols. | 1779 // that case. If the inputs are not doubles then jumps to check_for_symbols. |
| 1779 // In this case r2 will contain the type of rhs_. Never falls through. | 1780 // In this case r2 will contain the type of rhs_. Never falls through. |
| 1780 EmitCheckForTwoHeapNumbers(masm, | 1781 EmitCheckForTwoHeapNumbers(masm, |
| 1781 lhs_, | 1782 lhs, |
| 1782 rhs_, | 1783 rhs, |
| 1783 &both_loaded_as_doubles, | 1784 &both_loaded_as_doubles, |
| 1784 &check_for_symbols, | 1785 &check_for_symbols, |
| 1785 &flat_string_check); | 1786 &flat_string_check); |
| 1786 | 1787 |
| 1787 __ bind(&check_for_symbols); | 1788 __ bind(&check_for_symbols); |
| 1788 // In the strict case the EmitStrictTwoHeapObjectCompare already took care of | 1789 // In the strict case the EmitStrictTwoHeapObjectCompare already took care of |
| 1789 // symbols. | 1790 // symbols. |
| 1790 if (cc_ == eq && !strict_) { | 1791 if (cc == eq && !strict()) { |
| 1791 // Returns an answer for two symbols or two detectable objects. | 1792 // Returns an answer for two symbols or two detectable objects. |
| 1792 // Otherwise jumps to string case or not both strings case. | 1793 // Otherwise jumps to string case or not both strings case. |
| 1793 // Assumes that r2 is the type of rhs_ on entry. | 1794 // Assumes that r2 is the type of rhs_ on entry. |
| 1794 EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow); | 1795 EmitCheckForSymbolsOrObjects(masm, lhs, rhs, &flat_string_check, &slow); |
| 1795 } | 1796 } |
| 1796 | 1797 |
| 1797 // Check for both being sequential ASCII strings, and inline if that is the | 1798 // Check for both being sequential ASCII strings, and inline if that is the |
| 1798 // case. | 1799 // case. |
| 1799 __ bind(&flat_string_check); | 1800 __ bind(&flat_string_check); |
| 1800 | 1801 |
| 1801 __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow); | 1802 __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r2, r3, &slow); |
| 1802 | 1803 |
| 1803 __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3); | 1804 __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3); |
| 1804 if (cc_ == eq) { | 1805 if (cc == eq) { |
| 1805 StringCompareStub::GenerateFlatAsciiStringEquals(masm, | 1806 StringCompareStub::GenerateFlatAsciiStringEquals(masm, |
| 1806 lhs_, | 1807 lhs, |
| 1807 rhs_, | 1808 rhs, |
| 1808 r2, | 1809 r2, |
| 1809 r3, | 1810 r3, |
| 1810 r4); | 1811 r4); |
| 1811 } else { | 1812 } else { |
| 1812 StringCompareStub::GenerateCompareFlatAsciiStrings(masm, | 1813 StringCompareStub::GenerateCompareFlatAsciiStrings(masm, |
| 1813 lhs_, | 1814 lhs, |
| 1814 rhs_, | 1815 rhs, |
| 1815 r2, | 1816 r2, |
| 1816 r3, | 1817 r3, |
| 1817 r4, | 1818 r4, |
| 1818 r5); | 1819 r5); |
| 1819 } | 1820 } |
| 1820 // Never falls through to here. | 1821 // Never falls through to here. |
| 1821 | 1822 |
| 1822 __ bind(&slow); | 1823 __ bind(&slow); |
| 1823 | 1824 |
| 1824 __ Push(lhs_, rhs_); | 1825 __ Push(lhs, rhs); |
| 1825 // Figure out which native to call and setup the arguments. | 1826 // Figure out which native to call and setup the arguments. |
| 1826 Builtins::JavaScript native; | 1827 Builtins::JavaScript native; |
| 1827 if (cc_ == eq) { | 1828 if (cc == eq) { |
| 1828 native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS; | 1829 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS; |
| 1829 } else { | 1830 } else { |
| 1830 native = Builtins::COMPARE; | 1831 native = Builtins::COMPARE; |
| 1831 int ncr; // NaN compare result | 1832 int ncr; // NaN compare result |
| 1832 if (cc_ == lt || cc_ == le) { | 1833 if (cc == lt || cc == le) { |
| 1833 ncr = GREATER; | 1834 ncr = GREATER; |
| 1834 } else { | 1835 } else { |
| 1835 ASSERT(cc_ == gt || cc_ == ge); // remaining cases | 1836 ASSERT(cc == gt || cc == ge); // remaining cases |
| 1836 ncr = LESS; | 1837 ncr = LESS; |
| 1837 } | 1838 } |
| 1838 __ mov(r0, Operand(Smi::FromInt(ncr))); | 1839 __ mov(r0, Operand(Smi::FromInt(ncr))); |
| 1839 __ push(r0); | 1840 __ push(r0); |
| 1840 } | 1841 } |
| 1841 | 1842 |
| 1842 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) | 1843 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) |
| 1843 // tagged as a small integer. | 1844 // tagged as a small integer. |
| 1844 __ InvokeBuiltin(native, JUMP_FUNCTION); | 1845 __ InvokeBuiltin(native, JUMP_FUNCTION); |
| 1846 |
| 1847 __ bind(&miss); |
| 1848 GenerateMiss(masm); |
| 1845 } | 1849 } |
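
The `ncr` ("NaN compare result") value pushed for the COMPARE builtin deserves a note: comparisons involving NaN must come out false, so the stub supplies the three-way value that fails the pending condition. A small sketch of the convention, assuming the usual LESS/EQUAL/GREATER encoding of -1/0/1:

```cpp
// Sketch of the NaN-compare-result convention: for lt/le return GREATER
// (so "x < NaN" and "x <= NaN" test false), for gt/ge return LESS.
enum { LESS = -1, EQUAL = 0, GREATER = 1 };

int NanCompareResult(bool cond_is_lt_or_le) {
  return cond_is_lt_or_le ? GREATER : LESS;
}
// e.g. cc == le: the caller tests result <= 0, and GREATER fails that test.
```
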
| 1846 | 1850 |
| 1847 | 1851 |
| 1848 // The stub expects its argument in the tos_ register and returns its result in | 1852 // The stub expects its argument in the tos_ register and returns its result in |
| 1849 // it, too: zero for false, and a non-zero value for true. | 1853 // it, too: zero for false, and a non-zero value for true. |
| 1850 void ToBooleanStub::Generate(MacroAssembler* masm) { | 1854 void ToBooleanStub::Generate(MacroAssembler* masm) { |
| 1851 // This stub overrides SometimesSetsUpAFrame() to return false. That means | 1855 // This stub overrides SometimesSetsUpAFrame() to return false. That means |
| 1852 // we cannot call anything that could cause a GC from this stub. | 1856 // we cannot call anything that could cause a GC from this stub. |
| 1853 Label patch; | 1857 Label patch; |
| 1854 const Register map = r9.is(tos_) ? r7 : r9; | 1858 const Register map = r9.is(tos_) ? r7 : r9; |
| (...skipping 463 matching lines...) |
| 2318 break; | 2322 break; |
| 2319 case Token::BIT_NOT: | 2323 case Token::BIT_NOT: |
| 2320 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); | 2324 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); |
| 2321 break; | 2325 break; |
| 2322 default: | 2326 default: |
| 2323 UNREACHABLE(); | 2327 UNREACHABLE(); |
| 2324 } | 2328 } |
| 2325 } | 2329 } |
| 2326 | 2330 |
| 2327 | 2331 |
| 2332 void BinaryOpStub::Initialize() { |
| 2333 platform_specific_bit_ = CpuFeatures::IsSupported(VFP2); |
| 2334 } |
| 2335 |
| 2336 |
| 2328 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { | 2337 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
| 2329 Label get_result; | 2338 Label get_result; |
| 2330 | 2339 |
| 2331 __ Push(r1, r0); | 2340 __ Push(r1, r0); |
| 2332 | 2341 |
| 2333 __ mov(r2, Operand(Smi::FromInt(MinorKey()))); | 2342 __ mov(r2, Operand(Smi::FromInt(MinorKey()))); |
| 2334 __ mov(r1, Operand(Smi::FromInt(op_))); | 2343 __ push(r2); |
| 2335 __ mov(r0, Operand(Smi::FromInt(operands_type_))); | |
| 2336 __ Push(r2, r1, r0); | |
| 2337 | 2344 |
| 2338 __ TailCallExternalReference( | 2345 __ TailCallExternalReference( |
| 2339 ExternalReference(IC_Utility(IC::kBinaryOp_Patch), | 2346 ExternalReference(IC_Utility(IC::kBinaryOp_Patch), |
| 2340 masm->isolate()), | 2347 masm->isolate()), |
| 2341 5, | 2348 3, |
| 2342 1); | 2349 1); |
| 2343 } | 2350 } |
| 2344 | 2351 |
| 2345 | 2352 |
| 2346 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( | 2353 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( |
| 2347 MacroAssembler* masm) { | 2354 MacroAssembler* masm) { |
| 2348 UNIMPLEMENTED(); | 2355 UNIMPLEMENTED(); |
| 2349 } | 2356 } |
| 2350 | 2357 |
| 2351 | 2358 |
| 2352 void BinaryOpStub::Generate(MacroAssembler* masm) { | 2359 void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm, |
| 2353 // Explicitly allow generation of nested stubs. It is safe here because | 2360 Token::Value op) { |
| 2354 // generation code does not use any raw pointers. | |
| 2355 AllowStubCallsScope allow_stub_calls(masm, true); | |
| 2356 | |
| 2357 switch (operands_type_) { | |
| 2358 case BinaryOpIC::UNINITIALIZED: | |
| 2359 GenerateTypeTransition(masm); | |
| 2360 break; | |
| 2361 case BinaryOpIC::SMI: | |
| 2362 GenerateSmiStub(masm); | |
| 2363 break; | |
| 2364 case BinaryOpIC::INT32: | |
| 2365 GenerateInt32Stub(masm); | |
| 2366 break; | |
| 2367 case BinaryOpIC::HEAP_NUMBER: | |
| 2368 GenerateHeapNumberStub(masm); | |
| 2369 break; | |
| 2370 case BinaryOpIC::ODDBALL: | |
| 2371 GenerateOddballStub(masm); | |
| 2372 break; | |
| 2373 case BinaryOpIC::BOTH_STRING: | |
| 2374 GenerateBothStringStub(masm); | |
| 2375 break; | |
| 2376 case BinaryOpIC::STRING: | |
| 2377 GenerateStringStub(masm); | |
| 2378 break; | |
| 2379 case BinaryOpIC::GENERIC: | |
| 2380 GenerateGeneric(masm); | |
| 2381 break; | |
| 2382 default: | |
| 2383 UNREACHABLE(); | |
| 2384 } | |
| 2385 } | |
| 2386 | |
| 2387 | |
| 2388 void BinaryOpStub::PrintName(StringStream* stream) { | |
| 2389 const char* op_name = Token::Name(op_); | |
| 2390 const char* overwrite_name; | |
| 2391 switch (mode_) { | |
| 2392 case NO_OVERWRITE: overwrite_name = "Alloc"; break; | |
| 2393 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; | |
| 2394 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; | |
| 2395 default: overwrite_name = "UnknownOverwrite"; break; | |
| 2396 } | |
| 2397 stream->Add("BinaryOpStub_%s_%s_%s", | |
| 2398 op_name, | |
| 2399 overwrite_name, | |
| 2400 BinaryOpIC::GetName(operands_type_)); | |
| 2401 } | |
| 2402 | |
| 2403 | |
| 2404 void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) { | |
| 2405 Register left = r1; | 2361 Register left = r1; |
| 2406 Register right = r0; | 2362 Register right = r0; |
| 2407 Register scratch1 = r7; | 2363 Register scratch1 = r7; |
| 2408 Register scratch2 = r9; | 2364 Register scratch2 = r9; |
| 2409 | 2365 |
| 2410 ASSERT(right.is(r0)); | 2366 ASSERT(right.is(r0)); |
| 2411 STATIC_ASSERT(kSmiTag == 0); | 2367 STATIC_ASSERT(kSmiTag == 0); |
| 2412 | 2368 |
| 2413 Label not_smi_result; | 2369 Label not_smi_result; |
| 2414 switch (op_) { | 2370 switch (op) { |
| 2415 case Token::ADD: | 2371 case Token::ADD: |
| 2416 __ add(right, left, Operand(right), SetCC); // Add optimistically. | 2372 __ add(right, left, Operand(right), SetCC); // Add optimistically. |
| 2417 __ Ret(vc); | 2373 __ Ret(vc); |
| 2418 __ sub(right, right, Operand(left)); // Revert optimistic add. | 2374 __ sub(right, right, Operand(left)); // Revert optimistic add. |
| 2419 break; | 2375 break; |
| 2420 case Token::SUB: | 2376 case Token::SUB: |
| 2421 __ sub(right, left, Operand(right), SetCC); // Subtract optimistically. | 2377 __ sub(right, left, Operand(right), SetCC); // Subtract optimistically. |
| 2422 __ Ret(vc); | 2378 __ Ret(vc); |
| 2423 __ sub(right, left, Operand(right)); // Revert optimistic subtract. | 2379 __ sub(right, left, Operand(right)); // Revert optimistic subtract. |
| 2424 break; | 2380 break; |
| (...skipping 94 matching lines...) |
| 2519 __ SmiTag(right, scratch1); | 2475 __ SmiTag(right, scratch1); |
| 2520 __ Ret(); | 2476 __ Ret(); |
| 2521 break; | 2477 break; |
| 2522 default: | 2478 default: |
| 2523 UNREACHABLE(); | 2479 UNREACHABLE(); |
| 2524 } | 2480 } |
| 2525 __ bind(&not_smi_result); | 2481 __ bind(&not_smi_result); |
| 2526 } | 2482 } |
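
The ADD/SUB cases above are "optimistic": they operate directly on the tagged words with SetCC and return on no-overflow (`Ret(vc)`), reverting otherwise. This works because a 32-bit smi is the value shifted left by one with a zero tag bit, so tagged addition is untagged addition, and overflow of the tagged add is exactly the not-a-smi case. A hedged C++ sketch using a GCC/Clang builtin:

```cpp
#include <cstdint>

// Sketch of the optimistic smi ADD: returns true (and the tagged sum) when
// the result is still a valid smi, false when the stub would revert and
// fall through to the heap-number path.
bool OptimisticSmiAdd(int32_t left_tagged, int32_t right_tagged,
                      int32_t* result_tagged) {
  // __builtin_add_overflow is GCC/Clang-specific; it plays the role of the
  // SetCC + vc-condition pair in the assembly above.
  return !__builtin_add_overflow(left_tagged, right_tagged, result_tagged);
}
```
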
| 2527 | 2483 |
| 2528 | 2484 |
| 2529 void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, | 2485 void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, |
| 2530 bool smi_operands, | 2486 Register result, |
| 2531 Label* not_numbers, | 2487 Register heap_number_map, |
| 2532 Label* gc_required) { | 2488 Register scratch1, |
| 2489 Register scratch2, |
| 2490 Label* gc_required, |
| 2491 OverwriteMode mode); |
| 2492 |
| 2493 |
| 2494 void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, |
| 2495 BinaryOpIC::TypeInfo left_type, |
| 2496 BinaryOpIC::TypeInfo right_type, |
| 2497 bool smi_operands, |
| 2498 Label* not_numbers, |
| 2499 Label* gc_required, |
| 2500 Label* miss, |
| 2501 Token::Value op, |
| 2502 OverwriteMode mode) { |
| 2533 Register left = r1; | 2503 Register left = r1; |
| 2534 Register right = r0; | 2504 Register right = r0; |
| 2535 Register scratch1 = r7; | 2505 Register scratch1 = r7; |
| 2536 Register scratch2 = r9; | 2506 Register scratch2 = r9; |
| 2537 Register scratch3 = r4; | 2507 Register scratch3 = r4; |
| 2538 | 2508 |
| 2539 ASSERT(smi_operands || (not_numbers != NULL)); | 2509 ASSERT(smi_operands || (not_numbers != NULL)); |
| 2540 if (smi_operands) { | 2510 if (smi_operands) { |
| 2541 __ AssertSmi(left); | 2511 __ AssertSmi(left); |
| 2542 __ AssertSmi(right); | 2512 __ AssertSmi(right); |
| 2543 } | 2513 } |
| 2514 if (left_type == BinaryOpIC::SMI) { |
| 2515 __ JumpIfNotSmi(left, miss); |
| 2516 } |
| 2517 if (right_type == BinaryOpIC::SMI) { |
| 2518 __ JumpIfNotSmi(right, miss); |
| 2519 } |
| 2544 | 2520 |
| 2545 Register heap_number_map = r6; | 2521 Register heap_number_map = r6; |
| 2546 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 2522 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 2547 | 2523 |
| 2548 switch (op_) { | 2524 switch (op) { |
| 2549 case Token::ADD: | 2525 case Token::ADD: |
| 2550 case Token::SUB: | 2526 case Token::SUB: |
| 2551 case Token::MUL: | 2527 case Token::MUL: |
| 2552 case Token::DIV: | 2528 case Token::DIV: |
| 2553 case Token::MOD: { | 2529 case Token::MOD: { |
| 2554 // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 | 2530 // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 |
| 2555 // depending on whether VFP3 is available or not. | 2531 // depending on whether VFP3 is available or not. |
| 2556 FloatingPointHelper::Destination destination = | 2532 FloatingPointHelper::Destination destination = |
| 2557 CpuFeatures::IsSupported(VFP2) && | 2533 CpuFeatures::IsSupported(VFP2) && |
| 2558 op_ != Token::MOD ? | 2534 op != Token::MOD ? |
| 2559 FloatingPointHelper::kVFPRegisters : | 2535 FloatingPointHelper::kVFPRegisters : |
| 2560 FloatingPointHelper::kCoreRegisters; | 2536 FloatingPointHelper::kCoreRegisters; |
| 2561 | 2537 |
| 2562 // Allocate new heap number for result. | 2538 // Allocate new heap number for result. |
| 2563 Register result = r5; | 2539 Register result = r5; |
| 2564 GenerateHeapResultAllocation( | 2540 BinaryOpStub_GenerateHeapResultAllocation( |
| 2565 masm, result, heap_number_map, scratch1, scratch2, gc_required); | 2541 masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); |
| 2566 | 2542 |
| 2567 // Load the operands. | 2543 // Load the operands. |
| 2568 if (smi_operands) { | 2544 if (smi_operands) { |
| 2569 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2); | 2545 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2); |
| 2570 } else { | 2546 } else { |
| 2571 FloatingPointHelper::LoadOperands(masm, | 2547 // Load right operand to d7 or r2/r3. |
| 2572 destination, | 2548 if (right_type == BinaryOpIC::INT32) { |
| 2573 heap_number_map, | 2549 FloatingPointHelper::LoadNumberAsInt32Double( |
| 2574 scratch1, | 2550 masm, right, destination, d7, d8, r2, r3, heap_number_map, |
| 2575 scratch2, | 2551 scratch1, scratch2, s0, miss); |
| 2576 not_numbers); | 2552 } else { |
| 2553 Label* fail = (right_type == BinaryOpIC::HEAP_NUMBER) ? miss |
| 2554 : not_numbers; |
| 2555 FloatingPointHelper::LoadNumber( |
| 2556 masm, destination, right, d7, r2, r3, heap_number_map, |
| 2557 scratch1, scratch2, fail); |
| 2558 } |
| 2559 // Load left operand to d6 or r0/r1. This keeps r0/r1 intact if it |
| 2560 // jumps to |miss|. |
| 2561 if (left_type == BinaryOpIC::INT32) { |
| 2562 FloatingPointHelper::LoadNumberAsInt32Double( |
| 2563 masm, left, destination, d6, d8, r0, r1, heap_number_map, |
| 2564 scratch1, scratch2, s0, miss); |
| 2565 } else { |
| 2566 Label* fail = (left_type == BinaryOpIC::HEAP_NUMBER) ? miss |
| 2567 : not_numbers; |
| 2568 FloatingPointHelper::LoadNumber( |
| 2569 masm, destination, left, d6, r0, r1, heap_number_map, |
| 2570 scratch1, scratch2, fail); |
| 2571 } |
| 2577 } | 2572 } |
| 2578 | 2573 |
| 2579 // Calculate the result. | 2574 // Calculate the result. |
| 2580 if (destination == FloatingPointHelper::kVFPRegisters) { | 2575 if (destination == FloatingPointHelper::kVFPRegisters) { |
| 2581 // Using VFP registers: | 2576 // Using VFP registers: |
| 2582 // d6: Left value | 2577 // d6: Left value |
| 2583 // d7: Right value | 2578 // d7: Right value |
| 2584 CpuFeatures::Scope scope(VFP2); | 2579 CpuFeatures::Scope scope(VFP2); |
| 2585 switch (op_) { | 2580 switch (op) { |
| 2586 case Token::ADD: | 2581 case Token::ADD: |
| 2587 __ vadd(d5, d6, d7); | 2582 __ vadd(d5, d6, d7); |
| 2588 break; | 2583 break; |
| 2589 case Token::SUB: | 2584 case Token::SUB: |
| 2590 __ vsub(d5, d6, d7); | 2585 __ vsub(d5, d6, d7); |
| 2591 break; | 2586 break; |
| 2592 case Token::MUL: | 2587 case Token::MUL: |
| 2593 __ vmul(d5, d6, d7); | 2588 __ vmul(d5, d6, d7); |
| 2594 break; | 2589 break; |
| 2595 case Token::DIV: | 2590 case Token::DIV: |
| 2596 __ vdiv(d5, d6, d7); | 2591 __ vdiv(d5, d6, d7); |
| 2597 break; | 2592 break; |
| 2598 default: | 2593 default: |
| 2599 UNREACHABLE(); | 2594 UNREACHABLE(); |
| 2600 } | 2595 } |
| 2601 | 2596 |
| 2602 __ sub(r0, result, Operand(kHeapObjectTag)); | 2597 __ sub(r0, result, Operand(kHeapObjectTag)); |
| 2603 __ vstr(d5, r0, HeapNumber::kValueOffset); | 2598 __ vstr(d5, r0, HeapNumber::kValueOffset); |
| 2604 __ add(r0, r0, Operand(kHeapObjectTag)); | 2599 __ add(r0, r0, Operand(kHeapObjectTag)); |
| 2605 __ Ret(); | 2600 __ Ret(); |
| 2606 } else { | 2601 } else { |
| 2607 // Call the C function to handle the double operation. | 2602 // Call the C function to handle the double operation. |
| 2608 FloatingPointHelper::CallCCodeForDoubleOperation(masm, | 2603 FloatingPointHelper::CallCCodeForDoubleOperation(masm, |
| 2609 op_, | 2604 op, |
| 2610 result, | 2605 result, |
| 2611 scratch1); | 2606 scratch1); |
| 2612 if (FLAG_debug_code) { | 2607 if (FLAG_debug_code) { |
| 2613 __ stop("Unreachable code."); | 2608 __ stop("Unreachable code."); |
| 2614 } | 2609 } |
| 2615 } | 2610 } |
| 2616 break; | 2611 break; |
| 2617 } | 2612 } |
| 2618 case Token::BIT_OR: | 2613 case Token::BIT_OR: |
| 2619 case Token::BIT_XOR: | 2614 case Token::BIT_XOR: |
| (...skipping 20 matching lines...) |
| 2640 r2, | 2635 r2, |
| 2641 heap_number_map, | 2636 heap_number_map, |
| 2642 scratch1, | 2637 scratch1, |
| 2643 scratch2, | 2638 scratch2, |
| 2644 scratch3, | 2639 scratch3, |
| 2645 d0, | 2640 d0, |
| 2646 not_numbers); | 2641 not_numbers); |
| 2647 } | 2642 } |
| 2648 | 2643 |
| 2649 Label result_not_a_smi; | 2644 Label result_not_a_smi; |
| 2650 switch (op_) { | 2645 switch (op) { |
| 2651 case Token::BIT_OR: | 2646 case Token::BIT_OR: |
| 2652 __ orr(r2, r3, Operand(r2)); | 2647 __ orr(r2, r3, Operand(r2)); |
| 2653 break; | 2648 break; |
| 2654 case Token::BIT_XOR: | 2649 case Token::BIT_XOR: |
| 2655 __ eor(r2, r3, Operand(r2)); | 2650 __ eor(r2, r3, Operand(r2)); |
| 2656 break; | 2651 break; |
| 2657 case Token::BIT_AND: | 2652 case Token::BIT_AND: |
| 2658 __ and_(r2, r3, Operand(r2)); | 2653 __ and_(r2, r3, Operand(r2)); |
| 2659 break; | 2654 break; |
| 2660 case Token::SAR: | 2655 case Token::SAR: |
| (...skipping 30 matching lines...) |
| 2691 __ SmiTag(r0, r2); | 2686 __ SmiTag(r0, r2); |
| 2692 __ Ret(); | 2687 __ Ret(); |
| 2693 | 2688 |
| 2694 // Allocate new heap number for result. | 2689 // Allocate new heap number for result. |
| 2695 __ bind(&result_not_a_smi); | 2690 __ bind(&result_not_a_smi); |
| 2696 Register result = r5; | 2691 Register result = r5; |
| 2697 if (smi_operands) { | 2692 if (smi_operands) { |
| 2698 __ AllocateHeapNumber( | 2693 __ AllocateHeapNumber( |
| 2699 result, scratch1, scratch2, heap_number_map, gc_required); | 2694 result, scratch1, scratch2, heap_number_map, gc_required); |
| 2700 } else { | 2695 } else { |
| 2701 GenerateHeapResultAllocation( | 2696 BinaryOpStub_GenerateHeapResultAllocation( |
| 2702 masm, result, heap_number_map, scratch1, scratch2, gc_required); | 2697 masm, result, heap_number_map, scratch1, scratch2, gc_required, |
| 2698 mode); |
| 2703 } | 2699 } |
| 2704 | 2700 |
| 2705 // r2: Answer as signed int32. | 2701 // r2: Answer as signed int32. |
| 2706 // r5: Heap number to write answer into. | 2702 // r5: Heap number to write answer into. |
| 2707 | 2703 |
| 2708 // Nothing can go wrong now, so move the heap number to r0, which is the | 2704 // Nothing can go wrong now, so move the heap number to r0, which is the |
| 2709 // result. | 2705 // result. |
| 2710 __ mov(r0, Operand(r5)); | 2706 __ mov(r0, Operand(r5)); |
| 2711 | 2707 |
| 2712 if (CpuFeatures::IsSupported(VFP2)) { | 2708 if (CpuFeatures::IsSupported(VFP2)) { |
| 2713 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As | 2709 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As |
| 2714 // mentioned above SHR needs to always produce a positive result. | 2710 // mentioned above SHR needs to always produce a positive result. |
| 2715 CpuFeatures::Scope scope(VFP2); | 2711 CpuFeatures::Scope scope(VFP2); |
| 2716 __ vmov(s0, r2); | 2712 __ vmov(s0, r2); |
| 2717 if (op_ == Token::SHR) { | 2713 if (op == Token::SHR) { |
| 2718 __ vcvt_f64_u32(d0, s0); | 2714 __ vcvt_f64_u32(d0, s0); |
| 2719 } else { | 2715 } else { |
| 2720 __ vcvt_f64_s32(d0, s0); | 2716 __ vcvt_f64_s32(d0, s0); |
| 2721 } | 2717 } |
| 2722 __ sub(r3, r0, Operand(kHeapObjectTag)); | 2718 __ sub(r3, r0, Operand(kHeapObjectTag)); |
| 2723 __ vstr(d0, r3, HeapNumber::kValueOffset); | 2719 __ vstr(d0, r3, HeapNumber::kValueOffset); |
| 2724 __ Ret(); | 2720 __ Ret(); |
| 2725 } else { | 2721 } else { |
| 2726 // Tail call that writes the int32 in r2 to the heap number in r0, using | 2722 // Tail call that writes the int32 in r2 to the heap number in r0, using |
| 2727 // r3 as scratch. r0 is preserved and returned. | 2723 // r3 as scratch. r0 is preserved and returned. |
| 2728 WriteInt32ToHeapNumberStub stub(r2, r0, r3); | 2724 WriteInt32ToHeapNumberStub stub(r2, r0, r3); |
| 2729 __ TailCallStub(&stub); | 2725 __ TailCallStub(&stub); |
| 2730 } | 2726 } |
| 2731 break; | 2727 break; |
| 2732 } | 2728 } |
| 2733 default: | 2729 default: |
| 2734 UNREACHABLE(); | 2730 UNREACHABLE(); |
| 2735 } | 2731 } |
| 2736 } | 2732 } |
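
One pattern recurs throughout this function: `sub(..., kHeapObjectTag)` before `vstr`/`vldr`, then adding the tag back. That is the tagged-pointer arithmetic behind FieldMemOperand: V8 heap pointers carry a low tag bit, so a field's real address is the pointer minus the tag plus the field offset. A tiny illustrative sketch (assuming kHeapObjectTag == 1, as in V8):

```cpp
#include <cstdint>

// Sketch of the FieldMemOperand computation: untag the heap pointer and
// add the field offset to get a plain machine address.
const intptr_t kHeapObjectTag = 1;

intptr_t FieldAddress(intptr_t tagged_pointer, int field_offset) {
  return tagged_pointer - kHeapObjectTag + field_offset;
}
```
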
| 2737 | 2733 |
| 2738 | 2734 |
| 2739 // Generate the smi code. If the operation on smis is successful this return is | 2735 // Generate the smi code. If the operation on smis is successful this return is |
| 2740 // generated. If the result is not a smi and heap number allocation is not | 2736 // generated. If the result is not a smi and heap number allocation is not |
| 2741 // requested the code falls through. If number allocation is requested but a | 2737 // requested the code falls through. If number allocation is requested but a |
| 2742 // heap number cannot be allocated the code jumps to the lable gc_required. | 2738 // heap number cannot be allocated the code jumps to the label gc_required. |
| 2743 void BinaryOpStub::GenerateSmiCode( | 2739 void BinaryOpStub_GenerateSmiCode( |
| 2744 MacroAssembler* masm, | 2740 MacroAssembler* masm, |
| 2745 Label* use_runtime, | 2741 Label* use_runtime, |
| 2746 Label* gc_required, | 2742 Label* gc_required, |
| 2747 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { | 2743 Token::Value op, |
| 2744 BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, |
| 2745 OverwriteMode mode) { |
| 2748 Label not_smis; | 2746 Label not_smis; |
| 2749 | 2747 |
| 2750 Register left = r1; | 2748 Register left = r1; |
| 2751 Register right = r0; | 2749 Register right = r0; |
| 2752 Register scratch1 = r7; | 2750 Register scratch1 = r7; |
| 2753 | 2751 |
| 2754 // Perform combined smi check on both operands. | 2752 // Perform combined smi check on both operands. |
| 2755 __ orr(scratch1, left, Operand(right)); | 2753 __ orr(scratch1, left, Operand(right)); |
| 2756 STATIC_ASSERT(kSmiTag == 0); | 2754 STATIC_ASSERT(kSmiTag == 0); |
| 2757 __ JumpIfNotSmi(scratch1, &not_smis); | 2755 __ JumpIfNotSmi(scratch1, &not_smis); |
| 2758 | 2756 |
| 2759 // If the smi-smi operation results in a smi, a return is generated. | 2757 // If the smi-smi operation results in a smi, a return is generated. |
| 2760 GenerateSmiSmiOperation(masm); | 2758 BinaryOpStub_GenerateSmiSmiOperation(masm, op); |
| 2761 | 2759 |
| 2762 // If heap number results are possible generate the result in an allocated | 2760 // If heap number results are possible generate the result in an allocated |
| 2763 // heap number. | 2761 // heap number. |
| 2764 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) { | 2762 if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) { |
| 2765 GenerateFPOperation(masm, true, use_runtime, gc_required); | 2763 BinaryOpStub_GenerateFPOperation( |
| 2764 masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true, |
| 2765 use_runtime, gc_required, ¬_smis, op, mode); |
| 2766 } | 2766 } |
| 2767 __ bind(&not_smis); | 2767 __ bind(&not_smis); |
| 2768 } | 2768 } |
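
The combined smi check at the top of this helper is a standard tag trick: with kSmiTag == 0 a smi has a clear low bit, so OR-ing the two tagged words and testing one bit covers both operands at once. Sketch:

```cpp
#include <cstdint>

// Sketch of the combined smi check: (left | right) has the tag bit set iff
// at least one operand is a heap object (smis have a zero low bit).
bool BothAreSmis(uint32_t left_tagged, uint32_t right_tagged) {
  const uint32_t kSmiTagMask = 1;
  return ((left_tagged | right_tagged) & kSmiTagMask) == 0;
}
```
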
| 2769 | 2769 |
| 2770 | 2770 |
| 2771 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { | 2771 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { |
| 2772 Label not_smis, call_runtime; | 2772 Label not_smis, call_runtime; |
| 2773 | 2773 |
| 2774 if (result_type_ == BinaryOpIC::UNINITIALIZED || | 2774 if (result_type_ == BinaryOpIC::UNINITIALIZED || |
| 2775 result_type_ == BinaryOpIC::SMI) { | 2775 result_type_ == BinaryOpIC::SMI) { |
| 2776 // Only allow smi results. | 2776 // Only allow smi results. |
| 2777 GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS); | 2777 BinaryOpStub_GenerateSmiCode( |
| 2778 masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_); |
| 2778 } else { | 2779 } else { |
| 2779 // Allow heap number result and don't make a transition if a heap number | 2780 // Allow heap number result and don't make a transition if a heap number |
| 2780 // cannot be allocated. | 2781 // cannot be allocated. |
| 2781 GenerateSmiCode(masm, | 2782 BinaryOpStub_GenerateSmiCode( |
| 2782 &call_runtime, | 2783 masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, |
| 2783 &call_runtime, | 2784 mode_); |
| 2784 ALLOW_HEAPNUMBER_RESULTS); | |
| 2785 } | 2785 } |
| 2786 | 2786 |
| 2787 // Code falls through if the result is not returned as either a smi or heap | 2787 // Code falls through if the result is not returned as either a smi or heap |
| 2788 // number. | 2788 // number. |
| 2789 GenerateTypeTransition(masm); | 2789 GenerateTypeTransition(masm); |
| 2790 | 2790 |
| 2791 __ bind(&call_runtime); | 2791 __ bind(&call_runtime); |
| 2792 GenerateRegisterArgsPush(masm); |
| 2792 GenerateCallRuntime(masm); | 2793 GenerateCallRuntime(masm); |
| 2793 } | 2794 } |
| 2794 | 2795 |
| 2795 | 2796 |
| 2796 void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) { | |
| 2797 ASSERT(operands_type_ == BinaryOpIC::STRING); | |
| 2798 ASSERT(op_ == Token::ADD); | |
| 2799 // Try to add arguments as strings, otherwise, transition to the generic | |
| 2800 // BinaryOpIC type. | |
| 2801 GenerateAddStrings(masm); | |
| 2802 GenerateTypeTransition(masm); | |
| 2803 } | |
| 2804 | |
| 2805 | |
| 2806 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { | 2797 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { |
| 2807 Label call_runtime; | 2798 Label call_runtime; |
| 2808 ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING); | 2799 ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING); |
| 2809 ASSERT(op_ == Token::ADD); | 2800 ASSERT(op_ == Token::ADD); |
| 2810 // If both arguments are strings, call the string add stub. | 2801 // If both arguments are strings, call the string add stub. |
| 2811 // Otherwise, do a transition. | 2802 // Otherwise, do a transition. |
| 2812 | 2803 |
| 2813 // Registers containing left and right operands respectively. | 2804 // Registers containing left and right operands respectively. |
| 2814 Register left = r1; | 2805 Register left = r1; |
| 2815 Register right = r0; | 2806 Register right = r0; |
| 2816 | 2807 |
| 2817 // Test if left operand is a string. | 2808 // Test if left operand is a string. |
| 2818 __ JumpIfSmi(left, &call_runtime); | 2809 __ JumpIfSmi(left, &call_runtime); |
| 2819 __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE); | 2810 __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE); |
| 2820 __ b(ge, &call_runtime); | 2811 __ b(ge, &call_runtime); |
| 2821 | 2812 |
| 2822 // Test if right operand is a string. | 2813 // Test if right operand is a string. |
| 2823 __ JumpIfSmi(right, &call_runtime); | 2814 __ JumpIfSmi(right, &call_runtime); |
| 2824 __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE); | 2815 __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE); |
| 2825 __ b(ge, &call_runtime); | 2816 __ b(ge, &call_runtime); |
| 2826 | 2817 |
| 2827 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); | 2818 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); |
| 2828 GenerateRegisterArgsPush(masm); | 2819 GenerateRegisterArgsPush(masm); |
| 2829 __ TailCallStub(&string_add_stub); | 2820 __ TailCallStub(&string_add_stub); |
| 2830 | 2821 |
| 2831 __ bind(&call_runtime); | 2822 __ bind(&call_runtime); |
| 2832 GenerateTypeTransition(masm); | 2823 GenerateTypeTransition(masm); |
| 2833 } | 2824 } |
| 2834 | 2825 |
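Note: the string checks above lean on V8's instance-type ordering, where every string type sorts below FIRST_NONSTRING_TYPE, so one unsigned comparison classifies each operand. A minimal standalone sketch of the guard (helper and parameter names are illustrative, not V8 API):

    // Both operands are strings iff both instance types compare below
    // FIRST_NONSTRING_TYPE; the b(ge, &call_runtime) branches above take
    // the slow path otherwise.
    bool BothOperandsAreStrings(unsigned left_type, unsigned right_type,
                                unsigned first_nonstring_type) {
      return left_type < first_nonstring_type &&
             right_type < first_nonstring_type;
    }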
| 2835 | 2826 |
| 2836 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { | 2827 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
| 2837 ASSERT(operands_type_ == BinaryOpIC::INT32); | 2828 ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); |
| 2838 | 2829 |
| 2839 Register left = r1; | 2830 Register left = r1; |
| 2840 Register right = r0; | 2831 Register right = r0; |
| 2841 Register scratch1 = r7; | 2832 Register scratch1 = r7; |
| 2842 Register scratch2 = r9; | 2833 Register scratch2 = r9; |
| 2843 DwVfpRegister double_scratch = d0; | 2834 DwVfpRegister double_scratch = d0; |
| 2844 | 2835 |
| 2845 Register heap_number_result = no_reg; | 2836 Register heap_number_result = no_reg; |
| 2846 Register heap_number_map = r6; | 2837 Register heap_number_map = r6; |
| 2847 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 2838 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 2848 | 2839 |
| 2849 Label call_runtime; | 2840 Label call_runtime; |
| 2850 // Labels for type transition, used for wrong input or output types. | 2841 // Labels for type transition, used for wrong input or output types. |
| 2851 // Both labels are currently bound to the same position. We use two | 2842 // Both labels are currently bound to the same position. We use two |
| 2852 // different labels to distinguish the causes leading to a type transition. | 2843 // different labels to distinguish the causes leading to a type transition. |
| 2853 Label transition; | 2844 Label transition; |
| 2854 | 2845 |
| 2855 // Smi-smi fast case. | 2846 // Smi-smi fast case. |
| 2856 Label skip; | 2847 Label skip; |
| 2857 __ orr(scratch1, left, right); | 2848 __ orr(scratch1, left, right); |
| 2858 __ JumpIfNotSmi(scratch1, &skip); | 2849 __ JumpIfNotSmi(scratch1, &skip); |
| 2859 GenerateSmiSmiOperation(masm); | 2850 BinaryOpStub_GenerateSmiSmiOperation(masm, op_); |
| 2860 // Fall through if the result is not a smi. | 2851 // Fall through if the result is not a smi. |
| 2861 __ bind(&skip); | 2852 __ bind(&skip); |
| 2862 | 2853 |
| 2863 switch (op_) { | 2854 switch (op_) { |
| 2864 case Token::ADD: | 2855 case Token::ADD: |
| 2865 case Token::SUB: | 2856 case Token::SUB: |
| 2866 case Token::MUL: | 2857 case Token::MUL: |
| 2867 case Token::DIV: | 2858 case Token::DIV: |
| 2868 case Token::MOD: { | 2859 case Token::MOD: { |
| 2860 // It could be that only SMIs have been seen at either the left |
| 2861 // or the right operand. For precise type feedback, patch the IC |
| 2862 // again if this changes. |
| 2863 if (left_type_ == BinaryOpIC::SMI) { |
| 2864 __ JumpIfNotSmi(left, &transition); |
| 2865 } |
| 2866 if (right_type_ == BinaryOpIC::SMI) { |
| 2867 __ JumpIfNotSmi(right, &transition); |
| 2868 } |
| 2869 // Load both operands and check that they are 32-bit integers. | 2869 // Load both operands and check that they are 32-bit integers. |
| 2870 // Jump to type transition if they are not. The registers r0 and r1 (right | 2870 // Jump to type transition if they are not. The registers r0 and r1 (right |
| 2871 // and left) are preserved for the runtime call. | 2871 // and left) are preserved for the runtime call. |
| 2872 FloatingPointHelper::Destination destination = | 2872 FloatingPointHelper::Destination destination = |
| 2873 (CpuFeatures::IsSupported(VFP2) && op_ != Token::MOD) | 2873 (CpuFeatures::IsSupported(VFP2) && op_ != Token::MOD) |
| 2874 ? FloatingPointHelper::kVFPRegisters | 2874 ? FloatingPointHelper::kVFPRegisters |
| 2875 : FloatingPointHelper::kCoreRegisters; | 2875 : FloatingPointHelper::kCoreRegisters; |
| 2876 | 2876 |
| 2877 FloatingPointHelper::LoadNumberAsInt32Double(masm, | 2877 FloatingPointHelper::LoadNumberAsInt32Double(masm, |
| 2878 right, | 2878 right, |
| (...skipping 78 matching lines...) |
| 2957 // DIV just falls through to allocating a heap number. | 2957 // DIV just falls through to allocating a heap number. |
| 2958 } | 2958 } |
| 2959 | 2959 |
| 2960 __ bind(&return_heap_number); | 2960 __ bind(&return_heap_number); |
| 2961 // Return a heap number, or fall through to type transition or runtime | 2961 // Return a heap number, or fall through to type transition or runtime |
| 2962 // call if we can't. | 2962 // call if we can't. |
| 2963 if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER | 2963 if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER |
| 2964 : BinaryOpIC::INT32)) { | 2964 : BinaryOpIC::INT32)) { |
| 2965 // We are using vfp registers so r5 is available. | 2965 // We are using vfp registers so r5 is available. |
| 2966 heap_number_result = r5; | 2966 heap_number_result = r5; |
| 2967 GenerateHeapResultAllocation(masm, | 2967 BinaryOpStub_GenerateHeapResultAllocation(masm, |
| 2968 heap_number_result, | 2968 heap_number_result, |
| 2969 heap_number_map, | 2969 heap_number_map, |
| 2970 scratch1, | 2970 scratch1, |
| 2971 scratch2, | 2971 scratch2, |
| 2972 &call_runtime); | 2972 &call_runtime, |
| 2973 mode_); |
| 2973 __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); | 2974 __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); |
| 2974 __ vstr(d5, r0, HeapNumber::kValueOffset); | 2975 __ vstr(d5, r0, HeapNumber::kValueOffset); |
| 2975 __ mov(r0, heap_number_result); | 2976 __ mov(r0, heap_number_result); |
| 2976 __ Ret(); | 2977 __ Ret(); |
| 2977 } | 2978 } |
| 2978 | 2979 |
| 2979 // A DIV operation expecting an integer result falls through | 2980 // A DIV operation expecting an integer result falls through |
| 2980 // to type transition. | 2981 // to type transition. |
| 2981 | 2982 |
| 2982 } else { | 2983 } else { |
| 2983 // We preserved r0 and r1 to be able to call runtime. | 2984 // We preserved r0 and r1 to be able to call runtime. |
| 2984 // Save the left value on the stack. | 2985 // Save the left value on the stack. |
| 2985 __ Push(r5, r4); | 2986 __ Push(r5, r4); |
| 2986 | 2987 |
| 2987 Label pop_and_call_runtime; | 2988 Label pop_and_call_runtime; |
| 2988 | 2989 |
| 2989 // Allocate a heap number to store the result. | 2990 // Allocate a heap number to store the result. |
| 2990 heap_number_result = r5; | 2991 heap_number_result = r5; |
| 2991 GenerateHeapResultAllocation(masm, | 2992 BinaryOpStub_GenerateHeapResultAllocation(masm, |
| 2992 heap_number_result, | 2993 heap_number_result, |
| 2993 heap_number_map, | 2994 heap_number_map, |
| 2994 scratch1, | 2995 scratch1, |
| 2995 scratch2, | 2996 scratch2, |
| 2996 &pop_and_call_runtime); | 2997 &pop_and_call_runtime, |
| 2998 mode_); |
| 2997 | 2999 |
| 2998 // Load the left value from the value saved on the stack. | 3000 // Load the left value from the value saved on the stack. |
| 2999 __ Pop(r1, r0); | 3001 __ Pop(r1, r0); |
| 3000 | 3002 |
| 3001 // Call the C function to handle the double operation. | 3003 // Call the C function to handle the double operation. |
| 3002 FloatingPointHelper::CallCCodeForDoubleOperation( | 3004 FloatingPointHelper::CallCCodeForDoubleOperation( |
| 3003 masm, op_, heap_number_result, scratch1); | 3005 masm, op_, heap_number_result, scratch1); |
| 3004 if (FLAG_debug_code) { | 3006 if (FLAG_debug_code) { |
| 3005 __ stop("Unreachable code."); | 3007 __ stop("Unreachable code."); |
| 3006 } | 3008 } |
| (...skipping 84 matching lines...) |
| 3091 // Check if the result fits in a smi. | 3093 // Check if the result fits in a smi. |
| 3092 __ add(scratch1, r2, Operand(0x40000000), SetCC); | 3094 __ add(scratch1, r2, Operand(0x40000000), SetCC); |
| 3093 // If not, try to return a heap number. (We know the result is an int32.) | 3095 // If not, try to return a heap number. (We know the result is an int32.) |
| 3094 __ b(mi, &return_heap_number); | 3096 __ b(mi, &return_heap_number); |
| 3095 // Tag the result and return. | 3097 // Tag the result and return. |
| 3096 __ SmiTag(r0, r2); | 3098 __ SmiTag(r0, r2); |
| 3097 __ Ret(); | 3099 __ Ret(); |
| 3098 | 3100 |
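Note: the 0x40000000 add is the standard 31-bit smi range check. Adding 2^30 drives the sign bit (the mi branch) exactly when the untagged value lies outside [-2^30, 2^30 - 1], the payload range of a tagged smi on 32-bit ARM. A sketch of the same predicate, widened to 64 bits so the sketch itself avoids signed overflow:

    #include <cstdint>

    // True iff value can be tagged as a smi (31-bit payload), i.e. the
    // SetCC add above would leave the N flag clear.
    bool FitsInSmi(int32_t value) {
      int64_t shifted = static_cast<int64_t>(value) + 0x40000000;
      return (shifted & 0x80000000) == 0;
    }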
| 3099 __ bind(&return_heap_number); | 3101 __ bind(&return_heap_number); |
| 3100 heap_number_result = r5; | 3102 heap_number_result = r5; |
| 3101 GenerateHeapResultAllocation(masm, | 3103 BinaryOpStub_GenerateHeapResultAllocation(masm, |
| 3102 heap_number_result, | 3104 heap_number_result, |
| 3103 heap_number_map, | 3105 heap_number_map, |
| 3104 scratch1, | 3106 scratch1, |
| 3105 scratch2, | 3107 scratch2, |
| 3106 &call_runtime); | 3108 &call_runtime, |
| 3109 mode_); |
| 3107 | 3110 |
| 3108 if (CpuFeatures::IsSupported(VFP2)) { | 3111 if (CpuFeatures::IsSupported(VFP2)) { |
| 3109 CpuFeatures::Scope scope(VFP2); | 3112 CpuFeatures::Scope scope(VFP2); |
| 3110 if (op_ != Token::SHR) { | 3113 if (op_ != Token::SHR) { |
| 3111 // Convert the result to a floating point value. | 3114 // Convert the result to a floating point value. |
| 3112 __ vmov(double_scratch.low(), r2); | 3115 __ vmov(double_scratch.low(), r2); |
| 3113 __ vcvt_f64_s32(double_scratch, double_scratch.low()); | 3116 __ vcvt_f64_s32(double_scratch, double_scratch.low()); |
| 3114 } else { | 3117 } else { |
| 3115 // The result must be interpreted as an unsigned 32-bit integer. | 3118 // The result must be interpreted as an unsigned 32-bit integer. |
| 3116 __ vmov(double_scratch.low(), r2); | 3119 __ vmov(double_scratch.low(), r2); |
| (...skipping 23 matching lines...) |
| 3140 // We never expect DIV to yield an integer result, so we always generate | 3143 // We never expect DIV to yield an integer result, so we always generate |
| 3141 // type transition code for DIV operations expecting an integer result: the | 3144 // type transition code for DIV operations expecting an integer result: the |
| 3142 // code will fall through to this type transition. | 3145 // code will fall through to this type transition. |
| 3143 if (transition.is_linked() || | 3146 if (transition.is_linked() || |
| 3144 ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) { | 3147 ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) { |
| 3145 __ bind(&transition); | 3148 __ bind(&transition); |
| 3146 GenerateTypeTransition(masm); | 3149 GenerateTypeTransition(masm); |
| 3147 } | 3150 } |
| 3148 | 3151 |
| 3149 __ bind(&call_runtime); | 3152 __ bind(&call_runtime); |
| 3153 GenerateRegisterArgsPush(masm); |
| 3150 GenerateCallRuntime(masm); | 3154 GenerateCallRuntime(masm); |
| 3151 } | 3155 } |
| 3152 | 3156 |
| 3153 | 3157 |
| 3154 void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { | 3158 void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { |
| 3155 Label call_runtime; | 3159 Label call_runtime; |
| 3156 | 3160 |
| 3157 if (op_ == Token::ADD) { | 3161 if (op_ == Token::ADD) { |
| 3158 // Handle string addition here, because it is the only operation | 3162 // Handle string addition here, because it is the only operation |
| 3159 // that does not do a ToNumber conversion on the operands. | 3163 // that does not do a ToNumber conversion on the operands. |
| (...skipping 18 matching lines...) |
| 3178 } else { | 3182 } else { |
| 3179 __ LoadRoot(r0, Heap::kNanValueRootIndex); | 3183 __ LoadRoot(r0, Heap::kNanValueRootIndex); |
| 3180 } | 3184 } |
| 3181 __ bind(&done); | 3185 __ bind(&done); |
| 3182 | 3186 |
| 3183 GenerateHeapNumberStub(masm); | 3187 GenerateHeapNumberStub(masm); |
| 3184 } | 3188 } |
| 3185 | 3189 |
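Note: the oddball path normalizes non-number inputs before re-entering the heap-number code; the visible branch loads the canonical NaN, and the skipped lines handle the remaining cases. A rough sketch of the conversion for undefined, assuming the usual semantics this stub implements (arithmetic sees NaN, bitwise ops coerce to zero):

    #include <limits>

    // Sketch only: 'undefined' as a numeric operand.
    double UndefinedAsNumber(bool bitwise_op) {
      return bitwise_op ? 0.0 : std::numeric_limits<double>::quiet_NaN();
    }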
| 3186 | 3190 |
| 3187 void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { | 3191 void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { |
| 3188 Label call_runtime; | 3192 Label call_runtime, transition; |
| 3189 GenerateFPOperation(masm, false, &call_runtime, &call_runtime); | 3193 BinaryOpStub_GenerateFPOperation( |
| 3194 masm, left_type_, right_type_, false, |
| 3195 &transition, &call_runtime, &transition, op_, mode_); |
| 3196 |
| 3197 __ bind(&transition); |
| 3198 GenerateTypeTransition(masm); |
| 3190 | 3199 |
| 3191 __ bind(&call_runtime); | 3200 __ bind(&call_runtime); |
| 3201 GenerateRegisterArgsPush(masm); |
| 3192 GenerateCallRuntime(masm); | 3202 GenerateCallRuntime(masm); |
| 3193 } | 3203 } |
| 3194 | 3204 |
| 3195 | 3205 |
| 3196 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { | 3206 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
| 3197 Label call_runtime, call_string_add_or_runtime; | 3207 Label call_runtime, call_string_add_or_runtime, transition; |
| 3198 | 3208 |
| 3199 GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); | 3209 BinaryOpStub_GenerateSmiCode( |
| 3210 masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_); |
| 3200 | 3211 |
| 3201 GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime); | 3212 BinaryOpStub_GenerateFPOperation( |
| 3213 masm, left_type_, right_type_, false, |
| 3214 &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_); |
| 3215 |
| 3216 __ bind(&transition); |
| 3217 GenerateTypeTransition(masm); |
| 3202 | 3218 |
| 3203 __ bind(&call_string_add_or_runtime); | 3219 __ bind(&call_string_add_or_runtime); |
| 3204 if (op_ == Token::ADD) { | 3220 if (op_ == Token::ADD) { |
| 3205 GenerateAddStrings(masm); | 3221 GenerateAddStrings(masm); |
| 3206 } | 3222 } |
| 3207 | 3223 |
| 3208 __ bind(&call_runtime); | 3224 __ bind(&call_runtime); |
| 3225 GenerateRegisterArgsPush(masm); |
| 3209 GenerateCallRuntime(masm); | 3226 GenerateCallRuntime(masm); |
| 3210 } | 3227 } |
| 3211 | 3228 |
| 3212 | 3229 |
| 3213 void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { | 3230 void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { |
| 3214 ASSERT(op_ == Token::ADD); | 3231 ASSERT(op_ == Token::ADD); |
| 3215 Label left_not_string, call_runtime; | 3232 Label left_not_string, call_runtime; |
| 3216 | 3233 |
| 3217 Register left = r1; | 3234 Register left = r1; |
| 3218 Register right = r0; | 3235 Register right = r0; |
| (...skipping 15 matching lines...) |
| 3234 | 3251 |
| 3235 StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB); | 3252 StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB); |
| 3236 GenerateRegisterArgsPush(masm); | 3253 GenerateRegisterArgsPush(masm); |
| 3237 __ TailCallStub(&string_add_right_stub); | 3254 __ TailCallStub(&string_add_right_stub); |
| 3238 | 3255 |
| 3239 // At least one argument is not a string. | 3256 // At least one argument is not a string. |
| 3240 __ bind(&call_runtime); | 3257 __ bind(&call_runtime); |
| 3241 } | 3258 } |
| 3242 | 3259 |
| 3243 | 3260 |
| 3244 void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) { | 3261 void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, |
| 3245 GenerateRegisterArgsPush(masm); | 3262 Register result, |
| 3246 switch (op_) { | 3263 Register heap_number_map, |
| 3247 case Token::ADD: | 3264 Register scratch1, |
| 3248 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); | 3265 Register scratch2, |
| 3249 break; | 3266 Label* gc_required, |
| 3250 case Token::SUB: | 3267 OverwriteMode mode) { |
| 3251 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); | |
| 3252 break; | |
| 3253 case Token::MUL: | |
| 3254 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); | |
| 3255 break; | |
| 3256 case Token::DIV: | |
| 3257 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); | |
| 3258 break; | |
| 3259 case Token::MOD: | |
| 3260 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); | |
| 3261 break; | |
| 3262 case Token::BIT_OR: | |
| 3263 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); | |
| 3264 break; | |
| 3265 case Token::BIT_AND: | |
| 3266 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); | |
| 3267 break; | |
| 3268 case Token::BIT_XOR: | |
| 3269 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); | |
| 3270 break; | |
| 3271 case Token::SAR: | |
| 3272 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); | |
| 3273 break; | |
| 3274 case Token::SHR: | |
| 3275 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); | |
| 3276 break; | |
| 3277 case Token::SHL: | |
| 3278 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); | |
| 3279 break; | |
| 3280 default: | |
| 3281 UNREACHABLE(); | |
| 3282 } | |
| 3283 } | |
| 3284 | |
| 3285 | |
| 3286 void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm, | |
| 3287 Register result, | |
| 3288 Register heap_number_map, | |
| 3289 Register scratch1, | |
| 3290 Register scratch2, | |
| 3291 Label* gc_required) { | |
| 3292 // The code below clobbers result if allocation fails. To keep both | 3268 // The code below clobbers result if allocation fails. To keep both |
| 3293 // arguments intact for the runtime call, result cannot be one of them. | 3269 // arguments intact for the runtime call, result cannot be one of them. |
| 3294 ASSERT(!result.is(r0) && !result.is(r1)); | 3270 ASSERT(!result.is(r0) && !result.is(r1)); |
| 3295 | 3271 |
| 3296 if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) { | 3272 if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) { |
| 3297 Label skip_allocation, allocated; | 3273 Label skip_allocation, allocated; |
| 3298 Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0; | 3274 Register overwritable_operand = mode == OVERWRITE_LEFT ? r1 : r0; |
| 3299 // If the overwritable operand is already an object, we skip the | 3275 // If the overwritable operand is already an object, we skip the |
| 3300 // allocation of a heap number. | 3276 // allocation of a heap number. |
| 3301 __ JumpIfNotSmi(overwritable_operand, &skip_allocation); | 3277 __ JumpIfNotSmi(overwritable_operand, &skip_allocation); |
| 3302 // Allocate a heap number for the result. | 3278 // Allocate a heap number for the result. |
| 3303 __ AllocateHeapNumber( | 3279 __ AllocateHeapNumber( |
| 3304 result, scratch1, scratch2, heap_number_map, gc_required); | 3280 result, scratch1, scratch2, heap_number_map, gc_required); |
| 3305 __ b(&allocated); | 3281 __ b(&allocated); |
| 3306 __ bind(&skip_allocation); | 3282 __ bind(&skip_allocation); |
| 3307 // Use object holding the overwritable operand for result. | 3283 // Use object holding the overwritable operand for result. |
| 3308 __ mov(result, Operand(overwritable_operand)); | 3284 __ mov(result, Operand(overwritable_operand)); |
| 3309 __ bind(&allocated); | 3285 __ bind(&allocated); |
| 3310 } else { | 3286 } else { |
| 3311 ASSERT(mode_ == NO_OVERWRITE); | 3287 ASSERT(mode == NO_OVERWRITE); |
| 3312 __ AllocateHeapNumber( | 3288 __ AllocateHeapNumber( |
| 3313 result, scratch1, scratch2, heap_number_map, gc_required); | 3289 result, scratch1, scratch2, heap_number_map, gc_required); |
| 3314 } | 3290 } |
| 3315 } | 3291 } |
| 3316 | 3292 |
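Note: the allocation helper's policy deserves a restatement. In an overwrite mode the stub recycles the heap number backing the overwritable operand, and only allocates when that operand is a smi (a smi is an immediate, not a heap object). A sketch of the decision with illustrative stand-ins for V8's tagged values:

    // Sketch of the heap-result policy above; a null return from the
    // allocator models the gc_required path.
    struct Value { bool is_smi; void* heap_number; };
    enum Mode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

    void* ResultHeapNumber(Mode mode, Value left, Value right,
                           void* (*allocate)()) {
      if (mode == OVERWRITE_LEFT && !left.is_smi) return left.heap_number;
      if (mode == OVERWRITE_RIGHT && !right.is_smi) return right.heap_number;
      return allocate();
    }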
| 3317 | 3293 |
| 3318 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { | 3294 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { |
| 3319 __ Push(r1, r0); | 3295 __ Push(r1, r0); |
| 3320 } | 3296 } |
| 3321 | 3297 |
| (...skipping 2096 matching lines...) |
| 5418 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); | 5394 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); |
| 5419 __ bind(&do_call); | 5395 __ bind(&do_call); |
| 5420 // Set expected number of arguments to zero (not changing r0). | 5396 // Set expected number of arguments to zero (not changing r0). |
| 5421 __ mov(r2, Operand(0, RelocInfo::NONE)); | 5397 __ mov(r2, Operand(0, RelocInfo::NONE)); |
| 5422 __ SetCallKind(r5, CALL_AS_METHOD); | 5398 __ SetCallKind(r5, CALL_AS_METHOD); |
| 5423 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), | 5399 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), |
| 5424 RelocInfo::CODE_TARGET); | 5400 RelocInfo::CODE_TARGET); |
| 5425 } | 5401 } |
| 5426 | 5402 |
| 5427 | 5403 |
| 5428 // Unfortunately you have to run without snapshots to see most of these | |
| 5429 // names in the profile since most compare stubs end up in the snapshot. | |
| 5430 void CompareStub::PrintName(StringStream* stream) { | |
| 5431 ASSERT((lhs_.is(r0) && rhs_.is(r1)) || | |
| 5432 (lhs_.is(r1) && rhs_.is(r0))); | |
| 5433 const char* cc_name; | |
| 5434 switch (cc_) { | |
| 5435 case lt: cc_name = "LT"; break; | |
| 5436 case gt: cc_name = "GT"; break; | |
| 5437 case le: cc_name = "LE"; break; | |
| 5438 case ge: cc_name = "GE"; break; | |
| 5439 case eq: cc_name = "EQ"; break; | |
| 5440 case ne: cc_name = "NE"; break; | |
| 5441 default: cc_name = "UnknownCondition"; break; | |
| 5442 } | |
| 5443 bool is_equality = cc_ == eq || cc_ == ne; | |
| 5444 stream->Add("CompareStub_%s", cc_name); | |
| 5445 stream->Add(lhs_.is(r0) ? "_r0" : "_r1"); | |
| 5446 stream->Add(rhs_.is(r0) ? "_r0" : "_r1"); | |
| 5447 if (strict_ && is_equality) stream->Add("_STRICT"); | |
| 5448 if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN"); | |
| 5449 if (!include_number_compare_) stream->Add("_NO_NUMBER"); | |
| 5450 if (!include_smi_compare_) stream->Add("_NO_SMI"); | |
| 5451 } | |
| 5452 | |
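For example, a strict equality stub with lhs_ in r1 and rhs_ in r0 prints as "CompareStub_EQ_r1_r0_STRICT"; the _NO_NAN, _NO_NUMBER, and _NO_SMI suffixes are appended in the same way when the corresponding flags apply.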
| 5453 | |
| 5454 int CompareStub::MinorKey() { | |
| 5455 // Encode the parameters in a unique 16 bit value. To avoid duplicate | |
| 5456 // stubs, the never-NaN-NaN condition is only taken into account if the | |
| 5457 // condition is equality. | |
| 5458 ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12)); | |
| 5459 ASSERT((lhs_.is(r0) && rhs_.is(r1)) || | |
| 5460 (lhs_.is(r1) && rhs_.is(r0))); | |
| 5461 return ConditionField::encode(static_cast<unsigned>(cc_) >> 28) | |
| 5462 | RegisterField::encode(lhs_.is(r0)) | |
| 5463 | StrictField::encode(strict_) | |
| 5464 | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false) | |
| 5465 | IncludeNumberCompareField::encode(include_number_compare_) | |
| 5466 | IncludeSmiCompareField::encode(include_smi_compare_); | |
| 5467 } | |
| 5468 | |
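Note: the BitField encoders pack six values into the minor key; the >> 28 works because ARM condition codes occupy the top four bits of an instruction word. An equivalent hand-rolled packing, with field offsets assumed for illustration (the real widths come from the BitField typedefs), and with never_nan_nan already masked to equality compares as in the code above:

    #include <cstdint>

    // Sketch: pack the compare-stub parameters into one small key.
    uint32_t MakeMinorKey(uint32_t cc, bool lhs_is_r0, bool strict,
                          bool never_nan_nan, bool include_number_compare,
                          bool include_smi_compare) {
      return (cc >> 28)                                   // 4-bit condition
             | (uint32_t{lhs_is_r0} << 4)
             | (uint32_t{strict} << 5)
             | (uint32_t{never_nan_nan} << 6)
             | (uint32_t{include_number_compare} << 7)
             | (uint32_t{include_smi_compare} << 8);
    }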
| 5469 | |
| 5470 // StringCharCodeAtGenerator | 5404 // StringCharCodeAtGenerator |
| 5471 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { | 5405 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { |
| 5472 Label flat_string; | 5406 Label flat_string; |
| 5473 Label ascii_string; | 5407 Label ascii_string; |
| 5474 Label got_char_code; | 5408 Label got_char_code; |
| 5475 Label sliced_string; | 5409 Label sliced_string; |
| 5476 | 5410 |
| 5477 // If the receiver is a smi trigger the non-string case. | 5411 // If the receiver is a smi trigger the non-string case. |
| 5478 __ JumpIfSmi(object_, receiver_not_string_); | 5412 __ JumpIfSmi(object_, receiver_not_string_); |
| 5479 | 5413 |
| (...skipping 1181 matching lines...) |
| 6661 Operand(1 << Map::kStringWrapperSafeForDefaultValueOf)); | 6595 Operand(1 << Map::kStringWrapperSafeForDefaultValueOf)); |
| 6662 __ b(ne, slow); | 6596 __ b(ne, slow); |
| 6663 __ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset)); | 6597 __ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset)); |
| 6664 __ str(arg, MemOperand(sp, stack_offset)); | 6598 __ str(arg, MemOperand(sp, stack_offset)); |
| 6665 | 6599 |
| 6666 __ bind(&done); | 6600 __ bind(&done); |
| 6667 } | 6601 } |
| 6668 | 6602 |
| 6669 | 6603 |
| 6670 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { | 6604 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { |
| 6671 ASSERT(state_ == CompareIC::SMIS); | 6605 ASSERT(state_ == CompareIC::SMI); |
| 6672 Label miss; | 6606 Label miss; |
| 6673 __ orr(r2, r1, r0); | 6607 __ orr(r2, r1, r0); |
| 6674 __ JumpIfNotSmi(r2, &miss); | 6608 __ JumpIfNotSmi(r2, &miss); |
| 6675 | 6609 |
| 6676 if (GetCondition() == eq) { | 6610 if (GetCondition() == eq) { |
| 6677 // For equality we do not care about the sign of the result. | 6611 // For equality we do not care about the sign of the result. |
| 6678 __ sub(r0, r0, r1, SetCC); | 6612 __ sub(r0, r0, r1, SetCC); |
| 6679 } else { | 6613 } else { |
| 6680 // Untag before subtracting to avoid handling overflow. | 6614 // Untag before subtracting to avoid handling overflow. |
| 6681 __ SmiUntag(r1); | 6615 __ SmiUntag(r1); |
| 6682 __ sub(r0, r1, SmiUntagOperand(r0)); | 6616 __ sub(r0, r1, SmiUntagOperand(r0)); |
| 6683 } | 6617 } |
| 6684 __ Ret(); | 6618 __ Ret(); |
| 6685 | 6619 |
| 6686 __ bind(&miss); | 6620 __ bind(&miss); |
| 6687 GenerateMiss(masm); | 6621 GenerateMiss(masm); |
| 6688 } | 6622 } |
| 6689 | 6623 |
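Note: the two smi paths differ for a reason. Equality only needs zero/non-zero, so the tagged subtraction suffices, while ordering untags first so the 31-bit difference cannot overflow 32 bits. A sketch of the ordering path (a tagged smi is value << 1, tag bit 0):

    #include <cstdint>

    // Ordering comparison of two tagged smis: untag (arithmetic shift),
    // then subtract; the difference of two 31-bit values fits in 32 bits.
    int32_t CompareSmisForOrdering(int32_t lhs_tagged, int32_t rhs_tagged) {
      int32_t lhs = lhs_tagged >> 1;  // SmiUntag
      int32_t rhs = rhs_tagged >> 1;
      return lhs - rhs;  // <0: LESS, 0: EQUAL, >0: GREATER
    }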
| 6690 | 6624 |
| 6691 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { | 6625 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { |
| 6692 ASSERT(state_ == CompareIC::HEAP_NUMBERS); | 6626 ASSERT(state_ == CompareIC::HEAP_NUMBER); |
| 6693 | 6627 |
| 6694 Label generic_stub; | 6628 Label generic_stub; |
| 6695 Label unordered, maybe_undefined1, maybe_undefined2; | 6629 Label unordered, maybe_undefined1, maybe_undefined2; |
| 6696 Label miss; | 6630 Label miss; |
| 6697 __ and_(r2, r1, Operand(r0)); | |
| 6698 __ JumpIfSmi(r2, &generic_stub); | |
| 6699 | 6631 |
| 6700 __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE); | 6632 if (left_ == CompareIC::SMI) { |
| 6701 __ b(ne, &maybe_undefined1); | 6633 __ JumpIfNotSmi(r1, &miss); |
| 6702 __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE); | 6634 } |
| 6703 __ b(ne, &maybe_undefined2); | 6635 if (right_ == CompareIC::SMI) { |
| 6636 __ JumpIfNotSmi(r0, &miss); |
| 6637 } |
| 6704 | 6638 |
| 6705 // Inline the double comparison and fall back to the general compare | 6639 // Inline the double comparison and fall back to the general compare |
| 6706 // stub if NaN is involved or VFP3 is unsupported. | 6640 // stub if NaN is involved or VFP2 is unsupported. |
| 6707 if (CpuFeatures::IsSupported(VFP2)) { | 6641 if (CpuFeatures::IsSupported(VFP2)) { |
| 6708 CpuFeatures::Scope scope(VFP2); | 6642 CpuFeatures::Scope scope(VFP2); |
| 6709 | 6643 |
| 6710 // Load left and right operands | 6644 // Load left and right operands. |
| 6645 Label done, left, left_smi, right_smi; |
| 6646 __ JumpIfSmi(r0, &right_smi); |
| 6647 __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, |
| 6648 DONT_DO_SMI_CHECK); |
| 6649 __ sub(r2, r0, Operand(kHeapObjectTag)); |
| 6650 __ vldr(d1, r2, HeapNumber::kValueOffset); |
| 6651 __ b(&left); |
| 6652 __ bind(&right_smi); |
| 6653 __ SmiUntag(r2, r0); // Can't clobber r0 yet. |
| 6654 SwVfpRegister single_scratch = d2.low(); |
| 6655 __ vmov(single_scratch, r2); |
| 6656 __ vcvt_f64_s32(d1, single_scratch); |
| 6657 |
| 6658 __ bind(&left); |
| 6659 __ JumpIfSmi(r1, &left_smi); |
| 6660 __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, |
| 6661 DONT_DO_SMI_CHECK); |
| 6711 __ sub(r2, r1, Operand(kHeapObjectTag)); | 6662 __ sub(r2, r1, Operand(kHeapObjectTag)); |
| 6712 __ vldr(d0, r2, HeapNumber::kValueOffset); | 6663 __ vldr(d0, r2, HeapNumber::kValueOffset); |
| 6713 __ sub(r2, r0, Operand(kHeapObjectTag)); | 6664 __ b(&done); |
| 6714 __ vldr(d1, r2, HeapNumber::kValueOffset); | 6665 __ bind(&left_smi); |
| 6666 __ SmiUntag(r2, r1); // Can't clobber r1 yet. |
| 6667 single_scratch = d3.low(); |
| 6668 __ vmov(single_scratch, r2); |
| 6669 __ vcvt_f64_s32(d0, single_scratch); |
| 6715 | 6670 |
| 6716 // Compare operands | 6671 __ bind(&done); |
| 6672 // Compare operands. |
| 6717 __ VFPCompareAndSetFlags(d0, d1); | 6673 __ VFPCompareAndSetFlags(d0, d1); |
| 6718 | 6674 |
| 6719 // Don't base result on status bits when a NaN is involved. | 6675 // Don't base result on status bits when a NaN is involved. |
| 6720 __ b(vs, &unordered); | 6676 __ b(vs, &unordered); |
| 6721 | 6677 |
| 6722 // Return a result of -1, 0, or 1, based on status bits. | 6678 // Return a result of -1, 0, or 1, based on status bits. |
| 6723 __ mov(r0, Operand(EQUAL), LeaveCC, eq); | 6679 __ mov(r0, Operand(EQUAL), LeaveCC, eq); |
| 6724 __ mov(r0, Operand(LESS), LeaveCC, lt); | 6680 __ mov(r0, Operand(LESS), LeaveCC, lt); |
| 6725 __ mov(r0, Operand(GREATER), LeaveCC, gt); | 6681 __ mov(r0, Operand(GREATER), LeaveCC, gt); |
| 6726 __ Ret(); | 6682 __ Ret(); |
| 6727 } | 6683 } |
| 6728 | 6684 |
| 6729 __ bind(&unordered); | 6685 __ bind(&unordered); |
| 6730 CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0); | |
| 6731 __ bind(&generic_stub); | 6686 __ bind(&generic_stub); |
| 6687 ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, |
| 6688 CompareIC::GENERIC); |
| 6732 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); | 6689 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); |
| 6733 | 6690 |
| 6734 __ bind(&maybe_undefined1); | 6691 __ bind(&maybe_undefined1); |
| 6735 if (Token::IsOrderedRelationalCompareOp(op_)) { | 6692 if (Token::IsOrderedRelationalCompareOp(op_)) { |
| 6736 __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); | 6693 __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); |
| 6737 __ b(ne, &miss); | 6694 __ b(ne, &miss); |
| 6695 __ JumpIfSmi(r1, &unordered); |
| 6738 __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE); | 6696 __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE); |
| 6739 __ b(ne, &maybe_undefined2); | 6697 __ b(ne, &maybe_undefined2); |
| 6740 __ jmp(&unordered); | 6698 __ jmp(&unordered); |
| 6741 } | 6699 } |
| 6742 | 6700 |
| 6743 __ bind(&maybe_undefined2); | 6701 __ bind(&maybe_undefined2); |
| 6744 if (Token::IsOrderedRelationalCompareOp(op_)) { | 6702 if (Token::IsOrderedRelationalCompareOp(op_)) { |
| 6745 __ CompareRoot(r1, Heap::kUndefinedValueRootIndex); | 6703 __ CompareRoot(r1, Heap::kUndefinedValueRootIndex); |
| 6746 __ b(eq, &unordered); | 6704 __ b(eq, &unordered); |
| 6747 } | 6705 } |
| 6748 | 6706 |
| 6749 __ bind(&miss); | 6707 __ bind(&miss); |
| 6750 GenerateMiss(masm); | 6708 GenerateMiss(masm); |
| 6751 } | 6709 } |
| 6752 | 6710 |
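Note: with the new left_/right_ smi fallbacks, either operand may arrive as a smi rather than a boxed double; the stub untags it and converts with vcvt_f64_s32, while heap numbers are loaded straight from their value field. A per-operand sketch ('payload' stands in for the boxed double and is an assumption of the sketch):

    #include <cstdint>

    // Sketch: load one compare operand as a double.
    double LoadCompareOperand(int32_t tagged, bool is_smi, double payload) {
      if (is_smi) {
        return static_cast<double>(tagged >> 1);  // SmiUntag + vcvt_f64_s32
      }
      return payload;  // vldr from HeapNumber::kValueOffset
    }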
| 6753 | 6711 |
| 6754 void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { | 6712 void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { |
| 6755 ASSERT(state_ == CompareIC::SYMBOLS); | 6713 ASSERT(state_ == CompareIC::SYMBOL); |
| 6756 Label miss; | 6714 Label miss; |
| 6757 | 6715 |
| 6758 // Registers containing left and right operands respectively. | 6716 // Registers containing left and right operands respectively. |
| 6759 Register left = r1; | 6717 Register left = r1; |
| 6760 Register right = r0; | 6718 Register right = r0; |
| 6761 Register tmp1 = r2; | 6719 Register tmp1 = r2; |
| 6762 Register tmp2 = r3; | 6720 Register tmp2 = r3; |
| 6763 | 6721 |
| 6764 // Check that both operands are heap objects. | 6722 // Check that both operands are heap objects. |
| 6765 __ JumpIfEitherSmi(left, right, &miss); | 6723 __ JumpIfEitherSmi(left, right, &miss); |
| (...skipping 17 matching lines...) |
| 6783 STATIC_ASSERT(kSmiTag == 0); | 6741 STATIC_ASSERT(kSmiTag == 0); |
| 6784 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq); | 6742 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq); |
| 6785 __ Ret(); | 6743 __ Ret(); |
| 6786 | 6744 |
| 6787 __ bind(&miss); | 6745 __ bind(&miss); |
| 6788 GenerateMiss(masm); | 6746 GenerateMiss(masm); |
| 6789 } | 6747 } |
| 6790 | 6748 |
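Note: the symbol fast path never inspects characters. Symbols are interned, so two symbols are equal exactly when they are the same heap object, and the emitted pointer compare is a complete equality test. As a one-line sketch:

    // Interned symbols compare by identity.
    bool SymbolsEqual(const void* left, const void* right) {
      return left == right;
    }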
| 6791 | 6749 |
| 6792 void ICCompareStub::GenerateStrings(MacroAssembler* masm) { | 6750 void ICCompareStub::GenerateStrings(MacroAssembler* masm) { |
| 6793 ASSERT(state_ == CompareIC::STRINGS); | 6751 ASSERT(state_ == CompareIC::STRING); |
| 6794 Label miss; | 6752 Label miss; |
| 6795 | 6753 |
| 6796 bool equality = Token::IsEqualityOp(op_); | 6754 bool equality = Token::IsEqualityOp(op_); |
| 6797 | 6755 |
| 6798 // Registers containing left and right operands respectively. | 6756 // Registers containing left and right operands respectively. |
| 6799 Register left = r1; | 6757 Register left = r1; |
| 6800 Register right = r0; | 6758 Register right = r0; |
| 6801 Register tmp1 = r2; | 6759 Register tmp1 = r2; |
| 6802 Register tmp2 = r3; | 6760 Register tmp2 = r3; |
| 6803 Register tmp3 = r4; | 6761 Register tmp3 = r4; |
| (...skipping 57 matching lines...) |
| 6861 } else { | 6819 } else { |
| 6862 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); | 6820 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); |
| 6863 } | 6821 } |
| 6864 | 6822 |
| 6865 __ bind(&miss); | 6823 __ bind(&miss); |
| 6866 GenerateMiss(masm); | 6824 GenerateMiss(masm); |
| 6867 } | 6825 } |
| 6868 | 6826 |
| 6869 | 6827 |
| 6870 void ICCompareStub::GenerateObjects(MacroAssembler* masm) { | 6828 void ICCompareStub::GenerateObjects(MacroAssembler* masm) { |
| 6871 ASSERT(state_ == CompareIC::OBJECTS); | 6829 ASSERT(state_ == CompareIC::OBJECT); |
| 6872 Label miss; | 6830 Label miss; |
| 6873 __ and_(r2, r1, Operand(r0)); | 6831 __ and_(r2, r1, Operand(r0)); |
| 6874 __ JumpIfSmi(r2, &miss); | 6832 __ JumpIfSmi(r2, &miss); |
| 6875 | 6833 |
| 6876 __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE); | 6834 __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE); |
| 6877 __ b(ne, &miss); | 6835 __ b(ne, &miss); |
| 6878 __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE); | 6836 __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE); |
| 6879 __ b(ne, &miss); | 6837 __ b(ne, &miss); |
| 6880 | 6838 |
| 6881 ASSERT(GetCondition() == eq); | 6839 ASSERT(GetCondition() == eq); |
| (...skipping 747 matching lines...) |
| 7629 | 7587 |
| 7630 __ Pop(lr, r5, r1); | 7588 __ Pop(lr, r5, r1); |
| 7631 __ Ret(); | 7589 __ Ret(); |
| 7632 } | 7590 } |
| 7633 | 7591 |
| 7634 #undef __ | 7592 #undef __ |
| 7635 | 7593 |
| 7636 } } // namespace v8::internal | 7594 } } // namespace v8::internal |
| 7637 | 7595 |
| 7638 #endif // V8_TARGET_ARCH_ARM | 7596 #endif // V8_TARGET_ARCH_ARM |