| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 303 matching lines...) | |
| 314 | 314 |
| 315 // Iterate through the rest of the map backwards. r4 holds an index as a Smi. | 315 // Iterate through the rest of the map backwards. r4 holds an index as a Smi. |
| 316 Label loop; | 316 Label loop; |
| 317 __ ldr(r4, FieldMemOperand(r1, FixedArray::kLengthOffset)); | 317 __ ldr(r4, FieldMemOperand(r1, FixedArray::kLengthOffset)); |
| 318 __ bind(&loop); | 318 __ bind(&loop); |
| 319 // Do not double check first entry. | 319 // Do not double check first entry. |
| 320 __ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kSecondEntryIndex))); | 320 __ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kSecondEntryIndex))); |
| 321 __ b(eq, &install_unoptimized); | 321 __ b(eq, &install_unoptimized); |
| 322 __ sub(r4, r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength))); | 322 __ sub(r4, r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength))); |
| 323 __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 323 __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 324 __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize)); | 324 __ add(r5, r5, Operand::PointerOffsetFromSmiKey(r4)); |
| 325 __ ldr(r5, MemOperand(r5)); | 325 __ ldr(r5, MemOperand(r5)); |
| 326 __ cmp(r2, r5); | 326 __ cmp(r2, r5); |
| 327 __ b(ne, &loop); | 327 __ b(ne, &loop); |
| 328 // Hit: fetch the optimized code. | 328 // Hit: fetch the optimized code. |
| 329 __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 329 __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 330 __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize)); | 330 __ add(r5, r5, Operand::PointerOffsetFromSmiKey(r4)); |
| 331 __ add(r5, r5, Operand(kPointerSize)); | 331 __ add(r5, r5, Operand(kPointerSize)); |
| 332 __ ldr(r4, MemOperand(r5)); | 332 __ ldr(r4, MemOperand(r5)); |
| 333 | 333 |
| 334 __ bind(&install_optimized); | 334 __ bind(&install_optimized); |
| 335 __ IncrementCounter(counters->fast_new_closure_install_optimized(), | 335 __ IncrementCounter(counters->fast_new_closure_install_optimized(), |
| 336 1, r6, r7); | 336 1, r6, r7); |
| 337 | 337 |
| 338 // TODO(fschneider): Idea: store proper code pointers in the map and either | 338 // TODO(fschneider): Idea: store proper code pointers in the map and either |
| 339 // unmangle them on marking or do nothing as the whole map is discarded on | 339 // unmangle them on marking or do nothing as the whole map is discarded on |
| 340 // major GC anyway. | 340 // major GC anyway. |
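
A note on the `Operand::PointerOffsetFromSmiKey(r4)` rewrite in this hunk: on 32-bit ARM a Smi carries its payload shifted left by one with a zero tag bit, so "untag, then scale by pointer size" folds into a single left shift. A minimal standalone sketch of the arithmetic (the constants mirror V8's 32-bit layout; the exact shifted operand the helper emits is an assumption):

```cpp
#include <cstdint>

// 32-bit ARM Smi layout assumed throughout this file: value << 1, tag bit 0.
constexpr int kSmiTagSize = 1;
constexpr int kPointerSizeLog2 = 2;  // 4-byte pointers

constexpr int32_t SmiTag(int32_t value) { return value << kSmiTagSize; }

// What Operand::PointerOffsetFromSmiKey(key) evaluates to: the byte offset
// of pointer-sized element |key|. (smi >> 1) << 2 folds to smi << 1, which
// is why one shifted operand replaces the hand-written
// Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize).
constexpr int32_t PointerOffsetFromSmiKey(int32_t smi_key) {
  return smi_key << (kPointerSizeLog2 - kSmiTagSize);
}

static_assert(PointerOffsetFromSmiKey(SmiTag(3)) == 3 * 4,
              "element 3 sits 12 bytes past the elements start");
```
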
| (...skipping 171 matching lines...) | |
| 512 | 512 |
| 513 void Generate(MacroAssembler* masm); | 513 void Generate(MacroAssembler* masm); |
| 514 }; | 514 }; |
| 515 | 515 |
| 516 | 516 |
| 517 void ConvertToDoubleStub::Generate(MacroAssembler* masm) { | 517 void ConvertToDoubleStub::Generate(MacroAssembler* masm) { |
| 518 Register exponent = result1_; | 518 Register exponent = result1_; |
| 519 Register mantissa = result2_; | 519 Register mantissa = result2_; |
| 520 | 520 |
| 521 Label not_special; | 521 Label not_special; |
| 522 // Convert from Smi to integer. | 522 __ SmiUntag(source_); |
| 523 __ mov(source_, Operand(source_, ASR, kSmiTagSize)); | |
| 524 // Move sign bit from source to destination. This works because the sign bit | 523 // Move sign bit from source to destination. This works because the sign bit |
| 525 // in the exponent word of the double has the same position and polarity as | 524 // in the exponent word of the double has the same position and polarity as |
| 526 // the 2's complement sign bit in a Smi. | 525 // the 2's complement sign bit in a Smi. |
| 527 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); | 526 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); |
| 528 __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC); | 527 __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC); |
| 529 // Subtract from 0 if source was negative. | 528 // Subtract from 0 if source was negative. |
| 530 __ rsb(source_, source_, Operand::Zero(), LeaveCC, ne); | 529 __ rsb(source_, source_, Operand::Zero(), LeaveCC, ne); |
| 531 | 530 |
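
The `SmiUntag(source_)` call above should emit exactly the instruction it replaces, `mov(src, Operand(src, ASR, kSmiTagSize))` (an assumption about the helper, but that is the only untag 32-bit ARM needs). Modeled in portable C++, together with the sign-bit claim the comment relies on:

```cpp
#include <cstdint>

// Untagging is an arithmetic (sign-preserving) shift right by the one-bit
// tag, so negative Smis untag to the corresponding negative integers.
int32_t SmiUntag(int32_t smi) { return smi >> 1; }  // ASR #1 on ARM

// The sign-bit comment above holds because bit 31 of a tagged Smi is its
// 2's-complement sign bit, the same bit HeapNumber::kSignMask (0x80000000u)
// selects in the double's exponent word.
```
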
| 532 // We have -1, 0 or 1, which we treat specially. Register source_ contains | 531 // We have -1, 0 or 1, which we treat specially. Register source_ contains |
| 533 // absolute value: it is either equal to 1 (special case of -1 and 1), | 532 // absolute value: it is either equal to 1 (special case of -1 and 1), |
| (...skipping 229 matching lines...) | |
| 763 } | 762 } |
| 764 __ Ret(ne); | 763 __ Ret(ne); |
| 765 } else { | 764 } else { |
| 766 // Smi compared non-strictly with a non-Smi non-heap-number. Call | 765 // Smi compared non-strictly with a non-Smi non-heap-number. Call |
| 767 // the runtime. | 766 // the runtime. |
| 768 __ b(ne, slow); | 767 __ b(ne, slow); |
| 769 } | 768 } |
| 770 | 769 |
| 771 // Lhs is a smi, rhs is a number. | 770 // Lhs is a smi, rhs is a number. |
| 772 // Convert lhs to a double in d7. | 771 // Convert lhs to a double in d7. |
| 773 __ SmiToDoubleVFPRegister(lhs, d7, r7, s15); | 772 __ SmiToDouble(d7, lhs); |
| 774 // Load the double from rhs, tagged HeapNumber r0, to d6. | 773 // Load the double from rhs, tagged HeapNumber r0, to d6. |
| 775 __ sub(r7, rhs, Operand(kHeapObjectTag)); | 774 __ sub(r7, rhs, Operand(kHeapObjectTag)); |
| 776 __ vldr(d6, r7, HeapNumber::kValueOffset); | 775 __ vldr(d6, r7, HeapNumber::kValueOffset); |
| 777 | 776 |
| 778 // We now have both loaded as doubles but we can skip the lhs nan check | 777 // We now have both loaded as doubles but we can skip the lhs nan check |
| 779 // since it's a smi. | 778 // since it's a smi. |
| 780 __ jmp(lhs_not_nan); | 779 __ jmp(lhs_not_nan); |
| 781 | 780 |
| 782 __ bind(&rhs_is_smi); | 781 __ bind(&rhs_is_smi); |
| 783 // Rhs is a smi. Check whether the non-smi lhs is a heap number. | 782 // Rhs is a smi. Check whether the non-smi lhs is a heap number. |
| (...skipping 10 matching lines...) | |
| 794 // Smi compared non-strictly with a non-smi non-heap-number. Call | 793 // Smi compared non-strictly with a non-smi non-heap-number. Call |
| 795 // the runtime. | 794 // the runtime. |
| 796 __ b(ne, slow); | 795 __ b(ne, slow); |
| 797 } | 796 } |
| 798 | 797 |
| 799 // Rhs is a smi, lhs is a heap number. | 798 // Rhs is a smi, lhs is a heap number. |
| 800 // Load the double from lhs, tagged HeapNumber r1, to d7. | 799 // Load the double from lhs, tagged HeapNumber r1, to d7. |
| 801 __ sub(r7, lhs, Operand(kHeapObjectTag)); | 800 __ sub(r7, lhs, Operand(kHeapObjectTag)); |
| 802 __ vldr(d7, r7, HeapNumber::kValueOffset); | 801 __ vldr(d7, r7, HeapNumber::kValueOffset); |
| 803 // Convert rhs to a double in d6. | 802 // Convert rhs to a double in d6. |
| 804 __ SmiToDoubleVFPRegister(rhs, d6, r7, s13); | 803 __ SmiToDouble(d6, rhs); |
| 805 // Fall through to both_loaded_as_doubles. | 804 // Fall through to both_loaded_as_doubles. |
| 806 } | 805 } |
| 807 | 806 |
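
Both `SmiToDouble` call sites in this hunk shed the scratch core register and VFP single register that `SmiToDoubleVFPRegister` required. A sketch of what the helper computes (that it still uses the deleted vmov/vcvt_f64_s32 sequence underneath is an assumption):

```cpp
#include <cstdint>

// Untag, then convert the signed 32-bit payload to a double. Every 31-bit
// Smi payload is exactly representable in an IEEE-754 double, so the
// conversion is lossless.
double SmiToDouble(int32_t smi) {
  return static_cast<double>(smi >> 1);
}
```
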
| 808 | 807 |
| 809 // See comment at call site. | 808 // See comment at call site. |
| 810 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, | 809 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, |
| 811 Register lhs, | 810 Register lhs, |
| 812 Register rhs) { | 811 Register rhs) { |
| 813 ASSERT((lhs.is(r0) && rhs.is(r1)) || | 812 ASSERT((lhs.is(r0) && rhs.is(r1)) || |
| 814 (lhs.is(r1) && rhs.is(r0))); | 813 (lhs.is(r1) && rhs.is(r0))); |
| (...skipping 406 matching lines...) | |
| 1221 | 1220 |
| 1222 // Boolean -> its value. | 1221 // Boolean -> its value. |
| 1223 CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false); | 1222 CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false); |
| 1224 CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true); | 1223 CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true); |
| 1225 | 1224 |
| 1226 // 'null' -> false. | 1225 // 'null' -> false. |
| 1227 CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false); | 1226 CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false); |
| 1228 | 1227 |
| 1229 if (types_.Contains(SMI)) { | 1228 if (types_.Contains(SMI)) { |
| 1230 // Smis: 0 -> false, all other -> true | 1229 // Smis: 0 -> false, all other -> true |
| 1231 __ tst(tos_, Operand(kSmiTagMask)); | 1230 __ SmiTst(tos_); |
| 1232 // tos_ contains the correct return value already | 1231 // tos_ contains the correct return value already |
| 1233 __ Ret(eq); | 1232 __ Ret(eq); |
| 1234 } else if (types_.NeedsMap()) { | 1233 } else if (types_.NeedsMap()) { |
| 1235 // If we need a map later and have a Smi -> patch. | 1234 // If we need a map later and have a Smi -> patch. |
| 1236 __ JumpIfSmi(tos_, &patch); | 1235 __ JumpIfSmi(tos_, &patch); |
| 1237 } | 1236 } |
| 1238 | 1237 |
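
`SmiTst(tos_)` above is a thin wrapper for the former `tst(tos_, Operand(kSmiTagMask))`; with `kSmiTag == 0` the `eq` condition afterwards reads as "is a Smi". The predicate it computes, as a standalone model:

```cpp
#include <cstdint>

constexpr int32_t kSmiTagMask = 1;

// eq after SmiTst(reg)  <=>  (reg & kSmiTagMask) == 0  <=>  reg is a Smi.
bool IsSmi(int32_t value) { return (value & kSmiTagMask) == 0; }
```
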
| 1239 if (types_.NeedsMap()) { | 1238 if (types_.NeedsMap()) { |
| 1240 __ ldr(map, FieldMemOperand(tos_, HeapObject::kMapOffset)); | 1239 __ ldr(map, FieldMemOperand(tos_, HeapObject::kMapOffset)); |
| 1241 | 1240 |
| (...skipping 284 matching lines...) | |
| 1526 __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset)); | 1525 __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset)); |
| 1527 __ ECMAToInt32(r1, d0, r2, r3, r4, d1); | 1526 __ ECMAToInt32(r1, d0, r2, r3, r4, d1); |
| 1528 | 1527 |
| 1529 // Do the bitwise operation and check if the result fits in a smi. | 1528 // Do the bitwise operation and check if the result fits in a smi. |
| 1530 Label try_float; | 1529 Label try_float; |
| 1531 __ mvn(r1, Operand(r1)); | 1530 __ mvn(r1, Operand(r1)); |
| 1532 __ cmn(r1, Operand(0x40000000)); | 1531 __ cmn(r1, Operand(0x40000000)); |
| 1533 __ b(mi, &try_float); | 1532 __ b(mi, &try_float); |
| 1534 | 1533 |
| 1535 // Tag the result as a smi and we're done. | 1534 // Tag the result as a smi and we're done. |
| 1536 __ mov(r0, Operand(r1, LSL, kSmiTagSize)); | 1535 __ SmiTag(r0, r1); |
| 1537 __ Ret(); | 1536 __ Ret(); |
| 1538 | 1537 |
| 1539 // Try to store the result in a heap number. | 1538 // Try to store the result in a heap number. |
| 1540 __ bind(&try_float); | 1539 __ bind(&try_float); |
| 1541 if (mode_ == UNARY_NO_OVERWRITE) { | 1540 if (mode_ == UNARY_NO_OVERWRITE) { |
| 1542 Label slow_allocate_heapnumber, heapnumber_allocated; | 1541 Label slow_allocate_heapnumber, heapnumber_allocated; |
| 1543 __ AllocateHeapNumber(r0, r3, r4, r6, &slow_allocate_heapnumber); | 1542 __ AllocateHeapNumber(r0, r3, r4, r6, &slow_allocate_heapnumber); |
| 1544 __ jmp(&heapnumber_allocated); | 1543 __ jmp(&heapnumber_allocated); |
| 1545 | 1544 |
| 1546 __ bind(&slow_allocate_heapnumber); | 1545 __ bind(&slow_allocate_heapnumber); |
| (...skipping 326 matching lines...) | |
| 1873 // Smi tag result. | 1872 // Smi tag result. |
| 1874 __ SmiTag(right, scratch1); | 1873 __ SmiTag(right, scratch1); |
| 1875 __ Ret(); | 1874 __ Ret(); |
| 1876 break; | 1875 break; |
| 1877 case Token::SHL: | 1876 case Token::SHL: |
| 1878 // Remove tags from operands. | 1877 // Remove tags from operands. |
| 1879 __ SmiUntag(scratch1, left); | 1878 __ SmiUntag(scratch1, left); |
| 1880 __ GetLeastBitsFromSmi(scratch2, right, 5); | 1879 __ GetLeastBitsFromSmi(scratch2, right, 5); |
| 1881 __ mov(scratch1, Operand(scratch1, LSL, scratch2)); | 1880 __ mov(scratch1, Operand(scratch1, LSL, scratch2)); |
| 1882 // Check that the signed result fits in a Smi. | 1881 // Check that the signed result fits in a Smi. |
| 1883 __ add(scratch2, scratch1, Operand(0x40000000), SetCC); | 1882 __ TrySmiTag(right, scratch1, ¬_smi_result); |
| 1884 __ b(mi, ¬_smi_result); | |
| 1885 __ SmiTag(right, scratch1); | |
| 1886 __ Ret(); | 1883 __ Ret(); |
| 1887 break; | 1884 break; |
| 1888 default: | 1885 default: |
| 1889 UNREACHABLE(); | 1886 UNREACHABLE(); |
| 1890 } | 1887 } |
| 1891 __ bind(¬_smi_result); | 1888 __ bind(¬_smi_result); |
| 1892 } | 1889 } |
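
`TrySmiTag(right, scratch1, &not_smi_result)` folds the three-instruction pattern it replaces here (add 0x40000000 with SetCC, branch on minus, SmiTag). The range check is the interesting part: a 32-bit value fits in a 31-bit Smi exactly when adding 2^30 leaves bit 31 clear. A standalone check of that claim:

```cpp
#include <cassert>
#include <cstdint>

// value fits in a Smi iff value is in [-2^30, 2^30), i.e. iff
// value + 0x40000000 has bit 31 clear -- the old 'b(mi, ...)' test.
bool FitsInSmi(int32_t value) {
  return (static_cast<uint32_t>(value) + 0x40000000u) < 0x80000000u;
}

int main() {
  assert(FitsInSmi(0) && FitsInSmi((1 << 30) - 1) && FitsInSmi(-(1 << 30)));
  assert(!FitsInSmi(1 << 30) && !FitsInSmi(-(1 << 30) - 1));
  return 0;
}
```
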
| 1893 | 1890 |
| 1894 | 1891 |
| 1895 void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, | 1892 void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, |
| (...skipping 41 matching lines...) | |
| 1937 case Token::MUL: | 1934 case Token::MUL: |
| 1938 case Token::DIV: | 1935 case Token::DIV: |
| 1939 case Token::MOD: { | 1936 case Token::MOD: { |
| 1940 // Allocate new heap number for result. | 1937 // Allocate new heap number for result. |
| 1941 Register result = r5; | 1938 Register result = r5; |
| 1942 BinaryOpStub_GenerateHeapResultAllocation( | 1939 BinaryOpStub_GenerateHeapResultAllocation( |
| 1943 masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); | 1940 masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); |
| 1944 | 1941 |
| 1945 // Load left and right operands into d0 and d1. | 1942 // Load left and right operands into d0 and d1. |
| 1946 if (smi_operands) { | 1943 if (smi_operands) { |
| 1947 __ SmiUntag(scratch1, right); | 1944 __ SmiToDouble(d1, right); |
| 1948 __ vmov(d1.high(), scratch1); | 1945 __ SmiToDouble(d0, left); |
| 1949 __ vcvt_f64_s32(d1, d1.high()); | |
| 1950 __ SmiUntag(scratch1, left); | |
| 1951 __ vmov(d0.high(), scratch1); | |
| 1952 __ vcvt_f64_s32(d0, d0.high()); | |
| 1953 } else { | 1946 } else { |
| 1954 // Load right operand into d1. | 1947 // Load right operand into d1. |
| 1955 if (right_type == BinaryOpIC::INT32) { | 1948 if (right_type == BinaryOpIC::INT32) { |
| 1956 __ LoadNumberAsInt32Double( | 1949 __ LoadNumberAsInt32Double( |
| 1957 right, d1, heap_number_map, scratch1, d8, miss); | 1950 right, d1, heap_number_map, scratch1, d8, miss); |
| 1958 } else { | 1951 } else { |
| 1959 Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers; | 1952 Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers; |
| 1960 __ LoadNumber(right, d1, heap_number_map, scratch1, fail); | 1953 __ LoadNumber(right, d1, heap_number_map, scratch1, fail); |
| 1961 } | 1954 } |
| 1962 // Load left operand into d0. | 1955 // Load left operand into d0. |
| (...skipping 90 matching lines...) | |
| 2053 case Token::SHL: | 2046 case Token::SHL: |
| 2054 // Use only the 5 least significant bits of the shift count. | 2047 // Use only the 5 least significant bits of the shift count. |
| 2055 __ GetLeastBitsFromInt32(r2, r2, 5); | 2048 __ GetLeastBitsFromInt32(r2, r2, 5); |
| 2056 __ mov(r2, Operand(r3, LSL, r2)); | 2049 __ mov(r2, Operand(r3, LSL, r2)); |
| 2057 break; | 2050 break; |
| 2058 default: | 2051 default: |
| 2059 UNREACHABLE(); | 2052 UNREACHABLE(); |
| 2060 } | 2053 } |
| 2061 | 2054 |
| 2062 // Check that the *signed* result fits in a smi. | 2055 // Check that the *signed* result fits in a smi. |
| 2063 __ add(r3, r2, Operand(0x40000000), SetCC); | 2056 __ TrySmiTag(r0, r2, &result_not_a_smi); |
| 2064 __ b(mi, &result_not_a_smi); | |
| 2065 __ SmiTag(r0, r2); | |
| 2066 __ Ret(); | 2057 __ Ret(); |
| 2067 | 2058 |
| 2068 // Allocate new heap number for result. | 2059 // Allocate new heap number for result. |
| 2069 __ bind(&result_not_a_smi); | 2060 __ bind(&result_not_a_smi); |
| 2070 Register result = r5; | 2061 Register result = r5; |
| 2071 if (smi_operands) { | 2062 if (smi_operands) { |
| 2072 __ AllocateHeapNumber( | 2063 __ AllocateHeapNumber( |
| 2073 result, scratch1, scratch2, heap_number_map, gc_required); | 2064 result, scratch1, scratch2, heap_number_map, gc_required); |
| 2074 } else { | 2065 } else { |
| 2075 BinaryOpStub_GenerateHeapResultAllocation( | 2066 BinaryOpStub_GenerateHeapResultAllocation( |
| (...skipping 39 matching lines...) | |
| 2115 BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, | 2106 BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, |
| 2116 OverwriteMode mode) { | 2107 OverwriteMode mode) { |
| 2117 Label not_smis; | 2108 Label not_smis; |
| 2118 | 2109 |
| 2119 Register left = r1; | 2110 Register left = r1; |
| 2120 Register right = r0; | 2111 Register right = r0; |
| 2121 Register scratch1 = r7; | 2112 Register scratch1 = r7; |
| 2122 | 2113 |
| 2123 // Perform combined smi check on both operands. | 2114 // Perform combined smi check on both operands. |
| 2124 __ orr(scratch1, left, Operand(right)); | 2115 __ orr(scratch1, left, Operand(right)); |
| 2125 STATIC_ASSERT(kSmiTag == 0); | |
| 2126 __ JumpIfNotSmi(scratch1, ¬_smis); | 2116 __ JumpIfNotSmi(scratch1, ¬_smis); |
| 2127 | 2117 |
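
The combined check above works because of the zero tag: the OR of two words has a clear low bit only when both low bits are clear, so one `JumpIfNotSmi` covers both operands. The dropped `STATIC_ASSERT(kSmiTag == 0)` documented exactly that precondition:

```cpp
#include <cstdint>

// Both operands are Smis iff their OR still has a clear tag bit; a single
// test therefore checks two registers at once. Requires kSmiTag == 0.
bool BothSmi(int32_t left, int32_t right) {
  return ((left | right) & 1) == 0;
}
```
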
| 2128 // If the smi-smi operation results in a smi, a return is generated. | 2118 // If the smi-smi operation results in a smi, a return is generated. |
| 2129 BinaryOpStub_GenerateSmiSmiOperation(masm, op); | 2119 BinaryOpStub_GenerateSmiSmiOperation(masm, op); |
| 2130 | 2120 |
| 2131 // If heap number results are possible, generate the result in an allocated | 2121 // If heap number results are possible, generate the result in an allocated |
| 2132 // heap number. | 2122 // heap number. |
| 2133 if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) { | 2123 if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) { |
| 2134 BinaryOpStub_GenerateFPOperation( | 2124 BinaryOpStub_GenerateFPOperation( |
| 2135 masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true, | 2125 masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true, |
| (...skipping 268 matching lines...) | |
| 2404 : &return_heap_number); | 2394 : &return_heap_number); |
| 2405 break; | 2395 break; |
| 2406 case Token::SHL: | 2396 case Token::SHL: |
| 2407 __ and_(r2, r2, Operand(0x1f)); | 2397 __ and_(r2, r2, Operand(0x1f)); |
| 2408 __ mov(r2, Operand(r3, LSL, r2)); | 2398 __ mov(r2, Operand(r3, LSL, r2)); |
| 2409 break; | 2399 break; |
| 2410 default: | 2400 default: |
| 2411 UNREACHABLE(); | 2401 UNREACHABLE(); |
| 2412 } | 2402 } |
| 2413 | 2403 |
| 2414 // Check if the result fits in a smi. | 2404 // Check if the result fits in a smi. If not try to return a heap number. |
| 2415 __ add(scratch1, r2, Operand(0x40000000), SetCC); | 2405 // (We know the result is an int32). |
| 2416 // If not try to return a heap number. (We know the result is an int32.) | 2406 __ TrySmiTag(r0, r2, &return_heap_number); |
| 2417 __ b(mi, &return_heap_number); | |
| 2418 // Tag the result and return. | |
| 2419 __ SmiTag(r0, r2); | |
| 2420 __ Ret(); | 2407 __ Ret(); |
| 2421 | 2408 |
| 2422 __ bind(&return_heap_number); | 2409 __ bind(&return_heap_number); |
| 2423 heap_number_result = r5; | 2410 heap_number_result = r5; |
| 2424 BinaryOpStub_GenerateHeapResultAllocation(masm, | 2411 BinaryOpStub_GenerateHeapResultAllocation(masm, |
| 2425 heap_number_result, | 2412 heap_number_result, |
| 2426 heap_number_map, | 2413 heap_number_map, |
| 2427 scratch1, | 2414 scratch1, |
| 2428 scratch2, | 2415 scratch2, |
| 2429 &call_runtime, | 2416 &call_runtime, |
| (...skipping 207 matching lines...) | |
| 2637 const Register cache_entry = r0; | 2624 const Register cache_entry = r0; |
| 2638 const bool tagged = (argument_type_ == TAGGED); | 2625 const bool tagged = (argument_type_ == TAGGED); |
| 2639 | 2626 |
| 2640 if (tagged) { | 2627 if (tagged) { |
| 2641 // Argument is a number and is on stack and in r0. | 2628 // Argument is a number and is on stack and in r0. |
| 2642 // Load argument and check if it is a smi. | 2629 // Load argument and check if it is a smi. |
| 2643 __ JumpIfNotSmi(r0, &input_not_smi); | 2630 __ JumpIfNotSmi(r0, &input_not_smi); |
| 2644 | 2631 |
| 2645 // Input is a smi. Convert to double and load the low and high words | 2632 // Input is a smi. Convert to double and load the low and high words |
| 2646 // of the double into r2, r3. | 2633 // of the double into r2, r3. |
| 2647 __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); | 2634 __ SmiToDouble(d7, r0); |
| 2635 __ vmov(r2, r3, d7); |
| 2648 __ b(&loaded); | 2636 __ b(&loaded); |
| 2649 | 2637 |
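
The rewritten smi path converts with `SmiToDouble(d7, r0)` and then splits the double into two 32-bit words with `vmov(r2, r3, d7)` (low word to r2, high word to r3). A portable model of that split:

```cpp
#include <cstdint>
#include <cstring>

// Mirror of vmov(r2, r3, d7): expose the raw IEEE-754 words of a double.
void DoubleWords(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);      // well-defined type pun
  *lo = static_cast<uint32_t>(bits);        // -> r2
  *hi = static_cast<uint32_t>(bits >> 32);  // -> r3
}
```
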
| 2650 __ bind(&input_not_smi); | 2638 __ bind(&input_not_smi); |
| 2651 // Check if input is a HeapNumber. | 2639 // Check if input is a HeapNumber. |
| 2652 __ CheckMap(r0, | 2640 __ CheckMap(r0, |
| 2653 r1, | 2641 r1, |
| 2654 Heap::kHeapNumberMapRootIndex, | 2642 Heap::kHeapNumberMapRootIndex, |
| 2655 &calculate, | 2643 &calculate, |
| 2656 DONT_DO_SMI_CHECK); | 2644 DONT_DO_SMI_CHECK); |
| 2657 // Input is a HeapNumber. Load it to a double register and store the | 2645 // Input is a HeapNumber. Load it to a double register and store the |
| (...skipping 1177 matching lines...) | |
| 3835 __ b(eq, &adaptor); | 3823 __ b(eq, &adaptor); |
| 3836 | 3824 |
| 3837 // Check index against formal parameters count limit passed in | 3825 // Check index against formal parameters count limit passed in |
| 3838 // through register r0. Use unsigned comparison to get negative | 3826 // through register r0. Use unsigned comparison to get negative |
| 3839 // check for free. | 3827 // check for free. |
| 3840 __ cmp(r1, r0); | 3828 __ cmp(r1, r0); |
| 3841 __ b(hs, &slow); | 3829 __ b(hs, &slow); |
| 3842 | 3830 |
| 3843 // Read the argument from the stack and return it. | 3831 // Read the argument from the stack and return it. |
| 3844 __ sub(r3, r0, r1); | 3832 __ sub(r3, r0, r1); |
| 3845 __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); | 3833 __ add(r3, fp, Operand::PointerOffsetFromSmiKey(r3)); |
| 3846 __ ldr(r0, MemOperand(r3, kDisplacement)); | 3834 __ ldr(r0, MemOperand(r3, kDisplacement)); |
| 3847 __ Jump(lr); | 3835 __ Jump(lr); |
| 3848 | 3836 |
| 3849 // Arguments adaptor case: Check index against actual arguments | 3837 // Arguments adaptor case: Check index against actual arguments |
| 3850 // limit found in the arguments adaptor frame. Use unsigned | 3838 // limit found in the arguments adaptor frame. Use unsigned |
| 3851 // comparison to get negative check for free. | 3839 // comparison to get negative check for free. |
| 3852 __ bind(&adaptor); | 3840 __ bind(&adaptor); |
| 3853 __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 3841 __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
| 3854 __ cmp(r1, r0); | 3842 __ cmp(r1, r0); |
| 3855 __ b(cs, &slow); | 3843 __ b(cs, &slow); |
| 3856 | 3844 |
| 3857 // Read the argument from the adaptor frame and return it. | 3845 // Read the argument from the adaptor frame and return it. |
| 3858 __ sub(r3, r0, r1); | 3846 __ sub(r3, r0, r1); |
| 3859 __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); | 3847 __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r3)); |
| 3860 __ ldr(r0, MemOperand(r3, kDisplacement)); | 3848 __ ldr(r0, MemOperand(r3, kDisplacement)); |
| 3861 __ Jump(lr); | 3849 __ Jump(lr); |
| 3862 | 3850 |
| 3863 // Slow-case: Handle non-smi or out-of-bounds access to arguments | 3851 // Slow-case: Handle non-smi or out-of-bounds access to arguments |
| 3864 // by calling the runtime system. | 3852 // by calling the runtime system. |
| 3865 __ bind(&slow); | 3853 __ bind(&slow); |
| 3866 __ push(r1); | 3854 __ push(r1); |
| 3867 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); | 3855 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); |
| 3868 } | 3856 } |
| 3869 | 3857 |
| (...skipping 232 matching lines...) | |
| 4102 __ b(eq, &adaptor_frame); | 4090 __ b(eq, &adaptor_frame); |
| 4103 | 4091 |
| 4104 // Get the length from the frame. | 4092 // Get the length from the frame. |
| 4105 __ ldr(r1, MemOperand(sp, 0)); | 4093 __ ldr(r1, MemOperand(sp, 0)); |
| 4106 __ b(&try_allocate); | 4094 __ b(&try_allocate); |
| 4107 | 4095 |
| 4108 // Patch the arguments.length and the parameters pointer. | 4096 // Patch the arguments.length and the parameters pointer. |
| 4109 __ bind(&adaptor_frame); | 4097 __ bind(&adaptor_frame); |
| 4110 __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 4098 __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
| 4111 __ str(r1, MemOperand(sp, 0)); | 4099 __ str(r1, MemOperand(sp, 0)); |
| 4112 __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize)); | 4100 __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1)); |
| 4113 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); | 4101 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); |
| 4114 __ str(r3, MemOperand(sp, 1 * kPointerSize)); | 4102 __ str(r3, MemOperand(sp, 1 * kPointerSize)); |
| 4115 | 4103 |
| 4116 // Try the new space allocation. Start out with computing the size | 4104 // Try the new space allocation. Start out with computing the size |
| 4117 // of the arguments object and the elements array in words. | 4105 // of the arguments object and the elements array in words. |
| 4118 Label add_arguments_object; | 4106 Label add_arguments_object; |
| 4119 __ bind(&try_allocate); | 4107 __ bind(&try_allocate); |
| 4120 __ cmp(r1, Operand::Zero()); | 4108 __ SmiUntag(r1, SetCC); |
| 4121 __ b(eq, &add_arguments_object); | 4109 __ b(eq, &add_arguments_object); |
| 4122 __ mov(r1, Operand(r1, LSR, kSmiTagSize)); | |
| 4123 __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize)); | 4110 __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize)); |
| 4124 __ bind(&add_arguments_object); | 4111 __ bind(&add_arguments_object); |
| 4125 __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize)); | 4112 __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize)); |
| 4126 | 4113 |
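
`SmiUntag(r1, SetCC)` above does double duty: the flag-setting shift updates Z, so the following `b(eq, &add_arguments_object)` still observes whether the tagged length was zero, and the old explicit `cmp(r1, Operand::Zero())` can go. Modeled:

```cpp
#include <cstdint>

// One flag-setting shift replaces untag plus compare-with-zero: the
// untagged result is zero exactly when the tagged Smi was zero.
bool UntagAndTestZero(int32_t smi, int32_t* untagged) {
  *untagged = smi >> 1;   // SmiUntag(r1, SetCC)
  return *untagged == 0;  // the 'eq' condition the branch consumes
}
```
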
| 4127 // Do the allocation of both objects in one go. | 4114 // Do the allocation of both objects in one go. |
| 4128 __ Allocate(r1, r0, r2, r3, &runtime, | 4115 __ Allocate(r1, r0, r2, r3, &runtime, |
| 4129 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS)); | 4116 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS)); |
| 4130 | 4117 |
| 4131 // Get the arguments boilerplate from the current native context. | 4118 // Get the arguments boilerplate from the current native context. |
| 4132 __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 4119 __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
| (...skipping 18 matching lines...) | |
| 4151 // Get the parameters pointer from the stack. | 4138 // Get the parameters pointer from the stack. |
| 4152 __ ldr(r2, MemOperand(sp, 1 * kPointerSize)); | 4139 __ ldr(r2, MemOperand(sp, 1 * kPointerSize)); |
| 4153 | 4140 |
| 4154 // Set up the elements pointer in the allocated arguments object and | 4141 // Set up the elements pointer in the allocated arguments object and |
| 4155 // initialize the header in the elements fixed array. | 4142 // initialize the header in the elements fixed array. |
| 4156 __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict)); | 4143 __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict)); |
| 4157 __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset)); | 4144 __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset)); |
| 4158 __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex); | 4145 __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex); |
| 4159 __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset)); | 4146 __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset)); |
| 4160 __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset)); | 4147 __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset)); |
| 4161 // Untag the length for the loop. | 4148 __ SmiUntag(r1); |
| 4162 __ mov(r1, Operand(r1, LSR, kSmiTagSize)); | |
| 4163 | 4149 |
| 4164 // Copy the fixed array slots. | 4150 // Copy the fixed array slots. |
| 4165 Label loop; | 4151 Label loop; |
| 4166 // Set up r4 to point to the first array slot. | 4152 // Set up r4 to point to the first array slot. |
| 4167 __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 4153 __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 4168 __ bind(&loop); | 4154 __ bind(&loop); |
| 4169 // Pre-decrement r2 with kPointerSize on each iteration. | 4155 // Pre-decrement r2 with kPointerSize on each iteration. |
| 4170 // Pre-decrement in order to skip receiver. | 4156 // Pre-decrement in order to skip receiver. |
| 4171 __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex)); | 4157 __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex)); |
| 4172 // Post-increment r4 with kPointerSize on each iteration. | 4158 // Post-increment r4 with kPointerSize on each iteration. |
| (...skipping 48 matching lines...) | |
| 4221 ExternalReference::address_of_regexp_stack_memory_address(isolate); | 4207 ExternalReference::address_of_regexp_stack_memory_address(isolate); |
| 4222 ExternalReference address_of_regexp_stack_memory_size = | 4208 ExternalReference address_of_regexp_stack_memory_size = |
| 4223 ExternalReference::address_of_regexp_stack_memory_size(isolate); | 4209 ExternalReference::address_of_regexp_stack_memory_size(isolate); |
| 4224 __ mov(r0, Operand(address_of_regexp_stack_memory_size)); | 4210 __ mov(r0, Operand(address_of_regexp_stack_memory_size)); |
| 4225 __ ldr(r0, MemOperand(r0, 0)); | 4211 __ ldr(r0, MemOperand(r0, 0)); |
| 4226 __ cmp(r0, Operand::Zero()); | 4212 __ cmp(r0, Operand::Zero()); |
| 4227 __ b(eq, &runtime); | 4213 __ b(eq, &runtime); |
| 4228 | 4214 |
| 4229 // Check that the first argument is a JSRegExp object. | 4215 // Check that the first argument is a JSRegExp object. |
| 4230 __ ldr(r0, MemOperand(sp, kJSRegExpOffset)); | 4216 __ ldr(r0, MemOperand(sp, kJSRegExpOffset)); |
| 4231 STATIC_ASSERT(kSmiTag == 0); | |
| 4232 __ JumpIfSmi(r0, &runtime); | 4217 __ JumpIfSmi(r0, &runtime); |
| 4233 __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE); | 4218 __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE); |
| 4234 __ b(ne, &runtime); | 4219 __ b(ne, &runtime); |
| 4235 | 4220 |
| 4236 // Check that the RegExp has been compiled (data contains a fixed array). | 4221 // Check that the RegExp has been compiled (data contains a fixed array). |
| 4237 __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset)); | 4222 __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset)); |
| 4238 if (FLAG_debug_code) { | 4223 if (FLAG_debug_code) { |
| 4239 __ tst(regexp_data, Operand(kSmiTagMask)); | 4224 __ SmiTst(regexp_data); |
| 4240 __ Check(ne, "Unexpected type for RegExp data, FixedArray expected"); | 4225 __ Check(ne, "Unexpected type for RegExp data, FixedArray expected"); |
| 4241 __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE); | 4226 __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE); |
| 4242 __ Check(eq, "Unexpected type for RegExp data, FixedArray expected"); | 4227 __ Check(eq, "Unexpected type for RegExp data, FixedArray expected"); |
| 4243 } | 4228 } |
| 4244 | 4229 |
| 4245 // regexp_data: RegExp data (FixedArray) | 4230 // regexp_data: RegExp data (FixedArray) |
| 4246 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. | 4231 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. |
| 4247 __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); | 4232 __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); |
| 4248 __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP))); | 4233 __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP))); |
| 4249 __ b(ne, &runtime); | 4234 __ b(ne, &runtime); |
| (...skipping 84 matching lines...) | |
| 4334 // subject: sequential subject string (or look-alike, external string) | 4319 // subject: sequential subject string (or look-alike, external string) |
| 4335 // r3: original subject string | 4320 // r3: original subject string |
| 4336 // Load previous index and check range before r3 is overwritten. We have to | 4321 // Load previous index and check range before r3 is overwritten. We have to |
| 4337 // use r3 instead of subject here because subject might have been only made | 4322 // use r3 instead of subject here because subject might have been only made |
| 4338 // to look like a sequential string when it actually is an external string. | 4323 // to look like a sequential string when it actually is an external string. |
| 4339 __ ldr(r1, MemOperand(sp, kPreviousIndexOffset)); | 4324 __ ldr(r1, MemOperand(sp, kPreviousIndexOffset)); |
| 4340 __ JumpIfNotSmi(r1, &runtime); | 4325 __ JumpIfNotSmi(r1, &runtime); |
| 4341 __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset)); | 4326 __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset)); |
| 4342 __ cmp(r3, Operand(r1)); | 4327 __ cmp(r3, Operand(r1)); |
| 4343 __ b(ls, &runtime); | 4328 __ b(ls, &runtime); |
| 4344 __ mov(r1, Operand(r1, ASR, kSmiTagSize)); | 4329 __ SmiUntag(r1); |
| 4345 | 4330 |
| 4346 STATIC_ASSERT(4 == kOneByteStringTag); | 4331 STATIC_ASSERT(4 == kOneByteStringTag); |
| 4347 STATIC_ASSERT(kTwoByteStringTag == 0); | 4332 STATIC_ASSERT(kTwoByteStringTag == 0); |
| 4348 __ and_(r0, r0, Operand(kStringEncodingMask)); | 4333 __ and_(r0, r0, Operand(kStringEncodingMask)); |
| 4349 __ mov(r3, Operand(r0, ASR, 2), SetCC); | 4334 __ mov(r3, Operand(r0, ASR, 2), SetCC); |
| 4350 __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne); | 4335 __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne); |
| 4351 __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq); | 4336 __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq); |
| 4352 | 4337 |
| 4353 // (E) Carry on. String handling is done. | 4338 // (E) Carry on. String handling is done. |
| 4354 // r7: irregexp code | 4339 // r7: irregexp code |
| (...skipping 54 matching lines...) | |
| 4409 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.) | 4394 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.) |
| 4410 __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize)); | 4395 __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize)); |
| 4411 // If slice offset is not 0, load the length from the original sliced string. | 4396 // If slice offset is not 0, load the length from the original sliced string. |
| 4412 // Argument 4, r3: End of string data | 4397 // Argument 4, r3: End of string data |
| 4413 // Argument 3, r2: Start of string data | 4398 // Argument 3, r2: Start of string data |
| 4414 // Prepare start and end index of the input. | 4399 // Prepare start and end index of the input. |
| 4415 __ add(r9, r8, Operand(r9, LSL, r3)); | 4400 __ add(r9, r8, Operand(r9, LSL, r3)); |
| 4416 __ add(r2, r9, Operand(r1, LSL, r3)); | 4401 __ add(r2, r9, Operand(r1, LSL, r3)); |
| 4417 | 4402 |
| 4418 __ ldr(r8, FieldMemOperand(subject, String::kLengthOffset)); | 4403 __ ldr(r8, FieldMemOperand(subject, String::kLengthOffset)); |
| 4419 __ mov(r8, Operand(r8, ASR, kSmiTagSize)); | 4404 __ SmiUntag(r8); |
| 4420 __ add(r3, r9, Operand(r8, LSL, r3)); | 4405 __ add(r3, r9, Operand(r8, LSL, r3)); |
| 4421 | 4406 |
| 4422 // Argument 2 (r1): Previous index. | 4407 // Argument 2 (r1): Previous index. |
| 4423 // Already there | 4408 // Already there |
| 4424 | 4409 |
| 4425 // Argument 1 (r0): Subject string. | 4410 // Argument 1 (r0): Subject string. |
| 4426 __ mov(r0, subject); | 4411 __ mov(r0, subject); |
| 4427 | 4412 |
| 4428 // Locate the code entry and call it. | 4413 // Locate the code entry and call it. |
| 4429 __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag)); | 4414 __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| (...skipping 66 matching lines...) | |
| 4496 __ ldr(last_match_info_elements, | 4481 __ ldr(last_match_info_elements, |
| 4497 FieldMemOperand(r0, JSArray::kElementsOffset)); | 4482 FieldMemOperand(r0, JSArray::kElementsOffset)); |
| 4498 __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); | 4483 __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); |
| 4499 __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex); | 4484 __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex); |
| 4500 __ b(ne, &runtime); | 4485 __ b(ne, &runtime); |
| 4501 // Check that the last match info has space for the capture registers and the | 4486 // Check that the last match info has space for the capture registers and the |
| 4502 // additional information. | 4487 // additional information. |
| 4503 __ ldr(r0, | 4488 __ ldr(r0, |
| 4504 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset)); | 4489 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset)); |
| 4505 __ add(r2, r1, Operand(RegExpImpl::kLastMatchOverhead)); | 4490 __ add(r2, r1, Operand(RegExpImpl::kLastMatchOverhead)); |
| 4506 __ cmp(r2, Operand(r0, ASR, kSmiTagSize)); | 4491 __ cmp(r2, Operand::SmiUntag(r0)); |
| 4507 __ b(gt, &runtime); | 4492 __ b(gt, &runtime); |
| 4508 | 4493 |
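
`cmp(r2, Operand::SmiUntag(r0))` folds the untag into the compare as a shifted operand (presumably `r0, ASR #1`), so no separate shift or scratch register is needed. The test being performed, in plain C++ (the names are descriptive, not from the source):

```cpp
#include <cstdint>

// Continue only if the last-match-info FixedArray is long enough: capture
// count plus overhead vs. the array length, untagged on the fly.
bool HasSpaceForCaptures(int32_t needed, int32_t length_smi) {
  return needed <= (length_smi >> 1);  // 'gt' branches to runtime otherwise
}
```
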
| 4509 // r1: number of capture registers | 4494 // r1: number of capture registers |
| 4510 // r4: subject string | 4495 // r4: subject string |
| 4511 // Store the capture count. | 4496 // Store the capture count. |
| 4512 __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize)); // To smi. | 4497 __ SmiTag(r2, r1); |
| 4513 __ str(r2, FieldMemOperand(last_match_info_elements, | 4498 __ str(r2, FieldMemOperand(last_match_info_elements, |
| 4514 RegExpImpl::kLastCaptureCountOffset)); | 4499 RegExpImpl::kLastCaptureCountOffset)); |
| 4515 // Store last subject and last input. | 4500 // Store last subject and last input. |
| 4516 __ str(subject, | 4501 __ str(subject, |
| 4517 FieldMemOperand(last_match_info_elements, | 4502 FieldMemOperand(last_match_info_elements, |
| 4518 RegExpImpl::kLastSubjectOffset)); | 4503 RegExpImpl::kLastSubjectOffset)); |
| 4519 __ mov(r2, subject); | 4504 __ mov(r2, subject); |
| 4520 __ RecordWriteField(last_match_info_elements, | 4505 __ RecordWriteField(last_match_info_elements, |
| 4521 RegExpImpl::kLastSubjectOffset, | 4506 RegExpImpl::kLastSubjectOffset, |
| 4522 subject, | 4507 subject, |
| (...skipping 23 matching lines...) | |
| 4546 // counts down until wrapping after zero. | 4531 // counts down until wrapping after zero. |
| 4547 __ add(r0, | 4532 __ add(r0, |
| 4548 last_match_info_elements, | 4533 last_match_info_elements, |
| 4549 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag)); | 4534 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag)); |
| 4550 __ bind(&next_capture); | 4535 __ bind(&next_capture); |
| 4551 __ sub(r1, r1, Operand(1), SetCC); | 4536 __ sub(r1, r1, Operand(1), SetCC); |
| 4552 __ b(mi, &done); | 4537 __ b(mi, &done); |
| 4553 // Read the value from the static offsets vector buffer. | 4538 // Read the value from the static offsets vector buffer. |
| 4554 __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex)); | 4539 __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex)); |
| 4555 // Store the smi value in the last match info. | 4540 // Store the smi value in the last match info. |
| 4556 __ mov(r3, Operand(r3, LSL, kSmiTagSize)); | 4541 __ SmiTag(r3); |
| 4557 __ str(r3, MemOperand(r0, kPointerSize, PostIndex)); | 4542 __ str(r3, MemOperand(r0, kPointerSize, PostIndex)); |
| 4558 __ jmp(&next_capture); | 4543 __ jmp(&next_capture); |
| 4559 __ bind(&done); | 4544 __ bind(&done); |
| 4560 | 4545 |
| 4561 // Return last match info. | 4546 // Return last match info. |
| 4562 __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset)); | 4547 __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset)); |
| 4563 __ add(sp, sp, Operand(4 * kPointerSize)); | 4548 __ add(sp, sp, Operand(4 * kPointerSize)); |
| 4564 __ Ret(); | 4549 __ Ret(); |
| 4565 | 4550 |
| 4566 // Do the runtime call to execute the regexp. | 4551 // Do the runtime call to execute the regexp. |
| (...skipping 27 matching lines...) | |
| 4594 | 4579 |
| 4595 // (8) Short external string or not a string? If yes, bail out to runtime. | 4580 // (8) Short external string or not a string? If yes, bail out to runtime. |
| 4596 __ bind(¬_long_external); | 4581 __ bind(¬_long_external); |
| 4597 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0); | 4582 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0); |
| 4598 __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask)); | 4583 __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask)); |
| 4599 __ b(ne, &runtime); | 4584 __ b(ne, &runtime); |
| 4600 | 4585 |
| 4601 // (9) Sliced string. Replace subject with parent. Go to (4). | 4586 // (9) Sliced string. Replace subject with parent. Go to (4). |
| 4602 // Load offset into r9 and replace subject string with parent. | 4587 // Load offset into r9 and replace subject string with parent. |
| 4603 __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset)); | 4588 __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset)); |
| 4604 __ mov(r9, Operand(r9, ASR, kSmiTagSize)); | 4589 __ SmiUntag(r9); |
| 4605 __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset)); | 4590 __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset)); |
| 4606 __ jmp(&check_underlying); // Go to (4). | 4591 __ jmp(&check_underlying); // Go to (4). |
| 4607 #endif // V8_INTERPRETED_REGEXP | 4592 #endif // V8_INTERPRETED_REGEXP |
| 4608 } | 4593 } |
| 4609 | 4594 |
| 4610 | 4595 |
| 4611 void RegExpConstructResultStub::Generate(MacroAssembler* masm) { | 4596 void RegExpConstructResultStub::Generate(MacroAssembler* masm) { |
| 4612 const int kMaxInlineLength = 100; | 4597 const int kMaxInlineLength = 100; |
| 4613 Label slowcase; | 4598 Label slowcase; |
| 4614 Label done; | 4599 Label done; |
| 4615 Factory* factory = masm->isolate()->factory(); | 4600 Factory* factory = masm->isolate()->factory(); |
| 4616 | 4601 |
| 4617 __ ldr(r1, MemOperand(sp, kPointerSize * 2)); | 4602 __ ldr(r1, MemOperand(sp, kPointerSize * 2)); |
| 4618 STATIC_ASSERT(kSmiTag == 0); | 4603 STATIC_ASSERT(kSmiTag == 0); |
| 4619 STATIC_ASSERT(kSmiTagSize == 1); | 4604 STATIC_ASSERT(kSmiTagSize == 1); |
| 4620 __ JumpIfNotSmi(r1, &slowcase); | 4605 __ JumpIfNotSmi(r1, &slowcase); |
| 4621 __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength))); | 4606 __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength))); |
| 4622 __ b(hi, &slowcase); | 4607 __ b(hi, &slowcase); |
| 4623 // Smi-tagging is equivalent to multiplying by 2. | 4608 // Smi-tagging is equivalent to multiplying by 2. |
| 4624 // Allocate RegExpResult followed by FixedArray with size in r2. | 4609 // Allocate RegExpResult followed by FixedArray with size in r2. |
| 4625 // JSArray: [Map][empty properties][Elements][Length-smi][index][input] | 4610 // JSArray: [Map][empty properties][Elements][Length-smi][index][input] |
| 4626 // Elements: [Map][Length][..elements..] | 4611 // Elements: [Map][Length][..elements..] |
| 4627 // Size of JSArray with two in-object properties and the header of a | 4612 // Size of JSArray with two in-object properties and the header of a |
| 4628 // FixedArray. | 4613 // FixedArray. |
| 4629 int objects_size = | 4614 int objects_size = |
| 4630 (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize; | 4615 (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize; |
| 4631 __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize)); | 4616 __ SmiUntag(r5, r1); |
| 4632 __ add(r2, r5, Operand(objects_size)); | 4617 __ add(r2, r5, Operand(objects_size)); |
| 4633 __ Allocate( | 4618 __ Allocate( |
| 4634 r2, // In: Size, in words. | 4619 r2, // In: Size, in words. |
| 4635 r0, // Out: Start of allocation (tagged). | 4620 r0, // Out: Start of allocation (tagged). |
| 4636 r3, // Scratch register. | 4621 r3, // Scratch register. |
| 4637 r4, // Scratch register. | 4622 r4, // Scratch register. |
| 4638 &slowcase, | 4623 &slowcase, |
| 4639 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS)); | 4624 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS)); |
| 4640 // r0: Start of allocated area, object-tagged. | 4625 // r0: Start of allocated area, object-tagged. |
| 4641 // r1: Number of elements in array, as smi. | 4626 // r1: Number of elements in array, as smi. |
| (...skipping 22 matching lines...) | |
| 4664 | 4649 |
| 4665 // Fill out the elements FixedArray. | 4650 // Fill out the elements FixedArray. |
| 4666 // r0: JSArray, tagged. | 4651 // r0: JSArray, tagged. |
| 4667 // r3: FixedArray, tagged. | 4652 // r3: FixedArray, tagged. |
| 4668 // r5: Number of elements in array, untagged. | 4653 // r5: Number of elements in array, untagged. |
| 4669 | 4654 |
| 4670 // Set map. | 4655 // Set map. |
| 4671 __ mov(r2, Operand(factory->fixed_array_map())); | 4656 __ mov(r2, Operand(factory->fixed_array_map())); |
| 4672 __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); | 4657 __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); |
| 4673 // Set FixedArray length. | 4658 // Set FixedArray length. |
| 4674 __ mov(r6, Operand(r5, LSL, kSmiTagSize)); | 4659 __ SmiTag(r6, r5); |
| 4675 __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset)); | 4660 __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset)); |
| 4676 // Fill contents of fixed-array with undefined. | 4661 // Fill contents of fixed-array with undefined. |
| 4677 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); | 4662 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); |
| 4678 __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 4663 __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 4679 // Fill fixed array elements with undefined. | 4664 // Fill fixed array elements with undefined. |
| 4680 // r0: JSArray, tagged. | 4665 // r0: JSArray, tagged. |
| 4681 // r2: undefined. | 4666 // r2: undefined. |
| 4682 // r3: Start of elements in FixedArray. | 4667 // r3: Start of elements in FixedArray. |
| 4683 // r5: Number of elements to fill. | 4668 // r5: Number of elements to fill. |
| 4684 Label loop; | 4669 Label loop; |
| (...skipping 296 matching lines...) | |
| 4981 | 4966 |
| 4982 // If the index is non-smi trigger the non-smi case. | 4967 // If the index is non-smi trigger the non-smi case. |
| 4983 __ JumpIfNotSmi(index_, &index_not_smi_); | 4968 __ JumpIfNotSmi(index_, &index_not_smi_); |
| 4984 __ bind(&got_smi_index_); | 4969 __ bind(&got_smi_index_); |
| 4985 | 4970 |
| 4986 // Check for index out of range. | 4971 // Check for index out of range. |
| 4987 __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset)); | 4972 __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset)); |
| 4988 __ cmp(ip, Operand(index_)); | 4973 __ cmp(ip, Operand(index_)); |
| 4989 __ b(ls, index_out_of_range_); | 4974 __ b(ls, index_out_of_range_); |
| 4990 | 4975 |
| 4991 __ mov(index_, Operand(index_, ASR, kSmiTagSize)); | 4976 __ SmiUntag(index_); |
| 4992 | 4977 |
| 4993 StringCharLoadGenerator::Generate(masm, | 4978 StringCharLoadGenerator::Generate(masm, |
| 4994 object_, | 4979 object_, |
| 4995 index_, | 4980 index_, |
| 4996 result_, | 4981 result_, |
| 4997 &call_runtime_); | 4982 &call_runtime_); |
| 4998 | 4983 |
| 4999 __ mov(result_, Operand(result_, LSL, kSmiTagSize)); | 4984 __ SmiTag(result_); |
| 5000 __ bind(&exit_); | 4985 __ bind(&exit_); |
| 5001 } | 4986 } |
| 5002 | 4987 |
| 5003 | 4988 |
| 5004 void StringCharCodeAtGenerator::GenerateSlow( | 4989 void StringCharCodeAtGenerator::GenerateSlow( |
| 5005 MacroAssembler* masm, | 4990 MacroAssembler* masm, |
| 5006 const RuntimeCallHelper& call_helper) { | 4991 const RuntimeCallHelper& call_helper) { |
| 5007 __ Abort("Unexpected fallthrough to CharCodeAt slow case"); | 4992 __ Abort("Unexpected fallthrough to CharCodeAt slow case"); |
| 5008 | 4993 |
| 5009 // Index is not a smi. | 4994 // Index is not a smi. |
| (...skipping 25 matching lines...) | |
| 5035 // If index is still not a smi, it must be out of range. | 5020 // If index is still not a smi, it must be out of range. |
| 5036 __ JumpIfNotSmi(index_, index_out_of_range_); | 5021 __ JumpIfNotSmi(index_, index_out_of_range_); |
| 5037 // Otherwise, return to the fast path. | 5022 // Otherwise, return to the fast path. |
| 5038 __ jmp(&got_smi_index_); | 5023 __ jmp(&got_smi_index_); |
| 5039 | 5024 |
| 5040 // Call runtime. We get here when the receiver is a string and the | 5025 // Call runtime. We get here when the receiver is a string and the |
| 5041 // index is a number, but the code for getting the actual character | 5026 // index is a number, but the code for getting the actual character |
| 5042 // is too complex (e.g., when the string needs to be flattened). | 5027 // is too complex (e.g., when the string needs to be flattened). |
| 5043 __ bind(&call_runtime_); | 5028 __ bind(&call_runtime_); |
| 5044 call_helper.BeforeCall(masm); | 5029 call_helper.BeforeCall(masm); |
| 5045 __ mov(index_, Operand(index_, LSL, kSmiTagSize)); | 5030 __ SmiTag(index_); |
| 5046 __ Push(object_, index_); | 5031 __ Push(object_, index_); |
| 5047 __ CallRuntime(Runtime::kStringCharCodeAt, 2); | 5032 __ CallRuntime(Runtime::kStringCharCodeAt, 2); |
| 5048 __ Move(result_, r0); | 5033 __ Move(result_, r0); |
| 5049 call_helper.AfterCall(masm); | 5034 call_helper.AfterCall(masm); |
| 5050 __ jmp(&exit_); | 5035 __ jmp(&exit_); |
| 5051 | 5036 |
| 5052 __ Abort("Unexpected fallthrough from CharCodeAt slow case"); | 5037 __ Abort("Unexpected fallthrough from CharCodeAt slow case"); |
| 5053 } | 5038 } |
| 5054 | 5039 |
| 5055 | 5040 |
| 5056 // ------------------------------------------------------------------------- | 5041 // ------------------------------------------------------------------------- |
| 5057 // StringCharFromCodeGenerator | 5042 // StringCharFromCodeGenerator |
| 5058 | 5043 |
| 5059 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { | 5044 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { |
| 5060 // Fast case of Heap::LookupSingleCharacterStringFromCode. | 5045 // Fast case of Heap::LookupSingleCharacterStringFromCode. |
| 5061 STATIC_ASSERT(kSmiTag == 0); | 5046 STATIC_ASSERT(kSmiTag == 0); |
| 5062 STATIC_ASSERT(kSmiShiftSize == 0); | 5047 STATIC_ASSERT(kSmiShiftSize == 0); |
| 5063 ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1)); | 5048 ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1)); |
| 5064 __ tst(code_, | 5049 __ tst(code_, |
| 5065 Operand(kSmiTagMask | | 5050 Operand(kSmiTagMask | |
| 5066 ((~String::kMaxOneByteCharCode) << kSmiTagSize))); | 5051 ((~String::kMaxOneByteCharCode) << kSmiTagSize))); |
| 5067 __ b(ne, &slow_case_); | 5052 __ b(ne, &slow_case_); |
| 5068 | 5053 |
| 5069 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); | 5054 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); |
| 5070 // At this point code register contains smi tagged ASCII char code. | 5055 // At this point code register contains smi tagged ASCII char code. |
| 5071 STATIC_ASSERT(kSmiTag == 0); | 5056 __ add(result_, result_, Operand::PointerOffsetFromSmiKey(code_)); |
| 5072 __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize)); | |
| 5073 __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); | 5057 __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); |
| 5074 __ CompareRoot(result_, Heap::kUndefinedValueRootIndex); | 5058 __ CompareRoot(result_, Heap::kUndefinedValueRootIndex); |
| 5075 __ b(eq, &slow_case_); | 5059 __ b(eq, &slow_case_); |
| 5076 __ bind(&exit_); | 5060 __ bind(&exit_); |
| 5077 } | 5061 } |
| 5078 | 5062 |
| 5079 | 5063 |
| 5080 void StringCharFromCodeGenerator::GenerateSlow( | 5064 void StringCharFromCodeGenerator::GenerateSlow( |
| 5081 MacroAssembler* masm, | 5065 MacroAssembler* masm, |
| 5082 const RuntimeCallHelper& call_helper) { | 5066 const RuntimeCallHelper& call_helper) { |
| (...skipping 404 matching lines...) | |
| 5487 // We want to bail out to runtime here if From is negative. In that case, the | 5471 // We want to bail out to runtime here if From is negative. In that case, the |
| 5488 // next instruction is not executed and we fall through to bailing out to | 5472 // next instruction is not executed and we fall through to bailing out to |
| 5489 // runtime. | 5473 // runtime. |
| 5490 // Executed if both r2 and r3 are untagged integers. | 5474 // Executed if both r2 and r3 are untagged integers. |
| 5491 __ sub(r2, r2, Operand(r3), SetCC, cc); | 5475 __ sub(r2, r2, Operand(r3), SetCC, cc); |
| 5492 // One of the above un-smis or the above SUB could have set N==1. | 5476 // One of the above un-smis or the above SUB could have set N==1. |
| 5493 __ b(mi, &runtime); // Either "from" or "to" is not an smi, or from > to. | 5477 __ b(mi, &runtime); // Either "from" or "to" is not an smi, or from > to. |
| 5494 | 5478 |
| 5495 // Make sure first argument is a string. | 5479 // Make sure first argument is a string. |
| 5496 __ ldr(r0, MemOperand(sp, kStringOffset)); | 5480 __ ldr(r0, MemOperand(sp, kStringOffset)); |
| 5497 STATIC_ASSERT(kSmiTag == 0); | |
| 5498 // Do a JumpIfSmi, but fold its jump into the subsequent string test. | 5481 // Do a JumpIfSmi, but fold its jump into the subsequent string test. |
| 5499 __ tst(r0, Operand(kSmiTagMask)); | 5482 __ SmiTst(r0); |
| 5500 Condition is_string = masm->IsObjectStringType(r0, r1, ne); | 5483 Condition is_string = masm->IsObjectStringType(r0, r1, ne); |
| 5501 ASSERT(is_string == eq); | 5484 ASSERT(is_string == eq); |
| 5502 __ b(NegateCondition(is_string), &runtime); | 5485 __ b(NegateCondition(is_string), &runtime); |
| 5503 | 5486 |
| 5504 Label single_char; | 5487 Label single_char; |
| 5505 __ cmp(r2, Operand(1)); | 5488 __ cmp(r2, Operand(1)); |
| 5506 __ b(eq, &single_char); | 5489 __ b(eq, &single_char); |
| 5507 | 5490 |
| 5508 // Short-cut for the case of trivial substring. | 5491 // Short-cut for the case of trivial substring. |
| 5509 Label return_r0; | 5492 Label return_r0; |
| (...skipping 376 matching lines...) | |
| 5886 __ cmp(r3, Operand(Smi::FromInt(0)), ne); | 5869 __ cmp(r3, Operand(Smi::FromInt(0)), ne); |
| 5887 __ b(ne, &strings_not_empty); // If either string was empty, return r0. | 5870 __ b(ne, &strings_not_empty); // If either string was empty, return r0. |
| 5888 | 5871 |
| 5889 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); | 5872 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); |
| 5890 __ add(sp, sp, Operand(2 * kPointerSize)); | 5873 __ add(sp, sp, Operand(2 * kPointerSize)); |
| 5891 __ Ret(); | 5874 __ Ret(); |
| 5892 | 5875 |
| 5893 __ bind(&strings_not_empty); | 5876 __ bind(&strings_not_empty); |
| 5894 } | 5877 } |
| 5895 | 5878 |
| 5896 __ mov(r2, Operand(r2, ASR, kSmiTagSize)); | 5879 __ SmiUntag(r2); |
| 5897 __ mov(r3, Operand(r3, ASR, kSmiTagSize)); | 5880 __ SmiUntag(r3); |
| 5898 // Both strings are non-empty. | 5881 // Both strings are non-empty. |
| 5899 // r0: first string | 5882 // r0: first string |
| 5900 // r1: second string | 5883 // r1: second string |
| 5901 // r2: length of first string | 5884 // r2: length of first string |
| 5902 // r3: length of second string | 5885 // r3: length of second string |
| 5903 // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) | 5886 // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) |
| 5904 // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) | 5887 // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) |
| 5905 // Look at the length of the result of adding the two strings. | 5888 // Look at the length of the result of adding the two strings. |
| 5906 Label string_add_flat_result, longer_than_two; | 5889 Label string_add_flat_result, longer_than_two; |
| 5907 // Adding two lengths can't overflow. | 5890 // Adding two lengths can't overflow. |
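Several hunks in this file replace the open-coded `mov(..., Operand(reg, ASR, kSmiTagSize))` with the `SmiUntag` macro. On 32-bit ARM a smi stores its 31-bit payload shifted left by one with a zero tag bit, so an arithmetic right shift by one recovers the value, sign included. A minimal sketch of the tagging arithmetic, assuming the 32-bit configuration (helper names are illustrative, not the V8 macros):

    #include <cassert>
    #include <cstdint>

    const int kSmiTagSize = 1;  // low bit 0 marks a smi on 32-bit V8

    static int32_t SmiTag(int32_t value) {
      // Shift into the tagged form; cast via uint32_t to keep the shift
      // well-defined for negative values.
      return static_cast<int32_t>(static_cast<uint32_t>(value) << kSmiTagSize);
    }

    static int32_t SmiUntag(int32_t smi) {
      return smi >> kSmiTagSize;  // arithmetic shift preserves the sign
    }

    int main() {
      assert(SmiUntag(SmiTag(42)) == 42);
      assert(SmiUntag(SmiTag(-7)) == -7);
      // String lengths sit far below the 2^30 smi limit, which is why the
      // comment above can claim the sum of two lengths cannot overflow.
      assert(SmiUntag(SmiTag(1 << 28)) + SmiUntag(SmiTag(1 << 28)) == (1 << 29));
    }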
| (...skipping 321 matching lines...) |
| 6229 Label miss; | 6212 Label miss; |
| 6230 __ orr(r2, r1, r0); | 6213 __ orr(r2, r1, r0); |
| 6231 __ JumpIfNotSmi(r2, &miss); | 6214 __ JumpIfNotSmi(r2, &miss); |
| 6232 | 6215 |
| 6233 if (GetCondition() == eq) { | 6216 if (GetCondition() == eq) { |
| 6234 // For equality we do not care about the sign of the result. | 6217 // For equality we do not care about the sign of the result. |
| 6235 __ sub(r0, r0, r1, SetCC); | 6218 __ sub(r0, r0, r1, SetCC); |
| 6236 } else { | 6219 } else { |
| 6237 // Untag before subtracting to avoid handling overflow. | 6220 // Untag before subtracting to avoid handling overflow. |
| 6238 __ SmiUntag(r1); | 6221 __ SmiUntag(r1); |
| 6239 __ sub(r0, r1, SmiUntagOperand(r0)); | 6222 __ sub(r0, r1, Operand::SmiUntag(r0)); |
| 6240 } | 6223 } |
| 6241 __ Ret(); | 6224 __ Ret(); |
| 6242 | 6225 |
| 6243 __ bind(&miss); | 6226 __ bind(&miss); |
| 6244 GenerateMiss(masm); | 6227 GenerateMiss(masm); |
| 6245 } | 6228 } |
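The asymmetry in `GenerateSmis` (subtract the tagged words for equality, untag first for ordering) exists because a tagged difference can wrap: tagged smis are the values doubled, so the difference of two extreme smis can overflow 32 bits and flip the sign bit, while zero-ness survives the wrap intact. A short C++ illustration of the hazard, assuming 32-bit smis and modelling the wrap with unsigned arithmetic:

    #include <cassert>
    #include <cstdint>

    static uint32_t Tag(int32_t v) {  // smi encoding: value * 2, low 32 bits
      return static_cast<uint32_t>(v) << 1;
    }

    int main() {
      int32_t a = 1 << 29;     // near the top of the smi range
      int32_t b = -(1 << 29);  // near the bottom

      // Tagged subtraction: 2*a - 2*b = 2^31 wraps to INT32_MIN, so the
      // sign bit claims a < b even though a > b. Ordering cannot use it.
      int32_t tagged_diff = static_cast<int32_t>(Tag(a) - Tag(b));
      assert(tagged_diff < 0);

      // Zero-ness is still exact: the wrapped difference is zero if and
      // only if the operands are equal, which is all eq/ne needs.
      assert(static_cast<int32_t>(Tag(a) - Tag(a)) == 0);

      // The untagged difference of two 31-bit values always fits in 32 bits.
      assert(a - b > 0);
    }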
| 6246 | 6229 |
| 6247 | 6230 |
| 6248 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { | 6231 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { |
| 6249 ASSERT(state_ == CompareIC::NUMBER); | 6232 ASSERT(state_ == CompareIC::NUMBER); |
| (...skipping 13 matching lines...) |
| 6263 // stub if NaN is involved. | 6246 // stub if NaN is involved. |
| 6264 // Load left and right operand. | 6247 // Load left and right operand. |
| 6265 Label done, left, left_smi, right_smi; | 6248 Label done, left, left_smi, right_smi; |
| 6266 __ JumpIfSmi(r0, &right_smi); | 6249 __ JumpIfSmi(r0, &right_smi); |
| 6267 __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, | 6250 __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, |
| 6268 DONT_DO_SMI_CHECK); | 6251 DONT_DO_SMI_CHECK); |
| 6269 __ sub(r2, r0, Operand(kHeapObjectTag)); | 6252 __ sub(r2, r0, Operand(kHeapObjectTag)); |
| 6270 __ vldr(d1, r2, HeapNumber::kValueOffset); | 6253 __ vldr(d1, r2, HeapNumber::kValueOffset); |
| 6271 __ b(&left); | 6254 __ b(&left); |
| 6272 __ bind(&right_smi); | 6255 __ bind(&right_smi); |
| 6273 __ SmiUntag(r2, r0); // Can't clobber r0 yet. | 6256 __ SmiToDouble(d1, r0); |
| 6274 SwVfpRegister single_scratch = d2.low(); | |
| 6275 __ vmov(single_scratch, r2); | |
| 6276 __ vcvt_f64_s32(d1, single_scratch); | |
| 6277 | 6257 |
| 6278 __ bind(&left); | 6258 __ bind(&left); |
| 6279 __ JumpIfSmi(r1, &left_smi); | 6259 __ JumpIfSmi(r1, &left_smi); |
| 6280 __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, | 6260 __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, |
| 6281 DONT_DO_SMI_CHECK); | 6261 DONT_DO_SMI_CHECK); |
| 6282 __ sub(r2, r1, Operand(kHeapObjectTag)); | 6262 __ sub(r2, r1, Operand(kHeapObjectTag)); |
| 6283 __ vldr(d0, r2, HeapNumber::kValueOffset); | 6263 __ vldr(d0, r2, HeapNumber::kValueOffset); |
| 6284 __ b(&done); | 6264 __ b(&done); |
| 6285 __ bind(&left_smi); | 6265 __ bind(&left_smi); |
| 6286 __ SmiUntag(r2, r1); // Can't clobber r1 yet. | 6266 __ SmiToDouble(d0, r1); |
| 6287 single_scratch = d3.low(); | |
| 6288 __ vmov(single_scratch, r2); | |
| 6289 __ vcvt_f64_s32(d0, single_scratch); | |
| 6290 | 6267 |
| 6291 __ bind(&done); | 6268 __ bind(&done); |
| 6292 // Compare operands. | 6269 // Compare operands. |
| 6293 __ VFPCompareAndSetFlags(d0, d1); | 6270 __ VFPCompareAndSetFlags(d0, d1); |
| 6294 | 6271 |
| 6295 // Don't base result on status bits when a NaN is involved. | 6272 // Don't base result on status bits when a NaN is involved. |
| 6296 __ b(vs, &unordered); | 6273 __ b(vs, &unordered); |
| 6297 | 6274 |
| 6298 // Return a result of -1, 0, or 1, based on status bits. | 6275 // Return a result of -1, 0, or 1, based on status bits. |
| 6299 __ mov(r0, Operand(EQUAL), LeaveCC, eq); | 6276 __ mov(r0, Operand(EQUAL), LeaveCC, eq); |
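`SmiToDouble` replaces the three-instruction untag/`vmov`/`vcvt` dance with one macro whose contract is simply "load the untagged smi as a double"; since a 32-bit tagged smi is the value doubled, a VFP fixed-point conversion with one fraction bit can even fold the untagging into the conversion itself. A plain C++ statement of that contract, assuming the 32-bit tagging scheme (these functions are illustrative, not the macro's implementation):

    #include <cassert>
    #include <cstdint>

    // Untag first, then convert: the straightforward reading of the macro.
    static double SmiToDouble(int32_t smi) {
      return static_cast<double>(smi >> 1);
    }

    // Convert the tagged word as a fixed-point number with 1 fraction bit,
    // i.e. divide by 2 during the conversion. Identical result, because a
    // valid smi is always even.
    static double SmiToDoubleFixedPoint(int32_t smi) {
      return static_cast<double>(smi) / 2.0;
    }

    int main() {
      int32_t tagged = -7 * 2;  // smi encoding of -7
      assert(SmiToDouble(tagged) == -7.0);
      assert(SmiToDoubleFixedPoint(tagged) == -7.0);
    }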
| (...skipping 390 matching lines...) |
| 6690 Register scratch2) { | 6667 Register scratch2) { |
| 6691 ASSERT(!elements.is(scratch1)); | 6668 ASSERT(!elements.is(scratch1)); |
| 6692 ASSERT(!elements.is(scratch2)); | 6669 ASSERT(!elements.is(scratch2)); |
| 6693 ASSERT(!name.is(scratch1)); | 6670 ASSERT(!name.is(scratch1)); |
| 6694 ASSERT(!name.is(scratch2)); | 6671 ASSERT(!name.is(scratch2)); |
| 6695 | 6672 |
| 6696 __ AssertName(name); | 6673 __ AssertName(name); |
| 6697 | 6674 |
| 6698 // Compute the capacity mask. | 6675 // Compute the capacity mask. |
| 6699 __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset)); | 6676 __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset)); |
| 6700 __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize)); // convert smi to int | 6677 __ SmiUntag(scratch1); |
| 6701 __ sub(scratch1, scratch1, Operand(1)); | 6678 __ sub(scratch1, scratch1, Operand(1)); |
| 6702 | 6679 |
| 6703 // Generate an unrolled loop that performs a few probes before | 6680 // Generate an unrolled loop that performs a few probes before |
| 6704 // giving up. Measurements done on Gmail indicate that 2 probes | 6681 // giving up. Measurements done on Gmail indicate that 2 probes |
| 6705 // cover ~93% of loads from dictionaries. | 6682 // cover ~93% of loads from dictionaries. |
| 6706 for (int i = 0; i < kInlinedProbes; i++) { | 6683 for (int i = 0; i < kInlinedProbes; i++) { |
| 6707 // Compute the masked index: (hash + i + i * i) & mask. | 6684 // Compute the masked index: (hash + i + i * i) & mask. |
| 6708 __ ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset)); | 6685 __ ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset)); |
| 6709 if (i > 0) { | 6686 if (i > 0) { |
| 6710 // Add the probe offset (i + i * i) left shifted to avoid right shifting | 6687 // Add the probe offset (i + i * i) left shifted to avoid right shifting |
| (...skipping 60 matching lines...) |
| 6771 Register key = r1; | 6748 Register key = r1; |
| 6772 Register index = r2; | 6749 Register index = r2; |
| 6773 Register mask = r3; | 6750 Register mask = r3; |
| 6774 Register hash = r4; | 6751 Register hash = r4; |
| 6775 Register undefined = r5; | 6752 Register undefined = r5; |
| 6776 Register entry_key = r6; | 6753 Register entry_key = r6; |
| 6777 | 6754 |
| 6778 Label in_dictionary, maybe_in_dictionary, not_in_dictionary; | 6755 Label in_dictionary, maybe_in_dictionary, not_in_dictionary; |
| 6779 | 6756 |
| 6780 __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset)); | 6757 __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset)); |
| 6781 __ mov(mask, Operand(mask, ASR, kSmiTagSize)); | 6758 __ SmiUntag(mask); |
| 6782 __ sub(mask, mask, Operand(1)); | 6759 __ sub(mask, mask, Operand(1)); |
| 6783 | 6760 |
| 6784 __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset)); | 6761 __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset)); |
| 6785 | 6762 |
| 6786 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); | 6763 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); |
| 6787 | 6764 |
| 6788 for (int i = kInlinedProbes; i < kTotalProbes; i++) { | 6765 for (int i = kInlinedProbes; i < kTotalProbes; i++) { |
| 6789 // Compute the masked index: (hash + i + i * i) & mask. | 6766 // Compute the masked index: (hash + i + i * i) & mask. |
| 6790 // Capacity is a smi-tagged power of 2. | 6767 // Capacity is a smi-tagged power of 2. |
| 6791 if (i > 0) { | 6768 if (i > 0) { |
| (...skipping 377 matching lines...) |
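Both probe loops above walk the dictionary with the same sequence: probe i inspects `(hash + probe_offset(i)) & mask`, where the offset grows quadratically (the comments write it as `i + i * i`, pre-shifted so it can be added before the hash field's own right shift). With a triangular-number offset and a power-of-two capacity, the sequence provably visits every slot before repeating, so a bounded probe loop cannot cycle through the same few buckets. A self-contained C++ check of that property (capacity and hash here are arbitrary example values):

    #include <cassert>
    #include <vector>

    int main() {
      const unsigned capacity = 32;      // dictionaries size to powers of two
      const unsigned mask = capacity - 1;
      const unsigned hash = 0x9e3779b9;  // any hash works

      std::vector<bool> seen(capacity, false);
      for (unsigned i = 0; i < capacity; i++) {
        // Triangular-number probing: offset(i) = i * (i + 1) / 2.
        unsigned slot = (hash + i * (i + 1) / 2) & mask;
        assert(!seen[slot]);             // no slot is visited twice...
        seen[slot] = true;
      }
      for (bool s : seen) assert(s);     // ...and every slot is visited once
    }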
| 7169 // call. | 7146 // call. |
| 7170 __ Push(r1, r3, r0); | 7147 __ Push(r1, r3, r0); |
| 7171 __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); | 7148 __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
| 7172 __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset)); | 7149 __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset)); |
| 7173 __ Push(r5, r4); | 7150 __ Push(r5, r4); |
| 7174 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1); | 7151 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1); |
| 7175 | 7152 |
| 7176 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object. | 7153 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object. |
| 7177 __ bind(&fast_elements); | 7154 __ bind(&fast_elements); |
| 7178 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); | 7155 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); |
| 7179 __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); | 7156 __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3)); |
| 7180 __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 7157 __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 7181 __ str(r0, MemOperand(r6, 0)); | 7158 __ str(r0, MemOperand(r6, 0)); |
| 7182 // Update the write barrier for the array store. | 7159 // Update the write barrier for the array store. |
| 7183 __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs, | 7160 __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs, |
| 7184 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); | 7161 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); |
| 7185 __ Ret(); | 7162 __ Ret(); |
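The `RecordWrite` after the store is what keeps the generational collector sound: writing a heap object into an array that may live in old space can create an old-to-new pointer, and the barrier records the slot so a minor GC can find it without scanning all of old space (`OMIT_SMI_CHECK` is safe on this path because it only stores non-smi values). A greatly simplified conceptual model of such a barrier, not V8's implementation:

    #include <unordered_set>

    struct GCObject { bool in_new_space; };

    struct Heap {
      std::unordered_set<GCObject**> remembered_set;

      // Record slots that may now hold an old-to-new edge; the next minor
      // GC rescans exactly these slots instead of the whole old space.
      void RecordWrite(GCObject* host, GCObject** slot, GCObject* value) {
        if (!host->in_new_space && value->in_new_space) {
          remembered_set.insert(slot);
        }
      }
    };

    int main() {
      GCObject old_array{false}, young_value{true};
      GCObject* element_slot = &young_value;
      Heap heap;
      heap.RecordWrite(&old_array, &element_slot, element_slot);
      return heap.remembered_set.count(&element_slot) ? 0 : 1;
    }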
| 7186 | 7163 |
| 7187 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS, | 7164 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS, |
| 7188 // and value is Smi. | 7165 // and value is Smi. |
| 7189 __ bind(&smi_element); | 7166 __ bind(&smi_element); |
| 7190 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); | 7167 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); |
| 7191 __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); | 7168 __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3)); |
| 7192 __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize)); | 7169 __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize)); |
| 7193 __ Ret(); | 7170 __ Ret(); |
| 7194 | 7171 |
| 7195 // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS. | 7172 // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS. |
| 7196 __ bind(&double_elements); | 7173 __ bind(&double_elements); |
| 7197 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); | 7174 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); |
| 7198 __ StoreNumberToDoubleElements(r0, r3, r5, r6, &slow_elements); | 7175 __ StoreNumberToDoubleElements(r0, r3, r5, r6, &slow_elements); |
| 7199 __ Ret(); | 7176 __ Ret(); |
| 7200 } | 7177 } |
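`Operand::PointerOffsetFromSmiKey(r3)` names the trick the old `Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)` spelled out by hand: with 4-byte pointers and a one-bit smi tag, the byte offset of element `key` is the tagged word shifted left by one, because the tag shift already paid for half of the scaling. A small C++ check of that identity under the 32-bit constants:

    #include <cassert>
    #include <cstdint>

    const int kSmiTagSize = 1;       // 32-bit V8 smi tag width
    const int kPointerSizeLog2 = 2;  // log2 of the 4-byte pointer size

    int main() {
      for (int32_t key = 0; key < 1000; key++) {
        int32_t smi = key << kSmiTagSize;          // tagged element index
        int32_t direct = key << kPointerSizeLog2;  // key * 4 bytes
        int32_t folded = smi << (kPointerSizeLog2 - kSmiTagSize);
        assert(direct == folded);                  // smi * 2 == key * 4
      }
    }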
| 7201 | 7178 |
| (...skipping 255 matching lines...) |
| 7457 __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); | 7434 __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); |
| 7458 } | 7435 } |
| 7459 } | 7436 } |
| 7460 | 7437 |
| 7461 | 7438 |
| 7462 #undef __ | 7439 #undef __ |
| 7463 | 7440 |
| 7464 } } // namespace v8::internal | 7441 } } // namespace v8::internal |
| 7465 | 7442 |
| 7466 #endif // V8_TARGET_ARCH_ARM | 7443 #endif // V8_TARGET_ARCH_ARM |