| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 460 matching lines...) |
| 471 Label not_special; | 471 Label not_special; |
| 472 // Convert from Smi to integer. | 472 // Convert from Smi to integer. |
| 473 __ sra(source_, source_, kSmiTagSize); | 473 __ sra(source_, source_, kSmiTagSize); |
| 474 // Move sign bit from source to destination. This works because the sign bit | 474 // Move sign bit from source to destination. This works because the sign bit |
| 475 // in the exponent word of the double has the same position and polarity as | 475 // in the exponent word of the double has the same position and polarity as |
| 476 // the 2's complement sign bit in a Smi. | 476 // the 2's complement sign bit in a Smi. |
| 477 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); | 477 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); |
| 478 __ And(exponent, source_, Operand(HeapNumber::kSignMask)); | 478 __ And(exponent, source_, Operand(HeapNumber::kSignMask)); |
| 479 // Subtract from 0 if source was negative. | 479 // Subtract from 0 if source was negative. |
| 480 __ subu(at, zero_reg, source_); | 480 __ subu(at, zero_reg, source_); |
| 481 __ movn(source_, at, exponent); | 481 __ Movn(source_, at, exponent); |
| 482 | 482 |
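The substantive change in this hunk, and in every hunk below, is the switch from the raw MIPS instructions movn/movz (plus movt/movf and clz) to their capitalized MacroAssembler wrappers Movn/Movz, Movt/Movf and Clz, presumably so that targets lacking these instructions can have them expanded into an equivalent branch sequence. For reference while reading the hunks, here is a plain-C++ restatement of the conditional-move semantics (the instruction behaviour only, not the V8 wrapper implementations):

```cpp
// Sketch of the MIPS conditional-move semantics used throughout this file.
// These model the instructions themselves, not V8's macro-assembler code.
#include <cstdint>

// movn rd, rs, rt: rd = rs when rt != 0, otherwise rd is left unchanged.
inline uint32_t Movn(uint32_t rd, uint32_t rs, uint32_t rt) {
  return rt != 0 ? rs : rd;
}

// movz rd, rs, rt: rd = rs when rt == 0, otherwise rd is left unchanged.
inline uint32_t Movz(uint32_t rd, uint32_t rs, uint32_t rt) {
  return rt == 0 ? rs : rd;
}
```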
| 483 // We have -1, 0 or 1, which we treat specially. Register source_ contains | 483 // We have -1, 0 or 1, which we treat specially. Register source_ contains |
| 484 // absolute value: it is either equal to 1 (special case of -1 and 1), | 484 // absolute value: it is either equal to 1 (special case of -1 and 1), |
| 485 // greater than 1 (not a special case) or less than 1 (special case of 0). | 485 // greater than 1 (not a special case) or less than 1 (special case of 0). |
| 486 __ Branch(&not_special, gt, source_, Operand(1)); | 486 __ Branch(&not_special, gt, source_, Operand(1)); |
| 487 | 487 |
| 488 // For 1 or -1 we need to or in the 0 exponent (biased to 1023). | 488 // For 1 or -1 we need to or in the 0 exponent (biased to 1023). |
| 489 static const uint32_t exponent_word_for_1 = | 489 static const uint32_t exponent_word_for_1 = |
| 490 HeapNumber::kExponentBias << HeapNumber::kExponentShift; | 490 HeapNumber::kExponentBias << HeapNumber::kExponentShift; |
| 491 // Safe to use 'at' as dest reg here. | 491 // Safe to use 'at' as dest reg here. |
| 492 __ Or(at, exponent, Operand(exponent_word_for_1)); | 492 __ Or(at, exponent, Operand(exponent_word_for_1)); |
| 493 __ movn(exponent, at, source_); // Write exp when source not 0. | 493 __ Movn(exponent, at, source_); // Write exp when source not 0. |
| 494 // 1, 0 and -1 all have 0 for the second word. | 494 // 1, 0 and -1 all have 0 for the second word. |
| 495 __ mov(mantissa, zero_reg); | 495 __ mov(mantissa, zero_reg); |
| 496 __ Ret(); | 496 __ Ret(); |
| 497 | 497 |
| 498 __ bind(&not_special); | 498 __ bind(&not_special); |
| 499 // Count leading zeros. | 499 // Count leading zeros. |
| 500 // Gets the wrong answer for 0, but we already checked for that case above. | 500 // Gets the wrong answer for 0, but we already checked for that case above. |
| 501 __ clz(zeros_, source_); | 501 __ Clz(zeros_, source_); |
| 502 // Compute exponent and or it into the exponent register. | 502 // Compute exponent and or it into the exponent register. |
| 503 // We use mantissa as a scratch register here. | 503 // We use mantissa as a scratch register here. |
| 504 __ li(mantissa, Operand(31 + HeapNumber::kExponentBias)); | 504 __ li(mantissa, Operand(31 + HeapNumber::kExponentBias)); |
| 505 __ subu(mantissa, mantissa, zeros_); | 505 __ subu(mantissa, mantissa, zeros_); |
| 506 __ sll(mantissa, mantissa, HeapNumber::kExponentShift); | 506 __ sll(mantissa, mantissa, HeapNumber::kExponentShift); |
| 507 __ Or(exponent, exponent, mantissa); | 507 __ Or(exponent, exponent, mantissa); |
| 508 | 508 |
| 509 // Shift up the source chopping the top bit off. | 509 // Shift up the source chopping the top bit off. |
| 510 __ Addu(zeros_, zeros_, Operand(1)); | 510 __ Addu(zeros_, zeros_, Operand(1)); |
| 511 // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0. | 511 // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0. |
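The visible part of ConvertToDoubleStub builds the IEEE 754 bit pattern by hand: take the sign, special-case -1/0/1, then use count-leading-zeros to derive the biased exponent and shift the magnitude up so the implicit leading 1 falls off the top. A standalone C++ sketch of the same construction (a paraphrase of the algorithm, not V8 code; __builtin_clz stands in for the Clz instruction):

```cpp
// Standalone sketch of the Smi-to-double conversion performed above, written
// so the exponent/mantissa arithmetic is visible. Assumes GCC/Clang for
// __builtin_clz, which plays the role of Clz in the stub.
#include <cstdint>
#include <cstring>
#include <cstdio>

// Build the two 32-bit words of an IEEE 754 double from a 32-bit integer.
void IntToDoubleWords(int32_t value, uint32_t* hi, uint32_t* lo) {
  uint32_t sign = value < 0 ? 0x80000000u : 0u;  // HeapNumber::kSignMask
  uint32_t magnitude =
      value < 0 ? 0u - static_cast<uint32_t>(value) : static_cast<uint32_t>(value);
  if (magnitude <= 1) {
    // Special cases -1, 0, 1: exponent is the bias (1023) for +/-1, all zero for 0.
    *hi = sign | (magnitude != 0 ? (1023u << 20) : 0u);
    *lo = 0;
    return;
  }
  int zeros = __builtin_clz(magnitude);             // leading zeros, as Clz does
  uint32_t exponent = (31 - zeros) + 1023;          // position of top bit, biased
  uint32_t shifted = magnitude << (zeros + 1);      // shift up, chopping the implicit 1
  *hi = sign | (exponent << 20) | (shifted >> 12);  // top 20 mantissa bits
  *lo = shifted << 20;                              // remaining mantissa bits
}

int main() {
  uint32_t hi, lo;
  IntToDoubleWords(-5, &hi, &lo);
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double d;
  std::memcpy(&d, &bits, sizeof d);
  std::printf("%f\n", d);  // prints -5.000000
}
```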
| (...skipping 202 matching lines...) |
| 714 __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask)); | 714 __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask)); |
| 715 // Get the absolute value of the object (as an unsigned integer). | 715 // Get the absolute value of the object (as an unsigned integer). |
| 716 Label skip_sub; | 716 Label skip_sub; |
| 717 __ Branch(&skip_sub, ge, dst2, Operand(zero_reg)); | 717 __ Branch(&skip_sub, ge, dst2, Operand(zero_reg)); |
| 718 __ Subu(int_scratch, zero_reg, int_scratch); | 718 __ Subu(int_scratch, zero_reg, int_scratch); |
| 719 __ bind(&skip_sub); | 719 __ bind(&skip_sub); |
| 720 | 720 |
| 721 // Get mantissa[51:20]. | 721 // Get mantissa[51:20]. |
| 722 | 722 |
| 723 // Get the position of the first set bit. | 723 // Get the position of the first set bit. |
| 724 __ clz(dst1, int_scratch); | 724 __ Clz(dst1, int_scratch); |
| 725 __ li(scratch2, 31); | 725 __ li(scratch2, 31); |
| 726 __ Subu(dst1, scratch2, dst1); | 726 __ Subu(dst1, scratch2, dst1); |
| 727 | 727 |
| 728 // Set the exponent. | 728 // Set the exponent. |
| 729 __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias)); | 729 __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias)); |
| 730 __ Ins(dst2, scratch2, | 730 __ Ins(dst2, scratch2, |
| 731 HeapNumber::kExponentShift, HeapNumber::kExponentBits); | 731 HeapNumber::kExponentShift, HeapNumber::kExponentBits); |
| 732 | 732 |
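Here the biased exponent is written into the high word with the MIPS32R2 ins (insert-bit-field) instruction instead of a shift-and-or. A short C++ equivalent of that bit-field insert, under the usual ins semantics (replace `size` bits of the destination starting at bit `pos`):

```cpp
// Sketch of what `ins rd, rs, pos, size` does: replace bits [pos, pos+size)
// of dst with the low `size` bits of src. Above it is used with
// pos = HeapNumber::kExponentShift and size = HeapNumber::kExponentBits.
#include <cstdint>

inline uint32_t InsertBitField(uint32_t dst, uint32_t src, int pos, int size) {
  uint32_t mask = ((size < 32 ? (1u << size) : 0u) - 1u) << pos;
  return (dst & ~mask) | ((src << pos) & mask);
}
```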
| 733 // Clear the first non null bit. | 733 // Clear the first non null bit. |
| 734 __ li(scratch2, Operand(1)); | 734 __ li(scratch2, Operand(1)); |
| (...skipping 337 matching lines...) |
| 1072 | 1072 |
| 1073 // Set up the correct exponent in scratch_. All non-Smi int32s have the same. | 1073 // Set up the correct exponent in scratch_. All non-Smi int32s have the same. |
| 1074 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). | 1074 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). |
| 1075 uint32_t non_smi_exponent = | 1075 uint32_t non_smi_exponent = |
| 1076 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; | 1076 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; |
| 1077 __ li(scratch_, Operand(non_smi_exponent)); | 1077 __ li(scratch_, Operand(non_smi_exponent)); |
| 1078 // Set the sign bit in scratch_ if the value was negative. | 1078 // Set the sign bit in scratch_ if the value was negative. |
| 1079 __ or_(scratch_, scratch_, sign_); | 1079 __ or_(scratch_, scratch_, sign_); |
| 1080 // Subtract from 0 if the value was negative. | 1080 // Subtract from 0 if the value was negative. |
| 1081 __ subu(at, zero_reg, the_int_); | 1081 __ subu(at, zero_reg, the_int_); |
| 1082 __ movn(the_int_, at, sign_); | 1082 __ Movn(the_int_, at, sign_); |
| 1083 // We should be masking the implicit first digit of the mantissa away here, | 1083 // We should be masking the implicit first digit of the mantissa away here, |
| 1084 // but it just ends up combining harmlessly with the last digit of the | 1084 // but it just ends up combining harmlessly with the last digit of the |
| 1085 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get | 1085 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get |
| 1086 // the most significant 1 to hit the last bit of the 12 bit sign and exponent. | 1086 // the most significant 1 to hit the last bit of the 12 bit sign and exponent. |
| 1087 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0); | 1087 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0); |
| 1088 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; | 1088 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; |
| 1089 __ srl(at, the_int_, shift_distance); | 1089 __ srl(at, the_int_, shift_distance); |
| 1090 __ or_(scratch_, scratch_, at); | 1090 __ or_(scratch_, scratch_, at); |
| 1091 __ sw(scratch_, FieldMemOperand(the_heap_number_, | 1091 __ sw(scratch_, FieldMemOperand(the_heap_number_, |
| 1092 HeapNumber::kExponentOffset)); | 1092 HeapNumber::kExponentOffset)); |
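To see why leaving the implicit mantissa bit unmasked is harmless here: the biased exponent written above is 1023 + 30 = 1053, an odd number, so its lowest bit (bit 20 of the word, i.e. kExponentShift) is already 1, which is exactly what the ASSERT checks. With 1 sign bit and 11 exponent bits, kNonMantissaBitsInTopWord is 12 and shift_distance is 10, so the srl lands the integer's top set bit (bit 30) precisely on bit 20, and OR-ing a 1 into a bit that is already 1 changes nothing.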
| (...skipping 650 matching lines...) |
| 1743 __ li(t0, Operand(LESS)); | 1743 __ li(t0, Operand(LESS)); |
| 1744 __ li(t1, Operand(GREATER)); | 1744 __ li(t1, Operand(GREATER)); |
| 1745 __ li(t2, Operand(EQUAL)); | 1745 __ li(t2, Operand(EQUAL)); |
| 1746 | 1746 |
| 1747 // Check if either rhs or lhs is NaN. | 1747 // Check if either rhs or lhs is NaN. |
| 1748 __ BranchF(NULL, &nan, eq, f12, f14); | 1748 __ BranchF(NULL, &nan, eq, f12, f14); |
| 1749 | 1749 |
| 1750 // Check if LESS condition is satisfied. If true, move conditionally | 1750 // Check if LESS condition is satisfied. If true, move conditionally |
| 1751 // result to v0. | 1751 // result to v0. |
| 1752 __ c(OLT, D, f12, f14); | 1752 __ c(OLT, D, f12, f14); |
| 1753 __ movt(v0, t0); | 1753 __ Movt(v0, t0); |
| 1754 // Use previous check to store conditionally to v0 opposite condition | 1754 // Use previous check to store conditionally to v0 opposite condition |
| 1755 // (GREATER). If rhs is equal to lhs, this will be corrected in next | 1755 // (GREATER). If rhs is equal to lhs, this will be corrected in next |
| 1756 // check. | 1756 // check. |
| 1757 __ movf(v0, t1); | 1757 __ Movf(v0, t1); |
| 1758 // Check if EQUAL condition is satisfied. If true, move conditionally | 1758 // Check if EQUAL condition is satisfied. If true, move conditionally |
| 1759 // result to v0. | 1759 // result to v0. |
| 1760 __ c(EQ, D, f12, f14); | 1760 __ c(EQ, D, f12, f14); |
| 1761 __ movt(v0, t2); | 1761 __ Movt(v0, t2); |
| 1762 | 1762 |
| 1763 __ Ret(); | 1763 __ Ret(); |
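The sequence above is a branch-free three-way compare: c(OLT) sets the FPU condition flag, Movt/Movf then pick LESS or GREATER off that single flag, and a second c(EQ) plus Movt corrects the result to EQUAL when the operands are equal. A C++ sketch of the same selection, assuming the conventional LESS = -1, EQUAL = 0, GREATER = 1 encoding and non-NaN inputs (the NaN case was already branched away above):

```cpp
// Branch-free three-way compare mirroring the c.cond.d + Movt/Movf sequence.
// Assumes non-NaN inputs; the stub handles NaN separately via BranchF.
int ThreeWayCompare(double lhs, double rhs) {
  int result = 0;
  bool less = lhs < rhs;              // c(OLT, D, f12, f14) sets the flag
  result = less ? -1 : result;        // Movt(v0, t0): LESS when the flag is set
  result = !less ? 1 : result;        // Movf(v0, t1): GREATER otherwise
  bool equal = lhs == rhs;            // c(EQ, D, f12, f14)
  result = equal ? 0 : result;        // Movt(v0, t2): correct to EQUAL
  return result;                      // -1 = LESS, 0 = EQUAL, 1 = GREATER
}
```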
| 1764 | 1764 |
| 1765 __ bind(&nan); | 1765 __ bind(&nan); |
| 1766 // NaN comparisons always fail. | 1766 // NaN comparisons always fail. |
| 1767 // Load whatever we need in v0 to make the comparison fail. | 1767 // Load whatever we need in v0 to make the comparison fail. |
| 1768 if (cc_ == lt || cc_ == le) { | 1768 if (cc_ == lt || cc_ == le) { |
| 1769 __ li(v0, Operand(GREATER)); | 1769 __ li(v0, Operand(GREATER)); |
| 1770 } else { | 1770 } else { |
| 1771 __ li(v0, Operand(LESS)); | 1771 __ li(v0, Operand(LESS)); |
| (...skipping 120 matching lines...) |
| 1892 __ JumpIfSmi(tos_, &patch); | 1892 __ JumpIfSmi(tos_, &patch); |
| 1893 } | 1893 } |
| 1894 | 1894 |
| 1895 if (types_.NeedsMap()) { | 1895 if (types_.NeedsMap()) { |
| 1896 __ lw(map, FieldMemOperand(tos_, HeapObject::kMapOffset)); | 1896 __ lw(map, FieldMemOperand(tos_, HeapObject::kMapOffset)); |
| 1897 | 1897 |
| 1898 if (types_.CanBeUndetectable()) { | 1898 if (types_.CanBeUndetectable()) { |
| 1899 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset)); | 1899 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset)); |
| 1900 __ And(at, at, Operand(1 << Map::kIsUndetectable)); | 1900 __ And(at, at, Operand(1 << Map::kIsUndetectable)); |
| 1901 // Undetectable -> false. | 1901 // Undetectable -> false. |
| 1902 __ movn(tos_, zero_reg, at); | 1902 __ Movn(tos_, zero_reg, at); |
| 1903 __ Ret(ne, at, Operand(zero_reg)); | 1903 __ Ret(ne, at, Operand(zero_reg)); |
| 1904 } | 1904 } |
| 1905 } | 1905 } |
| 1906 | 1906 |
| 1907 if (types_.Contains(SPEC_OBJECT)) { | 1907 if (types_.Contains(SPEC_OBJECT)) { |
| 1908 // Spec object -> true. | 1908 // Spec object -> true. |
| 1909 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 1909 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
| 1910 // tos_ contains the correct non-zero return value already. | 1910 // tos_ contains the correct non-zero return value already. |
| 1911 __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE)); | 1911 __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE)); |
| 1912 } | 1912 } |
| (...skipping 35 matching lines...) |
| 1948 Type type, | 1948 Type type, |
| 1949 Heap::RootListIndex value, | 1949 Heap::RootListIndex value, |
| 1950 bool result) { | 1950 bool result) { |
| 1951 if (types_.Contains(type)) { | 1951 if (types_.Contains(type)) { |
| 1952 // If we see an expected oddball, return its ToBoolean value tos_. | 1952 // If we see an expected oddball, return its ToBoolean value tos_. |
| 1953 __ LoadRoot(at, value); | 1953 __ LoadRoot(at, value); |
| 1954 __ Subu(at, at, tos_); // This is a check for equality for the movz below. | 1954 __ Subu(at, at, tos_); // This is a check for equality for the movz below. |
| 1955 // The value of a root is never NULL, so we can avoid loading a non-null | 1955 // The value of a root is never NULL, so we can avoid loading a non-null |
| 1956 // value into tos_ when we want to return 'true'. | 1956 // value into tos_ when we want to return 'true'. |
| 1957 if (!result) { | 1957 if (!result) { |
| 1958 __ movz(tos_, zero_reg, at); | 1958 __ Movz(tos_, zero_reg, at); |
| 1959 } | 1959 } |
| 1960 __ Ret(eq, at, Operand(zero_reg)); | 1960 __ Ret(eq, at, Operand(zero_reg)); |
| 1961 } | 1961 } |
| 1962 } | 1962 } |
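CheckOddball leans on one subtraction doing double duty: `at = root - tos_` is zero exactly when tos_ is the expected oddball, so the same value drives both the conditional Movz (needed only for the 'false' result, since a root pointer already reads as a non-zero 'true') and the conditional return. A small sketch of that pattern:

```cpp
// Sketch of the CheckOddball pattern: one subtraction serves as the equality
// test, the Movz condition, and the conditional-return condition.
#include <cstdint>

// Returns true when tos matched the expected root, i.e. when the stub returns.
bool CheckOddball(uint32_t* tos, uint32_t root_value, bool result_for_match) {
  uint32_t at = root_value - *tos;    // Subu: zero exactly when they are equal
  if (!result_for_match) {
    *tos = (at == 0) ? 0u : *tos;     // Movz(tos_, zero_reg, at)
  }
  return at == 0;                     // Ret(eq, at, Operand(zero_reg))
}
```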
| 1963 | 1963 |
| 1964 | 1964 |
| 1965 void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) { | 1965 void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) { |
| 1966 __ Move(a3, tos_); | 1966 __ Move(a3, tos_); |
| 1967 __ li(a2, Operand(Smi::FromInt(tos_.code()))); | 1967 __ li(a2, Operand(Smi::FromInt(tos_.code()))); |
| 1968 __ li(a1, Operand(Smi::FromInt(types_.ToByte()))); | 1968 __ li(a1, Operand(Smi::FromInt(types_.ToByte()))); |
| (...skipping 3032 matching lines...) |
| 5001 // regexp_data: RegExp data (FixedArray) | 5001 // regexp_data: RegExp data (FixedArray) |
| 5002 // a0: Instance type of subject string | 5002 // a0: Instance type of subject string |
| 5003 STATIC_ASSERT(kStringEncodingMask == 4); | 5003 STATIC_ASSERT(kStringEncodingMask == 4); |
| 5004 STATIC_ASSERT(kAsciiStringTag == 4); | 5004 STATIC_ASSERT(kAsciiStringTag == 4); |
| 5005 STATIC_ASSERT(kTwoByteStringTag == 0); | 5005 STATIC_ASSERT(kTwoByteStringTag == 0); |
| 5006 // Find the code object based on the assumptions above. | 5006 // Find the code object based on the assumptions above. |
| 5007 __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII. | 5007 __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII. |
| 5008 __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset)); | 5008 __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset)); |
| 5009 __ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below). | 5009 __ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below). |
| 5010 __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset)); | 5010 __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset)); |
| 5011 __ movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset. | 5011 __ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset. |
| 5012 | 5012 |
| 5013 // Check that the irregexp code has been generated for the actual string | 5013 // Check that the irregexp code has been generated for the actual string |
| 5014 // encoding. If it has, the field contains a code object otherwise it contains | 5014 // encoding. If it has, the field contains a code object otherwise it contains |
| 5015 // a smi (code flushing support). | 5015 // a smi (code flushing support). |
| 5016 __ JumpIfSmi(t9, &runtime); | 5016 __ JumpIfSmi(t9, &runtime); |
| 5017 | 5017 |
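The encoding bit (kStringEncodingMask, asserted to be 4 above) selects between the ASCII and two-byte irregexp code objects: t9 is loaded with the ASCII entry, Movz overwrites it with the UC16 entry when the masked bit is zero, and JumpIfSmi then catches the case where that slot still holds a smi because the code was flushed. A sketch of the selection:

```cpp
// Sketch of the branch-free code-object selection keyed on the encoding bit.
#include <cstdint>

uintptr_t SelectRegExpCode(uint32_t instance_type,
                           uintptr_t ascii_code, uintptr_t uc16_code) {
  uint32_t encoding = instance_type & 4u;      // kStringEncodingMask
  uintptr_t code = ascii_code;                 // lw(t9, ...DataAsciiCodeOffset)
  code = (encoding == 0) ? uc16_code : code;   // Movz(t9, t1, a0)
  return code;                                 // caller still checks for a smi
}
```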
| 5018 // a3: encoding of subject string (1 if ASCII, 0 if two_byte); | 5018 // a3: encoding of subject string (1 if ASCII, 0 if two_byte); |
| 5019 // t9: code | 5019 // t9: code |
| 5020 // subject: Subject string | 5020 // subject: Subject string |
| 5021 // regexp_data: RegExp data (FixedArray) | 5021 // regexp_data: RegExp data (FixedArray) |
| (...skipping 1008 matching lines...) |
| 6030 __ xor_(hash, hash, at); | 6030 __ xor_(hash, hash, at); |
| 6031 // hash += hash << 15; | 6031 // hash += hash << 15; |
| 6032 __ sll(at, hash, 15); | 6032 __ sll(at, hash, 15); |
| 6033 __ addu(hash, hash, at); | 6033 __ addu(hash, hash, at); |
| 6034 | 6034 |
| 6035 __ li(at, Operand(String::kHashBitMask)); | 6035 __ li(at, Operand(String::kHashBitMask)); |
| 6036 __ and_(hash, hash, at); | 6036 __ and_(hash, hash, at); |
| 6037 | 6037 |
| 6038 // if (hash == 0) hash = 27; | 6038 // if (hash == 0) hash = 27; |
| 6039 __ ori(at, zero_reg, StringHasher::kZeroHash); | 6039 __ ori(at, zero_reg, StringHasher::kZeroHash); |
| 6040 __ movz(hash, at, hash); | 6040 __ Movz(hash, at, hash); |
| 6041 } | 6041 } |
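The tail of the hash computation does the last mixing step, masks the running hash down to the hash-field width, and substitutes StringHasher::kZeroHash (27) when the masked value comes out zero, zero presumably being reserved. A sketch of just these visible steps; the 30-bit mask is an assumed stand-in for String::kHashBitMask:

```cpp
// Sketch of the hash finalization shown above. Only the visible steps are
// reproduced; the 30-bit mask width is an assumption, not V8's constant.
#include <cstdint>

uint32_t FinalizeHash(uint32_t hash) {
  hash += hash << 15;                 // sll + addu above
  hash &= (1u << 30) - 1u;            // li(kHashBitMask) + and_ (width assumed)
  if (hash == 0) hash = 27;           // ori(kZeroHash) + Movz: avoid a zero hash
  return hash;
}
```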
| 6042 | 6042 |
| 6043 | 6043 |
| 6044 void SubStringStub::Generate(MacroAssembler* masm) { | 6044 void SubStringStub::Generate(MacroAssembler* masm) { |
| 6045 Label runtime; | 6045 Label runtime; |
| 6046 // Stack frame on entry. | 6046 // Stack frame on entry. |
| 6047 // ra: return address | 6047 // ra: return address |
| 6048 // sp[0]: to | 6048 // sp[0]: to |
| 6049 // sp[4]: from | 6049 // sp[4]: from |
| 6050 // sp[8]: string | 6050 // sp[8]: string |
| (...skipping 269 matching lines...) |
| 6320 Register scratch2, | 6320 Register scratch2, |
| 6321 Register scratch3, | 6321 Register scratch3, |
| 6322 Register scratch4) { | 6322 Register scratch4) { |
| 6323 Label result_not_equal, compare_lengths; | 6323 Label result_not_equal, compare_lengths; |
| 6324 // Find minimum length and length difference. | 6324 // Find minimum length and length difference. |
| 6325 __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset)); | 6325 __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset)); |
| 6326 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset)); | 6326 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset)); |
| 6327 __ Subu(scratch3, scratch1, Operand(scratch2)); | 6327 __ Subu(scratch3, scratch1, Operand(scratch2)); |
| 6328 Register length_delta = scratch3; | 6328 Register length_delta = scratch3; |
| 6329 __ slt(scratch4, scratch2, scratch1); | 6329 __ slt(scratch4, scratch2, scratch1); |
| 6330 __ movn(scratch1, scratch2, scratch4); | 6330 __ Movn(scratch1, scratch2, scratch4); |
| 6331 Register min_length = scratch1; | 6331 Register min_length = scratch1; |
| 6332 STATIC_ASSERT(kSmiTag == 0); | 6332 STATIC_ASSERT(kSmiTag == 0); |
| 6333 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg)); | 6333 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg)); |
| 6334 | 6334 |
| 6335 // Compare loop. | 6335 // Compare loop. |
| 6336 GenerateAsciiCharsCompareLoop(masm, | 6336 GenerateAsciiCharsCompareLoop(masm, |
| 6337 left, right, min_length, scratch2, scratch4, v0, | 6337 left, right, min_length, scratch2, scratch4, v0, |
| 6338 &result_not_equal); | 6338 &result_not_equal); |
| 6339 | 6339 |
| 6340 // Compare lengths - strings up to min-length are equal. | 6340 // Compare lengths - strings up to min-length are equal. |
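Before the character loop, the stub computes both the length difference (kept for the final tie-break) and the minimum length, the latter with a branch-free slt + Movn min. A sketch of this skeleton in plain C++; the name and signature are illustrative, not V8's:

```cpp
// Sketch of the flat-string compare skeleton: branch-free min of the lengths,
// a loop over the common prefix, then the length difference as tie-break.
#include <cstddef>

int CompareFlatStringsSketch(const char* left, size_t left_len,
                             const char* right, size_t right_len) {
  long length_delta = static_cast<long>(left_len) - static_cast<long>(right_len);
  size_t min_length = right_len < left_len ? right_len : left_len;  // slt + Movn
  for (size_t i = 0; i < min_length; ++i) {                         // compare loop
    if (left[i] != right[i]) return left[i] < right[i] ? -1 : 1;
  }
  // Equal up to min_length: the shorter string compares smaller.
  return length_delta < 0 ? -1 : (length_delta > 0 ? 1 : 0);
}
```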
| (...skipping 137 matching lines...) |
| 6478 { | 6478 { |
| 6479 Label strings_not_empty; | 6479 Label strings_not_empty; |
| 6480 // Check if either of the strings are empty. In that case return the other. | 6480 // Check if either of the strings are empty. In that case return the other. |
| 6481 // These tests use zero-length check on string-length which is an Smi. | 6481 // These tests use zero-length check on string-length which is an Smi. |
| 6482 // Assert that Smi::FromInt(0) is really 0. | 6482 // Assert that Smi::FromInt(0) is really 0. |
| 6483 STATIC_ASSERT(kSmiTag == 0); | 6483 STATIC_ASSERT(kSmiTag == 0); |
| 6484 ASSERT(Smi::FromInt(0) == 0); | 6484 ASSERT(Smi::FromInt(0) == 0); |
| 6485 __ lw(a2, FieldMemOperand(a0, String::kLengthOffset)); | 6485 __ lw(a2, FieldMemOperand(a0, String::kLengthOffset)); |
| 6486 __ lw(a3, FieldMemOperand(a1, String::kLengthOffset)); | 6486 __ lw(a3, FieldMemOperand(a1, String::kLengthOffset)); |
| 6487 __ mov(v0, a0); // Assume we'll return first string (from a0). | 6487 __ mov(v0, a0); // Assume we'll return first string (from a0). |
| 6488 __ movz(v0, a1, a2); // If first is empty, return second (from a1). | 6488 __ Movz(v0, a1, a2); // If first is empty, return second (from a1). |
| 6489 __ slt(t4, zero_reg, a2); // if (a2 > 0) t4 = 1. | 6489 __ slt(t4, zero_reg, a2); // if (a2 > 0) t4 = 1. |
| 6490 __ slt(t5, zero_reg, a3); // if (a3 > 0) t5 = 1. | 6490 __ slt(t5, zero_reg, a3); // if (a3 > 0) t5 = 1. |
| 6491 __ and_(t4, t4, t5); // Branch if both strings were non-empty. | 6491 __ and_(t4, t4, t5); // Branch if both strings were non-empty. |
| 6492 __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg)); | 6492 __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg)); |
| 6493 | 6493 |
| 6494 __ IncrementCounter(counters->string_add_native(), 1, a2, a3); | 6494 __ IncrementCounter(counters->string_add_native(), 1, a2, a3); |
| 6495 __ DropAndRet(2); | 6495 __ DropAndRet(2); |
| 6496 | 6496 |
| 6497 __ bind(&strings_not_empty); | 6497 __ bind(&strings_not_empty); |
| 6498 } | 6498 } |
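The block above is the empty-operand fast path of StringAdd: v0 starts as the first string, a Movz swaps in the second when the first has length zero, and the slt/and pair only branches past the early return when both operands are non-empty. A sketch of that control flow; the names are illustrative:

```cpp
// Sketch of the StringAdd empty-operand fast path: return the other string
// when either operand is empty, fall through only when both are non-empty.
#include <cstddef>

// Returns nullptr to mean "no fast path, continue with real concatenation".
const char* StringAddFastPath(const char* first, size_t first_len,
                              const char* second, size_t second_len) {
  const char* result = first;                   // mov(v0, a0)
  result = (first_len == 0) ? second : result;  // Movz(v0, a1, a2)
  if (first_len != 0 && second_len != 0) {      // Branch(&strings_not_empty, ...)
    return nullptr;
  }
  return result;                                // IncrementCounter + DropAndRet(2)
}
```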
| (...skipping 1183 matching lines...) |
| 7682 __ Ret(USE_DELAY_SLOT); | 7682 __ Ret(USE_DELAY_SLOT); |
| 7683 __ mov(v0, a0); | 7683 __ mov(v0, a0); |
| 7684 } | 7684 } |
| 7685 | 7685 |
| 7686 | 7686 |
| 7687 #undef __ | 7687 #undef __ |
| 7688 | 7688 |
| 7689 } } // namespace v8::internal | 7689 } } // namespace v8::internal |
| 7690 | 7690 |
| 7691 #endif // V8_TARGET_ARCH_MIPS | 7691 #endif // V8_TARGET_ARCH_MIPS |