Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(10)

Side by Side Diff: src/arm/code-stubs-arm.cc

Issue 21063002: Out-of-line constant pool on Arm: Stage 1 - Free up r7 for use as constant pool pointer register (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Rebase Created 7 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 807 matching lines...) Expand 10 before | Expand all | Expand 10 after
818 } else { 818 } else {
819 // Smi compared non-strictly with a non-Smi non-heap-number. Call 819 // Smi compared non-strictly with a non-Smi non-heap-number. Call
820 // the runtime. 820 // the runtime.
821 __ b(ne, slow); 821 __ b(ne, slow);
822 } 822 }
823 823
824 // Lhs is a smi, rhs is a number. 824 // Lhs is a smi, rhs is a number.
825 // Convert lhs to a double in d7. 825 // Convert lhs to a double in d7.
826 __ SmiToDouble(d7, lhs); 826 __ SmiToDouble(d7, lhs);
827 // Load the double from rhs, tagged HeapNumber r0, to d6. 827 // Load the double from rhs, tagged HeapNumber r0, to d6.
828 __ sub(r7, rhs, Operand(kHeapObjectTag)); 828 __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
829 __ vldr(d6, r7, HeapNumber::kValueOffset);
830 829
831 // We now have both loaded as doubles but we can skip the lhs nan check 830 // We now have both loaded as doubles but we can skip the lhs nan check
832 // since it's a smi. 831 // since it's a smi.
833 __ jmp(lhs_not_nan); 832 __ jmp(lhs_not_nan);
834 833
835 __ bind(&rhs_is_smi); 834 __ bind(&rhs_is_smi);
836 // Rhs is a smi. Check whether the non-smi lhs is a heap number. 835 // Rhs is a smi. Check whether the non-smi lhs is a heap number.
837 __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE); 836 __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
838 if (strict) { 837 if (strict) {
839 // If lhs is not a number and rhs is a smi then strict equality cannot 838 // If lhs is not a number and rhs is a smi then strict equality cannot
840 // succeed. Return non-equal. 839 // succeed. Return non-equal.
841 // If lhs is r0 then there is already a non zero value in it. 840 // If lhs is r0 then there is already a non zero value in it.
842 if (!lhs.is(r0)) { 841 if (!lhs.is(r0)) {
843 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); 842 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
844 } 843 }
845 __ Ret(ne); 844 __ Ret(ne);
846 } else { 845 } else {
847 // Smi compared non-strictly with a non-smi non-heap-number. Call 846 // Smi compared non-strictly with a non-smi non-heap-number. Call
848 // the runtime. 847 // the runtime.
849 __ b(ne, slow); 848 __ b(ne, slow);
850 } 849 }
851 850
852 // Rhs is a smi, lhs is a heap number. 851 // Rhs is a smi, lhs is a heap number.
853 // Load the double from lhs, tagged HeapNumber r1, to d7. 852 // Load the double from lhs, tagged HeapNumber r1, to d7.
854 __ sub(r7, lhs, Operand(kHeapObjectTag)); 853 __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
855 __ vldr(d7, r7, HeapNumber::kValueOffset);
856 // Convert rhs to a double in d6. 854 // Convert rhs to a double in d6.
857 __ SmiToDouble(d6, rhs); 855 __ SmiToDouble(d6, rhs);
858 // Fall through to both_loaded_as_doubles. 856 // Fall through to both_loaded_as_doubles.
859 } 857 }
860 858
861 859
862 // See comment at call site. 860 // See comment at call site.
863 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, 861 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
864 Register lhs, 862 Register lhs,
865 Register rhs) { 863 Register rhs) {
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after
913 (lhs.is(r1) && rhs.is(r0))); 911 (lhs.is(r1) && rhs.is(r0)));
914 912
915 __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE); 913 __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
916 __ b(ne, not_heap_numbers); 914 __ b(ne, not_heap_numbers);
917 __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset)); 915 __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
918 __ cmp(r2, r3); 916 __ cmp(r2, r3);
919 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case. 917 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
920 918
921 // Both are heap numbers. Load them up then jump to the code we have 919 // Both are heap numbers. Load them up then jump to the code we have
922 // for that. 920 // for that.
923 __ sub(r7, rhs, Operand(kHeapObjectTag)); 921 __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
924 __ vldr(d6, r7, HeapNumber::kValueOffset); 922 __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
925 __ sub(r7, lhs, Operand(kHeapObjectTag));
926 __ vldr(d7, r7, HeapNumber::kValueOffset);
927 __ jmp(both_loaded_as_doubles); 923 __ jmp(both_loaded_as_doubles);
928 } 924 }
929 925
930 926
931 // Fast negative check for internalized-to-internalized equality. 927 // Fast negative check for internalized-to-internalized equality.
932 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, 928 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
933 Register lhs, 929 Register lhs,
934 Register rhs, 930 Register rhs,
935 Label* possible_strings, 931 Label* possible_strings,
936 Label* not_both_strings) { 932 Label* not_both_strings) {
(...skipping 323 matching lines...) Expand 10 before | Expand all | Expand 10 after
1260 } 1256 }
1261 1257
1262 1258
1263 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( 1259 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
1264 MacroAssembler* masm) { 1260 MacroAssembler* masm) {
1265 UNIMPLEMENTED(); 1261 UNIMPLEMENTED();
1266 } 1262 }
1267 1263
1268 1264
1269 void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm, 1265 void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
1270 Token::Value op) { 1266 Token::Value op,
1267 Register scratch1,
1268 Register scratch2) {
1271 Register left = r1; 1269 Register left = r1;
1272 Register right = r0; 1270 Register right = r0;
1273 Register scratch1 = r7;
1274 Register scratch2 = r9;
1275 1271
1276 ASSERT(right.is(r0)); 1272 ASSERT(right.is(r0));
1273 ASSERT(!AreAliased(left, right, scratch1, scratch2, ip));
1277 STATIC_ASSERT(kSmiTag == 0); 1274 STATIC_ASSERT(kSmiTag == 0);
1278 1275
1279 Label not_smi_result; 1276 Label not_smi_result;
1280 switch (op) { 1277 switch (op) {
1281 case Token::ADD: 1278 case Token::ADD:
1282 __ add(right, left, Operand(right), SetCC); // Add optimistically. 1279 __ add(right, left, Operand(right), SetCC); // Add optimistically.
1283 __ Ret(vc); 1280 __ Ret(vc);
1284 __ sub(right, right, Operand(left)); // Revert optimistic add. 1281 __ sub(right, right, Operand(left)); // Revert optimistic add.
1285 break; 1282 break;
1286 case Token::SUB: 1283 case Token::SUB:
(...skipping 194 matching lines...) Expand 10 before | Expand all | Expand 10 after
1481 1478
1482 1479
1483 void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, 1480 void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
1484 BinaryOpIC::TypeInfo left_type, 1481 BinaryOpIC::TypeInfo left_type,
1485 BinaryOpIC::TypeInfo right_type, 1482 BinaryOpIC::TypeInfo right_type,
1486 bool smi_operands, 1483 bool smi_operands,
1487 Label* not_numbers, 1484 Label* not_numbers,
1488 Label* gc_required, 1485 Label* gc_required,
1489 Label* miss, 1486 Label* miss,
1490 Token::Value op, 1487 Token::Value op,
1491 OverwriteMode mode) { 1488 OverwriteMode mode,
1489 Register scratch1,
1490 Register scratch2,
1491 Register scratch3,
1492 Register scratch4) {
1492 Register left = r1; 1493 Register left = r1;
1493 Register right = r0; 1494 Register right = r0;
1494 Register scratch1 = r6; 1495 Register result = scratch3;
1495 Register scratch2 = r7; 1496 ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
1496 1497
1497 ASSERT(smi_operands || (not_numbers != NULL)); 1498 ASSERT(smi_operands || (not_numbers != NULL));
1498 if (smi_operands) { 1499 if (smi_operands) {
1499 __ AssertSmi(left); 1500 __ AssertSmi(left);
1500 __ AssertSmi(right); 1501 __ AssertSmi(right);
1501 } 1502 }
1502 if (left_type == BinaryOpIC::SMI) { 1503 if (left_type == BinaryOpIC::SMI) {
1503 __ JumpIfNotSmi(left, miss); 1504 __ JumpIfNotSmi(left, miss);
1504 } 1505 }
1505 if (right_type == BinaryOpIC::SMI) { 1506 if (right_type == BinaryOpIC::SMI) {
1506 __ JumpIfNotSmi(right, miss); 1507 __ JumpIfNotSmi(right, miss);
1507 } 1508 }
1508 1509
1509 Register heap_number_map = r9; 1510 Register heap_number_map = scratch4;
1510 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); 1511 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1511 1512
1512 switch (op) { 1513 switch (op) {
1513 case Token::ADD: 1514 case Token::ADD:
1514 case Token::SUB: 1515 case Token::SUB:
1515 case Token::MUL: 1516 case Token::MUL:
1516 case Token::DIV: 1517 case Token::DIV:
1517 case Token::MOD: { 1518 case Token::MOD: {
1518 // Allocate new heap number for result. 1519 // Allocate new heap number for result.
1519 Register result = r5;
1520 BinaryOpStub_GenerateHeapResultAllocation( 1520 BinaryOpStub_GenerateHeapResultAllocation(
1521 masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); 1521 masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
1522 1522
1523 // Load left and right operands into d0 and d1. 1523 // Load left and right operands into d0 and d1.
1524 if (smi_operands) { 1524 if (smi_operands) {
1525 __ SmiToDouble(d1, right); 1525 __ SmiToDouble(d1, right);
1526 __ SmiToDouble(d0, left); 1526 __ SmiToDouble(d0, left);
1527 } else { 1527 } else {
1528 // Load right operand into d1. 1528 // Load right operand into d1.
1529 if (right_type == BinaryOpIC::INT32) { 1529 if (right_type == BinaryOpIC::INT32) {
(...skipping 98 matching lines...) Expand 10 before | Expand all | Expand 10 after
1628 default: 1628 default:
1629 UNREACHABLE(); 1629 UNREACHABLE();
1630 } 1630 }
1631 1631
1632 // Check that the *signed* result fits in a smi. 1632 // Check that the *signed* result fits in a smi.
1633 __ TrySmiTag(r0, r2, &result_not_a_smi); 1633 __ TrySmiTag(r0, r2, &result_not_a_smi);
1634 __ Ret(); 1634 __ Ret();
1635 1635
1636 // Allocate new heap number for result. 1636 // Allocate new heap number for result.
1637 __ bind(&result_not_a_smi); 1637 __ bind(&result_not_a_smi);
1638 Register result = r5;
1639 if (smi_operands) { 1638 if (smi_operands) {
1640 __ AllocateHeapNumber( 1639 __ AllocateHeapNumber(
1641 result, scratch1, scratch2, heap_number_map, gc_required); 1640 result, scratch1, scratch2, heap_number_map, gc_required);
1642 } else { 1641 } else {
1643 BinaryOpStub_GenerateHeapResultAllocation( 1642 BinaryOpStub_GenerateHeapResultAllocation(
1644 masm, result, heap_number_map, scratch1, scratch2, gc_required, 1643 masm, result, heap_number_map, scratch1, scratch2, gc_required,
1645 mode); 1644 mode);
1646 } 1645 }
1647 1646
1648 // r2: Answer as signed int32. 1647 // r2: Answer as signed int32.
1649 // r5: Heap number to write answer into. 1648 // result: Heap number to write answer into.
1650 1649
1651 // Nothing can go wrong now, so move the heap number to r0, which is the 1650 // Nothing can go wrong now, so move the heap number to r0, which is the
1652 // result. 1651 // result.
1653 __ mov(r0, Operand(r5)); 1652 __ mov(r0, Operand(result));
1654 1653
1655 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As 1654 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
1656 // mentioned above SHR needs to always produce a positive result. 1655 // mentioned above SHR needs to always produce a positive result.
1657 __ vmov(s0, r2); 1656 __ vmov(s0, r2);
1658 if (op == Token::SHR) { 1657 if (op == Token::SHR) {
1659 __ vcvt_f64_u32(d0, s0); 1658 __ vcvt_f64_u32(d0, s0);
1660 } else { 1659 } else {
1661 __ vcvt_f64_s32(d0, s0); 1660 __ vcvt_f64_s32(d0, s0);
1662 } 1661 }
1663 __ sub(r3, r0, Operand(kHeapObjectTag)); 1662 __ sub(r3, r0, Operand(kHeapObjectTag));
(...skipping 10 matching lines...) Expand all
1674 // Generate the smi code. If the operation on smis are successful this return is 1673 // Generate the smi code. If the operation on smis are successful this return is
1675 // generated. If the result is not a smi and heap number allocation is not 1674 // generated. If the result is not a smi and heap number allocation is not
1676 // requested the code falls through. If number allocation is requested but a 1675 // requested the code falls through. If number allocation is requested but a
1677 // heap number cannot be allocated the code jumps to the label gc_required. 1676 // heap number cannot be allocated the code jumps to the label gc_required.
1678 void BinaryOpStub_GenerateSmiCode( 1677 void BinaryOpStub_GenerateSmiCode(
1679 MacroAssembler* masm, 1678 MacroAssembler* masm,
1680 Label* use_runtime, 1679 Label* use_runtime,
1681 Label* gc_required, 1680 Label* gc_required,
1682 Token::Value op, 1681 Token::Value op,
1683 BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, 1682 BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
1684 OverwriteMode mode) { 1683 OverwriteMode mode,
1684 Register scratch1,
1685 Register scratch2,
1686 Register scratch3,
1687 Register scratch4) {
1685 Label not_smis; 1688 Label not_smis;
1686 1689
1687 Register left = r1; 1690 Register left = r1;
1688 Register right = r0; 1691 Register right = r0;
1689 Register scratch1 = r7; 1692 ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
1690 1693
1691 // Perform combined smi check on both operands. 1694 // Perform combined smi check on both operands.
1692 __ orr(scratch1, left, Operand(right)); 1695 __ orr(scratch1, left, Operand(right));
1693 __ JumpIfNotSmi(scratch1, &not_smis); 1696 __ JumpIfNotSmi(scratch1, &not_smis);
1694 1697
1695 // If the smi-smi operation results in a smi return is generated. 1698 // If the smi-smi operation results in a smi return is generated.
1696 BinaryOpStub_GenerateSmiSmiOperation(masm, op); 1699 BinaryOpStub_GenerateSmiSmiOperation(masm, op, scratch1, scratch2);
1697 1700
1698 // If heap number results are possible generate the result in an allocated 1701 // If heap number results are possible generate the result in an allocated
1699 // heap number. 1702 // heap number.
1700 if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) { 1703 if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
1701 BinaryOpStub_GenerateFPOperation( 1704 BinaryOpStub_GenerateFPOperation(
1702 masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true, 1705 masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
1703 use_runtime, gc_required, &not_smis, op, mode); 1706 use_runtime, gc_required, &not_smis, op, mode, scratch2, scratch3,
1707 scratch1, scratch4);
1704 } 1708 }
1705 __ bind(&not_smis); 1709 __ bind(&not_smis);
1706 } 1710 }
1707 1711
1708 1712
1709 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { 1713 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
1710 Label right_arg_changed, call_runtime; 1714 Label right_arg_changed, call_runtime;
1711 1715
1712 if (op_ == Token::MOD && encoded_right_arg_.has_value) { 1716 if (op_ == Token::MOD && encoded_right_arg_.has_value) {
1713 // It is guaranteed that the value will fit into a Smi, because if it 1717 // It is guaranteed that the value will fit into a Smi, because if it
1714 // didn't, we wouldn't be here, see BinaryOp_Patch. 1718 // didn't, we wouldn't be here, see BinaryOp_Patch.
1715 __ cmp(r0, Operand(Smi::FromInt(fixed_right_arg_value()))); 1719 __ cmp(r0, Operand(Smi::FromInt(fixed_right_arg_value())));
1716 __ b(ne, &right_arg_changed); 1720 __ b(ne, &right_arg_changed);
1717 } 1721 }
1718 1722
1719 if (result_type_ == BinaryOpIC::UNINITIALIZED || 1723 if (result_type_ == BinaryOpIC::UNINITIALIZED ||
1720 result_type_ == BinaryOpIC::SMI) { 1724 result_type_ == BinaryOpIC::SMI) {
1721 // Only allow smi results. 1725 // Only allow smi results.
1722 BinaryOpStub_GenerateSmiCode( 1726 BinaryOpStub_GenerateSmiCode(masm, &call_runtime, NULL, op_,
1723 masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_); 1727 NO_HEAPNUMBER_RESULTS, mode_, r5, r6, r4, r9);
1724 } else { 1728 } else {
1725 // Allow heap number result and don't make a transition if a heap number 1729 // Allow heap number result and don't make a transition if a heap number
1726 // cannot be allocated. 1730 // cannot be allocated.
1727 BinaryOpStub_GenerateSmiCode( 1731 BinaryOpStub_GenerateSmiCode(masm, &call_runtime, &call_runtime, op_,
1728 masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, 1732 ALLOW_HEAPNUMBER_RESULTS, mode_, r5, r6, r4, r9);
1729 mode_);
1730 } 1733 }
1731 1734
1732 // Code falls through if the result is not returned as either a smi or heap 1735 // Code falls through if the result is not returned as either a smi or heap
1733 // number. 1736 // number.
1734 __ bind(&right_arg_changed); 1737 __ bind(&right_arg_changed);
1735 GenerateTypeTransition(masm); 1738 GenerateTypeTransition(masm);
1736 1739
1737 __ bind(&call_runtime); 1740 __ bind(&call_runtime);
1738 { 1741 {
1739 FrameScope scope(masm, StackFrame::INTERNAL); 1742 FrameScope scope(masm, StackFrame::INTERNAL);
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after
1773 __ bind(&call_runtime); 1776 __ bind(&call_runtime);
1774 GenerateTypeTransition(masm); 1777 GenerateTypeTransition(masm);
1775 } 1778 }
1776 1779
1777 1780
1778 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { 1781 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
1779 ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); 1782 ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
1780 1783
1781 Register left = r1; 1784 Register left = r1;
1782 Register right = r0; 1785 Register right = r0;
1783 Register scratch1 = r7; 1786 Register scratch1 = r4;
1784 Register scratch2 = r9; 1787 Register scratch2 = r9;
1788 Register scratch3 = r5;
1785 LowDwVfpRegister double_scratch = d0; 1789 LowDwVfpRegister double_scratch = d0;
1786 1790
1787 Register heap_number_result = no_reg; 1791 Register heap_number_result = no_reg;
1788 Register heap_number_map = r6; 1792 Register heap_number_map = r6;
1789 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); 1793 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1790 1794
1791 Label call_runtime; 1795 Label call_runtime;
1792 // Labels for type transition, used for wrong input or output types. 1796 // Labels for type transition, used for wrong input or output types.
1793 // Both label are currently actually bound to the same position. We use two 1797 // Both label are currently actually bound to the same position. We use two
1794 // different label to differentiate the cause leading to type transition. 1798 // different label to differentiate the cause leading to type transition.
1795 Label transition; 1799 Label transition;
1796 1800
1797 // Smi-smi fast case. 1801 // Smi-smi fast case.
1798 Label skip; 1802 Label skip;
1799 __ orr(scratch1, left, right); 1803 __ orr(scratch1, left, right);
1800 __ JumpIfNotSmi(scratch1, &skip); 1804 __ JumpIfNotSmi(scratch1, &skip);
1801 BinaryOpStub_GenerateSmiSmiOperation(masm, op_); 1805 BinaryOpStub_GenerateSmiSmiOperation(masm, op_, scratch2, scratch3);
1802 // Fall through if the result is not a smi. 1806 // Fall through if the result is not a smi.
1803 __ bind(&skip); 1807 __ bind(&skip);
1804 1808
1805 switch (op_) { 1809 switch (op_) {
1806 case Token::ADD: 1810 case Token::ADD:
1807 case Token::SUB: 1811 case Token::SUB:
1808 case Token::MUL: 1812 case Token::MUL:
1809 case Token::DIV: 1813 case Token::DIV:
1810 case Token::MOD: { 1814 case Token::MOD: {
1811 // It could be that only SMIs have been seen at either the left 1815 // It could be that only SMIs have been seen at either the left
(...skipping 73 matching lines...) Expand 10 before | Expand all | Expand 10 after
1885 // A DIV operation expecting an integer result falls through 1889 // A DIV operation expecting an integer result falls through
1886 // to type transition. 1890 // to type transition.
1887 1891
1888 } else { 1892 } else {
1889 if (encoded_right_arg_.has_value) { 1893 if (encoded_right_arg_.has_value) {
1890 __ Vmov(d8, fixed_right_arg_value(), scratch1); 1894 __ Vmov(d8, fixed_right_arg_value(), scratch1);
1891 __ VFPCompareAndSetFlags(d1, d8); 1895 __ VFPCompareAndSetFlags(d1, d8);
1892 __ b(ne, &transition); 1896 __ b(ne, &transition);
1893 } 1897 }
1894 1898
1895 // We preserved r0 and r1 to be able to call runtime.
1896 // Save the left value on the stack.
1897 __ Push(r5, r4);
1898
1899 Label pop_and_call_runtime;
1900
1901 // Allocate a heap number to store the result. 1899 // Allocate a heap number to store the result.
1902 heap_number_result = r5; 1900 heap_number_result = r5;
1903 BinaryOpStub_GenerateHeapResultAllocation(masm, 1901 BinaryOpStub_GenerateHeapResultAllocation(masm,
1904 heap_number_result, 1902 heap_number_result,
1905 heap_number_map, 1903 heap_number_map,
1906 scratch1, 1904 scratch1,
1907 scratch2, 1905 scratch2,
1908 &pop_and_call_runtime, 1906 &call_runtime,
1909 mode_); 1907 mode_);
1910 1908
1911 // Load the left value from the value saved on the stack.
1912 __ Pop(r1, r0);
1913
1914 // Call the C function to handle the double operation. 1909 // Call the C function to handle the double operation.
1915 CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1); 1910 CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1);
1916 if (FLAG_debug_code) { 1911 if (FLAG_debug_code) {
1917 __ stop("Unreachable code."); 1912 __ stop("Unreachable code.");
1918 } 1913 }
1919 1914
1920 __ bind(&pop_and_call_runtime);
1921 __ Drop(2);
1922 __ b(&call_runtime); 1915 __ b(&call_runtime);
1923 } 1916 }
1924 1917
1925 break; 1918 break;
1926 } 1919 }
1927 1920
1928 case Token::BIT_OR: 1921 case Token::BIT_OR:
1929 case Token::BIT_XOR: 1922 case Token::BIT_XOR:
1930 case Token::BIT_AND: 1923 case Token::BIT_AND:
1931 case Token::SAR: 1924 case Token::SAR:
(...skipping 130 matching lines...) Expand 10 before | Expand all | Expand 10 after
2062 __ bind(&done); 2055 __ bind(&done);
2063 2056
2064 GenerateNumberStub(masm); 2057 GenerateNumberStub(masm);
2065 } 2058 }
2066 2059
2067 2060
2068 void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { 2061 void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
2069 Label call_runtime, transition; 2062 Label call_runtime, transition;
2070 BinaryOpStub_GenerateFPOperation( 2063 BinaryOpStub_GenerateFPOperation(
2071 masm, left_type_, right_type_, false, 2064 masm, left_type_, right_type_, false,
2072 &transition, &call_runtime, &transition, op_, mode_); 2065 &transition, &call_runtime, &transition, op_, mode_, r6, r4, r5, r9);
2073 2066
2074 __ bind(&transition); 2067 __ bind(&transition);
2075 GenerateTypeTransition(masm); 2068 GenerateTypeTransition(masm);
2076 2069
2077 __ bind(&call_runtime); 2070 __ bind(&call_runtime);
2078 { 2071 {
2079 FrameScope scope(masm, StackFrame::INTERNAL); 2072 FrameScope scope(masm, StackFrame::INTERNAL);
2080 GenerateRegisterArgsPush(masm); 2073 GenerateRegisterArgsPush(masm);
2081 GenerateCallRuntime(masm); 2074 GenerateCallRuntime(masm);
2082 } 2075 }
2083 __ Ret(); 2076 __ Ret();
2084 } 2077 }
2085 2078
2086 2079
2087 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { 2080 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
2088 Label call_runtime, call_string_add_or_runtime, transition; 2081 Label call_runtime, call_string_add_or_runtime, transition;
2089 2082
2090 BinaryOpStub_GenerateSmiCode( 2083 BinaryOpStub_GenerateSmiCode(
2091 masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_); 2084 masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_,
2085 r5, r6, r4, r9);
2092 2086
2093 BinaryOpStub_GenerateFPOperation( 2087 BinaryOpStub_GenerateFPOperation(
2094 masm, left_type_, right_type_, false, 2088 masm, left_type_, right_type_, false,
2095 &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_); 2089 &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_, r6,
2090 r4, r5, r9);
2096 2091
2097 __ bind(&transition); 2092 __ bind(&transition);
2098 GenerateTypeTransition(masm); 2093 GenerateTypeTransition(masm);
2099 2094
2100 __ bind(&call_string_add_or_runtime); 2095 __ bind(&call_string_add_or_runtime);
2101 if (op_ == Token::ADD) { 2096 if (op_ == Token::ADD) {
2102 GenerateAddStrings(masm); 2097 GenerateAddStrings(masm);
2103 } 2098 }
2104 2099
2105 __ bind(&call_runtime); 2100 __ bind(&call_runtime);
(...skipping 81 matching lines...) Expand 10 before | Expand all | Expand 10 after
2187 // Untagged case: double input in d2, double result goes 2182 // Untagged case: double input in d2, double result goes
2188 // into d2. 2183 // into d2.
2189 // Tagged case: tagged input on top of stack and in r0, 2184 // Tagged case: tagged input on top of stack and in r0,
2190 // tagged result (heap number) goes into r0. 2185 // tagged result (heap number) goes into r0.
2191 2186
2192 Label input_not_smi; 2187 Label input_not_smi;
2193 Label loaded; 2188 Label loaded;
2194 Label calculate; 2189 Label calculate;
2195 Label invalid_cache; 2190 Label invalid_cache;
2196 const Register scratch0 = r9; 2191 const Register scratch0 = r9;
2197 const Register scratch1 = r7; 2192 Register scratch1 = no_reg; // will be r4
2198 const Register cache_entry = r0; 2193 const Register cache_entry = r0;
2199 const bool tagged = (argument_type_ == TAGGED); 2194 const bool tagged = (argument_type_ == TAGGED);
2200 2195
2201 if (tagged) { 2196 if (tagged) {
2202 // Argument is a number and is on stack and in r0. 2197 // Argument is a number and is on stack and in r0.
2203 // Load argument and check if it is a smi. 2198 // Load argument and check if it is a smi.
2204 __ JumpIfNotSmi(r0, &input_not_smi); 2199 __ JumpIfNotSmi(r0, &input_not_smi);
2205 2200
2206 // Input is a smi. Convert to double and load the low and high words 2201 // Input is a smi. Convert to double and load the low and high words
2207 // of the double into r2, r3. 2202 // of the double into r2, r3.
(...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after
2267 #endif 2262 #endif
2268 2263
2269 // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12]. 2264 // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
2270 __ add(r1, r1, Operand(r1, LSL, 1)); 2265 __ add(r1, r1, Operand(r1, LSL, 1));
2271 __ add(cache_entry, cache_entry, Operand(r1, LSL, 2)); 2266 __ add(cache_entry, cache_entry, Operand(r1, LSL, 2));
2272 // Check if cache matches: Double value is stored in uint32_t[2] array. 2267 // Check if cache matches: Double value is stored in uint32_t[2] array.
2273 __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit()); 2268 __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
2274 __ cmp(r2, r4); 2269 __ cmp(r2, r4);
2275 __ cmp(r3, r5, eq); 2270 __ cmp(r3, r5, eq);
2276 __ b(ne, &calculate); 2271 __ b(ne, &calculate);
2272
2273 scratch1 = r4; // Start of scratch1 range.
2274
2277 // Cache hit. Load result, cleanup and return. 2275 // Cache hit. Load result, cleanup and return.
2278 Counters* counters = masm->isolate()->counters(); 2276 Counters* counters = masm->isolate()->counters();
2279 __ IncrementCounter( 2277 __ IncrementCounter(
2280 counters->transcendental_cache_hit(), 1, scratch0, scratch1); 2278 counters->transcendental_cache_hit(), 1, scratch0, scratch1);
2281 if (tagged) { 2279 if (tagged) {
2282 // Pop input value from stack and load result into r0. 2280 // Pop input value from stack and load result into r0.
2283 __ pop(); 2281 __ pop();
2284 __ mov(r0, Operand(r6)); 2282 __ mov(r0, Operand(r6));
2285 } else { 2283 } else {
2286 // Load result into d2. 2284 // Load result into d2.
(...skipping 122 matching lines...) Expand 10 before | Expand all | Expand 10 after
2409 const Register base = r1; 2407 const Register base = r1;
2410 const Register exponent = r2; 2408 const Register exponent = r2;
2411 const Register heapnumbermap = r5; 2409 const Register heapnumbermap = r5;
2412 const Register heapnumber = r0; 2410 const Register heapnumber = r0;
2413 const DwVfpRegister double_base = d1; 2411 const DwVfpRegister double_base = d1;
2414 const DwVfpRegister double_exponent = d2; 2412 const DwVfpRegister double_exponent = d2;
2415 const DwVfpRegister double_result = d3; 2413 const DwVfpRegister double_result = d3;
2416 const DwVfpRegister double_scratch = d0; 2414 const DwVfpRegister double_scratch = d0;
2417 const SwVfpRegister single_scratch = s0; 2415 const SwVfpRegister single_scratch = s0;
2418 const Register scratch = r9; 2416 const Register scratch = r9;
2419 const Register scratch2 = r7; 2417 const Register scratch2 = r4;
2420 2418
2421 Label call_runtime, done, int_exponent; 2419 Label call_runtime, done, int_exponent;
2422 if (exponent_type_ == ON_STACK) { 2420 if (exponent_type_ == ON_STACK) {
2423 Label base_is_smi, unpack_exponent; 2421 Label base_is_smi, unpack_exponent;
2424 // The exponent and base are supplied as arguments on the stack. 2422 // The exponent and base are supplied as arguments on the stack.
2425 // This can only happen if the stub is called from non-optimized code. 2423 // This can only happen if the stub is called from non-optimized code.
2426 // Load input parameters from stack to double registers. 2424 // Load input parameters from stack to double registers.
2427 __ ldr(base, MemOperand(sp, 1 * kPointerSize)); 2425 __ ldr(base, MemOperand(sp, 1 * kPointerSize));
2428 __ ldr(exponent, MemOperand(sp, 0 * kPointerSize)); 2426 __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
2429 2427
(...skipping 489 matching lines...) Expand 10 before | Expand all | Expand 10 after
2919 offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize; 2917 offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
2920 __ ldr(r4, MemOperand(sp, offset_to_argv)); 2918 __ ldr(r4, MemOperand(sp, offset_to_argv));
2921 2919
2922 // Push a frame with special values setup to mark it as an entry frame. 2920 // Push a frame with special values setup to mark it as an entry frame.
2923 // r0: code entry 2921 // r0: code entry
2924 // r1: function 2922 // r1: function
2925 // r2: receiver 2923 // r2: receiver
2926 // r3: argc 2924 // r3: argc
2927 // r4: argv 2925 // r4: argv
2928 Isolate* isolate = masm->isolate(); 2926 Isolate* isolate = masm->isolate();
2929 __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
2930 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; 2927 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
2931 __ mov(r7, Operand(Smi::FromInt(marker))); 2928 __ mov(r8, Operand(Smi::FromInt(marker)));
2932 __ mov(r6, Operand(Smi::FromInt(marker))); 2929 __ mov(r6, Operand(Smi::FromInt(marker)));
2933 __ mov(r5, 2930 __ mov(r5,
2934 Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate))); 2931 Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
2935 __ ldr(r5, MemOperand(r5)); 2932 __ ldr(r5, MemOperand(r5));
2936 __ Push(r8, r7, r6, r5); 2933 __ mov(ip, Operand(-1)); // Push a bad frame pointer to fail if it is used.
2934 __ Push(ip, r8, r6, r5);
2937 2935
2938 // Set up frame pointer for the frame to be pushed. 2936 // Set up frame pointer for the frame to be pushed.
2939 __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); 2937 __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
2940 2938
2941 // If this is the outermost JS call, set js_entry_sp value. 2939 // If this is the outermost JS call, set js_entry_sp value.
2942 Label non_outermost_js; 2940 Label non_outermost_js;
2943 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate); 2941 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
2944 __ mov(r5, Operand(ExternalReference(js_entry_sp))); 2942 __ mov(r5, Operand(ExternalReference(js_entry_sp)));
2945 __ ldr(r6, MemOperand(r5)); 2943 __ ldr(r6, MemOperand(r5));
2946 __ cmp(r6, Operand::Zero()); 2944 __ cmp(r6, Operand::Zero());
(...skipping 25 matching lines...) Expand all
2972 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, 2970 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2973 isolate))); 2971 isolate)));
2974 } 2972 }
2975 __ str(r0, MemOperand(ip)); 2973 __ str(r0, MemOperand(ip));
2976 __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception()))); 2974 __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
2977 __ b(&exit); 2975 __ b(&exit);
2978 2976
2979 // Invoke: Link this frame into the handler chain. There's only one 2977 // Invoke: Link this frame into the handler chain. There's only one
2980 // handler block in this code object, so its index is 0. 2978 // handler block in this code object, so its index is 0.
2981 __ bind(&invoke); 2979 __ bind(&invoke);
2982 // Must preserve r0-r4, r5-r7 are available. 2980 // Must preserve r0-r4, r5-r6 are available.
2983 __ PushTryHandler(StackHandler::JS_ENTRY, 0); 2981 __ PushTryHandler(StackHandler::JS_ENTRY, 0);
2984 // If an exception not caught by another handler occurs, this handler 2982 // If an exception not caught by another handler occurs, this handler
2985 // returns control to the code after the bl(&invoke) above, which 2983 // returns control to the code after the bl(&invoke) above, which
2986 // restores all kCalleeSaved registers (including cp and fp) to their 2984 // restores all kCalleeSaved registers (including cp and fp) to their
2987 // saved values before returning a failure to C. 2985 // saved values before returning a failure to C.
2988 2986
2989 // Clear any pending exceptions. 2987 // Clear any pending exceptions.
2990 __ mov(r5, Operand(isolate->factory()->the_hole_value())); 2988 __ mov(r5, Operand(isolate->factory()->the_hole_value()));
2991 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, 2989 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2992 isolate))); 2990 isolate)));
(...skipping 586 matching lines...) Expand 10 before | Expand all | Expand 10 after
3579 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1 3577 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
3580 // The mapped parameter thus need to get indices 3578 // The mapped parameter thus need to get indices
3581 // MIN_CONTEXT_SLOTS+parameter_count-1 .. 3579 // MIN_CONTEXT_SLOTS+parameter_count-1 ..
3582 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count 3580 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
3583 // We loop from right to left. 3581 // We loop from right to left.
3584 Label parameters_loop, parameters_test; 3582 Label parameters_loop, parameters_test;
3585 __ mov(r6, r1); 3583 __ mov(r6, r1);
3586 __ ldr(r9, MemOperand(sp, 0 * kPointerSize)); 3584 __ ldr(r9, MemOperand(sp, 0 * kPointerSize));
3587 __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS))); 3585 __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
3588 __ sub(r9, r9, Operand(r1)); 3586 __ sub(r9, r9, Operand(r1));
3589 __ LoadRoot(r7, Heap::kTheHoleValueRootIndex); 3587 __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
3590 __ add(r3, r4, Operand(r6, LSL, 1)); 3588 __ add(r3, r4, Operand(r6, LSL, 1));
3591 __ add(r3, r3, Operand(kParameterMapHeaderSize)); 3589 __ add(r3, r3, Operand(kParameterMapHeaderSize));
3592 3590
3593 // r6 = loop variable (tagged) 3591 // r6 = loop variable (tagged)
3594 // r1 = mapping index (tagged) 3592 // r1 = mapping index (tagged)
3595 // r3 = address of backing store (tagged) 3593 // r3 = address of backing store (tagged)
3596 // r4 = address of parameter map (tagged) 3594 // r4 = address of parameter map (tagged), which is also the address of new
3597 // r5 = temporary scratch (a.o., for address calculation) 3595 // object + Heap::kArgumentsObjectSize (tagged)
3598 // r7 = the hole value 3596 // r0 = temporary scratch (a.o., for address calculation)
3597 // r5 = the hole value
3599 __ jmp(&parameters_test); 3598 __ jmp(&parameters_test);
3600 3599
3601 __ bind(&parameters_loop); 3600 __ bind(&parameters_loop);
3602 __ sub(r6, r6, Operand(Smi::FromInt(1))); 3601 __ sub(r6, r6, Operand(Smi::FromInt(1)));
3603 __ mov(r5, Operand(r6, LSL, 1)); 3602 __ mov(r0, Operand(r6, LSL, 1));
3604 __ add(r5, r5, Operand(kParameterMapHeaderSize - kHeapObjectTag)); 3603 __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
3605 __ str(r9, MemOperand(r4, r5)); 3604 __ str(r9, MemOperand(r4, r0));
3606 __ sub(r5, r5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize)); 3605 __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
3607 __ str(r7, MemOperand(r3, r5)); 3606 __ str(r5, MemOperand(r3, r0));
3608 __ add(r9, r9, Operand(Smi::FromInt(1))); 3607 __ add(r9, r9, Operand(Smi::FromInt(1)));
3609 __ bind(&parameters_test); 3608 __ bind(&parameters_test);
3610 __ cmp(r6, Operand(Smi::FromInt(0))); 3609 __ cmp(r6, Operand(Smi::FromInt(0)));
3611 __ b(ne, &parameters_loop); 3610 __ b(ne, &parameters_loop);
3612 3611
3612 // Restore r0 = new object (tagged)
3613 __ sub(r0, r4, Operand(Heap::kArgumentsObjectSize));
3614
3613 __ bind(&skip_parameter_map); 3615 __ bind(&skip_parameter_map);
3616 // r0 = address of new object (tagged)
3614 // r2 = argument count (tagged) 3617 // r2 = argument count (tagged)
3615 // r3 = address of backing store (tagged) 3618 // r3 = address of backing store (tagged)
3616 // r5 = scratch 3619 // r5 = scratch
3617 // Copy arguments header and remaining slots (if there are any). 3620 // Copy arguments header and remaining slots (if there are any).
3618 __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex); 3621 __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
3619 __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset)); 3622 __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset));
3620 __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset)); 3623 __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset));
3621 3624
3622 Label arguments_loop, arguments_test; 3625 Label arguments_loop, arguments_test;
3623 __ mov(r9, r1); 3626 __ mov(r9, r1);
(...skipping 10 matching lines...) Expand all
3634 3637
3635 __ bind(&arguments_test); 3638 __ bind(&arguments_test);
3636 __ cmp(r9, Operand(r2)); 3639 __ cmp(r9, Operand(r2));
3637 __ b(lt, &arguments_loop); 3640 __ b(lt, &arguments_loop);
3638 3641
3639 // Return and remove the on-stack parameters. 3642 // Return and remove the on-stack parameters.
3640 __ add(sp, sp, Operand(3 * kPointerSize)); 3643 __ add(sp, sp, Operand(3 * kPointerSize));
3641 __ Ret(); 3644 __ Ret();
3642 3645
3643 // Do the runtime call to allocate the arguments object. 3646 // Do the runtime call to allocate the arguments object.
3647 // r0 = address of new object (tagged)
3644 // r2 = argument count (tagged) 3648 // r2 = argument count (tagged)
3645 __ bind(&runtime); 3649 __ bind(&runtime);
3646 __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count. 3650 __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
3647 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); 3651 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
3648 } 3652 }
3649 3653
3650 3654
3651 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { 3655 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
3652 // sp[0] : number of parameters 3656 // sp[0] : number of parameters
3653 // sp[4] : receiver displacement 3657 // sp[4] : receiver displacement
(...skipping 108 matching lines...) Expand 10 before | Expand all | Expand 10 after
3762 const int kJSRegExpOffset = 3 * kPointerSize; 3766 const int kJSRegExpOffset = 3 * kPointerSize;
3763 3767
3764 Label runtime; 3768 Label runtime;
3765 // Allocation of registers for this function. These are in callee save 3769 // Allocation of registers for this function. These are in callee save
3766 // registers and will be preserved by the call to the native RegExp code, as 3770 // registers and will be preserved by the call to the native RegExp code, as
3767 // this code is called using the normal C calling convention. When calling 3771 // this code is called using the normal C calling convention. When calling
3768 // directly from generated code the native RegExp code will not do a GC and 3772 // directly from generated code the native RegExp code will not do a GC and
3769 // therefore the content of these registers are safe to use after the call. 3773 // therefore the content of these registers are safe to use after the call.
3770 Register subject = r4; 3774 Register subject = r4;
3771 Register regexp_data = r5; 3775 Register regexp_data = r5;
3772 Register last_match_info_elements = r6; 3776 Register last_match_info_elements = no_reg; // will be r6;
3773 3777
3774 // Ensure that a RegExp stack is allocated. 3778 // Ensure that a RegExp stack is allocated.
3775 Isolate* isolate = masm->isolate(); 3779 Isolate* isolate = masm->isolate();
3776 ExternalReference address_of_regexp_stack_memory_address = 3780 ExternalReference address_of_regexp_stack_memory_address =
3777 ExternalReference::address_of_regexp_stack_memory_address(isolate); 3781 ExternalReference::address_of_regexp_stack_memory_address(isolate);
3778 ExternalReference address_of_regexp_stack_memory_size = 3782 ExternalReference address_of_regexp_stack_memory_size =
3779 ExternalReference::address_of_regexp_stack_memory_size(isolate); 3783 ExternalReference::address_of_regexp_stack_memory_size(isolate);
3780 __ mov(r0, Operand(address_of_regexp_stack_memory_size)); 3784 __ mov(r0, Operand(address_of_regexp_stack_memory_size));
3781 __ ldr(r0, MemOperand(r0, 0)); 3785 __ ldr(r0, MemOperand(r0, 0));
3782 __ cmp(r0, Operand::Zero()); 3786 __ cmp(r0, Operand::Zero());
(...skipping 112 matching lines...) Expand 10 before | Expand all | Expand 10 after
3895 __ JumpIfNotSmi(r1, &runtime); 3899 __ JumpIfNotSmi(r1, &runtime);
3896 __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset)); 3900 __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset));
3897 __ cmp(r3, Operand(r1)); 3901 __ cmp(r3, Operand(r1));
3898 __ b(ls, &runtime); 3902 __ b(ls, &runtime);
3899 __ SmiUntag(r1); 3903 __ SmiUntag(r1);
3900 3904
3901 STATIC_ASSERT(4 == kOneByteStringTag); 3905 STATIC_ASSERT(4 == kOneByteStringTag);
3902 STATIC_ASSERT(kTwoByteStringTag == 0); 3906 STATIC_ASSERT(kTwoByteStringTag == 0);
3903 __ and_(r0, r0, Operand(kStringEncodingMask)); 3907 __ and_(r0, r0, Operand(kStringEncodingMask));
3904 __ mov(r3, Operand(r0, ASR, 2), SetCC); 3908 __ mov(r3, Operand(r0, ASR, 2), SetCC);
3905 __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne); 3909 __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
3906 __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq); 3910 __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
3907 3911
3908 // (E) Carry on. String handling is done. 3912 // (E) Carry on. String handling is done.
3909 // r7: irregexp code 3913 // r6: irregexp code
3910 // Check that the irregexp code has been generated for the actual string 3914 // Check that the irregexp code has been generated for the actual string
3911 // encoding. If it has, the field contains a code object otherwise it contains 3915 // encoding. If it has, the field contains a code object otherwise it contains
3912 // a smi (code flushing support). 3916 // a smi (code flushing support).
3913 __ JumpIfSmi(r7, &runtime); 3917 __ JumpIfSmi(r6, &runtime);
3914 3918
3915 // r1: previous index 3919 // r1: previous index
3916 // r3: encoding of subject string (1 if ASCII, 0 if two_byte); 3920 // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
3917 // r7: code 3921 // r6: code
3918 // subject: Subject string 3922 // subject: Subject string
3919 // regexp_data: RegExp data (FixedArray) 3923 // regexp_data: RegExp data (FixedArray)
3920 // All checks done. Now push arguments for native regexp code. 3924 // All checks done. Now push arguments for native regexp code.
3921 __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2); 3925 __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
3922 3926
3923 // Isolates: note we add an additional parameter here (isolate pointer). 3927 // Isolates: note we add an additional parameter here (isolate pointer).
3924 const int kRegExpExecuteArguments = 9; 3928 const int kRegExpExecuteArguments = 9;
3925 const int kParameterRegisters = 4; 3929 const int kParameterRegisters = 4;
3926 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); 3930 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
3927 3931
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after
3974 __ SmiUntag(r8); 3978 __ SmiUntag(r8);
3975 __ add(r3, r9, Operand(r8, LSL, r3)); 3979 __ add(r3, r9, Operand(r8, LSL, r3));
3976 3980
3977 // Argument 2 (r1): Previous index. 3981 // Argument 2 (r1): Previous index.
3978 // Already there 3982 // Already there
3979 3983
3980 // Argument 1 (r0): Subject string. 3984 // Argument 1 (r0): Subject string.
3981 __ mov(r0, subject); 3985 __ mov(r0, subject);
3982 3986
3983 // Locate the code entry and call it. 3987 // Locate the code entry and call it.
3984 __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag)); 3988 __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
3985 DirectCEntryStub stub; 3989 DirectCEntryStub stub;
3986 stub.GenerateCall(masm, r7); 3990 stub.GenerateCall(masm, r6);
3987 3991
3988 __ LeaveExitFrame(false, no_reg, true); 3992 __ LeaveExitFrame(false, no_reg, true);
3989 3993
3994 last_match_info_elements = r6;
3995
3990 // r0: result 3996 // r0: result
3991 // subject: subject string (callee saved) 3997 // subject: subject string (callee saved)
3992 // regexp_data: RegExp data (callee saved) 3998 // regexp_data: RegExp data (callee saved)
3993 // last_match_info_elements: Last match info elements (callee saved) 3999 // last_match_info_elements: Last match info elements (callee saved)
3994 // Check the result. 4000 // Check the result.
3995 Label success; 4001 Label success;
3996 __ cmp(r0, Operand(1)); 4002 __ cmp(r0, Operand(1));
3997 // We expect exactly one result since we force the called regexp to behave 4003 // We expect exactly one result since we force the called regexp to behave
3998 // as non-global. 4004 // as non-global.
3999 __ b(eq, &success); 4005 __ b(eq, &success);
(...skipping 68 matching lines...) Expand 10 before | Expand all | Expand 10 after
4068 __ str(r2, FieldMemOperand(last_match_info_elements, 4074 __ str(r2, FieldMemOperand(last_match_info_elements,
4069 RegExpImpl::kLastCaptureCountOffset)); 4075 RegExpImpl::kLastCaptureCountOffset));
4070 // Store last subject and last input. 4076 // Store last subject and last input.
4071 __ str(subject, 4077 __ str(subject,
4072 FieldMemOperand(last_match_info_elements, 4078 FieldMemOperand(last_match_info_elements,
4073 RegExpImpl::kLastSubjectOffset)); 4079 RegExpImpl::kLastSubjectOffset));
4074 __ mov(r2, subject); 4080 __ mov(r2, subject);
4075 __ RecordWriteField(last_match_info_elements, 4081 __ RecordWriteField(last_match_info_elements,
4076 RegExpImpl::kLastSubjectOffset, 4082 RegExpImpl::kLastSubjectOffset,
4077 subject, 4083 subject,
4078 r7, 4084 r3,
4079 kLRHasNotBeenSaved, 4085 kLRHasNotBeenSaved,
4080 kDontSaveFPRegs); 4086 kDontSaveFPRegs);
4081 __ mov(subject, r2); 4087 __ mov(subject, r2);
4082 __ str(subject, 4088 __ str(subject,
4083 FieldMemOperand(last_match_info_elements, 4089 FieldMemOperand(last_match_info_elements,
4084 RegExpImpl::kLastInputOffset)); 4090 RegExpImpl::kLastInputOffset));
4085 __ RecordWriteField(last_match_info_elements, 4091 __ RecordWriteField(last_match_info_elements,
4086 RegExpImpl::kLastInputOffset, 4092 RegExpImpl::kLastInputOffset,
4087 subject, 4093 subject,
4088 r7, 4094 r3,
4089 kLRHasNotBeenSaved, 4095 kLRHasNotBeenSaved,
4090 kDontSaveFPRegs); 4096 kDontSaveFPRegs);
4091 4097
4092 // Get the static offsets vector filled by the native regexp code. 4098 // Get the static offsets vector filled by the native regexp code.
4093 ExternalReference address_of_static_offsets_vector = 4099 ExternalReference address_of_static_offsets_vector =
4094 ExternalReference::address_of_static_offsets_vector(isolate); 4100 ExternalReference::address_of_static_offsets_vector(isolate);
4095 __ mov(r2, Operand(address_of_static_offsets_vector)); 4101 __ mov(r2, Operand(address_of_static_offsets_vector));
4096 4102
4097 // r1: number of capture registers 4103 // r1: number of capture registers
4098 // r2: offsets vector 4104 // r2: offsets vector
(...skipping 546 matching lines...) Expand 10 before | Expand all | Expand 10 after
4645 4651
4646 4652
4647 void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm, 4653 void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
4648 Register dest, 4654 Register dest,
4649 Register src, 4655 Register src,
4650 Register count, 4656 Register count,
4651 Register scratch1, 4657 Register scratch1,
4652 Register scratch2, 4658 Register scratch2,
4653 Register scratch3, 4659 Register scratch3,
4654 Register scratch4, 4660 Register scratch4,
4655 Register scratch5,
4656 int flags) { 4661 int flags) {
4657 bool ascii = (flags & COPY_ASCII) != 0; 4662 bool ascii = (flags & COPY_ASCII) != 0;
4658 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0; 4663 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
4659 4664
4660 if (dest_always_aligned && FLAG_debug_code) { 4665 if (dest_always_aligned && FLAG_debug_code) {
4661 // Check that destination is actually word aligned if the flag says 4666 // Check that destination is actually word aligned if the flag says
4662 // that it is. 4667 // that it is.
4663 __ tst(dest, Operand(kPointerAlignmentMask)); 4668 __ tst(dest, Operand(kPointerAlignmentMask));
4664 __ Check(eq, kDestinationOfCopyNotAligned); 4669 __ Check(eq, kDestinationOfCopyNotAligned);
4665 } 4670 }
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after
4720 __ and_(src, src, Operand(~3)); // Round down to load previous word. 4725 __ and_(src, src, Operand(~3)); // Round down to load previous word.
4721 __ ldr(scratch1, MemOperand(src, 4, PostIndex)); 4726 __ ldr(scratch1, MemOperand(src, 4, PostIndex));
4722 // Store the "shift" most significant bits of scratch in the least 4727 // Store the "shift" most significant bits of scratch in the least
4723 // signficant bits (i.e., shift down by (32-shift)). 4728 // signficant bits (i.e., shift down by (32-shift)).
4724 __ rsb(scratch2, left_shift, Operand(32)); 4729 __ rsb(scratch2, left_shift, Operand(32));
4725 Register right_shift = scratch2; 4730 Register right_shift = scratch2;
4726 __ mov(scratch1, Operand(scratch1, LSR, right_shift)); 4731 __ mov(scratch1, Operand(scratch1, LSR, right_shift));
4727 4732
4728 __ bind(&loop); 4733 __ bind(&loop);
4729 __ ldr(scratch3, MemOperand(src, 4, PostIndex)); 4734 __ ldr(scratch3, MemOperand(src, 4, PostIndex));
4730 __ sub(scratch5, limit, Operand(dest));
4731 __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift)); 4735 __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
4732 __ str(scratch1, MemOperand(dest, 4, PostIndex)); 4736 __ str(scratch1, MemOperand(dest, 4, PostIndex));
4733 __ mov(scratch1, Operand(scratch3, LSR, right_shift)); 4737 __ mov(scratch1, Operand(scratch3, LSR, right_shift));
4734 // Loop if four or more bytes left to copy. 4738 // Loop if four or more bytes left to copy.
4735 // Compare to eight, because we did the subtract before increasing dst. 4739 __ sub(scratch3, limit, Operand(dest));
4736 __ sub(scratch5, scratch5, Operand(8), SetCC); 4740 __ sub(scratch3, scratch3, Operand(4), SetCC);
4737 __ b(ge, &loop); 4741 __ b(ge, &loop);
4738 } 4742 }
4739 // There is now between zero and three bytes left to copy (negative that 4743 // There is now between zero and three bytes left to copy (negative that
4740 // number is in scratch5), and between one and three bytes already read into 4744 // number is in scratch3), and between one and three bytes already read into
4741 // scratch1 (eight times that number in scratch4). We may have read past 4745 // scratch1 (eight times that number in scratch4). We may have read past
4742 // the end of the string, but because objects are aligned, we have not read 4746 // the end of the string, but because objects are aligned, we have not read
4743 // past the end of the object. 4747 // past the end of the object.
4744 // Find the minimum of remaining characters to move and preloaded characters 4748 // Find the minimum of remaining characters to move and preloaded characters
4745 // and write those as bytes. 4749 // and write those as bytes.
4746 __ add(scratch5, scratch5, Operand(4), SetCC); 4750 __ add(scratch3, scratch3, Operand(4), SetCC);
4747 __ b(eq, &done); 4751 __ b(eq, &done);
4748 __ cmp(scratch4, Operand(scratch5, LSL, 3), ne); 4752 __ cmp(scratch4, Operand(scratch3, LSL, 3), ne);
4749 // Move minimum of bytes read and bytes left to copy to scratch4. 4753 // Move minimum of bytes read and bytes left to copy to scratch4.
4750 __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt); 4754 __ mov(scratch3, Operand(scratch4, LSR, 3), LeaveCC, lt);
4751 // Between one and three (value in scratch5) characters already read into 4755 // Between one and three (value in scratch3) characters already read into
4752 // scratch ready to write. 4756 // scratch ready to write.
4753 __ cmp(scratch5, Operand(2)); 4757 __ cmp(scratch3, Operand(2));
4754 __ strb(scratch1, MemOperand(dest, 1, PostIndex)); 4758 __ strb(scratch1, MemOperand(dest, 1, PostIndex));
4755 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge); 4759 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
4756 __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge); 4760 __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
4757 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt); 4761 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
4758 __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt); 4762 __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
4759 // Copy any remaining bytes. 4763 // Copy any remaining bytes.
4760 __ b(&byte_loop); 4764 __ b(&byte_loop);
4761 4765
4762 // Simple loop. 4766 // Simple loop.
4763 // Copy words from src to dst, until less than four bytes left. 4767 // Copy words from src to dst, until less than four bytes left.
(...skipping 319 matching lines...) Expand 10 before | Expand all | Expand 10 after
5083 // Allocate new sliced string. At this point we do not reload the instance 5087 // Allocate new sliced string. At this point we do not reload the instance
5084 // type including the string encoding because we simply rely on the info 5088 // type including the string encoding because we simply rely on the info
5085 // provided by the original string. It does not matter if the original 5089 // provided by the original string. It does not matter if the original
5086 // string's encoding is wrong because we always have to recheck encoding of 5090 // string's encoding is wrong because we always have to recheck encoding of
5087 // the newly created string's parent anyways due to externalized strings. 5091 // the newly created string's parent anyways due to externalized strings.
5088 Label two_byte_slice, set_slice_header; 5092 Label two_byte_slice, set_slice_header;
5089 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); 5093 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
5090 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); 5094 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
5091 __ tst(r1, Operand(kStringEncodingMask)); 5095 __ tst(r1, Operand(kStringEncodingMask));
5092 __ b(eq, &two_byte_slice); 5096 __ b(eq, &two_byte_slice);
5093 __ AllocateAsciiSlicedString(r0, r2, r6, r7, &runtime); 5097 __ AllocateAsciiSlicedString(r0, r2, r6, r4, &runtime);
5094 __ jmp(&set_slice_header); 5098 __ jmp(&set_slice_header);
5095 __ bind(&two_byte_slice); 5099 __ bind(&two_byte_slice);
5096 __ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime); 5100 __ AllocateTwoByteSlicedString(r0, r2, r6, r4, &runtime);
5097 __ bind(&set_slice_header); 5101 __ bind(&set_slice_header);
5098 __ mov(r3, Operand(r3, LSL, 1)); 5102 __ mov(r3, Operand(r3, LSL, 1));
5099 __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset)); 5103 __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
5100 __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset)); 5104 __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset));
5101 __ jmp(&return_r0); 5105 __ jmp(&return_r0);
5102 5106
5103 __ bind(&copy_routine); 5107 __ bind(&copy_routine);
5104 } 5108 }
5105 5109
5106 // r5: underlying subject string 5110 // r5: underlying subject string
(...skipping 20 matching lines...) Expand all
5127 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); 5131 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
5128 __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); 5132 __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
5129 5133
5130 __ bind(&allocate_result); 5134 __ bind(&allocate_result);
5131 // Sequential acii string. Allocate the result. 5135 // Sequential acii string. Allocate the result.
5132 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0); 5136 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
5133 __ tst(r1, Operand(kStringEncodingMask)); 5137 __ tst(r1, Operand(kStringEncodingMask));
5134 __ b(eq, &two_byte_sequential); 5138 __ b(eq, &two_byte_sequential);
5135 5139
5136 // Allocate and copy the resulting ASCII string. 5140 // Allocate and copy the resulting ASCII string.
5137 __ AllocateAsciiString(r0, r2, r4, r6, r7, &runtime); 5141 __ AllocateAsciiString(r0, r2, r4, r6, r1, &runtime);
5138 5142
5139 // Locate first character of substring to copy. 5143 // Locate first character of substring to copy.
5140 __ add(r5, r5, r3); 5144 __ add(r5, r5, r3);
5141 // Locate first character of result. 5145 // Locate first character of result.
5142 __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); 5146 __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
5143 5147
5144 // r0: result string 5148 // r0: result string
5145 // r1: first character of result string 5149 // r1: first character of result string
5146 // r2: result string length 5150 // r2: result string length
5147 // r5: first character of substring to copy 5151 // r5: first character of substring to copy
5148 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); 5152 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
5149 StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, 5153 StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r9,
5150 COPY_ASCII | DEST_ALWAYS_ALIGNED); 5154 COPY_ASCII | DEST_ALWAYS_ALIGNED);
5151 __ jmp(&return_r0); 5155 __ jmp(&return_r0);
5152 5156
5153 // Allocate and copy the resulting two-byte string. 5157 // Allocate and copy the resulting two-byte string.
5154 __ bind(&two_byte_sequential); 5158 __ bind(&two_byte_sequential);
5155 __ AllocateTwoByteString(r0, r2, r4, r6, r7, &runtime); 5159 __ AllocateTwoByteString(r0, r2, r4, r6, r1, &runtime);
5156 5160
5157 // Locate first character of substring to copy. 5161 // Locate first character of substring to copy.
5158 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); 5162 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
5159 __ add(r5, r5, Operand(r3, LSL, 1)); 5163 __ add(r5, r5, Operand(r3, LSL, 1));
5160 // Locate first character of result. 5164 // Locate first character of result.
5161 __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); 5165 __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
5162 5166
5163 // r0: result string. 5167 // r0: result string.
5164 // r1: first character of result. 5168 // r1: first character of result.
5165 // r2: result length. 5169 // r2: result length.
5166 // r5: first character of substring to copy. 5170 // r5: first character of substring to copy.
5167 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); 5171 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
5168 StringHelper::GenerateCopyCharactersLong( 5172 StringHelper::GenerateCopyCharactersLong(
5169 masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED); 5173 masm, r1, r5, r2, r3, r4, r6, r9, DEST_ALWAYS_ALIGNED);
5170 5174
5171 __ bind(&return_r0); 5175 __ bind(&return_r0);
5172 Counters* counters = masm->isolate()->counters(); 5176 Counters* counters = masm->isolate()->counters();
5173 __ IncrementCounter(counters->sub_string_native(), 1, r3, r4); 5177 __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
5174 __ Drop(3); 5178 __ Drop(3);
5175 __ Ret(); 5179 __ Ret();
5176 5180
5177 // Just jump to runtime to create the sub string. 5181 // Just jump to runtime to create the sub string.
5178 __ bind(&runtime); 5182 __ bind(&runtime);
5179 __ TailCallRuntime(Runtime::kSubString, 3, 1); 5183 __ TailCallRuntime(Runtime::kSubString, 3, 1);
(...skipping 245 matching lines...) Expand 10 before | Expand all | Expand 10 after
5425 __ cmp(r6, Operand(2)); 5429 __ cmp(r6, Operand(2));
5426 __ b(ne, &longer_than_two); 5430 __ b(ne, &longer_than_two);
5427 5431
5428 // Check that both strings are non-external ASCII strings. 5432 // Check that both strings are non-external ASCII strings.
5429 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { 5433 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
5430 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); 5434 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
5431 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); 5435 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
5432 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); 5436 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
5433 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); 5437 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
5434 } 5438 }
5435 __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7, 5439 __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r3,
5436 &call_runtime); 5440 &call_runtime);
5437 5441
5438 // Get the two characters forming the sub string. 5442 // Get the two characters forming the sub string.
5439 __ ldrb(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize)); 5443 __ ldrb(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize));
5440 __ ldrb(r3, FieldMemOperand(r1, SeqOneByteString::kHeaderSize)); 5444 __ ldrb(r3, FieldMemOperand(r1, SeqOneByteString::kHeaderSize));
5441 5445
5442 // Try to lookup two character string in string table. If it is not found 5446 // Try to lookup two character string in string table. If it is not found
5443 // just allocate a new one. 5447 // just allocate a new one.
5444 Label make_two_character_string; 5448 Label make_two_character_string;
5445 StringHelper::GenerateTwoCharacterStringTableProbe( 5449 StringHelper::GenerateTwoCharacterStringTableProbe(
5446 masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string); 5450 masm, r2, r3, r6, r0, r4, r5, r9, &make_two_character_string);
5447 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); 5451 __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
5448 __ add(sp, sp, Operand(2 * kPointerSize)); 5452 __ add(sp, sp, Operand(2 * kPointerSize));
5449 __ Ret(); 5453 __ Ret();
5450 5454
5451 __ bind(&make_two_character_string); 5455 __ bind(&make_two_character_string);
5452 // Resulting string has length 2 and first chars of two strings 5456 // Resulting string has length 2 and first chars of two strings
5453 // are combined into single halfword in r2 register. 5457 // are combined into single halfword in r2 register.
5454 // So we can fill resulting string without two loops by a single 5458 // So we can fill resulting string without two loops by a single
5455 // halfword store instruction (which assumes that processor is 5459 // halfword store instruction (which assumes that processor is
5456 // in a little endian mode) 5460 // in a little endian mode)
(...skipping 24 matching lines...) Expand all
5481 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); 5485 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
5482 } 5486 }
5483 Label non_ascii, allocated, ascii_data; 5487 Label non_ascii, allocated, ascii_data;
5484 STATIC_ASSERT(kTwoByteStringTag == 0); 5488 STATIC_ASSERT(kTwoByteStringTag == 0);
5485 __ tst(r4, Operand(kStringEncodingMask)); 5489 __ tst(r4, Operand(kStringEncodingMask));
5486 __ tst(r5, Operand(kStringEncodingMask), ne); 5490 __ tst(r5, Operand(kStringEncodingMask), ne);
5487 __ b(eq, &non_ascii); 5491 __ b(eq, &non_ascii);
5488 5492
5489 // Allocate an ASCII cons string. 5493 // Allocate an ASCII cons string.
5490 __ bind(&ascii_data); 5494 __ bind(&ascii_data);
5491 __ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime); 5495 __ AllocateAsciiConsString(r3, r6, r4, r5, &call_runtime);
5492 __ bind(&allocated); 5496 __ bind(&allocated);
5493 // Fill the fields of the cons string. 5497 // Fill the fields of the cons string.
5494 Label skip_write_barrier, after_writing; 5498 Label skip_write_barrier, after_writing;
5495 ExternalReference high_promotion_mode = ExternalReference:: 5499 ExternalReference high_promotion_mode = ExternalReference::
5496 new_space_high_promotion_mode_active_address(masm->isolate()); 5500 new_space_high_promotion_mode_active_address(masm->isolate());
5497 __ mov(r4, Operand(high_promotion_mode)); 5501 __ mov(r4, Operand(high_promotion_mode));
5498 __ ldr(r4, MemOperand(r4, 0)); 5502 __ ldr(r4, MemOperand(r4, 0));
5499 __ cmp(r4, Operand::Zero()); 5503 __ cmp(r4, Operand::Zero());
5500 __ b(eq, &skip_write_barrier); 5504 __ b(eq, &skip_write_barrier);
5501 5505
5502 __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset)); 5506 __ str(r0, FieldMemOperand(r3, ConsString::kFirstOffset));
5503 __ RecordWriteField(r7, 5507 __ RecordWriteField(r3,
5504 ConsString::kFirstOffset, 5508 ConsString::kFirstOffset,
5505 r0, 5509 r0,
5506 r4, 5510 r4,
5507 kLRHasNotBeenSaved, 5511 kLRHasNotBeenSaved,
5508 kDontSaveFPRegs); 5512 kDontSaveFPRegs);
5509 __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset)); 5513 __ str(r1, FieldMemOperand(r3, ConsString::kSecondOffset));
5510 __ RecordWriteField(r7, 5514 __ RecordWriteField(r3,
5511 ConsString::kSecondOffset, 5515 ConsString::kSecondOffset,
5512 r1, 5516 r1,
5513 r4, 5517 r4,
5514 kLRHasNotBeenSaved, 5518 kLRHasNotBeenSaved,
5515 kDontSaveFPRegs); 5519 kDontSaveFPRegs);
5516 __ jmp(&after_writing); 5520 __ jmp(&after_writing);
5517 5521
5518 __ bind(&skip_write_barrier); 5522 __ bind(&skip_write_barrier);
5519 __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset)); 5523 __ str(r0, FieldMemOperand(r3, ConsString::kFirstOffset));
5520 __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset)); 5524 __ str(r1, FieldMemOperand(r3, ConsString::kSecondOffset));
5521 5525
5522 __ bind(&after_writing); 5526 __ bind(&after_writing);
5523 5527
5524 __ mov(r0, Operand(r7)); 5528 __ mov(r0, Operand(r3));
5525 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); 5529 __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
5526 __ add(sp, sp, Operand(2 * kPointerSize)); 5530 __ add(sp, sp, Operand(2 * kPointerSize));
5527 __ Ret(); 5531 __ Ret();
5528 5532
5529 __ bind(&non_ascii); 5533 __ bind(&non_ascii);
5530 // At least one of the strings is two-byte. Check whether it happens 5534 // At least one of the strings is two-byte. Check whether it happens
5531 // to contain only one byte characters. 5535 // to contain only one byte characters.
5532 // r4: first instance type. 5536 // r4: first instance type.
5533 // r5: second instance type. 5537 // r5: second instance type.
5534 __ tst(r4, Operand(kOneByteDataHintMask)); 5538 __ tst(r4, Operand(kOneByteDataHintMask));
5535 __ tst(r5, Operand(kOneByteDataHintMask), ne); 5539 __ tst(r5, Operand(kOneByteDataHintMask), ne);
5536 __ b(ne, &ascii_data); 5540 __ b(ne, &ascii_data);
5537 __ eor(r4, r4, Operand(r5)); 5541 __ eor(r4, r4, Operand(r5));
5538 STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0); 5542 STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0);
5539 __ and_(r4, r4, Operand(kOneByteStringTag | kOneByteDataHintTag)); 5543 __ and_(r4, r4, Operand(kOneByteStringTag | kOneByteDataHintTag));
5540 __ cmp(r4, Operand(kOneByteStringTag | kOneByteDataHintTag)); 5544 __ cmp(r4, Operand(kOneByteStringTag | kOneByteDataHintTag));
5541 __ b(eq, &ascii_data); 5545 __ b(eq, &ascii_data);
5542 5546
5543 // Allocate a two byte cons string. 5547 // Allocate a two byte cons string.
5544 __ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime); 5548 __ AllocateTwoByteConsString(r3, r6, r4, r5, &call_runtime);
5545 __ jmp(&allocated); 5549 __ jmp(&allocated);
5546 5550
5547 // We cannot encounter sliced strings or cons strings here since: 5551 // We cannot encounter sliced strings or cons strings here since:
5548 STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength); 5552 STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
5549 // Handle creating a flat result from either external or sequential strings. 5553 // Handle creating a flat result from either external or sequential strings.
5550 // Locate the first characters' locations. 5554 // Locate the first characters' locations.
5551 // r0: first string 5555 // r0: first string
5552 // r1: second string 5556 // r1: second string
5553 // r2: length of first string 5557 // r2: length of first string
5554 // r3: length of second string 5558 // r3: length of second string
5555 // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) 5559 // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
5556 // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) 5560 // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
5557 // r6: sum of lengths. 5561 // r6: sum of lengths.
5558 Label first_prepared, second_prepared; 5562 Label first_prepared, second_prepared;
5559 __ bind(&string_add_flat_result); 5563 __ bind(&string_add_flat_result);
5560 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { 5564 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
5561 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); 5565 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
5562 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); 5566 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
5563 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); 5567 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
5564 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); 5568 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
5565 } 5569 }
5566 5570
5567 // Check whether both strings have same encoding 5571 // Check whether both strings have same encoding
5568 __ eor(r7, r4, Operand(r5)); 5572 __ eor(ip, r4, Operand(r5));
5569 __ tst(r7, Operand(kStringEncodingMask)); 5573 ASSERT(__ ImmediateFitsAddrMode1Instruction(kStringEncodingMask));
5574 __ tst(ip, Operand(kStringEncodingMask));
5570 __ b(ne, &call_runtime); 5575 __ b(ne, &call_runtime);
5571 5576
5572 STATIC_ASSERT(kSeqStringTag == 0); 5577 STATIC_ASSERT(kSeqStringTag == 0);
5573 __ tst(r4, Operand(kStringRepresentationMask)); 5578 __ tst(r4, Operand(kStringRepresentationMask));
5574 STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); 5579 STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
5575 __ add(r7, 5580 __ add(r6,
5576 r0, 5581 r0,
5577 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag), 5582 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
5578 LeaveCC, 5583 LeaveCC,
5579 eq); 5584 eq);
5580 __ b(eq, &first_prepared); 5585 __ b(eq, &first_prepared);
5581 // External string: rule out short external string and load string resource. 5586 // External string: rule out short external string and load string resource.
5582 STATIC_ASSERT(kShortExternalStringTag != 0); 5587 STATIC_ASSERT(kShortExternalStringTag != 0);
5583 __ tst(r4, Operand(kShortExternalStringMask)); 5588 __ tst(r4, Operand(kShortExternalStringMask));
5584 __ b(ne, &call_runtime); 5589 __ b(ne, &call_runtime);
5585 __ ldr(r7, FieldMemOperand(r0, ExternalString::kResourceDataOffset)); 5590 __ ldr(r6, FieldMemOperand(r0, ExternalString::kResourceDataOffset));
5586 __ bind(&first_prepared); 5591 __ bind(&first_prepared);
5587 5592
5588 STATIC_ASSERT(kSeqStringTag == 0); 5593 STATIC_ASSERT(kSeqStringTag == 0);
5589 __ tst(r5, Operand(kStringRepresentationMask)); 5594 __ tst(r5, Operand(kStringRepresentationMask));
5590 STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); 5595 STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
5591 __ add(r1, 5596 __ add(r1,
5592 r1, 5597 r1,
5593 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag), 5598 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
5594 LeaveCC, 5599 LeaveCC,
5595 eq); 5600 eq);
5596 __ b(eq, &second_prepared); 5601 __ b(eq, &second_prepared);
5597 // External string: rule out short external string and load string resource. 5602 // External string: rule out short external string and load string resource.
5598 STATIC_ASSERT(kShortExternalStringTag != 0); 5603 STATIC_ASSERT(kShortExternalStringTag != 0);
5599 __ tst(r5, Operand(kShortExternalStringMask)); 5604 __ tst(r5, Operand(kShortExternalStringMask));
5600 __ b(ne, &call_runtime); 5605 __ b(ne, &call_runtime);
5601 __ ldr(r1, FieldMemOperand(r1, ExternalString::kResourceDataOffset)); 5606 __ ldr(r1, FieldMemOperand(r1, ExternalString::kResourceDataOffset));
5602 __ bind(&second_prepared); 5607 __ bind(&second_prepared);
5603 5608
5604 Label non_ascii_string_add_flat_result; 5609 Label non_ascii_string_add_flat_result;
5605 // r7: first character of first string 5610 // r6: first character of first string
5606 // r1: first character of second string 5611 // r1: first character of second string
5607 // r2: length of first string. 5612 // r2: length of first string.
5608 // r3: length of second string. 5613 // r3: length of second string.
5609 // r6: sum of lengths.
5610 // Both strings have the same encoding. 5614 // Both strings have the same encoding.
5611 STATIC_ASSERT(kTwoByteStringTag == 0); 5615 STATIC_ASSERT(kTwoByteStringTag == 0);
5612 __ tst(r5, Operand(kStringEncodingMask)); 5616 __ tst(r5, Operand(kStringEncodingMask));
5613 __ b(eq, &non_ascii_string_add_flat_result); 5617 __ b(eq, &non_ascii_string_add_flat_result);
5614 5618
5615 __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime); 5619 __ add(r2, r2, Operand(r3));
5616 __ add(r6, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); 5620 __ AllocateAsciiString(r0, r2, r4, r5, r9, &call_runtime);
5621 __ sub(r2, r2, Operand(r3));
5622 __ add(r5, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
5617 // r0: result string. 5623 // r0: result string.
5618 // r7: first character of first string. 5624 // r6: first character of first string.
5619 // r1: first character of second string. 5625 // r1: first character of second string.
5620 // r2: length of first string. 5626 // r2: length of first string.
5621 // r3: length of second string. 5627 // r3: length of second string.
5622 // r6: first character of result. 5628 // r5: first character of result.
5623 StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, true); 5629 StringHelper::GenerateCopyCharacters(masm, r5, r6, r2, r4, true);
5624 // r6: next character of result. 5630 // r5: next character of result.
5625 StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true); 5631 StringHelper::GenerateCopyCharacters(masm, r5, r1, r3, r4, true);
5626 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); 5632 __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
5627 __ add(sp, sp, Operand(2 * kPointerSize)); 5633 __ add(sp, sp, Operand(2 * kPointerSize));
5628 __ Ret(); 5634 __ Ret();
5629 5635
5630 __ bind(&non_ascii_string_add_flat_result); 5636 __ bind(&non_ascii_string_add_flat_result);
5631 __ AllocateTwoByteString(r0, r6, r4, r5, r9, &call_runtime); 5637 __ add(r2, r2, Operand(r3));
5632 __ add(r6, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); 5638 __ AllocateTwoByteString(r0, r2, r4, r5, r9, &call_runtime);
5639 __ sub(r2, r2, Operand(r3));
5640 __ add(r5, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
5633 // r0: result string. 5641 // r0: result string.
5634 // r7: first character of first string. 5642 // r6: first character of first string.
5635 // r1: first character of second string. 5643 // r1: first character of second string.
5636 // r2: length of first string. 5644 // r2: length of first string.
5637 // r3: length of second string. 5645 // r3: length of second string.
5638 // r6: first character of result. 5646 // r5: first character of result.
5639 StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, false); 5647 StringHelper::GenerateCopyCharacters(masm, r5, r6, r2, r4, false);
5640 // r6: next character of result. 5648 // r5: next character of result.
5641 StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false); 5649 StringHelper::GenerateCopyCharacters(masm, r5, r1, r3, r4, false);
5642 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); 5650 __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
5643 __ add(sp, sp, Operand(2 * kPointerSize)); 5651 __ add(sp, sp, Operand(2 * kPointerSize));
5644 __ Ret(); 5652 __ Ret();
5645 5653
5646 // Just jump to runtime to add the two strings. 5654 // Just jump to runtime to add the two strings.
5647 __ bind(&call_runtime); 5655 __ bind(&call_runtime);
5648 if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) { 5656 if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
5649 GenerateRegisterArgsPop(masm); 5657 GenerateRegisterArgsPop(masm);
5650 // Build a frame 5658 // Build a frame
5651 { 5659 {
(...skipping 649 matching lines...) Expand 10 before | Expand all | Expand 10 after
6301 struct AheadOfTimeWriteBarrierStubList { 6309 struct AheadOfTimeWriteBarrierStubList {
6302 Register object, value, address; 6310 Register object, value, address;
6303 RememberedSetAction action; 6311 RememberedSetAction action;
6304 }; 6312 };
6305 6313
6306 6314
6307 #define REG(Name) { kRegister_ ## Name ## _Code } 6315 #define REG(Name) { kRegister_ ## Name ## _Code }
6308 6316
6309 static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { 6317 static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
6310 // Used in RegExpExecStub. 6318 // Used in RegExpExecStub.
6311 { REG(r6), REG(r4), REG(r7), EMIT_REMEMBERED_SET }, 6319 { REG(r6), REG(r4), REG(r3), EMIT_REMEMBERED_SET },
6312 // Used in CompileArrayPushCall. 6320 // Used in CompileArrayPushCall.
6313 // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore. 6321 // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
6314 // Also used in KeyedStoreIC::GenerateGeneric. 6322 // Also used in KeyedStoreIC::GenerateGeneric.
6315 { REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET }, 6323 { REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET },
6316 // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField. 6324 // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
6317 { REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET }, 6325 { REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET },
6318 { REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET }, 6326 { REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET },
6319 // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField. 6327 // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
6320 { REG(r2), REG(r1), REG(r3), EMIT_REMEMBERED_SET }, 6328 { REG(r2), REG(r1), REG(r3), EMIT_REMEMBERED_SET },
6321 { REG(r3), REG(r1), REG(r2), EMIT_REMEMBERED_SET }, 6329 { REG(r3), REG(r1), REG(r2), EMIT_REMEMBERED_SET },
6322 // KeyedStoreStubCompiler::GenerateStoreFastElement. 6330 // KeyedStoreStubCompiler::GenerateStoreFastElement.
6323 { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET }, 6331 { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET },
6324 { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET }, 6332 { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET },
6325 // ElementsTransitionGenerator::GenerateMapChangeElementTransition 6333 // ElementsTransitionGenerator::GenerateMapChangeElementTransition
6326 // and ElementsTransitionGenerator::GenerateSmiToDouble 6334 // and ElementsTransitionGenerator::GenerateSmiToDouble
6327 // and ElementsTransitionGenerator::GenerateDoubleToObject 6335 // and ElementsTransitionGenerator::GenerateDoubleToObject
6328 { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET }, 6336 { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET },
6329 { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET }, 6337 { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET },
6330 // ElementsTransitionGenerator::GenerateDoubleToObject 6338 // ElementsTransitionGenerator::GenerateDoubleToObject
6331 { REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET }, 6339 { REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET },
6332 { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET }, 6340 { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET },
6333 // StoreArrayLiteralElementStub::Generate 6341 // StoreArrayLiteralElementStub::Generate
6334 { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET }, 6342 { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
6335 // FastNewClosureStub::Generate 6343 // FastNewClosureStub::Generate
6336 { REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET }, 6344 { REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET },
6337 // StringAddStub::Generate 6345 // StringAddStub::Generate
6338 { REG(r7), REG(r1), REG(r4), EMIT_REMEMBERED_SET }, 6346 { REG(r3), REG(r1), REG(r4), EMIT_REMEMBERED_SET },
6339 { REG(r7), REG(r0), REG(r4), EMIT_REMEMBERED_SET }, 6347 { REG(r3), REG(r0), REG(r4), EMIT_REMEMBERED_SET },
6340 // Null termination. 6348 // Null termination.
6341 { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET} 6349 { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
6342 }; 6350 };
6343 6351
6344 #undef REG 6352 #undef REG
6345 6353
6346 6354
6347 bool RecordWriteStub::IsPregenerated(Isolate* isolate) { 6355 bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
6348 for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; 6356 for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
6349 !entry->object.is(no_reg); 6357 !entry->object.is(no_reg);
(...skipping 711 matching lines...) Expand 10 before | Expand all | Expand 10 after
7061 __ bind(&fast_elements_case); 7069 __ bind(&fast_elements_case);
7062 GenerateCase(masm, FAST_ELEMENTS); 7070 GenerateCase(masm, FAST_ELEMENTS);
7063 } 7071 }
7064 7072
7065 7073
7066 #undef __ 7074 #undef __
7067 7075
7068 } } // namespace v8::internal 7076 } } // namespace v8::internal
7069 7077
7070 #endif // V8_TARGET_ARCH_ARM 7078 #endif // V8_TARGET_ARCH_ARM
OLDNEW
« no previous file with comments | « src/arm/code-stubs-arm.h ('k') | src/arm/codegen-arm.cc » ('j') | src/arm/stub-cache-arm.cc » ('J')

Powered by Google App Engine
This is Rietveld 408576698