OLD | NEW |
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM. | 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM. |
6 #if defined(TARGET_ARCH_ARM) | 6 #if defined(TARGET_ARCH_ARM) |
7 | 7 |
8 #include "vm/intrinsifier.h" | 8 #include "vm/intrinsifier.h" |
9 | 9 |
10 #include "vm/assembler.h" | 10 #include "vm/assembler.h" |
(...skipping 561 matching lines...)
572 __ b(&fall_through, LT); | 572 __ b(&fall_through, LT); |
573 __ SmiUntag(R1); | 573 __ SmiUntag(R1); |
574 | 574 |
575 // Pull off high bits that will be shifted off of R1 by making a mask | 575 // Pull off high bits that will be shifted off of R1 by making a mask |
576 // ((1 << R0) - 1), shifting it to the left, masking R1, then shifting back. | 576 // ((1 << R0) - 1), shifting it to the left, masking R1, then shifting back. |
577 // high bits = ((((1 << R0) - 1) << (32 - R0)) & R1) >> (32 - R0) | 577 // high bits = ((((1 << R0) - 1) << (32 - R0)) & R1) >> (32 - R0) |
578 // lo bits = R1 << R0 | 578 // lo bits = R1 << R0 |
579 __ LoadImmediate(R7, 1); | 579 __ LoadImmediate(R7, 1); |
580 __ mov(R7, Operand(R7, LSL, R0)); // R7 <- 1 << R0 | 580 __ mov(R7, Operand(R7, LSL, R0)); // R7 <- 1 << R0 |
581 __ sub(R7, R7, Operand(1)); // R7 <- R7 - 1 | 581 __ sub(R7, R7, Operand(1)); // R7 <- R7 - 1 |
582 __ rsb(R9, R0, Operand(32)); // R9 <- 32 - R0 | 582 __ rsb(R10, R0, Operand(32)); // R10 <- 32 - R0 |
583 __ mov(R7, Operand(R7, LSL, R9)); // R7 <- R7 << R9 | 583 __ mov(R7, Operand(R7, LSL, R10)); // R7 <- R7 << R10 |
584 __ and_(R7, R1, Operand(R7)); // R7 <- R7 & R1 | 584 __ and_(R7, R1, Operand(R7)); // R7 <- R7 & R1 |
585 __ mov(R7, Operand(R7, LSR, R9)); // R7 <- R7 >> R9 | 585 __ mov(R7, Operand(R7, LSR, R10)); // R7 <- R7 >> R10 |
586 // Now R7 has the bits that fall off of R1 on a left shift. | 586 // Now R7 has the bits that fall off of R1 on a left shift. |
587 __ mov(R1, Operand(R1, LSL, R0)); // R1 gets the low bits. | 587 __ mov(R1, Operand(R1, LSL, R0)); // R1 gets the low bits. |
588 | 588 |
589 const Class& mint_class = Class::Handle( | 589 const Class& mint_class = Class::Handle( |
590 Isolate::Current()->object_store()->mint_class()); | 590 Isolate::Current()->object_store()->mint_class()); |
591 __ TryAllocate(mint_class, &fall_through, R0, R2); | 591 __ TryAllocate(mint_class, &fall_through, R0, R2); |
592 | 592 |
593 | 593 |
594 __ str(R1, FieldAddress(R0, Mint::value_offset())); | 594 __ str(R1, FieldAddress(R0, Mint::value_offset())); |
595 __ str(R7, FieldAddress(R0, Mint::value_offset() + kWordSize)); | 595 __ str(R7, FieldAddress(R0, Mint::value_offset() + kWordSize)); |
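Note: the sequence above splits the 64-bit result of a Smi left shift into two 32-bit halves before boxing it in a Mint. A minimal C++ sketch of the same bit manipulation, assuming a non-negative value v and a shift amount s in [1, 31]; the function and variable names are illustrative, not part of the VM:

#include <cassert>
#include <cstdint>

// Split (v << s) into the low and high 32-bit digits stored into the Mint.
static void smi_shl_split(uint32_t v, uint32_t s, uint32_t* lo, uint32_t* hi) {
  uint32_t mask = ((1u << s) - 1u) << (32u - s);  // bits that fall off v
  *hi = (v & mask) >> (32u - s);                  // R7 in the assembly
  *lo = v << s;                                   // R1 in the assembly
}

int main() {
  uint32_t lo, hi;
  smi_shl_split(0x40000000u, 3, &lo, &hi);
  assert((((uint64_t)hi << 32) | lo) == ((uint64_t)0x40000000u << 3));
  return 0;
}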
(...skipping 217 matching lines...)
813 __ ldrd(R2, SP, 2 * kWordSize); | 813 __ ldrd(R2, SP, 2 * kWordSize); |
814 // R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0. | 814 // R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0. |
815 __ ldrd(R4, SP, 0 * kWordSize); | 815 __ ldrd(R4, SP, 0 * kWordSize); |
816 __ SmiUntag(R5); | 816 __ SmiUntag(R5); |
817 // R0 = n ~/ _DIGIT_BITS | 817 // R0 = n ~/ _DIGIT_BITS |
818 __ Asr(R0, R5, Operand(5)); | 818 __ Asr(R0, R5, Operand(5)); |
819 // R6 = &x_digits[0] | 819 // R6 = &x_digits[0] |
820 __ add(R6, R3, Operand(TypedData::data_offset() - kHeapObjectTag)); | 820 __ add(R6, R3, Operand(TypedData::data_offset() - kHeapObjectTag)); |
821 // R7 = &x_digits[x_used] | 821 // R7 = &x_digits[x_used] |
822 __ add(R7, R6, Operand(R2, LSL, 1)); | 822 __ add(R7, R6, Operand(R2, LSL, 1)); |
823 // R9 = &r_digits[1] | 823 // R10 = &r_digits[1] |
824 __ add(R9, R4, Operand(TypedData::data_offset() - kHeapObjectTag + | 824 __ add(R10, R4, Operand(TypedData::data_offset() - kHeapObjectTag + |
825 Bigint::kBytesPerDigit)); | 825 Bigint::kBytesPerDigit)); |
826 // R9 = &r_digits[x_used + n ~/ _DIGIT_BITS + 1] | 826 // R10 = &r_digits[x_used + n ~/ _DIGIT_BITS + 1] |
827 __ add(R0, R0, Operand(R2, ASR, 1)); | 827 __ add(R0, R0, Operand(R2, ASR, 1)); |
828 __ add(R9, R9, Operand(R0, LSL, 2)); | 828 __ add(R10, R10, Operand(R0, LSL, 2)); |
829 // R3 = n % _DIGIT_BITS | 829 // R3 = n % _DIGIT_BITS |
830 __ and_(R3, R5, Operand(31)); | 830 __ and_(R3, R5, Operand(31)); |
831 // R2 = 32 - R3 | 831 // R2 = 32 - R3 |
832 __ rsb(R2, R3, Operand(32)); | 832 __ rsb(R2, R3, Operand(32)); |
833 __ mov(R1, Operand(0)); | 833 __ mov(R1, Operand(0)); |
834 Label loop; | 834 Label loop; |
835 __ Bind(&loop); | 835 __ Bind(&loop); |
836 __ ldr(R0, Address(R7, -Bigint::kBytesPerDigit, Address::PreIndex)); | 836 __ ldr(R0, Address(R7, -Bigint::kBytesPerDigit, Address::PreIndex)); |
837 __ orr(R1, R1, Operand(R0, LSR, R2)); | 837 __ orr(R1, R1, Operand(R0, LSR, R2)); |
838 __ str(R1, Address(R9, -Bigint::kBytesPerDigit, Address::PreIndex)); | 838 __ str(R1, Address(R10, -Bigint::kBytesPerDigit, Address::PreIndex)); |
839 __ mov(R1, Operand(R0, LSL, R3)); | 839 __ mov(R1, Operand(R0, LSL, R3)); |
840 __ teq(R7, Operand(R6)); | 840 __ teq(R7, Operand(R6)); |
841 __ b(&loop, NE); | 841 __ b(&loop, NE); |
842 __ str(R1, Address(R9, -Bigint::kBytesPerDigit, Address::PreIndex)); | 842 __ str(R1, Address(R10, -Bigint::kBytesPerDigit, Address::PreIndex)); |
843 // Returning Object::null() is not required, since this method is private. | 843 // Returning Object::null() is not required, since this method is private. |
844 __ Ret(); | 844 __ Ret(); |
845 } | 845 } |
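Note: a rough C++ sketch of the digit loop in Bigint_lsh above, assuming 32-bit digits, n % 32 != 0 (the intrinsic is only entered in that case), and an r_digits array with room for x_used + n/32 + 1 digits. Names are illustrative only:

#include <cstdint>

static void bigint_lsh(const uint32_t* x_digits, int x_used, int n,
                       uint32_t* r_digits) {
  const int digit_shift = n / 32;       // n ~/ _DIGIT_BITS (the Asr by 5)
  const int bit_shift = n % 32;         // n % _DIGIT_BITS, assumed non-zero
  const int carry_shift = 32 - bit_shift;
  uint32_t carry = 0;                   // R1 in the assembly
  // Walk x_digits from the most significant digit down, mirroring the
  // pre-decremented R7 (source) and R10 (destination) pointers.
  for (int i = x_used - 1; i >= 0; --i) {
    uint32_t d = x_digits[i];
    r_digits[i + digit_shift + 1] = carry | (d >> carry_shift);
    carry = d << bit_shift;
  }
  r_digits[digit_shift] = carry;        // final store after the loop
}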
846 | 846 |
847 | 847 |
848 void Intrinsifier::Bigint_rsh(Assembler* assembler) { | 848 void Intrinsifier::Bigint_rsh(Assembler* assembler) { |
849 // static void _rsh(Uint32List x_digits, int x_used, int n, | 849 // static void _rsh(Uint32List x_digits, int x_used, int n, |
850 // Uint32List r_digits) | 850 // Uint32List r_digits) |
851 | 851 |
852 // R2 = x_used, R3 = x_digits, x_used > 0, x_used is Smi. | 852 // R2 = x_used, R3 = x_digits, x_used > 0, x_used is Smi. |
853 __ ldrd(R2, SP, 2 * kWordSize); | 853 __ ldrd(R2, SP, 2 * kWordSize); |
854 // R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0. | 854 // R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0. |
855 __ ldrd(R4, SP, 0 * kWordSize); | 855 __ ldrd(R4, SP, 0 * kWordSize); |
856 __ SmiUntag(R5); | 856 __ SmiUntag(R5); |
857 // R0 = n ~/ _DIGIT_BITS | 857 // R0 = n ~/ _DIGIT_BITS |
858 __ Asr(R0, R5, Operand(5)); | 858 __ Asr(R0, R5, Operand(5)); |
859 // R9 = &r_digits[0] | 859 // R10 = &r_digits[0] |
860 __ add(R9, R4, Operand(TypedData::data_offset() - kHeapObjectTag)); | 860 __ add(R10, R4, Operand(TypedData::data_offset() - kHeapObjectTag)); |
861 // R7 = &x_digits[n ~/ _DIGIT_BITS] | 861 // R7 = &x_digits[n ~/ _DIGIT_BITS] |
862 __ add(R7, R3, Operand(TypedData::data_offset() - kHeapObjectTag)); | 862 __ add(R7, R3, Operand(TypedData::data_offset() - kHeapObjectTag)); |
863 __ add(R7, R7, Operand(R0, LSL, 2)); | 863 __ add(R7, R7, Operand(R0, LSL, 2)); |
864 // R6 = &r_digits[x_used - n ~/ _DIGIT_BITS - 1] | 864 // R6 = &r_digits[x_used - n ~/ _DIGIT_BITS - 1] |
865 __ add(R0, R0, Operand(1)); | 865 __ add(R0, R0, Operand(1)); |
866 __ rsb(R0, R0, Operand(R2, ASR, 1)); | 866 __ rsb(R0, R0, Operand(R2, ASR, 1)); |
867 __ add(R6, R9, Operand(R0, LSL, 2)); | 867 __ add(R6, R10, Operand(R0, LSL, 2)); |
868 // R3 = n % _DIGIT_BITS | 868 // R3 = n % _DIGIT_BITS |
869 __ and_(R3, R5, Operand(31)); | 869 __ and_(R3, R5, Operand(31)); |
870 // R2 = 32 - R3 | 870 // R2 = 32 - R3 |
871 __ rsb(R2, R3, Operand(32)); | 871 __ rsb(R2, R3, Operand(32)); |
872 // R1 = x_digits[n ~/ _DIGIT_BITS] >> (n % _DIGIT_BITS) | 872 // R1 = x_digits[n ~/ _DIGIT_BITS] >> (n % _DIGIT_BITS) |
873 __ ldr(R1, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex)); | 873 __ ldr(R1, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex)); |
874 __ mov(R1, Operand(R1, LSR, R3)); | 874 __ mov(R1, Operand(R1, LSR, R3)); |
875 Label loop_entry; | 875 Label loop_entry; |
876 __ b(&loop_entry); | 876 __ b(&loop_entry); |
877 Label loop; | 877 Label loop; |
878 __ Bind(&loop); | 878 __ Bind(&loop); |
879 __ ldr(R0, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex)); | 879 __ ldr(R0, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex)); |
880 __ orr(R1, R1, Operand(R0, LSL, R2)); | 880 __ orr(R1, R1, Operand(R0, LSL, R2)); |
881 __ str(R1, Address(R9, Bigint::kBytesPerDigit, Address::PostIndex)); | 881 __ str(R1, Address(R10, Bigint::kBytesPerDigit, Address::PostIndex)); |
882 __ mov(R1, Operand(R0, LSR, R3)); | 882 __ mov(R1, Operand(R0, LSR, R3)); |
883 __ Bind(&loop_entry); | 883 __ Bind(&loop_entry); |
884 __ teq(R9, Operand(R6)); | 884 __ teq(R10, Operand(R6)); |
885 __ b(&loop, NE); | 885 __ b(&loop, NE); |
886 __ str(R1, Address(R9, 0)); | 886 __ str(R1, Address(R10, 0)); |
887 // Returning Object::null() is not required, since this method is private. | 887 // Returning Object::null() is not required, since this method is private. |
888 __ Ret(); | 888 __ Ret(); |
889 } | 889 } |
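Note: the mirror image for Bigint_rsh, again a hedged sketch with illustrative names, assuming n % 32 != 0 and x_used > n / 32:

#include <cstdint>

static void bigint_rsh(const uint32_t* x_digits, int x_used, int n,
                       uint32_t* r_digits) {
  const int digit_shift = n / 32;
  const int bit_shift = n % 32;             // assumed non-zero
  const int carry_shift = 32 - bit_shift;
  const int r_used = x_used - digit_shift;  // digits written to r_digits
  uint32_t carry = x_digits[digit_shift] >> bit_shift;
  // Post-indexed loads/stores in the assembly walk both arrays upward.
  for (int i = 0; i < r_used - 1; ++i) {
    uint32_t d = x_digits[digit_shift + i + 1];
    r_digits[i] = carry | (d << carry_shift);
    carry = d >> bit_shift;
  }
  r_digits[r_used - 1] = carry;             // final str after the loop
}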
890 | 890 |
891 | 891 |
892 void Intrinsifier::Bigint_absAdd(Assembler* assembler) { | 892 void Intrinsifier::Bigint_absAdd(Assembler* assembler) { |
893 // static void _absAdd(Uint32List digits, int used, | 893 // static void _absAdd(Uint32List digits, int used, |
894 // Uint32List a_digits, int a_used, | 894 // Uint32List a_digits, int a_used, |
895 // Uint32List r_digits) | 895 // Uint32List r_digits) |
896 | 896 |
897 // R2 = used, R3 = digits | 897 // R2 = used, R3 = digits |
898 __ ldrd(R2, SP, 3 * kWordSize); | 898 __ ldrd(R2, SP, 3 * kWordSize); |
899 // R3 = &digits[0] | 899 // R3 = &digits[0] |
900 __ add(R3, R3, Operand(TypedData::data_offset() - kHeapObjectTag)); | 900 __ add(R3, R3, Operand(TypedData::data_offset() - kHeapObjectTag)); |
901 | 901 |
902 // R4 = a_used, R5 = a_digits | 902 // R4 = a_used, R5 = a_digits |
903 __ ldrd(R4, SP, 1 * kWordSize); | 903 __ ldrd(R4, SP, 1 * kWordSize); |
904 // R5 = &a_digits[0] | 904 // R5 = &a_digits[0] |
905 __ add(R5, R5, Operand(TypedData::data_offset() - kHeapObjectTag)); | 905 __ add(R5, R5, Operand(TypedData::data_offset() - kHeapObjectTag)); |
906 | 906 |
907 // R6 = r_digits | 907 // R6 = r_digits |
908 __ ldr(R6, Address(SP, 0 * kWordSize)); | 908 __ ldr(R6, Address(SP, 0 * kWordSize)); |
909 // R6 = &r_digits[0] | 909 // R6 = &r_digits[0] |
910 __ add(R6, R6, Operand(TypedData::data_offset() - kHeapObjectTag)); | 910 __ add(R6, R6, Operand(TypedData::data_offset() - kHeapObjectTag)); |
911 | 911 |
912 // R7 = &digits[a_used >> 1], a_used is Smi. | 912 // R7 = &digits[a_used >> 1], a_used is Smi. |
913 __ add(R7, R3, Operand(R4, LSL, 1)); | 913 __ add(R7, R3, Operand(R4, LSL, 1)); |
914 | 914 |
915 // R9 = &digits[used >> 1], used is Smi. | 915 // R10 = &digits[used >> 1], used is Smi. |
916 __ add(R9, R3, Operand(R2, LSL, 1)); | 916 __ add(R10, R3, Operand(R2, LSL, 1)); |
917 | 917 |
918 __ adds(R0, R0, Operand(0)); // carry flag = 0 | 918 __ adds(R0, R0, Operand(0)); // carry flag = 0 |
919 Label add_loop; | 919 Label add_loop; |
920 __ Bind(&add_loop); | 920 __ Bind(&add_loop); |
921 // Loop a_used times, a_used > 0. | 921 // Loop a_used times, a_used > 0. |
922 __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex)); | 922 __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex)); |
923 __ ldr(R1, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex)); | 923 __ ldr(R1, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex)); |
924 __ adcs(R0, R0, Operand(R1)); | 924 __ adcs(R0, R0, Operand(R1)); |
925 __ teq(R3, Operand(R7)); // Does not affect carry flag. | 925 __ teq(R3, Operand(R7)); // Does not affect carry flag. |
926 __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex)); | 926 __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex)); |
927 __ b(&add_loop, NE); | 927 __ b(&add_loop, NE); |
928 | 928 |
929 Label last_carry; | 929 Label last_carry; |
930 __ teq(R3, Operand(R9)); // Does not affect carry flag. | 930 __ teq(R3, Operand(R10)); // Does not affect carry flag. |
931 __ b(&last_carry, EQ); // If used - a_used == 0. | 931 __ b(&last_carry, EQ); // If used - a_used == 0. |
932 | 932 |
933 Label carry_loop; | 933 Label carry_loop; |
934 __ Bind(&carry_loop); | 934 __ Bind(&carry_loop); |
935 // Loop used - a_used times, used - a_used > 0. | 935 // Loop used - a_used times, used - a_used > 0. |
936 __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex)); | 936 __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex)); |
937 __ adcs(R0, R0, Operand(0)); | 937 __ adcs(R0, R0, Operand(0)); |
938 __ teq(R3, Operand(R9)); // Does not affect carry flag. | 938 __ teq(R3, Operand(R10)); // Does not affect carry flag. |
939 __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex)); | 939 __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex)); |
940 __ b(&carry_loop, NE); | 940 __ b(&carry_loop, NE); |
941 | 941 |
942 __ Bind(&last_carry); | 942 __ Bind(&last_carry); |
943 __ mov(R0, Operand(0)); | 943 __ mov(R0, Operand(0)); |
944 __ adc(R0, R0, Operand(0)); | 944 __ adc(R0, R0, Operand(0)); |
945 __ str(R0, Address(R6, 0)); | 945 __ str(R0, Address(R6, 0)); |
946 | 946 |
947 // Returning Object::null() is not required, since this method is private. | 947 // Returning Object::null() is not required, since this method is private. |
948 __ Ret(); | 948 __ Ret(); |
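Note: a short C++ sketch of the carry chain in Bigint_absAdd, assuming untagged digit counts and an r_digits array with used + 1 slots; names are illustrative:

#include <cstdint>

static void bigint_abs_add(const uint32_t* digits, int used,
                           const uint32_t* a_digits, int a_used,
                           uint32_t* r_digits) {
  uint64_t carry = 0;
  int i = 0;
  for (; i < a_used; ++i) {               // add_loop: digits[i] + a_digits[i]
    uint64_t t = (uint64_t)digits[i] + a_digits[i] + carry;
    r_digits[i] = (uint32_t)t;
    carry = t >> 32;
  }
  for (; i < used; ++i) {                 // carry_loop: propagate carry only
    uint64_t t = (uint64_t)digits[i] + carry;
    r_digits[i] = (uint32_t)t;
    carry = t >> 32;
  }
  r_digits[used] = (uint32_t)carry;       // last_carry
}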
(...skipping 16 matching lines...)
965 __ add(R5, R5, Operand(TypedData::data_offset() - kHeapObjectTag)); | 965 __ add(R5, R5, Operand(TypedData::data_offset() - kHeapObjectTag)); |
966 | 966 |
967 // R6 = r_digits | 967 // R6 = r_digits |
968 __ ldr(R6, Address(SP, 0 * kWordSize)); | 968 __ ldr(R6, Address(SP, 0 * kWordSize)); |
969 // R6 = &r_digits[0] | 969 // R6 = &r_digits[0] |
970 __ add(R6, R6, Operand(TypedData::data_offset() - kHeapObjectTag)); | 970 __ add(R6, R6, Operand(TypedData::data_offset() - kHeapObjectTag)); |
971 | 971 |
972 // R7 = &digits[a_used >> 1], a_used is Smi. | 972 // R7 = &digits[a_used >> 1], a_used is Smi. |
973 __ add(R7, R3, Operand(R4, LSL, 1)); | 973 __ add(R7, R3, Operand(R4, LSL, 1)); |
974 | 974 |
975 // R9 = &digits[used >> 1], used is Smi. | 975 // R10 = &digits[used >> 1], used is Smi. |
976 __ add(R9, R3, Operand(R2, LSL, 1)); | 976 __ add(R10, R3, Operand(R2, LSL, 1)); |
977 | 977 |
978 __ subs(R0, R0, Operand(0)); // carry flag = 1 | 978 __ subs(R0, R0, Operand(0)); // carry flag = 1 |
979 Label sub_loop; | 979 Label sub_loop; |
980 __ Bind(&sub_loop); | 980 __ Bind(&sub_loop); |
981 // Loop a_used times, a_used > 0. | 981 // Loop a_used times, a_used > 0. |
982 __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex)); | 982 __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex)); |
983 __ ldr(R1, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex)); | 983 __ ldr(R1, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex)); |
984 __ sbcs(R0, R0, Operand(R1)); | 984 __ sbcs(R0, R0, Operand(R1)); |
985 __ teq(R3, Operand(R7)); // Does not affect carry flag. | 985 __ teq(R3, Operand(R7)); // Does not affect carry flag. |
986 __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex)); | 986 __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex)); |
987 __ b(&sub_loop, NE); | 987 __ b(&sub_loop, NE); |
988 | 988 |
989 Label done; | 989 Label done; |
990 __ teq(R3, Operand(R9)); // Does not affect carry flag. | 990 __ teq(R3, Operand(R10)); // Does not affect carry flag. |
991 __ b(&done, EQ); // If used - a_used == 0. | 991 __ b(&done, EQ); // If used - a_used == 0. |
992 | 992 |
993 Label carry_loop; | 993 Label carry_loop; |
994 __ Bind(&carry_loop); | 994 __ Bind(&carry_loop); |
995 // Loop used - a_used times, used - a_used > 0. | 995 // Loop used - a_used times, used - a_used > 0. |
996 __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex)); | 996 __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex)); |
997 __ sbcs(R0, R0, Operand(0)); | 997 __ sbcs(R0, R0, Operand(0)); |
998 __ teq(R3, Operand(R9)); // Does not affect carry flag. | 998 __ teq(R3, Operand(R10)); // Does not affect carry flag. |
999 __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex)); | 999 __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex)); |
1000 __ b(&carry_loop, NE); | 1000 __ b(&carry_loop, NE); |
1001 | 1001 |
1002 __ Bind(&done); | 1002 __ Bind(&done); |
1003 // Returning Object::null() is not required, since this method is private. | 1003 // Returning Object::null() is not required, since this method is private. |
1004 __ Ret(); | 1004 __ Ret(); |
1005 } | 1005 } |
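Note: Bigint_absSub has the same loop structure with a borrow instead of a carry (the ARM carry flag is set before the loop because sbcs subtracts the inverted carry). A hedged sketch, assuming |digits| >= |a_digits| so no borrow remains at the end; names are illustrative:

#include <cstdint>

static void bigint_abs_sub(const uint32_t* digits, int used,
                           const uint32_t* a_digits, int a_used,
                           uint32_t* r_digits) {
  uint32_t borrow = 0;
  int i = 0;
  for (; i < a_used; ++i) {               // sub_loop
    uint64_t t = (uint64_t)digits[i] - a_digits[i] - borrow;
    r_digits[i] = (uint32_t)t;
    borrow = (uint32_t)(t >> 63);         // 1 iff the subtraction wrapped
  }
  for (; i < used; ++i) {                 // carry_loop
    uint64_t t = (uint64_t)digits[i] - borrow;
    r_digits[i] = (uint32_t)t;
    borrow = (uint32_t)(t >> 63);
  }
}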
1006 | 1006 |
1007 | 1007 |
1008 void Intrinsifier::Bigint_mulAdd(Assembler* assembler) { | 1008 void Intrinsifier::Bigint_mulAdd(Assembler* assembler) { |
(...skipping 148 matching lines...)
1157 | 1157 |
1158 // *ajp++ = low32(t) = R0 | 1158 // *ajp++ = low32(t) = R0 |
1159 __ str(R0, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex)); | 1159 __ str(R0, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex)); |
1160 | 1160 |
1161 // R6 = low32(c) = high32(t) | 1161 // R6 = low32(c) = high32(t) |
1162 // R7 = high32(c) = 0 | 1162 // R7 = high32(c) = 0 |
1163 __ mov(R7, Operand(0)); | 1163 __ mov(R7, Operand(0)); |
1164 | 1164 |
1165 // int n = used - i - 1; while (--n >= 0) ... | 1165 // int n = used - i - 1; while (--n >= 0) ... |
1166 __ ldr(R0, Address(SP, 0 * kWordSize)); // used is Smi | 1166 __ ldr(R0, Address(SP, 0 * kWordSize)); // used is Smi |
1167 __ sub(R9, R0, Operand(R2)); | 1167 __ sub(R10, R0, Operand(R2)); |
1168 __ mov(R0, Operand(2)); // n = used - i - 2; if (n >= 0) ... while (--n >= 0) | 1168 __ mov(R0, Operand(2)); // n = used - i - 2; if (n >= 0) ... while (--n >= 0) |
1169 __ rsbs(R9, R0, Operand(R9, ASR, kSmiTagSize)); | 1169 __ rsbs(R10, R0, Operand(R10, ASR, kSmiTagSize)); |
1170 | 1170 |
1171 Label loop, done; | 1171 Label loop, done; |
1172 __ b(&done, MI); | 1172 __ b(&done, MI); |
1173 | 1173 |
1174 __ Bind(&loop); | 1174 __ Bind(&loop); |
1175 // x: R3 | 1175 // x: R3 |
1176 // xip: R4 | 1176 // xip: R4 |
1177 // ajp: R5 | 1177 // ajp: R5 |
1178 // c: R7:R6 | 1178 // c: R7:R6 |
1179 // t: R2:R1:R0 (not live at loop entry) | 1179 // t: R2:R1:R0 (not live at loop entry) |
1180 // n: R9 | 1180 // n: R10 |
1181 | 1181 |
1182 // uint32_t xi = *xip++ | 1182 // uint32_t xi = *xip++ |
1183 __ ldr(R2, Address(R4, Bigint::kBytesPerDigit, Address::PostIndex)); | 1183 __ ldr(R2, Address(R4, Bigint::kBytesPerDigit, Address::PostIndex)); |
1184 | 1184 |
1185 // uint96_t t = R7:R6:R0 = 2*x*xi + aj + c | 1185 // uint96_t t = R7:R6:R0 = 2*x*xi + aj + c |
1186 __ umull(R0, R1, R2, R3); // R1:R0 = R2*R3. | 1186 __ umull(R0, R1, R2, R3); // R1:R0 = R2*R3. |
1187 __ adds(R0, R0, Operand(R0)); | 1187 __ adds(R0, R0, Operand(R0)); |
1188 __ adcs(R1, R1, Operand(R1)); | 1188 __ adcs(R1, R1, Operand(R1)); |
1189 __ mov(R2, Operand(0)); | 1189 __ mov(R2, Operand(0)); |
1190 __ adc(R2, R2, Operand(0)); // R2:R1:R0 = 2*x*xi. | 1190 __ adc(R2, R2, Operand(0)); // R2:R1:R0 = 2*x*xi. |
1191 __ adds(R0, R0, Operand(R6)); | 1191 __ adds(R0, R0, Operand(R6)); |
1192 __ adcs(R1, R1, Operand(R7)); | 1192 __ adcs(R1, R1, Operand(R7)); |
1193 __ adc(R2, R2, Operand(0)); // R2:R1:R0 = 2*x*xi + c. | 1193 __ adc(R2, R2, Operand(0)); // R2:R1:R0 = 2*x*xi + c. |
1194 __ ldr(R6, Address(R5, 0)); // R6 = aj = *ajp. | 1194 __ ldr(R6, Address(R5, 0)); // R6 = aj = *ajp. |
1195 __ adds(R0, R0, Operand(R6)); | 1195 __ adds(R0, R0, Operand(R6)); |
1196 __ adcs(R6, R1, Operand(0)); | 1196 __ adcs(R6, R1, Operand(0)); |
1197 __ adc(R7, R2, Operand(0)); // R7:R6:R0 = 2*x*xi + c + aj. | 1197 __ adc(R7, R2, Operand(0)); // R7:R6:R0 = 2*x*xi + c + aj. |
1198 | 1198 |
1199 // *ajp++ = low32(t) = R0 | 1199 // *ajp++ = low32(t) = R0 |
1200 __ str(R0, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex)); | 1200 __ str(R0, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex)); |
1201 | 1201 |
1202 // while (--n >= 0) | 1202 // while (--n >= 0) |
1203 __ subs(R9, R9, Operand(1)); // --n | 1203 __ subs(R10, R10, Operand(1)); // --n |
1204 __ b(&loop, PL); | 1204 __ b(&loop, PL); |
1205 | 1205 |
1206 __ Bind(&done); | 1206 __ Bind(&done); |
1207 // uint32_t aj = *ajp | 1207 // uint32_t aj = *ajp |
1208 __ ldr(R0, Address(R5, 0)); | 1208 __ ldr(R0, Address(R5, 0)); |
1209 | 1209 |
1210 // uint64_t t = aj + c | 1210 // uint64_t t = aj + c |
1211 __ adds(R6, R6, Operand(R0)); | 1211 __ adds(R6, R6, Operand(R0)); |
1212 __ adc(R7, R7, Operand(0)); | 1212 __ adc(R7, R7, Operand(0)); |
1213 | 1213 |
(...skipping 794 matching lines...)
2008 __ LoadClassId(R1, R1); | 2008 __ LoadClassId(R1, R1); |
2009 __ AddImmediate(R1, R1, -kOneByteStringCid); | 2009 __ AddImmediate(R1, R1, -kOneByteStringCid); |
2010 __ add(R1, R2, Operand(R1, LSL, kWordSizeLog2)); | 2010 __ add(R1, R2, Operand(R1, LSL, kWordSizeLog2)); |
2011 __ ldr(R0, FieldAddress(R1, JSRegExp::function_offset(kOneByteStringCid))); | 2011 __ ldr(R0, FieldAddress(R1, JSRegExp::function_offset(kOneByteStringCid))); |
2012 | 2012 |
2013 // Registers are now set up for the lazy compile stub. It expects the function | 2013 // Registers are now set up for the lazy compile stub. It expects the function |
2014 // in R0, the argument descriptor in R4, and IC-Data in R5. | 2014 // in R0, the argument descriptor in R4, and IC-Data in R5. |
2015 __ eor(R5, R5, Operand(R5)); | 2015 __ eor(R5, R5, Operand(R5)); |
2016 | 2016 |
2017 // Tail-call the function. | 2017 // Tail-call the function. |
| 2018 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset())); |
2018 __ ldr(R1, FieldAddress(R0, Function::entry_point_offset())); | 2019 __ ldr(R1, FieldAddress(R0, Function::entry_point_offset())); |
2019 __ bx(R1); | 2020 __ bx(R1); |
2020 } | 2021 } |
2021 | 2022 |
2022 | 2023 |
2023 // On stack: user tag (+0). | 2024 // On stack: user tag (+0). |
2024 void Intrinsifier::UserTag_makeCurrent(Assembler* assembler) { | 2025 void Intrinsifier::UserTag_makeCurrent(Assembler* assembler) { |
2025 // R1: Isolate. | 2026 // R1: Isolate. |
2026 __ LoadIsolate(R1); | 2027 __ LoadIsolate(R1); |
2027 // R0: Current user tag. | 2028 // R0: Current user tag. |
(...skipping 19 matching lines...)
2047 | 2048 |
2048 void Intrinsifier::Profiler_getCurrentTag(Assembler* assembler) { | 2049 void Intrinsifier::Profiler_getCurrentTag(Assembler* assembler) { |
2049 __ LoadIsolate(R0); | 2050 __ LoadIsolate(R0); |
2050 __ ldr(R0, Address(R0, Isolate::current_tag_offset())); | 2051 __ ldr(R0, Address(R0, Isolate::current_tag_offset())); |
2051 __ Ret(); | 2052 __ Ret(); |
2052 } | 2053 } |
2053 | 2054 |
2054 } // namespace dart | 2055 } // namespace dart |
2055 | 2056 |
2056 #endif // defined TARGET_ARCH_ARM | 2057 #endif // defined TARGET_ARCH_ARM |