Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(361)

Side by Side Diff: runtime/vm/intrinsifier_arm.cc

Issue 1192103004: VM: New calling convention for generated code. (Closed) Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: Fix CheckCodePointer assertion, remove remaining 0 PC markers Created 5 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM. 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM.
6 #if defined(TARGET_ARCH_ARM) 6 #if defined(TARGET_ARCH_ARM)
7 7
8 #include "vm/intrinsifier.h" 8 #include "vm/intrinsifier.h"
9 9
10 #include "vm/assembler.h" 10 #include "vm/assembler.h"
(...skipping 559 matching lines...) Expand 10 before | Expand all | Expand 10 after
570 __ b(&fall_through, LT); 570 __ b(&fall_through, LT);
571 __ SmiUntag(R1); 571 __ SmiUntag(R1);
572 572
573 // Pull off high bits that will be shifted off of R1 by making a mask 573 // Pull off high bits that will be shifted off of R1 by making a mask
574 // ((1 << R0) - 1), shifting it to the left, masking R1, then shifting back. 574 // ((1 << R0) - 1), shifting it to the left, masking R1, then shifting back.
575 // high bits = (((1 << R0) - 1) << (32 - R0)) & R1) >> (32 - R0) 575 // high bits = (((1 << R0) - 1) << (32 - R0)) & R1) >> (32 - R0)
576 // lo bits = R1 << R0 576 // lo bits = R1 << R0
577 __ LoadImmediate(R7, 1); 577 __ LoadImmediate(R7, 1);
578 __ mov(R7, Operand(R7, LSL, R0)); // R7 <- 1 << R0 578 __ mov(R7, Operand(R7, LSL, R0)); // R7 <- 1 << R0
579 __ sub(R7, R7, Operand(1)); // R7 <- R7 - 1 579 __ sub(R7, R7, Operand(1)); // R7 <- R7 - 1
580 __ rsb(R9, R0, Operand(32)); // R9 <- 32 - R0 580 __ rsb(R10, R0, Operand(32)); // R10 <- 32 - R0
581 __ mov(R7, Operand(R7, LSL, R9)); // R7 <- R7 << R9 581 __ mov(R7, Operand(R7, LSL, R10)); // R7 <- R7 << R10
582 __ and_(R7, R1, Operand(R7)); // R7 <- R7 & R1 582 __ and_(R7, R1, Operand(R7)); // R7 <- R7 & R1
583 __ mov(R7, Operand(R7, LSR, R9)); // R7 <- R7 >> R9 583 __ mov(R7, Operand(R7, LSR, R10)); // R7 <- R7 >> R10
584 // Now R7 has the bits that fall off of R1 on a left shift. 584 // Now R7 has the bits that fall off of R1 on a left shift.
585 __ mov(R1, Operand(R1, LSL, R0)); // R1 gets the low bits. 585 __ mov(R1, Operand(R1, LSL, R0)); // R1 gets the low bits.
586 586
587 const Class& mint_class = Class::Handle( 587 const Class& mint_class = Class::Handle(
588 Isolate::Current()->object_store()->mint_class()); 588 Isolate::Current()->object_store()->mint_class());
589 __ TryAllocate(mint_class, &fall_through, R0, R2); 589 __ TryAllocate(mint_class, &fall_through, R0, R2);
590 590
591 591
592 __ str(R1, FieldAddress(R0, Mint::value_offset())); 592 __ str(R1, FieldAddress(R0, Mint::value_offset()));
593 __ str(R7, FieldAddress(R0, Mint::value_offset() + kWordSize)); 593 __ str(R7, FieldAddress(R0, Mint::value_offset() + kWordSize));
(...skipping 217 matching lines...) Expand 10 before | Expand all | Expand 10 after
811 __ ldrd(R2, SP, 2 * kWordSize); 811 __ ldrd(R2, SP, 2 * kWordSize);
812 // R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0. 812 // R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0.
813 __ ldrd(R4, SP, 0 * kWordSize); 813 __ ldrd(R4, SP, 0 * kWordSize);
814 __ SmiUntag(R5); 814 __ SmiUntag(R5);
815 // R0 = n ~/ _DIGIT_BITS 815 // R0 = n ~/ _DIGIT_BITS
816 __ Asr(R0, R5, Operand(5)); 816 __ Asr(R0, R5, Operand(5));
817 // R6 = &x_digits[0] 817 // R6 = &x_digits[0]
818 __ add(R6, R3, Operand(TypedData::data_offset() - kHeapObjectTag)); 818 __ add(R6, R3, Operand(TypedData::data_offset() - kHeapObjectTag));
819 // R7 = &x_digits[x_used] 819 // R7 = &x_digits[x_used]
820 __ add(R7, R6, Operand(R2, LSL, 1)); 820 __ add(R7, R6, Operand(R2, LSL, 1));
821 // R9 = &r_digits[1] 821 // R10 = &r_digits[1]
822 __ add(R9, R4, Operand(TypedData::data_offset() - kHeapObjectTag + 822 __ add(R10, R4, Operand(TypedData::data_offset() - kHeapObjectTag +
823 Bigint::kBytesPerDigit)); 823 Bigint::kBytesPerDigit));
824 // R9 = &r_digits[x_used + n ~/ _DIGIT_BITS + 1] 824 // R10 = &r_digits[x_used + n ~/ _DIGIT_BITS + 1]
825 __ add(R0, R0, Operand(R2, ASR, 1)); 825 __ add(R0, R0, Operand(R2, ASR, 1));
826 __ add(R9, R9, Operand(R0, LSL, 2)); 826 __ add(R10, R10, Operand(R0, LSL, 2));
827 // R3 = n % _DIGIT_BITS 827 // R3 = n % _DIGIT_BITS
828 __ and_(R3, R5, Operand(31)); 828 __ and_(R3, R5, Operand(31));
829 // R2 = 32 - R3 829 // R2 = 32 - R3
830 __ rsb(R2, R3, Operand(32)); 830 __ rsb(R2, R3, Operand(32));
831 __ mov(R1, Operand(0)); 831 __ mov(R1, Operand(0));
832 Label loop; 832 Label loop;
833 __ Bind(&loop); 833 __ Bind(&loop);
834 __ ldr(R0, Address(R7, -Bigint::kBytesPerDigit, Address::PreIndex)); 834 __ ldr(R0, Address(R7, -Bigint::kBytesPerDigit, Address::PreIndex));
835 __ orr(R1, R1, Operand(R0, LSR, R2)); 835 __ orr(R1, R1, Operand(R0, LSR, R2));
836 __ str(R1, Address(R9, -Bigint::kBytesPerDigit, Address::PreIndex)); 836 __ str(R1, Address(R10, -Bigint::kBytesPerDigit, Address::PreIndex));
837 __ mov(R1, Operand(R0, LSL, R3)); 837 __ mov(R1, Operand(R0, LSL, R3));
838 __ teq(R7, Operand(R6)); 838 __ teq(R7, Operand(R6));
839 __ b(&loop, NE); 839 __ b(&loop, NE);
840 __ str(R1, Address(R9, -Bigint::kBytesPerDigit, Address::PreIndex)); 840 __ str(R1, Address(R10, -Bigint::kBytesPerDigit, Address::PreIndex));
841 // Returning Object::null() is not required, since this method is private. 841 // Returning Object::null() is not required, since this method is private.
842 __ Ret(); 842 __ Ret();
843 } 843 }
844 844
845 845
846 void Intrinsifier::Bigint_rsh(Assembler* assembler) { 846 void Intrinsifier::Bigint_rsh(Assembler* assembler) {
847 // static void _lsh(Uint32List x_digits, int x_used, int n, 847 // static void _lsh(Uint32List x_digits, int x_used, int n,
848 // Uint32List r_digits) 848 // Uint32List r_digits)
849 849
850 // R2 = x_used, R3 = x_digits, x_used > 0, x_used is Smi. 850 // R2 = x_used, R3 = x_digits, x_used > 0, x_used is Smi.
851 __ ldrd(R2, SP, 2 * kWordSize); 851 __ ldrd(R2, SP, 2 * kWordSize);
852 // R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0. 852 // R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0.
853 __ ldrd(R4, SP, 0 * kWordSize); 853 __ ldrd(R4, SP, 0 * kWordSize);
854 __ SmiUntag(R5); 854 __ SmiUntag(R5);
855 // R0 = n ~/ _DIGIT_BITS 855 // R0 = n ~/ _DIGIT_BITS
856 __ Asr(R0, R5, Operand(5)); 856 __ Asr(R0, R5, Operand(5));
857 // R9 = &r_digits[0] 857 // R10 = &r_digits[0]
858 __ add(R9, R4, Operand(TypedData::data_offset() - kHeapObjectTag)); 858 __ add(R10, R4, Operand(TypedData::data_offset() - kHeapObjectTag));
859 // R7 = &x_digits[n ~/ _DIGIT_BITS] 859 // R7 = &x_digits[n ~/ _DIGIT_BITS]
860 __ add(R7, R3, Operand(TypedData::data_offset() - kHeapObjectTag)); 860 __ add(R7, R3, Operand(TypedData::data_offset() - kHeapObjectTag));
861 __ add(R7, R7, Operand(R0, LSL, 2)); 861 __ add(R7, R7, Operand(R0, LSL, 2));
862 // R6 = &r_digits[x_used - n ~/ _DIGIT_BITS - 1] 862 // R6 = &r_digits[x_used - n ~/ _DIGIT_BITS - 1]
863 __ add(R0, R0, Operand(1)); 863 __ add(R0, R0, Operand(1));
864 __ rsb(R0, R0, Operand(R2, ASR, 1)); 864 __ rsb(R0, R0, Operand(R2, ASR, 1));
865 __ add(R6, R9, Operand(R0, LSL, 2)); 865 __ add(R6, R10, Operand(R0, LSL, 2));
866 // R3 = n % _DIGIT_BITS 866 // R3 = n % _DIGIT_BITS
867 __ and_(R3, R5, Operand(31)); 867 __ and_(R3, R5, Operand(31));
868 // R2 = 32 - R3 868 // R2 = 32 - R3
869 __ rsb(R2, R3, Operand(32)); 869 __ rsb(R2, R3, Operand(32));
870 // R1 = x_digits[n ~/ _DIGIT_BITS] >> (n % _DIGIT_BITS) 870 // R1 = x_digits[n ~/ _DIGIT_BITS] >> (n % _DIGIT_BITS)
871 __ ldr(R1, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex)); 871 __ ldr(R1, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex));
872 __ mov(R1, Operand(R1, LSR, R3)); 872 __ mov(R1, Operand(R1, LSR, R3));
873 Label loop_entry; 873 Label loop_entry;
874 __ b(&loop_entry); 874 __ b(&loop_entry);
875 Label loop; 875 Label loop;
876 __ Bind(&loop); 876 __ Bind(&loop);
877 __ ldr(R0, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex)); 877 __ ldr(R0, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex));
878 __ orr(R1, R1, Operand(R0, LSL, R2)); 878 __ orr(R1, R1, Operand(R0, LSL, R2));
879 __ str(R1, Address(R9, Bigint::kBytesPerDigit, Address::PostIndex)); 879 __ str(R1, Address(R10, Bigint::kBytesPerDigit, Address::PostIndex));
880 __ mov(R1, Operand(R0, LSR, R3)); 880 __ mov(R1, Operand(R0, LSR, R3));
881 __ Bind(&loop_entry); 881 __ Bind(&loop_entry);
882 __ teq(R9, Operand(R6)); 882 __ teq(R10, Operand(R6));
883 __ b(&loop, NE); 883 __ b(&loop, NE);
884 __ str(R1, Address(R9, 0)); 884 __ str(R1, Address(R10, 0));
885 // Returning Object::null() is not required, since this method is private. 885 // Returning Object::null() is not required, since this method is private.
886 __ Ret(); 886 __ Ret();
887 } 887 }
888 888
889 889
890 void Intrinsifier::Bigint_absAdd(Assembler* assembler) { 890 void Intrinsifier::Bigint_absAdd(Assembler* assembler) {
891 // static void _absAdd(Uint32List digits, int used, 891 // static void _absAdd(Uint32List digits, int used,
892 // Uint32List a_digits, int a_used, 892 // Uint32List a_digits, int a_used,
893 // Uint32List r_digits) 893 // Uint32List r_digits)
894 894
895 // R2 = used, R3 = digits 895 // R2 = used, R3 = digits
896 __ ldrd(R2, SP, 3 * kWordSize); 896 __ ldrd(R2, SP, 3 * kWordSize);
897 // R3 = &digits[0] 897 // R3 = &digits[0]
898 __ add(R3, R3, Operand(TypedData::data_offset() - kHeapObjectTag)); 898 __ add(R3, R3, Operand(TypedData::data_offset() - kHeapObjectTag));
899 899
900 // R4 = a_used, R5 = a_digits 900 // R4 = a_used, R5 = a_digits
901 __ ldrd(R4, SP, 1 * kWordSize); 901 __ ldrd(R4, SP, 1 * kWordSize);
902 // R5 = &a_digits[0] 902 // R5 = &a_digits[0]
903 __ add(R5, R5, Operand(TypedData::data_offset() - kHeapObjectTag)); 903 __ add(R5, R5, Operand(TypedData::data_offset() - kHeapObjectTag));
904 904
905 // R6 = r_digits 905 // R6 = r_digits
906 __ ldr(R6, Address(SP, 0 * kWordSize)); 906 __ ldr(R6, Address(SP, 0 * kWordSize));
907 // R6 = &r_digits[0] 907 // R6 = &r_digits[0]
908 __ add(R6, R6, Operand(TypedData::data_offset() - kHeapObjectTag)); 908 __ add(R6, R6, Operand(TypedData::data_offset() - kHeapObjectTag));
909 909
910 // R7 = &digits[a_used >> 1], a_used is Smi. 910 // R7 = &digits[a_used >> 1], a_used is Smi.
911 __ add(R7, R3, Operand(R4, LSL, 1)); 911 __ add(R7, R3, Operand(R4, LSL, 1));
912 912
913 // R9 = &digits[used >> 1], used is Smi. 913 // R10 = &digits[used >> 1], used is Smi.
914 __ add(R9, R3, Operand(R2, LSL, 1)); 914 __ add(R10, R3, Operand(R2, LSL, 1));
915 915
916 __ adds(R0, R0, Operand(0)); // carry flag = 0 916 __ adds(R0, R0, Operand(0)); // carry flag = 0
917 Label add_loop; 917 Label add_loop;
918 __ Bind(&add_loop); 918 __ Bind(&add_loop);
919 // Loop a_used times, a_used > 0. 919 // Loop a_used times, a_used > 0.
920 __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex)); 920 __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex));
921 __ ldr(R1, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex)); 921 __ ldr(R1, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex));
922 __ adcs(R0, R0, Operand(R1)); 922 __ adcs(R0, R0, Operand(R1));
923 __ teq(R3, Operand(R7)); // Does not affect carry flag. 923 __ teq(R3, Operand(R7)); // Does not affect carry flag.
924 __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex)); 924 __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex));
925 __ b(&add_loop, NE); 925 __ b(&add_loop, NE);
926 926
927 Label last_carry; 927 Label last_carry;
928 __ teq(R3, Operand(R9)); // Does not affect carry flag. 928 __ teq(R3, Operand(R10)); // Does not affect carry flag.
929 __ b(&last_carry, EQ); // If used - a_used == 0. 929 __ b(&last_carry, EQ); // If used - a_used == 0.
930 930
931 Label carry_loop; 931 Label carry_loop;
932 __ Bind(&carry_loop); 932 __ Bind(&carry_loop);
933 // Loop used - a_used times, used - a_used > 0. 933 // Loop used - a_used times, used - a_used > 0.
934 __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex)); 934 __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex));
935 __ adcs(R0, R0, Operand(0)); 935 __ adcs(R0, R0, Operand(0));
936 __ teq(R3, Operand(R9)); // Does not affect carry flag. 936 __ teq(R3, Operand(R10)); // Does not affect carry flag.
937 __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex)); 937 __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex));
938 __ b(&carry_loop, NE); 938 __ b(&carry_loop, NE);
939 939
940 __ Bind(&last_carry); 940 __ Bind(&last_carry);
941 __ mov(R0, Operand(0)); 941 __ mov(R0, Operand(0));
942 __ adc(R0, R0, Operand(0)); 942 __ adc(R0, R0, Operand(0));
943 __ str(R0, Address(R6, 0)); 943 __ str(R0, Address(R6, 0));
944 944
945 // Returning Object::null() is not required, since this method is private. 945 // Returning Object::null() is not required, since this method is private.
946 __ Ret(); 946 __ Ret();
(...skipping 16 matching lines...) Expand all
963 __ add(R5, R5, Operand(TypedData::data_offset() - kHeapObjectTag)); 963 __ add(R5, R5, Operand(TypedData::data_offset() - kHeapObjectTag));
964 964
965 // R6 = r_digits 965 // R6 = r_digits
966 __ ldr(R6, Address(SP, 0 * kWordSize)); 966 __ ldr(R6, Address(SP, 0 * kWordSize));
967 // R6 = &r_digits[0] 967 // R6 = &r_digits[0]
968 __ add(R6, R6, Operand(TypedData::data_offset() - kHeapObjectTag)); 968 __ add(R6, R6, Operand(TypedData::data_offset() - kHeapObjectTag));
969 969
970 // R7 = &digits[a_used >> 1], a_used is Smi. 970 // R7 = &digits[a_used >> 1], a_used is Smi.
971 __ add(R7, R3, Operand(R4, LSL, 1)); 971 __ add(R7, R3, Operand(R4, LSL, 1));
972 972
973 // R9 = &digits[used >> 1], used is Smi. 973 // R10 = &digits[used >> 1], used is Smi.
974 __ add(R9, R3, Operand(R2, LSL, 1)); 974 __ add(R10, R3, Operand(R2, LSL, 1));
975 975
976 __ subs(R0, R0, Operand(0)); // carry flag = 1 976 __ subs(R0, R0, Operand(0)); // carry flag = 1
977 Label sub_loop; 977 Label sub_loop;
978 __ Bind(&sub_loop); 978 __ Bind(&sub_loop);
979 // Loop a_used times, a_used > 0. 979 // Loop a_used times, a_used > 0.
980 __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex)); 980 __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex));
981 __ ldr(R1, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex)); 981 __ ldr(R1, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex));
982 __ sbcs(R0, R0, Operand(R1)); 982 __ sbcs(R0, R0, Operand(R1));
983 __ teq(R3, Operand(R7)); // Does not affect carry flag. 983 __ teq(R3, Operand(R7)); // Does not affect carry flag.
984 __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex)); 984 __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex));
985 __ b(&sub_loop, NE); 985 __ b(&sub_loop, NE);
986 986
987 Label done; 987 Label done;
988 __ teq(R3, Operand(R9)); // Does not affect carry flag. 988 __ teq(R3, Operand(R10)); // Does not affect carry flag.
989 __ b(&done, EQ); // If used - a_used == 0. 989 __ b(&done, EQ); // If used - a_used == 0.
990 990
991 Label carry_loop; 991 Label carry_loop;
992 __ Bind(&carry_loop); 992 __ Bind(&carry_loop);
993 // Loop used - a_used times, used - a_used > 0. 993 // Loop used - a_used times, used - a_used > 0.
994 __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex)); 994 __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex));
995 __ sbcs(R0, R0, Operand(0)); 995 __ sbcs(R0, R0, Operand(0));
996 __ teq(R3, Operand(R9)); // Does not affect carry flag. 996 __ teq(R3, Operand(R10)); // Does not affect carry flag.
997 __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex)); 997 __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex));
998 __ b(&carry_loop, NE); 998 __ b(&carry_loop, NE);
999 999
1000 __ Bind(&done); 1000 __ Bind(&done);
1001 // Returning Object::null() is not required, since this method is private. 1001 // Returning Object::null() is not required, since this method is private.
1002 __ Ret(); 1002 __ Ret();
1003 } 1003 }
1004 1004
1005 1005
1006 void Intrinsifier::Bigint_mulAdd(Assembler* assembler) { 1006 void Intrinsifier::Bigint_mulAdd(Assembler* assembler) {
(...skipping 148 matching lines...) Expand 10 before | Expand all | Expand 10 after
1155 1155
1156 // *ajp++ = low32(t) = R0 1156 // *ajp++ = low32(t) = R0
1157 __ str(R0, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex)); 1157 __ str(R0, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex));
1158 1158
1159 // R6 = low32(c) = high32(t) 1159 // R6 = low32(c) = high32(t)
1160 // R7 = high32(c) = 0 1160 // R7 = high32(c) = 0
1161 __ mov(R7, Operand(0)); 1161 __ mov(R7, Operand(0));
1162 1162
1163 // int n = used - i - 1; while (--n >= 0) ... 1163 // int n = used - i - 1; while (--n >= 0) ...
1164 __ ldr(R0, Address(SP, 0 * kWordSize)); // used is Smi 1164 __ ldr(R0, Address(SP, 0 * kWordSize)); // used is Smi
1165 __ sub(R9, R0, Operand(R2)); 1165 __ sub(R10, R0, Operand(R2));
1166 __ mov(R0, Operand(2)); // n = used - i - 2; if (n >= 0) ... while (--n >= 0) 1166 __ mov(R0, Operand(2)); // n = used - i - 2; if (n >= 0) ... while (--n >= 0)
1167 __ rsbs(R9, R0, Operand(R9, ASR, kSmiTagSize)); 1167 __ rsbs(R10, R0, Operand(R10, ASR, kSmiTagSize));
1168 1168
1169 Label loop, done; 1169 Label loop, done;
1170 __ b(&done, MI); 1170 __ b(&done, MI);
1171 1171
1172 __ Bind(&loop); 1172 __ Bind(&loop);
1173 // x: R3 1173 // x: R3
1174 // xip: R4 1174 // xip: R4
1175 // ajp: R5 1175 // ajp: R5
1176 // c: R7:R6 1176 // c: R7:R6
1177 // t: R2:R1:R0 (not live at loop entry) 1177 // t: R2:R1:R0 (not live at loop entry)
1178 // n: R9 1178 // n: R10
1179 1179
1180 // uint32_t xi = *xip++ 1180 // uint32_t xi = *xip++
1181 __ ldr(R2, Address(R4, Bigint::kBytesPerDigit, Address::PostIndex)); 1181 __ ldr(R2, Address(R4, Bigint::kBytesPerDigit, Address::PostIndex));
1182 1182
1183 // uint96_t t = R7:R6:R0 = 2*x*xi + aj + c 1183 // uint96_t t = R7:R6:R0 = 2*x*xi + aj + c
1184 __ umull(R0, R1, R2, R3); // R1:R0 = R2*R3. 1184 __ umull(R0, R1, R2, R3); // R1:R0 = R2*R3.
1185 __ adds(R0, R0, Operand(R0)); 1185 __ adds(R0, R0, Operand(R0));
1186 __ adcs(R1, R1, Operand(R1)); 1186 __ adcs(R1, R1, Operand(R1));
1187 __ mov(R2, Operand(0)); 1187 __ mov(R2, Operand(0));
1188 __ adc(R2, R2, Operand(0)); // R2:R1:R0 = 2*x*xi. 1188 __ adc(R2, R2, Operand(0)); // R2:R1:R0 = 2*x*xi.
1189 __ adds(R0, R0, Operand(R6)); 1189 __ adds(R0, R0, Operand(R6));
1190 __ adcs(R1, R1, Operand(R7)); 1190 __ adcs(R1, R1, Operand(R7));
1191 __ adc(R2, R2, Operand(0)); // R2:R1:R0 = 2*x*xi + c. 1191 __ adc(R2, R2, Operand(0)); // R2:R1:R0 = 2*x*xi + c.
1192 __ ldr(R6, Address(R5, 0)); // R6 = aj = *ajp. 1192 __ ldr(R6, Address(R5, 0)); // R6 = aj = *ajp.
1193 __ adds(R0, R0, Operand(R6)); 1193 __ adds(R0, R0, Operand(R6));
1194 __ adcs(R6, R1, Operand(0)); 1194 __ adcs(R6, R1, Operand(0));
1195 __ adc(R7, R2, Operand(0)); // R7:R6:R0 = 2*x*xi + c + aj. 1195 __ adc(R7, R2, Operand(0)); // R7:R6:R0 = 2*x*xi + c + aj.
1196 1196
1197 // *ajp++ = low32(t) = R0 1197 // *ajp++ = low32(t) = R0
1198 __ str(R0, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex)); 1198 __ str(R0, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex));
1199 1199
1200 // while (--n >= 0) 1200 // while (--n >= 0)
1201 __ subs(R9, R9, Operand(1)); // --n 1201 __ subs(R10, R10, Operand(1)); // --n
1202 __ b(&loop, PL); 1202 __ b(&loop, PL);
1203 1203
1204 __ Bind(&done); 1204 __ Bind(&done);
1205 // uint32_t aj = *ajp 1205 // uint32_t aj = *ajp
1206 __ ldr(R0, Address(R5, 0)); 1206 __ ldr(R0, Address(R5, 0));
1207 1207
1208 // uint64_t t = aj + c 1208 // uint64_t t = aj + c
1209 __ adds(R6, R6, Operand(R0)); 1209 __ adds(R6, R6, Operand(R0));
1210 __ adc(R7, R7, Operand(0)); 1210 __ adc(R7, R7, Operand(0));
1211 1211
(...skipping 794 matching lines...) Expand 10 before | Expand all | Expand 10 after
2006 __ LoadClassId(R1, R1); 2006 __ LoadClassId(R1, R1);
2007 __ AddImmediate(R1, R1, -kOneByteStringCid); 2007 __ AddImmediate(R1, R1, -kOneByteStringCid);
2008 __ add(R1, R2, Operand(R1, LSL, kWordSizeLog2)); 2008 __ add(R1, R2, Operand(R1, LSL, kWordSizeLog2));
2009 __ ldr(R0, FieldAddress(R1, JSRegExp::function_offset(kOneByteStringCid))); 2009 __ ldr(R0, FieldAddress(R1, JSRegExp::function_offset(kOneByteStringCid)));
2010 2010
2011 // Registers are now set up for the lazy compile stub. It expects the function 2011 // Registers are now set up for the lazy compile stub. It expects the function
2012 // in R0, the argument descriptor in R4, and IC-Data in R5. 2012 // in R0, the argument descriptor in R4, and IC-Data in R5.
2013 __ eor(R5, R5, Operand(R5)); 2013 __ eor(R5, R5, Operand(R5));
2014 2014
2015 // Tail-call the function. 2015 // Tail-call the function.
2016 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
2016 __ ldr(R1, FieldAddress(R0, Function::entry_point_offset())); 2017 __ ldr(R1, FieldAddress(R0, Function::entry_point_offset()));
2017 __ bx(R1); 2018 __ bx(R1);
2018 } 2019 }
2019 2020
2020 2021
2021 // On stack: user tag (+0). 2022 // On stack: user tag (+0).
2022 void Intrinsifier::UserTag_makeCurrent(Assembler* assembler) { 2023 void Intrinsifier::UserTag_makeCurrent(Assembler* assembler) {
2023 // R1: Isolate. 2024 // R1: Isolate.
2024 __ LoadIsolate(R1); 2025 __ LoadIsolate(R1);
2025 // R0: Current user tag. 2026 // R0: Current user tag.
(...skipping 19 matching lines...) Expand all
2045 2046
void Intrinsifier::Profiler_getCurrentTag(Assembler* assembler) {
  // Intrinsic for Profiler.getCurrentTag: loads the current isolate and
  // returns the object stored at Isolate::current_tag_offset() in R0
  // (presumably the active UserTag — confirm against Isolate's field layout).
  __ LoadIsolate(R0);
  __ ldr(R0, Address(R0, Isolate::current_tag_offset()));
  __ Ret();
}
2051 2052
2052 } // namespace dart 2053 } // namespace dart
2053 2054
2054 #endif // defined TARGET_ARCH_ARM 2055 #endif // defined TARGET_ARCH_ARM
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698