Chromium Code Reviews

Side by Side Diff: runtime/vm/intrinsifier_x64.cc

Issue 10704192: Implement the remaining methods in intrinsifier_x64.cc (Closed) Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 8 years, 5 months ago
1 1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
2 2 // for details. All rights reserved. Use of this source code is governed by a
3 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 5 #include "vm/globals.h"  // Needed here to get TARGET_ARCH_X64.
6 6 #if defined(TARGET_ARCH_X64)
7 7
8 8 #include "vm/intrinsifier.h"
9 9
10 10 #include "vm/assembler.h"
(...skipping 659 matching lines...)
670 670 return false;
671 671 }
672 672
673 673
674 674 bool Intrinsifier::Integer_bitXor(Assembler* assembler) {
675 675 return Integer_bitXorFromInteger(assembler);
676 676 }
677 677
678 678
679 679 bool Intrinsifier::Integer_shl(Assembler* assembler) {
680 ASSERT(kSmiTagShift == 1);
681 ASSERT(kSmiTag == 0);
682 Label fall_through, overflow;
683 TestBothArgumentsSmis(assembler, &fall_through);
684 // Shift value is in RAX. Compare with tagged Smi.
685 __ cmpq(RAX, Immediate(Smi::RawValue(Smi::kBits)));
686 __ j(ABOVE_EQUAL, &fall_through, Assembler::kNearJump);
687
688 __ SmiUntag(RAX);
689 __ movq(RCX, RAX); // Shift amount must be in RCX.
690 __ movq(RAX, Address(RSP, + 2 * kWordSize)); // Value.
691
692 // Overflow test - all the shifted-out bits must be the same as the sign bit.
693 __ movq(RDI, RAX);
694 __ shlq(RAX, RCX);
695 __ sarq(RAX, RCX);
696 __ cmpq(RAX, RDI);
697 __ j(NOT_EQUAL, &overflow, Assembler::kNearJump);
698
699 __ shlq(RAX, RCX); // Shift for the result now that we know there is no overflow.
700
701 // RAX is a correctly tagged Smi.
702 __ ret();
703
704 __ Bind(&overflow);
705 // Mint is rarely used on x64 (only for integers requiring 64 bits instead of
706 // the 63 bits a Smi can represent).
707 __ Bind(&fall_through);
680 708 return false;
681 709 }
682 710
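The overflow check in Integer_shl above shifts left, shifts back arithmetically, and compares with the original value; if any shifted-out bit differed from the sign bit, the round trip changes the value. A minimal C++ sketch of the same idea on an untagged Smi value (helper name hypothetical, not part of this patch):

    #include <cstdint>

    // Returns true and stores value << count when no significant bits are lost;
    // returns false on overflow, mirroring the shlq/sarq/cmpq sequence above.
    bool SmiShiftLeft(int64_t value, int64_t count, int64_t* result) {
      // Do the left shift in an unsigned type to sidestep signed-overflow UB.
      int64_t shifted = static_cast<int64_t>(static_cast<uint64_t>(value) << count);
      if ((shifted >> count) != value) {  // every shifted-out bit must equal the sign bit
        return false;                     // overflow: the intrinsic falls through (Mint path)
      }
      *result = shifted;
      return true;
    }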
683 711
684 712 static bool CompareIntegers(Assembler* assembler, Condition true_condition) {
685 713 Label fall_through, true_label;
686 714 const Bool& bool_true = Bool::ZoneHandle(Bool::True());
687 715 const Bool& bool_false = Bool::ZoneHandle(Bool::False());
688 716 TestBothArgumentsSmis(assembler, &fall_through);
689 717 // RAX contains the right argument.
(...skipping 28 matching lines...)
718 746 bool Intrinsifier::Integer_lessEqualThan(Assembler* assembler) {
719 747 return CompareIntegers(assembler, LESS_EQUAL);
720 748 }
721 749
722 750
723 751 bool Intrinsifier::Integer_greaterEqualThan(Assembler* assembler) {
724 752 return CompareIntegers(assembler, GREATER_EQUAL);
725 753 }
726 754
727 755
756 // This is called for Smi, Mint and Bigint receivers. The right argument
757 // can be Smi, Mint, Bigint or double.
728 758 bool Intrinsifier::Integer_equalToInteger(Assembler* assembler) {
759 Label fall_through, true_label, check_for_mint;
760 const Bool& bool_true = Bool::ZoneHandle(Bool::True());
761 const Bool& bool_false = Bool::ZoneHandle(Bool::False());
762 // For an integer receiver, check identity ('===') first.
763 __ movq(RAX, Address(RSP, + 1 * kWordSize));
764 __ movq(RCX, Address(RSP, + 2 * kWordSize));
765 __ cmpq(RAX, RCX);
766 __ j(EQUAL, &true_label, Assembler::kNearJump);
767 __ orq(RAX, RCX);
768 __ testq(RAX, Immediate(kSmiTagMask));
769 __ j(NOT_ZERO, &check_for_mint, Assembler::kNearJump);
770 // Both arguments are smi, '===' is good enough.
771 __ LoadObject(RAX, bool_false);
772 __ ret();
773 __ Bind(&true_label);
774 __ LoadObject(RAX, bool_true);
775 __ ret();
776
777 // At least one of the arguments was not Smi.
778 Label receiver_not_smi;
779 __ Bind(&check_for_mint);
780 __ movq(RAX, Address(RSP, + 2 * kWordSize)); // Receiver.
781 __ testq(RAX, Immediate(kSmiTagMask));
782 __ j(NOT_ZERO, &receiver_not_smi);
783
784 // Left (receiver) is Smi, return false if right is not Double.
785 // Note that an instance of Mint or Bigint never contains a value that can be
786 // represented by Smi.
787 __ movq(RAX, Address(RSP, + 1 * kWordSize));
788 __ CompareClassId(RAX, kDouble);
789 __ j(EQUAL, &fall_through);
790 __ LoadObject(RAX, bool_false);
791 __ ret();
792
793 __ Bind(&receiver_not_smi);
794 // RAX: receiver.
795 __ CompareClassId(RAX, kMint);
796 __ j(NOT_EQUAL, &fall_through);
797 // Receiver is Mint, return false if right is Smi.
798 __ movq(RAX, Address(RSP, + 1 * kWordSize)); // Right argument.
799 __ testq(RAX, Immediate(kSmiTagMask));
800 __ j(NOT_ZERO, &fall_through);
801 __ LoadObject(RAX, bool_false); // Smi == Mint -> false.
802 __ ret();
803 // TODO(srdjan): Implement Mint == Mint comparison.
804
805 __ Bind(&fall_through);
729 806 return false;
730 807 }
731 808
732 809
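Integer_equalToInteger above leans on the Smi encoding: a Smi stores its value directly in the tagged word with tag bit 0, so pointer identity ('===') is value equality, and a single or/test detects whether both operands are Smis. A C++ sketch of that tag check (assuming kSmiTag == 0 and kSmiTagMask == 1, as asserted elsewhere in this file):

    #include <cstdint>

    // True iff neither raw word has the tag bit set, i.e. both are Smis;
    // the intrinsic does this with one orq followed by testq.
    bool BothSmis(uint64_t raw_left, uint64_t raw_right) {
      return ((raw_left | raw_right) & 1) == 0;  // 1 == kSmiTagMask
    }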
733 810 bool Intrinsifier::Integer_equal(Assembler* assembler) {
734 811 return Integer_equalToInteger(assembler);
735 812 }
736 813
737 814
738 815 bool Intrinsifier::Integer_sar(Assembler* assembler) {
816 Label fall_through, shift_count_ok;
817 TestBothArgumentsSmis(assembler, &fall_through);
818 Immediate count_limit = Immediate(0x3F);
819 // Check that the count is not larger than what the hardware can handle.
820 // When shifting a Smi right, the result is the same for all counts
821 // >= count_limit.
822 __ SmiUntag(RAX);
823 // Negative counts throw exception.
824 __ cmpq(RAX, Immediate(0));
825 __ j(LESS, &fall_through, Assembler::kNearJump);
826 __ cmpq(RAX, count_limit);
827 __ j(LESS_EQUAL, &shift_count_ok, Assembler::kNearJump);
828 __ movq(RAX, count_limit);
829 __ Bind(&shift_count_ok);
830 __ movq(RCX, RAX); // Shift amount must be in RCX.
831 __ movq(RAX, Address(RSP, + 2 * kWordSize)); // Value.
832 __ SmiUntag(RAX); // Value.
833 __ sarq(RAX, RCX);
834 __ SmiTag(RAX);
835 __ ret();
836 __ Bind(&fall_through);
739 837 return false;
740 838 }
741 839
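Integer_sar above clamps the untagged shift count to 0x3F because that is the largest count the hardware shift accepts, and for a 63-bit Smi every count of 63 or more produces the same result (all bits collapse to the sign bit); negative counts fall through to the slow case, which throws. A hedged C++ sketch of the fast path (helper name hypothetical):

    #include <cstdint>

    // Mirrors the fast path of Integer_sar for non-negative counts.
    int64_t SmiShiftRight(int64_t value, int64_t count) {
      const int64_t kCountLimit = 0x3F;            // same limit as the intrinsic
      if (count > kCountLimit) count = kCountLimit;
      return value >> count;                       // arithmetic shift of a signed value
    }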
742 840
841 // Argument is Smi (receiver).
743 842 bool Intrinsifier::Smi_bitNegate(Assembler* assembler) {
843 __ movq(RAX, Address(RSP, + 1 * kWordSize)); // Receiver.
844 __ notq(RAX);
845 __ andq(RAX, Immediate(~kSmiTagMask)); // Remove inverted smi-tag.
846 __ ret();
847 return true;
848 }
849
850
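Smi_bitNegate above gets away with two instructions because the Smi tag bit is zero: notq flips every value bit and sets the tag bit to one, and masking the tag bit back off leaves exactly the tagged complement. A small C++ sketch using the constants from this file (helper name hypothetical):

    #include <cstdint>

    // raw is a tagged Smi: value << kSmiTagShift with a zero tag bit.
    uint64_t SmiBitNegate(uint64_t raw) {
      uint64_t inverted = ~raw;        // notq: value bits flipped, tag bit now 1
      return inverted & ~uint64_t{1};  // andq ~kSmiTagMask: clear the inverted tag
    }
    // Example: value 3 is raw 6; ~6 & ~1 yields the raw Smi for ~3, i.e. -4.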
851 // Check if the last argument is a double, jump to label 'is_smi' if smi
852 // (easy to convert to double), otherwise jump to label 'not_double_smi'.
853 // Returns the last argument in RAX.
854 static void TestLastArgumentIsDouble(Assembler* assembler,
855 Label* is_smi,
856 Label* not_double_smi) {
857 __ movq(RAX, Address(RSP, + 1 * kWordSize));
858 __ testq(RAX, Immediate(kSmiTagMask));
859 __ j(ZERO, is_smi, Assembler::kNearJump); // Jump if Smi.
860 __ CompareClassId(RAX, kDouble);
861 __ j(NOT_EQUAL, not_double_smi, Assembler::kNearJump);
862 // Fall through if double.
863 }
864
865
866 // Both arguments on stack, left argument is a double, right argument is of
867 // unknown type. Return true or false object in RAX. Any NaN argument
868 // returns false. Any non-double argument causes control flow to fall through
869 // to the slow case (compiled method body).
870 static bool CompareDoubles(Assembler* assembler, Condition true_condition) {
871 const Bool& bool_true = Bool::ZoneHandle(Bool::True());
872 const Bool& bool_false = Bool::ZoneHandle(Bool::False());
873 Label fall_through, is_false, is_true, is_smi, double_op;
874 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through);
875 // Both arguments are double, right operand is in RAX.
876 __ movsd(XMM1, FieldAddress(RAX, Double::value_offset()));
877 __ Bind(&double_op);
878 __ movq(RAX, Address(RSP, + 2 * kWordSize)); // Left argument.
879 __ movsd(XMM0, FieldAddress(RAX, Double::value_offset()));
880 __ comisd(XMM0, XMM1);
881 __ j(PARITY_EVEN, &is_false, Assembler::kNearJump); // NaN -> false;
882 __ j(true_condition, &is_true, Assembler::kNearJump);
883 // Fall through false.
884 __ Bind(&is_false);
885 __ LoadObject(RAX, bool_false);
886 __ ret();
887 __ Bind(&is_true);
888 __ LoadObject(RAX, bool_true);
889 __ ret();
890 __ Bind(&is_smi);
891 __ SmiUntag(RAX);
892 __ cvtsi2sd(XMM1, RAX);
893 __ jmp(&double_op);
894 __ Bind(&fall_through);
744 895 return false;
745 896 }
746 897
747 898
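A reading note for CompareDoubles above and the comparison intrinsics below: comisd sets the parity flag when either operand is NaN (an unordered compare), so the PARITY_EVEN branch sends every NaN operand to false before the real condition is tested. Roughly the same contract in portable C++ (a sketch; the helper signature is hypothetical):

    #include <cmath>

    // Any NaN argument yields false, matching the PARITY_EVEN -> is_false branch.
    bool CompareDoubles(double left, double right,
                        bool (*true_condition)(double, double)) {
      if (std::isnan(left) || std::isnan(right)) return false;
      return true_condition(left, right);
    }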
748 899 bool Intrinsifier::Double_greaterThan(Assembler* assembler) {
749 return false; 900 return CompareDoubles(assembler, ABOVE);
750 901 }
751 902
752 903
753 904 bool Intrinsifier::Double_greaterEqualThan(Assembler* assembler) {
754 return false; 905 return CompareDoubles(assembler, ABOVE_EQUAL);
755 906 }
756 907
757 908
758 909 bool Intrinsifier::Double_lessThan(Assembler* assembler) {
759 return false; 910 return CompareDoubles(assembler, BELOW);
760 911 }
761 912
762 913
763 914 bool Intrinsifier::Double_equal(Assembler* assembler) {
764 return false; 915 return CompareDoubles(assembler, EQUAL);
765 916 }
766 917
767 918
768 919 bool Intrinsifier::Double_lessEqualThan(Assembler* assembler) {
769 return false; 920 return CompareDoubles(assembler, BELOW_EQUAL);
770 921 }
771 922
772 923
773 924 bool Intrinsifier::Double_toDouble(Assembler* assembler) {
774 925 __ movq(RAX, Address(RSP, + 1 * kWordSize));
775 926 __ ret();
776 927 // Generate enough code to satisfy patchability constraint.
777 928 intptr_t offset = __ CodeSize();
778 929 __ nop(JumpPattern::InstructionLength() - offset);
779 930 return true;
780 931 }
781 932
782 bool Intrinsifier::Double_add(Assembler* assembler) {
933
934 // Expects left argument to be double (receiver). Right argument is unknown.
935 // Both arguments are on stack.
936 static bool DoubleArithmeticOperations(Assembler* assembler, Token::Kind kind) {
937 Label fall_through;
938 TestLastArgumentIsDouble(assembler, &fall_through, &fall_through);
939 // Both arguments are double, right operand is in RAX.
940 __ movsd(XMM1, FieldAddress(RAX, Double::value_offset()));
941 __ movq(RAX, Address(RSP, + 2 * kWordSize)); // Left argument.
942 __ movsd(XMM0, FieldAddress(RAX, Double::value_offset()));
943 switch (kind) {
944 case Token::kADD: __ addsd(XMM0, XMM1); break;
945 case Token::kSUB: __ subsd(XMM0, XMM1); break;
946 case Token::kMUL: __ mulsd(XMM0, XMM1); break;
947 case Token::kDIV: __ divsd(XMM0, XMM1); break;
948 default: UNREACHABLE();
949 }
950 const Class& double_class = Class::Handle(
951 Isolate::Current()->object_store()->double_class());
952 AssemblerMacros::TryAllocate(assembler,
953 double_class,
954 &fall_through,
955 RAX); // Result register.
956 __ movsd(FieldAddress(RAX, Double::value_offset()), XMM0);
957 __ ret();
958 __ Bind(&fall_through);
783 959 return false;
784 960 }
785 961
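A note on the boxing step in DoubleArithmeticOperations above (and in the intrinsics that reuse the same pattern): AssemblerMacros::TryAllocate attempts an inline allocation of the result box and jumps to the supplied label when there is not enough free space, so the intrinsic simply falls through to the compiled method body instead of triggering a GC. The control flow it relies on, sketched in C++ with hypothetical helper names:

    // Sketch only; the real TryAllocate emits assembly, not C++.
    double* BoxDouble(double value) {
      double* box = TryAllocateDoubleBox();  // hypothetical inline-allocation helper
      if (box == nullptr) return nullptr;    // allocation failed: caller takes the slow path
      *box = value;                          // movsd FieldAddress(RAX, value_offset), XMM0
      return box;
    }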
786 962
963 bool Intrinsifier::Double_add(Assembler* assembler) {
964 return DoubleArithmeticOperations(assembler, Token::kADD);
965 }
966
967
787 968 bool Intrinsifier::Double_mul(Assembler* assembler) {
969 return DoubleArithmeticOperations(assembler, Token::kMUL);
970 }
971
972
973 bool Intrinsifier::Double_sub(Assembler* assembler) {
974 return DoubleArithmeticOperations(assembler, Token::kSUB);
975 }
976
977
978 bool Intrinsifier::Double_div(Assembler* assembler) {
979 return DoubleArithmeticOperations(assembler, Token::kDIV);
980 }
981
982
983 bool Intrinsifier::Double_mulFromInteger(Assembler* assembler) {
984 Label fall_through;
985 // Only Smi-s allowed.
986 __ movq(RAX, Address(RSP, + 1 * kWordSize));
987 __ testq(RAX, Immediate(kSmiTagMask));
988 __ j(NOT_ZERO, &fall_through, Assembler::kNearJump);
989 // Is Smi.
990 __ SmiUntag(RAX);
991 __ cvtsi2sd(XMM1, RAX);
992 __ movq(RAX, Address(RSP, + 2 * kWordSize));
993 __ movsd(XMM0, FieldAddress(RAX, Double::value_offset()));
994 __ mulsd(XMM0, XMM1);
995 const Class& double_class = Class::Handle(
996 Isolate::Current()->object_store()->double_class());
997 AssemblerMacros::TryAllocate(assembler,
998 double_class,
999 &fall_through,
1000 RAX); // Result register.
1001 __ movsd(FieldAddress(RAX, Double::value_offset()), XMM0);
1002 __ ret();
1003 __ Bind(&fall_through);
788 1004 return false;
789 1005 }
790 1006
791 1007
792 bool Intrinsifier::Double_sub(Assembler* assembler) {
1008 // Left is double, right is integer (Bigint, Mint or Smi).
1009 bool Intrinsifier::Double_fromInteger(Assembler* assembler) {
1010 Label fall_through;
1011 __ movq(RAX, Address(RSP, +1 * kWordSize));
1012 __ testq(RAX, Immediate(kSmiTagMask));
1013 __ j(NOT_ZERO, &fall_through, Assembler::kNearJump);
1014 // Is Smi.
1015 __ SmiUntag(RAX);
1016 __ cvtsi2sd(XMM0, RAX);
1017 const Class& double_class = Class::Handle(
1018 Isolate::Current()->object_store()->double_class());
1019 AssemblerMacros::TryAllocate(assembler,
1020 double_class,
1021 &fall_through,
1022 RAX); // Result register.
1023 __ movsd(FieldAddress(RAX, Double::value_offset()), XMM0);
1024 __ ret();
1025 __ Bind(&fall_through);
793 1026 return false;
794 1027 }
795 1028
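Both Double_mulFromInteger and Double_fromInteger above take the fast path only for a Smi argument; the SmiUntag followed by cvtsi2sd corresponds to this C++ (a sketch, using the tag constants from this file; helper name hypothetical):

    #include <cstdint>

    // A tagged Smi holds its value shifted left by one, so an arithmetic right
    // shift recovers it before the int64 -> double conversion (cvtsi2sd).
    double SmiToDouble(int64_t raw_smi) {
      int64_t value = raw_smi >> 1;        // SmiUntag(RAX), kSmiTagShift == 1
      return static_cast<double>(value);   // cvtsi2sd XMM0, RAX
    }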
796
797 bool Intrinsifier::Double_div(Assembler* assembler) {
798 return false;
799 }
800
801
802 bool Intrinsifier::Double_mulFromInteger(Assembler* assembler) {
803 return false;
804 }
805
806
807 bool Intrinsifier::Double_fromInteger(Assembler* assembler) {
808 return false;
809 }
810
811 1029
812 1030 bool Intrinsifier::Double_isNaN(Assembler* assembler) {
813 1031 const Bool& bool_true = Bool::ZoneHandle(Bool::True());
814 1032 const Bool& bool_false = Bool::ZoneHandle(Bool::False());
815 1033 Label is_true;
816 1034 __ movq(RAX, Address(RSP, +1 * kWordSize));
817 1035 __ movsd(XMM0, FieldAddress(RAX, Double::value_offset()));
818 1036 __ comisd(XMM0, XMM0);
819 1037 __ j(PARITY_EVEN, &is_true, Assembler::kNearJump); // NaN -> true;
820 1038 __ LoadObject(RAX, bool_false);
(...skipping 25 matching lines...)
846 1064 __ Bind(&is_zero);
847 1065 // Check for negative zero (get the sign bit).
848 1066 __ movmskpd(RAX, XMM0);
849 1067 __ testq(RAX, Immediate(1));
850 1068 __ j(NOT_ZERO, &is_true, Assembler::kNearJump);
851 1069 __ jmp(&is_false, Assembler::kNearJump);
852 1070 return true; // Method is complete, no slow case.
853 1071 }
854 1072
855 1073
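The negative-zero check above works because movmskpd copies the sign bit of each double lane into the low bits of the destination register: -0.0 compares equal to 0.0, so only the sign bit distinguishes them. Equivalent portable C++ (sketch, helper name hypothetical):

    #include <cmath>

    // True only for -0.0; the comparison path alone cannot tell it from 0.0.
    bool IsNegativeZero(double value) {
      return value == 0.0 && std::signbit(value);
    }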
856 // Check if the last argument is a double, jump to label 'is_smi' if smi
857 // (easy to convert to double), otherwise jump to label 'not_double_smi',
858 // Returns the last argument in RAX.
859 static void TestLastArgumentIsDouble(Assembler* assembler,
860 Label* is_smi,
861 Label* not_double_smi) {
862 __ movq(RAX, Address(RSP, + 1 * kWordSize));
863 __ testq(RAX, Immediate(kSmiTagMask));
864 __ j(ZERO, is_smi, Assembler::kNearJump); // Jump if Smi.
865 __ CompareClassId(RAX, kDouble);
866 __ j(NOT_EQUAL, not_double_smi, Assembler::kNearJump);
867 // Fall through if double.
868 }
869
870
871 1074 enum TrigonometricFunctions {
872 1075 kSine,
873 1076 kCosine,
874 1077 };
875 1078
1079
876 1080 static void EmitTrigonometric(Assembler* assembler,
877 1081 TrigonometricFunctions kind) {
878 1082 Label fall_through, is_smi, double_op;
879 1083 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through);
880 1084 // Argument is double and is in RAX.
881 1085 __ fldl(FieldAddress(RAX, Double::value_offset()));
882 1086 __ Bind(&double_op);
883 1087 switch (kind) {
884 1088 case kSine: __ fsin(); break;
885 1089 case kCosine: __ fcos(); break;
(...skipping 228 matching lines...)
1114 1318 __ LoadObject(RAX, bool_true);
1115 1319 __ ret();
1116 1320 return true;
1117 1321 }
1118 1322
1119 1323 #undef __
1120 1324
1121 1325 } // namespace dart
1122 1326
1123 1327 #endif // defined TARGET_ARCH_X64