OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1062 matching lines...)
1073 BinaryOpIC::GetName(operands_type_)); | 1073 BinaryOpIC::GetName(operands_type_)); |
1074 } | 1074 } |
1075 | 1075 |
1076 | 1076 |
1077 void BinaryOpStub::GenerateSmiCode( | 1077 void BinaryOpStub::GenerateSmiCode( |
1078 MacroAssembler* masm, | 1078 MacroAssembler* masm, |
1079 Label* slow, | 1079 Label* slow, |
1080 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { | 1080 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { |
1081 | 1081 |
1082 // Arguments to BinaryOpStub are in rdx and rax. | 1082 // Arguments to BinaryOpStub are in rdx and rax. |
1083 Register left = rdx; | 1083 const Register left = rdx; |
1084 Register right = rax; | 1084 const Register right = rax; |
1085 | 1085 |
1086 // We only generate heapnumber answers for overflowing calculations | 1086 // We only generate heapnumber answers for overflowing calculations |
1087 // for the four basic arithmetic operations and logical right shift by 0. | 1087 // for the four basic arithmetic operations and logical right shift by 0. |
1088 bool generate_inline_heapnumber_results = | 1088 bool generate_inline_heapnumber_results = |
1089 (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) && | 1089 (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) && |
1090 (op_ == Token::ADD || op_ == Token::SUB || | 1090 (op_ == Token::ADD || op_ == Token::SUB || |
1091 op_ == Token::MUL || op_ == Token::DIV || op_ == Token::SHR); | 1091 op_ == Token::MUL || op_ == Token::DIV || op_ == Token::SHR); |
1092 | 1092 |
1093 // Smi check of both operands. If op is BIT_OR, the check is delayed | 1093 // Smi check of both operands. If op is BIT_OR, the check is delayed |
1094 // until after the OR operation. | 1094 // until after the OR operation. |
(...skipping 21 matching lines...)
1116 __ movq(rax, left); | 1116 __ movq(rax, left); |
1117 break; | 1117 break; |
1118 | 1118 |
1119 case Token::MUL: | 1119 case Token::MUL: |
1120 ASSERT(right.is(rax)); | 1120 ASSERT(right.is(rax)); |
1121 __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative. | 1121 __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative. |
1122 break; | 1122 break; |
1123 | 1123 |
1124 case Token::DIV: | 1124 case Token::DIV: |
1125 // SmiDiv will not accept left in rdx or right in rax. | 1125 // SmiDiv will not accept left in rdx or right in rax. |
1126 left = rcx; | |
1127 right = rbx; | |
1128 __ movq(rbx, rax); | 1126 __ movq(rbx, rax); |
1129 __ movq(rcx, rdx); | 1127 __ movq(rcx, rdx); |
1130 __ SmiDiv(rax, left, right, &use_fp_on_smis); | 1128 __ SmiDiv(rax, rcx, rbx, &use_fp_on_smis); |
1131 break; | 1129 break; |
1132 | 1130 |
1133 case Token::MOD: | 1131 case Token::MOD: |
1134 // SmiMod will not accept left in rdx or right in rax. | 1132 // SmiMod will not accept left in rdx or right in rax. |
1135 left = rcx; | |
1136 right = rbx; | |
1137 __ movq(rbx, rax); | 1133 __ movq(rbx, rax); |
1138 __ movq(rcx, rdx); | 1134 __ movq(rcx, rdx); |
1139 __ SmiMod(rax, left, right, &use_fp_on_smis); | 1135 __ SmiMod(rax, rcx, rbx, &use_fp_on_smis); |
1140 break; | 1136 break; |
1141 | 1137 |
1142 case Token::BIT_OR: { | 1138 case Token::BIT_OR: { |
1143 ASSERT(right.is(rax)); | 1139 ASSERT(right.is(rax)); |
1144 __ SmiOrIfSmis(right, right, left, &not_smis); // BIT_OR is commutative. | 1140 __ SmiOrIfSmis(right, right, left, &not_smis); // BIT_OR is commutative. |
1145 break; | 1141 break; |
1146 } | 1142 } |
1147 case Token::BIT_XOR: | 1143 case Token::BIT_XOR: |
1148 ASSERT(right.is(rax)); | 1144 ASSERT(right.is(rax)); |
1149 __ SmiXor(right, right, left); // BIT_XOR is commutative. | 1145 __ SmiXor(right, right, left); // BIT_XOR is commutative. |
(...skipping 5336 matching lines...)
6486 #endif | 6482 #endif |
6487 | 6483 |
6488 __ Ret(); | 6484 __ Ret(); |
6489 } | 6485 } |
6490 | 6486 |
6491 #undef __ | 6487 #undef __ |
6492 | 6488 |
6493 } } // namespace v8::internal | 6489 } } // namespace v8::internal |
6494 | 6490 |
6495 #endif // V8_TARGET_ARCH_X64 | 6491 #endif // V8_TARGET_ARCH_X64 |
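
Note on the smi fast path shown above: the generated code keeps a result as a smi only while the operation stays exactly representable; anything else jumps to use_fp_on_smis and, when ALLOW_HEAPNUMBER_RESULTS is set, is finished as a heap number. Below is a minimal plain-C++ sketch of the kind of bailout checks involved for Token::DIV; the helper name, the exact set of checks, and the 32-bit smi range are illustrative assumptions, not V8's MacroAssembler::SmiDiv itself.

// Sketch only: mirrors the fast-path/bailout split, not the real stub.
#include <cstdint>
#include <optional>

std::optional<int32_t> SmiDivFastPath(int32_t left, int32_t right) {
  if (right == 0) return std::nullopt;                        // division by zero
  if (left == INT32_MIN && right == -1) return std::nullopt;  // overflows the smi range
  if (left % right != 0) return std::nullopt;                 // quotient not an integer
  if (left == 0 && right < 0) return std::nullopt;            // would be -0, not a smi
  return left / right;                                        // result stays a smi
}

Roughly, these are the situations in which the real stub takes the use_fp_on_smis label and, when heap-number results are allowed, redoes the division in floating point instead of returning a smi.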