OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 119 matching lines...)
130 // cp: Callee's context. | 130 // cp: Callee's context. |
131 // fp: Caller's frame pointer. | 131 // fp: Caller's frame pointer. |
132 // lr: Caller's pc. | 132 // lr: Caller's pc. |
133 | 133 |
134 // Strict mode functions and builtins need to replace the receiver | 134 // Strict mode functions and builtins need to replace the receiver |
135 // with undefined when called as functions (without an explicit | 135 // with undefined when called as functions (without an explicit |
136 // receiver object). r5 is zero for method calls and non-zero for | 136 // receiver object). r5 is zero for method calls and non-zero for |
137 // function calls. | 137 // function calls. |
138 if (!info_->is_classic_mode() || info_->is_native()) { | 138 if (!info_->is_classic_mode() || info_->is_native()) { |
139 Label ok; | 139 Label ok; |
140 __ cmp(r5, Operand(0)); | 140 __ cmp(r5, Operand::Zero()); |
141 __ b(eq, &ok); | 141 __ b(eq, &ok); |
142 int receiver_offset = scope()->num_parameters() * kPointerSize; | 142 int receiver_offset = scope()->num_parameters() * kPointerSize; |
143 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); | 143 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); |
144 __ str(r2, MemOperand(sp, receiver_offset)); | 144 __ str(r2, MemOperand(sp, receiver_offset)); |
145 __ bind(&ok); | 145 __ bind(&ok); |
146 } | 146 } |
147 } | 147 } |
148 | 148 |
149 info()->set_prologue_offset(masm_->pc_offset()); | 149 info()->set_prologue_offset(masm_->pc_offset()); |
150 if (NeedsEagerFrame()) { | 150 if (NeedsEagerFrame()) { |
(...skipping 366 matching lines...)
517 return Operand(constant->Integer32Value()); | 517 return Operand(constant->Integer32Value()); |
518 } else if (r.IsDouble()) { | 518 } else if (r.IsDouble()) { |
519 Abort("ToOperand Unsupported double immediate."); | 519 Abort("ToOperand Unsupported double immediate."); |
520 } | 520 } |
521 ASSERT(r.IsTagged()); | 521 ASSERT(r.IsTagged()); |
522 return Operand(constant->handle()); | 522 return Operand(constant->handle()); |
523 } else if (op->IsRegister()) { | 523 } else if (op->IsRegister()) { |
524 return Operand(ToRegister(op)); | 524 return Operand(ToRegister(op)); |
525 } else if (op->IsDoubleRegister()) { | 525 } else if (op->IsDoubleRegister()) { |
526 Abort("ToOperand IsDoubleRegister unimplemented"); | 526 Abort("ToOperand IsDoubleRegister unimplemented"); |
527 return Operand(0); | 527 return Operand::Zero(); |
528 } | 528 } |
529 // Stack slots not implemented, use ToMemOperand instead. | 529 // Stack slots not implemented, use ToMemOperand instead. |
530 UNREACHABLE(); | 530 UNREACHABLE(); |
531 return Operand(0); | 531 return Operand::Zero(); |
532 } | 532 } |
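The only functional change in this patch is replacing the bare Operand(0) immediates with the named Operand::Zero() factory. A minimal sketch of what that factory looks like, assuming it simply wraps the existing int32_t constructor declared in src/arm/assembler-arm.h (illustrative, not the actual header):

    // Sketch: an explicit zero-immediate factory on the ARM Operand class.
    // A named factory avoids a bare literal 0, which is easy to misread as a
    // null pointer and hides the intent at call sites.
    class Operand {
     public:
      explicit Operand(int32_t immediate,
                       RelocInfo::Mode rmode = RelocInfo::NONE32);
      static Operand Zero() { return Operand(static_cast<int32_t>(0)); }
      // ... remaining constructors and accessors elided ...
    };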
533 | 533 |
534 | 534 |
535 MemOperand LCodeGen::ToMemOperand(LOperand* op) const { | 535 MemOperand LCodeGen::ToMemOperand(LOperand* op) const { |
536 ASSERT(!op->IsRegister()); | 536 ASSERT(!op->IsRegister()); |
537 ASSERT(!op->IsDoubleRegister()); | 537 ASSERT(!op->IsDoubleRegister()); |
538 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); | 538 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); |
539 int index = op->index(); | 539 int index = op->index(); |
540 if (index >= 0) { | 540 if (index >= 0) { |
541 // Local or spill slot. Skip the frame pointer, function, and | 541 // Local or spill slot. Skip the frame pointer, function, and |
(...skipping 527 matching lines...)
1069 if (instr->hydrogen()->HasPowerOf2Divisor()) { | 1069 if (instr->hydrogen()->HasPowerOf2Divisor()) { |
1070 Register dividend = ToRegister(instr->left()); | 1070 Register dividend = ToRegister(instr->left()); |
1071 Register result = ToRegister(instr->result()); | 1071 Register result = ToRegister(instr->result()); |
1072 | 1072 |
1073 int32_t divisor = | 1073 int32_t divisor = |
1074 HConstant::cast(instr->hydrogen()->right())->Integer32Value(); | 1074 HConstant::cast(instr->hydrogen()->right())->Integer32Value(); |
1075 | 1075 |
1076 if (divisor < 0) divisor = -divisor; | 1076 if (divisor < 0) divisor = -divisor; |
1077 | 1077 |
1078 Label positive_dividend, done; | 1078 Label positive_dividend, done; |
1079 __ cmp(dividend, Operand(0)); | 1079 __ cmp(dividend, Operand::Zero()); |
1080 __ b(pl, &positive_dividend); | 1080 __ b(pl, &positive_dividend); |
1081 __ rsb(result, dividend, Operand(0)); | 1081 __ rsb(result, dividend, Operand::Zero()); |
1082 __ and_(result, result, Operand(divisor - 1), SetCC); | 1082 __ and_(result, result, Operand(divisor - 1), SetCC); |
1083 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1083 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1084 DeoptimizeIf(eq, instr->environment()); | 1084 DeoptimizeIf(eq, instr->environment()); |
1085 } | 1085 } |
1086 __ rsb(result, result, Operand(0)); | 1086 __ rsb(result, result, Operand::Zero()); |
1087 __ b(&done); | 1087 __ b(&done); |
1088 __ bind(&positive_dividend); | 1088 __ bind(&positive_dividend); |
1089 __ and_(result, dividend, Operand(divisor - 1)); | 1089 __ and_(result, dividend, Operand(divisor - 1)); |
1090 __ bind(&done); | 1090 __ bind(&done); |
1091 return; | 1091 return; |
1092 } | 1092 } |
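For reference, the power-of-two fast path above (rsb / and_ / rsb) produces a remainder that keeps the sign of the dividend, which is what JS % requires; a C++ sketch of the same arithmetic (illustrative only, not V8 code):

    #include <cstdint>

    // divisor is a positive power of two. A zero result from a negative
    // dividend is exactly the -0 case the bailout above deoptimizes on.
    int32_t ModPowerOfTwo(int32_t dividend, int32_t divisor) {
      uint32_t mask = static_cast<uint32_t>(divisor) - 1;
      if (dividend >= 0) return dividend & mask;  // and_(result, dividend, mask)
      uint32_t magnitude =
          (0u - static_cast<uint32_t>(dividend)) & mask;  // rsb, then and_ with SetCC
      return -static_cast<int32_t>(magnitude);            // final rsb back to negative
    }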
1093 | 1093 |
1094 // These registers hold untagged 32 bit values. | 1094 // These registers hold untagged 32 bit values. |
1095 Register left = ToRegister(instr->left()); | 1095 Register left = ToRegister(instr->left()); |
1096 Register right = ToRegister(instr->right()); | 1096 Register right = ToRegister(instr->right()); |
1097 Register result = ToRegister(instr->result()); | 1097 Register result = ToRegister(instr->result()); |
1098 Label done; | 1098 Label done; |
1099 | 1099 |
1100 if (CpuFeatures::IsSupported(SUDIV)) { | 1100 if (CpuFeatures::IsSupported(SUDIV)) { |
1101 CpuFeatures::Scope scope(SUDIV); | 1101 CpuFeatures::Scope scope(SUDIV); |
1102 // Check for x % 0. | 1102 // Check for x % 0. |
1103 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { | 1103 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { |
1104 __ cmp(right, Operand(0)); | 1104 __ cmp(right, Operand::Zero()); |
1105 DeoptimizeIf(eq, instr->environment()); | 1105 DeoptimizeIf(eq, instr->environment()); |
1106 } | 1106 } |
1107 | 1107 |
1108 // Check for (kMinInt % -1). | 1108 // Check for (kMinInt % -1). |
1109 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { | 1109 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { |
1110 Label left_not_min_int; | 1110 Label left_not_min_int; |
1111 __ cmp(left, Operand(kMinInt)); | 1111 __ cmp(left, Operand(kMinInt)); |
1112 __ b(ne, &left_not_min_int); | 1112 __ b(ne, &left_not_min_int); |
1113 __ cmp(right, Operand(-1)); | 1113 __ cmp(right, Operand(-1)); |
1114 DeoptimizeIf(eq, instr->environment()); | 1114 DeoptimizeIf(eq, instr->environment()); |
1115 __ bind(&left_not_min_int); | 1115 __ bind(&left_not_min_int); |
1116 } | 1116 } |
1117 | 1117 |
1118 // For r3 = r1 % r2; we can have the following ARM code | 1118 // For r3 = r1 % r2; we can have the following ARM code |
1119 // sdiv r3, r1, r2 | 1119 // sdiv r3, r1, r2 |
1120 // mls r3, r3, r2, r1 | 1120 // mls r3, r3, r2, r1 |
1121 | 1121 |
1122 __ sdiv(result, left, right); | 1122 __ sdiv(result, left, right); |
1123 __ mls(result, result, right, left); | 1123 __ mls(result, result, right, left); |
1124 __ cmp(result, Operand(0)); | 1124 __ cmp(result, Operand::Zero()); |
1125 __ b(ne, &done); | 1125 __ b(ne, &done); |
1126 | 1126 |
1127 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1127 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1128 __ cmp(left, Operand(0)); | 1128 __ cmp(left, Operand::Zero()); |
1129 DeoptimizeIf(lt, instr->environment()); | 1129 DeoptimizeIf(lt, instr->environment()); |
1130 } | 1130 } |
1131 } else { | 1131 } else { |
1132 Register scratch = scratch0(); | 1132 Register scratch = scratch0(); |
1133 Register scratch2 = ToRegister(instr->temp()); | 1133 Register scratch2 = ToRegister(instr->temp()); |
1134 DwVfpRegister dividend = ToDoubleRegister(instr->temp2()); | 1134 DwVfpRegister dividend = ToDoubleRegister(instr->temp2()); |
1135 DwVfpRegister divisor = ToDoubleRegister(instr->temp3()); | 1135 DwVfpRegister divisor = ToDoubleRegister(instr->temp3()); |
1136 DwVfpRegister quotient = double_scratch0(); | 1136 DwVfpRegister quotient = double_scratch0(); |
1137 | 1137 |
1138 ASSERT(!dividend.is(divisor)); | 1138 ASSERT(!dividend.is(divisor)); |
1139 ASSERT(!dividend.is(quotient)); | 1139 ASSERT(!dividend.is(quotient)); |
1140 ASSERT(!divisor.is(quotient)); | 1140 ASSERT(!divisor.is(quotient)); |
1141 ASSERT(!scratch.is(left)); | 1141 ASSERT(!scratch.is(left)); |
1142 ASSERT(!scratch.is(right)); | 1142 ASSERT(!scratch.is(right)); |
1143 ASSERT(!scratch.is(result)); | 1143 ASSERT(!scratch.is(result)); |
1144 | 1144 |
1145 Label vfp_modulo, both_positive, right_negative; | 1145 Label vfp_modulo, both_positive, right_negative; |
1146 | 1146 |
1147 CpuFeatures::Scope scope(VFP2); | 1147 CpuFeatures::Scope scope(VFP2); |
1148 | 1148 |
1149 // Check for x % 0. | 1149 // Check for x % 0. |
1150 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { | 1150 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { |
1151 __ cmp(right, Operand(0)); | 1151 __ cmp(right, Operand::Zero()); |
1152 DeoptimizeIf(eq, instr->environment()); | 1152 DeoptimizeIf(eq, instr->environment()); |
1153 } | 1153 } |
1154 | 1154 |
1155 __ Move(result, left); | 1155 __ Move(result, left); |
1156 | 1156 |
1157 // (0 % x) must yield 0 (if x is finite, which is the case here). | 1157 // (0 % x) must yield 0 (if x is finite, which is the case here). |
1158 __ cmp(left, Operand(0)); | 1158 __ cmp(left, Operand::Zero()); |
1159 __ b(eq, &done); | 1159 __ b(eq, &done); |
1160 // Preload right in a vfp register. | 1160 // Preload right in a vfp register. |
1161 __ vmov(divisor.low(), right); | 1161 __ vmov(divisor.low(), right); |
1162 __ b(lt, &vfp_modulo); | 1162 __ b(lt, &vfp_modulo); |
1163 | 1163 |
1164 __ cmp(left, Operand(right)); | 1164 __ cmp(left, Operand(right)); |
1165 __ b(lt, &done); | 1165 __ b(lt, &done); |
1166 | 1166 |
1167 // Check for (positive) power of two on the right hand side. | 1167 // Check for (positive) power of two on the right hand side. |
1168 __ JumpIfNotPowerOfTwoOrZeroAndNeg(right, | 1168 __ JumpIfNotPowerOfTwoOrZeroAndNeg(right, |
1169 scratch, | 1169 scratch, |
1170 &right_negative, | 1170 &right_negative, |
1171 &both_positive); | 1171 &both_positive); |
1172 // Perform modulo operation (scratch contains right - 1). | 1172 // Perform modulo operation (scratch contains right - 1). |
1173 __ and_(result, scratch, Operand(left)); | 1173 __ and_(result, scratch, Operand(left)); |
1174 __ b(&done); | 1174 __ b(&done); |
1175 | 1175 |
1176 __ bind(&right_negative); | 1176 __ bind(&right_negative); |
1177 // Negate right. The sign of the divisor does not matter. | 1177 // Negate right. The sign of the divisor does not matter. |
1178 __ rsb(right, right, Operand(0)); | 1178 __ rsb(right, right, Operand::Zero()); |
1179 | 1179 |
1180 __ bind(&both_positive); | 1180 __ bind(&both_positive); |
1181 const int kUnfolds = 3; | 1181 const int kUnfolds = 3; |
1182 // If the right hand side is smaller than the (nonnegative) | 1182 // If the right hand side is smaller than the (nonnegative) |
1183 // left hand side, the left hand side is the result. | 1183 // left hand side, the left hand side is the result. |
1184 // Else try a few subtractions from the left hand side. | 1184 // Else try a few subtractions from the left hand side. |
1185 __ mov(scratch, left); | 1185 __ mov(scratch, left); |
1186 for (int i = 0; i < kUnfolds; i++) { | 1186 for (int i = 0; i < kUnfolds; i++) { |
1187 // Check if the left hand side is less than or equal to | 1187 // Check if the left hand side is less than or equal to |
1188 // the right hand side. | 1188 // the right hand side. |
(...skipping 30 matching lines...)
1219 __ vcvt_s32_f64(double_scratch.low(), double_scratch); | 1219 __ vcvt_s32_f64(double_scratch.low(), double_scratch); |
1220 __ vmov(scratch, double_scratch.low()); | 1220 __ vmov(scratch, double_scratch.low()); |
1221 | 1221 |
1222 if (!instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1222 if (!instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1223 __ sub(result, left, scratch); | 1223 __ sub(result, left, scratch); |
1224 } else { | 1224 } else { |
1225 Label ok; | 1225 Label ok; |
1226 // Check for -0. | 1226 // Check for -0. |
1227 __ sub(scratch2, left, scratch, SetCC); | 1227 __ sub(scratch2, left, scratch, SetCC); |
1228 __ b(ne, &ok); | 1228 __ b(ne, &ok); |
1229 __ cmp(left, Operand(0)); | 1229 __ cmp(left, Operand::Zero()); |
1230 DeoptimizeIf(mi, instr->environment()); | 1230 DeoptimizeIf(mi, instr->environment()); |
1231 __ bind(&ok); | 1231 __ bind(&ok); |
1232 // Load the result and we are done. | 1232 // Load the result and we are done. |
1233 __ mov(result, scratch2); | 1233 __ mov(result, scratch2); |
1234 } | 1234 } |
1235 } | 1235 } |
1236 __ bind(&done); | 1236 __ bind(&done); |
1237 } | 1237 } |
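The SUDIV path above relies on the identity spelled out in the sdiv/mls comment: the remainder is the dividend minus the truncated quotient times the divisor. As a sketch (C++ integer division also truncates toward zero, so the remainder keeps the dividend's sign):

    #include <cstdint>

    // remainder = left - (left / right) * right, i.e.
    //   sdiv result, left, right
    //   mls  result, result, right, left
    int32_t TruncatingRemainder(int32_t left, int32_t right) {
      return left - (left / right) * right;
    }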
1238 | 1238 |
1239 | 1239 |
(...skipping 14 matching lines...)
1254 | 1254 |
1255 switch (divisor_abs) { | 1255 switch (divisor_abs) { |
1256 case 0: | 1256 case 0: |
1257 DeoptimizeIf(al, environment); | 1257 DeoptimizeIf(al, environment); |
1258 return; | 1258 return; |
1259 | 1259 |
1260 case 1: | 1260 case 1: |
1261 if (divisor > 0) { | 1261 if (divisor > 0) { |
1262 __ Move(result, dividend); | 1262 __ Move(result, dividend); |
1263 } else { | 1263 } else { |
1264 __ rsb(result, dividend, Operand(0), SetCC); | 1264 __ rsb(result, dividend, Operand::Zero(), SetCC); |
1265 DeoptimizeIf(vs, environment); | 1265 DeoptimizeIf(vs, environment); |
1266 } | 1266 } |
1267 // Compute the remainder. | 1267 // Compute the remainder. |
1268 __ mov(remainder, Operand(0)); | 1268 __ mov(remainder, Operand::Zero()); |
1269 return; | 1269 return; |
1270 | 1270 |
1271 default: | 1271 default: |
1272 if (IsPowerOf2(divisor_abs)) { | 1272 if (IsPowerOf2(divisor_abs)) { |
1273 // Branch and condition free code for integer division by a power | 1273 // Branch and condition free code for integer division by a power |
1274 // of two. | 1274 // of two. |
1275 int32_t power = WhichPowerOf2(divisor_abs); | 1275 int32_t power = WhichPowerOf2(divisor_abs); |
1276 if (power > 1) { | 1276 if (power > 1) { |
1277 __ mov(scratch, Operand(dividend, ASR, power - 1)); | 1277 __ mov(scratch, Operand(dividend, ASR, power - 1)); |
1278 } | 1278 } |
1279 __ add(scratch, dividend, Operand(scratch, LSR, 32 - power)); | 1279 __ add(scratch, dividend, Operand(scratch, LSR, 32 - power)); |
1280 __ mov(result, Operand(scratch, ASR, power)); | 1280 __ mov(result, Operand(scratch, ASR, power)); |
1281 // Negate if necessary. | 1281 // Negate if necessary. |
1282 // We don't need to check for overflow because the case '-1' is | 1282 // We don't need to check for overflow because the case '-1' is |
1283 // handled separately. | 1283 // handled separately. |
1284 if (divisor < 0) { | 1284 if (divisor < 0) { |
1285 ASSERT(divisor != -1); | 1285 ASSERT(divisor != -1); |
1286 __ rsb(result, result, Operand(0)); | 1286 __ rsb(result, result, Operand::Zero()); |
1287 } | 1287 } |
1288 // Compute the remainder. | 1288 // Compute the remainder. |
1289 if (divisor > 0) { | 1289 if (divisor > 0) { |
1290 __ sub(remainder, dividend, Operand(result, LSL, power)); | 1290 __ sub(remainder, dividend, Operand(result, LSL, power)); |
1291 } else { | 1291 } else { |
1292 __ add(remainder, dividend, Operand(result, LSL, power)); | 1292 __ add(remainder, dividend, Operand(result, LSL, power)); |
1293 } | 1293 } |
1294 return; | 1294 return; |
1295 } else { | 1295 } else { |
1296 // Use magic numbers for a few specific divisors. | 1296 // Use magic numbers for a few specific divisors. |
(...skipping 15 matching lines...)
1312 | 1312 |
1313 __ mov(ip, Operand(M)); | 1313 __ mov(ip, Operand(M)); |
1314 __ smull(ip, scratch, dividend, ip); | 1314 __ smull(ip, scratch, dividend, ip); |
1315 if (M < 0) { | 1315 if (M < 0) { |
1316 __ add(scratch, scratch, Operand(dividend)); | 1316 __ add(scratch, scratch, Operand(dividend)); |
1317 } | 1317 } |
1318 if (s > 0) { | 1318 if (s > 0) { |
1319 __ mov(scratch, Operand(scratch, ASR, s)); | 1319 __ mov(scratch, Operand(scratch, ASR, s)); |
1320 } | 1320 } |
1321 __ add(result, scratch, Operand(dividend, LSR, 31)); | 1321 __ add(result, scratch, Operand(dividend, LSR, 31)); |
1322 if (divisor < 0) __ rsb(result, result, Operand(0)); | 1322 if (divisor < 0) __ rsb(result, result, Operand::Zero()); |
1323 // Compute the remainder. | 1323 // Compute the remainder. |
1324 __ mov(ip, Operand(divisor)); | 1324 __ mov(ip, Operand(divisor)); |
1325 // This sequence could be replaced with 'mls' when | 1325 // This sequence could be replaced with 'mls' when |
1326 // it gets implemented. | 1326 // it gets implemented. |
1327 __ mul(scratch, result, ip); | 1327 __ mul(scratch, result, ip); |
1328 __ sub(remainder, dividend, scratch); | 1328 __ sub(remainder, dividend, scratch); |
1329 } | 1329 } |
1330 } | 1330 } |
1331 } | 1331 } |
1332 | 1332 |
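EmitSignedIntegerDivisionByConstant above uses two techniques: a branch-free shift sequence for powers of two, and a multiply-high with a precomputed magic constant otherwise. A C++ sketch of the arithmetic behind the emitted instructions; M and s stand in for the DivMagicNumbers fields, which are elided from this diff:

    #include <cstdint>

    // Truncating division by 2^power: bias a negative dividend by divisor - 1
    // before the arithmetic shift (the asr/lsr/add/asr sequence above).
    int32_t DivideByPowerOfTwo(int32_t dividend, int power) {
      uint32_t sign = static_cast<uint32_t>(dividend >> 31);      // all ones if negative
      int32_t bias = static_cast<int32_t>(sign >> (32 - power));  // divisor - 1 or 0
      return (dividend + bias) >> power;
    }

    // Truncating division by an arbitrary constant via a magic multiplier
    // (the smull/add/asr/add sequence above).
    int32_t DivideByMagicNumber(int32_t dividend, int32_t M, int32_t s,
                                int32_t divisor) {
      int64_t product = static_cast<int64_t>(dividend) * M;             // smull
      int32_t high = static_cast<int32_t>(product >> 32);
      if (M < 0) high += dividend;                                       // add
      if (s > 0) high >>= s;                                             // asr #s
      int32_t quotient = high + (static_cast<uint32_t>(dividend) >> 31);
      return divisor < 0 ? -quotient : quotient;                         // rsb if needed
    }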
(...skipping 14 matching lines...)
1347 LDivI* instr_; | 1347 LDivI* instr_; |
1348 }; | 1348 }; |
1349 | 1349 |
1350 const Register left = ToRegister(instr->left()); | 1350 const Register left = ToRegister(instr->left()); |
1351 const Register right = ToRegister(instr->right()); | 1351 const Register right = ToRegister(instr->right()); |
1352 const Register scratch = scratch0(); | 1352 const Register scratch = scratch0(); |
1353 const Register result = ToRegister(instr->result()); | 1353 const Register result = ToRegister(instr->result()); |
1354 | 1354 |
1355 // Check for x / 0. | 1355 // Check for x / 0. |
1356 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { | 1356 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { |
1357 __ cmp(right, Operand(0)); | 1357 __ cmp(right, Operand::Zero()); |
1358 DeoptimizeIf(eq, instr->environment()); | 1358 DeoptimizeIf(eq, instr->environment()); |
1359 } | 1359 } |
1360 | 1360 |
1361 // Check for (0 / -x) that will produce negative zero. | 1361 // Check for (0 / -x) that will produce negative zero. |
1362 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1362 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1363 Label left_not_zero; | 1363 Label left_not_zero; |
1364 __ cmp(left, Operand(0)); | 1364 __ cmp(left, Operand::Zero()); |
1365 __ b(ne, &left_not_zero); | 1365 __ b(ne, &left_not_zero); |
1366 __ cmp(right, Operand(0)); | 1366 __ cmp(right, Operand::Zero()); |
1367 DeoptimizeIf(mi, instr->environment()); | 1367 DeoptimizeIf(mi, instr->environment()); |
1368 __ bind(&left_not_zero); | 1368 __ bind(&left_not_zero); |
1369 } | 1369 } |
1370 | 1370 |
1371 // Check for (kMinInt / -1). | 1371 // Check for (kMinInt / -1). |
1372 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { | 1372 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { |
1373 Label left_not_min_int; | 1373 Label left_not_min_int; |
1374 __ cmp(left, Operand(kMinInt)); | 1374 __ cmp(left, Operand(kMinInt)); |
1375 __ b(ne, &left_not_min_int); | 1375 __ b(ne, &left_not_min_int); |
1376 __ cmp(right, Operand(-1)); | 1376 __ cmp(right, Operand(-1)); |
(...skipping 57 matching lines...)
1434 const Register scratch = scratch0(); | 1434 const Register scratch = scratch0(); |
1435 | 1435 |
1436 if (!CpuFeatures::IsSupported(SUDIV)) { | 1436 if (!CpuFeatures::IsSupported(SUDIV)) { |
1437 // If the CPU doesn't support the sdiv instruction, we only optimize when we | 1437 // If the CPU doesn't support the sdiv instruction, we only optimize when we |
1438 // have magic numbers for the divisor. The standard integer division routine | 1438 // have magic numbers for the divisor. The standard integer division routine |
1439 // is usually slower than transitioning to VFP. | 1439 // is usually slower than transitioning to VFP. |
1440 ASSERT(instr->right()->IsConstantOperand()); | 1440 ASSERT(instr->right()->IsConstantOperand()); |
1441 int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right())); | 1441 int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right())); |
1442 ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor)); | 1442 ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor)); |
1443 if (divisor < 0) { | 1443 if (divisor < 0) { |
1444 __ cmp(left, Operand(0)); | 1444 __ cmp(left, Operand::Zero()); |
1445 DeoptimizeIf(eq, instr->environment()); | 1445 DeoptimizeIf(eq, instr->environment()); |
1446 } | 1446 } |
1447 EmitSignedIntegerDivisionByConstant(result, | 1447 EmitSignedIntegerDivisionByConstant(result, |
1448 left, | 1448 left, |
1449 divisor, | 1449 divisor, |
1450 remainder, | 1450 remainder, |
1451 scratch, | 1451 scratch, |
1452 instr->environment()); | 1452 instr->environment()); |
1453 // We performed a truncating division. Correct the result if necessary. | 1453 // We performed a truncating division. Correct the result if necessary. |
1454 __ cmp(remainder, Operand(0)); | 1454 __ cmp(remainder, Operand::Zero()); |
1455 __ teq(remainder, Operand(divisor), ne); | 1455 __ teq(remainder, Operand(divisor), ne); |
1456 __ sub(result, result, Operand(1), LeaveCC, mi); | 1456 __ sub(result, result, Operand(1), LeaveCC, mi); |
1457 } else { | 1457 } else { |
1458 CpuFeatures::Scope scope(SUDIV); | 1458 CpuFeatures::Scope scope(SUDIV); |
1459 const Register right = ToRegister(instr->right()); | 1459 const Register right = ToRegister(instr->right()); |
1460 | 1460 |
1461 // Check for x / 0. | 1461 // Check for x / 0. |
1462 __ cmp(right, Operand(0)); | 1462 __ cmp(right, Operand::Zero()); |
1463 DeoptimizeIf(eq, instr->environment()); | 1463 DeoptimizeIf(eq, instr->environment()); |
1464 | 1464 |
1465 // Check for (kMinInt / -1). | 1465 // Check for (kMinInt / -1). |
1466 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { | 1466 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { |
1467 Label left_not_min_int; | 1467 Label left_not_min_int; |
1468 __ cmp(left, Operand(kMinInt)); | 1468 __ cmp(left, Operand(kMinInt)); |
1469 __ b(ne, &left_not_min_int); | 1469 __ b(ne, &left_not_min_int); |
1470 __ cmp(right, Operand(-1)); | 1470 __ cmp(right, Operand(-1)); |
1471 DeoptimizeIf(eq, instr->environment()); | 1471 DeoptimizeIf(eq, instr->environment()); |
1472 __ bind(&left_not_min_int); | 1472 __ bind(&left_not_min_int); |
1473 } | 1473 } |
1474 | 1474 |
1475 // Check for (0 / -x) that will produce negative zero. | 1475 // Check for (0 / -x) that will produce negative zero. |
1476 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1476 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1477 __ cmp(right, Operand(0)); | 1477 __ cmp(right, Operand::Zero()); |
1478 __ cmp(left, Operand(0), mi); | 1478 __ cmp(left, Operand::Zero(), mi); |
1479 // "right" can't be zero because the code would have already been | 1479 // "right" can't be zero because the code would have already been |
1480 // deoptimized. The Z flag is set only if (right < 0) and (left == 0). | 1480 // deoptimized. The Z flag is set only if (right < 0) and (left == 0). |
1481 // In this case we need to deoptimize to produce a -0. | 1481 // In this case we need to deoptimize to produce a -0. |
1482 DeoptimizeIf(eq, instr->environment()); | 1482 DeoptimizeIf(eq, instr->environment()); |
1483 } | 1483 } |
1484 | 1484 |
1485 Label done; | 1485 Label done; |
1486 __ sdiv(result, left, right); | 1486 __ sdiv(result, left, right); |
1487 // If both operands have the same sign then we are done. | 1487 // If both operands have the same sign then we are done. |
1488 __ eor(remainder, left, Operand(right), SetCC); | 1488 __ eor(remainder, left, Operand(right), SetCC); |
1489 __ b(pl, &done); | 1489 __ b(pl, &done); |
1490 | 1490 |
1491 // Check if the result needs to be corrected. | 1491 // Check if the result needs to be corrected. |
1492 __ mls(remainder, result, right, left); | 1492 __ mls(remainder, result, right, left); |
1493 __ cmp(remainder, Operand(0)); | 1493 __ cmp(remainder, Operand::Zero()); |
1494 __ sub(result, result, Operand(1), LeaveCC, ne); | 1494 __ sub(result, result, Operand(1), LeaveCC, ne); |
1495 | 1495 |
1496 __ bind(&done); | 1496 __ bind(&done); |
1497 } | 1497 } |
1498 } | 1498 } |
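Both branches of DoMathFloorOfDivI above finish by stepping the truncated quotient down by one when the operands have opposite signs and the division is inexact, turning ARM's truncating sdiv into a flooring divide. A sketch of that correction (illustrative only):

    #include <cstdint>

    int32_t FlooringDivide(int32_t left, int32_t right) {
      int32_t quotient = left / right;               // sdiv truncates toward zero
      int32_t remainder = left - quotient * right;   // mls
      // eor .. SetCC (or cmp + teq): opposite signs and a non-zero remainder.
      if (((left ^ right) < 0) && remainder != 0) quotient -= 1;
      return quotient;
    }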
1499 | 1499 |
1500 | 1500 |
1501 void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map, | 1501 void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map, |
1502 LOperand* left_argument, | 1502 LOperand* left_argument, |
1503 LOperand* right_argument, | 1503 LOperand* right_argument, |
(...skipping 38 matching lines...)
1542 bool bailout_on_minus_zero = | 1542 bool bailout_on_minus_zero = |
1543 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); | 1543 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); |
1544 | 1544 |
1545 if (right_op->IsConstantOperand() && !can_overflow) { | 1545 if (right_op->IsConstantOperand() && !can_overflow) { |
1546 // Use optimized code for specific constants. | 1546 // Use optimized code for specific constants. |
1547 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); | 1547 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); |
1548 | 1548 |
1549 if (bailout_on_minus_zero && (constant < 0)) { | 1549 if (bailout_on_minus_zero && (constant < 0)) { |
1550 // The case of a zero constant will be handled separately. | 1550 // The case of a zero constant will be handled separately. |
1551 // If constant is negative and left is zero, the result should be -0. | 1551 // If constant is negative and left is zero, the result should be -0. |
1552 __ cmp(left, Operand(0)); | 1552 __ cmp(left, Operand::Zero()); |
1553 DeoptimizeIf(eq, instr->environment()); | 1553 DeoptimizeIf(eq, instr->environment()); |
1554 } | 1554 } |
1555 | 1555 |
1556 switch (constant) { | 1556 switch (constant) { |
1557 case -1: | 1557 case -1: |
1558 __ rsb(result, left, Operand(0)); | 1558 __ rsb(result, left, Operand::Zero()); |
1559 break; | 1559 break; |
1560 case 0: | 1560 case 0: |
1561 if (bailout_on_minus_zero) { | 1561 if (bailout_on_minus_zero) { |
1562 // If left is strictly negative and the constant is zero, the | 1562 // If left is strictly negative and the constant is zero, the |
1563 // result is -0. Deoptimize if required, otherwise return 0. | 1563 // result is -0. Deoptimize if required, otherwise return 0. |
1564 __ cmp(left, Operand(0)); | 1564 __ cmp(left, Operand::Zero()); |
1565 DeoptimizeIf(mi, instr->environment()); | 1565 DeoptimizeIf(mi, instr->environment()); |
1566 } | 1566 } |
1567 __ mov(result, Operand(0)); | 1567 __ mov(result, Operand::Zero()); |
1568 break; | 1568 break; |
1569 case 1: | 1569 case 1: |
1570 __ Move(result, left); | 1570 __ Move(result, left); |
1571 break; | 1571 break; |
1572 default: | 1572 default: |
1573 // Multiplying by powers of two and powers of two plus or minus | 1573 // Multiplying by powers of two and powers of two plus or minus |
1574 // one can be done faster with shifted operands. | 1574 // one can be done faster with shifted operands. |
1575 // For other constants we emit standard code. | 1575 // For other constants we emit standard code. |
1576 int32_t mask = constant >> 31; | 1576 int32_t mask = constant >> 31; |
1577 uint32_t constant_abs = (constant + mask) ^ mask; | 1577 uint32_t constant_abs = (constant + mask) ^ mask; |
1578 | 1578 |
1579 if (IsPowerOf2(constant_abs) || | 1579 if (IsPowerOf2(constant_abs) || |
1580 IsPowerOf2(constant_abs - 1) || | 1580 IsPowerOf2(constant_abs - 1) || |
1581 IsPowerOf2(constant_abs + 1)) { | 1581 IsPowerOf2(constant_abs + 1)) { |
1582 if (IsPowerOf2(constant_abs)) { | 1582 if (IsPowerOf2(constant_abs)) { |
1583 int32_t shift = WhichPowerOf2(constant_abs); | 1583 int32_t shift = WhichPowerOf2(constant_abs); |
1584 __ mov(result, Operand(left, LSL, shift)); | 1584 __ mov(result, Operand(left, LSL, shift)); |
1585 } else if (IsPowerOf2(constant_abs - 1)) { | 1585 } else if (IsPowerOf2(constant_abs - 1)) { |
1586 int32_t shift = WhichPowerOf2(constant_abs - 1); | 1586 int32_t shift = WhichPowerOf2(constant_abs - 1); |
1587 __ add(result, left, Operand(left, LSL, shift)); | 1587 __ add(result, left, Operand(left, LSL, shift)); |
1588 } else if (IsPowerOf2(constant_abs + 1)) { | 1588 } else if (IsPowerOf2(constant_abs + 1)) { |
1589 int32_t shift = WhichPowerOf2(constant_abs + 1); | 1589 int32_t shift = WhichPowerOf2(constant_abs + 1); |
1590 __ rsb(result, left, Operand(left, LSL, shift)); | 1590 __ rsb(result, left, Operand(left, LSL, shift)); |
1591 } | 1591 } |
1592 | 1592 |
1593 // Correct the sign of the result if the constant is negative. | 1593 // Correct the sign of the result if the constant is negative. |
1594 if (constant < 0) __ rsb(result, result, Operand(0)); | 1594 if (constant < 0) __ rsb(result, result, Operand::Zero()); |
1595 | 1595 |
1596 } else { | 1596 } else { |
1597 // Generate standard code. | 1597 // Generate standard code. |
1598 __ mov(ip, Operand(constant)); | 1598 __ mov(ip, Operand(constant)); |
1599 __ mul(result, left, ip); | 1599 __ mul(result, left, ip); |
1600 } | 1600 } |
1601 } | 1601 } |
1602 | 1602 |
1603 } else { | 1603 } else { |
1604 Register right = EmitLoadRegister(right_op, scratch); | 1604 Register right = EmitLoadRegister(right_op, scratch); |
1605 if (bailout_on_minus_zero) { | 1605 if (bailout_on_minus_zero) { |
1606 __ orr(ToRegister(instr->temp()), left, right); | 1606 __ orr(ToRegister(instr->temp()), left, right); |
1607 } | 1607 } |
1608 | 1608 |
1609 if (can_overflow) { | 1609 if (can_overflow) { |
1610 // scratch:result = left * right. | 1610 // scratch:result = left * right. |
1611 __ smull(result, scratch, left, right); | 1611 __ smull(result, scratch, left, right); |
1612 __ cmp(scratch, Operand(result, ASR, 31)); | 1612 __ cmp(scratch, Operand(result, ASR, 31)); |
1613 DeoptimizeIf(ne, instr->environment()); | 1613 DeoptimizeIf(ne, instr->environment()); |
1614 } else { | 1614 } else { |
1615 __ mul(result, left, right); | 1615 __ mul(result, left, right); |
1616 } | 1616 } |
1617 | 1617 |
1618 if (bailout_on_minus_zero) { | 1618 if (bailout_on_minus_zero) { |
1619 // Bail out if the result is supposed to be negative zero. | 1619 // Bail out if the result is supposed to be negative zero. |
1620 Label done; | 1620 Label done; |
1621 __ cmp(result, Operand(0)); | 1621 __ cmp(result, Operand::Zero()); |
1622 __ b(ne, &done); | 1622 __ b(ne, &done); |
1623 __ cmp(ToRegister(instr->temp()), Operand(0)); | 1623 __ cmp(ToRegister(instr->temp()), Operand::Zero()); |
1624 DeoptimizeIf(mi, instr->environment()); | 1624 DeoptimizeIf(mi, instr->environment()); |
1625 __ bind(&done); | 1625 __ bind(&done); |
1626 } | 1626 } |
1627 } | 1627 } |
1628 } | 1628 } |
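The can_overflow path in DoMulI above detects 32-bit overflow by checking that the high word of the 64-bit product is the sign-extension of the low word. As a sketch:

    #include <cstdint>

    // True when left * right does not fit in int32_t; mirrors
    //   smull result, scratch, left, right
    //   cmp   scratch, Operand(result, ASR, 31)
    bool MulOverflows32(int32_t left, int32_t right) {
      int64_t product = static_cast<int64_t>(left) * right;
      int32_t low = static_cast<int32_t>(product);
      int32_t high = static_cast<int32_t>(product >> 32);
      return high != (low >> 31);
    }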
1629 | 1629 |
1630 | 1630 |
1631 void LCodeGen::DoBitI(LBitI* instr) { | 1631 void LCodeGen::DoBitI(LBitI* instr) { |
1632 LOperand* left_op = instr->left(); | 1632 LOperand* left_op = instr->left(); |
1633 LOperand* right_op = instr->right(); | 1633 LOperand* right_op = instr->right(); |
(...skipping 459 matching lines...)
2093 } | 2093 } |
2094 | 2094 |
2095 | 2095 |
2096 void LCodeGen::DoBranch(LBranch* instr) { | 2096 void LCodeGen::DoBranch(LBranch* instr) { |
2097 int true_block = chunk_->LookupDestination(instr->true_block_id()); | 2097 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
2098 int false_block = chunk_->LookupDestination(instr->false_block_id()); | 2098 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
2099 | 2099 |
2100 Representation r = instr->hydrogen()->value()->representation(); | 2100 Representation r = instr->hydrogen()->value()->representation(); |
2101 if (r.IsInteger32()) { | 2101 if (r.IsInteger32()) { |
2102 Register reg = ToRegister(instr->value()); | 2102 Register reg = ToRegister(instr->value()); |
2103 __ cmp(reg, Operand(0)); | 2103 __ cmp(reg, Operand::Zero()); |
2104 EmitBranch(true_block, false_block, ne); | 2104 EmitBranch(true_block, false_block, ne); |
2105 } else if (r.IsDouble()) { | 2105 } else if (r.IsDouble()) { |
2106 CpuFeatures::Scope scope(VFP2); | 2106 CpuFeatures::Scope scope(VFP2); |
2107 DwVfpRegister reg = ToDoubleRegister(instr->value()); | 2107 DwVfpRegister reg = ToDoubleRegister(instr->value()); |
2108 Register scratch = scratch0(); | 2108 Register scratch = scratch0(); |
2109 | 2109 |
2110 // Test the double value. Zero and NaN are false. | 2110 // Test the double value. Zero and NaN are false. |
2111 __ VFPCompareAndLoadFlags(reg, 0.0, scratch); | 2111 __ VFPCompareAndLoadFlags(reg, 0.0, scratch); |
2112 __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit)); | 2112 __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit)); |
2113 EmitBranch(true_block, false_block, eq); | 2113 EmitBranch(true_block, false_block, eq); |
2114 } else { | 2114 } else { |
2115 ASSERT(r.IsTagged()); | 2115 ASSERT(r.IsTagged()); |
2116 Register reg = ToRegister(instr->value()); | 2116 Register reg = ToRegister(instr->value()); |
2117 HType type = instr->hydrogen()->value()->type(); | 2117 HType type = instr->hydrogen()->value()->type(); |
2118 if (type.IsBoolean()) { | 2118 if (type.IsBoolean()) { |
2119 __ CompareRoot(reg, Heap::kTrueValueRootIndex); | 2119 __ CompareRoot(reg, Heap::kTrueValueRootIndex); |
2120 EmitBranch(true_block, false_block, eq); | 2120 EmitBranch(true_block, false_block, eq); |
2121 } else if (type.IsSmi()) { | 2121 } else if (type.IsSmi()) { |
2122 __ cmp(reg, Operand(0)); | 2122 __ cmp(reg, Operand::Zero()); |
2123 EmitBranch(true_block, false_block, ne); | 2123 EmitBranch(true_block, false_block, ne); |
2124 } else { | 2124 } else { |
2125 Label* true_label = chunk_->GetAssemblyLabel(true_block); | 2125 Label* true_label = chunk_->GetAssemblyLabel(true_block); |
2126 Label* false_label = chunk_->GetAssemblyLabel(false_block); | 2126 Label* false_label = chunk_->GetAssemblyLabel(false_block); |
2127 | 2127 |
2128 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); | 2128 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); |
2129 // Avoid deopts in the case where we've never executed this path before. | 2129 // Avoid deopts in the case where we've never executed this path before. |
2130 if (expected.IsEmpty()) expected = ToBooleanStub::all_types(); | 2130 if (expected.IsEmpty()) expected = ToBooleanStub::all_types(); |
2131 | 2131 |
2132 if (expected.Contains(ToBooleanStub::UNDEFINED)) { | 2132 if (expected.Contains(ToBooleanStub::UNDEFINED)) { |
2133 // undefined -> false. | 2133 // undefined -> false. |
2134 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex); | 2134 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex); |
2135 __ b(eq, false_label); | 2135 __ b(eq, false_label); |
2136 } | 2136 } |
2137 if (expected.Contains(ToBooleanStub::BOOLEAN)) { | 2137 if (expected.Contains(ToBooleanStub::BOOLEAN)) { |
2138 // Boolean -> its value. | 2138 // Boolean -> its value. |
2139 __ CompareRoot(reg, Heap::kTrueValueRootIndex); | 2139 __ CompareRoot(reg, Heap::kTrueValueRootIndex); |
2140 __ b(eq, true_label); | 2140 __ b(eq, true_label); |
2141 __ CompareRoot(reg, Heap::kFalseValueRootIndex); | 2141 __ CompareRoot(reg, Heap::kFalseValueRootIndex); |
2142 __ b(eq, false_label); | 2142 __ b(eq, false_label); |
2143 } | 2143 } |
2144 if (expected.Contains(ToBooleanStub::NULL_TYPE)) { | 2144 if (expected.Contains(ToBooleanStub::NULL_TYPE)) { |
2145 // 'null' -> false. | 2145 // 'null' -> false. |
2146 __ CompareRoot(reg, Heap::kNullValueRootIndex); | 2146 __ CompareRoot(reg, Heap::kNullValueRootIndex); |
2147 __ b(eq, false_label); | 2147 __ b(eq, false_label); |
2148 } | 2148 } |
2149 | 2149 |
2150 if (expected.Contains(ToBooleanStub::SMI)) { | 2150 if (expected.Contains(ToBooleanStub::SMI)) { |
2151 // Smis: 0 -> false, all other -> true. | 2151 // Smis: 0 -> false, all other -> true. |
2152 __ cmp(reg, Operand(0)); | 2152 __ cmp(reg, Operand::Zero()); |
2153 __ b(eq, false_label); | 2153 __ b(eq, false_label); |
2154 __ JumpIfSmi(reg, true_label); | 2154 __ JumpIfSmi(reg, true_label); |
2155 } else if (expected.NeedsMap()) { | 2155 } else if (expected.NeedsMap()) { |
2156 // If we need a map later and have a Smi -> deopt. | 2156 // If we need a map later and have a Smi -> deopt. |
2157 __ tst(reg, Operand(kSmiTagMask)); | 2157 __ tst(reg, Operand(kSmiTagMask)); |
2158 DeoptimizeIf(eq, instr->environment()); | 2158 DeoptimizeIf(eq, instr->environment()); |
2159 } | 2159 } |
2160 | 2160 |
2161 const Register map = scratch0(); | 2161 const Register map = scratch0(); |
2162 if (expected.NeedsMap()) { | 2162 if (expected.NeedsMap()) { |
(...skipping 12 matching lines...)
2175 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE); | 2175 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE); |
2176 __ b(ge, true_label); | 2176 __ b(ge, true_label); |
2177 } | 2177 } |
2178 | 2178 |
2179 if (expected.Contains(ToBooleanStub::STRING)) { | 2179 if (expected.Contains(ToBooleanStub::STRING)) { |
2180 // String value -> false iff empty. | 2180 // String value -> false iff empty. |
2181 Label not_string; | 2181 Label not_string; |
2182 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE); | 2182 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE); |
2183 __ b(ge, &not_string); | 2183 __ b(ge, &not_string); |
2184 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset)); | 2184 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset)); |
2185 __ cmp(ip, Operand(0)); | 2185 __ cmp(ip, Operand::Zero()); |
2186 __ b(ne, true_label); | 2186 __ b(ne, true_label); |
2187 __ b(false_label); | 2187 __ b(false_label); |
2188 __ bind(&not_string); | 2188 __ bind(&not_string); |
2189 } | 2189 } |
2190 | 2190 |
2191 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { | 2191 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { |
2192 CpuFeatures::Scope scope(VFP2); | 2192 CpuFeatures::Scope scope(VFP2); |
2193 // heap number -> false iff +0, -0, or NaN. | 2193 // heap number -> false iff +0, -0, or NaN. |
2194 DwVfpRegister dbl_scratch = double_scratch0(); | 2194 DwVfpRegister dbl_scratch = double_scratch0(); |
2195 Label not_heap_number; | 2195 Label not_heap_number; |
(...skipping 274 matching lines...)
2470 } | 2470 } |
2471 | 2471 |
2472 | 2472 |
2473 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { | 2473 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { |
2474 Token::Value op = instr->op(); | 2474 Token::Value op = instr->op(); |
2475 int true_block = chunk_->LookupDestination(instr->true_block_id()); | 2475 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
2476 int false_block = chunk_->LookupDestination(instr->false_block_id()); | 2476 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
2477 | 2477 |
2478 Handle<Code> ic = CompareIC::GetUninitialized(op); | 2478 Handle<Code> ic = CompareIC::GetUninitialized(op); |
2479 CallCode(ic, RelocInfo::CODE_TARGET, instr); | 2479 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
2480 __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined. | 2480 // This instruction also signals no smi code inlined. |
| 2481 __ cmp(r0, Operand::Zero()); |
2481 | 2482 |
2482 Condition condition = ComputeCompareCondition(op); | 2483 Condition condition = ComputeCompareCondition(op); |
2483 | 2484 |
2484 EmitBranch(true_block, false_block, condition); | 2485 EmitBranch(true_block, false_block, condition); |
2485 } | 2486 } |
2486 | 2487 |
2487 | 2488 |
2488 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { | 2489 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { |
2489 InstanceType from = instr->from(); | 2490 InstanceType from = instr->from(); |
2490 InstanceType to = instr->to(); | 2491 InstanceType to = instr->to(); |
(...skipping 153 matching lines...)
2644 } | 2645 } |
2645 | 2646 |
2646 | 2647 |
2647 void LCodeGen::DoInstanceOf(LInstanceOf* instr) { | 2648 void LCodeGen::DoInstanceOf(LInstanceOf* instr) { |
2648 ASSERT(ToRegister(instr->left()).is(r0)); // Object is in r0. | 2649 ASSERT(ToRegister(instr->left()).is(r0)); // Object is in r0. |
2649 ASSERT(ToRegister(instr->right()).is(r1)); // Function is in r1. | 2650 ASSERT(ToRegister(instr->right()).is(r1)); // Function is in r1. |
2650 | 2651 |
2651 InstanceofStub stub(InstanceofStub::kArgsInRegisters); | 2652 InstanceofStub stub(InstanceofStub::kArgsInRegisters); |
2652 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 2653 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
2653 | 2654 |
2654 __ cmp(r0, Operand(0)); | 2655 __ cmp(r0, Operand::Zero()); |
2655 __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne); | 2656 __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne); |
2656 __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq); | 2657 __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq); |
2657 } | 2658 } |
2658 | 2659 |
2659 | 2660 |
2660 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { | 2661 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { |
2661 class DeferredInstanceOfKnownGlobal: public LDeferredCode { | 2662 class DeferredInstanceOfKnownGlobal: public LDeferredCode { |
2662 public: | 2663 public: |
2663 DeferredInstanceOfKnownGlobal(LCodeGen* codegen, | 2664 DeferredInstanceOfKnownGlobal(LCodeGen* codegen, |
2664 LInstanceOfKnownGlobal* instr) | 2665 LInstanceOfKnownGlobal* instr) |
(...skipping 123 matching lines...)
2788 // restore all registers. | 2789 // restore all registers. |
2789 __ StoreToSafepointRegisterSlot(result, result); | 2790 __ StoreToSafepointRegisterSlot(result, result); |
2790 } | 2791 } |
2791 | 2792 |
2792 | 2793 |
2793 void LCodeGen::DoCmpT(LCmpT* instr) { | 2794 void LCodeGen::DoCmpT(LCmpT* instr) { |
2794 Token::Value op = instr->op(); | 2795 Token::Value op = instr->op(); |
2795 | 2796 |
2796 Handle<Code> ic = CompareIC::GetUninitialized(op); | 2797 Handle<Code> ic = CompareIC::GetUninitialized(op); |
2797 CallCode(ic, RelocInfo::CODE_TARGET, instr); | 2798 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
2798 __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined. | 2799 // This instruction also signals no smi code inlined. |
| 2800 __ cmp(r0, Operand::Zero()); |
2799 | 2801 |
2800 Condition condition = ComputeCompareCondition(op); | 2802 Condition condition = ComputeCompareCondition(op); |
2801 __ LoadRoot(ToRegister(instr->result()), | 2803 __ LoadRoot(ToRegister(instr->result()), |
2802 Heap::kTrueValueRootIndex, | 2804 Heap::kTrueValueRootIndex, |
2803 condition); | 2805 condition); |
2804 __ LoadRoot(ToRegister(instr->result()), | 2806 __ LoadRoot(ToRegister(instr->result()), |
2805 Heap::kFalseValueRootIndex, | 2807 Heap::kFalseValueRootIndex, |
2806 NegateCondition(condition)); | 2808 NegateCondition(condition)); |
2807 } | 2809 } |
2808 | 2810 |
(...skipping 719 matching lines...)
3528 // number of arguments. | 3530 // number of arguments. |
3529 __ push(receiver); | 3531 __ push(receiver); |
3530 __ mov(receiver, length); | 3532 __ mov(receiver, length); |
3531 // The arguments are at a one pointer size offset from elements. | 3533 // The arguments are at a one pointer size offset from elements. |
3532 __ add(elements, elements, Operand(1 * kPointerSize)); | 3534 __ add(elements, elements, Operand(1 * kPointerSize)); |
3533 | 3535 |
3534 // Loop through the arguments pushing them onto the execution | 3536 // Loop through the arguments pushing them onto the execution |
3535 // stack. | 3537 // stack. |
3536 Label invoke, loop; | 3538 Label invoke, loop; |
3537 // length is a small non-negative integer, due to the test above. | 3539 // length is a small non-negative integer, due to the test above. |
3538 __ cmp(length, Operand(0)); | 3540 __ cmp(length, Operand::Zero()); |
3539 __ b(eq, &invoke); | 3541 __ b(eq, &invoke); |
3540 __ bind(&loop); | 3542 __ bind(&loop); |
3541 __ ldr(scratch, MemOperand(elements, length, LSL, 2)); | 3543 __ ldr(scratch, MemOperand(elements, length, LSL, 2)); |
3542 __ push(scratch); | 3544 __ push(scratch); |
3543 __ sub(length, length, Operand(1), SetCC); | 3545 __ sub(length, length, Operand(1), SetCC); |
3544 __ b(ne, &loop); | 3546 __ b(ne, &loop); |
3545 | 3547 |
3546 __ bind(&invoke); | 3548 __ bind(&invoke); |
3547 ASSERT(instr->HasPointerMap()); | 3549 ASSERT(instr->HasPointerMap()); |
3548 LPointerMap* pointers = instr->pointer_map(); | 3550 LPointerMap* pointers = instr->pointer_map(); |
(...skipping 183 matching lines...)
3732 __ StoreToSafepointRegisterSlot(tmp1, result); | 3734 __ StoreToSafepointRegisterSlot(tmp1, result); |
3733 } | 3735 } |
3734 | 3736 |
3735 __ bind(&done); | 3737 __ bind(&done); |
3736 } | 3738 } |
3737 | 3739 |
3738 | 3740 |
3739 void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) { | 3741 void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) { |
3740 Register input = ToRegister(instr->value()); | 3742 Register input = ToRegister(instr->value()); |
3741 Register result = ToRegister(instr->result()); | 3743 Register result = ToRegister(instr->result()); |
3742 __ cmp(input, Operand(0)); | 3744 __ cmp(input, Operand::Zero()); |
3743 __ Move(result, input, pl); | 3745 __ Move(result, input, pl); |
3744 // We can make rsb conditional because the previous cmp instruction | 3746 // We can make rsb conditional because the previous cmp instruction |
3745 // will clear the V (overflow) flag and rsb won't set this flag | 3747 // will clear the V (overflow) flag and rsb won't set this flag |
3746 // if input is positive. | 3748 // if input is positive. |
3747 __ rsb(result, input, Operand(0), SetCC, mi); | 3749 __ rsb(result, input, Operand::Zero(), SetCC, mi); |
3748 // Deoptimize on overflow. | 3750 // Deoptimize on overflow. |
3749 DeoptimizeIf(vs, instr->environment()); | 3751 DeoptimizeIf(vs, instr->environment()); |
3750 } | 3752 } |
3751 | 3753 |
3752 | 3754 |
3753 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { | 3755 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { |
3754 CpuFeatures::Scope scope(VFP2); | 3756 CpuFeatures::Scope scope(VFP2); |
3755 // Class for deferred case. | 3757 // Class for deferred case. |
3756 class DeferredMathAbsTaggedHeapNumber: public LDeferredCode { | 3758 class DeferredMathAbsTaggedHeapNumber: public LDeferredCode { |
3757 public: | 3759 public: |
(...skipping 38 matching lines...)
3796 __ EmitVFPTruncate(kRoundToMinusInf, | 3798 __ EmitVFPTruncate(kRoundToMinusInf, |
3797 result, | 3799 result, |
3798 input, | 3800 input, |
3799 scratch, | 3801 scratch, |
3800 double_scratch0()); | 3802 double_scratch0()); |
3801 DeoptimizeIf(ne, instr->environment()); | 3803 DeoptimizeIf(ne, instr->environment()); |
3802 | 3804 |
3803 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3805 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
3804 // Test for -0. | 3806 // Test for -0. |
3805 Label done; | 3807 Label done; |
3806 __ cmp(result, Operand(0)); | 3808 __ cmp(result, Operand::Zero()); |
3807 __ b(ne, &done); | 3809 __ b(ne, &done); |
3808 __ vmov(scratch, input.high()); | 3810 __ vmov(scratch, input.high()); |
3809 __ tst(scratch, Operand(HeapNumber::kSignMask)); | 3811 __ tst(scratch, Operand(HeapNumber::kSignMask)); |
3810 DeoptimizeIf(ne, instr->environment()); | 3812 DeoptimizeIf(ne, instr->environment()); |
3811 __ bind(&done); | 3813 __ bind(&done); |
3812 } | 3814 } |
3813 } | 3815 } |
3814 | 3816 |
3815 | 3817 |
3816 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { | 3818 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { |
3817 CpuFeatures::Scope scope(VFP2); | 3819 CpuFeatures::Scope scope(VFP2); |
3818 DwVfpRegister input = ToDoubleRegister(instr->value()); | 3820 DwVfpRegister input = ToDoubleRegister(instr->value()); |
3819 Register result = ToRegister(instr->result()); | 3821 Register result = ToRegister(instr->result()); |
3820 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); | 3822 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); |
3821 Register scratch = scratch0(); | 3823 Register scratch = scratch0(); |
3822 Label done, check_sign_on_zero; | 3824 Label done, check_sign_on_zero; |
3823 | 3825 |
3824 // Extract exponent bits. | 3826 // Extract exponent bits. |
3825 __ vmov(result, input.high()); | 3827 __ vmov(result, input.high()); |
3826 __ ubfx(scratch, | 3828 __ ubfx(scratch, |
3827 result, | 3829 result, |
3828 HeapNumber::kExponentShift, | 3830 HeapNumber::kExponentShift, |
3829 HeapNumber::kExponentBits); | 3831 HeapNumber::kExponentBits); |
3830 | 3832 |
3831 // If the number is in ]-0.5, +0.5[, the result is +/- 0. | 3833 // If the number is in ]-0.5, +0.5[, the result is +/- 0. |
3832 __ cmp(scratch, Operand(HeapNumber::kExponentBias - 2)); | 3834 __ cmp(scratch, Operand(HeapNumber::kExponentBias - 2)); |
3833 __ mov(result, Operand(0), LeaveCC, le); | 3835 __ mov(result, Operand::Zero(), LeaveCC, le); |
3834 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3836 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
3835 __ b(le, &check_sign_on_zero); | 3837 __ b(le, &check_sign_on_zero); |
3836 } else { | 3838 } else { |
3837 __ b(le, &done); | 3839 __ b(le, &done); |
3838 } | 3840 } |
3839 | 3841 |
3840 // The following conversion will not work with numbers | 3842 // The following conversion will not work with numbers |
3841 // outside of ]-2^32, 2^32[. | 3843 // outside of ]-2^32, 2^32[. |
3842 __ cmp(scratch, Operand(HeapNumber::kExponentBias + 32)); | 3844 __ cmp(scratch, Operand(HeapNumber::kExponentBias + 32)); |
3843 DeoptimizeIf(ge, instr->environment()); | 3845 DeoptimizeIf(ge, instr->environment()); |
3844 | 3846 |
3845 __ Vmov(double_scratch0(), 0.5, scratch); | 3847 __ Vmov(double_scratch0(), 0.5, scratch); |
3846 __ vadd(double_scratch0(), input, double_scratch0()); | 3848 __ vadd(double_scratch0(), input, double_scratch0()); |
3847 | 3849 |
3848 // Save the original sign for later comparison. | 3850 // Save the original sign for later comparison. |
3849 __ and_(scratch, result, Operand(HeapNumber::kSignMask)); | 3851 __ and_(scratch, result, Operand(HeapNumber::kSignMask)); |
3850 | 3852 |
3851 // Check sign of the result: if the sign changed, the input | 3853 // Check sign of the result: if the sign changed, the input |
3852 // value was in ]-0.5, 0[ and the result should be -0. | 3854 // value was in ]-0.5, 0[ and the result should be -0. |
3853 __ vmov(result, double_scratch0().high()); | 3855 __ vmov(result, double_scratch0().high()); |
3854 __ eor(result, result, Operand(scratch), SetCC); | 3856 __ eor(result, result, Operand(scratch), SetCC); |
3855 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3857 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
3856 DeoptimizeIf(mi, instr->environment()); | 3858 DeoptimizeIf(mi, instr->environment()); |
3857 } else { | 3859 } else { |
3858 __ mov(result, Operand(0), LeaveCC, mi); | 3860 __ mov(result, Operand::Zero(), LeaveCC, mi); |
3859 __ b(mi, &done); | 3861 __ b(mi, &done); |
3860 } | 3862 } |
3861 | 3863 |
3862 __ EmitVFPTruncate(kRoundToMinusInf, | 3864 __ EmitVFPTruncate(kRoundToMinusInf, |
3863 result, | 3865 result, |
3864 double_scratch0(), | 3866 double_scratch0(), |
3865 scratch, | 3867 scratch, |
3866 double_scratch1); | 3868 double_scratch1); |
3867 DeoptimizeIf(ne, instr->environment()); | 3869 DeoptimizeIf(ne, instr->environment()); |
3868 | 3870 |
3869 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3871 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
3870 // Test for -0. | 3872 // Test for -0. |
3871 __ cmp(result, Operand(0)); | 3873 __ cmp(result, Operand::Zero()); |
3872 __ b(ne, &done); | 3874 __ b(ne, &done); |
3873 __ bind(&check_sign_on_zero); | 3875 __ bind(&check_sign_on_zero); |
3874 __ vmov(scratch, input.high()); | 3876 __ vmov(scratch, input.high()); |
3875 __ tst(scratch, Operand(HeapNumber::kSignMask)); | 3877 __ tst(scratch, Operand(HeapNumber::kSignMask)); |
3876 DeoptimizeIf(ne, instr->environment()); | 3878 DeoptimizeIf(ne, instr->environment()); |
3877 } | 3879 } |
3878 __ bind(&done); | 3880 __ bind(&done); |
3879 } | 3881 } |
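Stripped of the deopt plumbing, DoMathRound above computes round-half-up: inputs in ]-0.5, +0.5[ collapse to +/-0, everything else has 0.5 added and is then truncated toward minus infinity. A rough C++ sketch, assuming the input is already known to lie in ]-2^32, 2^32[:

    #include <cmath>
    #include <cstdint>

    int32_t RoundToNearestInt(double input) {
      if (input > -0.5 && input < 0.5) return 0;              // small-exponent fast path
      return static_cast<int32_t>(std::floor(input + 0.5));   // vadd 0.5, kRoundToMinusInf
    }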
3880 | 3882 |
3881 | 3883 |
(...skipping 83 matching lines...)
3965 STATIC_ASSERT(kPointerSize == kSeedSize); | 3967 STATIC_ASSERT(kPointerSize == kSeedSize); |
3966 | 3968 |
3967 __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset)); | 3969 __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset)); |
3968 static const int kRandomSeedOffset = | 3970 static const int kRandomSeedOffset = |
3969 FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize; | 3971 FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize; |
3970 __ ldr(r2, FieldMemOperand(r0, kRandomSeedOffset)); | 3972 __ ldr(r2, FieldMemOperand(r0, kRandomSeedOffset)); |
3971 // r2: FixedArray of the native context's random seeds | 3973 // r2: FixedArray of the native context's random seeds |
3972 | 3974 |
3973 // Load state[0]. | 3975 // Load state[0]. |
3974 __ ldr(r1, FieldMemOperand(r2, ByteArray::kHeaderSize)); | 3976 __ ldr(r1, FieldMemOperand(r2, ByteArray::kHeaderSize)); |
3975 __ cmp(r1, Operand(0)); | 3977 __ cmp(r1, Operand::Zero()); |
3976 __ b(eq, deferred->entry()); | 3978 __ b(eq, deferred->entry()); |
3977 // Load state[1]. | 3979 // Load state[1]. |
3978 __ ldr(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize)); | 3980 __ ldr(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize)); |
3979 // r1: state[0]. | 3981 // r1: state[0]. |
3980 // r0: state[1]. | 3982 // r0: state[1]. |
3981 | 3983 |
3982 // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16) | 3984 // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16) |
3983 __ and_(r3, r1, Operand(0xFFFF)); | 3985 __ and_(r3, r1, Operand(0xFFFF)); |
3984 __ mov(r4, Operand(18273)); | 3986 __ mov(r4, Operand(18273)); |
3985 __ mul(r3, r3, r4); | 3987 __ mul(r3, r3, r4); |
(...skipping 14 matching lines...)
4000 __ add(r0, r0, Operand(r1, LSL, 14)); | 4002 __ add(r0, r0, Operand(r1, LSL, 14)); |
4001 | 4003 |
4002 __ bind(deferred->exit()); | 4004 __ bind(deferred->exit()); |
4003 // 0x41300000 is the top half of 1.0 x 2^20 as a double. | 4005 // 0x41300000 is the top half of 1.0 x 2^20 as a double. |
4004 // Create this constant using mov/orr to avoid PC relative load. | 4006 // Create this constant using mov/orr to avoid PC relative load. |
4005 __ mov(r1, Operand(0x41000000)); | 4007 __ mov(r1, Operand(0x41000000)); |
4006 __ orr(r1, r1, Operand(0x300000)); | 4008 __ orr(r1, r1, Operand(0x300000)); |
4007 // Move 0x41300000xxxxxxxx (x = random bits) to VFP. | 4009 // Move 0x41300000xxxxxxxx (x = random bits) to VFP. |
4008 __ vmov(d7, r0, r1); | 4010 __ vmov(d7, r0, r1); |
4009 // Move 0x4130000000000000 to VFP. | 4011 // Move 0x4130000000000000 to VFP. |
4010 __ mov(r0, Operand(0, RelocInfo::NONE32)); | 4012 __ mov(r0, Operand::Zero()); |
4011 __ vmov(d8, r0, r1); | 4013 __ vmov(d8, r0, r1); |
4012 // Subtract and store the result in the heap number. | 4014 // Subtract and store the result in the heap number. |
4013 __ vsub(d7, d7, d8); | 4015 __ vsub(d7, d7, d8); |
4014 } | 4016 } |
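The tail of DoRandom above turns 32 freshly mixed state bits into a double in [0, 1) without an integer-to-double conversion: the bits become the low mantissa word of a double whose high word is 0x41300000 (the top half of 1.0 x 2^20), and subtracting 0x4130000000000000 leaves random_bits * 2^-32. A small stand-alone C++ sketch of the same bit trick, assuming IEEE-754 binary64; it is not the V8 implementation.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Sketch of the constant-exponent trick: paste 32 random bits into the low
// mantissa word of 1.0 * 2^20, then subtract the bit pattern with an empty
// mantissa. The difference is random_bits * 2^-32, i.e. a value in [0, 1).
static double RandomBitsToDouble(uint32_t random_bits) {
  const uint64_t kBaseline = 0x41300000ull << 32;   // bit pattern of 1.0 * 2^20
  uint64_t with_bits = kBaseline | random_bits;
  double a, b;
  std::memcpy(&a, &with_bits, sizeof(a));
  std::memcpy(&b, &kBaseline, sizeof(b));
  return a - b;
}

int main() {
  std::printf("%.10f\n", RandomBitsToDouble(0u));           // 0.0
  std::printf("%.10f\n", RandomBitsToDouble(0x80000000u));  // 0.5
  std::printf("%.10f\n", RandomBitsToDouble(0xFFFFFFFFu));  // just below 1.0
}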
4015 | 4017 |
4016 | 4018 |
4017 void LCodeGen::DoDeferredRandom(LRandom* instr) { | 4019 void LCodeGen::DoDeferredRandom(LRandom* instr) { |
4018 __ PrepareCallCFunction(1, scratch0()); | 4020 __ PrepareCallCFunction(1, scratch0()); |
4019 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); | 4021 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); |
4020 // Return value is in r0. | 4022 // Return value is in r0. |
(...skipping 554 matching lines...)
4575 | 4577 |
4576 | 4578 |
4577 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { | 4579 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { |
4578 Register string = ToRegister(instr->string()); | 4580 Register string = ToRegister(instr->string()); |
4579 Register result = ToRegister(instr->result()); | 4581 Register result = ToRegister(instr->result()); |
4580 Register scratch = scratch0(); | 4582 Register scratch = scratch0(); |
4581 | 4583 |
4582 // TODO(3095996): Get rid of this. For now, we need to make the | 4584 // TODO(3095996): Get rid of this. For now, we need to make the |
4583 // result register contain a valid pointer because it is already | 4585 // result register contain a valid pointer because it is already |
4584 // contained in the register pointer map. | 4586 // contained in the register pointer map. |
4585 __ mov(result, Operand(0)); | 4587 __ mov(result, Operand::Zero()); |
4586 | 4588 |
4587 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); | 4589 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); |
4588 __ push(string); | 4590 __ push(string); |
4589 // Push the index as a smi. This is safe because of the checks in | 4591 // Push the index as a smi. This is safe because of the checks in |
4590 // DoStringCharCodeAt above. | 4592 // DoStringCharCodeAt above. |
4591 if (instr->index()->IsConstantOperand()) { | 4593 if (instr->index()->IsConstantOperand()) { |
4592 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); | 4594 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); |
4593 __ mov(scratch, Operand(Smi::FromInt(const_index))); | 4595 __ mov(scratch, Operand(Smi::FromInt(const_index))); |
4594 __ push(scratch); | 4596 __ push(scratch); |
4595 } else { | 4597 } else { |
(...skipping 39 matching lines...)
4635 } | 4637 } |
4636 | 4638 |
4637 | 4639 |
4638 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { | 4640 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { |
4639 Register char_code = ToRegister(instr->char_code()); | 4641 Register char_code = ToRegister(instr->char_code()); |
4640 Register result = ToRegister(instr->result()); | 4642 Register result = ToRegister(instr->result()); |
4641 | 4643 |
4642 // TODO(3095996): Get rid of this. For now, we need to make the | 4644 // TODO(3095996): Get rid of this. For now, we need to make the |
4643 // result register contain a valid pointer because it is already | 4645 // result register contain a valid pointer because it is already |
4644 // contained in the register pointer map. | 4646 // contained in the register pointer map. |
4645 __ mov(result, Operand(0)); | 4647 __ mov(result, Operand::Zero()); |
4646 | 4648 |
4647 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); | 4649 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); |
4648 __ SmiTag(char_code); | 4650 __ SmiTag(char_code); |
4649 __ push(char_code); | 4651 __ push(char_code); |
4650 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr); | 4652 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr); |
4651 __ StoreToSafepointRegisterSlot(r0, result); | 4653 __ StoreToSafepointRegisterSlot(r0, result); |
4652 } | 4654 } |
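Both deferred string helpers pass indices and char codes to the runtime as smis (SmiTag, Smi::FromInt). As a reminder of the encoding these rely on, here is a hedged sketch of 32-bit smi tagging, assuming the usual one-bit tag with tag value 0; the helper names are illustrative, not V8's.

#include <cassert>
#include <cstdint>

// Assumed 32-bit smi encoding: the payload is shifted left by one and the
// low (tag) bit is 0, so a machine word holds either a tagged pointer or an
// immediate small integer.
static int32_t SmiTag(int32_t value) {
  return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
}

static int32_t SmiUntag(int32_t tagged) {
  return tagged >> 1;  // arithmetic shift restores the sign
}

int main() {
  assert(SmiTag(42) == 84);
  assert((SmiTag(42) & 1) == 0);       // smi tag bit is zero
  assert(SmiUntag(SmiTag(-7)) == -7);
}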
4653 | 4655 |
4654 | 4656 |
4655 void LCodeGen::DoStringLength(LStringLength* instr) { | 4657 void LCodeGen::DoStringLength(LStringLength* instr) { |
(...skipping 100 matching lines...)
4756 const int mantissa_shift_for_hi_word = | 4758 const int mantissa_shift_for_hi_word = |
4757 meaningful_bits - HeapNumber::kMantissaBitsInTopWord; | 4759 meaningful_bits - HeapNumber::kMantissaBitsInTopWord; |
4758 const int mantissa_shift_for_lo_word = | 4760 const int mantissa_shift_for_lo_word = |
4759 kBitsPerInt - mantissa_shift_for_hi_word; | 4761 kBitsPerInt - mantissa_shift_for_hi_word; |
4760 masm->mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift)); | 4762 masm->mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift)); |
4761 if (mantissa_shift_for_hi_word > 0) { | 4763 if (mantissa_shift_for_hi_word > 0) { |
4762 masm->mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word)); | 4764 masm->mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word)); |
4763 masm->orr(hiword, scratch, | 4765 masm->orr(hiword, scratch, |
4764 Operand(hiword, LSR, mantissa_shift_for_hi_word)); | 4766 Operand(hiword, LSR, mantissa_shift_for_hi_word)); |
4765 } else { | 4767 } else { |
4766 masm->mov(loword, Operand(0, RelocInfo::NONE32)); | 4768 masm->mov(loword, Operand::Zero()); |
4767 masm->orr(hiword, scratch, | 4769 masm->orr(hiword, scratch, |
4768 Operand(hiword, LSL, -mantissa_shift_for_hi_word)); | 4770 Operand(hiword, LSL, -mantissa_shift_for_hi_word)); |
4769 } | 4771 } |
4770 | 4772 |
4771 // If least significant bit of biased exponent was not 1 it was corrupted | 4773 // If least significant bit of biased exponent was not 1 it was corrupted |
4772 // by most significant bit of mantissa so we should fix that. | 4774 // by most significant bit of mantissa so we should fix that. |
4773 if (!(biased_exponent & 1)) { | 4775 if (!(biased_exponent & 1)) { |
4774 masm->bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift)); | 4776 masm->bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift)); |
4775 } | 4777 } |
4776 } | 4778 } |
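The shift constants above come straight from the IEEE-754 binary64 layout: the high 32-bit word carries the sign bit, an 11-bit biased exponent starting at bit 20 (kExponentShift), and the top 20 mantissa bits (kMantissaBitsInTopWord); the low word holds the remaining 32 mantissa bits. A small host-side verification sketch under that assumption; it only decodes a double and is not part of the patch.

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  const int kExponentShift = 20;          // assumed, mirrors HeapNumber
  const int kMantissaBitsInTopWord = 20;  // assumed, mirrors HeapNumber

  double value = 1048576.0;  // 1.0 * 2^20
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  uint32_t hiword = static_cast<uint32_t>(bits >> 32);
  uint32_t loword = static_cast<uint32_t>(bits);

  uint32_t biased_exponent = (hiword >> kExponentShift) & 0x7FF;
  uint64_t mantissa =
      (static_cast<uint64_t>(hiword & ((1u << kMantissaBitsInTopWord) - 1)) << 32) |
      loword;

  assert(biased_exponent == 1023u + 20u);  // bias 1023, exponent 20
  assert(mantissa == 0);                   // 2^20 has an all-zero mantissa
  assert(loword == 0);
}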
(...skipping 57 matching lines...)
4834 __ Move(dst, r5); | 4836 __ Move(dst, r5); |
4835 __ b(&done); | 4837 __ b(&done); |
4836 } | 4838 } |
4837 | 4839 |
4838 // Slow case: Call the runtime system to do the number allocation. | 4840 // Slow case: Call the runtime system to do the number allocation. |
4839 __ bind(&slow); | 4841 __ bind(&slow); |
4840 | 4842 |
4841 // TODO(3095996): Put a valid pointer value in the stack slot where the result | 4843 // TODO(3095996): Put a valid pointer value in the stack slot where the result |
4842 // register is stored, as this register is in the pointer map, but contains an | 4844 // register is stored, as this register is in the pointer map, but contains an |
4843 // integer value. | 4845 // integer value. |
4844 __ mov(ip, Operand(0)); | 4846 __ mov(ip, Operand::Zero()); |
4845 __ StoreToSafepointRegisterSlot(ip, dst); | 4847 __ StoreToSafepointRegisterSlot(ip, dst); |
4846 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); | 4848 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); |
4847 __ Move(dst, r0); | 4849 __ Move(dst, r0); |
4848 __ sub(dst, dst, Operand(kHeapObjectTag)); | 4850 __ sub(dst, dst, Operand(kHeapObjectTag)); |
4849 | 4851 |
4850 // Done. Put the value in dbl_scratch into the value of the allocated heap | 4852 // Done. Put the value in dbl_scratch into the value of the allocated heap |
4851 // number. | 4853 // number. |
4852 __ bind(&done); | 4854 __ bind(&done); |
4853 if (CpuFeatures::IsSupported(VFP2)) { | 4855 if (CpuFeatures::IsSupported(VFP2)) { |
4854 CpuFeatures::Scope scope(VFP2); | 4856 CpuFeatures::Scope scope(VFP2); |
(...skipping 44 matching lines...)
4899 // Now that we have finished with the object's real address tag it | 4901 // Now that we have finished with the object's real address tag it |
4900 __ add(reg, reg, Operand(kHeapObjectTag)); | 4902 __ add(reg, reg, Operand(kHeapObjectTag)); |
4901 } | 4903 } |
4902 | 4904 |
4903 | 4905 |
4904 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { | 4906 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { |
4905 // TODO(3095996): Get rid of this. For now, we need to make the | 4907 // TODO(3095996): Get rid of this. For now, we need to make the |
4906 // result register contain a valid pointer because it is already | 4908 // result register contain a valid pointer because it is already |
4907 // contained in the register pointer map. | 4909 // contained in the register pointer map. |
4908 Register reg = ToRegister(instr->result()); | 4910 Register reg = ToRegister(instr->result()); |
4909 __ mov(reg, Operand(0)); | 4911 __ mov(reg, Operand::Zero()); |
4910 | 4912 |
4911 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); | 4913 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); |
4912 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); | 4914 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); |
4913 __ sub(r0, r0, Operand(kHeapObjectTag)); | 4915 __ sub(r0, r0, Operand(kHeapObjectTag)); |
4914 __ StoreToSafepointRegisterSlot(r0, reg); | 4916 __ StoreToSafepointRegisterSlot(r0, reg); |
4915 } | 4917 } |
4916 | 4918 |
4917 | 4919 |
4918 void LCodeGen::DoSmiTag(LSmiTag* instr) { | 4920 void LCodeGen::DoSmiTag(LSmiTag* instr) { |
4919 ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)); | 4921 ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)); |
(...skipping 50 matching lines...)
4970 __ vldr(result_reg, ip, HeapNumber::kValueOffset); | 4972 __ vldr(result_reg, ip, HeapNumber::kValueOffset); |
4971 __ jmp(&done); | 4973 __ jmp(&done); |
4972 | 4974 |
4973 __ bind(&heap_number); | 4975 __ bind(&heap_number); |
4974 } | 4976 } |
4975 // Heap number to double register conversion. | 4977 // Heap number to double register conversion. |
4976 __ sub(ip, input_reg, Operand(kHeapObjectTag)); | 4978 __ sub(ip, input_reg, Operand(kHeapObjectTag)); |
4977 __ vldr(result_reg, ip, HeapNumber::kValueOffset); | 4979 __ vldr(result_reg, ip, HeapNumber::kValueOffset); |
4978 if (deoptimize_on_minus_zero) { | 4980 if (deoptimize_on_minus_zero) { |
4979 __ vmov(ip, result_reg.low()); | 4981 __ vmov(ip, result_reg.low()); |
4980 __ cmp(ip, Operand(0)); | 4982 __ cmp(ip, Operand::Zero()); |
4981 __ b(ne, &done); | 4983 __ b(ne, &done); |
4982 __ vmov(ip, result_reg.high()); | 4984 __ vmov(ip, result_reg.high()); |
4983 __ cmp(ip, Operand(HeapNumber::kSignMask)); | 4985 __ cmp(ip, Operand(HeapNumber::kSignMask)); |
4984 DeoptimizeIf(eq, env); | 4986 DeoptimizeIf(eq, env); |
4985 } | 4987 } |
4986 __ jmp(&done); | 4988 __ jmp(&done); |
4987 | 4989 |
4988 // Smi to double register conversion | 4990 // Smi to double register conversion |
4989 __ bind(&load_smi); | 4991 __ bind(&load_smi); |
4990 // scratch: untagged value of input_reg | 4992 // scratch: untagged value of input_reg |
(...skipping 34 matching lines...)
5025 !scratch3.is(scratch2)); | 5027 !scratch3.is(scratch2)); |
5026 // Performs a truncating conversion of a floating point number as used by | 5028 // Performs a truncating conversion of a floating point number as used by |
5027 // the JS bitwise operations. | 5029 // the JS bitwise operations. |
5028 Label heap_number; | 5030 Label heap_number; |
5029 __ b(eq, &heap_number); | 5031 __ b(eq, &heap_number); |
5030 // Check for undefined. Undefined is converted to zero for truncating | 5032 // Check for undefined. Undefined is converted to zero for truncating |
5031 // conversions. | 5033 // conversions. |
5032 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | 5034 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
5033 __ cmp(input_reg, Operand(ip)); | 5035 __ cmp(input_reg, Operand(ip)); |
5034 DeoptimizeIf(ne, instr->environment()); | 5036 DeoptimizeIf(ne, instr->environment()); |
5035 __ mov(input_reg, Operand(0)); | 5037 __ mov(input_reg, Operand::Zero()); |
5036 __ b(&done); | 5038 __ b(&done); |
5037 | 5039 |
5038 __ bind(&heap_number); | 5040 __ bind(&heap_number); |
5039 __ sub(scratch1, input_reg, Operand(kHeapObjectTag)); | 5041 __ sub(scratch1, input_reg, Operand(kHeapObjectTag)); |
5040 __ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset); | 5042 __ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset); |
5041 | 5043 |
5042 __ EmitECMATruncate(input_reg, | 5044 __ EmitECMATruncate(input_reg, |
5043 double_scratch2, | 5045 double_scratch2, |
5044 double_scratch, | 5046 double_scratch, |
5045 scratch1, | 5047 scratch1, |
5046 scratch2, | 5048 scratch2, |
5047 scratch3); | 5049 scratch3); |
5048 | 5050 |
5049 } else { | 5051 } else { |
5050 CpuFeatures::Scope scope(VFP3); | 5052 CpuFeatures::Scope scope(VFP3); |
5051 // Deoptimize if we don't have a heap number. | 5053 // Deoptimize if we don't have a heap number. |
5052 DeoptimizeIf(ne, instr->environment()); | 5054 DeoptimizeIf(ne, instr->environment()); |
5053 | 5055 |
5054 __ sub(ip, input_reg, Operand(kHeapObjectTag)); | 5056 __ sub(ip, input_reg, Operand(kHeapObjectTag)); |
5055 __ vldr(double_scratch, ip, HeapNumber::kValueOffset); | 5057 __ vldr(double_scratch, ip, HeapNumber::kValueOffset); |
5056 __ EmitVFPTruncate(kRoundToZero, | 5058 __ EmitVFPTruncate(kRoundToZero, |
5057 input_reg, | 5059 input_reg, |
5058 double_scratch, | 5060 double_scratch, |
5059 scratch1, | 5061 scratch1, |
5060 double_scratch2, | 5062 double_scratch2, |
5061 kCheckForInexactConversion); | 5063 kCheckForInexactConversion); |
5062 DeoptimizeIf(ne, instr->environment()); | 5064 DeoptimizeIf(ne, instr->environment()); |
5063 | 5065 |
5064 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5066 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
5065 __ cmp(input_reg, Operand(0)); | 5067 __ cmp(input_reg, Operand::Zero()); |
5066 __ b(ne, &done); | 5068 __ b(ne, &done); |
5067 __ vmov(scratch1, double_scratch.high()); | 5069 __ vmov(scratch1, double_scratch.high()); |
5068 __ tst(scratch1, Operand(HeapNumber::kSignMask)); | 5070 __ tst(scratch1, Operand(HeapNumber::kSignMask)); |
5069 DeoptimizeIf(ne, instr->environment()); | 5071 DeoptimizeIf(ne, instr->environment()); |
5070 } | 5072 } |
5071 } | 5073 } |
5072 __ bind(&done); | 5074 __ bind(&done); |
5073 } | 5075 } |
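The truncating branch above implements the conversion used by the JS bitwise operators (ECMAScript ToInt32): undefined becomes zero, and a finite double is truncated toward zero and wrapped modulo 2^32; the non-truncating branch instead deoptimizes on inexact conversions and, when the flag is set, on -0. A host-side model of the ToInt32 part, written against the spec rather than the emitted ARM sequence:

#include <cassert>
#include <cmath>
#include <cstdint>

// Spec-level model of ToInt32: non-finite values map to 0, finite values are
// truncated toward zero and reduced modulo 2^32 into the int32 range.
static int32_t ToInt32(double d) {
  if (!std::isfinite(d)) return 0;                // NaN, +/-Infinity
  double truncated = std::trunc(d);               // round toward zero
  double m = std::fmod(truncated, 4294967296.0);  // exact remainder
  if (m < 0) m += 4294967296.0;                   // bring into [0, 2^32)
  return static_cast<int32_t>(static_cast<uint32_t>(m));
}

int main() {
  assert(ToInt32(3.9) == 3);
  assert(ToInt32(-3.9) == -3);
  assert(ToInt32(4294967296.0 + 5.0) == 5);       // wraps mod 2^32
  assert(ToInt32(-0.0) == 0);
}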
5074 | 5076 |
5075 | 5077 |
(...skipping 210 matching lines...)
5286 | 5288 |
5287 // Check for heap number | 5289 // Check for heap number |
5288 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 5290 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
5289 __ cmp(scratch, Operand(factory()->heap_number_map())); | 5291 __ cmp(scratch, Operand(factory()->heap_number_map())); |
5290 __ b(eq, &heap_number); | 5292 __ b(eq, &heap_number); |
5291 | 5293 |
5292 // Check for undefined. Undefined is converted to zero for clamping | 5294 // Check for undefined. Undefined is converted to zero for clamping |
5293 // conversions. | 5295 // conversions. |
5294 __ cmp(input_reg, Operand(factory()->undefined_value())); | 5296 __ cmp(input_reg, Operand(factory()->undefined_value())); |
5295 DeoptimizeIf(ne, instr->environment()); | 5297 DeoptimizeIf(ne, instr->environment()); |
5296 __ mov(result_reg, Operand(0)); | 5298 __ mov(result_reg, Operand::Zero()); |
5297 __ jmp(&done); | 5299 __ jmp(&done); |
5298 | 5300 |
5299 // Heap number | 5301 // Heap number |
5300 __ bind(&heap_number); | 5302 __ bind(&heap_number); |
5301 __ vldr(double_scratch0(), FieldMemOperand(input_reg, | 5303 __ vldr(double_scratch0(), FieldMemOperand(input_reg, |
5302 HeapNumber::kValueOffset)); | 5304 HeapNumber::kValueOffset)); |
5303 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg); | 5305 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg); |
5304 __ jmp(&done); | 5306 __ jmp(&done); |
5305 | 5307 |
5306 // smi | 5308 // smi |
(...skipping 101 matching lines...)
5408 | 5410 |
5409 void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) { | 5411 void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) { |
5410 Register result = ToRegister(instr->result()); | 5412 Register result = ToRegister(instr->result()); |
5411 Handle<JSFunction> constructor = instr->hydrogen()->constructor(); | 5413 Handle<JSFunction> constructor = instr->hydrogen()->constructor(); |
5412 Handle<Map> initial_map(constructor->initial_map()); | 5414 Handle<Map> initial_map(constructor->initial_map()); |
5413 int instance_size = initial_map->instance_size(); | 5415 int instance_size = initial_map->instance_size(); |
5414 | 5416 |
5415 // TODO(3095996): Get rid of this. For now, we need to make the | 5417 // TODO(3095996): Get rid of this. For now, we need to make the |
5416 // result register contain a valid pointer because it is already | 5418 // result register contain a valid pointer because it is already |
5417 // contained in the register pointer map. | 5419 // contained in the register pointer map. |
5418 __ mov(result, Operand(0)); | 5420 __ mov(result, Operand::Zero()); |
5419 | 5421 |
5420 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); | 5422 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); |
5421 __ mov(r0, Operand(Smi::FromInt(instance_size))); | 5423 __ mov(r0, Operand(Smi::FromInt(instance_size))); |
5422 __ push(r0); | 5424 __ push(r0); |
5423 CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr); | 5425 CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr); |
5424 __ StoreToSafepointRegisterSlot(r0, result); | 5426 __ StoreToSafepointRegisterSlot(r0, result); |
5425 } | 5427 } |
5426 | 5428 |
5427 | 5429 |
5428 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { | 5430 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { |
(...skipping 608 matching lines...)
6037 __ b(ne, &load_cache); | 6039 __ b(ne, &load_cache); |
6038 __ mov(result, Operand(isolate()->factory()->empty_fixed_array())); | 6040 __ mov(result, Operand(isolate()->factory()->empty_fixed_array())); |
6039 __ jmp(&done); | 6041 __ jmp(&done); |
6040 | 6042 |
6041 __ bind(&load_cache); | 6043 __ bind(&load_cache); |
6042 __ LoadInstanceDescriptors(map, result); | 6044 __ LoadInstanceDescriptors(map, result); |
6043 __ ldr(result, | 6045 __ ldr(result, |
6044 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); | 6046 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); |
6045 __ ldr(result, | 6047 __ ldr(result, |
6046 FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); | 6048 FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); |
6047 __ cmp(result, Operand(0)); | 6049 __ cmp(result, Operand::Zero()); |
6048 DeoptimizeIf(eq, instr->environment()); | 6050 DeoptimizeIf(eq, instr->environment()); |
6049 | 6051 |
6050 __ bind(&done); | 6052 __ bind(&done); |
6051 } | 6053 } |
6052 | 6054 |
6053 | 6055 |
6054 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { | 6056 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { |
6055 Register object = ToRegister(instr->value()); | 6057 Register object = ToRegister(instr->value()); |
6056 Register map = ToRegister(instr->map()); | 6058 Register map = ToRegister(instr->map()); |
6057 __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); | 6059 __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); |
6058 __ cmp(map, scratch0()); | 6060 __ cmp(map, scratch0()); |
6059 DeoptimizeIf(ne, instr->environment()); | 6061 DeoptimizeIf(ne, instr->environment()); |
6060 } | 6062 } |
6061 | 6063 |
6062 | 6064 |
6063 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { | 6065 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { |
6064 Register object = ToRegister(instr->object()); | 6066 Register object = ToRegister(instr->object()); |
6065 Register index = ToRegister(instr->index()); | 6067 Register index = ToRegister(instr->index()); |
6066 Register result = ToRegister(instr->result()); | 6068 Register result = ToRegister(instr->result()); |
6067 Register scratch = scratch0(); | 6069 Register scratch = scratch0(); |
6068 | 6070 |
6069 Label out_of_object, done; | 6071 Label out_of_object, done; |
6070 __ cmp(index, Operand(0)); | 6072 __ cmp(index, Operand::Zero()); |
6071 __ b(lt, &out_of_object); | 6073 __ b(lt, &out_of_object); |
6072 | 6074 |
6073 STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize); | 6075 STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize); |
6074 __ add(scratch, object, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize)); | 6076 __ add(scratch, object, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize)); |
6075 __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize)); | 6077 __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize)); |
6076 | 6078 |
6077 __ b(&done); | 6079 __ b(&done); |
6078 | 6080 |
6079 __ bind(&out_of_object); | 6081 __ bind(&out_of_object); |
6080 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); | 6082 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); |
6081 // Index is equal to negated out of object property index plus 1. | 6083 // Index is equal to negated out of object property index plus 1. |
6082 __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize)); | 6084 __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize)); |
6083 __ ldr(result, FieldMemOperand(scratch, | 6085 __ ldr(result, FieldMemOperand(scratch, |
6084 FixedArray::kHeaderSize - kPointerSize)); | 6086 FixedArray::kHeaderSize - kPointerSize)); |
6085 __ bind(&done); | 6087 __ bind(&done); |
6086 } | 6088 } |
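DoLoadFieldByIndex uses the sign of the (smi) index to pick between the two property locations: a non-negative index is an in-object field loaded directly from the object body, while a negative index addresses the out-of-object properties backing store, with the slot number recovered as -index - 1 (the "negated out of object property index plus 1" in the comment). A hedged sketch of that convention with purely hypothetical struct layouts, not V8's real object model:

#include <cassert>
#include <cstdint>

// Hypothetical layouts, for illustration only.
struct PropertiesArray {
  intptr_t slots[8];
};

struct Object {
  PropertiesArray* properties;   // out-of-object backing store
  intptr_t in_object_fields[4];  // fields stored inside the object itself
};

static intptr_t LoadFieldByIndex(const Object* object, int index) {
  if (index >= 0) {
    return object->in_object_fields[index];  // fast, in-object case
  }
  int out_of_object_index = -index - 1;       // negate and subtract one
  return object->properties->slots[out_of_object_index];
}

int main() {
  PropertiesArray props = {{100, 101, 102, 103, 104, 105, 106, 107}};
  Object obj = {&props, {10, 11, 12, 13}};
  assert(LoadFieldByIndex(&obj, 2) == 12);    // in-object field 2
  assert(LoadFieldByIndex(&obj, -1) == 100);  // backing-store slot 0
  assert(LoadFieldByIndex(&obj, -3) == 102);  // backing-store slot 2
}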
6087 | 6089 |
6088 | 6090 |
6089 #undef __ | 6091 #undef __ |
6090 | 6092 |
6091 } } // namespace v8::internal | 6093 } } // namespace v8::internal |