Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1)

Side by Side Diff: src/arm/lithium-codegen-arm.cc

Issue 11191029: Use VLDR instead of VMOVs from GPR when a 64-bit double can't be encoded as a VMOV immediate. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 8 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 120 matching lines...) Expand 10 before | Expand all | Expand 10 after
131 // cp: Callee's context. 131 // cp: Callee's context.
132 // fp: Caller's frame pointer. 132 // fp: Caller's frame pointer.
133 // lr: Caller's pc. 133 // lr: Caller's pc.
134 134
135 // Strict mode functions and builtins need to replace the receiver 135 // Strict mode functions and builtins need to replace the receiver
136 // with undefined when called as functions (without an explicit 136 // with undefined when called as functions (without an explicit
137 // receiver object). r5 is zero for method calls and non-zero for 137 // receiver object). r5 is zero for method calls and non-zero for
138 // function calls. 138 // function calls.
139 if (!info_->is_classic_mode() || info_->is_native()) { 139 if (!info_->is_classic_mode() || info_->is_native()) {
140 Label ok; 140 Label ok;
141 __ cmp(r5, Operand(0)); 141 __ cmp(r5, Operand::Zero());
142 __ b(eq, &ok); 142 __ b(eq, &ok);
143 int receiver_offset = scope()->num_parameters() * kPointerSize; 143 int receiver_offset = scope()->num_parameters() * kPointerSize;
144 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); 144 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
145 __ str(r2, MemOperand(sp, receiver_offset)); 145 __ str(r2, MemOperand(sp, receiver_offset));
146 __ bind(&ok); 146 __ bind(&ok);
147 } 147 }
148 148
149 __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); 149 __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
150 __ add(fp, sp, Operand(2 * kPointerSize)); // Adjust FP to point to saved FP. 150 __ add(fp, sp, Operand(2 * kPointerSize)); // Adjust FP to point to saved FP.
151 151
(...skipping 265 matching lines...) Expand 10 before | Expand all | Expand 10 after
417 return Operand(constant->Integer32Value()); 417 return Operand(constant->Integer32Value());
418 } else if (r.IsDouble()) { 418 } else if (r.IsDouble()) {
419 Abort("ToOperand Unsupported double immediate."); 419 Abort("ToOperand Unsupported double immediate.");
420 } 420 }
421 ASSERT(r.IsTagged()); 421 ASSERT(r.IsTagged());
422 return Operand(constant->handle()); 422 return Operand(constant->handle());
423 } else if (op->IsRegister()) { 423 } else if (op->IsRegister()) {
424 return Operand(ToRegister(op)); 424 return Operand(ToRegister(op));
425 } else if (op->IsDoubleRegister()) { 425 } else if (op->IsDoubleRegister()) {
426 Abort("ToOperand IsDoubleRegister unimplemented"); 426 Abort("ToOperand IsDoubleRegister unimplemented");
427 return Operand(0); 427 return Operand::Zero();
428 } 428 }
429 // Stack slots not implemented, use ToMemOperand instead. 429 // Stack slots not implemented, use ToMemOperand instead.
430 UNREACHABLE(); 430 UNREACHABLE();
431 return Operand(0); 431 return Operand::Zero();
432 } 432 }
433 433
434 434
435 MemOperand LCodeGen::ToMemOperand(LOperand* op) const { 435 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
436 ASSERT(!op->IsRegister()); 436 ASSERT(!op->IsRegister());
437 ASSERT(!op->IsDoubleRegister()); 437 ASSERT(!op->IsDoubleRegister());
438 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); 438 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
439 int index = op->index(); 439 int index = op->index();
440 if (index >= 0) { 440 if (index >= 0) {
441 // Local or spill slot. Skip the frame pointer, function, and 441 // Local or spill slot. Skip the frame pointer, function, and
(...skipping 511 matching lines...) Expand 10 before | Expand all | Expand 10 after
953 if (instr->hydrogen()->HasPowerOf2Divisor()) { 953 if (instr->hydrogen()->HasPowerOf2Divisor()) {
954 Register dividend = ToRegister(instr->left()); 954 Register dividend = ToRegister(instr->left());
955 Register result = ToRegister(instr->result()); 955 Register result = ToRegister(instr->result());
956 956
957 int32_t divisor = 957 int32_t divisor =
958 HConstant::cast(instr->hydrogen()->right())->Integer32Value(); 958 HConstant::cast(instr->hydrogen()->right())->Integer32Value();
959 959
960 if (divisor < 0) divisor = -divisor; 960 if (divisor < 0) divisor = -divisor;
961 961
962 Label positive_dividend, done; 962 Label positive_dividend, done;
963 __ cmp(dividend, Operand(0)); 963 __ cmp(dividend, Operand::Zero());
964 __ b(pl, &positive_dividend); 964 __ b(pl, &positive_dividend);
965 __ rsb(result, dividend, Operand(0)); 965 __ rsb(result, dividend, Operand::Zero());
966 __ and_(result, result, Operand(divisor - 1), SetCC); 966 __ and_(result, result, Operand(divisor - 1), SetCC);
967 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 967 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
968 DeoptimizeIf(eq, instr->environment()); 968 DeoptimizeIf(eq, instr->environment());
969 } 969 }
970 __ rsb(result, result, Operand(0)); 970 __ rsb(result, result, Operand::Zero());
971 __ b(&done); 971 __ b(&done);
972 __ bind(&positive_dividend); 972 __ bind(&positive_dividend);
973 __ and_(result, dividend, Operand(divisor - 1)); 973 __ and_(result, dividend, Operand(divisor - 1));
974 __ bind(&done); 974 __ bind(&done);
975 return; 975 return;
976 } 976 }
977 977
978 // These registers hold untagged 32 bit values. 978 // These registers hold untagged 32 bit values.
979 Register left = ToRegister(instr->left()); 979 Register left = ToRegister(instr->left());
980 Register right = ToRegister(instr->right()); 980 Register right = ToRegister(instr->right());
981 Register result = ToRegister(instr->result()); 981 Register result = ToRegister(instr->result());
982 Label done; 982 Label done;
983 983
984 if (CpuFeatures::IsSupported(SUDIV)) { 984 if (CpuFeatures::IsSupported(SUDIV)) {
985 CpuFeatures::Scope scope(SUDIV); 985 CpuFeatures::Scope scope(SUDIV);
986 // Check for x % 0. 986 // Check for x % 0.
987 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { 987 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
988 __ cmp(right, Operand(0)); 988 __ cmp(right, Operand::Zero());
989 DeoptimizeIf(eq, instr->environment()); 989 DeoptimizeIf(eq, instr->environment());
990 } 990 }
991 991
992 // For r3 = r1 % r2; we can have the following ARM code 992 // For r3 = r1 % r2; we can have the following ARM code
993 // sdiv r3, r1, r2 993 // sdiv r3, r1, r2
994 // mls r3, r3, r2, r1 994 // mls r3, r3, r2, r1
995 995
996 __ sdiv(result, left, right); 996 __ sdiv(result, left, right);
997 __ mls(result, result, right, left); 997 __ mls(result, result, right, left);
998 __ cmp(result, Operand(0)); 998 __ cmp(result, Operand::Zero());
999 __ b(ne, &done); 999 __ b(ne, &done);
1000 1000
1001 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 1001 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1002 __ cmp(left, Operand(0)); 1002 __ cmp(left, Operand::Zero());
1003 DeoptimizeIf(lt, instr->environment()); 1003 DeoptimizeIf(lt, instr->environment());
1004 } 1004 }
1005 } else { 1005 } else {
1006 Register scratch = scratch0(); 1006 Register scratch = scratch0();
1007 Register scratch2 = ToRegister(instr->temp()); 1007 Register scratch2 = ToRegister(instr->temp());
1008 DwVfpRegister dividend = ToDoubleRegister(instr->temp2()); 1008 DwVfpRegister dividend = ToDoubleRegister(instr->temp2());
1009 DwVfpRegister divisor = ToDoubleRegister(instr->temp3()); 1009 DwVfpRegister divisor = ToDoubleRegister(instr->temp3());
1010 DwVfpRegister quotient = double_scratch0(); 1010 DwVfpRegister quotient = double_scratch0();
1011 1011
1012 ASSERT(!dividend.is(divisor)); 1012 ASSERT(!dividend.is(divisor));
1013 ASSERT(!dividend.is(quotient)); 1013 ASSERT(!dividend.is(quotient));
1014 ASSERT(!divisor.is(quotient)); 1014 ASSERT(!divisor.is(quotient));
1015 ASSERT(!scratch.is(left)); 1015 ASSERT(!scratch.is(left));
1016 ASSERT(!scratch.is(right)); 1016 ASSERT(!scratch.is(right));
1017 ASSERT(!scratch.is(result)); 1017 ASSERT(!scratch.is(result));
1018 1018
1019 Label vfp_modulo, both_positive, right_negative; 1019 Label vfp_modulo, both_positive, right_negative;
1020 1020
1021 // Check for x % 0. 1021 // Check for x % 0.
1022 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { 1022 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
1023 __ cmp(right, Operand(0)); 1023 __ cmp(right, Operand::Zero());
1024 DeoptimizeIf(eq, instr->environment()); 1024 DeoptimizeIf(eq, instr->environment());
1025 } 1025 }
1026 1026
1027 __ Move(result, left); 1027 __ Move(result, left);
1028 1028
1029 // (0 % x) must yield 0 (if x is finite, which is the case here). 1029 // (0 % x) must yield 0 (if x is finite, which is the case here).
1030 __ cmp(left, Operand(0)); 1030 __ cmp(left, Operand::Zero());
1031 __ b(eq, &done); 1031 __ b(eq, &done);
1032 // Preload right in a vfp register. 1032 // Preload right in a vfp register.
1033 __ vmov(divisor.low(), right); 1033 __ vmov(divisor.low(), right);
1034 __ b(lt, &vfp_modulo); 1034 __ b(lt, &vfp_modulo);
1035 1035
1036 __ cmp(left, Operand(right)); 1036 __ cmp(left, Operand(right));
1037 __ b(lt, &done); 1037 __ b(lt, &done);
1038 1038
1039 // Check for (positive) power of two on the right hand side. 1039 // Check for (positive) power of two on the right hand side.
1040 __ JumpIfNotPowerOfTwoOrZeroAndNeg(right, 1040 __ JumpIfNotPowerOfTwoOrZeroAndNeg(right,
1041 scratch, 1041 scratch,
1042 &right_negative, 1042 &right_negative,
1043 &both_positive); 1043 &both_positive);
1044 // Perform modulo operation (scratch contains right - 1). 1044 // Perform modulo operation (scratch contains right - 1).
1045 __ and_(result, scratch, Operand(left)); 1045 __ and_(result, scratch, Operand(left));
1046 __ b(&done); 1046 __ b(&done);
1047 1047
1048 __ bind(&right_negative); 1048 __ bind(&right_negative);
1049 // Negate right. The sign of the divisor does not matter. 1049 // Negate right. The sign of the divisor does not matter.
1050 __ rsb(right, right, Operand(0)); 1050 __ rsb(right, right, Operand::Zero());
1051 1051
1052 __ bind(&both_positive); 1052 __ bind(&both_positive);
1053 const int kUnfolds = 3; 1053 const int kUnfolds = 3;
1054 // If the right hand side is smaller than the (nonnegative) 1054 // If the right hand side is smaller than the (nonnegative)
1055 // left hand side, the left hand side is the result. 1055 // left hand side, the left hand side is the result.
1056 // Else try a few subtractions of the left hand side. 1056 // Else try a few subtractions of the left hand side.
1057 __ mov(scratch, left); 1057 __ mov(scratch, left);
1058 for (int i = 0; i < kUnfolds; i++) { 1058 for (int i = 0; i < kUnfolds; i++) {
1059 // Check if the left hand side is less or equal than the 1059 // Check if the left hand side is less or equal than the
1060 // the right hand side. 1060 // the right hand side.
(...skipping 30 matching lines...) Expand all
1091 __ vcvt_s32_f64(double_scratch.low(), double_scratch); 1091 __ vcvt_s32_f64(double_scratch.low(), double_scratch);
1092 __ vmov(scratch, double_scratch.low()); 1092 __ vmov(scratch, double_scratch.low());
1093 1093
1094 if (!instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 1094 if (!instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1095 __ sub(result, left, scratch); 1095 __ sub(result, left, scratch);
1096 } else { 1096 } else {
1097 Label ok; 1097 Label ok;
1098 // Check for -0. 1098 // Check for -0.
1099 __ sub(scratch2, left, scratch, SetCC); 1099 __ sub(scratch2, left, scratch, SetCC);
1100 __ b(ne, &ok); 1100 __ b(ne, &ok);
1101 __ cmp(left, Operand(0)); 1101 __ cmp(left, Operand::Zero());
1102 DeoptimizeIf(mi, instr->environment()); 1102 DeoptimizeIf(mi, instr->environment());
1103 __ bind(&ok); 1103 __ bind(&ok);
1104 // Load the result and we are done. 1104 // Load the result and we are done.
1105 __ mov(result, scratch2); 1105 __ mov(result, scratch2);
1106 } 1106 }
1107 } 1107 }
1108 __ bind(&done); 1108 __ bind(&done);
1109 } 1109 }
1110 1110
1111 1111
(...skipping 14 matching lines...) Expand all
1126 1126
1127 switch (divisor_abs) { 1127 switch (divisor_abs) {
1128 case 0: 1128 case 0:
1129 DeoptimizeIf(al, environment); 1129 DeoptimizeIf(al, environment);
1130 return; 1130 return;
1131 1131
1132 case 1: 1132 case 1:
1133 if (divisor > 0) { 1133 if (divisor > 0) {
1134 __ Move(result, dividend); 1134 __ Move(result, dividend);
1135 } else { 1135 } else {
1136 __ rsb(result, dividend, Operand(0), SetCC); 1136 __ rsb(result, dividend, Operand::Zero(), SetCC);
1137 DeoptimizeIf(vs, environment); 1137 DeoptimizeIf(vs, environment);
1138 } 1138 }
1139 // Compute the remainder. 1139 // Compute the remainder.
1140 __ mov(remainder, Operand(0)); 1140 __ mov(remainder, Operand::Zero());
1141 return; 1141 return;
1142 1142
1143 default: 1143 default:
1144 if (IsPowerOf2(divisor_abs)) { 1144 if (IsPowerOf2(divisor_abs)) {
1145 // Branch and condition free code for integer division by a power 1145 // Branch and condition free code for integer division by a power
1146 // of two. 1146 // of two.
1147 int32_t power = WhichPowerOf2(divisor_abs); 1147 int32_t power = WhichPowerOf2(divisor_abs);
1148 if (power > 1) { 1148 if (power > 1) {
1149 __ mov(scratch, Operand(dividend, ASR, power - 1)); 1149 __ mov(scratch, Operand(dividend, ASR, power - 1));
1150 } 1150 }
1151 __ add(scratch, dividend, Operand(scratch, LSR, 32 - power)); 1151 __ add(scratch, dividend, Operand(scratch, LSR, 32 - power));
1152 __ mov(result, Operand(scratch, ASR, power)); 1152 __ mov(result, Operand(scratch, ASR, power));
1153 // Negate if necessary. 1153 // Negate if necessary.
1154 // We don't need to check for overflow because the case '-1' is 1154 // We don't need to check for overflow because the case '-1' is
1155 // handled separately. 1155 // handled separately.
1156 if (divisor < 0) { 1156 if (divisor < 0) {
1157 ASSERT(divisor != -1); 1157 ASSERT(divisor != -1);
1158 __ rsb(result, result, Operand(0)); 1158 __ rsb(result, result, Operand::Zero());
1159 } 1159 }
1160 // Compute the remainder. 1160 // Compute the remainder.
1161 if (divisor > 0) { 1161 if (divisor > 0) {
1162 __ sub(remainder, dividend, Operand(result, LSL, power)); 1162 __ sub(remainder, dividend, Operand(result, LSL, power));
1163 } else { 1163 } else {
1164 __ add(remainder, dividend, Operand(result, LSL, power)); 1164 __ add(remainder, dividend, Operand(result, LSL, power));
1165 } 1165 }
1166 return; 1166 return;
1167 } else { 1167 } else {
1168 // Use magic numbers for a few specific divisors. 1168 // Use magic numbers for a few specific divisors.
(...skipping 15 matching lines...) Expand all
1184 1184
1185 __ mov(ip, Operand(M)); 1185 __ mov(ip, Operand(M));
1186 __ smull(ip, scratch, dividend, ip); 1186 __ smull(ip, scratch, dividend, ip);
1187 if (M < 0) { 1187 if (M < 0) {
1188 __ add(scratch, scratch, Operand(dividend)); 1188 __ add(scratch, scratch, Operand(dividend));
1189 } 1189 }
1190 if (s > 0) { 1190 if (s > 0) {
1191 __ mov(scratch, Operand(scratch, ASR, s)); 1191 __ mov(scratch, Operand(scratch, ASR, s));
1192 } 1192 }
1193 __ add(result, scratch, Operand(dividend, LSR, 31)); 1193 __ add(result, scratch, Operand(dividend, LSR, 31));
1194 if (divisor < 0) __ rsb(result, result, Operand(0)); 1194 if (divisor < 0) __ rsb(result, result, Operand::Zero());
1195 // Compute the remainder. 1195 // Compute the remainder.
1196 __ mov(ip, Operand(divisor)); 1196 __ mov(ip, Operand(divisor));
1197 // This sequence could be replaced with 'mls' when 1197 // This sequence could be replaced with 'mls' when
1198 // it gets implemented. 1198 // it gets implemented.
1199 __ mul(scratch, result, ip); 1199 __ mul(scratch, result, ip);
1200 __ sub(remainder, dividend, scratch); 1200 __ sub(remainder, dividend, scratch);
1201 } 1201 }
1202 } 1202 }
1203 } 1203 }
1204 1204
(...skipping 14 matching lines...) Expand all
1219 LDivI* instr_; 1219 LDivI* instr_;
1220 }; 1220 };
1221 1221
1222 const Register left = ToRegister(instr->left()); 1222 const Register left = ToRegister(instr->left());
1223 const Register right = ToRegister(instr->right()); 1223 const Register right = ToRegister(instr->right());
1224 const Register scratch = scratch0(); 1224 const Register scratch = scratch0();
1225 const Register result = ToRegister(instr->result()); 1225 const Register result = ToRegister(instr->result());
1226 1226
1227 // Check for x / 0. 1227 // Check for x / 0.
1228 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { 1228 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
1229 __ cmp(right, Operand(0)); 1229 __ cmp(right, Operand::Zero());
1230 DeoptimizeIf(eq, instr->environment()); 1230 DeoptimizeIf(eq, instr->environment());
1231 } 1231 }
1232 1232
1233 // Check for (0 / -x) that will produce negative zero. 1233 // Check for (0 / -x) that will produce negative zero.
1234 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 1234 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1235 Label left_not_zero; 1235 Label left_not_zero;
1236 __ cmp(left, Operand(0)); 1236 __ cmp(left, Operand::Zero());
1237 __ b(ne, &left_not_zero); 1237 __ b(ne, &left_not_zero);
1238 __ cmp(right, Operand(0)); 1238 __ cmp(right, Operand::Zero());
1239 DeoptimizeIf(mi, instr->environment()); 1239 DeoptimizeIf(mi, instr->environment());
1240 __ bind(&left_not_zero); 1240 __ bind(&left_not_zero);
1241 } 1241 }
1242 1242
1243 // Check for (-kMinInt / -1). 1243 // Check for (-kMinInt / -1).
1244 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { 1244 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1245 Label left_not_min_int; 1245 Label left_not_min_int;
1246 __ cmp(left, Operand(kMinInt)); 1246 __ cmp(left, Operand(kMinInt));
1247 __ b(ne, &left_not_min_int); 1247 __ b(ne, &left_not_min_int);
1248 __ cmp(right, Operand(-1)); 1248 __ cmp(right, Operand(-1));
(...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after
1292 const Register left = ToRegister(instr->left()); 1292 const Register left = ToRegister(instr->left());
1293 const Register remainder = ToRegister(instr->temp()); 1293 const Register remainder = ToRegister(instr->temp());
1294 const Register scratch = scratch0(); 1294 const Register scratch = scratch0();
1295 1295
1296 // We only optimize this for division by constants, because the standard 1296 // We only optimize this for division by constants, because the standard
1297 // integer division routine is usually slower than transitioning to VFP. 1297 // integer division routine is usually slower than transitioning to VFP.
1298 // This could be optimized on processors with SDIV available. 1298 // This could be optimized on processors with SDIV available.
1299 ASSERT(instr->right()->IsConstantOperand()); 1299 ASSERT(instr->right()->IsConstantOperand());
1300 int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right())); 1300 int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
1301 if (divisor < 0) { 1301 if (divisor < 0) {
1302 __ cmp(left, Operand(0)); 1302 __ cmp(left, Operand::Zero());
1303 DeoptimizeIf(eq, instr->environment()); 1303 DeoptimizeIf(eq, instr->environment());
1304 } 1304 }
1305 EmitSignedIntegerDivisionByConstant(result, 1305 EmitSignedIntegerDivisionByConstant(result,
1306 left, 1306 left,
1307 divisor, 1307 divisor,
1308 remainder, 1308 remainder,
1309 scratch, 1309 scratch,
1310 instr->environment()); 1310 instr->environment());
1311 // We operated a truncating division. Correct the result if necessary. 1311 // We operated a truncating division. Correct the result if necessary.
1312 __ cmp(remainder, Operand(0)); 1312 __ cmp(remainder, Operand::Zero());
1313 __ teq(remainder, Operand(divisor), ne); 1313 __ teq(remainder, Operand(divisor), ne);
1314 __ sub(result, result, Operand(1), LeaveCC, mi); 1314 __ sub(result, result, Operand(1), LeaveCC, mi);
1315 } 1315 }
1316 1316
1317 1317
1318 void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map, 1318 void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map,
1319 LOperand* left_argument, 1319 LOperand* left_argument,
1320 LOperand* right_argument, 1320 LOperand* right_argument,
1321 Token::Value op) { 1321 Token::Value op) {
1322 Register left = ToRegister(left_argument); 1322 Register left = ToRegister(left_argument);
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after
1358 bool bailout_on_minus_zero = 1358 bool bailout_on_minus_zero =
1359 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); 1359 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1360 1360
1361 if (right_op->IsConstantOperand() && !can_overflow) { 1361 if (right_op->IsConstantOperand() && !can_overflow) {
1362 // Use optimized code for specific constants. 1362 // Use optimized code for specific constants.
1363 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); 1363 int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1364 1364
1365 if (bailout_on_minus_zero && (constant < 0)) { 1365 if (bailout_on_minus_zero && (constant < 0)) {
1366 // The case of a null constant will be handled separately. 1366 // The case of a null constant will be handled separately.
1367 // If constant is negative and left is null, the result should be -0. 1367 // If constant is negative and left is null, the result should be -0.
1368 __ cmp(left, Operand(0)); 1368 __ cmp(left, Operand::Zero());
1369 DeoptimizeIf(eq, instr->environment()); 1369 DeoptimizeIf(eq, instr->environment());
1370 } 1370 }
1371 1371
1372 switch (constant) { 1372 switch (constant) {
1373 case -1: 1373 case -1:
1374 __ rsb(result, left, Operand(0)); 1374 __ rsb(result, left, Operand::Zero());
1375 break; 1375 break;
1376 case 0: 1376 case 0:
1377 if (bailout_on_minus_zero) { 1377 if (bailout_on_minus_zero) {
1378 // If left is strictly negative and the constant is null, the 1378 // If left is strictly negative and the constant is null, the
1379 // result is -0. Deoptimize if required, otherwise return 0. 1379 // result is -0. Deoptimize if required, otherwise return 0.
1380 __ cmp(left, Operand(0)); 1380 __ cmp(left, Operand::Zero());
1381 DeoptimizeIf(mi, instr->environment()); 1381 DeoptimizeIf(mi, instr->environment());
1382 } 1382 }
1383 __ mov(result, Operand(0)); 1383 __ mov(result, Operand::Zero());
1384 break; 1384 break;
1385 case 1: 1385 case 1:
1386 __ Move(result, left); 1386 __ Move(result, left);
1387 break; 1387 break;
1388 default: 1388 default:
1389 // Multiplying by powers of two and powers of two plus or minus 1389 // Multiplying by powers of two and powers of two plus or minus
1390 // one can be done faster with shifted operands. 1390 // one can be done faster with shifted operands.
1391 // For other constants we emit standard code. 1391 // For other constants we emit standard code.
1392 int32_t mask = constant >> 31; 1392 int32_t mask = constant >> 31;
1393 uint32_t constant_abs = (constant + mask) ^ mask; 1393 uint32_t constant_abs = (constant + mask) ^ mask;
1394 1394
1395 if (IsPowerOf2(constant_abs) || 1395 if (IsPowerOf2(constant_abs) ||
1396 IsPowerOf2(constant_abs - 1) || 1396 IsPowerOf2(constant_abs - 1) ||
1397 IsPowerOf2(constant_abs + 1)) { 1397 IsPowerOf2(constant_abs + 1)) {
1398 if (IsPowerOf2(constant_abs)) { 1398 if (IsPowerOf2(constant_abs)) {
1399 int32_t shift = WhichPowerOf2(constant_abs); 1399 int32_t shift = WhichPowerOf2(constant_abs);
1400 __ mov(result, Operand(left, LSL, shift)); 1400 __ mov(result, Operand(left, LSL, shift));
1401 } else if (IsPowerOf2(constant_abs - 1)) { 1401 } else if (IsPowerOf2(constant_abs - 1)) {
1402 int32_t shift = WhichPowerOf2(constant_abs - 1); 1402 int32_t shift = WhichPowerOf2(constant_abs - 1);
1403 __ add(result, left, Operand(left, LSL, shift)); 1403 __ add(result, left, Operand(left, LSL, shift));
1404 } else if (IsPowerOf2(constant_abs + 1)) { 1404 } else if (IsPowerOf2(constant_abs + 1)) {
1405 int32_t shift = WhichPowerOf2(constant_abs + 1); 1405 int32_t shift = WhichPowerOf2(constant_abs + 1);
1406 __ rsb(result, left, Operand(left, LSL, shift)); 1406 __ rsb(result, left, Operand(left, LSL, shift));
1407 } 1407 }
1408 1408
1409 // Correct the sign of the result if the constant is negative. 1409 // Correct the sign of the result if the constant is negative.
1410 if (constant < 0) __ rsb(result, result, Operand(0)); 1410 if (constant < 0) __ rsb(result, result, Operand::Zero());
1411 1411
1412 } else { 1412 } else {
1413 // Generate standard code. 1413 // Generate standard code.
1414 __ mov(ip, Operand(constant)); 1414 __ mov(ip, Operand(constant));
1415 __ mul(result, left, ip); 1415 __ mul(result, left, ip);
1416 } 1416 }
1417 } 1417 }
1418 1418
1419 } else { 1419 } else {
1420 Register right = EmitLoadRegister(right_op, scratch); 1420 Register right = EmitLoadRegister(right_op, scratch);
1421 if (bailout_on_minus_zero) { 1421 if (bailout_on_minus_zero) {
1422 __ orr(ToRegister(instr->temp()), left, right); 1422 __ orr(ToRegister(instr->temp()), left, right);
1423 } 1423 }
1424 1424
1425 if (can_overflow) { 1425 if (can_overflow) {
1426 // scratch:result = left * right. 1426 // scratch:result = left * right.
1427 __ smull(result, scratch, left, right); 1427 __ smull(result, scratch, left, right);
1428 __ cmp(scratch, Operand(result, ASR, 31)); 1428 __ cmp(scratch, Operand(result, ASR, 31));
1429 DeoptimizeIf(ne, instr->environment()); 1429 DeoptimizeIf(ne, instr->environment());
1430 } else { 1430 } else {
1431 __ mul(result, left, right); 1431 __ mul(result, left, right);
1432 } 1432 }
1433 1433
1434 if (bailout_on_minus_zero) { 1434 if (bailout_on_minus_zero) {
1435 // Bail out if the result is supposed to be negative zero. 1435 // Bail out if the result is supposed to be negative zero.
1436 Label done; 1436 Label done;
1437 __ cmp(result, Operand(0)); 1437 __ cmp(result, Operand::Zero());
1438 __ b(ne, &done); 1438 __ b(ne, &done);
1439 __ cmp(ToRegister(instr->temp()), Operand(0)); 1439 __ cmp(ToRegister(instr->temp()), Operand::Zero());
1440 DeoptimizeIf(mi, instr->environment()); 1440 DeoptimizeIf(mi, instr->environment());
1441 __ bind(&done); 1441 __ bind(&done);
1442 } 1442 }
1443 } 1443 }
1444 } 1444 }
1445 1445
1446 1446
1447 void LCodeGen::DoBitI(LBitI* instr) { 1447 void LCodeGen::DoBitI(LBitI* instr) {
1448 LOperand* left_op = instr->left(); 1448 LOperand* left_op = instr->left();
1449 LOperand* right_op = instr->right(); 1449 LOperand* right_op = instr->right();
(...skipping 416 matching lines...) Expand 10 before | Expand all | Expand 10 after
1866 } 1866 }
1867 1867
1868 1868
1869 void LCodeGen::DoBranch(LBranch* instr) { 1869 void LCodeGen::DoBranch(LBranch* instr) {
1870 int true_block = chunk_->LookupDestination(instr->true_block_id()); 1870 int true_block = chunk_->LookupDestination(instr->true_block_id());
1871 int false_block = chunk_->LookupDestination(instr->false_block_id()); 1871 int false_block = chunk_->LookupDestination(instr->false_block_id());
1872 1872
1873 Representation r = instr->hydrogen()->value()->representation(); 1873 Representation r = instr->hydrogen()->value()->representation();
1874 if (r.IsInteger32()) { 1874 if (r.IsInteger32()) {
1875 Register reg = ToRegister(instr->value()); 1875 Register reg = ToRegister(instr->value());
1876 __ cmp(reg, Operand(0)); 1876 __ cmp(reg, Operand::Zero());
1877 EmitBranch(true_block, false_block, ne); 1877 EmitBranch(true_block, false_block, ne);
1878 } else if (r.IsDouble()) { 1878 } else if (r.IsDouble()) {
1879 DoubleRegister reg = ToDoubleRegister(instr->value()); 1879 DoubleRegister reg = ToDoubleRegister(instr->value());
1880 Register scratch = scratch0(); 1880 Register scratch = scratch0();
1881 1881
1882 // Test the double value. Zero and NaN are false. 1882 // Test the double value. Zero and NaN are false.
1883 __ VFPCompareAndLoadFlags(reg, 0.0, scratch); 1883 __ VFPCompareAndLoadFlags(reg, 0.0, scratch);
1884 __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit)); 1884 __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit));
1885 EmitBranch(true_block, false_block, eq); 1885 EmitBranch(true_block, false_block, eq);
1886 } else { 1886 } else {
1887 ASSERT(r.IsTagged()); 1887 ASSERT(r.IsTagged());
1888 Register reg = ToRegister(instr->value()); 1888 Register reg = ToRegister(instr->value());
1889 HType type = instr->hydrogen()->value()->type(); 1889 HType type = instr->hydrogen()->value()->type();
1890 if (type.IsBoolean()) { 1890 if (type.IsBoolean()) {
1891 __ CompareRoot(reg, Heap::kTrueValueRootIndex); 1891 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
1892 EmitBranch(true_block, false_block, eq); 1892 EmitBranch(true_block, false_block, eq);
1893 } else if (type.IsSmi()) { 1893 } else if (type.IsSmi()) {
1894 __ cmp(reg, Operand(0)); 1894 __ cmp(reg, Operand::Zero());
1895 EmitBranch(true_block, false_block, ne); 1895 EmitBranch(true_block, false_block, ne);
1896 } else { 1896 } else {
1897 Label* true_label = chunk_->GetAssemblyLabel(true_block); 1897 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1898 Label* false_label = chunk_->GetAssemblyLabel(false_block); 1898 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1899 1899
1900 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); 1900 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
1901 // Avoid deopts in the case where we've never executed this path before. 1901 // Avoid deopts in the case where we've never executed this path before.
1902 if (expected.IsEmpty()) expected = ToBooleanStub::all_types(); 1902 if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
1903 1903
1904 if (expected.Contains(ToBooleanStub::UNDEFINED)) { 1904 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
1905 // undefined -> false. 1905 // undefined -> false.
1906 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex); 1906 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
1907 __ b(eq, false_label); 1907 __ b(eq, false_label);
1908 } 1908 }
1909 if (expected.Contains(ToBooleanStub::BOOLEAN)) { 1909 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
1910 // Boolean -> its value. 1910 // Boolean -> its value.
1911 __ CompareRoot(reg, Heap::kTrueValueRootIndex); 1911 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
1912 __ b(eq, true_label); 1912 __ b(eq, true_label);
1913 __ CompareRoot(reg, Heap::kFalseValueRootIndex); 1913 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
1914 __ b(eq, false_label); 1914 __ b(eq, false_label);
1915 } 1915 }
1916 if (expected.Contains(ToBooleanStub::NULL_TYPE)) { 1916 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
1917 // 'null' -> false. 1917 // 'null' -> false.
1918 __ CompareRoot(reg, Heap::kNullValueRootIndex); 1918 __ CompareRoot(reg, Heap::kNullValueRootIndex);
1919 __ b(eq, false_label); 1919 __ b(eq, false_label);
1920 } 1920 }
1921 1921
1922 if (expected.Contains(ToBooleanStub::SMI)) { 1922 if (expected.Contains(ToBooleanStub::SMI)) {
1923 // Smis: 0 -> false, all other -> true. 1923 // Smis: 0 -> false, all other -> true.
1924 __ cmp(reg, Operand(0)); 1924 __ cmp(reg, Operand::Zero());
1925 __ b(eq, false_label); 1925 __ b(eq, false_label);
1926 __ JumpIfSmi(reg, true_label); 1926 __ JumpIfSmi(reg, true_label);
1927 } else if (expected.NeedsMap()) { 1927 } else if (expected.NeedsMap()) {
1928 // If we need a map later and have a Smi -> deopt. 1928 // If we need a map later and have a Smi -> deopt.
1929 __ tst(reg, Operand(kSmiTagMask)); 1929 __ tst(reg, Operand(kSmiTagMask));
1930 DeoptimizeIf(eq, instr->environment()); 1930 DeoptimizeIf(eq, instr->environment());
1931 } 1931 }
1932 1932
1933 const Register map = scratch0(); 1933 const Register map = scratch0();
1934 if (expected.NeedsMap()) { 1934 if (expected.NeedsMap()) {
(...skipping 12 matching lines...) Expand all
1947 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE); 1947 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
1948 __ b(ge, true_label); 1948 __ b(ge, true_label);
1949 } 1949 }
1950 1950
1951 if (expected.Contains(ToBooleanStub::STRING)) { 1951 if (expected.Contains(ToBooleanStub::STRING)) {
1952 // String value -> false iff empty. 1952 // String value -> false iff empty.
1953 Label not_string; 1953 Label not_string;
1954 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE); 1954 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
1955 __ b(ge, &not_string); 1955 __ b(ge, &not_string);
1956 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset)); 1956 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
1957 __ cmp(ip, Operand(0)); 1957 __ cmp(ip, Operand::Zero());
1958 __ b(ne, true_label); 1958 __ b(ne, true_label);
1959 __ b(false_label); 1959 __ b(false_label);
1960 __ bind(&not_string); 1960 __ bind(&not_string);
1961 } 1961 }
1962 1962
1963 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { 1963 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
1964 // heap number -> false iff +0, -0, or NaN. 1964 // heap number -> false iff +0, -0, or NaN.
1965 DoubleRegister dbl_scratch = double_scratch0(); 1965 DoubleRegister dbl_scratch = double_scratch0();
1966 Label not_heap_number; 1966 Label not_heap_number;
1967 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); 1967 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
(...skipping 272 matching lines...) Expand 10 before | Expand all | Expand 10 after
2240 } 2240 }
2241 2241
2242 2242
2243 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { 2243 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2244 Token::Value op = instr->op(); 2244 Token::Value op = instr->op();
2245 int true_block = chunk_->LookupDestination(instr->true_block_id()); 2245 int true_block = chunk_->LookupDestination(instr->true_block_id());
2246 int false_block = chunk_->LookupDestination(instr->false_block_id()); 2246 int false_block = chunk_->LookupDestination(instr->false_block_id());
2247 2247
2248 Handle<Code> ic = CompareIC::GetUninitialized(op); 2248 Handle<Code> ic = CompareIC::GetUninitialized(op);
2249 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2249 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2250 __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined. 2250
2251 // This instruction also signals no smi code inlined.
2252 __ cmp(r0, Operand::Zero());
2251 2253
2252 Condition condition = ComputeCompareCondition(op); 2254 Condition condition = ComputeCompareCondition(op);
2253 2255
2254 EmitBranch(true_block, false_block, condition); 2256 EmitBranch(true_block, false_block, condition);
2255 } 2257 }
2256 2258
2257 2259
2258 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { 2260 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2259 InstanceType from = instr->from(); 2261 InstanceType from = instr->from();
2260 InstanceType to = instr->to(); 2262 InstanceType to = instr->to();
(...skipping 153 matching lines...) Expand 10 before | Expand all | Expand 10 after
2414 } 2416 }
2415 2417
2416 2418
2417 void LCodeGen::DoInstanceOf(LInstanceOf* instr) { 2419 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2418 ASSERT(ToRegister(instr->left()).is(r0)); // Object is in r0. 2420 ASSERT(ToRegister(instr->left()).is(r0)); // Object is in r0.
2419 ASSERT(ToRegister(instr->right()).is(r1)); // Function is in r1. 2421 ASSERT(ToRegister(instr->right()).is(r1)); // Function is in r1.
2420 2422
2421 InstanceofStub stub(InstanceofStub::kArgsInRegisters); 2423 InstanceofStub stub(InstanceofStub::kArgsInRegisters);
2422 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 2424 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2423 2425
2424 __ cmp(r0, Operand(0)); 2426 __ cmp(r0, Operand::Zero());
2425 __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne); 2427 __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
2426 __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq); 2428 __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
2427 } 2429 }
2428 2430
2429 2431
2430 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { 2432 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2431 class DeferredInstanceOfKnownGlobal: public LDeferredCode { 2433 class DeferredInstanceOfKnownGlobal: public LDeferredCode {
2432 public: 2434 public:
2433 DeferredInstanceOfKnownGlobal(LCodeGen* codegen, 2435 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2434 LInstanceOfKnownGlobal* instr) 2436 LInstanceOfKnownGlobal* instr)
(...skipping 119 matching lines...) Expand 10 before | Expand all | Expand 10 after
2554 // restore all registers. 2556 // restore all registers.
2555 __ StoreToSafepointRegisterSlot(result, result); 2557 __ StoreToSafepointRegisterSlot(result, result);
2556 } 2558 }
2557 2559
2558 2560
2559 void LCodeGen::DoCmpT(LCmpT* instr) { 2561 void LCodeGen::DoCmpT(LCmpT* instr) {
2560 Token::Value op = instr->op(); 2562 Token::Value op = instr->op();
2561 2563
2562 Handle<Code> ic = CompareIC::GetUninitialized(op); 2564 Handle<Code> ic = CompareIC::GetUninitialized(op);
2563 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2565 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2564 __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined. 2566
2567 // This instruction also signals no smi code inlined.
2568 __ cmp(r0, Operand::Zero());
2565 2569
2566 Condition condition = ComputeCompareCondition(op); 2570 Condition condition = ComputeCompareCondition(op);
2567 __ LoadRoot(ToRegister(instr->result()), 2571 __ LoadRoot(ToRegister(instr->result()),
2568 Heap::kTrueValueRootIndex, 2572 Heap::kTrueValueRootIndex,
2569 condition); 2573 condition);
2570 __ LoadRoot(ToRegister(instr->result()), 2574 __ LoadRoot(ToRegister(instr->result()),
2571 Heap::kFalseValueRootIndex, 2575 Heap::kFalseValueRootIndex,
2572 NegateCondition(condition)); 2576 NegateCondition(condition));
2573 } 2577 }
2574 2578
(...skipping 658 matching lines...) Expand 10 before | Expand all | Expand 10 after
3233 // number of arguments. 3237 // number of arguments.
3234 __ push(receiver); 3238 __ push(receiver);
3235 __ mov(receiver, length); 3239 __ mov(receiver, length);
3236 // The arguments are at a one pointer size offset from elements. 3240 // The arguments are at a one pointer size offset from elements.
3237 __ add(elements, elements, Operand(1 * kPointerSize)); 3241 __ add(elements, elements, Operand(1 * kPointerSize));
3238 3242
3239 // Loop through the arguments pushing them onto the execution 3243 // Loop through the arguments pushing them onto the execution
3240 // stack. 3244 // stack.
3241 Label invoke, loop; 3245 Label invoke, loop;
3242 // length is a small non-negative integer, due to the test above. 3246 // length is a small non-negative integer, due to the test above.
3243 __ cmp(length, Operand(0)); 3247 __ cmp(length, Operand::Zero());
3244 __ b(eq, &invoke); 3248 __ b(eq, &invoke);
3245 __ bind(&loop); 3249 __ bind(&loop);
3246 __ ldr(scratch, MemOperand(elements, length, LSL, 2)); 3250 __ ldr(scratch, MemOperand(elements, length, LSL, 2));
3247 __ push(scratch); 3251 __ push(scratch);
3248 __ sub(length, length, Operand(1), SetCC); 3252 __ sub(length, length, Operand(1), SetCC);
3249 __ b(ne, &loop); 3253 __ b(ne, &loop);
3250 3254
3251 __ bind(&invoke); 3255 __ bind(&invoke);
3252 ASSERT(instr->HasPointerMap()); 3256 ASSERT(instr->HasPointerMap());
3253 LPointerMap* pointers = instr->pointer_map(); 3257 LPointerMap* pointers = instr->pointer_map();
(...skipping 183 matching lines...) Expand 10 before | Expand all | Expand 10 after
3437 __ StoreToSafepointRegisterSlot(tmp1, result); 3441 __ StoreToSafepointRegisterSlot(tmp1, result);
3438 } 3442 }
3439 3443
3440 __ bind(&done); 3444 __ bind(&done);
3441 } 3445 }
3442 3446
3443 3447
3444 void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) { 3448 void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
3445 Register input = ToRegister(instr->value()); 3449 Register input = ToRegister(instr->value());
3446 Register result = ToRegister(instr->result()); 3450 Register result = ToRegister(instr->result());
3447 __ cmp(input, Operand(0)); 3451 __ cmp(input, Operand::Zero());
3448 __ Move(result, input, pl); 3452 __ Move(result, input, pl);
3449 // We can make rsb conditional because the previous cmp instruction 3453 // We can make rsb conditional because the previous cmp instruction
3450 // will clear the V (overflow) flag and rsb won't set this flag 3454 // will clear the V (overflow) flag and rsb won't set this flag
3451 // if input is positive. 3455 // if input is positive.
3452 __ rsb(result, input, Operand(0), SetCC, mi); 3456 __ rsb(result, input, Operand::Zero(), SetCC, mi);
3453 // Deoptimize on overflow. 3457 // Deoptimize on overflow.
3454 DeoptimizeIf(vs, instr->environment()); 3458 DeoptimizeIf(vs, instr->environment());
3455 } 3459 }
3456 3460
3457 3461
3458 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { 3462 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
3459 // Class for deferred case. 3463 // Class for deferred case.
3460 class DeferredMathAbsTaggedHeapNumber: public LDeferredCode { 3464 class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
3461 public: 3465 public:
3462 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, 3466 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
3499 __ EmitVFPTruncate(kRoundToMinusInf, 3503 __ EmitVFPTruncate(kRoundToMinusInf,
3500 result, 3504 result,
3501 input, 3505 input,
3502 scratch, 3506 scratch,
3503 double_scratch0()); 3507 double_scratch0());
3504 DeoptimizeIf(ne, instr->environment()); 3508 DeoptimizeIf(ne, instr->environment());
3505 3509
3506 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3510 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3507 // Test for -0. 3511 // Test for -0.
3508 Label done; 3512 Label done;
3509 __ cmp(result, Operand(0)); 3513 __ cmp(result, Operand::Zero());
3510 __ b(ne, &done); 3514 __ b(ne, &done);
3511 __ vmov(scratch, input.high()); 3515 __ vmov(scratch, input.high());
3512 __ tst(scratch, Operand(HeapNumber::kSignMask)); 3516 __ tst(scratch, Operand(HeapNumber::kSignMask));
3513 DeoptimizeIf(ne, instr->environment()); 3517 DeoptimizeIf(ne, instr->environment());
3514 __ bind(&done); 3518 __ bind(&done);
3515 } 3519 }
3516 } 3520 }
3517 3521
3518 3522
3519 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { 3523 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
3520 DoubleRegister input = ToDoubleRegister(instr->value()); 3524 DoubleRegister input = ToDoubleRegister(instr->value());
3521 Register result = ToRegister(instr->result()); 3525 Register result = ToRegister(instr->result());
3522 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); 3526 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
3523 Register scratch = scratch0(); 3527 Register scratch = scratch0();
3524 Label done, check_sign_on_zero; 3528 Label done, check_sign_on_zero;
3525 3529
3526 // Extract exponent bits. 3530 // Extract exponent bits.
3527 __ vmov(result, input.high()); 3531 __ vmov(result, input.high());
3528 __ ubfx(scratch, 3532 __ ubfx(scratch,
3529 result, 3533 result,
3530 HeapNumber::kExponentShift, 3534 HeapNumber::kExponentShift,
3531 HeapNumber::kExponentBits); 3535 HeapNumber::kExponentBits);
3532 3536
3533 // If the number is in ]-0.5, +0.5[, the result is +/- 0. 3537 // If the number is in ]-0.5, +0.5[, the result is +/- 0.
3534 __ cmp(scratch, Operand(HeapNumber::kExponentBias - 2)); 3538 __ cmp(scratch, Operand(HeapNumber::kExponentBias - 2));
3535 __ mov(result, Operand(0), LeaveCC, le); 3539 __ mov(result, Operand::Zero(), LeaveCC, le);
3536 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3540 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3537 __ b(le, &check_sign_on_zero); 3541 __ b(le, &check_sign_on_zero);
3538 } else { 3542 } else {
3539 __ b(le, &done); 3543 __ b(le, &done);
3540 } 3544 }
3541 3545
3542 // The following conversion will not work with numbers 3546 // The following conversion will not work with numbers
3543 // outside of ]-2^32, 2^32[. 3547 // outside of ]-2^32, 2^32[.
3544 __ cmp(scratch, Operand(HeapNumber::kExponentBias + 32)); 3548 __ cmp(scratch, Operand(HeapNumber::kExponentBias + 32));
3545 DeoptimizeIf(ge, instr->environment()); 3549 DeoptimizeIf(ge, instr->environment());
3546 3550
3547 __ Vmov(double_scratch0(), 0.5, scratch); 3551 __ Vmov(double_scratch0(), 0.5, scratch);
3548 __ vadd(double_scratch0(), input, double_scratch0()); 3552 __ vadd(double_scratch0(), input, double_scratch0());
3549 3553
3550 // Save the original sign for later comparison. 3554 // Save the original sign for later comparison.
3551 __ and_(scratch, result, Operand(HeapNumber::kSignMask)); 3555 __ and_(scratch, result, Operand(HeapNumber::kSignMask));
3552 3556
3553 // Check sign of the result: if the sign changed, the input 3557 // Check sign of the result: if the sign changed, the input
3554 // value was in ]0.5, 0[ and the result should be -0. 3558 // value was in ]0.5, 0[ and the result should be -0.
3555 __ vmov(result, double_scratch0().high()); 3559 __ vmov(result, double_scratch0().high());
3556 __ eor(result, result, Operand(scratch), SetCC); 3560 __ eor(result, result, Operand(scratch), SetCC);
3557 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3561 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3558 DeoptimizeIf(mi, instr->environment()); 3562 DeoptimizeIf(mi, instr->environment());
3559 } else { 3563 } else {
3560 __ mov(result, Operand(0), LeaveCC, mi); 3564 __ mov(result, Operand::Zero(), LeaveCC, mi);
3561 __ b(mi, &done); 3565 __ b(mi, &done);
3562 } 3566 }
3563 3567
3564 __ EmitVFPTruncate(kRoundToMinusInf, 3568 __ EmitVFPTruncate(kRoundToMinusInf,
3565 result, 3569 result,
3566 double_scratch0(), 3570 double_scratch0(),
3567 scratch, 3571 scratch,
3568 double_scratch1); 3572 double_scratch1);
3569 DeoptimizeIf(ne, instr->environment()); 3573 DeoptimizeIf(ne, instr->environment());
3570 3574
3571 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3575 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3572 // Test for -0. 3576 // Test for -0.
3573 __ cmp(result, Operand(0)); 3577 __ cmp(result, Operand::Zero());
3574 __ b(ne, &done); 3578 __ b(ne, &done);
3575 __ bind(&check_sign_on_zero); 3579 __ bind(&check_sign_on_zero);
3576 __ vmov(scratch, input.high()); 3580 __ vmov(scratch, input.high());
3577 __ tst(scratch, Operand(HeapNumber::kSignMask)); 3581 __ tst(scratch, Operand(HeapNumber::kSignMask));
3578 DeoptimizeIf(ne, instr->environment()); 3582 DeoptimizeIf(ne, instr->environment());
3579 } 3583 }
3580 __ bind(&done); 3584 __ bind(&done);
3581 } 3585 }
3582 3586
3583 3587
(...skipping 79 matching lines...) Expand 10 before | Expand all | Expand 10 after
3663 STATIC_ASSERT(kPointerSize == kSeedSize); 3667 STATIC_ASSERT(kPointerSize == kSeedSize);
3664 3668
3665 __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset)); 3669 __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
3666 static const int kRandomSeedOffset = 3670 static const int kRandomSeedOffset =
3667 FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize; 3671 FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
3668 __ ldr(r2, FieldMemOperand(r0, kRandomSeedOffset)); 3672 __ ldr(r2, FieldMemOperand(r0, kRandomSeedOffset));
3669 // r2: FixedArray of the native context's random seeds 3673 // r2: FixedArray of the native context's random seeds
3670 3674
3671 // Load state[0]. 3675 // Load state[0].
3672 __ ldr(r1, FieldMemOperand(r2, ByteArray::kHeaderSize)); 3676 __ ldr(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
3673 __ cmp(r1, Operand(0)); 3677 __ cmp(r1, Operand::Zero());
3674 __ b(eq, deferred->entry()); 3678 __ b(eq, deferred->entry());
3675 // Load state[1]. 3679 // Load state[1].
3676 __ ldr(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize)); 3680 __ ldr(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
3677 // r1: state[0]. 3681 // r1: state[0].
3678 // r0: state[1]. 3682 // r0: state[1].
3679 3683
3680 // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16) 3684 // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
3681 __ and_(r3, r1, Operand(0xFFFF)); 3685 __ and_(r3, r1, Operand(0xFFFF));
3682 __ mov(r4, Operand(18273)); 3686 __ mov(r4, Operand(18273));
3683 __ mul(r3, r3, r4); 3687 __ mul(r3, r3, r4);
(...skipping 14 matching lines...) Expand all
3698 __ add(r0, r0, Operand(r1, LSL, 14)); 3702 __ add(r0, r0, Operand(r1, LSL, 14));
3699 3703
3700 __ bind(deferred->exit()); 3704 __ bind(deferred->exit());
3701 // 0x41300000 is the top half of 1.0 x 2^20 as a double. 3705 // 0x41300000 is the top half of 1.0 x 2^20 as a double.
3702 // Create this constant using mov/orr to avoid PC relative load. 3706 // Create this constant using mov/orr to avoid PC relative load.
3703 __ mov(r1, Operand(0x41000000)); 3707 __ mov(r1, Operand(0x41000000));
3704 __ orr(r1, r1, Operand(0x300000)); 3708 __ orr(r1, r1, Operand(0x300000));
3705 // Move 0x41300000xxxxxxxx (x = random bits) to VFP. 3709 // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
3706 __ vmov(d7, r0, r1); 3710 __ vmov(d7, r0, r1);
3707 // Move 0x4130000000000000 to VFP. 3711 // Move 0x4130000000000000 to VFP.
3708 __ mov(r0, Operand(0, RelocInfo::NONE)); 3712 __ mov(r0, Operand::Zero());
3709 __ vmov(d8, r0, r1); 3713 __ vmov(d8, r0, r1);
3710 // Subtract and store the result in the heap number. 3714 // Subtract and store the result in the heap number.
3711 __ vsub(d7, d7, d8); 3715 __ vsub(d7, d7, d8);
3712 } 3716 }
3713 3717
3714 3718
3715 void LCodeGen::DoDeferredRandom(LRandom* instr) { 3719 void LCodeGen::DoDeferredRandom(LRandom* instr) {
3716 __ PrepareCallCFunction(1, scratch0()); 3720 __ PrepareCallCFunction(1, scratch0());
3717 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); 3721 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
3718 // Return value is in r0. 3722 // Return value is in r0.
(...skipping 527 matching lines...) Expand 10 before | Expand all | Expand 10 after
4246 4250
4247 4251
4248 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { 4252 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4249 Register string = ToRegister(instr->string()); 4253 Register string = ToRegister(instr->string());
4250 Register result = ToRegister(instr->result()); 4254 Register result = ToRegister(instr->result());
4251 Register scratch = scratch0(); 4255 Register scratch = scratch0();
4252 4256
4253 // TODO(3095996): Get rid of this. For now, we need to make the 4257 // TODO(3095996): Get rid of this. For now, we need to make the
4254 // result register contain a valid pointer because it is already 4258 // result register contain a valid pointer because it is already
4255 // contained in the register pointer map. 4259 // contained in the register pointer map.
4256 __ mov(result, Operand(0)); 4260 __ mov(result, Operand::Zero());
4257 4261
4258 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); 4262 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4259 __ push(string); 4263 __ push(string);
4260 // Push the index as a smi. This is safe because of the checks in 4264 // Push the index as a smi. This is safe because of the checks in
4261 // DoStringCharCodeAt above. 4265 // DoStringCharCodeAt above.
4262 if (instr->index()->IsConstantOperand()) { 4266 if (instr->index()->IsConstantOperand()) {
4263 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); 4267 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4264 __ mov(scratch, Operand(Smi::FromInt(const_index))); 4268 __ mov(scratch, Operand(Smi::FromInt(const_index)));
4265 __ push(scratch); 4269 __ push(scratch);
4266 } else { 4270 } else {
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after
4306 } 4310 }
4307 4311
4308 4312
4309 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { 4313 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4310 Register char_code = ToRegister(instr->char_code()); 4314 Register char_code = ToRegister(instr->char_code());
4311 Register result = ToRegister(instr->result()); 4315 Register result = ToRegister(instr->result());
4312 4316
4313 // TODO(3095996): Get rid of this. For now, we need to make the 4317 // TODO(3095996): Get rid of this. For now, we need to make the
4314 // result register contain a valid pointer because it is already 4318 // result register contain a valid pointer because it is already
4315 // contained in the register pointer map. 4319 // contained in the register pointer map.
4316 __ mov(result, Operand(0)); 4320 __ mov(result, Operand::Zero());
4317 4321
4318 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); 4322 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4319 __ SmiTag(char_code); 4323 __ SmiTag(char_code);
4320 __ push(char_code); 4324 __ push(char_code);
4321 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr); 4325 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
4322 __ StoreToSafepointRegisterSlot(r0, result); 4326 __ StoreToSafepointRegisterSlot(r0, result);
4323 } 4327 }
4324 4328
4325 4329
4326 void LCodeGen::DoStringLength(LStringLength* instr) { 4330 void LCodeGen::DoStringLength(LStringLength* instr) {
(...skipping 116 matching lines...) Expand 10 before | Expand all | Expand 10 after
4443 __ Move(dst, r5); 4447 __ Move(dst, r5);
4444 __ b(&done); 4448 __ b(&done);
4445 } 4449 }
4446 4450
4447 // Slow case: Call the runtime system to do the number allocation. 4451 // Slow case: Call the runtime system to do the number allocation.
4448 __ bind(&slow); 4452 __ bind(&slow);
4449 4453
4450 // TODO(3095996): Put a valid pointer value in the stack slot where the result 4454 // TODO(3095996): Put a valid pointer value in the stack slot where the result
4451 // register is stored, as this register is in the pointer map, but contains an 4455 // register is stored, as this register is in the pointer map, but contains an
4452 // integer value. 4456 // integer value.
4453 __ mov(ip, Operand(0)); 4457 __ mov(ip, Operand::Zero());
4454 __ StoreToSafepointRegisterSlot(ip, dst); 4458 __ StoreToSafepointRegisterSlot(ip, dst);
4455 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); 4459 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
4456 __ Move(dst, r0); 4460 __ Move(dst, r0);
4457 __ sub(dst, dst, Operand(kHeapObjectTag)); 4461 __ sub(dst, dst, Operand(kHeapObjectTag));
4458 4462
4459 // Done. Put the value in dbl_scratch into the value of the allocated heap 4463 // Done. Put the value in dbl_scratch into the value of the allocated heap
4460 // number. 4464 // number.
4461 __ bind(&done); 4465 __ bind(&done);
4462 __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset); 4466 __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
4463 __ add(dst, dst, Operand(kHeapObjectTag)); 4467 __ add(dst, dst, Operand(kHeapObjectTag));
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after
4496 // Now that we have finished with the object's real address tag it 4500 // Now that we have finished with the object's real address tag it
4497 __ add(reg, reg, Operand(kHeapObjectTag)); 4501 __ add(reg, reg, Operand(kHeapObjectTag));
4498 } 4502 }
4499 4503
4500 4504
4501 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { 4505 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4502 // TODO(3095996): Get rid of this. For now, we need to make the 4506 // TODO(3095996): Get rid of this. For now, we need to make the
4503 // result register contain a valid pointer because it is already 4507 // result register contain a valid pointer because it is already
4504 // contained in the register pointer map. 4508 // contained in the register pointer map.
4505 Register reg = ToRegister(instr->result()); 4509 Register reg = ToRegister(instr->result());
4506 __ mov(reg, Operand(0)); 4510 __ mov(reg, Operand::Zero());
4507 4511
4508 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); 4512 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4509 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); 4513 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
4510 __ sub(r0, r0, Operand(kHeapObjectTag)); 4514 __ sub(r0, r0, Operand(kHeapObjectTag));
4511 __ StoreToSafepointRegisterSlot(r0, reg); 4515 __ StoreToSafepointRegisterSlot(r0, reg);
4512 } 4516 }
4513 4517
4514 4518
4515 void LCodeGen::DoSmiTag(LSmiTag* instr) { 4519 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4516 ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)); 4520 ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
(...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after
4566 __ vldr(result_reg, ip, HeapNumber::kValueOffset); 4570 __ vldr(result_reg, ip, HeapNumber::kValueOffset);
4567 __ jmp(&done); 4571 __ jmp(&done);
4568 4572
4569 __ bind(&heap_number); 4573 __ bind(&heap_number);
4570 } 4574 }
4571 // Heap number to double register conversion. 4575 // Heap number to double register conversion.
4572 __ sub(ip, input_reg, Operand(kHeapObjectTag)); 4576 __ sub(ip, input_reg, Operand(kHeapObjectTag));
4573 __ vldr(result_reg, ip, HeapNumber::kValueOffset); 4577 __ vldr(result_reg, ip, HeapNumber::kValueOffset);
4574 if (deoptimize_on_minus_zero) { 4578 if (deoptimize_on_minus_zero) {
4575 __ vmov(ip, result_reg.low()); 4579 __ vmov(ip, result_reg.low());
4576 __ cmp(ip, Operand(0)); 4580 __ cmp(ip, Operand::Zero());
4577 __ b(ne, &done); 4581 __ b(ne, &done);
4578 __ vmov(ip, result_reg.high()); 4582 __ vmov(ip, result_reg.high());
4579 __ cmp(ip, Operand(HeapNumber::kSignMask)); 4583 __ cmp(ip, Operand(HeapNumber::kSignMask));
4580 DeoptimizeIf(eq, env); 4584 DeoptimizeIf(eq, env);
4581 } 4585 }
4582 __ jmp(&done); 4586 __ jmp(&done);
4583 4587
4584 // Smi to double register conversion 4588 // Smi to double register conversion
4585 __ bind(&load_smi); 4589 __ bind(&load_smi);
4586 // scratch: untagged value of input_reg 4590 // scratch: untagged value of input_reg
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after
4621 !scratch3.is(scratch2)); 4625 !scratch3.is(scratch2));
4622 // Performs a truncating conversion of a floating point number as used by 4626 // Performs a truncating conversion of a floating point number as used by
4623 // the JS bitwise operations. 4627 // the JS bitwise operations.
4624 Label heap_number; 4628 Label heap_number;
4625 __ b(eq, &heap_number); 4629 __ b(eq, &heap_number);
4626 // Check for undefined. Undefined is converted to zero for truncating 4630 // Check for undefined. Undefined is converted to zero for truncating
4627 // conversions. 4631 // conversions.
4628 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); 4632 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4629 __ cmp(input_reg, Operand(ip)); 4633 __ cmp(input_reg, Operand(ip));
4630 DeoptimizeIf(ne, instr->environment()); 4634 DeoptimizeIf(ne, instr->environment());
4631 __ mov(input_reg, Operand(0)); 4635 __ mov(input_reg, Operand::Zero());
4632 __ b(&done); 4636 __ b(&done);
4633 4637
4634 __ bind(&heap_number); 4638 __ bind(&heap_number);
4635 __ sub(scratch1, input_reg, Operand(kHeapObjectTag)); 4639 __ sub(scratch1, input_reg, Operand(kHeapObjectTag));
4636 __ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset); 4640 __ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset);
4637 4641
4638 __ EmitECMATruncate(input_reg, 4642 __ EmitECMATruncate(input_reg,
4639 double_scratch2, 4643 double_scratch2,
4640 single_scratch, 4644 single_scratch,
4641 scratch1, 4645 scratch1,
4642 scratch2, 4646 scratch2,
4643 scratch3); 4647 scratch3);
4644 4648
4645 } else { 4649 } else {
4646 CpuFeatures::Scope scope(VFP3); 4650 CpuFeatures::Scope scope(VFP3);
4647 // Deoptimize if we don't have a heap number. 4651 // Deoptimize if we don't have a heap number.
4648 DeoptimizeIf(ne, instr->environment()); 4652 DeoptimizeIf(ne, instr->environment());
4649 4653
4650 __ sub(ip, input_reg, Operand(kHeapObjectTag)); 4654 __ sub(ip, input_reg, Operand(kHeapObjectTag));
4651 __ vldr(double_scratch, ip, HeapNumber::kValueOffset); 4655 __ vldr(double_scratch, ip, HeapNumber::kValueOffset);
4652 __ EmitVFPTruncate(kRoundToZero, 4656 __ EmitVFPTruncate(kRoundToZero,
4653 input_reg, 4657 input_reg,
4654 double_scratch, 4658 double_scratch,
4655 scratch1, 4659 scratch1,
4656 double_scratch2, 4660 double_scratch2,
4657 kCheckForInexactConversion); 4661 kCheckForInexactConversion);
4658 DeoptimizeIf(ne, instr->environment()); 4662 DeoptimizeIf(ne, instr->environment());
4659 4663
4660 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4664 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4661 __ cmp(input_reg, Operand(0)); 4665 __ cmp(input_reg, Operand::Zero());
4662 __ b(ne, &done); 4666 __ b(ne, &done);
4663 __ vmov(scratch1, double_scratch.high()); 4667 __ vmov(scratch1, double_scratch.high());
4664 __ tst(scratch1, Operand(HeapNumber::kSignMask)); 4668 __ tst(scratch1, Operand(HeapNumber::kSignMask));
4665 DeoptimizeIf(ne, instr->environment()); 4669 DeoptimizeIf(ne, instr->environment());
4666 } 4670 }
4667 } 4671 }
4668 __ bind(&done); 4672 __ bind(&done);
4669 } 4673 }
4670 4674
4671 4675
(...skipping 208 matching lines...) Expand 10 before | Expand all | Expand 10 after
4880 4884
4881 // Check for heap number 4885 // Check for heap number
4882 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); 4886 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4883 __ cmp(scratch, Operand(factory()->heap_number_map())); 4887 __ cmp(scratch, Operand(factory()->heap_number_map()));
4884 __ b(eq, &heap_number); 4888 __ b(eq, &heap_number);
4885 4889
4886 // Check for undefined. Undefined is converted to zero for clamping 4890 // Check for undefined. Undefined is converted to zero for clamping
4887 // conversions. 4891 // conversions.
4888 __ cmp(input_reg, Operand(factory()->undefined_value())); 4892 __ cmp(input_reg, Operand(factory()->undefined_value()));
4889 DeoptimizeIf(ne, instr->environment()); 4893 DeoptimizeIf(ne, instr->environment());
4890 __ mov(result_reg, Operand(0)); 4894 __ mov(result_reg, Operand::Zero());
4891 __ jmp(&done); 4895 __ jmp(&done);
4892 4896
4893 // Heap number 4897 // Heap number
4894 __ bind(&heap_number); 4898 __ bind(&heap_number);
4895 __ vldr(double_scratch0(), FieldMemOperand(input_reg, 4899 __ vldr(double_scratch0(), FieldMemOperand(input_reg,
4896 HeapNumber::kValueOffset)); 4900 HeapNumber::kValueOffset));
4897 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg); 4901 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
4898 __ jmp(&done); 4902 __ jmp(&done);
4899 4903
4900 // smi 4904 // smi
(...skipping 99 matching lines...) Expand 10 before | Expand all | Expand 10 after
5000 5004
// Slow path for LAllocateObject: allocate the object via a runtime call.
// The instance size is read from the constructor function's initial map and
// passed to Runtime::kAllocateInNewSpace as a Smi; the runtime's result
// (returned in r0) is written back into the instruction's result register
// through its safepoint slot.
void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
  Register result = ToRegister(instr->result());
  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
  Handle<Map> initial_map(constructor->initial_map());
  int instance_size = initial_map->instance_size();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, Operand::Zero());

  // Registers are spilled around the runtime call; the Smi-encoded instance
  // size is the call's single argument.
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ mov(r0, Operand(Smi::FromInt(instance_size)));
  __ push(r0);
  CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
  // Propagate the allocated object (in r0) to the saved result register.
  __ StoreToSafepointRegisterSlot(r0, result);
}
5018 5022
5019 5023
5020 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { 5024 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
(...skipping 607 matching lines...) Expand 10 before | Expand all | Expand 10 after
5628 __ b(ne, &load_cache); 5632 __ b(ne, &load_cache);
5629 __ mov(result, Operand(isolate()->factory()->empty_fixed_array())); 5633 __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
5630 __ jmp(&done); 5634 __ jmp(&done);
5631 5635
5632 __ bind(&load_cache); 5636 __ bind(&load_cache);
5633 __ LoadInstanceDescriptors(map, result, scratch); 5637 __ LoadInstanceDescriptors(map, result, scratch);
5634 __ ldr(result, 5638 __ ldr(result,
5635 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); 5639 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5636 __ ldr(result, 5640 __ ldr(result,
5637 FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); 5641 FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5638 __ cmp(result, Operand(0)); 5642 __ cmp(result, Operand::Zero());
5639 DeoptimizeIf(eq, instr->environment()); 5643 DeoptimizeIf(eq, instr->environment());
5640 5644
5641 __ bind(&done); 5645 __ bind(&done);
5642 } 5646 }
5643 5647
5644 5648
// Deoptimize unless the object's map equals the map held in the map register.
// Loads the object's map into scratch0() and compares it against the expected
// map; a mismatch triggers an eager deopt (used by for-in to detect that the
// receiver's shape changed during iteration — presumably; confirm with the
// Hydrogen instruction that emits LCheckMapValue).
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  __ cmp(map, scratch0());
  DeoptimizeIf(ne, instr->environment());
}
5652 5656
5653 5657
// Load a property of |object| selected by a (Smi-encoded) field index.
// A non-negative index addresses an in-object field relative to
// JSObject::kHeaderSize; a negative index addresses a slot in the
// out-of-object properties backing store (see the negated-index comment
// below). The index is scaled from Smi to pointer units via the shift by
// (kPointerSizeLog2 - kSmiTagSize).
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  Label out_of_object, done;
  // Negative indices select the out-of-object properties array.
  __ cmp(index, Operand::Zero());
  __ b(lt, &out_of_object);

  // In-object field: result = [object + header + index * kPointerSize].
  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
  __ add(scratch, object, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ b(&done);

  __ bind(&out_of_object);
  // Out-of-object field: load the properties FixedArray, then index into it.
  __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to negated out of object property index plus 1.
  __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ ldr(result, FieldMemOperand(scratch,
                                 FixedArray::kHeaderSize - kPointerSize));
  __ bind(&done);
}
5678 5682
5679 5683
5680 #undef __ 5684 #undef __
5681 5685
5682 } } // namespace v8::internal 5686 } } // namespace v8::internal
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698