Chromium Code Reviews

Side by Side Diff: src/arm/macro-assembler-arm.cc

Issue 11745030: ARM: generate integer zero in a uniform manner. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 7 years, 11 months ago
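
Context for the diff below: the CL replaces the ad-hoc spellings of an immediate zero operand (Operand(0) and Operand(0, RelocInfo::NONE32)) with a single Operand::Zero() call at every site. As a rough sketch only, the helper presumably amounts to a static factory on Operand along the following lines; the exact declaration lives in V8's ARM assembler headers and may differ in detail:

    #include <cstdint>

    // Sketch, not the real V8 class: a factory that standardizes
    // "immediate zero" so call sites no longer spell out reloc info.
    class Operand {
     public:
      explicit Operand(int32_t immediate) : immediate_(immediate) {}
      static Operand Zero() { return Operand(0); }
      int32_t immediate() const { return immediate_; }
     private:
      int32_t immediate_;
    };

    // Call sites in the patch then read uniformly:
    //   mov(dst, Operand::Zero(), LeaveCC, cond);
    // instead of the previous mix of
    //   mov(dst, Operand(0, RelocInfo::NONE32), LeaveCC, cond);
    //   mov(dst, Operand(0));
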
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 286 matching lines...) Expand 10 before | Expand all | Expand 10 after
297 vmov(dst, src); 297 vmov(dst, src);
298 } 298 }
299 } 299 }
300 300
301 301
302 void MacroAssembler::And(Register dst, Register src1, const Operand& src2, 302 void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
303 Condition cond) { 303 Condition cond) {
304 if (!src2.is_reg() && 304 if (!src2.is_reg() &&
305 !src2.must_output_reloc_info(this) && 305 !src2.must_output_reloc_info(this) &&
306 src2.immediate() == 0) { 306 src2.immediate() == 0) {
307 mov(dst, Operand(0, RelocInfo::NONE32), LeaveCC, cond); 307 mov(dst, Operand::Zero(), LeaveCC, cond);
308 } else if (!src2.is_single_instruction(this) && 308 } else if (!src2.is_single_instruction(this) &&
309 !src2.must_output_reloc_info(this) && 309 !src2.must_output_reloc_info(this) &&
310 CpuFeatures::IsSupported(ARMv7) && 310 CpuFeatures::IsSupported(ARMv7) &&
311 IsPowerOf2(src2.immediate() + 1)) { 311 IsPowerOf2(src2.immediate() + 1)) {
312 ubfx(dst, src1, 0, 312 ubfx(dst, src1, 0,
313 WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond); 313 WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
314 } else { 314 } else {
315 and_(dst, src1, src2, LeaveCC, cond); 315 and_(dst, src1, src2, LeaveCC, cond);
316 } 316 }
317 } 317 }
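
A note on the And() chunk above (pre-existing logic, not changed by this CL beyond the zero operand): when the immediate mask has the form 2^k - 1, the code emits ubfx of the low k bits instead of an and, since extracting the low k bits is arithmetically the same as masking with 2^k - 1, and WhichPowerOf2(imm + 1) yields k. A small self-contained check of that equivalence, with hypothetical values chosen purely for illustration:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t src = 0xDEADBEEF;                     // arbitrary example input
      uint32_t imm = 0xFF;                           // 2^8 - 1, so k = 8
      uint32_t via_and  = src & imm;                 // and dst, src, #0xFF
      uint32_t via_ubfx = (src << 24) >> 24;         // ubfx dst, src, #0, #8
      assert(via_and == via_ubfx);                   // both give 0xEF
      return 0;
    }
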
(...skipping 85 matching lines...) Expand 10 before | Expand all | Expand 10 after
403 int satval = (1 << satpos) - 1; 403 int satval = (1 << satpos) - 1;
404 404
405 if (cond != al) { 405 if (cond != al) {
406 b(NegateCondition(cond), &done); // Skip saturate if !condition. 406 b(NegateCondition(cond), &done); // Skip saturate if !condition.
407 } 407 }
408 if (!(src.is_reg() && dst.is(src.rm()))) { 408 if (!(src.is_reg() && dst.is(src.rm()))) {
409 mov(dst, src); 409 mov(dst, src);
410 } 410 }
411 tst(dst, Operand(~satval)); 411 tst(dst, Operand(~satval));
412 b(eq, &done); 412 b(eq, &done);
413 mov(dst, Operand(0, RelocInfo::NONE32), LeaveCC, mi); // 0 if negative. 413 mov(dst, Operand::Zero(), LeaveCC, mi); // 0 if negative.
414 mov(dst, Operand(satval), LeaveCC, pl); // satval if positive. 414 mov(dst, Operand(satval), LeaveCC, pl); // satval if positive.
415 bind(&done); 415 bind(&done);
416 } else { 416 } else {
417 usat(dst, satpos, src, cond); 417 usat(dst, satpos, src, cond);
418 } 418 }
419 } 419 }
420 420
421 421
422 void MacroAssembler::LoadRoot(Register destination, 422 void MacroAssembler::LoadRoot(Register destination,
423 Heap::RootListIndex index, 423 Heap::RootListIndex index,
(...skipping 433 matching lines...) Expand 10 before | Expand all | Expand 10 after
857 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) { 857 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
858 // Set up the frame structure on the stack. 858 // Set up the frame structure on the stack.
859 ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement); 859 ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
860 ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset); 860 ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
861 ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset); 861 ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
862 Push(lr, fp); 862 Push(lr, fp);
863 mov(fp, Operand(sp)); // Set up new frame pointer. 863 mov(fp, Operand(sp)); // Set up new frame pointer.
864 // Reserve room for saved entry sp and code object. 864 // Reserve room for saved entry sp and code object.
865 sub(sp, sp, Operand(2 * kPointerSize)); 865 sub(sp, sp, Operand(2 * kPointerSize));
866 if (emit_debug_code()) { 866 if (emit_debug_code()) {
867 mov(ip, Operand(0)); 867 mov(ip, Operand::Zero());
868 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset)); 868 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
869 } 869 }
870 mov(ip, Operand(CodeObject())); 870 mov(ip, Operand(CodeObject()));
871 str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset)); 871 str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
872 872
873 // Save the frame pointer and the context in top. 873 // Save the frame pointer and the context in top.
874 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); 874 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
875 str(fp, MemOperand(ip)); 875 str(fp, MemOperand(ip));
876 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); 876 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
877 str(cp, MemOperand(ip)); 877 str(cp, MemOperand(ip));
(...skipping 63 matching lines...) Expand 10 before | Expand all | Expand 10 after
941 // Calculate the stack location of the saved doubles and restore them. 941 // Calculate the stack location of the saved doubles and restore them.
942 const int offset = 2 * kPointerSize; 942 const int offset = 2 * kPointerSize;
943 sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize)); 943 sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize));
944 DwVfpRegister first = d0; 944 DwVfpRegister first = d0;
945 DwVfpRegister last = 945 DwVfpRegister last =
946 DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1); 946 DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
947 vldm(ia, r3, first, last); 947 vldm(ia, r3, first, last);
948 } 948 }
949 949
950 // Clear top frame. 950 // Clear top frame.
951 mov(r3, Operand(0, RelocInfo::NONE32)); 951 mov(r3, Operand::Zero());
952 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); 952 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
953 str(r3, MemOperand(ip)); 953 str(r3, MemOperand(ip));
954 954
955 // Restore current context from top and clear it in debug mode. 955 // Restore current context from top and clear it in debug mode.
956 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); 956 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
957 ldr(cp, MemOperand(ip)); 957 ldr(cp, MemOperand(ip));
958 #ifdef DEBUG 958 #ifdef DEBUG
959 str(r3, MemOperand(ip)); 959 str(r3, MemOperand(ip));
960 #endif 960 #endif
961 961
(...skipping 249 matching lines...) Expand 10 before | Expand all | Expand 10 after
1211 1211
1212 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); 1212 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1213 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); 1213 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1214 tst(scratch, Operand(kIsNotStringMask)); 1214 tst(scratch, Operand(kIsNotStringMask));
1215 b(ne, fail); 1215 b(ne, fail);
1216 } 1216 }
1217 1217
1218 1218
1219 #ifdef ENABLE_DEBUGGER_SUPPORT 1219 #ifdef ENABLE_DEBUGGER_SUPPORT
1220 void MacroAssembler::DebugBreak() { 1220 void MacroAssembler::DebugBreak() {
1221 mov(r0, Operand(0, RelocInfo::NONE32)); 1221 mov(r0, Operand::Zero());
1222 mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate()))); 1222 mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
1223 CEntryStub ces(1); 1223 CEntryStub ces(1);
1224 ASSERT(AllowThisStubCall(&ces)); 1224 ASSERT(AllowThisStubCall(&ces));
1225 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); 1225 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
1226 } 1226 }
1227 #endif 1227 #endif
1228 1228
1229 1229
1230 void MacroAssembler::PushTryHandler(StackHandler::Kind kind, 1230 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
1231 int handler_index) { 1231 int handler_index) {
(...skipping 10 matching lines...) Expand all
1242 // Set up the code object (r5) and the state (r6) for pushing. 1242 // Set up the code object (r5) and the state (r6) for pushing.
1243 unsigned state = 1243 unsigned state =
1244 StackHandler::IndexField::encode(handler_index) | 1244 StackHandler::IndexField::encode(handler_index) |
1245 StackHandler::KindField::encode(kind); 1245 StackHandler::KindField::encode(kind);
1246 mov(r5, Operand(CodeObject())); 1246 mov(r5, Operand(CodeObject()));
1247 mov(r6, Operand(state)); 1247 mov(r6, Operand(state));
1248 1248
1249 // Push the frame pointer, context, state, and code object. 1249 // Push the frame pointer, context, state, and code object.
1250 if (kind == StackHandler::JS_ENTRY) { 1250 if (kind == StackHandler::JS_ENTRY) {
1251 mov(r7, Operand(Smi::FromInt(0))); // Indicates no context. 1251 mov(r7, Operand(Smi::FromInt(0))); // Indicates no context.
1252 mov(ip, Operand(0, RelocInfo::NONE32)); // NULL frame pointer. 1252 mov(ip, Operand::Zero()); // NULL frame pointer.
1253 stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | ip.bit()); 1253 stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | ip.bit());
1254 } else { 1254 } else {
1255 stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit()); 1255 stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
1256 } 1256 }
1257 1257
1258 // Link the current handler as the next handler. 1258 // Link the current handler as the next handler.
1259 mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); 1259 mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1260 ldr(r5, MemOperand(r6)); 1260 ldr(r5, MemOperand(r6));
1261 push(r5); 1261 push(r5);
1262 // Set this new handler as the current one. 1262 // Set this new handler as the current one.
(...skipping 103 matching lines...) Expand 10 before | Expand all | Expand 10 after
1366 Label same_contexts; 1366 Label same_contexts;
1367 1367
1368 ASSERT(!holder_reg.is(scratch)); 1368 ASSERT(!holder_reg.is(scratch));
1369 ASSERT(!holder_reg.is(ip)); 1369 ASSERT(!holder_reg.is(ip));
1370 ASSERT(!scratch.is(ip)); 1370 ASSERT(!scratch.is(ip));
1371 1371
1372 // Load current lexical context from the stack frame. 1372 // Load current lexical context from the stack frame.
1373 ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); 1373 ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
1374 // In debug mode, make sure the lexical context is set. 1374 // In debug mode, make sure the lexical context is set.
1375 #ifdef DEBUG 1375 #ifdef DEBUG
1376 cmp(scratch, Operand(0, RelocInfo::NONE32)); 1376 cmp(scratch, Operand::Zero());
1377 Check(ne, "we should not have an empty lexical context"); 1377 Check(ne, "we should not have an empty lexical context");
1378 #endif 1378 #endif
1379 1379
1380 // Load the native context of the current context. 1380 // Load the native context of the current context.
1381 int offset = 1381 int offset =
1382 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize; 1382 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
1383 ldr(scratch, FieldMemOperand(scratch, offset)); 1383 ldr(scratch, FieldMemOperand(scratch, offset));
1384 ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); 1384 ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
1385 1385
1386 // Check the context is a native context. 1386 // Check the context is a native context.
(...skipping 631 matching lines...) Expand 10 before | Expand all | Expand 10 after
2018 uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset + 2018 uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
2019 sizeof(kHoleNanLower32); 2019 sizeof(kHoleNanLower32);
2020 str(exponent_reg, FieldMemOperand(scratch1, offset)); 2020 str(exponent_reg, FieldMemOperand(scratch1, offset));
2021 jmp(&done); 2021 jmp(&done);
2022 2022
2023 bind(&maybe_nan); 2023 bind(&maybe_nan);
2024 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise 2024 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
2025 // it's an Infinity, and the non-NaN code path applies. 2025 // it's an Infinity, and the non-NaN code path applies.
2026 b(gt, &is_nan); 2026 b(gt, &is_nan);
2027 ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); 2027 ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
2028 cmp(mantissa_reg, Operand(0)); 2028 cmp(mantissa_reg, Operand::Zero());
2029 b(eq, &have_double_value); 2029 b(eq, &have_double_value);
2030 bind(&is_nan); 2030 bind(&is_nan);
2031 // Load canonical NaN for storing into the double array. 2031 // Load canonical NaN for storing into the double array.
2032 uint64_t nan_int64 = BitCast<uint64_t>( 2032 uint64_t nan_int64 = BitCast<uint64_t>(
2033 FixedDoubleArray::canonical_not_the_hole_nan_as_double()); 2033 FixedDoubleArray::canonical_not_the_hole_nan_as_double());
2034 mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64))); 2034 mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
2035 mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32))); 2035 mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
2036 jmp(&have_double_value); 2036 jmp(&have_double_value);
2037 2037
2038 bind(&smi_value); 2038 bind(&smi_value);
(...skipping 231 matching lines...) Expand 10 before | Expand all | Expand 10 after
2270 CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0); 2270 CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0);
2271 PopSafepointRegisters(); 2271 PopSafepointRegisters();
2272 } 2272 }
2273 2273
2274 Label promote_scheduled_exception; 2274 Label promote_scheduled_exception;
2275 Label delete_allocated_handles; 2275 Label delete_allocated_handles;
2276 Label leave_exit_frame; 2276 Label leave_exit_frame;
2277 2277
2278 // If result is non-zero, dereference to get the result value 2278 // If result is non-zero, dereference to get the result value
2279 // otherwise set it to undefined. 2279 // otherwise set it to undefined.
2280 cmp(r0, Operand(0)); 2280 cmp(r0, Operand::Zero());
2281 LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); 2281 LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
2282 ldr(r0, MemOperand(r0), ne); 2282 ldr(r0, MemOperand(r0), ne);
2283 2283
2284 // No more valid handles (the result handle was the last one). Restore 2284 // No more valid handles (the result handle was the last one). Restore
2285 // previous handle scope. 2285 // previous handle scope.
2286 str(r4, MemOperand(r7, kNextOffset)); 2286 str(r4, MemOperand(r7, kNextOffset));
2287 if (emit_debug_code()) { 2287 if (emit_debug_code()) {
2288 ldr(r1, MemOperand(r7, kLevelOffset)); 2288 ldr(r1, MemOperand(r7, kLevelOffset));
2289 cmp(r1, r6); 2289 cmp(r1, r6);
2290 Check(eq, "Unexpected level after return from api call"); 2290 Check(eq, "Unexpected level after return from api call");
(...skipping 158 matching lines...) Expand 10 before | Expand all | Expand 10 after
2449 Label right_exponent, done; 2449 Label right_exponent, done;
2450 // Get exponent word. 2450 // Get exponent word.
2451 ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset)); 2451 ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
2452 // Get exponent alone in scratch2. 2452 // Get exponent alone in scratch2.
2453 Ubfx(scratch2, 2453 Ubfx(scratch2,
2454 scratch, 2454 scratch,
2455 HeapNumber::kExponentShift, 2455 HeapNumber::kExponentShift,
2456 HeapNumber::kExponentBits); 2456 HeapNumber::kExponentBits);
2457 // Load dest with zero. We use this either for the final shift or 2457 // Load dest with zero. We use this either for the final shift or
2458 // for the answer. 2458 // for the answer.
2459 mov(dest, Operand(0, RelocInfo::NONE32)); 2459 mov(dest, Operand::Zero());
2460 // Check whether the exponent matches a 32 bit signed int that is not a Smi. 2460 // Check whether the exponent matches a 32 bit signed int that is not a Smi.
2461 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is 2461 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
2462 // the exponent that we are fastest at and also the highest exponent we can 2462 // the exponent that we are fastest at and also the highest exponent we can
2463 // handle here. 2463 // handle here.
2464 const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30; 2464 const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
2465 // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we 2465 // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
2466 // split it up to avoid a constant pool entry. You can't do that in general 2466 // split it up to avoid a constant pool entry. You can't do that in general
2467 // for cmp because of the overflow flag, but we know the exponent is in the 2467 // for cmp because of the overflow flag, but we know the exponent is in the
2468 // range 0-2047 so there is no overflow. 2468 // range 0-2047 so there is no overflow.
2469 int fudge_factor = 0x400; 2469 int fudge_factor = 0x400;
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after
2503 tst(scratch, Operand(HeapNumber::kSignMask)); 2503 tst(scratch, Operand(HeapNumber::kSignMask));
2504 // Get the second half of the double. For some exponents we don't 2504 // Get the second half of the double. For some exponents we don't
2505 // actually need this because the bits get shifted out again, but 2505 // actually need this because the bits get shifted out again, but
2506 // it's probably slower to test than just to do it. 2506 // it's probably slower to test than just to do it.
2507 ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset)); 2507 ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
2508 // Shift down 22 bits to get the last 10 bits. 2508 // Shift down 22 bits to get the last 10 bits.
2509 orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance)); 2509 orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
2510 // Move down according to the exponent. 2510 // Move down according to the exponent.
2511 mov(dest, Operand(scratch, LSR, dest)); 2511 mov(dest, Operand(scratch, LSR, dest));
2512 // Fix sign if sign bit was set. 2512 // Fix sign if sign bit was set.
2513 rsb(dest, dest, Operand(0, RelocInfo::NONE32), LeaveCC, ne); 2513 rsb(dest, dest, Operand::Zero(), LeaveCC, ne);
2514 bind(&done); 2514 bind(&done);
2515 } 2515 }
2516 } 2516 }
2517 2517
2518 2518
2519 void MacroAssembler::TryFastDoubleToInt32(Register result, 2519 void MacroAssembler::TryFastDoubleToInt32(Register result,
2520 DwVfpRegister double_input, 2520 DwVfpRegister double_input,
2521 DwVfpRegister double_scratch, 2521 DwVfpRegister double_scratch,
2522 Label* done) { 2522 Label* done) {
2523 ASSERT(!double_input.is(double_scratch)); 2523 ASSERT(!double_input.is(double_scratch));
(...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after
2594 Label done, normal_exponent, restore_sign; 2594 Label done, normal_exponent, restore_sign;
2595 2595
2596 // Extract the biased exponent in result. 2596 // Extract the biased exponent in result.
2597 Ubfx(result, 2597 Ubfx(result,
2598 input_high, 2598 input_high,
2599 HeapNumber::kExponentShift, 2599 HeapNumber::kExponentShift,
2600 HeapNumber::kExponentBits); 2600 HeapNumber::kExponentBits);
2601 2601
2602 // Check for Infinity and NaNs, which should return 0. 2602 // Check for Infinity and NaNs, which should return 0.
2603 cmp(result, Operand(HeapNumber::kExponentMask)); 2603 cmp(result, Operand(HeapNumber::kExponentMask));
2604 mov(result, Operand(0), LeaveCC, eq); 2604 mov(result, Operand::Zero(), LeaveCC, eq);
2605 b(eq, &done); 2605 b(eq, &done);
2606 2606
2607 // Express exponent as delta to (number of mantissa bits + 31). 2607 // Express exponent as delta to (number of mantissa bits + 31).
2608 sub(result, 2608 sub(result,
2609 result, 2609 result,
2610 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31), 2610 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31),
2611 SetCC); 2611 SetCC);
2612 2612
2613 // If the delta is strictly positive, all bits would be shifted away, 2613 // If the delta is strictly positive, all bits would be shifted away,
2614 // which means that we can return 0. 2614 // which means that we can return 0.
2615 b(le, &normal_exponent); 2615 b(le, &normal_exponent);
2616 mov(result, Operand(0)); 2616 mov(result, Operand::Zero());
2617 b(&done); 2617 b(&done);
2618 2618
2619 bind(&normal_exponent); 2619 bind(&normal_exponent);
2620 const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1; 2620 const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
2621 // Calculate shift. 2621 // Calculate shift.
2622 add(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits), SetCC); 2622 add(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits), SetCC);
2623 2623
2624 // Save the sign. 2624 // Save the sign.
2625 Register sign = result; 2625 Register sign = result;
2626 result = no_reg; 2626 result = no_reg;
2627 and_(sign, input_high, Operand(HeapNumber::kSignMask)); 2627 and_(sign, input_high, Operand(HeapNumber::kSignMask));
2628 2628
2629 // Set the implicit 1 before the mantissa part in input_high. 2629 // Set the implicit 1 before the mantissa part in input_high.
2630 orr(input_high, 2630 orr(input_high,
2631 input_high, 2631 input_high,
2632 Operand(1 << HeapNumber::kMantissaBitsInTopWord)); 2632 Operand(1 << HeapNumber::kMantissaBitsInTopWord));
2633 // Shift the mantissa bits to the correct position. 2633 // Shift the mantissa bits to the correct position.
2634 // We don't need to clear non-mantissa bits as they will be shifted away. 2634 // We don't need to clear non-mantissa bits as they will be shifted away.
2635 // If they weren't, it would mean that the answer is in the 32bit range. 2635 // If they weren't, it would mean that the answer is in the 32bit range.
2636 mov(input_high, Operand(input_high, LSL, scratch)); 2636 mov(input_high, Operand(input_high, LSL, scratch));
2637 2637
2638 // Replace the shifted bits with bits from the lower mantissa word. 2638 // Replace the shifted bits with bits from the lower mantissa word.
2639 Label pos_shift, shift_done; 2639 Label pos_shift, shift_done;
2640 rsb(scratch, scratch, Operand(32), SetCC); 2640 rsb(scratch, scratch, Operand(32), SetCC);
2641 b(&pos_shift, ge); 2641 b(&pos_shift, ge);
2642 2642
2643 // Negate scratch. 2643 // Negate scratch.
2644 rsb(scratch, scratch, Operand(0)); 2644 rsb(scratch, scratch, Operand::Zero());
2645 mov(input_low, Operand(input_low, LSL, scratch)); 2645 mov(input_low, Operand(input_low, LSL, scratch));
2646 b(&shift_done); 2646 b(&shift_done);
2647 2647
2648 bind(&pos_shift); 2648 bind(&pos_shift);
2649 mov(input_low, Operand(input_low, LSR, scratch)); 2649 mov(input_low, Operand(input_low, LSR, scratch));
2650 2650
2651 bind(&shift_done); 2651 bind(&shift_done);
2652 orr(input_high, input_high, Operand(input_low)); 2652 orr(input_high, input_high, Operand(input_low));
2653 // Restore sign if necessary. 2653 // Restore sign if necessary.
2654 cmp(sign, Operand(0)); 2654 cmp(sign, Operand::Zero());
2655 result = sign; 2655 result = sign;
2656 sign = no_reg; 2656 sign = no_reg;
2657 rsb(result, input_high, Operand(0), LeaveCC, ne); 2657 rsb(result, input_high, Operand::Zero(), LeaveCC, ne);
2658 mov(result, input_high, LeaveCC, eq); 2658 mov(result, input_high, LeaveCC, eq);
2659 bind(&done); 2659 bind(&done);
2660 } 2660 }
2661 2661
2662 2662
2663 void MacroAssembler::EmitECMATruncate(Register result, 2663 void MacroAssembler::EmitECMATruncate(Register result,
2664 DwVfpRegister double_input, 2664 DwVfpRegister double_input,
2665 DwVfpRegister double_scratch, 2665 DwVfpRegister double_scratch,
2666 Register scratch, 2666 Register scratch,
2667 Register input_high, 2667 Register input_high,
(...skipping 620 matching lines...) Expand 10 before | Expand all | Expand 10 after
3288 3288
3289 3289
3290 void MacroAssembler::CopyBytes(Register src, 3290 void MacroAssembler::CopyBytes(Register src,
3291 Register dst, 3291 Register dst,
3292 Register length, 3292 Register length,
3293 Register scratch) { 3293 Register scratch) {
3294 Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done; 3294 Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3295 3295
3296 // Align src before copying in word size chunks. 3296 // Align src before copying in word size chunks.
3297 bind(&align_loop); 3297 bind(&align_loop);
3298 cmp(length, Operand(0)); 3298 cmp(length, Operand::Zero());
3299 b(eq, &done); 3299 b(eq, &done);
3300 bind(&align_loop_1); 3300 bind(&align_loop_1);
3301 tst(src, Operand(kPointerSize - 1)); 3301 tst(src, Operand(kPointerSize - 1));
3302 b(eq, &word_loop); 3302 b(eq, &word_loop);
3303 ldrb(scratch, MemOperand(src, 1, PostIndex)); 3303 ldrb(scratch, MemOperand(src, 1, PostIndex));
3304 strb(scratch, MemOperand(dst, 1, PostIndex)); 3304 strb(scratch, MemOperand(dst, 1, PostIndex));
3305 sub(length, length, Operand(1), SetCC); 3305 sub(length, length, Operand(1), SetCC);
3306 b(ne, &byte_loop_1); 3306 b(ne, &byte_loop_1);
3307 3307
3308 // Copy bytes in word size chunks. 3308 // Copy bytes in word size chunks.
(...skipping 14 matching lines...) Expand all
3323 mov(scratch, Operand(scratch, LSR, 8)); 3323 mov(scratch, Operand(scratch, LSR, 8));
3324 strb(scratch, MemOperand(dst, 1, PostIndex)); 3324 strb(scratch, MemOperand(dst, 1, PostIndex));
3325 mov(scratch, Operand(scratch, LSR, 8)); 3325 mov(scratch, Operand(scratch, LSR, 8));
3326 strb(scratch, MemOperand(dst, 1, PostIndex)); 3326 strb(scratch, MemOperand(dst, 1, PostIndex));
3327 } 3327 }
3328 sub(length, length, Operand(kPointerSize)); 3328 sub(length, length, Operand(kPointerSize));
3329 b(&word_loop); 3329 b(&word_loop);
3330 3330
3331 // Copy the last bytes if any left. 3331 // Copy the last bytes if any left.
3332 bind(&byte_loop); 3332 bind(&byte_loop);
3333 cmp(length, Operand(0)); 3333 cmp(length, Operand::Zero());
3334 b(eq, &done); 3334 b(eq, &done);
3335 bind(&byte_loop_1); 3335 bind(&byte_loop_1);
3336 ldrb(scratch, MemOperand(src, 1, PostIndex)); 3336 ldrb(scratch, MemOperand(src, 1, PostIndex));
3337 strb(scratch, MemOperand(dst, 1, PostIndex)); 3337 strb(scratch, MemOperand(dst, 1, PostIndex));
3338 sub(length, length, Operand(1), SetCC); 3338 sub(length, length, Operand(1), SetCC);
3339 b(ne, &byte_loop_1); 3339 b(ne, &byte_loop_1);
3340 bind(&done); 3340 bind(&done);
3341 } 3341 }
3342 3342
3343 3343
(...skipping 17 matching lines...) Expand all
3361 ASSERT(!zeros.is(scratch)); 3361 ASSERT(!zeros.is(scratch));
3362 ASSERT(!scratch.is(ip)); 3362 ASSERT(!scratch.is(ip));
3363 ASSERT(!source.is(ip)); 3363 ASSERT(!source.is(ip));
3364 ASSERT(!zeros.is(ip)); 3364 ASSERT(!zeros.is(ip));
3365 #ifdef CAN_USE_ARMV5_INSTRUCTIONS 3365 #ifdef CAN_USE_ARMV5_INSTRUCTIONS
3366 clz(zeros, source); // This instruction is only supported after ARM5. 3366 clz(zeros, source); // This instruction is only supported after ARM5.
3367 #else 3367 #else
3368 // Order of the next two lines is important: zeros register 3368 // Order of the next two lines is important: zeros register
3369 // can be the same as source register. 3369 // can be the same as source register.
3370 Move(scratch, source); 3370 Move(scratch, source);
3371 mov(zeros, Operand(0, RelocInfo::NONE32)); 3371 mov(zeros, Operand::Zero());
3372 // Top 16. 3372 // Top 16.
3373 tst(scratch, Operand(0xffff0000)); 3373 tst(scratch, Operand(0xffff0000));
3374 add(zeros, zeros, Operand(16), LeaveCC, eq); 3374 add(zeros, zeros, Operand(16), LeaveCC, eq);
3375 mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq); 3375 mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
3376 // Top 8. 3376 // Top 8.
3377 tst(scratch, Operand(0xff000000)); 3377 tst(scratch, Operand(0xff000000));
3378 add(zeros, zeros, Operand(8), LeaveCC, eq); 3378 add(zeros, zeros, Operand(8), LeaveCC, eq);
3379 mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq); 3379 mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
3380 // Top 4. 3380 // Top 4.
3381 tst(scratch, Operand(0xf0000000)); 3381 tst(scratch, Operand(0xf0000000));
(...skipping 411 matching lines...) Expand 10 before | Expand all | Expand 10 after
3793 DwVfpRegister temp_double_reg) { 3793 DwVfpRegister temp_double_reg) {
3794 Label above_zero; 3794 Label above_zero;
3795 Label done; 3795 Label done;
3796 Label in_bounds; 3796 Label in_bounds;
3797 3797
3798 Vmov(temp_double_reg, 0.0); 3798 Vmov(temp_double_reg, 0.0);
3799 VFPCompareAndSetFlags(input_reg, temp_double_reg); 3799 VFPCompareAndSetFlags(input_reg, temp_double_reg);
3800 b(gt, &above_zero); 3800 b(gt, &above_zero);
3801 3801
3802 // Double value is less than zero, NaN or Inf, return 0. 3802 // Double value is less than zero, NaN or Inf, return 0.
3803 mov(result_reg, Operand(0)); 3803 mov(result_reg, Operand::Zero());
3804 b(al, &done); 3804 b(al, &done);
3805 3805
3806 // Double value is >= 255, return 255. 3806 // Double value is >= 255, return 255.
3807 bind(&above_zero); 3807 bind(&above_zero);
3808 Vmov(temp_double_reg, 255.0, result_reg); 3808 Vmov(temp_double_reg, 255.0, result_reg);
3809 VFPCompareAndSetFlags(input_reg, temp_double_reg); 3809 VFPCompareAndSetFlags(input_reg, temp_double_reg);
3810 b(le, &in_bounds); 3810 b(le, &in_bounds);
3811 mov(result_reg, Operand(255)); 3811 mov(result_reg, Operand(255));
3812 b(al, &done); 3812 b(al, &done);
3813 3813
(...skipping 128 matching lines...) Expand 10 before | Expand all | Expand 10 after
3942 void CodePatcher::EmitCondition(Condition cond) { 3942 void CodePatcher::EmitCondition(Condition cond) {
3943 Instr instr = Assembler::instr_at(masm_.pc_); 3943 Instr instr = Assembler::instr_at(masm_.pc_);
3944 instr = (instr & ~kCondMask) | cond; 3944 instr = (instr & ~kCondMask) | cond;
3945 masm_.emit(instr); 3945 masm_.emit(instr);
3946 } 3946 }
3947 3947
3948 3948
3949 } } // namespace v8::internal 3949 } } // namespace v8::internal
3950 3950
3951 #endif // V8_TARGET_ARCH_ARM 3951 #endif // V8_TARGET_ARCH_ARM
