| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 477 matching lines...) |
| 488 | 488 |
| 489 if (emit_debug_code()) { | 489 if (emit_debug_code()) { |
| 490 ldr(ip, MemOperand(address)); | 490 ldr(ip, MemOperand(address)); |
| 491 cmp(ip, value); | 491 cmp(ip, value); |
| 492 Check(eq, "Wrong address or value passed to RecordWrite"); | 492 Check(eq, "Wrong address or value passed to RecordWrite"); |
| 493 } | 493 } |
| 494 | 494 |
| 495 Label done; | 495 Label done; |
| 496 | 496 |
| 497 if (smi_check == INLINE_SMI_CHECK) { | 497 if (smi_check == INLINE_SMI_CHECK) { |
| 498 ASSERT_EQ(0, kSmiTag); | 498 JumpIfSmi(value, &done); |
| 499 tst(value, Operand(kSmiTagMask)); | |
| 500 b(eq, &done); | |
| 501 } | 499 } |
| 502 | 500 |
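The open-coded smi test (old lines 498-500) collapses into a single `JumpIfSmi` call. A minimal sketch of what that helper presumably emits, reconstructed from the instructions it replaces rather than from the real header:

```cpp
// Sketch only: inferred from the removed tst/b pair; the actual
// helper is declared elsewhere in the macro assembler.
void MacroAssembler::JumpIfSmi(Register value, Label* smi_label) {
  STATIC_ASSERT(kSmiTag == 0);       // Smi tag bit must be zero.
  tst(value, Operand(kSmiTagMask));  // Test the low tag bit.
  b(eq, smi_label);                  // Clear tag bit => value is a smi.
}
```

Note the helper also absorbs the `ASSERT_EQ(0, kSmiTag)` the old code carried inline.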
| 503 CheckPageFlag(value, | 501 CheckPageFlag(value, |
| 504 value, // Used as scratch. | 502 value, // Used as scratch. |
| 505 MemoryChunk::kPointersToHereAreInterestingMask, | 503 MemoryChunk::kPointersToHereAreInterestingMask, |
| 506 eq, | 504 eq, |
| 507 &done); | 505 &done); |
| 508 CheckPageFlag(object, | 506 CheckPageFlag(object, |
| 509 value, // Used as scratch. | 507 value, // Used as scratch. |
| 510 MemoryChunk::kPointersFromHereAreInterestingMask, | 508 MemoryChunk::kPointersFromHereAreInterestingMask, |
| (...skipping 460 matching lines...) |
| 971 add(ip, sp, Operand(kPointerSize)); | 969 add(ip, sp, Operand(kPointerSize)); |
| 972 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset)); | 970 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset)); |
| 973 } | 971 } |
| 974 | 972 |
| 975 | 973 |
| 976 void MacroAssembler::InitializeNewString(Register string, | 974 void MacroAssembler::InitializeNewString(Register string, |
| 977 Register length, | 975 Register length, |
| 978 Heap::RootListIndex map_index, | 976 Heap::RootListIndex map_index, |
| 979 Register scratch1, | 977 Register scratch1, |
| 980 Register scratch2) { | 978 Register scratch2) { |
| 981 mov(scratch1, Operand(length, LSL, kSmiTagSize)); | 979 SmiTag(scratch1, length); |
| 982 LoadRoot(scratch2, map_index); | 980 LoadRoot(scratch2, map_index); |
| 983 str(scratch1, FieldMemOperand(string, String::kLengthOffset)); | 981 str(scratch1, FieldMemOperand(string, String::kLengthOffset)); |
| 984 mov(scratch1, Operand(String::kEmptyHashField)); | 982 mov(scratch1, Operand(String::kEmptyHashField)); |
| 985 str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset)); | 983 str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset)); |
| 986 str(scratch1, FieldMemOperand(string, String::kHashFieldOffset)); | 984 str(scratch1, FieldMemOperand(string, String::kHashFieldOffset)); |
| 987 } | 985 } |
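`SmiTag(scratch1, length)` replaces the explicit left shift. Based on the `mov` it stands in for (and assuming the usual one-bit smi tag on 32-bit ARM), the helper is presumably just:

```cpp
// Presumed expansion, mirroring the replaced instruction; the SBit
// parameter defaulting to LeaveCC is an assumption here.
void MacroAssembler::SmiTag(Register dst, Register src, SBit s) {
  mov(dst, Operand(src, LSL, kSmiTagSize), s);
}
```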
| 988 | 986 |
| 989 | 987 |
| 990 int MacroAssembler::ActivationFrameAlignment() { | 988 int MacroAssembler::ActivationFrameAlignment() { |
| 991 #if defined(V8_HOST_ARCH_ARM) | 989 #if defined(V8_HOST_ARCH_ARM) |
| (...skipping 222 matching lines...) |
| 1214 ASSERT(fun.is(r1)); | 1212 ASSERT(fun.is(r1)); |
| 1215 | 1213 |
| 1216 Register expected_reg = r2; | 1214 Register expected_reg = r2; |
| 1217 Register code_reg = r3; | 1215 Register code_reg = r3; |
| 1218 | 1216 |
| 1219 ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); | 1217 ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); |
| 1220 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); | 1218 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); |
| 1221 ldr(expected_reg, | 1219 ldr(expected_reg, |
| 1222 FieldMemOperand(code_reg, | 1220 FieldMemOperand(code_reg, |
| 1223 SharedFunctionInfo::kFormalParameterCountOffset)); | 1221 SharedFunctionInfo::kFormalParameterCountOffset)); |
| 1224 mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize)); | 1222 SmiUntag(expected_reg); |
| 1225 ldr(code_reg, | 1223 ldr(code_reg, |
| 1226 FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); | 1224 FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); |
| 1227 | 1225 |
| 1228 ParameterCount expected(expected_reg); | 1226 ParameterCount expected(expected_reg); |
| 1229 InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind); | 1227 InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind); |
| 1230 } | 1228 } |
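Likewise, `SmiUntag(expected_reg)` hides the arithmetic right shift that strips the tag from the smi-encoded parameter count. A presumed one-line expansion, matching the `mov` it replaces (the single-register form presumably untags in place):

```cpp
// Presumed expansion of the two-register form.
void MacroAssembler::SmiUntag(Register dst, Register src, SBit s) {
  mov(dst, Operand(src, ASR, kSmiTagSize), s);  // Arithmetic shift keeps sign.
}
```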
| 1231 | 1229 |
| 1232 | 1230 |
| 1233 void MacroAssembler::InvokeFunction(Handle<JSFunction> function, | 1231 void MacroAssembler::InvokeFunction(Handle<JSFunction> function, |
| 1234 const ParameterCount& expected, | 1232 const ParameterCount& expected, |
| (...skipping 117 matching lines...) |
| 1352 | 1350 |
| 1353 void MacroAssembler::JumpToHandlerEntry() { | 1351 void MacroAssembler::JumpToHandlerEntry() { |
| 1354 // Compute the handler entry address and jump to it. The handler table is | 1352 // Compute the handler entry address and jump to it. The handler table is |
| 1355 // a fixed array of (smi-tagged) code offsets. | 1353 // a fixed array of (smi-tagged) code offsets. |
| 1356 // r0 = exception, r1 = code object, r2 = state. | 1354 // r0 = exception, r1 = code object, r2 = state. |
| 1357 ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset)); // Handler table. | 1355 ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset)); // Handler table. |
| 1358 add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 1356 add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 1359 mov(r2, Operand(r2, LSR, StackHandler::kKindWidth)); // Handler index. | 1357 mov(r2, Operand(r2, LSR, StackHandler::kKindWidth)); // Handler index. |
| 1360 ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2)); // Smi-tagged offset. | 1358 ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2)); // Smi-tagged offset. |
| 1361 add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start. | 1359 add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start. |
| 1362 add(pc, r1, Operand(r2, ASR, kSmiTagSize)); // Jump. | 1360 add(pc, r1, Operand::SmiUntag(r2)); // Jump. |
| 1363 } | 1361 } |
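`Operand::SmiUntag(r2)` folds the untagging shift into the `add` operand itself, so the computed jump still costs a single instruction. A plausible shape for the factory, inferred from the shifted operand it replaces (in the real header it is a static member of `Operand`):

```cpp
// Hypothetical sketch: packages the same ASR-by-tag-size shift the
// old line spelled out.
Operand Operand::SmiUntag(Register rm) {
  STATIC_ASSERT(kSmiTag == 0);
  return Operand(rm, ASR, kSmiTagSize);
}
```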
| 1364 | 1362 |
| 1365 | 1363 |
| 1366 void MacroAssembler::Throw(Register value) { | 1364 void MacroAssembler::Throw(Register value) { |
| 1367 // Adjust this code if not the case. | 1365 // Adjust this code if not the case. |
| 1368 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); | 1366 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); |
| 1369 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | 1367 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); |
| 1370 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); | 1368 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); |
| 1371 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); | 1369 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); |
| 1372 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); | 1370 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); |
| (...skipping 195 matching lines...) |
| 1568 // | 1566 // |
| 1569 // t1 - used to hold the capacity mask of the dictionary | 1567 // t1 - used to hold the capacity mask of the dictionary |
| 1570 // | 1568 // |
| 1571 // t2 - used for the index into the dictionary. | 1569 // t2 - used for the index into the dictionary. |
| 1572 Label done; | 1570 Label done; |
| 1573 | 1571 |
| 1574 GetNumberHash(t0, t1); | 1572 GetNumberHash(t0, t1); |
| 1575 | 1573 |
| 1576 // Compute the capacity mask. | 1574 // Compute the capacity mask. |
| 1577 ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset)); | 1575 ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset)); |
| 1578 mov(t1, Operand(t1, ASR, kSmiTagSize)); // convert smi to int | 1576 SmiUntag(t1); |
| 1579 sub(t1, t1, Operand(1)); | 1577 sub(t1, t1, Operand(1)); |
| 1580 | 1578 |
| 1581 // Generate an unrolled loop that performs a few probes before giving up. | 1579 // Generate an unrolled loop that performs a few probes before giving up. |
| 1582 static const int kProbes = 4; | 1580 static const int kProbes = 4; |
| 1583 for (int i = 0; i < kProbes; i++) { | 1581 for (int i = 0; i < kProbes; i++) { |
| 1584 // Use t2 for index calculations and keep the hash intact in t0. | 1582 // Use t2 for index calculations and keep the hash intact in t0. |
| 1585 mov(t2, t0); | 1583 mov(t2, t0); |
| 1586 // Compute the masked index: (hash + i + i * i) & mask. | 1584 // Compute the masked index: (hash + i + i * i) & mask. |
| 1587 if (i > 0) { | 1585 if (i > 0) { |
| 1588 add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i))); | 1586 add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i))); |
| (...skipping 499 matching lines...) |
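The probe loop above is the quadratic-probing scheme the in-code comment names, with the mask derived from the (now `SmiUntag`-ed) capacity. An illustrative plain-C++ restatement, using only what the comment states:

```cpp
#include <cstdint>

// Illustrative: the masked probe sequence "(hash + i + i * i) & mask".
// Capacity must be a power of two so mask = capacity - 1 acts as a
// cheap modulo.
uint32_t ProbeIndex(uint32_t hash, uint32_t capacity, uint32_t i) {
  uint32_t mask = capacity - 1;
  return (hash + i + i * i) & mask;
}
```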
| 2088 // Force a canonical NaN. | 2086 // Force a canonical NaN. |
| 2089 if (emit_debug_code()) { | 2087 if (emit_debug_code()) { |
| 2090 vmrs(ip); | 2088 vmrs(ip); |
| 2091 tst(ip, Operand(kVFPDefaultNaNModeControlBit)); | 2089 tst(ip, Operand(kVFPDefaultNaNModeControlBit)); |
| 2092 Assert(ne, "Default NaN mode not set"); | 2090 Assert(ne, "Default NaN mode not set"); |
| 2093 } | 2091 } |
| 2094 VFPCanonicalizeNaN(d0); | 2092 VFPCanonicalizeNaN(d0); |
| 2095 b(&store); | 2093 b(&store); |
| 2096 | 2094 |
| 2097 bind(&smi_value); | 2095 bind(&smi_value); |
| 2098 Register untagged_value = scratch1; | 2096 SmiToDouble(d0, value_reg); |
| 2099 SmiUntag(untagged_value, value_reg); | |
| 2100 vmov(s2, untagged_value); | |
| 2101 vcvt_f64_s32(d0, s2); | |
| 2102 | 2097 |
| 2103 bind(&store); | 2098 bind(&store); |
| 2104 add(scratch1, elements_reg, | 2099 add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg)); |
| 2105 Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); | |
| 2106 vstr(d0, FieldMemOperand(scratch1, | 2100 vstr(d0, FieldMemOperand(scratch1, |
| 2107 FixedDoubleArray::kHeaderSize - elements_offset)); | 2101 FixedDoubleArray::kHeaderSize - elements_offset)); |
| 2108 } | 2102 } |
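`Operand::DoubleOffsetFromSmiKey(key_reg)` names the trick the old two-line `add` performed: a smi key is already `value << kSmiTagSize`, so shifting left by only `kDoubleSizeLog2 - kSmiTagSize` scales it straight to a byte offset into a double array. Presumed shape of the factory, taken from the operand it replaces:

```cpp
// Presumed definition (static member of Operand in the real header).
// Relies on kSmiTag == 0 and on the tag shift being smaller than the
// double-size shift, so the shift amount stays non-negative.
Operand Operand::DoubleOffsetFromSmiKey(Register key) {
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kDoubleSizeLog2);
  return Operand(key, LSL, kDoubleSizeLog2 - kSmiTagSize);
}
```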
| 2109 | 2103 |
| 2110 | 2104 |
| 2111 void MacroAssembler::CompareMap(Register obj, | 2105 void MacroAssembler::CompareMap(Register obj, |
| 2112 Register scratch, | 2106 Register scratch, |
| 2113 Handle<Map> map, | 2107 Handle<Map> map, |
| 2114 Label* early_success, | 2108 Label* early_success, |
| 2115 CompareMapMode mode) { | 2109 CompareMapMode mode) { |
| (...skipping 267 matching lines...) |
| 2383 | 2377 |
| 2384 void MacroAssembler::IndexFromHash(Register hash, Register index) { | 2378 void MacroAssembler::IndexFromHash(Register hash, Register index) { |
| 2385 // If the hash field contains an array index pick it out. The assert checks | 2379 // If the hash field contains an array index pick it out. The assert checks |
| 2386 // that the constants for the maximum number of digits for an array index | 2380 // that the constants for the maximum number of digits for an array index |
| 2387 // cached in the hash field and the number of bits reserved for it does not | 2381 // cached in the hash field and the number of bits reserved for it does not |
| 2388 // conflict. | 2382 // conflict. |
| 2389 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < | 2383 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < |
| 2390 (1 << String::kArrayIndexValueBits)); | 2384 (1 << String::kArrayIndexValueBits)); |
| 2391 // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in | 2385 // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in |
| 2392 // the low kHashShift bits. | 2386 // the low kHashShift bits. |
| 2393 STATIC_ASSERT(kSmiTag == 0); | |
| 2394 Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits); | 2387 Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits); |
| 2395 mov(index, Operand(hash, LSL, kSmiTagSize)); | 2388 SmiTag(index, hash); |
| 2396 } | 2389 } |
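The `Ubfx`/`SmiTag` pair extracts the cached array index from the hash field and retags it as a smi. What the two instructions compute, as self-contained C++ (shift and width are parameters here, since their actual values come from `String`):

```cpp
#include <cstdint>

// Illustrative model: Ubfx pulls value_bits bits starting at
// hash_shift; SmiTag shifts the result left by the one-bit smi tag.
uint32_t IndexFromHashModel(uint32_t hash, int hash_shift, int value_bits) {
  uint32_t index = (hash >> hash_shift) & ((1u << value_bits) - 1);  // Ubfx.
  return index << 1;  // SmiTag, assuming kSmiTagSize == 1.
}
```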
| 2397 | 2390 |
| 2398 | 2391 |
| 2399 void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg, | 2392 void MacroAssembler::SmiToDouble(DwVfpRegister value, Register smi) { |
| 2400 Register outHighReg, | 2393 ASSERT(value.code() < 16); |
| 2401 Register outLowReg) { | 2394 if (CpuFeatures::IsSupported(VFP3)) { |
| 2402 // ARMv7 VFP3 instructions to implement integer to double conversion. | 2395 vmov(value.low(), smi); |
| 2403 mov(r7, Operand(inReg, ASR, kSmiTagSize)); | 2396 vcvt_f64_s32(value, 1); |
| 2404 vmov(s15, r7); | 2397 } else { |
| 2405 vcvt_f64_s32(d7, s15); | 2398 SmiUntag(ip, smi); |
| 2406 vmov(outLowReg, outHighReg, d7); | 2399 vmov(value.low(), ip); |
| | 2400 vcvt_f64_s32(value, value.low()); |
| | 2401 } |
| 2407 } | 2402 } |
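The new `SmiToDouble` exploits VFP3's fixed-point conversion: `vcvt_f64_s32(value, 1)` interprets the source register as a signed value with one fraction bit, i.e. it divides by two during the convert. Since a tagged smi is exactly `n << 1`, untagging and conversion collapse into one instruction; the non-VFP3 path untags through `ip` first. A self-contained model of the arithmetic, assuming the one-bit smi tag:

```cpp
#include <cassert>
#include <cstdint>

// Model of the VFP3 fast path: converting with one fraction bit is
// the same as converting the raw bits and dividing by 2.0, which
// cancels the smi tag shift exactly.
double SmiToDoubleModel(int32_t tagged_smi) {
  return static_cast<double>(tagged_smi) / 2.0;
}

int main() {
  int32_t smi = 5 << 1;                  // smi encoding of 5.
  assert(SmiToDoubleModel(smi) == 5.0);  // Same as untag-then-convert.
  return 0;
}
```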
| 2408 | 2403 |
| 2409 | 2404 |
| 2410 void MacroAssembler::ObjectToDoubleVFPRegister(Register object, | |
| 2411 DwVfpRegister result, | |
| 2412 Register scratch1, | |
| 2413 Register scratch2, | |
| 2414 Register heap_number_map, | |
| 2415 SwVfpRegister scratch3, | |
| 2416 Label* not_number, | |
| 2417 ObjectToDoubleFlags flags) { | |
| 2418 Label done; | |
| 2419 if ((flags & OBJECT_NOT_SMI) == 0) { | |
| 2420 Label not_smi; | |
| 2421 JumpIfNotSmi(object, ¬_smi); | |
| 2422 // Remove smi tag and convert to double. | |
| 2423 mov(scratch1, Operand(object, ASR, kSmiTagSize)); | |
| 2424 vmov(scratch3, scratch1); | |
| 2425 vcvt_f64_s32(result, scratch3); | |
| 2426 b(&done); | |
| 2427 bind(¬_smi); | |
| 2428 } | |
| 2429 // Check for heap number and load double value from it. | |
| 2430 ldr(scratch1, FieldMemOperand(object, HeapObject::kMapOffset)); | |
| 2431 sub(scratch2, object, Operand(kHeapObjectTag)); | |
| 2432 cmp(scratch1, heap_number_map); | |
| 2433 b(ne, not_number); | |
| 2434 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) { | |
| 2435 // If exponent is all ones the number is either a NaN or +/-Infinity. | |
| 2436 ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); | |
| 2437 Sbfx(scratch1, | |
| 2438 scratch1, | |
| 2439 HeapNumber::kExponentShift, | |
| 2440 HeapNumber::kExponentBits); | |
| 2441 // An all-ones value sign-extends to -1. | |
| 2442 cmp(scratch1, Operand(-1)); | |
| 2443 b(eq, not_number); | |
| 2444 } | |
| 2445 vldr(result, scratch2, HeapNumber::kValueOffset); | |
| 2446 bind(&done); | |
| 2447 } | |
| 2448 | |
| 2449 | |
| 2450 void MacroAssembler::SmiToDoubleVFPRegister(Register smi, | |
| 2451 DwVfpRegister value, | |
| 2452 Register scratch1, | |
| 2453 SwVfpRegister scratch2) { | |
| 2454 mov(scratch1, Operand(smi, ASR, kSmiTagSize)); | |
| 2455 vmov(scratch2, scratch1); | |
| 2456 vcvt_f64_s32(value, scratch2); | |
| 2457 } | |
| 2458 | |
| 2459 | |
| 2460 void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input, | 2405 void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input, |
| 2461 DwVfpRegister double_scratch) { | 2406 DwVfpRegister double_scratch) { |
| 2462 ASSERT(!double_input.is(double_scratch)); | 2407 ASSERT(!double_input.is(double_scratch)); |
| 2463 vcvt_s32_f64(double_scratch.low(), double_input); | 2408 vcvt_s32_f64(double_scratch.low(), double_input); |
| 2464 vcvt_f64_s32(double_scratch, double_scratch.low()); | 2409 vcvt_f64_s32(double_scratch, double_scratch.low()); |
| 2465 VFPCompareAndSetFlags(double_input, double_scratch); | 2410 VFPCompareAndSetFlags(double_input, double_scratch); |
| 2466 } | 2411 } |
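`TestDoubleIsInt32` uses a round trip: convert to s32 and back, then compare against the original. The flags read as equal only when the double was an exact 32-bit integer; NaN compares unordered and falls out naturally. A C++ model of the same idea, with the caveat that the VFP instruction handles out-of-range inputs where C++ casts would be undefined:

```cpp
#include <cstdint>

// Illustrative round-trip check; valid only for inputs where the
// cast is defined (the hardware sequence has no such restriction).
bool DoubleIsInt32Model(double x) {
  int32_t truncated = static_cast<int32_t>(x);  // vcvt_s32_f64.
  return static_cast<double>(truncated) == x;   // vcvt_f64_s32 + compare.
}
```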
| 2467 | 2412 |
| 2468 | 2413 |
| 2469 void MacroAssembler::TryDoubleToInt32Exact(Register result, | 2414 void MacroAssembler::TryDoubleToInt32Exact(Register result, |
| (...skipping 133 matching lines...) |
| 2603 bind(&done); | 2548 bind(&done); |
| 2604 } | 2549 } |
| 2605 | 2550 |
| 2606 | 2551 |
| 2607 void MacroAssembler::GetLeastBitsFromSmi(Register dst, | 2552 void MacroAssembler::GetLeastBitsFromSmi(Register dst, |
| 2608 Register src, | 2553 Register src, |
| 2609 int num_least_bits) { | 2554 int num_least_bits) { |
| 2610 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) { | 2555 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) { |
| 2611 ubfx(dst, src, kSmiTagSize, num_least_bits); | 2556 ubfx(dst, src, kSmiTagSize, num_least_bits); |
| 2612 } else { | 2557 } else { |
| 2613 mov(dst, Operand(src, ASR, kSmiTagSize)); | 2558 SmiUntag(dst, src); |
| 2614 and_(dst, dst, Operand((1 << num_least_bits) - 1)); | 2559 and_(dst, dst, Operand((1 << num_least_bits) - 1)); |
| 2615 } | 2560 } |
| 2616 } | 2561 } |
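Both branches compute the same value: the ARMv7 `ubfx` extracts `num_least_bits` starting one bit up (skipping the tag), while the fallback now untags via `SmiUntag` and masks. As plain C++:

```cpp
#include <cstdint>

// Illustrative: the low num_least_bits of the untagged smi value,
// assuming the one-bit tag (kSmiTagSize == 1).
uint32_t LeastBitsFromSmi(uint32_t tagged_smi, int num_least_bits) {
  return (tagged_smi >> 1) & ((1u << num_least_bits) - 1);
}
```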
| 2617 | 2562 |
| 2618 | 2563 |
| 2619 void MacroAssembler::GetLeastBitsFromInt32(Register dst, | 2564 void MacroAssembler::GetLeastBitsFromInt32(Register dst, |
| 2620 Register src, | 2565 Register src, |
| 2621 int num_least_bits) { | 2566 int num_least_bits) { |
| 2622 and_(dst, src, Operand((1 << num_least_bits) - 1)); | 2567 and_(dst, src, Operand((1 << num_least_bits) - 1)); |
| 2623 } | 2568 } |
| (...skipping 374 matching lines...) |
| 2998 STATIC_ASSERT(kSmiTag == 0); | 2943 STATIC_ASSERT(kSmiTag == 0); |
| 2999 tst(reg1, Operand(kSmiTagMask)); | 2944 tst(reg1, Operand(kSmiTagMask)); |
| 3000 tst(reg2, Operand(kSmiTagMask), eq); | 2945 tst(reg2, Operand(kSmiTagMask), eq); |
| 3001 b(ne, on_not_both_smi); | 2946 b(ne, on_not_both_smi); |
| 3002 } | 2947 } |
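The sequence above leans on ARM conditional execution: the second `tst` runs only when the first left the flags at `eq` (reg1 is a smi), so a single `b(ne, ...)` rejects the pair if either test failed. A control-flow model:

```cpp
#include <cstdint>

// Illustrative model of the predicated tst pair; tag bit 0 means smi
// (kSmiTag == 0, kSmiTagMask == 1 assumed).
bool BothAreSmis(uint32_t r1, uint32_t r2) {
  bool eq = (r1 & 1) == 0;     // tst reg1, #kSmiTagMask.
  if (eq) eq = (r2 & 1) == 0;  // tst reg2, predicated on eq.
  return eq;                   // b(ne) is taken when this is false.
}
```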
| 3003 | 2948 |
| 3004 | 2949 |
| 3005 void MacroAssembler::UntagAndJumpIfSmi( | 2950 void MacroAssembler::UntagAndJumpIfSmi( |
| 3006 Register dst, Register src, Label* smi_case) { | 2951 Register dst, Register src, Label* smi_case) { |
| 3007 STATIC_ASSERT(kSmiTag == 0); | 2952 STATIC_ASSERT(kSmiTag == 0); |
| 3008 mov(dst, Operand(src, ASR, kSmiTagSize), SetCC); | 2953 SmiUntag(dst, src, SetCC); |
| 3009 b(cc, smi_case); // Shifter carry is not set for a smi. | 2954 b(cc, smi_case); // Shifter carry is not set for a smi. |
| 3010 } | 2955 } |
| 3011 | 2956 |
| 3012 | 2957 |
| 3013 void MacroAssembler::UntagAndJumpIfNotSmi( | 2958 void MacroAssembler::UntagAndJumpIfNotSmi( |
| 3014 Register dst, Register src, Label* non_smi_case) { | 2959 Register dst, Register src, Label* non_smi_case) { |
| 3015 STATIC_ASSERT(kSmiTag == 0); | 2960 STATIC_ASSERT(kSmiTag == 0); |
| 3016 mov(dst, Operand(src, ASR, kSmiTagSize), SetCC); | 2961 SmiUntag(dst, src, SetCC); |
| 3017 b(cs, non_smi_case); // Shifter carry is set for a non-smi. | 2962 b(cs, non_smi_case); // Shifter carry is set for a non-smi. |
| 3018 } | 2963 } |
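`SmiUntag(dst, src, SetCC)` preserves the old one-instruction trick: `mov dst, src, ASR #1` with flag setting moves the last bit shifted out, which is the tag bit, into the carry flag. A smi has tag 0, so carry clear means smi; the two callers branch on `cc` and `cs` respectively, exactly as their comments say. Modeled in C++:

```cpp
#include <cstdint>
#include <utility>

// Illustrative: untag and smi-check in one step. Returns the
// untagged value plus the carry flag the shift would produce.
// (Arithmetic right shift of negatives assumed, as on ARM.)
std::pair<int32_t, bool> UntagWithCarry(int32_t tagged) {
  bool carry = (tagged & 1) != 0;  // Last bit shifted out by ASR #1.
  return {tagged >> 1, carry};     // Carry set => it was not a smi.
}
```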
| 3019 | 2964 |
| 3020 | 2965 |
| 3021 void MacroAssembler::JumpIfEitherSmi(Register reg1, | 2966 void MacroAssembler::JumpIfEitherSmi(Register reg1, |
| 3022 Register reg2, | 2967 Register reg2, |
| 3023 Label* on_either_smi) { | 2968 Label* on_either_smi) { |
| 3024 STATIC_ASSERT(kSmiTag == 0); | 2969 STATIC_ASSERT(kSmiTag == 0); |
| 3025 tst(reg1, Operand(kSmiTagMask)); | 2970 tst(reg1, Operand(kSmiTagMask)); |
| 3026 tst(reg2, Operand(kSmiTagMask), ne); | 2971 tst(reg2, Operand(kSmiTagMask), ne); |
| (...skipping 86 matching lines...) |
| 3113 scratch2, | 3058 scratch2, |
| 3114 failure); | 3059 failure); |
| 3115 } | 3060 } |
| 3116 | 3061 |
| 3117 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first, | 3062 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first, |
| 3118 Register second, | 3063 Register second, |
| 3119 Register scratch1, | 3064 Register scratch1, |
| 3120 Register scratch2, | 3065 Register scratch2, |
| 3121 Label* failure) { | 3066 Label* failure) { |
| 3122 // Check that neither is a smi. | 3067 // Check that neither is a smi. |
| 3123 STATIC_ASSERT(kSmiTag == 0); | |
| 3124 and_(scratch1, first, Operand(second)); | 3068 and_(scratch1, first, Operand(second)); |
| 3125 JumpIfSmi(scratch1, failure); | 3069 JumpIfSmi(scratch1, failure); |
| 3126 JumpIfNonSmisNotBothSequentialAsciiStrings(first, | 3070 JumpIfNonSmisNotBothSequentialAsciiStrings(first, |
| 3127 second, | 3071 second, |
| 3128 scratch1, | 3072 scratch1, |
| 3129 scratch2, | 3073 scratch2, |
| 3130 failure); | 3074 failure); |
| 3131 } | 3075 } |
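`and_(scratch1, first, Operand(second))` followed by `JumpIfSmi` checks both tags at once: with smi tag 0, the AND has a clear tag bit unless *both* inputs are heap objects, so a single smi test catches "either one is a smi". In C++:

```cpp
#include <cstdint>

// Illustrative: the tag bit of (a & b) is set only when both tag
// bits are set, i.e. both values are heap objects (kSmiTag == 0).
bool EitherIsSmi(uint32_t a, uint32_t b) {
  return ((a & b) & 1) == 0;
}
```

This is also why the inline `STATIC_ASSERT(kSmiTag == 0)` could be dropped here: the `JumpIfSmi` helper presumably carries that assumption itself.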
| 3132 | 3076 |
| 3133 | 3077 |
| (...skipping 724 matching lines...) |
| 3858 void CodePatcher::EmitCondition(Condition cond) { | 3802 void CodePatcher::EmitCondition(Condition cond) { |
| 3859 Instr instr = Assembler::instr_at(masm_.pc_); | 3803 Instr instr = Assembler::instr_at(masm_.pc_); |
| 3860 instr = (instr & ~kCondMask) | cond; | 3804 instr = (instr & ~kCondMask) | cond; |
| 3861 masm_.emit(instr); | 3805 masm_.emit(instr); |
| 3862 } | 3806 } |
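`EmitCondition` rewrites the 4-bit condition field that every ARM instruction carries in its top nibble: mask the field out, OR the new condition in. A self-contained model, with the mask and the pre-shifted condition values assumed from the standard ARM encoding:

```cpp
#include <cstdint>

// Illustrative: ARM conditions occupy bits 28..31; V8's Condition
// values are assumed to be pre-shifted into that field.
uint32_t PatchCondition(uint32_t instr, uint32_t cond) {
  const uint32_t kCondMask = 0xF0000000u;  // Bits 28..31.
  return (instr & ~kCondMask) | cond;
}
```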
| 3863 | 3807 |
| 3864 | 3808 |
| 3865 } } // namespace v8::internal | 3809 } } // namespace v8::internal |
| 3866 | 3810 |
| 3867 #endif // V8_TARGET_ARCH_ARM | 3811 #endif // V8_TARGET_ARCH_ARM |