Chromium Code Reviews

Side by Side Diff: src/arm/macro-assembler-arm.cc

Issue 14119008: ARM: VFP cleanup now that VFP2 is the baseline. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 7 years, 8 months ago
1 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 2 // Redistribution and use in source and binary forms, with or without
3 3 // modification, are permitted provided that the following conditions are
4 4 // met:
5 5 //
6 6 // * Redistributions of source code must retain the above copyright
7 7 // notice, this list of conditions and the following disclaimer.
8 8 // * Redistributions in binary form must reproduce the above
9 9 // copyright notice, this list of conditions and the following
10 10 // disclaimer in the documentation and/or other materials provided
(...skipping 772 matching lines...)
783 783   if (value.bits == zero.bits) {
784 784     vmov(dst, kDoubleRegZero);
785 785   } else if (value.bits == minus_zero.bits) {
786 786     vneg(dst, kDoubleRegZero);
787 787   } else {
788 788     vmov(dst, imm, scratch);
789 789   }
790 790 }
791 791
792 792
793 void MacroAssembler::ConvertNumberToInt32(Register object,
794 Register dst,
795 Register heap_number_map,
796 Register scratch1,
797 Register scratch2,
798 Register scratch3,
799 DwVfpRegister double_scratch1,
800 DwVfpRegister double_scratch2,
801 Label* not_number) {
802 Label done;
803 UntagAndJumpIfSmi(dst, object, &done);
804 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
805 vldr(double_scratch1, FieldMemOperand(object, HeapNumber::kValueOffset));
806 ECMAToInt32(dst, double_scratch1,
807 scratch1, scratch2, scratch3, double_scratch2);
808
809 bind(&done);
810 }
811
812
813 void MacroAssembler::LoadNumber(Register object,
814 DwVfpRegister dst,
815 Register heap_number_map,
816 Register scratch,
817 Label* not_number) {
818 Label is_smi, done;
819
820 UntagAndJumpIfSmi(scratch, object, &is_smi);
821 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
822
823 vldr(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
824 b(&done);
825
826 // Handle loading a double from a smi.
827 bind(&is_smi);
828 vmov(dst.high(), scratch);
829 vcvt_f64_s32(dst, dst.high());
830
831 bind(&done);
832 }
833
834
835 void MacroAssembler::LoadNumberAsInt32Double(Register object,
836 DwVfpRegister double_dst,
837 Register heap_number_map,
838 Register scratch,
839 DwVfpRegister double_scratch,
840 Label* not_int32) {
841 ASSERT(!scratch.is(object));
842 ASSERT(!heap_number_map.is(object) && !heap_number_map.is(scratch));
843
844 Label done, obj_is_not_smi;
845
846 UntagAndJumpIfNotSmi(scratch, object, &obj_is_not_smi);
847 vmov(double_scratch.low(), scratch);
848 vcvt_f64_s32(double_dst, double_scratch.low());
849 b(&done);
850
851 bind(&obj_is_not_smi);
852 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_int32);
853
854 // Load the number.
855 // Load the double value.
856 vldr(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
857
858 TestDoubleIsInt32(double_dst, double_scratch);
859 // Jump to not_int32 if the operation did not succeed.
860 b(ne, not_int32);
861
862 bind(&done);
863 }
864
865
866 void MacroAssembler::LoadNumberAsInt32(Register object,
867 Register dst,
868 Register heap_number_map,
869 Register scratch,
870 DwVfpRegister double_scratch0,
871 DwVfpRegister double_scratch1,
872 Label* not_int32) {
873 ASSERT(!dst.is(object));
874 ASSERT(!scratch.is(object));
875
876 Label done, maybe_undefined;
877
878 UntagAndJumpIfSmi(dst, object, &done);
879
880 JumpIfNotHeapNumber(object, heap_number_map, scratch, &maybe_undefined);
881
882 // Object is a heap number.
883 // Convert the floating point value to a 32-bit integer.
884 // Load the double value.
885 vldr(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
886
887 TryDoubleToInt32Exact(dst, double_scratch0, double_scratch1);
888 // Jump to not_int32 if the operation did not succeed.
889 b(ne, not_int32);
890 b(&done);
891
892 bind(&maybe_undefined);
893 CompareRoot(object, Heap::kUndefinedValueRootIndex);
894 b(ne, not_int32);
895 // |undefined| is truncated to 0.
896 mov(dst, Operand(Smi::FromInt(0)));
897 // Fall through.
898
899 bind(&done);
900 }
901
902
793 903 void MacroAssembler::EnterFrame(StackFrame::Type type) {
794 904   // r0-r3: preserved
795 905   stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
796 906   mov(ip, Operand(Smi::FromInt(type)));
797 907   push(ip);
798 908   mov(ip, Operand(CodeObject()));
799 909   push(ip);
800 910   add(fp, sp, Operand(3 * kPointerSize));  // Adjust FP to point to saved FP.
801 911 }
802 912
(...skipping 1135 matching lines...)
1938 2048   ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
1939 2049   cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
1940 2050   b(hi, fail);
1941 2051 }
1942 2052
1943 2053
1944 2054 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
1945 2055                                                  Register key_reg,
1946 2056                                                  Register elements_reg,
1947 2057                                                  Register scratch1,
1948 Register scratch2,
1949 Register scratch3,
1950 Register scratch4,
1951 2058                                                  Label* fail,
1952 2059                                                  int elements_offset) {
1953 2060   Label smi_value, store;
1954 Register mantissa_reg = scratch2;
1955 Register exponent_reg = scratch3;
1956 2061
1957 2062   // Handle smi values specially.
1958 2063   JumpIfSmi(value_reg, &smi_value);
1959 2064
1960 2065   // Ensure that the object is a heap number
1961 2066   CheckMap(value_reg,
1962 2067            scratch1,
1963 2068            isolate()->factory()->heap_number_map(),
1964 2069            fail,
1965 2070            DONT_DO_SMI_CHECK);
1966 2071
1967 2072   vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
1968 2073   // Force a canonical NaN.
1969 2074   if (emit_debug_code()) {
1970 2075     vmrs(ip);
1971 2076     tst(ip, Operand(kVFPDefaultNaNModeControlBit));
1972 2077     Assert(ne, "Default NaN mode not set");
1973 2078   }
1974 2079   VFPCanonicalizeNaN(d0);
1975 2080   b(&store);
1976 2081
1977 2082   bind(&smi_value);
1978 2083   Register untagged_value = scratch1;
1979 2084   SmiUntag(untagged_value, value_reg);
1980 FloatingPointHelper::ConvertIntToDouble( 2085 vmov(s2, untagged_value);
1981 this, untagged_value, FloatingPointHelper::kVFPRegisters, d0, 2086 vcvt_f64_s32(d0, s2);
1982 mantissa_reg, exponent_reg, scratch4, s2);
1983 2087
1984 2088   bind(&store);
1985 2089   add(scratch1, elements_reg,
1986 2090       Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
1987 2091   vstr(d0, FieldMemOperand(scratch1,
1988 2092                            FixedDoubleArray::kHeaderSize - elements_offset));
1989 2093 }
1990 2094
1991 2095
1992 2096 void MacroAssembler::CompareMap(Register obj,
(...skipping 401 matching lines...)
2394 2498   // floor(x) <=> round_to_zero(x) - 1.
2395 2499   bind(&negative);
2396 2500   sub(result, result, Operand(1), SetCC);
2397 2501   // If result is still negative, go to done, result fetched.
2398 2502   // Else, we had an overflow and we fall through exception.
2399 2503   b(mi, done);
2400 2504   bind(&exception);
2401 2505 }
2402 2506
2403 2507
2404 void MacroAssembler::ECMAConvertNumberToInt32(Register source,
2405 Register result,
2406 Register input_low,
2407 Register input_high,
2408 Register scratch,
2409 DwVfpRegister double_scratch1,
2410 DwVfpRegister double_scratch2) {
2411 vldr(double_scratch1, FieldMemOperand(source, HeapNumber::kValueOffset));
2412 ECMAToInt32(result, double_scratch1, double_scratch2,
2413 scratch, input_high, input_low);
2414 }
2415
2416
2417 2508 void MacroAssembler::ECMAToInt32(Register result,
2418 2509                                  DwVfpRegister double_input,
2419 DwVfpRegister double_scratch,
2420 2510                                  Register scratch,
2421 Register input_high, 2511 Register scratch_high,
2422 Register input_low) { 2512 Register scratch_low,
2423 ASSERT(!input_high.is(result)); 2513 DwVfpRegister double_scratch) {
2424 ASSERT(!input_low.is(result)); 2514 ASSERT(!scratch_high.is(result));
2425 ASSERT(!input_low.is(input_high)); 2515 ASSERT(!scratch_low.is(result));
2516 ASSERT(!scratch_low.is(scratch_high));
2426 2517   ASSERT(!scratch.is(result) &&
2427 !scratch.is(input_high) && 2518 !scratch.is(scratch_high) &&
2428 !scratch.is(input_low)); 2519 !scratch.is(scratch_low));
2429 2520   ASSERT(!double_input.is(double_scratch));
2430 2521
2431 Label out_of_range, negate, done; 2522 Label out_of_range, only_low, negate, done;
2432 2523
2433 2524   vcvt_s32_f64(double_scratch.low(), double_input);
2434 2525   vmov(result, double_scratch.low());
2435 2526
2436 2527   // If result is not saturated (0x7fffffff or 0x80000000), we are done.
2437 2528   sub(scratch, result, Operand(1));
2438 2529   cmp(scratch, Operand(0x7ffffffe));
2439 2530   b(lt, &done);
2440 2531
2441 vmov(input_low, input_high, double_input); 2532 vmov(scratch_low, scratch_high, double_input);
2442 Ubfx(scratch, input_high, 2533 Ubfx(scratch, scratch_high,
2443 2534        HeapNumber::kExponentShift, HeapNumber::kExponentBits);
2444 2535   // Load scratch with exponent - 1. This is faster than loading
2445 2536   // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
2446 2537   sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
2447 2538   // If exponent is greater than or equal to 84, the 32 less significant
2448 2539   // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
2449 2540   // the result is 0.
2450 2541   // Compare exponent with 84 (compare exponent - 1 with 83).
2451 2542   cmp(scratch, Operand(83));
2452 2543   b(ge, &out_of_range);
2453 2544
2454 2545   // If we reach this code, 31 <= exponent <= 83.
2455 2546   // So, we don't have to handle cases where 0 <= exponent <= 20 for
2456 2547   // which we would need to shift right the high part of the mantissa.
2457 ECMAToInt32Tail(result, scratch, input_high, input_low, 2548 // Scratch contains exponent - 1.
2458 &out_of_range, &negate, &done);
2459 }
2460
2461
2462 void MacroAssembler::ECMAToInt32Tail(Register result,
2463 Register scratch,
2464 Register input_high,
2465 Register input_low,
2466 Label* out_of_range,
2467 Label* negate,
2468 Label* done) {
2469 Label only_low;
2470
2471 // On entry, scratch contains exponent - 1.
2472 2549   // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
2473 2550   rsb(scratch, scratch, Operand(51), SetCC);
2474 2551   b(ls, &only_low);
2475 // 21 <= exponent <= 51, shift input_low and input_high 2552 // 21 <= exponent <= 51, shift scratch_low and scratch_high
2476 2553   // to generate the result.
2477 mov(input_low, Operand(input_low, LSR, scratch)); 2554 mov(scratch_low, Operand(scratch_low, LSR, scratch));
2478 2555   // Scratch contains: 52 - exponent.
2479 2556   // We needs: exponent - 20.
2480 2557   // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
2481 2558   rsb(scratch, scratch, Operand(32));
2482 Ubfx(result, input_high, 2559 Ubfx(result, scratch_high,
2483 2560        0, HeapNumber::kMantissaBitsInTopWord);
2484 // Set the implicit 1 before the mantissa part in input_high. 2561 // Set the implicit 1 before the mantissa part in scratch_high.
2485 2562   orr(result, result, Operand(1 << HeapNumber::kMantissaBitsInTopWord));
2486 orr(result, input_low, Operand(result, LSL, scratch)); 2563 orr(result, scratch_low, Operand(result, LSL, scratch));
2487 b(negate); 2564 b(&negate);
2488 2565
2489 bind(out_of_range); 2566 bind(&out_of_range);
2490 2567   mov(result, Operand::Zero());
2491 b(done); 2568 b(&done);
2492 2569
2493 2570   bind(&only_low);
2494 // 52 <= exponent <= 83, shift only input_low. 2571 // 52 <= exponent <= 83, shift only scratch_low.
2495 2572   // On entry, scratch contains: 52 - exponent.
2496 2573   rsb(scratch, scratch, Operand::Zero());
2497 mov(result, Operand(input_low, LSL, scratch)); 2574 mov(result, Operand(scratch_low, LSL, scratch));
2498 2575
2499 bind(negate); 2576 bind(&negate);
2500 // If input was positive, input_high ASR 31 equals 0 and 2577 // If input was positive, scratch_high ASR 31 equals 0 and
2501 // input_high LSR 31 equals zero. 2578 // scratch_high LSR 31 equals zero.
2502 2579   // New result = (result eor 0) + 0 = result.
2503 2580   // If the input was negative, we have to negate the result.
2504 // Input_high ASR 31 equals 0xffffffff and input_high LSR 31 equals 1. 2581 // Input_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
2505 2582   // New result = (result eor 0xffffffff) + 1 = 0 - result.
2506 eor(result, result, Operand(input_high, ASR, 31)); 2583 eor(result, result, Operand(scratch_high, ASR, 31));
2507 add(result, result, Operand(input_high, LSR, 31)); 2584 add(result, result, Operand(scratch_high, LSR, 31));
2508 2585
2509 bind(done); 2586 bind(&done);
2510 2587 }
2511 2588
2512 2589
2513 2590 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2514 2591                                          Register src,
2515 2592                                          int num_least_bits) {
2516 2593   if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
2517 2594     ubfx(dst, src, kSmiTagSize, num_least_bits);
2518 2595   } else {
2519 2596     mov(dst, Operand(src, ASR, kSmiTagSize));
(...skipping 161 matching lines...)
2681 2758   }
2682 2759 }
2683 2760
2684 2761
2685 2762 void MacroAssembler::Assert(Condition cond, const char* msg) {
2686 2763   if (emit_debug_code())
2687 2764     Check(cond, msg);
2688 2765 }
2689 2766
2690 2767
2691 void MacroAssembler::AssertRegisterIsRoot(Register reg,
2692 Heap::RootListIndex index) {
2693 if (emit_debug_code()) {
2694 LoadRoot(ip, index);
2695 cmp(reg, ip);
2696 Check(eq, "Register did not match expected root");
2697 }
2698 }
2699
2700
2701 2768 void MacroAssembler::AssertFastElements(Register elements) {
2702 2769   if (emit_debug_code()) {
2703 2770     ASSERT(!elements.is(ip));
2704 2771     Label ok;
2705 2772     push(elements);
2706 2773     ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2707 2774     LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
2708 2775     cmp(elements, ip);
2709 2776     b(eq, &ok);
2710 2777     LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
(...skipping 273 matching lines...)
2984 3051     push(object);
2985 3052     ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
2986 3053     CompareInstanceType(object, object, LAST_NAME_TYPE);
2987 3054     pop(object);
2988 3055     Check(le, "Operand is not a name");
2989 3056   }
2990 3057 }
2991 3058
2992 3059
2993 3060
2994 void MacroAssembler::AssertRootValue(Register src, 3061 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
2995 Heap::RootListIndex root_value_index,
2996 const char* message) {
2997 3062   if (emit_debug_code()) {
2998 CompareRoot(src, root_value_index); 3063 CompareRoot(reg, index);
2999 Check(eq, message); 3064 Check(eq, "HeapNumberMap register clobbered.");
3000 3065   }
3001 3066 }
3002 3067
3003 3068
3004 3069 void MacroAssembler::JumpIfNotHeapNumber(Register object,
3005 3070                                          Register heap_number_map,
3006 3071                                          Register scratch,
3007 3072                                          Label* on_not_heap_number) {
3008 3073   ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3009 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); 3074 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3010 3075   cmp(scratch, heap_number_map);
3011 3076   b(ne, on_not_heap_number);
3012 3077 }
3013 3078
3014 3079
3015 3080 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
3016 3081     Register first,
3017 3082     Register second,
3018 3083     Register scratch1,
3019 3084     Register scratch2,
(...skipping 36 matching lines...)
3056 3121                                      Register scratch2,
3057 3122                                      Register heap_number_map,
3058 3123                                      Label* gc_required,
3059 3124                                      TaggingMode tagging_mode) {
3060 3125   // Allocate an object in the heap for the heap number and tag it as a heap
3061 3126   // object.
3062 3127   Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
3063 3128            tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3064 3129
3065 3130   // Store heap number map in the allocated object.
3066 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); 3131 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3067 3132   if (tagging_mode == TAG_RESULT) {
3068 3133     str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3069 3134   } else {
3070 3135     str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3071 3136   }
3072 3137 }
3073 3138
3074 3139
3075 3140 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3076 3141                                                  DwVfpRegister value,
(...skipping 671 matching lines...)
3748 3813 void CodePatcher::EmitCondition(Condition cond) {
3749 3814   Instr instr = Assembler::instr_at(masm_.pc_);
3750 3815   instr = (instr & ~kCondMask) | cond;
3751 3816   masm_.emit(instr);
3752 3817 }
3753 3818
3754 3819
3755 3820 } }  // namespace v8::internal
3756 3821
3757 3822 #endif  // V8_TARGET_ARCH_ARM
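
For reference, the reworked ECMAToInt32 helper in this patch implements the ECMA-262 ToInt32 conversion: truncate toward zero, reduce modulo 2^32, and reinterpret the result in the signed 32-bit range, without relying on the saturating VFP conversion for out-of-range doubles. The sketch below is a minimal, illustrative C++ description of that semantics; it is not part of the patch, and the function name is hypothetical.

```cpp
// Illustrative sketch only (not from this patch): the value ECMAToInt32 is
// expected to produce, per ECMA-262 ToInt32.
#include <cmath>
#include <cstdint>

int32_t ReferenceECMAToInt32(double input) {
  if (!std::isfinite(input)) return 0;       // NaN and +/-Infinity map to 0.
  double truncated = std::trunc(input);      // Round toward zero.
  double modulo = std::fmod(truncated, 4294967296.0);  // Reduce mod 2^32.
  if (modulo < 0) modulo += 4294967296.0;    // Bring into [0, 2^32).
  uint32_t bits = static_cast<uint32_t>(modulo);
  return static_cast<int32_t>(bits);         // Map into the signed 32-bit range.
}
```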