OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 488 matching lines...)
499 // Compute lower part of fraction (last 12 bits). | 499 // Compute lower part of fraction (last 12 bits). |
500 __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord)); | 500 __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord)); |
501 // And the top (top 20 bits). | 501 // And the top (top 20 bits). |
502 __ orr(exponent, | 502 __ orr(exponent, |
503 exponent, | 503 exponent, |
504 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord)); | 504 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord)); |
505 __ Ret(); | 505 __ Ret(); |
506 } | 506 } |
507 | 507 |
508 | 508 |
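For reference, the mantissa/exponent arithmetic above follows the standard IEEE-754 layout of a double across two 32-bit words: the top word holds the sign bit, the 11-bit exponent, and the 20 highest mantissa bits (HeapNumber::kMantissaBitsInTopWord), and the low word holds the remaining 32 mantissa bits. A minimal host-side C++ sketch of that split (illustrative only, not V8 API; the hex masks stand in for the HeapNumber constants):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double d = -123456789.0;
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);              // bit-exact view of the double
      uint32_t hi = static_cast<uint32_t>(bits >> 32);  // sign, exponent, top 20 mantissa bits
      uint32_t lo = static_cast<uint32_t>(bits);        // low 32 mantissa bits
      uint32_t exponent = (hi >> 20) & 0x7FF;           // 11-bit biased exponent
      uint32_t mantissa_top = hi & 0xFFFFF;             // kMantissaBitsInTopWord = 20 bits
      std::printf("sign=%u exponent=%u mantissa=%05x%08x\n",
                  static_cast<unsigned>(hi >> 31), static_cast<unsigned>(exponent),
                  static_cast<unsigned>(mantissa_top), static_cast<unsigned>(lo));
      return 0;
    }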
509 void FloatingPointHelper::LoadSmis(MacroAssembler* masm, | |
510 FloatingPointHelper::Destination destination, | |
511 Register scratch1, | |
512 Register scratch2) { | |
513 __ mov(scratch1, Operand(r0, ASR, kSmiTagSize)); | |
514 __ vmov(d7.high(), scratch1); | |
515 __ vcvt_f64_s32(d7, d7.high()); | |
516 __ mov(scratch1, Operand(r1, ASR, kSmiTagSize)); | |
517 __ vmov(d6.high(), scratch1); | |
518 __ vcvt_f64_s32(d6, d6.high()); | |
519 if (destination == kCoreRegisters) { | |
520 __ vmov(r2, r3, d7); | |
521 __ vmov(r0, r1, d6); | |
522 } | |
523 } | |
524 | |
525 | |
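In C terms, LoadSmis above untags each smi with an arithmetic shift right by kSmiTagSize and then converts the integer to a double; vmov plus vcvt_f64_s32 is the VFP route for that cast. A rough equivalent, assuming the 32-bit smi encoding (value in the upper 31 bits, tag bit 0 clear):

    #include <cstdint>

    int32_t untag_smi(int32_t tagged) {
      return tagged >> 1;                              // ASR by kSmiTagSize (1 on 32-bit V8)
    }

    double smi_to_double(int32_t tagged) {
      return static_cast<double>(untag_smi(tagged));   // vmov + vcvt_f64_s32
    }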
526 void FloatingPointHelper::LoadNumber(MacroAssembler* masm, | |
527 Destination destination, | |
528 Register object, | |
529 DwVfpRegister dst, | |
530 Register dst1, | |
531 Register dst2, | |
532 Register heap_number_map, | |
533 Register scratch1, | |
534 Register scratch2, | |
535 Label* not_number) { | |
536 __ AssertRootValue(heap_number_map, | |
537 Heap::kHeapNumberMapRootIndex, | |
538 "HeapNumberMap register clobbered."); | |
539 | |
540 Label is_smi, done; | |
541 | |
542 // Smi-check | |
543 __ UntagAndJumpIfSmi(scratch1, object, &is_smi); | |
544 // Heap number check | |
545 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); | |
546 | |
547 // Handle loading a double from a heap number. | |
548 if (destination == kVFPRegisters) { | |
549 // Load the double from tagged HeapNumber to double register. | |
550 __ sub(scratch1, object, Operand(kHeapObjectTag)); | |
551 __ vldr(dst, scratch1, HeapNumber::kValueOffset); | |
552 } else { | |
553 ASSERT(destination == kCoreRegisters); | |
554 // Load the double from heap number to dst1 and dst2 in double format. | |
555 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); | |
556 } | |
557 __ jmp(&done); | |
558 | |
559 // Handle loading a double from a smi. | |
560 __ bind(&is_smi); | |
561 // Convert smi to double using VFP instructions. | |
562 __ vmov(dst.high(), scratch1); | |
563 __ vcvt_f64_s32(dst, dst.high()); | |
564 if (destination == kCoreRegisters) { | |
565 // Load the converted smi to dst1 and dst2 in double format. | |
566 __ vmov(dst1, dst2, dst); | |
567 } | |
568 | |
569 __ bind(&done); | |
570 } | |
571 | |
572 | |
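The sub(scratch1, object, Operand(kHeapObjectTag)) before the vldr above is the usual pointer-untagging step: heap object pointers carry a low tag bit, so subtracting the tag yields the object's real address, and the double payload sits at HeapNumber::kValueOffset inside it. A hedged sketch of the same load, assuming kHeapObjectTag == 1:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    double load_heap_number(uintptr_t tagged, std::size_t value_offset) {
      const char* addr = reinterpret_cast<const char*>(tagged - 1);  // strip kHeapObjectTag
      double value;
      std::memcpy(&value, addr + value_offset, sizeof value);        // vldr dst, [addr + offset]
      return value;
    }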
573 void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm, | |
574 Register object, | |
575 Register dst, | |
576 Register heap_number_map, | |
577 Register scratch1, | |
578 Register scratch2, | |
579 Register scratch3, | |
580 DwVfpRegister double_scratch1, | |
581 DwVfpRegister double_scratch2, | |
582 Label* not_number) { | |
583 Label done; | |
584 __ AssertRootValue(heap_number_map, | |
585 Heap::kHeapNumberMapRootIndex, | |
586 "HeapNumberMap register clobbered."); | |
587 | |
588 __ UntagAndJumpIfSmi(dst, object, &done); | |
589 __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset)); | |
590 __ cmp(scratch1, heap_number_map); | |
591 __ b(ne, not_number); | |
592 __ ECMAConvertNumberToInt32(object, dst, | |
593 scratch1, scratch2, scratch3, | |
594 double_scratch1, double_scratch2); | |
595 __ bind(&done); | |
596 } | |
597 | |
598 | |
599 void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, | |
600 Register int_scratch, | |
601 Destination destination, | |
602 DwVfpRegister double_dst, | |
603 Register dst_mantissa, | |
604 Register dst_exponent, | |
605 Register scratch2, | |
606 SwVfpRegister single_scratch) { | |
607 ASSERT(!int_scratch.is(scratch2)); | |
608 ASSERT(!int_scratch.is(dst_mantissa)); | |
609 ASSERT(!int_scratch.is(dst_exponent)); | |
610 | |
611 Label done; | |
612 | |
613 __ vmov(single_scratch, int_scratch); | |
614 __ vcvt_f64_s32(double_dst, single_scratch); | |
615 if (destination == kCoreRegisters) { | |
616 __ vmov(dst_mantissa, dst_exponent, double_dst); | |
617 } | |
618 __ bind(&done); | |
619 } | |
620 | |
621 | |
622 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, | |
623 Register object, | |
624 Destination destination, | |
625 DwVfpRegister double_dst, | |
626 DwVfpRegister double_scratch, | |
627 Register dst_mantissa, | |
628 Register dst_exponent, | |
629 Register heap_number_map, | |
630 Register scratch1, | |
631 Register scratch2, | |
632 SwVfpRegister single_scratch, | |
633 Label* not_int32) { | |
634 ASSERT(!scratch1.is(object) && !scratch2.is(object)); | |
635 ASSERT(!scratch1.is(scratch2)); | |
636 ASSERT(!heap_number_map.is(object) && | |
637 !heap_number_map.is(scratch1) && | |
638 !heap_number_map.is(scratch2)); | |
639 | |
640 Label done, obj_is_not_smi; | |
641 | |
642 __ JumpIfNotSmi(object, &obj_is_not_smi); | |
643 __ SmiUntag(scratch1, object); | |
644 ConvertIntToDouble(masm, scratch1, destination, double_dst, dst_mantissa, | |
645 dst_exponent, scratch2, single_scratch); | |
646 __ b(&done); | |
647 | |
648 __ bind(&obj_is_not_smi); | |
649 __ AssertRootValue(heap_number_map, | |
650 Heap::kHeapNumberMapRootIndex, | |
651 "HeapNumberMap register clobbered."); | |
652 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); | |
653 | |
654 // Load the number. | |
655 // Load the double value. | |
656 __ sub(scratch1, object, Operand(kHeapObjectTag)); | |
657 __ vldr(double_dst, scratch1, HeapNumber::kValueOffset); | |
658 | |
659 __ TestDoubleIsInt32(double_dst, double_scratch); | |
660 // Jump to not_int32 if the operation did not succeed. | |
661 __ b(ne, not_int32); | |
662 | |
663 if (destination == kCoreRegisters) { | |
664 __ vmov(dst_mantissa, dst_exponent, double_dst); | |
665 } | |
666 __ bind(&done); | |
667 } | |
668 | |
669 | |
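TestDoubleIsInt32 above succeeds when the loaded double survives a round trip through a signed 32-bit integer. A hedged C++ restatement of that predicate (assumption: the helper converts to int32 and back and compares, so -0.0 compares equal to 0 and passes, which matches ToInt32 mapping -0 to 0):

    #include <cstdint>

    bool double_is_int32(double d) {
      // Guard the cast: out-of-range (and NaN) conversions are undefined in C++.
      if (!(d >= -2147483648.0 && d <= 2147483647.0)) return false;
      int32_t i = static_cast<int32_t>(d);   // vcvt_s32_f64 (round toward zero)
      return static_cast<double>(i) == d;    // vcvt_f64_s32 + vcmp
    }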
670 void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, | |
671 Register object, | |
672 Register dst, | |
673 Register heap_number_map, | |
674 Register scratch1, | |
675 Register scratch2, | |
676 Register scratch3, | |
677 DwVfpRegister double_scratch0, | |
678 DwVfpRegister double_scratch1, | |
679 Label* not_int32) { | |
680 ASSERT(!dst.is(object)); | |
681 ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object)); | |
682 ASSERT(!scratch1.is(scratch2) && | |
683 !scratch1.is(scratch3) && | |
684 !scratch2.is(scratch3)); | |
685 | |
686 Label done, maybe_undefined; | |
687 | |
688 __ UntagAndJumpIfSmi(dst, object, &done); | |
689 | |
690 __ AssertRootValue(heap_number_map, | |
691 Heap::kHeapNumberMapRootIndex, | |
692 "HeapNumberMap register clobbered."); | |
693 | |
694 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined); | |
695 | |
696 // Object is a heap number. | |
697 // Convert the floating point value to a 32-bit integer. | |
698 // Load the double value. | |
699 __ sub(scratch1, object, Operand(kHeapObjectTag)); | |
700 __ vldr(double_scratch0, scratch1, HeapNumber::kValueOffset); | |
701 | |
702 __ TryDoubleToInt32Exact(dst, double_scratch0, double_scratch1); | |
703 // Jump to not_int32 if the operation did not succeed. | |
704 __ b(ne, not_int32); | |
705 __ b(&done); | |
706 | |
707 __ bind(&maybe_undefined); | |
708 __ CompareRoot(object, Heap::kUndefinedValueRootIndex); | |
709 __ b(ne, not_int32); | |
710 // |undefined| is truncated to 0. | |
711 __ mov(dst, Operand(Smi::FromInt(0))); | |
712 // Fall through. | |
713 | |
714 __ bind(&done); | |
715 } | |
716 | |
717 | |
718 void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, | |
719 Register src_exponent, | |
720 Register src_mantissa, | |
721 Register dst, | |
722 Register scratch, | |
723 Label* not_int32) { | |
724 // Get exponent alone in scratch. | |
725 __ Ubfx(scratch, | |
726 src_exponent, | |
727 HeapNumber::kExponentShift, | |
728 HeapNumber::kExponentBits); | |
729 | |
730 // Subtract the bias from the exponent. | |
731 __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias), SetCC); | |
732 | |
733 // src_exponent: higher (exponent) part of the double value. | |
734 // src_mantissa: lower (mantissa) part of the double value. | |
735 // scratch: unbiased exponent. | |
736 | |
737 // Fast cases. Check for obvious non 32-bit integer values. | |
738 // Negative exponent cannot yield 32-bit integers. | |
739 __ b(mi, not_int32); | |
740 // Exponent greater than 31 cannot yield 32-bit integers. | |
741 // Also, a positive value with an exponent equal to 31 is outside of the | |
742 // signed 32-bit integer range. | |
743 // Another way to put it is that if (exponent - signbit) > 30 then the | |
744 // number cannot be represented as an int32. | |
745 Register tmp = dst; | |
746 __ sub(tmp, scratch, Operand(src_exponent, LSR, 31)); | |
747 __ cmp(tmp, Operand(30)); | |
748 __ b(gt, not_int32); | |
749 // With an exponent of at most 30, mantissa bits [21:0] are fractional; if any are set the value is not a 32-bit integer. | |
750 __ tst(src_mantissa, Operand(0x3fffff)); | |
751 __ b(ne, not_int32); | |
752 | |
753 // Otherwise every mantissa bit below position (52 - exponent) must be | |
754 // zero, i.e. the value has no fractional part. | |
755 // Because bits [21:0] are already known to be zero, it suffices to check | |
756 // that the (32 - exponent) lowest bits of the 32 highest mantissa bits | |
757 // are zero. | |
758 | |
759 // Get the 32 highest bits of the mantissa in dst. | |
760 __ Ubfx(dst, | |
761 src_mantissa, | |
762 HeapNumber::kMantissaBitsInTopWord, | |
763 32 - HeapNumber::kMantissaBitsInTopWord); | |
764 __ orr(dst, | |
765 dst, | |
766 Operand(src_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord)); | |
767 | |
768 // Create the mask and test the lower bits (of the higher bits). | |
769 __ rsb(scratch, scratch, Operand(32)); | |
770 __ mov(src_mantissa, Operand(1)); | |
771 __ mov(src_exponent, Operand(src_mantissa, LSL, scratch)); | |
772 __ sub(src_exponent, src_exponent, Operand(1)); | |
773 __ tst(dst, src_exponent); | |
774 __ b(ne, not_int32); | |
775 } | |
776 | |
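The exponent/mantissa reasoning above can be restated on the raw words of the double. A hedged C++ transcription of the same checks (assumption: field positions follow the IEEE-754 layout behind the HeapNumber constants; like the stub, a biased exponent below the bias, including the value 0.0, is conservatively reported as not an int32 and left to a slower path):

    #include <cstdint>

    // hi = sign/exponent/top-20-mantissa word, lo = low 32 mantissa bits.
    bool words_are_int32(uint32_t hi, uint32_t lo) {
      int32_t exponent = static_cast<int32_t>((hi >> 20) & 0x7FF) - 1023;  // unbias
      if (exponent < 0) return false;              // negative exponent: magnitude < 1
      int32_t sign = static_cast<int32_t>(hi >> 31);
      if (exponent - sign > 30) return false;      // outside the signed 32-bit range
      if (lo & 0x3FFFFF) return false;             // mantissa bits [21:0] must be clear
      uint32_t top32 = (hi << 12) | (lo >> 20);    // the 32 highest mantissa bits
      uint32_t mask = static_cast<uint32_t>((1ull << (32 - exponent)) - 1);
      return (top32 & mask) == 0;                  // remaining fractional bits clear
    }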
777 | |
778 void FloatingPointHelper::CallCCodeForDoubleOperation( | |
779 MacroAssembler* masm, | |
780 Token::Value op, | |
781 Register heap_number_result, | |
782 Register scratch) { | |
783 // Using core registers: | |
784 // r0: Left value (least significant part of mantissa). | |
785 // r1: Left value (sign, exponent, top of mantissa). | |
786 // r2: Right value (least significant part of mantissa). | |
787 // r3: Right value (sign, exponent, top of mantissa). | |
788 | |
789 // Assert that heap_number_result is callee-saved. | |
790 // We currently always use r5 to pass it. | |
791 ASSERT(heap_number_result.is(r5)); | |
792 | |
793 // Push the current return address before the C call. Return will be | |
794 // through pop(pc) below. | |
795 __ push(lr); | |
796 __ PrepareCallCFunction(0, 2, scratch); | |
797 if (masm->use_eabi_hardfloat()) { | |
798 __ vmov(d0, r0, r1); | |
799 __ vmov(d1, r2, r3); | |
800 } | |
801 { | |
802 AllowExternalCallThatCantCauseGC scope(masm); | |
803 __ CallCFunction( | |
804 ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); | |
805 } | |
806 // Store answer in the overwritable heap number. Double returned in | |
807 // registers r0 and r1 or in d0. | |
808 if (masm->use_eabi_hardfloat()) { | |
809 __ vstr(d0, | |
810 FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); | |
811 } else { | |
812 __ Strd(r0, r1, FieldMemOperand(heap_number_result, | |
813 HeapNumber::kValueOffset)); | |
814 } | |
815 // Place heap_number_result in r0 and return to the pushed return address. | |
816 __ mov(r0, Operand(heap_number_result)); | |
817 __ pop(pc); | |
818 } | |
819 | |
820 | |
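The vmov(d0, r0, r1) / vmov(d1, r2, r3) moves above bridge the two ARM EABI variants: with hard-float, double arguments travel directly in d0/d1, while with soft-float each double is split across a core-register pair. A small sketch of that split, assuming the little-endian word order the stub relies on:

    #include <cstdint>
    #include <cstring>

    // Split a double into the two core-register words used by the soft-float ABI.
    void split_double(double d, uint32_t* lo, uint32_t* hi) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      *lo = static_cast<uint32_t>(bits);        // r0 (left operand) / r2 (right operand)
      *hi = static_cast<uint32_t>(bits >> 32);  // r1 (left operand) / r3 (right operand)
    }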
821 bool WriteInt32ToHeapNumberStub::IsPregenerated() { | 509 bool WriteInt32ToHeapNumberStub::IsPregenerated() { |
822 // These variants are compiled ahead of time. See next method. | 510 // These variants are compiled ahead of time. See next method. |
823 if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) { | 511 if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) { |
824 return true; | 512 return true; |
825 } | 513 } |
826 if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) { | 514 if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) { |
827 return true; | 515 return true; |
828 } | 516 } |
829 // Other register combinations are generated as and when they are needed, | 517 // Other register combinations are generated as and when they are needed, |
830 // so it is unsafe to call them from stubs (we can't generate a stub while | 518 // so it is unsafe to call them from stubs (we can't generate a stub while |
(...skipping 217 matching lines...)
1048 // Rhs is a smi, lhs is a heap number. | 736 // Rhs is a smi, lhs is a heap number. |
1049 // Load the double from lhs, tagged HeapNumber r1, to d7. | 737 // Load the double from lhs, tagged HeapNumber r1, to d7. |
1050 __ sub(r7, lhs, Operand(kHeapObjectTag)); | 738 __ sub(r7, lhs, Operand(kHeapObjectTag)); |
1051 __ vldr(d7, r7, HeapNumber::kValueOffset); | 739 __ vldr(d7, r7, HeapNumber::kValueOffset); |
1052 // Convert rhs to a double in d6. | 740 // Convert rhs to a double in d6. |
1053 __ SmiToDoubleVFPRegister(rhs, d6, r7, s13); | 741 __ SmiToDoubleVFPRegister(rhs, d6, r7, s13); |
1054 // Fall through to both_loaded_as_doubles. | 742 // Fall through to both_loaded_as_doubles. |
1055 } | 743 } |
1056 | 744 |
1057 | 745 |
1058 void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) { | |
1059 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); | |
1060 Register rhs_exponent = exp_first ? r0 : r1; | |
1061 Register lhs_exponent = exp_first ? r2 : r3; | |
1062 Register rhs_mantissa = exp_first ? r1 : r0; | |
1063 Register lhs_mantissa = exp_first ? r3 : r2; | |
1064 Label one_is_nan, neither_is_nan; | |
1065 | |
1066 __ Sbfx(r4, | |
1067 lhs_exponent, | |
1068 HeapNumber::kExponentShift, | |
1069 HeapNumber::kExponentBits); | |
1070 // NaNs have all-one exponents so they sign extend to -1. | |
1071 __ cmp(r4, Operand(-1)); | |
1072 __ b(ne, lhs_not_nan); | |
1073 __ mov(r4, | |
1074 Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), | |
1075 SetCC); | |
1076 __ b(ne, &one_is_nan); | |
1077 __ cmp(lhs_mantissa, Operand::Zero()); | |
1078 __ b(ne, &one_is_nan); | |
1079 | |
1080 __ bind(lhs_not_nan); | |
1081 __ Sbfx(r4, | |
1082 rhs_exponent, | |
1083 HeapNumber::kExponentShift, | |
1084 HeapNumber::kExponentBits); | |
1085 // NaNs have all-one exponents so they sign extend to -1. | |
1086 __ cmp(r4, Operand(-1)); | |
1087 __ b(ne, &neither_is_nan); | |
1088 __ mov(r4, | |
1089 Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), | |
1090 SetCC); | |
1091 __ b(ne, &one_is_nan); | |
1092 __ cmp(rhs_mantissa, Operand::Zero()); | |
1093 __ b(eq, &neither_is_nan); | |
1094 | |
1095 __ bind(&one_is_nan); | |
1096 // NaN comparisons always fail. | |
1097 // Load whatever we need in r0 to make the comparison fail. | |
1098 if (cond == lt || cond == le) { | |
1099 __ mov(r0, Operand(GREATER)); | |
1100 } else { | |
1101 __ mov(r0, Operand(LESS)); | |
1102 } | |
1103 __ Ret(); | |
1104 | |
1105 __ bind(&neither_is_nan); | |
1106 } | |
1107 | |
1108 | |
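EmitNanCheck above is the textbook bit-level NaN test: an all-ones exponent field together with at least one set mantissa bit (an all-ones exponent with a zero mantissa is an infinity, not a NaN). A hedged C++ restatement on the two words of a double:

    #include <cstdint>

    // exponent_word = sign/exponent/top-20-mantissa, mantissa_word = low 32 mantissa bits.
    bool words_are_nan(uint32_t exponent_word, uint32_t mantissa_word) {
      if (((exponent_word >> 20) & 0x7FF) != 0x7FF) return false;   // exponent not all ones
      return (exponent_word & 0xFFFFF) != 0 || mantissa_word != 0;  // some mantissa bit set
    }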
1109 // See comment at call site. | 746 // See comment at call site. |
1110 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, | 747 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, |
1111 Register lhs, | 748 Register lhs, |
1112 Register rhs) { | 749 Register rhs) { |
1113 ASSERT((lhs.is(r0) && rhs.is(r1)) || | 750 ASSERT((lhs.is(r0) && rhs.is(r1)) || |
1114 (lhs.is(r1) && rhs.is(r0))); | 751 (lhs.is(r1) && rhs.is(r0))); |
1115 | 752 |
1116 // If either operand is a JS object or an oddball value, then they are | 753 // If either operand is a JS object or an oddball value, then they are |
1117 // not equal since their pointers are different. | 754 // not equal since their pointers are different. |
1118 // There is no test for undetectability in strict equality. | 755 // There is no test for undetectability in strict equality. |
(...skipping 709 matching lines...)
1828 __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset)); | 1465 __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset)); |
1829 __ mov(r0, Operand(r1)); | 1466 __ mov(r0, Operand(r1)); |
1830 } | 1467 } |
1831 __ Ret(); | 1468 __ Ret(); |
1832 } | 1469 } |
1833 | 1470 |
1834 | 1471 |
1835 void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm, | 1472 void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm, |
1836 Label* slow) { | 1473 Label* slow) { |
1837 EmitCheckForHeapNumber(masm, r0, r1, r6, slow); | 1474 EmitCheckForHeapNumber(masm, r0, r1, r6, slow); |
1475 | |
1838 // Convert the heap number in r0 to an untagged integer in r1. | 1476 // Convert the heap number in r0 to an untagged integer in r1. |
1839 __ ECMAConvertNumberToInt32(r0, r1, r2, r3, r4, d0, d1); | 1477 __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset)); |
1478 __ ECMAToInt32(r1, d0, r2, r3, r4, d1); | |
1840 | 1479 |
1841 // Do the bitwise operation and check if the result fits in a smi. | 1480 // Do the bitwise operation and check if the result fits in a smi. |
1842 Label try_float; | 1481 Label try_float; |
1843 __ mvn(r1, Operand(r1)); | 1482 __ mvn(r1, Operand(r1)); |
1844 __ cmn(r1, Operand(0x40000000)); | 1483 __ cmn(r1, Operand(0x40000000)); |
1845 __ b(mi, &try_float); | 1484 __ b(mi, &try_float); |
1846 | 1485 |
1847 // Tag the result as a smi and we're done. | 1486 // Tag the result as a smi and we're done. |
1848 __ mov(r0, Operand(r1, LSL, kSmiTagSize)); | 1487 __ mov(r0, Operand(r1, LSL, kSmiTagSize)); |
1849 __ Ret(); | 1488 __ Ret(); |
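The cmn against 0x40000000 above is the standard smi-overflow test: adding 2^30 to the result sets the sign flag exactly when the value falls outside the 31-bit smi payload range. In C++ terms (assuming the 32-bit, one-tag-bit smi encoding):

    #include <cstdint>

    // A 32-bit smi payload is 31 bits wide: [-2^30, 2^30 - 1].
    bool fits_in_smi(int32_t value) {
      // Mirrors `cmn r1, #0x40000000` + `b(mi, ...)`: the sum is negative
      // exactly when the value lies outside the representable smi range.
      return static_cast<uint32_t>(value) + 0x40000000u < 0x80000000u;
    }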
(...skipping 71 matching lines...)
1921 break; | 1560 break; |
1922 case Token::BIT_NOT: | 1561 case Token::BIT_NOT: |
1923 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); | 1562 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); |
1924 break; | 1563 break; |
1925 default: | 1564 default: |
1926 UNREACHABLE(); | 1565 UNREACHABLE(); |
1927 } | 1566 } |
1928 } | 1567 } |
1929 | 1568 |
1930 | 1569 |
1570 // Generates code to call a C function to do a double operation. | |
1571 // This code never falls through, but returns with a heap number containing | |
1572 // the result in r0. | |
1573 // Register heap_number_result must be a heap number in which the | |
1574 // result of the operation will be stored. | |
1575 // Requires the following layout on entry: | |
1576 // d0: Left value. | |
1577 // d1: Right value. | |
1578 // With the soft-float ABI, r0, r1, r2 and r3 are used as well. | |
1579 static void CallCCodeForDoubleOperation(MacroAssembler* masm, | |
1580 Token::Value op, | |
1581 Register heap_number_result, | |
1582 Register scratch) { | |
1583 // Assert that heap_number_result is callee-saved. | |
1584 // We currently always use r5 to pass it. | |
1585 ASSERT(heap_number_result.is(r5)); | |
1586 | |
1587 // Push the current return address before the C call. Return will be | |
1588 // through pop(pc) below. | |
1589 __ push(lr); | |
1590 __ PrepareCallCFunction(0, 2, scratch); | |
1591 if (!masm->use_eabi_hardfloat()) { | |
1592 __ vmov(r0, r1, d0); | |
1593 __ vmov(r2, r3, d1); | |
1594 } | |
1595 { | |
1596 AllowExternalCallThatCantCauseGC scope(masm); | |
1597 __ CallCFunction( | |
1598 ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); | |
1599 } | |
1600 // Store answer in the overwritable heap number. Double returned in | |
1601 // registers r0 and r1 or in d0. | |
1602 if (masm->use_eabi_hardfloat()) { | |
1603 __ vstr(d0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); | |
1604 } else { | |
1605 __ Strd(r0, r1, | |
1606 FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); | |
1607 } | |
1608 // Place heap_number_result in r0 and return to the pushed return address. | |
1609 __ mov(r0, Operand(heap_number_result)); | |
1610 __ pop(pc); | |
1611 } | |
1612 | |
1613 | |
1931 void BinaryOpStub::Initialize() { | 1614 void BinaryOpStub::Initialize() { |
1932 platform_specific_bit_ = true; // VFP2 is a base requirement for V8 | 1615 platform_specific_bit_ = true; // VFP2 is a base requirement for V8 |
1933 } | 1616 } |
1934 | 1617 |
1935 | 1618 |
1936 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { | 1619 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
1937 Label get_result; | 1620 Label get_result; |
1938 | 1621 |
1939 __ Push(r1, r0); | 1622 __ Push(r1, r0); |
1940 | 1623 |
(...skipping 257 matching lines...)
2198 | 1881 |
2199 Register heap_number_map = r9; | 1882 Register heap_number_map = r9; |
2200 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 1883 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
2201 | 1884 |
2202 switch (op) { | 1885 switch (op) { |
2203 case Token::ADD: | 1886 case Token::ADD: |
2204 case Token::SUB: | 1887 case Token::SUB: |
2205 case Token::MUL: | 1888 case Token::MUL: |
2206 case Token::DIV: | 1889 case Token::DIV: |
2207 case Token::MOD: { | 1890 case Token::MOD: { |
2208 // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 | |
2209 // depending on whether VFP3 is available or not. | |
2210 FloatingPointHelper::Destination destination = | |
2211 op != Token::MOD ? | |
2212 FloatingPointHelper::kVFPRegisters : | |
2213 FloatingPointHelper::kCoreRegisters; | |
2214 | |
2215 // Allocate new heap number for result. | 1891 // Allocate new heap number for result. |
2216 Register result = r5; | 1892 Register result = r5; |
2217 BinaryOpStub_GenerateHeapResultAllocation( | 1893 BinaryOpStub_GenerateHeapResultAllocation( |
2218 masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); | 1894 masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); |
2219 | 1895 |
2220 // Load the operands. | 1896 // Load left and right operands into d0 and d1. |
ulan 2013/04/22 11:24:49: Load left and right operands into d0 and d1.
Rodolph Perfetta 2013/04/22 21:30:33: Done.
2221 if (smi_operands) { | 1897 if (smi_operands) { |
2222 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2); | 1898 __ SmiUntag(scratch1, r0); |
ulan 2013/04/22 11:24:49: We can use left and right instead of r1 and r0.
Rodolph Perfetta 2013/04/22 21:30:33: Done.
1899 __ vmov(d1.high(), scratch1); | |
1900 __ vcvt_f64_s32(d1, d1.high()); | |
1901 __ SmiUntag(scratch1, r1); | |
1902 __ vmov(d0.high(), scratch1); | |
1903 __ vcvt_f64_s32(d0, d0.high()); | |
2223 } else { | 1904 } else { |
2224 // Load right operand to d7 or r2/r3. | 1905 // Load right operand to d1. |
ulan 2013/04/22 11:24:49: Obsolete comments here and below.
Rodolph Perfetta 2013/04/22 21:30:33: Done.
2225 if (right_type == BinaryOpIC::INT32) { | 1906 if (right_type == BinaryOpIC::INT32) { |
2226 FloatingPointHelper::LoadNumberAsInt32Double( | 1907 __ LoadNumberAsInt32Double( |
2227 masm, right, destination, d7, d8, r2, r3, heap_number_map, | 1908 right, d1, heap_number_map, scratch1, d8, miss); |
2228 scratch1, scratch2, s0, miss); | |
2229 } else { | 1909 } else { |
2230 Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers; | 1910 Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers; |
2231 FloatingPointHelper::LoadNumber( | 1911 __ LoadNumber(right, d1, heap_number_map, scratch1, fail); |
2232 masm, destination, right, d7, r2, r3, heap_number_map, | |
2233 scratch1, scratch2, fail); | |
2234 } | 1912 } |
2235 // Load left operand to d6 or r0/r1. This keeps r0/r1 intact if it | 1913 // Load left operand to d0. This keeps r0/r1 intact if it |
2236 // jumps to |miss|. | 1914 // jumps to |miss|. |
2237 if (left_type == BinaryOpIC::INT32) { | 1915 if (left_type == BinaryOpIC::INT32) { |
2238 FloatingPointHelper::LoadNumberAsInt32Double( | 1916 __ LoadNumberAsInt32Double( |
2239 masm, left, destination, d6, d8, r0, r1, heap_number_map, | 1917 left, d0, heap_number_map, scratch1, d8, miss); |
2240 scratch1, scratch2, s0, miss); | |
2241 } else { | 1918 } else { |
2242 Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers; | 1919 Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers; |
2243 FloatingPointHelper::LoadNumber( | 1920 __ LoadNumber( |
2244 masm, destination, left, d6, r0, r1, heap_number_map, | 1921 left, d0, heap_number_map, scratch1, fail); |
2245 scratch1, scratch2, fail); | |
2246 } | 1922 } |
2247 } | 1923 } |
2248 | 1924 |
2249 // Calculate the result. | 1925 // Calculate the result. |
2250 if (destination == FloatingPointHelper::kVFPRegisters) { | 1926 if (op != Token::MOD) { |
2251 // Using VFP registers: | 1927 // Using VFP registers: |
2252 // d6: Left value | 1928 // d0: Left value |
2253 // d7: Right value | 1929 // d1: Right value |
2254 switch (op) { | 1930 switch (op) { |
2255 case Token::ADD: | 1931 case Token::ADD: |
2256 __ vadd(d5, d6, d7); | 1932 __ vadd(d5, d0, d1); |
2257 break; | 1933 break; |
2258 case Token::SUB: | 1934 case Token::SUB: |
2259 __ vsub(d5, d6, d7); | 1935 __ vsub(d5, d0, d1); |
2260 break; | 1936 break; |
2261 case Token::MUL: | 1937 case Token::MUL: |
2262 __ vmul(d5, d6, d7); | 1938 __ vmul(d5, d0, d1); |
2263 break; | 1939 break; |
2264 case Token::DIV: | 1940 case Token::DIV: |
2265 __ vdiv(d5, d6, d7); | 1941 __ vdiv(d5, d0, d1); |
2266 break; | 1942 break; |
2267 default: | 1943 default: |
2268 UNREACHABLE(); | 1944 UNREACHABLE(); |
2269 } | 1945 } |
2270 | 1946 |
2271 __ sub(r0, result, Operand(kHeapObjectTag)); | 1947 __ sub(r0, result, Operand(kHeapObjectTag)); |
2272 __ vstr(d5, r0, HeapNumber::kValueOffset); | 1948 __ vstr(d5, r0, HeapNumber::kValueOffset); |
2273 __ add(r0, r0, Operand(kHeapObjectTag)); | 1949 __ add(r0, r0, Operand(kHeapObjectTag)); |
2274 __ Ret(); | 1950 __ Ret(); |
2275 } else { | 1951 } else { |
2276 // Call the C function to handle the double operation. | 1952 // Call the C function to handle the double operation. |
2277 FloatingPointHelper::CallCCodeForDoubleOperation(masm, | 1953 CallCCodeForDoubleOperation(masm, op, result, scratch1); |
2278 op, | |
2279 result, | |
2280 scratch1); | |
2281 if (FLAG_debug_code) { | 1954 if (FLAG_debug_code) { |
2282 __ stop("Unreachable code."); | 1955 __ stop("Unreachable code."); |
2283 } | 1956 } |
2284 } | 1957 } |
2285 break; | 1958 break; |
2286 } | 1959 } |
2287 case Token::BIT_OR: | 1960 case Token::BIT_OR: |
2288 case Token::BIT_XOR: | 1961 case Token::BIT_XOR: |
2289 case Token::BIT_AND: | 1962 case Token::BIT_AND: |
2290 case Token::SAR: | 1963 case Token::SAR: |
2291 case Token::SHR: | 1964 case Token::SHR: |
2292 case Token::SHL: { | 1965 case Token::SHL: { |
2293 if (smi_operands) { | 1966 if (smi_operands) { |
2294 __ SmiUntag(r3, left); | 1967 __ SmiUntag(r3, left); |
2295 __ SmiUntag(r2, right); | 1968 __ SmiUntag(r2, right); |
2296 } else { | 1969 } else { |
2297 // Convert operands to 32-bit integers. Right in r2 and left in r3. | 1970 // Convert operands to 32-bit integers. Right in r2 and left in r3. |
2298 FloatingPointHelper::ConvertNumberToInt32(masm, | 1971 __ ConvertNumberToInt32( |
2299 left, | 1972 left, r3, heap_number_map, |
2300 r3, | 1973 scratch1, scratch2, scratch3, d0, d1, not_numbers); |
2301 heap_number_map, | 1974 __ ConvertNumberToInt32( |
2302 scratch1, | 1975 right, r2, heap_number_map, |
2303 scratch2, | 1976 scratch1, scratch2, scratch3, d0, d1, not_numbers); |
2304 scratch3, | |
2305 d0, | |
2306 d1, | |
2307 not_numbers); | |
2308 FloatingPointHelper::ConvertNumberToInt32(masm, | |
2309 right, | |
2310 r2, | |
2311 heap_number_map, | |
2312 scratch1, | |
2313 scratch2, | |
2314 scratch3, | |
2315 d0, | |
2316 d1, | |
2317 not_numbers); | |
2318 } | 1977 } |
2319 | 1978 |
2320 Label result_not_a_smi; | 1979 Label result_not_a_smi; |
2321 switch (op) { | 1980 switch (op) { |
2322 case Token::BIT_OR: | 1981 case Token::BIT_OR: |
2323 __ orr(r2, r3, Operand(r2)); | 1982 __ orr(r2, r3, Operand(r2)); |
2324 break; | 1983 break; |
2325 case Token::BIT_XOR: | 1984 case Token::BIT_XOR: |
2326 __ eor(r2, r3, Operand(r2)); | 1985 __ eor(r2, r3, Operand(r2)); |
2327 break; | 1986 break; |
(...skipping 198 matching lines...)
2526 // again if this changes. | 2185 // again if this changes. |
2527 if (left_type_ == BinaryOpIC::SMI) { | 2186 if (left_type_ == BinaryOpIC::SMI) { |
2528 __ JumpIfNotSmi(left, &transition); | 2187 __ JumpIfNotSmi(left, &transition); |
2529 } | 2188 } |
2530 if (right_type_ == BinaryOpIC::SMI) { | 2189 if (right_type_ == BinaryOpIC::SMI) { |
2531 __ JumpIfNotSmi(right, &transition); | 2190 __ JumpIfNotSmi(right, &transition); |
2532 } | 2191 } |
2533 // Load both operands and check that they are 32-bit integers. | 2192 // Load both operands and check that they are 32-bit integers. |
2534 // Jump to type transition if they are not. The registers r0 and r1 (right | 2193 // Jump to type transition if they are not. The registers r0 and r1 (right |
2535 // and left) are preserved for the runtime call. | 2194 // and left) are preserved for the runtime call. |
2536 FloatingPointHelper::Destination destination = (op_ != Token::MOD) | 2195 __ LoadNumberAsInt32Double( |
2537 ? FloatingPointHelper::kVFPRegisters | 2196 right, d1, heap_number_map, scratch1, d8, &transition); |
2538 : FloatingPointHelper::kCoreRegisters; | 2197 __ LoadNumberAsInt32Double( |
2198 left, d0, heap_number_map, scratch1, d8, &transition); | |
2539 | 2199 |
2540 FloatingPointHelper::LoadNumberAsInt32Double(masm, | 2200 if (op_ != Token::MOD) { |
2541 right, | |
2542 destination, | |
2543 d7, | |
2544 d8, | |
2545 r2, | |
2546 r3, | |
2547 heap_number_map, | |
2548 scratch1, | |
2549 scratch2, | |
2550 s0, | |
2551 &transition); | |
2552 FloatingPointHelper::LoadNumberAsInt32Double(masm, | |
2553 left, | |
2554 destination, | |
2555 d6, | |
2556 d8, | |
2557 r4, | |
2558 r5, | |
2559 heap_number_map, | |
2560 scratch1, | |
2561 scratch2, | |
2562 s0, | |
2563 &transition); | |
2564 | |
2565 if (destination == FloatingPointHelper::kVFPRegisters) { | |
2566 Label return_heap_number; | 2201 Label return_heap_number; |
2567 switch (op_) { | 2202 switch (op_) { |
2568 case Token::ADD: | 2203 case Token::ADD: |
2569 __ vadd(d5, d6, d7); | 2204 __ vadd(d5, d0, d1); |
2570 break; | 2205 break; |
2571 case Token::SUB: | 2206 case Token::SUB: |
2572 __ vsub(d5, d6, d7); | 2207 __ vsub(d5, d0, d1); |
2573 break; | 2208 break; |
2574 case Token::MUL: | 2209 case Token::MUL: |
2575 __ vmul(d5, d6, d7); | 2210 __ vmul(d5, d0, d1); |
2576 break; | 2211 break; |
2577 case Token::DIV: | 2212 case Token::DIV: |
2578 __ vdiv(d5, d6, d7); | 2213 __ vdiv(d5, d0, d1); |
2579 break; | 2214 break; |
2580 default: | 2215 default: |
2581 UNREACHABLE(); | 2216 UNREACHABLE(); |
2582 } | 2217 } |
2583 | 2218 |
2584 if (op_ != Token::DIV) { | 2219 if (op_ != Token::DIV) { |
2585 // These operations produce an integer result. | 2220 // These operations produce an integer result. |
2586 // Try to return a smi if we can. | 2221 // Try to return a smi if we can. |
2587 // Otherwise return a heap number if allowed, or jump to type | 2222 // Otherwise return a heap number if allowed, or jump to type |
2588 // transition. | 2223 // transition. |
(...skipping 65 matching lines...)
2654 heap_number_map, | 2289 heap_number_map, |
2655 scratch1, | 2290 scratch1, |
2656 scratch2, | 2291 scratch2, |
2657 &pop_and_call_runtime, | 2292 &pop_and_call_runtime, |
2658 mode_); | 2293 mode_); |
2659 | 2294 |
2660 // Load the left value from the value saved on the stack. | 2295 // Load the left value from the value saved on the stack. |
2661 __ Pop(r1, r0); | 2296 __ Pop(r1, r0); |
2662 | 2297 |
2663 // Call the C function to handle the double operation. | 2298 // Call the C function to handle the double operation. |
2664 FloatingPointHelper::CallCCodeForDoubleOperation( | 2299 CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1); |
2665 masm, op_, heap_number_result, scratch1); | |
2666 if (FLAG_debug_code) { | 2300 if (FLAG_debug_code) { |
2667 __ stop("Unreachable code."); | 2301 __ stop("Unreachable code."); |
2668 } | 2302 } |
2669 | 2303 |
2670 __ bind(&pop_and_call_runtime); | 2304 __ bind(&pop_and_call_runtime); |
2671 __ Drop(2); | 2305 __ Drop(2); |
2672 __ b(&call_runtime); | 2306 __ b(&call_runtime); |
2673 } | 2307 } |
2674 | 2308 |
2675 break; | 2309 break; |
2676 } | 2310 } |
2677 | 2311 |
2678 case Token::BIT_OR: | 2312 case Token::BIT_OR: |
2679 case Token::BIT_XOR: | 2313 case Token::BIT_XOR: |
2680 case Token::BIT_AND: | 2314 case Token::BIT_AND: |
2681 case Token::SAR: | 2315 case Token::SAR: |
2682 case Token::SHR: | 2316 case Token::SHR: |
2683 case Token::SHL: { | 2317 case Token::SHL: { |
2684 Label return_heap_number; | 2318 Label return_heap_number; |
2685 Register scratch3 = r5; | |
2686 // Convert operands to 32-bit integers. Right in r2 and left in r3. The | 2319 // Convert operands to 32-bit integers. Right in r2 and left in r3. The |
2687 // registers r0 and r1 (right and left) are preserved for the runtime | 2320 // registers r0 and r1 (right and left) are preserved for the runtime |
2688 // call. | 2321 // call. |
2689 FloatingPointHelper::LoadNumberAsInt32(masm, | 2322 __ LoadNumberAsInt32(left, r3, heap_number_map, |
2690 left, | 2323 scratch1, d0, d1, &transition); |
2691 r3, | 2324 __ LoadNumberAsInt32(right, r2, heap_number_map, |
2692 heap_number_map, | 2325 scratch1, d0, d1, &transition); |
2693 scratch1, | |
2694 scratch2, | |
2695 scratch3, | |
2696 d0, | |
2697 d1, | |
2698 &transition); | |
2699 FloatingPointHelper::LoadNumberAsInt32(masm, | |
2700 right, | |
2701 r2, | |
2702 heap_number_map, | |
2703 scratch1, | |
2704 scratch2, | |
2705 scratch3, | |
2706 d0, | |
2707 d1, | |
2708 &transition); | |
2709 | 2326 |
2710 // The ECMA-262 standard specifies that, for shift operations, only the | 2327 // The ECMA-262 standard specifies that, for shift operations, only the |
2711 // 5 least significant bits of the shift value should be used. | 2328 // 5 least significant bits of the shift value should be used. |
2712 switch (op_) { | 2329 switch (op_) { |
2713 case Token::BIT_OR: | 2330 case Token::BIT_OR: |
2714 __ orr(r2, r3, Operand(r2)); | 2331 __ orr(r2, r3, Operand(r2)); |
2715 break; | 2332 break; |
2716 case Token::BIT_XOR: | 2333 case Token::BIT_XOR: |
2717 __ eor(r2, r3, Operand(r2)); | 2334 __ eor(r2, r3, Operand(r2)); |
2718 break; | 2335 break; |
(...skipping 4719 matching lines...)
7438 // and value is Smi. | 7055 // and value is Smi. |
7439 __ bind(&smi_element); | 7056 __ bind(&smi_element); |
7440 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); | 7057 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); |
7441 __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); | 7058 __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); |
7442 __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize)); | 7059 __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize)); |
7443 __ Ret(); | 7060 __ Ret(); |
7444 | 7061 |
7445 // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS. | 7062 // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS. |
7446 __ bind(&double_elements); | 7063 __ bind(&double_elements); |
7447 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); | 7064 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); |
7448 __ StoreNumberToDoubleElements(r0, r3, | 7065 __ StoreNumberToDoubleElements(r0, r3, r5, r6, &slow_elements); |
7449 // Overwrites all regs after this. | |
7450 r5, r9, r6, r7, r2, | |
7451 &slow_elements); | |
7452 __ Ret(); | 7066 __ Ret(); |
7453 } | 7067 } |
7454 | 7068 |
7455 | 7069 |
7456 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { | 7070 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { |
7457 CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs); | 7071 CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs); |
7458 __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); | 7072 __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); |
7459 int parameter_count_offset = | 7073 int parameter_count_offset = |
7460 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; | 7074 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; |
7461 __ ldr(r1, MemOperand(fp, parameter_count_offset)); | 7075 __ ldr(r1, MemOperand(fp, parameter_count_offset)); |
(...skipping 64 matching lines...)
7526 | 7140 |
7527 __ Pop(lr, r5, r1); | 7141 __ Pop(lr, r5, r1); |
7528 __ Ret(); | 7142 __ Ret(); |
7529 } | 7143 } |
7530 | 7144 |
7531 #undef __ | 7145 #undef __ |
7532 | 7146 |
7533 } } // namespace v8::internal | 7147 } } // namespace v8::internal |
7534 | 7148 |
7535 #endif // V8_TARGET_ARCH_ARM | 7149 #endif // V8_TARGET_ARCH_ARM |