OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1610 matching lines...)
1621 __ bind(&cache_miss); | 1621 __ bind(&cache_miss); |
1622 __ IncrementCounter(counters->transcendental_cache_miss(), 1); | 1622 __ IncrementCounter(counters->transcendental_cache_miss(), 1); |
1623 // Update cache with new value. | 1623 // Update cache with new value. |
1624 if (tagged) { | 1624 if (tagged) { |
1625 __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack); | 1625 __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack); |
1626 } else { // UNTAGGED. | 1626 } else { // UNTAGGED. |
1627 __ AllocateHeapNumber(rax, rdi, &skip_cache); | 1627 __ AllocateHeapNumber(rax, rdi, &skip_cache); |
1628 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1); | 1628 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1); |
1629 __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset)); | 1629 __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset)); |
1630 } | 1630 } |
1631 GenerateOperation(masm); | 1631 GenerateOperation(masm, type_); |
1632 __ movq(Operand(rcx, 0), rbx); | 1632 __ movq(Operand(rcx, 0), rbx); |
1633 __ movq(Operand(rcx, 2 * kIntSize), rax); | 1633 __ movq(Operand(rcx, 2 * kIntSize), rax); |
1634 __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset)); | 1634 __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset)); |
1635 if (tagged) { | 1635 if (tagged) { |
1636 __ ret(kPointerSize); | 1636 __ ret(kPointerSize); |
1637 } else { // UNTAGGED. | 1637 } else { // UNTAGGED. |
1638 __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset)); | 1638 __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset)); |
1639 __ Ret(); | 1639 __ Ret(); |
1640 | 1640 |
1641 // Skip cache and return answer directly, only in untagged case. | 1641 // Skip cache and return answer directly, only in untagged case. |
1642 __ bind(&skip_cache); | 1642 __ bind(&skip_cache); |
1643 __ subq(rsp, Immediate(kDoubleSize)); | 1643 __ subq(rsp, Immediate(kDoubleSize)); |
1644 __ movsd(Operand(rsp, 0), xmm1); | 1644 __ movsd(Operand(rsp, 0), xmm1); |
1645 __ fld_d(Operand(rsp, 0)); | 1645 __ fld_d(Operand(rsp, 0)); |
1646 GenerateOperation(masm); | 1646 GenerateOperation(masm, type_); |
1647 __ fstp_d(Operand(rsp, 0)); | 1647 __ fstp_d(Operand(rsp, 0)); |
1648 __ movsd(xmm1, Operand(rsp, 0)); | 1648 __ movsd(xmm1, Operand(rsp, 0)); |
1649 __ addq(rsp, Immediate(kDoubleSize)); | 1649 __ addq(rsp, Immediate(kDoubleSize)); |
1650 // We return the value in xmm1 without adding it to the cache, but | 1650 // We return the value in xmm1 without adding it to the cache, but |
1651 // we cause a scavenging GC so that future allocations will succeed. | 1651 // we cause a scavenging GC so that future allocations will succeed. |
1652 { | 1652 { |
1653 FrameScope scope(masm, StackFrame::INTERNAL); | 1653 FrameScope scope(masm, StackFrame::INTERNAL); |
1654 // Allocate an unused object bigger than a HeapNumber. | 1654 // Allocate an unused object bigger than a HeapNumber. |
1655 __ Push(Smi::FromInt(2 * kDoubleSize)); | 1655 __ Push(Smi::FromInt(2 * kDoubleSize)); |
1656 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); | 1656 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); |
(...skipping 31 matching lines...)
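As context for the cache update in the hunk above (`__ movq(Operand(rcx, 0), rbx)` storing the input bits and `__ movq(Operand(rcx, 2 * kIntSize), rax)` storing the result), here is a minimal sketch of the entry layout those offsets imply. The struct and field names are illustrative only, not taken from the patch or from V8's headers.

```cpp
#include <cstddef>
#include <cstdint>

// Hypothetical view of one transcendental cache entry as the stub addresses
// it: a single 64-bit store at offset 0 fills two 32-bit input slots with the
// bit pattern of the argument, and the cached result pointer is written at
// offset 2 * kIntSize (i.e. 8 bytes in).
struct CacheEntrySketch {
  uint32_t input_lo;  // Low 32 bits of the input double's bit pattern.
  uint32_t input_hi;  // High 32 bits of the input double's bit pattern.
  void* output;       // Cached result object, at byte offset 8.
};

static_assert(offsetof(CacheEntrySketch, output) == 2 * sizeof(int32_t),
              "output must sit at 2 * kIntSize as the stub assumes");
```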
1688 case TranscendentalCache::COS: return Runtime::kMath_cos; | 1688 case TranscendentalCache::COS: return Runtime::kMath_cos; |
1689 case TranscendentalCache::TAN: return Runtime::kMath_tan; | 1689 case TranscendentalCache::TAN: return Runtime::kMath_tan; |
1690 case TranscendentalCache::LOG: return Runtime::kMath_log; | 1690 case TranscendentalCache::LOG: return Runtime::kMath_log; |
1691 default: | 1691 default: |
1692 UNIMPLEMENTED(); | 1692 UNIMPLEMENTED(); |
1693 return Runtime::kAbort; | 1693 return Runtime::kAbort; |
1694 } | 1694 } |
1695 } | 1695 } |
1696 | 1696 |
1697 | 1697 |
1698 void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) { | 1698 void TranscendentalCacheStub::GenerateOperation( |
| 1699 MacroAssembler* masm, TranscendentalCache::Type type) { |
1699 // Registers: | 1700 // Registers: |
1700 // rax: Newly allocated HeapNumber, which must be preserved. | 1701 // rax: Newly allocated HeapNumber, which must be preserved. |
1701 // rbx: Bits of input double. Must be preserved. | 1702 // rbx: Bits of input double. Must be preserved. |
1702 // rcx: Pointer to cache entry. Must be preserved. | 1703 // rcx: Pointer to cache entry. Must be preserved. |
1703 // st(0): Input double | 1704 // st(0): Input double |
1704 Label done; | 1705 Label done; |
1705 if (type_ == TranscendentalCache::SIN || | 1706 if (type == TranscendentalCache::SIN || |
1706 type_ == TranscendentalCache::COS || | 1707 type == TranscendentalCache::COS || |
1707 type_ == TranscendentalCache::TAN) { | 1708 type == TranscendentalCache::TAN) { |
1708 // Both fsin and fcos require arguments in the range +/-2^63 and | 1709 // Both fsin and fcos require arguments in the range +/-2^63 and |
1709 // return NaN for infinities and NaN. They can share all code except | 1710 // return NaN for infinities and NaN. They can share all code except |
1710 // the actual fsin/fcos operation. | 1711 // the actual fsin/fcos operation. |
1711 Label in_range; | 1712 Label in_range; |
1712 // If argument is outside the range -2^63..2^63, fsin/cos doesn't | 1713 // If argument is outside the range -2^63..2^63, fsin/cos doesn't |
1713 // work. We must reduce it to the appropriate range. | 1714 // work. We must reduce it to the appropriate range. |
1714 __ movq(rdi, rbx); | 1715 __ movq(rdi, rbx); |
1715 // Move exponent and sign bits to low bits. | 1716 // Move exponent and sign bits to low bits. |
1716 __ shr(rdi, Immediate(HeapNumber::kMantissaBits)); | 1717 __ shr(rdi, Immediate(HeapNumber::kMantissaBits)); |
1717 // Remove sign bit. | 1718 // Remove sign bit. |
1718 __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1)); | 1719 __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1)); |
1719 int supported_exponent_limit = (63 + HeapNumber::kExponentBias); | 1720 int supported_exponent_limit = (63 + HeapNumber::kExponentBias); |
1720 __ cmpl(rdi, Immediate(supported_exponent_limit)); | 1721 __ cmpl(rdi, Immediate(supported_exponent_limit)); |
1721 __ j(below, &in_range); | 1722 __ j(below, &in_range); |
1722 // Check for infinity and NaN. Both return NaN for sin. | 1723 // Check for infinity and NaN. Both return NaN for sin. |
1723 __ cmpl(rdi, Immediate(0x7ff)); | 1724 __ cmpl(rdi, Immediate(0x7ff)); |
1724 Label non_nan_result; | 1725 Label non_nan_result; |
1725 __ j(not_equal, &non_nan_result, Label::kNear); | 1726 __ j(not_equal, &non_nan_result, Label::kNear); |
1726 // Input is +/-Infinity or NaN. Result is NaN. | 1727 // Input is +/-Infinity or NaN. Result is NaN. |
1727 __ fstp(0); | 1728 __ fstp(0); |
1728 __ LoadRoot(kScratchRegister, Heap::kNanValueRootIndex); | 1729 // NaN is represented by 0x7ff8000000000000. |
Yang 2012/03/03 11:05:01: Loading NaN from the root list won't work if we ar…
Sven Panne 2012/03/05 07:45:47: I think the cleaner and more consistent way would…
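The replacement code below materializes the NaN on the stack instead of loading it from the root list, as discussed in the comments above. As a side note, a minimal C++ sketch of the same bit pattern (0x7ff80000 in the high word, zero in the low word, i.e. 0x7FF8000000000000) yielding a quiet NaN; the helper name is illustrative, not part of the patch.

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// Assembles the double whose bits are 0x7FF8000000000000: sign 0, exponent
// all ones, top mantissa bit set -- the canonical quiet NaN that the stub
// writes to the stack as two 32-bit immediates and reloads with fld_d.
double QuietNaNFromBits() {
  const uint64_t kQuietNaNBits = 0x7FF8000000000000ULL;
  double value;
  std::memcpy(&value, &kQuietNaNBits, sizeof(value));
  return value;
}

int main() {
  std::printf("%f\n", QuietNaNFromBits());  // Prints "nan".
  return 0;
}
```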
1729 __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset)); | 1730 __ subq(rsp, Immediate(kPointerSize)); |
| 1731 __ movl(Operand(rsp, 4), Immediate(0x7ff80000)); |
| 1732 __ movl(Operand(rsp, 0), Immediate(0x00000000)); |
| 1733 __ fld_d(Operand(rsp, 0)); |
| 1734 __ addq(rsp, Immediate(kPointerSize)); |
1730 __ jmp(&done); | 1735 __ jmp(&done); |
1731 | 1736 |
1732 __ bind(&non_nan_result); | 1737 __ bind(&non_nan_result); |
1733 | 1738 |
1734 // Use fpmod to restrict argument to the range +/-2*PI. | 1739 // Use fpmod to restrict argument to the range +/-2*PI. |
1735 __ movq(rdi, rax); // Save rax before using fnstsw_ax. | 1740 __ movq(rdi, rax); // Save rax before using fnstsw_ax. |
1736 __ fldpi(); | 1741 __ fldpi(); |
1737 __ fadd(0); | 1742 __ fadd(0); |
1738 __ fld(1); | 1743 __ fld(1); |
1739 // FPU Stack: input, 2*pi, input. | 1744 // FPU Stack: input, 2*pi, input. |
(...skipping 20 matching lines...)
1760 // continue computation. | 1765 // continue computation. |
1761 __ j(not_zero, &partial_remainder_loop); | 1766 __ j(not_zero, &partial_remainder_loop); |
1762 } | 1767 } |
1763 // FPU Stack: input, 2*pi, input % 2*pi | 1768 // FPU Stack: input, 2*pi, input % 2*pi |
1764 __ fstp(2); | 1769 __ fstp(2); |
1765 // FPU Stack: input % 2*pi, 2*pi, | 1770 // FPU Stack: input % 2*pi, 2*pi, |
1766 __ fstp(0); | 1771 __ fstp(0); |
1767 // FPU Stack: input % 2*pi | 1772 // FPU Stack: input % 2*pi |
1768 __ movq(rax, rdi); // Restore rax, pointer to the new HeapNumber. | 1773 __ movq(rax, rdi); // Restore rax, pointer to the new HeapNumber. |
1769 __ bind(&in_range); | 1774 __ bind(&in_range); |
1770 switch (type_) { | 1775 switch (type) { |
1771 case TranscendentalCache::SIN: | 1776 case TranscendentalCache::SIN: |
1772 __ fsin(); | 1777 __ fsin(); |
1773 break; | 1778 break; |
1774 case TranscendentalCache::COS: | 1779 case TranscendentalCache::COS: |
1775 __ fcos(); | 1780 __ fcos(); |
1776 break; | 1781 break; |
1777 case TranscendentalCache::TAN: | 1782 case TranscendentalCache::TAN: |
1778 // FPTAN calculates tangent onto st(0) and pushes 1.0 onto the | 1783 // FPTAN calculates tangent onto st(0) and pushes 1.0 onto the |
1779 // FP register stack. | 1784 // FP register stack. |
1780 __ fptan(); | 1785 __ fptan(); |
1781 __ fstp(0); // Pop FP register stack. | 1786 __ fstp(0); // Pop FP register stack. |
1782 break; | 1787 break; |
1783 default: | 1788 default: |
1784 UNREACHABLE(); | 1789 UNREACHABLE(); |
1785 } | 1790 } |
1786 __ bind(&done); | 1791 __ bind(&done); |
1787 } else { | 1792 } else { |
1788 ASSERT(type_ == TranscendentalCache::LOG); | 1793 ASSERT(type == TranscendentalCache::LOG); |
1789 __ fldln2(); | 1794 __ fldln2(); |
1790 __ fxch(); | 1795 __ fxch(); |
1791 __ fyl2x(); | 1796 __ fyl2x(); |
1792 } | 1797 } |
1793 } | 1798 } |
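To summarize the control flow the emitted code follows for the trigonometric cases, here is a hedged C++ sketch using ordinary library calls in place of the x87 instructions; the function name and constants are illustrative and not part of the patch.

```cpp
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>

// Mirrors the emitted logic for SIN (COS and TAN differ only in the final
// operation): extract the biased exponent; for Infinity/NaN return a quiet
// NaN; for |x| >= 2^63 (biased exponent >= 63 + 1023) reduce the argument
// modulo 2*pi first, as the fprem loop does, then apply the operation.
double SketchSin(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  const int biased_exponent = static_cast<int>((bits >> 52) & 0x7FF);
  if (biased_exponent == 0x7FF) {
    return std::numeric_limits<double>::quiet_NaN();  // Infinity or NaN input.
  }
  if (biased_exponent >= 63 + 1023) {
    const double kTwoPi = 6.283185307179586;
    x = std::fmod(x, kTwoPi);  // Stand-in for the x87 fprem reduction loop.
  }
  return std::sin(x);
}
```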
1794 | 1799 |
1795 | 1800 |
1796 // Input: rdx, rax are the left and right objects of a bit op. | 1801 // Input: rdx, rax are the left and right objects of a bit op. |
1797 // Output: rax, rcx are left and right integers for a bit op. | 1802 // Output: rax, rcx are left and right integers for a bit op. |
1798 void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) { | 1803 void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) { |
(...skipping 4527 matching lines...)
6326 xmm0, | 6331 xmm0, |
6327 &slow_elements); | 6332 &slow_elements); |
6328 __ ret(0); | 6333 __ ret(0); |
6329 } | 6334 } |
6330 | 6335 |
6331 #undef __ | 6336 #undef __ |
6332 | 6337 |
6333 } } // namespace v8::internal | 6338 } } // namespace v8::internal |
6334 | 6339 |
6335 #endif // V8_TARGET_ARCH_X64 | 6340 #endif // V8_TARGET_ARCH_X64 |