Chromium Code Reviews

Side by Side Diff: src/ia32/lithium-codegen-ia32.cc

Issue 18041003: Implement X87 stack tracking and x87 multiplication (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: fix x87 usage of bleeding_edge commits (created 7 years, 5 months ago)
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 335 matching lines...)
346 current_instruction_, 346 current_instruction_,
347 instr->hydrogen_value()->id(), 347 instr->hydrogen_value()->id(),
348 instr->Mnemonic()); 348 instr->Mnemonic());
349 } 349 }
350 350
351 if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr); 351 if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr);
352 352
353 instr->CompileToNative(this); 353 instr->CompileToNative(this);
354 354
355 if (!CpuFeatures::IsSupported(SSE2)) { 355 if (!CpuFeatures::IsSupported(SSE2)) {
356 ASSERT(!instr->HasDoubleRegisterResult() || x87_stack_depth_ == 1);
357 if (FLAG_debug_code && FLAG_enable_slow_asserts) { 356 if (FLAG_debug_code && FLAG_enable_slow_asserts) {
358 __ VerifyX87StackDepth(x87_stack_depth_); 357 __ VerifyX87StackDepth(x87_stack_depth_);
359 } 358 }
360 } 359 }
361 } 360 }
362 EnsureSpaceForLazyDeopt(); 361 EnsureSpaceForLazyDeopt();
363 return !is_aborted(); 362 return !is_aborted();
364 } 363 }
365 364
366 365
(...skipping 127 matching lines...)
494 safepoints_.Emit(masm(), GetStackSlotCount()); 493 safepoints_.Emit(masm(), GetStackSlotCount());
495 return !is_aborted(); 494 return !is_aborted();
496 } 495 }
497 496
498 497
499 Register LCodeGen::ToRegister(int index) const { 498 Register LCodeGen::ToRegister(int index) const {
500 return Register::FromAllocationIndex(index); 499 return Register::FromAllocationIndex(index);
501 } 500 }
502 501
503 502
503 X87Register LCodeGen::ToX87Register(int index) const {
504 return X87Register::FromAllocationIndex(index);
505 }
506
507
504 XMMRegister LCodeGen::ToDoubleRegister(int index) const { 508 XMMRegister LCodeGen::ToDoubleRegister(int index) const {
505 return XMMRegister::FromAllocationIndex(index); 509 return XMMRegister::FromAllocationIndex(index);
506 } 510 }
507 511
508 512
509 bool LCodeGen::IsX87TopOfStack(LOperand* op) const { 513 void LCodeGen::X87LoadForUsage(X87Register reg) {
510 return op->IsDoubleRegister(); 514 ASSERT(X87StackContains(reg));
515 X87Fxch(reg);
516 x87_stack_depth_--;
511 } 517 }
512 518
513 519
514 void LCodeGen::ReadX87Operand(Operand dst) { 520 void LCodeGen::X87Fxch(X87Register reg, int other_slot) {
515 ASSERT(x87_stack_depth_ == 1); 521 ASSERT(X87StackContains(reg) && x87_stack_depth_ > other_slot);
522 int i = X87ArrayIndex(reg);
523 int st = x87_st2idx(i);
524 if (st != other_slot) {
525 int other_i = x87_st2idx(other_slot);
526 X87Register other = x87_stack_[other_i];
527 x87_stack_[other_i] = reg;
528 x87_stack_[i] = other;
529 if (st == 0) {
530 __ fxch(other_slot);
531 } else if (other_slot == 0) {
532 __ fxch(st);
533 } else {
534 __ fxch(st);
535 __ fxch(other_slot);
536 __ fxch(st);
537 }
538 }
539 }
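
Editor's note (not part of the patch): X87Fxch brings `reg` into stack slot `other_slot` using at most three fxch instructions, because fxch(k) can only exchange ST(0) with ST(k). A minimal standalone model of the three-exchange swap, with hypothetical names:

// Standalone model (editor's sketch, not V8 code) of the fxch sequence in
// X87Fxch above: fxch(k) exchanges ST(0) with ST(k), so swapping two
// arbitrary slots a and b, neither of which is the top, takes three
// exchanges.
#include <algorithm>
#include <cassert>

static void fxch(int* st, int k) { std::swap(st[0], st[k]); }

static void SwapSlots(int* st, int a, int b) {
  if (a == b) return;
  if (a == 0) { fxch(st, b); return; }
  if (b == 0) { fxch(st, a); return; }
  fxch(st, a);  // ST(0) <-> ST(a)
  fxch(st, b);  // the old ST(a) value is now parked in ST(b)
  fxch(st, a);  // restore ST(0); ST(a) now holds the old ST(b) value
}

int main() {
  int st[4] = {10, 11, 12, 13};  // st[i] models ST(i)
  SwapSlots(st, 1, 3);
  assert(st[0] == 10 && st[1] == 13 && st[2] == 12 && st[3] == 11);
  return 0;
}
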
540
541
542 int LCodeGen::x87_st2idx(int pos) {
543 return x87_stack_depth_ - pos - 1;
544 }
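
Editor's note (not part of the patch): x87_stack_ is kept bottom-up, so the most recently pushed register sits at index x87_stack_depth_ - 1 and at hardware ST(0); because the formula is symmetric, x87_st2idx converts in either direction. A quick standalone check:

// Editor's sketch, not V8 code: the same formula maps array index -> ST slot
// and ST slot -> array index.
#include <cassert>

static int x87_st2idx(int pos, int depth) { return depth - pos - 1; }

int main() {
  const int depth = 3;
  assert(x87_st2idx(2, depth) == 0);  // array slot 2 (latest push) is ST(0)
  assert(x87_st2idx(0, depth) == 2);  // array slot 0 (oldest) is ST(2)
  return 0;
}
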
545
546
547 int LCodeGen::X87ArrayIndex(X87Register reg) {
548 for (int i = 0; i < x87_stack_depth_; i++) {
549 if (x87_stack_[i].is(reg)) return i;
550 }
551 UNREACHABLE();
552 return -1;
553 }
554
555
556 bool LCodeGen::X87StackContains(X87Register reg) {
557 for (int i = 0; i < x87_stack_depth_; i++) {
558 if (x87_stack_[i].is(reg)) return true;
559 }
560 return false;
561 }
562
563
564 void LCodeGen::X87Free(X87Register reg) {
565 ASSERT(X87StackContains(reg));
566 int i = X87ArrayIndex(reg);
567 int st = x87_st2idx(i);
568 if (st > 0) {
569 // keep track of how fstp(i) changes the order of elements
570 int tos_i = x87_st2idx(0);
571 x87_stack_[i] = x87_stack_[tos_i];
572 }
573 x87_stack_depth_--;
574 __ fstp(st);
575 }
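
Editor's note (not part of the patch): fstp st(i) stores ST(0) into ST(i) and then pops, so freeing a register that is not on top drops the current top value into the freed slot; the array bookkeeping in X87Free mirrors exactly that. A standalone model:

// Editor's sketch, not V8 code: how X87Free keeps x87_stack_ in sync with
// the hardware effect of "fstp st(i)".
#include <cassert>
#include <string>

int main() {
  std::string x87_stack[3] = {"d0", "d1", "d2"};  // bottom .. top
  int depth = 3;

  // Free "d1": array index i = 1, hardware slot st = depth - i - 1 = 1.
  int i = 1;
  int st = depth - i - 1;
  if (st > 0) {
    int tos_i = depth - 1;            // array index of ST(0)
    x87_stack[i] = x87_stack[tos_i];  // the former top now occupies slot i
  }
  depth--;                            // fstp(st) shrinks the hardware stack

  assert(depth == 2);
  assert(x87_stack[0] == "d0" && x87_stack[1] == "d2");
  return 0;
}
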
576
577
578 void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
579 if (X87StackContains(dst)) {
580 X87Fxch(dst);
581 __ fstp(0);
582 } else {
583 ASSERT(x87_stack_depth_ < X87Register::kNumAllocatableRegisters);
584 x87_stack_[x87_stack_depth_] = dst;
585 x87_stack_depth_++;
586 }
587 X87Fld(src, opts);
588 }
589
590
591 void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
592 if (opts == kX87DoubleOperand) {
593 __ fld_d(src);
594 } else if (opts == kX87FloatOperand) {
595 __ fld_s(src);
596 } else if (opts == kX87IntOperand) {
597 __ fild_s(src);
598 } else {
599 UNREACHABLE();
600 }
601 }
602
603
604 void LCodeGen::X87Mov(Operand dst, X87Register src) {
605 X87Fxch(src);
516 __ fst_d(dst); 606 __ fst_d(dst);
517 } 607 }
518 608
519 609
520 void LCodeGen::PushX87DoubleOperand(Operand src) { 610 void LCodeGen::X87PrepareToWrite(X87Register reg) {
521 ASSERT(x87_stack_depth_ == 0); 611 if (X87StackContains(reg)) {
522 x87_stack_depth_++; 612 X87Free(reg);
523 __ fld_d(src); 613 }
614 // Mark this register as the next register to write to
615 x87_stack_[x87_stack_depth_] = reg;
524 } 616 }
525 617
526 618
527 void LCodeGen::PushX87FloatOperand(Operand src) { 619 void LCodeGen::X87CommitWrite(X87Register reg) {
528 ASSERT(x87_stack_depth_ == 0); 620 // Assert the reg is prepared to write, but not on the virtual stack yet
621 ASSERT(!X87StackContains(reg) && x87_stack_[x87_stack_depth_].is(reg) &&
622 x87_stack_depth_ < X87Register::kNumAllocatableRegisters);
529 x87_stack_depth_++; 623 x87_stack_depth_++;
530 __ fld_s(src);
531 } 624 }
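
Editor's note (not part of the patch): X87PrepareToWrite and X87CommitWrite bracket code that pushes onto the hardware FPU stack itself; prepare frees and reserves the slot, and commit only bumps the virtual depth once the value is really there. A hypothetical, simplified caller in the style of this file (the helper name LoadSmiAsDouble is an assumption):

// Editor's sketch only; it shows the intended call order around a raw
// fild_s push, mirroring the load_smi path in EmitNumberUntagDNoSSE2.
void LCodeGen::LoadSmiAsDouble(Register untagged_smi, X87Register res_reg) {
  X87PrepareToWrite(res_reg);    // frees res_reg if tracked; reserves a slot
  __ push(untagged_smi);
  __ fild_s(Operand(esp, 0));    // the actual hardware push happens here
  __ add(esp, Immediate(kPointerSize));
  X87CommitWrite(res_reg);       // virtual depth now matches the FPU stack
}
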
532 625
533 626
534 void LCodeGen::PopX87() { 627 void LCodeGen::X87PrepareBinaryOp(
535 ASSERT(x87_stack_depth_ == 1); 628 X87Register left, X87Register right, X87Register result) {
536 x87_stack_depth_--; 629 // You need to use DefineSameAsFirst for x87 instructions
537 __ fstp(0); 630 ASSERT(result.is(left));
538 } 631 X87Fxch(right, 1);
539 632 X87Fxch(left);
540
541 void LCodeGen::CurrentInstructionReturnsX87Result() {
542 ASSERT(x87_stack_depth_ <= 1);
543 if (x87_stack_depth_ == 0) {
544 x87_stack_depth_ = 1;
545 }
546 } 633 }
547 634
548 635
549 void LCodeGen::FlushX87StackIfNecessary(LInstruction* instr) { 636 void LCodeGen::FlushX87StackIfNecessary(LInstruction* instr) {
550 if (x87_stack_depth_ > 0) { 637 if (x87_stack_depth_ > 0 && instr->ClobbersDoubleRegisters()) {
551 if ((instr->ClobbersDoubleRegisters() || 638 bool double_inputs = instr->HasDoubleRegisterInput();
552 instr->HasDoubleRegisterResult()) && 639
553 !instr->HasDoubleRegisterInput()) { 640 // Flush stack from tos down, since FreeX87() will mess with tos
554 PopX87(); 641 for (int i = x87_stack_depth_-1; i >= 0; i--) {
642 X87Register reg = x87_stack_[i];
643 // Skip registers which contain the inputs for the next instruction
644 // when flushing the stack
645 if (double_inputs && instr->IsDoubleInput(reg, this)) {
646 continue;
647 }
648 X87Free(reg);
649 if (i < x87_stack_depth_-1) i++;
650 }
651 }
652 if (instr->IsReturn()) {
653 while (x87_stack_depth_ > 0) {
654 __ fstp(0);
655 x87_stack_depth_--;
555 } 656 }
556 } 657 }
557 } 658 }
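
Editor's note (not part of the patch): the flush loop above scans the virtual stack from the top down, keeps any register that is an input of the current instruction, and re-examines slot i after X87Free has dropped the former top into it. A standalone model of that scan:

// Editor's sketch, not V8 code: only the instruction's inputs survive the
// flush, and the `i++` re-checks the register that X87Free moved down.
#include <cassert>
#include <set>
#include <vector>

int main() {
  std::vector<int> stack = {0, 1, 2, 3};       // virtual x87_stack_, bottom..top
  std::set<int> inputs = {2, 3};               // registers the instruction reads
  int depth = static_cast<int>(stack.size());

  for (int i = depth - 1; i >= 0; i--) {
    int reg = stack[i];
    if (inputs.count(reg)) continue;           // keep instruction inputs
    // X87Free(reg): the former top drops into slot i, depth shrinks.
    if (depth - i - 1 > 0) stack[i] = stack[depth - 1];
    depth--;
    if (i < depth - 1) i++;                    // re-examine the moved register
  }

  stack.resize(depth);
  assert((stack == std::vector<int>{2, 3}));   // only the inputs remain
  return 0;
}
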
558 659
559 660
560 Register LCodeGen::ToRegister(LOperand* op) const { 661 Register LCodeGen::ToRegister(LOperand* op) const {
561 ASSERT(op->IsRegister()); 662 ASSERT(op->IsRegister());
562 return ToRegister(op->index()); 663 return ToRegister(op->index());
563 } 664 }
564 665
565 666
667 X87Register LCodeGen::ToX87Register(LOperand* op) const {
668 ASSERT(op->IsDoubleRegister());
669 return ToX87Register(op->index());
670 }
671
672
566 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const { 673 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
567 ASSERT(op->IsDoubleRegister()); 674 ASSERT(op->IsDoubleRegister());
568 return ToDoubleRegister(op->index()); 675 return ToDoubleRegister(op->index());
569 } 676 }
570 677
571 678
572 int LCodeGen::ToInteger32(LConstantOperand* op) const { 679 int LCodeGen::ToInteger32(LConstantOperand* op) const {
573 HConstant* constant = chunk_->LookupConstant(op); 680 HConstant* constant = chunk_->LookupConstant(op);
574 return constant->Integer32Value(); 681 return constant->Integer32Value();
575 } 682 }
(...skipping 252 matching lines...)
828 deoptimizations_.Add(environment, zone()); 935 deoptimizations_.Add(environment, zone());
829 } 936 }
830 } 937 }
831 938
832 939
833 void LCodeGen::DeoptimizeIf(Condition cc, 940 void LCodeGen::DeoptimizeIf(Condition cc,
834 LEnvironment* environment, 941 LEnvironment* environment,
835 Deoptimizer::BailoutType bailout_type) { 942 Deoptimizer::BailoutType bailout_type) {
836 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); 943 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
837 ASSERT(environment->HasBeenRegistered()); 944 ASSERT(environment->HasBeenRegistered());
838 // It's an error to deoptimize with the x87 fp stack in use.
839 ASSERT(x87_stack_depth_ == 0);
840 int id = environment->deoptimization_index(); 945 int id = environment->deoptimization_index();
841 ASSERT(info()->IsOptimizing() || info()->IsStub()); 946 ASSERT(info()->IsOptimizing() || info()->IsStub());
842 Address entry = 947 Address entry =
843 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); 948 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
844 if (entry == NULL) { 949 if (entry == NULL) {
845 Abort("bailout was not prepared"); 950 Abort("bailout was not prepared");
846 return; 951 return;
847 } 952 }
848 953
849 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) { 954 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
(...skipping 17 matching lines...)
867 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); 972 __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
868 973
869 __ bind(&no_deopt); 974 __ bind(&no_deopt);
870 __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset), 975 __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset),
871 eax); 976 eax);
872 __ pop(ebx); 977 __ pop(ebx);
873 __ pop(eax); 978 __ pop(eax);
874 __ popfd(); 979 __ popfd();
875 } 980 }
876 981
982 // Before Instructions which can deopt, we normally flush the x87 stack. But
983 // we can have inputs or outputs of the current instruction on the stack,
984 // thus we need to flush them here, to leave the stack in a consistent state.
985 if (x87_stack_depth_ > 0) {
986 Label done;
987 if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
988 for (int i = 0; i < x87_stack_depth_; i++) __ fstp(0);
mvstanton 2013/07/10 09:57:33 Put the x87 stack flushing operation in a method c
989 __ bind(&done);
990 }
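
Editor's note, following the review comment above: a helper along these lines would factor the flush out of DeoptimizeIf, which would then call it inside the existing `if (x87_stack_depth_ > 0)` block in place of the inline fstp loop. The method name EmitFlushX87ForDeopt and its exact shape are assumptions, not part of this patch set:

// Editor's sketch only; name and placement are hypothetical.
void LCodeGen::EmitFlushX87ForDeopt() {
  // Inputs or outputs of the current instruction can still be live on the
  // x87 stack here; pop them so the deopt entry sees a consistent (empty)
  // FPU stack. The caller only reaches this on the deopting path.
  for (int i = 0; i < x87_stack_depth_; i++) __ fstp(0);
}
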
991
877 if (FLAG_trap_on_deopt && info()->IsOptimizing()) { 992 if (FLAG_trap_on_deopt && info()->IsOptimizing()) {
878 Label done; 993 Label done;
879 if (cc != no_condition) { 994 if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
880 __ j(NegateCondition(cc), &done, Label::kNear);
881 }
882 __ int3(); 995 __ int3();
883 __ bind(&done); 996 __ bind(&done);
884 } 997 }
885 998
886 ASSERT(info()->IsStub() || frame_is_built_); 999 ASSERT(info()->IsStub() || frame_is_built_);
887 if (cc == no_condition && frame_is_built_) { 1000 if (cc == no_condition && frame_is_built_) {
888 if (bailout_type == Deoptimizer::LAZY) { 1001 if (bailout_type == Deoptimizer::LAZY) {
889 __ call(entry, RelocInfo::RUNTIME_ENTRY); 1002 __ call(entry, RelocInfo::RUNTIME_ENTRY);
890 } else { 1003 } else {
891 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); 1004 __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
(...skipping 822 matching lines...)
1714 } 1827 }
1715 1828
1716 1829
1717 void LCodeGen::DoConstantD(LConstantD* instr) { 1830 void LCodeGen::DoConstantD(LConstantD* instr) {
1718 double v = instr->value(); 1831 double v = instr->value();
1719 uint64_t int_val = BitCast<uint64_t, double>(v); 1832 uint64_t int_val = BitCast<uint64_t, double>(v);
1720 int32_t lower = static_cast<int32_t>(int_val); 1833 int32_t lower = static_cast<int32_t>(int_val);
1721 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); 1834 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
1722 1835
1723 if (!CpuFeatures::IsSafeForSnapshot(SSE2)) { 1836 if (!CpuFeatures::IsSafeForSnapshot(SSE2)) {
1837 __ push(Immediate(upper));
1724 __ push(Immediate(lower)); 1838 __ push(Immediate(lower));
1725 __ push(Immediate(upper)); 1839 X87Mov(ToX87Register(instr->result()), Operand(esp, 0));
1726 PushX87DoubleOperand(Operand(esp, 0));
1727 __ add(Operand(esp), Immediate(kDoubleSize)); 1840 __ add(Operand(esp), Immediate(kDoubleSize));
1728 CurrentInstructionReturnsX87Result();
1729 } else { 1841 } else {
1730 CpuFeatureScope scope1(masm(), SSE2); 1842 CpuFeatureScope scope1(masm(), SSE2);
1731 ASSERT(instr->result()->IsDoubleRegister()); 1843 ASSERT(instr->result()->IsDoubleRegister());
1732 XMMRegister res = ToDoubleRegister(instr->result()); 1844 XMMRegister res = ToDoubleRegister(instr->result());
1733 if (int_val == 0) { 1845 if (int_val == 0) {
1734 __ xorps(res, res); 1846 __ xorps(res, res);
1735 } else { 1847 } else {
1736 Register temp = ToRegister(instr->temp()); 1848 Register temp = ToRegister(instr->temp());
1737 if (CpuFeatures::IsSupported(SSE4_1)) { 1849 if (CpuFeatures::IsSupported(SSE4_1)) {
1738 CpuFeatureScope scope2(masm(), SSE4_1); 1850 CpuFeatureScope scope2(masm(), SSE4_1);
(...skipping 244 matching lines...)
1983 __ j(parity_even, &return_left, Label::kNear); // left == NaN. 2095 __ j(parity_even, &return_left, Label::kNear); // left == NaN.
1984 __ bind(&return_right); 2096 __ bind(&return_right);
1985 __ movsd(left_reg, right_reg); 2097 __ movsd(left_reg, right_reg);
1986 2098
1987 __ bind(&return_left); 2099 __ bind(&return_left);
1988 } 2100 }
1989 } 2101 }
1990 2102
1991 2103
1992 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { 2104 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1993 CpuFeatureScope scope(masm(), SSE2); 2105 if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
1994 XMMRegister left = ToDoubleRegister(instr->left()); 2106 CpuFeatureScope scope(masm(), SSE2);
1995 XMMRegister right = ToDoubleRegister(instr->right()); 2107 XMMRegister left = ToDoubleRegister(instr->left());
1996 XMMRegister result = ToDoubleRegister(instr->result()); 2108 XMMRegister right = ToDoubleRegister(instr->right());
1997 // Modulo uses a fixed result register. 2109 XMMRegister result = ToDoubleRegister(instr->result());
1998 ASSERT(instr->op() == Token::MOD || left.is(result)); 2110 // Modulo uses a fixed result register.
1999 switch (instr->op()) { 2111 ASSERT(instr->op() == Token::MOD || left.is(result));
2000 case Token::ADD: 2112 switch (instr->op()) {
2001 __ addsd(left, right); 2113 case Token::ADD:
2002 break; 2114 __ addsd(left, right);
2003 case Token::SUB: 2115 break;
2004 __ subsd(left, right); 2116 case Token::SUB:
2005 break; 2117 __ subsd(left, right);
2006 case Token::MUL: 2118 break;
2007 __ mulsd(left, right); 2119 case Token::MUL:
2008 break; 2120 __ mulsd(left, right);
2009 case Token::DIV: 2121 break;
2010 __ divsd(left, right); 2122 case Token::DIV:
2011 // Don't delete this mov. It may improve performance on some CPUs, 2123 __ divsd(left, right);
2012 // when there is a mulsd depending on the result 2124 // Don't delete this mov. It may improve performance on some CPUs,
2013 __ movaps(left, left); 2125 // when there is a mulsd depending on the result
2014 break; 2126 __ movaps(left, left);
2015 case Token::MOD: { 2127 break;
2016 // Pass two doubles as arguments on the stack. 2128 case Token::MOD: {
2017 __ PrepareCallCFunction(4, eax); 2129 // Pass two doubles as arguments on the stack.
2018 __ movdbl(Operand(esp, 0 * kDoubleSize), left); 2130 __ PrepareCallCFunction(4, eax);
2019 __ movdbl(Operand(esp, 1 * kDoubleSize), right); 2131 __ movdbl(Operand(esp, 0 * kDoubleSize), left);
2020 __ CallCFunction( 2132 __ movdbl(Operand(esp, 1 * kDoubleSize), right);
2021 ExternalReference::double_fp_operation(Token::MOD, isolate()), 2133 __ CallCFunction(
2022 4); 2134 ExternalReference::double_fp_operation(Token::MOD, isolate()),
2135 4);
2023 2136
2024 // Return value is in st(0) on ia32. 2137 // Return value is in st(0) on ia32.
2025 // Store it into the (fixed) result register. 2138 // Store it into the (fixed) result register.
2026 __ sub(Operand(esp), Immediate(kDoubleSize)); 2139 __ sub(Operand(esp), Immediate(kDoubleSize));
2027 __ fstp_d(Operand(esp, 0)); 2140 __ fstp_d(Operand(esp, 0));
2028 __ movdbl(result, Operand(esp, 0)); 2141 __ movdbl(result, Operand(esp, 0));
2029 __ add(Operand(esp), Immediate(kDoubleSize)); 2142 __ add(Operand(esp), Immediate(kDoubleSize));
2030 break; 2143 break;
2144 }
2145 default:
2146 UNREACHABLE();
2147 break;
2031 } 2148 }
2032 default: 2149 } else {
2033 UNREACHABLE(); 2150 X87Register left = ToX87Register(instr->left());
2034 break; 2151 X87Register right = ToX87Register(instr->right());
2152 X87Register result = ToX87Register(instr->result());
2153 X87PrepareBinaryOp(left, right, result);
2154 switch (instr->op()) {
2155 case Token::MUL:
2156 __ fmul_i(1);
2157 break;
2158 default:
2159 UNREACHABLE();
2160 break;
2161 }
2035 } 2162 }
2036 } 2163 }
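
Editor's note (not part of the patch): on the x87 branch above, X87PrepareBinaryOp leaves `left` at ST(0) and `right` at ST(1); fmul_i(1) is assumed here to mean ST(0) *= ST(1), so the product lands in the slot tracked as `left`, which is why the result must be defined same-as-first. A standalone model under that assumption:

// Editor's sketch, not V8 code: the product ends up where left/result is
// tracked.
#include <cassert>

int main() {
  double st[2];                 // st[0] models ST(0), st[1] models ST(1)
  const double left = 3.0, right = 4.0;

  // X87PrepareBinaryOp(left, right, result): right -> ST(1), left -> ST(0).
  st[1] = right;
  st[0] = left;

  // __ fmul_i(1): ST(0) *= ST(1)  (encoding assumed, see note above).
  st[0] *= st[1];

  assert(st[0] == 12.0);        // result aliases `left` (DefineSameAsFirst)
  return 0;
}
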
2037 2164
2038 2165
2039 void LCodeGen::DoNegateNoSSE2D(LNegateNoSSE2D* instr) {
2040 __ push(Immediate(-1));
2041 __ fild_s(Operand(esp, 0));
2042 __ add(esp, Immediate(kPointerSize));
2043 __ fmulp();
2044 CurrentInstructionReturnsX87Result();
2045 }
2046
2047
2048
2049 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { 2166 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2050 ASSERT(ToRegister(instr->context()).is(esi)); 2167 ASSERT(ToRegister(instr->context()).is(esi));
2051 ASSERT(ToRegister(instr->left()).is(edx)); 2168 ASSERT(ToRegister(instr->left()).is(edx));
2052 ASSERT(ToRegister(instr->right()).is(eax)); 2169 ASSERT(ToRegister(instr->right()).is(eax));
2053 ASSERT(ToRegister(instr->result()).is(eax)); 2170 ASSERT(ToRegister(instr->result()).is(eax));
2054 2171
2055 BinaryOpStub stub(instr->op(), NO_OVERWRITE); 2172 BinaryOpStub stub(instr->op(), NO_OVERWRITE);
2056 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); 2173 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2057 __ nop(); // Signals no inlined code. 2174 __ nop(); // Signals no inlined code.
2058 } 2175 }
(...skipping 897 matching lines...)
2956 HObjectAccess access = instr->hydrogen()->access(); 3073 HObjectAccess access = instr->hydrogen()->access();
2957 int offset = access.offset(); 3074 int offset = access.offset();
2958 Register object = ToRegister(instr->object()); 3075 Register object = ToRegister(instr->object());
2959 if (FLAG_track_double_fields && 3076 if (FLAG_track_double_fields &&
2960 instr->hydrogen()->representation().IsDouble()) { 3077 instr->hydrogen()->representation().IsDouble()) {
2961 if (CpuFeatures::IsSupported(SSE2)) { 3078 if (CpuFeatures::IsSupported(SSE2)) {
2962 CpuFeatureScope scope(masm(), SSE2); 3079 CpuFeatureScope scope(masm(), SSE2);
2963 XMMRegister result = ToDoubleRegister(instr->result()); 3080 XMMRegister result = ToDoubleRegister(instr->result());
2964 __ movdbl(result, FieldOperand(object, offset)); 3081 __ movdbl(result, FieldOperand(object, offset));
2965 } else { 3082 } else {
2966 PushX87DoubleOperand(FieldOperand(object, offset)); 3083 X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
2967 CurrentInstructionReturnsX87Result();
2968 } 3084 }
2969 return; 3085 return;
2970 } 3086 }
2971 3087
2972 Register result = ToRegister(instr->result()); 3088 Register result = ToRegister(instr->result());
2973 if (access.IsInobject()) { 3089 if (access.IsInobject()) {
2974 __ mov(result, FieldOperand(object, offset)); 3090 __ mov(result, FieldOperand(object, offset));
2975 } else { 3091 } else {
2976 __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset)); 3092 __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
2977 __ mov(result, FieldOperand(result, offset)); 3093 __ mov(result, FieldOperand(result, offset));
(...skipping 224 matching lines...)
3202 elements_kind, 3318 elements_kind,
3203 0, 3319 0,
3204 instr->additional_index())); 3320 instr->additional_index()));
3205 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { 3321 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
3206 if (CpuFeatures::IsSupported(SSE2)) { 3322 if (CpuFeatures::IsSupported(SSE2)) {
3207 CpuFeatureScope scope(masm(), SSE2); 3323 CpuFeatureScope scope(masm(), SSE2);
3208 XMMRegister result(ToDoubleRegister(instr->result())); 3324 XMMRegister result(ToDoubleRegister(instr->result()));
3209 __ movss(result, operand); 3325 __ movss(result, operand);
3210 __ cvtss2sd(result, result); 3326 __ cvtss2sd(result, result);
3211 } else { 3327 } else {
3212 PushX87FloatOperand(operand); 3328 X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
3213 CurrentInstructionReturnsX87Result();
3214 } 3329 }
3215 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { 3330 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
3216 if (CpuFeatures::IsSupported(SSE2)) { 3331 if (CpuFeatures::IsSupported(SSE2)) {
3217 CpuFeatureScope scope(masm(), SSE2); 3332 CpuFeatureScope scope(masm(), SSE2);
3218 __ movdbl(ToDoubleRegister(instr->result()), operand); 3333 __ movdbl(ToDoubleRegister(instr->result()), operand);
3219 } else { 3334 } else {
3220 PushX87DoubleOperand(operand); 3335 X87Mov(ToX87Register(instr->result()), operand);
3221 CurrentInstructionReturnsX87Result();
3222 } 3336 }
3223 } else { 3337 } else {
3224 Register result(ToRegister(instr->result())); 3338 Register result(ToRegister(instr->result()));
3225 switch (elements_kind) { 3339 switch (elements_kind) {
3226 case EXTERNAL_BYTE_ELEMENTS: 3340 case EXTERNAL_BYTE_ELEMENTS:
3227 __ movsx_b(result, operand); 3341 __ movsx_b(result, operand);
3228 break; 3342 break;
3229 case EXTERNAL_PIXEL_ELEMENTS: 3343 case EXTERNAL_PIXEL_ELEMENTS:
3230 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: 3344 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3231 __ movzx_b(result, operand); 3345 __ movzx_b(result, operand);
(...skipping 50 matching lines...)
3282 instr->key(), 3396 instr->key(),
3283 instr->hydrogen()->key()->representation(), 3397 instr->hydrogen()->key()->representation(),
3284 FAST_DOUBLE_ELEMENTS, 3398 FAST_DOUBLE_ELEMENTS,
3285 FixedDoubleArray::kHeaderSize - kHeapObjectTag, 3399 FixedDoubleArray::kHeaderSize - kHeapObjectTag,
3286 instr->additional_index()); 3400 instr->additional_index());
3287 if (CpuFeatures::IsSupported(SSE2)) { 3401 if (CpuFeatures::IsSupported(SSE2)) {
3288 CpuFeatureScope scope(masm(), SSE2); 3402 CpuFeatureScope scope(masm(), SSE2);
3289 XMMRegister result = ToDoubleRegister(instr->result()); 3403 XMMRegister result = ToDoubleRegister(instr->result());
3290 __ movdbl(result, double_load_operand); 3404 __ movdbl(result, double_load_operand);
3291 } else { 3405 } else {
3292 PushX87DoubleOperand(double_load_operand); 3406 X87Mov(ToX87Register(instr->result()), double_load_operand);
3293 CurrentInstructionReturnsX87Result();
3294 } 3407 }
3295 } 3408 }
3296 3409
3297 3410
3298 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { 3411 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3299 Register result = ToRegister(instr->result()); 3412 Register result = ToRegister(instr->result());
3300 3413
3301 // Load the result. 3414 // Load the result.
3302 __ mov(result, 3415 __ mov(result,
3303 BuildFastArrayOperand(instr->elements(), 3416 BuildFastArrayOperand(instr->elements(),
(...skipping 1651 matching lines...)
4955 5068
4956 Register reg = ToRegister(instr->result()); 5069 Register reg = ToRegister(instr->result());
4957 5070
4958 bool convert_hole = false; 5071 bool convert_hole = false;
4959 HValue* change_input = instr->hydrogen()->value(); 5072 HValue* change_input = instr->hydrogen()->value();
4960 if (change_input->IsLoadKeyed()) { 5073 if (change_input->IsLoadKeyed()) {
4961 HLoadKeyed* load = HLoadKeyed::cast(change_input); 5074 HLoadKeyed* load = HLoadKeyed::cast(change_input);
4962 convert_hole = load->UsesMustHandleHole(); 5075 convert_hole = load->UsesMustHandleHole();
4963 } 5076 }
4964 5077
5078 bool use_sse2 = CpuFeatures::IsSupported(SSE2);
5079 if (!use_sse2) {
5080 // Put the value to the top of stack
5081 X87Register src = ToX87Register(instr->value());
5082 X87LoadForUsage(src);
5083 }
5084
4965 Label no_special_nan_handling; 5085 Label no_special_nan_handling;
4966 Label done; 5086 Label done;
4967 if (convert_hole) { 5087 if (convert_hole) {
4968 bool use_sse2 = CpuFeatures::IsSupported(SSE2);
4969 if (use_sse2) { 5088 if (use_sse2) {
4970 CpuFeatureScope scope(masm(), SSE2); 5089 CpuFeatureScope scope(masm(), SSE2);
4971 XMMRegister input_reg = ToDoubleRegister(instr->value()); 5090 XMMRegister input_reg = ToDoubleRegister(instr->value());
4972 __ ucomisd(input_reg, input_reg); 5091 __ ucomisd(input_reg, input_reg);
4973 } else { 5092 } else {
4974 __ fld(0); 5093 __ fld(0);
4975 __ fld(0);
4976 __ FCmp(); 5094 __ FCmp();
4977 } 5095 }
4978 5096
4979 __ j(parity_odd, &no_special_nan_handling); 5097 __ j(parity_odd, &no_special_nan_handling);
4980 __ sub(esp, Immediate(kDoubleSize)); 5098 __ sub(esp, Immediate(kDoubleSize));
4981 if (use_sse2) { 5099 if (use_sse2) {
4982 CpuFeatureScope scope(masm(), SSE2); 5100 CpuFeatureScope scope(masm(), SSE2);
4983 XMMRegister input_reg = ToDoubleRegister(instr->value()); 5101 XMMRegister input_reg = ToDoubleRegister(instr->value());
4984 __ movdbl(MemOperand(esp, 0), input_reg); 5102 __ movdbl(MemOperand(esp, 0), input_reg);
4985 } else { 5103 } else {
(...skipping 33 matching lines...)
5019 __ jmp(deferred->entry()); 5137 __ jmp(deferred->entry());
5020 } 5138 }
5021 __ bind(deferred->exit()); 5139 __ bind(deferred->exit());
5022 if (CpuFeatures::IsSupported(SSE2)) { 5140 if (CpuFeatures::IsSupported(SSE2)) {
5023 CpuFeatureScope scope(masm(), SSE2); 5141 CpuFeatureScope scope(masm(), SSE2);
5024 XMMRegister input_reg = ToDoubleRegister(instr->value()); 5142 XMMRegister input_reg = ToDoubleRegister(instr->value());
5025 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); 5143 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
5026 } else { 5144 } else {
5027 __ fst_d(FieldOperand(reg, HeapNumber::kValueOffset)); 5145 __ fst_d(FieldOperand(reg, HeapNumber::kValueOffset));
5028 } 5146 }
5147 if (!use_sse2) {
5148 // clean up the stack
5149 __ fstp(0);
5150 }
5029 __ bind(&done); 5151 __ bind(&done);
5030 } 5152 }
5031 5153
5032 5154
5033 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { 5155 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
5034 // TODO(3095996): Get rid of this. For now, we need to make the 5156 // TODO(3095996): Get rid of this. For now, we need to make the
5035 // result register contain a valid pointer because it is already 5157 // result register contain a valid pointer because it is already
5036 // contained in the register pointer map. 5158 // contained in the register pointer map.
5037 Register reg = ToRegister(instr->result()); 5159 Register reg = ToRegister(instr->result());
5038 __ Set(reg, Immediate(0)); 5160 __ Set(reg, Immediate(0));
(...skipping 29 matching lines...)
5068 DeoptimizeIf(not_zero, instr->environment()); 5190 DeoptimizeIf(not_zero, instr->environment());
5069 } else { 5191 } else {
5070 __ AssertSmi(result); 5192 __ AssertSmi(result);
5071 } 5193 }
5072 __ SmiUntag(result); 5194 __ SmiUntag(result);
5073 } 5195 }
5074 5196
5075 5197
5076 void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg, 5198 void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
5077 Register temp_reg, 5199 Register temp_reg,
5200 X87Register res_reg,
5078 bool allow_undefined_as_nan, 5201 bool allow_undefined_as_nan,
5079 bool deoptimize_on_minus_zero, 5202 bool deoptimize_on_minus_zero,
5080 LEnvironment* env, 5203 LEnvironment* env,
5081 NumberUntagDMode mode) { 5204 NumberUntagDMode mode) {
5082 Label load_smi, done; 5205 Label load_smi, done;
5083 5206
5207 X87PrepareToWrite(res_reg);
5084 STATIC_ASSERT(NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE > 5208 STATIC_ASSERT(NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE >
5085 NUMBER_CANDIDATE_IS_ANY_TAGGED); 5209 NUMBER_CANDIDATE_IS_ANY_TAGGED);
5086 if (mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED) { 5210 if (mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED) {
5087 // Smi check. 5211 // Smi check.
5088 __ JumpIfSmi(input_reg, &load_smi, Label::kNear); 5212 __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
5089 5213
5090 // Heap number map check. 5214 // Heap number map check.
5091 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), 5215 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5092 factory()->heap_number_map()); 5216 factory()->heap_number_map());
5093 if (!allow_undefined_as_nan) { 5217 if (!allow_undefined_as_nan) {
(...skipping 40 matching lines...)
5134 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); 5258 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
5135 } 5259 }
5136 5260
5137 __ bind(&load_smi); 5261 __ bind(&load_smi);
5138 __ SmiUntag(input_reg); // Untag smi before converting to float. 5262 __ SmiUntag(input_reg); // Untag smi before converting to float.
5139 __ push(input_reg); 5263 __ push(input_reg);
5140 __ fild_s(Operand(esp, 0)); 5264 __ fild_s(Operand(esp, 0));
5141 __ pop(input_reg); 5265 __ pop(input_reg);
5142 __ SmiTag(input_reg); // Retag smi. 5266 __ SmiTag(input_reg); // Retag smi.
5143 __ bind(&done); 5267 __ bind(&done);
5268 X87CommitWrite(res_reg);
5144 } 5269 }
5145 5270
5146 5271
5147 void LCodeGen::EmitNumberUntagD(Register input_reg, 5272 void LCodeGen::EmitNumberUntagD(Register input_reg,
5148 Register temp_reg, 5273 Register temp_reg,
5149 XMMRegister result_reg, 5274 XMMRegister result_reg,
5150 bool allow_undefined_as_nan, 5275 bool allow_undefined_as_nan,
5151 bool deoptimize_on_minus_zero, 5276 bool deoptimize_on_minus_zero,
5152 LEnvironment* env, 5277 LEnvironment* env,
5153 NumberUntagDMode mode) { 5278 NumberUntagDMode mode) {
(...skipping 361 matching lines...)
5515 EmitNumberUntagD(input_reg, 5640 EmitNumberUntagD(input_reg,
5516 temp_reg, 5641 temp_reg,
5517 result_reg, 5642 result_reg,
5518 instr->hydrogen()->allow_undefined_as_nan(), 5643 instr->hydrogen()->allow_undefined_as_nan(),
5519 deoptimize_on_minus_zero, 5644 deoptimize_on_minus_zero,
5520 instr->environment(), 5645 instr->environment(),
5521 mode); 5646 mode);
5522 } else { 5647 } else {
5523 EmitNumberUntagDNoSSE2(input_reg, 5648 EmitNumberUntagDNoSSE2(input_reg,
5524 temp_reg, 5649 temp_reg,
5650 ToX87Register(instr->result()),
5525 instr->hydrogen()->allow_undefined_as_nan(), 5651 instr->hydrogen()->allow_undefined_as_nan(),
5526 deoptimize_on_minus_zero, 5652 deoptimize_on_minus_zero,
5527 instr->environment(), 5653 instr->environment(),
5528 mode); 5654 mode);
5529 CurrentInstructionReturnsX87Result();
5530 } 5655 }
5531 } 5656 }
5532 5657
5533 5658
5534 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { 5659 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5535 LOperand* input = instr->value(); 5660 LOperand* input = instr->value();
5536 ASSERT(input->IsDoubleRegister()); 5661 ASSERT(input->IsDoubleRegister());
5537 LOperand* result = instr->result(); 5662 LOperand* result = instr->result();
5538 ASSERT(result->IsRegister()); 5663 ASSERT(result->IsRegister());
5539 CpuFeatureScope scope(masm(), SSE2); 5664 CpuFeatureScope scope(masm(), SSE2);
(...skipping 1033 matching lines...)
6573 FixedArray::kHeaderSize - kPointerSize)); 6698 FixedArray::kHeaderSize - kPointerSize));
6574 __ bind(&done); 6699 __ bind(&done);
6575 } 6700 }
6576 6701
6577 6702
6578 #undef __ 6703 #undef __
6579 6704
6580 } } // namespace v8::internal 6705 } } // namespace v8::internal
6581 6706
6582 #endif // V8_TARGET_ARCH_IA32 6707 #endif // V8_TARGET_ARCH_IA32