| Index: src/ia32/lithium-codegen-ia32.cc
|
| diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
|
| index 654c04d2b2bf570ca181669d69be74a710d6e43b..b37da306c7ea28b20a20a721915fa86826f35a48 100644
|
| --- a/src/ia32/lithium-codegen-ia32.cc
|
| +++ b/src/ia32/lithium-codegen-ia32.cc
|
| @@ -130,6 +130,40 @@ void LCodeGen::MakeSureStackPagesMapped(int offset) {
|
| #endif
|
|
|
|
|
| +void LCodeGen::SaveCallerDoubles() {
|
| + ASSERT(info()->saves_caller_doubles());
|
| + ASSERT(NeedsEagerFrame());
|
| +  Comment(";;; Save clobbered caller double registers");
|
| + CpuFeatureScope scope(masm(), SSE2);
|
| + int count = 0;
|
| + BitVector* doubles = chunk()->allocated_double_registers();
|
| + BitVector::Iterator save_iterator(doubles);
|
| + while (!save_iterator.Done()) {
|
| + __ movsd(MemOperand(esp, count * kDoubleSize),
|
| + XMMRegister::FromAllocationIndex(save_iterator.Current()));
|
| + save_iterator.Advance();
|
| + count++;
|
| + }
|
| +}
|
| +
|
| +
|
| +void LCodeGen::RestoreCallerDoubles() {
|
| + ASSERT(info()->saves_caller_doubles());
|
| + ASSERT(NeedsEagerFrame());
|
| +  Comment(";;; Restore clobbered caller double registers");
|
| + CpuFeatureScope scope(masm(), SSE2);
|
| + BitVector* doubles = chunk()->allocated_double_registers();
|
| + BitVector::Iterator save_iterator(doubles);
|
| + int count = 0;
|
| + while (!save_iterator.Done()) {
|
| + __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
|
| + MemOperand(esp, count * kDoubleSize));
|
| + save_iterator.Advance();
|
| + count++;
|
| + }
|
| +}
|
| +
|
| +
|
| bool LCodeGen::GeneratePrologue() {
|
| ASSERT(is_generating());
|
|
|
| @@ -244,17 +278,7 @@ bool LCodeGen::GeneratePrologue() {
|
| }
|
|
|
| if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
|
| - Comment(";;; Save clobbered callee double registers");
|
| - CpuFeatureScope scope(masm(), SSE2);
|
| - int count = 0;
|
| - BitVector* doubles = chunk()->allocated_double_registers();
|
| - BitVector::Iterator save_iterator(doubles);
|
| - while (!save_iterator.Done()) {
|
| - __ movsd(MemOperand(esp, count * kDoubleSize),
|
| - XMMRegister::FromAllocationIndex(save_iterator.Current()));
|
| - save_iterator.Advance();
|
| - count++;
|
| - }
|
| + SaveCallerDoubles();
|
| }
|
| }
|
|
|
| @@ -399,6 +423,7 @@ bool LCodeGen::GenerateJumpTable() {
|
| Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
|
| }
|
| if (jump_table_[i].needs_frame) {
|
| + ASSERT(!info()->saves_caller_doubles());
|
| __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
|
| if (needs_frame.is_bound()) {
|
| __ jmp(&needs_frame);
|
| @@ -425,6 +450,9 @@ bool LCodeGen::GenerateJumpTable() {
|
| __ ret(0); // Call the continuation without clobbering registers.
|
| }
|
| } else {
|
| + if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
|
| + RestoreCallerDoubles();
|
| + }
|
| __ call(entry, RelocInfo::RUNTIME_ENTRY);
|
| }
|
| }
|
| @@ -3159,17 +3187,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
|
| __ CallRuntime(Runtime::kTraceExit, 1);
|
| }
|
| if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
|
| - ASSERT(NeedsEagerFrame());
|
| - CpuFeatureScope scope(masm(), SSE2);
|
| - BitVector* doubles = chunk()->allocated_double_registers();
|
| - BitVector::Iterator save_iterator(doubles);
|
| - int count = 0;
|
| - while (!save_iterator.Done()) {
|
| - __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
|
| - MemOperand(esp, count * kDoubleSize));
|
| - save_iterator.Advance();
|
| - count++;
|
| - }
|
| + RestoreCallerDoubles();
|
| }
|
| if (dynamic_frame_alignment_) {
|
| // Fetch the state of the dynamic frame alignment.
|
|
|