Index: runtime/vm/flow_graph_compiler_x64.cc
diff --git a/runtime/vm/flow_graph_compiler_x64.cc b/runtime/vm/flow_graph_compiler_x64.cc
index 7668eeac94e8e6c09b9689024924e70bc8a5f824..49bdd795b97514d8b597867e57758ea806b9a7cb 100644
--- a/runtime/vm/flow_graph_compiler_x64.cc
+++ b/runtime/vm/flow_graph_compiler_x64.cc
@@ -42,8 +42,27 @@ void DeoptimizationStub::GenerateCode(FlowGraphCompiler* compiler) {
   __ leaq(RSP,
           Address(RBP, ParsedFunction::kFirstLocalSlotIndex * kWordSize));
+  const intptr_t fixed_param_count = deoptimization_env_->fixed_param_count();
   const GrowableArray<Value*>& values = deoptimization_env_->values();
-  for (intptr_t i = 0; i < values.length(); i++) {
+
+  for (intptr_t i = 0; i < fixed_param_count; i++) {
+    const intptr_t offset = (2 + (fixed_param_count - 1) - i) * kWordSize;
+    const Location loc = deoptimization_env_->LocationAt(i);
+    if (loc.IsInvalid()) {
+      ASSERT(values[i]->IsConstant());
+      __ StoreObject(Address(RBP, offset), values[i]->AsConstant()->value());
+    } else if (loc.IsRegister()) {
+      __ movq(Address(RBP, offset), loc.reg());
+    } else if (loc.IsStackSlot() &&
+               (loc.stack_index() < 0) &&
+               (loc.stack_index() == (i - fixed_param_count))) {
+      // Do nothing.
+    } else {
+      compiler->Bailout("unsupported deoptimization state");
+    }
+  }
    srdjan 2012/07/26 00:33:56:
        Sync this with Kevin's upcoming change.
    Vyacheslav Egorov (Google) 2012/07/26 11:33:53:
        Will do.
+
+  for (intptr_t i = fixed_param_count; i < values.length(); i++) {
     const Location loc = deoptimization_env_->LocationAt(i);
     if (loc.IsInvalid()) {
       ASSERT(values[i]->IsConstant());
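Note on the first loop above: with the saved caller RBP at [RBP] and the return address at [RBP + kWordSize], fixed parameter i of an n-parameter frame lives at RBP + (2 + (n - 1) - i) * kWordSize, so the last parameter sits closest to the return address. A minimal standalone sketch of the same arithmetic (FixedParamOffset is a hypothetical helper for illustration, not part of this patch):

    // Sketch only: RBP-relative byte offset of fixed parameter i under the
    // standard x64 layout [params][return address][saved RBP] <- RBP.
    #include <cassert>
    #include <cstdint>

    constexpr intptr_t kWordSize = 8;  // x64 word size

    intptr_t FixedParamOffset(intptr_t i, intptr_t fixed_param_count) {
      assert(0 <= i && i < fixed_param_count);
      // Slot 0 above RBP holds the saved RBP, slot 1 the return address, so
      // parameters occupy slots 2..(fixed_param_count + 1), last one first.
      return (2 + (fixed_param_count - 1) - i) * kWordSize;
    }
    // With 2 parameters: FixedParamOffset(1, 2) == 16, FixedParamOffset(0, 2) == 24.

This agrees with the negative-index branch of ToStackSlotAddress further down: parameter i carries stack_index i - fixed_param_count, giving offset (1 - (i - fixed_param_count)) * kWordSize, which is the same value, and is why the third arm of the if-chain can skip the store entirely.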
@@ -960,15 +979,24 @@ void FlowGraphCompiler::CompileGraph() {
   } else {
     CopyParameters();
   }
+
+  // TODO(vegorov): introduce stack maps and stop initializing all spill slots
+  // with null.
+  const int stack_slot_count =
+      is_ssa_ ? block_order_[0]->AsGraphEntry()->spill_slot_count()
+              : local_count;
+
+  const int slot_base =
+      is_ssa_ ? -2 : parsed_function().first_stack_local_index();
    srdjan 2012/07/26 00:33:56:
        ditto, not needed, I guess.
    Vyacheslav Egorov (Google) 2012/07/26 11:33:53:
        Done.
+
   // Initialize (non-argument) stack allocated locals to null.
-  if (local_count > 0) {
+  if (stack_slot_count > 0) {
     const Immediate raw_null =
         Immediate(reinterpret_cast<intptr_t>(Object::null()));
     __ movq(RAX, raw_null);
-    const int base = parsed_function().first_stack_local_index();
-    for (int i = 0; i < local_count; ++i) {
+    for (int i = 0; i < stack_slot_count; ++i) {
       // Subtract index i (locals lie at lower addresses than RBP).
-      __ movq(Address(RBP, (base - i) * kWordSize), RAX);
+      __ movq(Address(RBP, (slot_base - i) * kWordSize), RAX);
     }
   }
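In SSA mode the spill area begins at a fixed slot_base of -2 words below RBP; the non-SSA path still starts at first_stack_local_index(). A quick sketch of the addresses the loop stores null to (InitializedSlotOffsets is illustrative only, not VM code):

    // Sketch: the RBP-relative byte offsets written by the null-initialization
    // loop, given slot_base (-2 in SSA mode) and the spill slot count.
    #include <cstdint>
    #include <vector>

    constexpr intptr_t kWordSize = 8;

    std::vector<intptr_t> InitializedSlotOffsets(int slot_base, int count) {
      std::vector<intptr_t> offsets;
      for (int i = 0; i < count; ++i) {
        // Locals lie at lower addresses than RBP, hence the subtraction.
        offsets.push_back((slot_base - i) * kWordSize);
      }
      return offsets;
    }
    // E.g. slot_base == -2, count == 3 yields RBP-16, RBP-24, RBP-32.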
@@ -1112,11 +1140,17 @@ void FlowGraphCompiler::LoadDoubleOrSmiToXmm(XmmRegister result,
 #define __ compiler_->assembler()->
-static Address ToAddress(Location loc) {
-  ASSERT(loc.IsSpillSlot());
-  const intptr_t offset =
-      (ParsedFunction::kFirstLocalSlotIndex - loc.spill_index()) * kWordSize;
-  return Address(RBP, offset);
+static Address ToStackSlotAddress(Location loc) {
+  ASSERT(loc.IsStackSlot());
+  const intptr_t index = loc.stack_index();
+  if (index < 0) {
+    const intptr_t offset = (1 - index) * kWordSize;
+    return Address(RBP, offset);
+  } else {
+    const intptr_t offset =
+        (ParsedFunction::kFirstLocalSlotIndex - index) * kWordSize;
+    return Address(RBP, offset);
+  }
 }
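The renamed helper now serves both slot kinds: a negative stack_index addresses an incoming parameter above RBP, a non-negative one a local or spill slot below it. A standalone sketch of the same arithmetic (StackSlotOffset is hypothetical; kFirstLocalSlotIndex == -2 is an assumption consistent with the SSA slot_base above):

    // Sketch: RBP-relative byte offset for a stack slot, mirroring the two
    // branches of ToStackSlotAddress.
    #include <cstdint>

    constexpr intptr_t kWordSize = 8;
    constexpr intptr_t kFirstLocalSlotIndex = -2;  // assumed value

    intptr_t StackSlotOffset(intptr_t index) {
      if (index < 0) {
        // Parameter slot: index -1 sits just above the return address.
        return (1 - index) * kWordSize;
      }
      // Local/spill slot: index 0 is the first slot below the saved frame.
      return (kFirstLocalSlotIndex - index) * kWordSize;
    }
    // StackSlotOffset(-1) == 16 and StackSlotOffset(0) == -16, matching the
    // parameter offsets in the deoptimization stub above.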
@@ -1129,23 +1163,24 @@ void ParallelMoveResolver::EmitMove(int index) {
     if (destination.IsRegister()) {
       __ movq(destination.reg(), source.reg());
     } else {
-      ASSERT(destination.IsSpillSlot());
-      __ movq(ToAddress(destination), source.reg());
+      ASSERT(destination.IsStackSlot());
+      __ movq(ToStackSlotAddress(destination), source.reg());
     }
-  } else if (source.IsSpillSlot()) {
+  } else if (source.IsStackSlot()) {
     if (destination.IsRegister()) {
-      __ movq(destination.reg(), ToAddress(source));
+      __ movq(destination.reg(), ToStackSlotAddress(source));
     } else {
-      ASSERT(destination.IsSpillSlot());
-      MoveMemoryToMemory(ToAddress(destination), ToAddress(source));
+      ASSERT(destination.IsStackSlot());
+      MoveMemoryToMemory(ToStackSlotAddress(destination),
+                         ToStackSlotAddress(source));
     }
   } else {
     ASSERT(source.IsConstant());
     if (destination.IsRegister()) {
       __ LoadObject(destination.reg(), source.constant());
     } else {
-      ASSERT(destination.IsSpillSlot());
-      StoreObject(ToAddress(destination), source.constant());
+      ASSERT(destination.IsStackSlot());
+      StoreObject(ToStackSlotAddress(destination), source.constant());
     }
   }
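EmitMove dispatches on the (source, destination) pair; the only case with two memory operands goes through MoveMemoryToMemory, since x64 movq cannot take memory for both operands. That helper's body is outside this diff; a plausible sketch, assuming a TMP scratch register as the bridge:

    // Illustrative sketch only, not this patch's definition: bridge two
    // stack slots through a scratch register (TMP is an assumption here).
    void ParallelMoveResolver::MoveMemoryToMemory(const Address& dst,
                                                  const Address& src) {
      __ movq(TMP, src);
      __ movq(dst, TMP);
    }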
@@ -1160,12 +1195,12 @@ void ParallelMoveResolver::EmitSwap(int index) {
   if (source.IsRegister() && destination.IsRegister()) {
     __ xchgq(destination.reg(), source.reg());
-  } else if (source.IsRegister() && destination.IsSpillSlot()) {
-    Exchange(source.reg(), ToAddress(destination));
-  } else if (source.IsSpillSlot() && destination.IsRegister()) {
-    Exchange(destination.reg(), ToAddress(source));
-  } else if (source.IsSpillSlot() && destination.IsSpillSlot()) {
-    Exchange(ToAddress(destination), ToAddress(source));
+  } else if (source.IsRegister() && destination.IsStackSlot()) {
+    Exchange(source.reg(), ToStackSlotAddress(destination));
+  } else if (source.IsStackSlot() && destination.IsRegister()) {
+    Exchange(destination.reg(), ToStackSlotAddress(source));
+  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
+    Exchange(ToStackSlotAddress(destination), ToStackSlotAddress(source));
   } else {
     UNREACHABLE();
   }
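Register-register swaps compile to a single xchgq; the remaining cases rely on the Exchange overloads, whose bodies are also outside this diff. Note that xchg with a memory operand carries an implicit lock prefix on x86-64, so implementations typically route through a scratch register or the stack instead. A plausible sketch (illustrative, not the patch's actual definitions; TMP is an assumed scratch register):

    void ParallelMoveResolver::Exchange(Register reg, const Address& mem) {
      __ movq(TMP, mem);
      __ movq(mem, reg);
      __ movq(reg, TMP);
    }

    void ParallelMoveResolver::Exchange(const Address& mem1,
                                        const Address& mem2) {
      // Push/pop through the stack needs no second scratch register;
      // the pops land crosswise, completing the swap.
      __ pushq(mem1);
      __ pushq(mem2);
      __ popq(mem1);
      __ popq(mem2);
    }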