Index: runtime/vm/flow_graph_compiler_ia32.cc
diff --git a/runtime/vm/flow_graph_compiler_ia32.cc b/runtime/vm/flow_graph_compiler_ia32.cc
index f33c0513a952924afb728eae8ea504b2ae8ae0d5..be09e6dc030fb19f78b9e6ed2dd1edd5c164168f 100644
--- a/runtime/vm/flow_graph_compiler_ia32.cc
+++ b/runtime/vm/flow_graph_compiler_ia32.cc
@@ -46,13 +46,20 @@ void DeoptimizationStub::GenerateCode(FlowGraphCompiler* compiler,
   const GrowableArray<Value*>& values = deoptimization_env_->values();
   // 1. Build a parallel move representing the frame translation.
+  intptr_t height = compiler->StackSize();
   ParallelMoveInstr* move = new ParallelMoveInstr();
   for (intptr_t i = 0; i < values.length(); i++) {
     Location destination = Location::StackSlot(i - fixed_parameter_count);
     Location source = deoptimization_env_->LocationAt(i);
     if (source.IsInvalid()) {
-      ASSERT(values[i]->IsConstant());
-      source = Location::Constant(values[i]->AsConstant()->value());
+      Value* value = values[i];
+      if (value->IsConstant()) {
+        source = Location::Constant(value->AsConstant()->value());
+      } else {
+        ASSERT(value->IsUse() &&
+               value->AsUse()->definition()->IsPushArgument());
+        source = Location::StackSlot(height++);
+      }
     }
     move->AddMove(destination, source);
   }
@@ -62,6 +69,20 @@ void DeoptimizationStub::GenerateCode(FlowGraphCompiler* compiler,
   const intptr_t top_offset =
       ParsedFunction::kFirstLocalSlotIndex - (local_slot_count - 1);
+#if defined(DEBUG)
+  intptr_t optimized_top_offset =
+      (ParsedFunction::kFirstLocalSlotIndex - height) * kWordSize;
+  Label height_ok;
+  __ pushl(EAX);
+  __ leal(EAX, Address(EBP, optimized_top_offset));
+  __ cmpl(EAX, ESP);
+  // Some instructions might push temporary values above an optimized frame.
+  __ j(ABOVE_EQUAL, &height_ok);
+  __ Stop("Optimized frame height mismatch");
+  __ Bind(&height_ok);
+  __ popl(EAX);
+#endif
+
   // ParallelMoveResolver will use push and pop to allocate internally a
   // scratch register for memory to memory moves. This means we have to
   // ensure that these stack manipulations will not interfere with actual
@@ -70,16 +91,14 @@ void DeoptimizationStub::GenerateCode(FlowGraphCompiler* compiler,
   // we need to shrink stack area after resolving parallel move. This
   // guarantees that all moves happen below stack pointer and will not
   // interfere with additional push/pops.
-  const intptr_t spill_slot_count = compiler->StackSize();
-
-  if (local_slot_count > spill_slot_count) {
+  if (local_slot_count >= height) {
     // Expand reserved stack area.
     __ leal(ESP, Address(EBP, top_offset * kWordSize));
   }
   compiler->parallel_move_resolver()->EmitNativeCode(move);
-  if (local_slot_count < spill_slot_count) {
+  if (local_slot_count < height) {
     // Shrink reserved stack area.
     __ leal(ESP, Address(EBP, top_offset * kWordSize));
   }