Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(302)

Unified Diff: src/ia32/deoptimizer-ia32.cc

Issue 10910161: Partial ia32 implementation of optimized try/catch (by Kevin Millikin) (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Fixed build. Created 8 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
« no previous file with comments | « src/ia32/builtins-ia32.cc ('k') | src/ia32/lithium-codegen-ia32.h » ('j') | no next file with comments »
Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
Index: src/ia32/deoptimizer-ia32.cc
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index f50010b9d97b884e2dda6109b5bf9e43ca8c7fd8..6b550c2ddc424a44c28486fc2e0e7d0e76376827 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -351,6 +351,8 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned height = iterator.Next();
unsigned height_in_bytes = height * kPointerSize;
USE(height_in_bytes);
+ int handler_count = iterator.Next();
+ USE(handler_count);
unsigned fixed_size = ComputeFixedSize(function_);
unsigned input_frame_size = input_->GetFrameSize();
@@ -480,7 +482,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
ok ? "finished" : "aborted",
reinterpret_cast<intptr_t>(function_));
function_->PrintName();
- PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
+ PrintF(" => pc=0x%08x]\n", output_[0]->GetPc());
}
}
@@ -825,19 +827,32 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
function = function_;
}
+ Code* non_optimized_code = function->shared()->code();
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" translating ");
- function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
- }
+ int handler_count = iterator->Next();
+ // If we are doing lazy deoptimization for catching in an optimized frame,
+ // one of the handlers in the frame description has already been dropped
+ // by throwing to it.
+ if (isolate_->optimized_handler_patch_buffer() != NULL) {
+ ASSERT(frame_index == 0 && output_count_ == 1);
+ ASSERT(bailout_type_ == LAZY);
+ ASSERT(handler_count > 0);
+ --handler_count;
+ }
+ int handlers_size = handler_count * StackHandlerConstants::kSize;
// The 'fixed' part of the frame consists of the incoming parameters and
// the part described by JavaScriptFrameConstants.
unsigned fixed_frame_size = ComputeFixedSize(function);
- unsigned input_frame_size = input_->GetFrameSize();
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+ unsigned varying_frame_size = height_in_bytes + handlers_size;
+ unsigned output_frame_size = fixed_frame_size + varying_frame_size;
+ if (FLAG_trace_deopt) {
+ PrintF(" translating ");
+ function->PrintName();
+ PrintF(" => node=%d, height=%d\n",
+ node_id.ToInt(), height_in_bytes + handlers_size);
+ }
// Allocate and store the output frame description.
FrameDescription* output_frame =
@@ -845,6 +860,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
bool is_bottommost = (0 == frame_index);
+ ASSERT(is_bottommost || handler_count == 0); // We do not inline try-catch.
bool is_topmost = (output_count_ - 1 == frame_index);
ASSERT(frame_index >= 0 && frame_index < output_count_);
ASSERT(output_[frame_index] == NULL);
@@ -853,7 +869,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
// Compute the incoming parameter translation.
int parameter_count = function->shared()->formal_parameter_count() + 1;
unsigned output_offset = output_frame_size;
- unsigned input_offset = input_frame_size;
+ unsigned input_offset = input_->GetFrameSize();
unsigned alignment_state_offset =
input_offset - parameter_count * kPointerSize -
@@ -876,7 +892,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
// to point to the new position of the old frame pointer after padding
// is removed. Subtract 2 * kPointerSize for the context and function slots.
top_address = input_->GetRegister(ebp.code()) - (2 * kPointerSize) -
- height_in_bytes + has_alignment_padding_ * kPointerSize;
+ varying_frame_size + has_alignment_padding_ * kPointerSize;
} else {
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
}
@@ -966,35 +982,141 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
top_address + output_offset, output_offset, value);
}
+
+ // Translate the stack-allocated locals.
+ int stack_slot_count = function->shared()->scope_info()->StackSlotCount();
+ for (int i = 0; i < stack_slot_count; ++i) {
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+
+ // If there are any exception handlers, translate them.
+ if (handler_count > 0) {
+ // Translate the handler blocks. The output frame needs:
+ //
+ // incoming args | fixed part | locals | handlers | expression stack
+ //
+ // because try occurs as a statement, i.e., with the expression stack
+ // empty. The input frame has:
+ //
+ // incoming args | fixed part | spill slots | handlers | outgoing args
+ //
+ // also because try is a statement, i.e., there is no pending call to
+ // interleave handlers and args. This would change if we began inlining
+ // functions containing try/catch.
+ //
+ // TODO(kmillikin): Begin inlining functions containing try/catch.
+ input_offset = ComputeOutgoingArgumentSize() + ComputeHandlersSize();
+ intptr_t next_handler = 0;
+ for (int i = 0; i < handler_count; ++i) {
+ // The first two fields (fp and context) are invariant under
+ // deoptimization.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ value = input_->GetFrameSlot(input_offset);
+ ASSERT(value == fp_value);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; handler[%d] fp\n",
+ top_address + output_offset, output_offset, value, i);
+ }
+
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ value = input_->GetFrameSlot(input_offset);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; handler[%d] context\n",
+ top_address + output_offset, output_offset, value, i);
+ }
+
+ // The state is the same except that the kind is unoptimized.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ value =
+ input_->GetFrameSlot(input_offset) & ~StackHandler::kIsOptimizedMask;
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; handler[%d] index + kind\n",
+ top_address + output_offset, output_offset, value, i);
+ }
+
+ // For all handlers in the frame, the code is the unoptimized code.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ value = reinterpret_cast<uint32_t>(non_optimized_code);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; handler[%d] code\n",
+ top_address + output_offset, output_offset, value, i);
+ }
+
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ if (i == 0) {
+ // Bottommost handler's next link is deoptimization invariant.
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = next_handler;
+ }
+ output_frame->SetFrameSlot(output_offset, value);
+ next_handler = top_address + output_offset;
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; handler[%d] next\n",
+ top_address + output_offset, output_offset, value, i);
+ }
+ }
+ *reinterpret_cast<intptr_t*>(isolate_->handler_address()) = next_handler;
+ }
+
// Translate the rest of the frame.
- for (unsigned i = 0; i < height; ++i) {
+ while (output_offset > 0) {
output_offset -= kPointerSize;
DoTranslateCommand(iterator, frame_index, output_offset);
}
- ASSERT(0 == output_offset);
// Compute this frame's PC, state, and continuation.
- Code* non_optimized_code = function->shared()->code();
- FixedArray* raw_data = non_optimized_code->deoptimization_data();
- DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
- Address start = non_optimized_code->instruction_start();
- unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
- unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
- uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
- output_frame->SetPc(pc_value);
-
- FullCodeGenerator::State state =
- FullCodeGenerator::StateField::decode(pc_and_state);
+ int pc_offset;
+ FullCodeGenerator::State state;
+ Builtins::Name continuation;
+ if (isolate_->optimized_handler_patch_buffer() != NULL) {
+ ASSERT(frame_index == 0 && output_count_ == 1);
+ // Catching in optimized code. Deopt to the corresponding unoptimized
+ // catch handler (not to the instruction following the one that threw).
+ FixedArray* handler_table = non_optimized_code->handler_table();
+ int handler_index = isolate_->optimized_handler_handler_index();
+ pc_offset = Smi::cast(handler_table->get(handler_index))->value();
+ state = FullCodeGenerator::NO_REGISTERS;
+ continuation = Builtins::kNotifyLazyDeoptimized;
+ // The exception value is needed in eax.
+ output_frame->SetRegister(eax.code(),
+ reinterpret_cast<intptr_t>(isolate_->optimized_pending_exception()));
+ isolate_->clear_optimized_pending_exception();
+ } else {
+ FixedArray* raw_data = non_optimized_code->deoptimization_data();
+ DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
+ unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
+ pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
+ state = FullCodeGenerator::StateField::decode(pc_and_state);
+ continuation = (bailout_type_ == EAGER)
+ ? Builtins::kNotifyDeoptimized
+ : Builtins::kNotifyLazyDeoptimized;
+ // For lazy deopt we preserve eax in the stack. Ensure that it is not
+ // the zap value (a probably-invalid HeapObject*).
+ // TODO(mmassi): According to Kevin the test should be "!= EAGER"...
+ if (bailout_type_ == EAGER && is_topmost) {
+ output_frame->SetRegister(eax.code(),
+ reinterpret_cast<intptr_t>(Smi::FromInt(0)));
+ }
+ }
+ byte* code_start = non_optimized_code->instruction_start();
+ output_frame->SetPc(reinterpret_cast<uint32_t>(code_start + pc_offset));
output_frame->SetState(Smi::FromInt(state));
-
// Set the continuation for the topmost frame.
if (is_topmost && bailout_type_ != DEBUGGER) {
Builtins* builtins = isolate_->builtins();
- Code* continuation = (bailout_type_ == EAGER)
- ? builtins->builtin(Builtins::kNotifyDeoptimized)
- : builtins->builtin(Builtins::kNotifyLazyDeoptimized);
- output_frame->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
+ byte* entry = builtins->builtin(continuation)->entry();
+ output_frame->SetContinuation(reinterpret_cast<uint32_t>(entry));
}
}
« no previous file with comments | « src/ia32/builtins-ia32.cc ('k') | src/ia32/lithium-codegen-ia32.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698