| Index: src/deoptimizer.cc
|
| diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
|
| index eec86978ce4895df3dd74c6ca55e1fb88fa45c54..13488746642c00d3cf9f0719e501a981d83fe043 100644
|
| --- a/src/deoptimizer.cc
|
| +++ b/src/deoptimizer.cc
|
| @@ -155,6 +155,22 @@ size_t Deoptimizer::GetMaxDeoptTableSize() {
|
| Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
|
| Deoptimizer* result = isolate->deoptimizer_data()->current_;
|
| ASSERT(result != NULL);
|
| + if (isolate->optimized_handler_patch_buffer() != NULL) {
|
| + ASSERT(result->bailout_type_ == LAZY);
|
| + // Before allowing allocation, restore the original optimized code. While
|
| + // patched, the code's relocation info does not agree with its instructions,
|
| + // so the garbage collector must not see it in that state.
|
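| + // (The patch buffer holds the original instruction bytes that were
|
| + // overwritten at the lazy deoptimization site.)
|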
| + Code* code = result->compiled_code_;
|
| + DeoptimizationInputData* deopt_data =
|
| + DeoptimizationInputData::cast(code->deoptimization_data());
|
| + int deopt_index = isolate->optimized_handler_deopt_index();
|
| + byte* patch_address =
|
| + code->instruction_start() + deopt_data->Pc(deopt_index)->value();
|
| + memcpy(patch_address, isolate->optimized_handler_patch_buffer(),
|
| + patch_size());
|
| + // FIXME(mmassi): Why doesn't this also clear the patch buffer immediately?
|
| + isolate->ClearOptimizedHandlerByDeopt(code);
|
| + }
|
| result->DeleteFrameDescriptions();
|
| isolate->deoptimizer_data()->current_ = NULL;
|
| return result;
|
| @@ -195,10 +211,17 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
|
| int deoptimization_index = safepoint_entry.deoptimization_index();
|
| ASSERT(deoptimization_index != Safepoint::kNoDeoptimizationIndex);
|
|
|
| + // Find the size of the frame's exception handlers.
|
| + DeoptimizationInputData* deoptimization_data =
|
| + DeoptimizationInputData::cast(code->deoptimization_data());
|
| + unsigned handler_count =
|
| + deoptimization_data->HandlerCount(deoptimization_index)->value();
|
| + unsigned handler_size = handler_count * StackHandlerConstants::kSize;
|
| +
|
| // Always use the actual stack slots when calculating the fp to sp
|
| // delta adding two for the function and context.
|
| unsigned stack_slots = code->stack_slots();
|
| - unsigned fp_to_sp_delta = ((stack_slots + 2) * kPointerSize);
|
| + unsigned fp_to_sp_delta = ((stack_slots + 2) * kPointerSize) + handler_size;
|
|
|
| Deoptimizer* deoptimizer = new Deoptimizer(isolate,
|
| function,
|
| @@ -747,6 +770,13 @@ void Deoptimizer::DoComputeOutputFrames() {
|
| int count = iterator.Next();
|
| iterator.Next(); // Drop JS frames count.
|
| ASSERT(output_ == NULL);
|
| +
|
| + // If this deoptimization is needed to transfer control to a catch clause,
|
| + // only the first frame is relevant.
|
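| + // Control transfers to the handler in that bottommost frame, so the frames
|
| + // of any inlined callees need not be materialized.
|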
| + if (isolate_->optimized_handler_patch_buffer() != NULL) {
|
| + count = 1;
|
| + }
|
| +
|
| output_ = new FrameDescription*[count];
|
| for (int i = 0; i < count; ++i) {
|
| output_[i] = NULL;
|
| @@ -831,19 +861,34 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
|
| ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
|
| function = function_;
|
| }
|
| + Code* non_optimized_code = function->shared()->code();
|
| unsigned height = iterator->Next();
|
| unsigned height_in_bytes = height * kPointerSize;
|
| - if (trace_) {
|
| - PrintF(" translating ");
|
| - function->PrintName();
|
| - PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
|
| - }
|
| + int handler_count = iterator->Next();
|
| + // If we are doing lazy deoptimization for a catch in an optimized frame,
|
| + // one of the handlers in the frame description has already been popped
|
| + // by the throw that targeted it.
|
| + if (isolate_->optimized_handler_patch_buffer() != NULL) {
|
| + ASSERT(frame_index == 0 && output_count_ == 1);
|
| + ASSERT(bailout_type_ == LAZY);
|
| + ASSERT(handler_count > 0);
|
| + --handler_count;
|
| + }
|
| + int handlers_size = handler_count * StackHandlerConstants::kSize;
|
|
|
| // The 'fixed' part of the frame consists of the incoming parameters and
|
| // the part described by JavaScriptFrameConstants.
|
| unsigned fixed_frame_size = ComputeFixedSize(function);
|
| - unsigned input_frame_size = input_->GetFrameSize();
|
| + // TODO(mmassi): what's this for?
|
| + // unsigned input_frame_size = input_->GetFrameSize();
|
| unsigned output_frame_size = height_in_bytes + fixed_frame_size;
|
| + if (FLAG_trace_deopt) {
|
| + PrintF(" translating ");
|
| + function->PrintName();
|
| + PrintF(" => node=%d, height=%d\n",
|
| + node_id.ToInt(), height_in_bytes + handlers_size);
|
| + }
|
| +
|
|
|
| // Allocate and store the output frame description.
|
| FrameDescription* output_frame =
|
| @@ -851,6 +896,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
|
| output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
|
|
|
| bool is_bottommost = (0 == frame_index);
|
| + ASSERT(is_bottommost || handler_count == 0); // We do not inline try-catch.
|
| bool is_topmost = (output_count_ - 1 == frame_index);
|
| ASSERT(frame_index >= 0 && frame_index < output_count_);
|
| ASSERT(output_[frame_index] == NULL);
|
| @@ -871,6 +917,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
|
| // is removed. Subtract 2 * kPointerSize for the context and function slots.
|
| top_address = input_->GetRegister(fp_reg.code()) - (2 * kPointerSize) -
|
| height_in_bytes + has_alignment_padding_ * kPointerSize;
|
| + // TODO(mmassi): Is height_in_bytes the right value?
|
| } else {
|
| top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
|
| }
|
| @@ -879,7 +926,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
|
| // Compute the incoming parameter translation.
|
| int parameter_count = function->shared()->formal_parameter_count() + 1;
|
| unsigned output_offset = output_frame_size;
|
| - unsigned input_offset = input_frame_size;
|
| + unsigned input_offset = input_->GetFrameSize();
|
| for (int i = 0; i < parameter_count; ++i) {
|
| output_offset -= kPointerSize;
|
| DoTranslateCommand(iterator, frame_index, output_offset);
|
| @@ -968,40 +1015,146 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
|
| top_address + output_offset, output_offset, value);
|
| }
|
|
|
| + // Translate the stack-allocated locals.
|
| + int stack_slot_count = function->shared()->scope_info()->StackSlotCount();
|
| + for (int i = 0; i < stack_slot_count; ++i) {
|
| + output_offset -= kPointerSize;
|
| + DoTranslateCommand(iterator, frame_index, output_offset);
|
| + }
|
| +
|
| + // If there are any exception handlers, translate them.
|
| + if (handler_count > 0) {
|
| + // Translate the handler blocks. The output frame needs:
|
| + //
|
| + // incoming args | fixed part | locals | handlers | expression stack
|
| + //
|
| + // because try occurs as a statement, i.e., with the expression stack
|
| + // empty. The input frame has:
|
| + //
|
| + // incoming args | fixed part | spill slots | handlers | outgoing args
|
| + //
|
| + // also because try is a statement, i.e., there is no pending call to
|
| + // interleave handlers and args. This would change if we began inlining
|
| + // functions containing try/catch.
|
| + //
|
| + // TODO(kmillikin): Begin inlining functions containing try/catch.
|
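| + //
|
| + // Each handler occupies StackHandlerConstants::kSize bytes; the loop below
|
| + // copies its five fields (next, code, state, context and fp) from the input
|
| + // frame, rewriting the state and code fields to refer to unoptimized code
|
| + // and re-linking the next pointers at their new locations.
|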
| + input_offset = ComputeOutgoingArgumentSize() + ComputeHandlersSize();
|
| + intptr_t next_handler = 0;
|
| + for (int i = 0; i < handler_count; ++i) {
|
| + // The first two fields (fp and context) are invariant under
|
| + // deoptimization.
|
| + output_offset -= kPointerSize;
|
| + input_offset -= kPointerSize;
|
| + value = input_->GetFrameSlot(input_offset);
|
| + ASSERT(value == fp_value);
|
| + output_frame->SetFrameSlot(output_offset, value);
|
| + if (FLAG_trace_deopt) {
|
| + PrintF(" 0x%08x: [top + %d] <- 0x%08x ; handler[%d] fp\n",
|
| + top_address + output_offset, output_offset, value, i);
|
| + }
|
| +
|
| + output_offset -= kPointerSize;
|
| + input_offset -= kPointerSize;
|
| + value = input_->GetFrameSlot(input_offset);
|
| + output_frame->SetFrameSlot(output_offset, value);
|
| + if (FLAG_trace_deopt) {
|
| + PrintF(" 0x%08x: [top + %d] <- 0x%08x ; handler[%d] context\n",
|
| + top_address + output_offset, output_offset, value, i);
|
| + }
|
| +
|
| + // The state is the same except that the kind is unoptimized.
|
| + output_offset -= kPointerSize;
|
| + input_offset -= kPointerSize;
|
| + value =
|
| + input_->GetFrameSlot(input_offset) & ~StackHandler::kIsOptimizedMask;
|
| + output_frame->SetFrameSlot(output_offset, value);
|
| + if (FLAG_trace_deopt) {
|
| + PrintF(" 0x%08x: [top + %d] <- 0x%08x ; handler[%d] index + kind\n",
|
| + top_address + output_offset, output_offset, value, i);
|
| + }
|
| +
|
| + // For all handlers in the frame, the code is the unoptimized code.
|
| + output_offset -= kPointerSize;
|
| + input_offset -= kPointerSize;
|
| + value = reinterpret_cast<uint32_t>(non_optimized_code);
|
| + output_frame->SetFrameSlot(output_offset, value);
|
| + if (FLAG_trace_deopt) {
|
| + PrintF(" 0x%08x: [top + %d] <- 0x%08x ; handler[%d] code\n",
|
| + top_address + output_offset, output_offset, value, i);
|
| + }
|
| +
|
| + output_offset -= kPointerSize;
|
| + input_offset -= kPointerSize;
|
| + if (i == 0) {
|
| + // Bottommost handler's next link is deoptimization invariant.
|
| + value = input_->GetFrameSlot(input_offset);
|
| + } else {
|
| + value = next_handler;
|
| + }
|
| + output_frame->SetFrameSlot(output_offset, value);
|
| + next_handler = top_address + output_offset;
|
| + if (FLAG_trace_deopt) {
|
| + PrintF(" 0x%08x: [top + %d] <- 0x%08x ; handler[%d] next\n",
|
| + top_address + output_offset, output_offset, value, i);
|
| + }
|
| + }
|
| + *reinterpret_cast<intptr_t*>(isolate_->handler_address()) = next_handler;
|
| + }
|
| +
|
| // Translate the rest of the frame.
|
| - for (unsigned i = 0; i < height; ++i) {
|
| + while (output_offset > 0) {
|
| output_offset -= kPointerSize;
|
| DoTranslateCommand(iterator, frame_index, output_offset);
|
| }
|
| ASSERT(0 == output_offset);
|
|
|
| // Compute this frame's PC, state, and continuation.
|
| - Code* non_optimized_code = function->shared()->code();
|
| - FixedArray* raw_data = non_optimized_code->deoptimization_data();
|
| - DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
|
| - Address start = non_optimized_code->instruction_start();
|
| - unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
|
| - unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
|
| - intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
|
| - output_frame->SetPc(pc_value);
|
| -
|
| - FullCodeGenerator::State state =
|
| - FullCodeGenerator::StateField::decode(pc_and_state);
|
| + // TODO(mmassi): Where is non_optimized_code needed?
|
| + // Code* non_optimized_code = function->shared()->code();
|
| + int pc_offset;
|
| + FullCodeGenerator::State state;
|
| + Builtins::Name continuation;
|
| + if (isolate_->optimized_handler_patch_buffer() != NULL) {
|
| + ASSERT(frame_index == 0 && output_count_ == 1);
|
| + // Catching in optimized code. Deopt to the corresponding unoptimized
|
| + // catch handler (not to the instruction following the one that threw).
|
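| + // The unoptimized code's handler table maps each handler index to the
|
| + // code offset of its catch entry.
|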
| + FixedArray* handler_table = non_optimized_code->handler_table();
|
| + int handler_index = isolate_->optimized_handler_handler_index();
|
| + pc_offset = Smi::cast(handler_table->get(handler_index))->value();
|
| + state = FullCodeGenerator::NO_REGISTERS;
|
| + continuation = Builtins::kNotifyLazyDeoptimized;
|
| + // The exception value is needed in eax.
|
| + output_frame->SetRegister(
|
| + eax.code(),
|
| + reinterpret_cast<intptr_t>(isolate_->optimized_pending_exception()));
|
| + isolate_->clear_optimized_pending_exception();
|
| + } else {
|
| + FixedArray* raw_data = non_optimized_code->deoptimization_data();
|
| + DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
|
| + unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
|
| + pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
|
| + state = FullCodeGenerator::StateField::decode(pc_and_state);
|
| + continuation = (bailout_type_ == EAGER)
|
| + ? Builtins::kNotifyDeoptimized
|
| + : Builtins::kNotifyLazyDeoptimized;
|
| + // For lazy deopt we preserve eax on the stack. Ensure that it is not
|
| + // the zap value (a probably-invalid HeapObject*).
|
| + // TODO(mmassi): According to Kevin the test should be "!= EAGER"...
|
| + if (bailout_type_ == EAGER && is_topmost) {
|
| + output_frame->SetRegister(eax.code(),
|
| + reinterpret_cast<intptr_t>(Smi::FromInt(0)));
|
| + }
|
| + }
|
| + byte* code_start = non_optimized_code->instruction_start();
|
| + output_frame->SetPc(reinterpret_cast<uint32_t>(code_start + pc_offset));
|
| +
|
| output_frame->SetState(Smi::FromInt(state));
|
|
|
| // Set the continuation for the topmost frame.
|
| if (is_topmost && bailout_type_ != DEBUGGER) {
|
| Builtins* builtins = isolate_->builtins();
|
| - Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
|
| - if (bailout_type_ == LAZY) {
|
| - continuation = builtins->builtin(Builtins::kNotifyLazyDeoptimized);
|
| - } else if (bailout_type_ == SOFT) {
|
| - continuation = builtins->builtin(Builtins::kNotifySoftDeoptimized);
|
| - } else {
|
| - ASSERT(bailout_type_ == EAGER);
|
| - }
|
| - output_frame->SetContinuation(
|
| - reinterpret_cast<intptr_t>(continuation->entry()));
|
| + byte* entry = builtins->builtin(continuation)->entry();
|
| + output_frame->SetContinuation(reinterpret_cast<uint32_t>(entry));
|
| }
|
| }
|
|
|
| @@ -2047,7 +2200,18 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
|
| case Translation::STACK_SLOT: {
|
| int input_slot_index = iterator->Next();
|
| unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
|
| - intptr_t input_value = input_->GetFrameSlot(input_offset);
|
| + intptr_t input_value;
|
| + if (isolate_->optimized_handler_patch_buffer() != NULL &&
|
| + input_offset >= input_->frame_size()) {
|
| + // Skip stack slots that were above the handler (they can only be outgoing
|
| + // arguments, which are discarded because try is a statement).
|
| + input_value = static_cast<intptr_t>(NULL);
|
| + if (FLAG_trace_deopt) {
|
| + PrintF(" SKIPPING SLOT ABOVE HANDLER (ARGUMENT)\n");
|
| + }
|
| + } else {
|
| + input_value = input_->GetFrameSlot(input_offset);
|
| + }
|
| if (trace_) {
|
| PrintF(" 0x%08" V8PRIxPTR ": ",
|
| output_[frame_index]->GetTop() + output_offset);
|
| @@ -2212,7 +2376,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
|
| case Translation::COMPILED_STUB_FRAME:
|
| case Translation::DUPLICATE:
|
| UNREACHABLE(); // Malformed input.
|
| - return false;
|
| + return false;
|
|
|
| case Translation::REGISTER: {
|
| int output_reg = iterator->Next();
|
| @@ -2468,10 +2632,15 @@ void Deoptimizer::VerifyInterruptCode(Code* unoptimized_code,
|
|
|
|
|
| unsigned Deoptimizer::ComputeInputFrameSize() const {
|
| + // This is the part of the frame size that is the same for both optimized
|
| + // and unoptimized frames: the incoming parameters and the fixed part of
|
| + // the frame.
|
| unsigned fixed_size = ComputeFixedSize(function_);
|
| +
|
| // The fp-to-sp delta already takes the context and the function
|
| // into account so we have to avoid double counting them (-2).
|
| unsigned result = fixed_size + fp_to_sp_delta_ - (2 * kPointerSize);
|
| +
|
| #ifdef DEBUG
|
| if (bailout_type_ == OSR) {
|
| // TODO(kasperl): It would be nice if we could verify that the
|
| @@ -2479,9 +2648,21 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
|
| // environment at the OSR entry. The code for that his built into
|
| // the DoComputeOsrOutputFrame function for now.
|
| } else if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
|
| - unsigned stack_slots = compiled_code_->stack_slots();
|
| - unsigned outgoing_size = ComputeOutgoingArgumentSize();
|
| - ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
|
| + // Verify that the actual frame size matches the expected size (fixed
|
| + // part + spill slots + exception handlers + outgoing arguments).
|
| + unsigned spill_slots_size = compiled_code_->stack_slots() * kPointerSize;
|
| + unsigned handlers_size = ComputeHandlersSize();
|
| + // If we are doing lazy deoptimization for a catch in an optimized
|
| + // frame, one of the handlers in the frame description has already
|
| + // been popped by the throw that targeted it.
|
| + if (isolate_->optimized_handler_patch_buffer() != NULL) {
|
| + ASSERT(bailout_type_ == LAZY);
|
| + ASSERT(static_cast<int>(handlers_size) >= StackHandlerConstants::kSize);
|
| + handlers_size -= StackHandlerConstants::kSize;
|
| + }
|
| + unsigned outgoing_arg_size = ComputeOutgoingArgumentSize();
|
| + ASSERT(result ==
|
| + fixed_size + spill_slots_size + handlers_size + outgoing_arg_size);
|
| }
|
| #endif
|
| return result;
|
| @@ -2509,6 +2690,12 @@ unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
|
|
|
|
|
| unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
|
| + // Since try is a statement, there will not be arguments left on the stack
|
| + // once we have popped the handler.
|
| + if (isolate_->optimized_handler_patch_buffer() != NULL) {
|
| + return 0;
|
| + }
|
| +
|
| DeoptimizationInputData* data = DeoptimizationInputData::cast(
|
| compiled_code_->deoptimization_data());
|
| unsigned height = data->ArgumentsStackHeight(bailout_id_)->value();
|
| @@ -2516,6 +2703,14 @@ unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
|
| }
|
|
|
|
|
| +unsigned Deoptimizer::ComputeHandlersSize() const {
|
| + DeoptimizationInputData* data = DeoptimizationInputData::cast(
|
| + compiled_code_->deoptimization_data());
|
| + unsigned handler_count = data->HandlerCount(bailout_id_)->value();
|
| + return handler_count * StackHandlerConstants::kSize;
|
| +}
|
| +
|
| +
|
| Object* Deoptimizer::ComputeLiteral(int index) const {
|
| DeoptimizationInputData* data = DeoptimizationInputData::cast(
|
| compiled_code_->deoptimization_data());
|
| @@ -2750,11 +2945,13 @@ void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) {
|
|
|
| void Translation::BeginJSFrame(BailoutId node_id,
|
| int literal_id,
|
| - unsigned height) {
|
| + unsigned height,
|
| + int handler_count) {
|
| buffer_->Add(JS_FRAME, zone());
|
| buffer_->Add(node_id.ToInt(), zone());
|
| buffer_->Add(literal_id, zone());
|
| buffer_->Add(height, zone());
|
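| + // The handler count is a new fourth operand of JS_FRAME records;
|
| + // NumberOfOperandsFor() is updated to match below.
|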
| + buffer_->Add(handler_count, zone());
|
| }
|
|
|
|
|
| @@ -2851,7 +3048,7 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
|
| case CONSTRUCT_STUB_FRAME:
|
| return 2;
|
| case JS_FRAME:
|
| - return 3;
|
| + return 4;
|
| }
|
| UNREACHABLE();
|
| return -1;
|
|
|