Index: runtime/vm/flow_graph_compiler.cc
diff --git a/runtime/vm/flow_graph_compiler.cc b/runtime/vm/flow_graph_compiler.cc
index d64690937963ea03d7a98e5bb2c0bd04ee100936..d045c7544efa45d282c8d529bece98faaf6d9d98 100644
--- a/runtime/vm/flow_graph_compiler.cc
+++ b/runtime/vm/flow_graph_compiler.cc
@@ -201,6 +201,8 @@ FlowGraphCompiler::FlowGraphCompiler(
       exception_handlers_list_(NULL),
       pc_descriptors_list_(NULL),
       stackmap_table_builder_(NULL),
+      code_source_map_builder_(NULL),
+      saved_code_size_(0),
       block_info_(block_order_.length()),
       deopt_infos_(),
       static_calls_target_table_(),
@@ -531,7 +533,9 @@ void FlowGraphCompiler::VisitBlocks() {
     LoopInfoComment(assembler(), *entry, *loop_headers);
     entry->set_offset(assembler()->CodeSize());
+    SaveCodeSize();
     entry->EmitNativeCode(this);
+    MaybeEmitCodeSourceMapEntry(entry->token_pos());
     // Compile all successors until an exit, branch, or a block entry.
     for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) {
       Instruction* instr = it.Current();
@@ -556,12 +560,14 @@ void FlowGraphCompiler::VisitBlocks() {
       if (instr->IsParallelMove()) {
         parallel_move_resolver_.EmitNativeCode(instr->AsParallelMove());
       } else {
+        SaveCodeSize();
         EmitInstructionPrologue(instr);
         ASSERT(pending_deoptimization_env_ == NULL);
         pending_deoptimization_env_ = instr->env();
         instr->EmitNativeCode(this);
         pending_deoptimization_env_ = NULL;
         EmitInstructionEpilogue(instr);
+        MaybeEmitCodeSourceMapEntry(instr->token_pos());
       }
 #if defined(DEBUG)
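
The two hunks above show the core pattern of this change: each instruction's EmitNativeCode() is bracketed by SaveCodeSize() before and MaybeEmitCodeSourceMapEntry() after, so any bytes the instruction emits are attributed to its token position. A minimal standalone sketch of that pattern, using toy stand-ins (ToyAssembler, ToyBuilder, and ToyCompiler are illustrative, not the VM's real classes):

    #include <cstdint>
    #include <utility>
    #include <vector>

    // Toy stand-ins for the VM's Assembler and CodeSourceMapBuilder.
    struct ToyAssembler {
      intptr_t CodeSize() const { return size_; }
      void Emit(intptr_t bytes) { size_ += bytes; }
      intptr_t size_ = 0;
    };

    struct ToyBuilder {
      // Each entry associates a PC offset with a source token position.
      void AddEntry(intptr_t pc_offset, intptr_t token_pos) {
        entries_.push_back({pc_offset, token_pos});
      }
      std::vector<std::pair<intptr_t, intptr_t>> entries_;
    };

    struct ToyCompiler {
      // Called before an instruction's EmitNativeCode().
      void SaveCodeSize() { saved_code_size_ = assembler_.CodeSize(); }

      // Called after: records an entry only if bytes were actually
      // emitted, so instructions that fold away leave no zero-length
      // entries in the map.
      bool MaybeEmitEntry(intptr_t token_pos) {
        if (saved_code_size_ < assembler_.CodeSize()) {
          builder_.AddEntry(saved_code_size_, token_pos);
          SaveCodeSize();
          return true;
        }
        return false;
      }

      ToyAssembler assembler_;
      ToyBuilder builder_;
      intptr_t saved_code_size_ = 0;
    };
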
@@ -738,10 +744,14 @@ void FlowGraphCompiler::AddSlowPathCode(SlowPathCode* code) {
 void FlowGraphCompiler::GenerateDeferredCode() {
   for (intptr_t i = 0; i < slow_path_code_.length(); i++) {
+    SaveCodeSize();
     slow_path_code_[i]->GenerateCode(this);
+    MaybeEmitCodeSourceMapEntry(TokenPosition::kDeferredSlowPath);
   }
   for (intptr_t i = 0; i < deopt_infos_.length(); i++) {
+    SaveCodeSize();
     deopt_infos_[i]->GenerateCode(this, i);
+    MaybeEmitCodeSourceMapEntry(TokenPosition::kDeferredDeoptInfo);
   }
 }
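
Slow paths and deopt stubs are emitted out of line and have no single source position, so the hunk above tags their code ranges with the synthetic markers TokenPosition::kDeferredSlowPath and TokenPosition::kDeferredDeoptInfo rather than a real token offset. A hedged sketch of the idea; the negative encodings below are assumptions, the real ones live in runtime/vm/token_position.h:

    #include <cstdint>

    // Illustrative only: negative values standing in for synthetic
    // token positions. The VM's actual TokenPosition encodings differ.
    struct SketchTokenPosition {
      static constexpr intptr_t kDeferredSlowPath = -2;   // assumed value
      static constexpr intptr_t kDeferredDeoptInfo = -3;  // assumed value
      intptr_t value_;
      // A synthetic position maps a code range to a category of
      // compiler-generated code rather than to a source location.
      bool IsSynthetic() const { return value_ < 0; }
    };
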
@@ -1438,7 +1448,9 @@ void ParallelMoveResolver::EmitNativeCode(ParallelMoveInstr* parallel_move) {
     const MoveOperands& move = *moves_[i];
     if (!move.IsEliminated()) {
       ASSERT(move.src().IsConstant());
+      compiler_->SaveCodeSize();
       EmitMove(i);
+      compiler_->MaybeEmitCodeSourceMapEntry(TokenPosition::kParallelMove);
     }
   }
@@ -1514,13 +1526,17 @@ void ParallelMoveResolver::PerformMove(int index) {
     const MoveOperands& other_move = *moves_[i];
     if (other_move.Blocks(destination)) {
       ASSERT(other_move.IsPending());
+      compiler_->SaveCodeSize();
       EmitSwap(index);
+      compiler_->MaybeEmitCodeSourceMapEntry(TokenPosition::kParallelMove);
       return;
     }
   }
   // This move is not blocked.
+  compiler_->SaveCodeSize();
   EmitMove(index);
+  compiler_->MaybeEmitCodeSourceMapEntry(TokenPosition::kParallelMove);
 }
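
ParallelMove instructions are synthesized by the register allocator and carry no source position of their own, so the two hunks above tag their emitted chunks with TokenPosition::kParallelMove. The size check inside MaybeEmitCodeSourceMapEntry() also matters here: an eliminated move emits nothing and must not produce a zero-length map entry. A self-contained toy illustration (the names, the sentinel value, and the 4-byte move size are all assumptions):

    #include <cstdint>
    #include <utility>
    #include <vector>

    constexpr intptr_t kParallelMove = -4;  // assumed sentinel value

    struct Move { bool eliminated; };

    intptr_t code_size = 0;
    intptr_t saved_size = 0;
    std::vector<std::pair<intptr_t, intptr_t>> map_entries;

    void EmitMove(const Move& m) {
      if (!m.eliminated) code_size += 4;  // pretend every move is 4 bytes
    }

    void ResolveAll(const std::vector<Move>& moves) {
      for (const Move& m : moves) {
        saved_size = code_size;          // SaveCodeSize()
        EmitMove(m);
        if (saved_size < code_size) {    // MaybeEmitCodeSourceMapEntry()
          map_entries.push_back({saved_size, kParallelMove});
        }
      }
    }
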
@@ -1778,6 +1794,25 @@ RawArray* FlowGraphCompiler::CallerInliningIdMap() const {
 }
+void FlowGraphCompiler::SaveCodeSize() {
+  // Remember how many bytes of code we emitted. This function
+  // is called before we call into an instruction's EmitNativeCode.

    rmacnak 2016/02/25 23:04:40: NOT_IN_PRODUCT
    rmacnak 2016/02/25 23:04:40: emitted so far
    Cutch 2016/02/26 15:59:22: Done.
    Cutch 2016/02/26 15:59:22: Done.

+  saved_code_size_ = assembler()->CodeSize();
+}
+
+
+bool FlowGraphCompiler::MaybeEmitCodeSourceMapEntry(TokenPosition token_pos) {
+  // This function is called after each instruction's EmitNativeCode.

    rmacnak 2016/02/25 23:04:40: NOT_IN_PRODUCT
    Cutch 2016/02/26 15:59:22: Done.

+  if (saved_code_size_ < assembler()->CodeSize()) {
+    // We emitted code, now associate the emitted code chunk with |token_pos|.
+    code_source_map_builder()->AddEntry(saved_code_size_, token_pos);
+    SaveCodeSize();
+    return true;
+  }
+  return false;
+}
+
+
 void FlowGraphCompiler::EmitPolymorphicInstanceCall(
     const ICData& ic_data,
     intptr_t argument_count,
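
Because AddEntry() is always called with saved_code_size_, which only grows as code is emitted, the builder accumulates its (PC offset, token position) pairs in increasing offset order. That ordering is what would let a consumer such as a profiler map a sampled PC offset back to a source position with a binary search. A sketch of such a lookup; Entry and TokenPositionFor are illustrative and not the VM's actual CodeSourceMap API:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct Entry {
      intptr_t pc_offset;
      intptr_t token_pos;
    };

    // Returns the token position of the last entry at or before
    // |pc_offset|, or -1 if no entry covers that offset. Assumes
    // |entries| is sorted by pc_offset, which the builder guarantees.
    intptr_t TokenPositionFor(const std::vector<Entry>& entries,
                              intptr_t pc_offset) {
      auto it = std::upper_bound(
          entries.begin(), entries.end(), pc_offset,
          [](intptr_t value, const Entry& e) { return value < e.pc_offset; });
      if (it == entries.begin()) return -1;
      return (it - 1)->token_pos;
    }
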