Chromium Code Reviews

Unified Diff: runtime/vm/flow_graph_compiler.cc

Issue 1740503002: Build CodeSourceMap for each code object (Closed) Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: Created 4 years, 10 months ago
 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
 // for details. All rights reserved. Use of this source code is governed by a
 // BSD-style license that can be found in the LICENSE file.
 
 #include "vm/globals.h"  // Needed here to get TARGET_ARCH_XXX.
 
 #include "vm/flow_graph_compiler.h"
 
 #include "vm/bit_vector.h"
 #include "vm/cha.h"
(...skipping 173 matching lines...)
     : thread_(Thread::Current()),
       zone_(Thread::Current()->zone()),
       assembler_(assembler),
       parsed_function_(parsed_function),
       flow_graph_(*flow_graph),
       block_order_(*flow_graph->CodegenBlockOrder(is_optimizing)),
       current_block_(NULL),
       exception_handlers_list_(NULL),
       pc_descriptors_list_(NULL),
       stackmap_table_builder_(NULL),
+      code_source_map_builder_(NULL),
+      saved_code_size_(0),
       block_info_(block_order_.length()),
       deopt_infos_(),
       static_calls_target_table_(),
       is_optimizing_(is_optimizing),
       may_reoptimize_(false),
       intrinsic_mode_(false),
       double_class_(Class::ZoneHandle(
           isolate()->object_store()->double_class())),
       mint_class_(Class::ZoneHandle(
           isolate()->object_store()->mint_class())),
(...skipping 316 matching lines...)
 
 #if defined(DEBUG)
   if (!is_optimizing()) {
     FrameStateClear();
   }
 #endif
 
   LoopInfoComment(assembler(), *entry, *loop_headers);
 
   entry->set_offset(assembler()->CodeSize());
+  BeginCodeSourceRange();
   entry->EmitNativeCode(this);
+  EndCodeSourceRange(entry->token_pos());
   // Compile all successors until an exit, branch, or a block entry.
   for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) {
     Instruction* instr = it.Current();
     // Compose intervals.
     if (instr->has_inlining_id() && is_optimizing()) {
       if (prev_inlining_id != instr->inlining_id()) {
         intervals.Add(
             IntervalStruct(prev_offset, prev_inlining_pos, prev_inlining_id));
         prev_offset = assembler()->CodeSize();
         prev_inlining_id = instr->inlining_id();
         prev_inlining_pos = inline_id_to_token_pos_[prev_inlining_id];
         if (prev_inlining_id > max_inlining_id) {
           max_inlining_id = prev_inlining_id;
         }
       }
     }
     if (FLAG_code_comments ||
         FLAG_disassemble || FLAG_disassemble_optimized) {
       if (FLAG_source_lines) {
         EmitSourceLine(instr);
       }
       EmitComment(instr);
     }
     if (instr->IsParallelMove()) {
       parallel_move_resolver_.EmitNativeCode(instr->AsParallelMove());
     } else {
+      BeginCodeSourceRange();
       EmitInstructionPrologue(instr);
       ASSERT(pending_deoptimization_env_ == NULL);
       pending_deoptimization_env_ = instr->env();
       instr->EmitNativeCode(this);
       pending_deoptimization_env_ = NULL;
       EmitInstructionEpilogue(instr);
+      EndCodeSourceRange(instr->token_pos());
     }
 
 #if defined(DEBUG)
     if (!is_optimizing()) {
       FrameStateUpdateWith(instr);
     }
 #endif
   }
 
 #if defined(DEBUG)
(...skipping 157 matching lines...)
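
Note on the bracketing introduced above: BeginCodeSourceRange() snapshots the assembler's current code size, and EndCodeSourceRange(token_pos) records a (start offset, token position) entry only when the bracketed emission actually produced code, so instructions that emit nothing add no entries and the map stays compact. A minimal stand-alone sketch of that pattern, using simplified stand-in types rather than the VM's Assembler, TokenPosition, or CodeSourceMapBuilder, might look like:

  // Simplified illustration of the begin/end range bookkeeping; the real
  // FlowGraphCompiler stores saved_code_size_ and forwards entries to a
  // CodeSourceMapBuilder instead of this vector.
  #include <cstdint>
  #include <utility>
  #include <vector>

  class RangeRecorder {
   public:
    // Called before emitting: remember where the range would start.
    void BeginCodeSourceRange(intptr_t code_size) {
      saved_code_size_ = code_size;
    }

    // Called after emitting: record an entry only if code actually grew.
    bool EndCodeSourceRange(intptr_t code_size, int32_t token_pos) {
      if (saved_code_size_ < code_size) {
        entries_.push_back({saved_code_size_, token_pos});
        saved_code_size_ = code_size;  // the next range starts here
        return true;
      }
      return false;
    }

    const std::vector<std::pair<intptr_t, int32_t>>& entries() const {
      return entries_;
    }

   private:
    intptr_t saved_code_size_ = 0;
    std::vector<std::pair<intptr_t, int32_t>> entries_;  // (offset, token pos)
  };
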
 }
 
 
 void FlowGraphCompiler::AddSlowPathCode(SlowPathCode* code) {
   slow_path_code_.Add(code);
 }
 
 
 void FlowGraphCompiler::GenerateDeferredCode() {
   for (intptr_t i = 0; i < slow_path_code_.length(); i++) {
+    BeginCodeSourceRange();
     slow_path_code_[i]->GenerateCode(this);
+    EndCodeSourceRange(TokenPosition::kDeferredSlowPath);
   }
   for (intptr_t i = 0; i < deopt_infos_.length(); i++) {
+    BeginCodeSourceRange();
     deopt_infos_[i]->GenerateCode(this, i);
+    EndCodeSourceRange(TokenPosition::kDeferredDeoptInfo);
   }
 }
 
 
 void FlowGraphCompiler::AddExceptionHandler(intptr_t try_index,
                                             intptr_t outer_try_index,
                                             intptr_t pc_offset,
                                             const Array& handler_types,
                                             bool needs_stacktrace) {
   exception_handlers_list_->AddHandler(try_index,
(...skipping 676 matching lines...)
     // and skipping such moves with register destinations keeps those
     // registers free for the whole algorithm.
     if (!move.IsEliminated() && !move.src().IsConstant()) PerformMove(i);
   }
 
   // Perform the moves with constant sources.
   for (int i = 0; i < moves_.length(); ++i) {
     const MoveOperands& move = *moves_[i];
     if (!move.IsEliminated()) {
       ASSERT(move.src().IsConstant());
+      compiler_->BeginCodeSourceRange();
       EmitMove(i);
+      compiler_->EndCodeSourceRange(TokenPosition::kParallelMove);
     }
   }
 
   moves_.Clear();
 }
 
 
 void ParallelMoveResolver::BuildInitialMoveList(
     ParallelMoveInstr* parallel_move) {
   // Perform a linear sweep of the moves to add them to the initial list of
(...skipping 55 matching lines...)
     return;
   }
 
   // The move may be blocked on a (at most one) pending move, in which case
   // we have a cycle. Search for such a blocking move and perform a swap to
   // resolve it.
   for (int i = 0; i < moves_.length(); ++i) {
     const MoveOperands& other_move = *moves_[i];
     if (other_move.Blocks(destination)) {
       ASSERT(other_move.IsPending());
+      compiler_->BeginCodeSourceRange();
       EmitSwap(index);
+      compiler_->EndCodeSourceRange(TokenPosition::kParallelMove);
       return;
     }
   }
 
   // This move is not blocked.
+  compiler_->BeginCodeSourceRange();
   EmitMove(index);
+  compiler_->EndCodeSourceRange(TokenPosition::kParallelMove);
 }
 
 
 bool ParallelMoveResolver::IsScratchLocation(Location loc) {
   for (int i = 0; i < moves_.length(); ++i) {
     if (moves_[i]->Blocks(loc)) {
       return false;
     }
   }
 
(...skipping 252 matching lines...)
       Array::New(caller_inline_id_.length(), Heap::kOld));
   Smi& smi = Smi::Handle();
   for (intptr_t i = 0; i < caller_inline_id_.length(); i++) {
     smi = Smi::New(caller_inline_id_[i]);
     res.SetAt(i, smi);
   }
   return res.raw();
 }
 
 
+void FlowGraphCompiler::BeginCodeSourceRange() {
+  NOT_IN_PRODUCT(
+    // Remember how many bytes of code we emitted so far. This function
+    // is called before we call into an instruction's EmitNativeCode.
+    saved_code_size_ = assembler()->CodeSize();
+  );
+}
+
+
+bool FlowGraphCompiler::EndCodeSourceRange(TokenPosition token_pos) {
+  NOT_IN_PRODUCT(
+    // This function is called after each instruction's EmitNativeCode.
+    if (saved_code_size_ < assembler()->CodeSize()) {
+      // We emitted more code, now associate the emitted code chunk with
+      // |token_pos|.
+      code_source_map_builder()->AddEntry(saved_code_size_, token_pos);
+      BeginCodeSourceRange();
+      return true;
+    }
+  );
+  return false;
+}
+
+
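
Because each EndCodeSourceRange() restarts the range at the current code size, entries are appended in code order with increasing start offsets, and each range implicitly extends to the start of the next one. For illustration only, a consumer holding such ordered (pc offset, token position) pairs could resolve the position for a given code offset as sketched below; CodeSourceMapEntry and LookupTokenPosition are hypothetical names for this sketch, not the VM's actual CodeSourceMap API:

  // Hypothetical consumer-side sketch: entries sorted by the offset at
  // which each source range begins.
  #include <cstdint>
  #include <vector>

  struct CodeSourceMapEntry {  // hypothetical type, not the VM's
    intptr_t pc_offset;        // code offset where the range starts
    int32_t token_pos;         // raw token position value for that range
  };

  // The range covering |pc| is the last entry whose start offset is <= pc.
  int32_t LookupTokenPosition(const std::vector<CodeSourceMapEntry>& entries,
                              intptr_t pc) {
    int32_t result = -1;  // sentinel: no source information for this pc
    for (const CodeSourceMapEntry& e : entries) {
      if (e.pc_offset > pc) break;
      result = e.token_pos;
    }
    return result;
  }
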
 void FlowGraphCompiler::EmitPolymorphicInstanceCall(
     const ICData& ic_data,
     intptr_t argument_count,
     const Array& argument_names,
     intptr_t deopt_id,
     TokenPosition token_pos,
     LocationSummary* locs) {
   if (FLAG_polymorphic_with_deopt) {
     Label* deopt = AddDeoptStub(deopt_id,
                                 ICData::kDeoptPolymorphicInstanceCallTestFail);
(...skipping 90 matching lines...)
 
 
 void FlowGraphCompiler::FrameStateClear() {
   ASSERT(!is_optimizing());
   frame_state_.TruncateTo(0);
 }
 #endif
 
 
 }  // namespace dart