OLD | NEW |
1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_XXX. | 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_XXX. |
6 | 6 |
7 #include "vm/flow_graph_compiler.h" | 7 #include "vm/flow_graph_compiler.h" |
8 | 8 |
9 #include "vm/dart_entry.h" | 9 #include "vm/dart_entry.h" |
10 #include "vm/debugger.h" | 10 #include "vm/debugger.h" |
(...skipping 15 matching lines...) |
26 DECLARE_FLAG(bool, report_usage_count); | 26 DECLARE_FLAG(bool, report_usage_count); |
27 DECLARE_FLAG(bool, trace_functions); | 27 DECLARE_FLAG(bool, trace_functions); |
28 DECLARE_FLAG(int, optimization_counter_threshold); | 28 DECLARE_FLAG(int, optimization_counter_threshold); |
29 | 29 |
30 | 30 |
31 FlowGraphCompiler::FlowGraphCompiler( | 31 FlowGraphCompiler::FlowGraphCompiler( |
32 Assembler* assembler, | 32 Assembler* assembler, |
33 const ParsedFunction& parsed_function, | 33 const ParsedFunction& parsed_function, |
34 const GrowableArray<BlockEntryInstr*>& block_order, | 34 const GrowableArray<BlockEntryInstr*>& block_order, |
35 bool is_optimizing, | 35 bool is_optimizing, |
| 36 bool is_ssa, |
36 bool is_leaf) | 37 bool is_leaf) |
37 : assembler_(assembler), | 38 : assembler_(assembler), |
38 parsed_function_(parsed_function), | 39 parsed_function_(parsed_function), |
39 block_order_(block_order), | 40 block_order_(block_order), |
40 current_block_(NULL), | 41 current_block_(NULL), |
41 exception_handlers_list_(NULL), | 42 exception_handlers_list_(NULL), |
42 pc_descriptors_list_(NULL), | 43 pc_descriptors_list_(NULL), |
43 stackmap_builder_(NULL), | 44 stackmap_builder_(NULL), |
44 block_info_(block_order.length()), | 45 block_info_(block_order.length()), |
45 deopt_stubs_(), | 46 deopt_stubs_(), |
46 is_optimizing_(is_optimizing), | 47 is_optimizing_(is_optimizing), |
| 48 is_ssa_(is_ssa), |
47 is_dart_leaf_(is_leaf), | 49 is_dart_leaf_(is_leaf), |
48 bool_true_(Bool::ZoneHandle(Bool::True())), | 50 bool_true_(Bool::ZoneHandle(Bool::True())), |
49 bool_false_(Bool::ZoneHandle(Bool::False())), | 51 bool_false_(Bool::ZoneHandle(Bool::False())), |
50 double_class_(Class::ZoneHandle( | 52 double_class_(Class::ZoneHandle( |
51 Isolate::Current()->object_store()->double_class())), | 53 Isolate::Current()->object_store()->double_class())), |
52 frame_register_allocator_(this, is_optimizing) { | 54 frame_register_allocator_(this, is_optimizing, is_ssa), |
| 55 parallel_move_resolver_(this) { |
53 ASSERT(assembler != NULL); | 56 ASSERT(assembler != NULL); |
| 57 ASSERT(is_optimizing_ || !is_ssa_); |
54 } | 58 } |
55 | 59 |
56 | 60 |
57 FlowGraphCompiler::~FlowGraphCompiler() { | 61 FlowGraphCompiler::~FlowGraphCompiler() { |
58 // BlockInfos are zone-allocated, so their destructors are not called. | 62 // BlockInfos are zone-allocated, so their destructors are not called. |
59 // Verify the labels explicitly here. | 63 // Verify the labels explicitly here. |
60 for (int i = 0; i < block_info_.length(); ++i) { | 64 for (int i = 0; i < block_info_.length(); ++i) { |
61 ASSERT(!block_info_[i]->label.IsLinked()); | 65 ASSERT(!block_info_[i]->label.IsLinked()); |
62 ASSERT(!block_info_[i]->label.HasNear()); | 66 ASSERT(!block_info_[i]->label.HasNear()); |
63 } | 67 } |
(...skipping 30 matching lines...) |
94 assembler()->Comment("B%d", i); | 98 assembler()->Comment("B%d", i); |
95 // Compile the block entry. | 99 // Compile the block entry. |
96 BlockEntryInstr* entry = block_order()[i]; | 100 BlockEntryInstr* entry = block_order()[i]; |
97 set_current_block(entry); | 101 set_current_block(entry); |
98 entry->PrepareEntry(this); | 102 entry->PrepareEntry(this); |
99 // Compile all successors until an exit, branch, or a block entry. | 103 // Compile all successors until an exit, branch, or a block entry. |
100 Instruction* instr = entry; | 104 Instruction* instr = entry; |
101 for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) { | 105 for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) { |
102 instr = it.Current(); | 106 instr = it.Current(); |
103 if (FLAG_code_comments) EmitComment(instr); | 107 if (FLAG_code_comments) EmitComment(instr); |
104 ASSERT(instr->locs() != NULL); | 108 if (instr->IsParallelMove()) { |
105 EmitInstructionPrologue(instr); | 109 parallel_move_resolver_.EmitNativeCode(instr->AsParallelMove()); |
106 instr->EmitNativeCode(this); | 110 } else { |
| 111 ASSERT(instr->locs() != NULL); |
| 112 EmitInstructionPrologue(instr); |
| 113 pending_deoptimization_env_ = instr->env(); |
| 114 instr->EmitNativeCode(this); |
| 115 } |
107 } | 116 } |
108 if (instr->next() != NULL) { | 117 if (instr->next() != NULL) { |
109 BlockEntryInstr* successor = instr->next()->AsBlockEntry(); | 118 BlockEntryInstr* successor = instr->next()->AsBlockEntry(); |
110 ASSERT(successor != NULL); | 119 ASSERT(successor != NULL); |
111 frame_register_allocator()->Spill(); | 120 frame_register_allocator()->Spill(); |
112 // The block ended with a "goto". We can fall through if it is the | 121 // The block ended with a "goto". We can fall through if it is the |
113 // next block in the list. Otherwise, we need a jump. | 122 // next block in the list. Otherwise, we need a jump. |
114 if ((i == block_order().length() - 1) || | 123 if ((i == block_order().length() - 1) || |
115 (block_order()[i + 1] != successor)) { | 124 (block_order()[i + 1] != successor)) { |
116 assembler()->jmp(GetBlockLabel(successor)); | 125 assembler()->jmp(GetBlockLabel(successor)); |
(...skipping 65 matching lines...) |
182 | 191 |
183 Label* FlowGraphCompiler::AddDeoptStub(intptr_t deopt_id, | 192 Label* FlowGraphCompiler::AddDeoptStub(intptr_t deopt_id, |
184 intptr_t deopt_token_pos, | 193 intptr_t deopt_token_pos, |
185 intptr_t try_index, | 194 intptr_t try_index, |
186 DeoptReasonId reason, | 195 DeoptReasonId reason, |
187 Register reg1, | 196 Register reg1, |
188 Register reg2, | 197 Register reg2, |
189 Register reg3) { | 198 Register reg3) { |
190 DeoptimizationStub* stub = | 199 DeoptimizationStub* stub = |
191 new DeoptimizationStub(deopt_id, deopt_token_pos, try_index, reason); | 200 new DeoptimizationStub(deopt_id, deopt_token_pos, try_index, reason); |
192 frame_register_allocator()->SpillInDeoptStub(stub); | 201 if (pending_deoptimization_env_ == NULL) { |
193 if (reg1 != kNoRegister) stub->Push(reg1); | 202 frame_register_allocator()->SpillInDeoptStub(stub); |
194 if (reg2 != kNoRegister) stub->Push(reg2); | 203 if (reg1 != kNoRegister) stub->Push(reg1); |
195 if (reg3 != kNoRegister) stub->Push(reg3); | 204 if (reg2 != kNoRegister) stub->Push(reg2); |
| 205 if (reg3 != kNoRegister) stub->Push(reg3); |
| 206 } else { |
| 207 stub->set_deoptimization_env(pending_deoptimization_env_); |
| 208 } |
196 deopt_stubs_.Add(stub); | 209 deopt_stubs_.Add(stub); |
197 return stub->entry_label(); | 210 return stub->entry_label(); |
198 } | 211 } |
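
A minimal standalone sketch of the two bookkeeping paths AddDeoptStub now takes: with no pending environment, the stub records explicit register pushes (the stack-machine allocator's spill protocol); with one (SSA compilation), it simply stores the environment for the deoptimizer to consume. All names below (Env, Stub, MakeStub) are hypothetical stand-ins, not the VM's actual classes.

  #include <cassert>
  #include <initializer_list>
  #include <vector>

  enum Reg { kNoReg = -1, RAX, RBX, RCX };

  struct Env { /* maps values to locations at the deopt point */ };

  struct Stub {
    std::vector<Reg> pushes;   // registers to materialize on the stack
    const Env* env = nullptr;  // or: the full environment, in SSA mode
  };

  // Mirrors the branch above: eager spill bookkeeping when there is no
  // environment, recording the environment when there is one.
  Stub MakeStub(const Env* pending_env, Reg r1, Reg r2, Reg r3) {
    Stub stub;
    if (pending_env == nullptr) {
      for (Reg r : {r1, r2, r3}) {
        if (r != kNoReg) stub.pushes.push_back(r);
      }
    } else {
      stub.env = pending_env;
    }
    return stub;
  }

  int main() {
    Env env;
    assert(MakeStub(nullptr, RAX, RBX, kNoReg).pushes.size() == 2);
    assert(MakeStub(&env, RAX, kNoReg, kNoReg).env == &env);
    return 0;
  }
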
199 | 212 |
200 | 213 |
201 void FlowGraphCompiler::FinalizeExceptionHandlers(const Code& code) { | 214 void FlowGraphCompiler::FinalizeExceptionHandlers(const Code& code) { |
202 ASSERT(exception_handlers_list_ != NULL); | 215 ASSERT(exception_handlers_list_ != NULL); |
203 const ExceptionHandlers& handlers = ExceptionHandlers::Handle( | 216 const ExceptionHandlers& handlers = ExceptionHandlers::Handle( |
204 exception_handlers_list_->FinalizeExceptionHandlers(code.EntryPoint())); | 217 exception_handlers_list_->FinalizeExceptionHandlers(code.EntryPoint())); |
205 code.set_exception_handlers(handlers); | 218 code.set_exception_handlers(handlers); |
(...skipping 263 matching lines...) |
469 return reg; | 482 return reg; |
470 } | 483 } |
471 | 484 |
472 | 485 |
473 void FrameRegisterAllocator::SpillRegister(Register reg) { | 486 void FrameRegisterAllocator::SpillRegister(Register reg) { |
474 while (registers_[reg] != NULL) SpillFirst(); | 487 while (registers_[reg] != NULL) SpillFirst(); |
475 } | 488 } |
476 | 489 |
477 | 490 |
478 void FrameRegisterAllocator::AllocateRegisters(Instruction* instr) { | 491 void FrameRegisterAllocator::AllocateRegisters(Instruction* instr) { |
| 492 if (is_ssa_) return; |
| 493 |
479 LocationSummary* locs = instr->locs(); | 494 LocationSummary* locs = instr->locs(); |
480 | 495 |
481 bool blocked_registers[kNumberOfCpuRegisters]; | 496 bool blocked_registers[kNumberOfCpuRegisters]; |
482 bool blocked_temp_registers[kNumberOfCpuRegisters]; | 497 bool blocked_temp_registers[kNumberOfCpuRegisters]; |
483 | 498 |
484 bool spill = false; | 499 bool spill = false; |
485 | 500 |
486 // Mark all available registers free. | 501 // Mark all available registers free. |
487 for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) { | 502 for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) { |
488 blocked_registers[i] = false; | 503 blocked_registers[i] = false; |
489 blocked_temp_registers[i] = false; | 504 blocked_temp_registers[i] = false; |
490 } | 505 } |
491 | 506 |
492 // Mark all fixed input, temp and output registers as used. | 507 // Mark all fixed input, temp and output registers as used. |
493 for (intptr_t i = 0; i < locs->input_count(); i++) { | 508 for (intptr_t i = 0; i < locs->input_count(); i++) { |
494 Location loc = locs->in(i); | 509 Location loc = locs->in(i); |
495 if (loc.kind() == Location::kRegister) { | 510 if (loc.IsRegister()) { |
496 ASSERT(!blocked_registers[loc.reg()]); | 511 ASSERT(!blocked_registers[loc.reg()]); |
497 blocked_registers[loc.reg()] = true; | 512 blocked_registers[loc.reg()] = true; |
498 if (registers_[loc.reg()] != NULL) { | 513 if (registers_[loc.reg()] != NULL) { |
499 intptr_t stack_index = stack_.length() - (locs->input_count() - i); | 514 intptr_t stack_index = stack_.length() - (locs->input_count() - i); |
500 if ((stack_index < 0) || (stack_[stack_index] != loc.reg())) { | 515 if ((stack_index < 0) || (stack_[stack_index] != loc.reg())) { |
501 spill = true; | 516 spill = true; |
502 } | 517 } |
503 } | 518 } |
504 } | 519 } |
505 } | 520 } |
506 | 521 |
507 if (spill) Spill(); | 522 if (spill) Spill(); |
508 | 523 |
509 for (intptr_t i = 0; i < locs->temp_count(); i++) { | 524 for (intptr_t i = 0; i < locs->temp_count(); i++) { |
510 Location loc = locs->temp(i); | 525 Location loc = locs->temp(i); |
511 if (loc.kind() == Location::kRegister) { | 526 if (loc.IsRegister()) { |
512 ASSERT(!blocked_registers[loc.reg()]); | 527 ASSERT(!blocked_registers[loc.reg()]); |
513 blocked_registers[loc.reg()] = true; | 528 blocked_registers[loc.reg()] = true; |
514 blocked_temp_registers[loc.reg()] = true; | 529 blocked_temp_registers[loc.reg()] = true; |
515 } | 530 } |
516 } | 531 } |
517 | 532 |
518 if (locs->out().kind() == Location::kRegister) { | 533 if (locs->out().IsRegister()) { |
519 // Fixed output registers are allowed to overlap with | 534 // Fixed output registers are allowed to overlap with |
520 // temps and inputs. | 535 // temps and inputs. |
521 blocked_registers[locs->out().reg()] = true; | 536 blocked_registers[locs->out().reg()] = true; |
522 } | 537 } |
523 | 538 |
524 // Do not allocate known registers. | 539 // Do not allocate known registers. |
525 blocked_registers[CTX] = true; | 540 blocked_registers[CTX] = true; |
526 blocked_registers[SPREG] = true; | 541 blocked_registers[SPREG] = true; |
527 blocked_registers[FPREG] = true; | 542 blocked_registers[FPREG] = true; |
528 if (TMP != kNoRegister) { | 543 if (TMP != kNoRegister) { |
529 blocked_registers[TMP] = true; | 544 blocked_registers[TMP] = true; |
530 } | 545 } |
531 | 546 |
532 // Allocate all unallocated input locations. | 547 // Allocate all unallocated input locations. |
533 for (intptr_t i = locs->input_count() - 1; i >= 0; i--) { | 548 for (intptr_t i = locs->input_count() - 1; i >= 0; i--) { |
534 Location loc = locs->in(i); | 549 Location loc = locs->in(i); |
535 Register reg = kNoRegister; | 550 Register reg = kNoRegister; |
536 if (loc.kind() == Location::kRegister) { | 551 if (loc.IsRegister()) { |
537 reg = loc.reg(); | 552 reg = loc.reg(); |
538 } else if (loc.kind() == Location::kUnallocated) { | 553 } else if (loc.IsUnallocated()) { |
539 ASSERT(loc.policy() == Location::kRequiresRegister); | 554 ASSERT(loc.policy() == Location::kRequiresRegister); |
540 if ((stack_.length() > 0) && !blocked_temp_registers[stack_.Last()]) { | 555 if ((stack_.length() > 0) && !blocked_temp_registers[stack_.Last()]) { |
541 reg = stack_.Last(); | 556 reg = stack_.Last(); |
542 blocked_registers[reg] = true; | 557 blocked_registers[reg] = true; |
543 } else { | 558 } else { |
544 reg = AllocateFreeRegister(blocked_registers); | 559 reg = AllocateFreeRegister(blocked_registers); |
545 } | 560 } |
546 locs->set_in(i, Location::RegisterLocation(reg)); | 561 locs->set_in(i, Location::RegisterLocation(reg)); |
547 } | 562 } |
548 | 563 |
549 Pop(reg, instr->InputAt(i)); | 564 Pop(reg, instr->InputAt(i)); |
550 } | 565 } |
551 | 566 |
552 // If this instruction is a call, spill everything that was not consumed by | 567 // If this instruction is a call, spill everything that was not consumed by |
553 // input locations. | 568 // input locations. |
554 if (locs->is_call() || locs->is_branch()) { | 569 if (locs->is_call() || locs->is_branch()) { |
555 Spill(); | 570 Spill(); |
556 } | 571 } |
557 | 572 |
558 // Allocate all unallocated temp locations. | 573 // Allocate all unallocated temp locations. |
559 for (intptr_t i = 0; i < locs->temp_count(); i++) { | 574 for (intptr_t i = 0; i < locs->temp_count(); i++) { |
560 Location loc = locs->temp(i); | 575 Location loc = locs->temp(i); |
561 if (loc.kind() == Location::kUnallocated) { | 576 if (loc.IsUnallocated()) { |
562 ASSERT(loc.policy() == Location::kRequiresRegister); | 577 ASSERT(loc.policy() == Location::kRequiresRegister); |
563 loc = Location::RegisterLocation( | 578 loc = Location::RegisterLocation( |
564 AllocateFreeRegister(blocked_registers)); | 579 AllocateFreeRegister(blocked_registers)); |
565 locs->set_temp(i, loc); | 580 locs->set_temp(i, loc); |
566 } | 581 } |
567 SpillRegister(loc.reg()); | 582 SpillRegister(loc.reg()); |
568 } | 583 } |
569 | 584 |
570 Location result_location = locs->out(); | 585 Location result_location = locs->out(); |
571 if (result_location.kind() == Location::kUnallocated) { | 586 if (result_location.IsUnallocated()) { |
572 switch (result_location.policy()) { | 587 switch (result_location.policy()) { |
573 case Location::kRequiresRegister: | 588 case Location::kRequiresRegister: |
574 result_location = Location::RegisterLocation( | 589 result_location = Location::RegisterLocation( |
575 AllocateFreeRegister(blocked_registers)); | 590 AllocateFreeRegister(blocked_registers)); |
576 break; | 591 break; |
577 case Location::kSameAsFirstInput: | 592 case Location::kSameAsFirstInput: |
578 result_location = locs->in(0); | 593 result_location = locs->in(0); |
579 break; | 594 break; |
580 } | 595 } |
581 locs->set_out(result_location); | 596 locs->set_out(result_location); |
582 } | 597 } |
583 | 598 |
584 if (result_location.kind() == Location::kRegister) { | 599 if (result_location.IsRegister()) { |
585 SpillRegister(result_location.reg()); | 600 SpillRegister(result_location.reg()); |
586 } | 601 } |
587 } | 602 } |
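
The fixed-then-free ordering above reduces to a simple scheme: mark every fixed input, temp, and output register as blocked, plus the reserved registers (CTX, SPREG, FPREG, TMP), then give each unallocated location any register still free. A tiny illustrative sketch (AllocateFree is a hypothetical stand-in for AllocateFreeRegister):

  #include <cassert>

  enum Reg { R0, R1, R2, R3, kNumRegs, kNoReg = -1 };

  Reg AllocateFree(const bool blocked[kNumRegs]) {
    for (int r = 0; r < kNumRegs; ++r) {
      if (!blocked[r]) return static_cast<Reg>(r);
    }
    return kNoReg;  // a real allocator spills to free a register here
  }

  int main() {
    bool blocked[kNumRegs] = {false};
    blocked[R0] = true;  // e.g. a fixed input lives in R0
    blocked[R2] = true;  // e.g. a reserved register such as CTX
    assert(AllocateFree(blocked) == R1);
    return 0;
  }
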
588 | 603 |
589 | 604 |
590 void FrameRegisterAllocator::Pop(Register dst, Value* val) { | 605 void FrameRegisterAllocator::Pop(Register dst, Value* val) { |
| 606 if (is_ssa_) return; |
| 607 |
591 if (stack_.length() > 0) { | 608 if (stack_.length() > 0) { |
592 ASSERT(keep_values_in_registers_); | 609 ASSERT(keep_values_in_registers_); |
593 Register src = stack_.Last(); | 610 Register src = stack_.Last(); |
594 ASSERT(val->AsUse()->definition() == registers_[src]); | 611 ASSERT(val->AsUse()->definition() == registers_[src]); |
595 stack_.RemoveLast(); | 612 stack_.RemoveLast(); |
596 registers_[src] = NULL; | 613 registers_[src] = NULL; |
597 compiler()->assembler()->MoveRegister(dst, src); | 614 compiler()->assembler()->MoveRegister(dst, src); |
598 } else { | 615 } else { |
599 compiler()->assembler()->PopRegister(dst); | 616 compiler()->assembler()->PopRegister(dst); |
600 } | 617 } |
601 } | 618 } |
602 | 619 |
603 | 620 |
604 void FrameRegisterAllocator::Push(Register reg, BindInstr* val) { | 621 void FrameRegisterAllocator::Push(Register reg, BindInstr* val) { |
| 622 if (is_ssa_) return; |
| 623 |
605 ASSERT(registers_[reg] == NULL); | 624 ASSERT(registers_[reg] == NULL); |
606 if (keep_values_in_registers_) { | 625 if (keep_values_in_registers_) { |
607 registers_[reg] = val; | 626 registers_[reg] = val; |
608 stack_.Add(reg); | 627 stack_.Add(reg); |
609 } else { | 628 } else { |
610 compiler()->assembler()->PushRegister(reg); | 629 compiler()->assembler()->PushRegister(reg); |
611 } | 630 } |
612 } | 631 } |
613 | 632 |
614 | 633 |
615 void FrameRegisterAllocator::Spill() { | 634 void FrameRegisterAllocator::Spill() { |
| 635 if (is_ssa_) return; |
| 636 |
616 for (int i = 0; i < stack_.length(); i++) { | 637 for (int i = 0; i < stack_.length(); i++) { |
617 Register r = stack_[i]; | 638 Register r = stack_[i]; |
618 registers_[r] = NULL; | 639 registers_[r] = NULL; |
619 compiler()->assembler()->PushRegister(r); | 640 compiler()->assembler()->PushRegister(r); |
620 } | 641 } |
621 stack_.Clear(); | 642 stack_.Clear(); |
622 } | 643 } |
623 | 644 |
624 | 645 |
625 void FrameRegisterAllocator::SpillInDeoptStub(DeoptimizationStub* stub) { | 646 void FrameRegisterAllocator::SpillInDeoptStub(DeoptimizationStub* stub) { |
| 647 if (is_ssa_) return; |
| 648 |
626 for (int i = 0; i < stack_.length(); i++) { | 649 for (int i = 0; i < stack_.length(); i++) { |
627 stub->Push(stack_[i]); | 650 stub->Push(stack_[i]); |
628 } | 651 } |
629 } | 652 } |
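
Taken together, Push, Pop, and Spill keep the expression stack in registers for as long as possible: Push records that the new stack top lives in a register, Pop satisfies a consumer with a register-to-register move when it can (falling back to a real pop), and Spill flushes everything to memory, e.g. before a call. A self-contained sketch of that discipline, with printf standing in for the assembler and the value tracking (registers_, BindInstr) omitted:

  #include <cstdio>
  #include <vector>

  enum Reg { RAX, RBX, RCX };

  const char* name(Reg r) {
    static const char* kNames[] = {"rax", "rbx", "rcx"};
    return kNames[r];
  }

  struct FrameAllocator {
    std::vector<Reg> stack_;  // registers modeling the expression stack

    void Push(Reg r) { stack_.push_back(r); }  // value stays in a register

    void Pop(Reg dst) {
      if (!stack_.empty()) {  // stack top is still in a register
        std::printf("mov %s, %s\n", name(dst), name(stack_.back()));
        stack_.pop_back();
      } else {                // stack top was spilled to memory
        std::printf("pop %s\n", name(dst));
      }
    }

    void Spill() {            // flush the modeled stack to the real one
      for (Reg r : stack_) std::printf("push %s\n", name(r));
      stack_.clear();
    }
  };

  int main() {
    FrameAllocator a;
    a.Push(RAX);  // a value is produced into rax
    a.Pop(RBX);   // emits: mov rbx, rax   (no memory traffic)
    a.Push(RCX);
    a.Spill();    // emits: push rcx       (e.g. before a call)
    a.Pop(RAX);   // emits: pop rax
    return 0;
  }
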
630 | 653 |
631 | 654 |
| 655 ParallelMoveResolver::ParallelMoveResolver(FlowGraphCompiler* compiler) |
| 656 : compiler_(compiler), moves_(32) {} |
| 657 |
| 658 |
| 659 void ParallelMoveResolver::EmitNativeCode(ParallelMoveInstr* parallel_move) { |
| 660 ASSERT(moves_.is_empty()); |
| 661 // Build up a worklist of moves. |
| 662 BuildInitialMoveList(parallel_move); |
| 663 |
| 664 for (int i = 0; i < moves_.length(); ++i) { |
| 665 MoveOperands move = moves_[i]; |
| 666 // Skip constants to perform them last. They don't block other moves |
| 667 // and skipping such moves with register destinations keeps those |
| 668 // registers free for the whole algorithm. |
| 669 if (!move.IsEliminated() && !move.src().IsConstant()) PerformMove(i); |
| 670 } |
| 671 |
| 672 // Perform the moves with constant sources. |
| 673 for (int i = 0; i < moves_.length(); ++i) { |
| 674 if (!moves_[i].IsEliminated()) { |
| 675 ASSERT(moves_[i].src().IsConstant()); |
| 676 EmitMove(i); |
| 677 } |
| 678 } |
| 679 |
| 680 moves_.Clear(); |
| 681 } |
| 682 |
| 683 |
| 684 void ParallelMoveResolver::BuildInitialMoveList( |
| 685 ParallelMoveInstr* parallel_move) { |
| 686 // Perform a linear sweep of the moves to add them to the initial list of |
| 687 // moves to perform, ignoring any move that is redundant (the source is |
| 688 // the same as the destination, the destination is ignored and |
| 689 // unallocated, or the move was already eliminated). |
| 690 const GrowableArray<MoveOperands>& moves = parallel_move->moves(); |
| 691 for (int i = 0; i < moves.length(); ++i) { |
| 692 MoveOperands move = moves[i]; |
| 693 if (!move.IsRedundant()) moves_.Add(move); |
| 694 } |
| 695 } |
| 696 |
| 697 |
| 698 void ParallelMoveResolver::PerformMove(int index) { |
| 699 // Each call to this function performs a move and deletes it from the move |
| 700 // graph. We first recursively perform any move blocking this one. We |
| 701 // mark a move as "pending" on entry to PerformMove in order to detect |
| 702 // cycles in the move graph. We use operand swaps to resolve cycles, |
| 703 // which means that a call to PerformMove could change any source operand |
| 704 // in the move graph. |
| 705 |
| 706 ASSERT(!moves_[index].IsPending()); |
| 707 ASSERT(!moves_[index].IsRedundant()); |
| 708 |
| 709 // Clear this move's destination to indicate a pending move. The actual |
| 710 // destination is saved in a stack-allocated local. Recursion may allow |
| 711 // multiple moves to be pending. |
| 712 ASSERT(!moves_[index].src().IsInvalid()); |
| 713 Location destination = moves_[index].MarkPending(); |
| 714 |
| 715 // Perform a depth-first traversal of the move graph to resolve |
| 716 // dependencies. Any unperformed, unpending move with a source the same |
| 717 // as this one's destination blocks this one so recursively perform all |
| 718 // such moves. |
| 719 for (int i = 0; i < moves_.length(); ++i) { |
| 720 MoveOperands other_move = moves_[i]; |
| 721 if (other_move.Blocks(destination) && !other_move.IsPending()) { |
| 722 // Though PerformMove can change any source operand in the move graph, |
| 723 // this call cannot create a blocking move via a swap (this loop does |
| 724 // not miss any). Assume there is a non-blocking move with source A |
| 725 // and this move is blocked on source B and there is a swap of A and |
| 726 // B. Then A and B must be involved in the same cycle (or they would |
| 727 // not be swapped). Since this move's destination is B and there is |
| 728 // only a single incoming edge to an operand, this move must also be |
| 729 // involved in the same cycle. In that case, the blocking move will |
| 730 // be created but will be "pending" when we return from PerformMove. |
| 731 PerformMove(i); |
| 732 } |
| 733 } |
| 734 |
| 735 // We are about to resolve this move and don't need it marked as |
| 736 // pending, so restore its destination. |
| 737 moves_[index].ClearPending(destination); |
| 738 |
| 739 // This move's source may have changed due to swaps to resolve cycles and |
| 740 // so it may now be the last move in the cycle. If so remove it. |
| 741 if (moves_[index].src().Equals(destination)) { |
| 742 moves_[index].Eliminate(); |
| 743 return; |
| 744 } |
| 745 |
| 746 // The move may be blocked on a (at most one) pending move, in which case |
| 747 // we have a cycle. Search for such a blocking move and perform a swap to |
| 748 // resolve it. |
| 749 for (int i = 0; i < moves_.length(); ++i) { |
| 750 MoveOperands other_move = moves_[i]; |
| 751 if (other_move.Blocks(destination)) { |
| 752 ASSERT(other_move.IsPending()); |
| 753 EmitSwap(index); |
| 754 return; |
| 755 } |
| 756 } |
| 757 |
| 758 // This move is not blocked. |
| 759 EmitMove(index); |
| 760 } |
| 761 |
| 762 |
632 } // namespace dart | 763 } // namespace dart |
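
The move-resolution algorithm above is compact but subtle, so here is a self-contained sketch of the same technique: locations are plain ints (a negative source marks a constant), printf stands in for EmitMove/EmitSwap, and a pending flag plays the role of the cleared destination. Hypothetical names throughout; illustrative only.

  #include <cstdio>
  #include <vector>

  struct Move {
    int src, dst;
    bool pending, done;
    Move(int s, int d) : src(s), dst(d), pending(false), done(false) {}
    // A move blocks loc while it still needs to read from it.
    bool Blocks(int loc) const { return !done && !pending && src == loc; }
  };

  struct Resolver {
    std::vector<Move> moves;

    void Perform(int index) {
      // Mark pending so a cycle back to this move is detected rather
      // than recursed into.
      moves[index].pending = true;
      const int dst = moves[index].dst;
      // Depth-first: perform every move that must read dst first.
      for (int i = 0; i < (int)moves.size(); ++i) {
        if (moves[i].Blocks(dst)) Perform(i);
      }
      moves[index].pending = false;
      // A swap below may already have routed our source into dst.
      if (moves[index].src == dst) { moves[index].done = true; return; }
      // A pending move still reading dst means a cycle: break it by swap.
      for (Move& other : moves) {
        if (!other.done && other.pending && other.src == dst) {
          std::printf("swap r%d, r%d\n", moves[index].src, dst);
          other.src = moves[index].src;  // the swap rerouted its source
          moves[index].done = true;
          return;
        }
      }
      std::printf("mov r%d <- r%d\n", dst, moves[index].src);
      moves[index].done = true;
    }

    void Resolve() {
      // Constants last: they never block another move.
      for (int i = 0; i < (int)moves.size(); ++i) {
        if (!moves[i].done && moves[i].src >= 0) Perform(i);
      }
      for (const Move& m : moves) {
        if (!m.done) std::printf("mov r%d <- const\n", m.dst);
      }
    }
  };

  int main() {
    // The cycle {r0 -> r1, r1 -> r0} plus a constant load into r2
    // resolves to exactly one swap and one constant move.
    Resolver r;
    r.moves = {{0, 1}, {1, 0}, {-1, 2}};
    r.Resolve();
    return 0;
  }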