Index: runtime/vm/flow_graph_allocator.cc |
diff --git a/runtime/vm/flow_graph_allocator.cc b/runtime/vm/flow_graph_allocator.cc |
index 9230dac1452c6c69b3a3881f2052462221db101c..05c307aa3ce5370cdf0e7f2531897935bf284835 100644 |
--- a/runtime/vm/flow_graph_allocator.cc |
+++ b/runtime/vm/flow_graph_allocator.cc |
@@ -28,12 +28,15 @@ DEFINE_FLAG(bool, trace_ssa_allocator, false, |
static const intptr_t kNoVirtualRegister = -1; |
static const intptr_t kTempVirtualRegister = -2; |
-static UseInterval* const kPermanentlyBlocked = |
- reinterpret_cast<UseInterval*>(-1); |
static const intptr_t kIllegalPosition = -1; |
static const intptr_t kMaxPosition = 0x7FFFFFFF; |
+static intptr_t MinPosition(intptr_t a, intptr_t b) { |
+ return (a < b) ? a : b; |
+} |
+ |
+ |
FlowGraphAllocator::FlowGraphAllocator( |
const GrowableArray<BlockEntryInstr*>& block_order, |
FlowGraphBuilder* builder) |
@@ -48,15 +51,15 @@ FlowGraphAllocator::FlowGraphAllocator( |
for (intptr_t i = 0; i < vreg_count_; i++) live_ranges_.Add(NULL); |
for (intptr_t reg = 0; reg < kNumberOfCpuRegisters; reg++) { |
- cpu_regs_[reg] = NULL; |
+ blocked_cpu_regs_[reg] = false; |
} |
- cpu_regs_[CTX] = kPermanentlyBlocked; |
+ blocked_cpu_regs_[CTX] = true; |
if (TMP != kNoRegister) { |
- cpu_regs_[TMP] = kPermanentlyBlocked; |
+ blocked_cpu_regs_[TMP] = true; |
} |
- cpu_regs_[SPREG] = kPermanentlyBlocked; |
- cpu_regs_[FPREG] = kPermanentlyBlocked; |
+ blocked_cpu_regs_[SPREG] = true; |
+ blocked_cpu_regs_[FPREG] = true; |
} |
@@ -213,76 +216,54 @@ void FlowGraphAllocator::DumpLiveness() { |
} |
-void UseInterval::Print() { |
- OS::Print(" [%d, %d) uses {", start_, end_); |
- for (UsePosition* use_pos = uses_; |
- use_pos != NULL && use_pos->pos() <= end(); |
- use_pos = use_pos->next()) { |
- if (use_pos != uses_) OS::Print(", "); |
- OS::Print("%d", use_pos->pos()); |
- } |
- OS::Print("}\n"); |
-} |
- |
- |
-void UseInterval::AddUse(Instruction* instr, |
- intptr_t pos, |
- Location* location_slot) { |
- ASSERT((start_ <= pos) && (pos <= end_)); |
- ASSERT((instr == NULL) || (instr->lifetime_position() == pos)); |
+void LiveRange::AddUse(intptr_t pos, |
+ Location* location_slot) { |
+ ASSERT((first_use_interval_->start_ <= pos) && |
+ (pos <= first_use_interval_->end_)); |
if ((uses_ != NULL) && (uses_->pos() == pos)) { |
if ((location_slot == NULL) || (uses_->location_slot() == location_slot)) { |
return; |
- } else if ((uses_->location_slot() == NULL) && (instr == NULL)) { |
+ } else if (uses_->location_slot() == NULL) { |
uses_->set_location_slot(location_slot); |
return; |
} |
} |
- uses_ = new UsePosition(instr, pos, uses_, location_slot); |
-} |
- |
- |
-void LiveRange::Print() { |
- OS::Print("vreg %d live intervals:\n", vreg_); |
- for (UseInterval* interval = head_; |
- interval != NULL; |
- interval = interval->next_) { |
- interval->Print(); |
- } |
+ uses_ = new UsePosition(pos, uses_, location_slot); |
} |
void LiveRange::AddUseInterval(intptr_t start, intptr_t end) { |
- if ((head_ != NULL) && (head_->start_ == end)) { |
- head_->start_ = start; |
+ if ((first_use_interval_ != NULL) && (first_use_interval_->start_ == end)) { |
+ first_use_interval_->start_ = start; |
return; |
} |
- head_ = new UseInterval(vreg_, start, end, head_); |
+ first_use_interval_ = new UseInterval(start, end, first_use_interval_); |
+ if (last_use_interval_ == NULL) last_use_interval_ = first_use_interval_; |
} |
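
For readers following the data structure change: AddUseInterval still exploits the fact that liveness is built walking blocks in reverse, so intervals arrive in order of decreasing position and are prepended, merging with the current first interval when they touch. A minimal standalone sketch of that invariant (illustrative names, not the VM's types):

    #include <cstdio>

    struct Interval {
      int start;  // Inclusive.
      int end;    // Exclusive.
      Interval* next;
    };

    // Prepend [start, end), coalescing with the current head when they touch,
    // in the spirit of LiveRange::AddUseInterval above.
    static Interval* AddInterval(Interval* head, int start, int end) {
      if ((head != nullptr) && (head->start == end)) {
        head->start = start;  // Adjacent intervals merge by growing the head.
        return head;
      }
      return new Interval{start, end, head};
    }

    int main() {
      Interval* head = nullptr;
      head = AddInterval(head, 18, 24);
      head = AddInterval(head, 10, 18);  // Touches [18, 24): merged to [10, 24).
      head = AddInterval(head, 2, 6);    // Gap: stays a separate interval.
      for (Interval* i = head; i != nullptr; i = i->next) {
        std::printf("[%d, %d)\n", i->start, i->end);  // [2, 6), then [10, 24).
      }
      return 0;
    }
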
-void LiveRange::DefineAt(Instruction* instr, intptr_t pos, Location* loc) { |
- if (head_ != NULL) { |
- ASSERT(head_->start_ <= pos); |
- head_->start_ = pos; |
+void LiveRange::DefineAt(intptr_t pos, Location* loc) { |
+ if (first_use_interval_ != NULL) { |
+ ASSERT(first_use_interval_->start_ <= pos); |
+ first_use_interval_->start_ = pos; |
} else { |
// Definition without a use. |
- head_ = new UseInterval(vreg_, pos, pos + 1, NULL); |
+ first_use_interval_ = new UseInterval(pos, pos + 1, NULL); |
} |
- head_->AddUse(instr, pos, loc); |
+ |
+ AddUse(pos, loc); |
} |
// TODO(vegorov): encode use_at_start vs. use_at_end in the location itself? |
-void LiveRange::UseAt(Instruction* instr, |
- intptr_t def, intptr_t use, |
+void LiveRange::UseAt(intptr_t def, intptr_t use, |
bool use_at_end, |
Location* loc) { |
- if (head_ == NULL || head_->start_ != def) { |
+ if (first_use_interval_ == NULL || first_use_interval_->start_ != def) { |
srdjan 2012/07/19 22:54:39: Add parentheses. |
AddUseInterval(def, use + (use_at_end ? 1 : 0)); |
} |
- head_->AddUse(instr, use, loc); |
+ AddUse(use, loc); |
} |
@@ -297,75 +278,86 @@ LiveRange* FlowGraphAllocator::GetLiveRange(intptr_t vreg) { |
void FlowGraphAllocator::BlockLocation(Location loc, intptr_t pos) { |
ASSERT(loc.IsRegister()); |
const Register reg = loc.reg(); |
- UseInterval* last = cpu_regs_[reg]; |
- if (last == kPermanentlyBlocked) return; |
- if ((last != NULL) && (last->start() == pos)) return; |
- cpu_regs_[reg] = new UseInterval(kNoVirtualRegister, pos, pos + 1, last); |
+ if (blocked_cpu_regs_[reg]) return; |
+ if (cpu_regs_[reg].length() == 0) { |
+ cpu_regs_[reg].Add(new LiveRange(kNoVirtualRegister)); |
+ } |
+ cpu_regs_[reg][0]->AddUseInterval(pos, pos + 1); |
} |
-void FlowGraphAllocator::Define(Instruction* instr, |
- intptr_t pos, |
+void FlowGraphAllocator::Define(intptr_t pos, |
intptr_t vreg, |
Location* loc) { |
LiveRange* range = GetLiveRange(vreg); |
ASSERT(loc != NULL); |
if (loc->IsRegister()) { |
BlockLocation(*loc, pos); |
- range->DefineAt(instr, pos + 1, loc); |
+ range->DefineAt(pos + 1, loc); |
} else if (loc->IsUnallocated()) { |
- range->DefineAt(instr, pos, loc); |
+ range->DefineAt(pos, loc); |
} else { |
UNREACHABLE(); |
} |
- |
- AddToUnallocated(range->head()); |
+ AddToUnallocated(range); |
} |
-void FlowGraphAllocator::UseValue(Instruction* instr, |
- intptr_t def_pos, |
+void FlowGraphAllocator::UseValue(intptr_t def_pos, |
intptr_t use_pos, |
intptr_t vreg, |
Location* loc, |
bool use_at_end) { |
LiveRange* range = GetLiveRange(vreg); |
if (loc == NULL) { |
- range->UseAt(NULL, def_pos, use_pos, true, loc); |
+ range->UseAt(def_pos, use_pos, true, loc); |
} else if (loc->IsRegister()) { |
// We have a fixed use. |
BlockLocation(*loc, use_pos); |
- range->UseAt(instr, def_pos, use_pos, false, loc); |
+ range->UseAt(def_pos, use_pos, false, loc); |
} else if (loc->IsUnallocated()) { |
- ASSERT(loc->policy() == Location::kRequiresRegister); |
- range->UseAt(use_at_end ? NULL : instr, def_pos, use_pos, use_at_end, loc); |
+ range->UseAt(def_pos, use_pos, use_at_end, loc); |
} |
} |
-static void PrintChain(UseInterval* chain) { |
- if (chain == kPermanentlyBlocked) { |
- OS::Print(" not for allocation\n"); |
- return; |
+void LiveRange::Print() { |
+ OS::Print(" live range v%d [%d, %d)\n", vreg(), Start(), End()); |
+ UsePosition* use_pos = uses_; |
+ for (UseInterval* interval = first_use_interval_; |
+ interval != NULL; |
+ interval = interval->next()) { |
+ OS::Print(" use interval [%d, %d)\n", |
+ interval->start(), |
+ interval->end()); |
+ while ((use_pos != NULL) && (use_pos->pos() <= interval->end())) { |
+ OS::Print(" use at %d as %s\n", |
+ use_pos->pos(), |
+ (use_pos->location_slot() == NULL) |
+ ? "-" : use_pos->location_slot()->Name()); |
+ use_pos = use_pos->next(); |
+ } |
} |
- while (chain != NULL) { |
- chain->Print(); |
- chain = chain->next(); |
+ if (next_sibling() != NULL) { |
+ next_sibling()->Print(); |
} |
} |
void FlowGraphAllocator::PrintLiveRanges() { |
for (intptr_t i = 0; i < unallocated_.length(); i++) { |
- OS::Print("unallocated chain for vr%d\n", unallocated_[i]->vreg()); |
- PrintChain(unallocated_[i]); |
+ unallocated_[i]->Print(); |
} |
for (intptr_t reg = 0; reg < kNumberOfCpuRegisters; reg++) { |
- OS::Print("blocking chain for %s\n", |
+ if (blocked_cpu_regs_[reg]) continue; |
+ if (cpu_regs_[reg].length() == 0) continue; |
+ |
+ ASSERT(cpu_regs_[reg].length() == 1); |
+ OS::Print("blocking live range for %s\n", |
Location::RegisterLocation(static_cast<Register>(reg)).Name()); |
- PrintChain(cpu_regs_[reg]); |
+ cpu_regs_[reg][0]->Print(); |
} |
} |
@@ -419,15 +411,15 @@ void FlowGraphAllocator::BuildLiveRanges() { |
Value* val = phi->InputAt(pred_idx); |
- MoveOperands move = parallel_move->moves()[move_idx]; |
+ MoveOperands* move = &(parallel_move->moves()[move_idx]); |
srdjan 2012/07/19 22:54:39: Why make it a pointer here? |
if (val->IsUse()) { |
const intptr_t use = val->AsUse()->definition()->ssa_temp_index(); |
- Location* slot = move.src_slot(); |
- *slot = Location::RequiresRegister(); |
- GetLiveRange(use)->head()->AddUse(NULL, pos, slot); |
+ Location* slot = move->src_slot(); |
+ *slot = Location::PrefersRegister(); |
+ UseValue(block->start_pos(), pos, use, slot, false); |
} else { |
ASSERT(val->IsConstant()); |
- move.set_src(Location::Constant(val->AsConstant()->value())); |
+ move->set_src(Location::Constant(val->AsConstant()->value())); |
} |
move_idx++; |
@@ -457,13 +449,11 @@ void FlowGraphAllocator::BuildLiveRanges() { |
locs->in_slot(j) : NULL; |
const bool use_at_end = (j > 0) || (in_ref == NULL) || |
!output_same_as_first_input; |
- UseValue(current, block->start_pos(), pos, use, in_ref, use_at_end); |
+ UseValue(block->start_pos(), pos, use, in_ref, use_at_end); |
} |
} |
// Add uses from the deoptimization environment. |
- // TODO(vegorov): these uses should _not_ require register but for now |
- // they do because we don't support spilling at all. |
if (current->env() != NULL) { |
const GrowableArray<Value*>& values = current->env()->values(); |
GrowableArray<Location>* locations = current->env()->locations(); |
@@ -471,10 +461,9 @@ void FlowGraphAllocator::BuildLiveRanges() { |
for (intptr_t j = 0; j < values.length(); j++) { |
Value* val = values[j]; |
if (val->IsUse()) { |
- locations->Add(Location::RequiresRegister()); |
+ locations->Add(Location::Any()); |
const intptr_t use = val->AsUse()->definition()->ssa_temp_index(); |
- UseValue(current, |
- block->start_pos(), |
+ UseValue(block->start_pos(), |
pos, |
use, |
&(*locations)[j], |
@@ -491,10 +480,7 @@ void FlowGraphAllocator::BuildLiveRanges() { |
if (temp.IsRegister()) { |
BlockLocation(temp, pos); |
} else if (temp.IsUnallocated()) { |
- UseInterval* temp_interval = new UseInterval( |
- kTempVirtualRegister, pos, pos + 1, NULL); |
- temp_interval->AddUse(NULL, pos, locs->temp_slot(j)); |
- AddToUnallocated(temp_interval); |
+ AddToUnallocated(LiveRange::MakeTemp(pos, locs->temp_slot(j))); |
} else { |
UNREACHABLE(); |
} |
@@ -506,6 +492,9 @@ void FlowGraphAllocator::BuildLiveRanges() { |
BlockLocation(Location::RegisterLocation(static_cast<Register>(reg)), |
pos); |
} |
+ for (intptr_t j = 0; j < locs->temp_count(); j++) { |
+ ASSERT(!locs->temp(j).IsUnallocated()); |
+ } |
} |
if (locs->out().IsRegister()) { |
@@ -514,8 +503,7 @@ void FlowGraphAllocator::BuildLiveRanges() { |
Definition* def = current->AsDefinition(); |
if ((def != NULL) && (def->ssa_temp_index() >= 0)) { |
- Define(output_same_as_first_input ? current : NULL, |
- pos, |
+ Define(pos, |
srdjan 2012/07/19 22:54:39: Seems to be able to fit in one line? |
def->ssa_temp_index(), |
locs->out_slot()); |
} |
@@ -539,8 +527,7 @@ void FlowGraphAllocator::BuildLiveRanges() { |
ASSERT(def != -1); |
LiveRange* range = GetLiveRange(def); |
- range->DefineAt(NULL, pos, NULL); |
- UseInterval* interval = GetLiveRange(def)->head(); |
+ range->DefineAt(pos, NULL); |
for (intptr_t k = 0; k < phi->InputCount(); k++) { |
BlockEntryInstr* pred = block->PredecessorAt(k); |
@@ -548,12 +535,12 @@ void FlowGraphAllocator::BuildLiveRanges() { |
Location* slot = pred->last_instruction()->AsParallelMove()-> |
moves()[move_idx].dest_slot(); |
- *slot = Location::RequiresRegister(); |
- interval->AddUse(NULL, pos, slot); |
+ *slot = Location::PrefersRegister(); |
+ range->AddUse(pos, slot); |
} |
// All phi resolution moves are connected. Phi's live range is complete. |
- AddToUnallocated(interval); |
+ AddToUnallocated(range); |
move_idx++; |
} |
@@ -562,6 +549,30 @@ void FlowGraphAllocator::BuildLiveRanges() { |
} |
+static ParallelMoveInstr* GetLastParallelMove(BlockEntryInstr* block) { |
+ // TODO(vegorov): fix this for explicit Goto. |
+ Instruction* last = block->last_instruction(); |
+ if (!last->IsParallelMove()) { |
+ ParallelMoveInstr* move = new ParallelMoveInstr(); |
+ move->set_next(last->next()); |
+ move->set_previous(last); |
+ last->set_next(move); |
+ block->set_last_instruction(move); |
+ return move; |
+ } |
+ return last->AsParallelMove(); |
+} |
+ |
+ |
+Instruction* FlowGraphAllocator::InstructionAt(intptr_t pos) { |
+ return instructions_[pos >> 1]; |
+} |
+ |
+ |
+bool FlowGraphAllocator::IsBlockEntry(intptr_t pos) { |
+ return ((pos & 1) == 0) && (InstructionAt(pos)->IsBlockEntry()); |
+} |
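
The two helpers above depend on the numbering scheme set up in NumberInstructions below: each instruction is appended to instructions_ and assigned two lifetime positions, so the instruction index is pos >> 1 and even positions refer to the instruction itself. A sketch of that encoding under those assumptions (simplified types, not the VM's):

    #include <cassert>
    #include <vector>

    // Every instruction occupies two lifetime positions (pos and pos + 1), so
    // the instruction index is pos / 2 and even positions denote the
    // instruction itself. A simplified mirror of InstructionAt/IsBlockEntry.
    struct Instr { bool is_block_entry; };

    static Instr* InstructionAt(const std::vector<Instr*>& instructions,
                                int pos) {
      return instructions[pos >> 1];
    }

    static bool IsBlockEntry(const std::vector<Instr*>& instructions, int pos) {
      return ((pos & 1) == 0) && InstructionAt(instructions, pos)->is_block_entry;
    }

    int main() {
      Instr entry{true};
      Instr add{false};
      std::vector<Instr*> instructions = {&entry, &add};  // Positions 0-1, 2-3.
      assert(IsBlockEntry(instructions, 0));
      assert(!IsBlockEntry(instructions, 1));  // Odd position: the gap after it.
      assert(!IsBlockEntry(instructions, 2));
      return 0;
    }
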
+ |
void FlowGraphAllocator::NumberInstructions() { |
intptr_t pos = 0; |
@@ -569,6 +580,7 @@ void FlowGraphAllocator::NumberInstructions() { |
for (intptr_t i = block_count - 1; i >= 0; i--) { |
BlockEntryInstr* block = postorder_[i]; |
+ instructions_.Add(block); |
block->set_start_pos(pos); |
pos += 2; |
Instruction* current = block->next(); |
@@ -577,6 +589,7 @@ void FlowGraphAllocator::NumberInstructions() { |
if (!last->IsParallelMove()) last = last->next(); |
while (current != last) { |
+ instructions_.Add(current); |
current->set_lifetime_position(pos); |
current = current->next(); |
pos += 2; |
@@ -591,11 +604,7 @@ void FlowGraphAllocator::NumberInstructions() { |
BlockEntryInstr* pred = block->PredecessorAt(i); |
ASSERT(!pred->last_instruction()->IsParallelMove()); |
- ParallelMoveInstr* move = new ParallelMoveInstr(); |
- move->set_next(block); |
- move->set_previous(pred->last_instruction()); |
- pred->last_instruction()->set_next(move); |
- pred->set_last_instruction(move); |
+ ParallelMoveInstr* move = GetLastParallelMove(pred); |
// Populate ParallelMove with empty moves. |
for (intptr_t j = 0; j < phi_count; j++) { |
@@ -607,6 +616,58 @@ void FlowGraphAllocator::NumberInstructions() { |
} |
+static UsePosition* FirstUseAfter(UsePosition* use, intptr_t after) { |
+ while ((use != NULL) && (use->pos() < after)) { |
+ use = use->next(); |
+ } |
+ return use; |
+} |
+ |
+ |
+Location AllocationFinger::FirstHint() { |
+ UsePosition* use = first_hinted_use_; |
+ |
+ while (use != NULL) { |
+ if (use->HasHint()) return use->hint(); |
+ use = use->next(); |
+ } |
+ |
+ return Location::NoLocation(); |
+} |
+ |
+ |
+UsePosition* AllocationFinger::FirstRegisterUse(intptr_t after) { |
+ for (UsePosition* use = FirstUseAfter(first_register_use_, after); |
+ use != NULL; |
+ use = use->next()) { |
+ Location* loc = use->location_slot(); |
+ if ((loc != NULL) && |
+ loc->IsUnallocated() && |
+ (loc->policy() == Location::kRequiresRegister)) { |
+ first_register_use_ = use; |
+ return use; |
+ } |
+ } |
+ return NULL; |
+} |
+ |
+ |
+UsePosition* AllocationFinger::FirstRegisterBeneficialUse(intptr_t after) { |
+ for (UsePosition* use = FirstUseAfter(first_register_beneficial_use_, after); |
+ use != NULL; |
+ use = use->next()) { |
+ Location* loc = use->location_slot(); |
+ if ((loc != NULL) && |
+ (loc->IsRegister() || |
+ (loc->IsUnallocated() && loc->IsRegisterBeneficial()))) { |
+ first_register_beneficial_use_ = use; |
+ return use; |
+ } |
+ } |
+ return NULL; |
+} |
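
All three finger queries share one idea: ranges are processed in order of increasing position, so each search may resume from the previous hit instead of rescanning the use list from its head. A cut-down sketch of the caching pattern (hypothetical names; a boolean stands in for the kRequiresRegister policy check):

    #include <cassert>

    struct Use {
      int pos;
      bool needs_register;
      Use* next;
    };

    // A cut-down "finger": it remembers where the previous search ended, so
    // the next search (always at a position >= the previous one) skips the
    // prefix of the use list.
    struct Finger {
      Use* cached = nullptr;

      Use* FirstRegisterUse(int after) {
        Use* use = cached;
        while ((use != nullptr) && (use->pos < after)) use = use->next;
        for (; use != nullptr; use = use->next) {
          if (use->needs_register) {
            cached = use;
            return use;
          }
        }
        return nullptr;
      }
    };

    int main() {
      Use c{12, true, nullptr};
      Use b{8, false, &c};
      Use a{4, true, &b};
      Finger finger;
      finger.cached = &a;
      assert(finger.FirstRegisterUse(2) == &a);
      assert(finger.FirstRegisterUse(5) == &c);  // Resumes from the cached use.
      return 0;
    }
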
+ |
+ |
intptr_t UseInterval::Intersect(UseInterval* other) { |
if (this->start() <= other->start()) { |
if (other->start() < this->end()) return other->start(); |
@@ -623,7 +684,7 @@ static intptr_t FirstIntersection(UseInterval* a, UseInterval* u) { |
if (pos != kIllegalPosition) return pos; |
if (a->start() < u->start()) { |
- a = a->next_allocated(); |
+ a = a->next(); |
} else { |
u = u->next(); |
} |
@@ -633,30 +694,170 @@ static intptr_t FirstIntersection(UseInterval* a, UseInterval* u) { |
} |
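
FirstIntersection is a merge-style walk over two sorted, disjoint interval lists: the list that starts earlier either overlaps the other's head interval or can safely be advanced. A self-contained version, returning -1 where the allocator uses its position sentinels:

    #include <cassert>

    struct Iv {
      int start, end;  // [start, end)
      Iv* next;
    };

    // Advance whichever list starts earlier, as in FirstIntersection above.
    // Returns -1 when the lists never overlap.
    static int FirstIntersection(Iv* a, Iv* b) {
      while ((a != nullptr) && (b != nullptr)) {
        if (a->start <= b->start) {
          if (b->start < a->end) return b->start;
          a = a->next;
        } else {
          if (a->start < b->end) return a->start;
          b = b->next;
        }
      }
      return -1;
    }

    int main() {
      Iv a2{10, 14, nullptr}, a1{0, 4, &a2};
      Iv b2{12, 20, nullptr}, b1{4, 8, &b2};
      assert(FirstIntersection(&a1, &b1) == 12);
      return 0;
    }
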
-static Location LookAheadForHint(UseInterval* interval) { |
- UsePosition* use = interval->first_use(); |
+LiveRange* LiveRange::MakeTemp(intptr_t pos, Location* location_slot) { |
+ LiveRange* range = new LiveRange(kTempVirtualRegister); |
+ range->AddUseInterval(pos, pos + 1); |
+ range->AddUse(pos, location_slot); |
+ return range; |
+} |
- while (use != NULL) { |
- if (use->HasHint()) return use->hint(); |
- use = use->next(); |
+ |
+LiveRange* LiveRange::SplitAt(intptr_t split_pos) { |
+ if (split_pos == Start()) return this; |
+ |
+ UseInterval* interval = finger_.first_pending_use_interval(); |
+ |
+ ASSERT(interval->start() <= split_pos); |
+ |
+  // Corner case. We need to start over to find the previous interval. |
+ if (interval->start() == split_pos) interval = first_use_interval_; |
+ |
+ UseInterval* last_before_split = NULL; |
+  while (interval->start() < split_pos) { |
+ last_before_split = interval; |
+ interval = interval->next(); |
} |
- return Location::NoLocation(); |
+ const bool split_at_start = (interval->start() == split_pos); |
+ |
+ UseInterval* first_after_split = interval; |
+ if (!split_at_start && interval->Contains(split_pos)) { |
+ first_after_split = new UseInterval(split_pos, |
+ interval->end(), |
+ interval->next()); |
+ interval->end_ = split_pos; |
+ interval->next_ = first_after_split; |
+ last_before_split = interval; |
+ } |
+ |
+ ASSERT(last_before_split->next() == first_after_split); |
+ ASSERT(last_before_split->end() <= split_pos); |
+ ASSERT(split_pos <= first_after_split->start()); |
+ |
+ UsePosition* last_use_before_split = NULL; |
+ UsePosition* use = uses_; |
+ if (split_at_start) { |
+ while ((use != NULL) && (use->pos() < split_pos)) { |
+ last_use_before_split = use; |
+ use = use->next(); |
+ } |
+ } else { |
+ while ((use != NULL) && (use->pos() <= split_pos)) { |
+ last_use_before_split = use; |
+ use = use->next(); |
+ } |
+ } |
+ UsePosition* first_use_after_split = use; |
+ |
+ if (last_use_before_split == NULL) { |
+ uses_ = NULL; |
+ } else { |
+ last_use_before_split->set_next(NULL); |
+ } |
+ |
+ next_sibling_ = new LiveRange(vreg(), |
+ first_use_after_split, |
+ first_after_split, |
+ last_use_interval_, |
+ next_sibling_); |
+ last_use_interval_ = last_before_split; |
+ last_use_interval_->next_ = NULL; |
+ return next_sibling_; |
+} |
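
SplitAt above distinguishes two shapes: the split position may fall strictly inside an interval, which must be cut in two, or on a boundary between intervals, where the chain is cut cleanly. The same case analysis on a bare interval chain (uses, siblings, and the finger omitted; names are illustrative):

    #include <cassert>

    struct Iv {
      int start, end;  // [start, end)
      Iv* next;
    };

    // Split an interval chain at split_pos: intervals entirely before the
    // position stay in the head; an interval containing it is cut in two.
    static Iv* SplitAt(Iv** head, int split_pos) {
      Iv* prev = nullptr;
      Iv* iv = *head;
      // Find the first interval that ends after split_pos.
      while ((iv != nullptr) && (iv->end <= split_pos)) {
        prev = iv;
        iv = iv->next;
      }
      if (iv == nullptr) return nullptr;  // Nothing lives past split_pos.
      if (iv->start < split_pos) {        // split_pos is inside iv: cut it.
        Iv* tail_part = new Iv{split_pos, iv->end, iv->next};
        iv->end = split_pos;
        iv->next = nullptr;
        return tail_part;
      }
      if (prev != nullptr) {              // Clean break between intervals.
        prev->next = nullptr;
      } else {
        *head = nullptr;
      }
      return iv;
    }

    int main() {
      Iv* i2 = new Iv{10, 14, nullptr};
      Iv* head = new Iv{0, 6, i2};
      Iv* tail = SplitAt(&head, 4);  // Cuts [0, 6) into [0, 4) + [4, 6).
      assert((head->start == 0) && (head->end == 4) && (head->next == nullptr));
      assert((tail->start == 4) && (tail->next == i2));
      return 0;
    }
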
+ |
+ |
+LiveRange* FlowGraphAllocator::SplitBetween(LiveRange* range, |
+ intptr_t from, |
+ intptr_t to) { |
+ // TODO(vegorov): select optimal split position based on loop structure. |
+ if (to == range->End()) to--; |
+ TRACE_ALLOC(("split %d [%d, %d) between [%d, %d)\n", |
+ range->vreg(), range->Start(), range->End(), from, to)); |
+ return range->SplitAt(to); |
+} |
+ |
+ |
+void FlowGraphAllocator::SpillBetween(LiveRange* range, |
+ intptr_t from, |
+ intptr_t to) { |
+ ASSERT(from < to); |
+ TRACE_ALLOC(("spill %d [%d, %d) between [%d, %d)\n", |
+ range->vreg(), range->Start(), range->End(), from, to)); |
+ LiveRange* tail = range->SplitAt(from); |
+ |
+ if (tail->Start() < to) { |
+ // There is an intersection of tail and [from, to). |
+ LiveRange* tail_tail = SplitBetween(tail, tail->Start(), to); |
+ Spill(tail); |
+ AddToUnallocated(tail_tail); |
+ } else { |
+ // No intersection between tail and [from, to). |
+ AddToUnallocated(tail); |
+ } |
+} |
+ |
+ |
+void FlowGraphAllocator::SpillAfter(LiveRange* range, intptr_t from) { |
+ TRACE_ALLOC(("spill %d [%d, %d) after %d\n", |
+ range->vreg(), range->Start(), range->End(), from)); |
+ LiveRange* tail = range->SplitAt(from); |
+ Spill(tail); |
+} |
+ |
+ |
+intptr_t FlowGraphAllocator::AllocateSpillSlotFor(LiveRange* range) { |
+ for (intptr_t i = 0; i < spill_slots_.length(); i++) { |
+    if (spill_slots_[i] < range->Start()) { |
+ return i; |
+ } |
+ } |
+ spill_slots_.Add(0); |
+ return spill_slots_.length() - 1; |
+} |
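
AllocateSpillSlotFor treats spill_slots_ as a free list keyed by position: each entry records where its current occupant's live range ends, and a slot becomes reusable once that end precedes the next range's start. A sketch of the reuse rule, using the strict comparison that the ASSERT in Spill expects:

    #include <cassert>
    #include <vector>

    // Each slot stores the end position of the range last parked in it; a
    // slot is free again once that end precedes the new range's start.
    static int AllocateSpillSlot(std::vector<int>* slot_ends, int range_start) {
      for (size_t i = 0; i < slot_ends->size(); i++) {
        if ((*slot_ends)[i] < range_start) return static_cast<int>(i);
      }
      slot_ends->push_back(0);
      return static_cast<int>(slot_ends->size()) - 1;
    }

    int main() {
      std::vector<int> slot_ends;
      int s0 = AllocateSpillSlot(&slot_ends, 4);   // New slot 0.
      slot_ends[s0] = 10;                          // Occupied until position 10.
      int s1 = AllocateSpillSlot(&slot_ends, 8);   // Slot 0 still busy: slot 1.
      int s2 = AllocateSpillSlot(&slot_ends, 12);  // Slot 0 is free again.
      assert((s0 == 0) && (s1 == 1) && (s2 == 0));
      return 0;
    }
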
+ |
+ |
+void FlowGraphAllocator::Spill(LiveRange* range) { |
+ const intptr_t spill_index = AllocateSpillSlotFor(range); |
+ ASSERT(spill_slots_[spill_index] < range->Start()); |
+ spill_slots_[spill_index] = range->End(); |
+ range->set_assigned_location(Location::SpillSlot(spill_index)); |
+ ConvertAllUses(range); |
+} |
+ |
+ |
+intptr_t FlowGraphAllocator::FirstIntersectionWithAllocated( |
+ Register reg, LiveRange* unallocated) { |
+ intptr_t intersection = kMaxPosition; |
+ for (intptr_t i = 0; i < cpu_regs_[reg].length(); i++) { |
+ LiveRange* allocated = cpu_regs_[reg][i]; |
+ if (allocated == NULL) continue; |
+ |
+ UseInterval* allocated_head = |
+ allocated->finger()->first_pending_use_interval(); |
+ if (allocated_head->start() >= intersection) continue; |
+ |
+ const intptr_t pos = FirstIntersection( |
+ unallocated->finger()->first_pending_use_interval(), |
+ allocated_head); |
+ if (pos < intersection) intersection = pos; |
+ } |
+ return intersection; |
} |
-bool FlowGraphAllocator::AllocateFreeRegister(UseInterval* unallocated) { |
+ |
+bool FlowGraphAllocator::AllocateFreeRegister(LiveRange* unallocated) { |
Register candidate = kNoRegister; |
intptr_t free_until = 0; |
// If hint is available try hint first. |
- // TODO(vegorov): ensure that phis are hinted on the backedge. |
- Location hint = LookAheadForHint(unallocated); |
+ // TODO(vegorov): ensure that phis are hinted on the back edge. |
+ Location hint = unallocated->finger()->FirstHint(); |
if (!hint.IsInvalid()) { |
ASSERT(hint.IsRegister()); |
- if (cpu_regs_[hint.reg()] != kPermanentlyBlocked) { |
- free_until = FirstIntersection(cpu_regs_[hint.reg()], unallocated); |
+ if (!blocked_cpu_regs_[hint.reg()]) { |
+ free_until = FirstIntersectionWithAllocated(hint.reg(), unallocated); |
candidate = hint.reg(); |
} |
@@ -665,8 +866,8 @@ bool FlowGraphAllocator::AllocateFreeRegister(UseInterval* unallocated) { |
} |
if (free_until != kMaxPosition) { |
- for (int reg = 0; reg < kNumberOfCpuRegisters; ++reg) { |
- if (cpu_regs_[reg] == NULL) { |
+ for (intptr_t reg = 0; reg < kNumberOfCpuRegisters; ++reg) { |
+ if (!blocked_cpu_regs_[reg] && cpu_regs_[reg].length() == 0) { |
candidate = static_cast<Register>(reg); |
free_until = kMaxPosition; |
break; |
@@ -676,103 +877,197 @@ bool FlowGraphAllocator::AllocateFreeRegister(UseInterval* unallocated) { |
ASSERT(0 <= kMaxPosition); |
if (free_until != kMaxPosition) { |
- for (int reg = 0; reg < kNumberOfCpuRegisters; ++reg) { |
- if (cpu_regs_[reg] == kPermanentlyBlocked) continue; |
- if (reg == candidate) continue; |
+ for (intptr_t reg = 0; reg < kNumberOfCpuRegisters; ++reg) { |
+ if (blocked_cpu_regs_[reg] || (reg == candidate)) continue; |
- const intptr_t pos = FirstIntersection(cpu_regs_[reg], unallocated); |
+ const intptr_t intersection = |
+ FirstIntersectionWithAllocated(static_cast<Register>(reg), unallocated); |
- if (pos > free_until) { |
+ if (intersection > free_until) { |
candidate = static_cast<Register>(reg); |
- free_until = pos; |
+ free_until = intersection; |
if (free_until == kMaxPosition) break; |
} |
} |
} |
// All registers are blocked by active ranges. |
- if (free_until <= unallocated->start()) return false; |
+ if (free_until <= unallocated->Start()) return false; |
+ |
+ TRACE_ALLOC(("assigning free register %s to %d\n", |
+ Location::RegisterLocation(candidate).Name(), |
+ unallocated->vreg())); |
+ |
+ if (free_until != kMaxPosition) { |
+ // There was an intersection. Split unallocated. |
+ TRACE_ALLOC((" splitting at %d\n", free_until)); |
+ LiveRange* tail = unallocated->SplitAt(free_until); |
+ AddToUnallocated(tail); |
+ } |
+ |
+ cpu_regs_[candidate].Add(unallocated); |
+ unallocated->set_assigned_location(Location::RegisterLocation(candidate)); |
- AssignFreeRegister(unallocated, candidate); |
return true; |
} |
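
The selection above boils down to: compute, for every usable register, the first position at which it stops being free for this range; prefer the hint, then any wholly free register, then the register that stays free longest; fail if even the best choice is blocked at or before the range's start; otherwise allocate and split at the blocking position. As a pure function over positions (illustrative only, hint handling omitted):

    #include <cassert>
    #include <vector>

    // Returns the register index that stays free longest, or -1 if every
    // register is blocked at or before the range's start. On success,
    // *out_free_until is where the caller would split the range.
    static int PickFreeRegister(const std::vector<int>& free_until,
                                int range_start,
                                int* out_free_until) {
      int candidate = -1;
      int best = 0;
      for (size_t reg = 0; reg < free_until.size(); reg++) {
        if (free_until[reg] > best) {
          best = free_until[reg];
          candidate = static_cast<int>(reg);
        }
      }
      if (best <= range_start) return -1;  // Everything is blocked immediately.
      *out_free_until = best;
      return candidate;
    }

    int main() {
      int split_pos = 0;
      // Register 2 stays free until position 14, the best for a range
      // starting at 4; the allocator would split there and requeue the tail.
      int reg = PickFreeRegister({6, 4, 14}, 4, &split_pos);
      assert((reg == 2) && (split_pos == 14));
      return 0;
    }
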
-UseInterval* UseInterval::Split(intptr_t pos) { |
- if (pos == start()) return this; |
- ASSERT(Contains(pos)); |
- UseInterval* tail = new UseInterval(vreg(), pos, end(), next()); |
+void FlowGraphAllocator::AllocateAnyRegister(LiveRange* unallocated) { |
+ UsePosition* register_use = |
+ unallocated->finger()->FirstRegisterUse(unallocated->Start()); |
+ if (register_use == NULL) { |
+ Spill(unallocated); |
+ return; |
+ } |
- UsePosition* use = uses_; |
- while (use != NULL && use->pos() <= pos) { |
- use = use->next(); |
+ Register candidate = kNoRegister; |
+ intptr_t free_until = 0; |
+ intptr_t blocked_at = kMaxPosition; |
+ |
+ for (int reg = 0; reg < kNumberOfCpuRegisters; ++reg) { |
+ if (blocked_cpu_regs_[reg]) continue; |
+ if (UpdateFreeUntil(static_cast<Register>(reg), |
+ unallocated, |
+ &free_until, |
+ &blocked_at)) { |
+ candidate = static_cast<Register>(reg); |
+ } |
} |
- tail->uses_ = use; |
+ if (free_until < register_use->pos()) { |
+ // Can't acquire free register. Spill until we really need one. |
+ ASSERT(unallocated->Start() < register_use->pos()); |
+ SpillBetween(unallocated, unallocated->Start(), register_use->pos()); |
+ return; |
+ } |
- end_ = pos; |
+ if (blocked_at < unallocated->End()) { |
+ LiveRange* tail = SplitBetween(unallocated, |
+ unallocated->Start(), |
+ blocked_at); |
+ AddToUnallocated(tail); |
+ } |
- return tail; |
+ AssignBlockedRegister(unallocated, candidate); |
} |
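
AllocateAnyRegister makes a three-way decision driven by two positions: the first use that truly requires a register, and the best free_until any register offers. A compressed model of just that branch structure (hypothetical names):

    #include <cstdio>

    // The three-way decision in AllocateAnyRegister as a pure function over
    // positions.
    enum class Action { kSpillWholeRange, kSpillUntilRegisterUse, kAllocate };

    static Action Decide(bool has_register_use,
                         int first_register_use_pos,
                         int best_free_until) {
      if (!has_register_use) return Action::kSpillWholeRange;
      if (best_free_until < first_register_use_pos) {
        return Action::kSpillUntilRegisterUse;  // SpillBetween(start, use_pos).
      }
      return Action::kAllocate;  // Splitting at blocked_at when needed.
    }

    int main() {
      std::printf("%d\n", static_cast<int>(Decide(false, 0, 0)));   // 0: spill.
      std::printf("%d\n", static_cast<int>(Decide(true, 20, 10)));  // 1: to 20.
      std::printf("%d\n", static_cast<int>(Decide(true, 20, 30)));  // 2: alloc.
      return 0;
    }
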
-void FlowGraphAllocator::AssignFreeRegister(UseInterval* unallocated, |
- Register reg) { |
- TRACE_ALLOC(("assigning free register %s to %d\n", |
- Location::RegisterLocation(reg).Name(), |
- unallocated->vreg())); |
+bool FlowGraphAllocator::UpdateFreeUntil(Register reg, |
+ LiveRange* unallocated, |
+ intptr_t* cur_free_until, |
+ intptr_t* cur_blocked_at) { |
+ intptr_t free_until = kMaxPosition; |
+ intptr_t blocked_at = kMaxPosition; |
+ const intptr_t start = unallocated->Start(); |
+ |
+ for (intptr_t i = 0; i < cpu_regs_[reg].length(); i++) { |
+ LiveRange* allocated = cpu_regs_[reg][i]; |
+ |
+ UseInterval* first_pending_use_interval = |
+ allocated->finger()->first_pending_use_interval(); |
+ if (first_pending_use_interval->Contains(start)) { |
+ // This is an active interval. |
+    if (allocated->vreg() < 0) { |
+      // This register is blocked by an interval that can't be spilled. |
+ return false; |
+ } |
- UseInterval* a = cpu_regs_[reg]; |
- if (a == NULL) { |
- // Register is completely free. |
- cpu_regs_[reg] = unallocated; |
- return; |
+ const UsePosition* use = |
+ allocated->finger()->FirstRegisterBeneficialUse(unallocated->Start()); |
+ |
+ if ((use != NULL) && ((use->pos() - start) <= 1)) { |
+      // This register is blocked by an interval that is used as a register |
+      // in the current instruction and can't be spilled. |
+ return false; |
+ } |
+ |
+ const intptr_t use_pos = (use != NULL) ? use->pos() |
+ : allocated->End(); |
+ |
+ if (use_pos < free_until) free_until = use_pos; |
+ } else { |
+      // This is an inactive interval. |
+ const intptr_t intersection = FirstIntersection( |
+ first_pending_use_interval, unallocated->first_use_interval()); |
+ if (intersection != kMaxPosition) { |
+ if (intersection < free_until) free_until = intersection; |
+ if (allocated->vreg() == kNoVirtualRegister) blocked_at = intersection; |
+ } |
+ } |
+ |
+ if (free_until <= *cur_free_until) { |
+ return false; |
+ } |
} |
- UseInterval* u = unallocated; |
- ASSERT(u->start() < a->start()); // Register is free. |
- cpu_regs_[reg] = u; |
- if (u->next() == NULL || u->next()->start() >= a->start()) { |
- u->set_next_allocated(a); |
+ ASSERT(free_until > *cur_free_until); |
+ *cur_free_until = free_until; |
+ *cur_blocked_at = blocked_at; |
+ return true; |
+} |
+ |
+ |
+void FlowGraphAllocator::RemoveEvicted(Register reg, intptr_t first_evicted) { |
+ intptr_t to = first_evicted; |
+ intptr_t from = first_evicted + 1; |
+ while (from < cpu_regs_[reg].length()) { |
+ LiveRange* allocated = cpu_regs_[reg][from++]; |
+ if (allocated != NULL) cpu_regs_[reg][to++] = allocated; |
} |
+ cpu_regs_[reg].TruncateTo(to); |
+} |
- while (a != NULL && u != NULL) { |
- const intptr_t pos = a->Intersect(u); |
- if (pos != kIllegalPosition) { |
- // TODO(vegorov): split live ranges might require control flow resolution |
- // which is not implemented yet. |
- builder_->Bailout("ssa allocator: control flow resolution required"); |
- |
- TRACE_ALLOC((" splitting at %d\n", pos)); |
- // Reached intersection |
- UseInterval* tail = u->Split(pos); |
- AddToUnallocated(tail); |
- ASSERT(tail == u || u->next_allocated() == a); |
- return; |
+ |
+void FlowGraphAllocator::AssignBlockedRegister(LiveRange* unallocated, |
+ Register reg) { |
+ TRACE_ALLOC(("assigning blocked register %s to live range %d\n", |
+ Location::RegisterLocation(reg).Name(), |
+ unallocated->vreg())); |
+ |
+ intptr_t first_evicted = -1; |
+ for (intptr_t i = cpu_regs_[reg].length() - 1; i >= 0; i--) { |
+ LiveRange* allocated = cpu_regs_[reg][i]; |
+ if (allocated->vreg() < 0) continue; // Can't be evicted. |
+ if (EvictIntersection(allocated, |
+ unallocated)) { |
+ cpu_regs_[reg][i] = NULL; |
+ first_evicted = i; |
} |
+ } |
- if (a->start() < u->start()) { |
- if (a->next_allocated() == NULL) { |
- a->set_next_allocated(u); |
- break; |
- } |
+ // Remove evicted ranges from the array. |
+ if (first_evicted != -1) RemoveEvicted(reg, first_evicted); |
- UseInterval* next = a->next_allocated(); |
- if (next->start() > u->start()) { |
- a->set_next_allocated(u); |
- u->set_next_allocated(next); |
- } |
+ cpu_regs_[reg].Add(unallocated); |
+ unallocated->set_assigned_location(Location::RegisterLocation(reg)); |
+} |
- a = next; |
- } else { |
- UseInterval* next = u->next(); |
- if (next == NULL || next->start() >= a->start()) { |
- u->set_next_allocated(a); |
- } |
- u = next; |
- } |
+bool FlowGraphAllocator::EvictIntersection(LiveRange* allocated, |
+ LiveRange* unallocated) { |
+ UseInterval* first_unallocated = |
+ unallocated->finger()->first_pending_use_interval(); |
+ const intptr_t intersection = FirstIntersection( |
+ allocated->finger()->first_pending_use_interval(), |
+ first_unallocated); |
srdjan 2012/07/19 22:54:39: Indent 4 spaces above. |
Vyacheslav Egorov (Google) 2012/07/24 12:26:41: Done. |
+ if (intersection == kMaxPosition) return false; |
+ |
+ const intptr_t spill_position = first_unallocated->start(); |
+ UsePosition* use = allocated->finger()->FirstRegisterUse(spill_position); |
+ if (use == NULL) { |
+ // No register uses after this point. |
+ SpillAfter(allocated, spill_position); |
+ } else { |
+ const intptr_t restore_position = |
+ (spill_position < intersection) ? MinPosition(intersection, use->pos()) |
+ : use->pos(); |
+ |
+ SpillBetween(allocated, spill_position, restore_position); |
} |
+ |
+ return true; |
} |
@@ -790,82 +1085,111 @@ static void InsertMoveBefore(Instruction* instr, Location to, Location from) { |
} |
-void UsePosition::AssignLocation(Location loc) { |
- if (location_slot_ == NULL) return; |
+void FlowGraphAllocator::ConvertUseTo(UsePosition* use, Location loc) { |
+ if (use->location_slot() == NULL) return; |
- if (location_slot_->IsUnallocated()) { |
- if (location_slot_->policy() == Location::kSameAsFirstInput) { |
- Instruction* instr = this->instr(); |
+ Location* slot = use->location_slot(); |
+ if (slot->IsUnallocated()) { |
+ if (slot->policy() == Location::kSameAsFirstInput) { |
+ Instruction* instr = InstructionAt(use->pos()); |
LocationSummary* locs = instr->locs(); |
if (!locs->in(0).IsUnallocated()) { |
InsertMoveBefore(instr, loc, locs->in(0)); |
} |
locs->set_in(0, loc); |
} |
- TRACE_ALLOC((" use at %d converted to %s\n", pos(), loc.Name())); |
- *location_slot_ = loc; |
- } else if (location_slot_->IsRegister()) { |
- InsertMoveBefore(this->instr(), *location_slot_, loc); |
+ TRACE_ALLOC((" use at %d converted to %s\n", use->pos(), loc.Name())); |
+ *slot = loc; |
+ } else if (slot->IsRegister()) { |
+ InsertMoveBefore(InstructionAt(use->pos()), *slot, loc); |
+ } else { |
+ UNREACHABLE(); |
} |
} |
-void FlowGraphAllocator::FinalizeInterval(UseInterval* interval, Location loc) { |
- if (interval->vreg() == kNoVirtualRegister) return; |
+void FlowGraphAllocator::ConvertAllUses(LiveRange* range) { |
+ if (range->vreg() == kNoVirtualRegister) return; |
+ TRACE_ALLOC(("range [%d, %d) for v%d has been allocated to %s:\n", |
+ range->Start(), |
+ range->End(), |
+ range->vreg(), |
+ range->assigned_location().Name())); |
+ ASSERT(!range->assigned_location().IsInvalid()); |
+ const Location loc = range->assigned_location(); |
+ for (UsePosition* use = range->first_use(); use != NULL; use = use->next()) { |
+ ConvertUseTo(use, loc); |
+ } |
+} |
- TRACE_ALLOC(("assigning location %s to interval [%d, %d)\n", loc.Name(), |
- interval->start(), interval->end())); |
- for (UsePosition* use = interval->first_use(); |
- use != NULL && use->pos() <= interval->end(); |
- use = use->next()) { |
- use->AssignLocation(loc); |
+bool AllocationFinger::Advance(const intptr_t start) { |
+ UseInterval* a = first_pending_use_interval_; |
+ while (a != NULL && a->end() <= start) a = a->next(); |
+ first_pending_use_interval_ = a; |
+ if (first_pending_use_interval_ == NULL) { |
+ return true; |
} |
+ return false; |
} |
void FlowGraphAllocator::AdvanceActiveIntervals(const intptr_t start) { |
- for (int reg = 0; reg < kNumberOfCpuRegisters; reg++) { |
- if (cpu_regs_[reg] == NULL) continue; |
- if (cpu_regs_[reg] == kPermanentlyBlocked) continue; |
- |
- UseInterval* a = cpu_regs_[reg]; |
- while (a != NULL && a->end() <= start) { |
- FinalizeInterval(a, |
- Location::RegisterLocation(static_cast<Register>(reg))); |
- a = a->next_allocated(); |
+ for (intptr_t reg = 0; reg < kNumberOfCpuRegisters; reg++) { |
+ if (cpu_regs_[reg].length() == 0) continue; |
srdjan 2012/07/19 22:54:39: use is_empty() instead of length() == 0. |
Vyacheslav Egorov (Google) 2012/07/24 12:26:41: Done. |
+ |
+ intptr_t first_evicted = -1; |
+ for (intptr_t i = cpu_regs_[reg].length() - 1; i >= 0; i--) { |
+ LiveRange* range = cpu_regs_[reg][i]; |
+ if (range->finger()->Advance(start)) { |
+ ConvertAllUses(range); |
+ cpu_regs_[reg][i] = NULL; |
+ first_evicted = i; |
+ } |
} |
- cpu_regs_[reg] = a; |
+ if (first_evicted != -1) { |
+ RemoveEvicted(static_cast<Register>(reg), first_evicted); |
+ } |
} |
} |
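
The retire loop above, together with RemoveEvicted, is a stable in-place filter over each register's list of ranges (the conversion of a retired range's uses is omitted here). The same pattern on a plain vector:

    #include <cassert>
    #include <vector>

    // AdvanceActiveIntervals in miniature: ranges that end at or before the
    // new start position are retired, and the survivors are compacted in
    // place (the RemoveEvicted pattern).
    struct Range { int end; };

    static void Retire(std::vector<Range*>* active, int start) {
      size_t to = 0;
      for (size_t from = 0; from < active->size(); from++) {
        if ((*active)[from]->end > start) (*active)[to++] = (*active)[from];
      }
      active->resize(to);
    }

    int main() {
      Range a{8}, b{20}, c{12};
      std::vector<Range*> active = {&a, &b, &c};
      Retire(&active, 12);  // Ranges ending at or before position 12 are done.
      assert((active.size() == 1) && (active[0] == &b));
      return 0;
    }
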
-static inline bool ShouldBeAllocatedBefore(UseInterval* a, UseInterval* b) { |
- return a->start() <= b->start(); |
+void AllocationFinger::Initialize(LiveRange* range) { |
+ first_pending_use_interval_ = range->first_use_interval(); |
+ first_register_use_ = range->first_use(); |
+ first_register_beneficial_use_ = range->first_use(); |
+ first_hinted_use_ = range->first_use(); |
} |
-void FlowGraphAllocator::AddToUnallocated(UseInterval* chain) { |
+static inline bool ShouldBeAllocatedBefore(LiveRange* a, LiveRange* b) { |
+ return a->Start() <= b->Start(); |
+} |
+ |
+ |
+void FlowGraphAllocator::AddToUnallocated(LiveRange* range) { |
+ range->finger()->Initialize(range); |
+ |
if (unallocated_.is_empty()) { |
- unallocated_.Add(chain); |
+ unallocated_.Add(range); |
return; |
} |
for (intptr_t i = unallocated_.length() - 1; i >= 0; i--) { |
- if (ShouldBeAllocatedBefore(chain, unallocated_[i])) { |
- unallocated_.InsertAt(i + 1, chain); |
+ if (ShouldBeAllocatedBefore(range, unallocated_[i])) { |
+ unallocated_.InsertAt(i + 1, range); |
return; |
} |
} |
- unallocated_.InsertAt(0, chain); |
+ unallocated_.InsertAt(0, range); |
} |
bool FlowGraphAllocator::UnallocatedIsSorted() { |
for (intptr_t i = unallocated_.length() - 1; i >= 1; i--) { |
- UseInterval* a = unallocated_[i]; |
- UseInterval* b = unallocated_[i - 1]; |
+ LiveRange* a = unallocated_[i]; |
+ LiveRange* b = unallocated_[i - 1]; |
if (!ShouldBeAllocatedBefore(a, b)) return false; |
} |
return true; |
@@ -875,11 +1199,18 @@ bool FlowGraphAllocator::UnallocatedIsSorted() { |
void FlowGraphAllocator::AllocateCPURegisters() { |
ASSERT(UnallocatedIsSorted()); |
+ for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) { |
+ if (cpu_regs_[i].length() == 1) { |
+ LiveRange* range = cpu_regs_[i][0]; |
+ range->finger()->Initialize(range); |
+ } |
+ } |
+ |
while (!unallocated_.is_empty()) { |
- UseInterval* range = unallocated_.Last(); |
+ LiveRange* range = unallocated_.Last(); |
unallocated_.RemoveLast(); |
- const intptr_t start = range->start(); |
- TRACE_ALLOC(("Processing interval chain for vreg %d starting at %d\n", |
+ const intptr_t start = range->Start(); |
+ TRACE_ALLOC(("Processing live range for vreg %d starting at %d\n", |
range->vreg(), |
start)); |
@@ -887,8 +1218,7 @@ void FlowGraphAllocator::AllocateCPURegisters() { |
AdvanceActiveIntervals(start); |
if (!AllocateFreeRegister(range)) { |
- builder_->Bailout("ssa allocator: spilling required"); |
- return; |
+ AllocateAnyRegister(range); |
} |
} |
@@ -901,6 +1231,73 @@ void FlowGraphAllocator::AllocateCPURegisters() { |
} |
+void FlowGraphAllocator::ConnectSplitSiblings(LiveRange* range, |
+ BlockEntryInstr* source_block, |
+ BlockEntryInstr* target_block) { |
+ if (range->next_sibling() == NULL) { |
+    // Nothing to connect: everything is allocated to the same location. |
+ return; |
+ } |
+ |
+ const intptr_t source_pos = source_block->end_pos() - 1; |
+ const intptr_t target_pos = target_block->start_pos(); |
+ |
+ Location target; |
+ Location source; |
+ |
+ while ((range != NULL) && (source.IsInvalid() || target.IsInvalid())) { |
+ if (range->CanCover(source_pos)) { |
+ ASSERT(source.IsInvalid()); |
+ source = range->assigned_location(); |
+ } |
+ if (range->CanCover(target_pos)) { |
+ ASSERT(target.IsInvalid()); |
+ target = range->assigned_location(); |
+    } |
+    range = range->next_sibling(); |
+  } |
+ |
+ // Siblings were allocated to the same register. |
+ if (source.Equals(target)) return; |
+ |
+ GetLastParallelMove(source_block)->AddMove(source, target); |
+} |
+ |
+ |
+void FlowGraphAllocator::ResolveControlFlow() { |
+ // Resolve linear control flow between touching split siblings |
+ // inside basic blocks. |
+ for (intptr_t vreg = 0; vreg < live_ranges_.length(); vreg++) { |
+ LiveRange* range = live_ranges_[vreg]; |
+ if (range == NULL) continue; |
+ |
+ while (range->next_sibling() != NULL) { |
+ LiveRange* sibling = range->next_sibling(); |
+ if ((range->End() == sibling->Start()) && |
+ !range->assigned_location().Equals(sibling->assigned_location()) && |
+ !IsBlockEntry(range->End())) { |
+ ASSERT((sibling->Start() & 1) == 0); |
+ InsertMoveBefore(InstructionAt(sibling->Start()), |
+ sibling->assigned_location(), |
+ range->assigned_location()); |
+ } |
+ range = sibling; |
+ } |
+ } |
+ |
+ // Resolve non-linear control flow across branches. |
+ for (intptr_t i = 1; i < block_order_.length(); i++) { |
+    BlockEntryInstr* block = block_order_[i]; |
+ BitVector* live = live_in_[block->postorder_number()]; |
+ for (BitVector::Iterator it(live); !it.Done(); it.Advance()) { |
+ LiveRange* range = GetLiveRange(it.Current()); |
+ for (intptr_t j = 0; j < block->PredecessorCount(); j++) { |
+ ConnectSplitSiblings(range, block->PredecessorAt(j), block); |
+ } |
+ } |
+ } |
+} |
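
ResolveControlFlow repairs the two ways a split can disagree with control flow: adjacent siblings inside a block get a move inserted before the second sibling's first instruction, and siblings separated by a control-flow edge get a move in the predecessor's final parallel move. The edge case in isolation, with plain integers standing in for the VM's Location class:

    #include <cstdio>

    // One split value: each sibling covers [from_pos, to_pos) and lives in
    // some location (an int here; a sketch, not the VM's Location).
    struct Sibling {
      int from_pos, to_pos, location;
    };

    // If the value sits in different locations at the end of the predecessor
    // and the start of the successor, a move belongs on the edge.
    static void ConnectOnEdge(const Sibling ranges[], int n,
                              int source_pos, int target_pos) {
      int source = -1, target = -1;
      for (int i = 0; (i < n) && ((source < 0) || (target < 0)); i++) {
        if ((ranges[i].from_pos <= source_pos) &&
            (source_pos < ranges[i].to_pos)) {
          source = ranges[i].location;
        }
        if ((ranges[i].from_pos <= target_pos) &&
            (target_pos < ranges[i].to_pos)) {
          target = ranges[i].location;
        }
      }
      if (source != target) {
        std::printf("edge move: loc%d -> loc%d\n", source, target);
      }
    }

    int main() {
      // v0 is split: in loc0 for [0, 10), then reloaded into loc1 for [10, 20).
      const Sibling v0[] = {{0, 10, 0}, {10, 20, 1}};
      ConnectOnEdge(v0, 2, 9, 10);  // Prints one move for the edge 9 -> 10.
      return 0;
    }
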
+ |
+ |
void FlowGraphAllocator::AllocateRegisters() { |
GraphEntryInstr* entry = block_order_[0]->AsGraphEntry(); |
ASSERT(entry != NULL); |
@@ -925,6 +1322,8 @@ void FlowGraphAllocator::AllocateRegisters() { |
AllocateCPURegisters(); |
+ ResolveControlFlow(); |
+ |
if (FLAG_trace_ssa_allocator) { |
OS::Print("-- ir after allocation -------------------------\n"); |
FlowGraphPrinter printer(Function::Handle(), block_order_, true); |