Index: src/incremental-marking.cc
===================================================================
--- src/incremental-marking.cc	(revision 12583)
+++ src/incremental-marking.cc	(working copy)
@@ -52,7 +52,7 @@
       steps_count_since_last_gc_(0),
       steps_took_since_last_gc_(0),
       should_hurry_(false),
-      allocation_marking_factor_(0),
+      marking_speed_(0),
       allocated_(0),
       no_marking_scope_depth_(0) {
 }
@@ -81,17 +81,19 @@
                                              Object* value,
                                              Isolate* isolate) {
   ASSERT(obj->IsHeapObject());
+  IncrementalMarking* marking = isolate->heap()->incremental_marking();
+  ASSERT(!marking->is_compacting_);
 
-  // Fast cases should already be covered by RecordWriteStub.
-  ASSERT(value->IsHeapObject());
-  ASSERT(!value->IsHeapNumber());
-  ASSERT(!value->IsString() ||
-         value->IsConsString() ||
-         value->IsSlicedString());
-  ASSERT(Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(value))));
+  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+  int counter = chunk->write_barrier_counter();
+  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
+    marking->write_barriers_invoked_since_last_step_ +=
+        MemoryChunk::kWriteBarrierCounterGranularity -
+        chunk->write_barrier_counter();
+    chunk->set_write_barrier_counter(
+        MemoryChunk::kWriteBarrierCounterGranularity);
+  }
 
-  IncrementalMarking* marking = isolate->heap()->incremental_marking();
-  ASSERT(!marking->is_compacting_);
   marking->RecordWrite(obj, NULL, value);
 }
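The counter trick above can be read in isolation: each memory chunk carries a
countdown that the write-barrier stub decrements, and the slow path folds it
into a global tally only once it has fallen below half the granularity, so the
tally is updated in coarse batches instead of on every barrier. A minimal
standalone sketch of the scheme (the granularity value and all names here are
assumptions for illustration, not taken from the patch):

  #include <cstdint>
  #include <cstdio>

  // Assumed granularity for the sketch; the real value lives in MemoryChunk.
  static const int kWriteBarrierCounterGranularity = 500;

  struct Chunk {
    int write_barrier_counter = kWriteBarrierCounterGranularity;
  };

  static intptr_t write_barriers_invoked_since_last_step = 0;

  // Models the slow path above: sample the per-chunk countdown only once it
  // drops below half the granularity, then rearm it.
  void CountWriteBarriers(Chunk* chunk) {
    if (chunk->write_barrier_counter < kWriteBarrierCounterGranularity / 2) {
      write_barriers_invoked_since_last_step +=
          kWriteBarrierCounterGranularity - chunk->write_barrier_counter;
      chunk->write_barrier_counter = kWriteBarrierCounterGranularity;
    }
  }

  int main() {
    Chunk chunk;
    for (int i = 0; i < 1000; i++) {
      chunk.write_barrier_counter--;  // models the fast-path stub decrement
      CountWriteBarriers(&chunk);
    }
    // Prints a batched approximation of the 1000 barriers that actually ran
    // (753 here; the remainder is still pending in the chunk's countdown).
    printf("%ld\n", static_cast<long>(write_barriers_invoked_since_last_step));
    return 0;
  }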
@@ -99,8 +101,20 @@
 void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
                                                           Object** slot,
                                                           Isolate* isolate) {
+  ASSERT(obj->IsHeapObject());
   IncrementalMarking* marking = isolate->heap()->incremental_marking();
   ASSERT(marking->is_compacting_);
+
+  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+  int counter = chunk->write_barrier_counter();
+  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
+    marking->write_barriers_invoked_since_last_step_ +=
+        MemoryChunk::kWriteBarrierCounterGranularity -
+        chunk->write_barrier_counter();
+    chunk->set_write_barrier_counter(
+        MemoryChunk::kWriteBarrierCounterGranularity);
+  }
+
   marking->RecordWrite(obj, slot, *slot);
 }
@@ -504,7 +518,7 @@
     state_ = SWEEPING;
   }
 
-  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
+  heap_->new_space()->LowerInlineAllocationLimit(kIncrementalMarkingThreshold);
 }
@@ -773,11 +787,24 @@
   allocated_ += allocated_bytes;
 
-  if (allocated_ < kAllocatedThreshold) return;
+  if (allocated_ < kIncrementalMarkingThreshold &&
+      write_barriers_invoked_since_last_step_ < kIncrementalMarkingThreshold) {

[Inline review comment, Michael Starzinger, 2012/09/26 08:06:38:
"Even if the two thresholds we are comparing agains…"]

+    return;
+  }
 
   if (state_ == MARKING && no_marking_scope_depth_ > 0) return;
 
-  intptr_t bytes_to_process = allocated_ * allocation_marking_factor_;
+  // The marking speed is driven either by the allocation rate or by the rate
+  // at which we are having to check the color of objects in the write barrier.
+  // It is possible for a tight non-allocating loop to run a lot of write
+  // barriers before we get here and check them (marking can only take place on
+  // allocation), so to reduce the lumpiness we don't use the write barriers
+  // invoked since last step directly to determine the amount of work to do.
+  intptr_t bytes_to_process =
+      marking_speed_ * Max(allocated_, kIncrementalMarkingThreshold);
+  allocated_ = 0;
+  write_barriers_invoked_since_last_step_ = 0;
+
   bytes_scanned_ += bytes_to_process;
 
   double start = 0;
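To make the new bytes_to_process arithmetic concrete: allocation drives the
step size, but the threshold acts as a floor, so a step triggered mostly by
write-barrier pressure still does a meaningful amount of marking work. A
hedged sketch with an assumed threshold value (the real constant is defined
in the header, which this hunk does not show):

  #include <algorithm>
  #include <cstdint>
  #include <cstdio>

  // Assumed for the sketch; the patch reads this from incremental-marking.h.
  static const intptr_t kIncrementalMarkingThreshold = 65536;

  intptr_t BytesToProcess(intptr_t marking_speed, intptr_t allocated) {
    // Floor at the threshold: even a barrier-driven step with little
    // allocation behind it does at least one threshold's worth per speed unit.
    return marking_speed * std::max(allocated, kIncrementalMarkingThreshold);
  }

  int main() {
    // Allocation-driven step: 128 KB allocated at speed 8.
    printf("%ld\n", static_cast<long>(BytesToProcess(8, 128 * 1024)));  // 1048576
    // Barrier-driven step: little allocation, so the threshold floor applies.
    printf("%ld\n", static_cast<long>(BytesToProcess(8, 4 * 1024)));    // 524288
    return 0;
  }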
@@ -832,17 +859,15 @@
     if (marking_deque_.IsEmpty()) MarkingComplete(action);
   }
 
-  allocated_ = 0;
-
   steps_count_++;
   steps_count_since_last_gc_++;
 
   bool speed_up = false;
-  if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0) {
+  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
     if (FLAG_trace_gc) {
       PrintPID("Speed up marking after %d steps\n",
-               static_cast<int>(kAllocationMarkingFactorSpeedupInterval));
+               static_cast<int>(kMarkingSpeedAccellerationInterval));
     }
     speed_up = true;
   }
@@ -851,7 +876,7 @@
       (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
 
   bool only_1_nth_of_space_that_was_available_still_left =
-      (SpaceLeftInOldSpace() * (allocation_marking_factor_ + 1) <
+      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
           old_generation_space_available_at_start_of_incremental_);
 
   if (space_left_is_very_small ||
@@ -862,7 +887,7 @@
   bool size_of_old_space_multiplied_by_n_during_marking =
       (heap_->PromotedTotalSize() >
-       (allocation_marking_factor_ + 1) *
+       (marking_speed_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
   if (size_of_old_space_multiplied_by_n_during_marking) {
     speed_up = true;
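The two space-pressure tests in the hunks above share a shape: speed up when
less than 1/(marking_speed_ + 1) of the initially available old space is left,
or when old space has grown by more than a factor of (marking_speed_ + 1)
since marking started. Restated as a standalone predicate (parameter names
are illustrative, not the patch's API):

  #include <cstdint>

  bool ShouldSpeedUpMarking(intptr_t space_left_in_old_space,
                            intptr_t available_at_start,
                            intptr_t promoted_total_size,
                            intptr_t used_at_start,
                            int marking_speed) {
    // Less than 1/(marking_speed + 1) of the initially available space left.
    bool only_1_nth_left =
        space_left_in_old_space * (marking_speed + 1) < available_at_start;
    // Old space grew by more than a factor of (marking_speed + 1) during
    // marking, so promotion is outpacing the current marking speed.
    bool grew_n_fold =
        promoted_total_size > (marking_speed + 1) * used_at_start;
    return only_1_nth_left || grew_n_fold;
  }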
@@ -873,7 +898,7 @@
   int64_t promoted_during_marking = heap_->PromotedTotalSize()
       - old_generation_space_used_at_start_of_incremental_;
-  intptr_t delay = allocation_marking_factor_ * MB;
+  intptr_t delay = marking_speed_ * MB;
   intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
 
   // We try to scan at at least twice the speed that we are allocating.
@@ -890,12 +915,12 @@
       PrintPID("Postponing speeding up marking until marking starts\n");
     }
   } else {
-    allocation_marking_factor_ += kAllocationMarkingFactorSpeedup;
-    allocation_marking_factor_ = static_cast<int>(
-        Min(kMaxAllocationMarkingFactor,
-            static_cast<intptr_t>(allocation_marking_factor_ * 1.3)));
+    marking_speed_ += kMarkingSpeedAccelleration;
+    marking_speed_ = static_cast<int>(
+        Min(kMaxMarkingSpeed,
+            static_cast<intptr_t>(marking_speed_ * 1.3)));
     if (FLAG_trace_gc) {
-      PrintPID("Marking speed increased to %d\n", allocation_marking_factor_);
+      PrintPID("Marking speed increased to %d\n", marking_speed_);
     }
   }
 }
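The ramp in the else-branch is a capped geometric progression: add a fixed
increment, then scale by 1.3, clamped at the maximum. With an assumed
increment of 2 and cap of 1000 (both constants live in the header, not in
this file), the speed sequence starting from 1 runs 3, 6, 10, 15, 22, ... and
saturates at the cap:

  #include <algorithm>
  #include <cstdint>
  #include <cstdio>

  // Assumed values for the sketch; the patch defines these in the header.
  static const intptr_t kMarkingSpeedAccelleration = 2;
  static const intptr_t kMaxMarkingSpeed = 1000;

  int NextMarkingSpeed(int marking_speed) {
    marking_speed += kMarkingSpeedAccelleration;
    return static_cast<int>(std::min(
        kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed * 1.3)));
  }

  int main() {
    int speed = 1;  // kInitialMarkingSpeed in the patch
    for (int i = 0; i < 20; i++) {
      speed = NextMarkingSpeed(speed);
      printf("%d ", speed);  // 3 6 10 15 22 ... then clamped at 1000
    }
    printf("\n");
    return 0;
  }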
@@ -921,8 +946,9 @@
   steps_count_since_last_gc_ = 0;
   steps_took_since_last_gc_ = 0;
   bytes_rescanned_ = 0;
-  allocation_marking_factor_ = kInitialAllocationMarkingFactor;
+  marking_speed_ = kInitialMarkingSpeed;
   bytes_scanned_ = 0;
+  write_barriers_invoked_since_last_step_ = 0;
 }