Index: src/mark-compact.cc
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 8cd9d026510e3c7e1262e6a12f1e447432b73f1c..dbec91ec3c0808432a06cb345e3dda7bbcb563d3 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -230,6 +230,18 @@ void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
 }
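+// Helper for --trace-fragmentation: reports, for |space|, how many pages are
+// reserved and how much of that reserved memory is not occupied by live
+// objects.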
+static void TraceFragmentation(PagedSpace* space) {
+  int number_of_pages = space->CountTotalPages();
+  intptr_t reserved = (number_of_pages * Page::kObjectAreaSize);
+  intptr_t free = reserved - space->SizeOfObjects();
+  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
+         AllocationSpaceName(space->identity()),
+         number_of_pages,
+         static_cast<int>(free),
+         static_cast<double>(free) * 100 / reserved);
+}
+
+
 bool MarkCompactCollector::StartCompaction() {
   if (!compacting_) {
     ASSERT(evacuation_candidates_.length() == 0);
@@ -239,6 +251,13 @@ bool MarkCompactCollector::StartCompaction() {
     if (FLAG_compact_code_space) {
       CollectEvacuationCandidates(heap()->code_space());
+    } else if (FLAG_trace_fragmentation) {
+      TraceFragmentation(heap()->code_space());
+    }
+
+    if (FLAG_trace_fragmentation) {
+      TraceFragmentation(heap()->map_space());
+      TraceFragmentation(heap()->cell_space());
     }
     heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
@@ -414,6 +433,65 @@ const char* AllocationSpaceName(AllocationSpace space) {
 }
+// Returns zero for pages that have so little fragmentation that it is not
+// worth defragmenting them.  Otherwise returns a positive integer that gives
+// an estimate of fragmentation on an arbitrary scale.
+static int FreeListFragmentation(PagedSpace* space, Page* p) {
+  // If the page was not swept then there are no free list items on it.
+  if (!p->WasSwept()) {
+    if (FLAG_trace_fragmentation) {
+      PrintF("%p [%s]: %d bytes live (unswept)\n",
+             reinterpret_cast<void*>(p),
+             AllocationSpaceName(space->identity()),
+             p->LiveBytes());
+    }
+    return 0;
+  }
+
+  FreeList::SizeStats sizes;
+  space->CountFreeListItems(p, &sizes);
+
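+  // Heuristic fragmentation estimate: weight the free-list chunks by size
+  // class and express them as a percentage of the page.  The weights and
+  // thresholds below are tuning parameters, not exact measurements.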
+  intptr_t ratio;
+  intptr_t ratio_threshold;
+  if (space->identity() == CODE_SPACE) {
+    ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
+        Page::kObjectAreaSize;
+    ratio_threshold = 10;
+  } else {
+    ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
+        Page::kObjectAreaSize;
+    ratio_threshold = 15;
+  }
+
+  if (FLAG_trace_fragmentation) {
+    PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
+           reinterpret_cast<void*>(p),
+           AllocationSpaceName(space->identity()),
+           static_cast<int>(sizes.small_size_),
+           static_cast<double>(sizes.small_size_ * 100) /
+           Page::kObjectAreaSize,
+           static_cast<int>(sizes.medium_size_),
+           static_cast<double>(sizes.medium_size_ * 100) /
+           Page::kObjectAreaSize,
+           static_cast<int>(sizes.large_size_),
+           static_cast<double>(sizes.large_size_ * 100) /
+           Page::kObjectAreaSize,
+           static_cast<int>(sizes.huge_size_),
+           static_cast<double>(sizes.huge_size_ * 100) /
+           Page::kObjectAreaSize,
+           (ratio > ratio_threshold) ? "[fragmented]" : "");
+  }
+
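+  // With --always-compact, treat every page that is not completely free as
+  // (minimally) fragmented so it becomes an evacuation candidate.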
+  if (FLAG_always_compact && sizes.Total() != Page::kObjectAreaSize) {
+    return 1;
+  }
+
+  if (ratio <= ratio_threshold) return 0;  // Not fragmented.
+
+  return static_cast<int>(ratio - ratio_threshold);
+}
+
+
 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   ASSERT(space->identity() == OLD_POINTER_SPACE ||
          space->identity() == OLD_DATA_SPACE ||
@@ -421,7 +499,6 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   int number_of_pages = space->CountTotalPages();
-  PageIterator it(space);
   const int kMaxMaxEvacuationCandidates = 1000;
   int max_evacuation_candidates = Min(
       kMaxMaxEvacuationCandidates,
@@ -444,22 +521,86 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
     Page* page_;
   };
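+  // COMPACT_FREE_LISTS picks candidates purely by free-list fragmentation;
+  // REDUCE_MEMORY_FOOTPRINT additionally targets mostly-free pages so that
+  // their memory can be released after evacuation.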
+  enum CompactionMode {
+    COMPACT_FREE_LISTS,
+    REDUCE_MEMORY_FOOTPRINT
+  };
+
+  CompactionMode mode = COMPACT_FREE_LISTS;
+
+  intptr_t reserved = number_of_pages * Page::kObjectAreaSize;
+  intptr_t over_reserved = reserved - space->SizeOfObjects();
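+  // A page qualifies for release when at least this percentage of it is free.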
+  static const intptr_t kFreenessThreshold = 50;
+
+  if (over_reserved >= 2 * Page::kObjectAreaSize &&
+      reduce_memory_footprint_) {
+    mode = REDUCE_MEMORY_FOOTPRINT;
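+    // Aim to evacuate roughly half of the over-reserved pages.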
+    max_evacuation_candidates = (over_reserved / Page::kObjectAreaSize) / 2;
+
+    if (FLAG_trace_fragmentation) {
+      PrintF("Estimated over-reserved memory: %.1f MB (setting threshold %d)\n",
+             static_cast<double>(over_reserved) / MB,
+             static_cast<int>(kFreenessThreshold));
+    }
+  }
+
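+  // Running estimate of the memory that evacuating the selected candidates
+  // would let us release; used to cap candidate selection in the loop below.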
+  intptr_t estimated_release = 0;
+
   Candidate candidates[kMaxMaxEvacuationCandidates];
   int count = 0;
-  if (it.has_next()) it.next();  // Never compact the first page.
   int fragmentation = 0;
   Candidate* least = NULL;
+
+  PageIterator it(space);
+  if (it.has_next()) it.next();  // Never compact the first page.
+
   while (it.has_next()) {
     Page* p = it.next();
     p->ClearEvacuationCandidate();
+
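+    // With --stress-compaction roughly every other page is marked for
+    // evacuation, alternating between GC cycles.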
     if (FLAG_stress_compaction) {
       int counter = space->heap()->ms_count();
       uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
       if ((counter & 1) == (page_number & 1)) fragmentation = 1;
+    } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
+      // Don't try to release too many pages: stop adding candidates once the
+      // estimated release reaches three quarters of the over-reserved memory.
+      if (estimated_release >= ((over_reserved * 3) / 4)) {
+        continue;
+      }
+
+      intptr_t free_bytes = 0;
+
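+      // Unswept pages have no free-list entries yet, so fall back to the
+      // live-bytes counter instead of walking the free list.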
+      if (!p->WasSwept()) {
+        free_bytes = (Page::kObjectAreaSize - p->LiveBytes());
+      } else {
+        FreeList::SizeStats sizes;
+        space->CountFreeListItems(p, &sizes);
[Rietveld inline comment by Erik Corry, 2012/02/03 10:12:39: "This call worries me. It can potentially take a l" (truncated)]
+        free_bytes = sizes.Total();
+      }
+
+      int free_pct = static_cast<int>(free_bytes * 100 / Page::kObjectAreaSize);
+
+      if (free_pct >= kFreenessThreshold) {
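+        // Count the page's whole area (released when the page is returned)
+        // plus its live bytes (area minus free_bytes), which must first be
+        // copied into other pages.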
+        estimated_release += Page::kObjectAreaSize +
+            (Page::kObjectAreaSize - free_bytes);
+        fragmentation = free_pct;
+      } else {
+        fragmentation = 0;
+      }
+
+      if (FLAG_trace_fragmentation) {
+        PrintF("%p [%s]: %d (%.2f%%) free %s\n",
+               reinterpret_cast<void*>(p),
+               AllocationSpaceName(space->identity()),
+               static_cast<int>(free_bytes),
+               static_cast<double>(free_bytes * 100) / Page::kObjectAreaSize,
+               (fragmentation > 0) ? "[fragmented]" : "");
+      }
     } else {
-      fragmentation = space->Fragmentation(p);
+      fragmentation = FreeListFragmentation(space, p);
     }
+
     if (fragmentation != 0) {
       if (count < max_evacuation_candidates) {
         candidates[count++] = Candidate(fragmentation, p);
@@ -479,6 +620,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
       }
     }
   }
+
   for (int i = 0; i < count; i++) {
     AddEvacuationCandidate(candidates[i].page());
   }
@@ -3242,6 +3384,8 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
     p->set_scan_on_scavenge(false);
     slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
     p->ClearEvacuationCandidate();
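+    // The page is now fully evacuated: reset its live-bytes counter and
+    // return it to the owning space so the memory can be reused or released.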
+    p->ResetLiveBytes();
+    space->ReleasePage(p);
   }
   evacuation_candidates_.Rewind(0);
   compacting_ = false;