Chromium Code Reviews

Unified Diff: src/mark-compact.cc

Issue 11782028: Parallel and concurrent sweeping. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 7 years, 11 months ago
Index: src/mark-compact.cc
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 8ca14db5063ea5122c25e7414d96f0700b4b3b4c..039fc3d8855037d81dbae7e5ed6f353a4ba1ee08 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -494,6 +494,42 @@ void MarkCompactCollector::ClearMarkbits() {
}
+void MarkCompactCollector::StartSweeperThreads(SweeperType sweeper_type) {
+ SweeperThread::set_sweeping_pending(true);
+ for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ heap()->isolate()->sweeper_threads()[i]->StartSweeping(sweeper_type);
+ }
+}
+
+
+void MarkCompactCollector::WaitUntilSweepingCompleted() {
+ if (SweeperThread::sweeping_pending()) {
+ for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ heap()->isolate()->sweeper_threads()[i]->WaitForSweeperThread();
+ }
+ SweeperThread::set_sweeping_pending(false);
+ StealMemoryFromSweeperThreads(heap()->paged_space(OLD_DATA_SPACE));
+ StealMemoryFromSweeperThreads(heap()->paged_space(OLD_POINTER_SPACE));
+ heap()->FreeQueuedChunks();
+ }
+}
+
+
+intptr_t MarkCompactCollector::
+ StealMemoryFromSweeperThreads(PagedSpace* space) {
+ intptr_t freed_bytes = 0;
+ for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ freed_bytes += heap()->isolate()->sweeper_threads()[i]->StealMemory(space);
+ }
+ return freed_bytes;
+}
+
+
+bool MarkCompactCollector::AreSweeperThreadsActivated() {
+ return heap()->isolate()->sweeper_threads() != NULL;
+}
+
+
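For readers outside V8: the four helpers above are the main thread's side of a start/wait handshake with the sweeper threads. Below is a minimal sketch of that handshake, written with C++11 primitives; it is illustrative only (SweeperThread itself is defined outside this diff, and V8 uses its own platform threads and semaphores rather than the standard library), and all names here are hypothetical.

    // Illustrative sketch, not part of the patch: the pairing implied by
    // StartSweeping() / WaitForSweeperThread().
    #include <condition_variable>
    #include <mutex>

    class ToySweeper {
     public:
      void StartSweeping() {           // cf. StartSweeperThreads() above
        std::lock_guard<std::mutex> lock(mutex_);
        pending_ = true;
        cv_.notify_all();
      }
      void WaitForSweeperThread() {    // cf. WaitUntilSweepingCompleted()
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return !pending_; });
      }
      void Run() {                     // body of the sweeper thread
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return pending_; });
        // ... sweep pages into a thread-private free list; a real
        // implementation would drop the lock while sweeping ...
        pending_ = false;
        cv_.notify_all();
      }
     private:
      std::mutex mutex_;
      std::condition_variable cv_;
      bool pending_ = false;
    };

Once WaitUntilSweepingCompleted() observes completion, the main thread calls StealMemoryFromSweeperThreads() to fold each thread's privately accumulated free memory back into the old spaces' accounting.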
bool Marking::TransferMark(Address old_start, Address new_start) {
// This is only used when resizing an object.
ASSERT(MemoryChunk::FromAddress(old_start) ==
@@ -796,6 +832,10 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
ASSERT(!FLAG_never_compact || !FLAG_always_compact);
+ if (AreSweeperThreadsActivated() && FLAG_concurrent_sweeping) {
Michael Starzinger 2013/01/28 16:30:18 Needs a comment that for now we wait, but it might
Hannes Payer (out of office) 2013/01/30 10:11:27 Done.
+ WaitUntilSweepingCompleted();
+ }
+
// Clear marking bits if incremental marking is aborted.
if (was_marked_incrementally_ && abort_incremental_marking_) {
heap()->incremental_marking()->Abort();
@@ -2728,6 +2768,7 @@ enum SkipListRebuildingMode {
// if requested.
template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode>
static void SweepPrecisely(PagedSpace* space,
+ FreeList* free_list,
Michael Starzinger 2013/01/28 16:30:18 No need to pass the free-list, will not be called
Hannes Payer (out of office) 2013/01/30 10:11:27 Done.
Page* p,
ObjectVisitor* v) {
ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
@@ -2771,7 +2812,8 @@ static void SweepPrecisely(PagedSpace* space,
for ( ; live_objects != 0; live_objects--) {
Address free_end = object_address + offsets[live_index++] * kPointerSize;
if (free_end != free_start) {
- space->Free(free_start, static_cast<int>(free_end - free_start));
+ MarkCompactCollector::Free(space, free_list, free_start,
+ static_cast<int>(free_end - free_start));
}
HeapObject* live_object = HeapObject::FromAddress(free_end);
ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
@@ -2797,7 +2839,8 @@ static void SweepPrecisely(PagedSpace* space,
cells[cell_index] = 0;
}
if (free_start != p->area_end()) {
- space->Free(free_start, static_cast<int>(p->area_end() - free_start));
+ MarkCompactCollector::Free(space, free_list, free_start,
+ static_cast<int>(p->area_end() - free_start));
}
p->ResetLiveBytes();
}
@@ -3027,15 +3070,15 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
switch (space->identity()) {
case OLD_DATA_SPACE:
- SweepConservatively(space, p);
+ SweepConservatively(space, space->free_list(), p);
Michael Starzinger 2013/01/28 16:30:18 This call-path leads to a mismatch in the accounting
Hannes Payer (out of office) 2013/01/30 10:11:27 Done.
break;
case OLD_POINTER_SPACE:
SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
- space, p, &updating_visitor);
+ space, space->free_list(), p, &updating_visitor);
break;
case CODE_SPACE:
SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>(
- space, p, &updating_visitor);
+ space, space->free_list(), p, &updating_visitor);
break;
default:
UNREACHABLE();
@@ -3383,6 +3426,19 @@ static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
}
+intptr_t MarkCompactCollector::Free(PagedSpace* space,
Michael Starzinger 2013/01/28 16:30:18 Make this a static helper ... this should never ev
Hannes Payer (out of office) 2013/01/30 10:11:27 Done.
+ FreeList* free_list,
+ Address start,
+ int size) {
+ if (space->heap()->mark_compact_collector()->AreSweeperThreadsActivated()) {
+ intptr_t wasted = free_list->Free(start, size);
+ return size - wasted;
+ } else {
+ return space->Free(start, size);
+ }
+}
+
+
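Note the return-value convention this helper bridges: space->Free() returns the number of bytes freed, while FreeList::Free() returns the bytes it could not reuse, hence the size - wasted conversion. A toy model of that contract follows (hypothetical code, not V8's FreeList; the minimum block size is an assumption):

    #include <stdint.h>

    // Toy model of the FreeList::Free() contract assumed above: the return
    // value is the "wasted" remainder that was too small to link into the
    // free list, so the usable bytes recovered are size - wasted.
    struct ToyFreeList {
      static const int kMinBlockSize = 2 * sizeof(void*);  // assumption
      intptr_t Free(char* start, int size) {
        if (size < kMinBlockSize) return size;  // fragment is wasted
        // ... link [start, start + size) into the matching size class ...
        return 0;                               // fully reusable
      }
    };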
// Sweeps a space conservatively. After this has been done the larger free
// spaces have been put on the free list and the smaller ones have been
// ignored and left untouched. A free space is always either ignored or put
@@ -3390,7 +3446,9 @@ static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
// because it means that any FreeSpace maps left actually describe a region of
// memory that can be ignored when scanning. Dead objects other than free
// spaces will not contain the free space map.
-intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
+intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
Michael Starzinger 2013/01/28 16:30:18 There are basically two different sweepers at work
Hannes Payer (out of office) 2013/01/30 10:11:27 Done.
+ FreeList* free_list,
+ Page* p) {
ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
MarkBit::CellType* cells = p->markbits()->cells();
p->MarkSweptConservatively();
@@ -3418,8 +3476,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
}
size_t size = block_address - p->area_start();
if (cell_index == last_cell_index) {
- freed_bytes += static_cast<int>(space->Free(p->area_start(),
- static_cast<int>(size)));
+ freed_bytes += Free(space, free_list, p->area_start(),
+ static_cast<int>(size));
ASSERT_EQ(0, p->LiveBytes());
return freed_bytes;
}
@@ -3428,8 +3486,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
// Free the first free space.
size = free_end - p->area_start();
- freed_bytes += space->Free(p->area_start(),
- static_cast<int>(size));
+ freed_bytes += Free(space, free_list, p->area_start(),
+ static_cast<int>(size));
// The start of the current free area is represented in undigested form by
// the address of the last 32-word section that contained a live object and
// the marking bitmap for that cell, which describes where the live object
@@ -3458,8 +3516,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
// so now we need to find the start of the first live object at the
// end of the free space.
free_end = StartOfLiveObject(block_address, cell);
- freed_bytes += space->Free(free_start,
- static_cast<int>(free_end - free_start));
+ freed_bytes += Free(space, free_list, free_start,
Michael Starzinger 2013/01/28 16:30:18 This looks like it will tank non-parallel sweeping
Hannes Payer (out of office) 2013/01/30 10:11:27 Done.
+ static_cast<int>(free_end - free_start));
}
}
// Update our undigested record of where the current free area started.
@@ -3473,8 +3531,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
// Handle the free space at the end of the page.
if (block_address - free_start > 32 * kPointerSize) {
free_start = DigestFreeStart(free_start, free_start_cell);
- freed_bytes += space->Free(free_start,
- static_cast<int>(block_address - free_start));
+ freed_bytes += Free(space, free_list, free_start,
+ static_cast<int>(block_address - free_start));
}
p->ResetLiveBytes();
@@ -3482,10 +3540,91 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
}
+void MarkCompactCollector::PrepareParallelSweeping(PagedSpace* space) {
Michael Starzinger 2013/01/28 16:30:18 Please merge all of this into SweepSpace() and int
Hannes Payer (out of office) 2013/01/30 10:11:27 Done.
+ bool unused_page_present = false;
+
+ space->set_was_swept_conservatively(true);
+
+ space->ClearStats();
+
+ PageIterator it(space);
+ while (it.has_next()) {
+ Page* p = it.next();
+
+ // Clear sweeping flags indicating that marking bits are still intact.
+ p->ClearSweptPrecisely();
+ p->ClearSweptConservatively();
+ p->set_parallel_sweeping(0);
+
+ if (p->IsEvacuationCandidate()) {
+ ASSERT(evacuation_candidates_.length() > 0);
+ continue;
+ }
+
+ if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+ // Will be processed in EvacuateNewSpaceAndCandidates.
+ continue;
+ }
+
+ // One unused page is kept; all other unused pages are released before sweeping them.
+ if (p->LiveBytes() == 0) {
+ if (unused_page_present) {
+ if (FLAG_gc_verbose) {
+ PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
+ reinterpret_cast<intptr_t>(p));
+ }
+ // Adjust unswept free bytes because releasing a page expects said
+ // counter to be accurate for unswept pages.
+ space->IncreaseUnsweptFreeBytes(p);
+ space->ReleasePage(p);
+ continue;
+ }
+ unused_page_present = true;
+ }
+ }
+}
+
+
+void MarkCompactCollector::SweepInParallel(PagedSpace* space,
+ SweeperType sweeper_type,
+ FreeList* private_free_list,
+ FreeList* free_list) {
+ PageIterator it(space);
+ while (it.has_next()) {
+ Page* p = it.next();
+
+ if (p->IsEvacuationCandidate()) {
+ ASSERT(evacuation_candidates_.length() > 0);
+ continue;
Michael Starzinger 2013/01/28 16:30:18 In concurrent mode we have a race here, because th
Hannes Payer (out of office) 2013/01/30 10:11:27 Done.
+ }
+
+ if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+ // Will be processed in EvacuateNewSpaceAndCandidates.
+ continue;
+ }
+
+ if (p->TryParallelSweeping()) {
+ if (sweeper_type == CONSERVATIVE || sweeper_type == LAZY_CONSERVATIVE) {
Michael Starzinger 2013/01/28 16:30:18 This logic is no longer needed, we always sweep conservatively
Hannes Payer (out of office) 2013/01/30 10:11:27 Done.
+ SweepConservatively(space, private_free_list, p);
+ } else {
+ SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(
+ space, private_free_list, p, NULL);
+ }
+ free_list->Concatenate(private_free_list);
+ }
+ }
+}
+
+
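SweepInParallel() depends on two coordination points: TryParallelSweeping() must ensure that each page is claimed by exactly one of the threads walking the same page list, and the per-thread results flow through private_free_list into the shared free_list via Concatenate(). The patch initializes the per-page state with set_parallel_sweeping(0) in PrepareParallelSweeping() above; a plausible claim operation, sketched with C++11 atomics (this is an assumption about its semantics, not code from the patch):

    #include <atomic>

    // Hypothetical sketch of page claiming: one compare-and-swap per page,
    // so exactly one sweeper wins the "pending -> in progress" transition.
    enum ToySweepingState { kSweepingPending = 0, kSweepingInProgress = 1 };

    struct ToyPage {
      std::atomic<int> parallel_sweeping{kSweepingPending};
      bool TryParallelSweeping() {
        int expected = kSweepingPending;
        return parallel_sweeping.compare_exchange_strong(expected,
                                                         kSweepingInProgress);
      }
    };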
void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
sweeper == LAZY_CONSERVATIVE);
-
+ ASSERT(!(space->identity() == OLD_DATA_SPACE &&
+ FLAG_parallel_sweeping &&
+ FLAG_concurrent_sweeping));
+ ASSERT(!(space->identity() == OLD_POINTER_SPACE &&
+ FLAG_parallel_sweeping &&
+ FLAG_concurrent_sweeping));
Michael Starzinger 2013/01/28 16:30:18 These assertions are bogus, they will always be fa
Hannes Payer (out of office) 2013/01/30 10:11:27 Done.
space->ClearStats();
PageIterator it(space);
@@ -3543,7 +3682,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
reinterpret_cast<intptr_t>(p));
}
- SweepConservatively(space, p);
+ SweepConservatively(space, space->free_list(), p);
pages_swept++;
break;
}
@@ -3552,7 +3691,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n",
reinterpret_cast<intptr_t>(p));
}
- freed_bytes += SweepConservatively(space, p);
+ freed_bytes += SweepConservatively(space, space->free_list(), p);
pages_swept++;
space->SetPagesToSweep(p->next_page());
lazy_sweeping_active = true;
@@ -3564,9 +3703,11 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
reinterpret_cast<intptr_t>(p));
}
if (space->identity() == CODE_SPACE) {
- SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL);
+ SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(
+ space, space->free_list(), p, NULL);
} else {
- SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL);
+ SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(
+ space, space->free_list(), p, NULL);
}
pages_swept++;
break;
@@ -3602,8 +3743,18 @@ void MarkCompactCollector::SweepSpaces() {
// the map space last because freeing non-live maps overwrites them and
// the other spaces rely on possibly non-live maps to get the sizes for
// non-live objects.
- SweepSpace(heap()->old_pointer_space(), how_to_sweep);
- SweepSpace(heap()->old_data_space(), how_to_sweep);
+
+ if (AreSweeperThreadsActivated()) {
+ PrepareParallelSweeping(heap()->old_pointer_space());
Michael Starzinger 2013/01/28 16:30:18 See the comment in PrepareParallelSweeping about t
Hannes Payer (out of office) 2013/01/30 10:11:27 Done.
+ PrepareParallelSweeping(heap()->old_data_space());
+ StartSweeperThreads(how_to_sweep);
Michael Starzinger 2013/01/28 16:30:18 Move the StartSweeperThreads to below EvacuateNewSpaceAndCandidates.
Hannes Payer (out of office) 2013/01/30 10:11:27 Done.
+ if (FLAG_parallel_sweeping && !FLAG_concurrent_sweeping) {
+ WaitUntilSweepingCompleted();
+ }
+ } else {
+ SweepSpace(heap()->old_pointer_space(), how_to_sweep);
+ SweepSpace(heap()->old_data_space(), how_to_sweep);
+ }
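Taken together, this hunk yields two threaded modes: with FLAG_parallel_sweeping alone the main thread starts the sweeper threads and immediately blocks, so sweeping is parallelized but still inside the GC pause; with FLAG_concurrent_sweeping the wait is deferred until Prepare() at the start of the next collection (the hunk at line 832 above), letting sweeping overlap the mutator. A hypothetical sketch of each sweeper thread's work, composed from the pieces this patch adds (SweeperThread lives outside this file and is not shown in the diff; access control is ignored here):

    // Illustrative only: assumed shape of a sweeper thread's body, using
    // the SweepInParallel() signature introduced above.
    void SweeperThreadBody(MarkCompactCollector* collector, Heap* heap,
                           SweeperType type,
                           FreeList* private_free_list_old_data,
                           FreeList* free_list_old_data,
                           FreeList* private_free_list_old_pointer,
                           FreeList* free_list_old_pointer) {
      collector->SweepInParallel(heap->paged_space(OLD_DATA_SPACE), type,
                                 private_free_list_old_data,
                                 free_list_old_data);
      collector->SweepInParallel(heap->paged_space(OLD_POINTER_SPACE), type,
                                 private_free_list_old_pointer,
                                 free_list_old_pointer);
      // ... signal completion so WaitForSweeperThread() unblocks ...
    }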
RemoveDeadInvalidatedCode();
SweepSpace(heap()->code_space(), PRECISE);
