Chromium Code Reviews| Index: src/mark-compact.cc |
| diff --git a/src/mark-compact.cc b/src/mark-compact.cc |
| index 8ca14db5063ea5122c25e7414d96f0700b4b3b4c..02dc143dab95ef2aec0ba24dbe2f95f78d4def00 100644 |
| --- a/src/mark-compact.cc |
| +++ b/src/mark-compact.cc |
| @@ -41,6 +41,7 @@ |
| #include "objects-visiting.h" |
| #include "objects-visiting-inl.h" |
| #include "stub-cache.h" |
| +#include "sweeper-thread.h" |
| namespace v8 { |
| namespace internal { |
| @@ -494,6 +495,42 @@ void MarkCompactCollector::ClearMarkbits() { |
| } |
| +void MarkCompactCollector::StartSweeperThreads() { |
| + SweeperThread::set_sweeping_pending(true); |
| + for (int i = 0; i < FLAG_sweeper_threads; i++) { |
| + heap()->isolate()->sweeper_threads()[i]->StartSweeping(); |
| + } |
| +} |
| + |
| + |
| +void MarkCompactCollector::WaitUntilSweepingCompleted() { |
| + if (SweeperThread::sweeping_pending()) { |
| + for (int i = 0; i < FLAG_sweeper_threads; i++) { |
| + heap()->isolate()->sweeper_threads()[i]->WaitForSweeperThread(); |
| + } |
| + SweeperThread::set_sweeping_pending(false); |
| + StealMemoryFromSweeperThreads(heap()->paged_space(OLD_DATA_SPACE)); |
| + StealMemoryFromSweeperThreads(heap()->paged_space(OLD_POINTER_SPACE)); |
| + heap()->FreeQueuedChunks(); |
| + } |
| +} |
| + |
| + |
| +intptr_t MarkCompactCollector:: |
| + StealMemoryFromSweeperThreads(PagedSpace* space) { |
| + intptr_t freed_bytes = 0; |
| + for (int i = 0; i < FLAG_sweeper_threads; i++) { |
| + freed_bytes += heap()->isolate()->sweeper_threads()[i]->StealMemory(space); |
| + } |
| + return freed_bytes; |
| +} |
| + |
| + |
| +bool MarkCompactCollector::AreSweeperThreadsActivated() { |
| + return heap()->isolate()->sweeper_threads() != NULL; |
| +} |
| + |
| + |
| bool Marking::TransferMark(Address old_start, Address new_start) { |
| // This is only used when resizing an object. |
| ASSERT(MemoryChunk::FromAddress(old_start) == |
| @@ -796,6 +833,11 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) { |
| ASSERT(!FLAG_never_compact || !FLAG_always_compact); |
| + if (AreSweeperThreadsActivated() && FLAG_concurrent_sweeping) { |
| + // Instead of waiting we could also abort the sweeper threads here. |
| + WaitUntilSweepingCompleted(); |
| + } |
| + |
| // Clear marking bits if incremental marking is aborted. |
| if (was_marked_incrementally_ && abort_incremental_marking_) { |
| heap()->incremental_marking()->Abort(); |
| @@ -3027,7 +3069,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { |
| switch (space->identity()) { |
| case OLD_DATA_SPACE: |
| - SweepConservatively(space, p); |
| + SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p); |
| break; |
| case OLD_POINTER_SPACE: |
| SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>( |
| @@ -3383,6 +3425,13 @@ static inline Address StartOfLiveObject(Address block_address, uint32_t cell) { |
| } |
| +static intptr_t Free(FreeList* free_list, |
| + Address start, |
| + int size) { |
| + return size - free_list->Free(start, size); |
| +} |
| + |
| + |
| // Sweeps a space conservatively. After this has been done the larger free |
| // spaces have been put on the free list and the smaller ones have been |
| // ignored and left untouched. A free space is always either ignored or put |
| @@ -3390,8 +3439,16 @@ static inline Address StartOfLiveObject(Address block_address, uint32_t cell) { |
| // because it means that any FreeSpace maps left actually describe a region of |
| // memory that can be ignored when scanning. Dead objects other than free |
| // spaces will not contain the free space map. |
| -intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) { |
| +template<MarkCompactCollector::SweepingParallelism mode> |
| +intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, |
| + FreeList* free_list, |
| + Page* p) { |
| ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept()); |
| + ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL && |
| + free_list != NULL) || |
| + (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY && |
| + free_list == NULL)); |
| + |
| MarkBit::CellType* cells = p->markbits()->cells(); |
| p->MarkSweptConservatively(); |
| @@ -3418,8 +3475,12 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) { |
| } |
| size_t size = block_address - p->area_start(); |
| if (cell_index == last_cell_index) { |
| - freed_bytes += static_cast<int>(space->Free(p->area_start(), |
| - static_cast<int>(size))); |
| + if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) { |
|
Michael Starzinger
2013/01/30 11:01:19
You could factor out this logic and move it into t
Hannes Payer (out of office)
2013/01/30 12:07:32
I templatized Free, the code looks cleaner.
|
| + freed_bytes += static_cast<int>(space->Free(p->area_start(), |
| + static_cast<int>(size))); |
| + } else { |
| + freed_bytes += Free(free_list, p->area_start(), static_cast<int>(size)); |
| + } |
| ASSERT_EQ(0, p->LiveBytes()); |
| return freed_bytes; |
| } |
| @@ -3428,8 +3489,11 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) { |
| Address free_end = StartOfLiveObject(block_address, cells[cell_index]); |
| // Free the first free space. |
| size = free_end - p->area_start(); |
| - freed_bytes += space->Free(p->area_start(), |
| - static_cast<int>(size)); |
| + if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) { |
| + freed_bytes += space->Free(p->area_start(), static_cast<int>(size)); |
| + } else { |
| + freed_bytes += Free(free_list, p->area_start(), static_cast<int>(size)); |
| + } |
| // The start of the current free area is represented in undigested form by |
| // the address of the last 32-word section that contained a live object and |
| // the marking bitmap for that cell, which describes where the live object |
| @@ -3458,8 +3522,13 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) { |
| // so now we need to find the start of the first live object at the |
| // end of the free space. |
| free_end = StartOfLiveObject(block_address, cell); |
| - freed_bytes += space->Free(free_start, |
| - static_cast<int>(free_end - free_start)); |
| + if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) { |
| + freed_bytes += space->Free(free_start, |
| + static_cast<int>(free_end - free_start)); |
| + } else { |
| + freed_bytes += Free(free_list, free_start, |
| + static_cast<int>(free_end - free_start)); |
| + } |
| } |
| } |
| // Update our undigested record of where the current free area started. |
| @@ -3473,8 +3542,13 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) { |
| // Handle the free space at the end of the page. |
| if (block_address - free_start > 32 * kPointerSize) { |
| free_start = DigestFreeStart(free_start, free_start_cell); |
| - freed_bytes += space->Free(free_start, |
| - static_cast<int>(block_address - free_start)); |
| + if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) { |
| + freed_bytes += space->Free(free_start, |
| + static_cast<int>(block_address - free_start)); |
| + } else { |
| + freed_bytes += Free(free_list, free_start, |
| + static_cast<int>(block_address - free_start)); |
| + } |
| } |
| p->ResetLiveBytes(); |
| @@ -3482,10 +3556,24 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) { |
| } |
| +void MarkCompactCollector::SweepInParallel(PagedSpace* space, |
| + FreeList* private_free_list, |
| + FreeList* free_list) { |
| + PageIterator it(space); |
| + while (it.has_next()) { |
| + Page* p = it.next(); |
| + |
| + if (p->TryParallelSweeping()) { |
| + SweepConservatively<SWEEP_IN_PARALLEL>(space, private_free_list, p); |
| + free_list->Concatenate(private_free_list); |
| + } |
| + } |
| +} |
| + |
| + |
| void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { |
| space->set_was_swept_conservatively(sweeper == CONSERVATIVE || |
| sweeper == LAZY_CONSERVATIVE); |
| - |
| space->ClearStats(); |
| PageIterator it(space); |
| @@ -3498,6 +3586,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { |
| while (it.has_next()) { |
| Page* p = it.next(); |
| + ASSERT(p->parallel_sweeping() == 0); |
| // Clear sweeping flags indicating that marking bits are still intact. |
| p->ClearSweptPrecisely(); |
| p->ClearSweptConservatively(); |
| @@ -3543,7 +3632,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { |
| PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n", |
| reinterpret_cast<intptr_t>(p)); |
| } |
| - SweepConservatively(space, p); |
| + SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p); |
| pages_swept++; |
| break; |
| } |
| @@ -3552,12 +3641,20 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { |
| PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n", |
| reinterpret_cast<intptr_t>(p)); |
| } |
| - freed_bytes += SweepConservatively(space, p); |
| + freed_bytes += SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p); |
| pages_swept++; |
| space->SetPagesToSweep(p->next_page()); |
| lazy_sweeping_active = true; |
| break; |
| } |
| + case PARALLEL_CONSERVATIVE: { |
| + if (FLAG_gc_verbose) { |
| + PrintF("Prepare conservatively parallel sweeping 0x%" V8PRIxPTR ".\n", |
|
Michael Starzinger
2013/01/30 11:01:19
Let's rephrase this log line to "Sweeping 0x??? co
Hannes Payer (out of office)
2013/01/30 12:07:32
Done.
|
| + reinterpret_cast<intptr_t>(p)); |
| + } |
| + p->set_parallel_sweeping(1); |
| + break; |
| + } |
| case PRECISE: { |
| if (FLAG_gc_verbose) { |
| PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n", |
| @@ -3597,14 +3694,16 @@ void MarkCompactCollector::SweepSpaces() { |
| FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE; |
| if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE; |
| if (sweep_precisely_) how_to_sweep = PRECISE; |
| + if (AreSweeperThreadsActivated()) how_to_sweep = PARALLEL_CONSERVATIVE; |
| // Noncompacting collections simply sweep the spaces to clear the mark |
| // bits and free the nonlive blocks (for old and map spaces). We sweep |
| // the map space last because freeing non-live maps overwrites them and |
| // the other spaces rely on possibly non-live maps to get the sizes for |
| // non-live objects. |
| + |
| SweepSpace(heap()->old_pointer_space(), how_to_sweep); |
| SweepSpace(heap()->old_data_space(), how_to_sweep); |
| - |
| + |
|
Michael Starzinger
2013/01/30 11:01:19
I think you have white-space on that empty line. S
Hannes Payer (out of office)
2013/01/30 12:07:32
Done.
|
| RemoveDeadInvalidatedCode(); |
| SweepSpace(heap()->code_space(), PRECISE); |
| @@ -3612,6 +3711,15 @@ void MarkCompactCollector::SweepSpaces() { |
| EvacuateNewSpaceAndCandidates(); |
| + if (AreSweeperThreadsActivated()) { |
| + // The starting of the sweeper threads should be after SweepSpace |
|
Michael Starzinger
2013/01/30 11:01:19
Let's turn this comment into a TODO().
Hannes Payer (out of office)
2013/01/30 12:07:32
Done.
|
| + // old data space. |
| + StartSweeperThreads(); |
| + if (FLAG_parallel_sweeping && !FLAG_concurrent_sweeping) { |
| + WaitUntilSweepingCompleted(); |
| + } |
| + } |
| + |
| // ClearNonLiveTransitions depends on precise sweeping of map space to |
| // detect whether unmarked map became dead in this collection or in one |
| // of the previous ones. |