Chromium Code Reviews

Index: src/spaces.cc
diff --git a/src/spaces.cc b/src/spaces.cc
index 35619d172333c6421ab98b3ee8a64f8b75ac4458..a765ac1abbfefaaa1c7185bbbac13abd181d15fb 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -450,6 +450,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
   chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_ = static_cast<int>(area_start - base);
+  chunk->parallel_sweeping_ = 0;
   chunk->ResetLiveBytes();
   Bitmap::Clear(chunk);
   chunk->initialize_scan_on_scavenge(false);
@@ -1930,6 +1931,29 @@ void FreeListNode::set_next(FreeListNode* next) {
 }
+intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
+  intptr_t free_bytes = 0;
+  if (category->top_ != NULL) {
+    ASSERT(category->end_ != NULL);
+    // This is safe (not going to deadlock) since Concatenate operations
+    // are never performed on the same free lists at the same time in
+    // reverse order.
+    ScopedLock lock_target(mutex_);
+    ScopedLock lock_source(category->mutex());
+    free_bytes = category->available();
+    if (end_ == NULL) {
+      end_ = category->end();
+    } else {
+      category->end()->set_next(top_);
+    }
+    top_ = category->top();
+    available_ += category->available();
+    category->Reset();
+  }
+  return free_bytes;
+}
+
+
 void FreeListCategory::Reset() {
   top_ = NULL;
   end_ = NULL;
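A minimal, self-contained sketch of what the new Concatenate does may help when reading the hunk above: the donor category's whole singly linked list is spliced in front of the target's while both per-category locks are held. The types below (a FreeListNode with a next pointer, std::mutex members) are simplified stand-ins, not the V8 classes from this patch; the "not going to deadlock" comment holds because, for any given pair of lists, concatenation only ever runs in one direction, so no thread takes the two locks in the reverse order.

// Simplified stand-in for the patch's FreeListCategory::Concatenate.
#include <cstddef>
#include <mutex>

struct FreeListNode {
  FreeListNode* next = nullptr;
};

class FreeListCategory {
 public:
  // Splices |other|'s nodes in front of this category's list and returns
  // the number of bytes moved.  Lock order is always target before source;
  // callers never concatenate the same pair of lists in reverse order, so
  // the two locks cannot deadlock.
  size_t Concatenate(FreeListCategory* other) {
    size_t moved = 0;
    if (other->top_ != nullptr) {
      std::lock_guard<std::mutex> target_lock(mutex_);
      std::lock_guard<std::mutex> source_lock(other->mutex_);
      moved = other->available_;
      if (end_ == nullptr) {
        end_ = other->end_;        // This list was empty: adopt the donor's tail.
      } else {
        other->end_->next = top_;  // Link the donor's tail to our old head.
      }
      top_ = other->top_;          // The donor's head becomes the new head.
      available_ += other->available_;
      other->top_ = nullptr;       // Equivalent of category->Reset() in the patch.
      other->end_ = nullptr;
      other->available_ = 0;
    }
    return moved;
  }

 private:
  FreeListNode* top_ = nullptr;
  FreeListNode* end_ = nullptr;
  size_t available_ = 0;
  std::mutex mutex_;
};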
@@ -2028,6 +2052,16 @@ FreeList::FreeList(PagedSpace* owner)
 }
+intptr_t FreeList::Concatenate(FreeList* free_list) {
+  intptr_t free_bytes = 0;
+  free_bytes += small_list_.Concatenate(free_list->small_list());
+  free_bytes += medium_list_.Concatenate(free_list->medium_list());
+  free_bytes += large_list_.Concatenate(free_list->large_list());
+  free_bytes += huge_list_.Concatenate(free_list->huge_list());
+  return free_bytes;
+}
+
+
 void FreeList::Reset() {
   small_list_.Reset();
   medium_list_.Reset();
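Building on the sketch above, FreeList::Concatenate in this hunk is just per-size-class forwarding. The helper below is purely illustrative and uses names that are not in the patch (StealFromSweepers, the vector of sweeper lists): it shows how an allocating thread could merge everything the sweeper threads have freed so far, which is the role StealMemoryFromSweeperThreads plays later in this CL.

// Simplified stand-in for FreeList, reusing the FreeListCategory sketch above.
#include <cstddef>
#include <vector>

class FreeList {
 public:
  size_t Concatenate(FreeList* other) {
    size_t bytes = 0;
    bytes += small_.Concatenate(&other->small_);
    bytes += medium_.Concatenate(&other->medium_);
    bytes += large_.Concatenate(&other->large_);
    bytes += huge_.Concatenate(&other->huge_);
    return bytes;
  }

 private:
  FreeListCategory small_, medium_, large_, huge_;
};

// Hypothetical helper: drain each sweeper thread's private free list into the
// space's own free list before retrying allocation.
size_t StealFromSweepers(FreeList* space_list,
                         const std::vector<FreeList*>& sweeper_lists) {
  size_t stolen = 0;
  for (FreeList* sweeper_list : sweeper_lists) {
    stolen += space_list->Concatenate(sweeper_list);
  }
  return stolen;
}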
@@ -2392,7 +2426,8 @@ bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
                reinterpret_cast<intptr_t>(p));
       }
       DecreaseUnsweptFreeBytes(p);
-      freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
+      freed_bytes += MarkCompactCollector::SweepConservatively(
+          this, this->free_list(), p);
     }
     p = next_page;
   } while (p != anchor() && freed_bytes < bytes_to_sweep);
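The only change in this hunk is that SweepConservatively now takes the free list to refill as an explicit argument rather than always using the space's own list. A hedged reading of the intent, with purely hypothetical names (FreeRange, SimpleFreeList, SweepPageInto are illustrative, not V8 APIs): the same sweeping loop can then feed either the space's free list on the main thread or a sweeper thread's private list, which is merged back later via Concatenate.

// Illustrative sketch only: the caller decides which free list receives the
// memory reclaimed while sweeping a page.
#include <cstddef>

struct FreeRange {
  void* start;
  size_t size_in_bytes;
};

class SimpleFreeList {
 public:
  void Free(const FreeRange& range) { available_ += range.size_in_bytes; }
  size_t available() const { return available_; }

 private:
  size_t available_ = 0;
};

size_t SweepPageInto(SimpleFreeList* target, const FreeRange* ranges,
                     size_t count) {
  size_t freed = 0;
  for (size_t i = 0; i < count; ++i) {
    target->Free(ranges[i]);           // Hand each dead range to the chosen list.
    freed += ranges[i].size_in_bytes;
  }
  return freed;
}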
@@ -2426,18 +2461,42 @@ void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Allocation in this space has failed.
+  if (heap()->IsConcurrentSweepingActivated()) {

Michael Starzinger  2013/01/24 17:15:54
The SlowAllocateRaw() function is getting too complex.

Hannes Payer (out of office)  2013/01/25 10:46:49
Done.

+    heap()->StealMemoryFromSweeperThreads(this);
+    HeapObject* object = free_list_.Allocate(size_in_bytes);
+    if (object != NULL) return object;
-  // If there are unswept pages advance lazy sweeper a bounded number of times
-  // until we find a size_in_bytes contiguous piece of memory
-  const int kMaxSweepingTries = 5;
-  bool sweeping_complete = false;
+    if (heap()->IsConcurrentSweepingPending()) {
+      heap()->WaitUntilParallelSweepingCompleted();
+    }
-  for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
-    sweeping_complete = AdvanceSweeper(size_in_bytes);
+    heap()->StealMemoryFromSweeperThreads(this);
-    // Retry the free list allocation.
-    HeapObject* object = free_list_.Allocate(size_in_bytes);
+    object = free_list_.Allocate(size_in_bytes);
     if (object != NULL) return object;
+  } else {
+    // If there are unswept pages advance lazy sweeper a bounded number of
+    // times until we find a size_in_bytes contiguous piece of memory
+    const int kMaxSweepingTries = 5;
+    bool sweeping_complete = false;
+
+    for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
+      sweeping_complete = AdvanceSweeper(size_in_bytes);
+
+      // Retry the free list allocation.
+      HeapObject* object = free_list_.Allocate(size_in_bytes);
+      if (object != NULL) return object;
+    }
+
+    // Last ditch, sweep all the remaining pages to try to find space. This may
+    // cause a pause.
+    if (!IsSweepingComplete()) {
+      AdvanceSweeper(kMaxInt);
+
+      // Retry the free list allocation.
+      HeapObject* object = free_list_.Allocate(size_in_bytes);
+      if (object != NULL) return object;
+    }
   }
   // Free list allocation failed and there is no next page. Fail if we have
@@ -2453,16 +2512,6 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
     return free_list_.Allocate(size_in_bytes);
   }
-  // Last ditch, sweep all the remaining pages to try to find space. This may
-  // cause a pause.
-  if (!IsSweepingComplete()) {
-    AdvanceSweeper(kMaxInt);
-
-    // Retry the free list allocation.
-    HeapObject* object = free_list_.Allocate(size_in_bytes);
-    if (object != NULL) return object;
-  }
-
   // Finally, fail.
   return NULL;
 }
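Tying the SlowAllocateRaw changes together, and in the spirit of the reviewer's request above to keep the function from growing too complex, here is a hypothetical restructuring sketch. It is not the follow-up patch: Allocate(), the sweeping hooks, and the boolean flags below are simplified stand-ins for the calls that appear in the diff, and the space-expansion tail of the real function is only noted in a comment.

// Hypothetical sketch of the slow allocation path after this CL, with the
// concurrent and lazy branches factored into helpers.
#include <climits>
#include <cstdint>

class PagedSpaceSketch {
 public:
  void* SlowAllocateRaw(int size_in_bytes) {
    void* object = concurrent_sweeping_activated_
                       ? AllocateWithConcurrentSweeping(size_in_bytes)
                       : AllocateWithLazySweeping(size_in_bytes);
    // The real function goes on to try expanding the space before finally
    // failing; that part is unchanged by the patch and omitted here.
    return object;
  }

 private:
  // Concurrent path: first take whatever the sweeper threads have already
  // freed; if that is not enough, wait for them and retry once.
  void* AllocateWithConcurrentSweeping(int size_in_bytes) {
    StealMemoryFromSweeperThreads();
    if (void* object = Allocate(size_in_bytes)) return object;
    if (concurrent_sweeping_pending_) {
      WaitUntilParallelSweepingCompleted();
    }
    StealMemoryFromSweeperThreads();
    return Allocate(size_in_bytes);  // May still be null; caller handles it.
  }

  // Lazy path: advance the sweeper a bounded number of times, then sweep
  // everything as a last resort (the pre-existing behaviour).
  void* AllocateWithLazySweeping(int size_in_bytes) {
    const int kMaxSweepingTries = 5;
    for (int i = 0; i < kMaxSweepingTries; i++) {
      bool sweeping_complete = AdvanceSweeper(size_in_bytes);
      if (void* object = Allocate(size_in_bytes)) return object;
      if (sweeping_complete) break;
    }
    if (!IsSweepingComplete()) {
      AdvanceSweeper(INT_MAX);  // Stands in for kMaxInt in the patch.
      if (void* object = Allocate(size_in_bytes)) return object;
    }
    return nullptr;
  }

  // Simplified stand-ins for the real free list, heap, and sweeper machinery.
  void* Allocate(int) { return nullptr; }
  void StealMemoryFromSweeperThreads() {}
  void WaitUntilParallelSweepingCompleted() {}
  bool AdvanceSweeper(intptr_t) { return true; }
  bool IsSweepingComplete() { return true; }

  bool concurrent_sweeping_activated_ = false;
  bool concurrent_sweeping_pending_ = false;
};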