| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
| 6 | 6 |
| 7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
| 8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
| 9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
| 10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
| (...skipping 303 matching lines...) |
| 314 ClearNonLiveReferences(); | 314 ClearNonLiveReferences(); |
| 315 | 315 |
| 316 RecordObjectStats(); | 316 RecordObjectStats(); |
| 317 | 317 |
| 318 #ifdef VERIFY_HEAP | 318 #ifdef VERIFY_HEAP |
| 319 if (FLAG_verify_heap) { | 319 if (FLAG_verify_heap) { |
| 320 VerifyMarking(heap_); | 320 VerifyMarking(heap_); |
| 321 } | 321 } |
| 322 #endif | 322 #endif |
| 323 | 323 |
| 324 StartSweepSpaces(); | 324 SweepSpaces(); |
| 325 | 325 |
| 326 EvacuateNewSpaceAndCandidates(); | 326 EvacuateNewSpaceAndCandidates(); |
| 327 | 327 |
| 328 Finish(); | 328 Finish(); |
| 329 } | 329 } |
| 330 | 330 |
| 331 | 331 |
| 332 #ifdef VERIFY_HEAP | 332 #ifdef VERIFY_HEAP |
| 333 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) { | 333 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) { |
| 334 for (Page* p : *space) { | 334 for (Page* p : *space) { |
| (...skipping 109 matching lines...) |
| 444 | 444 |
| 445 DISALLOW_COPY_AND_ASSIGN(SweeperTask); | 445 DISALLOW_COPY_AND_ASSIGN(SweeperTask); |
| 446 }; | 446 }; |
| 447 | 447 |
| 448 void MarkCompactCollector::Sweeper::StartSweeping() { | 448 void MarkCompactCollector::Sweeper::StartSweeping() { |
| 449 sweeping_in_progress_ = true; | 449 sweeping_in_progress_ = true; |
| 450 ForAllSweepingSpaces([this](AllocationSpace space) { | 450 ForAllSweepingSpaces([this](AllocationSpace space) { |
| 451 std::sort(sweeping_list_[space].begin(), sweeping_list_[space].end(), | 451 std::sort(sweeping_list_[space].begin(), sweeping_list_[space].end(), |
| 452 [](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); }); | 452 [](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); }); |
| 453 }); | 453 }); |
| 454 } | 454 if (FLAG_concurrent_sweeping) { |
| 455 | |
| 456 void MarkCompactCollector::Sweeper::StartSweeperTasks() { | |
| 457 if (FLAG_concurrent_sweeping && sweeping_in_progress_) { | |
| 458 ForAllSweepingSpaces([this](AllocationSpace space) { | 455 ForAllSweepingSpaces([this](AllocationSpace space) { |
| 459 if (space == NEW_SPACE) return; | 456 if (space == NEW_SPACE) return; |
| 460 num_sweeping_tasks_.Increment(1); | 457 StartSweepingHelper(space); |
| 461 V8::GetCurrentPlatform()->CallOnBackgroundThread( | |
| 462 new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space), | |
| 463 v8::Platform::kShortRunningTask); | |
| 464 }); | 458 }); |
| 465 } | 459 } |
| 466 } | 460 } |
| 467 | 461 |
| 462 void MarkCompactCollector::Sweeper::StartSweepingHelper( |
| 463 AllocationSpace space_to_start) { |
| 464 num_sweeping_tasks_.Increment(1); |
| 465 V8::GetCurrentPlatform()->CallOnBackgroundThread( |
| 466 new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start), |
| 467 v8::Platform::kShortRunningTask); |
| 468 } |
| 469 |
| 468 void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted( | 470 void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted( |
| 469 Page* page) { | 471 Page* page) { |
| 470 if (!page->SweepingDone()) { | 472 if (!page->SweepingDone()) { |
| 471 ParallelSweepPage(page, page->owner()->identity()); | 473 ParallelSweepPage(page, page->owner()->identity()); |
| 472 if (!page->SweepingDone()) { | 474 if (!page->SweepingDone()) { |
| 473 // We were not able to sweep that page, i.e., a concurrent | 475 // We were not able to sweep that page, i.e., a concurrent |
| 474 // sweeper thread currently owns this page. Wait for the sweeper | 476 // sweeper thread currently owns this page. Wait for the sweeper |
| 475 // thread to be done with this page. | 477 // thread to be done with this page. |
| 476 page->WaitUntilSweepingCompleted(); | 478 page->WaitUntilSweepingCompleted(); |
| 477 } | 479 } |
| 478 } | 480 } |
| 479 } | 481 } |
| 480 | 482 |
| 481 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) { | 483 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) { |
| 482 if (FLAG_concurrent_sweeping && | 484 if (FLAG_concurrent_sweeping && !sweeper().IsSweepingCompleted()) { |
| 483 !sweeper().IsSweepingCompleted(space->identity())) { | |
| 484 sweeper().ParallelSweepSpace(space->identity(), 0); | 485 sweeper().ParallelSweepSpace(space->identity(), 0); |
| 485 space->RefillFreeList(); | 486 space->RefillFreeList(); |
| 486 } | 487 } |
| 487 } | 488 } |
| 488 | 489 |
| 489 Page* MarkCompactCollector::Sweeper::GetSweptPageSafe(PagedSpace* space) { | 490 Page* MarkCompactCollector::Sweeper::GetSweptPageSafe(PagedSpace* space) { |
| 490 base::LockGuard<base::Mutex> guard(&mutex_); | 491 base::LockGuard<base::Mutex> guard(&mutex_); |
| 491 SweptList& list = swept_list_[space->identity()]; | 492 SweptList& list = swept_list_[space->identity()]; |
| 492 if (list.length() > 0) { | 493 if (list.length() > 0) { |
| 493 return list.RemoveLast(); | 494 return list.RemoveLast(); |
| 494 } | 495 } |
| 495 return nullptr; | 496 return nullptr; |
| 496 } | 497 } |
| 497 | 498 |
| 498 void MarkCompactCollector::Sweeper::EnsureCompleted() { | 499 void MarkCompactCollector::Sweeper::EnsureCompleted() { |
| 499 if (!sweeping_in_progress_) return; | 500 if (!sweeping_in_progress_) return; |
| 500 | 501 |
| 501 // If sweeping is not completed or not running at all, we try to complete it | 502 // If sweeping is not completed or not running at all, we try to complete it |
| 502 // here. | 503 // here. |
| 503 ForAllSweepingSpaces([this](AllocationSpace space) { | 504 if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) { |
| 504 if (!FLAG_concurrent_sweeping || !this->IsSweepingCompleted(space)) { | 505 ForAllSweepingSpaces( |
| 505 ParallelSweepSpace(space, 0); | 506 [this](AllocationSpace space) { ParallelSweepSpace(space, 0); }); |
| 506 } | 507 } |
| 507 }); | |
| 508 | 508 |
| 509 if (FLAG_concurrent_sweeping) { | 509 if (FLAG_concurrent_sweeping) { |
| 510 while (num_sweeping_tasks_.Value() > 0) { | 510 while (num_sweeping_tasks_.Value() > 0) { |
| 511 pending_sweeper_tasks_semaphore_.Wait(); | 511 pending_sweeper_tasks_semaphore_.Wait(); |
| 512 num_sweeping_tasks_.Increment(-1); | 512 num_sweeping_tasks_.Increment(-1); |
| 513 } | 513 } |
| 514 } | 514 } |
| 515 | 515 |
| 516 ForAllSweepingSpaces([this](AllocationSpace space) { | 516 ForAllSweepingSpaces([this](AllocationSpace space) { |
| 517 if (space == NEW_SPACE) { | 517 if (space == NEW_SPACE) { |
| 518 swept_list_[NEW_SPACE].Clear(); | 518 swept_list_[NEW_SPACE].Clear(); |
| 519 } | 519 } |
| 520 DCHECK(sweeping_list_[space].empty()); | 520 DCHECK(sweeping_list_[space].empty()); |
| 521 }); | 521 }); |
| 522 late_pages_ = false; |
| 522 sweeping_in_progress_ = false; | 523 sweeping_in_progress_ = false; |
| 523 } | 524 } |
| 524 | 525 |
| 525 void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() { | 526 void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() { |
| 526 if (!sweeping_in_progress_) return; | 527 if (!sweeping_in_progress_) return; |
| 527 if (!FLAG_concurrent_sweeping || !IsSweepingCompleted(NEW_SPACE)) { | 528 if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) { |
| 528 for (Page* p : *heap_->new_space()) { | 529 for (Page* p : *heap_->new_space()) { |
| 529 SweepOrWaitUntilSweepingCompleted(p); | 530 SweepOrWaitUntilSweepingCompleted(p); |
| 530 } | 531 } |
| 531 } | 532 } |
| 532 } | 533 } |
| 533 | 534 |
| 534 void MarkCompactCollector::EnsureSweepingCompleted() { | 535 void MarkCompactCollector::EnsureSweepingCompleted() { |
| 535 if (!sweeper().sweeping_in_progress()) return; | 536 if (!sweeper().sweeping_in_progress()) return; |
| 536 | 537 |
| 537 sweeper().EnsureCompleted(); | 538 sweeper().EnsureCompleted(); |
| 538 heap()->old_space()->RefillFreeList(); | 539 heap()->old_space()->RefillFreeList(); |
| 539 heap()->code_space()->RefillFreeList(); | 540 heap()->code_space()->RefillFreeList(); |
| 540 heap()->map_space()->RefillFreeList(); | 541 heap()->map_space()->RefillFreeList(); |
| 541 | 542 |
| 542 #ifdef VERIFY_HEAP | 543 #ifdef VERIFY_HEAP |
| 543 if (FLAG_verify_heap && !evacuation()) { | 544 if (FLAG_verify_heap && !evacuation()) { |
| 544 VerifyEvacuation(heap_); | 545 VerifyEvacuation(heap_); |
| 545 } | 546 } |
| 546 #endif | 547 #endif |
| 547 } | 548 } |
| 548 | 549 |
| 549 bool MarkCompactCollector::Sweeper::AreSweeperTasksRunning() { | 550 bool MarkCompactCollector::Sweeper::IsSweepingCompleted() { |
| 550 DCHECK(FLAG_concurrent_sweeping); | 551 DCHECK(FLAG_concurrent_sweeping); |
| 551 while (pending_sweeper_tasks_semaphore_.WaitFor( | 552 while (pending_sweeper_tasks_semaphore_.WaitFor( |
| 552 base::TimeDelta::FromSeconds(0))) { | 553 base::TimeDelta::FromSeconds(0))) { |
| 553 num_sweeping_tasks_.Increment(-1); | 554 num_sweeping_tasks_.Increment(-1); |
| 554 } | 555 } |
| 555 return num_sweeping_tasks_.Value() != 0; | 556 return num_sweeping_tasks_.Value() == 0; |
| 556 } | |
| 557 | |
| 558 bool MarkCompactCollector::Sweeper::IsSweepingCompleted(AllocationSpace space) { | |
| 559 DCHECK(FLAG_concurrent_sweeping); | |
| 560 if (AreSweeperTasksRunning()) return false; | |
| 561 base::LockGuard<base::Mutex> guard(&mutex_); | |
| 562 return sweeping_list_[space].empty(); | |
| 563 } | 557 } |
| 564 | 558 |
| 565 const char* AllocationSpaceName(AllocationSpace space) { | 559 const char* AllocationSpaceName(AllocationSpace space) { |
| 566 switch (space) { | 560 switch (space) { |
| 567 case NEW_SPACE: | 561 case NEW_SPACE: |
| 568 return "NEW_SPACE"; | 562 return "NEW_SPACE"; |
| 569 case OLD_SPACE: | 563 case OLD_SPACE: |
| 570 return "OLD_SPACE"; | 564 return "OLD_SPACE"; |
| 571 case CODE_SPACE: | 565 case CODE_SPACE: |
| 572 return "CODE_SPACE"; | 566 return "CODE_SPACE"; |
| (...skipping 258 matching lines...) |
| 831 if (!was_marked_incrementally_ && FLAG_verify_heap) { | 825 if (!was_marked_incrementally_ && FLAG_verify_heap) { |
| 832 VerifyMarkbitsAreClean(); | 826 VerifyMarkbitsAreClean(); |
| 833 } | 827 } |
| 834 #endif | 828 #endif |
| 835 } | 829 } |
| 836 | 830 |
| 837 | 831 |
| 838 void MarkCompactCollector::Finish() { | 832 void MarkCompactCollector::Finish() { |
| 839 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH); | 833 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH); |
| 840 | 834 |
| 841 sweeper().StartSweeperTasks(); | 835 if (sweeper().contains_late_pages() && FLAG_concurrent_sweeping) { |
| 836 // If we added some more pages during MC, we need to start at least one |
| 837 // more task as all other tasks might already be finished. |
| 838 sweeper().StartSweepingHelper(OLD_SPACE); |
| 839 } |
| 842 | 840 |
| 843 // The hashing of weak_object_to_code_table is no longer valid. | 841 // The hashing of weak_object_to_code_table is no longer valid. |
| 844 heap()->weak_object_to_code_table()->Rehash( | 842 heap()->weak_object_to_code_table()->Rehash( |
| 845 heap()->isolate()->factory()->undefined_value()); | 843 heap()->isolate()->factory()->undefined_value()); |
| 846 | 844 |
| 847 // Clear the marking state of live large objects. | 845 // Clear the marking state of live large objects. |
| 848 heap_->lo_space()->ClearMarkingStateOfLiveObjects(); | 846 heap_->lo_space()->ClearMarkingStateOfLiveObjects(); |
| 849 | 847 |
| 850 #ifdef DEBUG | 848 #ifdef DEBUG |
| 851 DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS); | 849 DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS); |
| (...skipping 2316 matching lines...) |
| 3168 } | 3166 } |
| 3169 | 3167 |
| 3170 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages, | 3168 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages, |
| 3171 intptr_t live_bytes) { | 3169 intptr_t live_bytes) { |
| 3172 if (!FLAG_parallel_compaction) return 1; | 3170 if (!FLAG_parallel_compaction) return 1; |
| 3173 // Compute the number of needed tasks based on a target compaction time, the | 3171 // Compute the number of needed tasks based on a target compaction time, the |
| 3174 // profiled compaction speed and marked live memory. | 3172 // profiled compaction speed and marked live memory. |
| 3175 // | 3173 // |
| 3176 // The number of parallel compaction tasks is limited by: | 3174 // The number of parallel compaction tasks is limited by: |
| 3177 // - #evacuation pages | 3175 // - #evacuation pages |
| 3178 // - #cores | 3176 // - (#cores - 1) |
| 3179 const double kTargetCompactionTimeInMs = .5; | 3177 const double kTargetCompactionTimeInMs = .5; |
| 3178 const int kNumSweepingTasks = 3; |
| 3180 | 3179 |
| 3181 double compaction_speed = | 3180 double compaction_speed = |
| 3182 heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); | 3181 heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); |
| 3183 | 3182 |
| 3184 const int available_cores = Max( | 3183 const int available_cores = Max( |
| 3185 1, static_cast<int>( | 3184 1, static_cast<int>( |
| 3186 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads())); | 3185 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()) - |
| 3186 kNumSweepingTasks - 1); |
| 3187 int tasks; | 3187 int tasks; |
| 3188 if (compaction_speed > 0) { | 3188 if (compaction_speed > 0) { |
| 3189 tasks = 1 + static_cast<int>(live_bytes / compaction_speed / | 3189 tasks = 1 + static_cast<int>(live_bytes / compaction_speed / |
| 3190 kTargetCompactionTimeInMs); | 3190 kTargetCompactionTimeInMs); |
| 3191 } else { | 3191 } else { |
| 3192 tasks = pages; | 3192 tasks = pages; |
| 3193 } | 3193 } |
| 3194 const int tasks_capped_pages = Min(pages, tasks); | 3194 const int tasks_capped_pages = Min(pages, tasks); |
| 3195 return Min(available_cores, tasks_capped_pages); | 3195 return Min(available_cores, tasks_capped_pages); |
| 3196 } | 3196 } |
| (...skipping 339 matching lines...) |
| 3536 // still contain stale pointers. We only free the chunks after pointer updates | 3536 // still contain stale pointers. We only free the chunks after pointer updates |
| 3537 // to still have access to page headers. | 3537 // to still have access to page headers. |
| 3538 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); | 3538 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); |
| 3539 | 3539 |
| 3540 { | 3540 { |
| 3541 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP); | 3541 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP); |
| 3542 | 3542 |
| 3543 for (Page* p : newspace_evacuation_candidates_) { | 3543 for (Page* p : newspace_evacuation_candidates_) { |
| 3544 if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { | 3544 if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { |
| 3545 p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION); | 3545 p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION); |
| 3546 sweeper().AddPage(p->owner()->identity(), p); | 3546 sweeper().AddLatePage(p->owner()->identity(), p); |
| 3547 } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) { | 3547 } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) { |
| 3548 p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION); | 3548 p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION); |
| 3549 p->ForAllFreeListCategories( | 3549 p->ForAllFreeListCategories( |
| 3550 [](FreeListCategory* category) { DCHECK(!category->is_linked()); }); | 3550 [](FreeListCategory* category) { DCHECK(!category->is_linked()); }); |
| 3551 sweeper().AddPage(p->owner()->identity(), p); | 3551 sweeper().AddLatePage(p->owner()->identity(), p); |
| 3552 } | 3552 } |
| 3553 } | 3553 } |
| 3554 newspace_evacuation_candidates_.Rewind(0); | 3554 newspace_evacuation_candidates_.Rewind(0); |
| 3555 | 3555 |
| 3556 for (Page* p : evacuation_candidates_) { | 3556 for (Page* p : evacuation_candidates_) { |
| 3557 // Important: skip list should be cleared only after roots were updated | 3557 // Important: skip list should be cleared only after roots were updated |
| 3558 // because root iteration traverses the stack and might have to find | 3558 // because root iteration traverses the stack and might have to find |
| 3559 // code objects from non-updated pc pointing into evacuation candidate. | 3559 // code objects from non-updated pc pointing into evacuation candidate. |
| 3560 SkipList* list = p->skip_list(); | 3560 SkipList* list = p->skip_list(); |
| 3561 if (list != NULL) list->Clear(); | 3561 if (list != NULL) list->Clear(); |
| 3562 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { | 3562 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { |
| 3563 sweeper().AddPage(p->owner()->identity(), p); | 3563 sweeper().AddLatePage(p->owner()->identity(), p); |
| 3564 p->ClearFlag(Page::COMPACTION_WAS_ABORTED); | 3564 p->ClearFlag(Page::COMPACTION_WAS_ABORTED); |
| 3565 } | 3565 } |
| 3566 } | 3566 } |
| 3567 | 3567 |
| 3568 // Deallocate evacuated candidate pages. | 3568 // Deallocate evacuated candidate pages. |
| 3569 ReleaseEvacuationCandidates(); | 3569 ReleaseEvacuationCandidates(); |
| 3570 } | 3570 } |
| 3571 | 3571 |
| 3572 #ifdef VERIFY_HEAP | 3572 #ifdef VERIFY_HEAP |
| 3573 if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) { | 3573 if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) { |
| (...skipping 281 matching lines...) |
| 3855 base::LockGuard<base::Mutex> guard(&mutex_); | 3855 base::LockGuard<base::Mutex> guard(&mutex_); |
| 3856 swept_list_[identity].Add(page); | 3856 swept_list_[identity].Add(page); |
| 3857 } | 3857 } |
| 3858 page->concurrent_sweeping_state().SetValue(Page::kSweepingDone); | 3858 page->concurrent_sweeping_state().SetValue(Page::kSweepingDone); |
| 3859 page->mutex()->Unlock(); | 3859 page->mutex()->Unlock(); |
| 3860 } | 3860 } |
| 3861 return max_freed; | 3861 return max_freed; |
| 3862 } | 3862 } |
| 3863 | 3863 |
| 3864 void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) { | 3864 void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) { |
| 3865 DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning()); | 3865 DCHECK(!sweeping_in_progress_); |
| 3866 PrepareToBeSweptPage(space, page); | 3866 PrepareToBeSweptPage(space, page); |
| 3867 sweeping_list_[space].push_back(page); | 3867 sweeping_list_[space].push_back(page); |
| 3868 } | 3868 } |
| 3869 | 3869 |
| 3870 void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space, |
| 3871 Page* page) { |
| 3872 DCHECK(sweeping_in_progress_); |
| 3873 PrepareToBeSweptPage(space, page); |
| 3874 late_pages_ = true; |
| 3875 AddSweepingPageSafe(space, page); |
| 3876 } |
| 3877 |
| 3870 void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space, | 3878 void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space, |
| 3871 Page* page) { | 3879 Page* page) { |
| 3872 page->concurrent_sweeping_state().SetValue(Page::kSweepingPending); | 3880 page->concurrent_sweeping_state().SetValue(Page::kSweepingPending); |
| 3873 DCHECK_GE(page->area_size(), static_cast<size_t>(page->LiveBytes())); | 3881 DCHECK_GE(page->area_size(), static_cast<size_t>(page->LiveBytes())); |
| 3874 size_t to_sweep = page->area_size() - page->LiveBytes(); | 3882 size_t to_sweep = page->area_size() - page->LiveBytes(); |
| 3875 if (space != NEW_SPACE) | 3883 if (space != NEW_SPACE) |
| 3876 heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep); | 3884 heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep); |
| 3877 } | 3885 } |
| 3878 | 3886 |
| 3879 Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe( | 3887 Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe( |
| (...skipping 60 matching lines...) |
| 3940 sweeper().AddPage(space->identity(), p); | 3948 sweeper().AddPage(space->identity(), p); |
| 3941 will_be_swept++; | 3949 will_be_swept++; |
| 3942 } | 3950 } |
| 3943 | 3951 |
| 3944 if (FLAG_gc_verbose) { | 3952 if (FLAG_gc_verbose) { |
| 3945 PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d", | 3953 PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d", |
| 3946 AllocationSpaceName(space->identity()), will_be_swept); | 3954 AllocationSpaceName(space->identity()), will_be_swept); |
| 3947 } | 3955 } |
| 3948 } | 3956 } |
| 3949 | 3957 |
| 3950 void MarkCompactCollector::StartSweepSpaces() { | 3958 |
| 3959 void MarkCompactCollector::SweepSpaces() { |
| 3951 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP); | 3960 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP); |
| 3952 #ifdef DEBUG | 3961 #ifdef DEBUG |
| 3953 state_ = SWEEP_SPACES; | 3962 state_ = SWEEP_SPACES; |
| 3954 #endif | 3963 #endif |
| 3955 | 3964 |
| 3956 { | 3965 { |
| 3957 { | 3966 { |
| 3958 GCTracer::Scope sweep_scope(heap()->tracer(), | 3967 GCTracer::Scope sweep_scope(heap()->tracer(), |
| 3959 GCTracer::Scope::MC_SWEEP_OLD); | 3968 GCTracer::Scope::MC_SWEEP_OLD); |
| 3960 StartSweepSpace(heap()->old_space()); | 3969 StartSweepSpace(heap()->old_space()); |
| (...skipping 49 matching lines...) |
| 4010 // The target is always in old space, we don't have to record the slot in | 4019 // The target is always in old space, we don't have to record the slot in |
| 4011 // the old-to-new remembered set. | 4020 // the old-to-new remembered set. |
| 4012 DCHECK(!heap()->InNewSpace(target)); | 4021 DCHECK(!heap()->InNewSpace(target)); |
| 4013 RecordRelocSlot(host, &rinfo, target); | 4022 RecordRelocSlot(host, &rinfo, target); |
| 4014 } | 4023 } |
| 4015 } | 4024 } |
| 4016 } | 4025 } |
| 4017 | 4026 |
| 4018 } // namespace internal | 4027 } // namespace internal |
| 4019 } // namespace v8 | 4028 } // namespace v8 |
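
Note on the NumberOfParallelCompactionTasks hunk above: on the NEW side, the task count is derived from the profiled compaction speed and a fixed time budget, then capped by the number of evacuation pages and by the background threads left over after reserving cores for concurrent sweeping and the main thread. The following is a minimal, self-contained C++ sketch of that heuristic with made-up inputs; the constants mirror the patch, but the function name and the example numbers are illustrative only and are not part of the change.

// Illustrative sketch only (not part of the patch): the task-count heuristic
// from NumberOfParallelCompactionTasks, reduced to plain integers.
#include <algorithm>
#include <cstdint>
#include <cstdio>

int EstimateCompactionTasks(int pages, int64_t live_bytes,
                            double compaction_speed_bytes_per_ms,
                            int background_threads) {
  const double kTargetCompactionTimeInMs = 0.5;  // same budget as the patch
  const int kNumSweepingTasks = 3;               // cores reserved for sweeping

  // Leave room for the sweeping tasks and the main thread, but keep >= 1.
  const int available_cores =
      std::max(1, background_threads - kNumSweepingTasks - 1);

  int tasks;
  if (compaction_speed_bytes_per_ms > 0) {
    // Enough tasks so the marked live memory fits the time budget per task.
    tasks = 1 + static_cast<int>(live_bytes / compaction_speed_bytes_per_ms /
                                 kTargetCompactionTimeInMs);
  } else {
    // No speed sample yet: fall back to one task per evacuation page.
    tasks = pages;
  }
  // Never more tasks than pages, never more than the cores we may use.
  return std::min(available_cores, std::min(pages, tasks));
}

int main() {
  // Example: 8 evacuation pages, 4 MB live, 2000 bytes/ms, 8 background threads.
  // live/speed/budget ~= 4194, capped to pages (8), then to cores (8-3-1 = 4).
  std::printf("tasks = %d\n",
              EstimateCompactionTasks(8, 4 * 1024 * 1024, 2000.0, 8));
  return 0;  // prints "tasks = 4"
}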