OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_HEAP_MARK_COMPACT_H_ | 5 #ifndef V8_HEAP_MARK_COMPACT_H_ |
6 #define V8_HEAP_MARK_COMPACT_H_ | 6 #define V8_HEAP_MARK_COMPACT_H_ |
7 | 7 |
8 #include <deque> | 8 #include <deque> |
9 | 9 |
10 #include "src/base/bits.h" | 10 #include "src/base/bits.h" |
(...skipping 281 matching lines...)
292 typedef std::deque<Page*> SweepingList; | 292 typedef std::deque<Page*> SweepingList; |
293 typedef List<Page*> SweptList; | 293 typedef List<Page*> SweptList; |
294 | 294 |
295 static int RawSweep(Page* p, FreeListRebuildingMode free_list_mode, | 295 static int RawSweep(Page* p, FreeListRebuildingMode free_list_mode, |
296 FreeSpaceTreatmentMode free_space_mode); | 296 FreeSpaceTreatmentMode free_space_mode); |
297 | 297 |
298 explicit Sweeper(Heap* heap) | 298 explicit Sweeper(Heap* heap) |
299 : heap_(heap), | 299 : heap_(heap), |
300 pending_sweeper_tasks_semaphore_(0), | 300 pending_sweeper_tasks_semaphore_(0), |
301 sweeping_in_progress_(false), | 301 sweeping_in_progress_(false), |
| 302 late_pages_(false), |
302 num_sweeping_tasks_(0) {} | 303 num_sweeping_tasks_(0) {} |
303 | 304 |
304 bool sweeping_in_progress() { return sweeping_in_progress_; } | 305 bool sweeping_in_progress() { return sweeping_in_progress_; } |
| 306 bool contains_late_pages() { return late_pages_; } |
305 | 307 |
306 void AddPage(AllocationSpace space, Page* page); | 308 void AddPage(AllocationSpace space, Page* page); |
| 309 void AddLatePage(AllocationSpace space, Page* page); |
307 | 310 |
308 int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes, | 311 int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes, |
309 int max_pages = 0); | 312 int max_pages = 0); |
310 int ParallelSweepPage(Page* page, AllocationSpace identity); | 313 int ParallelSweepPage(Page* page, AllocationSpace identity); |
311 | 314 |
312 // After calling this function sweeping is considered to be in progress | |
313 // and the main thread can sweep lazily, but the background sweeper tasks | |
314 // are not running yet. | |
315 void StartSweeping(); | 315 void StartSweeping(); |
316 void StartSweeperTasks(); | 316 void StartSweepingHelper(AllocationSpace space_to_start); |
317 void EnsureCompleted(); | 317 void EnsureCompleted(); |
318 void EnsureNewSpaceCompleted(); | 318 void EnsureNewSpaceCompleted(); |
319 bool AreSweeperTasksRunning(); | 319 bool IsSweepingCompleted(); |
320 bool IsSweepingCompleted(AllocationSpace space); | |
321 void SweepOrWaitUntilSweepingCompleted(Page* page); | 320 void SweepOrWaitUntilSweepingCompleted(Page* page); |
322 | 321 |
323 void AddSweptPageSafe(PagedSpace* space, Page* page); | 322 void AddSweptPageSafe(PagedSpace* space, Page* page); |
324 Page* GetSweptPageSafe(PagedSpace* space); | 323 Page* GetSweptPageSafe(PagedSpace* space); |
325 | 324 |
326 private: | 325 private: |
327 static const int kAllocationSpaces = LAST_PAGED_SPACE + 1; | 326 static const int kAllocationSpaces = LAST_PAGED_SPACE + 1; |
328 | 327 |
329 template <typename Callback> | 328 template <typename Callback> |
330 void ForAllSweepingSpaces(Callback callback) { | 329 void ForAllSweepingSpaces(Callback callback) { |
331 for (int i = 0; i < kAllocationSpaces; i++) { | 330 for (int i = 0; i < kAllocationSpaces; i++) { |
332 callback(static_cast<AllocationSpace>(i)); | 331 callback(static_cast<AllocationSpace>(i)); |
333 } | 332 } |
334 } | 333 } |
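
(Note: ForAllSweepingSpaces simply invokes the callback once per paged allocation space. A hypothetical call site, not part of this header, could combine it with the ParallelSweepSpace declaration above roughly like this; the exact argument semantics are assumptions for illustration only.)

    ForAllSweepingSpaces([this](AllocationSpace space) {
      // Contribute to sweeping this space on the calling thread.
      ParallelSweepSpace(space, 0);
    });
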
335 | 334 |
336 Page* GetSweepingPageSafe(AllocationSpace space); | 335 Page* GetSweepingPageSafe(AllocationSpace space); |
337 void AddSweepingPageSafe(AllocationSpace space, Page* page); | 336 void AddSweepingPageSafe(AllocationSpace space, Page* page); |
338 | 337 |
339 void PrepareToBeSweptPage(AllocationSpace space, Page* page); | 338 void PrepareToBeSweptPage(AllocationSpace space, Page* page); |
340 | 339 |
341 Heap* heap_; | 340 Heap* heap_; |
342 base::Semaphore pending_sweeper_tasks_semaphore_; | 341 base::Semaphore pending_sweeper_tasks_semaphore_; |
343 base::Mutex mutex_; | 342 base::Mutex mutex_; |
344 SweptList swept_list_[kAllocationSpaces]; | 343 SweptList swept_list_[kAllocationSpaces]; |
345 SweepingList sweeping_list_[kAllocationSpaces]; | 344 SweepingList sweeping_list_[kAllocationSpaces]; |
346 bool sweeping_in_progress_; | 345 bool sweeping_in_progress_; |
| 346 bool late_pages_; |
347 base::AtomicNumber<intptr_t> num_sweeping_tasks_; | 347 base::AtomicNumber<intptr_t> num_sweeping_tasks_; |
348 }; | 348 }; |
349 | 349 |
350 enum IterationMode { | 350 enum IterationMode { |
351 kKeepMarking, | 351 kKeepMarking, |
352 kClearMarkbits, | 352 kClearMarkbits, |
353 }; | 353 }; |
354 | 354 |
355 static void Initialize(); | 355 static void Initialize(); |
356 | 356 |
(...skipping 279 matching lines...)
636 // We have to remove all encountered weak maps from the list of weak | 636 // We have to remove all encountered weak maps from the list of weak |
637 // collections when incremental marking is aborted. | 637 // collections when incremental marking is aborted. |
638 void AbortWeakCollections(); | 638 void AbortWeakCollections(); |
639 | 639 |
640 void ClearWeakCells(Object** non_live_map_list, | 640 void ClearWeakCells(Object** non_live_map_list, |
641 DependentCode** dependent_code_list); | 641 DependentCode** dependent_code_list); |
642 void AbortWeakCells(); | 642 void AbortWeakCells(); |
643 | 643 |
644 void AbortTransitionArrays(); | 644 void AbortTransitionArrays(); |
645 | 645 |
646 // Starts sweeping of spaces by contributing on the main thread and setting | 646 // ----------------------------------------------------------------------- |
647 // up other pages for sweeping. Does not start sweeper tasks. | 647 // Phase 2: Sweeping to clear mark bits and free non-live objects for |
648 void StartSweepSpaces(); | 648 // a non-compacting collection. |
649 void StartSweepSpace(PagedSpace* space); | 649 // |
| 650 // Before: Live objects are marked and non-live objects are unmarked. |
| 651 // |
| 652 // After: Live objects are unmarked, non-live regions have been added to |
| 653 // their space's free list. Active eden semispace is compacted by |
| 654 // evacuation. |
| 655 // |
| 656 |
| 657 // If we are not compacting the heap, we simply sweep the spaces except |
| 658 // for the large object space, clearing mark bits and adding unmarked |
| 659 // regions to each space's free list. |
| 660 void SweepSpaces(); |
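
(Note: the before/after invariant described in the comment above can be illustrated with a small self-contained sketch. These are toy types, not V8's actual page or free-list structures: sweeping walks a page, clears the mark bit of live objects, and records each non-live span as reusable free space.)

    #include <cstddef>
    #include <vector>

    struct ToyObject { size_t offset; size_t size; bool marked; };
    struct FreeRegion { size_t offset; size_t size; };

    // Sweep one toy "page": unmark survivors and turn dead spans into
    // free-list entries that a later allocation could reuse.
    std::vector<FreeRegion> SweepToyPage(std::vector<ToyObject>& objects) {
      std::vector<FreeRegion> free_list;
      for (ToyObject& obj : objects) {
        if (obj.marked) {
          obj.marked = false;  // Live objects end up unmarked.
        } else {
          free_list.push_back({obj.offset, obj.size});  // Non-live region freed.
        }
      }
      return free_list;
    }
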
650 | 661 |
651 void EvacuateNewSpacePrologue(); | 662 void EvacuateNewSpacePrologue(); |
652 | 663 |
653 void EvacuatePagesInParallel(); | 664 void EvacuatePagesInParallel(); |
654 | 665 |
655 // The number of parallel compaction tasks, including the main thread. | 666 // The number of parallel compaction tasks, including the main thread. |
656 int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes); | 667 int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes); |
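
(Note: as a rough illustration of how such a bound might be computed, here is an assumed heuristic, not necessarily the one V8 uses: cap the task count by the number of pages, the available cores, and the amount of live data, and never go below the main thread itself.)

    #include <algorithm>
    #include <cstdint>

    // Illustrative only: one plausible bound on compaction parallelism.
    int EstimateCompactionTasks(int pages, int64_t live_bytes, int cores) {
      const int64_t kBytesPerTask = 8 * 1024 * 1024;  // assumed work granularity
      int by_bytes = static_cast<int>(live_bytes / kBytesPerTask) + 1;
      return std::max(1, std::min({pages, cores, by_bytes}));
    }
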
657 | 668 |
658 void EvacuateNewSpaceAndCandidates(); | 669 void EvacuateNewSpaceAndCandidates(); |
659 | 670 |
660 void UpdatePointersAfterEvacuation(); | 671 void UpdatePointersAfterEvacuation(); |
661 | 672 |
662 // Iterates through all live objects on a page using marking information. | 673 // Iterates through all live objects on a page using marking information. |
663 // Returns whether all objects have successfully been visited. | 674 // Returns whether all objects have successfully been visited. |
664 template <class Visitor> | 675 template <class Visitor> |
665 bool VisitLiveObjects(MemoryChunk* page, Visitor* visitor, | 676 bool VisitLiveObjects(MemoryChunk* page, Visitor* visitor, |
666 IterationMode mode); | 677 IterationMode mode); |
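
(Note: a simplified, self-contained analogue of this walk is sketched below with toy types and assumed semantics: only marked objects are visited, a mode like kClearMarkbits might clear marks as the walk proceeds, and the walk reports failure as soon as the visitor does, which lets the caller distinguish a complete pass from an aborted one.)

    #include <vector>

    enum ToyIterationMode { kToyKeepMarking, kToyClearMarkbits };

    struct ToyHeapObject { bool marked; /* payload elided */ };

    // Visit every live (marked) object of a toy page, optionally clearing
    // marks, and stop early if the visitor reports failure.
    template <class Visitor>
    bool VisitLiveToyObjects(std::vector<ToyHeapObject>& page, Visitor* visitor,
                             ToyIterationMode mode) {
      for (ToyHeapObject& object : page) {
        if (!object.marked) continue;              // Skip non-live objects.
        if (!visitor->Visit(&object)) return false;
        if (mode == kToyClearMarkbits) object.marked = false;
      }
      return true;  // All live objects were visited successfully.
    }
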
667 | 678 |
668 void RecomputeLiveBytes(MemoryChunk* page); | 679 void RecomputeLiveBytes(MemoryChunk* page); |
669 | 680 |
670 void ReleaseEvacuationCandidates(); | 681 void ReleaseEvacuationCandidates(); |
671 | 682 |
| 683 // Starts sweeping of a space by contributing on the main thread and setting |
| 684 // up other pages for sweeping. |
| 685 void StartSweepSpace(PagedSpace* space); |
672 | 686 |
673 #ifdef DEBUG | 687 #ifdef DEBUG |
674 friend class MarkObjectVisitor; | 688 friend class MarkObjectVisitor; |
675 static void VisitObject(HeapObject* obj); | 689 static void VisitObject(HeapObject* obj); |
676 | 690 |
677 friend class UnmarkObjectVisitor; | 691 friend class UnmarkObjectVisitor; |
678 static void UnmarkObject(HeapObject* obj); | 692 static void UnmarkObject(HeapObject* obj); |
679 #endif | 693 #endif |
680 | 694 |
681 Heap* heap_; | 695 Heap* heap_; |
(...skipping 56 matching lines...)
738 | 752 |
739 private: | 753 private: |
740 MarkCompactCollector* collector_; | 754 MarkCompactCollector* collector_; |
741 }; | 755 }; |
742 | 756 |
743 V8_EXPORT_PRIVATE const char* AllocationSpaceName(AllocationSpace space); | 757 V8_EXPORT_PRIVATE const char* AllocationSpaceName(AllocationSpace space); |
744 } // namespace internal | 758 } // namespace internal |
745 } // namespace v8 | 759 } // namespace v8 |
746 | 760 |
747 #endif // V8_HEAP_MARK_COMPACT_H_ | 761 #endif // V8_HEAP_MARK_COMPACT_H_ |