OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_HEAP_MARK_COMPACT_H_ | 5 #ifndef V8_HEAP_MARK_COMPACT_H_ |
6 #define V8_HEAP_MARK_COMPACT_H_ | 6 #define V8_HEAP_MARK_COMPACT_H_ |
7 | 7 |
8 #include <deque> | 8 #include <deque> |
9 | 9 |
10 #include "src/base/bits.h" | 10 #include "src/base/bits.h" |
(...skipping 281 matching lines...)
292 typedef std::deque<Page*> SweepingList; | 292 typedef std::deque<Page*> SweepingList; |
293 typedef List<Page*> SweptList; | 293 typedef List<Page*> SweptList; |
294 | 294 |
295 static int RawSweep(Page* p, FreeListRebuildingMode free_list_mode, | 295 static int RawSweep(Page* p, FreeListRebuildingMode free_list_mode, |
296 FreeSpaceTreatmentMode free_space_mode); | 296 FreeSpaceTreatmentMode free_space_mode); |
297 | 297 |
298 explicit Sweeper(Heap* heap) | 298 explicit Sweeper(Heap* heap) |
299 : heap_(heap), | 299 : heap_(heap), |
300 pending_sweeper_tasks_semaphore_(0), | 300 pending_sweeper_tasks_semaphore_(0), |
301 sweeping_in_progress_(false), | 301 sweeping_in_progress_(false), |
302 late_pages_(false), | |
303 num_sweeping_tasks_(0) {} | 302 num_sweeping_tasks_(0) {} |
304 | 303 |
305 bool sweeping_in_progress() { return sweeping_in_progress_; } | 304 bool sweeping_in_progress() { return sweeping_in_progress_; } |
306 bool contains_late_pages() { return late_pages_; } | |
307 | 305 |
308 void AddPage(AllocationSpace space, Page* page); | 306 void AddPage(AllocationSpace space, Page* page); |
309 void AddLatePage(AllocationSpace space, Page* page); | |
310 | 307 |
311 int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes, | 308 int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes, |
312 int max_pages = 0); | 309 int max_pages = 0); |
313 int ParallelSweepPage(Page* page, AllocationSpace identity); | 310 int ParallelSweepPage(Page* page, AllocationSpace identity); |
314 | 311 |
| 312 // After calling this function, sweeping is considered to be in progress |
| 313 // and the main thread can sweep lazily, but the background sweeper tasks |
| 314 // are not running yet. |
315 void StartSweeping(); | 315 void StartSweeping(); |
316 void StartSweepingHelper(AllocationSpace space_to_start); | 316 void StartSweeperTasks(); |
317 void EnsureCompleted(); | 317 void EnsureCompleted(); |
318 void EnsureNewSpaceCompleted(); | 318 void EnsureNewSpaceCompleted(); |
319 bool IsSweepingCompleted(); | 319 bool AreSweeperTasksRunning(); |
| 320 bool IsSweepingCompleted(AllocationSpace space); |
320 void SweepOrWaitUntilSweepingCompleted(Page* page); | 321 void SweepOrWaitUntilSweepingCompleted(Page* page); |
321 | 322 |
322 void AddSweptPageSafe(PagedSpace* space, Page* page); | 323 void AddSweptPageSafe(PagedSpace* space, Page* page); |
323 Page* GetSweptPageSafe(PagedSpace* space); | 324 Page* GetSweptPageSafe(PagedSpace* space); |
324 | 325 |
325 private: | 326 private: |
326 static const int kAllocationSpaces = LAST_PAGED_SPACE + 1; | 327 static const int kAllocationSpaces = LAST_PAGED_SPACE + 1; |
327 | 328 |
328 template <typename Callback> | 329 template <typename Callback> |
329 void ForAllSweepingSpaces(Callback callback) { | 330 void ForAllSweepingSpaces(Callback callback) { |
330 for (int i = 0; i < kAllocationSpaces; i++) { | 331 for (int i = 0; i < kAllocationSpaces; i++) { |
331 callback(static_cast<AllocationSpace>(i)); | 332 callback(static_cast<AllocationSpace>(i)); |
332 } | 333 } |
333 } | 334 } |
334 | 335 |
335 Page* GetSweepingPageSafe(AllocationSpace space); | 336 Page* GetSweepingPageSafe(AllocationSpace space); |
336 void AddSweepingPageSafe(AllocationSpace space, Page* page); | 337 void AddSweepingPageSafe(AllocationSpace space, Page* page); |
337 | 338 |
338 void PrepareToBeSweptPage(AllocationSpace space, Page* page); | 339 void PrepareToBeSweptPage(AllocationSpace space, Page* page); |
339 | 340 |
340 Heap* heap_; | 341 Heap* heap_; |
341 base::Semaphore pending_sweeper_tasks_semaphore_; | 342 base::Semaphore pending_sweeper_tasks_semaphore_; |
342 base::Mutex mutex_; | 343 base::Mutex mutex_; |
343 SweptList swept_list_[kAllocationSpaces]; | 344 SweptList swept_list_[kAllocationSpaces]; |
344 SweepingList sweeping_list_[kAllocationSpaces]; | 345 SweepingList sweeping_list_[kAllocationSpaces]; |
345 bool sweeping_in_progress_; | 346 bool sweeping_in_progress_; |
346 bool late_pages_; | |
347 base::AtomicNumber<intptr_t> num_sweeping_tasks_; | 347 base::AtomicNumber<intptr_t> num_sweeping_tasks_; |
348 }; | 348 }; |
349 | 349 |
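The split of the old StartSweepingHelper() into StartSweeping() and StartSweeperTasks() implies a two-step protocol. Below is a minimal sketch of the intended call order, written only against the declarations in this class; the wrapper function, the choice of OLD_SPACE, and the ParallelSweepSpace arguments are illustrative assumptions, not code from this CL.

#include "src/heap/mark-compact.h"

namespace v8 {
namespace internal {

// Illustrative sketch: drives the reworked Sweeper interface in the order
// described by the comment on StartSweeping().
void SweepingCycleSketch(MarkCompactCollector::Sweeper* sweeper) {
  // Step 1: sweeping is now considered to be in progress; the main thread
  // may sweep lazily, but no background sweeper tasks exist yet.
  sweeper->StartSweeping();

  // Main-thread contribution is already possible at this point, e.g. to
  // free a page before any background task has been spawned.
  sweeper->ParallelSweepSpace(OLD_SPACE, /* required_freed_bytes */ 0,
                              /* max_pages */ 1);

  // Step 2: spawn the background sweeper tasks.
  sweeper->StartSweeperTasks();

  // Completion can now be queried per space while tasks are running...
  if (!sweeper->IsSweepingCompleted(OLD_SPACE)) {
    // ...and the main thread can keep contributing in parallel.
    sweeper->ParallelSweepSpace(OLD_SPACE, /* required_freed_bytes */ 0);
  }

  // Step 3: wait for all sweeper tasks, after which sweeping is no longer
  // considered to be in progress.
  sweeper->EnsureCompleted();
}

}  // namespace internal
}  // namespace v8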
350 enum IterationMode { | 350 enum IterationMode { |
351 kKeepMarking, | 351 kKeepMarking, |
352 kClearMarkbits, | 352 kClearMarkbits, |
353 }; | 353 }; |
354 | 354 |
355 static void Initialize(); | 355 static void Initialize(); |
356 | 356 |
(...skipping 279 matching lines...)
636 // We have to remove all encountered weak maps from the list of weak | 636 // We have to remove all encountered weak maps from the list of weak |
637 // collections when incremental marking is aborted. | 637 // collections when incremental marking is aborted. |
638 void AbortWeakCollections(); | 638 void AbortWeakCollections(); |
639 | 639 |
640 void ClearWeakCells(Object** non_live_map_list, | 640 void ClearWeakCells(Object** non_live_map_list, |
641 DependentCode** dependent_code_list); | 641 DependentCode** dependent_code_list); |
642 void AbortWeakCells(); | 642 void AbortWeakCells(); |
643 | 643 |
644 void AbortTransitionArrays(); | 644 void AbortTransitionArrays(); |
645 | 645 |
646 // ----------------------------------------------------------------------- | 646 // Starts sweeping of spaces by contributing on the main thread and setting |
647 // Phase 2: Sweeping to clear mark bits and free non-live objects for | 647 // up other pages for sweeping. Does not start sweeper tasks. |
648 // a non-compacting collection. | 648 void StartSweepSpaces(); |
649 // | 649 void StartSweepSpace(PagedSpace* space); |
650 // Before: Live objects are marked and non-live objects are unmarked. | |
651 // | |
652 // After: Live objects are unmarked, non-live regions have been added to | |
653 // their space's free list. Active eden semispace is compacted by | |
654 // evacuation. | |
655 // | |
656 | |
657 // If we are not compacting the heap, we simply sweep the spaces except | |
658 // for the large object space, clearing mark bits and adding unmarked | |
659 // regions to each space's free list. | |
660 void SweepSpaces(); | |
661 | 650 |
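The comment only states what StartSweepSpaces() does and does not do; the body below is a hypothetical outline of that description (per-space setup plus a main-thread contribution, no sweeper tasks), not the implementation from this CL. The heap()/sweeper() accessors and the particular set of paged spaces are assumptions.

#include "src/heap/mark-compact.h"

namespace v8 {
namespace internal {

// Hypothetical outline, inferred from the comment above.
void MarkCompactCollector::StartSweepSpaces() {
  // Set up each paged space for sweeping: hand its pages to the sweeper and
  // let the main thread sweep some of them right away.
  StartSweepSpace(heap()->old_space());
  StartSweepSpace(heap()->code_space());
  StartSweepSpace(heap()->map_space());

  // Mark sweeping as in progress. Background sweeper tasks are not started
  // here; Sweeper::StartSweeperTasks() does that separately.
  sweeper().StartSweeping();
}

}  // namespace internal
}  // namespace v8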
662 void EvacuateNewSpacePrologue(); | 651 void EvacuateNewSpacePrologue(); |
663 | 652 |
664 void EvacuatePagesInParallel(); | 653 void EvacuatePagesInParallel(); |
665 | 654 |
666 // The number of parallel compaction tasks, including the main thread. | 655 // The number of parallel compaction tasks, including the main thread. |
667 int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes); | 656 int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes); |
668 | 657 |
669 void EvacuateNewSpaceAndCandidates(); | 658 void EvacuateNewSpaceAndCandidates(); |
670 | 659 |
671 void UpdatePointersAfterEvacuation(); | 660 void UpdatePointersAfterEvacuation(); |
672 | 661 |
673 // Iterates through all live objects on a page using marking information. | 662 // Iterates through all live objects on a page using marking information. |
674 // Returns whether all objects have successfully been visited. | 663 // Returns whether all objects have successfully been visited. |
675 template <class Visitor> | 664 template <class Visitor> |
676 bool VisitLiveObjects(MemoryChunk* page, Visitor* visitor, | 665 bool VisitLiveObjects(MemoryChunk* page, Visitor* visitor, |
677 IterationMode mode); | 666 IterationMode mode); |
678 | 667 |
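The visitor contract for VisitLiveObjects() is not spelled out in this header. The sketch below assumes the shape used by the collector's other visitors, a bool Visit(HeapObject*) member where returning false aborts the iteration and makes VisitLiveObjects() return false; treat that interface as an assumption rather than part of this diff.

// Assumed visitor shape; the Visit() signature is not declared in this file.
class LiveObjectCounter {
 public:
  bool Visit(HeapObject* /* object */) {
    count_++;
    return true;  // keep iterating over the page's live objects
  }
  int count() const { return count_; }

 private:
  int count_ = 0;
};

// Usage (from inside MarkCompactCollector, since VisitLiveObjects is private):
//   LiveObjectCounter counter;
//   bool all_visited = VisitLiveObjects(page, &counter, kKeepMarking);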
679 void RecomputeLiveBytes(MemoryChunk* page); | 668 void RecomputeLiveBytes(MemoryChunk* page); |
680 | 669 |
681 void ReleaseEvacuationCandidates(); | 670 void ReleaseEvacuationCandidates(); |
682 | 671 |
683 // Starts sweeping of a space by contributing on the main thread and setting | |
684 // up other pages for sweeping. | |
685 void StartSweepSpace(PagedSpace* space); | |
686 | 672 |
687 #ifdef DEBUG | 673 #ifdef DEBUG |
688 friend class MarkObjectVisitor; | 674 friend class MarkObjectVisitor; |
689 static void VisitObject(HeapObject* obj); | 675 static void VisitObject(HeapObject* obj); |
690 | 676 |
691 friend class UnmarkObjectVisitor; | 677 friend class UnmarkObjectVisitor; |
692 static void UnmarkObject(HeapObject* obj); | 678 static void UnmarkObject(HeapObject* obj); |
693 #endif | 679 #endif |
694 | 680 |
695 Heap* heap_; | 681 Heap* heap_; |
(...skipping 56 matching lines...)
752 | 738 |
753 private: | 739 private: |
754 MarkCompactCollector* collector_; | 740 MarkCompactCollector* collector_; |
755 }; | 741 }; |
756 | 742 |
757 V8_EXPORT_PRIVATE const char* AllocationSpaceName(AllocationSpace space); | 743 V8_EXPORT_PRIVATE const char* AllocationSpaceName(AllocationSpace space); |
758 } // namespace internal | 744 } // namespace internal |
759 } // namespace v8 | 745 } // namespace v8 |
760 | 746 |
761 #endif // V8_HEAP_MARK_COMPACT_H_ | 747 #endif // V8_HEAP_MARK_COMPACT_H_ |