Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 362 matching lines...) | |
| 373 return compacting_; | 373 return compacting_; |
| 374 } | 374 } |
| 375 | 375 |
| 376 | 376 |
| 377 void MarkCompactCollector::CollectGarbage() { | 377 void MarkCompactCollector::CollectGarbage() { |
| 378 // Make sure that Prepare() has been called. The individual steps below will | 378 // Make sure that Prepare() has been called. The individual steps below will |
| 379 // update the state as they proceed. | 379 // update the state as they proceed. |
| 380 ASSERT(state_ == PREPARE_GC); | 380 ASSERT(state_ == PREPARE_GC); |
| 381 ASSERT(encountered_weak_maps_ == Smi::FromInt(0)); | 381 ASSERT(encountered_weak_maps_ == Smi::FromInt(0)); |
| 382 | 382 |
| | 383 if (heap_->IsConcurrentSweepingActivated() && |
| | 384 heap_->IsConcurrentSweepingPending()) { |
| | 385 heap_->WaitUntilParallelSweepingCompleted(); |

Michael Starzinger (2013/01/24 17:28:57): This should be moved into MarkCompact::Prepare, be…
Hannes Payer (out of office) (2013/01/25 10:46:49): Done.

| | 386 } |
| | 387 |
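
The block added at new lines 383–387 makes a full mark-compact pause block until any concurrent sweeping from the previous cycle has drained (and, per the review thread above, it was later moved into MarkCompactCollector::Prepare). The Heap side of this handshake is outside the hunk; below is a minimal sketch of the pattern under the assumption that it is a counted completion signal, using standard C++ primitives and hypothetical member names rather than V8's own platform classes.

```cpp
#include <condition_variable>
#include <mutex>

// Hypothetical sketch of the wait-for-sweepers handshake implied by
// IsConcurrentSweepingPending() / WaitUntilParallelSweepingCompleted().
// V8 of this era uses its own platform synchronization, not std::.
class SweeperCoordinator {
 public:
  // Called when sweeper threads are kicked off (StartParallelSweeping).
  void OnSweepingStarted(int num_sweeper_threads) {
    std::lock_guard<std::mutex> guard(mutex_);
    pending_sweepers_ = num_sweeper_threads;
  }

  // Each sweeper thread signals here when its pass over the pages is done.
  void OnSweeperFinished() {
    std::lock_guard<std::mutex> guard(mutex_);
    if (--pending_sweepers_ == 0) done_.notify_all();
  }

  // WaitUntilParallelSweepingCompleted: block the main thread until all
  // sweeper threads have drained their pages.
  void WaitUntilCompleted() {
    std::unique_lock<std::mutex> lock(mutex_);
    done_.wait(lock, [this] { return pending_sweepers_ == 0; });
  }

  // IsConcurrentSweepingPending: true while any sweeper is still running.
  bool IsSweepingPending() {
    std::lock_guard<std::mutex> guard(mutex_);
    return pending_sweepers_ > 0;
  }

 private:
  std::mutex mutex_;
  std::condition_variable done_;
  int pending_sweepers_ = 0;
};
```

Blocking here, before MarkLiveObjects(), keeps the marker from racing sweeper threads that are still clearing mark bits and rebuilding free lists.
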
| 383 MarkLiveObjects(); | 388 MarkLiveObjects(); |
| 384 ASSERT(heap_->incremental_marking()->IsStopped()); | 389 ASSERT(heap_->incremental_marking()->IsStopped()); |
| 385 | 390 |
| 386 if (FLAG_collect_maps) ClearNonLiveTransitions(); | 391 if (FLAG_collect_maps) ClearNonLiveTransitions(); |
| 387 | 392 |
| 388 ClearWeakMaps(); | 393 ClearWeakMaps(); |
| 389 | 394 |
| 390 #ifdef VERIFY_HEAP | 395 #ifdef VERIFY_HEAP |
| 391 if (FLAG_verify_heap) { | 396 if (FLAG_verify_heap) { |
| 392 VerifyMarking(heap_); | 397 VerifyMarking(heap_); |
| (...skipping 2328 matching lines...) | |
| 2721 | 2726 |
| 2722 | 2727 |
| 2723 // Sweep a space precisely. After this has been done the space can | 2728 // Sweep a space precisely. After this has been done the space can |
| 2724 // be iterated precisely, hitting only the live objects. Code space | 2729 // be iterated precisely, hitting only the live objects. Code space |
| 2725 // is always swept precisely because we want to be able to iterate | 2730 // is always swept precisely because we want to be able to iterate |
| 2726 // over it. Map space is swept precisely, because it is not compacted. | 2731 // over it. Map space is swept precisely, because it is not compacted. |
| 2727 // Slots in live objects pointing into evacuation candidates are updated | 2732 // Slots in live objects pointing into evacuation candidates are updated |
| 2728 // if requested. | 2733 // if requested. |
| 2729 template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode> | 2734 template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode> |
| 2730 static void SweepPrecisely(PagedSpace* space, | 2735 static void SweepPrecisely(PagedSpace* space, |
| | 2736 FreeList* free_list, |
| 2731 Page* p, | 2737 Page* p, |
| 2732 ObjectVisitor* v) { | 2738 ObjectVisitor* v) { |
| 2733 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept()); | 2739 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept()); |
| 2734 ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST, | 2740 ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST, |
| 2735 space->identity() == CODE_SPACE); | 2741 space->identity() == CODE_SPACE); |
| 2736 ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); | 2742 ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); |
| 2737 | 2743 |
| 2738 MarkBit::CellType* cells = p->markbits()->cells(); | 2744 MarkBit::CellType* cells = p->markbits()->cells(); |
| 2739 p->MarkSweptPrecisely(); | 2745 p->MarkSweptPrecisely(); |
| 2740 | 2746 |
| (...skipping 23 matching lines...) | |
| 2764 cell_index++, object_address += 32 * kPointerSize) { | 2770 cell_index++, object_address += 32 * kPointerSize) { |
| 2765 ASSERT((unsigned)cell_index == | 2771 ASSERT((unsigned)cell_index == |
| 2766 Bitmap::IndexToCell( | 2772 Bitmap::IndexToCell( |
| 2767 Bitmap::CellAlignIndex( | 2773 Bitmap::CellAlignIndex( |
| 2768 p->AddressToMarkbitIndex(object_address)))); | 2774 p->AddressToMarkbitIndex(object_address)))); |
| 2769 int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets); | 2775 int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets); |
| 2770 int live_index = 0; | 2776 int live_index = 0; |
| 2771 for ( ; live_objects != 0; live_objects--) { | 2777 for ( ; live_objects != 0; live_objects--) { |
| 2772 Address free_end = object_address + offsets[live_index++] * kPointerSize; | 2778 Address free_end = object_address + offsets[live_index++] * kPointerSize; |
| 2773 if (free_end != free_start) { | 2779 if (free_end != free_start) { |
| 2774 space->Free(free_start, static_cast<int>(free_end - free_start)); | 2780 MarkCompactCollector::Free(space, free_list, free_start, |
| | 2781 static_cast<int>(free_end - free_start)); |
| 2775 } | 2782 } |
| 2776 HeapObject* live_object = HeapObject::FromAddress(free_end); | 2783 HeapObject* live_object = HeapObject::FromAddress(free_end); |
| 2777 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object))); | 2784 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object))); |
| 2778 Map* map = live_object->map(); | 2785 Map* map = live_object->map(); |
| 2779 int size = live_object->SizeFromMap(map); | 2786 int size = live_object->SizeFromMap(map); |
| 2780 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) { | 2787 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) { |
| 2781 live_object->IterateBody(map->instance_type(), size, v); | 2788 live_object->IterateBody(map->instance_type(), size, v); |
| 2782 } | 2789 } |
| 2783 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) { | 2790 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) { |
| 2784 int new_region_start = | 2791 int new_region_start = |
| 2785 SkipList::RegionNumber(free_end); | 2792 SkipList::RegionNumber(free_end); |
| 2786 int new_region_end = | 2793 int new_region_end = |
| 2787 SkipList::RegionNumber(free_end + size - kPointerSize); | 2794 SkipList::RegionNumber(free_end + size - kPointerSize); |
| 2788 if (new_region_start != curr_region || | 2795 if (new_region_start != curr_region || |
| 2789 new_region_end != curr_region) { | 2796 new_region_end != curr_region) { |
| 2790 skip_list->AddObject(free_end, size); | 2797 skip_list->AddObject(free_end, size); |
| 2791 curr_region = new_region_end; | 2798 curr_region = new_region_end; |
| 2792 } | 2799 } |
| 2793 } | 2800 } |
| 2794 free_start = free_end + size; | 2801 free_start = free_end + size; |
| 2795 } | 2802 } |
| 2796 // Clear marking bits for current cell. | 2803 // Clear marking bits for current cell. |
| 2797 cells[cell_index] = 0; | 2804 cells[cell_index] = 0; |
| 2798 } | 2805 } |
| 2799 if (free_start != p->area_end()) { | 2806 if (free_start != p->area_end()) { |
| 2800 space->Free(free_start, static_cast<int>(p->area_end() - free_start)); | 2807 MarkCompactCollector::Free(space, free_list, free_start, |
| | 2808 static_cast<int>(p->area_end() - free_start)); |
| 2801 } | 2809 } |
| 2802 p->ResetLiveBytes(); | 2810 p->ResetLiveBytes(); |
| 2803 } | 2811 } |
| 2804 | 2812 |
| 2805 | 2813 |
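
For orientation, the loop above turns each 32-bit mark-bitmap cell into the word offsets of the live objects that start in the 32-word block the cell covers, via MarkWordToObjectStarts. That routine lives elsewhere in this file and is more heavily optimized; a plain bit-scan honoring the same contract (fill `offsets`, return the object count) looks roughly like this:

```cpp
#include <stdint.h>

// Sketch of the MarkWordToObjectStarts contract used by SweepPrecisely:
// decode a mark cell into word offsets (0..31) of object starts and
// return how many there are. A simple bit-scan, not V8's optimized code.
static int MarkWordToObjectStartsSketch(uint32_t cell, int* offsets) {
  int count = 0;
  while (cell != 0) {
    uint32_t lowest = cell & ~(cell - 1);  // isolate lowest set bit
    int offset = 0;
    for (uint32_t b = lowest; b > 1; b >>= 1) offset++;  // log2(lowest)
    offsets[count++] = offset;
    cell &= cell - 1;  // clear lowest set bit
  }
  return count;
}
// Example: cell == 0x9 (0b1001) yields offsets {0, 3}: two objects,
// starting 0 and 3 words into the block, matching the sweeper's
// free_end = object_address + offsets[i] * kPointerSize.
```
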
| 2806 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) { | 2814 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) { |
| 2807 Page* p = Page::FromAddress(code->address()); | 2815 Page* p = Page::FromAddress(code->address()); |
| 2808 | 2816 |
| 2809 if (p->IsEvacuationCandidate() || | 2817 if (p->IsEvacuationCandidate() || |
| 2810 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { | 2818 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { |
| (...skipping 209 matching lines...) | |
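
Further up, the REBUILD_SKIP_LIST path in SweepPrecisely re-registers each surviving code object with its page's skip list so that mapping a pc back to a code-object start stays cheap after sweeping. SkipList's internals are not part of this diff; the sketch below reconstructs the region bookkeeping that the RegionNumber/AddObject calls imply, with illustrative constants that are assumptions, not V8's.

```cpp
#include <stdint.h>

typedef uintptr_t Address;
static const int kPointerSize = sizeof(void*);
static const int kPageSizeLog2 = 20;    // 1 MB pages (assumed)
static const int kRegionSizeLog2 = 13;  // 8 KB skip-list regions (assumed)
static const int kRegionsPerPage = 1 << (kPageSizeLog2 - kRegionSizeLog2);

// Simplified skip-list model: the page is cut into fixed-size regions,
// and each region remembers the start of the last object covering it, so
// a pc inside the region maps to an object start without a page scan.
struct SkipListSketch {
  Address starts[kRegionsPerPage];

  static int RegionNumber(Address addr) {
    Address offset_in_page =
        addr & ((static_cast<Address>(1) << kPageSizeLog2) - 1);
    return static_cast<int>(offset_in_page >> kRegionSizeLog2);
  }

  void AddObject(Address start, int size) {
    int first = RegionNumber(start);
    int last = RegionNumber(start + size - kPointerSize);
    for (int i = first; i <= last; i++) starts[i] = start;
  }
};
```

This also explains the `curr_region` bookkeeping in the sweeper: an object is only re-added when it touches a region that the previous object did not already cover.
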
| 3020 } else { | 3028 } else { |
| 3021 if (FLAG_gc_verbose) { | 3029 if (FLAG_gc_verbose) { |
| 3022 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n", | 3030 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n", |
| 3023 reinterpret_cast<intptr_t>(p)); | 3031 reinterpret_cast<intptr_t>(p)); |
| 3024 } | 3032 } |
| 3025 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 3033 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
| 3026 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); | 3034 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); |
| 3027 | 3035 |
| 3028 switch (space->identity()) { | 3036 switch (space->identity()) { |
| 3029 case OLD_DATA_SPACE: | 3037 case OLD_DATA_SPACE: |
| 3030 SweepConservatively(space, p); | 3038 SweepConservatively(space, space->free_list(), p); |
| 3031 break; | 3039 break; |
| 3032 case OLD_POINTER_SPACE: | 3040 case OLD_POINTER_SPACE: |
| 3033 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>( | 3041 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>( |
| 3034 space, p, &updating_visitor); | 3042 space, space->free_list(), p, &updating_visitor); |
| 3035 break; | 3043 break; |
| 3036 case CODE_SPACE: | 3044 case CODE_SPACE: |
| 3037 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>( | 3045 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>( |
| 3038 space, p, &updating_visitor); | 3046 space, space->free_list(), p, &updating_visitor); |
| 3039 break; | 3047 break; |
| 3040 default: | 3048 default: |
| 3041 UNREACHABLE(); | 3049 UNREACHABLE(); |
| 3042 break; | 3050 break; |
| 3043 } | 3051 } |
| 3044 } | 3052 } |
| 3045 } | 3053 } |
| 3046 } | 3054 } |
| 3047 | 3055 |
| 3048 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS); | 3056 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS); |
| (...skipping 327 matching lines...) | |
| 3376 } | 3384 } |
| 3377 uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1; | 3385 uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1; |
| 3378 ASSERT((first_set_bit & cell) == first_set_bit); | 3386 ASSERT((first_set_bit & cell) == first_set_bit); |
| 3379 int live_objects = MarkWordToObjectStarts(first_set_bit, offsets); | 3387 int live_objects = MarkWordToObjectStarts(first_set_bit, offsets); |
| 3380 ASSERT(live_objects == 1); | 3388 ASSERT(live_objects == 1); |
| 3381 USE(live_objects); | 3389 USE(live_objects); |
| 3382 return block_address + offsets[0] * kPointerSize; | 3390 return block_address + offsets[0] * kPointerSize; |
| 3383 } | 3391 } |
| 3384 | 3392 |
| 3385 | 3393 |
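
A note on the bit-twiddling in StartOfLiveObject above: `((cell ^ (cell - 1)) + 1) >> 1` isolates the lowest set mark bit, i.e. the first object start recorded in the cell. The identity is easy to misread, so a worked example:

```cpp
#include <assert.h>
#include <stdint.h>

// Worked example of the lowest-set-bit trick in StartOfLiveObject:
//   cell            = 0b10100
//   cell - 1        = 0b10011  (the subtraction borrows through trailing zeros)
//   cell ^ (cell-1) = 0b00111  (lowest set bit plus everything below it)
//   + 1             = 0b01000
//   >> 1            = 0b00100  == lowest set bit of cell
static uint32_t LowestSetBit(uint32_t cell) {
  return ((cell ^ (cell - 1)) + 1) >> 1;
}

int main() {
  assert(LowestSetBit(0x14) == 0x04);
  // Equivalent to the more common idiom cell & -cell:
  assert(LowestSetBit(0x14) == (0x14u & (0u - 0x14u)));
  return 0;
}
```
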
| | 3394 intptr_t MarkCompactCollector::Free(PagedSpace* space, |
| | 3395 FreeList* free_list, |
| | 3396 Address start, |
| | 3397 int size) { |
| | 3398 if (space->heap()->AreSweepingThreadsActivated()) { |
| | 3399 intptr_t wasted = free_list->Free(start, size); |
| | 3400 return size - wasted; |
| | 3401 } else { |
| | 3402 return space->Free(start, size); |
| | 3403 } |
| | 3404 } |
| | 3405 |
| | 3406 |
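
The new MarkCompactCollector::Free is the pivot of this patch: while sweeper threads are active, freed ranges go onto a caller-supplied free list (a sweeper-private one) instead of through PagedSpace::Free, and the accounting is settled later. `size - wasted` is the number of actually reusable bytes because FreeList::Free reports the bytes it could not link as wasted. A toy model of that contract follows; the minimum-block threshold is illustrative, not V8's real categorization.

```cpp
#include <stdint.h>

// Toy model of the FreeList::Free contract that makes
// `return size - wasted;` the number of truly reusable bytes.
struct FreeListSketch {
  static const int kMinBlockSize = 3 * sizeof(void*);  // illustrative
  intptr_t available;

  FreeListSketch() : available(0) {}

  // Returns the number of WASTED bytes: chunks too small to be linked
  // are left behind as filler and never handed back to the allocator.
  intptr_t Free(void* start, int size) {
    (void)start;  // a real list would thread `start` into size buckets
    if (size < kMinBlockSize) return size;  // all wasted
    available += size;                      // linked, fully reusable
    return 0;
  }
};
```
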
| 3386 // Sweeps a space conservatively. After this has been done the larger free | 3407 // Sweeps a space conservatively. After this has been done the larger free |
| 3387 // spaces have been put on the free list and the smaller ones have been | 3408 // spaces have been put on the free list and the smaller ones have been |
| 3388 // ignored and left untouched. A free space is always either ignored or put | 3409 // ignored and left untouched. A free space is always either ignored or put |
| 3389 // on the free list, never split up into two parts. This is important | 3410 // on the free list, never split up into two parts. This is important |
| 3390 // because it means that any FreeSpace maps left actually describe a region of | 3411 // because it means that any FreeSpace maps left actually describe a region of |
| 3391 // memory that can be ignored when scanning. Dead objects other than free | 3412 // memory that can be ignored when scanning. Dead objects other than free |
| 3392 // spaces will not contain the free space map. | 3413 // spaces will not contain the free space map. |
| 3393 intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) { | 3414 intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, |
| | 3415 FreeList* free_list, |
| | 3416 Page* p) { |
| 3394 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept()); | 3417 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept()); |
| 3395 MarkBit::CellType* cells = p->markbits()->cells(); | 3418 MarkBit::CellType* cells = p->markbits()->cells(); |
| 3396 p->MarkSweptConservatively(); | 3419 p->MarkSweptConservatively(); |
| 3397 | 3420 |
| 3398 int last_cell_index = | 3421 int last_cell_index = |
| 3399 Bitmap::IndexToCell( | 3422 Bitmap::IndexToCell( |
| 3400 Bitmap::CellAlignIndex( | 3423 Bitmap::CellAlignIndex( |
| 3401 p->AddressToMarkbitIndex(p->area_end()))); | 3424 p->AddressToMarkbitIndex(p->area_end()))); |
| 3402 | 3425 |
| 3403 int cell_index = | 3426 int cell_index = |
| 3404 Bitmap::IndexToCell( | 3427 Bitmap::IndexToCell( |
| 3405 Bitmap::CellAlignIndex( | 3428 Bitmap::CellAlignIndex( |
| 3406 p->AddressToMarkbitIndex(p->area_start()))); | 3429 p->AddressToMarkbitIndex(p->area_start()))); |
| 3407 | 3430 |
| 3408 intptr_t freed_bytes = 0; | 3431 intptr_t freed_bytes = 0; |
| 3409 | 3432 |
| 3410 // This is the start of the 32 word block that we are currently looking at. | 3433 // This is the start of the 32 word block that we are currently looking at. |
| 3411 Address block_address = p->area_start(); | 3434 Address block_address = p->area_start(); |
| 3412 | 3435 |
| 3413 // Skip over all the dead objects at the start of the page and mark them free. | 3436 // Skip over all the dead objects at the start of the page and mark them free. |
| 3414 for (; | 3437 for (; |
| 3415 cell_index < last_cell_index; | 3438 cell_index < last_cell_index; |
| 3416 cell_index++, block_address += 32 * kPointerSize) { | 3439 cell_index++, block_address += 32 * kPointerSize) { |
| 3417 if (cells[cell_index] != 0) break; | 3440 if (cells[cell_index] != 0) break; |
| 3418 } | 3441 } |
| 3419 size_t size = block_address - p->area_start(); | 3442 size_t size = block_address - p->area_start(); |
| 3420 if (cell_index == last_cell_index) { | 3443 if (cell_index == last_cell_index) { |
| 3421 freed_bytes += static_cast<int>(space->Free(p->area_start(), | 3444 freed_bytes += Free(space, free_list, p->area_start(), |
| 3422 static_cast<int>(size))); | 3445 static_cast<int>(size)); |
| 3423 ASSERT_EQ(0, p->LiveBytes()); | 3446 ASSERT_EQ(0, p->LiveBytes()); |
| 3424 return freed_bytes; | 3447 return freed_bytes; |
| 3425 } | 3448 } |
| 3426 // Grow the size of the start-of-page free space a little to get up to the | 3449 // Grow the size of the start-of-page free space a little to get up to the |
| 3427 // first live object. | 3450 // first live object. |
| 3428 Address free_end = StartOfLiveObject(block_address, cells[cell_index]); | 3451 Address free_end = StartOfLiveObject(block_address, cells[cell_index]); |
| 3429 // Free the first free space. | 3452 // Free the first free space. |
| 3430 size = free_end - p->area_start(); | 3453 size = free_end - p->area_start(); |
| 3431 freed_bytes += space->Free(p->area_start(), | 3454 freed_bytes += Free(space, free_list, p->area_start(), |
| 3432 static_cast<int>(size)); | 3455 static_cast<int>(size)); |
| 3433 // The start of the current free area is represented in undigested form by | 3456 // The start of the current free area is represented in undigested form by |
| 3434 // the address of the last 32-word section that contained a live object and | 3457 // the address of the last 32-word section that contained a live object and |
| 3435 // the marking bitmap for that cell, which describes where the live object | 3458 // the marking bitmap for that cell, which describes where the live object |
| 3436 // started. Unless we find a large free space in the bitmap we will not | 3459 // started. Unless we find a large free space in the bitmap we will not |
| 3437 // digest this pair into a real address. We start the iteration here at the | 3460 // digest this pair into a real address. We start the iteration here at the |
| 3438 // first word in the marking bit map that indicates a live object. | 3461 // first word in the marking bit map that indicates a live object. |
| 3439 Address free_start = block_address; | 3462 Address free_start = block_address; |
| 3440 uint32_t free_start_cell = cells[cell_index]; | 3463 uint32_t free_start_cell = cells[cell_index]; |
| 3441 | 3464 |
| 3442 for ( ; | 3465 for ( ; |
| 3443 cell_index < last_cell_index; | 3466 cell_index < last_cell_index; |
| 3444 cell_index++, block_address += 32 * kPointerSize) { | 3467 cell_index++, block_address += 32 * kPointerSize) { |
| 3445 ASSERT((unsigned)cell_index == | 3468 ASSERT((unsigned)cell_index == |
| 3446 Bitmap::IndexToCell( | 3469 Bitmap::IndexToCell( |
| 3447 Bitmap::CellAlignIndex( | 3470 Bitmap::CellAlignIndex( |
| 3448 p->AddressToMarkbitIndex(block_address)))); | 3471 p->AddressToMarkbitIndex(block_address)))); |
| 3449 uint32_t cell = cells[cell_index]; | 3472 uint32_t cell = cells[cell_index]; |
| 3450 if (cell != 0) { | 3473 if (cell != 0) { |
| 3451 // We have a live object. Check approximately whether it is more than 32 | 3474 // We have a live object. Check approximately whether it is more than 32 |
| 3452 // words since the last live object. | 3475 // words since the last live object. |
| 3453 if (block_address - free_start > 32 * kPointerSize) { | 3476 if (block_address - free_start > 32 * kPointerSize) { |
| 3454 free_start = DigestFreeStart(free_start, free_start_cell); | 3477 free_start = DigestFreeStart(free_start, free_start_cell); |
| 3455 if (block_address - free_start > 32 * kPointerSize) { | 3478 if (block_address - free_start > 32 * kPointerSize) { |
| 3456 // Now that we know the exact start of the free space it still looks | 3479 // Now that we know the exact start of the free space it still looks |
| 3457 // like we have a large enough free space to be worth bothering with. | 3480 // like we have a large enough free space to be worth bothering with. |
| 3458 // so now we need to find the start of the first live object at the | 3481 // so now we need to find the start of the first live object at the |
| 3459 // end of the free space. | 3482 // end of the free space. |
| 3460 free_end = StartOfLiveObject(block_address, cell); | 3483 free_end = StartOfLiveObject(block_address, cell); |
| 3461 freed_bytes += space->Free(free_start, | 3484 freed_bytes += Free(space, free_list, free_start, |
| 3462 static_cast<int>(free_end - free_start)); | 3485 static_cast<int>(free_end - free_start)); |
| 3463 } | 3486 } |
| 3464 } | 3487 } |
| 3465 // Update our undigested record of where the current free area started. | 3488 // Update our undigested record of where the current free area started. |
| 3466 free_start = block_address; | 3489 free_start = block_address; |
| 3467 free_start_cell = cell; | 3490 free_start_cell = cell; |
| 3468 // Clear marking bits for current cell. | 3491 // Clear marking bits for current cell. |
| 3469 cells[cell_index] = 0; | 3492 cells[cell_index] = 0; |
| 3470 } | 3493 } |
| 3471 } | 3494 } |
| 3472 | 3495 |
| 3473 // Handle the free space at the end of the page. | 3496 // Handle the free space at the end of the page. |
| 3474 if (block_address - free_start > 32 * kPointerSize) { | 3497 if (block_address - free_start > 32 * kPointerSize) { |
| 3475 free_start = DigestFreeStart(free_start, free_start_cell); | 3498 free_start = DigestFreeStart(free_start, free_start_cell); |
| 3476 freed_bytes += space->Free(free_start, | 3499 freed_bytes += Free(space, free_list, free_start, |
| 3477 static_cast<int>(block_address - free_start)); | 3500 static_cast<int>(block_address - free_start)); |
| 3478 } | 3501 } |
| 3479 | 3502 |
| 3480 p->ResetLiveBytes(); | 3503 p->ResetLiveBytes(); |
| 3481 return freed_bytes; | 3504 return freed_bytes; |
| 3482 } | 3505 } |
| 3483 | 3506 |
| 3484 | 3507 |
| | 3508 void MarkCompactCollector::PrepareParallelSweeping(PagedSpace* space) { |
| | 3509 bool unused_page_present = false; |
| | 3510 |
| | 3511 space->set_was_swept_conservatively(true); |
| | 3512 |
| | 3513 space->ClearStats(); |
| | 3514 |
| | 3515 PageIterator it(space); |
| | 3516 while (it.has_next()) { |
| | 3517 Page* p = it.next(); |
| | 3518 |
| | 3519 // Clear sweeping flags indicating that marking bits are still intact. |
| | 3520 p->ClearSweptPrecisely(); |
| | 3521 p->ClearSweptConservatively(); |
| | 3522 p->set_parallel_sweeping(0); |
| | 3523 |
| | 3524 if (p->IsEvacuationCandidate()) { |
| | 3525 ASSERT(evacuation_candidates_.length() > 0); |
| | 3526 continue; |
| | 3527 } |
| | 3528 |
| | 3529 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { |
| | 3530 // Will be processed in EvacuateNewSpaceAndCandidates. |
| | 3531 continue; |
| | 3532 } |
| | 3533 |
| | 3534 // One unused page is kept, all further are released before sweeping them. |
| | 3535 if (p->LiveBytes() == 0) { |
| | 3536 if (unused_page_present) { |
| | 3537 if (FLAG_gc_verbose) { |
| | 3538 PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n", |
| | 3539 reinterpret_cast<intptr_t>(p)); |
| | 3540 } |
| | 3541 // Adjust unswept free bytes because releasing a page expects said |
| | 3542 // counter to be accurate for unswept pages. |
| | 3543 space->IncreaseUnsweptFreeBytes(p); |
| | 3544 space->ReleasePage(p); |
| | 3545 continue; |
| | 3546 } |
| | 3547 unused_page_present = true; |
| | 3548 } |
| | 3549 } |
| | 3550 } |
| | 3551 |
| | 3552 |
| | 3553 void MarkCompactCollector::SweepInParallel(PagedSpace* space, |
| | 3554 SweeperType sweeper_type, |
| | 3555 FreeList* private_free_list, |
| | 3556 FreeList* free_list) { |
| | 3557 PageIterator it(space); |
| | 3558 while (it.has_next()) { |
| | 3559 Page* p = it.next(); |
| | 3560 |
| | 3561 if (p->IsEvacuationCandidate()) { |
| | 3562 ASSERT(evacuation_candidates_.length() > 0); |
| | 3563 continue; |
| | 3564 } |
| | 3565 |
| | 3566 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { |
| | 3567 // Will be processed in EvacuateNewSpaceAndCandidates. |
| | 3568 continue; |
| | 3569 } |
| | 3570 |
| | 3571 if (p->TryParallelSweeping()) { |
| | 3572 if (sweeper_type == CONSERVATIVE || sweeper_type == LAZY_CONSERVATIVE) { |
| | 3573 SweepConservatively(space, private_free_list, p); |
| | 3574 } else { |
| | 3575 SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>( |
| | 3576 space, private_free_list, p, NULL); |
| | 3577 } |
| | 3578 free_list->Concatenate(private_free_list); |
| | 3579 } |
| | 3580 } |
| | 3581 } |
| | 3582 |
| | 3583 |
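
SweepInParallel lets several sweeper threads race over the same page list: TryParallelSweeping (defined on Page, not shown in this diff) claims a page, the winner sweeps it into its private list, and one Concatenate per page publishes the result, keeping contention on the space's shared free list low. A schematic of that claim-then-merge pattern, under the assumption that the claim is a compare-and-swap on the state PrepareParallelSweeping resets with set_parallel_sweeping(0):

```cpp
#include <atomic>

// Schematic page with an atomic claim, as set_parallel_sweeping(0) /
// TryParallelSweeping() suggest. The state values are assumptions.
struct PageSketch {
  std::atomic<int> parallel_sweeping{0};  // 0 = unclaimed, 1 = claimed

  bool TryParallelSweeping() {
    int expected = 0;
    // Exactly one competing sweeper thread wins each page.
    return parallel_sweeping.compare_exchange_strong(expected, 1);
  }
};

// Per-thread loop: sweep claimed pages into a private free list, then
// publish with one Concatenate per page, mirroring SweepInParallel above.
template <typename FreeList, typename SweepFn>
void SweepPagesInParallel(PageSketch** pages, int n, FreeList* private_list,
                          FreeList* shared_list, SweepFn sweep) {
  for (int i = 0; i < n; i++) {
    if (!pages[i]->TryParallelSweeping()) continue;  // another thread has it
    sweep(pages[i], private_list);                   // fills private_list
    shared_list->Concatenate(private_list);          // drain into shared
  }
}
```
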
| 3485 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { | 3584 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { |
| 3486 space->set_was_swept_conservatively(sweeper == CONSERVATIVE || | 3585 space->set_was_swept_conservatively(sweeper == CONSERVATIVE || |
| 3487 sweeper == LAZY_CONSERVATIVE); | 3586 sweeper == LAZY_CONSERVATIVE); |
| 3488 | 3587 ASSERT(!(space->identity() == OLD_DATA_SPACE && |
| | 3588 FLAG_parallel_sweeping && |
| | 3589 FLAG_concurrent_sweeping)); |
| | 3590 ASSERT(!(space->identity() == OLD_POINTER_SPACE && |
| | 3591 FLAG_parallel_sweeping && |
| | 3592 FLAG_concurrent_sweeping)); |
| 3489 space->ClearStats(); | 3593 space->ClearStats(); |
| 3490 | 3594 |
| 3491 PageIterator it(space); | 3595 PageIterator it(space); |
| 3492 | 3596 |
| 3493 intptr_t freed_bytes = 0; | 3597 intptr_t freed_bytes = 0; |
| 3494 int pages_swept = 0; | 3598 int pages_swept = 0; |
| 3495 bool lazy_sweeping_active = false; | 3599 bool lazy_sweeping_active = false; |
| 3496 bool unused_page_present = false; | 3600 bool unused_page_present = false; |
| 3497 | 3601 |
| 3498 while (it.has_next()) { | 3602 while (it.has_next()) { |
| (...skipping 37 matching lines...) | |
| 3536 space->IncreaseUnsweptFreeBytes(p); | 3640 space->IncreaseUnsweptFreeBytes(p); |
| 3537 continue; | 3641 continue; |
| 3538 } | 3642 } |
| 3539 | 3643 |
| 3540 switch (sweeper) { | 3644 switch (sweeper) { |
| 3541 case CONSERVATIVE: { | 3645 case CONSERVATIVE: { |
| 3542 if (FLAG_gc_verbose) { | 3646 if (FLAG_gc_verbose) { |
| 3543 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n", | 3647 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n", |
| 3544 reinterpret_cast<intptr_t>(p)); | 3648 reinterpret_cast<intptr_t>(p)); |
| 3545 } | 3649 } |
| 3546 SweepConservatively(space, p); | 3650 SweepConservatively(space, space->free_list(), p); |
| 3547 pages_swept++; | 3651 pages_swept++; |
| 3548 break; | 3652 break; |
| 3549 } | 3653 } |
| 3550 case LAZY_CONSERVATIVE: { | 3654 case LAZY_CONSERVATIVE: { |
| 3551 if (FLAG_gc_verbose) { | 3655 if (FLAG_gc_verbose) { |
| 3552 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n", | 3656 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n", |
| 3553 reinterpret_cast<intptr_t>(p)); | 3657 reinterpret_cast<intptr_t>(p)); |
| 3554 } | 3658 } |
| 3555 freed_bytes += SweepConservatively(space, p); | 3659 freed_bytes += SweepConservatively(space, space->free_list(), p); |
| 3556 pages_swept++; | 3660 pages_swept++; |
| 3557 space->SetPagesToSweep(p->next_page()); | 3661 space->SetPagesToSweep(p->next_page()); |
| 3558 lazy_sweeping_active = true; | 3662 lazy_sweeping_active = true; |
| 3559 break; | 3663 break; |
| 3560 } | 3664 } |
| 3561 case PRECISE: { | 3665 case PRECISE: { |
| 3562 if (FLAG_gc_verbose) { | 3666 if (FLAG_gc_verbose) { |
| 3563 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n", | 3667 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n", |
| 3564 reinterpret_cast<intptr_t>(p)); | 3668 reinterpret_cast<intptr_t>(p)); |
| 3565 } | 3669 } |
| 3566 if (space->identity() == CODE_SPACE) { | 3670 if (space->identity() == CODE_SPACE) { |
| 3567 SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL); | 3671 SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>( |
| | 3672 space, space->free_list(), p, NULL); |
| 3568 } else { | 3673 } else { |
| 3569 SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL); | 3674 SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>( |
| | 3675 space, space->free_list(), p, NULL); |
| 3570 } | 3676 } |
| 3571 pages_swept++; | 3677 pages_swept++; |
| 3572 break; | 3678 break; |
| 3573 } | 3679 } |
| 3574 default: { | 3680 default: { |
| 3575 UNREACHABLE(); | 3681 UNREACHABLE(); |
| 3576 } | 3682 } |
| 3577 } | 3683 } |
| 3578 } | 3684 } |
| 3579 | 3685 |
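
For context on the LAZY_CONSERVATIVE case above: only the first page is swept eagerly, SetPagesToSweep records the resume point, and the remaining pages (accounted via IncreaseUnsweptFreeBytes) are swept on demand when a free-list allocation misses. A hedged sketch of that on-demand half; AdvanceSweeper is an assumed name for the allocation-time hook, and the types are stand-ins, not V8's.

```cpp
#include <stddef.h>
#include <stdint.h>

struct PageStub { PageStub* next; };

// Stand-in for the lazily swept space: a cursor into the unswept pages
// plus a sweep callback (e.g. a conservative sweep into the free list).
struct LazySpaceStub {
  PageStub* first_unswept;                            // set via SetPagesToSweep
  intptr_t (*sweep_page)(LazySpaceStub*, PageStub*);  // returns freed bytes

  // Sweep just enough unswept pages to cover an allocation of
  // bytes_to_sweep that missed the free list; the caller then retries.
  intptr_t AdvanceSweeper(intptr_t bytes_to_sweep) {
    intptr_t freed = 0;
    while (freed < bytes_to_sweep && first_unswept != NULL) {
      PageStub* p = first_unswept;
      first_unswept = p->next;
      freed += sweep_page(this, p);  // free ranges go back on the free list
    }
    return freed;
  }
};
```
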
| (...skipping 15 matching lines...) | |
| 3595 #endif | 3701 #endif |
| 3596 SweeperType how_to_sweep = | 3702 SweeperType how_to_sweep = |
| 3597 FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE; | 3703 FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE; |
| 3598 if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE; | 3704 if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE; |
| 3599 if (sweep_precisely_) how_to_sweep = PRECISE; | 3705 if (sweep_precisely_) how_to_sweep = PRECISE; |
| 3600 // Noncompacting collections simply sweep the spaces to clear the mark | 3706 // Noncompacting collections simply sweep the spaces to clear the mark |
| 3601 // bits and free the nonlive blocks (for old and map spaces). We sweep | 3707 // bits and free the nonlive blocks (for old and map spaces). We sweep |
| 3602 // the map space last because freeing non-live maps overwrites them and | 3708 // the map space last because freeing non-live maps overwrites them and |
| 3603 // the other spaces rely on possibly non-live maps to get the sizes for | 3709 // the other spaces rely on possibly non-live maps to get the sizes for |
| 3604 // non-live objects. | 3710 // non-live objects. |
| 3605 SweepSpace(heap()->old_pointer_space(), how_to_sweep); | 3711 |
| 3606 SweepSpace(heap()->old_data_space(), how_to_sweep); | 3712 if (heap()->IsConcurrentSweepingActivated()) { |
| | 3713 PrepareParallelSweeping(heap()->old_pointer_space()); |
| | 3714 PrepareParallelSweeping(heap()->old_data_space()); |
| | 3715 heap_->StartParallelSweeping(how_to_sweep); |
| | 3716 if (FLAG_parallel_sweeping) { |
| | 3717 heap_->WaitUntilParallelSweepingCompleted(); |
| | 3718 } |
| | 3719 } else { |
| | 3720 SweepSpace(heap()->old_pointer_space(), how_to_sweep); |
| | 3721 SweepSpace(heap()->old_data_space(), how_to_sweep); |
| | 3722 } |
| 3607 | 3723 |
| 3608 RemoveDeadInvalidatedCode(); | 3724 RemoveDeadInvalidatedCode(); |
| 3609 SweepSpace(heap()->code_space(), PRECISE); | 3725 SweepSpace(heap()->code_space(), PRECISE); |
| 3610 | 3726 |
| 3611 SweepSpace(heap()->cell_space(), PRECISE); | 3727 SweepSpace(heap()->cell_space(), PRECISE); |
| 3612 | 3728 |
| 3613 EvacuateNewSpaceAndCandidates(); | 3729 EvacuateNewSpaceAndCandidates(); |
| 3614 | 3730 |
| 3615 // ClearNonLiveTransitions depends on precise sweeping of map space to | 3731 // ClearNonLiveTransitions depends on precise sweeping of map space to |
| 3616 // detect whether unmarked map became dead in this collection or in one | 3732 // detect whether unmarked map became dead in this collection or in one |
| (...skipping 195 matching lines...) | |
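
SweepAllSpaces (the hunk above) now covers three configurations: synchronous sweeping on the main thread, parallel sweeping (start sweeper threads and wait for them), and fully concurrent sweeping (start them and return, with the wait at the top of CollectGarbage catching the case where the next GC arrives first). Heap::StartParallelSweeping itself is not in this hunk; the sketch below shows what each sweeper thread plausibly runs against the SweepInParallel entry point added above. Only the SweepInParallel signature is taken from the diff; the thread plumbing and the FreeList constructor arguments are assumptions.

```cpp
// Hypothetical sweeper thread body wired to this patch's SweepInParallel.
void SweeperThreadBodySketch(MarkCompactCollector* collector,
                             Heap* heap,
                             MarkCompactCollector::SweeperType how_to_sweep) {
  // One private list per space and per thread; the Concatenate calls
  // inside SweepInParallel are the only writes to the shared lists.
  FreeList private_old_data_list(heap->old_data_space());       // ctor assumed
  FreeList private_old_pointer_list(heap->old_pointer_space());

  collector->SweepInParallel(heap->old_data_space(), how_to_sweep,
                             &private_old_data_list,
                             heap->old_data_space()->free_list());
  collector->SweepInParallel(heap->old_pointer_space(), how_to_sweep,
                             &private_old_pointer_list,
                             heap->old_pointer_space()->free_list());
  // Finally, signal completion so WaitUntilParallelSweepingCompleted can
  // return on the main thread.
}
```
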
| 3812 while (buffer != NULL) { | 3928 while (buffer != NULL) { |
| 3813 SlotsBuffer* next_buffer = buffer->next(); | 3929 SlotsBuffer* next_buffer = buffer->next(); |
| 3814 DeallocateBuffer(buffer); | 3930 DeallocateBuffer(buffer); |
| 3815 buffer = next_buffer; | 3931 buffer = next_buffer; |
| 3816 } | 3932 } |
| 3817 *buffer_address = NULL; | 3933 *buffer_address = NULL; |
| 3818 } | 3934 } |
| 3819 | 3935 |
| 3820 | 3936 |
| 3821 } } // namespace v8::internal | 3937 } } // namespace v8::internal |