Chromium Code Reviews

Unified Diff: src/heap.cc

Issue 11782028: Parallel and concurrent sweeping. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 7 years, 11 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 545 matching lines...)
 void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
   mark_compact_collector_.SetFlags(flags);
   CollectGarbage(OLD_POINTER_SPACE, gc_reason);
   mark_compact_collector_.SetFlags(kNoGCFlags);
 }
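(Usage note, not part of the patch: since any space other than NEW_SPACE selects the mark-compact collector, an internal caller forces a full GC along these lines. Heap::kNoGCFlags is real and appears in the function above; the call site and reason string are made up for illustration.)

    // Illustrative call site: forces a full mark-compact GC.
    heap->CollectAllGarbage(Heap::kNoGCFlags, "example: forced full GC");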
+void Heap::StartParallelSweeping(SweeperType sweeper_type) {
+  SweeperThread::set_sweeping_pending(true);
+  for (int i = 0; i < FLAG_sweeper_threads; i++) {
+    isolate()->sweeper_threads()[i]->StartSweeping(sweeper_type);
+  }
+}
+
+
+void Heap::WaitUntilParallelSweepingCompleted() {
+  for (int i = 0; i < FLAG_sweeper_threads; i++) {
+    isolate()->sweeper_threads()[i]->WaitForSweeperThread();
+  }
+  SweeperThread::set_sweeping_pending(false);
+  FinalizeParallelSweeping();
+}
+
+
+void Heap::StealMemoryFromSweeperThreads(PagedSpace* space) {
+  for (int i = 0; i < FLAG_sweeper_threads; i++) {
+    isolate()->sweeper_threads()[i]->StealMemory(space);
+  }
+}
+
+
+void Heap::FinalizeParallelSweeping() {
+  StealMemoryFromSweeperThreads(paged_space(OLD_DATA_SPACE));
+  StealMemoryFromSweeperThreads(paged_space(OLD_POINTER_SPACE));
+  FreeQueuedChunks();
+}
+
+
+bool Heap::IsConcurrentSweepingPending() {
+  return SweeperThread::sweeping_pending();
+}
+
+
+bool Heap::IsConcurrentSweepingActivated() {
+  return isolate()->sweeper_threads() != NULL &&
+         FLAG_concurrent_sweeping;
+}
+
+
+bool Heap::AreSweepingThreadsActivated() {
+  return isolate()->sweeper_threads() != NULL &&
+         (FLAG_concurrent_sweeping || FLAG_parallel_sweeping);
+}
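The functions added above form a small fork-join protocol: StartParallelSweeping raises a global "sweeping pending" flag and kicks each of the FLAG_sweeper_threads workers, WaitUntilParallelSweepingCompleted joins them all before clearing the flag and finalizing, and StealMemoryFromSweeperThreads hands each worker's results back to the owning space. Below is a minimal standalone model of the start/wait handshake; the method names mirror the patch, but the condition-variable plumbing and the fake "sweep" body are assumptions, not V8's implementation.

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    // Toy model of the SweeperThread handshake behind StartParallelSweeping
    // and WaitUntilParallelSweepingCompleted.
    class ToySweeperThread {
     public:
      ToySweeperThread() : start_(false), done_(false), stop_(false),
                           thread_(&ToySweeperThread::Run, this) {}
      ~ToySweeperThread() {
        {
          std::lock_guard<std::mutex> lock(mutex_);
          stop_ = true;
          start_ = true;
        }
        cv_.notify_all();
        thread_.join();
      }
      // What StartParallelSweeping would call on each worker.
      void StartSweeping() {
        std::lock_guard<std::mutex> lock(mutex_);
        done_ = false;
        start_ = true;
        cv_.notify_all();
      }
      // What WaitUntilParallelSweepingCompleted would call on each worker.
      void WaitForSweeperThread() {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return done_; });
      }

     private:
      void Run() {
        for (;;) {
          std::unique_lock<std::mutex> lock(mutex_);
          cv_.wait(lock, [this] { return start_; });
          start_ = false;
          if (stop_) return;
          lock.unlock();
          std::puts("sweeping...");  // stand-in for sweeping a paged space
          lock.lock();
          done_ = true;
          cv_.notify_all();
        }
      }

      std::mutex mutex_;
      std::condition_variable cv_;
      bool start_;
      bool done_;
      bool stop_;
      std::thread thread_;
    };

    int main() {
      ToySweeperThread sweepers[2];  // as if FLAG_sweeper_threads == 2
      for (ToySweeperThread& t : sweepers) t.StartSweeping();         // fork
      for (ToySweeperThread& t : sweepers) t.WaitForSweeperThread();  // join
    }

In this model, the order of the joins does not matter: every worker must signal completion before the main thread proceeds, which is why the patch can safely clear the pending flag and finalize only after the loop in WaitUntilParallelSweepingCompleted.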
 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
   // Major GC would invoke weak handle callbacks on weakly reachable
   // handles, but won't collect weakly reachable objects until next
   // major GC. Therefore if we collect aggressively and weak handle callback
   // has been invoked, we rerun major GC to release objects which become
   // garbage.
   // Note: as weak callbacks can execute arbitrary code, we cannot
(...skipping 690 matching lines...)
   // Used for updating survived_since_last_expansion_ at function end.
   intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
 
   CheckNewSpaceExpansionCriteria();
 
   SelectScavengingVisitorsTable();
 
   incremental_marking()->PrepareForScavenge();
 
-  AdvanceSweepers(static_cast<int>(new_space_.Size()));
+  if (AreSweepingThreadsActivated()) {
+    StealMemoryFromSweeperThreads(paged_space(OLD_DATA_SPACE));
+    StealMemoryFromSweeperThreads(paged_space(OLD_POINTER_SPACE));
+  } else {
+    AdvanceSweepers(static_cast<int>(new_space_.Size()));
+  }
 
   // Flip the semispaces. After flipping, to space is empty, from space has
   // live objects.
   new_space_.Flip();
   new_space_.ResetAllocationInfo();
 
   // We need to sweep newly copied objects which can be either in the
   // to space or promoted to the old generation. For to-space
   // objects, we treat the bottom of the to space as a queue. Newly
   // copied and unswept objects lie between a 'front' mark and the
(...skipping 6069 matching lines...)
                       static_cast<int>(object_sizes_last_time_[index]));
   FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
 #undef ADJUST_LAST_TIME_OBJECT_COUNT
 
   memcpy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
   memcpy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
   ClearObjectStats();
 }
 
 } }  // namespace v8::internal
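A note on the Scavenge hunk above: before this patch the scavenger performed a slice of sweeping work itself (AdvanceSweepers); with sweeper threads active it instead "steals" the free memory the workers have already produced in the old data and old pointer spaces, since the scavenge is about to promote objects into those spaces and needs their free lists up to date. Below is a standalone sketch of that producer/consumer handoff; every type and name in it is invented for illustration, as the diff shows only the StealMemory call, not its implementation.

    #include <cstddef>
    #include <cstdio>
    #include <mutex>
    #include <vector>

    // Toy model of StealMemoryFromSweeperThreads: each sweeper accumulates
    // freed regions privately while it sweeps; the main thread later drains
    // them into the owning space's free list.
    struct FreeRegion {
      void* start;
      std::size_t size;
    };

    class ToyPagedSpace {
     public:
      void AddToFreeList(const FreeRegion& r) { free_list_.push_back(r); }
      std::size_t Available() const {
        std::size_t sum = 0;
        for (const FreeRegion& r : free_list_) sum += r.size;
        return sum;
      }

     private:
      std::vector<FreeRegion> free_list_;
    };

    class ToySweeper {
     public:
      // Runs on the sweeper thread as pages are swept.
      void RecordFreed(FreeRegion r) {
        std::lock_guard<std::mutex> lock(mutex_);
        pending_.push_back(r);
      }
      // Runs on the main thread: hand everything swept so far to |space|.
      void StealMemory(ToyPagedSpace* space) {
        std::lock_guard<std::mutex> lock(mutex_);
        for (const FreeRegion& r : pending_) space->AddToFreeList(r);
        pending_.clear();
      }

     private:
      std::mutex mutex_;
      std::vector<FreeRegion> pending_;
    };

    int main() {
      ToyPagedSpace old_data_space;
      ToySweeper sweeper;
      sweeper.RecordFreed(FreeRegion{nullptr, 4096});  // as if swept concurrently
      sweeper.StealMemory(&old_data_space);            // main-thread handoff
      std::printf("available after steal: %zu bytes\n",
                  old_data_space.Available());
    }

The point of the model is that workers never mutate the shared free list directly; only the main thread's steal step does. That is one plausible reason the handoff exists at all, keeping free-list mutation single-threaded while sweeping itself runs in parallel.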