Chromium Code Reviews

Unified Diff: src/heap.cc

Issue 9323007: Tweak compaction candidate selection to avoid keeping pages with low occupancy around. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 8 years, 10 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 218 matching lines...)
 
 
 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
   if (IntrusiveMarking::IsMarked(object)) {
     return IntrusiveMarking::SizeOfMarkedObject(object);
   }
   return object->SizeFromMap(object->map());
 }
 
 
-GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
+GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
+                                              const char** reason) {
   // Is global GC requested?
   if (space != NEW_SPACE || FLAG_gc_global) {
     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
+    *reason = "GC in old space requested";
     return MARK_COMPACTOR;
   }
 
   // Is enough data promoted to justify a global GC?
   if (OldGenerationPromotionLimitReached()) {
     isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
+    *reason = "promotion limit reached";
     return MARK_COMPACTOR;
   }
 
   // Have allocation in OLD and LO failed?
   if (old_gen_exhausted_) {
     isolate_->counters()->
         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
+    *reason = "old generations exhausted";
     return MARK_COMPACTOR;
   }
 
   // Is there enough space left in OLD to guarantee that a scavenge can
   // succeed?
   //
   // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
   // for object promotion. It counts only the bytes that the memory
   // allocator has not yet allocated from the OS and assigned to any space,
   // and does not count available bytes already in the old space or code
   // space. Undercounting is safe---we may get an unrequested full GC when
   // a scavenge would have succeeded.
   if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
     isolate_->counters()->
         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
+    *reason = "scavenge might not succeed";
     return MARK_COMPACTOR;
   }
 
   // Default
+  *reason = NULL;
   return SCAVENGER;
 }
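The new out-parameter lets SelectGarbageCollector report why a full collection was chosen instead of a scavenge. A minimal sketch of the intended call pattern, assuming a caller shaped like Heap::CollectGarbage (the local names here are illustrative, not taken from this patch):

  // Pick a collector; collector_reason stays NULL when a plain scavenge suffices.
  const char* collector_reason = NULL;
  GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
  // The caller-supplied gc_reason and the selected collector_reason are both
  // forwarded so GCTracer can print them with --trace-gc.
  CollectGarbage(space, collector, gc_reason, collector_reason);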
 
 
 // TODO(1238405): Combine the infrastructure for --heap-stats and
 // --log-gc to avoid the complicated preprocessor and flag testing.
 void Heap::ReportStatisticsBeforeGC() {
   // Heap::ReportHeapStatistics will also log NewSpace statistics when
   // compiled --log-gc is set. The following logic is used to avoid
   // double logging.
(...skipping 139 matching lines...)
              symbol_table()->NumberOfElements());
 #if defined(DEBUG)
   ReportStatisticsAfterGC();
 #endif  // DEBUG
 #ifdef ENABLE_DEBUGGER_SUPPORT
   isolate_->debug()->AfterGarbageCollection();
 #endif  // ENABLE_DEBUGGER_SUPPORT
 }
 
 
-void Heap::CollectAllGarbage(int flags) {
+void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
   mark_compact_collector_.SetFlags(flags);
-  CollectGarbage(OLD_POINTER_SPACE);
+  CollectGarbage(OLD_POINTER_SPACE, gc_reason);
   mark_compact_collector_.SetFlags(kNoGCFlags);
 }
 
 
-void Heap::CollectAllAvailableGarbage() {
+void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
   // Major GC would invoke weak handle callbacks on weakly reachable
   // handles, but won't collect weakly reachable objects until next
   // major GC. Therefore if we collect aggressively and weak handle callback
   // has been invoked, we rerun major GC to release objects which become
   // garbage.
   // Note: as weak callbacks can execute arbitrary code, we cannot
   // hope that eventually there will be no weak callbacks invocations.
   // Therefore stop recollecting after several attempts.
-  mark_compact_collector()->SetFlags(kMakeHeapIterableMask);
+  mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
+                                     kReduceMemoryFootprintMask);
   isolate_->compilation_cache()->Clear();
   const int kMaxNumberOfAttempts = 7;
   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
-    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
+    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL)) {
       break;
     }
   }
   mark_compact_collector()->SetFlags(kNoGCFlags);
   new_space_.Shrink();
   UncommitFromSpace();
   Shrink();
   incremental_marking()->UncommitMarkingDeque();
 }
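Public entry points now carry a human-readable reason string all the way to the tracer. Purely for illustration (this call site is hypothetical, not part of the patch), an embedder-facing path reacting to memory pressure could invoke:

  // Force an aggressive, footprint-reducing full GC and label it for --trace-gc.
  heap->CollectAllAvailableGarbage("low memory notification");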
 
 
-bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
+bool Heap::CollectGarbage(AllocationSpace space,
+                          GarbageCollector collector,
+                          const char* gc_reason,
+                          const char* collector_reason) {
   // The VM is in the GC state until exiting this function.
   VMState state(isolate_, GC);
 
 #ifdef DEBUG
   // Reset the allocation timeout to the GC interval, but make sure to
   // allow at least a few allocations after a collection. The reason
   // for this is that we have a lot of allocation sequences and we
   // assume that a garbage collection will allow the subsequent
   // allocation attempts to go through.
   allocation_timeout_ = Max(6, FLAG_gc_interval);
 #endif
 
   if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
     if (FLAG_trace_incremental_marking) {
       PrintF("[IncrementalMarking] Scavenge during marking.\n");
     }
   }
 
   if (collector == MARK_COMPACTOR &&
       !mark_compact_collector()->PreciseSweepingRequired() &&
       !incremental_marking()->IsStopped() &&
       !incremental_marking()->should_hurry() &&
       FLAG_incremental_marking_steps) {
     if (FLAG_trace_incremental_marking) {
       PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
     }
     collector = SCAVENGER;
+    collector_reason = "incremental marking delaying mark-sweep";
   }
 
   bool next_gc_likely_to_collect_more = false;
 
-  { GCTracer tracer(this);
+  { GCTracer tracer(this, gc_reason, collector_reason);
     GarbageCollectionPrologue();
     // The GC count was incremented in the prologue. Tell the tracer about
     // it.
     tracer.set_gc_count(gc_count_);
 
     // Tell the tracer which collector we've selected.
     tracer.set_collector(collector);
 
     HistogramTimer* rate = (collector == SCAVENGER)
         ? isolate_->counters()->gc_scavenger()
(...skipping 11 matching lines...)
     if (incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
       incremental_marking()->Start();
     }
   }
 
   return next_gc_likely_to_collect_more;
 }
 
 
 void Heap::PerformScavenge() {
-  GCTracer tracer(this);
+  GCTracer tracer(this, NULL, NULL);
   if (incremental_marking()->IsStopped()) {
     PerformGarbageCollection(SCAVENGER, &tracer);
   } else {
     PerformGarbageCollection(MARK_COMPACTOR, &tracer);
   }
 }
 
 
 #ifdef DEBUG
 // Helper class for verifying the symbol table.
(...skipping 34 matching lines...)
   PagedSpace* code_space = Heap::code_space();
   PagedSpace* map_space = Heap::map_space();
   PagedSpace* cell_space = Heap::cell_space();
   LargeObjectSpace* lo_space = Heap::lo_space();
   bool gc_performed = true;
   int counter = 0;
   static const int kThreshold = 20;
   while (gc_performed && counter++ < kThreshold) {
     gc_performed = false;
     if (!new_space->ReserveSpace(new_space_size)) {
-      Heap::CollectGarbage(NEW_SPACE);
+      Heap::CollectGarbage(NEW_SPACE,
+                           "failed to reserve space in the new space");
       gc_performed = true;
     }
     if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
-      Heap::CollectGarbage(OLD_POINTER_SPACE);
+      Heap::CollectGarbage(OLD_POINTER_SPACE,
+                           "failed to reserve space in the old pointer space");
       gc_performed = true;
     }
     if (!(old_data_space->ReserveSpace(data_space_size))) {
-      Heap::CollectGarbage(OLD_DATA_SPACE);
+      Heap::CollectGarbage(OLD_DATA_SPACE,
+                           "failed to reserve space in the old data space");
       gc_performed = true;
     }
     if (!(code_space->ReserveSpace(code_space_size))) {
-      Heap::CollectGarbage(CODE_SPACE);
+      Heap::CollectGarbage(CODE_SPACE,
+                           "failed to reserve space in the code space");
       gc_performed = true;
     }
     if (!(map_space->ReserveSpace(map_space_size))) {
-      Heap::CollectGarbage(MAP_SPACE);
+      Heap::CollectGarbage(MAP_SPACE,
+                           "failed to reserve space in the map space");
       gc_performed = true;
     }
     if (!(cell_space->ReserveSpace(cell_space_size))) {
-      Heap::CollectGarbage(CELL_SPACE);
+      Heap::CollectGarbage(CELL_SPACE,
+                           "failed to reserve space in the cell space");
       gc_performed = true;
     }
     // We add a slack-factor of 2 in order to have space for a series of
     // large-object allocations that are only just larger than the page size.
     large_object_size *= 2;
     // The ReserveSpace method on the large object space checks how much
     // we can expand the old generation. This includes expansion caused by
     // allocation in the other spaces.
     large_object_size += cell_space_size + map_space_size + code_space_size +
         data_space_size + pointer_space_size;
     if (!(lo_space->ReserveSpace(large_object_size))) {
-      Heap::CollectGarbage(LO_SPACE);
+      Heap::CollectGarbage(LO_SPACE,
+                           "failed to reserve space in the large object space");
       gc_performed = true;
     }
   }
 
   if (gc_performed) {
     // Failed to reserve the space after several attempts.
     V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
   }
 }
 
(...skipping 4101 matching lines...)
 
 bool Heap::IsHeapIterable() {
   return (!old_pointer_space()->was_swept_conservatively() &&
           !old_data_space()->was_swept_conservatively());
 }
 
 
 void Heap::EnsureHeapIsIterable() {
   ASSERT(IsAllocationAllowed());
   if (!IsHeapIterable()) {
-    CollectAllGarbage(kMakeHeapIterableMask);
+    CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
   }
   ASSERT(IsHeapIterable());
 }
 
 
 bool Heap::IdleNotification(int hint) {
   if (hint >= 1000) return IdleGlobalGC();
   if (contexts_disposed_ > 0 || !FLAG_incremental_marking ||
       FLAG_expose_gc || Serializer::enabled()) {
     return true;
(...skipping 49 matching lines...)
   incremental_marking()->Step(step_size);
   idle_notification_will_schedule_next_gc_ = false;
 
   if (incremental_marking()->IsComplete()) {
     bool uncommit = false;
     if (gc_count_at_last_idle_gc_ == gc_count_) {
       // No GC since the last full GC, the mutator is probably not active.
       isolate_->compilation_cache()->Clear();
       uncommit = true;
     }
-    CollectAllGarbage(kNoGCFlags);
+    CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
     gc_count_at_last_idle_gc_ = gc_count_;
     if (uncommit) {
       new_space_.Shrink();
       UncommitFromSpace();
     }
   }
   return false;
 }
 
 
(...skipping 20 matching lines...)
     number_idle_notifications_ =
         Min(number_idle_notifications_ + 1, kMaxIdleCount);
   } else {
     number_idle_notifications_ = 0;
     last_idle_notification_gc_count_ = gc_count_;
   }
 
   if (number_idle_notifications_ == kIdlesBeforeScavenge) {
     if (contexts_disposed_ > 0) {
       HistogramTimerScope scope(isolate_->counters()->gc_context());
-      CollectAllGarbage(kNoGCFlags);
+      CollectAllGarbage(kReduceMemoryFootprintMask,
+                        "idle notification: contexts disposed");
     } else {
-      CollectGarbage(NEW_SPACE);
+      CollectGarbage(NEW_SPACE, "idle notification");
     }
     new_space_.Shrink();
     last_idle_notification_gc_count_ = gc_count_;
   } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
     // Before doing the mark-sweep collections we clear the
     // compilation cache to avoid hanging on to source code and
     // generated code for cached functions.
     isolate_->compilation_cache()->Clear();
 
-    CollectAllGarbage(kNoGCFlags);
+    CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
     new_space_.Shrink();
     last_idle_notification_gc_count_ = gc_count_;
 
   } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
-    CollectAllGarbage(kNoGCFlags);
+    CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
     new_space_.Shrink();
     last_idle_notification_gc_count_ = gc_count_;
     number_idle_notifications_ = 0;
     finished = true;
   } else if (contexts_disposed_ > 0) {
     if (FLAG_expose_gc) {
       contexts_disposed_ = 0;
     } else {
       HistogramTimerScope scope(isolate_->counters()->gc_context());
-      CollectAllGarbage(kNoGCFlags);
+      CollectAllGarbage(kReduceMemoryFootprintMask,
+                        "idle notification: contexts disposed");
       last_idle_notification_gc_count_ = gc_count_;
     }
     // If this is the first idle notification, we reset the
     // notification count to avoid letting idle notifications for
     // context disposal garbage collections start a potentially too
     // aggressive idle GC cycle.
     if (number_idle_notifications_ <= 1) {
       number_idle_notifications_ = 0;
       uncommit = false;
     }
(...skipping 1610 matching lines...)
   OldSpaces spaces;
   for (OldSpace* space = spaces.next();
        space != NULL;
        space = spaces.next()) {
     holes_size += space->Waste() + space->Available();
   }
   return holes_size;
 }
 
 
-GCTracer::GCTracer(Heap* heap)
+GCTracer::GCTracer(Heap* heap,
+                   const char* gc_reason,
+                   const char* collector_reason)
     : start_time_(0.0),
-      start_size_(0),
+      start_object_size_(0),
+      start_memory_size_(0),
       gc_count_(0),
       full_gc_count_(0),
       allocated_since_last_gc_(0),
       spent_in_mutator_(0),
       promoted_objects_size_(0),
-      heap_(heap) {
+      heap_(heap),
+      gc_reason_(gc_reason),
+      collector_reason_(collector_reason) {
   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
   start_time_ = OS::TimeCurrentMillis();
-  start_size_ = heap_->SizeOfObjects();
+  start_object_size_ = heap_->SizeOfObjects();
+  start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
 
   for (int i = 0; i < Scope::kNumberOfScopes; i++) {
     scopes_[i] = 0;
   }
 
   in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();
 
   allocated_since_last_gc_ =
       heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
 
(...skipping 29 matching lines...)
                                      heap_->alive_after_last_gc_);
     if (!first_gc) {
       heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
                                    static_cast<int>(spent_in_mutator_));
     }
   }
 
   if (!FLAG_trace_gc_nvp) {
     int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
 
-    PrintF("%s %.1f -> %.1f MB, ",
+    double end_memory_size_mb =
+        static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
+
+    PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
            CollectorString(),
-           static_cast<double>(start_size_) / MB,
-           SizeOfHeapObjects());
+           static_cast<double>(start_object_size_) / MB,
+           static_cast<double>(start_memory_size_) / MB,
+           SizeOfHeapObjects(),
+           end_memory_size_mb);
 
     if (external_time > 0) PrintF("%d / ", external_time);
     PrintF("%d ms", time);
     if (steps_count_ > 0) {
       if (collector_ == SCAVENGER) {
         PrintF(" (+ %d ms in %d steps since last GC)",
                static_cast<int>(steps_took_since_last_gc_),
                steps_count_since_last_gc_);
       } else {
         PrintF(" (+ %d ms in %d steps since start of marking, "
                "biggest step %f ms)",
                static_cast<int>(steps_took_),
                steps_count_,
                longest_step_);
       }
     }
+
+    if (gc_reason_ != NULL) {
+      PrintF(" [%s]", gc_reason_);
+    }
+
+    if (collector_reason_ != NULL) {
+      PrintF(" [%s]", collector_reason_);
+    }
+
     PrintF(".\n");
   } else {
     PrintF("pause=%d ", time);
     PrintF("mutator=%d ",
            static_cast<int>(spent_in_mutator_));
 
     PrintF("gc=");
     switch (collector_) {
       case SCAVENGER:
         PrintF("s");
(...skipping 17 matching lines...)
            static_cast<int>(scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]));
     PrintF("old_new=%d ",
            static_cast<int>(scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]));
     PrintF("compaction_ptrs=%d ",
            static_cast<int>(scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]));
     PrintF("intracompaction_ptrs=%d ", static_cast<int>(scopes_[
         Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]));
     PrintF("misc_compaction=%d ",
            static_cast<int>(scopes_[Scope::MC_UPDATE_MISC_POINTERS]));
 
-    PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
+    PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
     PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
     PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
            in_free_list_or_wasted_before_gc_);
     PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());
 
     PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
     PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
 
     if (collector_ == SCAVENGER) {
       PrintF("stepscount=%d ", steps_count_since_last_gc_);
(...skipping 195 matching lines...)
   isolate_->heap()->store_buffer()->Compact();
   isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
     next = chunk->next_chunk();
     isolate_->memory_allocator()->Free(chunk);
   }
   chunks_queued_for_free_ = NULL;
 }
 
 } }  // namespace v8::internal
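Taken together, the GCTracer changes make the default --trace-gc line report both the object size and the total committed memory (in parentheses), followed by the optional reason strings. An illustrative line in the new format (the numbers and reason strings are made up; only the shape follows the format string above):

  Mark-sweep 23.4 (42.1) -> 18.7 (40.0) MB, 54 ms [idle notification] [promotion limit reached].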
