Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 34 matching lines...) | |
| 45 marking_deque_memory_(NULL), | 45 marking_deque_memory_(NULL), |
| 46 marking_deque_memory_committed_(false), | 46 marking_deque_memory_committed_(false), |
| 47 steps_count_(0), | 47 steps_count_(0), |
| 48 steps_took_(0), | 48 steps_took_(0), |
| 49 longest_step_(0.0), | 49 longest_step_(0.0), |
| 50 old_generation_space_available_at_start_of_incremental_(0), | 50 old_generation_space_available_at_start_of_incremental_(0), |
| 51 old_generation_space_used_at_start_of_incremental_(0), | 51 old_generation_space_used_at_start_of_incremental_(0), |
| 52 steps_count_since_last_gc_(0), | 52 steps_count_since_last_gc_(0), |
| 53 steps_took_since_last_gc_(0), | 53 steps_took_since_last_gc_(0), |
| 54 should_hurry_(false), | 54 should_hurry_(false), |
| 55 allocation_marking_factor_(0), | 55 marking_speed_(0), |
| 56 allocated_(0), | 56 allocated_(0), |
| 57 no_marking_scope_depth_(0) { | 57 no_marking_scope_depth_(0) { |
| 58 } | 58 } |
| 59 | 59 |
| 60 | 60 |
| 61 void IncrementalMarking::TearDown() { | 61 void IncrementalMarking::TearDown() { |
| 62 delete marking_deque_memory_; | 62 delete marking_deque_memory_; |
| 63 } | 63 } |
| 64 | 64 |
| 65 | 65 |
| 66 void IncrementalMarking::RecordWriteSlow(HeapObject* obj, | 66 void IncrementalMarking::RecordWriteSlow(HeapObject* obj, |
| 67 Object** slot, | 67 Object** slot, |
| 68 Object* value) { | 68 Object* value) { |
| 69 if (BaseRecordWrite(obj, slot, value) && slot != NULL) { | 69 if (BaseRecordWrite(obj, slot, value) && slot != NULL) { |
| 70 MarkBit obj_bit = Marking::MarkBitFrom(obj); | 70 MarkBit obj_bit = Marking::MarkBitFrom(obj); |
| 71 if (Marking::IsBlack(obj_bit)) { | 71 if (Marking::IsBlack(obj_bit)) { |
| 72 // Object is not going to be rescanned, so we need to record the slot. | 72 // Object is not going to be rescanned, so we need to record the slot. |
| 73 heap_->mark_compact_collector()->RecordSlot( | 73 heap_->mark_compact_collector()->RecordSlot( |
| 74 HeapObject::RawField(obj, 0), slot, value); | 74 HeapObject::RawField(obj, 0), slot, value); |
| 75 } | 75 } |
| 76 } | 76 } |
| 77 } | 77 } |
| 78 | 78 |
| 79 | 79 |
| 80 void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, | 80 void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, |
| 81 Object* value, | 81 Object* value, |
| 82 Isolate* isolate) { | 82 Isolate* isolate) { |
| 83 ASSERT(obj->IsHeapObject()); | 83 ASSERT(obj->IsHeapObject()); |
| 84 | |
| 85 // Fast cases should already be covered by RecordWriteStub. | |
| 86 ASSERT(value->IsHeapObject()); | |
| 87 ASSERT(!value->IsHeapNumber()); | |
| 88 ASSERT(!value->IsString() || | |
| 89 value->IsConsString() || | |
| 90 value->IsSlicedString()); | |
| 91 ASSERT(Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(value)))); | |
| 92 | |
| 93 IncrementalMarking* marking = isolate->heap()->incremental_marking(); | 84 IncrementalMarking* marking = isolate->heap()->incremental_marking(); |
| 94 ASSERT(!marking->is_compacting_); | 85 ASSERT(!marking->is_compacting_); |
| 86 | |
| 87 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); | |
| 88 int counter = chunk->write_barrier_counter(); | |
| 89 if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) { | |
| 90 marking->write_barriers_invoked_since_last_step_ += | |
| 91 MemoryChunk::kWriteBarrierCounterGranularity - | |
| 92 chunk->write_barrier_counter(); | |
| 93 chunk->set_write_barrier_counter( | |
| 94 MemoryChunk::kWriteBarrierCounterGranularity); | |
| 95 } | |
| 96 | |
| 95 marking->RecordWrite(obj, NULL, value); | 97 marking->RecordWrite(obj, NULL, value); |
| 96 } | 98 } |
| 97 | 99 |
| 98 | 100 |
| 99 void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj, | 101 void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj, |
| 100 Object** slot, | 102 Object** slot, |
| 101 Isolate* isolate) { | 103 Isolate* isolate) { |
| 104 ASSERT(obj->IsHeapObject()); | |
| 102 IncrementalMarking* marking = isolate->heap()->incremental_marking(); | 105 IncrementalMarking* marking = isolate->heap()->incremental_marking(); |
| 103 ASSERT(marking->is_compacting_); | 106 ASSERT(marking->is_compacting_); |
| 107 | |
| 108 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); | |
| 109 int counter = chunk->write_barrier_counter(); | |
| 110 if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) { | |
| 111 marking->write_barriers_invoked_since_last_step_ += | |
| 112 MemoryChunk::kWriteBarrierCounterGranularity - | |
| 113 chunk->write_barrier_counter(); | |
| 114 chunk->set_write_barrier_counter( | |
| 115 MemoryChunk::kWriteBarrierCounterGranularity); | |
| 116 } | |
| 117 | |
| 104 marking->RecordWrite(obj, slot, *slot); | 118 marking->RecordWrite(obj, slot, *slot); |
| 105 } | 119 } |
| 106 | 120 |
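
Both stubs above add the same bookkeeping: each page (MemoryChunk) keeps a write_barrier_counter that the generated barrier code (not shown in this hunk) decrements, and the slow path folds the consumed budget into write_barriers_invoked_since_last_step_ once the counter falls below half of kWriteBarrierCounterGranularity, then refills it. A minimal standalone sketch of that accounting, where the Chunk struct and the granularity value are illustrative assumptions rather than the real MemoryChunk:

```cpp
#include <cstdio>

// Illustrative stand-in for MemoryChunk; the granularity value is assumed.
struct Chunk {
  static const int kWriteBarrierCounterGranularity = 64 * 1024;
  int write_barrier_counter = kWriteBarrierCounterGranularity;
};

struct MarkingState {
  long write_barriers_invoked_since_last_step = 0;

  // Mirrors the refill logic in the RecordWrite*FromCode slow paths: once a
  // chunk has consumed at least half of its budget, credit the consumed
  // amount to the global counter and reset the chunk back to a full budget.
  void OnSlowPath(Chunk* chunk) {
    if (chunk->write_barrier_counter <
        Chunk::kWriteBarrierCounterGranularity / 2) {
      write_barriers_invoked_since_last_step +=
          Chunk::kWriteBarrierCounterGranularity -
          chunk->write_barrier_counter;
      chunk->write_barrier_counter = Chunk::kWriteBarrierCounterGranularity;
    }
  }
};

int main() {
  Chunk chunk;
  MarkingState state;
  chunk.write_barrier_counter -= 40000;  // barrier stubs decrement per store
  state.OnSlowPath(&chunk);              // below half: fold in and refill
  std::printf("invoked since last step: %ld\n",
              state.write_barriers_invoked_since_last_step);
  return 0;
}
```

Counting in granularity-sized chunks keeps the fast path to a single per-page decrement; the global counter only needs updating on the slow path.
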
| 107 | 121 |
| 108 void IncrementalMarking::RecordCodeTargetPatch(Code* host, | 122 void IncrementalMarking::RecordCodeTargetPatch(Code* host, |
| 109 Address pc, | 123 Address pc, |
| 110 HeapObject* value) { | 124 HeapObject* value) { |
| 111 if (IsMarking()) { | 125 if (IsMarking()) { |
| 112 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); | 126 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); |
| 113 RecordWriteIntoCode(host, &rinfo, value); | 127 RecordWriteIntoCode(host, &rinfo, value); |
| (...skipping 383 matching lines...) | |
| 497 if (heap_->old_pointer_space()->IsSweepingComplete() && | 511 if (heap_->old_pointer_space()->IsSweepingComplete() && |
| 498 heap_->old_data_space()->IsSweepingComplete()) { | 512 heap_->old_data_space()->IsSweepingComplete()) { |
| 499 StartMarking(ALLOW_COMPACTION); | 513 StartMarking(ALLOW_COMPACTION); |
| 500 } else { | 514 } else { |
| 501 if (FLAG_trace_incremental_marking) { | 515 if (FLAG_trace_incremental_marking) { |
| 502 PrintF("[IncrementalMarking] Start sweeping.\n"); | 516 PrintF("[IncrementalMarking] Start sweeping.\n"); |
| 503 } | 517 } |
| 504 state_ = SWEEPING; | 518 state_ = SWEEPING; |
| 505 } | 519 } |
| 506 | 520 |
| 507 heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold); | 521 heap_->new_space()->LowerInlineAllocationLimit(kIncrementalMarkingThreshold); |
| 508 } | 522 } |
| 509 | 523 |
| 510 | 524 |
| 511 static void MarkObjectGreyDoNotEnqueue(Object* obj) { | 525 static void MarkObjectGreyDoNotEnqueue(Object* obj) { |
| 512 if (obj->IsHeapObject()) { | 526 if (obj->IsHeapObject()) { |
| 513 HeapObject* heap_obj = HeapObject::cast(obj); | 527 HeapObject* heap_obj = HeapObject::cast(obj); |
| 514 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj)); | 528 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj)); |
| 515 if (Marking::IsBlack(mark_bit)) { | 529 if (Marking::IsBlack(mark_bit)) { |
| 516 MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(), | 530 MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(), |
| 517 -heap_obj->Size()); | 531 -heap_obj->Size()); |
| (...skipping 248 matching lines...) | |
| 766 CompletionAction action) { | 780 CompletionAction action) { |
| 767 if (heap_->gc_state() != Heap::NOT_IN_GC || | 781 if (heap_->gc_state() != Heap::NOT_IN_GC || |
| 768 !FLAG_incremental_marking || | 782 !FLAG_incremental_marking || |
| 769 !FLAG_incremental_marking_steps || | 783 !FLAG_incremental_marking_steps || |
| 770 (state_ != SWEEPING && state_ != MARKING)) { | 784 (state_ != SWEEPING && state_ != MARKING)) { |
| 771 return; | 785 return; |
| 772 } | 786 } |
| 773 | 787 |
| 774 allocated_ += allocated_bytes; | 788 allocated_ += allocated_bytes; |
| 775 | 789 |
| 776 if (allocated_ < kAllocatedThreshold) return; | 790 if (allocated_ < kIncrementalMarkingThreshold && |
| 791 write_barriers_invoked_since_last_step_ < kIncrementalMarkingThreshold) { | |

> Michael Starzinger, 2012/09/26 08:06:38: Even if the two thresholds we are comparing agains…

| 792 return; | |
| 793 } | |
| 777 | 794 |
| 778 if (state_ == MARKING && no_marking_scope_depth_ > 0) return; | 795 if (state_ == MARKING && no_marking_scope_depth_ > 0) return; |
| 779 | 796 |
| 780 intptr_t bytes_to_process = allocated_ * allocation_marking_factor_; | 797 // The marking speed is driven either by the allocation rate or by the rate |
| 798 // at which we are having to check the color of objects in the write barrier. | |
| 799 // It is possible for a tight non-allocating loop to run a lot of write | |
| 800 // barriers before we get here and check them (marking can only take place on | |
| 801 // allocation), so to reduce the lumpiness we don't use the write barriers | |
| 802 // invoked since last step directly to determine the amount of work to do. | |
| 803 intptr_t bytes_to_process = | |
| 804 marking_speed_ * Max(allocated_, kIncrementalMarkingThreshold); | |
| 805 allocated_ = 0; | |
| 806 write_barriers_invoked_since_last_step_ = 0; | |
| 807 | |
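
The formula above is the heart of the change: the step budget scales with marking_speed_ times the bytes allocated since the last step, floored at the threshold, while the write-barrier count only gates entry into Step() via the early-out further up. A hedged sketch of that budget computation; kIncrementalMarkingThreshold is defined elsewhere in the patch (incremental-marking.h), so the value below is only an assumption to keep the example self-contained:

```cpp
#include <algorithm>
#include <cstdint>

// Assumed value; the real constant comes from incremental-marking.h.
static const intptr_t kIncrementalMarkingThreshold = 32 * 1024;

// Step budget as computed above: write-barrier activity can trigger a step,
// but is deliberately not multiplied into the budget, so a barrier-heavy,
// non-allocating phase cannot inflate a single step's work.
static intptr_t BytesToProcess(intptr_t marking_speed, intptr_t allocated) {
  return marking_speed * std::max(allocated, kIncrementalMarkingThreshold);
}
```
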
| 781 bytes_scanned_ += bytes_to_process; | 808 bytes_scanned_ += bytes_to_process; |
| 782 | 809 |
| 783 double start = 0; | 810 double start = 0; |
| 784 | 811 |
| 785 if (FLAG_trace_incremental_marking || FLAG_trace_gc) { | 812 if (FLAG_trace_incremental_marking || FLAG_trace_gc) { |
| 786 start = OS::TimeCurrentMillis(); | 813 start = OS::TimeCurrentMillis(); |
| 787 } | 814 } |
| 788 | 815 |
| 789 if (state_ == SWEEPING) { | 816 if (state_ == SWEEPING) { |
| 790 if (heap_->AdvanceSweepers(static_cast<int>(bytes_to_process))) { | 817 if (heap_->AdvanceSweepers(static_cast<int>(bytes_to_process))) { |
| (...skipping 34 matching lines...) | |
| 825 | 852 |
| 826 MarkBit obj_mark_bit = Marking::MarkBitFrom(obj); | 853 MarkBit obj_mark_bit = Marking::MarkBitFrom(obj); |
| 827 SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) || | 854 SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) || |
| 828 (obj->IsFiller() && Marking::IsWhite(obj_mark_bit))); | 855 (obj->IsFiller() && Marking::IsWhite(obj_mark_bit))); |
| 829 Marking::MarkBlack(obj_mark_bit); | 856 Marking::MarkBlack(obj_mark_bit); |
| 830 MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size); | 857 MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size); |
| 831 } | 858 } |
| 832 if (marking_deque_.IsEmpty()) MarkingComplete(action); | 859 if (marking_deque_.IsEmpty()) MarkingComplete(action); |
| 833 } | 860 } |
| 834 | 861 |
| 835 allocated_ = 0; | |
| 836 | |
| 837 steps_count_++; | 862 steps_count_++; |
| 838 steps_count_since_last_gc_++; | 863 steps_count_since_last_gc_++; |
| 839 | 864 |
| 840 bool speed_up = false; | 865 bool speed_up = false; |
| 841 | 866 |
| 842 if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0) { | 867 if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) { |
| 843 if (FLAG_trace_gc) { | 868 if (FLAG_trace_gc) { |
| 844 PrintPID("Speed up marking after %d steps\n", | 869 PrintPID("Speed up marking after %d steps\n", |
| 845 static_cast<int>(kAllocationMarkingFactorSpeedupInterval)); | 870 static_cast<int>(kMarkingSpeedAccellerationInterval)); |
| 846 } | 871 } |
| 847 speed_up = true; | 872 speed_up = true; |
| 848 } | 873 } |
| 849 | 874 |
| 850 bool space_left_is_very_small = | 875 bool space_left_is_very_small = |
| 851 (old_generation_space_available_at_start_of_incremental_ < 10 * MB); | 876 (old_generation_space_available_at_start_of_incremental_ < 10 * MB); |
| 852 | 877 |
| 853 bool only_1_nth_of_space_that_was_available_still_left = | 878 bool only_1_nth_of_space_that_was_available_still_left = |
| 854 (SpaceLeftInOldSpace() * (allocation_marking_factor_ + 1) < | 879 (SpaceLeftInOldSpace() * (marking_speed_ + 1) < |
| 855 old_generation_space_available_at_start_of_incremental_); | 880 old_generation_space_available_at_start_of_incremental_); |
| 856 | 881 |
| 857 if (space_left_is_very_small || | 882 if (space_left_is_very_small || |
| 858 only_1_nth_of_space_that_was_available_still_left) { | 883 only_1_nth_of_space_that_was_available_still_left) { |
| 859 if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n"); | 884 if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n"); |
| 860 speed_up = true; | 885 speed_up = true; |
| 861 } | 886 } |
| 862 | 887 |
| 863 bool size_of_old_space_multiplied_by_n_during_marking = | 888 bool size_of_old_space_multiplied_by_n_during_marking = |
| 864 (heap_->PromotedTotalSize() > | 889 (heap_->PromotedTotalSize() > |
| 865 (allocation_marking_factor_ + 1) * | 890 (marking_speed_ + 1) * |
| 866 old_generation_space_used_at_start_of_incremental_); | 891 old_generation_space_used_at_start_of_incremental_); |
| 867 if (size_of_old_space_multiplied_by_n_during_marking) { | 892 if (size_of_old_space_multiplied_by_n_during_marking) { |
| 868 speed_up = true; | 893 speed_up = true; |
| 869 if (FLAG_trace_gc) { | 894 if (FLAG_trace_gc) { |
| 870 PrintPID("Speed up marking because of heap size increase\n"); | 895 PrintPID("Speed up marking because of heap size increase\n"); |
| 871 } | 896 } |
| 872 } | 897 } |
| 873 | 898 |
| 874 int64_t promoted_during_marking = heap_->PromotedTotalSize() | 899 int64_t promoted_during_marking = heap_->PromotedTotalSize() |
| 875 - old_generation_space_used_at_start_of_incremental_; | 900 - old_generation_space_used_at_start_of_incremental_; |
| 876 intptr_t delay = allocation_marking_factor_ * MB; | 901 intptr_t delay = marking_speed_ * MB; |
| 877 intptr_t scavenge_slack = heap_->MaxSemiSpaceSize(); | 902 intptr_t scavenge_slack = heap_->MaxSemiSpaceSize(); |
| 878 | 903 |
| 879 // We try to scan at least twice as fast as we are allocating. | 904 // We try to scan at least twice as fast as we are allocating. |
| 880 if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) { | 905 if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) { |
| 881 if (FLAG_trace_gc) { | 906 if (FLAG_trace_gc) { |
| 882 PrintPID("Speed up marking because marker was not keeping up\n"); | 907 PrintPID("Speed up marking because marker was not keeping up\n"); |
| 883 } | 908 } |
| 884 speed_up = true; | 909 speed_up = true; |
| 885 } | 910 } |
| 886 | 911 |
| 887 if (speed_up) { | 912 if (speed_up) { |
| 888 if (state_ != MARKING) { | 913 if (state_ != MARKING) { |
| 889 if (FLAG_trace_gc) { | 914 if (FLAG_trace_gc) { |
| 890 PrintPID("Postponing speeding up marking until marking starts\n"); | 915 PrintPID("Postponing speeding up marking until marking starts\n"); |
| 891 } | 916 } |
| 892 } else { | 917 } else { |
| 893 allocation_marking_factor_ += kAllocationMarkingFactorSpeedup; | 918 marking_speed_ += kMarkingSpeedAccelleration; |
| 894 allocation_marking_factor_ = static_cast<int>( | 919 marking_speed_ = static_cast<int>( |
| 895 Min(kMaxAllocationMarkingFactor, | 920 Min(kMaxMarkingSpeed, |
| 896 static_cast<intptr_t>(allocation_marking_factor_ * 1.3))); | 921 static_cast<intptr_t>(marking_speed_ * 1.3))); |
| 897 if (FLAG_trace_gc) { | 922 if (FLAG_trace_gc) { |
| 898 PrintPID("Marking speed increased to %d\n", allocation_marking_factor_); | 923 PrintPID("Marking speed increased to %d\n", marking_speed_); |
| 899 } | 924 } |
| 900 } | 925 } |
| 901 } | 926 } |
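
When any heuristic sets speed_up and marking is already in progress, the factor grows by a small additive bump followed by 1.3x geometric growth, clamped at kMaxMarkingSpeed. A sketch of just that update rule; the constant values and the bump's name are assumptions here, since the real constants live in incremental-marking.h:

```cpp
#include <algorithm>
#include <cstdint>

// Illustrative values; the name of the additive bump is an assumption.
static const intptr_t kMaxMarkingSpeed = 1000;
static const int kMarkingSpeedBump = 2;

// Additive bump, then 1.3x geometric growth, saturating at the cap, as in
// the speed_up branch of Step() above.
static int SpeedUpMarking(int marking_speed) {
  marking_speed += kMarkingSpeedBump;
  return static_cast<int>(
      std::min(kMaxMarkingSpeed,
               static_cast<intptr_t>(marking_speed * 1.3)));
}
```

The geometric growth lets a badly lagging marker catch up within a few steps, while the cap bounds the pause contributed by any single step.
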
| 902 | 927 |
| 903 if (FLAG_trace_incremental_marking || FLAG_trace_gc) { | 928 if (FLAG_trace_incremental_marking || FLAG_trace_gc) { |
| 904 double end = OS::TimeCurrentMillis(); | 929 double end = OS::TimeCurrentMillis(); |
| 905 double delta = (end - start); | 930 double delta = (end - start); |
| 906 longest_step_ = Max(longest_step_, delta); | 931 longest_step_ = Max(longest_step_, delta); |
| 907 steps_took_ += delta; | 932 steps_took_ += delta; |
| 908 steps_took_since_last_gc_ += delta; | 933 steps_took_since_last_gc_ += delta; |
| 909 } | 934 } |
| 910 } | 935 } |
| 911 | 936 |
| 912 | 937 |
| 913 void IncrementalMarking::ResetStepCounters() { | 938 void IncrementalMarking::ResetStepCounters() { |
| 914 steps_count_ = 0; | 939 steps_count_ = 0; |
| 915 steps_took_ = 0; | 940 steps_took_ = 0; |
| 916 longest_step_ = 0.0; | 941 longest_step_ = 0.0; |
| 917 old_generation_space_available_at_start_of_incremental_ = | 942 old_generation_space_available_at_start_of_incremental_ = |
| 918 SpaceLeftInOldSpace(); | 943 SpaceLeftInOldSpace(); |
| 919 old_generation_space_used_at_start_of_incremental_ = | 944 old_generation_space_used_at_start_of_incremental_ = |
| 920 heap_->PromotedTotalSize(); | 945 heap_->PromotedTotalSize(); |
| 921 steps_count_since_last_gc_ = 0; | 946 steps_count_since_last_gc_ = 0; |
| 922 steps_took_since_last_gc_ = 0; | 947 steps_took_since_last_gc_ = 0; |
| 923 bytes_rescanned_ = 0; | 948 bytes_rescanned_ = 0; |
| 924 allocation_marking_factor_ = kInitialAllocationMarkingFactor; | 949 marking_speed_ = kInitialMarkingSpeed; |
| 925 bytes_scanned_ = 0; | 950 bytes_scanned_ = 0; |
| 951 write_barriers_invoked_since_last_step_ = 0; | |
| 926 } | 952 } |
| 927 | 953 |
| 928 | 954 |
| 929 int64_t IncrementalMarking::SpaceLeftInOldSpace() { | 955 int64_t IncrementalMarking::SpaceLeftInOldSpace() { |
| 930 return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects(); | 956 return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects(); |
| 931 } | 957 } |
| 932 | 958 |
| 933 } } // namespace v8::internal | 959 } } // namespace v8::internal |