| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 187 matching lines...) |
| 198 MarkObject(target); | 198 MarkObject(target); |
| 199 } | 199 } |
| 200 | 200 |
| 201 void VisitCodeEntry(Address entry_address) { | 201 void VisitCodeEntry(Address entry_address) { |
| 202 Object* target = Code::GetObjectFromEntryAddress(entry_address); | 202 Object* target = Code::GetObjectFromEntryAddress(entry_address); |
| 203 heap_->mark_compact_collector()-> | 203 heap_->mark_compact_collector()-> |
| 204 RecordCodeEntrySlot(entry_address, Code::cast(target)); | 204 RecordCodeEntrySlot(entry_address, Code::cast(target)); |
| 205 MarkObject(target); | 205 MarkObject(target); |
| 206 } | 206 } |
| 207 | 207 |
| 208 void VisitSharedFunctionInfo(SharedFunctionInfo* shared) { |
| 209 if (shared->ic_age() != heap_->global_ic_age()) { |
| 210 shared->ResetForNewContext(heap_->global_ic_age()); |
| 211 } |
| 212 } |
| 213 |
| 208 void VisitPointer(Object** p) { | 214 void VisitPointer(Object** p) { |
| 209 Object* obj = *p; | 215 Object* obj = *p; |
| 210 if (obj->NonFailureIsHeapObject()) { | 216 if (obj->NonFailureIsHeapObject()) { |
| 211 heap_->mark_compact_collector()->RecordSlot(p, p, obj); | 217 heap_->mark_compact_collector()->RecordSlot(p, p, obj); |
| 212 MarkObject(obj); | 218 MarkObject(obj); |
| 213 } | 219 } |
| 214 } | 220 } |
| 215 | 221 |
| 216 void VisitPointers(Object** start, Object** end) { | 222 void VisitPointers(Object** start, Object** end) { |
| 217 for (Object** p = start; p < end; p++) { | 223 for (Object** p = start; p < end; p++) { |
| (...skipping 518 matching lines...) |
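Note on the hunk above: the incremental-marking visitor gains a VisitSharedFunctionInfo callback, so that while marking walks the heap it can also age out inline-cache (IC) state on functions whose recorded ic_age lags behind the heap's global_ic_age. A minimal sketch of that check, using simplified stand-in types (these are not the real V8 classes, and the real ResetForNewContext also clears the function's inline caches rather than just recording the age):

    // Sketch only: simplified stand-ins for SharedFunctionInfo and Heap.
    struct SharedFunctionInfoModel {
      int ic_age;
      void ResetForNewContext(int new_age) {
        // Real V8 also clears inline caches here; the sketch just records the new age.
        ic_age = new_age;
      }
    };

    struct HeapModel {
      int global_ic_age;
    };

    // Mirrors the visitor logic added in the hunk above.
    void VisitSharedFunctionInfo(HeapModel* heap, SharedFunctionInfoModel* shared) {
      if (shared->ic_age != heap->global_ic_age) {
        shared->ResetForNewContext(heap->global_ic_age);
      }
    }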
| 736 IncrementalMarking::set_should_hurry(false); | 742 IncrementalMarking::set_should_hurry(false); |
| 737 ResetStepCounters(); | 743 ResetStepCounters(); |
| 738 PatchIncrementalMarkingRecordWriteStubs(heap_, | 744 PatchIncrementalMarkingRecordWriteStubs(heap_, |
| 739 RecordWriteStub::STORE_BUFFER_ONLY); | 745 RecordWriteStub::STORE_BUFFER_ONLY); |
| 740 DeactivateIncrementalWriteBarrier(); | 746 DeactivateIncrementalWriteBarrier(); |
| 741 ASSERT(marking_deque_.IsEmpty()); | 747 ASSERT(marking_deque_.IsEmpty()); |
| 742 heap_->isolate()->stack_guard()->Continue(GC_REQUEST); | 748 heap_->isolate()->stack_guard()->Continue(GC_REQUEST); |
| 743 } | 749 } |
| 744 | 750 |
| 745 | 751 |
| 746 void IncrementalMarking::MarkingComplete() { | 752 void IncrementalMarking::MarkingComplete(CompletionAction action) { |
| 747 state_ = COMPLETE; | 753 state_ = COMPLETE; |
| 748 // We will set the stack guard to request a GC now. This will mean the rest | 754 // We will set the stack guard to request a GC now. This will mean the rest |
| 749 // of the GC gets performed as soon as possible (we can't do a GC here in a | 755 // of the GC gets performed as soon as possible (we can't do a GC here in a |
| 750 // record-write context). If a few things get allocated between now and then, | 756 // record-write context). If a few things get allocated between now and then, |
| 751 // that shouldn't make us do a scavenge and keep being incremental, so we set | 757 // that shouldn't make us do a scavenge and keep being incremental, so we set |
| 752 // the should-hurry flag to indicate that there can't be much work left to do. | 758 // the should-hurry flag to indicate that there can't be much work left to do. |
| 753 set_should_hurry(true); | 759 set_should_hurry(true); |
| 754 if (FLAG_trace_incremental_marking) { | 760 if (FLAG_trace_incremental_marking) { |
| 755 PrintF("[IncrementalMarking] Complete (normal).\n"); | 761 PrintF("[IncrementalMarking] Complete (normal).\n"); |
| 756 } | 762 } |
| 757 if (!heap_->idle_notification_will_schedule_next_gc()) { | 763 if (action == GC_VIA_STACK_GUARD) { |
| 758 heap_->isolate()->stack_guard()->RequestGC(); | 764 heap_->isolate()->stack_guard()->RequestGC(); |
| 759 } | 765 } |
| 760 } | 766 } |
| 761 | 767 |
| 762 | 768 |
| 763 void IncrementalMarking::Step(intptr_t allocated_bytes) { | 769 void IncrementalMarking::Step(intptr_t allocated_bytes, |
| 770 CompletionAction action) { |
| 764 if (heap_->gc_state() != Heap::NOT_IN_GC || | 771 if (heap_->gc_state() != Heap::NOT_IN_GC || |
| 765 !FLAG_incremental_marking || | 772 !FLAG_incremental_marking || |
| 766 !FLAG_incremental_marking_steps || | 773 !FLAG_incremental_marking_steps || |
| 767 (state_ != SWEEPING && state_ != MARKING)) { | 774 (state_ != SWEEPING && state_ != MARKING)) { |
| 768 return; | 775 return; |
| 769 } | 776 } |
| 770 | 777 |
| 771 allocated_ += allocated_bytes; | 778 allocated_ += allocated_bytes; |
| 772 | 779 |
| 773 if (allocated_ < kAllocatedThreshold) return; | 780 if (allocated_ < kAllocatedThreshold) return; |
| (...skipping 52 matching lines...) |
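For context on the Step() prologue above: incremental marking is driven by allocation, so callers report allocated bytes to Step(), which accumulates them in allocated_ and only does marking work once kAllocatedThreshold bytes have piled up since the last step. A rough sketch of that throttling pattern (the class and the threshold parameter below are illustrative assumptions, not taken from this diff):

    #include <stdint.h>

    // Illustrative throttle; the real threshold is a constant in incremental-marking.h.
    class StepThrottle {
     public:
      explicit StepThrottle(intptr_t threshold) : threshold_(threshold), allocated_(0) {}

      // Accumulate allocation and report whether a marking step is due.
      bool ShouldStep(intptr_t allocated_bytes) {
        allocated_ += allocated_bytes;
        if (allocated_ < threshold_) return false;
        allocated_ = 0;  // the real code resets allocated_ after the step's marking work
        return true;
      }

     private:
      intptr_t threshold_;
      intptr_t allocated_;
    };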
| 826 } else { | 833 } else { |
| 827 obj->IterateBody(map->instance_type(), size, &marking_visitor); | 834 obj->IterateBody(map->instance_type(), size, &marking_visitor); |
| 828 } | 835 } |
| 829 | 836 |
| 830 MarkBit obj_mark_bit = Marking::MarkBitFrom(obj); | 837 MarkBit obj_mark_bit = Marking::MarkBitFrom(obj); |
| 831 SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) || | 838 SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) || |
| 832 (obj->IsFiller() && Marking::IsWhite(obj_mark_bit))); | 839 (obj->IsFiller() && Marking::IsWhite(obj_mark_bit))); |
| 833 Marking::MarkBlack(obj_mark_bit); | 840 Marking::MarkBlack(obj_mark_bit); |
| 834 MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size); | 841 MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size); |
| 835 } | 842 } |
| 836 if (marking_deque_.IsEmpty()) MarkingComplete(); | 843 if (marking_deque_.IsEmpty()) MarkingComplete(action); |
| 837 } | 844 } |
| 838 | 845 |
| 839 allocated_ = 0; | 846 allocated_ = 0; |
| 840 | 847 |
| 841 steps_count_++; | 848 steps_count_++; |
| 842 steps_count_since_last_gc_++; | 849 steps_count_since_last_gc_++; |
| 843 | 850 |
| 844 bool speed_up = false; | 851 bool speed_up = false; |
| 845 | 852 |
| 846 if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0) { | 853 if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0) { |
| (...skipping 81 matching lines...) |
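The elided region above is the adaptive pacing logic: every kAllocationMarkingFactorSpeedupInterval steps the collector decides whether to raise allocation_marking_factor_, i.e. how much marking work is done per byte allocated, so that marking keeps pace with allocation. A heavily simplified, assumed illustration of such a periodic speed-up check (the constants and the growth rule are placeholders, not values from this diff):

    #include <algorithm>

    static const int kSpeedupIntervalSketch = 1024;   // placeholder, not the real interval
    static const int kMaxMarkingFactorSketch = 1000;  // placeholder, not the real cap

    // Every kSpeedupIntervalSketch steps, consider doing more marking per allocated byte.
    int MaybeSpeedUp(int steps_count, int marking_factor) {
      if ((steps_count % kSpeedupIntervalSketch) == 0) {
        marking_factor = std::min(marking_factor * 2, kMaxMarkingFactorSketch);  // assumed rule
      }
      return marking_factor;
    }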
| 928 allocation_marking_factor_ = kInitialAllocationMarkingFactor; | 935 allocation_marking_factor_ = kInitialAllocationMarkingFactor; |
| 929 bytes_scanned_ = 0; | 936 bytes_scanned_ = 0; |
| 930 } | 937 } |
| 931 | 938 |
| 932 | 939 |
| 933 int64_t IncrementalMarking::SpaceLeftInOldSpace() { | 940 int64_t IncrementalMarking::SpaceLeftInOldSpace() { |
| 934 return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSize(); | 941 return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSize(); |
| 935 } | 942 } |
| 936 | 943 |
| 937 } } // namespace v8::internal | 944 } } // namespace v8::internal |
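Taken together, the change in this file makes completion behaviour explicit at the call site: MarkingComplete() and Step() now take a CompletionAction, and the GC request through the stack guard happens only for GC_VIA_STACK_GUARD, replacing the old check of heap_->idle_notification_will_schedule_next_gc(). The enum itself is declared in incremental-marking.h and is not shown in this diff; the sketch below uses an assumed shape for it and a hypothetical stand-in class, just to illustrate the two kinds of callers:

    #include <stdint.h>

    // Minimal stand-in so the example is self-contained; not the real V8 class.
    class IncrementalMarkingModel {
     public:
      // Assumed enum shape; the real declaration lives in incremental-marking.h.
      enum CompletionAction { GC_VIA_STACK_GUARD, NO_GC_VIA_STACK_GUARD };

      void Step(intptr_t allocated_bytes, CompletionAction action) {
        // The real Step() does a slice of marking and, once the marking deque
        // drains, calls MarkingComplete(action); only GC_VIA_STACK_GUARD makes
        // completion request a full GC via the isolate's stack guard.
      }
    };

    // Hypothetical call sites: an allocation path lets completion request a GC,
    // while an idle path that schedules its own GC would pass the no-GC action.
    void OnAllocationSketch(IncrementalMarkingModel* marking, intptr_t bytes) {
      marking->Step(bytes, IncrementalMarkingModel::GC_VIA_STACK_GUARD);
    }

    void OnIdleNotificationSketch(IncrementalMarkingModel* marking, intptr_t step) {
      marking->Step(step, IncrementalMarkingModel::NO_GC_VIA_STACK_GUARD);
    }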