Chromium Code Reviews

Index: src/mark-compact.cc
===================================================================
--- src/mark-compact.cc (revision 12610)
+++ src/mark-compact.cc (working copy)
@@ -1053,19 +1053,46 @@
     MarkObjectByPointer(heap->mark_compact_collector(), p, p);
   }
 
-  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
+  INLINE(static void VisitPointers(Heap* heap,
+                                   Object** anchor,
+                                   Object** start,
+                                   Object** end)) {
     // Mark all objects pointed to in [start, end).
     const int kMinRangeForMarkingRecursion = 64;
     if (end - start >= kMinRangeForMarkingRecursion) {
-      if (VisitUnmarkedObjects(heap, start, end)) return;
+      if (VisitUnmarkedObjects(heap, anchor, start, end)) return;
       // We are close to a stack overflow, so just mark the objects.
     }
     MarkCompactCollector* collector = heap->mark_compact_collector();
     for (Object** p = start; p < end; p++) {
-      MarkObjectByPointer(collector, start, p);
+      MarkObjectByPointer(collector, anchor, p);
     }
   }
 
+  static void VisitHugeFixedArray(Heap* heap, FixedArray* array, int length);
+
+  // The deque is contiguous and allocated in new space, so it is contained
+  // in one page minus the header.  Since its size is also a power of two,
+  // it is at most half the size of a page.  We want to scan a number of
+  // array entries that is less than the number of entries in the deque, so
+  // we divide by 2 once more.
+  static const int kScanningChunk = Page::kPageSize / 4 / kPointerSize;
+
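A worked check of the arithmetic behind kScanningChunk, as a standalone
sketch. The 1 MB page size and 8-byte pointers below are assumed values for
a 64-bit build, not constants taken from this patch:

    // Assumed stand-ins for Page::kPageSize and kPointerSize (64-bit build).
    static const int kAssumedPageSize = 1 << 20;  // 1 MB
    static const int kAssumedPointerSize = 8;

    // Deque capacity: a power-of-two buffer that fits in a page minus its
    // header can be at most half a page of pointer-sized entries.
    static const int kDequeEntries =
        kAssumedPageSize / 2 / kAssumedPointerSize;   // 65536

    // Scanning chunk: half the deque capacity again, so one chunk of a huge
    // array can never overflow an initially empty deque.
    static const int kScanningChunkCheck =
        kAssumedPageSize / 4 / kAssumedPointerSize;   // 32768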
+  INLINE(static void VisitFixedArray(Map* map, HeapObject* object)) {
+    FixedArray* array = FixedArray::cast(object);
+    int length = array->length();
+    Heap* heap = map->GetHeap();
+
+    if (length < kScanningChunk ||
+        MemoryChunk::FromAddress(array->address())->owner()->identity() !=
+            LO_SPACE) {
+      Object** start = array->data_start();
+      VisitPointers(heap, start, start, start + length);
+    } else {
+      VisitHugeFixedArray(heap, array, length);
+    }
+  }
+
   // Marks the object black and pushes it on the marking stack.
   INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
     MarkBit mark = Marking::MarkBitFrom(object);
@@ -1115,6 +1142,7 @@
   // Visit all unmarked objects pointed to by [start, end).
   // Returns false if the operation fails (lack of stack space).
   static inline bool VisitUnmarkedObjects(Heap* heap,
+                                          Object** anchor,
                                           Object** start,
                                           Object** end) {
     // Return false if we are close to the stack limit.
@@ -1126,7 +1154,7 @@
     for (Object** p = start; p < end; p++) {
       Object* o = *p;
       if (!o->IsHeapObject()) continue;
-      collector->RecordSlot(start, p, o);
+      collector->RecordSlot(anchor, p, o);
       HeapObject* obj = HeapObject::cast(o);
       MarkBit mark = Marking::MarkBitFrom(obj);
       if (mark.Get()) continue;
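A note on the anchor parameter threaded through these hunks: RecordSlot
previously received start, which is the containing object's first slot only
when the whole object is scanned in one go. With huge arrays now scanned in
subranges, start can point into the middle of the array, so callers pass the
object's first slot separately. A minimal sketch of the resulting calling
convention (variable names here are illustrative, not from the patch):

    // Illustrative: scanning entries [from, to) of a large FixedArray while
    // still recording slots against the start of the element area.
    Object** anchor = array->data_start();  // first element slot of the array
    VisitPointers(heap, anchor, anchor + from, anchor + to);
    // Inside, each slot is recorded as RecordSlot(anchor, p, *p), not
    // RecordSlot(anchor + from, p, *p).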
@@ -1447,9 +1475,11 @@
                                            bool flush_code_candidate) {
     Heap* heap = map->GetHeap();
 
-    VisitPointers(heap,
-                  HeapObject::RawField(object, JSFunction::kPropertiesOffset),
-                  HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
+    Object** start =
+        HeapObject::RawField(object, JSFunction::kPropertiesOffset);
+    Object** end =
+        HeapObject::RawField(object, JSFunction::kCodeEntryOffset);
+    VisitPointers(heap, start, start, end);
 
     if (!flush_code_candidate) {
       VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
@@ -1473,11 +1503,10 @@
       }
     }
 
-    VisitPointers(
-        heap,
-        HeapObject::RawField(object,
-                             JSFunction::kCodeEntryOffset + kPointerSize),
-        HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset));
+    start = HeapObject::RawField(object,
+                                 JSFunction::kCodeEntryOffset + kPointerSize);
+    end = HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset);
+    VisitPointers(heap, start, start, end);
   }
@@ -1493,17 +1522,40 @@
                                            SharedFunctionInfo::kCodeOffset));
     }
 
-    VisitPointers(
-        heap,
+    Object** start =
         HeapObject::RawField(object,
-                             SharedFunctionInfo::kOptimizedCodeMapOffset),
-        HeapObject::RawField(object, SharedFunctionInfo::kSize));
+                             SharedFunctionInfo::kOptimizedCodeMapOffset);
+    Object** end =
+        HeapObject::RawField(object, SharedFunctionInfo::kSize);
+
+    VisitPointers(heap, start, start, end);
   }
 
   static VisitorDispatchTable<Callback> non_count_table_;
 };
 
 
+void MarkCompactMarkingVisitor::VisitHugeFixedArray(Heap* heap,
+                                                    FixedArray* array,
+                                                    int length) {
+  MemoryChunk* chunk = MemoryChunk::FromAddress(array->address());
+
+  ASSERT(chunk->owner()->identity() == LO_SPACE);
+
+  Object** start = array->data_start();
+  int from =
+      chunk->IsPartiallyScanned() ? chunk->PartiallyScannedProgress() : 0;
+  int to = Min(from + kScanningChunk, length);
+  VisitPointers(heap, start, start + from, start + to);
+
+  if (to == length) {
+    chunk->SetCompletelyScanned();
+  } else {
+    chunk->SetPartiallyScannedProgress(to);
+  }
+}
+
+
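The resume protocol above is compact enough to restate outside the
collector. A stripped-down sketch of the progress tracking, with the
MemoryChunk flag and accessors replaced by a plain struct (all names below
are simplified stand-ins, not V8 API):

    #include <algorithm>

    static const int kScanningChunk = 32768;  // assumed value, see above

    // Simplified stand-in for the partial-scan state kept on a MemoryChunk.
    struct ChunkState {
      ChunkState() : partially_scanned(false), progress(0) {}
      bool partially_scanned;
      int progress;  // first unscanned element index
    };

    // Visits at most kScanningChunk entries per call; returns true once all
    // 'length' entries have been visited.
    bool ScanOneIncrement(ChunkState* chunk, int length) {
      int from = chunk->partially_scanned ? chunk->progress : 0;
      int to = std::min(from + kScanningChunk, length);
      // ... visit element slots in [from, to) here ...
      if (to == length) {
        chunk->partially_scanned = false;  // SetCompletelyScanned()
        return true;
      }
      chunk->partially_scanned = true;     // SetPartiallyScannedProgress(to)
      chunk->progress = to;
      return false;
    }

Each call does a bounded amount of work, which is what lets the marker set
the array aside and return to it on a later pass.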
 void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
     FixedArrayBase* fixed_array,
     FixedArraySubInstanceType fast_type,
@@ -1645,6 +1697,9 @@
   table_.Register(kVisitJSRegExp,
                   &VisitRegExpAndFlushCode);
 
+  table_.Register(kVisitFixedArray,
+                  &VisitFixedArray);
+
   if (FLAG_track_gc_object_stats) {
     // Copy the visitor table to make call-through possible.
     non_count_table_.CopyFrom(&table_);
@@ -1669,7 +1724,7 @@
   }
 
   void VisitPointers(Object** start, Object** end) {
-    MarkCompactMarkingVisitor::VisitPointers(heap_, start, end);
+    MarkCompactMarkingVisitor::VisitPointers(heap_, start, start, end);
   }
 
  private:
@@ -2128,6 +2183,7 @@
     MarkCompactMarkingVisitor::IterateBody(map, object);
   }
 
+  ProcessLargePostponedArrays(heap(), &marking_deque_);
 
   // Process encountered weak maps, mark objects only reachable by those
   // weak maps and repeat until fix-point is reached.
@@ -2136,12 +2192,29 @@
 }
 
+void MarkCompactCollector::ProcessLargePostponedArrays(Heap* heap,

Michael Starzinger  2012/09/26 11:40:07
Just make this non-static and you don't need to pass the heap.

Erik Corry  2012/09/26 11:42:20
The incremental marker also uses it, to avoid code duplication.

+                                                       MarkingDeque* deque) {
+  ASSERT(deque->IsEmpty());
+  LargeObjectIterator it(heap->lo_space());
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+    if (!obj->IsFixedArray()) continue;
+    MemoryChunk* p = MemoryChunk::FromAddress(obj->address());
+    if (p->IsPartiallyScanned()) {
+      deque->PushBlack(obj);
+    }
+  }
+}
+
+
 // Sweep the heap for overflowed objects, clear their overflow bits, and
 // push them on the marking stack.  Stop early if the marking stack fills
 // before sweeping completes.  If sweeping completes, there are no remaining
 // overflowed objects in the heap so the overflow flag on the marking stack
 // is cleared.
 void MarkCompactCollector::RefillMarkingDeque() {
+  if (FLAG_trace_gc) {
+    PrintPID("Marking queue overflowed\n");
+  }
   ASSERT(marking_deque_.overflowed());
   SemiSpaceIterator new_it(heap()->new_space());
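Putting the pieces together: the deque is drained as before, but a huge
FixedArray now contributes only one chunk of work per pop; when the deque
runs dry, ProcessLargePostponedArrays re-pushes every large array whose page
is still flagged partially scanned, and the cycle repeats to a fixed point.
A condensed sketch of that loop (VisitObjectBody is a hypothetical stand-in
for the dispatch through IterateBody; this is a reading of the patch, not
code from it):

    // Illustrative fixed-point loop over the marking deque.
    void DrainWithPostponedArrays(Heap* heap, MarkingDeque* deque) {
      do {
        while (!deque->IsEmpty()) {
          HeapObject* object = deque->Pop();
          // For a huge FixedArray this visits at most kScanningChunk
          // entries and records the progress on the object's MemoryChunk.
          VisitObjectBody(object);
        }
        // Re-push large arrays whose chunks are still partially scanned.
        ProcessLargePostponedArrays(heap, deque);
      } while (!deque->IsEmpty());
    }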