Index: src/mark-compact.cc
===================================================================
--- src/mark-compact.cc (revision 12610)
+++ src/mark-compact.cc (working copy)
@@ -1053,19 +1053,46 @@
     MarkObjectByPointer(heap->mark_compact_collector(), p, p);
   }
-  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
+  INLINE(static void VisitPointers(Heap* heap,
+                                   Object** anchor,
+                                   Object** start,
+                                   Object** end)) {
     // Mark all objects pointed to in [start, end).
     const int kMinRangeForMarkingRecursion = 64;
     if (end - start >= kMinRangeForMarkingRecursion) {
-      if (VisitUnmarkedObjects(heap, start, end)) return;
+      if (VisitUnmarkedObjects(heap, anchor, start, end)) return;
       // We are close to a stack overflow, so just mark the objects.
     }
     MarkCompactCollector* collector = heap->mark_compact_collector();
     for (Object** p = start; p < end; p++) {
-      MarkObjectByPointer(collector, start, p);
+      MarkObjectByPointer(collector, anchor, p);
    }
   }
+  static void VisitHugeFixedArray(Heap* heap, FixedArray* array, int length);
+
+  // The marking deque is contiguous and allocated in new space, so it is
+  // contained in one page minus the header.  Its size is also a power of
+  // two, so it occupies half a page.  We want to scan fewer array entries
+  // per chunk than the deque can hold, so we divide by two once more.
+  static const int kScanningChunk = Page::kPageSize / 4 / kPointerSize;
+
+  INLINE(static void VisitFixedArray(Map* map, HeapObject* object)) {
+    FixedArray* array = FixedArray::cast(object);
+    int length = array->length();
+    Heap* heap = map->GetHeap();
+
+    if (length < kScanningChunk ||
+        MemoryChunk::FromAddress(array->address())->owner()->identity() !=
+            LO_SPACE) {
+      Object** start_slot = array->data_start();
+      VisitPointers(heap, start_slot, start_slot, start_slot + length);
+    } else {
+      VisitHugeFixedArray(heap, array, length);
+    }
+  }
+
   // Marks the object black and pushes it on the marking stack.
   INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
     MarkBit mark = Marking::MarkBitFrom(object);
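
NOTE: The kScanningChunk arithmetic above can be sanity-checked in isolation. A minimal standalone sketch, assuming the common 1 MB page and 64-bit pointer size for Page::kPageSize and kPointerSize (the real constants come from V8's headers, not from this patch):

    #include <cstdio>

    int main() {
      const int kPageSize = 1 << 20;   // assumed 1 MB page
      const int kPointerSize = 8;      // assumed 64-bit build
      // The deque fits in a page minus the header; its power-of-two size
      // makes it half a page; halving again guarantees one chunk of array
      // entries can never overflow the deque.
      const int kScanningChunk = kPageSize / 4 / kPointerSize;
      std::printf("array entries per scanning chunk: %d\n", kScanningChunk);
      return 0;  // prints 32768 under these assumptions
    }
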
@@ -1112,21 +1139,22 @@
     IterateBody(map, obj);
   }
-  // Visit all unmarked objects pointed to by [start, end).
+  // Visit all unmarked objects pointed to by [start_slot, end_slot).
   // Returns false if the operation fails (lack of stack space).
   static inline bool VisitUnmarkedObjects(Heap* heap,
-                                          Object** start,
-                                          Object** end) {
+                                          Object** anchor_slot,
+                                          Object** start_slot,
+                                          Object** end_slot) {
    // Return false if we are close to the stack limit.
    StackLimitCheck check(heap->isolate());
    if (check.HasOverflowed()) return false;
    MarkCompactCollector* collector = heap->mark_compact_collector();
    // Visit the unmarked objects.
-    for (Object** p = start; p < end; p++) {
+    for (Object** p = start_slot; p < end_slot; p++) {
      Object* o = *p;
      if (!o->IsHeapObject()) continue;
-      collector->RecordSlot(start, p, o);
+      collector->RecordSlot(anchor_slot, p, o);
      HeapObject* obj = HeapObject::cast(o);
      MarkBit mark = Marking::MarkBitFrom(obj);
      if (mark.Get()) continue;
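
NOTE: The new anchor_slot parameter exists because RecordSlot needs the first slot of the containing object, and after this patch start_slot may point into the middle of a huge array rather than at its first element. A standalone model of the distinction (names invented for the sketch; V8's RecordSlot does real remembered-set bookkeeping rather than printing):

    #include <cstdio>

    struct Object {};

    // Stand-in for collector->RecordSlot(anchor, slot, value): the anchor
    // identifies the object that owns the slot being recorded.
    static void RecordSlot(Object** anchor, Object** slot) {
      std::printf("recorded slot %ld of the anchored object\n",
                  static_cast<long>(slot - anchor));
    }

    static void VisitRange(Object** anchor, Object** start, Object** end) {
      for (Object** p = start; p < end; p++) RecordSlot(anchor, p);
    }

    int main() {
      Object* slots[8] = {0};
      Object** base = slots;
      // Scanning only entries [4, 6): the recorded offsets stay correct
      // because the anchor is the array's first slot, not the sub-range
      // start.
      VisitRange(base, base + 4, base + 6);
      return 0;
    }
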
@@ -1447,9 +1475,11 @@
                                 bool flush_code_candidate) {
   Heap* heap = map->GetHeap();
-  VisitPointers(heap,
-                HeapObject::RawField(object, JSFunction::kPropertiesOffset),
-                HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
+  Object** start_slot =
+      HeapObject::RawField(object, JSFunction::kPropertiesOffset);
+  Object** end_slot =
+      HeapObject::RawField(object, JSFunction::kCodeEntryOffset);
+  VisitPointers(heap, start_slot, start_slot, end_slot);
   if (!flush_code_candidate) {
     VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
@@ -1473,11 +1503,12 @@
     }
   }
-  VisitPointers(
-      heap,
+  start_slot =
       HeapObject::RawField(object,
-                           JSFunction::kCodeEntryOffset + kPointerSize),
-      HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset));
+                           JSFunction::kCodeEntryOffset + kPointerSize);
+  end_slot =
+      HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset);
+  VisitPointers(heap, start_slot, start_slot, end_slot);
 }
@@ -1493,17 +1524,40 @@
                               SharedFunctionInfo::kCodeOffset));
   }
-  VisitPointers(
-      heap,
+  Object** start_slot =
       HeapObject::RawField(object,
-                           SharedFunctionInfo::kOptimizedCodeMapOffset),
-      HeapObject::RawField(object, SharedFunctionInfo::kSize));
+                           SharedFunctionInfo::kOptimizedCodeMapOffset);
+  Object** end_slot =
+      HeapObject::RawField(object, SharedFunctionInfo::kSize);
+
+  VisitPointers(heap, start_slot, start_slot, end_slot);
 }
   static VisitorDispatchTable<Callback> non_count_table_;
 };
+void MarkCompactMarkingVisitor::VisitHugeFixedArray(Heap* heap,
+                                                    FixedArray* array,
+                                                    int length) {
+  MemoryChunk* chunk = MemoryChunk::FromAddress(array->address());
+
+  ASSERT(chunk->owner()->identity() == LO_SPACE);
+
+  Object** start_slot = array->data_start();
+  int from =
+      chunk->IsPartiallyScanned() ? chunk->PartiallyScannedProgress() : 0;
+  int to = Min(from + kScanningChunk, length);
+  VisitPointers(heap, start_slot, start_slot + from, start_slot + to);
+
+  if (to == length) {
+    chunk->SetCompletelyScanned();
+  } else {
+    chunk->SetPartiallyScannedProgress(to);
+  }
+}
+
+
 void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
     FixedArrayBase* fixed_array,
     FixedArraySubInstanceType fast_type,
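
NOTE: VisitHugeFixedArray above makes huge-array scanning resumable: the progress lives on the array's MemoryChunk, so each invocation does one bounded chunk of work and leaves a marker for the next. A minimal standalone model of the same resume protocol (field names invented for the sketch; V8 keeps this state in the MemoryChunk header):

    #include <algorithm>
    #include <cstdio>

    static const int kScanningChunk = 4;  // tiny value for illustration

    struct ChunkState {
      bool partially_scanned;
      int progress;
    };

    // One bounded scanning step; returns true once the whole array is done.
    static bool ScanStep(ChunkState* state, int length) {
      int from = state->partially_scanned ? state->progress : 0;
      int to = std::min(from + kScanningChunk, length);
      std::printf("visiting entries [%d, %d)\n", from, to);
      if (to == length) {
        state->partially_scanned = false;  // SetCompletelyScanned()
        return true;
      }
      state->partially_scanned = true;     // SetPartiallyScannedProgress(to)
      state->progress = to;
      return false;
    }

    int main() {
      ChunkState state = { false, 0 };
      while (!ScanStep(&state, 10)) {
        // In V8 the array is re-queued between steps by
        // ProcessLargePostponedArrays (see the later hunk).
      }
      return 0;
    }
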
@@ -1645,6 +1699,9 @@
   table_.Register(kVisitJSRegExp,
                   &VisitRegExpAndFlushCode);
+  table_.Register(kVisitFixedArray,
+                  &VisitFixedArray);
+
   if (FLAG_track_gc_object_stats) {
     // Copy the visitor table to make call-through possible.
     non_count_table_.CopyFrom(&table_);
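
NOTE: The Register call swaps the chunk-aware visitor into the static dispatch table used for FixedArray maps, overriding the generic body visitor. A minimal model of that kind of callback table (ids and callbacks invented for the sketch, not V8's actual VisitorDispatchTable API):

    #include <cstdio>

    typedef void (*Callback)(const char* what);
    enum VisitorId { kVisitFixedArray, kVisitorIdCount };

    static void GenericVisit(const char* what) {
      std::printf("generic visit of %s\n", what);
    }
    static void VisitFixedArrayChunked(const char* what) {
      std::printf("chunked visit of %s\n", what);
    }

    int main() {
      Callback table[kVisitorIdCount] = { GenericVisit };
      table[kVisitFixedArray] = VisitFixedArrayChunked;  // table_.Register(...)
      table[kVisitFixedArray]("FixedArray");
      return 0;
    }
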
@@ -1668,8 +1725,9 @@
     MarkCompactMarkingVisitor::VisitPointer(heap_, p);
   }
-  void VisitPointers(Object** start, Object** end) {
-    MarkCompactMarkingVisitor::VisitPointers(heap_, start, end);
+  void VisitPointers(Object** start_slot, Object** end_slot) {
+    MarkCompactMarkingVisitor::VisitPointers(
+        heap_, start_slot, start_slot, end_slot);
   }
  private:
@@ -1696,8 +1754,8 @@
   explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
       : collector_(collector) {}
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** p = start; p < end; p++) VisitPointer(p);
+  void VisitPointers(Object** start_slot, Object** end_slot) {
+    for (Object** p = start_slot; p < end_slot; p++) VisitPointer(p);
   }
   void VisitPointer(Object** slot) {
@@ -1808,8 +1866,8 @@
     MarkObjectByPointer(p);
   }
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
+  void VisitPointers(Object** start_slot, Object** end_slot) {
+    for (Object** p = start_slot; p < end_slot; p++) MarkObjectByPointer(p);
   }
  private:
@@ -1845,9 +1903,9 @@
   explicit SymbolTableCleaner(Heap* heap)
       : heap_(heap), pointers_removed_(0) { }
-  virtual void VisitPointers(Object** start, Object** end) {
-    // Visit all HeapObject pointers in [start, end).
-    for (Object** p = start; p < end; p++) {
+  virtual void VisitPointers(Object** start_slot, Object** end_slot) {
+    // Visit all HeapObject pointers in [start_slot, end_slot).
+    for (Object** p = start_slot; p < end_slot; p++) {
       Object* o = *p;
       if (o->IsHeapObject() &&
           !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
@@ -2128,6 +2186,7 @@
     MarkCompactMarkingVisitor::IterateBody(map, object);
   }
+  ProcessLargePostponedArrays(heap(), &marking_deque_);
   // Process encountered weak maps, mark objects only reachable by those
   // weak maps and repeat until fix-point is reached.
@@ -2136,12 +2195,29 @@
 }
+void MarkCompactCollector::ProcessLargePostponedArrays(Heap* heap,
+                                                       MarkingDeque* deque) {
+  ASSERT(deque->IsEmpty());
+  LargeObjectIterator it(heap->lo_space());
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+    if (!obj->IsFixedArray()) continue;
+    MemoryChunk* p = MemoryChunk::FromAddress(obj->address());
+    if (p->IsPartiallyScanned()) {
+      deque->PushBlack(obj);
+    }
+  }
+}
+
+
// Sweep the heap for overflowed objects, clear their overflow bits, and
// push them on the marking stack.  Stop early if the marking stack fills
// before sweeping completes.  If sweeping completes, there are no remaining
// overflowed objects in the heap so the overflow flag on the marking stack
// is cleared.
void MarkCompactCollector::RefillMarkingDeque() {
+  if (FLAG_trace_gc) {
+    PrintPID("Marking queue overflowed\n");
+  }
  ASSERT(marking_deque_.overflowed());
  SemiSpaceIterator new_it(heap()->new_space());
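
NOTE: ProcessLargePostponedArrays is the other half of the chunked-scanning protocol: once the deque drains, any large FixedArray left in the partially-scanned state is pushed back so marking still reaches a fixed point. A standalone model of that outer loop (container types and names invented for the sketch):

    #include <algorithm>
    #include <deque>
    #include <vector>

    static const int kScanningChunk = 4;

    struct BigArray {
      int length;
      int progress;  // models the MemoryChunk scanning state
    };

    int main() {
      std::vector<BigArray> lo_space(1);
      lo_space[0].length = 10;
      lo_space[0].progress = 0;

      std::deque<BigArray*> marking_deque;
      marking_deque.push_back(&lo_space[0]);

      do {
        // Drain the deque: each pop visits at most one chunk of entries.
        while (!marking_deque.empty()) {
          BigArray* a = marking_deque.front();
          marking_deque.pop_front();
          a->progress = std::min(a->progress + kScanningChunk, a->length);
        }
        // ProcessLargePostponedArrays: re-queue interrupted arrays.
        for (size_t i = 0; i < lo_space.size(); i++) {
          BigArray* a = &lo_space[i];
          if (a->progress > 0 && a->progress < a->length)
            marking_deque.push_back(a);
        }
      } while (!marking_deque.empty());
      return 0;
    }
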
@@ -2632,8 +2708,8 @@
     UpdatePointer(p);
   }
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** p = start; p < end; p++) UpdatePointer(p);
+  void VisitPointers(Object** start_slot, Object** end_slot) {
+    for (Object** p = start_slot; p < end_slot; p++) UpdatePointer(p);
   }
   void VisitEmbeddedPointer(RelocInfo* rinfo) {