Chromium Code Reviews
| Index: src/mark-compact.cc |
| diff --git a/src/mark-compact.cc b/src/mark-compact.cc |
| index 0aa119219adfaad70c22460973ca125b3aa21086..1c26c96411ddbdda7f846aac256c931cea1ddb8d 100644 |
| --- a/src/mark-compact.cc |
| +++ b/src/mark-compact.cc |
| @@ -64,13 +64,14 @@ MarkCompactCollector::MarkCompactCollector() : // NOLINT |
| abort_incremental_marking_(false), |
| compacting_(false), |
| was_marked_incrementally_(false), |
| - collect_maps_(FLAG_collect_maps), |
| + clear_map_transitions_(true), |
| flush_monomorphic_ics_(false), |
| tracer_(NULL), |
| migration_slots_buffer_(NULL), |
| heap_(NULL), |
| code_flusher_(NULL), |
| - encountered_weak_maps_(NULL) { } |
| + encountered_weak_maps_(NULL), |
| + marker_(this, this) { } |
| #ifdef DEBUG |
| @@ -282,7 +283,7 @@ void MarkCompactCollector::CollectGarbage() { |
| MarkLiveObjects(); |
| ASSERT(heap_->incremental_marking()->IsStopped()); |
| - if (collect_maps_) ClearNonLiveTransitions(); |
| + if (FLAG_collect_maps) ClearNonLiveTransitions(); |
|
Vyacheslav Egorov (Chromium)
2012/05/11 12:58:35
It seems that running with --nocollect-maps would […comment truncated in capture]
Michael Starzinger
2012/05/11 14:51:53
Yes, now the flag treats pointers in both directions […comment truncated in capture]
|
| ClearWeakMaps(); |
| @@ -294,7 +295,7 @@ void MarkCompactCollector::CollectGarbage() { |
| SweepSpaces(); |
| - if (!collect_maps_) ReattachInitialMaps(); |
| + if (!FLAG_collect_maps) ReattachInitialMaps(); |
| Finish(); |
| @@ -658,10 +659,10 @@ void MarkCompactCollector::AbortCompaction() { |
| void MarkCompactCollector::Prepare(GCTracer* tracer) { |
| was_marked_incrementally_ = heap()->incremental_marking()->IsMarking(); |
| - // Disable collection of maps if incremental marking is enabled. |
| - // Map collection algorithm relies on a special map transition tree traversal |
| - // order which is not implemented for incremental marking. |
| - collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_; |
| + // TODO(1465): Implement heuristic to switch between clearing map transitions |
| + // by marking strongly "up the tree" and collecting whole transition trees |
| + // which requires marking strongly "down the tree". |
| + clear_map_transitions_ = true; |
|
Vyacheslav Egorov (Chromium)
2012/05/11 12:58:35
Why does not it depend on FLAG_collect_maps?
Flag
Michael Starzinger
2012/05/11 14:51:53
Done, removed flag for now.
|
| // Monomorphic ICs are preserved when possible, but need to be flushed |
| // when they might be keeping a Context alive, or when the heap is about |
| @@ -1798,11 +1799,22 @@ void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) { |
| heap_->ClearCacheOnMap(map); |
| // When map collection is enabled we have to mark through map's transitions |
| - // in a special way to make transition links weak. |
| - // Only maps for subclasses of JSReceiver can have transitions. |
| + // in a special way to make transition links weak. Only maps for subclasses |
| + // of JSReceiver can have transitions. |
| STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); |
| - if (collect_maps_ && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) { |
| - MarkMapContents(map); |
| + if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) { |
| + marker_.MarkMapContents(map); |
| + |
| + // Mark the Object* fields of the Map. Since the descriptor array has been |
|
Vyacheslav Egorov (Chromium)
2012/05/11 12:58:35
Try to move this to MarkMapContents to further share […comment truncated in capture]
Michael Starzinger
2012/05/11 14:51:53
Done.
|
| + // marked already, it is fine that one of these fields contains a pointer |
| + // to it. But make sure to skip back pointer and prototype transitions. |
| + STATIC_ASSERT(Map::kPointerFieldsEndOffset == |
| + Map::kPrototypeTransitionsOrBackPointerOffset + kPointerSize); |
| + Object** start_slot = HeapObject::RawField( |
| + map, Map::kPointerFieldsBeginOffset); |
| + Object** end_slot = HeapObject::RawField( |
| + map, Map::kPrototypeTransitionsOrBackPointerOffset); |
| + StaticMarkingVisitor::VisitPointers(map->GetHeap(), start_slot, end_slot); |
| } else { |
| marking_deque_.PushBlack(map); |
| } |
| @@ -1812,85 +1824,73 @@ void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) { |
| } |
| -void MarkCompactCollector::MarkMapContents(Map* map) { |
| +// Force instantiation of template instances. |
| +template void Marker<IncrementalMarking>::MarkMapContents(Map* map); |
| +template void Marker<MarkCompactCollector>::MarkMapContents(Map* map); |
| + |
| + |
| +template <class T> |
| +void Marker<T>::MarkMapContents(Map* map) { |
| // Mark prototype transitions array but don't push it into marking stack. |
| // This will make references from it weak. We will clean dead prototype |
| - // transitions in ClearNonLiveTransitions. But make sure that back pointers |
| - // stored inside prototype transitions arrays are marked. |
| - Object* raw_proto_transitions = map->unchecked_prototype_transitions(); |
| - if (raw_proto_transitions->IsFixedArray()) { |
| - FixedArray* prototype_transitions = FixedArray::cast(raw_proto_transitions); |
| + // transitions in ClearNonLiveTransitions. |
| + Object** proto_trans_slot = |
| + HeapObject::RawField(map, Map::kPrototypeTransitionsOrBackPointerOffset); |
| + HeapObject* prototype_transitions = HeapObject::cast(*proto_trans_slot); |
| + if (prototype_transitions->IsFixedArray()) { |
| + mark_compact_collector()->RecordSlot(proto_trans_slot, |
| + proto_trans_slot, |
| + prototype_transitions); |
| MarkBit mark = Marking::MarkBitFrom(prototype_transitions); |
| if (!mark.Get()) { |
| mark.Set(); |
| MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(), |
| prototype_transitions->Size()); |
| - MarkObjectAndPush(HeapObject::cast( |
| - prototype_transitions->get(Map::kProtoTransitionBackPointerOffset))); |
| } |
| } |
| - Object** raw_descriptor_array_slot = |
| - HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset); |
| - Object* raw_descriptor_array = *raw_descriptor_array_slot; |
| - if (!raw_descriptor_array->IsSmi()) { |
| - MarkDescriptorArray( |
| - reinterpret_cast<DescriptorArray*>(raw_descriptor_array)); |
| - } |
| - |
| - // Mark the Object* fields of the Map. |
| - // Since the descriptor array has been marked already, it is fine |
| - // that one of these fields contains a pointer to it. |
| - Object** start_slot = HeapObject::RawField(map, |
| - Map::kPointerFieldsBeginOffset); |
| - |
| - Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset); |
| + // Make sure that the back pointer stored either in the map itself or inside |
| + // its prototype transitions array is marked when clearing map transitions. |
| + // Treat pointers in the descriptor array as weak and also mark that array to |
| + // prevent visiting it later. |
| + if (mark_compact_collector()->clear_map_transitions_) { |
| + base_marker()->MarkObject(HeapObject::cast(map->GetBackPointer())); |
| - StaticMarkingVisitor::VisitPointers(map->GetHeap(), start_slot, end_slot); |
| -} |
| - |
| - |
| -void MarkCompactCollector::MarkAccessorPairSlot(HeapObject* accessors, |
| - int offset) { |
| - Object** slot = HeapObject::RawField(accessors, offset); |
| - HeapObject* accessor = HeapObject::cast(*slot); |
| - if (accessor->IsMap()) return; |
| - RecordSlot(slot, slot, accessor); |
| - MarkObjectAndPush(accessor); |
| + Object** descriptor_array_slot = |
| + HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset); |
| + Object* descriptor_array = *descriptor_array_slot; |
| + if (!descriptor_array->IsSmi()) { |
| + MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(descriptor_array)); |
| + } |
| + } |
| } |
| -void MarkCompactCollector::MarkDescriptorArray( |
| - DescriptorArray* descriptors) { |
| - MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors); |
| - if (descriptors_mark.Get()) return; |
| +template <class T> |
| +void Marker<T>::MarkDescriptorArray(DescriptorArray* descriptors) { |
| // Empty descriptor array is marked as a root before any maps are marked. |
| - ASSERT(descriptors != heap()->empty_descriptor_array()); |
| - SetMark(descriptors, descriptors_mark); |
| + ASSERT(descriptors != descriptors->GetHeap()->empty_descriptor_array()); |
| - FixedArray* contents = reinterpret_cast<FixedArray*>( |
| + // The DescriptorArray contains a pointer to its contents array, but the |
| + // contents array will be marked black and hence not be visited again. |
| + if (!base_marker()->MarkObject(descriptors)) return; |
| + FixedArray* contents = FixedArray::cast( |
| descriptors->get(DescriptorArray::kContentArrayIndex)); |
| - ASSERT(contents->IsHeapObject()); |
| - ASSERT(!IsMarked(contents)); |
| - ASSERT(contents->IsFixedArray()); |
| ASSERT(contents->length() >= 2); |
| - MarkBit contents_mark = Marking::MarkBitFrom(contents); |
| - SetMark(contents, contents_mark); |
| - // Contents contains (value, details) pairs. If the details say that the type |
| - // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION, |
| - // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as |
| - // live. Only for MAP_TRANSITION, EXTERNAL_ARRAY_TRANSITION and |
| - // CONSTANT_TRANSITION is the value an Object* (a Map*). |
| + ASSERT(Marking::IsWhite(Marking::MarkBitFrom(contents))); |
| + base_marker()->MarkObjectWithoutPush(contents); |
| + |
| + // Contents contains (value, details) pairs. If the descriptor contains a |
| + // transition (value is a Map), we don't mark the value as live. It might |
| + // be set to the NULL_DESCRIPTOR in ClearNonLiveTransitions later. |
| for (int i = 0; i < contents->length(); i += 2) { |
| - // If the pair (value, details) at index i, i+1 is not |
| - // a transition or null descriptor, mark the value. |
| PropertyDetails details(Smi::cast(contents->get(i + 1))); |
| Object** slot = contents->data_start() + i; |
| if (!(*slot)->IsHeapObject()) continue; |
| HeapObject* value = HeapObject::cast(*slot); |
| - RecordSlot(slot, slot, *slot); |
| + mark_compact_collector()->RecordSlot(slot, slot, *slot); |
| switch (details.type()) { |
| case NORMAL: |
| @@ -1898,21 +1898,22 @@ void MarkCompactCollector::MarkDescriptorArray( |
| case CONSTANT_FUNCTION: |
| case HANDLER: |
| case INTERCEPTOR: |
| - MarkObjectAndPush(value); |
| + base_marker()->MarkObject(value); |
| break; |
| case CALLBACKS: |
| if (!value->IsAccessorPair()) { |
| - MarkObjectAndPush(value); |
| - } else if (!MarkObjectWithoutPush(value)) { |
| - MarkAccessorPairSlot(value, AccessorPair::kGetterOffset); |
| - MarkAccessorPairSlot(value, AccessorPair::kSetterOffset); |
| + base_marker()->MarkObject(value); |
| + } else if (base_marker()->MarkObjectWithoutPush(value)) { |
| + AccessorPair* accessors = AccessorPair::cast(value); |
| + MarkAccessorPairSlot(accessors, AccessorPair::kGetterOffset); |
| + MarkAccessorPairSlot(accessors, AccessorPair::kSetterOffset); |
| } |
| break; |
| case ELEMENTS_TRANSITION: |
| // For maps with multiple elements transitions, the transition maps are |
| // stored in a FixedArray. Keep the fixed array alive but not the maps |
| // that it refers to. |
| - if (value->IsFixedArray()) MarkObjectWithoutPush(value); |
| + if (value->IsFixedArray()) base_marker()->MarkObjectWithoutPush(value); |
| break; |
| case MAP_TRANSITION: |
| case CONSTANT_TRANSITION: |
| @@ -1920,9 +1921,16 @@ void MarkCompactCollector::MarkDescriptorArray( |
| break; |
| } |
| } |
| - // The DescriptorArray descriptors contains a pointer to its contents array, |
| - // but the contents array is already marked. |
| - marking_deque_.PushBlack(descriptors); |
| +} |
| + |
| + |
| +template <class T> |
| +void Marker<T>::MarkAccessorPairSlot(AccessorPair* accessors, int offset) { |
| + Object** slot = HeapObject::RawField(accessors, offset); |
| + HeapObject* accessor = HeapObject::cast(*slot); |
| + if (accessor->IsMap()) return; |
| + mark_compact_collector()->RecordSlot(slot, slot, accessor); |
| + base_marker()->MarkObject(accessor); |
| } |
| @@ -2526,6 +2534,7 @@ void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) { |
| void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map, |
| MarkBit map_mark) { |
| + if (!clear_map_transitions_) return; |
| Object* potential_parent = map->GetBackPointer(); |
| if (!potential_parent->IsMap()) return; |
| Map* parent = Map::cast(potential_parent); |