OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 46 matching lines...)
57 | 57 |
58 MarkCompactCollector::MarkCompactCollector() : // NOLINT | 58 MarkCompactCollector::MarkCompactCollector() : // NOLINT |
59 #ifdef DEBUG | 59 #ifdef DEBUG |
60 state_(IDLE), | 60 state_(IDLE), |
61 #endif | 61 #endif |
62 sweep_precisely_(false), | 62 sweep_precisely_(false), |
63 reduce_memory_footprint_(false), | 63 reduce_memory_footprint_(false), |
64 abort_incremental_marking_(false), | 64 abort_incremental_marking_(false), |
65 compacting_(false), | 65 compacting_(false), |
66 was_marked_incrementally_(false), | 66 was_marked_incrementally_(false), |
| 67 collect_maps_(FLAG_collect_maps), |
67 flush_monomorphic_ics_(false), | 68 flush_monomorphic_ics_(false), |
68 tracer_(NULL), | 69 tracer_(NULL), |
69 migration_slots_buffer_(NULL), | 70 migration_slots_buffer_(NULL), |
70 heap_(NULL), | 71 heap_(NULL), |
71 code_flusher_(NULL), | 72 code_flusher_(NULL), |
72 encountered_weak_maps_(NULL), | 73 encountered_weak_maps_(NULL) { } |
73 marker_(this, this) { } | |
74 | 74 |
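A note on the initializer-list change above: C++ runs member initializers in declaration order, not in the order written in the constructor, so the new collect_maps_(FLAG_collect_maps) entry has to sit at the matching position among the declarations in mark-compact.h to stay warning-free under -Wreorder. A minimal illustration with a hypothetical class, not V8 code:

    #include <cstdio>

    class Example {
     public:
      // Members initialize in declaration order (a_ then b_), whatever the
      // order of this list; b_ may therefore safely read a_.
      Example() : a_(1), b_(a_ + 1) {}
      void Print() const { std::printf("%d %d\n", a_, b_); }  // prints "1 2"

     private:
      int a_;
      int b_;  // Declared after a_; swapping these would leave b_ garbage.
    };

    int main() { Example().Print(); }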
75 | 75 |
76 #ifdef DEBUG | 76 #ifdef DEBUG |
77 class VerifyMarkingVisitor: public ObjectVisitor { | 77 class VerifyMarkingVisitor: public ObjectVisitor { |
78 public: | 78 public: |
79 void VisitPointers(Object** start, Object** end) { | 79 void VisitPointers(Object** start, Object** end) { |
80 for (Object** current = start; current < end; current++) { | 80 for (Object** current = start; current < end; current++) { |
81 if ((*current)->IsHeapObject()) { | 81 if ((*current)->IsHeapObject()) { |
82 HeapObject* object = HeapObject::cast(*current); | 82 HeapObject* object = HeapObject::cast(*current); |
83 ASSERT(HEAP->mark_compact_collector()->IsMarked(object)); | 83 ASSERT(HEAP->mark_compact_collector()->IsMarked(object)); |
(...skipping 191 matching lines...)
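VerifyMarkingVisitor above walks raw slot ranges [start, end) and asserts that every heap pointer it finds points at a marked object; the half-open pointer-range walk is the common shape of V8's ObjectVisitor callbacks. A freestanding sketch under toy types (nothing here is V8's API):

    #include <cassert>

    struct ToyObject { bool marked = true; };

    // Check each slot in [start, end); null stands in for a non-heap
    // (immediate) value, mirroring the IsHeapObject() guard above.
    void VerifyPointers(ToyObject** start, ToyObject** end) {
      for (ToyObject** current = start; current < end; current++) {
        if (*current != nullptr) assert((*current)->marked);
      }
    }

    int main() {
      ToyObject a, b;
      ToyObject* slots[] = { &a, nullptr, &b };
      VerifyPointers(slots, slots + 3);
    }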
275 | 275 |
276 void MarkCompactCollector::CollectGarbage() { | 276 void MarkCompactCollector::CollectGarbage() { |
277 // Make sure that Prepare() has been called. The individual steps below will | 277 // Make sure that Prepare() has been called. The individual steps below will |
278 // update the state as they proceed. | 278 // update the state as they proceed. |
279 ASSERT(state_ == PREPARE_GC); | 279 ASSERT(state_ == PREPARE_GC); |
280 ASSERT(encountered_weak_maps_ == Smi::FromInt(0)); | 280 ASSERT(encountered_weak_maps_ == Smi::FromInt(0)); |
281 | 281 |
282 MarkLiveObjects(); | 282 MarkLiveObjects(); |
283 ASSERT(heap_->incremental_marking()->IsStopped()); | 283 ASSERT(heap_->incremental_marking()->IsStopped()); |
284 | 284 |
285 if (FLAG_collect_maps) ClearNonLiveTransitions(); | 285 if (collect_maps_) ClearNonLiveTransitions(); |
286 | 286 |
287 ClearWeakMaps(); | 287 ClearWeakMaps(); |
288 | 288 |
289 #ifdef DEBUG | 289 #ifdef DEBUG |
290 if (FLAG_verify_heap) { | 290 if (FLAG_verify_heap) { |
291 VerifyMarking(heap_); | 291 VerifyMarking(heap_); |
292 } | 292 } |
293 #endif | 293 #endif |
294 | 294 |
295 SweepSpaces(); | 295 SweepSpaces(); |
296 | 296 |
297 if (!FLAG_collect_maps) ReattachInitialMaps(); | 297 if (!collect_maps_) ReattachInitialMaps(); |
298 | 298 |
299 Finish(); | 299 Finish(); |
300 | 300 |
301 tracer_ = NULL; | 301 tracer_ = NULL; |
302 } | 302 } |
303 | 303 |
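CollectGarbage() leans on the DEBUG-only state_ member seen in the constructor hunk: Prepare() must already have advanced the collector to PREPARE_GC, and each phase updates the state as it proceeds. A stripped-down sketch of that discipline, with invented phase names:

    #include <cassert>

    class PhasedCollector {
     public:
      enum State { IDLE, PREPARE_GC, MARKING, SWEEPING };

      void Prepare() {
        assert(state_ == IDLE);
        state_ = PREPARE_GC;
      }

      void CollectGarbage() {
        assert(state_ == PREPARE_GC);  // Prepare() must have been called.
        state_ = MARKING;              // MarkLiveObjects() would run here.
        state_ = SWEEPING;             // SweepSpaces() would run here.
        state_ = IDLE;                 // Finish() resets for the next cycle.
      }

     private:
      State state_ = IDLE;
    };

    int main() {
      PhasedCollector collector;
      collector.Prepare();
      collector.CollectGarbage();
    }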
304 | 304 |
305 #ifdef DEBUG | 305 #ifdef DEBUG |
306 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) { | 306 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) { |
307 PageIterator it(space); | 307 PageIterator it(space); |
(...skipping 343 matching lines...)
651 evacuation_candidates_.Rewind(0); | 651 evacuation_candidates_.Rewind(0); |
652 invalidated_code_.Rewind(0); | 652 invalidated_code_.Rewind(0); |
653 } | 653 } |
654 ASSERT_EQ(0, evacuation_candidates_.length()); | 654 ASSERT_EQ(0, evacuation_candidates_.length()); |
655 } | 655 } |
656 | 656 |
657 | 657 |
658 void MarkCompactCollector::Prepare(GCTracer* tracer) { | 658 void MarkCompactCollector::Prepare(GCTracer* tracer) { |
659 was_marked_incrementally_ = heap()->incremental_marking()->IsMarking(); | 659 was_marked_incrementally_ = heap()->incremental_marking()->IsMarking(); |
660 | 660 |
| 661 // Disable collection of maps if incremental marking is enabled. |
| 662 // The map collection algorithm relies on a special map transition tree |
| 663 // traversal order, which is not implemented for incremental marking. |
| 664 collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_; |
| 665 |
661 // Monomorphic ICs are preserved when possible, but need to be flushed | 666 // Monomorphic ICs are preserved when possible, but need to be flushed |
662 // when they might be keeping a Context alive, or when the heap is about | 667 // when they might be keeping a Context alive, or when the heap is about |
663 // to be serialized. | 668 // to be serialized. |
664 flush_monomorphic_ics_ = | 669 flush_monomorphic_ics_ = |
665 heap()->isolate()->context_exit_happened() || Serializer::enabled(); | 670 heap()->isolate()->context_exit_happened() || Serializer::enabled(); |
666 | 671 |
667 // Rather than passing the tracer around we stash it in a member | 672 // Rather than passing the tracer around we stash it in a member |
668 // variable. | 673 // variable. |
669 tracer_ = tracer; | 674 tracer_ = tracer; |
670 | 675 |
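Latching the flag here means every later consumer, the collect_maps_ gate in ProcessNewlyMarkedObject() and the ClearNonLiveTransitions()/ReattachInitialMaps() choice in CollectGarbage(), observes one consistent decision for the whole cycle even though eligibility also depends on whether marking began incrementally. The pattern in isolation, with hypothetical names throughout:

    static bool FLAG_collect_maps = true;  // stand-in for V8's runtime flag

    class Collector {
     public:
      // Decide once per GC cycle; later phases branch on collect_maps()
      // instead of re-reading the flag or the incremental-marking state.
      void Prepare(bool was_marked_incrementally) {
        collect_maps_ = FLAG_collect_maps && !was_marked_incrementally;
      }
      bool collect_maps() const { return collect_maps_; }

     private:
      bool collect_maps_ = false;
    };

    int main() {
      Collector collector;
      collector.Prepare(/*was_marked_incrementally=*/true);
      // collect_maps() is now false for this cycle: map collection relies on
      // a traversal order incremental marking does not provide.
      return collector.collect_maps() ? 1 : 0;
    }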
(...skipping 1115 matching lines...)
1786 | 1791 |
1787 | 1792 |
1788 void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) { | 1793 void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) { |
1789 ASSERT(IsMarked(object)); | 1794 ASSERT(IsMarked(object)); |
1790 ASSERT(HEAP->Contains(object)); | 1795 ASSERT(HEAP->Contains(object)); |
1791 if (object->IsMap()) { | 1796 if (object->IsMap()) { |
1792 Map* map = Map::cast(object); | 1797 Map* map = Map::cast(object); |
1793 heap_->ClearCacheOnMap(map); | 1798 heap_->ClearCacheOnMap(map); |
1794 | 1799 |
1795 // When map collection is enabled we have to mark through map's transitions | 1800 // When map collection is enabled we have to mark through map's transitions |
1796 // in a special way to make transition links weak. Only maps for subclasses | 1801 // in a special way to make transition links weak. |
1797 // of JSReceiver can have transitions. | 1802 // Only maps for subclasses of JSReceiver can have transitions. |
1798 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); | 1803 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); |
1799 if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) { | 1804 if (collect_maps_ && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) { |
1800 marker_.MarkMapContents(map); | 1805 MarkMapContents(map); |
1801 } else { | 1806 } else { |
1802 marking_deque_.PushBlack(map); | 1807 marking_deque_.PushBlack(map); |
1803 } | 1808 } |
1804 } else { | 1809 } else { |
1805 marking_deque_.PushBlack(object); | 1810 marking_deque_.PushBlack(object); |
1806 } | 1811 } |
1807 } | 1812 } |
1808 | 1813 |
1809 | 1814 |
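ProcessNewlyMarkedObject() above special-cases maps so their transition links can stay weak, while every other object is pushed black onto the marking deque to have all of its fields traced later. The dispatch reduced to toy types (none of these are V8's):

    #include <vector>

    struct ToyObject { bool is_map = false; };

    class ToyMarker {
     public:
      void ProcessNewlyMarked(ToyObject* object) {
        if (object->is_map) {
          MarkMapContents(object);   // selective: transition links stay weak
        } else {
          deque_.push_back(object);  // strong: every field visited later
        }
      }

     private:
      void MarkMapContents(ToyObject*) { /* trace only strong map fields */ }
      std::vector<ToyObject*> deque_;
    };

    int main() {
      ToyMarker marker;
      ToyObject map, object;
      map.is_map = true;
      marker.ProcessNewlyMarked(&map);
      marker.ProcessNewlyMarked(&object);
    }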
1810 // Force instantiation of template instances. | 1815 void MarkCompactCollector::MarkMapContents(Map* map) { |
1811 template void Marker<IncrementalMarking>::MarkMapContents(Map* map); | |
1812 template void Marker<MarkCompactCollector>::MarkMapContents(Map* map); | |
1813 | |
1814 | |
1815 template <class T> | |
1816 void Marker<T>::MarkMapContents(Map* map) { | |
1817 // Mark prototype transitions array but don't push it into marking stack. | 1816 // Mark prototype transitions array but don't push it into marking stack. |
1818 // This will make references from it weak. We will clean dead prototype | 1817 // This will make references from it weak. We will clean dead prototype |
1819 // transitions in ClearNonLiveTransitions. | 1818 // transitions in ClearNonLiveTransitions. But make sure that back pointers |
1820 Object** proto_trans_slot = | 1819 // stored inside prototype transitions arrays are marked. |
1821 HeapObject::RawField(map, Map::kPrototypeTransitionsOrBackPointerOffset); | 1820 Object* raw_proto_transitions = map->unchecked_prototype_transitions(); |
1822 HeapObject* prototype_transitions = HeapObject::cast(*proto_trans_slot); | 1821 if (raw_proto_transitions->IsFixedArray()) { |
1823 if (prototype_transitions->IsFixedArray()) { | 1822 FixedArray* prototype_transitions = FixedArray::cast(raw_proto_transitions); |
1824 mark_compact_collector()->RecordSlot(proto_trans_slot, | |
1825 proto_trans_slot, | |
1826 prototype_transitions); | |
1827 MarkBit mark = Marking::MarkBitFrom(prototype_transitions); | 1823 MarkBit mark = Marking::MarkBitFrom(prototype_transitions); |
1828 if (!mark.Get()) { | 1824 if (!mark.Get()) { |
1829 mark.Set(); | 1825 mark.Set(); |
1830 MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(), | 1826 MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(), |
1831 prototype_transitions->Size()); | 1827 prototype_transitions->Size()); |
| 1828 MarkObjectAndPush(HeapObject::cast( |
| 1829 prototype_transitions->get(Map::kProtoTransitionBackPointerOffset))); |
1832 } | 1830 } |
1833 } | 1831 } |
1834 | 1832 |
1835 // Make sure that the back pointer stored either in the map itself or inside | 1833 Object** raw_descriptor_array_slot = |
1836 // its prototype transitions array is marked. Treat pointers in the descriptor | |
1837 // array as weak and also mark that array to prevent visiting it later. | |
1838 base_marker()->MarkObjectAndPush(HeapObject::cast(map->GetBackPointer())); | |
1839 | |
1840 Object** descriptor_array_slot = | |
1841 HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset); | 1834 HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset); |
1842 Object* descriptor_array = *descriptor_array_slot; | 1835 Object* raw_descriptor_array = *raw_descriptor_array_slot; |
1843 if (!descriptor_array->IsSmi()) { | 1836 if (!raw_descriptor_array->IsSmi()) { |
1844 MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(descriptor_array)); | 1837 MarkDescriptorArray( |
| 1838 reinterpret_cast<DescriptorArray*>(raw_descriptor_array)); |
1845 } | 1839 } |
1846 | 1840 |
1847 // Mark the Object* fields of the Map. Since the descriptor array has been | 1841 // Mark the Object* fields of the Map. |
1848 // marked already, it is fine that one of these fields contains a pointer | 1842 // Since the descriptor array has been marked already, it is fine |
1849 // to it. But make sure to skip back pointer and prototype transitions. | 1843 // that one of these fields contains a pointer to it. |
1850 STATIC_ASSERT(Map::kPointerFieldsEndOffset == | 1844 Object** start_slot = HeapObject::RawField(map, |
1851 Map::kPrototypeTransitionsOrBackPointerOffset + kPointerSize); | 1845 Map::kPointerFieldsBeginOffset); |
1852 Object** start_slot = HeapObject::RawField( | 1846 |
1853 map, Map::kPointerFieldsBeginOffset); | 1847 Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset); |
1854 Object** end_slot = HeapObject::RawField( | 1848 |
1855 map, Map::kPrototypeTransitionsOrBackPointerOffset); | 1849 StaticMarkingVisitor::VisitPointers(map->GetHeap(), start_slot, end_slot); |
1856 for (Object** slot = start_slot; slot < end_slot; slot++) { | |
1857 Object* obj = *slot; | |
1858 if (!obj->NonFailureIsHeapObject()) continue; | |
1859 mark_compact_collector()->RecordSlot(start_slot, slot, obj); | |
1860 base_marker()->MarkObjectAndPush(reinterpret_cast<HeapObject*>(obj)); | |
1861 } | |
1862 } | 1850 } |
1863 | 1851 |
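The heart of MarkMapContents() is marking the prototype-transitions array without pushing it on the deque: the array itself survives, but because its body is never traced, the maps it references stay alive only if reached some other way, which is exactly what makes those links weak (the NEW side then marks the back pointer stored in the array by hand). The two marking modes as a toy:

    #include <vector>

    struct Obj {
      bool marked = false;
    };

    // Strong marking: the object is queued so its fields get traced too.
    void MarkAndPush(Obj* object, std::vector<Obj*>* deque) {
      if (object->marked) return;
      object->marked = true;
      deque->push_back(object);  // body scanned later; fields marked strongly
    }

    // Weak-container marking: the object survives, its body is never scanned,
    // so references out of it keep nothing alive on their own.
    void MarkWithoutPush(Obj* object) { object->marked = true; }

    int main() {
      std::vector<Obj*> deque;
      Obj container, element;
      MarkWithoutPush(&container);    // container live, its links left weak
      MarkAndPush(&element, &deque);  // needed to keep element itself alive
    }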
1864 | 1852 |
1865 template <class T> | 1853 void MarkCompactCollector::MarkAccessorPairSlot(HeapObject* accessors, |
1866 void Marker<T>::MarkDescriptorArray(DescriptorArray* descriptors) { | 1854 int offset) { |
| 1855 Object** slot = HeapObject::RawField(accessors, offset); |
| 1856 HeapObject* accessor = HeapObject::cast(*slot); |
| 1857 if (accessor->IsMap()) return; |
| 1858 RecordSlot(slot, slot, accessor); |
| 1859 MarkObjectAndPush(accessor); |
| 1860 } |
| 1861 |
| 1862 |
| 1863 void MarkCompactCollector::MarkDescriptorArray( |
| 1864 DescriptorArray* descriptors) { |
| 1865 MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors); |
| 1866 if (descriptors_mark.Get()) return; |
1867 // Empty descriptor array is marked as a root before any maps are marked. | 1867 // Empty descriptor array is marked as a root before any maps are marked. |
1868 ASSERT(descriptors != descriptors->GetHeap()->empty_descriptor_array()); | 1868 ASSERT(descriptors != heap()->empty_descriptor_array()); |
| 1869 SetMark(descriptors, descriptors_mark); |
1869 | 1870 |
1870 // The DescriptorArray contains a pointer to its contents array, but the | 1871 FixedArray* contents = reinterpret_cast<FixedArray*>( |
1871 // contents array will be marked black and hence not be visited again. | |
1872 if (!base_marker()->MarkObjectAndPush(descriptors)) return; | |
1873 FixedArray* contents = FixedArray::cast( | |
1874 descriptors->get(DescriptorArray::kContentArrayIndex)); | 1872 descriptors->get(DescriptorArray::kContentArrayIndex)); |
| 1873 ASSERT(contents->IsHeapObject()); |
| 1874 ASSERT(!IsMarked(contents)); |
| 1875 ASSERT(contents->IsFixedArray()); |
1875 ASSERT(contents->length() >= 2); | 1876 ASSERT(contents->length() >= 2); |
1876 ASSERT(Marking::IsWhite(Marking::MarkBitFrom(contents))); | 1877 MarkBit contents_mark = Marking::MarkBitFrom(contents); |
1877 base_marker()->MarkObjectWithoutPush(contents); | 1878 SetMark(contents, contents_mark); |
1878 | 1879 // Contents contains (value, details) pairs. If the details say that the type |
1879 // Contents contains (value, details) pairs. If the descriptor contains a | 1880 // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION or NULL_DESCRIPTOR, |
1880 // transition (value is a Map), we don't mark the value as live. It might | 1881 // we don't mark the value as live. Only for MAP_TRANSITION and |
1881 // be set to the NULL_DESCRIPTOR in ClearNonLiveTransitions later. | 1882 // CONSTANT_TRANSITION is the value an Object* (a Map*); ELEMENTS_TRANSITION |
 | 1883 // values get their own handling in the switch below. |
1882 for (int i = 0; i < contents->length(); i += 2) { | 1884 for (int i = 0; i < contents->length(); i += 2) { |
| 1885 // If the pair (value, details) at index i, i+1 is not |
| 1886 // a transition or null descriptor, mark the value. |
1883 PropertyDetails details(Smi::cast(contents->get(i + 1))); | 1887 PropertyDetails details(Smi::cast(contents->get(i + 1))); |
1884 | 1888 |
1885 Object** slot = contents->data_start() + i; | 1889 Object** slot = contents->data_start() + i; |
1886 if (!(*slot)->IsHeapObject()) continue; | 1890 if (!(*slot)->IsHeapObject()) continue; |
1887 HeapObject* value = HeapObject::cast(*slot); | 1891 HeapObject* value = HeapObject::cast(*slot); |
1888 | 1892 |
1889 mark_compact_collector()->RecordSlot(slot, slot, *slot); | 1893 RecordSlot(slot, slot, *slot); |
1890 | 1894 |
1891 switch (details.type()) { | 1895 switch (details.type()) { |
1892 case NORMAL: | 1896 case NORMAL: |
1893 case FIELD: | 1897 case FIELD: |
1894 case CONSTANT_FUNCTION: | 1898 case CONSTANT_FUNCTION: |
1895 case HANDLER: | 1899 case HANDLER: |
1896 case INTERCEPTOR: | 1900 case INTERCEPTOR: |
1897 base_marker()->MarkObjectAndPush(value); | 1901 MarkObjectAndPush(value); |
1898 break; | 1902 break; |
1899 case CALLBACKS: | 1903 case CALLBACKS: |
1900 if (!value->IsAccessorPair()) { | 1904 if (!value->IsAccessorPair()) { |
1901 base_marker()->MarkObjectAndPush(value); | 1905 MarkObjectAndPush(value); |
1902 } else if (base_marker()->MarkObjectWithoutPush(value)) { | 1906 } else if (!MarkObjectWithoutPush(value)) { |
1903 AccessorPair* accessors = AccessorPair::cast(value); | 1907 MarkAccessorPairSlot(value, AccessorPair::kGetterOffset); |
1904 MarkAccessorPairSlot(accessors, AccessorPair::kGetterOffset); | 1908 MarkAccessorPairSlot(value, AccessorPair::kSetterOffset); |
1905 MarkAccessorPairSlot(accessors, AccessorPair::kSetterOffset); | |
1906 } | 1909 } |
1907 break; | 1910 break; |
1908 case ELEMENTS_TRANSITION: | 1911 case ELEMENTS_TRANSITION: |
1909 // For maps with multiple elements transitions, the transition maps are | 1912 // For maps with multiple elements transitions, the transition maps are |
1910 // stored in a FixedArray. Keep the fixed array alive but not the maps | 1913 // stored in a FixedArray. Keep the fixed array alive but not the maps |
1911 // that it refers to. | 1914 // that it refers to. |
1912 if (value->IsFixedArray()) base_marker()->MarkObjectWithoutPush(value); | 1915 if (value->IsFixedArray()) MarkObjectWithoutPush(value); |
1913 break; | 1916 break; |
1914 case MAP_TRANSITION: | 1917 case MAP_TRANSITION: |
1915 case CONSTANT_TRANSITION: | 1918 case CONSTANT_TRANSITION: |
1916 case NULL_DESCRIPTOR: | 1919 case NULL_DESCRIPTOR: |
1917 break; | 1920 break; |
1918 } | 1921 } |
1919 } | 1922 } |
| 1923 // The DescriptorArray descriptors contains a pointer to its contents array, |
| 1924 // but the contents array is already marked. |
| 1925 marking_deque_.PushBlack(descriptors); |
1920 } | 1926 } |
1921 | 1927 |
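The loop above steps through the contents array two slots at a time, one (value, details) pair per descriptor, records the slot, and marks the value strongly only for non-transition kinds, leaving transitions collectable. A toy rendition of the selective pass (the enum is a reduced, illustrative subset):

    #include <vector>

    enum ToyType { NORMAL, FIELD, MAP_TRANSITION, NULL_DESCRIPTOR };

    struct ToyValue { bool marked = false; };
    struct ToyPair { ToyValue* value; ToyType type; };  // flattened pair

    void MarkDescriptorValues(const std::vector<ToyPair>& contents) {
      for (const ToyPair& pair : contents) {
        switch (pair.type) {
          case NORMAL:
          case FIELD:
            pair.value->marked = true;  // ordinary property: strong
            break;
          case MAP_TRANSITION:
          case NULL_DESCRIPTOR:
            break;  // transition: left weak, cleared later if the map dies
        }
      }
    }

    int main() {
      ToyValue function, transition_map;
      std::vector<ToyPair> contents = {
          {&function, FIELD}, {&transition_map, MAP_TRANSITION}};
      MarkDescriptorValues(contents);  // only `function` ends up marked
    }

Note also the CALLBACKS guard: the two columns' MarkObjectWithoutPush helpers appear to use opposite return conventions (OLD: true when newly marked, NEW: true when already marked), so each side still processes an AccessorPair's getter and setter exactly once.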
1922 | 1928 |
1923 template <class T> | |
1924 void Marker<T>::MarkAccessorPairSlot(AccessorPair* accessors, int offset) { | |
1925 Object** slot = HeapObject::RawField(accessors, offset); | |
1926 HeapObject* accessor = HeapObject::cast(*slot); | |
1927 if (accessor->IsMap()) return; | |
1928 mark_compact_collector()->RecordSlot(slot, slot, accessor); | |
1929 base_marker()->MarkObjectAndPush(accessor); | |
1930 } | |
1931 | |
1932 | |
1933 // Fill the marking stack with overflowed objects returned by the given | 1929 // Fill the marking stack with overflowed objects returned by the given |
1934 // iterator. Stop when the marking stack is filled or the end of the space | 1930 // iterator. Stop when the marking stack is filled or the end of the space |
1935 // is reached, whichever comes first. | 1931 // is reached, whichever comes first. |
1936 template<class T> | 1932 template<class T> |
1937 static void DiscoverGreyObjectsWithIterator(Heap* heap, | 1933 static void DiscoverGreyObjectsWithIterator(Heap* heap, |
1938 MarkingDeque* marking_deque, | 1934 MarkingDeque* marking_deque, |
1939 T* it) { | 1935 T* it) { |
1940 // The caller should ensure that the marking stack is initially not full, | 1936 // The caller should ensure that the marking stack is initially not full, |
1941 // so that we don't waste effort pointlessly scanning for objects. | 1937 // so that we don't waste effort pointlessly scanning for objects. |
1942 ASSERT(!marking_deque->IsFull()); | 1938 ASSERT(!marking_deque->IsFull()); |
(...skipping 2157 matching lines...)
4100 while (buffer != NULL) { | 4096 while (buffer != NULL) { |
4101 SlotsBuffer* next_buffer = buffer->next(); | 4097 SlotsBuffer* next_buffer = buffer->next(); |
4102 DeallocateBuffer(buffer); | 4098 DeallocateBuffer(buffer); |
4103 buffer = next_buffer; | 4099 buffer = next_buffer; |
4104 } | 4100 } |
4105 *buffer_address = NULL; | 4101 *buffer_address = NULL; |
4106 } | 4102 } |
4107 | 4103 |
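The closing hunk's context tears down a SlotsBuffer chain: save next before freeing, then clear the caller's head pointer through the passed-in address so the owner cannot dangle. The idiom reduced, with delete standing in for DeallocateBuffer():

    struct Buffer {
      explicit Buffer(Buffer* next) : next_(next) {}
      Buffer* next() const { return next_; }
     private:
      Buffer* next_;
    };

    // Frees every buffer in the chain and nulls the owner's head pointer.
    void ReleaseChain(Buffer** buffer_address) {
      Buffer* buffer = *buffer_address;
      while (buffer != nullptr) {
        Buffer* next_buffer = buffer->next();  // grab before deallocating
        delete buffer;
        buffer = next_buffer;
      }
      *buffer_address = nullptr;
    }

    int main() {
      Buffer* head = new Buffer(new Buffer(nullptr));
      ReleaseChain(&head);  // head is nullptr afterwards
    }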
4108 | 4104 |
4109 } } // namespace v8::internal | 4105 } } // namespace v8::internal |