OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1048 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1059 if (end - start >= kMinRangeForMarkingRecursion) { | 1059 if (end - start >= kMinRangeForMarkingRecursion) { |
1060 if (VisitUnmarkedObjects(heap, start, end)) return; | 1060 if (VisitUnmarkedObjects(heap, start, end)) return; |
1061 // We are close to a stack overflow, so just mark the objects. | 1061 // We are close to a stack overflow, so just mark the objects. |
1062 } | 1062 } |
1063 MarkCompactCollector* collector = heap->mark_compact_collector(); | 1063 MarkCompactCollector* collector = heap->mark_compact_collector(); |
1064 for (Object** p = start; p < end; p++) { | 1064 for (Object** p = start; p < end; p++) { |
1065 MarkObjectByPointer(collector, start, p); | 1065 MarkObjectByPointer(collector, start, p); |
1066 } | 1066 } |
1067 } | 1067 } |
1068 | 1068 |
1069 static void VisitHugeFixedArray(Heap* heap, FixedArray* array, int length); | |
1070 | |
1071 // The deque is contiguous and we use new space, it is therefore contained in | |
1072 // one page minus the header. It also has a size that is a power of two so | |
1073 // it is half the size of a page. We want to scan a number of array entries | |
1074 // that is less than the number of entries in the deque, so we divide by 2 | |
1075 // once more. | |
1076 static const int kScanningChunk = Page::kPageSize / 4 / kPointerSize; | |
1077 | |
1078 INLINE(static void VisitFixedArray(Map* map, HeapObject* object)) { | |
1079 FixedArray* array = FixedArray::cast(object); | |
1080 int length = array->length(); | |
1081 Heap* heap = map->GetHeap(); | |
1082 | |
1083 if (length < kScanningChunk || | |
1084 MemoryChunk::FromAddress(array->address())->owner()->identity() != | |
1085 LO_SPACE) { | |
1086 Object** start = array->data_start(); | |
1087 VisitPointers(heap, start, start + length); | |
1088 } else { | |
1089 VisitHugeFixedArray(heap, array, length); | |
1090 } | |
1091 } | |
1092 | |
1069 // Marks the object black and pushes it on the marking stack. | 1093 // Marks the object black and pushes it on the marking stack. |
1070 INLINE(static void MarkObject(Heap* heap, HeapObject* object)) { | 1094 INLINE(static void MarkObject(Heap* heap, HeapObject* object)) { |
1071 MarkBit mark = Marking::MarkBitFrom(object); | 1095 MarkBit mark = Marking::MarkBitFrom(object); |
1072 heap->mark_compact_collector()->MarkObject(object, mark); | 1096 heap->mark_compact_collector()->MarkObject(object, mark); |
1073 } | 1097 } |
1074 | 1098 |
1075 // Marks the object black without pushing it on the marking stack. | 1099 // Marks the object black without pushing it on the marking stack. |
1076 // Returns true if object needed marking and false otherwise. | 1100 // Returns true if object needed marking and false otherwise. |
1077 INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) { | 1101 INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) { |
1078 MarkBit mark_bit = Marking::MarkBitFrom(object); | 1102 MarkBit mark_bit = Marking::MarkBitFrom(object); |
(...skipping 418 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1497 heap, | 1521 heap, |
1498 HeapObject::RawField(object, | 1522 HeapObject::RawField(object, |
1499 SharedFunctionInfo::kOptimizedCodeMapOffset), | 1523 SharedFunctionInfo::kOptimizedCodeMapOffset), |
1500 HeapObject::RawField(object, SharedFunctionInfo::kSize)); | 1524 HeapObject::RawField(object, SharedFunctionInfo::kSize)); |
1501 } | 1525 } |
1502 | 1526 |
1503 static VisitorDispatchTable<Callback> non_count_table_; | 1527 static VisitorDispatchTable<Callback> non_count_table_; |
1504 }; | 1528 }; |
1505 | 1529 |
1506 | 1530 |
1531 void MarkCompactMarkingVisitor::VisitHugeFixedArray(Heap* heap, | |
1532 FixedArray* array, | |
1533 int length) { | |
1534 MemoryChunk* chunk = MemoryChunk::FromAddress(array->address()); | |
1535 | |
1536 ASSERT(chunk->owner()->identity() == LO_SPACE); | |
1537 | |
1538 Object** start = array->data_start(); | |
1539 int from = | |
1540 chunk->IsPartiallyScanned() ? chunk->PartiallyScannedProgress() : 0; | |
1541 int to = Min(from + kScanningChunk, length); | |
1542 VisitPointers(heap, start + from, start + to); | |
1543 | |
1544 if (to == length) { | |
1545 chunk->SetCompletelyScanned(); | |
1546 } else { | |
1547 chunk->SetPartiallyScannedProgress(to); | |
1548 } | |
1549 } | |
1550 | |
1551 | |
1507 void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray( | 1552 void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray( |
1508 FixedArrayBase* fixed_array, | 1553 FixedArrayBase* fixed_array, |
1509 FixedArraySubInstanceType fast_type, | 1554 FixedArraySubInstanceType fast_type, |
1510 FixedArraySubInstanceType dictionary_type) { | 1555 FixedArraySubInstanceType dictionary_type) { |
1511 Heap* heap = fixed_array->map()->GetHeap(); | 1556 Heap* heap = fixed_array->map()->GetHeap(); |
1512 if (fixed_array->map() != heap->fixed_cow_array_map() && | 1557 if (fixed_array->map() != heap->fixed_cow_array_map() && |
1513 fixed_array->map() != heap->fixed_double_array_map() && | 1558 fixed_array->map() != heap->fixed_double_array_map() && |
1514 fixed_array != heap->empty_fixed_array()) { | 1559 fixed_array != heap->empty_fixed_array()) { |
1515 if (fixed_array->IsDictionary()) { | 1560 if (fixed_array->IsDictionary()) { |
1516 heap->RecordObjectStats(FIXED_ARRAY_TYPE, | 1561 heap->RecordObjectStats(FIXED_ARRAY_TYPE, |
(...skipping 121 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1638 | 1683 |
1639 table_.Register(kVisitSharedFunctionInfo, | 1684 table_.Register(kVisitSharedFunctionInfo, |
1640 &VisitSharedFunctionInfoAndFlushCode); | 1685 &VisitSharedFunctionInfoAndFlushCode); |
1641 | 1686 |
1642 table_.Register(kVisitJSFunction, | 1687 table_.Register(kVisitJSFunction, |
1643 &VisitJSFunctionAndFlushCode); | 1688 &VisitJSFunctionAndFlushCode); |
1644 | 1689 |
1645 table_.Register(kVisitJSRegExp, | 1690 table_.Register(kVisitJSRegExp, |
1646 &VisitRegExpAndFlushCode); | 1691 &VisitRegExpAndFlushCode); |
1647 | 1692 |
1693 table_.Register(kVisitFixedArray, | |
1694 &VisitFixedArray); | |
1695 | |
1648 if (FLAG_track_gc_object_stats) { | 1696 if (FLAG_track_gc_object_stats) { |
1649 // Copy the visitor table to make call-through possible. | 1697 // Copy the visitor table to make call-through possible. |
1650 non_count_table_.CopyFrom(&table_); | 1698 non_count_table_.CopyFrom(&table_); |
1651 #define VISITOR_ID_COUNT_FUNCTION(id) \ | 1699 #define VISITOR_ID_COUNT_FUNCTION(id) \ |
1652 table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit); | 1700 table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit); |
1653 VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION) | 1701 VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION) |
1654 #undef VISITOR_ID_COUNT_FUNCTION | 1702 #undef VISITOR_ID_COUNT_FUNCTION |
1655 } | 1703 } |
1656 } | 1704 } |
1657 | 1705 |
(...skipping 451 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
2109 ref_groups->Rewind(last); | 2157 ref_groups->Rewind(last); |
2110 } | 2158 } |
2111 | 2159 |
2112 | 2160 |
2113 // Mark all objects reachable from the objects on the marking stack. | 2161 // Mark all objects reachable from the objects on the marking stack. |
2114 // Before: the marking stack contains zero or more heap object pointers. | 2162 // Before: the marking stack contains zero or more heap object pointers. |
2115 // After: the marking stack is empty, and all objects reachable from the | 2163 // After: the marking stack is empty, and all objects reachable from the |
2116 // marking stack have been marked, or are overflowed in the heap. | 2164 // marking stack have been marked, or are overflowed in the heap. |
2117 void MarkCompactCollector::EmptyMarkingDeque() { | 2165 void MarkCompactCollector::EmptyMarkingDeque() { |
2118 while (!marking_deque_.IsEmpty()) { | 2166 while (!marking_deque_.IsEmpty()) { |
2119 while (!marking_deque_.IsEmpty()) { | 2167 do { |
2120 HeapObject* object = marking_deque_.Pop(); | 2168 while (!marking_deque_.IsEmpty()) { |
2121 ASSERT(object->IsHeapObject()); | 2169 HeapObject* object = marking_deque_.Pop(); |
2122 ASSERT(heap()->Contains(object)); | 2170 ASSERT(object->IsHeapObject()); |
2123 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object))); | 2171 ASSERT(heap()->Contains(object)); |
2172 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object))); | |
2124 | 2173 |
2125 Map* map = object->map(); | 2174 Map* map = object->map(); |
2126 MarkBit map_mark = Marking::MarkBitFrom(map); | 2175 MarkBit map_mark = Marking::MarkBitFrom(map); |
2127 MarkObject(map, map_mark); | 2176 MarkObject(map, map_mark); |
2128 | 2177 |
2129 MarkCompactMarkingVisitor::IterateBody(map, object); | 2178 MarkCompactMarkingVisitor::IterateBody(map, object); |
2130 } | 2179 } |
2180 FillMarkingDequeFromLargePostponedArrays(); | |
2181 } while (!marking_deque_.IsEmpty()); | |
Michael Starzinger
2012/09/20 13:29:48
Instead of adding yet another loop we could just r
| |
2131 | 2182 |
2132 // Process encountered weak maps, mark objects only reachable by those | 2183 // Process encountered weak maps, mark objects only reachable by those |
2133 // weak maps and repeat until fix-point is reached. | 2184 // weak maps and repeat until fix-point is reached. |
2134 ProcessWeakMaps(); | 2185 ProcessWeakMaps(); |
2135 } | 2186 } |
2136 } | 2187 } |
2137 | 2188 |
2138 | 2189 |
2190 void MarkCompactCollector::FillMarkingDequeFromLargePostponedArrays() { | |
Michael Starzinger
2012/09/20 13:29:48
How about naming this "ProcessLargePostponedArrays"?
| |
2191 ASSERT(marking_deque_.IsEmpty()); | |
2192 LargeObjectIterator it(heap_->lo_space()); | |
2193 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { | |
2194 if (!obj->IsFixedArray()) continue; | |
2195 MemoryChunk* p = MemoryChunk::FromAddress(obj->address()); | |
2196 if (p->IsPartiallyScanned()) { | |
2197 marking_deque_.PushBlack(obj); | |
2198 } | |
2199 } | |
2200 } | |
2201 | |
2202 | |
2139 // Sweep the heap for overflowed objects, clear their overflow bits, and | 2203 // Sweep the heap for overflowed objects, clear their overflow bits, and |
2140 // push them on the marking stack. Stop early if the marking stack fills | 2204 // push them on the marking stack. Stop early if the marking stack fills |
2141 // before sweeping completes. If sweeping completes, there are no remaining | 2205 // before sweeping completes. If sweeping completes, there are no remaining |
2142 // overflowed objects in the heap so the overflow flag on the markings stack | 2206 // overflowed objects in the heap so the overflow flag on the markings stack |
2143 // is cleared. | 2207 // is cleared. |
2144 void MarkCompactCollector::RefillMarkingDeque() { | 2208 void MarkCompactCollector::RefillMarkingDeque() { |
2209 if (FLAG_trace_gc) { | |
2210 PrintPID("Marking queue overflowed\n"); | |
2211 } | |
2145 ASSERT(marking_deque_.overflowed()); | 2212 ASSERT(marking_deque_.overflowed()); |
2146 | 2213 |
2147 SemiSpaceIterator new_it(heap()->new_space()); | 2214 SemiSpaceIterator new_it(heap()->new_space()); |
2148 DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &new_it); | 2215 DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &new_it); |
2149 if (marking_deque_.IsFull()) return; | 2216 if (marking_deque_.IsFull()) return; |
2150 | 2217 |
2151 DiscoverGreyObjectsInSpace(heap(), | 2218 DiscoverGreyObjectsInSpace(heap(), |
2152 &marking_deque_, | 2219 &marking_deque_, |
2153 heap()->old_pointer_space()); | 2220 heap()->old_pointer_space()); |
2154 if (marking_deque_.IsFull()) return; | 2221 if (marking_deque_.IsFull()) return; |
(...skipping 1910 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
4065 while (buffer != NULL) { | 4132 while (buffer != NULL) { |
4066 SlotsBuffer* next_buffer = buffer->next(); | 4133 SlotsBuffer* next_buffer = buffer->next(); |
4067 DeallocateBuffer(buffer); | 4134 DeallocateBuffer(buffer); |
4068 buffer = next_buffer; | 4135 buffer = next_buffer; |
4069 } | 4136 } |
4070 *buffer_address = NULL; | 4137 *buffer_address = NULL; |
4071 } | 4138 } |
4072 | 4139 |
4073 | 4140 |
4074 } } // namespace v8::internal | 4141 } } // namespace v8::internal |
OLD | NEW |