OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1035 matching lines...) | |
1046 public: | 1046 public: |
1047 static inline void Visit(Map* map, HeapObject* obj); | 1047 static inline void Visit(Map* map, HeapObject* obj); |
1048 }; | 1048 }; |
1049 | 1049 |
1050 static void Initialize(); | 1050 static void Initialize(); |
1051 | 1051 |
1052 INLINE(static void VisitPointer(Heap* heap, Object** p)) { | 1052 INLINE(static void VisitPointer(Heap* heap, Object** p)) { |
1053 MarkObjectByPointer(heap->mark_compact_collector(), p, p); | 1053 MarkObjectByPointer(heap->mark_compact_collector(), p, p); |
1054 } | 1054 } |
1055 | 1055 |
1056 INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) { | 1056 INLINE(static void VisitPointers(Heap* heap, |
1057 Object** anchor, | |
1058 Object** start, | |
1059 Object** end)) { | |
1057 // Mark all objects pointed to in [start, end). | 1060 // Mark all objects pointed to in [start, end). |
1058 const int kMinRangeForMarkingRecursion = 64; | 1061 const int kMinRangeForMarkingRecursion = 64; |
1059 if (end - start >= kMinRangeForMarkingRecursion) { | 1062 if (end - start >= kMinRangeForMarkingRecursion) { |
1060 if (VisitUnmarkedObjects(heap, start, end)) return; | 1063 if (VisitUnmarkedObjects(heap, anchor, start, end)) return; |
1061 // We are close to a stack overflow, so just mark the objects. | 1064 // We are close to a stack overflow, so just mark the objects. |
1062 } | 1065 } |
1063 MarkCompactCollector* collector = heap->mark_compact_collector(); | 1066 MarkCompactCollector* collector = heap->mark_compact_collector(); |
1064 for (Object** p = start; p < end; p++) { | 1067 for (Object** p = start; p < end; p++) { |
1065 MarkObjectByPointer(collector, start, p); | 1068 MarkObjectByPointer(collector, anchor, p); |
1069 } | |
1070 } | |
1071 | |
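The new `anchor` parameter deserves a note: with chunked scanning, `[start, end)` may be only a sub-range of a huge array, so slot recording can no longer assume `start` is the holding object's first field. A minimal standalone sketch of the idea (hypothetical types; in the real code the bookkeeping happens in `RecordSlot`):

```cpp
#include <cstdio>

// Hypothetical illustration of the anchor/start/end split: the scan range
// [start, end) may be a sub-chunk of a huge array, while `anchor` stays at
// the holder's first slot so per-object slot recording stays correct.
typedef int Object;

static void VisitRange(Object** anchor, Object** start, Object** end) {
  for (Object** p = start; p < end; p++) {
    // Real code: collector->RecordSlot(anchor, p, *p);
    std::printf("visited slot %ld of the holder\n",
                static_cast<long>(p - anchor));
  }
}

int main() {
  Object a = 1, b = 2, c = 3, d = 4;
  Object* fields[4] = {&a, &b, &c, &d};
  // Scan only the second half as its own chunk; slot indices stay relative
  // to the holder because `anchor` does not move with `start`.
  VisitRange(fields, fields + 2, fields + 4);  // prints slots 2 and 3
  return 0;
}
```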
1072 static void VisitHugeFixedArray(Heap* heap, FixedArray* array, int length); | |
1073 | |
1074 // The deque is contiguous and allocated in new space, so it is | |
1075 // contained in one page minus the header. Its size is also a power of | |
1076 // two, so it is half the size of a page. We want to scan a number of | |
1077 // array entries that is less than the number of entries in the deque, | |
1078 // so we divide by 2 once more. | |
1079 static const int kScanningChunk = Page::kPageSize / 4 / kPointerSize; | |
1080 | |
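As a sanity check of the arithmetic above, here is a hypothetical standalone computation, assuming a 1 MB page and 8-byte pointers (illustrative values for a 64-bit build of this era; the real values come from `Page::kPageSize` and `kPointerSize` and vary by configuration):

```cpp
#include <cstdio>

// Hypothetical stand-ins for V8's constants (assumptions, not the real
// definitions): a 1 MB page and 8-byte pointers.
static const int kPageSize = 1 << 20;
static const int kPointerSize = 8;

// Half a page holds the deque; halving again keeps one scanning chunk
// strictly smaller than the deque's capacity in entries.
static const int kScanningChunk = kPageSize / 4 / kPointerSize;

int main() {
  std::printf("deque capacity: %d entries\n", kPageSize / 2 / kPointerSize);
  std::printf("kScanningChunk: %d entries\n", kScanningChunk);  // 32768
  return 0;
}
```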
1081 INLINE(static void VisitFixedArray(Map* map, HeapObject* object)) { | |
1082 FixedArray* array = FixedArray::cast(object); | |
1083 int length = array->length(); | |
1084 Heap* heap = map->GetHeap(); | |
1085 | |
1086 if (length < kScanningChunk || | |
1087 MemoryChunk::FromAddress(array->address())->owner()->identity() != | |
1088 LO_SPACE) { | |
1089 Object** start = array->data_start(); | |
1090 VisitPointers(heap, start, start, start + length); | |
1091 } else { | |
1092 VisitHugeFixedArray(heap, array, length); | |
1066 } | 1093 } |
1067 } | 1094 } |
1068 | 1095 |
1069 // Marks the object black and pushes it on the marking stack. | 1096 // Marks the object black and pushes it on the marking stack. |
1070 INLINE(static void MarkObject(Heap* heap, HeapObject* object)) { | 1097 INLINE(static void MarkObject(Heap* heap, HeapObject* object)) { |
1071 MarkBit mark = Marking::MarkBitFrom(object); | 1098 MarkBit mark = Marking::MarkBitFrom(object); |
1072 heap->mark_compact_collector()->MarkObject(object, mark); | 1099 heap->mark_compact_collector()->MarkObject(object, mark); |
1073 } | 1100 } |
1074 | 1101 |
1075 // Marks the object black without pushing it on the marking stack. | 1102 // Marks the object black without pushing it on the marking stack. |
(...skipping 32 matching lines...) | |
1108 heap->mark_compact_collector()->SetMark(obj, mark); | 1135 heap->mark_compact_collector()->SetMark(obj, mark); |
1109 // Mark the map pointer and the body. | 1136 // Mark the map pointer and the body. |
1110 MarkBit map_mark = Marking::MarkBitFrom(map); | 1137 MarkBit map_mark = Marking::MarkBitFrom(map); |
1111 heap->mark_compact_collector()->MarkObject(map, map_mark); | 1138 heap->mark_compact_collector()->MarkObject(map, map_mark); |
1112 IterateBody(map, obj); | 1139 IterateBody(map, obj); |
1113 } | 1140 } |
1114 | 1141 |
1115 // Visit all unmarked objects pointed to by [start, end). | 1142 // Visit all unmarked objects pointed to by [start, end). |
1116 // Returns false if the operation fails (lack of stack space). | 1143 // Returns false if the operation fails (lack of stack space). |
1117 static inline bool VisitUnmarkedObjects(Heap* heap, | 1144 static inline bool VisitUnmarkedObjects(Heap* heap, |
1145 Object** anchor, | |
1118 Object** start, | 1146 Object** start, |
1119 Object** end) { | 1147 Object** end) { |
1120 // Return false if we are close to the stack limit. | 1148 // Return false if we are close to the stack limit. |
1121 StackLimitCheck check(heap->isolate()); | 1149 StackLimitCheck check(heap->isolate()); |
1122 if (check.HasOverflowed()) return false; | 1150 if (check.HasOverflowed()) return false; |
1123 | 1151 |
1124 MarkCompactCollector* collector = heap->mark_compact_collector(); | 1152 MarkCompactCollector* collector = heap->mark_compact_collector(); |
1125 // Visit the unmarked objects. | 1153 // Visit the unmarked objects. |
1126 for (Object** p = start; p < end; p++) { | 1154 for (Object** p = start; p < end; p++) { |
1127 Object* o = *p; | 1155 Object* o = *p; |
1128 if (!o->IsHeapObject()) continue; | 1156 if (!o->IsHeapObject()) continue; |
1129 collector->RecordSlot(start, p, o); | 1157 collector->RecordSlot(anchor, p, o); |
1130 HeapObject* obj = HeapObject::cast(o); | 1158 HeapObject* obj = HeapObject::cast(o); |
1131 MarkBit mark = Marking::MarkBitFrom(obj); | 1159 MarkBit mark = Marking::MarkBitFrom(obj); |
1132 if (mark.Get()) continue; | 1160 if (mark.Get()) continue; |
1133 VisitUnmarkedObject(collector, obj); | 1161 VisitUnmarkedObject(collector, obj); |
1134 } | 1162 } |
1135 return true; | 1163 return true; |
1136 } | 1164 } |
1137 | 1165 |
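The stack-limit bailout in `VisitUnmarkedObjects` is worth seeing in isolation: recursion proceeds only while there is stack headroom, and a `false` return tells the caller to fall back to flat, non-recursive marking. A self-contained sketch, assuming a downward-growing stack; the address probe and the 64 KB headroom are assumptions for the example, not V8's actual `StackLimitCheck` implementation:

```cpp
#include <cstdint>
#include <cstdio>

// Illustrative stack-headroom guard in the spirit of StackLimitCheck.
static std::uintptr_t stack_limit;

static bool HasOverflowed() {
  char probe;
  // Assumes a downward-growing stack: below the limit means too deep.
  return reinterpret_cast<std::uintptr_t>(&probe) < stack_limit;
}

struct Node {
  Node* left;
  Node* right;
  bool marked;
};

// Returns false if it had to bail out early (cf. VisitUnmarkedObjects);
// the caller then marks objects without recursing.
static bool MarkRecursively(Node* n) {
  if (n == nullptr || n->marked) return true;
  if (HasOverflowed()) return false;
  n->marked = true;
  return MarkRecursively(n->left) && MarkRecursively(n->right);
}

int main() {
  char base;
  // Leave roughly 64 KB of headroom below the current frame.
  stack_limit = reinterpret_cast<std::uintptr_t>(&base) - 64 * 1024;
  Node leaf = {nullptr, nullptr, false};
  Node root = {&leaf, nullptr, false};
  if (!MarkRecursively(&root)) {
    std::puts("close to overflow; marking objects without recursion");
  }
  return 0;
}
```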
1138 static void VisitJSWeakMap(Map* map, HeapObject* object) { | 1166 static void VisitJSWeakMap(Map* map, HeapObject* object) { |
1139 MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector(); | 1167 MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector(); |
(...skipping 300 matching lines...) | |
1440 reinterpret_cast<JSFunction*>(object), | 1468 reinterpret_cast<JSFunction*>(object), |
1441 false); | 1469 false); |
1442 } | 1470 } |
1443 | 1471 |
1444 | 1472 |
1445 static inline void VisitJSFunctionFields(Map* map, | 1473 static inline void VisitJSFunctionFields(Map* map, |
1446 JSFunction* object, | 1474 JSFunction* object, |
1447 bool flush_code_candidate) { | 1475 bool flush_code_candidate) { |
1448 Heap* heap = map->GetHeap(); | 1476 Heap* heap = map->GetHeap(); |
1449 | 1477 |
1450 VisitPointers(heap, | 1478 Object** start = |
1451 HeapObject::RawField(object, JSFunction::kPropertiesOffset), | 1479 HeapObject::RawField(object, JSFunction::kPropertiesOffset); |
1452 HeapObject::RawField(object, JSFunction::kCodeEntryOffset)); | 1480 Object** end = |
1481 HeapObject::RawField(object, JSFunction::kCodeEntryOffset); | |
1482 VisitPointers(heap, start, start, end); | |
1453 | 1483 |
1454 if (!flush_code_candidate) { | 1484 if (!flush_code_candidate) { |
1455 VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset); | 1485 VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset); |
1456 } else { | 1486 } else { |
1457 // Don't visit code object. | 1487 // Don't visit code object. |
1458 | 1488 |
1459 // Visit shared function info to avoid double checking of its | 1489 // Visit shared function info to avoid double checking of its |
1460 // flushability. | 1490 // flushability. |
1461 SharedFunctionInfo* shared_info = object->unchecked_shared(); | 1491 SharedFunctionInfo* shared_info = object->unchecked_shared(); |
1462 MarkBit shared_info_mark = Marking::MarkBitFrom(shared_info); | 1492 MarkBit shared_info_mark = Marking::MarkBitFrom(shared_info); |
1463 if (!shared_info_mark.Get()) { | 1493 if (!shared_info_mark.Get()) { |
1464 Map* shared_info_map = shared_info->map(); | 1494 Map* shared_info_map = shared_info->map(); |
1465 MarkBit shared_info_map_mark = | 1495 MarkBit shared_info_map_mark = |
1466 Marking::MarkBitFrom(shared_info_map); | 1496 Marking::MarkBitFrom(shared_info_map); |
1467 heap->mark_compact_collector()->SetMark(shared_info, shared_info_mark); | 1497 heap->mark_compact_collector()->SetMark(shared_info, shared_info_mark); |
1468 heap->mark_compact_collector()->MarkObject(shared_info_map, | 1498 heap->mark_compact_collector()->MarkObject(shared_info_map, |
1469 shared_info_map_mark); | 1499 shared_info_map_mark); |
1470 VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map, | 1500 VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map, |
1471 shared_info, | 1501 shared_info, |
1472 true); | 1502 true); |
1473 } | 1503 } |
1474 } | 1504 } |
1475 | 1505 |
1476 VisitPointers( | 1506 start = HeapObject::RawField(object, |
1477 heap, | 1507 JSFunction::kCodeEntryOffset + kPointerSize); |
1478 HeapObject::RawField(object, | 1508 end = HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset); |
1479 JSFunction::kCodeEntryOffset + kPointerSize), | 1509 VisitPointers(heap, start, start, end); |
1480 HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset)); | |
1481 } | 1510 } |
1482 | 1511 |
1483 | 1512 |
1484 static void VisitSharedFunctionInfoFields(Heap* heap, | 1513 static void VisitSharedFunctionInfoFields(Heap* heap, |
1485 HeapObject* object, | 1514 HeapObject* object, |
1486 bool flush_code_candidate) { | 1515 bool flush_code_candidate) { |
1487 VisitPointer(heap, | 1516 VisitPointer(heap, |
1488 HeapObject::RawField(object, SharedFunctionInfo::kNameOffset)); | 1517 HeapObject::RawField(object, SharedFunctionInfo::kNameOffset)); |
1489 | 1518 |
1490 if (!flush_code_candidate) { | 1519 if (!flush_code_candidate) { |
1491 VisitPointer(heap, | 1520 VisitPointer(heap, |
1492 HeapObject::RawField(object, | 1521 HeapObject::RawField(object, |
1493 SharedFunctionInfo::kCodeOffset)); | 1522 SharedFunctionInfo::kCodeOffset)); |
1494 } | 1523 } |
1495 | 1524 |
1496 VisitPointers( | 1525 Object** start = |
1497 heap, | |
1498 HeapObject::RawField(object, | 1526 HeapObject::RawField(object, |
1499 SharedFunctionInfo::kOptimizedCodeMapOffset), | 1527 SharedFunctionInfo::kOptimizedCodeMapOffset); |
1500 HeapObject::RawField(object, SharedFunctionInfo::kSize)); | 1528 Object** end = |
1529 HeapObject::RawField(object, SharedFunctionInfo::kSize); | |
1530 | |
1531 VisitPointers(heap, start, start, end); | |
1501 } | 1532 } |
1502 | 1533 |
1503 static VisitorDispatchTable<Callback> non_count_table_; | 1534 static VisitorDispatchTable<Callback> non_count_table_; |
1504 }; | 1535 }; |
1505 | 1536 |
1506 | 1537 |
1538 void MarkCompactMarkingVisitor::VisitHugeFixedArray(Heap* heap, | |
1539 FixedArray* array, | |
1540 int length) { | |
1541 MemoryChunk* chunk = MemoryChunk::FromAddress(array->address()); | |
1542 | |
1543 ASSERT(chunk->owner()->identity() == LO_SPACE); | |
1544 | |
1545 Object** start = array->data_start(); | |
1546 int from = | |
1547 chunk->IsPartiallyScanned() ? chunk->PartiallyScannedProgress() : 0; | |
1548 int to = Min(from + kScanningChunk, length); | |
1549 VisitPointers(heap, start, start + from, start + to); | |
1550 | |
1551 if (to == length) { | |
1552 chunk->SetCompletelyScanned(); | |
1553 } else { | |
1554 chunk->SetPartiallyScannedProgress(to); | |
1555 } | |
1556 } | |
1557 | |
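A compact model of the resume logic in `VisitHugeFixedArray`: scan at most one chunk per call and park the progress where `MemoryChunk`'s flags would record it. All types here are hypothetical stand-ins for the sketch:

```cpp
#include <algorithm>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for MemoryChunk's partially-scanned flag and
// progress counter.
struct ScanProgress {
  int next;    // first unscanned index (PartiallyScannedProgress)
  bool done;   // set once the array is completely scanned
};

// Visit at most `chunk` entries per call, remembering where to resume.
static void ScanChunk(const std::vector<int>& array, ScanProgress* p,
                      int chunk) {
  int from = p->next;
  int to = std::min(from + chunk, static_cast<int>(array.size()));
  for (int i = from; i < to; i++) {
    std::printf("visit entry %d\n", i);
  }
  if (to == static_cast<int>(array.size())) {
    p->done = true;   // chunk->SetCompletelyScanned()
  } else {
    p->next = to;     // chunk->SetPartiallyScannedProgress(to)
  }
}

int main() {
  std::vector<int> huge(10);
  ScanProgress progress = {0, false};
  while (!progress.done) ScanChunk(huge, &progress, 4);  // 4, 4, then 2
  return 0;
}
```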
1558 | |
1507 void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray( | 1559 void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray( |
1508 FixedArrayBase* fixed_array, | 1560 FixedArrayBase* fixed_array, |
1509 FixedArraySubInstanceType fast_type, | 1561 FixedArraySubInstanceType fast_type, |
1510 FixedArraySubInstanceType dictionary_type) { | 1562 FixedArraySubInstanceType dictionary_type) { |
1511 Heap* heap = fixed_array->map()->GetHeap(); | 1563 Heap* heap = fixed_array->map()->GetHeap(); |
1512 if (fixed_array->map() != heap->fixed_cow_array_map() && | 1564 if (fixed_array->map() != heap->fixed_cow_array_map() && |
1513 fixed_array->map() != heap->fixed_double_array_map() && | 1565 fixed_array->map() != heap->fixed_double_array_map() && |
1514 fixed_array != heap->empty_fixed_array()) { | 1566 fixed_array != heap->empty_fixed_array()) { |
1515 if (fixed_array->IsDictionary()) { | 1567 if (fixed_array->IsDictionary()) { |
1516 heap->RecordObjectStats(FIXED_ARRAY_TYPE, | 1568 heap->RecordObjectStats(FIXED_ARRAY_TYPE, |
(...skipping 121 matching lines...) | |
1638 | 1690 |
1639 table_.Register(kVisitSharedFunctionInfo, | 1691 table_.Register(kVisitSharedFunctionInfo, |
1640 &VisitSharedFunctionInfoAndFlushCode); | 1692 &VisitSharedFunctionInfoAndFlushCode); |
1641 | 1693 |
1642 table_.Register(kVisitJSFunction, | 1694 table_.Register(kVisitJSFunction, |
1643 &VisitJSFunctionAndFlushCode); | 1695 &VisitJSFunctionAndFlushCode); |
1644 | 1696 |
1645 table_.Register(kVisitJSRegExp, | 1697 table_.Register(kVisitJSRegExp, |
1646 &VisitRegExpAndFlushCode); | 1698 &VisitRegExpAndFlushCode); |
1647 | 1699 |
1700 table_.Register(kVisitFixedArray, | |
1701 &VisitFixedArray); | |
1702 | |
1648 if (FLAG_track_gc_object_stats) { | 1703 if (FLAG_track_gc_object_stats) { |
1649 // Copy the visitor table to make call-through possible. | 1704 // Copy the visitor table to make call-through possible. |
1650 non_count_table_.CopyFrom(&table_); | 1705 non_count_table_.CopyFrom(&table_); |
1651 #define VISITOR_ID_COUNT_FUNCTION(id) \ | 1706 #define VISITOR_ID_COUNT_FUNCTION(id) \ |
1652 table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit); | 1707 table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit); |
1653 VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION) | 1708 VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION) |
1654 #undef VISITOR_ID_COUNT_FUNCTION | 1709 #undef VISITOR_ID_COUNT_FUNCTION |
1655 } | 1710 } |
1656 } | 1711 } |
1657 | 1712 |
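The `FLAG_track_gc_object_stats` branch above uses a copy-and-wrap pattern: the live dispatch table is copied into `non_count_table_`, then every entry is replaced by a counting wrapper that calls through to the preserved original. A minimal sketch of that pattern; the two-entry table and visitor ids are invented for the example, not V8's real table types:

```cpp
#include <cstdio>

// Hypothetical sketch of the copy-and-wrap pattern behind
// FLAG_track_gc_object_stats.
typedef void (*Callback)(int obj);

static Callback table[2];            // live dispatch table
static Callback non_count_table[2];  // preserved originals
static int counts[2];

template <int id>
static void CountingVisit(int obj) {
  counts[id]++;                // record the visit...
  non_count_table[id](obj);    // ...then call through (cf. non_count_table_)
}

static void VisitA(int) { std::puts("visit A"); }
static void VisitB(int) { std::puts("visit B"); }

int main() {
  table[0] = VisitA;
  table[1] = VisitB;
  // Copy first so call-through is possible, then wrap every entry
  // (cf. the VISITOR_ID_COUNT_FUNCTION macro expansion).
  non_count_table[0] = table[0];
  non_count_table[1] = table[1];
  table[0] = CountingVisit<0>;
  table[1] = CountingVisit<1>;
  table[0](42);
  std::printf("visits counted for id 0: %d\n", counts[0]);  // 1
  return 0;
}
```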
1658 | 1713 |
1659 VisitorDispatchTable<MarkCompactMarkingVisitor::Callback> | 1714 VisitorDispatchTable<MarkCompactMarkingVisitor::Callback> |
1660 MarkCompactMarkingVisitor::non_count_table_; | 1715 MarkCompactMarkingVisitor::non_count_table_; |
1661 | 1716 |
1662 | 1717 |
1663 class MarkingVisitor : public ObjectVisitor { | 1718 class MarkingVisitor : public ObjectVisitor { |
1664 public: | 1719 public: |
1665 explicit MarkingVisitor(Heap* heap) : heap_(heap) { } | 1720 explicit MarkingVisitor(Heap* heap) : heap_(heap) { } |
1666 | 1721 |
1667 void VisitPointer(Object** p) { | 1722 void VisitPointer(Object** p) { |
1668 MarkCompactMarkingVisitor::VisitPointer(heap_, p); | 1723 MarkCompactMarkingVisitor::VisitPointer(heap_, p); |
1669 } | 1724 } |
1670 | 1725 |
1671 void VisitPointers(Object** start, Object** end) { | 1726 void VisitPointers(Object** start, Object** end) { |
1672 MarkCompactMarkingVisitor::VisitPointers(heap_, start, end); | 1727 MarkCompactMarkingVisitor::VisitPointers(heap_, start, start, end); |
1673 } | 1728 } |
1674 | 1729 |
1675 private: | 1730 private: |
1676 Heap* heap_; | 1731 Heap* heap_; |
1677 }; | 1732 }; |
1678 | 1733 |
1679 | 1734 |
1680 class CodeMarkingVisitor : public ThreadVisitor { | 1735 class CodeMarkingVisitor : public ThreadVisitor { |
1681 public: | 1736 public: |
1682 explicit CodeMarkingVisitor(MarkCompactCollector* collector) | 1737 explicit CodeMarkingVisitor(MarkCompactCollector* collector) |
(...skipping 438 matching lines...) | |
2121 ASSERT(object->IsHeapObject()); | 2176 ASSERT(object->IsHeapObject()); |
2122 ASSERT(heap()->Contains(object)); | 2177 ASSERT(heap()->Contains(object)); |
2123 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object))); | 2178 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object))); |
2124 | 2179 |
2125 Map* map = object->map(); | 2180 Map* map = object->map(); |
2126 MarkBit map_mark = Marking::MarkBitFrom(map); | 2181 MarkBit map_mark = Marking::MarkBitFrom(map); |
2127 MarkObject(map, map_mark); | 2182 MarkObject(map, map_mark); |
2128 | 2183 |
2129 MarkCompactMarkingVisitor::IterateBody(map, object); | 2184 MarkCompactMarkingVisitor::IterateBody(map, object); |
2130 } | 2185 } |
2186 ProcessLargePostponedArrays(heap(), &marking_deque_); | |
2131 | 2187 |
2132 // Process encountered weak maps, mark objects only reachable by those | 2188 // Process encountered weak maps, mark objects only reachable by those |
2133 // weak maps and repeat until fix-point is reached. | 2189 // weak maps and repeat until fix-point is reached. |
2134 ProcessWeakMaps(); | 2190 ProcessWeakMaps(); |
2135 } | 2191 } |
2136 } | 2192 } |
2137 | 2193 |
2138 | 2194 |
2195 void MarkCompactCollector::ProcessLargePostponedArrays(Heap* heap, | |
Michael Starzinger 2012/09/26 11:40:07: Just make this non-static and you don't need to pa…
Erik Corry 2012/09/26 11:42:20: The incremental marker also uses it, to avoid code…
| |
2196 MarkingDeque* deque) { | |
2197 ASSERT(deque->IsEmpty()); | |
2198 LargeObjectIterator it(heap->lo_space()); | |
2199 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { | |
2200 if (!obj->IsFixedArray()) continue; | |
2201 MemoryChunk* p = MemoryChunk::FromAddress(obj->address()); | |
2202 if (p->IsPartiallyScanned()) { | |
2203 deque->PushBlack(obj); | |
2204 } | |
2205 } | |
2206 } | |
2207 | |
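`ProcessLargePostponedArrays` closes the loop: once the marking deque drains, any large fixed array whose chunked scan is still in flight is re-pushed so marking continues until a fix-point. A hypothetical model of that re-queueing step (invented types standing in for the large-object space iterator and chunk flags):

```cpp
#include <cstdio>
#include <vector>

// Hypothetical model of ProcessLargePostponedArrays.
struct LargeObject {
  bool is_fixed_array;
  bool partially_scanned;  // stand-in for MemoryChunk::IsPartiallyScanned()
};

static void RequeuePostponed(const std::vector<LargeObject*>& lo_space,
                             std::vector<LargeObject*>* worklist) {
  // The real code asserts the deque is empty before refilling it.
  for (size_t i = 0; i < lo_space.size(); i++) {
    LargeObject* obj = lo_space[i];
    if (!obj->is_fixed_array) continue;
    if (obj->partially_scanned) worklist->push_back(obj);
  }
}

int main() {
  LargeObject done = {true, false};
  LargeObject pending = {true, true};
  LargeObject code = {false, false};
  std::vector<LargeObject*> lo_space;
  lo_space.push_back(&done);
  lo_space.push_back(&pending);
  lo_space.push_back(&code);
  std::vector<LargeObject*> worklist;
  RequeuePostponed(lo_space, &worklist);
  std::printf("requeued %zu of %zu large objects\n",
              worklist.size(), lo_space.size());  // 1 of 3
  return 0;
}
```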
2208 | |
2139 // Sweep the heap for overflowed objects, clear their overflow bits, and | 2209 // Sweep the heap for overflowed objects, clear their overflow bits, and |
2140 // push them on the marking stack. Stop early if the marking stack fills | 2210 // push them on the marking stack. Stop early if the marking stack fills |
2141 // before sweeping completes. If sweeping completes, there are no remaining | 2211 // before sweeping completes. If sweeping completes, there are no remaining |
2142 // overflowed objects in the heap so the overflow flag on the marking stack | 2212 // overflowed objects in the heap so the overflow flag on the marking stack |
2143 // is cleared. | 2213 // is cleared. |
2144 void MarkCompactCollector::RefillMarkingDeque() { | 2214 void MarkCompactCollector::RefillMarkingDeque() { |
2215 if (FLAG_trace_gc) { | |
2216 PrintPID("Marking queue overflowed\n"); | |
2217 } | |
2145 ASSERT(marking_deque_.overflowed()); | 2218 ASSERT(marking_deque_.overflowed()); |
2146 | 2219 |
2147 SemiSpaceIterator new_it(heap()->new_space()); | 2220 SemiSpaceIterator new_it(heap()->new_space()); |
2148 DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &new_it); | 2221 DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &new_it); |
2149 if (marking_deque_.IsFull()) return; | 2222 if (marking_deque_.IsFull()) return; |
2150 | 2223 |
2151 DiscoverGreyObjectsInSpace(heap(), | 2224 DiscoverGreyObjectsInSpace(heap(), |
2152 &marking_deque_, | 2225 &marking_deque_, |
2153 heap()->old_pointer_space()); | 2226 heap()->old_pointer_space()); |
2154 if (marking_deque_.IsFull()) return; | 2227 if (marking_deque_.IsFull()) return; |
(...skipping 1910 matching lines...) | |
4065 while (buffer != NULL) { | 4138 while (buffer != NULL) { |
4066 SlotsBuffer* next_buffer = buffer->next(); | 4139 SlotsBuffer* next_buffer = buffer->next(); |
4067 DeallocateBuffer(buffer); | 4140 DeallocateBuffer(buffer); |
4068 buffer = next_buffer; | 4141 buffer = next_buffer; |
4069 } | 4142 } |
4070 *buffer_address = NULL; | 4143 *buffer_address = NULL; |
4071 } | 4144 } |
4072 | 4145 |
4073 | 4146 |
4074 } } // namespace v8::internal | 4147 } } // namespace v8::internal |