OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
7 | 7 |
8 #include <list> | 8 #include <list> |
9 #include <memory> | 9 #include <memory> |
10 #include <unordered_set> | 10 #include <unordered_set> |
(...skipping 217 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
228 class MemoryChunk { | 228 class MemoryChunk { |
229 public: | 229 public: |
230 enum MemoryChunkFlags { | 230 enum MemoryChunkFlags { |
231 IS_EXECUTABLE, | 231 IS_EXECUTABLE, |
232 POINTERS_TO_HERE_ARE_INTERESTING, | 232 POINTERS_TO_HERE_ARE_INTERESTING, |
233 POINTERS_FROM_HERE_ARE_INTERESTING, | 233 POINTERS_FROM_HERE_ARE_INTERESTING, |
234 IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE. | 234 IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE. |
235 IN_TO_SPACE, // All pages in new space have one of these two set. | 235 IN_TO_SPACE, // All pages in new space have one of these two set. |
236 NEW_SPACE_BELOW_AGE_MARK, | 236 NEW_SPACE_BELOW_AGE_MARK, |
237 EVACUATION_CANDIDATE, | 237 EVACUATION_CANDIDATE, |
238 NEVER_EVACUATE, // May contain immortal immutables. | 238 |
| 239 // |NEVER_EVACUATE|: A page tagged with this flag will never be selected |
| 240 // for evacuation. Typically used for immortal immovable pages. |
| 241 NEVER_EVACUATE, |
239 | 242 |
240 // Large objects can have a progress bar in their page header. These objects | 243 // Large objects can have a progress bar in their page header. These objects |
241 // are scanned in increments and will be kept black while being scanned. | 244 // are scanned in increments and will be kept black while being scanned. |
242 // Even if the mutator writes to them they will be kept black and a white | 245 // Even if the mutator writes to them they will be kept black and a white |
243 // to grey transition is performed in the value. | 246 // to grey transition is performed in the value. |
244 HAS_PROGRESS_BAR, | 247 HAS_PROGRESS_BAR, |
245 | 248 |
246 // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted | 249 // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted |
247 // from new to old space during evacuation. | 250 // from new to old space during evacuation. |
248 PAGE_NEW_OLD_PROMOTION, | 251 PAGE_NEW_OLD_PROMOTION, |
(...skipping 466 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
715 (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) | | 718 (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) | |
716 (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); | 719 (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); |
717 | 720 |
718 // Maximum object size that gets allocated into regular pages. Objects larger | 721 // Maximum object size that gets allocated into regular pages. Objects larger |
719 // than that size are allocated in large object space and are never moved in | 722 // than that size are allocated in large object space and are never moved in |
720 // memory. This also applies to new space allocation, since objects are never | 723 // memory. This also applies to new space allocation, since objects are never |
721 // migrated from new space to large object space. Takes double alignment into | 724 // migrated from new space to large object space. Takes double alignment into |
722 // account. | 725 // account. |
723 // TODO(hpayer): This limit should be way smaller but we currently have | 726 // TODO(hpayer): This limit should be way smaller but we currently have |
724 // short living objects >256K. | 727 // short living objects >256K. |
725 static const int kMaxRegularHeapObjectSize = 600 * KB; | 728 static const int kMaxRegularHeapObjectSize = 400 * KB; |
726 | 729 |
727 static inline Page* ConvertNewToOld(Page* old_page, PagedSpace* new_owner); | 730 static inline Page* ConvertNewToOld(Page* old_page, PagedSpace* new_owner); |
728 | 731 |
729 // Returns the page containing a given address. The address ranges | 732 // Returns the page containing a given address. The address ranges |
730 // from [page_addr .. page_addr + kPageSize[. This only works if the object | 733 // from [page_addr .. page_addr + kPageSize[. This only works if the object |
731 // is in fact in a page. | 734 // is in fact in a page. |
732 static Page* FromAddress(Address addr) { | 735 static Page* FromAddress(Address addr) { |
733 return reinterpret_cast<Page*>(OffsetFrom(addr) & ~kPageAlignmentMask); | 736 return reinterpret_cast<Page*>(OffsetFrom(addr) & ~kPageAlignmentMask); |
734 } | 737 } |
735 | 738 |
(...skipping 80 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
816 | 819 |
817 bool is_anchor() { return IsFlagSet(Page::ANCHOR); } | 820 bool is_anchor() { return IsFlagSet(Page::ANCHOR); } |
818 | 821 |
819 intptr_t wasted_memory() { return wasted_memory_.Value(); } | 822 intptr_t wasted_memory() { return wasted_memory_.Value(); } |
820 void add_wasted_memory(intptr_t waste) { wasted_memory_.Increment(waste); } | 823 void add_wasted_memory(intptr_t waste) { wasted_memory_.Increment(waste); } |
821 intptr_t available_in_free_list() { return available_in_free_list_.Value(); } | 824 intptr_t available_in_free_list() { return available_in_free_list_.Value(); } |
822 void add_available_in_free_list(intptr_t available) { | 825 void add_available_in_free_list(intptr_t available) { |
823 available_in_free_list_.Increment(available); | 826 available_in_free_list_.Increment(available); |
824 } | 827 } |
825 | 828 |
| 829 size_t ShrinkToHighWaterMark(); |
| 830 |
826 #ifdef DEBUG | 831 #ifdef DEBUG |
827 void Print(); | 832 void Print(); |
828 #endif // DEBUG | 833 #endif // DEBUG |
829 | 834 |
830 private: | 835 private: |
831 enum InitializationMode { kFreeMemory, kDoNotFreeMemory }; | 836 enum InitializationMode { kFreeMemory, kDoNotFreeMemory }; |
832 | 837 |
833 template <InitializationMode mode = kFreeMemory> | 838 template <InitializationMode mode = kFreeMemory> |
834 static inline Page* Initialize(Heap* heap, MemoryChunk* chunk, | 839 static inline Page* Initialize(Heap* heap, MemoryChunk* chunk, |
835 Executability executable, PagedSpace* owner); | 840 Executability executable, PagedSpace* owner); |
(...skipping 460 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1296 void ReportStatistics(); | 1301 void ReportStatistics(); |
1297 #endif | 1302 #endif |
1298 | 1303 |
1299 // Returns a MemoryChunk in which the memory region from commit_area_size to | 1304 // Returns a MemoryChunk in which the memory region from commit_area_size to |
1300 // reserve_area_size of the chunk area is reserved but not committed, it | 1305 // reserve_area_size of the chunk area is reserved but not committed, it |
1301 // could be committed later by calling MemoryChunk::CommitArea. | 1306 // could be committed later by calling MemoryChunk::CommitArea. |
1302 MemoryChunk* AllocateChunk(intptr_t reserve_area_size, | 1307 MemoryChunk* AllocateChunk(intptr_t reserve_area_size, |
1303 intptr_t commit_area_size, | 1308 intptr_t commit_area_size, |
1304 Executability executable, Space* space); | 1309 Executability executable, Space* space); |
1305 | 1310 |
| 1311 void ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink); |
| 1312 |
1306 Address ReserveAlignedMemory(size_t requested, size_t alignment, | 1313 Address ReserveAlignedMemory(size_t requested, size_t alignment, |
1307 base::VirtualMemory* controller); | 1314 base::VirtualMemory* controller); |
1308 Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size, | 1315 Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size, |
1309 size_t alignment, Executability executable, | 1316 size_t alignment, Executability executable, |
1310 base::VirtualMemory* controller); | 1317 base::VirtualMemory* controller); |
1311 | 1318 |
1312 bool CommitMemory(Address addr, size_t size, Executability executable); | 1319 bool CommitMemory(Address addr, size_t size, Executability executable); |
1313 | 1320 |
1314 void FreeMemory(base::VirtualMemory* reservation, Executability executable); | 1321 void FreeMemory(base::VirtualMemory* reservation, Executability executable); |
1315 void PartialFreeMemory(MemoryChunk* chunk, Address start_free); | 1322 void PartialFreeMemory(MemoryChunk* chunk, Address start_free); |
(...skipping 295 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1611 CHECK(size_ >= 0); | 1618 CHECK(size_ >= 0); |
1612 } | 1619 } |
1613 | 1620 |
1614 // Shrink the space by removing available bytes. Since shrinking is done | 1621 // Shrink the space by removing available bytes. Since shrinking is done |
1615 // during sweeping, bytes have been marked as being in use (part of the size) | 1622 // during sweeping, bytes have been marked as being in use (part of the size) |
1616 // and are hereby freed. | 1623 // and are hereby freed. |
1617 void ShrinkSpace(int size_in_bytes) { | 1624 void ShrinkSpace(int size_in_bytes) { |
1618 capacity_ -= size_in_bytes; | 1625 capacity_ -= size_in_bytes; |
1619 size_ -= size_in_bytes; | 1626 size_ -= size_in_bytes; |
1620 CHECK_GE(size_, 0); | 1627 CHECK_GE(size_, 0); |
| 1628 CHECK_GE(capacity_, 0); |
1621 } | 1629 } |
1622 | 1630 |
1623 // Allocate from available bytes (available -> size). | 1631 // Allocate from available bytes (available -> size). |
1624 void AllocateBytes(intptr_t size_in_bytes) { | 1632 void AllocateBytes(intptr_t size_in_bytes) { |
1625 size_ += size_in_bytes; | 1633 size_ += size_in_bytes; |
1626 CHECK_GE(size_, 0); | 1634 CHECK_GE(size_, 0); |
1627 } | 1635 } |
1628 | 1636 |
1629 // Free allocated bytes, making them available (size -> available). | 1637 // Free allocated bytes, making them available (size -> available). |
1630 void DeallocateBytes(intptr_t size_in_bytes) { | 1638 void DeallocateBytes(intptr_t size_in_bytes) { |
(...skipping 545 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2176 FreeList* free_list() { return &free_list_; } | 2184 FreeList* free_list() { return &free_list_; } |
2177 | 2185 |
2178 base::Mutex* mutex() { return &space_mutex_; } | 2186 base::Mutex* mutex() { return &space_mutex_; } |
2179 | 2187 |
2180 inline void UnlinkFreeListCategories(Page* page); | 2188 inline void UnlinkFreeListCategories(Page* page); |
2181 inline intptr_t RelinkFreeListCategories(Page* page); | 2189 inline intptr_t RelinkFreeListCategories(Page* page); |
2182 | 2190 |
2183 iterator begin() { return iterator(anchor_.next_page()); } | 2191 iterator begin() { return iterator(anchor_.next_page()); } |
2184 iterator end() { return iterator(&anchor_); } | 2192 iterator end() { return iterator(&anchor_); } |
2185 | 2193 |
| 2194 // Shrink immortal immovable pages of the space to be exactly the size needed |
| 2195 // using the high water mark. |
| 2196 void ShrinkImmortalImmovablePages(); |
| 2197 |
2186 protected: | 2198 protected: |
2187 // PagedSpaces that should be included in snapshots have different, i.e., | 2199 // PagedSpaces that should be included in snapshots have different, i.e., |
2188 // smaller, initial pages. | 2200 // smaller, initial pages. |
2189 virtual bool snapshotable() { return true; } | 2201 virtual bool snapshotable() { return true; } |
2190 | 2202 |
2191 bool HasPages() { return anchor_.next_page() != &anchor_; } | 2203 bool HasPages() { return anchor_.next_page() != &anchor_; } |
2192 | 2204 |
2193 // Cleans up the space, frees all pages in this space except those belonging | 2205 // Cleans up the space, frees all pages in this space except those belonging |
2194 // to the initial chunk, uncommits addresses in the initial chunk. | 2206 // to the initial chunk, uncommits addresses in the initial chunk. |
2195 void TearDown(); | 2207 void TearDown(); |
(...skipping 832 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3028 count = 0; | 3040 count = 0; |
3029 } | 3041 } |
3030 // Must be small, since an iteration is used for lookup. | 3042 // Must be small, since an iteration is used for lookup. |
3031 static const int kMaxComments = 64; | 3043 static const int kMaxComments = 64; |
3032 }; | 3044 }; |
3033 #endif | 3045 #endif |
3034 } // namespace internal | 3046 } // namespace internal |
3035 } // namespace v8 | 3047 } // namespace v8 |
3036 | 3048 |
3037 #endif // V8_HEAP_SPACES_H_ | 3049 #endif // V8_HEAP_SPACES_H_ |
OLD | NEW |