OLD | NEW |
1 // Copyright 2016 the V8 project authors. All rights reserved. | 1 // Copyright 2016 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/zone/accounting-allocator.h" | 5 #include "src/zone/accounting-allocator.h" |
6 | 6 |
7 #include <cstdlib> | 7 #include <cstdlib> |
8 | 8 |
9 #if V8_LIBC_BIONIC | 9 #if V8_LIBC_BIONIC |
10 #include <malloc.h> // NOLINT | 10 #include <malloc.h> // NOLINT |
11 #endif | 11 #endif |
12 | 12 |
13 namespace v8 { | 13 namespace v8 { |
14 namespace internal { | 14 namespace internal { |
15 | 15 |
16 AccountingAllocator::AccountingAllocator() : unused_segments_mutex_() { | 16 AccountingAllocator::AccountingAllocator() : unused_segments_mutex_() { |
| 17 static const size_t kDefaultBucketMaxSize = 5; |
| 18 |
17 memory_pressure_level_.SetValue(MemoryPressureLevel::kNone); | 19 memory_pressure_level_.SetValue(MemoryPressureLevel::kNone); |
18 std::fill(unused_segments_heads_, | 20 std::fill(unused_segments_heads_, unused_segments_heads_ + kNumberBuckets, |
19 unused_segments_heads_ + | |
20 (1 + kMaxSegmentSizePower - kMinSegmentSizePower), | |
21 nullptr); | 21 nullptr); |
22 std::fill( | 22 std::fill(unused_segments_sizes_, unused_segments_sizes_ + kNumberBuckets, 0); |
23 unused_segments_sizes, | 23 std::fill(unused_segments_max_sizes_, |
24 unused_segments_sizes + (1 + kMaxSegmentSizePower - kMinSegmentSizePower), | 24 unused_segments_max_sizes_ + kNumberBuckets, kDefaultBucketMaxSize); |
25 0); | |
26 } | 25 } |
27 | 26 |
28 AccountingAllocator::~AccountingAllocator() { ClearPool(); } | 27 AccountingAllocator::~AccountingAllocator() { ClearPool(); } |
29 | 28 |
30 void AccountingAllocator::MemoryPressureNotification( | 29 void AccountingAllocator::MemoryPressureNotification( |
31 MemoryPressureLevel level) { | 30 MemoryPressureLevel level) { |
32 memory_pressure_level_.SetValue(level); | 31 memory_pressure_level_.SetValue(level); |
33 | 32 |
34 if (level != MemoryPressureLevel::kNone) { | 33 if (level != MemoryPressureLevel::kNone) { |
35 ClearPool(); | 34 ClearPool(); |
36 } | 35 } |
37 } | 36 } |
38 | 37 |
| 38 void AccountingAllocator::ConfigureSegmentPool(const size_t max_pool_size) { |
| 39 // The sum of the bytes of one segment of each size. |
| 40 static const size_t full_size = (size_t(1) << (kMaxSegmentSizePower + 1)) - |
| 41 (size_t(1) << kMinSegmentSizePower); |
| 42 size_t fits_fully = max_pool_size / full_size; |
| 43 |
| 44 base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_); |
| 45 |
| 46 // We assume few zones (less than 'fits_fully' many) to be active at the same |
| 47 // time. When zones grow regularly, they will keep requesting segments of |
| 48 // increasing size each time. Therefore we try to get as many segments with an |
| 49 // equal number of segments of each size as possible. |
| 50 // The remaining space is used to make more room for an 'incomplete set' of |
| 51 // segments beginning with the smaller ones. |
| 52 // This code will work best if the max_pool_size is a multiple of the |
| 53 // full_size. If max_pool_size is not a sum of segment sizes, the actual pool |
| 54 // size might be smaller than max_pool_size. Note that no actual memory gets |
| 55 // wasted though. |
| 56 // TODO(heimbuef): Determine better strategy generating a segment sizes |
| 57 // distribution that is closer to real/benchmark use cases and uses the given |
| 58 // max_pool_size more efficiently. |
| 59 size_t total_size = fits_fully * full_size; |
| 60 |
| 61 for (size_t power = 0; power < kNumberBuckets; ++power) { |
| 62 if (total_size + (size_t(1) << power) <= max_pool_size) { |
| 63 unused_segments_max_sizes_[power] = fits_fully + 1; |
| 64 total_size += size_t(1) << power; |
| 65 } else { |
| 66 unused_segments_max_sizes_[power] = fits_fully; |
| 67 } |
| 68 } |
| 69 } |
| 70 |
39 Segment* AccountingAllocator::GetSegment(size_t bytes) { | 71 Segment* AccountingAllocator::GetSegment(size_t bytes) { |
40 Segment* result = GetSegmentFromPool(bytes); | 72 Segment* result = GetSegmentFromPool(bytes); |
41 if (result == nullptr) { | 73 if (result == nullptr) { |
42 result = AllocateSegment(bytes); | 74 result = AllocateSegment(bytes); |
43 result->Initialize(bytes); | 75 result->Initialize(bytes); |
44 } | 76 } |
45 | 77 |
46 return result; | 78 return result; |
47 } | 79 } |
48 | 80 |
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
86 | 118 |
87 size_t AccountingAllocator::GetCurrentPoolSize() const { | 119 size_t AccountingAllocator::GetCurrentPoolSize() const { |
88 return base::NoBarrier_Load(&current_pool_size_); | 120 return base::NoBarrier_Load(&current_pool_size_); |
89 } | 121 } |
90 | 122 |
91 Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) { | 123 Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) { |
92 if (requested_size > (1 << kMaxSegmentSizePower)) { | 124 if (requested_size > (1 << kMaxSegmentSizePower)) { |
93 return nullptr; | 125 return nullptr; |
94 } | 126 } |
95 | 127 |
96 uint8_t power = kMinSegmentSizePower; | 128 size_t power = kMinSegmentSizePower; |
97 while (requested_size > (static_cast<size_t>(1) << power)) power++; | 129 while (requested_size > (static_cast<size_t>(1) << power)) power++; |
98 | 130 |
99 DCHECK_GE(power, kMinSegmentSizePower + 0); | 131 DCHECK_GE(power, kMinSegmentSizePower + 0); |
100 power -= kMinSegmentSizePower; | 132 power -= kMinSegmentSizePower; |
101 | 133 |
102 Segment* segment; | 134 Segment* segment; |
103 { | 135 { |
104 base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_); | 136 base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_); |
105 | 137 |
106 segment = unused_segments_heads_[power]; | 138 segment = unused_segments_heads_[power]; |
107 | 139 |
108 if (segment != nullptr) { | 140 if (segment != nullptr) { |
109 unused_segments_heads_[power] = segment->next(); | 141 unused_segments_heads_[power] = segment->next(); |
110 segment->set_next(nullptr); | 142 segment->set_next(nullptr); |
111 | 143 |
112 unused_segments_sizes[power]--; | 144 unused_segments_sizes_[power]--; |
113 base::NoBarrier_AtomicIncrement( | 145 base::NoBarrier_AtomicIncrement( |
114 &current_pool_size_, -static_cast<base::AtomicWord>(segment->size())); | 146 &current_pool_size_, -static_cast<base::AtomicWord>(segment->size())); |
115 } | 147 } |
116 } | 148 } |
117 | 149 |
118 if (segment) { | 150 if (segment) { |
119 DCHECK_GE(segment->size(), requested_size); | 151 DCHECK_GE(segment->size(), requested_size); |
120 } | 152 } |
121 return segment; | 153 return segment; |
122 } | 154 } |
123 | 155 |
124 bool AccountingAllocator::AddSegmentToPool(Segment* segment) { | 156 bool AccountingAllocator::AddSegmentToPool(Segment* segment) { |
125 size_t size = segment->size(); | 157 size_t size = segment->size(); |
126 | 158 |
127 if (size >= (1 << (kMaxSegmentSizePower + 1))) return false; | 159 if (size >= (1 << (kMaxSegmentSizePower + 1))) return false; |
128 | 160 |
129 if (size < (1 << kMinSegmentSizePower)) return false; | 161 if (size < (1 << kMinSegmentSizePower)) return false; |
130 | 162 |
131 uint8_t power = kMaxSegmentSizePower; | 163 size_t power = kMaxSegmentSizePower; |
132 | 164 |
133 while (size < (static_cast<size_t>(1) << power)) power--; | 165 while (size < (static_cast<size_t>(1) << power)) power--; |
134 | 166 |
135 DCHECK_GE(power, kMinSegmentSizePower + 0); | 167 DCHECK_GE(power, kMinSegmentSizePower + 0); |
136 power -= kMinSegmentSizePower; | 168 power -= kMinSegmentSizePower; |
137 | 169 |
138 { | 170 { |
139 base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_); | 171 base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_); |
140 | 172 |
141 if (unused_segments_sizes[power] >= kMaxSegmentsPerBucket) { | 173 if (unused_segments_sizes_[power] >= unused_segments_max_sizes_[power]) { |
142 return false; | 174 return false; |
143 } | 175 } |
144 | 176 |
145 segment->set_next(unused_segments_heads_[power]); | 177 segment->set_next(unused_segments_heads_[power]); |
146 unused_segments_heads_[power] = segment; | 178 unused_segments_heads_[power] = segment; |
147 base::NoBarrier_AtomicIncrement(&current_pool_size_, size); | 179 base::NoBarrier_AtomicIncrement(&current_pool_size_, size); |
148 unused_segments_sizes[power]++; | 180 unused_segments_sizes_[power]++; |
149 } | 181 } |
150 | 182 |
151 return true; | 183 return true; |
152 } | 184 } |
153 | 185 |
154 void AccountingAllocator::ClearPool() { | 186 void AccountingAllocator::ClearPool() { |
155 base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_); | 187 base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_); |
156 | 188 |
157 for (uint8_t power = 0; power <= kMaxSegmentSizePower - kMinSegmentSizePower; | 189 for (size_t power = 0; power <= kMaxSegmentSizePower - kMinSegmentSizePower; |
158 power++) { | 190 power++) { |
159 Segment* current = unused_segments_heads_[power]; | 191 Segment* current = unused_segments_heads_[power]; |
160 while (current) { | 192 while (current) { |
161 Segment* next = current->next(); | 193 Segment* next = current->next(); |
162 FreeSegment(current); | 194 FreeSegment(current); |
163 current = next; | 195 current = next; |
164 } | 196 } |
165 unused_segments_heads_[power] = nullptr; | 197 unused_segments_heads_[power] = nullptr; |
166 } | 198 } |
167 } | 199 } |
168 | 200 |
169 } // namespace internal | 201 } // namespace internal |
170 } // namespace v8 | 202 } // namespace v8 |
OLD | NEW |