OLD | NEW |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/memory/discardable_memory_allocator_android.h" | 5 #include "base/memory/discardable_memory_allocator_android.h" |
6 | 6 |
7 #include <sys/mman.h> | 7 #include <sys/mman.h> |
8 #include <unistd.h> | 8 #include <unistd.h> |
9 | 9 |
10 #include <algorithm> | 10 #include <algorithm> |
11 #include <cmath> | 11 #include <cmath> |
12 #include <limits> | 12 #include <limits> |
13 #include <set> | 13 #include <set> |
14 #include <utility> | 14 #include <utility> |
15 | 15 |
16 #include "base/basictypes.h" | 16 #include "base/basictypes.h" |
17 #include "base/containers/hash_tables.h" | 17 #include "base/containers/hash_tables.h" |
18 #include "base/file_util.h" | 18 #include "base/file_util.h" |
19 #include "base/files/scoped_file.h" | 19 #include "base/files/scoped_file.h" |
20 #include "base/logging.h" | 20 #include "base/logging.h" |
21 #include "base/memory/discardable_memory.h" | |
22 #include "base/memory/scoped_vector.h" | 21 #include "base/memory/scoped_vector.h" |
23 #include "base/synchronization/lock.h" | |
24 #include "base/threading/thread_checker.h" | |
25 #include "third_party/ashmem/ashmem.h" | 22 #include "third_party/ashmem/ashmem.h" |
26 | 23 |
27 // The allocator consists of three parts (classes): | 24 // The allocator consists of three parts (classes): |
28 // - DiscardableMemoryAllocator: entry point of all allocations (through its | 25 // - DiscardableMemoryAllocator: entry point of all allocations (through its |
29 // Allocate() method) that are dispatched to the AshmemRegion instances (which | 26 // Allocate() method) that are dispatched to the AshmemRegion instances (which |
30 // it owns). | 27 // it owns). |
31 // - AshmemRegion: manages allocations and destructions inside a single large | 28 // - AshmemRegion: manages allocations and destructions inside a single large |
32 // (e.g. 32 MBytes) ashmem region. | 29 // (e.g. 32 MBytes) ashmem region. |
33 // - DiscardableAshmemChunk: class implementing the DiscardableMemory interface | 30 // - DiscardableAshmemChunk: class mimicking the DiscardableMemory interface |
34 // whose instances are returned to the client. DiscardableAshmemChunk lets the | 31 // whose instances are returned to the client. |
35 // client seamlessly operate on a subrange of the ashmem region managed by | |
36 // AshmemRegion. | |
37 | 32 |
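For orientation, a minimal client-side sketch of the API this file implements. It only uses names visible in this CL (DiscardableMemoryAllocator, DiscardableAshmemChunk, Allocate(), Lock(), Unlock(), Memory()); the region size, the base::internal qualification and the error handling are illustrative assumptions, not part of the change.

  // Hypothetical usage sketch; would include
  // "base/memory/discardable_memory_allocator_android.h".
  base::internal::DiscardableMemoryAllocator allocator(
      "MyAllocator", 32 * 1024 * 1024);  // 32 MB ashmem regions (assumed).
  scoped_ptr<base::internal::DiscardableAshmemChunk> chunk =
      allocator.Allocate(64 * 1024);
  if (chunk) {
    // Chunks start out locked (pinned), so the memory is safe to use.
    static_cast<char*>(chunk->Memory())[0] = 'x';
    chunk->Unlock();  // The kernel may now purge the backing pages.
    if (chunk->Lock()) {
      // The pages survived; the previous contents are still valid.
    } else {
      // The pages were purged while unlocked; regenerate the contents.
    }
    chunk->Unlock();
  }
  // Deleting |chunk| returns its range to the owning AshmemRegion.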
38 namespace base { | 33 namespace base { |
39 namespace { | 34 namespace { |
40 | 35 |
41 // Only tolerate fragmentation in used chunks *caused by the client* (as opposed | 36 // Only tolerate fragmentation in used chunks *caused by the client* (as opposed |
42 // to the allocator when a free chunk is reused). The client can cause such | 37 // to the allocator when a free chunk is reused). The client can cause such |
43 // fragmentation by e.g. requesting 4097 bytes. This size would be rounded up to | 38 // fragmentation by e.g. requesting 4097 bytes. This size would be rounded up to |
44 // 8192 by the allocator which would cause 4095 bytes of fragmentation (which is | 39 // 8192 by the allocator which would cause 4095 bytes of fragmentation (which is |
45 // currently the maximum allowed). If the client requests 4096 bytes and a free | 40 // currently the maximum allowed). If the client requests 4096 bytes and a free |
46 // chunk of 8192 bytes is available then the free chunk gets split into two | 41 // chunk of 8192 bytes is available then the free chunk gets split into two |
(...skipping 48 matching lines...)
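To make the arithmetic in the fragmentation comment above concrete, a small worked sketch (the 4096-byte page size and the rounding expression are assumptions; the actual code uses AlignToNextPage() and kMaxChunkFragmentationBytes, which are defined in the collapsed lines):

  // Illustration only: why a 4097-byte request yields 4095 wasted bytes.
  const size_t kAssumedPageSize = 4096;
  const size_t requested = 4097;
  const size_t actual = (requested + kAssumedPageSize - 1) &
                        ~(kAssumedPageSize - 1);  // 8192
  const size_t wasted = actual - requested;       // 4095
  // Per the comment above, 4095 bytes is currently the maximum fragmentation
  // tolerated when reusing a free chunk (kMaxChunkFragmentationBytes).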
95 | 90 |
96 bool CloseAshmemRegion(int fd, size_t size, void* address) { | 91 bool CloseAshmemRegion(int fd, size_t size, void* address) { |
97 if (munmap(address, size) == -1) { | 92 if (munmap(address, size) == -1) { |
98 DPLOG(ERROR) << "Failed to unmap memory."; | 93 DPLOG(ERROR) << "Failed to unmap memory."; |
99 close(fd); | 94 close(fd); |
100 return false; | 95 return false; |
101 } | 96 } |
102 return close(fd) == 0; | 97 return close(fd) == 0; |
103 } | 98 } |
104 | 99 |
105 DiscardableMemoryLockStatus LockAshmemRegion(int fd, size_t off, size_t size) { | 100 bool LockAshmemRegion(int fd, size_t off, size_t size) { |
106 const int result = ashmem_pin_region(fd, off, size); | 101 return ashmem_pin_region(fd, off, size) != ASHMEM_WAS_PURGED; |
107 return result == ASHMEM_WAS_PURGED ? DISCARDABLE_MEMORY_LOCK_STATUS_PURGED | |
108 : DISCARDABLE_MEMORY_LOCK_STATUS_SUCCESS; | |
109 } | 102 } |
110 | 103 |
111 bool UnlockAshmemRegion(int fd, size_t off, size_t size) { | 104 bool UnlockAshmemRegion(int fd, size_t off, size_t size) { |
112 const int failed = ashmem_unpin_region(fd, off, size); | 105 const int failed = ashmem_unpin_region(fd, off, size); |
113 if (failed) | 106 if (failed) |
114 DLOG(ERROR) << "Failed to unpin memory."; | 107 DLOG(ERROR) << "Failed to unpin memory."; |
115 return !failed; | 108 return !failed; |
116 } | 109 } |
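LockAshmemRegion() and UnlockAshmemRegion() map the discardable-memory contract onto ashmem pinning. A hedged sketch of the caller pattern these helpers imply (RegenerateContents() and the local variables are hypothetical placeholders):

  if (!LockAshmemRegion(fd, offset, size)) {
    // ashmem_pin_region() reported ASHMEM_WAS_PURGED: the kernel reclaimed
    // the pages while they were unpinned, so their contents must be recreated.
    RegenerateContents(address, size);  // Hypothetical helper.
  }
  // ... read or write the locked range ...
  UnlockAshmemRegion(fd, offset, size);  // Pages become reclaimable again.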
117 | 110 |
118 } // namespace | 111 } // namespace |
119 | 112 |
120 namespace internal { | 113 namespace internal { |
121 | 114 |
122 class DiscardableMemoryAllocator::DiscardableAshmemChunk | 115 class AshmemRegion { |
123 : public DiscardableMemory { | |
124 public: | |
125 // Note that |ashmem_region| must outlive |this|. | |
126 DiscardableAshmemChunk(AshmemRegion* ashmem_region, | |
127 int fd, | |
128 void* address, | |
129 size_t offset, | |
130 size_t size) | |
131 : ashmem_region_(ashmem_region), | |
132 fd_(fd), | |
133 address_(address), | |
134 offset_(offset), | |
135 size_(size), | |
136 locked_(true) { | |
137 } | |
138 | |
139 // Implemented below AshmemRegion since this requires the full definition of | |
140 // AshmemRegion. | |
141 virtual ~DiscardableAshmemChunk(); | |
142 | |
143 // DiscardableMemory: | |
144 virtual DiscardableMemoryLockStatus Lock() OVERRIDE { | |
145 DCHECK(!locked_); | |
146 locked_ = true; | |
147 return LockAshmemRegion(fd_, offset_, size_); | |
148 } | |
149 | |
150 virtual void Unlock() OVERRIDE { | |
151 DCHECK(locked_); | |
152 locked_ = false; | |
153 UnlockAshmemRegion(fd_, offset_, size_); | |
154 } | |
155 | |
156 virtual void* Memory() const OVERRIDE { | |
157 return address_; | |
158 } | |
159 | |
160 private: | |
161 AshmemRegion* const ashmem_region_; | |
162 const int fd_; | |
163 void* const address_; | |
164 const size_t offset_; | |
165 const size_t size_; | |
166 bool locked_; | |
167 | |
168 DISALLOW_COPY_AND_ASSIGN(DiscardableAshmemChunk); | |
169 }; | |
170 | |
171 class DiscardableMemoryAllocator::AshmemRegion { | |
172 public: | 116 public: |
173 // Note that |allocator| must outlive |this|. | 117 // Note that |allocator| must outlive |this|. |
174 static scoped_ptr<AshmemRegion> Create( | 118 static scoped_ptr<AshmemRegion> Create( |
175 size_t size, | 119 size_t size, |
176 const std::string& name, | 120 const std::string& name, |
177 DiscardableMemoryAllocator* allocator) { | 121 DiscardableMemoryAllocator* allocator) { |
178 DCHECK_EQ(size, AlignToNextPage(size)); | 122 DCHECK_EQ(size, AlignToNextPage(size)); |
179 int fd; | 123 int fd; |
180 void* base; | 124 void* base; |
181 if (!CreateAshmemRegion(name.c_str(), size, &fd, &base)) | 125 if (!CreateAshmemRegion(name.c_str(), size, &fd, &base)) |
182 return scoped_ptr<AshmemRegion>(); | 126 return scoped_ptr<AshmemRegion>(); |
183 return make_scoped_ptr(new AshmemRegion(fd, size, base, allocator)); | 127 return make_scoped_ptr(new AshmemRegion(fd, size, base, allocator)); |
184 } | 128 } |
185 | 129 |
186 ~AshmemRegion() { | 130 ~AshmemRegion() { |
187 const bool result = CloseAshmemRegion(fd_, size_, base_); | 131 const bool result = CloseAshmemRegion(fd_, size_, base_); |
188 DCHECK(result); | 132 DCHECK(result); |
189 DCHECK(!highest_allocated_chunk_); | 133 DCHECK(!highest_allocated_chunk_); |
190 } | 134 } |
191 | 135 |
192 // Returns a new instance of DiscardableMemory whose size is greater than or | 136 // Returns a new instance of DiscardableAshmemChunk whose size is greater |
193 // equal to |actual_size| (which is expected to be greater than or equal to | 137 // than or equal to |actual_size| (which is expected to be greater than or |
194 // |client_requested_size|). | 138 // equal to |client_requested_size|). |
195 // Allocation works as follows: | 139 // Allocation works as follows: |
196 // 1) Reuse a previously freed chunk and return it if it succeeded. See | 140 // 1) Reuse a previously freed chunk and return it if it succeeded. See |
197 // ReuseFreeChunk_Locked() below for more information. | 141 // ReuseFreeChunk() below for more information. |
198 // 2) If no free chunk could be reused and the region is not big enough for | 142 // 2) If no free chunk could be reused and the region is not big enough for |
199 // the requested size then NULL is returned. | 143 // the requested size then NULL is returned. |
200 // 3) If there is enough room in the ashmem region then a new chunk is | 144 // 3) If there is enough room in the ashmem region then a new chunk is |
201 // returned. This new chunk starts at |offset_| which is the end of the | 145 // returned. This new chunk starts at |offset_| which is the end of the |
202 // previously highest chunk in the region. | 146 // previously highest chunk in the region. |
203 scoped_ptr<DiscardableMemory> Allocate_Locked(size_t client_requested_size, | 147 scoped_ptr<DiscardableAshmemChunk> Allocate(size_t client_requested_size, |
204 size_t actual_size) { | 148 size_t actual_size) { |
205 DCHECK_LE(client_requested_size, actual_size); | 149 DCHECK_LE(client_requested_size, actual_size); |
206 allocator_->lock_.AssertAcquired(); | |
207 | 150 |
208 // Check that the |highest_allocated_chunk_| field doesn't contain a stale | 151 // Check that the |highest_allocated_chunk_| field doesn't contain a stale |
209 // pointer. It should point to either a free chunk or a used chunk. | 152 // pointer. It should point to either a free chunk or a used chunk. |
210 DCHECK(!highest_allocated_chunk_ || | 153 DCHECK(!highest_allocated_chunk_ || |
211 address_to_free_chunk_map_.find(highest_allocated_chunk_) != | 154 address_to_free_chunk_map_.find(highest_allocated_chunk_) != |
212 address_to_free_chunk_map_.end() || | 155 address_to_free_chunk_map_.end() || |
213 used_to_previous_chunk_map_.find(highest_allocated_chunk_) != | 156 used_to_previous_chunk_map_.find(highest_allocated_chunk_) != |
214 used_to_previous_chunk_map_.end()); | 157 used_to_previous_chunk_map_.end()); |
215 | 158 |
216 scoped_ptr<DiscardableMemory> memory = ReuseFreeChunk_Locked( | 159 scoped_ptr<DiscardableAshmemChunk> memory = ReuseFreeChunk( |
217 client_requested_size, actual_size); | 160 client_requested_size, actual_size); |
218 if (memory) | 161 if (memory) |
219 return memory.Pass(); | 162 return memory.Pass(); |
220 | 163 |
221 if (size_ - offset_ < actual_size) { | 164 if (size_ - offset_ < actual_size) { |
222 // This region does not have enough space left to hold the requested size. | 165 // This region does not have enough space left to hold the requested size. |
223 return scoped_ptr<DiscardableMemory>(); | 166 return scoped_ptr<DiscardableAshmemChunk>(); |
224 } | 167 } |
225 | 168 |
226 void* const address = static_cast<char*>(base_) + offset_; | 169 void* const address = static_cast<char*>(base_) + offset_; |
227 memory.reset( | 170 memory.reset( |
228 new DiscardableAshmemChunk(this, fd_, address, offset_, actual_size)); | 171 new DiscardableAshmemChunk(this, fd_, address, offset_, actual_size)); |
229 | 172 |
230 used_to_previous_chunk_map_.insert( | 173 used_to_previous_chunk_map_.insert( |
231 std::make_pair(address, highest_allocated_chunk_)); | 174 std::make_pair(address, highest_allocated_chunk_)); |
232 highest_allocated_chunk_ = address; | 175 highest_allocated_chunk_ = address; |
233 offset_ += actual_size; | 176 offset_ += actual_size; |
234 DCHECK_LE(offset_, size_); | 177 DCHECK_LE(offset_, size_); |
235 return memory.Pass(); | 178 return memory.Pass(); |
236 } | 179 } |
237 | 180 |
238 void OnChunkDeletion(void* chunk, size_t size) { | 181 void OnChunkDeletion(void* chunk, size_t size) { |
239 AutoLock auto_lock(allocator_->lock_); | 182 MergeAndAddFreeChunk(chunk, size); |
240 MergeAndAddFreeChunk_Locked(chunk, size); | |
241 // Note that |this| might be deleted beyond this point. | 183 // Note that |this| might be deleted beyond this point. |
242 } | 184 } |
243 | 185 |
244 private: | 186 private: |
245 struct FreeChunk { | 187 struct FreeChunk { |
246 FreeChunk() : previous_chunk(NULL), start(NULL), size(0) {} | 188 FreeChunk() : previous_chunk(NULL), start(NULL), size(0) {} |
247 | 189 |
248 explicit FreeChunk(size_t size) | 190 explicit FreeChunk(size_t size) |
249 : previous_chunk(NULL), | 191 : previous_chunk(NULL), |
250 start(NULL), | 192 start(NULL), |
(...skipping 29 matching lines...)
280 allocator_(allocator), | 222 allocator_(allocator), |
281 highest_allocated_chunk_(NULL), | 223 highest_allocated_chunk_(NULL), |
282 offset_(0) { | 224 offset_(0) { |
283 DCHECK_GE(fd_, 0); | 225 DCHECK_GE(fd_, 0); |
284 DCHECK_GE(size, kMinAshmemRegionSize); | 226 DCHECK_GE(size, kMinAshmemRegionSize); |
285 DCHECK(base); | 227 DCHECK(base); |
286 DCHECK(allocator); | 228 DCHECK(allocator); |
287 } | 229 } |
288 | 230 |
289 // Tries to reuse a previously freed chunk by doing a closest size match. | 231 // Tries to reuse a previously freed chunk by doing a closest size match. |
290 scoped_ptr<DiscardableMemory> ReuseFreeChunk_Locked( | 232 scoped_ptr<DiscardableAshmemChunk> ReuseFreeChunk( |
291 size_t client_requested_size, | 233 size_t client_requested_size, |
292 size_t actual_size) { | 234 size_t actual_size) { |
293 allocator_->lock_.AssertAcquired(); | 235 const FreeChunk reused_chunk = RemoveFreeChunkFromIterator( |
294 const FreeChunk reused_chunk = RemoveFreeChunkFromIterator_Locked( | |
295 free_chunks_.lower_bound(FreeChunk(actual_size))); | 236 free_chunks_.lower_bound(FreeChunk(actual_size))); |
296 if (reused_chunk.is_null()) | 237 if (reused_chunk.is_null()) |
297 return scoped_ptr<DiscardableMemory>(); | 238 return scoped_ptr<DiscardableAshmemChunk>(); |
298 | 239 |
299 used_to_previous_chunk_map_.insert( | 240 used_to_previous_chunk_map_.insert( |
300 std::make_pair(reused_chunk.start, reused_chunk.previous_chunk)); | 241 std::make_pair(reused_chunk.start, reused_chunk.previous_chunk)); |
301 size_t reused_chunk_size = reused_chunk.size; | 242 size_t reused_chunk_size = reused_chunk.size; |
302 // |client_requested_size| is used below rather than |actual_size| to | 243 // |client_requested_size| is used below rather than |actual_size| to |
303 // reflect the number of bytes that would not be usable by the client (i.e. | 244 // reflect the number of bytes that would not be usable by the client (i.e. |
304 // wasted). Using |actual_size| instead would not allow us to detect | 245 // wasted). Using |actual_size| instead would not allow us to detect |
305 // fragmentation caused by the client if it made misaligned allocations. | 246 // fragmentation caused by the client if it made misaligned allocations. |
306 DCHECK_GE(reused_chunk.size, client_requested_size); | 247 DCHECK_GE(reused_chunk.size, client_requested_size); |
307 const size_t fragmentation_bytes = | 248 const size_t fragmentation_bytes = |
308 reused_chunk.size - client_requested_size; | 249 reused_chunk.size - client_requested_size; |
309 | 250 |
310 if (fragmentation_bytes > kMaxChunkFragmentationBytes) { | 251 if (fragmentation_bytes > kMaxChunkFragmentationBytes) { |
311 // Split the free chunk being recycled so that its unused tail doesn't get | 252 // Split the free chunk being recycled so that its unused tail doesn't get |
312 // reused (i.e. locked) which would prevent it from being evicted under | 253 // reused (i.e. locked) which would prevent it from being evicted under |
313 // memory pressure. | 254 // memory pressure. |
314 reused_chunk_size = actual_size; | 255 reused_chunk_size = actual_size; |
315 void* const new_chunk_start = | 256 void* const new_chunk_start = |
316 static_cast<char*>(reused_chunk.start) + actual_size; | 257 static_cast<char*>(reused_chunk.start) + actual_size; |
317 if (reused_chunk.start == highest_allocated_chunk_) { | 258 if (reused_chunk.start == highest_allocated_chunk_) { |
318 // We also need to update the pointer to the highest allocated chunk in | 259 // We also need to update the pointer to the highest allocated chunk in |
319 // case we are splitting the highest chunk. | 260 // case we are splitting the highest chunk. |
320 highest_allocated_chunk_ = new_chunk_start; | 261 highest_allocated_chunk_ = new_chunk_start; |
321 } | 262 } |
322 DCHECK_GT(reused_chunk.size, actual_size); | 263 DCHECK_GT(reused_chunk.size, actual_size); |
323 const size_t new_chunk_size = reused_chunk.size - actual_size; | 264 const size_t new_chunk_size = reused_chunk.size - actual_size; |
324 // Note that merging is not needed here since there can't be contiguous | 265 // Note that merging is not needed here since there can't be contiguous |
325 // free chunks at this point. | 266 // free chunks at this point. |
326 AddFreeChunk_Locked( | 267 AddFreeChunk( |
327 FreeChunk(reused_chunk.start, new_chunk_start, new_chunk_size)); | 268 FreeChunk(reused_chunk.start, new_chunk_start, new_chunk_size)); |
328 } | 269 } |
329 | 270 |
330 const size_t offset = | 271 const size_t offset = |
331 static_cast<char*>(reused_chunk.start) - static_cast<char*>(base_); | 272 static_cast<char*>(reused_chunk.start) - static_cast<char*>(base_); |
332 LockAshmemRegion(fd_, offset, reused_chunk_size); | 273 LockAshmemRegion(fd_, offset, reused_chunk_size); |
333 scoped_ptr<DiscardableMemory> memory( | 274 scoped_ptr<DiscardableAshmemChunk> memory( |
334 new DiscardableAshmemChunk(this, fd_, reused_chunk.start, offset, | 275 new DiscardableAshmemChunk( |
335 reused_chunk_size)); | 276 this, fd_, reused_chunk.start, offset, reused_chunk_size)); |
336 return memory.Pass(); | 277 return memory.Pass(); |
337 } | 278 } |
338 | 279 |
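ReuseFreeChunk() works because free_chunks_ is a std::multiset ordered by chunk size, so lower_bound() returns the smallest free chunk that can hold actual_size (the closest size match). A self-contained sketch of that lookup, with illustrative sizes:

  #include <cstddef>
  #include <set>

  // Simplified stand-in for this file's FreeChunk; only the size matters here.
  struct Chunk {
    std::size_t size;
    bool operator<(const Chunk& other) const { return size < other.size; }
  };

  void ClosestSizeMatchExample() {
    std::multiset<Chunk> free_chunks;
    const Chunk c4k = {4096}, c8k = {8192}, c20k = {20480};
    free_chunks.insert(c4k);
    free_chunks.insert(c8k);
    free_chunks.insert(c20k);
    // A request rounded up to 8192 bytes reuses the 8192-byte chunk, i.e. the
    // smallest free chunk that fits.
    const Chunk wanted = {8192};
    std::multiset<Chunk>::const_iterator it = free_chunks.lower_bound(wanted);
    // it->size == 8192 here. If the best match were much larger than the
    // request, ReuseFreeChunk() above would split it and return the unused
    // tail to free_chunks_ so it stays unpinned and reclaimable.
  }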
339 // Makes the chunk identified by the provided arguments free and possibly | 280 // Makes the chunk identified by the provided arguments free and possibly |
340 // merges this chunk with the previous and next contiguous ones. | 281 // merges this chunk with the previous and next contiguous ones. |
341 // If the provided chunk is the only one used (and going to be freed) in the | 282 // If the provided chunk is the only one used (and going to be freed) in the |
342 // region then the internal ashmem region is closed so that the underlying | 283 // region then the internal ashmem region is closed so that the underlying |
343 // physical pages are immediately released. | 284 // physical pages are immediately released. |
344 // Note that free chunks are unlocked, therefore they can be reclaimed by the | 285 // Note that free chunks are unlocked, therefore they can be reclaimed by the |
345 // kernel if needed (under memory pressure) but they are not immediately | 286 // kernel if needed (under memory pressure) but they are not immediately |
346 // released unfortunately since madvise(MADV_REMOVE) and | 287 // released unfortunately since madvise(MADV_REMOVE) and |
347 // fallocate(FALLOC_FL_PUNCH_HOLE) don't seem to work on ashmem. This might | 288 // fallocate(FALLOC_FL_PUNCH_HOLE) don't seem to work on ashmem. This might |
348 // change in kernel versions >= 3.5 though. The fact that free chunks are | 289 // change in kernel versions >= 3.5 though. The fact that free chunks are |
349 // not immediately released is the reason why we are trying to minimize | 290 // not immediately released is the reason why we are trying to minimize |
350 // fragmentation in order not to cause "artificial" memory pressure. | 291 // fragmentation in order not to cause "artificial" memory pressure. |
351 void MergeAndAddFreeChunk_Locked(void* chunk, size_t size) { | 292 void MergeAndAddFreeChunk(void* chunk, size_t size) { |
352 allocator_->lock_.AssertAcquired(); | |
353 size_t new_free_chunk_size = size; | 293 size_t new_free_chunk_size = size; |
354 // Merge with the previous chunk. | 294 // Merge with the previous chunk. |
355 void* first_free_chunk = chunk; | 295 void* first_free_chunk = chunk; |
356 DCHECK(!used_to_previous_chunk_map_.empty()); | 296 DCHECK(!used_to_previous_chunk_map_.empty()); |
357 const hash_map<void*, void*>::iterator previous_chunk_it = | 297 const hash_map<void*, void*>::iterator previous_chunk_it = |
358 used_to_previous_chunk_map_.find(chunk); | 298 used_to_previous_chunk_map_.find(chunk); |
359 DCHECK(previous_chunk_it != used_to_previous_chunk_map_.end()); | 299 DCHECK(previous_chunk_it != used_to_previous_chunk_map_.end()); |
360 void* previous_chunk = previous_chunk_it->second; | 300 void* previous_chunk = previous_chunk_it->second; |
361 used_to_previous_chunk_map_.erase(previous_chunk_it); | 301 used_to_previous_chunk_map_.erase(previous_chunk_it); |
362 | 302 |
363 if (previous_chunk) { | 303 if (previous_chunk) { |
364 const FreeChunk free_chunk = RemoveFreeChunk_Locked(previous_chunk); | 304 const FreeChunk free_chunk = RemoveFreeChunk(previous_chunk); |
365 if (!free_chunk.is_null()) { | 305 if (!free_chunk.is_null()) { |
366 new_free_chunk_size += free_chunk.size; | 306 new_free_chunk_size += free_chunk.size; |
367 first_free_chunk = previous_chunk; | 307 first_free_chunk = previous_chunk; |
368 if (chunk == highest_allocated_chunk_) | 308 if (chunk == highest_allocated_chunk_) |
369 highest_allocated_chunk_ = previous_chunk; | 309 highest_allocated_chunk_ = previous_chunk; |
370 | 310 |
371 // There should not be more contiguous previous free chunks. | 311 // There should not be more contiguous previous free chunks. |
372 previous_chunk = free_chunk.previous_chunk; | 312 previous_chunk = free_chunk.previous_chunk; |
373 DCHECK(!address_to_free_chunk_map_.count(previous_chunk)); | 313 DCHECK(!address_to_free_chunk_map_.count(previous_chunk)); |
374 } | 314 } |
375 } | 315 } |
376 | 316 |
377 // Merge with the next chunk if free and present. | 317 // Merge with the next chunk if free and present. |
378 void* next_chunk = static_cast<char*>(chunk) + size; | 318 void* next_chunk = static_cast<char*>(chunk) + size; |
379 const FreeChunk next_free_chunk = RemoveFreeChunk_Locked(next_chunk); | 319 const FreeChunk next_free_chunk = RemoveFreeChunk(next_chunk); |
380 if (!next_free_chunk.is_null()) { | 320 if (!next_free_chunk.is_null()) { |
381 new_free_chunk_size += next_free_chunk.size; | 321 new_free_chunk_size += next_free_chunk.size; |
382 if (next_free_chunk.start == highest_allocated_chunk_) | 322 if (next_free_chunk.start == highest_allocated_chunk_) |
383 highest_allocated_chunk_ = first_free_chunk; | 323 highest_allocated_chunk_ = first_free_chunk; |
384 | 324 |
385 // Same as above. | 325 // Same as above. |
386 DCHECK(!address_to_free_chunk_map_.count(static_cast<char*>(next_chunk) + | 326 DCHECK(!address_to_free_chunk_map_.count(static_cast<char*>(next_chunk) + |
387 next_free_chunk.size)); | 327 next_free_chunk.size)); |
388 } | 328 } |
389 | 329 |
390 const bool whole_ashmem_region_is_free = | 330 const bool whole_ashmem_region_is_free = |
391 used_to_previous_chunk_map_.empty(); | 331 used_to_previous_chunk_map_.empty(); |
392 if (!whole_ashmem_region_is_free) { | 332 if (!whole_ashmem_region_is_free) { |
393 AddFreeChunk_Locked( | 333 AddFreeChunk( |
394 FreeChunk(previous_chunk, first_free_chunk, new_free_chunk_size)); | 334 FreeChunk(previous_chunk, first_free_chunk, new_free_chunk_size)); |
395 return; | 335 return; |
396 } | 336 } |
397 | 337 |
398 // The whole ashmem region is free thus it can be deleted. | 338 // The whole ashmem region is free thus it can be deleted. |
399 DCHECK_EQ(base_, first_free_chunk); | 339 DCHECK_EQ(base_, first_free_chunk); |
400 DCHECK_EQ(base_, highest_allocated_chunk_); | 340 DCHECK_EQ(base_, highest_allocated_chunk_); |
401 DCHECK(free_chunks_.empty()); | 341 DCHECK(free_chunks_.empty()); |
402 DCHECK(address_to_free_chunk_map_.empty()); | 342 DCHECK(address_to_free_chunk_map_.empty()); |
403 DCHECK(used_to_previous_chunk_map_.empty()); | 343 DCHECK(used_to_previous_chunk_map_.empty()); |
404 highest_allocated_chunk_ = NULL; | 344 highest_allocated_chunk_ = NULL; |
405 allocator_->DeleteAshmemRegion_Locked(this); // Deletes |this|. | 345 allocator_->DeleteAshmemRegion(this); // Deletes |this|. |
406 } | 346 } |
407 | 347 |
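A schematic of the two merge directions handled above, with illustrative offsets (not taken from the CL):

  // Layout before freeing B (offsets are made up for illustration):
  //   [A used 0..8K][B used 8K..16K][C free 16K..32K][D used 32K..40K]
  // MergeAndAddFreeChunk(B, 8K):
  //   - the previous chunk A is still used  -> no merge to the left;
  //   - the next chunk C is free            -> merge to the right.
  // Result: [A used 0..8K][B+C free 8K..32K][D used 32K..40K], and
  // AddFreeChunk() updates used_to_previous_chunk_map_[D] to point at the
  // merged free chunk. If instead every chunk in the region had become free,
  // the AshmemRegion itself would be deleted (the final branch above),
  // releasing its pages.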
408 void AddFreeChunk_Locked(const FreeChunk& free_chunk) { | 348 void AddFreeChunk(const FreeChunk& free_chunk) { |
409 allocator_->lock_.AssertAcquired(); | |
410 const std::multiset<FreeChunk>::iterator it = free_chunks_.insert( | 349 const std::multiset<FreeChunk>::iterator it = free_chunks_.insert( |
411 free_chunk); | 350 free_chunk); |
412 address_to_free_chunk_map_.insert(std::make_pair(free_chunk.start, it)); | 351 address_to_free_chunk_map_.insert(std::make_pair(free_chunk.start, it)); |
413 // Update the next used contiguous chunk, if any, since its previous chunk | 352 // Update the next used contiguous chunk, if any, since its previous chunk |
414 // may have changed due to free chunks merging/splitting. | 353 // may have changed due to free chunks merging/splitting. |
415 void* const next_used_contiguous_chunk = | 354 void* const next_used_contiguous_chunk = |
416 static_cast<char*>(free_chunk.start) + free_chunk.size; | 355 static_cast<char*>(free_chunk.start) + free_chunk.size; |
417 hash_map<void*, void*>::iterator previous_it = | 356 hash_map<void*, void*>::iterator previous_it = |
418 used_to_previous_chunk_map_.find(next_used_contiguous_chunk); | 357 used_to_previous_chunk_map_.find(next_used_contiguous_chunk); |
419 if (previous_it != used_to_previous_chunk_map_.end()) | 358 if (previous_it != used_to_previous_chunk_map_.end()) |
420 previous_it->second = free_chunk.start; | 359 previous_it->second = free_chunk.start; |
421 } | 360 } |
422 | 361 |
423 // Finds and removes the free chunk, if any, whose start address is | 362 // Finds and removes the free chunk, if any, whose start address is |
424 // |chunk_start|. Returns a copy of the unlinked free chunk or a free chunk | 363 // |chunk_start|. Returns a copy of the unlinked free chunk or a free chunk |
425 // whose content is null if it was not found. | 364 // whose content is null if it was not found. |
426 FreeChunk RemoveFreeChunk_Locked(void* chunk_start) { | 365 FreeChunk RemoveFreeChunk(void* chunk_start) { |
427 allocator_->lock_.AssertAcquired(); | |
428 const hash_map< | 366 const hash_map< |
429 void*, std::multiset<FreeChunk>::iterator>::iterator it = | 367 void*, std::multiset<FreeChunk>::iterator>::iterator it = |
430 address_to_free_chunk_map_.find(chunk_start); | 368 address_to_free_chunk_map_.find(chunk_start); |
431 if (it == address_to_free_chunk_map_.end()) | 369 if (it == address_to_free_chunk_map_.end()) |
432 return FreeChunk(); | 370 return FreeChunk(); |
433 return RemoveFreeChunkFromIterator_Locked(it->second); | 371 return RemoveFreeChunkFromIterator(it->second); |
434 } | 372 } |
435 | 373 |
436 // Same as above but takes an iterator in. | 374 // Same as above but takes an iterator in. |
437 FreeChunk RemoveFreeChunkFromIterator_Locked( | 375 FreeChunk RemoveFreeChunkFromIterator( |
438 std::multiset<FreeChunk>::iterator free_chunk_it) { | 376 std::multiset<FreeChunk>::iterator free_chunk_it) { |
439 allocator_->lock_.AssertAcquired(); | |
440 if (free_chunk_it == free_chunks_.end()) | 377 if (free_chunk_it == free_chunks_.end()) |
441 return FreeChunk(); | 378 return FreeChunk(); |
442 DCHECK(free_chunk_it != free_chunks_.end()); | 379 DCHECK(free_chunk_it != free_chunks_.end()); |
443 const FreeChunk free_chunk(*free_chunk_it); | 380 const FreeChunk free_chunk(*free_chunk_it); |
444 address_to_free_chunk_map_.erase(free_chunk_it->start); | 381 address_to_free_chunk_map_.erase(free_chunk_it->start); |
445 free_chunks_.erase(free_chunk_it); | 382 free_chunks_.erase(free_chunk_it); |
446 return free_chunk; | 383 return free_chunk; |
447 } | 384 } |
448 | 385 |
449 const int fd_; | 386 const int fd_; |
(...skipping 16 matching lines...)
466 // obviously). | 403 // obviously). |
467 hash_map< | 404 hash_map< |
468 void*, std::multiset<FreeChunk>::iterator> address_to_free_chunk_map_; | 405 void*, std::multiset<FreeChunk>::iterator> address_to_free_chunk_map_; |
469 // Maps the address of *used* chunks to the address of their previous | 406 // Maps the address of *used* chunks to the address of their previous |
470 // contiguous chunk. | 407 // contiguous chunk. |
471 hash_map<void*, void*> used_to_previous_chunk_map_; | 408 hash_map<void*, void*> used_to_previous_chunk_map_; |
472 | 409 |
473 DISALLOW_COPY_AND_ASSIGN(AshmemRegion); | 410 DISALLOW_COPY_AND_ASSIGN(AshmemRegion); |
474 }; | 411 }; |
475 | 412 |
476 DiscardableMemoryAllocator::DiscardableAshmemChunk::~DiscardableAshmemChunk() { | 413 DiscardableAshmemChunk::~DiscardableAshmemChunk() { |
477 if (locked_) | 414 if (locked_) |
478 UnlockAshmemRegion(fd_, offset_, size_); | 415 UnlockAshmemRegion(fd_, offset_, size_); |
479 ashmem_region_->OnChunkDeletion(address_, size_); | 416 ashmem_region_->OnChunkDeletion(address_, size_); |
480 } | 417 } |
481 | 418 |
| 419 bool DiscardableAshmemChunk::Lock() { |
| 420 DCHECK(!locked_); |
| 421 locked_ = true; |
| 422 return LockAshmemRegion(fd_, offset_, size_); |
| 423 } |
| 424 |
| 425 void DiscardableAshmemChunk::Unlock() { |
| 426 DCHECK(locked_); |
| 427 locked_ = false; |
| 428 UnlockAshmemRegion(fd_, offset_, size_); |
| 429 } |
| 430 |
| 431 void* DiscardableAshmemChunk::Memory() const { |
| 432 return address_; |
| 433 } |
| 434 |
| 435 // Note that |ashmem_region| must outlive |this|. |
| 436 DiscardableAshmemChunk::DiscardableAshmemChunk(AshmemRegion* ashmem_region, |
| 437 int fd, |
| 438 void* address, |
| 439 size_t offset, |
| 440 size_t size) |
| 441 : ashmem_region_(ashmem_region), |
| 442 fd_(fd), |
| 443 address_(address), |
| 444 offset_(offset), |
| 445 size_(size), |
| 446 locked_(true) { |
| 447 } |
| 448 |
482 DiscardableMemoryAllocator::DiscardableMemoryAllocator( | 449 DiscardableMemoryAllocator::DiscardableMemoryAllocator( |
483 const std::string& name, | 450 const std::string& name, |
484 size_t ashmem_region_size) | 451 size_t ashmem_region_size) |
485 : name_(name), | 452 : name_(name), |
486 ashmem_region_size_( | 453 ashmem_region_size_( |
487 std::max(kMinAshmemRegionSize, AlignToNextPage(ashmem_region_size))), | 454 std::max(kMinAshmemRegionSize, AlignToNextPage(ashmem_region_size))), |
488 last_ashmem_region_size_(0) { | 455 last_ashmem_region_size_(0) { |
489 DCHECK_GE(ashmem_region_size_, kMinAshmemRegionSize); | 456 DCHECK_GE(ashmem_region_size_, kMinAshmemRegionSize); |
490 } | 457 } |
491 | 458 |
492 DiscardableMemoryAllocator::~DiscardableMemoryAllocator() { | 459 DiscardableMemoryAllocator::~DiscardableMemoryAllocator() { |
493 DCHECK(thread_checker_.CalledOnValidThread()); | |
494 DCHECK(ashmem_regions_.empty()); | 460 DCHECK(ashmem_regions_.empty()); |
495 } | 461 } |
496 | 462 |
497 scoped_ptr<DiscardableMemory> DiscardableMemoryAllocator::Allocate( | 463 scoped_ptr<DiscardableAshmemChunk> DiscardableMemoryAllocator::Allocate( |
498 size_t size) { | 464 size_t size) { |
499 const size_t aligned_size = AlignToNextPage(size); | 465 const size_t aligned_size = AlignToNextPage(size); |
500 if (!aligned_size) | 466 if (!aligned_size) |
501 return scoped_ptr<DiscardableMemory>(); | 467 return scoped_ptr<DiscardableAshmemChunk>(); |
502 // TODO(pliard): make this function less naive by e.g. moving the free chunks | 468 // TODO(pliard): make this function less naive by e.g. moving the free chunks |
503 // multiset to the allocator itself in order to further decrease fragmentation | 469 // multiset to the allocator itself in order to further decrease fragmentation |
504 // and speed up allocation. Note that there should not be more than a | 470 // and speed up allocation. Note that there should not be more than a |
505 // couple (=5) of AshmemRegion instances in practice though. | 471 // couple (=5) of AshmemRegion instances in practice though. |
506 AutoLock auto_lock(lock_); | |
507 DCHECK_LE(ashmem_regions_.size(), 5U); | 472 DCHECK_LE(ashmem_regions_.size(), 5U); |
508 for (ScopedVector<AshmemRegion>::iterator it = ashmem_regions_.begin(); | 473 for (ScopedVector<AshmemRegion>::iterator it = ashmem_regions_.begin(); |
509 it != ashmem_regions_.end(); ++it) { | 474 it != ashmem_regions_.end(); ++it) { |
510 scoped_ptr<DiscardableMemory> memory( | 475 scoped_ptr<DiscardableAshmemChunk> memory( |
511 (*it)->Allocate_Locked(size, aligned_size)); | 476 (*it)->Allocate(size, aligned_size)); |
512 if (memory) | 477 if (memory) |
513 return memory.Pass(); | 478 return memory.Pass(); |
514 } | 479 } |
515 // The creation of the (large) ashmem region might fail if the address space | 480 // The creation of the (large) ashmem region might fail if the address space |
516 // is too fragmented. If creation fails, the allocator retries by | 481 // is too fragmented. If creation fails, the allocator retries by |
517 // repeatedly dividing the size by 2. | 482 // repeatedly dividing the size by 2. |
518 const size_t min_region_size = std::max(kMinAshmemRegionSize, aligned_size); | 483 const size_t min_region_size = std::max(kMinAshmemRegionSize, aligned_size); |
519 for (size_t region_size = std::max(ashmem_region_size_, aligned_size); | 484 for (size_t region_size = std::max(ashmem_region_size_, aligned_size); |
520 region_size >= min_region_size; | 485 region_size >= min_region_size; |
521 region_size = AlignToNextPage(region_size / 2)) { | 486 region_size = AlignToNextPage(region_size / 2)) { |
522 scoped_ptr<AshmemRegion> new_region( | 487 scoped_ptr<AshmemRegion> new_region( |
523 AshmemRegion::Create(region_size, name_.c_str(), this)); | 488 AshmemRegion::Create(region_size, name_.c_str(), this)); |
524 if (!new_region) | 489 if (!new_region) |
525 continue; | 490 continue; |
526 last_ashmem_region_size_ = region_size; | 491 last_ashmem_region_size_ = region_size; |
527 ashmem_regions_.push_back(new_region.release()); | 492 ashmem_regions_.push_back(new_region.release()); |
528 return ashmem_regions_.back()->Allocate_Locked(size, aligned_size); | 493 return ashmem_regions_.back()->Allocate(size, aligned_size); |
529 } | 494 } |
530 // TODO(pliard): consider adding a histogram to see how often this happens. | 495 // TODO(pliard): consider adding a histogram to see how often this happens. |
531 return scoped_ptr<DiscardableMemory>(); | 496 return scoped_ptr<DiscardableAshmemChunk>(); |
532 } | 497 } |
533 | 498 |
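The fallback loop above halves the region size on each failed creation attempt. An illustrative progression, assuming the default sizes (the concrete value of kMinAshmemRegionSize lives in the collapsed portion of this file):

  // Assuming ashmem_region_size_ == 32 MB and an aligned request of 64 KB:
  //   try 32 MB -> 16 MB -> 8 MB -> 4 MB -> ... down to
  //   max(kMinAshmemRegionSize, aligned_size).
  // The first AshmemRegion::Create() that succeeds is kept, and
  // last_ashmem_region_size_ records the size that worked; if even the
  // smallest size fails, Allocate() returns an empty scoped_ptr.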
534 size_t DiscardableMemoryAllocator::last_ashmem_region_size() const { | 499 size_t DiscardableMemoryAllocator::last_ashmem_region_size() const { |
535 AutoLock auto_lock(lock_); | |
536 return last_ashmem_region_size_; | 500 return last_ashmem_region_size_; |
537 } | 501 } |
538 | 502 |
539 void DiscardableMemoryAllocator::DeleteAshmemRegion_Locked( | 503 void DiscardableMemoryAllocator::DeleteAshmemRegion(AshmemRegion* region) { |
540 AshmemRegion* region) { | |
541 lock_.AssertAcquired(); | |
542 // Note that there should not be more than a couple of ashmem region instances | 504 // Note that there should not be more than a couple of ashmem region instances |
543 // in |ashmem_regions_|. | 505 // in |ashmem_regions_|. |
544 DCHECK_LE(ashmem_regions_.size(), 5U); | 506 DCHECK_LE(ashmem_regions_.size(), 5U); |
545 const ScopedVector<AshmemRegion>::iterator it = std::find( | 507 const ScopedVector<AshmemRegion>::iterator it = std::find( |
546 ashmem_regions_.begin(), ashmem_regions_.end(), region); | 508 ashmem_regions_.begin(), ashmem_regions_.end(), region); |
547 DCHECK_NE(ashmem_regions_.end(), it); | 509 DCHECK_NE(ashmem_regions_.end(), it); |
548 std::swap(*it, ashmem_regions_.back()); | 510 std::swap(*it, ashmem_regions_.back()); |
549 ashmem_regions_.pop_back(); | 511 ashmem_regions_.pop_back(); |
550 } | 512 } |
551 | 513 |
552 } // namespace internal | 514 } // namespace internal |
553 } // namespace base | 515 } // namespace base |