OLD | NEW |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/memory/discardable_memory_allocator_android.h" | 5 #include "base/memory/discardable_memory_ashmem_allocator.h" |
6 | 6 |
7 #include <sys/mman.h> | 7 #include <sys/mman.h> |
8 #include <unistd.h> | 8 #include <unistd.h> |
9 | 9 |
10 #include <algorithm> | 10 #include <algorithm> |
11 #include <cmath> | 11 #include <cmath> |
12 #include <limits> | 12 #include <limits> |
13 #include <set> | 13 #include <set> |
14 #include <utility> | 14 #include <utility> |
15 | 15 |
16 #include "base/basictypes.h" | 16 #include "base/basictypes.h" |
17 #include "base/containers/hash_tables.h" | 17 #include "base/containers/hash_tables.h" |
18 #include "base/file_util.h" | 18 #include "base/file_util.h" |
19 #include "base/files/scoped_file.h" | 19 #include "base/files/scoped_file.h" |
20 #include "base/logging.h" | 20 #include "base/logging.h" |
21 #include "base/memory/discardable_memory.h" | |
22 #include "base/memory/scoped_vector.h" | 21 #include "base/memory/scoped_vector.h" |
23 #include "base/synchronization/lock.h" | |
24 #include "base/threading/thread_checker.h" | |
25 #include "third_party/ashmem/ashmem.h" | 22 #include "third_party/ashmem/ashmem.h" |
26 | 23 |
27 // The allocator consists of three parts (classes): | 24 // The allocator consists of three parts (classes): |
28 // - DiscardableMemoryAllocator: entry point of all allocations (through its | 25 // - DiscardableMemoryAshmemAllocator: entry point of all allocations (through |
29 // Allocate() method) that are dispatched to the AshmemRegion instances (which | 26 // its Allocate() method) that are dispatched to the AshmemRegion instances |
30 // it owns). | 27 // (which it owns). |
31 // - AshmemRegion: manages allocations and destructions inside a single large | 28 // - AshmemRegion: manages allocations and destructions inside a single large |
32 // (e.g. 32 MBytes) ashmem region. | 29 // (e.g. 32 MBytes) ashmem region. |
33 // - DiscardableAshmemChunk: class implementing the DiscardableMemory interface | 30 // - DiscardableAshmemChunk: class mimicking the DiscardableMemory interface |
34 // whose instances are returned to the client. DiscardableAshmemChunk lets the | 31 // whose instances are returned to the client. |
35 // client seamlessly operate on a subrange of the ashmem region managed by | |
36 // AshmemRegion. | |
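A minimal usage sketch of the three classes described above, using only names visible in this diff; the allocator name, region size, and request size are illustrative assumptions, not values from the source:

// Hedged usage sketch; sizes and names below are illustrative assumptions.
#include "base/memory/discardable_memory_ashmem_allocator.h"

void UsageSketch() {
  base::internal::DiscardableMemoryAshmemAllocator allocator(
      "ExampleAllocator", 32 * 1024 * 1024);  // Backing region size hint.
  scoped_ptr<base::internal::DiscardableAshmemChunk> chunk =
      allocator.Allocate(1024);  // Dispatched to an owned AshmemRegion.
  if (!chunk)
    return;  // Allocation can fail, e.g. on address space exhaustion.
  void* data = chunk->Memory();  // Only valid while the chunk is locked.
  (void) data;
  chunk->Unlock();  // Pages become purgeable under memory pressure.
  if (!chunk->Lock()) {
    // The kernel purged the pages; the caller must regenerate the data.
  }
}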
37 | 32 |
38 namespace base { | 33 namespace base { |
39 namespace { | 34 namespace { |
40 | 35 |
41 // Only tolerate fragmentation in used chunks *caused by the client* (as opposed | 36 // Only tolerate fragmentation in used chunks *caused by the client* (as opposed |
42 // to the allocator when a free chunk is reused). The client can cause such | 37 // to the allocator when a free chunk is reused). The client can cause such |
43 // fragmentation by e.g. requesting 4097 bytes. This size would be rounded up to | 38 // fragmentation by e.g. requesting 4097 bytes. This size would be rounded up to |
44 // 8192 by the allocator which would cause 4095 bytes of fragmentation (which is | 39 // 8192 by the allocator which would cause 4095 bytes of fragmentation (which is |
45 // currently the maximum allowed). If the client requests 4096 bytes and a free | 40 // currently the maximum allowed). If the client requests 4096 bytes and a free |
46 // chunk of 8192 bytes is available then the free chunk gets split into two | 41 // chunk of 8192 bytes is available then the free chunk gets split into two |
(...skipping 48 matching lines...)
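The rounding arithmetic behind the fragmentation example above, as a stand-alone sketch; the page size constant and helper name are assumptions (the file's real AlignToNextPage() is in the lines skipped here):

// Illustrative rounding sketch; kAssumedPageSize and AlignUpSketch() are
// not this file's symbols.
#include <stddef.h>

const size_t kAssumedPageSize = 4096;

size_t AlignUpSketch(size_t size) {
  return (size + kAssumedPageSize - 1) & ~(kAssumedPageSize - 1);
}

// AlignUpSketch(4097) == 8192: 8192 - 4097 == 4095 bytes of client-caused
// fragmentation, the maximum tolerated.
// AlignUpSketch(4096) == 4096: an 8192-byte free chunk is split into two
// 4096-byte chunks rather than wasting the second half.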
95 | 90 |
96 bool CloseAshmemRegion(int fd, size_t size, void* address) { | 91 bool CloseAshmemRegion(int fd, size_t size, void* address) { |
97 if (munmap(address, size) == -1) { | 92 if (munmap(address, size) == -1) { |
98 DPLOG(ERROR) << "Failed to unmap memory."; | 93 DPLOG(ERROR) << "Failed to unmap memory."; |
99 close(fd); | 94 close(fd); |
100 return false; | 95 return false; |
101 } | 96 } |
102 return close(fd) == 0; | 97 return close(fd) == 0; |
103 } | 98 } |
104 | 99 |
105 DiscardableMemoryLockStatus LockAshmemRegion(int fd, size_t off, size_t size) { | 100 bool LockAshmemRegion(int fd, size_t off, size_t size) { |
106 const int result = ashmem_pin_region(fd, off, size); | 101 return ashmem_pin_region(fd, off, size) != ASHMEM_WAS_PURGED; |
107 return result == ASHMEM_WAS_PURGED ? DISCARDABLE_MEMORY_LOCK_STATUS_PURGED | |
108 : DISCARDABLE_MEMORY_LOCK_STATUS_SUCCESS; | |
109 } | 102 } |
110 | 103 |
111 bool UnlockAshmemRegion(int fd, size_t off, size_t size) { | 104 bool UnlockAshmemRegion(int fd, size_t off, size_t size) { |
112 const int failed = ashmem_unpin_region(fd, off, size); | 105 const int failed = ashmem_unpin_region(fd, off, size); |
113 if (failed) | 106 if (failed) |
114 DLOG(ERROR) << "Failed to unpin memory."; | 107 DLOG(ERROR) << "Failed to unpin memory."; |
115 return !failed; | 108 return !failed; |
116 } | 109 } |
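A hedged round-trip sketch of the ashmem calls wrapped by the two helpers above; the fd and length are assumptions, while the functions and the ASHMEM_WAS_PURGED constant come from third_party/ashmem/ashmem.h as included in this file:

// Pin/unpin round trip; |fd| must be a valid ashmem region descriptor.
#include "third_party/ashmem/ashmem.h"

bool PinFirstPageSketch(int fd) {
  // Pinning (locking) makes the pages unreclaimable and reports whether
  // the kernel purged them while they were unpinned.
  const bool purged = ashmem_pin_region(fd, 0, 4096) == ASHMEM_WAS_PURGED;
  ashmem_unpin_region(fd, 0, 4096);  // Unpin: pages reclaimable again.
  return !purged;  // False means the previous contents were lost.
}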
117 | 110 |
118 } // namespace | 111 } // namespace |
119 | 112 |
120 namespace internal { | 113 namespace internal { |
121 | 114 |
122 class DiscardableMemoryAllocator::DiscardableAshmemChunk | 115 class AshmemRegion { |
123 : public DiscardableMemory { | |
124 public: | |
125 // Note that |ashmem_region| must outlive |this|. | |
126 DiscardableAshmemChunk(AshmemRegion* ashmem_region, | |
127 int fd, | |
128 void* address, | |
129 size_t offset, | |
130 size_t size) | |
131 : ashmem_region_(ashmem_region), | |
132 fd_(fd), | |
133 address_(address), | |
134 offset_(offset), | |
135 size_(size), | |
136 locked_(true) { | |
137 } | |
138 | |
139 // Implemented below AshmemRegion since this requires the full definition of | |
140 // AshmemRegion. | |
141 virtual ~DiscardableAshmemChunk(); | |
142 | |
143 // DiscardableMemory: | |
144 virtual DiscardableMemoryLockStatus Lock() OVERRIDE { | |
145 DCHECK(!locked_); | |
146 locked_ = true; | |
147 return LockAshmemRegion(fd_, offset_, size_); | |
148 } | |
149 | |
150 virtual void Unlock() OVERRIDE { | |
151 DCHECK(locked_); | |
152 locked_ = false; | |
153 UnlockAshmemRegion(fd_, offset_, size_); | |
154 } | |
155 | |
156 virtual void* Memory() const OVERRIDE { | |
157 return address_; | |
158 } | |
159 | |
160 private: | |
161 AshmemRegion* const ashmem_region_; | |
162 const int fd_; | |
163 void* const address_; | |
164 const size_t offset_; | |
165 const size_t size_; | |
166 bool locked_; | |
167 | |
168 DISALLOW_COPY_AND_ASSIGN(DiscardableAshmemChunk); | |
169 }; | |
170 | |
171 class DiscardableMemoryAllocator::AshmemRegion { | |
172 public: | 116 public: |
173 // Note that |allocator| must outlive |this|. | 117 // Note that |allocator| must outlive |this|. |
174 static scoped_ptr<AshmemRegion> Create( | 118 static scoped_ptr<AshmemRegion> Create( |
175 size_t size, | 119 size_t size, |
176 const std::string& name, | 120 const std::string& name, |
177 DiscardableMemoryAllocator* allocator) { | 121 DiscardableMemoryAshmemAllocator* allocator) { |
178 DCHECK_EQ(size, AlignToNextPage(size)); | 122 DCHECK_EQ(size, AlignToNextPage(size)); |
179 int fd; | 123 int fd; |
180 void* base; | 124 void* base; |
181 if (!CreateAshmemRegion(name.c_str(), size, &fd, &base)) | 125 if (!CreateAshmemRegion(name.c_str(), size, &fd, &base)) |
182 return scoped_ptr<AshmemRegion>(); | 126 return scoped_ptr<AshmemRegion>(); |
183 return make_scoped_ptr(new AshmemRegion(fd, size, base, allocator)); | 127 return make_scoped_ptr(new AshmemRegion(fd, size, base, allocator)); |
184 } | 128 } |
185 | 129 |
186 ~AshmemRegion() { | 130 ~AshmemRegion() { |
187 const bool result = CloseAshmemRegion(fd_, size_, base_); | 131 const bool result = CloseAshmemRegion(fd_, size_, base_); |
188 DCHECK(result); | 132 DCHECK(result); |
189 DCHECK(!highest_allocated_chunk_); | 133 DCHECK(!highest_allocated_chunk_); |
190 } | 134 } |
191 | 135 |
192 // Returns a new instance of DiscardableMemory whose size is greater than or | 136 // Returns a new instance of DiscardableAshmemChunk whose size is greater |
193 // equal to |actual_size| (which is expected to be greater than or equal to | 137 // than or equal to |actual_size| (which is expected to be greater than or |
194 // |client_requested_size|). | 138 // equal to |client_requested_size|). |
195 // Allocation works as follows: | 139 // Allocation works as follows: |
196 // 1) Reuse a previously freed chunk and return it if it succeeded. See | 140 // 1) Reuse a previously freed chunk and return it if it succeeded. See |
197 // ReuseFreeChunk_Locked() below for more information. | 141 // ReuseFreeChunk_Locked() below for more information. |
198 // 2) If no free chunk could be reused and the region is not big enough for | 142 // 2) If no free chunk could be reused and the region is not big enough for |
199 // the requested size then NULL is returned. | 143 // the requested size then NULL is returned. |
200 // 3) If there is enough room in the ashmem region then a new chunk is | 144 // 3) If there is enough room in the ashmem region then a new chunk is |
201 // returned. This new chunk starts at |offset_| which is the end of the | 145 // returned. This new chunk starts at |offset_| which is the end of the |
202 // previously highest chunk in the region. | 146 // previously highest chunk in the region. |
203 scoped_ptr<DiscardableMemory> Allocate_Locked(size_t client_requested_size, | 147 scoped_ptr<DiscardableAshmemChunk> Allocate_Locked( |
204 size_t actual_size) { | 148 size_t client_requested_size, |
| 149 size_t actual_size) { |
205 DCHECK_LE(client_requested_size, actual_size); | 150 DCHECK_LE(client_requested_size, actual_size); |
206 allocator_->lock_.AssertAcquired(); | 151 allocator_->lock_.AssertAcquired(); |
207 | 152 |
208 // Check that the |highest_allocated_chunk_| field doesn't contain a stale | 153 // Check that the |highest_allocated_chunk_| field doesn't contain a stale |
209 // pointer. It should point to either a free chunk or a used chunk. | 154 // pointer. It should point to either a free chunk or a used chunk. |
210 DCHECK(!highest_allocated_chunk_ || | 155 DCHECK(!highest_allocated_chunk_ || |
211 address_to_free_chunk_map_.find(highest_allocated_chunk_) != | 156 address_to_free_chunk_map_.find(highest_allocated_chunk_) != |
212 address_to_free_chunk_map_.end() || | 157 address_to_free_chunk_map_.end() || |
213 used_to_previous_chunk_map_.find(highest_allocated_chunk_) != | 158 used_to_previous_chunk_map_.find(highest_allocated_chunk_) != |
214 used_to_previous_chunk_map_.end()); | 159 used_to_previous_chunk_map_.end()); |
215 | 160 |
216 scoped_ptr<DiscardableMemory> memory = ReuseFreeChunk_Locked( | 161 scoped_ptr<DiscardableAshmemChunk> memory = ReuseFreeChunk_Locked( |
217 client_requested_size, actual_size); | 162 client_requested_size, actual_size); |
218 if (memory) | 163 if (memory) |
219 return memory.Pass(); | 164 return memory.Pass(); |
220 | 165 |
221 if (size_ - offset_ < actual_size) { | 166 if (size_ - offset_ < actual_size) { |
222 // This region does not have enough space left to hold the requested size. | 167 // This region does not have enough space left to hold the requested size. |
223 return scoped_ptr<DiscardableMemory>(); | 168 return scoped_ptr<DiscardableAshmemChunk>(); |
224 } | 169 } |
225 | 170 |
226 void* const address = static_cast<char*>(base_) + offset_; | 171 void* const address = static_cast<char*>(base_) + offset_; |
227 memory.reset( | 172 memory.reset( |
228 new DiscardableAshmemChunk(this, fd_, address, offset_, actual_size)); | 173 new DiscardableAshmemChunk(this, fd_, address, offset_, actual_size)); |
229 | 174 |
230 used_to_previous_chunk_map_.insert( | 175 used_to_previous_chunk_map_.insert( |
231 std::make_pair(address, highest_allocated_chunk_)); | 176 std::make_pair(address, highest_allocated_chunk_)); |
232 highest_allocated_chunk_ = address; | 177 highest_allocated_chunk_ = address; |
233 offset_ += actual_size; | 178 offset_ += actual_size; |
(...skipping 32 matching lines...)
266 | 211 |
267 bool operator<(const FreeChunk& other) const { | 212 bool operator<(const FreeChunk& other) const { |
268 return size < other.size; | 213 return size < other.size; |
269 } | 214 } |
270 }; | 215 }; |
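The ordering above is what makes closest-size reuse cheap: free chunks live in a std::multiset keyed by size, so the smallest chunk that still fits is a single lower_bound() away. A reduced stand-alone sketch (FreeChunkSketch is an illustrative copy, not the real FreeChunk):

#include <cstddef>
#include <set>

struct FreeChunkSketch {
  explicit FreeChunkSketch(size_t size) : size(size) {}
  size_t size;
  bool operator<(const FreeChunkSketch& other) const {
    return size < other.size;
  }
};

bool HasChunkOfAtLeast(const std::multiset<FreeChunkSketch>& free_chunks,
                       size_t wanted) {
  // Closest-size match: the first chunk whose size is >= |wanted|.
  return free_chunks.lower_bound(FreeChunkSketch(wanted)) !=
         free_chunks.end();
}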
271 | 216 |
272 // Note that |allocator| must outlive |this|. | 217 // Note that |allocator| must outlive |this|. |
273 AshmemRegion(int fd, | 218 AshmemRegion(int fd, |
274 size_t size, | 219 size_t size, |
275 void* base, | 220 void* base, |
276 DiscardableMemoryAllocator* allocator) | 221 DiscardableMemoryAshmemAllocator* allocator) |
277 : fd_(fd), | 222 : fd_(fd), |
278 size_(size), | 223 size_(size), |
279 base_(base), | 224 base_(base), |
280 allocator_(allocator), | 225 allocator_(allocator), |
281 highest_allocated_chunk_(NULL), | 226 highest_allocated_chunk_(NULL), |
282 offset_(0) { | 227 offset_(0) { |
283 DCHECK_GE(fd_, 0); | 228 DCHECK_GE(fd_, 0); |
284 DCHECK_GE(size, kMinAshmemRegionSize); | 229 DCHECK_GE(size, kMinAshmemRegionSize); |
285 DCHECK(base); | 230 DCHECK(base); |
286 DCHECK(allocator); | 231 DCHECK(allocator); |
287 } | 232 } |
288 | 233 |
289 // Tries to reuse a previously freed chunk by doing a closest size match. | 234 // Tries to reuse a previously freed chunk by doing a closest size match. |
290 scoped_ptr<DiscardableMemory> ReuseFreeChunk_Locked( | 235 scoped_ptr<DiscardableAshmemChunk> ReuseFreeChunk_Locked( |
291 size_t client_requested_size, | 236 size_t client_requested_size, |
292 size_t actual_size) { | 237 size_t actual_size) { |
293 allocator_->lock_.AssertAcquired(); | 238 allocator_->lock_.AssertAcquired(); |
294 const FreeChunk reused_chunk = RemoveFreeChunkFromIterator_Locked( | 239 const FreeChunk reused_chunk = RemoveFreeChunkFromIterator_Locked( |
295 free_chunks_.lower_bound(FreeChunk(actual_size))); | 240 free_chunks_.lower_bound(FreeChunk(actual_size))); |
296 if (reused_chunk.is_null()) | 241 if (reused_chunk.is_null()) |
297 return scoped_ptr<DiscardableMemory>(); | 242 return scoped_ptr<DiscardableAshmemChunk>(); |
298 | 243 |
299 used_to_previous_chunk_map_.insert( | 244 used_to_previous_chunk_map_.insert( |
300 std::make_pair(reused_chunk.start, reused_chunk.previous_chunk)); | 245 std::make_pair(reused_chunk.start, reused_chunk.previous_chunk)); |
301 size_t reused_chunk_size = reused_chunk.size; | 246 size_t reused_chunk_size = reused_chunk.size; |
302 // |client_requested_size| is used below rather than |actual_size| to | 247 // |client_requested_size| is used below rather than |actual_size| to |
303 // reflect the number of bytes that would not be usable by the client (i.e. | 248 // reflect the number of bytes that would not be usable by the client (i.e. |
304 // wasted). Using |actual_size| instead would not allow us to detect | 249 // wasted). Using |actual_size| instead would not allow us to detect |
305 // fragmentation caused by the client if it made misaligned allocations. | 250 // fragmentation caused by the client if it made misaligned allocations. |
306 DCHECK_GE(reused_chunk.size, client_requested_size); | 251 DCHECK_GE(reused_chunk.size, client_requested_size); |
307 const size_t fragmentation_bytes = | 252 const size_t fragmentation_bytes = |
(...skipping 15 matching lines...)
323 const size_t new_chunk_size = reused_chunk.size - actual_size; | 268 const size_t new_chunk_size = reused_chunk.size - actual_size; |
324 // Note that merging is not needed here since there can't be contiguous | 269 // Note that merging is not needed here since there can't be contiguous |
325 // free chunks at this point. | 270 // free chunks at this point. |
326 AddFreeChunk_Locked( | 271 AddFreeChunk_Locked( |
327 FreeChunk(reused_chunk.start, new_chunk_start, new_chunk_size)); | 272 FreeChunk(reused_chunk.start, new_chunk_start, new_chunk_size)); |
328 } | 273 } |
329 | 274 |
330 const size_t offset = | 275 const size_t offset = |
331 static_cast<char*>(reused_chunk.start) - static_cast<char*>(base_); | 276 static_cast<char*>(reused_chunk.start) - static_cast<char*>(base_); |
332 LockAshmemRegion(fd_, offset, reused_chunk_size); | 277 LockAshmemRegion(fd_, offset, reused_chunk_size); |
333 scoped_ptr<DiscardableMemory> memory( | 278 scoped_ptr<DiscardableAshmemChunk> memory( |
334 new DiscardableAshmemChunk(this, fd_, reused_chunk.start, offset, | 279 new DiscardableAshmemChunk( |
335 reused_chunk_size)); | 280 this, fd_, reused_chunk.start, offset, reused_chunk_size)); |
336 return memory.Pass(); | 281 return memory.Pass(); |
337 } | 282 } |
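Worked numbers for the split performed in ReuseFreeChunk_Locked(), with illustrative sizes: reusing a 16384-byte free chunk for an 8192-byte allocation (actual_size) yields new_chunk_size == 16384 - 8192 == 8192 and new_chunk_start == reused_chunk.start + 8192. The tail is re-inserted as a free chunk, and no merging is needed because both of its neighbors are used chunks at that point.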
338 | 283 |
339 // Makes the chunk identified by the provided arguments free and possibly | 284 // Makes the chunk identified by the provided arguments free and possibly |
340 // merges this chunk with the previous and next contiguous ones. | 285 // merges this chunk with the previous and next contiguous ones. |
341 // If the provided chunk is the only one used (and going to be freed) in the | 286 // If the provided chunk is the only one used (and going to be freed) in the |
342 // region then the internal ashmem region is closed so that the underlying | 287 // region then the internal ashmem region is closed so that the underlying |
343 // physical pages are immediately released. | 288 // physical pages are immediately released. |
344 // Note that free chunks are unlocked, therefore they can be reclaimed by the | 289 // Note that free chunks are unlocked, therefore they can be reclaimed by the |
345 // kernel if needed (under memory pressure) but they are not immediately | 290 // kernel if needed (under memory pressure) but they are not immediately |
(...skipping 96 matching lines...)
442 DCHECK(free_chunk_it != free_chunks_.end()); | 387 DCHECK(free_chunk_it != free_chunks_.end()); |
443 const FreeChunk free_chunk(*free_chunk_it); | 388 const FreeChunk free_chunk(*free_chunk_it); |
444 address_to_free_chunk_map_.erase(free_chunk_it->start); | 389 address_to_free_chunk_map_.erase(free_chunk_it->start); |
445 free_chunks_.erase(free_chunk_it); | 390 free_chunks_.erase(free_chunk_it); |
446 return free_chunk; | 391 return free_chunk; |
447 } | 392 } |
448 | 393 |
449 const int fd_; | 394 const int fd_; |
450 const size_t size_; | 395 const size_t size_; |
451 void* const base_; | 396 void* const base_; |
452 DiscardableMemoryAllocator* const allocator_; | 397 DiscardableMemoryAshmemAllocator* const allocator_; |
453 // Points to the chunk with the highest address in the region. This pointer | 398 // Points to the chunk with the highest address in the region. This pointer |
454 // needs to be carefully updated when chunks are merged/split. | 399 // needs to be carefully updated when chunks are merged/split. |
455 void* highest_allocated_chunk_; | 400 void* highest_allocated_chunk_; |
456 // Points to the end of |highest_allocated_chunk_|. | 401 // Points to the end of |highest_allocated_chunk_|. |
457 size_t offset_; | 402 size_t offset_; |
458 // Allows free chunk recycling (lookup, insertion and removal) in O(log N). | 403 // Allows free chunk recycling (lookup, insertion and removal) in O(log N). |
459 // Note that FreeChunk values are indexed by their size and also note that | 404 // Note that FreeChunk values are indexed by their size and also note that |
460 // multiple free chunks can have the same size (which is why multiset<> is | 405 // multiple free chunks can have the same size (which is why multiset<> is |
461 // used instead of e.g. set<>). | 406 // used instead of e.g. set<>). |
462 std::multiset<FreeChunk> free_chunks_; | 407 std::multiset<FreeChunk> free_chunks_; |
463 // Used while merging free contiguous chunks to erase free chunks (from their | 408 // Used while merging free contiguous chunks to erase free chunks (from their |
464 // start address) in constant time. Note that multiset<>::{insert,erase}() | 409 // start address) in constant time. Note that multiset<>::{insert,erase}() |
465 // don't invalidate iterators (except the one for the element being removed | 410 // don't invalidate iterators (except the one for the element being removed |
466 // obviously). | 411 // obviously). |
467 hash_map< | 412 hash_map< |
468 void*, std::multiset<FreeChunk>::iterator> address_to_free_chunk_map_; | 413 void*, std::multiset<FreeChunk>::iterator> address_to_free_chunk_map_; |
469 // Maps the address of *used* chunks to the address of their previous | 414 // Maps the address of *used* chunks to the address of their previous |
470 // contiguous chunk. | 415 // contiguous chunk. |
471 hash_map<void*, void*> used_to_previous_chunk_map_; | 416 hash_map<void*, void*> used_to_previous_chunk_map_; |
472 | 417 |
473 DISALLOW_COPY_AND_ASSIGN(AshmemRegion); | 418 DISALLOW_COPY_AND_ASSIGN(AshmemRegion); |
474 }; | 419 }; |
475 | 420 |
476 DiscardableMemoryAllocator::DiscardableAshmemChunk::~DiscardableAshmemChunk() { | 421 DiscardableAshmemChunk::~DiscardableAshmemChunk() { |
477 if (locked_) | 422 if (locked_) |
478 UnlockAshmemRegion(fd_, offset_, size_); | 423 UnlockAshmemRegion(fd_, offset_, size_); |
479 ashmem_region_->OnChunkDeletion(address_, size_); | 424 ashmem_region_->OnChunkDeletion(address_, size_); |
480 } | 425 } |
481 | 426 |
482 DiscardableMemoryAllocator::DiscardableMemoryAllocator( | 427 bool DiscardableAshmemChunk::Lock() { |
| 428 DCHECK(!locked_); |
| 429 locked_ = true; |
| 430 return LockAshmemRegion(fd_, offset_, size_); |
| 431 } |
| 432 |
| 433 void DiscardableAshmemChunk::Unlock() { |
| 434 DCHECK(locked_); |
| 435 locked_ = false; |
| 436 UnlockAshmemRegion(fd_, offset_, size_); |
| 437 } |
| 438 |
| 439 void* DiscardableAshmemChunk::Memory() const { |
| 440 return address_; |
| 441 } |
| 442 |
| 443 // Note that |ashmem_region| must outlive |this|. |
| 444 DiscardableAshmemChunk::DiscardableAshmemChunk(AshmemRegion* ashmem_region, |
| 445 int fd, |
| 446 void* address, |
| 447 size_t offset, |
| 448 size_t size) |
| 449 : ashmem_region_(ashmem_region), |
| 450 fd_(fd), |
| 451 address_(address), |
| 452 offset_(offset), |
| 453 size_(size), |
| 454 locked_(true) { |
| 455 } |
| 456 |
| 457 DiscardableMemoryAshmemAllocator::DiscardableMemoryAshmemAllocator( |
483 const std::string& name, | 458 const std::string& name, |
484 size_t ashmem_region_size) | 459 size_t ashmem_region_size) |
485 : name_(name), | 460 : name_(name), |
486 ashmem_region_size_( | 461 ashmem_region_size_( |
487 std::max(kMinAshmemRegionSize, AlignToNextPage(ashmem_region_size))), | 462 std::max(kMinAshmemRegionSize, AlignToNextPage(ashmem_region_size))), |
488 last_ashmem_region_size_(0) { | 463 last_ashmem_region_size_(0) { |
489 DCHECK_GE(ashmem_region_size_, kMinAshmemRegionSize); | 464 DCHECK_GE(ashmem_region_size_, kMinAshmemRegionSize); |
490 } | 465 } |
491 | 466 |
492 DiscardableMemoryAllocator::~DiscardableMemoryAllocator() { | 467 DiscardableMemoryAshmemAllocator::~DiscardableMemoryAshmemAllocator() { |
493 DCHECK(thread_checker_.CalledOnValidThread()); | |
494 DCHECK(ashmem_regions_.empty()); | 468 DCHECK(ashmem_regions_.empty()); |
495 } | 469 } |
496 | 470 |
497 scoped_ptr<DiscardableMemory> DiscardableMemoryAllocator::Allocate( | 471 scoped_ptr<DiscardableAshmemChunk> DiscardableMemoryAshmemAllocator::Allocate( |
498 size_t size) { | 472 size_t size) { |
499 const size_t aligned_size = AlignToNextPage(size); | 473 const size_t aligned_size = AlignToNextPage(size); |
500 if (!aligned_size) | 474 if (!aligned_size) |
501 return scoped_ptr<DiscardableMemory>(); | 475 return scoped_ptr<DiscardableAshmemChunk>(); |
502 // TODO(pliard): make this function less naive by e.g. moving the free chunks | 476 // TODO(pliard): make this function less naive by e.g. moving the free chunks |
503 // multiset to the allocator itself in order to further reduce fragmentation | 477 // multiset to the allocator itself in order to further reduce fragmentation |
504 // and speed up allocation. Note that there should not be more than a few | 478 // and speed up allocation. Note that there should not be more than a few |
505 // (up to 5) AshmemRegion instances in practice though. | 479 // (up to 5) AshmemRegion instances in practice though. |
506 AutoLock auto_lock(lock_); | 480 AutoLock auto_lock(lock_); |
507 DCHECK_LE(ashmem_regions_.size(), 5U); | 481 DCHECK_LE(ashmem_regions_.size(), 5U); |
508 for (ScopedVector<AshmemRegion>::iterator it = ashmem_regions_.begin(); | 482 for (ScopedVector<AshmemRegion>::iterator it = ashmem_regions_.begin(); |
509 it != ashmem_regions_.end(); ++it) { | 483 it != ashmem_regions_.end(); ++it) { |
510 scoped_ptr<DiscardableMemory> memory( | 484 scoped_ptr<DiscardableAshmemChunk> memory( |
511 (*it)->Allocate_Locked(size, aligned_size)); | 485 (*it)->Allocate_Locked(size, aligned_size)); |
512 if (memory) | 486 if (memory) |
513 return memory.Pass(); | 487 return memory.Pass(); |
514 } | 488 } |
515 // The creation of the (large) ashmem region might fail if the address space | 489 // The creation of the (large) ashmem region might fail if the address space |
516 // is too fragmented. If creation fails, the allocator retries by repeatedly | 490 // is too fragmented. If creation fails, the allocator retries by repeatedly |
517 // halving the size. | 491 // halving the size. |
518 const size_t min_region_size = std::max(kMinAshmemRegionSize, aligned_size); | 492 const size_t min_region_size = std::max(kMinAshmemRegionSize, aligned_size); |
519 for (size_t region_size = std::max(ashmem_region_size_, aligned_size); | 493 for (size_t region_size = std::max(ashmem_region_size_, aligned_size); |
520 region_size >= min_region_size; | 494 region_size >= min_region_size; |
521 region_size = AlignToNextPage(region_size / 2)) { | 495 region_size = AlignToNextPage(region_size / 2)) { |
522 scoped_ptr<AshmemRegion> new_region( | 496 scoped_ptr<AshmemRegion> new_region( |
523 AshmemRegion::Create(region_size, name_.c_str(), this)); | 497 AshmemRegion::Create(region_size, name_.c_str(), this)); |
524 if (!new_region) | 498 if (!new_region) |
525 continue; | 499 continue; |
526 last_ashmem_region_size_ = region_size; | 500 last_ashmem_region_size_ = region_size; |
527 ashmem_regions_.push_back(new_region.release()); | 501 ashmem_regions_.push_back(new_region.release()); |
528 return ashmem_regions_.back()->Allocate_Locked(size, aligned_size); | 502 return ashmem_regions_.back()->Allocate_Locked(size, aligned_size); |
529 } | 503 } |
530 // TODO(pliard): consider adding a histogram to see how often this happens. | 504 // TODO(pliard): consider adding a histogram to see how often this happens. |
531 return scoped_ptr<DiscardableMemory>(); | 505 return scoped_ptr<DiscardableAshmemChunk>(); |
532 } | 506 } |
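A stand-alone sketch of the halving retry in Allocate() above; TryCreateSketch() and the 8 MB minimum are illustrative assumptions standing in for AshmemRegion::Create() and kMinAshmemRegionSize:

#include <algorithm>
#include <cstddef>

bool TryCreateSketch(size_t /*bytes*/) { return false; }  // Probe stand-in.

size_t FirstRegionSizeThatFits(size_t preferred_size, size_t aligned_size) {
  const size_t kMinSketch = 8 * 1024 * 1024;  // Assumed minimum region size.
  const size_t min_region_size = std::max(kMinSketch, aligned_size);
  for (size_t region_size = std::max(preferred_size, aligned_size);
       region_size >= min_region_size; region_size /= 2) {
    if (TryCreateSketch(region_size))
      return region_size;  // Mirrors what last_ashmem_region_size_ records.
  }
  return 0;  // Address space too fragmented even at the minimum size.
}

With preferred_size == 32 MB, the attempted sizes are 32, 16, then 8 MB (each page-aligned in the real code via AlignToNextPage(region_size / 2)) before giving up.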
533 | 507 |
534 size_t DiscardableMemoryAllocator::last_ashmem_region_size() const { | 508 size_t DiscardableMemoryAshmemAllocator::last_ashmem_region_size() const { |
535 AutoLock auto_lock(lock_); | 509 AutoLock auto_lock(lock_); |
536 return last_ashmem_region_size_; | 510 return last_ashmem_region_size_; |
537 } | 511 } |
538 | 512 |
539 void DiscardableMemoryAllocator::DeleteAshmemRegion_Locked( | 513 void DiscardableMemoryAshmemAllocator::DeleteAshmemRegion_Locked( |
540 AshmemRegion* region) { | 514 AshmemRegion* region) { |
541 lock_.AssertAcquired(); | 515 lock_.AssertAcquired(); |
542 // Note that there should not be more than a couple of ashmem region instances | 516 // Note that there should not be more than a couple of ashmem region instances |
543 // in |ashmem_regions_|. | 517 // in |ashmem_regions_|. |
544 DCHECK_LE(ashmem_regions_.size(), 5U); | 518 DCHECK_LE(ashmem_regions_.size(), 5U); |
545 const ScopedVector<AshmemRegion>::iterator it = std::find( | 519 const ScopedVector<AshmemRegion>::iterator it = std::find( |
546 ashmem_regions_.begin(), ashmem_regions_.end(), region); | 520 ashmem_regions_.begin(), ashmem_regions_.end(), region); |
547 DCHECK_NE(ashmem_regions_.end(), it); | 521 DCHECK_NE(ashmem_regions_.end(), it); |
548 std::swap(*it, ashmem_regions_.back()); | 522 std::swap(*it, ashmem_regions_.back()); |
549 ashmem_regions_.pop_back(); | 523 ashmem_regions_.pop_back(); |
550 } | 524 } |
551 | 525 |
552 } // namespace internal | 526 } // namespace internal |
553 } // namespace base | 527 } // namespace base |