OLD | NEW |
| (Empty) |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
#include "base/memory/discardable_memory_allocator_android.h"

#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>

#include <limits>

#include "base/memory/discardable_memory.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
17 | |
18 namespace base { | |
19 namespace internal { | |
20 | |
// Name given to the ashmem regions created by the allocator under test.
const char kAllocatorName[] = "allocator-for-testing";

// Ashmem region size passed to the fixture's allocator. Tests below rely on
// this being much larger than |kPageSize|.
const size_t kAshmemRegionSizeForTesting = 32 * 1024 * 1024;
const size_t kPageSize = 4096;

// Largest request whose page-aligned size still fits in size_t; anything
// bigger would overflow when rounded up to a page boundary.
const size_t kMaxAllowedAllocationSize =
    std::numeric_limits<size_t>::max() - kPageSize + 1;
28 | |
// Fixture creating a fresh allocator per test so each test starts with no
// ashmem regions and no free chunks.
class DiscardableMemoryAllocatorTest : public testing::Test {
 protected:
  DiscardableMemoryAllocatorTest()
      : allocator_(kAllocatorName, kAshmemRegionSizeForTesting) {
  }

  // Allocator under test, backed by ashmem regions of
  // |kAshmemRegionSizeForTesting| bytes.
  DiscardableMemoryAllocator allocator_;
};
37 | |
38 void WriteToDiscardableMemory(DiscardableMemory* memory, size_t size) { | |
39 // Write to the first and the last pages only to avoid paging in up to 64 | |
40 // MBytes. | |
41 static_cast<char*>(memory->Memory())[0] = 'a'; | |
42 static_cast<char*>(memory->Memory())[size - 1] = 'a'; | |
43 } | |
44 | |
45 TEST_F(DiscardableMemoryAllocatorTest, Basic) { | |
46 const size_t size = 128; | |
47 scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(size)); | |
48 ASSERT_TRUE(memory); | |
49 WriteToDiscardableMemory(memory.get(), size); | |
50 } | |
51 | |
52 TEST_F(DiscardableMemoryAllocatorTest, ZeroAllocationIsNotSupported) { | |
53 scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(0)); | |
54 ASSERT_FALSE(memory); | |
55 } | |
56 | |
57 TEST_F(DiscardableMemoryAllocatorTest, TooLargeAllocationFails) { | |
58 scoped_ptr<DiscardableMemory> memory( | |
59 allocator_.Allocate(kMaxAllowedAllocationSize + 1)); | |
60 // Page-alignment would have caused an overflow resulting in a small | |
61 // allocation if the input size wasn't checked correctly. | |
62 ASSERT_FALSE(memory); | |
63 } | |
64 | |
65 TEST_F(DiscardableMemoryAllocatorTest, | |
66 AshmemRegionsAreNotSmallerThanRequestedSize) { | |
67 // The creation of the underlying ashmem region is expected to fail since | |
68 // there should not be enough room in the address space. When ashmem creation | |
69 // fails, the allocator repetitively retries by dividing the size by 2. This | |
70 // size should not be smaller than the size the user requested so the | |
71 // allocation here should just fail (and not succeed with the minimum ashmem | |
72 // region size). | |
73 scoped_ptr<DiscardableMemory> memory( | |
74 allocator_.Allocate(kMaxAllowedAllocationSize)); | |
75 ASSERT_FALSE(memory); | |
76 } | |
77 | |
78 TEST_F(DiscardableMemoryAllocatorTest, AshmemRegionsAreAlwaysPageAligned) { | |
79 // Use a separate allocator here so that we can override the ashmem region | |
80 // size. | |
81 DiscardableMemoryAllocator allocator( | |
82 kAllocatorName, kMaxAllowedAllocationSize); | |
83 scoped_ptr<DiscardableMemory> memory(allocator.Allocate(kPageSize)); | |
84 ASSERT_TRUE(memory); | |
85 EXPECT_GT(kMaxAllowedAllocationSize, allocator.last_ashmem_region_size()); | |
86 ASSERT_TRUE(allocator.last_ashmem_region_size() % kPageSize == 0); | |
87 } | |
88 | |
89 TEST_F(DiscardableMemoryAllocatorTest, LargeAllocation) { | |
90 // Note that large allocations should just use DiscardableMemoryAndroidSimple | |
91 // instead. | |
92 const size_t size = 64 * 1024 * 1024; | |
93 scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(size)); | |
94 ASSERT_TRUE(memory); | |
95 WriteToDiscardableMemory(memory.get(), size); | |
96 } | |
97 | |
98 TEST_F(DiscardableMemoryAllocatorTest, ChunksArePageAligned) { | |
99 scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(kPageSize)); | |
100 ASSERT_TRUE(memory); | |
101 EXPECT_EQ(0U, reinterpret_cast<uint64_t>(memory->Memory()) % kPageSize); | |
102 WriteToDiscardableMemory(memory.get(), kPageSize); | |
103 } | |
104 | |
105 TEST_F(DiscardableMemoryAllocatorTest, AllocateFreeAllocate) { | |
106 scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(kPageSize)); | |
107 // Extra allocation that prevents the region from being deleted when |memory| | |
108 // gets deleted. | |
109 scoped_ptr<DiscardableMemory> memory_lock(allocator_.Allocate(kPageSize)); | |
110 ASSERT_TRUE(memory); | |
111 void* const address = memory->Memory(); | |
112 memory->Unlock(); // Tests that the reused chunk is being locked correctly. | |
113 memory.reset(); | |
114 memory = allocator_.Allocate(kPageSize); | |
115 ASSERT_TRUE(memory); | |
116 // The previously freed chunk should be reused. | |
117 EXPECT_EQ(address, memory->Memory()); | |
118 WriteToDiscardableMemory(memory.get(), kPageSize); | |
119 } | |
120 | |
121 TEST_F(DiscardableMemoryAllocatorTest, FreeingWholeAshmemRegionClosesAshmem) { | |
122 scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(kPageSize)); | |
123 ASSERT_TRUE(memory); | |
124 const int kMagic = 0xdeadbeef; | |
125 *static_cast<int*>(memory->Memory()) = kMagic; | |
126 memory.reset(); | |
127 // The previous ashmem region should have been closed thus it should not be | |
128 // reused. | |
129 memory = allocator_.Allocate(kPageSize); | |
130 ASSERT_TRUE(memory); | |
131 EXPECT_NE(kMagic, *static_cast<const int*>(memory->Memory())); | |
132 } | |
133 | |
// The allocator should hand back the free chunk whose size is closest to the
// request (best fit), not merely the first free chunk large enough.
TEST_F(DiscardableMemoryAllocatorTest, AllocateUsesBestFitAlgorithm) {
  scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(3 * kPageSize));
  ASSERT_TRUE(memory1);
  scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(2 * kPageSize));
  ASSERT_TRUE(memory2);
  scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(1 * kPageSize));
  ASSERT_TRUE(memory3);
  void* const address_3 = memory3->Memory();
  // Free the 3-page and 1-page chunks; both become candidates for reuse.
  memory1.reset();
  // Don't free |memory2| to avoid merging the 3 blocks together.
  memory3.reset();
  memory1 = allocator_.Allocate(1 * kPageSize);
  ASSERT_TRUE(memory1);
  // The chunk whose size is closest to the requested size should be reused.
  EXPECT_EQ(address_3, memory1->Memory());
  WriteToDiscardableMemory(memory1.get(), kPageSize);
}
151 | |
152 TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunks) { | |
153 scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(kPageSize)); | |
154 ASSERT_TRUE(memory1); | |
155 scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(kPageSize)); | |
156 ASSERT_TRUE(memory2); | |
157 scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(kPageSize)); | |
158 ASSERT_TRUE(memory3); | |
159 scoped_ptr<DiscardableMemory> memory4(allocator_.Allocate(kPageSize)); | |
160 ASSERT_TRUE(memory4); | |
161 void* const memory1_address = memory1->Memory(); | |
162 memory1.reset(); | |
163 memory3.reset(); | |
164 // Freeing |memory2| (located between memory1 and memory3) should merge the | |
165 // three free blocks together. | |
166 memory2.reset(); | |
167 memory1 = allocator_.Allocate(3 * kPageSize); | |
168 EXPECT_EQ(memory1_address, memory1->Memory()); | |
169 } | |
170 | |
171 TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunksAdvanced) { | |
172 scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(4 * kPageSize)); | |
173 ASSERT_TRUE(memory1); | |
174 scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(4 * kPageSize)); | |
175 ASSERT_TRUE(memory2); | |
176 void* const memory1_address = memory1->Memory(); | |
177 memory1.reset(); | |
178 memory1 = allocator_.Allocate(2 * kPageSize); | |
179 memory2.reset(); | |
180 // At this point, the region should be in this state: | |
181 // 8 KBytes (used), 24 KBytes (free). | |
182 memory2 = allocator_.Allocate(6 * kPageSize); | |
183 EXPECT_EQ( | |
184 static_cast<const char*>(memory2->Memory()), | |
185 static_cast<const char*>(memory1_address) + 2 * kPageSize); | |
186 } | |
187 | |
188 TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunksAdvanced2) { | |
189 scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(4 * kPageSize)); | |
190 ASSERT_TRUE(memory1); | |
191 scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(4 * kPageSize)); | |
192 ASSERT_TRUE(memory2); | |
193 void* const memory1_address = memory1->Memory(); | |
194 memory1.reset(); | |
195 memory1 = allocator_.Allocate(2 * kPageSize); | |
196 scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(2 * kPageSize)); | |
197 // At this point, the region should be in this state: | |
198 // 8 KBytes (used), 8 KBytes (used), 16 KBytes (used). | |
199 memory3.reset(); | |
200 memory2.reset(); | |
201 // At this point, the region should be in this state: | |
202 // 8 KBytes (used), 24 KBytes (free). | |
203 memory2 = allocator_.Allocate(6 * kPageSize); | |
204 EXPECT_EQ( | |
205 static_cast<const char*>(memory2->Memory()), | |
206 static_cast<const char*>(memory1_address) + 2 * kPageSize); | |
207 } | |
208 | |
209 TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunksAndDeleteAshmemRegion) { | |
210 scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(4 * kPageSize)); | |
211 ASSERT_TRUE(memory1); | |
212 scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(4 * kPageSize)); | |
213 ASSERT_TRUE(memory2); | |
214 memory1.reset(); | |
215 memory1 = allocator_.Allocate(2 * kPageSize); | |
216 scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(2 * kPageSize)); | |
217 // At this point, the region should be in this state: | |
218 // 8 KBytes (used), 8 KBytes (used), 16 KBytes (used). | |
219 memory1.reset(); | |
220 memory3.reset(); | |
221 // At this point, the region should be in this state: | |
222 // 8 KBytes (free), 8 KBytes (used), 8 KBytes (free). | |
223 const int kMagic = 0xdeadbeef; | |
224 *static_cast<int*>(memory2->Memory()) = kMagic; | |
225 memory2.reset(); | |
226 // The whole region should have been deleted. | |
227 memory2 = allocator_.Allocate(2 * kPageSize); | |
228 EXPECT_NE(kMagic, *static_cast<int*>(memory2->Memory())); | |
229 } | |
230 | |
231 TEST_F(DiscardableMemoryAllocatorTest, | |
232 TooLargeFreeChunksDontCauseTooMuchFragmentationWhenRecycled) { | |
233 // Keep |memory_1| below allocated so that the ashmem region doesn't get | |
234 // closed when |memory_2| is deleted. | |
235 scoped_ptr<DiscardableMemory> memory_1(allocator_.Allocate(64 * 1024)); | |
236 ASSERT_TRUE(memory_1); | |
237 scoped_ptr<DiscardableMemory> memory_2(allocator_.Allocate(32 * 1024)); | |
238 ASSERT_TRUE(memory_2); | |
239 void* const address = memory_2->Memory(); | |
240 memory_2.reset(); | |
241 const size_t size = 16 * 1024; | |
242 memory_2 = allocator_.Allocate(size); | |
243 ASSERT_TRUE(memory_2); | |
244 EXPECT_EQ(address, memory_2->Memory()); | |
245 WriteToDiscardableMemory(memory_2.get(), size); | |
246 scoped_ptr<DiscardableMemory> memory_3(allocator_.Allocate(size)); | |
247 // The unused tail (16 KBytes large) of the previously freed chunk should be | |
248 // reused. | |
249 EXPECT_EQ(static_cast<char*>(address) + size, memory_3->Memory()); | |
250 WriteToDiscardableMemory(memory_3.get(), size); | |
251 } | |
252 | |
253 TEST_F(DiscardableMemoryAllocatorTest, UseMultipleAshmemRegions) { | |
254 // Leave one page untouched at the end of the ashmem region. | |
255 const size_t size = kAshmemRegionSizeForTesting - kPageSize; | |
256 scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(size)); | |
257 ASSERT_TRUE(memory1); | |
258 WriteToDiscardableMemory(memory1.get(), size); | |
259 | |
260 scoped_ptr<DiscardableMemory> memory2( | |
261 allocator_.Allocate(kAshmemRegionSizeForTesting)); | |
262 ASSERT_TRUE(memory2); | |
263 WriteToDiscardableMemory(memory2.get(), kAshmemRegionSizeForTesting); | |
264 // The last page of the first ashmem region should be used for this | |
265 // allocation. | |
266 scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(kPageSize)); | |
267 ASSERT_TRUE(memory3); | |
268 WriteToDiscardableMemory(memory3.get(), kPageSize); | |
269 EXPECT_EQ(memory3->Memory(), static_cast<char*>(memory1->Memory()) + size); | |
270 } | |
271 | |
272 TEST_F(DiscardableMemoryAllocatorTest, | |
273 HighestAllocatedChunkPointerIsUpdatedWhenHighestChunkGetsSplit) { | |
274 // Prevents the ashmem region from getting closed when |memory2| gets freed. | |
275 scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(kPageSize)); | |
276 ASSERT_TRUE(memory1); | |
277 | |
278 scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(4 * kPageSize)); | |
279 ASSERT_TRUE(memory2); | |
280 | |
281 memory2.reset(); | |
282 memory2 = allocator_.Allocate(kPageSize); | |
283 // There should now be a free chunk of size 3 * |kPageSize| starting at offset | |
284 // 2 * |kPageSize| and the pointer to the highest allocated chunk should have | |
285 // also been updated to |base_| + 2 * |kPageSize|. This pointer is used to | |
286 // maintain the container mapping a chunk address to its previous chunk and | |
287 // this map is in turn used while merging previous contiguous chunks. | |
288 | |
289 // Allocate more than 3 * |kPageSize| so that the free chunk of size 3 * | |
290 // |kPageSize| is not reused and |highest_allocated_chunk_| gets used instead. | |
291 scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(4 * kPageSize)); | |
292 ASSERT_TRUE(memory3); | |
293 | |
294 // Deleting |memory3| (whose size is 4 * |kPageSize|) should result in a merge | |
295 // with its previous chunk which is the free chunk of size |3 * kPageSize|. | |
296 memory3.reset(); | |
297 memory3 = allocator_.Allocate((3 + 4) * kPageSize); | |
298 EXPECT_EQ(memory3->Memory(), | |
299 static_cast<const char*>(memory2->Memory()) + kPageSize); | |
300 } | |
301 | |
302 } // namespace internal | |
303 } // namespace base | |
OLD | NEW |