OLD | NEW |
---|---|
1 // Copyright (c) 2005, Google Inc. | 1 // Copyright (c) 2005, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 119 matching lines...) | |
130 | 130 |
131 void raninit(ranctx* x, u4 seed) { | 131 void raninit(ranctx* x, u4 seed) { |
132 u4 i; | 132 u4 i; |
133 x->a = 0xf1ea5eed; | 133 x->a = 0xf1ea5eed; |
134 x->b = x->c = x->d = seed; | 134 x->b = x->c = x->d = seed; |
135 for (i = 0; i < 20; ++i) { | 135 for (i = 0; i < 20; ++i) { |
136 (void) ranval(x); | 136 (void) ranval(x); |
137 } | 137 } |
138 } | 138 } |
139 | 139 |
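For context, the ranval() routine that raninit() warms up lives in the lines skipped above; it is presumably Bob Jenkins' small public-domain noncryptographic PRNG ("JSF"), which mixes a four-word 32-bit state on every call. A sketch of that generator, under that assumption:

  #define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))
  u4 ranval(ranctx* x) {
    // Advance the four-word state and return 32 pseudo-random bits.
    u4 e = x->a - rot(x->b, 27);
    x->a = x->b ^ rot(x->c, 17);
    x->b = x->c + x->d;
    x->c = x->d + e;
    x->d = e + x->a;
    return x->d;
  }

The twenty warm-up calls in raninit() serve to diffuse the single 32-bit seed through all four words of state before any output is used.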
140 // If the kernel cannot honor the hint in arch_get_unmapped_area_topdown, it | |
141 // will simply ignore it. So we give a hint that has a good chance of | |
142 // working. | |
143 // The mmap top-down allocator will normally allocate below TASK_SIZE - gap, | |
144 // with a gap that depends on the max stack size. See x86/mm/mmap.c. We | |
145 // should make allocations that are below this area, which would be | |
146 // 0x7ffbf8000000. | |
jar (doing other things)
2013/02/04 18:34:08
Question: In a 64-bit address space, why do they only use 47 bits for userland?
jln (very slow on Chromium)
2013/02/04 19:23:04
It's an architectural limitation. It saves transistors.
147 // We use 0x3ffffffff000 as the mask so that we only "pollute" half of the | |
148 // address space. In the unlikely case where fragmentation would become an | |
149 // issue, the kernel will still have another half to use. | |
150 const uint64_t kRandomAddressMask = 0x3ffffffff000ULL; | |
151 | |
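To make the effect of the mask concrete, here is a worked example with an illustrative value (not part of the patch). The AND clears the low 12 bits, so the hint stays page-aligned, and clears bit 46 and above, so it stays in the lower half of the 47-bit userland range:

  uint64_t raw  = 0x9bf3c4d5e6f7a8b9ULL;     // e.g. two ranval() outputs glued together
  uint64_t hint = raw & kRandomAddressMask;  // kRandomAddressMask == 0x3ffffffff000
  // hint == 0x4d5e6f7a000: the low 12 bits are zero (page-aligned) and the
  // value is below 0x400000000000, leaving the upper half of userland untouched.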
140 #endif // defined(ASLR_IS_SUPPORTED) | 152 #endif // defined(ASLR_IS_SUPPORTED) |
141 | 153 |
142 // Give a random "hint" that is suitable for use with mmap(). This cannot make | 154 // Give a random "hint" that is suitable for use with mmap(). This cannot make |
143 // mmap fail, as the kernel will simply not follow the hint if it can't. | 155 // mmap fail, as the kernel will simply not follow the hint if it can't. |
144 // However, this will create address space fragmentation. Currently, we only | 156 // However, this will create address space fragmentation. Currently, we only |
145 // implement it on x86_64, where we have a 47-bit userland address space and | 157 // implement it on x86_64, where we have a 47-bit userland address space and |
146 // fragmentation is not an issue. | 158 // fragmentation is not an issue. |
147 void* GetRandomAddrHint() { | 159 void* GetRandomAddrHint() { |
148 #if !defined(ASLR_IS_SUPPORTED) | 160 #if !defined(ASLR_IS_SUPPORTED) |
149 return NULL; | 161 return NULL; |
(...skipping 20 matching lines...) | |
170 ssize_t len; | 182 ssize_t len; |
171 len = read(urandom_fd, &seed, sizeof(seed)); | 183 len = read(urandom_fd, &seed, sizeof(seed)); |
172 ASSERT(len == sizeof(seed)); | 184 ASSERT(len == sizeof(seed)); |
173 int ret = close(urandom_fd); | 185 int ret = close(urandom_fd); |
174 ASSERT(ret == 0); | 186 ASSERT(ret == 0); |
175 } | 187 } |
176 raninit(&ctx, seed); | 188 raninit(&ctx, seed); |
177 } | 189 } |
178 uint64_t random_address = (static_cast<uint64_t>(ranval(&ctx)) << 32) | | 190 uint64_t random_address = (static_cast<uint64_t>(ranval(&ctx)) << 32) | |
179 ranval(&ctx); | 191 ranval(&ctx); |
180 // If the kernel cannot honor the hint in arch_get_unmapped_area_topdown, it | |
181 // will simply ignore it. So we give a hint that has a good chance of | |
182 // working. | |
183 // The mmap top-down allocator will normally allocate below TASK_SIZE - gap, | |
184 // with a gap that depends on the max stack size. See x86/mm/mmap.c. We | |
185 // should make allocations that are below this area, which would be | |
186 // 0x7ffbf8000000. | |
187 // We use 0x3ffffffff000 as the mask so that we only "pollute" half of the | |
188 // address space. In the unlikely case where fragmentation would become an | |
189 // issue, the kernel will still have another half to use. | |
190 // A bit-wise "and" won't bias our random distribution. | 192 // A bit-wise "and" won't bias our random distribution. |
191 random_address &= 0x3ffffffff000ULL; | 193 random_address &= kRandomAddressMask; |
192 return reinterpret_cast<void*>(random_address); | 194 return reinterpret_cast<void*>(random_address); |
193 #endif // ASLR_IS_SUPPORTED | 195 #endif // ASLR_IS_SUPPORTED |
194 } | 196 } |
195 | 197 |
198 // Allocate |length| bytes of memory using mmap(). The memory will be | |
199 // readable and writeable, but not executable. | |
200 // Like mmap(), we will return MAP_FAILED on failure. | |
201 // |is_aslr_enabled| controls address space layout randomization. When true, we | |
202 // will put the first mapping at a random address and will then try to grow it. | |
203 // If it's not possible to grow an existing mapping, a new one will be created. | |
204 void* AllocWithMmap(size_t length, bool is_aslr_enabled) { | |
205 // Note: we are protected by the general TCMalloc_SystemAlloc spinlock. | |
206 static void* address_hint = NULL; | |
207 if (is_aslr_enabled && | |
208 (!address_hint || | |
209 reinterpret_cast<uint64_t>(address_hint) & ~kRandomAddressMask)) { | |
jln (very slow on Chromium)
2013/02/01 23:16:30
Another way to look at this (which may help convin
210 address_hint = GetRandomAddrHint(); | |
211 } | |
212 | |
213 // address_hint is likely to make us grow an existing mapping. | |
214 void* result = mmap(address_hint, length, PROT_READ|PROT_WRITE, | |
215 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); | |
216 if (result == address_hint) { | |
217 // If mmap() succeeded at address_hint, our next mmap() will try to grow | |
218 // the current mapping as long as it's compatible with our ASLR mask. | |
219 // This has been done for performance reasons, see crbug.com/173371. | |
220 // It should be possible to strike a better balance between performance | |
221 // and security but will be done at a later date. | |
222 // If this overflows, it could only set address_hint to NULL, which is | |
223 // what we want (and can't happen on the currently supported architecture). | |
224 address_hint = static_cast<char*>(result) + length; | |
225 } else { | |
226 // mmap failed or a collision prevented the kernel from honoring the hint; | |
227 // reset the hint. | |
228 address_hint = NULL; | |
229 } | |
230 return result; | |
231 } | |
232 | |
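To illustrate when a fresh hint is drawn in the code above: the hint is regenerated whenever it is NULL or has bits outside kRandomAddressMask, i.e. whenever growing the previous mapping has walked out of the ASLR region. A small sketch (the helper name is hypothetical, not part of the patch):

  static bool NeedsNewHint(void* hint) {
    // NULL, or any bit set outside the region covered by the mask.
    return !hint ||
           (reinterpret_cast<uint64_t>(hint) & ~kRandomAddressMask) != 0;
  }
  // NeedsNewHint(NULL)                    -> true   (first allocation)
  // NeedsNewHint((void*)0x3ffffffff000)   -> false  (still inside the region)
  // NeedsNewHint((void*)0x400000000000)   -> true   (mapping grew past the region)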
196 } // Anonymous namespace to avoid name conflicts on "CheckAddressBits". | 233 } // Anonymous namespace to avoid name conflicts on "CheckAddressBits". |
197 | 234 |
198 COMPILE_ASSERT(kAddressBits <= 8 * sizeof(void*), | 235 COMPILE_ASSERT(kAddressBits <= 8 * sizeof(void*), |
199 address_bits_larger_than_pointer_size); | 236 address_bits_larger_than_pointer_size); |
200 | 237 |
201 // Structure for discovering alignment | 238 // Structure for discovering alignment |
202 union MemoryAligner { | 239 union MemoryAligner { |
203 void* p; | 240 void* p; |
204 double d; | 241 double d; |
205 size_t s; | 242 size_t s; |
(...skipping 192 matching lines...) | |
398 // Ask for extra memory if alignment > pagesize | 435 // Ask for extra memory if alignment > pagesize |
399 size_t extra = 0; | 436 size_t extra = 0; |
400 if (alignment > pagesize) { | 437 if (alignment > pagesize) { |
401 extra = alignment - pagesize; | 438 extra = alignment - pagesize; |
402 } | 439 } |
403 | 440 |
404 // Note: size + extra does not overflow since: | 441 // Note: size + extra does not overflow since: |
405 // size + alignment < (1<<NBITS). | 442 // size + alignment < (1<<NBITS). |
406 // and extra <= alignment | 443 // and extra <= alignment |
407 // therefore size + extra < (1<<NBITS) | 444 // therefore size + extra < (1<<NBITS) |
408 void* address_hint = NULL; | 445 void* result = AllocWithMmap(size + extra, FLAGS_malloc_random_allocator); |
409 if (FLAGS_malloc_random_allocator) { | |
410 address_hint = GetRandomAddrHint(); | |
411 } | |
412 void* result = mmap(address_hint, size + extra, | |
413 PROT_READ|PROT_WRITE, | |
414 MAP_PRIVATE|MAP_ANONYMOUS, | |
415 -1, 0); | |
416 if (result == reinterpret_cast<void*>(MAP_FAILED)) { | 446 if (result == reinterpret_cast<void*>(MAP_FAILED)) { |
417 return NULL; | 447 return NULL; |
418 } | 448 } |
419 | 449 |
420 // Adjust the return memory so it is aligned | 450 // Adjust the return memory so it is aligned |
421 uintptr_t ptr = reinterpret_cast<uintptr_t>(result); | 451 uintptr_t ptr = reinterpret_cast<uintptr_t>(result); |
422 size_t adjust = 0; | 452 size_t adjust = 0; |
423 if ((ptr & (alignment - 1)) != 0) { | 453 if ((ptr & (alignment - 1)) != 0) { |
424 adjust = alignment - (ptr & (alignment - 1)); | 454 adjust = alignment - (ptr & (alignment - 1)); |
425 } | 455 } |
(...skipping 226 matching lines...) | |
652 } | 682 } |
653 } | 683 } |
654 #endif | 684 #endif |
655 } | 685 } |
656 | 686 |
657 void TCMalloc_SystemCommit(void* start, size_t length) { | 687 void TCMalloc_SystemCommit(void* start, size_t length) { |
658 // Nothing to do here. TCMalloc_SystemRelease does not alter pages | 688 // Nothing to do here. TCMalloc_SystemRelease does not alter pages |
659 // such that they need to be re-committed before they can be used by the | 689 // such that they need to be re-committed before they can be used by the |
660 // application. | 690 // application. |
661 } | 691 } |
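For reference on why no recommit is needed: on Linux the release path in this file is presumably built on madvise(), which lets the kernel reclaim the physical pages while keeping the mapping itself intact; a later touch simply faults in fresh zero-filled pages. A minimal sketch of that pattern (an assumption about the elided TCMalloc_SystemRelease body, not a quote of it):

  #include <sys/mman.h>

  void ReleaseRange(void* start, size_t length) {
    // The pages become reclaimable, but the VMA stays mapped, so the
    // application can reuse the range without an explicit commit step.
    madvise(start, length, MADV_DONTNEED);
  }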