OLD | NEW |
1 // Copyright (c) 2005, Google Inc. | 1 // Copyright (c) 2005, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 119 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
130 | 130 |
131 void raninit(ranctx* x, u4 seed) { | 131 void raninit(ranctx* x, u4 seed) { |
132 u4 i; | 132 u4 i; |
133 x->a = 0xf1ea5eed; | 133 x->a = 0xf1ea5eed; |
134 x->b = x->c = x->d = seed; | 134 x->b = x->c = x->d = seed; |
135 for (i = 0; i < 20; ++i) { | 135 for (i = 0; i < 20; ++i) { |
136 (void) ranval(x); | 136 (void) ranval(x); |
137 } | 137 } |
138 } | 138 } |
139 | 139 |
| 140 // If the kernel cannot honor the hint in arch_get_unmapped_area_topdown, it |
| 141 // will simply ignore it. So we give a hint that has a good chance of |
| 142 // working. |
| 143 // The mmap top-down allocator will normally allocate below TASK_SIZE - gap, |
| 144 // with a gap that depends on the max stack size. See x86/mm/mmap.c. We |
| 145 // should make allocations that are below this area, which would be |
| 146 // 0x7ffbf8000000. |
| 147 // We use 0x3ffffffff000 as the mask so that we only "pollute" half of the |
| 148 // address space. In the unlikely case where fragmentation would become an |
| 149 // issue, the kernel will still have another half to use. |
| 150 const uint64_t kRandomAddressMask = 0x3ffffffff000ULL; |
| 151 |
140 #endif // defined(ASLR_IS_SUPPORTED) | 152 #endif // defined(ASLR_IS_SUPPORTED) |
141 | 153 |
142 // Give a random "hint" that is suitable for use with mmap(). This cannot make | 154 // Give a random "hint" that is suitable for use with mmap(). This cannot make |
143 // mmap fail, as the kernel will simply not follow the hint if it can't. | 155 // mmap fail, as the kernel will simply not follow the hint if it can't. |
144 // However, this will create address space fragmentation. Currently, we only | 156 // However, this will create address space fragmentation. Currently, we only |
145 // implement it on x86_64, where we have a 47 bits userland address space and | 157 // implement it on x86_64, where we have a 47 bits userland address space and |
146 // fragmentation is not an issue. | 158 // fragmentation is not an issue. |
147 void* GetRandomAddrHint() { | 159 void* GetRandomAddrHint() { |
148 #if !defined(ASLR_IS_SUPPORTED) | 160 #if !defined(ASLR_IS_SUPPORTED) |
149 return NULL; | 161 return NULL; |
(...skipping 20 matching lines...) Expand all Loading... |
170 ssize_t len; | 182 ssize_t len; |
171 len = read(urandom_fd, &seed, sizeof(seed)); | 183 len = read(urandom_fd, &seed, sizeof(seed)); |
172 ASSERT(len == sizeof(seed)); | 184 ASSERT(len == sizeof(seed)); |
173 int ret = close(urandom_fd); | 185 int ret = close(urandom_fd); |
174 ASSERT(ret == 0); | 186 ASSERT(ret == 0); |
175 } | 187 } |
176 raninit(&ctx, seed); | 188 raninit(&ctx, seed); |
177 } | 189 } |
178 uint64_t random_address = (static_cast<uint64_t>(ranval(&ctx)) << 32) | | 190 uint64_t random_address = (static_cast<uint64_t>(ranval(&ctx)) << 32) | |
179 ranval(&ctx); | 191 ranval(&ctx); |
180 // If the kernel cannot honor the hint in arch_get_unmapped_area_topdown, it | |
181 // will simply ignore it. So we give a hint that has a good chance of | |
182 // working. | |
183 // The mmap top-down allocator will normally allocate below TASK_SIZE - gap, | |
184 // with a gap that depends on the max stack size. See x86/mm/mmap.c. We | |
185 // should make allocations that are below this area, which would be | |
186 // 0x7ffbf8000000. | |
187 // We use 0x3ffffffff000 as the mask so that we only "pollute" half of the | |
188 // address space. In the unlikely case where fragmentation would become an | |
189 // issue, the kernel will still have another half to use. | |
190 // A bit-wise "and" won't bias our random distribution. | 192 // A bit-wise "and" won't bias our random distribution. |
191 random_address &= 0x3ffffffff000ULL; | 193 random_address &= kRandomAddressMask; |
192 return reinterpret_cast<void*>(random_address); | 194 return reinterpret_cast<void*>(random_address); |
193 #endif // ASLR_IS_SUPPORTED | 195 #endif // ASLR_IS_SUPPORTED |
194 } | 196 } |
195 | 197 |
| 198 // Allocate |length| bytes of memory using mmap(). The memory will be |
| 199 // readable and writeable, but not executable. |
| 200 // Like mmap(), we will return MAP_FAILED on failure. |
| 201 // |is_aslr_enabled| controls address space layout randomization. When true, we |
| 202 // will put the first mapping at a random address and will then try to grow it. |
| 203 // If it's not possible to grow an existing mapping, a new one will be created. |
// mmap() |length| bytes of anonymous memory, readable and writeable but
// not executable. Returns MAP_FAILED on failure, exactly like mmap().
// When |is_aslr_enabled| is true, the first mapping is placed at a random
// address and later calls try to extend it; a fresh random region is
// chosen whenever extending is no longer possible.
void* AllocWithMmap(size_t length, bool is_aslr_enabled) {
  // Protected by the general TCMalloc_SystemAlloc spinlock, so a plain
  // function-local static is safe here.
  static void* address_hint = NULL;
#if defined(ASLR_IS_SUPPORTED)
  if (is_aslr_enabled) {
    const uint64_t hint_bits = reinterpret_cast<uint64_t>(address_hint);
    // Pick a new random hint if we have none, or if the previous one has
    // drifted outside the region selected by kRandomAddressMask.
    if (address_hint == NULL || (hint_bits & ~kRandomAddressMask) != 0) {
      address_hint = GetRandomAddrHint();
    }
  }
#endif  // ASLR_IS_SUPPORTED

  // A non-NULL address_hint usually lets the kernel extend the previous
  // mapping rather than create a brand new one.
  void* ret = mmap(address_hint, length, PROT_READ|PROT_WRITE,
                   MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
#if defined(ASLR_IS_SUPPORTED)
  if (ret != address_hint) {
    // Either mmap() failed or a collision kept the kernel from honoring
    // the hint; reset so the next call picks a fresh random region.
    address_hint = NULL;
  } else {
    // The hint was honored: aim the next mmap() immediately after this
    // mapping so it keeps growing, which was done for performance
    // reasons (see crbug.com/173371). A better balance between
    // performance and security may be struck at a later date.
    // If this addition overflowed it could only yield NULL, which is
    // what we would want anyway (and it cannot happen on the currently
    // supported architecture).
    address_hint = static_cast<char*>(ret) + length;
  }
#endif  // ASLR_IS_SUPPORTED
  return ret;
}
| 236 |
196 } // Anonymous namespace to avoid name conflicts on "CheckAddressBits". | 237 } // Anonymous namespace to avoid name conflicts on "CheckAddressBits". |
197 | 238 |
198 COMPILE_ASSERT(kAddressBits <= 8 * sizeof(void*), | 239 COMPILE_ASSERT(kAddressBits <= 8 * sizeof(void*), |
199 address_bits_larger_than_pointer_size); | 240 address_bits_larger_than_pointer_size); |
200 | 241 |
201 // Structure for discovering alignment | 242 // Structure for discovering alignment |
202 union MemoryAligner { | 243 union MemoryAligner { |
203 void* p; | 244 void* p; |
204 double d; | 245 double d; |
205 size_t s; | 246 size_t s; |
(...skipping 192 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
398 // Ask for extra memory if alignment > pagesize | 439 // Ask for extra memory if alignment > pagesize |
399 size_t extra = 0; | 440 size_t extra = 0; |
400 if (alignment > pagesize) { | 441 if (alignment > pagesize) { |
401 extra = alignment - pagesize; | 442 extra = alignment - pagesize; |
402 } | 443 } |
403 | 444 |
404 // Note: size + extra does not overflow since: | 445 // Note: size + extra does not overflow since: |
405 // size + alignment < (1<<NBITS). | 446 // size + alignment < (1<<NBITS). |
406 // and extra <= alignment | 447 // and extra <= alignment |
407 // therefore size + extra < (1<<NBITS) | 448 // therefore size + extra < (1<<NBITS) |
408 void* address_hint = NULL; | 449 void* result = AllocWithMmap(size + extra, FLAGS_malloc_random_allocator); |
409 if (FLAGS_malloc_random_allocator) { | |
410 address_hint = GetRandomAddrHint(); | |
411 } | |
412 void* result = mmap(address_hint, size + extra, | |
413 PROT_READ|PROT_WRITE, | |
414 MAP_PRIVATE|MAP_ANONYMOUS, | |
415 -1, 0); | |
416 if (result == reinterpret_cast<void*>(MAP_FAILED)) { | 450 if (result == reinterpret_cast<void*>(MAP_FAILED)) { |
417 return NULL; | 451 return NULL; |
418 } | 452 } |
419 | 453 |
420 // Adjust the return memory so it is aligned | 454 // Adjust the return memory so it is aligned |
421 uintptr_t ptr = reinterpret_cast<uintptr_t>(result); | 455 uintptr_t ptr = reinterpret_cast<uintptr_t>(result); |
422 size_t adjust = 0; | 456 size_t adjust = 0; |
423 if ((ptr & (alignment - 1)) != 0) { | 457 if ((ptr & (alignment - 1)) != 0) { |
424 adjust = alignment - (ptr & (alignment - 1)); | 458 adjust = alignment - (ptr & (alignment - 1)); |
425 } | 459 } |
(...skipping 226 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
652 } | 686 } |
653 } | 687 } |
654 #endif | 688 #endif |
655 } | 689 } |
656 | 690 |
// Intentionally a no-op: TCMalloc_SystemRelease leaves pages in a state
// where the application can reuse them directly, so there is no
// re-commit work to perform here.
void TCMalloc_SystemCommit(void* start, size_t length) {
}
OLD | NEW |