Chromium Code Reviews
| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2005, Google Inc. | 1 // Copyright (c) 2005, Google Inc. |
| 2 // All rights reserved. | 2 // All rights reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
| 6 // met: | 6 // met: |
| 7 // | 7 // |
| 8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
| 9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
| 10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
| (...skipping 175 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 186 // 0x7ffbf8000000. | 186 // 0x7ffbf8000000. |
| 187 // We use 0x3ffffffff000 as the mask so that we only "pollute" half of the | 187 // We use 0x3ffffffff000 as the mask so that we only "pollute" half of the |
| 188 // address space. In the unlikely case where fragmentation would become an | 188 // address space. In the unlikely case where fragmentation would become an |
| 189 // issue, the kernel will still have another half to use. | 189 // issue, the kernel will still have another half to use. |
| 190 // A bit-wise "and" won't bias our random distribution. | 190 // A bit-wise "and" won't bias our random distribution. |
| 191 random_address &= 0x3ffffffff000ULL; | 191 random_address &= 0x3ffffffff000ULL; |
| 192 return reinterpret_cast<void*>(random_address); | 192 return reinterpret_cast<void*>(random_address); |
| 193 #endif // ASLR_IS_SUPPORTED | 193 #endif // ASLR_IS_SUPPORTED |
| 194 } | 194 } |
| 195 | 195 |
| 196 // Allocate |length| bytes of memory using mmap(). The memory will be | |
| 197 // readable and writeable, but not executable. | |
| 198 // Like mmap(), we will return MAP_FAILED on failure. | |
| 199 // |is_aslr_enabled| controls address space layout randomization. When true, we | |
| 200 // will put the first mapping at a random address and will then try to grow it. | |
| 201 // If it's not possible to grow an existing mapping, a new one will be created. | |
| 202 void* AllocWithMmap(size_t length, bool is_aslr_enabled) { | |
| 203 static void* address_hint = NULL; | |
|
Chris Evans
2013/02/01 09:22:30
What's the threading story here? Is it possible fo
jln (very slow on Chromium)
2013/02/01 09:53:10
Done.
| |
| 204 if (!address_hint && is_aslr_enabled) { | |
| 205 address_hint = GetRandomAddrHint(); | |
| 206 } | |
|
Chris Evans
2013/02/01 09:22:30
Style: don't need braces for single-line if with s
jln (very slow on Chromium)
2013/02/01 09:53:10
The style guide is open on this. I usually always
| |
| 207 void* result = mmap(address_hint, length, PROT_READ|PROT_WRITE, | |
|
Chris Evans
2013/02/01 09:22:30
Maybe some form of comment that the intent here is
jln (very slow on Chromium)
2013/02/01 09:53:10
Isn't the comment below good enough ? Should I mov
| |
| 208 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); | |
| 209 if (result != static_cast<void*>(MAP_FAILED) && is_aslr_enabled) { | |
|
Chris Evans
2013/02/01 09:22:30
I think you can simplify this block to simply
if
jln (very slow on Chromium)
2013/02/01 09:53:10
But is that what we want ? If the goal is to grow
| |
| 210 // If mmap() succeeded at a random address, our next mmap() will try to grow | |
| 211 // the current mapping. | |
| 212 // This has been done for performance reasons, see crbug.com/173371. | |
| 213 // It should be possible to strike a better balance between performance | |
| 214 // and security but will be done at a later date. | |
| 215 address_hint = static_cast<char*>(result) + length; | |
| 216 if (address_hint < result) | |
| 217 address_hint = NULL; | |
| 218 ASSERT((reinterpret_cast<uintptr_t>(address_hint) & 0xfff) == 0); | |
| 219 } | |
| 220 return result; | |
| 221 } | |
| 222 | |
| 196 } // Anonymous namespace to avoid name conflicts on "CheckAddressBits". | 223 } // Anonymous namespace to avoid name conflicts on "CheckAddressBits". |
| 197 | 224 |
| 198 COMPILE_ASSERT(kAddressBits <= 8 * sizeof(void*), | 225 COMPILE_ASSERT(kAddressBits <= 8 * sizeof(void*), |
| 199 address_bits_larger_than_pointer_size); | 226 address_bits_larger_than_pointer_size); |
| 200 | 227 |
| 201 // Structure for discovering alignment | 228 // Structure for discovering alignment |
| 202 union MemoryAligner { | 229 union MemoryAligner { |
| 203 void* p; | 230 void* p; |
| 204 double d; | 231 double d; |
| 205 size_t s; | 232 size_t s; |
| (...skipping 192 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 398 // Ask for extra memory if alignment > pagesize | 425 // Ask for extra memory if alignment > pagesize |
| 399 size_t extra = 0; | 426 size_t extra = 0; |
| 400 if (alignment > pagesize) { | 427 if (alignment > pagesize) { |
| 401 extra = alignment - pagesize; | 428 extra = alignment - pagesize; |
| 402 } | 429 } |
| 403 | 430 |
| 404 // Note: size + extra does not overflow since: | 431 // Note: size + extra does not overflow since: |
| 405 // size + alignment < (1<<NBITS). | 432 // size + alignment < (1<<NBITS). |
| 406 // and extra <= alignment | 433 // and extra <= alignment |
| 407 // therefore size + extra < (1<<NBITS) | 434 // therefore size + extra < (1<<NBITS) |
| 408 void* address_hint = NULL; | 435 void* result = AllocWithMmap(size + extra, FLAGS_malloc_random_allocator); |
| 409 if (FLAGS_malloc_random_allocator) { | |
| 410 address_hint = GetRandomAddrHint(); | |
| 411 } | |
| 412 void* result = mmap(address_hint, size + extra, | |
| 413 PROT_READ|PROT_WRITE, | |
| 414 MAP_PRIVATE|MAP_ANONYMOUS, | |
| 415 -1, 0); | |
| 416 if (result == reinterpret_cast<void*>(MAP_FAILED)) { | 436 if (result == reinterpret_cast<void*>(MAP_FAILED)) { |
| 417 return NULL; | 437 return NULL; |
| 418 } | 438 } |
| 419 | 439 |
| 420 // Adjust the return memory so it is aligned | 440 // Adjust the return memory so it is aligned |
| 421 uintptr_t ptr = reinterpret_cast<uintptr_t>(result); | 441 uintptr_t ptr = reinterpret_cast<uintptr_t>(result); |
| 422 size_t adjust = 0; | 442 size_t adjust = 0; |
| 423 if ((ptr & (alignment - 1)) != 0) { | 443 if ((ptr & (alignment - 1)) != 0) { |
| 424 adjust = alignment - (ptr & (alignment - 1)); | 444 adjust = alignment - (ptr & (alignment - 1)); |
| 425 } | 445 } |
| (...skipping 226 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 652 } | 672 } |
| 653 } | 673 } |
| 654 #endif | 674 #endif |
| 655 } | 675 } |
| 656 | 676 |
| 657 void TCMalloc_SystemCommit(void* start, size_t length) { | 677 void TCMalloc_SystemCommit(void* start, size_t length) { |
| 658 // Nothing to do here. TCMalloc_SystemRelease does not alter pages | 678 // Nothing to do here. TCMalloc_SystemRelease does not alter pages |
| 659 // such that they need to be re-committed before they can be used by the | 679 // such that they need to be re-committed before they can be used by the |
| 660 // application. | 680 // application. |
| 661 } | 681 } |
| OLD | NEW |