Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2005, Google Inc. | 1 // Copyright (c) 2005, Google Inc. |
| 2 // All rights reserved. | 2 // All rights reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
| 6 // met: | 6 // met: |
| 7 // | 7 // |
| 8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
| 9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
| 10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
| (...skipping 82 matching lines...) | |
| 93 // Check that no bit is set at position ADDRESS_BITS or higher. | 93 // Check that no bit is set at position ADDRESS_BITS or higher. |
| 94 template <int ADDRESS_BITS> bool CheckAddressBits(uintptr_t ptr) { | 94 template <int ADDRESS_BITS> bool CheckAddressBits(uintptr_t ptr) { |
| 95 return (ptr >> ADDRESS_BITS) == 0; | 95 return (ptr >> ADDRESS_BITS) == 0; |
| 96 } | 96 } |
| 97 | 97 |
| 98 // Specialize for the bit width of a pointer to avoid undefined shift. | 98 // Specialize for the bit width of a pointer to avoid undefined shift. |
| 99 template <> bool CheckAddressBits<8 * sizeof(void*)>(uintptr_t ptr) { | 99 template <> bool CheckAddressBits<8 * sizeof(void*)>(uintptr_t ptr) { |
| 100 return true; | 100 return true; |
| 101 } | 101 } |
| 102 | 102 |
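For context, a shift count equal to or greater than the operand's bit width is undefined behavior in C++, which is why the full-pointer-width case above is specialized to skip the shift entirely. A minimal standalone sketch (not part of the patch; the 48-bit values are illustrative and assume a 64-bit build):

```cpp
#include <assert.h>
#include <stdint.h>

// Mirrors the pattern above: reject pointers that use bits at or above
// ADDRESS_BITS, with a specialization for the full pointer width where the
// shift itself would be undefined behavior.
template <int ADDRESS_BITS> bool CheckAddressBitsSketch(uintptr_t ptr) {
  return (ptr >> ADDRESS_BITS) == 0;
}
template <> bool CheckAddressBitsSketch<8 * sizeof(void*)>(uintptr_t) {
  return true;
}

int main() {
  // A canonical x86_64 userland address fits in 48 bits...
  assert(CheckAddressBitsSketch<48>(0x00007fffffff0000ULL));
  // ...while an address with bit 48 set does not.
  assert(!CheckAddressBitsSketch<48>(0x0001000000000000ULL));
  // The full-width instantiation never shifts, so it is always well-defined.
  assert(CheckAddressBitsSketch<8 * sizeof(void*)>(~uintptr_t(0)));
  return 0;
}
```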
| 103 // From libdieharder, public domain library by Bob Jenkins (rngav.c). | |
| 104 // Described at http://burtleburtle.net/bob/rand/smallprng.html. | |
| 105 // Not cryptographically secure, but good enough for what we need. | |
| 106 typedef uint32_t u4; | |
| 107 typedef struct ranctx { u4 a; u4 b; u4 c; u4 d; } ranctx; | |
| 108 | |
| 109 #define rot(x,k) (((x)<<(k))|((x)>>(32-(k)))) | |
| 110 | |
| 111 u4 ranval(ranctx* x) { | |
| 112 /* xxx: the generator being tested */ | |
| 113 u4 e = x->a - rot(x->b, 27); | |
| 114 x->a = x->b ^ rot(x->c, 17); | |
| 115 x->b = x->c + x->d; | |
| 116 x->c = x->d + e; | |
| 117 x->d = e + x->a; | |
| 118 return x->d; | |
| 119 } | |
| 120 | |
| 121 void raninit(ranctx* x, u4 seed) { | |
| 122 u4 i; | |
| 123 x->a = 0xf1ea5eed, x->b = x->c = x->d = seed; | |
| 124 for (i=0; i<20; ++i) { | |
| 125 (void)ranval(x); | |
| 126 } | |
| 127 } | |
| 128 | |
| 129 // End PRNG code. | |
jar (doing other things), 2013/01/30 03:33:26:
  nit: all this PRNG code can drop into a if defined
jln (very slow on Chromium), 2013/01/30 03:39:27:
  Done.
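For readers unfamiliar with Bob Jenkins' small PRNG, the following standalone sketch mirrors how the patch uses it: seed the state once with raninit(), then combine two 32-bit ranval() draws into a 64-bit candidate value, as GetRandomAddrHint() does below. The seed and output formatting are illustrative only.

```cpp
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u4;
typedef struct ranctx { u4 a; u4 b; u4 c; u4 d; } ranctx;

#define rot(x,k) (((x)<<(k))|((x)>>(32-(k))))

// Bob Jenkins' small noncryptographic PRNG, as in the code above.
static u4 ranval(ranctx* x) {
  u4 e = x->a - rot(x->b, 27);
  x->a = x->b ^ rot(x->c, 17);
  x->b = x->c + x->d;
  x->c = x->d + e;
  x->d = e + x->a;
  return x->d;
}

static void raninit(ranctx* x, u4 seed) {
  x->a = 0xf1ea5eed;
  x->b = x->c = x->d = seed;
  for (u4 i = 0; i < 20; ++i) (void)ranval(x);  // warm up the state
}

int main() {
  ranctx ctx;
  raninit(&ctx, 12345u);
  // Two 32-bit draws combined into a 64-bit value.
  uint64_t r = (static_cast<uint64_t>(ranval(&ctx)) << 32) | ranval(&ctx);
  printf("0x%016llx\n", static_cast<unsigned long long>(r));
  return 0;
}
```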
| 130 | |
| 131 #if (defined(OS_LINUX) || defined(OS_CHROMEOS)) && defined(__x86_64__) | |
| 132 #define ASLR_IS_SUPPORTED | |
| 133 #endif | |
| 134 | |
| 135 // Give a random "hint" that is suitable for use with mmap(). This cannot make | |
| 136 // mmap fail, as the kernel will simply not follow the hint if it can't. | |
| 137 // However, this will create address space fragmentation. Currently, we only | |
| 138 // implement it on x86_64, where we have a 47-bit userland address space and | |
| 139 // fragmentation is not an issue. | |
| 140 void* GetRandomAddrHint() { | |
| 141 #if !defined(ASLR_IS_SUPPORTED) | |
| 142 return NULL; | |
| 143 #else | |
| 144 // Note: we are protected by the general TCMalloc_SystemAlloc spinlock. Given | |
| 145 // the nature of what we're doing, it wouldn't be critical if we weren't for | |
| 146 // ctx, but it is for the "initialized" variable. | |
| 147 // It's nice to share the state between threads, because scheduling will add | |
| 148 // some randomness to the succession of ranval() calls. | |
| 149 static ranctx ctx; | |
| 150 static bool initialized = false; | |
| 151 if (!initialized) { | |
| 152 initialized = true; | |
| 153 // We really want this to be a stack variable and don't want any compiler | |
| 154 // optimization. We're using its address as a poor-man's source of | |
| 155 // randomness. | |
| 156 volatile char c; | |
| 157 // Pre-initialize our seed with a "random" address in case /dev/urandom is | |
| 158 // not available. | |
| 159 uint32_t seed = (reinterpret_cast<uint64_t>(&c) >> 32) ^ | |
| 160 reinterpret_cast<uint64_t>(&c); | |
| 161 int urandom_fd = open("/dev/urandom", O_RDONLY); | |
| 162 if (urandom_fd >= 0) { | |
| 163 ssize_t len; | |
| 164 len = read(urandom_fd, &seed, sizeof(seed)); | |
| 165 ASSERT(len == sizeof(seed)); | |
| 166 int ret = close(urandom_fd); | |
| 167 ASSERT(ret == 0); | |
| 168 } | |
| 169 raninit(&ctx, seed); | |
| 170 } | |
| 171 uint64_t random_address = (static_cast<uint64_t>(ranval(&ctx)) << 32) | | |
| 172 ranval(&ctx); | |
| 173 // If the kernel cannot honor the hint in arch_get_unmapped_area_topdown, it | |
| 174 // will simply ignore it. So we give a hint that has a good chance of | |
| 175 // working. | |
| 176 // The mmap top-down allocator will normally allocate below TASK_SIZE - gap, | |
| 177 // with a gap that depends on the max stack size. See x86/mm/mmap.c. We | |
| 178 // should make allocations that are below this area, which would be | |
| 179 // 0x7ffbf8000000. | |
| 180 // We use 0x3ffffffff000 as the mask so that we only "pollute" half of the | |
| 181 // address space. In the unlikely case where fragmentation would become an | |
| 182 // issue, the kernel will still have another half to use. | |
| 183 // A bit-wise "and" won't bias our random distribution. | |
| 184 random_address &= 0x3ffffffff000ULL; | |
| 185 return reinterpret_cast<void*>(random_address); | |
| 186 #endif // ASLR_IS_SUPPORTED | |
| 187 } | |
| 188 | |
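The hint behavior described above can be demonstrated in isolation: a plain (non-MAP_FIXED) mmap() hint on Linux is advisory, so the mapping still succeeds if the hinted range is unavailable, and the 0x3ffffffff000 mask keeps the hint page aligned (low 12 bits clear) and inside the lower half of the 47-bit userland range. A hedged sketch, assuming Linux/x86_64; the hint constant is illustrative:

```cpp
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

int main() {
  // A page-aligned hint in the lower half of the 47-bit userland range,
  // shaped like the masked value above (illustrative constant).
  void* hint = reinterpret_cast<void*>(0x12345678000ULL);
  void* p = mmap(hint, 4096, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) {
    perror("mmap");
    return 1;
  }
  // p may or may not equal hint; either way the mapping succeeded.
  printf("hint=%p got=%p\n", hint, p);
  munmap(p, 4096);
  return 0;
}
```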
| 103 } // Anonymous namespace to avoid name conflicts on "CheckAddressBits". | 189 } // Anonymous namespace to avoid name conflicts on "CheckAddressBits". |
| 104 | 190 |
| 105 COMPILE_ASSERT(kAddressBits <= 8 * sizeof(void*), | 191 COMPILE_ASSERT(kAddressBits <= 8 * sizeof(void*), |
| 106 address_bits_larger_than_pointer_size); | 192 address_bits_larger_than_pointer_size); |
| 107 | 193 |
| 108 // Structure for discovering alignment | 194 // Structure for discovering alignment |
| 109 union MemoryAligner { | 195 union MemoryAligner { |
| 110 void* p; | 196 void* p; |
| 111 double d; | 197 double d; |
| 112 size_t s; | 198 size_t s; |
| (...skipping 19 matching lines...) | |
| 132 EnvToInt("TCMALLOC_DEVMEM_LIMIT", 0), | 218 EnvToInt("TCMALLOC_DEVMEM_LIMIT", 0), |
| 133 "Physical memory limit location in MB for /dev/mem allocation." | 219 "Physical memory limit location in MB for /dev/mem allocation." |
| 134 " Setting this to 0 means no limit."); | 220 " Setting this to 0 means no limit."); |
| 135 DEFINE_bool(malloc_skip_sbrk, | 221 DEFINE_bool(malloc_skip_sbrk, |
| 136 EnvToBool("TCMALLOC_SKIP_SBRK", false), | 222 EnvToBool("TCMALLOC_SKIP_SBRK", false), |
| 137 "Whether sbrk can be used to obtain memory."); | 223 "Whether sbrk can be used to obtain memory."); |
| 138 DEFINE_bool(malloc_skip_mmap, | 224 DEFINE_bool(malloc_skip_mmap, |
| 139 EnvToBool("TCMALLOC_SKIP_MMAP", false), | 225 EnvToBool("TCMALLOC_SKIP_MMAP", false), |
| 140 "Whether mmap can be used to obtain memory."); | 226 "Whether mmap can be used to obtain memory."); |
| 141 | 227 |
| 228 DEFINE_bool(malloc_random_allocator, | |
| 229 #if defined(ASLR_IS_SUPPORTED) | |
| 230 EnvToBool("TCMALLOC_ASLR", true), | |
| 231 #else | |
| 232 EnvToBool("TCMALLOC_ASLR", false), | |
| 233 #endif | |
| 234 "Whether to randomize the address space via mmap()."); | |
| 235 | |
| 142 // static allocators | 236 // static allocators |
| 143 class SbrkSysAllocator : public SysAllocator { | 237 class SbrkSysAllocator : public SysAllocator { |
| 144 public: | 238 public: |
| 145 SbrkSysAllocator() : SysAllocator() { | 239 SbrkSysAllocator() : SysAllocator() { |
| 146 } | 240 } |
| 147 void* Alloc(size_t size, size_t *actual_size, size_t alignment); | 241 void* Alloc(size_t size, size_t *actual_size, size_t alignment); |
| 148 }; | 242 }; |
| 149 static char sbrk_space[sizeof(SbrkSysAllocator)]; | 243 static char sbrk_space[sizeof(SbrkSysAllocator)]; |
| 150 | 244 |
| 151 class MmapSysAllocator : public SysAllocator { | 245 class MmapSysAllocator : public SysAllocator { |
| (...skipping 145 matching lines...) | |
| 297 // Ask for extra memory if alignment > pagesize | 391 // Ask for extra memory if alignment > pagesize |
| 298 size_t extra = 0; | 392 size_t extra = 0; |
| 299 if (alignment > pagesize) { | 393 if (alignment > pagesize) { |
| 300 extra = alignment - pagesize; | 394 extra = alignment - pagesize; |
| 301 } | 395 } |
| 302 | 396 |
| 303 // Note: size + extra does not overflow since: | 397 // Note: size + extra does not overflow since: |
| 304 // size + alignment < (1<<NBITS). | 398 // size + alignment < (1<<NBITS). |
| 305 // and extra <= alignment | 399 // and extra <= alignment |
| 306 // therefore size + extra < (1<<NBITS) | 400 // therefore size + extra < (1<<NBITS) |
| 307 void* result = mmap(NULL, size + extra, | 401 void* address_hint = NULL; |
| 402 if (FLAGS_malloc_random_allocator) { | |
| 403 address_hint = GetRandomAddrHint(); | |
| 404 } | |
| 405 void* result = mmap(address_hint, size + extra, | |
| 308 PROT_READ|PROT_WRITE, | 406 PROT_READ|PROT_WRITE, |
| 309 MAP_PRIVATE|MAP_ANONYMOUS, | 407 MAP_PRIVATE|MAP_ANONYMOUS, |
| 310 -1, 0); | 408 -1, 0); |
| 311 if (result == reinterpret_cast<void*>(MAP_FAILED)) { | 409 if (result == reinterpret_cast<void*>(MAP_FAILED)) { |
| 312 return NULL; | 410 return NULL; |
| 313 } | 411 } |
| 314 | 412 |
| 315 // Adjust the return memory so it is aligned | 413 // Adjust the return memory so it is aligned |
| 316 uintptr_t ptr = reinterpret_cast<uintptr_t>(result); | 414 uintptr_t ptr = reinterpret_cast<uintptr_t>(result); |
| 317 size_t adjust = 0; | 415 size_t adjust = 0; |
| (...skipping 128 matching lines...) | |
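The elided lines trim the over-sized mapping so the returned block meets the requested alignment. A sketch of that arithmetic under the stated invariants (extra == alignment - pagesize, mmap() results are page aligned); the helper name and constants below are illustrative, not the patch's code:

```cpp
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

// Given an over-sized, page-aligned mapping starting at ptr, compute how far
// to advance so the returned block is alignment-aligned (alignment is a power
// of two). Because extra == alignment - pagesize and mmap() results are page
// aligned, the adjustment never exceeds extra.
static size_t AlignAdjustment(uintptr_t ptr, size_t alignment) {
  size_t adjust = 0;
  if ((ptr & (alignment - 1)) != 0) {
    adjust = alignment - (ptr & (alignment - 1));
  }
  return adjust;
}

int main() {
  const size_t pagesize = 4096;
  const size_t alignment = 64 * 1024;              // 64 KiB alignment request
  const size_t extra = alignment - pagesize;       // extra bytes requested above
  const uintptr_t ptr = 0x7f0000003000ULL;         // page-aligned mmap() result
  const size_t adjust = AlignAdjustment(ptr, alignment);
  assert(adjust <= extra);                         // aligned block fits in mapping
  assert(((ptr + adjust) & (alignment - 1)) == 0); // result is aligned
  return 0;
}
```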
| 446 SbrkSysAllocator *sbrk = new (sbrk_space) SbrkSysAllocator(); | 544 SbrkSysAllocator *sbrk = new (sbrk_space) SbrkSysAllocator(); |
| 447 | 545 |
| 448 // In 64-bit debug mode, place the mmap allocator first since it | 546 // In 64-bit debug mode, place the mmap allocator first since it |
| 449 // allocates pointers that do not fit in 32 bits and therefore gives | 547 // allocates pointers that do not fit in 32 bits and therefore gives |
| 450 // us better testing of code's 64-bit correctness. It also leads to | 548 // us better testing of code's 64-bit correctness. It also leads to |
| 451 // less false negatives in heap-checking code. (Numbers are less | 549 // less false negatives in heap-checking code. (Numbers are less |
| 452 // likely to look like pointers and therefore the conservative gc in | 550 // likely to look like pointers and therefore the conservative gc in |
| 453 // the heap-checker is less likely to misinterpret a number as a | 551 // the heap-checker is less likely to misinterpret a number as a |
| 454 // pointer). | 552 // pointer). |
| 455 DefaultSysAllocator *sdef = new (default_space) DefaultSysAllocator(); | 553 DefaultSysAllocator *sdef = new (default_space) DefaultSysAllocator(); |
| 554 // Unfortunately, this code runs before flags are initialized. So | |
| 555 // we can't use FLAGS_malloc_random_allocator. | |
| 556 #if defined(ASLR_IS_SUPPORTED) | |
| 557 // Our only random allocator is mmap. | |
| 558 sdef->SetChildAllocator(mmap, 0, mmap_name); | |
| 559 #else | |
| 456 if (kDebugMode && sizeof(void*) > 4) { | 560 if (kDebugMode && sizeof(void*) > 4) { |
| 457 sdef->SetChildAllocator(mmap, 0, mmap_name); | 561 sdef->SetChildAllocator(mmap, 0, mmap_name); |
| 458 sdef->SetChildAllocator(sbrk, 1, sbrk_name); | 562 sdef->SetChildAllocator(sbrk, 1, sbrk_name); |
| 459 } else { | 563 } else { |
| 460 sdef->SetChildAllocator(sbrk, 0, sbrk_name); | 564 sdef->SetChildAllocator(sbrk, 0, sbrk_name); |
| 461 sdef->SetChildAllocator(mmap, 1, mmap_name); | 565 sdef->SetChildAllocator(mmap, 1, mmap_name); |
| 462 } | 566 } |
| 567 #endif // ASLR_IS_SUPPORTED | |
| 463 sys_alloc = sdef; | 568 sys_alloc = sdef; |
| 464 } | 569 } |
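The ordering of the SetChildAllocator() calls matters because the default allocator is assumed to try its children in index order and fall back to the next one on failure; under ASLR only mmap is registered, so there is no sbrk fallback. A minimal sketch of that fallback pattern (hypothetical class names, not the actual DefaultSysAllocator):

```cpp
#include <stddef.h>

// Hypothetical stand-in for the SysAllocator interface.
class SysAllocatorSketch {
 public:
  virtual ~SysAllocatorSketch() {}
  virtual void* Alloc(size_t size, size_t* actual_size, size_t alignment) = 0;
};

// Tries each registered child in index order; the first one that returns a
// non-NULL result wins. This illustrates why registration order matters.
class FallbackAllocatorSketch {
 public:
  FallbackAllocatorSketch() : count_(0) {}
  void SetChild(SysAllocatorSketch* a) {
    if (count_ < kMaxChildren) children_[count_++] = a;
  }
  void* Alloc(size_t size, size_t* actual_size, size_t alignment) {
    for (int i = 0; i < count_; ++i) {
      void* result = children_[i]->Alloc(size, actual_size, alignment);
      if (result != NULL) return result;
    }
    return NULL;  // every child failed
  }
 private:
  static const int kMaxChildren = 2;
  SysAllocatorSketch* children_[kMaxChildren];
  int count_;
};
```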
| 465 | 570 |
| 466 void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size, | 571 void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size, |
| 467 size_t alignment) { | 572 size_t alignment) { |
| 468 // Discard requests that overflow | 573 // Discard requests that overflow |
| 469 if (size + alignment < size) return NULL; | 574 if (size + alignment < size) return NULL; |
| 470 | 575 |
| 471 SpinLockHolder lock_holder(&spinlock); | 576 SpinLockHolder lock_holder(&spinlock); |
| 472 | 577 |
| (...skipping 67 matching lines...) | |
| 540 } | 645 } |
| 541 } | 646 } |
| 542 #endif | 647 #endif |
| 543 } | 648 } |
| 544 | 649 |
| 545 void TCMalloc_SystemCommit(void* start, size_t length) { | 650 void TCMalloc_SystemCommit(void* start, size_t length) { |
| 546 // Nothing to do here. TCMalloc_SystemRelease does not alter pages | 651 // Nothing to do here. TCMalloc_SystemRelease does not alter pages |
| 547 // such that they need to be re-committed before they can be used by the | 652 // such that they need to be re-committed before they can be used by the |
| 548 // application. | 653 // application. |
| 549 } | 654 } |