OLD | NEW |
1 // Copyright (c) 2005, Google Inc. | 1 // Copyright (c) 2005, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 82 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
93 // Check that no bit is set at position ADDRESS_BITS or higher. | 93 // Check that no bit is set at position ADDRESS_BITS or higher. |
94 template <int ADDRESS_BITS> bool CheckAddressBits(uintptr_t ptr) { | 94 template <int ADDRESS_BITS> bool CheckAddressBits(uintptr_t ptr) { |
95 return (ptr >> ADDRESS_BITS) == 0; | 95 return (ptr >> ADDRESS_BITS) == 0; |
96 } | 96 } |
97 | 97 |
98 // Specialize for the bit width of a pointer to avoid undefined shift. | 98 // Specialize for the bit width of a pointer to avoid undefined shift. |
99 template <> bool CheckAddressBits<8 * sizeof(void*)>(uintptr_t ptr) { | 99 template <> bool CheckAddressBits<8 * sizeof(void*)>(uintptr_t ptr) { |
100 return true; | 100 return true; |
101 } | 101 } |
102 | 102 |
| 103 #if (defined(OS_LINUX) || defined(OS_CHROMEOS)) && defined(__x86_64__) |
| 104 #define ASLR_IS_SUPPORTED |
| 105 #endif |
| 106 |
| 107 #if defined(ASLR_IS_SUPPORTED) |
| 108 // From libdieharder, public domain library by Bob Jenkins (rngav.c). |
| 109 // Described at http://burtleburtle.net/bob/rand/smallprng.html. |
| 110 // Not cryptographically secure, but good enough for what we need. |
| 111 typedef uint32_t u4; |
| 112 struct ranctx { |
| 113 u4 a; |
| 114 u4 b; |
| 115 u4 c; |
| 116 u4 d; |
| 117 }; |
| 118 |
| 119 #define rot(x,k) (((x)<<(k))|((x)>>(32-(k)))) |
| 120 |
| 121 u4 ranval(ranctx* x) { |
| 122 /* xxx: the generator being tested */ |
| 123 u4 e = x->a - rot(x->b, 27); |
| 124 x->a = x->b ^ rot(x->c, 17); |
| 125 x->b = x->c + x->d; |
| 126 x->c = x->d + e; |
| 127 x->d = e + x->a; |
| 128 return x->d; |
| 129 } |
| 130 |
| 131 void raninit(ranctx* x, u4 seed) { |
| 132 u4 i; |
| 133 x->a = 0xf1ea5eed; |
| 134 x->b = x->c = x->d = seed; |
| 135 for (i = 0; i < 20; ++i) { |
| 136 (void) ranval(x); |
| 137 } |
| 138 } |
| 139 |
| 140 #endif // defined(ASLR_IS_SUPPORTED) |
| 141 |
| 142 // Give a random "hint" that is suitable for use with mmap(). This cannot make |
| 143 // mmap fail, as the kernel will simply not follow the hint if it can't. |
| 144 // However, this will create address space fragmentation. Currently, we only |
| 145 // implement it on x86_64, where we have a 47-bit userland address space and |
| 146 // fragmentation is not an issue. |
| 147 void* GetRandomAddrHint() { |
| 148 #if !defined(ASLR_IS_SUPPORTED) |
| 149 return NULL; |
| 150 #else |
| 151 // Note: we are protected by the general TCMalloc_SystemAlloc spinlock. Given |
| 152 // the nature of what we're doing, it wouldn't be critical if we weren't for |
| 153 // ctx, but it is for the "initialized" variable. |
| 154 // It's nice to share the state between threads, because scheduling will add |
| 155 // some randomness to the succession of ranval() calls. |
| 156 static ranctx ctx; |
| 157 static bool initialized = false; |
| 158 if (!initialized) { |
| 159 initialized = true; |
| 160 // We really want this to be a stack variable and don't want any compiler |
| 161 // optimization. We're using its address as a poor-man's source of |
| 162 // randomness. |
| 163 volatile char c; |
| 164 // Pre-initialize our seed with a "random" address in case /dev/urandom is |
| 165 // not available. |
| 166 uint32_t seed = (reinterpret_cast<uint64_t>(&c) >> 32) ^ |
| 167 reinterpret_cast<uint64_t>(&c); |
| 168 int urandom_fd = open("/dev/urandom", O_RDONLY); |
| 169 if (urandom_fd >= 0) { |
| 170 ssize_t len; |
| 171 len = read(urandom_fd, &seed, sizeof(seed)); |
| 172 ASSERT(len == sizeof(seed)); |
| 173 int ret = close(urandom_fd); |
| 174 ASSERT(ret == 0); |
| 175 } |
| 176 raninit(&ctx, seed); |
| 177 } |
| 178 uint64_t random_address = (static_cast<uint64_t>(ranval(&ctx)) << 32) | |
| 179 ranval(&ctx); |
| 180 // If the kernel cannot honor the hint in arch_get_unmapped_area_topdown, it |
| 181 // will simply ignore it. So we give a hint that has a good chance of |
| 182 // working. |
| 183 // The mmap top-down allocator will normally allocate below TASK_SIZE - gap, |
| 184 // with a gap that depends on the max stack size. See x86/mm/mmap.c. We |
| 185 // should make allocations that are below this area, which would be |
| 186 // 0x7ffbf8000000. |
| 187 // We use 0x3ffffffff000 as the mask so that we only "pollute" half of the |
| 188 // address space. In the unlikely case where fragmentation would become an |
| 189 // issue, the kernel will still have another half to use. |
| 190 // A bit-wise "and" won't bias our random distribution. |
| 191 random_address &= 0x3ffffffff000ULL; |
| 192 return reinterpret_cast<void*>(random_address); |
| 193 #endif // ASLR_IS_SUPPORTED |
| 194 } |
| 195 |
103 } // Anonymous namespace to avoid name conflicts on "CheckAddressBits". | 196 } // Anonymous namespace to avoid name conflicts on "CheckAddressBits". |
104 | 197 |
105 COMPILE_ASSERT(kAddressBits <= 8 * sizeof(void*), | 198 COMPILE_ASSERT(kAddressBits <= 8 * sizeof(void*), |
106 address_bits_larger_than_pointer_size); | 199 address_bits_larger_than_pointer_size); |
107 | 200 |
108 // Structure for discovering alignment | 201 // Structure for discovering alignment |
109 union MemoryAligner { | 202 union MemoryAligner { |
110 void* p; | 203 void* p; |
111 double d; | 204 double d; |
112 size_t s; | 205 size_t s; |
(...skipping 19 matching lines...) Expand all Loading... |
132 EnvToInt("TCMALLOC_DEVMEM_LIMIT", 0), | 225 EnvToInt("TCMALLOC_DEVMEM_LIMIT", 0), |
133 "Physical memory limit location in MB for /dev/mem allocation." | 226 "Physical memory limit location in MB for /dev/mem allocation." |
134 " Setting this to 0 means no limit."); | 227 " Setting this to 0 means no limit."); |
135 DEFINE_bool(malloc_skip_sbrk, | 228 DEFINE_bool(malloc_skip_sbrk, |
136 EnvToBool("TCMALLOC_SKIP_SBRK", false), | 229 EnvToBool("TCMALLOC_SKIP_SBRK", false), |
137 "Whether sbrk can be used to obtain memory."); | 230 "Whether sbrk can be used to obtain memory."); |
138 DEFINE_bool(malloc_skip_mmap, | 231 DEFINE_bool(malloc_skip_mmap, |
139 EnvToBool("TCMALLOC_SKIP_MMAP", false), | 232 EnvToBool("TCMALLOC_SKIP_MMAP", false), |
140 "Whether mmap can be used to obtain memory."); | 233 "Whether mmap can be used to obtain memory."); |
141 | 234 |
| 235 DEFINE_bool(malloc_random_allocator, |
| 236 #if defined(ASLR_IS_SUPPORTED) |
| 237 EnvToBool("TCMALLOC_ASLR", true), |
| 238 #else |
| 239 EnvToBool("TCMALLOC_ASLR", false), |
| 240 #endif |
| 241 "Whether to randomize the address space via mmap()."); |
| 242 |
142 // static allocators | 243 // static allocators |
143 class SbrkSysAllocator : public SysAllocator { | 244 class SbrkSysAllocator : public SysAllocator { |
144 public: | 245 public: |
145 SbrkSysAllocator() : SysAllocator() { | 246 SbrkSysAllocator() : SysAllocator() { |
146 } | 247 } |
147 void* Alloc(size_t size, size_t *actual_size, size_t alignment); | 248 void* Alloc(size_t size, size_t *actual_size, size_t alignment); |
148 }; | 249 }; |
149 static char sbrk_space[sizeof(SbrkSysAllocator)]; | 250 static char sbrk_space[sizeof(SbrkSysAllocator)]; |
150 | 251 |
151 class MmapSysAllocator : public SysAllocator { | 252 class MmapSysAllocator : public SysAllocator { |
(...skipping 145 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
297 // Ask for extra memory if alignment > pagesize | 398 // Ask for extra memory if alignment > pagesize |
298 size_t extra = 0; | 399 size_t extra = 0; |
299 if (alignment > pagesize) { | 400 if (alignment > pagesize) { |
300 extra = alignment - pagesize; | 401 extra = alignment - pagesize; |
301 } | 402 } |
302 | 403 |
303 // Note: size + extra does not overflow since: | 404 // Note: size + extra does not overflow since: |
304 // size + alignment < (1<<NBITS). | 405 // size + alignment < (1<<NBITS). |
305 // and extra <= alignment | 406 // and extra <= alignment |
306 // therefore size + extra < (1<<NBITS) | 407 // therefore size + extra < (1<<NBITS) |
307 void* result = mmap(NULL, size + extra, | 408 void* address_hint = NULL; |
| 409 if (FLAGS_malloc_random_allocator) { |
| 410 address_hint = GetRandomAddrHint(); |
| 411 } |
| 412 void* result = mmap(address_hint, size + extra, |
308 PROT_READ|PROT_WRITE, | 413 PROT_READ|PROT_WRITE, |
309 MAP_PRIVATE|MAP_ANONYMOUS, | 414 MAP_PRIVATE|MAP_ANONYMOUS, |
310 -1, 0); | 415 -1, 0); |
311 if (result == reinterpret_cast<void*>(MAP_FAILED)) { | 416 if (result == reinterpret_cast<void*>(MAP_FAILED)) { |
312 return NULL; | 417 return NULL; |
313 } | 418 } |
314 | 419 |
315 // Adjust the return memory so it is aligned | 420 // Adjust the return memory so it is aligned |
316 uintptr_t ptr = reinterpret_cast<uintptr_t>(result); | 421 uintptr_t ptr = reinterpret_cast<uintptr_t>(result); |
317 size_t adjust = 0; | 422 size_t adjust = 0; |
(...skipping 128 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
446 SbrkSysAllocator *sbrk = new (sbrk_space) SbrkSysAllocator(); | 551 SbrkSysAllocator *sbrk = new (sbrk_space) SbrkSysAllocator(); |
447 | 552 |
448 // In 64-bit debug mode, place the mmap allocator first since it | 553 // In 64-bit debug mode, place the mmap allocator first since it |
449 // allocates pointers that do not fit in 32 bits and therefore gives | 554 // allocates pointers that do not fit in 32 bits and therefore gives |
450 // us better testing of code's 64-bit correctness. It also leads to | 555 // us better testing of code's 64-bit correctness. It also leads to |
451 // less false negatives in heap-checking code. (Numbers are less | 556 // less false negatives in heap-checking code. (Numbers are less |
452 // likely to look like pointers and therefore the conservative gc in | 557 // likely to look like pointers and therefore the conservative gc in |
453 // the heap-checker is less likely to misinterpret a number as a | 558 // the heap-checker is less likely to misinterpret a number as a |
454 // pointer). | 559 // pointer). |
455 DefaultSysAllocator *sdef = new (default_space) DefaultSysAllocator(); | 560 DefaultSysAllocator *sdef = new (default_space) DefaultSysAllocator(); |
| 561 // Unfortunately, this code runs before flags are initialized. So |
| 562 // we can't use FLAGS_malloc_random_allocator. |
| 563 #if defined(ASLR_IS_SUPPORTED) |
| 564 // Our only random allocator is mmap. |
| 565 sdef->SetChildAllocator(mmap, 0, mmap_name); |
| 566 #else |
456 if (kDebugMode && sizeof(void*) > 4) { | 567 if (kDebugMode && sizeof(void*) > 4) { |
457 sdef->SetChildAllocator(mmap, 0, mmap_name); | 568 sdef->SetChildAllocator(mmap, 0, mmap_name); |
458 sdef->SetChildAllocator(sbrk, 1, sbrk_name); | 569 sdef->SetChildAllocator(sbrk, 1, sbrk_name); |
459 } else { | 570 } else { |
460 sdef->SetChildAllocator(sbrk, 0, sbrk_name); | 571 sdef->SetChildAllocator(sbrk, 0, sbrk_name); |
461 sdef->SetChildAllocator(mmap, 1, mmap_name); | 572 sdef->SetChildAllocator(mmap, 1, mmap_name); |
462 } | 573 } |
| 574 #endif // ASLR_IS_SUPPORTED |
463 sys_alloc = sdef; | 575 sys_alloc = sdef; |
464 } | 576 } |
465 | 577 |
466 void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size, | 578 void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size, |
467 size_t alignment) { | 579 size_t alignment) { |
468 // Discard requests that overflow | 580 // Discard requests that overflow |
469 if (size + alignment < size) return NULL; | 581 if (size + alignment < size) return NULL; |
470 | 582 |
471 SpinLockHolder lock_holder(&spinlock); | 583 SpinLockHolder lock_holder(&spinlock); |
472 | 584 |
(...skipping 67 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
540 } | 652 } |
541 } | 653 } |
542 #endif | 654 #endif |
543 } | 655 } |
544 | 656 |
545 void TCMalloc_SystemCommit(void* start, size_t length) { | 657 void TCMalloc_SystemCommit(void* start, size_t length) { |
546 // Nothing to do here. TCMalloc_SystemRelease does not alter pages | 658 // Nothing to do here. TCMalloc_SystemRelease does not alter pages |
547 // such that they need to be re-committed before they can be used by the | 659 // such that they need to be re-committed before they can be used by the |
548 // application. | 660 // application. |
549 } | 661 } |
OLD | NEW |