OLD | NEW |
---|---|
1 // Copyright (c) 2005, Google Inc. | 1 // Copyright (c) 2005, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 82 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
93 // Check that no bit is set at position ADDRESS_BITS or higher. | 93 // Check that no bit is set at position ADDRESS_BITS or higher. |
94 template <int ADDRESS_BITS> bool CheckAddressBits(uintptr_t ptr) { | 94 template <int ADDRESS_BITS> bool CheckAddressBits(uintptr_t ptr) { |
95 return (ptr >> ADDRESS_BITS) == 0; | 95 return (ptr >> ADDRESS_BITS) == 0; |
96 } | 96 } |
97 | 97 |
98 // Specialize for the bit width of a pointer to avoid undefined shift. | 98 // Specialize for the bit width of a pointer to avoid undefined shift. |
99 template <> bool CheckAddressBits<8 * sizeof(void*)>(uintptr_t ptr) { | 99 template <> bool CheckAddressBits<8 * sizeof(void*)>(uintptr_t ptr) { |
100 return true; | 100 return true; |
101 } | 101 } |
102 | 102 |
103 // From libdieharder, public domain library by Bob Jenkins (rngav.c). | |
104 // Described at http://burtleburtle.net/bob/rand/smallprng.html. | |
105 // Not cryptographically secure, but good enough for what we need. | |
106 typedef uint32_t u4; | |
107 typedef struct ranctx { u4 a; u4 b; u4 c; u4 d; } ranctx; | |
108 | |
109 #define rot(x,k) (((x)<<(k))|((x)>>(32-(k)))) | |
110 | |
111 u4 ranval(ranctx* x) { | |
112 /* xxx: the generator being tested */ | |
113 u4 e = x->a - rot(x->b, 27); | |
114 x->a = x->b ^ rot(x->c, 17); | |
115 x->b = x->c + x->d; | |
116 x->c = x->d + e; | |
117 x->d = e + x->a; | |
118 return x->d; | |
119 } | |
120 | |
121 void raninit(ranctx* x, u4 seed) { | |
122 u4 i; | |
123 x->a = 0xf1ea5eed, x->b = x->c = x->d = seed; | |
124 for (i=0; i<20; ++i) { | |
125 (void)ranval(x); | |
126 } | |
127 } | |
128 | |
129 // End PRNG code. | |
130 | |
131 #define ASLR_IS_SUPPORTED \ | |
132 (defined(OS_LINUX) || defined(OS_CHROMEOS)) && defined(__x86_64__) | |
133 | |
134 // Give a random "hint" that is suitable for use with mmap(). This cannot make | |
135 // mmap fail, as the kernel will simply not follow the hint if it can't. | |
136 // However, this will create address space fragmentation. Currently, we only | |
137 // implement it on x86_64, where we have a 47 bits userland address space and | |
138 // fragmentation is not an issue. | |
139 void* GetRandomAddrHint() { | |
140 #if defined(ASLR_IS_SUPPORTED) | |
141 // Note: we are protected by the general TCMalloc_SystemAlloc spinlock. Given | |
142 // the nature of what we're doing, it wouldn't be critical if we weren't. | |
143 // It's nice to share the state between threads, because scheduling will add | |
144 // some randomness to the succession of ranval() calls. | |
145 static ranctx ctx; | |
146 static bool initialized = false; | |
147 if (!initialized) { | |
148 volatile char c; | |
149 // Pre-initialize our seed with a "random" address in case /dev/urandom is | |
150 // not available. | |
151 uint32_t seed = (reinterpret_cast<uint64_t>(&c) >> 32) | | |
Marius
2013/01/29 07:41:27
^ instead of | ?
jln (very slow on Chromium)
2013/01/29 07:48:15
Ooch. Decidedly, I should stop working for today.
| |
152 reinterpret_cast<uint64_t>(&c); | |
153 int urandom_fd = open("/dev/urandom", O_RDONLY); | |
154 ASSERT(urandom_fd >= 0); | |
155 if (urandom_fd >= 0) { | |
156 ssize_t len; | |
157 len = read(urandom_fd, &seed, sizeof(seed)); | |
158 ASSERT(len == sizeof(seed)); | |
159 int ret = close(urandom_fd); | |
160 ASSERT(ret == 0); | |
161 } | |
162 raninit(&ctx, seed); | |
163 initialized = true; | |
164 } | |
165 uint64_t random_address = (static_cast<uint64_t>(ranval(&ctx)) << 32) | | |
166 ranval(&ctx); | |
167 // If the kernel cannot honor the hint in arch_get_unmapped_area_topdown, it | |
168 // will simply ignore it. So we give a hint that has a good chance of | |
169 // working. | |
170 // The mmap top-down allocator will normally allocate below TASK_SIZE - gap, | |
171 // with a gap that depends on the max stack size. See x86/mm/mmap.c. We | |
172 // should make allocations that are below this area, which would be | |
173 // 0x7ffbf8000000. | |
174 // We use 0x3ffffffff000 as the mask so that we only "pollute" half of the | |
175 // address space. In the unlikely case where fragmentation would become an | |
176 // issue, the kernel will still have another half to use. | |
177 // A bit-wise "and" won't bias our random distribution. | |
178 random_address &= 0x3ffffffff000ULL; | |
179 return reinterpret_cast<void*>(random_address); | |
180 #else | |
181 return NULL; | |
182 #endif // ASLR_IS_SUPPORTED | |
183 } | |
184 | |
103 } // Anonymous namespace to avoid name conflicts on "CheckAddressBits". | 185 } // Anonymous namespace to avoid name conflicts on "CheckAddressBits". |
104 | 186 |
105 COMPILE_ASSERT(kAddressBits <= 8 * sizeof(void*), | 187 COMPILE_ASSERT(kAddressBits <= 8 * sizeof(void*), |
106 address_bits_larger_than_pointer_size); | 188 address_bits_larger_than_pointer_size); |
107 | 189 |
108 // Structure for discovering alignment | 190 // Structure for discovering alignment |
109 union MemoryAligner { | 191 union MemoryAligner { |
110 void* p; | 192 void* p; |
111 double d; | 193 double d; |
112 size_t s; | 194 size_t s; |
(...skipping 19 matching lines...) Expand all Loading... | |
132 EnvToInt("TCMALLOC_DEVMEM_LIMIT", 0), | 214 EnvToInt("TCMALLOC_DEVMEM_LIMIT", 0), |
133 "Physical memory limit location in MB for /dev/mem allocation." | 215 "Physical memory limit location in MB for /dev/mem allocation." |
134 " Setting this to 0 means no limit."); | 216 " Setting this to 0 means no limit."); |
135 DEFINE_bool(malloc_skip_sbrk, | 217 DEFINE_bool(malloc_skip_sbrk, |
136 EnvToBool("TCMALLOC_SKIP_SBRK", false), | 218 EnvToBool("TCMALLOC_SKIP_SBRK", false), |
137 "Whether sbrk can be used to obtain memory."); | 219 "Whether sbrk can be used to obtain memory."); |
138 DEFINE_bool(malloc_skip_mmap, | 220 DEFINE_bool(malloc_skip_mmap, |
139 EnvToBool("TCMALLOC_SKIP_MMAP", false), | 221 EnvToBool("TCMALLOC_SKIP_MMAP", false), |
140 "Whether mmap can be used to obtain memory."); | 222 "Whether mmap can be used to obtain memory."); |
141 | 223 |
224 DEFINE_bool(malloc_random_allocator, | |
225 EnvToBool("TCMALLOC_ASLR", | |
226 #if defined(ASLR_IS_SUPPORTED) | |
227 true), | |
228 #else | |
229 false), | |
230 #endif | |
231 "Whether to randomize the address space via mmap()."); | |
232 | |
142 // static allocators | 233 // static allocators |
143 class SbrkSysAllocator : public SysAllocator { | 234 class SbrkSysAllocator : public SysAllocator { |
144 public: | 235 public: |
145 SbrkSysAllocator() : SysAllocator() { | 236 SbrkSysAllocator() : SysAllocator() { |
146 } | 237 } |
147 void* Alloc(size_t size, size_t *actual_size, size_t alignment); | 238 void* Alloc(size_t size, size_t *actual_size, size_t alignment); |
148 }; | 239 }; |
149 static char sbrk_space[sizeof(SbrkSysAllocator)]; | 240 static char sbrk_space[sizeof(SbrkSysAllocator)]; |
150 | 241 |
151 class MmapSysAllocator : public SysAllocator { | 242 class MmapSysAllocator : public SysAllocator { |
(...skipping 145 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
297 // Ask for extra memory if alignment > pagesize | 388 // Ask for extra memory if alignment > pagesize |
298 size_t extra = 0; | 389 size_t extra = 0; |
299 if (alignment > pagesize) { | 390 if (alignment > pagesize) { |
300 extra = alignment - pagesize; | 391 extra = alignment - pagesize; |
301 } | 392 } |
302 | 393 |
303 // Note: size + extra does not overflow since: | 394 // Note: size + extra does not overflow since: |
304 // size + alignment < (1<<NBITS). | 395 // size + alignment < (1<<NBITS). |
305 // and extra <= alignment | 396 // and extra <= alignment |
306 // therefore size + extra < (1<<NBITS) | 397 // therefore size + extra < (1<<NBITS) |
307 void* result = mmap(NULL, size + extra, | 398 void* address_hint = NULL; |
399 if (FLAGS_malloc_random_allocator) { | |
400 address_hint = GetRandomAddrHint(); | |
401 } | |
402 void* result = mmap(address_hint, size + extra, | |
308 PROT_READ|PROT_WRITE, | 403 PROT_READ|PROT_WRITE, |
309 MAP_PRIVATE|MAP_ANONYMOUS, | 404 MAP_PRIVATE|MAP_ANONYMOUS, |
310 -1, 0); | 405 -1, 0); |
311 if (result == reinterpret_cast<void*>(MAP_FAILED)) { | 406 if (result == reinterpret_cast<void*>(MAP_FAILED)) { |
312 return NULL; | 407 return NULL; |
313 } | 408 } |
314 | 409 |
315 // Adjust the return memory so it is aligned | 410 // Adjust the return memory so it is aligned |
316 uintptr_t ptr = reinterpret_cast<uintptr_t>(result); | 411 uintptr_t ptr = reinterpret_cast<uintptr_t>(result); |
317 size_t adjust = 0; | 412 size_t adjust = 0; |
(...skipping 120 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
438 failed_[i] = false; | 533 failed_[i] = false; |
439 } | 534 } |
440 return NULL; | 535 return NULL; |
441 } | 536 } |
442 | 537 |
443 static bool system_alloc_inited = false; | 538 static bool system_alloc_inited = false; |
444 void InitSystemAllocators(void) { | 539 void InitSystemAllocators(void) { |
445 MmapSysAllocator *mmap = new (mmap_space) MmapSysAllocator(); | 540 MmapSysAllocator *mmap = new (mmap_space) MmapSysAllocator(); |
446 SbrkSysAllocator *sbrk = new (sbrk_space) SbrkSysAllocator(); | 541 SbrkSysAllocator *sbrk = new (sbrk_space) SbrkSysAllocator(); |
447 | 542 |
543 DefaultSysAllocator *sdef = new (default_space) DefaultSysAllocator(); | |
544 | |
545 // Unfortunately, this code runs before flags are initialized. So | |
546 // we can't use FLAGS_malloc_random_allocator. | |
547 #if defined(ASLR_IS_SUPPORTED) | |
548 // Our only random allocator is mmap. | |
549 sdef->SetChildAllocator(mmap, 0, mmap_name); | |
550 #else | |
448 // In 64-bit debug mode, place the mmap allocator first since it | 551 // In 64-bit debug mode, place the mmap allocator first since it |
449 // allocates pointers that do not fit in 32 bits and therefore gives | 552 // allocates pointers that do not fit in 32 bits and therefore gives |
450 // us better testing of code's 64-bit correctness. It also leads to | 553 // us better testing of code's 64-bit correctness. It also leads to |
451 // less false negatives in heap-checking code. (Numbers are less | 554 // less false negatives in heap-checking code. (Numbers are less |
452 // likely to look like pointers and therefore the conservative gc in | 555 // likely to look like pointers and therefore the conservative gc in |
453 // the heap-checker is less likely to misinterpret a number as a | 556 // the heap-checker is less likely to misinterpret a number as a |
454 // pointer). | 557 // pointer). |
455 DefaultSysAllocator *sdef = new (default_space) DefaultSysAllocator(); | |
456 if (kDebugMode && sizeof(void*) > 4) { | 558 if (kDebugMode && sizeof(void*) > 4) { |
457 sdef->SetChildAllocator(mmap, 0, mmap_name); | 559 sdef->SetChildAllocator(mmap, 0, mmap_name); |
458 sdef->SetChildAllocator(sbrk, 1, sbrk_name); | 560 sdef->SetChildAllocator(sbrk, 1, sbrk_name); |
459 } else { | 561 } else { |
460 sdef->SetChildAllocator(sbrk, 0, sbrk_name); | 562 sdef->SetChildAllocator(sbrk, 0, sbrk_name); |
461 sdef->SetChildAllocator(mmap, 1, mmap_name); | 563 sdef->SetChildAllocator(mmap, 1, mmap_name); |
462 } | 564 } |
565 #endif // ASLR_IS_SUPPORTED | |
463 sys_alloc = sdef; | 566 sys_alloc = sdef; |
464 } | 567 } |
465 | 568 |
466 void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size, | 569 void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size, |
467 size_t alignment) { | 570 size_t alignment) { |
468 // Discard requests that overflow | 571 // Discard requests that overflow |
469 if (size + alignment < size) return NULL; | 572 if (size + alignment < size) return NULL; |
470 | 573 |
471 SpinLockHolder lock_holder(&spinlock); | 574 SpinLockHolder lock_holder(&spinlock); |
472 | 575 |
(...skipping 67 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
540 } | 643 } |
541 } | 644 } |
542 #endif | 645 #endif |
543 } | 646 } |
544 | 647 |
545 void TCMalloc_SystemCommit(void* start, size_t length) { | 648 void TCMalloc_SystemCommit(void* start, size_t length) { |
546 // Nothing to do here. TCMalloc_SystemRelease does not alter pages | 649 // Nothing to do here. TCMalloc_SystemRelease does not alter pages |
547 // such that they need to be re-committed before they can be used by the | 650 // such that they need to be re-committed before they can be used by the |
548 // application. | 651 // application. |
549 } | 652 } |
OLD | NEW |