Chromium Code Reviews

Side by Side Diff: third_party/tcmalloc/chromium/src/system-alloc.cc

Issue 12090112: Linux: grow a unique random mapping in ASLR (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Address comments from Chris. Created 7 years, 10 months ago
OLD NEW
  1   1    // Copyright (c) 2005, Google Inc.
  2   2    // All rights reserved.
  3   3    //
  4   4    // Redistribution and use in source and binary forms, with or without
  5   5    // modification, are permitted provided that the following conditions are
  6   6    // met:
  7   7    //
  8   8    //     * Redistributions of source code must retain the above copyright
  9   9    // notice, this list of conditions and the following disclaimer.
 10  10    //     * Redistributions in binary form must reproduce the above
(...skipping 119 matching lines...)
130 130
131 131    void raninit(ranctx* x, u4 seed) {
132 132      u4 i;
133 133      x->a = 0xf1ea5eed;
134 134      x->b = x->c = x->d = seed;
135 135      for (i = 0; i < 20; ++i) {
136 136        (void) ranval(x);
137 137      }
138 138    }
139 139
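For context: ranval(), called in the warm-up loop above, is defined in the lines elided earlier. The 0xf1ea5eed constant and the 20 warm-up rounds are the signature of Bob Jenkins' small noncryptographic PRNG; a minimal, self-contained sketch of that standard construction (an illustration under that assumption, not a quotation of the elided Chromium code) looks like this:

    #include <stdint.h>

    typedef uint32_t u4;
    struct ranctx { u4 a, b, c, d; };

    static inline u4 rot(u4 x, int k) { return (x << k) | (x >> (32 - k)); }

    // One step of Jenkins' small PRNG: mix the four state words and return
    // 32 pseudo-random bits. raninit() above runs this 20 times so the seed
    // diffuses through the whole state before the first value is used.
    u4 ranval(ranctx* x) {
      u4 e = x->a - rot(x->b, 27);
      x->a = x->b ^ rot(x->c, 17);
      x->b = x->c + x->d;
      x->c = x->d + e;
      x->d = e + x->a;
      return x->d;
    }

GetRandomAddrHint() below draws two 32-bit values from this generator and concatenates them into a 64-bit candidate address.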
    140  + const uint64_t kRandomAddressMask = 0x3ffffffff000ULL;
    141  +
140 142    #endif  // defined(ASLR_IS_SUPPORTED)
141 143
142 144    // Give a random "hint" that is suitable for use with mmap(). This cannot make
143 145    // mmap fail, as the kernel will simply not follow the hint if it can't.
144 146    // However, this will create address space fragmentation. Currently, we only
145 147    // implement it on x86_64, where we have a 47-bit userland address space and
146 148    // fragmentation is not an issue.
147 149    void* GetRandomAddrHint() {
148 150    #if !defined(ASLR_IS_SUPPORTED)
149 151      return NULL;
(...skipping 23 matching lines...)
173 175          int ret = close(urandom_fd);
174 176          ASSERT(ret == 0);
175 177        }
176 178        raninit(&ctx, seed);
177 179      }
178 180      uint64_t random_address = (static_cast<uint64_t>(ranval(&ctx)) << 32) |
179 181                                ranval(&ctx);
180 182      // If the kernel cannot honor the hint in arch_get_unmapped_area_topdown, it
181 183      // will simply ignore it. So we give a hint that has a good chance of
182 184      // working.
183 185      // The mmap top-down allocator will normally allocate below TASK_SIZE - gap,
jar (doing other things) 2013/02/01 22:31:27 nit: This comment (lines 185-192) should be placed
jln (very slow on Chromium) 2013/02/01 22:50:47 Done.
184 186      // with a gap that depends on the max stack size. See x86/mm/mmap.c. We
185 187      // should make allocations that are below this area, which would be
186 188      // 0x7ffbf8000000.
187 189      // We use 0x3ffffffff000 as the mask so that we only "pollute" half of the
188 190      // address space. In the unlikely case where fragmentation would become an
189 191      // issue, the kernel will still have another half to use.
190 192      // A bit-wise "and" won't bias our random distribution.
191      -   random_address &= 0x3ffffffff000ULL;
    193  +   random_address &= kRandomAddressMask;
192 194      return reinterpret_cast<void*>(random_address);
193 195    #endif  // ASLR_IS_SUPPORTED
194 196    }
195 197
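To make the mask concrete: 0x3ffffffff000 keeps bits 12 through 45, so a masked hint is always page-aligned (low 12 bits are zero) and strictly below 2^46 = 0x400000000000, i.e. in the lower half of the 47-bit x86_64 userland range and comfortably under the 0x7ffbf8000000 ceiling discussed in the comment. A small standalone check (illustrative only, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main() {
      const uint64_t kRandomAddressMask = 0x3ffffffff000ULL;
      // Any 64-bit value ANDed with the mask becomes a page-aligned address
      // below 2^46, i.e. in the lower half of the 47-bit address space.
      uint64_t raw = 0xdeadbeefcafef00dULL;  // stand-in for two ranval() outputs
      uint64_t hint = raw & kRandomAddressMask;
      printf("hint = %#llx, page aligned: %d, below 2^46: %d\n",
             (unsigned long long)hint, (hint & 0xfffULL) == 0,
             hint < (1ULL << 46));
      return 0;
    }

Because the mask only clears bits, applying it to a uniformly random 64-bit value leaves the surviving bits uniformly distributed, which is what the "won't bias our random distribution" comment refers to.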
    198  + // Allocate |length| bytes of memory using mmap(). The memory will be
    199  + // readable and writeable, but not executable.
    200  + // Like mmap(), we will return MAP_FAILED on failure.
    201  + // |is_aslr_enabled| controls address space layout randomization. When true, we
    202  + // will put the first mapping at a random address and will then try to grow it.
    203  + // If it's not possible to grow an existing mapping, a new one will be created.
    204  + void* AllocWithMmap(size_t length, bool is_aslr_enabled) {
    205  +   // Note: we are protected by the general TCMalloc_SystemAlloc spinlock.
    206  +   static void* address_hint = NULL;
    207  +   if (is_aslr_enabled &&
    208  +       (!address_hint ||
    209  +        reinterpret_cast<uint64_t>(address_hint) & ~kRandomAddressMask)) {
Chris Evans 2013/02/01 19:03:23 I'm not sure the extra complexity here is worth is
jln (very slow on Chromium) 2013/02/01 19:15:41 I think it's more correct. Our line happens to giv
jar (doing other things) 2013/02/01 22:31:27 Do we have concerns about blocking stack growth? I
jln (very slow on Chromium) 2013/02/01 22:50:47 It's a little more complex than "ignored". It's "i
    210  +     address_hint = GetRandomAddrHint();
    211  +   }
    212  +
    213  +   // address_hint is likely to make us grow an existing mapping.
    214  +   void* result = mmap(address_hint, length, PROT_READ|PROT_WRITE,
    215  +                       MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    216  +   if (result == address_hint) {
    217  +     // If mmap() succeeded at address_hint, our next mmap() will try to grow
    218  +     // the current mapping as long as it's compatible with our ASLR mask.
    219  +     // This has been done for performance reasons, see crbug.com/173371.
    220  +     // It should be possible to strike a better balance between performance
    221  +     // and security, but that will be done at a later date.
    222  +     // If this overflows, it could only set address_hint to NULL, which is
    223  +     // what we want (and can't happen on the currently supported architecture).
    224  +     address_hint = static_cast<char*>(result) + length;
    225  +   } else {
    226  +     // mmap failed, or a collision prevented the kernel from honoring the hint;
    227  +     // reset the hint.
    228  +     address_hint = NULL;
    229  +   }
    230  +   return result;
    231  + }
    232  +
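The growth strategy above leans on standard Linux mmap() behavior: without MAP_FIXED the address is only a hint, the kernel uses it when the range is free (and adjacent anonymous mappings with the same protections get merged into one VMA), and otherwise it silently picks a different address, which the result == address_hint test detects. A standalone sketch of the same chaining pattern, assuming Linux and written outside tcmalloc purely for illustration:

    #include <sys/mman.h>
    #include <stdio.h>
    #include <stddef.h>

    // Map |length| bytes several times, hinting at the end of the previous
    // mapping so the region tends to grow contiguously; drop the hint as soon
    // as the kernel does not honor it.
    int main() {
      const size_t length = 1 << 20;  // 1 MiB per mapping
      char* hint = NULL;
      for (int i = 0; i < 4; ++i) {
        void* result = mmap(hint, length, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (result == MAP_FAILED)
          return 1;
        printf("mapping %d at %p (hint %p, honored: %d)\n",
               i, result, (void*)hint, hint != NULL && result == hint);
        // Mirror the patch: keep chaining the hint only while it is honored
        // (the first mapping simply bootstraps the chain).
        if (hint == NULL || result == hint)
          hint = static_cast<char*>(result) + length;
        else
          hint = NULL;
      }
      return 0;
    }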
196 233    }  // Anonymous namespace to avoid name conflicts on "CheckAddressBits".
197 234
198 235    COMPILE_ASSERT(kAddressBits <= 8 * sizeof(void*),
199 236                   address_bits_larger_than_pointer_size);
200 237
201 238    // Structure for discovering alignment
202 239    union MemoryAligner {
203 240      void* p;
204 241      double d;
205 242      size_t s;
(...skipping 192 matching lines...)
398 435      // Ask for extra memory if alignment > pagesize
399 436      size_t extra = 0;
400 437      if (alignment > pagesize) {
401 438        extra = alignment - pagesize;
402 439      }
403 440
404 441      // Note: size + extra does not overflow since:
405 442      //            size + alignment < (1<<NBITS).
406 443      // and        extra <= alignment
407 444      // therefore  size + extra < (1<<NBITS)
408      -   void* address_hint = NULL;
409      -   if (FLAGS_malloc_random_allocator) {
410      -     address_hint = GetRandomAddrHint();
411      -   }
412      -   void* result = mmap(address_hint, size + extra,
413      -                       PROT_READ|PROT_WRITE,
414      -                       MAP_PRIVATE|MAP_ANONYMOUS,
415      -                       -1, 0);
    445  +   void* result = AllocWithMmap(size + extra, FLAGS_malloc_random_allocator);
416 446      if (result == reinterpret_cast<void*>(MAP_FAILED)) {
417 447        return NULL;
418 448      }
419 449
420 450      // Adjust the return memory so it is aligned
421 451      uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
422 452      size_t adjust = 0;
423 453      if ((ptr & (alignment - 1)) != 0) {
424 454        adjust = alignment - (ptr & (alignment - 1));
425 455      }
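The adjust computation above is the usual round-up-to-a-power-of-two-alignment step: ptr & (alignment - 1) is how far ptr sits past the previous alignment boundary, and the extra bytes requested earlier guarantee that ptr + adjust still leaves size usable bytes. A standalone illustration with hypothetical values, mirroring the arithmetic in those lines (assumes a 64-bit build):

    #include <stdint.h>
    #include <stdio.h>

    // Round |ptr| up to the next multiple of |alignment| (a power of two),
    // the same adjustment applied to the mmap() result above.
    uintptr_t AlignUp(uintptr_t ptr, uintptr_t alignment) {
      uintptr_t adjust = 0;
      if ((ptr & (alignment - 1)) != 0) {
        adjust = alignment - (ptr & (alignment - 1));
      }
      return ptr + adjust;
    }

    int main() {
      // e.g. a page-aligned mmap() result rounded up to a 64 KiB boundary:
      uintptr_t ptr = 0x7ffbf7f01000;
      printf("%#lx -> %#lx\n", (unsigned long)ptr,
             (unsigned long)AlignUp(ptr, 0x10000));  // prints 0x7ffbf7f10000
      return 0;
    }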
(...skipping 226 matching lines...)
652 682        }
653 683      }
654 684    #endif
655 685    }
656 686
657 687    void TCMalloc_SystemCommit(void* start, size_t length) {
658 688      // Nothing to do here. TCMalloc_SystemRelease does not alter pages
659 689      // such that they need to be re-committed before they can be used by the
660 690      // application.
661 691    }