Index: third_party/tcmalloc/chromium/src/system-alloc.cc |
diff --git a/third_party/tcmalloc/chromium/src/system-alloc.cc b/third_party/tcmalloc/chromium/src/system-alloc.cc |
index b755b3fdc9bb3e0d34e644b03b7e16582a3aa61a..9bc7f0d0c3752f791005a195b39181f44195ac75 100644 |
--- a/third_party/tcmalloc/chromium/src/system-alloc.cc |
+++ b/third_party/tcmalloc/chromium/src/system-alloc.cc |
@@ -100,6 +100,87 @@ template <> bool CheckAddressBits<8 * sizeof(void*)>(uintptr_t ptr) { |
return true; |
} |
+// From libdieharder, public domain library by Bob Jenkins (rngav.c). |
+// Described at http://burtleburtle.net/bob/rand/smallprng.html. |
+// Not cryptographically secure, but good enough for what we need. |
jar (doing other things)
2013/01/30 02:29:13
Why are we using this, rather than more standard c
jln (very slow on Chromium)
2013/01/30 02:52:13
What would be a more standard crypto function ? We
|
+typedef uint32_t u4; |
+typedef struct ranctx { u4 a; u4 b; u4 c; u4 d; } ranctx; |
+ |
+#define rot(x,k) (((x)<<(k))|((x)>>(32-(k)))) |
+ |
+u4 ranval(ranctx* x) { |
+ /* xxx: the generator being tested */ |
+ u4 e = x->a - rot(x->b, 27); |
+ x->a = x->b ^ rot(x->c, 17); |
+ x->b = x->c + x->d; |
+ x->c = x->d + e; |
+ x->d = e + x->a; |
+ return x->d; |
+} |
+ |
+void raninit(ranctx* x, u4 seed) { |
+ u4 i; |
+ x->a = 0xf1ea5eed, x->b = x->c = x->d = seed; |
+ for (i=0; i<20; ++i) { |
+ (void)ranval(x); |
+ } |
+} |
+ |
+// End PRNG code. |
+ |
+#define ASLR_IS_SUPPORTED \ |
jar (doing other things)
2013/01/30 02:29:13
I doubt you meant to postpend a double backslash..
jln (very slow on Chromium)
2013/01/30 02:52:13
Done.
|
+ (defined(OS_LINUX) || defined(OS_CHROMEOS)) && defined(__x86_64__) |
jar (doing other things)
2013/01/30 02:29:13
"defined" is only meaningful in the context of a "
jln (very slow on Chromium)
2013/01/30 02:52:13
Done.
|
+ |
+// Give a random "hint" that is suitable for use with mmap(). This cannot make |
+// mmap fail, as the kernel will simply not follow the hint if it can't. |
jar (doing other things)
2013/01/30 02:29:13
nit: Remove extra space: "fail , as" ---> "fail,
jln (very slow on Chromium)
2013/01/30 02:52:13
Done.
|
+// However, this will create address space fragmentation. Currently, we only |
+// implement it on x86_64, where we have a 47 bits userland address space and |
+// fragmentation is not an issue. |
+void* GetRandomAddrHint() { |
+#if defined(ASLR_IS_SUPPORTED) |
jar (doing other things)
2013/01/30 02:29:13
I'm pretty sure this will always evaluate to true,
jln (very slow on Chromium)
2013/01/30 02:52:13
Done.
|
+ // Note: we are protected by the general TCMalloc_SystemAlloc spinlock. Given |
+ // the nature of what we're doing, it wouldn't be critical if we weren't. |
+ // It's nice to share the state between threads, because scheduling will add |
+ // some randomness to the succession of ranval() calls. |
+ static ranctx ctx; |
+ static bool initialized = false; |
+ if (!initialized) { |
+ volatile char c; |
jar (doing other things)
2013/01/30 02:29:13
Why did you use the keyword volatile here? What d
jln (very slow on Chromium)
2013/01/30 02:52:13
I'm trying to tame a potential processor optimizat
|
+ // Pre-initialize our seed with a "random" address in case /dev/urandom is |
+ // not available. |
+ uint32_t seed = (reinterpret_cast<uint64_t>(&c) >> 32) ^ |
+ reinterpret_cast<uint64_t>(&c); |
+ int urandom_fd = open("/dev/urandom", O_RDONLY); |
+ if (urandom_fd >= 0) { |
+ ssize_t len; |
+ len = read(urandom_fd, &seed, sizeof(seed)); |
+ ASSERT(len == sizeof(seed)); |
+ int ret = close(urandom_fd); |
+ ASSERT(ret == 0); |
+ } |
+ raninit(&ctx, seed); |
+ initialized = true; |
jar (doing other things)
2013/01/30 02:29:13
FWIW: the compiler can optimize this line to perfo
jln (very slow on Chromium)
2013/01/30 02:52:13
Yes, that comment was for the ranctx, but you're r
|
+ } |
+ uint64_t random_address = (static_cast<uint64_t>(ranval(&ctx)) << 32) | |
+ ranval(&ctx); |
+ // If the kernel cannot honor the hint in arch_get_unmapped_area_topdown, it |
+ // will simply ignore it. So we give a hint that has a good chance of |
+ // working. |
+ // The mmap top-down allocator will normally allocate below TASK_SIZE - gap, |
+ // with a gap that depends on the max stack size. See x86/mm/mmap.c. We |
+ // should make allocations that are below this area, which would be |
+ // 0x7ffbf8000000. |
+ // We use 0x3ffffffff000 as the mask so that we only "pollute" half of the |
+ // address space. In the unlikely case where fragmentation would become an |
+ // issue, the kernel will still have another half to use. |
+// A bit-wise "and" won't bias our random distribution. |
+ random_address &= 0x3ffffffff000ULL; |
+ return reinterpret_cast<void*>(random_address); |
+#else |
+ return NULL; |
jar (doing other things)
2013/01/30 02:29:13
nit: With if's (and certainly #if), it is much eas
jln (very slow on Chromium)
2013/01/30 02:52:13
Done.
|
+#endif // ASLR_IS_SUPPORTED |
+} |
+ |
} // Anonymous namespace to avoid name conflicts on "CheckAddressBits". |
COMPILE_ASSERT(kAddressBits <= 8 * sizeof(void*), |
@@ -139,6 +220,15 @@ DEFINE_bool(malloc_skip_mmap, |
EnvToBool("TCMALLOC_SKIP_MMAP", false), |
"Whether mmap can be used to obtain memory."); |
+DEFINE_bool(malloc_random_allocator, |
+ EnvToBool("TCMALLOC_ASLR", |
+#if defined(ASLR_IS_SUPPORTED) |
jar (doing other things)
2013/01/30 02:29:13
As noted, this is always true here :-(.
jln (very slow on Chromium)
2013/01/30 02:52:13
Done.
|
+ true), |
jar (doing other things)
2013/01/30 02:29:13
nit: breaking coded fragments (single short lines)
jln (very slow on Chromium)
2013/01/30 02:52:13
Done.
|
+#else |
+ false), |
+#endif |
+ "Whether to randomize the address space via mmap()."); |
+ |
// static allocators |
class SbrkSysAllocator : public SysAllocator { |
public: |
@@ -304,7 +394,11 @@ void* MmapSysAllocator::Alloc(size_t size, size_t *actual_size, |
// size + alignment < (1<<NBITS). |
// and extra <= alignment |
// therefore size + extra < (1<<NBITS) |
- void* result = mmap(NULL, size + extra, |
+ void* address_hint = NULL; |
+ if (FLAGS_malloc_random_allocator) { |
+ address_hint = GetRandomAddrHint(); |
+ } |
+ void* result = mmap(address_hint, size + extra, |
PROT_READ|PROT_WRITE, |
MAP_PRIVATE|MAP_ANONYMOUS, |
-1, 0); |
@@ -445,6 +539,14 @@ void InitSystemAllocators(void) { |
MmapSysAllocator *mmap = new (mmap_space) MmapSysAllocator(); |
SbrkSysAllocator *sbrk = new (sbrk_space) SbrkSysAllocator(); |
+ DefaultSysAllocator *sdef = new (default_space) DefaultSysAllocator(); |
+ |
+ // Unfortunately, this code runs before flags are initialized. So |
+ // we can't use FLAGS_malloc_random_allocator. |
+#if defined(ASLR_IS_SUPPORTED) |
+ // Our only random allocator is mmap. |
+ sdef->SetChildAllocator(mmap, 0, mmap_name); |
jar (doing other things)
2013/01/30 02:29:13
Instead of moving line at 455 up to line 542 above
jln (very slow on Chromium)
2013/01/30 02:52:13
Done.
|
+#else |
// In 64-bit debug mode, place the mmap allocator first since it |
// allocates pointers that do not fit in 32 bits and therefore gives |
// us better testing of code's 64-bit correctness. It also leads to |
@@ -452,7 +554,6 @@ void InitSystemAllocators(void) { |
// likely to look like pointers and therefore the conservative gc in |
// the heap-checker is less likely to misinterpret a number as a |
// pointer). |
- DefaultSysAllocator *sdef = new (default_space) DefaultSysAllocator(); |
if (kDebugMode && sizeof(void*) > 4) { |
sdef->SetChildAllocator(mmap, 0, mmap_name); |
sdef->SetChildAllocator(sbrk, 1, sbrk_name); |
jar (doing other things)
2013/01/30 02:29:13
Why is this line not included in the new ASLR code
jln (very slow on Chromium)
2013/01/30 02:52:13
At first, I thought we didn't want a fallback to t
|
@@ -460,6 +561,7 @@ void InitSystemAllocators(void) { |
sdef->SetChildAllocator(sbrk, 0, sbrk_name); |
sdef->SetChildAllocator(mmap, 1, mmap_name); |
} |
+#endif // ASLR_IS_SUPPORTED |
sys_alloc = sdef; |
} |