OLD | NEW |
---|---|
1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <fcntl.h> | 5 #include <fcntl.h> |
6 #include <stdio.h> | 6 #include <stdio.h> |
7 #include <stdlib.h> | 7 #include <stdlib.h> |
8 #include <string.h> | 8 #include <string.h> |
9 #include <sys/mman.h> | |
9 #include <sys/stat.h> | 10 #include <sys/stat.h> |
10 #include <sys/types.h> | 11 #include <sys/types.h> |
12 #include <unistd.h> | |
11 | 13 |
12 #include <algorithm> | 14 #include <algorithm> |
13 #include <limits> | 15 #include <limits> |
14 | 16 |
15 #include "base/file_util.h" | 17 #include "base/file_util.h" |
16 #include "base/logging.h" | 18 #include "base/logging.h" |
17 #include "base/memory/scoped_ptr.h" | 19 #include "base/memory/scoped_ptr.h" |
18 #include "testing/gtest/include/gtest/gtest.h" | 20 #include "testing/gtest/include/gtest/gtest.h" |
19 | 21 |
20 using std::nothrow; | 22 using std::nothrow; |
(...skipping 90 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
111 file_util::ScopedFD fd_closer(&fd); | 113 file_util::ScopedFD fd_closer(&fd); |
112 ASSERT_GE(fd, 0); | 114 ASSERT_GE(fd, 0); |
113 char buffer[1<<13]; | 115 char buffer[1<<13]; |
114 int ret; | 116 int ret; |
115 ret = read(fd, buffer, sizeof(buffer) - 1); | 117 ret = read(fd, buffer, sizeof(buffer) - 1); |
116 ASSERT_GT(ret, 0); | 118 ASSERT_GT(ret, 0); |
117 buffer[ret - 1] = 0; | 119 buffer[ret - 1] = 0; |
118 fprintf(stdout, "%s\n", buffer); | 120 fprintf(stdout, "%s\n", buffer); |
119 } | 121 } |
120 | 122 |
123 // Check if ptr1 and ptr2 are separated by less than size chars. | |
124 bool ArePointersToSameArea(void* ptr1, void* ptr2, size_t size) { | |
125 ptrdiff_t ptr_diff = reinterpret_cast<char*>(std::max(ptr1, ptr2)) - | |
126 reinterpret_cast<char*>(std::min(ptr1, ptr2)); | |
127 return static_cast<size_t>(ptr_diff) <= size; | |
128 } | |
129 | |
121 // Check if TCMalloc uses an underlying random memory allocator. | 130 // Check if TCMalloc uses an underlying random memory allocator. |
122 TEST(SecurityTest, ALLOC_TEST(RandomMemoryAllocations)) { | 131 TEST(SecurityTest, ALLOC_TEST(RandomMemoryAllocations)) { |
Chris Evans
2013/02/01 19:03:23
I defer to @jar to check the test changes, since h
jln (very slow on Chromium)
2013/02/01 19:15:41
It's completely different though. The previous tes
| |
123 if (IsTcMallocBypassed()) | 132 if (IsTcMallocBypassed()) |
124 return; | 133 return; |
125 // Two successsive calls to mmap() have roughly one chance out of 2^6 to | 134 size_t kPageSize = 4096; // We support x86_64 only. |
126 // have the same two high order nibbles, which is what we are looking at in | 135 // Check that malloc() returns an address that is neither the kernel's |
127 // this test. (In the implementation, we mask these two nibbles with 0x3f, | 136 // un-hinted mmap area, nor the current brk() area. The first malloc() may |
128 // hence the 6 bits). | 137 // not be at a random address because TCMalloc will first exhaust any memory |
129 // With 32 allocations, we see ~16 that end-up in different buckets (i.e. | 138 // that it has allocated early on, before starting the sophisticated |
130 // zones mapped via mmap(), so the chances of this test flaking is roughly | 139 // allocators. |
131 // 2^-(6*15). | 140 void* default_mmap_heap_address = |
132 const int kAllocNumber = 32; | 141 mmap(0, kPageSize, PROT_READ|PROT_WRITE, |
133 // Make kAllocNumber successive allocations of growing size and compare the | 142 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); |
134 // successive pointers to detect adjacent mappings. We grow the size because | 143 ASSERT_NE(default_mmap_heap_address, |
135 // TCMalloc can sometimes over-allocate. | 144 static_cast<void*>(MAP_FAILED)); |
136 scoped_ptr<char, base::FreeDeleter> ptr[kAllocNumber]; | 145 ASSERT_EQ(munmap(default_mmap_heap_address, kPageSize), 0); |
137 for (int i = 0; i < kAllocNumber; ++i) { | 146 void* brk_heap_address = sbrk(0); |
138 // Grow the Malloc size slightly sub-exponentially. | 147 ASSERT_NE(brk_heap_address, reinterpret_cast<void*>(-1)); |
139 const size_t kMallocSize = 1 << (12 + (i>>1)); | 148 ASSERT_TRUE(brk_heap_address != NULL); |
140 ptr[i].reset(static_cast<char*>(malloc(kMallocSize))); | 149 // 1 MB should get us past what TCMalloc pre-allocated before initializing |
141 ASSERT_TRUE(ptr[i] != NULL); | 150 // the sophisticated allocators. |
jar (doing other things)
2013/02/01 22:31:27
Won't all allocations, even the early (small) clas
jln (very slow on Chromium)
2013/02/01 22:50:47
I am not sure why yet, but that's not the case. Ev
| |
142 if (i > 0) { | 151 size_t kAllocSize = 1<<20; |
143 // Without mmap randomization, the two high order nibbles | 152 scoped_ptr<char, base::FreeDeleter> ptr( |
144 // of a 47 bits userland address address will be identical. | 153 static_cast<char*>(malloc(kAllocSize))); |
145 // We're only watching the 6 bits that we actually do touch | 154 ASSERT_TRUE(ptr != NULL); |
146 // in our implementation. | 155 // If two pointers are separated by less than 512MB, they are considered |
147 const uintptr_t kHighOrderMask = 0x3f0000000000ULL; | 156 // to be in the same area. |
148 bool pointer_have_same_high_order = | 157 // Our random pointer could be anywhere within 0x3fffffffffff (46bits), |
 149       (reinterpret_cast<size_t>(ptr[i].get()) & kHighOrderMask) == | 158 // and we are checking that it's not within 1GB (30 bits) from two |
150 (reinterpret_cast<size_t>(ptr[i - 1].get()) & kHighOrderMask); | 159 // addresses (brk and mmap heap). We have roughly one chance out of |
151 if (!pointer_have_same_high_order) { | 160 // 2^15 to flake. |
jar (doing other things)
2013/02/01 22:31:27
nit: that's "only" 1 in 32K. If we run 1000 tests
jln (very slow on Chromium)
2013/02/01 22:50:47
I would have loved too. But it doesn't work. I've
| |
152 // PrintProcSelfMaps(); | 161 const size_t kAreaRadius = 1<<29; |
153 return; // Test passes. | 162 bool in_default_mmap_heap = ArePointersToSameArea( |
154 } | 163 ptr.get(), default_mmap_heap_address, kAreaRadius); |
155 } | 164 EXPECT_FALSE(in_default_mmap_heap); |
156 } | 165 |
157 ASSERT_TRUE(false); // NOTREACHED(); | 166 bool in_default_brk_heap = ArePointersToSameArea( |
167 ptr.get(), brk_heap_address, kAreaRadius); | |
168 EXPECT_FALSE(in_default_brk_heap); | |
169 | |
170 // In the implementation, we always mask our random addresses with | |
171 // kRandomMask, so we use it as an additional detection mechanism. | |
172 const uintptr_t kRandomMask = 0x3fffffffffffULL; | |
173 bool impossible_random_address = | |
174 reinterpret_cast<uintptr_t>(ptr.get()) & ~kRandomMask; | |
175 EXPECT_FALSE(impossible_random_address); | |
158 } | 176 } |
159 | 177 |
160 #endif // (defined(OS_LINUX) || defined(OS_CHROMEOS)) && defined(__x86_64__) | 178 #endif // (defined(OS_LINUX) || defined(OS_CHROMEOS)) && defined(__x86_64__) |
161 | 179 |
162 } // namespace | 180 } // namespace |
OLD | NEW |