OLD | NEW |
1 // Copyright (c) 2005, 2007, Google Inc. | 1 // Copyright (c) 2005, 2007, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
11 // copyright notice, this list of conditions and the following disclaimer | 11 // copyright notice, this list of conditions and the following disclaimer |
12 // in the documentation and/or other materials provided with the | 12 // in the documentation and/or other materials provided with the |
13 // distribution. | 13 // distribution. |
14 // * Neither the name of Google Inc. nor the names of its | 14 // * Neither the name of Google Inc. nor the names of its |
15 // contributors may be used to endorse or promote products derived from | 15 // contributors may be used to endorse or promote products derived from |
16 // this software without specific prior written permission. | 16 // this software without specific prior written permission. |
17 // | 17 // |
18 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | 18 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
19 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | 19 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
20 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | 20 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
21 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | 21 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
22 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | 22 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
23 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 23 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
24 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 24 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
25 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 25 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
26 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 26 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
27 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 27 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
(...skipping 37 matching lines...)
65 | 65 |
66 static SpinLock spinlock = SPINLOCK_INITIALIZER; | 66 static SpinLock spinlock = SPINLOCK_INITIALIZER; |
67 | 67 |
68 // Page size is initialized on demand | 68 // Page size is initialized on demand |
69 static size_t pagesize = 0; | 69 static size_t pagesize = 0; |
70 | 70 |
71 // Configuration parameters. | 71 // Configuration parameters. |
72 | 72 |
73 #if HAVE(MMAP) | 73 #if HAVE(MMAP) |
74 static bool use_mmap = true; | 74 static bool use_mmap = true; |
75 #endif | 75 #endif |
76 | 76 |
77 #if HAVE(VIRTUALALLOC) | 77 #if HAVE(VIRTUALALLOC) |
78 static bool use_VirtualAlloc = true; | 78 static bool use_VirtualAlloc = true; |
79 #endif | 79 #endif |
80 | 80 |
81 // Flags to keep us from retrying allocators that failed. | 81 // Flags to keep us from retrying allocators that failed. |
82 static bool devmem_failure = false; | 82 static bool devmem_failure = false; |
83 static bool sbrk_failure = false; | 83 static bool sbrk_failure = false; |
84 static bool mmap_failure = false; | 84 static bool mmap_failure = false; |
85 static bool VirtualAlloc_failure = false; | 85 static bool VirtualAlloc_failure = false; |
86 | 86 |
87 static const int32_t FLAGS_malloc_devmem_start = 0; | 87 static const int32_t FLAGS_malloc_devmem_start = 0; |
88 static const int32_t FLAGS_malloc_devmem_limit = 0; | 88 static const int32_t FLAGS_malloc_devmem_limit = 0; |
89 | 89 |
90 #if HAVE(MMAP) | 90 #if HAVE(MMAP) |
91 | 91 |
92 static void* TryMmap(size_t size, size_t *actual_size, size_t alignment) { | 92 static void* TryMmap(size_t size, size_t *actual_size, size_t alignment) { |
93 // Enforce page alignment | 93 // Enforce page alignment |
94 if (pagesize == 0) pagesize = getpagesize(); | 94 if (pagesize == 0) pagesize = getpagesize(); |
95 if (alignment < pagesize) alignment = pagesize; | 95 if (alignment < pagesize) alignment = pagesize; |
96 size = ((size + alignment - 1) / alignment) * alignment; | 96 size = ((size + alignment - 1) / alignment) * alignment; |
97 | 97 |
98 // could theoretically return the "extra" bytes here, but this | 98 // could theoretically return the "extra" bytes here, but this |
99 // is simple and correct. | 99 // is simple and correct. |
100 if (actual_size) | 100 if (actual_size) |
101 *actual_size = size; | 101 *actual_size = size; |
102 | 102 |
103 // Ask for extra memory if alignment > pagesize | 103 // Ask for extra memory if alignment > pagesize |
104 size_t extra = 0; | 104 size_t extra = 0; |
105 if (alignment > pagesize) { | 105 if (alignment > pagesize) { |
106 extra = alignment - pagesize; | 106 extra = alignment - pagesize; |
107 } | 107 } |
108 Checked<size_t> mapSize = Checked<size_t>(size) + extra + 2 * pagesize; | 108 Checked<size_t> mapSize = Checked<size_t>(size) + extra + 2 * pagesize; |
109 void* result = mmap(NULL, mapSize.unsafeGet(), | 109 void* result = mmap(NULL, mapSize.unsafeGet(), |
110 PROT_READ | PROT_WRITE, | 110 PROT_READ | PROT_WRITE, |
111 MAP_PRIVATE|MAP_ANONYMOUS, | 111 MAP_PRIVATE|MAP_ANONYMOUS, |
112 VM_TAG_FOR_TCMALLOC_MEMORY, 0); | 112 VM_TAG_FOR_TCMALLOC_MEMORY, 0); |
(...skipping 33 matching lines...)
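The arithmetic at line 96 rounds the request up to a multiple of the page-or-larger alignment, and lines 104-108 then map size + extra + 2 * pagesize bytes, where extra = alignment - pagesize leaves enough slack for an aligned block of the rounded size to fit wherever the kernel places the mapping. A minimal standalone sketch of that arithmetic follows; it is not part of the patch, and the helper name, the assumed 4 KB page, and the numbers are illustrative only.

#include <cstddef>
#include <cstdio>

// Round size up to a multiple of alignment (same integer arithmetic as line 96).
static std::size_t roundUp(std::size_t size, std::size_t alignment) {
    return ((size + alignment - 1) / alignment) * alignment;
}

int main() {
    const std::size_t pagesize = 4096;                                    // assumed page size
    std::size_t alignment = 16384;                                        // larger than a page
    std::size_t size = roundUp(10000, alignment);                         // -> 16384
    std::size_t extra = alignment > pagesize ? alignment - pagesize : 0;  // -> 12288
    // Any page-aligned mapping of size + extra bytes contains a 16 KB-aligned run
    // of `size` bytes; the mapping at line 108 also carries 2 * pagesize of headroom.
    std::printf("map %zu bytes to carve out %zu bytes aligned to %zu\n",
                size + extra + 2 * pagesize, size, alignment);
    return 0;
}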
146 SYSTEM_INFO system_info; | 146 SYSTEM_INFO system_info; |
147 GetSystemInfo(&system_info); | 147 GetSystemInfo(&system_info); |
148 pagesize = system_info.dwPageSize; | 148 pagesize = system_info.dwPageSize; |
149 } | 149 } |
150 | 150 |
151 if (alignment < pagesize) alignment = pagesize; | 151 if (alignment < pagesize) alignment = pagesize; |
152 size = ((size + alignment - 1) / alignment) * alignment; | 152 size = ((size + alignment - 1) / alignment) * alignment; |
153 | 153 |
154 // could theoretically return the "extra" bytes here, but this | 154 // could theoretically return the "extra" bytes here, but this |
155 // is simple and correct. | 155 // is simple and correct. |
156 if (actual_size) | 156 if (actual_size) |
157 *actual_size = size; | 157 *actual_size = size; |
158 | 158 |
159 // Ask for extra memory if alignment > pagesize | 159 // Ask for extra memory if alignment > pagesize |
160 size_t extra = 0; | 160 size_t extra = 0; |
161 if (alignment > pagesize) { | 161 if (alignment > pagesize) { |
162 extra = alignment - pagesize; | 162 extra = alignment - pagesize; |
163 } | 163 } |
164 void* result = VirtualAlloc(NULL, size + extra, | 164 void* result = VirtualAlloc(NULL, size + extra, |
165 MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN, | 165 MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN, |
166 PAGE_READWRITE); | 166 PAGE_READWRITE); |
167 | 167 |
168 if (result == NULL) { | 168 if (result == NULL) { |
169 VirtualAlloc_failure = true; | 169 VirtualAlloc_failure = true; |
170 return NULL; | 170 return NULL; |
171 } | 171 } |
172 | 172 |
173 // Adjust the return memory so it is aligned | 173 // Adjust the return memory so it is aligned |
174 uintptr_t ptr = reinterpret_cast<uintptr_t>(result); | 174 uintptr_t ptr = reinterpret_cast<uintptr_t>(result); |
175 size_t adjust = 0; | 175 size_t adjust = 0; |
(...skipping 12 matching lines...)
188 | 188 |
189 ptr += adjust; | 189 ptr += adjust; |
190 return reinterpret_cast<void*>(ptr); | 190 return reinterpret_cast<void*>(ptr); |
191 } | 191 } |
192 | 192 |
193 #endif /* HAVE(MMAP) */ | 193 #endif /* HAVE(MMAP) */ |
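The tail of TryVirtualAlloc (lines 174-190, partly elided above) advances the returned pointer by `adjust` so that it lands on an alignment boundary inside the deliberately oversized reservation. The elided lines compute `adjust`; the snippet below shows one conventional way to express that bump, purely as an illustration of the idea rather than the code under review, and assumes the alignment is a power of two (page sizes are).

#include <cstdint>
#include <cstdio>

// Round ptr up to the next multiple of alignment (alignment must be a power of two).
static std::uintptr_t alignUp(std::uintptr_t ptr, std::uintptr_t alignment) {
    std::uintptr_t mask = alignment - 1;
    return (ptr + mask) & ~mask;
}

int main() {
    std::uintptr_t raw = 0x10001000;                 // hypothetical VirtualAlloc result
    std::uintptr_t aligned = alignUp(raw, 0x4000);   // bump to a 16 KB boundary
    std::printf("adjust = %#llx\n",
                static_cast<unsigned long long>(aligned - raw));  // prints 0x3000
    return 0;
}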
194 | 194 |
195 void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size, size_t alignment) { | 195 void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size, size_t alignment) { |
196 // Discard requests that overflow | 196 // Discard requests that overflow |
197 if (size + alignment < size) return NULL; | 197 if (size + alignment < size) return NULL; |
198 | 198 |
199 SpinLockHolder lock_holder(&spinlock); | 199 SpinLockHolder lock_holder(&spinlock); |
200 | 200 |
201 // Enforce minimum alignment | 201 // Enforce minimum alignment |
202 if (alignment < sizeof(MemoryAligner)) alignment = sizeof(MemoryAligner); | 202 if (alignment < sizeof(MemoryAligner)) alignment = sizeof(MemoryAligner); |
203 | 203 |
204 // Try twice, once avoiding allocators that failed before, and once | 204 // Try twice, once avoiding allocators that failed before, and once |
205 // more trying all allocators even if they failed before. | 205 // more trying all allocators even if they failed before. |
206 for (int i = 0; i < 2; i++) { | 206 for (int i = 0; i < 2; i++) { |
207 | 207 |
208 #if HAVE(MMAP) | 208 #if HAVE(MMAP) |
209 if (use_mmap && !mmap_failure) { | 209 if (use_mmap && !mmap_failure) { |
210 void* result = TryMmap(size, actual_size, alignment); | 210 void* result = TryMmap(size, actual_size, alignment); |
211 if (result != NULL) return result; | 211 if (result != NULL) return result; |
212 } | 212 } |
213 #endif | 213 #endif |
214 | 214 |
215 #if HAVE(VIRTUALALLOC) | 215 #if HAVE(VIRTUALALLOC) |
216 if (use_VirtualAlloc && !VirtualAlloc_failure) { | 216 if (use_VirtualAlloc && !VirtualAlloc_failure) { |
217 void* result = TryVirtualAlloc(size, actual_size, alignment); | 217 void* result = TryVirtualAlloc(size, actual_size, alignment); |
218 if (result != NULL) return result; | 218 if (result != NULL) return result; |
(...skipping 142 matching lines...)
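The check at line 197 relies on unsigned wraparound: for size_t operands, size + alignment can only compare smaller than size if the addition overflowed, so oversized requests are rejected before any allocator runs. The loop at line 206 then makes two passes over the available backends, honoring the *_failure flags on the first pass; its remaining body is elided above. The schematic below is only meant to illustrate that two-pass shape, with made-up backend names and a presumed reset of the failure flags before the second pass, not the exact code in this file.

#include <cstddef>

static bool backendA_failed = false;
static bool backendB_failed = false;

static void* tryBackendA(std::size_t) { return nullptr; }   // stand-in allocator
static void* tryBackendB(std::size_t) { return nullptr; }   // stand-in allocator

void* systemAllocSketch(std::size_t size, std::size_t alignment) {
    if (size + alignment < size)       // unsigned overflow check, as at line 197
        return nullptr;
    for (int pass = 0; pass < 2; ++pass) {
        if (!backendA_failed) {
            if (void* p = tryBackendA(size)) return p;
        }
        if (!backendB_failed) {
            if (void* p = tryBackendB(size)) return p;
        }
        // Nothing worked: forget earlier failures and try every backend once more.
        backendA_failed = backendB_failed = false;
    }
    return nullptr;
}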
361 | 361 |
362 #else | 362 #else |
363 | 363 |
364 // Platforms that don't need to explicitly commit memory use an empty inline version of TCMalloc_SystemCommit | 364 // Platforms that don't need to explicitly commit memory use an empty inline version of TCMalloc_SystemCommit |
365 // declared in TCSystemAlloc.h | 365 // declared in TCSystemAlloc.h |
366 | 366 |
367 #endif | 367 #endif |
368 | 368 |
369 #endif // #if !USE(SYSTEM_MALLOC) | 369 #endif // #if !USE(SYSTEM_MALLOC) |
370 | 370 |