Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2005, Google Inc. | 1 // Copyright (c) 2005, Google Inc. |
| 2 // All rights reserved. | 2 // All rights reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
| 6 // met: | 6 // met: |
| 7 // | 7 // |
| 8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
| 9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
| 10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
| (...skipping 278 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 289 Log(kCrash, __FILE__, __LINE__, | 289 Log(kCrash, __FILE__, __LINE__, |
| 290 "Attempt to realloc invalid pointer", old_ptr); | 290 "Attempt to realloc invalid pointer", old_ptr); |
| 291 return 0; | 291 return 0; |
| 292 } | 292 } |
| 293 | 293 |
| 294 size_t InvalidGetAllocatedSize(const void* ptr) { | 294 size_t InvalidGetAllocatedSize(const void* ptr) { |
| 295 Log(kCrash, __FILE__, __LINE__, | 295 Log(kCrash, __FILE__, __LINE__, |
| 296 "Attempt to get the size of an invalid pointer", ptr); | 296 "Attempt to get the size of an invalid pointer", ptr); |
| 297 return 0; | 297 return 0; |
| 298 } | 298 } |
| 299 | |
| 300 // For security reasons, we want to limit the size of allocations. | |
| 301 // See crbug.com/169327. | |
| 302 inline bool IsAllocSizePermitted(size_t alloc_size) { | |
| 303 // Never allow an allocation larger than what can be indexed via an int. | |
| 304 // Remove kPageSize to account for various rounding, padding and to have a | |
| 305 // small margin. | |
| 306 return alloc_size <= ((std::numeric_limits<int>::max)() - kPageSize); | |
| 307 } | |
| 308 | |
| 299 } // unnamed namespace | 309 } // unnamed namespace |
| 300 | 310 |
| 301 // Extract interesting stats | 311 // Extract interesting stats |
| 302 struct TCMallocStats { | 312 struct TCMallocStats { |
| 303 uint64_t thread_bytes; // Bytes in thread caches | 313 uint64_t thread_bytes; // Bytes in thread caches |
| 304 uint64_t central_bytes; // Bytes in central cache | 314 uint64_t central_bytes; // Bytes in central cache |
| 305 uint64_t transfer_bytes; // Bytes in central transfer cache | 315 uint64_t transfer_bytes; // Bytes in central transfer cache |
| 306 uint64_t metadata_bytes; // Bytes alloced for metadata | 316 uint64_t metadata_bytes; // Bytes alloced for metadata |
| 307 uint64_t metadata_unmapped_bytes; // Address space reserved for metadata | 317 uint64_t metadata_unmapped_bytes; // Address space reserved for metadata |
| 308 // but is not committed. | 318 // but is not committed. |
| (...skipping 762 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1071 return result; | 1081 return result; |
| 1072 } | 1082 } |
| 1073 | 1083 |
| 1074 inline void* do_malloc(size_t size) { | 1084 inline void* do_malloc(size_t size) { |
| 1075 AddRoomForMark(&size); | 1085 AddRoomForMark(&size); |
| 1076 | 1086 |
| 1077 void* ret = NULL; | 1087 void* ret = NULL; |
| 1078 | 1088 |
| 1079 // The following call forces module initialization | 1089 // The following call forces module initialization |
| 1080 ThreadCache* heap = ThreadCache::GetCache(); | 1090 ThreadCache* heap = ThreadCache::GetCache(); |
| 1081 if (size <= kMaxSize) { | 1091 // First, check if our security policy allows this size. |
| 1082 size_t cl = Static::sizemap()->SizeClass(size); | 1092 if (IsAllocSizePermitted(size)) { |
|
Chris Evans
2013/01/15 02:13:32
Move the test so it's in the "else" branch of "size <= kMaxSize".
jln (very slow on Chromium)
2013/01/15 03:10:57
In release mode, here is the assembly:
cmp r1
| |
| 1083 size = Static::sizemap()->class_to_size(cl); | 1093 if (size <= kMaxSize) { |
| 1094 size_t cl = Static::sizemap()->SizeClass(size); | |
| 1095 size = Static::sizemap()->class_to_size(cl); | |
| 1084 | 1096 |
| 1085 // TODO(jar): If this has any detectable performance impact, it can be | 1097 // TODO(jar): If this has any detectable performance impact, it can be |
| 1086 // optimized by only tallying sizes if the profiler was activated to recall | 1098 // optimized by only tallying sizes if the profiler was activated to |
| 1087 // these tallies. I don't think this is performance critical, but we really | 1099 // recall these tallies. I don't think this is performance critical, but |
| 1088 // should measure it. | 1100 // we really should measure it. |
| 1089 heap->AddToByteAllocatedTotal(size); // Chromium profiling. | 1101 heap->AddToByteAllocatedTotal(size); // Chromium profiling. |
| 1090 | 1102 |
| 1091 if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) { | 1103 if ((FLAGS_tcmalloc_sample_parameter > 0) && |
| 1092 ret = DoSampledAllocation(size); | 1104 heap->SampleAllocation(size)) { |
| 1105 ret = DoSampledAllocation(size); | |
| 1106 MarkAllocatedRegion(ret); | |
| 1107 } else { | |
| 1108 // The common case, and also the simplest. This just pops the | |
| 1109 // size-appropriate freelist, after replenishing it if it's empty. | |
| 1110 ret = CheckMallocResult(heap->Allocate(size, cl)); | |
| 1111 } | |
| 1112 } else { | |
| 1113 ret = do_malloc_pages(heap, size); | |
| 1093 MarkAllocatedRegion(ret); | 1114 MarkAllocatedRegion(ret); |
| 1094 } else { | |
| 1095 // The common case, and also the simplest. This just pops the | |
| 1096 // size-appropriate freelist, after replenishing it if it's empty. | |
| 1097 ret = CheckMallocResult(heap->Allocate(size, cl)); | |
| 1098 } | 1115 } |
| 1099 } else { | |
| 1100 ret = do_malloc_pages(heap, size); | |
| 1101 MarkAllocatedRegion(ret); | |
| 1102 } | 1116 } |
| 1103 if (ret == NULL) errno = ENOMEM; | 1117 if (ret == NULL) errno = ENOMEM; |
| 1104 return ret; | 1118 return ret; |
| 1105 } | 1119 } |
| 1106 | 1120 |
| 1107 inline void* do_calloc(size_t n, size_t elem_size) { | 1121 inline void* do_calloc(size_t n, size_t elem_size) { |
| 1108 // Overflow check | 1122 // Overflow check |
| 1109 const size_t size = n * elem_size; | 1123 const size_t size = n * elem_size; |
| 1110 if (elem_size != 0 && size / elem_size != n) return NULL; | 1124 if (elem_size != 0 && size / elem_size != n) return NULL; |
| 1111 | 1125 |
| (...skipping 114 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1226 AddRoomForMark(&new_size); | 1240 AddRoomForMark(&new_size); |
| 1227 // Get the size of the old entry | 1241 // Get the size of the old entry |
| 1228 const size_t old_size = GetSizeWithCallback(old_ptr, invalid_get_size_fn); | 1242 const size_t old_size = GetSizeWithCallback(old_ptr, invalid_get_size_fn); |
| 1229 | 1243 |
| 1230 // Reallocate if the new size is larger than the old size, | 1244 // Reallocate if the new size is larger than the old size, |
| 1231 // or if the new size is significantly smaller than the old size. | 1245 // or if the new size is significantly smaller than the old size. |
| 1232 // We do hysteresis to avoid resizing ping-pongs: | 1246 // We do hysteresis to avoid resizing ping-pongs: |
| 1233 // . If we need to grow, grow to max(new_size, old_size * 1.X) | 1247 // . If we need to grow, grow to max(new_size, old_size * 1.X) |
| 1234 // . Don't shrink unless new_size < old_size * 0.Y | 1248 // . Don't shrink unless new_size < old_size * 0.Y |
| 1235 // X and Y trade-off time for wasted space. For now we do 1.25 and 0.5. | 1249 // X and Y trade-off time for wasted space. For now we do 1.25 and 0.5. |
| 1236 const int lower_bound_to_grow = old_size + old_size / 4; | 1250 const size_t lower_bound_to_grow = old_size + old_size / 4; |
|
jar (doing other things)
2013/01/15 20:06:00
These two lines should be upstreamed to google3.
| |
| 1237 const int upper_bound_to_shrink = old_size / 2; | 1251 const size_t upper_bound_to_shrink = old_size / 2; |
| 1238 if ((new_size > old_size) || (new_size < upper_bound_to_shrink)) { | 1252 if ((new_size > old_size) || (new_size < upper_bound_to_shrink)) { |
| 1239 // Need to reallocate. | 1253 // Need to reallocate. |
| 1240 void* new_ptr = NULL; | 1254 void* new_ptr = NULL; |
| 1241 | 1255 |
| 1242 if (new_size > old_size && new_size < lower_bound_to_grow) { | 1256 if (new_size > old_size && new_size < lower_bound_to_grow) { |
| 1243 new_ptr = do_malloc_or_cpp_alloc(lower_bound_to_grow); | 1257 new_ptr = do_malloc_or_cpp_alloc(lower_bound_to_grow); |
| 1244 } | 1258 } |
| 1245 ExcludeMarkFromSize(&new_size); // do_malloc will add space if needed. | 1259 ExcludeMarkFromSize(&new_size); // do_malloc will add space if needed. |
| 1246 if (new_ptr == NULL) { | 1260 if (new_ptr == NULL) { |
| 1247 // Either new_size is not a tiny increment, or last do_malloc failed. | 1261 // Either new_size is not a tiny increment, or last do_malloc failed. |
| (...skipping 606 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1854 *mark = ~allocated_mark; // Distinctively not allocated. | 1868 *mark = ~allocated_mark; // Distinctively not allocated. |
| 1855 } | 1869 } |
| 1856 | 1870 |
| 1857 static void MarkAllocatedRegion(void* ptr) { | 1871 static void MarkAllocatedRegion(void* ptr) { |
| 1858 if (ptr == NULL) return; | 1872 if (ptr == NULL) return; |
| 1859 MarkType* mark = GetMarkLocation(ptr); | 1873 MarkType* mark = GetMarkLocation(ptr); |
| 1860 *mark = GetMarkValue(ptr, mark); | 1874 *mark = GetMarkValue(ptr, mark); |
| 1861 } | 1875 } |
| 1862 | 1876 |
| 1863 #endif // TCMALLOC_VALIDATION | 1877 #endif // TCMALLOC_VALIDATION |
| OLD | NEW |