OLD | NEW |
---|---|
1 // Copyright (c) 2005, Google Inc. | 1 // Copyright (c) 2005, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 160 matching lines...) | |
171 using tcmalloc::kCrashWithStats; | 171 using tcmalloc::kCrashWithStats; |
172 using tcmalloc::Log; | 172 using tcmalloc::Log; |
173 using tcmalloc::PageHeap; | 173 using tcmalloc::PageHeap; |
174 using tcmalloc::PageHeapAllocator; | 174 using tcmalloc::PageHeapAllocator; |
175 using tcmalloc::SizeMap; | 175 using tcmalloc::SizeMap; |
176 using tcmalloc::Span; | 176 using tcmalloc::Span; |
177 using tcmalloc::StackTrace; | 177 using tcmalloc::StackTrace; |
178 using tcmalloc::Static; | 178 using tcmalloc::Static; |
179 using tcmalloc::ThreadCache; | 179 using tcmalloc::ThreadCache; |
180 | 180 |
181 // ---- Double free debug declarations | 181 // ---- Functions doing validation with an extra mark. |
182 static size_t ExcludeSpaceForMark(size_t size); | 182 static size_t ExcludeSpaceForMark(size_t size); |
183 static void AddRoomForMark(size_t* size); | 183 static void AddRoomForMark(size_t* size); |
184 static void ExcludeMarkFromSize(size_t* new_size); | 184 static void ExcludeMarkFromSize(size_t* new_size); |
185 static void MarkAllocatedRegion(void* ptr); | 185 static void MarkAllocatedRegion(void* ptr); |
186 static void ValidateAllocatedRegion(void* ptr, size_t cl); | 186 static void ValidateAllocatedRegion(void* ptr, size_t cl); |
187 // ---- End Double free debug declarations | 187 // ---- End validation functions. |
188 | 188 |
189 DECLARE_int64(tcmalloc_sample_parameter); | 189 DECLARE_int64(tcmalloc_sample_parameter); |
190 DECLARE_double(tcmalloc_release_rate); | 190 DECLARE_double(tcmalloc_release_rate); |
191 | 191 |
192 // For windows, the printf we use to report large allocs is | 192 // For windows, the printf we use to report large allocs is |
193 // potentially dangerous: it could cause a malloc that would cause an | 193 // potentially dangerous: it could cause a malloc that would cause an |
194 // infinite loop. So by default we set the threshold to a huge number | 194 // infinite loop. So by default we set the threshold to a huge number |
195 // on windows, so this bad situation will never trigger. You can | 195 // on windows, so this bad situation will never trigger. You can |
196 // always set TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD manually if you | 196 // always set TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD manually if you |
197 // want this functionality. | 197 // want this functionality. |
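For context, a minimal sketch of how such an environment-variable override is typically read once at startup. The helper name and the default values are illustrative assumptions, not tcmalloc's actual code; only the variable name `TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD` comes from the comment above.

```cpp
#include <cstdint>
#include <cstdlib>

// Hypothetical helper: pick the large-alloc report threshold, preferring
// the TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD environment variable if set.
static int64_t LargeAllocReportThreshold() {
#ifdef _WIN32
  // Huge default on Windows so the printf-inside-malloc path never triggers.
  int64_t threshold = INT64_C(1) << 62;
#else
  int64_t threshold = INT64_C(1) << 30;  // Illustrative non-Windows default.
#endif
  if (const char* env = std::getenv("TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD")) {
    threshold = std::strtoll(env, nullptr, 10);
  }
  return threshold;
}
```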
(...skipping 952 matching lines...) | |
1150 // a dynamic library, but is not listed last on the link line. | 1150 // a dynamic library, but is not listed last on the link line. |
1151 // In that case, libraries after it on the link line will | 1151 // In that case, libraries after it on the link line will |
1152 // allocate with libc malloc, but free with tcmalloc's free. | 1152 // allocate with libc malloc, but free with tcmalloc's free. |
1153 (*invalid_free_fn)(ptr); // Decide how to handle the bad free request | 1153 (*invalid_free_fn)(ptr); // Decide how to handle the bad free request |
1154 return; | 1154 return; |
1155 } | 1155 } |
1156 cl = span->sizeclass; | 1156 cl = span->sizeclass; |
1157 Static::pageheap()->CacheSizeClass(p, cl); | 1157 Static::pageheap()->CacheSizeClass(p, cl); |
1158 } | 1158 } |
1159 | 1159 |
1160 // Validate pointer of large objects. | |
1161 if (cl == 0) { | |
jar (doing other things)
2012/05/17 01:36:41
nit: add comment:
// Mimic debug code done below
kaiwang
2012/05/19 00:12:27
Done.
| |
1162 // Make sure ptr is inside the first page of the span. | |
jar (doing other things)
2012/05/17 01:36:41
nit: pointer
kaiwang
2012/05/19 00:12:27
Done.
| |
1163 CHECK_CONDITION(span->start == p); | |
1164 // Make sure we are not freeing interior pointers, even in release build. | |
1165 CHECK_CONDITION_PRINT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0, | |
1166 "Pointer not pointed to the start of a span"); | |
jar (doing other things)
2012/05/17 01:36:41
nit:
"Pointer isn't pointing to start of span"
kaiwang
2012/05/19 00:12:27
Done.
| |
1167 } | |
1160 ValidateAllocatedRegion(ptr, cl); | 1168 ValidateAllocatedRegion(ptr, cl); |
1161 | 1169 |
1162 if (cl != 0) { | 1170 if (cl != 0) { |
1163 ASSERT(!Static::pageheap()->GetDescriptor(p)->sample); | 1171 ASSERT(!Static::pageheap()->GetDescriptor(p)->sample); |
1164 ThreadCache* heap = GetCacheIfPresent(); | 1172 ThreadCache* heap = GetCacheIfPresent(); |
1165 if (heap != NULL) { | 1173 if (heap != NULL) { |
1166 heap->Deallocate(ptr, cl); | 1174 heap->Deallocate(ptr, cl); |
1167 } else { | 1175 } else { |
1168 // Delete directly into central cache | 1176 // Delete directly into central cache |
1169 tcmalloc::FL_Init(ptr); | 1177 tcmalloc::FL_Init(ptr); |
1170 Static::central_cache()[cl].InsertRange(ptr, ptr, 1); | 1178 Static::central_cache()[cl].InsertRange(ptr, ptr, 1); |
1171 } | 1179 } |
1172 } else { | 1180 } else { |
1173 SpinLockHolder h(Static::pageheap_lock()); | 1181 SpinLockHolder h(Static::pageheap_lock()); |
1174 ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0); | |
1175 ASSERT(span != NULL && span->start == p); | |
jar (doing other things)
2012/05/17 01:36:41
nit: might as well keep this, so that merges go easier.
kaiwang
2012/05/19 00:12:27
Done.
| |
1176 if (span->sample) { | 1182 if (span->sample) { |
1177 StackTrace* st = reinterpret_cast<StackTrace*>(span->objects); | 1183 StackTrace* st = reinterpret_cast<StackTrace*>(span->objects); |
1178 tcmalloc::DLL_Remove(span); | 1184 tcmalloc::DLL_Remove(span); |
1179 Static::stacktrace_allocator()->Delete(st); | 1185 Static::stacktrace_allocator()->Delete(st); |
1180 span->objects = NULL; | 1186 span->objects = NULL; |
1181 } | 1187 } |
1182 Static::pageheap()->Delete(span); | 1188 Static::pageheap()->Delete(span); |
1183 } | 1189 } |
1184 } | 1190 } |
1185 | 1191 |
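To make the routing in the function above easier to follow: small objects (nonzero size class) go back to a per-thread free list when one exists, otherwise to a shared central list; size class 0 means a large, page-heap-backed object. Below is a much-simplified, self-contained model of that decision. All names are illustrative assumptions; this is not tcmalloc's code, and the real thread cache and central cache are far more elaborate.

```cpp
#include <cstddef>
#include <mutex>
#include <vector>

struct CentralLists {
  std::vector<void*> list[64];  // One free list per size class (illustrative).
  std::mutex lock;
};
static CentralLists g_central;
// Null until a thread builds its cache, mirroring GetCacheIfPresent().
thread_local std::vector<void*>* t_cache[64];

// Hypothetical stand-in for the page heap's span deletion (cl == 0 path).
static void PageHeapDelete(void* /*ptr*/) { /* return pages to the heap */ }

static void DoFree(void* ptr, size_t cl) {
  if (cl != 0) {
    if (t_cache[cl] != nullptr) {
      t_cache[cl]->push_back(ptr);           // Fast path: thread-local cache.
    } else {
      std::lock_guard<std::mutex> h(g_central.lock);
      g_central.list[cl].push_back(ptr);     // Slow path: central free list.
    }
  } else {
    PageHeapDelete(ptr);                     // Large object: free whole span.
  }
}
```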
(...skipping 83 matching lines...) | |
1269 // For use by exported routines below that want specific alignments | 1275 // For use by exported routines below that want specific alignments |
1270 // | 1276 // |
1271 // Note: this code can be slow for alignments > 16, and can | 1277 // Note: this code can be slow for alignments > 16, and can |
1272 // significantly fragment memory. The expectation is that | 1278 // significantly fragment memory. The expectation is that |
1273 // memalign/posix_memalign/valloc/pvalloc will not be invoked very | 1279 // memalign/posix_memalign/valloc/pvalloc will not be invoked very |
1274 // often. This requirement simplifies our implementation and allows | 1280 // often. This requirement simplifies our implementation and allows |
1275 // us to tune for expected allocation patterns. | 1281 // us to tune for expected allocation patterns. |
1276 void* do_memalign(size_t align, size_t size) { | 1282 void* do_memalign(size_t align, size_t size) { |
1277 ASSERT((align & (align - 1)) == 0); | 1283 ASSERT((align & (align - 1)) == 0); |
1278 ASSERT(align > 0); | 1284 ASSERT(align > 0); |
1279 // Marked in CheckMallocResult(), which is also inside SpanToMallocResult(). | 1285 // Marked in CheckedMallocResult(), which is also inside SpanToMallocResult(). |
1280 AddRoomForMark(&size); | 1286 AddRoomForMark(&size); |
1281 if (size + align < size) return NULL; // Overflow | 1287 if (size + align < size) return NULL; // Overflow |
1282 | 1288 |
1283 // Fall back to malloc if we would already align this memory access properly. | 1289 // Fall back to malloc if we would already align this memory access properly. |
1284 if (align <= AlignmentForSize(size)) { | 1290 if (align <= AlignmentForSize(size)) { |
1285 void* p = do_malloc(size); | 1291 void* p = do_malloc(size); |
1286 ASSERT((reinterpret_cast<uintptr_t>(p) % align) == 0); | 1292 ASSERT((reinterpret_cast<uintptr_t>(p) % align) == 0); |
1287 return p; | 1293 return p; |
1288 } | 1294 } |
1289 | 1295 |
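As a caller-side illustration of the contract enforced above (power-of-two alignment, overflow-checked size), here is a small sketch built on the standard C++17 `std::aligned_alloc`. The wrapper name is made up; only the two precondition checks mirror the asserts and overflow test in do_memalign.

```cpp
#include <cstddef>
#include <cstdlib>

// Hypothetical wrapper mirroring do_memalign's preconditions.
static void* AlignedAllocChecked(size_t align, size_t size) {
  if (align == 0 || (align & (align - 1)) != 0) {
    return nullptr;                    // Alignment must be a power of two.
  }
  if (size + align < size) {
    return nullptr;                    // size_t overflow, as in do_memalign.
  }
  // aligned_alloc requires size to be a multiple of align; round up
  // (safe: the overflow check above guarantees size + align fits).
  size_t rounded = (size + align - 1) & ~(align - 1);
  return std::aligned_alloc(align, rounded);
}
```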
(...skipping 401 matching lines...) | |
1691 return do_mallinfo(); | 1697 return do_mallinfo(); |
1692 } | 1698 } |
1693 #endif | 1699 #endif |
1694 | 1700 |
1695 extern "C" PERFTOOLS_DLL_DECL size_t tc_malloc_size(void* ptr) __THROW { | 1701 extern "C" PERFTOOLS_DLL_DECL size_t tc_malloc_size(void* ptr) __THROW { |
1696 return MallocExtension::instance()->GetAllocatedSize(ptr); | 1702 return MallocExtension::instance()->GetAllocatedSize(ptr); |
1697 } | 1703 } |
1698 | 1704 |
1699 #endif // TCMALLOC_USING_DEBUGALLOCATION | 1705 #endif // TCMALLOC_USING_DEBUGALLOCATION |
1700 | 1706 |
1701 // ---Double free() debugging implementation ----------------------------------- | 1707 // --- Validation implementation with an extra mark ---------------------------- |
1702 // We will put a mark at the extreme end of each allocation block. We make | 1708 // We will put a mark at the extreme end of each allocation block. We make |
1703 // sure that we always allocate enough "extra memory" that we can fit in the | 1709 // sure that we always allocate enough "extra memory" that we can fit in the |
1704 // mark, and still provide the requested usable region. If ever that mark is | 1710 // mark, and still provide the requested usable region. If ever that mark is |
1705 // not as expected, then we know that the user is corrupting memory beyond their | 1711 // not as expected, then we know that the user is corrupting memory beyond their |
1706 // request size, or that they have called free a second time without having | 1712 // request size, or that they have called free a second time without having |
1707 // the memory allocated (again). This allows us to spot most double free()s, | 1713 // the memory allocated (again). This allows us to spot most double free()s, |
1708 // but some can "slip by" or confuse our logic if the caller reallocates memory | 1714 // but some can "slip by" or confuse our logic if the caller reallocates memory |
1709 // (for a second use) before performing an evil double-free of a first | 1715 // (for a second use) before performing an evil double-free of a first |
1710 // allocation | 1716 // allocation |
1711 | 1717 |
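A minimal, self-contained sketch of the scheme described above, not tcmalloc's actual implementation: reserve one extra mark slot past the usable region, derive the mark from the block's address, then check and flip it on free so that a second free (or an overrun that clobbered the mark) is caught. The real code recovers the block size from its size class; here, for brevity, the caller passes it, and the mixing constant is made up.

```cpp
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <cstring>

typedef uintptr_t MarkType;  // Word-sized mark, as in the non-small variant.

// Illustrative mark derived from the block address and the mark's own
// location (so the block's size influences the value, as described above).
static MarkType MarkFor(void* ptr, MarkType* loc) {
  return reinterpret_cast<uintptr_t>(ptr) ^
         reinterpret_cast<uintptr_t>(loc) ^ 0xA5A5A5A5u;
}

static void* MarkedMalloc(size_t size) {
  void* p = malloc(size + sizeof(MarkType));  // AddRoomForMark, conceptually.
  if (p == NULL) return NULL;
  MarkType* mark =
      reinterpret_cast<MarkType*>(static_cast<char*>(p) + size);
  *mark = MarkFor(p, mark);                   // MarkAllocatedRegion.
  return p;
}

static void MarkedFree(void* p, size_t size) {
  if (p == NULL) return;
  MarkType* mark =
      reinterpret_cast<MarkType*>(static_cast<char*>(p) + size);
  MarkType expected = MarkFor(p, mark);
  // ValidateAllocatedRegion: wrong value means double free or overrun.
  assert(*mark == expected && "double free or buffer overrun detected");
  *mark = ~expected;  // Distinctively not allocated; a second free trips here.
  free(p);
}
```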
(...skipping 22 matching lines...) | |
1734 | 1740 |
1735 static size_t ExcludeSpaceForMark(size_t size) { return size; } | 1741 static size_t ExcludeSpaceForMark(size_t size) { return size; } |
1736 static void AddRoomForMark(size_t* size) {} | 1742 static void AddRoomForMark(size_t* size) {} |
1737 static void ExcludeMarkFromSize(size_t* new_size) {} | 1743 static void ExcludeMarkFromSize(size_t* new_size) {} |
1738 static void MarkAllocatedRegion(void* ptr) {} | 1744 static void MarkAllocatedRegion(void* ptr) {} |
1739 static void ValidateAllocatedRegion(void* ptr, size_t cl) {} | 1745 static void ValidateAllocatedRegion(void* ptr, size_t cl) {} |
1740 | 1746 |
1741 #else // TCMALLOC_VALIDATION | 1747 #else // TCMALLOC_VALIDATION |
1742 | 1748 |
1743 static void DieFromDoubleFree() { | 1749 static void DieFromDoubleFree() { |
1744 char* p = NULL; | 1750 Log(kCrash, __FILE__, __LINE__, "Attempt to double free"); |
1745 p++; | |
1746 *p += 1; // Segv. | |
1747 } | |
1748 | |
1749 static size_t DieFromBadFreePointer(const void* unused) { | |
1750 char* p = NULL; | |
1751 p += 2; | |
1752 *p += 2; // Segv. | |
1753 return 0; | |
1754 } | 1751 } |
1755 | 1752 |
1756 static void DieFromMemoryCorruption() { | 1753 static void DieFromMemoryCorruption() { |
1757 char* p = NULL; | 1754 Log(kCrash, __FILE__, __LINE__, "Memory corrupted"); |
1758 p += 3; | |
1759 *p += 3; // Segv. | |
1760 } | 1755 } |
1761 | 1756 |
1762 // We can either do byte marking, or whole word marking based on the following | 1757 // We can either do byte marking, or whole word marking based on the following |
1763 // define. char is as small as we can get, and word marking probably provides | 1758 // define. char is as small as we can get, and word marking probably provides |
1764 // more than enough bits that we won't miss a corruption. Any sized integral | 1759 // more than enough bits that we won't miss a corruption. Any sized integral |
1765 // type can be used, but we just define two examples. | 1760 // type can be used, but we just define two examples. |
1766 | 1761 |
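The two choices the comment mentions could look like the sketch below, trading per-block overhead against the chance that a corruption happens to reproduce the mark by accident. The mask constants here are made-up placeholders, not the values in the skipped lines.

```cpp
#include <cstdint>

#if defined(TCMALLOC_SMALL_VALIDATION)
typedef unsigned char MarkType;   // 1 byte of overhead per block.
static const MarkType kAllocationMarkMask = 0x36;
#else
typedef uintptr_t MarkType;       // Word-sized: far less likely to be missed.
static const MarkType kAllocationMarkMask =
    static_cast<MarkType>(0x3636363636363636ULL);
#endif
```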
1767 // #define TCMALLOC_SMALL_VALIDATION | 1762 // #define TCMALLOC_SMALL_VALIDATION |
1768 #if defined (TCMALLOC_SMALL_VALIDATION) | 1763 #if defined (TCMALLOC_SMALL_VALIDATION) |
1769 | 1764 |
(...skipping 16 matching lines...) | |
1786 | 1781 |
1787 inline static void ExcludeMarkFromSize(size_t* new_size) { | 1782 inline static void ExcludeMarkFromSize(size_t* new_size) { |
1788 *new_size -= sizeof(kAllocationMarkMask); | 1783 *new_size -= sizeof(kAllocationMarkMask); |
1789 } | 1784 } |
1790 | 1785 |
1791 inline static size_t ExcludeSpaceForMark(size_t size) { | 1786 inline static size_t ExcludeSpaceForMark(size_t size) { |
1792 return size - sizeof(kAllocationMarkMask); // Lie about size when asked. | 1787 return size - sizeof(kAllocationMarkMask); // Lie about size when asked. |
1793 } | 1788 } |
1794 | 1789 |
1795 inline static MarkType* GetMarkLocation(void* ptr) { | 1790 inline static MarkType* GetMarkLocation(void* ptr) { |
1796 size_t class_size = GetSizeWithCallback(ptr, DieFromBadFreePointer); | 1791 size_t class_size = GetSizeWithCallback(ptr, &InvalidGetAllocatedSize); |
1797 ASSERT(class_size % sizeof(kAllocationMarkMask) == 0); | 1792 ASSERT(class_size % sizeof(kAllocationMarkMask) == 0); |
1798 size_t last_index = (class_size / sizeof(kAllocationMarkMask)) - 1; | 1793 size_t last_index = (class_size / sizeof(kAllocationMarkMask)) - 1; |
1799 return static_cast<MarkType*>(ptr) + last_index; | 1794 return static_cast<MarkType*>(ptr) + last_index; |
1800 } | 1795 } |
1801 | 1796 |
1802 // We hash in the mark location plus the pointer so that we effectively mix in | 1797 // We hash in the mark location plus the pointer so that we effectively mix in |
1803 // the size of the block. This means that if a span is used for different sizes | 1798 // the size of the block. This means that if a span is used for different sizes |
1804 // that the mark will be different. It would be good to hash in the size (which | 1799 // that the mark will be different. It would be good to hash in the size (which |
1805 // we effectively get by using both mark location and pointer), but even better | 1800 // we effectively get by using both mark location and pointer), but even better |
1806 // would be to also include the class, as it concisely contains the entropy | 1801 // would be to also include the class, as it concisely contains the entropy |
(...skipping 52 matching lines...) | |
1859 *mark = ~allocated_mark; // Distinctively not allocated. | 1854 *mark = ~allocated_mark; // Distinctively not allocated. |
1860 } | 1855 } |
1861 | 1856 |
1862 static void MarkAllocatedRegion(void* ptr) { | 1857 static void MarkAllocatedRegion(void* ptr) { |
1863 if (ptr == NULL) return; | 1858 if (ptr == NULL) return; |
1864 MarkType* mark = GetMarkLocation(ptr); | 1859 MarkType* mark = GetMarkLocation(ptr); |
1865 *mark = GetMarkValue(ptr, mark); | 1860 *mark = GetMarkValue(ptr, mark); |
1866 } | 1861 } |
1867 | 1862 |
1868 #endif // TCMALLOC_VALIDATION | 1863 #endif // TCMALLOC_VALIDATION |