Chromium Code Reviews

Unified Diff: third_party/tcmalloc/chromium/src/tcmalloc.cc

Issue 10391178: 1. Enable large object pointer offset check in release build. (Closed)
Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: Created 8 years, 7 months ago
 // Copyright (c) 2005, Google Inc.
 // All rights reserved.
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 // notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
(...skipping 160 matching lines...)
 using tcmalloc::kCrashWithStats;
 using tcmalloc::Log;
 using tcmalloc::PageHeap;
 using tcmalloc::PageHeapAllocator;
 using tcmalloc::SizeMap;
 using tcmalloc::Span;
 using tcmalloc::StackTrace;
 using tcmalloc::Static;
 using tcmalloc::ThreadCache;

-// ---- Double free debug declarations
+// ---- Functions doing validation with an extra mark.
 static size_t ExcludeSpaceForMark(size_t size);
 static void AddRoomForMark(size_t* size);
 static void ExcludeMarkFromSize(size_t* new_size);
 static void MarkAllocatedRegion(void* ptr);
 static void ValidateAllocatedRegion(void* ptr, size_t cl);
-// ---- End Double free debug declarations
+// ---- End validation functions.

 DECLARE_int64(tcmalloc_sample_parameter);
 DECLARE_double(tcmalloc_release_rate);

 // For windows, the printf we use to report large allocs is
 // potentially dangerous: it could cause a malloc that would cause an
 // infinite loop.  So by default we set the threshold to a huge number
 // on windows, so this bad situation will never trigger.  You can
 // always set TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD manually if you
 // want this functionality.
(...skipping 745 matching lines...)
 // Helpers for the exported routines below
 //-------------------------------------------------------------------

 static inline bool CheckCachedSizeClass(void *ptr) {
   PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
   size_t cached_value = Static::pageheap()->GetSizeClassIfCached(p);
   return cached_value == 0 ||
       cached_value == Static::pageheap()->GetDescriptor(p)->sizeclass;
 }

-static inline void* CheckedMallocResult(void *result) {
+static inline void* CheckMallocResult(void *result) {
   ASSERT(result == NULL || CheckCachedSizeClass(result));
   MarkAllocatedRegion(result);
   return result;
 }

 static inline void* SpanToMallocResult(Span *span) {
   Static::pageheap()->CacheSizeClass(span->start, 0);
   return
-      CheckedMallocResult(reinterpret_cast<void*>(span->start << kPageShift));
+      CheckMallocResult(reinterpret_cast<void*>(span->start << kPageShift));
 }

 static void* DoSampledAllocation(size_t size) {
   // Grab the stack trace outside the heap lock
   StackTrace tmp;
   tmp.depth = GetStackTrace(tmp.stack, tcmalloc::kMaxStackDepth, 1);
   tmp.size = size;

   SpinLockHolder h(Static::pageheap_lock());
   // Allocate span
(...skipping 116 matching lines...)
     // these tallies.  I don't think this is performance critical, but we really
     // should measure it.
     heap->AddToByteAllocatedTotal(size);  // Chromium profiling.

     if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
       ret = DoSampledAllocation(size);
       MarkAllocatedRegion(ret);
     } else {
       // The common case, and also the simplest.  This just pops the
       // size-appropriate freelist, after replenishing it if it's empty.
-      ret = CheckedMallocResult(heap->Allocate(size, cl));
+      ret = CheckMallocResult(heap->Allocate(size, cl));
     }
   } else {
     ret = do_malloc_pages(heap, size);
     MarkAllocatedRegion(ret);
   }
   if (ret == NULL) errno = ENOMEM;
   return ret;
 }

 inline void* do_calloc(size_t n, size_t elem_size) {
(...skipping 39 matching lines...)
       // tcmalloc.  The latter can happen if tcmalloc is linked in via
       // a dynamic library, but is not listed last on the link line.
       // In that case, libraries after it on the link line will
       // allocate with libc malloc, but free with tcmalloc's free.
       (*invalid_free_fn)(ptr);  // Decide how to handle the bad free request
       return;
     }
     cl = span->sizeclass;
     Static::pageheap()->CacheSizeClass(p, cl);
   }
+  if (cl == 0) {
+    // Check to see if the object is in use.
+    CHECK_CONDITION_PRINT(span->location == Span::IN_USE,
+                          "Object was not in-use");

+    CHECK_CONDITION_PRINT(
+        span->start << kPageShift == reinterpret_cast<uintptr_t>(ptr),
+        "Pointer is not pointing to the start of a span");
+  }
   ValidateAllocatedRegion(ptr, cl);

   if (cl != 0) {
     ASSERT(!Static::pageheap()->GetDescriptor(p)->sample);
     ThreadCache* heap = GetCacheIfPresent();
     if (heap != NULL) {
       heap->Deallocate(ptr, cl);
     } else {
       // Delete directly into central cache
       tcmalloc::FL_Init(ptr);
(...skipping 99 matching lines...)
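
The CHECK_CONDITION_PRINT() used in the new block above comes from internal_logging.h, the other file in this change; its definition is not visible in this diff. A minimal sketch of the shape such a macro plausibly takes, assuming it crashes through the same Log(kCrash, ...) path this patch uses elsewhere:

    // Hypothetical sketch; the real definition lives in internal_logging.h.
    #define CHECK_CONDITION_PRINT(cond, str)       \
      do {                                         \
        if (!(cond)) {                             \
          Log(kCrash, __FILE__, __LINE__, (str));  \
        }                                          \
      } while (0)

Unlike ASSERT, this fires in release builds too, which is the point of the CL title: freeing an interior pointer of a large (cl == 0) object, e.g. free(static_cast<char*>(p) + 8) for a p returned by malloc(1 << 20), now crashes with "Pointer is not pointing to the start of a span" instead of silently corrupting the page heap.
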
 // For use by exported routines below that want specific alignments
 //
 // Note: this code can be slow for alignments > 16, and can
 // significantly fragment memory.  The expectation is that
 // memalign/posix_memalign/valloc/pvalloc will not be invoked very
 // often.  This requirement simplifies our implementation and allows
 // us to tune for expected allocation patterns.
 void* do_memalign(size_t align, size_t size) {
   ASSERT((align & (align - 1)) == 0);
   ASSERT(align > 0);
   // Marked in CheckMallocResult(), which is also inside SpanToMallocResult().
   AddRoomForMark(&size);
   if (size + align < size) return NULL;  // Overflow

   // Fall back to malloc if we would already align this memory access properly.
   if (align <= AlignmentForSize(size)) {
     void* p = do_malloc(size);
     ASSERT((reinterpret_cast<uintptr_t>(p) % align) == 0);
     return p;
   }

(...skipping 10 matching lines...)
   // we miss in the size class array, but that is deemed acceptable
   // since memalign() should be used rarely.
   int cl = Static::sizemap()->SizeClass(size);
   while (cl < kNumClasses &&
          ((Static::sizemap()->class_to_size(cl) & (align - 1)) != 0)) {
     cl++;
   }
   if (cl < kNumClasses) {
     ThreadCache* heap = ThreadCache::GetCache();
     size = Static::sizemap()->class_to_size(cl);
-    return CheckedMallocResult(heap->Allocate(size, cl));
+    return CheckMallocResult(heap->Allocate(size, cl));
   }

   // We will allocate directly from the page heap
   SpinLockHolder h(Static::pageheap_lock());

   if (align <= kPageSize) {
     // Any page-level allocation will be fine
     // TODO: We could put the rest of this page in the appropriate
     // TODO: cache but it does not seem worth it.
(...skipping 370 matching lines...)
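
To make the size-class walk in do_memalign() above concrete: the loop advances to the first class that is big enough and whose class size is a multiple of the requested power-of-two alignment. A self-contained toy with a made-up class table (the real table comes from SizeMap and is configuration-dependent):

    #include <stdio.h>
    #include <stddef.h>

    static const size_t kClassToSize[] = {16, 32, 48, 64, 80, 96, 128, 256};
    static const int kNumClasses = sizeof(kClassToSize) / sizeof(kClassToSize[0]);

    // Stand-in for SizeClass() plus the alignment loop in do_memalign().
    static int FindAlignedClass(size_t size, size_t align) {
      int cl = 0;
      while (cl < kNumClasses && kClassToSize[cl] < size) cl++;
      while (cl < kNumClasses && (kClassToSize[cl] & (align - 1)) != 0) cl++;
      return cl < kNumClasses ? cl : -1;
    }

    int main() {
      // A 48-byte request with 32-byte alignment skips the 48-byte class
      // (48 & 31 != 0) and settles on the 64-byte class, wasting 16 bytes.
      printf("%d\n", FindAlignedClass(48, 32));  // Prints 3: the 64-byte class.
      return 0;
    }

This is why the comment before do_memalign() warns that alignments > 16 can fragment memory: the chosen class can be noticeably larger than the request.
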
   return do_mallinfo();
 }
 #endif

 extern "C" PERFTOOLS_DLL_DECL size_t tc_malloc_size(void* ptr) __THROW {
   return MallocExtension::instance()->GetAllocatedSize(ptr);
 }

 #endif  // TCMALLOC_USING_DEBUGALLOCATION

-// ---Double free() debugging implementation -----------------------------------
+// --- Validation implementation with an extra mark ----------------------------
 // We will put a mark at the extreme end of each allocation block.  We make
 // sure that we always allocate enough "extra memory" that we can fit in the
 // mark, and still provide the requested usable region.  If ever that mark is
 // not as expected, then we know that the user is corrupting memory beyond their
 // request size, or that they have called free a second time without having
 // the memory allocated (again).  This allows us to spot most double free()s,
 // but some can "slip by" or confuse our logic if the caller reallocates memory
 // (for a second use) before performing an evil double-free of a first
 // allocation

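A toy illustration of the scheme just described (not the code in this file: the real implementation recovers the block size from the size class rather than passing it around, keeps freed blocks mapped in freelists so the inverted mark stays readable, and derives the mark value by hashing as described further below):

    #include <stdio.h>
    #include <stdlib.h>

    typedef int MarkType;
    static const MarkType kMark = static_cast<MarkType>(0xE1AB9536);

    void* toy_malloc(size_t size) {
      size_t total = size + sizeof(MarkType);  // AddRoomForMark()
      char* p = static_cast<char*>(malloc(total));
      if (p != NULL)                           // MarkAllocatedRegion()
        *reinterpret_cast<MarkType*>(p + total - sizeof(MarkType)) = kMark;
      return p;
    }

    void toy_free(void* ptr, size_t size) {
      size_t total = size + sizeof(MarkType);
      MarkType* mark = reinterpret_cast<MarkType*>(
          static_cast<char*>(ptr) + total - sizeof(MarkType));
      if (*mark != kMark) {                    // ValidateAllocatedRegion()
        fprintf(stderr, "double free or memory corruption\n");
        abort();
      }
      *mark = ~kMark;  // Distinctively not allocated.
      free(ptr);
    }

A second toy_free() of the same block sees ~kMark rather than kMark and aborts; an overrun that scribbles past the usable region is caught the same way.
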
(...skipping 22 matching lines...)

 static size_t ExcludeSpaceForMark(size_t size) { return size; }
 static void AddRoomForMark(size_t* size) {}
 static void ExcludeMarkFromSize(size_t* new_size) {}
 static void MarkAllocatedRegion(void* ptr) {}
 static void ValidateAllocatedRegion(void* ptr, size_t cl) {}

 #else  // TCMALLOC_VALIDATION

 static void DieFromDoubleFree() {
-  char* p = NULL;
-  p++;
-  *p += 1;  // Segv.
-}
-
-static size_t DieFromBadFreePointer(const void* unused) {
-  char* p = NULL;
-  p += 2;
-  *p += 2;  // Segv.
-  return 0;
+  Log(kCrash, __FILE__, __LINE__, "Attempt to double free");
 }

 static void DieFromMemoryCorruption() {
-  char* p = NULL;
-  p += 3;
-  *p += 3;  // Segv.
+  Log(kCrash, __FILE__, __LINE__, "Memory corrupted");
 }

 // We can either do byte marking, or whole word marking based on the following
 // define.  char is as small as we can get, and word marking probably provides
 // more than enough bits that we won't miss a corruption. Any sized integral
 // type can be used, but we just define two examples.

 // #define TCMALLOC_SMALL_VALIDATION
 #if defined (TCMALLOC_SMALL_VALIDATION)

 typedef char MarkType;  // char saves memory... int is more complete.
 static const MarkType kAllocationMarkMask = static_cast<MarkType>(0x36);

 #else

 typedef int MarkType;  // char saves memory... int is more complete.
 static const MarkType kAllocationMarkMask = static_cast<MarkType>(0xE1AB9536);

 #endif

 // TODO(jar): See if use of reference rather than pointer gets better inlining,
 // or if macro is needed.  My fear is that taking the address may preclude
 // register allocation :-(.
 inline static void AddRoomForMark(size_t* size) {
   *size += sizeof(kAllocationMarkMask);
 }

 inline static void ExcludeMarkFromSize(size_t* new_size) {
   *new_size -= sizeof(kAllocationMarkMask);
 }

 inline static size_t ExcludeSpaceForMark(size_t size) {
   return size - sizeof(kAllocationMarkMask);  // Lie about size when asked.
 }

 inline static MarkType* GetMarkLocation(void* ptr) {
-  size_t class_size = GetSizeWithCallback(ptr, DieFromBadFreePointer);
-  ASSERT(class_size % sizeof(kAllocationMarkMask) == 0);
-  size_t last_index = (class_size / sizeof(kAllocationMarkMask)) - 1;
+  size_t size = GetSizeWithCallback(ptr, &InvalidGetAllocatedSize);
+  ASSERT(size % sizeof(kAllocationMarkMask) == 0);
+  size_t last_index = (size / sizeof(kAllocationMarkMask)) - 1;
   return static_cast<MarkType*>(ptr) + last_index;
 }

 // We hash in the mark location plus the pointer so that we effectively mix in
 // the size of the block.  This means that if a span is used for different sizes
 // that the mark will be different.  It would be good to hash in the size (which
 // we effectively get by using both mark location and pointer), but even better
 // would be to also include the class, as it concisely contains the entropy
 // found in the size (when we don't have large allocation), and there is less
 // risk of losing those bits to truncation.  It would probably be good to combine
(...skipping 50 matching lines...)
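
A minimal sketch of the mixing idea in the comment above (an assumed shape only; the real GetMarkValue() is in the elided lines): fold the object pointer and the mark address together, so the mark depends both on where the block starts and, via the mark's offset, on how big it is.

    #include <stdint.h>

    static MarkType GetMarkValueSketch(void* ptr, MarkType* mark) {
      uintptr_t base = reinterpret_cast<uintptr_t>(ptr);
      uintptr_t end = reinterpret_cast<uintptr_t>(mark);  // Also encodes the size.
      // Any cheap mix that depends on both inputs will do; XOR-folding with the
      // mask keeps some entropy even after truncation to MarkType.
      return static_cast<MarkType>(base ^ (end >> 3) ^ kAllocationMarkMask);
    }
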
   *mark = ~allocated_mark;  // Distinctively not allocated.
 }

 static void MarkAllocatedRegion(void* ptr) {
   if (ptr == NULL) return;
   MarkType* mark = GetMarkLocation(ptr);
   *mark = GetMarkValue(ptr, mark);
 }

 #endif  // TCMALLOC_VALIDATION