| OLD | NEW |
| 1 // Copyright (c) 2000, Google Inc. | 1 // Copyright (c) 2000, Google Inc. |
| 2 // All rights reserved. | 2 // All rights reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
| 6 // met: | 6 // met: |
| 7 // | 7 // |
| 8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
| 9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
| 10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
| (...skipping 13 matching lines...) Expand all Loading... |
| 24 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 24 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 25 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 25 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 26 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 26 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 27 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 27 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 28 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 28 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 29 | 29 |
| 30 // --- | 30 // --- |
| 31 // Author: Urs Holzle <opensource@google.com> | 31 // Author: Urs Holzle <opensource@google.com> |
| 32 | 32 |
| 33 #include "config.h" | 33 #include "config.h" |
| 34 #include <errno.h> |
| 35 #ifdef HAVE_FCNTL_H |
| 36 #include <fcntl.h> |
| 37 #endif |
| 38 #ifdef HAVE_INTTYPES_H |
| 39 #include <inttypes.h> |
| 40 #endif |
| 34 // We only need malloc.h for struct mallinfo. | 41 // We only need malloc.h for struct mallinfo. |
| 35 #ifdef HAVE_STRUCT_MALLINFO | 42 #ifdef HAVE_STRUCT_MALLINFO |
| 36 // Malloc can be in several places on older versions of OS X. | 43 // Malloc can be in several places on older versions of OS X. |
| 37 # if defined(HAVE_MALLOC_H) | 44 # if defined(HAVE_MALLOC_H) |
| 38 # include <malloc.h> | 45 # include <malloc.h> |
| 39 # elif defined(HAVE_MALLOC_MALLOC_H) | 46 # elif defined(HAVE_MALLOC_MALLOC_H) |
| 40 # include <malloc/malloc.h> | 47 # include <malloc/malloc.h> |
| 41 # elif defined(HAVE_SYS_MALLOC_H) | 48 # elif defined(HAVE_SYS_MALLOC_H) |
| 42 # include <sys/malloc.h> | 49 # include <sys/malloc.h> |
| 43 # endif | 50 # endif |
| 44 #endif | 51 #endif |
| 52 #ifdef HAVE_PTHREAD |
| 45 #include <pthread.h> | 53 #include <pthread.h> |
| 46 #include <stdio.h> | |
| 47 #ifdef HAVE_INTTYPES_H | |
| 48 #include <inttypes.h> | |
| 49 #endif | 54 #endif |
| 50 #include <stdarg.h> | 55 #include <stdarg.h> |
| 56 #include <stdio.h> |
| 57 #include <string.h> |
| 51 #ifdef HAVE_MMAP | 58 #ifdef HAVE_MMAP |
| 52 #include <sys/mman.h> | 59 #include <sys/mman.h> |
| 53 #endif | 60 #endif |
| 61 #include <sys/stat.h> |
| 54 #include <sys/types.h> | 62 #include <sys/types.h> |
| 55 #include <sys/stat.h> | |
| 56 #ifdef HAVE_FCNTL_H | |
| 57 #include <fcntl.h> | |
| 58 #endif | |
| 59 #ifdef HAVE_UNISTD_H | 63 #ifdef HAVE_UNISTD_H |
| 60 #include <unistd.h> | 64 #include <unistd.h> |
| 61 #endif | 65 #endif |
| 62 #include <errno.h> | |
| 63 #include <string.h> | |
| 64 | 66 |
| 65 #include <google/malloc_extension.h> | 67 #include <gperftools/malloc_extension.h> |
| 66 #include <google/malloc_hook.h> | 68 #include <gperftools/malloc_hook.h> |
| 67 #include <google/stacktrace.h> | 69 #include <gperftools/stacktrace.h> |
| 70 #include "addressmap-inl.h" |
| 68 #include "base/commandlineflags.h" | 71 #include "base/commandlineflags.h" |
| 69 #include "base/googleinit.h" | 72 #include "base/googleinit.h" |
| 70 #include "base/logging.h" | 73 #include "base/logging.h" |
| 71 #include "base/spinlock.h" | 74 #include "base/spinlock.h" |
| 72 #include "addressmap-inl.h" | |
| 73 #include "malloc_hook-inl.h" | 75 #include "malloc_hook-inl.h" |
| 74 #include "symbolize.h" | 76 #include "symbolize.h" |
| 75 | 77 |
| 76 #define TCMALLOC_USING_DEBUGALLOCATION | 78 #define TCMALLOC_USING_DEBUGALLOCATION |
| 77 #include "tcmalloc.cc" | 79 #include "tcmalloc.cc" |
| 78 | 80 |
| 79 // __THROW is defined in glibc systems. It means, counter-intuitively, | 81 // __THROW is defined in glibc systems. It means, counter-intuitively, |
| 80 // "This function will never throw an exception." It's an optional | 82 // "This function will never throw an exception." It's an optional |
| 81 // optimization tool, but we may need to use it to match glibc prototypes. | 83 // optimization tool, but we may need to use it to match glibc prototypes. |
| 82 #ifndef __THROW // I guess we're not on a glibc system | 84 #ifndef __THROW // I guess we're not on a glibc system |
| (...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 117 EnvToInt("TCMALLOC_MAX_FREE_QUEUE_SIZE", 10*1024*1024), | 119 EnvToInt("TCMALLOC_MAX_FREE_QUEUE_SIZE", 10*1024*1024), |
| 118 "If greater than 0, keep freed blocks in a queue instead of " | 120 "If greater than 0, keep freed blocks in a queue instead of " |
| 119 "releasing them to the allocator immediately. Release them when " | 121 "releasing them to the allocator immediately. Release them when " |
| 120 "the total size of all blocks in the queue would otherwise exceed " | 122 "the total size of all blocks in the queue would otherwise exceed " |
| 121 "this limit."); | 123 "this limit."); |
| 122 | 124 |
| 123 DEFINE_bool(symbolize_stacktrace, | 125 DEFINE_bool(symbolize_stacktrace, |
| 124 EnvToBool("TCMALLOC_SYMBOLIZE_STACKTRACE", true), | 126 EnvToBool("TCMALLOC_SYMBOLIZE_STACKTRACE", true), |
| 125 "Symbolize the stack trace when provided (on some error exits)"); | 127 "Symbolize the stack trace when provided (on some error exits)"); |
| 126 | 128 |
| 129 // If we are LD_PRELOAD-ed against a non-pthreads app, then |
| 130 // pthread_once won't be defined. We declare it here, for that |
| 131 // case (with weak linkage) which will cause the non-definition to |
| 132 // resolve to NULL. We can then check for NULL or not in Instance. |
| 133 extern "C" int pthread_once(pthread_once_t *, void (*)(void)) |
| 134 ATTRIBUTE_WEAK; |
| 135 |
| 127 // ========================================================================= // | 136 // ========================================================================= // |
| 128 | 137 |
| 129 // A safe version of printf() that does not do any allocation and | 138 // A safe version of printf() that does not do any allocation and |
| 130 // uses very little stack space. | 139 // uses very little stack space. |
| 131 static void TracePrintf(int fd, const char *fmt, ...) | 140 static void TracePrintf(int fd, const char *fmt, ...) |
| 132 __attribute__ ((__format__ (__printf__, 2, 3))); | 141 __attribute__ ((__format__ (__printf__, 2, 3))); |
| 133 | 142 |
| 134 // The do_* functions are defined in tcmalloc/tcmalloc.cc, | 143 // The do_* functions are defined in tcmalloc/tcmalloc.cc, |
| 135 // which is included before this file | 144 // which is included before this file |
| 136 // when TCMALLOC_FOR_DEBUGALLOCATION is defined | 145 // when TCMALLOC_FOR_DEBUGALLOCATION is defined |
| 137 #define BASE_MALLOC_NEW(size) cpp_alloc(size, false) | 146 // TODO(csilvers): get rid of these now that we are tied to tcmalloc. |
| 147 #define BASE_MALLOC_NEW do_malloc |
| 138 #define BASE_MALLOC do_malloc | 148 #define BASE_MALLOC do_malloc |
| 139 #define BASE_FREE do_free | 149 #define BASE_FREE do_free |
| 140 #define BASE_MALLOC_STATS do_malloc_stats | 150 #define BASE_MALLOC_STATS do_malloc_stats |
| 141 #define BASE_MALLOPT do_mallopt | 151 #define BASE_MALLOPT do_mallopt |
| 142 #define BASE_MALLINFO do_mallinfo | 152 #define BASE_MALLINFO do_mallinfo |
| 143 #define BASE_MALLOC_SIZE(ptr) GetSizeWithCallback(ptr, &InvalidGetAllocatedSize) | |
| 144 | 153 |
| 145 // ========================================================================= // | 154 // ========================================================================= // |
| 146 | 155 |
| 147 class MallocBlock; | 156 class MallocBlock; |
| 148 | 157 |
| 149 // A circular buffer to hold freed blocks of memory. MallocBlock::Deallocate | 158 // A circular buffer to hold freed blocks of memory. MallocBlock::Deallocate |
| 150 // (below) pushes blocks into this queue instead of returning them to the | 159 // (below) pushes blocks into this queue instead of returning them to the |
| 151 // underlying allocator immediately. See MallocBlock::Deallocate for more | 160 // underlying allocator immediately. See MallocBlock::Deallocate for more |
| 152 // information. | 161 // information. |
| 153 // | 162 // |
| 154 // We can't use an STL class for this because we need to be careful not to | 163 // We can't use an STL class for this because we need to be careful not to |
| 155 // perform any heap de-allocations in any of the code in this class, since the | 164 // perform any heap de-allocations in any of the code in this class, since the |
| 156 // code in MallocBlock::Deallocate is not re-entrant. | 165 // code in MallocBlock::Deallocate is not re-entrant. |
| 157 template <typename QueueEntry> | 166 template <typename QueueEntry> |
| 158 class FreeQueue { | 167 class FreeQueue { |
| 159 public: | 168 public: |
| 160 FreeQueue() : q_front_(0), q_back_(0) {} | 169 FreeQueue() : q_front_(0), q_back_(0) {} |
| 161 | 170 |
| 162 bool Full() { | 171 bool Full() { |
| 163 return (q_front_ + 1) % kFreeQueueSize == q_back_; | 172 return (q_front_ + 1) % kFreeQueueSize == q_back_; |
| 164 } | 173 } |
| 165 | 174 |
| 166 void Push(const QueueEntry& block) { | 175 void Push(const QueueEntry& block) { |
| 167 q_[q_front_] = block; | 176 q_[q_front_] = block; |
| 168 q_front_ = (q_front_ + 1) % kFreeQueueSize; | 177 q_front_ = (q_front_ + 1) % kFreeQueueSize; |
| 169 } | 178 } |
| 170 | 179 |
| 171 QueueEntry Pop() { | 180 QueueEntry Pop() { |
| 181 RAW_CHECK(q_back_ != q_front_, "Queue is empty"); |
| 172 const QueueEntry& ret = q_[q_back_]; | 182 const QueueEntry& ret = q_[q_back_]; |
| 173 q_back_ = (q_back_ + 1) % kFreeQueueSize; | 183 q_back_ = (q_back_ + 1) % kFreeQueueSize; |
| 174 return ret; | 184 return ret; |
| 175 } | 185 } |
| 176 | 186 |
| 177 size_t size() const { | 187 size_t size() const { |
| 178 return (q_front_ - q_back_ + kFreeQueueSize) % kFreeQueueSize; | 188 return (q_front_ - q_back_ + kFreeQueueSize) % kFreeQueueSize; |
| 179 } | 189 } |
| 180 | 190 |
| 181 private: | 191 private: |
| 182 // Maximum number of blocks kept in the free queue before being freed. | 192 // Maximum number of blocks kept in the free queue before being freed. |
| 183 static const int kFreeQueueSize = 1024; | 193 static const int kFreeQueueSize = 1024; |
| 184 | 194 |
| 185 QueueEntry q_[kFreeQueueSize]; | 195 QueueEntry q_[kFreeQueueSize]; |
| 186 int q_front_; | 196 int q_front_; |
| 187 int q_back_; | 197 int q_back_; |
| 188 }; | 198 }; |
| 189 | 199 |
| 190 struct MallocBlockQueueEntry { | 200 struct MallocBlockQueueEntry { |
| 191 MallocBlockQueueEntry() : block(NULL), size(0), | 201 MallocBlockQueueEntry() : block(NULL), size(0), |
| 192 num_deleter_pcs(0), deleter_threadid(0) {} | 202 num_deleter_pcs(0), deleter_threadid(0) {} |
| 193 MallocBlockQueueEntry(MallocBlock* b, size_t s) : block(b), size(s) { | 203 MallocBlockQueueEntry(MallocBlock* b, size_t s) : block(b), size(s) { |
| 194 if (FLAGS_max_free_queue_size != 0) { | 204 if (FLAGS_max_free_queue_size != 0 && b != NULL) { |
| 195 // Adjust the number of frames to skip (4) if you change the | 205 // Adjust the number of frames to skip (4) if you change the |
| 196 // location of this call. | 206 // location of this call. |
| 197 num_deleter_pcs = | 207 num_deleter_pcs = |
| 198 GetStackTrace(deleter_pcs, | 208 GetStackTrace(deleter_pcs, |
| 199 sizeof(deleter_pcs) / sizeof(deleter_pcs[0]), | 209 sizeof(deleter_pcs) / sizeof(deleter_pcs[0]), |
| 200 4); | 210 4); |
| 201 deleter_threadid = pthread_self(); | 211 deleter_threadid = pthread_self(); |
| 202 } else { | 212 } else { |
| 203 num_deleter_pcs = 0; | 213 num_deleter_pcs = 0; |
| 204 // Zero is an illegal pthread id by my reading of the pthread | 214 // Zero is an illegal pthread id by my reading of the pthread |
| (...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 259 // start the program (see man malloc). | 269 // start the program (see man malloc). |
| 260 | 270 |
| 261 // We use either BASE_MALLOC or mmap to make the actual allocation. In | 271 // We use either BASE_MALLOC or mmap to make the actual allocation. In |
| 262 // order to remember which one of the two was used for any block, we store an | 272 // order to remember which one of the two was used for any block, we store an |
| 263 // appropriate magic word next to the block. | 273 // appropriate magic word next to the block. |
| 264 static const int kMagicMalloc = 0xDEADBEEF; | 274 static const int kMagicMalloc = 0xDEADBEEF; |
| 265 static const int kMagicMMap = 0xABCDEFAB; | 275 static const int kMagicMMap = 0xABCDEFAB; |
| 266 | 276 |
| 267 // This array will be filled with 0xCD, for use with memcmp. | 277 // This array will be filled with 0xCD, for use with memcmp. |
| 268 static unsigned char kMagicDeletedBuffer[1024]; | 278 static unsigned char kMagicDeletedBuffer[1024]; |
| 269 static bool deleted_buffer_initialized_; | 279 static pthread_once_t deleted_buffer_initialized_; |
| 280 static bool deleted_buffer_initialized_no_pthreads_; |
| 270 | 281 |
| 271 private: // data layout | 282 private: // data layout |
| 272 | 283 |
| 273 // The four fields size1_,offset_,magic1_,alloc_type_ | 284 // The four fields size1_,offset_,magic1_,alloc_type_ |
| 274 // should together occupy a multiple of 16 bytes. (At the | 285 // should together occupy a multiple of 16 bytes. (At the |
| 275 // moment, sizeof(size_t) == 4 or 8 depending on piii vs | 286 // moment, sizeof(size_t) == 4 or 8 depending on piii vs |
| 276 // k8, and 4 of those sum to 16 or 32 bytes). | 287 // k8, and 4 of those sum to 16 or 32 bytes). |
| 277 // This, combined with BASE_MALLOC's alignment guarantees, | 288 // This, combined with BASE_MALLOC's alignment guarantees, |
| 278 // ensures that SSE types can be stored into the returned | 289 // ensures that SSE types can be stored into the returned |
| 279 // block, at &size2_. | 290 // block, at &size2_. |
| (...skipping 274 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 554 } | 565 } |
| 555 } | 566 } |
| 556 | 567 |
| 557 static size_t FreeQueueSize() { | 568 static size_t FreeQueueSize() { |
| 558 SpinLockHolder l(&free_queue_lock_); | 569 SpinLockHolder l(&free_queue_lock_); |
| 559 return free_queue_size_; | 570 return free_queue_size_; |
| 560 } | 571 } |
| 561 | 572 |
| 562 static void ProcessFreeQueue(MallocBlock* b, size_t size, | 573 static void ProcessFreeQueue(MallocBlock* b, size_t size, |
| 563 int max_free_queue_size) { | 574 int max_free_queue_size) { |
| 564 SpinLockHolder l(&free_queue_lock_); | 575 // MallocBlockQueueEntry are about 144 in size, so we can only |
| 576 // use a small array of them on the stack. |
| 577 MallocBlockQueueEntry entries[4]; |
| 578 int num_entries = 0; |
| 579 MallocBlockQueueEntry new_entry(b, size); |
| 580 free_queue_lock_.Lock(); |
| 565 if (free_queue_ == NULL) | 581 if (free_queue_ == NULL) |
| 566 free_queue_ = new FreeQueue<MallocBlockQueueEntry>; | 582 free_queue_ = new FreeQueue<MallocBlockQueueEntry>; |
| 567 RAW_CHECK(!free_queue_->Full(), "Free queue mustn't be full!"); | 583 RAW_CHECK(!free_queue_->Full(), "Free queue mustn't be full!"); |
| 568 | 584 |
| 569 if (b != NULL) { | 585 if (b != NULL) { |
| 570 free_queue_size_ += size + sizeof(MallocBlockQueueEntry); | 586 free_queue_size_ += size + sizeof(MallocBlockQueueEntry); |
| 571 MallocBlockQueueEntry new_entry(b, size); | |
| 572 free_queue_->Push(new_entry); | 587 free_queue_->Push(new_entry); |
| 573 } | 588 } |
| 574 | 589 |
| 575 // Free blocks until the total size of unfreed blocks no longer exceeds | 590 // Free blocks until the total size of unfreed blocks no longer exceeds |
| 576 // max_free_queue_size, and the free queue has at least one free | 591 // max_free_queue_size, and the free queue has at least one free |
| 577 // space in it. | 592 // space in it. |
| 578 while (free_queue_size_ > max_free_queue_size || free_queue_->Full()) { | 593 while (free_queue_size_ > max_free_queue_size || free_queue_->Full()) { |
| 579 MallocBlockQueueEntry cur = free_queue_->Pop(); | 594 RAW_CHECK(num_entries < arraysize(entries), "entries array overflow"); |
| 580 CheckForDanglingWrites(cur); | 595 entries[num_entries] = free_queue_->Pop(); |
| 581 free_queue_size_ -= cur.size + sizeof(MallocBlockQueueEntry); | 596 free_queue_size_ -= |
| 582 BASE_FREE(cur.block); | 597 entries[num_entries].size + sizeof(MallocBlockQueueEntry); |
| 598 num_entries++; |
| 599 if (num_entries == arraysize(entries)) { |
| 600 // The queue will not be full at this point, so it is ok to |
| 601 // release the lock. The queue may still contain more than |
| 602 // max_free_queue_size, but this is not a strict invariant. |
| 603 free_queue_lock_.Unlock(); |
| 604 for (int i = 0; i < num_entries; i++) { |
| 605 CheckForDanglingWrites(entries[i]); |
| 606 BASE_FREE(entries[i].block); |
| 607 } |
| 608 num_entries = 0; |
| 609 free_queue_lock_.Lock(); |
| 610 } |
| 583 } | 611 } |
| 584 RAW_CHECK(free_queue_size_ >= 0, "Free queue size went negative!"); | 612 RAW_CHECK(free_queue_size_ >= 0, "Free queue size went negative!"); |
| 613 free_queue_lock_.Unlock(); |
| 614 for (int i = 0; i < num_entries; i++) { |
| 615 CheckForDanglingWrites(entries[i]); |
| 616 BASE_FREE(entries[i].block); |
| 617 } |
| 618 } |
| 619 |
| 620 static void InitDeletedBuffer() { |
| 621 memset(kMagicDeletedBuffer, kMagicDeletedByte, sizeof(kMagicDeletedBuffer)); |
| 622 deleted_buffer_initialized_no_pthreads_ = true; |
| 585 } | 623 } |
| 586 | 624 |
| 587 static void CheckForDanglingWrites(const MallocBlockQueueEntry& queue_entry) { | 625 static void CheckForDanglingWrites(const MallocBlockQueueEntry& queue_entry) { |
| 588 // Initialize the buffer if necessary. | 626 // Initialize the buffer if necessary. |
| 589 if (!deleted_buffer_initialized_) { | 627 if (pthread_once) |
| 590 // This is threadsafe. We hold free_queue_lock_. | 628 pthread_once(&deleted_buffer_initialized_, &InitDeletedBuffer); |
| 591 memset(kMagicDeletedBuffer, 0xcd, sizeof(kMagicDeletedBuffer)); | 629 if (!deleted_buffer_initialized_no_pthreads_) { |
| 592 deleted_buffer_initialized_ = true; | 630 // This will be the case on systems that don't link in pthreads, |
| 631 // including on FreeBSD where pthread_once has a non-zero address |
| 632 // (but doesn't do anything) even when pthreads isn't linked in. |
| 633 InitDeletedBuffer(); |
| 593 } | 634 } |
| 594 | 635 |
| 595 const unsigned char* p = | 636 const unsigned char* p = |
| 596 reinterpret_cast<unsigned char*>(queue_entry.block); | 637 reinterpret_cast<unsigned char*>(queue_entry.block); |
| 597 | 638 |
| 598 static const size_t size_of_buffer = sizeof(kMagicDeletedBuffer); | 639 static const size_t size_of_buffer = sizeof(kMagicDeletedBuffer); |
| 599 const size_t size = queue_entry.size; | 640 const size_t size = queue_entry.size; |
| 600 const size_t buffers = size / size_of_buffer; | 641 const size_t buffers = size / size_of_buffer; |
| 601 const size_t remainder = size % size_of_buffer; | 642 const size_t remainder = size % size_of_buffer; |
| 602 size_t buffer_idx; | 643 size_t buffer_idx; |
| (...skipping 15 matching lines...) Expand all Loading... |
| 618 RAW_LOG(ERROR, | 659 RAW_LOG(ERROR, |
| 619 "Found a corrupted memory buffer in MallocBlock (may be offset " | 660 "Found a corrupted memory buffer in MallocBlock (may be offset " |
| 620 "from user ptr): buffer index: %zd, buffer ptr: %p, size of " | 661 "from user ptr): buffer index: %zd, buffer ptr: %p, size of " |
| 621 "buffer: %zd", buffer_idx, buffer, size_of_buffer); | 662 "buffer: %zd", buffer_idx, buffer, size_of_buffer); |
| 622 | 663 |
| 623 // The magic deleted buffer should only be 1024 bytes, but in case | 664 // The magic deleted buffer should only be 1024 bytes, but in case |
| 624 // this changes, let's put an upper limit on the number of debug | 665 // this changes, let's put an upper limit on the number of debug |
| 625 // lines we'll output: | 666 // lines we'll output: |
| 626 if (size_of_buffer <= 1024) { | 667 if (size_of_buffer <= 1024) { |
| 627 for (int i = 0; i < size_of_buffer; ++i) { | 668 for (int i = 0; i < size_of_buffer; ++i) { |
| 628 if (buffer[i] != 0xcd) { | 669 if (buffer[i] != kMagicDeletedByte) { |
| 629 RAW_LOG(ERROR, "Buffer byte %d is 0x%02x (should be 0xcd).", | 670 RAW_LOG(ERROR, "Buffer byte %d is 0x%02x (should be 0x%02x).", |
| 630 i, buffer[i]); | 671 i, buffer[i], kMagicDeletedByte); |
| 631 } | 672 } |
| 632 } | 673 } |
| 633 } else { | 674 } else { |
| 634 RAW_LOG(ERROR, "Buffer too large to print corruption."); | 675 RAW_LOG(ERROR, "Buffer too large to print corruption."); |
| 635 } | 676 } |
| 636 | 677 |
| 637 const MallocBlock* b = queue_entry.block; | 678 const MallocBlock* b = queue_entry.block; |
| 638 const size_t size = queue_entry.size; | 679 const size_t size = queue_entry.size; |
| 639 if (queue_entry.num_deleter_pcs > 0) { | 680 if (queue_entry.num_deleter_pcs > 0) { |
| 640 TracePrintf(STDERR_FILENO, "Deleted by thread %p\n", | 681 TracePrintf(STDERR_FILENO, "Deleted by thread %p\n", |
| (...skipping 23 matching lines...) Expand all Loading... |
| 664 RAW_LOG(ERROR, | 705 RAW_LOG(ERROR, |
| 665 "Skipping the printing of the deleter's stack! Its stack was " | 706 "Skipping the printing of the deleter's stack! Its stack was " |
| 666 "not found; either the corruption occurred too early in " | 707 "not found; either the corruption occurred too early in " |
| 667 "execution to obtain a stack trace or --max_free_queue_size was " | 708 "execution to obtain a stack trace or --max_free_queue_size was " |
| 668 "set to 0."); | 709 "set to 0."); |
| 669 } | 710 } |
| 670 | 711 |
| 671 RAW_LOG(FATAL, | 712 RAW_LOG(FATAL, |
| 672 "Memory was written to after being freed. MallocBlock: %p, user " | 713 "Memory was written to after being freed. MallocBlock: %p, user " |
| 673 "ptr: %p, size: %zd. If you can't find the source of the error, " | 714 "ptr: %p, size: %zd. If you can't find the source of the error, " |
| 674 "try using valgrind or purify, or study the output of the " | 715 "try using ASan (http://code.google.com/p/address-sanitizer/), " |
| 675 "deleter's stack printed above.", b, b->data_addr(), size); | 716 "Valgrind, or Purify, or study the " |
| 717 "output of the deleter's stack printed above.", |
| 718 b, b->data_addr(), size); |
| 676 } | 719 } |
| 677 | 720 |
| 678 static MallocBlock* FromRawPointer(void* p) { | 721 static MallocBlock* FromRawPointer(void* p) { |
| 679 const size_t data_offset = MallocBlock::data_offset(); | 722 const size_t data_offset = MallocBlock::data_offset(); |
| 680 // Find the header just before client's memory. | 723 // Find the header just before client's memory. |
| 681 MallocBlock *mb = reinterpret_cast<MallocBlock *>( | 724 MallocBlock *mb = reinterpret_cast<MallocBlock *>( |
| 682 reinterpret_cast<char *>(p) - data_offset); | 725 reinterpret_cast<char *>(p) - data_offset); |
| 683 // If mb->alloc_type_ is kMagicDeletedSizeT, we're not an ok pointer. | 726 // If mb->alloc_type_ is kMagicDeletedSizeT, we're not an ok pointer. |
| 684 if (mb->alloc_type_ == kMagicDeletedSizeT) { | 727 if (mb->alloc_type_ == kMagicDeletedSizeT) { |
| 685 RAW_LOG(FATAL, "memory allocation bug: object at %p has been already" | 728 RAW_LOG(FATAL, "memory allocation bug: object at %p has been already" |
| 686 " deallocated; or else a word before the object has been" | 729 " deallocated; or else a word before the object has been" |
| 687 " corrupted (memory stomping bug)", p); | 730 " corrupted (memory stomping bug)", p); |
| 688 } | 731 } |
| 689 // If mb->offset_ is zero (common case), mb is the real header. If | 732 // If mb->offset_ is zero (common case), mb is the real header. If |
| 690 // mb->offset_ is non-zero, this block was allocated by memalign, and | 733 // mb->offset_ is non-zero, this block was allocated by memalign, and |
| 691 // mb->offset_ is the distance backwards to the real header from mb, | 734 // mb->offset_ is the distance backwards to the real header from mb, |
| 692 // which is a fake header. The following subtraction works for both zero | 735 // which is a fake header. The following subtraction works for both zero |
| 693 // and non-zero values. | 736 // and non-zero values. |
| 694 return reinterpret_cast<MallocBlock *>( | 737 return reinterpret_cast<MallocBlock *>( |
| 695 reinterpret_cast<char *>(mb) - mb->offset_); | 738 reinterpret_cast<char *>(mb) - mb->offset_); |
| 696 } | 739 } |
| 697 static const MallocBlock* FromRawPointer(const void* p) { | 740 static const MallocBlock* FromRawPointer(const void* p) { |
| 698 // const-safe version: we just cast about | 741 // const-safe version: we just cast about |
| 699 return FromRawPointer(const_cast<void*>(p)); | 742 return FromRawPointer(const_cast<void*>(p)); |
| 700 } | 743 } |
| 701 | 744 |
| 702 void Check(int type) { | 745 void Check(int type) const { |
| 703 alloc_map_lock_.Lock(); | 746 alloc_map_lock_.Lock(); |
| 704 CheckLocked(type); | 747 CheckLocked(type); |
| 705 alloc_map_lock_.Unlock(); | 748 alloc_map_lock_.Unlock(); |
| 706 } | 749 } |
| 707 | 750 |
| 708 static bool CheckEverything() { | 751 static bool CheckEverything() { |
| 709 alloc_map_lock_.Lock(); | 752 alloc_map_lock_.Lock(); |
| 710 if (alloc_map_ != NULL) alloc_map_->Iterate(CheckCallback, 0); | 753 if (alloc_map_ != NULL) alloc_map_->Iterate(CheckCallback, 0); |
| 711 alloc_map_lock_.Unlock(); | 754 alloc_map_lock_.Unlock(); |
| 712 return true; // if we get here, we're okay | 755 return true; // if we get here, we're okay |
| (...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 770 const int MallocBlock::kMagicMMap; | 813 const int MallocBlock::kMagicMMap; |
| 771 | 814 |
| 772 MallocBlock::AllocMap* MallocBlock::alloc_map_ = NULL; | 815 MallocBlock::AllocMap* MallocBlock::alloc_map_ = NULL; |
| 773 SpinLock MallocBlock::alloc_map_lock_(SpinLock::LINKER_INITIALIZED); | 816 SpinLock MallocBlock::alloc_map_lock_(SpinLock::LINKER_INITIALIZED); |
| 774 | 817 |
| 775 FreeQueue<MallocBlockQueueEntry>* MallocBlock::free_queue_ = NULL; | 818 FreeQueue<MallocBlockQueueEntry>* MallocBlock::free_queue_ = NULL; |
| 776 size_t MallocBlock::free_queue_size_ = 0; | 819 size_t MallocBlock::free_queue_size_ = 0; |
| 777 SpinLock MallocBlock::free_queue_lock_(SpinLock::LINKER_INITIALIZED); | 820 SpinLock MallocBlock::free_queue_lock_(SpinLock::LINKER_INITIALIZED); |
| 778 | 821 |
| 779 unsigned char MallocBlock::kMagicDeletedBuffer[1024]; | 822 unsigned char MallocBlock::kMagicDeletedBuffer[1024]; |
| 780 bool MallocBlock::deleted_buffer_initialized_ = false; | 823 pthread_once_t MallocBlock::deleted_buffer_initialized_ = PTHREAD_ONCE_INIT; |
| 824 bool MallocBlock::deleted_buffer_initialized_no_pthreads_ = false; |
| 781 | 825 |
| 782 const char* const MallocBlock::kAllocName[] = { | 826 const char* const MallocBlock::kAllocName[] = { |
| 783 "malloc", | 827 "malloc", |
| 784 "new", | 828 "new", |
| 785 "new []", | 829 "new []", |
| 786 NULL, | 830 NULL, |
| 787 }; | 831 }; |
| 788 | 832 |
| 789 const char* const MallocBlock::kDeallocName[] = { | 833 const char* const MallocBlock::kDeallocName[] = { |
| 790 "free", | 834 "free", |
| (...skipping 173 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 964 if (result && (strcmp(name, "generic.current_allocated_bytes") == 0)) { | 1008 if (result && (strcmp(name, "generic.current_allocated_bytes") == 0)) { |
| 965 // Subtract bytes kept in the free queue | 1009 // Subtract bytes kept in the free queue |
| 966 size_t qsize = MallocBlock::FreeQueueSize(); | 1010 size_t qsize = MallocBlock::FreeQueueSize(); |
| 967 if (*value >= qsize) { | 1011 if (*value >= qsize) { |
| 968 *value -= qsize; | 1012 *value -= qsize; |
| 969 } | 1013 } |
| 970 } | 1014 } |
| 971 return result; | 1015 return result; |
| 972 } | 1016 } |
| 973 | 1017 |
| 974 virtual bool VerifyNewMemory(void* p) { | 1018 virtual bool VerifyNewMemory(const void* p) { |
| 975 if (p) MallocBlock::FromRawPointer(p)->Check(MallocBlock::kNewType); | 1019 if (p) MallocBlock::FromRawPointer(p)->Check(MallocBlock::kNewType); |
| 976 return true; | 1020 return true; |
| 977 } | 1021 } |
| 978 | 1022 |
| 979 virtual bool VerifyArrayNewMemory(void* p) { | 1023 virtual bool VerifyArrayNewMemory(const void* p) { |
| 980 if (p) MallocBlock::FromRawPointer(p)->Check(MallocBlock::kArrayNewType); | 1024 if (p) MallocBlock::FromRawPointer(p)->Check(MallocBlock::kArrayNewType); |
| 981 return true; | 1025 return true; |
| 982 } | 1026 } |
| 983 | 1027 |
| 984 virtual bool VerifyMallocMemory(void* p) { | 1028 virtual bool VerifyMallocMemory(const void* p) { |
| 985 if (p) MallocBlock::FromRawPointer(p)->Check(MallocBlock::kMallocType); | 1029 if (p) MallocBlock::FromRawPointer(p)->Check(MallocBlock::kMallocType); |
| 986 return true; | 1030 return true; |
| 987 } | 1031 } |
| 988 | 1032 |
| 989 virtual bool VerifyAllMemory() { | 1033 virtual bool VerifyAllMemory() { |
| 990 return MallocBlock::CheckEverything(); | 1034 return MallocBlock::CheckEverything(); |
| 991 } | 1035 } |
| 992 | 1036 |
| 993 virtual bool MallocMemoryStats(int* blocks, size_t* total, | 1037 virtual bool MallocMemoryStats(int* blocks, size_t* total, |
| 994 int histogram[kMallocHistogramSize]) { | 1038 int histogram[kMallocHistogramSize]) { |
| 995 return MallocBlock::MemoryStats(blocks, total, histogram); | 1039 return MallocBlock::MemoryStats(blocks, total, histogram); |
| 996 } | 1040 } |
| 997 | 1041 |
| 998 virtual size_t GetAllocatedSize(void* p) { | 1042 virtual size_t GetEstimatedAllocatedSize(size_t size) { |
| 1043 return size; |
| 1044 } |
| 1045 |
| 1046 virtual size_t GetAllocatedSize(const void* p) { |
| 999 if (p) { | 1047 if (p) { |
| 1048 RAW_CHECK(GetOwnership(p) != MallocExtension::kNotOwned, |
| 1049 "ptr not allocated by tcmalloc"); |
| 1000 return MallocBlock::FromRawPointer(p)->data_size(); | 1050 return MallocBlock::FromRawPointer(p)->data_size(); |
| 1001 } | 1051 } |
| 1002 return 0; | 1052 return 0; |
| 1003 } | 1053 } |
| 1004 virtual size_t GetEstimatedAllocatedSize(size_t size) { | 1054 |
| 1005 return size; | 1055 virtual MallocExtension::Ownership GetOwnership(const void* p) { |
| 1056 if (p) { |
| 1057 const MallocBlock* mb = MallocBlock::FromRawPointer(p); |
| 1058 return TCMallocImplementation::GetOwnership(mb); |
| 1059 } |
| 1060 return MallocExtension::kNotOwned; // nobody owns NULL |
| 1006 } | 1061 } |
| 1007 | 1062 |
| 1008 virtual void GetFreeListSizes(vector<MallocExtension::FreeListInfo>* v) { | 1063 virtual void GetFreeListSizes(vector<MallocExtension::FreeListInfo>* v) { |
| 1009 static const char* kDebugFreeQueue = "debug.free_queue"; | 1064 static const char* kDebugFreeQueue = "debug.free_queue"; |
| 1010 | 1065 |
| 1011 TCMallocImplementation::GetFreeListSizes(v); | 1066 TCMallocImplementation::GetFreeListSizes(v); |
| 1012 | 1067 |
| 1013 MallocExtension::FreeListInfo i; | 1068 MallocExtension::FreeListInfo i; |
| 1014 i.type = kDebugFreeQueue; | 1069 i.type = kDebugFreeQueue; |
| 1015 i.min_object_size = 0; | 1070 i.min_object_size = 0; |
| 1016 i.max_object_size = numeric_limits<size_t>::max(); | 1071 i.max_object_size = numeric_limits<size_t>::max(); |
| 1017 i.total_bytes_free = MallocBlock::FreeQueueSize(); | 1072 i.total_bytes_free = MallocBlock::FreeQueueSize(); |
| 1018 v->push_back(i); | 1073 v->push_back(i); |
| 1019 } | 1074 } |
| 1020 | 1075 |
| 1021 }; | 1076 }; |
| 1022 | 1077 |
| 1023 static DebugMallocImplementation debug_malloc_implementation; | 1078 static DebugMallocImplementation debug_malloc_implementation; |
| 1024 | 1079 |
| 1025 REGISTER_MODULE_INITIALIZER(debugallocation, { | 1080 REGISTER_MODULE_INITIALIZER(debugallocation, { |
| 1026 // Either we or valgrind will control memory management. We | 1081 // Either we or valgrind will control memory management. We |
| 1027 // register our extension if we're the winner. | 1082 // register our extension if we're the winner. Otherwise let |
| 1028 if (RunningOnValgrind()) { | 1083 // Valgrind use its own malloc (so don't register our extension). |
| 1029 // Let Valgrind uses its own malloc (so don't register our extension). | 1084 if (!RunningOnValgrind()) { |
| 1030 } else { | |
| 1031 MallocExtension::Register(&debug_malloc_implementation); | 1085 MallocExtension::Register(&debug_malloc_implementation); |
| 1086 } |
| 1087 }); |
| 1088 |
| 1089 REGISTER_MODULE_DESTRUCTOR(debugallocation, { |
| 1090 if (!RunningOnValgrind()) { |
| 1032 // When the program exits, check all blocks still in the free | 1091 // When the program exits, check all blocks still in the free |
| 1033 // queue for corruption. | 1092 // queue for corruption. |
| 1034 atexit(DanglingWriteChecker); | 1093 DanglingWriteChecker(); |
| 1035 } | 1094 } |
| 1036 }); | 1095 }); |
| 1037 | 1096 |
| 1038 // ========================================================================= // | 1097 // ========================================================================= // |
| 1039 | 1098 |
| 1040 // This is mostly the same as cpp_alloc in tcmalloc.cc. | 1099 // This is mostly the same as cpp_alloc in tcmalloc.cc. |
| 1041 // TODO(csilvers): write a wrapper for new-handler so we don't have to | 1100 // TODO(csilvers): change Allocate() above to call cpp_alloc, so we |
| 1042 // copy this code so much. | 1101 // don't have to reproduce the logic here. To make tc_new_mode work |
| 1102 // properly, I think we'll need to separate out the logic of throwing |
| 1103 // from the logic of calling the new-handler. |
| 1043 inline void* debug_cpp_alloc(size_t size, int new_type, bool nothrow) { | 1104 inline void* debug_cpp_alloc(size_t size, int new_type, bool nothrow) { |
| 1044 for (;;) { | 1105 for (;;) { |
| 1045 void* p = DebugAllocate(size, new_type); | 1106 void* p = DebugAllocate(size, new_type); |
| 1046 #ifdef PREANSINEW | 1107 #ifdef PREANSINEW |
| 1047 return p; | 1108 return p; |
| 1048 #else | 1109 #else |
| 1049 if (p == NULL) { // allocation failed | 1110 if (p == NULL) { // allocation failed |
| 1050 // Get the current new handler. NB: this function is not | 1111 // Get the current new handler. NB: this function is not |
| 1051 // thread-safe. We make a feeble stab at making it so here, but | 1112 // thread-safe. We make a feeble stab at making it so here, but |
| 1052 // this lock only protects against tcmalloc interfering with | 1113 // this lock only protects against tcmalloc interfering with |
| (...skipping 295 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1348 return BASE_MALLOPT(cmd, value); | 1409 return BASE_MALLOPT(cmd, value); |
| 1349 } | 1410 } |
| 1350 | 1411 |
#ifdef HAVE_STRUCT_MALLINFO
// Reports allocator statistics in the traditional glibc mallinfo
// layout, delegating to the base allocator's implementation.
extern "C" PERFTOOLS_DLL_DECL struct mallinfo tc_mallinfo(void) __THROW {
  struct mallinfo result = BASE_MALLINFO();
  return result;
}
#endif
| 1356 | 1417 |
| 1357 extern "C" PERFTOOLS_DLL_DECL size_t tc_malloc_size(void* ptr) __THROW { | 1418 extern "C" PERFTOOLS_DLL_DECL size_t tc_malloc_size(void* ptr) __THROW { |
| 1358 if (!ptr) { | 1419 return MallocExtension::instance()->GetAllocatedSize(ptr); |
| 1359 return 0; | |
| 1360 } | |
| 1361 MallocBlock* mb = MallocBlock::FromRawPointer(ptr); | |
| 1362 // This is just to make sure we actually own mb (and ptr). We don't | |
| 1363 // use the actual value, just the 'exception' it raises on error. | |
| 1364 (void)BASE_MALLOC_SIZE(mb); | |
| 1365 return mb->data_size(); | |
| 1366 } | 1420 } |
| 1367 | |
| 1368 // Override __libc_memalign in libc on linux boxes. | |
| 1369 // They have a bug in libc that causes them (very rarely) to allocate | |
| 1370 // with __libc_memalign() yet deallocate with free(). | |
| 1371 // This function is an exception to the rule of calling MallocHook method | |
| 1372 // from the stack frame of the allocation function; | |
| 1373 // heap-checker handles this special case explicitly. | |
| 1374 static void *MemalignOverride(size_t align, size_t size, const void *caller) | |
| 1375 __THROW ATTRIBUTE_SECTION(google_malloc); | |
| 1376 | |
| 1377 static void *MemalignOverride(size_t align, size_t size, const void *caller) | |
| 1378 __THROW { | |
| 1379 void *p = do_debug_memalign_or_debug_cpp_memalign(align, size); | |
| 1380 MallocHook::InvokeNewHook(p, size); | |
| 1381 return p; | |
| 1382 } | |
| 1383 void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride; | |
| OLD | NEW |