| Index: third_party/tcmalloc/chromium/src/heap-profile-table.cc
|
| diff --git a/third_party/tcmalloc/chromium/src/heap-profile-table.cc b/third_party/tcmalloc/chromium/src/heap-profile-table.cc
|
| index a1da3885ef7864bd451fe2595df3fd2626c8c5d3..1ef18586e4d050e6e0d1ded2b5ca2df256c058be 100644
|
| --- a/third_party/tcmalloc/chromium/src/heap-profile-table.cc
|
| +++ b/third_party/tcmalloc/chromium/src/heap-profile-table.cc
|
| @@ -61,8 +61,9 @@
|
| #include "base/logging.h"
|
| #include "raw_printer.h"
|
| #include "symbolize.h"
|
| -#include <google/stacktrace.h>
|
| -#include <google/malloc_hook.h>
|
| +#include <gperftools/stacktrace.h>
|
| +#include <gperftools/malloc_hook.h>
|
| +#include "memory_region_map.h"
|
| #include "base/commandlineflags.h"
|
| #include "base/logging.h" // for the RawFD I/O commands
|
| #include "base/sysinfo.h"
|
| @@ -98,7 +99,8 @@ const char HeapProfileTable::kFileExt[] = ".heap";
|
|
|
| //----------------------------------------------------------------------
|
|
|
| -static const int kHashTableSize = 179999; // Size for table_.
|
| +// Size for alloc_table_ and mmap_table_.
|
| +static const int kHashTableSize = 179999;
|
| /*static*/ const int HeapProfileTable::kMaxStackDepth;
|
|
|
| //----------------------------------------------------------------------
|
| @@ -122,38 +124,60 @@ static bool ByAllocatedSpace(HeapProfileTable::Stats* a,
|
|
|
| HeapProfileTable::HeapProfileTable(Allocator alloc, DeAllocator dealloc)
|
| : alloc_(alloc), dealloc_(dealloc) {
|
| - // Make the table
|
| - const int table_bytes = kHashTableSize * sizeof(*table_);
|
| - table_ = reinterpret_cast<Bucket**>(alloc_(table_bytes));
|
| - memset(table_, 0, table_bytes);
|
| - // Make allocation map
|
| - allocation_ =
|
| - new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_);
|
| - // init the rest:
|
| + // Initialize the overall profile stats.
|
| memset(&total_, 0, sizeof(total_));
|
| - num_buckets_ = 0;
|
| +
|
| + // Make the malloc table.
|
| + const int alloc_table_bytes = kHashTableSize * sizeof(*alloc_table_);
|
| + alloc_table_ = reinterpret_cast<Bucket**>(alloc_(alloc_table_bytes));
|
| + memset(alloc_table_, 0, alloc_table_bytes);
|
| + num_alloc_buckets_ = 0;
|
| +
|
| + // Initialize the mmap table.
|
| + mmap_table_ = NULL;
|
| + num_available_mmap_buckets_ = 0;
|
| +
|
| + // Make malloc and mmap allocation maps.
|
| + alloc_address_map_ =
|
| + new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_);
|
| + mmap_address_map_ = NULL;
|
| }
|
|
|
| HeapProfileTable::~HeapProfileTable() {
|
| - // free allocation map
|
| - allocation_->~AllocationMap();
|
| - dealloc_(allocation_);
|
| - allocation_ = NULL;
|
| - // free hash table
|
| - for (int b = 0; b < kHashTableSize; b++) {
|
| - for (Bucket* x = table_[b]; x != 0; /**/) {
|
| - Bucket* b = x;
|
| - x = x->next;
|
| - dealloc_(b->stack);
|
| - dealloc_(b);
|
| + DeallocateBucketTable(alloc_table_);
|
| + alloc_table_ = NULL;
|
| + DeallocateBucketTable(mmap_table_);
|
| + mmap_table_ = NULL;
|
| + DeallocateAllocationMap(alloc_address_map_);
|
| + alloc_address_map_ = NULL;
|
| + DeallocateAllocationMap(mmap_address_map_);
|
| + mmap_address_map_ = NULL;
|
| +}
|
| +
|
| +void HeapProfileTable::DeallocateAllocationMap(AllocationMap* allocation) {
|
| + if (allocation != NULL) {
|
| +    allocation->~AllocationMap();
|
| + dealloc_(allocation);
|
| + }
|
| +}
|
| +
|
| +void HeapProfileTable::DeallocateBucketTable(Bucket** table) {
|
| + if (table != NULL) {
|
| + for (int b = 0; b < kHashTableSize; b++) {
|
| + for (Bucket* x = table[b]; x != 0; /**/) {
|
| + Bucket* b = x;
|
| + x = x->next;
|
| + dealloc_(b->stack);
|
| + dealloc_(b);
|
| + }
|
| }
|
| + dealloc_(table);
|
| }
|
| - dealloc_(table_);
|
| - table_ = NULL;
|
| }
|
|
|
| -HeapProfileTable::Bucket* HeapProfileTable::GetBucket(int depth,
|
| - const void* const key[]) {
|
| +HeapProfileTable::Bucket* HeapProfileTable::GetBucket(
|
| + int depth, const void* const key[], Bucket** table,
|
| + int* bucket_count) {
|
| // Make hash-value
|
| uintptr_t h = 0;
|
| for (int i = 0; i < depth; i++) {
|
| @@ -166,7 +190,7 @@ HeapProfileTable::Bucket* HeapProfileTable::GetBucket(int depth,
|
|
|
| // Lookup stack trace in table
|
| unsigned int buck = ((unsigned int) h) % kHashTableSize;
|
| - for (Bucket* b = table_[buck]; b != 0; b = b->next) {
|
| + for (Bucket* b = table[buck]; b != 0; b = b->next) {
|
| if ((b->hash == h) &&
|
| (b->depth == depth) &&
|
| equal(key, key + depth, b->stack)) {
|
| @@ -183,24 +207,25 @@ HeapProfileTable::Bucket* HeapProfileTable::GetBucket(int depth,
|
| b->hash = h;
|
| b->depth = depth;
|
| b->stack = kcopy;
|
| - b->next = table_[buck];
|
| - table_[buck] = b;
|
| - num_buckets_++;
|
| + b->next = table[buck];
|
| + table[buck] = b;
|
| + if (bucket_count != NULL) {
|
| + ++(*bucket_count);
|
| + }
|
| return b;
|
| }
|
|
|
| -void HeapProfileTable::RecordAlloc(const void* ptr, size_t bytes,
|
| - int skip_count) {
|
| - void* key[kMaxStackDepth];
|
| - int depth = MallocHook::GetCallerStackTrace(
|
| - key, kMaxStackDepth, kStripFrames + skip_count + 1);
|
| - RecordAllocWithStack(ptr, bytes, depth, key);
|
| +int HeapProfileTable::GetCallerStackTrace(
|
| + int skip_count, void* stack[kMaxStackDepth]) {
|
| + return MallocHook::GetCallerStackTrace(
|
| + stack, kMaxStackDepth, kStripFrames + skip_count + 1);
|
| }
|
|
|
| -void HeapProfileTable::RecordAllocWithStack(
|
| +void HeapProfileTable::RecordAlloc(
|
| const void* ptr, size_t bytes, int stack_depth,
|
| const void* const call_stack[]) {
|
| - Bucket* b = GetBucket(stack_depth, call_stack);
|
| + Bucket* b = GetBucket(stack_depth, call_stack, alloc_table_,
|
| + &num_alloc_buckets_);
|
| b->allocs++;
|
| b->alloc_size += bytes;
|
| total_.allocs++;
|
| @@ -209,12 +234,12 @@ void HeapProfileTable::RecordAllocWithStack(
|
| AllocValue v;
|
| v.set_bucket(b); // also did set_live(false); set_ignore(false)
|
| v.bytes = bytes;
|
| - allocation_->Insert(ptr, v);
|
| + alloc_address_map_->Insert(ptr, v);
|
| }
|
|
|
| void HeapProfileTable::RecordFree(const void* ptr) {
|
| AllocValue v;
|
| - if (allocation_->FindAndRemove(ptr, &v)) {
|
| + if (alloc_address_map_->FindAndRemove(ptr, &v)) {
|
| Bucket* b = v.bucket();
|
| b->frees++;
|
| b->free_size += v.bytes;
|
| @@ -224,14 +249,14 @@ void HeapProfileTable::RecordFree(const void* ptr) {
|
| }
|
|
|
| bool HeapProfileTable::FindAlloc(const void* ptr, size_t* object_size) const {
|
| - const AllocValue* alloc_value = allocation_->Find(ptr);
|
| + const AllocValue* alloc_value = alloc_address_map_->Find(ptr);
|
| if (alloc_value != NULL) *object_size = alloc_value->bytes;
|
| return alloc_value != NULL;
|
| }
|
|
|
| bool HeapProfileTable::FindAllocDetails(const void* ptr,
|
| AllocInfo* info) const {
|
| - const AllocValue* alloc_value = allocation_->Find(ptr);
|
| + const AllocValue* alloc_value = alloc_address_map_->Find(ptr);
|
| if (alloc_value != NULL) {
|
| info->object_size = alloc_value->bytes;
|
| info->call_stack = alloc_value->bucket()->stack;
|
| @@ -245,13 +270,13 @@ bool HeapProfileTable::FindInsideAlloc(const void* ptr,
|
| const void** object_ptr,
|
| size_t* object_size) const {
|
| const AllocValue* alloc_value =
|
| - allocation_->FindInside(&AllocValueSize, max_size, ptr, object_ptr);
|
| + alloc_address_map_->FindInside(&AllocValueSize, max_size, ptr, object_ptr);
|
| if (alloc_value != NULL) *object_size = alloc_value->bytes;
|
| return alloc_value != NULL;
|
| }
|
|
|
| bool HeapProfileTable::MarkAsLive(const void* ptr) {
|
| - AllocValue* alloc = allocation_->FindMutable(ptr);
|
| + AllocValue* alloc = alloc_address_map_->FindMutable(ptr);
|
| if (alloc && !alloc->live()) {
|
| alloc->set_live(true);
|
| return true;
|
| @@ -260,7 +285,7 @@ bool HeapProfileTable::MarkAsLive(const void* ptr) {
|
| }
|
|
|
| void HeapProfileTable::MarkAsIgnored(const void* ptr) {
|
| - AllocValue* alloc = allocation_->FindMutable(ptr);
|
| + AllocValue* alloc = alloc_address_map_->FindMutable(ptr);
|
| if (alloc) {
|
| alloc->set_ignore(true);
|
| }
|
| @@ -301,27 +326,81 @@ int HeapProfileTable::UnparseBucket(const Bucket& b,
|
|
|
| HeapProfileTable::Bucket**
|
| HeapProfileTable::MakeSortedBucketList() const {
|
| - Bucket** list =
|
| - reinterpret_cast<Bucket**>(alloc_(sizeof(Bucket) * num_buckets_));
|
| +  Bucket** list = reinterpret_cast<Bucket**>(alloc_(sizeof(Bucket*) *
|
| + (num_alloc_buckets_ + num_available_mmap_buckets_)));
|
| +
|
| + RAW_DCHECK(mmap_table_ != NULL || num_available_mmap_buckets_ == 0, "");
|
|
|
| int n = 0;
|
| +
|
| for (int b = 0; b < kHashTableSize; b++) {
|
| - for (Bucket* x = table_[b]; x != 0; x = x->next) {
|
| + for (Bucket* x = alloc_table_[b]; x != 0; x = x->next) {
|
| list[n++] = x;
|
| }
|
| }
|
| - RAW_DCHECK(n == num_buckets_, "");
|
| + RAW_DCHECK(n == num_alloc_buckets_, "");
|
| +
|
| + if (mmap_table_ != NULL) {
|
| + for (int b = 0; b < kHashTableSize; b++) {
|
| + for (Bucket* x = mmap_table_[b]; x != 0; x = x->next) {
|
| + list[n++] = x;
|
| + }
|
| + }
|
| + }
|
| + RAW_DCHECK(n == num_alloc_buckets_ + num_available_mmap_buckets_, "");
|
|
|
| - sort(list, list + num_buckets_, ByAllocatedSpace);
|
| + sort(list, list + num_alloc_buckets_ + num_available_mmap_buckets_,
|
| + ByAllocatedSpace);
|
|
|
| return list;
|
| }
|
|
|
| +void HeapProfileTable::RefreshMMapData() {
|
| + // Make the table
|
| + static const int mmap_table_bytes = kHashTableSize * sizeof(*mmap_table_);
|
| + if (mmap_table_ == NULL) {
|
| + mmap_table_ = reinterpret_cast<Bucket**>(alloc_(mmap_table_bytes));
|
| + memset(mmap_table_, 0, mmap_table_bytes);
|
| + }
|
| + num_available_mmap_buckets_ = 0;
|
| +
|
| + ClearMMapData();
|
| + mmap_address_map_ =
|
| + new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_);
|
| +
|
| + MemoryRegionMap::LockHolder l;
|
| + for (MemoryRegionMap::RegionIterator r =
|
| + MemoryRegionMap::BeginRegionLocked();
|
| + r != MemoryRegionMap::EndRegionLocked(); ++r) {
|
| + Bucket* b =
|
| + GetBucket(r->call_stack_depth, r->call_stack, mmap_table_, NULL);
|
| + if (b->alloc_size == 0) {
|
| + num_available_mmap_buckets_ += 1;
|
| + }
|
| + b->allocs += 1;
|
| + b->alloc_size += r->end_addr - r->start_addr;
|
| +
|
| + AllocValue v;
|
| + v.set_bucket(b);
|
| + v.bytes = r->end_addr - r->start_addr;
|
| + mmap_address_map_->Insert(reinterpret_cast<const void*>(r->start_addr), v);
|
| + }
|
| +}
|
| +
|
| +void HeapProfileTable::ClearMMapData() {
|
| + if (mmap_address_map_ != NULL) {
|
| + mmap_address_map_->Iterate(ZeroBucketCountsIterator, this);
|
| + mmap_address_map_->~AllocationMap();
|
| + dealloc_(mmap_address_map_);
|
| + mmap_address_map_ = NULL;
|
| + }
|
| +}
|
| +
|
| void HeapProfileTable::IterateOrderedAllocContexts(
|
| AllocContextIterator callback) const {
|
| Bucket** list = MakeSortedBucketList();
|
| AllocContextInfo info;
|
| - for (int i = 0; i < num_buckets_; ++i) {
|
| + for (int i = 0; i < num_alloc_buckets_; ++i) {
|
| *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]);
|
| info.stack_depth = list[i]->depth;
|
| info.call_stack = list[i]->stack;
|
| @@ -353,9 +432,14 @@ int HeapProfileTable::FillOrderedProfile(char buf[], int size) const {
|
| memset(&stats, 0, sizeof(stats));
|
| int bucket_length = snprintf(buf, size, "%s", kProfileHeader);
|
| if (bucket_length < 0 || bucket_length >= size) return 0;
|
| - bucket_length = UnparseBucket(total_, buf, bucket_length, size,
|
| + Bucket total_with_mmap(total_);
|
| + if (mmap_table_ != NULL) {
|
| + total_with_mmap.alloc_size += MemoryRegionMap::MapSize();
|
| + total_with_mmap.free_size += MemoryRegionMap::UnmapSize();
|
| + }
|
| + bucket_length = UnparseBucket(total_with_mmap, buf, bucket_length, size,
|
| " heapprofile", &stats);
|
| - for (int i = 0; i < num_buckets_; i++) {
|
| + for (int i = 0; i < num_alloc_buckets_; i++) {
|
| bucket_length = UnparseBucket(*list[i], buf, bucket_length, size, "",
|
| &stats);
|
| }
|
| @@ -390,6 +474,17 @@ void HeapProfileTable::DumpNonLiveIterator(const void* ptr, AllocValue* v,
|
| RawWrite(args.fd, buf, len);
|
| }
|
|
|
| +inline void HeapProfileTable::ZeroBucketCountsIterator(
|
| + const void* ptr, AllocValue* v, HeapProfileTable* heap_profile) {
|
| + Bucket* b = v->bucket();
|
| + if (b != NULL) {
|
| + b->allocs = 0;
|
| + b->alloc_size = 0;
|
| + b->free_size = 0;
|
| + b->frees = 0;
|
| + }
|
| +}
|
| +
|
| // Callback from NonLiveSnapshot; adds entry to arg->dest
|
| // if not the entry is not live and is not present in arg->base.
|
| void HeapProfileTable::AddIfNonLive(const void* ptr, AllocValue* v,
|
| @@ -457,7 +552,7 @@ void HeapProfileTable::CleanupOldProfiles(const char* prefix) {
|
|
|
| HeapProfileTable::Snapshot* HeapProfileTable::TakeSnapshot() {
|
| Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
|
| - allocation_->Iterate(AddToSnapshot, s);
|
| + alloc_address_map_->Iterate(AddToSnapshot, s);
|
| return s;
|
| }
|
|
|
| @@ -482,7 +577,7 @@ HeapProfileTable::Snapshot* HeapProfileTable::NonLiveSnapshot(
|
| AddNonLiveArgs args;
|
| args.dest = s;
|
| args.base = base;
|
| - allocation_->Iterate<AddNonLiveArgs*>(AddIfNonLive, &args);
|
| + alloc_address_map_->Iterate<AddNonLiveArgs*>(AddIfNonLive, &args);
|
| RAW_VLOG(2, "NonLiveSnapshot output: %d %d\n",
|
| int(s->total_.allocs - s->total_.frees),
|
| int(s->total_.alloc_size - s->total_.free_size));
|
|
|