| OLD | NEW | 
|---|---|
| 1 // Copyright (c) 2006, Google Inc. | 1 // Copyright (c) 2006, Google Inc. | 
| 2 // All rights reserved. | 2 // All rights reserved. | 
| 3 // | 3 // | 
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without | 
| 5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are | 
| 6 // met: | 6 // met: | 
| 7 // | 7 // | 
| 8 //     * Redistributions of source code must retain the above copyright | 8 //     * Redistributions of source code must retain the above copyright | 
| 9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. | 
| 10 //     * Redistributions in binary form must reproduce the above | 10 //     * Redistributions in binary form must reproduce the above | 
| (...skipping 43 matching lines...) | 
| 54 #include <stdarg.h> | 54 #include <stdarg.h> | 
| 55 #include <string> | 55 #include <string> | 
| 56 #include <map> | 56 #include <map> | 
| 57 #include <algorithm>  // for sort(), equal(), and copy() | 57 #include <algorithm>  // for sort(), equal(), and copy() | 
| 58 | 58 | 
| 59 #include "heap-profile-table.h" | 59 #include "heap-profile-table.h" | 
| 60 | 60 | 
| 61 #include "base/logging.h" | 61 #include "base/logging.h" | 
| 62 #include "raw_printer.h" | 62 #include "raw_printer.h" | 
| 63 #include "symbolize.h" | 63 #include "symbolize.h" | 
| 64 #include <google/stacktrace.h> | 64 #include <gperftools/stacktrace.h> | 
| 65 #include <google/malloc_hook.h> | 65 #include <gperftools/malloc_hook.h> | 
|  | 66 #include "memory_region_map.h" | 
| 66 #include "base/commandlineflags.h" | 67 #include "base/commandlineflags.h" | 
| 67 #include "base/logging.h"    // for the RawFD I/O commands | 68 #include "base/logging.h"    // for the RawFD I/O commands | 
| 68 #include "base/sysinfo.h" | 69 #include "base/sysinfo.h" | 
| 69 | 70 | 
| 70 using std::sort; | 71 using std::sort; | 
| 71 using std::equal; | 72 using std::equal; | 
| 72 using std::copy; | 73 using std::copy; | 
| 73 using std::string; | 74 using std::string; | 
| 74 using std::map; | 75 using std::map; | 
| 75 | 76 | 
| (...skipping 15 matching lines...) | 
| 91 // header of the dumped heap profile | 92 // header of the dumped heap profile | 
| 92 static const char kProfileHeader[] = "heap profile: "; | 93 static const char kProfileHeader[] = "heap profile: "; | 
| 93 static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n"; | 94 static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n"; | 
| 94 | 95 | 
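`kProfileHeader` and `kProcSelfMapsHeader` bracket the two halves of a dump: the bucket listing and a copy of the process mappings. For orientation, a hand-written illustration of the resulting layout (the numbers are made up, not output of this code):

```
heap profile:     15:  1048576 [    60:  4194304] @ heapprofile
     10:   786432 [    30:  2097152] @ 0x40a1b2 0x40c3d4 0x401122
      5:   262144 [    30:  2097152] @ 0x40e5f6 0x401122

MAPPED_LIBRARIES:
00400000-004fc000 r-xp 00000000 08:01 1234   /usr/local/bin/myserver
```

Each bucket line reads `in-use count: in-use bytes [cumulative count: cumulative bytes] @ stack`, which is the shape pprof-style tools expect.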
| 95 //---------------------------------------------------------------------- | 96 //---------------------------------------------------------------------- | 
| 96 | 97 | 
| 97 const char HeapProfileTable::kFileExt[] = ".heap"; | 98 const char HeapProfileTable::kFileExt[] = ".heap"; | 
| 98 | 99 | 
| 99 //---------------------------------------------------------------------- | 100 //---------------------------------------------------------------------- | 
| 100 | 101 | 
| 101 static const int kHashTableSize = 179999;   // Size for table_. | 102 // Size for alloc_table_ and mmap_table_. | 
|  | 103 static const int kHashTableSize = 179999; | 
| 102 /*static*/ const int HeapProfileTable::kMaxStackDepth; | 104 /*static*/ const int HeapProfileTable::kMaxStackDepth; | 
| 103 | 105 | 
| 104 //---------------------------------------------------------------------- | 106 //---------------------------------------------------------------------- | 
| 105 | 107 | 
| 106 // We strip out a different number of stack frames in debug mode | 108 // We strip out a different number of stack frames in debug mode | 
| 107 // because less inlining happens in that case | 109 // because less inlining happens in that case | 
| 108 #ifdef NDEBUG | 110 #ifdef NDEBUG | 
| 109 static const int kStripFrames = 2; | 111 static const int kStripFrames = 2; | 
| 110 #else | 112 #else | 
| 111 static const int kStripFrames = 3; | 113 static const int kStripFrames = 3; | 
| 112 #endif | 114 #endif | 
| 113 | 115 | 
| 114 // For sorting Stats or Buckets by in-use space | 116 // For sorting Stats or Buckets by in-use space | 
| 115 static bool ByAllocatedSpace(HeapProfileTable::Stats* a, | 117 static bool ByAllocatedSpace(HeapProfileTable::Stats* a, | 
| 116                              HeapProfileTable::Stats* b) { | 118                              HeapProfileTable::Stats* b) { | 
| 117   // Return true iff "a" has more allocated space than "b" | 119   // Return true iff "a" has more allocated space than "b" | 
| 118   return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size); | 120   return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size); | 
| 119 } | 121 } | 
| 120 | 122 | 
| 121 //---------------------------------------------------------------------- | 123 //---------------------------------------------------------------------- | 
| 122 | 124 | 
| 123 HeapProfileTable::HeapProfileTable(Allocator alloc, DeAllocator dealloc) | 125 HeapProfileTable::HeapProfileTable(Allocator alloc, DeAllocator dealloc) | 
| 124     : alloc_(alloc), dealloc_(dealloc) { | 126     : alloc_(alloc), dealloc_(dealloc) { | 
| 125   // Make the table | 127   // Initialize the overall profile stats. | 
| 126   const int table_bytes = kHashTableSize * sizeof(*table_); |  | 
| 127   table_ = reinterpret_cast<Bucket**>(alloc_(table_bytes)); |  | 
| 128   memset(table_, 0, table_bytes); |  | 
| 129   // Make allocation map |  | 
| 130   allocation_ = |  | 
| 131     new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_); |  | 
| 132   // init the rest: |  | 
| 133   memset(&total_, 0, sizeof(total_)); | 128   memset(&total_, 0, sizeof(total_)); | 
| 134   num_buckets_ = 0; | 129 | 
|  | 130   // Make the malloc table. | 
|  | 131   const int alloc_table_bytes = kHashTableSize * sizeof(*alloc_table_); | 
|  | 132   alloc_table_ = reinterpret_cast<Bucket**>(alloc_(alloc_table_bytes)); | 
|  | 133   memset(alloc_table_, 0, alloc_table_bytes); | 
|  | 134   num_alloc_buckets_ = 0; | 
|  | 135 | 
|  | 136   // Initialize the mmap table. | 
|  | 137   mmap_table_ = NULL; | 
|  | 138   num_available_mmap_buckets_ = 0; | 
|  | 139 | 
|  | 140   // Make malloc and mmap allocation maps. | 
|  | 141   alloc_address_map_ = | 
|  | 142       new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_); | 
|  | 143   mmap_address_map_ = NULL; | 
| 135 } | 144 } | 
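A note on the `new(alloc_(sizeof(AllocationMap)))` construction above: the profiler cannot go through ordinary `new`/`malloc`, since that would re-enter the very allocation hooks it is recording, so objects are built by placement new in memory from the caller-supplied `Allocator` and torn down with an explicit destructor call plus the `DeAllocator`. A minimal stand-alone sketch of the pattern; the `Allocator`/`DeAllocator` typedefs match heap-profile-table.h, while `Demo` and `main` are purely illustrative:

```cpp
#include <new>       // placement new
#include <stdlib.h>  // malloc/free stand in for the real hook-safe allocator

typedef void* (*Allocator)(size_t size);   // as in heap-profile-table.h
typedef void  (*DeAllocator)(void* ptr);

struct Demo {
  explicit Demo(int v) : value(v) {}
  int value;
};

int main() {
  Allocator alloc = &malloc;      // illustrative; any hook-safe allocator
  DeAllocator dealloc = &free;
  // Construct in raw memory, bypassing operator new:
  Demo* d = new (alloc(sizeof(Demo))) Demo(42);
  // Tear down symmetrically: explicit destructor, then raw deallocation.
  d->~Demo();
  dealloc(d);
  return 0;
}
```

The teardown half of this pattern is what `DeallocateAllocationMap` and `DeallocateBucketTable` below encapsulate.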
| 136 | 145 | 
| 137 HeapProfileTable::~HeapProfileTable() { | 146 HeapProfileTable::~HeapProfileTable() { | 
| 138   // free allocation map | 147   DeallocateBucketTable(alloc_table_); | 
| 139   allocation_->~AllocationMap(); | 148   alloc_table_ = NULL; | 
| 140   dealloc_(allocation_); | 149   DeallocateBucketTable(mmap_table_); | 
| 141   allocation_ = NULL; | 150   mmap_table_ = NULL; | 
| 142   // free hash table | 151   DeallocateAllocationMap(alloc_address_map_); | 
| 143   for (int b = 0; b < kHashTableSize; b++) { | 152   alloc_address_map_ = NULL; | 
| 144     for (Bucket* x = table_[b]; x != 0; /**/) { | 153   DeallocateAllocationMap(mmap_address_map_); | 
| 145       Bucket* b = x; | 154   mmap_address_map_ = NULL; | 
| 146       x = x->next; |  | 
| 147       dealloc_(b->stack); |  | 
| 148       dealloc_(b); |  | 
| 149     } |  | 
| 150   } |  | 
| 151   dealloc_(table_); |  | 
| 152   table_ = NULL; |  | 
| 153 } | 155 } | 
| 154 | 156 | 
| 155 HeapProfileTable::Bucket* HeapProfileTable::GetBucket(int depth, | 157 void HeapProfileTable::DeallocateAllocationMap(AllocationMap* allocation) { | 
| 156                                                       const void* const key[]) { | 158   if (allocation != NULL) { | 
|  | 159     allocation->~AllocationMap(); | 
|  | 160     dealloc_(allocation); | 
|  | 161   } | 
|  | 162 } | 
|  | 163 | 
|  | 164 void HeapProfileTable::DeallocateBucketTable(Bucket** table) { | 
|  | 165   if (table != NULL) { | 
|  | 166     for (int b = 0; b < kHashTableSize; b++) { | 
|  | 167       for (Bucket* x = table[b]; x != 0; /**/) { | 
|  | 168         Bucket* b = x; | 
|  | 169         x = x->next; | 
|  | 170         dealloc_(b->stack); | 
|  | 171         dealloc_(b); | 
|  | 172       } | 
|  | 173     } | 
|  | 174     dealloc_(table); | 
|  | 175   } | 
|  | 176 } | 
|  | 177 | 
|  | 178 HeapProfileTable::Bucket* HeapProfileTable::GetBucket( | 
|  | 179     int depth, const void* const key[], Bucket** table, | 
|  | 180     int* bucket_count) { | 
| 157   // Make hash-value | 181   // Make hash-value | 
| 158   uintptr_t h = 0; | 182   uintptr_t h = 0; | 
| 159   for (int i = 0; i < depth; i++) { | 183   for (int i = 0; i < depth; i++) { | 
| 160     h += reinterpret_cast<uintptr_t>(key[i]); | 184     h += reinterpret_cast<uintptr_t>(key[i]); | 
| 161     h += h << 10; | 185     h += h << 10; | 
| 162     h ^= h >> 6; | 186     h ^= h >> 6; | 
| 163   } | 187   } | 
| 164   h += h << 3; | 188   h += h << 3; | 
| 165   h ^= h >> 11; | 189   h ^= h >> 11; | 
| 166 | 190 | 
| 167   // Lookup stack trace in table | 191   // Lookup stack trace in table | 
| 168   unsigned int buck = ((unsigned int) h) % kHashTableSize; | 192   unsigned int buck = ((unsigned int) h) % kHashTableSize; | 
| 169   for (Bucket* b = table_[buck]; b != 0; b = b->next) { | 193   for (Bucket* b = table[buck]; b != 0; b = b->next) { | 
| 170     if ((b->hash == h) && | 194     if ((b->hash == h) && | 
| 171         (b->depth == depth) && | 195         (b->depth == depth) && | 
| 172         equal(key, key + depth, b->stack)) { | 196         equal(key, key + depth, b->stack)) { | 
| 173       return b; | 197       return b; | 
| 174     } | 198     } | 
| 175   } | 199   } | 
| 176 | 200 | 
| 177   // Create new bucket | 201   // Create new bucket | 
| 178   const size_t key_size = sizeof(key[0]) * depth; | 202   const size_t key_size = sizeof(key[0]) * depth; | 
| 179   const void** kcopy = reinterpret_cast<const void**>(alloc_(key_size)); | 203   const void** kcopy = reinterpret_cast<const void**>(alloc_(key_size)); | 
| 180   copy(key, key + depth, kcopy); | 204   copy(key, key + depth, kcopy); | 
| 181   Bucket* b = reinterpret_cast<Bucket*>(alloc_(sizeof(Bucket))); | 205   Bucket* b = reinterpret_cast<Bucket*>(alloc_(sizeof(Bucket))); | 
| 182   memset(b, 0, sizeof(*b)); | 206   memset(b, 0, sizeof(*b)); | 
| 183   b->hash  = h; | 207   b->hash  = h; | 
| 184   b->depth = depth; | 208   b->depth = depth; | 
| 185   b->stack = kcopy; | 209   b->stack = kcopy; | 
| 186   b->next  = table_[buck]; | 210   b->next  = table[buck]; | 
| 187   table_[buck] = b; | 211   table[buck] = b; | 
| 188   num_buckets_++; | 212   if (bucket_count != NULL) { | 
|  | 213     ++(*bucket_count); | 
|  | 214   } | 
| 189   return b; | 215   return b; | 
| 190 } | 216 } | 
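The mixing loop in `GetBucket` is Jenkins' one-at-a-time hash applied to whole frame addresses rather than bytes, with a prime table size to spread the residues across chains. A stand-alone sketch of just the index computation (names are illustrative; the constant mirrors `kHashTableSize` above):

```cpp
#include <stdint.h>

static const int kNumHashBuckets = 179999;  // prime, as in this file

// Hash a stack trace (array of return addresses) to a chain index.
unsigned int StackTraceBucket(const void* const key[], int depth) {
  uintptr_t h = 0;
  for (int i = 0; i < depth; i++) {
    h += reinterpret_cast<uintptr_t>(key[i]);  // fold in one frame
    h += h << 10;                              // one-at-a-time mixing
    h ^= h >> 6;
  }
  h += h << 3;                                 // final avalanche
  h ^= h >> 11;
  return ((unsigned int) h) % kNumHashBuckets;
}
```

Collisions are then resolved by walking the `Bucket::next` chain and comparing `hash`, `depth`, and the full stack with `std::equal`.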
| 191 | 217 | 
| 192 void HeapProfileTable::RecordAlloc(const void* ptr, size_t bytes, | 218 int HeapProfileTable::GetCallerStackTrace( | 
| 193                                    int skip_count) { | 219     int skip_count, void* stack[kMaxStackDepth]) { | 
| 194   void* key[kMaxStackDepth]; | 220   return MallocHook::GetCallerStackTrace( | 
| 195   int depth = MallocHook::GetCallerStackTrace( | 221       stack, kMaxStackDepth, kStripFrames + skip_count + 1); | 
| 196     key, kMaxStackDepth, kStripFrames + skip_count + 1); |  | 
| 197   RecordAllocWithStack(ptr, bytes, depth, key); |  | 
| 198 } | 222 } | 
| 199 | 223 | 
| 200 void HeapProfileTable::RecordAllocWithStack( | 224 void HeapProfileTable::RecordAlloc( | 
| 201     const void* ptr, size_t bytes, int stack_depth, | 225     const void* ptr, size_t bytes, int stack_depth, | 
| 202     const void* const call_stack[]) { | 226     const void* const call_stack[]) { | 
| 203   Bucket* b = GetBucket(stack_depth, call_stack); | 227   Bucket* b = GetBucket(stack_depth, call_stack, alloc_table_, | 
|  | 228                         &num_alloc_buckets_); | 
| 204   b->allocs++; | 229   b->allocs++; | 
| 205   b->alloc_size += bytes; | 230   b->alloc_size += bytes; | 
| 206   total_.allocs++; | 231   total_.allocs++; | 
| 207   total_.alloc_size += bytes; | 232   total_.alloc_size += bytes; | 
| 208 | 233 | 
| 209   AllocValue v; | 234   AllocValue v; | 
| 210   v.set_bucket(b);  // also did set_live(false); set_ignore(false) | 235   v.set_bucket(b);  // also did set_live(false); set_ignore(false) | 
| 211   v.bytes = bytes; | 236   v.bytes = bytes; | 
| 212   allocation_->Insert(ptr, v); | 237   alloc_address_map_->Insert(ptr, v); | 
| 213 } | 238 } | 
| 214 | 239 | 
| 215 void HeapProfileTable::RecordFree(const void* ptr) { | 240 void HeapProfileTable::RecordFree(const void* ptr) { | 
| 216   AllocValue v; | 241   AllocValue v; | 
| 217   if (allocation_->FindAndRemove(ptr, &v)) { | 242   if (alloc_address_map_->FindAndRemove(ptr, &v)) { | 
| 218     Bucket* b = v.bucket(); | 243     Bucket* b = v.bucket(); | 
| 219     b->frees++; | 244     b->frees++; | 
| 220     b->free_size += v.bytes; | 245     b->free_size += v.bytes; | 
| 221     total_.frees++; | 246     total_.frees++; | 
| 222     total_.free_size += v.bytes; | 247     total_.free_size += v.bytes; | 
| 223   } | 248   } | 
| 224 } | 249 } | 
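`RecordAlloc` and `RecordFree` never decrement counters: both per-bucket and global `Stats` only accumulate, and in-use figures are always computed as a difference. A toy model of the bookkeeping (made-up sizes, no real table; the struct is modeled loosely on `HeapProfileTable::Stats`):

```cpp
#include <assert.h>
#include <stddef.h>

struct Stats {
  int    allocs, frees;
  size_t alloc_size, free_size;
};

int main() {
  Stats total = {0, 0, 0, 0};
  // RecordAlloc(p1, 128); RecordAlloc(p2, 64);
  total.allocs++; total.alloc_size += 128;
  total.allocs++; total.alloc_size += 64;
  // RecordFree(p1) -- looks up p1's AllocValue to learn its 128 bytes:
  total.frees++;  total.free_size += 128;
  // In-use space is derived, never stored:
  assert(total.allocs - total.frees == 1);
  assert(total.alloc_size - total.free_size == 64);
  return 0;
}
```

This difference is the same quantity `ByAllocatedSpace` sorts on.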
| 225 | 250 | 
| 226 bool HeapProfileTable::FindAlloc(const void* ptr, size_t* object_size) const { | 251 bool HeapProfileTable::FindAlloc(const void* ptr, size_t* object_size) const { | 
| 227   const AllocValue* alloc_value = allocation_->Find(ptr); | 252   const AllocValue* alloc_value = alloc_address_map_->Find(ptr); | 
| 228   if (alloc_value != NULL) *object_size = alloc_value->bytes; | 253   if (alloc_value != NULL) *object_size = alloc_value->bytes; | 
| 229   return alloc_value != NULL; | 254   return alloc_value != NULL; | 
| 230 } | 255 } | 
| 231 | 256 | 
| 232 bool HeapProfileTable::FindAllocDetails(const void* ptr, | 257 bool HeapProfileTable::FindAllocDetails(const void* ptr, | 
| 233                                         AllocInfo* info) const { | 258                                         AllocInfo* info) const { | 
| 234   const AllocValue* alloc_value = allocation_->Find(ptr); | 259   const AllocValue* alloc_value = alloc_address_map_->Find(ptr); | 
| 235   if (alloc_value != NULL) { | 260   if (alloc_value != NULL) { | 
| 236     info->object_size = alloc_value->bytes; | 261     info->object_size = alloc_value->bytes; | 
| 237     info->call_stack = alloc_value->bucket()->stack; | 262     info->call_stack = alloc_value->bucket()->stack; | 
| 238     info->stack_depth = alloc_value->bucket()->depth; | 263     info->stack_depth = alloc_value->bucket()->depth; | 
| 239   } | 264   } | 
| 240   return alloc_value != NULL; | 265   return alloc_value != NULL; | 
| 241 } | 266 } | 
| 242 | 267 | 
| 243 bool HeapProfileTable::FindInsideAlloc(const void* ptr, | 268 bool HeapProfileTable::FindInsideAlloc(const void* ptr, | 
| 244                                        size_t max_size, | 269                                        size_t max_size, | 
| 245                                        const void** object_ptr, | 270                                        const void** object_ptr, | 
| 246                                        size_t* object_size) const { | 271                                        size_t* object_size) const { | 
| 247   const AllocValue* alloc_value = | 272   const AllocValue* alloc_value = | 
| 248     allocation_->FindInside(&AllocValueSize, max_size, ptr, object_ptr); | 273     alloc_address_map_->FindInside(&AllocValueSize, max_size, ptr, object_ptr); | 
| 249   if (alloc_value != NULL) *object_size = alloc_value->bytes; | 274   if (alloc_value != NULL) *object_size = alloc_value->bytes; | 
| 250   return alloc_value != NULL; | 275   return alloc_value != NULL; | 
| 251 } | 276 } | 
| 252 | 277 | 
| 253 bool HeapProfileTable::MarkAsLive(const void* ptr) { | 278 bool HeapProfileTable::MarkAsLive(const void* ptr) { | 
| 254   AllocValue* alloc = allocation_->FindMutable(ptr); | 279   AllocValue* alloc = alloc_address_map_->FindMutable(ptr); | 
| 255   if (alloc && !alloc->live()) { | 280   if (alloc && !alloc->live()) { | 
| 256     alloc->set_live(true); | 281     alloc->set_live(true); | 
| 257     return true; | 282     return true; | 
| 258   } | 283   } | 
| 259   return false; | 284   return false; | 
| 260 } | 285 } | 
| 261 | 286 | 
| 262 void HeapProfileTable::MarkAsIgnored(const void* ptr) { | 287 void HeapProfileTable::MarkAsIgnored(const void* ptr) { | 
| 263   AllocValue* alloc = allocation_->FindMutable(ptr); | 288   AllocValue* alloc = alloc_address_map_->FindMutable(ptr); | 
| 264   if (alloc) { | 289   if (alloc) { | 
| 265     alloc->set_ignore(true); | 290     alloc->set_ignore(true); | 
| 266   } | 291   } | 
| 267 } | 292 } | 
| 268 | 293 | 
| 269 // We'd be happier using snprintfer, but we avoid it to reduce dependencies. | 294 // We'd be happier using snprintfer, but we avoid it to reduce dependencies. | 
| 270 int HeapProfileTable::UnparseBucket(const Bucket& b, | 295 int HeapProfileTable::UnparseBucket(const Bucket& b, | 
| 271                                     char* buf, int buflen, int bufsize, | 296                                     char* buf, int buflen, int bufsize, | 
| 272                                     const char* extra, | 297                                     const char* extra, | 
| 273                                     Stats* profile_stats) { | 298                                     Stats* profile_stats) { | 
| (...skipping 20 matching lines...) | 
| 294     buflen += printed; | 319     buflen += printed; | 
| 295   } | 320   } | 
| 296   printed = snprintf(buf + buflen, bufsize - buflen, "\n"); | 321   printed = snprintf(buf + buflen, bufsize - buflen, "\n"); | 
| 297   if (printed < 0 || printed >= bufsize - buflen) return buflen; | 322   if (printed < 0 || printed >= bufsize - buflen) return buflen; | 
| 298   buflen += printed; | 323   buflen += printed; | 
| 299   return buflen; | 324   return buflen; | 
| 300 } | 325 } | 
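`UnparseBucket` grows its output with a repeated idiom: write at `buf + buflen`, and treat a negative return or one `>= bufsize - buflen` as truncation, returning the length written so far. The idiom in isolation (illustrative helper, not from the patch):

```cpp
#include <stdio.h>

// Append text at buf+buflen; on truncation, keep the old length.
int Append(char* buf, int buflen, int bufsize, const char* text) {
  int printed = snprintf(buf + buflen, bufsize - buflen, "%s", text);
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  return buflen + printed;
}

int main() {
  char buf[32];
  int len = 0;
  len = Append(buf, len, sizeof(buf), "heap profile: ");
  len = Append(buf, len, sizeof(buf), "1: 64");
  printf("%d: %s\n", len, buf);  // prints "19: heap profile: 1: 64"
  return 0;
}
```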
| 301 | 326 | 
| 302 HeapProfileTable::Bucket** | 327 HeapProfileTable::Bucket** | 
| 303 HeapProfileTable::MakeSortedBucketList() const { | 328 HeapProfileTable::MakeSortedBucketList() const { | 
| 304   Bucket** list = | 329   Bucket** list = reinterpret_cast<Bucket**>(alloc_(sizeof(Bucket) * | 
| 305     reinterpret_cast<Bucket**>(alloc_(sizeof(Bucket) * num_buckets_)); | 330       (num_alloc_buckets_ + num_available_mmap_buckets_))); | 
|  | 331 | 
|  | 332   RAW_DCHECK(mmap_table_ != NULL || num_available_mmap_buckets_ == 0, ""); | 
| 306 | 333 | 
| 307   int n = 0; | 334   int n = 0; | 
|  | 335 | 
| 308   for (int b = 0; b < kHashTableSize; b++) { | 336   for (int b = 0; b < kHashTableSize; b++) { | 
| 309     for (Bucket* x = table_[b]; x != 0; x = x->next) { | 337     for (Bucket* x = alloc_table_[b]; x != 0; x = x->next) { | 
| 310       list[n++] = x; | 338       list[n++] = x; | 
| 311     } | 339     } | 
| 312   } | 340   } | 
| 313   RAW_DCHECK(n == num_buckets_, ""); | 341   RAW_DCHECK(n == num_alloc_buckets_, ""); | 
| 314 | 342 | 
| 315   sort(list, list + num_buckets_, ByAllocatedSpace); | 343   if (mmap_table_ != NULL) { | 
|  | 344     for (int b = 0; b < kHashTableSize; b++) { | 
|  | 345       for (Bucket* x = mmap_table_[b]; x != 0; x = x->next) { | 
|  | 346         list[n++] = x; | 
|  | 347       } | 
|  | 348     } | 
|  | 349   } | 
|  | 350   RAW_DCHECK(n == num_alloc_buckets_ + num_available_mmap_buckets_, ""); | 
|  | 351 | 
|  | 352   sort(list, list + num_alloc_buckets_ + num_available_mmap_buckets_, | 
|  | 353        ByAllocatedSpace); | 
| 316 | 354 | 
| 317   return list; | 355   return list; | 
| 318 } | 356 } | 
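`MakeSortedBucketList` now concatenates both tables into one flat array before sorting, so malloc and mmap buckets interleave in the output by in-use space. The comparator in miniature, on toy data:

```cpp
#include <algorithm>
#include <stdio.h>

struct Stats { int allocs, frees; size_t alloc_size, free_size; };

// Descending by in-use space, exactly as ByAllocatedSpace above.
static bool ByInUseSpace(Stats* a, Stats* b) {
  return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size);
}

int main() {
  Stats s1 = {1, 0, 100, 0};    // 100 bytes in use
  Stats s2 = {5, 3, 500, 300};  // 200 bytes in use
  Stats* list[2] = {&s1, &s2};
  std::sort(list, list + 2, ByInUseSpace);
  printf("%zu\n", list[0]->alloc_size - list[0]->free_size);  // 200
  return 0;
}
```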
| 319 | 357 | 
|  | 358 void HeapProfileTable::RefreshMMapData() { | 
|  | 359   // Make the table | 
|  | 360   static const int mmap_table_bytes = kHashTableSize * sizeof(*mmap_table_); | 
|  | 361   if (mmap_table_ == NULL) { | 
|  | 362     mmap_table_ = reinterpret_cast<Bucket**>(alloc_(mmap_table_bytes)); | 
|  | 363     memset(mmap_table_, 0, mmap_table_bytes); | 
|  | 364   } | 
|  | 365   num_available_mmap_buckets_ = 0; | 
|  | 366 | 
|  | 367   ClearMMapData(); | 
|  | 368   mmap_address_map_ = | 
|  | 369       new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_); | 
|  | 370 | 
|  | 371   MemoryRegionMap::LockHolder l; | 
|  | 372   for (MemoryRegionMap::RegionIterator r = | 
|  | 373            MemoryRegionMap::BeginRegionLocked(); | 
|  | 374        r != MemoryRegionMap::EndRegionLocked(); ++r) { | 
|  | 375     Bucket* b = | 
|  | 376         GetBucket(r->call_stack_depth, r->call_stack, mmap_table_, NULL); | 
|  | 377     if (b->alloc_size == 0) { | 
|  | 378       num_available_mmap_buckets_ += 1; | 
|  | 379     } | 
|  | 380     b->allocs += 1; | 
|  | 381     b->alloc_size += r->end_addr - r->start_addr; | 
|  | 382 | 
|  | 383     AllocValue v; | 
|  | 384     v.set_bucket(b); | 
|  | 385     v.bytes = r->end_addr - r->start_addr; | 
|  | 386     mmap_address_map_->Insert(reinterpret_cast<const void*>(r->start_addr), v); | 
|  | 387   } | 
|  | 388 } | 
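`RefreshMMapData` rebuilds the mmap side from scratch each time: counts in surviving buckets are zeroed (via `ClearMMapData` and `ZeroBucketCountsIterator` below), then every region currently in `MemoryRegionMap` is re-inserted under its lock. A plausible dump-time call sequence, assuming a driver along the lines of heap-profiler.cc (hypothetical sketch, only compilable inside the gperftools tree):

```cpp
#include "heap-profile-table.h"

// Hypothetical: include mmap regions in one profile dump, then drop
// the per-address mmap map so it does not go stale between dumps.
int DumpWithMMap(HeapProfileTable* table, char* buf, int size) {
  table->RefreshMMapData();                    // snapshot MemoryRegionMap
  int len = table->FillOrderedProfile(buf, size);
  table->ClearMMapData();                      // free mmap_address_map_
  return len;
}
```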
|  | 389 | 
|  | 390 void HeapProfileTable::ClearMMapData() { | 
|  | 391   if (mmap_address_map_ != NULL) { | 
|  | 392     mmap_address_map_->Iterate(ZeroBucketCountsIterator, this); | 
|  | 393     mmap_address_map_->~AllocationMap(); | 
|  | 394     dealloc_(mmap_address_map_); | 
|  | 395     mmap_address_map_ = NULL; | 
|  | 396   } | 
|  | 397 } | 
|  | 398 | 
| 320 void HeapProfileTable::IterateOrderedAllocContexts( | 399 void HeapProfileTable::IterateOrderedAllocContexts( | 
| 321     AllocContextIterator callback) const { | 400     AllocContextIterator callback) const { | 
| 322   Bucket** list = MakeSortedBucketList(); | 401   Bucket** list = MakeSortedBucketList(); | 
| 323   AllocContextInfo info; | 402   AllocContextInfo info; | 
| 324   for (int i = 0; i < num_buckets_; ++i) { | 403   for (int i = 0; i < num_alloc_buckets_; ++i) { | 
| 325     *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]); | 404     *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]); | 
| 326     info.stack_depth = list[i]->depth; | 405     info.stack_depth = list[i]->depth; | 
| 327     info.call_stack = list[i]->stack; | 406     info.call_stack = list[i]->stack; | 
| 328     callback(info); | 407     callback(info); | 
| 329   } | 408   } | 
| 330   dealloc_(list); | 409   dealloc_(list); | 
| 331 } | 410 } | 
| 332 | 411 | 
| 333 int HeapProfileTable::FillOrderedProfile(char buf[], int size) const { | 412 int HeapProfileTable::FillOrderedProfile(char buf[], int size) const { | 
| 334   Bucket** list = MakeSortedBucketList(); | 413   Bucket** list = MakeSortedBucketList(); | 
| (...skipping 11 matching lines...) | 
| 346   map_length += FillProcSelfMaps(buf + map_length, size - map_length, &dummy); | 425   map_length += FillProcSelfMaps(buf + map_length, size - map_length, &dummy); | 
| 347   RAW_DCHECK(map_length <= size, ""); | 426   RAW_DCHECK(map_length <= size, ""); | 
| 348   char* const map_start = buf + size - map_length;      // move to end | 427   char* const map_start = buf + size - map_length;      // move to end | 
| 349   memmove(map_start, buf, map_length); | 428   memmove(map_start, buf, map_length); | 
| 350   size -= map_length; | 429   size -= map_length; | 
| 351 | 430 | 
| 352   Stats stats; | 431   Stats stats; | 
| 353   memset(&stats, 0, sizeof(stats)); | 432   memset(&stats, 0, sizeof(stats)); | 
| 354   int bucket_length = snprintf(buf, size, "%s", kProfileHeader); | 433   int bucket_length = snprintf(buf, size, "%s", kProfileHeader); | 
| 355   if (bucket_length < 0 || bucket_length >= size) return 0; | 434   if (bucket_length < 0 || bucket_length >= size) return 0; | 
| 356   bucket_length = UnparseBucket(total_, buf, bucket_length, size, | 435   Bucket total_with_mmap(total_); | 
|  | 436   if (mmap_table_ != NULL) { | 
|  | 437     total_with_mmap.alloc_size += MemoryRegionMap::MapSize(); | 
|  | 438     total_with_mmap.free_size += MemoryRegionMap::UnmapSize(); | 
|  | 439   } | 
|  | 440   bucket_length = UnparseBucket(total_with_mmap, buf, bucket_length, size, | 
| 357                                 " heapprofile", &stats); | 441                                 " heapprofile", &stats); | 
| 358   for (int i = 0; i < num_buckets_; i++) { | 442   for (int i = 0; i < num_alloc_buckets_; i++) { | 
| 359     bucket_length = UnparseBucket(*list[i], buf, bucket_length, size, "", | 443     bucket_length = UnparseBucket(*list[i], buf, bucket_length, size, "", | 
| 360                                   &stats); | 444                                   &stats); | 
| 361   } | 445   } | 
| 362   RAW_DCHECK(bucket_length < size, ""); | 446   RAW_DCHECK(bucket_length < size, ""); | 
| 363 | 447 | 
| 364   dealloc_(list); | 448   dealloc_(list); | 
| 365 | 449 | 
| 366   RAW_DCHECK(buf + bucket_length <= map_start, ""); | 450   RAW_DCHECK(buf + bucket_length <= map_start, ""); | 
| 367   memmove(buf + bucket_length, map_start, map_length);  // close the gap | 451   memmove(buf + bucket_length, map_start, map_length);  // close the gap | 
| 368 | 452 | 
| (...skipping 14 matching lines...) | 
| 383   memset(&b, 0, sizeof(b)); | 467   memset(&b, 0, sizeof(b)); | 
| 384   b.allocs = 1; | 468   b.allocs = 1; | 
| 385   b.alloc_size = v->bytes; | 469   b.alloc_size = v->bytes; | 
| 386   b.depth = v->bucket()->depth; | 470   b.depth = v->bucket()->depth; | 
| 387   b.stack = v->bucket()->stack; | 471   b.stack = v->bucket()->stack; | 
| 388   char buf[1024]; | 472   char buf[1024]; | 
| 389   int len = UnparseBucket(b, buf, 0, sizeof(buf), "", args.profile_stats); | 473   int len = UnparseBucket(b, buf, 0, sizeof(buf), "", args.profile_stats); | 
| 390   RawWrite(args.fd, buf, len); | 474   RawWrite(args.fd, buf, len); | 
| 391 } | 475 } | 
| 392 | 476 | 
|  | 477 inline void HeapProfileTable::ZeroBucketCountsIterator( | 
|  | 478     const void* ptr, AllocValue* v, HeapProfileTable* heap_profile) { | 
|  | 479   Bucket* b = v->bucket(); | 
|  | 480   if (b != NULL) { | 
|  | 481     b->allocs = 0; | 
|  | 482     b->alloc_size = 0; | 
|  | 483     b->free_size = 0; | 
|  | 484     b->frees = 0; | 
|  | 485   } | 
|  | 486 } | 
|  | 487 | 
| 393 // Callback from NonLiveSnapshot; adds entry to arg->dest | 488 // Callback from NonLiveSnapshot; adds entry to arg->dest | 
| 394 // if the entry is not live and is not present in arg->base. | 489 // if the entry is not live and is not present in arg->base. | 
| 395 void HeapProfileTable::AddIfNonLive(const void* ptr, AllocValue* v, | 490 void HeapProfileTable::AddIfNonLive(const void* ptr, AllocValue* v, | 
| 396                                     AddNonLiveArgs* arg) { | 491                                     AddNonLiveArgs* arg) { | 
| 397   if (v->live()) { | 492   if (v->live()) { | 
| 398     v->set_live(false); | 493     v->set_live(false); | 
| 399   } else { | 494   } else { | 
| 400     if (arg->base != NULL && arg->base->map_.Find(ptr) != NULL) { | 495     if (arg->base != NULL && arg->base->map_.Find(ptr) != NULL) { | 
| 401       // Present in arg->base, so do not save | 496       // Present in arg->base, so do not save | 
| 402     } else { | 497     } else { | 
| (...skipping 47 matching lines...) | 
| 450     } | 545     } | 
| 451   } | 546   } | 
| 452   globfree(&g); | 547   globfree(&g); | 
| 453 #else   /* HAVE_GLOB_H */ | 548 #else   /* HAVE_GLOB_H */ | 
| 454   RAW_LOG(WARNING, "Unable to remove old heap profiles (can't run glob())"); | 549   RAW_LOG(WARNING, "Unable to remove old heap profiles (can't run glob())"); | 
| 455 #endif | 550 #endif | 
| 456 } | 551 } | 
| 457 | 552 | 
| 458 HeapProfileTable::Snapshot* HeapProfileTable::TakeSnapshot() { | 553 HeapProfileTable::Snapshot* HeapProfileTable::TakeSnapshot() { | 
| 459   Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_); | 554   Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_); | 
| 460   allocation_->Iterate(AddToSnapshot, s); | 555   alloc_address_map_->Iterate(AddToSnapshot, s); | 
| 461   return s; | 556   return s; | 
| 462 } | 557 } | 
| 463 | 558 | 
| 464 void HeapProfileTable::ReleaseSnapshot(Snapshot* s) { | 559 void HeapProfileTable::ReleaseSnapshot(Snapshot* s) { | 
| 465   s->~Snapshot(); | 560   s->~Snapshot(); | 
| 466   dealloc_(s); | 561   dealloc_(s); | 
| 467 } | 562 } | 
| 468 | 563 | 
| 469 // Callback from TakeSnapshot; adds a single entry to snapshot | 564 // Callback from TakeSnapshot; adds a single entry to snapshot | 
| 470 void HeapProfileTable::AddToSnapshot(const void* ptr, AllocValue* v, | 565 void HeapProfileTable::AddToSnapshot(const void* ptr, AllocValue* v, | 
| 471                                      Snapshot* snapshot) { | 566                                      Snapshot* snapshot) { | 
| 472   snapshot->Add(ptr, *v); | 567   snapshot->Add(ptr, *v); | 
| 473 } | 568 } | 
| 474 | 569 | 
| 475 HeapProfileTable::Snapshot* HeapProfileTable::NonLiveSnapshot( | 570 HeapProfileTable::Snapshot* HeapProfileTable::NonLiveSnapshot( | 
| 476     Snapshot* base) { | 571     Snapshot* base) { | 
| 477   RAW_VLOG(2, "NonLiveSnapshot input: %d %d\n", | 572   RAW_VLOG(2, "NonLiveSnapshot input: %d %d\n", | 
| 478            int(total_.allocs - total_.frees), | 573            int(total_.allocs - total_.frees), | 
| 479            int(total_.alloc_size - total_.free_size)); | 574            int(total_.alloc_size - total_.free_size)); | 
| 480 | 575 | 
| 481   Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_); | 576   Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_); | 
| 482   AddNonLiveArgs args; | 577   AddNonLiveArgs args; | 
| 483   args.dest = s; | 578   args.dest = s; | 
| 484   args.base = base; | 579   args.base = base; | 
| 485   allocation_->Iterate<AddNonLiveArgs*>(AddIfNonLive, &args); | 580   alloc_address_map_->Iterate<AddNonLiveArgs*>(AddIfNonLive, &args); | 
| 486   RAW_VLOG(2, "NonLiveSnapshot output: %d %d\n", | 581   RAW_VLOG(2, "NonLiveSnapshot output: %d %d\n", | 
| 487            int(s->total_.allocs - s->total_.frees), | 582            int(s->total_.allocs - s->total_.frees), | 
| 488            int(s->total_.alloc_size - s->total_.free_size)); | 583            int(s->total_.alloc_size - s->total_.free_size)); | 
| 489   return s; | 584   return s; | 
| 490 } | 585 } | 
| 491 | 586 | 
| 492 // Information kept per unique bucket seen | 587 // Information kept per unique bucket seen | 
| 493 struct HeapProfileTable::Snapshot::Entry { | 588 struct HeapProfileTable::Snapshot::Entry { | 
| 494   int count; | 589   int count; | 
| 495   int bytes; | 590   int bytes; | 
| (...skipping 99 matching lines...) | 
| 595                                               char* unused) { | 690                                               char* unused) { | 
| 596   // Perhaps also log the allocation stack trace (unsymbolized) | 691   // Perhaps also log the allocation stack trace (unsymbolized) | 
| 597   // on this line in case somebody finds it useful. | 692   // on this line in case somebody finds it useful. | 
| 598   RAW_LOG(ERROR, "leaked %"PRIuS" byte object %p", v->bytes, ptr); | 693   RAW_LOG(ERROR, "leaked %"PRIuS" byte object %p", v->bytes, ptr); | 
| 599 } | 694 } | 
| 600 | 695 | 
| 601 void HeapProfileTable::Snapshot::ReportIndividualObjects() { | 696 void HeapProfileTable::Snapshot::ReportIndividualObjects() { | 
| 602   char unused; | 697   char unused; | 
| 603   map_.Iterate(ReportObject, &unused); | 698   map_.Iterate(ReportObject, &unused); | 
| 604 } | 699 } | 