| OLD | NEW |
| 1 // Copyright (c) 2006, Google Inc. | 1 // Copyright (c) 2006, Google Inc. |
| 2 // All rights reserved. | 2 // All rights reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
| 6 // met: | 6 // met: |
| 7 // | 7 // |
| 8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
| 9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
| 10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
| (...skipping 43 matching lines...) |
| 54 #include <stdarg.h> | 54 #include <stdarg.h> |
| 55 #include <string> | 55 #include <string> |
| 56 #include <map> | 56 #include <map> |
| 57 #include <algorithm> // for sort(), equal(), and copy() | 57 #include <algorithm> // for sort(), equal(), and copy() |
| 58 | 58 |
| 59 #include "heap-profile-table.h" | 59 #include "heap-profile-table.h" |
| 60 | 60 |
| 61 #include "base/logging.h" | 61 #include "base/logging.h" |
| 62 #include "raw_printer.h" | 62 #include "raw_printer.h" |
| 63 #include "symbolize.h" | 63 #include "symbolize.h" |
| 64 #include <gperftools/stacktrace.h> | 64 #include <google/stacktrace.h> |
| 65 #include <gperftools/malloc_hook.h> | 65 #include <google/malloc_hook.h> |
| 66 #include "memory_region_map.h" | |
| 67 #include "base/commandlineflags.h" | 66 #include "base/commandlineflags.h" |
| 68 #include "base/logging.h" // for the RawFD I/O commands | 67 #include "base/logging.h" // for the RawFD I/O commands |
| 69 #include "base/sysinfo.h" | 68 #include "base/sysinfo.h" |
| 70 | 69 |
| 71 using std::sort; | 70 using std::sort; |
| 72 using std::equal; | 71 using std::equal; |
| 73 using std::copy; | 72 using std::copy; |
| 74 using std::string; | 73 using std::string; |
| 75 using std::map; | 74 using std::map; |
| 76 | 75 |
| (...skipping 15 matching lines...) |
| 92 // header of the dumped heap profile | 91 // header of the dumped heap profile |
| 93 static const char kProfileHeader[] = "heap profile: "; | 92 static const char kProfileHeader[] = "heap profile: "; |
| 94 static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n"; | 93 static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n"; |
| 95 | 94 |
| 96 //---------------------------------------------------------------------- | 95 //---------------------------------------------------------------------- |
| 97 | 96 |
| 98 const char HeapProfileTable::kFileExt[] = ".heap"; | 97 const char HeapProfileTable::kFileExt[] = ".heap"; |
| 99 | 98 |
| 100 //---------------------------------------------------------------------- | 99 //---------------------------------------------------------------------- |
| 101 | 100 |
| 102 // Size for alloc_table_ and mmap_table_. | 101 static const int kHashTableSize = 179999; // Size for table_. |
| 103 static const int kHashTableSize = 179999; | |
| 104 /*static*/ const int HeapProfileTable::kMaxStackDepth; | 102 /*static*/ const int HeapProfileTable::kMaxStackDepth; |
| 105 | 103 |
| 106 //---------------------------------------------------------------------- | 104 //---------------------------------------------------------------------- |
| 107 | 105 |
| 108 // We strip out different number of stack frames in debug mode | 106 // We strip out different number of stack frames in debug mode |
| 109 // because less inlining happens in that case | 107 // because less inlining happens in that case |
| 110 #ifdef NDEBUG | 108 #ifdef NDEBUG |
| 111 static const int kStripFrames = 2; | 109 static const int kStripFrames = 2; |
| 112 #else | 110 #else |
| 113 static const int kStripFrames = 3; | 111 static const int kStripFrames = 3; |
| 114 #endif | 112 #endif |
| 115 | 113 |
| 116 // For sorting Stats or Buckets by in-use space | 114 // For sorting Stats or Buckets by in-use space |
| 117 static bool ByAllocatedSpace(HeapProfileTable::Stats* a, | 115 static bool ByAllocatedSpace(HeapProfileTable::Stats* a, |
| 118 HeapProfileTable::Stats* b) { | 116 HeapProfileTable::Stats* b) { |
| 119 // Return true iff "a" has more allocated space than "b" | 117 // Return true iff "a" has more allocated space than "b" |
| 120 return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size); | 118 return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size); |
| 121 } | 119 } |
| 122 | 120 |
| 123 //---------------------------------------------------------------------- | 121 //---------------------------------------------------------------------- |
| 124 | 122 |
| 125 HeapProfileTable::HeapProfileTable(Allocator alloc, DeAllocator dealloc) | 123 HeapProfileTable::HeapProfileTable(Allocator alloc, DeAllocator dealloc) |
| 126 : alloc_(alloc), dealloc_(dealloc) { | 124 : alloc_(alloc), dealloc_(dealloc) { |
| 127 // Initialize the overall profile stats. | 125 // Make the table |
| 126 const int table_bytes = kHashTableSize * sizeof(*table_); |
| 127 table_ = reinterpret_cast<Bucket**>(alloc_(table_bytes)); |
| 128 memset(table_, 0, table_bytes); |
| 129 // Make allocation map |
| 130 allocation_ = |
| 131 new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_); |
| 132 // init the rest: |
| 128 memset(&total_, 0, sizeof(total_)); | 133 memset(&total_, 0, sizeof(total_)); |
| 129 | 134 num_buckets_ = 0; |
| 130 // Make the malloc table. | |
| 131 const int alloc_table_bytes = kHashTableSize * sizeof(*alloc_table_); | |
| 132 alloc_table_ = reinterpret_cast<Bucket**>(alloc_(alloc_table_bytes)); | |
| 133 memset(alloc_table_, 0, alloc_table_bytes); | |
| 134 num_alloc_buckets_ = 0; | |
| 135 | |
| 136 // Initialize the mmap table. | |
| 137 mmap_table_ = NULL; | |
| 138 num_available_mmap_buckets_ = 0; | |
| 139 | |
| 140 // Make malloc and mmap allocation maps. | |
| 141 alloc_address_map_ = | |
| 142 new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_); | |
| 143 mmap_address_map_ = NULL; | |
| 144 } | 135 } |
| 145 | 136 |
| 146 HeapProfileTable::~HeapProfileTable() { | 137 HeapProfileTable::~HeapProfileTable() { |
| 147 DeallocateBucketTable(alloc_table_); | 138 // free allocation map |
| 148 alloc_table_ = NULL; | 139 allocation_->~AllocationMap(); |
| 149 DeallocateBucketTable(mmap_table_); | 140 dealloc_(allocation_); |
| 150 mmap_table_ = NULL; | 141 allocation_ = NULL; |
| 151 DeallocateAllocationMap(alloc_address_map_); | 142 // free hash table |
| 152 alloc_address_map_ = NULL; | 143 for (int b = 0; b < kHashTableSize; b++) { |
| 153 DeallocateAllocationMap(mmap_address_map_); | 144 for (Bucket* x = table_[b]; x != 0; /**/) { |
| 154 mmap_address_map_ = NULL; | 145 Bucket* b = x; |
| 146 x = x->next; |
| 147 dealloc_(b->stack); |
| 148 dealloc_(b); |
| 149 } |
| 150 } |
| 151 dealloc_(table_); |
| 152 table_ = NULL; |
| 155 } | 153 } |
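The new-side constructor and destructor above collapse the old alloc/mmap pair into one chained hash table (`table_`) plus one `AllocationMap` (`allocation_`). A rough standalone sketch of that layout, using simplified stand-in types rather than the real HeapProfileTable declarations:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>

typedef void* (*Allocator)(size_t);
typedef void  (*DeAllocator)(void*);

// Simplified stand-in; the real Bucket also carries per-stack statistics.
struct Bucket {
  uintptr_t hash;       // full hash, kept to cheapen chain comparisons
  int depth;            // number of frames in `stack`
  const void** stack;   // owned copy of the call stack
  Bucket* next;         // hash-chain link
};

static const int kHashTableSize = 179999;

struct TableSketch {
  Bucket** table;

  explicit TableSketch(Allocator alloc) {
    const size_t bytes = kHashTableSize * sizeof(*table);
    table = reinterpret_cast<Bucket**>(alloc(bytes));
    memset(table, 0, bytes);                 // every chain starts empty
  }

  // Mirrors ~HeapProfileTable: walk each chain, free the stack copy and the
  // bucket, then free the array of chain heads.
  void Destroy(DeAllocator dealloc) {
    for (int i = 0; i < kHashTableSize; i++) {
      for (Bucket* x = table[i]; x != NULL; /**/) {
        Bucket* doomed = x;
        x = x->next;
        dealloc(doomed->stack);
        dealloc(doomed);
      }
    }
    dealloc(table);
    table = NULL;
  }
};
```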
| 156 | 154 |
| 157 void HeapProfileTable::DeallocateAllocationMap(AllocationMap* allocation) { | 155 HeapProfileTable::Bucket* HeapProfileTable::GetBucket(int depth, |
| 158 if (allocation != NULL) { | 156 const void* const key[]) { |
| 159 alloc_address_map_->~AllocationMap(); | |
| 160 dealloc_(allocation); | |
| 161 } | |
| 162 } | |
| 163 | |
| 164 void HeapProfileTable::DeallocateBucketTable(Bucket** table) { | |
| 165 if (table != NULL) { | |
| 166 for (int b = 0; b < kHashTableSize; b++) { | |
| 167 for (Bucket* x = table[b]; x != 0; /**/) { | |
| 168 Bucket* b = x; | |
| 169 x = x->next; | |
| 170 dealloc_(b->stack); | |
| 171 dealloc_(b); | |
| 172 } | |
| 173 } | |
| 174 dealloc_(table); | |
| 175 } | |
| 176 } | |
| 177 | |
| 178 HeapProfileTable::Bucket* HeapProfileTable::GetBucket( | |
| 179 int depth, const void* const key[], Bucket** table, | |
| 180 int* bucket_count) { | |
| 181 // Make hash-value | 157 // Make hash-value |
| 182 uintptr_t h = 0; | 158 uintptr_t h = 0; |
| 183 for (int i = 0; i < depth; i++) { | 159 for (int i = 0; i < depth; i++) { |
| 184 h += reinterpret_cast<uintptr_t>(key[i]); | 160 h += reinterpret_cast<uintptr_t>(key[i]); |
| 185 h += h << 10; | 161 h += h << 10; |
| 186 h ^= h >> 6; | 162 h ^= h >> 6; |
| 187 } | 163 } |
| 188 h += h << 3; | 164 h += h << 3; |
| 189 h ^= h >> 11; | 165 h ^= h >> 11; |
| 190 | 166 |
| 191 // Lookup stack trace in table | 167 // Lookup stack trace in table |
| 192 unsigned int buck = ((unsigned int) h) % kHashTableSize; | 168 unsigned int buck = ((unsigned int) h) % kHashTableSize; |
| 193 for (Bucket* b = table[buck]; b != 0; b = b->next) { | 169 for (Bucket* b = table_[buck]; b != 0; b = b->next) { |
| 194 if ((b->hash == h) && | 170 if ((b->hash == h) && |
| 195 (b->depth == depth) && | 171 (b->depth == depth) && |
| 196 equal(key, key + depth, b->stack)) { | 172 equal(key, key + depth, b->stack)) { |
| 197 return b; | 173 return b; |
| 198 } | 174 } |
| 199 } | 175 } |
| 200 | 176 |
| 201 // Create new bucket | 177 // Create new bucket |
| 202 const size_t key_size = sizeof(key[0]) * depth; | 178 const size_t key_size = sizeof(key[0]) * depth; |
| 203 const void** kcopy = reinterpret_cast<const void**>(alloc_(key_size)); | 179 const void** kcopy = reinterpret_cast<const void**>(alloc_(key_size)); |
| 204 copy(key, key + depth, kcopy); | 180 copy(key, key + depth, kcopy); |
| 205 Bucket* b = reinterpret_cast<Bucket*>(alloc_(sizeof(Bucket))); | 181 Bucket* b = reinterpret_cast<Bucket*>(alloc_(sizeof(Bucket))); |
| 206 memset(b, 0, sizeof(*b)); | 182 memset(b, 0, sizeof(*b)); |
| 207 b->hash = h; | 183 b->hash = h; |
| 208 b->depth = depth; | 184 b->depth = depth; |
| 209 b->stack = kcopy; | 185 b->stack = kcopy; |
| 210 b->next = table[buck]; | 186 b->next = table_[buck]; |
| 211 table[buck] = b; | 187 table_[buck] = b; |
| 212 if (bucket_count != NULL) { | 188 num_buckets_++; |
| 213 ++(*bucket_count); | |
| 214 } | |
| 215 return b; | 189 return b; |
| 216 } | 190 } |
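`GetBucket` hashes the raw return addresses with a Jenkins-style one-at-a-time mix, then searches a single chain, comparing the stored hash, depth, and frames before allocating a new bucket. A condensed sketch of just the hashing step (the real function also keeps the full `h` in the bucket so chain lookups can reject mismatches cheaply):

```cpp
#include <cstdint>

static const int kHashTableSize = 179999;

// Map a call stack (array of return addresses) to a chain index.
unsigned int HashStack(const void* const key[], int depth) {
  uintptr_t h = 0;
  for (int i = 0; i < depth; i++) {
    h += reinterpret_cast<uintptr_t>(key[i]);  // mix in each frame address
    h += h << 10;
    h ^= h >> 6;
  }
  h += h << 3;
  h ^= h >> 11;
  return static_cast<unsigned int>(h) % kHashTableSize;
}
```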
| 217 | 191 |
| 218 int HeapProfileTable::GetCallerStackTrace( | 192 void HeapProfileTable::RecordAlloc(const void* ptr, size_t bytes, |
| 219 int skip_count, void* stack[kMaxStackDepth]) { | 193 int skip_count) { |
| 220 return MallocHook::GetCallerStackTrace( | 194 void* key[kMaxStackDepth]; |
| 221 stack, kMaxStackDepth, kStripFrames + skip_count + 1); | 195 int depth = MallocHook::GetCallerStackTrace( |
| 196 key, kMaxStackDepth, kStripFrames + skip_count + 1); |
| 197 RecordAllocWithStack(ptr, bytes, depth, key); |
| 222 } | 198 } |
| 223 | 199 |
| 224 void HeapProfileTable::RecordAlloc( | 200 void HeapProfileTable::RecordAllocWithStack( |
| 225 const void* ptr, size_t bytes, int stack_depth, | 201 const void* ptr, size_t bytes, int stack_depth, |
| 226 const void* const call_stack[]) { | 202 const void* const call_stack[]) { |
| 227 Bucket* b = GetBucket(stack_depth, call_stack, alloc_table_, | 203 Bucket* b = GetBucket(stack_depth, call_stack); |
| 228 &num_alloc_buckets_); | |
| 229 b->allocs++; | 204 b->allocs++; |
| 230 b->alloc_size += bytes; | 205 b->alloc_size += bytes; |
| 231 total_.allocs++; | 206 total_.allocs++; |
| 232 total_.alloc_size += bytes; | 207 total_.alloc_size += bytes; |
| 233 | 208 |
| 234 AllocValue v; | 209 AllocValue v; |
| 235 v.set_bucket(b); // also did set_live(false); set_ignore(false) | 210 v.set_bucket(b); // also did set_live(false); set_ignore(false) |
| 236 v.bytes = bytes; | 211 v.bytes = bytes; |
| 237 alloc_address_map_->Insert(ptr, v); | 212 allocation_->Insert(ptr, v); |
| 238 } | 213 } |
| 239 | 214 |
| 240 void HeapProfileTable::RecordFree(const void* ptr) { | 215 void HeapProfileTable::RecordFree(const void* ptr) { |
| 241 AllocValue v; | 216 AllocValue v; |
| 242 if (alloc_address_map_->FindAndRemove(ptr, &v)) { | 217 if (allocation_->FindAndRemove(ptr, &v)) { |
| 243 Bucket* b = v.bucket(); | 218 Bucket* b = v.bucket(); |
| 244 b->frees++; | 219 b->frees++; |
| 245 b->free_size += v.bytes; | 220 b->free_size += v.bytes; |
| 246 total_.frees++; | 221 total_.frees++; |
| 247 total_.free_size += v.bytes; | 222 total_.free_size += v.bytes; |
| 248 } | 223 } |
| 249 } | 224 } |
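`RecordAllocWithStack` charges each allocation to its stack's bucket and to the table-wide totals, and records the pointer so `RecordFree` can later subtract the same amounts from the same bucket. A toy model of that double-entry bookkeeping; the `std::map` here is only a stand-in, since the real profiler must avoid STL containers that would re-enter malloc and instead uses its own `AllocationMap` built on the injected allocator:

```cpp
#include <cstddef>
#include <map>
#include <utility>

struct Stats { int allocs, frees; size_t alloc_size, free_size; };

struct MiniProfile {
  Stats total;
  // live pointer -> (its bucket's stats, allocation size)
  std::map<const void*, std::pair<Stats*, size_t> > live;

  MiniProfile() {
    total.allocs = total.frees = 0;
    total.alloc_size = total.free_size = 0;
  }

  void RecordAlloc(const void* ptr, size_t bytes, Stats* bucket) {
    bucket->allocs++;  bucket->alloc_size += bytes;   // per-stack totals
    total.allocs++;    total.alloc_size  += bytes;    // table-wide totals
    live[ptr] = std::make_pair(bucket, bytes);
  }

  void RecordFree(const void* ptr) {
    std::map<const void*, std::pair<Stats*, size_t> >::iterator it =
        live.find(ptr);
    if (it == live.end()) return;                     // untracked: ignore
    it->second.first->frees++;
    it->second.first->free_size += it->second.second;
    total.frees++;
    total.free_size += it->second.second;
    live.erase(it);
  }
};
```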
| 250 | 225 |
| 251 bool HeapProfileTable::FindAlloc(const void* ptr, size_t* object_size) const { | 226 bool HeapProfileTable::FindAlloc(const void* ptr, size_t* object_size) const { |
| 252 const AllocValue* alloc_value = alloc_address_map_->Find(ptr); | 227 const AllocValue* alloc_value = allocation_->Find(ptr); |
| 253 if (alloc_value != NULL) *object_size = alloc_value->bytes; | 228 if (alloc_value != NULL) *object_size = alloc_value->bytes; |
| 254 return alloc_value != NULL; | 229 return alloc_value != NULL; |
| 255 } | 230 } |
| 256 | 231 |
| 257 bool HeapProfileTable::FindAllocDetails(const void* ptr, | 232 bool HeapProfileTable::FindAllocDetails(const void* ptr, |
| 258 AllocInfo* info) const { | 233 AllocInfo* info) const { |
| 259 const AllocValue* alloc_value = alloc_address_map_->Find(ptr); | 234 const AllocValue* alloc_value = allocation_->Find(ptr); |
| 260 if (alloc_value != NULL) { | 235 if (alloc_value != NULL) { |
| 261 info->object_size = alloc_value->bytes; | 236 info->object_size = alloc_value->bytes; |
| 262 info->call_stack = alloc_value->bucket()->stack; | 237 info->call_stack = alloc_value->bucket()->stack; |
| 263 info->stack_depth = alloc_value->bucket()->depth; | 238 info->stack_depth = alloc_value->bucket()->depth; |
| 264 } | 239 } |
| 265 return alloc_value != NULL; | 240 return alloc_value != NULL; |
| 266 } | 241 } |
| 267 | 242 |
| 268 bool HeapProfileTable::FindInsideAlloc(const void* ptr, | 243 bool HeapProfileTable::FindInsideAlloc(const void* ptr, |
| 269 size_t max_size, | 244 size_t max_size, |
| 270 const void** object_ptr, | 245 const void** object_ptr, |
| 271 size_t* object_size) const { | 246 size_t* object_size) const { |
| 272 const AllocValue* alloc_value = | 247 const AllocValue* alloc_value = |
| 273 alloc_address_map_->FindInside(&AllocValueSize, max_size, ptr, object_ptr); | 248 allocation_->FindInside(&AllocValueSize, max_size, ptr, object_ptr); |
| 274 if (alloc_value != NULL) *object_size = alloc_value->bytes; | 249 if (alloc_value != NULL) *object_size = alloc_value->bytes; |
| 275 return alloc_value != NULL; | 250 return alloc_value != NULL; |
| 276 } | 251 } |
| 277 | 252 |
| 278 bool HeapProfileTable::MarkAsLive(const void* ptr) { | 253 bool HeapProfileTable::MarkAsLive(const void* ptr) { |
| 279 AllocValue* alloc = alloc_address_map_->FindMutable(ptr); | 254 AllocValue* alloc = allocation_->FindMutable(ptr); |
| 280 if (alloc && !alloc->live()) { | 255 if (alloc && !alloc->live()) { |
| 281 alloc->set_live(true); | 256 alloc->set_live(true); |
| 282 return true; | 257 return true; |
| 283 } | 258 } |
| 284 return false; | 259 return false; |
| 285 } | 260 } |
| 286 | 261 |
| 287 void HeapProfileTable::MarkAsIgnored(const void* ptr) { | 262 void HeapProfileTable::MarkAsIgnored(const void* ptr) { |
| 288 AllocValue* alloc = alloc_address_map_->FindMutable(ptr); | 263 AllocValue* alloc = allocation_->FindMutable(ptr); |
| 289 if (alloc) { | 264 if (alloc) { |
| 290 alloc->set_ignore(true); | 265 alloc->set_ignore(true); |
| 291 } | 266 } |
| 292 } | 267 } |
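A hypothetical illustration of how the live/ignore flags are meant to be used (this is not the heap checker's actual traversal, and `MarkReachable`/`roots` are invented names): objects marked live are skipped by a later `NonLiveSnapshot()`, while `MarkAsIgnored()` excludes an object from leak reports outright.

```cpp
#include <cstddef>
#include <vector>

#include "heap-profile-table.h"

static void MarkReachable(HeapProfileTable* table,
                          const std::vector<const void*>& roots) {
  for (size_t i = 0; i < roots.size(); ++i) {
    // MarkAsLive returns true only the first time a tracked object is
    // marked, so a real traversal could use the result to avoid revisiting.
    table->MarkAsLive(roots[i]);
  }
}
```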
| 293 | 268 |
| 294 // We'd be happier using snprintfer, but we don't to reduce dependencies. | 269 // We'd be happier using snprintfer, but we don't to reduce dependencies. |
| 295 int HeapProfileTable::UnparseBucket(const Bucket& b, | 270 int HeapProfileTable::UnparseBucket(const Bucket& b, |
| 296 char* buf, int buflen, int bufsize, | 271 char* buf, int buflen, int bufsize, |
| 297 const char* extra, | 272 const char* extra, |
| 298 Stats* profile_stats) { | 273 Stats* profile_stats) { |
| (...skipping 20 matching lines...) |
| 319 buflen += printed; | 294 buflen += printed; |
| 320 } | 295 } |
| 321 printed = snprintf(buf + buflen, bufsize - buflen, "\n"); | 296 printed = snprintf(buf + buflen, bufsize - buflen, "\n"); |
| 322 if (printed < 0 || printed >= bufsize - buflen) return buflen; | 297 if (printed < 0 || printed >= bufsize - buflen) return buflen; |
| 323 buflen += printed; | 298 buflen += printed; |
| 324 return buflen; | 299 return buflen; |
| 325 } | 300 } |
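The tail of `UnparseBucket` shows the append idiom used throughout the dumping code: every `snprintf` writes at the current offset, and the new length is accepted only if the text fit, so output is truncated rather than overflowed. Isolated here as a small helper for clarity (a sketch, not a function in this file):

```cpp
#include <cstdio>

static int AppendFormatted(char* buf, int buflen, int bufsize,
                           const char* text) {
  if (buflen < 0 || buflen >= bufsize) return buflen;  // no room at all
  int printed = snprintf(buf + buflen, bufsize - buflen, "%s", text);
  if (printed < 0 || printed >= bufsize - buflen) return buflen;  // didn't fit
  return buflen + printed;
}
```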
| 326 | 301 |
| 327 HeapProfileTable::Bucket** | 302 HeapProfileTable::Bucket** |
| 328 HeapProfileTable::MakeSortedBucketList() const { | 303 HeapProfileTable::MakeSortedBucketList() const { |
| 329 Bucket** list = reinterpret_cast<Bucket**>(alloc_(sizeof(Bucket) * | 304 Bucket** list = |
| 330 (num_alloc_buckets_ + num_available_mmap_buckets_))); | 305 reinterpret_cast<Bucket**>(alloc_(sizeof(Bucket) * num_buckets_)); |
| 331 | |
| 332 RAW_DCHECK(mmap_table_ != NULL || num_available_mmap_buckets_ == 0, ""); | |
| 333 | 306 |
| 334 int n = 0; | 307 int n = 0; |
| 335 | |
| 336 for (int b = 0; b < kHashTableSize; b++) { | 308 for (int b = 0; b < kHashTableSize; b++) { |
| 337 for (Bucket* x = alloc_table_[b]; x != 0; x = x->next) { | 309 for (Bucket* x = table_[b]; x != 0; x = x->next) { |
| 338 list[n++] = x; | 310 list[n++] = x; |
| 339 } | 311 } |
| 340 } | 312 } |
| 341 RAW_DCHECK(n == num_alloc_buckets_, ""); | 313 RAW_DCHECK(n == num_buckets_, ""); |
| 342 | 314 |
| 343 if (mmap_table_ != NULL) { | 315 sort(list, list + num_buckets_, ByAllocatedSpace); |
| 344 for (int b = 0; b < kHashTableSize; b++) { | |
| 345 for (Bucket* x = mmap_table_[b]; x != 0; x = x->next) { | |
| 346 list[n++] = x; | |
| 347 } | |
| 348 } | |
| 349 } | |
| 350 RAW_DCHECK(n == num_alloc_buckets_ + num_available_mmap_buckets_, ""); | |
| 351 | |
| 352 sort(list, list + num_alloc_buckets_ + num_available_mmap_buckets_, | |
| 353 ByAllocatedSpace); | |
| 354 | 316 |
| 355 return list; | 317 return list; |
| 356 } | 318 } |
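One small quirk visible on both sides: the scratch array is sized with `sizeof(Bucket)` per entry even though it holds `Bucket*`, so it appears to over-allocate (harmlessly). The simplified flow of the new `MakeSortedBucketList`, with stand-in types and `new[]` in place of `alloc_`:

```cpp
#include <algorithm>

// Simplified stand-ins for the real types.
struct Bucket {
  long long alloc_size, free_size;
  Bucket* next;
};

static bool ByAllocatedSpace(Bucket* a, Bucket* b) {
  return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size);
}

// Walk every hash chain once, collect the Bucket pointers into a flat array,
// then order them by in-use space.
static Bucket** FlattenAndSort(Bucket** table, int table_size,
                               int num_buckets) {
  Bucket** list = new Bucket*[num_buckets];
  int n = 0;
  for (int b = 0; b < table_size; b++)
    for (Bucket* x = table[b]; x != 0; x = x->next)
      list[n++] = x;                    // each bucket appears exactly once
  std::sort(list, list + num_buckets, ByAllocatedSpace);
  return list;                          // caller releases with delete[]
}
```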
| 357 | 319 |
| 358 void HeapProfileTable::RefreshMMapData() { | |
| 359 // Make the table | |
| 360 static const int mmap_table_bytes = kHashTableSize * sizeof(*mmap_table_); | |
| 361 if (mmap_table_ == NULL) { | |
| 362 mmap_table_ = reinterpret_cast<Bucket**>(alloc_(mmap_table_bytes)); | |
| 363 memset(mmap_table_, 0, mmap_table_bytes); | |
| 364 } | |
| 365 num_available_mmap_buckets_ = 0; | |
| 366 | |
| 367 ClearMMapData(); | |
| 368 mmap_address_map_ = | |
| 369 new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_); | |
| 370 | |
| 371 MemoryRegionMap::LockHolder l; | |
| 372 for (MemoryRegionMap::RegionIterator r = | |
| 373 MemoryRegionMap::BeginRegionLocked(); | |
| 374 r != MemoryRegionMap::EndRegionLocked(); ++r) { | |
| 375 Bucket* b = | |
| 376 GetBucket(r->call_stack_depth, r->call_stack, mmap_table_, NULL); | |
| 377 if (b->alloc_size == 0) { | |
| 378 num_available_mmap_buckets_ += 1; | |
| 379 } | |
| 380 b->allocs += 1; | |
| 381 b->alloc_size += r->end_addr - r->start_addr; | |
| 382 | |
| 383 AllocValue v; | |
| 384 v.set_bucket(b); | |
| 385 v.bytes = r->end_addr - r->start_addr; | |
| 386 mmap_address_map_->Insert(reinterpret_cast<const void*>(r->start_addr), v); | |
| 387 } | |
| 388 } | |
| 389 | |
| 390 void HeapProfileTable::ClearMMapData() { | |
| 391 if (mmap_address_map_ != NULL) { | |
| 392 mmap_address_map_->Iterate(ZeroBucketCountsIterator, this); | |
| 393 mmap_address_map_->~AllocationMap(); | |
| 394 dealloc_(mmap_address_map_); | |
| 395 mmap_address_map_ = NULL; | |
| 396 } | |
| 397 } | |
| 398 | |
| 399 void HeapProfileTable::IterateOrderedAllocContexts( | 320 void HeapProfileTable::IterateOrderedAllocContexts( |
| 400 AllocContextIterator callback) const { | 321 AllocContextIterator callback) const { |
| 401 Bucket** list = MakeSortedBucketList(); | 322 Bucket** list = MakeSortedBucketList(); |
| 402 AllocContextInfo info; | 323 AllocContextInfo info; |
| 403 for (int i = 0; i < num_alloc_buckets_; ++i) { | 324 for (int i = 0; i < num_buckets_; ++i) { |
| 404 *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]); | 325 *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]); |
| 405 info.stack_depth = list[i]->depth; | 326 info.stack_depth = list[i]->depth; |
| 406 info.call_stack = list[i]->stack; | 327 info.call_stack = list[i]->stack; |
| 407 callback(info); | 328 callback(info); |
| 408 } | 329 } |
| 409 dealloc_(list); | 330 dealloc_(list); |
| 410 } | 331 } |
| 411 | 332 |
| 412 int HeapProfileTable::FillOrderedProfile(char buf[], int size) const { | 333 int HeapProfileTable::FillOrderedProfile(char buf[], int size) const { |
| 413 Bucket** list = MakeSortedBucketList(); | 334 Bucket** list = MakeSortedBucketList(); |
| (...skipping 11 matching lines...) |
| 425 map_length += FillProcSelfMaps(buf + map_length, size - map_length, &dummy); | 346 map_length += FillProcSelfMaps(buf + map_length, size - map_length, &dummy); |
| 426 RAW_DCHECK(map_length <= size, ""); | 347 RAW_DCHECK(map_length <= size, ""); |
| 427 char* const map_start = buf + size - map_length; // move to end | 348 char* const map_start = buf + size - map_length; // move to end |
| 428 memmove(map_start, buf, map_length); | 349 memmove(map_start, buf, map_length); |
| 429 size -= map_length; | 350 size -= map_length; |
| 430 | 351 |
| 431 Stats stats; | 352 Stats stats; |
| 432 memset(&stats, 0, sizeof(stats)); | 353 memset(&stats, 0, sizeof(stats)); |
| 433 int bucket_length = snprintf(buf, size, "%s", kProfileHeader); | 354 int bucket_length = snprintf(buf, size, "%s", kProfileHeader); |
| 434 if (bucket_length < 0 || bucket_length >= size) return 0; | 355 if (bucket_length < 0 || bucket_length >= size) return 0; |
| 435 Bucket total_with_mmap(total_); | 356 bucket_length = UnparseBucket(total_, buf, bucket_length, size, |
| 436 if (mmap_table_ != NULL) { | |
| 437 total_with_mmap.alloc_size += MemoryRegionMap::MapSize(); | |
| 438 total_with_mmap.free_size += MemoryRegionMap::UnmapSize(); | |
| 439 } | |
| 440 bucket_length = UnparseBucket(total_with_mmap, buf, bucket_length, size, | |
| 441 " heapprofile", &stats); | 357 " heapprofile", &stats); |
| 442 for (int i = 0; i < num_alloc_buckets_; i++) { | 358 for (int i = 0; i < num_buckets_; i++) { |
| 443 bucket_length = UnparseBucket(*list[i], buf, bucket_length, size, "", | 359 bucket_length = UnparseBucket(*list[i], buf, bucket_length, size, "", |
| 444 &stats); | 360 &stats); |
| 445 } | 361 } |
| 446 RAW_DCHECK(bucket_length < size, ""); | 362 RAW_DCHECK(bucket_length < size, ""); |
| 447 | 363 |
| 448 dealloc_(list); | 364 dealloc_(list); |
| 449 | 365 |
| 450 RAW_DCHECK(buf + bucket_length <= map_start, ""); | 366 RAW_DCHECK(buf + bucket_length <= map_start, ""); |
| 451 memmove(buf + bucket_length, map_start, map_length); // close the gap | 367 memmove(buf + bucket_length, map_start, map_length); // close the gap |
| 452 | 368 |
| (...skipping 14 matching lines...) |
| 467 memset(&b, 0, sizeof(b)); | 383 memset(&b, 0, sizeof(b)); |
| 468 b.allocs = 1; | 384 b.allocs = 1; |
| 469 b.alloc_size = v->bytes; | 385 b.alloc_size = v->bytes; |
| 470 b.depth = v->bucket()->depth; | 386 b.depth = v->bucket()->depth; |
| 471 b.stack = v->bucket()->stack; | 387 b.stack = v->bucket()->stack; |
| 472 char buf[1024]; | 388 char buf[1024]; |
| 473 int len = UnparseBucket(b, buf, 0, sizeof(buf), "", args.profile_stats); | 389 int len = UnparseBucket(b, buf, 0, sizeof(buf), "", args.profile_stats); |
| 474 RawWrite(args.fd, buf, len); | 390 RawWrite(args.fd, buf, len); |
| 475 } | 391 } |
| 476 | 392 |
| 477 inline void HeapProfileTable::ZeroBucketCountsIterator( | |
| 478 const void* ptr, AllocValue* v, HeapProfileTable* heap_profile) { | |
| 479 Bucket* b = v->bucket(); | |
| 480 if (b != NULL) { | |
| 481 b->allocs = 0; | |
| 482 b->alloc_size = 0; | |
| 483 b->free_size = 0; | |
| 484 b->frees = 0; | |
| 485 } | |
| 486 } | |
| 487 | |
| 488 // Callback from NonLiveSnapshot; adds entry to arg->dest | 393 // Callback from NonLiveSnapshot; adds entry to arg->dest |
| 489 // if the entry is not live and is not present in arg->base. | 394 // if the entry is not live and is not present in arg->base. |
| 490 void HeapProfileTable::AddIfNonLive(const void* ptr, AllocValue* v, | 395 void HeapProfileTable::AddIfNonLive(const void* ptr, AllocValue* v, |
| 491 AddNonLiveArgs* arg) { | 396 AddNonLiveArgs* arg) { |
| 492 if (v->live()) { | 397 if (v->live()) { |
| 493 v->set_live(false); | 398 v->set_live(false); |
| 494 } else { | 399 } else { |
| 495 if (arg->base != NULL && arg->base->map_.Find(ptr) != NULL) { | 400 if (arg->base != NULL && arg->base->map_.Find(ptr) != NULL) { |
| 496 // Present in arg->base, so do not save | 401 // Present in arg->base, so do not save |
| 497 } else { | 402 } else { |
| (...skipping 47 matching lines...) |
| 545 } | 450 } |
| 546 } | 451 } |
| 547 globfree(&g); | 452 globfree(&g); |
| 548 #else /* HAVE_GLOB_H */ | 453 #else /* HAVE_GLOB_H */ |
| 549 RAW_LOG(WARNING, "Unable to remove old heap profiles (can't run glob())"); | 454 RAW_LOG(WARNING, "Unable to remove old heap profiles (can't run glob())"); |
| 550 #endif | 455 #endif |
| 551 } | 456 } |
| 552 | 457 |
| 553 HeapProfileTable::Snapshot* HeapProfileTable::TakeSnapshot() { | 458 HeapProfileTable::Snapshot* HeapProfileTable::TakeSnapshot() { |
| 554 Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_); | 459 Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_); |
| 555 alloc_address_map_->Iterate(AddToSnapshot, s); | 460 allocation_->Iterate(AddToSnapshot, s); |
| 556 return s; | 461 return s; |
| 557 } | 462 } |
| 558 | 463 |
| 559 void HeapProfileTable::ReleaseSnapshot(Snapshot* s) { | 464 void HeapProfileTable::ReleaseSnapshot(Snapshot* s) { |
| 560 s->~Snapshot(); | 465 s->~Snapshot(); |
| 561 dealloc_(s); | 466 dealloc_(s); |
| 562 } | 467 } |
| 563 | 468 |
| 564 // Callback from TakeSnapshot; adds a single entry to snapshot | 469 // Callback from TakeSnapshot; adds a single entry to snapshot |
| 565 void HeapProfileTable::AddToSnapshot(const void* ptr, AllocValue* v, | 470 void HeapProfileTable::AddToSnapshot(const void* ptr, AllocValue* v, |
| 566 Snapshot* snapshot) { | 471 Snapshot* snapshot) { |
| 567 snapshot->Add(ptr, *v); | 472 snapshot->Add(ptr, *v); |
| 568 } | 473 } |
| 569 | 474 |
| 570 HeapProfileTable::Snapshot* HeapProfileTable::NonLiveSnapshot( | 475 HeapProfileTable::Snapshot* HeapProfileTable::NonLiveSnapshot( |
| 571 Snapshot* base) { | 476 Snapshot* base) { |
| 572 RAW_VLOG(2, "NonLiveSnapshot input: %d %d\n", | 477 RAW_VLOG(2, "NonLiveSnapshot input: %d %d\n", |
| 573 int(total_.allocs - total_.frees), | 478 int(total_.allocs - total_.frees), |
| 574 int(total_.alloc_size - total_.free_size)); | 479 int(total_.alloc_size - total_.free_size)); |
| 575 | 480 |
| 576 Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_); | 481 Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_); |
| 577 AddNonLiveArgs args; | 482 AddNonLiveArgs args; |
| 578 args.dest = s; | 483 args.dest = s; |
| 579 args.base = base; | 484 args.base = base; |
| 580 alloc_address_map_->Iterate<AddNonLiveArgs*>(AddIfNonLive, &args); | 485 allocation_->Iterate<AddNonLiveArgs*>(AddIfNonLive, &args); |
| 581 RAW_VLOG(2, "NonLiveSnapshot output: %d %d\n", | 486 RAW_VLOG(2, "NonLiveSnapshot output: %d %d\n", |
| 582 int(s->total_.allocs - s->total_.frees), | 487 int(s->total_.allocs - s->total_.frees), |
| 583 int(s->total_.alloc_size - s->total_.free_size)); | 488 int(s->total_.alloc_size - s->total_.free_size)); |
| 584 return s; | 489 return s; |
| 585 } | 490 } |
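A sketch of the intended snapshot lifecycle around `NonLiveSnapshot`, assuming a live `HeapProfileTable* table` and an earlier baseline snapshot; `ReportLeakCandidates` is an invented wrapper, not part of this file:

```cpp
#include "heap-profile-table.h"

// TakeSnapshot copies the current pointer->allocation map; NonLiveSnapshot
// then keeps only objects that were never marked live and are absent from
// the baseline, i.e. the leak candidates.
static void ReportLeakCandidates(HeapProfileTable* table,
                                 HeapProfileTable::Snapshot* baseline) {
  HeapProfileTable::Snapshot* leaks = table->NonLiveSnapshot(baseline);
  leaks->ReportIndividualObjects();   // RAW_LOGs each leaked object
  table->ReleaseSnapshot(leaks);      // snapshots come from the table's
                                      // allocator, so the table frees them
}
```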
| 586 | 491 |
| 587 // Information kept per unique bucket seen | 492 // Information kept per unique bucket seen |
| 588 struct HeapProfileTable::Snapshot::Entry { | 493 struct HeapProfileTable::Snapshot::Entry { |
| 589 int count; | 494 int count; |
| 590 int bytes; | 495 int bytes; |
| (...skipping 99 matching lines...) |
| 690 char* unused) { | 595 char* unused) { |
| 691 // Perhaps also log the allocation stack trace (unsymbolized) | 596 // Perhaps also log the allocation stack trace (unsymbolized) |
| 692 // on this line in case somebody finds it useful. | 597 // on this line in case somebody finds it useful. |
| 693 RAW_LOG(ERROR, "leaked %"PRIuS" byte object %p", v->bytes, ptr); | 598 RAW_LOG(ERROR, "leaked %"PRIuS" byte object %p", v->bytes, ptr); |
| 694 } | 599 } |
| 695 | 600 |
| 696 void HeapProfileTable::Snapshot::ReportIndividualObjects() { | 601 void HeapProfileTable::Snapshot::ReportIndividualObjects() { |
| 697 char unused; | 602 char unused; |
| 698 map_.Iterate(ReportObject, &unused); | 603 map_.Iterate(ReportObject, &unused); |
| 699 } | 604 } |