OLD | NEW |
1 // Copyright (c) 2006, Google Inc. | 1 // Copyright (c) 2006, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 272 matching lines...)
283 return false; | 283 return false; |
284 } | 284 } |
285 | 285 |
286 void HeapProfileTable::MarkAsIgnored(const void* ptr) { | 286 void HeapProfileTable::MarkAsIgnored(const void* ptr) { |
287 AllocValue* alloc = alloc_address_map_->FindMutable(ptr); | 287 AllocValue* alloc = alloc_address_map_->FindMutable(ptr); |
288 if (alloc) { | 288 if (alloc) { |
289 alloc->set_ignore(true); | 289 alloc->set_ignore(true); |
290 } | 290 } |
291 } | 291 } |
292 | 292 |
| 293 void HeapProfileTable::MarkAllAsIgnored() { |
| 294 alloc_address_map_->Iterate(MarkAllIterator, true); |
| 295 } |
| 296 |
| 297 void HeapProfileTable::MarkAllAsLive() { |
| 298 alloc_address_map_->Iterate(MarkAllIterator, false); |
| 299 } |
| 300 |
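MarkAllAsIgnored() and MarkAllAsLive() both delegate to MarkAllIterator, which lives in heap-profile-table.h and is not shown in this hunk. A minimal sketch of what that helper is assumed to look like (forwarding the boolean flag to the per-allocation bits; not the actual header change):

    // Assumed companion helper (sketch): called once per tracked allocation;
    // a true argument sets the ignore bit, a false argument sets the live bit.
    static void MarkAllIterator(const void* ptr, AllocValue* v,
                                bool mark_as_ignored) {
      if (mark_as_ignored)
        v->set_ignore(true);
      else
        v->set_live(true);
    }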
293 // We'd be happier using snprintf, but we avoid it to reduce dependencies. | 301 // We'd be happier using snprintf, but we avoid it to reduce dependencies. |
294 int HeapProfileTable::UnparseBucket(const Bucket& b, | 302 int HeapProfileTable::UnparseBucket(const Bucket& b, |
295 char* buf, int buflen, int bufsize, | 303 char* buf, int buflen, int bufsize, |
296 const char* extra, | 304 const char* extra, |
297 Stats* profile_stats) { | 305 Stats* profile_stats) { |
298 if (profile_stats != NULL) { | 306 if (profile_stats != NULL) { |
299 profile_stats->allocs += b.allocs; | 307 profile_stats->allocs += b.allocs; |
300 profile_stats->alloc_size += b.alloc_size; | 308 profile_stats->alloc_size += b.alloc_size; |
301 profile_stats->frees += b.frees; | 309 profile_stats->frees += b.frees; |
302 profile_stats->free_size += b.free_size; | 310 profile_stats->free_size += b.free_size; |
(...skipping 86 matching lines...)
389 | 397 |
390 void HeapProfileTable::ClearMMapData() { | 398 void HeapProfileTable::ClearMMapData() { |
391 if (mmap_address_map_ == NULL) return; | 399 if (mmap_address_map_ == NULL) return; |
392 | 400 |
393 mmap_address_map_->Iterate(ZeroBucketCountsIterator, this); | 401 mmap_address_map_->Iterate(ZeroBucketCountsIterator, this); |
394 mmap_address_map_->~AllocationMap(); | 402 mmap_address_map_->~AllocationMap(); |
395 dealloc_(mmap_address_map_); | 403 dealloc_(mmap_address_map_); |
396 mmap_address_map_ = NULL; | 404 mmap_address_map_ = NULL; |
397 } | 405 } |
398 | 406 |
| 407 void HeapProfileTable::DumpLiveObjects(const char* file_name) { |
| 408 RawFD fd = RawOpenForWriting(file_name); |
| 409 if (fd == kIllegalRawFD) { |
| 410 RAW_LOG(ERROR, "Failed dumping live objects to %s", file_name); |
| 411 return; |
| 412 } |
| 413 const DumpArgs args(fd, NULL); |
| 414 alloc_address_map_->Iterate<const DumpArgs&>(DumpLiveIterator, args); |
| 415 RawClose(fd); |
| 416 } |
| 417 |
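For context, a minimal caller-side sketch of how the new entry points compose; the function name, the excluded pointer, and the output path below are illustrative assumptions, not part of this change:

    #include "heap-profile-table.h"

    // Sketch: mark every tracked allocation as live, optionally exclude a
    // pointer, then write one record per remaining live object to a file.
    static void DumpRemainingObjects(HeapProfileTable* table,
                                     const void* skip_me) {
      table->MarkAllAsLive();                            // set the live bit everywhere
      table->MarkAsIgnored(skip_me);                     // hypothetical pointer to exclude
      table->DumpLiveObjects("/tmp/live_objects.heap");  // illustrative path
    }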
399 void HeapProfileTable::IterateOrderedAllocContexts( | 418 void HeapProfileTable::IterateOrderedAllocContexts( |
400 AllocContextIterator callback) const { | 419 AllocContextIterator callback) const { |
401 Bucket** list = MakeSortedBucketList(); | 420 Bucket** list = MakeSortedBucketList(); |
402 AllocContextInfo info; | 421 AllocContextInfo info; |
403 for (int i = 0; i < num_alloc_buckets_; ++i) { | 422 for (int i = 0; i < num_alloc_buckets_; ++i) { |
404 *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]); | 423 *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]); |
405 info.stack_depth = list[i]->depth; | 424 info.stack_depth = list[i]->depth; |
406 info.call_stack = list[i]->stack; | 425 info.call_stack = list[i]->stack; |
407 callback(info); | 426 callback(info); |
408 } | 427 } |
(...skipping 58 matching lines...)
467 memset(&b, 0, sizeof(b)); | 486 memset(&b, 0, sizeof(b)); |
468 b.allocs = 1; | 487 b.allocs = 1; |
469 b.alloc_size = v->bytes; | 488 b.alloc_size = v->bytes; |
470 b.depth = v->bucket()->depth; | 489 b.depth = v->bucket()->depth; |
471 b.stack = v->bucket()->stack; | 490 b.stack = v->bucket()->stack; |
472 char buf[1024]; | 491 char buf[1024]; |
473 int len = UnparseBucket(b, buf, 0, sizeof(buf), "", args.profile_stats); | 492 int len = UnparseBucket(b, buf, 0, sizeof(buf), "", args.profile_stats); |
474 RawWrite(args.fd, buf, len); | 493 RawWrite(args.fd, buf, len); |
475 } | 494 } |
476 | 495 |
| 496 inline |
| 497 void HeapProfileTable::DumpLiveIterator(const void* ptr, AllocValue* v, |
| 498 const DumpArgs& args) { |
| 499 if (!v->live()) { |
| 500 return; |
| 501 } |
| 502 if (v->ignore()) { |
| 503 return; |
| 504 } |
| 505 Bucket b; |
| 506 memset(&b, 0, sizeof(b)); |
| 507 b.allocs = 1; |
| 508 b.alloc_size = v->bytes; |
| 509 b.depth = v->bucket()->depth; |
| 510 b.stack = v->bucket()->stack; |
| 511 char addr[32];  // room for "0x" + 16 hex digits + NUL on 64-bit |
| 512 snprintf(addr, sizeof(addr), "0x%08" PRIxPTR, reinterpret_cast<uintptr_t>(ptr)); |
| 513 char buf[1024]; |
| 514 int len = UnparseBucket(b, buf, 0, sizeof(buf), addr, args.profile_stats); |
| 515 RawWrite(args.fd, buf, len); |
| 516 } |
| 517 |
477 inline void HeapProfileTable::ZeroBucketCountsIterator( | 518 inline void HeapProfileTable::ZeroBucketCountsIterator( |
478 const void* ptr, AllocValue* v, HeapProfileTable* heap_profile) { | 519 const void* ptr, AllocValue* v, HeapProfileTable* heap_profile) { |
479 Bucket* b = v->bucket(); | 520 Bucket* b = v->bucket(); |
480 if (b != NULL) { | 521 if (b != NULL) { |
481 b->allocs = 0; | 522 b->allocs = 0; |
482 b->alloc_size = 0; | 523 b->alloc_size = 0; |
483 b->free_size = 0; | 524 b->free_size = 0; |
484 b->frees = 0; | 525 b->frees = 0; |
485 } | 526 } |
486 } | 527 } |
(...skipping 202 matching lines...)
689 char* unused) { | 730 char* unused) { |
690 // Perhaps also log the allocation stack trace (unsymbolized) | 731 // Perhaps also log the allocation stack trace (unsymbolized) |
691 // on this line in case somebody finds it useful. | 732 // on this line in case somebody finds it useful. |
692 RAW_LOG(ERROR, "leaked %"PRIuS" byte object %p", v->bytes, ptr); | 733 RAW_LOG(ERROR, "leaked %"PRIuS" byte object %p", v->bytes, ptr); |
693 } | 734 } |
694 | 735 |
695 void HeapProfileTable::Snapshot::ReportIndividualObjects() { | 736 void HeapProfileTable::Snapshot::ReportIndividualObjects() { |
696 char unused; | 737 char unused; |
697 map_.Iterate(ReportObject, &unused); | 738 map_.Iterate(ReportObject, &unused); |
698 } | 739 } |