| OLD | NEW |
| 1 // Copyright (c) 2006, Google Inc. | 1 // Copyright (c) 2006, Google Inc. |
| 2 // All rights reserved. | 2 // All rights reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
| 6 // met: | 6 // met: |
| 7 // | 7 // |
| 8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
| 9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
| 10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
| (...skipping 272 matching lines...) |
| 283 return false; | 283 return false; |
| 284 } | 284 } |
| 285 | 285 |
| 286 void HeapProfileTable::MarkAsIgnored(const void* ptr) { | 286 void HeapProfileTable::MarkAsIgnored(const void* ptr) { |
| 287 AllocValue* alloc = alloc_address_map_->FindMutable(ptr); | 287 AllocValue* alloc = alloc_address_map_->FindMutable(ptr); |
| 288 if (alloc) { | 288 if (alloc) { |
| 289 alloc->set_ignore(true); | 289 alloc->set_ignore(true); |
| 290 } | 290 } |
| 291 } | 291 } |
| 292 | 292 |
| | 293 void HeapProfileTable::MarkCurrentAllocations(AllocationMark mark) { |
| | 294 const MarkArgs args(mark, true); |
| | 295 alloc_address_map_->Iterate<const MarkArgs&>(MarkIterator, args); |
| | 296 } |
| | 297 |
| | 298 void HeapProfileTable::MarkUnmarkedAllocations(AllocationMark mark) { |
| | 299 const MarkArgs args(mark, false); |
| | 300 alloc_address_map_->Iterate<const MarkArgs&>(MarkIterator, args); |
| | 301 } |
| | 302 |
| 293 // We'd be happier using snprintfer, but we avoid it to reduce dependencies. | 303 // We'd be happier using snprintfer, but we avoid it to reduce dependencies. |
| 294 int HeapProfileTable::UnparseBucket(const Bucket& b, | 304 int HeapProfileTable::UnparseBucket(const Bucket& b, |
| 295 char* buf, int buflen, int bufsize, | 305 char* buf, int buflen, int bufsize, |
| 296 const char* extra, | 306 const char* extra, |
| 297 Stats* profile_stats) { | 307 Stats* profile_stats) { |
| 298 if (profile_stats != NULL) { | 308 if (profile_stats != NULL) { |
| 299 profile_stats->allocs += b.allocs; | 309 profile_stats->allocs += b.allocs; |
| 300 profile_stats->alloc_size += b.alloc_size; | 310 profile_stats->alloc_size += b.alloc_size; |
| 301 profile_stats->frees += b.frees; | 311 profile_stats->frees += b.frees; |
| 302 profile_stats->free_size += b.free_size; | 312 profile_stats->free_size += b.free_size; |
| (...skipping 86 matching lines...) |
| 389 | 399 |
| 390 void HeapProfileTable::ClearMMapData() { | 400 void HeapProfileTable::ClearMMapData() { |
| 391 if (mmap_address_map_ == NULL) return; | 401 if (mmap_address_map_ == NULL) return; |
| 392 | 402 |
| 393 mmap_address_map_->Iterate(ZeroBucketCountsIterator, this); | 403 mmap_address_map_->Iterate(ZeroBucketCountsIterator, this); |
| 394 mmap_address_map_->~AllocationMap(); | 404 mmap_address_map_->~AllocationMap(); |
| 395 dealloc_(mmap_address_map_); | 405 dealloc_(mmap_address_map_); |
| 396 mmap_address_map_ = NULL; | 406 mmap_address_map_ = NULL; |
| 397 } | 407 } |
| 398 | 408 |
| | 409 void HeapProfileTable::DumpMarkedObjects(AllocationMark mark, |
| | 410 const char* file_name) { |
| | 411 RawFD fd = RawOpenForWriting(file_name); |
| | 412 if (fd == kIllegalRawFD) { |
| | 413 RAW_LOG(ERROR, "Failed dumping marked objects to %s", file_name); |
| | 414 return; |
| | 415 } |
| | 416 const DumpMarkedArgs args(fd, mark); |
| | 417 alloc_address_map_->Iterate<const DumpMarkedArgs&>(DumpMarkedIterator, args); |
| | 418 RawClose(fd); |
| | 419 } |
| | 420 |
| 399 void HeapProfileTable::IterateOrderedAllocContexts( | 421 void HeapProfileTable::IterateOrderedAllocContexts( |
| 400 AllocContextIterator callback) const { | 422 AllocContextIterator callback) const { |
| 401 Bucket** list = MakeSortedBucketList(); | 423 Bucket** list = MakeSortedBucketList(); |
| 402 AllocContextInfo info; | 424 AllocContextInfo info; |
| 403 for (int i = 0; i < num_alloc_buckets_; ++i) { | 425 for (int i = 0; i < num_alloc_buckets_; ++i) { |
| 404 *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]); | 426 *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]); |
| 405 info.stack_depth = list[i]->depth; | 427 info.stack_depth = list[i]->depth; |
| 406 info.call_stack = list[i]->stack; | 428 info.call_stack = list[i]->stack; |
| 407 callback(info); | 429 callback(info); |
| 408 } | 430 } |
| (...skipping 58 matching lines...) |
| 467 memset(&b, 0, sizeof(b)); | 489 memset(&b, 0, sizeof(b)); |
| 468 b.allocs = 1; | 490 b.allocs = 1; |
| 469 b.alloc_size = v->bytes; | 491 b.alloc_size = v->bytes; |
| 470 b.depth = v->bucket()->depth; | 492 b.depth = v->bucket()->depth; |
| 471 b.stack = v->bucket()->stack; | 493 b.stack = v->bucket()->stack; |
| 472 char buf[1024]; | 494 char buf[1024]; |
| 473 int len = UnparseBucket(b, buf, 0, sizeof(buf), "", args.profile_stats); | 495 int len = UnparseBucket(b, buf, 0, sizeof(buf), "", args.profile_stats); |
| 474 RawWrite(args.fd, buf, len); | 496 RawWrite(args.fd, buf, len); |
| 475 } | 497 } |
| 476 | 498 |
| | 499 inline |
| | 500 void HeapProfileTable::DumpMarkedIterator(const void* ptr, AllocValue* v, |
| | 501 const DumpMarkedArgs& args) { |
| | 502 if (v->mark() != args.mark) |
| | 503 return; |
| | 504 Bucket b; |
| | 505 memset(&b, 0, sizeof(b)); |
| | 506 b.allocs = 1; |
| | 507 b.alloc_size = v->bytes; |
| | 508 b.depth = v->bucket()->depth; |
| | 509 b.stack = v->bucket()->stack; |
| | 510 char addr[32]; |
| | 511 snprintf(addr, sizeof(addr), "0x%08" PRIxPTR, reinterpret_cast<uintptr_t>(ptr)); |
| | 512 char buf[1024]; |
| | 513 int len = UnparseBucket(b, buf, 0, sizeof(buf), addr, NULL); |
| | 514 RawWrite(args.fd, buf, len); |
| | 515 } |
| | 516 |
| | 517 inline |
| | 518 void HeapProfileTable::MarkIterator(const void* ptr, AllocValue* v, |
| | 519 const MarkArgs& args) { |
| | 520 if (!args.mark_all && v->mark() != UNMARKED) |
| | 521 return; |
| | 522 v->set_mark(args.mark); |
| | 523 } |
| | 524 |
| 477 inline void HeapProfileTable::ZeroBucketCountsIterator( | 525 inline void HeapProfileTable::ZeroBucketCountsIterator( |
| 478 const void* ptr, AllocValue* v, HeapProfileTable* heap_profile) { | 526 const void* ptr, AllocValue* v, HeapProfileTable* heap_profile) { |
| 479 Bucket* b = v->bucket(); | 527 Bucket* b = v->bucket(); |
| 480 if (b != NULL) { | 528 if (b != NULL) { |
| 481 b->allocs = 0; | 529 b->allocs = 0; |
| 482 b->alloc_size = 0; | 530 b->alloc_size = 0; |
| 483 b->free_size = 0; | 531 b->free_size = 0; |
| 484 b->frees = 0; | 532 b->frees = 0; |
| 485 } | 533 } |
| 486 } | 534 } |
| (...skipping 202 matching lines...) |
| 689 char* unused) { | 737 char* unused) { |
| 690 // Perhaps also log the allocation stack trace (unsymbolized) | 738 // Perhaps also log the allocation stack trace (unsymbolized) |
| 691 // on this line in case somebody finds it useful. | 739 // on this line in case somebody finds it useful. |
| 692 RAW_LOG(ERROR, "leaked %"PRIuS" byte object %p", v->bytes, ptr); | 740 RAW_LOG(ERROR, "leaked %"PRIuS" byte object %p", v->bytes, ptr); |
| 693 } | 741 } |
| 694 | 742 |
| 695 void HeapProfileTable::Snapshot::ReportIndividualObjects() { | 743 void HeapProfileTable::Snapshot::ReportIndividualObjects() { |
| 696 char unused; | 744 char unused; |
| 697 map_.Iterate(ReportObject, &unused); | 745 map_.Iterate(ReportObject, &unused); |
| 698 } | 746 } |
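For orientation, here is a minimal sketch of how the marking API added in this patch might be driven from the profiler side. Only UNMARKED (referenced by MarkIterator) and the three new member functions appear in the hunks above; the MARK_ONE/MARK_TWO enumerators, the output path, and the way the caller obtains the HeapProfileTable pointer are assumptions for illustration, not part of this change.

```cpp
// Hypothetical driver code (not part of this change). Assumes
// "heap-profile-table.h" is visible and that HeapProfileTable::AllocationMark
// defines MARK_ONE and MARK_TWO alongside the UNMARKED value used above.
#include "heap-profile-table.h"

static void DumpAllocationsSinceCheckpoint(HeapProfileTable* table) {
  // Checkpoint: tag every allocation that exists right now, regardless of
  // any mark it may already carry (MarkIterator runs with mark_all == true).
  table->MarkCurrentAllocations(HeapProfileTable::MARK_ONE);

  // ... the program keeps running; new allocations are assumed to start out
  // UNMARKED ...

  // Tag only the objects allocated since the checkpoint; already-marked
  // objects keep their MARK_ONE tag.
  table->MarkUnmarkedAllocations(HeapProfileTable::MARK_TWO);

  // Write one record per MARK_TWO object (its bucket's stack plus the
  // object's address, formatted by DumpMarkedIterator) to the given file.
  table->DumpMarkedObjects(HeapProfileTable::MARK_TWO, "/tmp/marked.heap");
}
```

Read this way, the mark_all flag is what separates the two entry points: MarkCurrentAllocations re-tags everything, while MarkUnmarkedAllocations only touches objects still carrying UNMARKED, which is why it builds its MarkArgs with mark_all set to false.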