| OLD | NEW |
| 1 // Copyright (c) 2006, Google Inc. | 1 // Copyright (c) 2006, Google Inc. |
| 2 // All rights reserved. | 2 // All rights reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
| 6 // met: | 6 // met: |
| 7 // | 7 // |
| 8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
| 9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
| 10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
| (...skipping 79 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 90 | 90 |
| 91 // Memory (de)allocator interface we'll use. | 91 // Memory (de)allocator interface we'll use. |
| 92 typedef void* (*Allocator)(size_t size); | 92 typedef void* (*Allocator)(size_t size); |
| 93 typedef void (*DeAllocator)(void* ptr); | 93 typedef void (*DeAllocator)(void* ptr); |
| 94 | 94 |
| 95 // interface --------------------------- | 95 // interface --------------------------- |
| 96 | 96 |
| 97 HeapProfileTable(Allocator alloc, DeAllocator dealloc); | 97 HeapProfileTable(Allocator alloc, DeAllocator dealloc); |
| 98 ~HeapProfileTable(); | 98 ~HeapProfileTable(); |
| 99 | 99 |
| 100 // Record an allocation at 'ptr' of 'bytes' bytes. | 100 // Collect the stack trace for the function that asked to do the |
| 101 // skip_count gives the number of stack frames between this call | 101 // allocation for passing to RecordAlloc() below. |
| 102 // and the memory allocation function that was asked to do the allocation. | 102 // |
| 103 void RecordAlloc(const void* ptr, size_t bytes, int skip_count); | 103 // The stack trace is stored in 'stack'. The stack depth is returned. |
| 104 // |
| 105 // 'skip_count' gives the number of stack frames between this call |
| 106 // and the memory allocation function. |
| 107 static int GetCallerStackTrace(int skip_count, void* stack[kMaxStackDepth]); |
| 104 | 108 |
| 105 // Direct version of RecordAlloc when the caller stack to use | 109 // Record an allocation at 'ptr' of 'bytes' bytes. 'stack_depth' |
| 106 // is already known: call_stack of depth stack_depth. | 110 // and 'call_stack' identifying the function that requested the |
| 107 void RecordAllocWithStack(const void* ptr, size_t bytes, | 111 // allocation. They can be generated using GetCallerStackTrace() above. |
| 108 int stack_depth, const void* const call_stack[]); | 112 void RecordAlloc(const void* ptr, size_t bytes, |
| 113 int stack_depth, const void* const call_stack[]); |
| 109 | 114 |
| 110 // Record the deallocation of memory at 'ptr'. | 115 // Record the deallocation of memory at 'ptr'. |
| 111 void RecordFree(const void* ptr); | 116 void RecordFree(const void* ptr); |
| 112 | 117 |
| 113 // Return true iff we have recorded an allocation at 'ptr'. | 118 // Return true iff we have recorded an allocation at 'ptr'. |
| 114 // If yes, fill *object_size with the allocation byte size. | 119 // If yes, fill *object_size with the allocation byte size. |
| 115 bool FindAlloc(const void* ptr, size_t* object_size) const; | 120 bool FindAlloc(const void* ptr, size_t* object_size) const; |
| 116 // Same as FindAlloc, but fills all of *info. | 121 // Same as FindAlloc, but fills all of *info. |
| 117 bool FindAllocDetails(const void* ptr, AllocInfo* info) const; | 122 bool FindAllocDetails(const void* ptr, AllocInfo* info) const; |
| 118 | 123 |
| 119 // Return true iff "ptr" points into a recorded allocation | 124 // Return true iff "ptr" points into a recorded allocation |
| 120 // If yes, fill *object_ptr with the actual allocation address | 125 // If yes, fill *object_ptr with the actual allocation address |
| 121 // and *object_size with the allocation byte size. | 126 // and *object_size with the allocation byte size. |
| 122 // max_size specifies largest currently possible allocation size. | 127 // max_size specifies largest currently possible allocation size. |
| 123 bool FindInsideAlloc(const void* ptr, size_t max_size, | 128 bool FindInsideAlloc(const void* ptr, size_t max_size, |
| 124 const void** object_ptr, size_t* object_size) const; | 129 const void** object_ptr, size_t* object_size) const; |
| 125 | 130 |
| 126 // If "ptr" points to a recorded allocation and it's not marked as live | 131 // If "ptr" points to a recorded allocation and it's not marked as live |
| 127 // mark it as live and return true. Else return false. | 132 // mark it as live and return true. Else return false. |
| 128 // All allocations start as non-live. | 133 // All allocations start as non-live. |
| 129 bool MarkAsLive(const void* ptr); | 134 bool MarkAsLive(const void* ptr); |
| 130 | 135 |
| 131 // If "ptr" points to a recorded allocation, mark it as "ignored". | 136 // If "ptr" points to a recorded allocation, mark it as "ignored". |
| 132 // Ignored objects are treated like other objects, except that they | 137 // Ignored objects are treated like other objects, except that they |
| 133 // are skipped in heap checking reports. | 138 // are skipped in heap checking reports. |
| 134 void MarkAsIgnored(const void* ptr); | 139 void MarkAsIgnored(const void* ptr); |
| 135 | 140 |
| 136 // Return current total (de)allocation statistics. | 141 // Return current total (de)allocation statistics. It doesn't contain |
| 142 // mmap'ed regions. |
| 137 const Stats& total() const { return total_; } | 143 const Stats& total() const { return total_; } |
| 138 | 144 |
| 139 // Allocation data iteration callback: gets passed object pointer and | 145 // Allocation data iteration callback: gets passed object pointer and |
| 140 // fully-filled AllocInfo. | 146 // fully-filled AllocInfo. |
| 141 typedef void (*AllocIterator)(const void* ptr, const AllocInfo& info); | 147 typedef void (*AllocIterator)(const void* ptr, const AllocInfo& info); |
| 142 | 148 |
| 143 // Iterate over the allocation profile data calling "callback" | 149 // Iterate over the allocation profile data calling "callback" |
| 144 // for every allocation. | 150 // for every allocation. |
| 145 void IterateAllocs(AllocIterator callback) const { | 151 void IterateAllocs(AllocIterator callback) const { |
| 146 allocation_->Iterate(MapArgsAllocIterator, callback); | 152 alloc_address_map_->Iterate(MapArgsAllocIterator, callback); |
| 147 } | 153 } |
| 148 | 154 |
| 149 // Allocation context profile data iteration callback | 155 // Allocation context profile data iteration callback |
| 150 typedef void (*AllocContextIterator)(const AllocContextInfo& info); | 156 typedef void (*AllocContextIterator)(const AllocContextInfo& info); |
| 151 | 157 |
| 152 // Iterate over the allocation context profile data calling "callback" | 158 // Iterate over the allocation context profile data calling "callback" |
| 153 // for every allocation context. Allocation contexts are ordered by the | 159 // for every allocation context. Allocation contexts are ordered by the |
| 154 // size of allocated space. | 160 // size of allocated space. |
| 155 void IterateOrderedAllocContexts(AllocContextIterator callback) const; | 161 void IterateOrderedAllocContexts(AllocContextIterator callback) const; |
| 156 | 162 |
| (...skipping 17 matching lines...) Expand all Loading... |
| 174 // Release a previously taken snapshot. snapshot must not | 180 // Release a previously taken snapshot. snapshot must not |
| 175 // be used after this call. | 181 // be used after this call. |
| 176 void ReleaseSnapshot(Snapshot* snapshot); | 182 void ReleaseSnapshot(Snapshot* snapshot); |
| 177 | 183 |
| 178 // Return a snapshot of every non-live, non-ignored object in *this. | 184 // Return a snapshot of every non-live, non-ignored object in *this. |
| 179 // If "base" is non-NULL, skip any objects present in "base". | 185 // If "base" is non-NULL, skip any objects present in "base". |
| 180 // As a side-effect, clears the "live" bit on every live object in *this. | 186 // As a side-effect, clears the "live" bit on every live object in *this. |
| 181 // Caller must call ReleaseSnapshot() on result when no longer needed. | 187 // Caller must call ReleaseSnapshot() on result when no longer needed. |
| 182 Snapshot* NonLiveSnapshot(Snapshot* base); | 188 Snapshot* NonLiveSnapshot(Snapshot* base); |
| 183 | 189 |
| 190 // Refresh the internal mmap information from MemoryRegionMap. Results of |
| 191 // FillOrderedProfile and IterateOrderedAllocContexts will contain mmap'ed |
| 192 // memory regions as at calling RefreshMMapData. |
| 193 void RefreshMMapData(); |
| 194 |
| 195 // Clear the internal mmap information. Results of FillOrderedProfile and |
| 196 // IterateOrderedAllocContexts won't contain mmap'ed memory regions after |
| 197 // calling ClearMMapData. |
| 198 void ClearMMapData(); |
| 199 |
| 184 private: | 200 private: |
| 185 | 201 |
| 186 // data types ---------------------------- | 202 // data types ---------------------------- |
| 187 | 203 |
| 188 // Hash table bucket to hold (de)allocation stats | 204 // Hash table bucket to hold (de)allocation stats |
| 189 // for a given allocation call stack trace. | 205 // for a given allocation call stack trace. |
| 190 struct Bucket : public Stats { | 206 struct Bucket : public Stats { |
| 191 uintptr_t hash; // Hash value of the stack trace | 207 uintptr_t hash; // Hash value of the stack trace |
| 192 int depth; // Depth of stack trace | 208 int depth; // Depth of stack trace |
| 193 const void** stack; // Stack trace | 209 const void** stack; // Stack trace |
| (...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 251 // counting bucket b. | 267 // counting bucket b. |
| 252 // | 268 // |
| 253 // "extra" is appended to the unparsed bucket. Typically it is empty, | 269 // "extra" is appended to the unparsed bucket. Typically it is empty, |
| 254 // but may be set to something like " heapprofile" for the total | 270 // but may be set to something like " heapprofile" for the total |
| 255 // bucket to indicate the type of the profile. | 271 // bucket to indicate the type of the profile. |
| 256 static int UnparseBucket(const Bucket& b, | 272 static int UnparseBucket(const Bucket& b, |
| 257 char* buf, int buflen, int bufsize, | 273 char* buf, int buflen, int bufsize, |
| 258 const char* extra, | 274 const char* extra, |
| 259 Stats* profile_stats); | 275 Stats* profile_stats); |
| 260 | 276 |
| 261 // Get the bucket for the caller stack trace 'key' of depth 'depth' | 277 // Deallocate a given allocation map. |
| 262 // creating the bucket if needed. | 278 void DeallocateAllocationMap(AllocationMap* allocation); |
| 263 Bucket* GetBucket(int depth, const void* const key[]); | 279 |
| 280 // Deallocate a given bucket table. |
| 281 void DeallocateBucketTable(Bucket** table); |
| 282 |
| 283 // Get the bucket for the caller stack trace 'key' of depth 'depth' from a |
| 284 // bucket hash map 'table' creating the bucket if needed. '*bucket_count' |
| 285 // is incremented both when 'bucket_count' is not NULL and when a new |
| 286 // bucket object is created. |
| 287 Bucket* GetBucket(int depth, const void* const key[], Bucket** table, |
| 288 int* bucket_count); |
| 264 | 289 |
| 265 // Helper for IterateAllocs to do callback signature conversion | 290 // Helper for IterateAllocs to do callback signature conversion |
| 266 // from AllocationMap::Iterate to AllocIterator. | 291 // from AllocationMap::Iterate to AllocIterator. |
| 267 static void MapArgsAllocIterator(const void* ptr, AllocValue* v, | 292 static void MapArgsAllocIterator(const void* ptr, AllocValue* v, |
| 268 AllocIterator callback) { | 293 AllocIterator callback) { |
| 269 AllocInfo info; | 294 AllocInfo info; |
| 270 info.object_size = v->bytes; | 295 info.object_size = v->bytes; |
| 271 info.call_stack = v->bucket()->stack; | 296 info.call_stack = v->bucket()->stack; |
| 272 info.stack_depth = v->bucket()->depth; | 297 info.stack_depth = v->bucket()->depth; |
| 273 info.live = v->live(); | 298 info.live = v->live(); |
| 274 info.ignored = v->ignore(); | 299 info.ignored = v->ignore(); |
| 275 callback(ptr, info); | 300 callback(ptr, info); |
| 276 } | 301 } |
| 277 | 302 |
| 278 // Helper for DumpNonLiveProfile to do object-granularity | 303 // Helper for DumpNonLiveProfile to do object-granularity |
| 279 // heap profile dumping. It gets passed to AllocationMap::Iterate. | 304 // heap profile dumping. It gets passed to AllocationMap::Iterate. |
| 280 inline static void DumpNonLiveIterator(const void* ptr, AllocValue* v, | 305 inline static void DumpNonLiveIterator(const void* ptr, AllocValue* v, |
| 281 const DumpArgs& args); | 306 const DumpArgs& args); |
| 282 | 307 |
| 308 // Helper for filling size variables in buckets by zero. |
| 309 inline static void ZeroBucketCountsIterator( |
| 310 const void* ptr, AllocValue* v, HeapProfileTable* heap_profile); |
| 311 |
| 283 // Helper for IterateOrderedAllocContexts and FillOrderedProfile. | 312 // Helper for IterateOrderedAllocContexts and FillOrderedProfile. |
| 284 // Creates a sorted list of Buckets whose length is num_buckets_. | 313 // Creates a sorted list of Buckets whose length is num_alloc_buckets_ + |
| 285 // The caller is responsible for dellocating the returned list. | 314 // num_available_mmap_buckets_. |
| 315 // The caller is responsible for deallocating the returned list. |
| 286 Bucket** MakeSortedBucketList() const; | 316 Bucket** MakeSortedBucketList() const; |
| 287 | 317 |
| 288 // Helper for TakeSnapshot. Saves object to snapshot. | 318 // Helper for TakeSnapshot. Saves object to snapshot. |
| 289 static void AddToSnapshot(const void* ptr, AllocValue* v, Snapshot* s); | 319 static void AddToSnapshot(const void* ptr, AllocValue* v, Snapshot* s); |
| 290 | 320 |
| 291 // Arguments passed to AddIfNonLive | 321 // Arguments passed to AddIfNonLive |
| 292 struct AddNonLiveArgs { | 322 struct AddNonLiveArgs { |
| 293 Snapshot* dest; | 323 Snapshot* dest; |
| 294 Snapshot* base; | 324 Snapshot* base; |
| 295 }; | 325 }; |
| (...skipping 11 matching lines...) Expand all Loading... |
| 307 AllocationMap* allocations); | 337 AllocationMap* allocations); |
| 308 | 338 |
| 309 // data ---------------------------- | 339 // data ---------------------------- |
| 310 | 340 |
| 311 // Memory (de)allocator that we use. | 341 // Memory (de)allocator that we use. |
| 312 Allocator alloc_; | 342 Allocator alloc_; |
| 313 DeAllocator dealloc_; | 343 DeAllocator dealloc_; |
| 314 | 344 |
| 315 // Overall profile stats; we use only the Stats part, | 345 // Overall profile stats; we use only the Stats part, |
| 316 // but make it a Bucket to pass to UnparseBucket. | 346 // but make it a Bucket to pass to UnparseBucket. |
| 347 // It doesn't contain mmap'ed regions. |
| 317 Bucket total_; | 348 Bucket total_; |
| 318 | 349 |
| 319 // Bucket hash table. | 350 // Bucket hash table for malloc. |
| 320 // We hand-craft one instead of using one of the pre-written | 351 // We hand-craft one instead of using one of the pre-written |
| 321 // ones because we do not want to use malloc when operating on the table. | 352 // ones because we do not want to use malloc when operating on the table. |
| 322 // It is only few lines of code, so no big deal. | 353 // It is only few lines of code, so no big deal. |
| 323 Bucket** table_; | 354 Bucket** alloc_table_; |
| 324 int num_buckets_; | 355 int num_alloc_buckets_; |
| 325 | 356 |
| 326 // Map of all currently allocated objects we know about. | 357 // Bucket hash table for mmap. |
| 327 AllocationMap* allocation_; | 358 // This table is filled with the information from MemoryRegionMap by calling |
| 359 // RefreshMMapData. |
| 360 Bucket** mmap_table_; |
| 361 int num_available_mmap_buckets_; |
| 362 |
| 363 // Map of all currently allocated objects and mapped regions we know about. |
| 364 AllocationMap* alloc_address_map_; |
| 365 AllocationMap* mmap_address_map_; |
| 328 | 366 |
| 329 DISALLOW_COPY_AND_ASSIGN(HeapProfileTable); | 367 DISALLOW_COPY_AND_ASSIGN(HeapProfileTable); |
| 330 }; | 368 }; |
| 331 | 369 |
| 332 class HeapProfileTable::Snapshot { | 370 class HeapProfileTable::Snapshot { |
| 333 public: | 371 public: |
| 334 const Stats& total() const { return total_; } | 372 const Stats& total() const { return total_; } |
| 335 | 373 |
| 336 // Report anything in this snapshot as a leak. | 374 // Report anything in this snapshot as a leak. |
| 337 // May use new/delete for temporary storage. | 375 // May use new/delete for temporary storage. |
| (...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 375 // Helpers for sorting and generating leak reports | 413 // Helpers for sorting and generating leak reports |
| 376 struct Entry; | 414 struct Entry; |
| 377 struct ReportState; | 415 struct ReportState; |
| 378 static void ReportCallback(const void* ptr, AllocValue* v, ReportState*); | 416 static void ReportCallback(const void* ptr, AllocValue* v, ReportState*); |
| 379 static void ReportObject(const void* ptr, AllocValue* v, char*); | 417 static void ReportObject(const void* ptr, AllocValue* v, char*); |
| 380 | 418 |
| 381 DISALLOW_COPY_AND_ASSIGN(Snapshot); | 419 DISALLOW_COPY_AND_ASSIGN(Snapshot); |
| 382 }; | 420 }; |
| 383 | 421 |
| 384 #endif // BASE_HEAP_PROFILE_TABLE_H_ | 422 #endif // BASE_HEAP_PROFILE_TABLE_H_ |
| OLD | NEW |