OLD | NEW |
1 // Copyright (c) 2006, Google Inc. | 1 // Copyright (c) 2006, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 79 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
90 | 90 |
91 // Memory (de)allocator interface we'll use. | 91 // Memory (de)allocator interface we'll use. |
92 typedef void* (*Allocator)(size_t size); | 92 typedef void* (*Allocator)(size_t size); |
93 typedef void (*DeAllocator)(void* ptr); | 93 typedef void (*DeAllocator)(void* ptr); |
94 | 94 |
95 // interface --------------------------- | 95 // interface --------------------------- |
96 | 96 |
97 HeapProfileTable(Allocator alloc, DeAllocator dealloc); | 97 HeapProfileTable(Allocator alloc, DeAllocator dealloc); |
98 ~HeapProfileTable(); | 98 ~HeapProfileTable(); |
99 | 99 |
100 // Collect the stack trace for the function that asked to do the | 100 // Record an allocation at 'ptr' of 'bytes' bytes. |
101 // allocation for passing to RecordAlloc() below. | 101 // skip_count gives the number of stack frames between this call |
102 // | 102 // and the memory allocation function that was asked to do the allocation. |
103 // The stack trace is stored in 'stack'. The stack depth is returned. | 103 void RecordAlloc(const void* ptr, size_t bytes, int skip_count); |
104 // | |
105 // 'skip_count' gives the number of stack frames between this call | |
106 // and the memory allocation function. | |
107 static int GetCallerStackTrace(int skip_count, void* stack[kMaxStackDepth]); | |
108 | 104 |
109 // Record an allocation at 'ptr' of 'bytes' bytes. 'stack_depth' | 105 // Direct version of RecordAlloc when the caller stack to use |
110 // and 'call_stack' identifying the function that requested the | 106 // is already known: call_stack of depth stack_depth. |
111 // allocation. They can be generated using GetCallerStackTrace() above. | 107 void RecordAllocWithStack(const void* ptr, size_t bytes, |
112 void RecordAlloc(const void* ptr, size_t bytes, | 108 int stack_depth, const void* const call_stack[]); |
113 int stack_depth, const void* const call_stack[]); | |
114 | 109 |
115 // Record the deallocation of memory at 'ptr'. | 110 // Record the deallocation of memory at 'ptr'. |
116 void RecordFree(const void* ptr); | 111 void RecordFree(const void* ptr); |
117 | 112 |
118 // Return true iff we have recorded an allocation at 'ptr'. | 113 // Return true iff we have recorded an allocation at 'ptr'. |
119 // If yes, fill *object_size with the allocation byte size. | 114 // If yes, fill *object_size with the allocation byte size. |
120 bool FindAlloc(const void* ptr, size_t* object_size) const; | 115 bool FindAlloc(const void* ptr, size_t* object_size) const; |
121 // Same as FindAlloc, but fills all of *info. | 116 // Same as FindAlloc, but fills all of *info. |
122 bool FindAllocDetails(const void* ptr, AllocInfo* info) const; | 117 bool FindAllocDetails(const void* ptr, AllocInfo* info) const; |
123 | 118 |
124 // Return true iff "ptr" points into a recorded allocation | 119 // Return true iff "ptr" points into a recorded allocation |
125 // If yes, fill *object_ptr with the actual allocation address | 120 // If yes, fill *object_ptr with the actual allocation address |
126 // and *object_size with the allocation byte size. | 121 // and *object_size with the allocation byte size. |
127 // max_size specifies largest currently possible allocation size. | 122 // max_size specifies largest currently possible allocation size. |
128 bool FindInsideAlloc(const void* ptr, size_t max_size, | 123 bool FindInsideAlloc(const void* ptr, size_t max_size, |
129 const void** object_ptr, size_t* object_size) const; | 124 const void** object_ptr, size_t* object_size) const; |
130 | 125 |
131 // If "ptr" points to a recorded allocation and it's not marked as live | 126 // If "ptr" points to a recorded allocation and it's not marked as live |
132 // mark it as live and return true. Else return false. | 127 // mark it as live and return true. Else return false. |
133 // All allocations start as non-live. | 128 // All allocations start as non-live. |
134 bool MarkAsLive(const void* ptr); | 129 bool MarkAsLive(const void* ptr); |
135 | 130 |
136 // If "ptr" points to a recorded allocation, mark it as "ignored". | 131 // If "ptr" points to a recorded allocation, mark it as "ignored". |
137 // Ignored objects are treated like other objects, except that they | 132 // Ignored objects are treated like other objects, except that they |
138 // are skipped in heap checking reports. | 133 // are skipped in heap checking reports. |
139 void MarkAsIgnored(const void* ptr); | 134 void MarkAsIgnored(const void* ptr); |
140 | 135 |
141 // Return current total (de)allocation statistics. It doesn't contain | 136 // Return current total (de)allocation statistics. |
142 // mmap'ed regions. | |
143 const Stats& total() const { return total_; } | 137 const Stats& total() const { return total_; } |
144 | 138 |
145 // Allocation data iteration callback: gets passed object pointer and | 139 // Allocation data iteration callback: gets passed object pointer and |
146 // fully-filled AllocInfo. | 140 // fully-filled AllocInfo. |
147 typedef void (*AllocIterator)(const void* ptr, const AllocInfo& info); | 141 typedef void (*AllocIterator)(const void* ptr, const AllocInfo& info); |
148 | 142 |
149 // Iterate over the allocation profile data calling "callback" | 143 // Iterate over the allocation profile data calling "callback" |
150 // for every allocation. | 144 // for every allocation. |
151 void IterateAllocs(AllocIterator callback) const { | 145 void IterateAllocs(AllocIterator callback) const { |
152 alloc_address_map_->Iterate(MapArgsAllocIterator, callback); | 146 allocation_->Iterate(MapArgsAllocIterator, callback); |
153 } | 147 } |
154 | 148 |
155 // Allocation context profile data iteration callback | 149 // Allocation context profile data iteration callback |
156 typedef void (*AllocContextIterator)(const AllocContextInfo& info); | 150 typedef void (*AllocContextIterator)(const AllocContextInfo& info); |
157 | 151 |
158 // Iterate over the allocation context profile data calling "callback" | 152 // Iterate over the allocation context profile data calling "callback" |
159 // for every allocation context. Allocation contexts are ordered by the | 153 // for every allocation context. Allocation contexts are ordered by the |
160 // size of allocated space. | 154 // size of allocated space. |
161 void IterateOrderedAllocContexts(AllocContextIterator callback) const; | 155 void IterateOrderedAllocContexts(AllocContextIterator callback) const; |
162 | 156 |
(...skipping 17 matching lines...) Expand all Loading... |
180 // Release a previously taken snapshot. snapshot must not | 174 // Release a previously taken snapshot. snapshot must not |
181 // be used after this call. | 175 // be used after this call. |
182 void ReleaseSnapshot(Snapshot* snapshot); | 176 void ReleaseSnapshot(Snapshot* snapshot); |
183 | 177 |
184 // Return a snapshot of every non-live, non-ignored object in *this. | 178 // Return a snapshot of every non-live, non-ignored object in *this. |
185 // If "base" is non-NULL, skip any objects present in "base". | 179 // If "base" is non-NULL, skip any objects present in "base". |
186 // As a side-effect, clears the "live" bit on every live object in *this. | 180 // As a side-effect, clears the "live" bit on every live object in *this. |
187 // Caller must call ReleaseSnapshot() on result when no longer needed. | 181 // Caller must call ReleaseSnapshot() on result when no longer needed. |
188 Snapshot* NonLiveSnapshot(Snapshot* base); | 182 Snapshot* NonLiveSnapshot(Snapshot* base); |
189 | 183 |
190 // Refresh the internal mmap information from MemoryRegionMap. Results of | |
191 // FillOrderedProfile and IterateOrderedAllocContexts will contain mmap'ed | |
192 // memory regions as at calling RefreshMMapData. | |
193 void RefreshMMapData(); | |
194 | |
195 // Clear the internal mmap information. Results of FillOrderedProfile and | |
196 // IterateOrderedAllocContexts won't contain mmap'ed memory regions after | |
197 // calling ClearMMapData. | |
198 void ClearMMapData(); | |
199 | |
200 private: | 184 private: |
201 | 185 |
202 // data types ---------------------------- | 186 // data types ---------------------------- |
203 | 187 |
204 // Hash table bucket to hold (de)allocation stats | 188 // Hash table bucket to hold (de)allocation stats |
205 // for a given allocation call stack trace. | 189 // for a given allocation call stack trace. |
206 struct Bucket : public Stats { | 190 struct Bucket : public Stats { |
207 uintptr_t hash; // Hash value of the stack trace | 191 uintptr_t hash; // Hash value of the stack trace |
208 int depth; // Depth of stack trace | 192 int depth; // Depth of stack trace |
209 const void** stack; // Stack trace | 193 const void** stack; // Stack trace |
(...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
267 // counting bucket b. | 251 // counting bucket b. |
268 // | 252 // |
269 // "extra" is appended to the unparsed bucket. Typically it is empty, | 253 // "extra" is appended to the unparsed bucket. Typically it is empty, |
270 // but may be set to something like " heapprofile" for the total | 254 // but may be set to something like " heapprofile" for the total |
271 // bucket to indicate the type of the profile. | 255 // bucket to indicate the type of the profile. |
272 static int UnparseBucket(const Bucket& b, | 256 static int UnparseBucket(const Bucket& b, |
273 char* buf, int buflen, int bufsize, | 257 char* buf, int buflen, int bufsize, |
274 const char* extra, | 258 const char* extra, |
275 Stats* profile_stats); | 259 Stats* profile_stats); |
276 | 260 |
277 // Deallocate a given allocation map. | 261 // Get the bucket for the caller stack trace 'key' of depth 'depth' |
278 void DeallocateAllocationMap(AllocationMap* allocation); | 262 // creating the bucket if needed. |
279 | 263 Bucket* GetBucket(int depth, const void* const key[]); |
280 // Deallocate a given bucket table. | |
281 void DeallocateBucketTable(Bucket** table); | |
282 | |
283 // Get the bucket for the caller stack trace 'key' of depth 'depth' from a | |
284 // bucket hash map 'table' creating the bucket if needed. '*bucket_count' | |
285 // is incremented both when 'bucket_count' is not NULL and when a new | |
286 // bucket object is created. | |
287 Bucket* GetBucket(int depth, const void* const key[], Bucket** table, | |
288 int* bucket_count); | |
289 | 264 |
290 // Helper for IterateAllocs to do callback signature conversion | 265 // Helper for IterateAllocs to do callback signature conversion |
291 // from AllocationMap::Iterate to AllocIterator. | 266 // from AllocationMap::Iterate to AllocIterator. |
292 static void MapArgsAllocIterator(const void* ptr, AllocValue* v, | 267 static void MapArgsAllocIterator(const void* ptr, AllocValue* v, |
293 AllocIterator callback) { | 268 AllocIterator callback) { |
294 AllocInfo info; | 269 AllocInfo info; |
295 info.object_size = v->bytes; | 270 info.object_size = v->bytes; |
296 info.call_stack = v->bucket()->stack; | 271 info.call_stack = v->bucket()->stack; |
297 info.stack_depth = v->bucket()->depth; | 272 info.stack_depth = v->bucket()->depth; |
298 info.live = v->live(); | 273 info.live = v->live(); |
299 info.ignored = v->ignore(); | 274 info.ignored = v->ignore(); |
300 callback(ptr, info); | 275 callback(ptr, info); |
301 } | 276 } |
302 | 277 |
303 // Helper for DumpNonLiveProfile to do object-granularity | 278 // Helper for DumpNonLiveProfile to do object-granularity |
304 // heap profile dumping. It gets passed to AllocationMap::Iterate. | 279 // heap profile dumping. It gets passed to AllocationMap::Iterate. |
305 inline static void DumpNonLiveIterator(const void* ptr, AllocValue* v, | 280 inline static void DumpNonLiveIterator(const void* ptr, AllocValue* v, |
306 const DumpArgs& args); | 281 const DumpArgs& args); |
307 | 282 |
308 // Helper for filling size variables in buckets by zero. | |
309 inline static void ZeroBucketCountsIterator( | |
310 const void* ptr, AllocValue* v, HeapProfileTable* heap_profile); | |
311 | |
312 // Helper for IterateOrderedAllocContexts and FillOrderedProfile. | 283 // Helper for IterateOrderedAllocContexts and FillOrderedProfile. |
313 // Creates a sorted list of Buckets whose length is num_alloc_buckets_ + | 284 // Creates a sorted list of Buckets whose length is num_buckets_. |
314 // num_avaliable_mmap_buckets_. | 285 // The caller is responsible for deallocating the returned list. |
315 // The caller is responsible for deallocating the returned list. | |
316 Bucket** MakeSortedBucketList() const; | 286 Bucket** MakeSortedBucketList() const; |
317 | 287 |
318 // Helper for TakeSnapshot. Saves object to snapshot. | 288 // Helper for TakeSnapshot. Saves object to snapshot. |
319 static void AddToSnapshot(const void* ptr, AllocValue* v, Snapshot* s); | 289 static void AddToSnapshot(const void* ptr, AllocValue* v, Snapshot* s); |
320 | 290 |
321 // Arguments passed to AddIfNonLive | 291 // Arguments passed to AddIfNonLive |
322 struct AddNonLiveArgs { | 292 struct AddNonLiveArgs { |
323 Snapshot* dest; | 293 Snapshot* dest; |
324 Snapshot* base; | 294 Snapshot* base; |
325 }; | 295 }; |
(...skipping 11 matching lines...) Expand all Loading... |
337 AllocationMap* allocations); | 307 AllocationMap* allocations); |
338 | 308 |
339 // data ---------------------------- | 309 // data ---------------------------- |
340 | 310 |
341 // Memory (de)allocator that we use. | 311 // Memory (de)allocator that we use. |
342 Allocator alloc_; | 312 Allocator alloc_; |
343 DeAllocator dealloc_; | 313 DeAllocator dealloc_; |
344 | 314 |
345 // Overall profile stats; we use only the Stats part, | 315 // Overall profile stats; we use only the Stats part, |
346 // but make it a Bucket to pass to UnparseBucket. | 316 // but make it a Bucket to pass to UnparseBucket. |
347 // It doesn't contain mmap'ed regions. | |
348 Bucket total_; | 317 Bucket total_; |
349 | 318 |
350 // Bucket hash table for malloc. | 319 // Bucket hash table. |
351 // We hand-craft one instead of using one of the pre-written | 320 // We hand-craft one instead of using one of the pre-written |
352 // ones because we do not want to use malloc when operating on the table. | 321 // ones because we do not want to use malloc when operating on the table. |
353 // It is only few lines of code, so no big deal. | 322 // It is only a few lines of code, so no big deal. |
354 Bucket** alloc_table_; | 323 Bucket** table_; |
355 int num_alloc_buckets_; | 324 int num_buckets_; |
356 | 325 |
357 // Bucket hash table for mmap. | 326 // Map of all currently allocated objects we know about. |
358 // This table is filled with the information from MemoryRegionMap by calling | 327 AllocationMap* allocation_; |
359 // RefreshMMapData. | |
360 Bucket** mmap_table_; | |
361 int num_available_mmap_buckets_; | |
362 | |
363 // Map of all currently allocated objects and mapped regions we know about. | |
364 AllocationMap* alloc_address_map_; | |
365 AllocationMap* mmap_address_map_; | |
366 | 328 |
367 DISALLOW_COPY_AND_ASSIGN(HeapProfileTable); | 329 DISALLOW_COPY_AND_ASSIGN(HeapProfileTable); |
368 }; | 330 }; |
369 | 331 |
370 class HeapProfileTable::Snapshot { | 332 class HeapProfileTable::Snapshot { |
371 public: | 333 public: |
372 const Stats& total() const { return total_; } | 334 const Stats& total() const { return total_; } |
373 | 335 |
374 // Report anything in this snapshot as a leak. | 336 // Report anything in this snapshot as a leak. |
375 // May use new/delete for temporary storage. | 337 // May use new/delete for temporary storage. |
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
413 // Helpers for sorting and generating leak reports | 375 // Helpers for sorting and generating leak reports |
414 struct Entry; | 376 struct Entry; |
415 struct ReportState; | 377 struct ReportState; |
416 static void ReportCallback(const void* ptr, AllocValue* v, ReportState*); | 378 static void ReportCallback(const void* ptr, AllocValue* v, ReportState*); |
417 static void ReportObject(const void* ptr, AllocValue* v, char*); | 379 static void ReportObject(const void* ptr, AllocValue* v, char*); |
418 | 380 |
419 DISALLOW_COPY_AND_ASSIGN(Snapshot); | 381 DISALLOW_COPY_AND_ASSIGN(Snapshot); |
420 }; | 382 }; |
421 | 383 |
422 #endif // BASE_HEAP_PROFILE_TABLE_H_ | 384 #endif // BASE_HEAP_PROFILE_TABLE_H_ |
OLD | NEW |