OLD | NEW |
---|---|
1 // Copyright (c) 2006, Google Inc. | 1 // Copyright (c) 2006, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 120 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
131 // If "ptr" points to a recorded allocation and it's not marked as live | 131 // If "ptr" points to a recorded allocation and it's not marked as live |
132 // mark it as live and return true. Else return false. | 132 // mark it as live and return true. Else return false. |
133 // All allocations start as non-live. | 133 // All allocations start as non-live. |
134 bool MarkAsLive(const void* ptr); | 134 bool MarkAsLive(const void* ptr); |
135 | 135 |
136 // If "ptr" points to a recorded allocation, mark it as "ignored". | 136 // If "ptr" points to a recorded allocation, mark it as "ignored". |
137 // Ignored objects are treated like other objects, except that they | 137 // Ignored objects are treated like other objects, except that they |
138 // are skipped in heap checking reports. | 138 // are skipped in heap checking reports. |
139 void MarkAsIgnored(const void* ptr); | 139 void MarkAsIgnored(const void* ptr); |
140 | 140 |
141 // Mark all currently known allocations as "ignored". | |
142 void MarkAllAsIgnored(); | |
143 | |
144 // Mark all currently known, but not "ignored" allocations as "live". | |
145 void MarkAllAsLive(); | |
146 | |
141 // Return current total (de)allocation statistics. It doesn't contain | 147 // Return current total (de)allocation statistics. It doesn't contain |
142 // mmap'ed regions. | 148 // mmap'ed regions. |
143 const Stats& total() const { return total_; } | 149 const Stats& total() const { return total_; } |
144 | 150 |
145 // Allocation data iteration callback: gets passed object pointer and | 151 // Allocation data iteration callback: gets passed object pointer and |
146 // fully-filled AllocInfo. | 152 // fully-filled AllocInfo. |
147 typedef void (*AllocIterator)(const void* ptr, const AllocInfo& info); | 153 typedef void (*AllocIterator)(const void* ptr, const AllocInfo& info); |
148 | 154 |
149 // Iterate over the allocation profile data calling "callback" | 155 // Iterate over the allocation profile data calling "callback" |
150 // for every allocation. | 156 // for every allocation. |
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
196 // They are introduced to avoid expected memory fragmentation and bloat in | 202 // They are introduced to avoid expected memory fragmentation and bloat in |
197 // an arena. A dedicated arena for this function allows disposing whole the | 203 // an arena. A dedicated arena for this function allows disposing whole the |
198 // arena after ClearMMapData. | 204 // arena after ClearMMapData. |
199 void RefreshMMapData(Allocator mmap_alloc, DeAllocator mmap_dealloc); | 205 void RefreshMMapData(Allocator mmap_alloc, DeAllocator mmap_dealloc); |
200 | 206 |
201 // Clear the internal mmap information. Results of FillOrderedProfile and | 207 // Clear the internal mmap information. Results of FillOrderedProfile and |
202 // IterateOrderedAllocContexts won't contain mmap'ed memory regions after | 208 // IterateOrderedAllocContexts won't contain mmap'ed memory regions after |
203 // calling ClearMMapData. | 209 // calling ClearMMapData. |
204 void ClearMMapData(); | 210 void ClearMMapData(); |
205 | 211 |
212 // Dump a list of allocations marked as "live" along with their creation | |
213 // stack traces and sizes to a file named |file_name|. Together with | |
214 // MarkAllAsIgnored and MarkAllAsLive this can be used to find objects that | |
215 // are created in a certain time span: | |
216 // 1. Invoke MarkAllAsIgnored to mark the start of the time span. All known | |
217 // allocations are marked as "ignored" now. | |
218 // 2. Perform whatever action you suspect allocates memory that is not | |
219 // correctly freed. | |
220 // 3. Invoke MarkAllAsLive. New allocations are now marked as "live". | |
jar (doing other things)
2012/06/06 22:13:46
Given this description, is this name right? I had
| |
221 // 4. Perform whatever action is supposed to free the memory again. New | |
222 // allocations are not marked. So all allocations that are marked as | |
223 // "live" were created during step 2. | |
224 // 5. Invoke DumpLiveObjects to get the list of "live" allocations. | |
225 void DumpLiveObjects(const char* file_name); | |
226 | |
206 private: | 227 private: |
207 friend class DeepHeapProfile; | 228 friend class DeepHeapProfile; |
208 | 229 |
209 // data types ---------------------------- | 230 // data types ---------------------------- |
210 | 231 |
211 // Hash table bucket to hold (de)allocation stats | 232 // Hash table bucket to hold (de)allocation stats |
212 // for a given allocation call stack trace. | 233 // for a given allocation call stack trace. |
213 struct Bucket : public Stats { | 234 struct Bucket : public Stats { |
214 uintptr_t hash; // Hash value of the stack trace | 235 uintptr_t hash; // Hash value of the stack trace |
215 int depth; // Depth of stack trace | 236 int depth; // Depth of stack trace |
(...skipping 84 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
300 AllocIterator callback) { | 321 AllocIterator callback) { |
301 AllocInfo info; | 322 AllocInfo info; |
302 info.object_size = v->bytes; | 323 info.object_size = v->bytes; |
303 info.call_stack = v->bucket()->stack; | 324 info.call_stack = v->bucket()->stack; |
304 info.stack_depth = v->bucket()->depth; | 325 info.stack_depth = v->bucket()->depth; |
305 info.live = v->live(); | 326 info.live = v->live(); |
306 info.ignored = v->ignore(); | 327 info.ignored = v->ignore(); |
307 callback(ptr, info); | 328 callback(ptr, info); |
308 } | 329 } |
309 | 330 |
331 // Helper for MarkAllAsIgnored and MarkAllAsLive | |
332 static void MarkAllIterator(const void* ptr, AllocValue* v, | |
333 bool mark_as_ignored) { | |
334 if (mark_as_ignored) | |
335 v->set_ignore(true); | |
336 else if (!v->ignore()) | |
337 v->set_live(true); | |
338 } | |
339 | |
310 // Helper for DumpNonLiveProfile to do object-granularity | 340 // Helper for DumpNonLiveProfile to do object-granularity |
311 // heap profile dumping. It gets passed to AllocationMap::Iterate. | 341 // heap profile dumping. It gets passed to AllocationMap::Iterate. |
312 inline static void DumpNonLiveIterator(const void* ptr, AllocValue* v, | 342 inline static void DumpNonLiveIterator(const void* ptr, AllocValue* v, |
313 const DumpArgs& args); | 343 const DumpArgs& args); |
314 | 344 |
345 // Helper for DumpLiveObjects to dump all "live" allocations. It gets passed | |
346 // to AllocationMap::Iterate. | |
347 inline static void DumpLiveIterator(const void* ptr, AllocValue* v, | |
348 const DumpArgs& args); | |
349 | |
315 // Helper for filling size variables in buckets by zero. | 350 // Helper for filling size variables in buckets by zero. |
316 inline static void ZeroBucketCountsIterator( | 351 inline static void ZeroBucketCountsIterator( |
317 const void* ptr, AllocValue* v, HeapProfileTable* heap_profile); | 352 const void* ptr, AllocValue* v, HeapProfileTable* heap_profile); |
318 | 353 |
319 // Helper for IterateOrderedAllocContexts and FillOrderedProfile. | 354 // Helper for IterateOrderedAllocContexts and FillOrderedProfile. |
320 // Creates a sorted list of Buckets whose length is num_alloc_buckets_ + | 355 // Creates a sorted list of Buckets whose length is num_alloc_buckets_ + |
321 // num_avaliable_mmap_buckets_. | 356 // num_avaliable_mmap_buckets_. |
322 // The caller is responsible for deallocating the returned list. | 357 // The caller is responsible for deallocating the returned list. |
323 Bucket** MakeSortedBucketList() const; | 358 Bucket** MakeSortedBucketList() const; |
324 | 359 |
(...skipping 95 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
420 // Helpers for sorting and generating leak reports | 455 // Helpers for sorting and generating leak reports |
421 struct Entry; | 456 struct Entry; |
422 struct ReportState; | 457 struct ReportState; |
423 static void ReportCallback(const void* ptr, AllocValue* v, ReportState*); | 458 static void ReportCallback(const void* ptr, AllocValue* v, ReportState*); |
424 static void ReportObject(const void* ptr, AllocValue* v, char*); | 459 static void ReportObject(const void* ptr, AllocValue* v, char*); |
425 | 460 |
426 DISALLOW_COPY_AND_ASSIGN(Snapshot); | 461 DISALLOW_COPY_AND_ASSIGN(Snapshot); |
427 }; | 462 }; |
428 | 463 |
429 #endif // BASE_HEAP_PROFILE_TABLE_H_ | 464 #endif // BASE_HEAP_PROFILE_TABLE_H_ |
OLD | NEW |