OLD | NEW |
---|---|
1 // Copyright (c) 2006, Google Inc. | 1 // Copyright (c) 2006, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 53 matching lines...) | |
64 int64 alloc_size; // Total size of all allocated objects so far | 64 int64 alloc_size; // Total size of all allocated objects so far |
65 int64 free_size; // Total size of all freed objects so far | 65 int64 free_size; // Total size of all freed objects so far |
66 | 66 |
67 // semantic equality | 67 // semantic equality |
68 bool Equivalent(const Stats& x) const { | 68 bool Equivalent(const Stats& x) const { |
69 return allocs - frees == x.allocs - x.frees && | 69 return allocs - frees == x.allocs - x.frees && |
70 alloc_size - free_size == x.alloc_size - x.free_size; | 70 alloc_size - free_size == x.alloc_size - x.free_size; |
71 } | 71 } |
72 }; | 72 }; |
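Equivalent() deliberately compares net values (allocations minus frees) rather than raw totals, so two tables that reached the same live state through different histories compare equal. A minimal standalone sketch (the values and the standard-width types are illustrative, not from this header):

```cpp
#include <cstdint>

// Standalone copy of the semantic-equality idea: only the net number of
// live objects and net live bytes matter, not the absolute churn counts.
struct Stats {
  int32_t allocs, frees;
  int64_t alloc_size, free_size;
  bool Equivalent(const Stats& x) const {
    return allocs - frees == x.allocs - x.frees &&
           alloc_size - free_size == x.alloc_size - x.free_size;
  }
};

int main() {
  Stats a = {10, 4, 1000, 400};    // 6 live objects, 600 live bytes
  Stats b = {16, 10, 1600, 1000};  // same net state after more churn
  return a.Equivalent(b) ? 0 : 1;  // equivalent, so this returns 0
}
```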
73 | 73 |
74 // Possible marks for MarkCurrentAllocations and MarkUnmarkedAllocations. New | |
75 // allocations are marked with UNMARKED by default. | |
76 enum AllocationMark { | |
77 UNMARKED = 0, | |
78 MARK_ONE, | |
79 MARK_TWO, | |
80 MARK_THREE | |
81 }; | |
82 | |
74 // Info we can return about an allocation. | 83 // Info we can return about an allocation. |
75 struct AllocInfo { | 84 struct AllocInfo { |
76 size_t object_size; // size of the allocation | 85 size_t object_size; // size of the allocation |
77 const void* const* call_stack; // call stack that made the allocation call | 86 const void* const* call_stack; // call stack that made the allocation call |
78 int stack_depth; // depth of call_stack | 87 int stack_depth; // depth of call_stack |
79 bool live; // whether the object has been marked as live | 88 bool live; // whether the object has been marked as live |
80 bool ignored; // whether the object is skipped in leak reports | 89 bool ignored; // whether the object is skipped in leak reports |
81 }; | 90 }; |
82 | 91 |
83 // Info we return about an allocation context. | 92 // Info we return about an allocation context. |
(...skipping 47 matching lines...) | |
131 // If "ptr" points to a recorded allocation and it's not marked as live | 140 // If "ptr" points to a recorded allocation and it's not marked as live |
132 // mark it as live and return true. Else return false. | 141 // mark it as live and return true. Else return false. |
133 // All allocations start as non-live. | 142 // All allocations start as non-live. |
134 bool MarkAsLive(const void* ptr); | 143 bool MarkAsLive(const void* ptr); |
135 | 144 |
136 // If "ptr" points to a recorded allocation, mark it as "ignored". | 145 // If "ptr" points to a recorded allocation, mark it as "ignored". |
137 // Ignored objects are treated like other objects, except that they | 146 // Ignored objects are treated like other objects, except that they |
138 // are skipped in heap checking reports. | 147 // are skipped in heap checking reports. |
139 void MarkAsIgnored(const void* ptr); | 148 void MarkAsIgnored(const void* ptr); |
140 | 149 |
150 // Mark all currently known allocations with the given AllocationMark. | |
151 void MarkCurrentAllocations(AllocationMark mark); | |
152 | |
153 // Mark all unmarked allocations (i.e. those marked with | |
154 // AllocationMark::UNMARKED) with the given mark. | |
155 void MarkUnmarkedAllocations(AllocationMark mark); | |
156 | |
141 // Return current total (de)allocation statistics. The returned stats | 157 // Return current total (de)allocation statistics. The returned stats |
142 // do not include mmap'ed regions. | 158 // do not include mmap'ed regions. |
143 const Stats& total() const { return total_; } | 159 const Stats& total() const { return total_; } |
144 | 160 |
145 // Allocation data iteration callback: gets passed object pointer and | 161 // Allocation data iteration callback: gets passed object pointer and |
146 // fully-filled AllocInfo. | 162 // fully-filled AllocInfo. |
147 typedef void (*AllocIterator)(const void* ptr, const AllocInfo& info); | 163 typedef void (*AllocIterator)(const void* ptr, const AllocInfo& info); |
148 | 164 |
149 // Iterate over the allocation profile data calling "callback" | 165 // Iterate over the allocation profile data calling "callback" |
150 // for every allocation. | 166 // for every allocation. |
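A minimal sketch of a callback matching the AllocIterator signature above. The iteration entry point itself sits in the elided lines; in gperftools it is named IterateAllocs, but treat that name as an assumption here:

```cpp
#include <cstddef>

static size_t g_live_bytes = 0;

// Matches: typedef void (*AllocIterator)(const void* ptr, const AllocInfo&).
static void CountLiveBytes(const void* ptr, const AllocInfo& info) {
  (void)ptr;
  if (info.ignored) return;   // these are skipped in heap checking reports
  if (info.live) g_live_bytes += info.object_size;
  // info.call_stack[0 .. info.stack_depth - 1] is the allocating call stack.
}

// Assumed usage, given a HeapProfileTable 'table':
//   table.IterateAllocs(CountLiveBytes);
```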
(...skipping 45 matching lines...) | |
196 // They are introduced to avoid the memory fragmentation and bloat that | 212 // They are introduced to avoid the memory fragmentation and bloat that |
197 // would be expected in a shared arena. A dedicated arena for this | 213 // would be expected in a shared arena. A dedicated arena for this |
198 // function allows disposing of the whole arena after ClearMMapData. | 214 // function allows disposing of the whole arena after ClearMMapData. |
199 void RefreshMMapData(Allocator mmap_alloc, DeAllocator mmap_dealloc); | 215 void RefreshMMapData(Allocator mmap_alloc, DeAllocator mmap_dealloc); |
200 | 216 |
201 // Clear the internal mmap information. Results of FillOrderedProfile and | 217 // Clear the internal mmap information. Results of FillOrderedProfile and |
202 // IterateOrderedAllocContexts won't contain mmap'ed memory regions after | 218 // IterateOrderedAllocContexts won't contain mmap'ed memory regions after |
203 // calling ClearMMapData. | 219 // calling ClearMMapData. |
204 void ClearMMapData(); | 220 void ClearMMapData(); |
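A sketch of the intended call pairing, assuming the elided Allocator/DeAllocator typedefs have the usual gperftools shape, void* (*)(size_t) and void (*)(void*). The malloc-backed hooks here are placeholders for a real dedicated arena:

```cpp
#include <cstdlib>

static void* MmapArenaAlloc(size_t size) { return malloc(size); }
static void MmapArenaDealloc(void* ptr) { free(ptr); }

// Assumed usage, given a HeapProfileTable 'table':
//   table.RefreshMMapData(MmapArenaAlloc, MmapArenaDealloc); // snapshot mmap regions
//   ... FillOrderedProfile / IterateOrderedAllocContexts now see mmap data ...
//   table.ClearMMapData();  // forget the snapshot; the whole arena can be disposed
```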
205 | 221 |
222 // Dump a list of allocations that carry the given mark, along with | |
223 // their creation stack traces and sizes, to a file named |file_name|. | |
224 // Together with MarkCurrentAllocations and MarkUnmarkedAllocations this | |
225 // can be used to find objects that are created in a certain time span: | |
226 // 1. Invoke MarkCurrentAllocations(MARK_ONE) to mark the start of the | |
227 // timespan. | |
228 // 2. Perform whatever action you suspect allocates memory that is not | |
229 // correctly freed. | |
230 // 3. Invoke MarkUnmarkedAllocations(MARK_TWO). | |
231 // 4. Perform whatever action is supposed to free the memory again. New | |
232 // allocations are not marked, so all allocations that still carry | |
233 // MARK_TWO were created during step 2. | |
234 // 5. Invoke DumpMarkedObjects(MARK_TWO) to get the list of allocations that | |
235 // were created during step 2, but survived step 4. | |
236 // | |
237 // Note that this functionality cannot be used if the HeapProfileTable is | |
238 // used for leak checking (using HeapLeakChecker). | |
239 void DumpMarkedObjects(AllocationMark mark, const char* file_name); | |
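A sketch of the five-step recipe above; DoSuspectAction() and DoCleanupAction() are hypothetical stand-ins for the code under investigation, and the dump path is illustrative:

```cpp
void DoSuspectAction();   // hypothetical: suspected of leaking
void DoCleanupAction();   // hypothetical: supposed to free again

void FindSurvivors(HeapProfileTable* table) {
  table->MarkCurrentAllocations(MARK_ONE);   // 1. fence off pre-existing allocs
  DoSuspectAction();                         // 2. new allocs stay UNMARKED
  table->MarkUnmarkedAllocations(MARK_TWO);  // 3. step-2 allocs become MARK_TWO
  DoCleanupAction();                         // 4. should free the MARK_TWO set
  table->DumpMarkedObjects(MARK_TWO,         // 5. whatever survived step 4
                           "/tmp/survivors.heap");
}
```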
240 | |
206 private: | 241 private: |
207 friend class DeepHeapProfile; | 242 friend class DeepHeapProfile; |
208 | 243 |
209 // data types ---------------------------- | 244 // data types ---------------------------- |
210 | 245 |
211 // Hash table bucket to hold (de)allocation stats | 246 // Hash table bucket to hold (de)allocation stats |
212 // for a given allocation call stack trace. | 247 // for a given allocation call stack trace. |
213 struct Bucket : public Stats { | 248 struct Bucket : public Stats { |
214 uintptr_t hash; // Hash value of the stack trace | 249 uintptr_t hash; // Hash value of the stack trace |
215 int depth; // Depth of stack trace | 250 int depth; // Depth of stack trace |
(...skipping 15 matching lines...) | |
231 bool live() const { return bucket_rep & kLive; } | 266 bool live() const { return bucket_rep & kLive; } |
232 void set_live(bool l) { | 267 void set_live(bool l) { |
233 bucket_rep = (bucket_rep & ~uintptr_t(kLive)) | (l ? kLive : 0); | 268 bucket_rep = (bucket_rep & ~uintptr_t(kLive)) | (l ? kLive : 0); |
234 } | 269 } |
235 | 270 |
236 // Should this allocation be ignored if it looks like a leak? | 271 // Should this allocation be ignored if it looks like a leak? |
237 bool ignore() const { return bucket_rep & kIgnore; } | 272 bool ignore() const { return bucket_rep & kIgnore; } |
238 void set_ignore(bool r) { | 273 void set_ignore(bool r) { |
239 bucket_rep = (bucket_rep & ~uintptr_t(kIgnore)) | (r ? kIgnore : 0); | 274 bucket_rep = (bucket_rep & ~uintptr_t(kIgnore)) | (r ? kIgnore : 0); |
240 } | 275 } |
276 AllocationMark mark() const { | |
277 return static_cast<AllocationMark>(bucket_rep & uintptr_t(kMask)); | |
278 } | |
279 void set_mark(AllocationMark mark) { | |
280 bucket_rep = (bucket_rep & ~uintptr_t(kMask)) | uintptr_t(mark); | |
281 } | |
241 | 282 |
242 private: | 283 private: |
243 // We store a few bits in the bottom bits of bucket_rep. | 284 // We store a few bits in the bottom bits of bucket_rep. |
244 // (Alignment is at least four, so we have at least two bits.) | 285 // (Alignment is at least four, so we have at least two bits.) |
245 static const int kLive = 1; | 286 static const int kLive = 1; |
246 static const int kIgnore = 2; | 287 static const int kIgnore = 2; |
247 static const int kMask = kLive | kIgnore; | 288 static const int kMask = kLive | kIgnore; |
248 | 289 |
249 uintptr_t bucket_rep; | 290 uintptr_t bucket_rep; |
250 }; | 291 }; |
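The flag layout above relies on pointer alignment. This standalone sketch shows the same low-bit tagging trick with just the kLive/kIgnore bits (constant names copied from this header; the rest is illustrative):

```cpp
#include <cstdint>

static const uintptr_t kLive = 1;
static const uintptr_t kIgnore = 2;
static const uintptr_t kMask = kLive | kIgnore;

struct Bucket { int depth; };  // alignment >= 4, so the two low bits are free

int main() {
  Bucket b = {0};
  uintptr_t rep = reinterpret_cast<uintptr_t>(&b);           // low bits are 0
  rep = (rep & ~kMask) | kLive;                              // set_live(true)
  Bucket* bucket = reinterpret_cast<Bucket*>(rep & ~kMask);  // strip the flags
  bool live = (rep & kLive) != 0;                            // live() query
  return (bucket == &b && live && !(rep & kIgnore)) ? 0 : 1;
}
```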
251 | 292 |
252 // helper for FindInsideAlloc | 293 // helper for FindInsideAlloc |
253 static size_t AllocValueSize(const AllocValue& v) { return v.bytes; } | 294 static size_t AllocValueSize(const AllocValue& v) { return v.bytes; } |
254 | 295 |
255 typedef AddressMap<AllocValue> AllocationMap; | 296 typedef AddressMap<AllocValue> AllocationMap; |
256 | 297 |
257 // Arguments that need to be passed to the DumpNonLiveIterator callback below. | 298 // Arguments that need to be passed to the DumpNonLiveIterator callback below. |
258 struct DumpArgs { | 299 struct DumpArgs { |
259 RawFD fd; // file to write to | 300 RawFD fd; // file to write to |
260 Stats* profile_stats; // stats to update (may be NULL) | 301 Stats* profile_stats; // stats to update (may be NULL) |
261 | 302 |
262 DumpArgs(RawFD a, Stats* d) | 303 DumpArgs(RawFD a, Stats* d) |
263 : fd(a), profile_stats(d) { } | 304 : fd(a), profile_stats(d) { } |
264 }; | 305 }; |
265 | 306 |
307 // Arguments that need to be passed to the DumpMarkedIterator callback below. | |
308 struct DumpMarkedArgs { | |
309 RawFD fd; // file to write to. | |
310 AllocationMark mark; // The mark of the allocations to process. | |
311 | |
312 DumpMarkedArgs(RawFD a, AllocationMark m) | |
313 : fd(a), mark(m) { } | |
jar (doing other things) 2012/06/11 08:53:14
nit: one initializer per line if you wrap... with ":" indented
jochen (gone - plz use gerrit) 2012/06/11 13:13:47
Done.
| |
314 }; | |
315 | |
316 // Arguments that need to be passed to the MarkIterator callback below. | |
317 struct MarkArgs { | |
318 AllocationMark mark; // The mark to put on allocations. | |
319 bool mark_all; // True if all allocations should be marked. Otherwise just | |
320 // mark unmarked allocations. | |
321 | |
322 MarkArgs(AllocationMark m, bool a) | |
323 : mark(m), mark_all(a) { } | |
jar (doing other things) 2012/06/11 08:53:14
nit: again... one init per line ... with ":" indented
jochen (gone - plz use gerrit) 2012/06/11 13:13:47
Done.
| |
324 }; | |
325 | |
266 // helpers ---------------------------- | 326 // helpers ---------------------------- |
267 | 327 |
268 // Unparse bucket b and print its portion of profile dump into buf. | 328 // Unparse bucket b and print its portion of profile dump into buf. |
269 // We return the amount of space in buf that we use. We start printing | 329 // We return the amount of space in buf that we use. We start printing |
270 // at buf + buflen, and promise not to go beyond buf + bufsize. | 330 // at buf + buflen, and promise not to go beyond buf + bufsize. |
271 // We do not provision for 0-terminating 'buf'. | 331 // We do not provision for 0-terminating 'buf'. |
272 // | 332 // |
273 // If profile_stats is non-NULL, we update *profile_stats by | 333 // If profile_stats is non-NULL, we update *profile_stats by |
274 // counting bucket b. | 334 // counting bucket b. |
275 // | 335 // |
(...skipping 24 matching lines...) | |
300 AllocIterator callback) { | 360 AllocIterator callback) { |
301 AllocInfo info; | 361 AllocInfo info; |
302 info.object_size = v->bytes; | 362 info.object_size = v->bytes; |
303 info.call_stack = v->bucket()->stack; | 363 info.call_stack = v->bucket()->stack; |
304 info.stack_depth = v->bucket()->depth; | 364 info.stack_depth = v->bucket()->depth; |
305 info.live = v->live(); | 365 info.live = v->live(); |
306 info.ignored = v->ignore(); | 366 info.ignored = v->ignore(); |
307 callback(ptr, info); | 367 callback(ptr, info); |
308 } | 368 } |
309 | 369 |
370 // Helper for MarkCurrentAllocations and MarkUnmarkedAllocations. | |
371 inline static void MarkIterator(const void* ptr, AllocValue* v, | |
372 const MarkArgs& args); | |
373 | |
310 // Helper for DumpNonLiveProfile to do object-granularity | 374 // Helper for DumpNonLiveProfile to do object-granularity |
311 // heap profile dumping. It gets passed to AllocationMap::Iterate. | 375 // heap profile dumping. It gets passed to AllocationMap::Iterate. |
312 inline static void DumpNonLiveIterator(const void* ptr, AllocValue* v, | 376 inline static void DumpNonLiveIterator(const void* ptr, AllocValue* v, |
313 const DumpArgs& args); | 377 const DumpArgs& args); |
314 | 378 |
379 // Helper for DumpMarkedObjects to dump all allocations with a given mark. It | |
380 // gets passed to AllocationMap::Iterate. | |
381 inline static void DumpMarkedIterator(const void* ptr, AllocValue* v, | |
382 const DumpMarkedArgs& args); | |
383 | |
315 // Helper for zeroing the size fields in buckets. | 384 // Helper for zeroing the size fields in buckets. |
316 inline static void ZeroBucketCountsIterator( | 385 inline static void ZeroBucketCountsIterator( |
317 const void* ptr, AllocValue* v, HeapProfileTable* heap_profile); | 386 const void* ptr, AllocValue* v, HeapProfileTable* heap_profile); |
318 | 387 |
319 // Helper for IterateOrderedAllocContexts and FillOrderedProfile. | 388 // Helper for IterateOrderedAllocContexts and FillOrderedProfile. |
320 // Creates a sorted list of Buckets whose length is num_alloc_buckets_ + | 389 // Creates a sorted list of Buckets whose length is num_alloc_buckets_ + |
321 // num_avaliable_mmap_buckets_. | 390 // num_avaliable_mmap_buckets_. |
322 // The caller is responsible for deallocating the returned list. | 391 // The caller is responsible for deallocating the returned list. |
323 Bucket** MakeSortedBucketList() const; | 392 Bucket** MakeSortedBucketList() const; |
324 | 393 |
(...skipping 95 matching lines...) | |
420 // Helpers for sorting and generating leak reports | 489 // Helpers for sorting and generating leak reports |
421 struct Entry; | 490 struct Entry; |
422 struct ReportState; | 491 struct ReportState; |
423 static void ReportCallback(const void* ptr, AllocValue* v, ReportState*); | 492 static void ReportCallback(const void* ptr, AllocValue* v, ReportState*); |
424 static void ReportObject(const void* ptr, AllocValue* v, char*); | 493 static void ReportObject(const void* ptr, AllocValue* v, char*); |
425 | 494 |
426 DISALLOW_COPY_AND_ASSIGN(Snapshot); | 495 DISALLOW_COPY_AND_ASSIGN(Snapshot); |
427 }; | 496 }; |
428 | 497 |
429 #endif // BASE_HEAP_PROFILE_TABLE_H_ | 498 #endif // BASE_HEAP_PROFILE_TABLE_H_ |