OLD | NEW |
1 // Copyright (c) 2008, Google Inc. | 1 // Copyright (c) 2008, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 76 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
87 | 87 |
88 // Total byte size in cache | 88 // Total byte size in cache |
89 size_t Size() const { return size_; } | 89 size_t Size() const { return size_; } |
90 | 90 |
91 // Allocate an object of the given size and class. The size given | 91 // Allocate an object of the given size and class. The size given |
92 // must be the same as the size of the class in the size map. | 92 // must be the same as the size of the class in the size map. |
93 void* Allocate(size_t size, size_t cl); | 93 void* Allocate(size_t size, size_t cl); |
94 void Deallocate(void* ptr, size_t size_class); | 94 void Deallocate(void* ptr, size_t size_class); |
95 | 95 |
96 void Scavenge(); | 96 void Scavenge(); |
| 97 void Print(TCMalloc_Printer* out) const; |
97 | 98 |
98 int GetSamplePeriod(); | 99 int GetSamplePeriod(); |
99 | 100 |
100 // Record allocation of "k" bytes. Return true iff allocation | 101 // Record allocation of "k" bytes. Return true iff allocation |
101 // should be sampled | 102 // should be sampled |
102 bool SampleAllocation(size_t k); | 103 bool SampleAllocation(size_t k); |
103 | 104 |
104 static void InitModule(); | 105 static void InitModule(); |
105 static void InitTSD(); | 106 static void InitTSD(); |
106 static ThreadCache* GetThreadHeap(); | 107 static ThreadCache* GetThreadHeap(); |
107 static ThreadCache* GetCache(); | 108 static ThreadCache* GetCache(); |
108 static ThreadCache* GetCacheIfPresent(); | 109 static ThreadCache* GetCacheIfPresent(); |
109 static ThreadCache* CreateCacheIfNecessary(); | 110 static ThreadCache* CreateCacheIfNecessary(); |
110 static void BecomeIdle(); | 111 static void BecomeIdle(); |
111 | 112 |
112 // Return the number of thread heaps in use. | 113 // Return the number of thread heaps in use. |
113 static inline int HeapsInUse(); | 114 static inline int HeapsInUse(); |
114 | 115 |
115 // Writes to total_bytes the total number of bytes used by all thread heaps. | 116 // Writes to total_bytes the total number of bytes used by all thread heaps. |
116 // class_count must be an array of size kNumClasses. Writes the number of | 117 // class_count must be an array of size kNumClasses. Writes the number of |
117 // items on the corresponding freelist. class_count may be NULL. | 118 // items on the corresponding freelist. class_count may be NULL. |
118 // The storage of both parameters must be zero initialized. | 119 // The storage of both parameters must be zero initialized. |
119 // REQUIRES: Static::pageheap_lock is held. | 120 // REQUIRES: Static::pageheap_lock is held. |
120 static void GetThreadStats(uint64_t* total_bytes, uint64_t* class_count); | 121 static void GetThreadStats(uint64_t* total_bytes, uint64_t* class_count); |
121 | 122 |
| 123 // Write debugging statistics to 'out'. |
| 124 // REQUIRES: Static::pageheap_lock is held. |
| 125 static void PrintThreads(TCMalloc_Printer* out); |
| 126 |
122 // Sets the total thread cache size to new_size, recomputing the | 127 // Sets the total thread cache size to new_size, recomputing the |
123 // individual thread cache sizes as necessary. | 128 // individual thread cache sizes as necessary. |
124 // REQUIRES: Static::pageheap_lock is held. | 129 // REQUIRES: Static::pageheap_lock is held. |
125 static void set_overall_thread_cache_size(size_t new_size); | 130 static void set_overall_thread_cache_size(size_t new_size); |
126 static size_t overall_thread_cache_size() { | 131 static size_t overall_thread_cache_size() { |
127 return overall_thread_cache_size_; | 132 return overall_thread_cache_size_; |
128 } | 133 } |
129 | 134 |
130 private: | 135 private: |
131 class FreeList { | 136 class FreeList { |
(...skipping 65 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
197 length_++; | 202 length_++; |
198 } | 203 } |
199 | 204 |
200 void* Pop() { | 205 void* Pop() { |
201 ASSERT(list_ != NULL); | 206 ASSERT(list_ != NULL); |
202 length_--; | 207 length_--; |
203 if (length_ < lowater_) lowater_ = length_; | 208 if (length_ < lowater_) lowater_ = length_; |
204 return SLL_Pop(&list_); | 209 return SLL_Pop(&list_); |
205 } | 210 } |
206 | 211 |
207 void* Next() { | |
208 return SLL_Next(&list_); | |
209 } | |
210 | |
211 void PushRange(int N, void *start, void *end) { | 212 void PushRange(int N, void *start, void *end) { |
212 SLL_PushRange(&list_, start, end); | 213 SLL_PushRange(&list_, start, end); |
213 length_ += N; | 214 length_ += N; |
214 } | 215 } |
215 | 216 |
216 void PopRange(int N, void **start, void **end) { | 217 void PopRange(int N, void **start, void **end) { |
217 SLL_PopRange(&list_, N, start, end); | 218 SLL_PopRange(&list_, N, start, end); |
218 ASSERT(length_ >= N); | 219 ASSERT(length_ >= N); |
219 length_ -= N; | 220 length_ -= N; |
220 if (length_ < lowater_) lowater_ = length_; | 221 if (length_ < lowater_) lowater_ = length_; |
(...skipping 120 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
341 return FetchFromCentralCache(cl, size); | 342 return FetchFromCentralCache(cl, size); |
342 } | 343 } |
343 size_ -= size; | 344 size_ -= size; |
344 return list->Pop(); | 345 return list->Pop(); |
345 } | 346 } |
346 | 347 |
347 inline void ThreadCache::Deallocate(void* ptr, size_t cl) { | 348 inline void ThreadCache::Deallocate(void* ptr, size_t cl) { |
348 FreeList* list = &list_[cl]; | 349 FreeList* list = &list_[cl]; |
349 size_ += Static::sizemap()->ByteSizeForClass(cl); | 350 size_ += Static::sizemap()->ByteSizeForClass(cl); |
350 ssize_t size_headroom = max_size_ - size_ - 1; | 351 ssize_t size_headroom = max_size_ - size_ - 1; |
351 | |
352 // This catches back-to-back frees of allocs in the same size | |
353 // class. A more comprehensive (and expensive) test would be to walk | |
354 // the entire freelist. But this might be enough to find some bugs. | |
355 ASSERT(ptr != list->Next()); | |
356 | |
357 list->Push(ptr); | 352 list->Push(ptr); |
358 ssize_t list_headroom = | 353 ssize_t list_headroom = |
359 static_cast<ssize_t>(list->max_length()) - list->length(); | 354 static_cast<ssize_t>(list->max_length()) - list->length(); |
360 | 355 |
361 // There are two relatively uncommon things that require further work. | 356 // There are two relatively uncommon things that require further work. |
362 // In the common case we're done, and in that case we need a single branch | 357 // In the common case we're done, and in that case we need a single branch |
363 // because of the bitwise-or trick that follows. | 358 // because of the bitwise-or trick that follows. |
364 if ((list_headroom | size_headroom) < 0) { | 359 if ((list_headroom | size_headroom) < 0) { |
365 if (list_headroom < 0) { | 360 if (list_headroom < 0) { |
366 ListTooLong(list, cl); | 361 ListTooLong(list, cl); |
(...skipping 27 matching lines...) Expand all Loading... |
394 // because we may be in the thread destruction code and may have | 389 // because we may be in the thread destruction code and may have |
395 // already cleaned up the cache for this thread. | 390 // already cleaned up the cache for this thread. |
396 inline ThreadCache* ThreadCache::GetCacheIfPresent() { | 391 inline ThreadCache* ThreadCache::GetCacheIfPresent() { |
397 if (!tsd_inited_) return NULL; | 392 if (!tsd_inited_) return NULL; |
398 return GetThreadHeap(); | 393 return GetThreadHeap(); |
399 } | 394 } |
400 | 395 |
401 } // namespace tcmalloc | 396 } // namespace tcmalloc |
402 | 397 |
403 #endif // TCMALLOC_THREAD_CACHE_H_ | 398 #endif // TCMALLOC_THREAD_CACHE_H_ |
OLD | NEW |