Chromium Code Reviews

Diff: Source/wtf/FastMalloc.cpp

Issue 20300002: Fix trailing whitespace in .cpp, .h, and .idl files (ex. Source/core) (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 7 years, 5 months ago
1 // Copyright (c) 2005, 2007, Google Inc.
2 // All rights reserved.
3 // Copyright (C) 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
4 //
5 // Redistribution and use in source and binary forms, with or without
6 // modification, are permitted provided that the following conditions are
7 // met:
8 //
9 //     * Redistributions of source code must retain the above copyright
10 // notice, this list of conditions and the following disclaimer.
11 //     * Redistributions in binary form must reproduce the above
12 // copyright notice, this list of conditions and the following disclaimer
13 // in the documentation and/or other materials provided with the
14 // distribution.
15 //     * Neither the name of Google Inc. nor the names of its
16 // contributors may be used to endorse or promote products derived from
17 // this software without specific prior written permission.
18 //
19 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
(...skipping 165 matching lines...)
194 #endif
195
196 NO_RETURN_DUE_TO_CRASH void fastMallocMatchFailed(void*)
197 {
198     CRASH();
199 }
200
201 } // namespace Internal
202
203
204 void* fastZeroedMalloc(size_t n)
205 {
206     void* result = fastMalloc(n);
207     memset(result, 0, n);
208     return result;
209 }
210
211 char* fastStrDup(const char* src)
212 {
213     size_t len = strlen(src) + 1;
214     char* dup = static_cast<char*>(fastMalloc(len));
215     memcpy(dup, src, len);
216     return dup;
217 }
218
219 TryMallocReturnValue tryFastZeroedMalloc(size_t n)
220 {
221     void* result;
222     if (!tryFastMalloc(n).getValue(result))
223         return 0;
224     memset(result, 0, n);
225     return result;
226 }
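A note on the pattern above: tryFastMalloc returns a TryMallocReturnValue rather than a raw pointer, so callers must unpack the result through getValue() and handle failure explicitly, whereas fastMalloc simply crashes when memory is exhausted. A minimal caller sketch (copyInto and its use of the buffer are hypothetical):

```cpp
#include <cstring>
#include "wtf/FastMalloc.h"

bool copyInto(const char* src, size_t n)
{
    void* buffer;
    // tryFastMalloc() may fail; getValue() extracts the pointer and
    // reports whether the allocation actually succeeded.
    if (!WTF::tryFastMalloc(n).getValue(buffer))
        return false; // out of memory; the caller decides what to do
    memcpy(buffer, src, n);
    WTF::fastFree(buffer);
    return true;
}
```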
227
228 } // namespace WTF
229
230 #if FORCE_SYSTEM_MALLOC
231
232 #if OS(DARWIN)
233 #include <malloc/malloc.h>
234 #elif OS(WINDOWS)
235 #include <malloc.h>
236 #endif
237
238 namespace WTF {
239
240 size_t fastMallocGoodSize(size_t bytes)
241 {
242 #if OS(DARWIN)
243     return malloc_good_size(bytes);
244 #else
245     return bytes;
246 #endif
247 }
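fastMallocGoodSize reports how much usable memory a request for `bytes` would really receive: on Darwin the zone allocator rounds requests up to a bucket size, and malloc_good_size exposes that rounded figure so containers can grow straight to it. A hedged usage sketch:

```cpp
// Size a buffer to the allocator's real bucket so the rounding slack
// becomes usable capacity instead of waste. (Illustrative only; the
// concrete rounded value depends on the platform's malloc zone.)
size_t requested = 1000;
size_t usable = WTF::fastMallocGoodSize(requested); // >= requested
char* data = static_cast<char*>(WTF::fastMalloc(usable));
```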
248
249 TryMallocReturnValue tryFastMalloc(size_t n)
250 {
251     ASSERT(!isForbidden());
252
253 #if ENABLE(WTF_MALLOC_VALIDATION)
254     if (std::numeric_limits<size_t>::max() - Internal::ValidationBufferSize <= n) // If overflow would occur...
255         return 0;
256
257     void* result = malloc(n + Internal::ValidationBufferSize);
258     if (!result)
259         return 0;
260     Internal::ValidationHeader* header = static_cast<Internal::ValidationHeader*>(result);
261     header->m_size = n;
262     header->m_type = Internal::AllocTypeMalloc;
263     header->m_prefix = static_cast<unsigned>(Internal::ValidationPrefix);
264     result = header + 1;
265     *Internal::fastMallocValidationSuffix(result) = Internal::ValidationSuffix;
266     fastMallocValidate(result);
267     return result;
268 #else
269     return malloc(n);
270 #endif
271 }
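Under WTF_MALLOC_VALIDATION each allocation is bracketed: a ValidationHeader recording the size, allocation type, and a prefix sentinel sits immediately before the pointer handed back to the caller (`result = header + 1`), and a suffix sentinel is written just past the user bytes. The same bracketing technique in miniature (a standalone sketch with made-up sentinels, not the actual WTF types):

```cpp
#include <cassert>
#include <cstdlib>
#include <cstring>

// Standalone restatement of the header/suffix bracketing above; the
// real ValidationHeader lives in FastMalloc.h and carries more fields.
struct Header { size_t size; unsigned prefix; };
static const unsigned kPrefix = 0xf00dcafe; // hypothetical sentinel values
static const unsigned kSuffix = 0xdeadbeef;

void* checkedMalloc(size_t n)
{
    Header* h = static_cast<Header*>(malloc(sizeof(Header) + n + sizeof(unsigned)));
    if (!h)
        return 0;
    h->size = n;
    h->prefix = kPrefix;
    void* user = h + 1; // the caller sees only the bytes after the header
    unsigned suffix = kSuffix;
    memcpy(static_cast<char*>(user) + n, &suffix, sizeof(suffix));
    return user;
}

void checkedFree(void* user)
{
    if (!user)
        return;
    Header* h = static_cast<Header*>(user) - 1;
    unsigned suffix;
    memcpy(&suffix, static_cast<char*>(user) + h->size, sizeof(suffix));
    assert(h->prefix == kPrefix && suffix == kSuffix); // catch over/underruns
    free(h);
}
```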
272
273 void* fastMalloc(size_t n)
274 {
275     ASSERT(!isForbidden());
276
277 #if ENABLE(WTF_MALLOC_VALIDATION)
278     TryMallocReturnValue returnValue = tryFastMalloc(n);
279     void* result;
280     if (!returnValue.getValue(result))
281         CRASH();
282 #else
283     void* result = malloc(n);
(...skipping 43 matching lines...)
327     return result;
328 }
329
330 void fastFree(void* p)
331 {
332     ASSERT(!isForbidden());
333
334 #if ENABLE(WTF_MALLOC_VALIDATION)
335     if (!p)
336         return;
337
338     fastMallocMatchValidateFree(p, Internal::AllocTypeMalloc);
339     Internal::ValidationHeader* header = Internal::fastMallocValidationHeader(p);
340     memset(p, 0xCC, header->m_size);
341     free(header);
342 #else
343     free(p);
344 #endif
345 }
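The memset to 0xCC scribbles over the freed block so that any use-after-free reads a recognizable garbage pattern rather than stale data; on x86, 0xCC is also the int3 breakpoint opcode, so executing freed memory traps immediately. A minimal restatement of the idea (not the WTF code):

```cpp
#include <cstdlib>
#include <cstring>

// Poison freed memory with a distinctive byte so stale pointers are
// easy to spot in a debugger (reads show 0xCCCCCC..., and on x86 a
// jump into the block hits int3).
void poisonAndFree(void* p, size_t size)
{
    memset(p, 0xCC, size);
    free(p);
}
```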
346
347 TryMallocReturnValue tryFastRealloc(void* p, size_t n)
(...skipping 33 matching lines...)
381 #else
382     void* result = realloc(p, n);
383 #endif
384
385     ASSERT(result); // We expect tcmalloc underneath, which would crash instead of getting here.
386
387     return result;
388 }
389
390 void releaseFastMallocFreeMemory() { }
391
392 FastMallocStatistics fastMallocStatistics()
393 {
394     FastMallocStatistics statistics = { 0, 0, 0 };
395     return statistics;
396 }
397
398 } // namespace WTF
399
400 #if OS(DARWIN)
401 // This symbol is present in the JavaScriptCore exports file even when FastMalloc is disabled.
(...skipping 62 matching lines...)
464 #define pthread_setspecific(key, val) _pthread_setspecific_direct(key, (val))
465 #endif
466 #endif
467
468 #define DEFINE_VARIABLE(type, name, value, meaning) \
469     namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead { \
470     type FLAGS_##name(value); \
471     char FLAGS_no##name; \
472     } \
473     using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name
474
475 #define DEFINE_int64(name, value, meaning) \
476     DEFINE_VARIABLE(int64_t, name, value, meaning)
477
478 #define DEFINE_double(name, value, meaning) \
479     DEFINE_VARIABLE(double, name, value, meaning)
480
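For readers unfamiliar with this gflags-style machinery: DEFINE_VARIABLE plants the flag in a namespace whose name warns against direct use, then re-exports just the FLAGS_ variable. Expanding a hypothetical DEFINE_int64(release_rate, 10, "...") by hand gives roughly:

```cpp
// Hand-expansion of DEFINE_int64(release_rate, 10, "...") through
// DEFINE_VARIABLE; the flag name is made up for illustration.
namespace FLAG__namespace_do_not_use_directly_use_DECLARE_int64_t_instead {
    int64_t FLAGS_release_rate(10);
    char FLAGS_norelease_rate; // placeholder for the "no" form of the flag
}
using FLAG__namespace_do_not_use_directly_use_DECLARE_int64_t_instead::FLAGS_release_rate;
```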
481 namespace WTF {
482
483 #define malloc fastMalloc
484 #define calloc fastCalloc
485 #define free fastFree
486 #define realloc fastRealloc
487
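From here on the libc names are aliases: the preprocessor rewrites later uses of malloc, calloc, free, and realloc in this translation unit to the WTF fast variants, so the tcmalloc-derived code below keeps its original spelling while compiling against FastMalloc. For instance:

```cpp
// After the #defines above, a line in this file such as
void* p = malloc(16);
// is compiled as
void* q = fastMalloc(16);
```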
(...skipping 25 matching lines...)
513  * To make it harder to exploit use-after-free style exploits
514  * we mask the addresses we put into our linked lists with the
515  * address of kLLHardeningMask. Due to ASLR the address of
516  * kLLHardeningMask should be sufficiently randomized to make direct
517  * freelist manipulation much more difficult.
518  */
519 enum {
520     MaskKeyShift = 13
521 };
522
523 static ALWAYS_INLINE uintptr_t internalEntropyValue()
524 {
525     static uintptr_t value = EntropySource<sizeof(uintptr_t)>::value() | 1;
526     ASSERT(value);
527     return value;
528 }
529
530 #define HARDENING_ENTROPY internalEntropyValue()
531 #define ROTATE_VALUE(value, amount) (((value) >> (amount)) | ((value) << (sizeof(value) * 8 - (amount))))
532 #define XOR_MASK_PTR_WITH_KEY(ptr, key, entropy) (reinterpret_cast<typeof(ptr)>(reinterpret_cast<uintptr_t>(ptr)^(ROTATE_VALUE(reinterpret_cast<uintptr_t>(key), MaskKeyShift)^entropy)))
533
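Because XOR is self-inverse, masking a pointer twice with the same key and entropy round-trips exactly, so the freelist can store masked next-pointers and unmask them on every traversal; an attacker who overwrites an entry without knowing the runtime entropy produces a wild pointer rather than a controlled one. A standalone sketch of the round-trip (plain functions instead of the macros, with a made-up entropy value):

```cpp
#include <cstdint>

// Restatement of ROTATE_VALUE / XOR_MASK_PTR_WITH_KEY as functions;
// the real code derives 'entropy' from EntropySource at startup.
static uintptr_t rotate(uintptr_t v, unsigned amount)
{
    return (v >> amount) | (v << (sizeof(v) * 8 - amount));
}

template <typename T>
static T* maskPointer(T* ptr, void* key, uintptr_t entropy)
{
    uintptr_t mask = rotate(reinterpret_cast<uintptr_t>(key), 13) ^ entropy;
    return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(ptr) ^ mask);
}

// Round-trip: masking twice with the same key/entropy restores the pointer.
// int node;
// uintptr_t entropy = 0x9e3779b97f4a7c15; // made-up entropy for illustration
// int* stored = maskPointer(&node, &node, entropy);
// int* restored = maskPointer(stored, &node, entropy); // == &node
```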
(...skipping 976 matching lines...)
1510 // because sometimes the sizeclass is all the information we need.
1511
1512 // Selector class -- general selector uses 3-level map
1513 template <int BITS> class MapSelector {
1514 public:
1515     typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
1516     typedef PackedCache<BITS, uint64_t> CacheType;
1517 };
1518
1519 #if CPU(X86_64)
1520 // On all known X86-64 platforms, the upper 16 bits are always unused and therefore
1521 // can be excluded from the PageMap key.
1522 // See http://en.wikipedia.org/wiki/X86-64#Virtual_address_space_details
1523
1524 static const size_t kBitsUnusedOn64Bit = 16;
1525 #else
1526 static const size_t kBitsUnusedOn64Bit = 0;
1527 #endif
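The map is keyed by page number rather than raw address, so a BITS-wide address space needs only BITS - kPageShift key bits, and on x86-64 the 16 redundant canonical-address bits come off the top as well. Illustrative arithmetic (kPageShift = 12, i.e. 4 KiB pages, is an assumption; the real constant is defined in a skipped portion of this file):

```cpp
// How many bits of page number the 64-bit selector must cover.
static const size_t kAddressBits = 64;
static const size_t kUnusedTopBits = 16;    // x86-64 canonical addresses
static const size_t kAssumedPageShift = 12; // hypothetical 4 KiB pages

// 64 - 16 - 12 = 36 bits of page number, so the three-level map
// resolves at most 2^36 (about 69 billion) distinct pages.
static const size_t kPageMapKeyBits = kAddressBits - kUnusedTopBits - kAssumedPageShift;
```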
1528
1529 // A three-level map for 64-bit machines
1530 template <> class MapSelector<64> {
(...skipping 81 matching lines...)
1612     // Return the descriptor for the specified page.
1613     inline Span* GetDescriptor(PageID p) const {
1614         return reinterpret_cast<Span*>(pagemap_.get(p));
1615     }
1616
1617     inline Span* GetDescriptorEnsureSafe(PageID p)
1618     {
1619         pagemap_.Ensure(p, 1);
1620         return GetDescriptor(p);
1621     }
1622
1623     size_t ReturnedBytes() const;
1624
1625     // Return number of bytes allocated from system
1626     inline uint64_t SystemBytes() const { return system_bytes_; }
1627
1628     // Return number of free bytes in heap
1629     uint64_t FreeBytes() const {
1630         return (static_cast<uint64_t>(free_pages_) << kPageShift);
1631     }
1632
(...skipping 63 matching lines...)
1696     //
1697     // "released" is true iff "span" was found on a "returned" list.
1698     void Carve(Span* span, Length n, bool released);
1699
1700     void RecordSpan(Span* span) {
1701         pagemap_.set(span->start, span);
1702         if (span->length > 1) {
1703             pagemap_.set(span->start + span->length - 1, span);
1704         }
1705     }
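RecordSpan maps only a span's first and last pages because those are the only entries coalescing ever consults: when a span is freed, the heap probes the page just before its start and the page just after its end to find mergeable neighbors. A simplified sketch of that probe (the real logic lives in the Delete path, outside this diff):

```cpp
// The left neighbor's *last* page and the right neighbor's *first*
// page are both in the pagemap, so each probe is O(1).
Span* prev = GetDescriptor(span->start - 1);             // left neighbor
Span* next = GetDescriptor(span->start + span->length);  // right neighbor
if (prev && prev->free) { /* merge with left neighbor */ }
if (next && next->free) { /* merge with right neighbor */ }
```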
1706
1707     // Allocate a large span of length == n. If successful, returns a
1708     // span of exactly the specified length. Else, returns NULL.
1709     Span* AllocLarge(Length n);
1710
1711 #if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1712     // Incrementally release some memory to the system.
1713     // IncrementalScavenge(n) is called whenever n pages are freed.
1714     void IncrementalScavenge(Length n);
1715 #endif
1716
1717     // Number of pages to deallocate before doing more scavenging
1718     int64_t scavenge_counter_;
1719
1720     // Index of last free list we scavenged
1721     size_t scavenge_index_;
1722
1723 #if OS(DARWIN)
1724     friend class FastMallocZone;
1725 #endif
1726
1727 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1728     void initializeScavenger();
1729     ALWAYS_INLINE void signalScavenger();
1730     void scavenge();
1731     ALWAYS_INLINE bool shouldScavenge() const;
1732
1733 #if HAVE(DISPATCH_H) || OS(WINDOWS)
1734     void periodicScavenge();
1735     ALWAYS_INLINE bool isScavengerSuspended();
1736     ALWAYS_INLINE void scheduleScavenger();
1737     ALWAYS_INLINE void rescheduleScavenger();
1738     ALWAYS_INLINE void suspendScavenger();
1739 #endif
1740
1741 #if HAVE(DISPATCH_H)
1742     dispatch_queue_t m_scavengeQueue;
1743     dispatch_source_t m_scavengeTimer;
1744     bool m_scavengingSuspended;
1745 #elif OS(WINDOWS)
1746     static void CALLBACK scavengerTimerFired(void*, BOOLEAN);
1747     HANDLE m_scavengeQueueTimer;
1748 #else
1749     static NO_RETURN_WITH_VALUE void* runScavengerThread(void*);
1750     NO_RETURN void scavengerThread();
1751
1752     // Keeps track of whether the background thread is actively scavenging memory every kScavengeDelayInSeconds,
1753     // or is blocked waiting for more pages to be deleted.
1754     bool m_scavengeThreadActive;
1755
1756     pthread_mutex_t m_scavengeMutex;
1757     pthread_cond_t m_scavengeCondition;
1758 #endif
(...skipping 138 matching lines...)
1897 {
1898     static_cast<TCMalloc_PageHeap*>(context)->scavengerThread();
1899 #if COMPILER(MSVC)
1900     // Without this, Visual Studio will complain that this method does not return a value.
1901     return 0;
1902 #endif
1903 }
1904
1905 ALWAYS_INLINE void TCMalloc_PageHeap::signalScavenger()
1906 {
1907     // shouldScavenge() should be called only when the pageheap_lock spinlock is held; additionally,
1908     // m_scavengeThreadActive is only set to false whilst pageheap_lock is held. The caller must ensure the lock
1909     // is taken prior to calling this method. If the scavenger thread is sleeping and shouldScavenge() indicates
1910     // there is memory to free, the scavenger thread is signalled to start.
1911     ASSERT(pageheap_lock.IsHeld());
1912     if (!m_scavengeThreadActive && shouldScavenge())
1913         pthread_cond_signal(&m_scavengeCondition);
1914 }
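signalScavenger is the producer half of a standard condition-variable handshake; the scavenger thread is the consumer, sleeping on m_scavengeCondition until freed pages accumulate. A sketch of what the consumer side plausibly looks like (the real scavengerThread is outside this diff; the loop below is an assumption built from the members declared above):

```cpp
// Hypothetical consumer loop matching the pthread_cond_signal above.
void scavengerLoop()
{
    pthread_mutex_lock(&m_scavengeMutex);
    for (;;) {
        while (!shouldScavenge()) {
            m_scavengeThreadActive = false;
            // Sleep until signalScavenger() fires; re-checking the
            // predicate in a loop tolerates spurious wakeups.
            pthread_cond_wait(&m_scavengeCondition, &m_scavengeMutex);
        }
        m_scavengeThreadActive = true;
        scavenge(); // give committed-but-free pages back to the system
    }
}
```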
1915
1916 #endif
1917
1918 void TCMalloc_PageHeap::scavenge()
1919 {
1920     size_t pagesToRelease = min_free_committed_pages_since_last_scavenge_ * kScavengePercentage;
1921     size_t targetPageCount = std::max<size_t>(kMinimumFreeCommittedPageCount, free_committed_pages_ - pagesToRelease);
1922
1923     Length lastFreeCommittedPages = free_committed_pages_;
1924     while (free_committed_pages_ > targetPageCount) {
1925         ASSERT(Check());
1926         for (int i = kMaxPages; i > 0 && free_committed_pages_ >= targetPageCount; i--) {
1927             SpanList* slist = (static_cast<size_t>(i) == kMaxPages) ? &large_ : &free_[i];
1928             // If the span size is bigger than kMinSpanListsWithSpans pages, return all the spans in the list; else return all but 1 span.
1929             // Return only 50% of a spanlist at a time so spans of size 1 are not the only ones left.
1930             size_t length = DLL_Length(&slist->normal, entropy_);
1931             size_t numSpansToReturn = (i > kMinSpanListsWithSpans) ? length : length / 2;
1932             for (int j = 0; static_cast<size_t>(j) < numSpansToReturn && !DLL_IsEmpty(&slist->normal, entropy_) && free_committed_pages_ > targetPageCount; j++) {
1933                 Span* s = slist->normal.prev(entropy_);
1934                 DLL_Remove(s, entropy_);
1935                 ASSERT(!s->decommitted);
1936                 if (!s->decommitted) {
1937                     TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
1938                                            static_cast<size_t>(s->length << kPageShift));
1939                     ASSERT(free_committed_pages_ >= s->length);
1940                     free_committed_pages_ -= s->length;
1941                     s->decommitted = true;
1942                 }
1943                 DLL_Prepend(&slist->returned, s, entropy_);
1944             }
1945         }
1946
1947         if (lastFreeCommittedPages == free_committed_pages_)
1948             break;
1949         lastFreeCommittedPages = free_committed_pages_;
1950     }
1951
1952     min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
1953 }
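The release target scales with the low-water mark since the previous pass: the heap gives back kScavengePercentage of that minimum, but never shrinks the free-committed pool below kMinimumFreeCommittedPageCount. Worked numbers (both constants here are assumptions for illustration; the real values are defined earlier in the file):

```cpp
#include <algorithm>

const double kScavengePercentageGuess = 0.5;            // hypothetical
const size_t kMinimumFreeCommittedPageCountGuess = 512; // hypothetical

size_t minFreeSinceLastScavenge = 2000; // low-water mark this interval
size_t freeCommittedPages = 3000;       // currently committed free pages

size_t pagesToRelease = minFreeSinceLastScavenge * kScavengePercentageGuess; // 1000
size_t targetPageCount = std::max(kMinimumFreeCommittedPageCountGuess,
                                  freeCommittedPages - pagesToRelease);      // 2000
// The loop above then decommits spans until free_committed_pages_ <= 2000.
```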
1954
1955 ALWAYS_INLINE bool TCMalloc_PageHeap::shouldScavenge() const
1956 {
1957     return free_committed_pages_ > kMinimumFreeCommittedPageCount;
1958 }
1959
1960 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1961
1962 inline Span* TCMalloc_PageHeap::New(Length n) {
1963     ASSERT(Check());
1964     ASSERT(n > 0);
1965
1966     // Find first size >= n that has a non-empty list
1967     for (Length s = n; s < kMaxPages; s++) {
(...skipping 11 matching lines...)
1979             continue;
1980         }
1981
1982         Span* result = ll->next(entropy_);
1983         Carve(result, n, released);
1984 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1985         // The newly allocated memory is from a span that's in the normal span list (already committed). Update the
1986         // free committed pages count.
1987         ASSERT(free_committed_pages_ >= n);
1988         free_committed_pages_ -= n;
1989         if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
1990             min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
1991 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1992         ASSERT(Check());
1993         free_pages_ -= n;
1994         return result;
1995     }
1996
1997     Span* result = AllocLarge(n);
1998     if (result != NULL) {
1999         ASSERT_SPAN_COMMITTED(result);
(...skipping 85 matching lines...)
2085
2086     if (released) {
2087         // If the span chosen to carve from is decommitted, commit the entire span at once to avoid committing spans 1 page at a time.
2088         ASSERT(span->decommitted);
2089         TCMalloc_SystemCommit(reinterpret_cast<void*>(span->start << kPageShift), static_cast<size_t>(span->length << kPageShift));
2090         span->decommitted = false;
2091 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
2092         free_committed_pages_ += span->length;
2093 #endif
2094     }
2095
2096     const int extra = static_cast<int>(span->length - n);
2097     ASSERT(extra >= 0);
2098     if (extra > 0) {
2099         Span* leftover = NewSpan(span->start + n, extra);
2100         leftover->free = 1;
2101         leftover->decommitted = false;
2102         Event(leftover, 'S', extra);
2103         RecordSpan(leftover);
2104
2105         // Place leftover span on appropriate free list
(...skipping 154 matching lines...)
2260     // Associate span object with all interior pages as well
2261     ASSERT(!span->free);
2262     ASSERT(GetDescriptor(span->start) == span);
2263     ASSERT(GetDescriptor(span->start+span->length-1) == span);
2264     Event(span, 'C', sc);
2265     span->sizeclass = static_cast<unsigned int>(sc);
2266     for (Length i = 1; i < span->length-1; i++) {
2267         pagemap_.set(span->start+i, span);
2268     }
2269 }
2270
2271 size_t TCMalloc_PageHeap::ReturnedBytes() const {
2272     size_t result = 0;
2273     for (unsigned s = 0; s < kMaxPages; s++) {
2274         const int r_length = DLL_Length(&free_[s].returned, entropy_);
2275         unsigned r_pages = s * r_length;
2276         result += r_pages << kPageShift;
2277     }
2278
2279     for (Span* s = large_.returned.next(entropy_); s != &large_.returned; s = s->next(entropy_))
2280         result += s->length << kPageShift;
2281     return result;
2282 }
2283
2284 bool TCMalloc_PageHeap::GrowHeap(Length n) {
2285     ASSERT(kMaxPages >= kMinSystemAlloc);
2286     if (n > kMaxValidPages) return false;
2287     Length ask = (n > kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc);
2288     size_t actual_size;
(...skipping 103 matching lines...)
2392         DLL_Prepend(returned, s, entropy_);
2393         TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
2394                                static_cast<size_t>(s->length << kPageShift));
2395 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
2396         freePageReduction += s->length;
2397 #endif
2398     }
2399
2400 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
2401     free_committed_pages_ -= freePageReduction;
2402     if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
2403         min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
2404 #endif
2405 }
2406
2407 void TCMalloc_PageHeap::ReleaseFreePages() {
2408     for (Length s = 0; s < kMaxPages; s++) {
2409         ReleaseFreeList(&free_[s].normal, &free_[s].returned);
2410     }
2411     ReleaseFreeList(&large_.normal, &large_.returned);
2412     ASSERT(Check());
(...skipping 867 matching lines...)
3280     ASSERT(!tsd_inited);
3281 #if USE(PTHREAD_GETSPECIFIC_DIRECT)
3282     pthread_key_init_np(heap_key, DestroyThreadCache);
3283 #else
3284     pthread_key_create(&heap_key, DestroyThreadCache);
3285 #endif
3286 #if OS(WINDOWS)
3287     tlsIndex = TlsAlloc();
3288 #endif
3289     tsd_inited = true;
3290
3291 #if !OS(WINDOWS)
3292     // We may have used a fake pthread_t for the main thread. Fix it.
3293     pthread_t zero;
3294     memset(&zero, 0, sizeof(zero));
3295 #endif
3296     ASSERT(pageheap_lock.IsHeld());
3297     for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
3298 #if OS(WINDOWS)
3299         if (h->tid_ == 0) {
3300             h->tid_ = GetCurrentThreadId();
(...skipping 371 matching lines...)
3672 #if ENABLE(WTF_MALLOC_VALIDATION)
3673     fastMallocValidate(result);
3674 #endif
3675     return result;
3676 }
3677
3678 template <bool crashOnFailure>
3679 ALWAYS_INLINE
3680 void* calloc(size_t n, size_t elem_size) {
3681     size_t totalBytes = n * elem_size;
3682
3683     // Protect against overflow
3684     if (n > 1 && elem_size && (totalBytes / elem_size) != n)
3685         return 0;
3686
3687 #if ENABLE(WTF_MALLOC_VALIDATION)
3688     void* result = malloc<crashOnFailure>(totalBytes);
3689     if (!result)
3690         return 0;
3691
3692     memset(result, 0, totalBytes);
(...skipping 467 matching lines...)
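The division check in calloc above catches multiplications that wrapped: if n * elem_size overflowed size_t, dividing the truncated product by elem_size can no longer recover n. A worked 32-bit example (values chosen to force the wrap; the same logic holds for 64-bit size_t with larger inputs):

```cpp
#include <cstdint>

// 32-bit illustration of calloc's overflow guard.
uint32_t n = 0x10000;               // 65536 elements
uint32_t elemSize = 0x10001;        // 65537 bytes each
uint32_t totalBytes = n * elemSize; // true product 0x100010000 wraps to 0x10000

// 0x10000 / 0x10001 == 0, which != n, so the wrap is detected and the
// allocation is refused instead of returning a too-small block.
bool overflowed = (n > 1) && elemSize && (totalBytes / elemSize) != n;
```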
4160 void FastMallocZone::init()
4161 {
4162     static FastMallocZone zone(pageheap, &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache), &span_allocator, &threadheap_allocator);
4163 }
4164
4165 #endif // OS(DARWIN)
4166
4167 } // namespace WTF
4168
4169 #endif // FORCE_SYSTEM_MALLOC
