OLD | NEW |
(Empty) | |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #ifndef PersistentNode_h |
| 6 #define PersistentNode_h |
| 7 |
| 8 #include "platform/PlatformExport.h" |
| 9 #include "platform/heap/ThreadState.h" |
| 10 #include "wtf/Assertions.h" |
| 11 #include "wtf/MainThread.h" |
| 12 #include "wtf/ThreadingPrimitives.h" |
| 13 |
| 14 namespace blink { |
| 15 |
| 16 class PersistentNode final { |
| 17 public: |
| 18 PersistentNode() |
| 19 : m_self(nullptr) |
| 20 , m_trace(nullptr) |
| 21 { |
| 22 ASSERT(isUnused()); |
| 23 } |
| 24 |
| 25 ~PersistentNode() |
| 26 { |
| 27 // If you hit this assert, it means that the thread finished |
| 28 // without clearing persistent handles that the thread created. |
| 29 // We don't enable the assert for the main thread because the |
| 30 // main thread finishes without clearing all persistent handles. |
| 31 ASSERT(isMainThread() || isUnused()); |
| 32 } |
| 33 |
| 34 // It is dangrous to copy the PersistentNode because it breaks the |
| 35 // free list. |
| 36 PersistentNode& operator=(const PersistentNode& otherref) = delete; |
| 37 |
| 38 // Ideally the trace method should be virtual and automatically dispatch |
| 39 // to the most specific implementation. However having a virtual method |
| 40 // on PersistentNode leads to too eager template instantiation with MSVC |
| 41 // which leads to include cycles. |
| 42 // Instead we call the constructor with a TraceCallback which knows the |
| 43 // type of the most specific child and calls trace directly. See |
| 44 // TraceMethodDelegate in Visitor.h for how this is done. |
| 45 void tracePersistentNode(Visitor* visitor) |
| 46 { |
| 47 ASSERT(!isUnused()); |
| 48 ASSERT(m_trace); |
| 49 m_trace(visitor, m_self); |
| 50 } |
| 51 |
| 52 void initialize(void* self, TraceCallback trace) |
| 53 { |
| 54 ASSERT(isUnused()); |
| 55 m_self = self; |
| 56 m_trace = trace; |
| 57 } |
| 58 |
| 59 void setFreeListNext(PersistentNode* node) |
| 60 { |
| 61 ASSERT(!node || node->isUnused()); |
| 62 m_self = node; |
| 63 m_trace = nullptr; |
| 64 ASSERT(isUnused()); |
| 65 } |
| 66 |
| 67 PersistentNode* freeListNext() |
| 68 { |
| 69 ASSERT(isUnused()); |
| 70 PersistentNode* node = reinterpret_cast<PersistentNode*>(m_self); |
| 71 ASSERT(!node || node->isUnused()); |
| 72 return node; |
| 73 } |
| 74 |
| 75 bool isUnused() const |
| 76 { |
| 77 return !m_trace; |
| 78 } |
| 79 |
| 80 private: |
| 81 // If this PersistentNode is in use: |
| 82 // - m_self points to the corresponding Persistent handle. |
| 83 // - m_trace points to the trace method. |
| 84 // If this PersistentNode is freed: |
| 85 // - m_self points to the next freed PersistentNode. |
| 86 // - m_trace is nullptr. |
| 87 void* m_self; |
| 88 TraceCallback m_trace; |
| 89 }; |
| 90 |
// A fixed-size chunk of PersistentNodes. PersistentRegion grows by
// prepending chunks to a singly linked list of PersistentNodeSlots
// (see ensurePersistentNodeSlots in the .cpp). Members are accessed
// only by PersistentRegion, hence the friend declaration.
struct PersistentNodeSlots final {
private:
    // Number of PersistentNodes stored per chunk.
    static const int slotCount = 256;
    // Next chunk in the region's list (nullptr for the last chunk).
    PersistentNodeSlots* m_next;
    // The nodes themselves; each starts out unused.
    PersistentNode m_slot[slotCount];
    friend class PersistentRegion;
};
| 98 |
| 99 // PersistentRegion provides a region of PersistentNodes. PersistentRegion |
| 100 // holds a linked list of PersistentNodeSlots, each of which stores |
| 101 // a predefined number of PersistentNodes. You can call allocatePersistentNode/ |
| 102 // freePersistentNode to allocate/free a PersistentNode on the region. |
| 103 class PLATFORM_EXPORT PersistentRegion final { |
| 104 public: |
| 105 PersistentRegion() |
| 106 : m_freeListHead(nullptr) |
| 107 , m_slots(nullptr) |
| 108 #if ENABLE(ASSERT) |
| 109 , m_persistentCount(0) |
| 110 #endif |
| 111 { |
| 112 } |
| 113 ~PersistentRegion(); |
| 114 |
| 115 PersistentNode* allocatePersistentNode(void* self, TraceCallback trace) |
| 116 { |
| 117 #if ENABLE(ASSERT) |
| 118 ++m_persistentCount; |
| 119 #endif |
| 120 if (UNLIKELY(!m_freeListHead)) |
| 121 ensurePersistentNodeSlots(self, trace); |
| 122 ASSERT(m_freeListHead); |
| 123 PersistentNode* node = m_freeListHead; |
| 124 m_freeListHead = m_freeListHead->freeListNext(); |
| 125 node->initialize(self, trace); |
| 126 ASSERT(!node->isUnused()); |
| 127 return node; |
| 128 } |
| 129 void freePersistentNode(PersistentNode* persistentNode) |
| 130 { |
| 131 ASSERT(m_persistentCount > 0); |
| 132 persistentNode->setFreeListNext(m_freeListHead); |
| 133 m_freeListHead = persistentNode; |
| 134 #if ENABLE(ASSERT) |
| 135 --m_persistentCount; |
| 136 #endif |
| 137 } |
| 138 void tracePersistentNodes(Visitor*); |
| 139 int numberOfPersistents(); |
| 140 |
| 141 private: |
| 142 void ensurePersistentNodeSlots(void*, TraceCallback); |
| 143 |
| 144 PersistentNode* m_freeListHead; |
| 145 PersistentNodeSlots* m_slots; |
| 146 #if ENABLE(ASSERT) |
| 147 int m_persistentCount; |
| 148 #endif |
| 149 }; |
| 150 |
| 151 class CrossThreadPersistentRegion final { |
| 152 public: |
| 153 CrossThreadPersistentRegion() : m_persistentRegion(adoptPtr(new PersistentRe
gion)) { } |
| 154 |
| 155 PersistentNode* allocatePersistentNode(void* self, TraceCallback trace) |
| 156 { |
| 157 MutexLocker lock(m_mutex); |
| 158 return m_persistentRegion->allocatePersistentNode(self, trace); |
| 159 } |
| 160 |
| 161 void freePersistentNode(PersistentNode* persistentNode) |
| 162 { |
| 163 MutexLocker lock(m_mutex); |
| 164 m_persistentRegion->freePersistentNode(persistentNode); |
| 165 } |
| 166 |
| 167 void tracePersistentNodes(Visitor* visitor) |
| 168 { |
| 169 MutexLocker lock(m_mutex); |
| 170 m_persistentRegion->tracePersistentNodes(visitor); |
| 171 } |
| 172 |
| 173 private: |
| 174 // We don't make CrossThreadPersistentRegion inherit from PersistentRegion |
| 175 // because we don't want to virtualize performance-sensitive methods |
| 176 // such as PersistentRegion::allocate/freePersistentNode. |
| 177 OwnPtr<PersistentRegion> m_persistentRegion; |
| 178 Mutex m_mutex; |
| 179 }; |
| 180 |
| 181 } // namespace blink |
| 182 |
| 183 #endif |
OLD | NEW |