// Copyright 2010 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use atomicops.h instead.

#ifndef V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_MAC_H_

#include <libkern/OSAtomic.h>

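// Illustrative usage sketch (not part of this file): clients include
// atomicops.h and call these primitives through v8::base, e.g. a one-time
// initialization flag:
//
//   v8::base::Atomic32 initialized = 0;
//   if (v8::base::Acquire_CompareAndSwap(&initialized, 0, 1) == 0) {
//     // This thread won the race and performs the initialization.
//   }
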
namespace v8 {
namespace base {

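// Compiler-only barrier: the empty asm with a "memory" clobber keeps the
// compiler from reordering memory accesses across it, but emits no
// instruction and imposes no ordering on the hardware.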
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

inline void MemoryBarrier() { OSMemoryBarrier(); }

inline void AcquireMemoryBarrier() {
// On x86 processors, loads already have acquire semantics, so
// there is no need to put a full barrier here.
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
  ATOMICOPS_COMPILER_BARRIER();
#else
  MemoryBarrier();
#endif
}

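// OSAtomicCompareAndSwap32 only reports success as a bool, while the
// atomicops contract is to return the value observed at |ptr|. On failure we
// therefore re-read *ptr; if another thread raced it back to |old_value| we
// must retry, because returning |old_value| would be indistinguishable from
// success.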
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32(old_value, new_value,
                                 const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

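// OSAtomic.h provides no plain atomic-exchange call, so the exchange is
// emulated with a compare-and-swap retry loop: re-read the current value and
// attempt the swap until no other thread modifies *ptr in between.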
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
                                     const_cast<Atomic32*>(ptr)));
  return old_value;
}

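// OSAtomicAdd32 returns the value *after* the addition, which matches the
// atomicops contract for AtomicIncrement.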
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
}

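// The "Barrier" variants of the OSAtomic calls place a full memory barrier
// around the operation; that full barrier is what supplies the acquire (and
// release) semantics below.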
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
                                        const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  // The libkern interface does not distinguish between acquire and release
  // memory barriers; they are equivalent here.
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
  *ptr = value;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

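// Acquire_Store places the barrier after the store, so later accesses cannot
// move before it; Release_Store places the barrier before the store, so all
// earlier writes become visible before the new value is published.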
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
  return *ptr;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

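// Acquire_Load performs the load first and then the (acquire) barrier, so
// subsequent accesses cannot be reordered before the load; Release_Load
// issues a full barrier before loading.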
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  AcquireMemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

#ifdef __LP64__

// 64-bit implementation on 64-bit platform

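// These mirror the 32-bit versions above. Atomic64 is a distinct 64-bit
// integer type (defined in atomicops.h), so it is cast to int64_t*, the
// pointer type the 64-bit OSAtomic primitives expect.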
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64(old_value, new_value,
                                 reinterpret_cast<volatile int64_t*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  Atomic64 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
                                     reinterpret_cast<volatile int64_t*>(ptr)));
  return old_value;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr));
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return OSAtomicAdd64Barrier(increment,
                              reinterpret_cast<volatile int64_t*>(ptr));
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64Barrier(
            old_value, new_value, reinterpret_cast<volatile int64_t*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  // The libkern interface does not distinguish between acquire and release
  // memory barriers; they are equivalent here.
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  AcquireMemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

#endif  // defined(__LP64__)

#undef ATOMICOPS_COMPILER_BARRIER

}  // namespace base
}  // namespace v8

#endif  // V8_BASE_ATOMICOPS_INTERNALS_MAC_H_