// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation; use atomicops.h instead.
//
// This implementation uses the compiler's __atomic builtins, which implement
// the C++11 atomics memory model. The code base is currently written assuming
// atomicity revolves around accesses instead of C++11's memory locations. The
// burden is on the programmer to ensure that all memory locations accessed
// atomically are never accessed non-atomically (tsan should help with this).
//
// Of note in this implementation:
// * All NoBarrier variants are implemented as relaxed.
// * All Barrier variants are implemented as sequentially-consistent.
// * Compare exchange's failure ordering is always the same as the success one
//   (except for release, which fails as relaxed): using a weaker failure
//   ordering is only valid under certain uses of compare exchange.
// * Acquire store doesn't exist in the C11 memory model; it is instead
//   implemented as a relaxed store followed by a sequentially consistent
//   fence.
// * Release load doesn't exist in the C11 memory model; it is instead
//   implemented as a sequentially consistent fence followed by a relaxed
//   load.
// * Atomic increment is expected to return the post-incremented value, whereas
//   C11 fetch_add returns the previous value. The implementation therefore
//   needs to apply the increment twice (which the compiler should be able to
//   detect and optimize).
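//   For example, if *p == 5, NoBarrier_AtomicIncrement(p, 2) updates *p to 7
//   and returns 7, whereas a bare __atomic_fetch_add would return 5.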

#ifndef V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
#define V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_

#include <atomic>

#include "src/base/build_config.h"

namespace v8 {
namespace base {

// This implementation is transitional and maintains the original API for
// atomicops.h.

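// Full sequentially consistent fence, ordering all preceding memory accesses
// before all subsequent ones.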
inline void MemoryBarrier() {
#if defined(__GLIBCXX__)
  // Work around libstdc++ bug 51038 where atomic_thread_fence was declared but
  // not defined, leading to the linker complaining about undefined references.
  __atomic_thread_fence(std::memory_order_seq_cst);
#else
  std::atomic_thread_fence(std::memory_order_seq_cst);
#endif
}

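// Atomically compares *ptr with old_value and, if they match, writes
// new_value. Returns the value *ptr held before the operation. Relaxed
// ordering: atomicity only, with no ordering guarantees.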
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  return old_value;
}

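// Atomically replaces *ptr with new_value and returns the previous value,
// with relaxed ordering.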
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
}

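// Atomically adds increment to *ptr and returns the post-increment value.
// __atomic_fetch_add returns the pre-increment value, hence the outer add.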
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
}

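// Same as NoBarrier_AtomicIncrement, but sequentially consistent.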
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST);
}

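// Compare-and-swap with acquire ordering on both the success and failure
// paths.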
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
  return old_value;
}

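// Compare-and-swap with release ordering on success. As noted in the header
// comment, the failure ordering is relaxed because release is not a valid
// failure ordering for compare exchange.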
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELEASE, __ATOMIC_RELAXED);
  return old_value;
}

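// Relaxed stores: atomic, but with no ordering constraints on surrounding
// accesses.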
inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}

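// Release store: all writes performed by this thread before the store become
// visible to any thread that acquire-loads the stored value from *ptr.
// For example (a sketch):
//
//   data = 42;                // plain write
//   Release_Store(&flag, 1);  // publish
//
// A thread observing Acquire_Load(&flag) == 1 is then guaranteed to see
// data == 42.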
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
}

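// Relaxed loads: atomic, but with no ordering constraints on surrounding
// accesses.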
inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

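// Acquire load: pairs with a release store to the same location, as in the
// sketch above.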
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
}

#if defined(V8_HOST_ARCH_64_BIT)

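// 64-bit variants of the operations above, available only on 64-bit hosts.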
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  return old_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
  return old_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  // Release is not a valid failure ordering for compare exchange; fail as
  // relaxed, matching the Atomic32 variant and the header comment.
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELEASE, __ATOMIC_RELAXED);
  return old_value;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
}

#endif  // defined(V8_HOST_ARCH_64_BIT)

}  // namespace base
}  // namespace v8

#endif  // V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_