// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use atomicops.h instead.

#ifndef V8_BASE_ATOMICOPS_INTERNALS_S390_H_
#define V8_BASE_ATOMICOPS_INTERNALS_S390_H_

namespace v8 {
namespace base {
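// NOTE: The Atomic8/Atomic32/Atomic64 types and the declarations of the
// functions defined below live in atomicops.h, which is the header that
// pulls this file in; that is also why Barrier_AtomicIncrement can be
// referenced below before its definition appears. Everything here is built
// on the GCC __sync builtins, which GCC documents as full memory barriers
// in most cases. A minimal usage sketch (always via atomicops.h):
//
//   Atomic32 counter = 0;
//   Barrier_AtomicIncrement(&counter, 1);    // counter is now 1
//   Atomic32 seen = Acquire_Load(&counter);  // read it with a fence after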
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  return __sync_val_compare_and_swap(ptr, old_value, new_value);
}
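// Exchange is built from a compare-and-swap retry loop: reread *ptr until
// the CAS manages to install new_value over an unchanged old_value, then
// report the value that was replaced.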
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 old_value;
  do {
    old_value = *ptr;
  } while (!__sync_bool_compare_and_swap(ptr, old_value, new_value));
  return old_value;
}
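// Both increment flavors map to the same full-barrier builtin; this port
// does not exploit a cheaper barrier-free form, so the "NoBarrier" variant
// simply provides the stronger semantics.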
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return __sync_add_and_fetch(ptr, increment);
}
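// The acquire/release CAS variants also reuse the full-barrier builtin,
// which is at least as strong as either ordering requires.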
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
  *ptr = value;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void MemoryBarrier() { __sync_synchronize(); }
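// In this legacy API, Acquire_Store performs the store and then fences,
// while Release_Store fences first and then stores, so writes made before
// the call are visible before the new value appears.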
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}
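// Loads mirror the stores: Acquire_Load fences after reading, so later
// accesses cannot move above the load; Release_Load fences before it.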
inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}
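// The 64-bit operations are only available when targeting 64-bit
// z/Architecture (s390x); they mirror the 32-bit implementations above.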
#ifdef V8_TARGET_ARCH_S390X
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  return __sync_val_compare_and_swap(ptr, old_value, new_value);
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  Atomic64 old_value;
  do {
    old_value = *ptr;
  } while (!__sync_bool_compare_and_swap(ptr, old_value, new_value));
  return old_value;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return __sync_add_and_fetch(ptr, increment);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { return *ptr; }

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}
#endif  // V8_TARGET_ARCH_S390X

}  // namespace base
}  // namespace v8

#endif  // V8_BASE_ATOMICOPS_INTERNALS_S390_H_