// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use atomicops.h instead.
//
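// A minimal usage sketch (hypothetical caller; assumes these functions are
// reached through the declarations in atomicops.h, as the note above
// directs):
//
//   v8::base::Atomic32 counter = 0;
//   v8::base::Barrier_AtomicIncrement(&counter, 1);  // atomic increment
//   v8::base::Atomic32 seen = v8::base::Acquire_Load(&counter);
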
#ifndef V8_BASE_ATOMICOPS_INTERNALS_PPC_GCC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_PPC_GCC_H_

namespace v8 {
namespace base {

// __sync_val_compare_and_swap is a GCC builtin documented as a full memory
// barrier, so this is stronger than the "NoBarrier" contract requires.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  return __sync_val_compare_and_swap(ptr, old_value, new_value);
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 old_value;
  // Retry until the CAS succeeds; another thread may update *ptr between
  // the read and the compare-and-swap.
  do {
    old_value = *ptr;
  } while (!__sync_bool_compare_and_swap(ptr, old_value, new_value));
  return old_value;
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  for (;;) {
    Atomic32 old_value = *ptr;
    Atomic32 new_value = old_value + increment;
    if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) {
      // The exchange took place as expected.
      return new_value;
    }
    // Otherwise, *ptr changed mid-loop and we need to retry.
  }
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
  *ptr = value;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

// "sync" is the PowerPC full (heavyweight) memory barrier instruction.
inline void MemoryBarrier() {
  __asm__ __volatile__("sync" : : : "memory");
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  // Fence after the store: later memory operations cannot be reordered
  // before it.
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  // Fence before the store: all earlier memory operations complete first.
  MemoryBarrier();
  *ptr = value;
}

inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

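// The 64-bit variants below mirror the Atomic32 operations above; they are
// compiled only for 64-bit PowerPC targets.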
#ifdef V8_TARGET_ARCH_PPC64
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  return __sync_val_compare_and_swap(ptr, old_value, new_value);
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  Atomic64 old_value;
  do {
    old_value = *ptr;
  } while (!__sync_bool_compare_and_swap(ptr, old_value, new_value));
  return old_value;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  for (;;) {
    Atomic64 old_value = *ptr;
    Atomic64 new_value = old_value + increment;
    if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) {
      // The exchange took place as expected.
      return new_value;
    }
    // Otherwise, *ptr changed mid-loop and we need to retry.
  }
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { return *ptr; }

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

#endif  // V8_TARGET_ARCH_PPC64
}  // namespace base
}  // namespace v8

#endif  // V8_BASE_ATOMICOPS_INTERNALS_PPC_GCC_H_