// Copyright 2010 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use atomicops.h instead.

#ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_

#include "src/base/base-export.h"

namespace v8 {
namespace base {

// This struct is not part of the public API of this module; clients may not
// use it.
// Features of this x86 CPU. Values may not be correct before main() is run,
// but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                             // after acquire compare-and-swap.
#if !defined(__SSE2__)
  bool has_sse2;             // Processor has SSE2.
#endif
};
V8_BASE_EXPORT extern struct AtomicOps_x86CPUFeatureStruct
    AtomicOps_Internalx86CPUFeatures;
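
// Illustrative sketch only: this header assumes a companion .cc file fills
// in AtomicOps_Internalx86CPUFeatures once at startup. A hypothetical
// detector (not the actual V8 implementation) might query cpuid leaf 1:
//
//   static void Example_InitCPUFeatures() {
//     unsigned eax, ebx, ecx, edx;
//     __asm__("cpuid"
//             : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
//             : "a"(1), "c"(0));
//   #if !defined(__SSE2__)
//     AtomicOps_Internalx86CPUFeatures.has_sse2 = (edx >> 26) & 1;  // SSE2
//   #endif                                                          // bit.
//     // has_amd_lock_mb_bug would come from vendor/family/model checks.
//   }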

#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
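
// The empty asm statement above declares a "memory" clobber, which forbids
// the compiler from reordering loads and stores across it. It emits no
// instruction: a compiler-only fence, not a hardware barrier.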

// 32-bit low-level operations on any platform.

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev;
  __asm__ __volatile__("lock; cmpxchgl %1,%2"
                       : "=a" (prev)
                       : "q" (new_value), "m" (*ptr), "0" (old_value)
                       : "memory");
  return prev;
}
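
// Illustrative sketch, not part of this header's API: the canonical retry
// loop built on the CAS above. Example_AtomicMax is a hypothetical helper;
// it retries until cmpxchg observes the value that was read, i.e. until no
// other thread has intervened.
inline Atomic32 Example_AtomicMax(volatile Atomic32* ptr, Atomic32 bound) {
  for (;;) {
    Atomic32 old_value = *ptr;  // Plain read; the CAS below validates it.
    if (old_value >= bound) return old_value;
    if (NoBarrier_CompareAndSwap(ptr, old_value, bound) == old_value) {
      return bound;  // CAS succeeded: *ptr was old_value, is now bound.
    }
  }
}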

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  __asm__ __volatile__("xchgl %1,%0"  // The lock prefix is implicit for xchg.
                       : "=r" (new_value)
                       : "m" (*ptr), "0" (new_value)
                       : "memory");
  return new_value;  // Now it's the previous value.
}
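
// Illustrative sketch, hypothetical helper: xchg is the classic
// test-and-set primitive. Because its implicit lock prefix makes it a full
// hardware barrier, spinning on it yields a (naive) spinlock acquire.
inline void Example_SpinLockAcquire(volatile Atomic32* lock) {
  while (NoBarrier_AtomicExchange(lock, 1) != 0) {
    // Busy-wait; a production lock would add a "pause" and back off here.
  }
}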

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 temp = increment;
  __asm__ __volatile__("lock; xaddl %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now holds the old value of *ptr
  return temp + increment;
}
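
// Illustrative note: xadd leaves the *old* value in temp, so the function
// above returns old + increment, i.e. the value *after* the increment. A
// hypothetical refcount release built on it:
inline bool Example_DecrementAndTest(volatile Atomic32* refcount) {
  return NoBarrier_AtomicIncrement(refcount, -1) == 0;  // True for last ref.
}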

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  Atomic32 temp = increment;
  __asm__ __volatile__("lock; xaddl %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now holds the old value of *ptr
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return temp + increment;
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return x;
}

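// Note: on x86 every lock-prefixed instruction is already a full memory
// barrier, so the release variant below needs no extra fence. Only the
// acquire variant adds an lfence, and only to work around the AMD erratum
// recorded in has_amd_lock_mb_bug.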
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
  *ptr = value;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

#if defined(__x86_64__) || defined(__SSE2__)

// 64-bit and SSE2 implementations of the memory barrier can be simpler,
// because "mfence" is guaranteed to exist.
inline void MemoryBarrier() {
  __asm__ __volatile__("mfence" : : : "memory");
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

#else

inline void MemoryBarrier() {
  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
    __asm__ __volatile__("mfence" : : : "memory");
  } else {  // mfence is faster but not present on PIII
    Atomic32 x = 0;
    NoBarrier_AtomicExchange(&x, 0);  // acts as a barrier on PIII
  }
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
    *ptr = value;
    __asm__ __volatile__("mfence" : : : "memory");
  } else {
    NoBarrier_AtomicExchange(ptr, value);
    // acts as a barrier on PIII
  }
}
#endif

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  ATOMICOPS_COMPILER_BARRIER();
  *ptr = value;  // An x86 store acts as a release barrier.
  // See comments in Atomic64 version of Release_Store(), below.
}

inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
  return *ptr;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;  // An x86 load acts as an acquire barrier.
  // See comments in Atomic64 version of Release_Store(), below.
  ATOMICOPS_COMPILER_BARRIER();
  return value;
}
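
// Illustrative sketch with hypothetical helpers (not part of this header):
// the intended Release_Store / Acquire_Load pairing for publishing data
// between threads.
inline void Example_Publish(volatile Atomic32* payload,
                            volatile Atomic32* ready, Atomic32 value) {
  NoBarrier_Store(payload, value);  // Write the data first...
  Release_Store(ready, 1);          // ...then publish the flag with release.
}

inline bool Example_TryConsume(volatile const Atomic32* payload,
                               volatile const Atomic32* ready,
                               Atomic32* out) {
  if (Acquire_Load(ready) != 1) return false;  // Pairs with Release_Store.
  *out = NoBarrier_Load(payload);  // Guaranteed to see the published value.
  return true;
}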

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

#if defined(__x86_64__) && defined(V8_HOST_ARCH_64_BIT)

// 64-bit low-level operations on 64-bit platform.

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev;
  __asm__ __volatile__("lock; cmpxchgq %1,%2"
                       : "=a" (prev)
                       : "q" (new_value), "m" (*ptr), "0" (old_value)
                       : "memory");
  return prev;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  __asm__ __volatile__("xchgq %1,%0"  // The lock prefix is implicit for xchg.
                       : "=r" (new_value)
                       : "m" (*ptr), "0" (new_value)
                       : "memory");
  return new_value;  // Now it's the previous value.
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  Atomic64 temp = increment;
  __asm__ __volatile__("lock; xaddq %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now contains the previous value of *ptr
  return temp + increment;
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  Atomic64 temp = increment;
  __asm__ __volatile__("lock; xaddq %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now contains the previous value of *ptr
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return temp + increment;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  ATOMICOPS_COMPILER_BARRIER();

  *ptr = value;  // An x86 store acts as a release barrier
                 // for current AMD/Intel chips as of Jan 2008.
                 // See also Acquire_Load(), below.

  // When new chips come out, check:
  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  //  System Programming Guide, Chapter 7: Multiple-processor management,
  //  Section 7.2, Memory Ordering.
  // Last seen at:
  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
  //
  // x86 stores/loads fail to act as barriers for a few instructions (clflush
  // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
  // not generated by the compiler, and are rare. Users of these instructions
  // need to know about cache behaviour in any case since all of these involve
  // either flushing cache lines or non-temporal cache hints.
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;  // An x86 load acts as an acquire barrier,
                          // for current AMD/Intel chips as of Jan 2008.
                          // See also Release_Store(), above.
  ATOMICOPS_COMPILER_BARRIER();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return x;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

#endif  // defined(__x86_64__) && defined(V8_HOST_ARCH_64_BIT)

}  // namespace base
}  // namespace v8

#undef ATOMICOPS_COMPILER_BARRIER

#endif  // V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_