OLD | NEW |
| (Empty) |
1 /* Copyright (c) 2010, Google Inc. | |
2 * All rights reserved. | |
3 * | |
4 * Redistribution and use in source and binary forms, with or without | |
5 * modification, are permitted provided that the following conditions are | |
6 * met: | |
7 * | |
8 * * Redistributions of source code must retain the above copyright | |
9 * notice, this list of conditions and the following disclaimer. | |
10 * * Redistributions in binary form must reproduce the above | |
11 * copyright notice, this list of conditions and the following disclaimer | |
12 * in the documentation and/or other materials provided with the | |
13 * distribution. | |
14 * * Neither the name of Google Inc. nor the names of its | |
15 * contributors may be used to endorse or promote products derived from | |
16 * this software without specific prior written permission. | |
17 * | |
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
29 * | |
30 * --- | |
31 * Author: Lei Zhang, Sasha Levitskiy | |
32 */ | |
33 | |
34 // This file is an internal atomic implementation, use base/atomicops.h instead. | |
35 // | |
36 // LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears. | |
37 | |
38 #ifndef BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_ | |
39 #define BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_ | |
40 | |
#include <stdio.h>
#include <stdlib.h>            // for abort()
#include "base/basictypes.h"   // For COMPILE_ASSERT
43 | |
44 typedef int32_t Atomic32; | |
45 | |
46 namespace base { | |
47 namespace subtle { | |
48 | |
49 typedef int64_t Atomic64; | |
50 | |
// 0xffff0fc0 is the hard coded address of a function provided by
// the kernel which implements an atomic compare-exchange. On older
// ARM architecture revisions (pre-v6) this may be implemented using
// a syscall. This address is stable, and in active use (hard coded)
// by at least glibc-2.7 and the Android C library.
// pLinuxKernelCmpxchg has both acquire and release barrier semantics.
// A zero return value means the compare-exchange succeeded (see the
// call sites below).
typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value,
                                           Atomic32 new_value,
                                           volatile Atomic32* ptr);
LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) =
    (LinuxKernelCmpxchgFunc) 0xffff0fc0;

// 0xffff0fa0 is the hard coded address of the kernel-provided memory
// barrier helper. Declared weak so another definition may override it.
typedef void (*LinuxKernelMemoryBarrierFunc)(void);
LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
    (LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
66 | |
67 | |
// Atomically: if *ptr == old_value, store new_value and return old_value;
// otherwise return the value observed in *ptr. "NoBarrier" is nominal
// only — the kernel cmpxchg helper carries full acquire/release barriers
// (see comment above pLinuxKernelCmpxchg).
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev_value = *ptr;
  do {
    // Zero return from the kernel helper means the swap took place.
    if (!pLinuxKernelCmpxchg(old_value, new_value,
                             const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    // The helper may fail even while *ptr still equals old_value (e.g.
    // under contention), so retry until *ptr is observed to differ.
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}
81 | |
82 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, | |
83 Atomic32 new_value) { | |
84 Atomic32 old_value; | |
85 do { | |
86 old_value = *ptr; | |
87 } while (pLinuxKernelCmpxchg(old_value, new_value, | |
88 const_cast<Atomic32*>(ptr))); | |
89 return old_value; | |
90 } | |
91 | |
92 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, | |
93 Atomic32 increment) { | |
94 for (;;) { | |
95 // Atomic exchange the old value with an incremented one. | |
96 Atomic32 old_value = *ptr; | |
97 Atomic32 new_value = old_value + increment; | |
98 if (pLinuxKernelCmpxchg(old_value, new_value, | |
99 const_cast<Atomic32*>(ptr)) == 0) { | |
100 // The exchange took place as expected. | |
101 return new_value; | |
102 } | |
103 // Otherwise, *ptr changed mid-loop and we need to retry. | |
104 } | |
105 } | |
106 | |
107 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, | |
108 Atomic32 increment) { | |
109 return Barrier_AtomicIncrement(ptr, increment); | |
110 } | |
111 | |
112 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, | |
113 Atomic32 old_value, | |
114 Atomic32 new_value) { | |
115 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | |
116 } | |
117 | |
118 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, | |
119 Atomic32 old_value, | |
120 Atomic32 new_value) { | |
121 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | |
122 } | |
123 | |
// Plain (unordered) 32-bit store.
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}
127 | |
// Full memory barrier via the kernel-provided helper at 0xffff0fa0.
inline void MemoryBarrier() {
  pLinuxKernelMemoryBarrier();
}
131 | |
// Store, then fence: the store is ordered before subsequent operations.
// (Mirror image of Release_Store below — order of the two statements is
// the entire contract here; do not swap them.)
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}
136 | |
// Fence, then store: all prior operations are ordered before the store
// becomes visible.
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}
141 | |
// Plain (unordered) 32-bit load.
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}
145 | |
// Load, then fence: the load is ordered before subsequent operations.
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;  // must read before the barrier
  MemoryBarrier();
  return value;
}
151 | |
// Fence, then load: prior operations are ordered before the load.
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}
156 | |
157 | |
158 // 64-bit versions are not implemented yet. | |
159 | |
// Report an unimplemented 64-bit operation on stderr and terminate the
// process via abort(). Never returns.
// NOTE(review): abort() requires <stdlib.h>, which this header must include.
inline void NotImplementedFatalError(const char *function_name) {
  fprintf(stderr, "64-bit %s() not implemented on this platform\n",
          function_name);
  abort();
}
165 | |
// 64-bit CAS is not available on this platform: aborts.
// The return statement is unreachable; it only satisfies the compiler.
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  NotImplementedFatalError("NoBarrier_CompareAndSwap");
  return 0;
}
172 | |
// 64-bit exchange is not available on this platform: aborts.
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  NotImplementedFatalError("NoBarrier_AtomicExchange");
  return 0;  // unreachable
}
178 | |
// 64-bit increment is not available on this platform: aborts.
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  NotImplementedFatalError("NoBarrier_AtomicIncrement");
  return 0;  // unreachable
}
184 | |
// 64-bit barrier increment is not available on this platform: aborts.
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  NotImplementedFatalError("Barrier_AtomicIncrement");
  return 0;  // unreachable
}
190 | |
// 64-bit store is not available on this platform: aborts.
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  NotImplementedFatalError("NoBarrier_Store");
}
194 | |
// 64-bit acquire store: delegates to the (unimplemented) 64-bit
// exchange, which aborts via NotImplementedFatalError.
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  NoBarrier_AtomicExchange(ptr, value);
  // acts as a barrier in this implementation
}
199 | |
// 64-bit release store is not available on this platform: aborts.
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  NotImplementedFatalError("Release_Store");
}
203 | |
// 64-bit load is not available on this platform: aborts.
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  NotImplementedFatalError("NoBarrier_Load");
  return 0;  // unreachable
}
208 | |
209 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { | |
210 Atomic64 value = NoBarrier_Load(ptr); | |
211 return value; | |
212 } | |
213 | |
// 64-bit release load: fence, then load. The load aborts via
// NoBarrier_Load, so in practice this never returns.
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return NoBarrier_Load(ptr);
}
218 | |
219 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, | |
220 Atomic64 old_value, | |
221 Atomic64 new_value) { | |
222 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | |
223 } | |
224 | |
225 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, | |
226 Atomic64 old_value, | |
227 Atomic64 new_value) { | |
228 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | |
229 } | |
230 | |
231 } // namespace base::subtle | |
232 } // namespace base | |
233 | |
234 #endif // BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_ | |
OLD | NEW |