Chromium Code Reviews

Unified Diff: third_party/tcmalloc/chromium/src/base/atomicops-internals-arm-v6plus.h

Issue 9667026: Revert 126020 - Experiment for updating the tcmalloc chromium branch to r144 (gperftools 2.0). (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: Created 8 years, 9 months ago
 // Copyright (c) 2011, Google Inc.
 // All rights reserved.
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 // notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
(...skipping 24 matching lines...)
 //
 // This code implements ARM atomics for architectures V6 and newer.

 #ifndef BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_
 #define BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_

 #include <stdio.h>
 #include <stdlib.h>
 #include "base/basictypes.h"  // For COMPILE_ASSERT
-// The LDREXD and STREXD instructions are available in all ARM v7 variants
-// and above. In v6, only some variants support them. For simplicity, we only
-// use the exclusive 64-bit load/store on V7 or above.
-#if defined(ARMV7)
-# define BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
-#endif
-
 typedef int32_t Atomic32;

 namespace base {
 namespace subtle {

 typedef int64_t Atomic64;

 // 32-bit low-level ops

 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                          Atomic32 old_value,
                                          Atomic32 new_value) {
   Atomic32 oldval, res;
   do {
     __asm__ __volatile__(
     "ldrex   %1, [%3]\n"
     "mov     %0, #0\n"
     "teq     %1, %4\n"
-    // The following IT (if-then) instruction is needed for the subsequent
-    // conditional instruction STREXEQ when compiling in THUMB mode.
-    // In ARM mode, the compiler/assembler will not generate any code for it.
-    "it      eq\n"
     "strexeq %0, %5, [%3]\n"
     : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
     : "r" (ptr), "Ir" (old_value), "r" (new_value)
     : "cc");
   } while (res);
   return oldval;
 }

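The ldrex/strex pair above implements an optimistic retry loop: the exclusive store reports failure (res != 0) if another agent touched the location between the load and the store, and the do/while retries. A minimal caller-side sketch, assuming the public base/atomicops.h wrapper that exposes these primitives; the helper name AddViaCas is hypothetical and not part of this header:

    #include "base/atomicops.h"

    // Build a lock-free add out of NoBarrier_CompareAndSwap: snapshot the
    // value, attempt the swap, and retry if another thread raced in between.
    inline Atomic32 AddViaCas(volatile Atomic32* ptr, Atomic32 delta) {
      Atomic32 old_value;
      do {
        old_value = *ptr;  // snapshot the current value
      } while (base::subtle::NoBarrier_CompareAndSwap(
                   ptr, old_value, old_value + delta) != old_value);
      return old_value + delta;
    }
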
 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                          Atomic32 new_value) {
(...skipping 84 matching lines...)
   Atomic32 value = *ptr;
   MemoryBarrier();
   return value;
 }

 inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
   MemoryBarrier();
   return *ptr;
 }

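Acquire_Load and Release_Load place MemoryBarrier() on opposite sides of the plain load. A sketch of the intended publication pattern, assuming the matching 32-bit Release_Store from the hunk elided above (g_ready and g_payload are hypothetical):

    #include "base/atomicops.h"

    Atomic32 g_ready = 0;  // flag published with release semantics
    int g_payload = 0;     // data guarded by the flag

    void Producer() {
      g_payload = 42;                            // plain write
      base::subtle::Release_Store(&g_ready, 1);  // barrier, then store
    }

    void Consumer() {
      if (base::subtle::Acquire_Load(&g_ready) == 1) {  // load, then barrier
        // The acquire/release pairing guarantees g_payload == 42 is visible.
      }
    }
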
-// 64-bit versions are only available if the LDREXD and STREXD instructions
-// are available.
-#ifdef BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
-
-#define BASE_HAS_ATOMIC64 1
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
-                                         Atomic64 old_value,
-                                         Atomic64 new_value) {
-  Atomic64 oldval, res;
-  do {
-    __asm__ __volatile__(
-    "ldrexd   %1, [%3]\n"
-    "mov      %0, #0\n"
-    "teq      %Q1, %Q4\n"
-    // The following IT (if-then) instructions are needed for the subsequent
-    // conditional instructions when compiling in THUMB mode.
-    // In ARM mode, the compiler/assembler will not generate any code for them.
-    "it       eq\n"
-    "teqeq    %R1, %R4\n"
-    "it       eq\n"
-    "strexdeq %0, %5, [%3]\n"
-    : "=&r" (res), "=&r" (oldval), "+Q" (*ptr)
-    : "r" (ptr), "Ir" (old_value), "r" (new_value)
-    : "cc");
-  } while (res);
-  return oldval;
-}
-
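In the 64-bit sequence, the %Q and %R operand modifiers select the low and high 32-bit halves of a 64-bit operand, so the two teq instructions compare both words before the conditional exclusive store. The control flow is equivalent to the plain C below, shown only as documentation of the logic; a plain version like this is of course not atomic:

    #include <stdint.h>
    typedef int64_t Atomic64;  // matches the typedef in this header

    Atomic64 CasLogicSketch(volatile Atomic64* ptr, Atomic64 old_value,
                            Atomic64 new_value) {
      Atomic64 observed = *ptr;     // ldrexd: exclusive load of both words
      if (observed == old_value) {  // teq %Q.../teqeq %R...: compare halves
        *ptr = new_value;           // strexdeq: store only on a full match
      }
      return observed;              // the asm loop retries while res != 0
    }
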
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
-                                         Atomic64 new_value) {
-  int store_failed;
-  Atomic64 old;
-  __asm__ __volatile__(
-    "1:\n"
-    "ldrexd  %1, [%2]\n"
-    "strexd  %0, %3, [%2]\n"
-    "teq     %0, #0\n"
-    "bne     1b"
-    : "=&r" (store_failed), "=&r" (old)
-    : "r" (ptr), "r" (new_value)
-    : "cc", "memory");
-  return old;
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
-                                          Atomic64 increment) {
-  int store_failed;
-  Atomic64 res;
-  __asm__ __volatile__(
-    "1:\n"
-    "ldrexd  %1, [%2]\n"
-    "adds    %Q1, %Q1, %Q3\n"
-    "adc     %R1, %R1, %R3\n"
-    "strexd  %0, %1, [%2]\n"
-    "teq     %0, #0\n"
-    "bne     1b"
-    : "=&r" (store_failed), "=&r" (res)
-    : "r" (ptr), "r" (increment)
-    : "cc", "memory");
-  return res;
-}
-
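The adds/adc pair performs the 64-bit addition in 32-bit halves: adds sets the carry flag from the low word, and adc folds that carry into the high word. A sketch of the same arithmetic in C (the names are illustrative):

    #include <stdint.h>

    uint64_t Add64ViaHalves(uint32_t lo, uint32_t hi,
                            uint32_t inc_lo, uint32_t inc_hi) {
      uint32_t new_lo = lo + inc_lo;             // adds: may wrap around
      uint32_t carry = (new_lo < lo) ? 1u : 0u;  // carry out of the low word
      uint32_t new_hi = hi + inc_hi + carry;     // adc: add with carry in
      return ((uint64_t)new_hi << 32) | new_lo;
    }
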
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
-                                        Atomic64 increment) {
-  int store_failed;
-  Atomic64 res;
-  __asm__ __volatile__(
-    "1:\n"
-    "ldrexd  %1, [%2]\n"
-    "adds    %Q1, %Q1, %Q3\n"
-    "adc     %R1, %R1, %R3\n"
-    "dmb\n"
-    "strexd  %0, %1, [%2]\n"
-    "teq     %0, #0\n"
-    "bne     1b"
-    : "=&r" (store_failed), "=&r" (res)
-    : "r" (ptr), "r" (increment)
-    : "cc", "memory");
-  return res;
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
-  int store_failed;
-  Atomic64 dummy;
-  __asm__ __volatile__(
-    "1:\n"
-    // Dummy load to lock cache line.
-    "ldrexd  %1, [%3]\n"
-    "strexd  %0, %2, [%3]\n"
-    "teq     %0, #0\n"
-    "bne     1b"
-    : "=&r" (store_failed), "=&r" (dummy)
-    : "r" (value), "r" (ptr)
-    : "cc", "memory");
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
-  Atomic64 res;
-  __asm__ __volatile__(
-    "ldrexd  %0, [%1]\n"
-    "clrex\n"
-    : "=r" (res)
-    : "r" (ptr), "Q" (*ptr));
-  return res;
-}
-
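On 32-bit ARM a plain 64-bit load can be split into two 32-bit loads and observe a torn value, so the load above uses ldrexd, which reads both words as a single-copy-atomic access, and clrex to release the exclusive reservation the load opened. A hypothetical usage sketch:

    #include "base/atomicops.h"

    volatile base::subtle::Atomic64 g_counter = 0;  // hypothetical counter

    base::subtle::Atomic64 ReadCounter() {
      // Never observes half of one writer's update and half of another's.
      return base::subtle::NoBarrier_Load(&g_counter);
    }
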
-#else  // BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
+// 64-bit versions are not implemented yet.

 inline void NotImplementedFatalError(const char *function_name) {
   fprintf(stderr, "64-bit %s() not implemented on this platform\n",
           function_name);
   abort();
 }

 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                          Atomic64 old_value,
                                          Atomic64 new_value) {
(...skipping 16 matching lines...)
 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                         Atomic64 increment) {
   NotImplementedFatalError("Barrier_AtomicIncrement");
   return 0;
 }

 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
   NotImplementedFatalError("NoBarrier_Store");
 }

+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  NotImplementedFatalError("Acquire_Store64");
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  NotImplementedFatalError("Release_Store");
+}
+
 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
   NotImplementedFatalError("NoBarrier_Load");
   return 0;
 }

-#endif  // BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
-  NoBarrier_Store(ptr, value);
-  MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
-  MemoryBarrier();
-  NoBarrier_Store(ptr, value);
-}
-
 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
-  Atomic64 value = NoBarrier_Load(ptr);
-  MemoryBarrier();
-  return value;
+  NotImplementedFatalError("Atomic64 Acquire_Load");
+  return 0;
 }

 inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  MemoryBarrier();
-  return NoBarrier_Load(ptr);
+  NotImplementedFatalError("Atomic64 Release_Load");
+  return 0;
 }

 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
-  Atomic64 value = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  MemoryBarrier();
-  return value;
+  NotImplementedFatalError("Atomic64 Acquire_CompareAndSwap");
+  return 0;
 }

 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
-  MemoryBarrier();
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  NotImplementedFatalError("Atomic64 Release_CompareAndSwap");
+  return 0;
 }

 }  // namespace subtle ends
 }  // namespace base ends

 #endif  // BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_
