Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(158)

Side by Side Diff: third_party/tcmalloc/chromium/src/base/atomicops-internals-arm-v6plus.h

Issue 9311003: Update the tcmalloc chromium branch to r144 (gperftools 2.0), and merge chromium-specific changes. (Closed) Base URL: http://git.chromium.org/git/chromium.git@trunk
Patch Set: Rebased Created 8 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright (c) 2011, Google Inc. 1 // Copyright (c) 2011, Google Inc.
2 // All rights reserved. 2 // All rights reserved.
3 // 3 //
4 // Redistribution and use in source and binary forms, with or without 4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are 5 // modification, are permitted provided that the following conditions are
6 // met: 6 // met:
7 // 7 //
8 // * Redistributions of source code must retain the above copyright 8 // * Redistributions of source code must retain the above copyright
9 // notice, this list of conditions and the following disclaimer. 9 // notice, this list of conditions and the following disclaimer.
10 // * Redistributions in binary form must reproduce the above 10 // * Redistributions in binary form must reproduce the above
(...skipping 24 matching lines...) Expand all
35 // 35 //
36 // This code implements ARM atomics for architectures V6 and newer. 36 // This code implements ARM atomics for architectures V6 and newer.
37 37
38 #ifndef BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_ 38 #ifndef BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_
39 #define BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_ 39 #define BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_
40 40
41 #include <stdio.h> 41 #include <stdio.h>
42 #include <stdlib.h> 42 #include <stdlib.h>
43 #include "base/basictypes.h" // For COMPILE_ASSERT 43 #include "base/basictypes.h" // For COMPILE_ASSERT
44 44
45 // The LDREXD and STREXD instructions are available in all ARM v7 variants
46 // or above. In v6, only some variants support them. For simplicity, we only
47 // use exclusive 64-bit load/store in V7 or above.
48 #if defined(ARMV7)
49 # define BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
50 #endif
51
45 typedef int32_t Atomic32; 52 typedef int32_t Atomic32;
46 53
47 namespace base { 54 namespace base {
48 namespace subtle { 55 namespace subtle {
49 56
50 typedef int64_t Atomic64; 57 typedef int64_t Atomic64;
51 58
52 // 32-bit low-level ops 59 // 32-bit low-level ops
53 60
// Atomically performs: if (*ptr == old_value) *ptr = new_value.
// Returns the value of *ptr observed by the exclusive load, whether or not
// the swap took place.  No memory barrier is implied.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 oldval, res;
  do {
    __asm__ __volatile__(
    "ldrex %1, [%3]\n"        // oldval = *ptr, taking the exclusive monitor
    "mov %0, #0\n"            // res = 0, so a skipped STREXEQ reads as success
    "teq %1, %4\n"            // compare oldval against old_value
    // The following IT (if-then) instruction is needed for the subsequent
    // conditional instruction STREXEQ when compiling in THUMB mode.
    // In ARM mode, the compiler/assembler will not generate any code for it.
    "it eq\n"
    "strexeq %0, %5, [%3]\n"  // if equal: *ptr = new_value; res = store status
    : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
    : "r" (ptr), "Ir" (old_value), "r" (new_value)
    : "cc");
  } while (res);              // STREX reports non-zero on failure; retry
  return oldval;
}
70 81
71 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, 82 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
72 Atomic32 new_value) { 83 Atomic32 new_value) {
(...skipping 84 matching lines...) Expand 10 before | Expand all | Expand 10 after
157 Atomic32 value = *ptr; 168 Atomic32 value = *ptr;
158 MemoryBarrier(); 169 MemoryBarrier();
159 return value; 170 return value;
160 } 171 }
161 172
162 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { 173 inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
163 MemoryBarrier(); 174 MemoryBarrier();
164 return *ptr; 175 return *ptr;
165 } 176 }
166 177
167 // 64-bit versions are not implemented yet. 178 // 64-bit versions are only available if LDREXD and STREXD instructions
179 // are available.
180 #ifdef BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
181
182 #define BASE_HAS_ATOMIC64 1
183
// 64-bit compare-and-swap built on LDREXD/STREXD.  Atomically performs:
// if (*ptr == old_value) *ptr = new_value.  Returns the value observed by
// the exclusive load.  No memory barrier is implied.
// The %Q and %R operand modifiers select the low and high 32-bit halves of
// a 64-bit operand, respectively.
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 oldval, res;
  do {
    __asm__ __volatile__(
    "ldrexd %1, [%3]\n"         // oldval = *ptr (64-bit exclusive load)
    "mov %0, #0\n"              // res = 0, so skipped STREXDEQ means success
    "teq %Q1, %Q4\n"            // compare low words
    // The following IT (if-then) instructions are needed for the subsequent
    // conditional instructions when compiling in THUMB mode.
    // In ARM mode, the compiler/assembler will not generate any code for them.
    "it eq\n"
    "teqeq %R1, %R4\n"          // if low words matched, compare high words
    "it eq\n"
    "strexdeq %0, %5, [%3]\n"   // if both halves matched, store new_value
    : "=&r" (res), "=&r" (oldval), "+Q" (*ptr)
    : "r" (ptr), "Ir" (old_value), "r" (new_value)
    : "cc");
  } while (res);                // retry if the exclusive store failed
  return oldval;
}
206
// Atomically sets *ptr = new_value and returns the previous value of *ptr.
// No memory barrier is implied.
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  int store_failed;
  Atomic64 old;
  __asm__ __volatile__(
  "1:\n"
  "ldrexd %1, [%2]\n"   // old = *ptr (64-bit exclusive load)
  "strexd %0, %3, [%2]\n"  // try *ptr = new_value; store_failed = status
  "teq %0, #0\n"        // 0 means the exclusive store succeeded
  "bne 1b"              // otherwise retry the whole sequence
  : "=&r" (store_failed), "=&r" (old)
  : "r" (ptr), "r" (new_value)
  : "cc", "memory");
  return old;
}
222
// Atomically performs *ptr += increment and returns the new value.
// No memory barrier is implied.  The 64-bit addition is carried out as a
// 32-bit ADDS (setting carry) followed by ADC on the high halves; %Q/%R
// select the low/high 32-bit halves of a 64-bit operand.
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  int store_failed;
  Atomic64 res;
  __asm__ __volatile__(
  "1:\n"
  "ldrexd %1, [%2]\n"      // res = *ptr (64-bit exclusive load)
  "adds %Q1, %Q1, %Q3\n"   // low halves, setting the carry flag
  "adc %R1, %R1, %R3\n"    // high halves plus carry
  "strexd %0, %1, [%2]\n"  // try to store the sum back
  "teq %0, #0\n"           // 0 means success
  "bne 1b"                 // otherwise retry
  : "=&r" (store_failed), "=&r"(res)
  : "r" (ptr), "r"(increment)
  : "cc", "memory");
  return res;
}
240
// Atomically performs *ptr += increment with a full memory barrier and
// returns the new value.  Identical to NoBarrier_AtomicIncrement except for
// the DMB issued between computing the sum and the exclusive store; if the
// store fails, the whole sequence (including the barrier) is retried.
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  int store_failed;
  Atomic64 res;
  __asm__ __volatile__(
  "1:\n"
  "ldrexd %1, [%2]\n"      // res = *ptr (64-bit exclusive load)
  "adds %Q1, %Q1, %Q3\n"   // 64-bit add: low halves with carry out...
  "adc %R1, %R1, %R3\n"    // ...then high halves plus carry
  "dmb\n"                  // full data memory barrier before publishing
  "strexd %0, %1, [%2]\n"  // try to store the sum back
  "teq %0, #0\n"           // 0 means success
  "bne 1b"                 // otherwise retry
  : "=&r" (store_failed), "=&r"(res)
  : "r" (ptr), "r"(increment)
  : "cc", "memory");
  return res;
}
259
// Atomically stores value into *ptr.  A plain 64-bit store is not
// single-copy atomic on ARM, so this uses an LDREXD/STREXD pair: STREXD can
// only succeed after a matching exclusive load, hence the dummy LDREXD.
// No memory barrier is implied.
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  int store_failed;
  Atomic64 dummy;
  __asm__ __volatile__(
  "1:\n"
  // Dummy load to lock cache line (obtain the exclusive monitor).
  "ldrexd %1, [%3]\n"
  "strexd %0, %2, [%3]\n"  // try *ptr = value; store_failed = status
  "teq %0, #0\n"           // 0 means success
  "bne 1b"                 // otherwise retry
  : "=&r" (store_failed), "=&r"(dummy)
  : "r"(value), "r" (ptr)
  : "cc", "memory");
}
274
// Atomically loads and returns *ptr.  LDREXD provides an atomic 64-bit
// read; CLREX then clears the exclusive monitor since no paired store
// follows.  The "Q"(*ptr) input constraint tells the compiler the pointed-to
// memory is read by the asm.  No memory barrier is implied.
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  Atomic64 res;
  __asm__ __volatile__(
  "ldrexd %0, [%1]\n"
  "clrex\n"
  : "=r" (res)
  : "r"(ptr), "Q"(*ptr));
  return res;
}
284
285 #else // BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
168 286
// Reports that a 64-bit atomic primitive named |function_name| is not
// available on this target, then terminates the process.  Never returns.
inline void NotImplementedFatalError(const char *function_name) {
  static const char kFormat[] =
      "64-bit %s() not implemented on this platform\n";
  fprintf(stderr, kFormat, function_name);
  abort();
}
174 292
175 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, 293 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
176 Atomic64 old_value, 294 Atomic64 old_value,
177 Atomic64 new_value) { 295 Atomic64 new_value) {
(...skipping 16 matching lines...) Expand all
194 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, 312 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
195 Atomic64 increment) { 313 Atomic64 increment) {
196 NotImplementedFatalError("Barrier_AtomicIncrement"); 314 NotImplementedFatalError("Barrier_AtomicIncrement");
197 return 0; 315 return 0;
198 } 316 }
199 317
200 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { 318 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
201 NotImplementedFatalError("NoBarrier_Store"); 319 NotImplementedFatalError("NoBarrier_Store");
202 } 320 }
203 321
204 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
205 NotImplementedFatalError("Acquire_Store64");
206 }
207
208 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
209 NotImplementedFatalError("Release_Store");
210 }
211
212 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { 322 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
213 NotImplementedFatalError("NoBarrier_Load"); 323 NotImplementedFatalError("NoBarrier_Load");
214 return 0; 324 return 0;
215 } 325 }
216 326
327 #endif // BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
328
// 64-bit Acquire_Store: performs the plain atomic store first, then issues
// a full memory barrier.
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  NoBarrier_Store(ptr, value);
  MemoryBarrier();
}
333
// 64-bit Release_Store: issues a full memory barrier first, so all prior
// memory operations complete before the plain atomic store is performed.
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  MemoryBarrier();
  NoBarrier_Store(ptr, value);
}
338
// 64-bit Acquire_Load: performs the plain atomic load first, then issues a
// full memory barrier so later accesses cannot be reordered before it.
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = NoBarrier_Load(ptr);
  MemoryBarrier();
  return value;
}
221 344
// 64-bit Release_Load: issues a full memory barrier first, then performs
// the plain atomic load.
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return NoBarrier_Load(ptr);
}
226 349
// 64-bit compare-and-swap with acquire semantics: the plain CAS runs first,
// followed by a full memory barrier.  Returns the value observed by the CAS.
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 value = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();
  return value;
}
233 357
// 64-bit compare-and-swap with release semantics: a full memory barrier is
// issued first, then the plain CAS runs.  Returns the value observed by the
// CAS.
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  MemoryBarrier();
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
240 364
241 } // namespace subtle ends 365 } // namespace subtle ends
242 } // namespace base ends 366 } // namespace base ends
243 367
244 #endif // BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_ 368 #endif // BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698