Index: third_party/tcmalloc/chromium/src/base/atomicops-internals-arm-v6plus.h
diff --git a/third_party/tcmalloc/chromium/src/base/atomicops-internals-arm-v6plus.h b/third_party/tcmalloc/chromium/src/base/atomicops-internals-arm-v6plus.h
index ee09f3293ce61ebe57fecf01dfa7bbfe4f6af13c..8d5b9b5342f171cce39629c39c433bc8b81ee515 100644
--- a/third_party/tcmalloc/chromium/src/base/atomicops-internals-arm-v6plus.h
+++ b/third_party/tcmalloc/chromium/src/base/atomicops-internals-arm-v6plus.h
@@ -42,6 +42,13 @@
 #include <stdlib.h>
 #include "base/basictypes.h"  // For COMPILE_ASSERT
+// The LDREXD and STREXD instructions are available in all ARM v7 variants
+// and above. In v6, only some variants support them. For simplicity, we only
+// use exclusive 64-bit load/store on v7 or above.
+#if defined(ARMV7)
+# define BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
+#endif
+
 typedef int32_t Atomic32;
 namespace base {
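
Note: the ARMV7 macro above is expected to come from the build system rather
than from the compiler. As a rough sketch only, an equivalent gate could be
derived from the GCC/Clang predefined architecture macros; the exact macro set
below is an assumption, not part of the patch:

    // Hypothetical alternative to the build-provided ARMV7 gate, assuming
    // GCC/Clang predefine one of the ARMv7 profile macros.
    #if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
        defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__)
    # define BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
    #endif
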
@@ -60,6 +67,10 @@ inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
     "ldrex %1, [%3]\n"
     "mov %0, #0\n"
     "teq %1, %4\n"
+    // The following IT (if-then) instruction is needed for the subsequent
+    // conditional instruction STREXEQ when compiling in Thumb mode.
+    // In ARM mode, the compiler/assembler will not generate any code for it.
+    "it eq\n"
     "strexeq %0, %5, [%3]\n"
     : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
     : "r" (ptr), "Ir" (old_value), "r" (new_value)
@@ -164,7 +175,114 @@ inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
   return *ptr;
 }
-// 64-bit versions are not implemented yet.
+// 64-bit versions are only implemented when the LDREXD and STREXD
+// instructions are available.
+#ifdef BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
+
+#define BASE_HAS_ATOMIC64 1
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  Atomic64 oldval, res;
+  do {
+    __asm__ __volatile__(
+    "ldrexd %1, [%3]\n"
+    "mov %0, #0\n"
+    // %Q and %R are operand modifiers that select the register holding the
+    // low and the high 32-bit half, respectively, of a 64-bit operand.
+    "teq %Q1, %Q4\n"
+    // The following IT (if-then) instructions are needed for the subsequent
+    // conditional instructions when compiling in Thumb mode. In ARM mode,
+    // the compiler/assembler will not generate any code for them.
+    "it eq\n"
+    "teqeq %R1, %R4\n"
+    "it eq\n"
+    "strexdeq %0, %5, [%3]\n"
+    : "=&r" (res), "=&r" (oldval), "+Q" (*ptr)
+    : "r" (ptr), "Ir" (old_value), "r" (new_value)
+    : "cc");
+  } while (res);
+  return oldval;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  int store_failed;
+  Atomic64 old;
+  __asm__ __volatile__(
+    "1:\n"
+    "ldrexd %1, [%2]\n"
+    // STREXD writes 0 to %0 on success and 1 if exclusive access was lost
+    // between the LDREXD and the store, in which case the loop retries.
+    "strexd %0, %3, [%2]\n"
+    "teq %0, #0\n"
+    "bne 1b"
+    : "=&r" (store_failed), "=&r" (old)
+    : "r" (ptr), "r" (new_value)
+    : "cc", "memory");
+  return old;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  int store_failed;
+  Atomic64 res;
+  __asm__ __volatile__(
+    "1:\n"
+    "ldrexd %1, [%2]\n"
+    // 64-bit add in two steps: ADDS adds the low words and sets the carry
+    // flag; ADC adds the high words plus the carry.
+    "adds %Q1, %Q1, %Q3\n"
+    "adc %R1, %R1, %R3\n"
+    "strexd %0, %1, [%2]\n"
+    "teq %0, #0\n"
+    "bne 1b"
+    : "=&r" (store_failed), "=&r" (res)
+    : "r" (ptr), "r" (increment)
+    : "cc", "memory");
+  return res;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  int store_failed;
+  Atomic64 res;
+  __asm__ __volatile__(
+    "1:\n"
+    "ldrexd %1, [%2]\n"
+    "adds %Q1, %Q1, %Q3\n"
+    "adc %R1, %R1, %R3\n"
+    // DMB is a full hardware memory barrier; issuing it inside the retry
+    // loop, before the exclusive store, gives the increment its barrier
+    // semantics.
+    "dmb\n"
+    "strexd %0, %1, [%2]\n"
+    "teq %0, #0\n"
+    "bne 1b"
+    : "=&r" (store_failed), "=&r" (res)
+    : "r" (ptr), "r" (increment)
+    : "cc", "memory");
+  return res;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  int store_failed;
+  Atomic64 dummy;
+  __asm__ __volatile__(
+    "1:\n"
+    // Dummy load to gain exclusive access: STREXD succeeds only after a
+    // matching LDREXD, and the pair makes the 64-bit store atomic.
+    "ldrexd %1, [%3]\n"
+    "strexd %0, %2, [%3]\n"
+    "teq %0, #0\n"
+    "bne 1b"
+    : "=&r" (store_failed), "=&r" (dummy)
+    : "r" (value), "r" (ptr)
+    : "cc", "memory");
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  Atomic64 res;
+  __asm__ __volatile__(
+    // A plain LDRD is not guaranteed to be single-copy atomic, so load with
+    // LDREXD and then clear the exclusive monitor with CLREX.
+    "ldrexd %0, [%1]\n"
+    "clrex\n"
+    : "=r" (res)
+    : "r" (ptr), "Q" (*ptr));
+  return res;
+}
+
+#else // BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
 inline void NotImplementedFatalError(const char *function_name) {
   fprintf(stderr, "64-bit %s() not implemented on this platform\n",
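
All of the 64-bit primitives above share the LDREXD/STREXD retry-loop pattern:
the exclusive load and store succeed as a pair only if no other agent touched
the location in between. A minimal usage sketch, assuming the primitives above
(the counter and function names are hypothetical, not part of the patch):

    // Hypothetical 64-bit statistics counter built on the new primitives.
    static volatile Atomic64 g_bytes_allocated = 0;

    Atomic64 RecordAllocation(Atomic64 bytes) {
      // Returns the updated total; the retry loop lives inside the primitive.
      return NoBarrier_AtomicIncrement(&g_bytes_allocated, bytes);
    }
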
@@ -201,41 +319,47 @@ inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
   NotImplementedFatalError("NoBarrier_Store");
 }
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
-  NotImplementedFatalError("Acquire_Store64");
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  NotImplementedFatalError("NoBarrier_Load");
+  return 0;
 }
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
-  NotImplementedFatalError("Release_Store");
+#endif // BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  NoBarrier_Store(ptr, value);
+  MemoryBarrier();
 }
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
-  NotImplementedFatalError("NoBarrier_Load");
-  return 0;
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  MemoryBarrier();
+  NoBarrier_Store(ptr, value);
 }
 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
-  NotImplementedFatalError("Atomic64 Acquire_Load");
-  return 0;
+  Atomic64 value = NoBarrier_Load(ptr);
+  MemoryBarrier();
+  return value;
 }
 inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  NotImplementedFatalError("Atomic64 Release_Load");
-  return 0;
+  MemoryBarrier();
+  return NoBarrier_Load(ptr);
 }
 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
-  NotImplementedFatalError("Atomic64 Acquire_CompareAndSwap");
-  return 0;
+  Atomic64 value = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
+  return value;
 }
 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
-  NotImplementedFatalError("Atomic64 Release_CompareAndSwap");
-  return 0;
+  MemoryBarrier();
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
 }
 }  // namespace subtle ends
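
The barrier placements above give the usual pairing: Release_Store orders
earlier writes before the flag store, and Acquire_Load orders the flag load
before later reads. A minimal message-passing sketch under that assumption
(all names hypothetical, not part of the patch):

    // Hypothetical producer/consumer hand-off using the 64-bit operations.
    static Atomic64 g_payload = 0;           // Data to publish.
    static volatile Atomic64 g_ready = 0;    // Publication flag.

    void Publish(Atomic64 value) {
      g_payload = value;           // Plain store; ordered by the barrier
      Release_Store(&g_ready, 1);  // executed inside Release_Store.
    }

    bool TryConsume(Atomic64* out) {
      if (Acquire_Load(&g_ready) == 0)
        return false;              // Nothing published yet.
      *out = g_payload;            // The barrier in Acquire_Load makes the
      return true;                 // payload write visible here.
    }
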