| OLD | NEW |
| 1 /* Copyright (c) 2006, Google Inc. | 1 /* Copyright (c) 2006, Google Inc. |
| 2 * All rights reserved. | 2 * All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 37 matching lines...) |
| 48 #define BASE_HAS_ATOMIC64 1 // Use only in tests and base/atomic* | 48 #define BASE_HAS_ATOMIC64 1 // Use only in tests and base/atomic* |
| 49 #endif | 49 #endif |
| 50 | 50 |
| 51 namespace base { | 51 namespace base { |
| 52 namespace subtle { | 52 namespace subtle { |
| 53 | 53 |
| 54 typedef int64 Atomic64; | 54 typedef int64 Atomic64; |
| 55 | 55 |
| 56 // 32-bit low-level operations on any platform | 56 // 32-bit low-level operations on any platform |
| 57 | 57 |
| 58 extern "C" { | |
| 59 // We use Windows intrinsics when we can (they seem to be supported | |
| 60 // well on MSVC 8.0 and above). Unfortunately, in some | |
| 61 // environments, <windows.h> and <intrin.h> have conflicting | |
| 62 // declarations of some other intrinsics, breaking compilation: | |
| 63 // http://connect.microsoft.com/VisualStudio/feedback/details/262047 | |
| 64 // Therefore, we simply declare the relevant intrinsics ourselves. | |
| 65 | |
| 66 // MinGW has a bug in its header files: they don't indicate that the | 58 // MinGW has a bug in its header files: they don't indicate that the |
| 67 // first argument is volatile -- they're not up to date. See | 59 // first argument is volatile -- they're not up to date. See |
| 68 // http://readlist.com/lists/lists.sourceforge.net/mingw-users/0/3861.html | 60 // http://readlist.com/lists/lists.sourceforge.net/mingw-users/0/3861.html |
| 69 // We have to const_cast away the volatile to avoid compiler warnings. | 61 // We have to const_cast away the volatile to avoid compiler warnings. |
| 70 // TODO(csilvers): remove this once MinGW has updated MinGW/include/winbase.h | 62 // TODO(csilvers): remove this once MinGW has updated MinGW/include/winbase.h |
| 71 #if defined(__MINGW32__) | 63 #ifdef __MINGW32__ |
| 72 inline LONG FastInterlockedCompareExchange(volatile LONG* ptr, | 64 inline LONG InterlockedCompareExchange(volatile LONG* ptr, |
| 73 LONG newval, LONG oldval) { | 65 LONG newval, LONG oldval) { |
| 74 return ::InterlockedCompareExchange(const_cast<LONG*>(ptr), newval, oldval); | 66 return ::InterlockedCompareExchange(const_cast<LONG*>(ptr), newval, oldval); |
| 75 } | 67 } |
| 76 inline LONG FastInterlockedExchange(volatile LONG* ptr, LONG newval) { | 68 inline LONG InterlockedExchange(volatile LONG* ptr, LONG newval) { |
| 77 return ::InterlockedExchange(const_cast<LONG*>(ptr), newval); | 69 return ::InterlockedExchange(const_cast<LONG*>(ptr), newval); |
| 78 } | 70 } |
| 79 inline LONG FastInterlockedExchangeAdd(volatile LONG* ptr, LONG increment) { | 71 inline LONG InterlockedExchangeAdd(volatile LONG* ptr, LONG increment) { |
| 80 return ::InterlockedExchangeAdd(const_cast<LONG*>(ptr), increment); | 72 return ::InterlockedExchangeAdd(const_cast<LONG*>(ptr), increment); |
| 81 } | 73 } |
| 82 | |
| 83 #elif _MSC_VER >= 1400 // intrinsics didn't work so well before MSVC 8.0 | |
| 84 // Unfortunately, in some environments, <windows.h> and <intrin.h> | |
| 85 // have conflicting declarations of some intrinsics, breaking | |
| 86 // compilation. So we declare the intrinsics we need ourselves. See | |
| 87 // http://connect.microsoft.com/VisualStudio/feedback/details/262047 | |
| 88 LONG _InterlockedCompareExchange(volatile LONG* ptr, LONG newval, LONG oldval); | |
| 89 #pragma intrinsic(_InterlockedCompareExchange) | |
| 90 inline LONG FastInterlockedCompareExchange(volatile LONG* ptr, | |
| 91 LONG newval, LONG oldval) { | |
| 92 return _InterlockedCompareExchange(ptr, newval, oldval); | |
| 93 } | |
| 94 | |
| 95 LONG _InterlockedExchange(volatile LONG* ptr, LONG newval); | |
| 96 #pragma intrinsic(_InterlockedExchange) | |
| 97 inline LONG FastInterlockedExchange(volatile LONG* ptr, LONG newval) { | |
| 98 return _InterlockedExchange(ptr, newval); | |
| 99 } | |
| 100 | |
| 101 LONG _InterlockedExchangeAdd(volatile LONG* ptr, LONG increment); | |
| 102 #pragma intrinsic(_InterlockedExchangeAdd) | |
| 103 inline LONG FastInterlockedExchangeAdd(volatile LONG* ptr, LONG increment) { | |
| 104 return _InterlockedExchangeAdd(ptr, increment); | |
| 105 } | |
| 106 | |
| 107 #else | |
| 108 inline LONG FastInterlockedCompareExchange(volatile LONG* ptr, | |
| 109 LONG newval, LONG oldval) { | |
| 110 return ::InterlockedCompareExchange(ptr, newval, oldval); | |
| 111 } | |
| 112 inline LONG FastInterlockedExchange(volatile LONG* ptr, LONG newval) { | |
| 113 return ::InterlockedExchange(ptr, newval); | |
| 114 } | |
| 115 inline LONG FastInterlockedExchangeAdd(volatile LONG* ptr, LONG increment) { | |
| 116 return ::InterlockedExchangeAdd(ptr, increment); | |
| 117 } | |
| 118 | |
| 119 #endif // ifdef __MINGW32__ | 74 #endif // ifdef __MINGW32__ |
| 120 } // extern "C" | |
| 121 | 75 |
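A note on the MinGW workaround just above: old MinGW headers declare the Interlocked* functions with a plain (non-volatile) LONG*, so passing the volatile pointers these wrappers receive would not compile cleanly. A minimal sketch of the failure mode and the fix, assuming those outdated headers:

    // Hypothetical illustration of the MinGW header bug described above.
    // Outdated MinGW declares: LONG InterlockedExchange(LONG* Target, LONG Value);
    volatile LONG v = 0;
    // ::InterlockedExchange(&v, 1);                 // rejected: volatile LONG* -> LONG*
    ::InterlockedExchange(const_cast<LONG*>(&v), 1); // what the wrapper does

The const_cast only strips a qualifier the declaration should never have dropped; the object itself is ordinary writable memory, so the cast is safe.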
| 122 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, | 76 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, |
| 123 Atomic32 old_value, | 77 Atomic32 old_value, |
| 124 Atomic32 new_value) { | 78 Atomic32 new_value) { |
| 125 LONG result = FastInterlockedCompareExchange( | 79 LONG result = InterlockedCompareExchange( |
| 126 reinterpret_cast<volatile LONG*>(ptr), | 80 reinterpret_cast<volatile LONG*>(ptr), |
| 127 static_cast<LONG>(new_value), | 81 static_cast<LONG>(new_value), |
| 128 static_cast<LONG>(old_value)); | 82 static_cast<LONG>(old_value)); |
| 129 return static_cast<Atomic32>(result); | 83 return static_cast<Atomic32>(result); |
| 130 } | 84 } |
| 131 | 85 |
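NoBarrier_CompareAndSwap returns the value that was actually observed at *ptr, which is what drives the usual compare-and-swap retry loop: the swap took effect exactly when the observed value equals the expected one. A hedged sketch of a caller (AtomicStoreMax is hypothetical, not part of this header):

    // Hypothetical caller: atomically raise *ptr to at least 'value'.
    Atomic32 AtomicStoreMax(volatile Atomic32* ptr, Atomic32 value) {
      Atomic32 old = *ptr;
      while (old < value) {
        Atomic32 observed =
            base::subtle::NoBarrier_CompareAndSwap(ptr, old, value);
        if (observed == old) break;  // our swap won
        old = observed;              // lost the race; retry against fresh value
      }
      return old;
    }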
| 132 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, | 86 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, |
| 133 Atomic32 new_value) { | 87 Atomic32 new_value) { |
| 134 LONG result = FastInterlockedExchange( | 88 LONG result = InterlockedExchange( |
| 135 reinterpret_cast<volatile LONG*>(ptr), | 89 reinterpret_cast<volatile LONG*>(ptr), |
| 136 static_cast<LONG>(new_value)); | 90 static_cast<LONG>(new_value)); |
| 137 return static_cast<Atomic32>(result); | 91 return static_cast<Atomic32>(result); |
| 138 } | 92 } |
| 139 | 93 |
| 140 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, | 94 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, |
| 141 Atomic32 increment) { | 95 Atomic32 increment) { |
| 142 return FastInterlockedExchangeAdd( | 96 return InterlockedExchangeAdd( |
| 143 reinterpret_cast<volatile LONG*>(ptr), | 97 reinterpret_cast<volatile LONG*>(ptr), |
| 144 static_cast<LONG>(increment)) + increment; | 98 static_cast<LONG>(increment)) + increment; |
| 145 } | 99 } |
| 146 | 100 |
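InterlockedExchangeAdd returns the value *ptr held before the addition, while the Atomic*Increment contract is to return the new value; that is the reason for the trailing '+ increment' above. A worked example with assumed starting state:

    Atomic32 counter = 5;
    // The intrinsic returns the old value (5); '+ increment' turns it into 8.
    Atomic32 now = base::subtle::Barrier_AtomicIncrement(&counter, 3);
    // now == 8 and counter == 8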
| 147 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, | 101 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, |
| 148 Atomic32 increment) { | 102 Atomic32 increment) { |
| 149 return Barrier_AtomicIncrement(ptr, increment); | 103 return Barrier_AtomicIncrement(ptr, increment); |
| 150 } | 104 } |
| 151 | 105 |
| 152 } // namespace base::subtle | 106 } // namespace base::subtle |
| (...skipping 59 matching lines...) |
| 212 } | 166 } |
| 213 | 167 |
| 214 // 64-bit operations | 168 // 64-bit operations |
| 215 | 169 |
| 216 #if defined(_WIN64) || defined(__MINGW64__) | 170 #if defined(_WIN64) || defined(__MINGW64__) |
| 217 | 171 |
| 218 // 64-bit low-level operations on 64-bit platform. | 172 // 64-bit low-level operations on 64-bit platform. |
| 219 | 173 |
| 220 COMPILE_ASSERT(sizeof(Atomic64) == sizeof(PVOID), atomic_word_is_atomic); | 174 COMPILE_ASSERT(sizeof(Atomic64) == sizeof(PVOID), atomic_word_is_atomic); |
| 221 | 175 |
| 222 // These are the intrinsics needed for 64-bit operations. Similar to the | 176 // As in the __MINGW32__ case above, this works around a MinGW header |
| 223 // 32-bit case above. | 177 // bug: the declarations are missing 'volatile'. |
| 224 | 178 #ifdef __MINGW64__ |
| 225 extern "C" { | 179 inline PVOID InterlockedCompareExchangePointer(volatile PVOID* ptr, |
| 226 #if defined(__MINGW64__) | 180 PVOID newval, PVOID oldval) { |
| 227 inline PVOID FastInterlockedCompareExchangePointer(volatile PVOID* ptr, | |
| 228 PVOID newval, PVOID oldval) { | |
| 229 return ::InterlockedCompareExchangePointer(const_cast<PVOID*>(ptr), | 181 return ::InterlockedCompareExchangePointer(const_cast<PVOID*>(ptr), |
| 230 newval, oldval); | 182 newval, oldval); |
| 231 } | 183 } |
| 232 inline PVOID FastInterlockedExchangePointer(volatile PVOID* ptr, PVOID newval) { | 184 inline PVOID InterlockedExchangePointer(volatile PVOID* ptr, PVOID newval) { |
| 233 return ::InterlockedExchangePointer(const_cast<PVOID*>(ptr), newval); | 185 return ::InterlockedExchangePointer(const_cast<PVOID*>(ptr), newval); |
| 234 } | 186 } |
| 235 inline LONGLONG FastInterlockedExchangeAdd64(volatile LONGLONG* ptr, | 187 inline LONGLONG InterlockedExchangeAdd64(volatile LONGLONG* ptr, |
| 236 LONGLONG increment) { | 188 LONGLONG increment) { |
| 237 return ::InterlockedExchangeAdd64(const_cast<LONGLONG*>(ptr), increment); | 189 return ::InterlockedExchangeAdd64(const_cast<LONGLONG*>(ptr), increment); |
| 238 } | 190 } |
| 239 | |
| 240 #elif _MSC_VER >= 1400 // intrinsics didn't work so well before MSVC 8.0 | |
| 241 // Like above, we need to declare the intrinsics ourselves. | |
| 242 PVOID _InterlockedCompareExchangePointer(volatile PVOID* ptr, | |
| 243 PVOID newval, PVOID oldval); | |
| 244 #pragma intrinsic(_InterlockedCompareExchangePointer) | |
| 245 inline PVOID FastInterlockedCompareExchangePointer(volatile PVOID* ptr, | |
| 246 PVOID newval, PVOID oldval) { | |
| 247 return _InterlockedCompareExchangePointer(const_cast<PVOID*>(ptr), | |
| 248 newval, oldval); | |
| 249 } | |
| 250 | |
| 251 PVOID _InterlockedExchangePointer(volatile PVOID* ptr, PVOID newval); | |
| 252 #pragma intrinsic(_InterlockedExchangePointer) | |
| 253 inline PVOID FastInterlockedExchangePointer(volatile PVOID* ptr, PVOID newval) { | |
| 254 return _InterlockedExchangePointer(const_cast<PVOID*>(ptr), newval); | |
| 255 } | |
| 256 | |
| 257 LONGLONG _InterlockedExchangeAdd64(volatile LONGLONG* ptr, LONGLONG increment); | |
| 258 #pragma intrinsic(_InterlockedExchangeAdd64) | |
| 259 inline LONGLONG FastInterlockedExchangeAdd64(volatile LONGLONG* ptr, | |
| 260 LONGLONG increment) { | |
| 261 return _InterlockedExchangeAdd64(const_cast<LONGLONG*>(ptr), increment); | |
| 262 } | |
| 263 | |
| 264 #else | |
| 265 inline PVOID FastInterlockedCompareExchangePointer(volatile PVOID* ptr, | |
| 266 PVOID newval, PVOID oldval) { | |
| 267 return ::InterlockedCompareExchangePointer(ptr, newval, oldval); | |
| 268 } | |
| 269 inline PVOID FastInterlockedExchangePointer(volatile PVOID* ptr, PVOID newval) { | |
| 270 return ::InterlockedExchangePointer(ptr, newval); | |
| 271 } | |
| 272 inline LONGLONG FastInterlockedExchangeAdd64(volatile LONGLONG* ptr, | |
| 273 LONGLONG increment) { | |
| 274 return ::InterlockedExchangeAdd64(ptr, increment); | |
| 275 } | |
| 276 | |
| 277 #endif // ifdef __MINGW64__ | 191 #endif // ifdef __MINGW64__ |
| 278 } // extern "C" | |
| 279 | 192 |
| 280 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, | 193 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, |
| 281 Atomic64 old_value, | 194 Atomic64 old_value, |
| 282 Atomic64 new_value) { | 195 Atomic64 new_value) { |
| 283 PVOID result = FastInterlockedCompareExchangePointer( | 196 PVOID result = InterlockedCompareExchangePointer( |
| 284 reinterpret_cast<volatile PVOID*>(ptr), | 197 reinterpret_cast<volatile PVOID*>(ptr), |
| 285 reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value)); | 198 reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value)); |
| 286 return reinterpret_cast<Atomic64>(result); | 199 return reinterpret_cast<Atomic64>(result); |
| 287 } | 200 } |
| 288 | 201 |
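The pointer-flavored intrinsic can stand in for a 64-bit integer CAS only because PVOID is 8 bytes wide on these targets, which is precisely what the COMPILE_ASSERT above pins down. A small sketch of the round-trip the reinterpret_casts rely on (the value is arbitrary):

    Atomic64 v = 0x1122334455667788LL;
    PVOID p = reinterpret_cast<PVOID>(v);           // loses nothing on _WIN64
    Atomic64 back = reinterpret_cast<Atomic64>(p);  // back == v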
| 289 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, | 202 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, |
| 290 Atomic64 new_value) { | 203 Atomic64 new_value) { |
| 291 PVOID result = FastInterlockedExchangePointer( | 204 PVOID result = InterlockedExchangePointer( |
| 292 reinterpret_cast<volatile PVOID*>(ptr), | 205 reinterpret_cast<volatile PVOID*>(ptr), |
| 293 reinterpret_cast<PVOID>(new_value)); | 206 reinterpret_cast<PVOID>(new_value)); |
| 294 return reinterpret_cast<Atomic64>(result); | 207 return reinterpret_cast<Atomic64>(result); |
| 295 } | 208 } |
| 296 | 209 |
| 297 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, | 210 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, |
| 298 Atomic64 increment) { | 211 Atomic64 increment) { |
| 299 return FastInterlockedExchangeAdd64( | 212 return InterlockedExchangeAdd64( |
| 300 reinterpret_cast<volatile LONGLONG*>(ptr), | 213 reinterpret_cast<volatile LONGLONG*>(ptr), |
| 301 static_cast<LONGLONG>(increment)) + increment; | 214 static_cast<LONGLONG>(increment)) + increment; |
| 302 } | 215 } |
| 303 | 216 |
| 304 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, | 217 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, |
| 305 Atomic64 increment) { | 218 Atomic64 increment) { |
| 306 return Barrier_AtomicIncrement(ptr, increment); | 219 return Barrier_AtomicIncrement(ptr, increment); |
| 307 } | 220 } |
| 308 | 221 |
| 309 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { | 222 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { |
| (...skipping 28 matching lines...) |
| 338 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { | 251 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { |
| 339 MemoryBarrier(); | 252 MemoryBarrier(); |
| 340 return *ptr; | 253 return *ptr; |
| 341 } | 254 } |
| 342 | 255 |
| 343 #else // defined(_WIN64) || defined(__MINGW64__) | 256 #else // defined(_WIN64) || defined(__MINGW64__) |
| 344 | 257 |
| 345 // 64-bit low-level operations on 32-bit platform | 258 // 64-bit low-level operations on 32-bit platform |
| 346 | 259 |
| 347 // TODO(vchen): The GNU assembly below must be converted to MSVC inline | 260 // TODO(vchen): The GNU assembly below must be converted to MSVC inline |
| 348 // assembly. Then the file should be renamed to ...-x86-msvc.h, probably. | 261 // assembly. Then the file should be renamed to ...-x86-msvc.h, probably. |
| 349 | 262 |
| 350 inline void NotImplementedFatalError(const char *function_name) { | 263 inline void NotImplementedFatalError(const char *function_name) { |
| 351 fprintf(stderr, "64-bit %s() not implemented on this platform\n", | 264 fprintf(stderr, "64-bit %s() not implemented on this platform\n", |
| 352 function_name); | 265 function_name); |
| 353 abort(); | 266 abort(); |
| 354 } | 267 } |
| 355 | 268 |
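On the 32-bit path, a 64-bit operation with no real implementation should fail loudly rather than miscompute, and NotImplementedFatalError gives those stubs one place to do that. A hedged sketch of the stub shape (the actual bodies live in the lines skipped below):

    // Hypothetical stub shape; see the skipped lines for the real bodies.
    inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                             Atomic64 new_value) {
      NotImplementedFatalError("NoBarrier_AtomicExchange");
      return 0;  // not reached: abort() above does not return
    }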
| 356 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, | 269 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, |
| 357 Atomic64 old_value, | 270 Atomic64 old_value, |
| 358 Atomic64 new_value) { | 271 Atomic64 new_value) { |
| (...skipping 133 matching lines...) |
| 492 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, | 405 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, |
| 493 Atomic64 old_value, | 406 Atomic64 old_value, |
| 494 Atomic64 new_value) { | 407 Atomic64 new_value) { |
| 495 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 408 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
| 496 } | 409 } |
| 497 | 410 |
| 498 } // namespace base::subtle | 411 } // namespace base::subtle |
| 499 } // namespace base | 412 } // namespace base |
| 500 | 413 |
| 501 #endif // BASE_ATOMICOPS_INTERNALS_WINDOWS_H_ | 414 #endif // BASE_ATOMICOPS_INTERNALS_WINDOWS_H_ |