| OLD | NEW | 
|---|---|
| 1 /* Copyright (c) 2006, Google Inc. | 1 /* Copyright (c) 2006, Google Inc. | 
| 2  * All rights reserved. | 2  * All rights reserved. | 
| 3  * | 3  * | 
| 4  * Redistribution and use in source and binary forms, with or without | 4  * Redistribution and use in source and binary forms, with or without | 
| 5  * modification, are permitted provided that the following conditions are | 5  * modification, are permitted provided that the following conditions are | 
| 6  * met: | 6  * met: | 
| 7  * | 7  * | 
| 8  *     * Redistributions of source code must retain the above copyright | 8  *     * Redistributions of source code must retain the above copyright | 
| 9  * notice, this list of conditions and the following disclaimer. | 9  * notice, this list of conditions and the following disclaimer. | 
| 10  *     * Redistributions in binary form must reproduce the above | 10  *     * Redistributions in binary form must reproduce the above | 
| (...skipping 37 matching lines...) |
| 48 #define BASE_HAS_ATOMIC64 1  // Use only in tests and base/atomic* | 48 #define BASE_HAS_ATOMIC64 1  // Use only in tests and base/atomic* | 
| 49 #endif | 49 #endif | 
| 50 | 50 | 
| 51 namespace base { | 51 namespace base { | 
| 52 namespace subtle { | 52 namespace subtle { | 
| 53 | 53 | 
| 54 typedef int64 Atomic64; | 54 typedef int64 Atomic64; | 
| 55 | 55 | 
| 56 // 32-bit low-level operations on any platform | 56 // 32-bit low-level operations on any platform | 
| 57 | 57 | 
|  | 58 extern "C" { | 
|  | 59 // We use windows intrinsics when we can (they seem to be supported | 
|  | 60 // well on MSVC 8.0 and above).  Unfortunately, in some | 
|  | 61 // environments, <windows.h> and <intrin.h> have conflicting | 
|  | 62 // declarations of some other intrinsics, breaking compilation: | 
|  | 63 //   http://connect.microsoft.com/VisualStudio/feedback/details/262047 | 
|  | 64 // Therefore, we simply declare the relevant intrinsics ourselves. | 
|  | 65 | 
| 58 // MinGW has a bug in the header files where it doesn't indicate the | 66 // MinGW has a bug in the header files where it doesn't indicate the | 
| 59 // first argument is volatile -- they're not up to date.  See | 67 // first argument is volatile -- they're not up to date.  See | 
| 60 //   http://readlist.com/lists/lists.sourceforge.net/mingw-users/0/3861.html | 68 //   http://readlist.com/lists/lists.sourceforge.net/mingw-users/0/3861.html | 
| 61 // We have to const_cast away the volatile to avoid compiler warnings. | 69 // We have to const_cast away the volatile to avoid compiler warnings. | 
| 62 // TODO(csilvers): remove this once MinGW has updated MinGW/include/winbase.h | 70 // TODO(csilvers): remove this once MinGW has updated MinGW/include/winbase.h | 
| 63 #ifdef __MINGW32__ | 71 #if defined(__MINGW32__) | 
| 64 inline LONG InterlockedCompareExchange(volatile LONG* ptr, | 72 inline LONG FastInterlockedCompareExchange(volatile LONG* ptr, | 
| 65                                        LONG newval, LONG oldval) { | 73                                            LONG newval, LONG oldval) { | 
| 66   return ::InterlockedCompareExchange(const_cast<LONG*>(ptr), newval, oldval); | 74   return ::InterlockedCompareExchange(const_cast<LONG*>(ptr), newval, oldval); | 
| 67 } | 75 } | 
| 68 inline LONG InterlockedExchange(volatile LONG* ptr, LONG newval) { | 76 inline LONG FastInterlockedExchange(volatile LONG* ptr, LONG newval) { | 
| 69   return ::InterlockedExchange(const_cast<LONG*>(ptr), newval); | 77   return ::InterlockedExchange(const_cast<LONG*>(ptr), newval); | 
| 70 } | 78 } | 
| 71 inline LONG InterlockedExchangeAdd(volatile LONG* ptr, LONG increment) { | 79 inline LONG FastInterlockedExchangeAdd(volatile LONG* ptr, LONG increment) { | 
| 72   return ::InterlockedExchangeAdd(const_cast<LONG*>(ptr), increment); | 80   return ::InterlockedExchangeAdd(const_cast<LONG*>(ptr), increment); | 
| 73 } | 81 } | 
|  | 82 | 
|  | 83 #elif _MSC_VER >= 1400   // intrinsics didn't work so well before MSVC 8.0 | 
|  | 84 // Unfortunately, in some environments, <windows.h> and <intrin.h> | 
|  | 85 // have conflicting declarations of some intrinsics, breaking | 
|  | 86 // compilation.  So we declare the intrinsics we need ourselves.  See | 
|  | 87 //   http://connect.microsoft.com/VisualStudio/feedback/details/262047 | 
|  | 88 LONG _InterlockedCompareExchange(volatile LONG* ptr, LONG newval, LONG oldval); | 
|  | 89 #pragma intrinsic(_InterlockedCompareExchange) | 
|  | 90 inline LONG FastInterlockedCompareExchange(volatile LONG* ptr, | 
|  | 91                                            LONG newval, LONG oldval) { | 
|  | 92   return _InterlockedCompareExchange(ptr, newval, oldval); | 
|  | 93 } | 
|  | 94 | 
|  | 95 LONG _InterlockedExchange(volatile LONG* ptr, LONG newval); | 
|  | 96 #pragma intrinsic(_InterlockedExchange) | 
|  | 97 inline LONG FastInterlockedExchange(volatile LONG* ptr, LONG newval) { | 
|  | 98   return _InterlockedExchange(ptr, newval); | 
|  | 99 } | 
|  | 100 | 
|  | 101 LONG _InterlockedExchangeAdd(volatile LONG* ptr, LONG increment); | 
|  | 102 #pragma intrinsic(_InterlockedExchangeAdd) | 
|  | 103 inline LONG FastInterlockedExchangeAdd(volatile LONG* ptr, LONG increment) { | 
|  | 104   return _InterlockedExchangeAdd(ptr, increment); | 
|  | 105 } | 
|  | 106 | 
|  | 107 #else | 
|  | 108 inline LONG FastInterlockedCompareExchange(volatile LONG* ptr, | 
|  | 109                                            LONG newval, LONG oldval) { | 
|  | 110   return ::InterlockedCompareExchange(ptr, newval, oldval); | 
|  | 111 } | 
|  | 112 inline LONG FastInterlockedExchange(volatile LONG* ptr, LONG newval) { | 
|  | 113   return ::InterlockedExchange(ptr, newval); | 
|  | 114 } | 
|  | 115 inline LONG FastInterlockedExchangeAdd(volatile LONG* ptr, LONG increment) { | 
|  | 116   return ::InterlockedExchangeAdd(ptr, increment); | 
|  | 117 } | 
|  | 118 | 
| 74 #endif  // ifdef __MINGW32__ | 119 #endif  // ifdef __MINGW32__ | 
|  | 120 }  // extern "C" | 
| 75 | 121 | 
| 76 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, | 122 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, | 
| 77                                          Atomic32 old_value, | 123                                          Atomic32 old_value, | 
| 78                                          Atomic32 new_value) { | 124                                          Atomic32 new_value) { | 
| 79   LONG result = InterlockedCompareExchange( | 125   LONG result = FastInterlockedCompareExchange( | 
| 80       reinterpret_cast<volatile LONG*>(ptr), | 126       reinterpret_cast<volatile LONG*>(ptr), | 
| 81       static_cast<LONG>(new_value), | 127       static_cast<LONG>(new_value), | 
| 82       static_cast<LONG>(old_value)); | 128       static_cast<LONG>(old_value)); | 
| 83   return static_cast<Atomic32>(result); | 129   return static_cast<Atomic32>(result); | 
| 84 } | 130 } | 
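
For context on how callers are expected to use the CAS primitive above: the interlocked intrinsic returns the value it observed at `*ptr`, and the swap took effect only if that value equals `old_value`. A minimal retry-loop sketch (the include path, function name, and saturation logic are illustrative assumptions, not part of this header):

```cpp
#include "base/atomicops.h"  // assumed public include for this header family

// Illustrative only: atomically increment *counter, saturating at max_value.
base::subtle::Atomic32 SaturatingIncrement(
    volatile base::subtle::Atomic32* counter,
    base::subtle::Atomic32 max_value) {
  for (;;) {
    base::subtle::Atomic32 old_value = *counter;
    if (old_value >= max_value) return old_value;     // already saturated
    base::subtle::Atomic32 observed = base::subtle::NoBarrier_CompareAndSwap(
        counter, old_value, old_value + 1);
    if (observed == old_value) return old_value + 1;  // our swap won
    // Another thread changed *counter between the read and the CAS; retry.
  }
}
```
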
| 85 | 131 | 
| 86 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, | 132 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, | 
| 87                                          Atomic32 new_value) { | 133                                          Atomic32 new_value) { | 
| 88   LONG result = InterlockedExchange( | 134   LONG result = FastInterlockedExchange( | 
| 89       reinterpret_cast<volatile LONG*>(ptr), | 135       reinterpret_cast<volatile LONG*>(ptr), | 
| 90       static_cast<LONG>(new_value)); | 136       static_cast<LONG>(new_value)); | 
| 91   return static_cast<Atomic32>(result); | 137   return static_cast<Atomic32>(result); | 
| 92 } | 138 } | 
| 93 | 139 | 
| 94 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, | 140 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, | 
| 95                                         Atomic32 increment) { | 141                                         Atomic32 increment) { | 
| 96   return InterlockedExchangeAdd( | 142   return FastInterlockedExchangeAdd( | 
| 97       reinterpret_cast<volatile LONG*>(ptr), | 143       reinterpret_cast<volatile LONG*>(ptr), | 
| 98       static_cast<LONG>(increment)) + increment; | 144       static_cast<LONG>(increment)) + increment; | 
| 99 } | 145 } | 
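
Note the `+ increment` in the return statement: `InterlockedExchangeAdd` returns the value *before* the addition, while `Barrier_AtomicIncrement` is specified to return the value *after* it. A small single-threaded sketch of the difference (values and the include path are illustrative):

```cpp
#include <assert.h>
#include "base/atomicops.h"  // assumed public include, as above

void BarrierIncrementReturnsTheNewValue() {
  volatile base::subtle::Atomic32 n = 5;
  // The raw intrinsic alone would return 5 (the pre-add value); the wrapper
  // adds 'increment' back so callers see the post-add value.
  base::subtle::Atomic32 result = base::subtle::Barrier_AtomicIncrement(&n, 3);
  assert(result == 8);
  assert(n == 8);
}
```
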
| 100 | 146 | 
| 101 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, | 147 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, | 
| 102                                           Atomic32 increment) { | 148                                           Atomic32 increment) { | 
| 103   return Barrier_AtomicIncrement(ptr, increment); | 149   return Barrier_AtomicIncrement(ptr, increment); | 
| 104 } | 150 } | 
| 105 | 151 | 
| 106 }  // namespace base::subtle | 152 }  // namespace base::subtle | 
| (...skipping 59 matching lines...) |
| 166 } | 212 } | 
| 167 | 213 | 
| 168 // 64-bit operations | 214 // 64-bit operations | 
| 169 | 215 | 
| 170 #if defined(_WIN64) || defined(__MINGW64__) | 216 #if defined(_WIN64) || defined(__MINGW64__) | 
| 171 | 217 | 
| 172 // 64-bit low-level operations on 64-bit platform. | 218 // 64-bit low-level operations on 64-bit platform. | 
| 173 | 219 | 
| 174 COMPILE_ASSERT(sizeof(Atomic64) == sizeof(PVOID), atomic_word_is_atomic); | 220 COMPILE_ASSERT(sizeof(Atomic64) == sizeof(PVOID), atomic_word_is_atomic); | 
| 175 | 221 | 
| 176 // Like for the __MINGW32__ case above, this works around a header | 222 // These are the intrinsics needed for 64-bit operations.  Similar to the | 
| 177 // error in mingw, where it's missing 'volatile'. | 223 // 32-bit case above. | 
| 178 #ifdef __MINGW64__ | 224 | 
| 179 inline PVOID InterlockedCompareExchangePointer(volatile PVOID* ptr, | 225 extern "C" { | 
| 180                                                PVOID newval, PVOID oldval) { | 226 #if defined(__MINGW64__) | 
|  | 227 inline PVOID FastInterlockedCompareExchangePointer(volatile PVOID* ptr, | 
|  | 228                                                    PVOID newval, PVOID oldval) { | 
| 181   return ::InterlockedCompareExchangePointer(const_cast<PVOID*>(ptr), | 229   return ::InterlockedCompareExchangePointer(const_cast<PVOID*>(ptr), | 
| 182                                              newval, oldval); | 230                                              newval, oldval); | 
| 183 } | 231 } | 
| 184 inline PVOID InterlockedExchangePointer(volatile PVOID* ptr, PVOID newval) { | 232 inline PVOID FastInterlockedExchangePointer(volatile PVOID* ptr, PVOID newval) { | 
| 185   return ::InterlockedExchangePointer(const_cast<PVOID*>(ptr), newval); | 233   return ::InterlockedExchangePointer(const_cast<PVOID*>(ptr), newval); | 
| 186 } | 234 } | 
| 187 inline LONGLONG InterlockedExchangeAdd64(volatile LONGLONG* ptr, | 235 inline LONGLONG FastInterlockedExchangeAdd64(volatile LONGLONG* ptr, | 
| 188                                          LONGLONG increment) { | 236                                              LONGLONG increment) { | 
| 189   return ::InterlockedExchangeAdd64(const_cast<LONGLONG*>(ptr), increment); | 237   return ::InterlockedExchangeAdd64(const_cast<LONGLONG*>(ptr), increment); | 
| 190 } | 238 } | 
|  | 239 | 
|  | 240 #elif _MSC_VER >= 1400   // intrinsics didn't work so well before MSVC 8.0 | 
|  | 241 // Like above, we need to declare the intrinsics ourselves. | 
|  | 242 PVOID _InterlockedCompareExchangePointer(volatile PVOID* ptr, | 
|  | 243                                          PVOID newval, PVOID oldval); | 
|  | 244 #pragma intrinsic(_InterlockedCompareExchangePointer) | 
|  | 245 inline PVOID FastInterlockedCompareExchangePointer(volatile PVOID* ptr, | 
|  | 246                                                    PVOID newval, PVOID oldval) { | 
|  | 247   return _InterlockedCompareExchangePointer(const_cast<PVOID*>(ptr), | 
|  | 248                                             newval, oldval); | 
|  | 249 } | 
|  | 250 | 
|  | 251 PVOID _InterlockedExchangePointer(volatile PVOID* ptr, PVOID newval); | 
|  | 252 #pragma intrinsic(_InterlockedExchangePointer) | 
|  | 253 inline PVOID FastInterlockedExchangePointer(volatile PVOID* ptr, PVOID newval) { | 
|  | 254   return _InterlockedExchangePointer(const_cast<PVOID*>(ptr), newval); | 
|  | 255 } | 
|  | 256 | 
|  | 257 LONGLONG _InterlockedExchangeAdd64(volatile LONGLONG* ptr, LONGLONG increment); | 
|  | 258 #pragma intrinsic(_InterlockedExchangeAdd64) | 
|  | 259 inline LONGLONG FastInterlockedExchangeAdd64(volatile LONGLONG* ptr, | 
|  | 260                                              LONGLONG increment) { | 
|  | 261   return _InterlockedExchangeAdd64(const_cast<LONGLONG*>(ptr), increment); | 
|  | 262 } | 
|  | 263 | 
|  | 264 #else | 
|  | 265 inline PVOID FastInterlockedCompareExchangePointer(volatile PVOID* ptr, | 
|  | 266                                                    PVOID newval, PVOID oldval) { | 
|  | 267   return ::InterlockedCompareExchangePointer(ptr, newval, oldval); | 
|  | 268 } | 
|  | 269 inline PVOID FastInterlockedExchangePointer(volatile PVOID* ptr, PVOID newval) { | 
|  | 270   return ::InterlockedExchangePointer(ptr, newval); | 
|  | 271 } | 
|  | 272 inline LONGLONG FastInterlockedExchangeAdd64(volatile LONGLONG* ptr, | 
|  | 273                                              LONGLONG increment) { | 
|  | 274   return ::InterlockedExchangeAdd64(ptr, increment); | 
|  | 275 } | 
|  | 276 | 
| 191 #endif  // ifdef __MINGW64__ | 277 #endif  // ifdef __MINGW64__ | 
|  | 278 }  // extern "C" | 
| 192 | 279 | 
| 193 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, | 280 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, | 
| 194                                          Atomic64 old_value, | 281                                          Atomic64 old_value, | 
| 195                                          Atomic64 new_value) { | 282                                          Atomic64 new_value) { | 
| 196   PVOID result = InterlockedCompareExchangePointer( | 283   PVOID result = FastInterlockedCompareExchangePointer( | 
| 197     reinterpret_cast<volatile PVOID*>(ptr), | 284     reinterpret_cast<volatile PVOID*>(ptr), | 
| 198     reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value)); | 285     reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value)); | 
| 199   return reinterpret_cast<Atomic64>(result); | 286   return reinterpret_cast<Atomic64>(result); | 
| 200 } | 287 } | 
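
The PVOID-based CAS above is lossless only because `Atomic64` and `PVOID` have the same width on these targets, which is exactly what the `COMPILE_ASSERT` at the top of this branch checks. In C++11 terms the same guarantee would read:

```cpp
// Equivalent to the COMPILE_ASSERT above: the reinterpret_casts between
// Atomic64 and PVOID round-trip only if the two types are the same size.
static_assert(sizeof(base::subtle::Atomic64) == sizeof(PVOID),
              "Atomic64 must be pointer-sized for the PVOID-based operations");
```
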
| 201 | 288 | 
| 202 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, | 289 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, | 
| 203                                          Atomic64 new_value) { | 290                                          Atomic64 new_value) { | 
| 204   PVOID result = InterlockedExchangePointer( | 291   PVOID result = FastInterlockedExchangePointer( | 
| 205     reinterpret_cast<volatile PVOID*>(ptr), | 292     reinterpret_cast<volatile PVOID*>(ptr), | 
| 206     reinterpret_cast<PVOID>(new_value)); | 293     reinterpret_cast<PVOID>(new_value)); | 
| 207   return reinterpret_cast<Atomic64>(result); | 294   return reinterpret_cast<Atomic64>(result); | 
| 208 } | 295 } | 
| 209 | 296 | 
| 210 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, | 297 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, | 
| 211                                         Atomic64 increment) { | 298                                         Atomic64 increment) { | 
| 212   return InterlockedExchangeAdd64( | 299   return FastInterlockedExchangeAdd64( | 
| 213       reinterpret_cast<volatile LONGLONG*>(ptr), | 300       reinterpret_cast<volatile LONGLONG*>(ptr), | 
| 214       static_cast<LONGLONG>(increment)) + increment; | 301       static_cast<LONGLONG>(increment)) + increment; | 
| 215 } | 302 } | 
| 216 | 303 | 
| 217 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, | 304 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, | 
| 218                                           Atomic64 increment) { | 305                                           Atomic64 increment) { | 
| 219   return Barrier_AtomicIncrement(ptr, increment); | 306   return Barrier_AtomicIncrement(ptr, increment); | 
| 220 } | 307 } | 
| 221 | 308 | 
| 222 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { | 309 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { | 
| (...skipping 28 matching lines...) |
| 251 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { | 338 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { | 
| 252   MemoryBarrier(); | 339   MemoryBarrier(); | 
| 253   return *ptr; | 340   return *ptr; | 
| 254 } | 341 } | 
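
For readers wondering how these load/store variants fit together: the usual pattern is a release-store of a ready flag in the producer paired with an acquire-load in the consumer. A minimal message-passing sketch, assuming the `Release_Store`, `Acquire_Load`, and `NoBarrier_Load` counterparts defined in the elided parts of this header (the globals and function names are illustrative):

```cpp
#include "base/atomicops.h"  // assumed public include, as above

base::subtle::Atomic64 g_payload = 0;
base::subtle::Atomic64 g_ready = 0;

void Producer() {
  base::subtle::NoBarrier_Store(&g_payload, 42);
  base::subtle::Release_Store(&g_ready, 1);   // publishes the payload write
}

void Consumer() {
  if (base::subtle::Acquire_Load(&g_ready) == 1) {
    // The acquire pairs with the release above, so the payload is visible.
    base::subtle::Atomic64 v = base::subtle::NoBarrier_Load(&g_payload);
    (void)v;  // v == 42
  }
}
```
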
| 255 | 342 | 
| 256 #else  // defined(_WIN64) || defined(__MINGW64__) | 343 #else  // defined(_WIN64) || defined(__MINGW64__) | 
| 257 | 344 | 
| 258 // 64-bit low-level operations on 32-bit platform | 345 // 64-bit low-level operations on 32-bit platform | 
| 259 | 346 | 
| 260 // TODO(vchen): The GNU assembly below must be converted to MSVC inline | 347 // TODO(vchen): The GNU assembly below must be converted to MSVC inline | 
| 261 // assembly.  Then the file should be renamed to ...-x86-mscv.h, probably. | 348 // assembly.  Then the file should be renamed to ...-x86-msvc.h, probably. | 
| 262 | 349 | 
| 263 inline void NotImplementedFatalError(const char *function_name) { | 350 inline void NotImplementedFatalError(const char *function_name) { | 
| 264   fprintf(stderr, "64-bit %s() not implemented on this platform\n", | 351   fprintf(stderr, "64-bit %s() not implemented on this platform\n", | 
| 265           function_name); | 352           function_name); | 
| 266   abort(); | 353   abort(); | 
| 267 } | 354 } | 
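
Since every 64-bit operation in this fallback branch aborts at runtime, callers are expected to gate 64-bit usage at compile time on `BASE_HAS_ATOMIC64` (defined near the top of this header, and per its own comment intended only for tests and base/atomic*). A hypothetical gate, with `counter64`, `counter32`, and `BumpEventCount` purely illustrative:

```cpp
#include "base/atomicops.h"  // assumed public include, as above

static base::subtle::Atomic64 counter64;  // used when 64-bit ops are real
static base::subtle::Atomic32 counter32;  // fallback on this platform

void BumpEventCount() {
#ifdef BASE_HAS_ATOMIC64
  base::subtle::NoBarrier_AtomicIncrement(&counter64, 1);
#else
  base::subtle::NoBarrier_AtomicIncrement(&counter32, 1);
#endif
}
```
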
| 268 | 355 | 
| 269 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, | 356 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, | 
| 270                                          Atomic64 old_value, | 357                                          Atomic64 old_value, | 
| 271                                          Atomic64 new_value) { | 358                                          Atomic64 new_value) { | 
| (...skipping 133 matching lines...) |
| 405 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, | 492 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, | 
| 406                                        Atomic64 old_value, | 493                                        Atomic64 old_value, | 
| 407                                        Atomic64 new_value) { | 494                                        Atomic64 new_value) { | 
| 408   return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 495   return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 
| 409 } | 496 } | 
| 410 | 497 | 
| 411 }  // namespace base::subtle | 498 }  // namespace base::subtle | 
| 412 }  // namespace base | 499 }  // namespace base | 
| 413 | 500 | 
| 414 #endif  // BASE_ATOMICOPS_INTERNALS_WINDOWS_H_ | 501 #endif  // BASE_ATOMICOPS_INTERNALS_WINDOWS_H_ | 