| OLD | NEW | 
|---|
| 1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. | 
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be | 
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. | 
| 4 | 4 | 
| 5 // This file is an internal atomic implementation, use base/atomicops.h instead. | 5 // This file is an internal atomic implementation, use atomicops.h instead. | 
| 6 | 6 | 
| 7 #ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_ | 7 #ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_ | 
| 8 #define V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_ | 8 #define V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_ | 
| 9 | 9 | 
| 10 #include "src/base/macros.h" | 10 #include "src/base/macros.h" | 
| 11 #include "src/base/win32-headers.h" | 11 #include "src/base/win32-headers.h" | 
| 12 | 12 | 
| 13 #if defined(V8_HOST_ARCH_64_BIT) | 13 #if defined(V8_HOST_ARCH_64_BIT) | 
| 14 // windows.h #defines this (only on x64). This causes problems because the | 14 // windows.h #defines this (only on x64). This causes problems because the | 
| 15 // public API also uses MemoryBarrier as the public name for this fence. So, on | 15 // public API also uses MemoryBarrier as the public name for this fence. So, on | 
| 16 // X64, undef it, and call its documented | 16 // X64, undef it, and call its documented | 
| 17 // (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx) | 17 // (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx) | 
| 18 // implementation directly. | 18 // implementation directly. | 
| 19 #undef MemoryBarrier | 19 #undef MemoryBarrier | 
| 20 #endif | 20 #endif | 
| 21 | 21 | 
| 22 namespace v8 { | 22 namespace v8 { | 
| 23 namespace base { | 23 namespace base { | 
| 24 | 24 | 
| 25 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, | 25 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, | 
| 26                                          Atomic32 old_value, | 26                                          Atomic32 old_value, | 
| 27                                          Atomic32 new_value) { | 27                                          Atomic32 new_value) { | 
| 28   LONG result = InterlockedCompareExchange( | 28   LONG result = InterlockedCompareExchange( | 
| 29       reinterpret_cast<volatile LONG*>(ptr), static_cast<LONG>(new_value), | 29       reinterpret_cast<volatile LONG*>(ptr), | 
|  | 30       static_cast<LONG>(new_value), | 
| 30       static_cast<LONG>(old_value)); | 31       static_cast<LONG>(old_value)); | 
| 31   return static_cast<Atomic32>(result); | 32   return static_cast<Atomic32>(result); | 
| 32 } | 33 } | 
| 33 | 34 | 
| 34 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, | 35 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, | 
| 35                                          Atomic32 new_value) { | 36                                          Atomic32 new_value) { | 
| 36   LONG result = InterlockedExchange(reinterpret_cast<volatile LONG*>(ptr), | 37   LONG result = InterlockedExchange( | 
| 37                                     static_cast<LONG>(new_value)); | 38       reinterpret_cast<volatile LONG*>(ptr), | 
|  | 39       static_cast<LONG>(new_value)); | 
| 38   return static_cast<Atomic32>(result); | 40   return static_cast<Atomic32>(result); | 
| 39 } | 41 } | 
| 40 | 42 | 
| 41 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, | 43 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, | 
| 42                                         Atomic32 increment) { | 44                                         Atomic32 increment) { | 
| 43   return InterlockedExchangeAdd(reinterpret_cast<volatile LONG*>(ptr), | 45   return InterlockedExchangeAdd( | 
| 44                                 static_cast<LONG>(increment)) + | 46       reinterpret_cast<volatile LONG*>(ptr), | 
| 45          increment; | 47       static_cast<LONG>(increment)) + increment; | 
| 46 } | 48 } | 
| 47 | 49 | 
| 48 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, | 50 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, | 
| 49                                           Atomic32 increment) { | 51                                           Atomic32 increment) { | 
| 50   return Barrier_AtomicIncrement(ptr, increment); | 52   return Barrier_AtomicIncrement(ptr, increment); | 
| 51 } | 53 } | 
| 52 | 54 | 
|  | 55 #if !(defined(_MSC_VER) && _MSC_VER >= 1400) | 
|  | 56 #error "We require at least vs2005 for MemoryBarrier" | 
|  | 57 #endif | 
| 53 inline void MemoryBarrier() { | 58 inline void MemoryBarrier() { | 
| 54 #if defined(V8_HOST_ARCH_64_BIT) | 59 #if defined(V8_HOST_ARCH_64_BIT) | 
| 55   // See #undef and note at the top of this file. | 60   // See #undef and note at the top of this file. | 
| 56   __faststorefence(); | 61   __faststorefence(); | 
| 57 #else | 62 #else | 
| 58   // We use MemoryBarrier from WinNT.h | 63   // We use MemoryBarrier from WinNT.h | 
| 59   ::MemoryBarrier(); | 64   ::MemoryBarrier(); | 
| 60 #endif | 65 #endif | 
| 61 } | 66 } | 
| 62 | 67 | 
| (...skipping 10 matching lines...) Expand all  Loading... | 
| 73 } | 78 } | 
| 74 | 79 | 
| 75 inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) { | 80 inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) { | 
| 76   *ptr = value; | 81   *ptr = value; | 
| 77 } | 82 } | 
| 78 | 83 | 
| 79 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { | 84 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { | 
| 80   *ptr = value; | 85   *ptr = value; | 
| 81 } | 86 } | 
| 82 | 87 | 
|  | 88 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { | 
|  | 89   NoBarrier_AtomicExchange(ptr, value); | 
|  | 90               // acts as a barrier in this implementation | 
|  | 91 } | 
|  | 92 | 
| 83 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { | 93 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { | 
| 84   *ptr = value;  // works w/o barrier for current Intel chips as of June 2005 | 94   *ptr = value;  // works w/o barrier for current Intel chips as of June 2005 | 
| 85   // See comments in Atomic64 version of Release_Store() below. | 95   // See comments in Atomic64 version of Release_Store() below. | 
| 86 } | 96 } | 
| 87 | 97 | 
| 88 inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { | 98 inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { | 
| 89   return *ptr; | 99   return *ptr; | 
| 90 } | 100 } | 
| 91 | 101 | 
| 92 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { | 102 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { | 
| 93   return *ptr; | 103   return *ptr; | 
| 94 } | 104 } | 
| 95 | 105 | 
| 96 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { | 106 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { | 
| 97   Atomic32 value = *ptr; | 107   Atomic32 value = *ptr; | 
| 98   return value; | 108   return value; | 
| 99 } | 109 } | 
| 100 | 110 | 
|  | 111 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { | 
|  | 112   MemoryBarrier(); | 
|  | 113   return *ptr; | 
|  | 114 } | 
|  | 115 | 
| 101 #if defined(_WIN64) | 116 #if defined(_WIN64) | 
| 102 | 117 | 
| 103 // 64-bit low-level operations on 64-bit platform. | 118 // 64-bit low-level operations on 64-bit platform. | 
| 104 | 119 | 
| 105 static_assert(sizeof(Atomic64) == sizeof(PVOID), "atomic word is atomic"); | 120 STATIC_ASSERT(sizeof(Atomic64) == sizeof(PVOID)); | 
| 106 | 121 | 
| 107 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, | 122 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, | 
| 108                                          Atomic64 old_value, | 123                                          Atomic64 old_value, | 
| 109                                          Atomic64 new_value) { | 124                                          Atomic64 new_value) { | 
| 110   PVOID result = InterlockedCompareExchangePointer( | 125   PVOID result = InterlockedCompareExchangePointer( | 
| 111     reinterpret_cast<volatile PVOID*>(ptr), | 126     reinterpret_cast<volatile PVOID*>(ptr), | 
| 112     reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value)); | 127     reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value)); | 
| 113   return reinterpret_cast<Atomic64>(result); | 128   return reinterpret_cast<Atomic64>(result); | 
| 114 } | 129 } | 
| 115 | 130 | 
| (...skipping 14 matching lines...) Expand all  Loading... | 
| 130 | 145 | 
| 131 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, | 146 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, | 
| 132                                           Atomic64 increment) { | 147                                           Atomic64 increment) { | 
| 133   return Barrier_AtomicIncrement(ptr, increment); | 148   return Barrier_AtomicIncrement(ptr, increment); | 
| 134 } | 149 } | 
| 135 | 150 | 
| 136 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { | 151 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { | 
| 137   *ptr = value; | 152   *ptr = value; | 
| 138 } | 153 } | 
| 139 | 154 | 
|  | 155 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { | 
|  | 156   NoBarrier_AtomicExchange(ptr, value); | 
|  | 157               // acts as a barrier in this implementation | 
|  | 158 } | 
|  | 159 | 
| 140 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { | 160 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { | 
| 141   *ptr = value;  // works w/o barrier for current Intel chips as of June 2005 | 161   *ptr = value;  // works w/o barrier for current Intel chips as of June 2005 | 
| 142 | 162 | 
| 143   // When new chips come out, check: | 163   // When new chips come out, check: | 
| 144   //  IA-32 Intel Architecture Software Developer's Manual, Volume 3: | 164   //  IA-32 Intel Architecture Software Developer's Manual, Volume 3: | 
| 145   //  System Programming Guide, Chapter 7: Multiple-processor management, | 165   //  System Programming Guide, Chapter 7: Multiple-processor management, | 
| 146   //  Section 7.2, Memory Ordering. | 166   //  Section 7.2, Memory Ordering. | 
| 147   // Last seen at: | 167   // Last seen at: | 
| 148   //   http://developer.intel.com/design/pentium4/manuals/index_new.htm | 168   //   http://developer.intel.com/design/pentium4/manuals/index_new.htm | 
| 149 } | 169 } | 
| 150 | 170 | 
| 151 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { | 171 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { | 
| 152   return *ptr; | 172   return *ptr; | 
| 153 } | 173 } | 
| 154 | 174 | 
| 155 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { | 175 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { | 
| 156   Atomic64 value = *ptr; | 176   Atomic64 value = *ptr; | 
| 157   return value; | 177   return value; | 
| 158 } | 178 } | 
| 159 | 179 | 
|  | 180 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { | 
|  | 181   MemoryBarrier(); | 
|  | 182   return *ptr; | 
|  | 183 } | 
|  | 184 | 
| 160 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, | 185 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, | 
| 161                                        Atomic64 old_value, | 186                                        Atomic64 old_value, | 
| 162                                        Atomic64 new_value) { | 187                                        Atomic64 new_value) { | 
| 163   return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 188   return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 
| 164 } | 189 } | 
| 165 | 190 | 
| 166 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, | 191 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, | 
| 167                                        Atomic64 old_value, | 192                                        Atomic64 old_value, | 
| 168                                        Atomic64 new_value) { | 193                                        Atomic64 new_value) { | 
| 169   return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 194   return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 
| 170 } | 195 } | 
| 171 | 196 | 
| 172 | 197 | 
| 173 #endif  // defined(_WIN64) | 198 #endif  // defined(_WIN64) | 
| 174 | 199 | 
| 175 }  // namespace base | 200 }  // namespace base | 
| 176 }  // namespace v8 | 201 }  // namespace v8 | 
| 177 | 202 | 
| 178 #endif  // V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_ | 203 #endif  // V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_ | 
| OLD | NEW | 
|---|