OLD | NEW |
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 // This file is an internal atomic implementation, use atomicops.h instead. | 5 // This file is an internal atomic implementation, use base/atomicops.h instead. |
6 | 6 |
7 #ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_ | 7 #ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_ |
8 #define V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_ | 8 #define V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_ |
9 | 9 |
10 #include "src/base/macros.h" | 10 #include "src/base/macros.h" |
11 #include "src/base/win32-headers.h" | 11 #include "src/base/win32-headers.h" |
12 | 12 |
13 #if defined(V8_HOST_ARCH_64_BIT) | 13 #if defined(V8_HOST_ARCH_64_BIT) |
14 // windows.h #defines this (only on x64). This causes problems because the | 14 // windows.h #defines this (only on x64). This causes problems because the |
15 // public API also uses MemoryBarrier as the public name for this fence. So, on | 15 // public API also uses MemoryBarrier as the public name for this fence. So, on |
16 // X64, undef it, and call its documented | 16 // X64, undef it, and call its documented |
17 // (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx) | 17 // (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx) |
18 // implementation directly. | 18 // implementation directly. |
19 #undef MemoryBarrier | 19 #undef MemoryBarrier |
20 #endif | 20 #endif |
21 | 21 |
22 namespace v8 { | 22 namespace v8 { |
23 namespace base { | 23 namespace base { |
24 | 24 |
25 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, | 25 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, |
26 Atomic32 old_value, | 26 Atomic32 old_value, |
27 Atomic32 new_value) { | 27 Atomic32 new_value) { |
28 LONG result = InterlockedCompareExchange( | 28 LONG result = InterlockedCompareExchange( |
29 reinterpret_cast<volatile LONG*>(ptr), | 29 reinterpret_cast<volatile LONG*>(ptr), static_cast<LONG>(new_value), |
30 static_cast<LONG>(new_value), | |
31 static_cast<LONG>(old_value)); | 30 static_cast<LONG>(old_value)); |
32 return static_cast<Atomic32>(result); | 31 return static_cast<Atomic32>(result); |
33 } | 32 } |
34 | 33 |
35 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, | 34 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, |
36 Atomic32 new_value) { | 35 Atomic32 new_value) { |
37 LONG result = InterlockedExchange( | 36 LONG result = InterlockedExchange(reinterpret_cast<volatile LONG*>(ptr), |
38 reinterpret_cast<volatile LONG*>(ptr), | 37 static_cast<LONG>(new_value)); |
39 static_cast<LONG>(new_value)); | |
40 return static_cast<Atomic32>(result); | 38 return static_cast<Atomic32>(result); |
41 } | 39 } |
42 | 40 |
43 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, | 41 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, |
44 Atomic32 increment) { | 42 Atomic32 increment) { |
45 return InterlockedExchangeAdd( | 43 return InterlockedExchangeAdd(reinterpret_cast<volatile LONG*>(ptr), |
46 reinterpret_cast<volatile LONG*>(ptr), | 44 static_cast<LONG>(increment)) + |
47 static_cast<LONG>(increment)) + increment; | 45 increment; |
48 } | 46 } |
49 | 47 |
// No-barrier increment. MSVC on x86 offers no cheaper atomic add than the
// fully fenced Interlocked intrinsic, so this simply delegates to the
// barrier version.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}
54 | 52 |
// Full hardware memory barrier. On x64 the windows.h MemoryBarrier macro was
// #undef'd at the top of this file so this function can own the name and call
// the documented fence intrinsic directly; on 32-bit the windows.h definition
// is still available and is invoked via the qualified ::MemoryBarrier.
inline void MemoryBarrier() {
#if defined(V8_HOST_ARCH_64_BIT)
  // See #undef and note at the top of this file.
  __faststorefence();
#else
  // We use MemoryBarrier from WinNT.h
  ::MemoryBarrier();
#endif
}
67 | 62 |
(...skipping 10 matching lines...) Expand all Loading... |
78 } | 73 } |
79 | 74 |
// Plain byte store with no ordering guarantees; a single aligned byte store
// is inherently atomic on x86.
inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
  *ptr = value;
}
83 | 78 |
// Plain 32-bit store with no ordering guarantees; an aligned 32-bit store is
// inherently atomic on x86.
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}
87 | 82 |
88 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { | |
89 NoBarrier_AtomicExchange(ptr, value); | |
90 // acts as a barrier in this implementation | |
91 } | |
92 | |
// Store with release semantics. On x86 an ordinary aligned store already has
// release ordering, so no explicit fence is emitted.
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005
  // See comments in Atomic64 version of Release_Store() below.
}
97 | 87 |
// Plain byte load with no ordering guarantees.
inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
  return *ptr;
}
101 | 91 |
// Plain 32-bit load with no ordering guarantees.
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}
105 | 95 |
// Load with acquire semantics. On x86 an ordinary aligned load already has
// acquire ordering, so no fence is needed.
// NOTE(review): the load-into-a-temporary shape appears deliberate and this
// relies on MSVC treating volatile accesses as compiler barriers
// (/volatile:ms) — confirm before simplifying to `return *ptr;`.
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  return value;
}
110 | 100 |
111 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { | |
112 MemoryBarrier(); | |
113 return *ptr; | |
114 } | |
115 | |
116 #if defined(_WIN64) | 101 #if defined(_WIN64) |
117 | 102 |
118 // 64-bit low-level operations on 64-bit platform. | 103 // 64-bit low-level operations on 64-bit platform. |
119 | 104 |
// Atomic64 must be exactly pointer-sized so the Interlocked*Pointer
// intrinsics below can operate on it.
static_assert(sizeof(Atomic64) == sizeof(PVOID), "atomic word is atomic");
121 | 106 |
122 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, | 107 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, |
123 Atomic64 old_value, | 108 Atomic64 old_value, |
124 Atomic64 new_value) { | 109 Atomic64 new_value) { |
125 PVOID result = InterlockedCompareExchangePointer( | 110 PVOID result = InterlockedCompareExchangePointer( |
126 reinterpret_cast<volatile PVOID*>(ptr), | 111 reinterpret_cast<volatile PVOID*>(ptr), |
127 reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value)); | 112 reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value)); |
128 return reinterpret_cast<Atomic64>(result); | 113 return reinterpret_cast<Atomic64>(result); |
129 } | 114 } |
130 | 115 |
(...skipping 14 matching lines...) Expand all Loading... |
145 | 130 |
// No-barrier 64-bit increment; delegates to the fully fenced barrier version
// for the same reason as the Atomic32 variant above.
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}
150 | 135 |
// Plain 64-bit store with no ordering guarantees; an aligned 64-bit store is
// atomic on x64.
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}
154 | 139 |
155 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { | |
156 NoBarrier_AtomicExchange(ptr, value); | |
157 // acts as a barrier in this implementation | |
158 } | |
159 | |
// Store with release semantics. On x64 an ordinary aligned store already has
// release ordering, so no explicit fence is emitted.
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005

  // When new chips come out, check:
  // IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  // System Programming Guide, Chapter 7: Multiple-processor management,
  // Section 7.2, Memory Ordering.
  // Last seen at:
  // http://developer.intel.com/design/pentium4/manuals/index_new.htm
}
170 | 150 |
// Plain 64-bit load with no ordering guarantees.
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}
174 | 154 |
// Load with acquire semantics; mirrors the Atomic32 variant above.
// NOTE(review): the temporary appears deliberate (volatile access as a
// compiler barrier under /volatile:ms) — confirm before simplifying.
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  return value;
}
179 | 159 |
180 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { | |
181 MemoryBarrier(); | |
182 return *ptr; | |
183 } | |
184 | |
// Acquire-ordered 64-bit CAS. The Interlocked intrinsic underlying
// NoBarrier_CompareAndSwap is already a full barrier, so delegation suffices.
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
190 | 165 |
// Release-ordered 64-bit CAS. The Interlocked intrinsic underlying
// NoBarrier_CompareAndSwap is already a full barrier, so delegation suffices.
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
196 | 171 |
197 | 172 |
198 #endif // defined(_WIN64) | 173 #endif // defined(_WIN64) |
199 | 174 |
200 } // namespace base | 175 } // namespace base |
201 } // namespace v8 | 176 } // namespace v8 |
202 | 177 |
203 #endif // V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_ | 178 #endif // V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_ |
OLD | NEW |