OLD | NEW |
1 // Copyright 2016 the V8 project authors. All rights reserved. | 1 // Copyright 2016 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/base/macros.h" | 5 #include "src/base/macros.h" |
6 #include "src/base/platform/mutex.h" | 6 #include "src/base/platform/mutex.h" |
7 #include "src/base/platform/time.h" | 7 #include "src/base/platform/time.h" |
8 #include "src/builtins/builtins-utils.h" | 8 #include "src/builtins/builtins-utils.h" |
9 #include "src/builtins/builtins.h" | 9 #include "src/builtins/builtins.h" |
10 #include "src/code-factory.h" | 10 #include "src/code-factory.h" |
(...skipping 168 matching lines...) |
179 | 179 |
180 return FutexEmulation::Wait(isolate, array_buffer, addr, value_int32, | 180 return FutexEmulation::Wait(isolate, array_buffer, addr, value_int32, |
181 timeout_number); | 181 timeout_number); |
182 } | 182 } |
183 | 183 |
184 namespace { | 184 namespace { |
185 | 185 |
186 #if V8_CC_GNU | 186 #if V8_CC_GNU |
187 | 187 |
188 template <typename T> | 188 template <typename T> |
189 inline T CompareExchangeSeqCst(T* p, T oldval, T newval) { | |
190 (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST, | |
191 __ATOMIC_SEQ_CST); | |
192 return oldval; | |
193 } | |
194 | |
195 template <typename T> | |
196 inline T AddSeqCst(T* p, T value) { | 189 inline T AddSeqCst(T* p, T value) { |
197 return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST); | 190 return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST); |
198 } | 191 } |
199 | 192 |
200 template <typename T> | 193 template <typename T> |
201 inline T SubSeqCst(T* p, T value) { | 194 inline T SubSeqCst(T* p, T value) { |
202 return __atomic_fetch_sub(p, value, __ATOMIC_SEQ_CST); | 195 return __atomic_fetch_sub(p, value, __ATOMIC_SEQ_CST); |
203 } | 196 } |
204 | 197 |
205 template <typename T> | 198 template <typename T> |
206 inline T AndSeqCst(T* p, T value) { | 199 inline T AndSeqCst(T* p, T value) { |
207 return __atomic_fetch_and(p, value, __ATOMIC_SEQ_CST); | 200 return __atomic_fetch_and(p, value, __ATOMIC_SEQ_CST); |
208 } | 201 } |
209 | 202 |
210 template <typename T> | 203 template <typename T> |
211 inline T OrSeqCst(T* p, T value) { | 204 inline T OrSeqCst(T* p, T value) { |
212 return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST); | 205 return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST); |
213 } | 206 } |
214 | 207 |
215 template <typename T> | 208 template <typename T> |
216 inline T XorSeqCst(T* p, T value) { | 209 inline T XorSeqCst(T* p, T value) { |
217 return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST); | 210 return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST); |
218 } | 211 } |
219 | 212 |
220 #elif V8_CC_MSVC | 213 #elif V8_CC_MSVC |
221 | 214 |
222 #define InterlockedCompareExchange32 _InterlockedCompareExchange | |
223 #define InterlockedExchange32 _InterlockedExchange | |
224 #define InterlockedExchangeAdd32 _InterlockedExchangeAdd | 215 #define InterlockedExchangeAdd32 _InterlockedExchangeAdd |
225 #define InterlockedAnd32 _InterlockedAnd | 216 #define InterlockedAnd32 _InterlockedAnd |
226 #define InterlockedOr32 _InterlockedOr | 217 #define InterlockedOr32 _InterlockedOr |
227 #define InterlockedXor32 _InterlockedXor | 218 #define InterlockedXor32 _InterlockedXor |
228 #define InterlockedExchangeAdd16 _InterlockedExchangeAdd16 | 219 #define InterlockedExchangeAdd16 _InterlockedExchangeAdd16 |
229 #define InterlockedCompareExchange8 _InterlockedCompareExchange8 | |
230 #define InterlockedExchangeAdd8 _InterlockedExchangeAdd8 | 220 #define InterlockedExchangeAdd8 _InterlockedExchangeAdd8 |
231 | 221 |
232 #define ATOMIC_OPS(type, suffix, vctype) \ | 222 #define ATOMIC_OPS(type, suffix, vctype) \ |
233 inline type AddSeqCst(type* p, type value) { \ | 223 inline type AddSeqCst(type* p, type value) { \ |
234 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ | 224 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ |
235 bit_cast<vctype>(value)); \ | 225 bit_cast<vctype>(value)); \ |
236 } \ | 226 } \ |
237 inline type SubSeqCst(type* p, type value) { \ | 227 inline type SubSeqCst(type* p, type value) { \ |
238 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ | 228 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ |
239 -bit_cast<vctype>(value)); \ | 229 -bit_cast<vctype>(value)); \ |
240 } \ | 230 } \ |
241 inline type AndSeqCst(type* p, type value) { \ | 231 inline type AndSeqCst(type* p, type value) { \ |
242 return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \ | 232 return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \ |
243 bit_cast<vctype>(value)); \ | 233 bit_cast<vctype>(value)); \ |
244 } \ | 234 } \ |
245 inline type OrSeqCst(type* p, type value) { \ | 235 inline type OrSeqCst(type* p, type value) { \ |
246 return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \ | 236 return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \ |
247 bit_cast<vctype>(value)); \ | 237 bit_cast<vctype>(value)); \ |
248 } \ | 238 } \ |
249 inline type XorSeqCst(type* p, type value) { \ | 239 inline type XorSeqCst(type* p, type value) { \ |
250 return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \ | 240 return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \ |
251 bit_cast<vctype>(value)); \ | 241 bit_cast<vctype>(value)); \ |
252 } \ | |
253 inline type CompareExchangeSeqCst(type* p, type oldval, type newval) { \ | |
254 return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \ | |
255 bit_cast<vctype>(newval), \ | |
256 bit_cast<vctype>(oldval)); \ | |
257 } | 242 } |
258 | 243 |
259 ATOMIC_OPS(int8_t, 8, char) | 244 ATOMIC_OPS(int8_t, 8, char) |
260 ATOMIC_OPS(uint8_t, 8, char) | 245 ATOMIC_OPS(uint8_t, 8, char) |
261 ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */ | 246 ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */ |
262 ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */ | 247 ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */ |
263 ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */ | 248 ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */ |
264 ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */ | 249 ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */ |
265 | 250 |
266 #undef ATOMIC_OPS_INTEGER | 251 #undef ATOMIC_OPS_INTEGER |
267 #undef ATOMIC_OPS | 252 #undef ATOMIC_OPS |
268 | 253 |
269 #undef InterlockedCompareExchange32 | |
270 #undef InterlockedExchange32 | |
271 #undef InterlockedExchangeAdd32 | 254 #undef InterlockedExchangeAdd32 |
272 #undef InterlockedAnd32 | 255 #undef InterlockedAnd32 |
273 #undef InterlockedOr32 | 256 #undef InterlockedOr32 |
274 #undef InterlockedXor32 | 257 #undef InterlockedXor32 |
275 #undef InterlockedExchangeAdd16 | 258 #undef InterlockedExchangeAdd16 |
276 #undef InterlockedCompareExchange8 | |
277 #undef InterlockedExchangeAdd8 | 259 #undef InterlockedExchangeAdd8 |
278 | 260 |
279 #else | 261 #else |
280 | 262 |
281 #error Unsupported platform! | 263 #error Unsupported platform! |
282 | 264 |
283 #endif | 265 #endif |
284 | 266 |
285 template <typename T> | 267 template <typename T> |
286 T FromObject(Handle<Object> number); | 268 T FromObject(Handle<Object> number); |
(...skipping 40 matching lines...) |
327 | 309 |
328 inline Object* ToObject(Isolate* isolate, int32_t t) { | 310 inline Object* ToObject(Isolate* isolate, int32_t t) { |
329 return *isolate->factory()->NewNumber(t); | 311 return *isolate->factory()->NewNumber(t); |
330 } | 312 } |
331 | 313 |
332 inline Object* ToObject(Isolate* isolate, uint32_t t) { | 314 inline Object* ToObject(Isolate* isolate, uint32_t t) { |
333 return *isolate->factory()->NewNumber(t); | 315 return *isolate->factory()->NewNumber(t); |
334 } | 316 } |
335 | 317 |
336 template <typename T> | 318 template <typename T> |
337 inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index, | |
338 Handle<Object> oldobj, Handle<Object> newobj) { | |
339 T oldval = FromObject<T>(oldobj); | |
340 T newval = FromObject<T>(newobj); | |
341 T result = | |
342 CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval); | |
343 return ToObject(isolate, result); | |
344 } | |
345 | |
346 template <typename T> | |
347 inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index, | 319 inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index, |
348 Handle<Object> obj) { | 320 Handle<Object> obj) { |
349 T value = FromObject<T>(obj); | 321 T value = FromObject<T>(obj); |
350 T result = AddSeqCst(static_cast<T*>(buffer) + index, value); | 322 T result = AddSeqCst(static_cast<T*>(buffer) + index, value); |
351 return ToObject(isolate, result); | 323 return ToObject(isolate, result); |
352 } | 324 } |
353 | 325 |
354 template <typename T> | 326 template <typename T> |
355 inline Object* DoSub(Isolate* isolate, void* buffer, size_t index, | 327 inline Object* DoSub(Isolate* isolate, void* buffer, size_t index, |
356 Handle<Object> obj) { | 328 Handle<Object> obj) { |
(...skipping 31 matching lines...) |
388 // Duplicated from objects.h | 360 // Duplicated from objects.h |
389 // V has parameters (Type, type, TYPE, C type, element_size) | 361 // V has parameters (Type, type, TYPE, C type, element_size) |
390 #define INTEGER_TYPED_ARRAYS(V) \ | 362 #define INTEGER_TYPED_ARRAYS(V) \ |
391 V(Uint8, uint8, UINT8, uint8_t, 1) \ | 363 V(Uint8, uint8, UINT8, uint8_t, 1) \ |
392 V(Int8, int8, INT8, int8_t, 1) \ | 364 V(Int8, int8, INT8, int8_t, 1) \ |
393 V(Uint16, uint16, UINT16, uint16_t, 2) \ | 365 V(Uint16, uint16, UINT16, uint16_t, 2) \ |
394 V(Int16, int16, INT16, int16_t, 2) \ | 366 V(Int16, int16, INT16, int16_t, 2) \ |
395 V(Uint32, uint32, UINT32, uint32_t, 4) \ | 367 V(Uint32, uint32, UINT32, uint32_t, 4) \ |
396 V(Int32, int32, INT32, int32_t, 4) | 368 V(Int32, int32, INT32, int32_t, 4) |
397 | 369 |
398 // ES #sec-atomics.compareexchange |
399 // Atomics.compareExchange( typedArray, index, expectedValue, replacementValue ) | |
400 BUILTIN(AtomicsCompareExchange) { | |
401 HandleScope scope(isolate); | |
402 Handle<Object> array = args.atOrUndefined(isolate, 1); | |
403 Handle<Object> index = args.atOrUndefined(isolate, 2); | |
404 Handle<Object> expected_value = args.atOrUndefined(isolate, 3); | |
405 Handle<Object> replacement_value = args.atOrUndefined(isolate, 4); | |
406 | |
407 Handle<JSTypedArray> sta; | |
408 ASSIGN_RETURN_FAILURE_ON_EXCEPTION( | |
409 isolate, sta, ValidateSharedIntegerTypedArray(isolate, array)); | |
410 | |
411 Maybe<size_t> maybe_index = ValidateAtomicAccess(isolate, sta, index); | |
412 if (maybe_index.IsNothing()) return isolate->heap()->exception(); | |
413 size_t i = maybe_index.FromJust(); | |
414 | |
415 ASSIGN_RETURN_FAILURE_ON_EXCEPTION( | |
416 isolate, expected_value, Object::ToInteger(isolate, expected_value)); | |
417 | |
418 ASSIGN_RETURN_FAILURE_ON_EXCEPTION( | |
419 isolate, replacement_value, | |
420 Object::ToInteger(isolate, replacement_value)); | |
421 | |
422 uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + | |
423 NumberToSize(sta->byte_offset()); | |
424 | |
425 switch (sta->type()) { | |
426 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ | |
427 case kExternal##Type##Array: \ | |
428 return DoCompareExchange<ctype>(isolate, source, i, expected_value, \ | |
429 replacement_value); | |
430 | |
431 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) | |
432 #undef TYPED_ARRAY_CASE | |
433 | |
434 default: | |
435 break; | |
436 } | |
437 | |
438 UNREACHABLE(); | |
439 return isolate->heap()->undefined_value(); | |
440 } | |
441 | |
442 // ES #sec-atomics.add | 370 // ES #sec-atomics.add |
443 // Atomics.add( typedArray, index, value ) | 371 // Atomics.add( typedArray, index, value ) |
444 BUILTIN(AtomicsAdd) { | 372 BUILTIN(AtomicsAdd) { |
445 HandleScope scope(isolate); | 373 HandleScope scope(isolate); |
446 Handle<Object> array = args.atOrUndefined(isolate, 1); | 374 Handle<Object> array = args.atOrUndefined(isolate, 1); |
447 Handle<Object> index = args.atOrUndefined(isolate, 2); | 375 Handle<Object> index = args.atOrUndefined(isolate, 2); |
448 Handle<Object> value = args.atOrUndefined(isolate, 3); | 376 Handle<Object> value = args.atOrUndefined(isolate, 3); |
449 | 377 |
450 Handle<JSTypedArray> sta; | 378 Handle<JSTypedArray> sta; |
451 ASSIGN_RETURN_FAILURE_ON_EXCEPTION( | 379 ASSIGN_RETURN_FAILURE_ON_EXCEPTION( |
(...skipping 172 matching lines...) |
624 default: | 552 default: |
625 break; | 553 break; |
626 } | 554 } |
627 | 555 |
628 UNREACHABLE(); | 556 UNREACHABLE(); |
629 return isolate->heap()->undefined_value(); | 557 return isolate->heap()->undefined_value(); |
630 } | 558 } |
631 | 559 |
632 } // namespace internal | 560 } // namespace internal |
633 } // namespace v8 | 561 } // namespace v8 |
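
Note for reviewers: this change drops the C++ fallback for Atomics.compareExchange (the CompareExchangeSeqCst wrappers and the AtomicsCompareExchange builtin shown in the old column above). For reference, below is a minimal sketch of the same sequentially consistent compare-and-swap contract, written against std::atomic rather than the compiler intrinsics and Interlocked* calls used by the deleted helpers; the name is illustrative, not V8 API.

#include <atomic>

// Sketch only: returns the value actually observed at *p, matching the
// "return what was read" contract of the removed CompareExchangeSeqCst helpers.
template <typename T>
inline T CompareExchangeSeqCstSketch(std::atomic<T>* p, T oldval, T newval) {
  // On failure, compare_exchange_strong writes the observed value back into
  // 'oldval', so returning it yields the element that was really in memory.
  p->compare_exchange_strong(oldval, newval, std::memory_order_seq_cst);
  return oldval;
}

A caller that gets its expected value back knows the swap took place; any other return value is the element that was in the array at the time of the access.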