v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
atomicops.h
// Copyright 2010 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_BASE_ATOMICOPS_H_
#define V8_BASE_ATOMICOPS_H_

// The routines exported by this module are subtle. If you use them, even if
// you get the code right, it will depend on careful reasoning about atomicity
// and memory ordering; it will be less readable, and harder to maintain. If
// you plan to use these routines, you should have a good reason, such as solid
// evidence that performance would otherwise suffer, or there being no
// alternative. You should assume only properties explicitly guaranteed by the
// specifications in this file. You are almost certainly _not_ writing code
// just for the x86; if you assume x86 semantics, x86 hardware bugs and
// implementations on other architectures will cause your code to break. If you
// do not know what you are doing, avoid these routines, and use a Mutex.
//
// It is incorrect to make direct assignments to/from an atomic variable.
// You should use one of the Load or Store routines. The Relaxed versions
// are provided when no fences are needed:
//   Relaxed_Store()
//   Relaxed_Load()
// Although there is currently no compiler enforcement, you are encouraged
// to use these.
//

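// Usage sketch (not part of the original header): prefer the Load/Store
// helpers over direct reads and writes of the Atomic* variables declared
// below. |flag| here is a hypothetical example variable.
//
//   v8::base::Atomic32 flag = 0;
//   v8::base::Relaxed_Store(&flag, 1);                       // not: flag = 1;
//   v8::base::Atomic32 seen = v8::base::Relaxed_Load(&flag); // not: seen = flag;
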
#include <stdint.h>

#include <atomic>

// Small C++ header which defines implementation specific macros used to
// identify the STL implementation.
// - libc++: captures __config for _LIBCPP_VERSION
// - libstdc++: captures bits/c++config.h for __GLIBCXX__
#include <cstddef>

#include "src/base/base-export.h"
#include "src/base/logging.h"
#include "src/base/macros.h"

#if defined(V8_OS_STARBOARD)
#include "starboard/atomic.h"
#endif  // V8_OS_STARBOARD

namespace v8 {
namespace base {

#ifdef V8_OS_STARBOARD
using Atomic8 = SbAtomic8;
using Atomic16 = int16_t;
using Atomic32 = SbAtomic32;
#if SB_IS_64_BIT
using Atomic64 = SbAtomic64;
#endif
#else
using Atomic8 = char;
using Atomic16 = int16_t;
using Atomic32 = int32_t;
#if defined(V8_HOST_ARCH_64_BIT)
// We need to be able to go between Atomic64 and AtomicWord implicitly. This
// means Atomic64 and AtomicWord should be the same type on 64-bit.
#if defined(__ILP32__)
using Atomic64 = int64_t;
#else
using Atomic64 = intptr_t;
#endif  // defined(__ILP32__)
#endif  // defined(V8_HOST_ARCH_64_BIT)
#endif  // V8_OS_STARBOARD

// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
#if defined(V8_HOST_ARCH_64_BIT)
using AtomicWord = Atomic64;
#else
using AtomicWord = Atomic32;
#endif
static_assert(sizeof(void*) == sizeof(AtomicWord));
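
// Usage sketch (not part of the original header): AtomicWord is the type to
// use for atomically published pointers; the static_assert above guarantees
// it is exactly pointer-sized. |shared| and |slot| are hypothetical.
//
//   void* shared = /* ... */;
//   v8::base::AtomicWord slot = 0;
//   v8::base::Release_Store(&slot,
//                           reinterpret_cast<v8::base::AtomicWord>(shared));
//   void* p = reinterpret_cast<void*>(v8::base::Acquire_Load(&slot));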

namespace helper {
template <typename T>
volatile std::atomic<T>* to_std_atomic(volatile T* ptr) {
  return reinterpret_cast<volatile std::atomic<T>*>(ptr);
}
template <typename T>
volatile const std::atomic<T>* to_std_atomic_const(volatile const T* ptr) {
  return reinterpret_cast<volatile const std::atomic<T>*>(ptr);
}
}  // namespace helper

inline void SeqCst_MemoryFence() {
  std::atomic_thread_fence(std::memory_order_seq_cst);
}

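// Usage sketch (not part of the original header): a full fence is what gives
// store-load ordering (e.g. Dekker-style mutual exclusion), which
// release/acquire alone does not provide. With both threads doing this, at
// most one of them can observe the other's flag as 0. |my_flag| and
// |their_flag| are hypothetical Atomic32 variables:
//
//   v8::base::Relaxed_Store(&my_flag, 1);
//   v8::base::SeqCst_MemoryFence();
//   if (v8::base::Relaxed_Load(&their_flag) == 0) { /* fast path */ }
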
// Atomically execute:
//   result = *ptr;
//   if (result == old_value)
//     *ptr = new_value;
//   return result;
//
// I.e. replace |*ptr| with |new_value| if |*ptr| used to be |old_value|.
// Always return the value of |*ptr| before the operation.
// Acquire, Relaxed, Release correspond to standard C++ memory orders.
inline Atomic8 Relaxed_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value,
                                      Atomic8 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_relaxed, std::memory_order_relaxed);
  return old_value;
}
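
// Usage sketch (not part of the original header): the canonical CAS retry
// loop. CompareAndSwap returns the previous value, so the swap succeeded
// exactly when the return value equals |old_value|. |cell| is hypothetical:
//
//   v8::base::Atomic32 v = v8::base::Relaxed_Load(&cell);
//   for (;;) {
//     v8::base::Atomic32 prev =
//         v8::base::Relaxed_CompareAndSwap(&cell, v, v * 2);
//     if (prev == v) break;  // the swap happened
//     v = prev;              // another thread won; retry with the new value
//   }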

inline Atomic16 Relaxed_CompareAndSwap(volatile Atomic16* ptr,
                                       Atomic16 old_value, Atomic16 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_relaxed, std::memory_order_relaxed);
  return old_value;
}

inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_relaxed, std::memory_order_relaxed);
  return old_value;
}

inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
                                       std::memory_order_relaxed);
}

inline Atomic32 SeqCst_AtomicExchange(volatile Atomic32* ptr,
                                      Atomic32 new_value) {
  return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
                                       std::memory_order_seq_cst);
}

inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr),
                                                    increment,
                                                    std::memory_order_relaxed);
}
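
// Usage sketch (not part of the original header): note that
// Relaxed_AtomicIncrement returns the *new* value (old + increment), not the
// old one. |counter| is hypothetical:
//
//   v8::base::Atomic32 counter = 0;
//   v8::base::Atomic32 n = v8::base::Relaxed_AtomicIncrement(&counter, 1);
//   // n == 1 after the first call.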

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_acquire, std::memory_order_acquire);
  return old_value;
}

inline Atomic8 Release_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value,
                                      Atomic8 new_value) {
  bool result = std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_release, std::memory_order_relaxed);
  USE(result);  // Make gcc compiler happy.
  return old_value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_release, std::memory_order_relaxed);
  return old_value;
}

inline Atomic32 AcquireRelease_CompareAndSwap(volatile Atomic32* ptr,
                                              Atomic32 old_value,
                                              Atomic32 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_acq_rel, std::memory_order_acquire);
  return old_value;
}

inline Atomic32 SeqCst_CompareAndSwap(volatile Atomic32* ptr,
                                      Atomic32 old_value, Atomic32 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_seq_cst, std::memory_order_seq_cst);
  return old_value;
}

inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_relaxed);
}

inline void Relaxed_Store(volatile Atomic16* ptr, Atomic16 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_relaxed);
}

inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_relaxed);
}

inline void Release_Store(volatile Atomic8* ptr, Atomic8 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_release);
}

inline void Release_Store(volatile Atomic16* ptr, Atomic16 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_release);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_release);
}

inline void SeqCst_Store(volatile Atomic8* ptr, Atomic8 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_seq_cst);
}

inline void SeqCst_Store(volatile Atomic16* ptr, Atomic16 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_seq_cst);
}

inline void SeqCst_Store(volatile Atomic32* ptr, Atomic32 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_seq_cst);
}

inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_relaxed);
}

inline Atomic16 Relaxed_Load(volatile const Atomic16* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_relaxed);
}

inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_relaxed);
}

inline Atomic8 Acquire_Load(volatile const Atomic8* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_acquire);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_acquire);
}

inline Atomic8 SeqCst_Load(volatile const Atomic8* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_seq_cst);
}

inline Atomic32 SeqCst_Load(volatile const Atomic32* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_seq_cst);
}

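// Usage sketch (not part of the original header): Release_Store/Acquire_Load
// form the message-passing idiom. The release store publishes every write
// that precedes it to any thread whose acquire load observes the flag.
// |payload| and |ready| are hypothetical:
//
//   // Producer thread:
//   payload = 42;                        // plain, non-atomic data
//   v8::base::Release_Store(&ready, 1);  // publish
//
//   // Consumer thread:
//   if (v8::base::Acquire_Load(&ready) == 1) {
//     // payload is guaranteed to read as 42 here.
//   }
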
#if defined(V8_HOST_ARCH_64_BIT)

inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_relaxed, std::memory_order_relaxed);
  return old_value;
}

inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
                                       std::memory_order_relaxed);
}

inline Atomic64 SeqCst_AtomicExchange(volatile Atomic64* ptr,
                                      Atomic64 new_value) {
  return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
                                       std::memory_order_seq_cst);
}

inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr),
                                                    increment,
                                                    std::memory_order_relaxed);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_acquire, std::memory_order_acquire);
  return old_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_release, std::memory_order_relaxed);
  return old_value;
}

inline Atomic64 AcquireRelease_CompareAndSwap(volatile Atomic64* ptr,
                                              Atomic64 old_value,
                                              Atomic64 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_acq_rel, std::memory_order_acquire);
  return old_value;
}

inline Atomic64 SeqCst_CompareAndSwap(volatile Atomic64* ptr,
                                      Atomic64 old_value, Atomic64 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_seq_cst, std::memory_order_seq_cst);
  return old_value;
}

inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_relaxed);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_release);
}

inline void SeqCst_Store(volatile Atomic64* ptr, Atomic64 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_seq_cst);
}

inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_acquire);
}

inline Atomic64 SeqCst_Load(volatile const Atomic64* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_seq_cst);
}

#endif  // defined(V8_HOST_ARCH_64_BIT)

inline void Relaxed_Memcpy(volatile Atomic8* dst, volatile const Atomic8* src,
                           size_t bytes) {
  constexpr size_t kAtomicWordSize = sizeof(AtomicWord);
  while (bytes > 0 &&
         !IsAligned(reinterpret_cast<uintptr_t>(dst), kAtomicWordSize)) {
    Relaxed_Store(dst++, Relaxed_Load(src++));
    --bytes;
  }
  if (IsAligned(reinterpret_cast<uintptr_t>(src), kAtomicWordSize) &&
      IsAligned(reinterpret_cast<uintptr_t>(dst), kAtomicWordSize)) {
    while (bytes >= kAtomicWordSize) {
      Relaxed_Store(
          reinterpret_cast<volatile AtomicWord*>(dst),
          Relaxed_Load(reinterpret_cast<const volatile AtomicWord*>(src)));
      dst += kAtomicWordSize;
      src += kAtomicWordSize;
      bytes -= kAtomicWordSize;
    }
  }
  while (bytes > 0) {
    Relaxed_Store(dst++, Relaxed_Load(src++));
    --bytes;
  }
}

inline void Relaxed_Memmove(volatile Atomic8* dst, volatile const Atomic8* src,
                            size_t bytes) {
  // Use Relaxed_Memcpy if copying forwards is safe. This is the case if there
  // is no overlap, or {dst} lies before {src}.
  // This single check checks for both:
  if (reinterpret_cast<uintptr_t>(dst) - reinterpret_cast<uintptr_t>(src) >=
      bytes) {
    Relaxed_Memcpy(dst, src, bytes);
    return;
  }

  // Otherwise copy backwards.
  dst += bytes;
  src += bytes;
  constexpr size_t kAtomicWordSize = sizeof(AtomicWord);
  while (bytes > 0 &&
         !IsAligned(reinterpret_cast<uintptr_t>(dst), kAtomicWordSize)) {
    Relaxed_Store(--dst, Relaxed_Load(--src));
    --bytes;
  }
  if (IsAligned(reinterpret_cast<uintptr_t>(src), kAtomicWordSize) &&
      IsAligned(reinterpret_cast<uintptr_t>(dst), kAtomicWordSize)) {
    while (bytes >= kAtomicWordSize) {
      dst -= kAtomicWordSize;
      src -= kAtomicWordSize;
      bytes -= kAtomicWordSize;
      Relaxed_Store(
          reinterpret_cast<volatile AtomicWord*>(dst),
          Relaxed_Load(reinterpret_cast<const volatile AtomicWord*>(src)));
    }
  }
  while (bytes > 0) {
    Relaxed_Store(--dst, Relaxed_Load(--src));
    --bytes;
  }
}
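
// Why the single check above suffices (not part of the original header): the
// subtraction is done on uintptr_t, so it wraps. If dst >= src, the result is
// the real distance, and >= bytes means no forward-copy hazard. If dst < src,
// the result wraps to a huge value that is always >= bytes, and copying
// forwards is safe anyway. For example, with 8-bit addresses:
//   dst = 0x10, src = 0x20, bytes = 0x18  ->  0x10 - 0x20 == 0xF0 >= 0x18,
// so the forward copy is taken even though the regions overlap, which is fine
// because {dst} lies before {src}.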

namespace helper {
inline int MemcmpNotEqualFundamental(Atomic8 u1, Atomic8 u2) {
  DCHECK_NE(u1, u2);
  return u1 < u2 ? -1 : 1;
}
inline int MemcmpNotEqualFundamental(AtomicWord u1, AtomicWord u2) {
  DCHECK_NE(u1, u2);
#if defined(V8_TARGET_BIG_ENDIAN)
  return u1 < u2 ? -1 : 1;
#else
  for (size_t i = 0; i < sizeof(AtomicWord); ++i) {
    uint8_t byte1 = u1 & 0xFF;
    uint8_t byte2 = u2 & 0xFF;
    if (byte1 != byte2) return byte1 < byte2 ? -1 : 1;
    u1 >>= 8;
    u2 >>= 8;
  }
  UNREACHABLE();
#endif
}
}  // namespace helper

inline int Relaxed_Memcmp(volatile const Atomic8* s1,
                          volatile const Atomic8* s2, size_t len) {
  constexpr size_t kAtomicWordSize = sizeof(AtomicWord);
  while (len > 0 &&
         !(IsAligned(reinterpret_cast<uintptr_t>(s1), kAtomicWordSize) &&
           IsAligned(reinterpret_cast<uintptr_t>(s2), kAtomicWordSize))) {
    Atomic8 u1 = Relaxed_Load(s1++);
    Atomic8 u2 = Relaxed_Load(s2++);
    if (u1 != u2) return helper::MemcmpNotEqualFundamental(u1, u2);
    --len;
  }

  if (IsAligned(reinterpret_cast<uintptr_t>(s1), kAtomicWordSize) &&
      IsAligned(reinterpret_cast<uintptr_t>(s2), kAtomicWordSize)) {
    while (len >= kAtomicWordSize) {
      AtomicWord u1 =
          Relaxed_Load(reinterpret_cast<const volatile AtomicWord*>(s1));
      AtomicWord u2 =
          Relaxed_Load(reinterpret_cast<const volatile AtomicWord*>(s2));
      if (u1 != u2) return helper::MemcmpNotEqualFundamental(u1, u2);
      s1 += kAtomicWordSize;
      s2 += kAtomicWordSize;
      len -= kAtomicWordSize;
    }
  }

  while (len > 0) {
    Atomic8 u1 = Relaxed_Load(s1++);
    Atomic8 u2 = Relaxed_Load(s2++);
    if (u1 != u2) return helper::MemcmpNotEqualFundamental(u1, u2);
    --len;
  }

  return 0;
}
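
// Why MemcmpNotEqualFundamental walks bytes on little-endian (not part of the
// original header): memcmp semantics order strings by the first differing
// *byte in memory*, but on little-endian the lowest-addressed byte is the
// least significant one, so a whole-word `<` would weight the bytes in the
// wrong order. E.g. for 4-byte words starting {0x01, 0xFF, ...} vs
// {0x02, 0x00, ...} (remaining bytes equal), memcmp must report "less"
// because 0x01 < 0x02, even though the little-endian word value 0x0000FF01
// is greater than 0x00000002.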

}  // namespace base
}  // namespace v8

#endif  // V8_BASE_ATOMICOPS_H_
Definition macros.h:403