v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
js-atomics-synchronization-inl.h
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_OBJECTS_JS_ATOMICS_SYNCHRONIZATION_INL_H_
#define V8_OBJECTS_JS_ATOMICS_SYNCHRONIZATION_INL_H_

#include "src/objects/js-atomics-synchronization.h"
// Include the non-inl header before the rest of the headers.

#include "src/common/globals.h"

// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"

namespace v8 {
namespace internal {

#include "torque-generated/src/objects/js-atomics-synchronization-tq-inl.inc"

TQ_OBJECT_CONSTRUCTORS_IMPL(JSSynchronizationPrimitive)

std::atomic<JSSynchronizationPrimitive::StateT>*
JSSynchronizationPrimitive::AtomicStatePtr() {
  StateT* state_ptr = reinterpret_cast<StateT*>(field_address(kStateOffset));
  DCHECK(IsAligned(reinterpret_cast<uintptr_t>(state_ptr), sizeof(StateT)));
  return base::AsAtomicPtr(state_ptr);
}

void JSSynchronizationPrimitive::SetNullWaiterQueueHead() {
#if V8_COMPRESS_POINTERS
  base::AsAtomic32::Relaxed_Store(waiter_queue_head_handle_location(),
                                  kNullExternalPointerHandle);
#else
  base::AsAtomicPointer::Relaxed_Store(waiter_queue_head_location(), nullptr);
#endif  // V8_COMPRESS_POINTERS
}

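// With pointer compression, the waiter queue head lives off-heap behind an
// ExternalPointerHandle in the shared external pointer table; without it, the
// field holds a raw WaiterQueueNode* inside the object.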
#if V8_COMPRESS_POINTERS
ExternalPointerHandle*
JSSynchronizationPrimitive::waiter_queue_head_handle_location() const {
  Address location = field_address(kWaiterQueueHeadOffset);
  return reinterpret_cast<ExternalPointerHandle*>(location);
}
#else
WaiterQueueNode** JSSynchronizationPrimitive::waiter_queue_head_location()
    const {
  Address location = field_address(kWaiterQueueHeadOffset);
  return reinterpret_cast<WaiterQueueNode**>(location);
}
#endif  // V8_COMPRESS_POINTERS

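// Detaches and returns the current waiter queue head, clearing the stored
// value so that no other thread can reach the detached list through it.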
WaiterQueueNode* JSSynchronizationPrimitive::DestructivelyGetWaiterQueueHead(
    Isolate* requester) {
  if (DEBUG_BOOL) {
    StateT state = AtomicStatePtr()->load(std::memory_order_relaxed);
    DCHECK(IsWaiterQueueLockedField::decode(state));
    USE(state);
  }
#if V8_COMPRESS_POINTERS
  ExternalPointerHandle handle =
      base::AsAtomic32::Relaxed_Load(waiter_queue_head_handle_location());
  if (handle == kNullExternalPointerHandle) return nullptr;
  // Clear the external pointer after decoding as a safeguard; no other thread
  // should be trying to access it through the same non-null handle.
  WaiterQueueNode* waiter_head = reinterpret_cast<WaiterQueueNode*>(
      requester->shared_external_pointer_table().Exchange(handle, kNullAddress,
                                                          kWaiterQueueNodeTag));
  return waiter_head;
#else
  return base::AsAtomicPointer::Relaxed_Load(waiter_queue_head_location());
#endif  // V8_COMPRESS_POINTERS
}

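// Publishes a new waiter queue head and folds the has-waiters bit into
// new_state; the caller is responsible for storing the returned state word.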
JSSynchronizationPrimitive::StateT
JSSynchronizationPrimitive::SetWaiterQueueHead(Isolate* requester,
                                               WaiterQueueNode* waiter_head,
                                               StateT new_state) {
  if (DEBUG_BOOL) {
    StateT state = AtomicStatePtr()->load(std::memory_order_relaxed);
    DCHECK(IsWaiterQueueLockedField::decode(state));
    USE(state);
  }
#if V8_COMPRESS_POINTERS
  ExternalPointerHandle handle =
      base::AsAtomic32::Relaxed_Load(waiter_queue_head_handle_location());
  if (waiter_head) {
    new_state = HasWaitersField::update(new_state, true);
    ExternalPointerTable& table = requester->shared_external_pointer_table();
    if (handle == kNullExternalPointerHandle) {
      handle = table.AllocateAndInitializeEntry(
          requester->shared_external_pointer_space(),
          reinterpret_cast<Address>(waiter_head), kWaiterQueueNodeTag);
      // Use a Release_Store to ensure that the store of the pointer into the
      // table is not reordered after the store of the handle. Otherwise, other
      // threads may access an uninitialized table entry and crash.
      base::AsAtomic32::Release_Store(waiter_queue_head_handle_location(),
                                      handle);
      EXTERNAL_POINTER_WRITE_BARRIER(*this, kWaiterQueueHeadOffset,
                                     kWaiterQueueNodeTag);
      return new_state;
    }
    if (DEBUG_BOOL) {
      Address old = requester->shared_external_pointer_table().Exchange(
          handle, reinterpret_cast<Address>(waiter_head), kWaiterQueueNodeTag);
      DCHECK_EQ(old, kNullAddress);
      USE(old);
    } else {
      requester->shared_external_pointer_table().Set(
          handle, reinterpret_cast<Address>(waiter_head), kWaiterQueueNodeTag);
    }
  } else {
    new_state = HasWaitersField::update(new_state, false);
    if (handle) {
      requester->shared_external_pointer_table().Set(handle, kNullAddress,
                                                     kWaiterQueueNodeTag);
    }
  }
#else
  new_state = HasWaitersField::update(new_state, waiter_head);
  base::AsAtomicPointer::Relaxed_Store(waiter_queue_head_location(),
                                       waiter_head);
#endif  // V8_COMPRESS_POINTERS
  return new_state;
}

TQ_OBJECT_CONSTRUCTORS_IMPL(JSAtomicsMutex)

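// RAII helpers for taking a JSAtomicsMutex from C++ code: LockGuard acquires
// via JSAtomicsMutex::Lock (optionally with a timeout), TryLockGuard makes a
// single non-blocking attempt via TryLock, and the base class records whether
// the lock was actually acquired.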
JSAtomicsMutex::LockGuardBase::LockGuardBase(Isolate* isolate,
                                             DirectHandle<JSAtomicsMutex> mutex,
                                             bool locked)
    : isolate_(isolate), mutex_(mutex), locked_(locked) {}

JSAtomicsMutex::LockGuardBase::~LockGuardBase() {
  if (locked_) mutex_->Unlock(isolate_);
}

JSAtomicsMutex::LockGuard::LockGuard(Isolate* isolate,
                                     DirectHandle<JSAtomicsMutex> mutex,
                                     std::optional<base::TimeDelta> timeout)
    : LockGuardBase(isolate, mutex,
                    JSAtomicsMutex::Lock(isolate, mutex, timeout)) {}

JSAtomicsMutex::TryLockGuard::TryLockGuard(Isolate* isolate,
                                           DirectHandle<JSAtomicsMutex> mutex)
    : LockGuardBase(isolate, mutex, mutex->TryLock()) {}

// static
template <typename LockSlowPathWrapper, typename>
bool JSAtomicsMutex::LockImpl(Isolate* requester,
                              DirectHandle<JSAtomicsMutex> mutex,
                              std::optional<base::TimeDelta> timeout,
                              LockSlowPathWrapper slow_path_wrapper) {
  // First try to lock an uncontended mutex, which should be the common case.
  // If this fails, then go to the slow path to possibly put the current
  // thread to sleep.
  //
  // The fast path is done using a weak CAS which may fail spuriously on
  // architectures with load-link/store-conditional instructions.
  std::atomic<StateT>* state = mutex->AtomicStatePtr();
  StateT expected = kUnlockedUncontended;
  bool locked;
  if (V8_LIKELY(state->compare_exchange_weak(expected, kLockedUncontended,
                                             std::memory_order_acquire,
                                             std::memory_order_relaxed))) {
    locked = true;
  } else {
    locked = slow_path_wrapper(state);
  }
  if (V8_LIKELY(locked)) {
    mutex->SetCurrentThreadAsOwner();
  }
  return locked;
}

// static
bool JSAtomicsMutex::Lock(Isolate* requester,
                          DirectHandle<JSAtomicsMutex> mutex,
                          std::optional<base::TimeDelta> timeout) {
  return LockImpl(requester, mutex, timeout, [=](std::atomic<StateT>* state) {
    return LockSlowPath(requester, mutex, state, timeout);
  });
}

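// Non-blocking acquire: a single strong CAS from the unlocked, uncontended
// state. Returns false without touching the waiter queue if the mutex is
// already held or contended.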
bool JSAtomicsMutex::TryLock() {
  StateT expected = kUnlockedUncontended;
  if (V8_LIKELY(AtomicStatePtr()->compare_exchange_strong(
          expected, kLockedUncontended, std::memory_order_acquire,
          std::memory_order_relaxed))) {
    SetCurrentThreadAsOwner();
    return true;
  }
  return false;
}

void JSAtomicsMutex::Unlock(Isolate* requester) {
  // First try to unlock an uncontended mutex, which should be the common
  // case. If this fails, then go to the slow path to wake a waiting thread.
  //
  // In contrast to Lock, the fast path is done using a strong CAS which does
  // not fail spuriously. This simplifies the slow path by guaranteeing that
  // there is at least one waiter to be notified.
  DCHECK(IsCurrentThreadOwner());
  ClearOwnerThread();
  std::atomic<StateT>* state = AtomicStatePtr();
  StateT expected = kLockedUncontended;
  if (V8_LIKELY(state->compare_exchange_strong(expected, kUnlockedUncontended,
                                               std::memory_order_release,
                                               std::memory_order_relaxed))) {
    return;
  }
  UnlockSlowPath(requester, state);
}

bool JSAtomicsMutex::IsHeld() {
  return IsLockedField::decode(
      AtomicStatePtr()->load(std::memory_order_relaxed));
}

bool JSAtomicsMutex::IsCurrentThreadOwner() {
  bool result = AtomicOwnerThreadIdPtr()->load(std::memory_order_relaxed) ==
                ThreadId::Current().ToInteger();
  DCHECK_IMPLIES(result, IsHeld());
  return result;
}

void JSAtomicsMutex::SetCurrentThreadAsOwner() {
  AtomicOwnerThreadIdPtr()->store(ThreadId::Current().ToInteger(),
                                  std::memory_order_relaxed);
}

void JSAtomicsMutex::ClearOwnerThread() {
  AtomicOwnerThreadIdPtr()->store(ThreadId::Invalid().ToInteger(),
                                  std::memory_order_relaxed);
}

std::atomic<int32_t>* JSAtomicsMutex::AtomicOwnerThreadIdPtr() {
  int32_t* owner_thread_id_ptr =
      reinterpret_cast<int32_t*>(field_address(kOwnerThreadIdOffset));
  return base::AsAtomicPtr(owner_thread_id_ptr);
}

TQ_OBJECT_CONSTRUCTORS_IMPL(JSAtomicsCondition)

}  // namespace internal
}  // namespace v8

#include "src/objects/object-macros-undef.h"

#endif  // V8_OBJECTS_JS_ATOMICS_SYNCHRONIZATION_INL_H_
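For orientation, here is a minimal usage sketch, not part of this header, of how engine-internal code might enter a critical section through the LockGuard RAII helper declared for JSAtomicsMutex. The enclosing function name is hypothetical, and the sketch assumes the guard releases the mutex when it goes out of scope:

// Illustrative sketch only; assume it lives inside namespace v8::internal.
void HypotheticalCriticalSection(Isolate* isolate,
                                 DirectHandle<JSAtomicsMutex> mutex) {
  // Blocks until the mutex is acquired (no timeout supplied), then releases
  // it when lock_guard is destroyed at the end of the scope.
  JSAtomicsMutex::LockGuard lock_guard(isolate, mutex);
  // ... work that must not race with other threads locking the same mutex ...
}

A TryLockGuard can be constructed the same way when blocking is unacceptable; it makes a single TryLock attempt, and the base class records whether the lock was actually taken.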