v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
js-atomics-synchronization.h
Go to the documentation of this file.
1// Copyright 2022 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_OBJECTS_JS_ATOMICS_SYNCHRONIZATION_H_
6#define V8_OBJECTS_JS_ATOMICS_SYNCHRONIZATION_H_
7
8#include <atomic>
9
15#include "src/objects/struct.h"
16
17// Has to be the last include (doesn't have include guards):
19
20namespace v8 {
21namespace internal {
22
23#include "torque-generated/src/objects/js-atomics-synchronization-tq.inc"
24
25namespace detail {
26class WaiterQueueLockGuard;
27class WaiterQueueNode;
28template <typename T>
30} // namespace detail
31
37
38// JSSynchronizationPrimitive is the base class for JSAtomicsMutex and
39// JSAtomicsCondition. It contains a 32-bit state field and a pointer to a
40// waiter queue head, used to manage the queue of waiting threads for both: the
41// mutex and the condition variable.
42
// NOTE(review): This block is a Doxygen-rendered extract; the numeric prefix
// on each line is the upstream source line number. Several upstream lines were
// dropped by the extraction (e.g. the `class V8_EXPORT_PRIVATE
// JSSynchronizationPrimitive` declaration at upstream line 43 and the bitfield
// definitions at upstream lines 70/74). Restore the missing lines from the
// upstream V8 header before compiling.
44 : public TorqueGeneratedJSSynchronizationPrimitive<
45 JSSynchronizationPrimitive, AlwaysSharedSpaceJSObject> {
46 public:
47 // Synchronization primitives only store raw data as state.
48 static constexpr int kEndOfTaggedFieldsOffset = JSObject::kHeaderSize;
49 class BodyDescriptor;
50
51 static void IsolateDeinit(Isolate* isolate);
53
55 inline void SetNullWaiterQueueHead();
56
57 protected:
58 using StateT = uint32_t;
59
60 // The `HasWaitersField` bitfield has the following properties:
61 // - It isn't a lock bit, meaning that if this bit is 1,
62 // that doesn't imply that some thread has exclusive write access to the
63 // lock state.
64 // - It is a metadata bit that's only written with the queue lock bit held.
65 // - It is set iff the external pointer is non-null.
66 // - It can be read without holding any lock bit.
67 // - It allows for fast and threadsafe checking if there is a waiter,
68 // as dereferencing the waiter queue should be done only when the
69 // `IsWaiterQueueLockedField` bit is set.
71
72 // The `IsWaiterQueueLockedField` bitfield protects the waiter queue head from
73 // concurrent modification. It is set through a CAS operation in a spinlock.
75
76 template <class T, int size>
78
79 inline std::atomic<StateT>* AtomicStatePtr();
81
82 // Store the waiter queue head in the synchronization primitive. If the head
83 // is not null, the returned state has the kHasWaitersBit set.
84 // In case of pointer compression, the waiter queue head is encoded as an
85 // `ExternalPointerHandle`.
86 inline StateT SetWaiterQueueHead(Isolate* requester,
87 WaiterQueueNode* waiter_head,
88 StateT new_state);
89
90 // Set the new state without modifying bits outside the waiter queue mask.
91 static void SetWaiterQueueStateOnly(std::atomic<StateT>* state,
92 StateT new_state);
93
94 static bool TryLockWaiterQueueExplicit(std::atomic<StateT>* state,
95 StateT& expected);
96
97 using TorqueGeneratedJSSynchronizationPrimitive<
99 using TorqueGeneratedJSSynchronizationPrimitive<
101 using DequeueMatcher = std::function<bool(WaiterQueueNode*)>;
102
103 static constexpr StateT kEmptyState = 0;
104 static constexpr StateT kWaiterQueueMask =
106
107 private:
109
110#if V8_COMPRESS_POINTERS
111 // When pointer compression is enabled, the pointer to the waiter queue head
112 // is stored in the external pointer table and the object itself only contains
113 // a 32-bit external pointer handle.
114 inline ExternalPointerHandle* waiter_queue_head_handle_location() const;
115#else
117#endif
118 // Remove the matching async waiter queue nodes from the locked and unlocked
119 // async waiter lists in the isolate.
120 static void CleanupAsyncWaiterLists(Isolate* isolate, DequeueMatcher matcher);
121};
122
123// A non-recursive mutex that is exposed to JS.
124//
125// It has the following properties:
126// - Slim: 12-16 bytes. Lock state is 4 bytes, waiter queue head is 4 bytes
127// when V8_COMPRESS_POINTERS, and sizeof(void*) otherwise. Owner thread is
128// an additional 4 bytes.
129// - Fast when uncontended: a single weak CAS.
130// - Possibly unfair under contention.
131// - Moving GC safe. It uses an index into the shared Isolate's external
132// pointer table to store a queue of sleeping threads.
133// - Parks the main thread LocalHeap when the thread is blocked on acquiring
134// the lock. Unparks the main thread LocalHeap when unblocked. This means
135// that the lock can only be used with main thread isolates (including
136// workers) but not with helper threads that have their own LocalHeap.
137//
138// This mutex manages its own queue of waiting threads under contention, i.e.
139// it implements a futex in userland. The algorithm is inspired by WebKit's
140// ParkingLot.
141//
142// The state variable encodes the locking state as a single word: 0bLQW.
143// - W: Whether there are waiter threads in the queue.
144// - Q: Whether the waiter queue is locked.
145// - L: Whether the lock itself is locked.
146
147// The locking algorithm is as follows:
148// 1. Fast Path. Unlocked+Uncontended(0b000) -> Locked+Uncontended(0b100).
149// 2. Otherwise, slow path.
150// a. Attempt to acquire the L bit (set current state | 0b100) on the state
151// using a CAS spin loop bounded to some number of iterations.
152// b. If L bit cannot be acquired, park the current thread:
153// i. Acquire the Q bit (set current state | 0b010) in a spinlock.
154// ii. Destructively get the waiter queue head.
155// iii. Enqueue this thread's WaiterQueueNode to the tail of the list
156// pointed to by the head, possibly creating a new list.
157// iv. Release the Q bit and set the W bit
158// (set (current state | 0b001) & ~0b010 in a single CAS operation).
159//      v. Put the thread to sleep.
160//      vi. Upon wake up, go to i.
161
162// The unlocking algorithm is as follows:
163// 1. Fast Path. Locked+Uncontended(0b100) -> Unlocked+Uncontended(0b000).
164// 2. Otherwise, slow path.
165// a. Acquire the Q bit (set current state | 0b010) in a spinlock.
166// b. Destructively get the waiter queue head.
167// c. If the head is not null, dequeue the head.
168// d. Store the new waiter queue head (possibly null).
169//    e. If the list is empty, clear the W bit (set current state & ~0b001).
170//    f. Release the Q bit and clear the L bit (set current state & ~0b100).
171//       (The W and Q bits must be set in a single CAS operation).
172//    g. If the list was not empty, notify the dequeued head.
// NOTE(review): Doxygen-rendered extract — the numeric prefixes are upstream
// source line numbers, and the extraction dropped several upstream lines
// (e.g. the `class JSAtomicsMutex` declaration at upstream line 173, the
// `LockGuardBase` class head, and various method signature fragments).
// Restore the missing lines from the upstream V8 header before compiling.
174 : public TorqueGeneratedJSAtomicsMutex<JSAtomicsMutex,
175 JSSynchronizationPrimitive> {
176 public:
178 // A non-copyable wrapper class that provides an RAII-style mechanism for
179 // owning the `JSAtomicsMutex`.
181 public:
182 LockGuardBase(const LockGuardBase&) = delete;
184 inline ~LockGuardBase();
185 bool locked() const { return locked_; }
186
187 protected:
189 bool locked);
190
191 private:
195 };
196
197 // The mutex is attempted to be locked via `Lock` when a `LockGuard`
198 // object is created; the lock will be acquired unless the timeout is reached.
199 // If the mutex was acquired, then it is released when the `LockGuard` object
200 // is destructed.
201 class V8_NODISCARD LockGuard final : public LockGuardBase {
202 public:
204 std::optional<base::TimeDelta> timeout = std::nullopt);
205 };
206
207 // The mutex is attempted to be locked via `TryLock` when a `TryLockGuard`
208 // object is created. If the mutex was acquired, then it is released when the
209 // `TryLockGuard` object is destructed.
211 public:
213 };
214
217
220 bool success);
221
222 // Lock the mutex, blocking if it's currently owned by another thread.
223 // Returns false if the lock times out, true otherwise.
224 static inline bool Lock(
226 std::optional<base::TimeDelta> timeout = std::nullopt);
227
228 V8_WARN_UNUSED_RESULT inline bool TryLock();
229
230 // Try to lock the mutex; if it's currently owned by another thread, creates
231 // a LockAsyncWaiterQueueNode and enqueues it in the mutex's waiter queue.
232 // The `internal_locked_promise` is resolved when the node is notified.
233 // Returns true if the lock was acquired, false otherwise.
234 static bool LockAsync(Isolate* requester, DirectHandle<JSAtomicsMutex> mutex,
235 Handle<JSPromise> internal_locked_promise,
236 MaybeHandle<JSPromise> unlocked_promise,
237 AsyncWaiterNodeType** waiter_node,
238 std::optional<base::TimeDelta> timeout = std::nullopt);
239
240 // A wrapper for LockAsync called when an asyncWait call returns control
241 // to the lockAsync callback. It calls `LockAsync` without setting all the
242 // logic to run the callback, since the callback is already running.
245
246 // Try to take the lock and set up the promise logic to asynchronously run
247 // the callback under the lock. Always returns a promise that settles when the
248 // promise is unlocked or times out.
251 DirectHandle<Object> callback, std::optional<base::TimeDelta> timeout);
252
253 // Try to take the lock or requeue an existing node.
254 static bool LockOrEnqueueAsyncNode(Isolate* isolate,
259
260 inline void Unlock(Isolate* requester);
261
262 inline bool IsHeld();
263 inline bool IsCurrentThreadOwner();
264
266 Isolate* requester, DirectHandle<Foreign> async_locked_waiter_wrapper);
267
268 static void CleanupMatchingAsyncWaiters(Isolate* isolate,
269 WaiterQueueNode* node,
270 DequeueMatcher matcher);
271
272 // The context slots for the artificial context created for the resolve and
273 // reject handlers in charge of unlocking the mutex after the callback passed
274 // to Atomics.Mutex.lockAsync is executed.
275 enum {
276 // The context slot for the js mutex that is locked asynchronously.
278 // The context slot for the js exposed promise returned by the call to
279 // Atomics.Mutex.lockAsync; it should be resolved or rejected after the
280 // mutex is released.
282 // The isolate keeps track of WaiterQueueNodes for each mutex locked
283 // asynchronously, this is so that the lock can be released in case of worker
284 // termination. The kAsyncLockedWaiterAsyncContextSlot slot is used to store
285 // a Foreign wrapping around an ExternalPointerHandle (or raw
286 // pointer when pointer compression is disabled) pointing to the
287 // WaiterQueueNode so that it can be removed from the list when the lock is
288 // released through the usual path.
291 };
292
294
295 private:
296 friend class Factory;
297
298 // There are 3 state bits: whether there are waiter threads in the queue,
299 // whether the waiter queue is locked (both inherited from the base class),
300 // and whether the lock itself is locked (IsLockedField).
302
303 static constexpr StateT kUnlockedUncontended = kEmptyState;
304 static constexpr StateT kLockedUncontended = IsLockedField::encode(true);
305
306 inline void SetCurrentThreadAsOwner();
307 inline void ClearOwnerThread();
308
309 inline std::atomic<int32_t>* AtomicOwnerThreadIdPtr();
310
313 std::atomic<StateT>* state, std::optional<base::TimeDelta> timeout);
314 static bool LockAsyncSlowPath(Isolate* isolate,
316 std::atomic<StateT>* state,
317 Handle<JSPromise> internal_locked_promise,
318 MaybeHandle<JSPromise> unlocked_promise,
319 AsyncWaiterNodeType** waiter_node,
320 std::optional<base::TimeDelta> timeout);
321
323 std::atomic<StateT>* state);
324
325 // Returns true if the JS mutex was taken and false otherwise.
327 std::atomic<StateT>* state,
328 WaiterQueueNode* timed_out_waiter);
329
330 static bool TryLockExplicit(std::atomic<StateT>* state, StateT& expected);
331 // Returns nullopt if the JS mutex is acquired, otherwise returns an optional
332 // with a `WaiterQueueLockGuard` object.
333 static std::optional<WaiterQueueLockGuard> LockWaiterQueueOrJSMutex(
334 std::atomic<StateT>* state, StateT& current_state);
335 V8_EXPORT_PRIVATE static bool MutexTryLock(Isolate* requester,
337 std::atomic<StateT>* state);
338 V8_INLINE static bool BackoffTryLock(Isolate* requester,
340 std::atomic<StateT>* state);
341 static bool DequeueTimedOutAsyncWaiter(Isolate* requester,
343 std::atomic<StateT>* state,
344 WaiterQueueNode* timed_out_waiter);
345
348 std::atomic<StateT>* state, WaiterQueueNode* this_waiter);
349
350 template <typename LockSlowPathWrapper,
351 typename = std::enable_if<std::is_invocable_r_v<
352 bool, LockSlowPathWrapper, std::atomic<StateT>*>>>
353 static inline bool LockImpl(Isolate* requester,
355 std::optional<base::TimeDelta> timeout,
356 LockSlowPathWrapper slow_path_wrapper);
357
358 using TorqueGeneratedJSAtomicsMutex<
360 using TorqueGeneratedJSAtomicsMutex<
361 JSAtomicsMutex, JSSynchronizationPrimitive>::set_owner_thread_id;
362};
363
364// A condition variable that is exposed to JS.
365//
366// It has the following properties:
367// - Slim: 8-12 bytes. Lock state is 4 bytes, waiter queue head is 4 bytes
368// when V8_COMPRESS_POINTERS, and sizeof(void*) otherwise.
369// - Moving GC safe. It uses an index into the shared Isolate's external
370// pointer table to store a queue of sleeping threads.
371// - Parks the main thread LocalHeap when waiting. Unparks the main thread
372// LocalHeap after waking up.
373//
374// This condition variable manages its own queue of waiting threads, like
375// JSAtomicsMutex. The algorithm is inspired by WebKit's ParkingLot.
376//
377// The state variable encodes the locking state as a single word: 0bQW.
378// - W: Whether there are waiter threads in the queue.
379// - Q: Whether the waiter queue is locked.
380//
381// The waiting algorithm is as follows:
382// 1. Acquire the Q bit (set current state | 0b010) in a spinlock.
383// 2. Destructively get the waiter queue head.
384// 3. Enqueue this thread's WaiterQueueNode to the tail of the list pointed to
385// by the head, possibly creating a new list.
386// 4. Release the Q bit and set the W bit (set (current state | 0b001) & ~0b010
387// in a single CAS operation).
388// 5. Put the thread to sleep.
389//
390// The notification algorithm is as follows:
391// 1. Acquire the Q bit (set current state | 0b010) in a spinlock.
392// 2. Destructively get the waiter queue head.
393// 3. If the head is not null, dequeue the head.
394// 4. Store the new waiter queue head (possibly null).
395// 5. If the list is empty, clear the W bit (set current state & ~0b001).
396// 6. Release the Q bit (set current state & ~0b010).
397// (The W and Q bits must be set in a single CAS operation).
398// 7. If the list was not empty, notify the dequeued head.
399
// NOTE(review): Doxygen-rendered extract — the numeric prefixes are upstream
// source line numbers. The extraction dropped several upstream lines (e.g. the
// `class JSAtomicsCondition` declaration at upstream line 400, the WaitAsync
// signature fragments, and the enum body at upstream lines 433-435). Restore
// the missing lines from the upstream V8 header before compiling.
401 : public TorqueGeneratedJSAtomicsCondition<JSAtomicsCondition,
402 JSSynchronizationPrimitive> {
403 public:
407
408 V8_EXPORT_PRIVATE static bool WaitFor(Isolate* requester,
411 std::optional<base::TimeDelta> timeout);
412
416 std::optional<base::TimeDelta> timeout);
417
420
421 static constexpr uint32_t kAllWaiters = UINT32_MAX;
422
423 // Notify {count} waiters. Returns the number of waiters woken up.
424 static V8_EXPORT_PRIVATE uint32_t Notify(Isolate* requester,
426 uint32_t count);
427
428 static void CleanupMatchingAsyncWaiters(Isolate* isolate,
429 WaiterQueueNode* node,
430 DequeueMatcher matcher);
431
432 enum {
436 };
437
439
440 private:
441 friend class Factory;
442
443 static void QueueWaiter(Isolate* requester,
445 WaiterQueueNode* waiter);
446
447 using DequeueAction = std::function<uint32_t(WaiterQueueNode**)>;
448 static uint32_t DequeueExplicit(Isolate* requester,
450 std::atomic<StateT>* state,
451 const DequeueAction& dequeue_action);
452};
453
454} // namespace internal
455} // namespace v8
456
458
459#endif // V8_OBJECTS_JS_ATOMICS_SYNCHRONIZATION_H_
static constexpr U encode(T value)
Definition bit-field.h:55
static V8_EXPORT_PRIVATE MaybeDirectHandle< JSReceiver > WaitAsync(Isolate *requester, DirectHandle< JSAtomicsCondition > cv, DirectHandle< JSAtomicsMutex > mutex, std::optional< base::TimeDelta > timeout)
static void QueueWaiter(Isolate *requester, DirectHandle< JSAtomicsCondition > cv, WaiterQueueNode *waiter)
static uint32_t DequeueExplicit(Isolate *requester, DirectHandle< JSAtomicsCondition > cv, std::atomic< StateT > *state, const DequeueAction &dequeue_action)
static void HandleAsyncNotify(WaitAsyncWaiterQueueNode *node)
std::function< uint32_t(WaiterQueueNode **)> DequeueAction
static void CleanupMatchingAsyncWaiters(Isolate *isolate, WaiterQueueNode *node, DequeueMatcher matcher)
static void HandleAsyncTimeout(WaitAsyncWaiterQueueNode *node)
static V8_EXPORT_PRIVATE uint32_t Notify(Isolate *requester, DirectHandle< JSAtomicsCondition > cv, uint32_t count)
static V8_EXPORT_PRIVATE bool WaitFor(Isolate *requester, DirectHandle< JSAtomicsCondition > cv, DirectHandle< JSAtomicsMutex > mutex, std::optional< base::TimeDelta > timeout)
LockGuardBase(const LockGuardBase &)=delete
LockGuardBase & operator=(const LockGuardBase &)=delete
static DirectHandle< JSPromise > LockAsyncWrapperForWait(Isolate *requester, DirectHandle< JSAtomicsMutex > mutex)
static void HandleAsyncTimeout(LockAsyncWaiterQueueNode *node)
void UnlockAsyncLockedMutex(Isolate *requester, DirectHandle< Foreign > async_locked_waiter_wrapper)
static bool TryLockExplicit(std::atomic< StateT > *state, StateT &expected)
static void HandleAsyncNotify(LockAsyncWaiterQueueNode *node)
static V8_EXPORT_PRIVATE bool MaybeEnqueueNode(Isolate *requester, DirectHandle< JSAtomicsMutex > mutex, std::atomic< StateT > *state, WaiterQueueNode *this_waiter)
static bool Lock(Isolate *requester, DirectHandle< JSAtomicsMutex > mutex, std::optional< base::TimeDelta > timeout=std::nullopt)
V8_EXPORT_PRIVATE void UnlockSlowPath(Isolate *requester, std::atomic< StateT > *state)
static V8_EXPORT_PRIVATE bool MutexTryLock(Isolate *requester, DirectHandle< JSAtomicsMutex > mutex, std::atomic< StateT > *state)
bool LockJSMutexOrDequeueTimedOutWaiter(Isolate *requester, std::atomic< StateT > *state, WaiterQueueNode *timed_out_waiter)
static bool LockImpl(Isolate *requester, DirectHandle< JSAtomicsMutex > mutex, std::optional< base::TimeDelta > timeout, LockSlowPathWrapper slow_path_wrapper)
static DirectHandle< JSObject > CreateResultObject(Isolate *isolate, DirectHandle< Object > value, bool success)
static std::optional< WaiterQueueLockGuard > LockWaiterQueueOrJSMutex(std::atomic< StateT > *state, StateT &current_state)
static void CleanupMatchingAsyncWaiters(Isolate *isolate, WaiterQueueNode *node, DequeueMatcher matcher)
static bool LockOrEnqueueAsyncNode(Isolate *isolate, DirectHandle< JSAtomicsMutex > mutex, LockAsyncWaiterQueueNode *node)
static bool LockAsync(Isolate *requester, DirectHandle< JSAtomicsMutex > mutex, Handle< JSPromise > internal_locked_promise, MaybeHandle< JSPromise > unlocked_promise, AsyncWaiterNodeType **waiter_node, std::optional< base::TimeDelta > timeout=std::nullopt)
std::atomic< int32_t > * AtomicOwnerThreadIdPtr()
static bool DequeueTimedOutAsyncWaiter(Isolate *requester, DirectHandle< JSAtomicsMutex > mutex, std::atomic< StateT > *state, WaiterQueueNode *timed_out_waiter)
static bool LockAsyncSlowPath(Isolate *isolate, DirectHandle< JSAtomicsMutex > mutex, std::atomic< StateT > *state, Handle< JSPromise > internal_locked_promise, MaybeHandle< JSPromise > unlocked_promise, AsyncWaiterNodeType **waiter_node, std::optional< base::TimeDelta > timeout)
static V8_EXPORT_PRIVATE bool LockSlowPath(Isolate *requester, DirectHandle< JSAtomicsMutex > mutex, std::atomic< StateT > *state, std::optional< base::TimeDelta > timeout)
static constexpr StateT kUnlockedUncontended
static MaybeDirectHandle< JSPromise > LockOrEnqueuePromise(Isolate *isolate, DirectHandle< JSAtomicsMutex > mutex, DirectHandle< Object > callback, std::optional< base::TimeDelta > timeout)
static V8_INLINE bool BackoffTryLock(Isolate *requester, DirectHandle< JSAtomicsMutex > mutex, std::atomic< StateT > *state)
static void CleanupAsyncWaiterLists(Isolate *isolate, DequeueMatcher matcher)
static bool TryLockWaiterQueueExplicit(std::atomic< StateT > *state, StateT &expected)
WaiterQueueNode * DestructivelyGetWaiterQueueHead(Isolate *requester)
static void SetWaiterQueueStateOnly(std::atomic< StateT > *state, StateT new_state)
std::function< bool(WaiterQueueNode *)> DequeueMatcher
Tagged< Object > NumWaitersForTesting(Isolate *requester)
StateT SetWaiterQueueHead(Isolate *requester, WaiterQueueNode *waiter_head, StateT new_state)
TNode< Object > callback
LiftoffAssembler::CacheState state
base::Mutex mutex
detail::AsyncWaiterQueueNode< JSAtomicsMutex > LockAsyncWaiterQueueNode
detail::AsyncWaiterQueueNode< JSAtomicsCondition > WaitAsyncWaiterQueueNode
uint32_t ExternalPointerHandle
#define DECL_PRINTER(Name)
#define TQ_OBJECT_CONSTRUCTORS(Type)
#define EXPORT_DECL_VERIFIER(Name)
#define V8_EXPORT_PRIVATE
Definition macros.h:460
#define V8_INLINE
Definition v8config.h:500
#define V8_WARN_UNUSED_RESULT
Definition v8config.h:671
#define V8_NODISCARD
Definition v8config.h:693