#ifndef V8_HANDLES_TRACED_HANDLES_H_
#define V8_HANDLES_TRACED_HANDLES_H_

#ifdef V8_HOST_ARCH_64_BIT
  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
  void set_raw_object(Address value) {
    if constexpr (access_mode == AccessMode::NON_ATOMIC) {
      object_ = value;
    } else {
      reinterpret_cast<std::atomic<Address>*>(&object_)->store(
          value, std::memory_order_relaxed);
    }
  }
      bool needs_young_bit_update, bool needs_black_allocation,
  bool operator!=(const NodeIteratorImpl& rhs) const { return !(*this == rhs); }
  template <typename ConcreteTraits>
  struct BaseListTraits {
    static TracedNodeBlock*** prev(TracedNodeBlock* tnb) { return &ConcreteTraits::GetListNode(tnb).prev_; }
    static TracedNodeBlock** next(TracedNodeBlock* tnb) { return &ConcreteTraits::GetListNode(tnb).next_; }
  };
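// Illustrative sketch, not the V8 implementation: the ConcreteTraits/GetListNode
// pattern above lets a single block participate in several intrusive
// doubly-linked lists (overall/usable/young), one embedded list node per list.
// All names below (MiniListNode, Item, MiniList, OverallTraits, YoungTraits)
// are hypothetical.
#include <cassert>

struct Item;

struct MiniListNode {
  Item** prev_ = nullptr;  // slot that points at this item (list head or a next_)
  Item* next_ = nullptr;
};

struct Item {
  MiniListNode overall_node_;
  MiniListNode young_node_;
};

struct OverallTraits {
  static MiniListNode& GetListNode(Item* item) { return item->overall_node_; }
};
struct YoungTraits {
  static MiniListNode& GetListNode(Item* item) { return item->young_node_; }
};

template <typename Traits>
class MiniList {
 public:
  void PushFront(Item* item) {
    MiniListNode& node = Traits::GetListNode(item);
    node.next_ = head_;
    node.prev_ = &head_;
    if (head_) Traits::GetListNode(head_).prev_ = &node.next_;
    head_ = item;
  }
  void Remove(Item* item) {
    MiniListNode& node = Traits::GetListNode(item);
    *node.prev_ = node.next_;
    if (node.next_) Traits::GetListNode(node.next_).prev_ = node.prev_;
    node.prev_ = nullptr;
    node.next_ = nullptr;
  }
  Item* front() const { return head_; }

 private:
  Item* head_ = nullptr;
};

int main() {
  Item a, b;
  MiniList<OverallTraits> all;
  MiniList<YoungTraits> young;
  all.PushFront(&a);
  all.PushFront(&b);
  young.PushFront(&a);  // same item, independent list membership
  all.Remove(&a);
  assert(all.front() == &b);
  assert(young.front() == &a);
  return 0;
}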
#if defined(V8_USE_ADDRESS_SANITIZER)
#ifdef V8_HOST_ARCH_64_BIT
  static constexpr size_t kMaxCapacity =
      std::numeric_limits<TracedNode::IndexType>::max() - 1;
    return locally_freed;
  enum class MarkMode : uint8_t { kOnlyYoung, kAll };

  static void Destroy(Address* location);
  static void Copy(const Address* const* from, Address** to);
  static void Move(Address** from, Address** to);

  static Tagged<Object> Mark(Address* location, MarkMode mark_mode);
  static Tagged<Object> MarkConservatively(Address* inner_location,
                                           Address* traced_node_block_base,
                                           MarkMode mark_mode);
  static bool IsValidInUseNode(const Address* location);
  using NodeBounds = std::vector<std::pair<const void*, const void*>>;

  void SetIsMarking(bool);
  void SetIsSweepingOnMutatorThread(bool);

  void UpdateListOfYoungNodes();
  void DeleteEmptyBlocks();

  void ComputeWeaknessForYoungObjects();

  void IterateAndMarkYoungRootsWithOldHosts(RootVisitor*);
  void IterateYoungRootsWithOldHostsForTesting(RootVisitor*);

  bool HasYoung() const;

  V8_INLINE std::pair<TracedNodeBlock*, TracedNode*> AllocateNode();
  void FreeNode(TracedNode* node, Address zap_value);

  void Copy(const TracedNode& from_node, Address** to);
  void Move(TracedNode& from_node, Address** from, Address** to);

  bool SupportsClearingWeakNonLiveWrappers();

  size_t num_blocks_ = 0;
  bool is_marking_ = false;
  bool is_sweeping_on_mutator_thread_ = false;
  size_t used_nodes_ = 0;
  size_t block_size_bytes_ = 0;
  bool disable_block_handling_on_free_ = false;
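// Sketch of the embedder-visible lifecycle these internals back. TracedHandles
// owns the slots behind v8::TracedReference; resetting, copying, moving and
// dropping a TracedReference routes into entry points like the Destroy/Copy/
// Move statics above. Assumes a live Isolate and a Local<Object> obtained
// inside a HandleScope; the function name TracedReferenceLifecycle is
// illustrative only.
#include <utility>

#include <v8.h>

void TracedReferenceLifecycle(v8::Isolate* isolate,
                              v8::Local<v8::Object> object) {
  v8::TracedReference<v8::Object> ref;
  ref.Reset(isolate, object);                               // allocates a traced node slot
  v8::TracedReference<v8::Object> copy = ref;               // copies into a fresh slot
  v8::TracedReference<v8::Object> moved = std::move(ref);   // moves the slot
  copy.Reset();                                             // frees the copied slot
  moved.Reset();                                            // frees the moved-to slot
}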
// base::BitField helpers referenced by this header:
static constexpr T decode(U value);
static V8_NODISCARD constexpr U update(U previous, T value);
// TracedHandles (member overview):
using NodeBounds = std::vector<std::pair<const void*, const void*>>;
TracedHandles(const TracedHandles&) = delete;
TracedHandles& operator=(const TracedHandles&) = delete;
size_t used_node_count() const;
size_t used_size_bytes() const;
size_t total_size_bytes() const;
TracedNodeBlock::OverallList blocks_;
TracedNodeBlock::UsableList usable_blocks_;
TracedNodeBlock::YoungList young_blocks_;
std::vector<TracedNodeBlock*> empty_blocks_;
// TracedNodeBlock::NodeIteratorImpl (member overview):
NodeIteratorImpl(TracedNodeBlock* block);
NodeIteratorImpl(TracedNodeBlock* block, TracedNode::IndexType current_index);
NodeIteratorImpl(const NodeIteratorImpl& other) V8_NOEXCEPT;
NodeIteratorImpl& operator++();
NodeIteratorImpl operator++(int);
bool operator==(const NodeIteratorImpl& rhs) const;
bool operator!=(const NodeIteratorImpl& rhs) const;
TracedNode::IndexType current_index_;
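// Illustrative sketch with hypothetical MiniBlock/MiniNode types: an
// index-based iterator in the style of NodeIteratorImpl walks a block's node
// array by index; equality compares block and index, ++ advances the index.
#include <array>
#include <cassert>
#include <cstdint>

struct MiniNode {
  uintptr_t object = 0;
};

struct MiniBlock {
  static constexpr uint16_t kCapacity = 4;
  std::array<MiniNode, kCapacity> nodes;

  class Iterator {
   public:
    explicit Iterator(MiniBlock* block, uint16_t index = 0)
        : block_(block), current_index_(index) {}
    MiniNode& operator*() const { return block_->nodes[current_index_]; }
    Iterator& operator++() {
      ++current_index_;
      return *this;
    }
    Iterator operator++(int) {
      Iterator it(*this);
      ++(*this);
      return it;
    }
    bool operator==(const Iterator& rhs) const {
      return block_ == rhs.block_ && current_index_ == rhs.current_index_;
    }
    bool operator!=(const Iterator& rhs) const { return !(*this == rhs); }

   private:
    MiniBlock* block_;
    uint16_t current_index_;
  };

  Iterator begin() { return Iterator(this); }
  Iterator end() { return Iterator(this, kCapacity); }
};

int main() {
  MiniBlock block;
  uint16_t count = 0;
  for (MiniNode& node : block) {
    node.object = ++count;
  }
  assert(count == MiniBlock::kCapacity);
  return 0;
}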
// TracedNodeBlock (member overview):
static constexpr size_t kMinCapacity;
static constexpr size_t kMaxCapacity;
static constexpr TracedNode::IndexType kInvalidFreeListNodeIndex;
using Iterator = NodeIteratorImpl;
TracedNodeBlock(TracedHandles&, TracedNode::IndexType);
static TracedNodeBlock* Create(TracedHandles&);
static void Delete(TracedNodeBlock*);
static TracedNodeBlock& From(TracedNode& node);
TracedNode* at(TracedNode::IndexType index);
const TracedNode* at(TracedNode::IndexType index) const;
V8_INLINE TracedNode* AllocateNode();
void FreeNode(TracedNode* node, Address zap_value);
const void* nodes_begin_address() const;
const void* nodes_end_address() const;
TracedHandles& traced_handles() const;
size_t size_bytes() const;
bool InUsableList() const;
bool NeedsReprocessing() const;
void SetReprocessing(bool value);
void SetLocallyFreed(TracedNode::IndexType count);
TracedNode::IndexType ConsumeLocallyFreed();
ListNode overall_list_node_;
ListNode usable_list_node_;
ListNode young_list_node_;
TracedHandles& traced_handles_;
TracedNode::IndexType used_;
const TracedNode::IndexType capacity_;
TracedNode::IndexType first_free_node_;
TracedNode::IndexType locally_freed_;
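// Illustrative sketch with a hypothetical MiniNodeBlock, not the V8 code: the
// fields above (first_free_node_, used_, capacity_, kInvalidFreeListNodeIndex)
// describe an index-chained free list over a fixed node array; AllocateNode
// pops the head index, FreeNode zaps the slot and pushes its index back. In
// the listing, each node keeps its own next-free index (next_free_index_);
// this sketch uses a side array for brevity.
#include <cassert>
#include <cstdint>
#include <limits>
#include <vector>

class MiniNodeBlock {
 public:
  using IndexType = uint16_t;
  static constexpr IndexType kInvalidIndex =
      std::numeric_limits<IndexType>::max();

  explicit MiniNodeBlock(IndexType capacity)
      : slots_(capacity), next_free_(capacity, kInvalidIndex) {
    // Thread all slots onto the free list: 0 -> 1 -> ... -> capacity-1.
    for (IndexType i = 0; i + 1 < capacity; ++i) next_free_[i] = i + 1;
    first_free_node_ = capacity > 0 ? 0 : kInvalidIndex;
  }

  bool IsFull() const { return first_free_node_ == kInvalidIndex; }

  IndexType AllocateNode() {
    assert(!IsFull());
    IndexType index = first_free_node_;
    first_free_node_ = next_free_[index];
    ++used_;
    return index;
  }

  void FreeNode(IndexType index, uintptr_t zap_value) {
    slots_[index] = zap_value;  // zap the freed slot, mirroring zap_value above
    next_free_[index] = first_free_node_;
    first_free_node_ = index;
    --used_;
  }

  IndexType used() const { return used_; }

 private:
  std::vector<uintptr_t> slots_;
  std::vector<IndexType> next_free_;  // per-node "next free" index
  IndexType first_free_node_ = kInvalidIndex;
  IndexType used_ = 0;
};

int main() {
  MiniNodeBlock block(3);
  auto a = block.AllocateNode();
  auto b = block.AllocateNode();
  block.FreeNode(a, 0xdeadbeef);
  auto c = block.AllocateNode();  // reuses the most recently freed slot
  assert(c == a);
  assert(block.used() == 2);
  (void)b;
  return 0;
}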
// TracedNode (member overview):
TracedNode(IndexType, IndexType);
static TracedNode* FromLocation(Address* location);
static const TracedNode* FromLocation(const Address* location);
IndexType next_free() const;
void set_next_free(IndexType next_free_index);
void set_is_in_use(bool v);
bool is_in_young_list() const;
void set_is_in_young_list(bool v);
bool has_old_host() const;
void set_has_old_host(bool v);
bool is_droppable() const;
void set_droppable(bool v);
Tagged<Object> object() const;
Address raw_object() const;
void set_raw_object(Address value);
FullObjectSlot location();
V8_INLINE FullObjectSlot Publish(Tagged<Object> object, bool needs_young_bit_update,
                                 bool needs_black_allocation, bool has_old_host,
                                 bool is_droppable);
void Release(Address zap_value);
bool IsMetadataCleared() const;
IndexType next_free_index_;
std::atomic<bool> is_marked_;
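// Illustrative sketch with a hypothetical MiniBitField (not base::BitField):
// per-node flags like is_in_use/is_in_young_list/has_old_host/is_droppable can
// be packed into a single byte with decode()/update() helpers of the shape
// listed earlier. The flag layout below is an assumption for illustration.
#include <cassert>
#include <cstdint>

template <typename T, int kShift, int kSize, typename U = uint8_t>
struct MiniBitField {
  static constexpr U kMask = ((U{1} << kSize) - 1) << kShift;
  static constexpr U encode(T value) {
    return static_cast<U>(static_cast<U>(value) << kShift);
  }
  static constexpr T decode(U value) {
    return static_cast<T>((value & kMask) >> kShift);
  }
  static constexpr U update(U previous, T value) {
    return static_cast<U>((previous & ~kMask) | encode(value));
  }
};

// One bit per node flag, all packed in a single uint8_t.
using IsInUse = MiniBitField<bool, 0, 1>;
using IsInYoungList = MiniBitField<bool, 1, 1>;
using HasOldHost = MiniBitField<bool, 2, 1>;
using IsDroppable = MiniBitField<bool, 3, 1>;

int main() {
  uint8_t flags = 0;
  flags = IsInUse::update(flags, true);
  flags = HasOldHost::update(flags, true);
  assert(IsInUse::decode(flags));
  assert(!IsInYoungList::decode(flags));
  flags = IsInUse::update(flags, false);
  assert(!IsInUse::decode(flags));
  assert(HasOldHost::decode(flags));
  return 0;
}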
// External symbols referenced by this header: kSystemPointerSize, kNullAddress,
// WeakSlotCallbackWithHeap (bool (*)(Heap* heap, FullObjectSlot pointer)),
// DCHECK_EQ(v1, v2), V8_EXPORT_PRIVATE.
// TracedNodeBlock list traits (one GetListNode() per overall/usable/young list):
static TracedNodeBlock*** prev(TracedNodeBlock* tnb);
static TracedNodeBlock** next(TracedNodeBlock* tnb);
static bool in_use(const TracedNodeBlock* tnb);
static bool non_empty(TracedNodeBlock* tnb);
static ListNode& GetListNode(TracedNodeBlock* tnb);  // overall list
static ListNode& GetListNode(TracedNodeBlock* tnb);  // usable list
static ListNode& GetListNode(TracedNodeBlock* tnb);  // young list
const size_t num_young_blocks_;  // TracedHandles field