    : next_free_index_(next_free_index), index_(index) {
 
 
                "TracedNodeBlock size is used to auto-align node FAM storage.");
  const size_t min_wanted_size =

  const size_t capacity = std::min(

  CHECK_LT(capacity, std::numeric_limits<TracedNode::IndexType>::max());
  const auto result = std::make_pair(raw_result.ptr, capacity);
 
 
  node->Release(zap_value);
  DCHECK(!node->is_in_use());
 
 
  reinterpret_cast<std::atomic<Address*>*>(slot)->store(
      val, std::memory_order_relaxed);
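
// --- Illustrative sketch, not part of traced-handles.cc ---
// The fragment above publishes a new slot value through a relaxed atomic store
// by reinterpreting the plain pointer as std::atomic. A minimal standalone
// version of that pattern; PublishSlot is a hypothetical name, and the cast
// assumes std::atomic<T*> is layout-compatible with T*, as the original does.
#include <atomic>
#include <cstdint>

using Address = std::uintptr_t;

void PublishSlot(Address** slot, Address* val) {
  // Relaxed ordering: the store itself is atomic, but it establishes no
  // happens-before relationship with surrounding memory operations.
  reinterpret_cast<std::atomic<Address*>*>(slot)->store(
      val, std::memory_order_relaxed);
}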
 
 
  DCHECK(!block->InYoungList());
 
 
    block.FreeNode(node, zap_value);

  block.FreeNode(node, zap_value);
  if (block.IsEmpty()) {

    if (block.InYoungList()) {

      DCHECK(!block.InYoungList());
 
 
  size_t block_size_bytes = 0;

    block_size_bytes += block->size_bytes();

    block_size_bytes += block->size_bytes();

  USE(block_size_bytes);
 
 
    Destroy(to_node_block, *to_node);

  to_node = &from_node;

    to_node->set_markbit();

    const bool object_is_young_and_not_yet_recorded =

    if (object_is_young_and_not_yet_recorded &&
 
 
  for (const auto* block : blocks_) {
    block_bounds.push_back(
        {block->nodes_begin_address(), block->nodes_end_address()});

  std::sort(block_bounds.begin(), block_bounds.end(),
            [](const auto& pair1, const auto& pair2) {
              return pair1.first < pair2.first;
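
// --- Illustrative sketch, not part of traced-handles.cc ---
// block_bounds above is a vector of [begin, end) address pairs sorted by begin
// address. One common way such sorted bounds are consumed is a binary-search
// containment check; ContainsAddress below is a hypothetical helper written
// against the same pair layout, not V8 API.
#include <algorithm>
#include <utility>
#include <vector>

using NodeBounds = std::vector<std::pair<const void*, const void*>>;

bool ContainsAddress(const NodeBounds& bounds, const void* address) {
  // First entry whose begin address is strictly greater than `address`...
  auto it = std::upper_bound(
      bounds.begin(), bounds.end(), address,
      [](const void* addr, const auto& pair) { return addr < pair.first; });
  if (it == bounds.begin()) return false;
  // ...so the only candidate is the previous entry; check its end bound.
  --it;
  return address >= it->first && address < it->second;
}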
 
 
  const bool needs_to_mark_as_old =

    bool contains_young_node = false;

    DCHECK(block->InYoungList());

    for (auto* node : *block) {
      if (!node->is_in_young_list()) continue;
      DCHECK(node->is_in_use());

        contains_young_node = true;

        if (needs_to_mark_as_old) node->set_has_old_host(true);

        node->set_is_in_young_list(false);
        node->set_has_old_host(false);

    if (contains_young_node) {

      DCHECK(!block->InYoungList());
 
 
    auto* block = *(it++);
    for (auto* node : *block) {
      if (!node->is_in_use()) continue;

      if (!node->markbit()) {

      node->clear_markbit();

    if (block->InYoungList()) {

      DCHECK(!block->InYoungList());
 
 
    for (auto* node : *block) {
      if (!node->is_in_young_list()) continue;
      DCHECK(node->is_in_use());

      if (!node->markbit()) {

      node->clear_markbit();
 
 
  if (!v8_flags.reclaim_unmodified_wrappers) {
 
 
template <typename Derived>
class ParallelWeakHandlesProcessor {

    explicit Job(Derived& derived) : derived_(derived) {}

        RunImpl<true>(delegate);

                                 Derived::kBackgroundThreadScope,
                                 ThreadKind::kBackground, derived_.trace_id_,

        RunImpl<false>(delegate);
 
    size_t GetMaxConcurrency(size_t worker_count) const override {
      const auto processed_young_blocks =
          derived_.processed_young_blocks_.load(std::memory_order_relaxed);
      if (derived_.num_young_blocks_ < processed_young_blocks) {

      if (!v8_flags.parallel_reclaim_unmodified_wrappers) {

      const auto blocks_left =
          derived_.num_young_blocks_ - processed_young_blocks;
      constexpr size_t kMaxParallelTasks = 3;
      constexpr size_t kBlocksPerTask = 8;
      const auto wanted_tasks =
          (blocks_left + (kBlocksPerTask - 1)) / kBlocksPerTask;
      return std::min(kMaxParallelTasks, wanted_tasks);
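
// --- Illustrative sketch, not part of traced-handles.cc ---
// GetMaxConcurrency above asks for one task per kBlocksPerTask remaining
// blocks, rounded up, and caps the result at kMaxParallelTasks. WantedTasks is
// a hypothetical standalone helper mirroring that arithmetic with the same
// constants.
#include <algorithm>
#include <cstddef>

std::size_t WantedTasks(std::size_t blocks_left) {
  constexpr std::size_t kMaxParallelTasks = 3;
  constexpr std::size_t kBlocksPerTask = 8;
  // Ceiling division: 9 blocks left -> 2 tasks, 24 blocks left -> 3 tasks.
  const std::size_t wanted = (blocks_left + (kBlocksPerTask - 1)) / kBlocksPerTask;
  return std::min(kMaxParallelTasks, wanted);
}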
 
    template <bool IsMainThread>
    void RunImpl(JobDelegate* delegate) {

      auto it = derived_.young_blocks_.begin();

      for (size_t index = derived_.processed_young_blocks_.fetch_add(
               1, std::memory_order_relaxed);

           index = derived_.processed_young_blocks_.fetch_add(
               +1, std::memory_order_relaxed)) {
        while (current < index) {

        TracedNodeBlock* block = *it;
        DCHECK(block->InYoungList());
        derived_.template ProcessBlock<IsMainThread>(block);

        if (delegate->ShouldYield()) {
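
// --- Illustrative sketch, not part of traced-handles.cc ---
// The loop above hands out block indices via fetch_add on a shared atomic
// counter, so parallel workers never claim the same index twice. A minimal
// standalone model of that claiming scheme; ClaimAndProcess and the int
// work items are hypothetical.
#include <atomic>
#include <cstddef>
#include <vector>

void ClaimAndProcess(std::atomic<std::size_t>& next_index,
                     std::vector<int>& work_items) {
  for (std::size_t index = next_index.fetch_add(1, std::memory_order_relaxed);
       index < work_items.size();
       index = next_index.fetch_add(1, std::memory_order_relaxed)) {
    // Each index is claimed by exactly one worker.
    work_items[index] += 1;
  }
}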
 
  ParallelWeakHandlesProcessor(Heap* heap,
                               TracedNodeBlock::YoungList& young_blocks,
                               size_t num_young_blocks)

        trace_id_(reinterpret_cast<uint64_t>(this) ^
                  heap_->tracer()->CurrentEpoch(
                      GCTracer::Scope::SCAVENGER_SCAVENGE)) {}

    V8::GetCurrentPlatform()

                    std::make_unique<Job>(static_cast<Derived&>(*this)))

  uint64_t trace_id() const { return trace_id_; }
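
// --- Illustrative sketch, not part of traced-handles.cc ---
// ParallelWeakHandlesProcessor is a CRTP base: it hands static_cast<Derived&>(*this)
// to its Job, and the Job calls derived_.template ProcessBlock<IsMainThread>(...)
// without any virtual dispatch. A reduced model of that static-dispatch pattern;
// ProcessorBase and MyProcessor are hypothetical names.
template <typename Derived>
class ProcessorBase {
 public:
  void Run() {
    // No virtual call: the concrete ProcessBlock is resolved at compile time.
    static_cast<Derived&>(*this).template ProcessBlock<true>(42);
  }
};

class MyProcessor final : public ProcessorBase<MyProcessor> {
 public:
  template <bool IsMainThread>
  void ProcessBlock(int block_id) {
    // Per-derived-class processing goes here.
    (void)block_id;
  }
};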
 
class ComputeWeaknessProcessor final
    : public ParallelWeakHandlesProcessor<ComputeWeaknessProcessor> {

      GCTracer::Scope::SCAVENGER_TRACED_HANDLES_COMPUTE_WEAKNESS_PARALLEL;

      SCAVENGER_BACKGROUND_TRACED_HANDLES_COMPUTE_WEAKNESS_PARALLEL;
  static constexpr char kStartNote[] = "ComputeWeaknessProcessor start";

  ComputeWeaknessProcessor(Heap* heap, TracedNodeBlock::YoungList& young_blocks,
                           size_t num_young_blocks)
      : ParallelWeakHandlesProcessor(heap, young_blocks, num_young_blocks) {}

  template <bool IsMainThread>
  void ProcessBlock(TracedNodeBlock* block) {
    for (TracedNode* node : *block) {
      if (!node->is_in_young_list()) {

      DCHECK(node->is_in_use());

      if (node->is_droppable() &&
          JSObject::IsUnmodifiedApiObject(node->location())) {
        node->set_weak(true);
 
class ClearWeaknessProcessor final
    : public ParallelWeakHandlesProcessor<ClearWeaknessProcessor> {

      GCTracer::Scope::SCAVENGER_TRACED_HANDLES_RESET_PARALLEL;

      GCTracer::Scope::SCAVENGER_BACKGROUND_TRACED_HANDLES_RESET_PARALLEL;
  static constexpr char kStartNote[] = "ClearWeaknessProcessor start";

                         size_t num_young_blocks, Heap* heap,

      : ParallelWeakHandlesProcessor(heap, young_blocks, num_young_blocks),

  template <bool IsMainThread>
  void ProcessBlock(TracedNodeBlock* block) {
    const auto saved_used_nodes_in_block = block->used();
    for (TracedNode* node : *block) {
      if (!node->is_weak()) {

      DCHECK(node->is_in_use());
      DCHECK(node->is_in_young_list());

        FullObjectSlot slot = node->location();
        bool node_cleared = true;
        if constexpr (IsMainThread) {

          node_cleared = handler_->TryResetRoot(

          DCHECK(!node->is_in_use());

          block->SetReprocessing(true);

        node->set_weak(false);

          visitor_->VisitRootPointer(Root::kTracedHandles, nullptr,

    DCHECK_GE(saved_used_nodes_in_block, block->used());
    block->SetLocallyFreed(saved_used_nodes_in_block - block->used());
 
    cpp_heap->EnterDisallowGCScope();
    cpp_heap->EnterNoGCScope();

  size_t num_young_blocks = 0;

    DCHECK(block->InYoungList());
    DCHECK(!block->NeedsReprocessing());

                             should_reset_handle);

    DCHECK(block->InYoungList());

    if (!block->IsFull() && !block->IsEmpty()) {

        DCHECK(!block->InUsableList());

        DCHECK(block->InUsableList());

    } else if (block->IsEmpty()) {

      DCHECK(!block->NeedsReprocessing());

        DCHECK(block->InUsableList());

        DCHECK(!block->InUsableList());

      DCHECK(block->InYoungList());

      DCHECK(!block->InYoungList());

    if (!block->NeedsReprocessing()) {

    block->SetReprocessing(false);
    job.template ProcessBlock<true>(block);
    DCHECK(!block->NeedsReprocessing());

    const auto locally_freed = block->ConsumeLocallyFreed();

    cpp_heap->LeaveNoGCScope();
    cpp_heap->LeaveDisallowGCScope();
 
 
    for (auto* node : *block) {
      if (!node->is_in_use()) continue;
 
 
    for (auto* node : *block) {
      if (!node->is_in_young_list()) continue;
      DCHECK(node->is_in_use());
 
 
    DCHECK(block->InYoungList());

    for (auto* node : *block) {
      if (!node->is_in_young_list()) continue;
      DCHECK(node->is_in_use());

      if (node->is_weak()) continue;
 
 
    for (auto* node : *block) {
      if (!node->is_in_young_list()) continue;
      DCHECK(node->is_in_use());
      if (!node->has_old_host()) continue;

      if (node->is_weak()) continue;
 
 
    for (auto* node : *block) {
      if (!node->is_in_young_list()) continue;
      DCHECK(node->is_in_use());
      if (!node->has_old_host()) continue;

      if (node->is_weak()) continue;
 
 
  if (!location) return;

  auto& traced_handles = node_block.traced_handles();
  traced_handles.Destroy(node_block, *node);
 
 
  auto& traced_handles = node_block.traced_handles();
  traced_handles.Copy(*from_node, to);
 
 
  auto& traced_handles = node_block.traced_handles();
  traced_handles.Move(*from_node, from, to);
 
 
      !node.is_in_young_list())

      Tagged<Object>(reinterpret_cast<std::atomic<Address>*>(location)->load(
          std::memory_order_acquire));

  DCHECK(node->is_in_use());
  return MarkObject(object, *node, mark_mode);
 
 
  const ptrdiff_t delta = reinterpret_cast<uintptr_t>(inner_location) -
                          reinterpret_cast<uintptr_t>(traced_node_block_base);
  const auto index = delta / sizeof(TracedNode);

      reinterpret_cast<TracedNode*>(traced_node_block_base)[index];
  if (!node.is_in_use()) return Smi::zero();
  return MarkObject(node.object(), node, mark_mode);
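
// --- Illustrative sketch, not part of traced-handles.cc ---
// MarkConservatively above maps an interior pointer back to its TracedNode by
// taking the byte distance to the block's node array base and dividing by
// sizeof(TracedNode). The same index-recovery idea on a plain array;
// Element and ElementFromInnerPointer are hypothetical.
#include <cstddef>
#include <cstdint>

struct Element {
  std::uint64_t payload[4];
};

Element* ElementFromInnerPointer(Element* base, void* inner_pointer) {
  const std::ptrdiff_t delta = reinterpret_cast<std::uintptr_t>(inner_pointer) -
                               reinterpret_cast<std::uintptr_t>(base);
  // Integer division truncates, so any address inside an element maps back to
  // that element's index.
  const std::size_t index = static_cast<std::size_t>(delta) / sizeof(Element);
  return &base[index];
}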
 
 
  return node->is_in_use();
 
 