traced-handles.cc
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/handles/traced-handles.h"

#include <limits>

#include "include/v8-internal.h"
#include "src/base/logging.h"
#include "src/common/globals.h"
#include "src/handles/handles.h"
#include "src/objects/objects.h"
#include "src/objects/slots.h"

namespace v8::internal {

class TracedNodeBlock;

TracedNode::TracedNode(IndexType index, IndexType next_free_index)
    : next_free_index_(next_free_index), index_(index) {
  // TracedNode size should stay within 2 words.
  static_assert(sizeof(TracedNode) <= (2 * kSystemPointerSize));
  DCHECK(!is_in_use());
  DCHECK(!is_weak());
  DCHECK(!markbit());
}

void TracedNode::Release(Address zap_value) {
  // Clear all flags.
  flags_ = 0;
  set_raw_object(zap_value);
}

// static
TracedNodeBlock* TracedNodeBlock::Create(TracedHandles& traced_handles) {
  static_assert(alignof(TracedNodeBlock) >= alignof(TracedNode));
  static_assert(sizeof(TracedNodeBlock) % alignof(TracedNode) == 0,
                "TracedNodeBlock size is used to auto-align node FAM storage.");
  const size_t min_wanted_size =
      sizeof(TracedNodeBlock) +
      sizeof(TracedNode) * TracedNodeBlock::kMinCapacity;
  const auto raw_result = v8::base::AllocateAtLeast<char>(min_wanted_size);
  const size_t capacity = std::min(
      (raw_result.count - sizeof(TracedNodeBlock)) / sizeof(TracedNode),
      kMaxCapacity);
  CHECK_LT(capacity, std::numeric_limits<TracedNode::IndexType>::max());
  const auto result = std::make_pair(raw_result.ptr, capacity);
  return new (result.first) TracedNodeBlock(
      traced_handles, static_cast<TracedNode::IndexType>(result.second));
}
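
// Illustration (a minimal sketch, not part of traced-handles.cc): the "block
// header followed by a trailing node array" layout that Create() sets up with
// a single allocation and placement new. SketchNode/SketchBlock are simplified
// stand-ins; assumes <cstddef>, <cstdint>, <cstdlib>, and <new>.
namespace traced_handles_sketches {

struct SketchNode {
  uintptr_t object = 0;
  uint16_t next_free = 0;
};

struct SketchBlock {
  explicit SketchBlock(size_t capacity) : capacity(capacity) {}
  // Node storage starts directly behind the block header.
  SketchNode* nodes() { return reinterpret_cast<SketchNode*>(this + 1); }
  const size_t capacity;
};

// Mirrors the static_asserts above: the header size must keep the trailing
// node array suitably aligned.
static_assert(sizeof(SketchBlock) % alignof(SketchNode) == 0);

inline SketchBlock* CreateSketchBlock(size_t capacity) {
  // One allocation holds the header and `capacity` nodes, so a node pointer
  // plus its index is enough to find the owning block again.
  void* memory =
      std::malloc(sizeof(SketchBlock) + sizeof(SketchNode) * capacity);
  auto* block = new (memory) SketchBlock(capacity);
  for (size_t i = 0; i < capacity; i++) new (&block->nodes()[i]) SketchNode();
  return block;
}

}  // namespace traced_handles_sketches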

// static
void TracedNodeBlock::Delete(TracedNodeBlock* block) { free(block); }

TracedNodeBlock::TracedNodeBlock(TracedHandles& traced_handles,
                                 TracedNode::IndexType capacity)
    : traced_handles_(traced_handles), capacity_(capacity) {
  for (TracedNode::IndexType i = 0; i < (capacity_ - 1); i++) {
    new (at(i)) TracedNode(i, i + 1);
  }
  new (at(capacity_ - 1)) TracedNode(capacity_ - 1, kInvalidFreeListNodeIndex);
}
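
// Illustration (a minimal sketch, not from this file): the index-based free
// list that the constructor threads through the node array above, with
// simplified names; kInvalid stands in for kInvalidFreeListNodeIndex and the
// capacity must be at least one. Assumes <cstdint> and <vector>.
namespace traced_handles_sketches {

struct FreeListSketch {
  static constexpr uint16_t kInvalid = UINT16_MAX;

  explicit FreeListSketch(uint16_t capacity) : next_free(capacity) {
    // Node i points to node i + 1; the last node terminates the list.
    for (uint16_t i = 0; i + 1 < capacity; i++) next_free[i] = i + 1;
    next_free[capacity - 1] = kInvalid;
  }

  uint16_t Allocate() {
    const uint16_t index = first_free;
    first_free = next_free[index];  // Pop the head of the free list.
    return index;
  }

  void Free(uint16_t index) {
    next_free[index] = first_free;  // Push the node back as the new head.
    first_free = index;
  }

  std::vector<uint16_t> next_free;
  uint16_t first_free = 0;
};

}  // namespace traced_handles_sketches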

// static
TracedNodeBlock& TracedNodeBlock::From(TracedNode& node) {
  TracedNode* first_node = &node - node.index();
  return *reinterpret_cast<TracedNodeBlock*>(
      reinterpret_cast<uintptr_t>(first_node) - sizeof(TracedNodeBlock));
}

// static
const TracedNodeBlock& TracedNodeBlock::From(const TracedNode& node) {
  return From(const_cast<TracedNode&>(node));
}
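
// Illustration (a minimal sketch): From() is a container_of-style computation.
// Using the simplified SketchNode/SketchBlock types introduced after Create()
// above: step back to the first node of the array, then back over the header.
// Assumes <cstdint>.
namespace traced_handles_sketches {

inline SketchBlock* BlockOf(SketchNode* node, size_t index) {
  SketchNode* first_node = node - index;  // Start of the trailing node array.
  return reinterpret_cast<SketchBlock*>(
      reinterpret_cast<uintptr_t>(first_node) - sizeof(SketchBlock));
}

}  // namespace traced_handles_sketches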

void TracedNodeBlock::FreeNode(TracedNode* node, Address zap_value) {
  DCHECK(node->is_in_use());
  node->Release(zap_value);
  DCHECK(!node->is_in_use());
  node->set_next_free(first_free_node_);
  first_free_node_ = node->index();
  used_--;
}

void SetSlotThreadSafe(Address** slot, Address* val) {
  reinterpret_cast<std::atomic<Address*>*>(slot)->store(
      val, std::memory_order_relaxed);
}

void TracedHandles::RefillUsableNodeBlocks() {
  TracedNodeBlock* block;
  if (empty_blocks_.empty()) {
    block = TracedNodeBlock::Create(*this);
    block_size_bytes_ += block->size_bytes();
  } else {
    block = empty_blocks_.back();
    empty_blocks_.pop_back();
  }
  usable_blocks_.PushFront(block);
  blocks_.PushFront(block);
  num_blocks_++;
  DCHECK(!block->InYoungList());
  DCHECK(block->IsEmpty());
}

void TracedHandles::FreeNode(TracedNode* node, Address zap_value) {
  auto& block = TracedNodeBlock::From(*node);
  if (V8_UNLIKELY(/* condition elided in this listing */)) {
    // The list of blocks and used nodes will be updated separately.
    block.FreeNode(node, zap_value);
    return;
  }
  if (V8_UNLIKELY(block.IsFull())) {
    usable_blocks_.PushFront(&block);
  }
  block.FreeNode(node, zap_value);
  if (block.IsEmpty()) {
    usable_blocks_.Remove(&block);
    blocks_.Remove(&block);
    if (block.InYoungList()) {
      young_blocks_.Remove(&block);
      DCHECK(!block.InYoungList());
      num_young_blocks_--;
    }
    num_blocks_--;
    empty_blocks_.push_back(&block);
  }
  used_nodes_--;
}

TracedHandles::TracedHandles(Isolate* isolate) : isolate_(isolate) {}

TracedHandles::~TracedHandles() {
  size_t block_size_bytes = 0;
  while (!blocks_.empty()) {
    auto* block = blocks_.Front();
    blocks_.PopFront();
    block_size_bytes += block->size_bytes();
    TracedNodeBlock::Delete(block);
  }
  for (auto* block : empty_blocks_) {
    block_size_bytes += block->size_bytes();
    TracedNodeBlock::Delete(block);
  }
  USE(block_size_bytes);
  DCHECK_EQ(block_size_bytes, block_size_bytes_);
}

void TracedHandles::Destroy(TracedNodeBlock& node_block, TracedNode& node) {

  // If sweeping on the mutator thread is running then the handle destruction
  // may be a result of a Reset() call from a destructor. The node will be
  // reclaimed on the next cycle.
  //
  // This allows v8::TracedReference::Reset() calls from destructors on
  // objects that may be used from stack and heap.
  if (is_sweeping_on_mutator_thread_) {
    return;
  }

  if (is_marking_) {
    // Incremental/concurrent marking is running.
    //
    // On-heap traced nodes are released in the atomic pause in
    // `ResetDeadNodes()` when they are discovered as not marked. Eagerly clear
    // out the object here to avoid needlessly marking it from this point on.
    // The node will be reclaimed on the next cycle.
    node.set_raw_object<AccessMode::ATOMIC>(kNullAddress);
    return;
  }

  // In case marking and sweeping are off, the handle may be freed immediately.
  // Note that this includes also the case when invoking the first pass
  // callbacks during the atomic pause which requires releasing a node fully.
  FreeNode(&node, kTracedHandleEagerResetZapValue);
}
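
// Illustration (a minimal sketch of the embedder-facing path that ends up in
// Destroy() above, using the public v8::TracedReference API from
// v8-traced-handle.h): resetting a reference from a C++ destructor is
// explicitly supported; whether the node is freed eagerly or only on the next
// GC cycle depends on the marking/sweeping state checked above.
namespace traced_handles_sketches {

struct WrapperSketch {
  v8::TracedReference<v8::Object> js_object;

  ~WrapperSketch() {
    // May run while sweeping is active on the mutator thread; in that case
    // the underlying node is only reclaimed on the next GC cycle.
    js_object.Reset();
  }
};

}  // namespace traced_handles_sketches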

void TracedHandles::Copy(const TracedNode& from_node, Address** to) {
  DCHECK_NE(kGlobalHandleZapValue, from_node.raw_object());
  FullObjectSlot o =
      Create(from_node.raw_object(), reinterpret_cast<Address*>(to),
             TracedReferenceStoreMode::kAssigningStore,
             TracedReferenceHandling::kDefault);
  SetSlotThreadSafe(to, o.location());
#ifdef VERIFY_HEAP
  if (v8_flags.verify_heap) {
    Object::ObjectVerify(Tagged<Object>(**to), isolate_);
  }
#endif  // VERIFY_HEAP
}

void TracedHandles::Move(TracedNode& from_node, Address** from, Address** to) {
  DCHECK(from_node.is_in_use());

  // Deal with old "to".
  auto* to_node = TracedNode::FromLocation(*to);
  DCHECK_IMPLIES(*to, to_node->is_in_use());
  DCHECK_IMPLIES(*to, kGlobalHandleZapValue != to_node->raw_object());
  if (*to) {
    auto& to_node_block = TracedNodeBlock::From(*to_node);
    Destroy(to_node_block, *to_node);
  }

  // Set "to" to "from".
  SetSlotThreadSafe(to, *from);
  to_node = &from_node;

  // Deal with new "to".
  DCHECK_NOT_NULL(*to);
  DCHECK_EQ(*from, *to);
  if (is_marking_) {
    // Write barrier needs to cover node as well as object.
    to_node->set_markbit();
    WriteBarrier::MarkingFromTracedHandle(to_node->object());
  } else if (auto* cpp_heap = GetCppHeapIfUnifiedYoungGC(isolate_)) {
    const bool object_is_young_and_not_yet_recorded =
        !from_node.has_old_host() &&
        HeapLayout::InYoungGeneration(from_node.object());
    if (object_is_young_and_not_yet_recorded &&
        IsCppGCHostOld(*cpp_heap, reinterpret_cast<Address>(to))) {
      DCHECK(from_node.is_in_young_list());
      from_node.set_has_old_host(true);
    }
  }
  SetSlotThreadSafe(from, nullptr);
}

void TracedHandles::SetIsMarking(bool value) {
  DCHECK_EQ(is_marking_, !value);
  is_marking_ = value;
}

void TracedHandles::SetIsSweepingOnMutatorThread(bool value) {
  DCHECK_EQ(is_sweeping_on_mutator_thread_, !value);
  is_sweeping_on_mutator_thread_ = value;
}

const TracedHandles::NodeBounds TracedHandles::GetNodeBounds() const {
  TracedHandles::NodeBounds block_bounds;
  block_bounds.reserve(num_blocks_);
  for (const auto* block : blocks_) {
    block_bounds.push_back(
        {block->nodes_begin_address(), block->nodes_end_address()});
  }
  std::sort(block_bounds.begin(), block_bounds.end(),
            [](const auto& pair1, const auto& pair2) {
              return pair1.first < pair2.first;
            });
  return block_bounds;
}
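
// Illustration (a minimal sketch): how the sorted [begin, end) pairs returned
// by GetNodeBounds() can be queried for an arbitrary pointer, as conservative
// scanning does before calling MarkConservatively(). Assumes <algorithm>,
// <utility>, and <vector>.
namespace traced_handles_sketches {

using NodeBoundsSketch = std::vector<std::pair<const void*, const void*>>;

inline bool MayPointIntoNodes(const NodeBoundsSketch& sorted_bounds,
                              const void* maybe_inner_pointer) {
  // Find the first range whose begin is greater than the pointer; the only
  // candidate range is the one directly before it.
  auto it = std::upper_bound(
      sorted_bounds.begin(), sorted_bounds.end(), maybe_inner_pointer,
      [](const void* ptr, const auto& bounds) { return ptr < bounds.first; });
  if (it == sorted_bounds.begin()) return false;
  --it;
  // `maybe_inner_pointer >= it->first` holds by construction.
  return maybe_inner_pointer < it->second;
}

}  // namespace traced_handles_sketches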

void TracedHandles::UpdateListOfYoungNodes() {
  const bool needs_to_mark_as_old =
      static_cast<bool>(GetCppHeapIfUnifiedYoungGC(isolate_));

  for (auto it = young_blocks_.begin(); it != young_blocks_.end();) {
    bool contains_young_node = false;
    TracedNodeBlock* const block = *it;
    DCHECK(block->InYoungList());

    for (auto* node : *block) {
      if (!node->is_in_young_list()) continue;
      DCHECK(node->is_in_use());
      if (HeapLayout::InYoungGeneration(node->object())) {
        contains_young_node = true;
        // The node was discovered through a cppgc object, which will be
        // immediately promoted. Remember the object.
        if (needs_to_mark_as_old) node->set_has_old_host(true);
      } else {
        node->set_is_in_young_list(false);
        node->set_has_old_host(false);
      }
    }
    if (contains_young_node) {
      ++it;
    } else {
      it = young_blocks_.RemoveAt(it);
      DCHECK(!block->InYoungList());
      num_young_blocks_--;
    }
  }
}

void TracedHandles::DeleteEmptyBlocks() {
  // Keep one node block around for fast allocation/deallocation patterns.
  if (empty_blocks_.size() <= 1) return;

  for (size_t i = 1; i < empty_blocks_.size(); i++) {
    auto* block = empty_blocks_[i];
    DCHECK(block->IsEmpty());
    DCHECK_GE(block_size_bytes_, block->size_bytes());
    block_size_bytes_ -= block->size_bytes();
    TracedNodeBlock::Delete(block);
  }
  empty_blocks_.resize(1);
  empty_blocks_.shrink_to_fit();
}

void TracedHandles::ResetDeadNodes(
    WeakSlotCallbackWithHeap should_reset_handle) {
  // Manual iteration as the block may be deleted in `FreeNode()`.
  for (auto it = blocks_.begin(); it != blocks_.end();) {
    auto* block = *(it++);
    for (auto* node : *block) {
      if (!node->is_in_use()) continue;

      // Detect unreachable nodes first.
      if (!node->markbit()) {
        FreeNode(node, kTracedHandleFullGCResetZapValue);
        continue;
      }

      // Node was reachable. Clear the markbit for the next GC.
      node->clear_markbit();
      // TODO(v8:13141): Turn into a DCHECK after some time.
      CHECK(!should_reset_handle(isolate_->heap(), node->location()));
    }

    if (block->InYoungList()) {
      young_blocks_.Remove(block);
      DCHECK(!block->InYoungList());
      num_young_blocks_--;
    }
  }
}

void TracedHandles::ResetYoungDeadNodes(
    WeakSlotCallbackWithHeap should_reset_handle) {
  for (auto* block : young_blocks_) {
    for (auto* node : *block) {
      if (!node->is_in_young_list()) continue;
      DCHECK(node->is_in_use());
      DCHECK_IMPLIES(node->has_old_host(), node->markbit());

      if (!node->markbit()) {
        FreeNode(node, kTracedHandleMinorGCResetZapValue);
        continue;
      }

      // Node was reachable. Clear the markbit for the next GC.
      node->clear_markbit();
      // TODO(v8:13141): Turn into a DCHECK after some time.
      CHECK(!should_reset_handle(isolate_->heap(), node->location()));
    }
  }
}

bool TracedHandles::SupportsClearingWeakNonLiveWrappers() {
  if (!v8_flags.reclaim_unmodified_wrappers) {
    return false;
  }
  if (!isolate_->heap()->GetEmbedderRootsHandler()) {
    return false;
  }
  return true;
}

namespace {

template <typename Derived>
class ParallelWeakHandlesProcessor {
 public:
  class Job : public v8::JobTask {
   public:
    explicit Job(Derived& derived) : derived_(derived) {}

    void Run(JobDelegate* delegate) override {
      if (delegate->IsJoiningThread()) {
        TRACE_GC_WITH_FLOW(derived_.heap()->tracer(), Derived::kMainThreadScope,
                           derived_.trace_id_, TRACE_EVENT_FLAG_FLOW_IN);
        RunImpl</*IsMainThread=*/true>(delegate);
      } else {
        TRACE_GC_EPOCH_WITH_FLOW(derived_.heap()->tracer(),
                                 Derived::kBackgroundThreadScope,
                                 ThreadKind::kBackground, derived_.trace_id_,
                                 TRACE_EVENT_FLAG_FLOW_IN);
        RunImpl</*IsMainThread=*/false>(delegate);
      }
    }

    size_t GetMaxConcurrency(size_t worker_count) const override {
      const auto processed_young_blocks =
          derived_.processed_young_blocks_.load(std::memory_order_relaxed);
      if (derived_.num_young_blocks_ < processed_young_blocks) {
        return 0;
      }
      if (!v8_flags.parallel_reclaim_unmodified_wrappers) {
        return 1;
      }
      const auto blocks_left =
          derived_.num_young_blocks_ - processed_young_blocks;
      constexpr size_t kMaxParallelTasks = 3;
      constexpr size_t kBlocksPerTask = 8;
      const auto wanted_tasks =
          (blocks_left + (kBlocksPerTask - 1)) / kBlocksPerTask;
      return std::min(kMaxParallelTasks, wanted_tasks);
    }

   private:
    template <bool IsMainThread>
    void RunImpl(JobDelegate* delegate) {
      // The following logic parallelizes the handling of the doubly-linked
      // list. We basically race through the list from begin(), acquiring
      // exclusive access to a block by incrementing a single shared counter.
      auto it = derived_.young_blocks_.begin();
      size_t current = 0;
      for (size_t index = derived_.processed_young_blocks_.fetch_add(
               1, std::memory_order_relaxed);
           index < derived_.num_young_blocks_;
           index = derived_.processed_young_blocks_.fetch_add(
               +1, std::memory_order_relaxed)) {
        while (current < index) {
          it++;
          current++;
        }
        TracedNodeBlock* block = *it;
        DCHECK(block->InYoungList());
        derived_.template ProcessBlock<IsMainThread>(block);
        // TracedNodeBlock is the minimum granularity of processing.
        if (delegate->ShouldYield()) {
          return;
        }
      }
    }

    Derived& derived_;
  };

  ParallelWeakHandlesProcessor(Heap* heap,
                               TracedNodeBlock::YoungList& young_blocks,
                               size_t num_young_blocks)
      : heap_(heap),
        young_blocks_(young_blocks),
        num_young_blocks_(num_young_blocks),
        trace_id_(reinterpret_cast<uint64_t>(this) ^
                  heap_->tracer()->CurrentEpoch(
                      GCTracer::Scope::SCAVENGER_SCAVENGE)) {}

  void Run() {
    TRACE_GC_NOTE_WITH_FLOW(Derived::kStartNote, trace_id(),
                            TRACE_EVENT_FLAG_FLOW_OUT);
    V8::GetCurrentPlatform()
        ->CreateJob(v8::TaskPriority::kUserBlocking,
                    std::make_unique<Job>(static_cast<Derived&>(*this)))
        ->Join();
  }

  Heap* heap() const { return heap_; }
  uint64_t trace_id() const { return trace_id_; }

 private:
  Heap* heap_;
  TracedNodeBlock::YoungList& young_blocks_;
  const size_t num_young_blocks_;
  const uint64_t trace_id_;
  std::atomic<size_t> processed_young_blocks_{0};
};
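
// Illustration (a standalone sketch, not from this file): the
// claim-by-atomic-counter scheme that RunImpl() uses to parallelize iteration
// over a linked list, shown over a std::list. Assumes <atomic>, <cstddef>,
// and <list>.
namespace traced_handles_sketches {

template <typename T, typename Callback>
void ForEachClaimed(std::list<T>& items, std::atomic<size_t>& next_index,
                    Callback callback) {
  auto it = items.begin();
  size_t current = 0;
  // Every worker runs this loop with the same shared counter: fetch_add hands
  // out each index exactly once, and each worker advances its own iterator to
  // the claimed position. The list itself is only read, never mutated, here.
  for (size_t index = next_index.fetch_add(1, std::memory_order_relaxed);
       index < items.size();
       index = next_index.fetch_add(1, std::memory_order_relaxed)) {
    while (current < index) {
      ++it;
      ++current;
    }
    callback(*it);
  }
}

}  // namespace traced_handles_sketches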

class ComputeWeaknessProcessor final
    : public ParallelWeakHandlesProcessor<ComputeWeaknessProcessor> {
 public:
  static constexpr auto kMainThreadScope =
      GCTracer::Scope::SCAVENGER_TRACED_HANDLES_COMPUTE_WEAKNESS_PARALLEL;
  static constexpr auto kBackgroundThreadScope = GCTracer::Scope::
      SCAVENGER_BACKGROUND_TRACED_HANDLES_COMPUTE_WEAKNESS_PARALLEL;
  static constexpr char kStartNote[] = "ComputeWeaknessProcessor start";

  ComputeWeaknessProcessor(Heap* heap, TracedNodeBlock::YoungList& young_blocks,
                           size_t num_young_blocks)
      : ParallelWeakHandlesProcessor(heap, young_blocks, num_young_blocks) {}

  template <bool IsMainThread>
  void ProcessBlock(TracedNodeBlock* block) {
    for (TracedNode* node : *block) {
      if (!node->is_in_young_list()) {
        continue;
      }
      DCHECK(node->is_in_use());
      DCHECK(!node->is_weak());
      if (node->is_droppable() &&
          JSObject::IsUnmodifiedApiObject(node->location())) {
        node->set_weak(true);
      }
    }
  }
};

}  // namespace

void TracedHandles::ComputeWeaknessForYoungObjects() {
  if (!SupportsClearingWeakNonLiveWrappers()) {
    return;
  }
  ComputeWeaknessProcessor job(isolate_->heap(), young_blocks_,
                               num_young_blocks_);
  job.Run();
}

namespace {

class ClearWeaknessProcessor final
    : public ParallelWeakHandlesProcessor<ClearWeaknessProcessor> {
 public:
  static constexpr auto kMainThreadScope =
      GCTracer::Scope::SCAVENGER_TRACED_HANDLES_RESET_PARALLEL;
  static constexpr auto kBackgroundThreadScope =
      GCTracer::Scope::SCAVENGER_BACKGROUND_TRACED_HANDLES_RESET_PARALLEL;
  static constexpr char kStartNote[] = "ClearWeaknessProcessor start";

  ClearWeaknessProcessor(TracedNodeBlock::YoungList& young_blocks,
                         size_t num_young_blocks, Heap* heap,
                         RootVisitor* visitor,
                         WeakSlotCallbackWithHeap should_reset_handle)
      : ParallelWeakHandlesProcessor(heap, young_blocks, num_young_blocks),
        visitor_(visitor),
        handler_(heap->GetEmbedderRootsHandler()),
        should_reset_handle_(should_reset_handle) {}

  template <bool IsMainThread>
  void ProcessBlock(TracedNodeBlock* block) {
    const auto saved_used_nodes_in_block = block->used();
    for (TracedNode* node : *block) {
      if (!node->is_weak()) {
        continue;
      }
      DCHECK(node->is_in_use());
      DCHECK(node->is_in_young_list());

      const bool should_reset = should_reset_handle_(heap(), node->location());
      if (should_reset) {
        FullObjectSlot slot = node->location();
        bool node_cleared = true;
        if constexpr (IsMainThread) {
          handler_->ResetRoot(
              *reinterpret_cast<v8::TracedReference<v8::Value>*>(&slot));
        } else {
          node_cleared = handler_->TryResetRoot(
              *reinterpret_cast<v8::TracedReference<v8::Value>*>(&slot));
        }
        if (node_cleared) {
          // Mark as cleared due to weak semantics.
          node->set_raw_object(kTracedHandleMinorGCWeakResetZapValue);
          DCHECK(!node->is_in_use());
          DCHECK(!node->is_weak());
        } else {
          block->SetReprocessing(true);
        }
      } else {
        node->set_weak(false);
        if (visitor_) {
          visitor_->VisitRootPointer(Root::kTracedHandles, nullptr,
                                     node->location());
        }
      }
    }
    DCHECK_GE(saved_used_nodes_in_block, block->used());
    block->SetLocallyFreed(saved_used_nodes_in_block - block->used());
  }

 private:
  RootVisitor* visitor_;
  EmbedderRootsHandler* handler_;
  WeakSlotCallbackWithHeap should_reset_handle_;
};
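
// Illustration (a minimal sketch of the embedder side driven by ProcessBlock()
// above, assuming the v8::EmbedderRootsHandler interface from
// v8-embedder-heap.h): ResetRoot() must drop the reference, while
// TryResetRoot() may refuse by returning false off the main thread, which
// makes the caller above schedule main-thread reprocessing for the block.
namespace traced_handles_sketches {

class SketchRootsHandler final : public v8::EmbedderRootsHandler {
 public:
  void ResetRoot(const v8::TracedReference<v8::Value>& handle) override {
    // A real embedder would locate the owning wrapper here and reset its
    // TracedReference so the node can be reclaimed.
  }

  bool TryResetRoot(const v8::TracedReference<v8::Value>& handle) override {
    // A thread-safe embedder can reset here and return true; returning false
    // defers the reset to ResetRoot() on the main thread.
    return false;
  }
};

}  // namespace traced_handles_sketches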

}  // namespace

void TracedHandles::ProcessWeakYoungObjects(
    RootVisitor* visitor, WeakSlotCallbackWithHeap should_reset_handle) {
  if (!SupportsClearingWeakNonLiveWrappers()) {
    return;
  }

  auto* heap = isolate_->heap();
  // ResetRoot() below should not trigger allocations in CppGC.
  if (auto* cpp_heap = CppHeap::From(heap->cpp_heap())) {
    cpp_heap->EnterDisallowGCScope();
    cpp_heap->EnterNoGCScope();
  }

#ifdef DEBUG
  size_t num_young_blocks = 0;
  for (auto it = young_blocks_.begin(); it != young_blocks_.end(); it++) {
    TracedNodeBlock* block = *it;
    DCHECK(block->InYoungList());
    DCHECK(!block->NeedsReprocessing());
    num_young_blocks++;
  }
  DCHECK_EQ(num_young_blocks_, num_young_blocks);
#endif

  ClearWeaknessProcessor job(young_blocks_, num_young_blocks_, heap, visitor,
                             should_reset_handle);
  job.Run();

  // Post processing on block level.
  for (auto it = young_blocks_.begin(); it != young_blocks_.end();) {
    TracedNodeBlock* block = *it;
    // Avoid iterator invalidation by incrementing the iterator here, before a
    // block is possibly removed below.
    it++;
    DCHECK(block->InYoungList());

    // Freeing a node will not make the block fuller, so IsFull() should mean
    // that the block was already not usable before freeing.
    CHECK_IMPLIES(block->IsFull(), !usable_blocks_.Contains(block));
    if (!block->IsFull() && !block->IsEmpty()) {
      // The block is usable but may have been full before. Check whether it
      // needs to be added to the usable blocks.
      if (!usable_blocks_.Contains(block)) {
        DCHECK(!block->InUsableList());
        usable_blocks_.PushFront(block);
        DCHECK(block->InUsableList());
      }
    } else if (block->IsEmpty()) {
      // A non-empty block became empty during freeing. The block must not
      // require reprocessing, which would mean that at least one node was not
      // yet freed.
      DCHECK(!block->NeedsReprocessing());
      if (usable_blocks_.Contains(block)) {
        DCHECK(block->InUsableList());
        usable_blocks_.Remove(block);
        DCHECK(!block->InUsableList());
      }
      blocks_.Remove(block);
      DCHECK(block->InYoungList());
      young_blocks_.Remove(block);
      DCHECK(!block->InYoungList());
      num_young_blocks_--;
      empty_blocks_.push_back(block);
      num_blocks_--;
    }

    used_nodes_ -= block->ConsumeLocallyFreed();

    // Handle reprocessing of blocks because `TryResetRoot()` was not able to
    // reset a node concurrently.
    if (!block->NeedsReprocessing()) {
      continue;
    }
    block->SetReprocessing(false);
    job.template ProcessBlock</*IsMainThread=*/true>(block);
    DCHECK(!block->NeedsReprocessing());
    // The nodes are fully freed and accounted for, but still reported as
    // locally freed because the processor is reused.
    const auto locally_freed = block->ConsumeLocallyFreed();
    (void)locally_freed;
    DCHECK_GT(locally_freed, 0);
  }

  if (auto* cpp_heap = CppHeap::From(isolate_->heap()->cpp_heap())) {
    cpp_heap->LeaveNoGCScope();
    cpp_heap->LeaveDisallowGCScope();
  }
}

void TracedHandles::Iterate(RootVisitor* visitor) {
  for (auto* block : blocks_) {
    for (auto* node : *block) {
      if (!node->is_in_use()) continue;

      visitor->VisitRootPointer(Root::kTracedHandles, nullptr,
                                node->location());
    }
  }
}

void TracedHandles::IterateYoung(RootVisitor* visitor) {
  for (auto* block : young_blocks_) {
    for (auto* node : *block) {
      if (!node->is_in_young_list()) continue;
      DCHECK(node->is_in_use());
      visitor->VisitRootPointer(Root::kTracedHandles, nullptr,
                                node->location());
    }
  }
}

void TracedHandles::IterateYoungRoots(RootVisitor* visitor) {
  for (auto* block : young_blocks_) {
    DCHECK(block->InYoungList());

    for (auto* node : *block) {
      if (!node->is_in_young_list()) continue;
      DCHECK(node->is_in_use());

      if (node->is_weak()) continue;

      visitor->VisitRootPointer(Root::kTracedHandles, nullptr,
                                node->location());
    }
  }
}

void TracedHandles::IterateAndMarkYoungRootsWithOldHosts(
    RootVisitor* visitor) {
  for (auto* block : young_blocks_) {
    for (auto* node : *block) {
      if (!node->is_in_young_list()) continue;
      DCHECK(node->is_in_use());
      if (!node->has_old_host()) continue;

      if (node->is_weak()) continue;

      node->set_markbit();
      CHECK(HeapLayout::InYoungGeneration(node->object()));
      visitor->VisitRootPointer(Root::kTracedHandles, nullptr,
                                node->location());
    }
  }
}

void TracedHandles::IterateYoungRootsWithOldHostsForTesting(
    RootVisitor* visitor) {
  for (auto* block : young_blocks_) {
    for (auto* node : *block) {
      if (!node->is_in_young_list()) continue;
      DCHECK(node->is_in_use());
      if (!node->has_old_host()) continue;

      if (node->is_weak()) continue;

      visitor->VisitRootPointer(Root::kTracedHandles, nullptr,
                                node->location());
    }
  }
}

// static
void TracedHandles::Destroy(Address* location) {
  if (!location) return;

  auto* node = TracedNode::FromLocation(location);
  auto& node_block = TracedNodeBlock::From(*node);
  auto& traced_handles = node_block.traced_handles();
  traced_handles.Destroy(node_block, *node);
}

// static
void TracedHandles::Copy(const Address* const* from, Address** to) {
  DCHECK_NOT_NULL(*from);
  DCHECK_NULL(*to);

  const TracedNode* from_node = TracedNode::FromLocation(*from);
  const auto& node_block = TracedNodeBlock::From(*from_node);
  auto& traced_handles = node_block.traced_handles();
  traced_handles.Copy(*from_node, to);
}

// static
void TracedHandles::Move(Address** from, Address** to) {
  // Fast path for moving from an empty reference.
  if (!*from) {
    Destroy(*to);
    SetSlotThreadSafe(to, nullptr);
    return;
  }

  TracedNode* from_node = TracedNode::FromLocation(*from);
  auto& node_block = TracedNodeBlock::From(*from_node);
  auto& traced_handles = node_block.traced_handles();
  traced_handles.Move(*from_node, from, to);
}

namespace {
Tagged<Object> MarkObject(Tagged<Object> obj, TracedNode& node,
                          TracedHandles::MarkMode mark_mode) {
  if (mark_mode == TracedHandles::MarkMode::kOnlyYoung &&
      !node.is_in_young_list())
    return Smi::zero();
  node.set_markbit();
  // Being in the young list, the node may still point to an old object, in
  // which case we want to keep the node marked, but not follow the reference.
  if (mark_mode == TracedHandles::MarkMode::kOnlyYoung &&
      !HeapLayout::InYoungGeneration(obj))
    return Smi::zero();
  return obj;
}
}  // namespace

// static
Tagged<Object> TracedHandles::Mark(Address* location, MarkMode mark_mode) {
  // The load synchronizes internal bitfields that are also read atomically
  // from the concurrent marker. The counterpart is `TracedNode::Publish()`.
  Tagged<Object> object =
      Tagged<Object>(reinterpret_cast<std::atomic<Address>*>(location)->load(
          std::memory_order_acquire));
  auto* node = TracedNode::FromLocation(location);
  DCHECK(node->is_in_use());
  return MarkObject(object, *node, mark_mode);
}

// static
Tagged<Object> TracedHandles::MarkConservatively(
    Address* inner_location, Address* traced_node_block_base,
    MarkMode mark_mode) {
  // Compute the `TracedNode` address based on its inner pointer.
  const ptrdiff_t delta = reinterpret_cast<uintptr_t>(inner_location) -
                          reinterpret_cast<uintptr_t>(traced_node_block_base);
  const auto index = delta / sizeof(TracedNode);
  TracedNode& node =
      reinterpret_cast<TracedNode*>(traced_node_block_base)[index];
  if (!node.is_in_use()) return Smi::zero();
  return MarkObject(node.object(), node, mark_mode);
}
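
// Illustration (a minimal sketch): the inner-pointer-to-node computation used
// by MarkConservatively() above, expressed with the simplified
// SketchNode/SketchBlock types introduced after Create(). Any address inside a
// node maps back to that node's slot in the block. Assumes <cstddef> and
// <cstdint>.
namespace traced_handles_sketches {

inline SketchNode* NodeFromInnerPointer(SketchBlock* block,
                                        const void* inner_pointer) {
  const ptrdiff_t delta = reinterpret_cast<uintptr_t>(inner_pointer) -
                          reinterpret_cast<uintptr_t>(block->nodes());
  return &block->nodes()[delta / sizeof(SketchNode)];
}

}  // namespace traced_handles_sketches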

bool TracedHandles::IsValidInUseNode(const Address* location) {
  const TracedNode* node = TracedNode::FromLocation(location);
  // This method is called after mark bits have been cleared.
  DCHECK(!node->markbit());
  CHECK_IMPLIES(node->is_in_use(), node->raw_object() != kGlobalHandleZapValue);
  CHECK_IMPLIES(!node->is_in_use(),
                node->raw_object() == kGlobalHandleZapValue);
  return node->is_in_use();
}

bool TracedHandles::HasYoung() const { return !young_blocks_.empty(); }

}  // namespace v8::internal