v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
mark-compact.cc
1// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/heap/mark-compact.h"
6
7#include <algorithm>
8#include <atomic>
9#include <iterator>
10#include <memory>
11#include <optional>
12
13#include "src/base/bits.h"
14#include "src/base/logging.h"
20#include "src/common/globals.h"
27#include "src/flags/flags.h"
36#include "src/heap/gc-tracer.h"
40#include "src/heap/heap.h"
51#include "src/heap/marking.h"
59#include "src/heap/new-spaces.h"
69#include "src/heap/safepoint.h"
70#include "src/heap/slot-set.h"
71#include "src/heap/spaces-inl.h"
72#include "src/heap/sweeper.h"
75#include "src/heap/zapping.h"
76#include "src/init/v8.h"
79#include "src/objects/foreign.h"
87#include "src/objects/objects.h"
89#include "src/objects/smi.h"
97#include "src/utils/utils-inl.h"
98
99#ifdef V8_ENABLE_WEBASSEMBLY
101#endif
102
103namespace v8 {
104namespace internal {
105
106// =============================================================================
107// Verifiers
108// =============================================================================
109
110#ifdef VERIFY_HEAP
111namespace {
112
113class FullMarkingVerifier : public MarkingVerifierBase {
114 public:
115 explicit FullMarkingVerifier(Heap* heap)
116 : MarkingVerifierBase(heap),
117 marking_state_(heap->non_atomic_marking_state()) {}
118
119 void Run() override {
120 VerifyRoots();
121 VerifyMarking(heap_->new_space());
122 VerifyMarking(heap_->new_lo_space());
123 VerifyMarking(heap_->old_space());
124 VerifyMarking(heap_->code_space());
125 if (heap_->shared_space()) VerifyMarking(heap_->shared_space());
126 VerifyMarking(heap_->lo_space());
127 VerifyMarking(heap_->code_lo_space());
128 if (heap_->shared_lo_space()) VerifyMarking(heap_->shared_lo_space());
129 VerifyMarking(heap_->trusted_space());
130 VerifyMarking(heap_->trusted_lo_space());
131 }
132
133 protected:
134 const MarkingBitmap* bitmap(const MutablePageMetadata* chunk) override {
135 return chunk->marking_bitmap();
136 }
137
138 bool IsMarked(Tagged<HeapObject> object) override {
139 return marking_state_->IsMarked(object);
140 }
141
142 void VerifyMap(Tagged<Map> map) override { VerifyHeapObjectImpl(map); }
143
144 void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
145 VerifyPointersImpl(start, end);
146 }
147
148 void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
149 VerifyPointersImpl(start, end);
150 }
151
152 void VerifyCodePointer(InstructionStreamSlot slot) override {
153 Tagged<Object> maybe_code = slot.load(code_cage_base());
154 Tagged<HeapObject> code;
155 // The slot might contain smi during Code creation, so skip it.
156 if (maybe_code.GetHeapObject(&code)) {
157 VerifyHeapObjectImpl(code);
158 }
159 }
160
161 void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
162 VerifyPointersImpl(start, end);
163 }
164
165 void VisitCodeTarget(Tagged<InstructionStream> host,
166 RelocInfo* rinfo) override {
167 Tagged<InstructionStream> target =
168 InstructionStream::FromTargetAddress(rinfo->target_address());
169 VerifyHeapObjectImpl(target);
170 }
171
172 void VisitEmbeddedPointer(Tagged<InstructionStream> host,
173 RelocInfo* rinfo) override {
174 CHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
175 Tagged<HeapObject> target_object = rinfo->target_object(cage_base());
176 Tagged<Code> code = UncheckedCast<Code>(host->raw_code(kAcquireLoad));
177 if (!code->IsWeakObject(target_object)) {
178 VerifyHeapObjectImpl(target_object);
179 }
180 }
181
182 private:
183 V8_INLINE void VerifyHeapObjectImpl(Tagged<HeapObject> heap_object) {
184 if (!ShouldVerifyObject(heap_object)) return;
185
186 if (heap_->MustBeInSharedOldSpace(heap_object)) {
187 CHECK(heap_->SharedHeapContains(heap_object));
188 }
189
190 CHECK(HeapLayout::InReadOnlySpace(heap_object) ||
191 (v8_flags.black_allocated_pages &&
192 HeapLayout::InBlackAllocatedPage(heap_object)) ||
193 marking_state_->IsMarked(heap_object));
194 }
195
196 V8_INLINE bool ShouldVerifyObject(Tagged<HeapObject> heap_object) {
197 const bool in_shared_heap = HeapLayout::InWritableSharedSpace(heap_object);
198 return heap_->isolate()->is_shared_space_isolate() ? true : !in_shared_heap;
199 }
200
201 template <typename TSlot>
202 V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end) {
203 PtrComprCageBase cage_base =
205 for (TSlot slot = start; slot < end; ++slot) {
206 typename TSlot::TObject object = slot.load(cage_base);
207#ifdef V8_ENABLE_DIRECT_HANDLE
208 if (object.ptr() == kTaggedNullAddress) continue;
209#endif
210 Tagged<HeapObject> heap_object;
211 if (object.GetHeapObjectIfStrong(&heap_object)) {
212 VerifyHeapObjectImpl(heap_object);
213 }
214 }
215 }
216
217 NonAtomicMarkingState* const marking_state_;
218};
219
220} // namespace
221#endif // VERIFY_HEAP
222
223// ==================================================================
224// MarkCompactCollector
225// ==================================================================
226
227namespace {
228
229int NumberOfAvailableCores() {
230 static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
231 // This number of cores should be greater than zero and never change.
232 DCHECK_GE(num_cores, 1);
233 DCHECK_EQ(num_cores, V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1);
234 return num_cores;
235}
236
237int NumberOfParallelCompactionTasks(Heap* heap) {
238 int tasks = v8_flags.parallel_compaction ? NumberOfAvailableCores() : 1;
239 if (!heap->CanPromoteYoungAndExpandOldGeneration(
240 static_cast<size_t>(tasks * PageMetadata::kPageSize))) {
241 // Optimize for memory usage near the heap limit.
242 tasks = 1;
243 }
244 return tasks;
245}
246
247} // namespace
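
For orientation, a minimal stand-alone sketch of the clamp in NumberOfParallelCompactionTasks() above (the page size and headroom values are made up, not V8 constants): if promoting and expanding by tasks * page_size is no longer possible, compaction falls back to a single task.

#include <cstddef>
#include <cstdio>

int main() {
  const size_t page_size = 256 * 1024;   // hypothetical page size
  const size_t headroom = 512 * 1024;    // hypothetical remaining room to expand
  int tasks = 8;                         // requested parallel compaction tasks
  if (static_cast<size_t>(tasks) * page_size > headroom) {
    tasks = 1;  // optimize for memory usage near the heap limit
  }
  std::printf("compaction tasks: %d\n", tasks);  // prints 1
}
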
248
249// This visitor is used for marking on the main thread. It is cheaper than
250// the concurrent marking visitor because it does not snapshot JSObjects.
251class MainMarkingVisitor final
252    : public FullMarkingVisitorBase<MainMarkingVisitor> {
253 public:
255 WeakObjects::Local* local_weak_objects, Heap* heap,
256 unsigned mark_compact_epoch,
257 base::EnumSet<CodeFlushMode> code_flush_mode,
258 bool should_keep_ages_unchanged,
259 uint16_t code_flushing_increase)
261 local_marking_worklists, local_weak_objects, heap,
262 mark_compact_epoch, code_flush_mode, should_keep_ages_unchanged,
263 code_flushing_increase) {}
264
265 private:
266 // Functions required by MarkingVisitorBase.
267
268 template <typename TSlot>
269 void RecordSlot(Tagged<HeapObject> object, TSlot slot,
270 Tagged<HeapObject> target) {
271 MarkCompactCollector::RecordSlot(object, slot, target);
272 }
273
275 Tagged<HeapObject> target) {
276 MarkCompactCollector::RecordRelocSlot(host, rinfo, target);
277 }
278
280};
281
282MarkCompactCollector::MarkCompactCollector(Heap* heap)
283    : heap_(heap),
284#ifdef DEBUG
285 state_(IDLE),
286#endif
287 uses_shared_heap_(heap_->isolate()->has_shared_space()),
288 is_shared_space_isolate_(heap_->isolate()->is_shared_space_isolate()),
289 marking_state_(heap_->marking_state()),
290 non_atomic_marking_state_(heap_->non_atomic_marking_state()),
291 sweeper_(heap_->sweeper()) {
292}
293
295
298 local_marking_worklists_->Publish();
300 // Marking barriers of LocalHeaps will be published in their destructors.
303 weak_objects()->Clear();
304 }
305}
306
308 DCHECK(!p->Chunk()->NeverEvacuate());
310
311 if (v8_flags.trace_evacuation_candidates) {
313 heap_->isolate(),
314 "Evacuation candidate: Free bytes: %6zu. Free Lists length: %4d.\n",
316 }
317
319 evacuation_candidates_.push_back(p);
320}
321
322static void TraceFragmentation(PagedSpace* space) {
323 int number_of_pages = space->CountTotalPages();
324 intptr_t reserved = (number_of_pages * space->AreaSize());
325 intptr_t free = reserved - space->SizeOfObjects();
326 PrintF("[%s]: %d pages, %d (%.1f%%) free\n", ToString(space->identity()),
327 number_of_pages, static_cast<int>(free),
328 static_cast<double>(free) * 100 / reserved);
329}
330
334
335 // Bailouts for completely disabled compaction.
336 if (!v8_flags.compact ||
338 !v8_flags.compact_with_stack) ||
339 (v8_flags.gc_experiment_less_compaction &&
342 return false;
343 }
344
346
347 // Don't compact shared space when CSS is enabled, since there may be
348 // DirectHandles on stacks of client isolates.
349 if (!v8_flags.conservative_stack_scanning && heap_->shared_space()) {
351 }
352
354
356 (!heap_->IsGCWithStack() || v8_flags.compact_code_space_with_stack)) {
358 } else if (v8_flags.trace_fragmentation) {
360 }
361
363 return compacting_;
364}
365
366namespace {
367
368// Helper function to get the bytecode flushing mode based on the flags. This
369// is required because it is not safe to access flags in the concurrent marker.
370base::EnumSet<CodeFlushMode> GetCodeFlushMode(Isolate* isolate) {
371 if (isolate->disable_bytecode_flushing()) {
373 }
374
375 base::EnumSet<CodeFlushMode> code_flush_mode;
376 if (v8_flags.flush_bytecode) {
377 code_flush_mode.Add(CodeFlushMode::kFlushBytecode);
378 }
379
380 if (v8_flags.flush_baseline_code) {
381 code_flush_mode.Add(CodeFlushMode::kFlushBaselineCode);
382 }
383
384 if (v8_flags.stress_flush_code) {
385    // This checks that tests don't accidentally enable stress_flush_code
386    // without also enabling flush_bytecode or flush_baseline_code;
387    // stress_flush_code doesn't do anything unless at least one of them is enabled.
388 DCHECK(v8_flags.fuzzing || v8_flags.flush_baseline_code ||
389 v8_flags.flush_bytecode);
390 code_flush_mode.Add(CodeFlushMode::kForceFlush);
391 }
392
393 if (isolate->heap()->IsLastResortGC() &&
394 (v8_flags.flush_code_based_on_time ||
395 v8_flags.flush_code_based_on_tab_visibility)) {
396 code_flush_mode.Add(CodeFlushMode::kForceFlush);
397 }
398
399 return code_flush_mode;
400}
401
402} // namespace
403
405 std::shared_ptr<::heap::base::IncrementalMarkingSchedule> schedule) {
406  // The state for the background thread is saved here and maintained for the
407  // whole GC cycle. Both CppHeap and the regular V8 heap will refer to this flag.
409
410 if (v8_flags.sticky_mark_bits) {
411 heap()->Unmark();
412 }
413
414#ifdef V8_COMPRESS_POINTERS
415 heap_->young_external_pointer_space()->StartCompactingIfNeeded();
416 heap_->old_external_pointer_space()->StartCompactingIfNeeded();
417 heap_->cpp_heap_pointer_space()->StartCompactingIfNeeded();
418#endif // V8_COMPRESS_POINTERS
419
420 // CppHeap's marker must be initialized before the V8 marker to allow
421 // exchanging of worklists.
422 if (auto* cpp_heap = CppHeap::From(heap_->cpp_heap())) {
423 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
424 cpp_heap->InitializeMarking(CppHeap::CollectionType::kMajor, schedule);
425 }
426
427 std::vector<Address> contexts =
429 if (v8_flags.stress_per_context_marking_worklist) {
430 contexts.clear();
431 HandleScope handle_scope(heap_->isolate());
432 for (auto context : heap_->FindAllNativeContexts()) {
433 contexts.push_back(context->ptr());
434 }
435 }
437 code_flush_mode_ = GetCodeFlushMode(heap_->isolate());
439 auto* cpp_heap = CppHeap::From(heap_->cpp_heap_);
440 local_marking_worklists_ = std::make_unique<MarkingWorklists::Local>(
442 cpp_heap ? cpp_heap->CreateCppMarkingStateForMutatorThread()
444 local_weak_objects_ = std::make_unique<WeakObjects::Local>(weak_objects());
445 marking_visitor_ = std::make_unique<MainMarkingVisitor>(
449  // This method evicts SFIs with flushed bytecode from the cache before
450  // iterating the compilation cache as part of the root set. SFIs that get
451  // flushed in this GC cycle will get evicted from the cache in the next GC
452  // cycle. The SFI will remain in the cache until then and may remain in the
453  // cache even longer if the SFI is re-compiled.
455 // Marking bits are cleared by the sweeper or unmarker (if sticky mark-bits
456 // are enabled).
457#ifdef VERIFY_HEAP
458 if (v8_flags.verify_heap) {
459 VerifyMarkbitsAreClean();
460 }
461#endif // VERIFY_HEAP
462}
463
465 CallOrigin origin) {
466 if (v8_flags.concurrent_marking && !use_background_threads_in_cycle_) {
467 // With --parallel_pause_for_gc_in_background we force background threads in
468 // the atomic pause.
469 const bool force_background_threads =
470 v8_flags.parallel_pause_for_gc_in_background &&
471 origin == CallOrigin::kAtomicGC;
473 force_background_threads || heap()->ShouldUseBackgroundThreads();
474
478
479 if (auto* cpp_heap = CppHeap::From(heap_->cpp_heap_)) {
480 cpp_heap->ReEnableConcurrentMarking();
481 }
482 }
483 }
484}
485
487 // Make sure that Prepare() has been called. The individual steps below will
488 // update the state as they proceed.
489 DCHECK(state_ == PREPARE_GC);
490
492
494
495 if (auto* cpp_heap = CppHeap::From(heap_->cpp_heap_)) {
496 cpp_heap->ProcessCrossThreadWeakness();
497 }
498
499 // This will walk dead object graphs and so requires that all references are
500 // still intact.
504
505 if (auto* cpp_heap = CppHeap::From(heap_->cpp_heap_)) {
506 cpp_heap->FinishMarkingAndProcessWeakness();
507 }
508
510
511 Sweep();
512 Evacuate();
513 Finish();
514}
515
516#ifdef VERIFY_HEAP
517
518void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpaceBase* space) {
519 for (PageMetadata* p : *space) {
520 CHECK(p->marking_bitmap()->IsClean());
521 CHECK_EQ(0, p->live_bytes());
522 }
523}
524
525void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
526 if (!space) return;
527 if (v8_flags.minor_ms) {
528 VerifyMarkbitsAreClean(PagedNewSpace::From(space)->paged_space());
529 return;
530 }
531 for (PageMetadata* p : *space) {
532 CHECK(p->marking_bitmap()->IsClean());
533 CHECK_EQ(0, p->live_bytes());
534 }
535}
536
537void MarkCompactCollector::VerifyMarkbitsAreClean(LargeObjectSpace* space) {
538 if (!space) return;
539 LargeObjectSpaceObjectIterator it(space);
540 for (Tagged<HeapObject> obj = it.Next(); !obj.is_null(); obj = it.Next()) {
542 CHECK_EQ(0, MutablePageMetadata::FromHeapObject(obj)->live_bytes());
543 }
544}
545
546void MarkCompactCollector::VerifyMarkbitsAreClean() {
547 VerifyMarkbitsAreClean(heap_->old_space());
548 VerifyMarkbitsAreClean(heap_->code_space());
549 VerifyMarkbitsAreClean(heap_->new_space());
550 VerifyMarkbitsAreClean(heap_->lo_space());
551 VerifyMarkbitsAreClean(heap_->code_lo_space());
552 VerifyMarkbitsAreClean(heap_->new_lo_space());
553 VerifyMarkbitsAreClean(heap_->trusted_space());
554 VerifyMarkbitsAreClean(heap_->trusted_lo_space());
555}
556
557#endif // VERIFY_HEAP
558
559void MarkCompactCollector::ComputeEvacuationHeuristics(
560    size_t area_size, int* target_fragmentation_percent,
561 size_t* max_evacuated_bytes) {
562  // For the memory-reducing and optimize-for-memory modes we define both
563  // constants directly.
564 const int kTargetFragmentationPercentForReduceMemory = 20;
565 const size_t kMaxEvacuatedBytesForReduceMemory = 12 * MB;
566 const int kTargetFragmentationPercentForOptimizeMemory = 20;
567 const size_t kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
568
569  // For the regular mode (which is latency critical) we start with less
570  // aggressive defaults and switch to a trace-based approach (using compaction
571  // speed) as soon as we have enough samples.
572 const int kTargetFragmentationPercent = 70;
573 const size_t kMaxEvacuatedBytes = 4 * MB;
574  // Time budget for a single area (= the payload of a page). Used as soon as
575  // enough compaction speed samples exist.
576 const float kTargetMsPerArea = .5;
577
578 if (heap_->ShouldReduceMemory()) {
579 *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
580 *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
581 } else if (heap_->ShouldOptimizeForMemoryUsage()) {
582 *target_fragmentation_percent =
583 kTargetFragmentationPercentForOptimizeMemory;
584 *max_evacuated_bytes = kMaxEvacuatedBytesForOptimizeMemory;
585 } else {
586 const std::optional<double> estimated_compaction_speed =
588 if (estimated_compaction_speed.has_value()) {
589 // Estimate the target fragmentation based on traced compaction speed
590 // and a goal for a single page.
591 const double estimated_ms_per_area =
592 1 + area_size / *estimated_compaction_speed;
593 *target_fragmentation_percent = static_cast<int>(
594 100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
595 if (*target_fragmentation_percent <
596 kTargetFragmentationPercentForReduceMemory) {
597 *target_fragmentation_percent =
598 kTargetFragmentationPercentForReduceMemory;
599 }
600 } else {
601 *target_fragmentation_percent = kTargetFragmentationPercent;
602 }
603 *max_evacuated_bytes = kMaxEvacuatedBytes;
604 }
605}
606
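
To make the trace-based branch of ComputeEvacuationHeuristics() concrete, here is a stand-alone sketch with hypothetical inputs (a 256 KB page area and a compaction speed of 1 MB/ms, neither taken from V8): the estimated time per area is 1 + 0.25 = 1.25 ms, so the target fragmentation becomes 100 - 100 * 0.5 / 1.25 = 60%, clamped from below by the 20% reduce-memory bound.

#include <algorithm>
#include <cstdio>

int main() {
  const double kTargetMsPerArea = 0.5;            // goal per page payload
  const int kReduceMemoryFloorPercent = 20;       // lower clamp
  const double area_size = 256.0 * 1024;          // bytes (hypothetical)
  const double compaction_speed = 1024.0 * 1024;  // bytes per ms (hypothetical)

  const double estimated_ms_per_area = 1 + area_size / compaction_speed;
  int target_fragmentation_percent =
      static_cast<int>(100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
  target_fragmentation_percent =
      std::max(target_fragmentation_percent, kReduceMemoryFloorPercent);
  // Pages with at least this share of free bytes become evacuation candidates.
  std::printf("target fragmentation: %d%%\n", target_fragmentation_percent);  // 60%
}
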
608 DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE ||
609 space->identity() == SHARED_SPACE ||
610 space->identity() == TRUSTED_SPACE);
611
612 int number_of_pages = space->CountTotalPages();
613 size_t area_size = space->AreaSize();
614
615 const bool in_standard_path =
616 !(v8_flags.manual_evacuation_candidates_selection ||
617 v8_flags.stress_compaction_random || v8_flags.stress_compaction ||
618 v8_flags.compact_on_every_full_gc);
619 // Those variables will only be initialized if |in_standard_path|, and are not
620 // used otherwise.
621 size_t max_evacuated_bytes;
622 int target_fragmentation_percent;
623 size_t free_bytes_threshold;
624 if (in_standard_path) {
625    // We use two conditions to decide whether a page qualifies as an evacuation
626    // candidate:
627    // * Target fragmentation: how fragmented the page is, i.e., the ratio
628    //   between live bytes and the capacity of the page (= area).
629    // * Evacuation quota: a global quota determining how many bytes should be
630    //   compacted.
631 ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
632 &max_evacuated_bytes);
633 free_bytes_threshold = target_fragmentation_percent * (area_size / 100);
634 }
635
636 // Pairs of (live_bytes_in_page, page).
637 using LiveBytesPagePair = std::pair<size_t, PageMetadata*>;
638 std::vector<LiveBytesPagePair> pages;
639 pages.reserve(number_of_pages);
640
642 for (PageMetadata* p : *space) {
643 MemoryChunk* chunk = p->Chunk();
644 if (chunk->NeverEvacuate() || !chunk->CanAllocate()) continue;
645
646 if (chunk->IsPinned()) {
647 DCHECK(!chunk->IsFlagSet(
649 continue;
650 }
651
652    // Invariant: Evacuation candidates are only created when marking has
653    // started. This means that sweeping has finished. Furthermore, at the end
654    // of a GC all evacuation candidates are cleared and their slot buffers are
655    // released.
656 CHECK(!chunk->IsEvacuationCandidate());
657 CHECK_NULL(p->slot_set<OLD_TO_OLD>());
658 CHECK_NULL(p->typed_slot_set<OLD_TO_OLD>());
659 CHECK(p->SweepingDone());
660 DCHECK(p->area_size() == area_size);
661 if (in_standard_path) {
662      // Only pages with more than |free_bytes_threshold| free bytes are
663      // considered for evacuation.
664 if (area_size - p->allocated_bytes() >= free_bytes_threshold) {
665 pages.push_back(std::make_pair(p->allocated_bytes(), p));
666 }
667 } else {
668 pages.push_back(std::make_pair(p->allocated_bytes(), p));
669 }
670 }
671
672 int candidate_count = 0;
673 size_t total_live_bytes = 0;
674
675 const bool reduce_memory = heap_->ShouldReduceMemory();
676 if (v8_flags.manual_evacuation_candidates_selection) {
677 for (size_t i = 0; i < pages.size(); i++) {
678 PageMetadata* p = pages[i].second;
679 MemoryChunk* chunk = p->Chunk();
680 if (chunk->IsFlagSet(
682 candidate_count++;
683 total_live_bytes += pages[i].first;
684 chunk->ClearFlagSlow(
687 }
688 }
689 } else if (v8_flags.stress_compaction_random) {
690 double fraction = heap_->isolate()->fuzzer_rng()->NextDouble();
691 size_t pages_to_mark_count =
692 static_cast<size_t>(fraction * (pages.size() + 1));
693 for (uint64_t i : heap_->isolate()->fuzzer_rng()->NextSample(
694 pages.size(), pages_to_mark_count)) {
695 candidate_count++;
696 total_live_bytes += pages[i].first;
698 }
699 } else if (v8_flags.stress_compaction) {
700 for (size_t i = 0; i < pages.size(); i++) {
701 PageMetadata* p = pages[i].second;
702 if (i % 2 == 0) {
703 candidate_count++;
704 total_live_bytes += pages[i].first;
706 }
707 }
708 } else {
709    // The following approach determines the pages that should be evacuated.
710    //
711    // Sort pages from the most free to the least free, then select
712    // the first n pages for evacuation such that:
713    // - the total size of evacuated objects does not exceed the specified
714    //   limit.
715    // - the fragmentation of the (n+1)-th page does not exceed the limit.
716 std::sort(pages.begin(), pages.end(),
717 [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
718 return a.first < b.first;
719 });
720 for (size_t i = 0; i < pages.size(); i++) {
721 size_t live_bytes = pages[i].first;
722 DCHECK_GE(area_size, live_bytes);
723 if (v8_flags.compact_on_every_full_gc ||
724 ((total_live_bytes + live_bytes) <= max_evacuated_bytes)) {
725 candidate_count++;
726 total_live_bytes += live_bytes;
727 }
728 if (v8_flags.trace_fragmentation_verbose) {
730 "compaction-selection-page: space=%s free_bytes_page=%zu "
731 "fragmentation_limit_kb=%zu "
732 "fragmentation_limit_percent=%d sum_compaction_kb=%zu "
733 "compaction_limit_kb=%zu\n",
734 ToString(space->identity()), (area_size - live_bytes) / KB,
735 free_bytes_threshold / KB, target_fragmentation_percent,
736 total_live_bytes / KB, max_evacuated_bytes / KB);
737 }
738 }
739    // How many pages we will allocate for the evacuated objects
740    // in the worst case: ceil(total_live_bytes / area_size).
741 int estimated_new_pages =
742 static_cast<int>((total_live_bytes + area_size - 1) / area_size);
743 DCHECK_LE(estimated_new_pages, candidate_count);
744 int estimated_released_pages = candidate_count - estimated_new_pages;
745 // Avoid (compact -> expand) cycles.
746 if ((estimated_released_pages == 0) && !v8_flags.compact_on_every_full_gc) {
747 candidate_count = 0;
748 }
749 for (int i = 0; i < candidate_count; i++) {
751 }
752 }
753
754 if (v8_flags.trace_fragmentation) {
756 "compaction-selection: space=%s reduce_memory=%d pages=%d "
757 "total_live_bytes=%zu\n",
758 ToString(space->identity()), reduce_memory, candidate_count,
759 total_live_bytes / KB);
760 }
761}
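
A simplified stand-alone sketch of the selection loop above (page sizes and the budget are invented): sort candidates by live bytes ascending, i.e. most free first, and take pages while the evacuated-bytes budget allows. The real code keeps iterating for tracing instead of breaking, but the selection outcome is the same.

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <utility>
#include <vector>

int main() {
  // Hypothetical (live_bytes, page_id) pairs and a 4 MB evacuation budget.
  std::vector<std::pair<size_t, int>> pages = {
      {3u << 20, 1}, {1u << 20, 2}, {2u << 20, 3}};
  const size_t max_evacuated_bytes = 4u << 20;

  std::sort(pages.begin(), pages.end());  // least live bytes (= most free) first
  size_t total_live_bytes = 0;
  for (const auto& [live_bytes, id] : pages) {
    if (total_live_bytes + live_bytes > max_evacuated_bytes) break;
    total_live_bytes += live_bytes;
    std::printf("evacuate page %d\n", id);  // pages 2 and 3 fit, page 1 does not
  }
}
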
762
764#ifdef DEBUG
765 DCHECK(state_ == IDLE);
766 state_ = PREPARE_GC;
767#endif
768
770
775 StartMarking();
776 if (heap_->cpp_heap_) {
777 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
778 // `StartMarking()` immediately starts marking which requires V8 worklists
779 // to be set up.
781 }
782 }
783 if (auto* new_space = heap_->new_space()) {
784 new_space->GarbageCollectionPrologue();
785 }
786 if (heap_->use_new_space()) {
787 DCHECK_EQ(
790 }
791}
792
794  // FinishConcurrentMarking is called for both concurrent and parallel
795  // marking. It is safe to call this function when tasks have already finished.
798 if (v8_flags.parallel_marking || v8_flags.concurrent_marking) {
802 }
803 if (auto* cpp_heap = CppHeap::From(heap_->cpp_heap_)) {
804 cpp_heap->FinishConcurrentMarkingIfNeeded();
805 }
806}
807
811#ifdef VERIFY_HEAP
812 if (v8_flags.verify_heap) {
813 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_MARK_VERIFY);
814 FullMarkingVerifier verifier(heap_);
815 verifier.Run();
816 heap_->old_space()->VerifyLiveBytes();
817 heap_->code_space()->VerifyLiveBytes();
818 if (heap_->shared_space()) heap_->shared_space()->VerifyLiveBytes();
819 heap_->trusted_space()->VerifyLiveBytes();
820 if (v8_flags.minor_ms && heap_->paged_new_space())
821 heap_->paged_new_space()->paged_space()->VerifyLiveBytes();
822 }
823#endif // VERIFY_HEAP
824}
825
826namespace {
827
828void ShrinkPagesToObjectSizes(Heap* heap, OldLargeObjectSpace* space) {
829 size_t surviving_object_size = 0;
830 PtrComprCageBase cage_base(heap->isolate());
831 for (auto it = space->begin(); it != space->end();) {
832 LargePageMetadata* current = *(it++);
833 Tagged<HeapObject> object = current->GetObject();
834 const size_t object_size = static_cast<size_t>(object->Size(cage_base));
835 space->ShrinkPageToObjectSize(current, object, object_size);
836 surviving_object_size += object_size;
837 }
838 space->set_objects_size(surviving_object_size);
839}
840
841} // namespace
842
844 {
846 heap_->tracer(), GCTracer::Scope::MC_SWEEP, ThreadKind::kMain,
847 sweeper_->GetTraceIdForFlowEvent(GCTracer::Scope::MC_SWEEP),
849
850    // Delay releasing empty new space pages and dead new large object pages
851    // until after pointer updating is done, because dead old space objects may
852    // have slots pointing to these pages that will need to be updated.
853 DCHECK_IMPLIES(!v8_flags.minor_ms,
856 GCTracer::Scope sweep_scope(
857 heap_->tracer(), GCTracer::Scope::MC_SWEEP_NEW, ThreadKind::kMain);
859 // Sweeping empty pages already relinks them to the freelist.
861 }
863 }
864
865 if (heap_->new_lo_space()) {
866 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_SWEEP_NEW_LO);
868 }
869
870#ifdef DEBUG
871 heap_->VerifyCountersBeforeConcurrentSweeping(
873#endif // DEBUG
874 }
875
876 if (heap_->new_space()) {
877 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_EVACUATE);
878 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
880 }
881
882 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_FINISH);
883
884 if (heap_->new_space()) {
887 }
888
889 auto* isolate = heap_->isolate();
891
893
894 marking_visitor_.reset();
898 key_to_values_.clear();
899
900 CHECK(weak_objects_.current_ephemerons.IsEmpty());
901 local_weak_objects_->next_ephemerons_local.Publish();
902 local_weak_objects_.reset();
903 weak_objects_.next_ephemerons.Clear();
904
906
907  // Release empty pages now that the pointer-update phase is done.
909
910 // Shrink pages if possible after processing and filtering slots.
911 ShrinkPagesToObjectSizes(heap_, heap_->lo_space());
912
913#ifdef DEBUG
914 DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
915 state_ = IDLE;
916#endif
917
919 // Some code objects were marked for deoptimization during the GC.
922 }
923}
924
932
933// This visitor is used to visit the bodies of special objects held alive by
934// other roots.
935//
936// It is currently used for:
937// - InstructionStream held alive by the top optimized frame. This code cannot
938//   be deoptimized and thus has to be kept alive in an isolated way, i.e., it
939//   should not keep alive other code objects reachable through the weak list,
940//   but it should keep alive its embedded pointers (which would otherwise be
941//   dropped).
942// - The prefix of the string table.
943// - If V8_ENABLE_SANDBOX, client isolates' waiter queue node
944//   ExternalPointer_t in shared isolates.
947 public:
949 : ObjectVisitorWithCageBases(collector->heap_->isolate()),
950 collector_(collector) {}
951
953 MarkObject(host, p.load(cage_base()));
954 }
955
957 MarkObject(host, host->map(cage_base()));
958 }
959
961 ObjectSlot end) final {
962 for (ObjectSlot p = start; p < end; ++p) {
963 // The map slot should be handled in VisitMapPointer.
964 DCHECK_NE(host->map_slot(), p);
966 MarkObject(host, p.load(cage_base()));
967 }
968 }
969
971 InstructionStreamSlot slot) override {
972 MarkObject(host, slot.load(code_cage_base()));
973 }
974
976 MaybeObjectSlot end) final {
977 // At the moment, custom roots cannot contain weak pointers.
978 UNREACHABLE();
979 }
980
982 RelocInfo* rinfo) override {
985 MarkObject(host, target);
986 }
987
989 RelocInfo* rinfo) override {
990 MarkObject(host, rinfo->target_object(cage_base()));
991 }
992
993 private:
995 if (!IsHeapObject(object)) return;
996 Tagged<HeapObject> heap_object = Cast<HeapObject>(object);
997 const auto target_worklist =
999 if (!target_worklist) {
1000 return;
1001 }
1002 collector_->MarkObject(host, heap_object, target_worklist.value());
1003 }
1004
1006};
1007
1009 : public HeapVisitor<MarkCompactCollector::SharedHeapObjectVisitor> {
1010 public:
1012 : HeapVisitor(collector->heap_->isolate()), collector_(collector) {}
1013
1015 CheckForSharedObject(host, p, p.load(cage_base()));
1016 }
1017
1019 Tagged<MaybeObject> object = p.load(cage_base());
1020 Tagged<HeapObject> heap_object;
1021 if (object.GetHeapObject(&heap_object))
1022 CheckForSharedObject(host, ObjectSlot(p), heap_object);
1023 }
1024
1026 CheckForSharedObject(host, host->map_slot(), host->map(cage_base()));
1027 }
1028
1030 ObjectSlot end) final {
1031 for (ObjectSlot p = start; p < end; ++p) {
1032 // The map slot should be handled in VisitMapPointer.
1033 DCHECK_NE(host->map_slot(), p);
1035 CheckForSharedObject(host, p, p.load(cage_base()));
1036 }
1037 }
1038
1040 InstructionStreamSlot slot) override {
1041 UNREACHABLE();
1042 }
1043
1045 MaybeObjectSlot end) final {
1046 for (MaybeObjectSlot p = start; p < end; ++p) {
1047 // The map slot should be handled in VisitMapPointer.
1048 DCHECK_NE(host->map_slot(), ObjectSlot(p));
1049 VisitPointer(host, p);
1050 }
1051 }
1052
1054 RelocInfo* rinfo) override {
1055 UNREACHABLE();
1056 }
1057
1059 RelocInfo* rinfo) override {
1060 UNREACHABLE();
1061 }
1062
1063 private:
1065 Tagged<Object> object) {
1067 Tagged<HeapObject> heap_object;
1068 if (!object.GetHeapObject(&heap_object)) return;
1069 if (!HeapLayout::InWritableSharedSpace(heap_object)) return;
1071 MemoryChunk* host_chunk = MemoryChunk::FromHeapObject(host);
1072 MutablePageMetadata* host_page_metadata =
1073 MutablePageMetadata::cast(host_chunk->Metadata());
1075 // Temporarily record new-to-shared slots in the old-to-shared remembered
1076 // set so we don't need to iterate the page again later for updating the
1077 // references.
1079 host_page_metadata, host_chunk->Offset(slot.address()));
1080 if (MarkingHelper::ShouldMarkObject(collector_->heap(), heap_object)) {
1081 collector_->MarkRootObject(Root::kClientHeap, heap_object,
1083 }
1084 }
1085
1087};
1088
1090 public:
1092
1093 void VisitRootPointers(Root root, const char* description,
1095 UNREACHABLE();
1096 }
1097
1098 void VisitRootPointers(Root root, const char* description,
1100 OffHeapObjectSlot end) override {
1101 DCHECK_EQ(root, Root::kStringTable);
1102 // Visit all HeapObject pointers in [start, end).
1103 Isolate* const isolate = heap_->isolate();
1104 for (OffHeapObjectSlot p = start; p < end; ++p) {
1105 Tagged<Object> o = p.load(isolate);
1106 if (IsHeapObject(o)) {
1107 Tagged<HeapObject> heap_object = Cast<HeapObject>(o);
1110 heap_, heap_->marking_state(), heap_object)) {
1113 }
1114 }
1115 }
1116 }
1117
1118 int PointersRemoved() const { return pointers_removed_; }
1119
1120 private:
1123};
1124
1125#ifdef V8_ENABLE_SANDBOX
1126class MarkExternalPointerFromExternalStringTable : public RootVisitor {
1127 public:
1128 explicit MarkExternalPointerFromExternalStringTable(
1129 ExternalPointerTable* shared_table, ExternalPointerTable::Space* space)
1130 : visitor(shared_table, space) {}
1131
1132 void VisitRootPointers(Root root, const char* description,
1133 FullObjectSlot start, FullObjectSlot end) override {
1134 // Visit all HeapObject pointers in [start, end).
1135 for (FullObjectSlot p = start; p < end; ++p) {
1136 Tagged<Object> o = *p;
1137 if (IsHeapObject(o)) {
1138 Tagged<HeapObject> heap_object = Cast<HeapObject>(o);
1139 if (IsExternalString(heap_object)) {
1140 Tagged<ExternalString> string = Cast<ExternalString>(heap_object);
1141 string->VisitExternalPointers(&visitor);
1142 } else {
1143 // The original external string may have been internalized.
1144 DCHECK(IsThinString(o));
1145 }
1146 }
1147 }
1148 }
1149
1150 private:
1151 class MarkExternalPointerTableVisitor : public ObjectVisitor {
1152 public:
1153 explicit MarkExternalPointerTableVisitor(ExternalPointerTable* table,
1154 ExternalPointerTable::Space* space)
1155 : table_(table), space_(space) {}
1156 void VisitExternalPointer(Tagged<HeapObject> host,
1157 ExternalPointerSlot slot) override {
1158 DCHECK(!slot.tag_range().IsEmpty());
1159 DCHECK(IsSharedExternalPointerType(slot.tag_range()));
1160 ExternalPointerHandle handle = slot.Relaxed_LoadHandle();
1161 table_->Mark(space_, handle, slot.address());
1162 }
1163 void VisitPointers(Tagged<HeapObject> host, ObjectSlot start,
1164 ObjectSlot end) override {
1165 UNREACHABLE();
1166 }
1167 void VisitPointers(Tagged<HeapObject> host, MaybeObjectSlot start,
1168 MaybeObjectSlot end) override {
1169 UNREACHABLE();
1170 }
1171 void VisitInstructionStreamPointer(Tagged<Code> host,
1172 InstructionStreamSlot slot) override {
1173 UNREACHABLE();
1174 }
1175 void VisitCodeTarget(Tagged<InstructionStream> host,
1176 RelocInfo* rinfo) override {
1177 UNREACHABLE();
1178 }
1179 void VisitEmbeddedPointer(Tagged<InstructionStream> host,
1180 RelocInfo* rinfo) override {
1181 UNREACHABLE();
1182 }
1183
1184 private:
1185 ExternalPointerTable* table_;
1186 ExternalPointerTable::Space* space_;
1187 };
1188
1189 MarkExternalPointerTableVisitor visitor;
1190};
1191#endif // V8_ENABLE_SANDBOX
1192
1193// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1194// are retained.
1196 public:
1198 : heap_(heap), marking_state_(marking_state) {}
1199
1201 Tagged<HeapObject> heap_object = Cast<HeapObject>(object);
1203 heap_object)) {
1204 return object;
1205 } else if (IsAllocationSite(heap_object) &&
1206 !Cast<AllocationSite>(object)->IsZombie()) {
1207 // "dead" AllocationSites need to live long enough for a traversal of new
1208 // space. These sites get a one-time reprieve.
1209
1210 Tagged<Object> nested = object;
1211 while (IsAllocationSite(nested)) {
1212 Tagged<AllocationSite> current_site = Cast<AllocationSite>(nested);
1213        // MarkZombie will override nested_site, so read it first before
1214        // marking.
1215 nested = current_site->nested_site();
1216 current_site->MarkZombie();
1218 }
1219
1220 return object;
1221 } else {
1222 return Smi::zero();
1223 }
1224 }
1225
1226 private:
1227 Heap* const heap_;
1229};
1230
1232 : public HeapVisitor<RecordMigratedSlotVisitor> {
1233 public:
1235 : HeapVisitor(heap->isolate()), heap_(heap) {}
1236
1237 V8_INLINE static constexpr bool UsePrecomputedObjectSize() { return true; }
1238
1239 inline void VisitPointer(Tagged<HeapObject> host, ObjectSlot p) final {
1241 RecordMigratedSlot(host, p.load(cage_base()), p.address());
1242 }
1243
1244 inline void VisitMapPointer(Tagged<HeapObject> host) final {
1245 VisitPointer(host, host->map_slot());
1246 }
1247
1249 DCHECK(!MapWord::IsPacked(p.Relaxed_Load(cage_base()).ptr()));
1250 RecordMigratedSlot(host, p.load(cage_base()), p.address());
1251 }
1252
1254 ObjectSlot end) final {
1255 while (start < end) {
1256 VisitPointer(host, start);
1257 ++start;
1258 }
1259 }
1260
1262 MaybeObjectSlot end) final {
1263 while (start < end) {
1264 VisitPointer(host, start);
1265 ++start;
1266 }
1267 }
1268
1270 InstructionStreamSlot slot) final {
1271    // This code is similar to the implementation of VisitPointer() modulo
1272    // the new kind of slot.
1274 Tagged<Object> code = slot.load(code_cage_base());
1275 RecordMigratedSlot(host, code, slot.address());
1276 }
1277
1278 inline void VisitEphemeron(Tagged<HeapObject> host, int index, ObjectSlot key,
1279 ObjectSlot value) override {
1280 DCHECK(IsEphemeronHashTable(host));
1282
1283    // Simply record ephemeron keys in OLD_TO_NEW if they point into the young
1284    // generation, instead of recording them in ephemeron_remembered_set here for
1285    // migrated objects. OLD_TO_NEW is per page and we can therefore easily
1286    // record in OLD_TO_NEW on different pages in parallel without merging. Both
1287    // sets are anyway guaranteed to be empty after a full GC.
1288 VisitPointer(host, key);
1289 VisitPointer(host, value);
1290 }
1291
1293 RelocInfo* rinfo) override {
1297    // The target is always in old space, so we don't have to record the slot
1298    // in the old-to-new remembered set.
1301 heap_->mark_compact_collector()->RecordRelocSlot(host, rinfo, target);
1302 }
1303
1305 RelocInfo* rinfo) override {
1307 Tagged<HeapObject> object = rinfo->target_object(cage_base());
1308 WriteBarrier::GenerationalForRelocInfo(host, rinfo, object);
1309 WriteBarrier::SharedForRelocInfo(host, rinfo, object);
1310 heap_->mark_compact_collector()->RecordRelocSlot(host, rinfo, object);
1311 }
1312
1313 // Entries that are skipped for recording.
1315 RelocInfo* rinfo) final {}
1317 RelocInfo* rinfo) final {}
1319 ExternalPointerSlot slot) final {}
1320
1323 IndirectPointerMode mode) final {}
1324
1327
1329 ProtectedPointerSlot slot) final {
1330 RecordMigratedSlot(host, slot.load(), slot.address());
1331 }
1332
1334 ProtectedMaybeObjectSlot slot) final {
1335 DCHECK(!MapWord::IsPacked(slot.Relaxed_Load().ptr()));
1336 RecordMigratedSlot(host, slot.load(), slot.address());
1337 }
1338
1339 protected:
1341 Tagged<MaybeObject> value, Address slot) {
1342 if (value.IsStrongOrWeak()) {
1343 MemoryChunk* value_chunk = MemoryChunk::FromAddress(value.ptr());
1344 MemoryChunk* host_chunk = MemoryChunk::FromHeapObject(host);
1345 if (HeapLayout::InYoungGeneration(value)) {
1346 MutablePageMetadata* host_metadata =
1347 MutablePageMetadata::cast(host_chunk->Metadata());
1348 DCHECK_IMPLIES(value_chunk->IsToPage(),
1349 v8_flags.minor_ms || value_chunk->IsLargePage());
1350 DCHECK(host_metadata->SweepingDone());
1352 host_metadata, host_chunk->Offset(slot));
1353 } else if (value_chunk->IsEvacuationCandidate()) {
1354 MutablePageMetadata* host_metadata =
1355 MutablePageMetadata::cast(host_chunk->Metadata());
1356 if (value_chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
1357 // TODO(377724745): currently needed because flags are untrusted.
1358 SBXCHECK(!InsideSandbox(value_chunk->address()));
1360 host_metadata, host_chunk->Offset(slot));
1361 } else if (value_chunk->IsFlagSet(MemoryChunk::IS_TRUSTED) &&
1362 host_chunk->IsFlagSet(MemoryChunk::IS_TRUSTED)) {
1363 // When the sandbox is disabled, we use plain tagged pointers to
1364 // reference trusted objects from untrusted ones. However, for these
1365 // references we want to use the OLD_TO_OLD remembered set, so here
1366 // we need to check that both the value chunk and the host chunk are
1367 // trusted space chunks.
1368 // TODO(377724745): currently needed because flags are untrusted.
1369 SBXCHECK(!InsideSandbox(value_chunk->address()));
1370 if (value_chunk->InWritableSharedSpace()) {
1372 AccessMode::NON_ATOMIC>(host_metadata,
1373 host_chunk->Offset(slot));
1374 } else {
1376 host_metadata, host_chunk->Offset(slot));
1377 }
1378 } else {
1380 host_metadata, host_chunk->Offset(slot));
1381 }
1382 } else if (value_chunk->InWritableSharedSpace() &&
1384 MutablePageMetadata* host_metadata =
1385 MutablePageMetadata::cast(host_chunk->Metadata());
1386 if (value_chunk->IsFlagSet(MemoryChunk::IS_TRUSTED) &&
1387 host_chunk->IsFlagSet(MemoryChunk::IS_TRUSTED)) {
1389 AccessMode::NON_ATOMIC>(host_metadata, host_chunk->Offset(slot));
1390 } else {
1392 host_metadata, host_chunk->Offset(slot));
1393 }
1394 }
1395 }
1396 }
1397
1398 Heap* const heap_;
1399};
1400
1402 public:
1404
1405 virtual ~MigrationObserver() = default;
1407 Tagged<HeapObject> dst, int size) = 0;
1408
1409 protected:
1411};
1412
1414 public:
1416
1418 Tagged<HeapObject> dst, int size) final {
1419 // Note this method is called in a concurrent setting. The current object
1420 // (src and dst) is somewhat safe to access without precautions, but other
1421 // objects may be subject to concurrent modification.
1422 if (dest == CODE_SPACE) {
1423 PROFILE(heap_->isolate(), CodeMoveEvent(Cast<InstructionStream>(src),
1425 } else if ((dest == OLD_SPACE || dest == TRUSTED_SPACE) &&
1426 IsBytecodeArray(dst)) {
1427 // TODO(saelo): remove `dest == OLD_SPACE` once BytecodeArrays are
1428 // allocated in trusted space.
1429 PROFILE(heap_->isolate(), BytecodeMoveEvent(Cast<BytecodeArray>(src),
1430 Cast<BytecodeArray>(dst)));
1431 }
1432 heap_->OnMoveEvent(src, dst, size);
1433 }
1434};
1435
1437 public:
1438 virtual ~HeapObjectVisitor() = default;
1439 virtual bool Visit(Tagged<HeapObject> object, int size) = 0;
1440};
1441
1443 public:
1448
1449#if DEBUG
1450 void DisableAbortEvacuationAtAddress(MutablePageMetadata* chunk) {
1451 abort_evacuation_at_address_ = chunk->area_end();
1452 }
1453
1454 void SetUpAbortEvacuationAtAddress(MutablePageMetadata* chunk) {
1455 if (v8_flags.stress_compaction || v8_flags.stress_compaction_random) {
1456 // Stress aborting of evacuation by aborting ~5% of evacuation candidates
1457 // when stress testing.
1458 const double kFraction = 0.05;
1459
1460 if (rng_->NextDouble() < kFraction) {
1461 const double abort_evacuation_percentage = rng_->NextDouble();
1462 abort_evacuation_at_address_ =
1463 chunk->area_start() +
1464 abort_evacuation_percentage * chunk->area_size();
1465 return;
1466 }
1467 }
1468
1469 abort_evacuation_at_address_ = chunk->area_end();
1470 }
1471#endif // DEBUG
1472
1473 protected:
1475
1477#if V8_COMPRESS_POINTERS
1478 return PtrComprCageBase{heap_->isolate()};
1479#else
1480 return PtrComprCageBase{};
1481#endif // V8_COMPRESS_POINTERS
1482 }
1483
1486 Tagged<HeapObject> src, int size,
1487 AllocationSpace dest);
1488
1489 template <MigrationMode mode>
1492 int size, AllocationSpace dest) {
1493 Address dst_addr = dst.address();
1494 Address src_addr = src.address();
1495 PtrComprCageBase cage_base = base->cage_base();
1496 DCHECK(base->heap_->AllowedToBeMigrated(src->map(cage_base), src, dest));
1497 DCHECK_NE(dest, LO_SPACE);
1498 DCHECK_NE(dest, CODE_LO_SPACE);
1500 if (dest == OLD_SPACE) {
1501 DCHECK_OBJECT_SIZE(size);
1503 base->heap_->CopyBlock(dst_addr, src_addr, size);
1504 if (mode != MigrationMode::kFast) {
1505 base->ExecuteMigrationObservers(dest, src, dst, size);
1506 }
1507 // In case the object's map gets relocated during GC we load the old map
1508 // here. This is fine since they store the same content.
1509 base->record_visitor_->Visit(dst->map(cage_base), dst, size);
1510 } else if (dest == SHARED_SPACE) {
1511 DCHECK_OBJECT_SIZE(size);
1513 base->heap_->CopyBlock(dst_addr, src_addr, size);
1514 if (mode != MigrationMode::kFast) {
1515 base->ExecuteMigrationObservers(dest, src, dst, size);
1516 }
1517 base->record_visitor_->Visit(dst->map(cage_base), dst, size);
1518 } else if (dest == TRUSTED_SPACE) {
1519 DCHECK_OBJECT_SIZE(size);
1521 base->heap_->CopyBlock(dst_addr, src_addr, size);
1522 if (mode != MigrationMode::kFast) {
1523 base->ExecuteMigrationObservers(dest, src, dst, size);
1524 }
1525 // In case the object's map gets relocated during GC we load the old map
1526 // here. This is fine since they store the same content.
1527 base->record_visitor_->Visit(dst->map(cage_base), dst, size);
1528 } else if (dest == CODE_SPACE) {
1530 {
1531 WritableJitAllocation writable_allocation =
1533 size);
1535 writable_allocation.CopyData(0, reinterpret_cast<uint8_t*>(src_addr),
1537 writable_allocation.CopyCode(
1539 reinterpret_cast<uint8_t*>(src_addr +
1543 istream->Relocate(writable_allocation, dst_addr - src_addr);
1544 }
1545 if (mode != MigrationMode::kFast) {
1546 base->ExecuteMigrationObservers(dest, src, dst, size);
1547 }
1548 // In case the object's map gets relocated during GC we load the old map
1549 // here. This is fine since they store the same content.
1550 base->record_visitor_->Visit(dst->map(cage_base), dst, size);
1551 } else {
1552 DCHECK_OBJECT_SIZE(size);
1553 DCHECK(dest == NEW_SPACE);
1554 base->heap_->CopyBlock(dst_addr, src_addr, size);
1555 if (mode != MigrationMode::kFast) {
1556 base->ExecuteMigrationObservers(dest, src, dst, size);
1557 }
1558 }
1559
1560 if (dest == CODE_SPACE) {
1561 WritableJitAllocation jit_allocation =
1566 } else {
1567 src->set_map_word_forwarded(dst, kRelaxedStore);
1568 }
1569 }
1570
1572 RecordMigratedSlotVisitor* record_visitor)
1573 : heap_(heap),
1574 local_allocator_(local_allocator),
1575 record_visitor_(record_visitor),
1576 shared_string_table_(v8_flags.shared_string_table &&
1577 heap->isolate()->has_shared_space()) {
1579#if DEBUG
1580 rng_.emplace(heap_->isolate()->fuzzer_rng()->NextInt64());
1581#endif // DEBUG
1582 }
1583
1584 inline bool TryEvacuateObject(AllocationSpace target_space,
1585 Tagged<HeapObject> object, int size,
1586 Tagged<HeapObject>* target_object) {
1587#if DEBUG
1588 DCHECK_LE(abort_evacuation_at_address_,
1589 MutablePageMetadata::FromHeapObject(object)->area_end());
1590 DCHECK_GE(abort_evacuation_at_address_,
1591 MutablePageMetadata::FromHeapObject(object)->area_start());
1592
1593 if (V8_UNLIKELY(object.address() >= abort_evacuation_at_address_)) {
1594 return false;
1595 }
1596#endif // DEBUG
1597
1598 Tagged<Map> map = object->map(cage_base());
1600 AllocationResult allocation;
1601 if (target_space == OLD_SPACE && ShouldPromoteIntoSharedHeap(map)) {
1602 allocation = local_allocator_->Allocate(SHARED_SPACE, size, alignment);
1603 } else {
1604 allocation = local_allocator_->Allocate(target_space, size, alignment);
1605 }
1606 if (allocation.To(target_object)) {
1607 MigrateObject(*target_object, object, size, target_space);
1608 return true;
1609 }
1610 return false;
1611 }
1612
1616 map->instance_type());
1617 }
1618 return false;
1619 }
1620
1623 Tagged<HeapObject> dst, int size) {
1624 for (MigrationObserver* obs : observers_) {
1625 obs->Move(dest, src, dst, size);
1626 }
1627 }
1628
1630 int size, AllocationSpace dest) {
1631 migration_function_(this, dst, src, size, dest);
1632 }
1633
1637 std::vector<MigrationObserver*> observers_;
1640#if DEBUG
1641 Address abort_evacuation_at_address_{kNullAddress};
1642#endif // DEBUG
1643 std::optional<base::RandomNumberGenerator> rng_;
1644};
1645
1647 public:
1649 Heap* heap, EvacuationAllocator* local_allocator,
1650 RecordMigratedSlotVisitor* record_visitor,
1651 PretenuringHandler::PretenuringFeedbackMap* local_pretenuring_feedback)
1652 : EvacuateVisitorBase(heap, local_allocator, record_visitor),
1653 promoted_size_(0),
1654 pretenuring_handler_(heap_->pretenuring_handler()),
1655 local_pretenuring_feedback_(local_pretenuring_feedback),
1656 is_incremental_marking_(heap->incremental_marking()->IsMarking()),
1657 shortcut_strings_(!heap_->IsGCWithStack() ||
1658 v8_flags.shortcut_strings_with_stack) {
1660 heap->incremental_marking()->IsMajorMarking());
1661 }
1662
1663 inline bool Visit(Tagged<HeapObject> object, int size) override {
1664 if (TryEvacuateWithoutCopy(object)) return true;
1665 Tagged<HeapObject> target_object;
1666
1667 PretenuringHandler::UpdateAllocationSite(heap_, object->map(), object, size,
1669
1670 if (!TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
1672 "MarkCompactCollector: young object promotion failed");
1673 }
1674
1676 return true;
1677 }
1678
1679 intptr_t promoted_size() { return promoted_size_; }
1680
1681 private:
1684
1685 if (!shortcut_strings_) return false;
1686
1687 Tagged<Map> map = object->map();
1688
1689 // Some objects can be evacuated without creating a copy.
1690 if (map->visitor_id() == kVisitThinString) {
1691 Tagged<HeapObject> actual = Cast<ThinString>(object)->unchecked_actual();
1692 if (MarkCompactCollector::IsOnEvacuationCandidate(actual)) return false;
1693 object->set_map_word_forwarded(actual, kRelaxedStore);
1694 return true;
1695 }
1696 // TODO(mlippautz): Handle ConsString.
1697
1698 return false;
1699 }
1700
1702 Tagged<HeapObject> old_object, int size,
1703 Tagged<HeapObject>* target_object) {
1704 AllocationAlignment alignment =
1705 HeapObject::RequiredAlignment(old_object->map());
1706 AllocationSpace space_allocated_in = NEW_SPACE;
1707 AllocationResult allocation =
1708 local_allocator_->Allocate(NEW_SPACE, size, alignment);
1709 if (allocation.IsFailure()) {
1710 allocation = AllocateInOldSpace(size, alignment);
1711 space_allocated_in = OLD_SPACE;
1712 }
1713 bool ok = allocation.To(target_object);
1714 DCHECK(ok);
1715 USE(ok);
1716 return space_allocated_in;
1717 }
1718
1719 inline AllocationResult AllocateInOldSpace(int size_in_bytes,
1720 AllocationAlignment alignment) {
1721 AllocationResult allocation =
1722 local_allocator_->Allocate(OLD_SPACE, size_in_bytes, alignment);
1723 if (allocation.IsFailure()) {
1725 "MarkCompactCollector: semi-space copy, fallback in old gen");
1726 }
1727 return allocation;
1728 }
1729
1735};
1736
1738 public:
1740 Heap* heap, RecordMigratedSlotVisitor* record_visitor,
1741 PretenuringHandler::PretenuringFeedbackMap* local_pretenuring_feedback)
1742 : heap_(heap),
1743 record_visitor_(record_visitor),
1744 moved_bytes_(0),
1745 pretenuring_handler_(heap_->pretenuring_handler()),
1746 local_pretenuring_feedback_(local_pretenuring_feedback) {}
1747
1748 static void Move(PageMetadata* page) {
1749 page->heap()->new_space()->PromotePageToOldSpace(
1750 page, v8_flags.minor_ms ? FreeMode::kDoNotLinkCategory
1752 }
1753
1754 inline bool Visit(Tagged<HeapObject> object, int size) override {
1755 PretenuringHandler::UpdateAllocationSite(heap_, object->map(), object, size,
1758 record_visitor_->Visit(object->map(), object, size);
1759 return true;
1760 }
1761
1762 intptr_t moved_bytes() { return moved_bytes_; }
1763 void account_moved_bytes(intptr_t bytes) { moved_bytes_ += bytes; }
1764
1765 private:
1771};
1772
1774 public:
1776 RecordMigratedSlotVisitor* record_visitor)
1777 : EvacuateVisitorBase(heap, local_allocator, record_visitor) {}
1778
1779 inline bool Visit(Tagged<HeapObject> object, int size) override {
1780 Tagged<HeapObject> target_object;
1782 PageMetadata::FromHeapObject(object)->owner_identity(), object,
1783 size, &target_object)) {
1784 DCHECK(object->map_word(heap_->isolate(), kRelaxedLoad)
1785 .IsForwardingAddress());
1786 return true;
1787 }
1788 return false;
1789 }
1790};
1791
1793 public:
1795 : heap_(heap), cage_base_(heap->isolate()) {}
1796
1797 bool Visit(Tagged<HeapObject> object, int size) override {
1799 Tagged<Map> map = object->map(cage_base_);
1800 // Instead of calling object.IterateFast(cage_base(), &visitor) here
1801 // we can shortcut and use the precomputed size value passed to the visitor.
1802 DCHECK_EQ(object->SizeFromMap(map), size);
1804 visitor.Visit(map, object, size);
1805 return true;
1806 }
1807
1808 size_t live_object_size() const { return live_object_size_; }
1809
1810 private:
1814};
1815
1816// static
1818 Tagged<Object> o = *p;
1819 if (!IsHeapObject(o)) return false;
1820 Tagged<HeapObject> heap_object = Cast<HeapObject>(o);
1822 heap, heap->non_atomic_marking_state(), heap_object);
1823}
1824
1825// static
1827 FullObjectSlot p) {
1828 Tagged<Object> o = *p;
1829 if (!IsHeapObject(o)) return false;
1830 Tagged<HeapObject> heap_object = Cast<HeapObject>(o);
1831 Heap* shared_space_heap =
1832 client_heap->isolate()->shared_space_isolate()->heap();
1833 if (!HeapLayout::InWritableSharedSpace(heap_object)) return false;
1835 shared_space_heap, shared_space_heap->non_atomic_marking_state(),
1836 heap_object);
1837}
1838
1840 Isolate* const isolate = heap_->isolate();
1841
1842 // Mark the heap roots including global variables, stack variables,
1843 // etc., and all objects reachable from them.
1845 root_visitor,
1849
1850 // Custom marking for top optimized frame.
1851 CustomRootBodyMarkingVisitor custom_root_body_visitor(this);
1852 ProcessTopOptimizedFrame(&custom_root_body_visitor, isolate);
1853
1854 if (isolate->is_shared_space_isolate()) {
1855 ClientRootVisitor<> client_root_visitor(root_visitor);
1856 ClientObjectVisitor<> client_custom_root_body_visitor(
1857 &custom_root_body_visitor);
1858
1859 isolate->global_safepoint()->IterateClientIsolates(
1860 [this, &client_root_visitor,
1861 &client_custom_root_body_visitor](Isolate* client) {
1862 client->heap()->IterateRoots(
1863 &client_root_visitor,
1867 ProcessTopOptimizedFrame(&client_custom_root_body_visitor, client);
1868 });
1869 }
1870}
1871
1873 RootVisitor* root_visitor) {
1874 TRACE_GC(heap_->tracer(), GCTracer::Scope::CONSERVATIVE_STACK_SCANNING);
1876 Heap::IterateRootsMode::kMainIsolate);
1877
1878 Isolate* const isolate = heap_->isolate();
1879 if (isolate->is_shared_space_isolate()) {
1880 ClientRootVisitor<> client_root_visitor(root_visitor);
1881 // For client isolates, use the stack marker to conservatively scan the
1882 // stack.
1883 isolate->global_safepoint()->IterateClientIsolates(
1884 [v = &client_root_visitor](Isolate* client) {
1886 v, Heap::IterateRootsMode::kClientIsolate);
1887 });
1888 }
1889}
1890
1892 Isolate* const isolate = heap_->isolate();
1893 if (!isolate->is_shared_space_isolate()) return;
1894
1896 [collector = this](Isolate* client) {
1897 collector->MarkObjectsFromClientHeap(client);
1898 });
1899}
1900
1902 // There is no OLD_TO_SHARED remembered set for the young generation. We
1903 // therefore need to iterate each object and check whether it points into the
1904 // shared heap. As an optimization and to avoid a second heap iteration in the
1905 // "update pointers" phase, all pointers into the shared heap are recorded in
1906 // the OLD_TO_SHARED remembered set as well.
1907 SharedHeapObjectVisitor visitor(this);
1908
1909 PtrComprCageBase cage_base(client);
1910 Heap* client_heap = client->heap();
1911
1912 // Finish sweeping quarantined pages for Scavenger's new space in order to
1913 // iterate objects in it.
1915 // Finish sweeping for new space in order to iterate objects in it.
1916 client_heap->sweeper()->FinishMinorJobs();
1917 // Finish sweeping for old generation in order to iterate OLD_TO_SHARED.
1918 client_heap->sweeper()->FinishMajorJobs();
1919
1920 if (auto* new_space = client_heap->new_space()) {
1921 DCHECK(!client_heap->allocator()->new_space_allocator()->IsLabValid());
1922 for (PageMetadata* page : *new_space) {
1923 for (Tagged<HeapObject> obj : HeapObjectRange(page)) {
1924 visitor.Visit(obj);
1925 }
1926 }
1927 }
1928
1929 if (client_heap->new_lo_space()) {
1930 std::unique_ptr<ObjectIterator> iterator =
1931 client_heap->new_lo_space()->GetObjectIterator(client_heap);
1932 for (Tagged<HeapObject> obj = iterator->Next(); !obj.is_null();
1933 obj = iterator->Next()) {
1934 visitor.Visit(obj);
1935 }
1936 }
1937
1938 // In the old generation we can simply use the OLD_TO_SHARED remembered set to
1939 // find all incoming pointers into the shared heap.
1940 OldGenerationMemoryChunkIterator chunk_iterator(client_heap);
1941
1942 // Tracking OLD_TO_SHARED requires the write barrier.
1943 DCHECK(!v8_flags.disable_write_barriers);
1944
1945 for (MutablePageMetadata* chunk = chunk_iterator.next(); chunk;
1946 chunk = chunk_iterator.next()) {
1947 const auto slot_count = RememberedSet<OLD_TO_SHARED>::Iterate(
1948 chunk,
1949 [collector = this, cage_base](MaybeObjectSlot slot) {
1950 Tagged<MaybeObject> obj = slot.Relaxed_Load(cage_base);
1951 Tagged<HeapObject> heap_object;
1952
1953 if (obj.GetHeapObject(&heap_object) &&
1954 HeapLayout::InWritableSharedSpace(heap_object)) {
1955 // If the object points to the black allocated shared page, don't
1956 // mark the object, but still keep the slot.
1957 if (MarkingHelper::ShouldMarkObject(collector->heap(),
1958 heap_object)) {
1959 collector->MarkRootObject(
1960 Root::kClientHeap, heap_object,
1962 }
1963 return KEEP_SLOT;
1964 } else {
1965 return REMOVE_SLOT;
1966 }
1967 },
1969 if (slot_count == 0) {
1970 chunk->ReleaseSlotSet(OLD_TO_SHARED);
1971 }
1972
1973 const auto typed_slot_count = RememberedSet<OLD_TO_SHARED>::IterateTyped(
1974 chunk,
1975 [collector = this, client_heap](SlotType slot_type, Address slot) {
1976 Tagged<HeapObject> heap_object =
1977 UpdateTypedSlotHelper::GetTargetObject(client_heap, slot_type,
1978 slot);
1979 if (HeapLayout::InWritableSharedSpace(heap_object)) {
1980 // If the object points to the black allocated shared page, don't
1981 // mark the object, but still keep the slot.
1982 if (MarkingHelper::ShouldMarkObject(collector->heap(),
1983 heap_object)) {
1984 collector->MarkRootObject(
1985 Root::kClientHeap, heap_object,
1987 }
1988 return KEEP_SLOT;
1989 } else {
1990 return REMOVE_SLOT;
1991 }
1992 });
1993 if (typed_slot_count == 0) {
1994 chunk->ReleaseTypedSlotSet(OLD_TO_SHARED);
1995 }
1996
1997 const auto protected_slot_count =
1999 chunk,
2000 [collector = this](MaybeObjectSlot slot) {
2001 ProtectedPointerSlot protected_slot(slot.address());
2002 Tagged<MaybeObject> obj = protected_slot.Relaxed_Load();
2003 Tagged<HeapObject> heap_object;
2004
2005 if (obj.GetHeapObject(&heap_object) &&
2006 HeapLayout::InWritableSharedSpace(heap_object)) {
2007 // If the object points to the black allocated shared page,
2008 // don't mark the object, but still keep the slot.
2009 if (MarkingHelper::ShouldMarkObject(collector->heap(),
2010 heap_object)) {
2011 collector->MarkRootObject(
2012 Root::kClientHeap, heap_object,
2014 }
2015 return KEEP_SLOT;
2016 } else {
2017 return REMOVE_SLOT;
2018 }
2019 },
2021 if (protected_slot_count == 0) {
2022 chunk->ReleaseSlotSet(TRUSTED_TO_SHARED_TRUSTED);
2023 }
2024 }
2025
2026#ifdef V8_ENABLE_SANDBOX
2029 // All ExternalString resources are stored in the shared external pointer
2030 // table. Mark entries from client heaps.
2031 ExternalPointerTable& shared_table = client->shared_external_pointer_table();
2032 ExternalPointerTable::Space* shared_space =
2033 client->shared_external_pointer_space();
2034 MarkExternalPointerFromExternalStringTable external_string_visitor(
2035 &shared_table, shared_space);
2036 client_heap->external_string_table_.IterateAll(&external_string_visitor);
2037#endif // V8_ENABLE_SANDBOX
2038}
2039
2040 bool MarkCompactCollector::MarkTransitiveClosureUntilFixpoint() {
2041 int iterations = 0;
2042 int max_iterations = v8_flags.ephemeron_fixpoint_iterations;
2043
2044 bool another_ephemeron_iteration_main_thread;
2045
2046 do {
2047 if (iterations >= max_iterations) {
2048 // Give up fixpoint iteration and switch to linear algorithm.
2049 return false;
2050 }
2051
2053
2054 // Move ephemerons from next_ephemerons into current_ephemerons to
2055 // drain them in this iteration.
2056 DCHECK(
2057 local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
2058 weak_objects_.current_ephemerons.Merge(weak_objects_.next_ephemerons);
2060
2061 {
2062 TRACE_GC(heap_->tracer(),
2063 GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
2064 another_ephemeron_iteration_main_thread = ProcessEphemerons();
2065 }
2066
2067 // Can only check for local emptiness here as parallel marking tasks may
2068 // still be running. The caller performs the CHECKs for global emptiness.
2069 CHECK(local_weak_objects()->current_ephemerons_local.IsLocalEmpty());
2070 CHECK(local_weak_objects()->next_ephemerons_local.IsLocalEmpty());
2071
2072 ++iterations;
2073 } while (another_ephemeron_iteration_main_thread ||
2075 !local_marking_worklists_->IsEmpty() ||
2077
2078 return true;
2079}
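// Editor's note: illustrative sketch, not part of mark-compact.cc. The loop
// above repeatedly drains ephemerons until a whole pass makes no progress (or
// the iteration budget is exhausted). The core semantics being iterated to a
// fixpoint is "a value becomes live only once its key is live", modeled below
// with ints standing in for heap objects.
#if 0
#include <set>
#include <utility>
#include <vector>

void MarkEphemeronValuesToFixpoint(
    std::set<int>& marked, const std::vector<std::pair<int, int>>& ephemerons) {
  bool progress = true;
  while (progress) {
    progress = false;
    for (const auto& [key, value] : ephemerons) {
      if (marked.count(key) != 0 && marked.count(value) == 0) {
        marked.insert(value);  // the key is live, so the value becomes live
        progress = true;       // another pass is needed, as in the loop above
      }
    }
  }
}
#endif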
2080
2081 bool MarkCompactCollector::ProcessEphemerons() {
2082 Ephemeron ephemeron;
2083 bool another_ephemeron_iteration = false;
2084
2085 // Drain current_ephemerons and push ephemerons where key and value are still
2086 // unreachable into next_ephemerons.
2087 while (local_weak_objects()->current_ephemerons_local.Pop(&ephemeron)) {
2088 if (ProcessEphemeron(ephemeron.key, ephemeron.value)) {
2089 another_ephemeron_iteration = true;
2090 }
2091 }
2092
2093 // Drain marking worklist and push discovered ephemerons into
2094 // next_ephemerons.
2095 size_t objects_processed;
2096 std::tie(std::ignore, objects_processed) =
2098
2099 // As soon as a single object was processed and potentially marked another
2100 // object, we need another iteration. Otherwise we might fail to apply
2101 // ephemeron semantics to it.
2102 if (objects_processed > 0) another_ephemeron_iteration = true;
2103
2104 // Flush local ephemerons for main task to global pool.
2105 local_weak_objects()->ephemeron_hash_tables_local.Publish();
2106 local_weak_objects()->next_ephemerons_local.Publish();
2107
2108 return another_ephemeron_iteration;
2109}
2110
2111 void MarkCompactCollector::MarkTransitiveClosureLinear() {
2112 TRACE_GC(heap_->tracer(),
2113 GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR);
2114 // This phase doesn't support parallel marking.
2116 DCHECK(key_to_values_.empty());
2117 DCHECK(
2118 local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
2119
2120 // Update visitor to directly add new ephemerons to key_to_values_.
2121 marking_visitor_->SetKeyToValues(&key_to_values_);
2122
2123 Ephemeron ephemeron;
2124 while (local_weak_objects()->next_ephemerons_local.Pop(&ephemeron)) {
2125 if (ApplyEphemeronSemantics(ephemeron.key, ephemeron.value) ==
2127 auto it = key_to_values_.try_emplace(ephemeron.key).first;
2128 it->second.push_back(ephemeron.value);
2129 }
2130 }
2131
2132 bool work_to_do;
2133
2134 do {
2136
2137 {
2138 TRACE_GC(heap_->tracer(),
2139 GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
2140 // Drain marking worklist but:
2141 // (1) push all new ephemerons directly into key_to_values_.
2142 // (2) look up all traced objects in key_to_values_.
2145 v8::base::TimeDelta::Max(), SIZE_MAX);
2146 }
2147
2148 // Do NOT drain marking worklist here, otherwise the current checks
2149 // for work_to_do are not sufficient for determining if another iteration
2150 // is necessary.
2151
2152 work_to_do =
2153 !local_marking_worklists_->IsEmpty() ||
2155 CHECK(local_weak_objects()->next_ephemerons_local.IsLocalAndGlobalEmpty());
2156 } while (work_to_do);
2157
2158 CHECK(local_marking_worklists_->IsEmpty());
2159
2160 CHECK(weak_objects_.current_ephemerons.IsEmpty());
2161 CHECK(weak_objects_.next_ephemerons.IsEmpty());
2162
2163 // Flush local ephemerons for main task to global pool.
2164 local_weak_objects()->ephemeron_hash_tables_local.Publish();
2165 local_weak_objects()->next_ephemerons_local.Publish();
2166}
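// Editor's note: illustrative sketch, not part of mark-compact.cc. In the
// linear algorithm above, unresolved ephemerons are grouped by key in
// key_to_values_; when a key is eventually marked, all values recorded for it
// are pushed to the worklist exactly once and the entry is erased, which keeps
// the total ephemeron work linear. A simplified model with ints as objects:
#if 0
#include <unordered_map>
#include <vector>

void OnKeyMarked(int key,
                 std::unordered_map<int, std::vector<int>>& key_to_values,
                 std::vector<int>& worklist) {
  auto it = key_to_values.find(key);
  if (it == key_to_values.end()) return;
  for (int value : it->second) worklist.push_back(value);
  key_to_values.erase(it);  // each ephemeron is resolved at most once
}
#endif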
2167
2169 auto* cpp_heap = CppHeap::From(heap_->cpp_heap_);
2170 if (!cpp_heap) return;
2171
2172 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_TRACING);
2173 cpp_heap->AdvanceMarking(v8::base::TimeDelta::Max(), SIZE_MAX);
2174}
2175
2176namespace {
2177
2178constexpr size_t kDeadlineCheckInterval = 128u;
2179
2180} // namespace
2181
2182template <MarkCompactCollector::MarkingWorklistProcessingMode mode>
2184 v8::base::TimeDelta max_duration, size_t max_bytes_to_process) {
2185 Tagged<HeapObject> object;
2186 size_t bytes_processed = 0;
2187 size_t objects_processed = 0;
2188 bool is_per_context_mode = local_marking_worklists_->IsPerContextMode();
2189 Isolate* const isolate = heap_->isolate();
2190 const auto start = v8::base::TimeTicks::Now();
2191 PtrComprCageBase cage_base(isolate);
2192
2196 }
2197
2198 while (local_marking_worklists_->Pop(&object) ||
2199 local_marking_worklists_->PopOnHold(&object)) {
2200 // The marking worklist should never contain filler objects.
2201 CHECK(!IsFreeSpaceOrFiller(object, cage_base));
2202 DCHECK(IsHeapObject(object));
2205 DCHECK(heap_->Contains(object));
2206 DCHECK(!(marking_state_->IsUnmarked(object)));
2207
2208 if constexpr (mode ==
2210 auto it = key_to_values_.find(object);
2211 if (it != key_to_values_.end()) {
2212 for (Tagged<HeapObject> value : it->second) {
2213 const auto target_worklist =
2215 if (target_worklist) {
2216 MarkObject(object, value, target_worklist.value());
2217 }
2218 }
2219 key_to_values_.erase(it);
2220 }
2221 }
2222
2223 Tagged<Map> map = object->map(cage_base);
2224 if (is_per_context_mode) {
2226 if (native_context_inferrer_.Infer(cage_base, map, object, &context)) {
2227 local_marking_worklists_->SwitchToContext(context);
2228 }
2229 }
2230 const auto visited_size = marking_visitor_->Visit(map, object);
2231 if (visited_size) {
2233 ALIGN_TO_ALLOCATION_ALIGNMENT(visited_size));
2234 }
2235 if (is_per_context_mode) {
2237 map, object, visited_size);
2238 }
2239 bytes_processed += visited_size;
2240 objects_processed++;
2241 static_assert(base::bits::IsPowerOfTwo(kDeadlineCheckInterval),
2242 "kDeadlineCheckInterval must be power of 2");
2243 // The below check is an optimized version of
2244 // `(objects_processed % kDeadlineCheckInterval) == 0`
2245 if ((objects_processed & (kDeadlineCheckInterval - 1)) == 0 &&
2246 ((v8::base::TimeTicks::Now() - start) > max_duration)) {
2247 break;
2248 }
2249 if (bytes_processed >= max_bytes_to_process) {
2250 break;
2251 }
2252 }
2253 return std::make_pair(bytes_processed, objects_processed);
2254}
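// Editor's note: illustrative sketch, not part of mark-compact.cc. The
// deadline check above avoids a division: because kDeadlineCheckInterval is a
// power of two, `n % kDeadlineCheckInterval == 0` is equivalent to
// `(n & (kDeadlineCheckInterval - 1)) == 0`. A standalone version of that
// check, with kInterval as a hypothetical local constant:
#if 0
#include <cstddef>

constexpr size_t kInterval = 128;  // must be a power of two
static_assert((kInterval & (kInterval - 1)) == 0, "power of two required");

constexpr bool IsDeadlineCheckDue(size_t objects_processed) {
  // Same result as (objects_processed % kInterval) == 0, computed with a mask.
  return (objects_processed & (kInterval - 1)) == 0;
}

static_assert(IsDeadlineCheckDue(0));
static_assert(IsDeadlineCheckDue(256));
static_assert(!IsDeadlineCheckDue(257));
#endif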
2255
2257 Tagged<HeapObject> value) {
2259
2261 local_weak_objects()->next_ephemerons_local.Push(Ephemeron{key, value});
2262 return true;
2263 }
2264
2266}
2267
2270 Tagged<HeapObject> value) {
2271 // Objects in the shared heap are prohibited from being used as keys in
2272 // WeakMaps and WeakSets and therefore cannot be ephemeron keys, because that
2273 // would enable thread local -> shared heap edges.
2275 // Usually values that should not be marked are not added to the ephemeron
2276 // worklist. However, minor collection during incremental marking may promote
2277 // strings from the younger generation into the shared heap. This
2278 // ShouldMarkObject call catches those cases.
2279 const auto target_worklist = MarkingHelper::ShouldMarkObject(heap_, value);
2280 if (!target_worklist) {
2281 // The value doesn't need to be marked in this GC, so no need to track
2282 // ephemeron further.
2284 }
2285
2288 marking_state_, target_worklist.value(),
2289 value)) {
2291 } else {
2293 }
2294 } else {
2295 if (marking_state_->IsMarked(value)) {
2297 } else {
2299 }
2300 }
2301}
2302
2304#ifdef VERIFY_HEAP
2305 if (v8_flags.verify_heap) {
2306 Ephemeron ephemeron;
2307
2308 // In the fixpoint iteration all unresolved ephemerons are in
2309 // `next_ephemerons_`.
2310 CHECK(
2311 local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
2312 weak_objects_.current_ephemerons.Merge(weak_objects_.next_ephemerons);
2313 while (local_weak_objects()->current_ephemerons_local.Pop(&ephemeron)) {
2314 CHECK_NE(ApplyEphemeronSemantics(ephemeron.key, ephemeron.value),
2316 }
2317
2318 // In the linear-time algorithm ephemerons are kept in `key_to_values_`.
2319 for (auto& [key, values] : key_to_values_) {
2320 for (auto value : values) {
2323 }
2324 }
2325 }
2326#endif // VERIFY_HEAP
2327}
2328
2330 // Incremental marking might leave ephemerons in the main task's local
2331 // buffer; flush them into the global pool.
2332 local_weak_objects()->next_ephemerons_local.Publish();
2333
2335 // Fixpoint iteration needed too many iterations and was cancelled. Use the
2336 // guaranteed linear algorithm. But only in the final single-thread marking
2337 // phase.
2339 }
2340}
2341
2342 void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor,
2343 Isolate* isolate) {
2344 for (StackFrameIterator it(isolate, isolate->thread_local_top()); !it.done();
2345 it.Advance()) {
2346 if (it.frame()->is_unoptimized_js()) return;
2347 if (it.frame()->is_optimized_js()) {
2348 Tagged<GcSafeCode> lookup_result = it.frame()->GcSafeLookupCode();
2349 if (!lookup_result->has_instruction_stream()) return;
2350 if (!lookup_result->CanDeoptAt(isolate,
2351 it.frame()->maybe_unauthenticated_pc())) {
2353 lookup_result->raw_instruction_stream());
2354 PtrComprCageBase cage_base(isolate);
2355 InstructionStream::BodyDescriptor::IterateBody(istream->map(cage_base),
2356 istream, visitor);
2357 }
2358 return;
2359 }
2360 }
2361}
2362
2365 // Cannot run during bootstrapping due to incomplete objects.
2366 if (heap_->isolate()->bootstrapper()->IsActive()) return;
2367 TRACE_EVENT0(TRACE_GC_CATEGORIES, "V8.GC_OBJECT_DUMP_STATISTICS");
2370 heap_->dead_object_stats_.get());
2371 collector.Collect();
2372 if (V8_UNLIKELY(TracingFlags::gc_stats.load(std::memory_order_relaxed) &
2374 std::stringstream live, dead;
2375 heap_->live_object_stats_->Dump(live);
2376 heap_->dead_object_stats_->Dump(dead);
2378 "V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
2379 "live", TRACE_STR_COPY(live.str().c_str()), "dead",
2380 TRACE_STR_COPY(dead.str().c_str()));
2381 }
2382 if (v8_flags.trace_gc_object_stats) {
2383 heap_->live_object_stats_->PrintJSON("live");
2384 heap_->dead_object_stats_->PrintJSON("dead");
2385 }
2386 heap_->live_object_stats_->CheckpointObjectStats();
2387 heap_->dead_object_stats_->ClearObjectStats();
2388}
2389
2390namespace {
2391
2392bool ShouldRetainMap(Heap* heap, MarkingState* marking_state, Tagged<Map> map,
2393 int age) {
2394 if (age == 0) {
2395 // The map has aged. Do not retain this map.
2396 return false;
2397 }
2398 Tagged<Object> constructor = map->GetConstructor();
2399 if (!IsHeapObject(constructor) ||
2401 heap, marking_state, Cast<HeapObject>(constructor))) {
2402 // The constructor is dead, no new objects with this map can
2403 // be created. Do not retain this map.
2404 return false;
2405 }
2406 return true;
2407}
2408
2409} // namespace
2410
2412 // Retaining maps increases the chances of reusing map transitions at some
2413 // memory cost, hence disable it when trying to reduce memory footprint more
2414 // aggressively.
2415 const bool should_retain_maps =
2416 !heap_->ShouldReduceMemory() && v8_flags.retain_maps_for_n_gc != 0;
2417
2418 for (Tagged<WeakArrayList> retained_maps : heap_->FindAllRetainedMaps()) {
2419 DCHECK_EQ(0, retained_maps->length() % 2);
2420 for (int i = 0; i < retained_maps->length(); i += 2) {
2421 Tagged<MaybeObject> value = retained_maps->Get(i);
2422 Tagged<HeapObject> map_heap_object;
2423 if (!value.GetHeapObjectIfWeak(&map_heap_object)) {
2424 continue;
2425 }
2426 int age = retained_maps->Get(i + 1).ToSmi().value();
2427 int new_age;
2428 Tagged<Map> map = Cast<Map>(map_heap_object);
2429 if (should_retain_maps && MarkingHelper::IsUnmarkedAndNotAlwaysLive(
2430 heap_, marking_state_, map)) {
2431 if (ShouldRetainMap(heap_, marking_state_, map, age)) {
2436 }
2437 }
2438 Tagged<Object> prototype = map->prototype();
2439 if (age > 0 && IsHeapObject(prototype) &&
2441 heap_, marking_state_, Cast<HeapObject>(prototype))) {
2442 // The prototype is not marked, age the map.
2443 new_age = age - 1;
2444 } else {
2445 // The prototype and the constructor are marked, this map keeps only
2446 // transition tree alive, not JSObjects. Do not age the map.
2447 new_age = age;
2448 }
2449 } else {
2450 new_age = v8_flags.retain_maps_for_n_gc;
2451 }
2452 // Compact the array and update the age.
2453 if (new_age != age) {
2454 retained_maps->Set(i + 1, Smi::FromInt(new_age));
2455 }
2456 }
2457 }
2458}
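// Editor's note: illustrative sketch, not part of mark-compact.cc. RetainMaps
// above implements a simple aging scheme: a marked map gets its age reset to
// v8_flags.retain_maps_for_n_gc, while an unmarked map only ages (and
// eventually becomes collectable) when its prototype is also unmarked. A toy
// version of that aging rule only, with boolean stand-ins for the marking
// queries and without the retention/marking side effects:
#if 0
int AgeRetainedMap(bool map_is_marked, bool prototype_is_marked, int age,
                   int max_age /* stand-in for retain_maps_for_n_gc */) {
  if (map_is_marked) return max_age;  // reset the age of live maps
  if (age > 0 && !prototype_is_marked) return age - 1;  // age towards release
  return age;  // only the transition tree is kept alive; do not age further
}
#endif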
2459
2460 void MarkCompactCollector::MarkLiveObjects() {
2461 TRACE_GC_ARG1(heap_->tracer(), GCTracer::Scope::MC_MARK,
2462 "UseBackgroundThreads", UseBackgroundThreadsInCycle());
2463
2464 const bool was_marked_incrementally =
2466 if (was_marked_incrementally) {
2467 auto* incremental_marking = heap_->incremental_marking();
2469 heap_->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL,
2470 incremental_marking->current_trace_id(), TRACE_EVENT_FLAG_FLOW_IN);
2471 DCHECK(incremental_marking->IsMajorMarking());
2472 incremental_marking->Stop();
2474 }
2475
2476#ifdef DEBUG
2477 DCHECK(state_ == PREPARE_GC);
2478 state_ = MARK_LIVE_OBJECTS;
2479#endif
2480
2481 if (heap_->cpp_heap_) {
2484 }
2485
2486 RootMarkingVisitor root_visitor(this);
2487
2488 {
2489 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
2490 MarkRoots(&root_visitor);
2491 }
2492
2493 {
2494 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_MARK_CLIENT_HEAPS);
2496 }
2497
2498 {
2499 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_MARK_RETAIN_MAPS);
2500 RetainMaps();
2501 }
2502
2503 if (v8_flags.parallel_marking && UseBackgroundThreadsInCycle()) {
2504 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_MARK_FULL_CLOSURE_PARALLEL);
2505 parallel_marking_ = true;
2509 {
2510 TRACE_GC(heap_->tracer(),
2511 GCTracer::Scope::MC_MARK_FULL_CLOSURE_PARALLEL_JOIN);
2513 }
2514 parallel_marking_ = false;
2515 }
2516
2517 {
2518 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
2519 MarkRootsFromConservativeStack(&root_visitor);
2520 }
2521
2522 {
2523 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_MARK_FULL_CLOSURE_SERIAL);
2524 // Complete the transitive closure single-threaded to avoid races with
2525 // multiple threads when processing weak maps and embedder heaps.
2527 if (auto* cpp_heap = CppHeap::From(heap_->cpp_heap())) {
2528 // Lock the process-global mutex here and mark cross-thread roots again.
2529 // This is done as late as possible to keep locking durations short.
2530 cpp_heap->EnterProcessGlobalAtomicPause();
2531 }
2533 CHECK(local_marking_worklists_->IsEmpty());
2534 CHECK(
2535 local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
2538 }
2539
2540 if (was_marked_incrementally) {
2541 // Disable the marking barrier after concurrent/parallel marking has
2542 // finished as it will reset page flags that share the same bitmap as
2543 // the evacuation candidate bit.
2546 }
2547
2548 epoch_++;
2549}
2550
2551namespace {
2552
2553class ParallelClearingJob final : public v8::JobTask {
2554 public:
2555 class ClearingItem {
2556 public:
2557 virtual ~ClearingItem() = default;
2558 virtual void Run(JobDelegate* delegate) = 0;
2559 };
2560
2561 explicit ParallelClearingJob(MarkCompactCollector* collector)
2562 : collector_(collector) {}
2563 ~ParallelClearingJob() override = default;
2564 ParallelClearingJob(const ParallelClearingJob&) = delete;
2565 ParallelClearingJob& operator=(const ParallelClearingJob&) = delete;
2566
2567 // v8::JobTask overrides.
2568 void Run(JobDelegate* delegate) override {
2569 std::unique_ptr<ClearingItem> item;
2570 {
2571 base::MutexGuard guard(&items_mutex_);
2572 item = std::move(items_.back());
2573 items_.pop_back();
2574 }
2575 item->Run(delegate);
2576 }
2577
2578 size_t GetMaxConcurrency(size_t worker_count) const override {
2579 base::MutexGuard guard(&items_mutex_);
2580 if (!v8_flags.parallel_weak_ref_clearing ||
2581 !collector_->UseBackgroundThreadsInCycle()) {
2582 return std::min<size_t>(items_.size(), 1);
2583 }
2584 return items_.size();
2585 }
2586
2587 void Add(std::unique_ptr<ClearingItem> item) {
2588 items_.push_back(std::move(item));
2589 }
2590
2591 private:
2592 MarkCompactCollector* collector_;
2593 mutable base::Mutex items_mutex_;
2594 std::vector<std::unique_ptr<ClearingItem>> items_;
2595};
2596
2597class ClearStringTableJobItem final : public ParallelClearingJob::ClearingItem {
2598 public:
2599 explicit ClearStringTableJobItem(Isolate* isolate)
2600 : isolate_(isolate),
2601 trace_id_(reinterpret_cast<uint64_t>(this) ^
2602 isolate->heap()->tracer()->CurrentEpoch(
2603 GCTracer::Scope::MC_CLEAR_STRING_TABLE)) {}
2604
2605 void Run(JobDelegate* delegate) final {
2606 // Set the current isolate such that trusted pointer tables etc are
2607 // available and the cage base is set correctly for multi-cage mode.
2608 SetCurrentIsolateScope isolate_scope(isolate_);
2609
2610 if (isolate_->OwnsStringTables()) {
2611 TRACE_GC1_WITH_FLOW(isolate_->heap()->tracer(),
2612 GCTracer::Scope::MC_CLEAR_STRING_TABLE,
2613 delegate->IsJoiningThread() ? ThreadKind::kMain
2615 trace_id_, TRACE_EVENT_FLAG_FLOW_IN);
2616 // Prune the string table removing all strings only pointed to by the
2617 // string table. Cannot use string_table() here because the string
2618 // table is marked.
2619 StringTable* string_table = isolate_->string_table();
2620 InternalizedStringTableCleaner internalized_visitor(isolate_->heap());
2621 string_table->DropOldData();
2622 string_table->IterateElements(&internalized_visitor);
2623 string_table->NotifyElementsRemoved(
2624 internalized_visitor.PointersRemoved());
2625 }
2626 }
2627
2628 uint64_t trace_id() const { return trace_id_; }
2629
2630 private:
2631 Isolate* const isolate_;
2632 const uint64_t trace_id_;
2633};
2634
2635} // namespace
2636
2639 public:
2644
2645 // Transition all strings in the forwarding table to
2646 // ThinStrings/ExternalStrings and clear the table afterwards.
2649 v8_flags.transition_strings_during_gc_with_stack);
2650 StringForwardingTable* forwarding_table =
2652 forwarding_table->IterateElements(
2655 });
2656 forwarding_table->Reset();
2657 }
2658
2659 // When performing GC with a stack, we conservatively assume that
2660 // the GC could have been triggered by optimized code. Optimized code
2661 // assumes that flat strings don't transition during GCs, so we are not
2662 // allowed to transition strings to ThinString/ExternalString in that
2663 // case.
2664 // Instead we mark forwarded objects to keep them alive and update entries
2665 // of evacuated objects later.
2668 !v8_flags.transition_strings_during_gc_with_stack);
2669 StringForwardingTable* forwarding_table =
2671 forwarding_table->IterateElements(
2674 });
2675 }
2676
2677 private:
2679 Tagged<Object> original = record->OriginalStringObject(isolate_);
2680 if (!IsHeapObject(original)) {
2682 return;
2683 }
2684 Tagged<String> original_string = Cast<String>(original);
2686 original_string)) {
2687 Tagged<Object> forward = record->ForwardStringObjectOrHash(isolate_);
2688 if (!IsHeapObject(forward) ||
2691 return;
2692 }
2694 } else {
2696 record->set_original_string(StringForwardingTable::deleted_element());
2697 }
2698 }
2699
2701 Tagged<Object> original = record->OriginalStringObject(isolate_);
2702 if (!IsHeapObject(original)) {
2704 return;
2705 }
2707 Cast<HeapObject>(original))) {
2708 Tagged<String> original_string = Cast<String>(original);
2709 if (IsThinString(original_string)) {
2710 original_string = Cast<ThinString>(original_string)->actual();
2711 }
2712 TryExternalize(original_string, record);
2713 TryInternalize(original_string, record);
2714 original_string->set_raw_hash_field(record->raw_hash(isolate_));
2715 } else {
2717 }
2718 }
2719
2720 void TryExternalize(Tagged<String> original_string,
2722 // If the string is already external, dispose the resource.
2723 if (IsExternalString(original_string)) {
2724 record->DisposeUnusedExternalResource(isolate_, original_string);
2725 return;
2726 }
2727
2728 bool is_one_byte;
2729 v8::String::ExternalStringResourceBase* external_resource =
2730 record->external_resource(&is_one_byte);
2731 if (external_resource == nullptr) return;
2732
2733 if (is_one_byte) {
2734 original_string->MakeExternalDuringGC(
2735 isolate_,
2737 external_resource));
2738 } else {
2739 original_string->MakeExternalDuringGC(
2740 isolate_, reinterpret_cast<v8::String::ExternalStringResource*>(
2741 external_resource));
2742 }
2743 }
2744
2745 void TryInternalize(Tagged<String> original_string,
2747 if (IsInternalizedString(original_string)) return;
2748 Tagged<Object> forward = record->ForwardStringObjectOrHash(isolate_);
2749 if (!IsHeapObject(forward)) {
2750 return;
2751 }
2752 Tagged<String> forward_string = Cast<String>(forward);
2753
2754 // Mark the forwarded string to keep it alive.
2755 if (MarkingHelper::GetLivenessMode(heap_, forward_string) !=
2758 }
2759 // Transition the original string to a ThinString and override the
2760 // forwarding index with the correct hash.
2761 original_string->MakeThin(isolate_, forward_string);
2762 // Record the slot in the old-to-old remembered set. This is
2763 // required as the internalized string could be relocated during
2764 // compaction.
2765 ObjectSlot slot(&Cast<ThinString>(original_string)->actual_);
2766 MarkCompactCollector::RecordSlot(original_string, slot, forward_string);
2767 }
2768
2769 Heap* const heap_;
2770};
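// Editor's note: illustrative sketch, not part of mark-compact.cc. The cleaner
// above is used in two modes: forwarded strings are transitioned to
// ThinString/ExternalString only when the GC runs without a stack (or when
// v8_flags.transition_strings_during_gc_with_stack is set); with a stack they
// are merely kept alive and fixed up after evacuation. The hypothetical
// predicate below captures that choice:
#if 0
bool ShouldTransitionForwardedStrings(bool gc_with_stack,
                                      bool transition_with_stack_flag) {
  return !gc_with_stack || transition_with_stack_flag;
}
#endif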
2771
2772namespace {
2773
2774class SharedStructTypeRegistryCleaner final : public RootVisitor {
2775 public:
2776 explicit SharedStructTypeRegistryCleaner(Heap* heap) : heap_(heap) {}
2777
2778 void VisitRootPointers(Root root, const char* description,
2780 UNREACHABLE();
2781 }
2782
2783 void VisitRootPointers(Root root, const char* description,
2785 OffHeapObjectSlot end) override {
2786 DCHECK_EQ(root, Root::kSharedStructTypeRegistry);
2787 // The SharedStructTypeRegistry holds the canonical SharedStructType
2788 // instance maps weakly. Visit all Map pointers in [start, end), deleting
2789 // each one that is unmarked.
2790 auto* marking_state = heap_->marking_state();
2791 Isolate* const isolate = heap_->isolate();
2792 for (OffHeapObjectSlot p = start; p < end; p++) {
2793 Tagged<Object> o = p.load(isolate);
2794 DCHECK(!IsString(o));
2795 if (IsMap(o)) {
2798 if (MarkingHelper::IsMarkedOrAlwaysLive(heap_, marking_state, map))
2799 continue;
2802 }
2803 }
2804 }
2805
2806 int ElementsRemoved() const { return elements_removed_; }
2807
2808 private:
2809 Heap* heap_;
2811};
2812
2813class ClearSharedStructTypeRegistryJobItem final
2814 : public ParallelClearingJob::ClearingItem {
2815 public:
2816 explicit ClearSharedStructTypeRegistryJobItem(Isolate* isolate)
2817 : isolate_(isolate) {
2818 DCHECK(isolate->is_shared_space_isolate());
2819 DCHECK_NOT_NULL(isolate->shared_struct_type_registry());
2820 }
2821
2822 void Run(JobDelegate* delegate) final {
2823 // Set the current isolate such that trusted pointer tables etc are
2824 // available and the cage base is set correctly for multi-cage mode.
2825 SetCurrentIsolateScope isolate_scope(isolate_);
2826
2827 auto* registry = isolate_->shared_struct_type_registry();
2828 SharedStructTypeRegistryCleaner cleaner(isolate_->heap());
2829 registry->IterateElements(isolate_, &cleaner);
2830 registry->NotifyElementsRemoved(cleaner.ElementsRemoved());
2831 }
2832
2833 private:
2834 Isolate* const isolate_;
2835};
2836
2837} // namespace
2838
2840 : public ParallelClearingJob::ClearingItem {
2841 public:
2843 : collector_(collector),
2844 trace_id_(reinterpret_cast<uint64_t>(this) ^
2845 collector->heap()->tracer()->CurrentEpoch(
2846 GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES_TRIVIAL)) {}
2847
2848 void Run(JobDelegate* delegate) final {
2849 Heap* heap = collector_->heap();
2850
2851 // Set the current isolate such that trusted pointer tables etc are
2852 // available and the cage base is set correctly for multi-cage mode.
2853 SetCurrentIsolateScope isolate_scope(heap->isolate());
2854
2855 TRACE_GC1_WITH_FLOW(heap->tracer(),
2856 GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES_TRIVIAL,
2857 delegate->IsJoiningThread() ? ThreadKind::kMain
2862 }
2863
2864 uint64_t trace_id() const { return trace_id_; }
2865
2866 private:
2868 const uint64_t trace_id_;
2869};
2870
2872 : public ParallelClearingJob::ClearingItem {
2873 public:
2875 : collector_(collector),
2876 trace_id_(
2877 reinterpret_cast<uint64_t>(this) ^
2878 collector->heap()->tracer()->CurrentEpoch(
2879 GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES_FILTER_NON_TRIVIAL)) {
2880 }
2881
2882 void Run(JobDelegate* delegate) final {
2883 Heap* heap = collector_->heap();
2884
2885 // Set the current isolate such that trusted pointer tables etc are
2886 // available and the cage base is set correctly for multi-cage mode.
2887 SetCurrentIsolateScope isolate_scope(heap->isolate());
2888
2890 heap->tracer(),
2891 GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES_FILTER_NON_TRIVIAL,
2892 delegate->IsJoiningThread() ? ThreadKind::kMain
2896 }
2897
2898 uint64_t trace_id() const { return trace_id_; }
2899
2900 private:
2902 const uint64_t trace_id_;
2903};
2904
2906 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_CLEAR);
2907
2908 Isolate* const isolate = heap_->isolate();
2909 if (isolate->OwnsStringTables()) {
2910 TRACE_GC(heap_->tracer(),
2911 GCTracer::Scope::MC_CLEAR_STRING_FORWARDING_TABLE);
2912 // Clear string forwarding table. Live strings are transitioned to
2913 // ThinStrings/ExternalStrings in the cleanup process, if this is a GC
2914 // without stack.
2915 // Clearing the string forwarding table must happen before clearing the
2916 // string table, as entries in the forwarding table can keep internalized
2917 // strings alive.
2918 FullStringForwardingTableCleaner forwarding_table_cleaner(heap_);
2919 if (!heap_->IsGCWithStack() ||
2920 v8_flags.transition_strings_during_gc_with_stack) {
2921 forwarding_table_cleaner.TransitionStrings();
2922 } else {
2923 forwarding_table_cleaner.ProcessFullWithStack();
2924 }
2925 }
2926
2927 {
2928 // Clear Isolate::topmost_script_having_context slot if it's not alive.
2929 Tagged<Object> maybe_caller_context =
2930 isolate->topmost_script_having_context();
2931 if (maybe_caller_context.IsHeapObject() &&
2933 heap_, marking_state_, Cast<HeapObject>(maybe_caller_context))) {
2934 isolate->clear_topmost_script_having_context();
2935 }
2936 }
2937
2938 std::unique_ptr<JobHandle> clear_string_table_job_handle;
2939 {
2940 auto job = std::make_unique<ParallelClearingJob>(this);
2941 auto job_item = std::make_unique<ClearStringTableJobItem>(isolate);
2942 const uint64_t trace_id = job_item->trace_id();
2943 job->Add(std::move(job_item));
2944 TRACE_GC_NOTE_WITH_FLOW("ClearStringTableJob started", trace_id,
2946 if (isolate->is_shared_space_isolate() &&
2947 isolate->shared_struct_type_registry()) {
2948 auto registry_job_item =
2949 std::make_unique<ClearSharedStructTypeRegistryJobItem>(isolate);
2950 job->Add(std::move(registry_job_item));
2951 }
2952 clear_string_table_job_handle = V8::GetCurrentPlatform()->CreateJob(
2953 TaskPriority::kUserBlocking, std::move(job));
2954 }
2955 if (v8_flags.parallel_weak_ref_clearing && UseBackgroundThreadsInCycle()) {
2956 clear_string_table_job_handle->NotifyConcurrencyIncrease();
2957 }
2958
2959 {
2960 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_CLEAR_EXTERNAL_STRING_TABLE);
2962 external_visitor(heap_);
2963 heap_->external_string_table_.IterateAll(&external_visitor);
2964 heap_->external_string_table_.CleanUpAll();
2965 }
2966
2967 {
2968 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_GLOBAL_HANDLES);
2969 // We depend on `IterateWeakRootsForPhantomHandles()` being called before
2970 // `ProcessOldCodeCandidates()` in order to identify flushed bytecode in the
2971 // CPU profiler.
2972 isolate->global_handles()->IterateWeakRootsForPhantomHandles(
2974 isolate->traced_handles()->ResetDeadNodes(&IsUnmarkedHeapObject);
2975
2976 if (isolate->is_shared_space_isolate()) {
2977 isolate->global_safepoint()->IterateClientIsolates([](Isolate* client) {
2980 // No need to reset traced handles since they are always strong.
2981 });
2982 }
2983 }
2984
2985 {
2986 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHABLE_BYTECODE);
2987 // `ProcessFlushedBaselineCandidates()` must be called after
2988 // `ProcessOldCodeCandidates()` so that we correctly set the code object on
2989 // the JSFunction after flushing.
2991#ifndef V8_ENABLE_LEAPTIERING
2992 // With leaptiering this is done during sweeping.
2994#endif // !V8_ENABLE_LEAPTIERING
2995 }
2996
2997#ifdef V8_ENABLE_LEAPTIERING
2998 {
2999 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_SWEEP_JS_DISPATCH_TABLE);
3000 JSDispatchTable* jdt = IsolateGroup::current()->js_dispatch_table();
3001 Tagged<Code> compile_lazy =
3002 heap_->isolate()->builtins()->code(Builtin::kCompileLazy);
3003 jdt->Sweep(heap_->js_dispatch_table_space(), isolate->counters(),
3004 [&](JSDispatchEntry& entry) {
3005 Tagged<Code> code = entry.GetCode();
3006 if (MarkingHelper::IsUnmarkedAndNotAlwaysLive(
3007 heap_, marking_state_, code)) {
3008 // Baseline flushing: if the Code object is no longer alive,
3009 // it must have been flushed and so we replace it with the
3010 // CompileLazy builtin. Once we use leaptiering on all
3011 // platforms, we can probably simplify the other code related
3012 // to baseline flushing.
3013
3014 // Currently, we can also see optimized code here. This
3015 // happens when a FeedbackCell for which no JSFunctions
3016 // remain references optimized code. However, in that case we
3017 // probably do want to delete the optimized code, so that is
3018 // working as intended. It does mean, however, that we cannot
3019 // DCHECK here that we only see baseline code.
3020 DCHECK(code->kind() == CodeKind::FOR_TESTING ||
3021 code->kind() == CodeKind::BASELINE ||
3022 code->kind() == CodeKind::MAGLEV ||
3023 code->kind() == CodeKind::TURBOFAN_JS);
3024 entry.SetCodeAndEntrypointPointer(
3025 compile_lazy.ptr(), compile_lazy->instruction_start());
3026 }
3027 });
3028 }
3029#endif // V8_ENABLE_LEAPTIERING
3030
3031 // TODO(olivf, 42204201): If we make the bytecode accessible from the dispatch
3032 // table this could also be implemented during JSDispatchTable::Sweep.
3033 {
3034 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHED_JS_FUNCTIONS);
3035 ClearFlushedJsFunctions();
3036 }
3037
3038 {
3039 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
3040 // Process the weak references.
3041 MarkCompactWeakObjectRetainer mark_compact_object_retainer(heap_,
3043 heap_->ProcessAllWeakReferences(&mark_compact_object_retainer);
3044 }
3045
3046 {
3047 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
3048 // ClearFullMapTransitions must be called before weak references are
3049 // cleared.
3050 ClearFullMapTransitions();
3051 // Weaken recorded strong DescriptorArray objects. This phase can
3052 // potentially move everywhere after `ClearFullMapTransitions()`.
3053 WeakenStrongDescriptorArrays();
3054 }
3055
3056 // Start two parallel jobs: one for clearing trivial weak references and one
3057 // for filtering out non-trivial weak references that will not be cleared.
3058 // Both jobs read the values of weak references and the corresponding
3059 // mark bits. They cannot start before the following methods have finished,
3060 // because these may change the values of weak references and/or mark more
3061 // objects, thus creating data races:
3062 // - ProcessOldCodeCandidates
3063 // - ProcessAllWeakReferences
3064 // - ClearFullMapTransitions
3065 // - WeakenStrongDescriptorArrays
3066 // The two jobs could be merged but it's convenient to keep them separate,
3067 // as they are joined at different times. The filtering job must be joined
3068 // before proceeding to the actual clearing of non-trivial weak references,
3069 // whereas the job for clearing trivial weak references can be joined at the
3070 // end of this method.
3071 std::unique_ptr<JobHandle> clear_trivial_weakrefs_job_handle;
3072 {
3073 auto job = std::make_unique<ParallelClearingJob>(this);
3074 auto job_item = std::make_unique<ClearTrivialWeakRefJobItem>(this);
3075 const uint64_t trace_id = job_item->trace_id();
3076 job->Add(std::move(job_item));
3077 TRACE_GC_NOTE_WITH_FLOW("ClearTrivialWeakRefJob started", trace_id,
3079 clear_trivial_weakrefs_job_handle = V8::GetCurrentPlatform()->CreateJob(
3080 TaskPriority::kUserBlocking, std::move(job));
3081 }
3082 std::unique_ptr<JobHandle> filter_non_trivial_weakrefs_job_handle;
3083 {
3084 auto job = std::make_unique<ParallelClearingJob>(this);
3085 auto job_item = std::make_unique<FilterNonTrivialWeakRefJobItem>(this);
3086 const uint64_t trace_id = job_item->trace_id();
3087 job->Add(std::move(job_item));
3088 TRACE_GC_NOTE_WITH_FLOW("FilterNonTrivialWeakRefJob started", trace_id,
3090 filter_non_trivial_weakrefs_job_handle =
3092 std::move(job));
3093 }
3094 if (v8_flags.parallel_weak_ref_clearing && UseBackgroundThreadsInCycle()) {
3095 clear_trivial_weakrefs_job_handle->NotifyConcurrencyIncrease();
3096 filter_non_trivial_weakrefs_job_handle->NotifyConcurrencyIncrease();
3097 }
3098
3099#ifdef V8_COMPRESS_POINTERS
3100 {
3101 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_SWEEP_EXTERNAL_POINTER_TABLE);
3102 // External pointer table sweeping needs to happen before evacuating live
3103 // objects as it may perform table compaction, which requires objects to
3104 // still be at the same location as during marking.
3105 //
3106 // Note we explicitly do NOT run SweepAndCompact on
3107 // read_only_external_pointer_space since these entries are all immortal by
3108 // definition.
3109 isolate->external_pointer_table().EvacuateAndSweepAndCompact(
3110 isolate->heap()->old_external_pointer_space(),
3111 isolate->heap()->young_external_pointer_space(), isolate->counters());
3112 isolate->heap()->young_external_pointer_space()->AssertEmpty();
3113 if (isolate->owns_shareable_data()) {
3114 isolate->shared_external_pointer_table().SweepAndCompact(
3115 isolate->shared_external_pointer_space(), isolate->counters());
3116 }
3117 isolate->cpp_heap_pointer_table().SweepAndCompact(
3118 isolate->heap()->cpp_heap_pointer_space(), isolate->counters());
3119 }
3120#endif // V8_COMPRESS_POINTERS
3121
3122#ifdef V8_ENABLE_SANDBOX
3123 {
3124 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_SWEEP_TRUSTED_POINTER_TABLE);
3125 isolate->trusted_pointer_table().Sweep(heap_->trusted_pointer_space(),
3126 isolate->counters());
3127 if (isolate->owns_shareable_data()) {
3128 isolate->shared_trusted_pointer_table().Sweep(
3129 isolate->shared_trusted_pointer_space(), isolate->counters());
3130 }
3131 }
3132
3133 {
3134 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_SWEEP_CODE_POINTER_TABLE);
3135 IsolateGroup::current()->code_pointer_table()->Sweep(
3136 heap_->code_pointer_space(), isolate->counters());
3137 }
3138#endif // V8_ENABLE_SANDBOX
3139
3140#ifdef V8_ENABLE_WEBASSEMBLY
3141 {
3142 TRACE_GC(heap_->tracer(),
3143 GCTracer::Scope::MC_SWEEP_WASM_CODE_POINTER_TABLE);
3145 }
3146#endif // V8_ENABLE_WEBASSEMBLY
3147
3148 {
3149 TRACE_GC(heap_->tracer(),
3150 GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES_JOIN_FILTER_JOB);
3151 filter_non_trivial_weakrefs_job_handle->Join();
3152 }
3153
3154 {
3155 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_WEAKNESS_HANDLING);
3156 ClearNonTrivialWeakReferences();
3157 ClearWeakCollections();
3158 ClearJSWeakRefs();
3159 }
3160
3161 PROFILE(heap_->isolate(), WeakCodeClearEvent());
3162
3163 {
3164 // This method may be called from within a DisallowDeoptimizations scope.
3165 // Temporarily allow deopts for marking code for deopt. This is not doing
3166 // the deopt yet and the actual deopts will be bailed out on later if the
3167 // current safepoint is not safe for deopts.
3168 // TODO(357636610): Reconsider whether the DisallowDeoptimization scopes are
3169 // truly needed.
3170 AllowDeoptimization allow_deoptimization(heap_->isolate());
3171 MarkDependentCodeForDeoptimization();
3172 }
3173
3174 {
3175 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_CLEAR_JOIN_JOB);
3176 clear_string_table_job_handle->Join();
3177 clear_trivial_weakrefs_job_handle->Join();
3178 }
3179
3180 if (v8_flags.sticky_mark_bits) {
3181 // TODO(333906585): Consider adjusting the dchecks that happen on clearing
3182 // and move this phase into MarkingBarrier::DeactivateAll.
3183 heap()->DeactivateMajorGCInProgressFlag();
3184 }
3185
3186 DCHECK(weak_objects_.transition_arrays.IsEmpty());
3187 DCHECK(weak_objects_.weak_references_trivial.IsEmpty());
3188 DCHECK(weak_objects_.weak_references_non_trivial.IsEmpty());
3189 DCHECK(weak_objects_.weak_references_non_trivial_unmarked.IsEmpty());
3190 DCHECK(weak_objects_.weak_objects_in_code.IsEmpty());
3191 DCHECK(weak_objects_.js_weak_refs.IsEmpty());
3192 DCHECK(weak_objects_.weak_cells.IsEmpty());
3193 DCHECK(weak_objects_.code_flushing_candidates.IsEmpty());
3194 DCHECK(weak_objects_.flushed_js_functions.IsEmpty());
3195#ifndef V8_ENABLE_LEAPTIERING
3196 DCHECK(weak_objects_.baseline_flushing_candidates.IsEmpty());
3197#endif // !V8_ENABLE_LEAPTIERING
3198}
3199
3200void MarkCompactCollector::MarkDependentCodeForDeoptimization() {
3201 HeapObjectAndCode weak_object_in_code;
3202 while (local_weak_objects()->weak_objects_in_code_local.Pop(
3203 &weak_object_in_code)) {
3204 Tagged<HeapObject> object = weak_object_in_code.heap_object;
3205 Tagged<Code> code = weak_object_in_code.code;
3206 if (MarkingHelper::IsUnmarkedAndNotAlwaysLive(
3207 heap_, non_atomic_marking_state_, object) &&
3208 !code->embedded_objects_cleared()) {
3209 if (!code->marked_for_deoptimization()) {
3210 code->SetMarkedForDeoptimization(heap_->isolate(),
3211 LazyDeoptimizeReason::kWeakObjects);
3212 have_code_to_deoptimize_ = true;
3213 }
3214 code->ClearEmbeddedObjectsAndJSDispatchHandles(heap_);
3215 DCHECK(code->embedded_objects_cleared());
3216 }
3217 }
3218}
3219
3220void MarkCompactCollector::ClearPotentialSimpleMapTransition(
3221 Tagged<Map> dead_target) {
3222 DCHECK(non_atomic_marking_state_->IsUnmarked(dead_target));
3223 Tagged<Object> potential_parent = dead_target->constructor_or_back_pointer();
3224 if (IsMap(potential_parent)) {
3225 Tagged<Map> parent = Cast<Map>(potential_parent);
3226 DisallowGarbageCollection no_gc_obviously;
3227 if (MarkingHelper::IsMarkedOrAlwaysLive(heap_, non_atomic_marking_state_,
3228 parent) &&
3229 TransitionsAccessor(heap_->isolate(), parent)
3230 .HasSimpleTransitionTo(dead_target)) {
3231 ClearPotentialSimpleMapTransition(parent, dead_target);
3232 }
3233 }
3234}
3235
3236void MarkCompactCollector::ClearPotentialSimpleMapTransition(
3237 Tagged<Map> map, Tagged<Map> dead_target) {
3238 DCHECK(!map->is_prototype_map());
3239 DCHECK(!dead_target->is_prototype_map());
3240 DCHECK_EQ(map->raw_transitions(), MakeWeak(dead_target));
3241 // Take ownership of the descriptor array.
3242 int number_of_own_descriptors = map->NumberOfOwnDescriptors();
3243 Tagged<DescriptorArray> descriptors =
3244 map->instance_descriptors(heap_->isolate());
3245 if (descriptors == dead_target->instance_descriptors(heap_->isolate()) &&
3246 number_of_own_descriptors > 0) {
3247 TrimDescriptorArray(map, descriptors);
3248 DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
3249 }
3250}
3251
3252bool MarkCompactCollector::SpecialClearMapSlot(Tagged<HeapObject> host,
3253 Tagged<Map> map,
3254 HeapObjectSlot slot) {
3255 ClearPotentialSimpleMapTransition(map);
3256
3257 // Special handling for clearing field type entries, identified by their host
3258 // being a descriptor array.
3259 // TODO(olivf): This whole special handling of field-type clearing
3260 // could be replaced by eagerly triggering field type dependencies and
3261 // generalizing field types, as soon as a field-type map becomes
3262 // unstable.
3263 if (IsDescriptorArray(host)) {
3264 // We want to distinguish two cases:
3265 // 1. There are no instances of the descriptor owner's map left.
3266 // 2. The field type is not up to date because the stored object
3267 // migrated away to a different map.
3268 // In case (1) it makes sense to clear the field type such that we
3269 // can learn a new one should we ever start creating instances
3270 // again.
3271 // In case (2) we must not re-learn a new field type. Doing so could
3272 // lead us to learning a field type that is not consistent with
3273 // still existing object's contents. To conservatively identify case
3274 // (1) we check the stability of the dead map.
3275 MaybeObjectSlot location(slot);
3276 if (map->is_stable() && FieldType::kFieldTypesCanBeClearedOnGC) {
3277 location.store(FieldType::None());
3278 } else {
3279 location.store(FieldType::Any());
3280 }
3281 return true;
3282 }
3283 return false;
3284}
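// Editor's note: illustrative sketch, not part of mark-compact.cc. The field
// type handling above distinguishes the two cases described in the comment:
// if the dead map was stable, no instances with that layout can remain, so the
// field type may be cleared (FieldType::None) and re-learned later; otherwise
// objects may have migrated away and the type must be generalized
// (FieldType::Any). A hypothetical reduction of that decision:
#if 0
enum class ClearedFieldType { kNone, kAny };

ClearedFieldType ClearedValueForDeadFieldTypeMap(bool dead_map_is_stable,
                                                 bool can_clear_field_types) {
  return (dead_map_is_stable && can_clear_field_types)
             ? ClearedFieldType::kNone   // case (1): safe to forget the type
             : ClearedFieldType::kAny;   // case (2): conservatively generalize
}
#endif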
3285
3286void MarkCompactCollector::FlushBytecodeFromSFI(
3287 Tagged<SharedFunctionInfo> shared_info) {
3288 DCHECK(shared_info->HasBytecodeArray());
3289
3290 // Retain objects required for uncompiled data.
3291 Tagged<String> inferred_name = shared_info->inferred_name();
3292 int start_position = shared_info->StartPosition();
3293 int end_position = shared_info->EndPosition();
3294
3295 shared_info->DiscardCompiledMetadata(
3296 heap_->isolate(),
3297 [](Tagged<HeapObject> object, ObjectSlot slot,
3298 Tagged<HeapObject> target) { RecordSlot(object, slot, target); });
3299
3300 // The size of the bytecode array should always be larger than an
3301 // UncompiledData object.
3302 static_assert(BytecodeArray::SizeFor(0) >=
3303 UncompiledDataWithoutPreparseData::kSize);
3304
3305 // Replace the bytecode with an uncompiled data object.
3306 Tagged<BytecodeArray> bytecode_array =
3307 shared_info->GetBytecodeArray(heap_->isolate());
3308
3309#ifdef V8_ENABLE_SANDBOX
3310 DCHECK(!HeapLayout::InWritableSharedSpace(shared_info));
3311 // Zap the old entry in the trusted pointer table.
3312 TrustedPointerTable& table = heap_->isolate()->trusted_pointer_table();
3313 IndirectPointerSlot self_indirect_pointer_slot =
3314 bytecode_array->RawIndirectPointerField(
3315 BytecodeArray::kSelfIndirectPointerOffset,
3316 kBytecodeArrayIndirectPointerTag);
3317 table.Zap(self_indirect_pointer_slot.Relaxed_LoadHandle());
3318#endif
3319
3320 Tagged<HeapObject> compiled_data = bytecode_array;
3321 Address compiled_data_start = compiled_data.address();
3322 int compiled_data_size = ALIGN_TO_ALLOCATION_ALIGNMENT(compiled_data->Size());
3323 MutablePageMetadata* chunk =
3324 MutablePageMetadata::FromAddress(compiled_data_start);
3325
3326 // Clear any recorded slots for the compiled data as being invalid.
3328 chunk, compiled_data_start, compiled_data_start + compiled_data_size,
3329 SlotSet::FREE_EMPTY_BUCKETS);
3331 chunk, compiled_data_start, compiled_data_start + compiled_data_size,
3332 SlotSet::FREE_EMPTY_BUCKETS);
3334 chunk, compiled_data_start, compiled_data_start + compiled_data_size,
3335 SlotSet::FREE_EMPTY_BUCKETS);
3337 chunk, compiled_data_start, compiled_data_start + compiled_data_size,
3338 SlotSet::FREE_EMPTY_BUCKETS);
3340 chunk, compiled_data_start, compiled_data_start + compiled_data_size,
3341 SlotSet::FREE_EMPTY_BUCKETS);
3342
3343 // Swap the map, using set_map_after_allocation to avoid verify heap checks
3344 // which are not necessary since we are doing this during the GC atomic pause.
3345 compiled_data->set_map_after_allocation(
3346 heap_->isolate(),
3347 ReadOnlyRoots(heap_).uncompiled_data_without_preparse_data_map(),
3349
3350 // Create a filler object for any left over space in the bytecode array.
3351 if (!heap_->IsLargeObject(compiled_data)) {
3352 const int aligned_filler_offset =
3353 ALIGN_TO_ALLOCATION_ALIGNMENT(UncompiledDataWithoutPreparseData::kSize);
3354 heap_->CreateFillerObjectAt(compiled_data.address() + aligned_filler_offset,
3355 compiled_data_size - aligned_filler_offset);
3356 }
3357
3358 // Initialize the uncompiled data.
3359 Tagged<UncompiledData> uncompiled_data = Cast<UncompiledData>(compiled_data);
3360
3361 uncompiled_data->InitAfterBytecodeFlush(
3362 heap_->isolate(), inferred_name, start_position, end_position,
3363 [](Tagged<HeapObject> object, ObjectSlot slot,
3364 Tagged<HeapObject> target) { RecordSlot(object, slot, target); });
3365
3366 // Mark the uncompiled data as black, and ensure all fields have already been
3367 // marked.
3368 DCHECK(MarkingHelper::IsMarkedOrAlwaysLive(heap_, marking_state_,
3369 inferred_name));
3370 if (MarkingHelper::GetLivenessMode(heap_, uncompiled_data) ==
3371 MarkingHelper::LivenessMode::kMarkbit) {
3372 marking_state_->TryMarkAndAccountLiveBytes(uncompiled_data);
3373 }
3374
3375#ifdef V8_ENABLE_SANDBOX
3376 // Mark the new entry in the trusted pointer table as alive.
3377 TrustedPointerTable::Space* space = heap_->trusted_pointer_space();
3378 table.Mark(space, self_indirect_pointer_slot.Relaxed_LoadHandle());
3379#endif
3380
3381 shared_info->set_uncompiled_data(uncompiled_data);
3382 DCHECK(!shared_info->is_compiled());
3383}
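// Editor's note: illustrative sketch, not part of mark-compact.cc. The in-place
// flush above relies on the replacement UncompiledData never being larger than
// the BytecodeArray it overwrites (see the static_assert); any left-over tail
// becomes a filler object. A hypothetical helper computing that filler size:
#if 0
#include <cstddef>

constexpr size_t FillerSizeAfterFlush(size_t bytecode_array_size,
                                      size_t uncompiled_data_size) {
  // Precondition mirrored from the static_assert above.
  return bytecode_array_size >= uncompiled_data_size
             ? bytecode_array_size - uncompiled_data_size
             : 0;
}
#endif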
3384
3385void MarkCompactCollector::ProcessOldCodeCandidates() {
3386 DCHECK(v8_flags.flush_bytecode || v8_flags.flush_baseline_code ||
3387 weak_objects_.code_flushing_candidates.IsEmpty());
3388 Tagged<SharedFunctionInfo> flushing_candidate;
3389 int number_of_flushed_sfis = 0;
3390 while (local_weak_objects()->code_flushing_candidates_local.Pop(
3391 &flushing_candidate)) {
3392 bool is_bytecode_live;
3393 if (v8_flags.flush_baseline_code && flushing_candidate->HasBaselineCode()) {
3394 is_bytecode_live = ProcessOldBaselineSFI(flushing_candidate);
3395 } else {
3396 is_bytecode_live = ProcessOldBytecodeSFI(flushing_candidate);
3397 }
3398
3399 if (!is_bytecode_live) number_of_flushed_sfis++;
3400
3401 // Now record the data slot, which has been updated to either
3402 // UncompiledData, baseline Code, or a BytecodeArray that is still alive.
3403#ifndef V8_ENABLE_SANDBOX
3404 // If the sandbox is enabled, the slot contains an indirect pointer which
3405 // does not need to be updated during mark-compact (because the pointer in
3406 // the pointer table will be updated), so no action is needed here.
3407 ObjectSlot slot = flushing_candidate->RawField(
3408 SharedFunctionInfo::kTrustedFunctionDataOffset);
3409 if (IsHeapObject(*slot)) {
3410 RecordSlot(flushing_candidate, slot, Cast<HeapObject>(*slot));
3411 }
3412#endif
3413 }
3414
3415 if (v8_flags.trace_flush_code) {
3416 PrintIsolate(heap_->isolate(), "%d flushed SharedFunctionInfo(s)\n",
3417 number_of_flushed_sfis);
3418 }
3419}
3420
3421bool MarkCompactCollector::ProcessOldBytecodeSFI(
3422 Tagged<SharedFunctionInfo> flushing_candidate) {
3423 // During flushing a BytecodeArray is transformed into an UncompiledData
3424 // in place. Seeing an UncompiledData here implies that another
3425 // SharedFunctionInfo had a reference to the same BytecodeArray and
3426 // flushed it before processing this candidate. This can happen when using
3427 // CloneSharedFunctionInfo().
3428 Isolate* const isolate = heap_->isolate();
3429
3430 const bool bytecode_already_decompiled =
3431 flushing_candidate->HasUncompiledData();
3432 if (!bytecode_already_decompiled) {
3433 // Check if the bytecode is still live.
3434 Tagged<BytecodeArray> bytecode =
3435 flushing_candidate->GetBytecodeArray(isolate);
3436 if (MarkingHelper::IsMarkedOrAlwaysLive(heap_, non_atomic_marking_state_,
3437 bytecode)) {
3438 return true;
3439 }
3440 }
3441 FlushSFI(flushing_candidate, bytecode_already_decompiled);
3442 return false;
3443}
3444
3445bool MarkCompactCollector::ProcessOldBaselineSFI(
3446 Tagged<SharedFunctionInfo> flushing_candidate) {
3447 Tagged<Code> baseline_code = flushing_candidate->baseline_code(kAcquireLoad);
3448 // Safe to do a relaxed load here since the Code was acquire-loaded.
3449 Tagged<InstructionStream> baseline_istream =
3450 baseline_code->instruction_stream(baseline_code->code_cage_base(),
3451 kRelaxedLoad);
3452 Tagged<HeapObject> baseline_bytecode_or_interpreter_data =
3453 baseline_code->bytecode_or_interpreter_data();
3454
3455 // During flushing a BytecodeArray is transformed into an UncompiledData
3456 // in place. Seeing an UncompiledData here implies that another
3457 // SharedFunctionInfo had a reference to the same BytecodeArray and
3458 // flushed it before processing this candidate. This can happen when using
3459 // CloneSharedFunctionInfo().
3460 const bool bytecode_already_decompiled =
3461 IsUncompiledData(baseline_bytecode_or_interpreter_data, heap_->isolate());
3462 bool is_bytecode_live = false;
3463 if (!bytecode_already_decompiled) {
3464 Tagged<BytecodeArray> bytecode =
3465 flushing_candidate->GetBytecodeArray(heap_->isolate());
3466 is_bytecode_live = MarkingHelper::IsMarkedOrAlwaysLive(
3467 heap_, non_atomic_marking_state_, bytecode);
3468 }
3469
3470 if (MarkingHelper::IsMarkedOrAlwaysLive(heap_, non_atomic_marking_state_,
3471 baseline_istream)) {
3472 // Currently baseline code holds bytecode array strongly and it is
3473 // always ensured that bytecode is live if baseline code is live. Hence
3474 // baseline code can safely load bytecode array without any additional
3475 // checks. In future if this changes we need to update these checks to
3476 // flush code if the bytecode is not live and also update baseline code
3477 // to bailout if there is no bytecode.
3478 DCHECK(is_bytecode_live);
3479
3480 // Regardless of whether the baseline code is referenced through the Code
3481 // object or the InstructionStream itself, if the InstructionStream is live
3482 // then the Code has to be live as well and will have been marked via
3483 // the owning JSFunction.
3484 DCHECK(MarkingHelper::IsMarkedOrAlwaysLive(heap_, non_atomic_marking_state_,
3485 baseline_code));
3486 } else if (is_bytecode_live || bytecode_already_decompiled) {
3487 // Reset the function_data field to the BytecodeArray, InterpreterData,
3488 // or UncompiledData found on the baseline code. We can skip this step
3489 // if the BytecodeArray is not live and not already decompiled, because
3490 // FlushBytecodeFromSFI below will set the function_data field.
3491 flushing_candidate->FlushBaselineCode();
3492 }
3493
3494 if (!is_bytecode_live) {
3495 FlushSFI(flushing_candidate, bytecode_already_decompiled);
3496 }
3497 return is_bytecode_live;
3498}
3499
3500void MarkCompactCollector::FlushSFI(Tagged<SharedFunctionInfo> sfi,
3501 bool bytecode_already_decompiled) {
3502 // If baseline code flushing is disabled we should only flush bytecode
3503 // from functions that don't have baseline data.
3504 DCHECK(v8_flags.flush_baseline_code || !sfi->HasBaselineCode());
3505
3506 if (bytecode_already_decompiled) {
3507 sfi->DiscardCompiledMetadata(
3508 heap_->isolate(),
3509 [](Tagged<HeapObject> object, ObjectSlot slot,
3510 Tagged<HeapObject> target) { RecordSlot(object, slot, target); });
3511 } else {
3512 // If the BytecodeArray is dead, flush it, which will replace the field
3513 // with an uncompiled data object.
3514 FlushBytecodeFromSFI(sfi);
3515 }
3516}
3517
3518void MarkCompactCollector::ClearFlushedJsFunctions() {
3519 DCHECK(v8_flags.flush_bytecode ||
3520 weak_objects_.flushed_js_functions.IsEmpty());
3521 Tagged<JSFunction> flushed_js_function;
3522 while (local_weak_objects()->flushed_js_functions_local.Pop(
3523 &flushed_js_function)) {
3524 auto gc_notify_updated_slot = [](Tagged<HeapObject> object, ObjectSlot slot,
3526 RecordSlot(object, slot, Cast<HeapObject>(target));
3527 };
3528 flushed_js_function->ResetIfCodeFlushed(heap_->isolate(),
3529 gc_notify_updated_slot);
3530 }
3531}
3532
3533#ifndef V8_ENABLE_LEAPTIERING
3534
3535void MarkCompactCollector::ProcessFlushedBaselineCandidates() {
3536 DCHECK(v8_flags.flush_baseline_code ||
3537 weak_objects_.baseline_flushing_candidates.IsEmpty());
3538 Tagged<JSFunction> flushed_js_function;
3539 while (local_weak_objects()->baseline_flushing_candidates_local.Pop(
3540 &flushed_js_function)) {
3541 auto gc_notify_updated_slot = [](Tagged<HeapObject> object, ObjectSlot slot,
3543 RecordSlot(object, slot, Cast<HeapObject>(target));
3544 };
3545 flushed_js_function->ResetIfCodeFlushed(heap_->isolate(),
3546 gc_notify_updated_slot);
3547
3548#ifndef V8_ENABLE_SANDBOX
3549 // Record the code slot that has been updated either to CompileLazy,
3550 // InterpreterEntryTrampoline or baseline code.
3551 // This is only necessary when the sandbox is not enabled. If it is, the
3552 // Code objects are referenced through a pointer table indirection and so
3553 // remembered slots are not necessary as the Code object will update its
3554 // entry in the pointer table when it is relocated.
3555 ObjectSlot slot = flushed_js_function->RawField(JSFunction::kCodeOffset);
3556 RecordSlot(flushed_js_function, slot, Cast<HeapObject>(*slot));
3557#endif
3558 }
3559}
3560
3561#endif // !V8_ENABLE_LEAPTIERING
3562
3563void MarkCompactCollector::ClearFullMapTransitions() {
3565 Isolate* const isolate = heap_->isolate();
3566 ReadOnlyRoots roots(isolate);
3567 while (local_weak_objects()->transition_arrays_local.Pop(&array)) {
3568 int num_transitions = array->number_of_transitions();
3569 if (num_transitions > 0) {
3571 // The array might contain "undefined" elements because it's not yet
3572 // filled. Allow it.
3573 if (array->GetTargetIfExists(0, isolate, &map)) {
3574 DCHECK(!map.is_null()); // Weak pointers aren't cleared yet.
3576 map->constructor_or_back_pointer();
3578 DCHECK(isolate->has_active_deserializer());
3580 Smi::uninitialized_deserialization_value());
3581 continue;
3582 }
3583 Tagged<Map> parent = Cast<Map>(map->constructor_or_back_pointer());
3584 const bool parent_is_alive = MarkingHelper::IsMarkedOrAlwaysLive(
3585 heap_, non_atomic_marking_state_, parent);
3586 Tagged<DescriptorArray> descriptors =
3587 parent_is_alive ? parent->instance_descriptors(isolate)
3589 bool descriptors_owner_died =
3590 CompactTransitionArray(parent, array, descriptors);
3591 if (descriptors_owner_died) {
3592 TrimDescriptorArray(parent, descriptors);
3593 }
3594 }
3595 }
3596 }
3597}
3598
3599// Returns false if no maps have died, or if the transition array is
3600// still being deserialized.
3601bool MarkCompactCollector::TransitionArrayNeedsCompaction(
3602 Tagged<TransitionArray> transitions, int num_transitions) {
3603 ReadOnlyRoots roots(heap_->isolate());
3604 for (int i = 0; i < num_transitions; ++i) {
3605 Tagged<MaybeObject> raw_target = transitions->GetRawTarget(i);
3606 if (raw_target.IsSmi()) {
3607 // This target is still being deserialized,
3608 DCHECK(heap_->isolate()->has_active_deserializer());
3609 DCHECK_EQ(raw_target.ToSmi(), Smi::uninitialized_deserialization_value());
3610#ifdef DEBUG
3611 // Targets can only be dead if this array is fully deserialized.
3612 for (int j = 0; j < num_transitions; ++j) {
3614 !transitions->GetRawTarget(j).IsSmi(),
3615 !non_atomic_marking_state_->IsUnmarked(transitions->GetTarget(j)));
3616 }
3617#endif
3618 return false;
3619 } else if (MarkingHelper::IsUnmarkedAndNotAlwaysLive(
3620 heap_, non_atomic_marking_state_,
3621 TransitionsAccessor::GetTargetFromRaw(raw_target))) {
3622#ifdef DEBUG
3623 // Targets can only be dead if this array is fully deserialized.
3624 for (int j = 0; j < num_transitions; ++j) {
3625 DCHECK(!transitions->GetRawTarget(j).IsSmi());
3626 }
3627#endif
3628 return true;
3629 }
3630 }
3631 return false;
3632}
3633
3634bool MarkCompactCollector::CompactTransitionArray(
3635 Tagged<Map> map, Tagged<TransitionArray> transitions,
3636 Tagged<DescriptorArray> descriptors) {
3637 DCHECK(!map->is_prototype_map());
3638 int num_transitions = transitions->number_of_transitions();
3639 if (!TransitionArrayNeedsCompaction(transitions, num_transitions)) {
3640 return false;
3641 }
3642 ReadOnlyRoots roots(heap_->isolate());
3643 bool descriptors_owner_died = false;
3644 int transition_index = 0;
3645 // Compact all live transitions to the left.
3646 for (int i = 0; i < num_transitions; ++i) {
3647 Tagged<Map> target = transitions->GetTarget(i);
3648 DCHECK_EQ(target->constructor_or_back_pointer(), map);
3649
3650 if (MarkingHelper::IsUnmarkedAndNotAlwaysLive(
3651 heap_, non_atomic_marking_state_, target)) {
3652 if (!descriptors.is_null() &&
3653 target->instance_descriptors(heap_->isolate()) == descriptors) {
3654 DCHECK(!target->is_prototype_map());
3655 descriptors_owner_died = true;
3656 }
3657 continue;
3658 }
3659
3660 if (i != transition_index) {
3661 Tagged<Name> key = transitions->GetKey(i);
3662 transitions->SetKey(transition_index, key);
3663 HeapObjectSlot key_slot = transitions->GetKeySlot(transition_index);
3664 RecordSlot(transitions, key_slot, key);
3665 Tagged<MaybeObject> raw_target = transitions->GetRawTarget(i);
3666 transitions->SetRawTarget(transition_index, raw_target);
3667 HeapObjectSlot target_slot = transitions->GetTargetSlot(transition_index);
3668 RecordSlot(transitions, target_slot, raw_target.GetHeapObject());
3669 }
3670 transition_index++;
3671 }
3672 // If there are no transitions to be cleared, return.
3673 if (transition_index == num_transitions) {
3674 DCHECK(!descriptors_owner_died);
3675 return false;
3676 }
3677 // Note that we never eliminate a transition array, though we might right-trim
3678 // such that number_of_transitions() == 0. If this assumption changes,
3679 // TransitionArray::Insert() will need to deal with the case that a transition
3680 // array disappeared during GC.
3681 int old_capacity_in_entries = transitions->Capacity();
3682 if (transition_index < old_capacity_in_entries) {
3683 int old_capacity = transitions->length();
3684 static_assert(TransitionArray::kEntryKeyIndex == 0);
3685 DCHECK_EQ(TransitionArray::ToKeyIndex(old_capacity_in_entries),
3686 old_capacity);
3687 int new_capacity = TransitionArray::ToKeyIndex(transition_index);
3688 heap_->RightTrimArray(transitions, new_capacity, old_capacity);
3689 transitions->SetNumberOfTransitions(transition_index);
3690 }
3691 return descriptors_owner_died;
3692}
3693
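// Right-trims |array| by |descriptors_to_trim| entries: remembered-set entries
// covering the trimmed range are removed and the freed tail is turned into
// filler (respecting allocation alignment where pointer compression requires
// it), before the new descriptor count is written back.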
3694void MarkCompactCollector::RightTrimDescriptorArray(
3695 Tagged<DescriptorArray> array, int descriptors_to_trim) {
3696 int old_nof_all_descriptors = array->number_of_all_descriptors();
3697 int new_nof_all_descriptors = old_nof_all_descriptors - descriptors_to_trim;
3698 DCHECK_LT(0, descriptors_to_trim);
3699 DCHECK_LE(0, new_nof_all_descriptors);
3700 Address start = array->GetDescriptorSlot(new_nof_all_descriptors).address();
3701 Address end = array->GetDescriptorSlot(old_nof_all_descriptors).address();
3702 MutablePageMetadata* chunk = MutablePageMetadata::FromHeapObject(array);
3703  RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, start, end,
3704                                         SlotSet::FREE_EMPTY_BUCKETS);
3705  RememberedSet<OLD_TO_NEW_BACKGROUND>::RemoveRange(
3706      chunk, start, end, SlotSet::FREE_EMPTY_BUCKETS);
3707  RememberedSet<OLD_TO_OLD>::RemoveRange(chunk, start, end,
3708                                         SlotSet::FREE_EMPTY_BUCKETS);
3709  RememberedSet<OLD_TO_SHARED>::RemoveRange(chunk, start, end,
3710                                            SlotSet::FREE_EMPTY_BUCKETS);
3711  if (V8_COMPRESS_POINTERS_8GB_BOOL) {
3712 Address aligned_start = ALIGN_TO_ALLOCATION_ALIGNMENT(start);
3713 Address aligned_end = ALIGN_TO_ALLOCATION_ALIGNMENT(end);
3714 if (aligned_start < aligned_end) {
3715 heap_->CreateFillerObjectAt(
3716 aligned_start, static_cast<int>(aligned_end - aligned_start));
3717 }
3718 if (heap::ShouldZapGarbage()) {
3719 Address zap_end = std::min(aligned_start, end);
3720      MemsetTagged(ObjectSlot(start),
3721                   Tagged<Object>(static_cast<Address>(kZapValue)),
3722 (zap_end - start) >> kTaggedSizeLog2);
3723 }
3724 } else {
3725 heap_->CreateFillerObjectAt(start, static_cast<int>(end - start));
3726 }
3727 array->set_number_of_all_descriptors(new_nof_all_descriptors);
3728}
3729
3730void MarkCompactCollector::RecordStrongDescriptorArraysForWeakening(
3731 GlobalHandleVector<DescriptorArray> strong_descriptor_arrays) {
3732 DCHECK(heap_->incremental_marking()->IsMajorMarking());
3733 base::MutexGuard guard(&strong_descriptor_arrays_mutex_);
3734 strong_descriptor_arrays_.push_back(std::move(strong_descriptor_arrays));
3735}
3736
3737void MarkCompactCollector::WeakenStrongDescriptorArrays() {
3738 Tagged<Map> descriptor_array_map =
3739 ReadOnlyRoots(heap_->isolate()).descriptor_array_map();
3740 for (auto& vec : strong_descriptor_arrays_) {
3741 for (auto it = vec.begin(); it != vec.end(); ++it) {
3742 Tagged<DescriptorArray> raw = it.raw();
3743 DCHECK(IsStrongDescriptorArray(raw));
3744 raw->set_map_safe_transition_no_write_barrier(heap_->isolate(),
3745 descriptor_array_map);
3746 DCHECK_EQ(raw->raw_gc_state(kRelaxedLoad), 0);
3747 }
3748 }
3749 strong_descriptor_arrays_.clear();
3750}
3751
3752void MarkCompactCollector::TrimDescriptorArray(
3753 Tagged<Map> map, Tagged<DescriptorArray> descriptors) {
3754 int number_of_own_descriptors = map->NumberOfOwnDescriptors();
3755 if (number_of_own_descriptors == 0) {
3756 DCHECK(descriptors == ReadOnlyRoots(heap_).empty_descriptor_array());
3757 return;
3758 }
3759 int to_trim =
3760 descriptors->number_of_all_descriptors() - number_of_own_descriptors;
3761 if (to_trim > 0) {
3762 descriptors->set_number_of_descriptors(number_of_own_descriptors);
3763 RightTrimDescriptorArray(descriptors, to_trim);
3764
3765 TrimEnumCache(map, descriptors);
3766 descriptors->Sort();
3767 }
3768 DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
3769 map->set_owns_descriptors(true);
3770}
3771
3772void MarkCompactCollector::TrimEnumCache(Tagged<Map> map,
3773 Tagged<DescriptorArray> descriptors) {
3774 int live_enum = map->EnumLength();
3775 if (live_enum == kInvalidEnumCacheSentinel) {
3776 live_enum = map->NumberOfEnumerableProperties();
3777 }
3778 if (live_enum == 0) return descriptors->ClearEnumCache();
3779 Tagged<EnumCache> enum_cache = descriptors->enum_cache();
3780
3781 Tagged<FixedArray> keys = enum_cache->keys();
3782 int keys_length = keys->length();
3783 if (live_enum >= keys_length) return;
3784 heap_->RightTrimArray(keys, live_enum, keys_length);
3785
3786 Tagged<FixedArray> indices = enum_cache->indices();
3787 int indices_length = indices->length();
3788 if (live_enum >= indices_length) return;
3789 heap_->RightTrimArray(indices, live_enum, indices_length);
3790}
3791
3792void MarkCompactCollector::ClearWeakCollections() {
3793 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
3794  Tagged<EphemeronHashTable> table;
3795  while (local_weak_objects()->ephemeron_hash_tables_local.Pop(&table)) {
3796 for (InternalIndex i : table->IterateEntries()) {
3797 Tagged<HeapObject> key = Cast<HeapObject>(table->KeyAt(i));
3798#ifdef VERIFY_HEAP
3799 if (v8_flags.verify_heap) {
3800 Tagged<Object> value = table->ValueAt(i);
3801 if (IsHeapObject(value)) {
3802 Tagged<HeapObject> heap_object = Cast<HeapObject>(value);
3803
3804 CHECK_IMPLIES(MarkingHelper::IsMarkedOrAlwaysLive(
3805 heap_, non_atomic_marking_state_, key),
3806 MarkingHelper::IsMarkedOrAlwaysLive(
3807 heap_, non_atomic_marking_state_, heap_object));
3808 }
3809 }
3810#endif // VERIFY_HEAP
3811 if (MarkingHelper::IsUnmarkedAndNotAlwaysLive(
3812 heap_, non_atomic_marking_state_, key)) {
3813 table->RemoveEntry(i);
3814 }
3815 }
3816 }
3817 auto* table_map = heap_->ephemeron_remembered_set()->tables();
3818 for (auto it = table_map->begin(); it != table_map->end();) {
3819 if (MarkingHelper::IsUnmarkedAndNotAlwaysLive(
3820 heap_, non_atomic_marking_state_, it->first)) {
3821 it = table_map->erase(it);
3822 } else {
3823 ++it;
3824 }
3825 }
3826}
3827
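// Weak references are cleared in three flavors below: trivial references are
// overwritten with the cleared sentinel directly, non-trivial references are
// first filtered (live values are re-recorded) and the unmarked remainder is
// cleared with special map-slot handling, and trusted-space references go
// through protected slots.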
3828template <typename TObjectAndSlot, typename TMaybeSlot>
3829void MarkCompactCollector::ClearWeakReferences(
3830    WeakObjects::WeakObjectWorklist<TObjectAndSlot>::Local& worklist,
3831    Tagged<HeapObjectReference> cleared_weak_ref) {
3832  TObjectAndSlot slot;
3833  while (worklist.Pop(&slot)) {
3834    Tagged<HeapObject> value;
3835    // The slot could have been overwritten, so we have to treat it
3836 // as [Protected]MaybeObjectSlot.
3837 TMaybeSlot location(slot.slot);
3838 if (location.load().GetHeapObjectIfWeak(&value)) {
3839 DCHECK(!IsWeakCell(value));
3840 // Values in RO space have already been filtered, but a non-RO value may
3841 // have been overwritten by a RO value since marking.
3842 if (MarkingHelper::IsMarkedOrAlwaysLive(heap_, non_atomic_marking_state_,
3843 value)) {
3844 // The value of the weak reference is alive.
3845 RecordSlot(slot.heap_object, slot.slot, value);
3846 } else {
3847 DCHECK(MainMarkingVisitor::IsTrivialWeakReferenceValue(slot.heap_object,
3848 value));
3849 // The value of the weak reference is non-live.
3850 // This is a non-atomic store, which is fine as long as we only have a
3851 // single clearing job.
3852 location.store(cleared_weak_ref);
3853 }
3854 }
3855 }
3856}
3857
3858void MarkCompactCollector::ClearTrivialWeakReferences() {
3859 Tagged<HeapObjectReference> cleared_weak_ref = ClearedValue(heap_->isolate());
3860 ClearWeakReferences<HeapObjectAndSlot, MaybeObjectSlot>(
3861 local_weak_objects()->weak_references_trivial_local, cleared_weak_ref);
3862}
3863
3864void MarkCompactCollector::ClearTrustedWeakReferences() {
3865  Tagged<HeapObjectReference> cleared_weak_ref = ClearedTrustedValue();
3866  ClearWeakReferences<TrustedObjectAndSlot, ProtectedMaybeObjectSlot>(
3867 local_weak_objects()->weak_references_trusted_local, cleared_weak_ref);
3868}
3869
3870void MarkCompactCollector::FilterNonTrivialWeakReferences() {
3871 HeapObjectAndSlot slot;
3872 while (local_weak_objects()->weak_references_non_trivial_local.Pop(&slot)) {
3873    Tagged<HeapObject> value;
3874    // The slot could have been overwritten, so we have to treat it
3875 // as MaybeObjectSlot.
3876 MaybeObjectSlot location(slot.slot);
3877 if ((*location).GetHeapObjectIfWeak(&value)) {
3878 DCHECK(!IsWeakCell(value));
3879 // Values in RO space have already been filtered, but a non-RO value may
3880 // have been overwritten by a RO value since marking.
3881 if (MarkingHelper::IsMarkedOrAlwaysLive(heap_, non_atomic_marking_state_,
3882 value)) {
3883 // The value of the weak reference is alive.
3884 RecordSlot(slot.heap_object, HeapObjectSlot(location), value);
3885 } else {
3886 DCHECK(!MainMarkingVisitor::IsTrivialWeakReferenceValue(
3887 slot.heap_object, value));
3888 // The value is non-live, defer the actual clearing.
3889 // This is non-atomic, which is fine as long as we only have a single
3890 // filtering job.
3891 local_weak_objects_->weak_references_non_trivial_unmarked_local.Push(
3892 slot);
3893 }
3894 }
3895 }
3896}
3897
3898void MarkCompactCollector::ClearNonTrivialWeakReferences() {
3899 TRACE_GC(heap_->tracer(),
3900 GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES_NON_TRIVIAL);
3901 HeapObjectAndSlot slot;
3902 Tagged<HeapObjectReference> cleared_weak_ref = ClearedValue(heap_->isolate());
3903 while (local_weak_objects()->weak_references_non_trivial_unmarked_local.Pop(
3904 &slot)) {
3905    // The slot cannot have been overwritten since it was filtered, so we can
3906    // directly read its value.
3907 Tagged<HeapObject> value = (*slot.slot).GetHeapObjectAssumeWeak();
3908 DCHECK(!IsWeakCell(value));
3909 DCHECK(!HeapLayout::InReadOnlySpace(value));
3910 DCHECK_IMPLIES(v8_flags.black_allocated_pages,
3911 !HeapLayout::InBlackAllocatedPage(value));
3912 DCHECK(!non_atomic_marking_state_->IsMarked(value));
3913 DCHECK(!MainMarkingVisitor::IsTrivialWeakReferenceValue(slot.heap_object,
3914 value));
3915 if (!SpecialClearMapSlot(slot.heap_object, Cast<Map>(value), slot.slot)) {
3916 slot.slot.store(cleared_weak_ref);
3917 }
3918 }
3919}
3920
3921void MarkCompactCollector::ClearJSWeakRefs() {
3922 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_CLEAR_JS_WEAK_REFERENCES);
3923 Tagged<JSWeakRef> weak_ref;
3924 Isolate* const isolate = heap_->isolate();
3925 while (local_weak_objects()->js_weak_refs_local.Pop(&weak_ref)) {
3926 Tagged<HeapObject> target = Cast<HeapObject>(weak_ref->target());
3927 if (MarkingHelper::IsUnmarkedAndNotAlwaysLive(
3928 heap_, non_atomic_marking_state_, target)) {
3929 weak_ref->set_target(ReadOnlyRoots(isolate).undefined_value());
3930 } else {
3931 // The value of the JSWeakRef is alive.
3932 ObjectSlot slot = weak_ref->RawField(JSWeakRef::kTargetOffset);
3933 RecordSlot(weak_ref, slot, target);
3934 }
3935 }
3936 Tagged<WeakCell> weak_cell;
3937 while (local_weak_objects()->weak_cells_local.Pop(&weak_cell)) {
3938 auto gc_notify_updated_slot = [](Tagged<HeapObject> object, ObjectSlot slot,
3939                                    Tagged<Object> target) {
3940      if (IsHeapObject(target)) {
3941 RecordSlot(object, slot, Cast<HeapObject>(target));
3942 }
3943 };
3944 Tagged<HeapObject> target = Cast<HeapObject>(weak_cell->target());
3945 if (MarkingHelper::IsUnmarkedAndNotAlwaysLive(
3946 heap_, non_atomic_marking_state_, target)) {
3947 DCHECK(Object::CanBeHeldWeakly(target));
3948 // The value of the WeakCell is dead.
3949 Tagged<JSFinalizationRegistry> finalization_registry =
3950 Cast<JSFinalizationRegistry>(weak_cell->finalization_registry());
3951 if (!finalization_registry->scheduled_for_cleanup()) {
3952 heap_->EnqueueDirtyJSFinalizationRegistry(finalization_registry,
3953 gc_notify_updated_slot);
3954 }
3955 // We're modifying the pointers in WeakCell and JSFinalizationRegistry
3956      // during GC; thus we need to record the slots that are written. The normal write
3957 // barrier is not enough, since it's disabled before GC.
3958 weak_cell->Nullify(isolate, gc_notify_updated_slot);
3959 DCHECK(finalization_registry->NeedsCleanup());
3960 DCHECK(finalization_registry->scheduled_for_cleanup());
3961 } else {
3962 // The value of the WeakCell is alive.
3963 ObjectSlot slot = weak_cell->RawField(WeakCell::kTargetOffset);
3964 RecordSlot(weak_cell, slot, Cast<HeapObject>(*slot));
3965 }
3966
3967 Tagged<HeapObject> unregister_token = weak_cell->unregister_token();
3968 if (MarkingHelper::IsUnmarkedAndNotAlwaysLive(
3969 heap_, non_atomic_marking_state_, unregister_token)) {
3970 DCHECK(Object::CanBeHeldWeakly(unregister_token));
3971 // The unregister token is dead. Remove any corresponding entries in the
3972      // key map. Multiple WeakCells with the same token will have all their
3973 // unregister_token field set to undefined when processing the first
3974 // WeakCell. Like above, we're modifying pointers during GC, so record the
3975 // slots.
3976 Tagged<JSFinalizationRegistry> finalization_registry =
3977 Cast<JSFinalizationRegistry>(weak_cell->finalization_registry());
3978 finalization_registry->RemoveUnregisterToken(
3979 unregister_token, isolate,
3980 JSFinalizationRegistry::kKeepMatchedCellsInRegistry,
3981 gc_notify_updated_slot);
3982 } else {
3983 // The unregister_token is alive.
3984 ObjectSlot slot = weak_cell->RawField(WeakCell::kUnregisterTokenOffset);
3985 RecordSlot(weak_cell, slot, Cast<HeapObject>(*slot));
3986 }
3987 }
3988 heap_->PostFinalizationRegistryCleanupTaskIfNeeded();
3989}
3990
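// Slots embedded in InstructionStream objects cannot be recorded as regular
// tagged slots; they are stored as typed slots identified by (page, slot type,
// offset) and are only recorded when the target lives on an evacuation
// candidate and the host page does not skip slot recording.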
3991// static
3992bool MarkCompactCollector::ShouldRecordRelocSlot(Tagged<InstructionStream> host,
3993 RelocInfo* rinfo,
3994 Tagged<HeapObject> target) {
3995 MemoryChunk* source_chunk = MemoryChunk::FromHeapObject(host);
3996 MemoryChunk* target_chunk = MemoryChunk::FromHeapObject(target);
3997 return target_chunk->IsEvacuationCandidate() &&
3998 !source_chunk->ShouldSkipEvacuationSlotRecording();
3999}
4000
4001// static
4002MarkCompactCollector::RecordRelocSlotInfo
4003MarkCompactCollector::ProcessRelocInfo(Tagged<InstructionStream> host,
4004                                       RelocInfo* rinfo,
4005                                       Tagged<HeapObject> target) {
4006  RecordRelocSlotInfo result;
4007  const RelocInfo::Mode rmode = rinfo->rmode();
4008 Address addr;
4009 SlotType slot_type;
4010
4011 if (rinfo->IsInConstantPool()) {
4012 addr = rinfo->constant_pool_entry_address();
4013
4014 if (RelocInfo::IsCodeTargetMode(rmode)) {
4015 slot_type = SlotType::kConstPoolCodeEntry;
4016 } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
4017 slot_type = SlotType::kConstPoolEmbeddedObjectCompressed;
4018 } else {
4019 DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
4020 slot_type = SlotType::kConstPoolEmbeddedObjectFull;
4021 }
4022 } else {
4023 addr = rinfo->pc();
4024
4025 if (RelocInfo::IsCodeTargetMode(rmode)) {
4026 slot_type = SlotType::kCodeEntry;
4027 } else if (RelocInfo::IsFullEmbeddedObject(rmode)) {
4028 slot_type = SlotType::kEmbeddedObjectFull;
4029 } else {
4030 DCHECK(RelocInfo::IsCompressedEmbeddedObject(rmode));
4031 slot_type = SlotType::kEmbeddedObjectCompressed;
4032 }
4033 }
4034
4035 MemoryChunk* const source_chunk = MemoryChunk::FromHeapObject(host);
4036 MutablePageMetadata* const source_page_metadata =
4037 MutablePageMetadata::cast(source_chunk->Metadata());
4038 const uintptr_t offset = source_chunk->Offset(addr);
4039 DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
4040 result.page_metadata = source_page_metadata;
4041 result.slot_type = slot_type;
4042 result.offset = static_cast<uint32_t>(offset);
4043
4044 return result;
4045}
4046
4047// static
4048void MarkCompactCollector::RecordRelocSlot(Tagged<InstructionStream> host,
4049 RelocInfo* rinfo,
4050 Tagged<HeapObject> target) {
4051 if (!ShouldRecordRelocSlot(host, rinfo, target)) return;
4052 RecordRelocSlotInfo info = ProcessRelocInfo(host, rinfo, target);
4053
4054  // Access to TypedSlots needs to be protected, since LocalHeaps might
4055  // publish code in the background thread.
4056 std::optional<base::MutexGuard> opt_guard;
4057 if (v8_flags.concurrent_sparkplug) {
4058 opt_guard.emplace(info.page_metadata->mutex());
4059 }
4060 RememberedSet<OLD_TO_OLD>::InsertTyped(info.page_metadata, info.slot_type,
4061 info.offset);
4062}
4063
4064namespace {
4065
4066// The missing specialization MakeSlotValue<FullObjectSlot, WEAK>() turns any
4067// attempt to store a weak reference into a strong-only slot into a compilation error.
4068template <typename TSlot, HeapObjectReferenceType reference_type>
4069typename TSlot::TObject MakeSlotValue(Tagged<HeapObject> heap_object);
4070
4071template <>
4072Tagged<Object> MakeSlotValue<ObjectSlot, HeapObjectReferenceType::STRONG>(
4073 Tagged<HeapObject> heap_object) {
4074 return heap_object;
4075}
4076
4077template <>
4078Tagged<MaybeObject>
4079MakeSlotValue<MaybeObjectSlot, HeapObjectReferenceType::STRONG>(
4080 Tagged<HeapObject> heap_object) {
4081 return heap_object;
4082}
4083
4084template <>
4085Tagged<MaybeObject>
4086MakeSlotValue<MaybeObjectSlot, HeapObjectReferenceType::WEAK>(
4087 Tagged<HeapObject> heap_object) {
4088 return MakeWeak(heap_object);
4089}
4090
4091template <>
4092Tagged<Object>
4093MakeSlotValue<WriteProtectedSlot<ObjectSlot>, HeapObjectReferenceType::STRONG>(
4094 Tagged<HeapObject> heap_object) {
4095 return heap_object;
4096}
4097
4098#ifdef V8_ENABLE_SANDBOX
4099template <>
4100Tagged<Object> MakeSlotValue<WriteProtectedSlot<ProtectedPointerSlot>,
4101 HeapObjectReferenceType::STRONG>(
4102 Tagged<HeapObject> heap_object) {
4103 return heap_object;
4104}
4105
4106template <>
4107Tagged<MaybeObject>
4108MakeSlotValue<ProtectedMaybeObjectSlot, HeapObjectReferenceType::STRONG>(
4109 Tagged<HeapObject> heap_object) {
4110 return heap_object;
4111}
4112
4113template <>
4114Tagged<MaybeObject>
4115MakeSlotValue<ProtectedMaybeObjectSlot, HeapObjectReferenceType::WEAK>(
4116 Tagged<HeapObject> heap_object) {
4117 return MakeWeak(heap_object);
4118}
4119#endif
4120
4121template <>
4122Tagged<Object>
4123MakeSlotValue<OffHeapObjectSlot, HeapObjectReferenceType::STRONG>(
4124 Tagged<HeapObject> heap_object) {
4125 return heap_object;
4126}
4127
4128#ifdef V8_COMPRESS_POINTERS
4129template <>
4130Tagged<Object> MakeSlotValue<FullObjectSlot, HeapObjectReferenceType::STRONG>(
4131 Tagged<HeapObject> heap_object) {
4132 return heap_object;
4133}
4134
4135template <>
4136Tagged<MaybeObject>
4137MakeSlotValue<FullMaybeObjectSlot, HeapObjectReferenceType::STRONG>(
4138 Tagged<HeapObject> heap_object) {
4139 return heap_object;
4140}
4141
4142template <>
4143Tagged<MaybeObject>
4144MakeSlotValue<FullMaybeObjectSlot, HeapObjectReferenceType::WEAK>(
4145 Tagged<HeapObject> heap_object) {
4146 return MakeWeak(heap_object);
4147}
4148
4149#ifdef V8_EXTERNAL_CODE_SPACE
4150template <>
4151Tagged<Object>
4152MakeSlotValue<InstructionStreamSlot, HeapObjectReferenceType::STRONG>(
4153 Tagged<HeapObject> heap_object) {
4154 return heap_object;
4155}
4156#endif // V8_EXTERNAL_CODE_SPACE
4157
4158#ifdef V8_ENABLE_SANDBOX
4159template <>
4160Tagged<Object>
4161MakeSlotValue<ProtectedPointerSlot, HeapObjectReferenceType::STRONG>(
4162 Tagged<HeapObject> heap_object) {
4163 return heap_object;
4164}
4165#endif // V8_ENABLE_SANDBOX
4166
4167// The following specialization
4168//   MakeSlotValue<OffHeapObjectSlot, HeapObjectReferenceType::WEAK>()
4169// is not used.
4170#endif // V8_COMPRESS_POINTERS
4171
4172template <HeapObjectReferenceType reference_type, typename TSlot>
4173static inline void UpdateSlot(PtrComprCageBase cage_base, TSlot slot,
4174 Tagged<HeapObject> heap_obj) {
4175 static_assert(
4176 std::is_same_v<TSlot, FullObjectSlot> ||
4177 std::is_same_v<TSlot, ObjectSlot> ||
4178 std::is_same_v<TSlot, FullMaybeObjectSlot> ||
4179 std::is_same_v<TSlot, MaybeObjectSlot> ||
4180 std::is_same_v<TSlot, OffHeapObjectSlot> ||
4181 std::is_same_v<TSlot, InstructionStreamSlot> ||
4182 std::is_same_v<TSlot, ProtectedPointerSlot> ||
4183 std::is_same_v<TSlot, ProtectedMaybeObjectSlot> ||
4184 std::is_same_v<TSlot, WriteProtectedSlot<ObjectSlot>> ||
4185 std::is_same_v<TSlot, WriteProtectedSlot<ProtectedPointerSlot>>,
4186 "Only [Full|OffHeap]ObjectSlot, [Full]MaybeObjectSlot, "
4187 "InstructionStreamSlot, Protected[Pointer|MaybeObject]Slot, "
4188 "or WriteProtectedSlot are expected here");
4189 MapWord map_word = heap_obj->map_word(cage_base, kRelaxedLoad);
4190 if (!map_word.IsForwardingAddress()) return;
4191 DCHECK_IMPLIES((!v8_flags.minor_ms && !Heap::InFromPage(heap_obj)),
4192 MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
4193 MemoryChunk::FromHeapObject(heap_obj)->IsFlagSet(
4194 MemoryChunk::COMPACTION_WAS_ABORTED));
4195 typename TSlot::TObject target = MakeSlotValue<TSlot, reference_type>(
4196 map_word.ToForwardingAddress(heap_obj));
4197 // Needs to be atomic for map space compaction: This slot could be a map
4198 // word which we update while loading the map word for updating the slot
4199 // on another page.
4200 slot.Relaxed_Store(target);
4201 DCHECK_IMPLIES(!v8_flags.sticky_mark_bits, !Heap::InFromPage(target));
4202 DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
4203}
4204
4205template <typename TSlot>
4206static inline void UpdateSlot(PtrComprCageBase cage_base, TSlot slot) {
4207 typename TSlot::TObject obj = slot.Relaxed_Load(cage_base);
4208 Tagged<HeapObject> heap_obj;
4209 if constexpr (TSlot::kCanBeWeak) {
4210 if (obj.GetHeapObjectIfWeak(&heap_obj)) {
4211 return UpdateSlot<HeapObjectReferenceType::WEAK>(cage_base, slot,
4212 heap_obj);
4213 }
4214 }
4215 if (obj.GetHeapObjectIfStrong(&heap_obj)) {
4216 UpdateSlot<HeapObjectReferenceType::STRONG>(cage_base, slot, heap_obj);
4217 }
4218}
4219
4220template <typename TSlot>
4221static inline SlotCallbackResult UpdateOldToSharedSlot(
4222 PtrComprCageBase cage_base, TSlot slot) {
4223 typename TSlot::TObject obj = slot.Relaxed_Load(cage_base);
4224 Tagged<HeapObject> heap_obj;
4225
4226 if constexpr (TSlot::kCanBeWeak) {
4227 if (obj.GetHeapObjectIfWeak(&heap_obj)) {
4228 UpdateSlot<HeapObjectReferenceType::WEAK>(cage_base, slot, heap_obj);
4229 return HeapLayout::InWritableSharedSpace(heap_obj) ? KEEP_SLOT
4230 : REMOVE_SLOT;
4231 }
4232 }
4233
4234 if (obj.GetHeapObjectIfStrong(&heap_obj)) {
4235 UpdateSlot<HeapObjectReferenceType::STRONG>(cage_base, slot, heap_obj);
4236 return HeapLayout::InWritableSharedSpace(heap_obj) ? KEEP_SLOT
4237 : REMOVE_SLOT;
4238 }
4239
4240 return REMOVE_SLOT;
4241}
4242
4243template <typename TSlot>
4244static inline void UpdateStrongSlot(PtrComprCageBase cage_base, TSlot slot) {
4245 typename TSlot::TObject obj = slot.Relaxed_Load(cage_base);
4246#ifdef V8_ENABLE_DIRECT_HANDLE
4247 if (obj.ptr() == kTaggedNullAddress) return;
4248#endif
4249 DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(obj.ptr()));
4250 Tagged<HeapObject> heap_obj;
4251 if (obj.GetHeapObject(&heap_obj)) {
4252 UpdateSlot<HeapObjectReferenceType::STRONG>(cage_base, slot, heap_obj);
4253 }
4254}
4255
4256static inline SlotCallbackResult UpdateStrongOldToSharedSlot(
4257 PtrComprCageBase cage_base, FullMaybeObjectSlot slot) {
4258 Tagged<MaybeObject> obj = slot.Relaxed_Load(cage_base);
4259#ifdef V8_ENABLE_DIRECT_HANDLE
4260 if (obj.ptr() == kTaggedNullAddress) return REMOVE_SLOT;
4261#endif
4262 DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(obj.ptr()));
4263 Tagged<HeapObject> heap_obj;
4264 if (obj.GetHeapObject(&heap_obj)) {
4265 UpdateSlot<HeapObjectReferenceType::STRONG>(cage_base, slot, heap_obj);
4266 return HeapLayout::InWritableSharedSpace(heap_obj) ? KEEP_SLOT
4267 : REMOVE_SLOT;
4268 }
4269
4270 return REMOVE_SLOT;
4271}
4272
4273static inline void UpdateStrongCodeSlot(IsolateForSandbox isolate,
4274 PtrComprCageBase cage_base,
4275 PtrComprCageBase code_cage_base,
4276 InstructionStreamSlot slot) {
4277 Tagged<Object> obj = slot.Relaxed_Load(code_cage_base);
4278 DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(obj.ptr()));
4279 Tagged<HeapObject> heap_obj;
4280 if (obj.GetHeapObject(&heap_obj)) {
4281 UpdateSlot<HeapObjectReferenceType::STRONG>(cage_base, slot, heap_obj);
4282
4283 Tagged<Code> code = Cast<Code>(HeapObject::FromAddress(
4284 slot.address() - Code::kInstructionStreamOffset));
4285 Tagged<InstructionStream> instruction_stream =
4286 code->instruction_stream(code_cage_base);
4287 code->UpdateInstructionStart(isolate, instruction_stream);
4288 }
4289}
4290
4291} // namespace
4292
4293// Visitor for updating root pointers and to-space pointers.
4294// It does not expect to encounter pointers to dead objects.
4295class PointersUpdatingVisitor final : public ObjectVisitorWithCageBases,
4296                                      public RootVisitor {
4297 public:
4298  explicit PointersUpdatingVisitor(Heap* heap)
4299      : ObjectVisitorWithCageBases(heap), isolate_(heap->isolate()) {}
4300
4301  void VisitPointer(Tagged<HeapObject> host, ObjectSlot p) override {
4302    UpdateStrongSlotInternal(cage_base(), p);
4303 }
4304
4305  void VisitPointer(Tagged<HeapObject> host, MaybeObjectSlot p) override {
4306    UpdateSlotInternal(cage_base(), p);
4307 }
4308
4309  void VisitPointers(Tagged<HeapObject> host, ObjectSlot start,
4310                     ObjectSlot end) override {
4311 for (ObjectSlot p = start; p < end; ++p) {
4312 UpdateStrongSlotInternal(cage_base(), p);
4313 }
4314 }
4315
4316  void VisitPointers(Tagged<HeapObject> host, MaybeObjectSlot start,
4317                     MaybeObjectSlot end) final {
4318 for (MaybeObjectSlot p = start; p < end; ++p) {
4319 UpdateSlotInternal(cage_base(), p);
4320 }
4321 }
4322
4323  void VisitInstructionStreamPointer(Tagged<Code> host,
4324                                     InstructionStreamSlot slot) override {
4325 UpdateStrongCodeSlot(isolate_, cage_base(), code_cage_base(), slot);
4326 }
4327
4328 void VisitRootPointer(Root root, const char* description,
4329 FullObjectSlot p) override {
4330 DCHECK(!MapWord::IsPacked(p.Relaxed_Load().ptr()));
4331 UpdateRootSlotInternal(cage_base(), p);
4332 }
4333
4334 void VisitRootPointers(Root root, const char* description,
4335                         FullObjectSlot start, FullObjectSlot end) override {
4336    for (FullObjectSlot p = start; p < end; ++p) {
4337 UpdateRootSlotInternal(cage_base(), p);
4338 }
4339 }
4340
4341 void VisitRootPointers(Root root, const char* description,
4342                         OffHeapObjectSlot start,
4343                         OffHeapObjectSlot end) override {
4344 for (OffHeapObjectSlot p = start; p < end; ++p) {
4345 UpdateRootSlotInternal(cage_base(), p);
4346 }
4347 }
4348
4349  void VisitCodeTarget(Tagged<InstructionStream> host,
4350                       RelocInfo* rinfo) override {
4351    // This visitor never visits code objects.
4352 UNREACHABLE();
4353 }
4354
4355  void VisitEmbeddedPointer(Tagged<InstructionStream> host,
4356                            RelocInfo* rinfo) override {
4357    // This visitor never visits code objects.
4358 UNREACHABLE();
4359 }
4360
4361 private:
4362 static inline void UpdateRootSlotInternal(PtrComprCageBase cage_base,
4363 FullObjectSlot slot) {
4364 UpdateStrongSlot(cage_base, slot);
4365 }
4366
4367 static inline void UpdateRootSlotInternal(PtrComprCageBase cage_base,
4368 OffHeapObjectSlot slot) {
4369 UpdateStrongSlot(cage_base, slot);
4370 }
4371
4372  static inline void UpdateStrongMaybeObjectSlotInternal(
4373      PtrComprCageBase cage_base, MaybeObjectSlot slot) {
4374 UpdateStrongSlot(cage_base, slot);
4375 }
4376
4377 static inline void UpdateStrongSlotInternal(PtrComprCageBase cage_base,
4378 ObjectSlot slot) {
4379 UpdateStrongSlot(cage_base, slot);
4380 }
4381
4382 static inline void UpdateSlotInternal(PtrComprCageBase cage_base,
4383 MaybeObjectSlot slot) {
4384 UpdateSlot(cage_base, slot);
4385 }
4386
4387  Isolate* isolate_;
4388};
4389
4390static Tagged<String> UpdateReferenceInExternalStringTableEntry(
4391    Heap* heap, FullObjectSlot p) {
4392 Tagged<HeapObject> old_string = Cast<HeapObject>(*p);
4393 MapWord map_word = old_string->map_word(kRelaxedLoad);
4394
4395 if (map_word.IsForwardingAddress()) {
4396 Tagged<String> new_string =
4397 Cast<String>(map_word.ToForwardingAddress(old_string));
4398
4399 if (IsExternalString(new_string)) {
4400 MutablePageMetadata::MoveExternalBackingStoreBytes(
4401 ExternalBackingStoreType::kExternalString,
4402 PageMetadata::FromAddress((*p).ptr()),
4403 PageMetadata::FromHeapObject(new_string),
4404 Cast<ExternalString>(new_string)->ExternalPayloadSize());
4405 }
4406 return new_string;
4407 }
4408
4409 return Cast<String>(*p);
4410}
4411
4412void MarkCompactCollector::EvacuatePrologue() {
4413 // New space.
4414 if (NewSpace* new_space = heap_->new_space()) {
4415 DCHECK(new_space_evacuation_pages_.empty());
4416 std::copy_if(new_space->begin(), new_space->end(),
4417 std::back_inserter(new_space_evacuation_pages_),
4418 [](PageMetadata* p) { return p->live_bytes() > 0; });
4419 if (!v8_flags.minor_ms) {
4420 SemiSpaceNewSpace::From(new_space)->SwapSemiSpaces();
4421 }
4422 }
4423
4424 // Large new space.
4425 if (NewLargeObjectSpace* new_lo_space = heap_->new_lo_space()) {
4426 new_lo_space->Flip();
4427 new_lo_space->ResetPendingObject();
4428 }
4429
4430 // Old space.
4431 DCHECK(old_space_evacuation_pages_.empty());
4432 old_space_evacuation_pages_ = std::move(evacuation_candidates_);
4433 evacuation_candidates_.clear();
4434 DCHECK(evacuation_candidates_.empty());
4435}
4436
4437void MarkCompactCollector::EvacuateEpilogue() {
4438 aborted_evacuation_candidates_due_to_oom_.clear();
4439 aborted_evacuation_candidates_due_to_flags_.clear();
4440
4441 // New space.
4442 if (heap_->new_space()) {
4443 DCHECK_EQ(0, heap_->new_space()->Size());
4444 }
4445
4446 // Old generation. Deallocate evacuated candidate pages.
4447 ReleaseEvacuationCandidates();
4448
4449#ifdef DEBUG
4450 VerifyRememberedSetsAfterEvacuation(heap_, GarbageCollector::MARK_COMPACTOR);
4451#endif // DEBUG
4452}
4453
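// An Evacuator processes one page at a time in one of three modes: copying
// individual live objects from new space into old space, promoting an entire
// new-space page as-is, or compacting live objects between old-space pages.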
4454class Evacuator final : public Malloced {
4455 public:
4456  enum EvacuationMode {
4457    kObjectsNewToOld,
4458    kPageNewToOld,
4459    kObjectsOldToOld,
4460  };
4461
4462 static const char* EvacuationModeName(EvacuationMode mode) {
4463 switch (mode) {
4464 case kObjectsNewToOld:
4465 return "objects-new-to-old";
4466 case kPageNewToOld:
4467 return "page-new-to-old";
4468 case kObjectsOldToOld:
4469 return "objects-old-to-old";
4470 }
4471 }
4472
4473  static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
4474    // Note: The order of checks is important in this function.
4475 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
4476 return kPageNewToOld;
4477 if (chunk->InYoungGeneration()) return kObjectsNewToOld;
4478 return kObjectsOldToOld;
4479 }
4480
4481  explicit Evacuator(Heap* heap)
4482      : heap_(heap),
4483 local_pretenuring_feedback_(
4484 PretenuringHandler::kInitialFeedbackCapacity),
4485 local_allocator_(heap_,
4486                         CompactionSpaceKind::kCompactionSpaceForMarkCompact),
4487        record_visitor_(heap_),
4488 new_space_visitor_(heap_, &local_allocator_, &record_visitor_,
4489 &local_pretenuring_feedback_),
4490 new_to_old_page_visitor_(heap_, &record_visitor_,
4491 &local_pretenuring_feedback_),
4492
4493 old_space_visitor_(heap_, &local_allocator_, &record_visitor_),
4494 duration_(0.0),
4495 bytes_compacted_(0) {}
4496
4497 void EvacuatePage(MutablePageMetadata* chunk);
4498
4499  void AddObserver(MigrationObserver* observer) {
4500    new_space_visitor_.AddObserver(observer);
4501 old_space_visitor_.AddObserver(observer);
4502 }
4503
4504 // Merge back locally cached info sequentially. Note that this method needs
4505 // to be called from the main thread.
4506 void Finalize();
4507
4508 private:
4509 // |saved_live_bytes| returns the live bytes of the page that was processed.
4510 bool RawEvacuatePage(MutablePageMetadata* chunk);
4511
4512 inline Heap* heap() { return heap_; }
4513
4514 void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
4515 duration_ += duration;
4516 bytes_compacted_ += bytes_compacted;
4517 }
4518
4519  Heap* heap_;
4520
4521
4522
4523  // Locally cached collector data.
4524  PretenuringHandler::PretenuringFeedbackMap local_pretenuring_feedback_;
4525  EvacuationAllocator local_allocator_;
4526  RecordMigratedSlotVisitor record_visitor_;
4527
4528  // Visitors for the corresponding spaces.
4529  EvacuateNewSpaceVisitor new_space_visitor_;
4530  EvacuateNewToOldSpacePageVisitor new_to_old_page_visitor_;
4531  EvacuateOldSpaceVisitor old_space_visitor_;
4532
4533  // Bookkeeping info.
4534  double duration_;
4535  intptr_t bytes_compacted_;
4536};
4537
4538void Evacuator::EvacuatePage(MutablePageMetadata* page) {
4539 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Evacuator::EvacuatePage");
4540 DCHECK(page->SweepingDone());
4541 intptr_t saved_live_bytes = page->live_bytes();
4542 double evacuation_time = 0.0;
4543 bool success = false;
4544 {
4545 TimedScope timed_scope(&evacuation_time);
4546 success = RawEvacuatePage(page);
4547 }
4548 ReportCompactionProgress(evacuation_time, saved_live_bytes);
4549 if (v8_flags.trace_evacuation) {
4550 MemoryChunk* chunk = page->Chunk();
4551 PrintIsolate(heap_->isolate(),
4552 "evacuation[%p]: page=%p new_space=%d "
4553 "page_evacuation=%d executable=%d can_promote=%d "
4554 "live_bytes=%" V8PRIdPTR " time=%f success=%d\n",
4555 static_cast<void*>(this), static_cast<void*>(page),
4556 chunk->InNewSpace(),
4557 chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION),
4558 chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
4559 heap_->new_space()->IsPromotionCandidate(page),
4560 saved_live_bytes, evacuation_time, success);
4561 }
4562}
4563
4564void Evacuator::Finalize() {
4565 local_allocator_.Finalize();
4566 heap_->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
4567 heap_->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
4568 new_to_old_page_visitor_.moved_bytes());
4569 heap_->IncrementYoungSurvivorsCounter(
4570 new_space_visitor_.promoted_size() +
4571 new_to_old_page_visitor_.moved_bytes());
4572 heap_->pretenuring_handler()->MergeAllocationSitePretenuringFeedback(
4573 local_pretenuring_feedback_);
4574}
4575
4576class LiveObjectVisitor final : AllStatic {
4577 public:
4578 // Visits marked objects using `bool Visitor::Visit(HeapObject object, size_t
4579 // size)` as long as the return value is true.
4580 //
4581 // Returns whether all objects were successfully visited. Upon returning
4582 // false, also sets `failed_object` to the object for which the visitor
4583 // returned false.
4584 template <class Visitor>
4585 static bool VisitMarkedObjects(PageMetadata* page, Visitor* visitor,
4586 Tagged<HeapObject>* failed_object);
4587
4588 // Visits marked objects using `bool Visitor::Visit(HeapObject object, size_t
4589 // size)` as long as the return value is true. Assumes that the return value
4590 // is always true (success).
4591 template <class Visitor>
4592 static void VisitMarkedObjectsNoFail(PageMetadata* page, Visitor* visitor);
4593};
4594
4595template <class Visitor>
4596bool LiveObjectVisitor::VisitMarkedObjects(PageMetadata* page, Visitor* visitor,
4597 Tagged<HeapObject>* failed_object) {
4598  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4599               "LiveObjectVisitor::VisitMarkedObjects");
4600 for (auto [object, size] : LiveObjectRange(page)) {
4601 if (!visitor->Visit(object, size)) {
4602 *failed_object = object;
4603 return false;
4604 }
4605 }
4606 return true;
4607}
4608
4609template <class Visitor>
4610void LiveObjectVisitor::VisitMarkedObjectsNoFail(PageMetadata* page,
4611 Visitor* visitor) {
4612  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4613               "LiveObjectVisitor::VisitMarkedObjectsNoFail");
4614 for (auto [object, size] : LiveObjectRange(page)) {
4615 const bool success = visitor->Visit(object, size);
4616 USE(success);
4617 DCHECK(success);
4618 }
4619}
4620
4621bool Evacuator::RawEvacuatePage(MutablePageMetadata* page) {
4622 MemoryChunk* chunk = page->Chunk();
4623 const EvacuationMode evacuation_mode = ComputeEvacuationMode(chunk);
4624  TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4625               "FullEvacuator::RawEvacuatePage", "evacuation_mode",
4626 EvacuationModeName(evacuation_mode), "live_bytes",
4627 page->live_bytes());
4628 switch (evacuation_mode) {
4629 case kObjectsNewToOld:
4630#if DEBUG
4631 new_space_visitor_.DisableAbortEvacuationAtAddress(page);
4632#endif // DEBUG
4633 LiveObjectVisitor::VisitMarkedObjectsNoFail(PageMetadata::cast(page),
4634 &new_space_visitor_);
4635 page->ClearLiveness();
4636 break;
4637 case kPageNewToOld:
4638 if (chunk->IsLargePage()) {
4639 auto object = LargePageMetadata::cast(page)->GetObject();
4640 bool success = new_to_old_page_visitor_.Visit(object, object->Size());
4641 USE(success);
4642 DCHECK(success);
4643 } else {
4644 LiveObjectVisitor::VisitMarkedObjectsNoFail(PageMetadata::cast(page),
4645 &new_to_old_page_visitor_);
4646 }
4647 new_to_old_page_visitor_.account_moved_bytes(page->live_bytes());
4648 break;
4649 case kObjectsOldToOld: {
4650#if DEBUG
4651 old_space_visitor_.SetUpAbortEvacuationAtAddress(page);
4652#endif // DEBUG
4653 Tagged<HeapObject> failed_object;
4654 if (LiveObjectVisitor::VisitMarkedObjects(
4655 PageMetadata::cast(page), &old_space_visitor_, &failed_object)) {
4656 page->ClearLiveness();
4657 } else {
4658 // Aborted compaction page. Actual processing happens on the main
4659 // thread for simplicity reasons.
4660 heap_->mark_compact_collector()
4661 ->ReportAbortedEvacuationCandidateDueToOOM(
4662 failed_object.address(), static_cast<PageMetadata*>(page));
4663 return false;
4664 }
4665 break;
4666 }
4667 }
4668
4669 return true;
4670}
4671
4672class PageEvacuationJob : public v8::JobTask {
4673 public:
4674  PageEvacuationJob(
4675      Isolate* isolate, MarkCompactCollector* collector,
4676 std::vector<std::unique_ptr<Evacuator>>* evacuators,
4677 std::vector<std::pair<ParallelWorkItem, MutablePageMetadata*>>
4678 evacuation_items)
4679 : collector_(collector),
4680 evacuators_(evacuators),
4681 evacuation_items_(std::move(evacuation_items)),
4682 remaining_evacuation_items_(evacuation_items_.size()),
4683 generator_(evacuation_items_.size()),
4684 tracer_(isolate->heap()->tracer()),
4685 trace_id_(reinterpret_cast<uint64_t>(this) ^
4686 tracer_->CurrentEpoch(GCTracer::Scope::MC_EVACUATE)) {}
4687
4688 void Run(JobDelegate* delegate) override {
4689 // Set the current isolate such that trusted pointer tables etc are
4690 // available and the cage base is set correctly for multi-cage mode.
4691 SetCurrentIsolateScope isolate_scope(collector_->heap()->isolate());
4692
4693 Evacuator* evacuator = (*evacuators_)[delegate->GetTaskId()].get();
4694 if (delegate->IsJoiningThread()) {
4695 TRACE_GC_WITH_FLOW(tracer_, GCTracer::Scope::MC_EVACUATE_COPY_PARALLEL,
4696                         trace_id_, TRACE_EVENT_FLAG_FLOW_IN);
4697      ProcessItems(delegate, evacuator);
4698    } else {
4699      TRACE_GC_EPOCH_WITH_FLOW(
4700          tracer_, GCTracer::Scope::MC_BACKGROUND_EVACUATE_COPY,
4701 ThreadKind::kBackground, trace_id_, TRACE_EVENT_FLAG_FLOW_IN);
4702 ProcessItems(delegate, evacuator);
4703 }
4704 }
4705
4706 void ProcessItems(JobDelegate* delegate, Evacuator* evacuator) {
4707 while (remaining_evacuation_items_.load(std::memory_order_relaxed) > 0) {
4708 std::optional<size_t> index = generator_.GetNext();
4709 if (!index) return;
4710 for (size_t i = *index; i < evacuation_items_.size(); ++i) {
4711 auto& work_item = evacuation_items_[i];
4712 if (!work_item.first.TryAcquire()) break;
4713 evacuator->EvacuatePage(work_item.second);
4714 if (remaining_evacuation_items_.fetch_sub(
4715 1, std::memory_order_relaxed) <= 1) {
4716 return;
4717 }
4718 }
4719 }
4720 }
4721
4722 size_t GetMaxConcurrency(size_t worker_count) const override {
4723 const size_t kItemsPerWorker = std::max(1, MB / PageMetadata::kPageSize);
4724 // Ceiling division to ensure enough workers for all
4725 // |remaining_evacuation_items_|
4726 size_t wanted_num_workers =
4727 (remaining_evacuation_items_.load(std::memory_order_relaxed) +
4728 kItemsPerWorker - 1) /
4729 kItemsPerWorker;
4730 wanted_num_workers =
4731 std::min<size_t>(wanted_num_workers, evacuators_->size());
4732 if (!collector_->UseBackgroundThreadsInCycle()) {
4733 return std::min<size_t>(wanted_num_workers, 1);
4734 }
4735 return wanted_num_workers;
4736 }
4737
4738 uint64_t trace_id() const { return trace_id_; }
4739
4740 private:
4741  MarkCompactCollector* collector_;
4742  std::vector<std::unique_ptr<Evacuator>>* evacuators_;
4743  std::vector<std::pair<ParallelWorkItem, MutablePageMetadata*>>
4744      evacuation_items_;
4745  std::atomic<size_t> remaining_evacuation_items_{0};
4746  IndexGenerator generator_;
4747
4748  GCTracer* tracer_;
4749  const uint64_t trace_id_;
4750};
4751
4752namespace {
4753size_t CreateAndExecuteEvacuationTasks(
4754 Heap* heap, MarkCompactCollector* collector,
4755 std::vector<std::pair<ParallelWorkItem, MutablePageMetadata*>>
4756 evacuation_items) {
4757 std::optional<ProfilingMigrationObserver> profiling_observer;
4758 if (heap->isolate()->log_object_relocation()) {
4759 profiling_observer.emplace(heap);
4760 }
4761 std::vector<std::unique_ptr<v8::internal::Evacuator>> evacuators;
4762 const int wanted_num_tasks = NumberOfParallelCompactionTasks(heap);
4763 for (int i = 0; i < wanted_num_tasks; i++) {
4764 auto evacuator = std::make_unique<Evacuator>(heap);
4765 if (profiling_observer) {
4766 evacuator->AddObserver(&profiling_observer.value());
4767 }
4768 evacuators.push_back(std::move(evacuator));
4769 }
4770 auto page_evacuation_job = std::make_unique<PageEvacuationJob>(
4771 heap->isolate(), collector, &evacuators, std::move(evacuation_items));
4772 TRACE_GC_NOTE_WITH_FLOW("PageEvacuationJob started",
4773 page_evacuation_job->trace_id(),
4774                          TRACE_EVENT_FLAG_FLOW_OUT);
4775  V8::GetCurrentPlatform()
4776      ->CreateJob(v8::TaskPriority::kUserBlocking,
4777                  std::move(page_evacuation_job))
4778 ->Join();
4779 for (auto& evacuator : evacuators) {
4780 evacuator->Finalize();
4781 }
4782 return wanted_num_tasks;
4783}
4784
4785enum class MemoryReductionMode { kNone, kShouldReduceMemory };
4786
4787// NewSpacePages with more live bytes than this threshold qualify for fast
4788// evacuation.
4789intptr_t NewSpacePageEvacuationThreshold() {
4790 return v8_flags.page_promotion_threshold *
4791 MemoryChunkLayout::AllocatableMemoryInDataPage() / 100;
4792}
4793
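// Whole-page promotion ("moving" a page) is preferred over copying individual
// objects when page promotion is enabled, memory is not being actively
// reduced, the page carries enough live bytes, and the old generation can
// still grow by that amount.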
4794bool ShouldMovePage(PageMetadata* p, intptr_t live_bytes,
4795 MemoryReductionMode memory_reduction_mode) {
4796 Heap* heap = p->heap();
4797 DCHECK(!p->Chunk()->NeverEvacuate());
4798 const bool should_move_page =
4799 v8_flags.page_promotion &&
4800 (memory_reduction_mode == MemoryReductionMode::kNone) &&
4801 (live_bytes > NewSpacePageEvacuationThreshold()) &&
4802 heap->CanExpandOldGeneration(live_bytes);
4803 if (v8_flags.trace_page_promotions) {
4804 PrintIsolate(heap->isolate(),
4805 "[Page Promotion] %p: collector=mc, should move: %d"
4806 ", live bytes = %zu, promotion threshold = %zu"
4807 ", allocated labs size = %zu\n",
4808 p, should_move_page, live_bytes,
4809 NewSpacePageEvacuationThreshold(), p->AllocatedLabSize());
4810 }
4811 return should_move_page;
4812}
4813
4814void TraceEvacuation(Isolate* isolate, size_t pages_count,
4815 size_t wanted_num_tasks, size_t live_bytes,
4816 size_t aborted_pages) {
4817 DCHECK(v8_flags.trace_evacuation);
4818 PrintIsolate(isolate,
4819 "%8.0f ms: evacuation-summary: parallel=%s pages=%zu "
4820 "wanted_tasks=%zu cores=%d live_bytes=%" V8PRIdPTR
4821 " compaction_speed=%.f aborted=%zu\n",
4822 isolate->time_millis_since_init(),
4823 v8_flags.parallel_compaction ? "yes" : "no", pages_count,
4824 wanted_num_tasks,
4825 V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1,
4826 live_bytes,
4827 isolate->heap()
4828 ->tracer()
4829 ->CompactionSpeedInBytesPerMillisecond()
4830 .value_or(0),
4831 aborted_pages);
4832}
4833
4834} // namespace
4835
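// Visits precise roots and pins the pages they point to: old-space evacuation
// candidates are reported as aborted due to flags, while young-generation
// pages are quarantined so they get promoted in place. Large and read-only
// pages never move and need no pinning.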
4836class PrecisePagePinningVisitor final : public RootVisitor {
4837 public:
4838  explicit PrecisePagePinningVisitor(MarkCompactCollector* collector)
4839      : RootVisitor(),
4840 collector_(collector),
4841 should_pin_in_shared_space_(
4842 collector->heap()->isolate()->is_shared_space_isolate()) {}
4843
4844 void VisitRootPointer(Root root, const char* description,
4845 FullObjectSlot p) final {
4846 HandlePointer(p);
4847 }
4848
4849 void VisitRootPointers(Root root, const char* description,
4850                         FullObjectSlot start, FullObjectSlot end) final {
4851    for (FullObjectSlot p = start; p < end; ++p) {
4852 HandlePointer(p);
4853 }
4854 }
4855
4856 private:
4857  void HandlePointer(FullObjectSlot p) {
4858    Tagged<Object> object = *p;
4859 if (!object.IsHeapObject()) {
4860 return;
4861 }
4862 MemoryChunk* chunk = MemoryChunk::FromHeapObject(Cast<HeapObject>(object));
4863 if (chunk->IsLargePage() || chunk->InReadOnlySpace()) {
4864 // Large objects and read only objects are not evacuated and thus don't
4865 // need to be pinned.
4866 return;
4867 }
4868 if (!should_pin_in_shared_space_ && chunk->InWritableSharedSpace()) {
4869 return;
4870 }
4871 if (chunk->InYoungGeneration()) {
4872 // Young gen pages are not considered evacuation candidates. Pinning is
4873 // done by marking them as quarantined and promoting the page as is.
4874 DCHECK(v8_flags.minor_ms ? chunk->IsToPage() : chunk->IsFromPage());
4875 if (chunk->IsQuarantined()) {
4876 return;
4877 }
4878 chunk->SetFlagNonExecutable(MemoryChunk::IS_QUARANTINED);
4879 return;
4880 }
4881 if (!chunk->IsFlagSet(MemoryChunk::EVACUATION_CANDIDATE)) {
4882 return;
4883 }
4884 collector_->ReportAbortedEvacuationCandidateDueToFlags(
4885 PageMetadata::cast(chunk->Metadata()), chunk);
4886 }
4887
4888  MarkCompactCollector* const collector_;
4889  const bool should_pin_in_shared_space_;
4890};
4891
4892void MarkCompactCollector::PinPreciseRootsIfNeeded() {
4893 if (!v8_flags.precise_object_pinning) {
4894 return;
4895 }
4896
4897 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_EVACUATE_PIN_PAGES);
4898
4899 Isolate* const isolate = heap_->isolate();
4900
4901 PrecisePagePinningVisitor root_visitor(this);
4902
4903 // Mark the heap roots including global variables, stack variables,
4904 // etc., and all objects reachable from them.
4905 heap_->IterateRootsForPrecisePinning(&root_visitor);
4906
4907 if (isolate->is_shared_space_isolate()) {
4908 ClientRootVisitor<> client_root_visitor(&root_visitor);
4909 isolate->global_safepoint()->IterateClientIsolates(
4910 [&client_root_visitor](Isolate* client) {
4911 client->heap()->IterateRootsForPrecisePinning(&client_root_visitor);
4912 });
4913 }
4914}
4915
4916void MarkCompactCollector::EvacuatePagesInParallel() {
4917 std::vector<std::pair<ParallelWorkItem, MutablePageMetadata*>>
4918 evacuation_items;
4919 intptr_t live_bytes = 0;
4920
4921 PinPreciseRootsIfNeeded();
4922
4923 // Evacuation of new space pages cannot be aborted, so it needs to run
4924 // before old space evacuation.
4925 bool force_page_promotion =
4926 heap_->IsGCWithStack() && !v8_flags.compact_with_stack;
4927 for (PageMetadata* page : new_space_evacuation_pages_) {
4928 intptr_t live_bytes_on_page = page->live_bytes();
4929 DCHECK_LT(0, live_bytes_on_page);
4930 live_bytes += live_bytes_on_page;
4931 MemoryReductionMode memory_reduction_mode =
4932 heap_->ShouldReduceMemory() ? MemoryReductionMode::kShouldReduceMemory
4933 : MemoryReductionMode::kNone;
4934 if (ShouldMovePage(page, live_bytes_on_page, memory_reduction_mode) ||
4935 force_page_promotion || page->Chunk()->IsQuarantined()) {
4936 EvacuateNewToOldSpacePageVisitor::Move(page);
4937 page->Chunk()->SetFlagNonExecutable(MemoryChunk::PAGE_NEW_OLD_PROMOTION);
4938 DCHECK_EQ(heap_->old_space(), page->owner());
4939 // The move added page->allocated_bytes to the old space, but we are
4940 // going to sweep the page and add page->live_byte_count.
4941 heap_->old_space()->DecreaseAllocatedBytes(page->allocated_bytes(), page);
4942 }
4943 evacuation_items.emplace_back(ParallelWorkItem{}, page);
4944 }
4945
4946 if (heap_->IsGCWithStack()) {
4947 if (!v8_flags.compact_with_stack) {
4948 for (PageMetadata* page : old_space_evacuation_pages_) {
4949 ReportAbortedEvacuationCandidateDueToFlags(page, page->Chunk());
4950 }
4951 } else if (!v8_flags.compact_code_space_with_stack ||
4952 heap_->isolate()->InFastCCall()) {
4953 // For fast C calls we cannot patch the return address in the native stack
4954 // frame if we would relocate InstructionStream objects.
4955 for (PageMetadata* page : old_space_evacuation_pages_) {
4956 if (page->owner_identity() != CODE_SPACE) continue;
4957 ReportAbortedEvacuationCandidateDueToFlags(page, page->Chunk());
4958 }
4959 }
4960 } else {
4961    // There should always be a stack when we are in a fast C call.
4962 DCHECK(!heap_->isolate()->InFastCCall());
4963 }
4964
4965 if (v8_flags.stress_compaction || v8_flags.stress_compaction_random) {
4966    // Stress aborting of evacuation by aborting ~5% of evacuation candidates
4967    // when stress testing.
4968 const double kFraction = 0.05;
4969
4970 for (PageMetadata* page : old_space_evacuation_pages_) {
4971 if (heap_->isolate()->fuzzer_rng()->NextDouble() < kFraction) {
4972 ReportAbortedEvacuationCandidateDueToFlags(page, page->Chunk());
4973 }
4974 }
4975 }
4976
4977 for (PageMetadata* page : old_space_evacuation_pages_) {
4978 MemoryChunk* chunk = page->Chunk();
4979 if (chunk->IsFlagSet(MemoryChunk::COMPACTION_WAS_ABORTED)) continue;
4980
4981 live_bytes += page->live_bytes();
4982 evacuation_items.emplace_back(ParallelWorkItem{}, page);
4983 }
4984
4985 // Promote young generation large objects.
4986 if (auto* new_lo_space = heap_->new_lo_space()) {
4987 for (auto it = new_lo_space->begin(); it != new_lo_space->end();) {
4988 LargePageMetadata* current = *(it++);
4989 Tagged<HeapObject> object = current->GetObject();
4990 // The black-allocated flag was already cleared in SweepLargeSpace().
4991 DCHECK_IMPLIES(v8_flags.black_allocated_pages,
4992 !HeapLayout::InBlackAllocatedPage(object));
4993 if (marking_state_->IsMarked(object)) {
4994 heap_->lo_space()->PromoteNewLargeObject(current);
4995 current->Chunk()->SetFlagNonExecutable(
4996 MemoryChunk::PAGE_NEW_OLD_PROMOTION);
4997 promoted_large_pages_.push_back(current);
4998 evacuation_items.emplace_back(ParallelWorkItem{}, current);
4999 }
5000 }
5001 new_lo_space->set_objects_size(0);
5002 }
5003
5004 const size_t pages_count = evacuation_items.size();
5005 size_t wanted_num_tasks = 0;
5006 if (!evacuation_items.empty()) {
5007    TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
5008                 "MarkCompactCollector::EvacuatePagesInParallel", "pages",
5009 evacuation_items.size());
5010
5011 wanted_num_tasks = CreateAndExecuteEvacuationTasks(
5012 heap_, this, std::move(evacuation_items));
5013 }
5014
5015 const size_t aborted_pages = PostProcessAbortedEvacuationCandidates();
5016
5017 if (v8_flags.trace_evacuation) {
5018 TraceEvacuation(heap_->isolate(), pages_count, wanted_num_tasks, live_bytes,
5019 aborted_pages);
5020 }
5021}
5022
5023class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
5024 public:
5025  Tagged<Object> RetainAs(Tagged<Object> object) override {
5026    if (object.IsHeapObject()) {
5027 Tagged<HeapObject> heap_object = Cast<HeapObject>(object);
5028 MapWord map_word = heap_object->map_word(kRelaxedLoad);
5029 if (map_word.IsForwardingAddress()) {
5030 return map_word.ToForwardingAddress(heap_object);
5031 }
5032 }
5033 return object;
5034 }
5035};
5036
5037void MarkCompactCollector::Evacuate() {
5038 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_EVACUATE);
5039
5040 {
5041 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE);
5042 EvacuatePrologue();
5043 }
5044
5045 {
5046 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
5047 EvacuatePagesInParallel();
5048 }
5049
5050 UpdatePointersAfterEvacuation();
5051
5052 {
5053 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
5054
5055 for (PageMetadata* p : new_space_evacuation_pages_) {
5056 MemoryChunk* chunk = p->Chunk();
5057 AllocationSpace owner_identity = p->owner_identity();
5058 USE(owner_identity);
5059 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION)) {
5060 chunk->ClearFlagNonExecutable(MemoryChunk::PAGE_NEW_OLD_PROMOTION);
5061 // The in-sandbox page flags may be corrupted, so we currently need
5062 // this check here to make sure that this doesn't lead to further
5063 // confusion about the state of MemoryChunkMetadata objects.
5064 // TODO(377724745): if we move (some of) the flags into the trusted
5065 // MemoryChunkMetadata object, then this wouldn't be necessary.
5066 SBXCHECK_EQ(OLD_SPACE, owner_identity);
5067 sweeper_->AddPage(OLD_SPACE, p);
5068 } else if (v8_flags.minor_ms) {
5069 // Sweep non-promoted pages to add them back to the free list.
5070 DCHECK_EQ(NEW_SPACE, owner_identity);
5071 DCHECK_EQ(0, p->live_bytes());
5072 DCHECK(p->SweepingDone());
5073 PagedNewSpace* space = heap_->paged_new_space();
5074 if (space->ShouldReleaseEmptyPage()) {
5075 space->ReleasePage(p);
5076 } else {
5077 sweeper_->SweepEmptyNewSpacePage(p);
5078 }
5079 }
5080 }
5081 new_space_evacuation_pages_.clear();
5082
5083 for (LargePageMetadata* p : promoted_large_pages_) {
5084 MemoryChunk* chunk = p->Chunk();
5085 DCHECK(chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION));
5086 chunk->ClearFlagNonExecutable(MemoryChunk::PAGE_NEW_OLD_PROMOTION);
5087 Tagged<HeapObject> object = p->GetObject();
5088 if (!v8_flags.sticky_mark_bits) {
5089 MarkBit::From(object).Clear();
5090 p->SetLiveBytes(0);
5091 }
5092 p->marking_progress_tracker().ResetIfEnabled();
5093 }
5094 promoted_large_pages_.clear();
5095
5096 for (PageMetadata* p : old_space_evacuation_pages_) {
5097 MemoryChunk* chunk = p->Chunk();
5098 if (chunk->IsFlagSet(MemoryChunk::COMPACTION_WAS_ABORTED)) {
5099 sweeper_->AddPage(p->owner_identity(), p);
5100 chunk->ClearFlagSlow(MemoryChunk::COMPACTION_WAS_ABORTED);
5101 }
5102 }
5103 }
5104
5105 {
5106 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE);
5107 EvacuateEpilogue();
5108 }
5109
5110#ifdef VERIFY_HEAP
5111 if (v8_flags.verify_heap && !sweeper_->sweeping_in_progress()) {
5112 EvacuationVerifier verifier(heap_);
5113 verifier.Run();
5114 }
5115#endif // VERIFY_HEAP
5116}
5117
5118class UpdatingItem : public ParallelWorkItem {
5119 public:
5120 virtual ~UpdatingItem() = default;
5121 virtual void Process() = 0;
5122};
5123
5124class PointersUpdatingJob : public v8::JobTask {
5125 public:
5126  PointersUpdatingJob(
5127      Isolate* isolate, MarkCompactCollector* collector,
5128 std::vector<std::unique_ptr<UpdatingItem>> updating_items)
5129 : collector_(collector),
5130 updating_items_(std::move(updating_items)),
5131 remaining_updating_items_(updating_items_.size()),
5132 generator_(updating_items_.size()),
5133 tracer_(isolate->heap()->tracer()),
5134 trace_id_(reinterpret_cast<uint64_t>(this) ^
5135 tracer_->CurrentEpoch(GCTracer::Scope::MC_EVACUATE)) {}
5136
5137 void Run(JobDelegate* delegate) override {
5138 // Set the current isolate such that trusted pointer tables etc are
5139 // available and the cage base is set correctly for multi-cage mode.
5140 SetCurrentIsolateScope isolate_scope(collector_->heap()->isolate());
5141
5142 if (delegate->IsJoiningThread()) {
5143 TRACE_GC_WITH_FLOW(tracer_,
5144 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
5146 UpdatePointers(delegate);
5147 } else {
5149 tracer_, GCTracer::Scope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS,
5150 ThreadKind::kBackground, trace_id_, TRACE_EVENT_FLAG_FLOW_IN);
5151 UpdatePointers(delegate);
5152 }
5153 }
5154
5155 void UpdatePointers(JobDelegate* delegate) {
5156 while (remaining_updating_items_.load(std::memory_order_relaxed) > 0) {
5157 std::optional<size_t> index = generator_.GetNext();
5158 if (!index) return;
5159 for (size_t i = *index; i < updating_items_.size(); ++i) {
5160 auto& work_item = updating_items_[i];
5161 if (!work_item->TryAcquire()) break;
5162 work_item->Process();
5163 if (remaining_updating_items_.fetch_sub(1, std::memory_order_relaxed) <=
5164 1) {
5165 return;
5166 }
5167 }
5168 }
5169 }
5170
5171 size_t GetMaxConcurrency(size_t worker_count) const override {
5172 size_t items = remaining_updating_items_.load(std::memory_order_relaxed);
5173 if (!v8_flags.parallel_pointer_update ||
5174 !collector_->UseBackgroundThreadsInCycle()) {
5175 return std::min<size_t>(items, 1);
5176 }
5177 const size_t kMaxPointerUpdateTasks = 8;
5178 size_t max_concurrency = std::min<size_t>(kMaxPointerUpdateTasks, items);
5179 DCHECK_IMPLIES(items > 0, max_concurrency > 0);
5180 return max_concurrency;
5181 }
5182
5183 uint64_t trace_id() const { return trace_id_; }
5184
5185 private:
5186  MarkCompactCollector* collector_;
5187  std::vector<std::unique_ptr<UpdatingItem>> updating_items_;
5188  std::atomic<size_t> remaining_updating_items_{0};
5189  IndexGenerator generator_;
5190
5191  GCTracer* tracer_;
5192  const uint64_t trace_id_;
5193};
5194
5195namespace {
5196
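// Updates all remembered-set recorded slots of a single page after
// evacuation: the OLD_TO_NEW(_BACKGROUND), OLD_TO_OLD, TRUSTED_TO_CODE and
// TRUSTED_TO_TRUSTED sets are walked, forwarding pointers are resolved, and
// slots that now point into the shared heap are re-recorded as OLD_TO_SHARED.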
5197class RememberedSetUpdatingItem : public UpdatingItem {
5198 public:
5199 explicit RememberedSetUpdatingItem(Heap* heap, MutablePageMetadata* chunk)
5200 : heap_(heap),
5201 marking_state_(heap_->non_atomic_marking_state()),
5202 chunk_(chunk),
5203 record_old_to_shared_slots_(heap->isolate()->has_shared_space() &&
5204 !chunk->Chunk()->InWritableSharedSpace()) {}
5205 ~RememberedSetUpdatingItem() override = default;
5206
5207 void Process() override {
5208    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
5209                 "RememberedSetUpdatingItem::Process");
5210 UpdateUntypedPointers();
5211 UpdateTypedPointers();
5212 }
5213
5214 private:
5215 template <typename TSlot>
5216 inline void CheckSlotForOldToSharedUntyped(PtrComprCageBase cage_base,
5217 MutablePageMetadata* page,
5218 TSlot slot) {
5219 Tagged<HeapObject> heap_object;
5220
5221 if (!slot.load(cage_base).GetHeapObject(&heap_object)) {
5222 return;
5223 }
5224
5225 if (HeapLayout::InWritableSharedSpace(heap_object)) {
5226      RememberedSet<OLD_TO_SHARED>::Insert<AccessMode::NON_ATOMIC>(
5227          page, page->Offset(slot.address()));
5228 }
5229 }
5230
5231 inline void CheckSlotForOldToSharedTyped(
5232 MutablePageMetadata* page, SlotType slot_type, Address addr,
5233 WritableJitAllocation& jit_allocation) {
5234 Tagged<HeapObject> heap_object =
5235 UpdateTypedSlotHelper::GetTargetObject(page->heap(), slot_type, addr);
5236
5237#if DEBUG
5238 UpdateTypedSlotHelper::UpdateTypedSlot(
5239 jit_allocation, page->heap(), slot_type, addr,
5240 [heap_object](FullMaybeObjectSlot slot) {
5241 DCHECK_EQ((*slot).GetHeapObjectAssumeStrong(), heap_object);
5242 return KEEP_SLOT;
5243 });
5244#endif // DEBUG
5245
5246 if (HeapLayout::InWritableSharedSpace(heap_object)) {
5247 const uintptr_t offset = page->Offset(addr);
5248 DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
5249      RememberedSet<OLD_TO_SHARED>::InsertTyped(page, slot_type,
5250                                                static_cast<uint32_t>(offset));
5251 }
5252 }
5253
5254 template <typename TSlot>
5255 inline void CheckAndUpdateOldToNewSlot(TSlot slot,
5256 const PtrComprCageBase cage_base) {
5257 static_assert(
5258 std::is_same_v<TSlot, FullMaybeObjectSlot> ||
5259 std::is_same_v<TSlot, MaybeObjectSlot>,
5260 "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
5261 Tagged<HeapObject> heap_object;
5262 if (!(*slot).GetHeapObject(&heap_object)) return;
5263 if (!HeapLayout::InYoungGeneration(heap_object)) return;
5264
5265 if (!v8_flags.sticky_mark_bits) {
5266 DCHECK_IMPLIES(v8_flags.minor_ms && !Heap::IsLargeObject(heap_object),
5267 Heap::InToPage(heap_object));
5268 DCHECK_IMPLIES(!v8_flags.minor_ms || Heap::IsLargeObject(heap_object),
5269 Heap::InFromPage(heap_object));
5270 }
5271
5272 // OLD_TO_NEW slots are recorded in dead memory, so they might point to
5273 // dead objects.
5274 DCHECK_IMPLIES(!heap_object->map_word(kRelaxedLoad).IsForwardingAddress(),
5275 !marking_state_->IsMarked(heap_object));
5276 UpdateSlot(cage_base, slot);
5277 }
5278
5279 void UpdateUntypedPointers() {
5280 UpdateUntypedOldToNewPointers<OLD_TO_NEW>();
5281 UpdateUntypedOldToNewPointers<OLD_TO_NEW_BACKGROUND>();
5282 UpdateUntypedOldToOldPointers();
5283 UpdateUntypedTrustedToCodePointers();
5284 UpdateUntypedTrustedToTrustedPointers();
5285 }
5286
5287 template <RememberedSetType old_to_new_type>
5288 void UpdateUntypedOldToNewPointers() {
5289 if (!chunk_->slot_set<old_to_new_type, AccessMode::NON_ATOMIC>()) {
5290 return;
5291 }
5292
5293 const PtrComprCageBase cage_base = heap_->isolate();
5294 // Marking bits are already cleared when the page has been swept. This is
5295 // fine since in that case the sweeper has already removed the dead
5296 // (now invalid) objects as well.
5297 RememberedSet<old_to_new_type>::Iterate(
5298 chunk_,
5299 [this, cage_base](MaybeObjectSlot slot) {
5300 CheckAndUpdateOldToNewSlot(slot, cage_base);
5301 // A new space string might have been promoted into the shared heap
5302 // during GC.
5303 if (record_old_to_shared_slots_) {
5304 CheckSlotForOldToSharedUntyped(cage_base, chunk_, slot);
5305 }
5306 // Always keep slot since all slots are dropped at once after
5307 // iteration.
5308 return KEEP_SLOT;
5309 },
5310 SlotSet::KEEP_EMPTY_BUCKETS);
5311
5312 // Full GCs will empty new space, so [old_to_new_type] is empty.
5313 chunk_->ReleaseSlotSet(old_to_new_type);
5314 }
5315
5316 void UpdateUntypedOldToOldPointers() {
5317 if (!chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>()) {
5318 return;
5319 }
5320
5321 const PtrComprCageBase cage_base = heap_->isolate();
5322 if (chunk_->Chunk()->executable()) {
5323 // When updating a pointer in an InstructionStream (in particular, the
5324 // pointer to relocation info), we need to use WriteProtectedSlots that
5325 // ensure that the code page is unlocked.
5326 WritableJitPage jit_page(chunk_->area_start(), chunk_->area_size());
5327 RememberedSet<OLD_TO_OLD>::Iterate(
5328 chunk_,
5329 [&](MaybeObjectSlot slot) {
5330 WritableJitAllocation jit_allocation =
5331 jit_page.LookupAllocationContaining(slot.address());
5332 UpdateSlot(cage_base, WriteProtectedSlot<ObjectSlot>(
5333 jit_allocation, slot.address()));
5334 // Always keep slot since all slots are dropped at once after
5335 // iteration.
5336 return KEEP_SLOT;
5337 },
5338 SlotSet::KEEP_EMPTY_BUCKETS);
5339 } else {
5340 RememberedSet<OLD_TO_OLD>::Iterate(
5341 chunk_,
5342 [&](MaybeObjectSlot slot) {
5343 UpdateSlot(cage_base, slot);
5344 // A string might have been promoted into the shared heap during
5345 // GC.
5346 if (record_old_to_shared_slots_) {
5347 CheckSlotForOldToSharedUntyped(cage_base, chunk_, slot);
5348 }
5349 // Always keep slot since all slots are dropped at once after
5350 // iteration.
5351 return KEEP_SLOT;
5352 },
5353 SlotSet::KEEP_EMPTY_BUCKETS);
5354 }
5355
5356 chunk_->ReleaseSlotSet(OLD_TO_OLD);
5357 }
5358
5359 void UpdateUntypedTrustedToCodePointers() {
5360 if (!chunk_->slot_set<TRUSTED_TO_CODE, AccessMode::NON_ATOMIC>()) {
5361 return;
5362 }
5363
5364#ifdef V8_ENABLE_SANDBOX
5365 // When the sandbox is enabled, we must not process the TRUSTED_TO_CODE
5366 // remembered set on any chunk that is located inside the sandbox (in which
5367 // case the set should be unused). This is because an attacker could either
5368 // directly modify the TRUSTED_TO_CODE set on such a chunk, or trick the GC
5369 // into populating it with invalid pointers, both of which may lead to
5370 // memory corruption inside the (trusted) code space here.
5371 SBXCHECK(!InsideSandbox(chunk_->ChunkAddress()));
5372#endif
5373
5374 const PtrComprCageBase cage_base = heap_->isolate();
5375#ifdef V8_EXTERNAL_CODE_SPACE
5376 const PtrComprCageBase code_cage_base(heap_->isolate()->code_cage_base());
5377#else
5378 const PtrComprCageBase code_cage_base = cage_base;
5379#endif
5380 RememberedSet<TRUSTED_TO_CODE>::Iterate(
5381 chunk_,
5382 [cage_base, code_cage_base,
5383 isolate = IsolateForSandbox{heap_->isolate()}](MaybeObjectSlot slot) {
5384 DCHECK(IsCode(HeapObject::FromAddress(slot.address() -
5385 Code::kInstructionStreamOffset),
5386 cage_base));
5387 UpdateStrongCodeSlot(isolate, cage_base, code_cage_base,
5388 InstructionStreamSlot(slot.address()));
5389 // Always keep slot since all slots are dropped at once after
5390 // iteration.
5391 return KEEP_SLOT;
5392 },
5393 SlotSet::FREE_EMPTY_BUCKETS);
5394
5395 chunk_->ReleaseSlotSet(TRUSTED_TO_CODE);
5396 }
5397
5398 void UpdateUntypedTrustedToTrustedPointers() {
5399 if (!chunk_->slot_set<TRUSTED_TO_TRUSTED, AccessMode::NON_ATOMIC>()) {
5400 return;
5401 }
5402
5403#ifdef V8_ENABLE_SANDBOX
5404 // When the sandbox is enabled, we must not process the TRUSTED_TO_TRUSTED
5405 // remembered set on any chunk that is located inside the sandbox (in which
5406 // case the set should be unused). This is because an attacker could either
5407 // directly modify the TRUSTED_TO_TRUSTED set on such a chunk, or trick the
5408 // GC into populating it with invalid pointers, both of which may lead to
5409 // memory corruption inside the trusted space here.
5410 SBXCHECK(!InsideSandbox(chunk_->ChunkAddress()));
5411#endif
5412
5413 // TODO(saelo) we can probably drop all the cage_bases here once we no
5414 // longer need to pass them into our slot implementations.
5415 const PtrComprCageBase unused_cage_base(kNullAddress);
5416
5417 if (chunk_->Chunk()->executable()) {
5418 // When updating the InstructionStream -> Code pointer, we need to use
5419 // WriteProtectedSlots that ensure that the code page is unlocked.
5420 WritableJitPage jit_page(chunk_->area_start(), chunk_->area_size());
5421
5422 RememberedSet<TRUSTED_TO_TRUSTED>::Iterate(
5423 chunk_,
5424 [&](MaybeObjectSlot slot) {
5425 WritableJitAllocation jit_allocation =
5426 jit_page.LookupAllocationContaining(slot.address());
5427 UpdateStrongSlot(unused_cage_base,
5428 WriteProtectedSlot<ProtectedPointerSlot>(
5429 jit_allocation, slot.address()));
5430 // Always keep slot since all slots are dropped at once after
5431 // iteration.
5432 return KEEP_SLOT;
5433 },
5434 SlotSet::FREE_EMPTY_BUCKETS);
5435 } else {
5436 RememberedSet<TRUSTED_TO_TRUSTED>::Iterate(
5437 chunk_,
5438 [&](MaybeObjectSlot slot) {
5439 UpdateSlot(unused_cage_base,
5440 ProtectedPointerSlot(slot.address()));
5441 // Always keep slot since all slots are dropped at once after
5442 // iteration.
5443 return KEEP_SLOT;
5444 },
5445 SlotSet::FREE_EMPTY_BUCKETS);
5446 }
5447
5448 chunk_->ReleaseSlotSet(TRUSTED_TO_TRUSTED);
5449 }
5450
5451 void UpdateTypedPointers() {
5452 if (!chunk_->Chunk()->executable()) {
5453 DCHECK_NULL((chunk_->typed_slot_set<OLD_TO_NEW>()));
5454 DCHECK_NULL((chunk_->typed_slot_set<OLD_TO_OLD>()));
5455 return;
5456 }
5457
5458 WritableJitPage jit_page = ThreadIsolation::LookupWritableJitPage(
5459 chunk_->area_start(), chunk_->area_size());
5460 UpdateTypedOldToNewPointers(jit_page);
5461 UpdateTypedOldToOldPointers(jit_page);
5462 }
5463
5464 void UpdateTypedOldToNewPointers(WritableJitPage& jit_page) {
5465 if (chunk_->typed_slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() == nullptr)
5466 return;
5467 const PtrComprCageBase cage_base = heap_->isolate();
5468 const auto check_and_update_old_to_new_slot_fn =
5469 [this, cage_base](FullMaybeObjectSlot slot) {
5470 CheckAndUpdateOldToNewSlot(slot, cage_base);
5471 return KEEP_SLOT;
5472 };
5473
5474 RememberedSet<OLD_TO_NEW>::IterateTyped(
5475 chunk_, [this, &check_and_update_old_to_new_slot_fn, &jit_page](
5476 SlotType slot_type, Address slot) {
5477 WritableJitAllocation jit_allocation =
5478 jit_page.LookupAllocationContaining(slot);
5479 UpdateTypedSlotHelper::UpdateTypedSlot(
5480 jit_allocation, heap_, slot_type, slot,
5481 check_and_update_old_to_new_slot_fn);
5482 // A new space string might have been promoted into the shared heap
5483 // during GC.
5484 if (record_old_to_shared_slots_) {
5485 CheckSlotForOldToSharedTyped(chunk_, slot_type, slot,
5486 jit_allocation);
5487 }
5488 // Always keep slot since all slots are dropped at once after
5489 // iteration.
5490 return KEEP_SLOT;
5491 });
5492 // Full GCs will empty new space, so OLD_TO_NEW is empty.
5493 chunk_->ReleaseTypedSlotSet(OLD_TO_NEW);
5494 // OLD_TO_NEW_BACKGROUND typed slots set should always be empty.
5495 DCHECK_NULL(chunk_->typed_slot_set<OLD_TO_NEW_BACKGROUND>());
5496 }
5497
5498 void UpdateTypedOldToOldPointers(WritableJitPage& jit_page) {
5499 if (chunk_->typed_slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() == nullptr)
5500 return;
5501 PtrComprCageBase cage_base = heap_->isolate();
5502 RememberedSet<OLD_TO_OLD>::IterateTyped(
5503 chunk_, [this, cage_base, &jit_page](SlotType slot_type, Address slot) {
5504 // Using UpdateStrongSlot is OK here, because there are no weak
5505 // typed slots.
5506 WritableJitAllocation jit_allocation =
5507 jit_page.LookupAllocationContaining(slot);
5508 SlotCallbackResult result = UpdateTypedSlotHelper::UpdateTypedSlot(
5509 jit_allocation, heap_, slot_type, slot,
5510 [cage_base](FullMaybeObjectSlot slot) {
5511 UpdateStrongSlot(cage_base, slot);
5512 // Always keep slot since all slots are dropped at once after
5513 // iteration.
5514 return KEEP_SLOT;
5515 });
5516 // A string might have been promoted into the shared heap during GC.
5517 if (record_old_to_shared_slots_) {
5518 CheckSlotForOldToSharedTyped(chunk_, slot_type, slot,
5519 jit_allocation);
5520 }
5521 return result;
5522 });
5523 chunk_->ReleaseTypedSlotSet(OLD_TO_OLD);
5524 }
5525
5526 Heap* heap_;
5527 NonAtomicMarkingState* marking_state_;
5528 MutablePageMetadata* chunk_;
5529 const bool record_old_to_shared_slots_;
5530};
5531
5532} // namespace
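// ---------------------------------------------------------------------------
// Illustrative sketch (not part of mark-compact.cc): the core pattern behind
// RememberedSetUpdatingItem, reduced to standard C++. A page records the
// offsets of slots that may point to moved objects; after evacuation each
// recorded slot is loaded, redirected through a forwarding table if its
// target moved, and then the whole slot set is dropped. All names below
// (ToyPage, ForwardingTable, ...) are hypothetical; cage bases, typed slots
// and write protection are omitted.
// ---------------------------------------------------------------------------
#include <cstdint>
#include <unordered_map>
#include <vector>

namespace remembered_set_sketch {

using Address = std::uintptr_t;

// Old object address -> new object address for everything that was evacuated.
using ForwardingTable = std::unordered_map<Address, Address>;

// A page that remembers which of its slots may need updating.
struct ToyPage {
  std::vector<Address> memory;       // each element is one pointer-sized slot
  std::vector<size_t> slot_offsets;  // indices of recorded slots ("slot set")
};

// Analogous to RememberedSet<...>::Iterate + UpdateSlot: rewrite every
// recorded slot whose target has a forwarding entry, then release the set.
inline void UpdateRecordedSlots(ToyPage& page, const ForwardingTable& fwd) {
  for (size_t offset : page.slot_offsets) {
    Address& slot = page.memory[offset];
    auto it = fwd.find(slot);
    if (it != fwd.end()) slot = it->second;  // follow the forwarding address
  }
  // "Always keep slot since all slots are dropped at once after iteration."
  page.slot_offsets.clear();  // rough equivalent of ReleaseSlotSet()
}

}  // namespace remembered_set_sketch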
5533
5534namespace {
5535template <typename IterateableSpace>
5536void CollectRememberedSetUpdatingItems(
5537 std::vector<std::unique_ptr<UpdatingItem>>* items,
5538 IterateableSpace* space) {
5539 for (MutablePageMetadata* page : *space) {
5540 // No need to update pointers on evacuation candidates. Evacuated pages will
5541 // be released after this phase.
5542 if (page->Chunk()->IsEvacuationCandidate()) continue;
5543 if (page->ContainsAnySlots()) {
5544 items->emplace_back(
5545 std::make_unique<RememberedSetUpdatingItem>(space->heap(), page));
5546 }
5547 }
5548}
5549} // namespace
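// ---------------------------------------------------------------------------
// Illustrative sketch (not part of mark-compact.cc): how per-page work items
// could be collected into a vector of unique_ptrs, mirroring
// CollectRememberedSetUpdatingItems above. ToyPage/ToyItem are hypothetical.
// ---------------------------------------------------------------------------
#include <memory>
#include <vector>

namespace collect_items_sketch {

struct ToyPage {
  bool is_evacuation_candidate = false;
  bool has_slots = false;
};

struct ToyItem {
  explicit ToyItem(ToyPage* page) : page_(page) {}
  ToyPage* page_;
};

// Pages that are themselves evacuation candidates are skipped: they will be
// released after the update phase, so their own slots never need fixing.
inline void CollectItems(std::vector<std::unique_ptr<ToyItem>>* items,
                         const std::vector<ToyPage*>& space) {
  for (ToyPage* page : space) {
    if (page->is_evacuation_candidate) continue;
    if (page->has_slots) items->push_back(std::make_unique<ToyItem>(page));
  }
}

}  // namespace collect_items_sketch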
5550
5551class EphemeronTableUpdatingItem : public UpdatingItem {
5552 public:
5553 enum EvacuationState { kRegular, kAborted };
5554
5555 explicit EphemeronTableUpdatingItem(Heap* heap) : heap_(heap) {}
5556 ~EphemeronTableUpdatingItem() override = default;
5557
5558 void Process() override {
5560 "EphemeronTableUpdatingItem::Process");
5561 PtrComprCageBase cage_base(heap_->isolate());
5562
5563 auto* table_map = heap_->ephemeron_remembered_set()->tables();
5564 for (auto it = table_map->begin(); it != table_map->end(); it++) {
5565 Tagged<EphemeronHashTable> table = it->first;
5566 auto& indices = it->second;
5567 if (Cast<HeapObject>(table)
5568 ->map_word(kRelaxedLoad)
5569 .IsForwardingAddress()) {
5570 // The object has moved, so ignore slots in dead memory here.
5571 continue;
5572 }
5573 DCHECK(IsMap(table->map(), cage_base));
5574 DCHECK(IsEphemeronHashTable(table, cage_base));
5575 for (auto iti = indices.begin(); iti != indices.end(); ++iti) {
5576 // EphemeronHashTable keys must be heap objects.
5577 ObjectSlot key_slot(table->RawFieldOfElementAt(
5578 EphemeronHashTable::EntryToIndex(InternalIndex(*iti))));
5579 Tagged<Object> key_object = key_slot.Relaxed_Load();
5580 Tagged<HeapObject> key;
5581 CHECK(key_object.GetHeapObject(&key));
5582 MapWord map_word = key->map_word(cage_base, kRelaxedLoad);
5583 if (map_word.IsForwardingAddress()) {
5584 key = map_word.ToForwardingAddress(key);
5585 key_slot.Relaxed_Store(key);
5586 }
5587 }
5588 }
5589 table_map->clear();
5590 }
5591
5592 private:
5593 Heap* const heap_;
5594};
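// ---------------------------------------------------------------------------
// Illustrative sketch (not part of mark-compact.cc): the key-rewriting step of
// EphemeronTableUpdatingItem in standalone C++. The ephemeron remembered set
// records, per table, which entries hold keys that might have moved; after
// evacuation each recorded key slot is redirected through the forwarding
// table. ToyEphemeronTable and ForwardingTable are hypothetical stand-ins.
// ---------------------------------------------------------------------------
#include <cstdint>
#include <unordered_map>
#include <unordered_set>
#include <vector>

namespace ephemeron_sketch {

using Address = std::uintptr_t;
using ForwardingTable = std::unordered_map<Address, Address>;

struct ToyEphemeronTable {
  std::vector<Address> keys;  // one key slot per entry
};

// table -> indices whose key slots were recorded during GC.
using EphemeronRememberedSet =
    std::unordered_map<ToyEphemeronTable*, std::unordered_set<size_t>>;

inline void UpdateEphemeronKeys(EphemeronRememberedSet& remembered,
                                const ForwardingTable& fwd) {
  for (auto& [table, indices] : remembered) {
    for (size_t index : indices) {
      Address& key_slot = table->keys[index];
      auto it = fwd.find(key_slot);
      if (it != fwd.end()) key_slot = it->second;  // key object was evacuated
    }
  }
  remembered.clear();  // matches table_map->clear() above
}

}  // namespace ephemeron_sketch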
5595
5596void MarkCompactCollector::UpdatePointersAfterEvacuation() {
5597 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
5598
5599 {
5600 TRACE_GC(heap_->tracer(),
5601 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
5602 // The external string table is updated at the end.
5603 PointersUpdatingVisitor updating_visitor(heap_);
5604 heap_->IterateRootsIncludingClients(
5605 &updating_visitor,
5606 base::EnumSet<SkipRoot>{SkipRoot::kExternalStringTable,
5607 SkipRoot::kConservativeStack,
5608 SkipRoot::kReadOnlyBuiltins});
5609 }
5610
5611 {
5612 TRACE_GC(heap_->tracer(),
5613 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_CLIENT_HEAPS);
5614 UpdatePointersInClientHeaps();
5615 }
5616
5617 {
5618 TRACE_GC(heap_->tracer(),
5619 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN);
5620 std::vector<std::unique_ptr<UpdatingItem>> updating_items;
5621
5622 CollectRememberedSetUpdatingItems(&updating_items, heap_->old_space());
5623 CollectRememberedSetUpdatingItems(&updating_items, heap_->code_space());
5624 if (heap_->shared_space()) {
5625 CollectRememberedSetUpdatingItems(&updating_items, heap_->shared_space());
5626 }
5627 CollectRememberedSetUpdatingItems(&updating_items, heap_->lo_space());
5628 CollectRememberedSetUpdatingItems(&updating_items, heap_->code_lo_space());
5629 if (heap_->shared_lo_space()) {
5630 CollectRememberedSetUpdatingItems(&updating_items,
5631 heap_->shared_lo_space());
5632 }
5633 CollectRememberedSetUpdatingItems(&updating_items, heap_->trusted_space());
5634 CollectRememberedSetUpdatingItems(&updating_items,
5635 heap_->trusted_lo_space());
5636 if (heap_->shared_trusted_space()) {
5637 CollectRememberedSetUpdatingItems(&updating_items,
5638 heap_->shared_trusted_space());
5639 }
5640 if (heap_->shared_trusted_lo_space()) {
5641 CollectRememberedSetUpdatingItems(&updating_items,
5642 heap_->shared_trusted_lo_space());
5643 }
5644
5645 // Iterating to-space may require a valid body descriptor, e.g. for
5646 // WasmStruct, which races with updating a slot in Map. Since to-space is
5647 // empty after a full GC, such races can't happen.
5648 DCHECK_IMPLIES(heap_->new_space(), heap_->new_space()->Size() == 0);
5649
5650 updating_items.push_back(
5651 std::make_unique<EphemeronTableUpdatingItem>(heap_));
5652
5653 auto pointers_updating_job = std::make_unique<PointersUpdatingJob>(
5654 heap_->isolate(), this, std::move(updating_items));
5655 TRACE_GC_NOTE_WITH_FLOW("PointersUpdatingJob started",
5656 pointers_updating_job->trace_id(),
5657 TRACE_EVENT_FLAG_FLOW_OUT);
5658 V8::GetCurrentPlatform()
5659 ->CreateJob(v8::TaskPriority::kUserBlocking,
5660 std::move(pointers_updating_job))
5661 ->Join();
5662 }
5663
5664 {
5665 TRACE_GC(heap_->tracer(),
5666 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
5667 // Update pointers from external string table.
5668 heap_->UpdateReferencesInExternalStringTable(
5669 &UpdateReferenceInExternalStringTableEntry);
5670
5671 // Update pointers in string forwarding table.
5672 // When the GC was performed without a stack, the table was cleared and
5673 // this does nothing. If this was a GC with stack, we need to update the
5674 // entries for evacuated objects.
5675 // All entries are objects in shared space (unless
5676 // --always-use-string-forwarding-table), so we only need to update
5677 // pointers during a shared GC.
5678 if (heap_->isolate()->OwnsStringTables() ||
5679 V8_UNLIKELY(v8_flags.always_use_string_forwarding_table)) {
5680 heap_->isolate()->string_forwarding_table()->UpdateAfterFullEvacuation();
5681 }
5682
5683 EvacuationWeakObjectRetainer evacuation_object_retainer;
5684 heap_->ProcessWeakListRoots(&evacuation_object_retainer);
5685 }
5686
5687 {
5688 TRACE_GC(heap_->tracer(),
5689 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_POINTER_TABLES);
5690 UpdatePointersInPointerTables();
5691 }
5692
5693 // Flush the inner_pointer_to_code_cache which may now have stale contents.
5694 heap_->isolate()->inner_pointer_to_code_cache()->Flush();
5695}
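// ---------------------------------------------------------------------------
// Illustrative sketch (not part of mark-compact.cc): how a pool of updating
// items can be drained in parallel and then joined, similar in spirit to
// PointersUpdatingJob being scheduled via the platform's CreateJob() and
// Join()ed above. This is plain std::thread code with hypothetical names; the
// real job API, tracing and concurrency heuristics are omitted.
// ---------------------------------------------------------------------------
#include <atomic>
#include <cstddef>
#include <memory>
#include <thread>
#include <vector>

namespace updating_job_sketch {

struct ToyUpdatingItem {
  virtual ~ToyUpdatingItem() = default;
  virtual void Process() = 0;
};

// Each worker repeatedly claims the next unprocessed item until none remain.
inline void DrainItemsInParallel(
    std::vector<std::unique_ptr<ToyUpdatingItem>>& items,
    std::size_t worker_count) {
  std::atomic<std::size_t> next_item{0};
  auto worker = [&]() {
    for (std::size_t i = next_item.fetch_add(1); i < items.size();
         i = next_item.fetch_add(1)) {
      items[i]->Process();
    }
  };
  std::vector<std::thread> threads;
  for (std::size_t i = 0; i < worker_count; ++i) threads.emplace_back(worker);
  for (std::thread& t : threads) t.join();  // analogous to handle->Join()
}

}  // namespace updating_job_sketch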
5696
5697void MarkCompactCollector::UpdatePointersInClientHeaps() {
5698 Isolate* const isolate = heap_->isolate();
5699 if (!isolate->is_shared_space_isolate()) return;
5700
5701 isolate->global_safepoint()->IterateClientIsolates(
5702 [this](Isolate* client) { UpdatePointersInClientHeap(client); });
5703}
5704
5705void MarkCompactCollector::UpdatePointersInClientHeap(Isolate* client) {
5706 PtrComprCageBase cage_base(client);
5707 MemoryChunkIterator chunk_iterator(client->heap());
5708
5709 while (chunk_iterator.HasNext()) {
5710 MutablePageMetadata* page = chunk_iterator.Next();
5711 MemoryChunk* chunk = page->Chunk();
5712
5713 const auto slot_count = RememberedSet<OLD_TO_SHARED>::Iterate(
5714 page,
5715 [cage_base](MaybeObjectSlot slot) {
5716 return UpdateOldToSharedSlot(cage_base, slot);
5717 },
5718 SlotSet::FREE_EMPTY_BUCKETS);
5719
5720 if (slot_count == 0 || chunk->InYoungGeneration()) {
5721 page->ReleaseSlotSet(OLD_TO_SHARED);
5722 }
5723
5724 const PtrComprCageBase unused_cage_base(kNullAddress);
5725
5726 const auto protected_slot_count =
5727 RememberedSet<TRUSTED_TO_SHARED_TRUSTED>::Iterate(
5728 page,
5729 [unused_cage_base](MaybeObjectSlot slot) {
5730 ProtectedPointerSlot protected_slot(slot.address());
5731 return UpdateOldToSharedSlot(unused_cage_base, protected_slot);
5732 },
5733 SlotSet::FREE_EMPTY_BUCKETS);
5734 if (protected_slot_count == 0) {
5735 page->ReleaseSlotSet(TRUSTED_TO_SHARED_TRUSTED);
5736 }
5737
5738 if (!chunk->executable()) {
5739 DCHECK_NULL(page->typed_slot_set<OLD_TO_SHARED>());
5740 continue;
5741 }
5742
5743 WritableJitPage jit_page = ThreadIsolation::LookupWritableJitPage(
5744 page->area_start(), page->area_size());
5745 const auto typed_slot_count = RememberedSet<OLD_TO_SHARED>::IterateTyped(
5746 page, [this, &jit_page](SlotType slot_type, Address slot) {
5747 // Using UpdateStrongSlot is OK here, because there are no weak
5748 // typed slots.
5749 PtrComprCageBase cage_base = heap_->isolate();
5750 WritableJitAllocation jit_allocation =
5751 jit_page.LookupAllocationContaining(slot);
5752 return UpdateTypedSlotHelper::UpdateTypedSlot(
5753 jit_allocation, heap_, slot_type, slot,
5754 [cage_base](FullMaybeObjectSlot slot) {
5755 return UpdateStrongOldToSharedSlot(cage_base, slot);
5756 });
5757 });
5758 if (typed_slot_count == 0 || chunk->InYoungGeneration())
5759 page->ReleaseTypedSlotSet(OLD_TO_SHARED);
5760 }
5761}
5762
5763void MarkCompactCollector::UpdatePointersInPointerTables() {
5764#if defined(V8_ENABLE_SANDBOX) || defined(V8_ENABLE_LEAPTIERING)
5765 // Process an entry of a pointer table, returning either the relocated object
5766 // or a null pointer if the object wasn't relocated.
5767 auto process_entry = [&](Address content) -> Tagged<ExposedTrustedObject> {
5768 Tagged<HeapObject> heap_obj = Cast<HeapObject>(Tagged<Object>(content));
5769 MapWord map_word = heap_obj->map_word(kRelaxedLoad);
5770 if (!map_word.IsForwardingAddress()) return {};
5771 Tagged<HeapObject> relocated_object =
5772 map_word.ToForwardingAddress(heap_obj);
5773 DCHECK(IsExposedTrustedObject(relocated_object));
5774 return Cast<ExposedTrustedObject>(relocated_object);
5775 };
5776#endif // defined(V8_ENABLE_SANDBOX) || defined(V8_ENABLE_LEAPTIERING)
5777
5778#ifdef V8_ENABLE_SANDBOX
5779 TrustedPointerTable* const tpt = &heap_->isolate()->trusted_pointer_table();
5780 tpt->IterateActiveEntriesIn(
5781 heap_->trusted_pointer_space(),
5782 [&](TrustedPointerHandle handle, Address content) {
5783 Tagged<ExposedTrustedObject> relocated_object = process_entry(content);
5784 if (!relocated_object.is_null()) {
5785 DCHECK_EQ(handle, relocated_object->self_indirect_pointer_handle());
5786 auto instance_type = relocated_object->map()->instance_type();
5787 auto tag = IndirectPointerTagFromInstanceType(instance_type);
5788 tpt->Set(handle, relocated_object.ptr(), tag);
5789 }
5790 });
5791
5792 TrustedPointerTable* const stpt =
5793 &heap_->isolate()->shared_trusted_pointer_table();
5794 stpt->IterateActiveEntriesIn(
5795 heap_->isolate()->shared_trusted_pointer_space(),
5796 [&](TrustedPointerHandle handle, Address content) {
5797 Tagged<ExposedTrustedObject> relocated_object = process_entry(content);
5798 if (!relocated_object.is_null()) {
5799 DCHECK_EQ(handle, relocated_object->self_indirect_pointer_handle());
5800 auto instance_type = relocated_object->map()->instance_type();
5801 auto tag = IndirectPointerTagFromInstanceType(instance_type);
5802 DCHECK(IsSharedTrustedPointerType(tag));
5803 stpt->Set(handle, relocated_object.ptr(), tag);
5804 }
5805 });
5806
5807 CodePointerTable* const cpt = IsolateGroup::current()->code_pointer_table();
5808 cpt->IterateActiveEntriesIn(
5809 heap_->code_pointer_space(),
5810 [&](CodePointerHandle handle, Address content) {
5811 Tagged<ExposedTrustedObject> relocated_object = process_entry(content);
5812 if (!relocated_object.is_null()) {
5813 DCHECK_EQ(handle, relocated_object->self_indirect_pointer_handle());
5814 cpt->SetCodeObject(handle, relocated_object.address());
5815 }
5816 });
5817#endif // V8_ENABLE_SANDBOX
5818
5819#ifdef V8_ENABLE_LEAPTIERING
5820 JSDispatchTable* const jdt = IsolateGroup::current()->js_dispatch_table();
5821 const EmbeddedData& embedded_data = EmbeddedData::FromBlob(heap_->isolate());
5822 jdt->IterateActiveEntriesIn(
5823 heap_->js_dispatch_table_space(), [&](JSDispatchHandle handle) {
5824 Address code_address = jdt->GetCodeAddress(handle);
5825 Address entrypoint_address = jdt->GetEntrypoint(handle);
5826 Tagged<TrustedObject> relocated_code = process_entry(code_address);
5827 bool code_object_was_relocated = !relocated_code.is_null();
5828 Tagged<Code> code = Cast<Code>(code_object_was_relocated
5829 ? relocated_code
5830 : Tagged<Object>(code_address));
5831 bool instruction_stream_was_relocated =
5832 code->instruction_start() != entrypoint_address;
5833 if (code_object_was_relocated || instruction_stream_was_relocated) {
5834 Address old_entrypoint = jdt->GetEntrypoint(handle);
5835 // Ensure tiering trampolines are not overwritten here.
5836 Address new_entrypoint = ([&]() {
5837#define CASE(name, ...) \
5838 if (old_entrypoint == embedded_data.InstructionStartOf(Builtin::k##name)) { \
5839 return old_entrypoint; \
5840 }
5841 BUILTIN_LIST_BASE_TIERING(CASE)
5842#undef CASE
5843 return code->instruction_start();
5844 })();
5845 jdt->SetCodeAndEntrypointNoWriteBarrier(handle, code, new_entrypoint);
5846 CHECK_IMPLIES(jdt->IsTieringRequested(handle),
5847 old_entrypoint == new_entrypoint);
5848 }
5849 });
5850#endif // V8_ENABLE_LEAPTIERING
5851}
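// ---------------------------------------------------------------------------
// Illustrative sketch (not part of mark-compact.cc): the shape of the pointer
// table fix-up above. A handle-indexed table stores object addresses; after
// evacuation every live entry whose object was relocated is rewritten to its
// forwarding target. ToyPointerTable and ForwardingTable are hypothetical;
// tags, sandbox checks and dispatch entrypoints are omitted.
// ---------------------------------------------------------------------------
#include <cstddef>
#include <cstdint>
#include <unordered_map>
#include <vector>

namespace pointer_table_sketch {

using Address = std::uintptr_t;
using Handle = std::uint32_t;  // index into the table
using ForwardingTable = std::unordered_map<Address, Address>;

struct ToyPointerTable {
  std::vector<Address> entries;  // entries[handle] == object address (0 = free)

  template <typename Callback>
  void IterateActiveEntries(Callback callback) {
    for (std::size_t i = 0; i < entries.size(); ++i) {
      if (entries[i] != 0) callback(static_cast<Handle>(i), entries[i]);
    }
  }

  void Set(Handle handle, Address address) { entries[handle] = address; }
};

inline void UpdateTableAfterEvacuation(ToyPointerTable& table,
                                       const ForwardingTable& fwd) {
  table.IterateActiveEntries([&](Handle handle, Address content) {
    auto it = fwd.find(content);
    if (it != fwd.end()) table.Set(handle, it->second);  // object was moved
  });
}

}  // namespace pointer_table_sketch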
5852
5853void MarkCompactCollector::ReportAbortedEvacuationCandidateDueToOOM(
5854 Address failed_start, PageMetadata* page) {
5855 base::MutexGuard guard(&mutex_);
5856 aborted_evacuation_candidates_due_to_oom_.push_back(
5857 std::make_pair(failed_start, page));
5858}
5859
5860void MarkCompactCollector::ReportAbortedEvacuationCandidateDueToFlags(
5861 PageMetadata* page, MemoryChunk* chunk) {
5862 DCHECK_EQ(page->Chunk(), chunk);
5863 if (chunk->IsFlagSet(MemoryChunk::COMPACTION_WAS_ABORTED)) {
5864 return;
5865 }
5866 chunk->SetFlagSlow(MemoryChunk::COMPACTION_WAS_ABORTED);
5867 aborted_evacuation_candidates_due_to_flags_.push_back(page);
5868}
5869
5870namespace {
5871
5872void ReRecordPage(Heap* heap, Address failed_start, PageMetadata* page) {
5873 DCHECK(page->Chunk()->IsFlagSet(MemoryChunk::COMPACTION_WAS_ABORTED));
5874
5875 // Aborted compaction page. We have to record slots here, since we
5876 // might not have recorded them in the first place.
5877
5878 // Remove mark bits in evacuated area.
5879 page->marking_bitmap()->ClearRange<AccessMode::NON_ATOMIC>(
5880 MarkingBitmap::AddressToIndex(page->area_start()),
5881 MarkingBitmap::LimitAddressToIndex(failed_start));
5882
5883 // Remove outdated slots.
5884 RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->area_start(), failed_start,
5885 SlotSet::FREE_EMPTY_BUCKETS);
5886 RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->area_start(),
5887 failed_start);
5888
5889 RememberedSet<OLD_TO_NEW_BACKGROUND>::RemoveRange(
5890 page, page->area_start(), failed_start, SlotSet::FREE_EMPTY_BUCKETS);
5891 DCHECK_NULL(page->typed_slot_set<OLD_TO_NEW_BACKGROUND>());
5892
5893 RememberedSet<OLD_TO_SHARED>::RemoveRange(
5894 page, page->area_start(), failed_start, SlotSet::FREE_EMPTY_BUCKETS);
5895 RememberedSet<OLD_TO_SHARED>::RemoveRangeTyped(page, page->area_start(),
5896 failed_start);
5897
5898 // Re-record slots and recompute live bytes.
5899 RecordMigratedSlotVisitor visitor(heap);
5900 LiveObjectVisitor::VisitMarkedObjectsNoFail(page, &visitor);
5901 page->SetLiveBytes(visitor.live_object_size());
5902 // Array buffers will be processed during pointer updating.
5903}
5904
5905} // namespace
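// ---------------------------------------------------------------------------
// Illustrative sketch (not part of mark-compact.cc): what ReRecordPage does at
// a high level for an aborted evacuation page. Everything in
// [area_start, failed_start) was already evacuated, so the mark bits and any
// recorded slots in that range are stale and must be dropped before the page
// is treated as live again. ToyBitmap and ToyPage are hypothetical.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cstdint>
#include <vector>

namespace rerecord_sketch {

using Address = std::uintptr_t;

struct ToyBitmap {
  std::vector<bool> bits;  // one mark bit per word
  void ClearRange(size_t first, size_t limit) {
    std::fill(bits.begin() + first, bits.begin() + limit, false);
  }
};

struct ToyPage {
  Address area_start = 0;
  ToyBitmap marking_bitmap;
  std::vector<Address> recorded_slots;  // absolute slot addresses
};

inline size_t AddressToBitIndex(const ToyPage& page, Address addr) {
  return (addr - page.area_start) / sizeof(Address);
}

inline void DropStaleStateForAbortedRange(ToyPage& page, Address failed_start) {
  // Remove mark bits in the already-evacuated prefix of the page.
  page.marking_bitmap.ClearRange(AddressToBitIndex(page, page.area_start),
                                 AddressToBitIndex(page, failed_start));
  // Remove recorded slots in that prefix; they refer to dead memory.
  auto& slots = page.recorded_slots;
  slots.erase(std::remove_if(slots.begin(), slots.end(),
                             [&](Address slot) {
                               return slot >= page.area_start &&
                                      slot < failed_start;
                             }),
              slots.end());
  // The real code then revisits the remaining marked objects to re-record
  // their slots and recompute live bytes.
}

}  // namespace rerecord_sketch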
5906
5907size_t MarkCompactCollector::PostProcessAbortedEvacuationCandidates() {
5908 for (auto start_and_page : aborted_evacuation_candidates_due_to_oom_) {
5909 PageMetadata* page = start_and_page.second;
5910 MemoryChunk* chunk = page->Chunk();
5911 DCHECK(!chunk->IsFlagSet(MemoryChunk::COMPACTION_WAS_ABORTED));
5912 chunk->SetFlagSlow(MemoryChunk::COMPACTION_WAS_ABORTED);
5913 }
5914 for (auto start_and_page : aborted_evacuation_candidates_due_to_oom_) {
5915 ReRecordPage(heap_, start_and_page.first, start_and_page.second);
5916 }
5917 for (auto page : aborted_evacuation_candidates_due_to_flags_) {
5918 ReRecordPage(heap_, page->area_start(), page);
5919 }
5920 const size_t aborted_pages =
5921 aborted_evacuation_candidates_due_to_oom_.size() +
5922 aborted_evacuation_candidates_due_to_flags_.size();
5923 size_t aborted_pages_verified = 0;
5924 for (PageMetadata* p : old_space_evacuation_pages_) {
5925 MemoryChunk* chunk = p->Chunk();
5926 if (chunk->IsFlagSet(MemoryChunk::COMPACTION_WAS_ABORTED)) {
5927 // Only clear EVACUATION_CANDIDATE flag after all slots were re-recorded
5928 // on all aborted pages. Necessary since repopulating
5929 // OLD_TO_OLD still requires the EVACUATION_CANDIDATE flag. After clearing
5930 // the evacuation candidate flag the page is again in a regular state.
5931 p->ClearEvacuationCandidate();
5932 aborted_pages_verified++;
5933 } else {
5934 DCHECK(chunk->IsEvacuationCandidate());
5935 DCHECK(p->SweepingDone());
5936 }
5937 }
5938 DCHECK_EQ(aborted_pages_verified, aborted_pages);
5939 USE(aborted_pages_verified);
5940 return aborted_pages;
5941}
5942
5943void MarkCompactCollector::ReleaseEvacuationCandidates() {
5944 for (PageMetadata* p : old_space_evacuation_pages_) {
5945 if (!p->Chunk()->IsEvacuationCandidate()) continue;
5946 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
5947 p->SetLiveBytes(0);
5948 CHECK(p->SweepingDone());
5949 space->ReleasePage(p);
5950 }
5951 old_space_evacuation_pages_.clear();
5952 compacting_ = false;
5953}
5954
5955void MarkCompactCollector::StartSweepNewSpace() {
5956 PagedSpaceForNewSpace* paged_space = heap_->paged_new_space()->paged_space();
5957 paged_space->ClearAllocatorState();
5958
5959 int will_be_swept = 0;
5960
5961 heap_->StartResizeNewSpace();
5962
5963 DCHECK(empty_new_space_pages_to_be_swept_.empty());
5964 for (auto it = paged_space->begin(); it != paged_space->end();) {
5965 PageMetadata* p = *(it++);
5966 DCHECK(p->SweepingDone());
5967 DCHECK(!p->Chunk()->IsFlagSet(MemoryChunk::BLACK_ALLOCATED));
5968
5969 if (p->live_bytes() > 0) {
5970 // Non-empty pages will be evacuated/promoted.
5971 continue;
5972 }
5973
5974 if (paged_space->ShouldReleaseEmptyPage()) {
5975 paged_space->ReleasePage(p);
5976 } else {
5977 empty_new_space_pages_to_be_swept_.push_back(p);
5978 }
5979 will_be_swept++;
5980 }
5981
5982 if (v8_flags.gc_verbose) {
5983 PrintIsolate(heap_->isolate(),
5984 "sweeping: space=%s initialized_for_sweeping=%d",
5985 ToString(paged_space->identity()), will_be_swept);
5986 }
5987}
5988
5989void MarkCompactCollector::ResetAndRelinkBlackAllocatedPage(
5990 PagedSpace* space, PageMetadata* page) {
5991 DCHECK(page->Chunk()->IsFlagSet(MemoryChunk::BLACK_ALLOCATED));
5992 DCHECK_EQ(page->live_bytes(), 0);
5993 DCHECK_GE(page->allocated_bytes(), 0);
5994 DCHECK(page->marking_bitmap()->IsClean());
5995 std::optional<RwxMemoryWriteScope> scope;
5996 if (page->Chunk()->InCodeSpace()) {
5997 scope.emplace("For writing flags.");
5998 }
5999 page->Chunk()->ClearFlagUnlocked(MemoryChunk::BLACK_ALLOCATED);
6000 space->IncreaseAllocatedBytes(page->allocated_bytes(), page);
6001 space->RelinkFreeListCategories(page);
6002}
6003
6004void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
6005 DCHECK_NE(NEW_SPACE, space->identity());
6006 space->ClearAllocatorState();
6007
6008 int will_be_swept = 0;
6009 bool unused_page_present = false;
6010
6011 Sweeper* sweeper = heap_->sweeper();
6012
6013 // Loop needs to support deletion if live bytes == 0 for a page.
6014 for (auto it = space->begin(); it != space->end();) {
6015 PageMetadata* p = *(it++);
6016 DCHECK(p->SweepingDone());
6017
6018 if (p->Chunk()->IsEvacuationCandidate()) {
6019 DCHECK(!p->Chunk()->IsFlagSet(MemoryChunk::BLACK_ALLOCATED));
6020 DCHECK_NE(NEW_SPACE, space->identity());
6021 // Will be processed in Evacuate.
6022 continue;
6023 }
6024
6025 // If the page is black, just reset the flag and don't add the page to the
6026 // sweeper.
6027 if (p->Chunk()->IsFlagSet(MemoryChunk::BLACK_ALLOCATED)) {
6028 ResetAndRelinkBlackAllocatedPage(space, p);
6029 continue;
6030 }
6031
6032 // One unused page is kept; all further empty pages are released before sweeping.
6033 if (p->live_bytes() == 0) {
6034 if (unused_page_present) {
6035 if (v8_flags.gc_verbose) {
6036 PrintIsolate(heap_->isolate(), "sweeping: released page: %p",
6037 static_cast<void*>(p));
6038 }
6039 space->ReleasePage(p);
6040 continue;
6041 }
6042 unused_page_present = true;
6043 }
6044
6045 sweeper->AddPage(space->identity(), p);
6046 will_be_swept++;
6047 }
6048
6049 if (v8_flags.sticky_mark_bits && space->identity() == OLD_SPACE) {
6050 static_cast<StickySpace*>(space)->set_old_objects_size(space->Size());
6051 }
6052
6053 if (v8_flags.gc_verbose) {
6054 PrintIsolate(heap_->isolate(),
6055 "sweeping: space=%s initialized_for_sweeping=%d",
6056 ToString(space->identity()), will_be_swept);
6057 }
6058}
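// ---------------------------------------------------------------------------
// Illustrative sketch (not part of mark-compact.cc): the page triage performed
// by StartSweepSpace, with hypothetical ToyPage/ToySweeper types. Evacuation
// candidates are skipped, at most one completely empty page is kept, further
// empty pages are released, and everything else is handed to the sweeper.
// Black-allocated pages and verbose logging are omitted.
// ---------------------------------------------------------------------------
#include <cstddef>
#include <vector>

namespace start_sweep_sketch {

struct ToyPage {
  bool is_evacuation_candidate = false;
  std::size_t live_bytes = 0;
};

struct ToySweeper {
  std::vector<ToyPage*> queued;
  void AddPage(ToyPage* page) { queued.push_back(page); }
};

// Returns how many pages were handed to the sweeper.
inline int TriagePagesForSweeping(std::vector<ToyPage*>& space,
                                  ToySweeper& sweeper,
                                  std::vector<ToyPage*>& released) {
  int will_be_swept = 0;
  bool unused_page_present = false;
  for (auto it = space.begin(); it != space.end();) {
    ToyPage* page = *it;
    if (page->is_evacuation_candidate) {  // handled by evacuation instead
      ++it;
      continue;
    }
    if (page->live_bytes == 0) {
      if (unused_page_present) {  // keep only the first empty page
        released.push_back(page);
        it = space.erase(it);
        continue;
      }
      unused_page_present = true;
    }
    sweeper.AddPage(page);
    ++will_be_swept;
    ++it;
  }
  return will_be_swept;
}

}  // namespace start_sweep_sketch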
6059
6060namespace {
6061bool ShouldPostponeFreeingEmptyPages(LargeObjectSpace* space) {
6062 // Delay releasing dead old large object pages until after pointer updating
6063 // is done, because dead old space objects may have old-to-new slots (which
6064 // were possibly later overridden with old-to-old references) that point
6065 // into these pages and will need to be updated.
6066 if (space->identity() == LO_SPACE) return true;
6067 // Old-to-new slots may also point to shared spaces. Delay releasing so that
6068 // updating slots in dead old objects can access the dead shared objects.
6069 if (space->identity() == SHARED_LO_SPACE) return true;
6070 return false;
6071}
6072} // namespace
6073
6074void MarkCompactCollector::SweepLargeSpace(LargeObjectSpace* space) {
6075 PtrComprCageBase cage_base(heap_->isolate());
6076 size_t surviving_object_size = 0;
6077 const MemoryAllocator::FreeMode free_mode =
6078 ShouldPostponeFreeingEmptyPages(space)
6079 ? MemoryAllocator::FreeMode::kPostpone
6080 : MemoryAllocator::FreeMode::kImmediately;
6081 for (auto it = space->begin(); it != space->end();) {
6082 LargePageMetadata* current = *(it++);
6083 DCHECK(!current->Chunk()->IsFlagSet(MemoryChunk::BLACK_ALLOCATED));
6084 Tagged<HeapObject> object = current->GetObject();
6085 if (!marking_state_->IsMarked(object)) {
6086 // Object is dead and page can be released.
6087 space->RemovePage(current);
6088 heap_->memory_allocator()->Free(free_mode, current);
6089
6090 continue;
6091 }
6092 if (!v8_flags.sticky_mark_bits) {
6093 MarkBit::From(object).Clear();
6094 current->SetLiveBytes(0);
6095 }
6096 current->marking_progress_tracker().ResetIfEnabled();
6097 surviving_object_size += static_cast<size_t>(object->Size(cage_base));
6098 }
6099 space->set_objects_size(surviving_object_size);
6100}
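// ---------------------------------------------------------------------------
// Illustrative sketch (not part of mark-compact.cc): the large-object sweep
// above, reduced to standard C++. Each large page holds exactly one object;
// unmarked pages are freed (the real code may postpone the actual freeing),
// marked pages have their mark bit cleared and their size added to the
// surviving total. ToyLargePage and ToyLargeSpace are hypothetical.
// ---------------------------------------------------------------------------
#include <cstddef>
#include <list>
#include <vector>

namespace sweep_large_sketch {

struct ToyLargePage {
  bool marked = false;
  std::size_t object_size = 0;
};

struct ToyLargeSpace {
  std::list<ToyLargePage*> pages;
  std::size_t objects_size = 0;
};

inline void SweepLargeSpace(ToyLargeSpace& space,
                            std::vector<ToyLargePage*>& freed) {
  std::size_t surviving_object_size = 0;
  for (auto it = space.pages.begin(); it != space.pages.end();) {
    ToyLargePage* page = *it;
    if (!page->marked) {
      // Object is dead and the page can be released.
      it = space.pages.erase(it);
      freed.push_back(page);
      continue;
    }
    page->marked = false;  // clear the mark bit for the next GC cycle
    surviving_object_size += page->object_size;
    ++it;
  }
  space.objects_size = surviving_object_size;
}

}  // namespace sweep_large_sketch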
6101
6102void MarkCompactCollector::Sweep() {
6103 DCHECK(!sweeper_->sweeping_in_progress());
6104 sweeper_->InitializeMajorSweeping();
6105
6106 TRACE_GC_EPOCH_WITH_FLOW(
6107 heap_->tracer(), GCTracer::Scope::MC_SWEEP, ThreadKind::kMain,
6108 sweeper_->GetTraceIdForFlowEvent(GCTracer::Scope::MC_SWEEP),
6109 TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);
6110#ifdef DEBUG
6111 state_ = SWEEP_SPACES;
6112#endif
6113
6114 {
6115 GCTracer::Scope sweep_scope(heap_->tracer(), GCTracer::Scope::MC_SWEEP_LO,
6116 ThreadKind::kMain);
6117 SweepLargeSpace(heap_->lo_space());
6118 }
6119 {
6120 GCTracer::Scope sweep_scope(
6121 heap_->tracer(), GCTracer::Scope::MC_SWEEP_CODE_LO, ThreadKind::kMain);
6122 SweepLargeSpace(heap_->code_lo_space());
6123 }
6124 if (heap_->shared_space()) {
6125 GCTracer::Scope sweep_scope(heap_->tracer(),
6126 GCTracer::Scope::MC_SWEEP_SHARED_LO,
6127 ThreadKind::kMain);
6128 SweepLargeSpace(heap_->shared_lo_space());
6129 }
6130 {
6131 GCTracer::Scope sweep_scope(heap_->tracer(), GCTracer::Scope::MC_SWEEP_OLD,
6132 ThreadKind::kMain);
6133 StartSweepSpace(heap_->old_space());
6134 }
6135 {
6136 GCTracer::Scope sweep_scope(heap_->tracer(), GCTracer::Scope::MC_SWEEP_CODE,
6137 ThreadKind::kMain);
6138 StartSweepSpace(heap_->code_space());
6139 }
6140 if (heap_->shared_space()) {
6141 GCTracer::Scope sweep_scope(
6142 heap_->tracer(), GCTracer::Scope::MC_SWEEP_SHARED, ThreadKind::kMain);
6143 StartSweepSpace(heap_->shared_space());
6144 }
6145 {
6146 GCTracer::Scope sweep_scope(
6147 heap_->tracer(), GCTracer::Scope::MC_SWEEP_TRUSTED, ThreadKind::kMain);
6148 StartSweepSpace(heap_->trusted_space());
6149 }
6150 if (heap_->shared_trusted_space()) {
6151 GCTracer::Scope sweep_scope(
6152 heap_->tracer(), GCTracer::Scope::MC_SWEEP_SHARED, ThreadKind::kMain);
6153 StartSweepSpace(heap_->shared_trusted_space());
6154 }
6155 {
6156 GCTracer::Scope sweep_scope(heap_->tracer(),
6157 GCTracer::Scope::MC_SWEEP_TRUSTED_LO,
6158 ThreadKind::kMain);
6159 SweepLargeSpace(heap_->trusted_lo_space());
6160 }
6161 if (v8_flags.minor_ms && heap_->new_space()) {
6162 GCTracer::Scope sweep_scope(heap_->tracer(), GCTracer::Scope::MC_SWEEP_NEW,
6163 ThreadKind::kMain);
6164 StartSweepNewSpace();
6165 }
6166
6167 sweeper_->StartMajorSweeping();
6168}
6169
6170RootMarkingVisitor::RootMarkingVisitor(MarkCompactCollector* collector)
6171 : collector_(collector) {}
6172
6174
6175void RootMarkingVisitor::VisitRunningCode(
6176 FullObjectSlot code_slot, FullObjectSlot istream_or_smi_zero_slot) {
6177 Tagged<Object> istream_or_smi_zero = *istream_or_smi_zero_slot;
6178 DCHECK(istream_or_smi_zero == Smi::zero() ||
6179 IsInstructionStream(istream_or_smi_zero));
6180 Tagged<Code> code = Cast<Code>(*code_slot);
6181 DCHECK_EQ(code->raw_instruction_stream(PtrComprCageBase{
6182 collector_->heap()->isolate()->code_cage_base()}),
6183 istream_or_smi_zero);
6184
6185 // We must not remove deoptimization literals which may be needed in
6186 // order to successfully deoptimize.
6187 code->IterateDeoptimizationLiterals(this);
6188
6189 if (istream_or_smi_zero != Smi::zero()) {
6190 VisitRootPointer(Root::kStackRoots, nullptr, istream_or_smi_zero_slot);
6191 }
6192
6193 VisitRootPointer(Root::kStackRoots, nullptr, code_slot);
6194}
6195
6196} // namespace internal
6197} // namespace v8
Schedule * schedule
Isolate * isolate_
#define SBXCHECK_EQ(lhs, rhs)
Definition check.h:62
#define SBXCHECK(condition)
Definition check.h:61
V8_INLINE bool Pop(EntryType *entry)
Definition worklist.h:402
virtual bool IsJoiningThread() const =0
virtual uint8_t GetTaskId()=0
virtual int NumberOfWorkerThreads()=0
std::unique_ptr< JobHandle > CreateJob(TaskPriority priority, std::unique_ptr< JobTask > job_task, const SourceLocation &location=SourceLocation::Current())
constexpr void Add(E element)
Definition enum-set.h:50
double NextDouble() V8_WARN_UNUSED_RESULT
std::vector< uint64_t > NextSample(uint64_t max, size_t n) V8_WARN_UNUSED_RESULT
int64_t NextInt64() V8_WARN_UNUSED_RESULT
static constexpr TimeDelta Max()
Definition time.h:233
static TimeTicks Now()
Definition time.cc:736
bool To(Tagged< T > *obj) const
void RequestSweep(SweepingType sweeping_type, TreatAllYoungAsPromoted treat_all_young_as_promoted)
V8_EXPORT_PRIVATE Tagged< Code > code(Builtin builtin)
Definition builtins.cc:149
GarbageCollector garbage_collector() const
void FlushNativeContexts(NativeContextStats *main_stats)
void RescheduleJobIfNeeded(GarbageCollector garbage_collector, TaskPriority priority=TaskPriority::kUserVisible)
void set_another_ephemeron_iteration(bool another_ephemeron_iteration)
static CppHeap * From(v8::CppHeap *heap)
Definition cpp-heap.h:102
void EnterFinalPause(cppgc::EmbedderStackState stack_state)
Definition cpp-heap.cc:890
static void DeoptimizeMarkedCode(Isolate *isolate)
~EphemeronTableUpdatingItem() override=default
PretenuringHandler::PretenuringFeedbackMap * local_pretenuring_feedback_
AllocationSpace AllocateTargetObject(Tagged< HeapObject > old_object, int size, Tagged< HeapObject > *target_object)
EvacuateNewSpaceVisitor(Heap *heap, EvacuationAllocator *local_allocator, RecordMigratedSlotVisitor *record_visitor, PretenuringHandler::PretenuringFeedbackMap *local_pretenuring_feedback)
AllocationResult AllocateInOldSpace(int size_in_bytes, AllocationAlignment alignment)
PretenuringHandler *const pretenuring_handler_
bool TryEvacuateWithoutCopy(Tagged< HeapObject > object)
bool Visit(Tagged< HeapObject > object, int size) override
EvacuateNewToOldSpacePageVisitor(Heap *heap, RecordMigratedSlotVisitor *record_visitor, PretenuringHandler::PretenuringFeedbackMap *local_pretenuring_feedback)
bool Visit(Tagged< HeapObject > object, int size) override
PretenuringHandler::PretenuringFeedbackMap * local_pretenuring_feedback_
bool Visit(Tagged< HeapObject > object, int size) override
EvacuateOldSpaceVisitor(Heap *heap, EvacuationAllocator *local_allocator, RecordMigratedSlotVisitor *record_visitor)
bool Visit(Tagged< HeapObject > object, int size) override
void ExecuteMigrationObservers(AllocationSpace dest, Tagged< HeapObject > src, Tagged< HeapObject > dst, int size)
static void RawMigrateObject(EvacuateVisitorBase *base, Tagged< HeapObject > dst, Tagged< HeapObject > src, int size, AllocationSpace dest)
bool ShouldPromoteIntoSharedHeap(Tagged< Map > map)
void(*)(EvacuateVisitorBase *base, Tagged< HeapObject > dst, Tagged< HeapObject > src, int size, AllocationSpace dest) MigrateFunction
std::optional< base::RandomNumberGenerator > rng_
bool TryEvacuateObject(AllocationSpace target_space, Tagged< HeapObject > object, int size, Tagged< HeapObject > *target_object)
RecordMigratedSlotVisitor * record_visitor_
void AddObserver(MigrationObserver *observer)
EvacuateVisitorBase(Heap *heap, EvacuationAllocator *local_allocator, RecordMigratedSlotVisitor *record_visitor)
EvacuationAllocator * local_allocator_
void MigrateObject(Tagged< HeapObject > dst, Tagged< HeapObject > src, int size, AllocationSpace dest)
std::vector< MigrationObserver * > observers_
AllocationResult Allocate(AllocationSpace space, int object_size, AllocationAlignment alignment)
Tagged< Object > RetainAs(Tagged< Object > object) override
PretenuringHandler::PretenuringFeedbackMap local_pretenuring_feedback_
EvacuateOldSpaceVisitor old_space_visitor_
static EvacuationMode ComputeEvacuationMode(MemoryChunk *chunk)
EvacuateNewSpaceVisitor new_space_visitor_
void ReportCompactionProgress(double duration, intptr_t bytes_compacted)
void EvacuatePage(MutablePageMetadata *chunk)
static const char * EvacuationModeName(EvacuationMode mode)
RecordMigratedSlotVisitor record_visitor_
EvacuateNewToOldSpacePageVisitor new_to_old_page_visitor_
EvacuationAllocator local_allocator_
void AddObserver(MigrationObserver *observer)
Isolate * isolate() const
Definition factory.h:1281
Tagged< MaybeObject > Relaxed_Load() const
Definition slots-inl.h:141
void store(Tagged< MaybeObject > value) const
Definition slots-inl.h:137
void Relaxed_Store(Tagged< Object > value) const
Definition slots-inl.h:100
Tagged< Object > load() const
Definition slots-inl.h:48
Tagged< Object > Relaxed_Load() const
Definition slots-inl.h:82
void TransitionStrings(StringForwardingTable::Record *record)
void MarkForwardObject(StringForwardingTable::Record *record)
void TryExternalize(Tagged< String > original_string, StringForwardingTable::Record *record)
void TryInternalize(Tagged< String > original_string, StringForwardingTable::Record *record)
uint16_t CodeFlushingIncrease() const
Definition gc-tracer.cc:723
std::optional< double > CompactionSpeedInBytesPerMillisecond() const
void IterateWeakRootsForPhantomHandles(WeakSlotCallbackWithHeap should_reset_handle)
void IterateClientIsolates(Callback callback)
Definition safepoint.h:190
MainAllocator * new_space_allocator()
static V8_INLINE bool InYoungGeneration(Tagged< Object > object)
static V8_INLINE bool InWritableSharedSpace(Tagged< HeapObject > object)
static V8_INLINE bool InReadOnlySpace(Tagged< HeapObject > object)
static V8_INLINE bool InAnySharedSpace(Tagged< HeapObject > object)
static V8_INLINE bool InCodeSpace(Tagged< HeapObject > object)
virtual ~HeapObjectVisitor()=default
virtual bool Visit(Tagged< HeapObject > object, int size)=0
static constexpr int kHeaderSize
static constexpr int kMapOffset
static AllocationAlignment RequiredAlignment(Tagged< Map > map)
static V8_INLINE Heap * GetOwnerHeap(Tagged< HeapObject > object)
V8_INLINE size_t Visit(Tagged< HeapObject > object)
ExternalStringTable external_string_table_
Definition heap.h:2364
std::unique_ptr< ObjectStats > live_object_stats_
Definition heap.h:2276
void ProcessAllWeakReferences(WeakObjectRetainer *retainer)
Definition heap.cc:2885
NewSpace * new_space() const
Definition heap.h:727
SharedSpace * shared_space() const
Definition heap.h:733
OldLargeObjectSpace * lo_space() const
Definition heap.h:734
NewLargeObjectSpace * new_lo_space() const
Definition heap.h:737
bool use_new_space() const
Definition heap.h:1643
MarkCompactCollector * mark_compact_collector()
Definition heap.h:813
std::unique_ptr< ObjectStats > dead_object_stats_
Definition heap.h:2277
V8_EXPORT_PRIVATE bool Contains(Tagged< HeapObject > value) const
Definition heap.cc:4341
bool ShouldCurrentGCKeepAgesUnchanged() const
Definition heap.h:1361
void OnMoveEvent(Tagged< HeapObject > source, Tagged< HeapObject > target, int size_in_bytes)
Definition heap.cc:3337
void ResizeNewSpace()
Definition heap.cc:3841
IncrementalMarking * incremental_marking() const
Definition heap.h:1062
MemoryMeasurement * memory_measurement()
Definition heap.h:2055
OldSpace * old_space() const
Definition heap.h:730
ArrayBufferSweeper * array_buffer_sweeper()
Definition heap.h:823
ConcurrentMarking * concurrent_marking() const
Definition heap.h:1070
TrustedSpace * trusted_space() const
Definition heap.h:739
void IterateRoots(RootVisitor *v, base::EnumSet< SkipRoot > options, IterateRootsMode roots_mode=IterateRootsMode::kMainIsolate)
Definition heap.cc:4657
v8::CppHeap * cpp_heap_
Definition heap.h:2305
MemoryAllocator * memory_allocator()
Definition heap.h:803
void IterateConservativeStackRoots(RootVisitor *root_visitor, IterateRootsMode roots_mode=IterateRootsMode::kMainIsolate)
Definition heap.cc:4836
void IterateRootsForPrecisePinning(RootVisitor *visitor)
Definition heap.cc:4861
Sweeper * sweeper()
Definition heap.h:821
CodeLargeObjectSpace * code_lo_space() const
Definition heap.h:735
TrustedLargeObjectSpace * trusted_lo_space() const
Definition heap.h:743
StackState embedder_stack_state_
Definition heap.h:2309
CodeSpace * code_space() const
Definition heap.h:732
MarkingState * marking_state()
Definition heap.h:1621
LocalHeap * main_thread_local_heap_
Definition heap.h:2191
V8_EXPORT_PRIVATE void FatalProcessOutOfMemory(const char *location)
Definition heap.cc:6385
bool ShouldUseBackgroundThreads() const
Definition heap.cc:456
V8_EXPORT_PRIVATE void Unmark()
Definition heap.cc:3580
PagedNewSpace * paged_new_space() const
Definition heap-inl.h:435
std::vector< Handle< NativeContext > > FindAllNativeContexts()
Definition heap.cc:7004
V8_EXPORT_PRIVATE bool ShouldOptimizeForMemoryUsage()
Definition heap.cc:3752
std::vector< Tagged< WeakArrayList > > FindAllRetainedMaps()
Definition heap.cc:7015
NonAtomicMarkingState * non_atomic_marking_state()
Definition heap.h:1623
v8::CppHeap * cpp_heap() const
Definition heap.h:1112
GCTracer * tracer()
Definition heap.h:800
bool IsGCWithStack() const
Definition heap.cc:526
Isolate * isolate() const
Definition heap-inl.h:61
void EnsureQuarantinedPagesSweepingCompleted()
Definition heap.cc:7349
HeapAllocator * allocator()
Definition heap.h:1640
bool ShouldReduceMemory() const
Definition heap.h:1615
void CreateObjectStats()
Definition heap.cc:7080
IndirectPointerHandle Relaxed_LoadHandle() const
Definition slots-inl.h:398
static void IterateBody(Tagged< Map > map, Tagged< HeapObject > obj, ObjectVisitor *v)
static Tagged< InstructionStream > FromTargetAddress(Address address)
void VisitRootPointers(Root root, const char *description, FullObjectSlot start, FullObjectSlot end) override
void VisitRootPointers(Root root, const char *description, OffHeapObjectSlot start, OffHeapObjectSlot end) override
static IsolateGroup * current()
GlobalHandles * global_handles() const
Definition isolate.h:1416
bool serializer_enabled() const
Definition isolate.h:1549
CompilationCache * compilation_cache()
Definition isolate.h:1191
Bootstrapper * bootstrapper()
Definition isolate.h:1178
GlobalSafepoint * global_safepoint() const
Definition isolate.h:2305
TracedHandles * traced_handles()
Definition isolate.h:1418
bool AllowsCodeCompaction() const
Definition isolate.cc:6151
Builtins * builtins()
Definition isolate.h:1443
Isolate * shared_space_isolate() const
Definition isolate.h:2295
StringForwardingTable * string_forwarding_table() const
Definition isolate.h:785
base::RandomNumberGenerator * fuzzer_rng()
Definition isolate.cc:6330
std::unique_ptr< ObjectIterator > GetObjectIterator(Heap *heap) override
size_t Size() const override
MarkingBarrier * marking_barrier()
Definition local-heap.h:130
Address original_top_acquire() const
V8_INLINE bool IsLabValid() const
void RecordRelocSlot(Tagged< InstructionStream > host, RelocInfo *rinfo, Tagged< HeapObject > target)
void RecordSlot(Tagged< HeapObject > object, TSlot slot, Tagged< HeapObject > target)
MainMarkingVisitor(MarkingWorklists::Local *local_marking_worklists, WeakObjects::Local *local_weak_objects, Heap *heap, unsigned mark_compact_epoch, base::EnumSet< CodeFlushMode > code_flush_mode, bool should_keep_ages_unchanged, uint16_t code_flushing_increase)
static constexpr bool IsPacked(Address)
Definition objects.h:846
bool IsForwardingAddress() const
static MapWord FromForwardingAddress(Tagged< HeapObject > map_word_host, Tagged< HeapObject > object)
Tagged< HeapObject > ToForwardingAddress(Tagged< HeapObject > map_word_host)
V8_INLINE void MarkObject(Tagged< HeapObject > host, Tagged< Object > object)
void VisitInstructionStreamPointer(Tagged< Code > host, InstructionStreamSlot slot) override
void VisitCodeTarget(Tagged< InstructionStream > host, RelocInfo *rinfo) override
void VisitPointer(Tagged< HeapObject > host, ObjectSlot p) final
void VisitPointers(Tagged< HeapObject > host, MaybeObjectSlot start, MaybeObjectSlot end) final
void VisitEmbeddedPointer(Tagged< InstructionStream > host, RelocInfo *rinfo) override
void VisitPointers(Tagged< HeapObject > host, ObjectSlot start, ObjectSlot end) final
void VisitPointer(Tagged< HeapObject > host, ObjectSlot p) final
void VisitPointers(Tagged< HeapObject > host, ObjectSlot start, ObjectSlot end) final
V8_INLINE void CheckForSharedObject(Tagged< HeapObject > host, ObjectSlot slot, Tagged< Object > object)
void VisitEmbeddedPointer(Tagged< InstructionStream > host, RelocInfo *rinfo) override
void VisitMapPointer(Tagged< HeapObject > host) final
void VisitPointers(Tagged< HeapObject > host, MaybeObjectSlot start, MaybeObjectSlot end) final
void VisitCodeTarget(Tagged< InstructionStream > host, RelocInfo *rinfo) override
void VisitInstructionStreamPointer(Tagged< Code > host, InstructionStreamSlot slot) override
void VisitPointer(Tagged< HeapObject > host, MaybeObjectSlot p) final
std::pair< size_t, size_t > ProcessMarkingWorklist(v8::base::TimeDelta max_duration, size_t max_bytes_to_process)
V8_INLINE void MarkObject(Tagged< HeapObject > host, Tagged< HeapObject > obj, MarkingHelper::WorklistTarget target_worklist)
void SweepLargeSpace(LargeObjectSpace *space)
void MarkRoots(RootVisitor *root_visitor)
void StartMarking(std::shared_ptr<::heap::base::IncrementalMarkingSchedule > schedule={})
base::EnumSet< CodeFlushMode > code_flush_mode_
void AddEvacuationCandidate(PageMetadata *p)
NativeContextInferrer native_context_inferrer_
void CollectEvacuationCandidates(PagedSpace *space)
static bool IsUnmarkedSharedHeapObject(Heap *heap, FullObjectSlot p)
V8_INLINE void MarkRootObject(Root root, Tagged< HeapObject > obj, MarkingHelper::WorklistTarget target_worklist)
std::vector< PageMetadata * > evacuation_candidates_
void ComputeEvacuationHeuristics(size_t area_size, int *target_fragmentation_percent, size_t *max_evacuated_bytes)
static bool IsOnEvacuationCandidate(Tagged< MaybeObject > obj)
bool ProcessEphemeron(Tagged< HeapObject > key, Tagged< HeapObject > value)
std::unique_ptr< MarkingWorklists::Local > local_marking_worklists_
void MaybeEnableBackgroundThreadsInCycle(CallOrigin origin)
WeakObjects::Local * local_weak_objects()
std::vector< PageMetadata * > empty_new_space_pages_to_be_swept_
static V8_INLINE void RecordSlot(Tagged< HeapObject > object, THeapObjectSlot slot, Tagged< HeapObject > target)
void ProcessTopOptimizedFrame(ObjectVisitor *visitor, Isolate *isolate)
EphemeronResult ApplyEphemeronSemantics(Tagged< HeapObject > key, Tagged< HeapObject > value)
void MarkObjectsFromClientHeap(Isolate *client)
std::unique_ptr< WeakObjects::Local > local_weak_objects_
bool StartCompaction(StartCompactionMode mode)
static bool IsUnmarkedHeapObject(Heap *heap, FullObjectSlot p)
void MarkRootsFromConservativeStack(RootVisitor *root_visitor)
NativeContextStats native_context_stats_
static void RecordRelocSlot(Tagged< InstructionStream > host, RelocInfo *rinfo, Tagged< HeapObject > target)
std::unique_ptr< MainMarkingVisitor > marking_visitor_
NonAtomicMarkingState *const non_atomic_marking_state_
base::EnumSet< CodeFlushMode > code_flush_mode() const
MarkCompactWeakObjectRetainer(Heap *heap, MarkingState *marking_state)
Tagged< Object > RetainAs(Tagged< Object > object) override
static V8_EXPORT_PRIVATE void PublishAll(Heap *heap)
static void DeactivateAll(Heap *heap)
V8_INLINE bool IsMarked(const Tagged< HeapObject > obj) const
V8_INLINE bool TryMarkAndAccountLiveBytes(Tagged< HeapObject > obj)
V8_INLINE bool IsUnmarked(const Tagged< HeapObject > obj) const
static constexpr std::nullptr_t kNoCppMarkingState
void CreateContextWorklists(const std::vector< Address > &contexts)
V8_INLINE MutablePageMetadata * Next()
Definition spaces-inl.h:128
void ClearFlagSlow(Flag flag)
V8_INLINE bool InWritableSharedSpace() const
bool IsEvacuationCandidate() const
V8_INLINE void SetFlagNonExecutable(Flag flag)
Executability executable() const
V8_INLINE bool IsFlagSet(Flag flag) const
V8_INLINE Address address() const
V8_INLINE MemoryChunkMetadata * Metadata()
static V8_INLINE MemoryChunk * FromAddress(Address addr)
size_t Offset(Address addr) const
void SetFlagSlow(Flag flag)
V8_INLINE bool InYoungGeneration() const
bool ShouldSkipEvacuationSlotRecording() const
V8_INLINE void ClearFlagNonExecutable(Flag flag)
static V8_INLINE MemoryChunk * FromHeapObject(Tagged< HeapObject > object)
V8_INLINE bool InReadOnlySpace() const
std::vector< Address > StartProcessing()
void FinishProcessing(const NativeContextStats &stats)
virtual void Move(AllocationSpace dest, Tagged< HeapObject > src, Tagged< HeapObject > dst, int size)=0
virtual ~MigrationObserver()=default
static MutablePageMetadata * cast(MemoryChunkMetadata *metadata)
static V8_INLINE MutablePageMetadata * FromHeapObject(Tagged< HeapObject > o)
V8_INLINE bool Infer(PtrComprCageBase cage_base, Tagged< Map > map, Tagged< HeapObject > object, Address *native_context)
V8_INLINE void IncrementSize(Address context, Tagged< Map > map, Tagged< HeapObject > object, size_t size)
virtual void GarbageCollectionEpilogue()=0
PtrComprCageBase code_cage_base() const
Definition visitors.h:235
PtrComprCageBase cage_base() const
Definition visitors.h:225
PageEvacuationJob(Isolate *isolate, MarkCompactCollector *collector, std::vector< std::unique_ptr< Evacuator > > *evacuators, std::vector< std::pair< ParallelWorkItem, MutablePageMetadata * > > evacuation_items)
size_t GetMaxConcurrency(size_t worker_count) const override
void ProcessItems(JobDelegate *delegate, Evacuator *evacuator)
std::vector< std::pair< ParallelWorkItem, MutablePageMetadata * > > evacuation_items_
MarkCompactCollector * collector_
void Run(JobDelegate *delegate) override
std::vector< std::unique_ptr< Evacuator > > * evacuators_
static V8_INLINE PageMetadata * FromHeapObject(Tagged< HeapObject > o)
PagedSpaceForNewSpace * paged_space()
Definition new-spaces.h:718
static PagedNewSpace * From(NewSpace *space)
Definition new-spaces.h:598
void ReleasePage(PageMetadata *page) final
std::vector< std::unique_ptr< UpdatingItem > > updating_items_
size_t GetMaxConcurrency(size_t worker_count) const override
MarkCompactCollector * collector_
void Run(JobDelegate *delegate) override
void UpdatePointers(JobDelegate *delegate)
PointersUpdatingJob(Isolate *isolate, MarkCompactCollector *collector, std::vector< std::unique_ptr< UpdatingItem > > updating_items)
void VisitRootPointer(Root root, const char *description, FullObjectSlot p) override
static void UpdateStrongMaybeObjectSlotInternal(PtrComprCageBase cage_base, MaybeObjectSlot slot)
void VisitPointer(Tagged< HeapObject > host, ObjectSlot p) override
void VisitEmbeddedPointer(Tagged< InstructionStream > host, RelocInfo *rinfo) override
static void UpdateStrongSlotInternal(PtrComprCageBase cage_base, ObjectSlot slot)
void VisitCodeTarget(Tagged< InstructionStream > host, RelocInfo *rinfo) override
void VisitRootPointers(Root root, const char *description, FullObjectSlot start, FullObjectSlot end) override
static void UpdateRootSlotInternal(PtrComprCageBase cage_base, OffHeapObjectSlot slot)
static void UpdateRootSlotInternal(PtrComprCageBase cage_base, FullObjectSlot slot)
void VisitPointers(Tagged< HeapObject > host, MaybeObjectSlot start, MaybeObjectSlot end) final
void VisitRootPointers(Root root, const char *description, OffHeapObjectSlot start, OffHeapObjectSlot end) override
void VisitInstructionStreamPointer(Tagged< Code > host, InstructionStreamSlot slot) override
void VisitPointers(Tagged< HeapObject > host, ObjectSlot start, ObjectSlot end) override
void VisitPointer(Tagged< HeapObject > host, MaybeObjectSlot p) override
static void UpdateSlotInternal(PtrComprCageBase cage_base, MaybeObjectSlot slot)
MarkCompactCollector *const collector_
void VisitRootPointers(Root root, const char *description, FullObjectSlot start, FullObjectSlot end) final
PrecisePagePinningVisitor(MarkCompactCollector *collector)
void VisitRootPointer(Root root, const char *description, FullObjectSlot p) final
std::unordered_map< Tagged< AllocationSite >, size_t, Object::Hasher > PretenuringFeedbackMap
static void UpdateAllocationSite(Heap *heap, Tagged< Map > map, Tagged< HeapObject > object, int object_size, PretenuringFeedbackMap *pretenuring_feedback)
void Move(AllocationSpace dest, Tagged< HeapObject > src, Tagged< HeapObject > dst, int size) final
void VisitMapPointer(Tagged< HeapObject > host) final
void VisitTrustedPointerTableEntry(Tagged< HeapObject > host, IndirectPointerSlot slot) final
void VisitCodeTarget(Tagged< InstructionStream > host, RelocInfo *rinfo) override
void VisitPointer(Tagged< HeapObject > host, ObjectSlot p) final
void VisitPointer(Tagged< HeapObject > host, MaybeObjectSlot p) final
void VisitEmbeddedPointer(Tagged< InstructionStream > host, RelocInfo *rinfo) override
void VisitPointers(Tagged< HeapObject > host, MaybeObjectSlot start, MaybeObjectSlot end) final
static V8_INLINE constexpr bool UsePrecomputedObjectSize()
void VisitIndirectPointer(Tagged< HeapObject > host, IndirectPointerSlot slot, IndirectPointerMode mode) final
void RecordMigratedSlot(Tagged< HeapObject > host, Tagged< MaybeObject > value, Address slot)
void VisitExternalPointer(Tagged< HeapObject > host, ExternalPointerSlot slot) final
void VisitInstructionStreamPointer(Tagged< Code > host, InstructionStreamSlot slot) final
void VisitProtectedPointer(Tagged< TrustedObject > host, ProtectedPointerSlot slot) final
void VisitInternalReference(Tagged< InstructionStream > host, RelocInfo *rinfo) final
void VisitEphemeron(Tagged< HeapObject > host, int index, ObjectSlot key, ObjectSlot value) override
void VisitPointers(Tagged< HeapObject > host, ObjectSlot start, ObjectSlot end) final
void VisitProtectedPointer(Tagged< TrustedObject > host, ProtectedMaybeObjectSlot slot) final
void VisitExternalReference(Tagged< InstructionStream > host, RelocInfo *rinfo) final
static constexpr bool IsCodeTargetMode(Mode mode)
Definition reloc-info.h:197
V8_INLINE Address target_address()
static constexpr bool IsEmbeddedObjectMode(Mode mode)
Definition reloc-info.h:209
V8_INLINE Tagged< HeapObject > target_object(PtrComprCageBase cage_base)
V8_INLINE Address constant_pool_entry_address()
Address pc() const
Definition reloc-info.h:275
static void Insert(MutablePageMetadata *page, size_t slot_offset)
static int IterateTyped(MutablePageMetadata *chunk, Callback callback)
static int Iterate(MutablePageMetadata *chunk, Callback callback, SlotSet::EmptyBucketMode mode)
V8_INLINE void VisitRootPointer(Root root, const char *description, FullObjectSlot p) final
void VisitRunningCode(FullObjectSlot code_slot, FullObjectSlot istream_or_smi_zero_slot) final
static constexpr Tagged< Smi > deleted_element()
Definition js-struct.h:81
Address address() const
Definition slots.h:78
static constexpr Tagged< Smi > FromInt(int value)
Definition smi.h:38
static constexpr Tagged< Smi > zero()
Definition smi.h:99
void DisposeExternalResource(StringForwardingTable::Record *record)
V8_INLINE void IterateElements(Func &&callback)
static constexpr Tagged< Smi > deleted_element()
static constexpr Tagged< Smi > deleted_element()
static bool IsInPlaceInternalizableExcludingExternal(InstanceType instance_type)
bool sweeping_in_progress() const
Definition sweeper.h:114
V8_EXPORT_PRIVATE void StartMajorSweeperTasks()
Definition sweeper.cc:738
uint64_t GetTraceIdForFlowEvent(GCTracer::Scope::ScopeId scope_id) const
Definition sweeper.cc:1541
void SweepEmptyNewSpacePage(PageMetadata *page)
Definition sweeper.cc:1476
void AddPage(AllocationSpace space, PageMetadata *page)
Definition sweeper.cc:1311
V8_INLINE constexpr StorageType ptr() const
bool GetHeapObject(Tagged< HeapObject > *result) const
bool ToSmi(Tagged< Smi > *value) const
V8_INLINE constexpr bool is_null() const
Definition tagged.h:502
constexpr V8_INLINE bool IsHeapObject() const
Definition tagged.h:507
constexpr V8_INLINE bool IsSmi() const
Definition tagged.h:508
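The TaggedImpl accessors listed above (IsSmi, IsHeapObject, ToSmi, GetHeapObject) implement the common "try to decode as each representation" pattern when inspecting a slot. A standalone sketch of that pattern, reusing the simplified one-bit tagging from the Smi sketch (not V8's actual TaggedImpl):

#include <cstdint>
#include <cstdio>

struct TaggedValue {
  intptr_t ptr_;
  bool IsSmi() const { return (ptr_ & 1) == 0; }
  bool IsHeapObject() const { return (ptr_ & 1) == 1; }
  bool ToSmi(int* out) const {
    if (!IsSmi()) return false;
    *out = static_cast<int>(ptr_ >> 1);
    return true;
  }
  bool GetHeapObject(void** out) const {
    if (!IsHeapObject()) return false;
    *out = reinterpret_cast<void*>(ptr_ & ~intptr_t{1});
    return true;
  }
};

int main() {
  TaggedValue v{42 << 1};  // a tagged small integer
  int smi;
  if (v.ToSmi(&smi)) std::printf("smi: %d\n", smi);
}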
static WritableJitAllocation RegisterInstructionStreamAllocation(Address addr, size_t size, bool enforce_write_api=false)
bool HasSimpleTransitionTo(Tagged< Map > map)
static Tagged< HeapObject > GetTargetObject(Heap *heap, SlotType slot_type, Address addr)
virtual ~UpdatingItem()=default
virtual void Process()=0
static V8_EXPORT_PRIVATE v8::Platform * GetCurrentPlatform()
Definition v8.cc:282
V8_INLINE void CopyCode(size_t dst_offset, const uint8_t *src, size_t num_bytes)
static WritableJitAllocation ForInstructionStream(Tagged< InstructionStream > istream)
V8_INLINE void WriteHeaderSlot(T value)
V8_INLINE void CopyData(size_t dst_offset, const uint8_t *src, size_t num_bytes)
V8_INLINE WritableJitAllocation LookupAllocationContaining(Address addr)
static void GenerationalForRelocInfo(Tagged< InstructionStream > host, RelocInfo *rinfo, Tagged< HeapObject > object)
static void SharedForRelocInfo(Tagged< InstructionStream > host, RelocInfo *, Tagged< HeapObject > value)
void SweepSegments(size_t threshold=2 * kEntriesPerSegment)
#define PROFILE(the_isolate, Call)
Definition code-events.h:59
base::Mutex & mutex_
Handle< Code > code
#define V8_COMPRESS_POINTERS_8GB_BOOL
Definition globals.h:608
#define ALIGN_TO_ALLOCATION_ALIGNMENT(value)
Definition globals.h:1796
#define HAS_WEAK_HEAP_OBJECT_TAG(value)
Definition globals.h:1778
NormalPageSpace * space_
Definition compactor.cc:324
BasePage * page
Definition sweeper.cc:218
constexpr const char * ToString(DataViewOp op)
int start
int end
enum v8::internal::@1270::DeoptimizableCodeIterator::@67 state_
LineAndColumn current
abort_on_contradictory_flags and related v8_flags definitions (flag help strings concatenated by the documentation generator)
#define TRACE_GC_NOTE_WITH_FLOW(note, bind_id, flow_flags)
Definition gc-tracer.h:98
#define TRACE_GC_EPOCH_WITH_FLOW(tracer, scope_id, thread_kind, bind_id, flow_flags)
Definition gc-tracer.h:84
#define TRACE_GC_WITH_FLOW(tracer, scope_id, bind_id, flow_flags)
Definition gc-tracer.h:52
#define TRACE_GC(tracer, scope_id)
Definition gc-tracer.h:35
#define TRACE_GC_ARG1(tracer, scope_id, arg0_name, arg0_value)
Definition gc-tracer.h:43
#define TRACE_GC_CATEGORIES
Definition gc-tracer.h:29
#define TRACE_GC1_WITH_FLOW(tracer, scope_id, thread_kind, bind_id, flow_flags)
Definition gc-tracer.h:68
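The TRACE_GC* macros above open a scoped trace event so that entering and leaving a GC phase is timed automatically; the *_WITH_FLOW variants additionally carry scope ids, thread kind, and flow flags. A minimal sketch of the underlying RAII timing pattern (simplified; not the real gc-tracer.h machinery):

#include <chrono>
#include <cstdio>

// Simplified stand-in for a tracer scope: times a phase for its lifetime.
class ScopedGCTrace {
 public:
  explicit ScopedGCTrace(const char* phase)
      : phase_(phase), start_(std::chrono::steady_clock::now()) {}
  ~ScopedGCTrace() {
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                  std::chrono::steady_clock::now() - start_).count();
    std::printf("%s took %lld us\n", phase_, static_cast<long long>(us));
  }
 private:
  const char* phase_;
  std::chrono::steady_clock::time_point start_;
};

#define SIMPLE_TRACE_GC(phase) ScopedGCTrace trace_scope(phase)

void MarkRoots() { SIMPLE_TRACE_GC("MC_MARK_ROOTS"); /* phase body */ }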
int32_t offset
TNode< Context > context
TNode< Object > target
std::map< const std::string, const std::string > map
std::unique_ptr< icu::DateTimePatternGenerator > generator_
double second
DurationRecord record
ZoneVector< RpoNumber > & result
Heap * heap_
MarkCompactCollector * collector_
int elements_removed_
NonAtomicMarkingState * marking_state_
MutablePageMetadata * chunk_
base::Mutex items_mutex_
std::vector< std::unique_ptr< ClearingItem > > items_
const bool record_old_to_shared_slots_
STL namespace.
void MakeWeak(i::Address *location, void *parameter, WeakCallbackInfo< void >::Callback weak_callback, WeakCallbackType type)
Definition api.cc:644
constexpr bool IsPowerOfTwo(T value)
Definition bits.h:187
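base::bits::IsPowerOfTwo above relies on the standard bit trick: a power of two has exactly one set bit, so value & (value - 1) clears it to zero. A self-contained sketch of the same check (illustrative, not the bits.h implementation):

#include <cstdint>

constexpr bool IsPowerOfTwoSketch(uint64_t value) {
  return value != 0 && (value & (value - 1)) == 0;
}

static_assert(IsPowerOfTwoSketch(1) && IsPowerOfTwoSketch(4096));
static_assert(!IsPowerOfTwoSketch(0) && !IsPowerOfTwoSketch(12));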
LockGuard< Mutex > MutexGuard
Definition mutex.h:219
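base::MutexGuard above is a LockGuard alias: the mutex is acquired on construction and released when the guard leaves scope, which is how the collector protects shared work lists such as items_ below. The same usage pattern with the standard library (analogy only, not V8's base::Mutex):

#include <mutex>
#include <vector>

std::mutex items_mutex;   // analogous to base::Mutex items_mutex_
std::vector<int> items;   // analogous to the items_ vector listed below

void AddItem(int item) {
  std::lock_guard<std::mutex> guard(items_mutex);  // analogous to base::MutexGuard
  items.push_back(item);
}  // mutex released here, even if push_back throws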
void Add(RWDigits Z, Digits X, Digits Y)
V8_EXPORT_PRIVATE WasmCodePointerTable * GetProcessWideWasmCodePointerTable()
V8_INLINE IndirectHandle< T > handle(Tagged< T > object, Isolate *isolate)
Definition handles-inl.h:72
static V8_INLINE bool HasWeakHeapObjectTag(const Tagged< Object > value)
Definition objects.h:653
IndirectPointerHandle TrustedPointerHandle
constexpr const char * ToString(DeoptimizeKind kind)
Definition globals.h:880
constexpr int kTaggedSize
Definition globals.h:542
V8_INLINE constexpr PtrComprCageBase GetPtrComprCageBaseFromOnHeapAddress(Address address)
@ SKIP_WRITE_BARRIER
Definition objects.h:52
SlotTraits::TObjectSlot ObjectSlot
Definition globals.h:1243
void PrintF(const char *format,...)
Definition utils.cc:39
static V8_INLINE constexpr bool IsSharedExternalPointerType(ExternalPointerTagRange tag_range)
@ kDoNotLinkCategory
Definition free-list.h:42
V8_INLINE constexpr bool IsSmi(TaggedImpl< kRefType, StorageType > obj)
Definition objects.h:665
kInterpreterTrampolineOffset Tagged< HeapObject >
void MemsetTagged(Tagged_t *start, Tagged< MaybeObject > value, size_t counter)
Definition slots-inl.h:486
Handle< To > UncheckedCast(Handle< From > value)
Definition handles-inl.h:55
constexpr int kTaggedSizeLog2
Definition globals.h:543
constexpr uint32_t kZapValue
Definition globals.h:1005
static void TraceFragmentation(PagedSpace *space)
bool IsCppHeapMarkingFinished(Heap *heap, MarkingWorklists::Local *local_marking_worklists)
Tagged< ClearedWeakValue > ClearedValue(PtrComprCageBase cage_base)
@ kExternalStringResourceTag
@ kExternalStringResourceDataTag
V8_INLINE constexpr bool IsHeapObject(TaggedImpl< kRefType, StorageType > obj)
Definition objects.h:669
V8_EXPORT_PRIVATE FlagValues v8_flags
Tagged< ClearedWeakValue > ClearedTrustedValue()
uint32_t ExternalPointerHandle
static Tagged< String > UpdateReferenceInExternalStringTableEntry(Heap *heap, FullObjectSlot p)
cppheap_incremental_marking and related v8_flags definitions (flag help strings, including a --stress_compaction note, concatenated by the documentation generator)
static constexpr Address kNullAddress
Definition v8-internal.h:53
void PrintIsolate(void *isolate, const char *format,...)
Definition utils.cc:61
V8_INLINE bool InsideSandbox(uintptr_t address)
Definition sandbox.h:334
constructor_or_back_pointer
Definition map-inl.h:870
MB (symbol referenced in flag default values; surrounding help strings concatenated by the documentation generator)
Definition flags.cc:2197
Tagged< To > Cast(Tagged< From > value, const v8::SourceLocation &loc=INIT_SOURCE_LOCATION_IN_DEBUG)
Definition casting.h:150
static constexpr RelaxedLoadTag kRelaxedLoad
Definition globals.h:2909
static constexpr RelaxedStoreTag kRelaxedStore
Definition globals.h:2911
static constexpr AcquireLoadTag kAcquireLoad
Definition globals.h:2908
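kRelaxedLoad, kRelaxedStore and kAcquireLoad above are tag values used to select a memory order for field accessors. The same distinction expressed with std::atomic, as an illustrative analogy only:

#include <atomic>

std::atomic<int> field{0};

int RelaxedLoad() { return field.load(std::memory_order_relaxed); }      // cf. kRelaxedLoad
int AcquireLoad() { return field.load(std::memory_order_acquire); }      // cf. kAcquireLoad
void RelaxedStore(int v) { field.store(v, std::memory_order_relaxed); }  // cf. kRelaxedStore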
@ IDLE
Definition v8-unwinder.h:45
SourcePositionTable *const table_
Definition pipeline.cc:227
WeakObjects weak_objects_
WeakObjects::Local local_weak_objects_
#define DCHECK_CODEOBJECT_SIZE(size)
Definition spaces.h:54
#define DCHECK_OBJECT_SIZE(size)
Definition spaces.h:51
#define UNREACHABLE()
Definition logging.h:67
#define DCHECK_LE(v1, v2)
Definition logging.h:490
#define DCHECK_NULL(val)
Definition logging.h:491
#define CHECK_IMPLIES(lhs, rhs)
#define CHECK(condition)
Definition logging.h:124
#define DCHECK_NOT_NULL(val)
Definition logging.h:492
#define CHECK_NULL(val)
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define CHECK_NE(lhs, rhs)
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define CHECK_EQ(lhs, rhs)
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define DCHECK_GT(v1, v2)
Definition logging.h:487
#define USE(...)
Definition macros.h:293
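The CHECK*/DCHECK* macros listed above assert invariants: CHECK is always compiled in, while DCHECK is typically compiled out of release builds. A minimal sketch of that split (simplified; V8's versions print far richer diagnostics and support the _EQ/_NE/_LT/... comparison forms):

#include <cstdio>
#include <cstdlib>

#define SIMPLE_CHECK(cond)                               \
  do {                                                   \
    if (!(cond)) {                                       \
      std::fprintf(stderr, "Check failed: %s\n", #cond); \
      std::abort();                                      \
    }                                                    \
  } while (false)

// Assuming a DEBUG define in debug builds (simplified).
#ifdef DEBUG
#define SIMPLE_DCHECK(cond) SIMPLE_CHECK(cond)
#else
#define SIMPLE_DCHECK(cond) ((void)0)  // no-op in release builds
#endif

void Example(int size) {
  SIMPLE_DCHECK(size >= 0);        // debug-only invariant
  SIMPLE_CHECK(size < (1 << 20));  // always enforced
}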
#define V8PRIdPTR
Definition macros.h:332
constexpr bool IsAligned(T value, U alignment)
Definition macros.h:403
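IsAligned above assumes a power-of-two alignment, so the check reduces to masking the low bits. A self-contained sketch (illustrative, not the macros.h implementation):

#include <cstdint>

constexpr bool IsAlignedSketch(uintptr_t value, uintptr_t alignment) {
  // Valid only when alignment is a power of two.
  return (value & (alignment - 1)) == 0;
}

static_assert(IsAlignedSketch(64, 8));
static_assert(!IsAlignedSketch(12, 8));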
max size of the shared heap (in MBytes); flag help text whose prefix was garbled by the documentation generator
static V8_INLINE bool TryMarkAndPush(Heap *heap, MarkingWorklists::Local *marking_worklist, MarkingState *marking_state, WorklistTarget target_worklist, Tagged< HeapObject > object)
static V8_INLINE bool IsMarkedOrAlwaysLive(Heap *heap, MarkingStateT *marking_state, Tagged< HeapObject > object)
static V8_INLINE bool IsUnmarkedAndNotAlwaysLive(Heap *heap, MarkingStateT *marking_state, Tagged< HeapObject > object)
static V8_INLINE std::optional< WorklistTarget > ShouldMarkObject(Heap *heap, Tagged< HeapObject > object)
static V8_INLINE LivenessMode GetLivenessMode(Heap *heap, Tagged< HeapObject > object)
static V8_EXPORT_PRIVATE std::atomic_uint gc_stats
static bool is_gc_stats_enabled()
#define TRACE_EVENT0(category_group, name)
#define TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)
#define TRACE_EVENT_SCOPE_THREAD
#define TRACE_DISABLED_BY_DEFAULT(name)
#define TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, arg2_name, arg2_val)
#define TRACE_EVENT1(category_group, name, arg1_name, arg1_val)
#define TRACE_EVENT_FLAG_FLOW_OUT
#define TRACE_EVENT_FLAG_FLOW_IN
const uint64_t trace_id_
Heap * heap_
#define TRACE_STR_COPY(str)
Definition trace-event.h:50
#define V8_INLINE
Definition v8config.h:500
#define V8_LIKELY(condition)
Definition v8config.h:661
#define V8_UNLIKELY(condition)
Definition v8config.h:660
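V8_LIKELY and V8_UNLIKELY above hint the expected truth value of a branch to the compiler. A sketch of how such macros are commonly defined on GCC/Clang (hedged: the exact V8 definitions live in v8config.h and also handle other compilers):

#if defined(__GNUC__) || defined(__clang__)
#define LIKELY_SKETCH(condition) (__builtin_expect(!!(condition), 1))
#define UNLIKELY_SKETCH(condition) (__builtin_expect(!!(condition), 0))
#else
#define LIKELY_SKETCH(condition) (condition)
#define UNLIKELY_SKETCH(condition) (condition)
#endif

int Process(int* slot) {
  if (UNLIKELY_SKETCH(slot == nullptr)) return -1;  // rare error path
  return *slot;
}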
std::unique_ptr< ValueMirror > value
std::unique_ptr< ValueMirror > key