#ifdef V8_ENABLE_WEBASSEMBLY
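// Full-heap marking verifier: after marking it re-walks every space listed in
// Run() below and checks that each reachable object is marked (or lives in
// read-only / black-allocated memory).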
class FullMarkingVerifier : public MarkingVerifierBase {
  explicit FullMarkingVerifier(Heap* heap)
      : MarkingVerifierBase(heap),

  void Run() override {
    VerifyMarking(heap_->new_space());
    VerifyMarking(heap_->new_lo_space());
    VerifyMarking(heap_->old_space());
    VerifyMarking(heap_->code_space());
    if (heap_->shared_space()) VerifyMarking(heap_->shared_space());
    VerifyMarking(heap_->lo_space());
    VerifyMarking(heap_->code_lo_space());
    if (heap_->shared_lo_space()) VerifyMarking(heap_->shared_lo_space());
    VerifyMarking(heap_->trusted_space());
    VerifyMarking(heap_->trusted_lo_space());

  const MarkingBitmap* bitmap(const MutablePageMetadata* chunk) override {
    return chunk->marking_bitmap();

  bool IsMarked(Tagged<HeapObject> object) override {

  void VerifyMap(Tagged<Map> map) override { VerifyHeapObjectImpl(map); }

  void VerifyPointers(ObjectSlot start, ObjectSlot end) override {

  void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {

  void VerifyCodePointer(InstructionStreamSlot slot) override {
    Tagged<Object> maybe_code = slot.load(code_cage_base());
    Tagged<HeapObject> code;
    if (maybe_code.GetHeapObject(&code)) {
      VerifyHeapObjectImpl(code);

  void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {

  void VisitCodeTarget(Tagged<InstructionStream> host,
                       RelocInfo* rinfo) override {
    Tagged<InstructionStream> target =
        InstructionStream::FromTargetAddress(rinfo->target_address());
    VerifyHeapObjectImpl(target);

  void VisitEmbeddedPointer(Tagged<InstructionStream> host,
                            RelocInfo* rinfo) override {
    CHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
    Tagged<HeapObject> target_object = rinfo->target_object(cage_base());
    Tagged<Code> code = UncheckedCast<Code>(host->raw_code(kAcquireLoad));
    if (!code->IsWeakObject(target_object)) {
      VerifyHeapObjectImpl(target_object);

  V8_INLINE void VerifyHeapObjectImpl(Tagged<HeapObject> heap_object) {
    if (!ShouldVerifyObject(heap_object)) return;

    if (heap_->MustBeInSharedOldSpace(heap_object)) {
      CHECK(heap_->SharedHeapContains(heap_object));

    CHECK(HeapLayout::InReadOnlySpace(heap_object) ||
          HeapLayout::InBlackAllocatedPage(heap_object)) ||

  V8_INLINE bool ShouldVerifyObject(Tagged<HeapObject> heap_object) {
    const bool in_shared_heap = HeapLayout::InWritableSharedSpace(heap_object);
    return heap_->isolate()->is_shared_space_isolate() ? true : !in_shared_heap;

  template <typename TSlot>
    PtrComprCageBase cage_base =
    for (TSlot slot = start; slot < end; ++slot) {
      typename TSlot::TObject object = slot.load(cage_base);
#ifdef V8_ENABLE_DIRECT_HANDLE
      if (object.ptr() == kTaggedNullAddress) continue;
      Tagged<HeapObject> heap_object;
      if (object.GetHeapObjectIfStrong(&heap_object)) {
        VerifyHeapObjectImpl(heap_object);
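// Compaction parallelism heuristic: with --parallel-compaction the number of
// evacuation tasks scales with the available cores; it is reduced when the
// old generation cannot promote young objects and expand further.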
int NumberOfAvailableCores() {

int NumberOfParallelCompactionTasks(Heap* heap) {
  int tasks = v8_flags.parallel_compaction ? NumberOfAvailableCores() : 1;
  if (!heap->CanPromoteYoungAndExpandOldGeneration(
    unsigned mark_compact_epoch,
    bool should_keep_ages_unchanged,
    uint16_t code_flushing_increase)
          local_marking_worklists, local_weak_objects, heap,
          mark_compact_epoch, code_flush_mode, should_keep_ages_unchanged,
          code_flushing_increase) {}

  template <typename TSlot>

      uses_shared_heap_(heap_->isolate()->has_shared_space()),
      is_shared_space_isolate_(heap_->isolate()->is_shared_space_isolate()),
      non_atomic_marking_state_(heap_->non_atomic_marking_state()),
      sweeper_(heap_->sweeper()) {
  if (v8_flags.trace_evacuation_candidates) {
        "Evacuation candidate: Free bytes: %6zu. Free Lists length: %4d.\n",

  int number_of_pages = space->CountTotalPages();
  intptr_t reserved = (number_of_pages * space->AreaSize());
  intptr_t free = reserved - space->SizeOfObjects();
  PrintF("[%s]: %d pages, %d (%.1f%%) free\n", ToString(space->identity()),
         number_of_pages, static_cast<int>(free),
         static_cast<double>(free) * 100 / reserved);

      (v8_flags.gc_experiment_less_compaction &&
  } else if (v8_flags.trace_fragmentation) {

  if (isolate->disable_bytecode_flushing()) {
  if (isolate->heap()->IsLastResortGC() &&
      (v8_flags.flush_code_based_on_time ||
       v8_flags.flush_code_based_on_tab_visibility)) {
  return code_flush_mode;
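// Marking setup for a major GC: external pointer table spaces start compacting
// (when pointer compression is enabled), CppHeap (Oilpan) marking is
// initialized for a major collection, and per-context worklists are used with
// --stress-per-context-marking-worklist.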
    std::shared_ptr<::heap::base::IncrementalMarkingSchedule> schedule) {
#ifdef V8_COMPRESS_POINTERS
  heap_->young_external_pointer_space()->StartCompactingIfNeeded();
  heap_->old_external_pointer_space()->StartCompactingIfNeeded();
  heap_->cpp_heap_pointer_space()->StartCompactingIfNeeded();

    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
    cpp_heap->InitializeMarking(CppHeap::CollectionType::kMajor, schedule);

  std::vector<Address> contexts =
  if (v8_flags.stress_per_context_marking_worklist) {
      contexts.push_back(context->ptr());
      cpp_heap ? cpp_heap->CreateCppMarkingStateForMutatorThread()
  VerifyMarkbitsAreClean();

  const bool force_background_threads =
      v8_flags.parallel_pause_for_gc_in_background &&
  cpp_heap->ReEnableConcurrentMarking();
  cpp_heap->ProcessCrossThreadWeakness();
  cpp_heap->FinishMarkingAndProcessWeakness();
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpaceBase* space) {
    CHECK(p->marking_bitmap()->IsClean());

void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
  for (PageMetadata* p : *space) {
    CHECK(p->marking_bitmap()->IsClean());

void MarkCompactCollector::VerifyMarkbitsAreClean(LargeObjectSpace* space) {
  LargeObjectSpaceObjectIterator it(space);

void MarkCompactCollector::VerifyMarkbitsAreClean() {
    size_t area_size, int* target_fragmentation_percent,
    size_t* max_evacuated_bytes) {
  const int kTargetFragmentationPercentForReduceMemory = 20;
  const size_t kMaxEvacuatedBytesForReduceMemory = 12 * MB;
  const int kTargetFragmentationPercentForOptimizeMemory = 20;
  const size_t kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
  const int kTargetFragmentationPercent = 70;
  const size_t kMaxEvacuatedBytes = 4 * MB;
  const float kTargetMsPerArea = .5;

    *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
    *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
    *target_fragmentation_percent =
        kTargetFragmentationPercentForOptimizeMemory;
    *max_evacuated_bytes = kMaxEvacuatedBytesForOptimizeMemory;
    const std::optional<double> estimated_compaction_speed =
    if (estimated_compaction_speed.has_value()) {
      const double estimated_ms_per_area =
          1 + area_size / *estimated_compaction_speed;
      *target_fragmentation_percent = static_cast<int>(
          100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
      if (*target_fragmentation_percent <
          kTargetFragmentationPercentForReduceMemory) {
        *target_fragmentation_percent =
            kTargetFragmentationPercentForReduceMemory;
      *target_fragmentation_percent = kTargetFragmentationPercent;
    *max_evacuated_bytes = kMaxEvacuatedBytes;
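// Evacuation candidate selection: a page becomes a compaction candidate when
// its free space reaches free_bytes_threshold; candidates are sorted by live
// bytes and taken until max_evacuated_bytes is exhausted (or unconditionally
// with --compact-on-every-full-gc).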
  int number_of_pages = space->CountTotalPages();
  size_t area_size = space->AreaSize();

  const bool in_standard_path =
      !(v8_flags.manual_evacuation_candidates_selection ||

  size_t max_evacuated_bytes;
  int target_fragmentation_percent;
  size_t free_bytes_threshold;
  if (in_standard_path) {
                                &max_evacuated_bytes);
    free_bytes_threshold = target_fragmentation_percent * (area_size / 100);

  using LiveBytesPagePair = std::pair<size_t, PageMetadata*>;
  std::vector<LiveBytesPagePair> pages;
  pages.reserve(number_of_pages);

    CHECK(p->SweepingDone());
    DCHECK(p->area_size() == area_size);
    if (in_standard_path) {
      if (area_size - p->allocated_bytes() >= free_bytes_threshold) {
        pages.push_back(std::make_pair(p->allocated_bytes(), p));
      pages.push_back(std::make_pair(p->allocated_bytes(), p));

  int candidate_count = 0;
  size_t total_live_bytes = 0;

  if (v8_flags.manual_evacuation_candidates_selection) {
    for (size_t i = 0; i < pages.size(); i++) {
        total_live_bytes += pages[i].first;
  } else if (v8_flags.stress_compaction_random) {
    size_t pages_to_mark_count =
        static_cast<size_t>(fraction * (pages.size() + 1));
             pages.size(), pages_to_mark_count)) {
      total_live_bytes += pages[i].first;
  } else if (v8_flags.stress_compaction) {
    for (size_t i = 0; i < pages.size(); i++) {
        total_live_bytes += pages[i].first;

    std::sort(pages.begin(), pages.end(),
              [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
                return a.first < b.first;

    for (size_t i = 0; i < pages.size(); i++) {
      size_t live_bytes = pages[i].first;
      if (v8_flags.compact_on_every_full_gc ||
          ((total_live_bytes + live_bytes) <= max_evacuated_bytes)) {
        total_live_bytes += live_bytes;
      if (v8_flags.trace_fragmentation_verbose) {
            "compaction-selection-page: space=%s free_bytes_page=%zu "
            "fragmentation_limit_kb=%zu "
            "fragmentation_limit_percent=%d sum_compaction_kb=%zu "
            "compaction_limit_kb=%zu\n",
            ToString(space->identity()), (area_size - live_bytes) / KB,
            free_bytes_threshold / KB, target_fragmentation_percent,
            total_live_bytes / KB, max_evacuated_bytes / KB);

    int estimated_new_pages =
        static_cast<int>((total_live_bytes + area_size - 1) / area_size);
    DCHECK_LE(estimated_new_pages, candidate_count);
    int estimated_released_pages = candidate_count - estimated_new_pages;
    if ((estimated_released_pages == 0) && !v8_flags.compact_on_every_full_gc) {

  for (int i = 0; i < candidate_count; i++) {
        "compaction-selection: space=%s reduce_memory=%d pages=%d "
        "total_live_bytes=%zu\n",
        ToString(space->identity()), reduce_memory, candidate_count,
        total_live_bytes / KB);
    new_space->GarbageCollectionPrologue();
    cpp_heap->FinishConcurrentMarkingIfNeeded();

    FullMarkingVerifier verifier(heap_);

  size_t surviving_object_size = 0;
  for (auto it = space->begin(); it != space->end();) {
    const size_t object_size = static_cast<size_t>(object->Size(cage_base));
    space->ShrinkPageToObjectSize(current, object, object_size);
    surviving_object_size += object_size;
  space->set_objects_size(surviving_object_size);

  heap_->VerifyCountersBeforeConcurrentSweeping(

  const auto target_worklist =
  if (!target_worklist) {

    : public HeapVisitor<MarkCompactCollector::SharedHeapObjectVisitor> {
    if (object.GetHeapObject(&heap_object))
    if (!object.GetHeapObject(&heap_object)) return;
#ifdef V8_ENABLE_SANDBOX
class MarkExternalPointerFromExternalStringTable : public RootVisitor {
  explicit MarkExternalPointerFromExternalStringTable(
      ExternalPointerTable* shared_table, ExternalPointerTable::Space* space)
      : visitor(shared_table, space) {}

  void VisitRootPointers(Root root, const char* description,
                         FullObjectSlot start, FullObjectSlot end) override {
    for (FullObjectSlot p = start; p < end; ++p) {
      Tagged<Object> o = *p;
      if (IsHeapObject(o)) {
        Tagged<HeapObject> heap_object = Cast<HeapObject>(o);
        if (IsExternalString(heap_object)) {
          Tagged<ExternalString> string = Cast<ExternalString>(heap_object);
          string->VisitExternalPointers(&visitor);

  class MarkExternalPointerTableVisitor : public ObjectVisitor {
    explicit MarkExternalPointerTableVisitor(ExternalPointerTable* table,
                                             ExternalPointerTable::Space* space)
    void VisitExternalPointer(Tagged<HeapObject> host,
                              ExternalPointerSlot slot) override {
      DCHECK(!slot.tag_range().IsEmpty());
      DCHECK(IsSharedExternalPointerType(slot.tag_range()));
    void VisitPointers(Tagged<HeapObject> host, ObjectSlot start,
                       ObjectSlot end) override {
    void VisitPointers(Tagged<HeapObject> host, MaybeObjectSlot start,
                       MaybeObjectSlot end) override {
    void VisitInstructionStreamPointer(Tagged<Code> host,
                                       InstructionStreamSlot slot) override {
    void VisitCodeTarget(Tagged<InstructionStream> host,
                         RelocInfo* rinfo) override {
    void VisitEmbeddedPointer(Tagged<InstructionStream> host,
                              RelocInfo* rinfo) override {

    ExternalPointerTable* table_;
    ExternalPointerTable::Space* space_;

  MarkExternalPointerTableVisitor visitor;
  } else if (IsAllocationSite(heap_object) &&
    while (IsAllocationSite(nested)) {
      nested = current_site->nested_site();
      current_site->MarkZombie();

    DCHECK(IsEphemeronHashTable(host));
    if (value.IsStrongOrWeak()) {
          host_metadata, host_chunk->Offset(slot));
          host_metadata, host_chunk->Offset(slot));
                                               host_chunk->Offset(slot));
          host_metadata, host_chunk->Offset(slot));
          host_metadata, host_chunk->Offset(slot));
          host_metadata, host_chunk->Offset(slot));

      IsBytecodeArray(dst)) {

    abort_evacuation_at_address_ = chunk->area_end();
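  // Stress mode for aborted compaction: with probability kFraction, evacuation
  // of a page is cut off at a random address inside the page area; otherwise
  // it runs to the page end.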
  void SetUpAbortEvacuationAtAddress(MutablePageMetadata* chunk) {
      const double kFraction = 0.05;
      if (rng_->NextDouble() < kFraction) {
        const double abort_evacuation_percentage = rng_->NextDouble();
        abort_evacuation_at_address_ =
            chunk->area_start() +
            abort_evacuation_percentage * chunk->area_size();
    abort_evacuation_at_address_ = chunk->area_end();
#if V8_COMPRESS_POINTERS
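  // Object migration: each destination space follows the same raw pattern
  // (CopyBlock into the target, notify migration observers, then re-visit the
  // copy to record slots); code objects additionally copy header and body via
  // a writable allocation and relocate by the address delta.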
  template <MigrationMode mode>
    Address src_addr = src.address();
      base->heap_->CopyBlock(dst_addr, src_addr, size);
      base->ExecuteMigrationObservers(dest, src, dst, size);
      base->record_visitor_->Visit(dst->map(cage_base), dst, size);
      base->heap_->CopyBlock(dst_addr, src_addr, size);
      base->ExecuteMigrationObservers(dest, src, dst, size);
      base->record_visitor_->Visit(dst->map(cage_base), dst, size);
      base->heap_->CopyBlock(dst_addr, src_addr, size);
      base->ExecuteMigrationObservers(dest, src, dst, size);
      base->record_visitor_->Visit(dst->map(cage_base), dst, size);
      writable_allocation.CopyData(0, reinterpret_cast<uint8_t*>(src_addr),
          reinterpret_cast<uint8_t*>(src_addr +
      istream->Relocate(writable_allocation, dst_addr - src_addr);
      base->ExecuteMigrationObservers(dest, src, dst, size);
      base->record_visitor_->Visit(dst->map(cage_base), dst, size);
      base->heap_->CopyBlock(dst_addr, src_addr, size);
      base->ExecuteMigrationObservers(dest, src, dst, size);
      heap->isolate()->has_shared_space()) {

    if (V8_UNLIKELY(object.address() >= abort_evacuation_at_address_)) {
    if (allocation.To(target_object)) {
                                            map->instance_type());
      obs->Move(dest, src, dst, size);

  std::optional<base::RandomNumberGenerator> rng_;

        v8_flags.shortcut_strings_with_stack) {
           heap->incremental_marking()->IsMajorMarking());
          "MarkCompactCollector: young object promotion failed");
    if (map->visitor_id() == kVisitThinString) {
    if (allocation.IsFailure()) {
    bool ok = allocation.To(target_object);
    return space_allocated_in;
    if (allocation.IsFailure()) {
          "MarkCompactCollector: semi-space copy, fallback in old gen");
      page->heap()->new_space()->PromotePageToOldSpace(
                                  size, &target_object)) {
               .IsForwardingAddress());
    DCHECK_EQ(object->SizeFromMap(map), size);
    visitor.Visit(map, object, size);
      heap, heap->non_atomic_marking_state(), heap_object);
  Heap* shared_space_heap =

  if (isolate->is_shared_space_isolate()) {
                     &custom_root_body_visitor);
    isolate->global_safepoint()->IterateClientIsolates(
        [this, &client_root_visitor,
         &client_custom_root_body_visitor](Isolate* client) {
                           &client_root_visitor,
                     Heap::IterateRootsMode::kMainIsolate);

  if (isolate->is_shared_space_isolate()) {
    isolate->global_safepoint()->IterateClientIsolates(
        [v = &client_root_visitor](Isolate* client) {
              v, Heap::IterateRootsMode::kClientIsolate);

  if (!isolate->is_shared_space_isolate()) return;
      [collector = this](Isolate* client) {
        collector->MarkObjectsFromClientHeap(client);

  Heap* client_heap = client->heap();
  if (auto* new_space = client_heap->new_space()) {
    std::unique_ptr<ObjectIterator> iterator =
       obj = iterator->Next()) {
       chunk = chunk_iterator.next()) {
          collector->MarkRootObject(
              Root::kClientHeap, heap_object,
    if (slot_count == 0) {
          collector->MarkRootObject(
              Root::kClientHeap, heap_object,
    if (typed_slot_count == 0) {
    const auto protected_slot_count =
          collector->MarkRootObject(
              Root::kClientHeap, heap_object,
    if (protected_slot_count == 0) {
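  // Client-heap marking (shared-space isolate only): objects in client heaps
  // that are referenced from the shared heap are discovered through the
  // old-to-shared remembered sets (regular, typed, and protected slots) and
  // marked as Root::kClientHeap roots.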
#ifdef V8_ENABLE_SANDBOX
  ExternalPointerTable& shared_table = client->shared_external_pointer_table();
  ExternalPointerTable::Space* shared_space =
      client->shared_external_pointer_space();
  MarkExternalPointerFromExternalStringTable external_string_visitor(
      &shared_table, shared_space);

  int max_iterations = v8_flags.ephemeron_fixpoint_iterations;
  bool another_ephemeron_iteration_main_thread;

    if (iterations >= max_iterations) {
               GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
  } while (another_ephemeron_iteration_main_thread ||

  bool another_ephemeron_iteration = false;
        another_ephemeron_iteration = true;
    size_t objects_processed;
    std::tie(std::ignore, objects_processed) =
    if (objects_processed > 0) another_ephemeron_iteration = true;
  return another_ephemeron_iteration;

           GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR);
      it->second.push_back(ephemeron.value);
             GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
  } while (work_to_do);

  if (!cpp_heap) return;
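// Worklist draining: objects are popped, visited, and their size accounted;
// every kDeadlineCheckInterval objects (a power of two, checked via the
// bitmask below) the byte budget / deadline is re-examined.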
constexpr size_t kDeadlineCheckInterval = 128u;

template <MarkCompactCollector::MarkingWorklistProcessingMode mode>
  size_t bytes_processed = 0;
  size_t objects_processed = 0;
    CHECK(!IsFreeSpaceOrFiller(object, cage_base));
    if constexpr (mode ==
      const auto target_worklist =
      if (target_worklist) {
        MarkObject(object, value, target_worklist.value());
    if (is_per_context_mode) {
    if (is_per_context_mode) {
                                          map, object, visited_size);
    bytes_processed += visited_size;
    objects_processed++;
                  "kDeadlineCheckInterval must be power of 2");
    if ((objects_processed & (kDeadlineCheckInterval - 1)) == 0 &&
    if (bytes_processed >= max_bytes_to_process) {
  return std::make_pair(bytes_processed, objects_processed);

  if (!target_worklist) {
  for (auto value : values) {

  if (it.frame()->is_unoptimized_js()) return;
  if (it.frame()->is_optimized_js()) {
    if (!lookup_result->has_instruction_stream()) return;
    if (!lookup_result->CanDeoptAt(isolate,
                                   it.frame()->maybe_unauthenticated_pc())) {
          lookup_result->raw_instruction_stream());
  std::stringstream live, dead;
  if (v8_flags.trace_gc_object_stats) {

  const bool should_retain_maps =
    DCHECK_EQ(0, retained_maps->length() % 2);
    for (int i = 0; i < retained_maps->length(); i += 2) {
      if (!value.GetHeapObjectIfWeak(&map_heap_object)) {
      int age = retained_maps->Get(i + 1).ToSmi().value();
        new_age = v8_flags.retain_maps_for_n_gc;
      if (new_age != age) {

  const bool was_marked_incrementally =
  if (was_marked_incrementally) {
        heap_->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL,
    DCHECK(incremental_marking->IsMajorMarking());
    incremental_marking->Stop();

  state_ = MARK_LIVE_OBJECTS;
             GCTracer::Scope::MC_MARK_FULL_CLOSURE_PARALLEL_JOIN);
    cpp_heap->EnterProcessGlobalAtomicPause();
  if (was_marked_incrementally) {
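// ParallelClearingJob runs a set of ClearingItems (string table clearing,
// weak-reference clearing, ...) on background threads; concurrency drops to
// at most one worker unless --parallel-weak-ref-clearing is enabled and
// background threads are used in this cycle.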
class ParallelClearingJob final : public v8::JobTask {
  class ClearingItem {
    virtual ~ClearingItem() = default;

  explicit ParallelClearingJob(MarkCompactCollector* collector)
  ~ParallelClearingJob() override = default;
  ParallelClearingJob(const ParallelClearingJob&) = delete;
  ParallelClearingJob& operator=(const ParallelClearingJob&) = delete;

  void Run(JobDelegate* delegate) override {
    std::unique_ptr<ClearingItem> item;
      item = std::move(items_.back());
      item->Run(delegate);

  size_t GetMaxConcurrency(size_t worker_count) const override {
    if (!v8_flags.parallel_weak_ref_clearing ||
        !collector_->UseBackgroundThreadsInCycle()) {
      return std::min<size_t>(items_.size(), 1);

  void Add(std::unique_ptr<ClearingItem> item) {
    items_.push_back(std::move(item));

  std::vector<std::unique_ptr<ClearingItem>> items_;
class ClearStringTableJobItem final : public ParallelClearingJob::ClearingItem {
  explicit ClearStringTableJobItem(Isolate* isolate)
        trace_id_(reinterpret_cast<uint64_t>(this) ^
                  isolate->heap()->tracer()->CurrentEpoch(
                      GCTracer::Scope::MC_CLEAR_STRING_TABLE)) {}

  void Run(JobDelegate* delegate) final {
    SetCurrentIsolateScope isolate_scope(isolate_);
    if (isolate_->OwnsStringTables()) {
                     GCTracer::Scope::MC_CLEAR_STRING_TABLE,
      StringTable* string_table = isolate_->string_table();
      InternalizedStringTableCleaner internalized_visitor(isolate_->heap());
      string_table->DropOldData();
      string_table->IterateElements(&internalized_visitor);
      string_table->NotifyElementsRemoved(
          internalized_visitor.PointersRemoved());

  uint64_t trace_id() const { return trace_id_; }
         v8_flags.transition_strings_during_gc_with_stack);
    forwarding_table->Reset();
         !v8_flags.transition_strings_during_gc_with_stack);

    if (IsThinString(original_string)) {
    if (IsExternalString(original_string)) {
      record->DisposeUnusedExternalResource(isolate_, original_string);
        record->external_resource(&is_one_byte);
    if (external_resource == nullptr) return;
      original_string->MakeExternalDuringGC(
              external_resource));
      original_string->MakeExternalDuringGC(
              external_resource));
    if (IsInternalizedString(original_string)) return;
    original_string->MakeThin(isolate_, forward_string);
class SharedStructTypeRegistryCleaner final : public RootVisitor {
  void VisitRootPointers(Root root, const char* description,
  void VisitRootPointers(Root root, const char* description,
    DCHECK_EQ(root, Root::kSharedStructTypeRegistry);
    auto* marking_state = heap_->marking_state();

class ClearSharedStructTypeRegistryJobItem final
    : public ParallelClearingJob::ClearingItem {
  explicit ClearSharedStructTypeRegistryJobItem(Isolate* isolate)
    DCHECK(isolate->is_shared_space_isolate());

  void Run(JobDelegate* delegate) final {
    SetCurrentIsolateScope isolate_scope(isolate_);
    auto* registry = isolate_->shared_struct_type_registry();
    SharedStructTypeRegistryCleaner cleaner(isolate_->heap());
    registry->IterateElements(isolate_, &cleaner);
    registry->NotifyElementsRemoved(cleaner.ElementsRemoved());
    : public ParallelClearingJob::ClearingItem {
        trace_id_(reinterpret_cast<uint64_t>(this) ^
                  collector->heap()->tracer()->CurrentEpoch(
                 GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES_TRIVIAL,

    : public ParallelClearingJob::ClearingItem {
            reinterpret_cast<uint64_t>(this) ^
            collector->heap()->tracer()->CurrentEpoch(
                GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES_FILTER_NON_TRIVIAL)) {
                 GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES_FILTER_NON_TRIVIAL,
  if (isolate->OwnsStringTables()) {
             GCTracer::Scope::MC_CLEAR_STRING_FORWARDING_TABLE);
      v8_flags.transition_strings_during_gc_with_stack) {

      isolate->topmost_script_having_context();
    isolate->clear_topmost_script_having_context();

  std::unique_ptr<JobHandle> clear_string_table_job_handle;
    auto job = std::make_unique<ParallelClearingJob>(this);
    auto job_item = std::make_unique<ClearStringTableJobItem>(isolate);
    const uint64_t trace_id = job_item->trace_id();
    job->Add(std::move(job_item));

    if (isolate->is_shared_space_isolate() &&
        isolate->shared_struct_type_registry()) {
      auto registry_job_item =
          std::make_unique<ClearSharedStructTypeRegistryJobItem>(isolate);
      job->Add(std::move(registry_job_item));

    clear_string_table_job_handle->NotifyConcurrencyIncrease();

        external_visitor(heap_);
    isolate->global_handles()->IterateWeakRootsForPhantomHandles(
    if (isolate->is_shared_space_isolate()) {
      isolate->global_safepoint()->IterateClientIsolates([](Isolate* client) {

#ifndef V8_ENABLE_LEAPTIERING

#ifdef V8_ENABLE_LEAPTIERING
    jdt->Sweep(heap_->js_dispatch_table_space(), isolate->counters(),
               [&](JSDispatchEntry& entry) {
                 Tagged<Code> code = entry.GetCode();
                 if (MarkingHelper::IsUnmarkedAndNotAlwaysLive(
                         heap_, marking_state_, code)) {
                   DCHECK(code->kind() == CodeKind::FOR_TESTING ||
                          code->kind() == CodeKind::BASELINE ||
                          code->kind() == CodeKind::MAGLEV ||
                          code->kind() == CodeKind::TURBOFAN_JS);
                   entry.SetCodeAndEntrypointPointer(
                       compile_lazy.ptr(), compile_lazy->instruction_start());
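                   // Unmarked code in a dispatch table entry is replaced by
                   // the CompileLazy builtin above, so the dispatch handle
                   // stays valid; only FOR_TESTING, BASELINE, MAGLEV, and
                   // TURBOFAN_JS code is expected to be flushed this way.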
    ClearFlushedJsFunctions();

    MarkCompactWeakObjectRetainer mark_compact_object_retainer(heap_,

    ClearFullMapTransitions();
    WeakenStrongDescriptorArrays();

  std::unique_ptr<JobHandle> clear_trivial_weakrefs_job_handle;
    auto job = std::make_unique<ParallelClearingJob>(this);
    auto job_item = std::make_unique<ClearTrivialWeakRefJobItem>(this);
    const uint64_t trace_id = job_item->trace_id();
    job->Add(std::move(job_item));

  std::unique_ptr<JobHandle> filter_non_trivial_weakrefs_job_handle;
    auto job = std::make_unique<ParallelClearingJob>(this);
    auto job_item = std::make_unique<FilterNonTrivialWeakRefJobItem>(this);
    const uint64_t trace_id = job_item->trace_id();
    job->Add(std::move(job_item));
    filter_non_trivial_weakrefs_job_handle =

  if (v8_flags.parallel_weak_ref_clearing && UseBackgroundThreadsInCycle()) {
    clear_trivial_weakrefs_job_handle->NotifyConcurrencyIncrease();
    filter_non_trivial_weakrefs_job_handle->NotifyConcurrencyIncrease();

#ifdef V8_COMPRESS_POINTERS
    isolate->external_pointer_table().EvacuateAndSweepAndCompact(
        isolate->heap()->old_external_pointer_space(),
        isolate->heap()->young_external_pointer_space(), isolate->counters());
    isolate->heap()->young_external_pointer_space()->AssertEmpty();
    if (isolate->owns_shareable_data()) {
      isolate->shared_external_pointer_table().SweepAndCompact(
          isolate->shared_external_pointer_space(), isolate->counters());
    isolate->cpp_heap_pointer_table().SweepAndCompact(
        isolate->heap()->cpp_heap_pointer_space(), isolate->counters());

#ifdef V8_ENABLE_SANDBOX
    isolate->trusted_pointer_table().Sweep(heap_->trusted_pointer_space(),
                                           isolate->counters());
    if (isolate->owns_shareable_data()) {
      isolate->shared_trusted_pointer_table().Sweep(
          isolate->shared_trusted_pointer_space(), isolate->counters());
        heap_->code_pointer_space(), isolate->counters());

#ifdef V8_ENABLE_WEBASSEMBLY
             GCTracer::Scope::MC_SWEEP_WASM_CODE_POINTER_TABLE);

             GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES_JOIN_FILTER_JOB);
    filter_non_trivial_weakrefs_job_handle->Join();

    ClearNonTrivialWeakReferences();
    ClearWeakCollections();

    AllowDeoptimization allow_deoptimization(heap_->isolate());
    MarkDependentCodeForDeoptimization();

  clear_string_table_job_handle->Join();
  clear_trivial_weakrefs_job_handle->Join();

  heap()->DeactivateMajorGCInProgressFlag();
#ifndef V8_ENABLE_LEAPTIERING

void MarkCompactCollector::MarkDependentCodeForDeoptimization() {
  while (local_weak_objects()->weak_objects_in_code_local.Pop(
      &weak_object_in_code)) {
    if (MarkingHelper::IsUnmarkedAndNotAlwaysLive(
            heap_, non_atomic_marking_state_, object) &&
        !code->embedded_objects_cleared()) {
      if (!code->marked_for_deoptimization()) {
        code->SetMarkedForDeoptimization(heap_->isolate(),
                                         LazyDeoptimizeReason::kWeakObjects);
        have_code_to_deoptimize_ = true;
      code->ClearEmbeddedObjectsAndJSDispatchHandles(heap_);
      DCHECK(code->embedded_objects_cleared());
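// Map transition clearing: when a transition target map died, its back
// pointer is inspected and, if the parent map is still alive and shares the
// dead target's descriptor array, the descriptors are trimmed back to the
// parent's own descriptors.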
void MarkCompactCollector::ClearPotentialSimpleMapTransition(
  DCHECK(non_atomic_marking_state_->IsUnmarked(dead_target));
  Tagged<Object> potential_parent = dead_target->constructor_or_back_pointer();
  if (IsMap(potential_parent)) {
    if (MarkingHelper::IsMarkedOrAlwaysLive(heap_, non_atomic_marking_state_,
      ClearPotentialSimpleMapTransition(parent, dead_target);

void MarkCompactCollector::ClearPotentialSimpleMapTransition(
  DCHECK(!map->is_prototype_map());
  DCHECK(!dead_target->is_prototype_map());
  DCHECK_EQ(map->raw_transitions(), MakeWeak(dead_target));
  int number_of_own_descriptors = map->NumberOfOwnDescriptors();
      map->instance_descriptors(heap_->isolate());
  if (descriptors == dead_target->instance_descriptors(heap_->isolate()) &&
      number_of_own_descriptors > 0) {
    TrimDescriptorArray(map, descriptors);
    DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);

    ClearPotentialSimpleMapTransition(map);

  if (IsDescriptorArray(host)) {
  if (map->is_stable() && FieldType::kFieldTypesCanBeClearedOnGC) {
    location.store(FieldType::None());
    location.store(FieldType::Any());
void MarkCompactCollector::FlushBytecodeFromSFI(
  DCHECK(shared_info->HasBytecodeArray());

  int start_position = shared_info->StartPosition();
  int end_position = shared_info->EndPosition();

  shared_info->DiscardCompiledMetadata(

  static_assert(BytecodeArray::SizeFor(0) >=
                UncompiledDataWithoutPreparseData::kSize);

      shared_info->GetBytecodeArray(heap_->isolate());
#ifdef V8_ENABLE_SANDBOX
  DCHECK(!HeapLayout::InWritableSharedSpace(shared_info));
  TrustedPointerTable& table = heap_->isolate()->trusted_pointer_table();
      bytecode_array->RawIndirectPointerField(
          BytecodeArray::kSelfIndirectPointerOffset,
          kBytecodeArrayIndirectPointerTag);

  Address compiled_data_start = compiled_data.address();
      MutablePageMetadata::FromAddress(compiled_data_start);
      chunk, compiled_data_start, compiled_data_start + compiled_data_size,
      SlotSet::FREE_EMPTY_BUCKETS);
      chunk, compiled_data_start, compiled_data_start + compiled_data_size,
      SlotSet::FREE_EMPTY_BUCKETS);
      chunk, compiled_data_start, compiled_data_start + compiled_data_size,
      SlotSet::FREE_EMPTY_BUCKETS);
      chunk, compiled_data_start, compiled_data_start + compiled_data_size,
      SlotSet::FREE_EMPTY_BUCKETS);
      chunk, compiled_data_start, compiled_data_start + compiled_data_size,
      SlotSet::FREE_EMPTY_BUCKETS);

  compiled_data->set_map_after_allocation(

  if (!heap_->IsLargeObject(compiled_data)) {
    const int aligned_filler_offset =
    heap_->CreateFillerObjectAt(compiled_data.address() + aligned_filler_offset,
                                compiled_data_size - aligned_filler_offset);

  uncompiled_data->InitAfterBytecodeFlush(
      heap_->isolate(), inferred_name, start_position, end_position,

  if (MarkingHelper::GetLivenessMode(heap_, uncompiled_data) ==
      MarkingHelper::LivenessMode::kMarkbit) {

#ifdef V8_ENABLE_SANDBOX
  TrustedPointerTable::Space* space = heap_->trusted_pointer_space();

  shared_info->set_uncompiled_data(uncompiled_data);
  DCHECK(!shared_info->is_compiled());
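// Bytecode flushing: for each flushing candidate the collector checks whether
// its BytecodeArray (and, with --flush-baseline-code, its baseline Code) is
// still marked; dead bytecode is replaced in place by UncompiledData so the
// function can be recompiled lazily later.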
void MarkCompactCollector::ProcessOldCodeCandidates() {
  int number_of_flushed_sfis = 0;
  while (local_weak_objects()->code_flushing_candidates_local.Pop(
      &flushing_candidate)) {
    bool is_bytecode_live;
    if (v8_flags.flush_baseline_code && flushing_candidate->HasBaselineCode()) {
      is_bytecode_live = ProcessOldBaselineSFI(flushing_candidate);
      is_bytecode_live = ProcessOldBytecodeSFI(flushing_candidate);

    if (!is_bytecode_live) number_of_flushed_sfis++;

#ifndef V8_ENABLE_SANDBOX
    ObjectSlot slot = flushing_candidate->RawField(
        SharedFunctionInfo::kTrustedFunctionDataOffset);
    if (IsHeapObject(*slot)) {
      RecordSlot(flushing_candidate, slot, Cast<HeapObject>(*slot));
                             number_of_flushed_sfis);

bool MarkCompactCollector::ProcessOldBytecodeSFI(
  const bool bytecode_already_decompiled =
      flushing_candidate->HasUncompiledData();
  if (!bytecode_already_decompiled) {
        flushing_candidate->GetBytecodeArray(isolate);
    if (MarkingHelper::IsMarkedOrAlwaysLive(heap_, non_atomic_marking_state_,
  FlushSFI(flushing_candidate, bytecode_already_decompiled);

bool MarkCompactCollector::ProcessOldBaselineSFI(
      baseline_code->instruction_stream(baseline_code->code_cage_base(),
      baseline_code->bytecode_or_interpreter_data();
  const bool bytecode_already_decompiled =
      IsUncompiledData(baseline_bytecode_or_interpreter_data, heap_->isolate());
  bool is_bytecode_live = false;
  if (!bytecode_already_decompiled) {
        flushing_candidate->GetBytecodeArray(heap_->isolate());
    is_bytecode_live = MarkingHelper::IsMarkedOrAlwaysLive(
        heap_, non_atomic_marking_state_, bytecode);

  if (MarkingHelper::IsMarkedOrAlwaysLive(heap_, non_atomic_marking_state_,
                                          baseline_istream)) {
    DCHECK(is_bytecode_live);
    DCHECK(MarkingHelper::IsMarkedOrAlwaysLive(heap_, non_atomic_marking_state_,
  } else if (is_bytecode_live || bytecode_already_decompiled) {
    flushing_candidate->FlushBaselineCode();

  if (!is_bytecode_live) {
    FlushSFI(flushing_candidate, bytecode_already_decompiled);
  return is_bytecode_live;

              bool bytecode_already_decompiled) {
  if (bytecode_already_decompiled) {
    sfi->DiscardCompiledMetadata(
    FlushBytecodeFromSFI(sfi);

void MarkCompactCollector::ClearFlushedJsFunctions() {
  while (local_weak_objects()->flushed_js_functions_local.Pop(
      &flushed_js_function)) {
      RecordSlot(object, slot, Cast<HeapObject>(target));
    flushed_js_function->ResetIfCodeFlushed(heap_->isolate(),
                                            gc_notify_updated_slot);

#ifndef V8_ENABLE_LEAPTIERING

void MarkCompactCollector::ProcessFlushedBaselineCandidates() {
  while (local_weak_objects()->baseline_flushing_candidates_local.Pop(
      &flushed_js_function)) {
      RecordSlot(object, slot, Cast<HeapObject>(target));
    flushed_js_function->ResetIfCodeFlushed(heap_->isolate(),
                                            gc_notify_updated_slot);
#ifndef V8_ENABLE_SANDBOX
    ObjectSlot slot = flushed_js_function->RawField(JSFunction::kCodeOffset);
    RecordSlot(flushed_js_function, slot, Cast<HeapObject>(*slot));
void MarkCompactCollector::ClearFullMapTransitions() {
  while (local_weak_objects()->transition_arrays_local.Pop(&array)) {
    int num_transitions = array->number_of_transitions();
    if (num_transitions > 0) {
      if (array->GetTargetIfExists(0, isolate, &map)) {
            map->constructor_or_back_pointer();
          DCHECK(isolate->has_active_deserializer());
                    Smi::uninitialized_deserialization_value());
        Tagged<Map> parent = Cast<Map>(map->constructor_or_back_pointer());
        const bool parent_is_alive = MarkingHelper::IsMarkedOrAlwaysLive(
            heap_, non_atomic_marking_state_, parent);
            parent_is_alive ? parent->instance_descriptors(isolate)
        bool descriptors_owner_died =
            CompactTransitionArray(parent, array, descriptors);
        if (descriptors_owner_died) {
          TrimDescriptorArray(parent, descriptors);

bool MarkCompactCollector::TransitionArrayNeedsCompaction(
  for (int i = 0; i < num_transitions; ++i) {
    if (raw_target.IsSmi()) {
      DCHECK(heap_->isolate()->has_active_deserializer());
      DCHECK_EQ(raw_target.ToSmi(), Smi::uninitialized_deserialization_value());
      for (int j = 0; j < num_transitions; ++j) {
            !transitions->GetRawTarget(j).IsSmi(),
            !non_atomic_marking_state_->IsUnmarked(transitions->GetTarget(j)));
    } else if (MarkingHelper::IsUnmarkedAndNotAlwaysLive(
                   heap_, non_atomic_marking_state_,
                   TransitionsAccessor::GetTargetFromRaw(raw_target))) {
      for (int j = 0; j < num_transitions; ++j) {
        DCHECK(!transitions->GetRawTarget(j).IsSmi());
bool MarkCompactCollector::CompactTransitionArray(
  DCHECK(!map->is_prototype_map());
  int num_transitions = transitions->number_of_transitions();
  if (!TransitionArrayNeedsCompaction(transitions, num_transitions)) {

  bool descriptors_owner_died = false;
  int transition_index = 0;

  for (int i = 0; i < num_transitions; ++i) {
    DCHECK_EQ(target->constructor_or_back_pointer(), map);

    if (MarkingHelper::IsUnmarkedAndNotAlwaysLive(
            heap_, non_atomic_marking_state_, target)) {
          target->instance_descriptors(heap_->isolate()) == descriptors) {
        DCHECK(!target->is_prototype_map());
        descriptors_owner_died = true;

      if (i != transition_index) {
        transitions->SetKey(transition_index, key);
        HeapObjectSlot key_slot = transitions->GetKeySlot(transition_index);
        RecordSlot(transitions, key_slot, key);
        transitions->SetRawTarget(transition_index, raw_target);
        HeapObjectSlot target_slot =
            transitions->GetTargetSlot(transition_index);
        RecordSlot(transitions, target_slot, raw_target.GetHeapObject());

  if (transition_index == num_transitions) {
    DCHECK(!descriptors_owner_died);

  int old_capacity_in_entries = transitions->Capacity();
  if (transition_index < old_capacity_in_entries) {
    int old_capacity = transitions->length();
    static_assert(TransitionArray::kEntryKeyIndex == 0);
    DCHECK_EQ(TransitionArray::ToKeyIndex(old_capacity_in_entries),
    int new_capacity = TransitionArray::ToKeyIndex(transition_index);
    heap_->RightTrimArray(transitions, new_capacity, old_capacity);
    transitions->SetNumberOfTransitions(transition_index);
  return descriptors_owner_died;
void MarkCompactCollector::RightTrimDescriptorArray(
  int old_nof_all_descriptors = array->number_of_all_descriptors();
  int new_nof_all_descriptors = old_nof_all_descriptors - descriptors_to_trim;
  Address start = array->GetDescriptorSlot(new_nof_all_descriptors).address();
  Address end = array->GetDescriptorSlot(old_nof_all_descriptors).address();
      SlotSet::FREE_EMPTY_BUCKETS);
      chunk, start, end, SlotSet::FREE_EMPTY_BUCKETS);
      SlotSet::FREE_EMPTY_BUCKETS);
      SlotSet::FREE_EMPTY_BUCKETS);

  if (aligned_start < aligned_end) {
    heap_->CreateFillerObjectAt(
        aligned_start, static_cast<int>(aligned_end - aligned_start));
  if (heap::ShouldZapGarbage()) {
    Address zap_end = std::min(aligned_start, end);
  array->set_number_of_all_descriptors(new_nof_all_descriptors);

void MarkCompactCollector::RecordStrongDescriptorArraysForWeakening(
  DCHECK(heap_->incremental_marking()->IsMajorMarking());
  strong_descriptor_arrays_.push_back(std::move(strong_descriptor_arrays));

void MarkCompactCollector::WeakenStrongDescriptorArrays() {
  for (auto& vec : strong_descriptor_arrays_) {
    for (auto it = vec.begin(); it != vec.end(); ++it) {
      DCHECK(IsStrongDescriptorArray(raw));
      raw->set_map_safe_transition_no_write_barrier(heap_->isolate(),
                                                    descriptor_array_map);
  strong_descriptor_arrays_.clear();

void MarkCompactCollector::TrimDescriptorArray(
  int number_of_own_descriptors = map->NumberOfOwnDescriptors();
  if (number_of_own_descriptors == 0) {
      descriptors->number_of_all_descriptors() - number_of_own_descriptors;
    descriptors->set_number_of_descriptors(number_of_own_descriptors);
    RightTrimDescriptorArray(descriptors, to_trim);

  TrimEnumCache(map, descriptors);
  descriptors->Sort();

  DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
  map->set_owns_descriptors(true);

  int live_enum = map->EnumLength();
  if (live_enum == kInvalidEnumCacheSentinel) {
    live_enum = map->NumberOfEnumerableProperties();
  if (live_enum == 0) return descriptors->ClearEnumCache();

  int keys_length = keys->length();
  if (live_enum >= keys_length) return;
  heap_->RightTrimArray(keys, live_enum, keys_length);

  int indices_length = indices->length();
  if (live_enum >= indices_length) return;
  heap_->RightTrimArray(indices, live_enum, indices_length);
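// Ephemeron (WeakMap/WeakSet) clearing: entries whose key did not survive
// marking are removed from their EphemeronHashTable, and remembered-set
// entries for dead tables are dropped.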
void MarkCompactCollector::ClearWeakCollections() {
  TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
  while (local_weak_objects()->ephemeron_hash_tables_local.Pop(&table)) {
      if (IsHeapObject(value)) {
                          heap_, non_atomic_marking_state_, key),
                      MarkingHelper::IsMarkedOrAlwaysLive(
                          heap_, non_atomic_marking_state_, heap_object));
      if (MarkingHelper::IsUnmarkedAndNotAlwaysLive(
              heap_, non_atomic_marking_state_, key)) {
        table->RemoveEntry(i);

  auto* table_map = heap_->ephemeron_remembered_set()->tables();
  for (auto it = table_map->begin(); it != table_map->end();) {
    if (MarkingHelper::IsUnmarkedAndNotAlwaysLive(
            heap_, non_atomic_marking_state_, it->first)) {
      it = table_map->erase(it);

template <typename TObjectAndSlot, typename TMaybeSlot>
void MarkCompactCollector::ClearWeakReferences(
  TObjectAndSlot slot;
  while (worklist.Pop(&slot)) {
    TMaybeSlot location(slot.slot);
    if (location.load().GetHeapObjectIfWeak(&value)) {
      DCHECK(!IsWeakCell(value));
      if (MarkingHelper::IsMarkedOrAlwaysLive(heap_, non_atomic_marking_state_,
        RecordSlot(slot.heap_object, slot.slot, value);
        DCHECK(MainMarkingVisitor::IsTrivialWeakReferenceValue(slot.heap_object,
        location.store(cleared_weak_ref);

void MarkCompactCollector::ClearTrivialWeakReferences() {
  ClearWeakReferences<HeapObjectAndSlot, MaybeObjectSlot>(
      local_weak_objects()->weak_references_trivial_local, cleared_weak_ref);

void MarkCompactCollector::ClearTrustedWeakReferences() {
  ClearWeakReferences<TrustedObjectAndSlot, ProtectedMaybeObjectSlot>(
      local_weak_objects()->weak_references_trusted_local, cleared_weak_ref);

void MarkCompactCollector::FilterNonTrivialWeakReferences() {
  while (local_weak_objects()->weak_references_non_trivial_local.Pop(&slot)) {
    if ((*location).GetHeapObjectIfWeak(&value)) {
      DCHECK(!IsWeakCell(value));
      if (MarkingHelper::IsMarkedOrAlwaysLive(heap_, non_atomic_marking_state_,
        DCHECK(!MainMarkingVisitor::IsTrivialWeakReferenceValue(

void MarkCompactCollector::ClearNonTrivialWeakReferences() {
           GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES_NON_TRIVIAL);
  while (local_weak_objects()->weak_references_non_trivial_unmarked_local.Pop(
    DCHECK(!IsWeakCell(value));
    DCHECK(!HeapLayout::InReadOnlySpace(value));
           !HeapLayout::InBlackAllocatedPage(value));
    DCHECK(!non_atomic_marking_state_->IsMarked(value));
    if (!SpecialClearMapSlot(slot.heap_object, Cast<Map>(value), slot.slot)) {
      slot.slot.store(cleared_weak_ref);
void MarkCompactCollector::ClearJSWeakRefs() {
  TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_CLEAR_JS_WEAK_REFERENCES);
  while (local_weak_objects()->js_weak_refs_local.Pop(&weak_ref)) {
    if (MarkingHelper::IsUnmarkedAndNotAlwaysLive(
            heap_, non_atomic_marking_state_, target)) {
      weak_ref->set_target(ReadOnlyRoots(isolate).undefined_value());
      ObjectSlot slot = weak_ref->RawField(JSWeakRef::kTargetOffset);
      RecordSlot(weak_ref, slot, target);

  while (local_weak_objects()->weak_cells_local.Pop(&weak_cell)) {
      if (IsHeapObject(target)) {
        RecordSlot(object, slot, Cast<HeapObject>(target));
    if (MarkingHelper::IsUnmarkedAndNotAlwaysLive(
            heap_, non_atomic_marking_state_, target)) {
      DCHECK(Object::CanBeHeldWeakly(target));
          Cast<JSFinalizationRegistry>(weak_cell->finalization_registry());
      if (!finalization_registry->scheduled_for_cleanup()) {
        heap_->EnqueueDirtyJSFinalizationRegistry(finalization_registry,
                                                  gc_notify_updated_slot);
      weak_cell->Nullify(isolate, gc_notify_updated_slot);
      DCHECK(finalization_registry->NeedsCleanup());
      DCHECK(finalization_registry->scheduled_for_cleanup());
      ObjectSlot slot = weak_cell->RawField(WeakCell::kTargetOffset);
      RecordSlot(weak_cell, slot, Cast<HeapObject>(*slot));

    if (MarkingHelper::IsUnmarkedAndNotAlwaysLive(
            heap_, non_atomic_marking_state_, unregister_token)) {
      DCHECK(Object::CanBeHeldWeakly(unregister_token));
          Cast<JSFinalizationRegistry>(weak_cell->finalization_registry());
      finalization_registry->RemoveUnregisterToken(
          unregister_token, isolate,
          JSFinalizationRegistry::kKeepMatchedCellsInRegistry,
          gc_notify_updated_slot);
      ObjectSlot slot = weak_cell->RawField(WeakCell::kUnregisterTokenOffset);
      RecordSlot(weak_cell, slot, Cast<HeapObject>(*slot));
  heap_->PostFinalizationRegistryCleanupTaskIfNeeded();
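// Relocation-slot recording: embedded pointers and code targets inside
// InstructionStream objects are remembered as typed slots (constant-pool,
// embedded-object, or code-entry variants) so they can be updated after
// evacuation.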
  MemoryChunk* source_chunk = MemoryChunk::FromHeapObject(host);
  MemoryChunk* target_chunk = MemoryChunk::FromHeapObject(target);

    if (RelocInfo::IsCodeTargetMode(rmode)) {
      slot_type = SlotType::kConstPoolCodeEntry;
    } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
      slot_type = SlotType::kConstPoolEmbeddedObjectCompressed;
      DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
      slot_type = SlotType::kConstPoolEmbeddedObjectFull;
    if (RelocInfo::IsCodeTargetMode(rmode)) {
      slot_type = SlotType::kCodeEntry;
    } else if (RelocInfo::IsFullEmbeddedObject(rmode)) {
      slot_type = SlotType::kEmbeddedObjectFull;
      DCHECK(RelocInfo::IsCompressedEmbeddedObject(rmode));
      slot_type = SlotType::kEmbeddedObjectCompressed;

  MemoryChunk* const source_chunk = MemoryChunk::FromHeapObject(host);
      MutablePageMetadata::cast(source_chunk->Metadata());
  result.page_metadata = source_page_metadata;
  result.slot_type = slot_type;

  if (!ShouldRecordRelocSlot(host, rinfo, target)) return;
  std::optional<base::MutexGuard> opt_guard;
  if (v8_flags.concurrent_sparkplug) {
    opt_guard.emplace(info.page_metadata->mutex());
template <typename TSlot, HeapObjectReferenceType reference_type>
Tagged<Object> MakeSlotValue<ObjectSlot, HeapObjectReferenceType::STRONG>(
MakeSlotValue<MaybeObjectSlot, HeapObjectReferenceType::STRONG>(
    Tagged<HeapObject> heap_object) {
MakeSlotValue<MaybeObjectSlot, HeapObjectReferenceType::WEAK>(
    Tagged<HeapObject> heap_object) {
  return MakeWeak(heap_object);
MakeSlotValue<WriteProtectedSlot<ObjectSlot>, HeapObjectReferenceType::STRONG>(
    Tagged<HeapObject> heap_object) {
#ifdef V8_ENABLE_SANDBOX
Tagged<Object> MakeSlotValue<WriteProtectedSlot<ProtectedPointerSlot>,
                             HeapObjectReferenceType::STRONG>(
    Tagged<HeapObject> heap_object) {
MakeSlotValue<ProtectedMaybeObjectSlot, HeapObjectReferenceType::STRONG>(
    Tagged<HeapObject> heap_object) {
MakeSlotValue<ProtectedMaybeObjectSlot, HeapObjectReferenceType::WEAK>(
    Tagged<HeapObject> heap_object) {
MakeSlotValue<OffHeapObjectSlot, HeapObjectReferenceType::STRONG>(
    Tagged<HeapObject> heap_object) {
#ifdef V8_COMPRESS_POINTERS
Tagged<Object> MakeSlotValue<FullObjectSlot, HeapObjectReferenceType::STRONG>(
    Tagged<HeapObject> heap_object) {
MakeSlotValue<FullMaybeObjectSlot, HeapObjectReferenceType::STRONG>(
    Tagged<HeapObject> heap_object) {
MakeSlotValue<FullMaybeObjectSlot, HeapObjectReferenceType::WEAK>(
    Tagged<HeapObject> heap_object) {
#ifdef V8_EXTERNAL_CODE_SPACE
MakeSlotValue<InstructionStreamSlot, HeapObjectReferenceType::STRONG>(
    Tagged<HeapObject> heap_object) {
#ifdef V8_ENABLE_SANDBOX
MakeSlotValue<ProtectedPointerSlot, HeapObjectReferenceType::STRONG>(
    Tagged<HeapObject> heap_object) {
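// Pointer updating: a slot is rewritten only if the referenced object's map
// word holds a forwarding address; MakeSlotValue re-creates the appropriate
// strong or weak reference for the slot type being written.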
template <HeapObjectReferenceType reference_type, typename TSlot>
static inline void UpdateSlot(PtrComprCageBase cage_base, TSlot slot,
                              Tagged<HeapObject> heap_obj) {
      std::is_same_v<TSlot, FullObjectSlot> ||
          std::is_same_v<TSlot, ObjectSlot> ||
          std::is_same_v<TSlot, FullMaybeObjectSlot> ||
          std::is_same_v<TSlot, MaybeObjectSlot> ||
          std::is_same_v<TSlot, OffHeapObjectSlot> ||
          std::is_same_v<TSlot, InstructionStreamSlot> ||
          std::is_same_v<TSlot, ProtectedPointerSlot> ||
          std::is_same_v<TSlot, ProtectedMaybeObjectSlot> ||
          std::is_same_v<TSlot, WriteProtectedSlot<ObjectSlot>> ||
          std::is_same_v<TSlot, WriteProtectedSlot<ProtectedPointerSlot>>,
      "Only [Full|OffHeap]ObjectSlot, [Full]MaybeObjectSlot, "
      "InstructionStreamSlot, Protected[Pointer|MaybeObject]Slot, "
      "or WriteProtectedSlot are expected here");
  MapWord map_word = heap_obj->map_word(cage_base, kRelaxedLoad);
  if (!map_word.IsForwardingAddress()) return;
         MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
         MemoryChunk::FromHeapObject(heap_obj)->IsFlagSet(
             MemoryChunk::COMPACTION_WAS_ABORTED));
  typename TSlot::TObject target = MakeSlotValue<TSlot, reference_type>(
      map_word.ToForwardingAddress(heap_obj));
    slot.Relaxed_Store(target);
  DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));

template <typename TSlot>
static inline void UpdateSlot(PtrComprCageBase cage_base, TSlot slot) {
  typename TSlot::TObject obj = slot.Relaxed_Load(cage_base);
  Tagged<HeapObject> heap_obj;
  if constexpr (TSlot::kCanBeWeak) {
    if (obj.GetHeapObjectIfWeak(&heap_obj)) {
      return UpdateSlot<HeapObjectReferenceType::WEAK>(cage_base, slot,
  if (obj.GetHeapObjectIfStrong(&heap_obj)) {
    UpdateSlot<HeapObjectReferenceType::STRONG>(cage_base, slot, heap_obj);

template <typename TSlot>
    PtrComprCageBase cage_base, TSlot slot) {
  typename TSlot::TObject obj = slot.Relaxed_Load(cage_base);
  Tagged<HeapObject> heap_obj;

  if constexpr (TSlot::kCanBeWeak) {
    if (obj.GetHeapObjectIfWeak(&heap_obj)) {
      UpdateSlot<HeapObjectReferenceType::WEAK>(cage_base, slot, heap_obj);
      return HeapLayout::InWritableSharedSpace(heap_obj) ? KEEP_SLOT
  if (obj.GetHeapObjectIfStrong(&heap_obj)) {
    UpdateSlot<HeapObjectReferenceType::STRONG>(cage_base, slot, heap_obj);
    return HeapLayout::InWritableSharedSpace(heap_obj) ? KEEP_SLOT

template <typename TSlot>
static inline void UpdateStrongSlot(PtrComprCageBase cage_base, TSlot slot) {
  typename TSlot::TObject obj = slot.Relaxed_Load(cage_base);
#ifdef V8_ENABLE_DIRECT_HANDLE
  if (obj.ptr() == kTaggedNullAddress) return;
  Tagged<HeapObject> heap_obj;
  if (obj.GetHeapObject(&heap_obj)) {
    UpdateSlot<HeapObjectReferenceType::STRONG>(cage_base, slot, heap_obj);

    PtrComprCageBase cage_base, FullMaybeObjectSlot slot) {
  Tagged<MaybeObject> obj = slot.Relaxed_Load(cage_base);
#ifdef V8_ENABLE_DIRECT_HANDLE
  if (obj.ptr() == kTaggedNullAddress) return REMOVE_SLOT;
  Tagged<HeapObject> heap_obj;
  if (obj.GetHeapObject(&heap_obj)) {
    UpdateSlot<HeapObjectReferenceType::STRONG>(cage_base, slot, heap_obj);
    return HeapLayout::InWritableSharedSpace(heap_obj) ? KEEP_SLOT

static inline void UpdateStrongCodeSlot(IsolateForSandbox isolate,
                                        PtrComprCageBase cage_base,
                                        PtrComprCageBase code_cage_base,
                                        InstructionStreamSlot slot) {
  Tagged<Object> obj = slot.Relaxed_Load(code_cage_base);
  Tagged<HeapObject> heap_obj;
  if (obj.GetHeapObject(&heap_obj)) {
    UpdateSlot<HeapObjectReferenceType::STRONG>(cage_base, slot, heap_obj);
    Tagged<Code> code = Cast<Code>(HeapObject::FromAddress(
        slot.address() - Code::kInstructionStreamOffset));
    Tagged<InstructionStream> instruction_stream =
        code->instruction_stream(code_cage_base);
    code->UpdateInstructionStart(isolate, instruction_stream);
    UpdateStrongSlotInternal(cage_base(), p);
    UpdateSlotInternal(cage_base(), p);
      UpdateStrongSlotInternal(cage_base(), p);
      UpdateSlotInternal(cage_base(), p);
    UpdateStrongCodeSlot(isolate_, cage_base(), code_cage_base(), slot);
    UpdateRootSlotInternal(cage_base(), p);
      UpdateRootSlotInternal(cage_base(), p);
      UpdateRootSlotInternal(cage_base(), p);
    UpdateStrongSlot(cage_base, slot);
    UpdateStrongSlot(cage_base, slot);
    UpdateStrongSlot(cage_base, slot);
    UpdateStrongSlot(cage_base, slot);
    UpdateSlot(cage_base, slot);

    if (IsExternalString(new_string)) {
      MutablePageMetadata::MoveExternalBackingStoreBytes(
          ExternalBackingStoreType::kExternalString,
          PageMetadata::FromAddress((*p).ptr()),
          PageMetadata::FromHeapObject(new_string),
          Cast<ExternalString>(new_string)->ExternalPayloadSize());
  return Cast<String>(*p);
void MarkCompactCollector::EvacuatePrologue() {
  DCHECK(new_space_evacuation_pages_.empty());
  std::copy_if(new_space->begin(), new_space->end(),
               std::back_inserter(new_space_evacuation_pages_),
  SemiSpaceNewSpace::From(new_space)->SwapSemiSpaces();

    new_lo_space->Flip();
    new_lo_space->ResetPendingObject();

  DCHECK(old_space_evacuation_pages_.empty());
  old_space_evacuation_pages_ = std::move(evacuation_candidates_);
  evacuation_candidates_.clear();
  DCHECK(evacuation_candidates_.empty());

void MarkCompactCollector::EvacuateEpilogue() {
  aborted_evacuation_candidates_due_to_oom_.clear();
  aborted_evacuation_candidates_due_to_flags_.clear();
  if (heap_->new_space()) {
  ReleaseEvacuationCandidates();
  VerifyRememberedSetsAfterEvacuation(heap_, GarbageCollector::MARK_COMPACTOR);
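// Evacuation modes: new-space objects are either copied individually
// (objects-new-to-old), promoted together with their whole page
// (page-new-to-old), or old-space objects are compacted (objects-old-to-old).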
    case kObjectsNewToOld:
      return "objects-new-to-old";
      return "page-new-to-old";
    case kObjectsOldToOld:
      return "objects-old-to-old";

  if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
    return kPageNewToOld;
  return kObjectsOldToOld;

      local_pretenuring_feedback_(
      local_allocator_(heap_,
      record_visitor_(heap_),
      new_space_visitor_(heap_, &local_allocator_, &record_visitor_,
                         &local_pretenuring_feedback_),
      new_to_old_page_visitor_(heap_, &record_visitor_,
                               &local_pretenuring_feedback_),
      old_space_visitor_(heap_, &local_allocator_, &record_visitor_),
      bytes_compacted_(0) {}

  new_space_visitor_.AddObserver(observer);
  old_space_visitor_.AddObserver(observer);

  duration_ += duration;
  bytes_compacted_ += bytes_compacted;

  DCHECK(page->SweepingDone());
  intptr_t saved_live_bytes = page->live_bytes();
  double evacuation_time = 0.0;
  bool success = false;
    success = RawEvacuatePage(page);
  ReportCompactionProgress(evacuation_time, saved_live_bytes);
        "evacuation[%p]: page=%p new_space=%d "
        "page_evacuation=%d executable=%d can_promote=%d "
        "live_bytes=%" V8PRIdPTR " time=%f success=%d\n",
        static_cast<void*>(this), static_cast<void*>(page),
        chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION),
        chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
        heap_->new_space()->IsPromotionCandidate(page),
        saved_live_bytes, evacuation_time, success);

void Evacuator::Finalize() {
  local_allocator_.Finalize();
  heap_->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
  heap_->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
                                      new_to_old_page_visitor_.moved_bytes());
  heap_->IncrementYoungSurvivorsCounter(
      new_space_visitor_.promoted_size() +
      new_to_old_page_visitor_.moved_bytes());
  heap_->pretenuring_handler()->MergeAllocationSitePretenuringFeedback(
      local_pretenuring_feedback_);
4584 template <
class Visitor>
4585 static bool VisitMarkedObjects(
PageMetadata* page, Visitor* visitor,
4591 template <
class Visitor>
4592 static void VisitMarkedObjectsNoFail(
PageMetadata* page, Visitor* visitor);
4595template <
class Visitor>
4596bool LiveObjectVisitor::VisitMarkedObjects(
PageMetadata* page, Visitor* visitor,
4599 "LiveObjectVisitor::VisitMarkedObjects");
4601 if (!visitor->Visit(
object, size)) {
4602 *failed_object = object;
4609template <
class Visitor>
4613 "LiveObjectVisitor::VisitMarkedObjectsNoFail");
4615 const bool success = visitor->Visit(
object, size);
4623 const EvacuationMode evacuation_mode = ComputeEvacuationMode(chunk);
4625 "FullEvacuator::RawEvacuatePage",
"evacuation_mode",
4626 EvacuationModeName(evacuation_mode),
"live_bytes",
4627 page->live_bytes());
4628 switch (evacuation_mode) {
4629 case kObjectsNewToOld:
4631 new_space_visitor_.DisableAbortEvacuationAtAddress(page);
4633 LiveObjectVisitor::VisitMarkedObjectsNoFail(PageMetadata::cast(page),
4634 &new_space_visitor_);
4635 page->ClearLiveness();
4639 auto object = LargePageMetadata::cast(page)->GetObject();
4640 bool success = new_to_old_page_visitor_.Visit(object, object->Size());
4644 LiveObjectVisitor::VisitMarkedObjectsNoFail(PageMetadata::cast(page),
4645 &new_to_old_page_visitor_);
4647 new_to_old_page_visitor_.account_moved_bytes(page->live_bytes());
4649 case kObjectsOldToOld: {
4651 old_space_visitor_.SetUpAbortEvacuationAtAddress(page);
4654 if (LiveObjectVisitor::VisitMarkedObjects(
4655 PageMetadata::cast(page), &old_space_visitor_, &failed_object)) {
4656 page->ClearLiveness();
4660 heap_->mark_compact_collector()
4661 ->ReportAbortedEvacuationCandidateDueToOOM(
4676 std::vector<std::unique_ptr<Evacuator>>* evacuators,
4677 std::vector<std::pair<ParallelWorkItem, MutablePageMetadata*>>
4680 evacuators_(evacuators),
4681 evacuation_items_(std::move(evacuation_items)),
4682 remaining_evacuation_items_(evacuation_items_.size()),
4684 tracer_(isolate->heap()->tracer()),
4685 trace_id_(reinterpret_cast<uint64_t>(this) ^
4697 ProcessItems(delegate, evacuator);
4700 tracer_, GCTracer::Scope::MC_BACKGROUND_EVACUATE_COPY,
4702 ProcessItems(delegate, evacuator);
4707 while (remaining_evacuation_items_.load(std::memory_order_relaxed) > 0) {
4708 std::optional<size_t> index = generator_.GetNext();
4710 for (size_t i = *index; i < evacuation_items_.size(); ++i) {
4711 auto& work_item = evacuation_items_[i];
4712 if (!work_item.first.TryAcquire()) break;
4714 if (remaining_evacuation_items_.fetch_sub(
4715 1, std::memory_order_relaxed) <= 1) {
4723 const size_t kItemsPerWorker = std::max(1, MB / PageMetadata::kPageSize);
4726 size_t wanted_num_workers =
4727 (remaining_evacuation_items_.load(std::memory_order_relaxed) +
4728 kItemsPerWorker - 1) /
4730 wanted_num_workers =
4731 std::min<size_t>(wanted_num_workers, evacuators_->size());
4732 if (!collector_->UseBackgroundThreadsInCycle()) {
4733 return std::min<size_t>(wanted_num_workers, 1);
4735 return wanted_num_workers;
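// Rough concurrency math, assuming the common 256 KiB page size so that
// kItemsPerWorker = max(1, 1 MiB / 256 KiB) = 4: with 10 remaining pages the
// job asks for ceil(10 / 4) = 3 workers, further capped by the number of
// evacuators and reduced to at most one worker when background threads are
// not used in this cycle.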
4743 std::vector<std::pair<ParallelWorkItem, MutablePageMetadata*>>
4745 std::atomic<size_t> remaining_evacuation_items_{0};
4753size_t CreateAndExecuteEvacuationTasks(
4755 std::vector<std::pair<ParallelWorkItem, MutablePageMetadata*>>
4757 std::optional<ProfilingMigrationObserver> profiling_observer;
4758 if (heap->isolate()->log_object_relocation()) {
4759 profiling_observer.emplace(heap);
4761 std::vector<std::unique_ptr<v8::internal::Evacuator>> evacuators;
4762 const int wanted_num_tasks = NumberOfParallelCompactionTasks(heap);
4763 for (int i = 0; i < wanted_num_tasks; i++) {
4764 auto evacuator = std::make_unique<Evacuator>(heap);
4765 if (profiling_observer) {
4766 evacuator->AddObserver(&profiling_observer.value());
4768 evacuators.push_back(std::move(evacuator));
4770 auto page_evacuation_job = std::make_unique<PageEvacuationJob>(
4771 heap->isolate(), collector, &evacuators, std::move(evacuation_items));
4773 page_evacuation_job->trace_id(),
4775 V8::GetCurrentPlatform()
4777 std::move(page_evacuation_job))
4779 for (auto& evacuator : evacuators) {
4780 evacuator->Finalize();
4782 return wanted_num_tasks;
4785enum class MemoryReductionMode { kNone, kShouldReduceMemory };
4789intptr_t NewSpacePageEvacuationThreshold() {
4790 return v8_flags.page_promotion_threshold *
4791 MemoryChunkLayout::AllocatableMemoryInDataPage() / 100;
4794bool ShouldMovePage(PageMetadata* p, intptr_t live_bytes,
4795 MemoryReductionMode memory_reduction_mode) {
4796 Heap* heap = p->heap();
4797 DCHECK(!p->Chunk()->NeverEvacuate());
4798 const bool should_move_page =
4800 (memory_reduction_mode == MemoryReductionMode::kNone) &&
4801 (live_bytes > NewSpacePageEvacuationThreshold()) &&
4802 heap->CanExpandOldGeneration(live_bytes);
4803 if (v8_flags.trace_page_promotions) {
4805 "[Page Promotion] %p: collector=mc, should move: %d"
4806 ", live bytes = %zu, promotion threshold = %zu"
4807 ", allocated labs size = %zu\n",
4808 p, should_move_page, live_bytes,
4809 NewSpacePageEvacuationThreshold(), p->AllocatedLabSize());
4811 return should_move_page;
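// Worked example, assuming the default --page_promotion_threshold of 70 and
// roughly 256 KiB of allocatable memory per data page: the threshold works
// out to about 70% of that, i.e. ~179 KiB. A new-space page whose live bytes
// exceed this, while the GC is not in memory-reducing mode and the old
// generation can still grow by that amount, is promoted wholesale instead of
// being copied object by object.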
4814void TraceEvacuation(Isolate* isolate, size_t pages_count,
4815 size_t wanted_num_tasks, size_t live_bytes,
4816 size_t aborted_pages) {
4819 "%8.0f ms: evacuation-summary: parallel=%s pages=%zu "
4820 "wanted_tasks=%zu cores=%d live_bytes=%" V8PRIdPTR
4821 " compaction_speed=%.f aborted=%zu\n",
4822 isolate->time_millis_since_init(),
4823 v8_flags.parallel_compaction ? "yes" : "no", pages_count,
4825 V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1,
4829 ->CompactionSpeedInBytesPerMillisecond()
4841 should_pin_in_shared_space_(
4842 collector->heap()->isolate()->is_shared_space_isolate()) {}
4859 if (!object.IsHeapObject()) {
4862 MemoryChunk* chunk = MemoryChunk::FromHeapObject(Cast<HeapObject>(object));
4881 if (!chunk->IsFlagSet(MemoryChunk::EVACUATION_CANDIDATE)) {
4884 collector_->ReportAbortedEvacuationCandidateDueToFlags(
4885 PageMetadata::cast(chunk->Metadata()), chunk);
4892void MarkCompactCollector::PinPreciseRootsIfNeeded() {
4893 if (!v8_flags.precise_object_pinning) {
4897 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_EVACUATE_PIN_PAGES);
4905 heap_->IterateRootsForPrecisePinning(&root_visitor);
4907 if (isolate->is_shared_space_isolate()) {
4909 isolate->global_safepoint()->IterateClientIsolates(
4910 [&client_root_visitor](Isolate* client) {
4916void MarkCompactCollector::EvacuatePagesInParallel() {
4917 std::vector<std::pair<ParallelWorkItem, MutablePageMetadata*>>
4919 intptr_t live_bytes = 0;
4921 PinPreciseRootsIfNeeded();
4925 bool force_page_promotion =
4927 for (PageMetadata* page : new_space_evacuation_pages_) {
4928 intptr_t live_bytes_on_page = page->live_bytes();
4930 live_bytes += live_bytes_on_page;
4931 MemoryReductionMode memory_reduction_mode =
4932 heap_->ShouldReduceMemory() ? MemoryReductionMode::kShouldReduceMemory
4933 : MemoryReductionMode::kNone;
4934 if (ShouldMovePage(page, live_bytes_on_page, memory_reduction_mode) ||
4935 force_page_promotion || page->Chunk()->IsQuarantined()) {
4936 EvacuateNewToOldSpacePageVisitor::Move(page);
4937 page->Chunk()->SetFlagNonExecutable(MemoryChunk::PAGE_NEW_OLD_PROMOTION);
4941 heap_->old_space()->DecreaseAllocatedBytes(page->allocated_bytes(), page);
4946 if (heap_->IsGCWithStack()) {
4947 if (!v8_flags.compact_with_stack) {
4948 for (PageMetadata* page : old_space_evacuation_pages_) {
4949 ReportAbortedEvacuationCandidateDueToFlags(page, page->Chunk());
4951 } else if (!v8_flags.compact_code_space_with_stack ||
4952 heap_->isolate()->InFastCCall()) {
4955 for (PageMetadata* page : old_space_evacuation_pages_) {
4956 if (page->owner_identity() != CODE_SPACE) continue;
4957 ReportAbortedEvacuationCandidateDueToFlags(page, page->Chunk());
4968 const double kFraction = 0.05;
4970 for (PageMetadata* page : old_space_evacuation_pages_) {
4971 if (heap_->isolate()->fuzzer_rng()->NextDouble() < kFraction) {
4972 ReportAbortedEvacuationCandidateDueToFlags(page, page->Chunk());
4977 for (PageMetadata* page : old_space_evacuation_pages_) {
4979 if (chunk->IsFlagSet(MemoryChunk::COMPACTION_WAS_ABORTED)) continue;
4981 live_bytes += page->live_bytes();
4986 if (auto* new_lo_space = heap_->new_lo_space()) {
4987 for (auto it = new_lo_space->begin(); it != new_lo_space->end();) {
4992 !HeapLayout::InBlackAllocatedPage(object));
4994 heap_->lo_space()->PromoteNewLargeObject(current);
4995 current->Chunk()->SetFlagNonExecutable(
4996 MemoryChunk::PAGE_NEW_OLD_PROMOTION);
4997 promoted_large_pages_.push_back(current);
5001 new_lo_space->set_objects_size(0);
5004 const size_t pages_count = evacuation_items.size();
5005 size_t wanted_num_tasks = 0;
5006 if (!evacuation_items.empty()) {
5008 "MarkCompactCollector::EvacuatePagesInParallel",
"pages",
5009 evacuation_items.size());
5011 wanted_num_tasks = CreateAndExecuteEvacuationTasks(
5012 heap_,
this, std::move(evacuation_items));
5015 const size_t aborted_pages = PostProcessAbortedEvacuationCandidates();
5018 TraceEvacuation(
heap_->isolate(), pages_count, wanted_num_tasks, live_bytes,
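// Overall flow of the parallel evacuation phase as visible above: precise
// roots are pinned first, new-space pages are either promoted in place or
// queued for object-by-object evacuation, old-space candidates may be aborted
// when compacting with a stack is not allowed (or randomly under stress),
// new large objects are promoted to the old large-object space, and the
// remaining work items are handed to a PageEvacuationJob before aborted
// candidates are post-processed and a summary is traced.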
5026 if (object.IsHeapObject()) {
5028 MapWord map_word = heap_object->map_word(kRelaxedLoad);
5037void MarkCompactCollector::Evacuate() {
5041 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE);
5046 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
5047 EvacuatePagesInParallel();
5050 UpdatePointersAfterEvacuation();
5053 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
5058 USE(owner_identity);
5059 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION)) {
5072 DCHECK(p->SweepingDone());
5074 if (space->ShouldReleaseEmptyPage()) {
5075 space->ReleasePage(p);
5077 sweeper_->SweepEmptyNewSpacePage(p);
5081 new_space_evacuation_pages_.clear();
5089 MarkBit::From(object).Clear();
5092 p->marking_progress_tracker().ResetIfEnabled();
5094 promoted_large_pages_.clear();
5098 if (chunk->IsFlagSet(MemoryChunk::COMPACTION_WAS_ABORTED)) {
5099 sweeper_->AddPage(p->owner_identity(), p);
5106 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE);
5111 if (v8_flags.verify_heap && !sweeper_->sweeping_in_progress()) {
5112 EvacuationVerifier verifier(heap_);
5128 std::vector<std::unique_ptr<UpdatingItem>> updating_items)
5130 updating_items_(std::move(updating_items)),
5131 remaining_updating_items_(updating_items_.size()),
5133 tracer_(isolate->heap()->tracer()),
5134 trace_id_(reinterpret_cast<uint64_t>(this) ^
5144 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
5146 UpdatePointers(delegate);
5149 tracer_, GCTracer::Scope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS,
5151 UpdatePointers(delegate);
5156 while (remaining_updating_items_.load(std::memory_order_relaxed) > 0) {
5157 std::optional<size_t> index = generator_.GetNext();
5159 for (size_t i = *index; i < updating_items_.size(); ++i) {
5160 auto& work_item = updating_items_[i];
5161 if (!work_item->TryAcquire()) break;
5162 work_item->Process();
5163 if (remaining_updating_items_.fetch_sub(1, std::memory_order_relaxed) <=
5172 size_t items = remaining_updating_items_.load(std::memory_order_relaxed);
5173 if (!v8_flags.parallel_pointer_update ||
5174 !collector_->UseBackgroundThreadsInCycle()) {
5175 return std::min<size_t>(items, 1);
5177 const size_t kMaxPointerUpdateTasks = 8;
5178 size_t max_concurrency = std::min<size_t>(kMaxPointerUpdateTasks, items);
5180 return max_concurrency;
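// Pointer updating runs with at most kMaxPointerUpdateTasks (8) parallel
// tasks, bounded by the number of remaining updating items, and falls back to
// at most one task when --parallel_pointer_update is off or background
// threads are not used in this cycle.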
5188 std::atomic<size_t> remaining_updating_items_{0};
5205 ~RememberedSetUpdatingItem() override = default;
5207 void Process() override {
5209 "RememberedSetUpdatingItem::Process");
5210 UpdateUntypedPointers();
5211 UpdateTypedPointers();
5215 template <typename TSlot>
5221 if (!slot.load(cage_base).GetHeapObject(&heap_object)) {
5225 if (HeapLayout::InWritableSharedSpace(heap_object)) {
5227 page, page->Offset(slot.address()));
5231 inline void CheckSlotForOldToSharedTyped(
5235 UpdateTypedSlotHelper::GetTargetObject(page->heap(), slot_type, addr);
5238 UpdateTypedSlotHelper::UpdateTypedSlot(
5239 jit_allocation, page->heap(), slot_type, addr,
5241 DCHECK_EQ((*slot).GetHeapObjectAssumeStrong(), heap_object);
5246 if (HeapLayout::InWritableSharedSpace(heap_object)) {
5247 const uintptr_t offset = page->Offset(addr);
5250 static_cast<uint32_t>(offset));
5254 template <typename TSlot>
5255 inline void CheckAndUpdateOldToNewSlot(TSlot slot,
5258 std::is_same_v<TSlot, FullMaybeObjectSlot> ||
5259 std::is_same_v<TSlot, MaybeObjectSlot>,
5260 "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
5262 if (!(*slot).GetHeapObject(&heap_object)) return;
5263 if (!HeapLayout::InYoungGeneration(heap_object)) return;
5267 Heap::InToPage(heap_object));
5269 Heap::InFromPage(heap_object));
5274 DCHECK_IMPLIES(!heap_object->map_word(kRelaxedLoad).IsForwardingAddress(),
5276 UpdateSlot(cage_base, slot);
5279 void UpdateUntypedPointers() {
5280 UpdateUntypedOldToNewPointers<OLD_TO_NEW>();
5281 UpdateUntypedOldToNewPointers<OLD_TO_NEW_BACKGROUND>();
5282 UpdateUntypedOldToOldPointers();
5283 UpdateUntypedTrustedToCodePointers();
5284 UpdateUntypedTrustedToTrustedPointers();
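// UpdateUntypedPointers() walks every untyped remembered set on this page:
// old-to-new (foreground and background variants), old-to-old,
// trusted-to-code, and trusted-to-trusted, matching the helper calls above.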
5287 template <RememberedSetType old_to_new_type>
5288 void UpdateUntypedOldToNewPointers() {
5289 if (!chunk_->slot_set<old_to_new_type, AccessMode::NON_ATOMIC>()) {
5300 CheckAndUpdateOldToNewSlot(slot, cage_base);
5304 CheckSlotForOldToSharedUntyped(cage_base, chunk_, slot);
5310 SlotSet::KEEP_EMPTY_BUCKETS);
5313 chunk_->ReleaseSlotSet(old_to_new_type);
5316 void UpdateUntypedOldToOldPointers() {
5322 if (chunk_->Chunk()->executable()) {
5333 jit_allocation, slot.address()));
5338 SlotSet::KEEP_EMPTY_BUCKETS);
5343 UpdateSlot(cage_base, slot);
5347 CheckSlotForOldToSharedUntyped(cage_base, chunk_, slot);
5353 SlotSet::KEEP_EMPTY_BUCKETS);
5359 void UpdateUntypedTrustedToCodePointers() {
5364#ifdef V8_ENABLE_SANDBOX
5375#ifdef V8_EXTERNAL_CODE_SPACE
5382 [cage_base, code_cage_base,
5384 DCHECK(IsCode(HeapObject::FromAddress(slot.address() -
5385 Code::kInstructionStreamOffset),
5387 UpdateStrongCodeSlot(isolate, cage_base, code_cage_base,
5393 SlotSet::FREE_EMPTY_BUCKETS);
5398 void UpdateUntypedTrustedToTrustedPointers() {
5403#ifdef V8_ENABLE_SANDBOX
5417 if (chunk_->Chunk()->executable()) {
5427 UpdateStrongSlot(unused_cage_base,
5429 jit_allocation, slot.address()));
5434 SlotSet::FREE_EMPTY_BUCKETS);
5439 UpdateSlot(unused_cage_base,
5445 SlotSet::FREE_EMPTY_BUCKETS);
5451 void UpdateTypedPointers() {
5452 if (!chunk_->Chunk()->executable()) {
5460 UpdateTypedOldToNewPointers(jit_page);
5461 UpdateTypedOldToOldPointers(jit_page);
5465 if (chunk_->typed_slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() == nullptr)
5468 const auto check_and_update_old_to_new_slot_fn =
5470 CheckAndUpdateOldToNewSlot(slot, cage_base);
5475 chunk_, [this, &check_and_update_old_to_new_slot_fn, &jit_page](
5476 SlotType slot_type, Address slot) {
5479 UpdateTypedSlotHelper::UpdateTypedSlot(
5480 jit_allocation, heap_, slot_type, slot,
5481 check_and_update_old_to_new_slot_fn);
5485 CheckSlotForOldToSharedTyped(chunk_, slot_type, slot,
5499 if (chunk_->typed_slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() == nullptr)
5503 chunk_, [this, cage_base, &jit_page](SlotType slot_type, Address slot) {
5508 SlotCallbackResult result = UpdateTypedSlotHelper::UpdateTypedSlot(
5509 jit_allocation, heap_, slot_type, slot,
5511 UpdateStrongSlot(cage_base, slot);
5518 CheckSlotForOldToSharedTyped(chunk_, slot_type, slot,
5535template <typename IterateableSpace>
5536void CollectRememberedSetUpdatingItems(
5537 std::vector<std::unique_ptr<UpdatingItem>>* items,
5538 IterateableSpace* space) {
5539 for (MutablePageMetadata* page : *space) {
5542 if (page->Chunk()->IsEvacuationCandidate()) continue;
5543 if (page->ContainsAnySlots()) {
5544 items->emplace_back(
5545 std::make_unique<RememberedSetUpdatingItem>(space->heap(), page));
5560 "EphemeronTableUpdatingItem::Process");
5563 auto* table_map = heap_->ephemeron_remembered_set()->tables();
5564 for (auto it = table_map->begin(); it != table_map->end(); it++) {
5566 auto& indices = it->second;
5567 if (Cast<HeapObject>(table)
5568 ->map_word(kRelaxedLoad)
5569 .IsForwardingAddress()) {
5573 DCHECK(IsMap(table->map(), cage_base));
5574 DCHECK(IsEphemeronHashTable(table, cage_base));
5575 for (auto iti = indices.begin(); iti != indices.end(); ++iti) {
5577 ObjectSlot key_slot(table->RawFieldOfElementAt(
5582 MapWord map_word = key->map_word(cage_base, kRelaxedLoad);
5596void MarkCompactCollector::UpdatePointersAfterEvacuation() {
5597 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
5601 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
5604 heap_->IterateRootsIncludingClients(
5607 SkipRoot::kConservativeStack,
5608 SkipRoot::kReadOnlyBuiltins});
5613 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_CLIENT_HEAPS);
5614 UpdatePointersInClientHeaps();
5619 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN);
5620 std::vector<std::unique_ptr<UpdatingItem>> updating_items;
5622 CollectRememberedSetUpdatingItems(&updating_items, heap_->old_space());
5623 CollectRememberedSetUpdatingItems(&updating_items, heap_->code_space());
5624 if (heap_->shared_space()) {
5625 CollectRememberedSetUpdatingItems(&updating_items, heap_->shared_space());
5627 CollectRememberedSetUpdatingItems(&updating_items, heap_->lo_space());
5628 CollectRememberedSetUpdatingItems(&updating_items, heap_->code_lo_space());
5629 if (heap_->shared_lo_space()) {
5630 CollectRememberedSetUpdatingItems(&updating_items,
5631 heap_->shared_lo_space());
5633 CollectRememberedSetUpdatingItems(&updating_items, heap_->trusted_space());
5634 CollectRememberedSetUpdatingItems(&updating_items,
5635 heap_->trusted_lo_space());
5636 if (heap_->shared_trusted_space()) {
5637 CollectRememberedSetUpdatingItems(&updating_items,
5638 heap_->shared_trusted_space());
5640 if (heap_->shared_trusted_lo_space()) {
5641 CollectRememberedSetUpdatingItems(&updating_items,
5642 heap_->shared_trusted_lo_space());
5650 updating_items.push_back(
5651 std::make_unique<EphemeronTableUpdatingItem>(heap_));
5653 auto pointers_updating_job = std::make_unique<PointersUpdatingJob>(
5654 heap_->isolate(), this, std::move(updating_items));
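// An updating item is collected for each old-generation space that can hold
// recorded slots (old, code, lo, code-lo, trusted, trusted-lo, and the shared
// variants when a shared heap exists), plus one EphemeronTableUpdatingItem;
// all of them are then processed by a single PointersUpdatingJob.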
5656 pointers_updating_job->trace_id(),
5658 V8::GetCurrentPlatform()
5660 std::move(pointers_updating_job))
5666 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
5668 heap_->UpdateReferencesInExternalStringTable(
5678 if (heap_->isolate()->OwnsStringTables() ||
5680 heap_->isolate()->string_forwarding_table()->UpdateAfterFullEvacuation();
5684 heap_->ProcessWeakListRoots(&evacuation_object_retainer);
5689 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_POINTER_TABLES);
5690 UpdatePointersInPointerTables();
5694 heap_->isolate()->inner_pointer_to_code_cache()->Flush();
5697void MarkCompactCollector::UpdatePointersInClientHeaps() {
5699 if (!isolate->is_shared_space_isolate()) return;
5701 isolate->global_safepoint()->IterateClientIsolates(
5702 [this](Isolate* client) { UpdatePointersInClientHeap(client); });
5705void MarkCompactCollector::UpdatePointersInClientHeap(Isolate* client) {
5709 while (chunk_iterator.HasNext()) {
5716 return UpdateOldToSharedSlot(cage_base, slot);
5718 SlotSet::FREE_EMPTY_BUCKETS);
5726 const auto protected_slot_count =
5731 return UpdateOldToSharedSlot(unused_cage_base, protected_slot);
5733 SlotSet::FREE_EMPTY_BUCKETS);
5734 if (protected_slot_count == 0) {
5744 page->area_start(), page->area_size());
5746 page, [this, &jit_page](SlotType slot_type, Address slot) {
5752 return UpdateTypedSlotHelper::UpdateTypedSlot(
5753 jit_allocation, heap_, slot_type, slot,
5755 return UpdateStrongOldToSharedSlot(cage_base, slot);
5763void MarkCompactCollector::UpdatePointersInPointerTables() {
5764#if defined(V8_ENABLE_SANDBOX) || defined(V8_ENABLE_LEAPTIERING)
5773 DCHECK(IsExposedTrustedObject(relocated_object));
5774 return Cast<ExposedTrustedObject>(relocated_object);
5778#ifdef V8_ENABLE_SANDBOX
5779 TrustedPointerTable* const tpt = &heap_->isolate()->trusted_pointer_table();
5780 tpt->IterateActiveEntriesIn(
5781 heap_->trusted_pointer_space(),
5783 Tagged<ExposedTrustedObject> relocated_object = process_entry(content);
5784 if (!relocated_object.is_null()) {
5785 DCHECK_EQ(handle, relocated_object->self_indirect_pointer_handle());
5786 auto instance_type = relocated_object->map()->instance_type();
5787 auto tag = IndirectPointerTagFromInstanceType(instance_type);
5788 tpt->Set(handle, relocated_object.ptr(), tag);
5792 TrustedPointerTable* const stpt =
5793 &heap_->isolate()->shared_trusted_pointer_table();
5794 stpt->IterateActiveEntriesIn(
5795 heap_->isolate()->shared_trusted_pointer_space(),
5797 Tagged<ExposedTrustedObject> relocated_object = process_entry(content);
5798 if (!relocated_object.is_null()) {
5799 DCHECK_EQ(handle, relocated_object->self_indirect_pointer_handle());
5800 auto instance_type = relocated_object->map()->instance_type();
5801 auto tag = IndirectPointerTagFromInstanceType(instance_type);
5802 DCHECK(IsSharedTrustedPointerType(tag));
5803 stpt->Set(handle, relocated_object.ptr(), tag);
5807 CodePointerTable* const cpt = IsolateGroup::current()->code_pointer_table();
5808 cpt->IterateActiveEntriesIn(
5809 heap_->code_pointer_space(),
5810 [&](CodePointerHandle handle, Address content) {
5811 Tagged<ExposedTrustedObject> relocated_object = process_entry(content);
5812 if (!relocated_object.is_null()) {
5813 DCHECK_EQ(handle, relocated_object->self_indirect_pointer_handle());
5814 cpt->SetCodeObject(handle, relocated_object.address());
5819#ifdef V8_ENABLE_LEAPTIERING
5820 JSDispatchTable* const jdt = IsolateGroup::current()->js_dispatch_table();
5821 const EmbeddedData& embedded_data = EmbeddedData::FromBlob(heap_->isolate());
5822 jdt->IterateActiveEntriesIn(
5823 heap_->js_dispatch_table_space(), [&](JSDispatchHandle handle) {
5824 Address code_address = jdt->GetCodeAddress(handle);
5825 Address entrypoint_address = jdt->GetEntrypoint(handle);
5826 Tagged<TrustedObject> relocated_code = process_entry(code_address);
5827 bool code_object_was_relocated = !relocated_code.is_null();
5828 Tagged<Code> code = Cast<Code>(code_object_was_relocated
5830 : Tagged<Object>(code_address));
5831 bool instruction_stream_was_relocated =
5832 code->instruction_start() != entrypoint_address;
5833 if (code_object_was_relocated || instruction_stream_was_relocated) {
5834 Address old_entrypoint = jdt->GetEntrypoint(handle);
5836 Address new_entrypoint = ([&]() {
5837#define CASE(name, ...) \
5838 if (old_entrypoint == embedded_data.InstructionStartOf(Builtin::k##name)) { \
5839 return old_entrypoint; \
5841 BUILTIN_LIST_BASE_TIERING(CASE)
5843 return code->instruction_start();
5845 jdt->SetCodeAndEntrypointNoWriteBarrier(handle, code, new_entrypoint);
5846 CHECK_IMPLIES(jdt->IsTieringRequested(handle),
5847 old_entrypoint == new_entrypoint);
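// For each dispatch table entry both the code object and its entrypoint may
// have moved. If the old entrypoint pointed at one of the tiering builtins
// listed by BUILTIN_LIST_BASE_TIERING, it is kept as-is so a pending tiering
// request survives relocation; otherwise the entrypoint is refreshed to the
// relocated code's instruction_start().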
5853void MarkCompactCollector::ReportAbortedEvacuationCandidateDueToOOM(
5856 aborted_evacuation_candidates_due_to_oom_.push_back(
5857 std::make_pair(failed_start, page));
5860void MarkCompactCollector::ReportAbortedEvacuationCandidateDueToFlags(
5863 if (chunk->IsFlagSet(MemoryChunk::COMPACTION_WAS_ABORTED)) {
5866 chunk->SetFlagSlow(MemoryChunk::COMPACTION_WAS_ABORTED);
5867 aborted_evacuation_candidates_due_to_flags_.push_back(page);
5873 DCHECK(page->Chunk()->IsFlagSet(MemoryChunk::COMPACTION_WAS_ABORTED));
5879 page->marking_bitmap()->ClearRange<AccessMode::NON_ATOMIC>(
5880 MarkingBitmap::AddressToIndex(page->area_start()),
5881 MarkingBitmap::LimitAddressToIndex(failed_start));
5885 SlotSet::FREE_EMPTY_BUCKETS);
5890 page, page->area_start(), failed_start, SlotSet::FREE_EMPTY_BUCKETS);
5891 DCHECK_NULL(page->typed_slot_set<OLD_TO_NEW_BACKGROUND>());
5894 page, page->area_start(), failed_start, SlotSet::FREE_EMPTY_BUCKETS);
5900 LiveObjectVisitor::VisitMarkedObjectsNoFail(page, &visitor);
5901 page->SetLiveBytes(visitor.live_object_size());
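// Re-recording an aborted page: the marking bitmap is cleared for the range
// below the address where evacuation failed (the objects that were already
// moved), stale recorded slots in that range are freed, and a record-only
// visit over the remaining marked objects re-records their slots and
// recomputes the page's live-byte count.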
5907size_t MarkCompactCollector::PostProcessAbortedEvacuationCandidates() {
5908 for (auto start_and_page : aborted_evacuation_candidates_due_to_oom_) {
5912 chunk->SetFlagSlow(MemoryChunk::COMPACTION_WAS_ABORTED);
5914 for (auto start_and_page : aborted_evacuation_candidates_due_to_oom_) {
5915 ReRecordPage(heap_, start_and_page.first, start_and_page.second);
5917 for (auto page : aborted_evacuation_candidates_due_to_flags_) {
5918 ReRecordPage(heap_, page->area_start(), page);
5920 const size_t aborted_pages =
5921 aborted_evacuation_candidates_due_to_oom_.size() +
5922 aborted_evacuation_candidates_due_to_flags_.size();
5923 size_t aborted_pages_verified = 0;
5926 if (chunk->IsFlagSet(MemoryChunk::COMPACTION_WAS_ABORTED)) {
5931 p->ClearEvacuationCandidate();
5932 aborted_pages_verified++;
5935 DCHECK(p->SweepingDone());
5938 DCHECK_EQ(aborted_pages_verified, aborted_pages);
5939 USE(aborted_pages_verified);
5940 return aborted_pages;
5943void MarkCompactCollector::ReleaseEvacuationCandidates() {
5945 if (!p->Chunk()->IsEvacuationCandidate()) continue;
5948 CHECK(p->SweepingDone());
5949 space->ReleasePage(p);
5951 old_space_evacuation_pages_.clear();
5952 compacting_ = false;
5955void MarkCompactCollector::StartSweepNewSpace() {
5959 int will_be_swept = 0;
5961 heap_->StartResizeNewSpace();
5963 DCHECK(empty_new_space_pages_to_be_swept_.empty());
5964 for (auto it = paged_space->begin(); it != paged_space->end();) {
5977 empty_new_space_pages_to_be_swept_.push_back(p);
5984 "sweeping: space=%s initialized_for_sweeping=%d",
5985 ToString(paged_space->identity()), will_be_swept);
5989void MarkCompactCollector::ResetAndRelinkBlackAllocatedPage(
5991 DCHECK(page->Chunk()->IsFlagSet(MemoryChunk::BLACK_ALLOCATED));
5994 DCHECK(page->marking_bitmap()->IsClean());
5995 std::optional<RwxMemoryWriteScope> scope;
5996 if (page->Chunk()->InCodeSpace()) {
5997 scope.emplace("For writing flags.");
5999 page->Chunk()->ClearFlagUnlocked(MemoryChunk::BLACK_ALLOCATED);
6000 space->IncreaseAllocatedBytes(page->allocated_bytes(), page);
6001 space->RelinkFreeListCategories(page);
6006 space->ClearAllocatorState();
6008 int will_be_swept = 0;
6009 bool unused_page_present = false;
6014 for (auto it = space->begin(); it != space->end();) {
6028 ResetAndRelinkBlackAllocatedPage(space, p);
6034 if (unused_page_present) {
6037 static_cast<void*>(p));
6039 space->ReleasePage(p);
6042 unused_page_present = true;
6045 sweeper->AddPage(space->identity(), p);
6055 "sweeping: space=%s initialized_for_sweeping=%d",
6056 ToString(space->identity()), will_be_swept);
6066 if (space->identity() == LO_SPACE) return true;
6069 if (space->identity() == SHARED_LO_SPACE) return true;
6076 size_t surviving_object_size = 0;
6078 ShouldPostponeFreeingEmptyPages(space)
6079 ? MemoryAllocator::FreeMode::kPostpone
6080 : MemoryAllocator::FreeMode::kImmediately;
6081 for (auto it = space->begin(); it != space->end();) {
6083 DCHECK(!current->Chunk()->IsFlagSet(MemoryChunk::BLACK_ALLOCATED));
6087 space->RemovePage(current);
6088 heap_->memory_allocator()->Free(free_mode, current);
6093 MarkBit::From(object).Clear();
6094 current->SetLiveBytes(0);
6096 current->marking_progress_tracker().ResetIfEnabled();
6097 surviving_object_size += static_cast<size_t>(object->Size(cage_base));
6099 space->set_objects_size(surviving_object_size);
6102void MarkCompactCollector::Sweep() {
6103 DCHECK(!sweeper_->sweeping_in_progress());
6104 sweeper_->InitializeMajorSweeping();
6107 heap_->tracer(), GCTracer::Scope::MC_SWEEP, ThreadKind::kMain,
6108 sweeper_->GetTraceIdForFlowEvent(GCTracer::Scope::MC_SWEEP),
6117 SweepLargeSpace(heap_->lo_space());
6121 heap_->tracer(), GCTracer::Scope::MC_SWEEP_CODE_LO, ThreadKind::kMain);
6122 SweepLargeSpace(heap_->code_lo_space());
6124 if (heap_->shared_space()) {
6126 GCTracer::Scope::MC_SWEEP_SHARED_LO,
6128 SweepLargeSpace(heap_->shared_lo_space());
6133 StartSweepSpace(heap_->old_space());
6138 StartSweepSpace(heap_->code_space());
6140 if (heap_->shared_space()) {
6142 heap_->tracer(), GCTracer::Scope::MC_SWEEP_SHARED, ThreadKind::kMain);
6143 StartSweepSpace(heap_->shared_space());
6147 heap_->tracer(), GCTracer::Scope::MC_SWEEP_TRUSTED, ThreadKind::kMain);
6148 StartSweepSpace(heap_->trusted_space());
6150 if (heap_->shared_trusted_space()) {
6152 heap_->tracer(), GCTracer::Scope::MC_SWEEP_SHARED, ThreadKind::kMain);
6153 StartSweepSpace(heap_->shared_trusted_space());
6157 GCTracer::Scope::MC_SWEEP_TRUSTED_LO,
6159 SweepLargeSpace(heap_->trusted_lo_space());
6164 StartSweepNewSpace();
6167 sweeper_->StartMajorSweeping();
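// Sweep order in the full GC, per the calls above: the large-object spaces
// (lo, code-lo, shared-lo, and later trusted-lo) are swept synchronously on
// the main thread, the paged spaces (old, code, shared, trusted,
// shared-trusted) and new space are prepared via StartSweepSpace() /
// StartSweepNewSpace(), and concurrent major sweeping is then kicked off with
// StartMajorSweeping().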
6179 IsInstructionStream(istream_or_smi_zero));
6182 collector_->heap()->isolate()->code_cage_base()}),
6183 istream_or_smi_zero);
6187 code->IterateDeoptimizationLiterals(this);
6189 if (istream_or_smi_zero != Smi::zero()) {
#define SBXCHECK_EQ(lhs, rhs)
#define SBXCHECK(condition)
V8_INLINE bool Pop(EntryType *entry)
virtual bool IsJoiningThread() const =0
virtual uint8_t GetTaskId()=0
constexpr void Add(E element)
double NextDouble() V8_WARN_UNUSED_RESULT
std::vector< uint64_t > NextSample(uint64_t max, size_t n) V8_WARN_UNUSED_RESULT
int64_t NextInt64() V8_WARN_UNUSED_RESULT
static constexpr TimeDelta Max()
bool To(Tagged< T > *obj) const
void RequestSweep(SweepingType sweeping_type, TreatAllYoungAsPromoted treat_all_young_as_promoted)
V8_EXPORT_PRIVATE Tagged< Code > code(Builtin builtin)
void MarkCompactPrologue()
void FlushMemoryChunkData()
GarbageCollector garbage_collector() const
void FlushNativeContexts(NativeContextStats *main_stats)
void RescheduleJobIfNeeded(GarbageCollector garbage_collector, TaskPriority priority=TaskPriority::kUserVisible)
void set_another_ephemeron_iteration(bool another_ephemeron_iteration)
bool another_ephemeron_iteration()
static CppHeap * From(v8::CppHeap *heap)
void EnterFinalPause(cppgc::EmbedderStackState stack_state)
static void DeoptimizeMarkedCode(Isolate *isolate)
EphemeronTableUpdatingItem(Heap *heap)
~EphemeronTableUpdatingItem() override=default
PretenuringHandler::PretenuringFeedbackMap * local_pretenuring_feedback_
AllocationSpace AllocateTargetObject(Tagged< HeapObject > old_object, int size, Tagged< HeapObject > *target_object)
bool is_incremental_marking_
EvacuateNewSpaceVisitor(Heap *heap, EvacuationAllocator *local_allocator, RecordMigratedSlotVisitor *record_visitor, PretenuringHandler::PretenuringFeedbackMap *local_pretenuring_feedback)
AllocationResult AllocateInOldSpace(int size_in_bytes, AllocationAlignment alignment)
PretenuringHandler *const pretenuring_handler_
bool TryEvacuateWithoutCopy(Tagged< HeapObject > object)
bool Visit(Tagged< HeapObject > object, int size) override
const bool shortcut_strings_
RecordMigratedSlotVisitor * record_visitor_
EvacuateNewToOldSpacePageVisitor(Heap *heap, RecordMigratedSlotVisitor *record_visitor, PretenuringHandler::PretenuringFeedbackMap *local_pretenuring_feedback)
void account_moved_bytes(intptr_t bytes)
static void Move(PageMetadata *page)
bool Visit(Tagged< HeapObject > object, int size) override
PretenuringHandler::PretenuringFeedbackMap * local_pretenuring_feedback_
PretenuringHandler *const pretenuring_handler_
bool Visit(Tagged< HeapObject > object, int size) override
EvacuateOldSpaceVisitor(Heap *heap, EvacuationAllocator *local_allocator, RecordMigratedSlotVisitor *record_visitor)
EvacuateRecordOnlyVisitor(Heap *heap)
size_t live_object_size() const
const PtrComprCageBase cage_base_
bool Visit(Tagged< HeapObject > object, int size) override
void ExecuteMigrationObservers(AllocationSpace dest, Tagged< HeapObject > src, Tagged< HeapObject > dst, int size)
const bool shared_string_table_
static void RawMigrateObject(EvacuateVisitorBase *base, Tagged< HeapObject > dst, Tagged< HeapObject > src, int size, AllocationSpace dest)
bool ShouldPromoteIntoSharedHeap(Tagged< Map > map)
void(*)(EvacuateVisitorBase *base, Tagged< HeapObject > dst, Tagged< HeapObject > src, int size, AllocationSpace dest) MigrateFunction
std::optional< base::RandomNumberGenerator > rng_
bool TryEvacuateObject(AllocationSpace target_space, Tagged< HeapObject > object, int size, Tagged< HeapObject > *target_object)
PtrComprCageBase cage_base()
RecordMigratedSlotVisitor * record_visitor_
void AddObserver(MigrationObserver *observer)
EvacuateVisitorBase(Heap *heap, EvacuationAllocator *local_allocator, RecordMigratedSlotVisitor *record_visitor)
EvacuationAllocator * local_allocator_
void MigrateObject(Tagged< HeapObject > dst, Tagged< HeapObject > src, int size, AllocationSpace dest)
std::vector< MigrationObserver * > observers_
MigrateFunction migration_function_
AllocationResult Allocate(AllocationSpace space, int object_size, AllocationAlignment alignment)
Tagged< Object > RetainAs(Tagged< Object > object) override
PretenuringHandler::PretenuringFeedbackMap local_pretenuring_feedback_
EvacuateOldSpaceVisitor old_space_visitor_
static EvacuationMode ComputeEvacuationMode(MemoryChunk *chunk)
intptr_t bytes_compacted_
EvacuateNewSpaceVisitor new_space_visitor_
void ReportCompactionProgress(double duration, intptr_t bytes_compacted)
void EvacuatePage(MutablePageMetadata *chunk)
static const char * EvacuationModeName(EvacuationMode mode)
RecordMigratedSlotVisitor record_visitor_
EvacuateNewToOldSpacePageVisitor new_to_old_page_visitor_
EvacuationAllocator local_allocator_
void AddObserver(MigrationObserver *observer)
Isolate * isolate() const
Tagged< MaybeObject > Relaxed_Load() const
void store(Tagged< MaybeObject > value) const
void Relaxed_Store(Tagged< Object > value) const
Tagged< Object > load() const
Tagged< Object > Relaxed_Load() const
void TransitionStrings(StringForwardingTable::Record *record)
void MarkForwardObject(StringForwardingTable::Record *record)
void ProcessFullWithStack()
void TryExternalize(Tagged< String > original_string, StringForwardingTable::Record *record)
FullStringForwardingTableCleaner(Heap *heap)
void TryInternalize(Tagged< String > original_string, StringForwardingTable::Record *record)
uint16_t CodeFlushingIncrease() const
void NotifyMarkingStart()
std::optional< double > CompactionSpeedInBytesPerMillisecond() const
void ClearListOfYoungNodes()
void IterateWeakRootsForPhantomHandles(WeakSlotCallbackWithHeap should_reset_handle)
void IterateClientIsolates(Callback callback)
MainAllocator * new_space_allocator()
static V8_INLINE bool InYoungGeneration(Tagged< Object > object)
static V8_INLINE bool InWritableSharedSpace(Tagged< HeapObject > object)
static V8_INLINE bool InReadOnlySpace(Tagged< HeapObject > object)
static V8_INLINE bool InAnySharedSpace(Tagged< HeapObject > object)
static V8_INLINE bool InCodeSpace(Tagged< HeapObject > object)
virtual ~HeapObjectVisitor()=default
virtual bool Visit(Tagged< HeapObject > object, int size)=0
static constexpr int kHeaderSize
static constexpr int kMapOffset
static AllocationAlignment RequiredAlignment(Tagged< Map > map)
static V8_INLINE Heap * GetOwnerHeap(Tagged< HeapObject > object)
V8_INLINE size_t Visit(Tagged< HeapObject > object)
ExternalStringTable external_string_table_
std::unique_ptr< ObjectStats > live_object_stats_
void ProcessAllWeakReferences(WeakObjectRetainer *retainer)
NewSpace * new_space() const
SharedSpace * shared_space() const
OldLargeObjectSpace * lo_space() const
NewLargeObjectSpace * new_lo_space() const
bool use_new_space() const
MarkCompactCollector * mark_compact_collector()
std::unique_ptr< ObjectStats > dead_object_stats_
V8_EXPORT_PRIVATE bool Contains(Tagged< HeapObject > value) const
bool ShouldCurrentGCKeepAgesUnchanged() const
void OnMoveEvent(Tagged< HeapObject > source, Tagged< HeapObject > target, int size_in_bytes)
IncrementalMarking * incremental_marking() const
MemoryMeasurement * memory_measurement()
OldSpace * old_space() const
ArrayBufferSweeper * array_buffer_sweeper()
ConcurrentMarking * concurrent_marking() const
TrustedSpace * trusted_space() const
void IterateRoots(RootVisitor *v, base::EnumSet< SkipRoot > options, IterateRootsMode roots_mode=IterateRootsMode::kMainIsolate)
MemoryAllocator * memory_allocator()
void IterateConservativeStackRoots(RootVisitor *root_visitor, IterateRootsMode roots_mode=IterateRootsMode::kMainIsolate)
void IterateRootsForPrecisePinning(RootVisitor *visitor)
CodeLargeObjectSpace * code_lo_space() const
TrustedLargeObjectSpace * trusted_lo_space() const
StackState embedder_stack_state_
CodeSpace * code_space() const
MarkingState * marking_state()
LocalHeap * main_thread_local_heap_
V8_EXPORT_PRIVATE void FatalProcessOutOfMemory(const char *location)
bool ShouldUseBackgroundThreads() const
V8_EXPORT_PRIVATE void Unmark()
PagedNewSpace * paged_new_space() const
std::vector< Handle< NativeContext > > FindAllNativeContexts()
V8_EXPORT_PRIVATE bool ShouldOptimizeForMemoryUsage()
std::vector< Tagged< WeakArrayList > > FindAllRetainedMaps()
NonAtomicMarkingState * non_atomic_marking_state()
v8::CppHeap * cpp_heap() const
bool IsGCWithStack() const
Isolate * isolate() const
void EnsureQuarantinedPagesSweepingCompleted()
HeapAllocator * allocator()
bool ShouldReduceMemory() const
bool IsMajorMarking() const
IndirectPointerHandle Relaxed_LoadHandle() const
static void IterateBody(Tagged< Map > map, Tagged< HeapObject > obj, ObjectVisitor *v)
static Tagged< InstructionStream > FromTargetAddress(Address address)
void VisitRootPointers(Root root, const char *description, FullObjectSlot start, FullObjectSlot end) override
InternalizedStringTableCleaner(Heap *heap)
int PointersRemoved() const
void VisitRootPointers(Root root, const char *description, OffHeapObjectSlot start, OffHeapObjectSlot end) override
static IsolateGroup * current()
GlobalHandles * global_handles() const
bool serializer_enabled() const
CompilationCache * compilation_cache()
Bootstrapper * bootstrapper()
GlobalSafepoint * global_safepoint() const
TracedHandles * traced_handles()
bool AllowsCodeCompaction() const
Isolate * shared_space_isolate() const
StringForwardingTable * string_forwarding_table() const
base::RandomNumberGenerator * fuzzer_rng()
std::unique_ptr< ObjectIterator > GetObjectIterator(Heap *heap) override
size_t Size() const override
MarkingBarrier * marking_barrier()
Address original_top_acquire() const
V8_INLINE bool IsLabValid() const
void RecordRelocSlot(Tagged< InstructionStream > host, RelocInfo *rinfo, Tagged< HeapObject > target)
void RecordSlot(Tagged< HeapObject > object, TSlot slot, Tagged< HeapObject > target)
MainMarkingVisitor(MarkingWorklists::Local *local_marking_worklists, WeakObjects::Local *local_weak_objects, Heap *heap, unsigned mark_compact_epoch, base::EnumSet< CodeFlushMode > code_flush_mode, bool should_keep_ages_unchanged, uint16_t code_flushing_increase)
static constexpr bool IsPacked(Address)
bool IsForwardingAddress() const
static MapWord FromForwardingAddress(Tagged< HeapObject > map_word_host, Tagged< HeapObject > object)
Tagged< HeapObject > ToForwardingAddress(Tagged< HeapObject > map_word_host)
MarkCompactCollector * collector_
ClearTrivialWeakRefJobItem(MarkCompactCollector *collector)
uint64_t trace_id() const
void Run(JobDelegate *delegate) final
V8_INLINE void MarkObject(Tagged< HeapObject > host, Tagged< Object > object)
void VisitMapPointer(Tagged< HeapObject > host) final
void VisitInstructionStreamPointer(Tagged< Code > host, InstructionStreamSlot slot) override
void VisitCodeTarget(Tagged< InstructionStream > host, RelocInfo *rinfo) override
MarkCompactCollector *const collector_
void VisitPointer(Tagged< HeapObject > host, ObjectSlot p) final
void VisitPointers(Tagged< HeapObject > host, MaybeObjectSlot start, MaybeObjectSlot end) final
void VisitEmbeddedPointer(Tagged< InstructionStream > host, RelocInfo *rinfo) override
void VisitPointers(Tagged< HeapObject > host, ObjectSlot start, ObjectSlot end) final
CustomRootBodyMarkingVisitor(MarkCompactCollector *collector)
void Run(JobDelegate *delegate) final
uint64_t trace_id() const
MarkCompactCollector * collector_
FilterNonTrivialWeakRefJobItem(MarkCompactCollector *collector)
SharedHeapObjectVisitor(MarkCompactCollector *collector)
void VisitPointer(Tagged< HeapObject > host, ObjectSlot p) final
void VisitPointers(Tagged< HeapObject > host, ObjectSlot start, ObjectSlot end) final
V8_INLINE void CheckForSharedObject(Tagged< HeapObject > host, ObjectSlot slot, Tagged< Object > object)
void VisitEmbeddedPointer(Tagged< InstructionStream > host, RelocInfo *rinfo) override
void VisitMapPointer(Tagged< HeapObject > host) final
void VisitPointers(Tagged< HeapObject > host, MaybeObjectSlot start, MaybeObjectSlot end) final
void VisitCodeTarget(Tagged< InstructionStream > host, RelocInfo *rinfo) override
MarkCompactCollector *const collector_
void VisitInstructionStreamPointer(Tagged< Code > host, InstructionStreamSlot slot) override
void VisitPointer(Tagged< HeapObject > host, MaybeObjectSlot p) final
KeyToValues key_to_values_
std::pair< size_t, size_t > ProcessMarkingWorklist(v8::base::TimeDelta max_duration, size_t max_bytes_to_process)
WeakObjects weak_objects_
void ClearNonLiveReferences()
V8_INLINE void MarkObject(Tagged< HeapObject > host, Tagged< HeapObject > obj, MarkingHelper::WorklistTarget target_worklist)
void MarkTransitiveClosure()
void SweepLargeSpace(LargeObjectSpace *space)
void MarkRoots(RootVisitor *root_visitor)
bool use_background_threads_in_cycle_
void StartMarking(std::shared_ptr<::heap::base::IncrementalMarkingSchedule > schedule={})
base::EnumSet< CodeFlushMode > code_flush_mode_
void SweepArrayBufferExtensions()
bool MarkTransitiveClosureUntilFixpoint()
bool have_code_to_deoptimize_
void ProcessOldCodeCandidates()
void AddEvacuationCandidate(PageMetadata *p)
NativeContextInferrer native_context_inferrer_
void CollectEvacuationCandidates(PagedSpace *space)
static bool IsUnmarkedSharedHeapObject(Heap *heap, FullObjectSlot p)
void ClearTrivialWeakReferences()
V8_INLINE void MarkRootObject(Root root, Tagged< HeapObject > obj, MarkingHelper::WorklistTarget target_worklist)
std::vector< PageMetadata * > evacuation_candidates_
void ComputeEvacuationHeuristics(size_t area_size, int *target_fragmentation_percent, size_t *max_evacuated_bytes)
static bool IsOnEvacuationCandidate(Tagged< MaybeObject > obj)
bool ProcessEphemeron(Tagged< HeapObject > key, Tagged< HeapObject > value)
std::unique_ptr< MarkingWorklists::Local > local_marking_worklists_
WeakObjects * weak_objects()
MarkingWorklists marking_worklists_
void ProcessFlushedBaselineCandidates()
void MaybeEnableBackgroundThreadsInCycle(CallOrigin origin)
MarkingState *const marking_state_
void ClearTrustedWeakReferences()
WeakObjects::Local * local_weak_objects()
void FinishConcurrentMarking()
std::vector< PageMetadata * > empty_new_space_pages_to_be_swept_
void PerformWrapperTracing()
bool UseBackgroundThreadsInCycle() const
static V8_INLINE void RecordSlot(Tagged< HeapObject > object, THeapObjectSlot slot, Tagged< HeapObject > target)
void MarkTransitiveClosureLinear()
void ProcessTopOptimizedFrame(ObjectVisitor *visitor, Isolate *isolate)
EphemeronResult ApplyEphemeronSemantics(Tagged< HeapObject > key, Tagged< HeapObject > value)
void MarkObjectsFromClientHeap(Isolate *client)
void VerifyEphemeronMarking()
std::unique_ptr< WeakObjects::Local > local_weak_objects_
@ kProcessRememberedEphemerons
bool StartCompaction(StartCompactionMode mode)
static bool IsUnmarkedHeapObject(Heap *heap, FullObjectSlot p)
void MarkObjectsFromClientHeaps()
void MarkRootsFromConservativeStack(RootVisitor *root_visitor)
NativeContextStats native_context_stats_
static void RecordRelocSlot(Tagged< InstructionStream > host, RelocInfo *rinfo, Tagged< HeapObject > target)
void FilterNonTrivialWeakReferences()
std::unique_ptr< MainMarkingVisitor > marking_visitor_
MarkCompactCollector(Heap *heap)
NonAtomicMarkingState *const non_atomic_marking_state_
base::EnumSet< CodeFlushMode > code_flush_mode() const
MarkingState *const marking_state_
MarkCompactWeakObjectRetainer(Heap *heap, MarkingState *marking_state)
Tagged< Object > RetainAs(Tagged< Object > object) override
static V8_EXPORT_PRIVATE void PublishAll(Heap *heap)
static void DeactivateAll(Heap *heap)
V8_INLINE bool IsMarked(const Tagged< HeapObject > obj) const
V8_INLINE bool TryMarkAndAccountLiveBytes(Tagged< HeapObject > obj)
V8_INLINE bool IsUnmarked(const Tagged< HeapObject > obj) const
static constexpr std::nullptr_t kNoCppMarkingState
void CreateContextWorklists(const std::vector< Address > &contexts)
void ReleaseContextWorklists()
void ReleaseQueuedPages()
V8_INLINE MutablePageMetadata * Next()
void ClearFlagSlow(Flag flag)
V8_INLINE bool InWritableSharedSpace() const
bool IsEvacuationCandidate() const
@ FORCE_EVACUATION_CANDIDATE_FOR_TESTING
V8_INLINE void SetFlagNonExecutable(Flag flag)
Executability executable() const
V8_INLINE bool IsFlagSet(Flag flag) const
V8_INLINE Address address() const
V8_INLINE MemoryChunkMetadata * Metadata()
static V8_INLINE MemoryChunk * FromAddress(Address addr)
bool NeverEvacuate() const
size_t Offset(Address addr) const
void SetFlagSlow(Flag flag)
V8_INLINE bool InYoungGeneration() const
bool IsQuarantined() const
bool ShouldSkipEvacuationSlotRecording() const
V8_INLINE void ClearFlagNonExecutable(Flag flag)
static V8_INLINE MemoryChunk * FromHeapObject(Tagged< HeapObject > object)
V8_INLINE bool InReadOnlySpace() const
std::vector< Address > StartProcessing()
void FinishProcessing(const NativeContextStats &stats)
virtual void Move(AllocationSpace dest, Tagged< HeapObject > src, Tagged< HeapObject > dst, int size)=0
virtual ~MigrationObserver()=default
MigrationObserver(Heap *heap)
void IncrementLiveBytesAtomically(intptr_t diff)
size_t live_bytes() const
static MutablePageMetadata * cast(MemoryChunkMetadata *metadata)
static const int kPageSize
bool SweepingDone() const
static V8_INLINE MutablePageMetadata * FromHeapObject(Tagged< HeapObject > o)
int ComputeFreeListsLength()
V8_INLINE bool Infer(PtrComprCageBase cage_base, Tagged< Map > map, Tagged< HeapObject > object, Address *native_context)
V8_INLINE void IncrementSize(Address context, Tagged< Map > map, Tagged< HeapObject > object, size_t size)
virtual void GarbageCollectionEpilogue()=0
PtrComprCageBase code_cage_base() const
PtrComprCageBase cage_base() const
MutablePageMetadata * next()
PageEvacuationJob(Isolate *isolate, MarkCompactCollector *collector, std::vector< std::unique_ptr< Evacuator > > *evacuators, std::vector< std::pair< ParallelWorkItem, MutablePageMetadata * > > evacuation_items)
IndexGenerator generator_
size_t GetMaxConcurrency(size_t worker_count) const override
void ProcessItems(JobDelegate *delegate, Evacuator *evacuator)
std::vector< std::pair< ParallelWorkItem, MutablePageMetadata * > > evacuation_items_
MarkCompactCollector * collector_
void Run(JobDelegate *delegate) override
uint64_t trace_id() const
std::vector< std::unique_ptr< Evacuator > > * evacuators_
void MarkEvacuationCandidate()
static V8_INLINE PageMetadata * FromHeapObject(Tagged< HeapObject > o)
PagedSpaceForNewSpace * paged_space()
static PagedNewSpace * From(NewSpace *space)
void ClearAllocatorState()
bool ShouldReleaseEmptyPage() const
void ReleasePage(PageMetadata *page) final
std::vector< std::unique_ptr< UpdatingItem > > updating_items_
size_t GetMaxConcurrency(size_t worker_count) const override
MarkCompactCollector * collector_
void Run(JobDelegate *delegate) override
void UpdatePointers(JobDelegate *delegate)
PointersUpdatingJob(Isolate *isolate, MarkCompactCollector *collector, std::vector< std::unique_ptr< UpdatingItem > > updating_items)
uint64_t trace_id() const
IndexGenerator generator_
void VisitRootPointer(Root root, const char *description, FullObjectSlot p) override
static void UpdateStrongMaybeObjectSlotInternal(PtrComprCageBase cage_base, MaybeObjectSlot slot)
void VisitPointer(Tagged< HeapObject > host, ObjectSlot p) override
void VisitEmbeddedPointer(Tagged< InstructionStream > host, RelocInfo *rinfo) override
IsolateForSandbox isolate_
static void UpdateStrongSlotInternal(PtrComprCageBase cage_base, ObjectSlot slot)
void VisitCodeTarget(Tagged< InstructionStream > host, RelocInfo *rinfo) override
void VisitRootPointers(Root root, const char *description, FullObjectSlot start, FullObjectSlot end) override
PointersUpdatingVisitor(Heap *heap)
static void UpdateRootSlotInternal(PtrComprCageBase cage_base, OffHeapObjectSlot slot)
static void UpdateRootSlotInternal(PtrComprCageBase cage_base, FullObjectSlot slot)
void VisitPointers(Tagged< HeapObject > host, MaybeObjectSlot start, MaybeObjectSlot end) final
void VisitRootPointers(Root root, const char *description, OffHeapObjectSlot start, OffHeapObjectSlot end) override
void VisitInstructionStreamPointer(Tagged< Code > host, InstructionStreamSlot slot) override
void VisitPointers(Tagged< HeapObject > host, ObjectSlot start, ObjectSlot end) override
void VisitPointer(Tagged< HeapObject > host, MaybeObjectSlot p) override
static void UpdateSlotInternal(PtrComprCageBase cage_base, MaybeObjectSlot slot)
MarkCompactCollector *const collector_
void VisitRootPointers(Root root, const char *description, FullObjectSlot start, FullObjectSlot end) final
PrecisePagePinningVisitor(MarkCompactCollector *collector)
void VisitRootPointer(Root root, const char *description, FullObjectSlot p) final
const bool should_pin_in_shared_space_
void HandlePointer(FullObjectSlot p)
std::unordered_map< Tagged< AllocationSite >, size_t, Object::Hasher > PretenuringFeedbackMap
static void UpdateAllocationSite(Heap *heap, Tagged< Map > map, Tagged< HeapObject > object, int object_size, PretenuringFeedbackMap *pretenuring_feedback)
ProfilingMigrationObserver(Heap *heap)
void Move(AllocationSpace dest, Tagged< HeapObject > src, Tagged< HeapObject > dst, int size) final
void VisitMapPointer(Tagged< HeapObject > host) final
void VisitTrustedPointerTableEntry(Tagged< HeapObject > host, IndirectPointerSlot slot) final
void VisitCodeTarget(Tagged< InstructionStream > host, RelocInfo *rinfo) override
void VisitPointer(Tagged< HeapObject > host, ObjectSlot p) final
void VisitPointer(Tagged< HeapObject > host, MaybeObjectSlot p) final
void VisitEmbeddedPointer(Tagged< InstructionStream > host, RelocInfo *rinfo) override
void VisitPointers(Tagged< HeapObject > host, MaybeObjectSlot start, MaybeObjectSlot end) final
static V8_INLINE constexpr bool UsePrecomputedObjectSize()
void VisitIndirectPointer(Tagged< HeapObject > host, IndirectPointerSlot slot, IndirectPointerMode mode) final
void RecordMigratedSlot(Tagged< HeapObject > host, Tagged< MaybeObject > value, Address slot)
void VisitExternalPointer(Tagged< HeapObject > host, ExternalPointerSlot slot) final
void VisitInstructionStreamPointer(Tagged< Code > host, InstructionStreamSlot slot) final
RecordMigratedSlotVisitor(Heap *heap)
void VisitProtectedPointer(Tagged< TrustedObject > host, ProtectedPointerSlot slot) final
void VisitInternalReference(Tagged< InstructionStream > host, RelocInfo *rinfo) final
void VisitEphemeron(Tagged< HeapObject > host, int index, ObjectSlot key, ObjectSlot value) override
void VisitPointers(Tagged< HeapObject > host, ObjectSlot start, ObjectSlot end) final
void VisitProtectedPointer(Tagged< TrustedObject > host, ProtectedMaybeObjectSlot slot) final
void VisitExternalReference(Tagged< InstructionStream > host, RelocInfo *rinfo) final
static constexpr bool IsCodeTargetMode(Mode mode)
V8_INLINE Address target_address()
static constexpr bool IsEmbeddedObjectMode(Mode mode)
V8_INLINE Tagged< HeapObject > target_object(PtrComprCageBase cage_base)
V8_INLINE Address constant_pool_entry_address()
static void Insert(MutablePageMetadata *page, size_t slot_offset)
static int IterateTyped(MutablePageMetadata *chunk, Callback callback)
static int Iterate(MutablePageMetadata *chunk, Callback callback, SlotSet::EmptyBucketMode mode)
V8_INLINE void VisitRootPointer(Root root, const char *description, FullObjectSlot p) final
void VisitRunningCode(FullObjectSlot code_slot, FullObjectSlot istream_or_smi_zero_slot) final
static constexpr Tagged< Smi > deleted_element()
static constexpr Tagged< Smi > FromInt(int value)
static constexpr Tagged< Smi > zero()
NonAtomicMarkingState *const marking_state_
void DisposeExternalResource(StringForwardingTable::Record *record)
V8_INLINE void IterateElements(Func &&callback)
static constexpr Tagged< Smi > deleted_element()
static constexpr Tagged< Smi > deleted_element()
static bool IsInPlaceInternalizableExcludingExternal(InstanceType instance_type)
bool sweeping_in_progress() const
V8_EXPORT_PRIVATE void StartMajorSweeperTasks()
uint64_t GetTraceIdForFlowEvent(GCTracer::Scope::ScopeId scope_id) const
void SweepEmptyNewSpacePage(PageMetadata *page)
void AddPage(AllocationSpace space, PageMetadata *page)
V8_INLINE constexpr StorageType ptr() const
bool GetHeapObject(Tagged< HeapObject > *result) const
bool ToSmi(Tagged< Smi > *value) const
V8_INLINE constexpr bool is_null() const
constexpr V8_INLINE bool IsHeapObject() const
constexpr V8_INLINE bool IsSmi() const
static WritableJitAllocation RegisterInstructionStreamAllocation(Address addr, size_t size, bool enforce_write_api=false)
bool HasSimpleTransitionTo(Tagged< Map > map)
static Tagged< HeapObject > GetTargetObject(Heap *heap, SlotType slot_type, Address addr)
virtual ~UpdatingItem()=default
static V8_EXPORT_PRIVATE v8::Platform * GetCurrentPlatform()
V8_EXPORT_PRIVATE void Publish()
V8_INLINE void CopyCode(size_t dst_offset, const uint8_t *src, size_t num_bytes)
static WritableJitAllocation ForInstructionStream(Tagged< InstructionStream > istream)
V8_INLINE void WriteHeaderSlot(T value)
V8_INLINE void CopyData(size_t dst_offset, const uint8_t *src, size_t num_bytes)
V8_INLINE WritableJitAllocation LookupAllocationContaining(Address addr)
static void GenerationalForRelocInfo(Tagged< InstructionStream > host, RelocInfo *rinfo, Tagged< HeapObject > object)
static void SharedForRelocInfo(Tagged< InstructionStream > host, RelocInfo *, Tagged< HeapObject > value)
void SweepSegments(size_t threshold=2 *kEntriesPerSegment)
#define PROFILE(the_isolate, Call)
#define V8_COMPRESS_POINTERS_8GB_BOOL
#define ALIGN_TO_ALLOCATION_ALIGNMENT(value)
#define HAS_WEAK_HEAP_OBJECT_TAG(value)
constexpr const char * ToString(DataViewOp op)
enum v8::internal::@1270::DeoptimizableCodeIterator::@67 state_
Disallow flags or implications overriding each other abort_on_contradictory_flags true
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available space
#define TRACE_GC_NOTE_WITH_FLOW(note, bind_id, flow_flags)
#define TRACE_GC_EPOCH_WITH_FLOW(tracer, scope_id, thread_kind, bind_id, flow_flags)
#define TRACE_GC_WITH_FLOW(tracer, scope_id, bind_id, flow_flags)
#define TRACE_GC(tracer, scope_id)
#define TRACE_GC_ARG1(tracer, scope_id, arg0_name, arg0_value)
#define TRACE_GC_CATEGORIES
#define TRACE_GC1_WITH_FLOW(tracer, scope_id, thread_kind, bind_id, flow_flags)
std::unique_ptr< icu::DateTimePatternGenerator > generator_
ZoneVector< RpoNumber > & result
MarkCompactCollector * collector_
NonAtomicMarkingState * marking_state_
MutablePageMetadata * chunk_
std::vector< std::unique_ptr< ClearingItem > > items_
const bool record_old_to_shared_slots_
void MakeWeak(i::Address *location, void *parameter, WeakCallbackInfo< void >::Callback weak_callback, WeakCallbackType type)
constexpr bool IsPowerOfTwo(T value)
LockGuard< Mutex > MutexGuard
void Add(RWDigits Z, Digits X, Digits Y)
V8_EXPORT_PRIVATE WasmCodePointerTable * GetProcessWideWasmCodePointerTable()
V8_INLINE IndirectHandle< T > handle(Tagged< T > object, Isolate *isolate)
static V8_INLINE bool HasWeakHeapObjectTag(const Tagged< Object > value)
IndirectPointerHandle TrustedPointerHandle
constexpr const char * ToString(DeoptimizeKind kind)
constexpr int kTaggedSize
V8_INLINE constexpr PtrComprCageBase GetPtrComprCageBaseFromOnHeapAddress(Address address)
SlotTraits::TObjectSlot ObjectSlot
void PrintF(const char *format,...)
static V8_INLINE constexpr bool IsSharedExternalPointerType(ExternalPointerTagRange tag_range)
V8_INLINE constexpr bool IsSmi(TaggedImpl< kRefType, StorageType > obj)
kInterpreterTrampolineOffset Tagged< HeapObject >
@ kCompactionSpaceForMarkCompact
void MemsetTagged(Tagged_t *start, Tagged< MaybeObject > value, size_t counter)
Handle< To > UncheckedCast(Handle< From > value)
constexpr int kTaggedSizeLog2
constexpr uint32_t kZapValue
static void TraceFragmentation(PagedSpace *space)
bool IsCppHeapMarkingFinished(Heap *heap, MarkingWorklists::Local *local_marking_worklists)
Tagged< ClearedWeakValue > ClearedValue(PtrComprCageBase cage_base)
@ kExternalStringResourceTag
@ kExternalStringResourceDataTag
V8_INLINE constexpr bool IsHeapObject(TaggedImpl< kRefType, StorageType > obj)
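IsSmi, IsHeapObject, and HasWeakHeapObjectTag (listed above) all inspect the low tag bits of a tagged word. The sketch below uses stand-in constants that mirror V8's usual layout (Smi: low bit 0, strong heap reference: low bits 01, weak heap reference: low bits 11); it is an illustration, not the production TaggedImpl.

// Illustration of the low-bit tagging tested by IsSmi, IsHeapObject, and
// HasWeakHeapObjectTag. Constants and *Sketch names are stand-ins.
#include <cstdint>

using Tagged_t = uintptr_t;

constexpr Tagged_t kSmiTagMask = 0b1;          // low bit: Smi vs. pointer
constexpr Tagged_t kHeapObjectTagMask = 0b11;  // low two bits classify pointers
constexpr Tagged_t kSmiTag = 0b0;
constexpr Tagged_t kStrongHeapObjectTag = 0b01;
constexpr Tagged_t kWeakHeapObjectTag = 0b11;

constexpr bool IsSmiSketch(Tagged_t value) {
  return (value & kSmiTagMask) == kSmiTag;
}

constexpr bool IsStrongHeapObjectSketch(Tagged_t value) {
  return (value & kHeapObjectTagMask) == kStrongHeapObjectTag;
}

constexpr bool HasWeakHeapObjectTagSketch(Tagged_t value) {
  return (value & kHeapObjectTagMask) == kWeakHeapObjectTag;
}

static_assert(IsSmiSketch(42u << 1), "small integers keep a zero low bit");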
V8_EXPORT_PRIVATE FlagValues v8_flags
Tagged< ClearedWeakValue > ClearedTrustedValue()
uint32_t ExternalPointerHandle
static Tagged< String > UpdateReferenceInExternalStringTableEntry(Heap *heap, FullObjectSlot p)
@ TRUSTED_TO_SHARED_TRUSTED
static constexpr Address kNullAddress
void PrintIsolate(void *isolate, const char *format,...)
V8_INLINE bool InsideSandbox(uintptr_t address)
constructor_or_back_pointer
Tagged< To > Cast(Tagged< From > value, const v8::SourceLocation &loc=INIT_SOURCE_LOCATION_IN_DEBUG)
static constexpr RelaxedLoadTag kRelaxedLoad
static constexpr RelaxedStoreTag kRelaxedStore
static constexpr AcquireLoadTag kAcquireLoad
SourcePositionTable *const table_
WeakObjects weak_objects_
WeakObjects::Local local_weak_objects_
#define DCHECK_CODEOBJECT_SIZE(size)
#define DCHECK_OBJECT_SIZE(size)
#define DCHECK_LE(v1, v2)
#define CHECK_IMPLIES(lhs, rhs)
#define DCHECK_NOT_NULL(val)
#define DCHECK_IMPLIES(v1, v2)
#define DCHECK_NE(v1, v2)
#define CHECK_NE(lhs, rhs)
#define DCHECK_GE(v1, v2)
#define CHECK_EQ(lhs, rhs)
#define DCHECK(condition)
#define DCHECK_LT(v1, v2)
#define DCHECK_EQ(v1, v2)
#define DCHECK_GT(v1, v2)
constexpr bool IsAligned(T value, U alignment)
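IsAligned, together with the IsPowerOfTwo helper listed earlier, is the usual bit-twiddling check: for a power-of-two alignment, a value is aligned exactly when its low bits are zero. The standalone sketch below shows the technique; the *Sketch names are illustrative and not V8's base utilities.

// Standard bit-trick sketches for IsPowerOfTwo and IsAligned.
template <typename T>
constexpr bool IsPowerOfTwoSketch(T value) {
  // Exactly one bit set: clearing the lowest set bit must leave zero.
  return value > 0 && (value & (value - 1)) == 0;
}

template <typename T, typename U>
constexpr bool IsAlignedSketch(T value, U alignment) {
  // Only meaningful for power-of-two alignments: the low bits must be zero.
  return IsPowerOfTwoSketch(alignment) && (value & (alignment - 1)) == 0;
}

static_assert(IsPowerOfTwoSketch(4096), "page-sized alignment");
static_assert(IsAlignedSketch(0x1000, 16), "0x1000 is 16-byte aligned");
static_assert(!IsAlignedSketch(0x1004, 8), "0x1004 is not 8-byte aligned");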
Tagged< HeapObject > value
Tagged< HeapObject > heap_object
static V8_INLINE bool TryMarkAndPush(Heap *heap, MarkingWorklists::Local *marking_worklist, MarkingState *marking_state, WorklistTarget target_worklist, Tagged< HeapObject > object)
static V8_INLINE bool IsMarkedOrAlwaysLive(Heap *heap, MarkingStateT *marking_state, Tagged< HeapObject > object)
static V8_INLINE bool IsUnmarkedAndNotAlwaysLive(Heap *heap, MarkingStateT *marking_state, Tagged< HeapObject > object)
static V8_INLINE std::optional< WorklistTarget > ShouldMarkObject(Heap *heap, Tagged< HeapObject > object)
static V8_INLINE LivenessMode GetLivenessMode(Heap *heap, Tagged< HeapObject > object)
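The MarkingHelper entries above (ShouldMarkObject, TryMarkAndPush, IsMarkedOrAlwaysLive, IsUnmarkedAndNotAlwaysLive, GetLivenessMode) expose the classic mark-and-push step of the marking phase: an object is traced exactly once, by whichever thread first flips its mark bit, and is then pushed onto a worklist so its slots get visited. The sketch below shows that pattern with a set and a deque; ObjectId, MarkBits, and Worklist are illustration-only stand-ins for V8's marking bitmap and per-thread worklists.

// Generic sketch of the mark-and-push step suggested by TryMarkAndPush.
#include <cstdint>
#include <deque>
#include <unordered_set>

using ObjectId = uintptr_t;

struct MarkBits {
  // Returns true only for the first caller, mirroring a test-and-set on a
  // marking bitmap cell.
  bool TryMark(ObjectId object) { return marked.insert(object).second; }
  bool IsMarked(ObjectId object) const { return marked.count(object) != 0; }
  std::unordered_set<ObjectId> marked;
};

struct Worklist {
  void Push(ObjectId object) { objects.push_back(object); }
  bool Pop(ObjectId* object) {
    if (objects.empty()) return false;
    *object = objects.back();
    objects.pop_back();
    return true;
  }
  std::deque<ObjectId> objects;
};

// Mark the object if it was unmarked and schedule it for tracing.
bool TryMarkAndPushSketch(MarkBits& bits, Worklist& worklist, ObjectId object) {
  if (!bits.TryMark(object)) return false;  // already marked by someone else
  worklist.Push(object);
  return true;
}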
static V8_EXPORT_PRIVATE std::atomic_uint gc_stats
static bool is_gc_stats_enabled()
#define TRACE_EVENT0(category_group, name)
#define TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)
#define TRACE_EVENT_SCOPE_THREAD
#define TRACE_DISABLED_BY_DEFAULT(name)
#define TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, arg2_name, arg2_val)
#define TRACE_EVENT1(category_group, name, arg1_name, arg1_val)
#define TRACE_EVENT_FLAG_FLOW_OUT
#define TRACE_EVENT_FLAG_FLOW_IN
#define TRACE_STR_COPY(str)
#define V8_LIKELY(condition)
#define V8_UNLIKELY(condition)