v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
scavenger.cc
Go to the documentation of this file.
1// Copyright 2015 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
7#include <algorithm>
8#include <atomic>
9#include <optional>
10#include <unordered_map>
11
13#include "src/common/globals.h"
15#include "src/flags/flags.h"
22#include "src/heap/gc-tracer.h"
23#include "src/heap/heap-inl.h"
27#include "src/heap/heap.h"
35#include "src/heap/new-spaces.h"
40#include "src/heap/slot-set.h"
41#include "src/heap/sweeper.h"
42#include "src/heap/zapping.h"
47#include "src/objects/objects.h"
48#include "src/objects/slots.h"
50#include "src/utils/utils-inl.h"
51
52namespace v8 {
53namespace internal {
54
55class IterateAndScavengePromotedObjectsVisitor final
56 : public HeapVisitor<IterateAndScavengePromotedObjectsVisitor> {
57 public:
58 explicit IterateAndScavengePromotedObjectsVisitor(Scavenger* scavenger)
59 : HeapVisitor(scavenger->heap()->isolate()), scavenger_(scavenger) {}
60
61 V8_INLINE static constexpr bool ShouldUseUncheckedCast() { return true; }
62
63 V8_INLINE static constexpr bool UsePrecomputedObjectSize() { return true; }
64
66
71
76
77 inline void VisitEphemeron(Tagged<HeapObject> obj, int entry, ObjectSlot key,
78 ObjectSlot value) override {
79 DCHECK(HeapLayout::IsSelfForwarded(obj) || IsEphemeronHashTable(obj));
80 VisitPointer(obj, value);
81
82 if (HeapLayout::InYoungGeneration(*key)) {
83 // We cannot check the map here, as it might be a large object.
84 scavenger_->RememberPromotedEphemeron(
85 UncheckedCast<EphemeronHashTable>(obj), entry);
86 } else {
87 VisitPointer(obj, key);
88 }
89 }
90
91 void VisitExternalPointer(Tagged<HeapObject> host,
92 ExternalPointerSlot slot) override {
93#ifdef V8_COMPRESS_POINTERS
94 DCHECK(!slot.tag_range().IsEmpty());
96 // TODO(chromium:337580006): Remove when pointer compression always uses
97 // EPT.
98 if (!slot.HasExternalPointerHandle()) return;
99 ExternalPointerHandle handle = slot.Relaxed_LoadHandle();
100 Heap* heap = scavenger_->heap();
101 ExternalPointerTable& table = heap->isolate()->external_pointer_table();
102
103 // For survivor objects, the scavenger marks their EPT entries when they are
104 // copied and then sweeps the young EPT space at the end of collection,
105 // reclaiming unmarked EPT entries.
106 //
107 // However when promoting, we just evacuate the entry from new to old space.
108 // Usually the entry will be unmarked, unless the slot was initialized since
109 // the last GC (external pointer tags have the mark bit set), in which case
110 // it may be marked already. In any case, transfer the color from new to
111 // old EPT space.
112 table.Evacuate(heap->young_external_pointer_space(),
113 heap->old_external_pointer_space(), handle, slot.address(),
114 ExternalPointerTable::EvacuateMarkMode::kTransferMark);
115#endif // V8_COMPRESS_POINTERS
116 }
117
118 // Special cases: Unreachable visitors for objects that are never found in the
119 // young generation and thus cannot be found when iterating promoted objects.
130
131 private:
132 template <typename TSlot>
133 V8_INLINE void VisitPointersImpl(Tagged<HeapObject> host, TSlot start,
134 TSlot end) {
135 using THeapObjectSlot = typename TSlot::THeapObjectSlot;
136 // Treat weak references as strong.
137 // TODO(marja): Proper weakness handling in the young generation.
138 for (TSlot slot = start; slot < end; ++slot) {
139 typename TSlot::TObject object = *slot;
140 Tagged<HeapObject> heap_object;
141 if (object.GetHeapObject(&heap_object)) {
142 HandleSlot(host, THeapObjectSlot(slot), heap_object);
143 }
144 }
145 }
146
147 template <typename THeapObjectSlot>
148 V8_INLINE void HandleSlot(Tagged<HeapObject> host, THeapObjectSlot slot,
149 Tagged<HeapObject> target) {
150 static_assert(
151 std::is_same_v<THeapObjectSlot, FullHeapObjectSlot> ||
152 std::is_same_v<THeapObjectSlot, HeapObjectSlot>,
153 "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
155
156 if (Heap::InFromPage(target)) {
157 SlotCallbackResult result = scavenger_->ScavengeObject(slot, target);
158 bool success = (*slot).GetHeapObject(&target);
159 USE(success);
160 DCHECK(success);
161
162 if (result == KEEP_SLOT) {
163 SLOW_DCHECK(IsHeapObject(target));
165 MutablePageMetadata* page =
167
168 // Sweeper is stopped during scavenge, so we can directly
169 // insert into its remembered set here.
171 page, chunk->Offset(slot.address()));
172 }
174 }
175
180 page, chunk->Offset(slot.address()));
181 }
182 }
183
185};
186
187namespace {
188
189V8_INLINE bool IsUnscavengedHeapObject(Heap* heap, Tagged<Object> object) {
190 return Heap::InFromPage(object) && !Cast<HeapObject>(object)
191 ->map_word(kRelaxedLoad)
192 .IsForwardingAddress();
193}
194
195// Same as IsUnscavengedHeapObject() above but specialized for HeapObjects.
196V8_INLINE bool IsUnscavengedHeapObject(Heap* heap,
197 Tagged<HeapObject> heap_object) {
198 return Heap::InFromPage(heap_object) &&
199 !heap_object->map_word(kRelaxedLoad).IsForwardingAddress();
200}
201
202bool IsUnscavengedHeapObjectSlot(Heap* heap, FullObjectSlot p) {
203 return IsUnscavengedHeapObject(heap, *p);
204}
205
206} // namespace
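// A minimal usage sketch for the predicates above, assuming a full object
// slot p that may reference the young generation: an object that is still on
// a from-page and whose map word holds no forwarding address did not survive
// the scavenge, so a weak reference to it has to be reset.
//
//   Tagged<Object> object = *p;
//   if (IsUnscavengedHeapObject(heap, object)) {
//     // The referent is dead for this cycle; clear the weak slot.
//   } else if (Heap::InFromPage(object)) {
//     // The referent survived; the slot must be rewritten to the forwarding
//     // address, as GlobalHandlesWeakRootsUpdatingVisitor does below.
//   }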
207
208ScavengerCollector::JobTask::JobTask(
209 ScavengerCollector* collector,
210 std::vector<std::unique_ptr<Scavenger>>* scavengers,
211 std::vector<std::pair<ParallelWorkItem, MutablePageMetadata*>>
212 old_to_new_chunks,
213 const Scavenger::CopiedList& copied_list,
214 const Scavenger::PinnedList& pinned_list,
215 const Scavenger::PromotedList& promoted_list)
216 : collector_(collector),
217 scavengers_(scavengers),
218 old_to_new_chunks_(std::move(old_to_new_chunks)),
219 remaining_memory_chunks_(old_to_new_chunks_.size()),
220 generator_(old_to_new_chunks_.size()),
221 copied_list_(copied_list),
222 pinned_list_(pinned_list),
223 promoted_list_(promoted_list),
224 trace_id_(reinterpret_cast<uint64_t>(this) ^
225 collector_->heap_->tracer()->CurrentEpoch(
226 GCTracer::Scope::SCAVENGER)) {}
227
228void ScavengerCollector::JobTask::Run(JobDelegate* delegate) {
229 DCHECK_LT(delegate->GetTaskId(), scavengers_->size());
230 // Set the current isolate such that trusted pointer tables etc are
231 // available and the cage base is set correctly for multi-cage mode.
232 SetCurrentIsolateScope isolate_scope(collector_->heap_->isolate());
233
234 collector_->estimate_concurrency_.fetch_add(1, std::memory_order_relaxed);
235
236 Scavenger* scavenger = (*scavengers_)[delegate->GetTaskId()].get();
237 if (delegate->IsJoiningThread()) {
238 TRACE_GC_WITH_FLOW(collector_->heap_->tracer(),
239 GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL, trace_id_,
241 ProcessItems(delegate, scavenger);
242 } else {
244 collector_->heap_->tracer(),
245 GCTracer::Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL,
247 ProcessItems(delegate, scavenger);
248 }
249}
250
251size_t ScavengerCollector::JobTask::GetMaxConcurrency(
252 size_t worker_count) const {
253 // We need to account for local segments held by worker_count in addition to
254 // GlobalPoolSize() of copied_list_, pinned_list_ and promoted_list_.
255 size_t wanted_num_workers =
256 std::max<size_t>(remaining_memory_chunks_.load(std::memory_order_relaxed),
257 worker_count + copied_list_.Size() +
258 pinned_list_.Size() + promoted_list_.Size());
259 if (!collector_->heap_->ShouldUseBackgroundThreads() ||
260 collector_->heap_->ShouldOptimizeForBattery()) {
261 return std::min<size_t>(wanted_num_workers, 1);
262 }
263 return std::min<size_t>(scavengers_->size(), wanted_num_workers);
264}
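// A worked example of the heuristic above, assuming 4 scavenger tasks, 3
// remaining old-to-new chunks, worker_count == 2, and 10 entries across the
// copied, pinned and promoted lists:
//
//   wanted_num_workers = max(3, 2 + 10) = 12
//   result             = min(scavengers_->size(), 12) = 4
//
// When background threads are disabled or the battery heuristic is active,
// the result is instead clamped to min(12, 1) = 1.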
265
266void ScavengerCollector::JobTask::ProcessItems(JobDelegate* delegate,
267 Scavenger* scavenger) {
268 double scavenging_time = 0.0;
269 {
270 TimedScope scope(&scavenging_time);
271
272 scavenger->VisitPinnedObjects();
273 ConcurrentScavengePages(scavenger);
274 scavenger->Process(delegate);
275 }
276 if (V8_UNLIKELY(v8_flags.trace_parallel_scavenge)) {
277 PrintIsolate(collector_->heap_->isolate(),
278 "scavenge[%p]: time=%.2f copied=%zu promoted=%zu\n",
279 static_cast<void*>(this), scavenging_time,
280 scavenger->bytes_copied(), scavenger->bytes_promoted());
281 }
282}
283
284void ScavengerCollector::JobTask::ConcurrentScavengePages(
285 Scavenger* scavenger) {
286 while (remaining_memory_chunks_.load(std::memory_order_relaxed) > 0) {
287 std::optional<size_t> index = generator_.GetNext();
288 if (!index) {
289 return;
290 }
291 for (size_t i = *index; i < old_to_new_chunks_.size(); ++i) {
292 auto& work_item = old_to_new_chunks_[i];
293 if (!work_item.first.TryAcquire()) {
294 break;
295 }
296 scavenger->ScavengePage(work_item.second);
297 if (remaining_memory_chunks_.fetch_sub(1, std::memory_order_relaxed) <=
298 1) {
299 return;
300 }
301 }
302 }
303}
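// A minimal sketch of the work distribution above, assuming 8 old-to-new
// chunks and 3 workers: each worker fetches a starting index from the shared
// index generator and then claims consecutive chunks with TryAcquire() until
// it hits a chunk another worker already owns, for example:
//
//   worker 0: starts at 0, acquires chunks 0, 1, 2
//   worker 1: starts at 3, acquires chunks 3, 4
//   worker 2: starts at 5, acquires chunks 5, 6, 7
//
// Every successful ScavengePage() call decrements remaining_memory_chunks_,
// and workers exit once it reaches zero.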
304
307
308namespace {
309
310// Helper class for updating weak global handles. There's no additional scavenge
311// processing required here as this phase runs after actual scavenge.
312class GlobalHandlesWeakRootsUpdatingVisitor final : public RootVisitor {
313 public:
314 void VisitRootPointer(Root root, const char* description,
315 FullObjectSlot p) final {
316 UpdatePointer(p);
317 }
318 void VisitRootPointers(Root root, const char* description,
319 FullObjectSlot start, FullObjectSlot end) final {
320 for (FullObjectSlot p = start; p < end; ++p) {
321 UpdatePointer(p);
322 }
323 }
324
325 private:
326 void UpdatePointer(FullObjectSlot p) {
327 Tagged<Object> object = *p;
329 // The object may be in the old generation as global handles over
330 // approximates the list of young nodes. This check also bails out for
331 // Smis.
332 if (!HeapLayout::InYoungGeneration(object)) {
333 return;
334 }
335
336 Tagged<HeapObject> heap_object = Cast<HeapObject>(object);
337 // TODO(chromium:1336158): Turn the following CHECKs into DCHECKs after
338 // flushing out potential issues.
339 CHECK(Heap::InFromPage(heap_object));
340 MapWord first_word = heap_object->map_word(kRelaxedLoad);
341 CHECK(first_word.IsForwardingAddress());
342 Tagged<HeapObject> dest = first_word.ToForwardingAddress(heap_object);
343 if (heap_object == dest) {
344 DCHECK(Heap::IsLargeObject(heap_object) ||
345 MemoryChunk::FromHeapObject(heap_object)->IsQuarantined());
346 return;
347 }
348 UpdateHeapObjectReferenceSlot(FullHeapObjectSlot(p), dest);
349 // The destination object should be in the "to" space. However, it could
350 // also be a large string if the original object was a shortcut candidate.
352 Heap::InToPage(dest) ||
353 (Heap::IsLargeObject(dest) && Heap::InFromPage(dest) &&
354 dest->map_word(kRelaxedLoad).IsForwardingAddress()));
355 }
356};
357
358} // namespace
359
360// Remove this crashkey after chromium:1010312 is fixed.
361class V8_NODISCARD ScopedFullHeapCrashKey {
362 public:
363 explicit ScopedFullHeapCrashKey(Isolate* isolate) : isolate_(isolate) {
364 isolate_->AddCrashKey(v8::CrashKeyId::kDumpType, "heap");
365 }
369
370 private:
371 Isolate* isolate_ = nullptr;
372};
373
374namespace {
375
376// A conservative stack scanning visitor implementation that:
377// 1) Filters out non-young objects, and
378// 2) Uses the marking bitmap as a temporary object start bitmap.
379class YoungGenerationConservativeStackVisitor
380 : public ConservativeStackVisitorBase<
381 YoungGenerationConservativeStackVisitor> {
382 public:
383 YoungGenerationConservativeStackVisitor(Isolate* isolate,
384 RootVisitor* root_visitor)
385 : ConservativeStackVisitorBase(isolate, root_visitor), isolate_(isolate) {
386 DCHECK(v8_flags.scavenger_conservative_object_pinning);
387 DCHECK(!v8_flags.minor_ms);
388 DCHECK(!v8_flags.sticky_mark_bits);
389 DCHECK(std::all_of(
390 isolate_->heap()->semi_space_new_space()->to_space().begin(),
391 isolate_->heap()->semi_space_new_space()->to_space().end(),
392 [](const PageMetadata* page) {
393 return page->marking_bitmap()->IsClean();
394 }));
395 DCHECK(std::all_of(
396 isolate_->heap()->semi_space_new_space()->from_space().begin(),
397 isolate_->heap()->semi_space_new_space()->from_space().end(),
398 [](const PageMetadata* page) {
399 return page->marking_bitmap()->IsClean();
400 }));
401 }
402
403 ~YoungGenerationConservativeStackVisitor() {
404 DCHECK(std::all_of(
405 isolate_->heap()->semi_space_new_space()->to_space().begin(),
406 isolate_->heap()->semi_space_new_space()->to_space().end(),
407 [](const PageMetadata* page) {
408 return page->marking_bitmap()->IsClean();
409 }));
410 for (PageMetadata* page :
411 isolate_->heap()->semi_space_new_space()->from_space()) {
412 page->marking_bitmap()->Clear<AccessMode::NON_ATOMIC>();
413 }
414 }
415
416 private:
417 static constexpr bool kOnlyVisitMainV8Cage [[maybe_unused]] = true;
418
419 static bool FilterPage(const MemoryChunk* chunk) {
420 return chunk->IsFromPage();
421 }
422
423 static bool FilterLargeObject(Tagged<HeapObject> object, MapWord map_word) {
424 DCHECK_EQ(map_word, object->map_word(kRelaxedLoad));
425 return !HeapLayout::IsSelfForwarded(object, map_word);
426 }
427
428 static bool FilterNormalObject(Tagged<HeapObject> object, MapWord map_word,
429 MarkingBitmap* bitmap) {
430 DCHECK_EQ(map_word, object->map_word(kRelaxedLoad));
431 if (map_word.IsForwardingAddress()) {
433 DCHECK(
434 MarkingBitmap::MarkBitFromAddress(bitmap, object->address()).Get());
435 return false;
436 }
437 MarkingBitmap::MarkBitFromAddress(bitmap, object->address())
438 .Set<AccessMode::NON_ATOMIC>();
439 return true;
440 }
441
442 static void HandleObjectFound(Tagged<HeapObject> object, size_t object_size,
443 MarkingBitmap* bitmap) {
444 DCHECK_EQ(object_size, object->Size());
445 Address object_address = object->address();
446 if (object_address + object_size <
447 PageMetadata::FromHeapObject(object)->area_end()) {
448 MarkingBitmap::MarkBitFromAddress(bitmap, object_address + object_size)
449 .Set<AccessMode::NON_ATOMIC>();
450 }
451 }
452
453 Isolate* const isolate_;
454
455 friend class ConservativeStackVisitorBase<
456 YoungGenerationConservativeStackVisitor>;
457};
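// In short, the visitor above reuses the (clean) semi-space page marking
// bitmaps as temporary object-start bitmaps while the stack is scanned
// conservatively: FilterNormalObject() marks the start address of every
// object it resolves, HandleObjectFound() additionally marks the first
// address past the object, and the destructor wipes the from-space bitmaps
// again so that the clean-bitmap invariant checked in the constructor holds
// for the next cycle.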
458
459template <typename ConcreteVisitor>
460class ObjectPinningVisitorBase : public RootVisitor {
461 public:
462 ObjectPinningVisitorBase(const Heap* heap, Scavenger& scavenger,
463 ScavengerCollector::PinnedObjects& pinned_objects)
464 : RootVisitor(),
465 heap_(heap),
466 scavenger_(scavenger),
467 pinned_objects_(pinned_objects) {}
468
469 void VisitRootPointer(Root root, const char* description,
470 FullObjectSlot p) final {
471 DCHECK(root == Root::kStackRoots || root == Root::kHandleScope);
472 static_cast<ConcreteVisitor*>(this)->HandlePointer(p);
473 }
474
475 void VisitRootPointers(Root root, const char* description,
476 FullObjectSlot start, FullObjectSlot end) final {
477 DCHECK(root == Root::kStackRoots || root == Root::kHandleScope);
478 for (FullObjectSlot p = start; p < end; ++p) {
479 static_cast<ConcreteVisitor*>(this)->HandlePointer(p);
480 }
481 }
482
483 protected:
484 void HandleHeapObject(Tagged<HeapObject> object) {
486 DCHECK(!MapWord::IsPacked(object.ptr()));
488 if (IsAllocationMemento(object)) {
489 // Don't pin allocation mementos since they should not survive a GC.
490 return;
491 }
492 if (scavenger_.PromoteIfLargeObject(object)) {
493 // Large objects are not moved and thus don't require pinning. Instead,
494 // we scavenge large pages eagerly to keep them from being reclaimed (if
495 // the page is only reachable from stack).
496 return;
497 }
498 DCHECK(!MemoryChunk::FromHeapObject(object)->IsLargePage());
500 DCHECK(Heap::InFromPage(object));
501 Address object_address = object.address();
502 MapWord map_word = object->map_word(kRelaxedLoad);
503 DCHECK(!map_word.IsForwardingAddress());
504 DCHECK(std::all_of(
505 pinned_objects_.begin(), pinned_objects_.end(),
506 [object_address](ScavengerCollector::PinnedObjectEntry& entry) {
507 return entry.address != object_address;
508 }));
509 int object_size = object->SizeFromMap(map_word.ToMap());
510 DCHECK_LT(0, object_size);
511 pinned_objects_.push_back(
512 {object_address, map_word, static_cast<size_t>(object_size)});
513 MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
514 if (!chunk->IsQuarantined()) {
515 chunk->SetFlagNonExecutable(MemoryChunk::IS_QUARANTINED);
516 if (v8_flags.scavenger_promote_quarantined_pages &&
517 heap_->semi_space_new_space()->ShouldPageBePromoted(chunk)) {
518 chunk->SetFlagNonExecutable(MemoryChunk::WILL_BE_PROMOTED);
519 }
520 }
521 scavenger_.PinAndPushObject(chunk, object, map_word);
522 }
523
524 private:
525 const Heap* const heap_;
528};
529
530class ConservativeObjectPinningVisitor final
531 : public ObjectPinningVisitorBase<ConservativeObjectPinningVisitor> {
532 public:
533 ConservativeObjectPinningVisitor(
534 const Heap* heap, Scavenger& scavenger,
535 ScavengerCollector::PinnedObjects& pinned_objects)
536 : ObjectPinningVisitorBase<ConservativeObjectPinningVisitor>(
537 heap, scavenger, pinned_objects) {}
538
539 private:
540 void HandlePointer(FullObjectSlot p) {
541 HandleHeapObject(Cast<HeapObject>(*p));
542 }
543
544 friend class ObjectPinningVisitorBase<ConservativeObjectPinningVisitor>;
545};
546
547class PreciseObjectPinningVisitor final
548 : public ObjectPinningVisitorBase<PreciseObjectPinningVisitor> {
549 public:
550 PreciseObjectPinningVisitor(const Heap* heap, Scavenger& scavenger,
551 ScavengerCollector::PinnedObjects& pinned_objects)
552 : ObjectPinningVisitorBase<PreciseObjectPinningVisitor>(heap, scavenger,
553 pinned_objects) {}
554
555 private:
556 void HandlePointer(FullObjectSlot p) {
557 Tagged<Object> object = *p;
558 if (!object.IsHeapObject()) {
559 return;
560 }
561 Tagged<HeapObject> heap_object = Cast<HeapObject>(object);
562 if (!MemoryChunk::FromHeapObject(heap_object)->IsFromPage()) {
563 return;
564 }
565 if (HeapLayout::IsSelfForwarded(heap_object)) {
566 return;
567 }
568 HandleHeapObject(heap_object);
569 }
570
571 friend class ObjectPinningVisitorBase<PreciseObjectPinningVisitor>;
572};
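// A condensed sketch of how the two pinning visitors above differ for a root
// slot p (names as in this file):
//
//   // Conservative pinning: every address the conservative stack visitor
//   // resolves to a from-page object is handed over unconditionally.
//   HandleHeapObject(Cast<HeapObject>(*p));
//
//   // Precise pinning: only tagged slots that still point at an unforwarded
//   // from-page object are pinned.
//   if ((*p).IsHeapObject() &&
//       MemoryChunk::FromHeapObject(Cast<HeapObject>(*p))->IsFromPage() &&
//       !HeapLayout::IsSelfForwarded(Cast<HeapObject>(*p))) {
//     HandleHeapObject(Cast<HeapObject>(*p));
//   }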
573
574// A visitor for treating precise references conservatively (by passing them to
575// the conservative stack visitor). This visitor is used for stressing object
576// pinning in Scavenger.
577class TreatConservativelyVisitor final : public RootVisitor {
578 public:
579 TreatConservativelyVisitor(YoungGenerationConservativeStackVisitor* v,
580 Heap* heap)
581 : RootVisitor(),
583 rng_(heap->isolate()->fuzzer_rng()),
585 v8_flags.stress_scavenger_conservative_object_pinning_random
586 ? rng_->NextDouble()
587 : 0) {}
588
589 void VisitRootPointer(Root root, const char* description,
590 FullObjectSlot p) final {
591 HandlePointer(p);
592 }
593
594 void VisitRootPointers(Root root, const char* description,
595 FullObjectSlot start, FullObjectSlot end) final {
596 for (FullObjectSlot p = start; p < end; ++p) {
597 HandlePointer(p);
598 }
599 }
600
601 private:
602 void HandlePointer(FullObjectSlot p) {
603 if (rng_->NextDouble() < stressing_threshold_) {
604 return;
605 }
606 Tagged<Object> object = *p;
607 stack_visitor_->VisitPointer(reinterpret_cast<void*>(object.ptr()));
608 }
609
610 YoungGenerationConservativeStackVisitor* const stack_visitor_;
611 base::RandomNumberGenerator* const rng_;
613};
614
615void RestorePinnedObjects(
616 SemiSpaceNewSpace& new_space,
617 const ScavengerCollector::PinnedObjects& pinned_objects) {
618 // Restore the maps of quarantined objects. We use the iteration over
619 // quarantined objects to split them based on pages. This will be used below
620 // for sweeping the quarantined pages (since there are no markbits).
621 DCHECK_EQ(0, new_space.QuarantinedPageCount());
622 size_t quarantined_objects_size = 0;
623 for (const auto& [object_address, map_word, object_size] : pinned_objects) {
624 DCHECK(!map_word.IsForwardingAddress());
625 Tagged<HeapObject> object = HeapObject::FromAddress(object_address);
627 object->set_map_word(map_word.ToMap(), kRelaxedStore);
629 MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
630 DCHECK(chunk->IsQuarantined());
631 if (!chunk->IsFlagSet(MemoryChunk::WILL_BE_PROMOTED)) {
632 quarantined_objects_size += object_size;
633 }
634 }
635 new_space.SetQuarantinedSize(quarantined_objects_size);
636}
637
638void QuarantinePinnedPages(SemiSpaceNewSpace& new_space) {
639 PageMetadata* next_page = new_space.from_space().first_page();
640 while (next_page) {
641 PageMetadata* current_page = next_page;
642 next_page = current_page->next_page();
643 MemoryChunk* chunk = current_page->Chunk();
644 DCHECK(chunk->IsFromPage());
645 if (!chunk->IsQuarantined()) {
646 continue;
647 }
648 if (chunk->IsFlagSet(MemoryChunk::WILL_BE_PROMOTED)) {
649 // free list categories will be relinked by the quarantined page sweeper
650 // after sweeping is done.
651 new_space.PromotePageToOldSpace(current_page,
653 DCHECK(!chunk->InYoungGeneration());
654 } else {
655 new_space.MoveQuarantinedPage(chunk);
656 DCHECK(!chunk->IsFromPage());
657 DCHECK(chunk->IsToPage());
658 }
659 DCHECK(current_page->marking_bitmap()->IsClean());
660 DCHECK(!chunk->IsFromPage());
661 DCHECK(!chunk->IsQuarantined());
662 DCHECK(!chunk->IsFlagSet(MemoryChunk::WILL_BE_PROMOTED));
663 }
664}
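// Taken together, RestorePinnedObjects() and QuarantinePinnedPages() leave a
// from-space page in one of three states (a condensed sketch of the logic
// above):
//
//   if (!chunk->IsQuarantined())
//     ;  // nothing was pinned: the page stays in from-space and is reclaimed
//   else if (chunk->IsFlagSet(MemoryChunk::WILL_BE_PROMOTED))
//     new_space.PromotePageToOldSpace(current_page, ...);  // whole page ages
//   else
//     new_space.MoveQuarantinedPage(chunk);  // page survives in to-space
//
// Only objects on quarantined, non-promoted pages count towards the
// quarantined size set in RestorePinnedObjects().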
665
666} // namespace
667
668ScavengerCollector::QuarantinedPageSweeper::JobTask::JobTask(
669 Heap* heap, const PinnedObjects&& pinned_objects)
670 : heap_(heap),
671 trace_id_(reinterpret_cast<uint64_t>(this) ^
672 heap_->tracer()->CurrentEpoch(GCTracer::Scope::SCAVENGER)),
673 should_zap_(heap::ShouldZapGarbage()),
674 pinned_objects_(std::move(pinned_objects)) {
675 DCHECK(!pinned_objects.empty());
676}
677
678void ScavengerCollector::QuarantinedPageSweeper::JobTask::Run(
679 JobDelegate* delegate) {
680#ifdef V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
681 SetCurrentIsolateScope current_isolate_scope(heap_->isolate());
682#endif // V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
684 heap_->tracer(),
685 GCTracer::Scope::SCAVENGER_BACKGROUND_QUARANTINED_PAGE_SWEEPING,
688 DCHECK(!is_done_.load(std::memory_order_relaxed));
689 DCHECK(!pinned_objects_.empty());
690 if (pinned_object_per_page_.empty()) {
691 // Populate the per page map.
692 for (const PinnedObjectEntry& entry : pinned_objects_) {
694 HeapObject::FromAddress(entry.address), heap_->isolate()));
695 MemoryChunk* chunk = MemoryChunk::FromAddress(entry.address);
696 DCHECK(!chunk->IsQuarantined());
697 ObjectsAndSizes& objects_for_page = pinned_object_per_page_[chunk];
698 DCHECK(!std::any_of(objects_for_page.begin(), objects_for_page.end(),
699 [entry](auto& object_and_size) {
700 return object_and_size.first == entry.address;
701 }));
702 objects_for_page.emplace_back(entry.address, entry.size);
703 }
704 // Initialize the iterator.
705 next_page_iterator_ = pinned_object_per_page_.begin();
706 DCHECK_NE(next_page_iterator_, pinned_object_per_page_.end());
707 }
708 // Sweep all quarantined pages.
709 while (next_page_iterator_ != pinned_object_per_page_.end()) {
710 if (delegate->ShouldYield()) {
711 TRACE_GC_NOTE("Quarantined page sweeping preempted");
712 return;
713 }
714 MemoryChunk* chunk = next_page_iterator_->first;
715 PageMetadata* page = static_cast<PageMetadata*>(chunk->Metadata());
716 DCHECK(!chunk->IsFromPage());
717 if (chunk->IsToPage()) {
718 SweepPage(CreateFillerFreeSpaceHandler, chunk, page,
719 next_page_iterator_->second);
720 } else {
721 DCHECK_EQ(chunk->Metadata()->owner()->identity(), OLD_SPACE);
722 base::MutexGuard guard(page->mutex());
723 // If for some reason the page is swept twice, this DCHECK will fail.
724 DCHECK_EQ(page->area_size(), page->allocated_bytes());
725 size_t filler_size_on_page =
726 SweepPage(AddToFreeListFreeSpaceHandler, chunk, page,
727 next_page_iterator_->second);
728 DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
729 OldSpace* old_space = static_cast<OldSpace*>(page->owner());
730 old_space->RelinkQuarantinedPageFreeList(page, filler_size_on_page);
731 }
732 next_page_iterator_++;
733 }
734 is_done_.store(true, std::memory_order_relaxed);
735 pinned_object_per_page_.clear();
736 pinned_objects_.clear();
737}
738
739// static
740void ScavengerCollector::QuarantinedPageSweeper::JobTask::
741 CreateFillerFreeSpaceHandler(Heap* heap, Address address, size_t size,
742 bool should_zap) {
743 if (should_zap) {
744 heap::ZapBlock(address, size, heap::ZapValue());
745 }
746 heap->CreateFillerObjectAt(address, static_cast<int>(size));
747}
748
749// static
750void ScavengerCollector::QuarantinedPageSweeper::JobTask::
751 AddToFreeListFreeSpaceHandler(Heap* heap, Address address, size_t size,
752 bool should_zap) {
753 if (should_zap) {
754 heap::ZapBlock(address, size, heap::ZapValue());
755 }
756 DCHECK_EQ(OLD_SPACE, PageMetadata::FromAddress(address)->owner()->identity());
757 DCHECK(PageMetadata::FromAddress(address)->SweepingDone());
758 OldSpace* const old_space = heap->old_space();
759 old_space->FreeDuringSweep(address, size);
760}
761
762size_t ScavengerCollector::QuarantinedPageSweeper::JobTask::SweepPage(
763 FreeSpaceHandler free_space_handler, MemoryChunk* chunk, PageMetadata* page,
764 ObjectsAndSizes& pinned_objects_on_page) {
765 DCHECK_EQ(page, chunk->Metadata());
766 DCHECK(!pinned_objects_on_page.empty());
767 Address start = page->area_start();
768 std::sort(pinned_objects_on_page.begin(), pinned_objects_on_page.end());
769 size_t filler_size_on_page = 0;
770 for (const auto& [object_address, object_size] : pinned_objects_on_page) {
771 DCHECK_LE(start, object_address);
772 if (start != object_address) {
773 size_t filler_size = object_address - start;
774 free_space_handler(heap_, start, filler_size, should_zap_);
775 filler_size_on_page += filler_size;
776 }
777 start = object_address + object_size;
778 }
779 Address end = page->area_end();
780 if (start != end) {
781 size_t filler_size = end - start;
782 free_space_handler(heap_, start, filler_size, should_zap_);
783 filler_size_on_page += filler_size;
784 }
785 return filler_size_on_page;
786}
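// A worked example for SweepPage() above, assuming a page area of
// [0x1000, 0x5000) with pinned objects at 0x1800 (size 0x100) and 0x3000
// (size 0x200): the free-space handler runs for the gaps [0x1000, 0x1800),
// [0x1900, 0x3000) and [0x3200, 0x5000), and the returned
// filler_size_on_page is 0x800 + 0x1700 + 0x1e00 = 0x3d00, i.e. the page
// area minus the 0x300 bytes of pinned objects.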
787
788void ScavengerCollector::QuarantinedPageSweeper::StartSweeping(
789 const ScavengerCollector::PinnedObjects&& pinned_objects) {
791 DCHECK(!pinned_objects.empty());
792 auto job = std::make_unique<JobTask>(heap_, std::move(pinned_objects));
793 TRACE_GC_NOTE_WITH_FLOW("Quarantined page sweeper started", job->trace_id(),
796 v8::TaskPriority::kUserVisible, std::move(job));
797}
798
803
804void ScavengerCollector::CollectGarbage() {
805 ScopedFullHeapCrashKey collect_full_heap_dump_if_crash(isolate_);
806
809
810 SemiSpaceNewSpace* new_space = SemiSpaceNewSpace::From(heap_->new_space());
811 new_space->GarbageCollectionPrologue();
812 new_space->SwapSemiSpaces();
813
814 // We also flip the young generation large object space. All large objects
815 // will be in the from space.
816 heap_->new_lo_space()->Flip();
818
820
821 Scavenger::EmptyChunksList empty_chunks;
822 Scavenger::CopiedList copied_list;
823 Scavenger::PinnedList pinned_list;
824 Scavenger::PromotedList promoted_list;
825 EphemeronRememberedSet::TableList ephemeron_table_list;
826
827 PinnedObjects pinned_objects;
828
829 const int num_scavenge_tasks = NumberOfScavengeTasks();
830 std::vector<std::unique_ptr<Scavenger>> scavengers;
831
832 const bool is_logging = isolate_->log_object_relocation();
833 for (int i = 0; i < num_scavenge_tasks; ++i) {
834 scavengers.emplace_back(
835 new Scavenger(this, heap_, is_logging, &empty_chunks, &copied_list,
836 &pinned_list, &promoted_list, &ephemeron_table_list));
837 }
838 Scavenger& main_thread_scavenger = *scavengers[kMainThreadId].get();
839
840 {
841 // Identify weak unmodified handles. Requires an unmodified graph.
843 GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_IDENTIFY);
845 }
846
847 std::vector<std::pair<ParallelWorkItem, MutablePageMetadata*>>
848 old_to_new_chunks;
849 {
850 // Copy roots.
851 TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_ROOTS);
852
853 // We must collect old-to-new pages before starting Scavenge because
854 // pages could be removed from the old generation for allocation which
855 // hides them from the iteration.
857 heap_, [&old_to_new_chunks](MutablePageMetadata* chunk) {
858 if (chunk->slot_set<OLD_TO_NEW>() ||
859 chunk->typed_slot_set<OLD_TO_NEW>() ||
861 old_to_new_chunks.emplace_back(ParallelWorkItem{}, chunk);
862 }
863 });
864
865 if (v8_flags.scavenger_conservative_object_pinning &&
866 heap_->IsGCWithStack()) {
867 // Pinning objects must be the first step and must happen before
868 // scavenging any objects. Specifically, we must pin all objects
869 // before visiting other pinned objects. If we scavenge some object X
870 // and move it before all stack-reachable objects are pinned, and we
871 // later find that we need to pin X, it will be too late to undo the
872 // moving.
874 GCTracer::Scope::SCAVENGER_SCAVENGE_PIN_OBJECTS);
875 ConservativeObjectPinningVisitor conservative_pinning_visitor(
876 heap_, main_thread_scavenger, pinned_objects);
877 // Scavenger reuses the page's marking bitmap as a temporary object
878 // start bitmap. Stack scanning will incrementally build the map as it
879 // searches through pages.
880 YoungGenerationConservativeStackVisitor stack_visitor(
881 isolate_, &conservative_pinning_visitor);
882 // Marker was already set by Heap::CollectGarbage.
883 heap_->IterateConservativeStackRoots(&stack_visitor);
884 if (V8_UNLIKELY(v8_flags.stress_scavenger_conservative_object_pinning)) {
885 TreatConservativelyVisitor handles_visitor(&stack_visitor, heap_);
886 heap_->IterateRootsForPrecisePinning(&handles_visitor);
887 }
888 }
889 if (v8_flags.scavenger_precise_object_pinning) {
890 PreciseObjectPinningVisitor precise_pinning_visitor(
891 heap_, main_thread_scavenger, pinned_objects);
892 ClearStaleLeftTrimmedPointerVisitor left_trim_visitor(
893 heap_, &precise_pinning_visitor);
894 heap_->IterateRootsForPrecisePinning(&left_trim_visitor);
895 }
896
897 // Scavenger treats all weak roots except for global handles as strong.
898 // That is why we don't set skip_weak = true here and instead visit
899 // global handles separately.
904 if (v8_flags.scavenger_precise_object_pinning) {
906 }
907 RootScavengeVisitor root_scavenge_visitor(main_thread_scavenger);
908
909 heap_->IterateRoots(&root_scavenge_visitor, options);
911 &root_scavenge_visitor);
912 isolate_->traced_handles()->IterateYoungRoots(&root_scavenge_visitor);
913 }
914 {
915 // Parallel phase scavenging all copied and promoted objects.
917 GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL_PHASE,
918 "UseBackgroundThreads", heap_->ShouldUseBackgroundThreads());
919
920 auto job = std::make_unique<JobTask>(
921 this, &scavengers, std::move(old_to_new_chunks), copied_list,
922 pinned_list, promoted_list);
923 TRACE_GC_NOTE_WITH_FLOW("Parallel scavenge started", job->trace_id(),
927 ->Join();
928 DCHECK(copied_list.IsEmpty());
929 DCHECK(pinned_list.IsEmpty());
930 DCHECK(promoted_list.IsEmpty());
931 }
932
933 {
934 // Scavenge weak global handles.
936 GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS);
937 GlobalHandlesWeakRootsUpdatingVisitor visitor;
939 &visitor, &IsUnscavengedHeapObjectSlot);
941 &visitor, &IsUnscavengedHeapObjectSlot);
942 }
943
944 {
945 // Finalize parallel scavenging.
946 TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_FINALIZE);
947
949
950 for (auto& scavenger : scavengers) {
951 scavenger->Finalize();
952 }
953 scavengers.clear();
954
955#ifdef V8_COMPRESS_POINTERS
956 // Sweep the external pointer table.
959 heap_->isolate()->external_pointer_table().Sweep(
960 heap_->young_external_pointer_space(), heap_->isolate()->counters());
961#endif // V8_COMPRESS_POINTERS
962
964
967 }
968
969 {
970 // Update references into new space
971 TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_UPDATE_REFS);
973 &Heap::UpdateYoungReferenceInExternalStringTableEntry);
974
975 if (V8_UNLIKELY(v8_flags.always_use_string_forwarding_table)) {
977 }
978 }
979
980 ProcessWeakReferences(&ephemeron_table_list);
981
982 {
984 GCTracer::Scope::SCAVENGER_SCAVENGE_RESTORE_AND_QUARANTINE_PINNED);
985 RestorePinnedObjects(*new_space, pinned_objects);
986 QuarantinePinnedPages(*new_space);
987 }
988
989 // Need to free new space LAB that was allocated during scavenge.
991
992 // Since we promote all surviving large objects immediately, all remaining
993 // large objects must be dead.
994 // TODO(hpayer): Don't free all as soon as we have an intermediate generation.
996 [](Tagged<HeapObject>) { return true; });
997
998 new_space->GarbageCollectionEpilogue();
999
1000 // Start sweeping quarantined pages.
1001 if (!pinned_objects.empty()) {
1002 quarantined_page_sweeper_.StartSweeping(std::move(pinned_objects));
1003 } else {
1004 // Sweeping is not started since there are no pages to sweep. Mark sweeping
1005 // as completed so that the current GC cycle can be stopped since there is
1006 // no sweeper to mark it as completed later.
1008 }
1009
1010 {
1011 TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_FREE_REMEMBERED_SET);
1012 Scavenger::EmptyChunksList::Local empty_chunks_local(empty_chunks);
1013 MutablePageMetadata* chunk;
1014 while (empty_chunks_local.Pop(&chunk)) {
1017 }
1018
1019#ifdef DEBUG
1021 heap_, [](MutablePageMetadata* chunk) {
1022 if (chunk->slot_set<OLD_TO_NEW>() ||
1023 chunk->typed_slot_set<OLD_TO_NEW>() ||
1024 chunk->slot_set<OLD_TO_NEW_BACKGROUND>()) {
1026 }
1027 });
1028#endif
1029 }
1030
1032
1035
1036 // Update how much has survived scavenge.
1038
1039 {
1040 TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_RESIZE_NEW_SPACE);
1042 }
1043}
1044
1053
1056
1057 for (SurvivingNewLargeObjectMapEntry update_info :
1059 Tagged<HeapObject> object = update_info.first;
1060 Tagged<Map> map = update_info.second;
1061 // Order is important here. We have to re-install the map to have access
1062 // to meta-data like size during page promotion.
1063 object->set_map_word(map, kRelaxedStore);
1064
1067 }
1070}
1071
1072void ScavengerCollector::MergeSurvivingNewLargeObjects(
1073 const SurvivingNewLargeObjectsMap& objects) {
1074 for (SurvivingNewLargeObjectMapEntry object : objects) {
1075 bool success = surviving_new_large_objects_.insert(object).second;
1076 USE(success);
1077 DCHECK(success);
1078 }
1079}
1080
1088
1089int ScavengerCollector::NumberOfScavengeTasks() {
1090 if (!v8_flags.parallel_scavenge) {
1091 return 1;
1092 }
1093 const int num_scavenge_tasks =
1094 static_cast<int>(
1096 MB +
1097 1;
1098 static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
1099 int tasks = std::max(
1100 1, std::min({num_scavenge_tasks, kMaxScavengerTasks, num_cores}));
1102 static_cast<size_t>(tasks * PageMetadata::kPageSize))) {
1103 // Optimize for memory usage near the heap limit.
1104 tasks = 1;
1105 }
1106 return tasks;
1107}
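// A worked example for NumberOfScavengeTasks(), assuming the elided term is
// the semi-space new space capacity, here 16 MB, with 8 worker threads and
// kMaxScavengerTasks >= 9:
//
//   num_scavenge_tasks = 16 MB / MB + 1 = 17
//   num_cores          = 8 + 1 = 9
//   tasks              = max(1, min(17, kMaxScavengerTasks, 9)) = 9
//
// The early return yields 1 when parallel_scavenge is disabled, and tasks is
// reduced to 1 when the heap cannot accommodate tasks * kPageSize of
// promotion near the heap limit.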
1108
1109Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
1110 EmptyChunksList* empty_chunks, CopiedList* copied_list,
1111 PinnedList* pinned_list, PromotedList* promoted_list,
1112 EphemeronRememberedSet::TableList* ephemeron_table_list)
1113 : collector_(collector),
1114 heap_(heap),
1115 local_empty_chunks_(*empty_chunks),
1116 local_copied_list_(*copied_list),
1117 local_pinned_list_(*pinned_list),
1118 local_promoted_list_(*promoted_list),
1119 local_ephemeron_table_list_(*ephemeron_table_list),
1120 local_pretenuring_feedback_(PretenuringHandler::kInitialFeedbackCapacity),
1122 is_logging_(is_logging),
1123 shared_string_table_(v8_flags.shared_string_table &&
1124 heap->isolate()->has_shared_space()),
1125 mark_shared_heap_(heap->isolate()->is_shared_space_isolate()),
1126 shortcut_strings_(
1127 heap->CanShortcutStringsDuringGC(GarbageCollector::SCAVENGER)) {
1128 DCHECK(!heap->incremental_marking()->IsMarking());
1129}
1130
1131void Scavenger::IterateAndScavengePromotedObject(Tagged<HeapObject> target,
1132 Tagged<Map> map, int size) {
1133 // We are not collecting slots on new space objects during mutation, thus
1134 // we have to scan for pointers to evacuation candidates when we promote
1135 // objects. But we should not record any slots in non-black objects. A grey
1136 // object's slots would be rescanned. A white object might not survive until
1137 // the end of collection, so it would be a violation of the invariant to
1138 // record its slots.
1139 IterateAndScavengePromotedObjectsVisitor visitor(this);
1140
1141 // Iterate all outgoing pointers including map word.
1142 visitor.Visit(map, target, size);
1143
1144 if (IsJSArrayBufferMap(map)) {
1145 DCHECK(!MemoryChunk::FromHeapObject(target)->IsLargePage());
1146 GCSafeCast<JSArrayBuffer>(target, heap_)->YoungMarkExtensionPromoted();
1147 }
1148}
1149
1150void Scavenger::RememberPromotedEphemeron(Tagged<EphemeronHashTable> table,
1151 int index) {
1152 auto indices = local_ephemeron_remembered_set_.insert(
1153 {table, std::unordered_set<int>()});
1154 indices.first->second.insert(index);
1155}
1156
1157void Scavenger::ScavengePage(MutablePageMetadata* page) {
1158 const bool record_old_to_shared_slots = heap_->isolate()->has_shared_space();
1159
1160 MemoryChunk* chunk = page->Chunk();
1161
1162 if (page->slot_set<OLD_TO_NEW, AccessMode::ATOMIC>() != nullptr) {
1164 page,
1165 [this, chunk, page, record_old_to_shared_slots](MaybeObjectSlot slot) {
1166 SlotCallbackResult result = CheckAndScavengeObject(heap_, slot);
1167 // A new space string might have been promoted into the shared heap
1168 // during GC.
1169 if (result == REMOVE_SLOT && record_old_to_shared_slots) {
1170 CheckOldToNewSlotForSharedUntyped(chunk, page, slot);
1171 }
1172 return result;
1173 },
1175 }
1176
1177 if (chunk->executable()) {
1178 std::vector<std::tuple<Tagged<HeapObject>, SlotType, Address>> slot_updates;
1179
1180 // Code that runs with write access to executable memory poses a CFI attack
1181 // surface and needs to be kept to a minimum. So we do the iteration in
1182 // two rounds. First we iterate the slots and scavenge objects, and in the
1183 // second round, with write access, we only perform the pointer updates.
1184 const auto typed_slot_count = RememberedSet<OLD_TO_NEW>::IterateTyped(
1185 page, [this, chunk, page, record_old_to_shared_slots, &slot_updates](
1186 SlotType slot_type, Address slot_address) {
1187 Tagged<HeapObject> old_target =
1189 slot_address);
1190 Tagged<HeapObject> new_target = old_target;
1192 SlotCallbackResult result = CheckAndScavengeObject(heap(), slot);
1193 if (result == REMOVE_SLOT && record_old_to_shared_slots) {
1194 CheckOldToNewSlotForSharedTyped(chunk, page, slot_type,
1195 slot_address, *slot);
1196 }
1197 if (new_target != old_target) {
1198 slot_updates.emplace_back(new_target, slot_type, slot_address);
1199 }
1200 return result;
1201 });
1202 // Typed slots only exist in code objects. Since code is never young, it is
1203 // safe to release an empty typed slot set as no other scavenge thread will
1204 // attempt to promote to the page and write to the slot set.
1205 if (typed_slot_count == 0) {
1206 page->ReleaseTypedSlotSet(OLD_TO_NEW);
1207 }
1208
1210 page->area_start(), page->area_size());
1211 for (auto& slot_update : slot_updates) {
1212 Tagged<HeapObject> new_target = std::get<0>(slot_update);
1213 SlotType slot_type = std::get<1>(slot_update);
1214 Address slot_address = std::get<2>(slot_update);
1215
1216 WritableJitAllocation jit_allocation =
1217 jit_page.LookupAllocationContaining(slot_address);
1219 jit_allocation, heap_, slot_type, slot_address,
1221 slot.store(new_target);
1222 return KEEP_SLOT;
1223 });
1224 }
1225 } else {
1226 DCHECK_NULL(page->typed_slot_set<OLD_TO_NEW>());
1227 }
1228
1229 if (page->slot_set<OLD_TO_NEW_BACKGROUND, AccessMode::ATOMIC>() != nullptr) {
1231 page,
1232 [this, chunk, page, record_old_to_shared_slots](MaybeObjectSlot slot) {
1233 SlotCallbackResult result = CheckAndScavengeObject(heap_, slot);
1234 // A new space string might have been promoted into the shared heap
1235 // during GC.
1236 if (result == REMOVE_SLOT && record_old_to_shared_slots) {
1237 CheckOldToNewSlotForSharedUntyped(chunk, page, slot);
1238 }
1239 return result;
1240 },
1242 }
1243}
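// A condensed sketch of the typed-slot handling above for executable pages:
// because write access to executable memory is CFI-sensitive, the first pass
// only scavenges the targets and records (new_target, slot_type, slot_address)
// tuples, and the second pass opens the page for writing once and performs
// nothing but the stores, roughly:
//
//   WritableJitAllocation jit_allocation =
//       jit_page.LookupAllocationContaining(slot_address);
//   // ...store new_target into the slot and keep it in the remembered set.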
1244
1245void Scavenger::Process(JobDelegate* delegate) {
1246 ScavengeVisitor scavenge_visitor(this);
1247
1248 bool done;
1249 size_t objects = 0;
1250 do {
1251 done = true;
1252 Tagged<HeapObject> object;
1254 local_copied_list_.Pop(&object)) {
1255 scavenge_visitor.Visit(object);
1256 done = false;
1257 if (delegate && ((++objects % kInterruptThreshold) == 0)) {
1259 delegate->NotifyConcurrencyIncrease();
1260 }
1261 }
1262 }
1263
1264 struct PromotedListEntry entry;
1265 while (local_promoted_list_.Pop(&entry)) {
1266 Tagged<HeapObject> target = entry.heap_object;
1267 IterateAndScavengePromotedObject(target, entry.map, entry.size);
1268 done = false;
1269 if (delegate && ((++objects % kInterruptThreshold) == 0)) {
1271 delegate->NotifyConcurrencyIncrease();
1272 }
1273 }
1274 }
1275 } while (!done);
1276}
1277
1278void Scavenger::ProcessWeakReferences(
1279 EphemeronRememberedSet::TableList* ephemeron_table_list) {
1280 ClearYoungEphemerons(ephemeron_table_list);
1282}
1283
1284// Clear ephemeron entries from EphemeronHashTables in new-space whenever the
1285// entry has a dead new-space key.
1286void Scavenger::ClearYoungEphemerons(
1287 EphemeronRememberedSet::TableList* ephemeron_table_list) {
1288 ephemeron_table_list->Iterate([this](Tagged<EphemeronHashTable> table) {
1289 for (InternalIndex i : table->IterateEntries()) {
1290 // Keys in EphemeronHashTables must be heap objects.
1291 HeapObjectSlot key_slot(
1292 table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i)));
1293 Tagged<HeapObject> key = key_slot.ToHeapObject();
1294 if (IsUnscavengedHeapObject(heap_, key)) {
1295 table->RemoveEntry(i);
1296 } else {
1298 key_slot.StoreHeapObject(forwarded);
1299 }
1300 }
1301 });
1302 ephemeron_table_list->Clear();
1303}
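// A minimal sketch of the ephemeron rule applied above for an entry with key
// slot key_slot in a new-space EphemeronHashTable:
//
//   if (IsUnscavengedHeapObject(heap_, key)) {
//     table->RemoveEntry(i);                // key is dead: drop the entry
//   } else {
//     key_slot.StoreHeapObject(forwarded);  // forwarded = key's new location
//   }
//
// Values are not consulted here because the scavenger already treated them as
// strong references; only a dead key can invalidate an entry.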
1304
1305// Clear ephemeron entries from EphemeronHashTables in old-space whenever the
1306// entry has a dead new-space key.
1307void Scavenger::ClearOldEphemerons() {
1308 auto* table_map = heap_->ephemeron_remembered_set_->tables();
1309 for (auto it = table_map->begin(); it != table_map->end();) {
1310 Tagged<EphemeronHashTable> table = it->first;
1311 auto& indices = it->second;
1312 for (auto iti = indices.begin(); iti != indices.end();) {
1313 // Keys in EphemeronHashTables must be heap objects.
1314 HeapObjectSlot key_slot(table->RawFieldOfElementAt(
1315 EphemeronHashTable::EntryToIndex(InternalIndex(*iti))));
1316 Tagged<HeapObject> key = key_slot.ToHeapObject();
1317 if (IsUnscavengedHeapObject(heap_, key)) {
1318 table->RemoveEntry(InternalIndex(*iti));
1319 iti = indices.erase(iti);
1320 } else {
1322 key_slot.StoreHeapObject(forwarded);
1324 iti = indices.erase(iti);
1325 } else {
1326 ++iti;
1327 }
1328 }
1329 }
1330
1331 if (indices.empty()) {
1332 it = table_map->erase(it);
1333 } else {
1334 ++it;
1335 }
1336 }
1337}
1338
1342 for (const auto& it : local_ephemeron_remembered_set_) {
1343 // The ephemeron objects in the remembered set should be either large
1344 // objects, promoted to old space, or pinned objects on quarantined pages
1345 // that will be promoted.
1348 !HeapLayout::InYoungGeneration(it.first) ||
1349 (HeapLayout::IsSelfForwarded(it.first) &&
1354 it.first, std::move(it.second));
1355 }
1362}
1363
1369
1373
1374template <typename TSlot>
1375void Scavenger::CheckOldToNewSlotForSharedUntyped(MemoryChunk* chunk,
1376 MutablePageMetadata* page,
1377 TSlot slot) {
1378 Tagged<MaybeObject> object = *slot;
1379 Tagged<HeapObject> heap_object;
1380
1381 if (object.GetHeapObject(&heap_object) &&
1382 HeapLayout::InWritableSharedSpace(heap_object)) {
1384 page, chunk->Offset(slot.address()));
1385 }
1386}
1387
1388void Scavenger::CheckOldToNewSlotForSharedTyped(
1389 MemoryChunk* chunk, MutablePageMetadata* page, SlotType slot_type,
1390 Address slot_address, Tagged<MaybeObject> new_target) {
1391 Tagged<HeapObject> heap_object;
1392
1393 if (new_target.GetHeapObject(&heap_object) &&
1394 HeapLayout::InWritableSharedSpace(heap_object)) {
1395 const uintptr_t offset = chunk->Offset(slot_address);
1396 DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
1397
1398 base::MutexGuard guard(page->mutex());
1400 static_cast<uint32_t>(offset));
1401 }
1402}
1403
1404bool Scavenger::PromoteIfLargeObject(Tagged<HeapObject> object) {
1405 Tagged<Map> map = object->map();
1406 return HandleLargeObject(map, object, object->SizeFromMap(map),
1407 Map::ObjectFieldsFrom(map->visitor_id()));
1408}
1409
1410void Scavenger::PinAndPushObject(MemoryChunk* chunk, Tagged<HeapObject> object,
1411 MapWord map_word) {
1412 DCHECK(chunk->Metadata()->Contains(object->address()));
1413 DCHECK_EQ(map_word, object->map_word(kRelaxedLoad));
1414 Tagged<Map> map = map_word.ToMap();
1415 int object_size = object->SizeFromMap(map);
1416 PretenuringHandler::UpdateAllocationSite(heap_, map, object, object_size,
1418 object->set_map_word_forwarded(object, kRelaxedStore);
1419 DCHECK(object->map_word(kRelaxedLoad).IsForwardingAddress());
1422 PushPinnedPromotedObject(object, map, object_size);
1423 } else {
1424 PushPinnedObject(object, map, object_size);
1425 }
1426}
1427
1428void Scavenger::PushPinnedObject(Tagged<HeapObject> object, Tagged<Map> map,
1429 int object_size) {
1431 DCHECK(!MemoryChunk::FromHeapObject(object)->IsFlagSet(
1433 DCHECK_EQ(object_size, object->SizeFromMap(map));
1434 local_pinned_list_.Push(ObjectAndMap(object, map));
1435 copied_size_ += object_size;
1436}
1437
1438void Scavenger::PushPinnedPromotedObject(Tagged<HeapObject> object,
1439 Tagged<Map> map, int object_size) {
1441 DCHECK(MemoryChunk::FromHeapObject(object)->IsFlagSet(
1443 DCHECK_EQ(object_size, object->SizeFromMap(map));
1444 local_promoted_list_.Push({object, map, object_size});
1445 promoted_size_ += object_size;
1446}
1447
1448void Scavenger::VisitPinnedObjects() {
1449 ScavengeVisitor scavenge_visitor(this);
1450
1451 ObjectAndMap object_and_map;
1452 while (local_pinned_list_.Pop(&object_and_map)) {
1453 DCHECK(HeapLayout::IsSelfForwarded(object_and_map.first));
1454 scavenge_visitor.Visit(object_and_map.second, object_and_map.first);
1455 }
1456}
1457
1458void RootScavengeVisitor::VisitRootPointer(Root root, const char* description,
1459 FullObjectSlot p) {
1461 DCHECK(!MapWord::IsPacked((*p).ptr()));
1462 ScavengePointer(p);
1463}
1464
1465void RootScavengeVisitor::VisitRootPointers(Root root, const char* description,
1466 FullObjectSlot start,
1467 FullObjectSlot end) {
1468 // Copy all HeapObject pointers in [start, end)
1469 for (FullObjectSlot p = start; p < end; ++p) {
1470 ScavengePointer(p);
1471 }
1472}
1473
1482
1485
1487
1488ScavengeVisitor::ScavengeVisitor(Scavenger* scavenger)
1489 : NewSpaceVisitor<ScavengeVisitor>(scavenger->heap()->isolate()),
1490 scavenger_(scavenger) {}
1491
1492} // namespace internal
1493} // namespace v8
Definition v8config.h:693