61 if (page == nullptr) return true;
89 if (page == nullptr) return true;
132 RunImpl(delegate, delegate->IsJoiningThread());
136 static constexpr int kPagePerTask = 2;
137 return std::min<size_t>(
194 RunImpl(delegate, delegate->IsJoiningThread());
198 static constexpr int kPagePerTask = 2;
199 return std::min<size_t>(
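The two GetMaxConcurrency fragments above clamp the number of sweeper tasks to the remaining work. A minimal standalone sketch of that pattern, with assumed constants and an illustrative remaining_pages parameter (not the actual sweeper.cc implementation):

#include <algorithm>
#include <cstddef>

// Minimal sketch, assuming a cap of kMaxTasks workers and roughly
// kPagePerTask pages of work per task; the values and the remaining_pages
// parameter are illustrative, not taken from sweeper.cc.
constexpr size_t kMaxTasks = 4;
constexpr size_t kPagePerTask = 2;

size_t GetMaxConcurrency(size_t worker_count, size_t remaining_pages) {
  // Request one worker per ~kPagePerTask outstanding pages, counting the
  // workers that are already active, but never exceed the task cap.
  return std::min(kMaxTasks,
                  worker_count +
                      (remaining_pages + kPagePerTask - 1) / kPagePerTask);
}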
230 template <Sweeper::SweepingScope scope>
232 : sweeper_(sweeper) {}
234 template <Sweeper::SweepingScope scope>
237 DCHECK(concurrent_sweepers_.empty());
241 template <Sweeper::SweepingScope scope>
243 return job_handle_ && job_handle_->IsValid();
246 template <Sweeper::SweepingScope scope>
248 return HasValidJob() && job_handle_->IsActive();
251 template <Sweeper::SweepingScope scope>
253 if (HasValidJob()) job_handle_->Cancel();
256 template <Sweeper::SweepingScope scope>
260 DCHECK(concurrent_sweepers_.empty());
263 !sweeper_->heap_->ShouldReduceMemory());
265 sweeper_->heap_->ShouldReduceMemory();
267 (reinterpret_cast<uint64_t>(sweeper_) ^
268 sweeper_->heap_->tracer()->CurrentEpoch(
270 : GCTracer::Scope::MINOR_MS_SWEEP))
275 template <Sweeper::SweepingScope scope>
279 DCHECK(concurrent_sweepers_.empty());
285 template <Sweeper::SweepingScope scope>
290 !sweeper_->heap_->delay_sweeper_tasks_for_testing_) {
292 std::make_unique<SweeperJob>(sweeper_->heap_->isolate(), sweeper_);
295 ? GCTracer::Scope::MINOR_MS_SWEEP_START_JOBS
296 : GCTracer::Scope::MC_SWEEP_START_JOBS;
300 int max_concurrent_sweeper_count =
301 std::min(SweeperJob::kMaxTasks,
303 if (concurrent_sweepers_.empty()) {
304 for (int i = 0; i < max_concurrent_sweeper_count; ++i) {
305 concurrent_sweepers_.emplace_back(sweeper_);
308 DCHECK_EQ(max_concurrent_sweeper_count, concurrent_sweepers_.size());
314 template <Sweeper::SweepingScope scope>
317 if (HasValidJob()) job_handle_->Join();
320 template <Sweeper::SweepingScope scope>
326 concurrent_sweepers_.clear();
327 in_progress_ = false;
330 template <Sweeper::SweepingScope scope>
332 if (!job_handle_ || !job_handle_->IsValid()) return;
335 job_handle_->Cancel();
339 template <Sweeper::SweepingScope scope>
344 std::make_unique<SweeperJob>(sweeper_->heap_->isolate(), sweeper_));
349 uint32_t max_pages) {
350 uint32_t pages_swept = 0;
351 bool found_usable_pages = false;
353 while ((page = sweeper_->GetSweepingPageSafe(identity)) != nullptr) {
354 ParallelSweepPage(page, identity, sweeping_mode);
356 found_usable_pages = true;
363 sweeper_->sweeping_list_[space_index];
364 DCHECK(std::all_of(sweeping_list.begin(), sweeping_list.end(),
366 return p->Chunk()->IsFlagSet(
367 MemoryChunk::NEVER_ALLOCATE_ON_PAGE);
371 if (++pages_swept >= max_pages) break;
373 return found_usable_pages;
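The loop above repeatedly pops a page from the shared sweeping list and stops once max_pages have been processed. A self-contained sketch of that bounded drain loop, using stand-in page and queue types rather than V8's Sweeper/PageMetadata:

#include <cstdint>
#include <mutex>
#include <vector>

struct FakePage { bool usable = true; };   // stand-in for PageMetadata

class PageQueue {                          // stand-in for the sweeping list
 public:
  void Push(FakePage* p) {
    std::lock_guard<std::mutex> g(mu_);
    pages_.push_back(p);
  }
  FakePage* PopSafe() {                    // mirrors GetSweepingPageSafe()
    std::lock_guard<std::mutex> g(mu_);
    if (pages_.empty()) return nullptr;
    FakePage* p = pages_.back();
    pages_.pop_back();
    return p;
  }
 private:
  std::mutex mu_;
  std::vector<FakePage*> pages_;
};

bool SweepSomePages(PageQueue& queue, uint32_t max_pages) {
  uint32_t pages_swept = 0;
  bool found_usable_pages = false;
  FakePage* page;
  while ((page = queue.PopSafe()) != nullptr) {
    // In the real code this step is ParallelSweepPage(page, identity, mode).
    found_usable_pages |= page->usable;
    if (++pages_swept >= max_pages) break;
  }
  return found_usable_pages;
}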
381 DCHECK(!page->SweepingDone());
385 DCHECK(!page->SweepingDone());
387 page->concurrent_sweeping_state());
388 page->set_concurrent_sweeping_state(
394 !sweeper_->minor_sweeping_state_.should_reduce_memory());
396 page, free_space_treatment_mode, sweeping_mode,
399 : sweeper_->major_sweeping_state_.should_reduce_memory());
400 sweeper_->AddSweptPage(page, identity);
401 DCHECK(page->SweepingDone());
407 return ContributeAndWaitForPromotedPagesIterationImpl(
412 return ContributeAndWaitForPromotedPagesIterationImpl([]() { return false; });
417 return ParallelIteratePromotedPagesImpl(
422 return ParallelIteratePromotedPagesImpl([]() { return false; });
426 class PromotedPageRecordMigratedSlotVisitor final
431 host_page->heap()->isolate()),
435 host_page->heap()->ephemeron_remembered_set()) {
442 if (Map::ObjectFieldsFrom(map->visitor_id()) == ObjectFields::kDataOnly) {
453 V8_INLINE static constexpr bool EnableConcurrentVisitation() { return true; }
455 V8_INLINE void VisitMapPointer(Tagged<HeapObject> host) final {
457 VisitObjectImpl(host, host->map(cage_base()), host->map_slot().address());
460 V8_INLINE void VisitPointer(Tagged<HeapObject> host, ObjectSlot p) final {
461 VisitPointersImpl(host, p, p + 1);
463 V8_INLINE void VisitPointer(Tagged<HeapObject> host,
464 MaybeObjectSlot p) final {
465 VisitPointersImpl(host, p, p + 1);
467 V8_INLINE void VisitPointers(Tagged<HeapObject> host, ObjectSlot start,
468 ObjectSlot end) final {
469 VisitPointersImpl(host, start, end);
471 V8_INLINE void VisitPointers(Tagged<HeapObject> host, MaybeObjectSlot start,
472 MaybeObjectSlot end) final {
473 VisitPointersImpl(host, start, end);
476 V8_INLINE size_t VisitJSArrayBuffer(Tagged<Map> map,
477 Tagged<JSArrayBuffer> object,
478 MaybeObjectSize maybe_object_size) {
479 object->YoungMarkExtensionPromoted();
480 return NewSpaceVisitor<PromotedPageRecordMigratedSlotVisitor>::
481 VisitJSArrayBuffer(map, object, maybe_object_size);
484 V8_INLINE size_t VisitEphemeronHashTable(Tagged<Map> map,
485 Tagged<EphemeronHashTable> table,
487 NewSpaceVisitor<PromotedPageRecordMigratedSlotVisitor>::
488 VisitMapPointerIfNeeded<VisitorId::kVisitEphemeronHashTable>(table);
489 EphemeronRememberedSet::IndicesSet indices;
490 for (InternalIndex i : table->IterateEntries()) {
492 table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
493 VisitPointer(table, value_slot);
495 table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
497 Tagged<HeapObject> key_object;
498 if (!key.GetHeapObject(&key_object)) continue;
499 #ifdef THREAD_SANITIZER
500 MemoryChunk::FromHeapObject(key_object)->SynchronizedLoad();
505 HeapLayout::InYoungGeneration(key_object)) {
506 indices.insert(i.as_int());
509 if (!indices.empty()) {
513 return EphemeronHashTable::BodyDescriptor::SizeOf(map, table);
517 void VisitExternalReference(Tagged<InstructionStream> host,
518 RelocInfo* rinfo) final {}
519 void VisitInternalReference(Tagged<InstructionStream> host,
520 RelocInfo* rinfo) final {}
521 void VisitExternalPointer(Tagged<HeapObject> host,
522 ExternalPointerSlot slot) final {}
525 V8_INLINE static constexpr bool ShouldVisitMapPointer() { return true; }
526 V8_INLINE static constexpr bool ShouldVisitReadOnlyMapPointer() {
531 V8_INLINE void VerifyHost(Tagged<HeapObject> host) {
532 DCHECK(!HeapLayout::InWritableSharedSpace(host));
533 DCHECK(!HeapLayout::InYoungGeneration(host));
534 DCHECK(!MutablePageMetadata::FromHeapObject(host)->SweepingDone());
538 template <typename TObject>
539 V8_INLINE void VisitObjectImpl(Tagged<HeapObject> host, TObject object,
541 Tagged<HeapObject> value_heap_object;
542 if (!object.GetHeapObject(&value_heap_object)) return;
544 MemoryChunk* value_chunk = MemoryChunk::FromHeapObject(value_heap_object);
545 #ifdef THREAD_SANITIZER
546 value_chunk->SynchronizedLoad();
550 if (!v8_flags.sticky_mark_bits && value_chunk->InYoungGeneration()) {
551 RememberedSet<OLD_TO_NEW_BACKGROUND>::Insert<AccessMode::ATOMIC>(
553 } else if (value_chunk->InWritableSharedSpace()) {
554 RememberedSet<OLD_TO_SHARED>::Insert<AccessMode::ATOMIC>(
559 template <typename TSlot>
560 V8_INLINE void VisitPointersImpl(Tagged<HeapObject> host, TSlot start,
563 for (TSlot slot = start; slot < end; ++slot) {
564 typename TSlot::TObject target =
565 slot.Relaxed_Load(ObjectVisitorWithCageBases::cage_base());
566 VisitObjectImpl(host, target, slot.address());
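VisitPointersImpl/VisitObjectImpl above scan each slot of a promoted object and record references that still point into the young generation or shared space. A simplified standalone sketch of that slot-scanning pattern, with stand-in object and remembered-set types rather than V8's RememberedSet API:

#include <unordered_set>
#include <vector>

struct FakeObject {                         // stand-in for a heap object
  bool in_young_generation = false;
  std::vector<FakeObject*> slots;           // outgoing references
};

// Stand-in remembered set: the addresses of interesting slots.
using OldToNewSet = std::unordered_set<FakeObject**>;

// Record every slot of `host` that still points into the young generation.
void RecordOldToNewSlots(FakeObject& host, OldToNewSet& old_to_new) {
  for (FakeObject*& slot : host.slots) {
    if (slot == nullptr) continue;          // skip empty slots
    if (slot->in_young_generation) {
      old_to_new.insert(&slot);             // remember the slot address
    }
  }
}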
581 const size_t size_in_tagged = size_in_bytes / kTaggedSize;
583 for (size_t i = 0; i < size_in_tagged; ++i) {
585 ->store(kZapTagged, std::memory_order_relaxed);
590 if (dead_end != dead_start) {
591 size_t free_size = static_cast<size_t>(dead_end - dead_start);
592 AtomicZapBlock(dead_start, free_size);
593 WritableFreeSpace free_space =
595 heap->CreateFillerObjectAtBackground(free_space);
599 void ZapDeadObjectsOnPage(Heap* heap, PageMetadata* p) {
608 Address dead_start = p->area_start();
610 for (auto [object, size] : LiveObjectRange(p)) {
611 Address dead_end = object.address();
612 ZapDeadObjectsInRange(heap, dead_start, dead_end);
613 dead_start = dead_end + size;
615 ZapDeadObjectsInRange(heap, dead_start, p->area_end());
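ZapDeadObjectsOnPage above walks live objects in address order and zaps the dead gaps between them. A self-contained sketch of the same pattern over a plain word array, with relaxed atomic stores standing in for AtomicZapBlock and a made-up zap value:

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

constexpr uintptr_t kZapValue = 0xdeadbeef;  // illustrative zap pattern

void AtomicZapBlock(std::atomic<uintptr_t>* start, size_t size_in_words) {
  for (size_t i = 0; i < size_in_words; ++i)
    start[i].store(kZapValue, std::memory_order_relaxed);
}

// live_objects: (word offset, size in words) pairs, sorted by offset.
void ZapDeadRanges(std::atomic<uintptr_t>* area, size_t area_words,
                   const std::vector<std::pair<size_t, size_t>>& live_objects) {
  size_t dead_start = 0;
  for (auto [offset, size] : live_objects) {
    if (offset != dead_start)
      AtomicZapBlock(area + dead_start, offset - dead_start);  // zap the gap
    dead_start = offset + size;                                // skip live object
  }
  if (dead_start != area_words)
    AtomicZapBlock(area + dead_start, area_words - dead_start);  // trailing gap
}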
627 DCHECK(!page->SweepingDone());
629 page->concurrent_sweeping_state());
630 page->set_concurrent_sweeping_state(
632 PromotedPageRecordMigratedSlotVisitor record_visitor(page);
633 const bool is_large_page = page->Chunk()->IsLargePage();
640 DCHECK(!page->Chunk()->IsEvacuationCandidate());
641 for (auto [object, _] :
643 record_visitor.Process(object);
645 ZapDeadObjectsOnPage(sweeper_->heap_, static_cast<PageMetadata*>(page));
647 page->ClearLiveness();
648 sweeper_->NotifyPromotedPageIterationFinished(page);
649 DCHECK(page->SweepingDone());
713 ComparePagesForSweepingOrder);
717 bool ShouldUpdateRememberedSets(Heap* heap) {
724 if (heap->new_space()->Size() > 0) {
730 if (heap->isolate()->has_shared_space()) {
748 void ClearPromotedPages(Heap* heap, std::vector<MutablePageMetadata*> pages) {
750 for (auto* page : pages) {
751 DCHECK(!page->SweepingDone());
753 page->concurrent_sweeping_state());
754 if (!page->Chunk()->IsLargePage()) {
757 page->ClearLiveness();
758 page->set_concurrent_sweeping_state(
769 std::vector<MutablePageMetadata*> promoted_pages_for_clearing;
771 if (ShouldUpdateRememberedSets(heap_)) {
773 std::memory_order_release);
775 promoted_pages_for_clearing.swap(
782 ClearPromotedPages(heap_, promoted_pages_for_clearing);
795 false, std::memory_order_release);
805 false, std::memory_order_release);
838 heap_->tracer(), GCTracer::Scope::MINOR_MS_COMPLETE_SWEEPING,
850 heap_->tracer(), GCTracer::Scope::MC_COMPLETE_SWEEPING,
914 bool should_reduce_memory) {
916 size_t freed_bytes = 0;
917 size_t size = static_cast<size_t>(free_end - free_start);
920 AtomicZapBlock(free_start, size);
924 if (should_reduce_memory) {
946 if (discard_start < discard_end) {
955 if (size < FreeSpace::kSize) {
959 const Address unused_start = addr + FreeSpace::kSize;
960 DCHECK(page->ContainsLimit(unused_start));
962 DCHECK(page->ContainsLimit(unused_end));
964 std::optional<RwxMemoryWriteScope> scope;
965 if (page->Chunk()->executable()) {
966 scope.emplace("For zeroing unused memory.");
968 const std::optional<base::AddressRegion> discard_area =
971 #if !defined(V8_OS_WIN)
972 constexpr bool kDiscardEmptyPages = true;
978 constexpr bool kDiscardEmptyPages = false;
981 if (kDiscardEmptyPages && discard_area) {
987 reinterpret_cast<void*>(discard_area->begin()),
988 discard_area->size()));
994 memset(reinterpret_cast<void*>(unused_start), 0,
995 discard_area->begin() - unused_start);
996 memset(reinterpret_cast<void*>(discard_area->end()), 0,
997 unused_end - discard_area->end());
999 } else if (v8_flags.zero_unused_memory) {
1002 memset(reinterpret_cast<void*>(unused_start), 0, unused_end - unused_start);
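The branch above either discards whole OS pages inside the unused range or zeroes it with memset. A hedged sketch of that split, with an assumed 4 KiB OS page size and a caller-supplied discard callback instead of V8's ComputeDiscardMemoryArea/DiscardSystemPages:

#include <cstdint>
#include <cstring>
#include <optional>
#include <utility>

constexpr uintptr_t kOsPageSize = 4096;  // assumed OS page size

constexpr uintptr_t RoundUp(uintptr_t x, uintptr_t m) { return (x + m - 1) & ~(m - 1); }
constexpr uintptr_t RoundDown(uintptr_t x, uintptr_t m) { return x & ~(m - 1); }

// Returns the page-aligned sub-range of [start, end), if any.
std::optional<std::pair<uintptr_t, uintptr_t>> ComputeDiscardArea(uintptr_t start,
                                                                  uintptr_t end) {
  uintptr_t discard_start = RoundUp(start, kOsPageSize);
  uintptr_t discard_end = RoundDown(end, kOsPageSize);
  if (discard_start >= discard_end) return std::nullopt;
  return std::make_pair(discard_start, discard_end);
}

void ZeroOrDiscardUnused(uintptr_t unused_start, uintptr_t unused_end,
                         void (*discard)(void*, size_t)) {
  if (auto area = ComputeDiscardArea(unused_start, unused_end)) {
    // Return whole pages to the OS, then zero the unaligned head and tail.
    discard(reinterpret_cast<void*>(area->first), area->second - area->first);
    std::memset(reinterpret_cast<void*>(unused_start), 0, area->first - unused_start);
    std::memset(reinterpret_cast<void*>(area->second), 0, unused_end - area->second);
  } else {
    // Range smaller than one OS page: just zero it.
    std::memset(reinterpret_cast<void*>(unused_start), 0, unused_end - unused_start);
  }
}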
1040 if (record_free_ranges) {
1042 free_ranges_map->insert(std::pair<uint32_t, uint32_t>(
1043 static_cast<uint32_t>(chunk->Offset(free_start)),
1044 static_cast<uint32_t>(chunk->Offset(free_end))));
1055 page->ClearTypedSlotsInFreeMemory<OLD_TO_NEW>(free_ranges_map);
1060 page->AssertNoTypedSlotsInFreeMemory<OLD_TO_OLD>(free_ranges_map);
1061 page->ClearTypedSlotsInFreeMemory<OLD_TO_SHARED>(free_ranges_map);
1069 page->AssertNoTypedSlotsInFreeMemory<OLD_TO_NEW>(free_ranges_map);
1071 page->ClearTypedSlotsInFreeMemory<OLD_TO_SHARED>(free_ranges_map);
1075 size_t live_bytes) {
1082 DCHECK_EQ(live_bytes, page->allocated_bytes());
1087 SweepingMode sweeping_mode, bool should_reduce_memory) {
1106 std::optional<ActiveSystemPages> active_system_pages_after_sweeping;
1107 if (should_reduce_memory) {
1110 active_system_pages_after_sweeping->Init(
1119 size_t live_bytes = 0;
1136 Address free_end = object.address();
1137 if (free_end != free_start) {
1139 free_space_treatment_mode,
1140 should_reduce_memory);
1142 free_start, free_end, p, record_free_ranges, &free_ranges_map,
1146 free_start = free_end + size;
1148 if (active_system_pages_after_sweeping) {
1150 active_system_pages_after_sweeping->Add(
1158 if (free_end != free_start) {
1160 free_space_treatment_mode, should_reduce_memory);
1163 &free_ranges_map, sweeping_mode);
1171 if (active_system_pages_after_sweeping) {
1175 *active_system_pages_after_sweeping);
1225 uint32_t max_pages) {
1234 auto concurrent_sweeping_state = page->concurrent_sweeping_state();
1236 concurrent_sweeping_state ==
1238 if (concurrent_sweeping_state ==
1240 DCHECK(page->SweepingDone());
1252 if ((concurrent_sweeping_state ==
1259 } else if ((concurrent_sweeping_state ==
1270 CHECK(page->SweepingDone());
1278 while (!page->SweepingDone()) {
1290 std::find(sweeping_list.begin(), sweeping_list.end(), page);
1291 if (position == sweeping_list.end()) return false;
1293 if (sweeping_list.empty()) {
1295 false, std::memory_order_release);
1319 size_t live_bytes = page->live_bytes();
1323 page->IncrementAgeInNewSpace();
1328 DCHECK(page->SweepingDone());
1337 page->concurrent_sweeping_state());
1340 true, std::memory_order_release);
1372 DCHECK_GE(page->area_size(), static_cast<size_t>(page->live_bytes()));
1374 page->concurrent_sweeping_state());
1383 VerifyPreparedPage(page);
1384 page->set_concurrent_sweeping_state(
1399 page->ResetAllocationStatistics();
1405 VerifyPreparedPage(page);
1406 page->set_concurrent_sweeping_state(
1412 page->ResetAllocationStatisticsForPromotedPage();
1415 space->free_list()->increase_wasted_bytes(page->wasted_memory());
1424 if (!sweeping_list.empty()) {
1425 page = sweeping_list.back();
1426 sweeping_list.pop_back();
1428 if (sweeping_list.empty()) {
1430 false, std::memory_order_release);
1446 bool is_joining_thread) {
1448 return is_joining_thread ? GCTracer::Scope::MINOR_MS_SWEEP
1449 : GCTracer::Scope::MINOR_MS_BACKGROUND_SWEEPING;
1451 return is_joining_thread ? GCTracer::Scope::MC_SWEEP
1452 : GCTracer::Scope::MC_BACKGROUND_SWEEPING;
1457 std::memory_order_acquire);
1462 page->set_concurrent_sweeping_state(
1466 true, std::memory_order_release);
1473 std::memory_order_acquire);
1480 DCHECK(page->marking_bitmap()->IsClean());
1484 page->concurrent_sweeping_state());
1494 size_t size = page->area_size();
1500 for (size_t i = 0; i < size_in_tagged; ++i) {
1502 ->store(kZapTagged, std::memory_order_relaxed);
1506 page->ResetAllocationStatistics();
1507 page->ResetAgeInNewSpace();
1518 active_system_pages_after_sweeping.Init(
1523 active_system_pages_after_sweeping);
1528 : sweeper_(sweeper),
1529 resume_on_exit_(sweeper->AreMajorSweeperTasksRunning()) {
1536 if (resume_on_exit_) {
1537 sweeper_->major_sweeping_state_.Resume();
1549 bool Sweeper::HasUnsweptPagesForMajorSweeping() const {
1552 bool has_unswept_pages = false;
1558 has_unswept_pages = true;
1560 return has_unswept_pages;
V8_EXPORT_PRIVATE size_t Init(size_t header_size, size_t page_size_bits, size_t user_page_size)
virtual bool ShouldYield()=0
virtual uint8_t GetTaskId()=0
virtual bool DiscardSystemPages(void *address, size_t size)
bool is_linked(FreeList *owner) const
Tagged< Object > Acquire_Load() const
static constexpr bool NeedsYoungEpoch(ScopeId id)
GarbageCollector GetCurrentCollector() const
bool IsMainThread() const
NewSpace * new_space() const
void IncrementNewSpaceSurvivingObjectSize(size_t object_size)
IncrementalMarking * incremental_marking() const
void IncrementPromotedObjectsSize(size_t object_size)
StickySpace * sticky_space() const
void IncrementYoungSurvivorsCounter(size_t survived)
MemoryAllocator * memory_allocator()
PagedSpace * paged_space(int idx) const
PagedNewSpace * paged_new_space() const
Isolate * isolate() const
bool ShouldReduceMemory() const
bool IsMinorMarking() const
bool IsMajorMarking() const
static LargePageMetadata * cast(MutablePageMetadata *metadata)
static V8_INLINE constexpr MarkBitIndex AddressToIndex(Address address)
V8_INLINE bool IsMarked(const Tagged< HeapObject > obj) const
static V8_INLINE intptr_t GetCommitPageSizeBits()
v8::PageAllocator * page_allocator(AllocationSpace space)
static V8_INLINE intptr_t GetCommitPageSize()
V8_EXPORT_PRIVATE size_t GetPooledChunksCount()
V8_EXPORT_PRIVATE void ReleasePooledChunksImmediately()
bool IsEvacuationCandidate() const
V8_INLINE bool IsFlagSet(Flag flag) const
size_t Offset(Address addr) const
size_t live_bytes() const
TypedSlotSet * typed_slot_set()
ConcurrentSweepingState concurrent_sweeping_state()
static const int kPageSize
void set_concurrent_sweeping_state(ConcurrentSweepingState state)
AllocationSpace owner_identity() const
bool SweepingDone() const
void ReleaseSlotSet(RememberedSetType type)
PagedSpaceForNewSpace * paged_space()
static PagedNewSpace * From(NewSpace *space)
size_t RelinkFreeListCategories(PageMetadata *page)
void IncreaseAllocatedBytes(size_t bytes, PageMetadata *page)
V8_INLINE size_t FreeDuringSweep(Address start, size_t size_in_bytes)
void ReduceActiveSystemPages(PageMetadata *page, ActiveSystemPages active_system_pages)
static void RemoveRange(MutablePageMetadata *chunk, Address start, Address end, SlotSet::EmptyBucketMode mode)
LocalSweeper local_sweeper_
bool ConcurrentSweepSpace(AllocationSpace identity, JobDelegate *delegate)
ConcurrentMajorSweeper(Sweeper *sweeper)
bool ConcurrentSweepSpace(JobDelegate *delegate)
LocalSweeper local_sweeper_
bool ConcurrentSweepPromotedPages(JobDelegate *delegate)
ConcurrentMinorSweeper(Sweeper *sweeper)
bool ParallelIteratePromotedPages(JobDelegate *delegate)
void ParallelSweepPage(PageMetadata *page, AllocationSpace identity, SweepingMode sweeping_mode)
bool ParallelIteratePromotedPages()
bool ContributeAndWaitForPromotedPagesIteration()
bool ParallelSweepSpace(AllocationSpace identity, SweepingMode sweeping_mode, uint32_t max_pages=std::numeric_limits< uint32_t >::max())
bool ContributeAndWaitForPromotedPagesIteration(JobDelegate *delegate)
void ParallelIteratePromotedPage(MutablePageMetadata *page)
static constexpr int kMaxTasks
static constexpr int kNumberOfMajorSweepingSpaces
void RunImpl(JobDelegate *delegate, bool is_joining_thread)
size_t GetMaxConcurrency(size_t worker_count) const override
std::vector< ConcurrentMajorSweeper > & concurrent_sweepers
~MajorSweeperJob() override=default
void Run(JobDelegate *delegate) final
MajorSweeperJob(const MajorSweeperJob &)=delete
MajorSweeperJob & operator=(const MajorSweeperJob &)=delete
MajorSweeperJob(Isolate *isolate, Sweeper *sweeper)
MinorSweeperJob(Isolate *isolate, Sweeper *sweeper)
MinorSweeperJob & operator=(const MinorSweeperJob &)=delete
std::vector< ConcurrentMinorSweeper > & concurrent_sweepers
void RunImpl(JobDelegate *delegate, bool is_joining_thread)
void Run(JobDelegate *delegate) final
~MinorSweeperJob() override=default
static constexpr int kMaxTasks
MinorSweeperJob(const MinorSweeperJob &)=delete
size_t GetMaxConcurrency(size_t worker_count) const override
~PauseMajorSweepingScope()
const bool resume_on_exit_
PauseMajorSweepingScope(Sweeper *sweeper)
SweepingState(Sweeper *sweeper)
void InitializeSweeping()
bool should_reduce_memory() const
uint64_t trace_id() const
bool HasActiveJob() const
void StopConcurrentSweeping()
void StartConcurrentSweeping()
bool sweeping_in_progress() const
void EnsureMajorCompleted()
void InitializeMinorSweeping()
static int GetSweepSpaceIndex(AllocationSpace space)
bool AreMajorSweeperTasksRunning() const
SweptList GetAllSweptPagesSafe(PagedSpaceBase *space)
V8_EXPORT_PRIVATE void StartMajorSweeperTasks()
static constexpr int kNumberOfSweepingSpaces
PageMetadata * GetSweptPageSafe(PagedSpaceBase *space)
bool ShouldRefillFreelistForSpace(AllocationSpace space) const
bool TryRemovePromotedPageSafe(MutablePageMetadata *chunk)
base::ConditionVariable cv_page_swept_
static V8_EXPORT_PRIVATE std::optional< base::AddressRegion > ComputeDiscardMemoryArea(Address start, Address end)
static bool IsValidSweepingSpace(AllocationSpace space)
MutablePageMetadata * GetPromotedPageSafe()
void PrepareToBeSweptPage(AllocationSpace space, PageMetadata *page)
void CleanupRememberedSetEntriesForFreedMemory(Address free_start, Address free_end, PageMetadata *page, bool record_free_ranges, TypedSlotSet::FreeRangesMap *free_ranges_map, SweepingMode sweeping_mode)
void ContributeAndWaitForPromotedPagesIteration()
void AddNewSpacePage(PageMetadata *page)
uint64_t GetTraceIdForFlowEvent(GCTracer::Scope::ScopeId scope_id) const
size_t promoted_pages_for_iteration_count_
base::ConditionVariable promoted_pages_iteration_notification_variable_
bool major_sweeping_in_progress() const
void ZeroOrDiscardUnusedMemory(PageMetadata *page, Address addr, size_t size)
void StartMajorSweeping()
bool minor_sweeping_in_progress() const
void NotifyPromotedPagesIterationFinished()
void InitializeMajorSweeping()
size_t ConcurrentMinorSweepingPageCount()
NonAtomicMarkingState *const marking_state_
void ClearMarkBitsAndHandleLivenessStatistics(PageMetadata *page, size_t live_bytes)
bool TryRemoveSweepingPageSafe(AllocationSpace space, PageMetadata *page)
SweepingState< SweepingScope::kMajor > major_sweeping_state_
bool IsSweepingDoneForSpace(AllocationSpace space) const
bool ParallelSweepSpace(AllocationSpace identity, SweepingMode sweeping_mode, uint32_t max_pages=std::numeric_limits< uint32_t >::max())
size_t ConcurrentMajorSweepingPageCount()
void StartMinorSweeping()
std::atomic< bool > has_swept_pages_[kNumberOfSweepingSpaces]
void EnsurePageIsSwept(PageMetadata *page)
std::atomic< size_t > iterated_promoted_pages_count_
std::atomic< bool > promoted_page_iteration_in_progress_
PageMetadata * GetSweepingPageSafe(AllocationSpace space)
V8_EXPORT_PRIVATE void StartMinorSweeperTasks()
void ForAllSweepingSpaces(Callback callback) const
void CleanupTypedSlotsInFreeMemory(PageMetadata *page, const TypedSlotSet::FreeRangesMap &free_ranges_map, SweepingMode sweeping_mode)
bool AreMinorSweeperTasksRunning() const
SweepingState< SweepingScope::kMinor > minor_sweeping_state_
std::vector< MutablePageMetadata * > sweeping_list_for_promoted_page_iteration_
size_t FreeAndProcessFreedMemory(Address free_start, Address free_end, PageMetadata *page, Space *space, FreeSpaceTreatmentMode free_space_treatment_mode, bool should_reduce_memory)
void AddSweptPage(PageMetadata *page, AllocationSpace identity)
bool IsIteratingPromotedPages() const
void SweepEmptyNewSpacePage(PageMetadata *page)
SweptList swept_list_[kNumberOfSweepingSpaces]
void NotifyPromotedPageIterationFinished(MutablePageMetadata *chunk)
void AddPageImpl(AllocationSpace space, PageMetadata *page)
void WaitForPageToBeSwept(PageMetadata *page)
void AddPromotedPage(MutablePageMetadata *chunk)
base::Mutex promoted_pages_iteration_notification_mutex_
std::atomic< bool > has_sweeping_work_[kNumberOfSweepingSpaces]
void RawSweep(PageMetadata *p, FreeSpaceTreatmentMode free_space_treatment_mode, SweepingMode sweeping_mode, bool should_reduce_memory)
SweepingList sweeping_list_[kNumberOfSweepingSpaces]
void PrepareToBeIteratedPromotedPage(PageMetadata *page)
std::vector< PageMetadata * > SweepingList
bool UsingMajorSweeperTasks() const
LocalSweeper main_thread_local_sweeper_
void EnsureMinorCompleted()
GCTracer::Scope::ScopeId GetTracingScope(AllocationSpace space, bool is_joining_thread)
std::vector< PageMetadata * > SweptList
void AddPage(AllocationSpace space, PageMetadata *page)
std::map< uint32_t, uint32_t > FreeRangesMap
static V8_EXPORT_PRIVATE v8::Platform * GetCurrentPlatform()
static V8_INLINE WritableFreeSpace ForNonExecutableMemory(base::Address addr, size_t size)
#define TRACE_GC_EPOCH_WITH_FLOW(tracer, scope_id, thread_kind, bind_id, flow_flags)
#define TRACE_GC_WITH_FLOW(tracer, scope_id, bind_id, flow_flags)
#define TRACE_GC_NOTE(note)
V8_INLINE std::atomic< T > * AsAtomicPtr(T *t)
constexpr int kTaggedSize
SlotTraits::TObjectSlot ObjectSlot
static constexpr auto kNewSpace
::heap::base::ActiveSystemPages ActiveSystemPages
constexpr uint32_t kZapValue
V8_EXPORT_PRIVATE FlagValues v8_flags
@ SURVIVOR_TO_EXTERNAL_POINTER
@ TRUSTED_TO_SHARED_TRUSTED
#define DCHECK_LE(v1, v2)
#define CHECK_GT(lhs, rhs)
#define DCHECK_NOT_NULL(val)
#define DCHECK_IMPLIES(v1, v2)
#define DCHECK_NE(v1, v2)
#define DCHECK_GE(v1, v2)
#define DCHECK(condition)
#define DCHECK_LT(v1, v2)
#define DCHECK_EQ(v1, v2)
constexpr T RoundUp(T x, intptr_t m)
constexpr T RoundDown(T x, intptr_t m)
constexpr bool IsAligned(T value, U alignment)
MutablePageMetadata *const host_page_
MemoryChunk *const host_chunk_
EphemeronRememberedSet * ephemeron_remembered_set_
#define TRACE_EVENT_FLAG_FLOW_OUT
#define TRACE_EVENT_FLAG_FLOW_IN
std::unique_ptr< ValueMirror > key