#if defined(CPPGC_CAGED_HEAP)
// ...
bool EnterIncrementalMarkingIfNeeded(MarkingConfig config, HeapBase& heap) {
  if (config.marking_type == MarkingConfig::MarkingType::kIncremental ||
      config.marking_type ==
          MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
    // ...
    heap.set_incremental_marking_in_progress(true);
    return true;
  }
  return false;
}
bool ExitIncrementalMarkingIfNeeded(MarkingConfig config, HeapBase& heap) {
  if (config.marking_type == MarkingConfig::MarkingType::kIncremental ||
      config.marking_type ==
          MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
    // ...
    heap.set_incremental_marking_in_progress(false);
    return true;
  }
  return false;
}
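// --- Illustrative sketch (not part of marker.cc) -----------------------------
// Minimal standalone model of the Enter/Exit pair above: the "in progress"
// flag is only toggled for the two incremental marking modes, and the bool
// return value tells the caller whether anything was entered. All names here
// (SketchHeap, SketchMarkingType) are hypothetical.
#include <cassert>

enum class SketchMarkingType { kAtomic, kIncremental, kIncrementalAndConcurrent };

struct SketchHeap {
  bool incremental_marking_in_progress = false;
};

bool EnterIncrementalIfNeeded(SketchMarkingType type, SketchHeap& heap) {
  if (type == SketchMarkingType::kIncremental ||
      type == SketchMarkingType::kIncrementalAndConcurrent) {
    heap.incremental_marking_in_progress = true;
    return true;
  }
  return false;
}

int main() {
  SketchHeap heap;
  assert(!EnterIncrementalIfNeeded(SketchMarkingType::kAtomic, heap));
  assert(EnterIncrementalIfNeeded(SketchMarkingType::kIncremental, heap));
  assert(heap.incremental_marking_in_progress);
}
// --- end sketch ---------------------------------------------------------------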
static constexpr size_t kDefaultDeadlineCheckInterval = 150u;

template <StatsCollector::ScopeId scope_id,
          size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
          typename WorklistLocal, typename Callback>
bool DrainWorklistWithBytesAndTimeDeadline(StatsCollector* stats_collector,
                                           BasicMarkingState& marking_state,
                                           size_t marked_bytes_deadline,
                                           v8::base::TimeTicks time_deadline,
                                           WorklistLocal& worklist_local,
                                           Callback callback) {
  // Yield when either the marked-bytes deadline or the time deadline is hit.
  return DrainWorklistWithPredicate(
      [&marking_state, marked_bytes_deadline, time_deadline]() {
        return (marked_bytes_deadline <= marking_state.marked_bytes()) ||
               (time_deadline <= v8::base::TimeTicks::Now());
      },
      /* ... */, worklist_local, callback);
}
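// --- Illustrative sketch (not part of marker.cc) -----------------------------
// Standalone model of the helper above: drain a work queue, but only evaluate
// the (possibly expensive) deadline predicate once every kCheckInterval items.
// The container, types, and names below are hypothetical.
#include <chrono>
#include <cstddef>
#include <deque>
#include <functional>

template <size_t kCheckInterval = 150>
bool DrainWithDeadline(std::deque<int>& worklist,
                       const std::function<void(int)>& process,
                       const std::function<bool()>& should_yield) {
  size_t processed = 0;
  while (!worklist.empty()) {
    // Re-check the deadline only once per kCheckInterval processed items.
    if (processed % kCheckInterval == 0 && should_yield()) return false;
    process(worklist.front());
    worklist.pop_front();
    ++processed;
  }
  return true;  // Worklist fully drained before hitting the deadline.
}

// Usage mirroring the bytes-and-time predicate above: yield when either a
// byte budget or a wall-clock deadline is exceeded.
bool Example(std::deque<int>& worklist, size_t& marked_bytes,
             size_t bytes_deadline,
             std::chrono::steady_clock::time_point time_deadline) {
  return DrainWithDeadline(
      worklist, [&](int bytes) { marked_bytes += static_cast<size_t>(bytes); },
      [&] {
        return marked_bytes >= bytes_deadline ||
               std::chrono::steady_clock::now() >= time_deadline;
      });
}
// --- end sketch ---------------------------------------------------------------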
size_t GetNextIncrementalStepDuration(
    heap::base::IncrementalMarkingSchedule& schedule, HeapBase& heap) {
  return schedule.GetNextIncrementalStepDuration(
      heap.stats_collector()->allocated_object_size());
}
MarkerBase::IncrementalMarkingTask::Handle MarkerBase::IncrementalMarkingTask::Post(
    cppgc::TaskRunner* runner, MarkerBase* marker) {
  DCHECK_IMPLIES(marker->heap().stack_support() !=
                     HeapBase::StackSupport::kSupportsConservativeStackScan,
                 runner->NonNestableTasksEnabled());
  const bool non_nestable_tasks_enabled = runner->NonNestableTasksEnabled();
  auto task = std::make_unique<IncrementalMarkingTask>(
      marker, non_nestable_tasks_enabled ? StackState::kNoHeapPointers
                                         : StackState::kMayContainHeapPointers);
  auto handle = task->handle_;
  if (non_nestable_tasks_enabled) {
    runner->PostNonNestableTask(std::move(task));
  } else {
    runner->PostTask(std::move(task));
  }
  return handle;
}
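// --- Illustrative sketch (not part of marker.cc) -----------------------------
// Standalone model of the posting logic above. The rationale appears to be:
// a non-nestable task is guaranteed not to run nested inside another task, so
// the stack at that point can be treated as free of heap pointers
// (kNoHeapPointers); otherwise the task must assume the stack may contain
// them. The SketchTaskRunner interface below is hypothetical.
#include <functional>
#include <utility>

enum class SketchStackState { kNoHeapPointers, kMayContainHeapPointers };

struct SketchTaskRunner {
  virtual ~SketchTaskRunner() = default;
  virtual bool NonNestableTasksEnabled() const = 0;
  virtual void PostTask(std::function<void()> task) = 0;
  virtual void PostNonNestableTask(std::function<void()> task) = 0;
};

void PostMarkingStep(SketchTaskRunner& runner,
                     std::function<void(SketchStackState)> step) {
  const bool non_nestable = runner.NonNestableTasksEnabled();
  const SketchStackState stack_state =
      non_nestable ? SketchStackState::kNoHeapPointers
                   : SketchStackState::kMayContainHeapPointers;
  auto task = [step = std::move(step), stack_state] { step(stack_state); };
  if (non_nestable) {
    runner.PostNonNestableTask(std::move(task));
  } else {
    runner.PostTask(std::move(task));
  }
}
// --- end sketch ---------------------------------------------------------------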
// ...
      StatsCollector::kIncrementalMark);
// ...
      heap.compactor().compaction_worklists()) {
// ...
  std::unordered_set<HeapObjectHeader*> objects =
void AllocatedObjectSizeIncreased(size_t delta) final {
  current_allocated_size_ += delta;
  if (current_allocated_size_ > kMinAllocatedBytesPerStep) {
    marker_.AdvanceMarkingOnAllocation();
    current_allocated_size_ = 0;
  }
}
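// --- Illustrative sketch (not part of marker.cc) -----------------------------
// Standalone model of the allocation observer above: accumulate allocated
// bytes and trigger a bounded marking step once a threshold is crossed, then
// reset the counter. The threshold value and all names are hypothetical.
#include <cstddef>
#include <functional>
#include <utility>

class AllocationDrivenStepper {
 public:
  explicit AllocationDrivenStepper(std::function<void()> advance_marking)
      : advance_marking_(std::move(advance_marking)) {}

  void AllocatedObjectSizeIncreased(size_t delta) {
    current_allocated_size_ += delta;
    if (current_allocated_size_ > kMinAllocatedBytesPerStep) {
      advance_marking_();           // Perform a bounded incremental step.
      current_allocated_size_ = 0;  // Start accumulating towards the next one.
    }
  }

 private:
  static constexpr size_t kMinAllocatedBytesPerStep = 64 * 1024;  // Assumed.
  std::function<void()> advance_marking_;
  size_t current_allocated_size_ = 0;
};
// --- end sketch ---------------------------------------------------------------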
// ...
      heap().stats_collector(),
      // ...
          ? StatsCollector::kAtomicMark
          : StatsCollector::kIncrementalMark);
  if (EnterIncrementalMarkingIfNeeded(config_, heap())) {
    // ...
        heap().stats_collector(), StatsCollector::kMarkIncrementalStart);
    // ...
    if (config_.marking_type ==
        MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
  // ...
      StatsCollector::kAtomicMark);
  // ...
      StatsCollector::kMarkAtomicPrologue);
  // ...
  if (ExitIncrementalMarkingIfNeeded(config_, heap())) {
  if (old_marking_type ==
      MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
      StatsCollector::kAtomicMark);
  // ...
      heap().stats_collector(), StatsCollector::kMarkAtomicEpilogue);
  // ...
  const size_t overall_marked_bytes = /* ... */;
  // ...
      StatsCollector::kAtomicMark);
  // ...
        callback_worklist_(callback_worklist),
  // ...
        marker_->heap().stats_collector(),
        StatsCollector::kConcurrentWeakCallback);
    MarkingWorklists::WeakCallbackWorklist::Local local(*callback_worklist_);
    // ...
    while (local.Pop(&item)) {
  // ...
    return std::min(static_cast<size_t>(1),
                    callback_worklist_->Size() + worker_count);
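// --- Illustrative sketch (not part of marker.cc) -----------------------------
// Standalone model of the GetMaxConcurrency() heuristic above: request at most
// one worker, and only while there is remaining work (pending items plus
// workers already running). Names are hypothetical.
#include <algorithm>
#include <cstddef>

size_t MaxConcurrencyForWeakCallbacks(size_t pending_items,
                                      size_t active_worker_count) {
  // std::min(1, pending + active) is 0 only when there is neither pending
  // work nor an active worker, which lets the job complete; otherwise 1.
  return std::min(static_cast<size_t>(1), pending_items + active_worker_count);
}
// --- end sketch ---------------------------------------------------------------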
      StatsCollector::kAtomicWeak);
  // ...
      StatsCollector::kAtomicWeak);
  std::unique_ptr<cppgc::JobHandle> job_handle{nullptr};
  if (heap().marking_support() ==
      // ...
        cppgc::TaskPriority::kUserBlocking,
        std::make_unique<WeakCallbackJobTask>(
            // ...
#if defined(CPPGC_YOUNG_GENERATION)
  if (heap().generational_gc_supported()) {
    auto& remembered_set = heap().remembered_set();
    // ...
    remembered_set.ExecuteCustomCallbacks(broker);
    // ...
    remembered_set.ReleaseCustomCallbacks();
  // ...
        heap().stats_collector(),
        StatsCollector::kWeakContainerCallbacksProcessing);
    // ...
    MarkingWorklists::WeakCallbackWorklist::Local& collections_local =
    // ...
    while (collections_local.Pop(&item)) {
  // ...
        heap().stats_collector(), StatsCollector::kCustomCallbacksProcessing);
    // ...
    MarkingWorklists::WeakCustomCallbackWorklist::Local& custom_callbacks =
    // ...
    while (custom_callbacks.Pop(&item)) {
      // ...
#if defined(CPPGC_YOUNG_GENERATION)
      if (heap().generational_gc_supported())
        heap().remembered_set().AddWeakCallback(item);
  MarkingWorklists::WeakCallbackWorklist::Local& local =
      // ...
  while (local.Pop(&item)) {
      StatsCollector::kMarkVisitRoots);
  // ...
      heap().stats_collector(), StatsCollector::kMarkVisitPersistents);
  // ...
  if (stack_state != StackState::kNoHeapPointers) {
    // ...
        heap().stats_collector(), StatsCollector::kMarkVisitStack);
  // ...
#if defined(CPPGC_YOUNG_GENERATION)
  // ...
      heap().stats_collector(), StatsCollector::kMarkVisitRememberedSets);
  // ...
      heap().stats_collector(),
      StatsCollector::kMarkVisitCrossThreadPersistents);
  if (stack_state == StackState::kNoHeapPointers) {
  // ...
      StatsCollector::kIncrementalMark);
  // ...
      StatsCollector::kMarkOnAllocation);
  // ...
      MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
bool MarkerBase::AdvanceMarkingWithLimits(v8::base::TimeDelta max_duration,
                                          size_t marked_bytes_limit) {
  // ...
        heap().stats_collector(),
        StatsCollector::kMarkTransitiveClosureWithDeadline, "max_duration_ms",
        max_duration.InMillisecondsF());
    // ...
    if (marked_bytes_limit == 0) {
      marked_bytes_limit = GetNextIncrementalStepDuration(schedule(), heap_);
    }
    // Saturating add: clamp the deadline to SIZE_MAX if the sum overflowed.
    size_t marked_bytes_deadline =
        mutator_marking_state_.marked_bytes() + marked_bytes_limit;
    if (marked_bytes_deadline < marked_bytes_limit) {
      marked_bytes_deadline = SIZE_MAX;
    }
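// --- Illustrative sketch (not part of marker.cc) -----------------------------
// Standalone model of the deadline computation above: the bytes deadline is
// "already marked + step budget", saturated to SIZE_MAX so an overflowing
// addition cannot wrap around to a tiny deadline. Names are hypothetical.
#include <cassert>
#include <cstddef>
#include <cstdint>

size_t ComputeBytesDeadline(size_t already_marked_bytes, size_t step_budget) {
  size_t deadline = already_marked_bytes + step_budget;
  if (deadline < step_budget) deadline = SIZE_MAX;  // Addition overflowed.
  return deadline;
}

int main() {
  assert(ComputeBytesDeadline(100, 50) == 150);
  assert(ComputeBytesDeadline(SIZE_MAX - 10, 100) == SIZE_MAX);  // Saturates.
}
// --- end sketch ---------------------------------------------------------------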
bool MarkerBase::ProcessWorklistsWithDeadline(
    size_t marked_bytes_deadline, v8::base::TimeTicks time_deadline) {
  // ...
      stats_collector, StatsCollector::kMarkTransitiveClosure);
  bool saved_did_discover_new_ephemeron_pairs;
  // ...
        schedule().ShouldFlushEphemeronPairs()) {
    // ...
    if (!DrainWorklistWithBytesAndTimeDeadline<
            StatsCollector::kMarkProcessBailOutObjects,
            kDefaultDeadlineCheckInterval / 5>(
        // ...
              mutator_marking_state_.AccountMarkedBytes(
                  BasePage::FromPayload(const_cast<void*>(item.parameter)),
                  item.bailedout_size);
              item.callback(&visitor(), item.parameter);
        // ...
    if (!DrainWorklistWithBytesAndTimeDeadline<
            StatsCollector::kMarkProcessNotFullyconstructedWorklist>(
        // ...
              mutator_marking_state_.AccountMarkedBytes(*header);
              DynamicallyTraceMarkedObject<AccessMode::kNonAtomic>(visitor(),
        // ...
    if (!DrainWorklistWithBytesAndTimeDeadline<
            StatsCollector::kMarkProcessMarkingWorklist>(
        // ...
              const HeapObjectHeader& header =
                  HeapObjectHeader::FromObject(item.base_object_payload);
              DCHECK(!header.IsInConstruction<AccessMode::kNonAtomic>());
              DCHECK(header.IsMarked<AccessMode::kAtomic>());
              mutator_marking_state_.AccountMarkedBytes(header);
              item.callback(&visitor(), item.base_object_payload);
        // ...
    if (!DrainWorklistWithBytesAndTimeDeadline<
            StatsCollector::kMarkProcessWriteBarrierWorklist>(
        // ...
              mutator_marking_state_.AccountMarkedBytes(*header);
              DynamicallyTraceMarkedObject<AccessMode::kNonAtomic>(visitor(),
        // ...
    if (!DrainWorklistWithBytesAndTimeDeadline<
            StatsCollector::kMarkProcessRetraceWorklist>(
        // ...
              DynamicallyTraceMarkedObject<AccessMode::kNonAtomic>(visitor(),
        // ...
    saved_did_discover_new_ephemeron_pairs =
        // ...
    if (!DrainWorklistWithBytesAndTimeDeadline<
            StatsCollector::kMarkProcessEphemerons>(
        // ...
              mutator_marking_state_.ProcessEphemeron(
                  item.key, item.value, item.value_desc, visitor());
        // ...
           saved_did_discover_new_ephemeron_pairs);
  std::unordered_set<HeapObjectHeader*> objects = /* ... */;
  if (objects.empty()) {
    // ...
      heap().stats_collector(),
      StatsCollector::kMarkVisitNotFullyConstructedObjects);
  if (resume_on_exit_) {
    marker_.concurrent_marker().Start();
  }
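// --- Illustrative sketch (not part of marker.cc) -----------------------------
// Standalone model of the RAII scope whose destructor appears above: cancel a
// concurrent worker on entry and restart it on exit, but only if it was
// actually running when the scope was entered. ConcurrentWorker is hypothetical.
class ConcurrentWorker {
 public:
  void Start() { running_ = true; }
  bool Cancel() {  // Returns whether the worker had been running.
    const bool was_running = running_;
    running_ = false;
    return was_running;
  }
  bool IsRunning() const { return running_; }

 private:
  bool running_ = false;
};

class PauseScope {
 public:
  explicit PauseScope(ConcurrentWorker& worker)
      : worker_(worker), resume_on_exit_(worker_.Cancel()) {}
  ~PauseScope() {
    if (resume_on_exit_) worker_.Start();
  }

 private:
  ConcurrentWorker& worker_;
  const bool resume_on_exit_;
};
// --- end sketch ---------------------------------------------------------------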