static constexpr size_t kMajorGCYoungGenerationAllocationObserverStep = 64 * KB;
static constexpr size_t kMajorGCOldGenerationAllocationObserverStep = 256 * KB;

// Debug builds drop the activation thresholds to zero so incremental marking
// can be exercised regardless of heap size.
#ifndef DEBUG
static constexpr size_t kV8ActivationThreshold = 8 * MB;
static constexpr size_t kEmbedderActivationThreshold = 8 * MB;
#else
static constexpr size_t kV8ActivationThreshold = 0;
static constexpr size_t kEmbedderActivationThreshold = 0;
#endif
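// The thresholds above feed IsBelowActivationThresholds(), which reports
// whether the V8 heap and the embedder (C++) heap are still too small for
// incremental marking to pay off. A minimal sketch of that check, assuming it
// simply compares the Heap accessors referenced by this file; the exact body
// is not part of this listing:
bool IncrementalMarking::IsBelowActivationThresholds() const {
  return heap()->OldGenerationSizeOfObjects() <= kV8ActivationThreshold &&
         heap()->EmbedderSizeOfObjects() <= kEmbedderActivationThreshold;
}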
base::TimeDelta GetMaxDuration(StepOrigin step_origin) {
  switch (step_origin) {
    case StepOrigin::kTask:
      return kMaxStepSizeOnTask;
    case StepOrigin::kV8:
      return kMaxStepSizeOnAllocation;
  }
}
IncrementalMarking::Observer::Observer(IncrementalMarking* incremental_marking,
                                       intptr_t step_size)
    : AllocationObserver(step_size),
      incremental_marking_(incremental_marking) {}

void IncrementalMarking::Observer::Step(int, Address, size_t) {
  Heap* heap = incremental_marking_->heap();
  RCS_SCOPE(heap->isolate(),
            RuntimeCallCounterId::kGC_Custom_IncrementalMarkingObserver);
  incremental_marking_->AdvanceOnAllocation();
}
// Tail of the IncrementalMarking constructor's initializer list: both
// allocation observers use the step sizes defined above.
      new_generation_observer_(this, kMajorGCYoungGenerationAllocationObserverStep),
      old_generation_observer_(this, kMajorGCOldGenerationAllocationObserverStep) {}
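// The observers only drive marking while a cycle is active: they are attached
// to the heap's allocator when marking starts and detached when it stops. A
// minimal sketch of that registration, using the
// AddAllocationObserver/RemoveAllocationObserver pair referenced by this file;
// the helper functions below are hypothetical and only illustrate the call
// shape:
void AttachMarkingObservers(Heap* heap, AllocationObserver* old_gen_observer,
                            AllocationObserver* new_gen_observer) {
  // Hypothetical helper for illustration only.
  heap->allocator()->AddAllocationObserver(old_gen_observer, new_gen_observer);
}
void DetachMarkingObservers(Heap* heap, AllocationObserver* old_gen_observer,
                            AllocationObserver* new_gen_observer) {
  // Hypothetical helper for illustration only.
  heap->allocator()->RemoveAllocationObserver(old_gen_observer,
                                              new_gen_observer);
}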
// In IncrementalMarking::MarkBlackBackground(): record the object's size
// against its page so the live-bytes counters can be applied later.
  background_live_bytes_[MutablePageMetadata::FromHeapObject(obj)] +=
      static_cast<intptr_t>(object_size);
  const size_t old_generation_size_mb =
      heap()->OldGenerationSizeOfObjects() / MB;
  const size_t old_generation_waste_mb =
      heap()->OldGenerationWastedBytes() / MB;
  const size_t old_generation_limit_mb =
      heap()->old_generation_allocation_limit() / MB;
  const size_t global_size_mb = heap()->GlobalSizeOfObjects() / MB;
  const size_t global_waste_mb = heap()->GlobalWastedBytes() / MB;
  const size_t global_limit_mb = heap()->global_allocation_limit() / MB;
  isolate()->PrintWithTimestamp(
      "[IncrementalMarking] Start (%s): (size/waste/limit/slack) v8: %zuMB / "
      "%zuMB / %zuMB "
      "/ %zuMB global: %zuMB / %zuMB / %zuMB / %zuMB\n",
      ToString(gc_reason), old_generation_size_mb, old_generation_waste_mb,
      old_generation_limit_mb,
      old_generation_size_mb + old_generation_waste_mb >
              old_generation_limit_mb
          ? 0
          : old_generation_limit_mb - old_generation_size_mb,
      global_size_mb, global_waste_mb, global_limit_mb,
      global_size_mb + global_waste_mb > global_limit_mb
          ? 0
          : global_limit_mb - global_size_mb);
  Counters* counters = isolate()->counters();
  const bool is_major = garbage_collector == GarbageCollector::MARK_COMPACTOR;
  counters->incremental_marking_reason()->AddSample(
      static_cast<int>(gc_reason));
  NestedTimedHistogramScope incremental_marking_scope(
      is_major ? counters->gc_incremental_marking_start()
               : counters->gc_minor_incremental_marking_start());
  const auto scope_id = is_major ? GCTracer::Scope::MC_INCREMENTAL_START
                                 : GCTracer::Scope::MINOR_MS_INCREMENTAL_START;
  TRACE_EVENT2("v8",
               is_major ? "V8.GCIncrementalMarkingStart"
                        : "V8.GCMinorIncrementalMarkingStart",
               "epoch", heap()->tracer()->CurrentEpoch(scope_id), "reason",
               ToString(gc_reason));
  if (isolate()->serializer_enabled()) {
    // Incremental marking is not started while the snapshot serializer is
    // active; it is delayed until serialization is done.
    if (v8_flags.trace_incremental_marking) {
      isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Start delayed - serializer\n");
    }
    return;
  }

  if (v8_flags.trace_incremental_marking) {
    isolate()->PrintWithTimestamp("[IncrementalMarking] Start marking\n");
  }
  schedule_->NotifyIncrementalMarkingStart();

  if (v8_flags.incremental_marking_unified_schedule) {
    // ...
  }

  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
  if (v8_flags.trace_incremental_marking) {
    isolate()->PrintWithTimestamp("[IncrementalMarking] Running\n");
  }

  if (heap()->cpp_heap()) {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
    // ...
  }
  if (v8_flags.trace_incremental_marking) {
    isolate()->PrintWithTimestamp(
        "[IncrementalMarking] (MinorMS) Start marking\n");
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MS_MARK_INCREMENTAL_SEED);
    // ...
  }

  if (v8_flags.trace_incremental_marking) {
    isolate()->PrintWithTimestamp("[IncrementalMarking] (MinorMS) Running\n");
  }
// IncrementalMarking::StartBlackAllocation(): from here on, new old-generation
// allocations count as marked. Depending on v8_flags.black_allocated_pages the
// linear allocation areas are either freed (new pages start out black) or
// marked black, for the main isolate, shared-space clients, and local heaps.
  if (v8_flags.black_allocated_pages) {
    // ...
  }
  if (isolate()->is_shared_space_isolate()) {
    if (v8_flags.black_allocated_pages) {
      // ...
    }
  }
  if (v8_flags.black_allocated_pages) {
    // ...
  }
  if (v8_flags.trace_incremental_marking) {
    isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Black allocation started\n");
  }
// IncrementalMarking::PauseBlackAllocation(): undo the allocation-area state
// set up above (only needed when black_allocated_pages is disabled).
  if (!v8_flags.black_allocated_pages) {
    // ...
  }
  if (isolate()->is_shared_space_isolate()) {
    // ...
  }
  if (v8_flags.trace_incremental_marking) {
    isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Black allocation paused\n");
  }
// IncrementalMarking::FinishBlackAllocation():
  if (v8_flags.trace_incremental_marking) {
    isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Black allocation finished\n");
  }
// IncrementalMarking::StartPointerTableBlackAllocation():
#ifdef V8_COMPRESS_POINTERS
  heap()->old_external_pointer_space()->set_allocate_black(true);
  heap()->cpp_heap_pointer_space()->set_allocate_black(true);
#endif  // V8_COMPRESS_POINTERS
#ifdef V8_ENABLE_SANDBOX
  heap()->code_pointer_space()->set_allocate_black(true);
  heap()->trusted_pointer_space()->set_allocate_black(true);
  if (isolate()->is_shared_space_isolate()) {
    isolate()->shared_trusted_pointer_space()->set_allocate_black(true);
  }
#endif  // V8_ENABLE_SANDBOX
#ifdef V8_ENABLE_LEAPTIERING
  heap()->js_dispatch_table_space()->set_allocate_black(true);
#endif  // V8_ENABLE_LEAPTIERING
// IncrementalMarking::StopPointerTableBlackAllocation():
#ifdef V8_COMPRESS_POINTERS
  heap()->old_external_pointer_space()->set_allocate_black(false);
  heap()->cpp_heap_pointer_space()->set_allocate_black(false);
#endif  // V8_COMPRESS_POINTERS
#ifdef V8_ENABLE_SANDBOX
  heap()->code_pointer_space()->set_allocate_black(false);
  heap()->trusted_pointer_space()->set_allocate_black(false);
  if (isolate()->is_shared_space_isolate()) {
    isolate()->shared_trusted_pointer_space()->set_allocate_black(false);
  }
#endif  // V8_ENABLE_SANDBOX
#ifdef V8_ENABLE_LEAPTIERING
  heap()->js_dispatch_table_space()->set_allocate_black(false);
#endif  // V8_ENABLE_LEAPTIERING
// In IncrementalMarking::CppHeapStep(max_duration, marked_bytes_limit):
  if (!cpp_heap || !cpp_heap->incremental_marking_supported()) {
    return {};
  }
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_TRACING);
  cpp_heap->AdvanceMarking(max_duration, marked_bytes_limit);
  if (v8_flags.trace_incremental_marking) {
    int old_generation_size_mb =
        static_cast<int>(heap()->OldGenerationSizeOfObjects() / MB);
    int old_generation_waste_mb =
        static_cast<int>(heap()->OldGenerationWastedBytes() / MB);
    int old_generation_limit_mb =
        static_cast<int>(heap()->old_generation_allocation_limit() / MB);
    isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Stopping: old generation size %dMB, waste %dMB, "
        "limit %dMB, overshoot %dMB\n",
        old_generation_size_mb, old_generation_waste_mb,
        old_generation_limit_mb,
        std::max(0, old_generation_size_mb + old_generation_waste_mb -
                        old_generation_limit_mb));
  }
// In IncrementalMarking::Stop(): a client isolate must keep its marking
// barrier enabled while the shared heap's major marking is still running.
  if (isolate()->has_shared_space() && !isolate()->is_shared_space_isolate()) {
    const bool is_marking = isolate()
                                ->shared_space_isolate()
                                ->heap()
                                ->incremental_marking()
                                ->IsMajorMarking();
    // ...
  }
  // Flush the live-bytes deltas recorded by background threads.
    intptr_t live_bytes = pair.second;
// IncrementalMarking::OldGenerationSizeOfObjects(): sums the paged spaces,
// skipping the shared space unless this is the shared-space isolate.
  const bool is_shared_space_isolate =
      isolate()->is_shared_space_isolate();
  size_t total = 0;
  PagedSpaceIterator spaces(heap_);
  for (PagedSpace* space = spaces.Next(); space != nullptr;
       space = spaces.Next()) {
    if (space->identity() == SHARED_SPACE && !is_shared_space_isolate) continue;
    total += space->SizeOfObjects();
  }
    isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Completion: %s GC via stack guard, time left: "
        "%.1fms\n",
        wait_for_task ? "Delaying" : "Not delaying",
        (completion_task_timeout_ - now).InMillisecondsF());
  }
  return wait_for_task;
// IncrementalMarking::TryInitializeTaskTimeout(): decide whether finalization
// may be delayed until the scheduled completion task runs instead of being
// forced synchronously via the stack guard.
  constexpr double kAllowedOvershootPercentBasedOnWalltime = 0.1;
  constexpr auto kMinAllowedOvershoot =
      v8::base::TimeDelta::FromMilliseconds(50);
  const auto now = v8::base::TimeTicks::Now();
  const auto allowed_overshoot = std::max(
      kMinAllowedOvershoot,
      v8::base::TimeDelta::FromMillisecondsD(
          (now - start_time_).InMillisecondsF() *
          kAllowedOvershootPercentBasedOnWalltime));
  const auto optional_avg_time_to_marking_task =
      incremental_marking_job()->AverageTimeToTask();
  bool delaying =
      optional_avg_time_to_marking_task.has_value() &&
      optional_avg_time_to_marking_task.value() <= allowed_overshoot;
  const auto optional_time_to_current_task =
      incremental_marking_job()->CurrentTimeToTask();
  delaying =
      delaying && (!optional_time_to_current_task.has_value() ||
                   optional_time_to_current_task.value() <= allowed_overshoot);
  if (delaying) {
    const auto delta =
        !optional_time_to_current_task.has_value()
            ? allowed_overshoot
            : allowed_overshoot - optional_time_to_current_task.value();
    completion_task_timeout_ = now + delta;
  }
  if (v8_flags.trace_incremental_marking) {
    isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Completion: %s GC via stack guard, "
        "avg time to task: %.1fms, current time to task: %.1fms allowed "
        "overshoot: %.1fms\n",
        delaying ? "Delaying" : "Not delaying",
        optional_avg_time_to_marking_task.has_value()
            ? optional_avg_time_to_marking_task->InMillisecondsF()
            : std::numeric_limits<double>::quiet_NaN(),
        optional_time_to_current_task.has_value()
            ? optional_time_to_current_task->InMillisecondsF()
            : std::numeric_limits<double>::quiet_NaN(),
        allowed_overshoot.InMillisecondsF());
  }
  return delaying;
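// A worked example of the overshoot budget computed above, assuming the 50ms
// kMinAllowedOvershoot floor and the 0.1 walltime factor: after 2s of
// incremental marking, finalization may be deferred to the task for up to
// max(50ms, 0.1 * 2000ms) = 200ms, while after only 100ms of marking the 50ms
// floor wins. A standalone sketch of just that computation:
v8::base::TimeDelta AllowedOvershootFor(v8::base::TimeDelta marking_walltime) {
  constexpr double kPercentOfWalltime = 0.1;
  constexpr auto kMinOvershoot = v8::base::TimeDelta::FromMilliseconds(50);
  return std::max(kMinOvershoot,
                  v8::base::TimeDelta::FromMillisecondsD(
                      marking_walltime.InMillisecondsF() * kPercentOfWalltime));
}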
// IncrementalMarking::GetScheduledBytes(step_origin): ask the shared schedule
// how many bytes this step should mark. With the unified schedule, the C++
// heap's live size is included in the estimate.
  if (v8_flags.incremental_marking_unified_schedule) {
    estimated_live_bytes += cpp_heap->used_size();
  }
  const size_t marked_bytes_limit =
      schedule_->GetNextIncrementalStepDuration(estimated_live_bytes);
  if (v8_flags.trace_incremental_marking) {
    const auto step_info = schedule_->GetCurrentStepInfo();
    isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Schedule: %zuKB to mark, origin: %s, elapsed: "
        "%.1f, marked: %zuKB (mutator: %zuKB, concurrent %zuKB), expected "
        "marked: %zuKB, estimated live: %zuKB, schedule delta: %+" PRIi64
        "KB\n",
        marked_bytes_limit / KB, ToString(step_origin),
        step_info.elapsed_time.InMillisecondsF(), step_info.marked_bytes() / KB,
        step_info.mutator_marked_bytes / KB,
        step_info.concurrent_marked_bytes / KB,
        step_info.expected_marked_bytes / KB,
        step_info.estimated_live_bytes / KB,
        step_info.scheduled_delta_bytes() / KB);
  }
  return marked_bytes_limit;
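// The schedule_ object is what turns "estimated live bytes" and "bytes marked
// so far" into a per-step budget. A minimal sketch of the protocol, restricted
// to the IncrementalMarkingSchedule calls that appear in this file (Create,
// NotifyIncrementalMarkingStart, AddConcurrentlyMarkedBytes,
// GetNextIncrementalStepDuration); the call cadence shown here is an
// assumption:
void ScheduleProtocolSketch(size_t estimated_live_bytes) {
  auto schedule = ::heap::base::IncrementalMarkingSchedule::Create();
  schedule->NotifyIncrementalMarkingStart();
  // Concurrent markers report their progress as marking runs...
  schedule->AddConcurrentlyMarkedBytes(128 * KB);
  // ...and each incremental step then asks for its byte budget.
  const size_t budget =
      schedule->GetNextIncrementalStepDuration(estimated_live_bytes);
  (void)budget;
}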
// In IncrementalMarking::AdvanceForTesting(max_duration, max_bytes_to_mark):
    size_t max_bytes_to_mark) {

      !heap()->always_allocate()) {

// Tail of IncrementalMarking::ShouldFinalize(): the C++ heap, if present, must
// also agree that marking can be finalized.
        (!cpp_heap || cpp_heap->ShouldFinalizeIncrementalMarking());
// FetchBytesMarkedConcurrently(): forward newly concurrently-marked bytes.
  if (!v8_flags.concurrent_marking) return;
  const size_t current_bytes_marked_concurrently =
      heap()->concurrent_marking()->TotalMarkedBytes();
  if (current_bytes_marked_concurrently > bytes_marked_concurrently_) {
    const size_t delta =
        current_bytes_marked_concurrently - bytes_marked_concurrently_;
    schedule_->AddConcurrentlyMarkedBytes(delta);
    bytes_marked_concurrently_ = current_bytes_marked_concurrently;
  }
// In IncrementalMarking::Step(max_duration, marked_bytes_limit, step_origin):
    size_t marked_bytes_limit,
      isolate()->counters()->gc_incremental_marking());
  std::optional<SafepointScope> safepoint_scope;
#ifndef V8_ATOMIC_OBJECT_FIELD_WRITES
  // ...
#endif
  v8::base::TimeDelta cpp_heap_duration;
  size_t cpp_heap_marked_bytes;
  std::tie(cpp_heap_duration, cpp_heap_marked_bytes) =
      CppHeapStep(max_duration, marked_bytes_limit);

  size_t v8_marked_bytes = 0;
  if (cpp_heap_duration < max_duration &&
      (!v8_flags.incremental_marking_unified_schedule ||
       (cpp_heap_marked_bytes < marked_bytes_limit))) {
    // Whatever budget the C++ heap did not use is handed to V8's marker.
    const size_t v8_marked_bytes_limit =
        v8_flags.incremental_marking_unified_schedule
            ? marked_bytes_limit - cpp_heap_marked_bytes
            : marked_bytes_limit;
    std::tie(v8_marked_bytes, std::ignore) =
        major_collector_->ProcessMarkingWorklist(
            max_duration - cpp_heap_duration, v8_marked_bytes_limit);
  }
    const auto v8_max_duration = max_duration - cpp_heap_duration;
    const auto v8_marked_bytes_limit =
        marked_bytes_limit - cpp_heap_marked_bytes;
    isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Step: origin: %s overall: %.1fms "
        "V8: %zuKB (%zuKB), %.1fms (%.1fms), %.1fMB/s "
        "CppHeap: %zuKB (%zuKB), %.1fms (%.1fms)\n",
        // ...
        v8_max_duration.InMillisecondsF(),
        heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond() *
            1000.0 / MB,
        cpp_heap_marked_bytes, marked_bytes_limit,
        // ...

               kMajorGCYoungGenerationAllocationObserverStep);
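// The stray fragment above references the young-generation observer step
// again, plausibly from AdvanceOnAllocation(), the entry point the allocation
// observers call into. A minimal sketch of that flow, assuming the budget
// comes from GetScheduledBytes(StepOrigin::kV8), the work happens in Step(),
// and a finished major cycle is finalized via the stack guard; the helper is
// hypothetical and only illustrates the sequence:
void AdvanceOnAllocationSketch(IncrementalMarking& marking, Isolate* isolate) {
  const size_t budget = marking.GetScheduledBytes(StepOrigin::kV8);
  marking.Step(GetMaxDuration(StepOrigin::kV8), budget, StepOrigin::kV8);
  if (marking.IsMajorMarkingComplete()) {
    // Ask the mutator to finalize at the next stack-guard check.
    isolate->stack_guard()->RequestGC();
  }
}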