v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
incremental-marking.cc
1// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/heap/incremental-marking.h"
6
7#include <inttypes.h>
8
9#include <cmath>
10#include <optional>
11
12#include "src/base/logging.h"
14#include "src/common/globals.h"
16#include "src/flags/flags.h"
21#include "src/heap/gc-tracer.h"
22#include "src/heap/heap-inl.h"
26#include "src/heap/heap.h"
35#include "src/heap/safepoint.h"
36#include "src/init/v8.h"
43#include "src/utils/utils.h"
44
45namespace v8 {
46namespace internal {
47
48namespace {
49
50static constexpr size_t kMajorGCYoungGenerationAllocationObserverStep = 64 * KB;
51static constexpr size_t kMajorGCOldGenerationAllocationObserverStep = 256 * KB;
52
53static constexpr v8::base::TimeDelta kMaxStepSizeOnTask =
55static constexpr v8::base::TimeDelta kMaxStepSizeOnAllocation =
57
58#ifndef DEBUG
59static constexpr size_t kV8ActivationThreshold = 8 * MB;
60static constexpr size_t kEmbedderActivationThreshold = 8 * MB;
61#else
62static constexpr size_t kV8ActivationThreshold = 0;
63static constexpr size_t kEmbedderActivationThreshold = 0;
64#endif // DEBUG
65
66base::TimeDelta GetMaxDuration(StepOrigin step_origin) {
67 if (v8_flags.predictable) {
68 return base::TimeDelta::Max();
69 }
70 switch (step_origin) {
71 case StepOrigin::kTask:
72 return kMaxStepSizeOnTask;
73 case StepOrigin::kV8:
74 return kMaxStepSizeOnAllocation;
75 }
76}
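// The caps above bound how long a single marking step may run: steps driven
// by the incremental marking task use kMaxStepSizeOnTask, while steps
// triggered from allocation observers use kMaxStepSizeOnAllocation. Under
// --predictable the cap is effectively removed (TimeDelta::Max()) so that
// step behavior does not depend on wall-clock time.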
77
78} // namespace
79
80IncrementalMarking::Observer::Observer(IncrementalMarking* incremental_marking,
81 intptr_t step_size)
82 : AllocationObserver(step_size),
83 incremental_marking_(incremental_marking) {}
84
85void IncrementalMarking::Observer::Step(int bytes_allocated, Address, size_t) {
86 Heap* heap = incremental_marking_->heap();
87 VMState<GC> state(heap->isolate());
88 RCS_SCOPE(heap->isolate(),
89 RuntimeCallCounterId::kGC_Custom_IncrementalMarkingObserver);
90 incremental_marking_->AdvanceOnAllocation();
91}
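// The observers above drive marking from allocation: each one is registered
// with a fixed byte step (64 KB for the young-generation observer, 256 KB for
// the old-generation observer, see the constants at the top of this file),
// and Step() fires roughly once per that many bytes allocated. As a rough
// illustration, a 1 MB burst of old-space allocation triggers about four
// AdvanceOnAllocation() calls via the 256 KB observer.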
92
93IncrementalMarking::IncrementalMarking(Heap* heap, WeakObjects* weak_objects)
94 : heap_(heap),
95 major_collector_(heap->mark_compact_collector()),
96 minor_collector_(heap->minor_mark_sweep_collector()),
97 weak_objects_(weak_objects),
100 v8_flags.incremental_marking_task
101 ? std::make_unique<IncrementalMarkingJob>(heap)
102 : nullptr),
104 kMajorGCYoungGenerationAllocationObserverStep),
106 kMajorGCOldGenerationAllocationObserverStep) {}
107
108void IncrementalMarking::MarkBlackBackground(Tagged<HeapObject> obj,
109 int object_size) {
110 CHECK(marking_state()->TryMark(obj));
112 background_live_bytes_[MutablePageMetadata::FromHeapObject(obj)] +=
113 static_cast<intptr_t>(object_size);
114}
115
119
120bool IncrementalMarking::CanAndShouldBeStarted() const {
121 // Only start incremental marking in a safe state:
122 // 1) when incremental marking is turned on
123 // 2) when we are currently not in a GC, and
124 // 3) when we are currently not serializing or deserializing the heap, and
125 // 4) not a shared heap.
126 return v8_flags.incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
128}
129
131 return heap_->OldGenerationSizeOfObjects() <= kV8ActivationThreshold &&
132 heap_->EmbedderSizeOfObjects() <= kEmbedderActivationThreshold;
133}
134
135void IncrementalMarking::Start(GarbageCollector garbage_collector,
136 GarbageCollectionReason gc_reason) {
137 CHECK(IsStopped());
142 // Do not invoke CanAndShouldBeStarted() here again because its return value
143 // might change across multiple invocations (its internal state could be
144 // updated concurrently from another thread between invocations).
146 // The "current isolate" must be set correctly so we can access pointer
147 // tables.
149
150 if (V8_UNLIKELY(v8_flags.trace_incremental_marking)) {
151 const size_t old_generation_size_mb =
153 const size_t old_generation_waste_mb =
155 const size_t old_generation_limit_mb =
157 const size_t global_size_mb = heap()->GlobalSizeOfObjects() / MB;
158 const size_t global_waste_mb = heap()->GlobalWastedBytes() / MB;
159 const size_t global_limit_mb = heap()->global_allocation_limit() / MB;
160 isolate()->PrintWithTimestamp(
161 "[IncrementalMarking] Start (%s): (size/waste/limit/slack) v8: %zuMB / "
162 "%zuMB / %zuMB "
163 "/ %zuMB global: %zuMB / %zuMB / %zuMB / %zuMB\n",
164 ToString(gc_reason), old_generation_size_mb, old_generation_waste_mb,
165 old_generation_limit_mb,
166 old_generation_size_mb + old_generation_waste_mb >
167 old_generation_limit_mb
168 ? 0
169 : old_generation_limit_mb - old_generation_size_mb,
170 global_size_mb, global_waste_mb, global_limit_mb,
171 global_size_mb + global_waste_mb > global_limit_mb
172 ? 0
173 : global_limit_mb - global_size_mb);
174 }
175
176 Counters* counters = isolate()->counters();
177 const bool is_major = garbage_collector == GarbageCollector::MARK_COMPACTOR;
178 if (is_major) {
179 // Reasons are only reported for major GCs
180 counters->incremental_marking_reason()->AddSample(
181 static_cast<int>(gc_reason));
182 }
183 NestedTimedHistogramScope incremental_marking_scope(
184 is_major ? counters->gc_incremental_marking_start()
185 : counters->gc_minor_incremental_marking_start());
186 const auto scope_id = is_major ? GCTracer::Scope::MC_INCREMENTAL_START
187 : GCTracer::Scope::MINOR_MS_INCREMENTAL_START;
188 DCHECK(!current_trace_id_.has_value());
189 current_trace_id_.emplace(reinterpret_cast<uint64_t>(this) ^
190 heap_->tracer()->CurrentEpoch(scope_id));
191 TRACE_EVENT2("v8",
192 is_major ? "V8.GCIncrementalMarkingStart"
193 : "V8.GCMinorIncrementalMarkingStart",
194 "epoch", heap_->tracer()->CurrentEpoch(scope_id), "reason",
195 ToString(gc_reason));
196 TRACE_GC_EPOCH_WITH_FLOW(heap()->tracer(), scope_id, ThreadKind::kMain,
197 current_trace_id_.value(),
200
206
207 if (is_major) {
209 } else {
211 }
212}
213
237
239
240void IncrementalMarking::StartMarkingMajor() {
241 if (isolate()->serializer_enabled()) {
242 // Black allocation currently starts when we start incremental marking,
243 // but we cannot enable black allocation while deserializing. Hence, we
244 // have to delay the start of incremental marking in that case.
245 if (v8_flags.trace_incremental_marking) {
246 isolate()->PrintWithTimestamp(
247 "[IncrementalMarking] Start delayed - serializer\n");
248 }
249 return;
250 }
251 if (v8_flags.trace_incremental_marking) {
252 isolate()->PrintWithTimestamp("[IncrementalMarking] Start marking\n");
253 }
254
256
257 // Free all existing LABs in the heap such that selecting evacuation
258 // candidates does not need to deal with LABs on a page. While we don't need
259 // this for correctness, we want to avoid creating additional work for
260 // evacuation.
262
265
266 // The schedule is acquired for CppHeap as well. Initialize it early.
267 schedule_ =
269 schedule_->NotifyIncrementalMarkingStart();
270
271 if (v8_flags.incremental_marking_unified_schedule) {
273 } else {
275 }
278
280 heap_->SetIsMarkingFlag(true);
281
284
286
287 {
288 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
289 MarkRoots();
290 }
291
292 if (v8_flags.concurrent_marking && !heap_->IsTearingDown()) {
295 }
296
297 // Ready to start incremental marking.
298 if (v8_flags.trace_incremental_marking) {
299 isolate()->PrintWithTimestamp("[IncrementalMarking] Running\n");
300 }
301
302 if (heap()->cpp_heap()) {
303 // `StartMarking()` may call back into V8 in corner cases, requiring that
304 // marking (including write barriers) is fully set up.
305 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
306 CppHeap::From(heap()->cpp_heap())->StartMarking();
307 }
308
310
315 }
316}
317
318void IncrementalMarking::StartMarkingMinor() {
319 // Removed serializer_enabled() check because we don't do black allocation.
320
321 if (v8_flags.trace_incremental_marking) {
322 isolate()->PrintWithTimestamp(
323 "[IncrementalMarking] (MinorMS) Start marking\n");
324 }
325
326 // We only reach this code if Heap::ShouldUseBackgroundThreads() returned
327 // true. So we can force the use of background threads here.
331
333 heap_->SetIsMarkingFlag(true);
335
336 {
337 Sweeper::PauseMajorSweepingScope pause_sweeping_scope(heap_->sweeper());
339 }
340
341 {
342 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MS_MARK_INCREMENTAL_SEED);
343 MarkRoots();
344 }
345
346 if (v8_flags.concurrent_minor_ms_marking && !heap_->IsTearingDown()) {
350 }
351
352 if (v8_flags.trace_incremental_marking) {
353 isolate()->PrintWithTimestamp("[IncrementalMarking] (MinorMS) Running\n");
354 }
355
357
358 // Allocation observers are not currently used by MinorMS because we don't
359 // do incremental marking.
360}
361
362void IncrementalMarking::StartBlackAllocation() {
365 black_allocation_ = true;
366 if (v8_flags.black_allocated_pages) {
368 } else {
370 }
371 if (isolate()->is_shared_space_isolate()) {
373 [](Isolate* client) {
374 if (v8_flags.black_allocated_pages) {
376 } else {
378 }
379 });
380 }
381 heap()->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
382 if (v8_flags.black_allocated_pages) {
383 // The freelists of the underlying spaces must anyway be empty after the
384 // first call to FreeLinearAllocationAreasAndResetFreeLists(). However,
385 // don't call FreeLinearAllocationAreas(), since it also frees the
386 // shared-space areas.
388 } else {
389 local_heap->MarkLinearAllocationAreasBlack();
390 }
391 });
393 if (v8_flags.trace_incremental_marking) {
394 isolate()->PrintWithTimestamp(
395 "[IncrementalMarking] Black allocation started\n");
396 }
397}
398
399void IncrementalMarking::PauseBlackAllocation() {
401 if (!v8_flags.black_allocated_pages) {
403
404 if (isolate()->is_shared_space_isolate()) {
406 [](Isolate* client) {
408 });
409 }
410
411 heap()->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
412 local_heap->UnmarkLinearAllocationsArea();
413 });
414 }
416 if (v8_flags.trace_incremental_marking) {
417 isolate()->PrintWithTimestamp(
418 "[IncrementalMarking] Black allocation paused\n");
419 }
420 black_allocation_ = false;
421}
422
423void IncrementalMarking::FinishBlackAllocation() {
424 if (!black_allocation_) {
425 return;
426 }
427 // Don't fixup the marking bitmaps of the black allocated pages, since the
428 // concurrent marker may still be running and will access the page flags.
429 black_allocation_ = false;
431 if (v8_flags.trace_incremental_marking) {
432 isolate()->PrintWithTimestamp(
433 "[IncrementalMarking] Black allocation finished\n");
434 }
435}
436
437void IncrementalMarking::StartPointerTableBlackAllocation() {
438#ifdef V8_COMPRESS_POINTERS
439 heap()->old_external_pointer_space()->set_allocate_black(true);
440 heap()->cpp_heap_pointer_space()->set_allocate_black(true);
441#endif // V8_COMPRESS_POINTERS
442#ifdef V8_ENABLE_SANDBOX
443 heap()->code_pointer_space()->set_allocate_black(true);
444 heap()->trusted_pointer_space()->set_allocate_black(true);
445 if (isolate()->is_shared_space_isolate()) {
446 isolate()->shared_trusted_pointer_space()->set_allocate_black(true);
447 }
448#endif // V8_ENABLE_SANDBOX
449#ifdef V8_ENABLE_LEAPTIERING
450 heap()->js_dispatch_table_space()->set_allocate_black(true);
451#endif // V8_ENABLE_LEAPTIERING
452}
453
454void IncrementalMarking::StopPointerTableBlackAllocation() {
455#ifdef V8_COMPRESS_POINTERS
456 heap()->old_external_pointer_space()->set_allocate_black(false);
457 heap()->cpp_heap_pointer_space()->set_allocate_black(false);
458#endif // V8_COMPRESS_POINTERS
459#ifdef V8_ENABLE_SANDBOX
460 heap()->code_pointer_space()->set_allocate_black(false);
461 heap()->trusted_pointer_space()->set_allocate_black(false);
462 if (isolate()->is_shared_space_isolate()) {
463 heap()->isolate()->shared_trusted_pointer_space()->set_allocate_black(
464 false);
465 }
466#endif // V8_ENABLE_SANDBOX
467#ifdef V8_ENABLE_LEAPTIERING
468 heap()->js_dispatch_table_space()->set_allocate_black(false);
469#endif // V8_ENABLE_LEAPTIERING
470}
471
472std::pair<v8::base::TimeDelta, size_t> IncrementalMarking::CppHeapStep(
473 v8::base::TimeDelta max_duration, size_t marked_bytes_limit) {
474 DCHECK(IsMarking());
475 auto* cpp_heap = CppHeap::From(heap_->cpp_heap());
476 if (!cpp_heap || !cpp_heap->incremental_marking_supported()) {
477 return {};
478 }
479
480 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_TRACING);
481 const auto start = v8::base::TimeTicks::Now();
482 cpp_heap->AdvanceMarking(max_duration, marked_bytes_limit);
483 return {v8::base::TimeTicks::Now() - start, cpp_heap->last_bytes_marked()};
484}
485
486bool IncrementalMarking::Stop() {
487 if (IsStopped()) return false;
488
489 if (v8_flags.trace_incremental_marking) {
490 int old_generation_size_mb =
491 static_cast<int>(heap()->OldGenerationSizeOfObjects() / MB);
492 int old_generation_waste_mb =
493 static_cast<int>(heap()->OldGenerationWastedBytes() / MB);
494 int old_generation_limit_mb =
495 static_cast<int>(heap()->old_generation_allocation_limit() / MB);
496 isolate()->PrintWithTimestamp(
497 "[IncrementalMarking] Stopping: old generation size %dMB, waste %dMB, "
498 "limit %dMB, "
499 "overshoot %dMB\n",
500 old_generation_size_mb, old_generation_waste_mb,
501 old_generation_limit_mb,
502 std::max(0, old_generation_size_mb + old_generation_waste_mb -
503 old_generation_limit_mb));
504 }
505
506 if (IsMajorMarking()) {
510 isolate()->stack_guard()->ClearGC();
511 }
512
515 current_trace_id_.reset();
516
517 if (isolate()->has_shared_space() && !isolate()->is_shared_space_isolate()) {
518 // When disabling local incremental marking in a client isolate (= worker
519 // isolate), the marking barrier needs to stay enabled when incremental
520 // marking in the shared heap is running.
521 const bool is_marking = isolate()
523 ->heap()
525 ->IsMajorMarking();
526 heap_->SetIsMarkingFlag(is_marking);
527 } else {
528 heap_->SetIsMarkingFlag(false);
529 }
530
532 is_compacting_ = false;
534
535 // Merge live bytes counters of background threads
536 for (const auto& pair : background_live_bytes_) {
537 MutablePageMetadata* memory_chunk = pair.first;
538 intptr_t live_bytes = pair.second;
539 if (live_bytes) {
540 memory_chunk->IncrementLiveBytesAtomically(live_bytes);
541 }
542 }
544 schedule_.reset();
545
546 return true;
547}
548
549size_t IncrementalMarking::OldGenerationSizeOfObjects() const {
550 // TODO(v8:14140): This is different to Heap::OldGenerationSizeOfObjects() in
551 // that it only considers shared space for the shared space isolate. Consider
552 // adjusting the Heap version.
553 const bool is_shared_space_isolate =
555 size_t total = 0;
557 for (PagedSpace* space = spaces.Next(); space != nullptr;
558 space = spaces.Next()) {
559 if (space->identity() == SHARED_SPACE && !is_shared_space_isolate) continue;
560 total += space->SizeOfObjects();
561 }
562 total += heap_->lo_space()->SizeOfObjects();
563 total += heap_->code_lo_space()->SizeOfObjects();
564 if (heap_->shared_lo_space() && is_shared_space_isolate) {
565 total += heap_->shared_lo_space()->SizeOfObjects();
566 }
567 return total;
568}
569
573 return false;
574 }
578 return false;
579 }
580 }
581
582 const auto now = v8::base::TimeTicks::Now();
583 const bool wait_for_task = now < completion_task_timeout_;
584 if (V8_UNLIKELY(v8_flags.trace_incremental_marking)) {
585 isolate()->PrintWithTimestamp(
586 "[IncrementalMarking] Completion: %s GC via stack guard, time left: "
587 "%.1fms\n",
588 wait_for_task ? "Delaying" : "Not delaying",
589 (completion_task_timeout_ - now).InMillisecondsF());
590 }
591 return wait_for_task;
592}
593
594bool IncrementalMarking::TryInitializeTaskTimeout() {
596 // Allowed overshoot percentage of incremental marking walltime.
597 constexpr double kAllowedOvershootPercentBasedOnWalltime = 0.1;
598 // Minimum overshoot in ms. This is used to allow moving away from stack
599 // when marking was fast.
600 constexpr auto kMinAllowedOvershoot =
602 const auto now = v8::base::TimeTicks::Now();
603 const auto allowed_overshoot = std::max(
604 kMinAllowedOvershoot, v8::base::TimeDelta::FromMillisecondsD(
605 (now - start_time_).InMillisecondsF() *
606 kAllowedOvershootPercentBasedOnWalltime));
607 const auto optional_avg_time_to_marking_task =
609 // Only allowed to delay if the recorded average exists and is below the
610 // threshold.
611 bool delaying =
612 optional_avg_time_to_marking_task.has_value() &&
613 optional_avg_time_to_marking_task.value() <= allowed_overshoot;
614 const auto optional_time_to_current_task =
616 // Don't bother delaying if the currently scheduled task is already waiting
617 // too long.
618 delaying =
619 delaying && (!optional_time_to_current_task.has_value() ||
620 optional_time_to_current_task.value() <= allowed_overshoot);
621 if (delaying) {
622 const auto delta =
623 !optional_time_to_current_task.has_value()
624 ? allowed_overshoot
625 : allowed_overshoot - optional_time_to_current_task.value();
626 completion_task_timeout_ = now + delta;
627 }
628 DCHECK_IMPLIES(!delaying, completion_task_timeout_ <= now);
629 if (V8_UNLIKELY(v8_flags.trace_incremental_marking)) {
630 isolate()->PrintWithTimestamp(
631 "[IncrementalMarking] Completion: %s GC via stack guard, "
632 "avg time to task: %.1fms, current time to task: %.1fms allowed "
633 "overshoot: %.1fms\n",
634 delaying ? "Delaying" : "Not delaying",
635 optional_avg_time_to_marking_task.has_value()
636 ? optional_avg_time_to_marking_task->InMillisecondsF()
637 : NAN,
638 optional_time_to_current_task.has_value()
639 ? optional_time_to_current_task->InMillisecondsF()
640 : NAN,
641 allowed_overshoot.InMillisecondsF());
642 }
643 return delaying;
644}
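// Illustrative numbers for the timeout computation above: if marking has been
// running for 800 ms, the walltime-based allowance is 0.1 * 800 ms = 80 ms
// (or kMinAllowedOvershoot, whichever is larger). Completion is then delegated
// to the marking task only if both the recorded average time-to-task (say
// 30 ms) and the wait for the currently scheduled task fit within that
// allowance; otherwise finalization is forced via the stack guard.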
645
646size_t IncrementalMarking::GetScheduledBytes(StepOrigin step_origin) {
648 // TODO(v8:14140): Consider the size including young generation here as well
649 // as the full marker marks both the young and old generations.
650 size_t estimated_live_bytes = OldGenerationSizeOfObjects();
651 if (v8_flags.incremental_marking_unified_schedule) {
652 if (auto* cpp_heap = CppHeap::From(heap_->cpp_heap())) {
653 estimated_live_bytes += cpp_heap->used_size();
654 }
655 }
656 const size_t marked_bytes_limit =
657 schedule_->GetNextIncrementalStepDuration(estimated_live_bytes);
658 if (V8_UNLIKELY(v8_flags.trace_incremental_marking)) {
659 const auto step_info = schedule_->GetCurrentStepInfo();
660 isolate()->PrintWithTimestamp(
661 "[IncrementalMarking] Schedule: %zuKB to mark, origin: %s, elapsed: "
662 "%.1f, marked: %zuKB (mutator: %zuKB, concurrent %zuKB), expected "
663 "marked: %zuKB, estimated live: %zuKB, schedule delta: %+" PRIi64
664 "KB\n",
665 marked_bytes_limit / KB, ToString(step_origin),
666 step_info.elapsed_time.InMillisecondsF(), step_info.marked_bytes() / KB,
667 step_info.mutator_marked_bytes / KB,
668 step_info.concurrent_marked_bytes / KB,
669 step_info.expected_marked_bytes / KB,
670 step_info.estimated_live_bytes / KB,
671 step_info.scheduled_delta_bytes() / KB);
672 }
673 return marked_bytes_limit;
674}
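// Roughly, the limit above is derived from the shared marking schedule: the
// estimated live size (old generation, plus the CppHeap's used size under
// --incremental_marking_unified_schedule) and the elapsed marking time yield
// the number of bytes that should have been marked by now, and the step is
// sized to make up the shortfall against what mutator and concurrent marking
// have already reported.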
675
685
695
696void IncrementalMarking::AdvanceForTesting(v8::base::TimeDelta max_duration,
697 size_t max_bytes_to_mark) {
698 Step(max_duration, max_bytes_to_mark, StepOrigin::kV8);
699}
700
701void IncrementalMarking::AdvanceOnAllocation() {
702 DCHECK_EQ(heap_->gc_state(), Heap::NOT_IN_GC);
703 DCHECK(v8_flags.incremental_marking);
705
706 const size_t max_bytes_to_process = GetScheduledBytes(StepOrigin::kV8);
707 Step(GetMaxDuration(StepOrigin::kV8), max_bytes_to_process, StepOrigin::kV8);
708
709 // Bail out when an AlwaysAllocateScope is active as the assumption is that
710 // there's no GC being triggered. Check this condition at last position to
711 // allow a completion task to be scheduled.
713 !heap()->always_allocate()) {
714 // When completion task isn't run soon enough, fall back to stack guard to
715 // force completion.
717 isolate()->stack_guard()->RequestGC();
718 }
719}
720
721bool IncrementalMarking::ShouldFinalize() const {
722 DCHECK(IsMarking());
723
724 const auto* cpp_heap = CppHeap::From(heap_->cpp_heap());
725 return heap()
728 ->IsEmpty() &&
729 (!cpp_heap || cpp_heap->ShouldFinalizeIncrementalMarking());
730}
731
732void IncrementalMarking::FetchBytesMarkedConcurrently() {
733 if (!v8_flags.concurrent_marking) return;
734
735 const size_t current_bytes_marked_concurrently =
737 // The concurrent_marking()->TotalMarkedBytes() is not monotonic for a
738 // short period of time when a concurrent marking task is finishing.
739 if (current_bytes_marked_concurrently > bytes_marked_concurrently_) {
740 const size_t delta =
741 current_bytes_marked_concurrently - bytes_marked_concurrently_;
742 schedule_->AddConcurrentlyMarkedBytes(delta);
743 bytes_marked_concurrently_ = current_bytes_marked_concurrently;
744 }
745}
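// Example for the delta tracking above: if the concurrent marker previously
// reported 4 MB and now reports 6 MB, only the 2 MB delta is added to the
// schedule and the cached counter moves to 6 MB; a temporarily lower reading
// (possible while a concurrent task finishes) adds nothing and leaves the
// cached value unchanged.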
746
747void IncrementalMarking::Step(v8::base::TimeDelta max_duration,
748 size_t marked_bytes_limit,
749 StepOrigin step_origin) {
750 NestedTimedHistogramScope incremental_marking_scope(
751 isolate()->counters()->gc_incremental_marking());
752 TRACE_EVENT1("v8", "V8.GCIncrementalMarking", "epoch",
753 heap_->tracer()->CurrentEpoch(GCTracer::Scope::MC_INCREMENTAL));
755 heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL, ThreadKind::kMain,
756 current_trace_id_.value(),
759 const auto start = v8::base::TimeTicks::Now();
760
761 std::optional<SafepointScope> safepoint_scope;
762 // Conceptually an incremental marking step (even though it always runs on the
763 // main thread) may introduce a form of concurrent marking when background
764 // threads access the heap concurrently (e.g. concurrent compilation). On
765 // builds that verify concurrent heap accesses this may lead to false positive
766 // reports. We can avoid this by stopping background threads just in this
767 // configuration. This should not hide potential issues because the concurrent
768 // marker doesn't rely on correct synchronization but e.g. on black allocation
769 // and the on_hold worklist.
770#ifndef V8_ATOMIC_OBJECT_FIELD_WRITES
771 {
772 DCHECK(!v8_flags.concurrent_marking);
773 // Ensure that the isolate has no shared heap. Otherwise a shared GC might
774 // happen when trying to enter the safepoint.
775 DCHECK(!isolate()->has_shared_space());
776 AllowGarbageCollection allow_gc;
777 safepoint_scope.emplace(isolate(), SafepointKind::kIsolate);
778 }
779#endif
780
781 if (V8_LIKELY(v8_flags.concurrent_marking)) {
782 // It is safe to merge back all objects that were on hold to the shared
783 // work list at Step because we are at a safepoint where all objects
784 // are properly initialized. The exception is the last allocated object
785 // before invoking an AllocationObserver. This allocation had no way to
786 // escape and get marked though.
788
791 }
792 if (step_origin == StepOrigin::kTask) {
793 // We cannot publish the pending allocations for V8 step origin because the
794 // last object was allocated before invoking the step.
796 }
797
798 // Perform a single V8 and a single embedder step. In case both have been
799 // observed as empty back to back, we can finalize.
800 //
801 // This ignores that case where the embedder finds new V8-side objects. The
802 // assumption is that large graphs are well connected and can mostly be
803 // processed on their own. For small graphs, helping is not necessary.
804 //
805 // The idea of a unified incremental marking step is the following:
806 // - We use a single schedule for both V8 and CppHeap.
807 // - Process CppHeap first in here as there's some objects in there that can
808 // only be processed on the main thread.
809 // - Use the left over time and bytes for a V8 step.
810 // - We ignore the case where both individual steps discover new references to
811 // each other and assume that graphs are generally well connected.
812 // - Always flush objects to enable concurrent marking to make progress.
813
814 // Start with a CppHeap step as there's objects on CppHeap that must be marked
815 // on the main thread.
816 v8::base::TimeDelta cpp_heap_duration;
817 size_t cpp_heap_marked_bytes;
818 std::tie(cpp_heap_duration, cpp_heap_marked_bytes) =
819 CppHeapStep(max_duration, marked_bytes_limit);
820
821 // Add an optional V8 step if we are not exceeding our limits.
822 size_t v8_marked_bytes = 0;
823 v8::base::TimeDelta v8_time;
824 if (cpp_heap_duration < max_duration &&
825 (!v8_flags.incremental_marking_unified_schedule ||
826 (cpp_heap_marked_bytes < marked_bytes_limit))) {
827 const auto v8_start = v8::base::TimeTicks::Now();
828 const size_t v8_marked_bytes_limit =
829 v8_flags.incremental_marking_unified_schedule
830 ? marked_bytes_limit - cpp_heap_marked_bytes
831 : marked_bytes_limit;
832 std::tie(v8_marked_bytes, std::ignore) =
834 max_duration - cpp_heap_duration, v8_marked_bytes_limit);
835 v8_time = v8::base::TimeTicks::Now() - v8_start;
837 v8_marked_bytes);
838 }
839
840 if (V8_LIKELY(v8_flags.concurrent_marking)) {
844 }
845
846 if (V8_UNLIKELY(v8_flags.trace_incremental_marking)) {
847 const auto v8_max_duration = max_duration - cpp_heap_duration;
848 const auto v8_marked_bytes_limit =
849 marked_bytes_limit - cpp_heap_marked_bytes;
850 isolate()->PrintWithTimestamp(
851 "[IncrementalMaring] Step: origin: %s overall: %.1fms "
852 "V8: %zuKB (%zuKB), %.1fms (%.1fms), %.1fMB/s "
853 "CppHeap: %zuKB (%zuKB), %.1fms (%.1fms)\n",
854 ToString(step_origin),
855 (v8::base::TimeTicks::Now() - start).InMillisecondsF(), v8_marked_bytes,
856 v8_marked_bytes_limit, v8_time.InMillisecondsF(),
857 v8_max_duration.InMillisecondsF(),
858 heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond() *
859 1000 / MB,
860 cpp_heap_marked_bytes, marked_bytes_limit,
861 cpp_heap_duration.InMillisecondsF(), max_duration.InMillisecondsF());
862 }
863}
864
866
867// The allocation observer step size determines the LAB size when marking is on.
868// Objects in the LAB are not marked until the LAB bounds are reset in marking
869// steps. As a result, the concurrent marker may pick up this many bytes. If
870// kStepSizeWhenNotMakingProgress is too small, we would consider marking
871// objects in the LAB as making progress which means we would not finalize as
872// long as we allocate objects in the LAB between steps.
873static_assert(
874 ::heap::base::IncrementalMarkingSchedule::kStepSizeWhenNotMakingProgress >=
875 kMajorGCYoungGenerationAllocationObserverStep);
876
877} // namespace internal
878} // namespace v8