v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
gc-tracer.cc
1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/heap/gc-tracer.h"
6
7#include <cstdarg>
8#include <limits>
9#include <optional>
10
11#include "include/v8-metrics.h"
13#include "src/base/logging.h"
15#include "src/base/strings.h"
16#include "src/common/globals.h"
21#include "src/heap/heap-inl.h"
22#include "src/heap/heap.h"
25#include "src/heap/spaces.h"
27#include "src/logging/metrics.h"
30
31namespace v8 {
32namespace internal {
33
34static size_t CountTotalHolesSize(Heap* heap) {
35 size_t holes_size = 0;
36 PagedSpaceIterator spaces(heap);
37 for (PagedSpace* space = spaces.Next(); space != nullptr;
38 space = spaces.Next()) {
39 DCHECK_GE(holes_size + space->Waste() + space->Available(), holes_size);
40 holes_size += space->Waste() + space->Available();
41 }
42 return holes_size;
43}
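// Editorial note (not part of the original source): a "hole" here is space in
// a paged space that holds no live objects, so each space contributes
// Waste() + Available(). The DCHECK_GE above is an unsigned-overflow guard:
// if the addition wrapped around, the new total would be smaller than the
// running holes_size.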
44
45namespace {
46
47std::atomic<CollectionEpoch> global_epoch{0};
48
49CollectionEpoch next_epoch() {
50 return global_epoch.fetch_add(1, std::memory_order_relaxed) + 1;
51}
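// Editorial sketch (not part of the original source): next_epoch() hands out
// unique, monotonically increasing ids; relaxed ordering suffices because only
// the atomicity of the counter matters. A standalone equivalent with std types:
//
//   #include <atomic>
//   #include <cstdint>
//
//   std::atomic<uint32_t> counter{0};
//   uint32_t NextId() {
//     // fetch_add returns the pre-increment value, so +1 makes ids start at 1.
//     return counter.fetch_add(1, std::memory_order_relaxed) + 1;
//   }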
52
53using BytesAndDuration = ::heap::base::BytesAndDuration;
54
55std::optional<double> BoundedAverageSpeed(
56 const base::RingBuffer<BytesAndDuration>& buffer) {
57 constexpr size_t kMinNonEmptySpeedInBytesPerMs = 1;
58 constexpr size_t kMaxSpeedInBytesPerMs = GB;
59 return ::heap::base::AverageSpeed(buffer, BytesAndDuration(), std::nullopt,
60 kMinNonEmptySpeedInBytesPerMs,
61 kMaxSpeedInBytesPerMs);
62}
63
64double BoundedThroughput(const ::heap::base::SmoothedBytesAndDuration& buffer) {
65 constexpr double kMaxSpeedInBytesPerMs = static_cast<double>(GB);
66 return std::min(buffer.GetThroughput(), kMaxSpeedInBytesPerMs);
67}
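// Editorial sketch (not part of the original source): both helpers above clamp
// measured rates into a sane range (at least 1 byte/ms, at most 1 GB/ms) so GC
// heuristics never divide by an implausible speed. A minimal standalone
// version, assuming plain (bytes, ms) samples instead of V8's RingBuffer:
//
//   #include <algorithm>
//   #include <utility>
//   #include <vector>
//
//   double BoundedAverage(const std::vector<std::pair<double, double>>& samples) {
//     double bytes = 0, ms = 0;
//     for (const auto& [b, m] : samples) { bytes += b; ms += m; }
//     if (ms == 0) return 0;  // no data recorded yet
//     constexpr double kMinBytesPerMs = 1.0;
//     constexpr double kMaxBytesPerMs = 1024.0 * 1024.0 * 1024.0;  // 1 GB/ms
//     return std::clamp(bytes / ms, kMinBytesPerMs, kMaxBytesPerMs);
//   }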
68
69} // namespace
70
71GCTracer::Event::Event(Type type, State state,
72 GarbageCollectionReason gc_reason,
73 const char* collector_reason,
74 Priority priority)
75 : type(type),
76 state(state),
77 gc_reason(gc_reason),
78 collector_reason(collector_reason),
79 priority(priority) {}
80
81const char* ToString(GCTracer::Event::Type type, bool short_name) {
82 switch (type) {
83 case GCTracer::Event::Type::SCAVENGER:
84 return (short_name) ? "s" : "Scavenge";
85 case GCTracer::Event::Type::MARK_COMPACTOR:
86 case GCTracer::Event::Type::INCREMENTAL_MARK_COMPACTOR:
87 return (short_name) ? "mc" : "Mark-Compact";
88 case GCTracer::Event::Type::MINOR_MARK_SWEEPER:
89 case GCTracer::Event::Type::INCREMENTAL_MINOR_MARK_SWEEPER:
90 return (short_name) ? "mms" : "Minor Mark-Sweep";
91 case GCTracer::Event::Type::START:
92 return (short_name) ? "st" : "Start";
93 }
94}
95
96RecordGCPhasesInfo::RecordGCPhasesInfo(Heap* heap, GarbageCollector collector,
97 GarbageCollectionReason reason) {
98 if (Heap::IsYoungGenerationCollector(collector)) {
99 type_timer_ = nullptr;
100 type_priority_timer_ = nullptr;
101 if (!v8_flags.minor_ms) {
102 mode_ = Mode::Scavenger;
103 trace_event_name_ = "V8.GCScavenger";
104 } else {
105 mode_ = Mode::None;
106 trace_event_name_ = "V8.GCMinorMS";
107 }
108 } else {
110 Counters* counters = heap->isolate()->counters();
111 const bool in_background = heap->isolate()->is_backgrounded();
112 const bool is_incremental = !heap->incremental_marking()->IsStopped();
113 mode_ = Mode::None;
114 // The following block selects histogram counters to emit. The trace event
115 // name should be changed when metrics are updated.
116 //
117 // Memory reducing GCs take priority over memory measurement GCs. They can
118 // happen at the same time when measuring memory is folded into a memory
119 // reducing GC.
120 if (is_incremental) {
121 if (heap->ShouldReduceMemory()) {
122 type_timer_ = counters->gc_finalize_incremental_memory_reducing();
123 type_priority_timer_ =
124 in_background
125 ? counters->gc_finalize_incremental_memory_reducing_background()
126 : counters
127 ->gc_finalize_incremental_memory_reducing_foreground();
128 trace_event_name_ = "V8.GCFinalizeMCReduceMemory";
129 } else if (reason == GarbageCollectionReason::kMeasureMemory) {
130 type_timer_ = counters->gc_finalize_incremental_memory_measure();
131 type_priority_timer_ =
132 in_background
133 ? counters->gc_finalize_incremental_memory_measure_background()
134 : counters->gc_finalize_incremental_memory_measure_foreground();
135 trace_event_name_ = "V8.GCFinalizeMCMeasureMemory";
136 } else {
137 type_timer_ = counters->gc_finalize_incremental_regular();
138 type_priority_timer_ =
139 in_background
140 ? counters->gc_finalize_incremental_regular_background()
141 : counters->gc_finalize_incremental_regular_foreground();
142 trace_event_name_ = "V8.GCFinalizeMC";
143 mode_ = Mode::Finalize;
144 }
145 } else {
146 trace_event_name_ = "V8.GCCompactor";
147 if (heap->ShouldReduceMemory()) {
148 type_timer_ = counters->gc_finalize_non_incremental_memory_reducing();
149 type_priority_timer_ =
150 in_background
151 ? counters
152 ->gc_finalize_non_incremental_memory_reducing_background()
153 : counters
154 ->gc_finalize_non_incremental_memory_reducing_foreground();
155 } else if (reason == GarbageCollectionReason::kMeasureMemory) {
156 type_timer_ = counters->gc_finalize_non_incremental_memory_measure();
157 type_priority_timer_ =
158 in_background
159 ? counters
160 ->gc_finalize_non_incremental_memory_measure_background()
161 : counters
162 ->gc_finalize_non_incremental_memory_measure_foreground();
163 } else {
164 type_timer_ = counters->gc_finalize_non_incremental_regular();
165 type_priority_timer_ =
166 in_background
167 ? counters->gc_finalize_non_incremental_regular_background()
168 : counters->gc_finalize_non_incremental_regular_foreground();
169 }
170 }
171 }
172}
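// Editorial summary (not part of the original source): the branches above pick
// one of six histogram timer pairs along two axes, incremental vs.
// non-incremental and memory-reducing vs. measure-memory vs. regular, with
// memory reduction taking precedence over memory measurement. Each choice then
// splits into a foreground/background variant via is_backgrounded(), and only
// the incremental regular case sets mode_ = Mode::Finalize.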
173
174GCTracer::GCTracer(Heap* heap, base::TimeTicks startup_time,
175 GarbageCollectionReason initial_gc_reason)
176 : heap_(heap),
177 current_(Event::Type::START, Event::State::NOT_RUNNING, initial_gc_reason,
178 nullptr, heap_->isolate()->priority()),
179 previous_(current_),
180 allocation_time_(startup_time),
181 previous_mark_compact_end_time_(startup_time)
182#if defined(V8_USE_PERFETTO)
183 ,
184 parent_track_(perfetto::ThreadTrack::Current())
185#endif
186{
187 // All accesses to incremental_marking_scope assume that incremental marking
188 // scopes come first.
189 static_assert(0 == Scope::FIRST_INCREMENTAL_SCOPE);
190 // We assume that MC_INCREMENTAL is the first scope so that we can properly
191 // map it to RuntimeCallStats.
192 static_assert(0 == Scope::MC_INCREMENTAL);
193 // Starting a new cycle will make the current event the previous event.
194 // Setting the current end time here allows us to refer back to a previous
195 // event's end time to compute time spent in mutator.
196 current_.end_time = startup_time;
197}
198
205
206void GCTracer::StartObservablePause(base::TimeTicks time) {
207 DCHECK(!IsInObservablePause());
208 start_of_observable_pause_.emplace(time);
209}
210
211void GCTracer::UpdateCurrentEvent(GarbageCollectionReason gc_reason,
212 const char* collector_reason) {
213 // For incremental marking, the event has already been created and we just
214 // need to update a few fields.
218 DCHECK(IsInObservablePause());
219 current_.gc_reason = gc_reason;
220 current_.collector_reason = collector_reason;
221 // TODO(chromium:1154636): The start_time of the current event contains
222 // currently the start time of the observable pause. This should be
223 // reconsidered.
226}
227
228void GCTracer::StartCycle(GarbageCollector collector,
229 GarbageCollectionReason gc_reason,
230 const char* collector_reason, MarkingType marking) {
231 // We cannot start a new cycle while there's another one in its atomic pause.
233 // We cannot start a new cycle while a young generation GC cycle has
234 // already interrupted a full GC cycle.
236
241 // The cases for interruption are: Scavenger, MinorMS interrupting sweeping.
242 // In both cases we are fine with fetching background counters now and
243 // fixing them up later in StopAtomicPause().
245 }
246
248 Heap::IsYoungGenerationCollector(collector));
251
253 switch (collector) {
254 case GarbageCollector::SCAVENGER:
255 type = Event::Type::SCAVENGER;
256 break;
257 case GarbageCollector::MINOR_MARK_SWEEPER:
258 type = marking == MarkingType::kIncremental
259 ? Event::Type::INCREMENTAL_MINOR_MARK_SWEEPER
260 : Event::Type::MINOR_MARK_SWEEPER;
261 break;
262 case GarbageCollector::MARK_COMPACTOR:
263 type = marking == MarkingType::kIncremental
264 ? Event::Type::INCREMENTAL_MARK_COMPACTOR
265 : Event::Type::MARK_COMPACTOR;
266 break;
267 }
268
272
274 current_ = Event(type, Event::State::MARKING, gc_reason, collector_reason,
275 heap_->isolate()->priority());
276
277 switch (marking) {
279 DCHECK(IsInObservablePause());
280 // TODO(chromium:1154636): The start_time of the current event contains
281 // currently the start time of the observable pause. This should be
282 // reconsidered.
285 break;
287 // The current event will be updated later.
288 DCHECK_IMPLIES(Heap::IsYoungGenerationCollector(collector),
289 (v8_flags.minor_ms &&
291 DCHECK(!IsInObservablePause());
292 break;
293 }
294
295 if (Heap::IsYoungGenerationCollector(collector)) {
296 epoch_young_ = next_epoch();
297 } else {
298 epoch_full_ = next_epoch();
299 }
300}
301
306
321
328
329 // Do not include the GC pause for calculating the allocation rate. GC pause
330 // with heap verification can decrease the allocation rate significantly.
332
333 if (v8_flags.memory_balancer) {
334 UpdateMemoryBalancerGCSpeed();
335 }
336}
337
338void GCTracer::StopObservablePause(GarbageCollector collector,
339 base::TimeTicks time) {
340 DCHECK(IsConsistentWithCollector(collector));
341 DCHECK(IsInObservablePause());
343
344 // TODO(chromium:1154636): The end_time of the current event contains
345 // currently the end time of the observable pause. This should be
346 // reconsidered.
348
350
352 auto* long_task_stats = heap_->isolate()->GetCurrentLongTaskStats();
353 const bool is_young = Heap::IsYoungGenerationCollector(collector);
354 if (is_young) {
356 BytesAndDuration(current_.survived_young_object_size, duration));
357 long_task_stats->gc_young_wall_clock_duration_us +=
358 duration.InMicroseconds();
359 } else {
364 BytesAndDuration(current_.end_object_size, duration));
365 for (int i = 0; i < Scope::NUMBER_OF_INCREMENTAL_SCOPES; i++) {
368 }
369 } else {
371 BytesAndDuration(current_.end_object_size, duration));
374 }
377 long_task_stats->gc_full_atomic_wall_clock_duration_us +=
378 duration.InMicroseconds();
381 }
382
383 heap_->UpdateTotalGCTime(duration);
384
385 if (v8_flags.trace_gc_ignore_scavenger && is_young) return;
386
387 if (v8_flags.trace_gc_nvp) {
388 PrintNVP();
389 } else {
390 Print();
391 }
392
393 // Reset here because Print() still uses these scopes.
395 for (int i = 0; i < Scope::NUMBER_OF_INCREMENTAL_SCOPES; i++) {
397 }
398 }
399
400 if (v8_flags.trace_gc) {
401 heap_->PrintShortHeapStatistics();
402 }
403
404 if (V8_UNLIKELY(TracingFlags::gc.load(std::memory_order_relaxed) &
406 TRACE_GC_NOTE("V8.GC_HEAP_DUMP_STATISTICS");
407 std::stringstream heap_stats;
408 heap_->DumpJSONHeapStatistics(heap_stats);
409
410 TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "V8.GC_Heap_Stats",
411 TRACE_EVENT_SCOPE_THREAD, "stats",
412 TRACE_STR_COPY(heap_stats.str().c_str()));
413 }
414}
415
416void GCTracer::UpdateMemoryBalancerGCSpeed() {
417 DCHECK(v8_flags.memory_balancer);
418 size_t major_gc_bytes = current_.start_object_size;
419 const base::TimeDelta atomic_pause_duration =
421 const base::TimeDelta blocked_time_taken =
422 atomic_pause_duration + current_.incremental_marking_duration;
423 base::TimeDelta concurrent_gc_time;
424 {
425 base::MutexGuard guard(&background_scopes_mutex_);
426 concurrent_gc_time =
427 background_scopes_[Scope::MC_BACKGROUND_EVACUATE_COPY] +
428 background_scopes_[Scope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS] +
429 background_scopes_[Scope::MC_BACKGROUND_MARKING] +
430 background_scopes_[Scope::MC_BACKGROUND_SWEEPING];
431 }
432 const base::TimeDelta major_gc_duration =
433 blocked_time_taken + concurrent_gc_time;
434 const base::TimeDelta major_allocation_duration =
436 blocked_time_taken;
437 CHECK_GE(major_allocation_duration, base::TimeDelta());
438
439 heap_->mb_->UpdateGCSpeed(major_gc_bytes, major_gc_duration);
440}
441
446
447namespace {
448
449// Estimate of young generation wall time across all threads up to and including
450// the atomic pause.
451constexpr v8::base::TimeDelta YoungGenerationWallTime(
452 const GCTracer::Event& event) {
453 return
454 // Scavenger events.
455 event.scopes[GCTracer::Scope::SCAVENGER] +
456 event.scopes[GCTracer::Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL] +
457 // Minor MS events.
458 event.scopes[GCTracer::Scope::MINOR_MS] +
459 event.scopes[GCTracer::Scope::MINOR_MS_BACKGROUND_MARKING];
460}
461
462} // namespace
463
467
468 DCHECK(IsConsistentWithCollector(collector));
469
471
472 if (Heap::IsYoungGenerationCollector(collector)) {
474
475 const v8::base::TimeDelta per_thread_wall_time =
476 YoungGenerationWallTime(current_) / current_.concurrency_estimate;
477 recorded_minor_gc_per_thread_.Push(BytesAndDuration(
478 current_.survived_young_object_size, per_thread_wall_time));
479
480 // If a young generation GC interrupted an unfinished full GC cycle, restore
481 // the event corresponding to the full GC cycle.
483 // Sweeping for full GC could have occurred during the young GC. Copy over
484 // any sweeping scope values to the previous_ event. The full GC sweeping
485 // scopes are never reported by young cycles.
486 previous_.scopes[Scope::MC_SWEEP] += current_.scopes[Scope::MC_SWEEP];
487 previous_.scopes[Scope::MC_BACKGROUND_SWEEPING] +=
488 current_.scopes[Scope::MC_BACKGROUND_SWEEPING];
489 std::swap(current_, previous_);
491 }
492 } else {
494
495 heap_->isolate()->counters()->mark_compact_reason()->AddSample(
496 static_cast<int>(current_.gc_reason));
497
498 if (v8_flags.trace_gc_freelists) {
500 "FreeLists statistics before collection:\n");
502 }
503 }
504}
505
515
520 // Check if young cppgc was scheduled but hasn't completed yet.
523 return;
524 bool was_young_gc_while_full_gc_ = young_gc_while_full_gc_;
531 if (was_young_gc_while_full_gc_) {
532 // Check if the full gc cycle is ready to be stopped.
534 }
535}
536
538 // Notifying twice that V8 sweeping is finished for the same cycle is possible
539 // only if Oilpan sweeping is still in progress.
543
545 bool was_young_gc_while_full_gc = young_gc_while_full_gc_;
546 bool was_full_sweeping_notified = notified_full_sweeping_completed_;
548 // NotifyYoungSweepingCompletedAndStopCycleIfFinished checks if the full
549 // cycle needs to be stopped as well. If full sweeping was already notified,
550 // nothing more needs to be done here.
551 if (!was_young_gc_while_full_gc || was_full_sweeping_notified) return;
552 }
553
555 // Sweeping finalization can also be triggered from inside a full GC cycle's
556 // atomic pause.
559
560 // Stop a full GC cycle only when both v8 and cppgc (if available) GCs have
561 // finished sweeping. This method is invoked by v8.
562 if (v8_flags.trace_gc_freelists) {
564 "FreeLists statistics after sweeping completed:\n");
566 }
569}
570
573 if (v8_flags.verify_heap) {
574 // If heap verification is enabled, sweeping finalization can also be
575 // triggered from inside a full GC cycle's atomic pause.
581 } else {
582 DCHECK(IsSweepingInProgress() || (current_.type == Event::Type::SCAVENGER));
583 }
584
587}
588
594
596 // Stop a full GC cycle only when both v8 and cppgc (if available) GCs have
597 // finished sweeping. This method is invoked by cppgc.
599 const auto* metric_recorder =
601 USE(metric_recorder);
602 DCHECK(metric_recorder->FullGCMetricsReportPending());
605 // Cppgc sweeping may finalize during MinorMS sweeping. In that case, delay
606 // stopping the cycle until the nested MinorMS cycle is stopped.
610 return;
611 }
613}
614
616 // Stop a young GC cycle only when both v8 and cppgc (if available) GCs have
617 // finished sweeping. This method is invoked by cppgc.
620 const auto* metric_recorder =
622 USE(metric_recorder);
623 DCHECK(metric_recorder->YoungGCMetricsReportPending());
627}
628
633
634void GCTracer::SampleAllocation(base::TimeTicks current,
635 size_t new_space_counter_bytes,
636 size_t old_generation_counter_bytes,
637 size_t embedder_counter_bytes) {
638 int64_t new_space_allocated_bytes = std::max<int64_t>(
639 new_space_counter_bytes - new_space_allocation_counter_bytes_, 0);
640 int64_t old_generation_allocated_bytes = std::max<int64_t>(
641 old_generation_counter_bytes - old_generation_allocation_counter_bytes_,
642 0);
643 int64_t embedder_allocated_bytes = std::max<int64_t>(
644 embedder_counter_bytes - embedder_allocation_counter_bytes_, 0);
645 const base::TimeDelta allocation_duration = current - allocation_time_;
647
648 new_space_allocation_counter_bytes_ = new_space_counter_bytes;
649 old_generation_allocation_counter_bytes_ = old_generation_counter_bytes;
650 embedder_allocation_counter_bytes_ = embedder_counter_bytes;
651
653 BytesAndDuration(new_space_allocated_bytes, allocation_duration));
655 BytesAndDuration(old_generation_allocated_bytes, allocation_duration));
657 BytesAndDuration(embedder_allocated_bytes, allocation_duration));
658
659 if (v8_flags.memory_balancer) {
660 heap_->mb_->UpdateAllocationRate(old_generation_allocated_bytes,
661 allocation_duration);
662 }
663
664#if defined(V8_USE_PERFETTO)
665 TRACE_COUNTER(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
666 perfetto::CounterTrack("OldGenerationAllocationThroughput",
667 parent_track_),
669 TRACE_COUNTER(
671 perfetto::CounterTrack("EmbedderAllocationThroughput", parent_track_),
673 TRACE_COUNTER(
675 perfetto::CounterTrack("NewSpaceAllocationThroughput", parent_track_),
677#endif
678}
679
680void GCTracer::SampleConcurrencyEsimate(size_t concurrency) {
681 // For now, we only expect a single sample.
683 DCHECK_GT(concurrency, 0);
684 current_.concurrency_estimate = concurrency;
685}
686
687void GCTracer::NotifyMarkingStart() {
688 const auto marking_start = base::TimeTicks::Now();
689
690 // Handle code flushing time deltas. Times are incremented conservatively:
691 // 1. The first delta is 0s.
692 // 2. Any delta is rounded downwards to a full second.
693 // 3. 0s-deltas are carried over to the next GC with their precise diff. This
694 // allows for frequent GCs (within a single second) to be attributed
695 // correctly later on.
696 // 4. The first non-zero increment after a reset always just increments by 1s.
697 using SFIAgeType = decltype(code_flushing_increase_s_);
698 static_assert(SharedFunctionInfo::kAgeSize == sizeof(SFIAgeType));
699 static constexpr auto kMaxDeltaForSFIAge =
700 base::TimeDelta::FromSeconds(std::numeric_limits<SFIAgeType>::max());
701 SFIAgeType code_flushing_increase_s = 0;
702 if (last_marking_start_time_for_code_flushing_.has_value()) {
703 const auto diff =
704 marking_start - last_marking_start_time_for_code_flushing_.value();
705 if (diff > kMaxDeltaForSFIAge) {
706 code_flushing_increase_s = std::numeric_limits<SFIAgeType>::max();
707 } else {
708 code_flushing_increase_s = static_cast<SFIAgeType>(diff.InSeconds());
709 }
710 }
711 DCHECK_LE(code_flushing_increase_s, std::numeric_limits<SFIAgeType>::max());
712 code_flushing_increase_s_ = code_flushing_increase_s;
713 if (!last_marking_start_time_for_code_flushing_.has_value() ||
714 code_flushing_increase_s > 0) {
715 last_marking_start_time_for_code_flushing_ = marking_start;
716 }
717 if (V8_UNLIKELY(v8_flags.trace_flush_code)) {
718 PrintIsolate(heap_->isolate(), "code flushing: increasing time: %u s\n",
719 code_flushing_increase_s);
720 }
721}
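// Editorial worked example (not part of the original source) for the delta
// rules above, assuming a 16-bit SFIAgeType so the saturation bound is
// 65535 s: a marking start 0.4 s after the previous one yields an increase of
// 0 s and the precise start time is carried over (rule 3); 1.7 s yields 1 s
// (rounded down, rule 2); anything above the bound saturates. Standalone:
//
//   #include <algorithm>
//   #include <cstdint>
//   #include <limits>
//
//   uint16_t FlushingIncreaseSeconds(double diff_seconds) {
//     constexpr double kMax = std::numeric_limits<uint16_t>::max();
//     // Truncation toward zero implements the round-down of rule 2.
//     return static_cast<uint16_t>(std::min(diff_seconds, kMax));
//   }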
722
726
727void GCTracer::AddCompactionEvent(double duration,
728 size_t live_bytes_compacted) {
729 recorded_compactions_.Push(BytesAndDuration(
730 live_bytes_compacted, base::TimeDelta::FromMillisecondsD(duration)));
731}
732
733void GCTracer::AddSurvivalRatio(double promotion_ratio) {
734 recorded_survival_ratios_.Push(promotion_ratio);
735}
736
737void GCTracer::AddIncrementalMarkingStep(double duration, size_t bytes) {
738 if (bytes > 0) {
739 current_.incremental_marking_bytes += bytes;
740 current_.incremental_marking_duration +=
741 base::TimeDelta::FromMillisecondsD(duration);
742 }
743 ReportIncrementalMarkingStepToRecorder(duration);
744}
745
749
750void GCTracer::Output(const char* format, ...) const {
751 if (v8_flags.trace_gc) {
752 va_list arguments;
753 va_start(arguments, format);
754 base::OS::VPrint(format, arguments);
755 va_end(arguments);
756 }
757
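 // Editorial note (not part of the original source): a va_list consumed by a
 // v*printf-style call cannot be reused, hence the separate va_start below
 // for the ring-buffer copy of the same message.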
758 const int kBufferSize = 256;
759 char raw_buffer[kBufferSize];
760 base::Vector<char> buffer(raw_buffer, kBufferSize);
761 va_list arguments2;
762 va_start(arguments2, format);
763 base::VSNPrintF(buffer, format, arguments2);
764 va_end(arguments2);
765
766 heap_->AddToRingBuffer(buffer.begin());
767}
768
769void GCTracer::Print() const {
771 const size_t kIncrementalStatsSize = 128;
772 char incremental_buffer[kIncrementalStatsSize] = {0};
773
775 base::OS::SNPrintF(
776 incremental_buffer, kIncrementalStatsSize,
777 " (+ %.1f ms in %d steps since start of marking, "
778 "biggest step %.1f ms, walltime since start of marking %.f ms)",
779 current_scope(Scope::MC_INCREMENTAL),
780 incremental_scope(Scope::MC_INCREMENTAL).steps,
781 incremental_scope(Scope::MC_INCREMENTAL).longest_step.InMillisecondsF(),
783 .InMillisecondsF());
784 }
785
786 const double total_external_time =
787 current_scope(Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES) +
788 current_scope(Scope::HEAP_EXTERNAL_EPILOGUE) +
789 current_scope(Scope::HEAP_EXTERNAL_PROLOGUE) +
790 current_scope(Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE) +
791 current_scope(Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
792
793 // Avoid PrintF as Output also appends the string to the tracing ring buffer
794 // that gets printed on OOM failures.
797 Output(
798 "[%d:%p] "
799 "%8.0f ms: "
800 "%s%s%s %.1f (%.1f) -> %.1f (%.1f) MB, "
801 "pooled: %.1f MB, "
802 "%.2f / %.2f ms%s (average mu = %.3f, current mu = %.3f) %s; %s\n",
804 reinterpret_cast<void*>(heap_->isolate()),
806 ToString(current_.type, false), current_.reduce_memory ? " (reduce)" : "",
807 young_gc_while_full_gc_ ? " (during sweeping)" : "",
808 static_cast<double>(current_.start_object_size) / MB,
809 static_cast<double>(current_.start_memory_size) / MB,
810 static_cast<double>(current_.end_object_size) / MB,
811 static_cast<double>(current_.end_memory_size) / MB,
812 static_cast<double>(heap_->memory_allocator()->GetPooledChunksCount() *
814 MB,
815 duration.InMillisecondsF(), total_external_time, incremental_buffer,
819}
820
821void GCTracer::PrintNVP() const {
823 const base::TimeDelta spent_in_mutator =
825 size_t allocated_since_last_gc =
827
828 base::TimeDelta incremental_walltime_duration;
830 incremental_walltime_duration =
832 }
833
834 // Avoid data races when printing the background scopes.
836
837 switch (current_.type) {
839 heap_->isolate()->PrintWithTimestamp(
840 "pause=%.1f "
841 "mutator=%.1f "
842 "gc=%s "
843 "reduce_memory=%d "
844 "during_sweeping=%d "
845 "time_to_safepoint=%.2f "
846 "heap.prologue=%.2f "
847 "heap.epilogue=%.2f "
848 "heap.external.prologue=%.2f "
849 "heap.external.epilogue=%.2f "
850 "heap.external_weak_global_handles=%.2f "
851 "complete.sweep_array_buffers=%.2f "
852 "scavenge=%.2f "
853 "scavenge.free_remembered_set=%.2f "
854 "scavenge.roots=%.2f "
855 "scavenge.weak=%.2f "
856 "scavenge.weak_global_handles.identify=%.2f "
857 "scavenge.weak_global_handles.process=%.2f "
858 "scavenge.parallel=%.2f "
859 "scavenge.update_refs=%.2f "
860 "scavenge.pin_objects=%.2f "
861 "scavenge.restore_pinned=%.2f "
862 "scavenge.sweep_array_buffers=%.2f "
863 "scavenge.resize_new_space=%.2f "
864 "background.scavenge.parallel=%.2f "
865 "incremental.steps_count=%d "
866 "incremental.steps_took=%.1f "
867 "scavenge_throughput=%.f "
868 "start_object_size=%zu "
869 "end_object_size=%zu "
870 "start_memory_size=%zu "
871 "end_memory_size=%zu "
872 "start_holes_size=%zu "
873 "end_holes_size=%zu "
874 "allocated=%zu "
875 "promoted=%zu "
876 "quarantined_size=%zu "
877 "quarantined_pages=%zu "
878 "new_space_survived=%zu "
879 "nodes_died_in_new=%d "
880 "nodes_copied_in_new=%d "
881 "nodes_promoted=%d "
882 "promotion_ratio=%.1f%% "
883 "average_survival_ratio=%.1f%% "
884 "promotion_rate=%.1f%% "
885 "new_space_survive_rate_=%.1f%% "
886 "new_space_allocation_throughput=%.1f "
887 "new_space_capacity=%zu "
888 "old_gen_allocation_limit=%zu "
889 "global_allocation_limit=%zu "
890 "allocation_throughput=%.1f "
891 "pool_local_chunks=%zu "
892 "pool_shared_chunks=%zu "
893 "pool_total_chunks=%zu\n",
894 duration.InMillisecondsF(), spent_in_mutator.InMillisecondsF(),
897 current_.scopes[Scope::TIME_TO_SAFEPOINT].InMillisecondsF(),
898 current_scope(Scope::HEAP_PROLOGUE),
899 current_scope(Scope::HEAP_EPILOGUE),
900 current_scope(Scope::HEAP_EXTERNAL_PROLOGUE),
901 current_scope(Scope::HEAP_EXTERNAL_EPILOGUE),
902 current_scope(Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES),
903 current_scope(Scope::SCAVENGER_COMPLETE_SWEEP_ARRAY_BUFFERS),
904 current_scope(Scope::SCAVENGER_SCAVENGE),
905 current_scope(Scope::SCAVENGER_FREE_REMEMBERED_SET),
906 current_scope(Scope::SCAVENGER_SCAVENGE_ROOTS),
907 current_scope(Scope::SCAVENGER_SCAVENGE_WEAK),
908 current_scope(Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_IDENTIFY),
909 current_scope(Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS),
910 current_scope(Scope::SCAVENGER_SCAVENGE_PARALLEL),
911 current_scope(Scope::SCAVENGER_SCAVENGE_UPDATE_REFS),
912 current_scope(Scope::SCAVENGER_SCAVENGE_PIN_OBJECTS),
914 Scope::SCAVENGER_SCAVENGE_RESTORE_AND_QUARANTINE_PINNED),
915 current_scope(Scope::SCAVENGER_SWEEP_ARRAY_BUFFERS),
916 current_scope(Scope::SCAVENGER_RESIZE_NEW_SPACE),
917 current_scope(Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL),
918 incremental_scope(GCTracer::Scope::MC_INCREMENTAL).steps,
919 current_scope(Scope::MC_INCREMENTAL),
922 .value_or(0.0),
926 allocated_since_last_gc, heap_->promoted_objects_size(),
942 break;
945 heap_->isolate()->PrintWithTimestamp(
946 "pause=%.1f "
947 "mutator=%.1f "
948 "gc=%s "
949 "reduce_memory=%d "
950 "minor_ms=%.2f "
951 "time_to_safepoint=%.2f "
952 "mark=%.2f "
953 "mark.incremental_seed=%.2f "
954 "mark.finish_incremental=%.2f "
955 "mark.seed=%.2f "
956 "mark.traced_handles=%.2f "
957 "mark.closure_parallel=%.2f "
958 "mark.closure=%.2f "
959 "mark.conservative_stack=%.2f "
960 "clear=%.2f "
961 "clear.string_forwarding_table=%.2f "
962 "clear.string_table=%.2f "
963 "clear.global_handles=%.2f "
964 "complete.sweep_array_buffers=%.2f "
965 "complete.sweeping=%.2f "
966 "sweep=%.2f "
967 "sweep.new=%.2f "
968 "sweep.new_lo=%.2f "
969 "sweep.update_string_table=%.2f "
970 "sweep.start_jobs=%.2f "
971 "sweep.array_buffers=%.2f "
972 "finish=%.2f "
973 "finish.ensure_capacity=%.2f "
974 "finish.sweep_array_buffers=%.2f "
975 "background.mark=%.2f "
976 "background.sweep=%.2f "
977 "background.sweep.array_buffers=%.2f "
978 "conservative_stack_scanning=%.2f "
979 "start_object_size=%zu "
980 "end_object_size=%zu "
981 "start_memory_size=%zu "
982 "end_memory_size=%zu "
983 "start_holes_size=%zu "
984 "end_holes_size=%zu "
985 "allocated=%zu "
986 "promoted=%zu "
987 "new_space_survived=%zu "
988 "nodes_died_in_new=%d "
989 "nodes_copied_in_new=%d "
990 "nodes_promoted=%d "
991 "promotion_ratio=%.1f%% "
992 "average_survival_ratio=%.1f%% "
993 "promotion_rate=%.1f%% "
994 "new_space_survive_rate_=%.1f%% "
995 "new_space_capacity=%zu "
996 "old_gen_allocation_limit=%zu "
997 "global_allocation_limit=%zu "
998 "new_space_allocation_throughput=%.1f "
999 "allocation_throughput=%.1f\n",
1000 duration.InMillisecondsF(), spent_in_mutator.InMillisecondsF(), "mms",
1001 current_.reduce_memory, current_scope(Scope::MINOR_MS),
1002 current_scope(Scope::TIME_TO_SAFEPOINT),
1003 current_scope(Scope::MINOR_MS_MARK),
1004 current_scope(Scope::MINOR_MS_MARK_INCREMENTAL_SEED),
1005 current_scope(Scope::MINOR_MS_MARK_FINISH_INCREMENTAL),
1006 current_scope(Scope::MINOR_MS_MARK_SEED),
1007 current_scope(Scope::MINOR_MS_MARK_TRACED_HANDLES),
1008 current_scope(Scope::MINOR_MS_MARK_CLOSURE_PARALLEL),
1009 current_scope(Scope::MINOR_MS_MARK_CLOSURE),
1010 current_scope(Scope::MINOR_MS_MARK_CONSERVATIVE_STACK),
1011 current_scope(Scope::MINOR_MS_CLEAR),
1012 current_scope(Scope::MINOR_MS_CLEAR_STRING_FORWARDING_TABLE),
1013 current_scope(Scope::MINOR_MS_CLEAR_STRING_TABLE),
1014 current_scope(Scope::MINOR_MS_CLEAR_WEAK_GLOBAL_HANDLES),
1015 current_scope(Scope::MINOR_MS_COMPLETE_SWEEP_ARRAY_BUFFERS),
1016 current_scope(Scope::MINOR_MS_COMPLETE_SWEEPING),
1017 current_scope(Scope::MINOR_MS_SWEEP),
1018 current_scope(Scope::MINOR_MS_SWEEP_NEW),
1019 current_scope(Scope::MINOR_MS_SWEEP_NEW_LO),
1020 current_scope(Scope::MINOR_MS_SWEEP_UPDATE_STRING_TABLE),
1021 current_scope(Scope::MINOR_MS_SWEEP_START_JOBS),
1022 current_scope(Scope::YOUNG_ARRAY_BUFFER_SWEEP),
1023 current_scope(Scope::MINOR_MS_FINISH),
1024 current_scope(Scope::MINOR_MS_FINISH_ENSURE_CAPACITY),
1025 current_scope(Scope::MINOR_MS_FINISH_SWEEP_ARRAY_BUFFERS),
1026 current_scope(Scope::MINOR_MS_BACKGROUND_MARKING),
1027 current_scope(Scope::MINOR_MS_BACKGROUND_SWEEPING),
1028 current_scope(Scope::BACKGROUND_YOUNG_ARRAY_BUFFER_SWEEP),
1029 current_scope(Scope::CONSERVATIVE_STACK_SCANNING),
1033 allocated_since_last_gc, heap_->promoted_objects_size(),
1044 break;
1047 heap_->isolate()->PrintWithTimestamp(
1048 "pause=%.1f "
1049 "mutator=%.1f "
1050 "gc=%s "
1051 "reduce_memory=%d "
1052 "time_to_safepoint=%.2f "
1053 "heap.prologue=%.2f "
1054 "heap.embedder_tracing_epilogue=%.2f "
1055 "heap.epilogue=%.2f "
1056 "heap.external.prologue=%.1f "
1057 "heap.external.epilogue=%.1f "
1058 "heap.external.weak_global_handles=%.1f "
1059 "clear=%1.f "
1060 "clear.external_string_table=%.1f "
1061 "clear.string_forwarding_table=%.1f "
1062 "clear.weak_global_handles=%.1f "
1063 "clear.dependent_code=%.1f "
1064 "clear.maps=%.1f "
1065 "clear.slots_buffer=%.1f "
1066 "clear.weak_collections=%.1f "
1067 "clear.weak_lists=%.1f "
1068 "clear.weak_references_trivial=%.1f "
1069 "clear.weak_references_non_trivial=%.1f "
1070 "clear.weak_references_filter_non_trivial=%.1f "
1071 "clear.js_weak_references=%.1f "
1072 "clear.join_filter_job=%.1f"
1073 "clear.join_job=%.1f "
1074 "weakness_handling=%.1f "
1075 "complete.sweep_array_buffers=%.1f "
1076 "complete.sweeping=%.1f "
1077 "epilogue=%.1f "
1078 "evacuate=%.1f "
1079 "evacuate.pin_pages=%.1f "
1080 "evacuate.candidates=%.1f "
1081 "evacuate.clean_up=%.1f "
1082 "evacuate.copy=%.1f "
1083 "evacuate.prologue=%.1f "
1084 "evacuate.epilogue=%.1f "
1085 "evacuate.rebalance=%.1f "
1086 "evacuate.update_pointers=%.1f "
1087 "evacuate.update_pointers.to_new_roots=%.1f "
1088 "evacuate.update_pointers.slots.main=%.1f "
1089 "evacuate.update_pointers.weak=%.1f "
1090 "finish=%.1f "
1091 "finish.sweep_array_buffers=%.1f "
1092 "mark=%.1f "
1093 "mark.finish_incremental=%.1f "
1094 "mark.roots=%.1f "
1095 "mark.full_closure_parallel=%.1f "
1096 "mark.full_closure=%.1f "
1097 "mark.ephemeron.marking=%.1f "
1098 "mark.ephemeron.linear=%.1f "
1099 "mark.embedder_prologue=%.1f "
1100 "mark.embedder_tracing=%.1f "
1101 "prologue=%.1f "
1102 "sweep=%.1f "
1103 "sweep.code=%.1f "
1104 "sweep.map=%.1f "
1105 "sweep.new=%.1f "
1106 "sweep.new_lo=%.1f "
1107 "sweep.old=%.1f "
1108 "sweep.start_jobs=%.1f "
1109 "incremental=%.1f "
1110 "incremental.finalize.external.prologue=%.1f "
1111 "incremental.finalize.external.epilogue=%.1f "
1112 "incremental.layout_change=%.1f "
1113 "incremental.sweep_array_buffers=%.1f "
1114 "incremental.sweeping=%.1f "
1115 "incremental.embedder_tracing=%.1f "
1116 "incremental_wrapper_tracing_longest_step=%.1f "
1117 "incremental_longest_step=%.1f "
1118 "incremental_steps_count=%d "
1119 "incremental_marking_throughput=%.f "
1120 "incremental_walltime_duration=%.f "
1121 "background.mark=%.1f "
1122 "background.sweep=%.1f "
1123 "background.evacuate.copy=%.1f "
1124 "background.evacuate.update_pointers=%.1f "
1125 "conservative_stack_scanning=%.2f "
1126 "start_object_size=%zu "
1127 "end_object_size=%zu "
1128 "start_memory_size=%zu "
1129 "end_memory_size=%zu "
1130 "start_holes_size=%zu "
1131 "end_holes_size=%zu "
1132 "allocated=%zu "
1133 "promoted=%zu "
1134 "new_space_survived=%zu "
1135 "nodes_died_in_new=%d "
1136 "nodes_copied_in_new=%d "
1137 "nodes_promoted=%d "
1138 "promotion_ratio=%.1f%% "
1139 "average_survival_ratio=%.1f%% "
1140 "promotion_rate=%.1f%% "
1141 "new_space_survive_rate=%.1f%% "
1142 "new_space_allocation_throughput=%.1f "
1143 "new_space_capacity=%zu "
1144 "old_gen_allocation_limit=%zu "
1145 "global_allocation_limit=%zu "
1146 "allocation_throughput=%.1f "
1147 "pool_local_chunks=%zu "
1148 "pool_shared_chunks=%zu "
1149 "pool_total_chunks=%zu "
1150 "compaction_speed=%.1f\n",
1151 duration.InMillisecondsF(), spent_in_mutator.InMillisecondsF(),
1153 current_scope(Scope::TIME_TO_SAFEPOINT),
1154 current_scope(Scope::HEAP_PROLOGUE),
1155 current_scope(Scope::HEAP_EMBEDDER_TRACING_EPILOGUE),
1156 current_scope(Scope::HEAP_EPILOGUE),
1157 current_scope(Scope::HEAP_EXTERNAL_PROLOGUE),
1158 current_scope(Scope::HEAP_EXTERNAL_EPILOGUE),
1159 current_scope(Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES),
1160 current_scope(Scope::MC_CLEAR),
1161 current_scope(Scope::MC_CLEAR_EXTERNAL_STRING_TABLE),
1162 current_scope(Scope::MC_CLEAR_STRING_FORWARDING_TABLE),
1163 current_scope(Scope::MC_CLEAR_WEAK_GLOBAL_HANDLES),
1164 current_scope(Scope::MC_CLEAR_DEPENDENT_CODE),
1165 current_scope(Scope::MC_CLEAR_MAPS),
1166 current_scope(Scope::MC_CLEAR_SLOTS_BUFFER),
1167 current_scope(Scope::MC_CLEAR_WEAK_COLLECTIONS),
1168 current_scope(Scope::MC_CLEAR_WEAK_LISTS),
1169 current_scope(Scope::MC_CLEAR_WEAK_REFERENCES_TRIVIAL),
1170 current_scope(Scope::MC_CLEAR_WEAK_REFERENCES_NON_TRIVIAL),
1171 current_scope(Scope::MC_CLEAR_WEAK_REFERENCES_FILTER_NON_TRIVIAL),
1172 current_scope(Scope::MC_CLEAR_JS_WEAK_REFERENCES),
1173 current_scope(Scope::MC_CLEAR_WEAK_REFERENCES_JOIN_FILTER_JOB),
1174 current_scope(Scope::MC_CLEAR_JOIN_JOB),
1175 current_scope(Scope::MC_WEAKNESS_HANDLING),
1176 current_scope(Scope::MC_COMPLETE_SWEEP_ARRAY_BUFFERS),
1177 current_scope(Scope::MC_COMPLETE_SWEEPING),
1178 current_scope(Scope::MC_EPILOGUE), current_scope(Scope::MC_EVACUATE),
1179 current_scope(Scope::MC_EVACUATE_PIN_PAGES),
1180 current_scope(Scope::MC_EVACUATE_CANDIDATES),
1181 current_scope(Scope::MC_EVACUATE_CLEAN_UP),
1182 current_scope(Scope::MC_EVACUATE_COPY),
1183 current_scope(Scope::MC_EVACUATE_PROLOGUE),
1184 current_scope(Scope::MC_EVACUATE_EPILOGUE),
1185 current_scope(Scope::MC_EVACUATE_REBALANCE),
1186 current_scope(Scope::MC_EVACUATE_UPDATE_POINTERS),
1187 current_scope(Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS),
1188 current_scope(Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN),
1189 current_scope(Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK),
1190 current_scope(Scope::MC_FINISH),
1191 current_scope(Scope::MC_FINISH_SWEEP_ARRAY_BUFFERS),
1192 current_scope(Scope::MC_MARK),
1193 current_scope(Scope::MC_MARK_FINISH_INCREMENTAL),
1194 current_scope(Scope::MC_MARK_ROOTS),
1195 current_scope(Scope::MC_MARK_FULL_CLOSURE_PARALLEL),
1196 current_scope(Scope::MC_MARK_FULL_CLOSURE),
1197 current_scope(Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING),
1198 current_scope(Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR),
1199 current_scope(Scope::MC_MARK_EMBEDDER_PROLOGUE),
1200 current_scope(Scope::MC_MARK_EMBEDDER_TRACING),
1201 current_scope(Scope::MC_PROLOGUE), current_scope(Scope::MC_SWEEP),
1202 current_scope(Scope::MC_SWEEP_CODE),
1203 current_scope(Scope::MC_SWEEP_MAP),
1204 current_scope(Scope::MC_SWEEP_NEW),
1205 current_scope(Scope::MC_SWEEP_NEW_LO),
1206 current_scope(Scope::MC_SWEEP_OLD),
1207 current_scope(Scope::MC_SWEEP_START_JOBS),
1208 current_scope(Scope::MC_INCREMENTAL),
1209 current_scope(Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE),
1210 current_scope(Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE),
1211 current_scope(Scope::MC_INCREMENTAL_LAYOUT_CHANGE),
1212 current_scope(Scope::MC_INCREMENTAL_START),
1213 current_scope(Scope::MC_INCREMENTAL_SWEEPING),
1214 current_scope(Scope::MC_INCREMENTAL_EMBEDDER_TRACING),
1215 incremental_scope(Scope::MC_INCREMENTAL_EMBEDDER_TRACING)
1216 .longest_step.InMillisecondsF(),
1217 incremental_scope(Scope::MC_INCREMENTAL)
1218 .longest_step.InMillisecondsF(),
1219 incremental_scope(Scope::MC_INCREMENTAL).steps,
1221 incremental_walltime_duration.InMillisecondsF(),
1222 current_scope(Scope::MC_BACKGROUND_MARKING),
1223 current_scope(Scope::MC_BACKGROUND_SWEEPING),
1224 current_scope(Scope::MC_BACKGROUND_EVACUATE_COPY),
1225 current_scope(Scope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS),
1226 current_scope(Scope::CONSERVATIVE_STACK_SCANNING),
1230 allocated_since_last_gc, heap_->promoted_objects_size(),
1244 CompactionSpeedInBytesPerMillisecond().value_or(0.0));
1245 break;
1246 case Event::Type::START:
1247 break;
1248 }
1249}
1250
1251void GCTracer::RecordIncrementalMarkingSpeed(size_t bytes,
1252 base::TimeDelta duration) {
1254 if (duration.IsZero() || bytes == 0) return;
1255 double current_speed =
1256 static_cast<double>(bytes) / duration.InMillisecondsF();
1257 if (recorded_major_incremental_marking_speed_ == 0) {
1258 recorded_major_incremental_marking_speed_ = current_speed;
1259 } else {
1260 recorded_major_incremental_marking_speed_ =
1261 (recorded_major_incremental_marking_speed_ + current_speed) / 2;
1262 }
1263}
1264
1265void GCTracer::RecordTimeToIncrementalMarkingTask(
1266 base::TimeDelta time_to_task) {
1267 if (!average_time_to_incremental_marking_task_.has_value()) {
1268 average_time_to_incremental_marking_task_.emplace(time_to_task);
1269 } else {
1270 average_time_to_incremental_marking_task_ =
1271 (average_time_to_incremental_marking_task_.value() + time_to_task) / 2;
1272 }
1273}
1274
1275std::optional<base::TimeDelta> GCTracer::AverageTimeToIncrementalMarkingTask()
1276 const {
1277 return average_time_to_incremental_marking_task_;
1278}
1279
1280void GCTracer::RecordEmbedderMarkingSpeed(size_t bytes,
1281 base::TimeDelta duration) {
1282 recorded_embedder_marking_.Push(BytesAndDuration(bytes, duration));
1283}
1284
1285void GCTracer::RecordMutatorUtilization(base::TimeTicks mark_compact_end_time,
1286 base::TimeDelta mark_compact_duration) {
1287 total_duration_since_last_mark_compact_ =
1288 mark_compact_end_time - previous_mark_compact_end_time_;
1290 const base::TimeDelta mutator_duration =
1291 total_duration_since_last_mark_compact_ - mark_compact_duration;
1292 DCHECK_GE(mutator_duration, base::TimeDelta());
1293 if (average_mark_compact_duration_ == 0 && average_mutator_duration_ == 0) {
1294 // This is the first event with mutator and mark-compact durations.
1295 average_mark_compact_duration_ = mark_compact_duration.InMillisecondsF();
1296 average_mutator_duration_ = mutator_duration.InMillisecondsF();
1297 } else {
1298 average_mark_compact_duration_ = (average_mark_compact_duration_ +
1299 mark_compact_duration.InMillisecondsF()) /
1300 2;
1301 average_mutator_duration_ =
1302 (average_mutator_duration_ + mutator_duration.InMillisecondsF()) / 2;
1303 }
1304 current_mark_compact_mutator_utilization_ =
1305 !total_duration_since_last_mark_compact_.IsZero()
1306 ? mutator_duration.InMillisecondsF() /
1307 total_duration_since_last_mark_compact_.InMillisecondsF()
1308 : 0;
1309 previous_mark_compact_end_time_ = mark_compact_end_time;
1310}
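// Editorial note (not part of the original source): the running means above
// use avg' = (avg + sample) / 2, i.e. an exponential moving average with
// alpha = 0.5 that weights the most recent mark-compact heavily:
//
//   double Ema(double avg, double sample) { return (avg + sample) / 2; }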
1311
1312double GCTracer::AverageMarkCompactMutatorUtilization() const {
1313 double average_total_duration =
1314 average_mark_compact_duration_ + average_mutator_duration_;
1315 if (average_total_duration == 0) return 1.0;
1316 return average_mutator_duration_ / average_total_duration;
1317}
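// Editorial worked example (not part of the original source): with an average
// mutator window of 300 ms and an average mark-compact duration of 100 ms,
// the average mutator utilization is 300 / (100 + 300) = 0.75, i.e. 75% of
// wall-clock time went to the mutator; 1.0 is returned when no data exists.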
1318
1322
1333
1334std::optional<double> GCTracer::EmbedderSpeedInBytesPerMillisecond() const {
1335 return BoundedAverageSpeed(recorded_embedder_marking_);
1336}
1337
1338std::optional<double> GCTracer::YoungGenerationSpeedInBytesPerMillisecond(
1339 YoungGenerationSpeedMode mode) const {
1340 switch (mode) {
1341 case YoungGenerationSpeedMode::kUpToAndIncludingAtomicPause:
1342 return BoundedAverageSpeed(recorded_minor_gc_per_thread_);
1343 case YoungGenerationSpeedMode::kOnlyAtomicPause:
1344 return BoundedAverageSpeed(recorded_minor_gc_atomic_pause_);
1345 }
1346 UNREACHABLE();
1347}
1348
1349std::optional<double> GCTracer::CompactionSpeedInBytesPerMillisecond() const {
1350 return BoundedAverageSpeed(recorded_compactions_);
1351}
1352
1353std::optional<double> GCTracer::MarkCompactSpeedInBytesPerMillisecond() const {
1354 return BoundedAverageSpeed(recorded_mark_compacts_);
1355}
1356
1357std::optional<double>
1358GCTracer::FinalIncrementalMarkCompactSpeedInBytesPerMillisecond() const {
1359 return BoundedAverageSpeed(recorded_incremental_mark_compacts_);
1360}
1361
1362std::optional<double> GCTracer::CombinedMarkCompactSpeedInBytesPerMillisecond() {
1363 if (v8_flags.gc_speed_uses_counters) {
1364 return BoundedAverageSpeed(recorded_major_totals_);
1365 }
1366
1367 const double kMinimumMarkingSpeed = 0.5;
1368 if (combined_mark_compact_speed_cache_.has_value())
1369 return combined_mark_compact_speed_cache_;
1370 // MarkCompact speed is more stable than incremental marking speed, because
1371 // there might not be many incremental marking steps because of concurrent
1372 // marking.
1373 combined_mark_compact_speed_cache_ = MarkCompactSpeedInBytesPerMillisecond();
1374 if (combined_mark_compact_speed_cache_.has_value())
1375 return combined_mark_compact_speed_cache_;
1376 double speed1 = IncrementalMarkingSpeedInBytesPerMillisecond();
1377 double speed2 =
1378 FinalIncrementalMarkCompactSpeedInBytesPerMillisecond().value_or(0.0);
1379 if (speed1 < kMinimumMarkingSpeed || speed2 < kMinimumMarkingSpeed) {
1380 // No data for the incremental marking speed.
1381 // Return the non-incremental mark-compact speed.
1382 combined_mark_compact_speed_cache_ =
1383 MarkCompactSpeedInBytesPerMillisecond();
1384 } else {
1385 // Combine the speed of incremental step and the speed of the final step.
1386 // 1 / (1 / speed1 + 1 / speed2) = speed1 * speed2 / (speed1 + speed2).
1387 combined_mark_compact_speed_cache_ = speed1 * speed2 / (speed1 + speed2);
1388 }
1389 return combined_mark_compact_speed_cache_;
1390}
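// Editorial worked example (not part of the original source): the harmonic
// combination models two sequential phases processing the same bytes. With
// speed1 = 2 bytes/ms (incremental steps) and speed2 = 6 bytes/ms (final
// pause), the combined speed is 2 * 6 / (2 + 6) = 1.5 bytes/ms, i.e. total
// bytes divided by the total time spent in both phases.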
1391
1395
1400
1404
1409
1410double GCTracer::AverageSurvivalRatio() const {
1411 if (recorded_survival_ratios_.Empty()) return 0.0;
1412 double sum = recorded_survival_ratios_.Reduce(
1413 [](double a, double b) { return a + b; }, 0.0);
1414 return sum / recorded_survival_ratios_.Size();
1415}
1416
1420
1422
1426
1435
1436namespace {
1437
1438V8_INLINE int TruncateToMs(base::TimeDelta delta) {
1439 return static_cast<int>(delta.InMilliseconds());
1440}
1441
1442} // namespace
1443
1444void GCTracer::RecordGCPhasesHistograms(RecordGCPhasesInfo::Mode mode) {
1445 Counters* counters = heap_->isolate()->counters();
1446 if (mode == RecordGCPhasesInfo::Mode::Finalize) {
1447 DCHECK_EQ(Scope::FIRST_TOP_MC_SCOPE, Scope::MC_CLEAR);
1448 counters->gc_finalize_clear()->AddSample(
1449 TruncateToMs(current_.scopes[Scope::MC_CLEAR]));
1450 counters->gc_finalize_epilogue()->AddSample(
1451 TruncateToMs(current_.scopes[Scope::MC_EPILOGUE]));
1452 counters->gc_finalize_evacuate()->AddSample(
1453 TruncateToMs(current_.scopes[Scope::MC_EVACUATE]));
1454 counters->gc_finalize_finish()->AddSample(
1455 TruncateToMs(current_.scopes[Scope::MC_FINISH]));
1456 counters->gc_finalize_mark()->AddSample(
1457 TruncateToMs(current_.scopes[Scope::MC_MARK]));
1458 counters->gc_finalize_prologue()->AddSample(
1459 TruncateToMs(current_.scopes[Scope::MC_PROLOGUE]));
1460 counters->gc_finalize_sweep()->AddSample(
1461 TruncateToMs(current_.scopes[Scope::MC_SWEEP]));
1462 if (current_.type == Event::Type::INCREMENTAL_MARK_COMPACTOR) {
1463 heap_->isolate()->counters()->incremental_marking_sum()->AddSample(
1464 TruncateToMs(current_.incremental_marking_duration));
1465 }
1466 DCHECK_EQ(Scope::LAST_TOP_MC_SCOPE, Scope::MC_SWEEP);
1467 } else if (mode == RecordGCPhasesInfo::Mode::Scavenger) {
1468 counters->gc_scavenger_scavenge_main()->AddSample(
1469 TruncateToMs(current_.scopes[Scope::SCAVENGER_SCAVENGE_PARALLEL]));
1470 counters->gc_scavenger_scavenge_roots()->AddSample(
1471 TruncateToMs(current_.scopes[Scope::SCAVENGER_SCAVENGE_ROOTS]));
1472 }
1473}
1474
1475void GCTracer::RecordGCSumCounters() {
1476 const base::TimeDelta atomic_pause_duration =
1477 current_.scopes[Scope::MARK_COMPACTOR];
1478 const base::TimeDelta incremental_marking =
1479 incremental_scopes_[Scope::MC_INCREMENTAL_LAYOUT_CHANGE].duration +
1480 incremental_scopes_[Scope::MC_INCREMENTAL_START].duration +
1481 incremental_scopes_[Scope::MC_INCREMENTAL].duration;
1482 const base::TimeDelta incremental_sweeping =
1483 incremental_scopes_[Scope::MC_INCREMENTAL_SWEEPING].duration;
1484 const base::TimeDelta overall_duration =
1485 atomic_pause_duration + incremental_marking + incremental_sweeping;
1486 const base::TimeDelta atomic_marking_duration =
1487 current_.scopes[Scope::MC_PROLOGUE] + current_.scopes[Scope::MC_MARK];
1488 const base::TimeDelta marking_duration =
1489 atomic_marking_duration + incremental_marking;
1490 base::TimeDelta background_duration;
1491 base::TimeDelta marking_background_duration;
1492 {
1493 base::MutexGuard guard(&background_scopes_mutex_);
1494 background_duration =
1495 background_scopes_[Scope::MC_BACKGROUND_EVACUATE_COPY] +
1496 background_scopes_[Scope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS] +
1497 background_scopes_[Scope::MC_BACKGROUND_MARKING] +
1498 background_scopes_[Scope::MC_BACKGROUND_SWEEPING];
1499 marking_background_duration =
1500 background_scopes_[Scope::MC_BACKGROUND_MARKING];
1501 }
1502
1503 recorded_major_totals_.Push(
1504 BytesAndDuration(current_.end_object_size, overall_duration));
1505
1506 // Emit trace event counters.
1507 TRACE_EVENT_INSTANT2(
1508 TRACE_DISABLED_BY_DEFAULT("v8.gc"), "V8.GCMarkCompactorSummary",
1509 TRACE_EVENT_SCOPE_THREAD, "duration", overall_duration.InMillisecondsF(),
1510 "background_duration", background_duration.InMillisecondsF());
1511 TRACE_EVENT_INSTANT2(
1512 TRACE_DISABLED_BY_DEFAULT("v8.gc"), "V8.GCMarkCompactorMarkingSummary",
1513 TRACE_EVENT_SCOPE_THREAD, "duration", marking_duration.InMillisecondsF(),
1514 "background_duration", marking_background_duration.InMillisecondsF());
1515 TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "V8.GCSpeedSummary",
1516 TRACE_EVENT_SCOPE_THREAD, "old_generation_speed",
1518 "embedder_speed",
1519 EmbedderSpeedInBytesPerMillisecond().value_or(0.0));
1520}
1521
1523#if defined(V8_USE_PERFETTO)
1524 TRACE_COUNTER(
1526 perfetto::CounterTrack("OldGenerationConsumedBytes", parent_track_),
1528 TRACE_COUNTER(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
1529 perfetto::CounterTrack("GlobalConsumedBytes", parent_track_),
1531 TRACE_COUNTER(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
1532 perfetto::CounterTrack("ExternalMemoryBytes", parent_track_),
1534#endif
1535}
1536
1537namespace {
1538
1539void CopyTimeMetrics(
1540 ::v8::metrics::GarbageCollectionPhases& metrics,
1541 const cppgc::internal::MetricRecorder::GCCycle::IncrementalPhases&
1542 cppgc_metrics) {
1543 // Allow for uninitialized values (-1), in case incremental marking/sweeping
1544 // were not used.
1545 DCHECK_LE(-1, cppgc_metrics.mark_duration_us);
1546 metrics.mark_wall_clock_duration_in_us = cppgc_metrics.mark_duration_us;
1547 DCHECK_LE(-1, cppgc_metrics.sweep_duration_us);
1548 metrics.sweep_wall_clock_duration_in_us = cppgc_metrics.sweep_duration_us;
1549 // The total duration is initialized, even if both incremental
1550 // marking and sweeping were not used.
1551 metrics.total_wall_clock_duration_in_us =
1552 std::max(INT64_C(0), metrics.mark_wall_clock_duration_in_us) +
1553 std::max(INT64_C(0), metrics.sweep_wall_clock_duration_in_us);
1554}
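// Editorial note (not part of the original source): std::max(INT64_C(0), v)
// maps the -1 "phase did not run" sentinel to 0, so the total above stays
// well-defined when incremental marking or sweeping was skipped, while the
// per-phase fields still report -1 to the recorder.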
1555
1556void CopyTimeMetrics(
1557 ::v8::metrics::GarbageCollectionPhases& metrics,
1558 const cppgc::internal::MetricRecorder::GCCycle::Phases& cppgc_metrics) {
1559 DCHECK_NE(-1, cppgc_metrics.compact_duration_us);
1560 metrics.compact_wall_clock_duration_in_us = cppgc_metrics.compact_duration_us;
1561 DCHECK_NE(-1, cppgc_metrics.mark_duration_us);
1562 metrics.mark_wall_clock_duration_in_us = cppgc_metrics.mark_duration_us;
1563 DCHECK_NE(-1, cppgc_metrics.sweep_duration_us);
1564 metrics.sweep_wall_clock_duration_in_us = cppgc_metrics.sweep_duration_us;
1565 DCHECK_NE(-1, cppgc_metrics.weak_duration_us);
1566 metrics.weak_wall_clock_duration_in_us = cppgc_metrics.weak_duration_us;
1567 metrics.total_wall_clock_duration_in_us =
1568 metrics.compact_wall_clock_duration_in_us +
1569 metrics.mark_wall_clock_duration_in_us +
1570 metrics.sweep_wall_clock_duration_in_us +
1571 metrics.weak_wall_clock_duration_in_us;
1572}
1573
1574void CopySizeMetrics(
1575 ::v8::metrics::GarbageCollectionSizes& metrics,
1576 const cppgc::internal::MetricRecorder::GCCycle::Sizes& cppgc_metrics) {
1577 DCHECK_NE(-1, cppgc_metrics.after_bytes);
1578 metrics.bytes_after = cppgc_metrics.after_bytes;
1579 DCHECK_NE(-1, cppgc_metrics.before_bytes);
1580 metrics.bytes_before = cppgc_metrics.before_bytes;
1581 DCHECK_NE(-1, cppgc_metrics.freed_bytes);
1582 metrics.bytes_freed = cppgc_metrics.freed_bytes;
1583}
1584
1585::v8::metrics::Recorder::ContextId GetContextId(
1586 v8::internal::Isolate* isolate) {
1587 DCHECK_NOT_NULL(isolate);
1588 if (isolate->context().is_null())
1589 return v8::metrics::Recorder::ContextId::Empty();
1590 HandleScope scope(isolate);
1591 return isolate->GetOrRegisterRecorderContextId(isolate->native_context());
1592}
1593
1594template <typename EventType>
1595void FlushBatchedEvents(
1596 v8::metrics::GarbageCollectionBatchedEvents<EventType>& batched_events,
1597 Isolate* isolate) {
1598 DCHECK_NOT_NULL(isolate->metrics_recorder());
1599 DCHECK(!batched_events.events.empty());
1600 isolate->metrics_recorder()->AddMainThreadEvent(std::move(batched_events),
1601 GetContextId(isolate));
1602 batched_events = {};
1603}
1604
1605} // namespace
1606
1607void GCTracer::ReportFullCycleToRecorder() {
1608 DCHECK(!Event::IsYoungGenerationEvent(current_.type));
1609 DCHECK_EQ(Event::State::NOT_RUNNING, current_.state);
1610 auto* cpp_heap = v8::internal::CppHeap::From(heap_->cpp_heap());
1611 DCHECK_IMPLIES(cpp_heap,
1612 cpp_heap->GetMetricRecorder()->FullGCMetricsReportPending());
1613 const std::shared_ptr<metrics::Recorder>& recorder =
1614 heap_->isolate()->metrics_recorder();
1615 DCHECK_NOT_NULL(recorder);
1616 if (!recorder->HasEmbedderRecorder()) {
1617 incremental_mark_batched_events_ = {};
1618 incremental_sweep_batched_events_ = {};
1619 if (cpp_heap) {
1620 cpp_heap->GetMetricRecorder()->ClearCachedEvents();
1621 }
1622 return;
1623 }
1624 if (!incremental_mark_batched_events_.events.empty()) {
1625 FlushBatchedEvents(incremental_mark_batched_events_, heap_->isolate());
1626 }
1627 if (!incremental_sweep_batched_events_.events.empty()) {
1628 FlushBatchedEvents(incremental_sweep_batched_events_, heap_->isolate());
1629 }
1630
1631 v8::metrics::GarbageCollectionFullCycle event;
1632 event.reason = static_cast<int>(current_.gc_reason);
1633 event.priority = current_.priority;
1634
1635 // Managed C++ heap statistics:
1636 if (cpp_heap) {
1637 cpp_heap->GetMetricRecorder()->FlushBatchedIncrementalEvents();
1638 const std::optional<cppgc::internal::MetricRecorder::GCCycle>
1639 optional_cppgc_event =
1640 cpp_heap->GetMetricRecorder()->ExtractLastFullGcEvent();
1641 DCHECK(optional_cppgc_event.has_value());
1642 DCHECK(!cpp_heap->GetMetricRecorder()->FullGCMetricsReportPending());
1643 const cppgc::internal::MetricRecorder::GCCycle& cppgc_event =
1644 optional_cppgc_event.value();
1645 DCHECK_EQ(cppgc_event.type,
1646 cppgc::internal::MetricRecorder::GCCycle::Type::kMajor);
1647 CopyTimeMetrics(event.total_cpp, cppgc_event.total);
1648 CopyTimeMetrics(event.main_thread_cpp, cppgc_event.main_thread);
1649 CopyTimeMetrics(event.main_thread_atomic_cpp,
1650 cppgc_event.main_thread_atomic);
1651 CopyTimeMetrics(event.main_thread_incremental_cpp,
1652 cppgc_event.main_thread_incremental);
1653 CopySizeMetrics(event.objects_cpp, cppgc_event.objects);
1654 CopySizeMetrics(event.memory_cpp, cppgc_event.memory);
1655 DCHECK_NE(-1, cppgc_event.collection_rate_in_percent);
1656 event.collection_rate_cpp_in_percent =
1657 cppgc_event.collection_rate_in_percent;
1658 DCHECK_NE(-1, cppgc_event.efficiency_in_bytes_per_us);
1659 event.efficiency_cpp_in_bytes_per_us =
1660 cppgc_event.efficiency_in_bytes_per_us;
1661 DCHECK_NE(-1, cppgc_event.main_thread_efficiency_in_bytes_per_us);
1662 event.main_thread_efficiency_cpp_in_bytes_per_us =
1663 cppgc_event.main_thread_efficiency_in_bytes_per_us;
1664
1665 if (total_duration_since_last_mark_compact_.IsZero()) {
1666 event.collection_weight_cpp_in_percent = 0;
1667 event.main_thread_collection_weight_cpp_in_percent = 0;
1668 } else {
1669 event.collection_weight_cpp_in_percent =
1670 static_cast<double>(event.total_cpp.total_wall_clock_duration_in_us) /
1671 total_duration_since_last_mark_compact_.InMicroseconds();
1672 event.main_thread_collection_weight_cpp_in_percent =
1673 static_cast<double>(
1674 event.main_thread_cpp.total_wall_clock_duration_in_us) /
1675 total_duration_since_last_mark_compact_.InMicroseconds();
1676 }
1677 }
1678
1679 // Unified heap statistics:
1680 const base::TimeDelta atomic_pause_duration =
1681 current_.scopes[Scope::MARK_COMPACTOR];
1682 const base::TimeDelta incremental_marking =
1683 current_.incremental_scopes[Scope::MC_INCREMENTAL_LAYOUT_CHANGE]
1684 .duration +
1685 current_.incremental_scopes[Scope::MC_INCREMENTAL_START].duration +
1686 current_.incremental_scopes[Scope::MC_INCREMENTAL].duration;
1687 const base::TimeDelta incremental_sweeping =
1688 current_.incremental_scopes[Scope::MC_INCREMENTAL_SWEEPING].duration;
1689 const base::TimeDelta overall_duration =
1690 atomic_pause_duration + incremental_marking + incremental_sweeping;
1691 const base::TimeDelta marking_background_duration =
1692 current_.scopes[Scope::MC_BACKGROUND_MARKING];
1693 const base::TimeDelta sweeping_background_duration =
1694 current_.scopes[Scope::MC_BACKGROUND_SWEEPING];
1695 const base::TimeDelta compact_background_duration =
1696 current_.scopes[Scope::MC_BACKGROUND_EVACUATE_COPY] +
1697 current_.scopes[Scope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS];
1698 const base::TimeDelta background_duration = marking_background_duration +
1699 sweeping_background_duration +
1700 compact_background_duration;
1701 const base::TimeDelta atomic_marking_duration =
1702 current_.scopes[Scope::MC_PROLOGUE] + current_.scopes[Scope::MC_MARK];
1703 const base::TimeDelta marking_duration =
1704 atomic_marking_duration + incremental_marking;
1705 const base::TimeDelta weak_duration = current_.scopes[Scope::MC_CLEAR];
1706 const base::TimeDelta compact_duration = current_.scopes[Scope::MC_EVACUATE] +
1707 current_.scopes[Scope::MC_FINISH] +
1708 current_.scopes[Scope::MC_EPILOGUE];
1709 const base::TimeDelta atomic_sweeping_duration =
1710 current_.scopes[Scope::MC_SWEEP];
1711 const base::TimeDelta sweeping_duration =
1712 atomic_sweeping_duration + incremental_sweeping;
1713
1714 event.main_thread_atomic.total_wall_clock_duration_in_us =
1715 atomic_pause_duration.InMicroseconds();
1716 event.main_thread.total_wall_clock_duration_in_us =
1717 overall_duration.InMicroseconds();
1718 event.total.total_wall_clock_duration_in_us =
1719 (overall_duration + background_duration).InMicroseconds();
1720 event.main_thread_atomic.mark_wall_clock_duration_in_us =
1721 atomic_marking_duration.InMicroseconds();
1722 event.main_thread.mark_wall_clock_duration_in_us =
1723 marking_duration.InMicroseconds();
1724 event.total.mark_wall_clock_duration_in_us =
1725 (marking_duration + marking_background_duration).InMicroseconds();
1726 event.main_thread_atomic.weak_wall_clock_duration_in_us =
1727 event.main_thread.weak_wall_clock_duration_in_us =
1728 event.total.weak_wall_clock_duration_in_us =
1729 weak_duration.InMicroseconds();
1730 event.main_thread_atomic.compact_wall_clock_duration_in_us =
1731 event.main_thread.compact_wall_clock_duration_in_us =
1732 compact_duration.InMicroseconds();
1733 event.total.compact_wall_clock_duration_in_us =
1734 (compact_duration + compact_background_duration).InMicroseconds();
1735 event.main_thread_atomic.sweep_wall_clock_duration_in_us =
1736 atomic_sweeping_duration.InMicroseconds();
1737 event.main_thread.sweep_wall_clock_duration_in_us =
1738 sweeping_duration.InMicroseconds();
1739 event.total.sweep_wall_clock_duration_in_us =
1740 (sweeping_duration + sweeping_background_duration).InMicroseconds();
1741 if (current_.type == Event::Type::INCREMENTAL_MARK_COMPACTOR) {
1742 event.main_thread_incremental.mark_wall_clock_duration_in_us =
1743 incremental_marking.InMicroseconds();
1744 event.incremental_marking_start_stop_wall_clock_duration_in_us =
1745 (current_.start_time - current_.incremental_marking_start_time)
1746 .InMicroseconds();
1747 } else {
1748 DCHECK(incremental_marking.IsZero());
1749 event.main_thread_incremental.mark_wall_clock_duration_in_us = -1;
1750 }
1751 // TODO(chromium:1154636): We always report the value of incremental sweeping,
1752 // even if it is zero.
1753 event.main_thread_incremental.sweep_wall_clock_duration_in_us =
1754 incremental_sweeping.InMicroseconds();
1755
1756 // Objects:
1757 event.objects.bytes_before = current_.start_object_size;
1758 event.objects.bytes_after = current_.end_object_size;
1759 event.objects.bytes_freed =
1760 current_.start_object_size - current_.end_object_size;
1761 // Memory:
1762 event.memory.bytes_before = current_.start_memory_size;
1763 event.memory.bytes_after = current_.end_memory_size;
1764 event.memory.bytes_freed =
1765 current_.start_memory_size > current_.end_memory_size
1766 ? current_.start_memory_size - current_.end_memory_size
1767 : 0U;
1768 // Collection Rate:
1769 if (event.objects.bytes_before == 0) {
1770 event.collection_rate_in_percent = 0;
1771 } else {
1772 event.collection_rate_in_percent =
1773 static_cast<double>(event.objects.bytes_freed) /
1774 event.objects.bytes_before;
1775 }
1776 // Efficiency:
1777 if (event.objects.bytes_freed == 0) {
1778 event.efficiency_in_bytes_per_us = 0;
1779 event.main_thread_efficiency_in_bytes_per_us = 0;
1780 } else {
1781 // Here, event.main_thread or even event.total can be
1782 // zero if the clock resolution is not small enough and the entire GC was
1783 // very short, so the timed value was zero. This appears to happen on
1784 // Windows, see crbug.com/1338256 and crbug.com/1339180. In this case, we
1785 // are only here if the number of freed bytes is nonzero and the division
1786 // below produces an infinite value.
1787 event.efficiency_in_bytes_per_us =
1788 static_cast<double>(event.objects.bytes_freed) /
1789 event.total.total_wall_clock_duration_in_us;
1790 event.main_thread_efficiency_in_bytes_per_us =
1791 static_cast<double>(event.objects.bytes_freed) /
1792 event.main_thread.total_wall_clock_duration_in_us;
1793 }
1794 if (total_duration_since_last_mark_compact_.IsZero()) {
1795 event.collection_weight_in_percent = 0;
1796 event.main_thread_collection_weight_in_percent = 0;
1797 } else {
1798 event.collection_weight_in_percent =
1799 static_cast<double>(event.total.total_wall_clock_duration_in_us) /
1800 total_duration_since_last_mark_compact_.InMicroseconds();
1801 event.main_thread_collection_weight_in_percent =
1802 static_cast<double>(event.main_thread.total_wall_clock_duration_in_us) /
1803 total_duration_since_last_mark_compact_.InMicroseconds();
1804 }
1805
1806 recorder->AddMainThreadEvent(event, GetContextId(heap_->isolate()));
1807}
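// Editorial worked example (not part of the original source): with
// bytes_before = 100 MB and bytes_after = 60 MB, bytes_freed = 40 MB and
// collection_rate_in_percent = 40 / 100 = 0.4; if freeing those 40 MB took
// 20,000 us across all threads, efficiency_in_bytes_per_us is about 2,097
// bytes/us. The divisions can yield infinity when clock resolution rounds a
// very short GC to zero, as noted above for crbug.com/1338256.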
1808
1809void GCTracer::ReportIncrementalMarkingStepToRecorder(double v8_duration) {
1810 DCHECK_EQ(Event::Type::INCREMENTAL_MARK_COMPACTOR, current_.type);
1811 static constexpr int kMaxBatchedEvents =
1812 CppHeap::MetricRecorderAdapter::kMaxBatchedEvents;
1813 const std::shared_ptr<metrics::Recorder>& recorder =
1814 heap_->isolate()->metrics_recorder();
1815 DCHECK_NOT_NULL(recorder);
1816 if (!recorder->HasEmbedderRecorder()) return;
1817 incremental_mark_batched_events_.events.emplace_back();
1818 if (heap_->cpp_heap()) {
1819 const std::optional<
1820 cppgc::internal::MetricRecorder::MainThreadIncrementalMark>
1821 cppgc_event = v8::internal::CppHeap::From(heap_->cpp_heap())
1822 ->GetMetricRecorder()
1823 ->ExtractLastIncrementalMarkEvent();
1824 if (cppgc_event.has_value()) {
1825 DCHECK_NE(-1, cppgc_event.value().duration_us);
1826 incremental_mark_batched_events_.events.back()
1827 .cpp_wall_clock_duration_in_us = cppgc_event.value().duration_us;
1828 }
1829 }
1830 incremental_mark_batched_events_.events.back().wall_clock_duration_in_us =
1831 static_cast<int64_t>(v8_duration *
1832 base::Time::kMicrosecondsPerMillisecond);
1833 if (incremental_mark_batched_events_.events.size() == kMaxBatchedEvents) {
1834 FlushBatchedEvents(incremental_mark_batched_events_, heap_->isolate());
1835 }
1836}
1837
1838void GCTracer::ReportIncrementalSweepingStepToRecorder(double v8_duration) {
1839 static constexpr int kMaxBatchedEvents =
1840 CppHeap::MetricRecorderAdapter::kMaxBatchedEvents;
1841 const std::shared_ptr<metrics::Recorder>& recorder =
1842 heap_->isolate()->metrics_recorder();
1843 DCHECK_NOT_NULL(recorder);
1844 if (!recorder->HasEmbedderRecorder()) return;
1845 incremental_sweep_batched_events_.events.emplace_back();
1846 incremental_sweep_batched_events_.events.back().wall_clock_duration_in_us =
1847 static_cast<int64_t>(v8_duration *
1848 base::Time::kMicrosecondsPerMillisecond);
1849 if (incremental_sweep_batched_events_.events.size() == kMaxBatchedEvents) {
1850 FlushBatchedEvents(incremental_sweep_batched_events_, heap_->isolate());
1851 }
1852}
1853
1854void GCTracer::ReportYoungCycleToRecorder() {
1855 DCHECK(Event::IsYoungGenerationEvent(current_.type));
1856 DCHECK_EQ(Event::State::NOT_RUNNING, current_.state);
1857 const std::shared_ptr<metrics::Recorder>& recorder =
1858 heap_->isolate()->metrics_recorder();
1859 DCHECK_NOT_NULL(recorder);
1860 if (!recorder->HasEmbedderRecorder()) return;
1861
1862 v8::metrics::GarbageCollectionYoungCycle event;
1863 // Reason:
1864 event.reason = static_cast<int>(current_.gc_reason);
1865 event.priority = current_.priority;
1866#if defined(CPPGC_YOUNG_GENERATION)
1867 // Managed C++ heap statistics:
1868 auto* cpp_heap = v8::internal::CppHeap::From(heap_->cpp_heap());
1869 if (cpp_heap && cpp_heap->generational_gc_supported()) {
1870 auto* metric_recorder = cpp_heap->GetMetricRecorder();
1871 const std::optional<cppgc::internal::MetricRecorder::GCCycle>
1872 optional_cppgc_event = metric_recorder->ExtractLastYoungGcEvent();
1873 // We bail out from Oilpan's young GC if the full GC is already in progress.
1874 // Check here if the young generation event was reported.
1875 if (optional_cppgc_event) {
1876 DCHECK(!metric_recorder->YoungGCMetricsReportPending());
1877 const cppgc::internal::MetricRecorder::GCCycle& cppgc_event =
1878 optional_cppgc_event.value();
1879 DCHECK_EQ(cppgc_event.type,
1880 cppgc::internal::MetricRecorder::GCCycle::Type::kMinor);
1881 CopyTimeMetrics(event.total_cpp, cppgc_event.total);
1882 CopySizeMetrics(event.objects_cpp, cppgc_event.objects);
1883 CopySizeMetrics(event.memory_cpp, cppgc_event.memory);
1884 DCHECK_NE(-1, cppgc_event.collection_rate_in_percent);
1885 event.collection_rate_cpp_in_percent =
1886 cppgc_event.collection_rate_in_percent;
1887 DCHECK_NE(-1, cppgc_event.efficiency_in_bytes_per_us);
1888 event.efficiency_cpp_in_bytes_per_us =
1889 cppgc_event.efficiency_in_bytes_per_us;
1890 DCHECK_NE(-1, cppgc_event.main_thread_efficiency_in_bytes_per_us);
1891 event.main_thread_efficiency_cpp_in_bytes_per_us =
1892 cppgc_event.main_thread_efficiency_in_bytes_per_us;
1893 }
1894 }
1895#endif // defined(CPPGC_YOUNG_GENERATION)
1896
1897 // Total:
1898 const base::TimeDelta total_wall_clock_duration =
1899 YoungGenerationWallTime(current_);
1900
1901 // TODO(chromium:1154636): Consider adding BACKGROUND_YOUNG_ARRAY_BUFFER_SWEEP
1902 // (both for the case of the scavenger and the minor mark-sweeper).
1903 event.total_wall_clock_duration_in_us =
1904 total_wall_clock_duration.InMicroseconds();
1905 // MainThread:
1906 const base::TimeDelta main_thread_wall_clock_duration =
1907 current_.scopes[Scope::SCAVENGER] +
1908 current_.scopes[Scope::MINOR_MARK_SWEEPER];
1909 event.main_thread_wall_clock_duration_in_us =
1910 main_thread_wall_clock_duration.InMicroseconds();
1911 // Collection Rate:
1912 if (current_.young_object_size == 0) {
1913 event.collection_rate_in_percent = 0;
1914 } else {
1915 event.collection_rate_in_percent =
1916 static_cast<double>(current_.survived_young_object_size) /
1917 current_.young_object_size;
1918 }
1919 // Efficiency:
1920 //
1921 // It's possible that time durations are rounded/clamped to zero, in which
1922 // case we report infinite efficiency.
1923 const double freed_bytes = static_cast<double>(
1924 current_.young_object_size - current_.survived_young_object_size);
1925 event.efficiency_in_bytes_per_us =
1926 total_wall_clock_duration.IsZero()
1927 ? std::numeric_limits<double>::infinity()
1928 : freed_bytes / total_wall_clock_duration.InMicroseconds();
1929 event.main_thread_efficiency_in_bytes_per_us =
1930 main_thread_wall_clock_duration.IsZero()
1931 ? std::numeric_limits<double>::infinity()
1932 : freed_bytes / main_thread_wall_clock_duration.InMicroseconds();
1933 recorder->AddMainThreadEvent(event, GetContextId(heap_->isolate()));
1934}
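
// Illustrative sketch (not part of gc-tracer.cc): the collection-rate and
// efficiency math above as a standalone program, with made-up sizes and
// durations. A duration that was rounded/clamped to zero yields infinite
// efficiency, exactly as the code above reports it.
#include <cstdio>
#include <limits>

int main() {
  // Hypothetical cycle: 8 MiB in the young generation, 1 MiB survived,
  // 500 us of total wall-clock time.
  const double young_object_size = 8.0 * 1024 * 1024;
  const double survived_young_object_size = 1.0 * 1024 * 1024;
  const double duration_in_us = 500.0;

  const double collection_rate =
      young_object_size == 0
          ? 0
          : survived_young_object_size / young_object_size;
  const double freed_bytes = young_object_size - survived_young_object_size;
  const double efficiency =
      duration_in_us == 0 ? std::numeric_limits<double>::infinity()
                          : freed_bytes / duration_in_us;

  // Prints rate=0.125 (12.5% survived) and efficiency=14680.1 bytes/us.
  std::printf("rate=%.3f efficiency=%.1f bytes/us\n", collection_rate,
              efficiency);
}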
1935
1936GarbageCollector GCTracer::GetCurrentCollector() const {
1937 switch (current_.type) {
1938 case Event::Type::SCAVENGER:
1939 return GarbageCollector::SCAVENGER;
1940 case Event::Type::MARK_COMPACTOR:
1941 case Event::Type::INCREMENTAL_MARK_COMPACTOR:
1942 return GarbageCollector::MARK_COMPACTOR;
1943 case Event::Type::MINOR_MARK_SWEEPER:
1944 case Event::Type::INCREMENTAL_MINOR_MARK_SWEEPER:
1945 return GarbageCollector::MINOR_MARK_SWEEPER;
1946 case Event::Type::START:
1947 UNREACHABLE();
1948 }
1949}
1950
1951void GCTracer::UpdateCurrentEventPriority(Priority priority) {
1952 // If the priority is changed, reset the priority field to denote a mixed
1953 // priority cycle.
1954 if (!current_.priority.has_value() || (current_.priority == priority)) {
1955 return;
1956 }
1957 current_.priority = std::nullopt;
1958}
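
// Illustrative sketch (not part of gc-tracer.cc): the mixed-priority latch
// above as a standalone program. The cycle's priority keeps its first value
// while every sample agrees, and collapses to std::nullopt the first time a
// different priority is observed; std::nullopt then sticks for the rest of
// the cycle. Priority and UpdatePriority are hypothetical stand-ins.
#include <cassert>
#include <optional>

enum class Priority { kBestEffort, kUserBlocking };

void UpdatePriority(std::optional<Priority>& current, Priority sampled) {
  // Already mixed (nullopt) or unchanged: nothing to do.
  if (!current.has_value() || *current == sampled) return;
  current = std::nullopt;  // Denote a mixed-priority cycle.
}

int main() {
  std::optional<Priority> cycle_priority = Priority::kBestEffort;
  UpdatePriority(cycle_priority, Priority::kBestEffort);
  assert(cycle_priority.has_value());  // Still uniform.
  UpdatePriority(cycle_priority, Priority::kUserBlocking);
  assert(!cycle_priority.has_value());  // Mixed from here on.
  UpdatePriority(cycle_priority, Priority::kBestEffort);
  assert(!cycle_priority.has_value());
}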
1959
1960#ifdef DEBUG
1961bool GCTracer::IsInObservablePause() const {
1962 return start_of_observable_pause_.has_value();
1963}
1964
1965bool GCTracer::IsInAtomicPause() const {
1966 return current_.state == Event::State::ATOMIC;
1967}
1968
1969bool GCTracer::IsConsistentWithCollector(GarbageCollector collector) const {
1970 switch (collector) {
1971 case GarbageCollector::SCAVENGER:
1972 return current_.type == Event::Type::SCAVENGER;
1973 case GarbageCollector::MINOR_MARK_SWEEPER:
1974 return current_.type == Event::Type::MINOR_MARK_SWEEPER ||
1975 current_.type == Event::Type::INCREMENTAL_MINOR_MARK_SWEEPER;
1976 case GarbageCollector::MARK_COMPACTOR:
1977 return current_.type == Event::Type::MARK_COMPACTOR ||
1978 current_.type == Event::Type::INCREMENTAL_MARK_COMPACTOR;
1979 }
1980}
1981
1982bool GCTracer::IsSweepingInProgress() const {
1983 return (current_.type == Event::Type::MARK_COMPACTOR ||
1984 current_.type == Event::Type::INCREMENTAL_MARK_COMPACTOR ||
1985 current_.type == Event::Type::MINOR_MARK_SWEEPER ||
1986 current_.type == Event::Type::INCREMENTAL_MINOR_MARK_SWEEPER) &&
1987 current_.state == Event::State::SWEEPING;
1988}
1989#endif
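
// Illustrative sketch (not part of gc-tracer.cc): the DEBUG-only consistency
// predicate above, reduced to a standalone program. A collector is consistent
// with the current event when the event type belongs to that collector; the
// mark-compactor owns both its atomic and incremental event types. Enum names
// are simplified stand-ins for the ones used in this file.
#include <cassert>

enum class Collector { kScavenger, kMinorMarkSweeper, kMarkCompactor };
enum class EventType {
  kScavenger,
  kMinorMarkSweeper,
  kIncrementalMinorMarkSweeper,
  kMarkCompactor,
  kIncrementalMarkCompactor
};

bool IsConsistent(Collector collector, EventType type) {
  switch (collector) {
    case Collector::kScavenger:
      return type == EventType::kScavenger;
    case Collector::kMinorMarkSweeper:
      return type == EventType::kMinorMarkSweeper ||
             type == EventType::kIncrementalMinorMarkSweeper;
    case Collector::kMarkCompactor:
      return type == EventType::kMarkCompactor ||
             type == EventType::kIncrementalMarkCompactor;
  }
  return false;
}

int main() {
  assert(IsConsistent(Collector::kMarkCompactor,
                      EventType::kIncrementalMarkCompactor));
  assert(!IsConsistent(Collector::kScavenger, EventType::kMarkCompactor));
}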
1990
1991} // namespace internal
1992} // namespace v8