V8 is Google's open source high-performance JavaScript and WebAssembly engine, written in C++.

stats-collector.cc
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/cppgc/stats-collector.h"

#include <algorithm>
#include <atomic>
#include <cmath>

#include "src/base/atomicops.h"
#include "src/base/logging.h"
#include "src/base/platform/time.h"
#include "src/heap/cppgc/metric-recorder.h"

namespace cppgc {
namespace internal {

// static
constexpr size_t StatsCollector::kAllocationThresholdBytes;

StatsCollector::StatsCollector(Platform* platform) : platform_(platform) {
  USE(platform_);
}

void StatsCollector::RegisterObserver(AllocationObserver* observer) {
  DCHECK_EQ(allocation_observers_.end(),
            std::find(allocation_observers_.begin(),
                      allocation_observers_.end(), observer));
  allocation_observers_.push_back(observer);
}

void StatsCollector::UnregisterObserver(AllocationObserver* observer) {
  auto it = std::find(allocation_observers_.begin(),
                      allocation_observers_.end(), observer);
  DCHECK_NE(allocation_observers_.end(), it);
  *it = nullptr;
  allocation_observer_deleted_ = true;
}
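
// Usage sketch (illustrative only; `SampleObserver` and `stats_collector` are
// hypothetical, while the observer hooks are the ones invoked below in this
// file):
//
//   class SampleObserver final : public StatsCollector::AllocationObserver {
//     void AllocatedObjectSizeIncreased(size_t delta) final { /* react */ }
//     void AllocatedObjectSizeDecreased(size_t delta) final { /* react */ }
//   };
//
//   SampleObserver observer;
//   stats_collector->RegisterObserver(&observer);
//   // ... object size deltas are reported at allocation safepoints ...
//   stats_collector->UnregisterObserver(&observer);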

void StatsCollector::NotifyAllocation(size_t bytes) {
  // The current GC may not have been started. This is ok as recording considers
  // the whole time range between garbage collections.
  allocated_bytes_since_safepoint_ += bytes;
#ifdef CPPGC_VERIFY_HEAP
  DCHECK_GE(tracked_live_bytes_ + bytes, tracked_live_bytes_);
  tracked_live_bytes_ += bytes;
#endif  // CPPGC_VERIFY_HEAP
}

void StatsCollector::NotifyExplicitFree(size_t bytes) {
  // See NotifyAllocation() for the lifetime of the counter.
  explicitly_freed_bytes_since_safepoint_ += bytes;
#ifdef CPPGC_VERIFY_HEAP
  DCHECK_GE(tracked_live_bytes_, bytes);
  tracked_live_bytes_ -= bytes;
#endif  // CPPGC_VERIFY_HEAP
}

void StatsCollector::NotifySafePointForConservativeCollection() {
  if (std::abs(allocated_bytes_since_safepoint_ -
               explicitly_freed_bytes_since_safepoint_) >=
      static_cast<int64_t>(kAllocationThresholdBytes)) {
    AllocatedObjectSizeSafepointImpl();
  }
}

void StatsCollector::NotifySafePointForTesting() {
  AllocatedObjectSizeSafepointImpl();
}

void StatsCollector::AllocatedObjectSizeSafepointImpl() {
  allocated_bytes_since_end_of_marking_ +=
      static_cast<int64_t>(allocated_bytes_since_safepoint_) -
      static_cast<int64_t>(explicitly_freed_bytes_since_safepoint_);

  // Save the epoch to avoid clearing counters when a GC happened, see below.
  const auto saved_epoch = current_.epoch;

  // These observer methods may start or finalize GC. In case they trigger a
  // final GC pause, the delta counters are reset there and the following
  // observer calls are called with '0' updates.
  ForAllAllocationObservers([this](AllocationObserver* observer) {
    // Recompute delta here so that a GC finalization is able to clear the
    // delta for other observer calls.
    int64_t delta = allocated_bytes_since_safepoint_ -
                    explicitly_freed_bytes_since_safepoint_;
    if (delta < 0) {
      observer->AllocatedObjectSizeDecreased(static_cast<size_t>(-delta));
    } else {
      observer->AllocatedObjectSizeIncreased(static_cast<size_t>(delta));
    }
  });
  // Only clear the counters when no garbage collection happened. In case of a
  // garbage collection in the callbacks, the counters have been cleared by
  // `NotifyMarkingCompleted()`. In addition, atomic sweeping may have already
  // allocated new memory which would be dropped from accounting in case
  // of clearing here.
  if (saved_epoch == current_.epoch) {
    allocated_bytes_since_safepoint_ = 0;
    explicitly_freed_bytes_since_safepoint_ = 0;
  }
}
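
// Worked example (hypothetical numbers): with 98304 bytes allocated and 32768
// bytes explicitly freed since the last safepoint, delta = +65536, so each
// observer receives AllocatedObjectSizeIncreased(65536). If one of those
// callbacks finalizes a GC, `current_.epoch` advances past `saved_epoch` and
// the counters above are deliberately left for NotifyMarkingCompleted() to
// clear.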

StatsCollector::Event::Event() {
  static std::atomic<size_t> epoch_counter{0};
  epoch = epoch_counter.fetch_add(1);
}

void StatsCollector::NotifyUnmarkingStarted(CollectionType collection_type) {
  DCHECK_EQ(GarbageCollectionState::kNotRunning, gc_state_);
  DCHECK_EQ(CollectionType::kMajor, collection_type);
  gc_state_ = GarbageCollectionState::kUnmarking;
}

void StatsCollector::NotifyMarkingStarted(CollectionType collection_type,
                                          MarkingType marking_type,
                                          IsForcedGC is_forced_gc) {
  DCHECK_IMPLIES(gc_state_ != GarbageCollectionState::kNotRunning,
                 (gc_state_ == GarbageCollectionState::kUnmarking &&
                  collection_type == CollectionType::kMajor));
  current_.collection_type = collection_type;
  current_.is_forced_gc = is_forced_gc;
  current_.marking_type = marking_type;
  gc_state_ = GarbageCollectionState::kMarking;
}

void StatsCollector::NotifyMarkingCompleted(size_t marked_bytes) {
  DCHECK_EQ(GarbageCollectionState::kMarking, gc_state_);
  gc_state_ = GarbageCollectionState::kSweeping;
  current_.marked_bytes = marked_bytes;
  current_.object_size_before_sweep_bytes =
      marked_bytes_so_far_ + allocated_bytes_since_end_of_marking_ +
      allocated_bytes_since_safepoint_ -
      explicitly_freed_bytes_since_safepoint_;
  allocated_bytes_since_safepoint_ = 0;
  explicitly_freed_bytes_since_safepoint_ = 0;

  if (current_.collection_type == CollectionType::kMajor)
    marked_bytes_so_far_ = 0;
  marked_bytes_so_far_ += marked_bytes;

#ifdef CPPGC_VERIFY_HEAP
  tracked_live_bytes_ = marked_bytes_so_far_;
#endif  // CPPGC_VERIFY_HEAP

  DCHECK_LE(memory_freed_bytes_since_end_of_marking_, memory_allocated_bytes_);
  memory_allocated_bytes_ -= memory_freed_bytes_since_end_of_marking_;
  current_.memory_size_before_sweep_bytes = memory_allocated_bytes_;
  memory_freed_bytes_since_end_of_marking_ = 0;

  ForAllAllocationObservers([this](AllocationObserver* observer) {
    observer->ResetAllocatedObjectSize(marked_bytes_so_far_);
  });

  // HeapGrowing uses the fields below to estimate the allocation rate during
  // the execution of ResetAllocatedObjectSize.
  allocated_bytes_since_end_of_marking_ = 0;
  time_of_last_end_of_marking_ = v8::base::TimeTicks::Now();
}

double StatsCollector::GetRecentAllocationSpeedInBytesPerMs() const {
  v8::base::TimeTicks current_time = v8::base::TimeTicks::Now();
  DCHECK_LE(time_of_last_end_of_marking_, current_time);
  if (time_of_last_end_of_marking_ == current_time) return 0;
  return allocated_bytes_since_end_of_marking_ /
         (current_time - time_of_last_end_of_marking_).InMillisecondsF();
}
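
// Worked example (hypothetical numbers): 2097152 bytes (2 MiB) allocated since
// an end of marking that happened 8 ms ago yield an allocation speed of
// 2097152 / 8 = 262144 bytes/ms.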

namespace {

int64_t SumPhases(const MetricRecorder::GCCycle::Phases& phases) {
  DCHECK_LE(0, phases.mark_duration_us);
  DCHECK_LE(0, phases.weak_duration_us);
  DCHECK_LE(0, phases.compact_duration_us);
  DCHECK_LE(0, phases.sweep_duration_us);
  return phases.mark_duration_us + phases.weak_duration_us +
         phases.compact_duration_us + phases.sweep_duration_us;
}

MetricRecorder::GCCycle GetCycleEventForMetricRecorder(
    CollectionType type, StatsCollector::MarkingType marking_type,
    StatsCollector::SweepingType sweeping_type, int64_t atomic_mark_us,
    int64_t atomic_weak_us, int64_t atomic_compact_us, int64_t atomic_sweep_us,
    int64_t incremental_mark_us, int64_t incremental_sweep_us,
    int64_t concurrent_mark_us, int64_t concurrent_sweep_us,
    int64_t objects_before_bytes, int64_t objects_after_bytes,
    int64_t objects_freed_bytes, int64_t memory_before_bytes,
    int64_t memory_after_bytes, int64_t memory_freed_bytes) {
  MetricRecorder::GCCycle event;
  event.type = (type == CollectionType::kMajor)
                   ? MetricRecorder::GCCycle::Type::kMajor
                   : MetricRecorder::GCCycle::Type::kMinor;
  // MainThread.Incremental:
  event.main_thread_incremental.mark_duration_us =
      marking_type != StatsCollector::MarkingType::kAtomic ? incremental_mark_us
                                                           : -1;
  event.main_thread_incremental.sweep_duration_us =
      sweeping_type != StatsCollector::SweepingType::kAtomic
          ? incremental_sweep_us
          : -1;
  // MainThread.Atomic:
  event.main_thread_atomic.mark_duration_us = atomic_mark_us;
  event.main_thread_atomic.weak_duration_us = atomic_weak_us;
  event.main_thread_atomic.compact_duration_us = atomic_compact_us;
  event.main_thread_atomic.sweep_duration_us = atomic_sweep_us;
  // MainThread:
  event.main_thread.mark_duration_us =
      event.main_thread_atomic.mark_duration_us + incremental_mark_us;
  event.main_thread.weak_duration_us =
      event.main_thread_atomic.weak_duration_us;
  event.main_thread.compact_duration_us =
      event.main_thread_atomic.compact_duration_us;
  event.main_thread.sweep_duration_us =
      event.main_thread_atomic.sweep_duration_us + incremental_sweep_us;
  // Total:
  event.total.mark_duration_us =
      event.main_thread.mark_duration_us + concurrent_mark_us;
  event.total.weak_duration_us = event.main_thread.weak_duration_us;
  event.total.compact_duration_us = event.main_thread.compact_duration_us;
  event.total.sweep_duration_us =
      event.main_thread.sweep_duration_us + concurrent_sweep_us;
  // Objects:
  event.objects.before_bytes = objects_before_bytes;
  event.objects.after_bytes = objects_after_bytes;
  event.objects.freed_bytes = objects_freed_bytes;
  // Memory:
  event.memory.before_bytes = memory_before_bytes;
  event.memory.after_bytes = memory_after_bytes;
  event.memory.freed_bytes = memory_freed_bytes;
  // Collection Rate:
  if (event.objects.before_bytes == 0) {
    event.collection_rate_in_percent = 0;
  } else {
    event.collection_rate_in_percent =
        static_cast<double>(event.objects.freed_bytes) /
        event.objects.before_bytes;
  }
  // Efficiency:
  if (event.objects.freed_bytes == 0) {
    event.efficiency_in_bytes_per_us = 0;
    event.main_thread_efficiency_in_bytes_per_us = 0;
  } else {
    // Here, SumPhases(event.main_thread) or even SumPhases(event.total) can be
    // zero if the clock resolution is not small enough and the entire GC was
    // very short, so the timed value was zero. This appears to happen on
    // Windows, see crbug.com/1338256 and crbug.com/1339180. In this case, we
    // are only here if the number of freed bytes is nonzero and the division
    // below produces an infinite value.
    event.efficiency_in_bytes_per_us =
        static_cast<double>(event.objects.freed_bytes) / SumPhases(event.total);
    event.main_thread_efficiency_in_bytes_per_us =
        static_cast<double>(event.objects.freed_bytes) /
        SumPhases(event.main_thread);
  }
  return event;
}
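
// Worked example (hypothetical numbers): objects_before_bytes = 1000000 and
// objects_after_bytes = 400000 give objects_freed_bytes = 600000, so
// collection_rate_in_percent = 600000.0 / 1000000 = 0.6. With
// SumPhases(event.total) = 2000 us, efficiency_in_bytes_per_us =
// 600000.0 / 2000 = 300.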

}  // namespace

void StatsCollector::NotifySweepingCompleted(SweepingType sweeping_type) {
  DCHECK_EQ(GarbageCollectionState::kSweeping, gc_state_);
  gc_state_ = GarbageCollectionState::kNotRunning;
  current_.sweeping_type = sweeping_type;
  previous_ = std::move(current_);
  current_ = Event();
  DCHECK_IMPLIES(previous_.marking_type == StatsCollector::MarkingType::kAtomic,
                 previous_.scope_data[kIncrementalMark].IsZero());
  DCHECK_IMPLIES(
      previous_.sweeping_type == StatsCollector::SweepingType::kAtomic,
      previous_.scope_data[kIncrementalSweep].IsZero());
  if (metric_recorder_) {
    MetricRecorder::GCCycle event = GetCycleEventForMetricRecorder(
        previous_.collection_type, previous_.marking_type,
        previous_.sweeping_type,
        previous_.scope_data[kAtomicMark].InMicroseconds(),
        previous_.scope_data[kAtomicWeak].InMicroseconds(),
        previous_.scope_data[kAtomicCompact].InMicroseconds(),
        previous_.scope_data[kAtomicSweep].InMicroseconds(),
        previous_.scope_data[kIncrementalMark].InMicroseconds(),
        previous_.scope_data[kIncrementalSweep].InMicroseconds(),
        previous_.concurrent_scope_data[kConcurrentMark],
        previous_.concurrent_scope_data[kConcurrentSweep],
        previous_.object_size_before_sweep_bytes /* objects_before */,
        marked_bytes_so_far_ /* objects_after */,
        previous_.object_size_before_sweep_bytes -
            marked_bytes_so_far_ /* objects_freed */,
        previous_.memory_size_before_sweep_bytes /* memory_before */,
        previous_.memory_size_before_sweep_bytes -
            memory_freed_bytes_since_end_of_marking_ /* memory_after */,
        memory_freed_bytes_since_end_of_marking_ /* memory_freed */);
    metric_recorder_->AddMainThreadEvent(event);
  }
}
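
// Worked example (hypothetical numbers): if the previous cycle entered
// sweeping with object_size_before_sweep_bytes = 1000000 and marking retained
// marked_bytes_so_far_ = 400000, the recorded event reports objects_before =
// 1000000, objects_after = 400000, and objects_freed = 600000; the memory_*
// arguments are derived the same way from the page-level counters.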

size_t StatsCollector::allocated_memory_size() const {
  return memory_allocated_bytes_ - memory_freed_bytes_since_end_of_marking_;
}

size_t StatsCollector::allocated_object_size() const {
  return marked_bytes_so_far_ + allocated_bytes_since_end_of_marking_;
}

size_t StatsCollector::marked_bytes() const {
  DCHECK_NE(GarbageCollectionState::kMarking, gc_state_);
  return marked_bytes_so_far_;
}

size_t StatsCollector::marked_bytes_on_current_cycle() const {
  DCHECK_NE(GarbageCollectionState::kNotRunning, gc_state_);
  return current_.marked_bytes;
}

v8::base::TimeDelta StatsCollector::marking_time() const {
  DCHECK_NE(GarbageCollectionState::kMarking, gc_state_);
  // During sweeping we refer to the current Event as that already holds the
  // correct marking information. In all other phases, the previous event holds
  // the most up-to-date marking information.
  const Event& event =
      gc_state_ == GarbageCollectionState::kSweeping ? current_ : previous_;
  return event.scope_data[kAtomicMark] + event.scope_data[kIncrementalMark] +
         v8::base::TimeDelta::FromMicroseconds(v8::base::Relaxed_Load(
             &event.concurrent_scope_data[kConcurrentMark]));
}
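
// Worked example (hypothetical numbers): 3000 us of atomic marking, 1500 us of
// incremental marking, and 500 us of concurrent marking add up to a
// marking_time() of 5000 us.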

void StatsCollector::NotifyAllocatedMemory(int64_t size) {
  memory_allocated_bytes_ += size;
#ifdef DEBUG
  const auto saved_epoch = current_.epoch;
#endif  // DEBUG
  ForAllAllocationObservers([size](AllocationObserver* observer) {
    observer->AllocatedSizeIncreased(static_cast<size_t>(size));
  });
#ifdef DEBUG
  // AllocatedSizeIncreased() must not trigger GC.
  DCHECK_EQ(saved_epoch, current_.epoch);
#endif  // DEBUG
}

void StatsCollector::NotifyFreedMemory(int64_t size) {
  memory_freed_bytes_since_end_of_marking_ += size;
#ifdef DEBUG
  const auto saved_epoch = current_.epoch;
#endif  // DEBUG
  ForAllAllocationObservers([size](AllocationObserver* observer) {
    observer->AllocatedSizeDecreased(static_cast<size_t>(size));
  });
#ifdef DEBUG
  // AllocatedSizeDecreased() must not trigger GC.
  DCHECK_EQ(saved_epoch, current_.epoch);
#endif  // DEBUG
}

void StatsCollector::IncrementDiscardedMemory(size_t value) {
  const size_t old =
      discarded_bytes_.fetch_add(value, std::memory_order_relaxed);
  DCHECK_GE(old + value, old);
  USE(old);
}

void StatsCollector::DecrementDiscardedMemory(size_t value) {
  const size_t old =
      discarded_bytes_.fetch_sub(value, std::memory_order_relaxed);
  DCHECK_GE(old, old - value);
  USE(old);
}

void StatsCollector::ResetDiscardedMemory() {
  discarded_bytes_.store(0, std::memory_order_relaxed);
}

size_t StatsCollector::discarded_memory_size() const {
  return discarded_bytes_.load(std::memory_order_relaxed);
}

size_t StatsCollector::resident_memory_size() const {
  const auto allocated = allocated_memory_size();
  const auto discarded = discarded_memory_size();
  DCHECK_IMPLIES(allocated == 0, discarded == 0);
  DCHECK_IMPLIES(allocated > 0, allocated > discarded);
  return allocated - discarded;
}
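
// Worked example (hypothetical numbers): with 8388608 bytes (8 MiB) allocated
// and 1048576 bytes (1 MiB) discarded, resident_memory_size() returns
// 8388608 - 1048576 = 7340032 bytes (7 MiB).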

void StatsCollector::RecordHistogramSample(ScopeId scope_id_,
                                           v8::base::TimeDelta time) {
  switch (scope_id_) {
    case kIncrementalMark: {
      MetricRecorder::MainThreadIncrementalMark event{time.InMicroseconds()};
      metric_recorder_->AddMainThreadEvent(event);
      break;
    }
    case kIncrementalSweep: {
      MetricRecorder::MainThreadIncrementalSweep event{time.InMicroseconds()};
      metric_recorder_->AddMainThreadEvent(event);
      break;
    }
    default:
      break;
  }
}
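
// Usage sketch (illustrative only; the caller and timing code are
// hypothetical, while kIncrementalMark is the real scope id handled above):
//
//   const v8::base::TimeTicks start = v8::base::TimeTicks::Now();
//   // ... run an incremental marking step ...
//   stats_collector->RecordHistogramSample(
//       StatsCollector::kIncrementalMark, v8::base::TimeTicks::Now() - start);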

}  // namespace internal
}  // namespace cppgc