V8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
stats-collector.h
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_CPPGC_STATS_COLLECTOR_H_
#define V8_HEAP_CPPGC_STATS_COLLECTOR_H_

#include <stddef.h>
#include <stdint.h>

#include <algorithm>
#include <atomic>
#include <vector>

#include "src/base/atomicops.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/platform/time.h"
#include "src/heap/cppgc/garbage-collector.h"
#include "src/heap/cppgc/metric-recorder.h"
#include "src/heap/cppgc/trace-event.h"

namespace cppgc {
namespace internal {

// Histogram scopes contribute to histogram as well as to traces and metrics.
// Other scopes contribute only to traces and metrics.
#define CPPGC_FOR_ALL_HISTOGRAM_SCOPES(V) \
  V(AtomicMark) \
  V(AtomicWeak) \
  V(AtomicCompact) \
  V(AtomicSweep) \
  V(IncrementalMark) \
  V(IncrementalSweep)

#define CPPGC_FOR_ALL_SCOPES(V) \
  V(Unmark) \
  V(MarkIncrementalStart) \
  V(MarkIncrementalFinalize) \
  V(MarkAtomicPrologue) \
  V(MarkAtomicEpilogue) \
  V(MarkTransitiveClosure) \
  V(MarkTransitiveClosureWithDeadline) \
  V(MarkFlushEphemerons) \
  V(MarkOnAllocation) \
  V(MarkProcessBailOutObjects) \
  V(MarkProcessMarkingWorklist) \
  V(MarkProcessRetraceWorklist) \
  V(MarkProcessWriteBarrierWorklist) \
  V(MarkProcessNotFullyconstructedWorklist) \
  V(MarkProcessEphemerons) \
  V(MarkVisitRoots) \
  V(MarkVisitNotFullyConstructedObjects) \
  V(MarkVisitPersistents) \
  V(MarkVisitCrossThreadPersistents) \
  V(MarkVisitStack) \
  V(MarkVisitRememberedSets) \
  V(WeakContainerCallbacksProcessing) \
  V(CustomCallbacksProcessing) \
  V(SweepEmptyPages) \
  V(SweepFinish) \
  V(SweepFinalizeEmptyPages) \
  V(SweepFinalizeSweptPages) \
  V(SweepFinishIfOutOfWork) \
  V(SweepInvokePreFinalizers) \
  V(SweepInLowPriorityTask) \
  V(SweepInTask) \
  V(SweepInTaskForStatistics) \
  V(SweepOnAllocation) \
  V(SweepPages)

#define CPPGC_FOR_ALL_HISTOGRAM_CONCURRENT_SCOPES(V) \
  V(ConcurrentMark) \
  V(ConcurrentSweep) \
  V(ConcurrentWeakCallback)

#define CPPGC_FOR_ALL_CONCURRENT_SCOPES(V) \
  V(ConcurrentMarkProcessEphemeronWorklist) \
  V(ConcurrentMarkProcessMarkingWorklist) \
  V(ConcurrentMarkProcessNotFullyconstructedWorklist) \
  V(ConcurrentMarkProcessWriteBarrierWorklist)

// Sink for various time and memory statistics.
class V8_EXPORT_PRIVATE StatsCollector final {
  using IsForcedGC = GCConfig::IsForcedGC;

 public:
  using MarkingType = GCConfig::MarkingType;
  using SweepingType = GCConfig::SweepingType;

#if defined(CPPGC_DECLARE_ENUM)
  static_assert(false, "CPPGC_DECLARE_ENUM macro is already defined");
#endif

  enum ScopeId {
#define CPPGC_DECLARE_ENUM(name) k##name,
    CPPGC_FOR_ALL_HISTOGRAM_SCOPES(CPPGC_DECLARE_ENUM)
        kNumHistogramScopeIds,
    CPPGC_FOR_ALL_SCOPES(CPPGC_DECLARE_ENUM)
#undef CPPGC_DECLARE_ENUM
        kNumScopeIds,
  };

  enum ConcurrentScopeId {
#define CPPGC_DECLARE_ENUM(name) k##name,
    CPPGC_FOR_ALL_HISTOGRAM_CONCURRENT_SCOPES(CPPGC_DECLARE_ENUM)
        kNumHistogramConcurrentScopeIds,
    CPPGC_FOR_ALL_CONCURRENT_SCOPES(CPPGC_DECLARE_ENUM)
#undef CPPGC_DECLARE_ENUM
        kNumConcurrentScopeIds
  };

  // POD to hold interesting data accumulated during a garbage collection cycle.
  //
  // The event is always fully populated when looking at previous events but
  // may only be partially populated when looking at the current event.
  struct Event final {
    V8_EXPORT_PRIVATE explicit Event();

    v8::base::TimeDelta scope_data[kNumHistogramScopeIds];
    v8::base::AtomicWord concurrent_scope_data[kNumHistogramConcurrentScopeIds]{
        0};

    size_t epoch = -1;
    CollectionType collection_type = CollectionType::kMajor;
    MarkingType marking_type = MarkingType::kAtomic;
    SweepingType sweeping_type = SweepingType::kAtomic;
    IsForcedGC is_forced_gc = IsForcedGC::kNotForced;
    // Marked bytes collected during marking.
    size_t marked_bytes = 0;
    size_t object_size_before_sweep_bytes = -1;
    size_t memory_size_before_sweep_bytes = -1;
  };

 private:
#if defined(CPPGC_CASE)
  static_assert(false, "CPPGC_CASE macro is already defined");
#endif

  constexpr static const char* GetScopeName(ScopeId id, CollectionType type) {
    switch (id) {
#define CPPGC_CASE(name)                                   \
  case k##name:                                            \
    return type == CollectionType::kMajor ? "CppGC." #name \
                                          : "CppGC." #name ".Minor";
      CPPGC_FOR_ALL_HISTOGRAM_SCOPES(CPPGC_CASE)
      CPPGC_FOR_ALL_SCOPES(CPPGC_CASE)
#undef CPPGC_CASE
      default:
        return nullptr;
    }
  }

  constexpr static const char* GetScopeName(ConcurrentScopeId id,
                                            CollectionType type) {
    switch (id) {
#define CPPGC_CASE(name)                                   \
  case k##name:                                            \
    return type == CollectionType::kMajor ? "CppGC." #name \
                                          : "CppGC." #name ".Minor";
      CPPGC_FOR_ALL_HISTOGRAM_CONCURRENT_SCOPES(CPPGC_CASE)
      CPPGC_FOR_ALL_CONCURRENT_SCOPES(CPPGC_CASE)
#undef CPPGC_CASE
      default:
        return nullptr;
    }
  }

  enum TraceCategory { kEnabled, kDisabled };
  enum ScopeContext { kMutatorThread, kConcurrentThread };

  // Trace a particular scope. Will emit a trace event and record the time in
  // the corresponding StatsCollector.
  template <TraceCategory trace_category, ScopeContext scope_category>
  class V8_NODISCARD InternalScope {
    using ScopeIdType = std::conditional_t<scope_category == kMutatorThread,
                                           ScopeId, ConcurrentScopeId>;

   public:
    template <typename... Args>
    InternalScope(StatsCollector* stats_collector, ScopeIdType scope_id,
                  Args... args)
        : stats_collector_(stats_collector),
          start_time_(v8::base::TimeTicks::Now()),
          scope_id_(scope_id) {
      DCHECK_LE(0, scope_id_);
      DCHECK_LT(static_cast<int>(scope_id_),
                scope_category == kMutatorThread
                    ? static_cast<int>(kNumScopeIds)
                    : static_cast<int>(kNumConcurrentScopeIds));
      DCHECK_NE(static_cast<int>(scope_id_),
                scope_category == kMutatorThread
                    ? static_cast<int>(kNumHistogramScopeIds)
                    : static_cast<int>(kNumHistogramConcurrentScopeIds));
      StartTrace(args...);
    }

    ~InternalScope() {
      StopTrace();
      IncreaseScopeTime();
    }

    InternalScope(const InternalScope&) = delete;
    InternalScope& operator=(const InternalScope&) = delete;

    void DecreaseStartTimeForTesting(v8::base::TimeDelta delta) {
      start_time_ -= delta;
    }

   private:
    void* operator new(size_t, void*) = delete;
    void* operator new(size_t) = delete;

    inline constexpr static const char* TraceCategory();

    template <typename... Args>
    inline void StartTrace(Args... args);
    inline void StopTrace();

    inline void StartTraceImpl();
    template <typename Value1>
    inline void StartTraceImpl(const char* k1, Value1 v1);
    template <typename Value1, typename Value2>
    inline void StartTraceImpl(const char* k1, Value1 v1, const char* k2,
                               Value2 v2);
    inline void StopTraceImpl();

    inline void IncreaseScopeTime();

    StatsCollector* stats_collector_;
    v8::base::TimeTicks start_time_;
    const ScopeIdType scope_id_;
  };

 public:
  using DisabledScope = InternalScope<kDisabled, kMutatorThread>;
  using EnabledScope = InternalScope<kEnabled, kMutatorThread>;
  using DisabledConcurrentScope = InternalScope<kDisabled, kConcurrentThread>;
  using EnabledConcurrentScope = InternalScope<kEnabled, kConcurrentThread>;

  // Observer for allocated object size. May e.g. be used to implement heap
  // growing heuristics. Observers may register/unregister observers at any
  // time when being invoked.
  class AllocationObserver {
   public:
    // Called after observing at least
    // StatsCollector::kAllocationThresholdBytes changed bytes through
    // allocation or explicit free. Reports both, negative and positive
    // increments, to allow observer to decide whether absolute values or only
    // the deltas is interesting.
    //
    // May trigger GC.
    virtual void AllocatedObjectSizeIncreased(size_t) {}
    virtual void AllocatedObjectSizeDecreased(size_t) {}

    // Called when the exact size of allocated object size is known. In
    // practice, this is after marking when marked bytes == allocated bytes.
    //
    // Must not trigger GC synchronously.
    virtual void ResetAllocatedObjectSize(size_t) {}

    // Called upon allocating/releasing chunks of memory (e.g. pages) that can
    // contain objects.
    //
    // Must not trigger GC.
    virtual void AllocatedSizeIncreased(size_t) {}
    virtual void AllocatedSizeDecreased(size_t) {}
  };

  // Observers are implemented using virtual calls. Avoid notifications below
  // reasonably interesting sizes.
  static constexpr size_t kAllocationThresholdBytes = 1024;

  explicit StatsCollector(Platform*);
  StatsCollector(const StatsCollector&) = delete;
  StatsCollector& operator=(const StatsCollector&) = delete;

  void RegisterObserver(AllocationObserver*);
  void UnregisterObserver(AllocationObserver*);

  void NotifyAllocation(size_t);
  void NotifyExplicitFree(size_t);
  // Safepoints should only be invoked when garbage collections are possible.
  // This is necessary as increments and decrements are reported as close to
  // their actual allocation/reclamation as possible.
  void NotifySafePointForConservativeCollection();

  void NotifySafePointForTesting();

  // Indicates a new garbage collection cycle. The phase is optional and is only
  // used for major GC when generational GC is enabled.
  void NotifyUnmarkingStarted(CollectionType);
  // Indicates a new minor garbage collection cycle or a major, if generational
  // GC is not enabled.
  void NotifyMarkingStarted(CollectionType, MarkingType, IsForcedGC);
  // Indicates that marking of the current garbage collection cycle is
  // completed.
  void NotifyMarkingCompleted(size_t marked_bytes);
  // Indicates the end of a garbage collection cycle. This means that sweeping
  // is finished at this point.
  void NotifySweepingCompleted(SweepingType);

  size_t allocated_memory_size() const;
  // Size of live objects in bytes on the heap. Based on the most recent marked
  // bytes and the bytes allocated since last marking.
  size_t allocated_object_size() const;

  // Returns the overall marked bytes count, i.e. if young generation is
  // enabled, it returns the accumulated number. Should not be called during
  // marking.
  size_t marked_bytes() const;

  // Returns the marked bytes for the current cycle. Should only be called
  // within GC cycle.
  size_t marked_bytes_on_current_cycle() const;

  // Returns the overall duration of the most recent marking phase. Should not
  // be called during marking.
  v8::base::TimeDelta marking_time() const;

  double GetRecentAllocationSpeedInBytesPerMs() const;

  const Event& GetPreviousEventForTesting() const { return previous_; }

  void NotifyAllocatedMemory(int64_t);
  void NotifyFreedMemory(int64_t);

  void IncrementDiscardedMemory(size_t);
  void DecrementDiscardedMemory(size_t);
  void ResetDiscardedMemory();
  size_t discarded_memory_size() const;
  size_t resident_memory_size() const;

  void SetMetricRecorder(std::unique_ptr<MetricRecorder> histogram_recorder) {
    metric_recorder_ = std::move(histogram_recorder);
  }

  MetricRecorder* GetMetricRecorder() const { return metric_recorder_.get(); }

 private:
  enum class GarbageCollectionState : uint8_t {
    kNotRunning,
    kUnmarking,
    kMarking,
    kSweeping
  };

  void RecordHistogramSample(ScopeId, v8::base::TimeDelta);
  void RecordHistogramSample(ConcurrentScopeId, v8::base::TimeDelta) {}

  // Invokes |callback| for all registered observers.
  template <typename Callback>
  void ForAllAllocationObservers(Callback callback);

  void AllocatedObjectSizeSafepointImpl();

  // Allocated bytes since the end of marking. These bytes are reset after
  // marking as they are accounted in marked_bytes then. May be negative in case
  // an object was explicitly freed that was marked as live in the previous
  // cycle.
  int64_t allocated_bytes_since_end_of_marking_ = 0;
  v8::base::TimeTicks time_of_last_end_of_marking_ = v8::base::TimeTicks::Now();
  // Counters for allocation and free. The individual values are never negative
  // but their delta may be because of the same reason the overall
  // allocated_bytes_since_end_of_marking_ may be negative. Keep integer
  // arithmetic for simplicity.
  int64_t allocated_bytes_since_safepoint_ = 0;
  int64_t explicitly_freed_bytes_since_safepoint_ = 0;
#ifdef CPPGC_VERIFY_HEAP
  // Tracks live bytes for overflows.
  size_t tracked_live_bytes_ = 0;
#endif  // CPPGC_VERIFY_HEAP

  // The number of bytes marked so far. For young generation (with sticky bits)
  // keeps track of marked bytes across multiple GC cycles.
  size_t marked_bytes_so_far_ = 0;

  int64_t memory_allocated_bytes_ = 0;
  int64_t memory_freed_bytes_since_end_of_marking_ = 0;
  std::atomic<size_t> discarded_bytes_{0};

  // vector to allow fast iteration of observers. Register/Unregisters only
  // happens on startup/teardown.
  std::vector<AllocationObserver*> allocation_observers_;
  bool allocation_observer_deleted_ = false;

  GarbageCollectionState gc_state_ = GarbageCollectionState::kNotRunning;

  // The event being filled by the current GC cycle between NotifyMarkingStarted
  // and NotifySweepingFinished.
  Event current_;
  // The previous GC event which is populated at NotifySweepingFinished.
  Event previous_;

  std::unique_ptr<MetricRecorder> metric_recorder_;

  // |platform_| is used by the TRACE_EVENT_* macros.
  Platform* platform_;
};

template <typename Callback>
void StatsCollector::ForAllAllocationObservers(Callback callback) {
  // Iterate using indices to allow push_back() of new observers.
  for (size_t i = 0; i < allocation_observers_.size(); ++i) {
    auto* observer = allocation_observers_[i];
    if (observer) {
      callback(observer);
    }
  }
  if (allocation_observer_deleted_) {
    allocation_observers_.erase(
        std::remove(allocation_observers_.begin(), allocation_observers_.end(),
                    nullptr),
        allocation_observers_.end());
    allocation_observer_deleted_ = false;
  }
}

template <StatsCollector::TraceCategory trace_category,
          StatsCollector::ScopeContext scope_category>
constexpr const char*
StatsCollector::InternalScope<trace_category, scope_category>::TraceCategory() {
  switch (trace_category) {
    case kEnabled:
      return "cppgc";
    case kDisabled:
      return TRACE_DISABLED_BY_DEFAULT("cppgc");
  }
}

template <StatsCollector::TraceCategory trace_category,
          StatsCollector::ScopeContext scope_category>
template <typename... Args>
void StatsCollector::InternalScope<trace_category, scope_category>::StartTrace(
    Args... args) {
  // Top level scopes that contribute to histogram should always be enabled.
  DCHECK_IMPLIES(static_cast<int>(scope_id_) <
                     (scope_category == kMutatorThread
                          ? static_cast<int>(kNumHistogramScopeIds)
                          : static_cast<int>(kNumHistogramConcurrentScopeIds)),
                 trace_category == StatsCollector::TraceCategory::kEnabled);
  StartTraceImpl(args...);
}

template <StatsCollector::TraceCategory trace_category,
          StatsCollector::ScopeContext scope_category>
void StatsCollector::InternalScope<trace_category,
                                   scope_category>::StopTrace() {
  StopTraceImpl();
}

template <StatsCollector::TraceCategory trace_category,
          StatsCollector::ScopeContext scope_category>
void StatsCollector::InternalScope<trace_category,
                                   scope_category>::StartTraceImpl() {
  TRACE_EVENT_BEGIN0(
      TraceCategory(),
      GetScopeName(scope_id_, stats_collector_->current_.collection_type));
}

template <StatsCollector::TraceCategory trace_category,
          StatsCollector::ScopeContext scope_category>
template <typename Value1>
void StatsCollector::InternalScope<
    trace_category, scope_category>::StartTraceImpl(const char* k1, Value1 v1) {
  TRACE_EVENT_BEGIN1(
      TraceCategory(),
      GetScopeName(scope_id_, stats_collector_->current_.collection_type), k1,
      v1);
}

template <StatsCollector::TraceCategory trace_category,
          StatsCollector::ScopeContext scope_category>
template <typename Value1, typename Value2>
void StatsCollector::InternalScope<
    trace_category, scope_category>::StartTraceImpl(const char* k1, Value1 v1,
                                                    const char* k2, Value2 v2) {
  TRACE_EVENT_BEGIN2(
      TraceCategory(),
      GetScopeName(scope_id_, stats_collector_->current_.collection_type), k1,
      v1, k2, v2);
}

template <StatsCollector::TraceCategory trace_category,
          StatsCollector::ScopeContext scope_category>
void StatsCollector::InternalScope<trace_category,
                                   scope_category>::StopTraceImpl() {
  TRACE_EVENT_END2(
      TraceCategory(),
      GetScopeName(scope_id_, stats_collector_->current_.collection_type),
      "epoch", stats_collector_->current_.epoch, "forced",
      stats_collector_->current_.is_forced_gc == IsForcedGC::kForced);
}

template <StatsCollector::TraceCategory trace_category,
          StatsCollector::ScopeContext scope_category>
void StatsCollector::InternalScope<trace_category,
                                   scope_category>::IncreaseScopeTime() {
  DCHECK_NE(GarbageCollectionState::kNotRunning, stats_collector_->gc_state_);
  // Only record top level scopes.
  if (static_cast<int>(scope_id_) >=
      (scope_category == kMutatorThread
           ? static_cast<int>(kNumHistogramScopeIds)
           : static_cast<int>(kNumHistogramConcurrentScopeIds)))
    return;
  v8::base::TimeDelta time = v8::base::TimeTicks::Now() - start_time_;
  if (scope_category == StatsCollector::ScopeContext::kMutatorThread) {
    stats_collector_->current_.scope_data[scope_id_] += time;
    if (stats_collector_->metric_recorder_)
      stats_collector_->RecordHistogramSample(scope_id_, time);
    return;
  }
  // scope_category == StatsCollector::ScopeContext::kConcurrentThread
  using AtomicWord = v8::base::AtomicWord;
  const int64_t us = time.InMicroseconds();
  v8::base::Relaxed_AtomicIncrement(
      &stats_collector_->current_.concurrent_scope_data[scope_id_],
      static_cast<AtomicWord>(us));
}

}  // namespace internal
}  // namespace cppgc

#endif  // V8_HEAP_CPPGC_STATS_COLLECTOR_H_
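The header is declaration-heavy, so a short usage sketch may help. The code below is illustrative only and is not taken from the V8 tree; GrowingHeuristic and RunAtomicMarkingPause are hypothetical names. It shows the two client-facing pieces: an AllocationObserver registered via RegisterObserver() is notified once at least kAllocationThresholdBytes of allocation change has accumulated, and an EnabledScope times a mutator-thread GC phase, emitting a "CppGC.AtomicMark" trace event and, on destruction, adding the elapsed time to the current cycle's Event::scope_data.

// Illustrative sketch only -- not part of stats-collector.h or the V8 tree.
// GrowingHeuristic and RunAtomicMarkingPause are hypothetical names; only the
// public StatsCollector API declared above is assumed.
namespace cppgc {
namespace internal {

class GrowingHeuristic final : public StatsCollector::AllocationObserver {
 public:
  // Invoked only after at least StatsCollector::kAllocationThresholdBytes of
  // accumulated change, which keeps per-allocation overhead low.
  void AllocatedObjectSizeIncreased(size_t delta) final {
    bytes_allocated_since_gc_ += static_cast<int64_t>(delta);
    // A real heuristic could schedule a GC here once a limit is exceeded.
  }
  void AllocatedObjectSizeDecreased(size_t delta) final {
    bytes_allocated_since_gc_ -= static_cast<int64_t>(delta);
  }
  // Called once marking has established the exact live size.
  void ResetAllocatedObjectSize(size_t live_bytes) final {
    bytes_allocated_since_gc_ = 0;
    live_bytes_after_last_gc_ = live_bytes;
  }

 private:
  int64_t bytes_allocated_since_gc_ = 0;
  size_t live_bytes_after_last_gc_ = 0;
};

void RunAtomicMarkingPause(StatsCollector* stats_collector) {
  // Emits a "CppGC.AtomicMark" (or "CppGC.AtomicMark.Minor") trace event pair
  // and, on destruction, adds the elapsed time to scope_data[kAtomicMark] of
  // the current cycle's Event.
  StatsCollector::EnabledScope stats_scope(stats_collector,
                                           StatsCollector::kAtomicMark);
  // ... perform atomic marking ...
}

}  // namespace internal
}  // namespace cppgc

In this sketch the observer would be hooked up once during heap setup with stats_collector->RegisterObserver(&heuristic) and removed with UnregisterObserver(&heuristic) on teardown, matching the header's note that registration only happens at startup/teardown.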