// Annotated excerpts from cppgc's StatsCollector; elided code is marked
// with "// ...".

void StatsCollector::NotifyAllocation(size_t bytes) {
  // ...
#ifdef CPPGC_VERIFY_HEAP
  // Guards against overflow of the verification counter.
  DCHECK_GE(tracked_live_bytes_ + bytes, tracked_live_bytes_);
  tracked_live_bytes_ += bytes;
#endif  // CPPGC_VERIFY_HEAP
}

void StatsCollector::NotifyExplicitFree(size_t bytes) {
  // ...
#ifdef CPPGC_VERIFY_HEAP
  tracked_live_bytes_ -= bytes;
#endif  // CPPGC_VERIFY_HEAP
}
// In StatsCollector::NotifyMarkingStarted(CollectionType, MarkingType,
// IsForcedGC):
  // ...
  // Each cycle gets a unique, monotonically increasing epoch from a
  // process-wide atomic counter.
  static std::atomic<size_t> epoch_counter{0};
  epoch = epoch_counter.fetch_add(1);
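// The pattern above in isolation: a function-local atomic counter hands out
// process-wide unique, monotonically increasing GC epochs without locks. A
// minimal standalone sketch (illustrative names, not V8 code):
#include <atomic>
#include <cstddef>

size_t NextGCEpoch() {
  static std::atomic<size_t> epoch_counter{0};
  // fetch_add() returns the pre-increment value, so the first epoch is 0.
  return epoch_counter.fetch_add(1);
}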
// Elsewhere in StatsCollector (verification-only bookkeeping):
#ifdef CPPGC_VERIFY_HEAP
  // ...
#endif  // CPPGC_VERIFY_HEAP
// Sums a cycle's phase durations for the metric recorder.
int64_t SumPhases(const MetricRecorder::GCCycle::Phases& phases) {
  // ...
  DCHECK_LE(0, phases.compact_duration_us);
  return phases.mark_duration_us + phases.weak_duration_us +
         phases.compact_duration_us + phases.sweep_duration_us;
}
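// Worked example (assumed numbers): mark = 300us, weak = 50us, compact = 0us,
// sweep = 150us gives SumPhases(phases) == 500, the cycle's summed phase time
// in microseconds.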
MetricRecorder::GCCycle GetCycleEventForMetricRecorder(
    CollectionType type, StatsCollector::MarkingType marking_type,
    StatsCollector::SweepingType sweeping_type, int64_t atomic_mark_us,
    int64_t atomic_weak_us, int64_t atomic_compact_us, int64_t atomic_sweep_us,
    int64_t incremental_mark_us, int64_t incremental_sweep_us,
    int64_t concurrent_mark_us, int64_t concurrent_sweep_us,
    int64_t objects_before_bytes, int64_t objects_after_bytes,
    int64_t objects_freed_bytes, int64_t memory_before_bytes,
    int64_t memory_after_bytes, int64_t memory_freed_bytes) {
  MetricRecorder::GCCycle event;
  event.type = (type == CollectionType::kMajor)
                   ? MetricRecorder::GCCycle::Type::kMajor
                   : MetricRecorder::GCCycle::Type::kMinor;

  // Incremental durations are only meaningful when the corresponding phase
  // did not run atomically; -1 marks "not applicable".
  event.main_thread_incremental.mark_duration_us =
      marking_type != StatsCollector::MarkingType::kAtomic ? incremental_mark_us
                                                           : -1;
  event.main_thread_incremental.sweep_duration_us =
      sweeping_type != StatsCollector::SweepingType::kAtomic
          ? incremental_sweep_us
          : -1;
  // Atomic phases, main thread only.
  event.main_thread_atomic.mark_duration_us = atomic_mark_us;
  event.main_thread_atomic.weak_duration_us = atomic_weak_us;
  event.main_thread_atomic.compact_duration_us = atomic_compact_us;
  event.main_thread_atomic.sweep_duration_us = atomic_sweep_us;

  // Main thread = atomic + incremental parts of each phase.
  event.main_thread.mark_duration_us =
      event.main_thread_atomic.mark_duration_us + incremental_mark_us;
  event.main_thread.weak_duration_us =
      event.main_thread_atomic.weak_duration_us;
  event.main_thread.compact_duration_us =
      event.main_thread_atomic.compact_duration_us;
  event.main_thread.sweep_duration_us =
      event.main_thread_atomic.sweep_duration_us + incremental_sweep_us;

  // Total = main thread + concurrent parts.
  event.total.mark_duration_us =
      event.main_thread.mark_duration_us + concurrent_mark_us;
  event.total.weak_duration_us = event.main_thread.weak_duration_us;
  event.total.compact_duration_us = event.main_thread.compact_duration_us;
  event.total.sweep_duration_us =
      event.main_thread.sweep_duration_us + concurrent_sweep_us;
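  // Worked example (assumed numbers): atomic_mark_us = 1000,
  // incremental_mark_us = 500, concurrent_mark_us = 2000 yields
  // main_thread.mark_duration_us == 1500 and total.mark_duration_us == 3500.
  // Weak processing and compaction only run atomically on the main thread,
  // so their totals equal the atomic values.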
  event.objects.before_bytes = objects_before_bytes;
  event.objects.after_bytes = objects_after_bytes;
  event.objects.freed_bytes = objects_freed_bytes;

  event.memory.before_bytes = memory_before_bytes;
  event.memory.after_bytes = memory_after_bytes;
  event.memory.freed_bytes = memory_freed_bytes;
  if (event.objects.before_bytes == 0) {
    event.collection_rate_in_percent = 0;
  } else {
    event.collection_rate_in_percent =
        static_cast<double>(event.objects.freed_bytes) /
        event.objects.before_bytes;
  }
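  // Worked example (assumed numbers): objects_before_bytes = 1048576 and
  // objects_freed_bytes = 262144 gives collection_rate_in_percent == 0.25.
  // Despite the field name, the stored value is the freed fraction, not a
  // value scaled by 100.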
  if (event.objects.freed_bytes == 0) {
    event.efficiency_in_bytes_per_us = 0;
    event.main_thread_efficiency_in_bytes_per_us = 0;
  } else {
    // ...
    event.efficiency_in_bytes_per_us =
        static_cast<double>(event.objects.freed_bytes) / SumPhases(event.total);
    event.main_thread_efficiency_in_bytes_per_us =
        static_cast<double>(event.objects.freed_bytes) /
        SumPhases(event.main_thread);
  }
  return event;
}
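// Worked example (assumed numbers): objects_freed_bytes = 500000 and
// SumPhases(event.total) = 2500us gives efficiency_in_bytes_per_us == 200.
// Since main-thread time is at most total time, the main-thread efficiency
// is always at least as large as the overall efficiency.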
// In StatsCollector::marking_time() const: marking time is the atomic and
// incremental main-thread portions plus the concurrently tracked portion.
  // ...
  return event.scope_data[kAtomicMark] + event.scope_data[kIncrementalMark] +
         v8::base::TimeDelta::FromMicroseconds(v8::base::Relaxed_Load(
             &event.concurrent_scope_data[kConcurrentMark]));
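// concurrent_scope_data[] is updated by concurrent marker/sweeper threads, so
// the reader above loads it with a relaxed atomic. A portable standalone
// sketch of the same pattern using std::atomic instead of v8::base's
// Relaxed_Load (illustrative names, not V8 code):
#include <atomic>
#include <cstdint>

std::atomic<int64_t> g_concurrent_mark_us{0};

// Concurrent marker threads accumulate their elapsed time.
void AddConcurrentMarkTime(int64_t us) {
  g_concurrent_mark_us.fetch_add(us, std::memory_order_relaxed);
}

// The main thread samples the counter for stats; relaxed ordering suffices
// because no other memory is published through this value.
int64_t LoadConcurrentMarkUs() {
  return g_concurrent_mark_us.load(std::memory_order_relaxed);
}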
// In StatsCollector::resident_memory_size() const: resident memory is
// allocated memory minus memory discarded back to the OS.
  // ...
  return allocated - discarded;
// In StatsCollector::RecordHistogramSample(ScopeId scope_id,
// v8::base::TimeDelta time): the incremental scopes get dedicated events.
  switch (scope_id) {
    case kIncrementalMark: {
      // ...
    }
    case kIncrementalSweep: {
      // ...
    }
    // ...
  }
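// A sketch of what each case plausibly forwards, assuming MetricRecorder
// exposes per-event AddMainThreadEvent() overloads as in cppgc's
// metric-recorder.h (the actual bodies are elided above):
//
//   case kIncrementalMark: {
//     MetricRecorder::MainThreadIncrementalMark event{time.InMicroseconds()};
//     metric_recorder_->AddMainThreadEvent(event);
//     break;
//   }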