v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
memory-reducer.cc
Go to the documentation of this file.
1// Copyright 2015 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
7#include "src/flags/flags.h"
9#include "src/heap/heap-inl.h"
11#include "src/init/v8.h"
12#include "src/utils/utils.h"
13
14namespace v8 {
15namespace internal {
16
17// Delay (ms) used when re-arming the timer while conditions to start a
17// memory-reducing GC are not yet met (see Step(), kWait/kTimer path).
17const int MemoryReducer::kLongDelayMs = 8000;
18// Delay (ms) between consecutive GCs of one memory-reducing sequence
18// (used when transitioning kRun -> kWait after a mark-compact).
18const int MemoryReducer::kShortDelayMs = 500;
19// If the last GC lies further back than this (ms), WatchdogGC() reports
19// that a GC is overdue and one may be forced from the timer path.
19const int MemoryReducer::kWatchdogDelayMs = 100000;
22
24 : heap_(heap),
25 taskrunner_(heap->GetForegroundTaskRunner()),
26 state_(State::CreateUninitialized()),
27 js_calls_counter_(0),
28 js_calls_sample_time_ms_(0.0) {
29 DCHECK(v8_flags.incremental_marking);
30 DCHECK(v8_flags.memory_reducer);
31}
32
34 : CancelableTask(memory_reducer->heap()->isolate()),
35 memory_reducer_(memory_reducer) {}
36
37
39 Heap* heap = memory_reducer_->heap();
40 // Set the current isolate such that trusted pointer tables etc are
41 // available and the cage base is set correctly for multi-cage mode.
42 SetCurrentIsolateScope isolate_scope(heap->isolate());
43
44 const double time_ms = heap->MonotonicallyIncreasingTimeInMs();
45 heap->allocator()->new_space_allocator()->FreeLinearAllocationArea();
46 heap->tracer()->SampleAllocation(base::TimeTicks::Now(),
47 heap->NewSpaceAllocationCounter(),
48 heap->OldGenerationAllocationCounter(),
49 heap->EmbedderAllocationCounter());
50 const bool low_allocation_rate = heap->HasLowAllocationRate();
51 const bool optimize_for_memory = heap->ShouldOptimizeForMemoryUsage();
52 if (v8_flags.trace_memory_reducer) {
53 heap->isolate()->PrintWithTimestamp(
54 "Memory reducer: %s, %s\n",
55 low_allocation_rate ? "low alloc" : "high alloc",
56 optimize_for_memory ? "background" : "foreground");
57 }
58 // The memory reducer will start incremental marking if
59 // 1) mutator is likely idle: js call rate is low and allocation rate is low.
60 // 2) mutator is in background: optimize for memory flag is set.
61 const Event event{
62 kTimer,
63 time_ms,
64 heap->CommittedOldGenerationMemory(),
65 false,
66 low_allocation_rate || optimize_for_memory,
67 heap->incremental_marking()->IsStopped() &&
68 heap->incremental_marking()->CanAndShouldBeStarted(),
70 };
71 memory_reducer_->NotifyTimer(event);
72}
73
74
76 if (state_.id() != kWait) return;
77 DCHECK_EQ(kTimer, event.type);
78 state_ = Step(state_, event);
79 if (state_.id() == kRun) {
80 DCHECK(heap()->incremental_marking()->IsStopped());
81 DCHECK(v8_flags.incremental_marking);
82 if (v8_flags.trace_memory_reducer) {
83 heap()->isolate()->PrintWithTimestamp("Memory reducer: started GC #%d\n",
85 }
86 GCFlags gc_flags = v8_flags.memory_reducer_favors_memory
89 heap()->StartIncrementalMarking(gc_flags,
92 } else if (state_.id() == kWait) {
93 // Re-schedule the timer.
95 if (v8_flags.trace_memory_reducer) {
96 heap()->isolate()->PrintWithTimestamp(
97 "Memory reducer: waiting for %.f ms\n",
99 }
100 }
101}
102
103void MemoryReducer::NotifyMarkCompact(size_t committed_memory_before) {
104 if (!v8_flags.incremental_marking) return;
105 const size_t committed_memory = heap()->CommittedOldGenerationMemory();
106
107 // Trigger one more GC if
108 // - this GC decreased committed memory,
109 // - there is high fragmentation,
110 const MemoryReducer::Event event{
113 committed_memory,
114 (committed_memory_before > committed_memory + MB) ||
116 false,
117 false,
118 IsFrozen(heap())};
119 const State old_state = state_;
120 state_ = Step(state_, event);
121 if (old_state.id() != kWait && state_.id() == kWait) {
122 // If we are transitioning to the WAIT state, start the timer.
123 ScheduleTimer(state_.next_gc_start_ms() - event.time_ms);
124 }
125 if (old_state.id() == kRun && v8_flags.trace_memory_reducer) {
126 heap()->isolate()->PrintWithTimestamp(
127 "Memory reducer: finished GC #%d (%s)\n", old_state.started_gcs(),
128 state_.id() == kWait ? "will do more" : "done");
129 }
130}
131
133 if (!v8_flags.incremental_marking) return;
136 0,
137 false,
138 false,
139 false,
140 IsFrozen(heap_)};
141 const Id old_action = state_.id();
142 state_ = Step(state_, event);
143 if (old_action != kWait && state_.id() == kWait) {
144 // If we are transitioning to the WAIT state, start the timer.
145 ScheduleTimer(state_.next_gc_start_ms() - event.time_ms);
146 }
147}
148
149bool MemoryReducer::WatchdogGC(const State& state, const Event& event) {
150  // A watchdog GC is only due once at least one GC has been recorded
150  // (last_gc_time_ms() == 0 means "never") and the most recent one lies
150  // further back than kWatchdogDelayMs.
150  const double last_gc_ms = state.last_gc_time_ms();
151  if (last_gc_ms == 0) return false;
151  return event.time_ms > last_gc_ms + kWatchdogDelayMs;
152}
153
154
155// For specification of this function see the comment for MemoryReducer class.
157 const Event& event) {
158 DCHECK(v8_flags.memory_reducer);
159 DCHECK(v8_flags.incremental_marking);
160
161 switch (state.id()) {
162 case kUninit:
163 case kDone:
164 if (event.type == kTimer) {
165 return state;
166 } else if (event.type == kMarkCompact) {
167 if (event.committed_memory <
168 std::max(
169 static_cast<size_t>(state.committed_memory_at_last_run() *
171 state.committed_memory_at_last_run() + kCommittedMemoryDelta)) {
172 return state;
173 } else {
174 return State::CreateWait(0, event.time_ms + kLongDelayMs,
175 event.time_ms);
176 }
177 } else {
179 return State::CreateWait(
180 0, event.time_ms + v8_flags.gc_memory_reducer_start_delay_ms,
181 state.last_gc_time_ms());
182 }
183 case kWait:
184 CHECK_LE(state.started_gcs(), MaxNumberOfGCs());
185 switch (event.type) {
186 case kPossibleGarbage:
187 return state;
188 case kTimer:
189 if (event.is_frozen || state.started_gcs() >= MaxNumberOfGCs()) {
190 return State::CreateDone(state.last_gc_time_ms(),
191 event.committed_memory);
192 } else if (event.can_start_incremental_gc &&
194 WatchdogGC(state, event))) {
195 if (state.next_gc_start_ms() <= event.time_ms) {
196 return State::CreateRun(state.started_gcs() + 1);
197 } else {
198 return state;
199 }
200 } else {
201 return State::CreateWait(state.started_gcs(),
202 event.time_ms + kLongDelayMs,
203 state.last_gc_time_ms());
204 }
205 case kMarkCompact:
206 return State::CreateWait(state.started_gcs(),
207 event.time_ms + kLongDelayMs, event.time_ms);
208 }
209 case kRun:
210 CHECK_LE(state.started_gcs(), MaxNumberOfGCs());
211 if (event.type == kMarkCompact) {
212 if (!event.is_frozen && state.started_gcs() < MaxNumberOfGCs() &&
214 state.started_gcs() == 1)) {
215 return State::CreateWait(state.started_gcs(),
216 event.time_ms + kShortDelayMs,
217 event.time_ms);
218 } else {
219 return State::CreateDone(event.time_ms, event.committed_memory);
220 }
221 } else {
222 return state;
223 }
224 }
225 UNREACHABLE();
226}
227
228// Posts a TimerTask that will fire after |delay_ms| (plus some slack).
228// No-op while the heap is tearing down.
228void MemoryReducer::ScheduleTimer(double delay_ms) {
229  DCHECK_LT(0, delay_ms);
230  if (heap()->IsTearingDown()) return;
231  // Pad the delay to absorb precision error in the task scheduler.
232  constexpr double kSlackMs = 100;
233  // PostDelayedTask expects seconds, the reducer works in milliseconds.
233  const double delay_s = (delay_ms + kSlackMs) / 1000.0;
233  taskrunner_->PostDelayedTask(std::make_unique<MemoryReducer::TimerTask>(this),
234                               delay_s);
235}
236
238
239// static
241 DCHECK_GT(v8_flags.memory_reducer_gc_count, 0);
242 return v8_flags.memory_reducer_gc_count;
243}
244
245// static
247 return v8_flags.memory_reducer_respects_frozen_state &&
248 heap->isolate()->IsFrozen();
249}
250
251} // namespace internal
252} // namespace v8
void PostDelayedTask(std::unique_ptr< Task > task, double delay_in_seconds, const SourceLocation &location=SourceLocation::Current())
static TimeTicks Now()
Definition time.cc:736
V8_EXPORT_PRIVATE void StartIncrementalMarking(GCFlags gc_flags, GarbageCollectionReason gc_reason, GCCallbackFlags gc_callback_flags=GCCallbackFlags::kNoGCCallbackFlags, GarbageCollector collector=GarbageCollector::MARK_COMPACTOR)
Definition heap.cc:1871
V8_EXPORT_PRIVATE double MonotonicallyIncreasingTimeInMs() const
Definition heap.cc:4098
bool HasHighFragmentation()
Definition heap.cc:3737
size_t CommittedOldGenerationMemory()
Definition heap.cc:350
Isolate * isolate() const
Definition heap-inl.h:61
static State CreateWait(int started_gcs, double next_gc_time_ms, double last_gc_time_ms)
static State CreateDone(double last_gc_time_ms, size_t committed_memory)
static State CreateRun(int started_gcs)
TimerTask(MemoryReducer *memory_reducer)
static bool IsFrozen(const Heap *heap)
std::shared_ptr< v8::TaskRunner > taskrunner_
static bool WatchdogGC(const State &state, const Event &event)
static const double kCommittedMemoryFactor
void NotifyTimer(const Event &event)
static const size_t kCommittedMemoryDelta
void ScheduleTimer(double delay_ms)
static State Step(const State &state, const Event &event)
void NotifyMarkCompact(size_t committed_memory_before)
static const int kWatchdogDelayMs
enum v8::internal::@1270::DeoptimizableCodeIterator::@67 state_
LiftoffAssembler::CacheState state
V8_EXPORT_PRIVATE FlagValues v8_flags
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage * MB
Definition flags.cc:2197
@ kGCCallbackFlagCollectAllExternalMemory
#define CHECK_LE(lhs, rhs)
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define DCHECK_GT(v1, v2)
Definition logging.h:487
Heap * heap_