v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
pretenuring-handler.cc
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/pretenuring-handler.h"

#include "src/execution/isolate.h"
#include "src/flags/flags.h"
#include "src/handles/global-handles-inl.h"
#include "src/heap/gc-tracer-inl.h"
#include "src/heap/heap-layout-inl.h"
#include "src/heap/new-spaces.h"
#include "src/objects/allocation-site-inl.h"

namespace v8 {
namespace internal {

PretenuringHandler::PretenuringHandler(Heap* heap)
    : heap_(heap), global_pretenuring_feedback_(kInitialFeedbackCapacity) {}

PretenuringHandler::~PretenuringHandler() = default;

namespace {
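
// Minimum number of allocation mementos a site must have produced before its
// feedback is considered meaningful (see DigestPretenuringFeedback below).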
static constexpr int kMinMementoCount = 100;

double GetPretenuringRatioThreshold(size_t new_space_capacity) {
  static constexpr double kScavengerPretenureRatio = 0.85;
  // MinorMS allows for a much larger new space, thus we require a lower
  // survival rate for pretenuring.
  static constexpr double kMinorMSPretenureMaxRatio = 0.8;
  static constexpr double kMinorMSMinCapacity = 16 * MB;
  if (!v8_flags.minor_ms) return kScavengerPretenureRatio;
  if (new_space_capacity <= kMinorMSMinCapacity)
    return kMinorMSPretenureMaxRatio;
  // When capacity is 64MB, the pretenuring ratio would be 0.2.
  return kMinorMSPretenureMaxRatio * kMinorMSMinCapacity / new_space_capacity;
}
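
// Worked examples for the scaling above (constants as defined): with
// --minor-ms an 8MB new space yields a threshold of 0.8, a 32MB space
// yields 0.8 * 16 / 32 = 0.4, and a 64MB space yields the 0.2 mentioned in
// the comment; without the flag the threshold is always 0.85.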

inline bool MakePretenureDecision(
    Tagged<AllocationSite> site,
    AllocationSite::PretenureDecision current_decision, double ratio,
    bool new_space_capacity_was_above_pretenuring_threshold,
    size_t new_space_capacity) {
  // Here we just allow state transitions from undecided or maybe tenure
  // to don't tenure, maybe tenure, or tenure.
  if ((current_decision == AllocationSite::kUndecided ||
       current_decision == AllocationSite::kMaybeTenure)) {
    if (ratio >= GetPretenuringRatioThreshold(new_space_capacity)) {
      // We only transition into tenure state when the semi-space was at
      // maximum capacity.
      if (new_space_capacity_was_above_pretenuring_threshold) {
        site->set_deopt_dependent_code(true);
        site->set_pretenure_decision(AllocationSite::kTenure);
        // Currently we just need to deopt when we make a state transition to
        // tenure.
        return true;
      }
      site->set_pretenure_decision(AllocationSite::kMaybeTenure);
    } else {
      site->set_pretenure_decision(AllocationSite::kDontTenure);
    }
  }
  return false;
}

// Clear feedback calculation fields until the next gc.
inline void ResetPretenuringFeedback(Tagged<AllocationSite> site) {
  site->set_memento_found_count(0);
  site->set_memento_create_count(0);
}
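
// Computes the found/created memento ratio for a site and feeds it into
// MakePretenureDecision(). For example, 70 of 150 created mementos found
// gives a ratio of ~0.47: below the 0.85 scavenger threshold, but above the
// 0.4 threshold of a 32MB minor-MS new space.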

inline bool DigestPretenuringFeedback(
    Isolate* isolate, Tagged<AllocationSite> site,
    bool new_space_capacity_was_above_pretenuring_threshold,
    size_t new_space_capacity) {
  bool deopt = false;
  int create_count = site->memento_create_count();
  int found_count = site->memento_found_count();
  bool minimum_mementos_created = create_count >= kMinMementoCount;
  double ratio =
      minimum_mementos_created || v8_flags.trace_pretenuring_statistics
          ? static_cast<double>(found_count) / create_count
          : 0.0;
  AllocationSite::PretenureDecision current_decision =
      site->pretenure_decision();

  if (minimum_mementos_created) {
    deopt = MakePretenureDecision(
        site, current_decision, ratio,
        new_space_capacity_was_above_pretenuring_threshold, new_space_capacity);
  }

  if (V8_UNLIKELY(v8_flags.trace_pretenuring_statistics)) {
    PrintIsolate(isolate,
                 "pretenuring: AllocationSite(%p): (created, found, ratio) "
                 "(%d, %d, %f) %s => %s\n",
                 reinterpret_cast<void*>(site.ptr()), create_count, found_count,
                 ratio, site->PretenureDecisionName(current_decision),
                 site->PretenureDecisionName(site->pretenure_decision()));
  }

  ResetPretenuringFeedback(site);
  return deopt;
}
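
// Forces a site straight to kTenure, bypassing memento feedback; used for
// sites queued via PretenureAllocationSiteOnNextCollection().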

bool PretenureAllocationSiteManually(Isolate* isolate,
                                     Tagged<AllocationSite> site) {
  AllocationSite::PretenureDecision current_decision =
      site->pretenure_decision();
  bool deopt = true;
  if (current_decision == AllocationSite::kUndecided ||
      current_decision == AllocationSite::kMaybeTenure) {
    site->set_deopt_dependent_code(true);
    site->set_pretenure_decision(AllocationSite::kTenure);
  } else {
    deopt = false;
  }
  if (v8_flags.trace_pretenuring_statistics) {
    PrintIsolate(isolate,
                 "pretenuring manually requested: AllocationSite(%p): "
                 "%s => %s\n",
                 reinterpret_cast<void*>(site.ptr()),
                 site->PretenureDecisionName(current_decision),
                 site->PretenureDecisionName(site->pretenure_decision()));
  }

  ResetPretenuringFeedback(site);
  return deopt;
}

}  // namespace

// static
int PretenuringHandler::GetMinMementoCountForTesting() {
  return kMinMementoCount;
}
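
// Merges a per-collector local feedback map into the global map. Sites whose
// found count reaches kMinMementoCount become candidates for digestion in
// ProcessPretenuringFeedback().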
void PretenuringHandler::MergeAllocationSitePretenuringFeedback(
    const PretenuringFeedbackMap& local_pretenuring_feedback) {
  PtrComprCageBase cage_base(heap_->isolate());
  Tagged<AllocationSite> site;
  for (auto& site_and_count : local_pretenuring_feedback) {
    site = site_and_count.first;
    // The site may have been moved by the GC; follow the forwarding pointer
    // to its new location.
    MapWord map_word = site->map_word(kRelaxedLoad);
    if (map_word.IsForwardingAddress()) {
      DCHECK(!HeapLayout::IsSelfForwarded(site, map_word));
      site = Cast<AllocationSite>(map_word.ToForwardingAddress(site));
    }

    // We have not validated the allocation site yet, since we have not
    // dereferenced the site during collecting information.
    // This is an inlined check of AllocationMemento::IsValid.
    if (!IsAllocationSite(site) || site->IsZombie()) continue;

    const int value = static_cast<int>(site_and_count.second);
    DCHECK_LT(0, value);
    if (site->IncrementMementoFoundCount(value) >= kMinMementoCount) {
      // For sites in the global map the count is accessed through the site.
      global_pretenuring_feedback_.insert(std::make_pair(site, 0));
    }
  }
}

void PretenuringHandler::RemoveAllocationSitePretenuringFeedback(
    Tagged<AllocationSite> site) {
  global_pretenuring_feedback_.erase(site);
}
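
// Runs during the GC atomic pause: Step 1 digests recorded memento feedback,
// Step 2 applies manual pretenuring requests, and Step 3 re-arms
// maybe-tenured sites once the new space has grown past the threshold.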
void PretenuringHandler::ProcessPretenuringFeedback(
    size_t new_space_capacity_target_capacity) {
  // The minimum new space capacity from which allocation sites can be
  // pretenured. A too small capacity means frequent GCs. Objects thus don't
  // get a chance to die before being promoted, which may lead to wrong
  // pretenuring decisions.
  static constexpr size_t kDefaultMinNewSpaceCapacityForPretenuring =
      8192 * KB * Heap::kPointerMultiplier;

  DCHECK(heap_->tracer()->IsInAtomicPause());

  if (!v8_flags.allocation_site_pretenuring) return;

  // TODO(333906585): Adjust capacity for sticky bits.
  const size_t max_capacity = v8_flags.sticky_mark_bits
                                  ? heap_->sticky_space()->Capacity()
                                  : heap_->new_space()->MaximumCapacity();
  const size_t min_new_space_capacity_for_pretenuring =
      std::min(max_capacity, kDefaultMinNewSpaceCapacityForPretenuring);

  bool trigger_deoptimization = false;
  int tenure_decisions = 0;
  int dont_tenure_decisions = 0;
  int allocation_mementos_found = 0;
  int allocation_sites = 0;
  int active_allocation_sites = 0;

  Tagged<AllocationSite> site;

  // Step 1: Digest feedback for recorded allocation sites.
  // This is the pretenuring trigger for allocation sites that are in
  // maybe-tenure state. Once we have switched to a large enough new space
  // size, we deoptimize the code that belongs to the allocation site and
  // derive the lifetime of the allocation site.
  const bool new_space_was_above_pretenuring_threshold =
      new_space_capacity_target_capacity >=
      min_new_space_capacity_for_pretenuring;

  for (auto& site_and_count : global_pretenuring_feedback_) {
    allocation_sites++;
    site = site_and_count.first;
    // The count is always accessed through the site.
    DCHECK_EQ(0, site_and_count.second);
    int found_count = site->memento_found_count();
    // An entry in the storage does not imply that the count is > 0 because
    // allocation sites might have been reset due to too many objects dying
    // in old space.
    if (found_count > 0) {
      DCHECK(IsAllocationSite(site));
      active_allocation_sites++;
      allocation_mementos_found += found_count;
      if (DigestPretenuringFeedback(heap_->isolate(), site,
                                    new_space_was_above_pretenuring_threshold,
                                    new_space_capacity_target_capacity)) {
        trigger_deoptimization = true;
      }
      if (site->GetAllocationType() == AllocationType::kOld) {
        tenure_decisions++;
      } else {
        dont_tenure_decisions++;
      }
    }
  }

  // Step 2: Pretenure allocation sites for manual requests.
  if (allocation_sites_to_pretenure_) {
    while (!allocation_sites_to_pretenure_->empty()) {
      auto pretenure_site = allocation_sites_to_pretenure_->Pop();
      if (PretenureAllocationSiteManually(heap_->isolate(), pretenure_site)) {
        trigger_deoptimization = true;
      }
    }
    allocation_sites_to_pretenure_.reset();
  }

  // Step 3: Deopt maybe tenured allocation sites if necessary.
  // New space capacity was too low for pretenuring but is now above the
  // threshold. Maybe tenured allocation sites may be pretenured on the next
  // GC.
  bool deopt_maybe_tenured = (heap_->NewSpaceTargetCapacity() >=
                              min_new_space_capacity_for_pretenuring) &&
                             !new_space_was_above_pretenuring_threshold;
  if (deopt_maybe_tenured) {
    heap_->ForeachAllocationSite(
        heap_->allocation_sites_list(),
        [&allocation_sites, &trigger_deoptimization](
            Tagged<AllocationSite> site) {
          DCHECK(IsAllocationSite(site));
          allocation_sites++;
          if (site->IsMaybeTenure()) {
            site->set_deopt_dependent_code(true);
            trigger_deoptimization = true;
          }
        });
  }

  if (trigger_deoptimization) {
    heap_->isolate()->stack_guard()->RequestDeoptMarkedAllocationSites();
  }

  if (V8_UNLIKELY(v8_flags.trace_pretenuring_statistics) &&
      (allocation_mementos_found > 0 || tenure_decisions > 0 ||
       dont_tenure_decisions > 0)) {
    PrintIsolate(
        heap_->isolate(),
        "pretenuring: threshold=%.2f deopt_maybe_tenured=%d visited_sites=%d "
        "active_sites=%d "
        "mementos=%d tenured=%d not_tenured=%d\n",
        GetPretenuringRatioThreshold(new_space_capacity_target_capacity),
        deopt_maybe_tenured ? 1 : 0, allocation_sites, active_allocation_sites,
        allocation_mementos_found, tenure_decisions, dont_tenure_decisions);
  }

  global_pretenuring_feedback_.clear();
  global_pretenuring_feedback_.reserve(kInitialFeedbackCapacity);
}

void PretenuringHandler::PretenureAllocationSiteOnNextCollection(
    Tagged<AllocationSite> site) {
  if (!allocation_sites_to_pretenure_) {
    allocation_sites_to_pretenure_.reset(
        new GlobalHandleVector<AllocationSite>(heap_));
  }
  allocation_sites_to_pretenure_->Push(site);
}

void PretenuringHandler::reset() { allocation_sites_to_pretenure_.reset(); }

}  // namespace internal
}  // namespace v8
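
The capacity-dependent threshold logic above is easy to exercise in isolation. The sketch below is not V8 code: the constants are copied from this file, while the file name, function name, and sample counts are invented for illustration. It prints the decision an allocation site with a given found/created memento ratio would receive at several new space capacities when --minor-ms is enabled.

// threshold_demo.cc - standalone sketch; mirrors the math in
// pretenuring-handler.cc but is not part of V8.
#include <cstdio>
#include <initializer_list>

constexpr double kMB = 1024.0 * 1024.0;
constexpr int kMinMementoCount = 100;  // same value as in the handler

// Mirrors GetPretenuringRatioThreshold() above.
double PretenuringRatioThreshold(double new_space_capacity, bool minor_ms) {
  constexpr double kScavengerPretenureRatio = 0.85;
  constexpr double kMinorMSPretenureMaxRatio = 0.8;
  constexpr double kMinorMSMinCapacity = 16 * kMB;
  if (!minor_ms) return kScavengerPretenureRatio;
  if (new_space_capacity <= kMinorMSMinCapacity)
    return kMinorMSPretenureMaxRatio;
  return kMinorMSPretenureMaxRatio * kMinorMSMinCapacity / new_space_capacity;
}

int main() {
  // A hypothetical site: 150 mementos created, 70 found surviving a GC.
  const int created = 150, found = 70;
  if (created < kMinMementoCount) return 0;  // too little data to decide
  const double ratio = static_cast<double>(found) / created;  // ~0.47
  for (double capacity_mb : {8.0, 16.0, 32.0, 64.0}) {
    const double threshold =
        PretenuringRatioThreshold(capacity_mb * kMB, /*minor_ms=*/true);
    std::printf("capacity=%2.0fMB threshold=%.2f -> %s\n", capacity_mb,
                threshold, ratio >= threshold ? "tenure" : "don't tenure");
  }
}

The same survival ratio that is dismissed in a small new space (threshold 0.8) triggers pretenuring once the space reaches 32MB (threshold 0.4): in a larger space, objects have had a real chance to die young, so a high surviving share is stronger evidence of long-lived allocations.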