PretenuringHandler::PretenuringHandler(Heap* heap)
    : heap_(heap), global_pretenuring_feedback_(kInitialFeedbackCapacity) {}
static constexpr int kMinMementoCount = 100;
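// kMinMementoCount is effectively the minimum sample size: a site's
// found/created ratio is only acted upon once at least this many mementos
// have been created for it (see DigestPretenuringFeedback() below).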
double GetPretenuringRatioThreshold(size_t new_space_capacity) {
  static constexpr double kScavengerPretenureRatio = 0.85;
  // MinorMS allows a much larger new space, so the required survival ratio
  // is scaled down as the capacity grows beyond kMinorMSMinCapacity.
  static constexpr double kMinorMSPretenureMaxRatio = 0.8;
  static constexpr double kMinorMSMinCapacity = 16 * MB;
  if (!v8_flags.minor_ms) return kScavengerPretenureRatio;
  if (new_space_capacity <= kMinorMSMinCapacity)
    return kMinorMSPretenureMaxRatio;
  return kMinorMSPretenureMaxRatio * kMinorMSMinCapacity / new_space_capacity;
}
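// Worked example for the scaling above (illustrative numbers, not from the
// source): with --minor-ms and a 32 MB new space target capacity, the
// threshold is 0.8 * 16 MB / 32 MB = 0.4, i.e. 40% of mementos surviving
// suffices, whereas the Scavenger configuration always requires 85%.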
inline bool MakePretenureDecision(
    Tagged<AllocationSite> site,
    AllocationSite::PretenureDecision current_decision, double ratio,
    bool new_space_capacity_was_above_pretenuring_threshold,
    size_t new_space_capacity) {
  // State transitions are only made from the undecided and maybe-tenure
  // states.
  if (current_decision == AllocationSite::kUndecided ||
      current_decision == AllocationSite::kMaybeTenure) {
    if (ratio >= GetPretenuringRatioThreshold(new_space_capacity)) {
      if (new_space_capacity_was_above_pretenuring_threshold) {
        // Transitioning into the tenure state requires deoptimizing code
        // that depends on this site.
        site->set_deopt_dependent_code(true);
        site->set_pretenure_decision(AllocationSite::kTenure);
        return true;
      }
      site->set_pretenure_decision(AllocationSite::kMaybeTenure);
    } else {
      site->set_pretenure_decision(AllocationSite::kDontTenure);
    }
  }
  return false;
}
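// Decision transitions implemented above (derived from this function; the
// states are AllocationSite::PretenureDecision values):
//   kUndecided/kMaybeTenure --ratio <  threshold-------------------> kDontTenure
//   kUndecided/kMaybeTenure --ratio >= threshold, small new space--> kMaybeTenure
//   kUndecided/kMaybeTenure --ratio >= threshold, large new space--> kTenure
// Only the transition to kTenure returns true, i.e. requires a deopt.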
inline void ResetPretenuringFeedback(Tagged<AllocationSite> site) {
  site->set_memento_found_count(0);
  site->set_memento_create_count(0);
}
inline bool DigestPretenuringFeedback(
    Isolate* isolate, Tagged<AllocationSite> site,
    bool new_space_capacity_was_above_pretenuring_threshold,
    size_t new_space_capacity) {
  bool deopt = false;
  int create_count = site->memento_create_count();
  int found_count = site->memento_found_count();
  bool minimum_mementos_created = create_count >= kMinMementoCount;
  double ratio =
      minimum_mementos_created || v8_flags.trace_pretenuring_statistics
          ? static_cast<double>(found_count) / create_count
          : 0.0;
  AllocationSite::PretenureDecision current_decision =
      site->pretenure_decision();

  if (minimum_mementos_created) {
    deopt = MakePretenureDecision(
        site, current_decision, ratio,
        new_space_capacity_was_above_pretenuring_threshold,
        new_space_capacity);
  }

  if (V8_UNLIKELY(v8_flags.trace_pretenuring_statistics)) {
    PrintIsolate(isolate,
                 "pretenuring: AllocationSite(%p): (created, found, ratio) "
                 "(%d, %d, %f) %s => %s\n",
                 reinterpret_cast<void*>(site.ptr()), create_count,
                 found_count, ratio,
                 site->PretenureDecisionName(current_decision),
                 site->PretenureDecisionName(site->pretenure_decision()));
  }

  ResetPretenuringFeedback(site);
  return deopt;
}
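// Illustrative trace of the digest above (hypothetical numbers): with
// create_count = 200 and found_count = 180, minimum_mementos_created holds
// and ratio = 0.9; under the non-minor-ms threshold of 0.85 the site moves
// toward tenure, and the return value is true exactly when dependent code
// has been marked for deoptimization.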
bool PretenureAllocationSiteManually(Isolate* isolate,
                                     Tagged<AllocationSite> site) {
  AllocationSite::PretenureDecision current_decision =
      site->pretenure_decision();
  bool deopt = true;
  if (current_decision == AllocationSite::kUndecided ||
      current_decision == AllocationSite::kMaybeTenure) {
    site->set_deopt_dependent_code(true);
    site->set_pretenure_decision(AllocationSite::kTenure);
  } else {
    deopt = false;
  }
  if (v8_flags.trace_pretenuring_statistics) {
    PrintIsolate(isolate,
                 "pretenuring manually requested: AllocationSite(%p): "
                 "%s => %s\n",
                 reinterpret_cast<void*>(site.ptr()),
                 site->PretenureDecisionName(current_decision),
                 site->PretenureDecisionName(site->pretenure_decision()));
  }

  ResetPretenuringFeedback(site);
  return deopt;
}
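// PretenureAllocationSiteManually() is the digest path for sites queued via
// PretenureAllocationSiteOnNextCollection() below: it forces the tenure
// decision regardless of memento counts, but still only requests a deopt
// when the site actually changed state.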
// static
int PretenuringHandler::GetMinMementoCountForTesting() {
  return kMinMementoCount;
}
void PretenuringHandler::MergeAllocationSitePretenuringFeedback(
    const PretenuringFeedbackMap& local_pretenuring_feedback) {
  PtrComprCageBase cage_base(heap_->isolate());
  Tagged<AllocationSite> site;
  for (auto& site_and_count : local_pretenuring_feedback) {
    site = site_and_count.first;
    MapWord map_word = site->map_word(cage_base, kRelaxedLoad);
    if (map_word.IsForwardingAddress()) {
      site = Cast<AllocationSite>(map_word.ToForwardingAddress(site));
    }

    // The site has not been validated during feedback collection; this
    // mirrors the inlined check in AllocationMemento::IsValid.
    if (!IsAllocationSite(site) || site->IsZombie()) continue;

    const int value = static_cast<int>(site_and_count.second);
    DCHECK_LT(0, value);
    if (site->IncrementMementoFoundCount(value) >= kMinMementoCount) {
      // For sites in the global map, the count is accessed through the site.
      global_pretenuring_feedback_.insert(std::make_pair(site, 0));
    }
  }
}
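// Note on the forwarding check above: the site may have been moved by the
// very GC whose feedback is being merged here, so the stale pointer from the
// local map must be updated before it is dereferenced.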
void PretenuringHandler::ProcessPretenuringFeedback(
    size_t new_space_capacity_target_capacity) {
  // The minimum new space capacity from which allocation sites can be
  // pretenured: a too-small new space means frequent GCs, so objects do not
  // get a chance to die before being promoted, which can lead to wrong
  // pretenuring decisions.
  static constexpr size_t kDefaultMinNewSpaceCapacityForPretenuring =
      8192 * KB * Heap::kPointerMultiplier;

  if (!v8_flags.allocation_site_pretenuring) return;

  const size_t max_capacity = v8_flags.sticky_mark_bits
                                  ? heap_->sticky_space()->Capacity()
                                  : heap_->new_space()->MaximumCapacity();
  const size_t min_new_space_capacity_for_pretenuring =
      std::min(max_capacity, kDefaultMinNewSpaceCapacityForPretenuring);
  bool trigger_deoptimization = false;
  int tenure_decisions = 0;
  int dont_tenure_decisions = 0;
  int allocation_mementos_found = 0;
  int allocation_sites = 0;
  int active_allocation_sites = 0;
  Tagged<AllocationSite> site;

  // Step 1: Digest the feedback recorded for allocation sites.
  const bool new_space_was_above_pretenuring_threshold =
      new_space_capacity_target_capacity >=
      min_new_space_capacity_for_pretenuring;
  for (auto& site_and_count : global_pretenuring_feedback_) {
    allocation_sites++;
    site = site_and_count.first;
    // The count is always accessed through the site itself.
    DCHECK_EQ(0, site_and_count.second);
    int found_count = site->memento_found_count();
    // An entry in the map does not imply a count > 0: sites may have been
    // reset in the meantime.
    if (found_count > 0) {
      DCHECK(IsAllocationSite(site));
      active_allocation_sites++;
      allocation_mementos_found += found_count;
      if (DigestPretenuringFeedback(heap_->isolate(), site,
                                    new_space_was_above_pretenuring_threshold,
                                    new_space_capacity_target_capacity)) {
        trigger_deoptimization = true;
      }
      if (site->GetAllocationType() == AllocationType::kOld) {
        tenure_decisions++;
      } else {
        dont_tenure_decisions++;
      }
    }
  }
  // Step 2: Pretenure allocation sites that were requested manually.
  if (allocation_sites_to_pretenure_) {
    while (!allocation_sites_to_pretenure_->empty()) {
      auto pretenure_site = allocation_sites_to_pretenure_->Pop();
      if (PretenureAllocationSiteManually(heap_->isolate(), pretenure_site)) {
        trigger_deoptimization = true;
      }
    }
    allocation_sites_to_pretenure_.reset();
  }
  // Step 3: Deopt maybe-tenured allocation sites if the new space capacity
  // only now reached the pretenuring threshold, so their decision can be
  // revisited on the next GC.
  bool deopt_maybe_tenured = (heap_->NewSpaceTargetCapacity() >=
                              min_new_space_capacity_for_pretenuring) &&
                             !new_space_was_above_pretenuring_threshold;
  if (deopt_maybe_tenured) {
    heap_->ForeachAllocationSite(
        heap_->allocation_sites_list(),
        [&allocation_sites, &trigger_deoptimization](
            Tagged<AllocationSite> site) {
          DCHECK(IsAllocationSite(site));
          allocation_sites++;
          if (site->IsMaybeTenure()) {
            site->set_deopt_dependent_code(true);
            trigger_deoptimization = true;
          }
        });
  }

  if (trigger_deoptimization) {
    heap_->isolate()->stack_guard()->RequestDeoptMarkedAllocationSites();
  }
  if (V8_UNLIKELY(v8_flags.trace_pretenuring_statistics) &&
      (allocation_mementos_found > 0 || tenure_decisions > 0 ||
       dont_tenure_decisions > 0)) {
    PrintIsolate(
        heap_->isolate(),
        "pretenuring: threshold=%.2f deopt_maybe_tenured=%d visited_sites=%d "
        "active_sites=%d "
        "mementos=%d tenured=%d not_tenured=%d\n",
        GetPretenuringRatioThreshold(new_space_capacity_target_capacity),
        deopt_maybe_tenured ? 1 : 0, allocation_sites, active_allocation_sites,
        allocation_mementos_found, tenure_decisions, dont_tenure_decisions);
  }

  global_pretenuring_feedback_.clear();
  global_pretenuring_feedback_.reserve(kInitialFeedbackCapacity);
}
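// Note: steps 1-3 above only mark sites (set_deopt_dependent_code) and set
// trigger_deoptimization; the actual deoptimization of dependent code is
// requested at most once per GC, via the stack guard.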
void PretenuringHandler::PretenureAllocationSiteOnNextCollection(
    Tagged<AllocationSite> site) {
  if (!allocation_sites_to_pretenure_) {
    allocation_sites_to_pretenure_.reset(
        new GlobalHandleVector<AllocationSite>(heap_));
  }
  allocation_sites_to_pretenure_->Push(site);
}
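// Usage sketch (hypothetical driver code, not part of this file): during a
// GC pause the handler is expected to be fed thread-local feedback first and
// to digest it once afterwards, roughly:
//
//   PretenuringHandler::PretenuringFeedbackMap local_feedback(16);
//   // ... scavenge/mark, recording memento hits into local_feedback ...
//   handler->MergeAllocationSitePretenuringFeedback(local_feedback);
//   handler->ProcessPretenuringFeedback(heap->NewSpaceTargetCapacity());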
void PretenuringHandler::reset() { allocation_sites_to_pretenure_.reset(); }