V8 — Google's open-source high-performance JavaScript and WebAssembly engine, written in C++.

tiering-manager.cc
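This file implements the TieringManager, the heuristic that decides when a JSFunction should be promoted between V8's execution tiers (Ignition interpreter, Sparkplug baseline, Maglev, and TurboFan) and when a running loop should tier up via on-stack replacement (OSR).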
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/execution/tiering-manager.h"

#include <optional>

// Note: the original include list was collapsed in this listing; the includes
// below are reconstructed from the symbols used in this file.
#include "src/baseline/baseline.h"
#include "src/codegen/compiler.h"
#include "src/codegen/pending-optimization-table.h"
#include "src/common/globals.h"
#include "src/diagnostics/code-tracer.h"
#include "src/execution/isolate.h"
#include "src/flags/flags.h"
#include "src/objects/code.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/js-function.h"
#include "src/tracing/trace-event.h"

#ifdef V8_ENABLE_SPARKPLUG
#include "src/baseline/baseline-batch-compiler.h"
#endif  // V8_ENABLE_SPARKPLUG

namespace v8 {
namespace internal {

#define OPTIMIZATION_REASON_LIST(V)   \
  V(DoNotOptimize, "do not optimize") \
  V(HotAndStable, "hot and stable")

enum class OptimizationReason : uint8_t {
#define OPTIMIZATION_REASON_CONSTANTS(Constant, message) k##Constant,
  OPTIMIZATION_REASON_LIST(OPTIMIZATION_REASON_CONSTANTS)
#undef OPTIMIZATION_REASON_CONSTANTS
};

char const* OptimizationReasonToString(OptimizationReason reason) {
  static char const* reasons[] = {
#define OPTIMIZATION_REASON_TEXTS(Constant, message) message,
      OPTIMIZATION_REASON_LIST(OPTIMIZATION_REASON_TEXTS)
#undef OPTIMIZATION_REASON_TEXTS
  };
  size_t const index = static_cast<size_t>(reason);
  DCHECK_LT(index, arraysize(reasons));
  return reasons[index];
}

#undef OPTIMIZATION_REASON_LIST

std::ostream& operator<<(std::ostream& os, OptimizationReason reason) {
  return os << OptimizationReasonToString(reason);
}

class OptimizationDecision {
 public:
  static constexpr OptimizationDecision Maglev() {
    // TODO(v8:7700): Consider using another reason here.
    return {OptimizationReason::kHotAndStable, CodeKind::MAGLEV,
            ConcurrencyMode::kConcurrent};
  }
  static constexpr OptimizationDecision TurbofanHotAndStable() {
    return {OptimizationReason::kHotAndStable, CodeKind::TURBOFAN_JS,
            ConcurrencyMode::kConcurrent};
  }
  static constexpr OptimizationDecision DoNotOptimize() {
    return {OptimizationReason::kDoNotOptimize,
            // These values don't matter but we have to pass something.
            CodeKind::TURBOFAN_JS, ConcurrencyMode::kConcurrent};
  }

  constexpr bool should_optimize() const {
    return optimization_reason != OptimizationReason::kDoNotOptimize;
  }

  OptimizationReason optimization_reason;
  CodeKind code_kind;
  ConcurrencyMode concurrency_mode;

 private:
  OptimizationDecision() = default;
  constexpr OptimizationDecision(OptimizationReason optimization_reason,
                                 CodeKind code_kind,
                                 ConcurrencyMode concurrency_mode)
      : optimization_reason(optimization_reason),
        code_kind(code_kind),
        concurrency_mode(concurrency_mode) {}
};
// Since we pass by value:
static_assert(sizeof(OptimizationDecision) <= kInt32Size);

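// The static_assert above works because all three fields are byte-sized
// enums, so the whole decision fits in a 32-bit register and is cheap to
// pass and return by value.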
namespace {

void TraceInOptimizationQueue(Tagged<JSFunction> function,
                              CodeKind current_code_kind) {
  if (v8_flags.trace_opt_verbose) {
    PrintF("[not marking function %s (%s) for optimization: already queued]\n",
           function->DebugNameCStr().get(),
           CodeKindToString(current_code_kind));
  }
}

void TraceHeuristicOptimizationDisallowed(Tagged<JSFunction> function) {
  if (v8_flags.trace_opt_verbose) {
    PrintF(
        "[not marking function %s for optimization: marked with "
        "%%PrepareFunctionForOptimization for manual optimization]\n",
        function->DebugNameCStr().get());
  }
}

void TraceRecompile(Isolate* isolate, Tagged<JSFunction> function,
                    OptimizationDecision d) {
  if (v8_flags.trace_opt) {
    CodeTracer::Scope scope(isolate->GetCodeTracer());
    PrintF(scope.file(), "[marking ");
    ShortPrint(function, scope.file());
    PrintF(scope.file(), " for optimization to %s, %s, reason: %s",
           CodeKindToString(d.code_kind), ToString(d.concurrency_mode),
           OptimizationReasonToString(d.optimization_reason));
    PrintF(scope.file(), "]\n");
  }
}

}  // namespace

void TraceManualRecompile(Tagged<JSFunction> function, CodeKind code_kind,
                          ConcurrencyMode concurrency_mode) {
  if (v8_flags.trace_opt) {
    PrintF("[manually marking ");
    ShortPrint(function);
    PrintF(" for optimization to %s, %s]\n", CodeKindToString(code_kind),
           ToString(concurrency_mode));
  }
}

void TieringManager::Optimize(Tagged<JSFunction> function,
                              OptimizationDecision d) {
  DCHECK(d.should_optimize());
  TraceRecompile(isolate_, function, d);
  function->RequestOptimization(isolate_, d.code_kind, d.concurrency_mode);
}

void TieringManager::MarkForTurboFanOptimization(Tagged<JSFunction> function) {
  Optimize(function, OptimizationDecision::TurbofanHotAndStable());
}

namespace {

// Returns true when |function| should be enqueued for sparkplug compilation
// for the first time.
bool FirstTimeTierUpToSparkplug(Isolate* isolate, Tagged<JSFunction> function) {
  return !function->has_feedback_vector() ||
         // We request sparkplug even in the presence of a fbv, if we are
         // running ignition and haven't enqueued the function for sparkplug
         // batch compilation yet. This ensures we tier-up to sparkplug when
         // the feedback vector is allocated eagerly (e.g. for logging
         // function events; see JSFunction::InitializeFeedbackCell()).
         (function->ActiveTierIsIgnition(isolate) &&
          CanCompileWithBaseline(isolate, function->shared()) &&
          function->shared()->cached_tiering_decision() ==
              CachedTieringDecision::kPending);
}

bool TiersUpToMaglev(CodeKind code_kind) {
  return maglev::IsMaglevEnabled() &&
         CodeKindIsUnoptimizedJSFunction(code_kind);
}

bool TiersUpToMaglev(std::optional<CodeKind> code_kind) {
  return code_kind.has_value() && TiersUpToMaglev(code_kind.value());
}

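// The interrupt budget is the number of bytecode "ticks" a function may
// consume before TieringManager::OnInterruptTick re-examines it. It scales
// linearly with bytecode length: budget = flag * bytecode_length, where the
// flag is the target invocation count for the next tier. For illustration
// only (flag defaults vary by build and version): with
// --invocation-count-for-maglev=400, a 50-bytecode function gets a budget of
// 400 * 50 = 20000 ticks before Maglev is considered.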
int InterruptBudgetFor(Isolate* isolate, std::optional<CodeKind> code_kind,
                       Tagged<JSFunction> function,
                       CachedTieringDecision cached_tiering_decision,
                       int bytecode_length) {
  // Avoid interrupts while we're already tiering.
  if (function->tiering_in_progress()) return INT_MAX / 2;

  const std::optional<CodeKind> existing_request =
      function->GetRequestedOptimizationIfAny(isolate);
  if (existing_request == CodeKind::TURBOFAN_JS ||
      (code_kind.has_value() && code_kind.value() == CodeKind::TURBOFAN_JS)) {
    return v8_flags.invocation_count_for_osr * bytecode_length;
  }
  if (maglev::IsMaglevOsrEnabled() && existing_request == CodeKind::MAGLEV) {
    return v8_flags.invocation_count_for_maglev_osr * bytecode_length;
  }

  if (TiersUpToMaglev(code_kind) &&
      !function->IsTieringRequestedOrInProgress()) {
    if (v8_flags.profile_guided_optimization) {
      switch (cached_tiering_decision) {
        case CachedTieringDecision::kDelayMaglev:
          return (std::max(v8_flags.invocation_count_for_maglev,
                           v8_flags.minimum_invocations_after_ic_update) +
                  v8_flags.invocation_count_for_maglev_with_delay) *
                 bytecode_length;
        case CachedTieringDecision::kEarlyMaglev:
        case CachedTieringDecision::kEarlyTurbofan:
          return v8_flags.invocation_count_for_early_optimization *
                 bytecode_length;
        case CachedTieringDecision::kPending:
        case CachedTieringDecision::kEarlySparkplug:
        case CachedTieringDecision::kNormal:
          return v8_flags.invocation_count_for_maglev * bytecode_length;
      }
      // The enum value is coming from inside the sandbox and while the switch
      // is exhaustive, it's not guaranteed that value is one of the declared
      // values.
      SBXCHECK(false);
    }
    return v8_flags.invocation_count_for_maglev * bytecode_length;
  }
  return v8_flags.invocation_count_for_turbofan * bytecode_length;
}

}  // namespace

// static
int TieringManager::InterruptBudgetFor(
    Isolate* isolate, Tagged<JSFunction> function,
    std::optional<CodeKind> override_active_tier) {
  DCHECK(function->shared()->is_compiled());
  const int bytecode_length =
      function->shared()->GetBytecodeArray(isolate)->length();

  if (FirstTimeTierUpToSparkplug(isolate, function)) {
    return bytecode_length * v8_flags.invocation_count_for_feedback_allocation;
  }

  DCHECK(function->has_feedback_vector());
  if (bytecode_length > v8_flags.max_optimized_bytecode_size) {
    // Reduce the chance of interrupt budget underflows; we don't set the
    // budget to INT_MAX outright because it may still overflow when a
    // forward jump adds to it.
    return INT_MAX / 2;
  }
  return ::i::InterruptBudgetFor(
      isolate,
      override_active_tier ? override_active_tier
                           : function->GetActiveTier(isolate),
      function, function->shared()->cached_tiering_decision(), bytecode_length);
}

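// The OSR urgency is stored in the FeedbackVector and is compared against
// loop depth at JumpLoop back edges; raising it makes more deeply nested
// loops eligible for OSR, and kMaxOsrUrgency requests OSR at the next
// opportunity.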
namespace {

void TrySetOsrUrgency(Isolate* isolate, Tagged<JSFunction> function,
                      int osr_urgency) {
  Tagged<SharedFunctionInfo> shared = function->shared();
  if (V8_UNLIKELY(!v8_flags.use_osr)) return;
  if (V8_UNLIKELY(shared->optimization_disabled())) return;

  // We've passed all checks - bump the OSR urgency.

  Tagged<FeedbackVector> fv = function->feedback_vector();
  if (V8_UNLIKELY(v8_flags.trace_osr)) {
    CodeTracer::Scope scope(isolate->GetCodeTracer());
    PrintF(scope.file(),
           "[OSR - setting osr urgency. function: %s, old urgency: %d, new "
           "urgency: %d]\n",
           function->DebugNameCStr().get(), fv->osr_urgency(), osr_urgency);
  }

  DCHECK_GE(osr_urgency, fv->osr_urgency());  // Never lower urgency here.
  fv->set_osr_urgency(osr_urgency);
}

void TryIncrementOsrUrgency(Isolate* isolate, Tagged<JSFunction> function) {
  int old_urgency = function->feedback_vector()->osr_urgency();
  int new_urgency = std::min(old_urgency + 1, FeedbackVector::kMaxOsrUrgency);
  TrySetOsrUrgency(isolate, function, new_urgency);
}

void TryRequestOsrAtNextOpportunity(Isolate* isolate,
                                    Tagged<JSFunction> function) {
  TrySetOsrUrgency(isolate, function, FeedbackVector::kMaxOsrUrgency);
}

}  // namespace

void TieringManager::RequestOsrAtNextOpportunity(Tagged<JSFunction> function) {
  TryRequestOsrAtNextOpportunity(isolate_, function);
}

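// Invoked on each budget underflow for a function that already has feedback.
// Bails out while a (concurrent) tier-up or OSR compile is in flight, bumps
// OSR urgency when a tier-up has already been requested but we are still
// running in a lower-tier frame, and otherwise asks ShouldOptimize for a
// fresh decision.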
void TieringManager::MaybeOptimizeFrame(Tagged<JSFunction> function,
                                        CodeKind current_code_kind) {
  const bool tiering_in_progress = function->tiering_in_progress();
  const bool osr_in_progress =
      function->feedback_vector()->osr_tiering_in_progress();
  // Attention! Update this constant in case the condition below changes.
  static_assert(kTieringStateInProgressBlocksTierup);
  if (V8_UNLIKELY(tiering_in_progress) || V8_UNLIKELY(osr_in_progress)) {
    if (v8_flags.concurrent_recompilation_front_running &&
        ((tiering_in_progress && function->ActiveTierIsMaglev(isolate_)) ||
         (osr_in_progress &&
          function->feedback_vector()->maybe_has_optimized_osr_code()))) {
      // TODO(olivf): In the case of Maglev we tried a queue with two
      // priorities, but it seems not actually beneficial. More
      // investigation is needed.
      isolate_->IncreaseConcurrentOptimizationPriority(CodeKind::MAGLEV,
                                                       function->shared());
    }
    // Note: This effectively disables further tiering actions (e.g. OSR, or
    // tiering up into Maglev) for the function while it is being compiled.
    TraceInOptimizationQueue(function, current_code_kind);
    return;
  }

  if (V8_UNLIKELY(v8_flags.testing_d8_test_runner) &&
      !ManualOptimizationTable::IsMarkedForManualOptimization(isolate_,
                                                              function)) {
    TraceHeuristicOptimizationDisallowed(function);
    return;
  }

  // TODO(v8:7700): Consider splitting this up for Maglev/Turbofan.
  if (V8_UNLIKELY(function->shared()->optimization_disabled())) return;

  if (V8_UNLIKELY(v8_flags.always_osr)) {
    TryRequestOsrAtNextOpportunity(isolate_, function);
    // Continue below and do a normal optimized compile as well.
  }

  const bool maglev_osr = maglev::IsMaglevOsrEnabled();
  const CodeKinds available_kinds = function->GetAvailableCodeKinds(isolate_);
  const bool waiting_for_tierup =
      (current_code_kind < CodeKind::TURBOFAN_JS &&
       (available_kinds & CodeKindFlag::TURBOFAN_JS)) ||
      (maglev_osr && current_code_kind < CodeKind::MAGLEV &&
       (available_kinds & CodeKindFlag::MAGLEV));
  // Baseline OSR uses a separate mechanism and must not be considered here;
  // therefore we limit to kOptimizedJSFunctionCodeKindsMask.
  if (function->IsOptimizationRequested(isolate_) || waiting_for_tierup) {
    if (V8_UNLIKELY(maglev_osr && current_code_kind == CodeKind::MAGLEV &&
                    (!v8_flags.osr_from_maglev ||
                     isolate_->EfficiencyModeEnabledForTiering() ||
                     isolate_->BatterySaverModeEnabled()))) {
      return;
    }

    // OSR kicks in only once we've previously decided to tier up, but we are
    // still in a lower-tier frame (this implies a long-running loop).
    TryIncrementOsrUrgency(isolate_, function);

    // Return unconditionally and don't run through the optimization decision
    // again; we've already decided to tier up previously.
    return;
  }

  const std::optional<CodeKind> existing_request =
      function->GetRequestedOptimizationIfAny(isolate_);
  DCHECK(existing_request != CodeKind::TURBOFAN_JS);
  DCHECK(!function->HasAvailableCodeKind(isolate_, CodeKind::TURBOFAN_JS));
  OptimizationDecision d =
      ShouldOptimize(function->feedback_vector(), current_code_kind);
  // We might be stuck in a baseline frame that wants to tier up to Maglev,
  // but is in a loop, and can't OSR, because Maglev doesn't have OSR. Allow
  // it to skip over Maglev by re-checking ShouldOptimize as if we were in
  // Maglev.
  if (V8_UNLIKELY(!maglev_osr && d.should_optimize() &&
                  d.code_kind == CodeKind::MAGLEV)) {
    bool is_marked_for_maglev_optimization =
        existing_request == CodeKind::MAGLEV ||
        (available_kinds & CodeKindFlag::MAGLEV);
    if (is_marked_for_maglev_optimization) {
      d = ShouldOptimize(function->feedback_vector(), CodeKind::MAGLEV);
    }
  }

  if (V8_UNLIKELY(isolate_->EfficiencyModeEnabledForTiering() &&
                  d.code_kind != CodeKind::TURBOFAN_JS)) {
    d.concurrency_mode = ConcurrencyMode::kSynchronous;
  }

  if (d.should_optimize()) Optimize(function, d);
}

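// Maps (feedback vector, current tier) to a tier-up decision: prefer Maglev
// for unoptimized code when Maglev is enabled and not filtered out, fall
// through to TurboFan when allowed, and return DoNotOptimize when TurboFan
// is disabled, the function is filtered out, efficiency/battery-saver modes
// veto it, or the bytecode is too large to optimize.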
OptimizationDecision TieringManager::ShouldOptimize(
    Tagged<FeedbackVector> feedback_vector, CodeKind current_code_kind) {
  Tagged<SharedFunctionInfo> shared = feedback_vector->shared_function_info();
  if (current_code_kind == CodeKind::TURBOFAN_JS) {
    return OptimizationDecision::DoNotOptimize();
  }

  if (TiersUpToMaglev(current_code_kind) &&
      shared->PassesFilter(v8_flags.maglev_filter) &&
      !shared->maglev_compilation_failed()) {
    if (v8_flags.profile_guided_optimization &&
        shared->cached_tiering_decision() ==
            CachedTieringDecision::kEarlyTurbofan) {
      return OptimizationDecision::TurbofanHotAndStable();
    }
    return OptimizationDecision::Maglev();
  }

  if (V8_UNLIKELY(!v8_flags.turbofan ||
                  !shared->PassesFilter(v8_flags.turbo_filter) ||
                  (v8_flags.efficiency_mode_disable_turbofan &&
                   isolate_->EfficiencyModeEnabledForTiering()) ||
                  isolate_->BatterySaverModeEnabled())) {
    return OptimizationDecision::DoNotOptimize();
  }

  if (isolate_->EfficiencyModeEnabledForTiering() &&
      v8_flags.efficiency_mode_delay_turbofan &&
      feedback_vector->invocation_count() <
          v8_flags.efficiency_mode_delay_turbofan) {
    return OptimizationDecision::DoNotOptimize();
  }

  Tagged<BytecodeArray> bytecode = shared->GetBytecodeArray(isolate_);
  if (bytecode->length() > v8_flags.max_optimized_bytecode_size) {
    return OptimizationDecision::DoNotOptimize();
  }

  return OptimizationDecision::TurbofanHotAndStable();
}

namespace {

bool ShouldResetInterruptBudgetByICChange(
    CachedTieringDecision cached_tiering_decision) {
  switch (cached_tiering_decision) {
    case CachedTieringDecision::kEarlySparkplug:
    case CachedTieringDecision::kDelayMaglev:
      return false;
    case CachedTieringDecision::kPending:
    case CachedTieringDecision::kEarlyMaglev:
    case CachedTieringDecision::kEarlyTurbofan:
    case CachedTieringDecision::kNormal:
      return true;
  }
  // The enum value is coming from inside the sandbox and while the switch is
  // exhaustive, it's not guaranteed that value is one of the declared values.
  SBXCHECK(false);
}

}  // namespace

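// Called when an inline cache in |vector| changes shape. A changing IC means
// type feedback has not stabilized yet, so (unless the cached profile says
// otherwise) the interrupt budget is pushed back up to delay optimization
// until the feedback settles.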
void TieringManager::NotifyICChanged(Tagged<FeedbackVector> vector) {
  CodeKind code_kind = vector->shared_function_info()->HasBaselineCode()
                           ? CodeKind::BASELINE
                           : CodeKind::INTERPRETED_FUNCTION;

#ifndef V8_ENABLE_LEAPTIERING
  if (vector->has_optimized_code()) {
    code_kind = vector->optimized_code(isolate_)->kind();
  }
#endif  // !V8_ENABLE_LEAPTIERING

  if (code_kind == CodeKind::INTERPRETED_FUNCTION &&
      CanCompileWithBaseline(isolate_, vector->shared_function_info()) &&
      vector->shared_function_info()->cached_tiering_decision() ==
          CachedTieringDecision::kPending) {
    // Don't delay tier-up if we haven't tiered up to baseline yet, but will --
    // baseline code is feedback independent.
    return;
  }

  OptimizationDecision decision = ShouldOptimize(vector, code_kind);
  if (decision.should_optimize()) {
    Tagged<SharedFunctionInfo> shared = vector->shared_function_info();
    int bytecode_length = shared->GetBytecodeArray(isolate_)->length();
    Tagged<FeedbackCell> cell = vector->parent_feedback_cell();
    int invocations = v8_flags.minimum_invocations_after_ic_update;
    int bytecodes = std::min(bytecode_length, (kMaxInt >> 1) / invocations);
    int new_budget = invocations * bytecodes;
    int current_budget = cell->interrupt_budget();
    if (v8_flags.profile_guided_optimization &&
        shared->cached_tiering_decision() <=
            CachedTieringDecision::kEarlyMaglev) {
      DCHECK_LT(v8_flags.invocation_count_for_early_optimization,
                FeedbackVector::kInvocationCountBeforeStableDeoptSentinel);
      if (vector->invocation_count_before_stable() <
          v8_flags.invocation_count_for_early_optimization) {
        // Record how many invocations were consumed before the last IC
        // change.
        int new_invocation_count_before_stable;
        if (vector->interrupt_budget_reset_by_ic_change()) {
          // The initial interrupt budget was
          // v8_flags.minimum_invocations_after_ic_update * bytecodes.
          int new_consumed_budget = new_budget - current_budget;
          new_invocation_count_before_stable =
              vector->invocation_count_before_stable(kRelaxedLoad) +
              std::ceil(static_cast<float>(new_consumed_budget) / bytecodes);
        } else {
          // The initial interrupt budget was
          // v8_flags.invocation_count_for_{maglev|turbofan} * bytecodes.
          int total_consumed_budget =
              (maglev::IsMaglevEnabled()
                   ? v8_flags.invocation_count_for_maglev
                   : v8_flags.invocation_count_for_turbofan) *
                  bytecodes -
              current_budget;
          new_invocation_count_before_stable =
              std::ceil(static_cast<float>(total_consumed_budget) / bytecodes);
        }
        if (new_invocation_count_before_stable >=
            v8_flags.invocation_count_for_early_optimization) {
          vector->set_invocation_count_before_stable(
              v8_flags.invocation_count_for_early_optimization, kRelaxedStore);
          shared->set_cached_tiering_decision(CachedTieringDecision::kNormal);
        } else {
          vector->set_invocation_count_before_stable(
              new_invocation_count_before_stable, kRelaxedStore);
        }
      } else {
        shared->set_cached_tiering_decision(CachedTieringDecision::kNormal);
      }
    }
    if (!v8_flags.profile_guided_optimization ||
        ShouldResetInterruptBudgetByICChange(
            shared->cached_tiering_decision())) {
      if (new_budget > current_budget) {
        if (v8_flags.trace_opt_verbose) {
          PrintF("[delaying optimization of %s, IC changed]\n",
                 shared->DebugNameCStr().get());
        }
        vector->set_interrupt_budget_reset_by_ic_change(true);
        cell->set_interrupt_budget(new_budget);
      }
    }
  }
}

TieringManager::OnInterruptTickScope::OnInterruptTickScope() {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
               "V8.MarkCandidatesForOptimization");
}

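// The main tiering entry point, called when a function's interrupt budget
// underflows: ensure a feedback vector exists, enqueue Sparkplug compilation
// where applicable (batched behind a flag), consult MaybeOptimizeFrame for
// Maglev/TurboFan, and finally raise the interrupt budget for the next round.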
void TieringManager::OnInterruptTick(DirectHandle<JSFunction> function,
                                     CodeKind code_kind) {
  IsCompiledScope is_compiled_scope(
      function->shared()->is_compiled_scope(isolate_));

  // Remember whether the function had a vector at this point. This is
  // relevant later since the configuration 'Ignition without a vector' can
  // be considered a tier on its own. We begin tiering up to tiers higher
  // than Sparkplug only when reaching this point *with* a feedback vector.
  const bool had_feedback_vector = function->has_feedback_vector();
  const bool first_time_tiered_up_to_sparkplug =
      FirstTimeTierUpToSparkplug(isolate_, *function);
  // We don't want to trigger GC in the middle of OSR, so do not build
  // baseline code in that case.
  const bool maybe_had_optimized_osr_code =
      had_feedback_vector &&
      function->feedback_vector()->maybe_has_optimized_osr_code();
  const bool compile_sparkplug =
      CanCompileWithBaseline(isolate_, function->shared()) &&
      function->ActiveTierIsIgnition(isolate_) && !maybe_had_optimized_osr_code;

  // Ensure that the feedback vector has been allocated.
  if (!had_feedback_vector) {
    if (compile_sparkplug && function->shared()->cached_tiering_decision() ==
                                 CachedTieringDecision::kPending) {
      // Mark the function as compiled with sparkplug before the feedback
      // vector is created to initialize the interrupt budget for the next
      // tier.
      function->shared()->set_cached_tiering_decision(
          CachedTieringDecision::kEarlySparkplug);
    }
    JSFunction::CreateAndAttachFeedbackVector(isolate_, function,
                                              &is_compiled_scope);
    DCHECK(is_compiled_scope.is_compiled());
    // Also initialize the invocation count here. This is only really needed
    // for OSR. When we OSR functions with lazy feedback allocation we want
    // to have a non-zero invocation count so we can inline functions.
    function->feedback_vector()->set_invocation_count(1, kRelaxedStore);
  }

  DCHECK(function->has_feedback_vector());
  DCHECK(function->shared()->is_compiled());
  DCHECK(function->shared()->HasBytecodeArray());

  // TODO(jgruber): Consider integrating this into a linear tiering system
  // controlled by TieringState in which the order is always
  // Ignition-Sparkplug-Turbofan, and only a single tierup is requested at
  // once.
  // It's unclear whether this is possible and/or makes sense - for example,
  // batching compilation can introduce arbitrary latency between the SP
  // compile request and fulfillment, which doesn't work with strictly linear
  // tiering.
  if (compile_sparkplug) {
#ifdef V8_ENABLE_SPARKPLUG
    if (v8_flags.baseline_batch_compilation) {
      isolate_->baseline_batch_compiler()->EnqueueFunction(function);
    } else {
      IsCompiledScope inner_is_compiled_scope(
          function->shared()->is_compiled_scope(isolate_));
      Compiler::CompileBaseline(isolate_, function, Compiler::CLEAR_EXCEPTION,
                                &inner_is_compiled_scope);
    }
#else
    UNREACHABLE();
#endif  // V8_ENABLE_SPARKPLUG
  }

  // We only tier up beyond sparkplug if we already had a feedback vector.
  if (first_time_tiered_up_to_sparkplug) {
    // If we didn't have a feedback vector, the interrupt budget has already
    // been set by JSFunction::CreateAndAttachFeedbackVector, so no need to
    // set it again.
    if (had_feedback_vector) {
      if (function->shared()->cached_tiering_decision() ==
          CachedTieringDecision::kPending) {
        function->shared()->set_cached_tiering_decision(
            CachedTieringDecision::kEarlySparkplug);
      }
      function->SetInterruptBudget(isolate_, BudgetModification::kRaise);
    }
    return;
  }

  // Don't tier up if Turbofan is disabled.
  // TODO(jgruber): Update this for a multi-tier world.
  if (V8_UNLIKELY(!isolate_->use_optimizer())) {
    function->SetInterruptBudget(isolate_, BudgetModification::kRaise);
    return;
  }

  // --- We've decided to proceed for now. ---

  DisallowGarbageCollection no_gc;
  Tagged<JSFunction> function_obj = *function;

  MaybeOptimizeFrame(function_obj, code_kind);

  // Make sure to set the interrupt budget after maybe starting an
  // optimization, so that the interrupt budget size takes the tiering state
  // into account.
  DCHECK(had_feedback_vector);
  function->SetInterruptBudget(isolate_, BudgetModification::kRaise);
}

}  // namespace internal
}  // namespace v8