// Logs a "first-execution" function event the first time a function with a
// feedback vector is run (only when function-event logging is requested).
void LogExecution(Isolate* isolate, DirectHandle<JSFunction> function) {
  if (!function->has_feedback_vector()) return;
#ifdef V8_ENABLE_LEAPTIERING
  DCHECK(function->IsLoggingRequested(isolate));
      function->dispatch_handle());
  if (!function->feedback_vector()->log_next_execution()) return;
  DirectHandle<SharedFunctionInfo> sfi(function->shared(), isolate);
  std::string event_name = "first-execution";
  CodeKind kind = function->abstract_code(isolate)->kind(isolate);
  if (kind != CodeKind::INTERPRETED_FUNCTION) {
  LOG(isolate, FunctionEvent(
                   event_name.c_str(), Cast<Script>(raw_sfi->script())->id(), 0,
                   raw_sfi->StartPosition(), raw_sfi->EndPosition(), *name));
#ifndef V8_ENABLE_LEAPTIERING
  function->feedback_vector()->set_log_next_execution(false);
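
// Lazy compilation path: bail out on stack overflow, compile the function's
// bytecode (keeping any pending exception), optionally log the first
// execution, and return the freshly installed code.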
    return isolate->StackOverflow();
  DCHECK(!function->is_compiled(isolate));
  if (v8_flags.trace_lazy && sfi->is_compiled()) {
    PrintF("[unoptimized: %s]\n", function->DebugNameCStr().get());
                         &is_compiled_scope)) {
#ifndef V8_ENABLE_LEAPTIERING
    LogExecution(isolate, function);
  DCHECK(function->is_compiled(isolate));
  return function->code(isolate);
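
// Baseline-code installation: the SharedFunctionInfo must already hold
// baseline code; it is attached to the function (keeping any pending tiering
// requests) and returned.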
  DCHECK(sfi->HasBaselineCode());
  DCHECK(!function->HasAvailableOptimizedCode(isolate));
  DCHECK(!function->has_feedback_vector());
  function->UpdateCodeKeepTieringRequests(baseline_code);
#ifdef V8_ENABLE_LEAPTIERING
  return baseline_code;
  LogExecution(isolate, function);
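
// Installing the SharedFunctionInfo's code on the function: if that code is
// baseline code and no feedback vector exists yet, a feedback vector is
// created and attached first.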
  DCHECK(sfi->is_compiled());
  if (V8_LIKELY(sfi_code->kind() != CodeKind::BASELINE ||
                function->has_feedback_vector())) {
    function->UpdateCode(sfi_code);
  DCHECK(!function->HasAvailableOptimizedCode(isolate));
  DCHECK(!function->has_feedback_vector());
  JSFunction::CreateAndAttachFeedbackVector(isolate, function,
  Tagged<Code> sfi_code = function->shared()->GetCode(isolate);
  function->UpdateCode(sfi_code);

#ifdef V8_ENABLE_LEAPTIERING
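
// Helper for the optimization runtime entries below: reset any pending
// tiering request, make sure the function is compiled, then trigger optimized
// compilation for target_kind in the requested concurrency mode.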
void CompileOptimized(DirectHandle<JSFunction> function, ConcurrencyMode mode,
                      CodeKind target_kind, Isolate* isolate) {
  function->ResetTieringRequests();
  IsCompiledScope is_compiled_scope(function->shared(), isolate);
  if (V8_UNLIKELY(!is_compiled_scope.is_compiled())) {
    function->ResetIfCodeFlushed(isolate);
  if (mode == ConcurrencyMode::kConcurrent) {
    if (function->tiering_in_progress() ||
        function->GetActiveTier(isolate) >= target_kind) {
      static_assert(kTieringStateInProgressBlocksTierup);
      function->SetInterruptBudget(isolate, BudgetModification::kRaise);
  StackLimitCheck check(isolate);
  if (check.JsHasOverflowed(gap)) return;
  Compiler::CompileOptimized(isolate, function, mode, target_kind);
  DCHECK(function->is_compiled(isolate));
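
// The next four runtime entries request Maglev or Turbofan compilation,
// either as a concurrent job or eagerly (synchronously).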
  HandleScope scope(isolate);
  DirectHandle<JSFunction> function = args.at<JSFunction>(0);
  DCHECK(function->IsOptimizationRequested(isolate));
  CompileOptimized(function, ConcurrencyMode::kConcurrent, CodeKind::MAGLEV,
                   isolate);
  return ReadOnlyRoots(isolate).undefined_value();

  HandleScope scope(isolate);
  DirectHandle<JSFunction> function = args.at<JSFunction>(0);
  DCHECK(function->IsOptimizationRequested(isolate));
  CompileOptimized(function, ConcurrencyMode::kConcurrent,
                   CodeKind::TURBOFAN_JS, isolate);
  return ReadOnlyRoots(isolate).undefined_value();

  HandleScope scope(isolate);
  DirectHandle<JSFunction> function = args.at<JSFunction>(0);
  DCHECK(function->IsOptimizationRequested(isolate));
  CompileOptimized(function, ConcurrencyMode::kSynchronous, CodeKind::MAGLEV,
                   isolate);
  return ReadOnlyRoots(isolate).undefined_value();

  HandleScope scope(isolate);
  DirectHandle<JSFunction> function = args.at<JSFunction>(0);
  DCHECK(function->IsOptimizationRequested(isolate));
  CompileOptimized(function, ConcurrencyMode::kSynchronous,
                   CodeKind::TURBOFAN_JS, isolate);
  return ReadOnlyRoots(isolate).undefined_value();
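
// Handling a lazily deoptimized function: recompile its bytecode if needed,
// drop pending tiering requests, and reset the interrupt budget so the next
// tiering decision starts from scratch.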
  HandleScope scope(isolate);
  DirectHandle<JSFunction> function = args.at<JSFunction>(0);
  bool reoptimize = (*args.at<Smi>(1)).value();
  IsCompiledScope is_compiled_scope(function->shared(), isolate);
  if (!is_compiled_scope.is_compiled()) {
    StackLimitCheck check(isolate);
        check.JsHasOverflowed(kStackSpaceRequiredForCompilation * KB))) {
      return isolate->StackOverflow();
    if (!Compiler::Compile(isolate, function, Compiler::KEEP_EXCEPTION,
                           &is_compiled_scope)) {
      return ReadOnlyRoots(isolate).exception();
  function->ResetTieringRequests();
  function->raw_feedback_cell()->set_interrupt_budget(1);
  function->SetInterruptBudget(isolate, BudgetModification::kRaise,
                               CodeKind::INTERPRETED_FUNCTION);
  return ReadOnlyRoots(isolate).undefined_value();
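
// Non-leaptiering optimization entry: map the feedback vector's tiering state
// to a target code kind and concurrency mode, then compile and return the
// optimized code.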
  DCHECK(function->has_feedback_vector());
  switch (function->tiering_state()) {
    case TieringState::kRequestMaglev_Synchronous:
      target_kind = CodeKind::MAGLEV;
      mode = ConcurrencyMode::kSynchronous;
      break;
    case TieringState::kRequestMaglev_Concurrent:
      target_kind = CodeKind::MAGLEV;
      mode = ConcurrencyMode::kConcurrent;
      break;
    case TieringState::kRequestTurbofan_Synchronous:
      target_kind = CodeKind::TURBOFAN_JS;
      mode = ConcurrencyMode::kSynchronous;
      break;
    case TieringState::kRequestTurbofan_Concurrent:
      target_kind = CodeKind::TURBOFAN_JS;
      mode = ConcurrencyMode::kConcurrent;
      break;
    case TieringState::kNone:
    case TieringState::kInProgress:
  if (check.JsHasOverflowed(gap)) return isolate->StackOverflow();
  Compiler::CompileOptimized(isolate, function, mode, target_kind);
  DCHECK(function->is_compiled(isolate));
  LogExecution(isolate, function);
  return function->code(isolate);
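
// Runtime_HealOptimizedCodeSlot: evict optimized code that was marked for
// deoptimization from the feedback vector and return the function's current
// code.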
  DCHECK(function->shared()->is_compiled());
  function->feedback_vector()->EvictOptimizedCodeMarkedForDeoptimization(
      isolate, function->shared(), "Runtime_HealOptimizedCodeSlot");
  return function->code(isolate);
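
// Log the pending first-execution event for js_function and return its code.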
  LogExecution(isolate, js_function);
  return js_function->code(isolate);
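
// asm.js instantiation: check the stdlib/foreign/memory arguments, attempt to
// instantiate the compiled asm.js (wasm) module, and on failure mark the
// SharedFunctionInfo as asm-wasm-broken and fall back to CompileLazy.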
  if (IsJSReceiver(args[1])) {
  if (IsJSReceiver(args[2])) {
  if (IsJSArrayBuffer(args[3])) {
#if V8_ENABLE_WEBASSEMBLY
  if (shared->HasAsmWasmData()) {
        isolate, shared, data, stdlib, foreign, memory);
    isolate->counters()->asmjs_instantiate_result()->AddSample(
      return *result.ToHandleChecked();
  if (isolate->has_exception()) {
    DCHECK(isolate->is_execution_terminating());
  isolate->counters()->asmjs_instantiate_result()->AddSample(
  SharedFunctionInfo::DiscardCompiled(isolate, shared);
  shared->set_is_asm_wasm_broken(true);
  function->UpdateCode(*BUILTIN_CODE(isolate, CompileLazy));
  DCHECK(!isolate->has_exception());
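
// Looks up cached OSR code in the feedback vector for the JumpLoop bytecode
// the iterator is positioned at; returns true and stores it in code_out when
// present.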
bool TryGetOptimizedOsrCode(Isolate* isolate, Tagged<FeedbackVector> vector,
                            const interpreter::BytecodeArrayIterator& it,
                            Tagged<Code>* code_out) {
  std::optional<Tagged<Code>> maybe_code =
      vector->GetOptimizedOsrCode(isolate, it.GetSlotOperand(2));
  if (maybe_code.has_value()) {
    *code_out = maybe_code.value();
    return true;
  }
  return false;
}
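
// Walks the bytecode to find every JumpLoop loop that contains the deopt exit
// and deoptimizes any cached OSR code associated with those loops.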
void DeoptAllOsrLoopsContainingDeoptExit(Isolate* isolate,
                                         Tagged<JSFunction> function,
                                         BytecodeOffset deopt_exit_offset) {
  DCHECK(!deopt_exit_offset.IsNone());
      !function->feedback_vector()->maybe_has_optimized_osr_code()) {
  Handle<BytecodeArray> bytecode_array(
      function->shared()->GetBytecodeArray(isolate), isolate);
  DCHECK(interpreter::BytecodeArrayIterator::IsValidOffset(
      bytecode_array, deopt_exit_offset.ToInt()));
  interpreter::BytecodeArrayIterator it(bytecode_array,
                                        deopt_exit_offset.ToInt());
  Tagged<FeedbackVector> vector = function->feedback_vector();
  base::SmallVector<Tagged<Code>, 8> osr_codes;
  for (; !it.done(); it.Advance()) {
    if (it.current_bytecode() != interpreter::Bytecode::kJumpLoop) continue;
    if (base::IsInRange(deopt_exit_offset.ToInt(), it.GetJumpTargetOffset(),
                        it.current_offset())) {
    const int loop_nesting_level = it.GetImmediateOperand(1);
    if (loop_nesting_level == 0) return;
    if (TryGetOptimizedOsrCode(isolate, vector, it, &code)) {
      osr_codes.push_back(code);
  if (it.done()) return;
  for (size_t i = 0, size = osr_codes.size(); i < size; i++) {
    Deoptimizer::DeoptimizeFunction(function, LazyDeoptimizeReason::kEagerDeopt,
  int last_deopt_in_range_loop_jump_target;
  for (; !it.done(); it.Advance()) {
    if (it.current_bytecode() != interpreter::Bytecode::kJumpLoop) continue;
    if (it.GetJumpTargetOffset() > deopt_exit_offset.ToInt()) break;
    last_deopt_in_range_loop_jump_target = it.GetJumpTargetOffset();
    if (TryGetOptimizedOsrCode(isolate, vector, it, &code)) {
      Deoptimizer::DeoptimizeFunction(function,
                                      LazyDeoptimizeReason::kEagerDeopt, code);
    const int loop_nesting_level = it.GetImmediateOperand(1);
    if (loop_nesting_level == 0) break;
  if (it.done()) return;
  for (it.SetOffset(last_deopt_in_range_loop_jump_target);
       it.current_offset() < deopt_exit_offset.ToInt(); it.Advance()) {
    if (it.current_bytecode() != interpreter::Bytecode::kJumpLoop) continue;
    if (TryGetOptimizedOsrCode(isolate, vector, it, &code)) {
      Deoptimizer::DeoptimizeFunction(function,
                                      LazyDeoptimizeReason::kEagerDeopt, code);
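
// Deoptimization notification: take ownership of the Deoptimizer, restore the
// context, and for eager deopts that happened inside OSR'd loops also
// invalidate the affected OSR code.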
  Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
  DCHECK(AllowGarbageCollection::IsAllowed());
  DCHECK(isolate->context().is_null());
  isolate->set_context(deoptimizer->function()->native_context());
  isolate->set_context(Cast<Context>(top_frame->context()));
  if (deopt_kind == DeoptimizeKind::kLazy) {
  if (deopt_reason == DeoptimizeReason::kPrepareForOnStackReplacement &&
      function->ActiveTierIsMaglev(isolate)) {
    isolate->tiering_manager()->MarkForTurboFanOptimization(*function);
  if (osr_offset.IsNone()) {
    Deoptimizer::DeoptimizeFunction(
        *function, LazyDeoptimizeReason::kEagerDeopt, *optimized_code);
    DeoptAllOsrLoopsContainingDeoptExit(isolate, *function, deopt_exit_offset);
  } else if (deopt_reason != DeoptimizeReason::kOSREarlyExit &&
             Deoptimizer::DeoptExitIsInsideOsrLoop(
                 isolate, *function, deopt_exit_offset, osr_offset)) {
    Deoptimizer::DeoptimizeFunction(
        *function, LazyDeoptimizeReason::kEagerDeopt, *optimized_code);
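
// Reads the bytecode offset and JSFunction of the topmost unoptimized
// (interpreter or baseline) frame, as the starting point for OSR.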
void GetOsrOffsetAndFunctionForOSR(Isolate* isolate,
                                   BytecodeOffset* osr_offset,
                                   Handle<JSFunction>* function) {
  DCHECK(osr_offset->IsNone());
  DCHECK(function->is_null());
  JavaScriptStackFrameIterator it(isolate);
  UnoptimizedJSFrame* frame = UnoptimizedJSFrame::cast(it.frame());
         frame->LookupCode()->is_interpreter_trampoline_builtin());
         frame->LookupCode()->kind() == CodeKind::BASELINE);
  *osr_offset = BytecodeOffset(frame->GetBytecodeOffset());
  *function = handle(frame->function(), isolate);
  DCHECK(!osr_offset->IsNone());
  DCHECK((*function)->shared()->HasBytecodeArray());
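
// OSR compilation: pick concurrent or synchronous mode based on flags and
// efficiency mode, compile OSR code for the given bytecode offset, and verify
// the resulting deoptimization data.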
Tagged<Object> CompileOptimizedOSR(Isolate* isolate,
                                   DirectHandle<JSFunction> function,
                                   CodeKind min_opt_level,
                                   BytecodeOffset osr_offset) {
  ConcurrencyMode mode =
      V8_LIKELY(isolate->concurrent_recompilation_enabled() &&
                v8_flags.concurrent_osr)
          ? ConcurrencyMode::kConcurrent
          : ConcurrencyMode::kSynchronous;
  if (V8_UNLIKELY(isolate->EfficiencyModeEnabledForTiering() &&
                  min_opt_level == CodeKind::MAGLEV)) {
    mode = ConcurrencyMode::kSynchronous;
  DirectHandle<Code> result;
  if (!Compiler::CompileOptimizedOSR(
          isolate, function, osr_offset, mode,
          (maglev::IsMaglevOsrEnabled() && min_opt_level == CodeKind::MAGLEV)
              ? CodeKind::MAGLEV
              : CodeKind::TURBOFAN_JS)
      result->marked_for_deoptimization()) {
#ifndef V8_ENABLE_LEAPTIERING
  if (!function->HasAttachedOptimizedCode(isolate)) {
    function->UpdateCode(function->shared()->GetCode(isolate));
  Tagged<DeoptimizationData> data =
      Cast<DeoptimizationData>(result->deoptimization_data());
  DCHECK_EQ(BytecodeOffset(data->OsrBytecodeOffset().value()), osr_offset);
  DCHECK_GE(data->OsrPcOffset().value(), 0);
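
// OSR entry from interpreted or baseline code: determine the current frame's
// function and bytecode offset, then compile OSR code starting at Maglev.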
  GetOsrOffsetAndFunctionForOSR(isolate, &osr_offset, &function);
  return CompileOptimizedOSR(isolate, function, CodeKind::MAGLEV, osr_offset);
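
// OSR from Maglev to Turbofan: give up (with optional tracing) when
// concurrent OSR is disabled, back off under efficiency or battery-saver
// mode, otherwise request Turbofan OSR code.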
Tagged<Object> CompileOptimizedOSRFromMaglev(Isolate* isolate,
                                             DirectHandle<JSFunction> function,
                                             BytecodeOffset osr_offset) {
  if (V8_UNLIKELY(!isolate->concurrent_recompilation_enabled() ||
                  !v8_flags.concurrent_osr)) {
    if (v8_flags.trace_osr) {
      CodeTracer::Scope scope(isolate->GetCodeTracer());
             "[OSR - Tiering from Maglev to Turbofan failed because "
             "concurrent_osr is disabled. function: %s, osr offset: %d]\n",
             function->DebugNameCStr().get(), osr_offset.ToInt());
  if (V8_UNLIKELY(isolate->EfficiencyModeEnabledForTiering() ||
                  isolate->BatterySaverModeEnabled())) {
    function->feedback_vector()->reset_osr_urgency();
    function->SetInterruptBudget(isolate, BudgetModification::kRaise);
  return CompileOptimizedOSR(isolate, function, CodeKind::TURBOFAN_JS,
                             osr_offset);
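
// OSR entries called from Maglev frames; the second variant also re-requests
// Turbofan optimization for the frame's function when it is not already
// running Turbofan code.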
  MaglevFrame* frame = MaglevFrame::cast(it.frame());
  return CompileOptimizedOSRFromMaglev(isolate, function, osr_offset);

  MaglevFrame* frame = MaglevFrame::cast(it.frame());
  if (*function != frame->function()) {
    if (!frame->function()->ActiveTierIsTurbofan(isolate)) {
      isolate->tiering_manager()->MarkForTurboFanOptimization(
  return CompileOptimizedOSRFromMaglev(isolate, function, osr_offset);
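
// Tracing/logging of an OSR entry for the current frame's function.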
  GetOsrOffsetAndFunctionForOSR(isolate, &osr_offset, &function);
         "[OSR - entry. function: %s, osr offset: %d]\n",
         function->DebugNameCStr().get(), osr_offset.ToInt());
#ifndef V8_ENABLE_LEAPTIERING
  LogExecution(isolate, function);
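
// Direct-eval resolution: validate that the source may be compiled
// (code-generation-from-strings checks), compile it in the calling context,
// and only treat the call as a direct eval when the callee is the native
// context's global eval function.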
                                      int eval_scope_info_index,
                                      int eval_position) {
  std::tie(source, unknown_object) = Compiler::ValidateDynamicCompilationSource(
  if (unknown_object) {
  if (source.is_null()) {
        MessageTemplate::kCodeGenFromStrings, error_message);
    if (maybe_error.ToHandle(&error)) isolate->Throw(*error);
  if (!Is<NativeContext>(*context) && v8_flags.reuse_scope_infos) {
    CHECK(array->get(eval_scope_info_index)
              .GetHeapObjectIfWeak(isolate, &stored_info));
    CHECK_EQ(stored_info, context->scope_info());
      Compiler::GetFunctionFromEval(source.ToHandleChecked(), outer_info,
                                    context, language_mode, restriction,
  if (*callee != isolate->native_context()->global_eval_fun()) {
                           language_mode, args.smi_value_at(4),
                           args.smi_value_at(5));