V8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.

runtime-compiler.cc
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <optional>

#include "src/asmjs/asm-js.h"
// ... (several #include lines elided in this listing)
#include "src/common/globals.h"
// ... (several #include lines elided in this listing)

namespace v8::internal {

namespace {
void LogExecution(Isolate* isolate, DirectHandle<JSFunction> function) {
  DCHECK(v8_flags.log_function_events);
  if (!function->has_feedback_vector()) return;
#ifdef V8_ENABLE_LEAPTIERING
  DCHECK(function->IsLoggingRequested(isolate));
  IsolateGroup::current()->js_dispatch_table()->ResetTieringRequest(
      function->dispatch_handle());
#else
  if (!function->feedback_vector()->log_next_execution()) return;
#endif
  DirectHandle<SharedFunctionInfo> sfi(function->shared(), isolate);
  DirectHandle<String> name = SharedFunctionInfo::DebugName(isolate, sfi);
  DisallowGarbageCollection no_gc;
  Tagged<SharedFunctionInfo> raw_sfi = *sfi;
  std::string event_name = "first-execution";
  CodeKind kind = function->abstract_code(isolate)->kind(isolate);
  // Not adding "-interpreter" for tooling backwards compatibility.
  if (kind != CodeKind::INTERPRETED_FUNCTION) {
    event_name += "-";
    event_name += CodeKindToString(kind);
  }
  LOG(isolate, FunctionEvent(
                   event_name.c_str(), Cast<Script>(raw_sfi->script())->id(), 0,
                   raw_sfi->StartPosition(), raw_sfi->EndPosition(), *name));
#ifndef V8_ENABLE_LEAPTIERING
  function->feedback_vector()->set_log_next_execution(false);
#endif  // !V8_ENABLE_LEAPTIERING
}
}  // namespace

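// Compiles a function on first call. Functions start out with their code set
// to the CompileLazy builtin, which calls into this runtime function; the
// function is compiled here (keeping any pending exception) and the resulting
// code object is returned so execution can re-enter it directly.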
RUNTIME_FUNCTION(Runtime_CompileLazy) {
  HandleScope scope(isolate);
  DCHECK_EQ(1, args.length());
  DirectHandle<JSFunction> function = args.at<JSFunction>(0);

  StackLimitCheck check(isolate);
  if (V8_UNLIKELY(
          check.JsHasOverflowed(kStackSpaceRequiredForCompilation * KB))) {
    return isolate->StackOverflow();
  }

  DirectHandle<SharedFunctionInfo> sfi(function->shared(), isolate);

  DCHECK(!function->is_compiled(isolate));
#ifdef DEBUG
  if (v8_flags.trace_lazy && sfi->is_compiled()) {
    PrintF("[unoptimized: %s]\n", function->DebugNameCStr().get());
  }
#endif
  IsCompiledScope is_compiled_scope;
  if (!Compiler::Compile(isolate, function, Compiler::KEEP_EXCEPTION,
                         &is_compiled_scope)) {
    return ReadOnlyRoots(isolate).exception();
  }
#ifndef V8_ENABLE_LEAPTIERING
  if (V8_UNLIKELY(v8_flags.log_function_events)) {
    LogExecution(isolate, function);
  }
#endif  // !V8_ENABLE_LEAPTIERING
  DCHECK(function->is_compiled(isolate));
  return function->code(isolate);
}

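// Installs already-compiled baseline (Sparkplug) code from the
// SharedFunctionInfo onto the JSFunction, allocating a feedback vector first
// if this function instance has never had one.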
RUNTIME_FUNCTION(Runtime_InstallBaselineCode) {
  HandleScope scope(isolate);
  DCHECK_EQ(1, args.length());
  DirectHandle<JSFunction> function = args.at<JSFunction>(0);
  DirectHandle<SharedFunctionInfo> sfi(function->shared(), isolate);
  DCHECK(sfi->HasBaselineCode());
  {
    if (!V8_ENABLE_LEAPTIERING_BOOL || !function->has_feedback_vector()) {
      IsCompiledScope is_compiled_scope(*sfi, isolate);
      DCHECK(!function->HasAvailableOptimizedCode(isolate));
      DCHECK(!function->has_feedback_vector());
      JSFunction::CreateAndAttachFeedbackVector(isolate, function,
                                                &is_compiled_scope);
    }
    DisallowGarbageCollection no_gc;
    Tagged<Code> baseline_code = sfi->baseline_code(kAcquireLoad);
    function->UpdateCodeKeepTieringRequests(baseline_code);
#ifdef V8_ENABLE_LEAPTIERING
    return baseline_code;
  }
#else
    if V8_LIKELY (!v8_flags.log_function_events) return baseline_code;
  }
  DCHECK(v8_flags.log_function_events);
  LogExecution(isolate, function);
  // LogExecution might allocate; reload the baseline code.
  return sfi->baseline_code(kAcquireLoad);
#endif  // V8_ENABLE_LEAPTIERING
}

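// Installs the code currently held by the SharedFunctionInfo on the
// JSFunction. The slow path below handles the case where the SFI holds
// baseline code but this function instance does not yet have a feedback
// vector.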
RUNTIME_FUNCTION(Runtime_InstallSFICode) {
  HandleScope scope(isolate);
  DCHECK_EQ(1, args.length());
  DirectHandle<JSFunction> function = args.at<JSFunction>(0);
  {
    DisallowGarbageCollection no_gc;
    Tagged<SharedFunctionInfo> sfi = function->shared();
    DCHECK(sfi->is_compiled());
    Tagged<Code> sfi_code = sfi->GetCode(isolate);
    if (V8_LIKELY(sfi_code->kind() != CodeKind::BASELINE ||
                  function->has_feedback_vector())) {
      function->UpdateCode(sfi_code);
      return sfi_code;
    }
  }
  // This could be the first time we are installing baseline code, so we need
  // to ensure that a feedback vector is allocated.
  IsCompiledScope is_compiled_scope(function->shared(), isolate);
  DCHECK(!function->HasAvailableOptimizedCode(isolate));
  DCHECK(!function->has_feedback_vector());
  JSFunction::CreateAndAttachFeedbackVector(isolate, function,
                                            &is_compiled_scope);
  Tagged<Code> sfi_code = function->shared()->GetCode(isolate);
  function->UpdateCode(sfi_code);
  return sfi_code;
}

#ifdef V8_ENABLE_LEAPTIERING

namespace {

void CompileOptimized(DirectHandle<JSFunction> function, ConcurrencyMode mode,
                      CodeKind target_kind, Isolate* isolate) {
  // Ensure that the tiering request is reset even if compilation fails.
  function->ResetTieringRequests();

  // As a pre- and post-condition of CompileOptimized, the function *must* be
  // compiled, i.e. the installed InstructionStream object must not be
  // CompileLazy.
  IsCompiledScope is_compiled_scope(function->shared(), isolate);
  if (V8_UNLIKELY(!is_compiled_scope.is_compiled())) {
    // This happens if the code is flushed while we still have an optimization
    // request pending (or if an optimization is manually requested on an
    // uncompiled function).
    // Instead of calling into Compiler::Compile and having to do exception
    // handling here, we reset and return, and thus tail-call into CompileLazy.
    function->ResetIfCodeFlushed(isolate);
    return;
  }

  if (mode == ConcurrencyMode::kConcurrent) {
    // No need to start another compile job.
    // Also, various fuzzing flags like --always-turbofan might already compile
    // this function in the above Compiler::Compile function.
    if (function->tiering_in_progress() ||
        function->GetActiveTier(isolate) >= target_kind) {
      static_assert(kTieringStateInProgressBlocksTierup);
      function->SetInterruptBudget(isolate, BudgetModification::kRaise);
      return;
    }
  }

  // Concurrent optimization runs on another thread, thus no additional gap.
  const int gap =
      IsConcurrent(mode) ? 0 : kStackSpaceRequiredForCompilation * KB;
  StackLimitCheck check(isolate);
  if (check.JsHasOverflowed(gap)) return;

  Compiler::CompileOptimized(isolate, function, mode, target_kind);

  DCHECK(function->is_compiled(isolate));
}

}  // namespace

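// Under leaptiering, a pending tiering request on a function resolves to one
// of the four runtime entries below; each forwards to CompileOptimized with
// the matching code kind (Maglev or Turbofan) and concurrency mode (a
// concurrent background job vs. eager synchronous compilation).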
RUNTIME_FUNCTION(Runtime_StartMaglevOptimizeJob) {
  HandleScope scope(isolate);
  DCHECK_EQ(1, args.length());
  DirectHandle<JSFunction> function = args.at<JSFunction>(0);
  DCHECK(function->IsOptimizationRequested(isolate));
  CompileOptimized(function, ConcurrencyMode::kConcurrent, CodeKind::MAGLEV,
                   isolate);
  return ReadOnlyRoots(isolate).undefined_value();
}

RUNTIME_FUNCTION(Runtime_StartTurbofanOptimizeJob) {
  HandleScope scope(isolate);
  DCHECK_EQ(1, args.length());
  DirectHandle<JSFunction> function = args.at<JSFunction>(0);
  DCHECK(function->IsOptimizationRequested(isolate));
  CompileOptimized(function, ConcurrencyMode::kConcurrent,
                   CodeKind::TURBOFAN_JS, isolate);
  return ReadOnlyRoots(isolate).undefined_value();
}

RUNTIME_FUNCTION(Runtime_OptimizeMaglevEager) {
  HandleScope scope(isolate);
  DCHECK_EQ(1, args.length());
  DirectHandle<JSFunction> function = args.at<JSFunction>(0);
  DCHECK(function->IsOptimizationRequested(isolate));
  CompileOptimized(function, ConcurrencyMode::kSynchronous, CodeKind::MAGLEV,
                   isolate);
  return ReadOnlyRoots(isolate).undefined_value();
}

RUNTIME_FUNCTION(Runtime_OptimizeTurbofanEager) {
  HandleScope scope(isolate);
  DCHECK_EQ(1, args.length());
  DirectHandle<JSFunction> function = args.at<JSFunction>(0);
  DCHECK(function->IsOptimizationRequested(isolate));
  CompileOptimized(function, ConcurrencyMode::kSynchronous,
                   CodeKind::TURBOFAN_JS, isolate);
  return ReadOnlyRoots(isolate).undefined_value();
}

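// Called after a lazy deoptimization. Recompiles the function if its bytecode
// was flushed, then either arms the interrupt budget for quick
// re-optimization (one invocation, to let ICs update first) or simply resets
// the budget for interpreted execution.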
RUNTIME_FUNCTION(Runtime_MarkLazyDeoptimized) {
  HandleScope scope(isolate);
  DCHECK_EQ(2, args.length());
  DirectHandle<JSFunction> function = args.at<JSFunction>(0);
  bool reoptimize = (*args.at<Smi>(1)).value();

  IsCompiledScope is_compiled_scope(function->shared(), isolate);
  if (!is_compiled_scope.is_compiled()) {
    StackLimitCheck check(isolate);
    if (V8_UNLIKELY(
            check.JsHasOverflowed(kStackSpaceRequiredForCompilation * KB))) {
      return isolate->StackOverflow();
    }
    if (!Compiler::Compile(isolate, function, Compiler::KEEP_EXCEPTION,
                           &is_compiled_scope)) {
      return ReadOnlyRoots(isolate).exception();
    }
    // In case this code was flushed we should not re-optimize it too quickly.
    reoptimize = false;
  }

  function->ResetTieringRequests();
  if (reoptimize) {
    // Set the budget such that we have one invocation, which allows us to
    // detect if any ICs need updating before re-optimization.
    function->raw_feedback_cell()->set_interrupt_budget(1);
  } else {
    function->SetInterruptBudget(isolate, BudgetModification::kRaise,
                                 CodeKind::INTERPRETED_FUNCTION);
  }
  return ReadOnlyRoots(isolate).undefined_value();
}

#else

RUNTIME_FUNCTION(Runtime_CompileOptimized) {
  HandleScope scope(isolate);
  DCHECK_EQ(1, args.length());
  Handle<JSFunction> function = args.at<JSFunction>(0);

  CodeKind target_kind;
  ConcurrencyMode mode;
  DCHECK(function->has_feedback_vector());
  switch (function->tiering_state()) {
    case TieringState::kRequestMaglev_Synchronous:
      target_kind = CodeKind::MAGLEV;
      mode = ConcurrencyMode::kSynchronous;
      break;
    case TieringState::kRequestMaglev_Concurrent:
      target_kind = CodeKind::MAGLEV;
      mode = ConcurrencyMode::kConcurrent;
      break;
    case TieringState::kRequestTurbofan_Synchronous:
      target_kind = CodeKind::TURBOFAN_JS;
      mode = ConcurrencyMode::kSynchronous;
      break;
    case TieringState::kRequestTurbofan_Concurrent:
      target_kind = CodeKind::TURBOFAN_JS;
      mode = ConcurrencyMode::kConcurrent;
      break;
    case TieringState::kNone:
    case TieringState::kInProgress:
      UNREACHABLE();
  }

  // As a pre- and post-condition of CompileOptimized, the function *must* be
  // compiled, i.e. the installed InstructionStream object must not be
  // CompileLazy.
  IsCompiledScope is_compiled_scope(function->shared(), isolate);
  DCHECK(is_compiled_scope.is_compiled());

  StackLimitCheck check(isolate);
  // Concurrent optimization runs on another thread, thus no additional gap.
  const int gap =
      IsConcurrent(mode) ? 0 : kStackSpaceRequiredForCompilation * KB;
  if (check.JsHasOverflowed(gap)) return isolate->StackOverflow();

  Compiler::CompileOptimized(isolate, function, mode, target_kind);

  DCHECK(function->is_compiled(isolate));
  if (V8_UNLIKELY(v8_flags.log_function_events)) {
    LogExecution(isolate, function);
  }
  return function->code(isolate);
}

RUNTIME_FUNCTION(Runtime_HealOptimizedCodeSlot) {
  SealHandleScope scope(isolate);
  DCHECK_EQ(1, args.length());
  DirectHandle<JSFunction> function = args.at<JSFunction>(0);

  DCHECK(function->shared()->is_compiled());

  function->feedback_vector()->EvictOptimizedCodeMarkedForDeoptimization(
      isolate, function->shared(), "Runtime_HealOptimizedCodeSlot");
  return function->code(isolate);
}

#endif  // !V8_ENABLE_LEAPTIERING

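// Logs a "first-execution" function event for the given function and returns
// its current code object so execution can continue.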
RUNTIME_FUNCTION(Runtime_FunctionLogNextExecution) {
  HandleScope scope(isolate);
  DCHECK_EQ(1, args.length());
  DirectHandle<JSFunction> js_function = args.at<JSFunction>(0);
  DCHECK(v8_flags.log_function_events);
  LogExecution(isolate, js_function);
  return js_function->code(isolate);
}

// The enum values need to match "AsmJsInstantiateResult" in
// tools/metrics/histograms/enums.xml.
enum class AsmJsInstantiateResult {
  kAsmJsInstantiateSuccess = 0,
  kAsmJsInstantiateFail = 1,
};

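// Instantiates a validated asm.js module via the asm.js-to-WebAssembly
// pipeline. On failure the wasm data is discarded, the module is marked as
// broken, and Smi zero is returned so the caller falls back to ordinary JS
// execution.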
RUNTIME_FUNCTION(Runtime_InstantiateAsmJs) {
  HandleScope scope(isolate);
  DCHECK_EQ(args.length(), 4);
  DirectHandle<JSFunction> function = args.at<JSFunction>(0);

  DirectHandle<JSReceiver> stdlib;
  if (IsJSReceiver(args[1])) {
    stdlib = args.at<JSReceiver>(1);
  }
  DirectHandle<JSReceiver> foreign;
  if (IsJSReceiver(args[2])) {
    foreign = args.at<JSReceiver>(2);
  }
  DirectHandle<JSArrayBuffer> memory;
  if (IsJSArrayBuffer(args[3])) {
    memory = args.at<JSArrayBuffer>(3);
  }
  DirectHandle<SharedFunctionInfo> shared(function->shared(), isolate);
#if V8_ENABLE_WEBASSEMBLY
  if (shared->HasAsmWasmData()) {
    DirectHandle<AsmWasmData> data(shared->asm_wasm_data(), isolate);
    MaybeDirectHandle<Object> result = AsmJs::InstantiateAsmWasm(
        isolate, shared, data, stdlib, foreign, memory);
    if (!result.is_null()) {
      isolate->counters()->asmjs_instantiate_result()->AddSample(
          static_cast<int>(AsmJsInstantiateResult::kAsmJsInstantiateSuccess));
      return *result.ToHandleChecked();
    }
    if (isolate->has_exception()) {
      // If instantiation fails, we do not propagate the exception but instead
      // fall back to JS execution. The only exception (to that rule) is the
      // termination exception.
      DCHECK(isolate->is_execution_terminating());
      return ReadOnlyRoots{isolate}.exception();
    }
    isolate->counters()->asmjs_instantiate_result()->AddSample(
        static_cast<int>(AsmJsInstantiateResult::kAsmJsInstantiateFail));

    // Remove wasm data, mark as broken for asm->wasm, replace AsmWasmData on
    // the SFI with UncompiledData and set entrypoint to CompileLazy builtin,
    // and return a smi 0 to indicate failure.
    SharedFunctionInfo::DiscardCompiled(isolate, shared);
  }
  shared->set_is_asm_wasm_broken(true);
#endif
  DCHECK_EQ(function->code(isolate), *BUILTIN_CODE(isolate, InstantiateAsmJs));
  function->UpdateCode(*BUILTIN_CODE(isolate, CompileLazy));
  DCHECK(!isolate->has_exception());
  return Smi::zero();
}

namespace {

bool TryGetOptimizedOsrCode(Isolate* isolate, Tagged<FeedbackVector> vector,
                            const interpreter::BytecodeArrayIterator& it,
                            Tagged<Code>* code_out) {
  std::optional<Tagged<Code>> maybe_code =
      vector->GetOptimizedOsrCode(isolate, it.GetSlotOperand(2));
  if (maybe_code.has_value()) {
    *code_out = maybe_code.value();
    return true;
  }
  return false;
}

// Deoptimize all OSR'd loops that are in the same outermost loop as the deopt
// exit. For example:
//  for (;;) {
//    for (;;) {
//    }  // Type a: loop start < OSR backedge < deopt exit
//    for (;;) {
//      <- Deopt
//      for (;;) {
//      }  // Type b: deopt exit < loop start < OSR backedge
//    }  // Type c: loop start < deopt exit < OSR backedge
//  }  // The outermost loop
void DeoptAllOsrLoopsContainingDeoptExit(Isolate* isolate,
                                         Tagged<JSFunction> function,
                                         BytecodeOffset deopt_exit_offset) {
  DisallowGarbageCollection no_gc;
  DCHECK(!deopt_exit_offset.IsNone());

  if (!v8_flags.use_ic ||
      !function->feedback_vector()->maybe_has_optimized_osr_code()) {
    return;
  }
  Handle<BytecodeArray> bytecode_array(
      function->shared()->GetBytecodeArray(isolate), isolate);
  DCHECK(interpreter::BytecodeArrayIterator::IsValidOffset(
      bytecode_array, deopt_exit_offset.ToInt()));

  interpreter::BytecodeArrayIterator it(bytecode_array,
                                        deopt_exit_offset.ToInt());

  Tagged<FeedbackVector> vector = function->feedback_vector();
  Tagged<Code> code;
  base::SmallVector<Tagged<Code>, 8> osr_codes;
  // Visit the loops before the first loop containing the deopt exit is found.
  for (; !it.done(); it.Advance()) {
    // We're only interested in loop ranges.
    if (it.current_bytecode() != interpreter::Bytecode::kJumpLoop) continue;
    // Is the deopt exit contained in the current loop?
    if (base::IsInRange(deopt_exit_offset.ToInt(), it.GetJumpTargetOffset(),
                        it.current_offset())) {
      break;
    }
    // We've reached nesting level 0, i.e. the current JumpLoop concludes a
    // top-level loop; return, as the deopt exit is not in any loop. For
    // example:
    //  <- Deopt
    //  for (;;) {
    //  }  // The outermost loop
    const int loop_nesting_level = it.GetImmediateOperand(1);
    if (loop_nesting_level == 0) return;
    if (TryGetOptimizedOsrCode(isolate, vector, it, &code)) {
      // Collect type b OSR'd loops.
      osr_codes.push_back(code);
    }
  }
  if (it.done()) return;
  for (size_t i = 0, size = osr_codes.size(); i < size; i++) {
    // Deoptimize type b OSR'd loops.
    Deoptimizer::DeoptimizeFunction(function, LazyDeoptimizeReason::kEagerDeopt,
                                    osr_codes[i]);
  }
  // Visit the loops after the first loop containing the deopt exit is found.
  int last_deopt_in_range_loop_jump_target;
  for (; !it.done(); it.Advance()) {
    // We're only interested in loop ranges.
    if (it.current_bytecode() != interpreter::Bytecode::kJumpLoop) continue;
    // We've reached a new nesting loop; this happens when the deopt exit is
    // in a loop whose outermost loop was removed. For example:
    //  for (;;) {
    //    <- Deopt
    //  }  // The non-outermost loop
    //  for (;;) {
    //  }  // The outermost loop
    if (it.GetJumpTargetOffset() > deopt_exit_offset.ToInt()) break;
    last_deopt_in_range_loop_jump_target = it.GetJumpTargetOffset();
    if (TryGetOptimizedOsrCode(isolate, vector, it, &code)) {
      // Deoptimize type c OSR'd loops.
      Deoptimizer::DeoptimizeFunction(function,
                                      LazyDeoptimizeReason::kEagerDeopt, code);
    }
    // We've reached nesting level 0, i.e. the current JumpLoop concludes a
    // top-level loop.
    const int loop_nesting_level = it.GetImmediateOperand(1);
    if (loop_nesting_level == 0) break;
  }
  if (it.done()) return;
  // Revisit from the start of the last in-range loop up to the deopt exit.
  for (it.SetOffset(last_deopt_in_range_loop_jump_target);
       it.current_offset() < deopt_exit_offset.ToInt(); it.Advance()) {
    // We're only interested in loop ranges.
    if (it.current_bytecode() != interpreter::Bytecode::kJumpLoop) continue;
    if (TryGetOptimizedOsrCode(isolate, vector, it, &code)) {
      // Deoptimize type a OSR'd loops.
      Deoptimizer::DeoptimizeFunction(function,
                                      LazyDeoptimizeReason::kEagerDeopt, code);
    }
  }
}

}  // namespace

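// Finalizes a deoptimization: materializes heap objects captured by the
// deoptimizer, restores the context, and decides which optimized code (the
// deoptimized function's and/or any OSR'd inner loops') must be invalidated
// based on the deopt kind, reason, and bytecode position.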
RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
  HandleScope scope(isolate);
  DCHECK_EQ(0, args.length());
  Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
  DCHECK(CodeKindCanDeoptimize(deoptimizer->compiled_code()->kind()));
  DCHECK(AllowGarbageCollection::IsAllowed());
  DCHECK(isolate->context().is_null());

  TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
  TRACE_EVENT0("v8", "V8.DeoptimizeCode");
  DirectHandle<JSFunction> function = deoptimizer->function();
  // For OSR the optimized code isn't installed on the function, so get the
  // code object from the deoptimizer.
  DirectHandle<Code> optimized_code = deoptimizer->compiled_code();
  const DeoptimizeKind deopt_kind = deoptimizer->deopt_kind();
  const DeoptimizeReason deopt_reason =
      deoptimizer->GetDeoptInfo().deopt_reason;

  // TODO(turbofan): We currently need the native context to materialize
  // the arguments object, but only to get to its map.
  isolate->set_context(deoptimizer->function()->native_context());

  // When this is called from WasmGC code, clear the "thread in wasm" flag,
  // which is important in case any GC needs to happen.
  // TODO(40192807): Find a better fix, likely by replacing the global flag.
  SaveAndClearThreadInWasmFlag clear_wasm_flag(isolate);

  // Make sure to materialize objects before causing any allocation.
  deoptimizer->MaterializeHeapObjects();
  const BytecodeOffset deopt_exit_offset =
      deoptimizer->bytecode_offset_in_outermost_frame();
  delete deoptimizer;

  // Ensure the context register is updated for materialized objects.
  JavaScriptStackFrameIterator top_it(isolate);
  JavaScriptFrame* top_frame = top_it.frame();
  isolate->set_context(Cast<Context>(top_frame->context()));

  // Lazy deopts don't invalidate the underlying optimized code, since the code
  // object itself is still valid (as far as we know); the called function
  // caused the deopt, not the function we're currently looking at.
  if (deopt_kind == DeoptimizeKind::kLazy) {
    return ReadOnlyRoots(isolate).undefined_value();
  }

  // Some eager deopts also don't invalidate InstructionStream (e.g. when
  // preparing for OSR from Maglev to Turbofan).
  if (IsDeoptimizationWithoutCodeInvalidation(deopt_reason)) {
    if (deopt_reason == DeoptimizeReason::kPrepareForOnStackReplacement &&
        function->ActiveTierIsMaglev(isolate)) {
      isolate->tiering_manager()->MarkForTurboFanOptimization(*function);
    }
    return ReadOnlyRoots(isolate).undefined_value();
  }

  // Non-OSR'd code is deoptimized unconditionally. If the deoptimization
  // occurs inside the outermost loop containing a loop that can trigger OSR
  // compilation, we also remove the OSR code; this avoids hitting out-of-date
  // OSR code and deoptimizing again soon after.
  //
  // For OSR'd code, we keep the optimized code around if deoptimization occurs
  // outside the outermost loop containing the loop that triggered OSR
  // compilation. The reasoning is that OSR is intended to speed up the
  // long-running loop; so if the deoptimization occurs outside this loop it is
  // still worth jumping to the OSR'd code on the next run. The reduced cost of
  // the loop should pay for the deoptimization costs.
  const BytecodeOffset osr_offset = optimized_code->osr_offset();
  if (osr_offset.IsNone()) {
    Deoptimizer::DeoptimizeFunction(
        *function, LazyDeoptimizeReason::kEagerDeopt, *optimized_code);
    DeoptAllOsrLoopsContainingDeoptExit(isolate, *function, deopt_exit_offset);
  } else if (deopt_reason != DeoptimizeReason::kOSREarlyExit &&
             Deoptimizer::DeoptExitIsInsideOsrLoop(
                 isolate, *function, deopt_exit_offset, osr_offset)) {
    Deoptimizer::DeoptimizeFunction(
        *function, LazyDeoptimizeReason::kEagerDeopt, *optimized_code);
  }

  return ReadOnlyRoots(isolate).undefined_value();
}

RUNTIME_FUNCTION(Runtime_ObserveNode) {
  // The %ObserveNode intrinsic only tracks the changes to an observed node in
  // code compiled by TurboFan.
  HandleScope scope(isolate);
  DCHECK_EQ(1, args.length());
  DirectHandle<Object> obj = args.at(0);
  return *obj;
}

RUNTIME_FUNCTION(Runtime_VerifyType) {
  // %VerifyType has no effect in the interpreter.
  HandleScope scope(isolate);
  DCHECK_EQ(1, args.length());
  DirectHandle<Object> obj = args.at(0);
  return *obj;
}

RUNTIME_FUNCTION(Runtime_CheckTurboshaftTypeOf) {
  // %CheckTurboshaftTypeOf has no effect in the interpreter.
  HandleScope scope(isolate);
  DCHECK_EQ(2, args.length());
  DirectHandle<Object> obj = args.at(0);
  return *obj;
}

namespace {

void GetOsrOffsetAndFunctionForOSR(Isolate* isolate, BytecodeOffset* osr_offset,
                                   Handle<JSFunction>* function) {
  DCHECK(osr_offset->IsNone());
  DCHECK(function->is_null());

  // Determine the frame that triggered the OSR request.
  JavaScriptStackFrameIterator it(isolate);
  UnoptimizedJSFrame* frame = UnoptimizedJSFrame::cast(it.frame());
  DCHECK_IMPLIES(frame->is_interpreted(),
                 frame->LookupCode()->is_interpreter_trampoline_builtin());
  DCHECK_IMPLIES(frame->is_baseline(),
                 frame->LookupCode()->kind() == CodeKind::BASELINE);

  *osr_offset = BytecodeOffset(frame->GetBytecodeOffset());
  *function = handle(frame->function(), isolate);

  DCHECK(!osr_offset->IsNone());
  DCHECK((*function)->shared()->HasBytecodeArray());
}

Tagged<Object> CompileOptimizedOSR(Isolate* isolate,
                                   DirectHandle<JSFunction> function,
                                   CodeKind min_opt_level,
                                   BytecodeOffset osr_offset) {
  ConcurrencyMode mode =
      V8_LIKELY(isolate->concurrent_recompilation_enabled() &&
                v8_flags.concurrent_osr)
          ? ConcurrencyMode::kConcurrent
          : ConcurrencyMode::kSynchronous;

  if (V8_UNLIKELY(isolate->EfficiencyModeEnabledForTiering() &&
                  min_opt_level == CodeKind::MAGLEV)) {
    mode = ConcurrencyMode::kSynchronous;
  }

  DirectHandle<Code> result;
  if (!Compiler::CompileOptimizedOSR(
           isolate, function, osr_offset, mode,
           (maglev::IsMaglevOsrEnabled() && min_opt_level == CodeKind::MAGLEV)
               ? CodeKind::MAGLEV
               : CodeKind::TURBOFAN_JS)
           .ToHandle(&result) ||
      result->marked_for_deoptimization()) {
    // An empty result can mean one of two things:
    // 1) we've started a concurrent compilation job - everything is fine.
    // 2) synchronous compilation failed for some reason.

#ifndef V8_ENABLE_LEAPTIERING
    if (!function->HasAttachedOptimizedCode(isolate)) {
      function->UpdateCode(function->shared()->GetCode(isolate));
    }
#endif  // V8_ENABLE_LEAPTIERING

    return Smi::zero();
  }

  DCHECK(!result.is_null());
  DCHECK(result->is_turbofanned() || result->is_maglevved());
  DCHECK(CodeKindIsOptimizedJSFunction(result->kind()));

#ifdef DEBUG
  Tagged<DeoptimizationData> data =
      Cast<DeoptimizationData>(result->deoptimization_data());
  DCHECK_EQ(BytecodeOffset(data->OsrBytecodeOffset().value()), osr_offset);
  DCHECK_GE(data->OsrPcOffset().value(), 0);
#endif  // DEBUG

  // First-execution logging happens in LogOrTraceOptimizedOSREntry.
  return *result;
}

}  // namespace

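// On-stack replacement entry point, reached from a JumpLoop back edge in
// unoptimized (interpreted or baseline) code. Compiles OSR code for the loop
// identified by the calling frame's current bytecode offset.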
RUNTIME_FUNCTION(Runtime_CompileOptimizedOSR) {
  HandleScope handle_scope(isolate);
  DCHECK_EQ(0, args.length());
  DCHECK(v8_flags.use_osr);

  BytecodeOffset osr_offset = BytecodeOffset::None();
  Handle<JSFunction> function;
  GetOsrOffsetAndFunctionForOSR(isolate, &osr_offset, &function);

  return CompileOptimizedOSR(isolate, function, CodeKind::MAGLEV, osr_offset);
}

namespace {

Tagged<Object> CompileOptimizedOSRFromMaglev(Isolate* isolate,
                                             DirectHandle<JSFunction> function,
                                             BytecodeOffset osr_offset) {
  // This path is only relevant for tests (all production configurations enable
  // concurrent OSR). It's quite subtle; if interested, read on:
  if (V8_UNLIKELY(!isolate->concurrent_recompilation_enabled() ||
                  !v8_flags.concurrent_osr)) {
    // - Synchronous Turbofan compilation may trigger lazy deoptimization (e.g.
    //   through compilation dependency finalization actions).
    // - Maglev (currently) disallows marking an opcode as both can_lazy_deopt
    //   and can_eager_deopt.
    // - Maglev's JumpLoop opcode (the logical caller of this runtime function)
    //   is marked as can_eager_deopt since OSR'ing to Turbofan involves
    //   deoptimizing to Ignition under the hood.
    // - Thus this runtime function *must not* trigger a lazy deopt, and
    //   therefore cannot trigger synchronous Turbofan compilation (see above).
    //
    // We solve this synchronous OSR case by bailing out early to Ignition, and
    // letting it handle OSR. How do we trigger the early bailout? Returning
    // any non-null InstructionStream from this function triggers the deopt in
    // JumpLoop.
    if (v8_flags.trace_osr) {
      CodeTracer::Scope scope(isolate->GetCodeTracer());
      PrintF(scope.file(),
             "[OSR - Tiering from Maglev to Turbofan failed because "
             "concurrent_osr is disabled. function: %s, osr offset: %d]\n",
             function->DebugNameCStr().get(), osr_offset.ToInt());
    }
    return Smi::zero();
  }

  if (V8_UNLIKELY(isolate->EfficiencyModeEnabledForTiering() ||
                  isolate->BatterySaverModeEnabled())) {
    function->feedback_vector()->reset_osr_urgency();
    function->SetInterruptBudget(isolate, BudgetModification::kRaise);
    return Smi::zero();
  }

  return CompileOptimizedOSR(isolate, function, CodeKind::TURBOFAN_JS,
                             osr_offset);
}

}  // namespace

RUNTIME_FUNCTION(Runtime_CompileOptimizedOSRFromMaglev) {
  HandleScope handle_scope(isolate);
  DCHECK_EQ(1, args.length());
  DCHECK(v8_flags.use_osr);

  const BytecodeOffset osr_offset(args.positive_smi_value_at(0));

  JavaScriptStackFrameIterator it(isolate);
  MaglevFrame* frame = MaglevFrame::cast(it.frame());
  DCHECK_EQ(frame->LookupCode()->kind(), CodeKind::MAGLEV);
  DirectHandle<JSFunction> function = direct_handle(frame->function(), isolate);

  return CompileOptimizedOSRFromMaglev(isolate, function, osr_offset);
}

RUNTIME_FUNCTION(Runtime_CompileOptimizedOSRFromMaglevInlined) {
  HandleScope handle_scope(isolate);
  DCHECK_EQ(2, args.length());
  DCHECK(v8_flags.use_osr);

  const BytecodeOffset osr_offset(args.positive_smi_value_at(0));
  DirectHandle<JSFunction> function = args.at<JSFunction>(1);

  JavaScriptStackFrameIterator it(isolate);
  MaglevFrame* frame = MaglevFrame::cast(it.frame());
  DCHECK_EQ(frame->LookupCode()->kind(), CodeKind::MAGLEV);

  if (*function != frame->function()) {
    // We are OSR'ing an inlined function. Mark the top frame's function for
    // optimization.
    if (!frame->function()->ActiveTierIsTurbofan(isolate)) {
      isolate->tiering_manager()->MarkForTurboFanOptimization(
          frame->function());
    }
  }

  return CompileOptimizedOSRFromMaglev(isolate, function, osr_offset);
}

RUNTIME_FUNCTION(Runtime_LogOrTraceOptimizedOSREntry) {
  HandleScope handle_scope(isolate);
  DCHECK_EQ(0, args.length());
  CHECK(v8_flags.trace_osr || v8_flags.log_function_events);

  BytecodeOffset osr_offset = BytecodeOffset::None();
  Handle<JSFunction> function;
  GetOsrOffsetAndFunctionForOSR(isolate, &osr_offset, &function);

  if (v8_flags.trace_osr) {
    PrintF(CodeTracer::Scope{isolate->GetCodeTracer()}.file(),
           "[OSR - entry. function: %s, osr offset: %d]\n",
           function->DebugNameCStr().get(), osr_offset.ToInt());
  }
#ifndef V8_ENABLE_LEAPTIERING
  if (V8_UNLIKELY(v8_flags.log_function_events)) {
    LogExecution(isolate, function);
  }
#endif  // !V8_ENABLE_LEAPTIERING
  return ReadOnlyRoots(isolate).undefined_value();
}
811
813 Isolate* isolate, Handle<i::Object> source_object,
814 DirectHandle<SharedFunctionInfo> outer_info, LanguageMode language_mode,
815 int eval_scope_info_index, int eval_position) {
816 DirectHandle<NativeContext> native_context = isolate->native_context();
817
818 // Check if native context allows code generation from
819 // strings. Throw an exception if it doesn't.
821 bool unknown_object;
822 std::tie(source, unknown_object) = Compiler::ValidateDynamicCompilationSource(
823 isolate, native_context, source_object);
824 // If the argument is an unhandled string time, bounce to GlobalEval.
825 if (unknown_object) {
826 return native_context->global_eval_fun();
827 }
828 if (source.is_null()) {
829 Handle<Object> error_message =
830 native_context->ErrorMessageForCodeGenerationFromStrings();
832 MaybeDirectHandle<Object> maybe_error = isolate->factory()->NewEvalError(
833 MessageTemplate::kCodeGenFromStrings, error_message);
834 if (maybe_error.ToHandle(&error)) isolate->Throw(*error);
835 return ReadOnlyRoots(isolate).exception();
836 }
837
838 // Deal with a normal eval call with a string argument. Compile it
839 // and return the compiled function bound in the local context.
840 static const ParseRestriction restriction = NO_PARSE_RESTRICTION;
842 DirectHandle<Context> context(isolate->context(), isolate);
843 if (!Is<NativeContext>(*context) && v8_flags.reuse_scope_infos) {
844 Tagged<WeakFixedArray> array = Cast<Script>(outer_info->script())->infos();
845 Tagged<ScopeInfo> stored_info;
846 CHECK(array->get(eval_scope_info_index)
847 .GetHeapObjectIfWeak(isolate, &stored_info));
848 CHECK_EQ(stored_info, context->scope_info());
849 }
851 isolate, compiled,
852 Compiler::GetFunctionFromEval(source.ToHandleChecked(), outer_info,
853 context, language_mode, restriction,
854 kNoSourcePosition, eval_position),
855 ReadOnlyRoots(isolate).exception());
856 return *compiled;
857}
858
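// Implements the runtime part of a direct eval call site: if the callee is
// not the original GlobalEval function, the call is treated as an ordinary
// call; otherwise the source is validated and compiled in the current context
// via CompileGlobalEval above.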
RUNTIME_FUNCTION(Runtime_ResolvePossiblyDirectEval) {
  HandleScope scope(isolate);
  DCHECK_EQ(6, args.length());

  DirectHandle<Object> callee = args.at(0);

  // If "eval" didn't refer to the original GlobalEval, it's not a
  // direct call to eval.
  if (*callee != isolate->native_context()->global_eval_fun()) {
    return *callee;
  }

  DCHECK(is_valid_language_mode(args.smi_value_at(3)));
  LanguageMode language_mode = static_cast<LanguageMode>(args.smi_value_at(3));
  DirectHandle<SharedFunctionInfo> outer_info(args.at<JSFunction>(2)->shared(),
                                              isolate);
  return CompileGlobalEval(isolate, args.at<Object>(1), outer_info,
                           language_mode, args.smi_value_at(4),
                           args.smi_value_at(5));
}

}  // namespace v8::internal