v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
deoptimizer.cc
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/deoptimizer/deoptimizer.h"

#include <optional>

#include "src/base/memory.h"
#include "src/debug/debug.h"
#include "src/heap/heap-inl.h"
#include "src/logging/log.h"
#include "src/objects/oddball.h"
#include "src/utils/utils.h"

#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/baseline/liftoff-compiler.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
#endif  // V8_ENABLE_WEBASSEMBLY
42
43namespace v8 {
44
45using base::Memory;
46
47namespace internal {
48
49namespace {
50
51class DeoptimizableCodeIterator {
52 public:
53 explicit DeoptimizableCodeIterator(Isolate* isolate);
54 DeoptimizableCodeIterator(const DeoptimizableCodeIterator&) = delete;
55 DeoptimizableCodeIterator& operator=(const DeoptimizableCodeIterator&) =
56 delete;
57 Tagged<Code> Next();
58
59 private:
60 Isolate* const isolate_;
61 std::unique_ptr<SafepointScope> safepoint_scope_;
62 std::unique_ptr<ObjectIterator> object_iterator_;
63 enum { kIteratingCodeSpace, kIteratingCodeLOSpace, kDone } state_;
64
66};
67
DeoptimizableCodeIterator::DeoptimizableCodeIterator(Isolate* isolate)
    : isolate_(isolate),
      safepoint_scope_(std::make_unique<SafepointScope>(
          isolate, isolate->is_shared_space_isolate()
                       ? SafepointKind::kGlobal
                       : SafepointKind::kIsolate)),
      object_iterator_(
          isolate->heap()->code_space()->GetObjectIterator(isolate->heap())),
      state_(kIteratingCodeSpace) {}

Tagged<Code> DeoptimizableCodeIterator::Next() {
  while (true) {
    Tagged<HeapObject> object = object_iterator_->Next();
    if (object.is_null()) {
      // No objects left in the current iterator, try to move to the next
      // space based on the state.
      switch (state_) {
        case kIteratingCodeSpace: {
          object_iterator_ =
              isolate_->heap()->code_lo_space()->GetObjectIterator(
                  isolate_->heap());
          state_ = kIteratingCodeLOSpace;
          continue;
        }
        case kIteratingCodeLOSpace:
          // No other spaces to iterate, so clean up and we're done. Keep the
          // object iterator so that it keeps returning null on Next(), to
          // avoid needing to branch on state_ before the while loop, but drop
          // the safepoint scope since we no longer need to stop the heap from
          // moving.
          safepoint_scope_.reset();
          state_ = kDone;
          [[fallthrough]];
        case kDone:
          return Code();
      }
    }
    Tagged<InstructionStream> istream = Cast<InstructionStream>(object);
    Tagged<Code> code;
    if (!istream->TryGetCode(&code, kAcquireLoad)) continue;
    if (!CodeKindCanDeoptimize(code->kind())) continue;
    return code;
  }
}

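// Usage sketch (this is exactly the pattern DeoptimizeAll() below uses): walk
// every deoptimizable Code object while the safepoint scope keeps the heap
// stable:
//
//   DeoptimizableCodeIterator it(isolate);
//   for (Tagged<Code> code = it.Next(); !code.is_null(); code = it.Next()) {
//     // ... inspect or mark `code` ...
//   }
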
}  // namespace

// {FrameWriter} offers a stack writer abstraction for writing
// FrameDescriptions. The main service the class provides is managing
// {top_offset_}, i.e. the offset of the next slot to write to.
//
// Note: Not in an anonymous namespace due to the friend class declaration
// in Deoptimizer.
class FrameWriter {
 public:
  static const int NO_INPUT_INDEX = -1;

  FrameWriter(Deoptimizer* deoptimizer, FrameDescription* frame,
              CodeTracer::Scope* trace_scope)
      : deoptimizer_(deoptimizer),
        frame_(frame),
        trace_scope_(trace_scope),
        top_offset_(frame->GetFrameSize()) {}

  void PushRawValue(intptr_t value, const char* debug_hint) {
    PushValue(value);
    if (trace_scope_ != nullptr) {
      DebugPrintOutputValue(value, debug_hint);
    }
  }

  void PushRawObject(Tagged<Object> obj, const char* debug_hint) {
    intptr_t value = obj.ptr();
    PushValue(value);
    if (trace_scope_ != nullptr) {
      DebugPrintOutputObject(obj, top_offset_, debug_hint);
    }
  }

  // There is no check against the allowed addresses for bottommost frames, as
  // the caller's pc could be anything. The caller's pc pushed here should
  // never be re-signed.
  void PushBottommostCallerPc(intptr_t pc) {
    top_offset_ -= kPCOnStackSize;
    frame_->SetFrameSlot(top_offset_, pc);
    DebugPrintOutputPc(pc, "bottommost caller's pc\n");
  }

  void PushApprovedCallerPc(intptr_t pc) {
    top_offset_ -= kPCOnStackSize;
    frame_->SetCallerPc(top_offset_, pc);
    DebugPrintOutputPc(pc, "caller's pc\n");
  }

  void PushCallerFp(intptr_t fp) {
    top_offset_ -= kFPOnStackSize;
    frame_->SetCallerFp(top_offset_, fp);
    DebugPrintOutputValue(fp, "caller's fp\n");
  }

  void PushCallerConstantPool(intptr_t cp) {
    top_offset_ -= kSystemPointerSize;
    frame_->SetCallerConstantPool(top_offset_, cp);
    DebugPrintOutputValue(cp, "caller's constant_pool\n");
  }

  void PushTranslatedValue(const TranslatedFrame::iterator& iterator,
                           const char* debug_hint = "") {
    Tagged<Object> obj = iterator->GetRawValue();
    PushRawObject(obj, debug_hint);
    if (trace_scope_ != nullptr) {
      PrintF(trace_scope_->file(), " (input #%d)\n", iterator.input_index());
    }
    deoptimizer_->QueueValueForMaterialization(output_address(top_offset_), obj,
                                               iterator);
  }

  void PushFeedbackVectorForMaterialization(
      const TranslatedFrame::iterator& iterator) {
    // Push a marker temporarily.
    PushRawObject(ReadOnlyRoots(deoptimizer_->isolate()).arguments_marker(),
                  "feedback vector");
    deoptimizer_->QueueFeedbackVectorForMaterialization(
        output_address(top_offset_), iterator);
  }

  void PushStackJSArguments(TranslatedFrame::iterator& iterator,
                            int parameters_count) {
    std::vector<TranslatedFrame::iterator> parameters;
    parameters.reserve(parameters_count);
    for (int i = 0; i < parameters_count; ++i, ++iterator) {
      parameters.push_back(iterator);
    }
    for (auto& parameter : base::Reversed(parameters)) {
      PushTranslatedValue(parameter, "stack parameter");
    }
  }

  unsigned top_offset() const { return top_offset_; }

  FrameDescription* frame() { return frame_; }

 private:
  void PushValue(intptr_t value) {
    CHECK_GE(top_offset_, 0);
    top_offset_ -= kSystemPointerSize;
    frame_->SetFrameSlot(top_offset_, value);
  }

  Address output_address(unsigned output_offset) {
    Address output_address =
        static_cast<Address>(frame_->GetTop()) + output_offset;
    return output_address;
  }

  void DebugPrintOutputValue(intptr_t value, const char* debug_hint = "") {
    if (trace_scope_ != nullptr) {
      PrintF(trace_scope_->file(),
             " " V8PRIxPTR_FMT ": [top + %3d] <- " V8PRIxPTR_FMT " ; %s",
             output_address(top_offset_), top_offset_, value, debug_hint);
    }
  }

  void DebugPrintOutputPc(intptr_t value, const char* debug_hint = "") {
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
    if (trace_scope_ != nullptr) {
      PrintF(trace_scope_->file(),
             " " V8PRIxPTR_FMT ": [top + %3d] <- " V8PRIxPTR_FMT
             " (signed) " V8PRIxPTR_FMT " (unsigned) ; %s",
             output_address(top_offset_), top_offset_, value,
             PointerAuthentication::StripPAC(value), debug_hint);
    }
#else
    DebugPrintOutputValue(value, debug_hint);
#endif
  }

  void DebugPrintOutputObject(Tagged<Object> obj, unsigned output_offset,
                              const char* debug_hint = "") {
    if (trace_scope_ != nullptr) {
      PrintF(trace_scope_->file(), " " V8PRIxPTR_FMT ": [top + %3d] <- ",
             output_address(output_offset), output_offset);
      if (IsSmi(obj)) {
        PrintF(trace_scope_->file(), V8PRIxPTR_FMT " <Smi %d>", obj.ptr(),
               Cast<Smi>(obj).value());
      } else {
        ShortPrint(obj, trace_scope_->file());
      }
      PrintF(trace_scope_->file(), " ; %s", debug_hint);
    }
  }

  Deoptimizer* deoptimizer_;
  FrameDescription* frame_;
  CodeTracer::Scope* const trace_scope_;
  unsigned top_offset_;
};

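// Illustrative sketch of how a FrameWriter is driven (the variable names here
// are placeholders; the real callers are the DoCompute*Frame methods): slots
// are pushed starting from the caller end of the frame, so the caller's pc
// and fp go in before the frame's own contents:
//
//   FrameWriter frame_writer(this, output_frame, trace_scope);
//   frame_writer.PushApprovedCallerPc(caller_pc);
//   frame_writer.PushCallerFp(caller_fp);
//   frame_writer.PushRawObject(context, "context\n");
//   frame_writer.PushTranslatedValue(value_iterator, "argument");
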
// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
Deoptimizer* Deoptimizer::New(Address raw_function, DeoptimizeKind kind,
                              Address from, int fp_to_sp_delta,
                              Isolate* isolate) {
  // This is zero for wasm.
  Tagged<JSFunction> function =
      raw_function != 0 ? Cast<JSFunction>(Tagged<Object>(raw_function))
                        : Tagged<JSFunction>();
  Deoptimizer* deoptimizer =
      new Deoptimizer(isolate, function, kind, from, fp_to_sp_delta);
  isolate->set_current_deoptimizer(deoptimizer);
  return deoptimizer;
}

Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
  Deoptimizer* result = isolate->GetAndClearCurrentDeoptimizer();
  result->DeleteFrameDescriptions();
  return result;
}

size_t Deoptimizer::DeleteForWasm(Isolate* isolate) {
  // The deoptimizer disallows garbage collections.
  DCHECK(!AllowGarbageCollection::IsAllowed());
  Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
  int output_count = deoptimizer->output_count();
  delete deoptimizer;
  // Now garbage collections are allowed again.
  DCHECK(AllowGarbageCollection::IsAllowed());
  return output_count;
}

DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
    JavaScriptFrame* frame, int jsframe_index, Isolate* isolate) {
  CHECK(frame->is_optimized_js());

  TranslatedState translated_values(frame);
  translated_values.Prepare(frame->fp());

  TranslatedState::iterator frame_it = translated_values.end();
  int counter = jsframe_index;
  for (auto it = translated_values.begin(); it != translated_values.end();
       it++) {
    if (it->kind() == TranslatedFrame::kUnoptimizedFunction ||
        it->kind() == TranslatedFrame::kJavaScriptBuiltinContinuation ||
        it->kind() ==
            TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch) {
      if (counter == 0) {
        frame_it = it;
        break;
      }
      counter--;
    }
  }
  CHECK(frame_it != translated_values.end());
  // We only include kJavaScriptBuiltinContinuation frames above to get the
  // counting right.
  CHECK_EQ(frame_it->kind(), TranslatedFrame::kUnoptimizedFunction);

  DeoptimizedFrameInfo* info =
      new DeoptimizedFrameInfo(&translated_values, frame_it, isolate);

  return info;
}

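// For example, when the debugger needs to inspect the second inlined frame of
// an optimized frame, it can do (sketch; the caller owns the returned object):
//
//   DeoptimizedFrameInfo* info =
//       Deoptimizer::DebuggerInspectableFrame(frame, /*jsframe_index=*/1,
//                                             isolate);
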
namespace {
class ActivationsFinder : public ThreadVisitor {
 public:
  ActivationsFinder(Tagged<GcSafeCode> topmost_optimized_code,
                    bool safe_to_deopt_topmost_optimized_code) {
#ifdef DEBUG
    topmost_ = topmost_optimized_code;
    safe_to_deopt_ = safe_to_deopt_topmost_optimized_code;
#endif
  }

  // Find the frames with activations of code objects marked for
  // deoptimization, look up the trampoline to the deoptimizer call for each
  // such code object, and use it to replace the current pc on the stack.
  void VisitThread(Isolate* isolate, ThreadLocalTop* top) override {
    for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
      if (it.frame()->is_optimized_js()) {
        Tagged<GcSafeCode> code = it.frame()->GcSafeLookupCode();
        if (CodeKindCanDeoptimize(code->kind()) &&
            code->marked_for_deoptimization()) {
          // Obtain the trampoline to the deoptimizer call.
          int trampoline_pc;
          if (code->is_maglevved()) {
            MaglevSafepointEntry safepoint = MaglevSafepointTable::FindEntry(
                isolate, code, it.frame()->pc());
            trampoline_pc = safepoint.trampoline_pc();
          } else {
            SafepointEntry safepoint = SafepointTable::FindEntry(
                isolate, code, it.frame()->maybe_unauthenticated_pc());
            trampoline_pc = safepoint.trampoline_pc();
          }
          // TODO(saelo): currently we have to use full pointer comparison as
          // builtin Code is still inside the sandbox while runtime-generated
          // Code is in trusted space.
          static_assert(!kAllCodeObjectsLiveInTrustedSpace);
          DCHECK_IMPLIES(code.SafeEquals(topmost_), safe_to_deopt_);
          static_assert(SafepointEntry::kNoTrampolinePC == -1);
          CHECK_GE(trampoline_pc, 0);
          if (!it.frame()->InFastCCall()) {
            Address new_pc = code->instruction_start() + trampoline_pc;
            if (v8_flags.cet_compatible) {
              Address pc = *it.frame()->pc_address();
              Deoptimizer::PatchToJump(pc, new_pc);
            } else {
              // Replace the current pc on the stack with the trampoline.
              // TODO(v8:10026): avoid replacing a signed pointer.
              Address* pc_addr = it.frame()->pc_address();
              PointerAuthentication::ReplacePC(pc_addr, new_pc,
                                               kSystemPointerSize);
            }
          }
        }
      }
    }
  }

 private:
#ifdef DEBUG
  Tagged<GcSafeCode> topmost_;
  bool safe_to_deopt_;
#endif
};
}  // namespace

// Replace the pc on the stack for code objects marked for deoptimization.
// static
void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
  DisallowGarbageCollection no_gc;

  Tagged<GcSafeCode> topmost_optimized_code;
  bool safe_to_deopt_topmost_optimized_code = false;
#ifdef DEBUG
  // Make sure all activations of optimized code can deopt at their current PC.
  // The topmost optimized code has special handling because it cannot be
  // deoptimized due to weak object dependency.
  for (StackFrameIterator it(isolate, isolate->thread_local_top()); !it.done();
       it.Advance()) {
    if (it.frame()->is_optimized_js()) {
      Tagged<GcSafeCode> code = it.frame()->GcSafeLookupCode();
      Tagged<JSFunction> function =
          static_cast<OptimizedJSFrame*>(it.frame())->function();
      TraceFoundActivation(isolate, function);
      bool safe_if_deopt_triggered;
      if (code->is_maglevved()) {
        MaglevSafepointEntry safepoint =
            MaglevSafepointTable::FindEntry(isolate, code, it.frame()->pc());
        safe_if_deopt_triggered = safepoint.has_deoptimization_index();
      } else {
        SafepointEntry safepoint = SafepointTable::FindEntry(
            isolate, code, it.frame()->maybe_unauthenticated_pc());
        safe_if_deopt_triggered = safepoint.has_deoptimization_index();
      }

      // Deopt is checked when we are patching addresses on the stack.
      bool is_builtin_code = code->kind() == CodeKind::BUILTIN;
      DCHECK(topmost_optimized_code.is_null() || safe_if_deopt_triggered ||
             is_builtin_code);
      if (topmost_optimized_code.is_null()) {
        topmost_optimized_code = code;
        safe_to_deopt_topmost_optimized_code = safe_if_deopt_triggered;
      }
    }
  }
#endif

  ActivationsFinder visitor(topmost_optimized_code,
                            safe_to_deopt_topmost_optimized_code);
  // Iterate over the stack of this thread.
  visitor.VisitThread(isolate, isolate->thread_local_top());
  // In addition to iterating over the stack of this thread, we also need to
  // consider all other threads, as they may also use the code currently being
  // deoptimized.
  isolate->thread_manager()->IterateArchivedThreads(&visitor);
}

void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
  RCS_SCOPE(isolate, RuntimeCallCounterId::kDeoptimizeCode);
  TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
  TRACE_EVENT0("v8", "V8.DeoptimizeCode");
  TraceDeoptAll(isolate);
  isolate->AbortConcurrentOptimization(BlockingBehavior::kBlock);

  // Mark all code, then deoptimize.
  {
    DeoptimizableCodeIterator it(isolate);
    for (Tagged<Code> code = it.Next(); !code.is_null(); code = it.Next()) {
      code->SetMarkedForDeoptimization(isolate,
                                       LazyDeoptimizeReason::kDebugger);
    }
  }

  DeoptimizeMarkedCode(isolate);
}

// static
void Deoptimizer::DeoptimizeFunction(Tagged<JSFunction> function,
                                     LazyDeoptimizeReason reason,
                                     Tagged<Code> code) {
  Isolate* isolate = function->GetIsolate();
  RCS_SCOPE(isolate, RuntimeCallCounterId::kDeoptimizeCode);
  TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
  TRACE_EVENT0("v8", "V8.DeoptimizeCode");
  function->ResetIfCodeFlushed(isolate);
  if (code.is_null()) code = function->code(isolate);

  if (CodeKindCanDeoptimize(code->kind())) {
    // Mark the code for deoptimization and unlink any functions that also
    // refer to that code. The code cannot be shared across native contexts,
    // so we only need to search one.
    code->SetMarkedForDeoptimization(isolate, reason);
#ifndef V8_ENABLE_LEAPTIERING_BOOL
    // The code in the function's optimized code feedback vector slot might
    // be different from the code on the function - evict it if necessary.
    function->feedback_vector()->EvictOptimizedCodeMarkedForDeoptimization(
        isolate, function->shared(), "unlinking code marked for deopt");
#endif  // !V8_ENABLE_LEAPTIERING_BOOL

    DeoptimizeMarkedCode(isolate);
  }
}

// static
void Deoptimizer::DeoptimizeAllOptimizedCodeWithFunction(
    Isolate* isolate, DirectHandle<SharedFunctionInfo> function) {
  RCS_SCOPE(isolate, RuntimeCallCounterId::kDeoptimizeCode);
  TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
  TRACE_EVENT0("v8", "V8.DeoptimizeAllOptimizedCodeWithFunction");

  // Make sure no new code is compiled with the function.
  isolate->AbortConcurrentOptimization(BlockingBehavior::kBlock);

  // Mark all code that inlines this function, then deoptimize.
  bool any_marked = false;
  {
    DeoptimizableCodeIterator it(isolate);
    for (Tagged<Code> code = it.Next(); !code.is_null(); code = it.Next()) {
      if (code->Inlines(*function)) {
        code->SetMarkedForDeoptimization(isolate,
                                         LazyDeoptimizeReason::kDebugger);
        any_marked = true;
      }
    }
  }
  if (any_marked) {
    DeoptimizeMarkedCode(isolate);
  }
}

#define DEOPTIMIZATION_HELPER_BUILTINS(V)                                    \
  V(Builtin::kInterpreterEnterAtBytecode,                                    \
    deopt_pc_offset_after_adapt_shadow_stack)                                \
  V(Builtin::kInterpreterEnterAtNextBytecode,                                \
    deopt_pc_offset_after_adapt_shadow_stack)                                \
  V(Builtin::kContinueToCodeStubBuiltinWithResult,                           \
    deopt_pc_offset_after_adapt_shadow_stack)                                \
  V(Builtin::kContinueToCodeStubBuiltin,                                     \
    deopt_pc_offset_after_adapt_shadow_stack)                                \
  V(Builtin::kContinueToJavaScriptBuiltinWithResult,                         \
    deopt_pc_offset_after_adapt_shadow_stack)                                \
  V(Builtin::kContinueToJavaScriptBuiltin,                                   \
    deopt_pc_offset_after_adapt_shadow_stack)                                \
  V(Builtin::kRestartFrameTrampoline,                                        \
    deopt_pc_offset_after_adapt_shadow_stack)                                \
  V(Builtin::kJSConstructStubGeneric, construct_stub_create_deopt_pc_offset) \
  V(Builtin::kInterpreterPushArgsThenFastConstructFunction,                  \
    construct_stub_invoke_deopt_pc_offset)

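// Each V(builtin, offset) pair above expands, via CHECK_BUILTIN in
// EnsureValidReturnAddress() below, into one guarded comparison, roughly:
//
//   if (builtins->code(Builtin::kRestartFrameTrampoline)->instruction_start() +
//           heap->deopt_pc_offset_after_adapt_shadow_stack().value() -
//           Deoptimizer::kAdaptShadowStackOffsetToSubtract == address)
//     return address;
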
// static
Address Deoptimizer::EnsureValidReturnAddress(Isolate* isolate,
                                              Address address) {
  // TODO(42201233): We should make sure everything here we use for validation
  // (builtins array, code object, and offset values) are not writable.
  Builtins* builtins = isolate->builtins();
  Heap* heap = isolate->heap();
#define CHECK_BUILTIN(builtin, offset)                                        \
  if (builtins->code(builtin)->instruction_start() + heap->offset().value() - \
          Deoptimizer::kAdaptShadowStackOffsetToSubtract ==                   \
      address)                                                                \
    return address;

  DEOPTIMIZATION_HELPER_BUILTINS(CHECK_BUILTIN)
#undef CHECK_BUILTIN

  // NotifyDeoptimized is used for continuation.
  if (builtins->code(Builtin::kNotifyDeoptimized)->instruction_start() ==
      address)
    return address;

#if V8_ENABLE_WEBASSEMBLY
  if (v8_flags.wasm_deopt &&
      wasm::GetWasmCodeManager()->LookupCode(isolate, address) != nullptr) {
    // TODO(42204618): This does not check for the PC being a valid "deopt
    // point" but could be any arbitrary address inside a wasm code object
    // (including pointing into the middle of an instruction).
    return address;
  }
#endif

  CHECK_WITH_MSG(false, "Not allowed return address");
}

void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) {
  deoptimizer->DoComputeOutputFrames();
}

const char* Deoptimizer::MessageFor(DeoptimizeKind kind) {
  switch (kind) {
    case DeoptimizeKind::kEager:
      return "deopt-eager";
    case DeoptimizeKind::kLazy:
      return "deopt-lazy";
  }
}

Deoptimizer::Deoptimizer(Isolate* isolate, Tagged<JSFunction> function,
                         DeoptimizeKind kind, Address from, int fp_to_sp_delta)
    : isolate_(isolate),
      function_(function),
      deopt_exit_index_(kFixedExitSizeMarker),
      deopt_kind_(kind),
      from_(from),
      fp_to_sp_delta_(fp_to_sp_delta),
      deoptimizing_throw_(false),
      catch_handler_data_(-1),
      catch_handler_pc_offset_(-1),
      restart_frame_index_(-1),
      input_(nullptr),
      output_count_(0),
      output_(nullptr),
      caller_frame_top_(0),
      caller_fp_(0),
      caller_pc_(0),
      caller_constant_pool_(0),
      actual_argument_count_(0),
      stack_fp_(0),
      trace_scope_(v8_flags.trace_deopt || v8_flags.log_deopt
                       ? new CodeTracer::Scope(isolate->GetCodeTracer())
                       : nullptr) {
  if (isolate->deoptimizer_lazy_throw()) {
    CHECK_EQ(kind, DeoptimizeKind::kLazy);
    isolate->set_deoptimizer_lazy_throw(false);
    deoptimizing_throw_ = true;
  }

  if (isolate->debug()->IsRestartFrameScheduled()) {
    CHECK(deoptimizing_throw_);
    restart_frame_index_ = isolate->debug()->restart_inline_frame_index();
    CHECK_GE(restart_frame_index_, 0);
    isolate->debug()->clear_restart_frame();
  }

  DCHECK_NE(from, kNullAddress);

#ifdef DEBUG
  DCHECK(AllowGarbageCollection::IsAllowed());
  disallow_garbage_collection_ = new DisallowGarbageCollection();
#endif  // DEBUG

#if V8_ENABLE_WEBASSEMBLY
  if (v8_flags.wasm_deopt && function.is_null()) {
#if V8_ENABLE_SANDBOX
    no_heap_access_during_wasm_deopt_ =
        SandboxHardwareSupport::MaybeBlockAccess();
#endif
    wasm::WasmCode* code =
        wasm::GetWasmCodeManager()->LookupCode(isolate, from);
    compiled_optimized_wasm_code_ = code;
    DCHECK_NOT_NULL(code);
    wasm::WasmDeoptView deopt_view(code->deopt_data());
    const wasm::WasmDeoptData& deopt_data = deopt_view.GetDeoptData();
    DCHECK_NE(deopt_data.translation_array_size, 0);
    CHECK_GE(from, deopt_data.deopt_exit_start_offset);
    Address deopt_exit_offset = from - code->instruction_start();
    // All eager deopt exits are calls "at the end" of the code to the builtin
    // generated by Generate_DeoptimizationEntry_Eager. These calls have a
    // fixed size kEagerDeoptExitSize and the deopt data contains the offset
    // of the first such call to the beginning of the code, so we can map any
    // PC of such a call to a unique index for this deopt point.
    deopt_exit_index_ =
        static_cast<uint32_t>(deopt_exit_offset -
                              deopt_data.deopt_exit_start_offset -
                              kEagerDeoptExitSize) /
        kEagerDeoptExitSize;

    // Note: The parameter stack slots are not really part of the frame.
    // However, the deoptimizer needs access to the incoming parameter values
    // and therefore they need to be included in the FrameDescription. Between
    // the parameters and the actual frame there are 2 pointers (the caller's
    // pc and saved stack pointer) that therefore also need to be included.
    // Both pointers as well as the incoming parameter stack slots are going
    // to be copied into the outgoing FrameDescription which will "push" them
    // back onto the stack. (This is consistent with how JS handles this.)
    const wasm::FunctionSig* sig =
        code->native_module()->module()->functions[code->index()].sig;
    int parameter_stack_slots, return_stack_slots;
    GetWasmStackSlotsCounts(sig, &parameter_stack_slots, &return_stack_slots);

    unsigned input_frame_size = fp_to_sp_delta +
                                parameter_stack_slots * kSystemPointerSize +
                                CommonFrameConstants::kFixedFrameSizeAboveFp;
    input_ = FrameDescription::Create(input_frame_size, parameter_stack_slots,
                                      isolate_);
    return;
  }
#endif

  compiled_code_ = isolate_->heap()->FindCodeForInnerPointer(from);
  DCHECK(!compiled_code_.is_null());
  DCHECK(IsCode(compiled_code_));

  DCHECK(IsJSFunction(function));
  {
    HandleScope scope(isolate_);
    PROFILE(isolate_, CodeDeoptEvent(handle(compiled_code_, isolate_), kind,
                                     from_, fp_to_sp_delta_));
  }
  unsigned size = ComputeInputFrameSize();
  const int parameter_count = compiled_code_->parameter_count();
  DCHECK_EQ(
      parameter_count,
      function->shared()->internal_formal_parameter_count_with_receiver());
  input_ = FrameDescription::Create(size, parameter_count, isolate_);

  // Calculate the deopt exit index from the return address.
  DCHECK_GT(kEagerDeoptExitSize, 0);
  DCHECK_GT(kLazyDeoptExitSize, 0);
  Tagged<DeoptimizationData> deopt_data =
      Cast<DeoptimizationData>(compiled_code_->deoptimization_data());
  Address deopt_start = compiled_code_->instruction_start() +
                        deopt_data->DeoptExitStart().value();
  int eager_deopt_count = deopt_data->EagerDeoptCount().value();
  Address lazy_deopt_start =
      deopt_start + eager_deopt_count * kEagerDeoptExitSize;
  // The deoptimization exits are sorted so that lazy deopt exits appear after
  // eager deopts.
  static_assert(static_cast<int>(DeoptimizeKind::kLazy) ==
                    static_cast<int>(kLastDeoptimizeKind),
                "lazy deopts are expected to be emitted last");
  // from_ is the value of the link register after the call to the
  // deoptimizer, so for the last lazy deopt, from_ points to the first
  // non-lazy deopt, so we use <=, similarly for the last non-lazy deopt and
  // the first deopt with resume entry.
  if (from_ <= lazy_deopt_start) {
    DCHECK_EQ(kind, DeoptimizeKind::kEager);
    int offset = static_cast<int>(from_ - kEagerDeoptExitSize - deopt_start);
    DCHECK_EQ(0, offset % kEagerDeoptExitSize);
    deopt_exit_index_ = offset / kEagerDeoptExitSize;
  } else {
    DCHECK_EQ(kind, DeoptimizeKind::kLazy);
    int offset =
        static_cast<int>(from_ - kLazyDeoptExitSize - lazy_deopt_start);
    DCHECK_EQ(0, offset % kLazyDeoptExitSize);
    deopt_exit_index_ = eager_deopt_count + (offset / kLazyDeoptExitSize);
  }
}
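
// Worked example for the exit-index computation above: with two eager deopt
// exits of size kEagerDeoptExitSize each, exit #0 starts at deopt_start and
// exit #1 at deopt_start + kEagerDeoptExitSize. Since from_ is the return
// address of the call, a deopt through exit #1 yields
// from_ == deopt_start + 2 * kEagerDeoptExitSize, so
// offset == kEagerDeoptExitSize and deopt_exit_index_ == 1.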

Deoptimizer::~Deoptimizer() {
  DCHECK(input_ == nullptr && output_ == nullptr);
#ifdef V8_ENABLE_CET_SHADOW_STACK
  DCHECK_NULL(shadow_stack_);
#endif
  DCHECK_NULL(disallow_garbage_collection_);
  delete trace_scope_;
}

void Deoptimizer::DeleteFrameDescriptions() {
  delete input_;
  for (int i = 0; i < output_count_; ++i) {
    if (output_[i] != input_) delete output_[i];
  }
  delete[] output_;
  input_ = nullptr;
  output_ = nullptr;
#ifdef V8_ENABLE_CET_SHADOW_STACK
  if (shadow_stack_ != nullptr) {
    delete[] shadow_stack_;
    shadow_stack_ = nullptr;
  }
#endif  // V8_ENABLE_CET_SHADOW_STACK
#ifdef DEBUG
  DCHECK(!AllowGarbageCollection::IsAllowed());
  DCHECK_NOT_NULL(disallow_garbage_collection_);
  delete disallow_garbage_collection_;
  disallow_garbage_collection_ = nullptr;
#endif  // DEBUG
}

Builtin Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind kind) {
  switch (kind) {
    case DeoptimizeKind::kEager:
      return Builtin::kDeoptimizationEntry_Eager;
    case DeoptimizeKind::kLazy:
      return Builtin::kDeoptimizationEntry_Lazy;
  }
}

namespace {

int LookupCatchHandler(Isolate* isolate, TranslatedFrame* translated_frame,
                       int* data_out) {
  switch (translated_frame->kind()) {
    case TranslatedFrame::kUnoptimizedFunction: {
      int bytecode_offset = translated_frame->bytecode_offset().ToInt();
      HandlerTable table(
          translated_frame->raw_shared_info()->GetBytecodeArray(isolate));
      int handler_index = table.LookupHandlerIndexForRange(bytecode_offset);
      if (handler_index == HandlerTable::kNoHandlerFound) return handler_index;
      *data_out = table.GetRangeData(handler_index);
      table.MarkHandlerUsed(handler_index);
      return table.GetRangeHandler(handler_index);
    }
    case TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch: {
      return 0;
    }
    default:
      break;
  }
  return -1;
}

}  // namespace

void Deoptimizer::TraceDeoptBegin(int optimization_id,
                                  BytecodeOffset bytecode_offset) {
  DCHECK(tracing_enabled());
  FILE* file = trace_scope()->file();
  DeoptInfo info = GetDeoptInfo();
  PrintF(file, "[bailout (kind: %s, reason: %s): begin. deoptimizing ",
         MessageFor(deopt_kind_), DeoptimizeReasonToString(info.deopt_reason));
  if (IsJSFunction(function_)) {
    ShortPrint(function_, file);
    PrintF(file, ", ");
  }
  ShortPrint(compiled_code_, file);
  PrintF(file,
         ", opt id %d, "
#ifdef DEBUG
         "node id %d, "
#endif  // DEBUG
         "bytecode offset %d, deopt exit %d, FP to SP "
         "delta %d, "
         "caller SP " V8PRIxPTR_FMT ", pc " V8PRIxPTR_FMT "]\n",
         optimization_id,
#ifdef DEBUG
         info.node_id,
#endif  // DEBUG
         bytecode_offset.ToInt(), deopt_exit_index_, fp_to_sp_delta_,
         caller_frame_top_, PointerAuthentication::StripPAC(from_));
  if (verbose_tracing_enabled()) {
    PrintF(file, " ;;; deoptimize at ");
    OFStream outstr(file);
    info.position.Print(outstr, compiled_code_);
    PrintF(file, "\n");
  }
}

void Deoptimizer::TraceDeoptEnd(double deopt_duration) {
  PrintF(trace_scope()->file(), "[bailout end. took %0.3f ms]\n",
         deopt_duration);
}

// static
void Deoptimizer::TraceMarkForDeoptimization(Isolate* isolate,
                                             Tagged<Code> code,
                                             LazyDeoptimizeReason reason) {
  // `DiscardBaselineCodeVisitor` can discard baseline code for debug purposes,
  // and it may use `MarkForDeoptimization` for interpreting the new stack
  // frame as an interpreter frame, but it does not have deoptimization data.
  if (code->kind() == CodeKind::BASELINE) return;

  DCHECK(code->uses_deoptimization_data());
  if (!v8_flags.trace_deopt && !v8_flags.log_deopt) return;

  DisallowGarbageCollection no_gc;
  Tagged<DeoptimizationData> deopt_data =
      Cast<DeoptimizationData>(code->deoptimization_data());
  CodeTracer::Scope scope(isolate->GetCodeTracer());
  if (v8_flags.trace_deopt) {
    PrintF(scope.file(), "[marking dependent code ");
    ShortPrint(code, scope.file());
    PrintF(scope.file(), " (");
    ShortPrint(deopt_data->GetSharedFunctionInfo(), scope.file());
    PrintF(") (opt id %d) for deoptimization, reason: %s]\n",
           deopt_data->OptimizationId().value(),
           DeoptimizeReasonToString(reason));
  }
  if (!v8_flags.log_deopt) return;
  no_gc.Release();
  {
    HandleScope handle_scope(isolate);
    PROFILE(isolate,
            CodeDependencyChangeEvent(
                direct_handle(code, isolate),
                direct_handle(deopt_data->GetSharedFunctionInfo(), isolate),
                DeoptimizeReasonToString(reason)));
  }
}

// static
void Deoptimizer::TraceEvictFromOptimizedCodeCache(
    Isolate* isolate, Tagged<SharedFunctionInfo> sfi, const char* reason) {
  if (!v8_flags.trace_deopt_verbose) return;

  CodeTracer::Scope scope(isolate->GetCodeTracer());
  PrintF(scope.file(),
         "[evicting optimized code marked for deoptimization (%s) for ",
         reason);
  ShortPrint(sfi, scope.file());
  PrintF(scope.file(), "]\n");
}

#ifdef DEBUG
// static
void Deoptimizer::TraceFoundActivation(Isolate* isolate,
                                       Tagged<JSFunction> function) {
  if (!v8_flags.trace_deopt_verbose) return;
  CodeTracer::Scope scope(isolate->GetCodeTracer());
  PrintF(scope.file(), "[deoptimizer found activation of function: ");
  function->PrintName(scope.file());
  PrintF(scope.file(), " / %" V8PRIxPTR "]\n", function.ptr());
}
#endif  // DEBUG

// static
void Deoptimizer::TraceDeoptAll(Isolate* isolate) {
  if (!v8_flags.trace_deopt_verbose) return;
  CodeTracer::Scope scope(isolate->GetCodeTracer());
  PrintF(scope.file(), "[deoptimize all code in all contexts]\n");
}

#if V8_ENABLE_WEBASSEMBLY
namespace {
std::pair<wasm::WasmCode*,
          std::unique_ptr<wasm::LiftoffFrameDescriptionForDeopt>>
CompileWithLiftoffAndGetDeoptInfo(wasm::NativeModule* native_module,
                                  int function_index,
                                  BytecodeOffset deopt_point, bool is_topmost) {
  wasm::CompilationEnv env = wasm::CompilationEnv::ForModule(native_module);
  // We only deopt after the NativeModule is finished, hence wire bytes do not
  // change any more. We can thus hold a non-owning vector here.
  base::Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
  const wasm::WasmFunction* function = &env.module->functions[function_index];
  bool is_shared = env.module->type(function->sig_index).is_shared;
  wasm::FunctionBody body{function->sig, function->code.offset(),
                          wire_bytes.begin() + function->code.offset(),
                          wire_bytes.begin() + function->code.end_offset(),
                          is_shared};
  wasm::WasmCompilationResult result = ExecuteLiftoffCompilation(
      &env, body,
      wasm::LiftoffOptions{}
          .set_func_index(function_index)
          .set_deopt_info_bytecode_offset(deopt_point.ToInt())
          .set_deopt_location_kind(
              is_topmost ? wasm::LocationKindForDeopt::kEagerDeopt
                         : wasm::LocationKindForDeopt::kInlinedCall));

  // Replace the optimized code with the unoptimized code in the
  // WasmCodeManager as a deopt was reached.
  wasm::UnpublishedWasmCode compiled_code =
      native_module->AddCompiledCode(result);
  wasm::WasmCodeRefScope code_ref_scope;
  // TODO(mliedtke): This might unoptimize functions because they were inlined
  // into a function that now needs to deopt them while the optimized function
  // might have taken different inlining decisions.
  // TODO(mliedtke): The code cache should also be invalidated.
  wasm::WasmCode* wasm_code = native_module->compilation_state()->PublishCode(
      base::VectorOf(&compiled_code, 1))[0];
  return {wasm_code, std::move(result.liftoff_frame_descriptions)};
}
}  // anonymous namespace

FrameDescription* Deoptimizer::DoComputeWasmLiftoffFrame(
    TranslatedFrame& frame, wasm::NativeModule* native_module,
    Tagged<WasmTrustedInstanceData> wasm_trusted_instance, int frame_index,
    std::stack<intptr_t>& shadow_stack) {
  // Given inlined frames where function a calls b, b is considered the
  // topmost because b is on top of the call stack! This is aligned with the
  // names used by the JS deopt.
  const bool is_bottommost = frame_index == 0;
  const bool is_topmost = output_count_ - 1 == frame_index;
  // Recompile the liftoff (unoptimized) wasm code for the input frame.
  // TODO(mliedtke): This recompiles every single function even if it never
  // got optimized and exists as a liftoff variant in the WasmCodeManager, as
  // we also need to compute the deopt information. Can we avoid some of the
  // extra work here?
  auto [wasm_code, liftoff_description] = CompileWithLiftoffAndGetDeoptInfo(
      native_module, frame.wasm_function_index(), frame.bytecode_offset(),
      is_topmost);

  DCHECK(liftoff_description);

  int parameter_stack_slots, return_stack_slots;
  const wasm::FunctionSig* sig =
      native_module->module()->functions[frame.wasm_function_index()].sig;
  GetWasmStackSlotsCounts(sig, &parameter_stack_slots, &return_stack_slots);

  // Allocate and populate the FrameDescription describing the output frame.
  const uint32_t output_frame_size = liftoff_description->total_frame_size;
  const uint32_t total_output_frame_size =
      output_frame_size + parameter_stack_slots * kSystemPointerSize +
      CommonFrameConstants::kFixedFrameSizeAboveFp;

  if (verbose_tracing_enabled()) {
    std::ostringstream outstream;
    outstream << " Liftoff stack & register state for function index "
              << frame.wasm_function_index() << ", frame size "
              << output_frame_size << ", total frame size "
              << total_output_frame_size << '\n';
    size_t index = 0;
    for (const wasm::LiftoffVarState& state : liftoff_description->var_state) {
      outstream << " " << index++ << ": " << state << '\n';
    }
    FILE* file = trace_scope()->file();
    PrintF(file, "%s", outstream.str().c_str());
  }

  FrameDescription* output_frame = FrameDescription::Create(
      total_output_frame_size, parameter_stack_slots, isolate());

  // Copy the parameter stack slots.
  static_assert(CommonFrameConstants::kFixedFrameSizeAboveFp ==
                2 * kSystemPointerSize);
  uint32_t output_offset = total_output_frame_size;
  // Zero out the incoming parameter slots. This will make sure that tagged
  // values are safely ignored by the gc.
  // Note that zero is clearly not the correct value. Still, liftoff copies
  // all parameters into "its own" stack slots at the beginning and always
  // uses these slots to restore parameters from the stack.
  for (int i = 0; i < parameter_stack_slots; ++i) {
    output_offset -= kSystemPointerSize;
    output_frame->SetFrameSlot(output_offset, 0);
  }

  // Calculate top and update previous caller's pc.
  Address top = is_bottommost ? caller_frame_top_ - total_output_frame_size
                              : output_[frame_index - 1]->GetTop() -
                                    total_output_frame_size;
  output_frame->SetTop(top);
  Address pc = wasm_code->instruction_start() + liftoff_description->pc_offset;
  // Sign the PC. Note that for the non-topmost frames the stack pointer at
  // which the PC is stored as the "caller pc" / return address depends on the
  // amount of parameter stack slots of the callee. To simplify the code, we
  // just sign it as if there weren't any parameter stack slots.
  // When building up the next frame we can check and "move" the caller PC by
  // signing it again with the correct stack pointer.
  output_frame->SetPc(PointerAuthentication::SignAndCheckPC(
      isolate(), pc, output_frame->GetTop()));
#ifdef V8_ENABLE_CET_SHADOW_STACK
  if (v8_flags.cet_compatible) {
    if (is_topmost) {
      shadow_stack.push(pc);
    } else {
      shadow_stack.push(wasm_code->instruction_start() +
                        liftoff_description->adapt_shadow_stack_pc_offset);
    }
  }
#endif  // V8_ENABLE_CET_SHADOW_STACK

  // Sign the previous frame's PC.
  if (is_bottommost) {
    Address old_context =
        caller_frame_top_ - input_->parameter_count() * kSystemPointerSize;
    Address new_context =
        caller_frame_top_ - parameter_stack_slots * kSystemPointerSize;
    caller_pc_ = PointerAuthentication::MoveSignedPC(isolate(), caller_pc_,
                                                     new_context, old_context);
  } else if (parameter_stack_slots != 0) {
    // The previous frame's PC is stored at a different stack slot, so we need
    // to re-sign the PC for the new context (stack pointer).
    FrameDescription* previous_frame = output_[frame_index - 1];
    Address old_context = previous_frame->GetTop();
    Address new_context =
        old_context - parameter_stack_slots * kSystemPointerSize;
    Address signed_pc = PointerAuthentication::MoveSignedPC(
        isolate(), previous_frame->GetPc(), new_context, old_context);
    previous_frame->SetPc(signed_pc);
  }

  // Store the caller PC.
  output_offset -= kSystemPointerSize;
  output_frame->SetFrameSlot(
      output_offset,
      is_bottommost ? caller_pc_ : output_[frame_index - 1]->GetPc());
  // Store the caller frame pointer.
  output_offset -= kSystemPointerSize;
  output_frame->SetFrameSlot(
      output_offset,
      is_bottommost ? caller_fp_ : output_[frame_index - 1]->GetFp());

  CHECK_EQ(output_frame_size, output_offset);
  int base_offset = output_frame_size;

  // Set the trusted instance data on the output frame.
  output_frame->SetFrameSlot(
      base_offset - WasmLiftoffFrameConstants::kInstanceDataOffset,
      wasm_trusted_instance.ptr());
  if (liftoff_description->trusted_instance != no_reg) {
    output_frame->SetRegister(liftoff_description->trusted_instance.code(),
                              wasm_trusted_instance.ptr());
  }

  DCHECK_GE(translated_state_.frames().size(), 1);
  auto liftoff_iter = liftoff_description->var_state.begin();
  if constexpr (Is64()) {
    // On 32 bit platforms int64s are represented as 2 values on Turbofan.
    // Liftoff on the other hand treats them as 1 value (a register pair).
    CHECK_EQ(liftoff_description->var_state.size(), frame.GetValueCount());
  }

  bool int64_lowering_is_low = true;

  for (const TranslatedValue& value : frame) {
    bool skip_increase_liftoff_iter = false;
    switch (liftoff_iter->loc()) {
      case wasm::LiftoffVarState::kIntConst:
        if (!Is64() && liftoff_iter->kind() == wasm::ValueKind::kI64) {
          if (int64_lowering_is_low) skip_increase_liftoff_iter = true;
          int64_lowering_is_low = !int64_lowering_is_low;
        }
        break;  // Nothing to be done for constants in liftoff frame.
      case wasm::LiftoffVarState::kRegister:
        if (liftoff_iter->is_gp_reg()) {
          intptr_t reg_value = kZapValue;
          switch (value.kind()) {
            case TranslatedValue::Kind::kInt32:
              // Ensure that the upper half is zeroed out.
              reg_value = static_cast<uint32_t>(value.int32_value());
              break;
            case TranslatedValue::Kind::kTagged:
              reg_value = value.raw_literal().ptr();
              break;
            case TranslatedValue::Kind::kInt64:
              reg_value = value.int64_value();
              break;
            default:
              UNIMPLEMENTED();
          }
          output_frame->SetRegister(liftoff_iter->reg().gp().code(), reg_value);
        } else if (liftoff_iter->is_fp_reg()) {
          switch (value.kind()) {
            case TranslatedValue::Kind::kDouble:
              output_frame->SetDoubleRegister(liftoff_iter->reg().fp().code(),
                                              value.double_value());
              break;
            case TranslatedValue::Kind::kFloat:
              // Liftoff doesn't have a concept of floating point registers.
              // This is an important distinction as e.g. on arm s1 and d1 are
              // two completely distinct registers.
              static_assert(std::is_same_v<decltype(liftoff_iter->reg().fp()),
                                           DoubleRegister>);
              output_frame->SetDoubleRegister(
                  liftoff_iter->reg().fp().code(),
                  Float64::FromBits(value.float_value().get_bits()));
              break;
            case TranslatedValue::Kind::kSimd128:
              output_frame->SetSimd128Register(liftoff_iter->reg().fp().code(),
                                               value.simd_value());
              break;
            default:
              UNIMPLEMENTED();
          }
        } else if (!Is64() && liftoff_iter->is_gp_reg_pair()) {
          intptr_t reg_value = kZapValue;
          switch (value.kind()) {
            case TranslatedValue::Kind::kInt32:
              // Ensure that the upper half is zeroed out.
              reg_value = static_cast<uint32_t>(value.int32_value());
              break;
            case TranslatedValue::Kind::kTagged:
              reg_value = value.raw_literal().ptr();
              break;
            default:
              UNREACHABLE();
          }
          int8_t reg = int64_lowering_is_low
                           ? liftoff_iter->reg().low_gp().code()
                           : liftoff_iter->reg().high_gp().code();
          output_frame->SetRegister(reg, reg_value);
          if (int64_lowering_is_low) skip_increase_liftoff_iter = true;
          int64_lowering_is_low = !int64_lowering_is_low;
        } else if (!Is64() && liftoff_iter->is_fp_reg_pair()) {
          CHECK_EQ(value.kind(), TranslatedValue::Kind::kSimd128);
          Simd128 simd_value = value.simd_value();
          Address val_ptr = reinterpret_cast<Address>(&simd_value);
          output_frame->SetDoubleRegister(
              liftoff_iter->reg().low_fp().code(),
              Float64::FromBits(base::ReadUnalignedValue<uint64_t>(val_ptr)));
          output_frame->SetDoubleRegister(
              liftoff_iter->reg().high_fp().code(),
              Float64::FromBits(base::ReadUnalignedValue<uint64_t>(
                  val_ptr + sizeof(double))));
        } else {
          UNREACHABLE();
        }
        break;
      case wasm::LiftoffVarState::kStack: {
#ifdef V8_TARGET_BIG_ENDIAN
        static constexpr int kLiftoffStackBias = 4;
#else
        static constexpr int kLiftoffStackBias = 0;
#endif
        switch (liftoff_iter->kind()) {
          case wasm::ValueKind::kI32:
            CHECK(value.kind() == TranslatedValue::Kind::kInt32 ||
                  value.kind() == TranslatedValue::Kind::kUint32);
            output_frame->SetLiftoffFrameSlot32(
                base_offset - liftoff_iter->offset() + kLiftoffStackBias,
                value.int32_value_);
            break;
          case wasm::ValueKind::kF32:
            CHECK_EQ(value.kind(), TranslatedValue::Kind::kFloat);
            output_frame->SetLiftoffFrameSlot32(
                base_offset - liftoff_iter->offset() + kLiftoffStackBias,
                value.float_value().get_bits());
            break;
          case wasm::ValueKind::kI64:
            if constexpr (Is64()) {
              CHECK(value.kind() == TranslatedValue::Kind::kInt64 ||
                    value.kind() == TranslatedValue::Kind::kUint64);
              output_frame->SetLiftoffFrameSlot64(
                  base_offset - liftoff_iter->offset(), value.int64_value_);
            } else {
              CHECK(value.kind() == TranslatedValue::Kind::kInt32 ||
                    value.kind() == TranslatedValue::Kind::kUint32);
              // TODO(bigendian): Either the offsets or the default for
              // int64_lowering_is_low might have to be swapped.
              if (int64_lowering_is_low) {
                skip_increase_liftoff_iter = true;
                output_frame->SetLiftoffFrameSlot32(
                    base_offset - liftoff_iter->offset(), value.int32_value_);
              } else {
                output_frame->SetLiftoffFrameSlot32(
                    base_offset - liftoff_iter->offset() + sizeof(int32_t),
                    value.int32_value_);
              }
              int64_lowering_is_low = !int64_lowering_is_low;
            }
            break;
          case wasm::ValueKind::kS128: {
            int64x2 values = value.simd_value().to_i64x2();
            const int offset = base_offset - liftoff_iter->offset();
            output_frame->SetLiftoffFrameSlot64(offset, values.val[0]);
            output_frame->SetLiftoffFrameSlot64(offset + sizeof(int64_t),
                                                values.val[1]);
            break;
          }
          case wasm::ValueKind::kF64:
            CHECK_EQ(value.kind(), TranslatedValue::Kind::kDouble);
            output_frame->SetLiftoffFrameSlot64(
                base_offset - liftoff_iter->offset(),
                value.double_value().get_bits());
            break;
          case wasm::ValueKind::kRef:
          case wasm::ValueKind::kRefNull:
            CHECK_EQ(value.kind(), TranslatedValue::Kind::kTagged);
            output_frame->SetLiftoffFrameSlotPointer(
                base_offset - liftoff_iter->offset(), value.raw_literal_.ptr());
            break;
          default:
            UNIMPLEMENTED();
        }
        break;
      }
    }
    DCHECK_IMPLIES(skip_increase_liftoff_iter, !Is64());
    if (!skip_increase_liftoff_iter) {
      ++liftoff_iter;
    }
  }

  // Store the frame kind.
  uint32_t frame_type_offset =
      base_offset + CommonFrameConstants::kContextOrFrameTypeOffset;
  output_frame->SetFrameSlot(frame_type_offset,
                             StackFrame::TypeToMarker(StackFrame::WASM));
  // Store the feedback vector in its stack slot.
  Tagged<FixedArray> module_feedback =
      wasm_trusted_instance->feedback_vectors();
  uint32_t feedback_offset =
      base_offset - WasmLiftoffFrameConstants::kFeedbackVectorOffset;
  uint32_t fct_feedback_index = wasm::declared_function_index(
      native_module->module(), frame.wasm_function_index());
  CHECK_LT(fct_feedback_index, module_feedback->length());
  Tagged<Object> feedback_vector = module_feedback->get(fct_feedback_index);
  if (IsSmi(feedback_vector)) {
    if (verbose_tracing_enabled()) {
      PrintF(trace_scope()->file(),
             "Deopt with uninitialized feedback vector for function %s [%d]\n",
             wasm_code->DebugName().c_str(), frame.wasm_function_index());
    }
    // Not having a feedback vector can happen with multiple instantiations of
    // the same module as the type feedback is separate per instance but the
    // code is shared (even cross-isolate).
    // Note that we cannot allocate the feedback vector here. Instead, store
    // the function index, so that the feedback vector can be populated by the
    // deopt finish builtin called from Liftoff.
    output_frame->SetFrameSlot(feedback_offset,
                               Smi::FromInt(fct_feedback_index).ptr());
  } else {
    output_frame->SetFrameSlot(feedback_offset, feedback_vector.ptr());
  }

  // Instead of a builtin continuation for wasm the deopt builtin will
  // call a C function to destroy the Deoptimizer object and then directly
  // return to the liftoff code.
  output_frame->SetContinuation(0);

  const intptr_t fp_value = top + output_frame_size;
  output_frame->SetFp(fp_value);
  Register fp_reg = JavaScriptFrame::fp_register();
  output_frame->SetRegister(fp_reg.code(), fp_value);
  output_frame->SetRegister(kRootRegister.code(), isolate()->isolate_root());
#ifdef V8_COMPRESS_POINTERS
  output_frame->SetRegister(kPtrComprCageBaseRegister.code(),
                            isolate()->cage_base());
#endif

  return output_frame;
}

// Build up the output frames for a wasm deopt. This creates the
// FrameDescription objects representing the output frames to be
// "materialized" on the stack.
void Deoptimizer::DoComputeOutputFramesWasmImpl() {
  CHECK(v8_flags.wasm_deopt);
  base::ElapsedTimer timer;
  // Lookup the deopt info for the input frame.
  wasm::WasmCode* code = compiled_optimized_wasm_code_;
  DCHECK_NOT_NULL(code);
  wasm::WasmDeoptView deopt_view(code->deopt_data());
  wasm::WasmDeoptEntry deopt_entry =
      deopt_view.GetDeoptEntry(deopt_exit_index_);

  if (tracing_enabled()) {
    timer.Start();
    FILE* file = trace_scope()->file();
    PrintF(file,
           "[bailout (kind: %s, reason: %s, type: Wasm): begin. deoptimizing "
           "%s, function index %d, bytecode offset %d, deopt exit %d, FP to SP "
           "delta %d, "
           "pc " V8PRIxPTR_FMT "]\n",
           MessageFor(deopt_kind_),
           DeoptimizeReasonToString(DeoptimizeReason::kWrongCallTarget),
           code->DebugName().c_str(), code->index(),
           deopt_entry.bytecode_offset.ToInt(), deopt_entry.translation_index,
           fp_to_sp_delta_, PointerAuthentication::StripPAC(from_));
  }

  base::Vector<const uint8_t> off_heap_translations =
      deopt_view.GetTranslationsArray();

  DeoptTranslationIterator state_iterator(off_heap_translations,
                                          deopt_entry.translation_index);
  wasm::NativeModule* native_module = code->native_module();
  int parameter_count = static_cast<int>(
      native_module->module()->functions[code->index()].sig->parameter_count());
  DeoptimizationLiteralProvider literals(
      deopt_view.BuildDeoptimizationLiteralArray());

  Register fp_reg = JavaScriptFrame::fp_register();
  stack_fp_ = input_->GetRegister(fp_reg.code());
  Address fp_address = input_->GetFramePointerAddress();
  caller_fp_ = Memory<intptr_t>(fp_address);
  caller_pc_ =
      Memory<intptr_t>(fp_address + CommonFrameConstants::kCallerPCOffset);
  caller_frame_top_ = stack_fp_ +
                      CommonFrameConstants::kFixedFrameSizeAboveFp +
                      input_->parameter_count() * kSystemPointerSize;

  FILE* trace_file =
      verbose_tracing_enabled() ? trace_scope()->file() : nullptr;
  translated_state_.Init(isolate_, input_->GetFramePointerAddress(), stack_fp_,
                         &state_iterator, {}, literals,
                         input_->GetRegisterValues(), trace_file,
                         parameter_count, parameter_count);

  const size_t output_frames = translated_state_.frames().size();
  CHECK_GT(output_frames, 0);
  output_count_ = static_cast<int>(output_frames);
  output_ = new FrameDescription* [output_frames] {};

  // The top output function *should* be the same as the optimized function
  // with the deopt. However, this is not the case for inlined return calls.
  // The optimized function still needs to be invalidated.
  if (translated_state_.frames()[0].wasm_function_index() !=
      compiled_optimized_wasm_code_->index()) {
    CompileWithLiftoffAndGetDeoptInfo(native_module,
                                      compiled_optimized_wasm_code_->index(),
                                      deopt_entry.bytecode_offset, false);
  }

  // Read the trusted instance data from the input frame.
  Tagged<WasmTrustedInstanceData> wasm_trusted_instance =
      Cast<WasmTrustedInstanceData>(Tagged<Object>(input_->GetFrameSlot(
          input_->GetFrameSize() -
          (2 + input_->parameter_count()) * kSystemPointerSize -
          WasmLiftoffFrameConstants::kInstanceDataOffset)));

  std::stack<intptr_t> shadow_stack;
  for (int i = 0; i < output_count_; ++i) {
    TranslatedFrame& frame = translated_state_.frames()[i];
    output_[i] = DoComputeWasmLiftoffFrame(
        frame, native_module, wasm_trusted_instance, i, shadow_stack);
  }

#ifdef V8_ENABLE_CET_SHADOW_STACK
  if (v8_flags.cet_compatible) {
    CHECK_EQ(shadow_stack_count_, 0);
    shadow_stack_ = new intptr_t[shadow_stack.size()];
    while (!shadow_stack.empty()) {
      shadow_stack_[shadow_stack_count_++] = shadow_stack.top();
      shadow_stack.pop();
    }
    CHECK_EQ(shadow_stack_count_, output_count_);
  }
#endif  // V8_ENABLE_CET_SHADOW_STACK

  {
    // Mark the cached feedback result produced by the
    // TransitiveTypeFeedbackProcessor as outdated.
    // This is required to prevent deopt loops as new feedback is ignored
    // otherwise.
    wasm::TypeFeedbackStorage& feedback =
        native_module->module()->type_feedback;
    base::MutexGuard mutex_guard(&feedback.mutex);
    for (const TranslatedFrame& frame : translated_state_) {
      int index = frame.wasm_function_index();
      auto iter = feedback.feedback_for_function.find(index);
      if (iter != feedback.feedback_for_function.end()) {
        iter->second.needs_reprocessing_after_deopt = true;
      }
    }
    // Reset the tierup priority. This is important as the tierup trigger will
    // only be taken into account if the tierup_priority is a power of two (to
    // prevent a hot function from being enqueued too many times into the
    // compilation queue).
    feedback.feedback_for_function[code->index()].tierup_priority = 0;
    // Add a sample for how many times this function was deopted.
    isolate()->counters()->wasm_deopts_per_function()->AddSample(
        ++feedback.deopt_count_for_function[code->index()]);
  }

  // Reset the tiering budget of the function that triggered the deopt.
  int declared_func_index =
      wasm::declared_function_index(native_module->module(), code->index());
  wasm_trusted_instance->tiering_budget_array()[declared_func_index].store(
      v8_flags.wasm_tiering_budget, std::memory_order_relaxed);

  isolate()->counters()->wasm_deopts_executed()->AddSample(
      wasm::GetWasmEngine()->IncrementDeoptsExecutedCount());

  if (verbose_tracing_enabled()) {
    TraceDeoptEnd(timer.Elapsed().InMillisecondsF());
  }
}

void Deoptimizer::GetWasmStackSlotsCounts(const wasm::FunctionSig* sig,
                                          int* parameter_stack_slots,
                                          int* return_stack_slots) {
  class DummyResultCollector {
   public:
    void AddParamAt(size_t index, LinkageLocation location) {}
    void AddReturnAt(size_t index, LinkageLocation location) {}
  } result_collector;

  // On 32 bits we need to perform the int64 lowering for the signature.
#if V8_TARGET_ARCH_32_BIT
  if (!alloc_) {
    DCHECK(!zone_);
    alloc_.emplace();
    zone_.emplace(&*alloc_, "deoptimizer i32sig lowering");
  }
  sig = GetI32Sig(&*zone_, sig);
#endif
  int untagged_slots, untagged_return_slots;  // Unused.
  wasm::IterateSignatureImpl(sig, false, result_collector, &untagged_slots,
                             parameter_stack_slots, &untagged_return_slots,
                             return_stack_slots);
}
#endif  // V8_ENABLE_WEBASSEMBLY

namespace {

bool DeoptimizedMaglevvedCodeEarly(Isolate* isolate,
                                   Tagged<JSFunction> function,
                                   Tagged<Code> code) {
  if (!code->is_maglevved()) return false;
  if (function->GetRequestedOptimizationIfAny(isolate) ==
      CodeKind::TURBOFAN_JS) {
    // We request turbofan after consuming the invocation_count_for_turbofan
    // budget which is greater than
    // invocation_count_for_maglev_with_delay.
    return false;
  }
  int current_invocation_budget =
      function->raw_feedback_cell()->interrupt_budget() /
      function->shared()->GetBytecodeArray(isolate)->length();
  return current_invocation_budget >=
         v8_flags.invocation_count_for_turbofan -
             v8_flags.invocation_count_for_maglev_with_delay;
}

}  // namespace

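// Worked example for the budget check above (flag values illustrative only):
// suppose invocation_count_for_turbofan == 3000 and
// invocation_count_for_maglev_with_delay == 600. A function's remaining
// interrupt budget, normalized by bytecode length, starts near 3000 and drops
// as it runs; if at deopt time it is still >= 2400, the function had executed
// for at most ~600 invocations under Maglev, so the deopt counts as "early".
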
// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
void Deoptimizer::DoComputeOutputFrames() {
  // When we call this function, the return address of the previous frame has
  // been removed from the stack by the DeoptimizationEntry builtin, so the
  // stack is not iterable by the StackFrameIteratorForProfiler.
#if V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK
  DCHECK_EQ(0, isolate()->isolate_data()->stack_is_iterable());
#endif
  base::ElapsedTimer timer;

#if V8_ENABLE_WEBASSEMBLY
  if (v8_flags.wasm_deopt && function_.is_null()) {
    DoComputeOutputFramesWasmImpl();
    return;
  }
#endif

  // Determine basic deoptimization information. The optimized frame is
  // described by the input data.
  Tagged<DeoptimizationData> input_data =
      Cast<DeoptimizationData>(compiled_code_->deoptimization_data());

  {
    // Read caller's PC, caller's FP and caller's constant pool values
    // from input frame. Compute caller's frame top address.

    Register fp_reg = JavaScriptFrame::fp_register();
    stack_fp_ = input_->GetRegister(fp_reg.code());

    caller_frame_top_ = stack_fp_ + ComputeInputFrameAboveFpFixedSize();

    Address fp_address = input_->GetFramePointerAddress();
    caller_fp_ = Memory<intptr_t>(fp_address);
    caller_pc_ =
        Memory<intptr_t>(fp_address + CommonFrameConstants::kCallerPCOffset);
    actual_argument_count_ = static_cast<int>(
        Memory<intptr_t>(fp_address + StandardFrameConstants::kArgCOffset));

    if (V8_EMBEDDED_CONSTANT_POOL_BOOL) {
      caller_constant_pool_ = Memory<intptr_t>(
          fp_address + CommonFrameConstants::kConstantPoolOffset);
    }
  }

  StackGuard* const stack_guard = isolate()->stack_guard();
  CHECK_GT(static_cast<uintptr_t>(caller_frame_top_),
           stack_guard->real_jslimit());

  BytecodeOffset bytecode_offset =
      input_data->GetBytecodeOffsetOrBuiltinContinuationId(deopt_exit_index_);
  auto translations = input_data->FrameTranslation();
  unsigned translation_index =
      input_data->TranslationIndex(deopt_exit_index_).value();

  if (tracing_enabled()) {
    timer.Start();
    TraceDeoptBegin(input_data->OptimizationId().value(), bytecode_offset);
  }

  FILE* trace_file =
      verbose_tracing_enabled() ? trace_scope()->file() : nullptr;
  DeoptimizationFrameTranslation::Iterator state_iterator(translations,
                                                          translation_index);
  DeoptimizationLiteralProvider literals(input_data->LiteralArray());
  translated_state_.Init(isolate_, input_->GetFramePointerAddress(), stack_fp_,
                         &state_iterator, input_data->ProtectedLiteralArray(),
                         literals, input_->GetRegisterValues(), trace_file,
                         compiled_code_->parameter_count_without_receiver(),
                         actual_argument_count_ - kJSArgcReceiverSlots);

  bytecode_offset_in_outermost_frame_ =
      translated_state_.frames()[0].bytecode_offset();

  // Do the input frame to output frame(s) translation.
  size_t count = translated_state_.frames().size();
  if (is_restart_frame()) {
    // If the debugger requested to restart a particular frame, only
    // materialize up to that frame.
    count = restart_frame_index_ + 1;
  } else if (deoptimizing_throw_) {
    // If we are supposed to go to the catch handler, find the catching frame
    // for the catch and make sure we only deoptimize up to that frame.
    size_t catch_handler_frame_index = count;
    for (size_t i = count; i-- > 0;) {
      catch_handler_pc_offset_ = LookupCatchHandler(
          isolate(), &(translated_state_.frames()[i]), &catch_handler_data_);
      if (catch_handler_pc_offset_ >= 0) {
        catch_handler_frame_index = i;
        break;
      }
    }
    CHECK_LT(catch_handler_frame_index, count);
    count = catch_handler_frame_index + 1;
  }

  output_ = new FrameDescription* [count] {};
  output_count_ = static_cast<int>(count);

  // Translate each output frame.
  int frame_index = 0;
  size_t total_output_frame_size = 0;
  for (size_t i = 0; i < count; ++i, ++frame_index) {
    TranslatedFrame* translated_frame = &(translated_state_.frames()[i]);
    const bool handle_exception = deoptimizing_throw_ && i == count - 1;
    switch (translated_frame->kind()) {
      case TranslatedFrame::kUnoptimizedFunction:
        DoComputeUnoptimizedFrame(translated_frame, frame_index,
                                  handle_exception);
        break;
      case TranslatedFrame::kInlinedExtraArguments:
        DoComputeInlinedExtraArguments(translated_frame, frame_index);
        break;
      case TranslatedFrame::kConstructCreateStub:
        DoComputeConstructCreateStubFrame(translated_frame, frame_index);
        break;
      case TranslatedFrame::kConstructInvokeStub:
        DoComputeConstructInvokeStubFrame(translated_frame, frame_index);
        break;
      case TranslatedFrame::kBuiltinContinuation:
#if V8_ENABLE_WEBASSEMBLY
      case TranslatedFrame::kJSToWasmBuiltinContinuation:
#endif  // V8_ENABLE_WEBASSEMBLY
        DoComputeBuiltinContinuation(translated_frame, frame_index,
                                     BuiltinContinuationMode::STUB);
        break;
      case TranslatedFrame::kJavaScriptBuiltinContinuation:
        DoComputeBuiltinContinuation(translated_frame, frame_index,
                                     BuiltinContinuationMode::JAVASCRIPT);
        break;
      case TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch:
        DoComputeBuiltinContinuation(
            translated_frame, frame_index,
            handle_exception
                ? BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION
                : BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH);
        break;
#if V8_ENABLE_WEBASSEMBLY
      case TranslatedFrame::kWasmInlinedIntoJS:
        FATAL("inlined wasm frames may not appear in JS deopts");
      case TranslatedFrame::kLiftoffFunction:
        FATAL("wasm liftoff frames may not appear in JS deopts");
#endif
      case TranslatedFrame::kInvalid:
        FATAL("invalid frame");
    }
    total_output_frame_size += output_[frame_index]->GetFrameSize();
  }

  FrameDescription* topmost = output_[count - 1];
  topmost->GetRegisterValues()->SetRegister(kRootRegister.code(),
                                            isolate()->isolate_root());
#ifdef V8_COMPRESS_POINTERS
  topmost->GetRegisterValues()->SetRegister(kPtrComprCageBaseRegister.code(),
                                            isolate()->cage_base());
#endif

1646#ifdef V8_ENABLE_CET_SHADOW_STACK
1647 if (v8_flags.cet_compatible) {
1648 CHECK_EQ(shadow_stack_count_, 0);
1649 shadow_stack_ = new intptr_t[count + 1];
1650
1651 // We should jump to the continuation through AdaptShadowStack to avoid
1652 // security exception.
1653 // Clear the continuation so that DeoptimizationEntry does not push the
1654 // address onto the stack, and push it to the shadow stack instead.
1655 if (output_[count - 1]->GetContinuation()) {
1656 shadow_stack_[shadow_stack_count_++] =
1658 output_[count - 1]->SetContinuation(0);
1659 }
1660
1661 // Add topmost frame's pc to the shadow stack.
1662 shadow_stack_[shadow_stack_count_++] =
1663 output_[count - 1]->GetPc() -
1664 kAdaptShadowStackOffsetToSubtract;
1665
1666 // Add return addresses to the shadow stack, except for the bottommost.
1667 // The bottommost frame's return address already exists in the shadow stack.
1668 for (int i = static_cast<int>(count) - 1; i > 0; i--) {
1669 if (!output_[i]->HasCallerPc()) continue;
1670 shadow_stack_[shadow_stack_count_++] =
1671 output_[i]->GetCallerPc() -
1672 kAdaptShadowStackOffsetToSubtract;
1673 }
1674 }
1675#endif // V8_ENABLE_CET_SHADOW_STACK
1676
1677 // Don't reset the tiering state for OSR code since we might reuse OSR code
1678 // after deopt, and we still want to tier up to non-OSR code even if OSR code
1679 // deoptimized.
1680 bool osr_early_exit = Deoptimizer::GetDeoptInfo().deopt_reason ==
1681 DeoptimizeReason::kOSREarlyExit;
1682 // TODO(saelo): We have to use full pointer comparisons here while not all
1683 // Code objects have been migrated into trusted space.
1684 static_assert(!kAllCodeObjectsLiveInTrustedSpace);
1685 if (IsJSFunction(function_) &&
1686 (compiled_code_->osr_offset().IsNone()
1687 ? function_->code(isolate()).SafeEquals(compiled_code_)
1688 : (!osr_early_exit &&
1689 DeoptExitIsInsideOsrLoop(isolate(), function_,
1690 bytecode_offset_in_outermost_frame_,
1691 compiled_code_->osr_offset())))) {
1692 if (v8_flags.profile_guided_optimization &&
1693 function_->shared()->cached_tiering_decision() !=
1694 CachedTieringDecision::kDelayMaglev) {
1695 if (DeoptimizedMaglevvedCodeEarly(isolate(), function_, compiled_code_)) {
1696 function_->shared()->set_cached_tiering_decision(
1697 CachedTieringDecision::kDelayMaglev);
1698 } else {
1699 function_->shared()->set_cached_tiering_decision(
1700 CachedTieringDecision::kNormal);
1701 }
1702 }
1703 function_->ResetTieringRequests();
1704 // This allows us to quickly re-spawn a new compilation request even if
1705 // there is already one running. In particular it helps to squeeze in a
1706 // maglev compilation when there is a long running turbofan one that was
1707 // started right before the deopt.
1708 function_->SetTieringInProgress(false);
1709 function_->SetInterruptBudget(isolate_, BudgetModification::kReset,
1710 CodeKind::INTERPRETED_FUNCTION);
1711 function_->feedback_vector()->set_was_once_deoptimized();
1712 }
1713
1714 // Print some helpful diagnostic information.
1715 if (verbose_tracing_enabled()) {
1716 TraceDeoptEnd(timer.Elapsed().InMillisecondsF());
1717 }
1718
1719 // The following invariant is fairly tricky to guarantee, since the size of
1720 // an optimized frame and its deoptimized counterparts usually differs. We
1721 // thus need to consider the case in which deoptimized frames are larger than
1722 // the optimized frame in stack checks in optimized code. We do this by
1723 // applying an offset to stack checks (see kArchStackPointerGreaterThan in the
1724 // code generator).
1725 // Note that we explicitly allow deopts to exceed the limit by a certain
1726 // number of slack bytes.
1727 CHECK_GT(
1728 static_cast<uintptr_t>(caller_frame_top_) - total_output_frame_size,
1729 isolate()->stack_guard()->real_jslimit() - kStackLimitSlackForDeoptimizationInBytes);
1730}
1731
1732// static
1733 bool Deoptimizer::DeoptExitIsInsideOsrLoop(Isolate* isolate,
1734 Tagged<JSFunction> function,
1735 BytecodeOffset deopt_exit_offset,
1736 BytecodeOffset osr_offset) {
1738 HandleScope scope(isolate);
1739 DCHECK(!deopt_exit_offset.IsNone());
1740 DCHECK(!osr_offset.IsNone());
1741
1742 Handle<BytecodeArray> bytecode_array(
1743 function->shared()->GetBytecodeArray(isolate), isolate);
1744 DCHECK(interpreter::BytecodeArrayIterator::IsValidOffset(
1745 bytecode_array, deopt_exit_offset.ToInt()));
1746
1747 interpreter::BytecodeArrayIterator it(bytecode_array, osr_offset.ToInt());
1748 CHECK(it.CurrentBytecodeIsValidOSREntry());
1749
1750 for (; !it.done(); it.Advance()) {
1751 const int current_offset = it.current_offset();
1752 // If we've reached the deopt exit, it's contained in the current loop
1753 // (this is covered by IsInRange below, but this check lets us avoid
1754 // useless iteration).
1755 if (current_offset == deopt_exit_offset.ToInt()) return true;
1756 // We're only interested in loop ranges.
1757 if (it.current_bytecode() != interpreter::Bytecode::kJumpLoop) continue;
1758 // Is the deopt exit contained in the current loop?
1759 if (base::IsInRange(deopt_exit_offset.ToInt(), it.GetJumpTargetOffset(),
1760 current_offset)) {
1761 return true;
1762 }
1763 // We've reached nesting level 0, i.e. the current JumpLoop concludes a
1764 // top-level loop.
1765 const int loop_nesting_level = it.GetImmediateOperand(1);
1766 if (loop_nesting_level == 0) return false;
1767 }
1768
1769 UNREACHABLE();
1770}
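// Example: for bytecode of the shape `loop A { loop B { <OSR entry> } <deopt
// exit> }`, the walk starts at B's JumpLoop; B's jump range does not contain
// the exit and its nesting level is non-zero, so iteration continues until it
// either reaches the deopt exit offset itself or A's JumpLoop whose range
// contains it, returning true in both cases.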
1771namespace {
1772
1773// Get the dispatch builtin for unoptimized frames.
1774Builtin DispatchBuiltinFor(bool advance_bc, bool is_restart_frame) {
1775 if (is_restart_frame) return Builtin::kRestartFrameTrampoline;
1776
1777 return advance_bc ? Builtin::kInterpreterEnterAtNextBytecode
1778 : Builtin::kInterpreterEnterAtBytecode;
1779}
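// Restart frames take priority: the RestartFrameTrampoline re-enters the
// function from the beginning instead of resuming bytecode dispatch at (or
// after) the deopt point.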
1780
1781} // namespace
1782
1783 void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
1784 int frame_index,
1785 bool goto_catch_handler) {
1786 Tagged<BytecodeArray> bytecode_array = translated_frame->raw_bytecode_array();
1787 TranslatedFrame::iterator value_iterator = translated_frame->begin();
1788 const bool is_bottommost = (0 == frame_index);
1789 const bool is_topmost = (output_count_ - 1 == frame_index);
1790
1791 const int real_bytecode_offset = translated_frame->bytecode_offset().ToInt();
1792 const int bytecode_offset =
1793 goto_catch_handler ? catch_handler_pc_offset_ : real_bytecode_offset;
1794
1795 const int parameters_count = bytecode_array->parameter_count();
1796
1797 // If this is the bottommost frame or the previous frame was the inlined
1798 // extra arguments frame, then we already have extra arguments on the stack
1799 // (including any extra padding). Therefore we should not try to add any
1800 // padding.
1801 bool should_pad_arguments =
1802 !is_bottommost && (translated_state_.frames()[frame_index - 1]).kind() !=
1803 TranslatedFrame::kInlinedExtraArguments;
1804
1805 const int locals_count = translated_frame->height();
1806 UnoptimizedFrameInfo frame_info = UnoptimizedFrameInfo::Precise(
1807 parameters_count, locals_count, is_topmost, should_pad_arguments);
1808 const uint32_t output_frame_size = frame_info.frame_size_in_bytes();
1809
1810 TranslatedFrame::iterator function_iterator = value_iterator++;
1811
1812 std::optional<Tagged<DebugInfo>> debug_info =
1813 translated_frame->raw_shared_info()->TryGetDebugInfo(isolate());
1814 if (debug_info.has_value() && debug_info.value()->HasBreakInfo()) {
1815 // TODO(leszeks): Validate this bytecode.
1816 bytecode_array = debug_info.value()->DebugBytecodeArray(isolate());
1817 }
1818
1819 // Allocate and store the output frame description.
1820 FrameDescription* output_frame =
1821 FrameDescription::Create(output_frame_size, parameters_count, isolate());
1822 FrameWriter frame_writer(this, output_frame, verbose_trace_scope());
1823
1824 CHECK(frame_index >= 0 && frame_index < output_count_);
1825 CHECK_NULL(output_[frame_index]);
1826 output_[frame_index] = output_frame;
1827
1828 // Compute this frame's PC and state.
1829 // For interpreted frames, the PC will be a special builtin that
1830 // continues the bytecode dispatch. Note that non-topmost and lazy-style
1831 // bailout handlers also advance the bytecode offset before dispatch, hence
1832 // simulating what normal handlers do upon completion of the operation.
1833 // For baseline frames, the PC will be a builtin to convert the interpreter
1834 // frame to a baseline frame before continuing execution of baseline code.
1835 // We can't directly continue into baseline code, because of CFI.
1836 Builtins* builtins = isolate_->builtins();
1837 const bool advance_bc =
1838 (!is_topmost || (deopt_kind_ == DeoptimizeKind::kLazy)) &&
1839 !goto_catch_handler;
1840 const bool restart_frame = goto_catch_handler && is_restart_frame();
1841 Tagged<Code> dispatch_builtin =
1842 builtins->code(DispatchBuiltinFor(advance_bc, restart_frame));
1843
1844 if (verbose_tracing_enabled()) {
1845 PrintF(trace_scope()->file(), " translating interpreted frame ");
1846 std::unique_ptr<char[]> name =
1847 translated_frame->raw_shared_info()->DebugNameCStr();
1848 PrintF(trace_scope()->file(), "%s", name.get());
1849 PrintF(trace_scope()->file(), " => bytecode_offset=%d, ",
1850 real_bytecode_offset);
1851 PrintF(trace_scope()->file(), "variable_frame_size=%d, frame_size=%d%s\n",
1852 frame_info.frame_size_in_bytes_without_fixed(), output_frame_size,
1853 goto_catch_handler ? " (throw)" : "");
1854 }
1855
1856 // The top address of the frame is computed from the previous frame's top and
1857 // this frame's size.
1858 const intptr_t top_address =
1859 is_bottommost ? caller_frame_top_ - output_frame_size
1860 : output_[frame_index - 1]->GetTop() - output_frame_size;
1861 output_frame->SetTop(top_address);
1862
1863 // Compute the incoming parameter translation.
1864 ReadOnlyRoots roots(isolate());
1865 if (should_pad_arguments) {
1866 for (int i = 0; i < ArgumentPaddingSlots(parameters_count); ++i) {
1867 frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
1868 }
1869 }
1870
1871 if (verbose_tracing_enabled() && is_bottommost &&
1872 actual_argument_count_ > parameters_count) {
1874 " -- %d extra argument(s) already in the stack --\n",
1875 actual_argument_count_ - parameters_count);
1876 }
1877 frame_writer.PushStackJSArguments(value_iterator, parameters_count);
1878
1879 DCHECK_EQ(output_frame->GetLastArgumentSlotOffset(should_pad_arguments),
1880 frame_writer.top_offset());
1881 if (verbose_tracing_enabled()) {
1882 PrintF(trace_scope()->file(), " -------------------------\n");
1883 }
1884
1885 // There are no translation commands for the caller's pc and fp, the
1886 // context, the function and the bytecode offset. Synthesize
1887 // their values and set them up
1888 // explicitly.
1889 //
1890 // The caller's pc for the bottommost output frame is the same as in the
1891 // input frame. For all subsequent output frames, it can be read from the
1892 // previous one. This frame's pc can be computed from the non-optimized
1893 // function code and bytecode offset of the bailout.
1894 if (is_bottommost) {
1895 frame_writer.PushBottommostCallerPc(caller_pc_);
1896 } else {
1897 frame_writer.PushApprovedCallerPc(output_[frame_index - 1]->GetPc());
1898 }
1899
1900 // The caller's frame pointer for the bottommost output frame is the same
1901 // as in the input frame. For all subsequent output frames, it can be
1902 // read from the previous one. Also compute and set this frame's frame
1903 // pointer.
1904 const intptr_t caller_fp =
1905 is_bottommost ? caller_fp_ : output_[frame_index - 1]->GetFp();
1906 frame_writer.PushCallerFp(caller_fp);
1907
1908 const intptr_t fp_value = top_address + frame_writer.top_offset();
1909 output_frame->SetFp(fp_value);
1910 if (is_topmost) {
1911 Register fp_reg = JavaScriptFrame::fp_register();
1912 output_frame->SetRegister(fp_reg.code(), fp_value);
1913 }
1914
1915 if (V8_EMBEDDED_CONSTANT_POOL_BOOL) {
1916 // For the bottommost output frame the constant pool pointer can be gotten
1917 // from the input frame. For subsequent output frames, it can be read from
1918 // the previous frame.
1919 const intptr_t caller_cp =
1920 is_bottommost ? caller_constant_pool_
1921 : output_[frame_index - 1]->GetConstantPool();
1922 frame_writer.PushCallerConstantPool(caller_cp);
1923 }
1924
1925 // For the bottommost output frame the context can be gotten from the input
1926 // frame. For all subsequent output frames it can be gotten from the function
1927 // so long as we don't inline functions that need local contexts.
1928
1929 // When deoptimizing into a catch block, we need to take the context
1930 // from a register that was specified in the handler table.
1931 TranslatedFrame::iterator context_pos = value_iterator++;
1932 if (goto_catch_handler) {
1933 // Skip to the translated value of the register specified
1934 // in the handler table.
1935 for (int i = 0; i < catch_handler_data_ + 1; ++i) {
1936 context_pos++;
1937 }
1938 }
1939 // Read the context from the translations.
1940 frame_writer.PushTranslatedValue(context_pos, "context");
1941
1942 // The function was mentioned explicitly in the BEGIN_FRAME.
1943 frame_writer.PushTranslatedValue(function_iterator, "function");
1944
1945 // Actual argument count.
1946 int argc;
1947 if (is_bottommost) {
1948 argc = actual_argument_count_;
1949 } else {
1950 TranslatedFrame::Kind previous_frame_kind =
1951 (translated_state_.frames()[frame_index - 1]).kind();
1952 argc = previous_frame_kind == TranslatedFrame::kInlinedExtraArguments
1953 ? output_[frame_index - 1]->parameter_count()
1954 : parameters_count;
1955 }
1956 frame_writer.PushRawValue(argc, "actual argument count\n");
1957
1958 // Set the bytecode array pointer.
1959 frame_writer.PushRawObject(bytecode_array, "bytecode array\n");
1960
1961 // The bytecode offset was mentioned explicitly in the BEGIN_FRAME.
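// The slot holds a raw offset into the BytecodeArray object (header
// included), encoded as a Smi; this is the representation the interpreter
// frame layout expects in its bytecode offset slot.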
1962 const int raw_bytecode_offset =
1963 BytecodeArray::kHeaderSize - kHeapObjectTag + bytecode_offset;
1964 Tagged<Smi> smi_bytecode_offset = Smi::FromInt(raw_bytecode_offset);
1965 frame_writer.PushRawObject(smi_bytecode_offset, "bytecode offset\n");
1966
1967 // We need to materialize the closure before getting the feedback vector.
1968 frame_writer.PushFeedbackVectorForMaterialization(function_iterator);
1969
1970 if (verbose_tracing_enabled()) {
1971 PrintF(trace_scope()->file(), " -------------------------\n");
1972 }
1973
1974 // Translate the rest of the interpreter registers in the frame.
1975 // The return_value_offset is counted from the top. Here, we compute the
1976 // register index (counted from the start).
1977 const int return_value_first_reg =
1978 locals_count - translated_frame->return_value_offset();
1979 const int return_value_count = translated_frame->return_value_count();
1980 for (int i = 0; i < locals_count; ++i, ++value_iterator) {
1981 // Ensure we write the return value if we have one and we are returning
1982 // normally to a lazy deopt point.
1983 if (is_topmost && !goto_catch_handler &&
1984 deopt_kind_ == DeoptimizeKind::kLazy && i >= return_value_first_reg &&
1985 i < return_value_first_reg + return_value_count) {
1986 const int return_index = i - return_value_first_reg;
1987 if (return_index == 0) {
1988 frame_writer.PushRawValue(input_->GetRegister(kReturnRegister0.code()),
1989 "return value 0\n");
1990 // We do not handle the situation when one return value should go into
1991 // the accumulator and another one into an ordinary register. Since
1992 // the interpreter should never create such a situation, just assert
1993 // this does not happen.
1994 CHECK_LE(return_value_first_reg + return_value_count, locals_count);
1995 } else {
1996 CHECK_EQ(return_index, 1);
1997 frame_writer.PushRawValue(input_->GetRegister(kReturnRegister1.code()),
1998 "return value 1\n");
1999 }
2000 } else {
2001 // This is not a return value; just write the value from the translations.
2002 frame_writer.PushTranslatedValue(value_iterator, "stack parameter");
2003 }
2004 }
2005
2006 uint32_t register_slots_written = static_cast<uint32_t>(locals_count);
2007 DCHECK_LE(register_slots_written, frame_info.register_stack_slot_count());
2008 // Some architectures must pad the stack frame with extra stack slots
2009 // to ensure the stack frame is aligned. Do this now.
2010 while (register_slots_written < frame_info.register_stack_slot_count()) {
2011 register_slots_written++;
2012 frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
2013 }
2014
2015 // Translate the accumulator register (depending on frame position).
2016 if (is_topmost) {
2017 for (int i = 0; i < ArgumentPaddingSlots(1); ++i) {
2018 frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
2019 }
2020 // For topmost frame, put the accumulator on the stack. The
2021 // {NotifyDeoptimized} builtin pops it off the topmost frame (possibly
2022 // after materialization).
2023 if (goto_catch_handler) {
2024 // If we are lazy deopting to a catch handler, we set the accumulator to
2025 // the exception (which lives in the result register).
2026 intptr_t accumulator_value =
2027 input_->GetRegister(kInterpreterAccumulatorRegister.code());
2028 frame_writer.PushRawObject(Tagged<Object>(accumulator_value),
2029 "accumulator\n");
2030 } else {
2031 // If we are lazily deoptimizing make sure we store the deopt
2032 // return value into the appropriate slot.
2033 if (deopt_kind_ == DeoptimizeKind::kLazy &&
2034 translated_frame->return_value_offset() == 0 &&
2035 translated_frame->return_value_count() > 0) {
2036 CHECK_EQ(translated_frame->return_value_count(), 1);
2037 frame_writer.PushRawValue(input_->GetRegister(kReturnRegister0.code()),
2038 "return value 0\n");
2039 } else {
2040 frame_writer.PushTranslatedValue(value_iterator, "accumulator");
2041 }
2042 }
2043 ++value_iterator; // Move over the accumulator.
2044 } else {
2045 // For non-topmost frames, skip the accumulator translation. For those
2046 // frames, the return value from the callee will become the accumulator.
2047 ++value_iterator;
2048 }
2049 CHECK_EQ(translated_frame->end(), value_iterator);
2050 CHECK_EQ(0u, frame_writer.top_offset());
2051
2052 const intptr_t pc =
2053 static_cast<intptr_t>(dispatch_builtin->instruction_start()) +
2054 isolate()->heap()->deopt_pc_offset_after_adapt_shadow_stack().value();
2055 if (is_topmost) {
2056 // Only the pc of the topmost frame needs to be signed since it is
2057 // authenticated at the end of the DeoptimizationEntry builtin.
2058 const intptr_t top_most_pc = PointerAuthentication::SignAndCheckPC(
2059 isolate(), pc, frame_writer.frame()->GetTop());
2060 output_frame->SetPc(top_most_pc);
2061 } else {
2062 output_frame->SetPc(pc);
2063 }
2064
2065 // Update constant pool.
2066 if (V8_EMBEDDED_CONSTANT_POOL_BOOL) {
2067 intptr_t constant_pool_value =
2068 static_cast<intptr_t>(dispatch_builtin->constant_pool());
2069 output_frame->SetConstantPool(constant_pool_value);
2070 if (is_topmost) {
2071 Register constant_pool_reg =
2073 output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
2074 }
2075 }
2076
2077 // Clear the context register. The context might be a de-materialized object
2078 // and will be materialized by {Runtime_NotifyDeoptimized}. For additional
2079 // safety we use Tagged<Smi>(0) instead of the potential {arguments_marker}
2080 // here.
2081 if (is_topmost) {
2082 intptr_t context_value = static_cast<intptr_t>(Smi::zero().ptr());
2083 Register context_reg = JavaScriptFrame::context_register();
2084 output_frame->SetRegister(context_reg.code(), context_value);
2085 // Set the continuation for the topmost frame.
2086 Tagged<Code> continuation = builtins->code(Builtin::kNotifyDeoptimized);
2087 output_frame->SetContinuation(
2088 static_cast<intptr_t>(continuation->instruction_start()));
2089 }
2090}
2091
2092 void Deoptimizer::DoComputeInlinedExtraArguments(
2093 TranslatedFrame* translated_frame, int frame_index) {
2094 // An inlined arguments frame cannot be the topmost, nor the bottommost frame.
2095 CHECK(frame_index < output_count_ - 1);
2096 CHECK_GT(frame_index, 0);
2097 CHECK_NULL(output_[frame_index]);
2098
2099 // During deoptimization we need to push the extra arguments of inlined
2100 // functions (arguments with index greater than the formal parameter count).
2101 // For more info, see the design document:
2102 // https://docs.google.com/document/d/150wGaUREaZI6YWqOQFD5l2mWQXaPbbZjcAIJLOFrzMs
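// Example: for `function f(a, b)` inlined at a call site `f(1, 2, 3, 4, 5)`,
// argument_count_without_receiver is 5 and the formal count is 2, so this
// frame holds only the three extra arguments (3, 4, 5); the receiver, a and
// b are pushed by the interpreter frame that follows.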
2103
2104 TranslatedFrame::iterator value_iterator = translated_frame->begin();
2105 const int argument_count_without_receiver = translated_frame->height() - 1;
2106 const int formal_parameter_count_without_receiver =
2107 translated_frame->formal_parameter_count() - 1;
2108 SBXCHECK_GE(formal_parameter_count_without_receiver, 0);
2109 const int extra_argument_count =
2110 argument_count_without_receiver - formal_parameter_count_without_receiver;
2111 // The number of pushed arguments is the maximum of the actual argument count
2112 // and the formal parameter count + the receiver.
2113 const int padding =
2114 ArgumentPaddingSlots(std::max(argument_count_without_receiver,
2115 formal_parameter_count_without_receiver) +
2116 1);
2117 const int output_frame_size =
2118 (std::max(0, extra_argument_count) + padding) * kSystemPointerSize;
2119 if (verbose_tracing_enabled()) {
2120 PrintF(trace_scope()->file(),
2121 " translating inlined arguments frame => variable_size=%d\n",
2122 output_frame_size);
2123 }
2124
2125 // Allocate and store the output frame description.
2126 FrameDescription* output_frame = FrameDescription::Create(
2127 output_frame_size, JSParameterCount(argument_count_without_receiver),
2128 isolate());
2129 // The top address of the frame is computed from the previous frame's top and
2130 // this frame's size.
2131 const intptr_t top_address =
2132 output_[frame_index - 1]->GetTop() - output_frame_size;
2133 output_frame->SetTop(top_address);
2134 // This is not a real frame, we take PC and FP values from the parent frame.
2135 output_frame->SetPc(output_[frame_index - 1]->GetPc());
2136 output_frame->SetFp(output_[frame_index - 1]->GetFp());
2137 output_[frame_index] = output_frame;
2138
2139 FrameWriter frame_writer(this, output_frame, verbose_trace_scope());
2140
2141 ReadOnlyRoots roots(isolate());
2142 for (int i = 0; i < padding; ++i) {
2143 frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
2144 }
2145
2146 if (extra_argument_count > 0) {
2147 // The receiver and arguments with index below the formal parameter
2148 // count are in the fake adaptor frame, because they are used to create the
2149 // arguments object. We should however not push them, since the interpreter
2150 // frame will do that.
2151 value_iterator++; // Skip function.
2152 value_iterator++; // Skip receiver.
2153 for (int i = 0; i < formal_parameter_count_without_receiver; i++)
2154 value_iterator++;
2155 frame_writer.PushStackJSArguments(value_iterator, extra_argument_count);
2156 }
2157}
2158
2160 TranslatedFrame* translated_frame, int frame_index) {
2161 TranslatedFrame::iterator value_iterator = translated_frame->begin();
2162 const bool is_topmost = (output_count_ - 1 == frame_index);
2163 // The construct frame could become topmost only if we inlined a constructor
2164 // call which does a tail call (otherwise the tail callee's frame would be
2165 // the topmost one). So it could only be the DeoptimizeKind::kLazy case.
2166 CHECK(!is_topmost || deopt_kind_ == DeoptimizeKind::kLazy);
2167 DCHECK_EQ(translated_frame->kind(), TranslatedFrame::kConstructCreateStub);
2168
2169 const int parameters_count = translated_frame->height();
2170 ConstructStubFrameInfo frame_info =
2171 ConstructStubFrameInfo::Precise(parameters_count, is_topmost);
2172 const uint32_t output_frame_size = frame_info.frame_size_in_bytes();
2173
2174 TranslatedFrame::iterator function_iterator = value_iterator++;
2175 if (verbose_tracing_enabled()) {
2176 PrintF(trace_scope()->file(),
2177 " translating construct create stub => variable_frame_size=%d, "
2178 "frame_size=%d\n",
2179 frame_info.frame_size_in_bytes_without_fixed(), output_frame_size);
2180 }
2181
2182 // Allocate and store the output frame description.
2183 FrameDescription* output_frame =
2184 FrameDescription::Create(output_frame_size, parameters_count, isolate());
2185 FrameWriter frame_writer(this, output_frame, verbose_trace_scope());
2186 DCHECK(frame_index > 0 && frame_index < output_count_);
2187 DCHECK_NULL(output_[frame_index]);
2188 output_[frame_index] = output_frame;
2189
2190 // The top address of the frame is computed from the previous frame's top and
2191 // this frame's size.
2192 const intptr_t top_address =
2193 output_[frame_index - 1]->GetTop() - output_frame_size;
2194 output_frame->SetTop(top_address);
2195
2196 ReadOnlyRoots roots(isolate());
2197 for (int i = 0; i < ArgumentPaddingSlots(parameters_count); ++i) {
2198 frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
2199 }
2200
2201 // The allocated receiver of a construct stub frame is passed as the
2202 // receiver parameter through the translation. It might be encoding
2203 // a captured object, so we need to save it for later.
2204 TranslatedFrame::iterator receiver_iterator = value_iterator;
2205
2206 // Compute the incoming parameter translation.
2207 frame_writer.PushStackJSArguments(value_iterator, parameters_count);
2208
2209 DCHECK_EQ(output_frame->GetLastArgumentSlotOffset(),
2210 frame_writer.top_offset());
2211
2212 // Read caller's PC from the previous frame.
2213 const intptr_t caller_pc = output_[frame_index - 1]->GetPc();
2214 frame_writer.PushApprovedCallerPc(caller_pc);
2215
2216 // Read caller's FP from the previous frame, and set this frame's FP.
2217 const intptr_t caller_fp = output_[frame_index - 1]->GetFp();
2218 frame_writer.PushCallerFp(caller_fp);
2219
2220 const intptr_t fp_value = top_address + frame_writer.top_offset();
2221 output_frame->SetFp(fp_value);
2222 if (is_topmost) {
2223 Register fp_reg = JavaScriptFrame::fp_register();
2224 output_frame->SetRegister(fp_reg.code(), fp_value);
2225 }
2226
2227 if (V8_EMBEDDED_CONSTANT_POOL_BOOL) {
2228 // Read the caller's constant pool from the previous frame.
2229 const intptr_t caller_cp = output_[frame_index - 1]->GetConstantPool();
2230 frame_writer.PushCallerConstantPool(caller_cp);
2231 }
2232
2233 // A marker value is used to mark the frame.
2234 intptr_t marker = StackFrame::TypeToMarker(StackFrame::CONSTRUCT);
2235 frame_writer.PushRawValue(marker, "context (construct stub sentinel)\n");
2236
2237 frame_writer.PushTranslatedValue(value_iterator++, "context");
2238
2239 // Number of incoming arguments.
2240 const uint32_t argc = parameters_count;
2241 frame_writer.PushRawValue(argc, "argc\n");
2242
2243 // The constructor function was mentioned explicitly in the
2244 // CONSTRUCT_STUB_FRAME.
2245 frame_writer.PushTranslatedValue(function_iterator, "constructor function\n");
2246
2247 // The deopt info contains the implicit receiver or the new target at the
2248 // position of the receiver. Copy it to the top of stack, with the hole value
2249 // as padding to maintain alignment.
2250 frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
2251 frame_writer.PushTranslatedValue(receiver_iterator, "new target\n");
2252
2253 if (is_topmost) {
2254 for (int i = 0; i < ArgumentPaddingSlots(1); ++i) {
2255 frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
2256 }
2257 // Ensure the result is restored back when we return to the stub.
2258 Register result_reg = kReturnRegister0;
2259 intptr_t result = input_->GetRegister(result_reg.code());
2260 frame_writer.PushRawValue(result, "subcall result\n");
2261 }
2262
2263 CHECK_EQ(translated_frame->end(), value_iterator);
2264 CHECK_EQ(0u, frame_writer.top_offset());
2265
2266 // Compute this frame's PC.
2267 Tagged<Code> construct_stub =
2268 isolate_->builtins()->code(Builtin::kJSConstructStubGeneric);
2269 Address start = construct_stub->instruction_start();
2270 const int pc_offset =
2271 isolate_->heap()->construct_stub_create_deopt_pc_offset().value();
2272 intptr_t pc_value = static_cast<intptr_t>(start + pc_offset);
2273 if (is_topmost) {
2274 // Only the pc of the topmost frame needs to be signed since it is
2275 // authenticated at the end of the DeoptimizationEntry builtin.
2276 output_frame->SetPc(PointerAuthentication::SignAndCheckPC(
2277 isolate(), pc_value, frame_writer.frame()->GetTop()));
2278 } else {
2279 output_frame->SetPc(pc_value);
2280 }
2281
2282 // Update constant pool.
2283 if (V8_EMBEDDED_CONSTANT_POOL_BOOL) {
2284 intptr_t constant_pool_value =
2285 static_cast<intptr_t>(construct_stub->constant_pool());
2286 output_frame->SetConstantPool(constant_pool_value);
2287 if (is_topmost) {
2288 Register constant_pool_reg =
2289 JavaScriptFrame::constant_pool_pointer_register();
2290 output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
2291 }
2292 }
2293
2294 // Clear the context register. The context might be a de-materialized object
2295 // and will be materialized by {Runtime_NotifyDeoptimized}. For additional
2296 // safety we use Tagged<Smi>(0) instead of the potential {arguments_marker}
2297 // here.
2298 if (is_topmost) {
2299 intptr_t context_value = static_cast<intptr_t>(Smi::zero().ptr());
2300 Register context_reg = JavaScriptFrame::context_register();
2301 output_frame->SetRegister(context_reg.code(), context_value);
2302
2303 // Set the continuation for the topmost frame.
2305 Tagged<Code> continuation =
2306 isolate_->builtins()->code(Builtin::kNotifyDeoptimized);
2307 output_frame->SetContinuation(
2308 static_cast<intptr_t>(continuation->instruction_start()));
2309 }
2310}
2311
2313 TranslatedFrame* translated_frame, int frame_index) {
2314 TranslatedFrame::iterator value_iterator = translated_frame->begin();
2315 const bool is_topmost = (output_count_ - 1 == frame_index);
2316 // The construct frame could become topmost only if we inlined a constructor
2317 // call which does a tail call (otherwise the tail callee's frame would be
2318 // the topmost one). So it could only be the DeoptimizeKind::kLazy case.
2319 CHECK(!is_topmost || deopt_kind_ == DeoptimizeKind::kLazy);
2320 DCHECK_EQ(translated_frame->kind(), TranslatedFrame::kConstructInvokeStub);
2321 DCHECK_EQ(translated_frame->height(), 0);
2322
2323 FastConstructStubFrameInfo frame_info =
2324 FastConstructStubFrameInfo::Precise(is_topmost);
2325 const uint32_t output_frame_size = frame_info.frame_size_in_bytes();
2326 if (verbose_tracing_enabled()) {
2327 PrintF(trace_scope()->file(),
2328 " translating construct invoke stub => variable_frame_size=%d, "
2329 "frame_size=%d\n",
2330 frame_info.frame_size_in_bytes_without_fixed(), output_frame_size);
2331 }
2332
2333 // Allocate and store the output frame description.
2334 FrameDescription* output_frame =
2335 FrameDescription::Create(output_frame_size, 0, isolate());
2336 FrameWriter frame_writer(this, output_frame, verbose_trace_scope());
2337 DCHECK(frame_index > 0 && frame_index < output_count_);
2338 DCHECK_NULL(output_[frame_index]);
2339 output_[frame_index] = output_frame;
2340
2341 // The top address of the frame is computed from the previous frame's top and
2342 // this frame's size.
2343 const intptr_t top_address =
2344 output_[frame_index - 1]->GetTop() - output_frame_size;
2345 output_frame->SetTop(top_address);
2346
2347 // The allocated receiver of a construct stub frame is passed as the
2348 // receiver parameter through the translation. It might be encoding
2349 // a captured object, so we need to save it for later.
2350 TranslatedFrame::iterator receiver_iterator = value_iterator;
2351 value_iterator++;
2352
2353 // Read caller's PC from the previous frame.
2354 const intptr_t caller_pc = output_[frame_index - 1]->GetPc();
2355 frame_writer.PushApprovedCallerPc(caller_pc);
2356
2357 // Read caller's FP from the previous frame, and set this frame's FP.
2358 const intptr_t caller_fp = output_[frame_index - 1]->GetFp();
2359 frame_writer.PushCallerFp(caller_fp);
2360
2361 const intptr_t fp_value = top_address + frame_writer.top_offset();
2362 output_frame->SetFp(fp_value);
2363 if (is_topmost) {
2364 Register fp_reg = JavaScriptFrame::fp_register();
2365 output_frame->SetRegister(fp_reg.code(), fp_value);
2366 }
2367
2368 if (V8_EMBEDDED_CONSTANT_POOL_BOOL) {
2369 // Read the caller's constant pool from the previous frame.
2370 const intptr_t caller_cp = output_[frame_index - 1]->GetConstantPool();
2371 frame_writer.PushCallerConstantPool(caller_cp);
2372 }
2373 intptr_t marker = StackFrame::TypeToMarker(StackFrame::FAST_CONSTRUCT);
2374 frame_writer.PushRawValue(marker, "fast construct stub sentinel\n");
2375 frame_writer.PushTranslatedValue(value_iterator++, "context");
2376 frame_writer.PushTranslatedValue(receiver_iterator, "implicit receiver");
2377
2378 // The FastConstructFrame needs to be aligned on some architectures.
2379 ReadOnlyRoots roots(isolate());
2380 for (int i = 0; i < ArgumentPaddingSlots(1); ++i) {
2381 frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
2382 }
2383
2384 if (is_topmost) {
2385 for (int i = 0; i < ArgumentPaddingSlots(1); ++i) {
2386 frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
2387 }
2388 // Ensure the result is restored back when we return to the stub.
2389 Register result_reg = kReturnRegister0;
2390 intptr_t result = input_->GetRegister(result_reg.code());
2391 frame_writer.PushRawValue(result, "subcall result\n");
2392 }
2393
2394 CHECK_EQ(translated_frame->end(), value_iterator);
2395 CHECK_EQ(0u, frame_writer.top_offset());
2396
2397 // Compute this frame's PC.
2398 Tagged<Code> construct_stub = isolate_->builtins()->code(
2399 Builtin::kInterpreterPushArgsThenFastConstructFunction);
2400 Address start = construct_stub->instruction_start();
2401 const int pc_offset =
2402 isolate_->heap()->construct_stub_invoke_deopt_pc_offset().value();
2403 intptr_t pc_value = static_cast<intptr_t>(start + pc_offset);
2404 if (is_topmost) {
2405 // Only the pc of the topmost frame needs to be signed since it is
2406 // authenticated at the end of the DeoptimizationEntry builtin.
2407 output_frame->SetPc(PointerAuthentication::SignAndCheckPC(
2408 isolate(), pc_value, frame_writer.frame()->GetTop()));
2409 } else {
2410 output_frame->SetPc(pc_value);
2411 }
2412
2413 // Update constant pool.
2414 if (V8_EMBEDDED_CONSTANT_POOL_BOOL) {
2415 intptr_t constant_pool_value =
2416 static_cast<intptr_t>(construct_stub->constant_pool());
2417 output_frame->SetConstantPool(constant_pool_value);
2418 if (is_topmost) {
2419 Register constant_pool_reg =
2420 JavaScriptFrame::constant_pool_pointer_register();
2421 output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
2422 }
2423 }
2424
2425 // Clear the context register. The context might be a de-materialized object
2426 // and will be materialized by {Runtime_NotifyDeoptimized}. For additional
2427 // safety we use Tagged<Smi>(0) instead of the potential {arguments_marker}
2428 // here.
2429 if (is_topmost) {
2430 intptr_t context_value = static_cast<intptr_t>(Smi::zero().ptr());
2431 Register context_reg = JavaScriptFrame::context_register();
2432 output_frame->SetRegister(context_reg.code(), context_value);
2433
2434 // Set the continuation for the topmost frame.
2436 Tagged<Code> continuation =
2437 isolate_->builtins()->code(Builtin::kNotifyDeoptimized);
2438 output_frame->SetContinuation(
2439 static_cast<intptr_t>(continuation->instruction_start()));
2440 }
2441}
2442
2443namespace {
2444
2445bool BuiltinContinuationModeIsJavaScript(BuiltinContinuationMode mode) {
2446 switch (mode) {
2447 case BuiltinContinuationMode::STUB:
2448 return false;
2449 case BuiltinContinuationMode::JAVASCRIPT:
2450 case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH:
2451 case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION:
2452 return true;
2453 }
2454 UNREACHABLE();
2455}
2456
2457 StackFrame::Type BuiltinContinuationModeToFrameType(
2458 BuiltinContinuationMode mode) {
2459 switch (mode) {
2460 case BuiltinContinuationMode::STUB:
2461 return StackFrame::BUILTIN_CONTINUATION;
2462 case BuiltinContinuationMode::JAVASCRIPT:
2463 return StackFrame::JAVASCRIPT_BUILTIN_CONTINUATION;
2464 case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH:
2465 return StackFrame::JAVASCRIPT_BUILTIN_CONTINUATION_WITH_CATCH;
2466 case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION:
2467 return StackFrame::JAVASCRIPT_BUILTIN_CONTINUATION_WITH_CATCH;
2468 }
2469 UNREACHABLE();
2470}
2471
2472} // namespace
2473
2474 Builtin Deoptimizer::TrampolineForBuiltinContinuation(
2475 BuiltinContinuationMode mode, bool must_handle_result) {
2476 switch (mode) {
2477 case BuiltinContinuationMode::STUB:
2478 return must_handle_result ? Builtin::kContinueToCodeStubBuiltinWithResult
2479 : Builtin::kContinueToCodeStubBuiltin;
2480 case BuiltinContinuationMode::JAVASCRIPT:
2481 case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH:
2482 case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION:
2483 return must_handle_result
2484 ? Builtin::kContinueToJavaScriptBuiltinWithResult
2485 : Builtin::kContinueToJavaScriptBuiltin;
2486 }
2487 UNREACHABLE();
2488}
2489
2490#if V8_ENABLE_WEBASSEMBLY
2491TranslatedValue Deoptimizer::TranslatedValueForWasmReturnKind(
2492 std::optional<wasm::ValueKind> wasm_call_return_kind) {
2493 if (wasm_call_return_kind) {
2494 switch (wasm_call_return_kind.value()) {
2495 case wasm::kI32:
2496 return TranslatedValue::NewInt32(
2497 &translated_state_,
2498 static_cast<int32_t>(input_->GetRegister(kReturnRegister0.code())));
2499 case wasm::kI64:
2500 return TranslatedValue::NewInt64(
2501 &translated_state_,
2502 static_cast<int64_t>(input_->GetRegister(kReturnRegister0.code())));
2503 case wasm::kF32:
2504 return TranslatedValue::NewFloat(
2505 &translated_state_,
2506 Float32(*reinterpret_cast<float*>(
2507 input_->GetDoubleRegister(wasm::kFpReturnRegisters[0].code())
2508 .get_bits_address())));
2509 case wasm::kF64:
2510 return TranslatedValue::NewDouble(
2511 &translated_state_,
2512 input_->GetDoubleRegister(wasm::kFpReturnRegisters[0].code()));
2513 case wasm::kRefNull:
2514 case wasm::kRef:
2515 return TranslatedValue::NewTagged(
2516 &translated_state_,
2517 Tagged<Object>(input_->GetRegister(kReturnRegister0.code())));
2518 default:
2519 UNREACHABLE();
2520 }
2521 }
2522 return TranslatedValue::NewTagged(&translated_state_,
2523 ReadOnlyRoots(isolate()).undefined_value());
2524}
2525#endif // V8_ENABLE_WEBASSEMBLY
2526
2527// BuiltinContinuationFrames capture the machine state that is expected as input
2528// to a builtin, including both input register values and stack parameters. When
2529// the frame is reactivated (i.e. the frame below it returns), a
2530// ContinueToBuiltin stub restores the register state from the frame and tail
2531// calls to the actual target builtin, making it appear that the stub had been
2532// directly called by the frame above it. The input values to populate the frame
2533// are taken from the deopt's FrameState.
2534//
2535// Frame translation happens in two modes, EAGER and LAZY. In EAGER mode, all of
2536// the parameters to the Builtin are explicitly specified in the TurboFan
2537// FrameState node. In LAZY mode, there is always one fewer parameters specified
2538// in the FrameState than expected by the Builtin. In that case, construction of
2539// BuiltinContinuationFrame adds the final missing parameter during
2540// deoptimization, and that parameter is always on the stack and contains the
2541// value returned from the callee of the call site triggering the LAZY deopt
2542// (e.g. rax on x64). This requires that continuation Builtins for LAZY deopts
2543// must have at least one stack parameter.
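// For example, a lazy deopt at a call `x = f()` inside a continuation
// records one fewer parameter in the FrameState; the value returned by f()
// is spliced in as the final stack parameter when the output frame is built
// (see the result slot handling below).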
2544//
2545// TO
2546// | .... |
2547// +-------------------------+
2548// | arg padding (arch dept) |<- at most 1*kSystemPointerSize
2549// +-------------------------+
2550// | builtin param 0 |<- FrameState input value n becomes
2551// +-------------------------+
2552// | ... |
2553// +-------------------------+
2554// | builtin param m |<- FrameState input value n+m-1, or in
2555// +-----needs-alignment-----+ the LAZY case, return LAZY result value
2556// | ContinueToBuiltin entry |
2557// +-------------------------+
2558// | | saved frame (FP) |
2559// | +=====needs=alignment=====+<- fpreg
2560// | |constant pool (if ool_cp)|
2561// v +-------------------------+
2562// |BUILTIN_CONTINUATION mark|
2563// +-------------------------+
2564// | JSFunction (or zero) |<- only if JavaScript builtin
2565// +-------------------------+
2566// | frame height above FP |
2567// +-------------------------+
2568// | context |<- this non-standard context slot contains
2569// +-------------------------+ the context, even for non-JS builtins.
2570// | builtin index |
2571// +-------------------------+
2572// | builtin input GPR reg0 |<- populated from deopt FrameState using
2573// +-------------------------+ the builtin's CallInterfaceDescriptor
2574// | ... | to map a FrameState's 0..n-1 inputs to
2575// +-------------------------+ the builtin's n input register params.
2576// | builtin input GPR regn |
2577// +-------------------------+
2578// | reg padding (arch dept) |
2579// +-----needs--alignment----+
2580// | res padding (arch dept) |<- only if {is_topmost}; result is pop'd by
2581// +-------------------------+<- kNotifyDeopt ASM stub and moved to acc
2582// | result value |<- reg, as ContinueToBuiltin stub expects.
2583// +-----needs-alignment-----+<- spreg
2584//
2586 TranslatedFrame* translated_frame, int frame_index,
2587 BuiltinContinuationMode mode) {
2588 TranslatedFrame::iterator result_iterator = translated_frame->end();
2589
2590 bool is_js_to_wasm_builtin_continuation = false;
2591#if V8_ENABLE_WEBASSEMBLY
2592 is_js_to_wasm_builtin_continuation =
2593 translated_frame->kind() == TranslatedFrame::kJSToWasmBuiltinContinuation;
2594 if (is_js_to_wasm_builtin_continuation) {
2595 // For JSToWasmBuiltinContinuations, add a TranslatedValue with the result
2596 // of the Wasm call, extracted from the input FrameDescription.
2597 // This TranslatedValue will be written in the output frame in place of the
2598 // hole and we'll use ContinueToCodeStubBuiltin in place of
2599 // ContinueToCodeStubBuiltinWithResult.
2600 TranslatedValue result = TranslatedValueForWasmReturnKind(
2601 translated_frame->wasm_call_return_kind());
2602 translated_frame->Add(result);
2603 }
2604#endif // V8_ENABLE_WEBASSEMBLY
2605
2606 TranslatedFrame::iterator value_iterator = translated_frame->begin();
2607
2608 const BytecodeOffset bytecode_offset = translated_frame->bytecode_offset();
2609 Builtin builtin = Builtins::GetBuiltinFromBytecodeOffset(bytecode_offset);
2610 CallInterfaceDescriptor continuation_descriptor =
2611 Builtins::CallInterfaceDescriptorFor(builtin);
2612
2613 const RegisterConfiguration* config = RegisterConfiguration::Default();
2614
2615 const bool is_bottommost = (0 == frame_index);
2616 const bool is_topmost = (output_count_ - 1 == frame_index);
2617
2618 const int parameters_count = translated_frame->height();
2619 BuiltinContinuationFrameInfo frame_info =
2620 BuiltinContinuationFrameInfo::Precise(parameters_count,
2621 continuation_descriptor, config,
2622 is_topmost, deopt_kind_, mode);
2623
2624 const unsigned output_frame_size = frame_info.frame_size_in_bytes();
2625 const unsigned output_frame_size_above_fp =
2626 frame_info.frame_size_in_bytes_above_fp();
2627
2628 // Validate types of parameters. They must all be tagged except for argc and
2629 // the dispatch handle for JS builtins.
2630 bool has_argc = false;
2631 const int register_parameter_count =
2632 continuation_descriptor.GetRegisterParameterCount();
2633 for (int i = 0; i < register_parameter_count; ++i) {
2634 MachineType type = continuation_descriptor.GetParameterType(i);
2635 int code = continuation_descriptor.GetRegisterParameter(i).code();
2636 // Only tagged and int32 arguments are supported, and int32 only for the
2637 // arguments count and dispatch handle on JavaScript builtins.
2638 if (type == MachineType::Int32()) {
2639 CHECK(code == kJavaScriptCallArgCountRegister.code() ||
2640 code == kJavaScriptCallDispatchHandleRegister.code());
2641 has_argc = true;
2642 } else {
2643 // Any other argument must be a tagged value.
2644 CHECK(IsAnyTagged(type.representation()));
2645 }
2646 }
2647 CHECK_EQ(BuiltinContinuationModeIsJavaScript(mode), has_argc);
2648
2649 if (verbose_tracing_enabled()) {
2650 PrintF(trace_scope()->file(),
2651 " translating BuiltinContinuation to %s,"
2652 " => register_param_count=%d,"
2653 " stack_param_count=%d, frame_size=%d\n",
2654 Builtins::name(builtin), register_parameter_count,
2655 frame_info.stack_parameter_count(), output_frame_size);
2656 }
2657
2658 FrameDescription* output_frame = FrameDescription::Create(
2659 output_frame_size, frame_info.stack_parameter_count(), isolate());
2660 output_[frame_index] = output_frame;
2661 FrameWriter frame_writer(this, output_frame, verbose_trace_scope());
2662
2663 // The top address of the frame is computed from the previous frame's top and
2664 // this frame's size.
2665 const intptr_t top_address =
2666 is_bottommost ? caller_frame_top_ - output_frame_size
2667 : output_[frame_index - 1]->GetTop() - output_frame_size;
2668 output_frame->SetTop(top_address);
2669
2670 // Get the possible JSFunction for the case that this is a
2671 // JavaScriptBuiltinContinuationFrame, which needs the JSFunction pointer
2672 // like a normal JavaScriptFrame.
2673 const intptr_t maybe_function = value_iterator->GetRawValue().ptr();
2674 ++value_iterator;
2675
2676 ReadOnlyRoots roots(isolate());
2677 const int padding = ArgumentPaddingSlots(frame_info.stack_parameter_count());
2678 for (int i = 0; i < padding; ++i) {
2679 frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
2680 }
2681
2682 if (mode == BuiltinContinuationMode::STUB) {
2683 DCHECK_EQ(continuation_descriptor.GetStackArgumentOrder(),
2684 StackArgumentOrder::kDefault);
2685 for (uint32_t i = 0; i < frame_info.translated_stack_parameter_count();
2686 ++i, ++value_iterator) {
2687 frame_writer.PushTranslatedValue(value_iterator, "stack parameter");
2688 }
2689 if (frame_info.frame_has_result_stack_slot()) {
2690 if (is_js_to_wasm_builtin_continuation) {
2691 frame_writer.PushTranslatedValue(result_iterator,
2692 "return result on lazy deopt\n");
2693 } else {
2694 DCHECK_EQ(result_iterator, translated_frame->end());
2695 frame_writer.PushRawObject(
2696 roots.the_hole_value(),
2697 "placeholder for return result on lazy deopt\n");
2698 }
2699 }
2700 } else {
2701 // JavaScript builtin.
2702 if (frame_info.frame_has_result_stack_slot()) {
2703 frame_writer.PushRawObject(
2704 roots.the_hole_value(),
2705 "placeholder for return result on lazy deopt\n");
2706 }
2707 switch (mode) {
2708 case BuiltinContinuationMode::STUB:
2709 UNREACHABLE();
2710 case BuiltinContinuationMode::JAVASCRIPT:
2711 break;
2712 case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH: {
2713 frame_writer.PushRawObject(roots.the_hole_value(),
2714 "placeholder for exception on lazy deopt\n");
2715 } break;
2716 case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION: {
2717 intptr_t accumulator_value =
2718 input_->GetRegister(kInterpreterAccumulatorRegister.code());
2719 frame_writer.PushRawObject(Tagged<Object>(accumulator_value),
2720 "exception (from accumulator)\n");
2721 } break;
2722 }
2723 frame_writer.PushStackJSArguments(
2724 value_iterator, frame_info.translated_stack_parameter_count());
2725 }
2726
2727 DCHECK_EQ(output_frame->GetLastArgumentSlotOffset(),
2728 frame_writer.top_offset());
2729
2730 std::vector<TranslatedFrame::iterator> register_values;
2731 int total_registers = config->num_general_registers();
2732 register_values.resize(total_registers, {value_iterator});
2733
2734 for (int i = 0; i < register_parameter_count; ++i, ++value_iterator) {
2735 int code = continuation_descriptor.GetRegisterParameter(i).code();
2736 register_values[code] = value_iterator;
2737 }
2738
2739 // The context register is always implicit in the CallInterfaceDescriptor but
2740 // its register must be explicitly set when continuing to the builtin. Make
2741 // sure that it's harvested from the translation and copied into the register
2742 // set (it was automatically added at the end of the FrameState by the
2743 // instruction selector).
2744 Tagged<Object> context = value_iterator->GetRawValue();
2745 const intptr_t value = context.ptr();
2746 TranslatedFrame::iterator context_register_value = value_iterator++;
2747 register_values[kContextRegister.code()] = context_register_value;
2748 output_frame->SetRegister(kContextRegister.code(), value);
2749
2750 // Set caller's PC (JSFunction continuation).
2751 if (is_bottommost) {
2752 frame_writer.PushBottommostCallerPc(caller_pc_);
2753 } else {
2754 frame_writer.PushApprovedCallerPc(output_[frame_index - 1]->GetPc());
2755 }
2756
2757 // Read caller's FP from the previous frame, and set this frame's FP.
2758 const intptr_t caller_fp =
2759 is_bottommost ? caller_fp_ : output_[frame_index - 1]->GetFp();
2760 frame_writer.PushCallerFp(caller_fp);
2761
2762 const intptr_t fp_value = top_address + frame_writer.top_offset();
2763 output_frame->SetFp(fp_value);
2764
2765 DCHECK_EQ(output_frame_size_above_fp, frame_writer.top_offset());
2766
2767 if (V8_EMBEDDED_CONSTANT_POOL_BOOL) {
2768 // Read the caller's constant pool from the previous frame.
2769 const intptr_t caller_cp =
2770 is_bottommost ? caller_constant_pool_
2771 : output_[frame_index - 1]->GetConstantPool();
2772 frame_writer.PushCallerConstantPool(caller_cp);
2773 }
2774
2775 // A marker value is used in place of the context.
2776 const intptr_t marker =
2777 StackFrame::TypeToMarker(BuiltinContinuationModeToFrameType(mode));
2778 frame_writer.PushRawValue(marker,
2779 "context (builtin continuation sentinel)\n");
2780
2781 if (BuiltinContinuationModeIsJavaScript(mode)) {
2782 frame_writer.PushRawValue(maybe_function, "JSFunction\n");
2783 } else {
2784 frame_writer.PushRawValue(0, "unused\n");
2785 }
2786
2787 // The delta from the SP to the FP; used to reconstruct SP in
2788 // Isolate::UnwindAndFindHandler.
2789 frame_writer.PushRawObject(Smi::FromInt(output_frame_size_above_fp),
2790 "frame height at deoptimization\n");
2791
2792 // The context (pushed even if this is a stub continuation frame). We can't use the
2793 // usual context slot, because we must store the frame marker there.
2794 frame_writer.PushTranslatedValue(context_register_value,
2795 "builtin JavaScript context\n");
2796
2797 // The builtin to continue to.
2798 frame_writer.PushRawObject(Smi::FromInt(static_cast<int>(builtin)),
2799 "builtin index\n");
2800
2801 const int allocatable_register_count =
2802 config->num_allocatable_general_registers();
2803 for (int i = 0; i < allocatable_register_count; ++i) {
2804 int code = config->GetAllocatableGeneralCode(i);
2805 base::ScopedVector<char> str(128);
2806 if (verbose_tracing_enabled()) {
2807 if (BuiltinContinuationModeIsJavaScript(mode) &&
2808 code == kJavaScriptCallArgCountRegister.code()) {
2809 SNPrintF(
2810 str,
2811 "tagged argument count %s (will be untagged by continuation)\n",
2812 RegisterName(Register::from_code(code)));
2813 } else {
2814 SNPrintF(str, "builtin register argument %s\n",
2815 RegisterName(Register::from_code(code)));
2816 }
2817 }
2818 frame_writer.PushTranslatedValue(
2819 register_values[code], verbose_tracing_enabled() ? str.begin() : "");
2820 }
2821
2822 // Some architectures must pad the stack frame with extra stack slots
2823 // to ensure the stack frame is aligned.
2824 const int padding_slot_count =
2825 BuiltinContinuationFrameConstants::PaddingSlotCount(
2826 allocatable_register_count);
2827 for (int i = 0; i < padding_slot_count; ++i) {
2828 frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
2829 }
2830
2831 if (is_topmost) {
2832 for (int i = 0; i < ArgumentPaddingSlots(1); ++i) {
2833 frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
2834 }
2835
2836 // Ensure the result is restored back when we return to the stub.
2837 // For JS-to-Wasm builtin continuations the returns are handled differently
2838 // and we can't push untagged return values onto the stack as they would be
2839 // visited during a GC and treated as tagged stack slots.
2840 if (frame_info.frame_has_result_stack_slot() &&
2841 !is_js_to_wasm_builtin_continuation) {
2842 Register result_reg = kReturnRegister0;
2843 frame_writer.PushRawValue(input_->GetRegister(result_reg.code()),
2844 "callback result\n");
2845 } else {
2846 frame_writer.PushRawObject(roots.undefined_value(), "callback result\n");
2847 }
2848 }
2849
2850 CHECK_EQ(result_iterator, value_iterator);
2851 CHECK_EQ(0u, frame_writer.top_offset());
2852
2853 // Clear the context register. The context might be a de-materialized object
2854 // and will be materialized by {Runtime_NotifyDeoptimized}. For additional
2855 // safety we use Tagged<Smi>(0) instead of the potential {arguments_marker}
2856 // here.
2857 if (is_topmost) {
2858 intptr_t context_value = static_cast<intptr_t>(Smi::zero().ptr());
2859 Register context_reg = JavaScriptFrame::context_register();
2860 output_frame->SetRegister(context_reg.code(), context_value);
2861 }
2862
2863 // Ensure the frame pointer register points to the callee's frame. The builtin
2864 // will build its own frame once we continue to it.
2865 Register fp_reg = JavaScriptFrame::fp_register();
2866 output_frame->SetRegister(fp_reg.code(), fp_value);
2867 // For JSToWasmBuiltinContinuations use ContinueToCodeStubBuiltin, and not
2868 // ContinueToCodeStubBuiltinWithResult because we don't want to overwrite the
2869 // return value that we have already set.
2870 Tagged<Code> continue_to_builtin =
2871 isolate()->builtins()->code(TrampolineForBuiltinContinuation(
2872 mode, frame_info.frame_has_result_stack_slot() &&
2873 !is_js_to_wasm_builtin_continuation));
2874 intptr_t pc =
2875 static_cast<intptr_t>(continue_to_builtin->instruction_start()) +
2876 isolate()->heap()->deopt_pc_offset_after_adapt_shadow_stack().value();
2877 if (is_topmost) {
2878 // Only the pc of the topmost frame needs to be signed since it is
2879 // authenticated at the end of the DeoptimizationEntry builtin.
2880 const intptr_t top_most_pc = PointerAuthentication::SignAndCheckPC(
2881 isolate(), pc, frame_writer.frame()->GetTop());
2882 output_frame->SetPc(top_most_pc);
2883 } else {
2884 output_frame->SetPc(pc);
2885 }
2886
2887 Tagged<Code> continuation =
2888 isolate()->builtins()->code(Builtin::kNotifyDeoptimized);
2889 output_frame->SetContinuation(
2890 static_cast<intptr_t>(continuation->instruction_start()));
2891}
2892
2893 void Deoptimizer::MaterializeHeapObjects() {
2894 translated_state_.Prepare(static_cast<Address>(stack_fp_));
2895 if (v8_flags.deopt_every_n_times > 0) {
2896 // Doing a GC here will find problems with the deoptimized frames.
2897 isolate_->heap()->CollectAllGarbage(GCFlag::kNoFlags,
2898 GarbageCollectionReason::kDeoptimizer);
2899 }
2900
2901 for (auto& materialization : values_to_materialize_) {
2902 DirectHandle<Object> value = materialization.value_->GetValue();
2903
2904 if (verbose_tracing_enabled()) {
2905 PrintF(trace_scope()->file(),
2906 "Materialization [" V8PRIxPTR_FMT "] <- " V8PRIxPTR_FMT " ; ",
2907 static_cast<intptr_t>(materialization.output_slot_address_),
2908 (*value).ptr());
2909 ShortPrint(*value, trace_scope()->file());
2910 PrintF(trace_scope()->file(), "\n");
2911 }
2912
2913 *(reinterpret_cast<Address*>(materialization.output_slot_address_)) =
2914 (*value).ptr();
2915 }
2916
2917 for (auto& fbv_materialization : feedback_vector_to_materialize_) {
2918 DirectHandle<Object> closure = fbv_materialization.value_->GetValue();
2919 DCHECK(IsJSFunction(*closure));
2920 Tagged<Object> feedback_vector =
2921 Cast<JSFunction>(*closure)->raw_feedback_cell()->value();
2922 CHECK(IsFeedbackVector(feedback_vector));
2923 *(reinterpret_cast<Address*>(fbv_materialization.output_slot_address_)) =
2924 feedback_vector.ptr();
2925 }
2926
2927 translated_state_.VerifyMaterializedObjects();
2928
2929 bool feedback_updated = translated_state_.DoUpdateFeedback();
2930 if (verbose_tracing_enabled() && feedback_updated) {
2931 FILE* file = trace_scope()->file();
2932 DeoptInfo info = GetDeoptInfo();
2933 PrintF(file, "Feedback updated from deoptimization at ");
2934 OFStream outstr(file);
2935 info.position.Print(outstr, compiled_code_);
2936 PrintF(file, ", %s\n", DeoptimizeReasonToString(info.deopt_reason));
2937 }
2938
2939 isolate_->materialized_object_store()->Remove(
2940 static_cast<Address>(stack_fp_));
2941}
2942
2943 void Deoptimizer::QueueValueForMaterialization(
2944 Address output_address, Tagged<Object> obj,
2945 const TranslatedFrame::iterator& iterator) {
2946 if (obj == ReadOnlyRoots(isolate_).arguments_marker()) {
2947 values_to_materialize_.push_back({output_address, iterator});
2948 }
2949}
2950
2951 void Deoptimizer::QueueFeedbackVectorForMaterialization(
2952 Address output_address, const TranslatedFrame::iterator& iterator) {
2953 feedback_vector_to_materialize_.push_back({output_address, iterator});
2954}
2955
2956 unsigned Deoptimizer::ComputeInputFrameAboveFpFixedSize() const {
2957 unsigned fixed_size = CommonFrameConstants::kFixedFrameSizeAboveFp;
2958 IF_WASM(DCHECK_IMPLIES, function_.is_null(), v8_flags.wasm_deopt);
2959 DCHECK_IMPLIES(function_.is_null(), compiled_code_->parameter_count() == 0);
2960 fixed_size += ComputeIncomingArgumentSize(compiled_code_);
2961 return fixed_size;
2962}
2963
2964namespace {
2965
2966 // Get the actual deopt call PC from the return address of the deopt, which
2967 // points immediately after the deopt call.
2968//
2969// See also the Deoptimizer constructor.
2970Address GetDeoptCallPCFromReturnPC(Address return_pc, Tagged<Code> code) {
2971 DCHECK_GT(Deoptimizer::kEagerDeoptExitSize, 0);
2972 DCHECK_GT(Deoptimizer::kLazyDeoptExitSize, 0);
2973 Tagged<DeoptimizationData> deopt_data =
2974 Cast<DeoptimizationData>(code->deoptimization_data());
2975 Address deopt_start =
2976 code->instruction_start() + deopt_data->DeoptExitStart().value();
2977 int eager_deopt_count = deopt_data->EagerDeoptCount().value();
2978 Address lazy_deopt_start =
2979 deopt_start + eager_deopt_count * Deoptimizer::kEagerDeoptExitSize;
2980 // The deoptimization exits are sorted so that lazy deopt exits appear
2981 // after eager deopts.
2982 static_assert(static_cast<int>(DeoptimizeKind::kLazy) ==
2983 static_cast<int>(kLastDeoptimizeKind),
2984 "lazy deopts are expected to be emitted last");
2985 if (return_pc <= lazy_deopt_start) {
2986 return return_pc - Deoptimizer::kEagerDeoptExitSize;
2987 } else {
2988 return return_pc - Deoptimizer::kLazyDeoptExitSize;
2989 }
2990}
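// Example: if the code has four eager deopt exits followed by its lazy
// exits, a return_pc in the lazy region maps back to the deopt call by
// subtracting kLazyDeoptExitSize; otherwise kEagerDeoptExitSize is used.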
2991
2992} // namespace
2993
2995 // The fp-to-sp delta already takes the context, constant pool pointer and the
2996 // function into account so we have to avoid double counting them.
2997 unsigned fixed_size_above_fp = ComputeInputFrameAboveFpFixedSize();
2998 unsigned result = fixed_size_above_fp + fp_to_sp_delta_;
3000 unsigned stack_slots = compiled_code_->stack_slots();
3001 if (compiled_code_->is_maglevved() && !deoptimizing_throw_) {
3002 // Maglev code can deopt in deferred code which has spilled registers across
3003 // the call. These will be included in the fp_to_sp_delta, but the expected
3004 // frame size won't include them, so we need to check for less-equal rather
3005 // than equal. For deoptimizing throws, these will have already been trimmed
3006 // off.
3007 CHECK_LE(fixed_size_above_fp + (stack_slots * kSystemPointerSize) -
3008 CommonFrameConstants::kFixedFrameSizeAboveFp,
3009 result);
3010 // With slow asserts we can check this exactly, by looking up the safepoint.
3011 if (v8_flags.enable_slow_asserts) {
3012 Address deopt_call_pc = GetDeoptCallPCFromReturnPC(from_, compiled_code_);
3013 MaglevSafepointTable table(isolate_, deopt_call_pc, compiled_code_);
3014 MaglevSafepointEntry safepoint = table.FindEntry(deopt_call_pc);
3015 unsigned extra_spills = safepoint.num_extra_spill_slots();
3016 CHECK_EQ(fixed_size_above_fp + (stack_slots * kSystemPointerSize) -
3017 CommonFrameConstants::kFixedFrameSizeAboveFp +
3018 extra_spills * kSystemPointerSize,
3019 result);
3020 }
3021 } else {
3022 unsigned outgoing_size = 0;
3023 CHECK_EQ(fixed_size_above_fp + (stack_slots * kSystemPointerSize) -
3024 CommonFrameConstants::kFixedFrameSizeAboveFp + outgoing_size,
3025 result);
3026 }
3027 return result;
3028}
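// Worked example (64-bit, assuming the usual two-slot fixed frame above fp):
// with 10 stack slots and no outgoing parameters, the non-Maglev CHECK_EQ
// above requires fp_to_sp_delta_ == 10 * 8 - 16 bytes.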
3029
3030// static
3031 unsigned Deoptimizer::ComputeIncomingArgumentSize(Tagged<Code> code) {
3032 int parameter_slots = code->parameter_count();
3033 return parameter_slots * kSystemPointerSize;
3034}
3035
3036 Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Tagged<Code> code,
3037 Address pc) {
3038 CHECK(code->instruction_start() <= pc && pc <= code->instruction_end());
3039 SourcePosition last_position = SourcePosition::Unknown();
3040 DeoptimizeReason last_reason = DeoptimizeReason::kUnknown;
3041 uint32_t last_node_id = 0;
3042 int last_deopt_id = kNoDeoptimizationId;
3043 int mask = RelocInfo::ModeMask(RelocInfo::DEOPT_SCRIPT_OFFSET) |
3044 RelocInfo::ModeMask(RelocInfo::DEOPT_INLINING_ID) |
3045 RelocInfo::ModeMask(RelocInfo::DEOPT_REASON) |
3046 RelocInfo::ModeMask(RelocInfo::DEOPT_ID) |
3047 RelocInfo::ModeMask(RelocInfo::DEOPT_NODE_ID);
3048 for (RelocIterator it(code, mask); !it.done(); it.next()) {
3049 RelocInfo* info = it.rinfo();
3050 if (info->pc() >= pc) break;
3051 if (info->rmode() == RelocInfo::DEOPT_SCRIPT_OFFSET) {
3052 int script_offset = static_cast<int>(info->data());
3053 it.next();
3054 DCHECK(it.rinfo()->rmode() == RelocInfo::DEOPT_INLINING_ID);
3055 int inlining_id = static_cast<int>(it.rinfo()->data());
3056 last_position = SourcePosition(script_offset, inlining_id);
3057 } else if (info->rmode() == RelocInfo::DEOPT_ID) {
3058 last_deopt_id = static_cast<int>(info->data());
3059 } else if (info->rmode() == RelocInfo::DEOPT_REASON) {
3060 last_reason = static_cast<DeoptimizeReason>(info->data());
3061 } else if (info->rmode() == RelocInfo::DEOPT_NODE_ID) {
3062 last_node_id = static_cast<uint32_t>(info->data());
3063 }
3064 }
3065 return DeoptInfo(last_position, last_reason, last_node_id, last_deopt_id);
3066}
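// The DEOPT_* reloc entries are emitted in code order, so the loop above
// keeps the most recent entries seen before `pc`; together they describe
// the deopt site that `pc` belongs to.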
3067
3068} // namespace internal
3069} // namespace v8
FrameWriter(Deoptimizer *deoptimizer, FrameDescription *frame, CodeTracer::Scope *trace_scope)
void PushTranslatedValue(const TranslatedFrame::iterator &iterator, const char *debug_hint="")
Address output_address(unsigned output_offset)
unsigned top_offset() const
FrameDescription * frame_
void DebugPrintOutputObject(Tagged< Object > obj, unsigned output_offset, const char *debug_hint="")
void DebugPrintOutputValue(intptr_t value, const char *debug_hint="")
void PushCallerFp(intptr_t fp)
FrameDescription * frame()
CodeTracer::Scope *const trace_scope_
static const int kNoHandlerFound
static constexpr int kHeaderSize
V8_EXPORT_PRIVATE Tagged< Code > FindCodeForInnerPointer(Address inner_pointer)
Definition heap.cc:7153
V8_EXPORT_PRIVATE void CollectAllGarbage(GCFlags gc_flags, GarbageCollectionReason gc_reason, const GCCallbackFlags gc_callback_flags=kNoGCCallbackFlags)
Definition heap.cc:1258
MaterializedObjectStore * materialized_object_store() const
Definition isolate.h:1385
Counters * counters()
Definition isolate.h:1180
StackGuard * stack_guard()
Definition isolate.h:1198
Builtins * builtins()
Definition isolate.h:1443
Tagged< JSFunction > function() const override
Definition frames.cc:2492
static Register constant_pool_pointer_register()
static constexpr MachineType Int32()
static V8_INLINE Address SignAndCheckPC(Isolate *isolate, Address pc, Address sp)
static V8_INLINE Address StripPAC(Address pc)
static V8_INLINE Address MoveSignedPC(Isolate *isolate, Address pc, Address new_sp, Address old_sp)
constexpr int8_t code() const
static const RegisterConfiguration * Default()
void SetRegister(unsigned n, intptr_t value)
static constexpr Register from_code(int code)
static constexpr int ModeMask(Mode mode)
Definition reloc-info.h:272
static BlockAccessScope MaybeBlockAccess()
static constexpr Tagged< Smi > FromInt(int value)
Definition smi.h:38
static constexpr Tagged< Smi > zero()
Definition smi.h:99
static SourcePosition Unknown()
static constexpr int32_t TypeToMarker(Type type)
Definition frames.h:196
Address fp() const
Definition frames.h:297
bool is_optimized_js() const
Definition frames.h:233
V8_INLINE constexpr StorageType ptr() const
V8_INLINE constexpr bool is_null() const
Definition tagged.h:502
BytecodeOffset bytecode_offset() const
void Add(const TranslatedValue &value)
Tagged< BytecodeArray > raw_bytecode_array() const
Tagged< SharedFunctionInfo > raw_shared_info() const
std::vector< TranslatedFrame > & frames()
void Init(Isolate *isolate, Address input_frame_pointer, Address stack_frame_pointer, DeoptTranslationIterator *iterator, Tagged< ProtectedDeoptimizationLiteralArray > protected_literal_array, const DeoptimizationLiteralProvider &literal_array, RegisterValues *registers, FILE *trace_file, int parameter_count, int actual_argument_count)
std::vector< TranslatedFrame >::iterator iterator
void Prepare(Address stack_frame_pointer)
static TranslatedValue NewDouble(TranslatedState *container, Float64 value)
static TranslatedValue NewInt64ToBigInt(TranslatedState *container, int64_t value)
static TranslatedValue NewInt32(TranslatedState *container, int32_t value)
static TranslatedValue NewFloat(TranslatedState *container, Float32 value)
static TranslatedValue NewTagged(TranslatedState *container, Tagged< Object > literal)
static constexpr int kFrameTypeOffset
uint32_t register_stack_slot_count() const
Definition frames.h:1923
static UnoptimizedFrameInfo Precise(int parameters_count_with_receiver, int translation_height, bool is_topmost, bool pad_arguments)
Definition frames.h:1908
uint32_t frame_size_in_bytes() const
Definition frames.h:1929
uint32_t frame_size_in_bytes_without_fixed() const
Definition frames.h:1926
static bool IsValidOffset(Handle< BytecodeArray > bytecode_array, int offset)
std::vector< WasmCode * > PublishCode(base::Vector< UnpublishedWasmCode > unpublished_code)
CompilationState * compilation_state() const
base::Vector< const uint8_t > wire_bytes() const
V8_WARN_UNUSED_RESULT UnpublishedWasmCode AddCompiledCode(WasmCompilationResult &)
WasmCode * LookupCode(Isolate *isolate, Address pc) const
const WasmDeoptData & GetDeoptData() const
#define PROFILE(the_isolate, Call)
Definition code-events.h:59
Zone * zone_
XMMRegister const input_
Handle< Code > code
#define V8_EMBEDDED_CONSTANT_POOL_BOOL
Definition globals.h:81
const JSFunctionRef function_
int start
uint32_t count
Handle< SharedFunctionInfo > info
WasmFrame *const frame_
#define DEOPTIMIZATION_HELPER_BUILTINS(V)
enum v8::internal::@1270::DeoptimizableCodeIterator::@67 state_
#define CHECK_BUILTIN(builtin, offset)
std::unique_ptr< SafepointScope > safepoint_scope_
std::unique_ptr< ObjectIterator > object_iterator_
Isolate *const isolate_
int32_t offset
ZoneVector< RpoNumber > & result
LiftoffRegister reg
MovableLabel continuation
int pc_offset
Comparator::Output * output_
uint32_t const mask
base::SmallVector< int32_t, 1 > stack_slots
STL namespace.
static V ReadUnalignedValue(Address p)
Definition memory.h:28
T & Memory(Address addr)
Definition memory.h:18
constexpr bool IsInRange(T value, U lower_limit, U higher_limit)
Definition bounds.h:20
constexpr Vector< T > VectorOf(T *start, size_t size)
Definition vector.h:360
uintptr_t Address
Definition memory.h:13
LockGuard< Mutex > MutexGuard
Definition mutex.h:219
FloatWithBits< 32 > Float32
Definition index.h:233
WordWithBits< 128 > Simd128
Definition index.h:236
constexpr DoubleRegister kFpReturnRegisters[]
WasmCodeManager * GetWasmCodeManager()
const wasm::FunctionSig * GetI32Sig(Zone *zone, const wasm::FunctionSig *sig)
WasmEngine * GetWasmEngine()
int declared_function_index(const WasmModule *module, int func_index)
void IterateSignatureImpl(const SigType *sig, bool extra_callable_param, ResultCollector &locations, int *untagged_parameter_slots, int *total_parameter_slots, int *untagged_return_slots, int *total_return_slots)
Signature< ValueType > FunctionSig
constexpr Register no_reg
constexpr Register kRootRegister
PerThreadAssertScopeDebugOnly< false, SAFEPOINTS_ASSERT, HEAP_ALLOCATION_ASSERT > DisallowGarbageCollection
DwVfpRegister DoubleRegister
void PrintF(const char *format,...)
Definition utils.cc:39
constexpr int kPCOnStackSize
Definition globals.h:412
Tagged(T object) -> Tagged< T >
char const * DeoptimizeReasonToString(DeoptimizeReason reason)
V8_INLINE constexpr bool IsSmi(TaggedImpl< kRefType, StorageType > obj)
Definition objects.h:665
BuiltinContinuationMode
Definition frames.h:1899
constexpr Register kJavaScriptCallArgCountRegister
constexpr Register kInterpreterAccumulatorRegister
kStaticElementsTemplateOffset kInstancePropertiesTemplateOffset Tagged< FixedArray >
V8_INLINE DirectHandle< T > direct_handle(Tagged< T > object, Isolate *isolate)
kWasmInternalFunctionIndirectPointerTag kProtectedInstanceDataOffset sig
constexpr int kFPOnStackSize
Definition globals.h:413
constexpr bool IsAnyTagged(MachineRepresentation rep)
constexpr int kJSArgcReceiverSlots
Definition globals.h:2778
constexpr int kSystemPointerSize
Definition globals.h:410
constexpr Register kReturnRegister1
constexpr int kStackLimitSlackForDeoptimizationInBytes
Definition globals.h:213
constexpr uint32_t kZapValue
Definition globals.h:1005
constexpr Register kReturnRegister0
void ShortPrint(Tagged< Object > obj, FILE *out)
Definition objects.cc:1865
constexpr DeoptimizeKind kLastDeoptimizeKind
Definition globals.h:874
constexpr Register kContextRegister
const int kHeapObjectTag
Definition v8-internal.h:72
V8_EXPORT_PRIVATE FlagValues v8_flags
constexpr int JSParameterCount(int param_count_without_receiver)
Definition globals.h:2782
constexpr Register kPtrComprCageBaseRegister
constexpr bool Is64()
constexpr bool kAllCodeObjectsLiveInTrustedSpace
constexpr int ArgumentPaddingSlots(int argument_count)
static constexpr Address kNullAddress
Definition v8-internal.h:53
constexpr Register cp
JSArrayBuffer::IsDetachableBit is_shared
constexpr int kNoDeoptimizationId
Definition globals.h:861
constexpr Register kJavaScriptCallDispatchHandleRegister
constexpr bool CodeKindCanDeoptimize(CodeKind kind)
Definition code-kind.h:83
Tagged< To > Cast(Tagged< From > value, const v8::SourceLocation &loc=INIT_SOURCE_LOCATION_IN_DEBUG)
Definition casting.h:150
#define RCS_SCOPE(...)
#define FATAL(...)
Definition logging.h:47
#define DCHECK_LE(v1, v2)
Definition logging.h:490
#define CHECK_GE(lhs, rhs)
#define DCHECK_NULL(val)
Definition logging.h:491
#define CHECK(condition)
Definition logging.h:124
#define CHECK_GT(lhs, rhs)
#define CHECK_LT(lhs, rhs)
#define CHECK_LE(lhs, rhs)
#define CHECK_WITH_MSG(condition, message)
Definition logging.h:118
#define DCHECK_NOT_NULL(val)
Definition logging.h:492
#define CHECK_NULL(val)
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define CHECK_EQ(lhs, rhs)
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define DCHECK_GT(v1, v2)
Definition logging.h:487
#define IF_WASM(V,...)
Definition macros.h:472
#define V8PRIxPTR
Definition macros.h:331
#define V8PRIxPTR_FMT
Definition macros.h:340
const DeoptimizeReason deopt_reason
Definition deoptimizer.h:47
static CompilationEnv ForModule(const NativeModule *native_module)
Symbol file
#define TRACE_EVENT0(category_group, name)