v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
code-generator.cc
1// Copyright 2013 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
7#include <optional>
8
9#include "src/base/bounds.h"
10#include "src/base/iterator.h"
22#include "src/logging/log.h"
24#include "src/objects/smi.h"
26#include "src/utils/utils.h"
27
28#if V8_ENABLE_WEBASSEMBLY
30#endif
31
32namespace v8 {
33namespace internal {
34namespace compiler {
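// CodeGenerator::JumpTable is a node in a singly linked list of jump tables:
// each node carries the label to bind at the table's position and the vector
// of target labels to emit (see the jump-table emission loop in AssembleCode).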
35
37 public:
39 : next_(next), targets_(targets) {}
40
41 Label* label() { return &label_; }
42 JumpTable* next() const { return next_; }
43 const base::Vector<Label*>& targets() const { return targets_; }
44
45 private:
49};
50
52 InstructionSequence* instructions,
53 OptimizedCompilationInfo* info, Isolate* isolate,
54 std::optional<OsrHelper> osr_helper,
55 int start_source_position,
56 JumpOptimizationInfo* jump_opt,
57 const AssemblerOptions& options, Builtin builtin,
58 size_t max_unoptimized_frame_height,
59 size_t max_pushed_argument_count,
60 const char* debug_name)
61 : zone_(codegen_zone),
62 isolate_(isolate),
63 frame_access_state_(nullptr),
65 instructions_(instructions),
66 unwinding_info_writer_(codegen_zone),
67 info_(info),
68 labels_(codegen_zone->AllocateArray<Label>(
69 instructions->InstructionBlockCount())),
70 current_block_(RpoNumber::Invalid()),
71 start_source_position_(start_source_position),
72 current_source_position_(SourcePosition::Unknown()),
73 masm_(isolate, codegen_zone, options, CodeObjectRequired::kNo,
74 std::unique_ptr<AssemblerBuffer>{}),
75 resolver_(this),
76 safepoints_(codegen_zone),
77 handlers_(codegen_zone),
78 deoptimization_exits_(codegen_zone),
79 protected_deoptimization_literals_(codegen_zone),
80 deoptimization_literals_(codegen_zone),
81 translations_(codegen_zone),
82 max_unoptimized_frame_height_(max_unoptimized_frame_height),
83 max_pushed_argument_count_(max_pushed_argument_count),
84 caller_registers_saved_(false),
85 jump_tables_(nullptr),
86 ools_(nullptr),
87 osr_helper_(std::move(osr_helper)),
88 osr_pc_offset_(-1),
91#if V8_ENABLE_WEBASSEMBLY
92 protected_instructions_(codegen_zone),
93#endif // V8_ENABLE_WEBASSEMBLY
95 block_starts_(codegen_zone),
96 instr_starts_(codegen_zone),
97 debug_name_(debug_name) {
98 for (int i = 0; i < instructions->InstructionBlockCount(); ++i) {
99 new (&labels_[i]) Label;
100 }
101 CreateFrameAccessState(frame);
102 CHECK_EQ(info->is_osr(), osr_helper_.has_value());
103 masm_.set_jump_optimization_info(jump_opt);
104 CodeKind code_kind = info->code_kind();
105 if (code_kind == CodeKind::WASM_FUNCTION ||
106 code_kind == CodeKind::WASM_TO_CAPI_FUNCTION ||
107 code_kind == CodeKind::WASM_TO_JS_FUNCTION ||
108 code_kind == CodeKind::JS_TO_WASM_FUNCTION) {
109 masm_.set_abort_hard(true);
110 }
111 masm_.set_builtin(builtin);
112}
113
114void CodeGenerator::RecordProtectedInstruction(uint32_t instr_offset) {
115#if V8_ENABLE_WEBASSEMBLY
116 protected_instructions_.push_back({instr_offset});
117#endif // V8_ENABLE_WEBASSEMBLY
118}
119
124
126 uint32_t* offset) {
127 DCHECK_EQ(instr->arch_opcode(), kArchStackPointerGreaterThan);
128
130 static_cast<StackCheckKind>(MiscField::decode(instr->opcode()));
131 if (kind != StackCheckKind::kJSFunctionEntry) return false;
132
133 uint32_t stack_check_offset = *offset = GetStackCheckOffset();
134 return stack_check_offset > kStackLimitSlackForDeoptimizationInBytes;
135}
136
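// GetStackCheckOffset() returns the larger of (a) the delta between the
// maximal unoptimized frame height and the optimized frame height and (b) the
// maximal number of bytes pushed for outgoing arguments; it returns 0 when the
// code has no frame. This is the slack the stack limit check at function entry
// has to account for.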
138 if (!frame_access_state()->has_frame()) {
141 return 0;
142 }
143
144 size_t incoming_parameter_count =
146 DCHECK(is_int32(incoming_parameter_count));
147 int32_t optimized_frame_height =
148 static_cast<int32_t>(incoming_parameter_count) * kSystemPointerSize +
149 frame()->GetTotalFrameSlotCount() * kSystemPointerSize;
151 int32_t signed_max_unoptimized_frame_height =
152 static_cast<int32_t>(max_unoptimized_frame_height_);
153
154 // The offset is either the delta between the optimized frames and the
155 // interpreted frame, or the maximal number of bytes pushed to the stack
156 // while preparing for function calls, whichever is bigger.
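// As an illustrative example: with a maximal unoptimized frame of 96 bytes,
// an optimized frame of 64 bytes and at most 5 pushed argument slots on a
// 64-bit target, the result is max(96 - 64, 5 * 8) = 40 bytes.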
157 uint32_t frame_height_delta = static_cast<uint32_t>(std::max(
158 signed_max_unoptimized_frame_height - optimized_frame_height, 0));
159 uint32_t max_pushed_argument_bytes =
160 static_cast<uint32_t>(max_pushed_argument_count_ * kSystemPointerSize);
161 return std::max(frame_height_delta, max_pushed_argument_bytes);
162}
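// AssembleDeoptimizerCall() emits the code for one DeoptimizationExit: it
// records the deopt reason (when source positions are tracked), binds the
// exit label (as an exception handler for lazy deopts) and calls the
// deoptimization entry builtin for the exit's kind.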
163
165 DeoptimizationExit* exit) {
166 int deoptimization_id = exit->deoptimization_id();
167 if (deoptimization_id > Deoptimizer::kMaxNumberOfEntries) {
169 }
170
171 DeoptimizeKind deopt_kind = exit->kind();
172 DeoptimizeReason deoptimization_reason = exit->reason();
173 Label* jump_deoptimization_entry_label =
174 &jump_deoptimization_entry_labels_[static_cast<int>(deopt_kind)];
175 if (info()->source_positions()) {
176 masm()->RecordDeoptReason(deoptimization_reason, exit->node_id(),
177 exit->pos(), deoptimization_id);
178 }
179
180 if (deopt_kind == DeoptimizeKind::kLazy) {
182 masm()->BindExceptionHandler(exit->label());
183 } else {
185 masm()->bind(exit->label());
186 }
187 Builtin target = Deoptimizer::GetDeoptimizationEntry(deopt_kind);
188 masm()->CallForDeoptimization(target, deoptimization_id, exit->label(),
189 deopt_kind, exit->continue_label(),
190 jump_deoptimization_entry_label);
191
192 exit->set_emitted();
193
194 return kSuccess;
195}
196
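// AssembleCode() drives code generation for the whole InstructionSequence: it
// emits the prologue checks, assembles every block in assembly order, then the
// out-of-line code, deoptimization exits and jump tables, and finally metadata
// such as the safepoint and exception handler tables.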
200
202 OptimizedCompilationInfo* info = this->info();
203 auto call_descriptor = linkage()->GetIncomingDescriptor();
204
205 // Compute incoming parameter count for code using JS linkage. This will
206 // ultimately set the parameter count on the resulting Code object.
207 if (call_descriptor->IsJSFunctionCall()) {
208 parameter_count_ = call_descriptor->ParameterSlotCount();
209 if (Builtins::IsBuiltinId(info->builtin())) {
211 Builtins::GetStackParameterCount(info->builtin()));
212 } else if (info->has_bytecode_array()) {
213 CHECK_EQ(parameter_count_, info->bytecode_array()->parameter_count());
214 }
215 }
216
217 // Open a frame scope to indicate that there is a frame on the stack. The
218 // MANUAL indicates that the scope shouldn't actually generate code to set up
219 // the frame (that is done in AssemblePrologue).
220 FrameScope frame_scope(masm(), StackFrame::MANUAL);
221
222 if (info->source_positions()) {
224 }
226
227 masm()->CodeEntry();
228
229 // Check that {kJavaScriptCallCodeStartRegister} has been set correctly.
230 if (v8_flags.debug_code && info->called_with_code_start_register()) {
231 masm()->RecordComment("-- Prologue: check code start register --");
233 }
234
235#ifdef V8_ENABLE_LEAPTIERING
236 // Check that {kJavaScriptCallDispatchHandleRegister} has been set correctly.
237 if (v8_flags.debug_code && call_descriptor->IsJSFunctionCall()) {
238 masm()->RecordComment("-- Prologue: check dispatch handle register --");
239 AssembleDispatchHandleRegisterCheck();
240 }
241#endif
242
243#if V8_ENABLE_WEBASSEMBLY
244 if (info->code_kind() == CodeKind::WASM_TO_JS_FUNCTION ||
245 info->builtin() == Builtin::kWasmToJsWrapperCSA ||
246 wasm::BuiltinLookup::IsWasmBuiltinId(info->builtin())) {
247 // By default the code generator can convert slot IDs to SP-relative memory
248 // operands depending on the offset if the encoding is more efficient.
249 // However the SP may switch to the central stack for wasm-to-js wrappers
250 // and wasm builtins, so disable this optimization there.
251 // TODO(thibaudm): Disable this more selectively, only wasm builtins that
252 // call JS builtins can switch, and only around the call site.
254 }
255#endif
256
258 // We want to bail out only from JS functions, which are the only ones
259 // that are optimized.
260 if (info->IsOptimizing()) {
261 DCHECK(call_descriptor->IsJSFunctionCall());
262 masm()->RecordComment("-- Prologue: check for deoptimization --");
264 }
265
266 // Define deoptimization literals for all inlined functions.
269 info->inlined_functions()) {
270 if (!inlined.shared_info.equals(info->shared_info())) {
271 int index = DefineDeoptimizationLiteral(
272 DeoptimizationLiteral(inlined.shared_info));
273 inlined.RegisterInlinedFunctionId(index);
274 }
275 }
277
279 instructions()->InstructionBlockCount());
280
281 if (info->trace_turbo_json()) {
282 block_starts_.assign(instructions()->instruction_blocks().size(), -1);
283 instr_starts_.assign(instructions()->instructions().size(), {});
284 }
285 // Assemble instructions in assembly order.
287 for (const InstructionBlock* block : instructions()->ao_blocks()) {
288 // Align loop headers on vendor recommended boundaries.
289 if (block->ShouldAlignLoopHeader()) {
291 } else if (block->ShouldAlignCodeTarget()) {
293 }
294
295 if (info->trace_turbo_json()) {
296 block_starts_[block->rpo_number().ToInt()] = masm()->pc_offset();
297 }
298 // Bind a label for a block.
299 current_block_ = block->rpo_number();
301 if (v8_flags.code_comments && !block->omitted_by_jump_threading()) {
302 std::ostringstream buffer;
303 buffer << "-- B" << block->rpo_number().ToInt() << " start";
304 if (block->IsDeferred()) buffer << " (deferred)";
305 if (!block->needs_frame()) buffer << " (no frame)";
306 if (block->must_construct_frame()) buffer << " (construct frame)";
307 if (block->must_deconstruct_frame()) buffer << " (deconstruct frame)";
308
309 if (block->IsLoopHeader()) {
310 buffer << " (loop up to " << block->loop_end().ToInt() << ")";
311 }
312 if (block->loop_header().IsValid()) {
313 buffer << " (in loop " << block->loop_header().ToInt() << ")";
314 }
315 buffer << " --";
316 masm()->RecordComment(buffer.str().c_str(), SourceLocation());
317 }
318
319 frame_access_state()->MarkHasFrame(block->needs_frame());
320
322
323#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
324 if (block->IsSwitchTarget()) {
325 masm()->JumpTarget();
326 }
327#endif
328
329 if (block->must_construct_frame()) {
331 // We need to set up the root register after we assemble the prologue, to
332 // avoid clobbering callee-saved registers in case of C linkage and
333 // using the roots.
334 // TODO(mtrofin): investigate how we can avoid doing this repeatedly.
335 if (call_descriptor->InitializeRootRegister()) {
337 }
338 }
339#ifdef CAN_USE_RVV_INSTRUCTIONS
340 // RVV uses VectorUnit to emit vset{i}vl{i}, reducing the static and dynamic
341 // overhead of the vset{i}vl{i} instruction. However, jumps back between
342 // blocks mean an RVV instruction may otherwise see an incorrect vtype, so
343 // VectorUnit is cleared here to ensure that the vtype is correct within
344 // the block.
345 masm()->VU.clear();
346#endif
347 if (V8_EMBEDDED_CONSTANT_POOL_BOOL && !block->needs_frame()) {
348 ConstantPoolUnavailableScope constant_pool_unavailable(masm());
349 result_ = AssembleBlock(block);
350 } else {
351 result_ = AssembleBlock(block);
352 }
353 if (result_ != kSuccess) return;
355 }
356
357 // Assemble all out-of-line code.
359 if (ools_) {
360 masm()->RecordComment("-- Out of line code --");
361 for (OutOfLineCode* ool = ools_; ool; ool = ool->next()) {
362 masm()->bind(ool->entry());
363 ool->Generate();
364 if (ool->exit()->is_bound()) masm()->jmp(ool->exit());
365 }
366 }
367
368 // This nop operation is needed to ensure that the trampoline is not
369 // confused with the pc of the call before deoptimization.
370 // The test regress/regress-259 is an example of where we need it.
371 masm()->nop();
372
373 // For some targets, we must make sure that constant and veneer pools are
374 // emitted before emitting the deoptimization exits.
376
378
379 // Assemble deoptimization exits.
381 int last_updated = 0;
382 // We sort the deoptimization exits here so that the lazy ones will be visited
383 // last. We need this as lazy deopts might need additional instructions.
384 auto cmp = [](const DeoptimizationExit* a, const DeoptimizationExit* b) {
385 // The deoptimization exits are sorted so that lazy deopt exits appear after
386 // eager deopts.
387 static_assert(static_cast<int>(DeoptimizeKind::kLazy) ==
388 static_cast<int>(kLastDeoptimizeKind),
389 "lazy deopts are expected to be emitted last");
390 if (a->kind() != b->kind()) {
391 return a->kind() < b->kind();
392 }
393 return a->pc_offset() < b->pc_offset();
394 };
395 std::sort(deoptimization_exits_.begin(), deoptimization_exits_.end(), cmp);
396
397 {
398#ifdef V8_TARGET_ARCH_PPC64
400 masm());
401#endif
403 if (exit->emitted()) continue;
404 exit->set_deoptimization_id(next_deoptimization_id_++);
406 if (result_ != kSuccess) return;
407
408 // UpdateDeoptimizationInfo expects lazy deopts to be visited in pc_offset
409 // order, which is always the case since they are added to
410 // deoptimization_exits_ in that order, and the optional sort operation
411 // above preserves that order.
412 if (exit->kind() == DeoptimizeKind::kLazy) {
413 int trampoline_pc = exit->label()->pos();
414 last_updated = safepoints()->UpdateDeoptimizationInfo(
415 exit->pc_offset(), trampoline_pc, last_updated,
416 exit->deoptimization_id());
417 }
418 }
419 }
420
422 // TODO(jgruber): Move all inlined metadata generation into a new,
423 // architecture-independent version of FinishCode. Currently, this includes
424 // the safepoint table, handler table, constant pool, and code comments, in
425 // that order.
426 FinishCode();
427
429 // Emit the jump tables.
430 if (jump_tables_) {
432 for (JumpTable* table = jump_tables_; table; table = table->next()) {
433 masm()->bind(table->label());
434 AssembleJumpTable(table->targets());
435 }
436 }
437
438 // The LinuxPerfJitLogger logs code up until here, excluding the safepoint
439 // table. Resolve the unwinding info now so it is aware of the same code
440 // size as reported by perf.
442
443 // Final alignment before starting on the metadata section.
445
446 safepoints()->Emit(masm(), frame()->GetTotalFrameSlotCount());
447
448 // Emit the exception handler table.
449 if (!handlers_.empty()) {
451 for (size_t i = 0; i < handlers_.size(); ++i) {
452 int pos = handlers_[i].handler != nullptr ? handlers_[i].handler->pos()
455 }
456 }
457
460
462}
463
464#ifndef V8_TARGET_ARCH_X64
466 Register input, RpoNumber def_block, std::pair<int32_t, Label*>* begin,
467 std::pair<int32_t, Label*>* end) {
469 while (begin != end) {
470 masm()->JumpIfEqual(input, begin->first, begin->second);
471 ++begin;
472 }
474 return;
475 }
476 auto middle = begin + (end - begin) / 2;
477 Label less_label;
478 masm()->JumpIfLessThan(input, middle->first, &less_label);
479 AssembleArchBinarySearchSwitchRange(input, def_block, middle, end);
480 masm()->bind(&less_label);
481 AssembleArchBinarySearchSwitchRange(input, def_block, begin, middle);
482}
483#endif // V8_TARGET_ARCH_X64
484
489
493
495#if V8_ENABLE_WEBASSEMBLY
496 return base::OwnedCopyOf(
498#else
499 return {};
500#endif // V8_ENABLE_WEBASSEMBLY
501}
502
504 if (result_ != kSuccess) {
506 return {};
507 }
508
509 // Allocate the source position table.
512
513 // Allocate and install the code.
514 CodeDesc desc;
515 masm()->GetCode(isolate()->main_thread_local_isolate(), &desc, safepoints(),
517
518#if defined(V8_OS_WIN64)
520 isolate_->SetBuiltinUnwindData(info_->builtin(), masm()->GetUnwindInfo());
521 }
522#endif // V8_OS_WIN64
523
526 }
527
528 Factory::CodeBuilder builder(isolate(), desc, info()->code_kind());
529 builder.set_builtin(info()->builtin())
530 .set_inlined_bytecode_size(info()->inlined_bytecode_size())
534 .set_stack_slots(frame()->GetTotalFrameSlotCount())
535 .set_profiler_data(info()->profiler_data())
536 .set_osr_offset(info()->osr_offset());
537
538 if (info()->function_context_specializing()) {
540 }
541
544 DCHECK(info()->has_bytecode_array() ||
545 info()->code_kind() == CodeKind::WASM_FUNCTION);
546 }
547
548 MaybeHandle<Code> maybe_code = builder.TryBuild();
549
551 if (!maybe_code.ToHandle(&code)) {
553 return {};
554 }
555
556 LOG_CODE_EVENT(isolate(), CodeLinePosInfoRecordEvent(
557 code->instruction_start(), *source_positions,
559
560 return code;
561}
562
564 return instructions()
566 ->ao_number()
567 .IsNext(instructions()->InstructionBlockAt(block)->ao_number());
568}
569
571 auto safepoint = safepoints()->DefineSafepoint(masm(), pc_offset);
572
573 for (int tagged : frame()->tagged_slots()) {
574 safepoint.DefineTaggedStackSlot(tagged);
575 }
576
577 int frame_header_offset = frame()->GetFixedSlotCount();
578 for (const InstructionOperand& operand : references->reference_operands()) {
579 if (operand.IsStackSlot()) {
580 int index = LocationOperand::cast(operand).index();
581 DCHECK_LE(0, index);
582 // We might index values in the fixed part of the frame (i.e. the
583 // closure pointer or the context pointer); these are not spill slots
584 // and therefore don't work with the SafepointTable currently, but
585 // we also don't need to worry about them, since the GC has special
586 // knowledge about those fields anyway.
587 if (index < frame_header_offset) continue;
588 safepoint.DefineTaggedStackSlot(index);
589 }
590 }
591}
592
594 RootIndex* index_return) {
595 const CallDescriptor* incoming_descriptor =
597 if (incoming_descriptor->flags() & CallDescriptor::kCanUseRoots) {
598 return isolate()->roots_table().IsRootHandle(object, index_return) &&
599 RootsTable::IsImmortalImmovable(*index_return);
600 }
601 return false;
602}
603
605 const InstructionBlock* block) {
606 if (block->IsHandler()) {
608 }
609 for (int i = block->code_start(); i < block->code_end(); ++i) {
611 if (result != kSuccess) return result;
612 }
613 return kSuccess;
614}
615
618 if (source.IsImmediate() &&
619 ((push_type & CodeGenerator::kImmediatePush) != 0)) {
620 return true;
621 }
622 if (source.IsRegister() &&
623 ((push_type & CodeGenerator::kRegisterPush) != 0)) {
624 return true;
625 }
626 if (source.IsStackSlot() &&
627 ((push_type & CodeGenerator::kStackSlotPush) != 0)) {
628 return true;
629 }
630 return false;
631}
632
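// GetPushCompatibleMoves() scans the FIRST gap of |instr| for moves whose
// destinations are push-compatible stack slots and that could therefore be
// emitted as machine pushes. If any move also reads from such a slot the
// optimization is abandoned and the full gap resolver handles all moves;
// otherwise only the trailing contiguous run of pushes is kept in |pushes|.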
634 PushTypeFlags push_type,
636 static constexpr int first_push_compatible_index =
638 pushes->clear();
641 Instruction::GapPosition inner_pos =
642 static_cast<Instruction::GapPosition>(i);
643 ParallelMove* parallel_move = instr->GetParallelMove(inner_pos);
644 if (parallel_move != nullptr) {
645 for (auto move : *parallel_move) {
646 InstructionOperand source = move->source();
647 InstructionOperand destination = move->destination();
648 // If there are any moves from slots that will be overridden by pushes,
649 // then the full gap resolver must be used, since the push optimization
650 // doesn't participate in the parallel move and might clobber values
651 // needed for the gap resolve.
652 if (source.IsAnyStackSlot() && LocationOperand::cast(source).index() >=
653 first_push_compatible_index) {
654 pushes->clear();
655 return;
656 }
657 // TODO(danno): Right now, only consider moves from the FIRST gap for
658 // pushes. Theoretically, we could extract pushes for both gaps (there
659 // are cases where this happens), but the logic for that would also have
660 // to check to make sure that non-memory inputs to the pushes from the
661 // LAST gap don't get clobbered in the FIRST gap.
663 if (destination.IsStackSlot() &&
665 first_push_compatible_index) {
667 if (IsValidPush(source, push_type)) {
668 if (index >= static_cast<int>(pushes->size())) {
669 pushes->resize(index + 1);
670 }
671 (*pushes)[index] = move;
672 }
673 }
674 }
675 }
676 }
677 }
678
679 // For now, only support a set of contiguous pushes at the end of the list.
680 size_t push_count_upper_bound = pushes->size();
681 size_t push_begin = push_count_upper_bound;
682 for (auto move : base::Reversed(*pushes)) {
683 if (move == nullptr) break;
684 push_begin--;
685 }
686 size_t push_count = pushes->size() - push_begin;
687 std::copy(pushes->begin() + push_begin,
688 pushes->begin() + push_begin + push_count, pushes->begin());
689 pushes->resize(push_count);
690}
691
694 if (source->IsConstant()) {
695 if (destination->IsAnyRegister()) {
697 } else {
698 DCHECK(destination->IsAnyStackSlot());
700 }
701 }
702 DCHECK(LocationOperand::cast(source)->IsCompatible(
704 if (source->IsAnyRegister()) {
705 if (destination->IsAnyRegister()) {
707 } else {
708 DCHECK(destination->IsAnyStackSlot());
710 }
711 } else {
712 DCHECK(source->IsAnyStackSlot());
713 if (destination->IsAnyRegister()) {
715 } else {
716 DCHECK(destination->IsAnyStackSlot());
718 }
719 }
720}
721
724 DCHECK(LocationOperand::cast(source)->IsCompatible(
726 if (source->IsAnyRegister()) {
727 if (destination->IsAnyRegister()) {
729 } else {
730 DCHECK(destination->IsAnyStackSlot());
732 }
733 } else {
734 DCHECK(source->IsAnyStackSlot());
735 DCHECK(destination->IsAnyStackSlot());
737 }
738}
739
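// ComputeBranchInfo() fills in |branch| for a conditional branch. If both
// targets are the same block it returns that block so the caller can emit at
// most an unconditional jump; otherwise it negates the condition when doing so
// lets the true block fall through or saves a jump to a loop header, and
// returns RpoNumber::Invalid().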
743 // Assemble a branch after this instruction.
745 RpoNumber true_rpo =
746 i.InputRpo(instr->InputCount() - kBranchEndOffsetOfTrueBlock);
747 RpoNumber false_rpo =
748 i.InputRpo(instr->InputCount() - kBranchEndOffsetOfFalseBlock);
749
750 if (true_rpo == false_rpo) {
751 return true_rpo;
752 }
753 if (IsNextInAssemblyOrder(true_rpo) || instructions()
754 ->InstructionBlockAt(false_rpo)
755 ->IsLoopHeaderInAssemblyOrder()) {
756 // Either the true block is next (so we can fall through with the condition
757 // negated), or the false block is a loop header (negating saves one jump).
758 std::swap(true_rpo, false_rpo);
760 }
761 branch->condition = condition;
762 branch->true_label = GetLabel(true_rpo);
763 branch->false_label = GetLabel(false_rpo);
764 branch->fallthru = IsNextInAssemblyOrder(false_rpo);
765 return RpoNumber::Invalid();
766}
767
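// AssembleInstruction() emits one instruction: it resolves the gap moves,
// adjusts the stack around tail calls, emits the architecture-specific code
// and finally the flags continuation (branch, deoptimization exit, boolean
// materialization, select or trap).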
769 int instruction_index, const InstructionBlock* block) {
770 Instruction* instr = instructions()->InstructionAt(instruction_index);
771 if (info()->trace_turbo_json()) {
772 instr_starts_[instruction_index].gap_pc_offset = masm()->pc_offset();
773 }
774 int first_unused_stack_slot;
775 FlagsMode mode = FlagsModeField::decode(instr->opcode());
776 if (mode != kFlags_trap) {
778 }
779 bool adjust_stack =
780 GetSlotAboveSPBeforeTailCall(instr, &first_unused_stack_slot);
781 if (adjust_stack) AssembleTailCallBeforeGap(instr, first_unused_stack_slot);
782 if (instr->opcode() == kArchNop && block->successors().empty() &&
783 block->code_end() - block->code_start() == 1) {
784 // When the frame-less dummy end block in Turbofan contains a Phi node,
785 // don't attempt to access spill slots.
786 // TODO(dmercadier): When the switch to Turboshaft is complete, this
787 // will no longer be required.
788 } else {
790 }
791 if (adjust_stack) AssembleTailCallAfterGap(instr, first_unused_stack_slot);
793 block->must_deconstruct_frame(),
794 instr != instructions()->InstructionAt(block->last_instruction_index()) ||
795 instr->IsRet() || instr->IsJump());
796 if (instr->IsJump() && block->must_deconstruct_frame()) {
798 }
799 if (info()->trace_turbo_json()) {
800 instr_starts_[instruction_index].arch_instr_pc_offset = masm()->pc_offset();
801 }
802 // Assemble architecture-specific code for the instruction.
804 if (result != kSuccess) return result;
805
806 if (info()->trace_turbo_json()) {
807 instr_starts_[instruction_index].condition_pc_offset = masm()->pc_offset();
808 }
809
811 switch (mode) {
812 case kFlags_branch:
814 if (mode == kFlags_conditional_branch) {
816 condition = static_cast<FlagsCondition>(
817 i.ToConstant(instr->InputAt(instr->InputCount() -
819 .ToInt64());
820 }
821 BranchInfo branch;
822 RpoNumber target = ComputeBranchInfo(&branch, condition, instr);
823 if (target.IsValid()) {
824 // redundant branch.
825 if (!IsNextInAssemblyOrder(target)) {
826 AssembleArchJump(target);
827 }
828 return kSuccess;
829 }
830 if (mode == kFlags_branch) {
831 // Assemble architecture-specific branch.
832 AssembleArchBranch(instr, &branch);
833 } else {
835 }
836 break;
837 }
838 case kFlags_deoptimize: {
839 // Assemble a conditional eager deoptimization after this instruction.
841 size_t frame_state_offset =
843 size_t immediate_args_count =
846 instr, frame_state_offset, immediate_args_count);
847 BranchInfo branch;
848 branch.condition = condition;
849 branch.true_label = exit->label();
850 branch.false_label = exit->continue_label();
851 branch.fallthru = true;
853 masm()->bind(exit->continue_label());
854 break;
855 }
856 case kFlags_set: {
857 // Assemble a boolean materialization after this instruction.
859 break;
860 }
862 // Assemble a conditional boolean materialization after this instruction.
864 break;
865 }
866 case kFlags_select: {
868 break;
869 }
870 case kFlags_trap: {
871#if V8_ENABLE_WEBASSEMBLY
872 AssembleArchTrap(instr, condition);
873 break;
874#else
875 UNREACHABLE();
876#endif // V8_ENABLE_WEBASSEMBLY
877 }
878 case kFlags_none: {
879 break;
880 }
881 }
882
883 return kSuccess;
884}
885
887 SourcePosition source_position = SourcePosition::Unknown();
888 if (instr->IsNop() && instr->AreMovesRedundant()) return;
889 if (!instructions()->GetSourcePosition(instr, &source_position)) return;
890 AssembleSourcePosition(source_position);
891}
892
894 if (source_position == current_source_position_) return;
895 current_source_position_ = source_position;
896 if (!source_position.IsKnown()) return;
898 source_position, false);
899 if (v8_flags.code_comments) {
900 OptimizedCompilationInfo* info = this->info();
901 if (!info->IsOptimizing()) {
902#if V8_ENABLE_WEBASSEMBLY
903 if (!info->IsWasm()) return;
904#else
905 return;
906#endif // V8_ENABLE_WEBASSEMBLY
907 }
908 std::ostringstream buffer;
909 buffer << "-- ";
910 // Turbolizer only needs the source position, as it can reconstruct
911 // the inlining stack from other information.
912 if (info->trace_turbo_json() || !masm()->isolate() ||
913 masm()->isolate()->concurrent_recompilation_enabled()) {
914 buffer << source_position;
915 } else {
916 AllowGarbageCollection allocation;
917 AllowHandleAllocation handles;
919 buffer << source_position.InliningStack(masm()->isolate(), info);
920 }
921 buffer << " --";
922 masm()->RecordComment(buffer.str().c_str(), SourceLocation());
923 }
924}
925
927 int* slot) {
928 if (instr->IsTailCall()) {
930 *slot = g.InputInt32(instr->InputCount() - 1);
931 return true;
932 } else {
933 return false;
934 }
935}
936
938#if V8_ENABLE_WEBASSEMBLY
940 if (code_kind == CodeKind::WASM_FUNCTION) {
941 return StubCallMode::kCallWasmRuntimeStub;
942 }
943 if (code_kind == CodeKind::WASM_TO_CAPI_FUNCTION ||
944 code_kind == CodeKind::WASM_TO_JS_FUNCTION) {
946 }
947#endif // V8_ENABLE_WEBASSEMBLY
949}
950
954 Instruction::GapPosition inner_pos =
955 static_cast<Instruction::GapPosition>(i);
956 ParallelMove* move = instr->GetParallelMove(inner_pos);
957 if (move != nullptr) resolver()->Resolve(move);
958 }
959}
960
961namespace {
962
964 OptimizedCompilationInfo* info, Isolate* isolate) {
965 const OptimizedCompilationInfo::InlinedFunctionList& inlined_functions =
966 info->inlined_functions();
969 isolate, static_cast<int>(inlined_functions.size()));
970 for (size_t i = 0; i < inlined_functions.size(); ++i) {
971 inl_positions->set(static_cast<int>(i), inlined_functions[i].position);
972 }
973 return inl_positions;
974}
975
976} // namespace
977
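// GenerateDeoptimizationData() packages everything the deoptimizer needs into
// a DeoptimizationData object: the frame translations, the protected and
// regular literal arrays, inlining positions, OSR information and one entry
// per deoptimization exit.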
979 OptimizedCompilationInfo* info = this->info();
980 int deopt_count = static_cast<int>(deoptimization_exits_.size());
981 if (deopt_count == 0 && !info->is_osr()) {
983 }
985 DeoptimizationData::New(isolate(), deopt_count);
986
989 isolate()->main_thread_local_isolate()->factory());
990
991 data->SetFrameTranslation(*translation_array);
992 data->SetInlinedFunctionCount(
993 Smi::FromInt(static_cast<int>(inlined_function_count_)));
994 data->SetOptimizationId(Smi::FromInt(info->optimization_id()));
995
996 data->SetDeoptExitStart(Smi::FromInt(deopt_exit_start_offset_));
997 data->SetEagerDeoptCount(Smi::FromInt(eager_deopt_count_));
998 data->SetLazyDeoptCount(Smi::FromInt(lazy_deopt_count_));
999
1000 if (info->has_shared_info()) {
1002 isolate()->factory()->NewSharedFunctionInfoWrapper(info->shared_info());
1003 data->SetWrappedSharedFunctionInfo(*sfi_wrapper);
1004 } else {
1005 data->SetWrappedSharedFunctionInfo(Smi::zero());
1006 }
1007
1010 static_cast<int>(protected_deoptimization_literals_.size()));
1011 for (unsigned i = 0; i < protected_deoptimization_literals_.size(); i++) {
1014 CHECK(!object.is_null());
1015 protected_literals->set(i, *object);
1016 }
1017 data->SetProtectedLiteralArray(*protected_literals);
1018
1021 static_cast<int>(deoptimization_literals_.size()));
1022 for (unsigned i = 0; i < deoptimization_literals_.size(); i++) {
1024 CHECK(!object.is_null());
1025 literals->set(i, *object);
1026 }
1027 data->SetLiteralArray(*literals);
1028
1030 CreateInliningPositions(info, isolate());
1031 data->SetInliningPositions(*inl_pos);
1032
1033 if (info->is_osr()) {
1035 data->SetOsrBytecodeOffset(Smi::FromInt(info_->osr_offset().ToInt()));
1036 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
1037 } else {
1038 BytecodeOffset osr_offset = BytecodeOffset::None();
1039 data->SetOsrBytecodeOffset(Smi::FromInt(osr_offset.ToInt()));
1040 data->SetOsrPcOffset(Smi::FromInt(-1));
1041 }
1042
1043 // Populate deoptimization entries.
1044 for (int i = 0; i < deopt_count; i++) {
1045 DeoptimizationExit* deoptimization_exit = deoptimization_exits_[i];
1046 CHECK_NOT_NULL(deoptimization_exit);
1047 DCHECK_EQ(i, deoptimization_exit->deoptimization_id());
1048 data->SetBytecodeOffset(i, deoptimization_exit->bailout_id());
1049 data->SetTranslationIndex(
1050 i, Smi::FromInt(deoptimization_exit->translation_id()));
1051 data->SetPc(i, Smi::FromInt(deoptimization_exit->pc_offset()));
1052#ifdef DEBUG
1053 data->SetNodeId(i, Smi::FromInt(deoptimization_exit->node_id()));
1054#endif // DEBUG
1055 }
1056
1057#ifdef DEBUG
1058 data->Verify(info->bytecode_array());
1059#endif // DEBUG
1060 return data;
1061}
1062
1063#if V8_ENABLE_WEBASSEMBLY
1064base::OwnedVector<uint8_t> CodeGenerator::GenerateWasmDeoptimizationData() {
1065 int deopt_count = static_cast<int>(deoptimization_exits_.size());
1066 if (deopt_count == 0) {
1067 return {};
1068 }
1069 // Lazy deopts are not supported in wasm.
1071 // Wasm doesn't use the JS inlining handling via deopt info.
1073
1074 auto deopt_entries =
1076 // Populate deoptimization entries.
1077 for (int i = 0; i < deopt_count; i++) {
1078 const DeoptimizationExit* deoptimization_exit = deoptimization_exits_[i];
1079 CHECK_NOT_NULL(deoptimization_exit);
1080 DCHECK_EQ(i, deoptimization_exit->deoptimization_id());
1081 deopt_entries[i] = {deoptimization_exit->bailout_id(),
1082 deoptimization_exit->translation_id()};
1083 }
1084
1085 base::Vector<const uint8_t> frame_translations =
1087 base::OwnedVector<uint8_t> result = wasm::WasmDeoptDataProcessor::Serialize(
1088 deopt_exit_start_offset_, eager_deopt_count_, frame_translations,
1090#if DEBUG
1091 // Verify that the serialized data can be deserialized.
1092 wasm::WasmDeoptView view(base::VectorOf(result));
1093 wasm::WasmDeoptData data = view.GetDeoptData();
1094 DCHECK_EQ(data.deopt_exit_start_offset, deopt_exit_start_offset_);
1095 DCHECK_EQ(data.deopt_literals_size, deoptimization_literals_.size());
1096 DCHECK_EQ(data.eager_deopt_count, eager_deopt_count_);
1097 DCHECK_EQ(data.entry_count, deoptimization_exits_.size());
1098 DCHECK_EQ(data.translation_array_size, frame_translations.size());
1099 for (int i = 0; i < deopt_count; i++) {
1100 const DeoptimizationExit* exit = deoptimization_exits_[i];
1101 wasm::WasmDeoptEntry entry = view.GetDeoptEntry(i);
1102 DCHECK_EQ(exit->bailout_id(), entry.bytecode_offset);
1103 DCHECK_EQ(exit->translation_id(), entry.translation_index);
1104 }
1105 std::vector<DeoptimizationLiteral> literals =
1106 view.BuildDeoptimizationLiteralArray();
1107 DCHECK_EQ(literals.size(), deoptimization_literals_.size());
1108 for (size_t i = 0; i < deoptimization_literals_.size(); ++i) {
1110 }
1111#endif
1112 return result;
1113}
1114#endif // V8_ENABLE_WEBASSEMBLY
1115
1120
1121#ifndef V8_TARGET_ARCH_X64
1125#endif
1126
1128 const bool needs_frame_state =
1129 instr->HasCallDescriptorFlag(CallDescriptor::kNeedsFrameState);
1130 RecordSafepoint(instr->reference_map());
1131
1132 if (instr->HasCallDescriptorFlag(CallDescriptor::kHasExceptionHandler)) {
1134 Constant handler_input =
1135 i.ToConstant(instr->InputAt(instr->InputCount() - 1));
1136 if (handler_input.type() == Constant::Type::kRpoNumber) {
1137 RpoNumber handler_rpo = handler_input.ToRpoNumber();
1138 DCHECK(instructions()->InstructionBlockAt(handler_rpo)->IsHandler());
1139 handlers_.push_back(
1140 {GetLabel(handler_rpo), masm()->pc_offset_for_safepoint()});
1141 } else {
1142 // We should lazy deopt on throw.
1144 handlers_.push_back({nullptr, masm()->pc_offset_for_safepoint()});
1145 }
1146 }
1147
1148 if (needs_frame_state) {
1149 RecordDeoptInfo(instr, masm()->pc_offset_for_safepoint());
1150 }
1151}
1152
1154 // If the frame state is present, it starts at argument 1 - after
1155 // the code address.
1156 size_t frame_state_offset = 1;
1157 FrameStateDescriptor* descriptor =
1158 GetDeoptimizationEntry(instr, frame_state_offset).descriptor();
1159 BuildTranslation(instr, pc_offset, frame_state_offset, 0,
1160 descriptor->state_combine());
1161}
1162
1165 unsigned i;
1166 for (i = 0; i < protected_deoptimization_literals_.size(); ++i) {
1167 if (protected_deoptimization_literals_[i].equals(object)) return i;
1168 }
1169 protected_deoptimization_literals_.push_back(object);
1170 return i;
1171}
1172
1174 literal.Validate();
1175 unsigned i;
1176 for (i = 0; i < deoptimization_literals_.size(); ++i) {
1177 deoptimization_literals_[i].Validate();
1178 if (deoptimization_literals_[i] == literal) return i;
1179 }
1181 return i;
1182}
1183
1185 IndirectHandle<TrustedObject> object) const {
1186 for (unsigned i = 0; i < protected_deoptimization_literals_.size(); ++i) {
1187 if (protected_deoptimization_literals_[i].equals(object)) return true;
1188 }
1189 return false;
1190}
1191
1193 Instruction* instr, size_t frame_state_offset) {
1195 int const state_id = i.InputInt32(frame_state_offset);
1196 return instructions()->GetDeoptimizationEntry(state_id);
1197}
1198
1200 StateValueDescriptor* desc, StateValueList* nested,
1202 if (desc->IsNestedObject()) {
1203 translations_.BeginCapturedObject(static_cast<int>(nested->size()));
1204 for (auto field : *nested) {
1205 TranslateStateValueDescriptor(field.desc, field.nested, iter);
1206 }
1207 } else if (desc->IsArgumentsElements()) {
1208 translations_.ArgumentsElements(desc->arguments_type());
1209 } else if (desc->IsArgumentsLength()) {
1211 } else if (desc->IsRestLength()) {
1213 } else if (desc->IsDuplicate()) {
1214 translations_.DuplicateObject(static_cast<int>(desc->id()));
1215 } else if (desc->IsPlain()) {
1216 InstructionOperand* op = iter->Advance();
1217 AddTranslationForOperand(iter->instruction(), op, desc->type());
1218 } else if (desc->IsStringConcat()) {
1220 for (auto field : *nested) {
1221 TranslateStateValueDescriptor(field.desc, field.nested, iter);
1222 }
1223 } else {
1224 DCHECK(desc->IsOptimizedOut());
1226 }
1227}
1228
1231 size_t index = 0;
1232 StateValueList* values = desc->GetStateValueDescriptors();
1233 for (StateValueList::iterator it = values->begin(); it != values->end();
1234 ++it, ++index) {
1235 TranslateStateValueDescriptor((*it).desc, (*it).nested, iter);
1236 }
1237 DCHECK_EQ(desc->GetSize(), index);
1238}
1239
1242 OutputFrameStateCombine state_combine) {
1243 // Outer-most state must be added to translation first.
1244 if (descriptor->outer_state() != nullptr) {
1246 state_combine);
1247 }
1248
1249 Handle<SharedFunctionInfo> shared_info;
1250 if (!descriptor->shared_info().ToHandle(&shared_info)) {
1251 if (!info()->has_shared_info()
1252#if V8_ENABLE_WEBASSEMBLY
1253 && descriptor->type() != compiler::FrameStateType::kLiftoffFunction
1254#endif
1255 ) {
1256 return; // Stub with no SharedFunctionInfo.
1257 }
1258 shared_info = info()->shared_info();
1259 }
1260
1261 const BytecodeOffset bailout_id = descriptor->bailout_id();
1262
1263 const int shared_info_id =
1264#if V8_ENABLE_WEBASSEMBLY
1265 shared_info.is_null()
1268 CHECK_IMPLIES(shared_info.is_null(), v8_flags.wasm_deopt);
1269#else
1271#endif
1272
1273 const unsigned int height =
1274 static_cast<unsigned int>(descriptor->GetHeight());
1275
1276 switch (descriptor->type()) {
1278 int bytecode_array_id = DefineProtectedDeoptimizationLiteral(
1279 descriptor->bytecode_array().ToHandleChecked());
1280 int return_offset = 0;
1281 int return_count = 0;
1282 if (!state_combine.IsOutputIgnored()) {
1283 return_offset = static_cast<int>(state_combine.GetOffsetToPokeAt());
1284 return_count = static_cast<int>(iter->instruction()->OutputCount());
1285 }
1286 translations_.BeginInterpretedFrame(bailout_id, shared_info_id,
1287 bytecode_array_id, height,
1288 return_offset, return_count);
1289 break;
1290 }
1293 shared_info_id, height,
1294 descriptor->bytecode_array().ToHandleChecked()->parameter_count());
1295 break;
1297 translations_.BeginConstructCreateStubFrame(shared_info_id, height);
1298 break;
1301 break;
1303 translations_.BeginBuiltinContinuationFrame(bailout_id, shared_info_id,
1304 height);
1305 break;
1306 }
1307#if V8_ENABLE_WEBASSEMBLY
1308 case FrameStateType::kWasmInlinedIntoJS:
1309 translations_.BeginWasmInlinedIntoJSFrame(bailout_id, shared_info_id,
1310 height);
1311 break;
1312 case FrameStateType::kJSToWasmBuiltinContinuation: {
1313 const JSToWasmFrameStateDescriptor* js_to_wasm_descriptor =
1314 static_cast<const JSToWasmFrameStateDescriptor*>(descriptor);
1315 translations_.BeginJSToWasmBuiltinContinuationFrame(
1316 bailout_id, shared_info_id, height,
1317 js_to_wasm_descriptor->return_kind());
1318 break;
1319 }
1320 case FrameStateType::kLiftoffFunction:
1321 translations_.BeginLiftoffFrame(bailout_id, height,
1322 descriptor->GetWasmFunctionIndex());
1323 break;
1324#endif // V8_ENABLE_WEBASSEMBLY
1327 bailout_id, shared_info_id, height);
1328 break;
1329 }
1332 bailout_id, shared_info_id, height);
1333 break;
1334 }
1335 }
1336
1337 TranslateFrameStateDescriptorOperands(descriptor, iter);
1338}
1339
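// BuildTranslation() creates the DeoptimizationExit for the frame state found
// at |frame_state_offset|: it begins a new frame translation, records a
// feedback-vector update if the entry carries valid feedback, translates every
// frame state descriptor and attaches any immediate arguments to the exit.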
1341 Instruction* instr, int pc_offset, size_t frame_state_offset,
1342 size_t immediate_args_count, OutputFrameStateCombine state_combine) {
1343 DeoptimizationEntry const& entry =
1344 GetDeoptimizationEntry(instr, frame_state_offset);
1345 FrameStateDescriptor* const descriptor = entry.descriptor();
1346 frame_state_offset++;
1347
1348 const int translation_index = translations_.BeginTranslation(
1349 static_cast<int>(descriptor->GetFrameCount()),
1350 static_cast<int>(descriptor->GetJSFrameCount()),
1351 entry.feedback().IsValid());
1352 if (entry.feedback().IsValid()) {
1355 int literal_id = DefineDeoptimizationLiteral(literal);
1356 translations_.AddUpdateFeedback(literal_id, entry.feedback().slot.ToInt());
1357 }
1358 InstructionOperandIterator iter(instr, frame_state_offset);
1359 BuildTranslationForFrameStateDescriptor(descriptor, &iter, state_combine);
1360
1362 current_source_position_, descriptor->bailout_id(), translation_index,
1363 pc_offset, entry.kind(), entry.reason(),
1364#ifdef DEBUG
1365 entry.node_id());
1366#else // DEBUG
1367 0);
1368#endif // DEBUG
1369 if (immediate_args_count != 0) {
1370 auto immediate_args = zone()->New<ZoneVector<ImmediateOperand*>>(zone());
1372 instr, frame_state_offset - immediate_args_count - 1);
1373 for (size_t i = 0; i < immediate_args_count; i++) {
1374 immediate_args->emplace_back(ImmediateOperand::cast(imm_iter.Advance()));
1375 }
1376 exit->set_immediate_args(immediate_args);
1377 }
1378
1379 deoptimization_exits_.push_back(exit);
1380 return exit;
1381}
1382
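// AddTranslationForOperand() emits a single translation entry for an
// instruction operand: stack slots and registers are recorded together with
// the representation given by |type|, while immediate constants are
// materialized as deoptimization literals.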
1385 MachineType type) {
1386 if (op->IsStackSlot()) {
1387 if (type.representation() == MachineRepresentation::kBit) {
1389 } else if (type == MachineType::Int8() || type == MachineType::Int16() ||
1390 type == MachineType::Int32()) {
1392 } else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
1393 type == MachineType::Uint32()) {
1395 } else if (type == MachineType::Int64()) {
1397 } else if (type == MachineType::SignedBigInt64()) {
1400 } else if (type == MachineType::UnsignedBigInt64()) {
1403 } else {
1404#if defined(V8_COMPRESS_POINTERS)
1405 CHECK(MachineRepresentation::kTagged == type.representation() ||
1406 MachineRepresentation::kCompressed == type.representation());
1407#else
1408 CHECK(MachineRepresentation::kTagged == type.representation());
1409#endif
1411 }
1412 } else if (op->IsFPStackSlot()) {
1413 switch (type.representation()) {
1416 break;
1418 if (type.semantic() == MachineSemantic::kHoleyFloat64) {
1421 } else {
1424 }
1425 break;
1428 break;
1429 default:
1430 UNREACHABLE();
1431 }
1432 } else if (op->IsRegister()) {
1433 InstructionOperandConverter converter(this, instr);
1434 if (type.representation() == MachineRepresentation::kBit) {
1436 } else if (type == MachineType::Int8() || type == MachineType::Int16() ||
1437 type == MachineType::Int32()) {
1439 } else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
1440 type == MachineType::Uint32()) {
1442 } else if (type == MachineType::Int64()) {
1444 } else if (type == MachineType::SignedBigInt64()) {
1446 } else if (type == MachineType::UnsignedBigInt64()) {
1448 } else {
1449#if defined(V8_COMPRESS_POINTERS)
1450 CHECK(MachineRepresentation::kTagged == type.representation() ||
1451 MachineRepresentation::kCompressed == type.representation());
1452#else
1453 CHECK(MachineRepresentation::kTagged == type.representation());
1454#endif
1455 translations_.StoreRegister(converter.ToRegister(op));
1456 }
1457 } else if (op->IsFPRegister()) {
1458 InstructionOperandConverter converter(this, instr);
1459 switch (type.representation()) {
1462 break;
1464 if (type.semantic() == MachineSemantic::kHoleyFloat64) {
1466 converter.ToDoubleRegister(op));
1467 } else {
1469 }
1470 break;
1473 break;
1474 default:
1475 UNREACHABLE();
1476 }
1477 } else {
1478 CHECK(op->IsImmediate());
1479 InstructionOperandConverter converter(this, instr);
1480 Constant constant = converter.ToConstant(op);
1482
1483#if V8_ENABLE_WEBASSEMBLY
1484 if (info_->IsWasm() && v8_flags.wasm_deopt) {
1485 switch (type.representation()) {
1487 literal = DeoptimizationLiteral(constant.ToInt32());
1488 break;
1490 literal = DeoptimizationLiteral(constant.ToInt64());
1491 break;
1493 literal = DeoptimizationLiteral(constant.ToFloat32Safe());
1494 break;
1496 literal = DeoptimizationLiteral(Float64(constant.ToFloat64()));
1497 break;
1500 base::IsInRange(constant.ToInt64(), 0u, UINT32_MAX));
1501 Tagged<Smi> smi(static_cast<Address>(constant.ToInt64()));
1502 DCHECK(IsSmi(smi));
1504 break;
1505 }
1506 default:
1507 UNIMPLEMENTED();
1508 }
1509 int literal_id = DefineDeoptimizationLiteral(literal);
1510 translations_.StoreLiteral(literal_id);
1511 return;
1512 }
1513#endif
1514
1515 switch (constant.type()) {
1516 case Constant::kInt32:
1517 if (type.representation() == MachineRepresentation::kTagged) {
1518 // When pointers are 4 bytes, we can use int32 constants to represent
1519 // Smis.
1521 Tagged<Smi> smi(static_cast<Address>(constant.ToInt32()));
1522 DCHECK(IsSmi(smi));
1523 literal = DeoptimizationLiteral(static_cast<double>(smi.value()));
1524 } else if (type.representation() == MachineRepresentation::kBit) {
1525 if (constant.ToInt32() == 0) {
1526 literal =
1527 DeoptimizationLiteral(isolate()->factory()->false_value());
1528 } else {
1529 DCHECK_EQ(1, constant.ToInt32());
1530 literal = DeoptimizationLiteral(isolate()->factory()->true_value());
1531 }
1532 } else {
1533 DCHECK(type == MachineType::Int32() ||
1534 type == MachineType::Uint32() ||
1536 type.representation() == MachineRepresentation::kNone);
1537 DCHECK(type.representation() != MachineRepresentation::kNone ||
1538 constant.ToInt32() == FrameStateDescriptor::kImpossibleValue);
1539 if (type == MachineType::Uint32()) {
1541 static_cast<double>(static_cast<uint32_t>(constant.ToInt32())));
1542 } else {
1543 literal =
1544 DeoptimizationLiteral(static_cast<double>(constant.ToInt32()));
1545 }
1546 }
1547 break;
1548 case Constant::kInt64:
1550 if (type == MachineType::SignedBigInt64()) {
1551 literal = DeoptimizationLiteral(constant.ToInt64());
1552 } else if (type == MachineType::UnsignedBigInt64()) {
1553 literal =
1554 DeoptimizationLiteral(static_cast<uint64_t>(constant.ToInt64()));
1555 } else if (type.representation() == MachineRepresentation::kWord64) {
1556 // TODO(nicohartmann@, chromium:41497374): Disabling this CHECK
1557 // because we can see cases where this is violated in unreachable
1558 // code. We should re-enable once we have an idea on how to prevent
1559 // this from happening.
1560 // CHECK_EQ(
1561 // constant.ToInt64(),
1562 // static_cast<int64_t>(static_cast<double>(constant.ToInt64())));
1563 literal =
1564 DeoptimizationLiteral(static_cast<double>(constant.ToInt64()));
1565 } else {
1566 // When pointers are 8 bytes, we can use int64 constants to represent
1567 // Smis.
1568 DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
1569 Tagged<Smi> smi(static_cast<Address>(constant.ToInt64()));
1570 DCHECK(IsSmi(smi));
1571 literal = DeoptimizationLiteral(static_cast<double>(smi.value()));
1572 }
1573 break;
1574 case Constant::kFloat32:
1575 DCHECK(type.representation() == MachineRepresentation::kFloat32 ||
1576 type.representation() == MachineRepresentation::kTagged);
1577 literal =
1578 DeoptimizationLiteral(static_cast<double>(constant.ToFloat32()));
1579 break;
1580 case Constant::kFloat64:
1581 DCHECK(type.representation() == MachineRepresentation::kFloat64 ||
1582 type.representation() == MachineRepresentation::kTagged);
1583 if (type == MachineType::HoleyFloat64() &&
1584 constant.ToFloat64().AsUint64() == kHoleNanInt64) {
1586 } else {
1587 literal = DeoptimizationLiteral(constant.ToFloat64().value());
1588 }
1589 break;
1591 DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
1592 literal = DeoptimizationLiteral(constant.ToHeapObject());
1593 break;
1596 literal = DeoptimizationLiteral(constant.ToHeapObject());
1597 break;
1598 default:
1599 UNREACHABLE();
1600 }
1601 if (literal.object().equals(info()->closure()) &&
1602 info()->function_context_specializing()) {
1604 } else {
1605 int literal_id = DefineDeoptimizationLiteral(literal);
1606 translations_.StoreLiteral(literal_id);
1607 }
1608 }
1609}
1610
1612 Instruction* instr, size_t frame_state_offset,
1613 size_t immediate_args_count) {
1614 return BuildTranslation(instr, -1, frame_state_offset, immediate_args_count,
1616}
1617
1619 : frame_(gen->frame()), masm_(gen->masm()), next_(gen->ools_) {
1620 gen->ools_ = this;
1621}
1622
1624
1625} // namespace compiler
1626} // namespace internal
1627} // namespace v8