template <typename RegisterT>
struct RegisterTHelper;

template <>
struct RegisterTHelper<Register> {
  // ...
};

enum NeedsDecompression { kDoesNotNeedDecompression, kNeedsDecompression };
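// ParallelMoveResolver performs a set of moves between registers and stack
// slots that are semantically parallel: every source is read before any
// target is overwritten. Moves are first recorded, then emitted as chains; a
// chain that loops back onto its own start is a cycle, which is broken by
// stashing the cycle start in a scratch register (swapping r0 and r1, for
// example, becomes scratch = r1; r1 = r0; r0 = scratch). When
// DecompressIfNeeded is set, values whose target needs the full pointer are
// additionally decompressed along the way.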
template <typename RegisterT, bool DecompressIfNeeded>
class ParallelMoveResolver {
  static constexpr auto kAllocatableRegistersT =
      RegisterTHelper<RegisterT>::kAllocatableRegisters;
  static_assert(!DecompressIfNeeded || std::is_same_v<Register, RegisterT>);
 public:
  explicit ParallelMoveResolver(MaglevAssembler* masm)
      : masm_(masm), scratch_(RegisterT::no_reg()) {}

  void RecordMove(ValueNode* source_node, compiler::InstructionOperand source,
                  compiler::AllocatedOperand target,
                  bool target_needs_to_be_decompressed) {
    if (target.IsAnyRegister()) {
      RecordMoveToRegister(source_node, source, ToRegisterT<RegisterT>(target),
                           target_needs_to_be_decompressed);
    } else {
      RecordMoveToStackSlot(source_node, source,
                            masm_->GetFramePointerOffsetForStackSlot(target),
                            target_needs_to_be_decompressed);
    }
  }

  void RecordMove(ValueNode* source_node, compiler::InstructionOperand source,
                  RegisterT target_reg,
                  NeedsDecompression target_needs_to_be_decompressed) {
    RecordMoveToRegister(source_node, source, target_reg,
                         target_needs_to_be_decompressed);
  }
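  // EmitMoves drains the recorded moves: it starts one move chain per
  // allocatable register, then performs the "materializing" moves, i.e.
  // constants that are loaded directly into their target register or stack
  // slot (the latter via the scratch register) rather than being moved from
  // another location.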
  void EmitMoves(RegisterT scratch) {
    // ...
    for (RegisterT reg : kAllocatableRegistersT) {
      StartEmitMoveChain(reg);
      ValueNode* materializing_register_move =
          materializing_register_moves_[reg.code()];
      if (materializing_register_move) {
        materializing_register_move->LoadToRegister(masm_, reg);
      }
    }
    // ...
    for (auto [stack_slot, node] : materializing_stack_slot_moves_) {
      node->LoadToRegister(masm_, scratch_);
      // ...
    }
  }
  ParallelMoveResolver(ParallelMoveResolver&&) = delete;
  ParallelMoveResolver operator=(ParallelMoveResolver&&) = delete;
  ParallelMoveResolver(const ParallelMoveResolver&) = delete;
  ParallelMoveResolver operator=(const ParallelMoveResolver&) = delete;
 private:
  struct DummyNeedsDecompression {
    DummyNeedsDecompression(NeedsDecompression) {}
  };
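  // The set of targets (registers and stack slots) that still have to receive
  // the value currently held by a given source. needs_decompression is only a
  // real field when DecompressIfNeeded is true; otherwise it collapses into
  // the no-op DummyNeedsDecompression above.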
  struct GapMoveTargets {
    base::SmallVector<int32_t, 1> stack_slots = base::SmallVector<int32_t, 1>{};
    RegListBase<RegisterT> registers;
    std::conditional_t<DecompressIfNeeded, NeedsDecompression,
                       DummyNeedsDecompression>
        needs_decompression = kDoesNotNeedDecompression;

    GapMoveTargets() = default;
    GapMoveTargets(GapMoveTargets&&) V8_NOEXCEPT = default;
    GapMoveTargets& operator=(GapMoveTargets&&) V8_NOEXCEPT = default;
    GapMoveTargets(const GapMoveTargets&) = delete;
    GapMoveTargets& operator=(const GapMoveTargets&) = delete;
    // ...
  };
#ifdef DEBUG
  void CheckNoExistingMoveToRegister(RegisterT target_reg) {
    for (RegisterT reg : kAllocatableRegistersT) {
      if (moves_from_register_[reg.code()].registers.has(target_reg)) {
        FATAL("Existing move from %s to %s", RegisterName(reg),
              RegisterName(target_reg));
      }
    }
    for (auto& [stack_slot, targets] : moves_from_stack_slot_) {
      if (targets.registers.has(target_reg)) {
        FATAL("Existing move from stack slot %d to %s", stack_slot,
              RegisterName(target_reg));
      }
    }
    if (materializing_register_moves_[target_reg.code()] != nullptr) {
      FATAL("Existing materialization of %p to %s",
            materializing_register_moves_[target_reg.code()],
            RegisterName(target_reg));
    }
  }
  void CheckNoExistingMoveToStackSlot(int32_t target_slot) {
    for (RegisterT reg : kAllocatableRegistersT) {
      auto& stack_slots = moves_from_register_[reg.code()].stack_slots;
      if (std::any_of(stack_slots.begin(), stack_slots.end(),
                      [&](int32_t slot) { return slot == target_slot; })) {
        FATAL("Existing move from %s to stack slot %d", RegisterName(reg),
              target_slot);
      }
    }
    for (auto& [stack_slot, targets] : moves_from_stack_slot_) {
      if (std::any_of(targets.stack_slots.begin(), targets.stack_slots.end(),
                      [&](int32_t slot) { return slot == target_slot; })) {
        FATAL("Existing move from stack slot %d to stack slot %d", stack_slot,
              target_slot);
      }
    }
    for (auto& [stack_slot, node] : materializing_stack_slot_moves_) {
      if (stack_slot == target_slot) {
        FATAL("Existing materialization of %p to stack slot %d", node,
              target_slot);
      }
    }
  }
#else
  void CheckNoExistingMoveToRegister(RegisterT target_reg) {}
  void CheckNoExistingMoveToStackSlot(int32_t target_slot) {}
#endif  // DEBUG
  void RecordMoveToRegister(ValueNode* node,
                            compiler::InstructionOperand source,
                            RegisterT target_reg,
                            bool target_needs_to_be_decompressed) {
    // There shouldn't already be a move recorded into this register.
    CheckNoExistingMoveToRegister(target_reg);
    // ...
    if constexpr (DecompressIfNeeded) {
      if (target_needs_to_be_decompressed &&
          !node->decompresses_tagged_result()) {
        // ...
      }
    } else {
      DCHECK_IMPLIES(target_needs_to_be_decompressed,
                     node->decompresses_tagged_result());
    }

    GapMoveTargets* targets;
    if (source.IsAnyRegister()) {
      RegisterT source_reg = ToRegisterT<RegisterT>(source);
      if (target_reg == source_reg) {
        // Self-move: nothing to do.
        return;
      }
      // ...
    } else if (source.IsAnyStackSlot()) {
      int32_t source_slot = masm_->GetFramePointerOffsetForStackSlot(
          compiler::AllocatedOperand::cast(source));
      // ...
    } else {
      // Constants are materialized rather than moved.
      DCHECK(source.IsConstant());
      // ...
    }
    targets->registers.set(target_reg);
    // ... (only if decompression is required:)
    targets->needs_decompression = kNeedsDecompression;
  }
  void RecordMoveToStackSlot(ValueNode* node,
                             compiler::InstructionOperand source,
                             int32_t target_slot,
                             bool target_needs_to_be_decompressed) {
    // There shouldn't already be a move recorded into this stack slot.
    CheckNoExistingMoveToStackSlot(target_slot);
    // ...
    if constexpr (DecompressIfNeeded) {
      if (target_needs_to_be_decompressed &&
          !node->decompresses_tagged_result()) {
        // ...
      }
    } else {
      DCHECK_IMPLIES(target_needs_to_be_decompressed,
                     node->decompresses_tagged_result());
    }

    GapMoveTargets* targets;
    if (source.IsAnyRegister()) {
      // ...
    } else if (source.IsAnyStackSlot()) {
      int32_t source_slot = masm_->GetFramePointerOffsetForStackSlot(
          compiler::AllocatedOperand::cast(source));
      if (source_slot == target_slot /* && ... */) {
        // Self-move: nothing to do.
        return;
      }
      // ...
    } else {
      DCHECK(source.IsConstant());
      // ...
    }
    targets->stack_slots.push_back(target_slot);
    // ... (only if decompression is required:)
    targets->needs_decompression = kNeedsDecompression;
  }
  GapMoveTargets PopTargets(RegisterT source_reg) {
    return std::exchange(moves_from_register_[source_reg.code()],
                         GapMoveTargets{});
  }

  GapMoveTargets PopTargets(int32_t source_slot) {
    auto handle = moves_from_stack_slot_.extract(source_slot);
    if (handle.empty()) return {};
    // ...
    return std::move(handle.mapped());
  }
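  // Move chains: starting from a source, emit the moves to all of its
  // targets, but first recursively emit the moves out of those targets so
  // that their current values are not clobbered. If the recursion reaches the
  // chain start again, the targets form a cycle; the cycle start has already
  // been saved in the scratch register, so the chain can be finished from
  // there.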
  template <typename SourceT>
  void StartEmitMoveChain(SourceT source) {
    DCHECK(!scratch_has_cycle_start_);
    GapMoveTargets targets = PopTargets(source);
    if (targets.is_empty()) return;
    // ...
    bool has_cycle = RecursivelyEmitMoveChainTargets(source, targets);
    if (has_cycle) {
      if (!scratch_has_cycle_start_) {
        // ...
      }
      EmitMovesFromSource(scratch_, std::move(targets));
      __ RecordComment("-- * End of cycle");
    } else {
      EmitMovesFromSource(source, std::move(targets));
      __ RecordComment("-- * Chain emitted with no cycles");
    }
  }
  template <typename ChainStartT, typename SourceT>
  bool ContinueEmitMoveChain(ChainStartT chain_start, SourceT source) {
    if constexpr (std::is_same_v<ChainStartT, SourceT>) {
      // If the chain has looped back onto its own start, we have a cycle:
      // stash the cycle start in the scratch register and stop recursing.
      if (chain_start == source) {
        __ RecordComment("-- * Cycle");
        DCHECK(!scratch_has_cycle_start_);
        if constexpr (std::is_same_v<ChainStartT, int32_t>) {
          __ Move(scratch_, StackSlot{chain_start});
        } else {
          __ Move(scratch_, chain_start);
        }
        scratch_has_cycle_start_ = true;
        return true;
      }
    }

    GapMoveTargets targets = PopTargets(source);
    if (targets.is_empty()) {
      __ RecordComment("-- * End of chain");
      return false;
    }
    // ...
    bool has_cycle = RecursivelyEmitMoveChainTargets(chain_start, targets);
    // ...
    EmitMovesFromSource(source, std::move(targets));
    return has_cycle;
  }
  template <typename ChainStartT>
  bool RecursivelyEmitMoveChainTargets(ChainStartT chain_start,
                                       GapMoveTargets& targets) {
    bool has_cycle = false;
    for (auto target : targets.registers) {
      has_cycle |= ContinueEmitMoveChain(chain_start, target);
    }
    for (int32_t target_slot : targets.stack_slots) {
      has_cycle |= ContinueEmitMoveChain(chain_start, target_slot);
    }
    return has_cycle;
  }
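  // EmitMovesFromSource writes the source value into every recorded target.
  // The register variant fans the value out directly; the stack-slot variant
  // first loads the slot into a register (one of the target registers if
  // possible, otherwise the scratch register), since stack-to-stack moves
  // need an intermediary.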
  void EmitMovesFromSource(RegisterT source_reg, GapMoveTargets&& targets) {
    DCHECK(moves_from_register_[source_reg.code()].is_empty());
    if constexpr (DecompressIfNeeded) {
      // Decompress the value once, in the source register, before fanning it
      // out to the targets.
      if (targets.needs_decompression == kNeedsDecompression) {
        __ DecompressTagged(source_reg, source_reg);
      }
    }
    for (RegisterT target_reg : targets.registers) {
      DCHECK(moves_from_register_[target_reg.code()].is_empty());
      __ Move(target_reg, source_reg);
    }
    for (int32_t target_slot : targets.stack_slots) {
      // ...
      __ Move(StackSlot{target_slot}, source_reg);
    }
  }
  void EmitMovesFromSource(int32_t source_slot, GapMoveTargets&& targets) {
    // Stack-to-stack moves have to go via a register: either one of the
    // target registers (it has to receive the value anyway), or the scratch
    // register if all targets are stack slots.
    RegisterT register_with_slot_value = RegisterT::no_reg();
    if (!targets.registers.is_empty()) {
      register_with_slot_value = targets.registers.PopFirst();
    } else {
      DCHECK(!targets.stack_slots.empty());
      // ...
      if (scratch_has_cycle_start_) {
        // ...
      }
      register_with_slot_value = scratch_;
    }
    DCHECK(register_with_slot_value.is_valid());
    DCHECK(moves_from_register_[register_with_slot_value.code()].is_empty());
    __ Move(register_with_slot_value, StackSlot{source_slot});
    // ...
    if constexpr (DecompressIfNeeded) {
      if (targets.needs_decompression == kNeedsDecompression) {
        __ DecompressTagged(register_with_slot_value, register_with_slot_value);
        targets.needs_decompression = kDoesNotNeedDecompression;
      }
    }
    // Emit the remaining moves from the register that now holds the value.
    EmitMovesFromSource(register_with_slot_value, std::move(targets));
  }
  void Push(Register reg) { __ Push(reg); }
  void Pop(Register reg) { __ Pop(reg); }
  // ...

  MaglevAssembler* masm() const { return masm_; }

  // ...
  std::array<ValueNode*, RegisterT::kNumRegisters>
      materializing_register_moves_;
  // ...
};
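// ExceptionHandlerTrampolineBuilder builds the trampoline that is jumped to
// when an exception is caught by a Maglev catch block: it moves (or, for
// untagged values, materialises) the values that the catch block's exception
// phis expect into their locations, then jumps to the catch block itself.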
class ExceptionHandlerTrampolineBuilder {
 public:
  static void Build(MaglevAssembler* masm, NodeBase* node) {
    ExceptionHandlerTrampolineBuilder builder(masm);
    builder.EmitTrampolineFor(node);
  }

 private:
  explicit ExceptionHandlerTrampolineBuilder(MaglevAssembler* masm)
      : masm_(masm) {}

  struct Move {
    explicit Move(const ValueLocation& target, ValueNode* source)
        : target(target), source(source) {}
    const ValueLocation& target;
    ValueNode* const source;
  };
  using MoveVector = base::SmallVector<Move, 16>;
  void EmitTrampolineFor(NodeBase* node) {
    DCHECK(node->properties().can_throw());

    ExceptionHandlerInfo* const handler_info = node->exception_handler_info();
    if (handler_info->ShouldLazyDeopt()) return;
    DCHECK(handler_info->HasExceptionHandler());
    BasicBlock* const catch_block = handler_info->catch_block();
    LazyDeoptInfo* const deopt_info = node->lazy_deopt_info();

    // ...
    const InterpretedDeoptFrame& lazy_frame =
        deopt_info->GetFrameForExceptionHandler(handler_info);

    // ...
    ParallelMoveResolver<Register, COMPRESS_POINTERS_BOOL> direct_moves(masm_);
    MoveVector materialising_moves;
    bool save_accumulator = false;
    RecordMoves(lazy_frame.unit(), catch_block, lazy_frame.frame_state(),
                &direct_moves, &materialising_moves, &save_accumulator);
    __ BindJumpTarget(&handler_info->trampoline_entry());
    __ RecordComment("-- Exception handler trampoline START");
    EmitMaterialisationsAndPushResults(materialising_moves, save_accumulator);

    __ RecordComment("EmitMoves");
    MaglevAssembler::TemporaryRegisterScope temps(masm_);
    Register scratch = temps.AcquireScratch();
    direct_moves.EmitMoves(scratch);
    EmitPopMaterialisedResults(materialising_moves, save_accumulator, scratch);
    __ Jump(catch_block->label());
    __ RecordComment("-- Exception handler trampoline END");
  }
  MaglevAssembler* masm() const { return masm_; }

  void RecordMoves(const MaglevCompilationUnit& unit, BasicBlock* catch_block,
                   const CompactInterpreterFrameState* register_frame,
                   ParallelMoveResolver<Register, COMPRESS_POINTERS_BOOL>* direct_moves,
                   MoveVector* materialising_moves, bool* save_accumulator) {
    if (!catch_block->has_phi()) return;
    for (Phi* phi : *catch_block->phis()) {
      DCHECK(phi->is_exception_phi());
      if (!phi->has_valid_live_range()) continue;
      // ...
      const ValueLocation& target = phi->result();
      // ... (if this phi is the accumulator, it has to be preserved around the
      // materialisation calls:)
        *save_accumulator = true;
      // ...
      ValueNode* source = register_frame->GetValueOf(phi->owner(), unit);
      // ...
      if (VirtualObject* vobj = source->TryCast<VirtualObject>()) {
        DCHECK(vobj->allocation()->HasEscaped());
        source = vobj->allocation();
      }
      // ...
      DCHECK(!source->allocation().IsRegister());
      switch (source->properties().value_representation()) {
        // Tagged values can be moved directly to the phi's location.
        // ...
          direct_moves->RecordMove(
              source, source->allocation(),
              // ...
              phi->decompresses_tagged_result() ? kNeedsDecompression
                                                : kDoesNotNeedDecompression);
        // Untagged values have to be materialised (boxed) first.
        // ...
          materialising_moves->emplace_back(target, source);
        // ...
          materialising_moves->emplace_back(target, source);
        // ...
      }
    }
  }
  void EmitMaterialisationsAndPushResults(const MoveVector& moves,
                                          bool save_accumulator) const {
    if (moves.empty()) return;
    // ...
    __ RecordComment("EmitMaterialisationsAndPushResults");
    // ...
    __ set_allow_call(true);
    // Materialise each value (which may call out to the runtime) and push the
    // result, so that later materialisations cannot clobber it.
    for (const Move& move : moves) {
      // ...
    }
    __ set_allow_call(false);
  }
  void EmitPopMaterialisedResults(const MoveVector& moves,
                                  bool save_accumulator,
                                  Register scratch) const {
    if (moves.empty()) return;
    __ RecordComment("EmitPopMaterialisedResults");
    // ...
      const ValueLocation& target = move.target;
      Register target_reg = target.operand().IsAnyRegister()
                                ? target.AssignedGeneralRegister()
                                : scratch;
      // ...
      __ MaterialiseValueNode(target_reg, move.source);
      // ...
      if (target_reg == scratch) {
        __ Move(masm_->ToMemOperand(target.operand()), scratch);
      }
    // ...
  }

  MaglevAssembler* const masm_;
};
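// MaglevCodeGeneratingNodeProcessor drives the actual code generation: it
// emits the prologue, reorders blocks so that deferred (cold) code ends up at
// the end, asks every node to generate its code, spills spilled results, and
// emits the gap moves at block ends.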
class MaglevCodeGeneratingNodeProcessor {
 public:
  MaglevCodeGeneratingNodeProcessor(MaglevAssembler* masm, Zone* zone)
      // ...

  void PreProcessGraph(Graph* graph) {
    // ...
    code_gen_state()->set_untagged_slots(graph->untagged_stack_slots());
    code_gen_state()->set_tagged_slots(graph->tagged_stack_slots());
    code_gen_state()->set_max_deopted_stack_size(
        graph->max_deopted_stack_size());
    code_gen_state()->set_max_call_stack_args(graph->max_call_stack_args());

    if (v8_flags.maglev_break_on_entry) {
      // ...
    }

    if (graph->is_osr()) {
      __ OSRPrologue(graph);
    } else {
      // ...
    }

    // Reorder the blocks so that all deferred blocks are sorted to the end of
    // the block list, after the non-deferred blocks.
    int deferred_count = ComputeDeferred(graph);
    // ...
    // The first block is the function entry and must not be deferred.
    if (graph->blocks()[0]->is_deferred()) {
      graph->blocks()[0]->set_deferred(false);
      // ...
    }
    int non_deferred_count = graph->num_blocks() - deferred_count;

    ZoneVector<BasicBlock*> new_blocks(graph->num_blocks(), zone_);
    size_t ix_non_deferred = 0;
    size_t ix_deferred = non_deferred_count;
    for (auto block_it = graph->begin(); block_it != graph->end(); ++block_it) {
      BasicBlock* block = *block_it;
      if (block->is_deferred()) {
        new_blocks[ix_deferred++] = block;
      } else {
        new_blocks[ix_non_deferred++] = block;
      }
    }
    CHECK_EQ(ix_deferred, graph->num_blocks());
    CHECK_EQ(ix_non_deferred, non_deferred_count);
    graph->set_blocks(new_blocks);

    // Drop blocks that are only forwarding jumps to another target.
    ZoneVector<BasicBlock*>& blocks = graph->blocks();
    size_t current_ix = 0;
    for (size_t i = 0; i < blocks.size(); ++i) {
      BasicBlock* block = blocks[i];
      if (code_gen_state()->RealJumpTarget(block) == block) {
        // This block cannot be replaced by a jump elsewhere, keep it.
        blocks[current_ix++] = block;
      }
      // ...
    }
    blocks.resize(current_ix);
  }
  void PostProcessGraph(Graph* graph) {}
  void PostProcessBasicBlock(BasicBlock* block) {}
  void PostPhiProcessing() {}

  BlockProcessResult PreProcessBasicBlock(BasicBlock* block) {
    // ...
    if (block->is_loop()) {
      __ LoopHeaderAlign();
    }
    if (v8_flags.code_comments) {
      std::stringstream ss;
      ss << "-- Block b" << block->id();
      __ RecordComment(ss.str());
    }
    // ...
  }

  template <typename NodeT>
  ProcessResult Process(NodeT* node, const ProcessingState& state) {
    if (v8_flags.code_comments) {
      std::stringstream ss;
      ss << "-- " << graph_labeller()->NodeId(node) << ": "
         << PrintNode(graph_labeller(), node);
      __ RecordComment(ss.str());
    }
    if (v8_flags.maglev_assert_stack_size) {
      __ AssertStackSizeCorrect();
    }
    // ...

    if (std::is_base_of_v<UnconditionalControlNode, NodeT>) {
      // ...
    }

    if (v8_flags.slow_debug_code && !std::is_same_v<NodeT, Phi>) {
      // ...
      for (Input& input : *node) {
        // ...
            input.node()->properties().value_representation();
        // ...
        if (input.IsGeneralRegister()) {
          // ...
        }
      }
    }

    MaglevAssembler::TemporaryRegisterScope scratch_scope(masm());
    scratch_scope.Include(node->general_temporaries());
    scratch_scope.IncludeDouble(node->double_temporaries());

    // ...
    masm()->set_allow_allocate(node->properties().can_allocate());
    masm()->set_allow_call(node->properties().is_call());
    masm()->set_allow_deferred_call(node->properties().is_deferred_call());

    node->GenerateCode(masm(), state);

    masm()->set_allow_allocate(false);
    masm()->set_allow_call(false);
    masm()->set_allow_deferred_call(false);
    // ...

    // If the node produced a value that was assigned a spill slot, store the
    // result there now.
    if (std::is_base_of_v<ValueNode, NodeT>) {
      // ...
      if (value_node->has_valid_live_range() && value_node->is_spilled()) {
        compiler::AllocatedOperand source =
            // ...
        if (!source.IsAnyStackSlot()) {
          if (v8_flags.code_comments) __ RecordComment("-- Spill:");
          if (source.IsRegister()) {
            // ...
          }
          // ...
        } else {
          DCHECK_EQ(source.index(), value_node->spill_slot().index());
        }
      }
    }
    // ...
  }
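  // At the end of a block whose successor carries merge state, two kinds of
  // gap moves have to be emitted: moves into the successor's phis (from this
  // predecessor's inputs) and moves demanded by the successor's register
  // merge state. Both are resolved with ParallelMoveResolver, separately for
  // general and double registers.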
  void EmitBlockEndGapMoves(UnconditionalControlNode* node,
                            const ProcessingState& state) {
    BasicBlock* target = node->target();
    if (!target->has_state()) {
      __ RecordComment("-- Target has no state, must be a fallthrough");
      return;
    }

    int predecessor_id = state.block()->predecessor_id();

    MaglevAssembler::TemporaryRegisterScope temps(masm_);
    Register scratch = temps.AcquireScratch();
    DoubleRegister double_scratch = temps.AcquireScratchDouble();

    // ...
    ParallelMoveResolver<Register, false> register_moves(masm_);
    ParallelMoveResolver<DoubleRegister, false> double_register_moves(masm_);

    // Remember the registers that phi moves write, so that the register-merge
    // moves below do not clobber them.
    RegList registers_set_by_phis;
    DoubleRegList double_registers_set_by_phis;

    __ RecordComment("-- Gap moves:");
    if (target->has_phi()) {
      Phi::List* phis = target->phis();
      for (Phi* phi : *phis) {
        // ...
        if (!phi->has_valid_live_range()) {
          // ...
            std::stringstream ss;
            ss  // ...
               << phi->input(state.block()->predecessor_id()).operand() << " → "
               << target << " (n" << graph_labeller()->NodeId(phi)
               // ...
            __ RecordComment(ss.str());
          // ...
          continue;
        }
        Input& input = phi->input(state.block()->predecessor_id());
        ValueNode* input_node = input.node();
        compiler::InstructionOperand source = input.operand();
        compiler::AllocatedOperand target_operand =
            // ...
        if (v8_flags.code_comments) {
          std::stringstream ss;
          ss << "-- * " << source << " → " << target << " (n"
             << graph_labeller()->NodeId(phi) << ")";
          __ RecordComment(ss.str());
        }
        if (phi->use_double_register()) {
          DCHECK(!phi->decompresses_tagged_result());
          double_register_moves.RecordMove(input_node, source, target_operand,
                                           kDoesNotNeedDecompression);
        } else {
          register_moves.RecordMove(input_node, source, target_operand,
                                    kDoesNotNeedDecompression);
        }
        if (target_operand.IsAnyRegister()) {
          if (phi->use_double_register()) {
            double_registers_set_by_phis.set(
                target_operand.GetDoubleRegister());
          } else {
            registers_set_by_phis.set(target_operand.GetRegister());
          }
        }
      }
    }
    target->state()->register_state().ForEachGeneralRegister(
        [&](Register reg, RegisterState& state) {
          // Don't clobber registers that a phi move already wrote.
          if (registers_set_by_phis.has(reg)) return;
          // ...
          RegisterMerge* merge;
          if (LoadMergeState(state, &merge)) {
            compiler::InstructionOperand source =
                merge->operand(predecessor_id);
            if (v8_flags.code_comments) {
              std::stringstream ss;
              ss << "-- * " << source << " → " << reg;
              __ RecordComment(ss.str());
            }
            register_moves.RecordMove(node, source, reg,
                                      kDoesNotNeedDecompression);
          }
        });
    register_moves.EmitMoves(scratch);

    __ RecordComment("-- Double gap moves:");
    target->state()->register_state().ForEachDoubleRegister(
        [&](DoubleRegister reg, RegisterState& state) {
          if (double_registers_set_by_phis.has(reg)) return;
          // ...
          RegisterMerge* merge;
          if (LoadMergeState(state, &merge)) {
            compiler::InstructionOperand source =
                merge->operand(predecessor_id);
            if (v8_flags.code_comments) {
              std::stringstream ss;
              ss << "-- * " << source << " → " << reg;
              __ RecordComment(ss.str());
            }
            double_register_moves.RecordMove(node, source, reg,
                                             kDoesNotNeedDecompression);
          }
        });
    double_register_moves.EmitMoves(double_scratch);
  }
  MaglevAssembler* masm() const { return masm_; }
  MaglevCodeGenState* code_gen_state() const {
    return masm()->code_gen_state();
  }
  MaglevGraphLabeller* graph_labeller() const {
    return code_gen_state()->graph_labeller();
  }
  // Rewrite the node's jump targets to their "real" targets, skipping over
  // the empty forwarding blocks removed in PreProcessGraph.
  template <typename NodeT>
  void PatchJumps(NodeT* node) {
    if constexpr (IsUnconditionalControlNode(Node::opcode_of<NodeT>)) {
      UnconditionalControlNode* control_node =
          node->template Cast<UnconditionalControlNode>();
      control_node->set_target(
          code_gen_state()->RealJumpTarget(control_node->target()));
    } else if constexpr (IsBranchControlNode(Node::opcode_of<NodeT>)) {
      BranchControlNode* control_node =
          node->template Cast<BranchControlNode>();
      control_node->set_if_true(
          code_gen_state()->RealJumpTarget(control_node->if_true()));
      control_node->set_if_false(
          code_gen_state()->RealJumpTarget(control_node->if_false()));
    } else if constexpr (std::is_same_v<NodeT, Switch>) {
      Switch* switch_node = node;
      BasicBlockRef* targets = switch_node->targets();
      for (int i = 0; i < switch_node->size(); ++i) {
        targets[i].set_block_ptr(
            code_gen_state()->RealJumpTarget(targets[i].block_ptr()));
      }
      if (switch_node->has_fallthrough()) {
        switch_node->set_fallthrough(
            code_gen_state()->RealJumpTarget(switch_node->fallthrough()));
      }
    }
  }
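  // Deferred blocks hold code that is expected to be rarely executed; they
  // are counted here and later moved to the end of the block list so the hot
  // path stays dense. Deferredness is also propagated: a block all of whose
  // predecessors (or all of whose successors) are deferred is marked deferred
  // as well, until a fixpoint is reached.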
  int ComputeDeferred(Graph* graph) {
    int deferred_count = 0;
    // Seed the work queue with the blocks that are already marked deferred.
    SmallZoneVector<BasicBlock*, 32> work_queue(zone_);
    for (auto block_it = graph->begin(); block_it != graph->end(); ++block_it) {
      BasicBlock* block = *block_it;
      if (block->is_deferred()) {
        deferred_count++;
        work_queue.emplace_back(block);
      }
    }

    // Propagate deferredness to neighbouring blocks until a fixpoint.
    while (!work_queue.empty()) {
      BasicBlock* block = work_queue.back();
      work_queue.pop_back();
      DCHECK(block->is_deferred());

      // ...
      block->ForEachSuccessor([&work_queue,
                               &deferred_count](BasicBlock* successor) {
        if (successor->is_deferred()) {
          return;
        }
        bool should_defer = true;
        successor->ForEachPredecessor([&should_defer](BasicBlock* predecessor) {
          if (!predecessor->is_deferred()) {
            should_defer = false;
          }
        });
        if (should_defer) {
          deferred_count++;
          work_queue.emplace_back(successor);
          successor->set_deferred(true);
        }
      });

      // ...
      block->ForEachPredecessor([&work_queue,
                                 &deferred_count](BasicBlock* predecessor) {
        if (predecessor->is_deferred()) {
          return;
        }
        bool should_defer = true;
        predecessor->ForEachSuccessor([&should_defer](BasicBlock* successor) {
          if (!successor->is_deferred()) {
            should_defer = false;
          }
        });
        if (should_defer) {
          deferred_count++;
          work_queue.emplace_back(predecessor);
          predecessor->set_deferred(true);
        }
      });
    }
    return deferred_count;
  }
  MaglevAssembler* const masm_;
  // ...
};

// Runs over every node and gives the local heap a chance to safepoint between
// nodes.
class SafepointingNodeProcessor {
 public:
  explicit SafepointingNodeProcessor(LocalIsolate* local_isolate)
      : local_isolate_(local_isolate) {}

  void PreProcessGraph(Graph* graph) {}
  void PostProcessGraph(Graph* graph) {}
  void PostProcessBasicBlock(BasicBlock* block) {}
  // ...
  void PostPhiProcessing() {}
  ProcessResult Process(NodeBase* node, const ProcessingState& state) {
    // ...
  }
  // ...
};
// Counts the total number of frames and the number of JS frames in a deopt
// frame chain.
DeoptimizationFrameTranslation::FrameCount GetFrameCount(
    const DeoptFrame* deopt_frame) {
  int total = 0;
  int js_frame = 0;
  do {
    if (deopt_frame->IsJsFrame()) {
      js_frame++;
    }
    total++;
    deopt_frame = deopt_frame->parent();
  } while (deopt_frame);
  return {total, js_frame};
}
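// MaglevFrameTranslationBuilder translates a (possibly nested) DeoptFrame
// chain into the FrameTranslation format the deoptimizer consumes: one entry
// per frame, describing where each value lives (register, stack slot,
// literal, or a materialised virtual object).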
class MaglevFrameTranslationBuilder {
 public:
  MaglevFrameTranslationBuilder(
      LocalIsolate* local_isolate, MaglevAssembler* masm,
      FrameTranslationBuilder* translation_array_builder,
      IdentityMap<int, base::DefaultAllocationPolicy>* protected_deopt_literals,
      IdentityMap<int, base::DefaultAllocationPolicy>* deopt_literals)
      // ...

  void BuildEagerDeopt(EagerDeoptInfo* deopt_info) {
    BuildBeginDeopt(deopt_info);
    // ...
    const InputLocation* current_input_location = deopt_info->input_locations();
    const VirtualObjectList& virtual_objects =
        deopt_info->top_frame().GetVirtualObjects();
    RecursiveBuildDeoptFrame(deopt_info->top_frame(), current_input_location,
                             virtual_objects);
  }

  void BuildLazyDeopt(LazyDeoptInfo* deopt_info) {
    BuildBeginDeopt(deopt_info);
    // ...
    const InputLocation* current_input_location = deopt_info->input_locations();
    const VirtualObjectList& virtual_objects =
        deopt_info->top_frame().GetVirtualObjects();

    if (deopt_info->top_frame().parent()) {
      // Build the parent frames first.
      RecursiveBuildDeoptFrame(*deopt_info->top_frame().parent(),
                               current_input_location, virtual_objects);
    }

    const DeoptFrame& top_frame = deopt_info->top_frame();
    switch (top_frame.type()) {
      case DeoptFrame::FrameType::kInterpretedFrame:
        return BuildSingleDeoptFrame(
            top_frame.as_interpreted(), current_input_location, virtual_objects,
            deopt_info->result_location(), deopt_info->result_size());
      // ...
      case DeoptFrame::FrameType::kConstructInvokeStubFrame:
        return BuildSingleDeoptFrame(top_frame.as_construct_stub(),
                                     current_input_location, virtual_objects);
      case DeoptFrame::FrameType::kBuiltinContinuationFrame:
        return BuildSingleDeoptFrame(top_frame.as_builtin_continuation(),
                                     current_input_location, virtual_objects);
    }
  }
 private:
  constexpr int DeoptStackSlotIndexFromFPOffset(int offset) {
    // ...
  }

  int DeoptStackSlotFromStackSlot(const compiler::AllocatedOperand& operand) {
    return DeoptStackSlotIndexFromFPOffset(
        masm_->GetFramePointerOffsetForStackSlot(operand));
  }

  void BuildBeginDeopt(DeoptInfo* deopt_info) {
    // ...
    auto [frame_count, jsframe_count] = GetFrameCount(&deopt_info->top_frame());
    deopt_info->set_translation_index(
        translation_array_builder_->BeginTranslation(
            frame_count, jsframe_count,
            deopt_info->feedback_to_update().IsValid()));
    if (deopt_info->feedback_to_update().IsValid()) {
      // ...
          GetDeoptLiteral(*deopt_info->feedback_to_update().vector),
          deopt_info->feedback_to_update().index());
    }
  }
  void RecursiveBuildDeoptFrame(const DeoptFrame& frame,
                                const InputLocation*& current_input_location,
                                const VirtualObjectList& virtual_objects) {
    if (frame.parent()) {
      // Build the parent frames first.
      RecursiveBuildDeoptFrame(*frame.parent(), current_input_location,
                               virtual_objects);
    }

    switch (frame.type()) {
      case DeoptFrame::FrameType::kInterpretedFrame:
        return BuildSingleDeoptFrame(frame.as_interpreted(),
                                     current_input_location, virtual_objects);
      case DeoptFrame::FrameType::kInlinedArgumentsFrame:
        return BuildSingleDeoptFrame(frame.as_inlined_arguments(),
                                     current_input_location, virtual_objects);
      case DeoptFrame::FrameType::kConstructInvokeStubFrame:
        return BuildSingleDeoptFrame(frame.as_construct_stub(),
                                     current_input_location, virtual_objects);
      case DeoptFrame::FrameType::kBuiltinContinuationFrame:
        return BuildSingleDeoptFrame(frame.as_builtin_continuation(),
                                     current_input_location, virtual_objects);
    }
  }
  void BuildSingleDeoptFrame(const InterpretedDeoptFrame& frame,
                             const InputLocation*& current_input_location,
                             const VirtualObjectList& virtual_objects,
                             interpreter::Register result_location,
                             int result_size) {
    int return_offset = frame.ComputeReturnOffset(result_location, result_size);
    translation_array_builder_->BeginInterpretedFrame(
        frame.bytecode_position(),
        GetDeoptLiteral(frame.GetSharedFunctionInfo()),
        GetProtectedDeoptLiteral(*frame.GetBytecodeArray().object()),
        frame.unit().register_count(), return_offset, result_size);

    BuildDeoptFrameValues(frame.unit(), frame.frame_state(), frame.closure(),
                          current_input_location, virtual_objects,
                          result_location, result_size);
  }

  void BuildSingleDeoptFrame(const InterpretedDeoptFrame& frame,
                             const InputLocation*& current_input_location,
                             const VirtualObjectList& virtual_objects) {
    // Non-top frames have no result location.
    const int return_offset = 0;
    const int return_count = 0;
    translation_array_builder_->BeginInterpretedFrame(
        frame.bytecode_position(),
        GetDeoptLiteral(frame.GetSharedFunctionInfo()),
        GetProtectedDeoptLiteral(*frame.GetBytecodeArray().object()),
        frame.unit().register_count(), return_offset, return_count);

    BuildDeoptFrameValues(frame.unit(), frame.frame_state(), frame.closure(),
                          current_input_location, virtual_objects,
                          interpreter::Register::invalid_value(), return_count);
  }
  void BuildSingleDeoptFrame(const InlinedArgumentsDeoptFrame& frame,
                             const InputLocation*& current_input_location,
                             const VirtualObjectList& virtual_objects) {
    // ...
        GetDeoptLiteral(frame.GetSharedFunctionInfo()),
        static_cast<uint32_t>(frame.arguments().size()),
        frame.GetBytecodeArray().parameter_count());

    // Closure.
    BuildDeoptFrameSingleValue(frame.closure(), current_input_location,
                               virtual_objects);

    // Arguments.
    for (ValueNode* value : frame.arguments()) {
      BuildDeoptFrameSingleValue(value, current_input_location,
                                 virtual_objects);
    }
  }
  void BuildSingleDeoptFrame(const ConstructInvokeStubDeoptFrame& frame,
                             const InputLocation*& current_input_location,
                             const VirtualObjectList& virtual_objects) {
    // ...
        GetDeoptLiteral(frame.GetSharedFunctionInfo()));

    // Receiver.
    BuildDeoptFrameSingleValue(frame.receiver(), current_input_location,
                               virtual_objects);

    // Context.
    BuildDeoptFrameSingleValue(frame.context(), current_input_location,
                               virtual_objects);
  }
  void BuildSingleDeoptFrame(const BuiltinContinuationDeoptFrame& frame,
                             const InputLocation*& current_input_location,
                             const VirtualObjectList& virtual_objects) {
    BytecodeOffset bailout_id =
        // ...
    int literal_id = GetDeoptLiteral(frame.GetSharedFunctionInfo());
    int height = frame.parameters().length();

    constexpr int kExtraFixedJSFrameParameters =
        // ...
    if (frame.is_javascript()) {
      // ...
          bailout_id, literal_id, height + kExtraFixedJSFrameParameters);
    } else {
      // ...
          bailout_id, literal_id, height);
    }

    // ...
    if (frame.is_javascript()) {
      // ...
          GetDeoptLiteral(frame.javascript_target()));
    }

    // Parameters.
    for (ValueNode* value : frame.parameters()) {
      BuildDeoptFrameSingleValue(value, current_input_location,
                                 virtual_objects);
    }

    // Extra fixed JS frame parameters.
    if (frame.is_javascript()) {
      // ...
                    kExtraFixedJSFrameParameters);
      static_assert(kExtraFixedJSFrameParameters ==
                    // ...
          GetDeoptLiteral(frame.javascript_target()));
      // ...
          GetDeoptLiteral(ReadOnlyRoots(local_isolate_).undefined_value()));
      // ...
#ifdef V8_JS_LINKAGE_INCLUDES_DISPATCH_HANDLE
      // ...
#endif  // V8_JS_LINKAGE_INCLUDES_DISPATCH_HANDLE
    }

    // Context.
    ValueNode* value = frame.context();
    BuildDeoptFrameSingleValue(value, current_input_location, virtual_objects);
  }
  void BuildDeoptStoreRegister(const compiler::AllocatedOperand& operand,
                               ValueRepresentation repr) {
    // ... (dispatch on repr and record the register in the frame translation;
    // the double-valued representations use operand.GetDoubleRegister():)
            operand.GetDoubleRegister());
    // ...
            operand.GetDoubleRegister());
    // ...
  }

  void BuildDeoptStoreStackSlot(const compiler::AllocatedOperand& operand,
                                ValueRepresentation repr) {
    int stack_slot = DeoptStackSlotFromStackSlot(operand);
    // ... (dispatch on repr and record the stack slot in the frame
    // translation)
  }
  int GetDuplicatedId(intptr_t id) {
    for (int idx = 0; idx < static_cast<int>(object_ids_.size()); idx++) {
      if (object_ids_[idx] == id) {
        return idx;
      }
    }
    object_ids_.push_back(id);
    return kNotDuplicated;
  }
  void BuildHeapNumber(Float64 number) {
    // Allocate a heap number for the raw bits and record it as a deopt
    // literal.
    DirectHandle<Object> value =
        // ...
  }
  void BuildConsString(const VirtualObject* object,
                       const InputLocation*& input_location,
                       const VirtualObjectList& virtual_objects) {
    auto cons_string = object->cons_string();
    // ...
    BuildNestedValue(cons_string.first(), input_location, virtual_objects);
    BuildNestedValue(cons_string.second(), input_location, virtual_objects);
  }
  void BuildFixedDoubleArray(uint32_t length,
                             compiler::FixedDoubleArrayRef array) {
    // ...
        GetDeoptLiteral(*local_isolate_->factory()->fixed_double_array_map()));
    // ...
    for (uint32_t i = 0; i < length; i++) {
      Float64 value = array.GetFromImmutableFixedDoubleArray(i);
      if (value.is_hole_nan()) {
        // ...
            GetDeoptLiteral(ReadOnlyRoots(local_isolate_).the_hole_value()));
      } else {
        BuildHeapNumber(value);
      }
    }
  }
  void BuildNestedValue(const ValueNode* value,
                        const InputLocation*& input_location,
                        const VirtualObjectList& virtual_objects) {
    if (IsConstantNode(value->opcode())) {
      // ...
          GetDeoptLiteral(*value->Reify(local_isolate_)));
      return;
    }
    // Special nodes.
    switch (value->opcode()) {
      case Opcode::kArgumentsElements:
        // ...
            value->Cast<ArgumentsElements>()->type());
        // ...
      case Opcode::kArgumentsLength:
        // ...
      case Opcode::kRestLength:
        // ...
      case Opcode::kVirtualObject:
        // ...
      default:
        BuildDeoptFrameSingleValue(value, input_location, virtual_objects);
    }
  }
  void BuildVirtualObject(const VirtualObject* object,
                          const InputLocation*& input_location,
                          const VirtualObjectList& virtual_objects) {
    // ...
      return BuildHeapNumber(object->number());
    // ...
    int dup_id =
        GetDuplicatedId(reinterpret_cast<intptr_t>(object->allocation()));
    if (dup_id != kNotDuplicated) {
      // This object was already described; emit a duplicate reference and
      // skip the input locations its nested inputs would have consumed.
      // ...
      object->ForEachNestedRuntimeInput(virtual_objects,
                                        [&](ValueNode*) { input_location++; });
      return;
    }
    switch (object->type()) {
      // ...
        return BuildConsString(object, input_location, virtual_objects);
      // ...
        return BuildFixedDoubleArray(object->double_elements_length(),
                                     object->double_elements());
      // ...
        DCHECK(object->has_static_map());
        // ...
            GetDeoptLiteral(*object->map().object()));
        object->ForEachInput([&](ValueNode* node) {
          BuildNestedValue(node, input_location, virtual_objects);
        });
      // ...
    }
  }
  void BuildDeoptFrameSingleValue(const ValueNode* value,
                                  const InputLocation*& input_location,
                                  const VirtualObjectList& virtual_objects) {
    // ...
      value = value->input(0).node();
    // ...
    DCHECK(!value->Is<VirtualObject>());
    if (const InlinedAllocation* alloc = value->TryCast<InlinedAllocation>()) {
      VirtualObject* vobject = virtual_objects.FindAllocatedWith(alloc);
      if (vobject && alloc->HasBeenElided()) {
        // The allocation was elided, so describe the virtual object instead
        // of a runtime location.
        DCHECK(alloc->HasBeenAnalysed());
        BuildVirtualObject(vobject, input_location, virtual_objects);
        // ...
        return;
      }
    }
    if (input_location->operand().IsConstant()) {
      // ...
          GetDeoptLiteral(*value->Reify(local_isolate_)));
    } else {
      const compiler::AllocatedOperand& operand =
          compiler::AllocatedOperand::cast(input_location->operand());
      ValueRepresentation repr = value->properties().value_representation();
      if (operand.IsAnyRegister()) {
        BuildDeoptStoreRegister(operand, repr);
      } else {
        BuildDeoptStoreStackSlot(operand, repr);
      }
    }
    // ...
  }
  void BuildDeoptFrameValues(
      const MaglevCompilationUnit& compilation_unit,
      const CompactInterpreterFrameState* checkpoint_state,
      const ValueNode* closure, const InputLocation*& input_location,
      const VirtualObjectList& virtual_objects,
      interpreter::Register result_location, int result_size) {
    // Closure.
    BuildDeoptFrameSingleValue(closure, input_location, virtual_objects);

    // Parameters.
    checkpoint_state->ForEachParameter(
        compilation_unit, [&](ValueNode* value, interpreter::Register reg) {
          // ... (parameters that overlap the call's return values are marked
          // as optimised out:)
            BuildDeoptFrameSingleValue(value, input_location,
                                       virtual_objects);
          // ...
        });

    // Context.
    ValueNode* context_value = checkpoint_state->context(compilation_unit);
    BuildDeoptFrameSingleValue(context_value, input_location, virtual_objects);

    // Locals.
    // ...
    checkpoint_state->ForEachLocal(
        compilation_unit, [&](ValueNode* value, interpreter::Register reg) {
          // ... (registers with no live value are marked as optimised out:)
          while (i < reg.index()) {
            // ...
          }
          BuildDeoptFrameSingleValue(value, input_location, virtual_objects);
          // ...
        });
    while (i < compilation_unit.register_count()) {
      // ...
    }

    // Accumulator.
    if (checkpoint_state->liveness()->AccumulatorIsLive() &&
        /* ... not part of the call's return values ... */) {
      ValueNode* value = checkpoint_state->accumulator(compilation_unit);
      BuildDeoptFrameSingleValue(value, input_location, virtual_objects);
    }
    // ...
  }
  // GetProtectedDeoptLiteral / GetDeoptLiteral return the index of an object
  // in the corresponding deopt literal table, inserting it on first use.
  // ...
    IdentityMapFindResult<int> res =
        // ...
    if (!res.already_exists) {
      // ...
    }
    // ...
    if (!res.already_exists) {
      // ...
    }
    // ...

  int GetDeoptLiteral(compiler::HeapObjectRef ref) {
    return GetDeoptLiteral(*ref.object());
  }

  MaglevAssembler* masm_;
  // ...
};
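// MaglevCodeGenerator ties the pieces together: it runs the node processors
// over the graph, emits deferred code, deopt exits and exception handler
// trampolines, generates the deoptimization data, and builds the final Code
// object.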
MaglevCodeGenerator::MaglevCodeGenerator(LocalIsolate* isolate,
                                         MaglevCompilationInfo* compilation_info,
                                         Graph* graph)
    : // ...
          graph->tagged_stack_slots()),
      frame_translation_builder_(compilation_info->zone()),
      // ...
          graph->max_block_id()),
      masm_(isolate->GetMainThreadIsolateUnsafe(), compilation_info->zone(),
            // ...
      retained_maps_(isolate->heap()),
      is_context_specialized_(
          compilation_info->specialize_to_function_context()),
      zone_(compilation_info->zone()) {
  // ...
}
// ...
#ifdef V8_TARGET_ARCH_ARM
  // ...
  __ CheckConstPool(true, false);
#endif
// ...

// From MaglevCodeGenerator::Assemble() / Generate(): code and deoptimization
// data may have been produced on the background thread, in which case the
// persistent handles have to be turned back into main-thread handles here.
  if (v8_flags.maglev_build_code_on_background) {
    // ...
    if (code_.ToHandle(&code)) {
      // ...
    }
  } else if (v8_flags.maglev_deopt_data_on_background) {
    // ...
  }
// ...
  if (v8_flags.maglev_build_code_on_background) {
    // ...
    if (code_.ToHandle(&code)) {
      return handle(*code, isolate);
    }
    // ...
  }
// From MaglevCodeGenerator::EmitCode(): run the safepointing and
// code-generating processors over the whole graph.
  GraphProcessor<NodeMultiProcessor<SafepointingNodeProcessor,
                                    MaglevCodeGeneratingNodeProcessor>>
      // ...
                MaglevCodeGeneratingNodeProcessor{masm(), zone_});
  // ...
  // OSR functions must not be entered directly through their regular entry.
    masm_.Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
  // ...
  processor.ProcessGraph(graph_);
  // ...

// From RecordInlinedFunctions():
  // ...
    inlined.RegisterInlinedFunctionId(*res.entry);
  // ...

// From EmitDeferredCode(): bind and generate each deferred code block.
  // ...
    __ RecordComment("-- Deferred block");
    __ bind(&deferred_code->deferred_code_label);
    deferred_code->Generate(masm());
  // ...
// From EmitDeopts(): build the frame translations and emit the deopt exit
// sequences for all eager and lazy deopts.
  MaglevFrameTranslationBuilder translation_builder(
      // ...
  Label eager_deopt_entry;
  Label lazy_deopt_entry;
  __ MaybeEmitDeoptBuiltinsCall(
      // ...

  int deopt_index = 0;

  __ RecordComment("-- Non-lazy deopts");
  // ... (for each eager deopt:)
    translation_builder.BuildEagerDeopt(deopt_info);
    // ...
      __ RecordDeoptReason(deopt_info->reason(), 0,
                           deopt_info->top_frame().GetSourcePosition(),
                           // ...
    __ bind(deopt_info->deopt_entry_label());
    // ...
    __ CallForDeoptimization(Builtin::kDeoptimizationEntry_Eager, deopt_index,
                             deopt_info->deopt_entry_label(),
                             // ...
                             &eager_deopt_entry);
    // ...

  __ RecordComment("-- Lazy deopts");
  int last_updated_safepoint = 0;
  // ... (for each lazy deopt:)
    translation_builder.BuildLazyDeopt(deopt_info);
    // ...
      __ RecordDeoptReason(DeoptimizeReason::kUnknown, 0,
                           deopt_info->top_frame().GetSourcePosition(),
                           // ...
    __ BindExceptionHandler(deopt_info->deopt_entry_label());
    // ...
    __ CallForDeoptimization(Builtin::kDeoptimizationEntry_Lazy, deopt_index,
                             deopt_info->deopt_entry_label(),
                             // ...
    last_updated_safepoint = safepoint_table_builder_.UpdateDeoptimizationInfo(
        deopt_info->deopting_call_return_pc(),
        deopt_info->deopt_entry_label()->pos(), last_updated_safepoint,
        deopt_index);
    // ...
// From EmitExceptionHandlerTrampolines():
  __ RecordComment("-- Exception handler trampolines");
  // ... (for each node with an exception handler:)
    ExceptionHandlerTrampolineBuilder::Build(masm(), node);
  // ...

// From EmitMetadata(): emit the handler table; handlers that lazily deopt
// have no bound trampoline entry.
  // ...
      !info->trampoline_entry().is_bound());
  // ...
      : info->trampoline_entry().pos();
  // ...
// From BuildCodeObject(): assemble the final Code object, attaching the
// deoptimization data (which may already have been generated on the
// background thread).
  // ...
      (v8_flags.maglev_deopt_data_on_background &&
       !v8_flags.maglev_build_code_on_background)
      // ...
  builder.set_is_context_specialized();
  // ...

// From CollectRetainedMaps(): collect the maps that the code references
// weakly.
  DCHECK(code->is_optimized_code());
  // ...
    if (code->IsWeakObjectInOptimizedCode(target_object)) {
      if (IsMap(target_object, cage_base)) {
        // ...
      }
    }
  // ...
// From GenerateDeoptimizationData(): pack the frame translations, literal
// tables, inlining positions and per-deopt entries into a DeoptimizationData
// object.
  int eager_deopt_count =
      // ...
  int deopt_count = lazy_deopt_count + eager_deopt_count;
  // ...
  raw_data->SetFrameTranslation(*translations);
  // ...
  raw_data->SetOptimizationId(
      // ...
  raw_data->SetEagerDeoptCount(Smi::FromInt(eager_deopt_count));
  raw_data->SetLazyDeoptCount(Smi::FromInt(lazy_deopt_count));
  raw_data->SetWrappedSharedFunctionInfo(*sfi_wrapper);
  // ...
  int inlined_functions_size =
      // ...
      inlined_functions_size);
  // ...
      *protected_literals;
  // ... (copy the protected deopt literals into the protected literal array:)
  for (auto it = iterate.begin(); it != iterate.end(); ++it) {
    // ...
  }
  // ... (and the regular deopt literals into the literal array:)
  for (auto it = iterate.begin(); it != iterate.end(); ++it) {
    raw_literals->set(*it.entry(), it.key());
  }
  // ...
  for (int i = 0; i < inlined_functions_size; i++) {
    // ...
    inlining_positions->set(i, inlined_function_info.position);
  }
  // ...
  raw_data->SetProtectedLiteralArray(raw_protected_literals);
  raw_data->SetLiteralArray(raw_literals);
  raw_data->SetInliningPositions(*inlining_positions);
  // ...
  raw_data->SetOsrBytecodeOffset(
      // ...

  // Populate the per-deopt entries (one loop for each of the two deopt
  // kinds), recording the bytecode offset, translation index and pc of every
  // deopt.
  // ...
    DCHECK_NE(deopt_info->translation_index(), -1);
    raw_data->SetBytecodeOffset(i, deopt_info->top_frame().GetBytecodeOffset());
    raw_data->SetTranslationIndex(
        // ...
    raw_data->SetPc(i, Smi::FromInt(deopt_info->deopt_entry_label()->pos()));
  // ...
    DCHECK_NE(deopt_info->translation_index(), -1);
    raw_data->SetBytecodeOffset(i, deopt_info->top_frame().GetBytecodeOffset());
    raw_data->SetTranslationIndex(
        // ...
    raw_data->SetPc(i, Smi::FromInt(deopt_info->deopt_entry_label()->pos()));
  // ...