v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
instruction-selector.h
Go to the documentation of this file.
1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_
6#define V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_
7
8#include <map>
9#include <optional>
10
24
25#if V8_ENABLE_WEBASSEMBLY
27#endif // V8_ENABLE_WEBASSEMBLY
28
29namespace v8 {
30namespace internal {
31
32class TickCounter;
33
34namespace compiler {
35
36// Forward declarations.
37class BasicBlock;
38struct CallBufferT; // TODO(bmeurer): Remove this.
39class InstructionSelectorT;
40class Linkage;
41class OperandGeneratorT;
42class SwitchInfoT;
43struct CaseInfoT;
44class TurbofanStateObjectDeduplicator;
45class TurboshaftStateObjectDeduplicator;
46
48 public:
49 enum SourcePositionMode { kCallSourcePositions, kAllSourcePositions };
50 enum EnableScheduling { kDisableScheduling, kEnableScheduling };
53 kEnableRootsRelativeAddressing
54 };
57 kEnableSwitchJumpTable
58 };
59 enum EnableTraceTurboJson { kDisableTraceTurboJson, kEnableTraceTurboJson };
60
// Compact bit-set describing which CPU features are enabled for the
// target; each CpuFeature enum value maps to a single bit (1u << f).
61 class Features final {
62 public:
// No features enabled.
63 Features() : bits_(0) {}
// Construct from a raw bit mask (explicit to avoid accidental
// integer-to-Features conversion).
64 explicit Features(unsigned bits) : bits_(bits) {}
// Single-feature set.
65 explicit Features(CpuFeature f) : bits_(1u << f) {}
// Two-feature set.
66 Features(CpuFeature f1, CpuFeature f2) : bits_((1u << f1) | (1u << f2)) {}
67
// True iff feature {f}'s bit is present in this set.
68 bool Contains(CpuFeature f) const { return (bits_ & (1u << f)); }
69
70 private:
71 unsigned bits_;
72 };
73
74 static InstructionSelector ForTurboshaft(
75 Zone* zone, size_t node_count, Linkage* linkage,
77 EnableSwitchJumpTable enable_switch_jump_table, TickCounter* tick_counter,
78 JSHeapBroker* broker, size_t* max_unoptimized_frame_height,
79 size_t* max_pushed_argument_count,
80 SourcePositionMode source_position_mode = kCallSourcePositions,
81 Features features = SupportedFeatures(),
82 EnableScheduling enable_scheduling = v8_flags.turbo_instruction_scheduling
83 ? kEnableScheduling
84 : kDisableScheduling,
85 EnableRootsRelativeAddressing enable_roots_relative_addressing =
86 kDisableRootsRelativeAddressing,
87 EnableTraceTurboJson trace_turbo = kDisableTraceTurboJson);
88
90
91 std::optional<BailoutReason> SelectInstructions();
92
93 bool IsSupported(CpuFeature feature) const;
94
95 // Returns the features supported on the target platform.
97 return Features(CpuFeatures::SupportedFeatures());
98 }
99
100 const ZoneVector<std::pair<int, int>>& instr_origins() const;
101 const std::map<NodeId, int> GetVirtualRegistersForTesting() const;
102
103 static MachineOperatorBuilder::Flags SupportedMachineOperatorFlags();
104 static MachineOperatorBuilder::AlignmentRequirements AlignmentRequirements();
105
106 private:
107 InstructionSelector(std::nullptr_t, InstructionSelectorT* turboshaft_impl);
111};
112
113// The flags continuation is a way to combine a branch or a materialization
114// of a boolean value with an instruction that sets the flags register.
115// The whole instruction is treated as a unit by the register allocator, and
116// thus no spills or moves can be introduced between the flags-setting
117// instruction and the branch or set it should be combined with.
119 public:
127 // This limit covered almost all the opportunities when compiling the debug
128 // builtins.
129 static constexpr size_t kMaxCompareChainSize = 4;
130 using compare_chain_t = std::array<ConditionalCompare, kMaxCompareChainSize>;
131
133
134 // Creates a new flags continuation from the given condition and true/false
135 // blocks.
142
143 // Creates a new flags continuation from the given conditional compare chain
144 // and true/false blocks.
152
153 // Creates a new flags continuation for an eager deoptimization exit.
165 // test-instruction-scheduler.cc passes a dummy Node* as frame_state.
166 // Contents don't matter as long as it's not nullptr.
168 feedback, frame_state);
169 }
170
171 // Creates a new flags continuation for a boolean value.
176
177 // Creates a new flags continuation for a conditional boolean value.
185
186 // Creates a new flags continuation for a wasm trap.
190
197
// Mode predicates: exactly which kind of continuation this object
// represents (branch, deopt, boolean materialization, trap, select, ...).
198 bool IsNone() const { return mode_ == kFlags_none; }
199 bool IsBranch() const { return mode_ == kFlags_branch; }
200 bool IsConditionalBranch() const {
// NOTE(review): the body line is elided in this rendering — presumably
// compares mode_ against kFlags_conditional_branch; confirm in the
// original header.
202 }
203 bool IsDeoptimize() const { return mode_ == kFlags_deoptimize; }
204 bool IsSet() const { return mode_ == kFlags_set; }
206 bool IsTrap() const { return mode_ == kFlags_trap; }
207 bool IsSelect() const { return mode_ == kFlags_select; }
209 DCHECK(!IsNone());
210 return condition_;
211 }
218 return reason_;
219 }
220 uint32_t node_id() const {
222 return node_id_;
223 }
224 FeedbackSource const& feedback() const {
226 return feedback_;
227 }
// Trap identifier for a wasm-trap continuation; only valid when
// mode_ == kFlags_trap (checked by the DCHECK below).
236 TrapId trap_id() const {
237 DCHECK(IsTrap());
238 return trap_id_;
239 }
249 DCHECK(IsSelect());
250 return true_value_;
251 }
253 DCHECK(IsSelect());
254 return false_value_;
255 }
256 const compare_chain_t& compares() const {
258 return compares_;
259 }
264
270
276
281
289
292 switch (condition_) {
293 case kSignedLessThan:
295 break;
298 break;
301 break;
304 break;
305 default:
306 break;
307 }
308 }
309
310 // Encodes this flags continuation into the given opcode.
312 opcode |= FlagsModeField::encode(mode_);
313 if (mode_ != kFlags_none) {
315 }
316 return opcode;
317 }
318
319 private:
331
347
361
368
380
383
396
399 FlagsCondition final_condition_; // Only valid if mode_ ==
400 // kFlags_conditional_set.
401 uint32_t num_conditional_compares_; // Only valid if mode_ ==
402 // kFlags_conditional_set.
403 compare_chain_t compares_; // Only valid if mode_ == kFlags_conditional_set.
404 DeoptimizeReason reason_; // Only valid if mode_ == kFlags_deoptimize*
405 uint32_t node_id_; // Only valid if mode_ == kFlags_deoptimize*
406 FeedbackSource feedback_; // Only valid if mode_ == kFlags_deoptimize*
408 frame_state_or_result_; // Only valid if mode_ == kFlags_deoptimize*
409 // or mode_ == kFlags_set.
410 turboshaft::Block* true_block_; // Only valid if mode_ == kFlags_branch*.
411 turboshaft::Block* false_block_; // Only valid if mode_ == kFlags_branch*.
412 TrapId trap_id_; // Only valid if mode_ == kFlags_trap.
413 turboshaft::OpIndex true_value_; // Only valid if mode_ == kFlags_select.
414 turboshaft::OpIndex false_value_; // Only valid if mode_ == kFlags_select.
415};
416
417// This struct connects nodes of parameters which are going to be pushed on the
418// call stack with their parameter index in the call descriptor of the callee.
427
429
430// Instruction selection generates an InstructionSequence for a given Schedule.
432 public:
439
443
445 Zone* zone, size_t node_count, Linkage* linkage,
448 InstructionSelector::EnableSwitchJumpTable enable_switch_jump_table,
449 TickCounter* tick_counter, JSHeapBroker* broker,
450 size_t* max_unoptimized_frame_height, size_t* max_pushed_argument_count,
451 InstructionSelector::SourcePositionMode source_position_mode =
453 Features features = SupportedFeatures(),
454 InstructionSelector::EnableScheduling enable_scheduling =
455 v8_flags.turbo_instruction_scheduling
459 enable_roots_relative_addressing =
463
464 // Visit code for the entire graph with the included schedule.
465 std::optional<BailoutReason> SelectInstructions();
466
467 void StartBlock(RpoNumber rpo);
468 void EndBlock(RpoNumber rpo);
471
472 // ===========================================================================
473 // ============= Architecture-independent code emission methods. =============
474 // ===========================================================================
475
477 size_t temp_count = 0, InstructionOperand* temps = nullptr);
479 InstructionOperand a, size_t temp_count = 0,
480 InstructionOperand* temps = nullptr);
483 size_t temp_count = 0, InstructionOperand* temps = nullptr);
486 InstructionOperand c, size_t temp_count = 0,
487 InstructionOperand* temps = nullptr);
491 size_t temp_count = 0, InstructionOperand* temps = nullptr);
495 InstructionOperand e, size_t temp_count = 0,
496 InstructionOperand* temps = nullptr);
501 size_t temp_count = 0, InstructionOperand* temps = nullptr);
507 size_t temp_count = 0, InstructionOperand* temps = nullptr);
508 Instruction* Emit(InstructionCode opcode, size_t output_count,
509 InstructionOperand* outputs, size_t input_count,
510 InstructionOperand* inputs, size_t temp_count = 0,
511 InstructionOperand* temps = nullptr);
513
514 // [0-3] operand instructions with no output, uses labels for true and false
515 // blocks of the continuation.
517 FlagsContinuation* cont);
520 FlagsContinuation* cont);
523 FlagsContinuation* cont);
527 FlagsContinuation* cont);
529 InstructionOperand* outputs,
530 size_t input_count,
532 FlagsContinuation* cont);
534 InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
535 size_t input_count, InstructionOperand* inputs, size_t temp_count,
537
539
540 // ===========================================================================
541 // ============== Architecture-independent CPU feature methods. ==============
542 // ===========================================================================
543
// Returns true iff {feature} is contained in the feature set this
// selector instance was configured with.
544 bool IsSupported(CpuFeature feature) const {
545 return features_.Contains(feature);
546 }
547
548 // Returns the features supported on the target platform.
552
553 // ===========================================================================
554 // ============ Architecture-independent graph covering methods. =============
555 // ===========================================================================
556
557 // Used in pattern matching during code generation.
558 // Check if {node} can be covered while generating code for the current
559 // instruction. A node can be covered if the {user} of the node has the only
560 // edge, the two are in the same basic block, and there are no side-effects
561 // in-between. The last check is crucial for soundness.
562 // For pure nodes, CanCover(a,b) is checked to avoid duplicated execution:
563 // If this is not the case, code for b must still be generated for other
564 // users, and fusing is unlikely to improve performance.
565 bool CanCover(turboshaft::OpIndex user, turboshaft::OpIndex node) const;
566
568 turboshaft::OpIndex node) const;
569
570 // Used in pattern matching during code generation.
571 // This function checks that {node} and {user} are in the same basic block,
572 // and that {user} is the only user of {node} in this basic block. This
573 // check guarantees that there are no users of {node} scheduled between
574 // {node} and {user}, and thus we can select a single instruction for both
575 // nodes, if such an instruction exists. This check can be used for example
576 // when selecting instructions for:
577 // n = Int32Add(a, b)
578 // c = Word32Compare(n, 0, cond)
579 // Branch(c, true_label, false_label)
580 // Here we can generate a flag-setting add instruction, even if the add has
581 // uses in other basic blocks, since the flag-setting add instruction will
582 // still generate the result of the addition and not just set the flags.
583 // However, if we had uses of the add in the same basic block, we could have:
584 // n = Int32Add(a, b)
585 // o = OtherOp(n, ...)
586 // c = Word32Compare(n, 0, cond)
587 // Branch(c, true_label, false_label)
588 // where we cannot select the add and the compare together. If we were to
589 // select a flag-setting add instruction for Word32Compare and Int32Add while
590 // visiting Word32Compare, we would then have to select an instruction for
591 // OtherOp *afterwards*, which means we would attempt to use the result of
592 // the add before we have defined it.
594 turboshaft::OpIndex node) const;
595
596 // Checks if {node} was already defined, and therefore code was already
597 // generated for it.
598 bool IsDefined(turboshaft::OpIndex node) const;
599
600 // Checks if {node} has any uses, and therefore code has to be generated for
601 // it. Always returns {true} if the node has effect IsRequiredWhenUnused.
602 bool IsUsed(turboshaft::OpIndex node) const;
603 // Checks if {node} has any uses, and therefore code has to be generated for
604 // it. Ignores the IsRequiredWhenUnused effect.
605 bool IsReallyUsed(turboshaft::OpIndex node) const;
606
607 // Checks if {node} is currently live.
// A node is "live" when code for it still needs to be emitted: it has a
// use but has not been defined (emitted) yet.
608 bool IsLive(turboshaft::OpIndex node) const {
609 return !IsDefined(node) && IsUsed(node);
610 }
611 // Checks if {node} is currently live, ignoring the IsRequiredWhenUnused
612 // effect.
614 return !IsDefined(node) && IsReallyUsed(node);
615 }
616
617 // Gets the effect level of {node}.
618 int GetEffectLevel(turboshaft::OpIndex node) const;
619
620 // Gets the effect level of {node}, appropriately adjusted based on
621 // continuation flags if the node is a branch.
623
625 const std::map<uint32_t, int> GetVirtualRegistersForTesting() const;
626
627 // Check if we can generate loads and stores of ExternalConstants relative
628 // to the roots register.
630 const ExternalReference& reference) const;
631 // Check if we can use the roots register to access GC roots.
632 bool CanUseRootsRegister() const;
633
// Convenience accessor: forwards to the instruction sequence's isolate.
634 Isolate* isolate() const { return sequence()->isolate(); }
635
639
641 size_t projection_index);
642 template <typename Op>
644 const Op& op = Cast<Op>(node);
645 return InputsImpl(op, std::make_index_sequence<Op::input_count>());
646 }
647 template <typename Op, std::size_t... Is>
648 auto InputsImpl(const Op& op, std::index_sequence<Is...>) {
649 return std::make_tuple(op.input(Is)...);
650 }
651
652 // When we want to do branch-if-overflow fusion, we need to be mindful of the
653 // 1st projection of the OverflowBinop:
654 // - If it has no uses, all good, we can do the fusion.
655 // - If it has any uses, then they must all be already defined: doing the
656 // fusion will lead to emitting the 1st projection, and any non-defined
657 // operation is earlier in the graph by construction, which means that it
658 // won't be able to use the 1st projection that will now be defined later.
660
661 // Records that this ProtectedLoad node can be deleted if not used, even
662 // though it has a required_when_unused effect.
664 DCHECK(this->IsProtectedLoad(node));
665 protected_loads_to_remove_->Add(node.id());
666 }
667
668 // Records that this node embeds a ProtectedLoad as operand, and so it is
669 // itself a "protected" instruction, for which we'll need to record the source
670 // position.
674
676 bool IsCommutative(turboshaft::OpIndex node) const;
678 return schedule_->PreviousIndex(block->end());
679 }
680
681 private:
683
688
690 DeoptimizeReason reason, uint32_t node_id,
691 FeedbackSource const& feedback,
692 turboshaft::OpIndex frame_state,
694
695 void EmitTableSwitch(const SwitchInfo& sw,
696 InstructionOperand const& index_operand);
697 void EmitBinarySearchSwitch(const SwitchInfo& sw,
698 InstructionOperand const& value_operand);
699
701 int GetRename(int virtual_register);
703 void UpdateRenames(Instruction* instruction);
705
706 // Inform the instruction selection that {node} was just defined.
708
709 // Inform the instruction selection that {node} has at least one use and we
710 // will need to generate code for it.
712
713 // Sets the effect level of {node}.
714 void SetEffectLevel(turboshaft::OpIndex node, int effect_level);
715
716 // Inform the register allocation of the representation of the value produced
717 // by {node}.
748
749 // Inform the register allocation of the representation of the unallocated
750 // operand {op}.
752 const InstructionOperand& op);
753
761
762 // Initialize the call buffer with the InstructionOperands, nodes, etc,
763 // corresponding
764 // to the inputs and outputs of the call.
765 // {call_code_immediate} to generate immediate operands to calls of code.
766 // {call_address_immediate} to generate immediate operands to address calls.
769 turboshaft::OptionalOpIndex frame_state_opt,
771 int return_count, int stack_slot_delta = 0);
773
775
781 StateObjectDeduplicator* deduplicator,
787 StateObjectDeduplicator* deduplicator,
793 StateObjectDeduplicator* deduplicator,
795 MachineType type,
797
798 // ===========================================================================
799 // ============= Architecture-specific graph covering methods. ===============
800 // ===========================================================================
801
802 // Visit nodes in the given block and generate code.
803 void VisitBlock(const turboshaft::Block* block);
804
805 // Visit the node for the control flow at the end of the block, generating
806 // code if necessary.
807 void VisitControl(const turboshaft::Block* block);
808
809 // Visit the node and generate code, if any.
811
812 // Visit the node and generate code for IEEE 754 functions.
815
816#define DECLARE_GENERATOR_T(x) void Visit##x(turboshaft::OpIndex node);
817 DECLARE_GENERATOR_T(Word32And)
818 DECLARE_GENERATOR_T(Word32Xor)
819 DECLARE_GENERATOR_T(Int32Add)
820 DECLARE_GENERATOR_T(Int32Sub)
821 DECLARE_GENERATOR_T(Int32Mul)
822 DECLARE_GENERATOR_T(Int32MulHigh)
823 DECLARE_GENERATOR_T(Int32Div)
824 DECLARE_GENERATOR_T(Int32Mod)
825 DECLARE_GENERATOR_T(Uint32Div)
826 DECLARE_GENERATOR_T(Uint32Mod)
827 DECLARE_GENERATOR_T(Uint32MulHigh)
828 DECLARE_GENERATOR_T(Word32Or)
829 DECLARE_GENERATOR_T(Word32Sar)
830 DECLARE_GENERATOR_T(Word32Shl)
831 DECLARE_GENERATOR_T(Word32Shr)
832 DECLARE_GENERATOR_T(Word32Rol)
833 DECLARE_GENERATOR_T(Word32Ror)
834 DECLARE_GENERATOR_T(Word64Shl)
835 DECLARE_GENERATOR_T(Word64Sar)
836 DECLARE_GENERATOR_T(Word64Shr)
837 DECLARE_GENERATOR_T(Word64Rol)
838 DECLARE_GENERATOR_T(Word64Ror)
839 DECLARE_GENERATOR_T(Int32AddWithOverflow)
840 DECLARE_GENERATOR_T(Int32MulWithOverflow)
841 DECLARE_GENERATOR_T(Int32SubWithOverflow)
842 DECLARE_GENERATOR_T(Int64AddWithOverflow)
843 DECLARE_GENERATOR_T(Int64SubWithOverflow)
844 DECLARE_GENERATOR_T(Int64MulWithOverflow)
845 DECLARE_GENERATOR_T(Int64Add)
846 DECLARE_GENERATOR_T(Word64And)
847 DECLARE_GENERATOR_T(Word64Or)
848 DECLARE_GENERATOR_T(Word64Xor)
849 DECLARE_GENERATOR_T(Int64Sub)
850 DECLARE_GENERATOR_T(Int64Mul)
851 DECLARE_GENERATOR_T(Int64MulHigh)
852 DECLARE_GENERATOR_T(Int64Div)
853 DECLARE_GENERATOR_T(Int64Mod)
854 DECLARE_GENERATOR_T(Uint64Div)
855 DECLARE_GENERATOR_T(Uint64Mod)
856 DECLARE_GENERATOR_T(Uint64MulHigh)
857 DECLARE_GENERATOR_T(Word32AtomicStore)
858 DECLARE_GENERATOR_T(Word64AtomicStore)
859 DECLARE_GENERATOR_T(Word32Equal)
860 DECLARE_GENERATOR_T(Word64Equal)
861 DECLARE_GENERATOR_T(Int32LessThan)
862 DECLARE_GENERATOR_T(Int32LessThanOrEqual)
863 DECLARE_GENERATOR_T(Int64LessThan)
864 DECLARE_GENERATOR_T(Int64LessThanOrEqual)
865 DECLARE_GENERATOR_T(Uint32LessThan)
866 DECLARE_GENERATOR_T(Uint32LessThanOrEqual)
867 DECLARE_GENERATOR_T(Uint64LessThan)
868 DECLARE_GENERATOR_T(Uint64LessThanOrEqual)
869 DECLARE_GENERATOR_T(Float64Sub)
870 DECLARE_GENERATOR_T(Float64Div)
871 DECLARE_GENERATOR_T(Float32Equal)
872 DECLARE_GENERATOR_T(Float32LessThan)
873 DECLARE_GENERATOR_T(Float32LessThanOrEqual)
874 DECLARE_GENERATOR_T(Float64Equal)
875 DECLARE_GENERATOR_T(Float64LessThan)
876 DECLARE_GENERATOR_T(Float64LessThanOrEqual)
878 DECLARE_GENERATOR_T(StackPointerGreaterThan)
880 DECLARE_GENERATOR_T(ProtectedStore)
881 DECLARE_GENERATOR_T(BitcastTaggedToWord)
882 DECLARE_GENERATOR_T(BitcastWordToTagged)
883 DECLARE_GENERATOR_T(BitcastSmiToWord)
884 DECLARE_GENERATOR_T(ChangeInt32ToInt64)
885 DECLARE_GENERATOR_T(ChangeInt32ToFloat64)
886 DECLARE_GENERATOR_T(ChangeFloat32ToFloat64)
887 DECLARE_GENERATOR_T(RoundFloat64ToInt32)
888 DECLARE_GENERATOR_T(TruncateFloat64ToWord32)
889 DECLARE_GENERATOR_T(TruncateFloat64ToFloat32)
890 DECLARE_GENERATOR_T(TruncateFloat64ToFloat16RawBits)
891 DECLARE_GENERATOR_T(TruncateFloat32ToInt32)
892 DECLARE_GENERATOR_T(TruncateFloat32ToUint32)
893 DECLARE_GENERATOR_T(ChangeFloat16RawBitsToFloat64)
894 DECLARE_GENERATOR_T(ChangeFloat64ToInt32)
895 DECLARE_GENERATOR_T(ChangeFloat64ToUint32)
896 DECLARE_GENERATOR_T(ChangeFloat64ToInt64)
897 DECLARE_GENERATOR_T(ChangeFloat64ToUint64)
898 DECLARE_GENERATOR_T(TruncateFloat64ToInt64)
899 DECLARE_GENERATOR_T(RoundInt32ToFloat32)
900 DECLARE_GENERATOR_T(RoundInt64ToFloat32)
901 DECLARE_GENERATOR_T(RoundInt64ToFloat64)
902 DECLARE_GENERATOR_T(RoundUint32ToFloat32)
903 DECLARE_GENERATOR_T(RoundUint64ToFloat32)
904 DECLARE_GENERATOR_T(RoundUint64ToFloat64)
905 DECLARE_GENERATOR_T(ChangeInt64ToFloat64)
906 DECLARE_GENERATOR_T(ChangeUint32ToFloat64)
907 DECLARE_GENERATOR_T(ChangeUint32ToUint64)
908 DECLARE_GENERATOR_T(Float64ExtractLowWord32)
909 DECLARE_GENERATOR_T(Float64ExtractHighWord32)
910 DECLARE_GENERATOR_T(Float32Add)
911 DECLARE_GENERATOR_T(Float32Sub)
912 DECLARE_GENERATOR_T(Float32Mul)
913 DECLARE_GENERATOR_T(Float32Div)
914 DECLARE_GENERATOR_T(Float32Max)
915 DECLARE_GENERATOR_T(Float32Min)
916 DECLARE_GENERATOR_T(Float64Atan2)
917 DECLARE_GENERATOR_T(Float64Max)
918 DECLARE_GENERATOR_T(Float64Min)
920 DECLARE_GENERATOR_T(Float64Mul)
921 DECLARE_GENERATOR_T(Float64Mod)
922 DECLARE_GENERATOR_T(Float64Pow)
923 DECLARE_GENERATOR_T(BitcastWord32ToWord64)
924 DECLARE_GENERATOR_T(BitcastFloat32ToInt32)
925 DECLARE_GENERATOR_T(BitcastFloat64ToInt64)
926 DECLARE_GENERATOR_T(BitcastInt32ToFloat32)
927 DECLARE_GENERATOR_T(BitcastInt64ToFloat64)
928 DECLARE_GENERATOR_T(Float32Abs)
929 DECLARE_GENERATOR_T(Float32Neg)
930 DECLARE_GENERATOR_T(Float32RoundDown)
931 DECLARE_GENERATOR_T(Float32RoundTiesEven)
932 DECLARE_GENERATOR_T(Float32RoundTruncate)
933 DECLARE_GENERATOR_T(Float32RoundUp)
934 DECLARE_GENERATOR_T(Float32Sqrt)
935 DECLARE_GENERATOR_T(Float64Abs)
936 DECLARE_GENERATOR_T(Float64Acos)
937 DECLARE_GENERATOR_T(Float64Acosh)
938 DECLARE_GENERATOR_T(Float64Asin)
939 DECLARE_GENERATOR_T(Float64Asinh)
940 DECLARE_GENERATOR_T(Float64Atan)
941 DECLARE_GENERATOR_T(Float64Atanh)
942 DECLARE_GENERATOR_T(Float64Cbrt)
943 DECLARE_GENERATOR_T(Float64Cos)
944 DECLARE_GENERATOR_T(Float64Cosh)
945 DECLARE_GENERATOR_T(Float64Exp)
946 DECLARE_GENERATOR_T(Float64Expm1)
947 DECLARE_GENERATOR_T(Float64Log)
948 DECLARE_GENERATOR_T(Float64Log1p)
949 DECLARE_GENERATOR_T(Float64Log10)
950 DECLARE_GENERATOR_T(Float64Log2)
951 DECLARE_GENERATOR_T(Float64Neg)
952 DECLARE_GENERATOR_T(Float64RoundDown)
953 DECLARE_GENERATOR_T(Float64RoundTiesAway)
954 DECLARE_GENERATOR_T(Float64RoundTiesEven)
955 DECLARE_GENERATOR_T(Float64RoundTruncate)
956 DECLARE_GENERATOR_T(Float64RoundUp)
957 DECLARE_GENERATOR_T(Float64Sin)
958 DECLARE_GENERATOR_T(Float64Sinh)
959 DECLARE_GENERATOR_T(Float64Sqrt)
960 DECLARE_GENERATOR_T(Float64Tan)
961 DECLARE_GENERATOR_T(Float64Tanh)
962 DECLARE_GENERATOR_T(Float64SilenceNaN)
963 DECLARE_GENERATOR_T(Word32Clz)
964 DECLARE_GENERATOR_T(Word32Ctz)
965 DECLARE_GENERATOR_T(Word32ReverseBytes)
966 DECLARE_GENERATOR_T(Word32Popcnt)
967 DECLARE_GENERATOR_T(Word64Popcnt)
968 DECLARE_GENERATOR_T(Word64Clz)
969 DECLARE_GENERATOR_T(Word64Ctz)
970 DECLARE_GENERATOR_T(Word64ReverseBytes)
971 DECLARE_GENERATOR_T(SignExtendWord8ToInt32)
972 DECLARE_GENERATOR_T(SignExtendWord16ToInt32)
973 DECLARE_GENERATOR_T(SignExtendWord8ToInt64)
974 DECLARE_GENERATOR_T(SignExtendWord16ToInt64)
975 DECLARE_GENERATOR_T(TruncateInt64ToInt32)
976 DECLARE_GENERATOR_T(StackSlot)
977 DECLARE_GENERATOR_T(LoadRootRegister)
978 DECLARE_GENERATOR_T(DebugBreak)
979 DECLARE_GENERATOR_T(TryTruncateFloat32ToInt64)
980 DECLARE_GENERATOR_T(TryTruncateFloat64ToInt64)
981 DECLARE_GENERATOR_T(TryTruncateFloat32ToUint64)
982 DECLARE_GENERATOR_T(TryTruncateFloat64ToUint64)
983 DECLARE_GENERATOR_T(TryTruncateFloat64ToInt32)
984 DECLARE_GENERATOR_T(TryTruncateFloat64ToUint32)
985 DECLARE_GENERATOR_T(Int32PairAdd)
986 DECLARE_GENERATOR_T(Int32PairSub)
987 DECLARE_GENERATOR_T(Int32PairMul)
988 DECLARE_GENERATOR_T(Word32PairShl)
989 DECLARE_GENERATOR_T(Word32PairShr)
990 DECLARE_GENERATOR_T(Word32PairSar)
991 DECLARE_GENERATOR_T(Float64InsertLowWord32)
992 DECLARE_GENERATOR_T(Float64InsertHighWord32)
993 DECLARE_GENERATOR_T(Comment)
994 DECLARE_GENERATOR_T(Word32ReverseBits)
995 DECLARE_GENERATOR_T(Word64ReverseBits)
996 DECLARE_GENERATOR_T(AbortCSADcheck)
997 DECLARE_GENERATOR_T(StorePair)
998 DECLARE_GENERATOR_T(UnalignedLoad)
999 DECLARE_GENERATOR_T(UnalignedStore)
1000 DECLARE_GENERATOR_T(Int32AbsWithOverflow)
1001 DECLARE_GENERATOR_T(Int64AbsWithOverflow)
1002 DECLARE_GENERATOR_T(TruncateFloat64ToUint32)
1003 DECLARE_GENERATOR_T(SignExtendWord32ToInt64)
1004 DECLARE_GENERATOR_T(TraceInstruction)
1005 DECLARE_GENERATOR_T(MemoryBarrier)
1006 DECLARE_GENERATOR_T(LoadStackCheckOffset)
1007 DECLARE_GENERATOR_T(LoadFramePointer)
1008 DECLARE_GENERATOR_T(LoadParentFramePointer)
1009 DECLARE_GENERATOR_T(ProtectedLoad)
1010 DECLARE_GENERATOR_T(Word32AtomicAdd)
1011 DECLARE_GENERATOR_T(Word32AtomicSub)
1012 DECLARE_GENERATOR_T(Word32AtomicAnd)
1013 DECLARE_GENERATOR_T(Word32AtomicOr)
1014 DECLARE_GENERATOR_T(Word32AtomicXor)
1015 DECLARE_GENERATOR_T(Word32AtomicExchange)
1016 DECLARE_GENERATOR_T(Word32AtomicCompareExchange)
1017 DECLARE_GENERATOR_T(Word64AtomicAdd)
1018 DECLARE_GENERATOR_T(Word64AtomicSub)
1019 DECLARE_GENERATOR_T(Word64AtomicAnd)
1020 DECLARE_GENERATOR_T(Word64AtomicOr)
1021 DECLARE_GENERATOR_T(Word64AtomicXor)
1022 DECLARE_GENERATOR_T(Word64AtomicExchange)
1023 DECLARE_GENERATOR_T(Word64AtomicCompareExchange)
1024 DECLARE_GENERATOR_T(Word32AtomicLoad)
1025 DECLARE_GENERATOR_T(Word64AtomicLoad)
1026 DECLARE_GENERATOR_T(Word32AtomicPairLoad)
1027 DECLARE_GENERATOR_T(Word32AtomicPairStore)
1028 DECLARE_GENERATOR_T(Word32AtomicPairAdd)
1029 DECLARE_GENERATOR_T(Word32AtomicPairSub)
1030 DECLARE_GENERATOR_T(Word32AtomicPairAnd)
1031 DECLARE_GENERATOR_T(Word32AtomicPairOr)
1032 DECLARE_GENERATOR_T(Word32AtomicPairXor)
1033 DECLARE_GENERATOR_T(Word32AtomicPairExchange)
1034 DECLARE_GENERATOR_T(Word32AtomicPairCompareExchange)
1035 DECLARE_GENERATOR_T(Simd128ReverseBytes)
1038 IF_WASM(DECLARE_GENERATOR_T, LoadStackPointer)
1039 IF_WASM(DECLARE_GENERATOR_T, SetStackPointer)
1040#undef DECLARE_GENERATOR_T
1041
1042 // Visit the load node with a value and opcode to replace with.
1045 void VisitLoadTransform(Node* node, Node* value, InstructionCode opcode);
1050 void VisitPhi(turboshaft::OpIndex node);
1053 void VisitCall(turboshaft::OpIndex call, turboshaft::Block* handler = {});
1058 void VisitGoto(turboshaft::Block* target);
1060 turboshaft::Block* fbranch);
1061 void VisitSwitch(turboshaft::OpIndex node, const SwitchInfo& sw);
1062 void VisitDeoptimize(DeoptimizeReason reason, uint32_t node_id,
1063 FeedbackSource const& feedback,
1064 turboshaft::OpIndex frame_state);
1067 void VisitThrow(Node* node);
1073
1075
1077 FlagsContinuation* cont);
1078
1080 FlagsContinuation* cont);
1081
1083 const CallDescriptor* call_descriptor,
1084 turboshaft::OpIndex node);
1086 const CallDescriptor* call_descriptor,
1087 turboshaft::OpIndex node);
1088
1089 // In LOONG64, calling convention uses free GP param register to pass
1090 // floating-point arguments when no FP param register is available. But
1091 // gap does not support moving from FPR to GPR, so we add EmitMoveFPRToParam
1092 // to complete movement.
1094 // Moving floating-point param from GP param register to FPR to participate in
1095 // subsequent operations, whether CallCFunction or normal floating-point
1096 // operations.
1097 void EmitMoveParamToFPR(turboshaft::OpIndex node, int index);
1098
1099 bool CanProduceSignalingNaN(Node* node);
1100
1101 void AddOutputToSelectContinuation(OperandGenerator* g, int first_input_index,
1102 turboshaft::OpIndex node);
1103
1105 FlagsContinuation* cont);
1106
1107 // ===========================================================================
1108 // ============= Vector instruction (SIMD) helper fns. =======================
1109 // ===========================================================================
1111
1112#if V8_ENABLE_WEBASSEMBLY
1113 // Canonicalize shuffles to make pattern matching simpler. Returns the shuffle
1114 // indices, and a boolean indicating if the shuffle is a swizzle (one input).
1115 template <const int simd_size = kSimd128Size>
1116 void CanonicalizeShuffle(TurboshaftAdapter::SimdShuffleView& view,
1117 uint8_t* shuffle, bool* is_swizzle)
1118 requires(simd_size == kSimd128Size || simd_size == kSimd256Size)
1119 {
1120 // Get raw shuffle indices.
1121 if constexpr (simd_size == kSimd128Size) {
1122 DCHECK(view.isSimd128());
1123 memcpy(shuffle, view.data(), kSimd128Size);
1124 } else if constexpr (simd_size == kSimd256Size) {
1125 DCHECK(!view.isSimd128());
1126 memcpy(shuffle, view.data(), kSimd256Size);
1127 } else {
1128 UNREACHABLE();
1129 }
1130 bool needs_swap;
1131 bool inputs_equal =
1132 GetVirtualRegister(view.input(0)) == GetVirtualRegister(view.input(1));
1134 &needs_swap, is_swizzle);
1135 if (needs_swap) {
1136 SwapShuffleInputs(view);
1137 }
1138 // Duplicate the first input; for some shuffles on some architectures, it's
1139 // easiest to implement a swizzle as a shuffle so it might be used.
1140 if (*is_swizzle) {
1141 view.DuplicateFirstInput();
1142 }
1143 }
1144
1145 // Swaps the two first input operands of the node, to help match shuffles
1146 // to specific architectural instructions.
1147 void SwapShuffleInputs(TurboshaftAdapter::SimdShuffleView& node);
1148
1149#if V8_ENABLE_WASM_DEINTERLEAVED_MEM_OPS
1150 void VisitSimd128LoadPairDeinterleave(turboshaft::OpIndex node);
1151#endif // V8_ENABLE_WASM_DEINTERLEAVED_MEM_OPS
1152
1153#if V8_ENABLE_WASM_SIMD256_REVEC
1154 void VisitSimd256LoadTransform(turboshaft::OpIndex node);
1155
1156#ifdef V8_TARGET_ARCH_X64
1157 void VisitSimd256Shufd(turboshaft::OpIndex node);
1158 void VisitSimd256Shufps(turboshaft::OpIndex node);
1159 void VisitSimd256Unpack(turboshaft::OpIndex node);
1160 void VisitSimdPack128To256(turboshaft::OpIndex node);
1161#endif // V8_TARGET_ARCH_X64
1162#endif // V8_ENABLE_WASM_SIMD256_REVEC
1163
1164#ifdef V8_TARGET_ARCH_X64
1165 bool CanOptimizeF64x2PromoteLowF32x4(turboshaft::OpIndex node);
1166#endif
1167
1168#endif // V8_ENABLE_WEBASSEMBLY
1169
1170 // ===========================================================================
1171
1173 Linkage* linkage() const { return linkage_; }
1180 Zone* instruction_zone() const { return sequence()->zone(); }
1181 Zone* zone() const { return zone_; }
1182
1187
1189 const turboshaft::ComparisonOp& op) const;
1190
1194 ArchOpcode int8_op, ArchOpcode uint8_op,
1195 ArchOpcode int16_op,
1196 ArchOpcode uint16_op,
1197 ArchOpcode word32_op);
1199 ArchOpcode uint8_op,
1200 ArchOpcode uint16_op,
1201 ArchOpcode uint32_op,
1202 ArchOpcode uint64_op);
1204 ArchOpcode uint16_op, ArchOpcode uint32_op);
1205
1206#if V8_TARGET_ARCH_64_BIT
1207 bool ZeroExtendsWord32ToWord64(turboshaft::OpIndex node,
1208 int recursion_depth = 0);
1209 void MarkNodeAsNotZeroExtended(turboshaft::OpIndex node);
1210 bool ZeroExtendsWord32ToWord64NoPhis(turboshaft::OpIndex node);
1211
1212 enum class Upper32BitsState : uint8_t {
1213 kNotYetChecked,
1214 kZero,
1215 kMayBeNonZero,
1216 };
1217#endif // V8_TARGET_ARCH_64_BIT
1218
1222
1225
// Hash functor for FrameStateInput cache keys: combines the node id and
// the (enum) kind into a single hash via base::hash_combine.
1226 struct Hash {
1227 size_t operator()(FrameStateInput const& source) const {
1228 return base::hash_combine(source.node,
1229 static_cast<size_t>(source.kind));
1230 }
1231 };
1232
1233 struct Equal {
1235 FrameStateInput const& rhs) const {
1236 return lhs.node == rhs.node && lhs.kind == rhs.kind;
1237 }
1238 };
1239 };
1240
1241 struct CachedStateValues;
1242 class CachedStateValuesBuilder;
1243
1244 // ===========================================================================
1245
1246 Zone* const zone_;
1270 typename FrameStateInput::Hash,
1271 typename FrameStateInput::Equal>
1273
1279 // The broker is only used for unparking the LocalHeap for diagnostic printing
1280 // for failed StaticAsserts.
1282
1283 // Store the maximal unoptimized frame height and an maximal number of pushed
1284 // arguments (for calls). Later used to apply an offset to stack checks.
1287
1288 // Turboshaft-adapter only.
1289 std::optional<turboshaft::UseMap> turboshaft_use_map_;
1290 std::optional<BitVector> protected_loads_to_remove_;
1291 std::optional<BitVector> additional_protected_instructions_;
1292
1293#if V8_TARGET_ARCH_64_BIT
1294 size_t node_count_;
1295
1296 // Holds lazily-computed results for whether phi nodes guarantee their upper
1297 // 32 bits to be zero. Indexed by node ID; nobody reads or writes the values
1298 // for non-phi nodes.
1299 ZoneVector<Upper32BitsState> phi_states_;
1300#endif
1301};
1302
1303} // namespace compiler
1304} // namespace internal
1305} // namespace v8
1306
1307#endif // V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_
Schedule * schedule
Builtins::Kind kind
Definition builtins.cc:40
static constexpr U encode(T value)
Definition bit-field.h:55
static unsigned SupportedFeatures()
static LinkageLocation ForAnyRegister(MachineType type=MachineType::None())
static FlagsContinuationT ForDeoptimize(FlagsCondition condition, DeoptimizeReason reason, uint32_t node_id, FeedbackSource const &feedback, turboshaft::OpIndex frame_state)
static FlagsContinuationT ForConditionalSet(compare_chain_t &compares, uint32_t num_conditional_compares, FlagsCondition set_condition, turboshaft::OpIndex result)
FlagsContinuationT(FlagsCondition condition, TrapId trap_id)
static FlagsContinuationT ForConditionalBranch(compare_chain_t &compares, uint32_t num_conditional_compares, FlagsCondition branch_condition, turboshaft::Block *true_block, turboshaft::Block *false_block)
static FlagsContinuationT ForSelect(FlagsCondition condition, turboshaft::OpIndex result, turboshaft::OpIndex true_value, turboshaft::OpIndex false_value)
FlagsContinuationT(FlagsCondition condition, turboshaft::OpIndex result, turboshaft::OpIndex true_value, turboshaft::OpIndex false_value)
void OverwriteAndNegateIfEqual(FlagsCondition condition)
FlagsContinuationT(FlagsMode mode, FlagsCondition condition, turboshaft::Block *true_block, turboshaft::Block *false_block)
InstructionCode Encode(InstructionCode opcode)
static FlagsContinuationT ForSet(FlagsCondition condition, turboshaft::OpIndex result)
FlagsContinuationT(compare_chain_t &compares, uint32_t num_conditional_compares, FlagsCondition set_condition, turboshaft::OpIndex result)
FlagsContinuationT(FlagsMode mode, FlagsCondition condition, DeoptimizeReason reason, uint32_t node_id, FeedbackSource const &feedback, turboshaft::OpIndex frame_state)
static FlagsContinuationT ForDeoptimizeForTesting(FlagsCondition condition, DeoptimizeReason reason, uint32_t node_id, FeedbackSource const &feedback, turboshaft::OpIndex frame_state)
FlagsContinuationT(compare_chain_t &compares, uint32_t num_conditional_compares, FlagsCondition branch_condition, turboshaft::Block *true_block, turboshaft::Block *false_block)
FlagsContinuationT(FlagsCondition condition, turboshaft::OpIndex result)
std::array< ConditionalCompare, kMaxCompareChainSize > compare_chain_t
static FlagsContinuationT ForBranch(FlagsCondition condition, turboshaft::Block *true_block, turboshaft::Block *false_block)
static FlagsContinuationT ForTrap(FlagsCondition condition, TrapId trap_id)
void VisitBitcastWord32PairToFloat64(turboshaft::OpIndex node)
bool IsUsed(turboshaft::OpIndex node) const
void SetRename(turboshaft::OpIndex node, turboshaft::OpIndex rename)
void MarkPairProjectionsAsWord32(turboshaft::OpIndex node)
size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor *descriptor, turboshaft::OpIndex state, OperandGenerator *g, StateObjectDeduplicator *deduplicator, InstructionOperandVector *inputs, FrameStateInputKind kind, Zone *zone)
const ZoneVector< std::pair< int, int > > & instr_origins() const
void VisitWordCompareZero(turboshaft::OpIndex user, turboshaft::OpIndex value, FlagsContinuation *cont)
void VisitBlock(const turboshaft::Block *block)
FlagsCondition GetComparisonFlagCondition(const turboshaft::ComparisonOp &op) const
void AddOutputToSelectContinuation(OperandGenerator *g, int first_input_index, turboshaft::OpIndex node)
bool CanDoBranchIfOverflowFusion(turboshaft::OpIndex node)
void EmitPrepareArguments(ZoneVector< PushParameter > *arguments, const CallDescriptor *call_descriptor, turboshaft::OpIndex node)
void VisitI8x16RelaxedSwizzle(turboshaft::OpIndex node)
Instruction * Emit(InstructionCode opcode, InstructionOperand output, size_t temp_count=0, InstructionOperand *temps=nullptr)
InstructionSelector::EnableRootsRelativeAddressing enable_roots_relative_addressing_
bool CanCover(turboshaft::OpIndex user, turboshaft::OpIndex node) const
bool IsDefined(turboshaft::OpIndex node) const
void MarkAsRepresentation(MachineRepresentation rep, turboshaft::OpIndex node)
InstructionSelector::EnableSwitchJumpTable enable_switch_jump_table_
ZoneVector< std::pair< int, int > > instr_origins_
FrameStateDescriptor * GetFrameStateDescriptor(turboshaft::OpIndex node)
void TryPrepareScheduleFirstProjection(turboshaft::OpIndex maybe_projection)
std::optional< BitVector > additional_protected_instructions_
bool IsReallyUsed(turboshaft::OpIndex node) const
void EmitMoveFPRToParam(InstructionOperand *op, LinkageLocation location)
Instruction * EmitWithContinuation(InstructionCode opcode, FlagsContinuation *cont)
turboshaft::OpIndex block_terminator(const turboshaft::Block *block) const
void MarkAsRepresentation(turboshaft::RegisterRepresentation rep, turboshaft::OpIndex node)
void VisitWord64AtomicBinaryOperation(turboshaft::OpIndex node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op, ArchOpcode uint64_op)
void VisitLoadTransform(Node *node, Node *value, InstructionCode opcode)
const std::map< uint32_t, int > GetVirtualRegistersForTesting() const
auto InputsImpl(const Op &op, std::index_sequence< Is... >)
size_t AddOperandToStateValueDescriptor(StateValueList *values, InstructionOperandVector *inputs, OperandGenerator *g, StateObjectDeduplicator *deduplicator, turboshaft::OpIndex input, MachineType type, FrameStateInputKind kind, Zone *zone)
void SetProtectedLoadToRemove(turboshaft::OpIndex node)
void InitializeCallBuffer(turboshaft::OpIndex call, CallBuffer *buffer, CallBufferFlags flags, turboshaft::OpIndex callee, turboshaft::OptionalOpIndex frame_state_opt, base::Vector< const turboshaft::OpIndex > arguments, int return_count, int stack_slot_delta=0)
bool CanCoverProtectedLoad(turboshaft::OpIndex user, turboshaft::OpIndex node) const
void VisitDeoptimize(DeoptimizeReason reason, uint32_t node_id, FeedbackSource const &feedback, turboshaft::OpIndex frame_state)
void EmitTableSwitch(const SwitchInfo &sw, InstructionOperand const &index_operand)
void SetEffectLevel(turboshaft::OpIndex node, int effect_level)
void VisitControl(const turboshaft::Block *block)
size_t AddInputsToFrameStateDescriptor(StateValueList *values, InstructionOperandVector *inputs, OperandGenerator *g, StateObjectDeduplicator *deduplicator, turboshaft::OpIndex node, FrameStateInputKind kind, Zone *zone)
InstructionSelector::EnableScheduling enable_scheduling_
void VisitStackPointerGreaterThan(turboshaft::OpIndex node, FlagsContinuation *cont)
bool IsReallyLive(turboshaft::OpIndex node) const
std::optional< BailoutReason > SelectInstructions()
void ConsumeEqualZero(turboshaft::OpIndex *user, turboshaft::OpIndex *value, FlagsContinuation *cont)
turboshaft::OptionalOpIndex FindProjection(turboshaft::OpIndex node, size_t projection_index)
InstructionSelectorT(Zone *zone, size_t node_count, Linkage *linkage, InstructionSequence *sequence, turboshaft::Graph *schedule, source_position_table_t *source_positions, Frame *frame, InstructionSelector::EnableSwitchJumpTable enable_switch_jump_table, TickCounter *tick_counter, JSHeapBroker *broker, size_t *max_unoptimized_frame_height, size_t *max_pushed_argument_count, InstructionSelector::SourcePositionMode source_position_mode=InstructionSelector::kCallSourcePositions, Features features=SupportedFeatures(), InstructionSelector::EnableScheduling enable_scheduling=v8_flags.turbo_instruction_scheduling ? InstructionSelector::kEnableScheduling :InstructionSelector::kDisableScheduling, InstructionSelector::EnableRootsRelativeAddressing enable_roots_relative_addressing=InstructionSelector::kDisableRootsRelativeAddressing, InstructionSelector::EnableTraceTurboJson trace_turbo=InstructionSelector::kDisableTraceTurboJson)
void VisitLoad(turboshaft::OpIndex node, turboshaft::OpIndex value, InstructionCode opcode)
void VisitCall(turboshaft::OpIndex call, turboshaft::Block *handler={})
bool CanAddressRelativeToRootsRegister(const ExternalReference &reference) const
std::optional< turboshaft::UseMap > turboshaft_use_map_
bool IsLive(turboshaft::OpIndex node) const
bool IsCommutative(turboshaft::OpIndex node) const
bool IsOnlyUserOfNodeInSameBlock(turboshaft::OpIndex user, turboshaft::OpIndex node) const
int GetEffectLevel(turboshaft::OpIndex node) const
void VisitFloat64Ieee754Binop(turboshaft::OpIndex, InstructionCode code)
InstructionSelector::SourcePositionMode const source_position_mode_
ZoneUnorderedMap< FrameStateInput, CachedStateValues *, typename FrameStateInput::Hash, typename FrameStateInput::Equal > state_values_cache_
InstructionSelector::EnableTraceTurboJson trace_turbo_
void VisitSwitch(turboshaft::OpIndex node, const SwitchInfo &sw)
void VisitBranch(turboshaft::OpIndex input, turboshaft::Block *tbranch, turboshaft::Block *fbranch)
base::Vector< const turboshaft::OpIndex > turboshaft_uses(turboshaft::OpIndex node) const
void UpdateSourcePosition(Instruction *instruction, turboshaft::OpIndex node)
void AppendDeoptimizeArguments(InstructionOperandVector *args, DeoptimizeReason reason, uint32_t node_id, FeedbackSource const &feedback, turboshaft::OpIndex frame_state, DeoptimizeKind kind=DeoptimizeKind::kEager)
void VisitWord64AtomicNarrowBinop(Node *node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op)
void EmitMoveParamToFPR(turboshaft::OpIndex node, int index)
void EmitBinarySearchSwitch(const SwitchInfo &sw, InstructionOperand const &value_operand)
void VisitFloat64Ieee754Unop(turboshaft::OpIndex, InstructionCode code)
void VisitWord32AtomicBinaryOperation(turboshaft::OpIndex node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op, ArchOpcode uint16_op, ArchOpcode word32_op)
void EmitPrepareResults(ZoneVector< PushParameter > *results, const CallDescriptor *call_descriptor, turboshaft::OpIndex node)
InstructionSelector & operator=(const InstructionSelector &)=delete
InstructionSelector(const InstructionSelector &)=delete
OpIndex PreviousIndex(const OpIndex idx) const
Definition graph.h:695
const underlying_operation_t< Op > & Cast(V< AnyOrNone > op_idx) const
static void CanonicalizeShuffle(bool inputs_equal, uint8_t *shuffle, bool *needs_swap, bool *is_swizzle)
const PropertyKind kind_
base::Vector< const DirectHandle< Object > > args
Definition execution.cc:74
SourcePositionTable * source_positions
JSHeapBroker * broker
Linkage * linkage
#define DECLARE_GENERATOR_T(x)
Instruction * instr
V8_INLINE size_t hash_combine(size_t seed, size_t hash)
Definition hashing.h:77
TNode< Float64T > Float64Add(TNode< Float64T > a, TNode< Float64T > b)
FlagsCondition CommuteFlagsCondition(FlagsCondition condition)
FlagsCondition NegateFlagsCondition(FlagsCondition condition)
constexpr int kSimd128Size
Definition globals.h:706
constexpr int kSimd256Size
Definition globals.h:709
V8_EXPORT_PRIVATE FlagValues v8_flags
i::Address Load(i::Address address)
Definition unwinder.cc:19
#define MACHINE_SIMD256_OP_LIST(V)
Definition opcodes.h:1159
#define MACHINE_SIMD128_OP_LIST(V)
Definition opcodes.h:879
uint32_t recursion_depth
Node * node_
#define DCHECK_NOT_NULL(val)
Definition logging.h:492
#define DCHECK(condition)
Definition logging.h:482
#define V8_EXPORT_PRIVATE
Definition macros.h:460
#define IF_WASM(V,...)
Definition macros.h:472
bool operator()(FrameStateInput const &lhs, FrameStateInput const &rhs) const
FrameStateInput(turboshaft::OpIndex node_, FrameStateInputKind kind_)
PushParameterT(turboshaft::OpIndex n={}, LinkageLocation l=LinkageLocation::ForAnyRegister())
base::Vector< const turboshaft::OpIndex > inputs(turboshaft::OpIndex node) const
turboshaft::Opcode opcode(turboshaft::OpIndex node) const
bool IsProtectedLoad(turboshaft::OpIndex node) const