#ifndef V8_COMPILER_BACKEND_RISCV_INSTRUCTION_SELECTOR_RISCV_H_
#define V8_COMPILER_BACKEND_RISCV_INSTRUCTION_SELECTOR_RISCV_H_

#define TRACE(...) PrintF(__VA_ARGS__)

using namespace turboshaft;
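// Shared RISC-V (RV32/RV64) parts of the Turboshaft instruction selector;
// the #if V8_TARGET_ARCH_RISCV64 / V8_TARGET_ARCH_RISCV32 blocks below keep
// the word-size-specific opcode choices apart.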
  if ((constant->IsIntegral() && constant->integral() == 0) ||
      (constant->kind == ConstantOp::Kind::kFloat32 &&
       constant->float32().get_bits() == 0) ||
      (constant->kind == ConstantOp::Kind::kFloat64 &&
       constant->float64().get_bits() == 0))

  if (!constant) return false;
  if (constant->kind == ConstantOp::Kind::kCompressedHeapObject) {

  TRACE("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__);

  RiscvOperandGeneratorT g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(selector->input_at(node, 0)));
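// A unary helper of this shape (VisitRR) simply defines the result register
// and uses the single input; most of the scalar float visitors further down
// delegate to it with the matching kRiscv* opcode.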
  const turboshaft::Simd128ReplaceLaneOp& op =

  RiscvOperandGeneratorT g(selector);
  selector->Emit(opcode, g.DefineSameAsFirst(node),
                 g.UseRegister(selector->input_at(node, 0)),
                 g.UseRegister(selector->input_at(node, 1)),
                 g.UseRegister(selector->input_at(node, 2)));

                              size_t* input_count_return, InstructionOperand* inputs) {
  RiscvOperandGeneratorT g(selector);
  if (g.CanBeImmediate(node, *opcode_return)) {
    inputs[0] = g.UseImmediate(node);
    *input_count_return = 1;
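// TryMatchImmediate: if the operand fits the immediate field of the chosen
// opcode, encode it as a single immediate input (input_count = 1) instead of
// materializing it into a register.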
template <typename Matcher>

  size_t input_count = 0;
  size_t output_count = 0;

  } else if (has_reverse_opcode &&
                 &input_count, &inputs[1])) {
    opcode = reverse_opcode;

  inputs[input_count++] = g.UseOperand(right_node, opcode);
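// VisitBinop presumably tries to fold the right operand into an immediate
// first; if that fails but the operation has a reverse form
// (has_reverse_opcode), it retries with the left operand and switches to
// reverse_opcode, so a constant on either side can still be encoded as an
// immediate. Otherwise the right operand goes through UseOperand.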
template <typename Matcher>

                 reverse_opcode, &cont);

template <typename Matcher>

template <typename Matcher>

void InstructionSelectorT::VisitStackSlot(OpIndex node) {
  Emit(kArchStackSlot, g.DefineAsRegister(node),
       sequence()->AddImmediate(Constant(slot)), 0, nullptr);

void InstructionSelectorT::VisitAbortCSADcheck(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  Emit(kArchAbortCSADcheck, g.NoOutput(),
       g.UseFixed(this->input_at(node, 0), a0));
  const Simd128LoadTransformOp& op =
      this->Get(node).Cast<Simd128LoadTransformOp>();
  bool is_protected = (op.load_kind.with_trap_handler);
  switch (op.transform_kind) {
    case Simd128LoadTransformOp::TransformKind::k8Splat:
      opcode = kRiscvS128LoadSplat;
    case Simd128LoadTransformOp::TransformKind::k16Splat:
      opcode = kRiscvS128LoadSplat;
    case Simd128LoadTransformOp::TransformKind::k32Splat:
      opcode = kRiscvS128LoadSplat;
    case Simd128LoadTransformOp::TransformKind::k64Splat:
      opcode = kRiscvS128LoadSplat;
    case Simd128LoadTransformOp::TransformKind::k8x8S:
      opcode = kRiscvS128Load64ExtendS;
    case Simd128LoadTransformOp::TransformKind::k8x8U:
      opcode = kRiscvS128Load64ExtendU;
    case Simd128LoadTransformOp::TransformKind::k16x4S:
      opcode = kRiscvS128Load64ExtendS;
    case Simd128LoadTransformOp::TransformKind::k16x4U:
      opcode = kRiscvS128Load64ExtendU;
    case Simd128LoadTransformOp::TransformKind::k32x2S:
      opcode = kRiscvS128Load64ExtendS;
    case Simd128LoadTransformOp::TransformKind::k32x2U:
      opcode = kRiscvS128Load64ExtendU;
    case Simd128LoadTransformOp::TransformKind::k32Zero:
      opcode = kRiscvS128Load32Zero;
    case Simd128LoadTransformOp::TransformKind::k64Zero:
      opcode = kRiscvS128Load64Zero;
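// Every *Splat transform shares kRiscvS128LoadSplat, the sign/zero-extending
// 8x8/16x4/32x2 transforms map to kRiscvS128Load64Extend{S,U}, and the
// k32Zero/k64Zero transforms use kRiscvS128Load32Zero/kRiscvS128Load64Zero.
// is_protected records op.load_kind.with_trap_handler; presumably the
// per-case load emission also supplies the SEW/LMUL pair matching the
// transform's element width.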
#ifdef V8_COMPRESS_POINTERS
  if (opcode == kRiscvCmp32) {
    if (right.IsImmediate()) {

  auto left = selector->input_at(node, 0);
  auto right = selector->input_at(node, 1);
    std::swap(left, right);

#if V8_TARGET_ARCH_RISCV64
  if (opcode == kRiscvTst64 || opcode == kRiscvTst32) {
#elif V8_TARGET_ARCH_RISCV32
  if (opcode == kRiscvTst32) {
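// VisitSwitch picks between a jump table and a binary-search lookup with a
// simple cost model: a table costs about 10 + 2 * value_range() space units
// and 3 time units, a lookup 2 + 2 * case_count() space and case_count()
// time, with time weighted 3x. Tables are also rejected for an empty case
// list, a range anchored at INT32_MIN, or a range above
// kMaxTableSwitchValueRange. When a table is used and min_value() is
// non-zero, the switch value is first rebased with kRiscvSub32.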
  static const size_t kMaxTableSwitchValueRange = 2 << 16;
  size_t table_space_cost = 10 + 2 * sw.value_range();
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 2 + 2 * sw.case_count();
  size_t lookup_time_cost = sw.case_count();
  if (sw.case_count() > 0 &&
      table_space_cost + 3 * table_time_cost <=
          lookup_space_cost + 3 * lookup_time_cost &&
      sw.min_value() > std::numeric_limits<int32_t>::min() &&
      sw.value_range() <= kMaxTableSwitchValueRange) {
    if (sw.min_value()) {
      Emit(kRiscvSub32, index_operand, value_operand,

                          FlagsContinuationT* cont) {
  RiscvOperandGeneratorT g(selector);
  selector->EmitWithContinuation(kRiscvCmpZero,
                                 g.UseRegisterOrImmediateZero(value), cont);
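// Comparisons against zero use the dedicated kRiscvCmpZero form;
// UseRegisterOrImmediateZero lets a literal 0 be encoded directly, so the
// constant never has to be materialized into a register first.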
#ifdef V8_TARGET_ARCH_RISCV64
void EmitWord32CompareZero(InstructionSelectorT* selector, OpIndex value,
                           FlagsContinuationT* cont) {
  RiscvOperandGeneratorT g(selector);
  selector->EmitWithContinuation(kRiscvCmpZero32, 0, nullptr,
                                 arraysize(inputs),

void InstructionSelectorT::VisitFloat32Equal(OpIndex node) {

void InstructionSelectorT::VisitFloat32LessThan(OpIndex node) {

void InstructionSelectorT::VisitFloat32LessThanOrEqual(OpIndex node) {
  FlagsContinuationT cont =

void InstructionSelectorT::VisitFloat64Equal(OpIndex node) {

void InstructionSelectorT::VisitFloat64LessThan(OpIndex node) {

void InstructionSelectorT::VisitFloat64LessThanOrEqual(OpIndex node) {
  FlagsContinuationT cont =

void InstructionSelectorT::VisitFloat64ExtractLowWord32(OpIndex node) {
  VisitRR(this, kRiscvFloat64ExtractLowWord32, node);

void InstructionSelectorT::VisitFloat64ExtractHighWord32(OpIndex node) {
  VisitRR(this, kRiscvFloat64ExtractHighWord32, node);

void InstructionSelectorT::VisitFloat64SilenceNaN(OpIndex node) {
  VisitRR(this, kRiscvFloat64SilenceNaN, node);
  RiscvOperandGeneratorT g(this);
  const auto& bitcast =
  Emit(kRiscvFloat64InsertHighWord32, temp, zero, g.Use(hi));

void InstructionSelectorT::VisitFloat64InsertLowWord32(OpIndex node) {
  RiscvOperandGeneratorT g(this);

void InstructionSelectorT::VisitFloat64InsertHighWord32(OpIndex node) {
  RiscvOperandGeneratorT g(this);

void InstructionSelectorT::VisitMemoryBarrier(OpIndex node) {
  RiscvOperandGeneratorT g(this);
                        ZoneVector<PushParameter>* results,
                        const CallDescriptor* call_descriptor,
  RiscvOperandGeneratorT g(this);
    if (!output.location.IsCallerFrameSlot()) continue;
    if (output.node.valid()) {
      DCHECK(!call_descriptor->IsCFunctionCall());
      int offset = call_descriptor->GetOffsetToReturns();
      int reverse_slot = -output.location.GetLocation() - offset;
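// Results returned in caller frame slots are addressed as "reverse slots":
// the negated slot location minus call_descriptor->GetOffsetToReturns().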
                       LinkageLocation location) {}

void InstructionSelectorT::VisitFloat32Abs(OpIndex node) {
  VisitRR(this, kRiscvAbsS, node);

void InstructionSelectorT::VisitFloat64Abs(OpIndex node) {
  VisitRR(this, kRiscvAbsD, node);

void InstructionSelectorT::VisitFloat32Sqrt(OpIndex node) {
  VisitRR(this, kRiscvSqrtS, node);

void InstructionSelectorT::VisitFloat64Sqrt(OpIndex node) {
  VisitRR(this, kRiscvSqrtD, node);

void InstructionSelectorT::VisitFloat32RoundDown(OpIndex node) {
  VisitRR(this, kRiscvFloat32RoundDown, node);

void InstructionSelectorT::VisitFloat32Add(OpIndex node) {

void InstructionSelectorT::VisitFloat64Add(OpIndex node) {

void InstructionSelectorT::VisitFloat32Sub(OpIndex node) {

void InstructionSelectorT::VisitFloat64Sub(OpIndex node) {

void InstructionSelectorT::VisitFloat32Mul(OpIndex node) {

void InstructionSelectorT::VisitFloat64Mul(OpIndex node) {

void InstructionSelectorT::VisitFloat32Div(OpIndex node) {

void InstructionSelectorT::VisitFloat64Div(OpIndex node) {

void InstructionSelectorT::VisitFloat64Mod(OpIndex node) {
  RiscvOperandGeneratorT g(this);
       g.UseFixed(this->input_at(node, 0), fa0),
       g.UseFixed(this->input_at(node, 1), fa1))

void InstructionSelectorT::VisitFloat32Max(OpIndex node) {
  VisitRRR(this, kRiscvFloat32Max, node);

void InstructionSelectorT::VisitFloat64Max(OpIndex node) {
  VisitRRR(this, kRiscvFloat64Max, node);

void InstructionSelectorT::VisitFloat32Min(OpIndex node) {
  VisitRRR(this, kRiscvFloat32Min, node);

void InstructionSelectorT::VisitFloat64Min(OpIndex node) {
  VisitRRR(this, kRiscvFloat64Min, node);
void InstructionSelectorT::VisitTruncateFloat64ToWord32(OpIndex node) {
  VisitRR(this, kArchTruncateDoubleToI, node);

void InstructionSelectorT::VisitRoundFloat64ToInt32(OpIndex node) {
  VisitRR(this, kRiscvTruncWD, node);

void InstructionSelectorT::VisitTruncateFloat64ToFloat32(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  using Rep = turboshaft::RegisterRepresentation;
  const turboshaft::Operation& op = this->Get(value);
  if (op.Is<turboshaft::ChangeOp>()) {
    const turboshaft::ChangeOp& change = op.Cast<turboshaft::ChangeOp>();
    if (change.from == Rep::Word32() && change.to == Rep::Float64()) {
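    // If the truncated double was itself produced by a Word32 -> Float64
    // change, this branch lets the selector convert the original word
    // directly to float32 (presumably kRiscvCvtSW) and skip the intermediate
    // double; the fall-through below emits the generic double -> single
    // conversion kRiscvCvtSD.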
  VisitRR(this, kRiscvCvtSD, node);

void InstructionSelectorT::VisitWord32Shl(OpIndex node) {

void InstructionSelectorT::VisitWord32Shr(OpIndex node) {

void InstructionSelectorT::VisitWord32Sar(OpIndex node) {

void InstructionSelectorT::VisitI32x4ExtAddPairwiseI16x8S(OpIndex node) {
  RiscvOperandGeneratorT g(this);

void InstructionSelectorT::VisitI32x4ExtAddPairwiseI16x8U(OpIndex node) {
  RiscvOperandGeneratorT g(this);

void InstructionSelectorT::VisitI16x8ExtAddPairwiseI8x16S(OpIndex node) {
  RiscvOperandGeneratorT g(this);

void InstructionSelectorT::VisitI16x8ExtAddPairwiseI8x16U(OpIndex node) {
  RiscvOperandGeneratorT g(this);
#define SIMD_INT_TYPE_LIST(V) \

#define SIMD_TYPE_LIST(V) \

#define SIMD_UNOP_LIST2(V)                 \
  V(F32x4Splat, kRiscvVfmvVf, E32, m1)     \
  V(I8x16Neg, kRiscvVnegVv, E8, m1)        \
  V(I16x8Neg, kRiscvVnegVv, E16, m1)       \
  V(I32x4Neg, kRiscvVnegVv, E32, m1)       \
  V(I64x2Neg, kRiscvVnegVv, E64, m1)       \
  V(I8x16Splat, kRiscvVmv, E8, m1)         \
  V(I16x8Splat, kRiscvVmv, E16, m1)        \
  V(I32x4Splat, kRiscvVmv, E32, m1)        \
  V(I64x2Splat, kRiscvVmv, E64, m1)        \
  V(F32x4Neg, kRiscvVfnegVv, E32, m1)      \
  V(F64x2Neg, kRiscvVfnegVv, E64, m1)      \
  V(F64x2Splat, kRiscvVfmvVf, E64, m1)     \
  V(I32x4AllTrue, kRiscvVAllTrue, E32, m1) \
  V(I16x8AllTrue, kRiscvVAllTrue, E16, m1) \
  V(I8x16AllTrue, kRiscvVAllTrue, E8, m1)  \
  V(I64x2AllTrue, kRiscvVAllTrue, E64, m1) \
  V(I64x2Abs, kRiscvVAbs, E64, m1)         \
  V(I32x4Abs, kRiscvVAbs, E32, m1)         \
  V(I16x8Abs, kRiscvVAbs, E16, m1)         \
  V(I8x16Abs, kRiscvVAbs, E8, m1)
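// Each entry in these parameterized lists pairs a SIMD op with an RVV opcode
// plus the SEW (element width, e.g. E32) and LMUL (register grouping, here
// m1) immediates; the code generator presumably uses these to configure
// vtype (vsetvli) before emitting the vector instruction.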
#define SIMD_UNOP_LIST(V)                                        \
  V(F64x2Abs, kRiscvF64x2Abs)                                    \
  V(F64x2Sqrt, kRiscvF64x2Sqrt)                                  \
  V(F64x2ConvertLowI32x4S, kRiscvF64x2ConvertLowI32x4S)          \
  V(F64x2ConvertLowI32x4U, kRiscvF64x2ConvertLowI32x4U)          \
  V(F64x2PromoteLowF32x4, kRiscvF64x2PromoteLowF32x4)            \
  V(F64x2Ceil, kRiscvF64x2Ceil)                                  \
  V(F64x2Floor, kRiscvF64x2Floor)                                \
  V(F64x2Trunc, kRiscvF64x2Trunc)                                \
  V(F64x2NearestInt, kRiscvF64x2NearestInt)                      \
  V(F32x4SConvertI32x4, kRiscvF32x4SConvertI32x4)                \
  V(F32x4UConvertI32x4, kRiscvF32x4UConvertI32x4)                \
  V(F32x4Abs, kRiscvF32x4Abs)                                    \
  V(F32x4Sqrt, kRiscvF32x4Sqrt)                                  \
  V(F32x4DemoteF64x2Zero, kRiscvF32x4DemoteF64x2Zero)            \
  V(F32x4Ceil, kRiscvF32x4Ceil)                                  \
  V(F32x4Floor, kRiscvF32x4Floor)                                \
  V(F32x4Trunc, kRiscvF32x4Trunc)                                \
  V(F32x4NearestInt, kRiscvF32x4NearestInt)                      \
  V(I32x4RelaxedTruncF32x4S, kRiscvI32x4SConvertF32x4)           \
  V(I32x4RelaxedTruncF32x4U, kRiscvI32x4UConvertF32x4)           \
  V(I32x4RelaxedTruncF64x2SZero, kRiscvI32x4TruncSatF64x2SZero)  \
  V(I32x4RelaxedTruncF64x2UZero, kRiscvI32x4TruncSatF64x2UZero)  \
  V(I64x2SConvertI32x4Low, kRiscvI64x2SConvertI32x4Low)          \
  V(I64x2SConvertI32x4High, kRiscvI64x2SConvertI32x4High)        \
  V(I64x2UConvertI32x4Low, kRiscvI64x2UConvertI32x4Low)          \
  V(I64x2UConvertI32x4High, kRiscvI64x2UConvertI32x4High)        \
  V(I32x4SConvertF32x4, kRiscvI32x4SConvertF32x4)                \
  V(I32x4UConvertF32x4, kRiscvI32x4UConvertF32x4)                \
  V(I32x4TruncSatF64x2SZero, kRiscvI32x4TruncSatF64x2SZero)      \
  V(I32x4TruncSatF64x2UZero, kRiscvI32x4TruncSatF64x2UZero)      \
  V(I8x16Popcnt, kRiscvI8x16Popcnt)                              \
  V(S128Not, kRiscvVnot)                                         \
  V(V128AnyTrue, kRiscvV128AnyTrue)
#define SIMD_SHIFT_OP_LIST(V) \

#define SIMD_BINOP_LIST(V)                        \
  V(I64x2Add, kRiscvVaddVv, E64, m1)              \
  V(I32x4Add, kRiscvVaddVv, E32, m1)              \
  V(I16x8Add, kRiscvVaddVv, E16, m1)              \
  V(I8x16Add, kRiscvVaddVv, E8, m1)               \
  V(I64x2Sub, kRiscvVsubVv, E64, m1)              \
  V(I32x4Sub, kRiscvVsubVv, E32, m1)              \
  V(I16x8Sub, kRiscvVsubVv, E16, m1)              \
  V(I8x16Sub, kRiscvVsubVv, E8, m1)               \
  V(I32x4MaxU, kRiscvVmaxuVv, E32, m1)            \
  V(I16x8MaxU, kRiscvVmaxuVv, E16, m1)            \
  V(I8x16MaxU, kRiscvVmaxuVv, E8, m1)             \
  V(I32x4MaxS, kRiscvVmax, E32, m1)               \
  V(I16x8MaxS, kRiscvVmax, E16, m1)               \
  V(I8x16MaxS, kRiscvVmax, E8, m1)                \
  V(I32x4MinS, kRiscvVminsVv, E32, m1)            \
  V(I16x8MinS, kRiscvVminsVv, E16, m1)            \
  V(I8x16MinS, kRiscvVminsVv, E8, m1)             \
  V(I32x4MinU, kRiscvVminuVv, E32, m1)            \
  V(I16x8MinU, kRiscvVminuVv, E16, m1)            \
  V(I8x16MinU, kRiscvVminuVv, E8, m1)             \
  V(I64x2Mul, kRiscvVmulVv, E64, m1)              \
  V(I32x4Mul, kRiscvVmulVv, E32, m1)              \
  V(I16x8Mul, kRiscvVmulVv, E16, m1)              \
  V(I64x2GtS, kRiscvVgtsVv, E64, m1)              \
  V(I32x4GtS, kRiscvVgtsVv, E32, m1)              \
  V(I16x8GtS, kRiscvVgtsVv, E16, m1)              \
  V(I8x16GtS, kRiscvVgtsVv, E8, m1)               \
  V(I64x2GeS, kRiscvVgesVv, E64, m1)              \
  V(I32x4GeS, kRiscvVgesVv, E32, m1)              \
  V(I16x8GeS, kRiscvVgesVv, E16, m1)              \
  V(I8x16GeS, kRiscvVgesVv, E8, m1)               \
  V(I32x4GeU, kRiscvVgeuVv, E32, m1)              \
  V(I16x8GeU, kRiscvVgeuVv, E16, m1)              \
  V(I8x16GeU, kRiscvVgeuVv, E8, m1)               \
  V(I32x4GtU, kRiscvVgtuVv, E32, m1)              \
  V(I16x8GtU, kRiscvVgtuVv, E16, m1)              \
  V(I8x16GtU, kRiscvVgtuVv, E8, m1)               \
  V(I64x2Eq, kRiscvVeqVv, E64, m1)                \
  V(I32x4Eq, kRiscvVeqVv, E32, m1)                \
  V(I16x8Eq, kRiscvVeqVv, E16, m1)                \
  V(I8x16Eq, kRiscvVeqVv, E8, m1)                 \
  V(I64x2Ne, kRiscvVneVv, E64, m1)                \
  V(I32x4Ne, kRiscvVneVv, E32, m1)                \
  V(I16x8Ne, kRiscvVneVv, E16, m1)                \
  V(I8x16Ne, kRiscvVneVv, E8, m1)                 \
  V(I16x8AddSatS, kRiscvVaddSatSVv, E16, m1)      \
  V(I8x16AddSatS, kRiscvVaddSatSVv, E8, m1)       \
  V(I16x8AddSatU, kRiscvVaddSatUVv, E16, m1)      \
  V(I8x16AddSatU, kRiscvVaddSatUVv, E8, m1)       \
  V(I16x8SubSatS, kRiscvVsubSatSVv, E16, m1)      \
  V(I8x16SubSatS, kRiscvVsubSatSVv, E8, m1)       \
  V(I16x8SubSatU, kRiscvVsubSatUVv, E16, m1)      \
  V(I8x16SubSatU, kRiscvVsubSatUVv, E8, m1)       \
  V(F64x2Add, kRiscvVfaddVv, E64, m1)             \
  V(F32x4Add, kRiscvVfaddVv, E32, m1)             \
  V(F64x2Sub, kRiscvVfsubVv, E64, m1)             \
  V(F32x4Sub, kRiscvVfsubVv, E32, m1)             \
  V(F64x2Mul, kRiscvVfmulVv, E64, m1)             \
  V(F32x4Mul, kRiscvVfmulVv, E32, m1)             \
  V(F64x2Div, kRiscvVfdivVv, E64, m1)             \
  V(F32x4Div, kRiscvVfdivVv, E32, m1)             \
  V(S128And, kRiscvVandVv, E8, m1)                \
  V(S128Or, kRiscvVorVv, E8, m1)                  \
  V(S128Xor, kRiscvVxorVv, E8, m1)                \
  V(I16x8Q15MulRSatS, kRiscvVsmulVv, E16, m1)     \
  V(I16x8RelaxedQ15MulRS, kRiscvVsmulVv, E16, m1)
#define UNIMPLEMENTED_SIMD_FP16_OP_LIST(V) \
  V(F16x8ExtractLane)                      \
  V(F16x8ReplaceLane)                      \
  V(F16x8NearestInt)                       \
  V(F16x8SConvertI16x8)                    \
  V(F16x8UConvertI16x8)                    \
  V(I16x8SConvertF16x8)                    \
  V(I16x8UConvertF16x8)                    \
  V(F16x8DemoteF32x4Zero)                  \
  V(F16x8DemoteF64x2Zero)                  \
  V(F32x4PromoteLowF16x8)                  \

#define SIMD_VISIT_UNIMPL_FP16_OP(Name) \
  void InstructionSelectorT::Visit##Name(OpIndex node) { UNIMPLEMENTED(); }

#undef SIMD_VISIT_UNIMPL_FP16_OP
#undef UNIMPLEMENTED_SIMD_FP16_OP_LIST
void InstructionSelectorT::VisitS128AndNot(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  InstructionOperand temp1 = g.TempFpRegister(v0);
  this->Emit(kRiscvVnotVv, temp1, g.UseRegister(this->input_at(node, 1)),
             g.UseImmediate(E8), g.UseImmediate(m1));
  this->Emit(kRiscvVandVv, g.DefineAsRegister(node),
             g.UseRegister(this->input_at(node, 0)), temp1, g.UseImmediate(E8),
             g.UseImmediate(m1));
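// S128AndNot(a, b) = a & ~b: input 1 is inverted with vnot into a temporary
// (the v0 scratch here), then ANDed with input 0 into the destination.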
void InstructionSelectorT::VisitS128Const(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  static const int kUint32Immediates = kSimd128Size / sizeof(uint32_t);
  uint32_t val[kUint32Immediates];
  const turboshaft::Simd128ConstantOp& constant =
  bool all_zeros = !(val[0] || val[1] || val[2] || val[3]);
  bool all_ones = val[0] == UINT32_MAX && val[1] == UINT32_MAX &&
                  val[2] == UINT32_MAX && val[3] == UINT32_MAX;
    Emit(kRiscvS128Zero, dst);
  } else if (all_ones) {
    Emit(kRiscvS128AllOnes, dst);

void InstructionSelectorT::VisitS128Zero(OpIndex node) {
  RiscvOperandGeneratorT g(this);
#define SIMD_VISIT_EXTRACT_LANE(Type, Sign)                                 \
  void InstructionSelectorT::Visit##Type##ExtractLane##Sign(OpIndex node) { \
    VisitRRI(this, kRiscv##Type##ExtractLane##Sign, node);                  \
#undef SIMD_VISIT_EXTRACT_LANE

#define SIMD_VISIT_REPLACE_LANE(Type)                                 \
  void InstructionSelectorT::Visit##Type##ReplaceLane(OpIndex node) { \
    VisitRRIR(this, kRiscv##Type##ReplaceLane, node);                 \
#undef SIMD_VISIT_REPLACE_LANE

#define SIMD_VISIT_UNOP(Name, instruction)               \
  void InstructionSelectorT::Visit##Name(OpIndex node) { \
    VisitRR(this, instruction, node);                    \
#undef SIMD_VISIT_UNOP

#define SIMD_VISIT_SHIFT_OP(Name)                        \
  void InstructionSelectorT::Visit##Name(OpIndex node) { \
    VisitSimdShift(this, kRiscv##Name, node);            \
#undef SIMD_VISIT_SHIFT_OP
#define SIMD_VISIT_BINOP_RVV(Name, instruction, VSEW, LMUL)                  \
  void InstructionSelectorT::Visit##Name(OpIndex node) {                     \
    RiscvOperandGeneratorT g(this);                                          \
    this->Emit(instruction, g.DefineAsRegister(node),                        \
               g.UseRegister(this->input_at(node, 0)),                       \
               g.UseRegister(this->input_at(node, 1)), g.UseImmediate(VSEW), \
               g.UseImmediate(LMUL));                                        \
#undef SIMD_VISIT_BINOP_RVV
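// For illustration, instantiating SIMD_VISIT_BINOP_RVV with the
// SIMD_BINOP_LIST entry V(I32x4Add, kRiscvVaddVv, E32, m1) expands to
// roughly:
//
//   void InstructionSelectorT::VisitI32x4Add(OpIndex node) {
//     RiscvOperandGeneratorT g(this);
//     this->Emit(kRiscvVaddVv, g.DefineAsRegister(node),
//                g.UseRegister(this->input_at(node, 0)),
//                g.UseRegister(this->input_at(node, 1)),
//                g.UseImmediate(E32), g.UseImmediate(m1));
//   }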
#define SIMD_VISIT_UNOP2(Name, instruction, VSEW, LMUL)                      \
  void InstructionSelectorT::Visit##Name(OpIndex node) {                     \
    RiscvOperandGeneratorT g(this);                                          \
    this->Emit(instruction, g.DefineAsRegister(node),                        \
               g.UseRegister(this->input_at(node, 0)), g.UseImmediate(VSEW), \
               g.UseImmediate(LMUL));                                        \
#undef SIMD_VISIT_UNOP2

void InstructionSelectorT::VisitS128Select(OpIndex node) {
  VisitRRRR(this, kRiscvS128Select, node);

#define SIMD_VISIT_SELECT_LANE(Name)                     \
  void InstructionSelectorT::Visit##Name(OpIndex node) { \
    VisitRRRR(this, kRiscvS128Select, node);             \
#undef SIMD_VISIT_SELECT_LANE

#define VISIT_SIMD_QFMOP(Name, instruction)              \
  void InstructionSelectorT::Visit##Name(OpIndex node) { \
    VisitRRRR(this, instruction, node);                  \
#undef VISIT_SIMD_QFMOP
void InstructionSelectorT::VisitF32x4Min(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  InstructionOperand temp1 = g.TempFpRegister(v0);
  InstructionOperand mask_reg = g.TempFpRegister(v0);
  this->Emit(kRiscvVmfeqVv, temp1, g.UseRegister(this->input_at(node, 0)),
             g.UseRegister(this->input_at(node, 0)), g.UseImmediate(E32),
             g.UseImmediate(m1));
  this->Emit(kRiscvVmfeqVv, temp2, g.UseRegister(this->input_at(node, 1)),
             g.UseRegister(this->input_at(node, 1)), g.UseImmediate(E32),
             g.UseImmediate(m1));
  this->Emit(kRiscvVandVv, mask_reg, temp2, temp1, g.UseImmediate(E32),
             g.UseImmediate(m1));
  this->Emit(kRiscvVmv, NaN, g.UseImmediate(0x7FC00000), g.UseImmediate(E32),
             g.UseImmediate(m1));
  this->Emit(kRiscvVfminVv, result, g.UseRegister(this->input_at(node, 1)),
             g.UseRegister(this->input_at(node, 0)), g.UseImmediate(E32),
  this->Emit(kRiscvVmv, g.DefineAsRegister(node), result, g.UseImmediate(E32),
             g.UseImmediate(m1));
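// NaN-propagating F32x4 min: each input is compared with itself (vmfeq) to
// build a "lane is not NaN" mask, the two masks are ANDed into v0, a
// temporary is pre-loaded with the quiet-NaN pattern 0x7FC00000, vfmin is
// applied (presumably masked by v0) so lanes with a NaN input keep the NaN
// pattern, and the result is finally moved into the destination.
// VisitF32x4Max below follows the same structure.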
void InstructionSelectorT::VisitF32x4Max(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  this->Emit(kRiscvVmfeqVv, temp1, g.UseRegister(this->input_at(node, 0)),
  this->Emit(kRiscvVmfeqVv, temp2, g.UseRegister(this->input_at(node, 1)),

void InstructionSelectorT::VisitF32x4RelaxedMin(OpIndex node) {
  VisitF32x4Min(node);

void InstructionSelectorT::VisitF64x2RelaxedMin(OpIndex node) {
  VisitF64x2Min(node);

void InstructionSelectorT::VisitF64x2RelaxedMax(OpIndex node) {
  VisitF64x2Max(node);

void InstructionSelectorT::VisitF32x4RelaxedMax(OpIndex node) {
  VisitF32x4Max(node);
void InstructionSelectorT::VisitF64x2Eq(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  this->Emit(kRiscvVmfeqVv, temp1, g.UseRegister(this->input_at(node, 1)),

void InstructionSelectorT::VisitF64x2Ne(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  this->Emit(kRiscvVmfneVv, temp1, g.UseRegister(this->input_at(node, 1)),

void InstructionSelectorT::VisitF64x2Lt(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  this->Emit(kRiscvVmfltVv, temp1, g.UseRegister(this->input_at(node, 0)),

void InstructionSelectorT::VisitF64x2Le(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  this->Emit(kRiscvVmfleVv, temp1, g.UseRegister(this->input_at(node, 0)),

void InstructionSelectorT::VisitF32x4Eq(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  this->Emit(kRiscvVmfeqVv, temp1, g.UseRegister(this->input_at(node, 1)),

void InstructionSelectorT::VisitF32x4Ne(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  this->Emit(kRiscvVmfneVv, temp1, g.UseRegister(this->input_at(node, 1)),

void InstructionSelectorT::VisitF32x4Lt(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  this->Emit(kRiscvVmfltVv, temp1, g.UseRegister(this->input_at(node, 0)),

void InstructionSelectorT::VisitF32x4Le(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  this->Emit(kRiscvVmfleVv, temp1, g.UseRegister(this->input_at(node, 0)),
void InstructionSelectorT::VisitI32x4SConvertI16x8Low(OpIndex node) {
  RiscvOperandGeneratorT g(this);

void InstructionSelectorT::VisitI32x4UConvertI16x8Low(OpIndex node) {
  RiscvOperandGeneratorT g(this);

void InstructionSelectorT::VisitI16x8SConvertI8x16High(OpIndex node) {
  RiscvOperandGeneratorT g(this);

void InstructionSelectorT::VisitI16x8SConvertI32x4(OpIndex node) {
  RiscvOperandGeneratorT g(this);

void InstructionSelectorT::VisitI16x8UConvertI32x4(OpIndex node) {
  RiscvOperandGeneratorT g(this);

void InstructionSelectorT::VisitI8x16RoundingAverageU(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  this->Emit(kRiscvVwadduVv, temp, g.UseRegister(this->input_at(node, 0)),

void InstructionSelectorT::VisitI8x16SConvertI16x8(OpIndex node) {
  RiscvOperandGeneratorT g(this);

void InstructionSelectorT::VisitI8x16UConvertI16x8(OpIndex node) {
  RiscvOperandGeneratorT g(this);

void InstructionSelectorT::VisitI16x8RoundingAverageU(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  this->Emit(kRiscvVwadduVv, temp, g.UseRegister(this->input_at(node, 0)),
void InstructionSelectorT::VisitI32x4DotI16x8S(OpIndex node) {
  constexpr int32_t FIRST_INDEX = 0b01010101;
  constexpr int32_t SECOND_INDEX = 0b10101010;
  RiscvOperandGeneratorT g(this);

void InstructionSelectorT::VisitI16x8DotI8x16I7x16S(OpIndex node) {
  constexpr int32_t FIRST_INDEX = 0b0101010101010101;
  constexpr int32_t SECOND_INDEX = 0b1010101010101010;
  RiscvOperandGeneratorT g(this);

void InstructionSelectorT::VisitI32x4DotI8x16I7x16AddS(OpIndex node) {
  constexpr int32_t FIRST_INDEX = 0b0001000100010001;
  constexpr int32_t SECOND_INDEX = 0b0010001000100010;
  constexpr int32_t THIRD_INDEX = 0b0100010001000100;
  constexpr int32_t FOURTH_INDEX = 0b1000100010001000;
  RiscvOperandGeneratorT g(this);
  this->Emit(kRiscvVwmul, intermediate, g.UseRegister(this->input_at(node, 0)),
  this->Emit(kRiscvVcompress, compressedPart2, intermediate,
  this->Emit(kRiscvVcompress, compressedPart1, intermediate,
  this->Emit(kRiscvVcompress, compressedPart3, intermediate,
  this->Emit(kRiscvVcompress, compressedPart4, intermediate,
  this->Emit(kRiscvVwaddVv, temp2, compressedPart1, compressedPart2,
  this->Emit(kRiscvVwaddVv, temp, compressedPart3, compressedPart4,
  this->Emit(kRiscvVaddVv, dst, mul_result,
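// The dot-product visitors widen-multiply the inputs into double-width lanes
// (vwmul), use vcompress with the alternating FIRST_/SECOND_/... index masks
// to separate the partial products, widen-add the parts pairwise (vwadd),
// and, for the AddS variant, add the accumulator input with vadd into dst.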
void InstructionSelectorT::VisitI8x16Shuffle(OpIndex node) {
  auto view = this->simd_shuffle_view(node);
  CanonicalizeShuffle(view, shuffle, &is_swizzle);
  OpIndex input0 = view.input(0);
  OpIndex input1 = view.input(1);
  RiscvOperandGeneratorT g(this);

void InstructionSelectorT::VisitI8x16Swizzle(OpIndex node) {
  RiscvOperandGeneratorT g(this);
#define VISIT_BIMASK(TYPE, VSEW, LMUL)                                      \
  void InstructionSelectorT::Visit##TYPE##BitMask(OpIndex node) {           \
    RiscvOperandGeneratorT g(this);                                         \
    InstructionOperand temp = g.TempFpRegister(v16);                        \
    this->Emit(kRiscvVmslt, temp, g.UseRegister(this->input_at(node, 0)),   \
               g.UseImmediate(0), g.UseImmediate(VSEW), g.UseImmediate(m1), \
               g.UseImmediate(true));                                       \
    this->Emit(kRiscvVmvXs, g.DefineAsRegister(node), temp,                 \
               g.UseImmediate(E32), g.UseImmediate(m1));                    \
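// BitMask: vmslt against 0 sets one mask bit per lane whose sign bit is set,
// and vmv.x.s then moves the resulting mask register into the scalar
// destination.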
void InstructionSelectorT::VisitI32x4SConvertI16x8High(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  this->Emit(kRiscvVslidedown, temp, g.UseRegister(this->input_at(node, 0)),
             g.UseImmediate(4), g.UseImmediate(E16), g.UseImmediate(m1));
  this->Emit(kRiscvVsextVf2, g.DefineAsRegister(node), temp,
             g.UseImmediate(E32), g.UseImmediate(m1));

void InstructionSelectorT::VisitI32x4UConvertI16x8High(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  this->Emit(kRiscvVslidedown, temp, g.UseRegister(this->input_at(node, 0)),

void InstructionSelectorT::VisitI16x8SConvertI8x16Low(OpIndex node) {
  RiscvOperandGeneratorT g(this);

void InstructionSelectorT::VisitI16x8UConvertI8x16High(OpIndex node) {
  RiscvOperandGeneratorT g(this);

void InstructionSelectorT::VisitI16x8UConvertI8x16Low(OpIndex node) {
  RiscvOperandGeneratorT g(this);

void InstructionSelectorT::VisitSignExtendWord8ToInt32(OpIndex node) {
  RiscvOperandGeneratorT g(this);

void InstructionSelectorT::VisitSignExtendWord16ToInt32(OpIndex node) {
  RiscvOperandGeneratorT g(this);

void InstructionSelectorT::VisitWord32Clz(OpIndex node) {
  VisitRR(this, kRiscvClz32, node);
#define VISIT_EXT_MUL(OPCODE1, OPCODE2, TYPE)                                \
  void InstructionSelectorT::Visit##OPCODE1##ExtMulLow##OPCODE2##S(          \
    RiscvOperandGeneratorT g(this);                                          \
    Emit(kRiscvVwmul, g.DefineAsRegister(node),                              \
         g.UseUniqueRegister(this->input_at(node, 0)),                       \
         g.UseUniqueRegister(this->input_at(node, 1)),                       \
         g.UseImmediate(E##TYPE), g.UseImmediate(mf2));                      \
  void InstructionSelectorT::Visit##OPCODE1##ExtMulHigh##OPCODE2##S(         \
    RiscvOperandGeneratorT g(this);                                          \
    InstructionOperand t1 = g.TempFpRegister(v16);                           \
    Emit(kRiscvVslidedown, t1, g.UseUniqueRegister(this->input_at(node, 0)), \
         g.UseImmediate(kRvvVLEN / TYPE / 2), g.UseImmediate(E##TYPE),       \
         g.UseImmediate(m1));                                                \
    InstructionOperand t2 = g.TempFpRegister(v17);                           \
    Emit(kRiscvVslidedown, t2, g.UseUniqueRegister(this->input_at(node, 1)), \
         g.UseImmediate(kRvvVLEN / TYPE / 2), g.UseImmediate(E##TYPE),       \
         g.UseImmediate(m1));                                                \
    Emit(kRiscvVwmul, g.DefineAsRegister(node), t1, t2,                      \
         g.UseImmediate(E##TYPE), g.UseImmediate(mf2));                      \
  void InstructionSelectorT::Visit##OPCODE1##ExtMulLow##OPCODE2##U(          \
    RiscvOperandGeneratorT g(this);                                          \
    Emit(kRiscvVwmulu, g.DefineAsRegister(node),                             \
         g.UseUniqueRegister(this->input_at(node, 0)),                       \
         g.UseUniqueRegister(this->input_at(node, 1)),                       \
         g.UseImmediate(E##TYPE), g.UseImmediate(mf2));                      \
  void InstructionSelectorT::Visit##OPCODE1##ExtMulHigh##OPCODE2##U(         \
    RiscvOperandGeneratorT g(this);                                          \
    InstructionOperand t1 = g.TempFpRegister(v16);                           \
    Emit(kRiscvVslidedown, t1, g.UseUniqueRegister(this->input_at(node, 0)), \
         g.UseImmediate(kRvvVLEN / TYPE / 2), g.UseImmediate(E##TYPE),       \
         g.UseImmediate(m1));                                                \
    InstructionOperand t2 = g.TempFpRegister(v17);                           \
    Emit(kRiscvVslidedown, t2, g.UseUniqueRegister(this->input_at(node, 1)), \
         g.UseImmediate(kRvvVLEN / TYPE / 2), g.UseImmediate(E##TYPE),       \
         g.UseImmediate(m1));                                                \
    Emit(kRiscvVwmulu, g.DefineAsRegister(node), t1, t2,                     \
         g.UseImmediate(E##TYPE), g.UseImmediate(mf2));                      \
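// ExtMulLow* widen-multiplies the low halves of the inputs directly
// (kRiscvVwmul/kRiscvVwmulu at LMUL = mf2 so the double-width product fits a
// single register group); ExtMulHigh* first slides the upper
// kRvvVLEN / TYPE / 2 elements of each input down into v16/v17 and then does
// the same widening multiply on those temporaries.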
void InstructionSelectorT::VisitF32x4Pmin(OpIndex node) {

void InstructionSelectorT::VisitF32x4Pmax(OpIndex node) {

void InstructionSelectorT::VisitF64x2Pmin(OpIndex node) {

void InstructionSelectorT::VisitF64x2Pmax(OpIndex node) {

void InstructionSelectorT::VisitTruncateFloat64ToFloat16RawBits(OpIndex node) {

void InstructionSelectorT::VisitChangeFloat16RawBitsToFloat64(OpIndex node) {

MachineOperatorBuilder::AlignmentRequirements
#ifdef RISCV_HAS_NO_UNALIGNED

    int first_input_index,

#if V8_ENABLE_WEBASSEMBLY
void InstructionSelectorT::VisitSetStackPointer(OpIndex node) {
  Emit(kArchSetStackPointer, 0, nullptr, 1, &input);
#undef SIMD_BINOP_LIST
#undef SIMD_SHIFT_OP_LIST
#undef SIMD_UNOP_LIST
#undef SIMD_UNOP_LIST2
#undef SIMD_TYPE_LIST
#undef SIMD_INT_TYPE_LIST