#define TRACE(...) PrintF(__VA_ARGS__)
      return is_uint5(value);

      return is_int12(value);

    case kRiscvStoreFloat:
    case kRiscvLoadDouble:
    case kRiscvStoreDouble:
      return is_int32(value);
    default:
      return is_int12(value);
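
// Why these ranges (general RISC-V facts, not spelled out in this file):
// RV32 shift instructions encode a 5-bit shift amount, so shift immediates
// must satisfy is_uint5, while I-type ALU ops and scalar loads/stores carry
// a signed 12-bit immediate, hence is_int12. Accepting is_int32 for the FP
// load/store opcodes presumably relies on the assembler splitting a large
// offset itself, along the lines of:
//
//   lui  t0, %hi(offset)       // upper 20 bits of the offset
//   add  t0, t0, base
//   flw  ft0, %lo(offset)(t0)  // remaining signed 12 bits
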
  size_t input_count = 0;

  output_op = g.DefineAsRegister(output.valid() ? output : node);

      ptrdiff_t const delta =
          *g.GetOptionalIntegerConstant(index.value()) +
          MacroAssemblerBase::RootRegisterOffsetForExternalReference(
              selector->isolate(), constant_base.external_reference());
      if (is_int32(delta)) {
        inputs[0] = g.UseImmediate(static_cast<int32_t>(delta));
        selector->Emit(opcode, 1, &output_op, input_count, inputs);

  if (base_op.Is<LoadRootRegisterOp>()) {
    DCHECK(g.IsIntegerConstant(index));
    inputs[0] = g.UseImmediate64(*g.GetOptionalIntegerConstant(index.value()));
    selector->Emit(opcode, 1, &output_op, input_count, inputs);

  if (load.index().has_value() && g.CanBeImmediate(index.value(), opcode)) {
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
                   g.DefineAsRegister(output.valid() ? output : node),
                   g.UseRegister(base), g.UseImmediate(index.value()));
  } else if (index.has_value()) {
    InstructionOperand addr_reg = g.TempRegister();
    selector->Emit(kRiscvAdd32 | AddressingModeField::encode(kMode_None),
                   addr_reg, g.UseRegister(index.value()),
                   g.UseRegister(base));
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
                   g.DefineAsRegister(output.valid() ? output : node),
                   addr_reg, g.TempImmediate(0));
  } else {
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
                   g.DefineAsRegister(output.valid() ? output : node),
                   g.UseRegister(base), g.TempImmediate(0));
  }
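
// Taken together, EmitLoad tries progressively cheaper addressing forms:
// a root-register-relative access for an external-reference base with a
// constant index, a direct root-register offset, base register plus an
// in-range immediate, and finally the generic fallback that materializes
// the address in a temp. A sketch of that fallback as emitted machine code
// (illustrative only):
//
//   add t0, base, index   // kRiscvAdd32 into addr_reg
//   lw  dst, 0(t0)        // the load itself, with zero displacement
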
void InstructionSelectorT::VisitStoreLane(OpIndex node) {
  const Simd128LaneMemoryOp& store = Get(node).Cast<Simd128LaneMemoryOp>();

  if (store.kind.with_trap_handler) {
    opcode |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
  }

  RiscvOperandGeneratorT g(this);

  InstructionOperand addr_reg = g.TempRegister();
  Emit(kRiscvAdd32, addr_reg, g.UseRegister(base), g.UseRegister(index));
  InstructionOperand inputs[4] = {
      g.UseImmediate(store.lane),
void InstructionSelectorT::VisitLoadLane(OpIndex node) {
  const Simd128LaneMemoryOp& load =
      this->Get(node).Cast<Simd128LaneMemoryOp>();

  if (load.kind.with_trap_handler) {
    opcode |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
  }

  RiscvOperandGeneratorT g(this);

  InstructionOperand addr_reg = g.TempRegister();
  Emit(kRiscvAdd32, addr_reg, g.UseRegister(base), g.UseRegister(index));
  Emit(opcode, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 2)), g.UseImmediate(load.lane),
       addr_reg, g.TempImmediate(0));
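
// Both lane visitors share one pattern: RVV vector loads/stores have no
// reg+reg addressing mode, so base and index are first combined with
// kRiscvAdd32 into a scratch register and the lane access then uses a zero
// displacement. The with_trap_handler flag tags the access so an
// out-of-bounds fault can be turned into a WebAssembly trap instead of a
// crash.
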
  switch (load_rep.representation()) {
      opcode = kRiscvLoadDouble;
      opcode = load_rep.IsUnsigned() ? kRiscvLbu : kRiscvLb;
      opcode = load_rep.IsUnsigned() ? kRiscvLhu : kRiscvLh;
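    // Mapping note: the IsUnsigned() checks pick between sign- and
    // zero-extending loads; RISC-V lb/lh sign-extend into the full register,
    // while lbu/lhu zero-extend, so the choice must match the load's
    // representation.
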
void InstructionSelectorT::VisitStore(OpIndex node) {
  RiscvOperandGeneratorT g(this);

  InstructionOperand inputs[4];

    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);

      code = kArchStoreIndirectWithWriteBarrier;

      code = kArchStoreWithWriteBarrier;

    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);

      code = kRiscvStoreFloat;

      code = kRiscvStoreDouble;

         g.UseRegisterOrImmediateZero(value),
         index.has_value() ? g.UseImmediate(this->value(index))
                           : g.UseImmediate(0));

  if (index.has_value() && g.CanBeImmediate(this->value(index), code)) {
    Emit(code | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         g.UseRegisterOrImmediateZero(value), g.UseRegister(base),
         index.has_value() ? g.UseImmediate(this->value(index))
                           : g.UseImmediate(0));
  } else if (index.has_value()) {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kRiscvAdd32 | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(this->value(index)), g.UseRegister(base));
    Emit(code | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         g.UseRegisterOrImmediateZero(value), addr_reg, g.TempImmediate(0));
  } else {
    Emit(code | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         g.UseRegisterOrImmediateZero(value), g.UseRegister(base),
         g.TempImmediate(0));
  }
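
// VisitStore has two regimes: when a write barrier is required it funnels
// everything through kArchStoreWithWriteBarrier (or the indirect-pointer
// variant) with two temp registers for the barrier's scratch needs, and the
// record-write stub does the heavy lifting; otherwise it picks a plain store
// opcode by representation and reuses the same immediate-vs-materialized
// addressing choice as EmitLoad above.
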
void InstructionSelectorT::VisitProtectedLoad(OpIndex node) {

void InstructionSelectorT::VisitProtectedStore(OpIndex node) {

void InstructionSelectorT::VisitWord32And(OpIndex node) {

void InstructionSelectorT::VisitWord32Or(OpIndex node) {

void InstructionSelectorT::VisitWord32Xor(OpIndex node) {

void InstructionSelectorT::VisitWord32Ror(OpIndex node) {

void InstructionSelectorT::VisitWord32ReverseBits(OpIndex node) {

void InstructionSelectorT::VisitWord64ReverseBytes(OpIndex node) {

void InstructionSelectorT::VisitWord32ReverseBytes(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  if (CpuFeatures::IsSupported(ZBB)) {
    Emit(kRiscvRev8, g.DefineAsRegister(node),
         g.UseRegister(this->input_at(node, 0)));
  } else {
    Emit(kRiscvByteSwap32, g.DefineAsRegister(node),
         g.UseRegister(this->input_at(node, 0)));
  }
}
void InstructionSelectorT::VisitSimd128ReverseBytes(OpIndex node) {

void InstructionSelectorT::VisitWord32Ctz(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  Emit(kRiscvCtz, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));
}

void InstructionSelectorT::VisitWord32Popcnt(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  Emit(kRiscvCpop, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));
}
void InstructionSelectorT::VisitInt32Add(OpIndex node) {

void InstructionSelectorT::VisitInt32Sub(OpIndex node) {

void InstructionSelectorT::VisitInt32Mul(OpIndex node) {

void InstructionSelectorT::VisitInt32MulHigh(OpIndex node) {
  VisitRRR(this, kRiscvMulHigh32, node);
}

void InstructionSelectorT::VisitUint32MulHigh(OpIndex node) {
  VisitRRR(this, kRiscvMulHighU32, node);
}

void InstructionSelectorT::VisitInt32Div(OpIndex node) {

void InstructionSelectorT::VisitUint32Div(OpIndex node) {

void InstructionSelectorT::VisitInt32Mod(OpIndex node) {

void InstructionSelectorT::VisitUint32Mod(OpIndex node) {
void InstructionSelectorT::VisitChangeFloat32ToFloat64(OpIndex node) {
  VisitRR(this, kRiscvCvtDS, node);
}

void InstructionSelectorT::VisitRoundInt32ToFloat32(OpIndex node) {
  VisitRR(this, kRiscvCvtSW, node);
}

void InstructionSelectorT::VisitRoundUint32ToFloat32(OpIndex node) {
  VisitRR(this, kRiscvCvtSUw, node);
}

void InstructionSelectorT::VisitChangeInt32ToFloat64(OpIndex node) {
  VisitRR(this, kRiscvCvtDW, node);
}

void InstructionSelectorT::VisitChangeUint32ToFloat64(OpIndex node) {
  VisitRR(this, kRiscvCvtDUw, node);
}

void InstructionSelectorT::VisitTruncateFloat32ToInt32(OpIndex node) {
  RiscvOperandGeneratorT g(this);
       g.UseRegister(this->input_at(node, 0)));
}

void InstructionSelectorT::VisitTruncateFloat32ToUint32(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(op.input(0)));
void InstructionSelectorT::VisitChangeFloat64ToInt32(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  OpIndex value = this->input_at(node, 0);
  using Rep = turboshaft::RegisterRepresentation;
  const turboshaft::Operation& op = this->Get(value);
  if (op.Is<turboshaft::ChangeOp>()) {
    const turboshaft::ChangeOp& change = op.Cast<turboshaft::ChangeOp>();
    if (change.from == Rep::Float32() && change.to == Rep::Float64()) {
      Emit(kRiscvTruncWS, g.DefineAsRegister(node),
           g.UseRegister(this->input_at(value, 0)));
      return;
    }
  }
  VisitRR(this, kRiscvTruncWD, node);
}
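
// The pattern-match above is a strength reduction: for Float32 -> Float64 ->
// Int32, truncating the Float32 input directly (kRiscvTruncWS, i.e. fcvt.w.s
// with round-toward-zero) yields the same integer as widening first and then
// truncating the double (kRiscvTruncWD), because the f32 -> f64 conversion
// is exact; skipping the widening saves an instruction and a register.
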
void InstructionSelectorT::VisitChangeFloat64ToUint32(OpIndex node) {
  VisitRR(this, kRiscvTruncUwD, node);
}

void InstructionSelectorT::VisitTruncateFloat64ToUint32(OpIndex node) {
  VisitRR(this, kRiscvTruncUwD, node);
}

void InstructionSelectorT::VisitBitcastFloat32ToInt32(OpIndex node) {
  VisitRR(this, kRiscvBitcastFloat32ToInt32, node);
}

void InstructionSelectorT::VisitBitcastInt32ToFloat32(OpIndex node) {
  VisitRR(this, kRiscvBitcastInt32ToFloat32, node);
}
void InstructionSelectorT::VisitFloat64RoundDown(OpIndex node) {

void InstructionSelectorT::VisitFloat32RoundUp(OpIndex node) {
  VisitRR(this, kRiscvFloat32RoundUp, node);
}

void InstructionSelectorT::VisitFloat64RoundUp(OpIndex node) {

void InstructionSelectorT::VisitFloat32RoundTruncate(OpIndex node) {
  VisitRR(this, kRiscvFloat32RoundTruncate, node);
}

void InstructionSelectorT::VisitFloat64RoundTruncate(OpIndex node) {

void InstructionSelectorT::VisitFloat64RoundTiesAway(OpIndex node) {

void InstructionSelectorT::VisitFloat32RoundTiesEven(OpIndex node) {
  VisitRR(this, kRiscvFloat32RoundTiesEven, node);
}

void InstructionSelectorT::VisitFloat64RoundTiesEven(OpIndex node) {

void InstructionSelectorT::VisitFloat32Neg(OpIndex node) {
  VisitRR(this, kRiscvNegS, node);
}

void InstructionSelectorT::VisitFloat64Neg(OpIndex node) {
  VisitRR(this, kRiscvNegD, node);
}
  RiscvOperandGeneratorT g(this);
  Emit(opcode, g.DefineAsFixed(node, fa0),
       g.UseFixed(this->input_at(node, 0), fa0),
       g.UseFixed(this->input_at(node, 1), fa1))
      ->MarkAsCall();

  RiscvOperandGeneratorT g(this);
  Emit(opcode, g.DefineAsFixed(node, fa0),
       g.UseFixed(this->input_at(node, 0), fa1))
      ->MarkAsCall();
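
// Ieee754 binops/unops (pow, sin, log, ...) are not lowered to instructions;
// they become calls into V8's builtin math routines, so operands are pinned
// to RISC-V FP argument registers (fa0/fa1 here) and the instruction is
// marked as a call so the register allocator treats caller-saved registers
// as clobbered across it.
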
void InstructionSelectorT::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments,
    const CallDescriptor* call_descriptor, OpIndex node) {
  RiscvOperandGeneratorT g(this);

  if (call_descriptor->IsCFunctionCall()) {
    Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
             call_descriptor->ParameterCount())),
         0, nullptr, 0, nullptr);

      Emit(kRiscvStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),

    int push_count = static_cast<int>(call_descriptor->ParameterSlotCount());
    if (push_count > 0) {
      Emit(kRiscvStackClaim, g.NoOutput(),

    for (size_t n = 0; n < arguments->size(); ++n) {
      if (input.node.valid()) {
        Emit(kRiscvStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
             g.TempImmediate(static_cast<int>(n << kSystemPointerSizeLog2)));
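
// Argument preparation differs by callee: C calls reserve the slot area with
// kArchPrepareCallCFunction and then store each parameter to its slot, while
// JS-style calls claim push_count stack slots (kRiscvStackClaim) and poke
// arguments with kRiscvStoreToStackSlot at n * kSystemPointerSize; holes in
// `arguments` (invalid nodes) are simply skipped.
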
void InstructionSelectorT::VisitUnalignedLoad(OpIndex node) {
  RiscvOperandGeneratorT g(this);

  switch (load_rep.representation()) {
      opcode = kRiscvULoadFloat;
      opcode = kRiscvULoadDouble;
      opcode = load_rep.IsUnsigned() ? kRiscvLbu : kRiscvLb;
      opcode = load_rep.IsUnsigned() ? kRiscvUlhu : kRiscvUlh;

  if (g.CanBeImmediate(index, opcode)) {
        g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
        g.UseRegister(index), g.UseRegister(base));
        g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
void InstructionSelectorT::VisitUnalignedStore(OpIndex node) {
  RiscvOperandGeneratorT g(this);

      opcode = kRiscvUStoreFloat;
      opcode = kRiscvUStoreDouble;

  if (g.CanBeImmediate(index, opcode)) {
         g.UseRegister(base), g.UseImmediate(index),
         g.UseRegisterOrImmediateZero(value));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
         g.UseRegister(index), g.UseRegister(base));
         addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
                          FlagsContinuationT* cont) {

void VisitAtomicLoad(InstructionSelectorT* selector, OpIndex node,
                     ArchOpcode opcode, AtomicWidth width) {
  RiscvOperandGeneratorT g(selector);
  auto load = selector->load_view(node);

  if (g.CanBeImmediate(index, opcode)) {
        g.DefineAsRegister(node), g.UseRegister(base),
        g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
        addr_reg, g.UseRegister(index), g.UseRegister(base));
        g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
void VisitAtomicStore(InstructionSelectorT* selector, OpIndex node,
                      ArchOpcode opcode, AtomicWidth width) {
  RiscvOperandGeneratorT g(selector);
  auto store = selector->store_view(node);
  OpIndex index = selector->value(store.index());

  if (g.CanBeImmediate(index, opcode)) {
        g.NoOutput(), g.UseRegisterOrImmediateZero(value),
        g.UseRegister(base), g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
        addr_reg, g.UseRegister(index), g.UseRegister(base));
        g.NoOutput(), g.UseRegisterOrImmediateZero(value), addr_reg,
  RiscvOperandGeneratorT g(selector);
  const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(node);
  OpIndex index = atomic_op.index();
  OpIndex value = atomic_op.value();

  InstructionOperand inputs[3];
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(value);
  InstructionOperand outputs[1];
  outputs[0] = g.UseUniqueRegister(node);
  InstructionOperand temps[4];
  temps[0] = g.TempRegister();
  temps[1] = g.TempRegister();
  temps[2] = g.TempRegister();
  temps[3] = g.TempRegister();

  selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
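
// UseUniqueRegister on every operand keeps inputs alive in distinct
// registers for the whole sequence, which matters because an atomic RMW on
// RISC-V generally expands to a load-reserved/store-conditional retry loop
// at codegen time; the four temps are scratch for that loop. A hedged sketch
// of the kind of sequence the later stage emits:
//
//   retry:
//     lr.w  t0, (addr)        // load-reserved old value
//     <op>  t1, t0, value     // apply the binop
//     sc.w  t2, t1, (addr)    // store-conditional
//     bnez  t2, retry         // retry on reservation failure
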
    OpIndex node, FlagsContinuationT* cont) {

  value = op.stack_limit();

  RiscvOperandGeneratorT g(this);

  InstructionOperand* const outputs = nullptr;
  const int output_count = 0;

  InstructionOperand temps[] = {g.TempRegister()};

  InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};

  EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
                       temp_count, temps, cont);
                        FlagsContinuation* cont) {
  while (const ComparisonOp* equal =
    value = equal->left();

  const Operation& value_op = this->Get(value);
  if (const ComparisonOp* comparison = value_op.TryCast<ComparisonOp>()) {
    switch (comparison->rep.value()) {
        cont->OverwriteAndNegateIfEqual(

          case ComparisonOp::Kind::kEqual:
            cont->OverwriteAndNegateIfEqual(kEqual);
          case ComparisonOp::Kind::kSignedLessThan:
          case ComparisonOp::Kind::kSignedLessThanOrEqual:

          case ComparisonOp::Kind::kEqual:
            cont->OverwriteAndNegateIfEqual(kEqual);
          case ComparisonOp::Kind::kSignedLessThan:
          case ComparisonOp::Kind::kSignedLessThanOrEqual:

  } else if (const ProjectionOp* projection =
                 value_op.TryCast<ProjectionOp>()) {
    if (projection->index == 1u) {
      OpIndex node = projection->input();
      if (const OverflowCheckedBinopOp* binop =
              this->TryCast<OverflowCheckedBinopOp>(node);
          binop && CanDoBranchIfOverflowFusion(node)) {
        switch (binop->kind) {
          case OverflowCheckedBinopOp::Kind::kSignedAdd:
            cont->OverwriteAndNegateIfEqual(kOverflow);
            return VisitBinop<Int32BinopMatcher>(this, node, kRiscvAddOvf,
                                                 cont);
          case OverflowCheckedBinopOp::Kind::kSignedSub:
            cont->OverwriteAndNegateIfEqual(kOverflow);
            return VisitBinop<Int32BinopMatcher>(this, node, kRiscvSubOvf,
                                                 cont);
          case OverflowCheckedBinopOp::Kind::kSignedMul:
            cont->OverwriteAndNegateIfEqual(kOverflow);
            return VisitBinop<Int32BinopMatcher>(this, node, kRiscvMulOvf32,
                                                 cont);
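
// This is the overflow-fusion path: when the second projection of an
// OverflowCheckedBinop (the overflow bit) feeds a branch, the selector
// re-emits the arithmetic as kRiscvAddOvf/SubOvf/MulOvf32 with the
// continuation attached, so the overflow check branches directly instead of
// materializing a boolean and comparing it against zero.
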
void InstructionSelectorT::VisitWord32Equal(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  if (MatchZero(right)) {
    return VisitWordCompareZero(user, left, &cont);
void InstructionSelectorT::VisitInt32LessThan(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);

void InstructionSelectorT::VisitInt32LessThanOrEqual(OpIndex node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);

void InstructionSelectorT::VisitUint32LessThan(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);

void InstructionSelectorT::VisitUint32LessThanOrEqual(OpIndex node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);

void InstructionSelectorT::VisitInt32AddWithOverflow(OpIndex node) {
  OptionalOpIndex ovf = FindProjection(node, 1);
  if (ovf.valid() && IsUsed(ovf.value())) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
    return VisitBinop<Int32BinopMatcher>(this, node, kRiscvAddOvf, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kRiscvAddOvf, &cont);
}

void InstructionSelectorT::VisitInt32SubWithOverflow(OpIndex node) {
  OptionalOpIndex ovf = FindProjection(node, 1);
  if (ovf.valid() && IsUsed(ovf.value())) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
    return VisitBinop<Int32BinopMatcher>(this, node, kRiscvSubOvf, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kRiscvSubOvf, &cont);
}

void InstructionSelectorT::VisitInt32MulWithOverflow(OpIndex node) {
  OptionalOpIndex ovf = FindProjection(node, 1);
  if (ovf.valid() && IsUsed(ovf.value())) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
    return VisitBinop<Int32BinopMatcher>(this, node, kRiscvMulOvf32, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kRiscvMulOvf32, &cont);
}
void InstructionSelectorT::VisitWord32AtomicLoad(OpIndex node) {
  auto load = this->load_view(node);
  LoadRepresentation load_rep = load.loaded_rep();
  ArchOpcode opcode;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
      break;
    case MachineRepresentation::kTaggedSigned:
    case MachineRepresentation::kTaggedPointer:
    case MachineRepresentation::kTagged:
    case MachineRepresentation::kWord32:
      opcode = kAtomicLoadWord32;
      break;
  }
  VisitAtomicLoad(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelectorT::VisitWord32AtomicStore(OpIndex node) {
  auto store = this->store_view(node);
  AtomicStoreParameters store_params(store.stored_rep().representation(),
                                     store.stored_rep().write_barrier_kind(),
                                     store.memory_order().value(),
                                     store.access_kind());
  switch (store_params.representation()) {
    case MachineRepresentation::kWord8:
      opcode = kAtomicStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kAtomicStoreWord16;
      break;
    case MachineRepresentation::kTaggedSigned:
    case MachineRepresentation::kTaggedPointer:
    case MachineRepresentation::kTagged:
    case MachineRepresentation::kWord32:
      opcode = kAtomicStoreWord32;
      break;
  }
  VisitAtomicStore(this, node, opcode, AtomicWidth::kWord32);
}
  size_t input_count = 0;
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
                         AtomicWidthField::encode(width);
  selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);

  size_t input_count = 0;
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
                         AtomicWidthField::encode(width);
  selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
void InstructionSelectorT::VisitWord32AtomicExchange(OpIndex node) {
  const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
  if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
    opcode = kAtomicExchangeInt8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
    opcode = kAtomicExchangeUint8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
    opcode = kAtomicExchangeInt16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
    opcode = kAtomicExchangeUint16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
             atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
    opcode = kAtomicExchangeWord32;
void InstructionSelectorT::VisitWord32AtomicCompareExchange(OpIndex node) {
  const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
  if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
    opcode = kAtomicCompareExchangeInt8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
    opcode = kAtomicCompareExchangeUint8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
    opcode = kAtomicCompareExchangeInt16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
    opcode = kAtomicCompareExchangeUint16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
             atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
    opcode = kAtomicCompareExchangeWord32;
void InstructionSelectorT::VisitWord32AtomicBinaryOperation(
    OpIndex node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
    ArchOpcode uint16_op, ArchOpcode word32_op) {
  const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
  if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
    opcode = int8_op;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
    opcode = uint8_op;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
    opcode = int16_op;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
    opcode = uint16_op;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
             atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
    opcode = word32_op;
#define VISIT_ATOMIC_BINOP(op)                                           \
  void InstructionSelectorT::VisitWord32Atomic##op(OpIndex node) {       \
    VisitWord32AtomicBinaryOperation(                                    \
        node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
        kAtomic##op##Uint16, kAtomic##op##Word32);                       \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
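
// For reference, VISIT_ATOMIC_BINOP(Add) expands to:
//
//   void InstructionSelectorT::VisitWord32AtomicAdd(OpIndex node) {
//     VisitWord32AtomicBinaryOperation(
//         node, kAtomicAddInt8, kAtomicAddUint8, kAtomicAddInt16,
//         kAtomicAddUint16, kAtomicAddWord32);
//   }
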
void InstructionSelectorT::VisitInt32AbsWithOverflow(OpIndex node) {

void InstructionSelectorT::VisitInt64AbsWithOverflow(OpIndex node) {
template <unsigned N>
  static_assert(N == 3 || N == 4,
                "Pair operations can only have 3 or 4 inputs");

  if (projection1.valid()) {
    if constexpr (N == 3) {
      selector->Emit(pair_opcode, 2, outputs, N, inputs);
    } else if constexpr (N == 4) {
      selector->Emit(pair_opcode, 2, outputs, N, inputs);
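
// On this 32-bit target a word64 value occupies two registers, so pair
// binops define two outputs: the low half on the node itself and the high
// half on projection 1 (hence the projection1.valid() guard). The template
// parameter N is the pair opcode's input count; the <4> instantiations
// below consume both halves of both operands for add/sub/mul.
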
void InstructionSelectorT::VisitInt32PairAdd(OpIndex node) {
  VisitInt32PairBinop<4>(this, kRiscvAddPair, kRiscvAdd32, node);
}

void InstructionSelectorT::VisitInt32PairSub(OpIndex node) {
  VisitInt32PairBinop<4>(this, kRiscvSubPair, kRiscvSub32, node);
}

void InstructionSelectorT::VisitInt32PairMul(OpIndex node) {
  VisitInt32PairBinop<4>(this, kRiscvMulPair, kRiscvMul32, node);
}
void InstructionSelectorT::VisitI64x2SplatI32Pair(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  InstructionOperand low = g.UseRegister(this->input_at(node, 0));
  InstructionOperand high = g.UseRegister(this->input_at(node, 1));
  Emit(kRiscvI64x2SplatI32Pair, g.DefineAsRegister(node), low, high);
}

void InstructionSelectorT::VisitI64x2ReplaceLaneI32Pair(OpIndex node) {
                          InstructionCode opcode, OpIndex node) {
  RiscvOperandGeneratorT g(selector);
  InstructionOperand shift_operand;
  OpIndex shift_by = selector->input_at(node, 2);
  if (g.IsIntegerConstant(shift_by)) {
    shift_operand = g.UseImmediate(shift_by);
  } else {
    shift_operand = g.UseUniqueRegister(shift_by);
  }

  InstructionOperand inputs[] = {
      g.UseUniqueRegister(selector->input_at(node, 0)),
      g.UseUniqueRegister(selector->input_at(node, 1)), shift_operand};

  InstructionOperand outputs[2];
  InstructionOperand temps[1];
  size_t output_count = 0;
  size_t temp_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);
  if (projection1.valid()) {
    outputs[output_count++] = g.DefineAsRegister(projection1.value());
  } else {
    temps[temp_count++] = g.TempRegister();
  }

  selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
void InstructionSelectorT::VisitWord32PairShl(OpIndex node) {

void InstructionSelectorT::VisitWord32PairShr(OpIndex node) {

void InstructionSelectorT::VisitWord32PairSar(OpIndex node) {
void InstructionSelectorT::VisitWord32AtomicPairLoad(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  OpIndex index = this->input_at(node, 1);

  ArchOpcode opcode = kRiscvWord32AtomicPairLoad;
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index)};
  InstructionOperand temps[3];
  size_t temp_count = 0;
  temps[temp_count++] = g.TempRegister(t0);
  InstructionOperand outputs[2];
  size_t output_count = 0;

  OptionalOpIndex projection0 = FindProjection(node, 0);
  OptionalOpIndex projection1 = FindProjection(node, 1);
  if (projection0.valid()) {
    outputs[output_count++] = g.DefineAsFixed(projection0.value(), a0);
  } else {
    temps[temp_count++] = g.TempRegister(a0);
  }
  if (projection1.valid()) {
    outputs[output_count++] = g.DefineAsFixed(projection1.value(), a1);
  } else {
    temps[temp_count++] = g.TempRegister(a1);
  }
  Emit(code, output_count, outputs, arraysize(inputs), inputs, temp_count,
       temps);
}
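
// The pair result is pinned to the a0/a1 register pair, matching what the
// kRiscvWord32AtomicPairLoad code sequence produces; when one half of the
// result is unused there is no output to define, so the same register is
// reserved as a temp instead, keeping the allocator from placing a live
// value there.
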
void InstructionSelectorT::VisitWord32AtomicPairStore(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  const AtomicWord32PairOp& store = Cast<AtomicWord32PairOp>(node);
  OpIndex index = store.index().value();
  OpIndex value_low = store.value_low().value();
  OpIndex value_high = store.value_high().value();

  InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
                                 g.UseFixed(value_low, a1),
                                 g.UseFixed(value_high, a2)};
  InstructionOperand temps[] = {g.TempRegister(a0), g.TempRegister(),
                                g.TempRegister()};
  Emit(kRiscvWord32AtomicPairStore | AddressingModeField::encode(kMode_MRI), 0,
       nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);

  size_t output_count = 0;
  size_t temp_count = 0;

  if (projection0.valid()) {

  if (projection1.valid()) {

  selector->Emit(code, output_count, outputs, arraysize(inputs), inputs,
                 temp_count, temps);
void InstructionSelectorT::VisitWord32AtomicPairAdd(OpIndex node) {
  VisitPairAtomicBinop(this, node, kRiscvWord32AtomicPairAdd);
}

void InstructionSelectorT::VisitWord32AtomicPairSub(OpIndex node) {
  VisitPairAtomicBinop(this, node, kRiscvWord32AtomicPairSub);
}

void InstructionSelectorT::VisitWord32AtomicPairAnd(OpIndex node) {
  VisitPairAtomicBinop(this, node, kRiscvWord32AtomicPairAnd);
}

void InstructionSelectorT::VisitWord32AtomicPairOr(OpIndex node) {
  VisitPairAtomicBinop(this, node, kRiscvWord32AtomicPairOr);
}

void InstructionSelectorT::VisitWord32AtomicPairXor(OpIndex node) {
  VisitPairAtomicBinop(this, node, kRiscvWord32AtomicPairXor);
}

void InstructionSelectorT::VisitWord32AtomicPairExchange(OpIndex node) {
  VisitPairAtomicBinop(this, node, kRiscvWord32AtomicPairExchange);
}
void InstructionSelectorT::VisitWord32AtomicPairCompareExchange(OpIndex node) {
  RiscvOperandGeneratorT g(this);

  const size_t expected_offset = 4;
  const size_t value_offset = 2;
  InstructionOperand inputs[] = {
      g.UseRegister(this->input_at(node, 0)),
      g.UseRegister(this->input_at(node, 1)),
      g.UseFixed(this->input_at(node, expected_offset), a1),
      g.UseFixed(this->input_at(node, expected_offset + 1), a2),
      g.UseFixed(this->input_at(node, value_offset), a3),
      g.UseFixed(this->input_at(node, value_offset + 1), a4)};
  InstructionCode code = kRiscvWord32AtomicPairCompareExchange |
                         AddressingModeField::encode(kMode_MRI);

  OptionalOpIndex projection0 = FindProjection(node, 0);
  OptionalOpIndex projection1 = FindProjection(node, 1);
  InstructionOperand outputs[2];
  size_t output_count = 0;
  InstructionOperand temps[3];
  size_t temp_count = 0;
  temps[temp_count++] = g.TempRegister(t0);
  if (projection0.valid()) {
    outputs[output_count++] = g.DefineAsFixed(projection0.value(), a0);
  } else {
    temps[temp_count++] = g.TempRegister(a0);
  }
  if (projection1.valid()) {
    outputs[output_count++] = g.DefineAsFixed(projection1.value(), a1);
  } else {
    temps[temp_count++] = g.TempRegister(a1);
  }
  Emit(code, output_count, outputs, arraysize(inputs), inputs, temp_count,
       temps);
}
void InstructionSelectorT::VisitF64x2Min(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  InstructionOperand temp1 = g.TempFpRegister(v0);
  InstructionOperand mask_reg = g.TempFpRegister(v0);
  InstructionOperand temp2 = g.TempFpRegister(kSimd128ScratchReg);

  this->Emit(kRiscvVmfeqVv, temp1, g.UseRegister(this->input_at(node, 0)),
             g.UseRegister(this->input_at(node, 0)), g.UseImmediate(E64),
             g.UseImmediate(m1));
  this->Emit(kRiscvVmfeqVv, temp2, g.UseRegister(this->input_at(node, 1)),
             g.UseRegister(this->input_at(node, 1)), g.UseImmediate(E64),
             g.UseImmediate(m1));
  this->Emit(kRiscvVandVv, mask_reg, temp2, temp1, g.UseImmediate(E64),
             g.UseImmediate(m1));

  InstructionOperand temp3 = g.TempFpRegister(kSimd128ScratchReg);
  InstructionOperand temp4 = g.TempFpRegister(kSimd128ScratchReg);
  InstructionOperand temp5 = g.TempFpRegister(kSimd128ScratchReg);
  this->Emit(kRiscvVmv, temp3, g.UseImmediate(kNaN), g.UseImmediate(E64),
             g.UseImmediate(m1));
  this->Emit(kRiscvVsll, temp4, temp3, g.UseImmediate(kNaNShift),
             g.UseImmediate(E64), g.UseImmediate(m1));
  this->Emit(kRiscvVfminVv, temp5, g.UseRegister(this->input_at(node, 1)),
             g.UseRegister(this->input_at(node, 0)), g.UseImmediate(E64),
             g.UseImmediate(m1), g.UseImmediate(Mask));
  this->Emit(kRiscvVmv, g.DefineAsRegister(node), temp5, g.UseImmediate(E64),
             g.UseImmediate(m1));
}
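
// Why the dance around vfmin: Wasm f64x2.min must return NaN if either input
// is NaN, so the code first builds a mask of lanes where both inputs equal
// themselves (vmfeq x,x is false exactly for NaN), seeds the scratch
// destination with a canonical NaN bit pattern (kNaN shifted left by
// kNaNShift), and then runs the masked vfmin only on the all-ordered lanes,
// leaving the NaN pattern in the rest. VisitF64x2Max below is identical
// except for using vfmax.
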
void InstructionSelectorT::VisitF64x2Max(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  InstructionOperand temp1 = g.TempFpRegister(v0);
  InstructionOperand mask_reg = g.TempFpRegister(v0);
  InstructionOperand temp2 = g.TempFpRegister(kSimd128ScratchReg);

  this->Emit(kRiscvVmfeqVv, temp1, g.UseRegister(this->input_at(node, 0)),
             g.UseRegister(this->input_at(node, 0)), g.UseImmediate(E64),
             g.UseImmediate(m1));
  this->Emit(kRiscvVmfeqVv, temp2, g.UseRegister(this->input_at(node, 1)),
             g.UseRegister(this->input_at(node, 1)), g.UseImmediate(E64),
             g.UseImmediate(m1));
  this->Emit(kRiscvVandVv, mask_reg, temp2, temp1, g.UseImmediate(E64),
             g.UseImmediate(m1));

  InstructionOperand temp3 = g.TempFpRegister(kSimd128ScratchReg);
  InstructionOperand temp4 = g.TempFpRegister(kSimd128ScratchReg);
  InstructionOperand temp5 = g.TempFpRegister(kSimd128ScratchReg);
  this->Emit(kRiscvVmv, temp3, g.UseImmediate(kNaN), g.UseImmediate(E64),
             g.UseImmediate(m1));
  this->Emit(kRiscvVsll, temp4, temp3, g.UseImmediate(kNaNShift),
             g.UseImmediate(E64), g.UseImmediate(m1));
  this->Emit(kRiscvVfmaxVv, temp5, g.UseRegister(this->input_at(node, 1)),
             g.UseRegister(this->input_at(node, 0)), g.UseImmediate(E64),
             g.UseImmediate(m1), g.UseImmediate(Mask));
  this->Emit(kRiscvVmv, g.DefineAsRegister(node), temp5, g.UseImmediate(E64),
             g.UseImmediate(m1));
}
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
  flags |= MachineOperatorBuilder::kInt32DivIsSafe |
           MachineOperatorBuilder::kUint32DivIsSafe |
           MachineOperatorBuilder::kFloat32RoundDown |
           MachineOperatorBuilder::kFloat32RoundUp |
           MachineOperatorBuilder::kFloat32RoundTruncate |
           MachineOperatorBuilder::kFloat32RoundTiesEven;
  if (CpuFeatures::IsSupported(ZBB)) {
    flags |= MachineOperatorBuilder::kWord32Ctz |
             MachineOperatorBuilder::kWord32Popcnt;
  }
  return flags;
}
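
// Flag summary: RISC-V integer divides never fault (division by zero and
// INT_MIN / -1 produce defined results rather than traps), so the
// Int32/Uint32DivIsSafe flags let the compiler omit guard code, and the
// float32 rounding modes are always available. Word32Ctz/Popcnt are only
// advertised when the Zbb bit-manipulation extension is present, matching
// the kRiscvCtz/kRiscvCpop selections earlier in this file.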