18using namespace turboshaft;
20#define TRACE(...) PrintF(__VA_ARGS__)
40 if ((constant->IsIntegral() && constant->integral() == 0) ||
41 (constant->kind == ConstantOp::Kind::kFloat32 &&
42 constant->float32().get_bits() == 0) ||
43 (constant->kind == ConstantOp::Kind::kFloat64 &&
44 constant->float64().get_bits() == 0)) {
65 if (!constant)
return false;
77 return is_uint5(value);
81 return is_uint6(value);
90 return is_uint16(value);
105 return is_int32(value);
107 return is_int16(value);
113 TRACE(
"UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__);
141 Mips64OperandGeneratorT g(selector);
142 selector->Emit(opcode, g.DefineAsRegister(node),
143 g.UseRegister(selector->input_at(node, 0)),
144 g.UseRegister(selector->input_at(node, 1)));
165struct ExtendingLoadMatcher {
195 DCHECK(shift.
kind == ShiftOp::Kind::kShiftRightArithmetic ||
196 shift.
kind == ShiftOp::Kind::kShiftRightArithmeticShiftOutZeros);
201 int64_t constant_rhs;
204 selector_->MatchIntegralWord64Constant(shift.
right(), &constant_rhs) &&
205 constant_rhs == 32 &&
selector_->CanCover(node, shift.
left())) {
211 if (load.index().has_value()) {
212 int64_t index_constant;
213 if (
selector_->MatchIntegralWord64Constant(load.index().value(),
229 ExtendingLoadMatcher
m(node, selector);
230 Mips64OperandGeneratorT g(selector);
232 InstructionOperand inputs[2];
233 inputs[0] = g.UseRegister(
m.base());
236 DCHECK(is_int32(
m.immediate()));
237 inputs[1] = g.TempImmediate(
static_cast<int32_t
>(
m.immediate()));
238 InstructionOperand outputs[] = {g.DefineAsRegister(output_node)};
248 size_t* input_count_return, InstructionOperand* inputs) {
249 Mips64OperandGeneratorT g(selector);
250 if (g.CanBeImmediate(node, *opcode_return)) {
252 inputs[0] = g.UseImmediate(node);
253 *input_count_return = 1;
265 size_t input_count = 0;
267 size_t output_count = 0;
277 }
else if (has_reverse_opcode &&
279 &input_count, &inputs[1])) {
281 opcode = reverse_opcode;
285 inputs[input_count++] = g.
UseOperand(right_node, opcode);
303 VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont);
308 VisitBinop(selector, node, opcode,
false, kArchNop, cont);
313 VisitBinop(selector, node, opcode,
false, kArchNop);
316void InstructionSelectorT::VisitStackSlot(
OpIndex node) {
322 Emit(kArchStackSlot, g.DefineAsRegister(node),
323 sequence()->AddImmediate(Constant(slot)), 0,
nullptr);
326void InstructionSelectorT::VisitAbortCSADcheck(
OpIndex node) {
327 Mips64OperandGeneratorT g(
this);
328 Emit(kArchAbortCSADcheck, g.NoOutput(),
329 g.UseFixed(this->input_at(node, 0), a0));
334 Mips64OperandGeneratorT g(selector);
335 const Operation& op = selector->Get(node);
336 const LoadOp& load = op.Cast<LoadOp>();
341 OpIndex index = load.index().value();
345 InstructionOperand inputs[3];
347 InstructionOperand output_op;
351 output_op = g.DefineAsRegister(output.valid() ? output : node);
353 const Operation& base_op = selector->Get(base);
356 selector->MatchSignedIntegralConstant(index, &index_value)) {
357 const ConstantOp& constant_base = base_op.Cast<ConstantOp>();
358 if (selector->CanAddressRelativeToRootsRegister(
359 constant_base.external_reference())) {
360 ptrdiff_t
const delta =
363 selector->isolate(), constant_base.external_reference());
367 if (is_int32(delta)) {
368 inputs[0] = g.UseImmediate(
static_cast<int32_t>(delta));
370 selector->Emit(opcode, 1, &output_op, input_count, inputs);
376 if (base_op.Is<LoadRootRegisterOp>()) {
378 CHECK(selector->MatchSignedIntegralConstant(index, &index_value));
380 inputs[0] = g.UseImmediate64(index_value);
382 selector->Emit(opcode, 1, &output_op, input_count, inputs);
386 if (g.CanBeImmediate(index, opcode)) {
388 g.DefineAsRegister(output.valid() ? output : node),
389 g.UseRegister(base), g.UseImmediate(index));
391 InstructionOperand addr_reg = g.TempRegister();
393 addr_reg, g.UseRegister(index), g.UseRegister(base));
396 g.DefineAsRegister(output.valid() ? output : node), addr_reg,
412 switch (load_rep.representation()) {
421 opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
424 opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
455void InstructionSelectorT::VisitProtectedLoad(
OpIndex node) {
462void InstructionSelectorT::VisitStore(
OpIndex node) {
463 Mips64OperandGeneratorT g(
this);
482 InstructionOperand
inputs[3];
489 InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
490 size_t const temp_count =
arraysize(temps);
493 Emit(code, 0,
nullptr, input_count,
inputs, temp_count, temps);
539 g.UseImmediate(index), g.UseRegisterOrImmediateZero(value));
543 if (g.CanBeImmediate(index,
opcode)) {
545 g.UseRegister(base), g.UseImmediate(index),
546 g.UseRegisterOrImmediateZero(value));
548 InstructionOperand addr_reg = g.TempRegister();
550 g.UseRegister(index), g.UseRegister(base));
553 addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
558void InstructionSelectorT::VisitProtectedStore(
OpIndex node) {
565 VisitBinop(
this, node, kMips64And32,
true, kMips64And32);
568void InstructionSelectorT::VisitWord64And(
OpIndex node) {
570 VisitBinop(
this, node, kMips64And,
true, kMips64And);
573void InstructionSelectorT::VisitWord32Or(
OpIndex node) {
574 VisitBinop(
this, node, kMips64Or32,
true, kMips64Or32);
577void InstructionSelectorT::VisitWord64Or(
OpIndex node) {
578 VisitBinop(
this, node, kMips64Or,
true, kMips64Or);
581void InstructionSelectorT::VisitWord32Xor(
OpIndex node) {
583 VisitBinop(
this, node, kMips64Xor32,
true, kMips64Xor32);
586void InstructionSelectorT::VisitWord64Xor(
OpIndex node) {
588 VisitBinop(
this, node, kMips64Xor,
true, kMips64Xor);
591void InstructionSelectorT::VisitWord32Shl(
OpIndex node) {
596void InstructionSelectorT::VisitWord32Shr(
OpIndex node) {
606void InstructionSelectorT::VisitWord64Shl(
OpIndex node) {
613 int64_t shift_by = rhs.Cast<ConstantOp>().signed_integral();
615 Mips64OperandGeneratorT g(
this);
618 Emit(kMips64Dshl, g.DefineAsRegister(node),
619 g.UseRegister(lhs.Cast<ChangeOp>().input()),
620 g.UseImmediate64(shift_by));
627void InstructionSelectorT::VisitWord64Shr(
OpIndex node) {
632void InstructionSelectorT::VisitWord64Sar(
OpIndex node) {
638 int64_t constant_rhs;
641 is_uint5(constant_rhs) &&
CanCover(node, shiftop.left())) {
642 OpIndex input = lhs.Cast<ChangeOp>().input();
644 Mips64OperandGeneratorT g(
this);
645 int right =
static_cast<int>(constant_rhs);
646 Emit(kMips64Sar, g.DefineAsRegister(node), g.UseRegister(input),
647 g.UseImmediate(right));
659void InstructionSelectorT::VisitWord32Ror(
OpIndex node) {
663void InstructionSelectorT::VisitWord32Clz(
OpIndex node) {
664 VisitRR(
this, kMips64Clz, node);
667void InstructionSelectorT::VisitWord32ReverseBits(
OpIndex node) {
671void InstructionSelectorT::VisitWord64ReverseBits(
OpIndex node) {
675void InstructionSelectorT::VisitWord64ReverseBytes(
OpIndex node) {
676 VisitRR(
this, kMips64ByteSwap64, node);
679void InstructionSelectorT::VisitWord32ReverseBytes(
OpIndex node) {
680 VisitRR(
this, kMips64ByteSwap32, node);
683void InstructionSelectorT::VisitSimd128ReverseBytes(
OpIndex node) {
687void InstructionSelectorT::VisitWord32Ctz(
OpIndex node) {
688 VisitRR(
this, kMips64Ctz, node);
691void InstructionSelectorT::VisitWord64Ctz(
OpIndex node) {
692 VisitRR(
this, kMips64Dctz, node);
695void InstructionSelectorT::VisitWord32Popcnt(
OpIndex node) {
696 VisitRR(
this, kMips64Popcnt, node);
699void InstructionSelectorT::VisitWord64Popcnt(
OpIndex node) {
700 VisitRR(
this, kMips64Dpopcnt, node);
703void InstructionSelectorT::VisitWord64Ror(
OpIndex node) {
707void InstructionSelectorT::VisitWord64Clz(
OpIndex node) {
708 VisitRR(
this, kMips64Dclz, node);
711void InstructionSelectorT::VisitInt32Add(
OpIndex node) {
713 VisitBinop(
this, node, kMips64Add,
true, kMips64Add);
716void InstructionSelectorT::VisitInt64Add(
OpIndex node) {
718 VisitBinop(
this, node, kMips64Dadd,
true, kMips64Dadd);
721void InstructionSelectorT::VisitInt32Sub(
OpIndex node) {
725void InstructionSelectorT::VisitInt64Sub(
OpIndex node) {
729void InstructionSelectorT::VisitInt32Mul(
OpIndex node) {
731 VisitBinop(
this, node, kMips64Mul,
true, kMips64Mul);
734void InstructionSelectorT::VisitInt32MulHigh(
OpIndex node) {
735 VisitRRR(
this, kMips64MulHigh, node);
738void InstructionSelectorT::VisitInt64MulHigh(
OpIndex node) {
739 VisitRRR(
this, kMips64DMulHigh, node);
742void InstructionSelectorT::VisitUint32MulHigh(
OpIndex node) {
743 VisitRRR(
this, kMips64MulHighU, node);
746void InstructionSelectorT::VisitUint64MulHigh(
OpIndex node) {
747 VisitRRR(
this, kMips64DMulHighU, node);
750void InstructionSelectorT::VisitInt64Mul(
OpIndex node) {
752 VisitBinop(
this, node, kMips64Dmul,
true, kMips64Dmul);
755void InstructionSelectorT::VisitInt32Div(
OpIndex node) {
756 Mips64OperandGeneratorT g(
this);
759 Emit(kMips64Div, g.DefineSameAsFirst(node), g.UseRegister(left),
760 g.UseRegister(right));
763void InstructionSelectorT::VisitUint32Div(
OpIndex node) {
764 Mips64OperandGeneratorT g(
this);
767 Emit(kMips64DivU, g.DefineSameAsFirst(node), g.UseRegister(left),
768 g.UseRegister(right));
771void InstructionSelectorT::VisitInt32Mod(
OpIndex node) {
772 Mips64OperandGeneratorT g(
this);
775 Emit(kMips64Mod, g.DefineSameAsFirst(node), g.UseRegister(left),
776 g.UseRegister(right));
779void InstructionSelectorT::VisitUint32Mod(
OpIndex node) {
783void InstructionSelectorT::VisitInt64Div(
OpIndex node) {
784 Mips64OperandGeneratorT g(
this);
787 Emit(kMips64Ddiv, g.DefineSameAsFirst(node), g.UseRegister(left),
788 g.UseRegister(right));
791void InstructionSelectorT::VisitUint64Div(
OpIndex node) {
792 Mips64OperandGeneratorT g(
this);
795 Emit(kMips64DdivU, g.DefineSameAsFirst(node), g.UseRegister(left),
796 g.UseRegister(right));
799void InstructionSelectorT::VisitInt64Mod(
OpIndex node) {
803void InstructionSelectorT::VisitUint64Mod(
OpIndex node) {
807void InstructionSelectorT::VisitChangeFloat32ToFloat64(
OpIndex node) {
808 VisitRR(
this, kMips64CvtDS, node);
811void InstructionSelectorT::VisitRoundInt32ToFloat32(
OpIndex node) {
812 VisitRR(
this, kMips64CvtSW, node);
815void InstructionSelectorT::VisitRoundUint32ToFloat32(
OpIndex node) {
816 VisitRR(
this, kMips64CvtSUw, node);
819void InstructionSelectorT::VisitChangeInt32ToFloat64(
OpIndex node) {
820 VisitRR(
this, kMips64CvtDW, node);
823void InstructionSelectorT::VisitChangeInt64ToFloat64(
OpIndex node) {
824 VisitRR(
this, kMips64CvtDL, node);
827void InstructionSelectorT::VisitChangeUint32ToFloat64(
OpIndex node) {
828 VisitRR(
this, kMips64CvtDUw, node);
831void InstructionSelectorT::VisitTruncateFloat32ToInt32(
OpIndex node) {
832 Mips64OperandGeneratorT g(
this);
839 g.UseRegister(this->input_at(node, 0)));
842void InstructionSelectorT::VisitTruncateFloat32ToUint32(
OpIndex node) {
843 Mips64OperandGeneratorT g(
this);
852 g.UseRegister(this->input_at(node, 0)));
855void InstructionSelectorT::VisitChangeFloat64ToInt32(
OpIndex node) {
856 VisitRR(
this, kMips64TruncWD, node);
859void InstructionSelectorT::VisitChangeFloat64ToInt64(
OpIndex node) {
860 VisitRR(
this, kMips64TruncLD, node);
863void InstructionSelectorT::VisitChangeFloat64ToUint32(
OpIndex node) {
864 VisitRR(
this, kMips64TruncUwD, node);
867void InstructionSelectorT::VisitChangeFloat64ToUint64(
OpIndex node) {
868 VisitRR(
this, kMips64TruncUlD, node);
871void InstructionSelectorT::VisitTruncateFloat64ToUint32(
OpIndex node) {
872 VisitRR(
this, kMips64TruncUwD, node);
875void InstructionSelectorT::VisitTruncateFloat64ToInt64(
OpIndex node) {
876 Mips64OperandGeneratorT g(
this);
884 Emit(
opcode, g.DefineAsRegister(node), g.UseRegister(op.input(0)));
887void InstructionSelectorT::VisitTruncateFloat64ToFloat16RawBits(
OpIndex node) {
891void InstructionSelectorT::VisitChangeFloat16RawBitsToFloat64(
OpIndex node) {
895void InstructionSelectorT::VisitTryTruncateFloat32ToInt64(
OpIndex node) {
896 Mips64OperandGeneratorT g(
this);
898 InstructionOperand
inputs[] = {g.UseRegister(this->
input_at(node, 0))};
899 InstructionOperand outputs[2];
900 size_t output_count = 0;
901 outputs[output_count++] = g.DefineAsRegister(node);
904 if (success_output.valid()) {
905 outputs[output_count++] = g.DefineAsRegister(success_output.value());
908 Emit(kMips64TruncLS, output_count, outputs, 1,
inputs);
911void InstructionSelectorT::VisitTryTruncateFloat64ToInt64(
OpIndex node) {
912 Mips64OperandGeneratorT g(
this);
914 InstructionOperand
inputs[] = {g.UseRegister(this->
input_at(node, 0))};
915 InstructionOperand outputs[2];
916 size_t output_count = 0;
917 outputs[output_count++] = g.DefineAsRegister(node);
920 if (success_output.valid()) {
921 outputs[output_count++] = g.DefineAsRegister(success_output.value());
924 Emit(kMips64TruncLD, output_count, outputs, 1,
inputs);
927void InstructionSelectorT::VisitTryTruncateFloat32ToUint64(
OpIndex node) {
928 Mips64OperandGeneratorT g(
this);
930 InstructionOperand
inputs[] = {g.UseRegister(this->
input_at(node, 0))};
931 InstructionOperand outputs[2];
932 size_t output_count = 0;
933 outputs[output_count++] = g.DefineAsRegister(node);
936 if (success_output.valid()) {
937 outputs[output_count++] = g.DefineAsRegister(success_output.value());
940 Emit(kMips64TruncUlS, output_count, outputs, 1,
inputs);
943void InstructionSelectorT::VisitTryTruncateFloat64ToUint64(
OpIndex node) {
944 Mips64OperandGeneratorT g(
this);
946 InstructionOperand
inputs[] = {g.UseRegister(this->
input_at(node, 0))};
947 InstructionOperand outputs[2];
948 size_t output_count = 0;
949 outputs[output_count++] = g.DefineAsRegister(node);
952 if (success_output.valid()) {
953 outputs[output_count++] = g.DefineAsRegister(success_output.value());
956 Emit(kMips64TruncUlD, output_count, outputs, 1,
inputs);
959void InstructionSelectorT::VisitTryTruncateFloat64ToInt32(
OpIndex node) {
960 Mips64OperandGeneratorT g(
this);
962 InstructionOperand
inputs[] = {g.UseRegister(this->
input_at(node, 0))};
963 InstructionOperand outputs[2];
964 size_t output_count = 0;
965 outputs[output_count++] = g.DefineAsRegister(node);
968 if (success_output.valid()) {
969 outputs[output_count++] = g.DefineAsRegister(success_output.value());
972 Emit(kMips64TruncWD, output_count, outputs, 1,
inputs);
975void InstructionSelectorT::VisitTryTruncateFloat64ToUint32(
OpIndex node) {
976 Mips64OperandGeneratorT g(
this);
978 InstructionOperand
inputs[] = {g.UseRegister(this->
input_at(node, 0))};
979 InstructionOperand outputs[2];
980 size_t output_count = 0;
981 outputs[output_count++] = g.DefineAsRegister(node);
984 if (success_output.valid()) {
985 outputs[output_count++] = g.DefineAsRegister(success_output.value());
988 Emit(kMips64TruncUwD, output_count, outputs, 1,
inputs);
991void InstructionSelectorT::VisitBitcastWord32ToWord64(
OpIndex node) {
995void InstructionSelectorT::VisitChangeInt32ToInt64(
OpIndex node) {
996 Mips64OperandGeneratorT g(
this);
998 const Operation& input_op = this->
Get(change_op.input());
999 if (input_op.Is<LoadOp>() &&
CanCover(node, change_op.input())) {
1008 opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
1011 opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
1022 CanCover(node, change_op.input())) {
1026 Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(change_op.input()),
1027 g.TempImmediate(0));
1030bool InstructionSelectorT::ZeroExtendsWord32ToWord64NoPhis(
OpIndex node) {
1033 switch (op.opcode) {
1035 case Opcode::kComparison:
1037 case Opcode::kOverflowCheckedBinop:
1038 return op.Cast<OverflowCheckedBinopOp>().rep ==
1040 case Opcode::kLoad: {
1043 if (load_rep.IsUnsigned()) {
1044 switch (load_rep.representation()) {
1060void InstructionSelectorT::VisitChangeUint32ToUint64(
OpIndex node) {
1061 Mips64OperandGeneratorT g(
this);
1063 OpIndex input = change_op.input();
1066 if (input_op.Is<LoadOp>() &&
CanCover(node, input)) {
1069 if (load_rep.IsUnsigned() &&
1071 EmitLoad(
this, input, kMips64Lwu, node);
1075 if (ZeroExtendsWord32ToWord64(input)) {
1079 Emit(kMips64Dext, g.DefineAsRegister(node), g.UseRegister(input),
1080 g.TempImmediate(0), g.TempImmediate(32));
1083void InstructionSelectorT::VisitTruncateInt64ToInt32(
OpIndex node) {
1084 Mips64OperandGeneratorT g(
this);
1088 auto shift_value =
input_at(value, 1);
1092 }
else if (int64_t constant;
1094 if (constant >= 32 && constant <= 63) {
1096 Emit(kMips64Dsar, g.DefineAsRegister(node),
1098 g.UseImmediate(
input_at(value, 1)));
1104 Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(value),
1105 g.TempImmediate(0));
1108void InstructionSelectorT::VisitTruncateFloat64ToFloat32(
OpIndex node) {
1109 VisitRR(
this, kMips64CvtSD, node);
1112void InstructionSelectorT::VisitTruncateFloat64ToWord32(
OpIndex node) {
1113 VisitRR(
this, kArchTruncateDoubleToI, node);
1116void InstructionSelectorT::VisitRoundFloat64ToInt32(
OpIndex node) {
1117 VisitRR(
this, kMips64TruncWD, node);
1120void InstructionSelectorT::VisitRoundInt64ToFloat32(
OpIndex node) {
1121 VisitRR(
this, kMips64CvtSL, node);
1124void InstructionSelectorT::VisitRoundInt64ToFloat64(
OpIndex node) {
1125 VisitRR(
this, kMips64CvtDL, node);
1128void InstructionSelectorT::VisitRoundUint64ToFloat32(
OpIndex node) {
1129 VisitRR(
this, kMips64CvtSUl, node);
1132void InstructionSelectorT::VisitRoundUint64ToFloat64(
OpIndex node) {
1133 VisitRR(
this, kMips64CvtDUl, node);
1136void InstructionSelectorT::VisitBitcastFloat32ToInt32(
OpIndex node) {
1137 VisitRR(
this, kMips64Float64ExtractLowWord32, node);
1140void InstructionSelectorT::VisitBitcastFloat64ToInt64(
OpIndex node) {
1141 VisitRR(
this, kMips64BitcastDL, node);
1144void InstructionSelectorT::VisitBitcastInt32ToFloat32(
OpIndex node) {
1148 VisitRR(
this, kMips64BitcastLD, node);
1151void InstructionSelectorT::VisitBitcastInt64ToFloat64(
OpIndex node) {
1152 VisitRR(
this, kMips64BitcastLD, node);
1155void InstructionSelectorT::VisitFloat32Add(
OpIndex node) {
1161void InstructionSelectorT::VisitFloat64Add(
OpIndex node) {
1167void InstructionSelectorT::VisitFloat32Sub(
OpIndex node) {
1173void InstructionSelectorT::VisitFloat64Sub(
OpIndex node) {
1179void InstructionSelectorT::VisitFloat32Mul(
OpIndex node) {
1183void InstructionSelectorT::VisitFloat64Mul(
OpIndex node) {
1187void InstructionSelectorT::VisitFloat32Div(
OpIndex node) {
1191void InstructionSelectorT::VisitFloat64Div(
OpIndex node) {
1195void InstructionSelectorT::VisitFloat64Mod(
OpIndex node) {
1196 Mips64OperandGeneratorT g(
this);
1197 Emit(kMips64ModD, g.DefineAsFixed(node, f0),
1198 g.UseFixed(this->input_at(node, 0), f12),
1199 g.UseFixed(this->input_at(node, 1), f14))
1203void InstructionSelectorT::VisitFloat32Max(
OpIndex node) {
1204 VisitRRR(
this, kMips64Float32Max, node);
1207void InstructionSelectorT::VisitFloat64Max(
OpIndex node) {
1208 VisitRRR(
this, kMips64Float64Max, node);
1211void InstructionSelectorT::VisitFloat32Min(
OpIndex node) {
1212 VisitRRR(
this, kMips64Float32Min, node);
1215void InstructionSelectorT::VisitFloat64Min(
OpIndex node) {
1216 VisitRRR(
this, kMips64Float64Min, node);
1219void InstructionSelectorT::VisitFloat32Abs(
OpIndex node) {
1220 VisitRR(
this, kMips64AbsS, node);
1223void InstructionSelectorT::VisitFloat64Abs(
OpIndex node) {
1224 VisitRR(
this, kMips64AbsD, node);
1227void InstructionSelectorT::VisitFloat32Sqrt(
OpIndex node) {
1228 VisitRR(
this, kMips64SqrtS, node);
1231void InstructionSelectorT::VisitFloat64Sqrt(
OpIndex node) {
1232 VisitRR(
this, kMips64SqrtD, node);
1235void InstructionSelectorT::VisitFloat32RoundDown(
OpIndex node) {
1236 VisitRR(
this, kMips64Float32RoundDown, node);
1239void InstructionSelectorT::VisitFloat64RoundDown(
OpIndex node) {
1240 VisitRR(
this, kMips64Float64RoundDown, node);
1243void InstructionSelectorT::VisitFloat32RoundUp(
OpIndex node) {
1244 VisitRR(
this, kMips64Float32RoundUp, node);
1247void InstructionSelectorT::VisitFloat64RoundUp(
OpIndex node) {
1248 VisitRR(
this, kMips64Float64RoundUp, node);
1251void InstructionSelectorT::VisitFloat32RoundTruncate(
OpIndex node) {
1252 VisitRR(
this, kMips64Float32RoundTruncate, node);
1255void InstructionSelectorT::VisitFloat64RoundTruncate(
OpIndex node) {
1256 VisitRR(
this, kMips64Float64RoundTruncate, node);
1259void InstructionSelectorT::VisitFloat64RoundTiesAway(
OpIndex node) {
1263void InstructionSelectorT::VisitFloat32RoundTiesEven(
OpIndex node) {
1264 VisitRR(
this, kMips64Float32RoundTiesEven, node);
1267void InstructionSelectorT::VisitFloat64RoundTiesEven(
OpIndex node) {
1268 VisitRR(
this, kMips64Float64RoundTiesEven, node);
1271void InstructionSelectorT::VisitFloat32Neg(
OpIndex node) {
1272 VisitRR(
this, kMips64NegS, node);
1275void InstructionSelectorT::VisitFloat64Neg(
OpIndex node) {
1276 VisitRR(
this, kMips64NegD, node);
1281 Mips64OperandGeneratorT g(
this);
1283 g.UseFixed(this->input_at(node, 0), f2),
1284 g.UseFixed(this->input_at(node, 1), f4))
1290 Mips64OperandGeneratorT g(
this);
1292 g.UseFixed(this->input_at(node, 0), f12))
1299 LinkageLocation location) {}
1302 ZoneVector<PushParameter>* arguments,
const CallDescriptor* call_descriptor,
1304 Mips64OperandGeneratorT g(
this);
1307 if (call_descriptor->IsCFunctionCall()) {
1309 call_descriptor->ParameterCount())),
1310 0,
nullptr, 0,
nullptr);
1315 Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
1320 int push_count =
static_cast<int>(call_descriptor->ParameterSlotCount());
1321 if (push_count > 0) {
1325 if (input.node.valid()) {
1326 stack_size += input.location.GetSizeInPointers();
1329 Emit(kMips64StackClaim, g.NoOutput(),
1332 for (
size_t n = 0; n < arguments->size(); ++
n) {
1334 if (input.node.valid()) {
1335 Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
1343 ZoneVector<PushParameter>* results,
const CallDescriptor* call_descriptor,
1345 Mips64OperandGeneratorT g(
this);
1348 if (!output.location.IsCallerFrameSlot())
continue;
1350 if (output.node.valid()) {
1351 DCHECK(!call_descriptor->IsCFunctionCall());
1359 int offset = call_descriptor->GetOffsetToReturns();
1360 int reverse_slot = -output.location.GetLocation() -
offset;
1361 Emit(kMips64Peek, g.DefineAsRegister(output.node),
1362 g.UseImmediate(reverse_slot));
1369void InstructionSelectorT::VisitUnalignedLoad(
OpIndex node) {
1374 switch (load_rep.representation()) {
1382 opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
1385 opcode = load_rep.IsUnsigned() ? kMips64Ulhu : kMips64Ulh;
1417void InstructionSelectorT::VisitUnalignedStore(
OpIndex node) {
1418 Mips64OperandGeneratorT g(
this);
1468 if (g.CanBeImmediate(index,
opcode)) {
1470 g.UseRegister(base), g.UseImmediate(index),
1471 g.UseRegisterOrImmediateZero(value));
1473 InstructionOperand addr_reg = g.TempRegister();
1475 g.UseRegister(index), g.UseRegister(base));
1478 addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
1485static Instruction*
VisitCompare(InstructionSelectorT* selector,
1487 InstructionOperand left,
1488 InstructionOperand right,
1489 FlagsContinuationT* cont) {
1490 return selector->EmitWithContinuation(opcode, left, right, cont);
1495 FlagsContinuationT* cont) {
1496 Mips64OperandGeneratorT g(selector);
1500 InstructionOperand lhs, rhs;
1502 lhs = selector->MatchZero(left) ? g.UseImmediate(left) : g.UseRegister(left);
1504 selector->MatchZero(right) ? g.UseImmediate(right) : g.UseRegister(right);
1510 FlagsContinuationT* cont) {
1511 Mips64OperandGeneratorT g(selector);
1516 if (selector->MatchZero(rhs)) {
1517 VisitCompare(selector, kMips64CmpD, g.UseRegister(lhs), g.UseImmediate(rhs),
1519 }
else if (selector->MatchZero(lhs)) {
1520 VisitCompare(selector, kMips64CmpD, g.UseImmediate(lhs), g.UseRegister(rhs),
1523 VisitCompare(selector, kMips64CmpD, g.UseRegister(lhs), g.UseRegister(rhs),
1532 Mips64OperandGeneratorT g(selector);
1533 DCHECK_EQ(selector->value_input_count(node), 2);
1534 auto left = selector->input_at(node, 0);
1535 auto right = selector->input_at(node, 1);
1538 if (g.CanBeImmediate(right, opcode)) {
1539 if (opcode == kMips64Tst) {
1540 return VisitCompare(selector, opcode, g.UseRegister(left),
1541 g.UseImmediate(right), cont);
1543 switch (cont->condition()) {
1546 if (cont->IsSet()) {
1547 return VisitCompare(selector, opcode, g.UseRegister(left),
1548 g.UseImmediate(right), cont);
1550 return VisitCompare(selector, opcode, g.UseRegister(left),
1551 g.UseRegister(right), cont);
1557 return VisitCompare(selector, opcode, g.UseRegister(left),
1558 g.UseImmediate(right), cont);
1560 return VisitCompare(selector, opcode, g.UseRegister(left),
1561 g.UseRegister(right), cont);
1564 }
else if (g.CanBeImmediate(left, opcode)) {
1565 if (!commutative) cont->Commute();
1566 if (opcode == kMips64Tst) {
1567 return VisitCompare(selector, opcode, g.UseRegister(right),
1568 g.UseImmediate(left), cont);
1570 switch (cont->condition()) {
1573 if (cont->IsSet()) {
1574 return VisitCompare(selector, opcode, g.UseRegister(right),
1575 g.UseImmediate(left), cont);
1577 return VisitCompare(selector, opcode, g.UseRegister(right),
1578 g.UseRegister(left), cont);
1584 return VisitCompare(selector, opcode, g.UseRegister(right),
1585 g.UseImmediate(left), cont);
1587 return VisitCompare(selector, opcode, g.UseRegister(right),
1588 g.UseRegister(left), cont);
1592 return VisitCompare(selector, opcode, g.UseRegister(left),
1593 g.UseRegister(right), cont);
1598void VisitFullWord32Compare(InstructionSelectorT* selector,
OpIndex node,
1600 Mips64OperandGeneratorT g(selector);
1601 InstructionOperand leftOp = g.TempRegister();
1602 InstructionOperand rightOp = g.TempRegister();
1604 selector->Emit(kMips64Dshl, leftOp,
1605 g.UseRegister(selector->input_at(node, 0)),
1606 g.TempImmediate(32));
1607 selector->Emit(kMips64Dshl, rightOp,
1608 g.UseRegister(selector->input_at(node, 1)),
1609 g.TempImmediate(32));
1612 selector->UpdateSourcePosition(
instr, node);
1615void VisitWord32Compare(InstructionSelectorT* selector,
OpIndex node,
1616 FlagsContinuationT* cont) {
1617 VisitFullWord32Compare(selector, node, kMips64Cmp, cont);
1620void VisitWord64Compare(InstructionSelectorT* selector,
OpIndex node,
1621 FlagsContinuationT* cont) {
1626 FlagsContinuationT* cont) {
1627 Mips64OperandGeneratorT g(selector);
1628 selector->EmitWithContinuation(kMips64Cmp, g.UseRegister(value),
1629 g.TempImmediate(0), cont);
1632void VisitAtomicLoad(InstructionSelectorT* selector,
OpIndex node,
1635 Mips64OperandGeneratorT g(selector);
1636 auto load = selector->load_view(node);
1643 switch (load_rep.representation()) {
1646 code = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
1650 code = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
1653 code = kAtomicLoadWord32;
1656 code = kMips64Word64AtomicLoadUint64;
1662 code = kMips64Word64AtomicLoadUint64;
1668 if (g.CanBeImmediate(index, code)) {
1671 g.DefineAsRegister(node), g.UseRegister(base),
1672 g.UseImmediate(index));
1674 InstructionOperand addr_reg = g.TempRegister();
1676 addr_reg, g.UseRegister(index), g.UseRegister(base));
1680 g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
1686 auto store = selector->store_view(node);
1687 return AtomicStoreParameters(store.stored_rep().representation(),
1688 store.stored_rep().write_barrier_kind(),
1689 store.memory_order().value(),
1690 store.access_kind());
1693void VisitAtomicStore(InstructionSelectorT* selector,
OpIndex node,
1696 Mips64OperandGeneratorT g(selector);
1697 auto store = selector->store_view(node);
1699 OpIndex index = selector->value(store.index());
1700 OpIndex value = store.value();
1708 if (
v8_flags.enable_unconditional_write_barriers &&
1716 !
v8_flags.disable_write_barriers) {
1720 InstructionOperand inputs[3];
1722 inputs[
input_count++] = g.UseUniqueRegister(base);
1723 inputs[
input_count++] = g.UseUniqueRegister(index);
1724 inputs[
input_count++] = g.UseUniqueRegister(value);
1727 InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
1728 size_t const temp_count =
arraysize(temps);
1729 code = kArchAtomicStoreWithWriteBarrier;
1731 selector->Emit(code, 0,
nullptr, input_count, inputs, temp_count, temps);
1735 code = kAtomicStoreWord8;
1738 code = kAtomicStoreWord16;
1741 code = kAtomicStoreWord32;
1745 code = kMips64Word64AtomicStoreWord64;
1751 code = kMips64StoreCompressTagged;
1758 if (g.CanBeImmediate(index, code)) {
1761 g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
1762 g.UseRegisterOrImmediateZero(value));
1764 InstructionOperand addr_reg = g.TempRegister();
1766 addr_reg, g.UseRegister(index), g.UseRegister(base));
1770 g.NoOutput(), addr_reg, g.TempImmediate(0),
1771 g.UseRegisterOrImmediateZero(value));
1779 Mips64OperandGeneratorT g(selector);
1780 const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(
node);
1782 OpIndex index = atomic_op.index();
1783 OpIndex value = atomic_op.value();
1786 InstructionOperand inputs[3];
1788 inputs[
input_count++] = g.UseUniqueRegister(base);
1789 inputs[
input_count++] = g.UseUniqueRegister(index);
1790 inputs[
input_count++] = g.UseUniqueRegister(value);
1791 InstructionOperand outputs[1];
1792 outputs[0] = g.UseUniqueRegister(node);
1793 InstructionOperand temp[3];
1794 temp[0] = g.TempRegister();
1795 temp[1] = g.TempRegister();
1796 temp[2] = g.TempRegister();
1799 selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
1805 Mips64OperandGeneratorT g(selector);
1806 const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(
node);
1808 OpIndex index = atomic_op.index();
1809 OpIndex old_value = atomic_op.expected().value();
1810 OpIndex new_value = atomic_op.value();
1813 InstructionOperand inputs[4];
1815 inputs[
input_count++] = g.UseUniqueRegister(base);
1816 inputs[
input_count++] = g.UseUniqueRegister(index);
1817 inputs[
input_count++] = g.UseUniqueRegister(old_value);
1818 inputs[
input_count++] = g.UseUniqueRegister(new_value);
1819 InstructionOperand outputs[1];
1820 outputs[0] = g.UseUniqueRegister(node);
1821 InstructionOperand temp[3];
1822 temp[0] = g.TempRegister();
1823 temp[1] = g.TempRegister();
1824 temp[2] = g.TempRegister();
1827 selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
1833 Mips64OperandGeneratorT g(selector);
1834 const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(
node);
1836 OpIndex index = atomic_op.index();
1837 OpIndex value = atomic_op.value();
1840 InstructionOperand inputs[3];
1842 inputs[
input_count++] = g.UseUniqueRegister(base);
1843 inputs[
input_count++] = g.UseUniqueRegister(index);
1844 inputs[
input_count++] = g.UseUniqueRegister(value);
1845 InstructionOperand outputs[1];
1846 outputs[0] = g.UseUniqueRegister(node);
1847 InstructionOperand temps[4];
1848 temps[0] = g.TempRegister();
1849 temps[1] = g.TempRegister();
1850 temps[2] = g.TempRegister();
1851 temps[3] = g.TempRegister();
1854 selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
1860 OpIndex node, FlagsContinuationT* cont) {
1867 value = op.stack_limit();
1871 Mips64OperandGeneratorT g(
this);
1874 InstructionOperand*
const outputs =
nullptr;
1875 const int output_count = 0;
1881 InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
1887 InstructionOperand
inputs[] = {g.UseRegisterWithMode(value, register_mode)};
1891 temp_count, temps, cont);
// VisitWordCompareZero (header split across missing lines): lowers a
// compare-against-zero by looking through equal-to-zero wrappers, then
// dispatching on the covered operation (comparison, overflow projection,
// stack check). Many interior lines are missing from this capture; code
// bytes are preserved verbatim.
1896 FlagsContinuation* cont) {
1898 Mips64OperandGeneratorT g(
this);
// Peel `x == 0` wrappers, negating the continuation each time
// (loop condition lines missing from capture).
1900 while (
const ComparisonOp*
equal =
1906 value =
equal->left();
// Dispatch on the operation producing `value` if it can be covered.
1911 if (
const ComparisonOp* comparison = value_op.TryCast<ComparisonOp>()) {
// Branch on the comparison's representation (Word32/Word64/Float cases;
// several case labels are on missing lines).
1912 switch (comparison->rep.value()) {
1914 cont->OverwriteAndNegateIfEqual(
1916 return VisitWord32Compare(
this, value, cont);
1919 cont->OverwriteAndNegateIfEqual(
1921 return VisitWord64Compare(
this, value, cont);
// Float32 comparison kinds (return statements on missing lines).
1925 case ComparisonOp::Kind::kEqual:
1926 cont->OverwriteAndNegateIfEqual(
kEqual);
1928 case ComparisonOp::Kind::kSignedLessThan:
1931 case ComparisonOp::Kind::kSignedLessThanOrEqual:
// Float64 comparison kinds (return statements on missing lines).
1940 case ComparisonOp::Kind::kEqual:
1941 cont->OverwriteAndNegateIfEqual(
kEqual);
1943 case ComparisonOp::Kind::kSignedLessThan:
1946 case ComparisonOp::Kind::kSignedLessThanOrEqual:
1956 }
// Overflow projections: re-route the kOverflow flag into the binop itself.
else if (
const ProjectionOp* projection =
1957 value_op.TryCast<ProjectionOp>()) {
1960 if (projection->index == 1u) {
1966 OpIndex node = projection->input();
// NOTE(review): `is64` computation and the TryCast condition tail
// (orig. lines 1970-1971) are missing from this capture.
1969 if (
const OverflowCheckedBinopOp* binop =
1972 switch (binop->kind) {
1973 case OverflowCheckedBinopOp::Kind::kSignedAdd:
1974 cont->OverwriteAndNegateIfEqual(
kOverflow);
1976 is64 ? kMips64DaddOvf : kMips64Dadd, cont);
1977 case OverflowCheckedBinopOp::Kind::kSignedSub:
1978 cont->OverwriteAndNegateIfEqual(
kOverflow);
1980 is64 ? kMips64DsubOvf : kMips64Dsub, cont);
1981 case OverflowCheckedBinopOp::Kind::kSignedMul:
1982 cont->OverwriteAndNegateIfEqual(
kOverflow);
1984 this, node, is64 ? kMips64DMulOvf : kMips64MulOvf, cont);
1993 }
// Stack checks get their dedicated condition.
else if (value_op.Is<StackPointerGreaterThanOp>()) {
1994 cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
1995 return VisitStackPointerGreaterThan(value, cont);
2004void InstructionSelectorT::VisitSwitch(OpIndex node,
const SwitchInfo& sw) {
2005 Mips64OperandGeneratorT g(
this);
2006 InstructionOperand value_operand = g.UseRegister(this->input_at(node, 0));
2009 if (enable_switch_jump_table_ ==
2010 InstructionSelector::kEnableSwitchJumpTable) {
2011 static const size_t kMaxTableSwitchValueRange = 2 << 16;
2012 size_t table_space_cost = 10 + 2 * sw.value_range();
2013 size_t table_time_cost = 3;
2014 size_t lookup_space_cost = 2 + 2 * sw.case_count();
2015 size_t lookup_time_cost = sw.case_count();
2016 if (sw.case_count() > 0 &&
2017 table_space_cost + 3 * table_time_cost <=
2018 lookup_space_cost + 3 * lookup_time_cost &&
2019 sw.min_value() > std::numeric_limits<int32_t>::min() &&
2020 sw.value_range() <= kMaxTableSwitchValueRange) {
2021 InstructionOperand index_operand = value_operand;
2022 if (sw.min_value()) {
2023 index_operand = g.TempRegister();
2024 Emit(kMips64Sub, index_operand, value_operand,
2025 g.TempImmediate(sw.min_value()));
2028 return EmitTableSwitch(sw, index_operand);
2033 return EmitBinarySearchSwitch(sw, value_operand);
// Word32 equality: comparing against zero is lowered through
// VisitWordCompareZero (which can fold into control flow); otherwise a
// normal Word32 compare is emitted.
2036void InstructionSelectorT::VisitWord32Equal(OpIndex node) {
// NOTE(review): declarations of `user`, `left` and `right`
// (orig. lines 2037-2041) are missing from this capture — verify upstream.
2042 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2044 if (MatchZero(right)) {
2045 return VisitWordCompareZero(user, left, &cont);
2048 VisitWord32Compare(
this, node, &cont);
2051void InstructionSelectorT::VisitInt32LessThan(OpIndex node) {
2052 FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
2053 VisitWord32Compare(
this, node, &cont);
2056void InstructionSelectorT::VisitInt32LessThanOrEqual(OpIndex node) {
2057 FlagsContinuation cont =
2058 FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
2059 VisitWord32Compare(
this, node, &cont);
2062void InstructionSelectorT::VisitUint32LessThan(OpIndex node) {
2063 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2064 VisitWord32Compare(
this, node, &cont);
2067void InstructionSelectorT::VisitUint32LessThanOrEqual(OpIndex node) {
2068 FlagsContinuation cont =
2069 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2070 VisitWord32Compare(
this, node, &cont);
// Overflow-checked arithmetic visitors. Shared pattern: when the overflow
// projection is live, route kOverflow into a "set" continuation on the
// binop; otherwise emit the plain binop. NOTE(review): in each function
// below the `ovf = FindProjection(node, 1)`-style line and the
// else-branch/closing-brace lines are missing from this capture — verify
// against upstream before editing.
2073void InstructionSelectorT::VisitInt32AddWithOverflow(OpIndex node) {
2075 if (ovf.valid() && IsUsed(ovf.value())) {
2076 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
2077 return VisitBinop(
this, node, kMips64Dadd, &cont);
2080 FlagsContinuation cont;
// 32-bit subtract with overflow (emitted via the 64-bit dsub opcode).
2084void InstructionSelectorT::VisitInt32SubWithOverflow(OpIndex node) {
2087 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
2088 return VisitBinop(
this, node, kMips64Dsub, &cont);
2091 FlagsContinuation cont;
// 32-bit multiply with overflow check.
2095void InstructionSelectorT::VisitInt32MulWithOverflow(OpIndex node) {
2098 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
2099 return VisitBinop(
this, node, kMips64MulOvf, &cont);
2102 FlagsContinuation cont;
2103 VisitBinop(
this, node, kMips64MulOvf, &cont);
// 64-bit multiply with overflow check.
2106void InstructionSelectorT::VisitInt64MulWithOverflow(OpIndex node) {
2109 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
2110 return VisitBinop(
this, node, kMips64DMulOvf, &cont);
2113 FlagsContinuation cont;
2114 VisitBinop(
this, node, kMips64DMulOvf, &cont);
// 64-bit add with overflow check.
2117void InstructionSelectorT::VisitInt64AddWithOverflow(OpIndex node) {
2120 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
2121 return VisitBinop(
this, node, kMips64DaddOvf, &cont);
2124 FlagsContinuation cont;
2125 VisitBinop(
this, node, kMips64DaddOvf, &cont);
// 64-bit subtract with overflow check.
2128void InstructionSelectorT::VisitInt64SubWithOverflow(OpIndex node) {
2131 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
2132 return VisitBinop(
this, node, kMips64DsubOvf, &cont);
2135 FlagsContinuation cont;
2136 VisitBinop(
this, node, kMips64DsubOvf, &cont);
2139void InstructionSelectorT::VisitWord64Equal(OpIndex node) {
2140 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2141 VisitWord64Compare(
this, node, &cont);
2144void InstructionSelectorT::VisitInt64LessThan(OpIndex node) {
2145 FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
2146 VisitWord64Compare(
this, node, &cont);
2149void InstructionSelectorT::VisitInt64LessThanOrEqual(OpIndex node) {
2150 FlagsContinuation cont =
2151 FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
2152 VisitWord64Compare(
this, node, &cont);
2155void InstructionSelectorT::VisitUint64LessThan(OpIndex node) {
2156 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2157 VisitWord64Compare(
this, node, &cont);
2160void InstructionSelectorT::VisitUint64LessThanOrEqual(OpIndex node) {
2161 FlagsContinuation cont =
2162 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2163 VisitWord64Compare(
this, node, &cont);
// Floating-point comparison visitors. Unordered (NaN-aware) conditions use
// the kUnsignedLessThan* flag encodings. NOTE(review): in each function
// below the trailing VisitFloat32Compare/VisitFloat64Compare call and
// closing brace lines are missing from this capture — verify upstream.
2166void InstructionSelectorT::VisitFloat32Equal(OpIndex node) {
2167 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2171void InstructionSelectorT::VisitFloat32LessThan(OpIndex node) {
2172 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2176void InstructionSelectorT::VisitFloat32LessThanOrEqual(OpIndex node) {
2177 FlagsContinuation cont =
2178 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2182void InstructionSelectorT::VisitFloat64Equal(OpIndex node) {
2183 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2187void InstructionSelectorT::VisitFloat64LessThan(OpIndex node) {
2188 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2192void InstructionSelectorT::VisitFloat64LessThanOrEqual(OpIndex node) {
2193 FlagsContinuation cont =
2194 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2198void InstructionSelectorT::VisitFloat64ExtractLowWord32(OpIndex node) {
2199 VisitRR(
this, kMips64Float64ExtractLowWord32, node);
2202void InstructionSelectorT::VisitFloat64ExtractHighWord32(OpIndex node) {
2203 VisitRR(
this, kMips64Float64ExtractHighWord32, node);
2206void InstructionSelectorT::VisitFloat64SilenceNaN(OpIndex node) {
2207 VisitRR(
this, kMips64Float64SilenceNaN, node);
// Combines a (hi, lo) pair of word32 values into one float64 register.
2210void InstructionSelectorT::VisitBitcastWord32PairToFloat64(OpIndex node) {
2211 Mips64OperandGeneratorT g(
this);
2212 const auto& bitcast = this->Cast<BitcastWord32PairToFloat64Op>(node);
// NOTE(review): extraction of `hi`/`lo` from `bitcast`
// (orig. lines 2213-2215) is missing from this capture.
2216 InstructionOperand temps[] = {g.TempRegister()};
// Emit call is truncated: the low-word input and the temps arguments
// (orig. lines 2218-2219) are on missing lines.
2217 Emit(kMips64Float64FromWord32Pair, g.DefineAsRegister(node), g.Use(hi),
2221void InstructionSelectorT::VisitMemoryBarrier(OpIndex node) {
2222 Mips64OperandGeneratorT g(
this);
2223 Emit(kMips64Sync, g.NoOutput());
2226void InstructionSelectorT::VisitWord32AtomicLoad(OpIndex node) {
2227 VisitAtomicLoad(
this, node, AtomicWidth::kWord32);
2230void InstructionSelectorT::VisitWord32AtomicStore(OpIndex node) {
2231 VisitAtomicStore(
this, node, AtomicWidth::kWord32);
2234void InstructionSelectorT::VisitWord64AtomicLoad(OpIndex node) {
2235 VisitAtomicLoad(
this, node, AtomicWidth::kWord64);
2238void InstructionSelectorT::VisitWord64AtomicStore(OpIndex node) {
2239 VisitAtomicStore(
this, node, AtomicWidth::kWord64);
// Atomic exchange / compare-exchange visitors: map the memory
// representation onto the matching opcode. NOTE(review): in each function
// below the `opcode` declaration, the final else/UNREACHABLE branch, and
// the trailing VisitAtomicExchange/VisitAtomicCompareExchange call are on
// lines missing from this capture — verify upstream.
2242void InstructionSelectorT::VisitWord32AtomicExchange(OpIndex node) {
2243 const AtomicRMWOp& atomic_op = this->
Get(node).template Cast<AtomicRMWOp>();
2245 if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
2246 opcode = kAtomicExchangeInt8;
2247 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2248 opcode = kAtomicExchangeUint8;
2249 }
else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
2250 opcode = kAtomicExchangeInt16;
2251 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2252 opcode = kAtomicExchangeUint16;
2253 }
else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
2254 atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2255 opcode = kAtomicExchangeWord32;
// 64-bit exchange: only unsigned representations occur here.
2262void InstructionSelectorT::VisitWord64AtomicExchange(OpIndex node) {
2263 const AtomicRMWOp& atomic_op = this->
Get(node).template Cast<AtomicRMWOp>();
2265 if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2266 opcode = kAtomicExchangeUint8;
2267 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2268 opcode = kAtomicExchangeUint16;
2269 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2270 opcode = kAtomicExchangeWord32;
2271 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
2272 opcode = kMips64Word64AtomicExchangeUint64;
// 32-bit compare-exchange.
2279void InstructionSelectorT::VisitWord32AtomicCompareExchange(OpIndex node) {
2280 const AtomicRMWOp& atomic_op = this->
Get(node).template Cast<AtomicRMWOp>();
2282 if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
2283 opcode = kAtomicCompareExchangeInt8;
2284 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2285 opcode = kAtomicCompareExchangeUint8;
2286 }
else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
2287 opcode = kAtomicCompareExchangeInt16;
2288 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2289 opcode = kAtomicCompareExchangeUint16;
2290 }
else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
2291 atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2292 opcode = kAtomicCompareExchangeWord32;
// 64-bit compare-exchange.
2299void InstructionSelectorT::VisitWord64AtomicCompareExchange(OpIndex node) {
2300 const AtomicRMWOp& atomic_op = this->
Get(node).template Cast<AtomicRMWOp>();
2302 if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2303 opcode = kAtomicCompareExchangeUint8;
2304 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2305 opcode = kAtomicCompareExchangeUint16;
2306 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2307 opcode = kAtomicCompareExchangeWord32;
2308 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
2309 opcode = kMips64Word64AtomicCompareExchangeUint64;
// Generic atomic binop dispatch: pick one of the supplied per-width opcodes
// based on the access's memory representation. NOTE(review): the opcode
// assignment statements inside each branch (orig. lines 2322/2324/...)
// and the trailing UNREACHABLE/VisitAtomicBinop lines are missing from
// this capture — verify upstream.
2316void InstructionSelectorT::VisitWord32AtomicBinaryOperation(
2317 OpIndex node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
2318 ArchOpcode uint16_op, ArchOpcode word32_op) {
2319 const AtomicRMWOp& atomic_op = this->
Get(node).template Cast<AtomicRMWOp>();
2321 if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
2323 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2325 }
else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
2327 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2329 }
else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
2330 atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
// Stamps out VisitWord32Atomic{Add,Sub,And,Or,Xor,...}; the macro's closing
// lines and its instantiations (orig. lines 2343-2348) are missing.
2338#define VISIT_ATOMIC_BINOP(op) \
2339 void InstructionSelectorT::VisitWord32Atomic##op(OpIndex node) { \
2340 VisitWord32AtomicBinaryOperation( \
2341 node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
2342 kAtomic##op##Uint16, kAtomic##op##Word32); \
2349#undef VISIT_ATOMIC_BINOP
// 64-bit variant: unsigned representations only.
2351void InstructionSelectorT::VisitWord64AtomicBinaryOperation(
2352 OpIndex node, ArchOpcode uint8_op, ArchOpcode uint16_op,
2353 ArchOpcode uint32_op, ArchOpcode uint64_op) {
2354 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2356 if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2358 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2360 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2362 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
2367 VisitAtomicBinop(
this, node, opcode, AtomicWidth::kWord64);
// 64-bit instantiations (instantiation lines 2375-2380 missing).
2370#define VISIT_ATOMIC_BINOP(op) \
2371 void InstructionSelectorT::VisitWord64Atomic##op(OpIndex node) { \
2372 VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
2373 kAtomic##op##Uint16, kAtomic##op##Word32, \
2374 kMips64Word64Atomic##op##Uint64); \
2381#undef VISIT_ATOMIC_BINOP
// Abs-with-overflow visitors: bodies (orig. lines 2384/2388, presumably
// UNREACHABLE) are missing from this capture.
2383void InstructionSelectorT::VisitInt32AbsWithOverflow(OpIndex node) {
2387void InstructionSelectorT::VisitInt64AbsWithOverflow(OpIndex node) {
// X-macro lists mapping machine-level SIMD ops to MIPS64 (MSA) opcodes.
// These are backslash-continued macro definitions, so no comments can be
// inserted inside them; several interior lines (e.g. SIMD_TYPE_LIST's body,
// parts of SIMD_SHIFT_OP_LIST) are missing from this capture.
2391#define SIMD_TYPE_LIST(V) \
2399#define SIMD_UNOP_LIST(V) \
2400 V(F64x2Abs, kMips64F64x2Abs) \
2401 V(F64x2Neg, kMips64F64x2Neg) \
2402 V(F64x2Sqrt, kMips64F64x2Sqrt) \
2403 V(F64x2Ceil, kMips64F64x2Ceil) \
2404 V(F64x2Floor, kMips64F64x2Floor) \
2405 V(F64x2Trunc, kMips64F64x2Trunc) \
2406 V(F64x2NearestInt, kMips64F64x2NearestInt) \
2407 V(I64x2Neg, kMips64I64x2Neg) \
2408 V(I64x2BitMask, kMips64I64x2BitMask) \
2409 V(F64x2ConvertLowI32x4S, kMips64F64x2ConvertLowI32x4S) \
2410 V(F64x2ConvertLowI32x4U, kMips64F64x2ConvertLowI32x4U) \
2411 V(F64x2PromoteLowF32x4, kMips64F64x2PromoteLowF32x4) \
2412 V(F32x4SConvertI32x4, kMips64F32x4SConvertI32x4) \
2413 V(F32x4UConvertI32x4, kMips64F32x4UConvertI32x4) \
2414 V(F32x4Abs, kMips64F32x4Abs) \
2415 V(F32x4Neg, kMips64F32x4Neg) \
2416 V(F32x4Sqrt, kMips64F32x4Sqrt) \
2417 V(F32x4Ceil, kMips64F32x4Ceil) \
2418 V(F32x4Floor, kMips64F32x4Floor) \
2419 V(F32x4Trunc, kMips64F32x4Trunc) \
2420 V(F32x4NearestInt, kMips64F32x4NearestInt) \
2421 V(F32x4DemoteF64x2Zero, kMips64F32x4DemoteF64x2Zero) \
2422 V(I64x2Abs, kMips64I64x2Abs) \
2423 V(I64x2SConvertI32x4Low, kMips64I64x2SConvertI32x4Low) \
2424 V(I64x2SConvertI32x4High, kMips64I64x2SConvertI32x4High) \
2425 V(I64x2UConvertI32x4Low, kMips64I64x2UConvertI32x4Low) \
2426 V(I64x2UConvertI32x4High, kMips64I64x2UConvertI32x4High) \
2427 V(I32x4SConvertF32x4, kMips64I32x4SConvertF32x4) \
2428 V(I32x4UConvertF32x4, kMips64I32x4UConvertF32x4) \
2429 V(I32x4Neg, kMips64I32x4Neg) \
2430 V(I32x4SConvertI16x8Low, kMips64I32x4SConvertI16x8Low) \
2431 V(I32x4SConvertI16x8High, kMips64I32x4SConvertI16x8High) \
2432 V(I32x4UConvertI16x8Low, kMips64I32x4UConvertI16x8Low) \
2433 V(I32x4UConvertI16x8High, kMips64I32x4UConvertI16x8High) \
2434 V(I32x4Abs, kMips64I32x4Abs) \
2435 V(I32x4BitMask, kMips64I32x4BitMask) \
2436 V(I32x4TruncSatF64x2SZero, kMips64I32x4TruncSatF64x2SZero) \
2437 V(I32x4TruncSatF64x2UZero, kMips64I32x4TruncSatF64x2UZero) \
2438 V(I16x8Neg, kMips64I16x8Neg) \
2439 V(I16x8SConvertI8x16Low, kMips64I16x8SConvertI8x16Low) \
2440 V(I16x8SConvertI8x16High, kMips64I16x8SConvertI8x16High) \
2441 V(I16x8UConvertI8x16Low, kMips64I16x8UConvertI8x16Low) \
2442 V(I16x8UConvertI8x16High, kMips64I16x8UConvertI8x16High) \
2443 V(I16x8Abs, kMips64I16x8Abs) \
2444 V(I16x8BitMask, kMips64I16x8BitMask) \
2445 V(I8x16Neg, kMips64I8x16Neg) \
2446 V(I8x16Abs, kMips64I8x16Abs) \
2447 V(I8x16Popcnt, kMips64I8x16Popcnt) \
2448 V(I8x16BitMask, kMips64I8x16BitMask) \
2449 V(S128Not, kMips64S128Not) \
2450 V(I64x2AllTrue, kMips64I64x2AllTrue) \
2451 V(I32x4AllTrue, kMips64I32x4AllTrue) \
2452 V(I16x8AllTrue, kMips64I16x8AllTrue) \
2453 V(I8x16AllTrue, kMips64I8x16AllTrue) \
2454 V(V128AnyTrue, kMips64V128AnyTrue)
// Shift and binary-op lists (SIMD_SHIFT_OP_LIST's body is missing).
2456#define SIMD_SHIFT_OP_LIST(V) \
2470#define SIMD_BINOP_LIST(V) \
2471 V(F64x2Add, kMips64F64x2Add) \
2472 V(F64x2Sub, kMips64F64x2Sub) \
2473 V(F64x2Mul, kMips64F64x2Mul) \
2474 V(F64x2Div, kMips64F64x2Div) \
2475 V(F64x2Min, kMips64F64x2Min) \
2476 V(F64x2Max, kMips64F64x2Max) \
2477 V(F64x2Eq, kMips64F64x2Eq) \
2478 V(F64x2Ne, kMips64F64x2Ne) \
2479 V(F64x2Lt, kMips64F64x2Lt) \
2480 V(F64x2Le, kMips64F64x2Le) \
2481 V(I64x2Eq, kMips64I64x2Eq) \
2482 V(I64x2Ne, kMips64I64x2Ne) \
2483 V(I64x2Add, kMips64I64x2Add) \
2484 V(I64x2Sub, kMips64I64x2Sub) \
2485 V(I64x2Mul, kMips64I64x2Mul) \
2486 V(I64x2GtS, kMips64I64x2GtS) \
2487 V(I64x2GeS, kMips64I64x2GeS) \
2488 V(F32x4Add, kMips64F32x4Add) \
2489 V(F32x4Sub, kMips64F32x4Sub) \
2490 V(F32x4Mul, kMips64F32x4Mul) \
2491 V(F32x4Div, kMips64F32x4Div) \
2492 V(F32x4Max, kMips64F32x4Max) \
2493 V(F32x4Min, kMips64F32x4Min) \
2494 V(F32x4Eq, kMips64F32x4Eq) \
2495 V(F32x4Ne, kMips64F32x4Ne) \
2496 V(F32x4Lt, kMips64F32x4Lt) \
2497 V(F32x4Le, kMips64F32x4Le) \
2498 V(I32x4Add, kMips64I32x4Add) \
2499 V(I32x4Sub, kMips64I32x4Sub) \
2500 V(I32x4Mul, kMips64I32x4Mul) \
2501 V(I32x4MaxS, kMips64I32x4MaxS) \
2502 V(I32x4MinS, kMips64I32x4MinS) \
2503 V(I32x4MaxU, kMips64I32x4MaxU) \
2504 V(I32x4MinU, kMips64I32x4MinU) \
2505 V(I32x4Eq, kMips64I32x4Eq) \
2506 V(I32x4Ne, kMips64I32x4Ne) \
2507 V(I32x4GtS, kMips64I32x4GtS) \
2508 V(I32x4GeS, kMips64I32x4GeS) \
2509 V(I32x4GtU, kMips64I32x4GtU) \
2510 V(I32x4GeU, kMips64I32x4GeU) \
2511 V(I32x4DotI16x8S, kMips64I32x4DotI16x8S) \
2512 V(I16x8Add, kMips64I16x8Add) \
2513 V(I16x8AddSatS, kMips64I16x8AddSatS) \
2514 V(I16x8AddSatU, kMips64I16x8AddSatU) \
2515 V(I16x8Sub, kMips64I16x8Sub) \
2516 V(I16x8SubSatS, kMips64I16x8SubSatS) \
2517 V(I16x8SubSatU, kMips64I16x8SubSatU) \
2518 V(I16x8Mul, kMips64I16x8Mul) \
2519 V(I16x8MaxS, kMips64I16x8MaxS) \
2520 V(I16x8MinS, kMips64I16x8MinS) \
2521 V(I16x8MaxU, kMips64I16x8MaxU) \
2522 V(I16x8MinU, kMips64I16x8MinU) \
2523 V(I16x8Eq, kMips64I16x8Eq) \
2524 V(I16x8Ne, kMips64I16x8Ne) \
2525 V(I16x8GtS, kMips64I16x8GtS) \
2526 V(I16x8GeS, kMips64I16x8GeS) \
2527 V(I16x8GtU, kMips64I16x8GtU) \
2528 V(I16x8GeU, kMips64I16x8GeU) \
2529 V(I16x8RoundingAverageU, kMips64I16x8RoundingAverageU) \
2530 V(I16x8SConvertI32x4, kMips64I16x8SConvertI32x4) \
2531 V(I16x8UConvertI32x4, kMips64I16x8UConvertI32x4) \
2532 V(I16x8Q15MulRSatS, kMips64I16x8Q15MulRSatS) \
2533 V(I8x16Add, kMips64I8x16Add) \
2534 V(I8x16AddSatS, kMips64I8x16AddSatS) \
2535 V(I8x16AddSatU, kMips64I8x16AddSatU) \
2536 V(I8x16Sub, kMips64I8x16Sub) \
2537 V(I8x16SubSatS, kMips64I8x16SubSatS) \
2538 V(I8x16SubSatU, kMips64I8x16SubSatU) \
2539 V(I8x16MaxS, kMips64I8x16MaxS) \
2540 V(I8x16MinS, kMips64I8x16MinS) \
2541 V(I8x16MaxU, kMips64I8x16MaxU) \
2542 V(I8x16MinU, kMips64I8x16MinU) \
2543 V(I8x16Eq, kMips64I8x16Eq) \
2544 V(I8x16Ne, kMips64I8x16Ne) \
2545 V(I8x16GtS, kMips64I8x16GtS) \
2546 V(I8x16GeS, kMips64I8x16GeS) \
2547 V(I8x16GtU, kMips64I8x16GtU) \
2548 V(I8x16GeU, kMips64I8x16GeU) \
2549 V(I8x16RoundingAverageU, kMips64I8x16RoundingAverageU) \
2550 V(I8x16SConvertI16x8, kMips64I8x16SConvertI16x8) \
2551 V(I8x16UConvertI16x8, kMips64I8x16UConvertI16x8) \
2552 V(S128And, kMips64S128And) \
2553 V(S128Or, kMips64S128Or) \
2554 V(S128Xor, kMips64S128Xor) \
2555 V(S128AndNot, kMips64S128AndNot)
2557void InstructionSelectorT::VisitS128Const(OpIndex node) {
UNIMPLEMENTED(); }
2559void InstructionSelectorT::VisitS128Zero(OpIndex node) {
2560 Mips64OperandGeneratorT g(
this);
2561 Emit(kMips64S128Zero, g.DefineAsRegister(node));
// Visitor-generating macros for SIMD splat/lane/unop/shift/binop ops.
// Backslash-continued; the closing-brace lines of each macro and the
// list instantiations between #define and #undef are missing from this
// capture.
2563#define SIMD_VISIT_SPLAT(Type) \
2564 void InstructionSelectorT::Visit##Type##Splat(OpIndex node) { \
2565 VisitRR(this, kMips64##Type##Splat, node); \
2568#undef SIMD_VISIT_SPLAT
// Lane extraction (signed/unsigned variants via the Sign parameter).
2570#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \
2571 void InstructionSelectorT::Visit##Type##ExtractLane##Sign(OpIndex node) { \
2572 VisitRRI(this, kMips64##Type##ExtractLane##Sign, node); \
2582#undef SIMD_VISIT_EXTRACT_LANE
2584#define SIMD_VISIT_REPLACE_LANE(Type) \
2585 void InstructionSelectorT::Visit##Type##ReplaceLane(OpIndex node) { \
2586 VisitRRIR(this, kMips64##Type##ReplaceLane, node); \
2589#undef SIMD_VISIT_REPLACE_LANE
2591#define SIMD_VISIT_UNOP(Name, instruction) \
2592 void InstructionSelectorT::Visit##Name(OpIndex node) { \
2593 VisitRR(this, instruction, node); \
2596#undef SIMD_VISIT_UNOP
2598#define SIMD_VISIT_SHIFT_OP(Name) \
2599 void InstructionSelectorT::Visit##Name(OpIndex node) { \
2600 VisitSimdShift(this, kMips64##Name, node); \
2603#undef SIMD_VISIT_SHIFT_OP
2605#define SIMD_VISIT_BINOP(Name, instruction) \
2606 void InstructionSelectorT::Visit##Name(OpIndex node) { \
2607 VisitRRR(this, instruction, node); \
2610#undef SIMD_VISIT_BINOP
// Relaxed SIMD ops are unreachable on this backend (no fast path).
2612#define SIMD_RELAXED_OP_LIST(V) \
2613 V(F64x2RelaxedMin) \
2614 V(F64x2RelaxedMax) \
2615 V(F32x4RelaxedMin) \
2616 V(F32x4RelaxedMax) \
2617 V(I32x4RelaxedTruncF32x4S) \
2618 V(I32x4RelaxedTruncF32x4U) \
2619 V(I32x4RelaxedTruncF64x2SZero) \
2620 V(I32x4RelaxedTruncF64x2UZero) \
2621 V(I16x8RelaxedQ15MulRS) \
2622 V(I8x16RelaxedLaneSelect) \
2623 V(I16x8RelaxedLaneSelect) \
2624 V(I32x4RelaxedLaneSelect) \
2625 V(I64x2RelaxedLaneSelect)
2627#define SIMD_VISIT_RELAXED_OP(Name) \
2628 void InstructionSelectorT::Visit##Name(OpIndex node) { UNREACHABLE(); }
// NOTE(review): this #undef names SIMD_VISIT_SHIFT_OP (already undefined
// above); SIMD_VISIT_RELAXED_OP would be expected here — verify upstream.
2630#undef SIMD_VISIT_SHIFT_OP
2632void InstructionSelectorT::VisitS128Select(OpIndex node) {
2633 VisitRRRR(
this, kMips64S128Select, node);
// Ops without a MIPS64 lowering yet: each expands to UNIMPLEMENTED().
// List bodies are partially missing from this capture (gaps in the
// original line numbering).
2636#define SIMD_UNIMP_OP_LIST(V) \
2641 V(I16x8DotI8x16I7x16S) \
2642 V(I32x4DotI8x16I7x16AddS)
2644#define SIMD_VISIT_UNIMP_OP(Name) \
2645 void InstructionSelectorT::Visit##Name(OpIndex node) { UNIMPLEMENTED(); }
2648#undef SIMD_VISIT_UNIMP_OP
2649#undef SIMD_UNIMP_OP_LIST
// FP16 SIMD is entirely unimplemented on this backend.
2651#define UNIMPLEMENTED_SIMD_FP16_OP_LIST(V) \
2653 V(F16x8ExtractLane) \
2654 V(F16x8ReplaceLane) \
2661 V(F16x8NearestInt) \
2674 V(F16x8SConvertI16x8) \
2675 V(F16x8UConvertI16x8) \
2676 V(I16x8SConvertF16x8) \
2677 V(I16x8UConvertF16x8) \
2678 V(F32x4PromoteLowF16x8) \
2679 V(F16x8DemoteF32x4Zero) \
2680 V(F16x8DemoteF64x2Zero) \
2684#define SIMD_VISIT_UNIMPL_FP16_OP(Name) \
2685 void InstructionSelectorT::Visit##Name(OpIndex node) { UNIMPLEMENTED(); }
2688#undef SIMD_VISIT_UNIMPL_FP16_OP
2689#undef UNIMPLEMENTED_SIMD_FP16_OP_LIST
2691#if V8_ENABLE_WEBASSEMBLY
2693void InstructionSelectorT::VisitI8x16Shuffle(OpIndex node) {
UNIMPLEMENTED(); }
2697void InstructionSelectorT::VisitI8x16Swizzle(OpIndex node) {
UNIMPLEMENTED(); }
2699void InstructionSelectorT::VisitSetStackPointer(OpIndex node) {
2700 OperandGenerator g(
this);
2701 auto input = g.UseRegister(this->input_at(node, 0));
2702 Emit(kArchSetStackPointer, 0,
nullptr, 1, &input);
2705void InstructionSelectorT::VisitSignExtendWord8ToInt32(OpIndex node) {
2706 VisitRR(
this, kMips64Seb, node);
2709void InstructionSelectorT::VisitSignExtendWord16ToInt32(OpIndex node) {
2710 VisitRR(
this, kMips64Seh, node);
2713void InstructionSelectorT::VisitSignExtendWord8ToInt64(OpIndex node) {
2714 VisitRR(
this, kMips64Seb, node);
2717void InstructionSelectorT::VisitSignExtendWord16ToInt64(OpIndex node) {
2718 VisitRR(
this, kMips64Seh, node);
// Word32->Int64 sign extension: body (orig. lines 2722-2724) is missing
// from this capture.
2721void InstructionSelectorT::VisitSignExtendWord32ToInt64(OpIndex node) {
// Pseudo-min/max visitors: bodies missing from this capture
// (orig. lines 2726-2740).
2725void InstructionSelectorT::VisitF32x4Pmin(OpIndex node) {
2729void InstructionSelectorT::VisitF32x4Pmax(OpIndex node) {
2733void InstructionSelectorT::VisitF64x2Pmin(OpIndex node) {
2737void InstructionSelectorT::VisitF64x2Pmax(OpIndex node) {
// Extended-multiply and pairwise-add visitor generators; interior macro
// lines and the instantiations are missing from this capture.
2741#define VISIT_EXT_MUL(OPCODE1, OPCODE2, TYPE) \
2742 void InstructionSelectorT::Visit##OPCODE1##ExtMulLow##OPCODE2( \
2746 void InstructionSelectorT::Visit##OPCODE1##ExtMulHigh##OPCODE2( \
2759#define VISIT_EXTADD_PAIRWISE(OPCODE, TYPE) \
2760 void InstructionSelectorT::Visit##OPCODE(OpIndex node) { UNIMPLEMENTED(); }
2765#undef VISIT_EXTADD_PAIRWISE
// AddOutputToSelectContinuation: remaining parameters and body
// (orig. lines 2769-2772) are missing from this capture.
2767void InstructionSelectorT::AddOutputToSelectContinuation(OperandGenerator* g,
2768 int first_input_index,
2774MachineOperatorBuilder::Flags
2775InstructionSelector::SupportedMachineOperatorFlags() {
2776 MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
2777 return flags | MachineOperatorBuilder::kWord32Ctz |
2778 MachineOperatorBuilder::kWord64Ctz |
2779 MachineOperatorBuilder::kWord32Popcnt |
2780 MachineOperatorBuilder::kWord64Popcnt |
2781 MachineOperatorBuilder::kWord32ShiftIsSafe |
2782 MachineOperatorBuilder::kInt32DivIsSafe |
2783 MachineOperatorBuilder::kUint32DivIsSafe |
2784 MachineOperatorBuilder::kFloat64RoundDown |
2785 MachineOperatorBuilder::kFloat32RoundDown |
2786 MachineOperatorBuilder::kFloat64RoundUp |
2787 MachineOperatorBuilder::kFloat32RoundUp |
2788 MachineOperatorBuilder::kFloat64RoundTruncate |
2789 MachineOperatorBuilder::kFloat32RoundTruncate |
2790 MachineOperatorBuilder::kFloat64RoundTiesEven |
2791 MachineOperatorBuilder::kFloat32RoundTiesEven;
// Reports unaligned-access support. Both return statements are present but
// the preprocessor conditional selecting between them (orig. lines
// 2797/2800-2801, presumably keyed on the architecture variant — TODO
// confirm) is missing from this capture.
2795MachineOperatorBuilder::AlignmentRequirements
2796InstructionSelector::AlignmentRequirements() {
2798 return MachineOperatorBuilder::AlignmentRequirements::
2799 FullUnalignedAccessSupport();
2802 return MachineOperatorBuilder::AlignmentRequirements::
2803 NoUnalignedAccessSupport();
2807#undef SIMD_BINOP_LIST
2808#undef SIMD_SHIFT_OP_LIST
2809#undef SIMD_RELAXED_OP_LIST
2810#undef SIMD_UNOP_LIST
2811#undef SIMD_TYPE_LIST
static constexpr T decode(U value)
static constexpr U encode(T value)
static constexpr MachineType Float64()
constexpr MachineRepresentation representation() const
static constexpr MachineType Simd128()
static constexpr MachineType Float32()
static intptr_t RootRegisterOffsetForExternalReference(Isolate *isolate, const ExternalReference &reference)
int AllocateSpillSlot(int width, int alignment=0, bool is_tagged=false)
void MarkAsSimd128(turboshaft::OpIndex node)
void VisitWordCompareZero(turboshaft::OpIndex user, turboshaft::OpIndex value, FlagsContinuation *cont)
FlagsCondition GetComparisonFlagCondition(const turboshaft::ComparisonOp &op) const
void EmitPrepareArguments(ZoneVector< PushParameter > *arguments, const CallDescriptor *call_descriptor, turboshaft::OpIndex node)
Instruction * Emit(InstructionCode opcode, InstructionOperand output, size_t temp_count=0, InstructionOperand *temps=nullptr)
void MarkAsFloat64(turboshaft::OpIndex node)
bool CanCover(turboshaft::OpIndex user, turboshaft::OpIndex node) const
void EmitMoveFPRToParam(InstructionOperand *op, LinkageLocation location)
Instruction * EmitWithContinuation(InstructionCode opcode, FlagsContinuation *cont)
void EmitIdentity(turboshaft::OpIndex node)
void VisitLoadTransform(Node *node, Node *value, InstructionCode opcode)
bool IsTailCallAddressImmediate()
void VisitStackPointerGreaterThan(turboshaft::OpIndex node, FlagsContinuation *cont)
turboshaft::OptionalOpIndex FindProjection(turboshaft::OpIndex node, size_t projection_index)
void VisitLoad(turboshaft::OpIndex node, turboshaft::OpIndex value, InstructionCode opcode)
PushParameterT PushParameter
OperandGeneratorT OperandGenerator
void VisitFloat64Ieee754Binop(turboshaft::OpIndex, InstructionCode code)
auto Inputs(turboshaft::OpIndex node)
void MarkAsFloat32(turboshaft::OpIndex node)
void EmitMoveParamToFPR(turboshaft::OpIndex node, int index)
void VisitFloat64Ieee754Unop(turboshaft::OpIndex, InstructionCode code)
void EmitPrepareResults(ZoneVector< PushParameter > *results, const CallDescriptor *call_descriptor, turboshaft::OpIndex node)
Instruction * MarkAsCall()
InstructionOperand UseOperand(OpIndex node, InstructionCode opcode)
std::optional< int64_t > GetOptionalIntegerConstant(OpIndex operation)
Mips64OperandGeneratorT(InstructionSelectorT *selector)
bool CanBeImmediate(OpIndex node, InstructionCode mode)
InstructionOperand UseRegisterOrImmediateZero(OpIndex node)
bool IsIntegerConstant(OpIndex node)
bool ImmediateFitsAddrMode1Instruction(int32_t imm) const
bool CanBeImmediate(int64_t value, InstructionCode opcode)
InstructionOperand UseImmediate(int immediate)
InstructionOperand DefineAsRegister(turboshaft::OpIndex node)
InstructionSelectorT * selector() const
InstructionOperand UseRegister(turboshaft::OpIndex node)
MachineRepresentation representation() const
WriteBarrierKind write_barrier_kind() const
LoadRepresentation loaded_rep() const
turboshaft::OpIndex base() const
turboshaft::OpIndex value() const
int32_t displacement() const
StoreRepresentation stored_rep() const
V8_INLINE const Operation & Get(OpIndex i) const
bool Is(V< AnyOrNone > op_idx) const
const Operation & Get(V< AnyOrNone > op_idx) const
bool MatchIntegralZero(V< Any > matched) const
const underlying_operation_t< Op > * TryCast(V< AnyOrNone > op_idx) const
bool MatchIntegralWord64Constant(V< Any > matched, uint64_t *constant) const
const underlying_operation_t< Op > & Cast(V< AnyOrNone > op_idx) const
bool MatchSignedIntegralConstant(V< Any > matched, int64_t *constant) const
static constexpr RegisterRepresentation Word32()
static constexpr RegisterRepresentation Float64()
static constexpr RegisterRepresentation Float32()
static constexpr RegisterRepresentation Word64()
static constexpr WordRepresentation Word32()
static constexpr WordRepresentation Word64()
static const ArchVariants kArchVariant
InstructionSelectorT * selector_
#define SIMD_SHIFT_OP_LIST(V)
#define VISIT_ATOMIC_BINOP(op)
#define SIMD_VISIT_SHIFT_OP(Name)
#define SIMD_VISIT_UNIMPL_FP16_OP(Name)
#define VISIT_EXT_MUL(OPCODE1, OPCODE2)
#define SIMD_VISIT_SPLAT(Type)
#define VISIT_EXTADD_PAIRWISE(OPCODE)
#define SIMD_VISIT_EXTRACT_LANE(Type, Sign)
#define SIMD_VISIT_UNIMP_OP(Name)
#define SIMD_VISIT_REPLACE_LANE(Type)
#define SIMD_UNIMP_OP_LIST(V)
#define SIMD_VISIT_UNOP(Name, instruction)
#define SIMD_VISIT_BINOP(Name, instruction)
#define SIMD_TYPE_LIST(V)
#define UNIMPLEMENTED_SIMD_FP16_OP_LIST(V)
#define SIMD_RELAXED_OP_LIST(V)
#define SIMD_VISIT_RELAXED_OP(Name)
ZoneVector< RpoNumber > & result
#define SIMD_UNOP_LIST(V)
#define SIMD_BINOP_LIST(V)
constexpr bool IsInRange(T value, U lower_limit, U higher_limit)
ChangeOpMask::For< ChangeOp::Kind::kSignedFloatTruncateOverflowToMin, ChangeOp::Assumption::kNoAssumption, RegisterRepresentation::Float64(), RegisterRepresentation::Word64()> kTruncateFloat64ToInt64OverflowToMin
WordBinopMask::For< WordBinopOp::Kind::kBitwiseAnd, WordRepresentation::Word64()> kWord64BitwiseAnd
WordBinopMask::For< WordBinopOp::Kind::kBitwiseAnd, WordRepresentation::Word32()> kWord32BitwiseAnd
ConstantMask::For< ConstantOp::Kind::kWord32 > kWord32Constant
ChangeOpMask::For< ChangeOp::Kind::kSignExtend, ChangeOp::Assumption::kNoAssumption, RegisterRepresentation::Word32(), RegisterRepresentation::Word64()> kChangeInt32ToInt64
ChangeOpMask::For< ChangeOp::Kind::kSignedFloatTruncateOverflowToMin, ChangeOp::Assumption::kNoAssumption, RegisterRepresentation::Float32(), RegisterRepresentation::Word32()> kTruncateFloat32ToInt32OverflowToMin
ConstantMask::For< ConstantOp::Kind::kExternal > kExternalConstant
ChangeOpMask::For< ChangeOp::Kind::kZeroExtend, ChangeOp::Assumption::kNoAssumption, RegisterRepresentation::Word32(), RegisterRepresentation::Word64()> kChangeUint32ToUint64
ChangeOpMask::For< ChangeOp::Kind::kUnsignedFloatTruncateOverflowToMin, ChangeOp::Assumption::kNoAssumption, RegisterRepresentation::Float32(), RegisterRepresentation::Word32()> kTruncateFloat32ToUint32OverflowToMin
ShiftMask::For< ShiftOp::Kind::kShiftRightArithmetic, WordRepresentation::Word32()> kWord32ShiftRightArithmetic
constexpr size_t input_count()
V8_INLINE const Operation & Get(const Graph &graph, OpIndex index)
void VisitAtomicBinop(InstructionSelectorT *selector, OpIndex node, ArchOpcode opcode, AtomicWidth width)
static void VisitRRIR(InstructionSelectorT *selector, ArchOpcode opcode, OpIndex node)
void VisitAtomicExchange(InstructionSelectorT *selector, OpIndex node, ArchOpcode opcode)
static void VisitRRO(InstructionSelectorT *selector, ArchOpcode opcode, OpIndex node)
void VisitRRRR(InstructionSelectorT *selector, ArchOpcode opcode, OpIndex node)
static void VisitRR(InstructionSelectorT *selector, ArchOpcode opcode, OpIndex node)
static void VisitRRI(InstructionSelectorT *selector, ArchOpcode opcode, OpIndex node)
void VisitRRR(InstructionSelectorT *selector, ArchOpcode opcode, OpIndex node)
AtomicStoreParameters const & AtomicStoreParametersOf(Operator const *op)
bool TryEmitExtendingLoad(InstructionSelectorT *selector, OpIndex node, OpIndex output_node)
size_t AtomicWidthSize(AtomicWidth width)
static Instruction * VisitCompare(InstructionSelectorT *selector, InstructionCode opcode, InstructionOperand left, InstructionOperand right, FlagsContinuationT *cont)
static void VisitSimdShift(InstructionSelectorT *selector, ArchOpcode opcode, OpIndex node)
void VisitFloat32Compare(InstructionSelectorT *selector, OpIndex node, FlagsContinuationT *cont)
void EmitLoad(InstructionSelectorT *selector, OpIndex node, InstructionCode opcode, ImmediateMode immediate_mode, MachineRepresentation rep, OptionalOpIndex output={})
static void VisitBinop(InstructionSelectorT *selector, turboshaft::OpIndex node, InstructionCode opcode, bool has_reverse_opcode, InstructionCode reverse_opcode, FlagsContinuationT *cont)
static void VisitBinop(InstructionSelectorT *selector, OpIndex node, InstructionCode opcode, bool has_reverse_opcode, InstructionCode reverse_opcode, FlagsContinuationT *cont)
MachineType LoadRepresentation
Instruction * VisitWordCompare(InstructionSelectorT *selector, OpIndex node, InstructionCode opcode, FlagsContinuationT *cont, bool commutative)
@ kSignedGreaterThanOrEqual
@ kUnsignedGreaterThanOrEqual
void VisitFloat64Compare(InstructionSelectorT *selector, OpIndex node, FlagsContinuationT *cont)
RecordWriteMode WriteBarrierKindToRecordWriteMode(WriteBarrierKind write_barrier_kind)
bool TryMatchImmediate(InstructionSelectorT *selector, InstructionCode *opcode_return, OpIndex node, size_t *input_count_return, InstructionOperand *inputs)
static void VisitUniqueRRR(InstructionSelectorT *selector, ArchOpcode opcode, OpIndex node)
void VisitAtomicCompareExchange(InstructionSelectorT *selector, OpIndex node, ArchOpcode opcode)
bool TryCast(Tagged< From > value, Tagged< To > *out)
constexpr int kTaggedSize
constexpr bool CanBeTaggedOrCompressedPointer(MachineRepresentation rep)
constexpr int kSystemPointerSizeLog2
void EmitWordCompareZero(InstructionSelectorT *selector, OpIndex value, FlagsContinuationT *cont)
constexpr bool CanBeTaggedPointer(MachineRepresentation rep)
V8_EXPORT_PRIVATE FlagValues v8_flags
Tagged< To > Cast(Tagged< From > value, const v8::SourceLocation &loc=INIT_SOURCE_LOCATION_IN_DEBUG)
#define DCHECK_IMPLIES(v1, v2)
#define DCHECK_NE(v1, v2)
#define DCHECK_GE(v1, v2)
#define CHECK_EQ(lhs, rhs)
#define DCHECK(condition)
#define DCHECK_EQ(v1, v2)
ExtendingLoadMatcher(OpIndex node, InstructionSelectorT *selector)
ArchOpcode opcode() const
void Initialize(turboshaft::OpIndex node)
InstructionSelectorT * selector_
int64_t immediate() const
bool is_load_root_register(turboshaft::OpIndex node) const
turboshaft::OpIndex input_at(turboshaft::OpIndex node, size_t index) const
base::Vector< const turboshaft::OpIndex > inputs(turboshaft::OpIndex node) const
LoadView load_view(turboshaft::OpIndex node)
turboshaft::Opcode opcode(turboshaft::OpIndex node) const
turboshaft::Graph * turboshaft_graph() const
StoreView store_view(turboshaft::OpIndex node)
V8_INLINE OpIndex input(size_t i) const
underlying_operation_t< Op > & Cast()
V< Word32 > right() const