using namespace turboshaft;
#define AndCommonMode                                                \
  ((OperandMode::kAllowRM |                                          \
    (CpuFeatures::IsSupported(DISTINCT_OPS) ? OperandMode::kAllowRRR \
                                            : OperandMode::kNone)))
#define And64OperandMode AndCommonMode
#define Or64OperandMode And64OperandMode
#define Xor64OperandMode And64OperandMode

#define And32OperandMode \
  (AndCommonMode | OperandMode::kAllowRI | OperandMode::kUint32Imm)
#define Or32OperandMode And32OperandMode
#define Xor32OperandMode And32OperandMode

#define Shift32OperandMode                                   \
  ((OperandMode::kAllowRI | OperandMode::kShift64Imm |       \
    (CpuFeatures::IsSupported(DISTINCT_OPS)                  \
         ? (OperandMode::kAllowRRR | OperandMode::kAllowRRI) \
         : OperandMode::kNone)))

#define Shift64OperandMode                             \
  ((OperandMode::kAllowRI | OperandMode::kShift64Imm | \
    OperandMode::kAllowRRR | OperandMode::kAllowRRI))

#define AddOperandMode                                            \
  ((OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm | \
    (CpuFeatures::IsSupported(DISTINCT_OPS)                       \
         ? (OperandMode::kAllowRRR | OperandMode::kAllowRRI)      \
         : OperandMode::kArithmeticCommonMode)))
#define SubOperandMode                                                   \
  ((OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm_Negate | \
    (CpuFeatures::IsSupported(DISTINCT_OPS)                              \
         ? (OperandMode::kAllowRRR | OperandMode::kAllowRRI)             \
         : OperandMode::kArithmeticCommonMode)))
#define MulOperandMode \
  (OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm)
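// Matches memory accesses of the form base + (index << scale) + displacement
// so they can be selected as a single addressing mode.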
struct BaseWithScaledIndexAndDisplacementMatch {

std::optional<BaseWithScaledIndexAndDisplacementMatch>

  BaseWithScaledIndexAndDisplacementMatch result;

    result.base = load->base();
    result.index = load->index().value_or_invalid();
    result.scale = load->element_size_log2;
    result.displacement = load->offset;

    result.base = store->base();
    result.index = store->index().value_or_invalid();
    result.scale = store->element_size_log2;
    result.displacement = store->offset;
#ifdef V8_ENABLE_WEBASSEMBLY
  } else if (const Simd128LaneMemoryOp* lane_op =
                 op.TryCast<Simd128LaneMemoryOp>()) {
    result.base = lane_op->base();
    result.index = lane_op->index();

  } else if (const Simd128LoadTransformOp* load_transform =
                 op.TryCast<Simd128LoadTransformOp>()) {
    result.base = load_transform->base();
    result.index = load_transform->index();

    DCHECK(!load_transform->load_kind.tagged_base);
    return this->Use(node);
      case ConstantOp::Kind::kWord32:
      case ConstantOp::Kind::kWord64:

    // Immediate checks for each operand mode flag.
    if (mode & OperandMode::kShift32Imm)
      return 0 <= value && value < 32;
    else if (mode & OperandMode::kShift64Imm)
      return 0 <= value && value < 64;
    else if (mode & OperandMode::kInt32Imm)
      return is_int32(value);
    else if (mode & OperandMode::kInt32Imm_Negate)
      return is_int32(-value);
    else if (mode & OperandMode::kUint32Imm)
      return is_uint32(value);
    else if (mode & OperandMode::kInt20Imm)
      return is_int20(value);
    else if (mode & OperandMode::kUint12Imm)
      return is_uint12(value);
    if (!selector()->CanCover(user, input)) return false;
    if (effect_level != selector()->GetEffectLevel(input)) {

      case kS390_LoadAndTestWord64:

      case kS390_LoadAndTestWord32:
    if (m->base.valid() &&

    inputs[(*input_count)++] =
bool S390OpcodeOnlySupport12BitDisp(ArchOpcode opcode) {

    case kS390_AddDouble:

    case kS390_CmpDouble:
    case kS390_Float32ToDouble:

  return S390OpcodeOnlySupport12BitDisp(opcode);

#define OpcodeImmMode(op)                                       \
  (S390OpcodeOnlySupport12BitDisp(op) ? OperandMode::kUint12Imm \
                                      : OperandMode::kInt20Imm)
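// z/Architecture encodes a memory displacement either as 12-bit unsigned or,
// with the long-displacement forms, 20-bit signed; opcodes that only have the
// short form must restrict immediate displacements accordingly.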
                                   RegisterRepresentation result_rep) {

  switch (loaded_rep) {

      return kS390_LoadWordS8;

      return kS390_LoadWordU8;

      return kS390_LoadWordS16;

      return kS390_LoadWordU16;

      return kS390_LoadWordU32;

      return kS390_LoadWord64;

      return kS390_LoadFloat32;

      return kS390_LoadDouble;
#ifdef V8_COMPRESS_POINTERS

      return kS390_LoadWordS32;

      return kS390_LoadDecompressTagged;

      return kS390_LoadWordS32;

      return kS390_LoadDecompressTaggedSigned;

      return kS390_LoadWord64;

      return kS390_LoadWord64;

      return kS390_LoadSimd128;
  switch (load_rep.representation()) {

      opcode = kS390_LoadFloat32;

      opcode = kS390_LoadDouble;

      opcode = load_rep.IsSigned() ? kS390_LoadWordS8 : kS390_LoadWordU8;

      opcode = load_rep.IsSigned() ? kS390_LoadWordS16 : kS390_LoadWordU16;

      opcode = kS390_LoadWordU32;

#ifdef V8_COMPRESS_POINTERS
      opcode = kS390_LoadWordS32;

#ifdef V8_COMPRESS_POINTERS
      opcode = kS390_LoadDecompressTaggedSigned;

      opcode = kS390_LoadDecompressTagged;

      opcode = kS390_LoadDecompressTagged;

      opcode = kS390_LoadWord64;

      opcode = kS390_LoadSimd128;
#define RESULT_IS_WORD32_LIST(V) \
  V(BitcastFloat32ToInt32)       \
  V(RoundFloat64ToInt32)         \
  V(TruncateFloat32ToInt32)      \
  V(TruncateFloat32ToUint32)     \
  V(TruncateFloat64ToUint32)     \
  V(ChangeFloat64ToInt32)        \
  V(ChangeFloat64ToUint32)       \
  V(Float64ExtractLowWord32)     \
  V(Float64ExtractHighWord32)    \
  V(SignExtendWord8ToInt32)      \
  V(SignExtendWord16ToInt32)     \
  V(Int32AddWithOverflow)        \
  V(Int32SubWithOverflow)        \
  V(Int32MulWithOverflow)        \
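// Returns true when `node` leaves a well-defined 32-bit value in the low word
// of its 64-bit register, so a subsequent explicit zero-extension can be
// elided.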
bool ProduceWord32Result(InstructionSelectorT* selector, OpIndex node) {
  const Operation& op = selector->Get(node);

    case Opcode::kWordBinop: {
      const auto& binop = op.Cast<WordBinopOp>();

      return binop.kind == WordBinopOp::Kind::kAdd ||
             binop.kind == WordBinopOp::Kind::kSub ||
             binop.kind == WordBinopOp::Kind::kMul ||
             binop.kind == WordBinopOp::Kind::kSignedDiv ||
             binop.kind == WordBinopOp::Kind::kUnsignedDiv ||
             binop.kind == WordBinopOp::Kind::kSignedMod ||
             binop.kind == WordBinopOp::Kind::kUnsignedMod ||
             binop.kind == WordBinopOp::Kind::kBitwiseAnd ||
             binop.kind == WordBinopOp::Kind::kBitwiseOr ||
             binop.kind == WordBinopOp::Kind::kBitwiseXor ||
             binop.kind == WordBinopOp::Kind::kSignedMulOverflownBits ||
             binop.kind == WordBinopOp::Kind::kUnsignedMulOverflownBits;
    }
    case Opcode::kWordUnary: {
      const auto& unop = op.Cast<WordUnaryOp>();

      return unop.kind == WordUnaryOp::Kind::kCountLeadingZeros ||
             unop.kind == WordUnaryOp::Kind::kPopCount ||
             unop.kind == WordUnaryOp::Kind::kSignExtend8 ||
             unop.kind == WordUnaryOp::Kind::kSignExtend16;
    }
    case Opcode::kChange: {
      const auto& changeop = op.Cast<ChangeOp>();
      switch (changeop.kind) {

        case ChangeOp::Kind::kExtractLowHalf:
        case ChangeOp::Kind::kExtractHighHalf:

        case ChangeOp::Kind::kBitcast:

        case ChangeOp::Kind::kSignedFloatTruncateOverflowToMin:
        case ChangeOp::Kind::kUnsignedFloatTruncateOverflowToMin:

    case Opcode::kShift: {
      const auto& shift = op.Cast<ShiftOp>();

      return shift.kind == ShiftOp::Kind::kShiftRightArithmetic ||
             shift.kind == ShiftOp::Kind::kShiftRightLogical ||
             shift.kind == ShiftOp::Kind::kShiftRightArithmeticShiftOutZeros ||
             shift.kind == ShiftOp::Kind::kShiftLeft ||
             shift.kind == ShiftOp::Kind::kRotateRight;
    }
    case Opcode::kOverflowCheckedBinop: {
      const auto& ovfbinop = op.Cast<OverflowCheckedBinopOp>();

      return ovfbinop.kind == OverflowCheckedBinopOp::Kind::kSignedAdd ||
             ovfbinop.kind == OverflowCheckedBinopOp::Kind::kSignedSub ||
             ovfbinop.kind == OverflowCheckedBinopOp::Kind::kSignedMul;
    }
    case Opcode::kLoad: {

      if (load_rep.IsSigned())

static inline bool DoZeroExtForResult(InstructionSelectorT* selector,
                                      OpIndex node) {
  return ProduceWord32Result(selector, node);
}
void VisitTryTruncateDouble(InstructionSelectorT* selector, ArchOpcode opcode,
                            OpIndex node) {
  S390OperandGeneratorT g(selector);
  InstructionOperand inputs[] = {g.UseRegister(selector->input_at(node, 0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  if (success_output.valid()) {
    outputs[output_count++] = g.DefineAsRegister(success_output.value());
  }

  selector->Emit(opcode, output_count, outputs, 1, inputs);
}
template <class CanCombineWithLoad>
void GenerateRightOperands(InstructionSelectorT* selector, OpIndex node,

                           InstructionOperand* inputs, size_t* input_count,
                           CanCombineWithLoad canCombineWithLoad) {
  S390OperandGeneratorT g(selector);

      g.CanBeImmediate(right, *operand_mode)) {
    inputs[(*input_count)++] = g.UseImmediate(right);

    const Operation& right_op = selector->Get(right);
    if (right_op.Is<LoadOp>() && selector->CanCover(node, right) &&

            selector->load_view(right).ts_result_rep()))) {

          g.GetEffectiveAddressMemoryOperand(right, inputs, input_count);

    inputs[(*input_count)++] = g.UseAnyExceptImmediate(right);

    inputs[(*input_count)++] = g.UseAnyExceptImmediate(right);

    inputs[(*input_count)++] = g.UseRegister(right);
template <class CanCombineWithLoad>
void GenerateBinOpOperands(InstructionSelectorT* selector, OpIndex node,

                           InstructionOperand* inputs, size_t* input_count,
                           CanCombineWithLoad canCombineWithLoad) {
  S390OperandGeneratorT g(selector);

  InstructionOperand const left_input = g.UseRegister(left);
  inputs[(*input_count)++] = left_input;

    inputs[(*input_count)++] = left_input;

  GenerateRightOperands(selector, node, right, opcode, operand_mode, inputs,
                        input_count, canCombineWithLoad);
template <class CanCombineWithLoad>
void VisitUnaryOp(InstructionSelectorT* selector, OpIndex node,
                  InstructionCode opcode, OperandModes operand_mode,
                  FlagsContinuationT* cont,
                  CanCombineWithLoad canCombineWithLoad);

template <class CanCombineWithLoad>
void VisitBinOp(InstructionSelectorT* selector, OpIndex node,
                InstructionCode opcode, OperandModes operand_mode,
                FlagsContinuationT* cont,
                CanCombineWithLoad canCombineWithLoad);
#define VISIT_OP_LIST_32(V)                                            \
  V(Word32, Unary, [](ArchOpcode opcode) {                             \
    return opcode == kS390_LoadWordS32 || opcode == kS390_LoadWordU32; \
  })                                                                   \
  V(Word64, Unary,                                                     \
    [](ArchOpcode opcode) { return opcode == kS390_LoadWord64; })      \
  V(Float32, Unary,                                                    \
    [](ArchOpcode opcode) { return opcode == kS390_LoadFloat32; })     \
  V(Float64, Unary,                                                    \
    [](ArchOpcode opcode) { return opcode == kS390_LoadDouble; })      \
  V(Word32, Bin, [](ArchOpcode opcode) {                               \
    return opcode == kS390_LoadWordS32 || opcode == kS390_LoadWordU32; \
  })                                                                   \
  V(Float32, Bin,                                                      \
    [](ArchOpcode opcode) { return opcode == kS390_LoadFloat32; })     \
  V(Float64, Bin, [](ArchOpcode opcode) { return opcode == kS390_LoadDouble; })

#define VISIT_OP_LIST(V) \
  VISIT_OP_LIST_32(V)    \
  V(Word64, Bin, [](ArchOpcode opcode) { return opcode == kS390_LoadWord64; })
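// Each (type, kind, canCombineWithLoad) triple expands through
// DECLARE_VISIT_HELPER_FUNCTIONS below into a pair of helper overloads, e.g.
// VisitWord32BinOp(selector, node, opcode, operand_mode[, cont]); the lambda
// decides which load opcodes may be folded into the operation as a memory
// operand.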
#define DECLARE_VISIT_HELPER_FUNCTIONS(type1, type2, canCombineWithLoad)    \
  static inline void Visit##type1##type2##Op(                               \
      InstructionSelectorT* selector, OpIndex node, InstructionCode opcode, \
      OperandModes operand_mode, FlagsContinuationT* cont) {                \
    Visit##type2##Op(selector, node, opcode, operand_mode, cont,            \
                     canCombineWithLoad);                                   \
  }                                                                         \
  static inline void Visit##type1##type2##Op(                               \
      InstructionSelectorT* selector, OpIndex node, InstructionCode opcode, \
      OperandModes operand_mode) {                                          \
    FlagsContinuationT cont;                                                \
    Visit##type1##type2##Op(selector, node, opcode, operand_mode, &cont);   \
  }

VISIT_OP_LIST(DECLARE_VISIT_HELPER_FUNCTIONS)

#undef DECLARE_VISIT_HELPER_FUNCTIONS
#undef VISIT_OP_LIST_32
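// When doZeroExt is set, an extra TempImmediate input tells the code
// generator whether the 32-to-64-bit zero-extension still has to be performed
// (1) or can be elided because the input already is a clean word32 result (0).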
template <class CanCombineWithLoad>
void VisitUnaryOp(InstructionSelectorT* selector, OpIndex node,
                  InstructionCode opcode, OperandModes operand_mode,
                  FlagsContinuationT* cont,
                  CanCombineWithLoad canCombineWithLoad) {
  S390OperandGeneratorT g(selector);
  InstructionOperand inputs[8];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;
  OpIndex input = selector->input_at(node, 0);

  GenerateRightOperands(selector, node, input, &opcode, &operand_mode, inputs,
                        &input_count, canCombineWithLoad);

  bool input_is_word32 = ProduceWord32Result(selector, input);

  bool doZeroExt = DoZeroExtForResult(selector, node);
  bool canEliminateZeroExt = input_is_word32;

  if (doZeroExt) {
    inputs[input_count++] = g.TempImmediate(!canEliminateZeroExt);
  }

  if (!cont->IsDeoptimize()) {
    if (doZeroExt && canEliminateZeroExt) {
      // Result and input must share a register so the zero-extension can be
      // skipped.
      outputs[output_count++] = g.DefineSameAsFirst(node);
    } else {
      outputs[output_count++] = g.DefineAsRegister(node);
    }
  } else {
    // A deoptimizing continuation must not have its inputs overwritten by the
    // result, so pin the output to the first input's register.
    outputs[output_count++] = g.DefineSameAsFirst(node);
  }

  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}
template <class CanCombineWithLoad>
void VisitBinOp(InstructionSelectorT* selector, OpIndex node,
                InstructionCode opcode, OperandModes operand_mode,
                FlagsContinuationT* cont,
                CanCombineWithLoad canCombineWithLoad) {
  S390OperandGeneratorT g(selector);
  OpIndex left = selector->input_at(node, 0);
  OpIndex right = selector->input_at(node, 1);
  InstructionOperand inputs[8];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  const Operation& op = selector->Get(node);
  if (op.TryCast<WordBinopOp>() &&
      !g.CanBeImmediate(right, operand_mode) &&
      (g.CanBeBetterLeftOperand(right))) {
    std::swap(left, right);
  }

  GenerateBinOpOperands(selector, node, left, right, &opcode, &operand_mode,
                        inputs, &input_count, canCombineWithLoad);

  bool left_is_word32 = ProduceWord32Result(selector, left);

  bool doZeroExt = DoZeroExtForResult(selector, node);
  bool canEliminateZeroExt = left_is_word32;

  if (doZeroExt) {
    inputs[input_count++] = g.TempImmediate(!canEliminateZeroExt);
  }

  if ((operand_mode & OperandMode::kAllowDistinctOps) &&
      !cont->IsDeoptimize()) {
    if (doZeroExt && canEliminateZeroExt) {
      outputs[output_count++] = g.DefineSameAsFirst(node);
    } else {
      outputs[output_count++] = g.DefineAsRegister(node);
    }
  } else {
    outputs[output_count++] = g.DefineSameAsFirst(node);
  }

  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}
void InstructionSelectorT::VisitStackSlot(OpIndex node) {

                                       stack_slot.is_tagged);

  Emit(kArchStackSlot, g.DefineAsRegister(node),
       sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}

void InstructionSelectorT::VisitAbortCSADcheck(OpIndex node) {
  S390OperandGeneratorT g(this);
  Emit(kArchAbortCSADcheck, g.NoOutput(),
       g.UseFixed(this->input_at(node, 0), r3));
}
  S390OperandGeneratorT g(this);
  InstructionOperand outputs[] = {g.DefineAsRegister(node)};
  InstructionOperand inputs[3];

      g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);

  TurboshaftAdapter::LoadView view = this->load_view(node);

void InstructionSelectorT::VisitProtectedLoad(OpIndex node) {
  DCHECK_EQ(store_view.element_size_log2(), 0);

  OpIndex value = store_view.value();

    CHECK((store_view.ts_stored_rep() !=

          (store_view.ts_stored_rep() !=

          (store_view.ts_stored_rep() !=

    size_t input_count = 0;

                                        inputs, &input_count,

    size_t const temp_count = arraysize(temps);

    selector->Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
    switch (store_view.ts_stored_rep()) {

        opcode = kS390_StoreWord8;

        opcode = kS390_StoreWord16;

        opcode = kS390_StoreWord32;

          opcode = kS390_StoreReverse32;
          value = selector->input_at(value, 0);

        opcode = kS390_StoreWord64;

          opcode = kS390_StoreReverse64;
          value = selector->input_at(value, 0);

        opcode = kS390_StoreFloat32;

        opcode = kS390_StoreDouble;

        opcode = kS390_StoreCompressTagged;

        opcode = kS390_StoreWord64;

        opcode = kS390_StoreSimd128;

        if (reverse_op.Is<Opmask::kSimd128Simd128ReverseBytes>()) {
          opcode = kS390_StoreReverseSimd128;
          value = selector->input_at(value, 0);
        }
    size_t input_count = 0;

    inputs[input_count++] = value_operand;

                   input_count, inputs);

void InstructionSelectorT::VisitStore(OpIndex node) {

  if (v8_flags.enable_unconditional_write_barriers &&

void InstructionSelectorT::VisitProtectedStore(OpIndex node) {
    OpIndex node, FlagsContinuation* cont) {

  value = op.stack_limit();

  S390OperandGeneratorT g(this);

  InstructionOperand* const outputs = nullptr;
  const int output_count = 0;

  InstructionOperand temps[] = {g.TempRegister()};

  InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};

                       temp_count, temps, cont);
  if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 32))

  *mb = mask_lsb + mask_width - 1;

  if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 64))

  *mb = mask_lsb + mask_width - 1;
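// A mask qualifies only when its set bits form one contiguous run; mb then
// names the run's most significant bit position, feeding the rotate-and-mask
// (RISBG-style) selections below. Example: 0x0000FF00 has mask_width 8 and
// mask_lsb 8, so mb = 8 + 8 - 1 = 15.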
void InstructionSelectorT::VisitWord64And(OpIndex node) {
  S390OperandGeneratorT g(this);

      left = shift_op.left();

        if (mb > 63 - sh) mb = 63 - sh;
        sh = (64 - sh) & 0x3F;

        if (me < sh) me = sh;

      opcode = kS390_RotLeftAndClearLeft64;

    } else if (mb == 63) {

      opcode = kS390_RotLeftAndClearRight64;

      opcode = kS390_RotLeftAndClear64;

    Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
         g.TempImmediate(sh), g.TempImmediate(mask));
void InstructionSelectorT::VisitWord64Shl(OpIndex node) {
  S390OperandGeneratorT g(this);

    const WordBinopOp& bitwise_and = lhs.Cast<WordBinopOp>();
    int64_t right_value;

      if (me < sh) me = sh;

        opcode = kS390_RotLeftAndClearLeft64;

      } else if (mb == 63) {

        opcode = kS390_RotLeftAndClearRight64;

      } else if (sh && me <= sh) {

        opcode = kS390_RotLeftAndClear64;

           g.UseRegister(bitwise_and.left()), g.TempImmediate(sh),
           g.TempImmediate(mask));
void InstructionSelectorT::VisitWord64Shr(OpIndex node) {
  S390OperandGeneratorT g(this);

    const WordBinopOp& bitwise_and = lhs.Cast<WordBinopOp>();
    uint64_t right_value;

      if (mb > 63 - sh) mb = 63 - sh;
      sh = (64 - sh) & 0x3F;

        opcode = kS390_RotLeftAndClearLeft64;

      } else if (mb == 63) {

        opcode = kS390_RotLeftAndClearRight64;

           g.UseRegister(bitwise_and.left()), g.TempImmediate(sh),
           g.TempImmediate(mask));
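// (x << K) >> K with an arithmetic right shift and K == 16 or K == 24 is just
// a sign-extension of the low 16 or 8 bits, so the shift pair below collapses
// into a single sign-extend instruction.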
    uint32_t sar_by = sar_value;
    uint32_t shl_by = shl_value;
    if ((sar_by == shl_by) && (sar_by == 16)) {
      bool canEliminateZeroExt = ProduceWord32Result(selector, shl.left());
      selector->Emit(kS390_SignExtendWord16ToInt32,

    } else if ((sar_by == shl_by) && (sar_by == 24)) {
      bool canEliminateZeroExt = ProduceWord32Result(selector, shl.left());
      selector->Emit(kS390_SignExtendWord8ToInt32,
void InstructionSelectorT::VisitWord32ReverseBits(OpIndex node) {

void InstructionSelectorT::VisitWord64ReverseBits(OpIndex node) {

void InstructionSelectorT::VisitInt32AbsWithOverflow(OpIndex node) {

void InstructionSelectorT::VisitInt64AbsWithOverflow(OpIndex node) {

void InstructionSelectorT::VisitWord64ReverseBytes(OpIndex node) {
  S390OperandGeneratorT g(this);

  if (CanCover(node, input) && input_op.Is<LoadOp>()) {

    InstructionOperand outputs[] = {g.DefineAsRegister(node)};
    InstructionOperand inputs[3];

        g.GetEffectiveAddressMemoryOperand(input, inputs, &input_count);

  Emit(kS390_LoadReverse64RR, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));
}
void InstructionSelectorT::VisitWord32ReverseBytes(OpIndex node) {
  S390OperandGeneratorT g(this);

  if (CanCover(node, input) && input_op.Is<LoadOp>()) {

    InstructionOperand outputs[] = {g.DefineAsRegister(node)};
    InstructionOperand inputs[3];

        g.GetEffectiveAddressMemoryOperand(input, inputs, &input_count);

  Emit(kS390_LoadReverse32RR, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));
}
void InstructionSelectorT::VisitSimd128ReverseBytes(OpIndex node) {
  S390OperandGeneratorT g(this);

  if (CanCover(node, input) && input_op.Is<LoadOp>()) {

    InstructionOperand outputs[] = {g.DefineAsRegister(node)};
    InstructionOperand inputs[3];

        g.GetEffectiveAddressMemoryOperand(input, inputs, &input_count);

         outputs, input_count, inputs);

  Emit(kS390_LoadReverseSimd128RR, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));
}
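// When the reversed value comes straight from a load that this node covers,
// the byte swap is folded into a single load-reverse memory instruction
// (LRV/LRVG-style); otherwise the register-to-register form above is used.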
template <class Matcher, ArchOpcode neg_opcode>

  static_assert(neg_opcode == kS390_Neg32 || neg_opcode == kS390_Neg64,
                "Provided opcode is not a Neg opcode.");

  bool doZeroExt = DoZeroExtForResult(selector, node);
  bool canEliminateZeroExt = ProduceWord32Result(selector, value);

  selector->Emit(neg_opcode,

template <class Matcher, ArchOpcode shift_op>

  bool doZeroExt = DoZeroExtForResult(selector, node);
  bool canEliminateZeroExt = ProduceWord32Result(selector, left);
template <ArchOpcode opcode>

  VisitWord32BinOp(selector, node, opcode, mode, &cont);

  VisitWord32BinOp(selector, node, kS390_Mul32WithOverflow,

template <ArchOpcode opcode>

  VisitWord64BinOp(selector, node, opcode, mode, &cont);

  size_t input_count = 0;

  size_t output_count = 0;

                                 input_count, inputs, cont);
#define null ([]() { return false; })
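// `null` stands in for "no special-case matcher" in the op lists below: the
// DECLARE_* macros invoke the matcher first and only fall through to the
// generic Visit helper when it returns false.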
#define FLOAT_UNARY_OP_LIST(V)                                                 \
  V(Float64, TruncateFloat64ToUint32, kS390_DoubleToUint32,                    \
    OperandMode::kNone, null)                                                  \
  V(Float64, Float64SilenceNaN, kS390_Float64SilenceNaN, OperandMode::kNone,   \
    null)                                                                      \
  V(Float64, Float64Sqrt, kS390_SqrtDouble, OperandMode::kNone, null)          \
  V(Float64, Float64RoundUp, kS390_CeilDouble, OperandMode::kNone, null)       \
  V(Float64, Float64RoundTruncate, kS390_TruncateDouble, OperandMode::kNone,   \
    null)                                                                      \
  V(Float64, Float64RoundTiesEven, kS390_DoubleNearestInt, OperandMode::kNone, \
    null)                                                                      \
  V(Float64, Float64RoundTiesAway, kS390_RoundDouble, OperandMode::kNone,      \
    null)                                                                      \
  V(Float64, Float64RoundDown, kS390_FloorDouble, OperandMode::kNone, null)    \
  V(Float64, Float64Neg, kS390_NegDouble, OperandMode::kNone, null)            \
  V(Float64, Float64Abs, kS390_AbsDouble, OperandMode::kNone, null)            \
  V(Float32, Float32Sqrt, kS390_SqrtFloat, OperandMode::kNone, null)           \
  V(Float32, Float32RoundUp, kS390_CeilFloat, OperandMode::kNone, null)        \
  V(Float32, Float32RoundTruncate, kS390_TruncateFloat, OperandMode::kNone,    \
    null)                                                                      \
  V(Float32, Float32RoundTiesEven, kS390_FloatNearestInt, OperandMode::kNone,  \
    null)                                                                      \
  V(Float32, Float32RoundDown, kS390_FloorFloat, OperandMode::kNone, null)     \
  V(Float32, Float32Neg, kS390_NegFloat, OperandMode::kNone, null)             \
  V(Float32, Float32Abs, kS390_AbsFloat, OperandMode::kNone, null)             \
  V(Float64, BitcastFloat64ToInt64, kS390_BitcastDoubleToInt64,                \
    OperandMode::kNone, null)                                                  \
  V(Float32, BitcastFloat32ToInt32, kS390_BitcastFloat32ToInt32,               \
    OperandMode::kAllowRM, null)                                               \
  V(Word32, Float64ExtractHighWord32, kS390_DoubleExtractHighWord32,           \
    OperandMode::kNone, null)                                                  \
  V(Word32, Float64ExtractLowWord32, kS390_DoubleExtractLowWord32,             \
    OperandMode::kNone, null)                                                  \
  V(Float64, ChangeFloat64ToUint64, kS390_DoubleToUint64, OperandMode::kNone,  \
    null)                                                                      \
  V(Float64, ChangeFloat64ToInt64, kS390_DoubleToInt64, OperandMode::kNone,    \
    null)                                                                      \
  V(Float64, ChangeFloat64ToUint32, kS390_DoubleToUint32, OperandMode::kNone,  \
    null)                                                                      \
  V(Float64, ChangeFloat64ToInt32, kS390_DoubleToInt32, OperandMode::kNone,    \
    null)                                                                      \
  V(Float64, TruncateFloat64ToInt64, kS390_DoubleToInt64, OperandMode::kNone,  \
    null)                                                                      \
  V(Float64, TruncateFloat64ToFloat32, kS390_DoubleToFloat32,                  \
    OperandMode::kNone, null)                                                  \
  V(Float64, TruncateFloat64ToWord32, kArchTruncateDoubleToI,                  \
    OperandMode::kNone, null)                                                  \
  V(Float32, ChangeFloat32ToFloat64, kS390_Float32ToDouble,                    \
    OperandMode::kAllowRM, null)                                               \
  V(Float64, RoundFloat64ToInt32, kS390_DoubleToInt32, OperandMode::kNone, null)
#define FLOAT_BIN_OP_LIST(V)                                           \
  V(Float64, Float64Mul, kS390_MulDouble, OperandMode::kAllowRM, null) \
  V(Float64, Float64Add, kS390_AddDouble, OperandMode::kAllowRM, null) \
  V(Float64, Float64Min, kS390_MinDouble, OperandMode::kNone, null)    \
  V(Float64, Float64Max, kS390_MaxDouble, OperandMode::kNone, null)    \
  V(Float32, Float32Min, kS390_MinFloat, OperandMode::kNone, null)     \
  V(Float32, Float32Max, kS390_MaxFloat, OperandMode::kNone, null)     \
  V(Float32, Float32Div, kS390_DivFloat, OperandMode::kAllowRM, null)  \
  V(Float32, Float32Mul, kS390_MulFloat, OperandMode::kAllowRM, null)  \
  V(Float32, Float32Sub, kS390_SubFloat, OperandMode::kAllowRM, null)  \
  V(Float32, Float32Add, kS390_AddFloat, OperandMode::kAllowRM, null)  \
  V(Float64, Float64Sub, kS390_SubDouble, OperandMode::kAllowRM, null) \
  V(Float64, Float64Div, kS390_DivDouble, OperandMode::kAllowRM, null)
#define WORD32_UNARY_OP_LIST(V)                                              \
  V(Word32, SignExtendWord32ToInt64, kS390_SignExtendWord32ToInt64,          \
    OperandMode::kNone, null)                                                \
  V(Word32, SignExtendWord16ToInt64, kS390_SignExtendWord16ToInt64,          \
    OperandMode::kNone, null)                                                \
  V(Word32, SignExtendWord8ToInt64, kS390_SignExtendWord8ToInt64,            \
    OperandMode::kNone, null)                                                \
  V(Word32, SignExtendWord16ToInt32, kS390_SignExtendWord16ToInt32,          \
    OperandMode::kNone, null)                                                \
  V(Word32, SignExtendWord8ToInt32, kS390_SignExtendWord8ToInt32,            \
    OperandMode::kNone, null)                                                \
  V(Word32, Word32Popcnt, kS390_Popcnt32, OperandMode::kNone, null)          \
  V(Word32, Word32Clz, kS390_Cntlz32, OperandMode::kNone, null)              \
  V(Word32, BitcastInt32ToFloat32, kS390_BitcastInt32ToFloat32,              \
    OperandMode::kNone, null)                                                \
  V(Word32, ChangeUint32ToFloat64, kS390_Uint32ToDouble, OperandMode::kNone, \
    null)                                                                    \
  V(Word32, RoundUint32ToFloat32, kS390_Uint32ToFloat32, OperandMode::kNone, \
    null)                                                                    \
  V(Word32, RoundInt32ToFloat32, kS390_Int32ToFloat32, OperandMode::kNone,   \
    null)                                                                    \
  V(Word32, ChangeInt32ToFloat64, kS390_Int32ToDouble, OperandMode::kNone,   \
    null)                                                                    \
  V(Word32, ChangeInt32ToInt64, kS390_SignExtendWord32ToInt64,               \
    OperandMode::kNone, null)                                                \
  V(Word32, ChangeUint32ToUint64, kS390_Uint32ToUint64, OperandMode::kNone,  \
    [&]() -> bool {                                                          \
      if (ProduceWord32Result(this, this->input_at(node, 0))) {              \
        EmitIdentity(node);                                                  \
        return true;                                                         \
      }                                                                      \
      return false;                                                          \
    })
#define WORD32_BIN_OP_LIST(V)                                                  \
  V(Word32, Float64InsertHighWord32, kS390_DoubleInsertHighWord32,             \
    OperandMode::kAllowRRR,                                                    \
    [&]() -> bool { return TryMatchDoubleConstructFromInsert(this, node); })   \
  V(Word32, Float64InsertLowWord32, kS390_DoubleInsertLowWord32,               \
    OperandMode::kAllowRRR,                                                    \
    [&]() -> bool { return TryMatchDoubleConstructFromInsert(this, node); })   \
  V(Word32, Int32SubWithOverflow, kS390_Sub32, SubOperandMode,                 \
    ([&]() { return TryMatchInt32SubWithOverflow(this, node); }))              \
  V(Word32, Uint32MulHigh, kS390_MulHighU32,                                   \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                     \
  V(Word32, Uint32Mod, kS390_ModU32,                                           \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                     \
  V(Word32, Uint32Div, kS390_DivU32,                                           \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                     \
  V(Word32, Int32Mod, kS390_Mod32,                                             \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                     \
  V(Word32, Int32Div, kS390_Div32,                                             \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                     \
  V(Word32, Int32Mul, kS390_Mul32, MulOperandMode, ([&]() {                    \
      return TryMatchShiftFromMul<Int32BinopMatcher, kS390_ShiftLeft32>(this,  \
                                                                        node); \
    }))                                                                        \
  V(Word32, Int32MulHigh, kS390_MulHigh32,                                     \
    OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps, null)             \
  V(Word32, Int32Sub, kS390_Sub32, SubOperandMode, ([&]() {                    \
      return TryMatchNegFromSub<Int32BinopMatcher, kS390_Neg32>(this, node);   \
    }))                                                                        \
  V(Word32, Int32Add, kS390_Add32, AddOperandMode, null)                       \
  V(Word32, Word32Xor, kS390_Xor32, Xor32OperandMode, null)                    \
  V(Word32, Word32Ror, kS390_RotRight32,                                       \
    OperandMode::kAllowRI | OperandMode::kAllowRRR | OperandMode::kAllowRRI |  \
        OperandMode::kShift32Imm,                                              \
    null)                                                                      \
  V(Word32, Word32Shr, kS390_ShiftRight32, Shift32OperandMode, null)           \
  V(Word32, Word32Shl, kS390_ShiftLeft32, Shift32OperandMode, null)            \
  V(Word32, Int32AddWithOverflow, kS390_Add32, AddOperandMode,                 \
    ([&]() { return TryMatchInt32AddWithOverflow(this, node); }))              \
  V(Word32, Int32MulWithOverflow, kS390_Mul32, MulOperandMode,                 \
    ([&]() { return TryMatchInt32MulWithOverflow(this, node); }))              \
  V(Word32, Word32And, kS390_And32, And32OperandMode, null)                    \
  V(Word32, Word32Or, kS390_Or32, Or32OperandMode, null)                       \
  V(Word32, Word32Sar, kS390_ShiftRightArith32, Shift32OperandMode,            \
    [&]() { return TryMatchSignExtInt16OrInt8FromWord32Sar(this, node); })
#define WORD64_UNARY_OP_LIST(V)                                              \
  V(Word64, TruncateInt64ToInt32, kS390_Int64ToInt32, OperandMode::kNone,    \
    null)                                                                    \
  V(Word64, Word64Clz, kS390_Cntlz64, OperandMode::kNone, null)              \
  V(Word64, Word64Popcnt, kS390_Popcnt64, OperandMode::kNone, null)          \
  V(Word64, Int64SubWithOverflow, kS390_Sub64, SubOperandMode,               \
    ([&]() { return TryMatchInt64SubWithOverflow(this, node); }))            \
  V(Word64, BitcastInt64ToFloat64, kS390_BitcastInt64ToDouble,               \
    OperandMode::kNone, null)                                                \
  V(Word64, ChangeInt64ToFloat64, kS390_Int64ToDouble, OperandMode::kNone,   \
    null)                                                                    \
  V(Word64, RoundUint64ToFloat64, kS390_Uint64ToDouble, OperandMode::kNone,  \
    null)                                                                    \
  V(Word64, RoundUint64ToFloat32, kS390_Uint64ToFloat32, OperandMode::kNone, \
    null)                                                                    \
  V(Word64, RoundInt64ToFloat32, kS390_Int64ToFloat32, OperandMode::kNone,   \
    null)                                                                    \
  V(Word64, RoundInt64ToFloat64, kS390_Int64ToDouble, OperandMode::kNone, null)
#define WORD64_BIN_OP_LIST(V)                                                  \
  V(Word64, Int64AddWithOverflow, kS390_Add64, AddOperandMode,                 \
    ([&]() { return TryMatchInt64AddWithOverflow(this, node); }))              \
  V(Word64, Uint64MulHigh, kS390_MulHighU64, OperandMode::kAllowRRR, null)     \
  V(Word64, Uint64Mod, kS390_ModU64,                                           \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                     \
  V(Word64, Uint64Div, kS390_DivU64,                                           \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                     \
  V(Word64, Int64Mod, kS390_Mod64,                                             \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                     \
  V(Word64, Int64Div, kS390_Div64,                                             \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                     \
  V(Word64, Int64MulHigh, kS390_MulHighS64, OperandMode::kAllowRRR, null)      \
  V(Word64, Int64Mul, kS390_Mul64, MulOperandMode, ([&]() {                    \
      return TryMatchShiftFromMul<Int64BinopMatcher, kS390_ShiftLeft64>(this,  \
                                                                        node); \
    }))                                                                        \
  V(Word64, Int64Sub, kS390_Sub64, SubOperandMode, ([&]() {                    \
      return TryMatchNegFromSub<Int64BinopMatcher, kS390_Neg64>(this, node);   \
    }))                                                                        \
  V(Word64, Word64Xor, kS390_Xor64, Xor64OperandMode, null)                    \
  V(Word64, Word64Or, kS390_Or64, Or64OperandMode, null)                       \
  V(Word64, Word64Ror, kS390_RotRight64, Shift64OperandMode, null)             \
  V(Word64, Int64Add, kS390_Add64, AddOperandMode, null)                       \
  V(Word64, Word64Sar, kS390_ShiftRightArith64, Shift64OperandMode, null)
#define DECLARE_UNARY_OP(type, name, op, mode, try_extra) \
  void InstructionSelectorT::Visit##name(OpIndex node) {  \
    if (std::function<bool()>(try_extra)()) return;       \
    Visit##type##UnaryOp(this, node, op, mode);           \
  }

#define DECLARE_BIN_OP(type, name, op, mode, try_extra)  \
  void InstructionSelectorT::Visit##name(OpIndex node) { \
    if (std::function<bool()>(try_extra)()) return;      \
    Visit##type##BinOp(this, node, op, mode);            \
  }

#undef FLOAT_UNARY_OP_LIST
#undef FLOAT_BIN_OP_LIST
#undef WORD32_UNARY_OP_LIST
#undef WORD32_BIN_OP_LIST
#undef WORD64_UNARY_OP_LIST
#undef WORD64_BIN_OP_LIST
#undef DECLARE_UNARY_OP
#undef DECLARE_BIN_OP
void InstructionSelectorT::VisitTryTruncateFloat32ToInt64(OpIndex node) {
  VisitTryTruncateDouble(this, kS390_Float32ToInt64, node);
}

void InstructionSelectorT::VisitTryTruncateFloat64ToInt64(OpIndex node) {
  VisitTryTruncateDouble(this, kS390_DoubleToInt64, node);
}

void InstructionSelectorT::VisitTryTruncateFloat32ToUint64(OpIndex node) {
  VisitTryTruncateDouble(this, kS390_Float32ToUint64, node);
}

void InstructionSelectorT::VisitTryTruncateFloat64ToUint64(OpIndex node) {
  VisitTryTruncateDouble(this, kS390_DoubleToUint64, node);
}

void InstructionSelectorT::VisitTryTruncateFloat64ToInt32(OpIndex node) {
  VisitTryTruncateDouble(this, kS390_DoubleToInt32, node);
}

void InstructionSelectorT::VisitTryTruncateFloat64ToUint32(OpIndex node) {
  VisitTryTruncateDouble(this, kS390_DoubleToUint32, node);
}

void InstructionSelectorT::VisitBitcastWord32ToWord64(OpIndex node) {
void InstructionSelectorT::VisitFloat64Mod(OpIndex node) {
  S390OperandGeneratorT g(this);
  Emit(kS390_ModDouble, g.DefineAsFixed(node, d1),
       g.UseFixed(this->input_at(node, 0), d1),
       g.UseFixed(this->input_at(node, 1), d2))
      ->MarkAsCall();
}

  S390OperandGeneratorT g(this);

       g.UseFixed(this->input_at(node, 0), d1))
      ->MarkAsCall();

  S390OperandGeneratorT g(this);

       g.UseFixed(this->input_at(node, 0), d1),
       g.UseFixed(this->input_at(node, 1), d2))
      ->MarkAsCall();

void InstructionSelectorT::VisitInt64MulWithOverflow(OpIndex node) {
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuationT* cont) {
  selector->EmitWithContinuation(opcode, left, right, cont);
}

void VisitLoadAndTest(InstructionSelectorT* selector, InstructionCode opcode,
                      OpIndex node, OpIndex value, FlagsContinuationT* cont,
                      bool discard_output = false);
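// Shared compare path: if only the left operand fits as an immediate or
// memory operand, the operands are swapped (commuting the continuation's
// condition when the node is not commutative), and a compare against zero is
// strength-reduced to a load-and-test.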
  S390OperandGeneratorT g(selector);
  OpIndex lhs = selector->input_at(node, 0);
  OpIndex rhs = selector->input_at(node, 1);

  const Operation& op = selector->Get(node);

  InstructionOperand inputs[8];
  InstructionOperand outputs[1];
  size_t input_count = 0;
  size_t output_count = 0;

  int effect_level = selector->GetEffectLevel(node, cont);

  if ((!g.CanBeImmediate(rhs, immediate_mode) &&
       g.CanBeImmediate(lhs, immediate_mode)) ||
      (!g.CanBeMemoryOperand(opcode, node, rhs, effect_level) &&
       g.CanBeMemoryOperand(opcode, node, lhs, effect_level))) {
    if (!selector->IsCommutative(node)) cont->Commute();
    std::swap(lhs, rhs);
  }

  if (g.CanBeImmediate(rhs, immediate_mode) && g.GetImmediate(rhs) == 0) {
    DCHECK(opcode == kS390_Cmp32 || opcode == kS390_Cmp64);
    ArchOpcode load_and_test = (opcode == kS390_Cmp32)
                                   ? kS390_LoadAndTestWord32
                                   : kS390_LoadAndTestWord64;
    return VisitLoadAndTest(selector, load_and_test, node, lhs, cont, true);
  }

  if (g.CanBeMemoryOperand(opcode, node, rhs, effect_level)) {

    AddressingMode addressing_mode = g.GetEffectiveAddressMemoryOperand(

  } else if (g.CanBeImmediate(rhs, immediate_mode)) {

    inputs[input_count++] = g.UseAnyExceptImmediate(rhs);

  DCHECK(input_count <= 8 && output_count <= 1);
  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
void VisitWord32Compare(InstructionSelectorT* selector, OpIndex node,
                        FlagsContinuationT* cont) {

void VisitWord64Compare(InstructionSelectorT* selector, OpIndex node,
                        FlagsContinuationT* cont) {

                        FlagsContinuationT* cont) {

                        FlagsContinuationT* cont) {

void VisitTestUnderMask(InstructionSelectorT* selector, OpIndex node,
                        FlagsContinuationT* cont) {
  const Operation& op = selector->Get(node);

    opcode = kS390_Tst32;

    opcode = kS390_Tst64;

  S390OperandGeneratorT g(selector);
  OpIndex lhs = selector->input_at(node, 0);
  OpIndex rhs = selector->input_at(node, 1);

    std::swap(lhs, rhs);
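// LOAD AND TEST (LT/LTG) sets the condition code from the value it loads, so
// a compare-against-zero folds into the load itself; discard_output is used
// when only the condition code is needed.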
void VisitLoadAndTest(InstructionSelectorT* selector, InstructionCode opcode,
                      OpIndex node, OpIndex value, FlagsContinuationT* cont,
                      bool discard_output) {
  static_assert(kS390_LoadAndTestFloat64 - kS390_LoadAndTestWord32 == 3,
                "LoadAndTest Opcode shouldn't contain other opcodes.");

  DCHECK(opcode >= kS390_LoadAndTestWord32 &&
         opcode <= kS390_LoadAndTestWord64);

  S390OperandGeneratorT g(selector);
  InstructionOperand inputs[8];
  InstructionOperand outputs[2];
  size_t input_count = 0;
  size_t output_count = 0;
  bool use_value = false;

  int effect_level = selector->GetEffectLevel(node, cont);

  if (g.CanBeMemoryOperand(opcode, node, value, effect_level)) {

    g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);

    inputs[input_count++] = g.UseAnyExceptImmediate(value);

  if (!discard_output && !use_value) {
    outputs[output_count++] = g.DefineAsRegister(value);
  }

  DCHECK(input_count <= 8 && output_count <= 2);
  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}
                                                FlagsContinuation* cont) {

  if (const ComparisonOp* comparison = value_op.TryCast<ComparisonOp>()) {
    if (comparison->kind == ComparisonOp::Kind::kEqual) {
      switch (comparison->rep.MapTaggedToWord().value()) {

          cont->OverwriteAndNegateIfEqual(kEqual);
          if (this->MatchIntegralZero(comparison->right())) {

            if (CanCover(value, comparison->left())) {
              const Operation& left_op = this->Get(comparison->left());

                return VisitWord32Compare(this, comparison->left(), cont);

                return VisitTestUnderMask(this, comparison->left(), cont);

          return VisitWord32Compare(this, value, cont);

          cont->OverwriteAndNegateIfEqual(kEqual);
          if (this->MatchIntegralZero(comparison->right())) {

            if (CanCover(value, comparison->left())) {
              const Operation& left_op = this->Get(comparison->left());

                return VisitWord64Compare(this, comparison->left(), cont);

                return VisitTestUnderMask(this, comparison->left(), cont);

          return VisitWord64Compare(this, value, cont);

          cont->OverwriteAndNegateIfEqual(kEqual);

          cont->OverwriteAndNegateIfEqual(kEqual);

      switch (comparison->rep.MapTaggedToWord().value()) {

          cont->OverwriteAndNegateIfEqual(
              GetComparisonFlagCondition(*comparison));
          return VisitWord32Compare(this, value, cont);

          cont->OverwriteAndNegateIfEqual(
              GetComparisonFlagCondition(*comparison));
          return VisitWord64Compare(this, value, cont);

        case ComparisonOp::Kind::kSignedLessThan:

        case ComparisonOp::Kind::kSignedLessThanOrEqual:

        case ComparisonOp::Kind::kSignedLessThan:

        case ComparisonOp::Kind::kSignedLessThanOrEqual:
  } else if (const ProjectionOp* projection =
                 value_op.TryCast<ProjectionOp>()) {

    if (projection->index == 1u) {

      OpIndex node = projection->input();
      if (const OverflowCheckedBinopOp* binop =

              binop && CanDoBranchIfOverflowFusion(node)) {

        switch (binop->kind) {
          case OverflowCheckedBinopOp::Kind::kSignedAdd:
            cont->OverwriteAndNegateIfEqual(kOverflow);

          case OverflowCheckedBinopOp::Kind::kSignedSub:
            cont->OverwriteAndNegateIfEqual(kOverflow);

          case OverflowCheckedBinopOp::Kind::kSignedMul:

              cont->OverwriteAndNegateIfEqual(

              cont->OverwriteAndNegateIfEqual(kOverflow);
              return VisitWord32BinOp(
                  this, node, kS390_Mul32,
                  OperandMode::kAllowRRR | OperandMode::kAllowRM, cont);

              cont->OverwriteAndNegateIfEqual(kNotEqual);
              return VisitWord32BinOp(
                  this, node, kS390_Mul32WithOverflow,
                  OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
                  cont);
  } else if (const OverflowCheckedUnaryOp* unop =

                 unop && CanDoBranchIfOverflowFusion(node)) {

    switch (unop->kind) {
      case OverflowCheckedUnaryOp::Kind::kAbs:

          cont->OverwriteAndNegateIfEqual(kOverflow);
          return VisitWord64UnaryOp(this, node, kS390_Abs64,
                                    OperandMode::kNone, cont);

          cont->OverwriteAndNegateIfEqual(kOverflow);
          return VisitWord32UnaryOp(this, node, kS390_Abs32,
                                    OperandMode::kNone, cont);

    return VisitWord32Compare(this, value, cont);

    return VisitTestUnderMask(this, value, cont);
  } else if (value_op.Is<LoadOp>()) {
    auto load = this->load_view(value);

    switch (load_rep.representation()) {

        return VisitLoadAndTest(this, kS390_LoadAndTestWord32, user, value,

    return VisitWord32BinOp(this, value, kS390_Or32, Or32OperandMode, cont);

    return VisitWord64Compare(this, value, cont);

    return VisitTestUnderMask(this, value, cont);

    return VisitWord64BinOp(this, value, kS390_Or64, Or64OperandMode, cont);

  } else if (value_op.Is<StackPointerGreaterThanOp>()) {
    cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
    return VisitStackPointerGreaterThan(value, cont);
  }

  VisitLoadAndTest(this, kS390_LoadAndTestWord32, user, value, cont, true);
}
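// Switch heuristic below: a jump table costs roughly (4 + value_range) words
// of space with ~3 dispatch steps, while a binary search costs
// (3 + 2 * case_count) words with ~case_count steps; the table is chosen when
// table_space_cost + 3 * table_time_cost <=
//     lookup_space_cost + 3 * lookup_time_cost.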
void InstructionSelectorT::VisitSwitch(OpIndex node, const SwitchInfo& sw) {
  S390OperandGeneratorT g(this);
  InstructionOperand value_operand = g.UseRegister(this->input_at(node, 0));

  if (enable_switch_jump_table_ ==
      InstructionSelector::kEnableSwitchJumpTable) {
    static const size_t kMaxTableSwitchValueRange = 2 << 16;
    size_t table_space_cost = 4 + sw.value_range();
    size_t table_time_cost = 3;
    size_t lookup_space_cost = 3 + 2 * sw.case_count();
    size_t lookup_time_cost = sw.case_count();
    if (sw.case_count() > 0 &&
        table_space_cost + 3 * table_time_cost <=
            lookup_space_cost + 3 * lookup_time_cost &&
        sw.min_value() > std::numeric_limits<int32_t>::min() &&
        sw.value_range() <= kMaxTableSwitchValueRange) {
      InstructionOperand index_operand = value_operand;
      if (sw.min_value()) {
        index_operand = g.TempRegister();
        Emit(kS390_Lay | AddressingModeField::encode(kMode_MRI), index_operand,
             value_operand, g.TempImmediate(-sw.min_value()));
      }
      InstructionOperand index_operand_zero_ext = g.TempRegister();
      Emit(kS390_Uint32ToUint64, index_operand_zero_ext, index_operand);
      index_operand = index_operand_zero_ext;

      return EmitTableSwitch(sw, index_operand);
    }
  }

  return EmitBinarySearchSwitch(sw, value_operand);
}
void InstructionSelectorT::VisitWord32Equal(OpIndex const node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  const ComparisonOp& op = this->Get(node).template Cast<ComparisonOp>();
  if (this->MatchIntegralZero(op.right())) {
    return VisitLoadAndTest(this, kS390_LoadAndTestWord32, node, op.left(),
                            &cont, true);
  }
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelectorT::VisitInt32LessThan(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelectorT::VisitInt32LessThanOrEqual(OpIndex node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelectorT::VisitUint32LessThan(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelectorT::VisitUint32LessThanOrEqual(OpIndex node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelectorT::VisitWord64Equal(OpIndex const node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  const ComparisonOp& op = this->Get(node).template Cast<ComparisonOp>();
  if (this->MatchIntegralZero(op.right())) {
    return VisitLoadAndTest(this, kS390_LoadAndTestWord64, node, op.left(),
                            &cont, true);
  }
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelectorT::VisitInt64LessThan(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelectorT::VisitInt64LessThanOrEqual(OpIndex node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelectorT::VisitUint64LessThan(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelectorT::VisitUint64LessThanOrEqual(OpIndex node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}
void InstructionSelectorT::VisitTruncateFloat64ToFloat16RawBits(OpIndex node) {

void InstructionSelectorT::VisitChangeFloat16RawBitsToFloat64(OpIndex node) {

void InstructionSelectorT::VisitFloat32Equal(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);

void InstructionSelectorT::VisitFloat32LessThan(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);

void InstructionSelectorT::VisitFloat32LessThanOrEqual(OpIndex node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);

void InstructionSelectorT::VisitFloat64Equal(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);

void InstructionSelectorT::VisitFloat64LessThan(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);

void InstructionSelectorT::VisitFloat64LessThanOrEqual(OpIndex node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
void InstructionSelectorT::VisitBitcastWord32PairToFloat64(OpIndex node) {
  S390OperandGeneratorT g(this);
  const auto& bitcast = this->Cast<BitcastWord32PairToFloat64Op>(node);

  InstructionOperand temps[] = {g.TempRegister()};
  Emit(kS390_DoubleFromWord32Pair, g.DefineAsRegister(node), g.UseRegister(hi),
       g.UseRegister(lo), arraysize(temps), temps);
}

bool InstructionSelectorT::ZeroExtendsWord32ToWord64NoPhis(OpIndex node) {

void InstructionSelectorT::EmitMoveParamToFPR(OpIndex node, int index) {}

void InstructionSelectorT::EmitMoveFPRToParam(InstructionOperand* op,
                                              LinkageLocation location) {}
void InstructionSelectorT::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments,
    const CallDescriptor* call_descriptor, OpIndex node) {
  S390OperandGeneratorT g(this);

  if (call_descriptor->IsCFunctionCall()) {
    Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
                                         call_descriptor->ParameterCount())),
         0, nullptr, 0, nullptr);

    for (PushParameter input : (*arguments)) {
      if (!input.node.valid()) continue;
      Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
           g.TempImmediate(slot));

    int stack_decrement = 0;

      if (!input.node.valid()) continue;
      InstructionOperand decrement = g.UseImmediate(stack_decrement);
      stack_decrement = 0;
      Emit(kS390_Push, g.NoOutput(), decrement, g.UseRegister(input.node));
void InstructionSelectorT::VisitMemoryBarrier(OpIndex node) {
  S390OperandGeneratorT g(this);
  Emit(kArchNop, g.NoOutput());
}

bool InstructionSelectorT::IsTailCallAddressImmediate() { return false; }
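// z/Architecture's strong memory ordering lets atomic loads and stores go
// through the regular load/store selection (see the Word64 variants below);
// only read-modify-write atomics need dedicated sequences.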
void InstructionSelectorT::VisitWord32AtomicLoad(OpIndex node) {
  auto load = this->load_view(node);

void InstructionSelectorT::VisitWord32AtomicStore(OpIndex node) {
  auto store = this->store_view(node);
  AtomicStoreParameters store_params(store.stored_rep().representation(),
                                     store.stored_rep().write_barrier_kind(),
                                     store.memory_order().value(),
                                     store.access_kind());
                                ArchOpcode opcode, AtomicWidth width) {
  S390OperandGeneratorT g(selector);
  const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(node);
  OpIndex base = atomic_op.base();
  OpIndex index = atomic_op.index();
  OpIndex value = atomic_op.value();

  AddressingMode addressing_mode = kMode_MRR;
  InstructionOperand inputs[3];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(value);
  InstructionOperand outputs[1];
  outputs[0] = g.DefineAsRegister(node);
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
                         AtomicWidthField::encode(width);
  selector->Emit(code, 1, outputs, input_count, inputs);
}
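// The RMW sequences loop on compare-and-swap (CS/CSG), so base, index, and
// value must each stay live in their own register for the duration of the
// loop; hence UseUniqueRegister for every input.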
void InstructionSelectorT::VisitWord32AtomicExchange(OpIndex node) {
  ArchOpcode opcode;
  const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
  if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
    opcode = kAtomicExchangeInt8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
    opcode = kAtomicExchangeUint8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
    opcode = kAtomicExchangeInt16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
    opcode = kAtomicExchangeUint16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
             atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
    opcode = kAtomicExchangeWord32;
  }

void InstructionSelectorT::VisitWord64AtomicExchange(OpIndex node) {
  ArchOpcode opcode;
  const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
  if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
    opcode = kAtomicExchangeUint8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
    opcode = kAtomicExchangeUint16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
    opcode = kAtomicExchangeWord32;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
    opcode = kS390_Word64AtomicExchangeUint64;
  }
                                       ArchOpcode opcode, AtomicWidth width) {
  S390OperandGeneratorT g(selector);
  const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(node);
  OpIndex base = atomic_op.base();
  OpIndex index = atomic_op.index();
  OpIndex old_value = atomic_op.expected().value();
  OpIndex new_value = atomic_op.value();

  InstructionOperand inputs[4];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(old_value);
  inputs[input_count++] = g.UseUniqueRegister(new_value);
  inputs[input_count++] = g.UseUniqueRegister(base);

  AddressingMode addressing_mode;
  if (g.CanBeImmediate(index, OperandMode::kInt20Imm)) {

    addressing_mode = kMode_MRI;
  } else {
    inputs[input_count++] = g.UseUniqueRegister(index);
    addressing_mode = kMode_MRR;
  }

  InstructionOperand outputs[1];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineSameAsFirst(node);

  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
                         AtomicWidthField::encode(width);
  selector->Emit(code, output_count, outputs, input_count, inputs);
}
void InstructionSelectorT::VisitWord32AtomicCompareExchange(OpIndex node) {
  const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
  ArchOpcode opcode;
  if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
    opcode = kAtomicCompareExchangeInt8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
    opcode = kAtomicCompareExchangeUint8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
    opcode = kAtomicCompareExchangeInt16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
    opcode = kAtomicCompareExchangeUint16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
             atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
    opcode = kAtomicCompareExchangeWord32;
  }

void InstructionSelectorT::VisitWord64AtomicCompareExchange(OpIndex node) {
  const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
  ArchOpcode opcode;
  if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
    opcode = kAtomicCompareExchangeUint8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
    opcode = kAtomicCompareExchangeUint16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
    opcode = kAtomicCompareExchangeWord32;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
    opcode = kS390_Word64AtomicCompareExchangeUint64;
  }
  size_t input_count = 0;

    addressing_mode = kMode_MRI;

    addressing_mode = kMode_MRR;

  size_t output_count = 0;

  size_t temp_count = 0;

  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
                         AtomicWidthField::encode(width);
  selector->Emit(code, output_count, outputs, input_count, inputs, temp_count,
                 temps);
void InstructionSelectorT::VisitWord32AtomicBinaryOperation(
    OpIndex node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
    ArchOpcode uint16_op, ArchOpcode word32_op) {
  ArchOpcode opcode;
  const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
  if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
    opcode = int8_op;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
    opcode = uint8_op;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
    opcode = int16_op;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
    opcode = uint16_op;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
             atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
    opcode = word32_op;
  }

#define VISIT_ATOMIC_BINOP(op)                                           \
  void InstructionSelectorT::VisitWord32Atomic##op(OpIndex node) {       \
    VisitWord32AtomicBinaryOperation(                                    \
        node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
        kAtomic##op##Uint16, kAtomic##op##Word32);                       \
  }

#undef VISIT_ATOMIC_BINOP
void InstructionSelectorT::VisitWord64AtomicBinaryOperation(
    OpIndex node, ArchOpcode uint8_op, ArchOpcode uint16_op,
    ArchOpcode word32_op, ArchOpcode word64_op) {
  ArchOpcode opcode;
  const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
  if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
    opcode = uint8_op;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
    opcode = uint16_op;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
    opcode = word32_op;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
    opcode = word64_op;
  }
  VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
}

#define VISIT_ATOMIC64_BINOP(op)                                               \
  void InstructionSelectorT::VisitWord64Atomic##op(OpIndex node) {             \
    VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8,                 \
                                     kAtomic##op##Uint16, kAtomic##op##Word32, \
                                     kS390_Word64Atomic##op##Uint64);          \
  }

#undef VISIT_ATOMIC64_BINOP
void InstructionSelectorT::VisitWord64AtomicLoad(OpIndex node) {
  auto load = this->load_view(node);
  LoadRepresentation load_rep = load.loaded_rep();
  VisitLoad(node, node, SelectLoadOpcode(load_rep));
}

void InstructionSelectorT::VisitWord64AtomicStore(OpIndex node) {
  auto store = this->store_view(node);
  AtomicStoreParameters store_params(store.stored_rep().representation(),
                                     store.stored_rep().write_barrier_kind(),
                                     store.memory_order().value(),
                                     store.access_kind());
#define SIMD_TYPES(V) \

#define SIMD_BINOP_LIST(V) \
  V(I64x2ExtMulLowI32x4S)  \
  V(I64x2ExtMulHighI32x4S) \
  V(I64x2ExtMulLowI32x4U)  \
  V(I64x2ExtMulHighI32x4U) \
  V(I32x4ExtMulLowI16x8S)  \
  V(I32x4ExtMulHighI16x8S) \
  V(I32x4ExtMulLowI16x8U)  \
  V(I32x4ExtMulHighI16x8U) \
  V(I16x8SConvertI32x4)    \
  V(I16x8UConvertI32x4)    \
  V(I16x8RoundingAverageU) \
  V(I16x8ExtMulLowI8x16S)  \
  V(I16x8ExtMulHighI8x16S) \
  V(I16x8ExtMulLowI8x16U)  \
  V(I16x8ExtMulHighI8x16U) \
  V(I8x16SConvertI16x8)    \
  V(I8x16UConvertI16x8)    \
  V(I8x16RoundingAverageU) \

#define SIMD_BINOP_UNIQUE_REGISTER_LIST(V) \
  V(I16x8Q15MulRSatS)                      \

#define SIMD_UNOP_LIST(V)    \
  V(F64x2NearestInt)         \
  V(F64x2ConvertLowI32x4S)   \
  V(F64x2ConvertLowI32x4U)   \
  V(F64x2PromoteLowF32x4)    \
  V(F32x4NearestInt)         \
  V(F32x4DemoteF64x2Zero)    \
  V(F32x4SConvertI32x4)      \
  V(F32x4UConvertI32x4)      \
  V(I64x2SConvertI32x4Low)   \
  V(I64x2SConvertI32x4High)  \
  V(I64x2UConvertI32x4Low)   \
  V(I64x2UConvertI32x4High)  \
  V(I32x4SConvertF32x4)      \
  V(I32x4UConvertF32x4)      \
  V(I32x4SConvertI16x8Low)   \
  V(I32x4SConvertI16x8High)  \
  V(I32x4UConvertI16x8Low)   \
  V(I32x4UConvertI16x8High)  \
  V(I32x4TruncSatF64x2SZero) \
  V(I32x4TruncSatF64x2UZero) \
  V(I16x8SConvertI8x16Low)   \
  V(I16x8SConvertI8x16High)  \
  V(I16x8UConvertI8x16Low)   \
  V(I16x8UConvertI8x16High)  \

#define SIMD_UNOP_UNIQUE_REGISTER_LIST(V) \
  V(I32x4ExtAddPairwiseI16x8S)            \
  V(I32x4ExtAddPairwiseI16x8U)            \
  V(I16x8ExtAddPairwiseI8x16S)            \
  V(I16x8ExtAddPairwiseI8x16U)
#define SIMD_VISIT_EXTRACT_LANE(Type, Sign)                                 \
  void InstructionSelectorT::Visit##Type##ExtractLane##Sign(OpIndex node) { \
    S390OperandGeneratorT g(this);                                          \
    int32_t lane;                                                           \
    using namespace turboshaft;                                             \
    const Operation& op = this->Get(node);                                  \
    lane = op.template Cast<Simd128ExtractLaneOp>().lane;                   \
    Emit(kS390_##Type##ExtractLane##Sign, g.DefineAsRegister(node),         \
         g.UseRegister(this->input_at(node, 0)), g.UseImmediate(lane));     \
  }

#undef SIMD_VISIT_EXTRACT_LANE
#define SIMD_VISIT_REPLACE_LANE(Type)                                  \
  void InstructionSelectorT::Visit##Type##ReplaceLane(OpIndex node) {  \
    S390OperandGeneratorT g(this);                                     \
    int32_t lane;                                                      \
    using namespace turboshaft;                                        \
    const Operation& op = this->Get(node);                             \
    lane = op.template Cast<Simd128ReplaceLaneOp>().lane;              \
    Emit(kS390_##Type##ReplaceLane, g.DefineAsRegister(node),          \
         g.UseRegister(this->input_at(node, 0)), g.UseImmediate(lane), \
         g.UseRegister(this->input_at(node, 1)));                      \
  }

#undef SIMD_VISIT_REPLACE_LANE
2901#define SIMD_VISIT_BINOP(Opcode) \
2902 void InstructionSelectorT::Visit##Opcode(OpIndex node) { \
2903 S390OperandGeneratorT g(this); \
2904 Emit(kS390_##Opcode, g.DefineAsRegister(node), \
2905 g.UseRegister(this->input_at(node, 0)), \
2906 g.UseRegister(this->input_at(node, 1))); \
2909#undef SIMD_VISIT_BINOP
2910#undef SIMD_BINOP_LIST
#define SIMD_VISIT_BINOP_UNIQUE_REGISTER(Opcode)            \
  void InstructionSelectorT::Visit##Opcode(OpIndex node) {  \
    S390OperandGeneratorT g(this);                          \
    InstructionOperand temps[] = {g.TempSimd128Register(),  \
                                  g.TempSimd128Register()}; \
    Emit(kS390_##Opcode, g.DefineAsRegister(node),          \
         g.UseUniqueRegister(this->input_at(node, 0)),      \
         g.UseUniqueRegister(this->input_at(node, 1)),      \
         arraysize(temps), temps);                          \
  }
SIMD_BINOP_UNIQUE_REGISTER_LIST(SIMD_VISIT_BINOP_UNIQUE_REGISTER)
#undef SIMD_VISIT_BINOP_UNIQUE_REGISTER
#undef SIMD_BINOP_UNIQUE_REGISTER_LIST
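// The unique-register variants reserve Simd128 temps and take their inputs
// with UseUniqueRegister, so the register allocator will not assign an input
// to a temp (or output) register: the generated sequence writes the temps
// while the inputs are still live.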
#define SIMD_VISIT_UNOP(Opcode)                            \
  void InstructionSelectorT::Visit##Opcode(OpIndex node) { \
    S390OperandGeneratorT g(this);                         \
    Emit(kS390_##Opcode, g.DefineAsRegister(node),         \
         g.UseRegister(this->input_at(node, 0)));          \
  }
SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
#undef SIMD_VISIT_UNOP
#undef SIMD_UNOP_LIST
#define SIMD_VISIT_UNOP_UNIQUE_REGISTER(Opcode)             \
  void InstructionSelectorT::Visit##Opcode(OpIndex node) {  \
    S390OperandGeneratorT g(this);                          \
    InstructionOperand temps[] = {g.TempSimd128Register()}; \
    Emit(kS390_##Opcode, g.DefineAsRegister(node),          \
         g.UseUniqueRegister(this->input_at(node, 0)),      \
         arraysize(temps), temps);                          \
  }
SIMD_UNOP_UNIQUE_REGISTER_LIST(SIMD_VISIT_UNOP_UNIQUE_REGISTER)
#undef SIMD_VISIT_UNOP_UNIQUE_REGISTER
#undef SIMD_UNOP_UNIQUE_REGISTER_LIST
#define SIMD_VISIT_QFMOP(Opcode)                           \
  void InstructionSelectorT::Visit##Opcode(OpIndex node) { \
    S390OperandGeneratorT g(this);                         \
    Emit(kS390_##Opcode, g.DefineSameAsFirst(node),        \
         g.UseRegister(this->input_at(node, 0)),           \
         g.UseRegister(this->input_at(node, 1)),           \
         g.UseRegister(this->input_at(node, 2)));          \
  }
SIMD_VISIT_QFMOP(F64x2Qfma)
SIMD_VISIT_QFMOP(F64x2Qfms)
SIMD_VISIT_QFMOP(F32x4Qfma)
SIMD_VISIT_QFMOP(F32x4Qfms)
#undef SIMD_VISIT_QFMOP
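// The fused multiply ops use DefineSameAsFirst, constraining the result to
// input 0's register; presumably the emitted sequence accumulates into that
// register in place.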
#define SIMD_RELAXED_OP_LIST(V)                           \
  V(F64x2RelaxedMin, F64x2Pmin)                           \
  V(F64x2RelaxedMax, F64x2Pmax)                           \
  V(F32x4RelaxedMin, F32x4Pmin)                           \
  V(F32x4RelaxedMax, F32x4Pmax)                           \
  V(I32x4RelaxedTruncF32x4S, I32x4SConvertF32x4)          \
  V(I32x4RelaxedTruncF32x4U, I32x4UConvertF32x4)          \
  V(I32x4RelaxedTruncF64x2SZero, I32x4TruncSatF64x2SZero) \
  V(I32x4RelaxedTruncF64x2UZero, I32x4TruncSatF64x2UZero) \
  V(I16x8RelaxedQ15MulRS, I16x8Q15MulRSatS)               \
  V(I8x16RelaxedLaneSelect, S128Select)                   \
  V(I16x8RelaxedLaneSelect, S128Select)                   \
  V(I32x4RelaxedLaneSelect, S128Select)                   \
  V(I64x2RelaxedLaneSelect, S128Select)
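// Relaxed SIMD ops carry no special lowering on this port: each one simply
// delegates to the deterministic counterpart in the right-hand column (e.g.
// I16x8RelaxedQ15MulRS is visited exactly like I16x8Q15MulRSatS).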
#define SIMD_VISIT_RELAXED_OP(name, op) \
  void InstructionSelectorT::Visit##name(OpIndex node) { Visit##op(node); }
SIMD_RELAXED_OP_LIST(SIMD_VISIT_RELAXED_OP)
#undef SIMD_VISIT_RELAXED_OP
#undef SIMD_RELAXED_OP_LIST
#define F16_OP_LIST(V)    \
  V(F16x8ExtractLane)     \
  V(F16x8ReplaceLane)     \
  V(F16x8NearestInt)      \
  V(F16x8SConvertI16x8)   \
  V(F16x8UConvertI16x8)   \
  V(I16x8SConvertF16x8)   \
  V(I16x8UConvertF16x8)   \
  V(F32x4PromoteLowF16x8) \
  V(F16x8DemoteF32x4Zero) \
  V(F16x8DemoteF64x2Zero)
#define VISIT_F16_OP(name) \
  void InstructionSelectorT::Visit##name(OpIndex node) { UNIMPLEMENTED(); }
F16_OP_LIST(VISIT_F16_OP)
#undef VISIT_F16_OP
#undef F16_OP_LIST
#if V8_ENABLE_WEBASSEMBLY
void InstructionSelectorT::VisitI8x16Shuffle(OpIndex node) {
  uint8_t shuffle[kSimd128Size];
  bool is_swizzle;
  auto view = this->simd_shuffle_view(node);
  CanonicalizeShuffle(view, shuffle, &is_swizzle);
  S390OperandGeneratorT g(this);
  OpIndex input0 = view.input(0);
  OpIndex input1 = view.input(1);
  // Remap the shuffle indices to match IBM lane numbering.
  int max_index = 15;
  int total_lane_count = 2 * kSimd128Size;
  uint8_t shuffle_remapped[kSimd128Size];
  for (int i = 0; i < kSimd128Size; i++) {
    uint8_t current_index = shuffle[i];
    shuffle_remapped[i] = (current_index <= max_index
                               ? max_index - current_index
                               : total_lane_count - current_index + max_index);
  }
  Emit(kS390_I8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
       g.UseRegister(input1),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped)),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped + 4)),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped + 8)),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped + 12)));
}
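// Worked example for the remapping above (max_index == 15,
// total_lane_count == 32): index 0 maps to 15, index 15 maps to 0, index 16
// maps to 31, and index 31 maps to 16. Each 16-byte input is mirrored, which
// translates the wasm (little-endian) lane order into the big-endian lane
// numbering the s390 shuffle instruction expects.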
void InstructionSelectorT::VisitI8x16Swizzle(OpIndex node) {
  S390OperandGeneratorT g(this);
  const Simd128BinopOp& binop = this->Get(node).template Cast<Simd128BinopOp>();
  DCHECK(binop.kind == any_of(Simd128BinopOp::Kind::kI8x16Swizzle,
                              Simd128BinopOp::Kind::kI8x16RelaxedSwizzle));
  bool relaxed = binop.kind == Simd128BinopOp::Kind::kI8x16RelaxedSwizzle;
  USE(relaxed);
  Emit(kS390_I8x16Swizzle, g.DefineAsRegister(node),
       g.UseUniqueRegister(this->input_at(node, 0)),
       g.UseUniqueRegister(this->input_at(node, 1)));
}
void InstructionSelectorT::VisitSetStackPointer(OpIndex node) {
  OperandGenerator g(this);
  auto input = g.UseRegister(this->input_at(node, 0));
  Emit(kArchSetStackPointer, 0, nullptr, 1, &input);
}
#else
void InstructionSelectorT::VisitI8x16Shuffle(OpIndex node) { UNREACHABLE(); }
void InstructionSelectorT::VisitI8x16Swizzle(OpIndex node) { UNREACHABLE(); }
#endif  // V8_ENABLE_WEBASSEMBLY
static int32_t Pack4Lanes(const uint8_t* shuffle) {
  int32_t result = 0;
  for (int i = 3; i >= 0; --i) {
    result <<= 8;
    result |= shuffle[i];
  }
  return result;
}
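// For example, Pack4Lanes on the bytes {0x00, 0x01, 0x02, 0x03} loops
// i = 3..0 and returns 0x03020100, i.e. byte i lands in bits [8*i, 8*i+7].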
void InstructionSelectorT::VisitS128Const(OpIndex node) {
  S390OperandGeneratorT g(this);
  uint32_t val[kSimd128Size / sizeof(uint32_t)];
  const Simd128ConstantOp& constant =
      this->Get(node).template Cast<Simd128ConstantOp>();
  memcpy(val, constant.value, kSimd128Size);
  // If all bytes are zeros or ones, avoid emitting code for generic constants.
  bool all_zeros = !(val[0] || val[1] || val[2] || val[3]);
  bool all_ones = val[0] == UINT32_MAX && val[1] == UINT32_MAX &&
                  val[2] == UINT32_MAX && val[3] == UINT32_MAX;
  InstructionOperand dst = g.DefineAsRegister(node);
  if (all_zeros) {
    Emit(kS390_S128Zero, dst);
  } else if (all_ones) {
    Emit(kS390_S128AllOnes, dst);
  } else {
    // Pack4Lanes reverses the bytes (lanes) on BE; it is a no-op on LE.
    Emit(kS390_S128Const, dst,
         g.UseImmediate(Pack4Lanes(reinterpret_cast<uint8_t*>(&val[0]))),
         g.UseImmediate(Pack4Lanes(reinterpret_cast<uint8_t*>(&val[0]) + 4)),
         g.UseImmediate(Pack4Lanes(reinterpret_cast<uint8_t*>(&val[0]) + 8)),
         g.UseImmediate(Pack4Lanes(reinterpret_cast<uint8_t*>(&val[0]) + 12)));
  }
}
void InstructionSelectorT::VisitS128Zero(OpIndex node) {
  S390OperandGeneratorT g(this);
  Emit(kS390_S128Zero, g.DefineAsRegister(node));
}
void InstructionSelectorT::VisitS128Select(OpIndex node) {
  S390OperandGeneratorT g(this);
  Emit(kS390_S128Select, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)),
       g.UseRegister(this->input_at(node, 1)),
       g.UseRegister(this->input_at(node, 2)));
}
void InstructionSelectorT::EmitPrepareResults(
    ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
    OpIndex node) {
  S390OperandGeneratorT g(this);

  for (PushParameter output : *results) {
    if (!output.location.IsCallerFrameSlot()) continue;
    if (output.node.valid()) {
      DCHECK(!call_descriptor->IsCFunctionCall());
      if (output.location.GetType() == MachineType::Float32()) {
        MarkAsFloat32(output.node);
      } else if (output.location.GetType() == MachineType::Float64()) {
        MarkAsFloat64(output.node);
      } else if (output.location.GetType() == MachineType::Simd128()) {
        MarkAsSimd128(output.node);
      }
      int offset = call_descriptor->GetOffsetToReturns();
      int reverse_slot = -output.location.GetLocation() - offset;
      Emit(kS390_Peek, g.DefineAsRegister(output.node),
           g.UseImmediate(reverse_slot));
    }
  }
}
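// reverse_slot addresses a stack return value relative to the start of the
// returns area; the code generator is expected to turn the kS390_Peek
// immediate back into a concrete frame offset.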
void InstructionSelectorT::VisitLoadLane(OpIndex node) {
  InstructionCode opcode = kArchNop;
  int32_t lane;
  const Simd128LaneMemoryOp& load =
      this->Get(node).template Cast<Simd128LaneMemoryOp>();
  lane = load.lane;
  switch (load.lane_kind) {
    case Simd128LaneMemoryOp::LaneKind::k8:
      opcode = kS390_S128Load8Lane;
      break;
    case Simd128LaneMemoryOp::LaneKind::k16:
      opcode = kS390_S128Load16Lane;
      break;
    case Simd128LaneMemoryOp::LaneKind::k32:
      opcode = kS390_S128Load32Lane;
      break;
    case Simd128LaneMemoryOp::LaneKind::k64:
      opcode = kS390_S128Load64Lane;
      break;
  }
  S390OperandGeneratorT g(this);
  InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
  InstructionOperand inputs[5];
  size_t input_count = 0;

  inputs[input_count++] = g.UseRegister(this->input_at(node, 2));
  inputs[input_count++] = g.UseImmediate(lane);

  AddressingMode mode =
      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
  opcode |= AddressingModeField::encode(mode);
  Emit(opcode, 1, outputs, input_count, inputs);
}
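// Note the operand layout: the vector being updated arrives as node input 2
// and becomes instruction input 0, and DefineSameAsFirst ties the output to
// it, so the lane load overwrites a single lane of that register in place.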
void InstructionSelectorT::VisitLoadTransform(OpIndex node) {
  ArchOpcode opcode;
  const Simd128LoadTransformOp& op =
      this->Get(node).template Cast<Simd128LoadTransformOp>();
  switch (op.transform_kind) {
    case Simd128LoadTransformOp::TransformKind::k8Splat:
      opcode = kS390_S128Load8Splat;
      break;
    case Simd128LoadTransformOp::TransformKind::k16Splat:
      opcode = kS390_S128Load16Splat;
      break;
    case Simd128LoadTransformOp::TransformKind::k32Splat:
      opcode = kS390_S128Load32Splat;
      break;
    case Simd128LoadTransformOp::TransformKind::k64Splat:
      opcode = kS390_S128Load64Splat;
      break;
    case Simd128LoadTransformOp::TransformKind::k8x8S:
      opcode = kS390_S128Load8x8S;
      break;
    case Simd128LoadTransformOp::TransformKind::k8x8U:
      opcode = kS390_S128Load8x8U;
      break;
    case Simd128LoadTransformOp::TransformKind::k16x4S:
      opcode = kS390_S128Load16x4S;
      break;
    case Simd128LoadTransformOp::TransformKind::k16x4U:
      opcode = kS390_S128Load16x4U;
      break;
    case Simd128LoadTransformOp::TransformKind::k32x2S:
      opcode = kS390_S128Load32x2S;
      break;
    case Simd128LoadTransformOp::TransformKind::k32x2U:
      opcode = kS390_S128Load32x2U;
      break;
    case Simd128LoadTransformOp::TransformKind::k32Zero:
      opcode = kS390_S128Load32Zero;
      break;
    case Simd128LoadTransformOp::TransformKind::k64Zero:
      opcode = kS390_S128Load64Zero;
      break;
    default:
      UNREACHABLE();
  }
  VisitLoad(node, node, opcode);
}
void InstructionSelectorT::VisitStoreLane(OpIndex node) {
  InstructionCode opcode = kArchNop;
  int32_t lane;
  const Simd128LaneMemoryOp& store =
      this->Get(node).template Cast<Simd128LaneMemoryOp>();
  lane = store.lane;
  switch (store.lane_kind) {
    case Simd128LaneMemoryOp::LaneKind::k8:
      opcode = kS390_S128Store8Lane;
      break;
    case Simd128LaneMemoryOp::LaneKind::k16:
      opcode = kS390_S128Store16Lane;
      break;
    case Simd128LaneMemoryOp::LaneKind::k32:
      opcode = kS390_S128Store32Lane;
      break;
    case Simd128LaneMemoryOp::LaneKind::k64:
      opcode = kS390_S128Store64Lane;
      break;
  }
  S390OperandGeneratorT g(this);
  InstructionOperand inputs[5];
  size_t input_count = 0;

  inputs[input_count++] = g.UseRegister(this->input_at(node, 2));
  inputs[input_count++] = g.UseImmediate(lane);

  AddressingMode mode =
      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
  opcode |= AddressingModeField::encode(mode);
  Emit(opcode, 0, nullptr, input_count, inputs);
}
void InstructionSelectorT::VisitI16x8DotI8x16I7x16S(OpIndex node) {
  S390OperandGeneratorT g(this);
  Emit(kS390_I16x8DotI8x16S, g.DefineAsRegister(node),
       g.UseUniqueRegister(this->input_at(node, 0)),
       g.UseUniqueRegister(this->input_at(node, 1)));
}
void InstructionSelectorT::VisitI32x4DotI8x16I7x16AddS(OpIndex node) {
  S390OperandGeneratorT g(this);
  InstructionOperand temps[] = {g.TempSimd128Register()};
  Emit(kS390_I32x4DotI8x16AddS, g.DefineAsRegister(node),
       g.UseUniqueRegister(this->input_at(node, 0)),
       g.UseUniqueRegister(this->input_at(node, 1)),
       g.UseUniqueRegister(this->input_at(node, 2)), arraysize(temps), temps);
}
void InstructionSelectorT::VisitTruncateFloat32ToInt32(OpIndex node) {
  S390OperandGeneratorT g(this);
  InstructionCode opcode = kS390_Float32ToInt32;
  const Operation& op = this->Get(node);
  if (op.Is<Opmask::kTruncateFloat32ToInt32OverflowToMin>()) {
    opcode |= MiscField::encode(true);
  }
  Emit(opcode, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));
}
void InstructionSelectorT::VisitTruncateFloat32ToUint32(OpIndex node) {
  S390OperandGeneratorT g(this);
  InstructionCode opcode = kS390_Float32ToUint32;
  const Operation& op = this->Get(node);
  if (op.Is<Opmask::kTruncateFloat32ToUint32OverflowToMin>()) {
    opcode |= MiscField::encode(true);
  }
  Emit(opcode, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));
}
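// In both truncation visitors, MiscField::encode(true) marks the
// overflow-to-min variant selected by the Opmask, so the code generator can
// emit the lowering that produces the minimum integer value on overflow.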
void InstructionSelectorT::AddOutputToSelectContinuation(OperandGenerator* g,
                                                         int first_input_index,
                                                         OpIndex node) {
  UNREACHABLE();
}
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  return MachineOperatorBuilder::kFloat32RoundDown |
         MachineOperatorBuilder::kFloat64RoundDown |
         MachineOperatorBuilder::kFloat32RoundUp |
         MachineOperatorBuilder::kFloat64RoundUp |
         MachineOperatorBuilder::kFloat32RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTruncate |
         MachineOperatorBuilder::kFloat32RoundTiesEven |
         MachineOperatorBuilder::kFloat64RoundTiesEven |
         MachineOperatorBuilder::kFloat64RoundTiesAway |
         MachineOperatorBuilder::kWord32Popcnt |
         MachineOperatorBuilder::kInt32AbsWithOverflow |
         MachineOperatorBuilder::kInt64AbsWithOverflow |
         MachineOperatorBuilder::kWord64Popcnt;
}
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
  return MachineOperatorBuilder::AlignmentRequirements::
      FullUnalignedAccessSupport();
}