using namespace turboshaft;

  if (constant->IsIntegral() && constant->integral() == 0) return true;
  if (constant->kind == ConstantOp::Kind::kFloat32) {
    return constant->float32().get_bits() == 0;
  }
  if (constant->kind == ConstantOp::Kind::kFloat64) {
    return constant->float64().get_bits() == 0;
  }

  if (!constant) return false;
  if (constant->kind == ConstantOp::Kind::kCompressedHeapObject) {

          static_cast<uint32_t>(value), 32, &ignored, &ignored, &ignored);

          static_cast<uint64_t>(value), 64, &ignored, &ignored, &ignored);

    if (uint64_t constant;

  Arm64OperandGeneratorT g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(selector->input_at(node, 0)));

  Arm64OperandGeneratorT g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(selector->input_at(node, 0)),
                 g.UseRegister(selector->input_at(node, 1)));

#if V8_ENABLE_WEBASSEMBLY
  Arm64OperandGeneratorT g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(selector->input_at(node, 0)));
void VisitSimdShiftRRR(InstructionSelectorT* selector, ArchOpcode opcode,
                       OpIndex node, int width) {
  Arm64OperandGeneratorT g(selector);
  int64_t constant;
  if (selector->MatchSignedIntegralConstant(selector->input_at(node, 1),
                                            &constant)) {
    if (constant % width == 0) {
      selector->EmitIdentity(node);
    } else {
      selector->Emit(opcode, g.DefineAsRegister(node),
                     g.UseRegister(selector->input_at(node, 0)),
                     g.UseImmediate(selector->input_at(node, 1)));
    }
  } else {
    selector->Emit(opcode, g.DefineAsRegister(node),
                   g.UseRegister(selector->input_at(node, 0)),
                   g.UseRegister(selector->input_at(node, 1)));
  }
}
  Arm64OperandGeneratorT g(selector);
  const Operation& op = selector->Get(node);
  selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(op.input(0)),
                 g.UseImmediate(imm));

  const Simd128ReplaceLaneOp& op =
  Arm64OperandGeneratorT g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(op.input(0)),
                 g.UseImmediate(op.lane), g.UseUniqueRegister(op.input(1)));

  Arm64OperandGeneratorT g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(selector->input_at(node, 0)),
                 g.UseOperand(selector->input_at(node, 1), operand_mode));

struct ExtendingLoadMatcher {
  InstructionSelectorT* selector_;

  void Initialize(OpIndex node) {
    DCHECK(shift.kind == ShiftOp::Kind::kShiftRightArithmetic ||
           shift.kind == ShiftOp::Kind::kShiftRightArithmeticShiftOutZeros);
    int64_t constant_rhs;
    if (lhs.Is<LoadOp>() &&
        selector_->MatchIntegralWord64Constant(shift.right(), &constant_rhs) &&
        constant_rhs == 32 && selector_->CanCover(node, shift.left())) {
      const LoadOp& load = lhs.Cast<LoadOp>();
      if (load.index().has_value()) {
        int64_t index_constant;
        if (selector_->MatchIntegralWord64Constant(load.index().value(),

bool TryMatchExtendingLoad(InstructionSelectorT* selector, OpIndex node) {
  ExtendingLoadMatcher m(node, selector);

  ExtendingLoadMatcher m(node, selector);
  Arm64OperandGeneratorT g(selector);
  InstructionOperand inputs[2];
  inputs[0] = g.UseRegister(m.base());
  DCHECK(is_int32(m.immediate()));
  inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
  InstructionOperand outputs[] = {g.DefineAsRegister(node)};
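// Rationale for the extending-load match above (inferred from the pattern
// being matched): an arithmetic right shift by 32 of a 64-bit load only needs
// the upper word of the loaded value, so the shift can be folded into a single
// narrower sign-extending load of the high word instead of a full 64-bit load
// followed by a shift.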
bool TryMatchAnyShift(InstructionSelectorT* selector, OpIndex node,
                      OpIndex input_node, InstructionCode* opcode, bool try_ror,
                      RegisterRepresentation rep) {
  Arm64OperandGeneratorT g(selector);

  if (!selector->CanCover(node, input_node)) return false;
  if (const ShiftOp* shift = selector->Get(input_node).TryCast<ShiftOp>()) {
    if (shift->rep != rep) return false;
    if (!g.IsIntegerConstant(shift->right())) return false;

    switch (shift->kind) {
      case ShiftOp::Kind::kShiftLeft:
      case ShiftOp::Kind::kShiftRightLogical:
      case ShiftOp::Kind::kShiftRightArithmetic:
      case ShiftOp::Kind::kShiftRightArithmeticShiftOutZeros:
            TryMatchExtendingLoad(selector, input_node)) {
      case ShiftOp::Kind::kRotateRight:
      case ShiftOp::Kind::kRotateLeft:
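// Each accepted shift kind corresponds to an AArch64 "shifted register"
// operand-2 form (LSL/LSR/ASR, plus ROR for the logical instructions), so the
// caller can fold, for example, `y << imm` into `add x0, x1, x2, lsl #imm`
// instead of emitting a separate shift instruction.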
bool TryMatchBitwiseAndSmallMask(OperationMatcher& matcher, OpIndex op,
                                 OpIndex* left, uint32_t* mask) {
  if (const ChangeOp* change_op =
          matcher.TryCast<Opmask::kChangeInt32ToInt64>(op)) {
    return TryMatchBitwiseAndSmallMask(matcher, change_op->input(), left, mask);
  }
  if (const WordBinopOp* bitwise_and =
          matcher.TryCast<Opmask::kWord32BitwiseAnd>(op)) {
    if (matcher.MatchIntegralWord32Constant(bitwise_and->right(), mask) &&
        (*mask == 0xFF || *mask == 0xFFFF)) {
      *left = bitwise_and->left();
    if (matcher.MatchIntegralWord32Constant(bitwise_and->left(), mask) &&
        (*mask == 0xFF || *mask == 0xFFFF)) {
      *left = bitwise_and->right();
bool TryMatchSignExtendShift(InstructionSelectorT* selector, OpIndex op,
                             OpIndex* left, int32_t* shift_by) {
  if (const ChangeOp* change_op =
    return TryMatchSignExtendShift(selector, change_op->input(), left,

  const Operation& sar_lhs = selector->Get(sar->left());
      selector->CanCover(op, sar->left())) {
    if (selector->MatchIntegralWord32Constant(sar->right(), &sar_by) &&
        selector->MatchIntegralWord32Constant(shl.right(), &shl_by) &&
        sar_by == shl_by && (sar_by == 16 || sar_by == 24)) {
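// `(x << 16) >> 16` (arithmetic) sign-extends the low half-word and
// `(x << 24) >> 24` sign-extends the low byte, which is why only matching
// shift amounts of 16 and 24 are accepted: they map onto the SXTH and SXTB
// extended-register operand forms used by TryMatchAnyExtend below.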
bool TryMatchAnyExtend(Arm64OperandGeneratorT* g,
                       InstructionSelectorT* selector, OpIndex node,
                       InstructionOperand* left_op,
  if (!selector->CanCover(node, right_node)) return false;

  const Operation& right = selector->Get(right_node);
  if (TryMatchBitwiseAndSmallMask(*selector, right_node, &bitwise_and_left,
    *left_op = g->UseRegister(left_node);
    *right_op = g->UseRegister(bitwise_and_left);
        (mask == 0xFF) ? kMode_Operand2_R_UXTB : kMode_Operand2_R_UXTH);

  if (TryMatchSignExtendShift(selector, right_node, &shift_input_left,
    *left_op = g->UseRegister(left_node);
    *right_op = g->UseRegister(shift_input_left);
        (shift_by == 24) ? kMode_Operand2_R_SXTB : kMode_Operand2_R_SXTH);

  if (const ChangeOp* change_op =
    *left_op = g->UseRegister(left_node);
    *right_op = g->UseRegister(change_op->input());
bool TryMatchLoadStoreShift(Arm64OperandGeneratorT* g,
                            InstructionSelectorT* selector,
                            OpIndex index, InstructionOperand* index_op,
                            InstructionOperand* shift_immediate_op) {
  if (!selector->CanCover(node, index)) return false;
  if (const ChangeOp* change =
      change && selector->CanCover(index, change->input())) {
    index = change->input();
  }
  if (shift == nullptr) return false;
  if (!g->CanBeLoadStoreShiftImmediate(shift->right(), rep)) return false;
  *index_op = g->UseRegister(shift->left());
  *shift_immediate_op = g->UseImmediate(shift->right());
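// A successful match lets the memory operation consume the shifted index
// directly via the kMode_Operand2_R_LSL_I addressing mode, e.g. folding
// `index << 2` into `ldr w0, [base, index, lsl #2]` instead of computing the
// scaled index separately.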
  CanCommuteField::decode(result));

template <typename Matcher>
  FlagsContinuationT cont;

void VisitBinopImpl(InstructionSelectorT* selector, OpIndex binop_idx,
  DCHECK(!cont->IsConditionalSet() && !cont->IsConditionalBranch());
  Arm64OperandGeneratorT g(selector);
  constexpr uint32_t kMaxFlagSetInputs = 3;
  constexpr uint32_t kMaxSelectInputs = 2;
  constexpr uint32_t kMaxInputs = kMaxFlagSetInputs + kMaxSelectInputs;
  InstructionOperand inputs[kMaxInputs];
  InstructionOperand outputs[1];
  size_t output_count = 0;

  uint8_t properties = GetBinopProperties(opcode);
  bool can_commute = CanCommuteField::decode(properties);
  bool must_commute_cond = MustCommuteCondField::decode(properties);
  bool is_add_sub = IsAddSubField::decode(properties);

  if (g.CanBeImmediate(right_node, operand_mode)) {
    inputs[input_count++] = g.UseImmediate(right_node);
  } else if (can_commute && g.CanBeImmediate(left_node, operand_mode)) {
    if (must_commute_cond) cont->Commute();
  } else if (is_add_sub &&
             TryMatchAnyExtend(&g, selector, binop_idx, left_node, right_node,
                               &inputs[0], &inputs[1], &opcode)) {
  } else if (is_add_sub && can_commute &&
             TryMatchAnyExtend(&g, selector, binop_idx, right_node, left_node,
                               &inputs[0], &inputs[1], &opcode)) {
    if (must_commute_cond) cont->Commute();
  } else if (TryMatchAnyShift(selector, binop_idx, right_node, &opcode,
    inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
    inputs[input_count++] = g.UseRegister(shift.left());
    selector->MatchSignedIntegralConstant(shift.right(), &constant);
    inputs[input_count++] = g.UseImmediate(static_cast<int>(constant & 0x3F));
  } else if (can_commute && TryMatchAnyShift(selector, binop_idx, left_node,
                                             &opcode, !is_add_sub, rep)) {
    if (must_commute_cond) cont->Commute();
    inputs[input_count++] = g.UseRegisterOrImmediateZero(right_node);
    inputs[input_count++] = g.UseRegister(shift.left());
    selector->MatchSignedIntegralConstant(shift.right(), &constant);
    inputs[input_count++] = g.UseImmediate(static_cast<int>(constant & 0x3F));
    inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);

  if (!IsComparisonField::decode(properties)) {
    outputs[output_count++] = g.DefineAsRegister(binop_idx);

  if (cont->IsSelect()) {
    inputs[input_count++] = g.UseRegisterAtEnd(cont->true_value());
    inputs[input_count++] = g.UseRegisterAtEnd(cont->false_value());

  DCHECK((output_count != 0) || IsComparisonField::decode(properties));

  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
  const Operation& binop = selector->Get(binop_idx);
  OpIndex left_node = binop.input(0);
  OpIndex right_node = binop.input(1);
  return VisitBinopImpl(selector, binop_idx, left_node, right_node, rep, opcode,

                RegisterRepresentation rep, ArchOpcode opcode,
  FlagsContinuationT cont;
  VisitBinop(selector, node, rep, opcode, operand_mode, &cont);

std::tuple<OpIndex, OpIndex> GetBinopLeftRightCstOnTheRight(
    InstructionSelectorT* selector, const WordBinopOp& binop) {
  if (!selector->Is<ConstantOp>(right) &&
      selector->Is<ConstantOp>(left)) {
    std::swap(left, right);
  }
  return {left, right};
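// Canonicalising a constant operand onto the right-hand side means the
// add/sub lowering below only has to inspect `right` for the immediate and
// negated-immediate forms, instead of special-casing a constant on the left.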
void VisitAddSub(InstructionSelectorT* selector, OpIndex node,
  Arm64OperandGeneratorT g(selector);
  const WordBinopOp& add_sub = selector->Get(node).Cast<WordBinopOp>();
  auto [left, right] = GetBinopLeftRightCstOnTheRight(selector, add_sub);

  if (std::optional<int64_t> constant_rhs =
          g.GetOptionalIntegerConstant(right)) {
    if (constant_rhs < 0 && constant_rhs > std::numeric_limits<int>::min() &&
      selector->Emit(negate_opcode, g.DefineAsRegister(node),
                     g.TempImmediate(static_cast<int32_t>(-*constant_rhs)));
template <typename Matcher>
int32_t LeftShiftForReducedMultiply(Matcher* m) {
  DCHECK(m->IsInt32Mul() || m->IsInt64Mul());
  if (m->right().HasResolvedValue() && m->right().ResolvedValue() >= 3) {
    uint64_t value_minus_one = m->right().ResolvedValue() - 1;

int32_t LeftShiftForReducedMultiply(InstructionSelectorT* selector,
  Arm64OperandGeneratorT g(selector);
  if (auto constant = g.GetOptionalIntegerConstant(rhs)) {
    int64_t value_minus_one = constant.value() - 1;
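// Multiplication by an immediate of the form 2^k + 1 can be strength-reduced
// to an add with a shifted operand, since x * (2^k + 1) == x + (x << k).
// Both overloads therefore return k when `rhs - 1` is a power of two and 0
// when no such reduction applies; VisitInt32Mul / VisitInt64Mul below emit the
// corresponding Add with the kMode_Operand2_R_LSL_I addressing mode.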
template <typename MultiplyOpmaskT>
bool TryEmitMultiplyAdd(InstructionSelectorT* selector, OpIndex add,
  const Operation& add_lhs = selector->Get(lhs);
  if (!add_lhs.Is<MultiplyOpmaskT>() || !selector->CanCover(add, lhs)) {

  const WordBinopOp& mul = add_lhs.Cast<WordBinopOp>();
  if (LeftShiftForReducedMultiply(selector, mul.right()) != 0) return false;

  Arm64OperandGeneratorT g(selector);
  selector->Emit(madd_opcode, g.DefineAsRegister(add),
                 g.UseRegister(mul.left()), g.UseRegister(mul.right()),

bool TryEmitMultiplyAddInt32(InstructionSelectorT* selector, OpIndex add,
  return TryEmitMultiplyAdd<Opmask::kWord32Mul>(selector, add, lhs, rhs,

bool TryEmitMultiplyAddInt64(InstructionSelectorT* selector, OpIndex add,
  return TryEmitMultiplyAdd<Opmask::kWord64Mul>(selector, add, lhs, rhs,

template <typename SubtractOpmaskT>
bool TryEmitMultiplyNegate(InstructionSelectorT* selector, OpIndex mul,
  const Operation& mul_lhs = selector->Get(lhs);
  if (!mul_lhs.Is<SubtractOpmaskT>() || !selector->CanCover(mul, lhs)) {

  const WordBinopOp& sub = mul_lhs.Cast<WordBinopOp>();
  Arm64OperandGeneratorT g(selector);
  std::optional<int64_t> sub_lhs_constant =
      g.GetOptionalIntegerConstant(sub.left());
  if (!sub_lhs_constant.has_value() || sub_lhs_constant != 0) return false;
  selector->Emit(mneg_opcode, g.DefineAsRegister(mul),
                 g.UseRegister(sub.right()), g.UseRegister(rhs));

bool TryEmitMultiplyNegateInt32(InstructionSelectorT* selector, OpIndex mul,
  return TryEmitMultiplyNegate<Opmask::kWord32Sub>(selector, mul, lhs, rhs,

bool TryEmitMultiplyNegateInt64(InstructionSelectorT* selector, OpIndex mul,
  return TryEmitMultiplyNegate<Opmask::kWord64Sub>(selector, mul, lhs, rhs,
template <typename MultiplyOpmaskT>
bool TryEmitMultiplySub(InstructionSelectorT* selector, OpIndex node,
  const WordBinopOp& sub = selector->Get(node).Cast<WordBinopOp>();
  DCHECK_EQ(sub.kind, WordBinopOp::Kind::kSub);

  const Operation& sub_rhs = selector->Get(sub.right());
  if (sub_rhs.Is<MultiplyOpmaskT>() && selector->CanCover(node, sub.right())) {
    const WordBinopOp& mul = sub_rhs.Cast<WordBinopOp>();
    if (LeftShiftForReducedMultiply(selector, mul.right()) == 0) {
      Arm64OperandGeneratorT g(selector);
      selector->Emit(msub_opcode, g.DefineAsRegister(node),
                     g.UseRegister(mul.left()), g.UseRegister(mul.right()),
                     g.UseRegister(sub.left()));
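// These helpers map add/sub-of-multiply patterns onto the AArch64
// multiply-accumulate instructions: madd computes a + (b * c), mneg computes
// -(b * c) (matched here as `0 - mul`), and msub computes a - (b * c), so each
// pattern collapses into a single instruction instead of a mul plus an
// add/sub/neg.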
std::tuple<InstructionCode, ImmediateMode> GetStoreOpcodeAndImmediate(
    MemoryRepresentation stored_rep, bool paired) {
  switch (stored_rep) {
    case MemoryRepresentation::Uint8():
    case MemoryRepresentation::Uint16():
    case MemoryRepresentation::Uint32():
    case MemoryRepresentation::Uint64():
    case MemoryRepresentation::TaggedPointer():
    case MemoryRepresentation::TaggedSigned():
#ifdef V8_COMPRESS_POINTERS
      return {kArm64StrCompressTagged,
    case MemoryRepresentation::AnyUncompressedTagged():
    case MemoryRepresentation::UncompressedTaggedPointer():
    case MemoryRepresentation::UncompressedTaggedSigned():
    case MemoryRepresentation::ProtectedPointer():
    case MemoryRepresentation::IndirectPointer():
    case MemoryRepresentation::SandboxedPointer():
    case MemoryRepresentation::Simd128():
    case MemoryRepresentation::Simd256():

void InstructionSelectorT::VisitTraceInstruction(OpIndex node) {}

void InstructionSelectorT::VisitStackSlot(OpIndex node) {
  const StackSlotOp& stack_slot = Cast<StackSlotOp>(node);
  int slot = frame_->AllocateSpillSlot(stack_slot.size, stack_slot.alignment,
                                       stack_slot.is_tagged);
  OperandGenerator g(this);
  Emit(kArchStackSlot, g.DefineAsRegister(node),
       sequence()->AddImmediate(Constant(slot)), 0, nullptr);
void InstructionSelectorT::VisitAbortCSADcheck(OpIndex node) {
  Arm64OperandGeneratorT g(this);
  Emit(kArchAbortCSADcheck, g.NoOutput(),
       g.UseFixed(this->input_at(node, 0), x1));

  Arm64OperandGeneratorT g(selector);
  OpIndex index = load.index().value();
  InstructionOperand inputs[3];
  size_t input_count = 0;
  InstructionOperand output_op;
  output_op = g.DefineAsRegister(output.valid() ? output.value() : node);

  int64_t index_constant;
  const bool is_index_constant =

    ptrdiff_t const delta =
        MacroAssemblerBase::RootRegisterOffsetForExternalReference(
    if (is_int32(delta)) {
      inputs[0] = g.UseImmediate(static_cast<int32_t>(delta));
      opcode |= AddressingModeField::encode(kMode_Root);
      selector->Emit(opcode, 1, &output_op, input_count, inputs);

  if (base_op.Is<LoadRootRegisterOp>()) {
    DCHECK(is_index_constant);
    inputs[0] = g.UseImmediate64(index_constant);
    opcode |= AddressingModeField::encode(kMode_Root);
    selector->Emit(opcode, 1, &output_op, input_count, inputs);

  inputs[0] = g.UseRegister(base);
  if (is_index_constant) {
    if (g.CanBeImmediate(index_constant, immediate_mode)) {
      inputs[1] = g.UseImmediate64(index_constant);
      opcode |= AddressingModeField::encode(kMode_MRI);
      inputs[1] = g.UseRegister(index);
      opcode |= AddressingModeField::encode(kMode_MRR);
    if (TryMatchLoadStoreShift(&g, selector, rep, node, index, &inputs[1],
      opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
      inputs[1] = g.UseRegister(index);
      opcode |= AddressingModeField::encode(kMode_MRR);

  selector->Emit(opcode, 1, &output_op, input_count, inputs);

#if V8_ENABLE_WEBASSEMBLY

InstructionOperand EmitAddBeforeLoadOrStore(InstructionSelectorT* selector,
                                            InstructionCode* opcode) {
  Arm64OperandGeneratorT g(selector);
  *opcode |= AddressingModeField::encode(kMode_MRI);
  OpIndex input0 = selector->input_at(node, 0);
  OpIndex input1 = selector->input_at(node, 1);
  InstructionOperand addr = g.TempRegister();
  auto rhs = g.CanBeImmediate(input1, kArithmeticImm) ? g.UseImmediate(input1)
                                                      : g.UseRegister(input1);
  selector->Emit(kArm64Add, addr, g.UseRegister(input0), rhs);
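// When base + index cannot be folded into the memory operand itself, the
// address is materialised with an explicit Add into a temporary register and
// the lane load/store below then addresses [temp, #0] (kMode_MRI with a zero
// immediate).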
void InstructionSelectorT::VisitLoadLane(OpIndex node) {
  const Simd128LaneMemoryOp& load =
      this->Get(node).Cast<Simd128LaneMemoryOp>();
  opcode |= LaneSizeField::encode(load.lane_size() * kBitsPerByte);
  if (load.kind.with_trap_handler) {
    opcode |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);

  Arm64OperandGeneratorT g(this);
  InstructionOperand addr = EmitAddBeforeLoadOrStore(this, node, &opcode);
  Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(input_at(node, 2)),
       g.UseImmediate(load.lane), addr, g.TempImmediate(0));

void InstructionSelectorT::VisitStoreLane(OpIndex node) {
  const Simd128LaneMemoryOp& store = Get(node).Cast<Simd128LaneMemoryOp>();
  opcode |= LaneSizeField::encode(store.lane_size() * kBitsPerByte);
  if (store.kind.with_trap_handler) {
    opcode |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);

  Arm64OperandGeneratorT g(this);
  InstructionOperand addr = EmitAddBeforeLoadOrStore(this, node, &opcode);
  InstructionOperand inputs[4] = {
      g.UseRegister(input_at(node, 2)),
      g.UseImmediate(store.lane),

  Emit(opcode, 0, nullptr, 4, inputs);

void InstructionSelectorT::VisitLoadTransform(OpIndex node) {
  const Simd128LoadTransformOp& op =
      this->Get(node).Cast<Simd128LoadTransformOp>();
  bool require_add = false;
  switch (op.transform_kind) {
    case Simd128LoadTransformOp::TransformKind::k8Splat:
      opcode = kArm64LoadSplat;
      opcode |= LaneSizeField::encode(8);
    case Simd128LoadTransformOp::TransformKind::k16Splat:
      opcode = kArm64LoadSplat;
      opcode |= LaneSizeField::encode(16);
    case Simd128LoadTransformOp::TransformKind::k32Splat:
      opcode = kArm64LoadSplat;
      opcode |= LaneSizeField::encode(32);
    case Simd128LoadTransformOp::TransformKind::k64Splat:
      opcode = kArm64LoadSplat;
      opcode |= LaneSizeField::encode(64);
    case Simd128LoadTransformOp::TransformKind::k8x8S:
      opcode = kArm64S128Load8x8S;
    case Simd128LoadTransformOp::TransformKind::k8x8U:
      opcode = kArm64S128Load8x8U;
    case Simd128LoadTransformOp::TransformKind::k16x4S:
      opcode = kArm64S128Load16x4S;
    case Simd128LoadTransformOp::TransformKind::k16x4U:
      opcode = kArm64S128Load16x4U;
    case Simd128LoadTransformOp::TransformKind::k32x2S:
      opcode = kArm64S128Load32x2S;
    case Simd128LoadTransformOp::TransformKind::k32x2U:
      opcode = kArm64S128Load32x2U;
    case Simd128LoadTransformOp::TransformKind::k32Zero:
      opcode = kArm64LdrS;
    case Simd128LoadTransformOp::TransformKind::k64Zero:
      opcode = kArm64LdrD;

  DCHECK(!op.load_kind.maybe_unaligned);
  Arm64OperandGeneratorT g(this);
  OpIndex index = input_at(node, 1);
  InstructionOperand inputs[2];
  InstructionOperand outputs[1];
    inputs[0] = g.UseRegister(base);
    inputs[1] = g.UseRegister(index);
    outputs[0] = g.DefineAsRegister(node);
    inputs[0] = EmitAddBeforeLoadOrStore(this, node, &opcode);
    inputs[1] = g.TempImmediate(0);
    opcode |= AddressingModeField::encode(kMode_MRI);
    opcode |= AddressingModeField::encode(kMode_MRR);
  if (op.load_kind.with_trap_handler) {
    opcode |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
  Emit(opcode, 1, outputs, 2, inputs);
  switch (loaded_rep) {
    case MemoryRepresentation::Int8():
      DCHECK_EQ(result_rep, RegisterRepresentation::Word32());
    case MemoryRepresentation::Uint8():
      DCHECK_EQ(result_rep, RegisterRepresentation::Word32());
    case MemoryRepresentation::Int16():
      DCHECK_EQ(result_rep, RegisterRepresentation::Word32());
    case MemoryRepresentation::Uint16():
      DCHECK_EQ(result_rep, RegisterRepresentation::Word32());
    case MemoryRepresentation::Int32():
    case MemoryRepresentation::Uint32():
      DCHECK_EQ(result_rep, RegisterRepresentation::Word32());
    case MemoryRepresentation::Int64():
    case MemoryRepresentation::Uint64():
      DCHECK_EQ(result_rep, RegisterRepresentation::Word64());
    case MemoryRepresentation::Float16():
      DCHECK_EQ(result_rep, RegisterRepresentation::Float32());
    case MemoryRepresentation::Float32():
      DCHECK_EQ(result_rep, RegisterRepresentation::Float32());
    case MemoryRepresentation::Float64():
      DCHECK_EQ(result_rep, RegisterRepresentation::Float64());
#ifdef V8_COMPRESS_POINTERS
    case MemoryRepresentation::AnyTagged():
    case MemoryRepresentation::TaggedPointer():
      if (result_rep == RegisterRepresentation::Compressed()) {
      DCHECK_EQ(result_rep, RegisterRepresentation::Tagged());
    case MemoryRepresentation::TaggedSigned():
      if (result_rep == RegisterRepresentation::Compressed()) {
      DCHECK_EQ(result_rep, RegisterRepresentation::Tagged());
    case MemoryRepresentation::AnyTagged():
    case MemoryRepresentation::TaggedPointer():
    case MemoryRepresentation::TaggedSigned():
    case MemoryRepresentation::AnyUncompressedTagged():
    case MemoryRepresentation::UncompressedTaggedPointer():
    case MemoryRepresentation::UncompressedTaggedSigned():
      DCHECK_EQ(result_rep, RegisterRepresentation::Tagged());
    case MemoryRepresentation::ProtectedPointer():
    case MemoryRepresentation::IndirectPointer():
    case MemoryRepresentation::SandboxedPointer():
    case MemoryRepresentation::Simd128():
    case MemoryRepresentation::Simd256():

    case MachineRepresentation::kFloat16:
    case MachineRepresentation::kFloat32:
    case MachineRepresentation::kFloat64:
    case MachineRepresentation::kBit:
    case MachineRepresentation::kWord8:
                 : load_rep.semantic() == MachineSemantic::kInt32 ? kArm64LdrsbW
    case MachineRepresentation::kWord16:
                 : load_rep.semantic() == MachineSemantic::kInt32 ? kArm64LdrshW
    case MachineRepresentation::kWord32:
    case MachineRepresentation::kCompressedPointer:
    case MachineRepresentation::kCompressed:
#ifdef V8_COMPRESS_POINTERS
#ifdef V8_COMPRESS_POINTERS
    case MachineRepresentation::kTaggedSigned:
    case MachineRepresentation::kTaggedPointer:
    case MachineRepresentation::kTagged:
    case MachineRepresentation::kTaggedSigned:
    case MachineRepresentation::kTaggedPointer:
    case MachineRepresentation::kTagged:
    case MachineRepresentation::kWord64:
    case MachineRepresentation::kProtectedPointer:
    case MachineRepresentation::kSandboxedPointer:
    case MachineRepresentation::kSimd128:
    case MachineRepresentation::kSimd256:
    case MachineRepresentation::kMapWord:
    case MachineRepresentation::kIndirectPointer:
    case MachineRepresentation::kFloat16RawBits:
    case MachineRepresentation::kNone:

void InstructionSelectorT::VisitLoad(OpIndex node) {
  InstructionCode opcode = kArchNop;
  ImmediateMode immediate_mode = kNoImmediate;
  auto load = this->load_view(node);
  LoadRepresentation load_rep = load.loaded_rep();
  std::tie(opcode, immediate_mode) =
      GetLoadOpcodeAndImmediate(load.ts_loaded_rep(), load.ts_result_rep());
  if (load.is_protected(&traps_on_null)) {
    if (traps_on_null) {
      opcode |= AccessModeField::encode(kMemoryAccessProtectedNullDereference);
      opcode |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
  EmitLoad(this, node, opcode, immediate_mode, rep);
void InstructionSelectorT::VisitProtectedLoad(OpIndex node) { VisitLoad(node); }

void InstructionSelectorT::VisitStorePair(OpIndex node) {
  Arm64OperandGeneratorT g(this);

void InstructionSelectorT::VisitStore(OpIndex node) {
  TurboshaftAdapter::StoreView store_view = this->store_view(node);
  DCHECK_EQ(store_view.displacement(), 0);
      store_view.stored_rep().write_barrier_kind();
      store_view.stored_rep().representation();

  Arm64OperandGeneratorT g(this);

  if (write_barrier_kind != kNoWriteBarrier &&
      !v8_flags.disable_write_barriers) {
    InstructionOperand inputs[4];
    inputs[input_count++] = g.UseUniqueRegister(store_view.base());
                               : kLoadStoreImm64)) {
      addressing_mode = kMode_MRI;
      inputs[input_count++] = g.UseUniqueRegister(index);
      addressing_mode = kMode_MRR;
    inputs[input_count++] = g.UseUniqueRegister(store_view.value());
    if (representation == MachineRepresentation::kIndirectPointer) {
      DCHECK_EQ(write_barrier_kind, kIndirectPointerWriteBarrier);
      code = kArchStoreIndirectWithWriteBarrier;
      inputs[input_count++] = g.UseImmediate64(static_cast<int64_t>(tag));
      code = kArchStoreWithWriteBarrier;
    code |= AddressingModeField::encode(addressing_mode);
    code |= RecordWriteModeField::encode(record_write_mode);
    if (store_view.is_store_trap_on_null()) {
      code |= AccessModeField::encode(kMemoryAccessProtectedNullDereference);
    Emit(code, 0, nullptr, input_count, inputs);

  InstructionOperand inputs[4];
  std::tie(opcode, immediate_mode) =
      GetStoreOpcodeAndImmediate(store_view.ts_stored_rep(), false);
  if (v8_flags.enable_unconditional_write_barriers) {

  std::optional<ExternalReference> external_base;
  ExternalReference value;
  if (this->MatchExternalConstant(store_view.base(), &value)) {
    external_base = value;

  std::optional<int64_t> constant_index;
  if (store_view.index().valid()) {
    constant_index = g.GetOptionalIntegerConstant(index);
  if (external_base.has_value() && constant_index.has_value() &&
      CanAddressRelativeToRootsRegister(*external_base)) {
    ptrdiff_t const delta =
        MacroAssemblerBase::RootRegisterOffsetForExternalReference(
    if (is_int32(delta)) {
      InstructionOperand inputs[2];
      inputs[0] = g.UseRegister(store_view.value());
      inputs[1] = g.UseImmediate(static_cast<int32_t>(delta));
      opcode |= AddressingModeField::encode(kMode_Root);
      Emit(opcode, 0, nullptr, input_count, inputs);

  inputs[input_count++] = g.UseRegisterOrImmediateZero(store_view.value());

  if (this->is_load_root_register(base)) {
    opcode |= AddressingModeField::encode(kMode_Root);
    Emit(opcode, 0, nullptr, input_count, inputs);

  if (g.CanBeImmediate(index, immediate_mode)) {
    opcode |= AddressingModeField::encode(kMode_MRI);
  } else if (TryMatchLoadStoreShift(&g, this, approx_rep, node, index,
                                    &inputs[input_count],
                                    &inputs[input_count + 1])) {
    opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
    opcode |= AddressingModeField::encode(kMode_MRR);

  if (store_view.is_store_trap_on_null()) {
    opcode |= AccessModeField::encode(kMemoryAccessProtectedNullDereference);
  } else if (store_view.access_kind() ==
             MemoryAccessKind::kProtectedByTrapHandler) {
    opcode |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);

  Emit(opcode, 0, nullptr, input_count, inputs);

void InstructionSelectorT::VisitProtectedStore(OpIndex node) {

void InstructionSelectorT::VisitSimd128ReverseBytes(OpIndex node) {

void InstructionSelectorT::VisitUnalignedLoad(OpIndex node) { UNREACHABLE(); }

void InstructionSelectorT::VisitUnalignedStore(OpIndex node) { UNREACHABLE(); }
namespace turboshaft {

      code, ccmp_condition, default_flags, ccmp_lhs, ccmp_rhs};

    return opcode() == kArm64Float32Cmp || opcode() == kArm64Float64Cmp;

      case RegisterRepresentation::Word32():
      case RegisterRepresentation::Word64():
      case RegisterRepresentation::Float32():
        return kArm64Float32Cmp;
      case RegisterRepresentation::Float64():
        return kArm64Float64Cmp;

  uint32_t num_ccmps_ = 0;

  enum class NodeKind : uint8_t { kFlagSetting, kLogicalCombine };

      : node_kind_(NodeKind::kFlagSetting),

      : node_kind_(NodeKind::kLogicalCombine), node_(n), lhs_(l), rhs_(r) {
    if (lhs_->IsFlagSetting() && !rhs_->IsFlagSetting()) {
      std::swap(lhs_, rhs_);

    DCHECK(IsLogicalCombine());
    if (requires_negation_) {

    if (IsFlagSetting()) {
      requires_negation_ = !requires_negation_;

    requires_negation_ = false;

    DCHECK(IsLogicalCombine());
    return lhs_->IsFlagSetting() && rhs_->IsFlagSetting();

    return node_kind_ == NodeKind::kLogicalCombine;

    DCHECK(IsLogicalCombine());

    DCHECK(IsLogicalCombine());

  bool requires_negation_ = false;

  if (comparison->rep == RegisterRepresentation::Word32() ||
      comparison->rep == RegisterRepresentation::Word64() ||
      comparison->rep == RegisterRepresentation::Tagged()) {
    switch (comparison->kind) {
      case ComparisonOp::Kind::kEqual:
        return FlagsCondition::kEqual;
      case ComparisonOp::Kind::kSignedLessThan:
        return FlagsCondition::kSignedLessThan;
      case ComparisonOp::Kind::kSignedLessThanOrEqual:
        return FlagsCondition::kSignedLessThanOrEqual;
      case ComparisonOp::Kind::kUnsignedLessThan:
        return FlagsCondition::kUnsignedLessThan;
      case ComparisonOp::Kind::kUnsignedLessThanOrEqual:
        return FlagsCondition::kUnsignedLessThanOrEqual;
  } else if (comparison->rep == RegisterRepresentation::Float32() ||
             comparison->rep == RegisterRepresentation::Float64()) {
    switch (comparison->kind) {
      case ComparisonOp::Kind::kEqual:
        return FlagsCondition::kEqual;
      case ComparisonOp::Kind::kSignedLessThan:
        return FlagsCondition::kFloatLessThan;
      case ComparisonOp::Kind::kSignedLessThanOrEqual:
        return FlagsCondition::kFloatLessThanOrEqual;

  return std::nullopt;

        selector, zone, nodes);
        selector, zone, nodes);
    if (maybe_lhs.has_value() && maybe_rhs.has_value()) {
      return nodes.back();

      return std::nullopt;
  } else if (user.valid() && selector->CanCover(user, node)) {
    std::optional<FlagsCondition> user_condition =
    if (!user_condition.has_value()) {
      return std::nullopt;

    if (comparison.kind == ComparisonOp::Kind::kEqual &&
          selector, zone, nodes);
      if (maybe_negated.has_value()) {

  return std::nullopt;

  if (!sequence->HasCompare()) {

      std::swap(lhs, rhs);
      std::swap(lhs, rhs);
    sequence->InitialCompare(cmp, left, right, rep);

  bool is_logical_or =

    std::swap(ccmp_lhs, ccmp_rhs);
  sequence->AddConditionalCompare(rep, ccmp_condition, default_flags, ccmp_lhs,

  if (!root.has_value()) return std::nullopt;
  if (logic_nodes.size() > FlagsContinuationT::kMaxCompareChainSize) {
    return std::nullopt;
  if (!logic_nodes.front()->IsLegalFirstCombine()) {
    return std::nullopt;

  for (auto* logic_node : logic_nodes) {

  DCHECK_LE(sequence->num_ccmps(), FlagsContinuationT::kMaxCompareChainSize);
  return logic_nodes.back()->user_condition();

  constexpr uint32_t kMaxFlagSetInputs = 2;
  constexpr uint32_t kMaxCcmpOperands =
  constexpr uint32_t kExtraCcmpInputs = 2;
  constexpr uint32_t kMaxInputs =
      kMaxFlagSetInputs + kMaxCcmpOperands + kExtraCcmpInputs;

  size_t input_count = 0;

  inputs[input_count++] = g.UseRegister(right_node);

  if ((compare.code == kArm64Cmp32 || compare.code == kArm64Cmp) &&
    inputs[input_count++] =

  if (!cont->IsBranch()) return false;

  if (final_cond.has_value()) {
                               ? final_cond.value()
        sequence.opcode(), imm_mode, &new_cont);

  if (final_cond.has_value()) {
        sequence.ccmps(), sequence.num_ccmps(), final_cond.value(), node);
        sequence.opcode(), imm_mode, &cont);

      inv_opcode = kArm64Bic32;
      inv_opcode = kArm64Bic;
      inv_opcode = kArm64Orn32;
      inv_opcode = kArm64Orn;
      inv_opcode = kArm64Eon32;
      inv_opcode = kArm64Eon;

  int64_t xor_rhs_val;
      xor_rhs_val == -1) {

  int64_t xor_rhs_val;
      xor_rhs_val == -1) {

  int64_t xor_rhs_val;
      xor_rhs_val == -1) {
    bool is32 = rep == WordRepresentation::Word32();
    ArchOpcode opcode = is32 ? kArm64Not32 : kArm64Not;

  VisitBinop(selector, node, rep, opcode, imm_mode);
void InstructionSelectorT::VisitWord32And(OpIndex node) {
  Arm64OperandGeneratorT g(this);
  if (int64_t constant_rhs;
      CanCover(node, bitwise_and.left()) &&
      MatchSignedIntegralConstant(bitwise_and.right(), &constant_rhs)) {
    DCHECK(base::IsInRange(constant_rhs, std::numeric_limits<int32_t>::min(),
                           std::numeric_limits<int32_t>::max()));
    uint32_t mask = static_cast<uint32_t>(constant_rhs);
    uint32_t mask_width = base::bits::CountPopulation(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
    if ((mask_width != 0) && (mask_width != 32) &&
        (mask_msb + mask_width == 32)) {
      if (int64_t constant;
          MatchSignedIntegralConstant(lhs_shift.right(), &constant)) {
        uint32_t lsb = constant & 0x1F;
        if (lsb + mask_width > 32) mask_width = 32 - lsb;
        Emit(kArm64Ubfx32, g.DefineAsRegister(node),
             g.UseRegister(lhs_shift.left()),
             g.UseImmediateOrTemp(lhs_shift.right(), lsb),
             g.TempImmediate(mask_width));
               CanCover(node, bitwise_and.left()),
               CanCover(node, bitwise_and.right()), kLogical32Imm);

void InstructionSelectorT::VisitWord64And(OpIndex node) {
  Arm64OperandGeneratorT g(this);
  const WordBinopOp& bitwise_and =
      Get(node).Cast<Opmask::kWord64BitwiseAnd>();
      lhs.Is<Opmask::kWord64ShiftRightLogical>() &&
      CanCover(node, bitwise_and.left()) &&
      MatchUnsignedIntegralConstant(bitwise_and.right(), &mask)) {
    uint64_t mask_width = base::bits::CountPopulation(mask);
    uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
    if ((mask_width != 0) && (mask_width != 64) &&
        (mask_msb + mask_width == 64)) {
      if (int64_t shift_by;
          MatchSignedIntegralConstant(shift.right(), &shift_by)) {
        uint32_t lsb = static_cast<uint32_t>(shift_by & 0x3F);
        if (lsb + mask_width > 64) mask_width = 64 - lsb;
        Emit(kArm64Ubfx, g.DefineAsRegister(node), g.UseRegister(shift.left()),
             g.UseImmediateOrTemp(shift.right(), lsb),
             g.TempImmediate(static_cast<int32_t>(mask_width)));
  VisitLogical(this, zone(), node, bitwise_and.rep, kArm64And,
               CanCover(node, bitwise_and.left()),
               CanCover(node, bitwise_and.right()), kLogical64Imm);

void InstructionSelectorT::VisitWord32Or(OpIndex node) {
  const WordBinopOp& op = this->Get(node).template Cast<WordBinopOp>();
               CanCover(node, op.left()), CanCover(node, op.right()),

void InstructionSelectorT::VisitWord64Or(OpIndex node) {
  const WordBinopOp& op = this->Get(node).template Cast<WordBinopOp>();
  VisitLogical(this, zone(), node, op.rep, kArm64Or, CanCover(node, op.left()),
               CanCover(node, op.right()), kLogical64Imm);

void InstructionSelectorT::VisitWord32Xor(OpIndex node) {
  const WordBinopOp& op = this->Get(node).template Cast<WordBinopOp>();
               CanCover(node, op.left()), CanCover(node, op.right()),

void InstructionSelectorT::VisitWord64Xor(OpIndex node) {
  const WordBinopOp& op = this->Get(node).template Cast<WordBinopOp>();
  VisitLogical(this, zone(), node, op.rep, kArm64Eor, CanCover(node, op.left()),
               CanCover(node, op.right()), kLogical64Imm);

void InstructionSelectorT::VisitWord32Shl(OpIndex node) {
  if (uint64_t constant_left;
      lhs.Is<Opmask::kWord32BitwiseAnd>() && CanCover(node, shift_op.left()) &&
      MatchUnsignedIntegralConstant(shift_op.right(), &constant_left)) {
    uint32_t shift_by = static_cast<uint32_t>(constant_left);
    if (base::IsInRange(shift_by, 1, 31)) {
      const WordBinopOp& bitwise_and = lhs.Cast<WordBinopOp>();
      if (uint64_t constant_right;
          MatchUnsignedIntegralConstant(bitwise_and.right(), &constant_right)) {
        uint32_t mask = static_cast<uint32_t>(constant_right);
        uint32_t mask_width = base::bits::CountPopulation(mask);
        uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
        if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
          Arm64OperandGeneratorT g(this);
          if ((shift_by + mask_width) >= 32) {
            Emit(kArm64Lsl32, g.DefineAsRegister(node),
                 g.UseRegister(bitwise_and.left()), g.UseImmediate(shift_by));
            Emit(kArm64Ubfiz32, g.DefineAsRegister(node),
                 g.UseRegister(bitwise_and.left()), g.UseImmediate(shift_by),
                 g.TempImmediate(mask_width));
  VisitRRO(this, kArm64Lsl32, node, kShift32Imm);

void InstructionSelectorT::VisitWord64Shl(OpIndex node) {
  Arm64OperandGeneratorT g(this);
  const ShiftOp& shift_op = this->Get(node).template Cast<ShiftOp>();
  if ((lhs.Is<Opmask::kChangeInt32ToInt64>() ||
       lhs.Is<Opmask::kChangeUint32ToUint64>()) &&
      rhs.Is<Opmask::kWord32Constant>()) {
    int64_t shift_by = rhs.Cast<ConstantOp>().signed_integral();
    if (base::IsInRange(shift_by, 32, 63) && CanCover(node, shift_op.left())) {
      Emit(kArm64Lsl, g.DefineAsRegister(node),
           g.UseRegister(lhs.Cast<ChangeOp>().input()),
           g.UseImmediate64(shift_by));
  VisitRRO(this, kArm64Lsl, node, kShift64Imm);

void InstructionSelectorT::VisitStackPointerGreaterThan(
    OpIndex node, FlagsContinuationT* cont) {
  const auto& op = this->turboshaft_graph()
                       .template Cast<StackPointerGreaterThanOp>();
  value = op.stack_limit();
      kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));

  Arm64OperandGeneratorT g(this);
  InstructionOperand* const outputs = nullptr;
  const int output_count = 0;
  InstructionOperand temps[] = {g.TempRegister()};
  const int temp_count = (kind == StackCheckKind::kJSFunctionEntry) ? 1 : 0;
  const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
                                 ? OperandGenerator::kUniqueRegister
  InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
  EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
                       temp_count, temps, cont);
bool TryEmitBitfieldExtract32(InstructionSelectorT* selector, OpIndex node) {
  Arm64OperandGeneratorT g(selector);
  const Operation& lhs = selector->Get(shift.left());
  if (selector->CanCover(node, shift.left()) &&
      lhs.Is<Opmask::kWord32ShiftLeft>()) {
    int64_t lhs_shift_by_constant, shift_by_constant;
    if (selector->MatchSignedIntegralConstant(lhs_shift.right(),
                                              &lhs_shift_by_constant) &&
        selector->MatchSignedIntegralConstant(shift.right(),
                                              &shift_by_constant) &&
        (lhs_shift_by_constant & 0x1F) != 0 &&
        (lhs_shift_by_constant & 0x1F) == (shift_by_constant & 0x1F)) {
      DCHECK(shift.Is<Opmask::kWord32ShiftRightArithmetic>() ||
             shift.Is<Opmask::kWord32ShiftRightArithmeticShiftOutZeros>() ||
             shift.Is<Opmask::kWord32ShiftRightLogical>());
      ArchOpcode opcode = shift.kind == ShiftOp::Kind::kShiftRightLogical
      int right_val = shift_by_constant & 0x1F;
      selector->Emit(opcode, g.DefineAsRegister(node),
                     g.UseRegister(lhs_shift.left()), g.TempImmediate(0),
                     g.TempImmediate(32 - right_val));
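// With matching shift amounts, `(x << n) >> n` keeps only the low (32 - n)
// bits, so the pair is emitted as a single bitfield extract of width 32 - n
// starting at bit 0: a zero-extending extract for a logical right shift and a
// sign-extending extract for an arithmetic right shift.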
void InstructionSelectorT::VisitWord32Shr(OpIndex node) {
  uint64_t constant_right;
  const bool right_is_constant =
      MatchUnsignedIntegralConstant(shift.right(), &constant_right);
  if (lhs.Is<Opmask::kWord32BitwiseAnd>() && right_is_constant) {
    uint32_t lsb = constant_right & 0x1F;
    const WordBinopOp& bitwise_and = lhs.Cast<WordBinopOp>();
    uint32_t constant_bitmask;
    if (MatchIntegralWord32Constant(bitwise_and.right(), &constant_bitmask) &&
        constant_bitmask != 0) {
      uint32_t mask = (constant_bitmask >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_msb + mask_width + lsb) == 32) {
        Arm64OperandGeneratorT g(this);
        Emit(kArm64Ubfx32, g.DefineAsRegister(node),
             g.UseRegister(bitwise_and.left()),
             g.UseImmediateOrTemp(shift.right(), lsb),
             g.TempImmediate(mask_width));
  } else if (TryEmitBitfieldExtract32(this, node)) {

  if (lhs.Is<Opmask::kWord32UnsignedMulOverflownBits>() && right_is_constant &&
      CanCover(node, shift.left())) {
    Arm64OperandGeneratorT g(this);
    const WordBinopOp& mul = lhs.Cast<WordBinopOp>();
    int shift_by = constant_right & 0x1F;
    InstructionOperand const smull_operand = g.TempRegister();
    Emit(kArm64Umull, smull_operand, g.UseRegister(mul.left()),
         g.UseRegister(mul.right()));
    Emit(kArm64Lsr, g.DefineAsRegister(node), smull_operand,
         g.TempImmediate(32 + shift_by));
  VisitRRO(this, kArm64Lsr32, node, kShift32Imm);

void InstructionSelectorT::VisitWord64Shr(OpIndex node) {
  if (uint64_t constant; lhs.Is<Opmask::kWord64BitwiseAnd>() &&
      MatchUnsignedIntegralConstant(op.right(), &constant)) {
    uint32_t lsb = constant & 0x3F;
    const WordBinopOp& bitwise_and = lhs.Cast<WordBinopOp>();
    uint64_t constant_and_rhs;
    if (MatchIntegralWord64Constant(bitwise_and.right(), &constant_and_rhs) &&
        constant_and_rhs != 0) {
      uint64_t mask = static_cast<uint64_t>(constant_and_rhs >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
      if ((mask_msb + mask_width + lsb) == 64) {
        Arm64OperandGeneratorT g(this);
        Emit(kArm64Ubfx, g.DefineAsRegister(node),
             g.UseRegister(bitwise_and.left()),
             g.UseImmediateOrTemp(op.right(), lsb),
             g.TempImmediate(mask_width));
  VisitRRO(this, kArm64Lsr, node, kShift64Imm);

void InstructionSelectorT::VisitWord32Sar(OpIndex node) {
  if (TryEmitBitfieldExtract32(this, node)) {

  uint64_t constant_right;
  const bool right_is_constant =
      MatchUnsignedIntegralConstant(shift.right(), &constant_right);
  if (lhs.Is<Opmask::kWord32SignedMulOverflownBits>() && right_is_constant &&
      CanCover(node, shift.left())) {
    Arm64OperandGeneratorT g(this);
    const WordBinopOp& mul_overflow = lhs.Cast<WordBinopOp>();
    int shift_by = constant_right & 0x1F;
    InstructionOperand const smull_operand = g.TempRegister();
    Emit(kArm64Smull, smull_operand, g.UseRegister(mul_overflow.left()),
         g.UseRegister(mul_overflow.right()));
    Emit(kArm64Asr, g.DefineAsRegister(node), smull_operand,
         g.TempImmediate(32 + shift_by));

  if (lhs.Is<Opmask::kWord32Add>() && right_is_constant &&
      CanCover(node, shift.left())) {
    const WordBinopOp& add = Get(shift.left()).Cast<WordBinopOp>();
    if (lhs.Is<Opmask::kWord32SignedMulOverflownBits>() &&
        CanCover(shift.left(), add.left())) {
      Arm64OperandGeneratorT g(this);
      const WordBinopOp& mul = lhs.Cast<WordBinopOp>();
      InstructionOperand const smull_operand = g.TempRegister();
      Emit(kArm64Smull, smull_operand, g.UseRegister(mul.left()),
           g.UseRegister(mul.right()));
      InstructionOperand const add_operand = g.TempRegister();
      Emit(kArm64Add | AddressingModeField::encode(kMode_Operand2_R_ASR_I),
           add_operand, g.UseRegister(add.right()), smull_operand,
           g.TempImmediate(32));
      Emit(kArm64Asr32, g.DefineAsRegister(node), add_operand,
           g.UseImmediate(shift.right()));
  VisitRRO(this, kArm64Asr32, node, kShift32Imm);

void InstructionSelectorT::VisitWord64Sar(OpIndex node) {
  int64_t constant_rhs;
  if (lhs.Is<Opmask::kChangeInt32ToInt64>() &&
      MatchIntegralWord64Constant(shiftop.right(), &constant_rhs) &&
      is_uint5(constant_rhs) && CanCover(node, shiftop.left())) {
    OpIndex input = lhs.Cast<ChangeOp>().input();
    if (!Get(input).Is<LoadOp>() || !CanCover(shiftop.left(), input)) {
      Arm64OperandGeneratorT g(this);
      int right = static_cast<int>(constant_rhs);
      Emit(kArm64Sbfx, g.DefineAsRegister(node), g.UseRegister(input),
           g.UseImmediate(right), g.UseImmediate(32 - right));
  VisitRRO(this, kArm64Asr, node, kShift64Imm);

void InstructionSelectorT::VisitWord32Rol(OpIndex node) { UNREACHABLE(); }

void InstructionSelectorT::VisitWord64Rol(OpIndex node) { UNREACHABLE(); }

void InstructionSelectorT::VisitWord32Ror(OpIndex node) {
  VisitRRO(this, kArm64Ror32, node, kShift32Imm);

void InstructionSelectorT::VisitWord64Ror(OpIndex node) {
  VisitRRO(this, kArm64Ror, node, kShift64Imm);
#define RR_OP_T_LIST(V) \
  V(Float32Sqrt, kArm64Float32Sqrt) \
  V(Float64Sqrt, kArm64Float64Sqrt) \
  V(Float32RoundDown, kArm64Float32RoundDown) \
  V(Float64RoundDown, kArm64Float64RoundDown) \
  V(Float32RoundUp, kArm64Float32RoundUp) \
  V(Float64RoundUp, kArm64Float64RoundUp) \
  V(Float32RoundTruncate, kArm64Float32RoundTruncate) \
  V(Float64RoundTruncate, kArm64Float64RoundTruncate) \
  V(Float64RoundTiesAway, kArm64Float64RoundTiesAway) \
  V(Float32RoundTiesEven, kArm64Float32RoundTiesEven) \
  V(Float64RoundTiesEven, kArm64Float64RoundTiesEven) \
  V(Float64SilenceNaN, kArm64Float64SilenceNaN) \
  V(ChangeInt32ToFloat64, kArm64Int32ToFloat64) \
  V(RoundFloat64ToInt32, kArm64Float64ToInt32) \
  V(ChangeFloat32ToFloat64, kArm64Float32ToFloat64) \
  V(RoundInt32ToFloat32, kArm64Int32ToFloat32) \
  V(RoundUint32ToFloat32, kArm64Uint32ToFloat32) \
  V(ChangeInt64ToFloat64, kArm64Int64ToFloat64) \
  V(ChangeUint32ToFloat64, kArm64Uint32ToFloat64) \
  V(ChangeFloat64ToInt32, kArm64Float64ToInt32) \
  V(ChangeFloat64ToInt64, kArm64Float64ToInt64) \
  V(ChangeFloat64ToUint32, kArm64Float64ToUint32) \
  V(ChangeFloat64ToUint64, kArm64Float64ToUint64) \
  V(RoundInt64ToFloat32, kArm64Int64ToFloat32) \
  V(RoundInt64ToFloat64, kArm64Int64ToFloat64) \
  V(RoundUint64ToFloat32, kArm64Uint64ToFloat32) \
  V(RoundUint64ToFloat64, kArm64Uint64ToFloat64) \
  V(BitcastFloat32ToInt32, kArm64Float64ExtractLowWord32) \
  V(BitcastFloat64ToInt64, kArm64U64MoveFloat64) \
  V(BitcastInt32ToFloat32, kArm64Float64MoveU64) \
  V(BitcastInt64ToFloat64, kArm64Float64MoveU64) \
  V(TruncateFloat64ToFloat32, kArm64Float64ToFloat32) \
  V(TruncateFloat64ToWord32, kArchTruncateDoubleToI) \
  V(TruncateFloat64ToUint32, kArm64Float64ToUint32) \
  V(Float64ExtractLowWord32, kArm64Float64ExtractLowWord32) \
  V(Float64ExtractHighWord32, kArm64Float64ExtractHighWord32) \
  V(Word64Clz, kArm64Clz) \
  V(Word32Clz, kArm64Clz32) \
  V(Word32Popcnt, kArm64Cnt32) \
  V(Word64Popcnt, kArm64Cnt64) \
  V(Word32ReverseBits, kArm64Rbit32) \
  V(Word64ReverseBits, kArm64Rbit) \
  V(Word32ReverseBytes, kArm64Rev32) \
  V(Word64ReverseBytes, kArm64Rev) \
  IF_WASM(V, F16x8Ceil, kArm64Float16RoundUp) \
  IF_WASM(V, F16x8Floor, kArm64Float16RoundDown) \
  IF_WASM(V, F16x8Trunc, kArm64Float16RoundTruncate) \
  IF_WASM(V, F16x8NearestInt, kArm64Float16RoundTiesEven) \
  IF_WASM(V, F32x4Ceil, kArm64Float32RoundUp) \
  IF_WASM(V, F32x4Floor, kArm64Float32RoundDown) \
  IF_WASM(V, F32x4Trunc, kArm64Float32RoundTruncate) \
  IF_WASM(V, F32x4NearestInt, kArm64Float32RoundTiesEven) \
  IF_WASM(V, F64x2Ceil, kArm64Float64RoundUp) \
  IF_WASM(V, F64x2Floor, kArm64Float64RoundDown) \
  IF_WASM(V, F64x2Trunc, kArm64Float64RoundTruncate) \
  IF_WASM(V, F64x2NearestInt, kArm64Float64RoundTiesEven)

#define RRR_OP_T_LIST(V) \
  V(Int32Div, kArm64Idiv32) \
  V(Int64Div, kArm64Idiv) \
  V(Uint32Div, kArm64Udiv32) \
  V(Uint64Div, kArm64Udiv) \
  V(Int32Mod, kArm64Imod32) \
  V(Int64Mod, kArm64Imod) \
  V(Uint32Mod, kArm64Umod32) \
  V(Uint64Mod, kArm64Umod) \
  V(Float32Add, kArm64Float32Add) \
  V(Float64Add, kArm64Float64Add) \
  V(Float32Sub, kArm64Float32Sub) \
  V(Float64Sub, kArm64Float64Sub) \
  V(Float32Div, kArm64Float32Div) \
  V(Float64Div, kArm64Float64Div) \
  V(Float32Max, kArm64Float32Max) \
  V(Float64Max, kArm64Float64Max) \
  V(Float32Min, kArm64Float32Min) \
  V(Float64Min, kArm64Float64Min) \
  IF_WASM(V, I8x16Swizzle, kArm64I8x16Swizzle)

#define RR_VISITOR(Name, opcode) \
  void InstructionSelectorT::Visit##Name(OpIndex node) { \
    VisitRR(this, opcode, node); \

#define RRR_VISITOR(Name, opcode) \
  void InstructionSelectorT::Visit##Name(OpIndex node) { \
    VisitRRR(this, opcode, node); \

void InstructionSelectorT::VisitWord32Ctz(OpIndex node) { UNREACHABLE(); }

void InstructionSelectorT::VisitWord64Ctz(OpIndex node) { UNREACHABLE(); }
void InstructionSelectorT::VisitInt32Add(OpIndex node) {
  const WordBinopOp& add = this->Get(node).Cast<WordBinopOp>();
  DCHECK(add.Is<Opmask::kWord32Add>());
  if (TryEmitMultiplyAddInt32(this, node, left, right) ||
      TryEmitMultiplyAddInt32(this, node, right, left)) {
  VisitAddSub(this, node, kArm64Add32, kArm64Sub32);

void InstructionSelectorT::VisitInt64Add(OpIndex node) {
  const WordBinopOp& add = this->Get(node).Cast<WordBinopOp>();
  DCHECK(add.Is<Opmask::kWord64Add>());
  if (TryEmitMultiplyAddInt64(this, node, left, right) ||
      TryEmitMultiplyAddInt64(this, node, right, left)) {
  VisitAddSub(this, node, kArm64Add, kArm64Sub);

void InstructionSelectorT::VisitInt32Sub(OpIndex node) {
  DCHECK(this->Get(node).Is<Opmask::kWord32Sub>());
  if (TryEmitMultiplySub<Opmask::kWord32Mul>(this, node, kArm64Msub32)) {
  VisitAddSub(this, node, kArm64Sub32, kArm64Add32);

void InstructionSelectorT::VisitInt64Sub(OpIndex node) {
  DCHECK(this->Get(node).Is<Opmask::kWord64Sub>());
  if (TryEmitMultiplySub<Opmask::kWord64Mul>(this, node, kArm64Msub)) {
  VisitAddSub(this, node, kArm64Sub, kArm64Add);
void EmitInt32MulWithOverflow(InstructionSelectorT* selector, OpIndex node,
                              FlagsContinuationT* cont) {
  Arm64OperandGeneratorT g(selector);
  const OverflowCheckedBinopOp& mul =
      selector->Get(node).Cast<OverflowCheckedBinopOp>();
  InstructionOperand result = g.DefineAsRegister(node);
  InstructionOperand left = g.UseRegister(mul.left());
  if (selector->MatchIntegralWord32Constant(mul.right(), &constant_rhs) &&
      base::bits::IsPowerOfTwo(constant_rhs)) {
    int32_t shift = base::bits::WhichPowerOfTwo(constant_rhs);
    selector->Emit(kArm64Sbfiz, result, left, g.TempImmediate(shift),
                   g.TempImmediate(32));
    InstructionOperand right = g.UseRegister(mul.right());
    selector->Emit(kArm64Smull, result, left, right);
      kArm64Cmp | AddressingModeField::encode(kMode_Operand2_R_SXTW);
  selector->EmitWithContinuation(opcode, result, result, cont);
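// Overflow check: the 64-bit smull result is compared against its own low 32
// bits sign-extended (the SXTW operand mode above); the two differ exactly
// when the product does not fit in a signed 32-bit value.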
                              FlagsContinuationT* cont) {
  Arm64OperandGeneratorT g(selector);
  InstructionOperand result = g.DefineAsRegister(node);
  InstructionOperand left = g.UseRegister(selector->input_at(node, 0));
  InstructionOperand high = g.TempRegister();
  InstructionOperand right = g.UseRegister(selector->input_at(node, 1));
  selector->Emit(kArm64Mul, result, left, right);
  selector->Emit(kArm64Smulh, high, left, right);
      kArm64Cmp | AddressingModeField::encode(kMode_Operand2_R_ASR_I);
  selector->EmitWithContinuation(opcode, high, result, g.TempImmediate(63),
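// For the 64-bit case, smulh yields the high 64 bits of the 128-bit product.
// Comparing it against the low half shifted arithmetically right by 63 (the
// sign of the low half replicated across the register) signals overflow
// whenever the product does not fit in a signed 64-bit value.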
void InstructionSelectorT::VisitInt32Mul(OpIndex node) {
  Arm64OperandGeneratorT g(this);
  const WordBinopOp& mul = Get(node).Cast<WordBinopOp>();
  DCHECK(mul.Is<Opmask::kWord32Mul>());
  int32_t shift = LeftShiftForReducedMultiply(this, mul.right());
    Emit(kArm64Add32 | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
         g.DefineAsRegister(node), g.UseRegister(mul.left()),
         g.UseRegister(mul.left()), g.TempImmediate(shift));

  if (TryEmitMultiplyNegateInt32(this, node, mul.left(), mul.right()) ||
      TryEmitMultiplyNegateInt32(this, node, mul.right(), mul.left())) {

void InstructionSelectorT::VisitInt64Mul(OpIndex node) {
  Arm64OperandGeneratorT g(this);
  const WordBinopOp& mul = Get(node).Cast<WordBinopOp>();
  DCHECK(mul.Is<Opmask::kWord64Mul>());
  int32_t shift = LeftShiftForReducedMultiply(this, mul.right());
    Emit(kArm64Add | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
         g.DefineAsRegister(node), g.UseRegister(mul.left()),
         g.UseRegister(mul.left()), g.TempImmediate(shift));

  if (TryEmitMultiplyNegateInt64(this, node, mul.left(), mul.right()) ||
      TryEmitMultiplyNegateInt64(this, node, mul.right(), mul.left())) {

#if V8_ENABLE_WEBASSEMBLY
void VisitExtMul(InstructionSelectorT* selector, ArchOpcode opcode,
                 OpIndex node, int dst_lane_size) {
  code |= LaneSizeField::encode(dst_lane_size);

void InstructionSelectorT::VisitI16x8ExtMulLowI8x16S(OpIndex node) {
  VisitExtMul(this, kArm64Smull, node, 16);

void InstructionSelectorT::VisitI16x8ExtMulHighI8x16S(OpIndex node) {
  VisitExtMul(this, kArm64Smull2, node, 16);

void InstructionSelectorT::VisitI16x8ExtMulLowI8x16U(OpIndex node) {
  VisitExtMul(this, kArm64Umull, node, 16);

void InstructionSelectorT::VisitI16x8ExtMulHighI8x16U(OpIndex node) {
  VisitExtMul(this, kArm64Umull2, node, 16);

void InstructionSelectorT::VisitI32x4ExtMulLowI16x8S(OpIndex node) {
  VisitExtMul(this, kArm64Smull, node, 32);

void InstructionSelectorT::VisitI32x4ExtMulHighI16x8S(OpIndex node) {
  VisitExtMul(this, kArm64Smull2, node, 32);

void InstructionSelectorT::VisitI32x4ExtMulLowI16x8U(OpIndex node) {
  VisitExtMul(this, kArm64Umull, node, 32);

void InstructionSelectorT::VisitI32x4ExtMulHighI16x8U(OpIndex node) {
  VisitExtMul(this, kArm64Umull2, node, 32);

void InstructionSelectorT::VisitI64x2ExtMulLowI32x4S(OpIndex node) {
  VisitExtMul(this, kArm64Smull, node, 64);

void InstructionSelectorT::VisitI64x2ExtMulHighI32x4S(OpIndex node) {
  VisitExtMul(this, kArm64Smull2, node, 64);

void InstructionSelectorT::VisitI64x2ExtMulLowI32x4U(OpIndex node) {
  VisitExtMul(this, kArm64Umull, node, 64);

void InstructionSelectorT::VisitI64x2ExtMulHighI32x4U(OpIndex node) {
  VisitExtMul(this, kArm64Umull2, node, 64);
#if V8_ENABLE_WEBASSEMBLY
void VisitExtAddPairwise(InstructionSelectorT* selector, ArchOpcode opcode,
                         OpIndex node, int dst_lane_size) {
  code |= LaneSizeField::encode(dst_lane_size);
  VisitRR(selector, code, node);

void InstructionSelectorT::VisitI32x4ExtAddPairwiseI16x8S(OpIndex node) {
  VisitExtAddPairwise(this, kArm64Saddlp, node, 32);

void InstructionSelectorT::VisitI32x4ExtAddPairwiseI16x8U(OpIndex node) {
  VisitExtAddPairwise(this, kArm64Uaddlp, node, 32);

void InstructionSelectorT::VisitI16x8ExtAddPairwiseI8x16S(OpIndex node) {
  VisitExtAddPairwise(this, kArm64Saddlp, node, 16);

void InstructionSelectorT::VisitI16x8ExtAddPairwiseI8x16U(OpIndex node) {
  VisitExtAddPairwise(this, kArm64Uaddlp, node, 16);

void InstructionSelectorT::VisitInt32MulHigh(OpIndex node) {
  Arm64OperandGeneratorT g(this);
  InstructionOperand const smull_operand = g.TempRegister();
  Emit(kArm64Smull, smull_operand, g.UseRegister(this->input_at(node, 0)),
       g.UseRegister(this->input_at(node, 1)));
  Emit(kArm64Asr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
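// There is no dedicated 32-bit "mul high" instruction, so the 32x32->64 bit
// smull result goes into a temporary and the high word is extracted with an
// arithmetic shift right by 32 (the unsigned variant below uses umull and a
// logical shift).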
2765void InstructionSelectorT::VisitInt64MulHigh(OpIndex node) {
2766 return VisitRRR(
this, kArm64Smulh, node);
2769void InstructionSelectorT::VisitUint32MulHigh(OpIndex node) {
2770 Arm64OperandGeneratorT g(
this);
2771 InstructionOperand
const smull_operand = g.TempRegister();
2772 Emit(kArm64Umull, smull_operand, g.UseRegister(this->input_at(node, 0)),
2773 g.UseRegister(this->input_at(node, 1)));
2774 Emit(kArm64Lsr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
2777void InstructionSelectorT::VisitUint64MulHigh(OpIndex node) {
2778 return VisitRRR(
this, kArm64Umulh, node);
void InstructionSelectorT::VisitTruncateFloat32ToInt32(OpIndex node) {
  Arm64OperandGeneratorT g(this);
  const Operation& op = Get(node);
  InstructionCode opcode = kArm64Float32ToInt32;
  opcode |=
      MiscField::encode(op.Is<Opmask::kTruncateFloat32ToInt32OverflowToMin>());
  Emit(opcode, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));
}

void InstructionSelectorT::VisitTruncateFloat32ToUint32(OpIndex node) {
  Arm64OperandGeneratorT g(this);
  const Operation& op = Get(node);
  InstructionCode opcode = kArm64Float32ToUint32;
  if (op.Is<Opmask::kTruncateFloat32ToUint32OverflowToMin>()) {
    opcode |= MiscField::encode(true);
  }
  Emit(opcode, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));
}
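// The TryTruncate* visitors can produce a second, optional output: if a
// Projection(1) of the node is used, it is defined as an extra register
// holding a success flag, set when the conversion did not overflow.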
void InstructionSelectorT::VisitTryTruncateFloat32ToInt64(OpIndex node) {
  Arm64OperandGeneratorT g(this);

  InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  OptionalOpIndex success_output = FindProjection(node, 1);
  if (success_output.valid()) {
    outputs[output_count++] = g.DefineAsRegister(success_output.value());
  }

  Emit(kArm64Float32ToInt64, output_count, outputs, 1, inputs);
}

void InstructionSelectorT::VisitTruncateFloat64ToInt64(OpIndex node) {
  Arm64OperandGeneratorT g(this);
  const Operation& op = Get(node);
  InstructionCode opcode = kArm64Float64ToInt64;
  if (op.Is<Opmask::kTruncateFloat64ToInt64OverflowToMin>()) {
    opcode |= MiscField::encode(true);
  }
  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(op.input(0)));
}

void InstructionSelectorT::VisitTryTruncateFloat64ToInt64(OpIndex node) {
  Arm64OperandGeneratorT g(this);

  InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  OptionalOpIndex success_output = FindProjection(node, 1);
  if (success_output.valid()) {
    outputs[output_count++] = g.DefineAsRegister(success_output.value());
  }

  Emit(kArm64Float64ToInt64, output_count, outputs, 1, inputs);
}
void InstructionSelectorT::VisitTruncateFloat64ToFloat16RawBits(OpIndex node) {
  Arm64OperandGeneratorT g(this);
  InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
  InstructionOperand outputs[] = {g.DefineAsRegister(node)};
  InstructionOperand temps[] = {g.TempDoubleRegister()};
  Emit(kArm64Float64ToFloat16RawBits, arraysize(outputs), outputs,
       arraysize(inputs), inputs, arraysize(temps), temps);
}

void InstructionSelectorT::VisitChangeFloat16RawBitsToFloat64(OpIndex node) {
  Arm64OperandGeneratorT g(this);
  InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
  InstructionOperand outputs[] = {g.DefineAsRegister(node)};
  InstructionOperand temps[] = {g.TempDoubleRegister()};
  Emit(kArm64Float16RawBitsToFloat64, arraysize(outputs), outputs,
       arraysize(inputs), inputs, arraysize(temps), temps);
}
void InstructionSelectorT::VisitTryTruncateFloat32ToUint64(OpIndex node) {
  Arm64OperandGeneratorT g(this);

  InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  OptionalOpIndex success_output = FindProjection(node, 1);
  if (success_output.valid()) {
    outputs[output_count++] = g.DefineAsRegister(success_output.value());
  }

  Emit(kArm64Float32ToUint64, output_count, outputs, 1, inputs);
}

void InstructionSelectorT::VisitTryTruncateFloat64ToUint64(OpIndex node) {
  Arm64OperandGeneratorT g(this);

  InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  OptionalOpIndex success_output = FindProjection(node, 1);
  if (success_output.valid()) {
    outputs[output_count++] = g.DefineAsRegister(success_output.value());
  }

  Emit(kArm64Float64ToUint64, output_count, outputs, 1, inputs);
}

void InstructionSelectorT::VisitTryTruncateFloat64ToInt32(OpIndex node) {
  Arm64OperandGeneratorT g(this);
  InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  OptionalOpIndex success_output = FindProjection(node, 1);
  if (success_output.valid()) {
    outputs[output_count++] = g.DefineAsRegister(success_output.value());
  }

  Emit(kArm64Float64ToInt32, output_count, outputs, 1, inputs);
}

void InstructionSelectorT::VisitTryTruncateFloat64ToUint32(OpIndex node) {
  Arm64OperandGeneratorT g(this);
  InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  OptionalOpIndex success_output = FindProjection(node, 1);
  if (success_output.valid()) {
    outputs[output_count++] = g.DefineAsRegister(success_output.value());
  }

  Emit(kArm64Float64ToUint32, output_count, outputs, 1, inputs);
}
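// Word32 -> Word64 conversions: ChangeInt32ToInt64 tries to fold the
// sign-extension into a covered load (ldrsb/ldrsh/ldrsw) or a covered
// Word32Sar with a constant shift (sbfx), and otherwise falls back to a
// plain sxtw.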
2926void InstructionSelectorT::VisitBitcastWord32ToWord64(OpIndex node) {
2932void InstructionSelectorT::VisitChangeInt32ToInt64(OpIndex node) {
2933 const ChangeOp& change_op = this->
Get(node).template Cast<ChangeOp>();
2934 const Operation& input_op = this->
Get(change_op.input());
2935 if (input_op.Is<LoadOp>() && CanCover(node, change_op.input())) {
2938 this->load_view(change_op.input()).loaded_rep();
2943 case MachineRepresentation::kBit:
2944 case MachineRepresentation::kWord8:
2945 opcode = load_rep.IsSigned() ? kArm64Ldrsb : kArm64Ldrb;
2948 case MachineRepresentation::kWord16:
2949 opcode = load_rep.IsSigned() ? kArm64Ldrsh : kArm64Ldrh;
2952 case MachineRepresentation::kWord32:
2953 case MachineRepresentation::kWord64:
2957 case MachineRepresentation::kTaggedSigned:
2958 case MachineRepresentation::kTagged:
2959 case MachineRepresentation::kTaggedPointer:
2960 opcode = kArm64Ldrsw;
2966 EmitLoad(
this, change_op.input(), opcode, immediate_mode, rep, node);
2969 if ((input_op.Is<Opmask::kWord32ShiftRightArithmetic>() ||
2970 input_op.Is<Opmask::kWord32ShiftRightArithmeticShiftOutZeros>()) &&
2971 CanCover(node, change_op.input())) {
2973 if (int64_t constant; MatchSignedIntegralConstant(sar.right(), &constant)) {
2974 Arm64OperandGeneratorT g(
this);
2976 int right = constant & 0x1F;
2977 Emit(kArm64Sbfx, g.DefineAsRegister(node), g.UseRegister(sar.left()),
2978 g.TempImmediate(right), g.TempImmediate(32 - right));
2982 VisitRR(
this, kArm64Sxtw, node);
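// On arm64, 32-bit operations write a W register and implicitly clear the
// upper 32 bits of the corresponding X register, so the results of the
// operations listed below are already zero-extended and a following
// ChangeUint32ToUint64 can be elided.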
2985bool InstructionSelectorT::ZeroExtendsWord32ToWord64NoPhis(OpIndex node) {
2991 switch (op.opcode) {
2992 case Opcode::kWordBinop:
2993 return op.Cast<WordBinopOp>().rep == WordRepresentation::Word32();
2994 case Opcode::kShift:
2995 return op.Cast<
ShiftOp>().rep == WordRepresentation::Word32();
2996 case Opcode::kComparison:
2997 return op.Cast<ComparisonOp>().rep == RegisterRepresentation::Word32();
2998 case Opcode::kOverflowCheckedBinop:
2999 return op.Cast<OverflowCheckedBinopOp>().rep ==
3000 WordRepresentation::Word32();
3001 case Opcode::kProjection:
3002 return ZeroExtendsWord32ToWord64NoPhis(op.Cast<ProjectionOp>().input());
3003 case Opcode::kLoad: {
3004 RegisterRepresentation rep =
3005 op.Cast<LoadOp>().loaded_rep.ToRegisterRepresentation();
3006 return rep == RegisterRepresentation::Word32();
3013void InstructionSelectorT::VisitChangeUint32ToUint64(OpIndex node) {
3014 Arm64OperandGeneratorT g(
this);
3015 OpIndex value = this->input_at(node, 0);
3016 if (ZeroExtendsWord32ToWord64(value)) {
3017 return EmitIdentity(node);
3019 Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(value));
3022void InstructionSelectorT::VisitTruncateInt64ToInt32(OpIndex node) {
3023 Arm64OperandGeneratorT g(
this);
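// Float64Mod and the Ieee754 unops/binops below have no arm64 instruction;
// they are lowered to calls through fixed registers (d0/d1 for the arguments,
// d0 for the result) and marked as calls.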
void InstructionSelectorT::VisitFloat64Mod(OpIndex node) {
  Arm64OperandGeneratorT g(this);
  Emit(kArm64Float64Mod, g.DefineAsFixed(node, d0),
       g.UseFixed(this->input_at(node, 0), d0),
       g.UseFixed(this->input_at(node, 1), d1))
      ->MarkAsCall();
}

void InstructionSelectorT::VisitFloat64Ieee754Binop(OpIndex node,
                                                    InstructionCode opcode) {
  Arm64OperandGeneratorT g(this);
  Emit(opcode, g.DefineAsFixed(node, d0),
       g.UseFixed(this->input_at(node, 0), d0),
       g.UseFixed(this->input_at(node, 1), d1))
      ->MarkAsCall();
}

void InstructionSelectorT::VisitFloat64Ieee754Unop(OpIndex node,
                                                   InstructionCode opcode) {
  Arm64OperandGeneratorT g(this);
  Emit(opcode, g.DefineAsFixed(node, d0),
       g.UseFixed(this->input_at(node, 0), d0))
      ->MarkAsCall();
}
3054void InstructionSelectorT::EmitMoveParamToFPR(OpIndex node,
int index) {}
3056void InstructionSelectorT::EmitMoveFPRToParam(InstructionOperand* op,
3057 LinkageLocation location) {}
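// Argument pushes claim stack space in pairs: the arm64 ABI keeps sp 16-byte
// aligned, so an odd slot count is rounded up and padded with a zero poke,
// and two consecutive parameters of the same machine type are stored with a
// single PokePair where possible.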
3059void InstructionSelectorT::EmitPrepareArguments(
3060 ZoneVector<PushParameter>* arguments,
const CallDescriptor* call_descriptor,
3062 Arm64OperandGeneratorT g(
this);
3066 int claim_count =
static_cast<int>(arguments->size());
3067 bool needs_padding = claim_count % 2 != 0;
3068 int slot = claim_count - 1;
3069 claim_count =
RoundUp(claim_count, 2);
3071 if (claim_count > 0) {
3075 Emit(kArm64Claim, g.NoOutput(), g.TempImmediate(claim_count));
3077 if (needs_padding) {
3078 Emit(kArm64Poke, g.NoOutput(), g.UseImmediate(0),
3079 g.TempImmediate(claim_count - 1));
3085 PushParameter input0 = (*arguments)[slot];
3088 if (!input0.node.valid()) {
3092 PushParameter input1 = slot > 0 ? (*arguments)[slot - 1] : PushParameter();
3095 if (input1.node.valid() &&
3096 input0.location.GetType() == input1.location.GetType()) {
3097 Emit(kArm64PokePair, g.NoOutput(), g.UseRegister(input0.node),
3098 g.UseRegister(input1.node), g.TempImmediate(slot));
3101 Emit(kArm64Poke, g.NoOutput(), g.UseRegister(input0.node),
3102 g.TempImmediate(slot));
3108void InstructionSelectorT::EmitPrepareResults(
3109 ZoneVector<PushParameter>* results,
const CallDescriptor* call_descriptor,
3111 Arm64OperandGeneratorT g(
this);
3113 for (PushParameter output : *results) {
3114 if (!output.location.IsCallerFrameSlot())
continue;
3116 if (output.node.valid()) {
3117 DCHECK(!call_descriptor->IsCFunctionCall());
3119 if (output.location.GetType() == MachineType::Float32()) {
3120 MarkAsFloat32(output.node);
3121 }
else if (output.location.GetType() == MachineType::Float64()) {
3122 MarkAsFloat64(output.node);
3123 }
else if (output.location.GetType() == MachineType::Simd128()) {
3124 MarkAsSimd128(output.node);
3127 int offset = call_descriptor->GetOffsetToReturns();
3128 int reverse_slot = -output.location.GetLocation() -
offset;
3129 Emit(kArm64Peek, g.DefineAsRegister(output.node),
3130 g.UseImmediate(reverse_slot));
bool InstructionSelectorT::IsTailCallAddressImmediate() { return false; }
// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelectorT* selector, InstructionCode opcode,
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuationT* cont) {
  if (cont->IsSelect()) {
    Arm64OperandGeneratorT g(selector);
    InstructionOperand inputs[] = {
        left, right, g.UseRegisterOrImmediateZero(cont->true_value()),
        g.UseRegisterOrImmediateZero(cont->false_value())};
    selector->EmitWithContinuation(opcode, 0, nullptr, 4, inputs, cont);
  } else {
    selector->EmitWithContinuation(opcode, left, right, cont);
  }
}
3162bool CanUseFlagSettingBinop(FlagsCondition cond) {
3182 DCHECK(CanUseFlagSettingBinop(cond));
3206void MaybeReplaceCmpZeroWithFlagSettingBinop(InstructionSelectorT* selector,
3207 OpIndex* node, OpIndex binop,
3209 FlagsCondition cond,
3210 FlagsContinuationT* cont,
3211 ImmediateMode* immediate_mode) {
3215 const Operation& op = selector->Get(binop);
3216 if (op.Is<Opmask::kWord32Add>()) {
3217 binop_opcode = kArm64Add32;
3218 no_output_opcode = kArm64Cmn32;
3220 }
else if (op.Is<Opmask::kWord32BitwiseAnd>()) {
3221 binop_opcode = kArm64And32;
3222 no_output_opcode = kArm64Tst32;
3227 if (selector->CanCover(*node, binop)) {
3230 cont->Overwrite(MapForFlagSettingBinop(cond));
3231 *opcode = no_output_opcode;
3233 *immediate_mode = binop_immediate_mode;
3234 }
else if (selector->IsOnlyUserOfNodeInSameBlock(*node, binop)) {
3238 cont->Overwrite(MapForFlagSettingBinop(cond));
3239 *opcode = binop_opcode;
3241 *immediate_mode = binop_immediate_mode;
3276void EmitBranchOrDeoptimize(InstructionSelectorT* selector,
3277 InstructionCode opcode, InstructionOperand value,
3278 FlagsContinuationT* cont) {
3279 DCHECK(cont->IsBranch() || cont->IsDeoptimize());
3280 selector->EmitWithContinuation(opcode, value, cont);
3284struct CbzOrTbzMatchTrait {};
3287struct CbzOrTbzMatchTrait<32> {
3288 using IntegralType = uint32_t;
3292 kArm64CompareAndBranch32;
3297struct CbzOrTbzMatchTrait<64> {
3298 using IntegralType = uint64_t;
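// TryEmitCbzOrTbz: try to replace a compare-against-constant plus branch with
// a single compare-and-branch (cbz/cbnz) or test-and-branch (tbz/tbnz).
// Equality comparisons with zero map to cbz/cbnz; signed comparisons with
// zero that only depend on the sign bit map to tbz/tbnz on bit 31/63.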
3308bool TryEmitCbzOrTbz(InstructionSelectorT* selector, OpIndex node,
3309 typename CbzOrTbzMatchTrait<N>::IntegralType value,
3310 OpIndex user, FlagsCondition cond,
3311 FlagsContinuationT* cont) {
3313 if (!cont->IsBranch() && !cont->IsDeoptimize())
return false;
3319 if (value != 0)
return false;
3323 if (cont->IsDeoptimize())
return false;
3324 Arm64OperandGeneratorT g(selector);
3325 cont->Overwrite(MapForTbz(cond));
3328 const Operation& op = selector->Get(node);
3329 if (op.Is<Opmask::kFloat64ExtractHighWord32>() &&
3330 selector->CanCover(user, node)) {
3334 InstructionOperand temp = g.TempRegister();
3335 selector->Emit(kArm64U64MoveFloat64, temp,
3336 g.UseRegister(selector->input_at(node, 0)));
3337 selector->EmitWithContinuation(kArm64TestAndBranch, temp,
3338 g.TempImmediate(kDSignBit), cont);
3343 selector->EmitWithContinuation(
3344 CbzOrTbzMatchTrait<N>::kTestAndBranchOpcode, g.UseRegister(node),
3345 g.TempImmediate(CbzOrTbzMatchTrait<N>::kSignBit), cont);
3350 const Operation& op = selector->Get(node);
3351 if (
const WordBinopOp* bitwise_and = op.TryCast<Opmask::kBitwiseAnd>()) {
3354 uint64_t actual_value;
3355 if (cont->IsBranch() && base::bits::IsPowerOfTwo(value) &&
3356 selector->MatchUnsignedIntegralConstant(bitwise_and->right(),
3358 actual_value == value && selector->CanCover(user, node)) {
3359 Arm64OperandGeneratorT g(selector);
3363 selector->EmitWithContinuation(
3364 CbzOrTbzMatchTrait<N>::kTestAndBranchOpcode,
3365 g.UseRegister(bitwise_and->left()),
3366 g.TempImmediate(base::bits::CountTrailingZeros(value)), cont);
3374 if (value != 0)
return false;
3375 Arm64OperandGeneratorT g(selector);
3376 cont->Overwrite(MapForCbz(cond));
3377 EmitBranchOrDeoptimize(selector,
3378 CbzOrTbzMatchTrait<N>::kCompareAndBranchOpcode,
3379 g.UseRegister(node), cont);
3389 InstructionCode opcode, FlagsContinuationT* cont,
3390 ImmediateMode immediate_mode) {
3391 Arm64OperandGeneratorT g(selector);
3392 DCHECK_EQ(selector->value_input_count(node), 2);
3393 auto left = selector->input_at(node, 0);
3394 auto right = selector->input_at(node, 1);
3397 if (!g.CanBeImmediate(right, immediate_mode) &&
3398 g.CanBeImmediate(left, immediate_mode)) {
3400 std::swap(left, right);
3404 if (opcode == kArm64Cmp &&
3405 selector->MatchSignedIntegralConstant(right, &constant)) {
3406 if (TryEmitCbzOrTbz<64>(selector, left, constant, node, cont->condition(),
3412 VisitCompare(selector, opcode, g.UseRegister(left),
3413 g.UseOperand(right, immediate_mode), cont);
3416void VisitWord32Compare(InstructionSelectorT* selector, OpIndex node,
3417 FlagsContinuationT* cont) {
3424 if (uint64_t constant;
3425 selector->MatchUnsignedIntegralConstant(rhs, &constant) &&
3426 TryEmitCbzOrTbz<32>(selector, lhs,
static_cast<uint32_t
>(constant), node,
3430 if (uint64_t constant;
3431 selector->MatchUnsignedIntegralConstant(lhs, &constant) &&
3432 TryEmitCbzOrTbz<32>(selector, rhs,
static_cast<uint32_t
>(constant), node,
3437 const Operation& left = selector->Get(lhs);
3438 const Operation& right = selector->Get(rhs);
3442 if (selector->MatchIntegralZero(rhs) &&
3443 (left.Is<Opmask::kWord32Add>() || left.Is<Opmask::kWord32BitwiseAnd>())) {
3445 if (CanUseFlagSettingBinop(cond)) {
3446 MaybeReplaceCmpZeroWithFlagSettingBinop(selector, &node, lhs, &opcode,
3447 cond, cont, &immediate_mode);
3449 }
else if (selector->MatchIntegralZero(lhs) &&
3450 (right.Is<Opmask::kWord32Add>() ||
3451 right.Is<Opmask::kWord32BitwiseAnd>())) {
3455 if (CanUseFlagSettingBinop(commuted_cond)) {
3456 MaybeReplaceCmpZeroWithFlagSettingBinop(
3457 selector, &node, rhs, &opcode, commuted_cond, cont, &immediate_mode);
3459 }
else if (right.Is<Opmask::kWord32Sub>() &&
3460 (cond == kEqual || cond == kNotEqual)) {
3461 const WordBinopOp& sub = right.Cast<WordBinopOp>();
3462 if (selector->MatchIntegralZero(sub.left())) {
3465 opcode = kArm64Cmn32;
3466 VisitBinopImpl(selector, node, lhs, sub.right(),
3467 RegisterRepresentation::Word32(), opcode, immediate_mode,
3472 VisitBinop(selector, node, RegisterRepresentation::Word32(), opcode,
3473 immediate_mode, cont);
3476void VisitWordTest(InstructionSelectorT* selector, OpIndex node,
3477 InstructionCode opcode, FlagsContinuationT* cont) {
3478 Arm64OperandGeneratorT g(selector);
3479 VisitCompare(selector, opcode, g.UseRegister(node), g.UseRegister(node),
3483void VisitWord32Test(InstructionSelectorT* selector, OpIndex node,
3484 FlagsContinuationT* cont) {
3485 VisitWordTest(selector, node, kArm64Tst32, cont);
3488void VisitWord64Test(InstructionSelectorT* selector, OpIndex node,
3489 FlagsContinuationT* cont) {
3490 VisitWordTest(selector, node, kArm64Tst, cont);
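// Matches the pattern "word & (1 << N)" so that a branch on the result can be
// emitted as a single tbz/tbnz on bit N.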
3493struct TestAndBranchMatcherTurboshaft {
3494 TestAndBranchMatcherTurboshaft(InstructionSelectorT* selector,
3495 const WordBinopOp& binop)
3500 bool Matches()
const {
return matches_; }
3502 unsigned bit()
const {
3509 if (
binop_.kind != WordBinopOp::Kind::kBitwiseAnd)
return;
3511 if (!
selector_->MatchUnsignedIntegralConstant(
binop_.right(), &value) ||
3512 !base::bits::IsPowerOfTwo(value)) {
3517 bit_ = base::bits::CountTrailingZeros(value);
3528 FlagsContinuationT* cont) {
3529 Arm64OperandGeneratorT g(selector);
3530 const ComparisonOp& op = selector->Get(node).template Cast<ComparisonOp>();
3533 if (selector->MatchZero(right)) {
3534 VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(left),
3535 g.UseImmediate(right), cont);
3536 }
else if (selector->MatchZero(left)) {
3538 VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(right),
3539 g.UseImmediate(left), cont);
3541 VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(left),
3542 g.UseRegister(right), cont);
3548 FlagsContinuationT* cont) {
3549 Arm64OperandGeneratorT g(selector);
3554 if (selector->MatchZero(rhs)) {
3555 VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(lhs),
3556 g.UseImmediate(rhs), cont);
3557 }
else if (selector->MatchZero(lhs)) {
3559 VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(rhs),
3560 g.UseImmediate(lhs), cont);
3562 VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(lhs),
3563 g.UseRegister(rhs), cont);
3568 ArchOpcode opcode, AtomicWidth width,
3569 MemoryAccessKind access_kind) {
3571 const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(
node);
3572 Arm64OperandGeneratorT g(selector);
3574 OpIndex index = atomic_op.index();
3575 OpIndex value = atomic_op.value();
3576 InstructionOperand inputs[] = {g.UseRegister(
base), g.UseRegister(index),
3577 g.UseUniqueRegister(value)};
3578 InstructionOperand outputs[] = {g.DefineAsRegister(node)};
3579 InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR) |
3580 AtomicWidthField::encode(width);
3581 if (access_kind == MemoryAccessKind::kProtectedByTrapHandler) {
3582 code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
3584 if (CpuFeatures::IsSupported(LSE)) {
3585 InstructionOperand temps[] = {g.TempRegister()};
3589 InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
3596 ArchOpcode opcode, AtomicWidth width,
3597 MemoryAccessKind access_kind) {
3599 Arm64OperandGeneratorT g(selector);
3600 const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(
node);
3602 OpIndex index = atomic_op.index();
3603 OpIndex old_value = atomic_op.expected().value();
3604 OpIndex new_value = atomic_op.value();
3605 InstructionOperand inputs[] = {g.UseRegister(
base), g.UseRegister(index),
3606 g.UseUniqueRegister(old_value),
3607 g.UseUniqueRegister(new_value)};
3608 InstructionOperand outputs[1];
3609 InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR) |
3610 AtomicWidthField::encode(width);
3611 if (access_kind == MemoryAccessKind::kProtectedByTrapHandler) {
3612 code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
3614 if (CpuFeatures::IsSupported(LSE)) {
3615 InstructionOperand temps[] = {g.TempRegister()};
3616 outputs[0] = g.DefineSameAsInput(node, 2);
3620 InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
3621 outputs[0] = g.DefineAsRegister(node);
3627void VisitAtomicLoad(InstructionSelectorT* selector, OpIndex node,
3628 AtomicWidth width) {
3630 Arm64OperandGeneratorT g(selector);
3631 auto load = selector->load_view(node);
3634 InstructionOperand inputs[] = {g.UseRegister(
base), g.UseRegister(index)};
3635 InstructionOperand outputs[] = {g.DefineAsRegister(node)};
3636 InstructionOperand temps[] = {g.TempRegister()};
3643 switch (load_rep.representation()) {
3644 case MachineRepresentation::kWord8:
3645 DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
3646 code = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
3648 case MachineRepresentation::kWord16:
3649 DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
3650 code = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
3652 case MachineRepresentation::kWord32:
3653 code = kAtomicLoadWord32;
3655 case MachineRepresentation::kWord64:
3656 code = kArm64Word64AtomicLoadUint64;
3658#ifdef V8_COMPRESS_POINTERS
3659 case MachineRepresentation::kTaggedSigned:
3660 code = kArm64LdarDecompressTaggedSigned;
3662 case MachineRepresentation::kTaggedPointer:
3663 code = kArm64LdarDecompressTagged;
3665 case MachineRepresentation::kTagged:
3666 code = kArm64LdarDecompressTagged;
3669 case MachineRepresentation::kTaggedSigned:
3670 case MachineRepresentation::kTaggedPointer:
3671 case MachineRepresentation::kTagged:
3672 if (kTaggedSize == 8) {
3673 code = kArm64Word64AtomicLoadUint64;
3675 code = kAtomicLoadWord32;
3679 case MachineRepresentation::kCompressedPointer:
3680 case MachineRepresentation::kCompressed:
3682 code = kAtomicLoadWord32;
3689 if (load.is_protected(&traps_on_null)) {
3694 code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
3698 AddressingModeField::encode(kMode_MRR) | AtomicWidthField::encode(width);
3705 auto store = selector->store_view(node);
3706 return AtomicStoreParameters(store.stored_rep().representation(),
3707 store.stored_rep().write_barrier_kind(),
3708 store.memory_order().value(),
3709 store.access_kind());
3712void VisitAtomicStore(InstructionSelectorT* selector, OpIndex node,
3713 AtomicWidth width) {
3715 Arm64OperandGeneratorT g(selector);
3716 auto store = selector->store_view(node);
3718 OpIndex index = selector->value(store.index());
3719 OpIndex value = store.value();
3729 if (
v8_flags.enable_unconditional_write_barriers &&
3734 InstructionOperand inputs[] = {g.UseRegister(
base), g.UseRegister(index),
3735 g.UseUniqueRegister(value)};
3736 InstructionOperand temps[] = {g.TempRegister()};
3739 if (write_barrier_kind != kNoWriteBarrier &&
3740 !
v8_flags.disable_write_barriers) {
3746 code = kArchAtomicStoreWithWriteBarrier;
3747 code |= RecordWriteModeField::encode(record_write_mode);
3750 case MachineRepresentation::kWord8:
3751 code = kAtomicStoreWord8;
3753 case MachineRepresentation::kWord16:
3754 code = kAtomicStoreWord16;
3756 case MachineRepresentation::kWord32:
3757 code = kAtomicStoreWord32;
3759 case MachineRepresentation::kWord64:
3761 code = kArm64Word64AtomicStoreWord64;
3763 case MachineRepresentation::kTaggedSigned:
3764 case MachineRepresentation::kTaggedPointer:
3765 case MachineRepresentation::kTagged:
3767 code = kArm64StlrCompressTagged;
3769 case MachineRepresentation::kCompressedPointer:
3770 case MachineRepresentation::kCompressed:
3773 code = kArm64StlrCompressTagged;
3778 code |= AtomicWidthField::encode(width);
3781 if (store_params.kind() == MemoryAccessKind::kProtectedByTrapHandler) {
3782 code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
3785 code |= AddressingModeField::encode(kMode_MRR);
3791 ArchOpcode opcode, AtomicWidth width,
3792 MemoryAccessKind access_kind) {
3794 Arm64OperandGeneratorT g(selector);
3795 const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(
node);
3797 OpIndex index = atomic_op.index();
3798 OpIndex value = atomic_op.value();
3800 InstructionOperand inputs[] = {g.UseRegister(
base), g.UseRegister(index),
3801 g.UseUniqueRegister(value)};
3802 InstructionOperand outputs[] = {g.DefineAsRegister(node)};
3803 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
3804 AtomicWidthField::encode(width);
3805 if (access_kind == MemoryAccessKind::kProtectedByTrapHandler) {
3806 code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
3809 if (CpuFeatures::IsSupported(LSE)) {
3810 InstructionOperand temps[] = {g.TempRegister()};
3814 InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
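// VisitWordCompareZero is the central branch-fusion routine: it walks the
// value being compared against zero and, where the node can be covered, folds
// comparisons, flag-setting binops, overflow projections and single-bit tests
// into the branch instead of materializing a boolean first.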
3823void InstructionSelectorT::VisitWordCompareZero(OpIndex user, OpIndex value,
3824 FlagsContinuation* cont) {
3825 Arm64OperandGeneratorT g(
this);
3827 ConsumeEqualZero(&user, &value, cont);
3831 MatchTruncateWord64ToWord32(value, &value64) && CanCover(user, value)) {
3843 if (cont->IsBranch()) {
3844 if (value_op.Is<Opmask::kWord64Equal>()) {
3845 const ComparisonOp&
equal = value_op.Cast<ComparisonOp>();
3846 if (MatchIntegralZero(
equal.right())) {
3847 const WordBinopOp* left_binop =
3848 Get(
equal.left()).TryCast<WordBinopOp>();
3850 TestAndBranchMatcherTurboshaft matcher(
this, *left_binop);
3851 if (matcher.Matches()) {
3853 DCHECK((cont->condition() == kEqual) ||
3854 (cont->condition() == kNotEqual));
3855 Arm64OperandGeneratorT
gen(
this);
3856 cont->OverwriteAndNegateIfEqual(kEqual);
3857 EmitWithContinuation(kArm64TestAndBranch,
3858 gen.UseRegister(left_binop->left()),
3859 gen.TempImmediate(matcher.bit()), cont);
3866 if (
const WordBinopOp* value_binop = value_op.TryCast<WordBinopOp>()) {
3867 TestAndBranchMatcherTurboshaft matcher(
this, *value_binop);
3868 if (matcher.Matches()) {
3870 DCHECK((cont->condition() == kEqual) ||
3871 (cont->condition() == kNotEqual));
3873 RegisterRepresentation::Word32()
3874 ? kArm64TestAndBranch32
3875 : kArm64TestAndBranch;
3876 Arm64OperandGeneratorT
gen(
this);
3877 EmitWithContinuation(opcode,
gen.UseRegister(value_binop->left()),
3878 gen.TempImmediate(matcher.bit()), cont);
3884 if (CanCover(user, value)) {
3885 if (
const ComparisonOp* comparison = value_op.TryCast<ComparisonOp>()) {
3886 switch (comparison->rep.MapTaggedToWord().value()) {
3887 case RegisterRepresentation::Word32():
3888 cont->OverwriteAndNegateIfEqual(
3889 GetComparisonFlagCondition(*comparison));
3890 return VisitWord32Compare(
this, value, cont);
3892 case RegisterRepresentation::Word64():
3893 cont->OverwriteAndNegateIfEqual(
3894 GetComparisonFlagCondition(*comparison));
3896 if (comparison->kind == ComparisonOp::Kind::kEqual) {
3897 const Operation& left_op = Get(comparison->left());
3898 if (MatchIntegralZero(comparison->right()) &&
3899 left_op.Is<Opmask::kWord64BitwiseAnd>() &&
3900 CanCover(value, comparison->left())) {
3901 return VisitWordCompare(this, comparison->left(), kArm64Tst, cont,
3907 case RegisterRepresentation::Float32():
3909 case ComparisonOp::Kind::kEqual:
3910 cont->OverwriteAndNegateIfEqual(kEqual);
3912 case ComparisonOp::Kind::kSignedLessThan:
3913 cont->OverwriteAndNegateIfEqual(kFloatLessThan);
3915 case ComparisonOp::Kind::kSignedLessThanOrEqual:
3916 cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
3922 case RegisterRepresentation::Float64():
3924 case ComparisonOp::Kind::kEqual:
3925 cont->OverwriteAndNegateIfEqual(kEqual);
3927 case ComparisonOp::Kind::kSignedLessThan:
3928 cont->OverwriteAndNegateIfEqual(kFloatLessThan);
3930 case ComparisonOp::Kind::kSignedLessThanOrEqual:
3931 cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
3940 }
else if (
const ProjectionOp* projection =
3941 value_op.TryCast<ProjectionOp>()) {
3944 if (projection->index == 1u) {
3950 OpIndex node = projection->input();
3951 if (
const OverflowCheckedBinopOp* binop =
3952 TryCast<OverflowCheckedBinopOp>(node);
3953 binop && CanDoBranchIfOverflowFusion(node)) {
3954 const bool is64 = binop->rep == WordRepresentation::Word64();
3955 switch (binop->kind) {
3956 case OverflowCheckedBinopOp::Kind::kSignedAdd:
3957 cont->OverwriteAndNegateIfEqual(kOverflow);
3958 return VisitBinop(
this, node, binop->rep,
3959 is64 ? kArm64Add : kArm64Add32, kArithmeticImm,
3961 case OverflowCheckedBinopOp::Kind::kSignedSub:
3962 cont->OverwriteAndNegateIfEqual(kOverflow);
3963 return VisitBinop(
this, node, binop->rep,
3964 is64 ? kArm64Sub : kArm64Sub32, kArithmeticImm,
3966 case OverflowCheckedBinopOp::Kind::kSignedMul:
3974 cont->OverwriteAndNegateIfEqual(kNotEqual);
3982 cont->OverwriteAndNegateIfEqual(kNotEqual);
3983 return EmitInt32MulWithOverflow(
this, node, cont);
3988 }
else if (value_op.Is<Opmask::kWord32Add>()) {
3990 }
else if (value_op.Is<Opmask::kWord32Sub>()) {
3991 return VisitWord32Compare(
this, value, cont);
3992 }
else if (value_op.Is<Opmask::kWord32BitwiseAnd>()) {
3997 }
else if (value_op.Is<Opmask::kWord64BitwiseAnd>()) {
3999 }
else if (value_op.Is<Opmask::kWord32BitwiseOr>()) {
4003 }
else if (value_op.Is<StackPointerGreaterThanOp>()) {
4004 cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
4005 return VisitStackPointerGreaterThan(value, cont);
4011 if (cont->IsBranch()) {
4012 Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
4013 g.UseRegister(value), g.Label(cont->true_block()),
4014 g.Label(cont->false_block()));
4016 VisitCompare(
this, cont->Encode(kArm64Tst32), g.UseRegister(value),
4017 g.UseRegister(value), cont);
4021void InstructionSelectorT::VisitSwitch(OpIndex node,
const SwitchInfo& sw) {
4022 Arm64OperandGeneratorT g(
this);
4023 InstructionOperand value_operand = g.UseRegister(this->input_at(node, 0));
4026 if (enable_switch_jump_table_ ==
4027 InstructionSelector::kEnableSwitchJumpTable) {
4028 static const size_t kMaxTableSwitchValueRange = 2 << 16;
4029 size_t table_space_cost = 4 + sw.value_range();
4030 size_t table_time_cost = 3;
4031 size_t lookup_space_cost = 3 + 2 * sw.case_count();
4032 size_t lookup_time_cost = sw.case_count();
4033 if (sw.case_count() > 4 &&
4034 table_space_cost + 3 * table_time_cost <=
4035 lookup_space_cost + 3 * lookup_time_cost &&
4036 sw.min_value() > std::numeric_limits<int32_t>::min() &&
4037 sw.value_range() <= kMaxTableSwitchValueRange) {
4038 InstructionOperand index_operand = value_operand;
4039 if (sw.min_value()) {
4040 index_operand = g.TempRegister();
4041 Emit(kArm64Sub32, index_operand, value_operand,
4042 g.TempImmediate(sw.min_value()));
4045 if (!ZeroExtendsWord32ToWord64(this->input_at(node, 0))) {
4046 index_operand = g.TempRegister();
4047 Emit(kArm64Mov32, index_operand, value_operand);
4051 return EmitTableSwitch(sw, index_operand);
4056 return EmitBinarySearchSwitch(sw, value_operand);
4059void InstructionSelectorT::VisitWord32Equal(OpIndex node) {
4065 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
4067 if (MatchZero(right)) {
4069 if (CanCover(user, value)) {
4071 if (value_op.Is<Opmask::kWord32Add>() ||
4072 value_op.Is<Opmask::kWord32BitwiseAnd>()) {
4073 return VisitWord32Compare(
this, node, &cont);
4075 if (value_op.Is<Opmask::kWord32Sub>()) {
4079 if (value_op.Is<Opmask::kWord32Equal>()) {
4083 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, value);
4085 VisitWord32Compare(
this, value, &cont);
4089 return VisitWord32Test(
this, value, &cont);
4095 Arm64OperandGeneratorT g(
this);
4096 const RootsTable& roots_table =
isolate()->roots_table();
4098 Handle<HeapObject> right;
4102 if (MatchHeapConstant(node, &right) && !right.is_null() &&
4103 roots_table.IsRootHandle(right, &root_index)) {
4104 if (RootsTable::IsReadOnly(root_index)) {
4106 MacroAssemblerBase::ReadOnlyRootPtr(root_index,
isolate());
4107 if (g.CanBeImmediate(ptr, ImmediateMode::kArithmeticImm)) {
4108 return VisitCompare(
this, kArm64Cmp32, g.UseRegister(left),
4109 g.TempImmediate(ptr), &cont);
4114 VisitWord32Compare(
this, node, &cont);
void InstructionSelectorT::VisitInt32LessThan(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelectorT::VisitInt32LessThanOrEqual(OpIndex node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelectorT::VisitUint32LessThan(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelectorT::VisitUint32LessThanOrEqual(OpIndex node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}
4139void InstructionSelectorT::VisitWord64Equal(OpIndex node) {
4140 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
4141 const ComparisonOp&
equal = this->
Get(node).template Cast<ComparisonOp>();
4143 if (this->MatchIntegralZero(
equal.right()) && CanCover(node,
equal.left())) {
4144 if (this->
Get(
equal.left()).template Is<Opmask::kWord64BitwiseAnd>()) {
4148 return VisitWord64Test(
this,
equal.left(), &cont);
void InstructionSelectorT::VisitInt32AddWithOverflow(OpIndex node) {
  OptionalOpIndex ovf = FindProjection(node, 1);
  if (ovf.valid() && IsUsed(ovf.value())) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
    return VisitBinop(this, node, RegisterRepresentation::Word32(),
                      kArm64Add32, kArithmeticImm, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, RegisterRepresentation::Word32(), kArm64Add32,
             kArithmeticImm, &cont);
}

void InstructionSelectorT::VisitInt32SubWithOverflow(OpIndex node) {
  OptionalOpIndex ovf = FindProjection(node, 1);
  if (ovf.valid()) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
    return VisitBinop(this, node, RegisterRepresentation::Word32(),
                      kArm64Sub32, kArithmeticImm, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, RegisterRepresentation::Word32(), kArm64Sub32,
             kArithmeticImm, &cont);
}
4177void InstructionSelectorT::VisitInt32MulWithOverflow(OpIndex node) {
4184 FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf.value());
4185 return EmitInt32MulWithOverflow(
this, node, &cont);
4187 FlagsContinuation cont;
4188 EmitInt32MulWithOverflow(
this, node, &cont);
4191void InstructionSelectorT::VisitInt64AddWithOverflow(OpIndex node) {
4194 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
4195 return VisitBinop(
this, node, RegisterRepresentation::Word64(), kArm64Add,
4196 kArithmeticImm, &cont);
4198 FlagsContinuation cont;
4199 VisitBinop(
this, node, RegisterRepresentation::Word64(), kArm64Add,
4200 kArithmeticImm, &cont);
4203void InstructionSelectorT::VisitInt64SubWithOverflow(OpIndex node) {
4206 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
4207 return VisitBinop(
this, node, RegisterRepresentation::Word64(), kArm64Sub,
4208 kArithmeticImm, &cont);
4210 FlagsContinuation cont;
4211 VisitBinop(
this, node, RegisterRepresentation::Word64(), kArm64Sub,
4212 kArithmeticImm, &cont);
4215void InstructionSelectorT::VisitInt64MulWithOverflow(OpIndex node) {
4223 FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf.value());
4226 FlagsContinuation cont;
4230void InstructionSelectorT::VisitInt64LessThan(OpIndex node) {
4231 FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
4235void InstructionSelectorT::VisitInt64LessThanOrEqual(OpIndex node) {
4236 FlagsContinuation cont =
4237 FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
4241void InstructionSelectorT::VisitUint64LessThan(OpIndex node) {
4242 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
4246void InstructionSelectorT::VisitUint64LessThanOrEqual(OpIndex node) {
4247 FlagsContinuation cont =
4248 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
4252void InstructionSelectorT::VisitFloat32Neg(OpIndex node) {
4253 Arm64OperandGeneratorT g(
this);
4256 if (input_op.Is<Opmask::kFloat32Mul>() && CanCover(node, input)) {
4257 const FloatBinopOp& mul = input_op.Cast<FloatBinopOp>();
4258 Emit(kArm64Float32Fnmul, g.DefineAsRegister(node),
4259 g.UseRegister(mul.left()), g.UseRegister(mul.right()));
4262 VisitRR(
this, kArm64Float32Neg, node);
4265void InstructionSelectorT::VisitFloat32Mul(OpIndex node) {
4266 Arm64OperandGeneratorT g(
this);
4267 const FloatBinopOp& mul = this->
Get(node).template Cast<FloatBinopOp>();
4270 if (lhs.Is<Opmask::kFloat32Negate>() && CanCover(node, mul.left())) {
4271 Emit(kArm64Float32Fnmul, g.DefineAsRegister(node),
4272 g.UseRegister(lhs.input(0)), g.UseRegister(mul.right()));
4277 if (rhs.Is<Opmask::kFloat32Negate>() && CanCover(node, mul.right())) {
4278 Emit(kArm64Float32Fnmul, g.DefineAsRegister(node),
4279 g.UseRegister(rhs.input(0)), g.UseRegister(mul.left()));
4282 return VisitRRR(
this, kArm64Float32Mul, node);
4285void InstructionSelectorT::VisitFloat32Abs(OpIndex node) {
4286 Arm64OperandGeneratorT g(
this);
4287 OpIndex in = this->input_at(node, 0);
4289 if (input_op.Is<Opmask::kFloat32Sub>() && CanCover(node, in)) {
4290 const FloatBinopOp& sub = input_op.Cast<FloatBinopOp>();
4291 Emit(kArm64Float32Abd, g.DefineAsRegister(node), g.UseRegister(sub.left()),
4292 g.UseRegister(sub.right()));
4296 return VisitRR(
this, kArm64Float32Abs, node);
4299void InstructionSelectorT::VisitFloat64Abs(OpIndex node) {
4300 Arm64OperandGeneratorT g(
this);
4301 OpIndex in = this->input_at(node, 0);
4303 if (input_op.Is<Opmask::kFloat64Sub>() && CanCover(node, in)) {
4304 const FloatBinopOp& sub = input_op.Cast<FloatBinopOp>();
4305 Emit(kArm64Float64Abd, g.DefineAsRegister(node), g.UseRegister(sub.left()),
4306 g.UseRegister(sub.right()));
4310 return VisitRR(
this, kArm64Float64Abs, node);
4313void InstructionSelectorT::VisitFloat32Equal(OpIndex node) {
4314 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
4318void InstructionSelectorT::VisitFloat32LessThan(OpIndex node) {
4319 FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
4323void InstructionSelectorT::VisitFloat32LessThanOrEqual(OpIndex node) {
4324 FlagsContinuation cont =
4325 FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
4329void InstructionSelectorT::VisitFloat64Equal(OpIndex node) {
4330 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
4334void InstructionSelectorT::VisitFloat64LessThan(OpIndex node) {
4335 FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
4339void InstructionSelectorT::VisitFloat64LessThanOrEqual(OpIndex node) {
4340 FlagsContinuation cont =
4341 FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
4345void InstructionSelectorT::VisitBitcastWord32PairToFloat64(OpIndex node) {
4346 Arm64OperandGeneratorT g(
this);
4347 const auto& bitcast = this->Cast<BitcastWord32PairToFloat64Op>(node);
4351 int vreg = g.AllocateVirtualRegister();
4352 Emit(kArm64Bfi, g.DefineSameAsFirstForVreg(vreg), g.UseRegister(lo),
4353 g.UseRegister(hi), g.TempImmediate(32), g.TempImmediate(32));
4354 Emit(kArm64Float64MoveU64, g.DefineAsRegister(node),
4355 g.UseRegisterForVreg(vreg));
4358void InstructionSelectorT::VisitFloat64InsertLowWord32(OpIndex node) {
4362void InstructionSelectorT::VisitFloat64InsertHighWord32(OpIndex node) {
4366void InstructionSelectorT::VisitFloat64Neg(OpIndex node) {
4367 Arm64OperandGeneratorT g(
this);
4370 if (input_op.Is<Opmask::kFloat64Mul>() && CanCover(node, input)) {
4371 const FloatBinopOp& mul = input_op.Cast<FloatBinopOp>();
4372 Emit(kArm64Float64Fnmul, g.DefineAsRegister(node),
4373 g.UseRegister(mul.left()), g.UseRegister(mul.right()));
4376 VisitRR(
this, kArm64Float64Neg, node);
4379void InstructionSelectorT::VisitFloat64Mul(OpIndex node) {
4380 Arm64OperandGeneratorT g(
this);
4381 const FloatBinopOp& mul = this->
Get(node).template Cast<FloatBinopOp>();
4383 if (lhs.Is<Opmask::kFloat64Negate>() && CanCover(node, mul.left())) {
4384 Emit(kArm64Float64Fnmul, g.DefineAsRegister(node),
4385 g.UseRegister(lhs.input(0)), g.UseRegister(mul.right()));
4390 if (rhs.Is<Opmask::kFloat64Negate>() && CanCover(node, mul.right())) {
4391 Emit(kArm64Float64Fnmul, g.DefineAsRegister(node),
4392 g.UseRegister(rhs.input(0)), g.UseRegister(mul.left()));
4395 return VisitRRR(
this, kArm64Float64Mul, node);
void InstructionSelectorT::VisitMemoryBarrier(OpIndex node) {
  // Use DMB ISH for both acquire-release and sequentially consistent barriers.
  Arm64OperandGeneratorT g(this);
  Emit(kArm64DmbIsh, g.NoOutput());
}

void InstructionSelectorT::VisitWord32AtomicLoad(OpIndex node) {
  VisitAtomicLoad(this, node, AtomicWidth::kWord32);
}

void InstructionSelectorT::VisitWord64AtomicLoad(OpIndex node) {
  VisitAtomicLoad(this, node, AtomicWidth::kWord64);
}

void InstructionSelectorT::VisitWord32AtomicStore(OpIndex node) {
  VisitAtomicStore(this, node, AtomicWidth::kWord32);
}

void InstructionSelectorT::VisitWord64AtomicStore(OpIndex node) {
  VisitAtomicStore(this, node, AtomicWidth::kWord64);
}
void InstructionSelectorT::VisitWord32AtomicExchange(OpIndex node) {
  const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
  ArchOpcode opcode;
  if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
    opcode = kAtomicExchangeInt8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
    opcode = kAtomicExchangeUint8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
    opcode = kAtomicExchangeInt16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
    opcode = kAtomicExchangeUint16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
             atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
    opcode = kAtomicExchangeWord32;
  } else {
    UNREACHABLE();
  }
  VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32,
                      atomic_op.memory_access_kind);
}

void InstructionSelectorT::VisitWord64AtomicExchange(OpIndex node) {
  const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
  ArchOpcode opcode;
  if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
    opcode = kAtomicExchangeUint8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
    opcode = kAtomicExchangeUint16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
    opcode = kAtomicExchangeWord32;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
    opcode = kArm64Word64AtomicExchangeUint64;
  } else {
    UNREACHABLE();
  }
  VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64,
                      atomic_op.memory_access_kind);
}
4459void InstructionSelectorT::VisitWord32AtomicCompareExchange(OpIndex node) {
4460 const AtomicRMWOp& atomic_op = this->
Get(node).template Cast<AtomicRMWOp>();
4462 if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
4463 opcode = kAtomicCompareExchangeInt8;
4464 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
4465 opcode = kAtomicCompareExchangeUint8;
4466 }
else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
4467 opcode = kAtomicCompareExchangeInt16;
4468 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
4469 opcode = kAtomicCompareExchangeUint16;
4470 }
else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
4471 atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
4472 opcode = kAtomicCompareExchangeWord32;
4477 atomic_op.memory_access_kind);
4480void InstructionSelectorT::VisitWord64AtomicCompareExchange(OpIndex node) {
4481 const AtomicRMWOp& atomic_op = this->
Get(node).template Cast<AtomicRMWOp>();
4483 if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
4484 opcode = kAtomicCompareExchangeUint8;
4485 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
4486 opcode = kAtomicCompareExchangeUint16;
4487 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
4488 opcode = kAtomicCompareExchangeWord32;
4489 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
4490 opcode = kArm64Word64AtomicCompareExchangeUint64;
4495 atomic_op.memory_access_kind);
4498void InstructionSelectorT::VisitWord32AtomicBinaryOperation(
4499 OpIndex node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
4500 ArchOpcode uint16_op, ArchOpcode word32_op) {
4501 const AtomicRMWOp& atomic_op = this->
Get(node).template Cast<AtomicRMWOp>();
4503 if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
4505 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
4507 }
else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
4509 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
4511 }
else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
4512 atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
4518 atomic_op.memory_access_kind);
4521#define VISIT_ATOMIC_BINOP(op) \
4522 void InstructionSelectorT::VisitWord32Atomic##op(OpIndex node) { \
4523 VisitWord32AtomicBinaryOperation( \
4524 node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
4525 kAtomic##op##Uint16, kAtomic##op##Word32); \
4532#undef VISIT_ATOMIC_BINOP
4534void InstructionSelectorT::VisitWord64AtomicBinaryOperation(
4537 const AtomicRMWOp& atomic_op = this->
Get(node).template Cast<AtomicRMWOp>();
4539 if (atomic_op.
memory_rep == MemoryRepresentation::Uint8()) {
4541 }
else if (atomic_op.
memory_rep == MemoryRepresentation::Uint16()) {
4543 }
else if (atomic_op.
memory_rep == MemoryRepresentation::Uint32()) {
4545 }
else if (atomic_op.
memory_rep == MemoryRepresentation::Uint64()) {
4554#define VISIT_ATOMIC_BINOP(op) \
4555 void InstructionSelectorT::VisitWord64Atomic##op(OpIndex node) { \
4556 VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
4557 kAtomic##op##Uint16, kAtomic##op##Word32, \
4558 kArm64Word64Atomic##op##Uint64); \
4565#undef VISIT_ATOMIC_BINOP
void InstructionSelectorT::VisitInt32AbsWithOverflow(OpIndex node) {
  VisitRR(this, kArm64Abs32, node);
}

void InstructionSelectorT::VisitInt64AbsWithOverflow(OpIndex node) {
  VisitRR(this, kArm64Abs, node);
}
4575#if V8_ENABLE_WEBASSEMBLY
4576#define SIMD_UNOP_LIST(V) \
4577 V(F64x2ConvertLowI32x4S, kArm64F64x2ConvertLowI32x4S) \
4578 V(F64x2ConvertLowI32x4U, kArm64F64x2ConvertLowI32x4U) \
4579 V(F64x2PromoteLowF32x4, kArm64F64x2PromoteLowF32x4) \
4580 V(F32x4SConvertI32x4, kArm64F32x4SConvertI32x4) \
4581 V(F32x4UConvertI32x4, kArm64F32x4UConvertI32x4) \
4582 V(F32x4DemoteF64x2Zero, kArm64F32x4DemoteF64x2Zero) \
4583 V(F16x8SConvertI16x8, kArm64F16x8SConvertI16x8) \
4584 V(F16x8UConvertI16x8, kArm64F16x8UConvertI16x8) \
4585 V(I16x8SConvertF16x8, kArm64I16x8SConvertF16x8) \
4586 V(I16x8UConvertF16x8, kArm64I16x8UConvertF16x8) \
4587 V(F16x8DemoteF32x4Zero, kArm64F16x8DemoteF32x4Zero) \
4588 V(F16x8DemoteF64x2Zero, kArm64F16x8DemoteF64x2Zero) \
4589 V(F32x4PromoteLowF16x8, kArm64F32x4PromoteLowF16x8) \
4590 V(I64x2BitMask, kArm64I64x2BitMask) \
4591 V(I32x4SConvertF32x4, kArm64I32x4SConvertF32x4) \
4592 V(I32x4UConvertF32x4, kArm64I32x4UConvertF32x4) \
4593 V(I32x4RelaxedTruncF32x4S, kArm64I32x4SConvertF32x4) \
4594 V(I32x4RelaxedTruncF32x4U, kArm64I32x4UConvertF32x4) \
4595 V(I32x4BitMask, kArm64I32x4BitMask) \
4596 V(I32x4TruncSatF64x2SZero, kArm64I32x4TruncSatF64x2SZero) \
4597 V(I32x4TruncSatF64x2UZero, kArm64I32x4TruncSatF64x2UZero) \
4598 V(I32x4RelaxedTruncF64x2SZero, kArm64I32x4TruncSatF64x2SZero) \
4599 V(I32x4RelaxedTruncF64x2UZero, kArm64I32x4TruncSatF64x2UZero) \
4600 V(I16x8BitMask, kArm64I16x8BitMask) \
4601 V(S128Not, kArm64S128Not) \
4602 V(V128AnyTrue, kArm64V128AnyTrue) \
4603 V(I64x2AllTrue, kArm64I64x2AllTrue) \
4604 V(I32x4AllTrue, kArm64I32x4AllTrue) \
4605 V(I16x8AllTrue, kArm64I16x8AllTrue) \
4606 V(I8x16AllTrue, kArm64I8x16AllTrue)
4608#define SIMD_UNOP_LANE_SIZE_LIST(V) \
4609 V(F64x2Splat, kArm64FSplat, 64) \
4610 V(F64x2Abs, kArm64FAbs, 64) \
4611 V(F64x2Sqrt, kArm64FSqrt, 64) \
4612 V(F64x2Neg, kArm64FNeg, 64) \
4613 V(F32x4Splat, kArm64FSplat, 32) \
4614 V(F32x4Abs, kArm64FAbs, 32) \
4615 V(F32x4Sqrt, kArm64FSqrt, 32) \
4616 V(F32x4Neg, kArm64FNeg, 32) \
4617 V(I64x2Splat, kArm64ISplat, 64) \
4618 V(I64x2Abs, kArm64IAbs, 64) \
4619 V(I64x2Neg, kArm64INeg, 64) \
4620 V(I32x4Splat, kArm64ISplat, 32) \
4621 V(I32x4Abs, kArm64IAbs, 32) \
4622 V(I32x4Neg, kArm64INeg, 32) \
4623 V(F16x8Splat, kArm64FSplat, 16) \
4624 V(F16x8Abs, kArm64FAbs, 16) \
4625 V(F16x8Sqrt, kArm64FSqrt, 16) \
4626 V(F16x8Neg, kArm64FNeg, 16) \
4627 V(I16x8Splat, kArm64ISplat, 16) \
4628 V(I16x8Abs, kArm64IAbs, 16) \
4629 V(I16x8Neg, kArm64INeg, 16) \
4630 V(I8x16Splat, kArm64ISplat, 8) \
4631 V(I8x16Abs, kArm64IAbs, 8) \
4632 V(I8x16Neg, kArm64INeg, 8)
4634#define SIMD_SHIFT_OP_LIST(V) \
4648#define SIMD_BINOP_LIST(V) \
4649 V(I32x4Mul, kArm64I32x4Mul) \
4650 V(I32x4DotI16x8S, kArm64I32x4DotI16x8S) \
4651 V(I16x8DotI8x16I7x16S, kArm64I16x8DotI8x16S) \
4652 V(I16x8SConvertI32x4, kArm64I16x8SConvertI32x4) \
4653 V(I16x8Mul, kArm64I16x8Mul) \
4654 V(I16x8UConvertI32x4, kArm64I16x8UConvertI32x4) \
4655 V(I16x8Q15MulRSatS, kArm64I16x8Q15MulRSatS) \
4656 V(I16x8RelaxedQ15MulRS, kArm64I16x8Q15MulRSatS) \
4657 V(I8x16SConvertI16x8, kArm64I8x16SConvertI16x8) \
4658 V(I8x16UConvertI16x8, kArm64I8x16UConvertI16x8) \
4659 V(S128Or, kArm64S128Or)
4661#define SIMD_BINOP_LANE_SIZE_LIST(V) \
4662 V(F64x2Min, kArm64FMin, 64) \
4663 V(F64x2Max, kArm64FMax, 64) \
4664 V(F64x2Add, kArm64FAdd, 64) \
4665 V(F64x2Sub, kArm64FSub, 64) \
4666 V(F64x2Div, kArm64FDiv, 64) \
4667 V(F64x2RelaxedMin, kArm64FMin, 64) \
4668 V(F64x2RelaxedMax, kArm64FMax, 64) \
4669 V(F32x4Min, kArm64FMin, 32) \
4670 V(F32x4Max, kArm64FMax, 32) \
4671 V(F32x4Add, kArm64FAdd, 32) \
4672 V(F32x4Sub, kArm64FSub, 32) \
4673 V(F32x4Div, kArm64FDiv, 32) \
4674 V(F32x4RelaxedMin, kArm64FMin, 32) \
4675 V(F32x4RelaxedMax, kArm64FMax, 32) \
4676 V(F16x8Add, kArm64FAdd, 16) \
4677 V(F16x8Sub, kArm64FSub, 16) \
4678 V(F16x8Div, kArm64FDiv, 16) \
4679 V(F16x8Min, kArm64FMin, 16) \
4680 V(F16x8Max, kArm64FMax, 16) \
4681 V(I64x2Sub, kArm64ISub, 64) \
4682 V(I32x4GtU, kArm64IGtU, 32) \
4683 V(I32x4GeU, kArm64IGeU, 32) \
4684 V(I32x4MinS, kArm64IMinS, 32) \
4685 V(I32x4MaxS, kArm64IMaxS, 32) \
4686 V(I32x4MinU, kArm64IMinU, 32) \
4687 V(I32x4MaxU, kArm64IMaxU, 32) \
4688 V(I16x8AddSatS, kArm64IAddSatS, 16) \
4689 V(I16x8SubSatS, kArm64ISubSatS, 16) \
4690 V(I16x8AddSatU, kArm64IAddSatU, 16) \
4691 V(I16x8SubSatU, kArm64ISubSatU, 16) \
4692 V(I16x8GtU, kArm64IGtU, 16) \
4693 V(I16x8GeU, kArm64IGeU, 16) \
4694 V(I16x8RoundingAverageU, kArm64RoundingAverageU, 16) \
4695 V(I8x16RoundingAverageU, kArm64RoundingAverageU, 8) \
4696 V(I16x8MinS, kArm64IMinS, 16) \
4697 V(I16x8MaxS, kArm64IMaxS, 16) \
4698 V(I16x8MinU, kArm64IMinU, 16) \
4699 V(I16x8MaxU, kArm64IMaxU, 16) \
4700 V(I8x16Sub, kArm64ISub, 8) \
4701 V(I8x16AddSatS, kArm64IAddSatS, 8) \
4702 V(I8x16SubSatS, kArm64ISubSatS, 8) \
4703 V(I8x16AddSatU, kArm64IAddSatU, 8) \
4704 V(I8x16SubSatU, kArm64ISubSatU, 8) \
4705 V(I8x16GtU, kArm64IGtU, 8) \
4706 V(I8x16GeU, kArm64IGeU, 8) \
4707 V(I8x16MinS, kArm64IMinS, 8) \
4708 V(I8x16MaxS, kArm64IMaxS, 8) \
4709 V(I8x16MinU, kArm64IMinU, 8) \
4710 V(I8x16MaxU, kArm64IMaxU, 8)
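// A 128-bit constant is passed to the code generator as four 32-bit
// immediates and materialized there (how it is materialized is up to the
// code generator).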
void InstructionSelectorT::VisitS128Const(OpIndex node) {
  Arm64OperandGeneratorT g(this);
  static const int kUint32Immediates = 4;
  uint32_t val[kUint32Immediates];
  static_assert(sizeof(val) == kSimd128Size);
  const Simd128ConstantOp& constant =
      this->Get(node).template Cast<Simd128ConstantOp>();
  memcpy(val, constant.value, kSimd128Size);
  Emit(kArm64S128Const, g.DefineAsRegister(node), g.UseImmediate(val[0]),
       g.UseImmediate(val[1]), g.UseImmediate(val[2]), g.UseImmediate(val[3]));
}
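// Helpers for matching S128AndNot (and S128And with an inverted mask) against
// the NEON BIC-by-immediate encoding: the 128-bit constant must repeat the
// same 32-bit value in every lane, and that value must reduce to a single
// byte shifted within a 16-bit or 32-bit lane.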
4727 BicImmParam(uint32_t imm, uint8_t lane_size, uint8_t shift_amount)
4728 : imm(imm), lane_size(lane_size), shift_amount(shift_amount) {}
4731 uint8_t shift_amount;
4734struct BicImmResult {
4735 BicImmResult(std::optional<BicImmParam> param, OpIndex const_node,
4737 : param(param), const_node(const_node), other_node(other_node) {}
4738 std::optional<BicImmParam> param;
std::optional<BicImmParam> BicImm16bitHelper(uint16_t val) {
  uint8_t byte0 = val & 0xFF;
  uint8_t byte1 = val >> 8;
  if (byte0 == 0x00) {
    return BicImmParam(byte1, 16, 8);
  }
  if (byte1 == 0x00) {
    return BicImmParam(byte0, 16, 0);
  }
  return std::nullopt;
}
std::optional<BicImmParam> BicImm32bitHelper(uint32_t val) {
  for (int i = 0; i < 4; i++) {
    // All bytes are zero except (at most) one.
    if ((val & (0xFF << (8 * i))) == val) {
      return BicImmParam(static_cast<uint8_t>(val >> i * 8), 32, i * 8);
    }
  }
  // The low and high halves are equal; retry as a 16-bit lane immediate.
  if ((val >> 16) == (0xFFFF & val)) {
    return BicImm16bitHelper(0xFFFF & val);
  }
  return std::nullopt;
}
4770std::optional<BicImmParam> BicImmConstHelper(
const Operation& op,
4772 const int kUint32Immediates = 4;
4773 uint32_t val[kUint32Immediates];
4775 memcpy(val, op.Cast<Simd128ConstantOp>().value, kSimd128Size);
4777 if (!(val[0] == val[1] && val[1] == val[2] && val[2] == val[3])) {
4778 return std::nullopt;
4780 return BicImm32bitHelper(not_imm ? ~val[0] : val[0]);
4783std::optional<BicImmResult> BicImmHelper(InstructionSelectorT* selector,
4784 OpIndex and_node,
bool not_imm) {
4785 const Simd128BinopOp& op = selector->Get(and_node).Cast<Simd128BinopOp>();
4790 if (not_imm && selector->Get(op.left()).Is<Simd128ConstantOp>()) {
4791 return BicImmResult(BicImmConstHelper(selector->Get(op.left()), not_imm),
4792 op.left(), op.right());
4794 if (selector->Get(op.right()).Is<Simd128ConstantOp>()) {
4795 return BicImmResult(BicImmConstHelper(selector->Get(op.right()), not_imm),
4796 op.right(), op.left());
4798 return std::nullopt;
bool TryEmitS128AndNotImm(InstructionSelectorT* selector, OpIndex node,
                          bool not_imm) {
  Arm64OperandGeneratorT g(selector);
  std::optional<BicImmResult> result = BicImmHelper(selector, node, not_imm);
  if (!result.has_value()) return false;
  std::optional<BicImmParam> param = result->param;
  if (param.has_value()) {
    if (selector->CanCover(node, result->other_node)) {
      selector->Emit(
          kArm64S128AndNot | LaneSizeField::encode(param->lane_size),
          g.DefineSameAsFirst(node), g.UseRegister(result->other_node),
          g.UseImmediate(param->imm), g.UseImmediate(param->shift_amount));
      return true;
    }
  }
  return false;
}

void InstructionSelectorT::VisitS128AndNot(OpIndex node) {
  if (!TryEmitS128AndNotImm(this, node, false)) {
    VisitRRR(this, kArm64S128AndNot, node);
  }
}

void InstructionSelectorT::VisitS128And(OpIndex node) {
  // AndNot with the inverted immediate can be used for And.
  if (!TryEmitS128AndNotImm(this, node, true)) {
    VisitRRR(this, kArm64S128And, node);
  }
}
void InstructionSelectorT::VisitS128Zero(OpIndex node) {
  Arm64OperandGeneratorT g(this);
  Emit(kArm64S128Const, g.DefineAsRegister(node), g.UseImmediate(0),
       g.UseImmediate(0), g.UseImmediate(0), g.UseImmediate(0));
}

void InstructionSelectorT::VisitI32x4DotI8x16I7x16AddS(OpIndex node) {
  Arm64OperandGeneratorT g(this);
  InstructionOperand output = CpuFeatures::IsSupported(DOTPROD)
                                  ? g.DefineSameAsInput(node, 2)
                                  : g.DefineAsRegister(node);
  Emit(kArm64I32x4DotI8x16AddS, output, g.UseRegister(this->input_at(node, 0)),
       g.UseRegister(this->input_at(node, 1)),
       g.UseRegister(this->input_at(node, 2)));
}

void InstructionSelectorT::VisitI8x16BitMask(OpIndex node) {
  Arm64OperandGeneratorT g(this);
  InstructionOperand temps[1];
  size_t temp_count = 0;
  if (CpuFeatures::IsSupported(PMULL1Q)) {
    temps[0] = g.TempSimd128Register();
    temp_count = 1;
  }
  Emit(kArm64I8x16BitMask, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)), temp_count, temps);
}
4864#define SIMD_VISIT_EXTRACT_LANE(Type, T, Sign, LaneSize) \
4865 void InstructionSelectorT::Visit##Type##ExtractLane##Sign(OpIndex node) { \
4867 kArm64##T##ExtractLane##Sign | LaneSizeField::encode(LaneSize), \
4879#undef SIMD_VISIT_EXTRACT_LANE
4881#define SIMD_VISIT_REPLACE_LANE(Type, T, LaneSize) \
4882 void InstructionSelectorT::Visit##Type##ReplaceLane(OpIndex node) { \
4883 VisitRRIR(this, kArm64##T##ReplaceLane | LaneSizeField::encode(LaneSize), \
4893#undef SIMD_VISIT_REPLACE_LANE
4895#define SIMD_VISIT_UNOP(Name, instruction) \
4896 void InstructionSelectorT::Visit##Name(OpIndex node) { \
4897 VisitRR(this, instruction, node); \
4900#undef SIMD_VISIT_UNOP
4901#undef SIMD_UNOP_LIST
4903#define SIMD_VISIT_SHIFT_OP(Name, width) \
4904 void InstructionSelectorT::Visit##Name(OpIndex node) { \
4905 VisitSimdShiftRRR(this, kArm64##Name, node, width); \
4908#undef SIMD_VISIT_SHIFT_OP
4909#undef SIMD_SHIFT_OP_LIST
4911#define SIMD_VISIT_BINOP(Name, instruction) \
4912 void InstructionSelectorT::Visit##Name(OpIndex node) { \
4913 VisitRRR(this, instruction, node); \
4916#undef SIMD_VISIT_BINOP
4917#undef SIMD_BINOP_LIST
4919#define SIMD_VISIT_BINOP_LANE_SIZE(Name, instruction, LaneSize) \
4920 void InstructionSelectorT::Visit##Name(OpIndex node) { \
4921 VisitRRR(this, instruction | LaneSizeField::encode(LaneSize), node); \
4923SIMD_BINOP_LANE_SIZE_LIST(SIMD_VISIT_BINOP_LANE_SIZE)
4924#undef SIMD_VISIT_BINOP_LANE_SIZE
4925#undef SIMD_BINOP_LANE_SIZE_LIST
4927#define SIMD_VISIT_UNOP_LANE_SIZE(Name, instruction, LaneSize) \
4928 void InstructionSelectorT::Visit##Name(OpIndex node) { \
4929 VisitRR(this, instruction | LaneSizeField::encode(LaneSize), node); \
4931SIMD_UNOP_LANE_SIZE_LIST(SIMD_VISIT_UNOP_LANE_SIZE)
4932#undef SIMD_VISIT_UNOP_LANE_SIZE
4933#undef SIMD_UNOP_LANE_SIZE_LIST
using ShuffleMatcher =
    ValueMatcher<S128ImmediateParameter, IrOpcode::kI8x16Shuffle>;
using BinopWithShuffleMatcher = BinopMatcher<ShuffleMatcher, ShuffleMatcher,
                                             MachineRepresentation::kSimd128>;

struct MulWithDup {
  OpIndex input;     // Node holding the non-dup input.
  OpIndex dup_node;  // Node holding the dup (splatted) input.
  int index;
  explicit operator bool() const { return dup_node.valid(); }
};

template <int LANES>
MulWithDup TryMatchMulWithDup(InstructionSelectorT* selector, OpIndex node) {
  OpIndex input;
  OpIndex dup_node;
  int index = 0;
#if V8_ENABLE_WEBASSEMBLY
  const Simd128BinopOp& mul = selector->Get(node).Cast<Simd128BinopOp>();
  const Operation& left = selector->Get(mul.left());
  const Operation& right = selector->Get(mul.right());
  // The multiplication is commutative, so the splat (dup) may appear on
  // either side.
  if (left.Is<Simd128ShuffleOp>() &&
      wasm::SimdShuffle::TryMatchSplat<LANES>(
          left.Cast<Simd128ShuffleOp>().shuffle, &index)) {
    dup_node = left.input(index < LANES ? 0 : 1);
    input = mul.right();
  } else if (right.Is<Simd128ShuffleOp>() &&
             wasm::SimdShuffle::TryMatchSplat<LANES>(
                 right.Cast<Simd128ShuffleOp>().shuffle, &index)) {
    dup_node = right.input(index < LANES ? 0 : 1);
    input = mul.left();
  }
#endif  // V8_ENABLE_WEBASSEMBLY
  return {input, dup_node, index};
}
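
// For illustration: this matcher recognises "multiply by a splatted lane",
// e.g. f32x4.mul(x, i8x16.shuffle(y, y, <splat of lane k>)), so the visitors
// below can emit an indexed multiply, roughly
//   fmul v_d.4s, v_x.4s, v_y.s[k]
// instead of materialising the splat first.
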
void InstructionSelectorT::VisitF16x8Mul(OpIndex node) {
  if (MulWithDup result = TryMatchMulWithDup<8>(this, node)) {
    Arm64OperandGeneratorT g(this);
    Emit(kArm64FMulElement | LaneSizeField::encode(16),
         g.DefineAsRegister(node), g.UseRegister(result.input),
         g.UseRegister(result.dup_node), g.UseImmediate(result.index));
  } else {
    return VisitRRR(this, kArm64FMul | LaneSizeField::encode(16), node);
  }
}

void InstructionSelectorT::VisitF32x4Mul(OpIndex node) {
  if (MulWithDup result = TryMatchMulWithDup<4>(this, node)) {
    Arm64OperandGeneratorT g(this);
    Emit(kArm64FMulElement | LaneSizeField::encode(32),
         g.DefineAsRegister(node), g.UseRegister(result.input),
         g.UseRegister(result.dup_node), g.UseImmediate(result.index));
  } else {
    return VisitRRR(this, kArm64FMul | LaneSizeField::encode(32), node);
  }
}

void InstructionSelectorT::VisitF64x2Mul(OpIndex node) {
  if (MulWithDup result = TryMatchMulWithDup<2>(this, node)) {
    Arm64OperandGeneratorT g(this);
    Emit(kArm64FMulElement | LaneSizeField::encode(64),
         g.DefineAsRegister(node), g.UseRegister(result.input),
         g.UseRegister(result.dup_node), g.UseImmediate(result.index));
  } else {
    return VisitRRR(this, kArm64FMul | LaneSizeField::encode(64), node);
  }
}

void InstructionSelectorT::VisitI64x2Mul(OpIndex node) {
  Arm64OperandGeneratorT g(this);
  InstructionOperand temps[] = {g.TempSimd128Register()};
  Emit(kArm64I64x2Mul, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)),
       g.UseRegister(this->input_at(node, 1)), arraysize(temps), temps);
}

class SimdBinopMatcherTurboshaft {
 public:
  SimdBinopMatcherTurboshaft(InstructionSelectorT* selector, OpIndex node)
      : selector_(selector), node_(node) {
    const Simd128BinopOp& add_op = selector->Get(node).Cast<Simd128BinopOp>();
    DCHECK(Simd128BinopOp::IsCommutative(add_op.kind));
    input0_ = add_op.left();
    input1_ = add_op.right();
  }
  template <typename OpmaskT>
  bool InputMatches() {
    if (selector_->Get(input1_).Is<OpmaskT>()) {
      std::swap(input0_, input1_);
      return true;
    }
    return selector_->Get(input0_).Is<OpmaskT>();
  }
  OpIndex matched_input() const { return input0_; }
  OpIndex other_input() const { return input1_; }

 private:
  InstructionSelectorT* selector_;
  OpIndex node_;
  OpIndex input0_;
  OpIndex input1_;
};

template <typename OpmaskT>
bool ShraHelper(InstructionSelectorT* selector, OpIndex node, int lane_size,
                InstructionCode shra_code, InstructionCode add_code) {
  Arm64OperandGeneratorT g(selector);
  SimdBinopMatcherTurboshaft m(selector, node);
  if (!m.InputMatches<OpmaskT>() ||
      !selector->CanCover(node, m.matched_input())) {
    return false;
  }
  const Simd128ShiftOp& shiftop =
      selector->Get(m.matched_input()).Cast<Simd128ShiftOp>();
  int64_t constant;
  if (!selector->MatchSignedIntegralConstant(shiftop.shift(), &constant)) {
    return false;
  }
  // A shift amount that is a multiple of the lane size is a no-op, so only
  // the addition remains.
  if (constant % lane_size == 0) {
    selector->Emit(add_code, g.DefineAsRegister(node),
                   g.UseRegister(shiftop.input()),
                   g.UseRegister(m.other_input()));
  } else {
    selector->Emit(shra_code | LaneSizeField::encode(lane_size),
                   g.DefineSameAsFirst(node), g.UseRegister(m.other_input()),
                   g.UseRegister(shiftop.input()),
                   g.UseImmediate(shiftop.shift()));
  }
  return true;
}
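
// For illustration: ShraHelper fuses an add whose operand is a constant
// shift into a shift-right-and-accumulate, e.g.
//   i32x4.add(x, i32x4.shr_s(y, 5))  ~>  ssra v_x.4s, v_y.4s, #5
// (USRA for the unsigned shift). The accumulator doubles as the destination,
// hence DefineSameAsFirst above.
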
template <typename OpmaskT>
bool AdalpHelper(InstructionSelectorT* selector, OpIndex node, int lane_size,
                 InstructionCode adalp_code) {
  Arm64OperandGeneratorT g(selector);
  SimdBinopMatcherTurboshaft m(selector, node);
  if (!m.InputMatches<OpmaskT>() ||
      !selector->CanCover(node, m.matched_input())) {
    return false;
  }
  selector->Emit(adalp_code | LaneSizeField::encode(lane_size),
                 g.DefineSameAsFirst(node), g.UseRegister(m.other_input()),
                 g.UseRegister(selector->Get(m.matched_input()).input(0)));
  return true;
}
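
// For illustration: AdalpHelper turns an add of a pairwise-extending add
// into a single accumulating instruction, e.g.
//   i32x4.add(acc, i32x4.extadd_pairwise_i16x8_s(v))  ~>  sadalp v_acc.4s, v_v.8h
// (UADALP for the unsigned form).
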
template <typename OpmaskT>
bool MlaHelper(InstructionSelectorT* selector, OpIndex node,
               InstructionCode mla_code) {
  Arm64OperandGeneratorT g(selector);
  SimdBinopMatcherTurboshaft m(selector, node);
  if (!m.InputMatches<OpmaskT>() ||
      !selector->CanCover(node, m.matched_input())) {
    return false;
  }
  const Operation& mul = selector->Get(m.matched_input());
  selector->Emit(mla_code, g.DefineSameAsFirst(node),
                 g.UseRegister(m.other_input()), g.UseRegister(mul.input(0)),
                 g.UseRegister(mul.input(1)));
  return true;
}
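
// For illustration: MlaHelper matches add(acc, mul(a, b)) and emits a fused
// multiply-accumulate, e.g.
//   i32x4.add(acc, i32x4.mul(a, b))  ~>  mla v_acc.4s, v_a.4s, v_b.4s
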
template <Simd128BinopOp::Kind kind>
bool SmlalHelper(InstructionSelectorT* selector, OpIndex node, int lane_size,
                 InstructionCode smlal_code) {
  Arm64OperandGeneratorT g(selector);
  SimdBinopMatcherTurboshaft m(selector, node);
  using OpmaskT = Opmask::Simd128BinopMask::For<kind>;
  if (!m.InputMatches<OpmaskT>() ||
      !selector->CanCover(node, m.matched_input())) {
    return false;
  }
  const Operation& matched = selector->Get(m.matched_input());
  selector->Emit(smlal_code | LaneSizeField::encode(lane_size),
                 g.DefineSameAsFirst(node), g.UseRegister(m.other_input()),
                 g.UseRegister(matched.input(0)),
                 g.UseRegister(matched.input(1)));
  return true;
}
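
// For illustration: SmlalHelper matches add(acc, extmul_low/high(a, b)) and
// emits a widening multiply-accumulate, e.g.
//   i32x4.add(acc, i32x4.extmul_low_i16x8_s(a, b))   ~>  smlal  v_acc.4s, v_a.4h, v_b.4h
//   i32x4.add(acc, i32x4.extmul_high_i16x8_u(a, b))  ~>  umlal2 v_acc.4s, v_a.8h, v_b.8h
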
template <typename OpmaskT>
bool sha3helper(InstructionSelectorT* selector, OpIndex node,
                InstructionCode sha3_code) {
  Arm64OperandGeneratorT g(selector);
  SimdBinopMatcherTurboshaft m(selector, node);
  if (!m.InputMatches<OpmaskT>() ||
      !selector->CanCover(node, m.matched_input())) {
    return false;
  }
  const Operation& matched = selector->Get(m.matched_input());
  selector->Emit(
      sha3_code, g.DefineSameAsFirst(node), g.UseRegister(m.other_input()),
      g.UseRegister(matched.input(0)), g.UseRegister(matched.input(1)));
  return true;
}
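
// For illustration: with the SHA3 extension, xor(a, xor(b, c)) can be
// emitted as a single three-way EOR3 and xor(a, andnot(b, c)) as BCAX,
// roughly
//   eor3 v_d.16b, v_a.16b, v_b.16b, v_c.16b   // a ^ b ^ c
//   bcax v_d.16b, v_a.16b, v_b.16b, v_c.16b   // a ^ (b & ~c)
// which is what VisitS128Xor below relies on.
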
void InstructionSelectorT::VisitS128Xor(OpIndex node) {
  Arm64OperandGeneratorT g(this);

  if (!CpuFeatures::IsSupported(SHA3)) {
    return VisitRRR(this, kArm64S128Xor, node);
  }

  if (sha3helper<Opmask::kSimd128AndNot>(this, node, kArm64Bcax) ||
      sha3helper<Opmask::kSimd128Xor>(this, node, kArm64Eor3)) {
    return;
  }

  return VisitRRR(this, kArm64S128Xor, node);
}

void InstructionSelectorT::VisitI64x2Add(OpIndex node) {
  if (ShraHelper<Opmask::kSimd128I64x2ShrS>(
          this, node, 64, kArm64Ssra, kArm64IAdd | LaneSizeField::encode(64)) ||
      ShraHelper<Opmask::kSimd128I64x2ShrU>(
          this, node, 64, kArm64Usra, kArm64IAdd | LaneSizeField::encode(64))) {
    return;
  }
  VisitRRR(this, kArm64IAdd | LaneSizeField::encode(64), node);
}

void InstructionSelectorT::VisitI8x16Add(OpIndex node) {
  if (!ShraHelper<Opmask::kSimd128I8x16ShrS>(
          this, node, 8, kArm64Ssra, kArm64IAdd | LaneSizeField::encode(8)) &&
      !ShraHelper<Opmask::kSimd128I8x16ShrU>(
          this, node, 8, kArm64Usra, kArm64IAdd | LaneSizeField::encode(8))) {
    VisitRRR(this, kArm64IAdd | LaneSizeField::encode(8), node);
  }
}

#define VISIT_SIMD_ADD(Type, PairwiseType, LaneSize)                          \
  void InstructionSelectorT::Visit##Type##Add(OpIndex node) {                 \
    /* Select Mla(z, x, y) for Add(z, Mul(x, y)). */                          \
    if (MlaHelper<Opmask::kSimd128##Type##Mul>(                               \
            this, node, kArm64Mla | LaneSizeField::encode(LaneSize))) {       \
      return;                                                                 \
    }                                                                         \
    /* Select S/Uadalp(x, y) for Add(x, ExtAddPairwise(y)). */                \
    if (AdalpHelper<Opmask::kSimd128##Type##ExtAddPairwise##PairwiseType##S>( \
            this, node, LaneSize, kArm64Sadalp) ||                            \
        AdalpHelper<Opmask::kSimd128##Type##ExtAddPairwise##PairwiseType##U>( \
            this, node, LaneSize, kArm64Uadalp)) {                            \
      return;                                                                 \
    }                                                                         \
    /* Select S/Usra(x, y) for Add(x, ShiftRight(y, imm)). */                 \
    if (ShraHelper<Opmask::kSimd128##Type##ShrS>(                             \
            this, node, LaneSize, kArm64Ssra,                                 \
            kArm64IAdd | LaneSizeField::encode(LaneSize)) ||                  \
        ShraHelper<Opmask::kSimd128##Type##ShrU>(                             \
            this, node, LaneSize, kArm64Usra,                                 \
            kArm64IAdd | LaneSizeField::encode(LaneSize))) {                  \
      return;                                                                 \
    }                                                                         \
    /* Select S/Umlal(x, y, z) for Add(x, ExtMulLow/High(y, z)). */           \
    if (SmlalHelper<                                                          \
            Simd128BinopOp::Kind::k##Type##ExtMulLow##PairwiseType##S>(       \
            this, node, LaneSize, kArm64Smlal) ||                             \
        SmlalHelper<                                                          \
            Simd128BinopOp::Kind::k##Type##ExtMulHigh##PairwiseType##S>(      \
            this, node, LaneSize, kArm64Smlal2) ||                            \
        SmlalHelper<                                                          \
            Simd128BinopOp::Kind::k##Type##ExtMulLow##PairwiseType##U>(       \
            this, node, LaneSize, kArm64Umlal) ||                             \
        SmlalHelper<                                                          \
            Simd128BinopOp::Kind::k##Type##ExtMulHigh##PairwiseType##U>(      \
            this, node, LaneSize, kArm64Umlal2)) {                            \
      return;                                                                 \
    }                                                                         \
    VisitRRR(this, kArm64IAdd | LaneSizeField::encode(LaneSize), node);       \
  }
VISIT_SIMD_ADD(I32x4, I16x8, 32)
VISIT_SIMD_ADD(I16x8, I8x16, 16)
#undef VISIT_SIMD_ADD

#define VISIT_SIMD_SUB(Type, LaneSize)                                    \
  void InstructionSelectorT::Visit##Type##Sub(OpIndex node) {             \
    Arm64OperandGeneratorT g(this);                                       \
    const Simd128BinopOp& sub = Get(node).Cast<Simd128BinopOp>();         \
    const Operation& right = Get(sub.right());                            \
    /* Select Mls(z, x, y) for Sub(z, Mul(x, y)). */                      \
    if (right.Is<Opmask::kSimd128##Type##Mul>() &&                        \
        CanCover(node, sub.right())) {                                    \
      Emit(kArm64Mls | LaneSizeField::encode(LaneSize),                   \
           g.DefineSameAsFirst(node), g.UseRegister(sub.left()),          \
           g.UseRegister(right.input(0)), g.UseRegister(right.input(1))); \
      return;                                                             \
    }                                                                     \
    VisitRRR(this, kArm64ISub | LaneSizeField::encode(LaneSize), node);   \
  }
VISIT_SIMD_SUB(I32x4, 32)
VISIT_SIMD_SUB(I16x8, 16)
#undef VISIT_SIMD_SUB

void VisitSimdReduce(InstructionSelectorT* selector, OpIndex node,
                     InstructionCode opcode) {
  Arm64OperandGeneratorT g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(selector->Get(node).input(0)));
}

#define VISIT_SIMD_REDUCE(Type, Opcode)                             \
  void InstructionSelectorT::Visit##Type##AddReduce(OpIndex node) { \
    VisitSimdReduce(this, node, Opcode);                            \
  }

VISIT_SIMD_REDUCE(I8x16, kArm64I8x16Addv)
VISIT_SIMD_REDUCE(I16x8, kArm64I16x8Addv)
VISIT_SIMD_REDUCE(I32x4, kArm64I32x4Addv)
VISIT_SIMD_REDUCE(I64x2, kArm64I64x2AddPair)
VISIT_SIMD_REDUCE(F32x4, kArm64F32x4AddReducePairwise)
VISIT_SIMD_REDUCE(F64x2, kArm64F64x2AddPair)
#undef VISIT_SIMD_REDUCE

bool isSimdZero(InstructionSelectorT* selector, OpIndex node) {
  const Operation& op = selector->Get(node);
  if (auto constant = op.TryCast<Simd128ConstantOp>()) {
    return constant->IsZero();
  }
  return false;
}

#define VISIT_SIMD_CM(Type, T, CmOp, CmOpposite, LaneSize)                   \
  void InstructionSelectorT::Visit##Type##CmOp(OpIndex node) {               \
    Arm64OperandGeneratorT g(this);                                          \
    OpIndex left = this->input_at(node, 0);                                  \
    OpIndex right = this->input_at(node, 1);                                 \
    if (isSimdZero(this, left)) {                                            \
      Emit(kArm64##T##CmOpposite | LaneSizeField::encode(LaneSize),          \
           g.DefineAsRegister(node), g.UseRegister(right));                  \
      return;                                                                \
    } else if (isSimdZero(this, right)) {                                    \
      Emit(kArm64##T##CmOp | LaneSizeField::encode(LaneSize),                \
           g.DefineAsRegister(node), g.UseRegister(left));                   \
      return;                                                                \
    }                                                                        \
    VisitRRR(this, kArm64##T##CmOp | LaneSizeField::encode(LaneSize), node); \
  }

VISIT_SIMD_CM(F64x2, F, Eq, Eq, 64)
VISIT_SIMD_CM(F64x2, F, Ne, Ne, 64)
VISIT_SIMD_CM(F64x2, F, Lt, Gt, 64)
VISIT_SIMD_CM(F64x2, F, Le, Ge, 64)
VISIT_SIMD_CM(F32x4, F, Eq, Eq, 32)
VISIT_SIMD_CM(F32x4, F, Ne, Ne, 32)
VISIT_SIMD_CM(F32x4, F, Lt, Gt, 32)
VISIT_SIMD_CM(F32x4, F, Le, Ge, 32)
VISIT_SIMD_CM(F16x8, F, Eq, Eq, 16)
VISIT_SIMD_CM(F16x8, F, Ne, Ne, 16)
VISIT_SIMD_CM(F16x8, F, Lt, Gt, 16)
VISIT_SIMD_CM(F16x8, F, Le, Ge, 16)

VISIT_SIMD_CM(I64x2, I, Eq, Eq, 64)
VISIT_SIMD_CM(I64x2, I, Ne, Ne, 64)
VISIT_SIMD_CM(I64x2, I, GtS, LtS, 64)
VISIT_SIMD_CM(I64x2, I, GeS, LeS, 64)
VISIT_SIMD_CM(I32x4, I, Eq, Eq, 32)
VISIT_SIMD_CM(I32x4, I, Ne, Ne, 32)
VISIT_SIMD_CM(I32x4, I, GtS, LtS, 32)
VISIT_SIMD_CM(I32x4, I, GeS, LeS, 32)
VISIT_SIMD_CM(I16x8, I, Eq, Eq, 16)
VISIT_SIMD_CM(I16x8, I, Ne, Ne, 16)
VISIT_SIMD_CM(I16x8, I, GtS, LtS, 16)
VISIT_SIMD_CM(I16x8, I, GeS, LeS, 16)
VISIT_SIMD_CM(I8x16, I, Eq, Eq, 8)
VISIT_SIMD_CM(I8x16, I, Ne, Ne, 8)
VISIT_SIMD_CM(I8x16, I, GtS, LtS, 8)
VISIT_SIMD_CM(I8x16, I, GeS, LeS, 8)
#undef VISIT_SIMD_CM

void InstructionSelectorT::VisitS128Select(OpIndex node) {
  Arm64OperandGeneratorT g(this);
  Emit(kArm64S128Select, g.DefineSameAsFirst(node),
       g.UseRegister(this->input_at(node, 0)),
       g.UseRegister(this->input_at(node, 1)),
       g.UseRegister(this->input_at(node, 2)));
}

void InstructionSelectorT::VisitI8x16RelaxedLaneSelect(OpIndex node) {
  VisitS128Select(node);
}

void InstructionSelectorT::VisitI16x8RelaxedLaneSelect(OpIndex node) {
  VisitS128Select(node);
}

void InstructionSelectorT::VisitI32x4RelaxedLaneSelect(OpIndex node) {
  VisitS128Select(node);
}

void InstructionSelectorT::VisitI64x2RelaxedLaneSelect(OpIndex node) {
  VisitS128Select(node);
}

#define VISIT_SIMD_QFMOP(op)                           \
  void InstructionSelectorT::Visit##op(OpIndex node) { \
    Arm64OperandGeneratorT g(this);                    \
    Emit(kArm64##op, g.DefineSameAsInput(node, 2),     \
         g.UseRegister(this->input_at(node, 0)),       \
         g.UseRegister(this->input_at(node, 1)),       \
         g.UseRegister(this->input_at(node, 2)));      \
  }
#undef VISIT_SIMD_QFMOP

void ArrangeShuffleTable(Arm64OperandGeneratorT* g, OpIndex input0,
                         OpIndex input1, InstructionOperand* src0,
                         InstructionOperand* src1) {
  if (input0 == input1) {
    // Unary: any register can serve as the single table input.
    *src0 = *src1 = g->UseRegister(input0);
  } else {
    // Binary: the two table registers must be consecutive.
    *src0 = g->UseFixed(input0, fp_fixed1);
    *src1 = g->UseFixed(input1, fp_fixed2);
  }
}

using CanonicalShuffle = wasm::SimdShuffle::CanonicalShuffle;
std::optional<ArchOpcode> TryMapCanonicalShuffleToArch(
    CanonicalShuffle shuffle) {
  using CanonicalToArch = std::pair<CanonicalShuffle, ArchOpcode>;
  constexpr static auto arch_shuffles = std::to_array<CanonicalToArch>({
      {CanonicalShuffle::kS64x2Even, kArm64S64x2UnzipLeft},
      {CanonicalShuffle::kS64x2Odd, kArm64S64x2UnzipRight},
      {CanonicalShuffle::kS64x2ReverseBytes, kArm64S8x8Reverse},
      {CanonicalShuffle::kS32x4Even, kArm64S32x4UnzipLeft},
      {CanonicalShuffle::kS32x4Odd, kArm64S32x4UnzipRight},
      {CanonicalShuffle::kS32x4InterleaveLowHalves, kArm64S32x4ZipLeft},
      {CanonicalShuffle::kS32x4InterleaveHighHalves, kArm64S32x4ZipRight},
      {CanonicalShuffle::kS32x4ReverseBytes, kArm64S8x4Reverse},
      {CanonicalShuffle::kS32x4Reverse, kArm64S32x4Reverse},
      {CanonicalShuffle::kS32x2Reverse, kArm64S32x2Reverse},
      {CanonicalShuffle::kS32x4TransposeEven, kArm64S32x4TransposeLeft},
      {CanonicalShuffle::kS32x4TransposeOdd, kArm64S32x4TransposeRight},
      {CanonicalShuffle::kS16x8Even, kArm64S16x8UnzipLeft},
      {CanonicalShuffle::kS16x8Odd, kArm64S16x8UnzipRight},
      {CanonicalShuffle::kS16x8InterleaveLowHalves, kArm64S16x8ZipLeft},
      {CanonicalShuffle::kS16x8InterleaveHighHalves, kArm64S16x8ZipRight},
      {CanonicalShuffle::kS16x2Reverse, kArm64S16x2Reverse},
      {CanonicalShuffle::kS16x4Reverse, kArm64S16x4Reverse},
      {CanonicalShuffle::kS16x8ReverseBytes, kArm64S8x2Reverse},
      {CanonicalShuffle::kS16x8TransposeEven, kArm64S16x8TransposeLeft},
      {CanonicalShuffle::kS16x8TransposeOdd, kArm64S16x8TransposeRight},
      {CanonicalShuffle::kS8x16Even, kArm64S8x16UnzipLeft},
      {CanonicalShuffle::kS8x16Odd, kArm64S8x16UnzipRight},
      {CanonicalShuffle::kS8x16InterleaveLowHalves, kArm64S8x16ZipLeft},
      {CanonicalShuffle::kS8x16InterleaveHighHalves, kArm64S8x16ZipRight},
      {CanonicalShuffle::kS8x16TransposeEven, kArm64S8x16TransposeLeft},
      {CanonicalShuffle::kS8x16TransposeOdd, kArm64S8x16TransposeRight},
  });
  for (auto& [canonical, arch_opcode] : arch_shuffles) {
    if (canonical == shuffle) {
      return arch_opcode;
    }
  }
  return std::nullopt;
}
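
// For illustration: the table above lets recognised canonical shuffles skip
// the generic table-lookup path and map onto dedicated permutes, e.g.
//   kS32x4InterleaveLowHalves  ~>  zip1 v_d.4s, v_a.4s, v_b.4s
//   kS32x4Even                 ~>  uzp1 v_d.4s, v_a.4s, v_b.4s
//   kS16x8TransposeEven        ~>  trn1 v_d.8h, v_a.8h, v_b.8h
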
void InstructionSelectorT::VisitI8x2Shuffle(OpIndex node) {
  Arm64OperandGeneratorT g(this);
  auto view = this->simd_shuffle_view(node);
  constexpr size_t shuffle_bytes = 2;
  OpIndex input0 = view.input(0);
  OpIndex input1 = view.input(1);
  std::array<uint8_t, shuffle_bytes> shuffle;
  std::copy(view.data(), view.data() + shuffle_bytes, shuffle.begin());

  uint8_t shuffle16x1;
  if (wasm::SimdShuffle::TryMatch16x1Shuffle(shuffle.data(), &shuffle16x1)) {
    Emit(kArm64S16x1Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
         g.UseRegister(input1), g.UseImmediate(shuffle16x1));
  } else {
    Emit(kArm64S8x2Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
         g.UseRegister(input1),
         g.UseImmediate(wasm::SimdShuffle::Pack2Lanes(shuffle)));
  }
}

void InstructionSelectorT::VisitI8x4Shuffle(OpIndex node) {
  Arm64OperandGeneratorT g(this);
  auto view = this->simd_shuffle_view(node);
  OpIndex input0 = view.input(0);
  OpIndex input1 = view.input(1);
  constexpr size_t shuffle_bytes = 4;
  std::array<uint8_t, shuffle_bytes> shuffle;
  std::copy(view.data(), view.data() + shuffle_bytes, shuffle.begin());
  std::array<uint8_t, 2> shuffle16x2;
  uint8_t shuffle32x1;

  if (wasm::SimdShuffle::TryMatch32x1Shuffle(shuffle.data(), &shuffle32x1)) {
    Emit(kArm64S32x1Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
         g.UseRegister(input1), g.UseImmediate(shuffle32x1));
  } else if (wasm::SimdShuffle::TryMatch16x2Shuffle(shuffle.data(),
                                                    shuffle16x2.data())) {
    Emit(kArm64S16x2Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
         g.UseRegister(input1),
         g.UseImmediate(wasm::SimdShuffle::Pack2Lanes(shuffle16x2)));
  } else {
    InstructionOperand src0, src1;
    ArrangeShuffleTable(&g, input0, input1, &src0, &src1);
    Emit(kArm64I8x16Shuffle, g.DefineAsRegister(node), src0, src1,
         g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(&shuffle[0])),
         g.UseImmediate(0), g.UseImmediate(0), g.UseImmediate(0));
  }
}

void InstructionSelectorT::VisitI8x8Shuffle(OpIndex node) {
  Arm64OperandGeneratorT g(this);
  auto view = this->simd_shuffle_view(node);
  OpIndex input0 = view.input(0);
  OpIndex input1 = view.input(1);
  constexpr size_t shuffle_bytes = 8;
  std::array<uint8_t, shuffle_bytes> shuffle;
  std::copy(view.data(), view.data() + shuffle_bytes, shuffle.begin());
  std::array<uint8_t, 2> shuffle32x2;
  uint8_t shuffle64x1;
  if (wasm::SimdShuffle::TryMatch64x1Shuffle(shuffle.data(), &shuffle64x1)) {
    Emit(kArm64S64x1Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
         g.UseRegister(input1), g.UseImmediate(shuffle64x1));
  } else if (wasm::SimdShuffle::TryMatch32x2Shuffle(shuffle.data(),
                                                    shuffle32x2.data())) {
    Emit(kArm64S32x2Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
         g.UseRegister(input1),
         g.UseImmediate(wasm::SimdShuffle::Pack2Lanes(shuffle32x2)));
  } else {
    InstructionOperand src0, src1;
    ArrangeShuffleTable(&g, input0, input1, &src0, &src1);
    Emit(kArm64I8x16Shuffle, g.DefineAsRegister(node), src0, src1,
         g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(&shuffle[0])),
         g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(&shuffle[4])),
         g.UseImmediate(0), g.UseImmediate(0));
  }
}

void InstructionSelectorT::VisitI8x16Shuffle(OpIndex node) {
  std::array<uint8_t, kSimd128Size> shuffle;
  bool is_swizzle;
  auto view = this->simd_shuffle_view(node);
  CanonicalizeShuffle(view, shuffle.data(), &is_swizzle);
  OpIndex input0 = view.input(0);
  OpIndex input1 = view.input(1);
  Arm64OperandGeneratorT g(this);

  const CanonicalShuffle canonical =
      wasm::SimdShuffle::TryMatchCanonical(shuffle);

  if (auto arch_opcode = TryMapCanonicalShuffleToArch(canonical);
      arch_opcode.has_value()) {
    Emit(arch_opcode.value(), g.DefineAsRegister(node), g.UseRegister(input0),
         g.UseRegister(input1));
    return;
  }

  uint8_t offset;
  if (wasm::SimdShuffle::TryMatchConcat(shuffle.data(), &offset)) {
    Emit(kArm64S8x16Concat, g.DefineAsRegister(node), g.UseRegister(input0),
         g.UseRegister(input1), g.UseImmediate(offset));
    return;
  }
  std::array<uint8_t, 2> shuffle64x2;
  if (wasm::SimdShuffle::TryMatch64x2Shuffle(shuffle.data(),
                                             shuffle64x2.data())) {
    Emit(kArm64S64x2Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
         g.UseRegister(input1),
         g.UseImmediate(wasm::SimdShuffle::Pack2Lanes(shuffle64x2)));
    return;
  }
  uint8_t shuffle32x4[4];
  int index = 0;
  uint8_t from = 0;
  uint8_t to = 0;
  if (wasm::SimdShuffle::TryMatch32x4Shuffle(shuffle.data(), shuffle32x4)) {
    if (wasm::SimdShuffle::TryMatchSplat<4>(shuffle.data(), &index)) {
      Emit(kArm64S128Dup, g.DefineAsRegister(node), g.UseRegister(input0),
           g.UseImmediate(4), g.UseImmediate(index % 4));
    } else if (wasm::SimdShuffle::TryMatch32x4OneLaneSwizzle(shuffle32x4, &from,
                                                             &to)) {
      Emit(kArm64S32x4OneLaneSwizzle, g.DefineAsRegister(node),
           g.UseRegister(input0), g.TempImmediate(from), g.TempImmediate(to));
    } else if (canonical == CanonicalShuffle::kIdentity) {
      // An identity shuffle needs no code; rename the result to input0.
      MarkAsDefined(node);
      SetRename(node, input0);
    } else {
      Emit(kArm64S32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
           g.UseRegister(input1),
           g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle32x4)));
    }
    return;
  }
  if (wasm::SimdShuffle::TryMatchSplat<8>(shuffle.data(), &index)) {
    Emit(kArm64S128Dup, g.DefineAsRegister(node), g.UseRegister(input0),
         g.UseImmediate(8), g.UseImmediate(index % 8));
    return;
  }
  if (wasm::SimdShuffle::TryMatchSplat<16>(shuffle.data(), &index)) {
    Emit(kArm64S128Dup, g.DefineAsRegister(node), g.UseRegister(input0),
         g.UseImmediate(16), g.UseImmediate(index % 16));
    return;
  }
  // Fall back to a generic table lookup; arrange the sources so the code
  // generator can form a valid lookup table.
  InstructionOperand src0, src1;
  ArrangeShuffleTable(&g, input0, input1, &src0, &src1);
  Emit(kArm64I8x16Shuffle, g.DefineAsRegister(node), src0, src1,
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(&shuffle[0])),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(&shuffle[4])),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(&shuffle[8])),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(&shuffle[12])));
}
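
// For illustration: the final fallback above packs the 16 shuffle indices
// into four 32-bit immediates; the code generator rebuilds the index vector
// and performs the permutation with a table lookup, roughly
//   tbl v_d.16b, {v_t0.16b, v_t1.16b}, v_idx.16b
// which is why ArrangeShuffleTable forces the two table inputs into
// consecutive registers.
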
void InstructionSelectorT::VisitSetStackPointer(OpIndex node) {
  OperandGenerator g(this);
  auto input = g.UseRegister(this->input_at(node, 0));
  Emit(kArchSetStackPointer, 0, nullptr, 1, &input);
}

void InstructionSelectorT::VisitSignExtendWord8ToInt32(OpIndex node) {
  VisitRR(this, kArm64Sxtb32, node);
}

void InstructionSelectorT::VisitSignExtendWord16ToInt32(OpIndex node) {
  VisitRR(this, kArm64Sxth32, node);
}

void InstructionSelectorT::VisitSignExtendWord8ToInt64(OpIndex node) {
  VisitRR(this, kArm64Sxtb, node);
}

void InstructionSelectorT::VisitSignExtendWord16ToInt64(OpIndex node) {
  VisitRR(this, kArm64Sxth, node);
}

void InstructionSelectorT::VisitSignExtendWord32ToInt64(OpIndex node) {
  VisitRR(this, kArm64Sxtw, node);
}

#if V8_ENABLE_WEBASSEMBLY
void VisitPminOrPmax(InstructionSelectorT* selector, ArchOpcode opcode,
                     OpIndex node) {
  Arm64OperandGeneratorT g(selector);
  // The inputs are compared first and must remain unchanged for the
  // bitselect that follows, so all registers need to be distinct.
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseUniqueRegister(selector->input_at(node, 0)),
                 g.UseUniqueRegister(selector->input_at(node, 1)));
}
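
// For illustration: wasm's pmin/pmax are defined as compare-and-select
// rather than IEEE min/max, e.g. f32x4.pmin(a, b) keeps b wherever b < a,
// roughly
//   fcmgt v_d.4s, v_a.4s, v_b.4s
//   bsl   v_d.16b, v_b.16b, v_a.16b
// which is why the inputs must stay in their own (unique) registers.
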
void InstructionSelectorT::VisitF16x8Pmin(OpIndex node) {
  VisitPminOrPmax(this, kArm64F16x8Pmin, node);
}

void InstructionSelectorT::VisitF16x8Pmax(OpIndex node) {
  VisitPminOrPmax(this, kArm64F16x8Pmax, node);
}

void InstructionSelectorT::VisitF32x4Pmin(OpIndex node) {
  VisitPminOrPmax(this, kArm64F32x4Pmin, node);
}

void InstructionSelectorT::VisitF32x4Pmax(OpIndex node) {
  VisitPminOrPmax(this, kArm64F32x4Pmax, node);
}

void InstructionSelectorT::VisitF64x2Pmin(OpIndex node) {
  VisitPminOrPmax(this, kArm64F64x2Pmin, node);
}

void InstructionSelectorT::VisitF64x2Pmax(OpIndex node) {
  VisitPminOrPmax(this, kArm64F64x2Pmax, node);
}

void VisitSignExtendLong(InstructionSelectorT* selector, ArchOpcode opcode,
                         OpIndex node, int lane_size) {
  InstructionCode code = opcode;
  code |= LaneSizeField::encode(lane_size);
  VisitRR(selector, code, node);
}
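
// For illustration: the visitors below map the wasm extend-low/high
// conversions onto SXTL/UXTL (and the *2 forms for the high half), e.g.
//   i64x2.extend_low_i32x4_s(v)   ~>  sxtl  v_d.2d, v_v.2s
//   i32x4.extend_high_i16x8_u(v)  ~>  uxtl2 v_d.4s, v_v.8h
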
void InstructionSelectorT::VisitI64x2SConvertI32x4Low(OpIndex node) {
  VisitSignExtendLong(this, kArm64Sxtl, node, 64);
}

void InstructionSelectorT::VisitI64x2SConvertI32x4High(OpIndex node) {
  VisitSignExtendLong(this, kArm64Sxtl2, node, 64);
}

void InstructionSelectorT::VisitI64x2UConvertI32x4Low(OpIndex node) {
  VisitSignExtendLong(this, kArm64Uxtl, node, 64);
}

void InstructionSelectorT::VisitI64x2UConvertI32x4High(OpIndex node) {
  VisitSignExtendLong(this, kArm64Uxtl2, node, 64);
}

void InstructionSelectorT::VisitI32x4SConvertI16x8Low(OpIndex node) {
  VisitSignExtendLong(this, kArm64Sxtl, node, 32);
}

void InstructionSelectorT::VisitI32x4SConvertI16x8High(OpIndex node) {
  VisitSignExtendLong(this, kArm64Sxtl2, node, 32);
}

void InstructionSelectorT::VisitI32x4UConvertI16x8Low(OpIndex node) {
  VisitSignExtendLong(this, kArm64Uxtl, node, 32);
}

void InstructionSelectorT::VisitI32x4UConvertI16x8High(OpIndex node) {
  VisitSignExtendLong(this, kArm64Uxtl2, node, 32);
}

void InstructionSelectorT::VisitI16x8SConvertI8x16Low(OpIndex node) {
  VisitSignExtendLong(this, kArm64Sxtl, node, 16);
}

void InstructionSelectorT::VisitI16x8SConvertI8x16High(OpIndex node) {
  VisitSignExtendLong(this, kArm64Sxtl2, node, 16);
}

void InstructionSelectorT::VisitI16x8UConvertI8x16Low(OpIndex node) {
  VisitSignExtendLong(this, kArm64Uxtl, node, 16);
}

void InstructionSelectorT::VisitI16x8UConvertI8x16High(OpIndex node) {
  VisitSignExtendLong(this, kArm64Uxtl2, node, 16);
}

void InstructionSelectorT::VisitI8x16Popcnt(OpIndex node) {
  InstructionCode code = kArm64Cnt;
  code |= LaneSizeField::encode(8);
  VisitRR(this, code, node);
}

#ifdef V8_ENABLE_WASM_DEINTERLEAVED_MEM_OPS

void InstructionSelectorT::VisitSimd128LoadPairDeinterleave(OpIndex node) {
  const auto& load = this->Get(node).Cast<Simd128LoadPairDeinterleaveOp>();
  Arm64OperandGeneratorT g(this);
  opcode |= LaneSizeField::encode(load.lane_size());
  if (load.load_kind.with_trap_handler) {
    opcode |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
  }

  InstructionOperand outputs[] = {
      g.DefineAsFixed(first.value(), fp_fixed1),
      g.DefineAsFixed(second.value(), fp_fixed2),
  };

  InstructionOperand inputs[] = {
      EmitAddBeforeLoadOrStore(this, node, &opcode),
  };

  Emit(opcode, arraysize(outputs), outputs, arraysize(inputs), inputs);
}

#endif  // V8_ENABLE_WASM_DEINTERLEAVED_MEM_OPS

void InstructionSelectorT::AddOutputToSelectContinuation(OperandGenerator* g,
                                                         int first_input_index,
                                                         OpIndex node) {
  continuation_outputs_.push_back(g->DefineAsRegister(node));
}

MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  auto flags = MachineOperatorBuilder::kFloat32RoundDown |
               MachineOperatorBuilder::kFloat64RoundDown |
               MachineOperatorBuilder::kFloat32RoundUp |
               MachineOperatorBuilder::kFloat64RoundUp |
               MachineOperatorBuilder::kFloat32RoundTruncate |
               MachineOperatorBuilder::kFloat64RoundTruncate |
               MachineOperatorBuilder::kFloat64RoundTiesAway |
               MachineOperatorBuilder::kFloat32RoundTiesEven |
               MachineOperatorBuilder::kFloat64RoundTiesEven |
               MachineOperatorBuilder::kWord32Popcnt |
               MachineOperatorBuilder::kWord64Popcnt |
               MachineOperatorBuilder::kWord32ShiftIsSafe |
               MachineOperatorBuilder::kInt32DivIsSafe |
               MachineOperatorBuilder::kUint32DivIsSafe |
               MachineOperatorBuilder::kWord32ReverseBits |
               MachineOperatorBuilder::kWord64ReverseBits |
               MachineOperatorBuilder::kSatConversionIsSafe |
               MachineOperatorBuilder::kFloat32Select |
               MachineOperatorBuilder::kFloat64Select |
               MachineOperatorBuilder::kWord32Select |
               MachineOperatorBuilder::kWord64Select |
               MachineOperatorBuilder::kLoadStorePairs;
  if (CpuFeatures::IsSupported(FP16)) {
    flags |= MachineOperatorBuilder::kFloat16 |
             MachineOperatorBuilder::kFloat16RawBitsConversion;
  }
  return flags;
}

MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
  return MachineOperatorBuilder::AlignmentRequirements::
      FullUnalignedAccessSupport();
}