using namespace turboshaft;
  DCHECK(base::IsInRange(value64, std::numeric_limits<int32_t>::min(),
                         std::numeric_limits<int32_t>::max()));
  int32_t value = static_cast<int32_t>(value64);
    case kArmVldrF32:
    case kArmVstrF32:
    case kArmVldrF64:
    case kArmVstrF64:
      return value >= -1020 && value <= 1020 && (value % 4) == 0;

    case kArmLdrb:
    case kArmLdrsb:
    case kArmStrb:
    case kArmLdr:
    case kArmStr:
      return value >= -4095 && value <= 4095;

    case kArmLdrh:
    case kArmLdrsh:
    case kArmStrh:
      return value >= -255 && value <= 255;
void VisitRR(InstructionSelectorT* selector, ArchOpcode opcode, OpIndex node) {
  ArmOperandGeneratorT g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(selector->input_at(node, 0)));
}
void VisitRRR(InstructionSelectorT* selector, ArchOpcode opcode, OpIndex node) {
  ArmOperandGeneratorT g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(selector->input_at(node, 0)),
                 g.UseRegister(selector->input_at(node, 1)));
}
#if V8_ENABLE_WEBASSEMBLY
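// Shifts by a constant that is a multiple of the lane width are no-ops (SIMD
// shift amounts are taken modulo the lane width), so they reduce to an
// identity move; other constant shifts can use the immediate form.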
void VisitSimdShiftRRR(InstructionSelectorT* selector, ArchOpcode opcode,
                       OpIndex node, int width) {
  ArmOperandGeneratorT g(selector);
  const Simd128ShiftOp& op = selector->Get(node).Cast<Simd128ShiftOp>();
  int32_t shift_by;
  if (selector->MatchIntegralWord32Constant(op.shift(), &shift_by)) {
    if (shift_by % width == 0) {
      selector->EmitIdentity(node);
    } else {
      selector->Emit(opcode, g.DefineAsRegister(node),
                     g.UseRegister(op.input()), g.UseImmediate(op.shift()));
    }
  } else {
    VisitRRR(selector, opcode, node);
  }
}
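// For the *Right variants of zip/unzip/transpose below, swapping the two
// inputs lets the code generator produce the right half with the same
// instruction as the *Left variant; DefineSameAsFirst is used because the
// underlying NEON vzip/vuzp/vtrn instructions overwrite their inputs.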
void VisitRRRShuffle(InstructionSelectorT* selector, ArchOpcode opcode,
                     OpIndex node, OpIndex input0, OpIndex input1) {
  ArmOperandGeneratorT g(selector);
  if (opcode == kArmS32x4ZipRight || opcode == kArmS32x4UnzipRight ||
      opcode == kArmS32x4TransposeRight || opcode == kArmS16x8ZipRight ||
      opcode == kArmS16x8UnzipRight || opcode == kArmS16x8TransposeRight ||
      opcode == kArmS8x16ZipRight || opcode == kArmS8x16UnzipRight ||
      opcode == kArmS8x16TransposeRight) {
    std::swap(input0, input1);
  }
  selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(input0),
                 g.UseRegister(input1));
}
void VisitRRI(InstructionSelectorT* selector, ArchOpcode opcode, OpIndex node) {
  ArmOperandGeneratorT g(selector);
  const Operation& op = selector->Get(node);
  int imm = op.Cast<Simd128ExtractLaneOp>().lane;
  selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(op.input(0)),
                 g.UseImmediate(imm));
}
void VisitRRIR(InstructionSelectorT* selector, ArchOpcode opcode,
               OpIndex node) {
  ArmOperandGeneratorT g(selector);
  const Simd128ReplaceLaneOp& op =
      selector->Get(node).Cast<Simd128ReplaceLaneOp>();
  selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(op.into()),
                 g.UseImmediate(op.lane), g.UseUniqueRegister(op.new_lane()));
}
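// The TryMatch* helpers below try to fold a shift operation into the second
// operand of an instruction (ARM's flexible <Operand2>), selecting the
// immediate-shift addressing mode when the shift amount is a constant in the
// allowed range and the register-shift mode otherwise.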
template <typename OpmaskT, int kImmMin, int kImmMax, AddressingMode kImmMode,
          AddressingMode kRegMode>
bool TryMatchShift(InstructionSelectorT* selector,
                   InstructionCode* opcode_return, OpIndex node,
                   InstructionOperand* value_return,
                   InstructionOperand* shift_return) {
  ArmOperandGeneratorT g(selector);
  const Operation& op = selector->Get(node);
  if (op.Is<OpmaskT>()) {
    const ShiftOp& shift = op.Cast<ShiftOp>();
    *value_return = g.UseRegister(shift.left());
    int32_t shift_by;
    if (selector->MatchIntegralWord32Constant(shift.right(), &shift_by) &&
        base::IsInRange(shift_by, kImmMin, kImmMax)) {
      *opcode_return |= AddressingModeField::encode(kImmMode);
      *shift_return = g.UseImmediate(shift.right());
    } else {
      *opcode_return |= AddressingModeField::encode(kRegMode);
      *shift_return = g.UseRegister(shift.right());
    }
    return true;
  }
  return false;
}
template <typename OpmaskT, int kImmMin, int kImmMax, AddressingMode kImmMode>
bool TryMatchShiftImmediate(InstructionSelectorT* selector,
                            InstructionCode* opcode_return, OpIndex node,
                            InstructionOperand* value_return,
                            InstructionOperand* shift_return) {
  ArmOperandGeneratorT g(selector);
  const Operation& op = selector->Get(node);
  if (op.Is<OpmaskT>()) {
    const ShiftOp& shift = op.Cast<ShiftOp>();
    int32_t shift_by;
    if (selector->MatchIntegralWord32Constant(shift.right(), &shift_by) &&
        base::IsInRange(shift_by, kImmMin, kImmMax)) {
      *opcode_return |= AddressingModeField::encode(kImmMode);
      *value_return = g.UseRegister(shift.left());
      *shift_return = g.UseImmediate(shift.right());
      return true;
    }
  }
  return false;
}
bool TryMatchROR(InstructionSelectorT* selector,
                 InstructionCode* opcode_return, OpIndex node,
                 InstructionOperand* value_return,
                 InstructionOperand* shift_return) {
  return TryMatchShift<Opmask::kWord32RotateRight, 1, 31,
                       kMode_Operand2_R_ROR_I, kMode_Operand2_R_ROR_R>(
      selector, opcode_return, node, value_return, shift_return);
}
bool TryMatchASR(InstructionSelectorT* selector,
                 InstructionCode* opcode_return, OpIndex node,
                 InstructionOperand* value_return,
                 InstructionOperand* shift_return) {
  return TryMatchShift<Opmask::kWord32ShiftRightArithmetic, 1, 32,
                       kMode_Operand2_R_ASR_I, kMode_Operand2_R_ASR_R>(
             selector, opcode_return, node, value_return, shift_return) ||
         TryMatchShift<Opmask::kWord32ShiftRightArithmeticShiftOutZeros, 1, 32,
                       kMode_Operand2_R_ASR_I, kMode_Operand2_R_ASR_R>(
             selector, opcode_return, node, value_return, shift_return);
}
bool TryMatchLSL(InstructionSelectorT* selector,
                 InstructionCode* opcode_return, OpIndex node,
                 InstructionOperand* value_return,
                 InstructionOperand* shift_return) {
  return TryMatchShift<Opmask::kWord32ShiftLeft, 0, 31, kMode_Operand2_R_LSL_I,
                       kMode_Operand2_R_LSL_R>(selector, opcode_return, node,
                                               value_return, shift_return);
}
bool TryMatchLSLImmediate(InstructionSelectorT* selector,
                          InstructionCode* opcode_return, OpIndex node,
                          InstructionOperand* value_return,
                          InstructionOperand* shift_return) {
  return TryMatchShiftImmediate<Opmask::kWord32ShiftLeft, 0, 31,
                                kMode_Operand2_R_LSL_I>(
      selector, opcode_return, node, value_return, shift_return);
}
bool TryMatchLSR(InstructionSelectorT* selector,
                 InstructionCode* opcode_return, OpIndex node,
                 InstructionOperand* value_return,
                 InstructionOperand* shift_return) {
  return TryMatchShift<Opmask::kWord32ShiftRightLogical, 1, 32,
                       kMode_Operand2_R_LSR_I, kMode_Operand2_R_LSR_R>(
      selector, opcode_return, node, value_return, shift_return);
}
bool TryMatchShift(InstructionSelectorT* selector,
                   InstructionCode* opcode_return, OpIndex node,
                   InstructionOperand* value_return,
                   InstructionOperand* shift_return) {
  return (
      TryMatchASR(selector, opcode_return, node, value_return, shift_return) ||
      TryMatchLSL(selector, opcode_return, node, value_return, shift_return) ||
      TryMatchLSR(selector, opcode_return, node, value_return, shift_return) ||
      TryMatchROR(selector, opcode_return, node, value_return, shift_return));
}
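// Tries the immediate form first and then any of the four shift forms; on
// success it fills in {inputs} and reports how many operand slots were used.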
bool TryMatchImmediateOrShift(InstructionSelectorT* selector,
                              InstructionCode* opcode_return, OpIndex node,
                              size_t* input_count_return,
                              InstructionOperand* inputs) {
  ArmOperandGeneratorT g(selector);
  if (g.CanBeImmediate(node, *opcode_return)) {
    *opcode_return |= AddressingModeField::encode(kMode_Operand2_I);
    inputs[0] = g.UseImmediate(node);
    *input_count_return = 1;
    return true;
  }
  if (TryMatchShift(selector, opcode_return, node, &inputs[0], &inputs[1])) {
    *input_count_return = 2;
    return true;
  }
  return false;
}
void VisitBinop(InstructionSelectorT* selector, OpIndex node,
                InstructionCode opcode, InstructionCode reverse_opcode,
                FlagsContinuationT* cont) {
  ArmOperandGeneratorT g(selector);
  OpIndex lhs = selector->input_at(node, 0);
  OpIndex rhs = selector->input_at(node, 1);
  InstructionOperand inputs[3];
  size_t input_count = 0;
  InstructionOperand outputs[1];
  size_t output_count = 0;

  if (lhs == rhs) {
    // If both inputs refer to the same operand, enforce allocating a register
    // for both of them, so the same register is used for both operands.
    InstructionOperand const input = g.UseRegister(lhs);
    opcode |= AddressingModeField::encode(kMode_Operand2_R);
    inputs[input_count++] = input;
    inputs[input_count++] = input;
  } else if (TryMatchImmediateOrShift(selector, &opcode, rhs, &input_count,
                                      &inputs[1])) {
    inputs[0] = g.UseRegister(lhs);
    input_count++;
  } else if (TryMatchImmediateOrShift(selector, &reverse_opcode, lhs,
                                      &input_count, &inputs[1])) {
    inputs[0] = g.UseRegister(rhs);
    opcode = reverse_opcode;
    input_count++;
  } else {
    opcode |= AddressingModeField::encode(kMode_Operand2_R);
    inputs[input_count++] = g.UseRegister(lhs);
    inputs[input_count++] = g.UseRegister(rhs);
  }

  outputs[output_count++] = g.DefineAsRegister(node);

  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}
void VisitBinop(InstructionSelectorT* selector, OpIndex node,
                InstructionCode opcode, InstructionCode reverse_opcode) {
  FlagsContinuationT cont;
  VisitBinop(selector, node, opcode, reverse_opcode, &cont);
}
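// Without SUDIV hardware support, integer division is emulated by converting
// both operands to double, dividing in the FPU, and converting the quotient
// back. This is exact for all 32-bit integers, since a double's 53-bit
// mantissa represents them without rounding.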
void EmitDiv(InstructionSelectorT* selector, ArchOpcode div_opcode,
             ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode,
             InstructionOperand result_operand, InstructionOperand left_operand,
             InstructionOperand right_operand) {
  ArmOperandGeneratorT g(selector);
  if (selector->IsSupported(SUDIV)) {
    selector->Emit(div_opcode, result_operand, left_operand, right_operand);
    return;
  }
  InstructionOperand left_double_operand = g.TempDoubleRegister();
  InstructionOperand right_double_operand = g.TempDoubleRegister();
  InstructionOperand result_double_operand = g.TempDoubleRegister();
  selector->Emit(f64i32_opcode, left_double_operand, left_operand);
  selector->Emit(f64i32_opcode, right_double_operand, right_operand);
  selector->Emit(kArmVdivF64, result_double_operand, left_double_operand,
                 right_double_operand);
  selector->Emit(i32f64_opcode, result_operand, result_double_operand);
}
void VisitDiv(InstructionSelectorT* selector, OpIndex node,
              ArchOpcode div_opcode, ArchOpcode f64i32_opcode,
              ArchOpcode i32f64_opcode) {
  ArmOperandGeneratorT g(selector);
  EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode,
          g.DefineAsRegister(node), g.UseRegister(selector->input_at(node, 0)),
          g.UseRegister(selector->input_at(node, 1)));
}
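// Computes the remainder as left - (left / right) * right: on ARMv7 a single
// MLS performs the fused multiply-subtract, otherwise a MUL followed by a SUB
// is emitted.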
void VisitMod(InstructionSelectorT* selector, OpIndex node,
              ArchOpcode div_opcode, ArchOpcode f64i32_opcode,
              ArchOpcode i32f64_opcode) {
  ArmOperandGeneratorT g(selector);
  InstructionOperand div_operand = g.TempRegister();
  InstructionOperand result_operand = g.DefineAsRegister(node);
  InstructionOperand left_operand = g.UseRegister(selector->input_at(node, 0));
  InstructionOperand right_operand = g.UseRegister(selector->input_at(node, 1));
  EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, div_operand,
          left_operand, right_operand);
  if (selector->IsSupported(ARMv7)) {
    selector->Emit(kArmMls, result_operand, div_operand, right_operand,
                   left_operand);
  } else {
    InstructionOperand mul_operand = g.TempRegister();
    selector->Emit(kArmMul, mul_operand, div_operand, right_operand);
    selector->Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_R),
                   result_operand, left_operand, mul_operand);
  }
}
void EmitAddBeforeS128LoadStore(InstructionSelectorT* selector,
                                InstructionCode* opcode_return,
                                size_t* input_count_return,
                                InstructionOperand* inputs) {
  ArmOperandGeneratorT g(selector);
  InstructionOperand addr = g.TempRegister();
  InstructionCode op = kArmAdd;
  op |= AddressingModeField::encode(kMode_Operand2_R);
  selector->Emit(op, 1, &addr, 2, inputs);
  *opcode_return |= AddressingModeField::encode(kMode_Operand2_R);
  inputs[0] = addr;
  *input_count_return -= 1;
}
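// NEON vld1/vst1 only take a plain register address, so for S128 accesses the
// helper above folds base + index into a temporary register first and shrinks
// the instruction's input list accordingly.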
void EmitLoad(InstructionSelectorT* selector, InstructionCode opcode,
              InstructionOperand* output, OpIndex base, OpIndex index) {
  ArmOperandGeneratorT g(selector);
  InstructionOperand inputs[3];
  size_t input_count = 2;

  const Operation& base_op = selector->Get(base);
  int64_t index_constant;
  if (base_op.Is<Opmask::kExternalConstant>() &&
      selector->MatchSignedIntegralConstant(index, &index_constant)) {
    const ConstantOp& constant_base = base_op.Cast<ConstantOp>();
    if (selector->CanAddressRelativeToRootsRegister(
            constant_base.external_reference())) {
      ptrdiff_t const delta =
          index_constant +
          MacroAssemblerBase::RootRegisterOffsetForExternalReference(
              selector->isolate(), constant_base.external_reference());
      input_count = 1;
      inputs[0] = g.UseImmediate(static_cast<int32_t>(delta));
      opcode |= AddressingModeField::encode(kMode_Root);
      selector->Emit(opcode, 1, output, input_count, inputs);
      return;
    }
  }

  if (base_op.Is<LoadRootRegisterOp>()) {
    input_count = 1;
    // This only works if {index} is a constant.
    inputs[0] = g.UseImmediate(index);
    opcode |= AddressingModeField::encode(kMode_Root);
    selector->Emit(opcode, 1, output, input_count, inputs);
    return;
  }

  inputs[0] = g.UseRegister(base);
  if (g.CanBeImmediate(index, opcode)) {
    inputs[1] = g.UseImmediate(index);
    opcode |= AddressingModeField::encode(kMode_Offset_RI);
  } else if ((opcode == kArmLdr) &&
             TryMatchLSLImmediate(selector, &opcode, index, &inputs[1],
                                  &inputs[2])) {
    input_count = 3;
  } else {
    inputs[1] = g.UseRegister(index);
    if (opcode == kArmVld1S128) {
      EmitAddBeforeS128LoadStore(selector, &opcode, &input_count, &inputs[0]);
    } else {
      opcode |= AddressingModeField::encode(kMode_Offset_RR);
    }
  }
  selector->Emit(opcode, 1, output, input_count, inputs);
}
void EmitStore(InstructionSelectorT* selector, InstructionCode opcode,
               size_t input_count, InstructionOperand* inputs, OpIndex index) {
  ArmOperandGeneratorT g(selector);
  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);

  if (g.CanBeImmediate(index, opcode)) {
    inputs[input_count++] = g.UseImmediate(index);
    opcode |= AddressingModeField::encode(kMode_Offset_RI);
  } else if ((arch_opcode == kArmStr || arch_opcode == kAtomicStoreWord32) &&
             TryMatchLSLImmediate(selector, &opcode, index, &inputs[2],
                                  &inputs[3])) {
    input_count = 4;
  } else {
    inputs[input_count++] = g.UseRegister(index);
    if (arch_opcode == kArmVst1S128) {
      // Inputs are value, base, index; only base and index need rewriting.
      EmitAddBeforeS128LoadStore(selector, &opcode, &input_count, &inputs[1]);
    } else {
      opcode |= AddressingModeField::encode(kMode_Offset_RR);
    }
  }
  selector->Emit(opcode, 0, nullptr, input_count, inputs);
}
void VisitPairAtomicBinOp(InstructionSelectorT* selector, OpIndex node,
                          ArchOpcode opcode) {
  ArmOperandGeneratorT g(selector);
  OpIndex base = selector->input_at(node, 0);
  OpIndex index = selector->input_at(node, 1);
  OpIndex value = selector->input_at(node, 2);
  OpIndex value_high = selector->input_at(node, 3);
  AddressingMode addressing_mode = kMode_Offset_RR;
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  InstructionOperand inputs[] = {
      g.UseUniqueRegister(value), g.UseUniqueRegister(value_high),
      g.UseUniqueRegister(base), g.UseUniqueRegister(index)};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  InstructionOperand temps[6];
  size_t temp_count = 0;
  temps[temp_count++] = g.TempRegister();
  temps[temp_count++] = g.TempRegister(r6);
  temps[temp_count++] = g.TempRegister(r7);
  temps[temp_count++] = g.TempRegister();
  OptionalOpIndex projection0 = selector->FindProjection(node, 0);
  OptionalOpIndex projection1 = selector->FindProjection(node, 1);
  if (projection0.valid()) {
    outputs[output_count++] = g.DefineAsFixed(projection0.value(), r2);
  } else {
    temps[temp_count++] = g.TempRegister(r2);
  }
  if (projection1.valid()) {
    outputs[output_count++] = g.DefineAsFixed(projection1.value(), r3);
  } else {
    temps[temp_count++] = g.TempRegister(r3);
  }
  selector->Emit(code, output_count, outputs, arraysize(inputs), inputs,
                 temp_count, temps);
}
void InstructionSelectorT::VisitStackSlot(OpIndex node) {
  const StackSlotOp& stack_slot = Cast<StackSlotOp>(node);
  int slot = frame_->AllocateSpillSlot(stack_slot.size, stack_slot.alignment,
                                       stack_slot.is_tagged);
  OperandGenerator g(this);

  Emit(kArchStackSlot, g.DefineAsRegister(node),
       sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
void InstructionSelectorT::VisitAbortCSADcheck(OpIndex node) {
  ArmOperandGeneratorT g(this);
  Emit(kArchAbortCSADcheck, g.NoOutput(),
       g.UseFixed(this->input_at(node, 0), r1));
}
#if V8_ENABLE_WEBASSEMBLY
MachineRepresentation MachineRepresentationOf(
    Simd128LaneMemoryOp::LaneKind lane_kind) {
  switch (lane_kind) {
    case Simd128LaneMemoryOp::LaneKind::k8:
      return MachineRepresentation::kWord8;
    case Simd128LaneMemoryOp::LaneKind::k16:
      return MachineRepresentation::kWord16;
    case Simd128LaneMemoryOp::LaneKind::k32:
      return MachineRepresentation::kWord32;
    case Simd128LaneMemoryOp::LaneKind::k64:
      return MachineRepresentation::kWord64;
  }
}
void InstructionSelectorT::VisitStoreLane(OpIndex node) {
  const Simd128LaneMemoryOp& store = Get(node).Cast<Simd128LaneMemoryOp>();
  LoadStoreLaneParams f(MachineRepresentationOf(store.lane_kind), store.lane);
  InstructionCode opcode =
      f.low_op ? kArmS128StoreLaneLow : kArmS128StoreLaneHigh;
  opcode |= MiscField::encode(f.sz);

  ArmOperandGeneratorT g(this);
  InstructionOperand inputs[4];
  size_t input_count = 4;
  inputs[0] = g.UseRegister(store.value());
  inputs[1] = g.UseImmediate(f.laneidx);
  inputs[2] = g.UseRegister(store.base());
  inputs[3] = g.UseRegister(store.index());
  EmitAddBeforeS128LoadStore(this, &opcode, &input_count, &inputs[2]);
  Emit(opcode, 0, nullptr, input_count, inputs);
}
void InstructionSelectorT::VisitLoadLane(OpIndex node) {
  const Simd128LaneMemoryOp& load = this->Get(node).Cast<Simd128LaneMemoryOp>();
  LoadStoreLaneParams f(MachineRepresentationOf(load.lane_kind), load.lane);
  InstructionCode opcode =
      f.low_op ? kArmS128LoadLaneLow : kArmS128LoadLaneHigh;
  opcode |= MiscField::encode(f.sz);

  ArmOperandGeneratorT g(this);
  InstructionOperand output = g.DefineSameAsFirst(node);
  InstructionOperand inputs[4];
  size_t input_count = 4;
  inputs[0] = g.UseRegister(load.value());
  inputs[1] = g.UseImmediate(f.laneidx);
  inputs[2] = g.UseRegister(load.base());
  inputs[3] = g.UseRegister(load.index());
  EmitAddBeforeS128LoadStore(this, &opcode, &input_count, &inputs[2]);
  Emit(opcode, 1, &output, input_count, inputs);
}
void InstructionSelectorT::VisitLoadTransform(OpIndex node) {
  const Simd128LoadTransformOp& op =
      this->Get(node).Cast<Simd128LoadTransformOp>();
  InstructionCode opcode = kArchNop;
  switch (op.transform_kind) {
    case Simd128LoadTransformOp::TransformKind::k8Splat:
      opcode = kArmS128Load8Splat;
      break;
    case Simd128LoadTransformOp::TransformKind::k16Splat:
      opcode = kArmS128Load16Splat;
      break;
    case Simd128LoadTransformOp::TransformKind::k32Splat:
      opcode = kArmS128Load32Splat;
      break;
    case Simd128LoadTransformOp::TransformKind::k64Splat:
      opcode = kArmS128Load64Splat;
      break;
    case Simd128LoadTransformOp::TransformKind::k8x8S:
      opcode = kArmS128Load8x8S;
      break;
    case Simd128LoadTransformOp::TransformKind::k8x8U:
      opcode = kArmS128Load8x8U;
      break;
    case Simd128LoadTransformOp::TransformKind::k16x4S:
      opcode = kArmS128Load16x4S;
      break;
    case Simd128LoadTransformOp::TransformKind::k16x4U:
      opcode = kArmS128Load16x4U;
      break;
    case Simd128LoadTransformOp::TransformKind::k32x2S:
      opcode = kArmS128Load32x2S;
      break;
    case Simd128LoadTransformOp::TransformKind::k32x2U:
      opcode = kArmS128Load32x2U;
      break;
    case Simd128LoadTransformOp::TransformKind::k32Zero:
      opcode = kArmS128Load32Zero;
      break;
    case Simd128LoadTransformOp::TransformKind::k64Zero:
      opcode = kArmS128Load64Zero;
      break;
    default:
      UNIMPLEMENTED();
  }

  ArmOperandGeneratorT g(this);
  InstructionOperand output = g.DefineAsRegister(node);
  InstructionOperand inputs[2];
  size_t input_count = 2;
  inputs[0] = g.UseRegister(op.base());
  inputs[1] = g.UseRegister(op.index());
  EmitAddBeforeS128LoadStore(this, &opcode, &input_count, &inputs[0]);
  Emit(opcode, 1, &output, input_count, inputs);
}
void InstructionSelectorT::VisitProtectedLoad(OpIndex node) {
  UNIMPLEMENTED();
}
    case MachineRepresentation::kWord8:
      return kAtomicStoreWord8;
    case MachineRepresentation::kWord16:
      return kAtomicStoreWord16;
    case MachineRepresentation::kWord32:
      return kAtomicStoreWord32;
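// Shared store lowering for plain stores, write-barrier stores, and atomic
// stores: the three cases differ only in the opcode chosen (and in the
// barriers the code generator emits around it), so one path handles them all.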
void VisitStoreCommon(InstructionSelectorT* selector, OpIndex node,
                      StoreRepresentation store_rep,
                      std::optional<AtomicMemoryOrder> atomic_order) {
  ArmOperandGeneratorT g(selector);
  auto store_view = selector->store_view(node);
  OpIndex base = store_view.base();
  OpIndex index = selector->value(store_view.index());
  OpIndex value = store_view.value();

  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  if (write_barrier_kind != kNoWriteBarrier &&
      !v8_flags.disable_write_barriers) {
    DCHECK(CanBeTaggedPointer(rep));
    AddressingMode addressing_mode;
    InstructionOperand inputs[3];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    // OutOfLineRecordWrite uses the index in an 'add' instruction as well as
    // for the store itself, so we must check compatibility with both.
    if (g.CanBeImmediate(index, kArmAdd) && g.CanBeImmediate(index, kArmStr)) {
      inputs[input_count++] = g.UseImmediate(index);
      addressing_mode = kMode_Offset_RI;
    } else {
      inputs[input_count++] = g.UseUniqueRegister(index);
      addressing_mode = kMode_Offset_RR;
    }
    inputs[input_count++] = g.UseUniqueRegister(value);
    InstructionCode code;
    if (!atomic_order) {
      code = kArchStoreWithWriteBarrier;
    } else {
      code = kArchAtomicStoreWithWriteBarrier;
    }
    code |= AddressingModeField::encode(addressing_mode);
    selector->Emit(code, 0, nullptr, input_count, inputs);
  } else {
    InstructionCode opcode = kArchNop;
    if (!atomic_order) {
      opcode = GetStoreOpcode(rep);
    } else {
      opcode = GetAtomicStoreOpcode(rep);
    }

    std::optional<ExternalReference> external_base;
    ExternalReference reference_value;
    if (selector->MatchExternalConstant(store_view.base(),
                                        &reference_value)) {
      external_base = reference_value;
    }

    if (external_base &&
        selector->CanAddressRelativeToRootsRegister(*external_base)) {
      int64_t index_constant;
      if (selector->MatchSignedIntegralConstant(index, &index_constant)) {
        ptrdiff_t const delta =
            index_constant +
            MacroAssemblerBase::RootRegisterOffsetForExternalReference(
                selector->isolate(), *external_base);
        int input_count = 2;
        InstructionOperand inputs[2];
        inputs[0] = g.UseRegister(value);
        inputs[1] = g.UseImmediate(static_cast<int32_t>(delta));
        opcode |= AddressingModeField::encode(kMode_Root);
        selector->Emit(opcode, 0, nullptr, input_count, inputs);
        return;
      }
    }

    if (selector->is_load_root_register(base)) {
      int input_count = 2;
      InstructionOperand inputs[2];
      inputs[0] = g.UseRegister(value);
      inputs[1] = g.UseImmediate(index);
      opcode |= AddressingModeField::encode(kMode_Root);
      selector->Emit(opcode, 0, nullptr, input_count, inputs);
      return;
    }

    InstructionOperand inputs[4];
    size_t input_count = 0;
    inputs[input_count++] = g.UseRegister(value);
    inputs[input_count++] = g.UseRegister(base);
    EmitStore(selector, opcode, input_count, inputs, index);
  }
}
void InstructionSelectorT::VisitStore(OpIndex node) {
  VisitStoreCommon(this, node, this->store_view(node).stored_rep(),
                   std::nullopt);
}
void InstructionSelectorT::VisitProtectedStore(OpIndex node) {
  UNIMPLEMENTED();
}
void InstructionSelectorT::VisitUnalignedLoad(OpIndex node) {
  MachineRepresentation load_rep =
      this->load_view(node).loaded_rep().representation();
  ArmOperandGeneratorT g(this);
  OpIndex base = this->input_at(node, 0);
  OpIndex index = this->input_at(node, 1);

  InstructionCode opcode = kArmLdr;
  // Only floating point loads need special handling; integer loads support
  // unaligned access directly.
  switch (load_rep) {
    case MachineRepresentation::kFloat32: {
      InstructionOperand temp = g.TempRegister();
      EmitLoad(this, opcode, &temp, base, index);
      Emit(kArmVmovF32U32, g.DefineAsRegister(node), temp);
      return;
    }
    case MachineRepresentation::kFloat64: {
      // Compute the address of the least-significant byte of the FP value.
      InstructionCode add_opcode = kArmAdd;
      InstructionOperand inputs[3];
      inputs[0] = g.UseRegister(base);

      size_t input_count;
      if (TryMatchImmediateOrShift(this, &add_opcode, index, &input_count,
                                   &inputs[1])) {
        // input_count has been set by TryMatchImmediateOrShift(), so increment
        // it to account for the base register in inputs[0].
        input_count++;
      } else {
        add_opcode |= AddressingModeField::encode(kMode_Operand2_R);
        inputs[1] = g.UseRegister(index);
        input_count = 2;  // Base register and index.
      }

      InstructionOperand addr = g.TempRegister();
      Emit(add_opcode, 1, &addr, input_count, inputs);

      if (CpuFeatures::IsSupported(NEON)) {
        // With NEON we can load directly from the calculated address.
        InstructionCode op = kArmVld1F64;
        op |= AddressingModeField::encode(kMode_Operand2_R);
        Emit(op, g.DefineAsRegister(node), addr);
      } else {
        // Load both halves and move to an FP register.
        InstructionOperand fp_lo = g.TempRegister();
        InstructionOperand fp_hi = g.TempRegister();
        opcode |= AddressingModeField::encode(kMode_Offset_RI);
        Emit(opcode, fp_lo, addr, g.TempImmediate(0));
        Emit(opcode, fp_hi, addr, g.TempImmediate(4));
        Emit(kArmVmovF64U32U32, g.DefineAsRegister(node), fp_lo, fp_hi);
      }
      return;
    }
    default:
      UNREACHABLE();
  }
}
void InstructionSelectorT::VisitUnalignedStore(OpIndex node) {
  ArmOperandGeneratorT g(this);
  auto store_view = this->store_view(node);
  OpIndex base = store_view.base();
  OpIndex index = this->value(store_view.index());
  OpIndex value = store_view.value();

  InstructionOperand inputs[4];
  size_t input_count = 0;

  // Only floating point stores need special handling; integer stores support
  // unaligned access directly.
  switch (store_view.stored_rep().representation()) {
    case MachineRepresentation::kFloat32: {
      inputs[input_count++] = g.TempRegister();
      Emit(kArmVmovU32F32, inputs[0], g.UseRegister(value));
      inputs[input_count++] = g.UseRegister(base);
      EmitStore(this, kArmStr, input_count, inputs, index);
      return;
    }
    case MachineRepresentation::kFloat64: {
      if (CpuFeatures::IsSupported(NEON)) {
        InstructionOperand address = g.TempRegister();
        {
          // First we have to calculate the actual address.
          InstructionCode add_opcode = kArmAdd;
          InstructionOperand inputs[3];
          inputs[0] = g.UseRegister(base);

          size_t input_count;
          if (TryMatchImmediateOrShift(this, &add_opcode, index, &input_count,
                                       &inputs[1])) {
            // input_count has been set by TryMatchImmediateOrShift(), so
            // increment it to account for the base register in inputs[0].
            input_count++;
          } else {
            add_opcode |= AddressingModeField::encode(kMode_Operand2_R);
            inputs[1] = g.UseRegister(index);
            input_count = 2;  // Base register and index.
          }

          Emit(add_opcode, 1, &address, input_count, inputs);
        }

        inputs[input_count++] = g.UseRegister(value);
        inputs[input_count++] = address;
        InstructionCode op = kArmVst1F64;
        op |= AddressingModeField::encode(kMode_Operand2_R);
        Emit(op, 0, nullptr, input_count, inputs);
      } else {
        // Store a 64-bit floating point value using two 32-bit integer
        // stores. Move the value into a pair of integer registers first.
        InstructionOperand fp[] = {g.TempRegister(), g.TempRegister()};
        inputs[input_count++] = g.UseRegister(value);
        Emit(kArmVmovU32U32F64, arraysize(fp), fp, input_count, inputs);

        // Store the least-significant half.
        inputs[0] = fp[0];  // Low 32 bits of the FP value.
        inputs[input_count++] = g.UseRegister(base);
        EmitStore(this, kArmStr, input_count, inputs, index);

        // Store the most-significant half at base + 4.
        InstructionOperand base4 = g.TempRegister();
        Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_I), base4,
             g.UseRegister(base), g.TempImmediate(4));
        inputs[0] = fp[1];  // High 32 bits of the FP value.
        inputs[1] = base4;
        EmitStore(this, kArmStr, input_count, inputs, index);
      }
      return;
    }
    default:
      UNREACHABLE();
  }
}
void EmitBic(InstructionSelectorT* selector, OpIndex node, OpIndex left,
             OpIndex right) {
  ArmOperandGeneratorT g(selector);
  InstructionCode opcode = kArmBic;
  InstructionOperand value_operand;
  InstructionOperand shift_operand;
  if (TryMatchShift(selector, &opcode, right, &value_operand, &shift_operand)) {
    selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
                   value_operand, shift_operand);
    return;
  }
  selector->Emit(opcode | AddressingModeField::encode(kMode_Operand2_R),
                 g.DefineAsRegister(node), g.UseRegister(left),
                 g.UseRegister(right));
}
void EmitUbfx(InstructionSelectorT* selector, OpIndex node, OpIndex left,
              uint32_t lsb, uint32_t width) {
  DCHECK_LE(lsb, 31u);
  DCHECK_LE(1u, width);
  DCHECK_LE(width, 32u - lsb);
  ArmOperandGeneratorT g(selector);
  selector->Emit(kArmUbfx, g.DefineAsRegister(node), g.UseRegister(left),
                 g.TempImmediate(lsb), g.TempImmediate(width));
}
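// Word32And below tries several strength reductions before the generic AND:
// and(x, xor(y, -1)) becomes BIC, masks like 0xFF/0xFFFF (possibly combined
// with a logical shift right) become UXTB/UXTH, and contiguous bit masks
// become UBFX or BFC on ARMv7.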
void InstructionSelectorT::VisitWord32And(OpIndex node) {
  ArmOperandGeneratorT g(this);

  const WordBinopOp& bitwise_and = Get(node).Cast<WordBinopOp>();
  const Operation& lhs = Get(bitwise_and.left());
  const Operation& rhs = Get(bitwise_and.right());

  if (lhs.Is<Opmask::kWord32BitwiseXor>() &&
      CanCover(node, bitwise_and.left())) {
    const WordBinopOp& bitwise_xor = lhs.Cast<WordBinopOp>();
    int32_t bitmask;
    if (MatchIntegralWord32Constant(bitwise_xor.right(), &bitmask) &&
        bitmask == -1) {
      EmitBic(this, node, bitwise_and.right(), bitwise_xor.left());
      return;
    }
  }

  if (rhs.Is<Opmask::kWord32BitwiseXor>() &&
      CanCover(node, bitwise_and.right())) {
    const WordBinopOp& bitwise_xor = rhs.Cast<WordBinopOp>();
    int32_t bitmask;
    if (MatchIntegralWord32Constant(bitwise_xor.right(), &bitmask) &&
        bitmask == -1) {
      EmitBic(this, node, bitwise_and.left(), bitwise_xor.left());
      return;
    }
  }

  if (uint32_t value;
      MatchIntegralWord32Constant(bitwise_and.right(), &value)) {
    uint32_t width = base::bits::CountPopulation(value);
    uint32_t leading_zeros = base::bits::CountLeadingZeros32(value);

    // Try to merge SHR operations on the left hand input into this AND.
    if (lhs.Is<Opmask::kWord32ShiftRightLogical>()) {
      const ShiftOp& shr = lhs.Cast<ShiftOp>();
      if (uint32_t shift; MatchIntegralWord32Constant(shr.right(), &shift)) {
        if (((shift == 8) || (shift == 16) || (shift == 24)) &&
            (value == 0xFF)) {
          // Merge SHR into AND by emitting a UXTB with a bytewise rotation.
          Emit(kArmUxtb, g.DefineAsRegister(node), g.UseRegister(shr.left()),
               g.TempImmediate(shift));
          return;
        } else if (((shift == 8) || (shift == 16)) && (value == 0xFFFF)) {
          // Merge SHR into AND by emitting a UXTH with a bytewise rotation.
          Emit(kArmUxth, g.DefineAsRegister(node), g.UseRegister(shr.left()),
               g.TempImmediate(shift));
          return;
        } else if (IsSupported(ARMv7) && (width != 0) &&
                   ((leading_zeros + width) == 32)) {
          // Merge SHR into AND by emitting a UBFX instruction.
          if ((1 <= shift) && (shift <= 31)) {
            // UBFX cannot extract bits past the register size, but since
            // shifting the original value introduced zeros we can use a
            // smaller width and the remaining bits will still be zeros.
            EmitUbfx(this, node, shr.left(), shift,
                     std::min(width, 32 - shift));
            return;
          }
        }
      }
    } else if (value == 0xFFFF) {
      // Emit UXTH for this AND.
      Emit(kArmUxth, g.DefineAsRegister(node),
           g.UseRegister(bitwise_and.left()), g.TempImmediate(0));
      return;
    }
    if (g.CanBeImmediate(~value)) {
      // Emit BIC for this AND by inverting the mask value first.
      Emit(kArmBic | AddressingModeField::encode(kMode_Operand2_I),
           g.DefineAsRegister(node), g.UseRegister(bitwise_and.left()),
           g.TempImmediate(~value));
      return;
    }
    if (!g.CanBeImmediate(value) && IsSupported(ARMv7)) {
      // If the value has 9 to 23 contiguous set bits starting at the lsb,
      // replace this AND with UBFX.
      if ((width != 0) && ((leading_zeros + width) == 32) &&
          (9 <= leading_zeros) && (leading_zeros <= 23)) {
        EmitUbfx(this, node, bitwise_and.left(), 0, width);
        return;
      }

      width = 32 - width;
      leading_zeros = base::bits::CountLeadingZeros32(~value);
      uint32_t lsb = base::bits::CountTrailingZeros32(~value);
      if ((leading_zeros + width + lsb) == 32) {
        // This AND can be replaced with BFC.
        Emit(kArmBfc, g.DefineSameAsFirst(node),
             g.UseRegister(bitwise_and.left()), g.TempImmediate(lsb),
             g.TempImmediate(width));
        return;
      }
    }
  }
  VisitBinop(this, node, kArmAnd, kArmAnd);
}
void InstructionSelectorT::VisitWord32Or(OpIndex node) {
  VisitBinop(this, node, kArmOrr, kArmOrr);
}
void InstructionSelectorT::VisitWord32Xor(OpIndex node) {
  ArmOperandGeneratorT g(this);
  const WordBinopOp& bitwise_xor = this->Get(node).Cast<WordBinopOp>();
  int32_t mask;
  if (this->MatchIntegralWord32Constant(bitwise_xor.right(), &mask) &&
      mask == -1) {
    // xor(x, -1) is a bitwise negation; emit MVN, folding a shift if possible.
    InstructionCode opcode = kArmMvn;
    InstructionOperand value_operand;
    InstructionOperand shift_operand;
    if (TryMatchShift(this, &opcode, bitwise_xor.left(), &value_operand,
                      &shift_operand)) {
      Emit(opcode, g.DefineAsRegister(node), value_operand, shift_operand);
      return;
    }
    Emit(opcode | AddressingModeField::encode(kMode_Operand2_R),
         g.DefineAsRegister(node), g.UseRegister(bitwise_xor.left()));
    return;
  }
  VisitBinop(this, node, kArmEor, kArmEor);
}
  value = op.stack_limit();

  const int output_count = 0;

                       temp_count, temps, cont);
template <typename TryMatchShift>
void VisitShift(InstructionSelectorT* selector, OpIndex node,
                TryMatchShift try_match_shift, FlagsContinuationT* cont) {
  ArmOperandGeneratorT g(selector);
  InstructionCode opcode = kArmMov;
  InstructionOperand inputs[2];
  size_t input_count = 2;
  InstructionOperand outputs[1];
  size_t output_count = 0;

  CHECK(try_match_shift(selector, &opcode, node, &inputs[0], &inputs[1]));

  outputs[output_count++] = g.DefineAsRegister(node);

  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}
template <typename TryMatchShift>
void VisitShift(InstructionSelectorT* selector, OpIndex node,
                TryMatchShift try_match_shift) {
  FlagsContinuationT cont;
  VisitShift(selector, node, try_match_shift, &cont);
}
void InstructionSelectorT::VisitWord32Shl(OpIndex node) {
  VisitShift(this, node, TryMatchLSL);
}
void InstructionSelectorT::VisitWord32Shr(OpIndex node) {
  ArmOperandGeneratorT g(this);
  const ShiftOp& shr = this->Get(node).Cast<ShiftOp>();
  const Operation& lhs = this->Get(shr.left());
  if (uint32_t lsb; IsSupported(ARMv7) &&
                    lhs.Is<Opmask::kWord32BitwiseAnd>() &&
                    MatchIntegralWord32Constant(shr.right(), &lsb)) {
    const WordBinopOp& bitwise_and = lhs.Cast<WordBinopOp>();
    if (uint32_t value;
        MatchIntegralWord32Constant(bitwise_and.right(), &value)) {
      value = value >> lsb << lsb;  // Clear bits below the shift amount.
      uint32_t width = base::bits::CountPopulation(value);
      uint32_t msb = base::bits::CountLeadingZeros32(value);
      if ((width != 0) && (msb + width + lsb == 32)) {
        // Replace shr(and(x, mask), lsb) with a single UBFX.
        return EmitUbfx(this, node, bitwise_and.left(), lsb, width);
      }
    }
  }
  VisitShift(this, node, TryMatchLSR);
}
void InstructionSelectorT::VisitWord32Sar(OpIndex node) {
  ArmOperandGeneratorT g(this);
  const ShiftOp& sar = this->Get(node).Cast<ShiftOp>();
  const Operation& lhs = this->Get(sar.left());
  if (CanCover(node, sar.left()) && lhs.Is<Opmask::kWord32ShiftLeft>()) {
    const ShiftOp& shl = lhs.Cast<ShiftOp>();
    if (uint32_t sar_by, shl_by;
        MatchIntegralWord32Constant(sar.right(), &sar_by) &&
        MatchIntegralWord32Constant(shl.right(), &shl_by)) {
      if ((sar_by == shl_by) && (sar_by == 16)) {
        Emit(kArmSxth, g.DefineAsRegister(node), g.UseRegister(shl.left()),
             g.TempImmediate(0));
        return;
      } else if ((sar_by == shl_by) && (sar_by == 24)) {
        Emit(kArmSxtb, g.DefineAsRegister(node), g.UseRegister(shl.left()),
             g.TempImmediate(0));
        return;
      } else if (IsSupported(ARMv7) && (sar_by >= shl_by)) {
        Emit(kArmSbfx, g.DefineAsRegister(node), g.UseRegister(shl.left()),
             g.TempImmediate(sar_by - shl_by), g.TempImmediate(32 - sar_by));
        return;
      }
    }
  }
  VisitShift(this, node, TryMatchASR);
}
void InstructionSelectorT::VisitInt32PairAdd(OpIndex node) {
  ArmOperandGeneratorT g(this);

  OptionalOpIndex projection1 = FindProjection(node, 1);
  if (projection1.valid()) {
    // We use UseUniqueRegister here to avoid register sharing with the output
    // registers.
    InstructionOperand inputs[] = {
        g.UseRegister(this->input_at(node, 0)),
        g.UseUniqueRegister(this->input_at(node, 1)),
        g.UseRegister(this->input_at(node, 2)),
        g.UseUniqueRegister(this->input_at(node, 3))};

    InstructionOperand outputs[] = {g.DefineAsRegister(node),
                                    g.DefineAsRegister(projection1.value())};

    Emit(kArmAddPair, 2, outputs, 4, inputs);
  } else {
    // The high word of the result is not used, so we emit the standard 32-bit
    // instruction.
    Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R),
         g.DefineSameAsFirst(node), g.UseRegister(this->input_at(node, 0)),
         g.UseRegister(this->input_at(node, 2)));
  }
}
void InstructionSelectorT::VisitInt32PairSub(OpIndex node) {
  ArmOperandGeneratorT g(this);

  OptionalOpIndex projection1 = FindProjection(node, 1);
  if (projection1.valid()) {
    // We use UseUniqueRegister here to avoid register sharing with the output
    // registers.
    InstructionOperand inputs[] = {
        g.UseRegister(this->input_at(node, 0)),
        g.UseUniqueRegister(this->input_at(node, 1)),
        g.UseRegister(this->input_at(node, 2)),
        g.UseUniqueRegister(this->input_at(node, 3))};

    InstructionOperand outputs[] = {g.DefineAsRegister(node),
                                    g.DefineAsRegister(projection1.value())};

    Emit(kArmSubPair, 2, outputs, 4, inputs);
  } else {
    // The high word of the result is not used, so we emit the standard 32-bit
    // instruction.
    Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_R),
         g.DefineSameAsFirst(node), g.UseRegister(this->input_at(node, 0)),
         g.UseRegister(this->input_at(node, 2)));
  }
}
void InstructionSelectorT::VisitInt32PairMul(OpIndex node) {
  ArmOperandGeneratorT g(this);
  OptionalOpIndex projection1 = FindProjection(node, 1);
  if (projection1.valid()) {
    InstructionOperand inputs[] = {
        g.UseUniqueRegister(this->input_at(node, 0)),
        g.UseUniqueRegister(this->input_at(node, 1)),
        g.UseUniqueRegister(this->input_at(node, 2)),
        g.UseUniqueRegister(this->input_at(node, 3))};

    InstructionOperand outputs[] = {g.DefineAsRegister(node),
                                    g.DefineAsRegister(projection1.value())};

    Emit(kArmMulPair, 2, outputs, 4, inputs);
  } else {
    // The high word of the result is not used, so we emit the standard 32-bit
    // instruction.
    Emit(kArmMul | AddressingModeField::encode(kMode_Operand2_R),
         g.DefineSameAsFirst(node), g.UseRegister(this->input_at(node, 0)),
         g.UseRegister(this->input_at(node, 2)));
  }
}
void VisitWord32PairShift(InstructionSelectorT* selector,
                          InstructionCode opcode, OpIndex node) {
  ArmOperandGeneratorT g(selector);
  // We use g.UseUniqueRegister here to guarantee that there is no register
  // aliasing of input registers with output registers.
  InstructionOperand shift_operand;
  OpIndex shift_by = selector->input_at(node, 2);
  int64_t unused;
  if (selector->MatchSignedIntegralConstant(shift_by, &unused)) {
    shift_operand = g.UseImmediate(shift_by);
  } else {
    shift_operand = g.UseUniqueRegister(shift_by);
  }

  InstructionOperand inputs[] = {
      g.UseUniqueRegister(selector->input_at(node, 0)),
      g.UseUniqueRegister(selector->input_at(node, 1)), shift_operand};

  OptionalOpIndex projection1 = selector->FindProjection(node, 1);

  InstructionOperand outputs[2];
  InstructionOperand temps[1];
  size_t output_count = 0;
  size_t temp_count = 0;

  outputs[output_count++] = g.DefineAsRegister(node);
  if (projection1.valid()) {
    outputs[output_count++] = g.DefineAsRegister(projection1.value());
  } else {
    temps[temp_count++] = g.TempRegister();
  }

  selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
}
void InstructionSelectorT::VisitWord32PairShl(OpIndex node) {
  VisitWord32PairShift(this, kArmLslPair, node);
}

void InstructionSelectorT::VisitWord32PairShr(OpIndex node) {
  VisitWord32PairShift(this, kArmLsrPair, node);
}

void InstructionSelectorT::VisitWord32PairSar(OpIndex node) {
  VisitWord32PairShift(this, kArmAsrPair, node);
}
void InstructionSelectorT::VisitWord32Ror(OpIndex node) {
  VisitShift(this, node, TryMatchROR);
}
void InstructionSelectorT::VisitWord32ReverseBits(OpIndex node) {
  DCHECK(IsSupported(ARMv7));
  VisitRR(this, kArmRbit, node);
}
void InstructionSelectorT::VisitWord64ReverseBytes(OpIndex node) {
  UNREACHABLE();
}

void InstructionSelectorT::VisitWord32ReverseBytes(OpIndex node) {
  VisitRR(this, kArmRev, node);
}

void InstructionSelectorT::VisitSimd128ReverseBytes(OpIndex node) {
  UNREACHABLE();
}
void InstructionSelectorT::VisitInt32Add(OpIndex node) {
  ArmOperandGeneratorT g(this);
  const WordBinopOp& add = this->Get(node).Cast<WordBinopOp>();
  const Operation& left = this->Get(add.left());

  if (CanCover(node, add.left())) {
    if (left.Is<Opmask::kWord32Mul>()) {
      const WordBinopOp& mul = left.Cast<WordBinopOp>();
      Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mul.left()),
           g.UseRegister(mul.right()), g.UseRegister(add.right()));
      return;
    }
    if (left.Is<Opmask::kWord32SignedMulOverflownBits>()) {
      const WordBinopOp& mul = left.Cast<WordBinopOp>();
      Emit(kArmSmmla, g.DefineAsRegister(node), g.UseRegister(mul.left()),
           g.UseRegister(mul.right()), g.UseRegister(add.right()));
      return;
    }
    if (left.Is<Opmask::kWord32BitwiseAnd>()) {
      const WordBinopOp& bitwise_and = left.Cast<WordBinopOp>();
      uint32_t mask;
      if (MatchIntegralWord32Constant(bitwise_and.right(), &mask)) {
        if (mask == 0xFF) {
          Emit(kArmUxtab, g.DefineAsRegister(node), g.UseRegister(add.right()),
               g.UseRegister(bitwise_and.left()), g.TempImmediate(0));
          return;
        } else if (mask == 0xFFFF) {
          Emit(kArmUxtah, g.DefineAsRegister(node), g.UseRegister(add.right()),
               g.UseRegister(bitwise_and.left()), g.TempImmediate(0));
          return;
        }
      }
    } else if (left.Is<Opmask::kWord32ShiftRightArithmetic>()) {
      const ShiftOp& lhs_shift = left.Cast<ShiftOp>();
      if (CanCover(add.left(), lhs_shift.left()) &&
          Get(lhs_shift.left()).Is<Opmask::kWord32ShiftLeft>()) {
        const ShiftOp& lhs_shift_lhs_shift =
            Get(lhs_shift.left()).Cast<ShiftOp>();
        uint32_t sar_by, shl_by;
        if (MatchIntegralWord32Constant(lhs_shift.right(), &sar_by) &&
            MatchIntegralWord32Constant(lhs_shift_lhs_shift.right(),
                                        &shl_by)) {
          if (sar_by == 24 && shl_by == 24) {
            Emit(kArmSxtab, g.DefineAsRegister(node),
                 g.UseRegister(add.right()),
                 g.UseRegister(lhs_shift_lhs_shift.left()),
                 g.TempImmediate(0));
            return;
          }
          if (sar_by == 16 && shl_by == 16) {
            Emit(kArmSxtah, g.DefineAsRegister(node),
                 g.UseRegister(add.right()),
                 g.UseRegister(lhs_shift_lhs_shift.left()),
                 g.TempImmediate(0));
            return;
          }
        }
      }
    }
  }

  const Operation& right = this->Get(add.right());
  if (CanCover(node, add.right())) {
    if (right.Is<Opmask::kWord32Mul>()) {
      const WordBinopOp& mul = right.Cast<WordBinopOp>();
      Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mul.left()),
           g.UseRegister(mul.right()), g.UseRegister(add.left()));
      return;
    }
    if (right.Is<Opmask::kWord32SignedMulOverflownBits>()) {
      const WordBinopOp& mul = right.Cast<WordBinopOp>();
      Emit(kArmSmmla, g.DefineAsRegister(node), g.UseRegister(mul.left()),
           g.UseRegister(mul.right()), g.UseRegister(add.left()));
      return;
    }
    if (right.Is<Opmask::kWord32BitwiseAnd>()) {
      const WordBinopOp& bitwise_and = right.Cast<WordBinopOp>();
      uint32_t mask;
      if (MatchIntegralWord32Constant(bitwise_and.right(), &mask)) {
        if (mask == 0xFF) {
          Emit(kArmUxtab, g.DefineAsRegister(node), g.UseRegister(add.left()),
               g.UseRegister(bitwise_and.left()), g.TempImmediate(0));
          return;
        } else if (mask == 0xFFFF) {
          Emit(kArmUxtah, g.DefineAsRegister(node), g.UseRegister(add.left()),
               g.UseRegister(bitwise_and.left()), g.TempImmediate(0));
          return;
        }
      }
    } else if (right.Is<Opmask::kWord32ShiftRightArithmetic>()) {
      const ShiftOp& rhs_shift = right.Cast<ShiftOp>();
      if (CanCover(add.right(), rhs_shift.left()) &&
          Get(rhs_shift.left()).Is<Opmask::kWord32ShiftLeft>()) {
        const ShiftOp& rhs_shift_left = Get(rhs_shift.left()).Cast<ShiftOp>();
        uint32_t sar_by, shl_by;
        if (MatchIntegralWord32Constant(rhs_shift.right(), &sar_by) &&
            MatchIntegralWord32Constant(rhs_shift_left.right(), &shl_by)) {
          if (sar_by == 24 && shl_by == 24) {
            Emit(kArmSxtab, g.DefineAsRegister(node), g.UseRegister(add.left()),
                 g.UseRegister(rhs_shift_left.left()), g.TempImmediate(0));
            return;
          } else if (sar_by == 16 && shl_by == 16) {
            Emit(kArmSxtah, g.DefineAsRegister(node), g.UseRegister(add.left()),
                 g.UseRegister(rhs_shift_left.left()), g.TempImmediate(0));
            return;
          }
        }
      }
    }
  }
  VisitBinop(this, node, kArmAdd, kArmAdd);
}
void InstructionSelectorT::VisitInt32Sub(OpIndex node) {
  ArmOperandGeneratorT g(this);
  const WordBinopOp& sub = this->Get(node).Cast<WordBinopOp>();
  const Operation& rhs = this->Get(sub.right());
  if (IsSupported(ARMv7) && rhs.Is<Opmask::kWord32Mul>() &&
      CanCover(node, sub.right())) {
    const WordBinopOp& mul = rhs.Cast<WordBinopOp>();
    Emit(kArmMls, g.DefineAsRegister(node), g.UseRegister(mul.left()),
         g.UseRegister(mul.right()), g.UseRegister(sub.left()));
    return;
  }
  VisitBinop(this, node, kArmSub, kArmRsb);
}
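// Overflow check for 32-bit multiply: SMULL produces the full 64-bit product;
// the product overflows iff the high word differs from the sign extension of
// the low word, so the high word is compared against (low >> 31) using an
// arithmetic-shift operand.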
void EmitInt32MulWithOverflow(InstructionSelectorT* selector, OpIndex node,
                              FlagsContinuationT* cont) {
  ArmOperandGeneratorT g(selector);
  OpIndex lhs = selector->input_at(node, 0);
  OpIndex rhs = selector->input_at(node, 1);
  InstructionOperand result_operand = g.DefineAsRegister(node);
  InstructionOperand temp_operand = g.TempRegister();
  InstructionOperand outputs[] = {result_operand, temp_operand};
  InstructionOperand inputs[] = {g.UseRegister(lhs), g.UseRegister(rhs)};
  selector->Emit(kArmSmull, 2, outputs, 2, inputs);

  // result operand needs a shift operator.
  InstructionOperand shift_31 = g.UseImmediate(31);
  InstructionCode opcode =
      kArmCmp | AddressingModeField::encode(kMode_Operand2_R_ASR_I);
  selector->EmitWithContinuation(opcode, temp_operand, result_operand, shift_31,
                                 cont);
}
void InstructionSelectorT::VisitInt32Mul(OpIndex node) {
  ArmOperandGeneratorT g(this);
  const WordBinopOp& mul = this->Get(node).Cast<WordBinopOp>();
  int32_t value;
  if (MatchIntegralWord32Constant(mul.right(), &value) && value > 0) {
    if (base::bits::IsPowerOfTwo(value - 1)) {
      // x * (2^k + 1) -> x + (x << k)
      Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
           g.DefineAsRegister(node), g.UseRegister(mul.left()),
           g.UseRegister(mul.left()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1)));
      return;
    }
    if (base::bits::IsPowerOfTwo(value + 1)) {
      // x * (2^k - 1) -> (x << k) - x
      Emit(kArmRsb | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
           g.DefineAsRegister(node), g.UseRegister(mul.left()),
           g.UseRegister(mul.left()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
      return;
    }
  }
  VisitRRR(this, kArmMul, node);
}
void InstructionSelectorT::VisitUint32MulHigh(OpIndex node) {
  OpIndex left = this->input_at(node, 0);
  OpIndex right = this->input_at(node, 1);
  ArmOperandGeneratorT g(this);
  InstructionOperand outputs[] = {g.TempRegister(), g.DefineAsRegister(node)};
  InstructionOperand inputs[] = {g.UseRegister(left), g.UseRegister(right)};
  Emit(kArmUmull, arraysize(outputs), outputs, arraysize(inputs), inputs);
}
void InstructionSelectorT::VisitInt32Div(OpIndex node) {
  VisitDiv(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
}

void InstructionSelectorT::VisitUint32Div(OpIndex node) {
  VisitDiv(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
}

void InstructionSelectorT::VisitInt32Mod(OpIndex node) {
  VisitMod(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
}

void InstructionSelectorT::VisitUint32Mod(OpIndex node) {
  VisitMod(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
}
#define RR_OP_T_LIST(V)                          \
  V(ChangeInt32ToFloat64, kArmVcvtF64S32)        \
  V(ChangeUint32ToFloat64, kArmVcvtF64U32)       \
  V(ChangeFloat32ToFloat64, kArmVcvtF64F32)      \
  V(ChangeFloat64ToInt32, kArmVcvtS32F64)        \
  V(ChangeFloat64ToUint32, kArmVcvtU32F64)       \
  V(RoundInt32ToFloat32, kArmVcvtF32S32)         \
  V(RoundUint32ToFloat32, kArmVcvtF32U32)        \
  V(Float64ExtractLowWord32, kArmVmovLowU32F64)  \
  V(Float64ExtractHighWord32, kArmVmovHighU32F64) \
  V(TruncateFloat64ToFloat32, kArmVcvtF32F64)    \
  V(TruncateFloat64ToWord32, kArchTruncateDoubleToI) \
  V(TruncateFloat64ToUint32, kArmVcvtU32F64)     \
  V(BitcastFloat32ToInt32, kArmVmovU32F32)       \
  V(BitcastInt32ToFloat32, kArmVmovF32U32)       \
  V(RoundFloat64ToInt32, kArmVcvtS32F64)         \
  V(Float64SilenceNaN, kArmFloat64SilenceNaN)    \
  V(Float32Abs, kArmVabsF32)                     \
  V(Float64Abs, kArmVabsF64)                     \
  V(Float32Neg, kArmVnegF32)                     \
  V(Float64Neg, kArmVnegF64)                     \
  V(Float32Sqrt, kArmVsqrtF32)                   \
  V(Float64Sqrt, kArmVsqrtF64)                   \
  V(Word32Clz, kArmClz)
#define RR_OP_T_LIST_V8(V)                       \
  V(Float32RoundDown, kArmVrintmF32)             \
  V(Float64RoundDown, kArmVrintmF64)             \
  V(Float32RoundUp, kArmVrintpF32)               \
  V(Float64RoundUp, kArmVrintpF64)               \
  V(Float32RoundTruncate, kArmVrintzF32)         \
  V(Float64RoundTruncate, kArmVrintzF64)         \
  V(Float64RoundTiesAway, kArmVrintaF64)         \
  V(Float32RoundTiesEven, kArmVrintnF32)         \
  V(Float64RoundTiesEven, kArmVrintnF64)         \
  IF_WASM(V, F64x2Ceil, kArmF64x2Ceil)           \
  IF_WASM(V, F64x2Floor, kArmF64x2Floor)         \
  IF_WASM(V, F64x2Trunc, kArmF64x2Trunc)         \
  IF_WASM(V, F64x2NearestInt, kArmF64x2NearestInt) \
  IF_WASM(V, F32x4Ceil, kArmVrintpF32)           \
  IF_WASM(V, F32x4Floor, kArmVrintmF32)          \
  IF_WASM(V, F32x4Trunc, kArmVrintzF32)          \
  IF_WASM(V, F32x4NearestInt, kArmVrintnF32)
#define RRR_OP_T_LIST(V)          \
  V(Float64Div, kArmVdivF64)      \
  V(Float32Mul, kArmVmulF32)      \
  V(Float64Mul, kArmVmulF64)      \
  V(Float32Div, kArmVdivF32)      \
  V(Float32Max, kArmFloat32Max)   \
  V(Float64Max, kArmFloat64Max)   \
  V(Float32Min, kArmFloat32Min)   \
  V(Float64Min, kArmFloat64Min)   \
  V(Int32MulHigh, kArmSmmul)
#define RR_VISITOR(Name, opcode)                         \
  void InstructionSelectorT::Visit##Name(OpIndex node) { \
    VisitRR(this, opcode, node);                         \
  }

RR_OP_T_LIST(RR_VISITOR)
#undef RR_VISITOR
#define RR_VISITOR_V8(Name, opcode)                      \
  void InstructionSelectorT::Visit##Name(OpIndex node) { \
    DCHECK(CpuFeatures::IsSupported(ARMv8));             \
    VisitRR(this, opcode, node);                         \
  }

RR_OP_T_LIST_V8(RR_VISITOR_V8)
#undef RR_VISITOR_V8
#undef RR_OP_T_LIST_V8
#define RRR_VISITOR(Name, opcode)                        \
  void InstructionSelectorT::Visit##Name(OpIndex node) { \
    VisitRRR(this, opcode, node);                        \
  }

RRR_OP_T_LIST(RRR_VISITOR)
#undef RRR_VISITOR
#undef RRR_OP_T_LIST
void InstructionSelectorT::VisitTruncateFloat64ToFloat16RawBits(OpIndex node) {
  UNIMPLEMENTED();
}

void InstructionSelectorT::VisitChangeFloat16RawBitsToFloat64(OpIndex node) {
  UNIMPLEMENTED();
}
void InstructionSelectorT::VisitFloat32Add(OpIndex node) {
  ArmOperandGeneratorT g(this);
  const FloatBinopOp& add = this->Get(node).Cast<FloatBinopOp>();
  const Operation& lhs = this->Get(add.left());
  if (lhs.Is<Opmask::kFloat32Mul>() && CanCover(node, add.left())) {
    const FloatBinopOp& mul = lhs.Cast<FloatBinopOp>();
    Emit(kArmVmlaF32, g.DefineSameAsFirst(node), g.UseRegister(add.right()),
         g.UseRegister(mul.left()), g.UseRegister(mul.right()));
    return;
  }
  const Operation& rhs = this->Get(add.right());
  if (rhs.Is<Opmask::kFloat32Mul>() && CanCover(node, add.right())) {
    const FloatBinopOp& mul = rhs.Cast<FloatBinopOp>();
    Emit(kArmVmlaF32, g.DefineSameAsFirst(node), g.UseRegister(add.left()),
         g.UseRegister(mul.left()), g.UseRegister(mul.right()));
    return;
  }
  VisitRRR(this, kArmVaddF32, node);
}
void InstructionSelectorT::VisitFloat64Add(OpIndex node) {
  ArmOperandGeneratorT g(this);
  const FloatBinopOp& add = this->Get(node).Cast<FloatBinopOp>();
  const Operation& lhs = this->Get(add.left());
  if (lhs.Is<Opmask::kFloat64Mul>() && CanCover(node, add.left())) {
    const FloatBinopOp& mul = lhs.Cast<FloatBinopOp>();
    Emit(kArmVmlaF64, g.DefineSameAsFirst(node), g.UseRegister(add.right()),
         g.UseRegister(mul.left()), g.UseRegister(mul.right()));
    return;
  }
  const Operation& rhs = this->Get(add.right());
  if (rhs.Is<Opmask::kFloat64Mul>() && CanCover(node, add.right())) {
    const FloatBinopOp& mul = rhs.Cast<FloatBinopOp>();
    Emit(kArmVmlaF64, g.DefineSameAsFirst(node), g.UseRegister(add.left()),
         g.UseRegister(mul.left()), g.UseRegister(mul.right()));
    return;
  }
  VisitRRR(this, kArmVaddF64, node);
}
void InstructionSelectorT::VisitFloat32Sub(OpIndex node) {
  ArmOperandGeneratorT g(this);
  const FloatBinopOp& sub = this->Get(node).Cast<FloatBinopOp>();
  const Operation& rhs = this->Get(sub.right());
  if (rhs.Is<Opmask::kFloat32Mul>() && CanCover(node, sub.right())) {
    const FloatBinopOp& mul = rhs.Cast<FloatBinopOp>();
    Emit(kArmVmlsF32, g.DefineSameAsFirst(node), g.UseRegister(sub.left()),
         g.UseRegister(mul.left()), g.UseRegister(mul.right()));
    return;
  }
  VisitRRR(this, kArmVsubF32, node);
}
void InstructionSelectorT::VisitFloat64Sub(OpIndex node) {
  ArmOperandGeneratorT g(this);
  const FloatBinopOp& sub = this->Get(node).Cast<FloatBinopOp>();
  const Operation& rhs = this->Get(sub.right());
  if (rhs.Is<Opmask::kFloat64Mul>() && CanCover(node, sub.right())) {
    const FloatBinopOp& mul = rhs.Cast<FloatBinopOp>();
    Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(sub.left()),
         g.UseRegister(mul.left()), g.UseRegister(mul.right()));
    return;
  }
  VisitRRR(this, kArmVsubF64, node);
}
void InstructionSelectorT::VisitFloat64Mod(OpIndex node) {
  ArmOperandGeneratorT g(this);
  Emit(kArmVmodF64, g.DefineAsFixed(node, d0),
       g.UseFixed(this->input_at(node, 0), d0),
       g.UseFixed(this->input_at(node, 1), d1))
      ->MarkAsCall();
}
void InstructionSelectorT::VisitFloat64Ieee754Binop(OpIndex node,
                                                    InstructionCode opcode) {
  ArmOperandGeneratorT g(this);
  Emit(opcode, g.DefineAsFixed(node, d0),
       g.UseFixed(this->input_at(node, 0), d0),
       g.UseFixed(this->input_at(node, 1), d1))
      ->MarkAsCall();
}

void InstructionSelectorT::VisitFloat64Ieee754Unop(OpIndex node,
                                                   InstructionCode opcode) {
  ArmOperandGeneratorT g(this);
  Emit(opcode, g.DefineAsFixed(node, d0),
       g.UseFixed(this->input_at(node, 0), d0))
      ->MarkAsCall();
}
         0, nullptr, 0, nullptr);

    // Poke any stack arguments.
    for (size_t n = 0; n < arguments->size(); ++n) {
      PushParameter input = (*arguments)[n];
      if (input.node.valid()) {
        int slot = static_cast<int>(n);
        Emit(kArmPoke | MiscField::encode(slot), g.NoOutput(),
             g.UseRegister(input.node));
      }
    }
  } else {
    // Push any stack arguments.
    int stack_decrement = 0;
    for (PushParameter input : base::Reversed(*arguments)) {
      stack_decrement += kSystemPointerSize;
      // Skip any alignment holes in pushed nodes.
      if (!input.node.valid()) continue;
      InstructionOperand decrement = g.UseImmediate(stack_decrement);
      stack_decrement = 0;
      Emit(kArmPush, g.NoOutput(), decrement, g.UseRegister(input.node));
    }
  }
}

void InstructionSelectorT::EmitPrepareResults(
    ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
    OpIndex node) {
  ArmOperandGeneratorT g(this);

  for (PushParameter output : *results) {
    if (!output.location.IsCallerFrameSlot()) continue;
    // Skip any alignment holes in nodes.
    if (output.node.valid()) {
      DCHECK(!call_descriptor->IsCFunctionCall());
      int offset = call_descriptor->GetOffsetToReturns();
      int reverse_slot = -output.location.GetLocation() - offset;
      Emit(kArchPeek, g.DefineAsRegister(output.node),
           g.UseImmediate(reverse_slot));
    }
  }
}
void VisitFloat32Compare(InstructionSelectorT* selector, OpIndex node,
                         FlagsContinuationT* cont) {
  ArmOperandGeneratorT g(selector);
  const ComparisonOp& cmp = selector->Get(node).Cast<ComparisonOp>();
  if (selector->MatchZero(cmp.right())) {
    VisitCompare(selector, kArmVcmpF32, g.UseRegister(cmp.left()),
                 g.UseImmediate(cmp.right()), cont);
  } else if (selector->MatchZero(cmp.left())) {
    cont->Commute();
    VisitCompare(selector, kArmVcmpF32, g.UseRegister(cmp.right()),
                 g.UseImmediate(cmp.left()), cont);
  } else {
    VisitCompare(selector, kArmVcmpF32, g.UseRegister(cmp.left()),
                 g.UseRegister(cmp.right()), cont);
  }
}
void VisitFloat64Compare(InstructionSelectorT* selector, OpIndex node,
                         FlagsContinuationT* cont) {
  ArmOperandGeneratorT g(selector);
  const ComparisonOp& op = selector->Get(node).Cast<ComparisonOp>();
  if (selector->MatchZero(op.right())) {
    VisitCompare(selector, kArmVcmpF64, g.UseRegister(op.left()),
                 g.UseImmediate(op.right()), cont);
  } else if (selector->MatchZero(op.left())) {
    cont->Commute();
    VisitCompare(selector, kArmVcmpF64, g.UseRegister(op.right()),
                 g.UseImmediate(op.left()), cont);
  } else {
    VisitCompare(selector, kArmVcmpF64, g.UseRegister(op.left()),
                 g.UseRegister(op.right()), cont);
  }
}
  DCHECK(CanUseFlagSettingBinop(cond));
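// A comparison of (a <op> b) against zero can often be folded into the
// flag-setting form of <op> itself: CMN/TST/TEQ (or ORR with flags) set the
// N and Z flags the same way the compare would, so the separate CMP can be
// dropped.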
void MaybeReplaceCmpZeroWithFlagSettingBinop(InstructionSelectorT* selector,
                                             OpIndex* node, OpIndex binop,
                                             InstructionCode* opcode,
                                             FlagsCondition cond,
                                             FlagsContinuationT* cont) {
  InstructionCode binop_opcode = kArchNop;
  InstructionCode no_output_opcode = kArchNop;
  const Operation& op = selector->Get(binop);
  if (op.Is<Opmask::kWord32Add>()) {
    binop_opcode = kArmAdd;
    no_output_opcode = kArmCmn;
  } else if (op.Is<Opmask::kWord32BitwiseAnd>()) {
    binop_opcode = kArmAnd;
    no_output_opcode = kArmTst;
  } else if (op.Is<Opmask::kWord32BitwiseOr>()) {
    binop_opcode = kArmOrr;
    no_output_opcode = kArmOrr;
  } else if (op.Is<Opmask::kWord32BitwiseXor>()) {
    binop_opcode = kArmEor;
    no_output_opcode = kArmTeq;
  }
  if (selector->CanCover(*node, binop)) {
    // The comparison is the only user of the binop, so a flag-setting
    // instruction with no output can be generated.
    cont->Overwrite(MapForFlagSettingBinop(cond));
    *opcode = no_output_opcode;
    *node = binop;
  } else if (selector->IsOnlyUserOfNodeInSameBlock(*node, binop)) {
    // The binop also has users in other basic blocks, but the compare is its
    // only use in this block, so keep the output-producing form.
    cont->Overwrite(MapForFlagSettingBinop(cond));
    *opcode = binop_opcode;
    *node = binop;
  }
}
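// Shared compare lowering: it tries to encode the right operand (then the
// left operand, with the condition commuted) as an immediate or shift operand
// before falling back to the register-register form.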
void VisitWordCompare(InstructionSelectorT* selector, OpIndex node,
                      InstructionCode opcode, FlagsContinuationT* cont) {
  ArmOperandGeneratorT g(selector);
  OpIndex lhs = selector->input_at(node, 0);
  OpIndex rhs = selector->input_at(node, 1);
  InstructionOperand inputs[3];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;
  bool has_result = (opcode != kArmCmp) && (opcode != kArmCmn) &&
                    (opcode != kArmTst) && (opcode != kArmTeq);

  if (TryMatchImmediateOrShift(selector, &opcode, rhs, &input_count,
                               &inputs[1])) {
    inputs[0] = g.UseRegister(lhs);
    input_count++;
  } else if (TryMatchImmediateOrShift(selector, &opcode, lhs, &input_count,
                                      &inputs[1])) {
    const Operation& op = selector->Get(node);
    if (const ComparisonOp* cmp = op.TryCast<ComparisonOp>()) {
      if (!ComparisonOp::IsCommutative(cmp->kind)) cont->Commute();
    } else if (const WordBinopOp* binop = op.TryCast<WordBinopOp>()) {
      if (!WordBinopOp::IsCommutative(binop->kind)) cont->Commute();
    } else {
      UNREACHABLE();
    }
    inputs[0] = g.UseRegister(rhs);
    input_count++;
  } else {
    opcode |= AddressingModeField::encode(kMode_Operand2_R);
    inputs[input_count++] = g.UseRegister(lhs);
    inputs[input_count++] = g.UseRegister(rhs);
  }

  if (has_result) {
    if (cont->IsDeoptimize()) {
      // If we can deoptimize as a result of the binop, we need to make sure
      // that the deopt inputs are not overwritten by the binop result. One
      // way to achieve that is to declare the output register as
      // same-as-first.
      outputs[output_count++] = g.DefineSameAsFirst(node);
    } else {
      outputs[output_count++] = g.DefineAsRegister(node);
    }
  }

  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}
void VisitWordCompare(InstructionSelectorT* selector, OpIndex node,
                      FlagsContinuationT* cont) {
  InstructionCode opcode = kArmCmp;
  const ComparisonOp& comparison =
      selector->Get(node).template Cast<ComparisonOp>();
  const Operation& lhs = selector->Get(comparison.left());
  const Operation& rhs = selector->Get(comparison.right());

  FlagsCondition cond = cont->condition();
  if (selector->MatchIntegralZero(comparison.right()) &&
      (lhs.Is<Opmask::kWord32Add>() || lhs.Is<Opmask::kWord32BitwiseOr>() ||
       lhs.Is<Opmask::kWord32BitwiseAnd>() ||
       lhs.Is<Opmask::kWord32BitwiseXor>())) {
    // Emit flag setting instructions for comparisons against zero.
    if (CanUseFlagSettingBinop(cond)) {
      MaybeReplaceCmpZeroWithFlagSettingBinop(
          selector, &node, comparison.left(), &opcode, cond, cont);
    }
  } else if (selector->MatchIntegralZero(comparison.left()) &&
             (rhs.Is<Opmask::kWord32Add>() ||
              rhs.Is<Opmask::kWord32BitwiseOr>() ||
              rhs.Is<Opmask::kWord32BitwiseAnd>() ||
              rhs.Is<Opmask::kWord32BitwiseXor>())) {
    // Same as above, but the condition must be commuted first.
    cond = CommuteFlagsCondition(cond);
    if (CanUseFlagSettingBinop(cond)) {
      MaybeReplaceCmpZeroWithFlagSettingBinop(
          selector, &node, comparison.right(), &opcode, cond, cont);
    }
  }

  VisitWordCompare(selector, node, opcode, cont);
}
      switch (comparison->rep.MapTaggedToWord().value()) {
        case RegisterRepresentation::Word32():
          cont->OverwriteAndNegateIfEqual(
              GetComparisonFlagCondition(*comparison));
          return VisitWordCompare(this, value, cont);
        case RegisterRepresentation::Float32():
          switch (comparison->kind) {
            case ComparisonOp::Kind::kEqual:
              cont->OverwriteAndNegateIfEqual(kEqual);
              return VisitFloat32Compare(this, value, cont);
            case ComparisonOp::Kind::kSignedLessThan:
              cont->OverwriteAndNegateIfEqual(kFloatLessThan);
              return VisitFloat32Compare(this, value, cont);
            case ComparisonOp::Kind::kSignedLessThanOrEqual:
              cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
              return VisitFloat32Compare(this, value, cont);
            default:
              UNREACHABLE();
          }
        case RegisterRepresentation::Float64():
          switch (comparison->kind) {
            case ComparisonOp::Kind::kEqual:
              cont->OverwriteAndNegateIfEqual(kEqual);
              return VisitFloat64Compare(this, value, cont);
            case ComparisonOp::Kind::kSignedLessThan:
              cont->OverwriteAndNegateIfEqual(kFloatLessThan);
              return VisitFloat64Compare(this, value, cont);
            case ComparisonOp::Kind::kSignedLessThanOrEqual:
              cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
              return VisitFloat64Compare(this, value, cont);
            default:
              UNREACHABLE();
          }
        default:
          break;
      }
    if (projection->index == 1u) {
      // This is the overflow output projection of an <Operation>WithOverflow
      // node; try to combine the overflow check with the operation itself.
      OpIndex node = projection->input();
      if (const OverflowCheckedBinopOp* binop =
              this->Get(node).TryCast<OverflowCheckedBinopOp>()) {
        switch (binop->kind) {
          case OverflowCheckedBinopOp::Kind::kSignedAdd:
            cont->OverwriteAndNegateIfEqual(kOverflow);
            return VisitBinop(this, node, kArmAdd, kArmAdd, cont);
          case OverflowCheckedBinopOp::Kind::kSignedSub:
            cont->OverwriteAndNegateIfEqual(kOverflow);
            return VisitBinop(this, node, kArmSub, kArmRsb, cont);
          case OverflowCheckedBinopOp::Kind::kSignedMul:
            // ARM doesn't set the overflow flag for multiplication, so we
            // need to test on kNotEqual.
            cont->OverwriteAndNegateIfEqual(kNotEqual);
            return EmitInt32MulWithOverflow(this, node, cont);
        }
      }
    }
  } else if (value_op.Is<Opmask::kWord32BitwiseOr>()) {
    return VisitBinop(this, value, kArmOrr, kArmOrr, cont);
  } else if (value_op.Is<Opmask::kWord32ShiftRightArithmetic>()) {
    return VisitShift(this, value, TryMatchASR, cont);
  } else if (value_op.Is<Opmask::kWord32ShiftLeft>()) {
    return VisitShift(this, value, TryMatchLSL, cont);
  } else if (value_op.Is<Opmask::kWord32ShiftRightLogical>()) {
    return VisitShift(this, value, TryMatchLSR, cont);
  } else if (value_op.Is<Opmask::kWord32RotateRight>()) {
    return VisitShift(this, value, TryMatchROR, cont);
  }
    static const size_t kMaxTableSwitchValueRange = 2 << 16;
    size_t table_space_cost = 4 + sw.value_range();
    size_t table_time_cost = 3;
    size_t lookup_space_cost = 3 + 2 * sw.case_count();
    size_t lookup_time_cost = sw.case_count();
    if (sw.case_count() > 0 &&
        table_space_cost + 3 * table_time_cost <=
            lookup_space_cost + 3 * lookup_time_cost &&
        sw.min_value() > std::numeric_limits<int32_t>::min() &&
        sw.value_range() <= kMaxTableSwitchValueRange) {
void InstructionSelectorT::VisitWord32Equal(OpIndex node) {
  FlagsContinuationT cont = FlagsContinuationT::ForSet(kEqual, node);
  const ComparisonOp& equal = this->Get(node).Cast<ComparisonOp>();
  if (this->MatchIntegralZero(equal.right())) {
    return VisitWordCompareZero(node, equal.left(), &cont);
  }
  VisitWordCompare(this, node, &cont);
}

void InstructionSelectorT::VisitInt32LessThan(OpIndex node) {
  FlagsContinuationT cont = FlagsContinuationT::ForSet(kSignedLessThan, node);
  VisitWordCompare(this, node, &cont);
}

void InstructionSelectorT::VisitInt32LessThanOrEqual(OpIndex node) {
  FlagsContinuationT cont =
      FlagsContinuationT::ForSet(kSignedLessThanOrEqual, node);
  VisitWordCompare(this, node, &cont);
}

void InstructionSelectorT::VisitUint32LessThan(OpIndex node) {
  FlagsContinuationT cont = FlagsContinuationT::ForSet(kUnsignedLessThan, node);
  VisitWordCompare(this, node, &cont);
}

void InstructionSelectorT::VisitUint32LessThanOrEqual(OpIndex node) {
  FlagsContinuationT cont =
      FlagsContinuationT::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWordCompare(this, node, &cont);
}
void InstructionSelectorT::VisitInt32AddWithOverflow(OpIndex node) {
  OptionalOpIndex ovf = FindProjection(node, 1);
  if (ovf.valid()) {
    FlagsContinuationT cont =
        FlagsContinuationT::ForSet(kOverflow, ovf.value());
    return VisitBinop(this, node, kArmAdd, kArmAdd, &cont);
  }
  FlagsContinuationT cont;
  VisitBinop(this, node, kArmAdd, kArmAdd, &cont);
}

void InstructionSelectorT::VisitInt32SubWithOverflow(OpIndex node) {
  OptionalOpIndex ovf = FindProjection(node, 1);
  if (ovf.valid()) {
    FlagsContinuationT cont =
        FlagsContinuationT::ForSet(kOverflow, ovf.value());
    return VisitBinop(this, node, kArmSub, kArmRsb, &cont);
  }
  FlagsContinuationT cont;
  VisitBinop(this, node, kArmSub, kArmRsb, &cont);
}

void InstructionSelectorT::VisitInt32MulWithOverflow(OpIndex node) {
  OptionalOpIndex ovf = FindProjection(node, 1);
  if (ovf.valid()) {
    // ARM doesn't set the overflow flag for multiplication, so we need to
    // test on kNotEqual.
    FlagsContinuationT cont =
        FlagsContinuationT::ForSet(kNotEqual, ovf.value());
    return EmitInt32MulWithOverflow(this, node, &cont);
  }
  FlagsContinuationT cont;
  EmitInt32MulWithOverflow(this, node, &cont);
}
void InstructionSelectorT::VisitFloat32Equal(OpIndex node) {
  FlagsContinuationT cont = FlagsContinuationT::ForSet(kEqual, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelectorT::VisitFloat32LessThan(OpIndex node) {
  FlagsContinuationT cont = FlagsContinuationT::ForSet(kFloatLessThan, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelectorT::VisitFloat32LessThanOrEqual(OpIndex node) {
  FlagsContinuationT cont =
      FlagsContinuationT::ForSet(kFloatLessThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelectorT::VisitFloat64Equal(OpIndex node) {
  FlagsContinuationT cont = FlagsContinuationT::ForSet(kEqual, node);
  VisitFloat64Compare(this, node, &cont);
}

void InstructionSelectorT::VisitFloat64LessThan(OpIndex node) {
  FlagsContinuationT cont = FlagsContinuationT::ForSet(kFloatLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}

void InstructionSelectorT::VisitFloat64LessThanOrEqual(OpIndex node) {
  FlagsContinuationT cont =
      FlagsContinuationT::ForSet(kFloatLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}
void InstructionSelectorT::VisitFloat64InsertLowWord32(OpIndex node) {
  ArmOperandGeneratorT g(this);
  Emit(kArmVmovLowF64U32, g.DefineSameAsFirst(node),
       g.UseRegister(this->input_at(node, 0)),
       g.UseRegister(this->input_at(node, 1)));
}

void InstructionSelectorT::VisitFloat64InsertHighWord32(OpIndex node) {
  ArmOperandGeneratorT g(this);
  Emit(kArmVmovHighF64U32, g.DefineSameAsFirst(node),
       g.UseRegister(this->input_at(node, 0)),
       g.UseRegister(this->input_at(node, 1)));
}
void InstructionSelectorT::VisitMemoryBarrier(OpIndex node) {
  // A full DMB ISH serves all memory-order variants here.
  ArmOperandGeneratorT g(this);
  Emit(kArmDmbIsh, g.NoOutput());
}
void InstructionSelectorT::VisitWord32AtomicLoad(OpIndex node) {
  // The memory order is ignored: both acquire and sequentially consistent
  // atomic loads emit LDR; DMB ISH.
  auto load = this->load_view(node);
  LoadRepresentation load_rep = load.loaded_rep();
  ArmOperandGeneratorT g(this);
  OpIndex base = load.base();
  OpIndex index = load.index();
  ArchOpcode opcode;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:
    case MachineRepresentation::kWord32:
      opcode = kAtomicLoadWord32;
      break;
    default:
      UNREACHABLE();
  }
  Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
       g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
}
void InstructionSelectorT::VisitWord32AtomicStore(OpIndex node) {
  auto store = this->store_view(node);
  AtomicStoreParameters store_params(store.stored_rep().representation(),
                                     store.stored_rep().write_barrier_kind(),
                                     store.memory_order().value(),
                                     store.access_kind());
  VisitStoreCommon(this, node, store_params.store_representation(),
                   store_params.order());
}
void InstructionSelectorT::VisitWord32AtomicExchange(OpIndex node) {
  ArmOperandGeneratorT g(this);
  const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
  OpIndex base = atomic_op.base();
  OpIndex index = atomic_op.index();
  OpIndex value = atomic_op.value();
  ArchOpcode opcode;
  if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
    opcode = kAtomicExchangeInt8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
    opcode = kAtomicExchangeUint8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
    opcode = kAtomicExchangeInt16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
    opcode = kAtomicExchangeUint16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
             atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
    opcode = kAtomicExchangeWord32;
  } else {
    UNREACHABLE();
  }

  AddressingMode addressing_mode = kMode_Offset_RR;
  InstructionOperand inputs[3];
  size_t input_count = 0;
  inputs[input_count++] = g.UseRegister(base);
  inputs[input_count++] = g.UseRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(value);
  InstructionOperand outputs[1];
  outputs[0] = g.DefineAsRegister(node);
  InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
}
void InstructionSelectorT::VisitWord32AtomicCompareExchange(OpIndex node) {
  ArmOperandGeneratorT g(this);
  const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
  OpIndex base = atomic_op.base();
  OpIndex index = atomic_op.index();
  OpIndex old_value = atomic_op.expected().value();
  OpIndex new_value = atomic_op.value();
  ArchOpcode opcode;
  if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
    opcode = kAtomicCompareExchangeInt8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
    opcode = kAtomicCompareExchangeUint8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
    opcode = kAtomicCompareExchangeInt16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
    opcode = kAtomicCompareExchangeUint16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
             atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
    opcode = kAtomicCompareExchangeWord32;
  } else {
    UNREACHABLE();
  }

  AddressingMode addressing_mode = kMode_Offset_RR;
  InstructionOperand inputs[4];
  size_t input_count = 0;
  inputs[input_count++] = g.UseRegister(base);
  inputs[input_count++] = g.UseRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(old_value);
  inputs[input_count++] = g.UseUniqueRegister(new_value);
  InstructionOperand outputs[1];
  outputs[0] = g.DefineAsRegister(node);
  InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
                                g.TempRegister()};
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
}
  size_t input_count = 0;
#define VISIT_ATOMIC_BINOP(op)                                           \
  void InstructionSelectorT::VisitWord32Atomic##op(OpIndex node) {       \
    VisitWord32AtomicBinaryOperation(                                    \
        node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
        kAtomic##op##Uint16, kAtomic##op##Word32);                       \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
void InstructionSelectorT::VisitWord32AtomicPairLoad(OpIndex node) {
  ArmOperandGeneratorT g(this);
  OpIndex base = this->input_at(node, 0);
  OpIndex index = this->input_at(node, 1);
  InstructionOperand inputs[3];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  InstructionOperand temps[1];
  size_t temp_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  OptionalOpIndex projection0 = FindProjection(node, 0);
  OptionalOpIndex projection1 = FindProjection(node, 1);
  if (projection0.valid() && projection1.valid()) {
    outputs[output_count++] = g.DefineAsFixed(projection0.value(), r0);
    outputs[output_count++] = g.DefineAsFixed(projection1.value(), r1);
    temps[temp_count++] = g.TempRegister();
  } else if (projection0.valid()) {
    inputs[input_count++] = g.UseImmediate(0);
    outputs[output_count++] = g.DefineAsRegister(projection0.value());
  } else if (projection1.valid()) {
    inputs[input_count++] = g.UseImmediate(4);
    temps[temp_count++] = g.TempRegister();
    outputs[output_count++] = g.DefineAsRegister(projection1.value());
  } else {
    // The result is not used; no code needs to be generated.
    return;
  }
  Emit(kArmWord32AtomicPairLoad, output_count, outputs, input_count, inputs,
       temp_count, temps);
}
void InstructionSelectorT::VisitWord32AtomicPairStore(OpIndex node) {
  ArmOperandGeneratorT g(this);
  const AtomicWord32PairOp& store = Cast<AtomicWord32PairOp>(node);
  InstructionOperand inputs[] = {g.UseUniqueRegister(store.base()),
                                 g.UseUniqueRegister(store.index().value()),
                                 g.UseFixed(store.value_low().value(), r2),
                                 g.UseFixed(store.value_high().value(), r3)};
  InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(r0),
                                g.TempRegister(r1)};
  Emit(kArmWord32AtomicPairStore, 0, nullptr, arraysize(inputs), inputs,
       arraysize(temps), temps);
}
void InstructionSelectorT::VisitWord32AtomicPairAdd(OpIndex node) {
  VisitPairAtomicBinOp(this, node, kArmWord32AtomicPairAdd);
}

void InstructionSelectorT::VisitWord32AtomicPairSub(OpIndex node) {
  VisitPairAtomicBinOp(this, node, kArmWord32AtomicPairSub);
}

void InstructionSelectorT::VisitWord32AtomicPairAnd(OpIndex node) {
  VisitPairAtomicBinOp(this, node, kArmWord32AtomicPairAnd);
}

void InstructionSelectorT::VisitWord32AtomicPairOr(OpIndex node) {
  VisitPairAtomicBinOp(this, node, kArmWord32AtomicPairOr);
}

void InstructionSelectorT::VisitWord32AtomicPairXor(OpIndex node) {
  VisitPairAtomicBinOp(this, node, kArmWord32AtomicPairXor);
}
void InstructionSelectorT::VisitWord32AtomicPairExchange(OpIndex node) {
  ArmOperandGeneratorT g(this);
  OpIndex base = this->input_at(node, 0);
  OpIndex index = this->input_at(node, 1);
  OpIndex value = this->input_at(node, 2);
  OpIndex value_high = this->input_at(node, 3);
  AddressingMode addressing_mode = kMode_Offset_RR;
  InstructionOperand inputs[] = {
      g.UseFixed(value, r0), g.UseFixed(value_high, r1),
      g.UseUniqueRegister(base), g.UseUniqueRegister(index)};
  InstructionCode code = kArmWord32AtomicPairExchange |
                         AddressingModeField::encode(addressing_mode);
  OptionalOpIndex projection0 = FindProjection(node, 0);
  OptionalOpIndex projection1 = FindProjection(node, 1);
  InstructionOperand outputs[2];
  size_t output_count = 0;
  InstructionOperand temps[4];
  size_t temp_count = 0;
  temps[temp_count++] = g.TempRegister();
  temps[temp_count++] = g.TempRegister();
  if (projection0.valid()) {
    outputs[output_count++] = g.DefineAsFixed(projection0.value(), r6);
  } else {
    temps[temp_count++] = g.TempRegister(r6);
  }
  if (projection1.valid()) {
    outputs[output_count++] = g.DefineAsFixed(projection1.value(), r7);
  } else {
    temps[temp_count++] = g.TempRegister(r7);
  }
  Emit(code, output_count, outputs, arraysize(inputs), inputs, temp_count,
       temps);
}
void InstructionSelectorT::VisitWord32AtomicPairCompareExchange(OpIndex node) {
  ArmOperandGeneratorT g(this);
  AddressingMode addressing_mode = kMode_Offset_RR;

  const size_t expected_offset = 4;
  const size_t value_offset = 2;
  InstructionOperand inputs[] = {
      g.UseFixed(this->input_at(node, expected_offset), r4),
      g.UseFixed(this->input_at(node, expected_offset + 1), r5),
      g.UseFixed(this->input_at(node, value_offset), r8),
      g.UseFixed(this->input_at(node, value_offset + 1), r9),
      g.UseUniqueRegister(this->input_at(node, 0)),
      g.UseUniqueRegister(this->input_at(node, 1))};
  InstructionCode code = kArmWord32AtomicPairCompareExchange |
                         AddressingModeField::encode(addressing_mode);
  OptionalOpIndex projection0 = FindProjection(node, 0);
  OptionalOpIndex projection1 = FindProjection(node, 1);
  InstructionOperand outputs[2];
  size_t output_count = 0;
  InstructionOperand temps[4];
  size_t temp_count = 0;
  temps[temp_count++] = g.TempRegister();
  temps[temp_count++] = g.TempRegister();
  if (projection0.valid()) {
    outputs[output_count++] = g.DefineAsFixed(projection0.value(), r2);
  } else {
    temps[temp_count++] = g.TempRegister(r2);
  }
  if (projection1.valid()) {
    outputs[output_count++] = g.DefineAsFixed(projection1.value(), r3);
  } else {
    temps[temp_count++] = g.TempRegister(r3);
  }
  Emit(code, output_count, outputs, arraysize(inputs), inputs, temp_count,
       temps);
}
#define SIMD_UNOP_LIST(V)                               \
  V(F64x2Abs, kArmF64x2Abs)                             \
  V(F64x2Neg, kArmF64x2Neg)                             \
  V(F64x2Sqrt, kArmF64x2Sqrt)                           \
  V(F32x4SConvertI32x4, kArmF32x4SConvertI32x4)         \
  V(F32x4UConvertI32x4, kArmF32x4UConvertI32x4)         \
  V(F32x4Abs, kArmF32x4Abs)                             \
  V(F32x4Neg, kArmF32x4Neg)                             \
  V(I64x2Abs, kArmI64x2Abs)                             \
  V(I64x2SConvertI32x4Low, kArmI64x2SConvertI32x4Low)   \
  V(I64x2SConvertI32x4High, kArmI64x2SConvertI32x4High) \
  V(I64x2UConvertI32x4Low, kArmI64x2UConvertI32x4Low)   \
  V(I64x2UConvertI32x4High, kArmI64x2UConvertI32x4High) \
  V(I32x4SConvertF32x4, kArmI32x4SConvertF32x4)         \
  V(I32x4RelaxedTruncF32x4S, kArmI32x4SConvertF32x4)    \
  V(I32x4SConvertI16x8Low, kArmI32x4SConvertI16x8Low)   \
  V(I32x4SConvertI16x8High, kArmI32x4SConvertI16x8High) \
  V(I32x4Neg, kArmI32x4Neg)                             \
  V(I32x4UConvertF32x4, kArmI32x4UConvertF32x4)         \
  V(I32x4RelaxedTruncF32x4U, kArmI32x4UConvertF32x4)    \
  V(I32x4UConvertI16x8Low, kArmI32x4UConvertI16x8Low)   \
  V(I32x4UConvertI16x8High, kArmI32x4UConvertI16x8High) \
  V(I32x4Abs, kArmI32x4Abs)                             \
  V(I16x8SConvertI8x16Low, kArmI16x8SConvertI8x16Low)   \
  V(I16x8SConvertI8x16High, kArmI16x8SConvertI8x16High) \
  V(I16x8Neg, kArmI16x8Neg)                             \
  V(I16x8UConvertI8x16Low, kArmI16x8UConvertI8x16Low)   \
  V(I16x8UConvertI8x16High, kArmI16x8UConvertI8x16High) \
  V(I16x8Abs, kArmI16x8Abs)                             \
  V(I8x16Neg, kArmI8x16Neg)                             \
  V(I8x16Abs, kArmI8x16Abs)                             \
  V(I8x16Popcnt, kArmVcnt)                              \
  V(S128Not, kArmS128Not)                               \
  V(I64x2AllTrue, kArmI64x2AllTrue)                     \
  V(I32x4AllTrue, kArmI32x4AllTrue)                     \
  V(I16x8AllTrue, kArmI16x8AllTrue)                     \
  V(V128AnyTrue, kArmV128AnyTrue)                       \
  V(I8x16AllTrue, kArmI8x16AllTrue)
#define SIMD_SHIFT_OP_LIST(V) \
  V(I64x2Shl, 64)             \
  V(I64x2ShrS, 64)            \
  V(I64x2ShrU, 64)            \
  V(I32x4Shl, 32)             \
  V(I32x4ShrS, 32)            \
  V(I32x4ShrU, 32)            \
  V(I16x8Shl, 16)             \
  V(I16x8ShrS, 16)            \
  V(I16x8ShrU, 16)            \
  V(I8x16Shl, 8)              \
  V(I8x16ShrS, 8)             \
  V(I8x16ShrU, 8)
#define SIMD_BINOP_LIST(V)                            \
  V(F64x2Add, kArmF64x2Add)                           \
  V(F64x2Sub, kArmF64x2Sub)                           \
  V(F64x2Mul, kArmF64x2Mul)                           \
  V(F64x2Div, kArmF64x2Div)                           \
  V(F64x2Min, kArmF64x2Min)                           \
  V(F64x2Max, kArmF64x2Max)                           \
  V(F64x2Eq, kArmF64x2Eq)                             \
  V(F64x2Ne, kArmF64x2Ne)                             \
  V(F64x2Lt, kArmF64x2Lt)                             \
  V(F64x2Le, kArmF64x2Le)                             \
  V(F32x4Add, kArmF32x4Add)                           \
  V(F32x4Sub, kArmF32x4Sub)                           \
  V(F32x4Mul, kArmF32x4Mul)                           \
  V(F32x4Min, kArmF32x4Min)                           \
  V(F32x4RelaxedMin, kArmF32x4Min)                    \
  V(F32x4Max, kArmF32x4Max)                           \
  V(F32x4RelaxedMax, kArmF32x4Max)                    \
  V(F32x4Eq, kArmF32x4Eq)                             \
  V(F32x4Ne, kArmF32x4Ne)                             \
  V(F32x4Lt, kArmF32x4Lt)                             \
  V(F32x4Le, kArmF32x4Le)                             \
  V(I64x2Add, kArmI64x2Add)                           \
  V(I64x2Sub, kArmI64x2Sub)                           \
  V(I32x4Sub, kArmI32x4Sub)                           \
  V(I32x4Mul, kArmI32x4Mul)                           \
  V(I32x4MinS, kArmI32x4MinS)                         \
  V(I32x4MaxS, kArmI32x4MaxS)                         \
  V(I32x4Eq, kArmI32x4Eq)                             \
  V(I64x2Eq, kArmI64x2Eq)                             \
  V(I64x2Ne, kArmI64x2Ne)                             \
  V(I64x2GtS, kArmI64x2GtS)                           \
  V(I64x2GeS, kArmI64x2GeS)                           \
  V(I32x4Ne, kArmI32x4Ne)                             \
  V(I32x4GtS, kArmI32x4GtS)                           \
  V(I32x4GeS, kArmI32x4GeS)                           \
  V(I32x4MinU, kArmI32x4MinU)                         \
  V(I32x4MaxU, kArmI32x4MaxU)                         \
  V(I32x4GtU, kArmI32x4GtU)                           \
  V(I32x4GeU, kArmI32x4GeU)                           \
  V(I16x8SConvertI32x4, kArmI16x8SConvertI32x4)       \
  V(I16x8AddSatS, kArmI16x8AddSatS)                   \
  V(I16x8Sub, kArmI16x8Sub)                           \
  V(I16x8SubSatS, kArmI16x8SubSatS)                   \
  V(I16x8Mul, kArmI16x8Mul)                           \
  V(I16x8MinS, kArmI16x8MinS)                         \
  V(I16x8MaxS, kArmI16x8MaxS)                         \
  V(I16x8Eq, kArmI16x8Eq)                             \
  V(I16x8Ne, kArmI16x8Ne)                             \
  V(I16x8GtS, kArmI16x8GtS)                           \
  V(I16x8GeS, kArmI16x8GeS)                           \
  V(I16x8UConvertI32x4, kArmI16x8UConvertI32x4)       \
  V(I16x8AddSatU, kArmI16x8AddSatU)                   \
  V(I16x8SubSatU, kArmI16x8SubSatU)                   \
  V(I16x8MinU, kArmI16x8MinU)                         \
  V(I16x8MaxU, kArmI16x8MaxU)                         \
  V(I16x8GtU, kArmI16x8GtU)                           \
  V(I16x8GeU, kArmI16x8GeU)                           \
  V(I16x8RoundingAverageU, kArmI16x8RoundingAverageU) \
  V(I16x8Q15MulRSatS, kArmI16x8Q15MulRSatS)           \
  V(I16x8RelaxedQ15MulRS, kArmI16x8Q15MulRSatS)       \
  V(I8x16SConvertI16x8, kArmI8x16SConvertI16x8)       \
  V(I8x16Add, kArmI8x16Add)                           \
  V(I8x16AddSatS, kArmI8x16AddSatS)                   \
  V(I8x16Sub, kArmI8x16Sub)                           \
  V(I8x16SubSatS, kArmI8x16SubSatS)                   \
  V(I8x16MinS, kArmI8x16MinS)                         \
  V(I8x16MaxS, kArmI8x16MaxS)                         \
  V(I8x16Eq, kArmI8x16Eq)                             \
  V(I8x16Ne, kArmI8x16Ne)                             \
  V(I8x16GtS, kArmI8x16GtS)                           \
  V(I8x16GeS, kArmI8x16GeS)                           \
  V(I8x16UConvertI16x8, kArmI8x16UConvertI16x8)       \
  V(I8x16AddSatU, kArmI8x16AddSatU)                   \
  V(I8x16SubSatU, kArmI8x16SubSatU)                   \
  V(I8x16MinU, kArmI8x16MinU)                         \
  V(I8x16MaxU, kArmI8x16MaxU)                         \
  V(I8x16GtU, kArmI8x16GtU)                           \
  V(I8x16GeU, kArmI8x16GeU)                           \
  V(I8x16RoundingAverageU, kArmI8x16RoundingAverageU) \
  V(S128And, kArmS128And)                             \
  V(S128Or, kArmS128Or)                               \
  V(S128Xor, kArmS128Xor)                             \
  V(S128AndNot, kArmS128AndNot)
#if V8_ENABLE_WEBASSEMBLY
void InstructionSelectorT::VisitI32x4DotI16x8S(OpIndex node) {
  ArmOperandGeneratorT g(this);
  Emit(kArmI32x4DotI16x8S, g.DefineAsRegister(node),
       g.UseUniqueRegister(this->input_at(node, 0)),
       g.UseUniqueRegister(this->input_at(node, 1)));
}

void InstructionSelectorT::VisitI16x8DotI8x16I7x16S(OpIndex node) {
  ArmOperandGeneratorT g(this);
  Emit(kArmI16x8DotI8x16S, g.DefineAsRegister(node),
       g.UseUniqueRegister(this->input_at(node, 0)),
       g.UseUniqueRegister(this->input_at(node, 1)));
}

void InstructionSelectorT::VisitI32x4DotI8x16I7x16AddS(OpIndex node) {
  ArmOperandGeneratorT g(this);
  InstructionOperand temps[] = {g.TempSimd128Register()};
  Emit(kArmI32x4DotI8x16AddS, g.DefineSameAsInput(node, 2),
       g.UseUniqueRegister(this->input_at(node, 0)),
       g.UseUniqueRegister(this->input_at(node, 1)),
       g.UseUniqueRegister(this->input_at(node, 2)), arraysize(temps), temps);
}
void InstructionSelectorT::VisitS128Const(OpIndex node) {
  ArmOperandGeneratorT g(this);
  uint32_t val[kSimd128Size / sizeof(uint32_t)];
  const Simd128ConstantOp& constant =
      this->Get(node).Cast<Simd128ConstantOp>();
  memcpy(val, constant.value, kSimd128Size);
  // If all bytes are zeros or ones, avoid emitting code for generic constants.
  bool all_zeros = !(val[0] || val[1] || val[2] || val[3]);
  bool all_ones = val[0] == UINT32_MAX && val[1] == UINT32_MAX &&
                  val[2] == UINT32_MAX && val[3] == UINT32_MAX;
  InstructionOperand dst = g.DefineAsRegister(node);
  if (all_zeros) {
    Emit(kArmS128Zero, dst);
  } else if (all_ones) {
    Emit(kArmS128AllOnes, dst);
  } else {
    Emit(kArmS128Const, dst, g.UseImmediate(val[0]), g.UseImmediate(val[1]),
         g.UseImmediate(val[2]), g.UseImmediate(val[3]));
  }
}
void InstructionSelectorT::VisitS128Zero(OpIndex node) {
  ArmOperandGeneratorT g(this);
  Emit(kArmS128Zero, g.DefineAsRegister(node));
}
void InstructionSelectorT::VisitF64x2Splat(OpIndex node) {
  VisitRR(this, kArmF64x2Splat, node);
}

void InstructionSelectorT::VisitF32x4Splat(OpIndex node) {
  VisitRR(this, kArmF32x4Splat, node);
}

void InstructionSelectorT::VisitI32x4Splat(OpIndex node) {
  VisitRR(this, kArmI32x4Splat, node);
}

void InstructionSelectorT::VisitI16x8Splat(OpIndex node) {
  VisitRR(this, kArmI16x8Splat, node);
}

void InstructionSelectorT::VisitI8x16Splat(OpIndex node) {
  VisitRR(this, kArmI8x16Splat, node);
}
#if V8_ENABLE_WEBASSEMBLY
#define SIMD_VISIT_EXTRACT_LANE(Type, Sign)                                 \
  void InstructionSelectorT::Visit##Type##ExtractLane##Sign(OpIndex node) { \
    VisitRRI(this, kArm##Type##ExtractLane##Sign, node);                    \
  }
SIMD_VISIT_EXTRACT_LANE(F64x2, )
SIMD_VISIT_EXTRACT_LANE(F32x4, )
SIMD_VISIT_EXTRACT_LANE(I32x4, )
SIMD_VISIT_EXTRACT_LANE(I16x8, U)
SIMD_VISIT_EXTRACT_LANE(I16x8, S)
SIMD_VISIT_EXTRACT_LANE(I8x16, U)
SIMD_VISIT_EXTRACT_LANE(I8x16, S)
#undef SIMD_VISIT_EXTRACT_LANE
void InstructionSelectorT::VisitF16x8ExtractLane(OpIndex node) {
  UNIMPLEMENTED();
}
void InstructionSelectorT::VisitF64x2ReplaceLane(OpIndex node) {
  VisitRRIR(this, kArmF64x2ReplaceLane, node);
}

void InstructionSelectorT::VisitF32x4ReplaceLane(OpIndex node) {
  VisitRRIR(this, kArmF32x4ReplaceLane, node);
}

void InstructionSelectorT::VisitF16x8ReplaceLane(OpIndex node) {
  UNIMPLEMENTED();
}

void InstructionSelectorT::VisitI32x4ReplaceLane(OpIndex node) {
  VisitRRIR(this, kArmI32x4ReplaceLane, node);
}

void InstructionSelectorT::VisitI16x8ReplaceLane(OpIndex node) {
  VisitRRIR(this, kArmI16x8ReplaceLane, node);
}

void InstructionSelectorT::VisitI8x16ReplaceLane(OpIndex node) {
  VisitRRIR(this, kArmI8x16ReplaceLane, node);
}
#define SIMD_VISIT_UNOP(Name, instruction)               \
  void InstructionSelectorT::Visit##Name(OpIndex node) { \
    VisitRR(this, instruction, node);                    \
  }
SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
#undef SIMD_VISIT_UNOP
#undef SIMD_UNOP_LIST
#define UNIMPLEMENTED_SIMD_UNOP_LIST(V) \
  V(F16x8Abs)                           \
  V(F16x8Neg)                           \
  V(F16x8Sqrt)                          \
  V(F16x8Floor)                         \
  V(F16x8Ceil)                          \
  V(F16x8Trunc)                         \
  V(F16x8NearestInt)

#define SIMD_VISIT_UNIMPL_UNOP(Name) \
  void InstructionSelectorT::Visit##Name(OpIndex node) { UNIMPLEMENTED(); }

UNIMPLEMENTED_SIMD_UNOP_LIST(SIMD_VISIT_UNIMPL_UNOP)
#undef SIMD_VISIT_UNIMPL_UNOP
#undef UNIMPLEMENTED_SIMD_UNOP_LIST
#define UNIMPLEMENTED_SIMD_CVTOP_LIST(V) \
  V(F16x8SConvertI16x8)                  \
  V(F16x8UConvertI16x8)                  \
  V(I16x8SConvertF16x8)                  \
  V(I16x8UConvertF16x8)                  \
  V(F32x4PromoteLowF16x8)                \
  V(F16x8DemoteF32x4Zero)                \
  V(F16x8DemoteF64x2Zero)

#define SIMD_VISIT_UNIMPL_CVTOP(Name) \
  void InstructionSelectorT::Visit##Name(OpIndex node) { UNIMPLEMENTED(); }

UNIMPLEMENTED_SIMD_CVTOP_LIST(SIMD_VISIT_UNIMPL_CVTOP)
#undef SIMD_VISIT_UNIMPL_CVTOP
#undef UNIMPLEMENTED_SIMD_CVTOP_LIST

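// A plausible reading of the stubs above: baseline ARMv7 NEON, which this
// selector targets, has no half-precision (F16) arithmetic, so every F16x8
// operation and conversion traps with UNIMPLEMENTED() at selection time
// instead of being lowered.
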
#define SIMD_VISIT_SHIFT_OP(Name, width)                 \
  void InstructionSelectorT::Visit##Name(OpIndex node) { \
    VisitSimdShiftRRR(this, kArm##Name, node, width);    \
  }
SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
#undef SIMD_VISIT_SHIFT_OP
#undef SIMD_SHIFT_OP_LIST

#define SIMD_VISIT_BINOP(Name, instruction)              \
  void InstructionSelectorT::Visit##Name(OpIndex node) { \
    VisitRRR(this, instruction, node);                   \
  }
SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
#undef SIMD_BINOP_LIST

#define UNIMPLEMENTED_SIMD_BINOP_LIST(V) \
  V(F16x8Add)                            \
  V(F16x8Sub)                            \
  V(F16x8Mul)                            \
  V(F16x8Div)                            \
  V(F16x8Min)                            \
  V(F16x8Max)                            \
  V(F16x8Pmin)                           \
  V(F16x8Pmax)                           \
  V(F16x8Eq)                             \
  V(F16x8Ne)                             \
  V(F16x8Lt)                             \
  V(F16x8Le)

#define SIMD_VISIT_UNIMPL_BINOP(Name) \
  void InstructionSelectorT::Visit##Name(OpIndex node) { UNIMPLEMENTED(); }

UNIMPLEMENTED_SIMD_BINOP_LIST(SIMD_VISIT_UNIMPL_BINOP)
#undef SIMD_VISIT_UNIMPL_BINOP
#undef UNIMPLEMENTED_SIMD_BINOP_LIST

#define VISIT_SIMD_ADD(Type, PairwiseType, NeonWidth)                          \
  void InstructionSelectorT::Visit##Type##Add(OpIndex node) {                  \
    ArmOperandGeneratorT g(this);                                              \
    const Simd128BinopOp& add_op = Get(node).Cast<Simd128BinopOp>();           \
    const Operation& left = Get(add_op.left());                                \
    const Operation& right = Get(add_op.right());                              \
    if (left.Is<Opmask::kSimd128##Type##ExtAddPairwise##PairwiseType##S>() &&  \
        CanCover(node, add_op.left())) {                                       \
      Emit(kArmVpadal | MiscField::encode(NeonS##NeonWidth),                   \
           g.DefineSameAsFirst(node), g.UseRegister(add_op.right()),           \
           g.UseRegister(left.input(0)));                                      \
      return;                                                                  \
    }                                                                          \
    if (left.Is<Opmask::kSimd128##Type##ExtAddPairwise##PairwiseType##U>() &&  \
        CanCover(node, add_op.left())) {                                       \
      Emit(kArmVpadal | MiscField::encode(NeonU##NeonWidth),                   \
           g.DefineSameAsFirst(node), g.UseRegister(add_op.right()),           \
           g.UseRegister(left.input(0)));                                      \
      return;                                                                  \
    }                                                                          \
    if (right.Is<Opmask::kSimd128##Type##ExtAddPairwise##PairwiseType##S>() && \
        CanCover(node, add_op.right())) {                                      \
      Emit(kArmVpadal | MiscField::encode(NeonS##NeonWidth),                   \
           g.DefineSameAsFirst(node), g.UseRegister(add_op.left()),            \
           g.UseRegister(right.input(0)));                                     \
      return;                                                                  \
    }                                                                          \
    if (right.Is<Opmask::kSimd128##Type##ExtAddPairwise##PairwiseType##U>() && \
        CanCover(node, add_op.right())) {                                      \
      Emit(kArmVpadal | MiscField::encode(NeonU##NeonWidth),                   \
           g.DefineSameAsFirst(node), g.UseRegister(add_op.left()),            \
           g.UseRegister(right.input(0)));                                     \
      return;                                                                  \
    }                                                                          \
    VisitRRR(this, kArm##Type##Add, node);                                     \
  }

VISIT_SIMD_ADD(I16x8, I8x16, 8)
VISIT_SIMD_ADD(I32x4, I16x8, 16)
#undef VISIT_SIMD_ADD

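// What VISIT_SIMD_ADD pattern-matches, in effect:
//
//   add(y, ext_add_pairwise(x))  ==>  VPADAL(y, x)
//
// VPADAL ("pairwise add and accumulate long") sums adjacent lane pairs of x,
// widens them, and accumulates into y in a single instruction, so the
// separate VPADDL + VADD pair never has to be emitted. The CanCover checks
// ensure the pairwise add has no other users, and DefineSameAsFirst reflects
// that VPADAL reads and writes its accumulator operand.
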
void InstructionSelectorT::VisitI64x2SplatI32Pair(OpIndex node) {
  // In Turboshaft this is lowered to an I32x4Splat before selection.
  UNIMPLEMENTED();
}

void InstructionSelectorT::VisitI64x2ReplaceLaneI32Pair(OpIndex node) {
  // In Turboshaft this is lowered to an I32x4ReplaceLane before selection.
  UNIMPLEMENTED();
}

void InstructionSelectorT::VisitI64x2Neg(OpIndex node) {
  ArmOperandGeneratorT g(this);
  Emit(kArmI64x2Neg, g.DefineAsRegister(node),
       g.UseUniqueRegister(this->input_at(node, 0)));
}

void InstructionSelectorT::VisitI64x2Mul(OpIndex node) {
  ArmOperandGeneratorT g(this);
  InstructionOperand temps[] = {g.TempSimd128Register()};
  Emit(kArmI64x2Mul, g.DefineAsRegister(node),
       g.UseUniqueRegister(this->input_at(node, 0)),
       g.UseUniqueRegister(this->input_at(node, 1)),
       arraysize(temps), temps);
}

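// NEON has no 64x64-bit vector multiply, so kArmI64x2Mul presumably expands
// to a sequence of 32-bit widening multiplies and adds; the Simd128 temp and
// the unique input registers give the code generator room to build the
// partial products without clobbering its sources.
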
void InstructionSelectorT::VisitF32x4Sqrt(OpIndex node) {
  ArmOperandGeneratorT g(this);
  // Use fixed registers in the lower 8 Q-registers so that the code generator
  // can directly address the mapped S-registers.
  Emit(kArmF32x4Sqrt, g.DefineAsFixed(node, q0),
       g.UseFixed(this->input_at(node, 0), q0));
}

void InstructionSelectorT::VisitF32x4Div(OpIndex node) {
  ArmOperandGeneratorT g(this);
  // Use fixed registers in the lower 8 Q-registers so that the code generator
  // can directly address the mapped S-registers.
  Emit(kArmF32x4Div, g.DefineAsFixed(node, q0),
       g.UseFixed(this->input_at(node, 0), q0),
       g.UseFixed(this->input_at(node, 1), q1));
}

void InstructionSelectorT::VisitS128Select(OpIndex node) {
  ArmOperandGeneratorT g(this);
  Emit(kArmS128Select, g.DefineSameAsFirst(node),
       g.UseRegister(this->input_at(node, 0)),
       g.UseRegister(this->input_at(node, 1)),
       g.UseRegister(this->input_at(node, 2)));
}

void InstructionSelectorT::VisitI8x16RelaxedLaneSelect(OpIndex node) {
  VisitS128Select(node);
}

void InstructionSelectorT::VisitI16x8RelaxedLaneSelect(OpIndex node) {
  VisitS128Select(node);
}

void InstructionSelectorT::VisitI32x4RelaxedLaneSelect(OpIndex node) {
  VisitS128Select(node);
}

void InstructionSelectorT::VisitI64x2RelaxedLaneSelect(OpIndex node) {
  VisitS128Select(node);
}

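// The relaxed lane selects are specified to give a defined result only when
// each mask lane is all-ones or all-zeros. ARM's bitwise select (VBSL, behind
// kArmS128Select) handles arbitrary masks bit by bit, so reusing
// VisitS128Select is an exact implementation of the relaxed semantics.
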
#define VISIT_SIMD_QFMOP(op)                            \
  void InstructionSelectorT::Visit##op(OpIndex node) {  \
    ArmOperandGeneratorT g(this);                       \
    Emit(kArm##op, g.DefineAsRegister(node),            \
         g.UseUniqueRegister(this->input_at(node, 0)),  \
         g.UseUniqueRegister(this->input_at(node, 1)),  \
         g.UseUniqueRegister(this->input_at(node, 2))); \
  }
VISIT_SIMD_QFMOP(F64x2Qfma)
VISIT_SIMD_QFMOP(F64x2Qfms)
VISIT_SIMD_QFMOP(F32x4Qfma)
VISIT_SIMD_QFMOP(F32x4Qfms)
#undef VISIT_SIMD_QFMOP

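// Qfma/Qfms ("quasi" fused multiply-add/subtract) may be lowered either fused
// or unfused under relaxed-SIMD rules; the unique registers suggest a
// multi-instruction expansion (e.g. VMUL followed by VADD/VSUB) in which the
// inputs must survive the intermediate writes.
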
namespace {

struct ShuffleEntry {
  uint8_t shuffle[kSimd128Size];
  ArchOpcode opcode;
};

static const ShuffleEntry arch_shuffles[] = {
    {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
     kArmS32x4ZipLeft},
    {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
     kArmS32x4ZipRight},
    {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
     kArmS32x4UnzipLeft},
    {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
     kArmS32x4UnzipRight},
    {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
     kArmS32x4TransposeLeft},
    {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
     kArmS32x4TransposeRight},
    {{4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}, kArmS32x2Reverse},

    {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
     kArmS16x8ZipLeft},
    {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
     kArmS16x8ZipRight},
    {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
     kArmS16x8UnzipLeft},
    {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
     kArmS16x8UnzipRight},
    {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
     kArmS16x8TransposeLeft},
    {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
     kArmS16x8TransposeRight},
    {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9}, kArmS16x4Reverse},
    {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}, kArmS16x2Reverse},

    {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
     kArmS8x16ZipLeft},
    {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
     kArmS8x16ZipRight},
    {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
     kArmS8x16UnzipLeft},
    {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
     kArmS8x16UnzipRight},
    {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
     kArmS8x16TransposeLeft},
    {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
     kArmS8x16TransposeRight},
    {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, kArmS8x8Reverse},
    {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, kArmS8x4Reverse},
    {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}, kArmS8x2Reverse}};

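// Each entry above pairs a canonical 16-byte shuffle pattern with the NEON
// permute instruction that produces it: zips (VZIP), unzips (VUZP),
// transposes (VTRN) and lane reversals (VREV) at 32-, 16- and 8-bit
// granularity. Indices 16-31 refer to bytes of the second input.
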
bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
                         size_t num_entries, bool is_swizzle,
                         ArchOpcode* opcode) {
  uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
  for (size_t i = 0; i < num_entries; ++i) {
    const ShuffleEntry& entry = table[i];
    int j = 0;
    for (; j < kSimd128Size; ++j) {
      if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
        break;
      }
    }
    if (j == kSimd128Size) {
      *opcode = entry.opcode;
      return true;
    }
  }
  return false;
}

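// Note the mask trick: for swizzles (both inputs the same) indices are
// compared modulo kSimd128Size, so table entries written in two-input form
// still match the equivalent single-input shuffle; with two distinct inputs
// the full 0-31 index range must match exactly.
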
void ArrangeShuffleTable(ArmOperandGeneratorT* g, OpIndex input0,
                         OpIndex input1, InstructionOperand* src0,
                         InstructionOperand* src1) {
  if (input0 == input1) {
    // Unary: any q-register can be the table.
    *src0 = *src1 = g->UseRegister(input0);
  } else {
    // Binary: the table registers must be consecutive.
    *src0 = g->UseFixed(input0, q0);
    *src1 = g->UseFixed(input1, q1);
  }
}

}  // namespace

void InstructionSelectorT::VisitI8x16Shuffle(OpIndex node) {
  uint8_t shuffle[kSimd128Size];
  bool is_swizzle;
  auto view = this->simd_shuffle_view(node);
  CanonicalizeShuffle(view, shuffle, &is_swizzle);
  OpIndex input0 = view.input(0);
  OpIndex input1 = view.input(1);
  uint8_t shuffle32x4[4];
  ArmOperandGeneratorT g(this);
  int index = 0;
  if (wasm::SimdShuffle::TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
    if (wasm::SimdShuffle::TryMatchSplat<4>(shuffle, &index)) {
      DCHECK_GT(4, index);
      Emit(kArmS128Dup, g.DefineAsRegister(node), g.UseRegister(input0),
           g.UseImmediate(Neon32), g.UseImmediate(index % 4));
    } else if (wasm::SimdShuffle::TryMatchIdentity(shuffle)) {
      // Bypass normal shuffle code generation in this case: the input can
      // simply be renamed to the output.
      MarkAsUsed(input0);
      MarkAsDefined(node);
      SetRename(node, input0);
    } else {
      // 32x4 shuffles are implemented as s-register moves. To simplify these,
      // make sure the destination is distinct from both sources.
      InstructionOperand src0 = g.UseUniqueRegister(input0);
      InstructionOperand src1 = is_swizzle ? src0 : g.UseUniqueRegister(input1);
      Emit(kArmS32x4Shuffle, g.DefineAsRegister(node), src0, src1,
           g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle32x4)));
    }
    return;
  }
  if (wasm::SimdShuffle::TryMatchSplat<8>(shuffle, &index)) {
    DCHECK_GT(8, index);
    Emit(kArmS128Dup, g.DefineAsRegister(node), g.UseRegister(input0),
         g.UseImmediate(Neon16), g.UseImmediate(index % 8));
    return;
  }
  if (wasm::SimdShuffle::TryMatchSplat<16>(shuffle, &index)) {
    DCHECK_GT(16, index);
    Emit(kArmS128Dup, g.DefineAsRegister(node), g.UseRegister(input0),
         g.UseImmediate(Neon8), g.UseImmediate(index % 16));
    return;
  }
  ArchOpcode opcode;
  if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
                          is_swizzle, &opcode)) {
    VisitRRRShuffle(this, opcode, node, input0, input1);
    return;
  }
  uint8_t offset;
  if (wasm::SimdShuffle::TryMatchConcat(shuffle, &offset)) {
    Emit(kArmS8x16Concat, g.DefineAsRegister(node), g.UseRegister(input0),
         g.UseRegister(input1), g.UseImmediate(offset));
    return;
  }
  // The code generator uses vtbl; arrange sources to form a valid lookup
  // table.
  InstructionOperand src0, src1;
  ArrangeShuffleTable(&g, input0, input1, &src0, &src1);
  Emit(kArmI8x16Shuffle, g.DefineAsRegister(node), src0, src1,
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle)),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 4)),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 8)),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 12)));
}

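// Matching order in VisitI8x16Shuffle, cheapest first: 32x4 splats,
// identities and s-register shuffles; then 16x8 and 8x16 splats (VDUP); then
// the arch_shuffles table (a single NEON permute); then byte-wise concat
// (VEXT); and finally the fully general VTBL-based kArmI8x16Shuffle with the
// shuffle packed into four immediates.
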
void InstructionSelectorT::VisitSetStackPointer(OpIndex node) {
  OperandGenerator g(this);
  auto input = g.UseRegister(this->input_at(node, 0));
  Emit(kArchSetStackPointer, 0, nullptr, 1, &input);
}

void InstructionSelectorT::VisitI8x16Swizzle(OpIndex node) {
  ArmOperandGeneratorT g(this);
  // The table (input 0) must stay unclobbered while the result is assembled,
  // so it must not be allocated to the same register as the output.
  Emit(kArmI8x16Swizzle, g.DefineAsRegister(node),
       g.UseUniqueRegister(this->input_at(node, 0)),
       g.UseRegister(this->input_at(node, 1)));
}
#endif  // V8_ENABLE_WEBASSEMBLY

void InstructionSelectorT::VisitSignExtendWord8ToInt32(OpIndex node) {
  ArmOperandGeneratorT g(this);
  Emit(kArmSxtb, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)), g.TempImmediate(0));
}

void InstructionSelectorT::VisitSignExtendWord16ToInt32(OpIndex node) {
  ArmOperandGeneratorT g(this);
  Emit(kArmSxth, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)), g.TempImmediate(0));
}

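// kArmSxtb/kArmSxth map to the SXTB/SXTH instructions; the trailing
// TempImmediate(0) is the instruction's rotate field (ROR #0), i.e. extend
// the operand as-is without pre-rotating it.
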
void InstructionSelectorT::VisitInt32AbsWithOverflow(OpIndex node) {
  UNIMPLEMENTED();
}

void InstructionSelectorT::VisitInt64AbsWithOverflow(OpIndex node) {
  UNIMPLEMENTED();
}

namespace {

template <ArchOpcode opcode>
void VisitBitMask(InstructionSelectorT* selector, OpIndex node) {
  ArmOperandGeneratorT g(selector);
  InstructionOperand temps[] = {g.TempSimd128Register()};
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(selector->input_at(node, 0)),
                 arraysize(temps), temps);
}

}  // namespace

void InstructionSelectorT::VisitI8x16BitMask(OpIndex node) {
  VisitBitMask<kArmI8x16BitMask>(this, node);
}

#if V8_ENABLE_WEBASSEMBLY
void InstructionSelectorT::VisitI16x8BitMask(OpIndex node) {
  VisitBitMask<kArmI16x8BitMask>(this, node);
}

void InstructionSelectorT::VisitI32x4BitMask(OpIndex node) {
  VisitBitMask<kArmI32x4BitMask>(this, node);
}

void InstructionSelectorT::VisitI64x2BitMask(OpIndex node) {
  VisitBitMask<kArmI64x2BitMask>(this, node);
}

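// There is no single NEON instruction that extracts one bit per lane. The
// usual idiom (and a reasonable guess at what the code generator emits) masks
// each lane's sign bit, ANDs in a per-lane power-of-two weight held in the
// temp Simd128 register, and then horizontally adds the lanes down to the
// scalar mask.
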
namespace {

void VisitF32x4PminOrPmax(InstructionSelectorT* selector, ArchOpcode opcode,
                          OpIndex node) {
  ArmOperandGeneratorT g(selector);
  // Need all-unique registers because we first compare the two inputs and
  // then need them to remain unchanged for the bitselect afterwards.
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseUniqueRegister(selector->input_at(node, 0)),
                 g.UseUniqueRegister(selector->input_at(node, 1)));
}

void VisitF64x2PminOrPMax(InstructionSelectorT* selector, ArchOpcode opcode,
                          OpIndex node) {
  ArmOperandGeneratorT g(selector);
  selector->Emit(opcode, g.DefineSameAsFirst(node),
                 g.UseRegister(selector->input_at(node, 0)),
                 g.UseRegister(selector->input_at(node, 1)));
}

}  // namespace

void InstructionSelectorT::VisitF32x4Pmin(OpIndex node) {
  VisitF32x4PminOrPmax(this, kArmF32x4Pmin, node);
}

void InstructionSelectorT::VisitF32x4Pmax(OpIndex node) {
  VisitF32x4PminOrPmax(this, kArmF32x4Pmax, node);
}

void InstructionSelectorT::VisitF64x2Pmin(OpIndex node) {
  VisitF64x2PminOrPMax(this, kArmF64x2Pmin, node);
}

void InstructionSelectorT::VisitF64x2Pmax(OpIndex node) {
  VisitF64x2PminOrPMax(this, kArmF64x2Pmax, node);
}

void InstructionSelectorT::VisitF64x2RelaxedMin(OpIndex node) {
  VisitF64x2Pmin(node);
}

void InstructionSelectorT::VisitF64x2RelaxedMax(OpIndex node) {
  VisitF64x2Pmax(node);
}

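// RelaxedMin/Max may return either input when NaNs or +/-0 are involved;
// pseudo-min/max (a compare followed by a bitselect) is one of the permitted
// behaviors, so the Pmin/Pmax visitors can be reused unchanged.
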
#define EXT_MUL_LIST(V)                            \
  V(I16x8ExtMulLowI8x16S, kArmVmullLow, NeonS8)    \
  V(I16x8ExtMulHighI8x16S, kArmVmullHigh, NeonS8)  \
  V(I16x8ExtMulLowI8x16U, kArmVmullLow, NeonU8)    \
  V(I16x8ExtMulHighI8x16U, kArmVmullHigh, NeonU8)  \
  V(I32x4ExtMulLowI16x8S, kArmVmullLow, NeonS16)   \
  V(I32x4ExtMulHighI16x8S, kArmVmullHigh, NeonS16) \
  V(I32x4ExtMulLowI16x8U, kArmVmullLow, NeonU16)   \
  V(I32x4ExtMulHighI16x8U, kArmVmullHigh, NeonU16) \
  V(I64x2ExtMulLowI32x4S, kArmVmullLow, NeonS32)   \
  V(I64x2ExtMulHighI32x4S, kArmVmullHigh, NeonS32) \
  V(I64x2ExtMulLowI32x4U, kArmVmullLow, NeonU32)   \
  V(I64x2ExtMulHighI32x4U, kArmVmullHigh, NeonU32)

#define VISIT_EXT_MUL(OPCODE, VMULL, NEONSIZE)                 \
  void InstructionSelectorT::Visit##OPCODE(OpIndex node) {     \
    VisitRRR(this, VMULL | MiscField::encode(NEONSIZE), node); \
  }
EXT_MUL_LIST(VISIT_EXT_MUL)
#undef VISIT_EXT_MUL
#undef EXT_MUL_LIST

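// The extended multiplies map onto VMULL, which multiplies either the low
// (kArmVmullLow) or high (kArmVmullHigh) half of each input and produces
// lanes twice as wide; MiscField carries the NEON element type, so e.g.
// I32x4ExtMulLowI16x8S becomes a VMULL.S16 over the low four 16-bit lanes.
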
#define VISIT_EXTADD_PAIRWISE(OPCODE, NEONSIZE)                    \
  void InstructionSelectorT::Visit##OPCODE(OpIndex node) {         \
    VisitRR(this, kArmVpaddl | MiscField::encode(NEONSIZE), node); \
  }
VISIT_EXTADD_PAIRWISE(I16x8ExtAddPairwiseI8x16S, NeonS8)
VISIT_EXTADD_PAIRWISE(I16x8ExtAddPairwiseI8x16U, NeonU8)
VISIT_EXTADD_PAIRWISE(I32x4ExtAddPairwiseI16x8S, NeonS16)
VISIT_EXTADD_PAIRWISE(I32x4ExtAddPairwiseI16x8U, NeonU16)
#undef VISIT_EXTADD_PAIRWISE

void InstructionSelectorT::VisitF64x2ConvertLowI32x4S(OpIndex node) {
  ArmOperandGeneratorT g(this);
  Emit(kArmF64x2ConvertLowI32x4S, g.DefineAsRegister(node),
       g.UseFixed(this->input_at(node, 0), q0));
}

void InstructionSelectorT::VisitF64x2ConvertLowI32x4U(OpIndex node) {
  ArmOperandGeneratorT g(this);
  Emit(kArmF64x2ConvertLowI32x4U, g.DefineAsRegister(node),
       g.UseFixed(this->input_at(node, 0), q0));
}

void InstructionSelectorT::VisitI32x4TruncSatF64x2SZero(OpIndex node) {
  ArmOperandGeneratorT g(this);
  Emit(kArmI32x4TruncSatF64x2SZero, g.DefineAsFixed(node, q0),
       g.UseUniqueRegister(this->input_at(node, 0)));
}

void InstructionSelectorT::VisitI32x4TruncSatF64x2UZero(OpIndex node) {
  ArmOperandGeneratorT g(this);
  Emit(kArmI32x4TruncSatF64x2UZero, g.DefineAsFixed(node, q0),
       g.UseUniqueRegister(this->input_at(node, 0)));
}

void InstructionSelectorT::VisitF32x4DemoteF64x2Zero(OpIndex node) {
  ArmOperandGeneratorT g(this);
  Emit(kArmF32x4DemoteF64x2Zero, g.DefineAsFixed(node, q0),
       g.UseUniqueRegister(this->input_at(node, 0)));
}

void InstructionSelectorT::VisitF64x2PromoteLowF32x4(OpIndex node) {
  ArmOperandGeneratorT g(this);
  Emit(kArmF64x2PromoteLowF32x4, g.DefineAsRegister(node),
       g.UseFixed(this->input_at(node, 0), q0));
}

void InstructionSelectorT::VisitI32x4RelaxedTruncF64x2SZero(OpIndex node) {
  VisitI32x4TruncSatF64x2SZero(node);
}

void InstructionSelectorT::VisitI32x4RelaxedTruncF64x2UZero(OpIndex node) {
  VisitI32x4TruncSatF64x2UZero(node);
}
#endif  // V8_ENABLE_WEBASSEMBLY

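// The scalar float-to-int truncations below fold the "overflow to min"
// variant into the same opcode: when the Opmask identifies that flavor,
// MiscField::encode(true) flags the instruction so the code generator applies
// the out-of-range behavior that operation kind requires.
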
void InstructionSelectorT::VisitTruncateFloat32ToInt32(OpIndex node) {
  ArmOperandGeneratorT g(this);
  const Operation& op = this->Get(node);
  InstructionCode opcode = kArmVcvtS32F32;
  if (op.Is<Opmask::kTruncateFloat32ToInt32OverflowToMin>()) {
    opcode |= MiscField::encode(true);
  }
  Emit(opcode, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));
}

void InstructionSelectorT::VisitTruncateFloat32ToUint32(OpIndex node) {
  ArmOperandGeneratorT g(this);
  const Operation& op = this->Get(node);
  InstructionCode opcode = kArmVcvtU32F32;
  if (op.Is<Opmask::kTruncateFloat32ToUint32OverflowToMin>()) {
    opcode |= MiscField::encode(true);
  }
  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(op.input(0)));
}

void InstructionSelectorT::AddOutputToSelectContinuation(OperandGenerator* g,
                                                         int first_input_index,
                                                         OpIndex node) {
  UNREACHABLE();
}