using namespace turboshaft;
bool CanBeImmediate(OpIndex node, ImmediateMode mode) {
  const ConstantOp* constant = selector()->Get(node).TryCast<ConstantOp>();
  if (!constant) return false;
  if (constant->kind == ConstantOp::Kind::kCompressedHeapObject) {
    if (!COMPRESS_POINTERS_BOOL) return false;
    // Only read-only roots can be matched as compressed immediates; their
    // static pointer value is then checked against the mode below.
    RootIndex root_index;
    const RootsTable& roots_table = selector()->isolate()->roots_table();
    if (!roots_table.IsRootHandle(constant->handle(), &root_index) ||
        !RootsTable::IsReadOnly(root_index)) {
      return false;
    }
    return CanBeImmediate(
        MacroAssemblerBase::ReadOnlyRootPtr(root_index, selector()->isolate()),
        mode);
  }
  int64_t value;
  return selector()->MatchSignedIntegralConstant(node, &value) &&
         CanBeImmediate(value, mode);
}

bool CanBeImmediate(int64_t value, ImmediateMode mode) {
  switch (mode) {
    case kInt16Imm:
      return is_int16(value);
    case kInt16Imm_Unsigned:
      return is_uint16(value);
    case kInt16Imm_Negate:
      return is_int16(-value);
    case kInt16Imm_4ByteAligned:
      return is_int16(value) && !(value & 3);
    case kShift32Imm:
      return 0 <= value && value < 32;
    case kInt34Imm:
      return is_int34(value);
    case kShift64Imm:
      return 0 <= value && value < 64;
    default:
      return false;
  }
}
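// Illustration (hedged, not from the original source): kInt16Imm_4ByteAligned
// mirrors the PPC64 DS-form encoding used by `ld`/`std`, whose 16-bit signed
// displacement must have its low two bits clear. 0x7FFC passes both checks
// above, while 0x7FFE fits in int16 but fails `!(value & 3)`.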
static void VisitRR(InstructionSelectorT* selector, InstructionCode opcode,
                    OpIndex node) {
  PPCOperandGeneratorT g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(selector->input_at(node, 0)));
}

static void VisitRRR(InstructionSelectorT* selector, InstructionCode opcode,
                     OpIndex node) {
  PPCOperandGeneratorT g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(selector->input_at(node, 0)),
                 g.UseRegister(selector->input_at(node, 1)));
}

static void VisitRRO(InstructionSelectorT* selector, InstructionCode opcode,
                     OpIndex node, ImmediateMode operand_mode) {
  PPCOperandGeneratorT g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(selector->input_at(node, 0)),
                 g.UseOperand(selector->input_at(node, 1), operand_mode));
}
static void VisitTryTruncateDouble(InstructionSelectorT* selector,
                                   InstructionCode opcode, OpIndex node) {
  PPCOperandGeneratorT g(selector);
  InstructionOperand inputs[] = {g.UseRegister(selector->input_at(node, 0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  OptionalOpIndex success_output = selector->FindProjection(node, 1);
  if (success_output.valid()) {
    outputs[output_count++] = g.DefineAsRegister(success_output.value());
  }

  selector->Emit(opcode, output_count, outputs, 1, inputs);
}
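// The optional second output is the "success" projection of a checked
// truncation: projection 0 carries the truncated value and projection 1,
// when it has uses, carries whether the input was in range. Only then does
// the emitted instruction get a second output register.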
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelectorT* selector, OpIndex node,
                       InstructionCode opcode, ImmediateMode operand_mode,
                       FlagsContinuationT* cont) {
  PPCOperandGeneratorT g(selector);
  InstructionOperand inputs[4];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  inputs[input_count++] = g.UseRegister(selector->input_at(node, 0));
  inputs[input_count++] =
      g.UseOperand(selector->input_at(node, 1), operand_mode);

  if (cont->IsDeoptimize()) {
    // If we can deoptimize as a result of the binop, we need to make sure
    // that the deopt inputs are not overwritten by the binop result. One way
    // to achieve that is to declare the output register as same-as-first.
    outputs[output_count++] = g.DefineSameAsFirst(node);
  } else {
    outputs[output_count++] = g.DefineAsRegister(node);
  }

  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}

static void VisitBinop(InstructionSelectorT* selector, OpIndex node,
                       InstructionCode opcode, ImmediateMode operand_mode) {
  FlagsContinuationT cont;
  VisitBinop(selector, node, opcode, operand_mode, &cont);
}
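// Hypothetical usage sketch (names as used elsewhere in this file):
//   VisitBinop(this, node, kPPC_Add64, kInt16Imm);
// emits an immediate form when the right operand satisfies CanBeImmediate
// (g.UseOperand falls back to a register otherwise), and routes through
// EmitWithContinuation so deopt/branch continuations attach uniformly.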
void InstructionSelectorT::VisitStackSlot(OpIndex node) {
  const StackSlotOp& stack_slot = Cast<StackSlotOp>(node);
  int slot = frame_->AllocateSpillSlot(stack_slot.size, stack_slot.alignment,
                                       stack_slot.is_tagged);
  OperandGenerator g(this);

  Emit(kArchStackSlot, g.DefineAsRegister(node),
       sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
void InstructionSelectorT::VisitAbortCSADcheck(OpIndex node) {
  PPCOperandGeneratorT g(this);
  Emit(kArchAbortCSADcheck, g.NoOutput(),
       g.UseFixed(this->input_at(node, 0), r4));
}
static ArchOpcode SelectLoadOpcode(MemoryRepresentation loaded_rep,
                                   RegisterRepresentation result_rep) {
  switch (loaded_rep) {
    case MemoryRepresentation::Int8():
      return kPPC_LoadWordS8;
    case MemoryRepresentation::Uint8():
      return kPPC_LoadWordU8;
    case MemoryRepresentation::Int16():
      return kPPC_LoadWordS16;
    case MemoryRepresentation::Uint16():
      return kPPC_LoadWordU16;
    case MemoryRepresentation::Int32():
    case MemoryRepresentation::Uint32():
      return kPPC_LoadWordU32;
    case MemoryRepresentation::Int64():
    case MemoryRepresentation::Uint64():
      return kPPC_LoadWord64;
    case MemoryRepresentation::Float32():
      return kPPC_LoadFloat32;
    case MemoryRepresentation::Float64():
      return kPPC_LoadDouble;
#ifdef V8_COMPRESS_POINTERS
    case MemoryRepresentation::AnyTagged():
    case MemoryRepresentation::TaggedPointer():
      if (result_rep == RegisterRepresentation::Compressed()) {
        return kPPC_LoadWordS32;
      }
      return kPPC_LoadDecompressTagged;
    case MemoryRepresentation::TaggedSigned():
      if (result_rep == RegisterRepresentation::Compressed()) {
        return kPPC_LoadWordS32;
      }
      return kPPC_LoadDecompressTaggedSigned;
#else
    case MemoryRepresentation::AnyTagged():
    case MemoryRepresentation::TaggedPointer():
    case MemoryRepresentation::TaggedSigned():
      return kPPC_LoadWord64;
#endif
    case MemoryRepresentation::AnyUncompressedTagged():
    case MemoryRepresentation::UncompressedTaggedPointer():
    case MemoryRepresentation::UncompressedTaggedSigned():
      return kPPC_LoadWord64;
    case MemoryRepresentation::SandboxedPointer():
      return kPPC_LoadDecodeSandboxedPointer;
    case MemoryRepresentation::Simd128():
      return kPPC_LoadSimd128;
    default:
      UNREACHABLE();
  }
}
static ArchOpcode SelectLoadOpcode(LoadRepresentation load_rep) {
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      return kPPC_LoadFloat32;
    case MachineRepresentation::kFloat64:
      return kPPC_LoadDouble;
    case MachineRepresentation::kBit:
    case MachineRepresentation::kWord8:
      return load_rep.IsSigned() ? kPPC_LoadWordS8 : kPPC_LoadWordU8;
    case MachineRepresentation::kWord16:
      return load_rep.IsSigned() ? kPPC_LoadWordS16 : kPPC_LoadWordU16;
    case MachineRepresentation::kWord32:
      return kPPC_LoadWordU32;
    case MachineRepresentation::kCompressedPointer:
    case MachineRepresentation::kCompressed:
#ifdef V8_COMPRESS_POINTERS
      return kPPC_LoadWordS32;
#else
      UNREACHABLE();
#endif
    case MachineRepresentation::kSandboxedPointer:
      return kPPC_LoadDecodeSandboxedPointer;
#ifdef V8_COMPRESS_POINTERS
    case MachineRepresentation::kTaggedSigned:
      return kPPC_LoadDecompressTaggedSigned;
    case MachineRepresentation::kTaggedPointer:
      return kPPC_LoadDecompressTagged;
    case MachineRepresentation::kTagged:
      return kPPC_LoadDecompressTagged;
#else
    case MachineRepresentation::kTaggedSigned:
    case MachineRepresentation::kTaggedPointer:
    case MachineRepresentation::kTagged:
#endif
    case MachineRepresentation::kWord64:
      return kPPC_LoadWord64;
    case MachineRepresentation::kSimd128:
      return kPPC_LoadSimd128;
    default:
      UNREACHABLE();
  }
}
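// Example of the mapping above (illustrative): a compressed tagged field
// load whose result stays Compressed remains a plain 32-bit sign-extending
// load (kPPC_LoadWordS32), while the same field loaded into a Tagged
// register uses kPPC_LoadDecompressTagged to also decompress it.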
static void VisitLoadCommon(InstructionSelectorT* selector, OpIndex node,
                            ImmediateMode mode, InstructionCode opcode) {
  PPCOperandGeneratorT g(selector);
  auto load_view = selector->load_view(node);
  OpIndex base = load_view.base();
  OpIndex offset = load_view.index().value();
  bool is_atomic = load_view.is_atomic();

  if (g.CanBeImmediate(offset, mode)) {
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
                   g.DefineAsRegister(node), g.UseRegister(base),
                   g.UseImmediate(offset), g.UseImmediate(is_atomic));
  } else {
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRR),
                   g.DefineAsRegister(node), g.UseRegister(base),
                   g.UseRegister(offset), g.UseImmediate(is_atomic));
  }
}
void InstructionSelectorT::VisitProtectedLoad(OpIndex node) {
  UNIMPLEMENTED();
}
static void VisitStoreCommon(InstructionSelectorT* selector, OpIndex node,
                             StoreRepresentation store_rep,
                             std::optional<AtomicMemoryOrder> atomic_order) {
  PPCOperandGeneratorT g(selector);
  auto store_view = selector->store_view(node);
  OpIndex base = store_view.base();
  OpIndex offset = store_view.index().value();
  OpIndex value = store_view.value();
  bool is_atomic = store_view.is_atomic();

  MachineRepresentation rep = store_rep.representation();
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();

  if (v8_flags.enable_unconditional_write_barriers &&
      CanBeTaggedOrCompressedPointer(rep)) {
    write_barrier_kind = kFullWriteBarrier;
  }

  if (write_barrier_kind != kNoWriteBarrier &&
      !v8_flags.disable_write_barriers) {
    // Uncompressed tagged stores never take the write-barrier path here.
    CHECK((store_view.ts_stored_rep() !=
           MemoryRepresentation::AnyUncompressedTagged()) &&
          (store_view.ts_stored_rep() !=
           MemoryRepresentation::UncompressedTaggedPointer()) &&
          (store_view.ts_stored_rep() !=
           MemoryRepresentation::UncompressedTaggedSigned()));
    AddressingMode addressing_mode;
    InstructionOperand inputs[4];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    // OutOfLineRecordWrite uses the offset in an 'add' instruction as well
    // as for the store itself, so we must check compatibility with both.
    if (g.CanBeImmediate(offset, kInt16Imm) &&
        g.CanBeImmediate(offset, kInt16Imm_4ByteAligned)) {
      inputs[input_count++] = g.UseImmediate(offset);
      addressing_mode = kMode_MRI;
    } else {
      inputs[input_count++] = g.UseUniqueRegister(offset);
      addressing_mode = kMode_MRR;
    }
    inputs[input_count++] = g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode =
        WriteBarrierKindToRecordWriteMode(write_barrier_kind);
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code;
    if (rep == MachineRepresentation::kIndirectPointer) {
      DCHECK_EQ(write_barrier_kind, kIndirectPointerWriteBarrier);
      // The IndirectPointerTag becomes an additional input in this case.
      code = kArchStoreIndirectWithWriteBarrier;
      IndirectPointerTag tag = store_view.indirect_pointer_tag();
      inputs[input_count++] = g.UseImmediate(static_cast<int64_t>(tag));
    } else {
      code = kArchStoreWithWriteBarrier;
    }
    code |= AddressingModeField::encode(addressing_mode);
    code |= RecordWriteModeField::encode(record_write_mode);
    CHECK_EQ(is_atomic, false);
    selector->Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
    return;
  }

  ArchOpcode opcode;
  switch (store_view.ts_stored_rep()) {
    case MemoryRepresentation::Int8():
    case MemoryRepresentation::Uint8():
      opcode = kPPC_StoreWord8;
      break;
    case MemoryRepresentation::Int16():
    case MemoryRepresentation::Uint16():
      opcode = kPPC_StoreWord16;
      break;
    case MemoryRepresentation::Int32():
    case MemoryRepresentation::Uint32(): {
      opcode = kPPC_StoreWord32;
      // Fold an explicit byte reversal of the value into the store.
      const Operation& reverse_op = selector->Get(value);
      if (reverse_op.Is<Opmask::kWord32ReverseBytes>()) {
        opcode = kPPC_StoreByteRev32;
        value = selector->input_at(value, 0);
      }
      break;
    }
    case MemoryRepresentation::Int64():
    case MemoryRepresentation::Uint64(): {
      opcode = kPPC_StoreWord64;
      const Operation& reverse_op = selector->Get(value);
      if (reverse_op.Is<Opmask::kWord64ReverseBytes>()) {
        opcode = kPPC_StoreByteRev64;
        value = selector->input_at(value, 0);
      }
      break;
    }
    case MemoryRepresentation::Float32():
      opcode = kPPC_StoreFloat32;
      break;
    case MemoryRepresentation::Float64():
      opcode = kPPC_StoreDouble;
      break;
    case MemoryRepresentation::AnyTagged():
    case MemoryRepresentation::TaggedSigned():
    case MemoryRepresentation::TaggedPointer():
      opcode = kPPC_StoreCompressTagged;
      break;
    case MemoryRepresentation::AnyUncompressedTagged():
    case MemoryRepresentation::UncompressedTaggedSigned():
    case MemoryRepresentation::UncompressedTaggedPointer():
      opcode = kPPC_StoreWord64;
      break;
    case MemoryRepresentation::IndirectPointer():
      opcode = kPPC_StoreIndirectPointer;
      break;
    case MemoryRepresentation::SandboxedPointer():
      opcode = kPPC_StoreEncodeSandboxedPointer;
      break;
    case MemoryRepresentation::Simd128():
      opcode = kPPC_StoreSimd128;
      break;
    default:
      UNREACHABLE();
  }

  if (g.CanBeImmediate(offset, kInt16Imm)) {
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
                   g.NoOutput(), g.UseRegister(base), g.UseImmediate(offset),
                   g.UseRegister(value), g.UseImmediate(is_atomic));
  } else {
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRR),
                   g.NoOutput(), g.UseRegister(base), g.UseRegister(offset),
                   g.UseRegister(value), g.UseImmediate(is_atomic));
  }
}
void InstructionSelectorT::VisitStore(OpIndex node) {
  VisitStoreCommon(this, node, this->store_view(node).stored_rep(),
                   std::nullopt);
}

void InstructionSelectorT::VisitProtectedStore(OpIndex node) {
  UNIMPLEMENTED();
}
  // Inside VisitLogical: map the instruction to an equivalent operation with
  // an inverted input, so And/Or with a complemented operand can use the
  // complement instructions directly.
  ArchOpcode inv_opcode = opcode;
  switch (opcode) {
    case kPPC_And:
      inv_opcode = kPPC_AndComplement;
      break;
    case kPPC_Or:
      inv_opcode = kPPC_OrComplement;
      break;
    default:
      UNREACHABLE();
  }
static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
  int mask_width = base::bits::CountPopulation(value);
  int mask_msb = base::bits::CountLeadingZeros32(value);
  int mask_lsb = base::bits::CountTrailingZeros32(value);
  if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 32))
    return false;
  *mb = mask_lsb + mask_width - 1;
  *me = mask_lsb;
  return true;
}

static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
  int mask_width = base::bits::CountPopulation(value);
  int mask_msb = base::bits::CountLeadingZeros64(value);
  int mask_lsb = base::bits::CountTrailingZeros64(value);
  if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 64))
    return false;
  *mb = mask_lsb + mask_width - 1;
  *me = mask_lsb;
  return true;
}
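// Worked example: 0x000001FE has 8 contiguous set bits with 23 leading and
// 1 trailing zero, so 23 + 8 + 1 == 32 and the mask qualifies: *mb = 1 + 8
// - 1 = 8 (highest set bit) and *me = 1 (lowest set bit). A split mask such
// as 0x00000505 fails the contiguity test and is rejected.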
void InstructionSelectorT::VisitWord32And(OpIndex node) {
  PPCOperandGeneratorT g(this);
  const WordBinopOp& bitwise_and = Get(node).Cast<WordBinopOp>();
  int mb = 0;
  int me = 0;
  uint32_t right_value;
  if (MatchIntegralWord32Constant(bitwise_and.right(), &right_value) &&
      IsContiguousMask32(right_value, &mb, &me)) {
    int sh = 0;
    OpIndex left = bitwise_and.left();
    const Operation& lhs = Get(left);
    if ((lhs.Is<Opmask::kWord32ShiftRightLogical>() ||
         lhs.Is<Opmask::kWord32ShiftLeft>()) &&
        CanCover(node, left)) {
      // Try to absorb the left/right shift into a single rotate-and-mask.
      const ShiftOp& shift_op = lhs.Cast<ShiftOp>();
      uint32_t shift_by;
      if (MatchIntegralWord32Constant(shift_op.right(), &shift_by) &&
          shift_by < 32) {
        left = shift_op.left();
        sh = shift_by;
        if (lhs.Is<Opmask::kWord32ShiftRightLogical>()) {
          // Adjust the mask such that it doesn't include any rotated bits.
          if (mb > 31 - sh) mb = 31 - sh;
          sh = (32 - sh) & 0x1F;
        } else {
          // Adjust the mask such that it doesn't include any rotated bits.
          if (me < sh) me = sh;
        }
      }
    }
    if (mb >= me) {
      Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node),
           g.UseRegister(left), g.TempImmediate(sh), g.TempImmediate(mb),
           g.TempImmediate(me));
      return;
    }
  }
  VisitLogical(this, node, kPPC_And, CanCover(node, bitwise_and.left()),
               CanCover(node, bitwise_and.right()), kInt16Imm_Unsigned);
}
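// Example of the fold above (illustrative): Word32And(Word32Shl(x, 8),
// 0x00FFFF00) becomes one kPPC_RotLeftAndMask32 (an rlwinm-style rotate
// left by 8 keeping bits mb=23..me=8); no separate shift instruction is
// emitted.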
void InstructionSelectorT::VisitWord64And(OpIndex node) {
  PPCOperandGeneratorT g(this);
  const WordBinopOp& bitwise_and = Get(node).Cast<WordBinopOp>();
  int mb = 0;
  int me = 0;
  uint64_t right_value;
  if (MatchIntegralWord64Constant(bitwise_and.right(), &right_value) &&
      IsContiguousMask64(right_value, &mb, &me)) {
    int sh = 0;
    OpIndex left = bitwise_and.left();
    const Operation& lhs = Get(left);
    if ((lhs.Is<Opmask::kWord64ShiftRightLogical>() ||
         lhs.Is<Opmask::kWord64ShiftLeft>()) &&
        CanCover(node, left)) {
      // Try to absorb the shift into a 64-bit rotate-and-clear.
      const ShiftOp& shift_op = lhs.Cast<ShiftOp>();
      int64_t shift_by;
      if (MatchSignedIntegralConstant(shift_op.right(), &shift_by) &&
          0 <= shift_by && shift_by < 64) {
        left = shift_op.left();
        sh = static_cast<int>(shift_by);
        if (lhs.Is<Opmask::kWord64ShiftRightLogical>()) {
          // Adjust the mask such that it doesn't include any rotated bits.
          if (mb > 63 - sh) mb = 63 - sh;
          sh = (64 - sh) & 0x3F;
        } else {
          // Adjust the mask such that it doesn't include any rotated bits.
          if (me < sh) me = sh;
        }
      }
    }
    if (mb >= me) {
      bool match = false;
      ArchOpcode opcode;
      int mask;
      if (me == 0) {
        match = true;
        opcode = kPPC_RotLeftAndClearLeft64;
        mask = mb;
      } else if (mb == 63) {
        match = true;
        opcode = kPPC_RotLeftAndClearRight64;
        mask = me;
      } else if (sh && me <= sh && lhs.Is<Opmask::kWord64ShiftLeft>()) {
        match = true;
        opcode = kPPC_RotLeftAndClear64;
        mask = mb;
      }
      if (match) {
        Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
             g.TempImmediate(sh), g.TempImmediate(mask));
        return;
      }
    }
  }
  VisitLogical(this, node, kPPC_And, CanCover(node, bitwise_and.left()),
               CanCover(node, bitwise_and.right()), kInt16Imm_Unsigned);
}
void InstructionSelectorT::VisitWord32Or(OpIndex node) {
  const WordBinopOp& op = Get(node).Cast<WordBinopOp>();
  VisitLogical(this, node, kPPC_Or, CanCover(node, op.left()),
               CanCover(node, op.right()), kInt16Imm_Unsigned);
}

void InstructionSelectorT::VisitWord64Or(OpIndex node) {
  const WordBinopOp& op = Get(node).Cast<WordBinopOp>();
  VisitLogical(this, node, kPPC_Or, CanCover(node, op.left()),
               CanCover(node, op.right()), kInt16Imm_Unsigned);
}
void InstructionSelectorT::VisitWord32Xor(OpIndex node) {
  PPCOperandGeneratorT g(this);
  const WordBinopOp& bitwise_xor = Get(node).Cast<WordBinopOp>();
  uint32_t mask;
  if (MatchIntegralWord32Constant(bitwise_xor.right(), &mask) &&
      mask == 0xFFFFFFFF) {
    // Xor with all-ones is just bitwise negation.
    Emit(kPPC_Not, g.DefineAsRegister(node), g.UseRegister(bitwise_xor.left()));
  } else {
    VisitBinop(this, node, kPPC_Xor, kInt16Imm_Unsigned);
  }
}
void InstructionSelectorT::VisitStackPointerGreaterThan(
    OpIndex node, FlagsContinuation* cont) {
  const StackPointerGreaterThanOp& op =
      Get(node).Cast<StackPointerGreaterThanOp>();
  StackCheckKind kind = op.kind;
  OpIndex value = op.stack_limit();
  InstructionCode opcode =
      kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));

  PPCOperandGeneratorT g(this);

  // No outputs.
  InstructionOperand* const outputs = nullptr;
  const int output_count = 0;

  // Applying an offset to this stack check requires a temp register. Offsets
  // are only applied to the first stack check. If applying an offset, we must
  // ensure the input and temp registers do not alias, thus kUniqueRegister.
  InstructionOperand temps[] = {g.TempRegister()};
  const int temp_count = (kind == StackCheckKind::kJSFunctionEntry) ? 1 : 0;
  const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
                                 ? OperandGenerator::kUniqueRegister
                                 : OperandGenerator::kRegister;

  InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
  static constexpr int input_count = arraysize(inputs);

  EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
                       temp_count, temps, cont);
}
void InstructionSelectorT::VisitWord64Xor(OpIndex node) {
  PPCOperandGeneratorT g(this);
  const WordBinopOp& bitwise_xor = Get(node).Cast<WordBinopOp>();
  int64_t mask;
  if (MatchSignedIntegralConstant(bitwise_xor.right(), &mask) && mask == -1) {
    // Xor with all-ones is just bitwise negation.
    Emit(kPPC_Not, g.DefineAsRegister(node), g.UseRegister(bitwise_xor.left()));
  } else {
    VisitBinop(this, node, kPPC_Xor, kInt16Imm_Unsigned);
  }
}
void InstructionSelectorT::VisitWord32Shl(OpIndex node) {
  PPCOperandGeneratorT g(this);
  const ShiftOp& shl = Get(node).Cast<ShiftOp>();
  const Operation& lhs = Get(shl.left());
  int64_t value;
  if (lhs.Is<Opmask::kWord32BitwiseAnd>() &&
      this->MatchSignedIntegralConstant(shl.right(), &value) &&
      0 <= value && value < 32) {
    int sh = static_cast<int>(value);
    int mb;
    int me;
    const WordBinopOp& bitwise_and = lhs.Cast<WordBinopOp>();
    uint32_t right_value;
    if (MatchIntegralWord32Constant(bitwise_and.right(), &right_value) &&
        IsContiguousMask32(right_value << sh, &mb, &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (me < sh) me = sh;
      if (mb >= me) {
        Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node),
             g.UseRegister(bitwise_and.left()), g.TempImmediate(sh),
             g.TempImmediate(mb), g.TempImmediate(me));
        return;
      }
    }
  }
  VisitRRO(this, kPPC_ShiftLeft32, node, kShift32Imm);
}
void InstructionSelectorT::VisitWord64Shl(OpIndex node) {
  PPCOperandGeneratorT g(this);
  const ShiftOp& shl = Get(node).Cast<ShiftOp>();
  const Operation& lhs = Get(shl.left());
  int64_t value;
  if (lhs.Is<Opmask::kWord64BitwiseAnd>() &&
      this->MatchSignedIntegralConstant(shl.right(), &value) &&
      0 <= value && value < 64) {
    int sh = static_cast<int>(value);
    int mb;
    int me;
    const WordBinopOp& bitwise_and = lhs.Cast<WordBinopOp>();
    uint64_t right_value;
    if (MatchUnsignedIntegralConstant(bitwise_and.right(), &right_value) &&
        IsContiguousMask64(right_value << sh, &mb, &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (me < sh) me = sh;
      if (mb >= me) {
        bool match = false;
        ArchOpcode opcode;
        int mask;
        if (me == 0) {
          match = true;
          opcode = kPPC_RotLeftAndClearLeft64;
          mask = mb;
        } else if (mb == 63) {
          match = true;
          opcode = kPPC_RotLeftAndClearRight64;
          mask = me;
        } else if (sh && me <= sh) {
          match = true;
          opcode = kPPC_RotLeftAndClear64;
          mask = mb;
        }
        if (match) {
          Emit(opcode, g.DefineAsRegister(node),
               g.UseRegister(bitwise_and.left()), g.TempImmediate(sh),
               g.TempImmediate(mask));
          return;
        }
      }
    }
  }
  VisitRRO(this, kPPC_ShiftLeft64, node, kShift64Imm);
}
void InstructionSelectorT::VisitWord32Shr(OpIndex node) {
  PPCOperandGeneratorT g(this);
  const ShiftOp& shr = Get(node).Cast<ShiftOp>();
  const Operation& lhs = Get(shr.left());
  int64_t value;
  if (lhs.Is<Opmask::kWord32BitwiseAnd>() &&
      this->MatchSignedIntegralConstant(shr.right(), &value) &&
      0 <= value && value < 32) {
    int sh = static_cast<int>(value);
    int mb;
    int me;
    const WordBinopOp& bitwise_and = lhs.Cast<WordBinopOp>();
    uint64_t right_value;
    if (MatchUnsignedIntegralConstant(bitwise_and.right(), &right_value) &&
        IsContiguousMask32(static_cast<uint32_t>(right_value) >> sh, &mb,
                           &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (mb > 31 - sh) mb = 31 - sh;
      sh = (32 - sh) & 0x1F;
      if (mb >= me) {
        Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node),
             g.UseRegister(bitwise_and.left()), g.TempImmediate(sh),
             g.TempImmediate(mb), g.TempImmediate(me));
        return;
      }
    }
  }
  VisitRRO(this, kPPC_ShiftRight32, node, kShift32Imm);
}
void InstructionSelectorT::VisitWord64Shr(OpIndex node) {
  PPCOperandGeneratorT g(this);
  const ShiftOp& shr = Get(node).Cast<ShiftOp>();
  const Operation& lhs = Get(shr.left());
  int64_t value;
  if (lhs.Is<Opmask::kWord64BitwiseAnd>() &&
      this->MatchSignedIntegralConstant(shr.right(), &value) &&
      0 <= value && value < 64) {
    int sh = static_cast<int>(value);
    int mb;
    int me;
    const WordBinopOp& bitwise_and = lhs.Cast<WordBinopOp>();
    uint64_t right_value;
    if (MatchUnsignedIntegralConstant(bitwise_and.right(), &right_value) &&
        IsContiguousMask64(right_value >> sh, &mb, &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (mb > 63 - sh) mb = 63 - sh;
      sh = (64 - sh) & 0x3F;
      if (mb >= me) {
        bool match = false;
        ArchOpcode opcode;
        int mask;
        if (me == 0) {
          match = true;
          opcode = kPPC_RotLeftAndClearLeft64;
          mask = mb;
        } else if (mb == 63) {
          match = true;
          opcode = kPPC_RotLeftAndClearRight64;
          mask = me;
        }
        if (match) {
          Emit(opcode, g.DefineAsRegister(node),
               g.UseRegister(bitwise_and.left()), g.TempImmediate(sh),
               g.TempImmediate(mask));
          return;
        }
      }
    }
  }
  VisitRRO(this, kPPC_ShiftRight64, node, kShift64Imm);
}
void InstructionSelectorT::VisitWord32Sar(OpIndex node) {
  PPCOperandGeneratorT g(this);
  const ShiftOp& sar = Get(node).Cast<ShiftOp>();
  const Operation& lhs = Get(sar.left());
  if (CanCover(node, sar.left()) && lhs.Is<Opmask::kWord32ShiftLeft>()) {
    const ShiftOp& shl = lhs.Cast<ShiftOp>();
    uint32_t sar_value;
    uint32_t shl_value;
    if (MatchIntegralWord32Constant(sar.right(), &sar_value) &&
        MatchIntegralWord32Constant(shl.right(), &shl_value)) {
      uint32_t sar_by = sar_value;
      uint32_t shl_by = shl_value;
      if ((sar_by == shl_by) && (sar_by == 16)) {
        // Shl(x, 16) followed by Sar(_, 16) is a 16-bit sign extension.
        Emit(kPPC_ExtendSignWord16, g.DefineAsRegister(node),
             g.UseRegister(shl.left()));
        return;
      } else if ((sar_by == shl_by) && (sar_by == 24)) {
        // Shl(x, 24) followed by Sar(_, 24) is an 8-bit sign extension.
        Emit(kPPC_ExtendSignWord8, g.DefineAsRegister(node),
             g.UseRegister(shl.left()));
        return;
      }
    }
  }
  VisitRRO(this, kPPC_ShiftRightAlg32, node, kShift32Imm);
}
void InstructionSelectorT::VisitWord64Sar(OpIndex node) {
  PPCOperandGeneratorT g(this);
  const ShiftOp& shift = Get(node).Cast<ShiftOp>();
  const Operation& lhs = Get(shift.left());
  int64_t constant_rhs;
  if (lhs.Is<LoadOp>() &&
      this->MatchIntegralWord64Constant(shift.right(), &constant_rhs) &&
      constant_rhs == 32 && this->CanCover(node, shift.left())) {
    // Just load and sign-extend the interesting 4 bytes instead. This
    // happens, for example, when we're loading and untagging Smis.
    const LoadOp& load = lhs.Cast<LoadOp>();
    int64_t offset = 0;
    bool can_fold = true;
    if (load.index().has_value()) {
      int64_t index_constant;
      can_fold = this->MatchIntegralWord64Constant(load.index().value(),
                                                   &index_constant);
      if (can_fold) offset = index_constant;
    } else {
      offset = load.offset;
    }
    if (can_fold) {
#if V8_TARGET_LITTLE_ENDIAN
      // The high word of a little-endian 64-bit value lives at offset + 4.
      offset += 4;
#endif
      if (g.CanBeImmediate(offset, kInt16Imm_4ByteAligned)) {
        Emit(kPPC_LoadWordS32 | AddressingModeField::encode(kMode_MRI),
             g.DefineAsRegister(node), g.UseRegister(load.base()),
             g.TempImmediate(offset), g.UseImmediate(0));
        return;
      }
    }
  }
  VisitRRO(this, kPPC_ShiftRightAlg64, node, kShift64Imm);
}
void InstructionSelectorT::VisitWord32Ror(OpIndex node) {
  VisitRRO(this, kPPC_RotRight32, node, kShift32Imm);
}

void InstructionSelectorT::VisitWord64Ror(OpIndex node) {
  VisitRRO(this, kPPC_RotRight64, node, kShift64Imm);
}
void InstructionSelectorT::VisitWord32Clz(OpIndex node) {
  PPCOperandGeneratorT g(this);
  Emit(kPPC_Cntlz32, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));
}

void InstructionSelectorT::VisitWord64Clz(OpIndex node) {
  PPCOperandGeneratorT g(this);
  Emit(kPPC_Cntlz64, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));
}

void InstructionSelectorT::VisitWord32Popcnt(OpIndex node) {
  PPCOperandGeneratorT g(this);
  Emit(kPPC_Popcnt32, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));
}

void InstructionSelectorT::VisitWord64Popcnt(OpIndex node) {
  PPCOperandGeneratorT g(this);
  Emit(kPPC_Popcnt64, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));
}
void InstructionSelectorT::VisitWord32ReverseBits(OpIndex node) {
  UNREACHABLE();
}

void InstructionSelectorT::VisitWord64ReverseBits(OpIndex node) {
  UNREACHABLE();
}
void InstructionSelectorT::VisitWord64ReverseBytes(OpIndex node) {
  PPCOperandGeneratorT g(this);
  InstructionOperand temp[] = {g.TempRegister()};
  OpIndex input = this->input_at(node, 0);
  const Operation& input_op = this->Get(input);
  if (CanCover(node, input) && input_op.Is<LoadOp>()) {
    auto load = this->load_view(input);
    LoadRepresentation load_rep = load.loaded_rep();
    if (load_rep.representation() == MachineRepresentation::kWord64) {
      OpIndex base = load.base();
      OpIndex offset = load.index().value();
      bool is_atomic = load.is_atomic();
      Emit(kPPC_LoadByteRev64 | AddressingModeField::encode(kMode_MRR),
           g.DefineAsRegister(node), g.UseRegister(base),
           g.UseRegister(offset), g.UseImmediate(is_atomic));
      return;
    }
  }
  Emit(kPPC_ByteRev64, g.DefineAsRegister(node),
       g.UseUniqueRegister(this->input_at(node, 0)), 1, temp);
}
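// PPC provides byte-reversed loads (lwbrx/ldbrx), so when the reversed
// value comes directly from a covered memory load the reversal is folded
// into the load itself instead of emitting a separate kPPC_ByteRev64.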
void InstructionSelectorT::VisitWord32ReverseBytes(OpIndex node) {
  PPCOperandGeneratorT g(this);
  OpIndex input = this->input_at(node, 0);
  const Operation& input_op = this->Get(input);
  if (CanCover(node, input) && input_op.Is<LoadOp>()) {
    auto load = this->load_view(input);
    LoadRepresentation load_rep = load.loaded_rep();
    if (load_rep.representation() == MachineRepresentation::kWord32) {
      OpIndex base = load.base();
      OpIndex offset = load.index().value();
      bool is_atomic = load.is_atomic();
      Emit(kPPC_LoadByteRev32 | AddressingModeField::encode(kMode_MRR),
           g.DefineAsRegister(node), g.UseRegister(base),
           g.UseRegister(offset), g.UseImmediate(is_atomic));
      return;
    }
  }
  Emit(kPPC_ByteRev32, g.DefineAsRegister(node),
       g.UseUniqueRegister(this->input_at(node, 0)));
}
void InstructionSelectorT::VisitSimd128ReverseBytes(OpIndex node) {
  PPCOperandGeneratorT g(this);
  Emit(kPPC_LoadReverseSimd128RR, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));
}
void InstructionSelectorT::VisitInt32Add(OpIndex node) {
  VisitBinop(this, node, kPPC_Add32, kInt16Imm);
}

void InstructionSelectorT::VisitInt64Add(OpIndex node) {
  VisitBinop(this, node, kPPC_Add64, kInt16Imm);
}
void InstructionSelectorT::VisitInt32Sub(OpIndex node) {
  PPCOperandGeneratorT g(this);
  const WordBinopOp& sub = Get(node).Cast<WordBinopOp>();
  if (MatchIntegralZero(sub.left())) {
    Emit(kPPC_Neg, g.DefineAsRegister(node), g.UseRegister(sub.right()));
  } else {
    VisitBinop(this, node, kPPC_Sub, kInt16Imm_Negate);
  }
}

void InstructionSelectorT::VisitInt64Sub(OpIndex node) {
  PPCOperandGeneratorT g(this);
  const WordBinopOp& sub = Get(node).Cast<WordBinopOp>();
  if (MatchIntegralZero(sub.left())) {
    Emit(kPPC_Neg, g.DefineAsRegister(node), g.UseRegister(sub.right()));
  } else {
    VisitBinop(this, node, kPPC_Sub, kInt16Imm_Negate);
  }
}
// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelectorT* selector, InstructionCode opcode,
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuationT* cont);

void EmitInt32MulWithOverflow(InstructionSelectorT* selector, OpIndex node,
                              FlagsContinuationT* cont) {
  PPCOperandGeneratorT g(selector);
  OpIndex lhs = selector->input_at(node, 0);
  OpIndex rhs = selector->input_at(node, 1);
  InstructionOperand result_operand = g.DefineAsRegister(node);
  InstructionOperand high32_operand = g.TempRegister();
  InstructionOperand temp_operand = g.TempRegister();
  {
    InstructionOperand outputs[] = {result_operand, high32_operand};
    InstructionOperand inputs[] = {g.UseRegister(lhs), g.UseRegister(rhs)};
    selector->Emit(kPPC_Mul32WithHigh32, 2, outputs, 2, inputs);
  }
  {
    InstructionOperand shift_31 = g.UseImmediate(31);
    InstructionOperand outputs[] = {temp_operand};
    InstructionOperand inputs[] = {result_operand, shift_31};
    selector->Emit(kPPC_ShiftRightAlg32, 1, outputs, 2, inputs);
  }
  VisitCompare(selector, kPPC_Cmp32, high32_operand, temp_operand, cont);
}
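// Overflow detection sketch: for a 32x32->64 multiply, the result fits in
// 32 bits iff the high word equals the sign-extension of the low word,
// i.e. high32 == (low32 >> 31) arithmetically. E.g. 0x10000 * 0x10000
// gives low 0, high 1, but 0 >> 31 == 0, so the kNotEqual continuation
// signals overflow.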
void EmitInt64MulWithOverflow(InstructionSelectorT* selector, OpIndex node,
                              FlagsContinuationT* cont) {
  PPCOperandGeneratorT g(selector);
  OpIndex lhs = selector->input_at(node, 0);
  OpIndex rhs = selector->input_at(node, 1);
  InstructionOperand result = g.DefineAsRegister(node);
  InstructionOperand left = g.UseRegister(lhs);
  InstructionOperand high = g.TempRegister();
  InstructionOperand result_sign = g.TempRegister();
  InstructionOperand right = g.UseRegister(rhs);
  selector->Emit(kPPC_Mul64, result, left, right);
  selector->Emit(kPPC_MulHighS64, high, left, right);
  selector->Emit(kPPC_ShiftRightAlg64, result_sign, result,
                 g.TempImmediate(63));
  // Test whether {high} is a sign-extension of {result}.
  selector->EmitWithContinuation(kPPC_Cmp64, high, result_sign, cont);
}
void InstructionSelectorT::VisitInt32Mul(OpIndex node) {
  VisitRRR(this, kPPC_Mul32, node);
}

void InstructionSelectorT::VisitInt64Mul(OpIndex node) {
  VisitRRR(this, kPPC_Mul64, node);
}
void InstructionSelectorT::VisitInt32MulHigh(OpIndex node) {
  PPCOperandGeneratorT g(this);
  Emit(kPPC_MulHigh32, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)),
       g.UseRegister(this->input_at(node, 1)));
}

void InstructionSelectorT::VisitUint32MulHigh(OpIndex node) {
  PPCOperandGeneratorT g(this);
  Emit(kPPC_MulHighU32, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)),
       g.UseRegister(this->input_at(node, 1)));
}

void InstructionSelectorT::VisitInt64MulHigh(OpIndex node) {
  PPCOperandGeneratorT g(this);
  Emit(kPPC_MulHighS64, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)),
       g.UseRegister(this->input_at(node, 1)));
}

void InstructionSelectorT::VisitUint64MulHigh(OpIndex node) {
  PPCOperandGeneratorT g(this);
  Emit(kPPC_MulHighU64, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)),
       g.UseRegister(this->input_at(node, 1)));
}
void InstructionSelectorT::VisitInt32Div(OpIndex node) {
  VisitRRR(this, kPPC_Div32, node);
}

void InstructionSelectorT::VisitInt64Div(OpIndex node) {
  VisitRRR(this, kPPC_Div64, node);
}

void InstructionSelectorT::VisitUint32Div(OpIndex node) {
  VisitRRR(this, kPPC_DivU32, node);
}

void InstructionSelectorT::VisitUint64Div(OpIndex node) {
  VisitRRR(this, kPPC_DivU64, node);
}

void InstructionSelectorT::VisitInt32Mod(OpIndex node) {
  VisitRRR(this, kPPC_Mod32, node);
}

void InstructionSelectorT::VisitInt64Mod(OpIndex node) {
  VisitRRR(this, kPPC_Mod64, node);
}

void InstructionSelectorT::VisitUint32Mod(OpIndex node) {
  VisitRRR(this, kPPC_ModU32, node);
}

void InstructionSelectorT::VisitUint64Mod(OpIndex node) {
  VisitRRR(this, kPPC_ModU64, node);
}
void InstructionSelectorT::VisitChangeFloat32ToFloat64(OpIndex node) {
  VisitRR(this, kPPC_Float32ToDouble, node);
}

void InstructionSelectorT::VisitRoundInt32ToFloat32(OpIndex node) {
  VisitRR(this, kPPC_Int32ToFloat32, node);
}

void InstructionSelectorT::VisitRoundUint32ToFloat32(OpIndex node) {
  VisitRR(this, kPPC_Uint32ToFloat32, node);
}

void InstructionSelectorT::VisitChangeInt32ToFloat64(OpIndex node) {
  VisitRR(this, kPPC_Int32ToDouble, node);
}

void InstructionSelectorT::VisitChangeUint32ToFloat64(OpIndex node) {
  VisitRR(this, kPPC_Uint32ToDouble, node);
}

void InstructionSelectorT::VisitChangeFloat64ToInt32(OpIndex node) {
  VisitRR(this, kPPC_DoubleToInt32, node);
}

void InstructionSelectorT::VisitChangeFloat64ToUint32(OpIndex node) {
  VisitRR(this, kPPC_DoubleToUint32, node);
}

void InstructionSelectorT::VisitTruncateFloat64ToUint32(OpIndex node) {
  VisitRR(this, kPPC_DoubleToUint32, node);
}

void InstructionSelectorT::VisitSignExtendWord8ToInt32(OpIndex node) {
  VisitRR(this, kPPC_ExtendSignWord8, node);
}

void InstructionSelectorT::VisitSignExtendWord16ToInt32(OpIndex node) {
  VisitRR(this, kPPC_ExtendSignWord16, node);
}

void InstructionSelectorT::VisitTryTruncateFloat32ToInt64(OpIndex node) {
  VisitTryTruncateDouble(this, kPPC_DoubleToInt64, node);
}

void InstructionSelectorT::VisitTryTruncateFloat64ToInt64(OpIndex node) {
  VisitTryTruncateDouble(this, kPPC_DoubleToInt64, node);
}

void InstructionSelectorT::VisitTruncateFloat64ToInt64(OpIndex node) {
  VisitRR(this, kPPC_DoubleToInt64, node);
}

void InstructionSelectorT::VisitTryTruncateFloat32ToUint64(OpIndex node) {
  VisitTryTruncateDouble(this, kPPC_DoubleToUint64, node);
}

void InstructionSelectorT::VisitTryTruncateFloat64ToUint64(OpIndex node) {
  VisitTryTruncateDouble(this, kPPC_DoubleToUint64, node);
}

void InstructionSelectorT::VisitTryTruncateFloat64ToInt32(OpIndex node) {
  VisitTryTruncateDouble(this, kPPC_DoubleToInt32, node);
}

void InstructionSelectorT::VisitTryTruncateFloat64ToUint32(OpIndex node) {
  VisitTryTruncateDouble(this, kPPC_DoubleToUint32, node);
}
void InstructionSelectorT::VisitBitcastWord32ToWord64(OpIndex node) {
  DCHECK(SmiValuesAre31Bits());
  DCHECK(COMPRESS_POINTERS_BOOL);
  EmitIdentity(node);
}
void InstructionSelectorT::VisitChangeInt32ToInt64(OpIndex node) {
  VisitRR(this, kPPC_ExtendSignWord32, node);
}

void InstructionSelectorT::VisitSignExtendWord8ToInt64(OpIndex node) {
  VisitRR(this, kPPC_ExtendSignWord8, node);
}

void InstructionSelectorT::VisitSignExtendWord16ToInt64(OpIndex node) {
  VisitRR(this, kPPC_ExtendSignWord16, node);
}

void InstructionSelectorT::VisitSignExtendWord32ToInt64(OpIndex node) {
  VisitRR(this, kPPC_ExtendSignWord32, node);
}
bool InstructionSelectorT::ZeroExtendsWord32ToWord64NoPhis(OpIndex node) {
  // Conservatively assume 32-bit operations do not implicitly zero-extend.
  return false;
}
void InstructionSelectorT::VisitChangeUint32ToUint64(OpIndex node) {
  VisitRR(this, kPPC_Uint32ToUint64, node);
}
void InstructionSelectorT::VisitTruncateFloat64ToFloat16RawBits(OpIndex node) {
  UNIMPLEMENTED();
}

void InstructionSelectorT::VisitChangeFloat16RawBitsToFloat64(OpIndex node) {
  UNIMPLEMENTED();
}
void InstructionSelectorT::VisitChangeFloat64ToUint64(OpIndex node) {
  VisitRR(this, kPPC_DoubleToUint64, node);
}

void InstructionSelectorT::VisitChangeFloat64ToInt64(OpIndex node) {
  VisitRR(this, kPPC_DoubleToInt64, node);
}

void InstructionSelectorT::VisitTruncateFloat64ToFloat32(OpIndex node) {
  VisitRR(this, kPPC_DoubleToFloat32, node);
}

void InstructionSelectorT::VisitTruncateFloat64ToWord32(OpIndex node) {
  VisitRR(this, kArchTruncateDoubleToI, node);
}

void InstructionSelectorT::VisitRoundFloat64ToInt32(OpIndex node) {
  VisitRR(this, kPPC_DoubleToInt32, node);
}

void InstructionSelectorT::VisitTruncateFloat32ToInt32(OpIndex node) {
  PPCOperandGeneratorT g(this);
  InstructionCode opcode = kPPC_Float32ToInt32;
  const Operation& op = this->Get(node);
  if (op.Is<Opmask::kTruncateFloat32ToInt32OverflowToMin>()) {
    opcode |= MiscField::encode(true);
  }
  Emit(opcode, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));
}

void InstructionSelectorT::VisitTruncateFloat32ToUint32(OpIndex node) {
  PPCOperandGeneratorT g(this);
  InstructionCode opcode = kPPC_Float32ToUint32;
  const Operation& op = this->Get(node);
  if (op.Is<Opmask::kTruncateFloat32ToUint32OverflowToMin>()) {
    opcode |= MiscField::encode(true);
  }
  Emit(opcode, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));
}

void InstructionSelectorT::VisitTruncateInt64ToInt32(OpIndex node) {
  VisitRR(this, kPPC_Int64ToInt32, node);
}
void InstructionSelectorT::VisitRoundInt64ToFloat32(OpIndex node) {
  VisitRR(this, kPPC_Int64ToFloat32, node);
}

void InstructionSelectorT::VisitRoundInt64ToFloat64(OpIndex node) {
  VisitRR(this, kPPC_Int64ToDouble, node);
}

void InstructionSelectorT::VisitChangeInt64ToFloat64(OpIndex node) {
  VisitRR(this, kPPC_Int64ToDouble, node);
}

void InstructionSelectorT::VisitRoundUint64ToFloat32(OpIndex node) {
  VisitRR(this, kPPC_Uint64ToFloat32, node);
}

void InstructionSelectorT::VisitRoundUint64ToFloat64(OpIndex node) {
  VisitRR(this, kPPC_Uint64ToDouble, node);
}

void InstructionSelectorT::VisitBitcastFloat32ToInt32(OpIndex node) {
  VisitRR(this, kPPC_BitcastFloat32ToInt32, node);
}

void InstructionSelectorT::VisitBitcastFloat64ToInt64(OpIndex node) {
  VisitRR(this, kPPC_BitcastDoubleToInt64, node);
}

void InstructionSelectorT::VisitBitcastInt32ToFloat32(OpIndex node) {
  VisitRR(this, kPPC_BitcastInt32ToFloat32, node);
}

void InstructionSelectorT::VisitBitcastInt64ToFloat64(OpIndex node) {
  VisitRR(this, kPPC_BitcastInt64ToDouble, node);
}
void InstructionSelectorT::VisitFloat32Add(OpIndex node) {
  VisitRRR(this, kPPC_AddDouble, node);
}

void InstructionSelectorT::VisitFloat64Add(OpIndex node) {
  VisitRRR(this, kPPC_AddDouble, node);
}

void InstructionSelectorT::VisitFloat32Sub(OpIndex node) {
  VisitRRR(this, kPPC_SubDouble, node);
}

void InstructionSelectorT::VisitFloat64Sub(OpIndex node) {
  VisitRRR(this, kPPC_SubDouble, node);
}

void InstructionSelectorT::VisitFloat32Mul(OpIndex node) {
  VisitRRR(this, kPPC_MulDouble, node);
}

void InstructionSelectorT::VisitFloat64Mul(OpIndex node) {
  VisitRRR(this, kPPC_MulDouble, node);
}

void InstructionSelectorT::VisitFloat32Div(OpIndex node) {
  VisitRRR(this, kPPC_DivDouble, node);
}

void InstructionSelectorT::VisitFloat64Div(OpIndex node) {
  VisitRRR(this, kPPC_DivDouble, node);
}

void InstructionSelectorT::VisitFloat64Mod(OpIndex node) {
  PPCOperandGeneratorT g(this);
  Emit(kPPC_ModDouble, g.DefineAsFixed(node, d1),
       g.UseFixed(this->input_at(node, 0), d1),
       g.UseFixed(this->input_at(node, 1), d2))
      ->MarkAsCall();
}
void InstructionSelectorT::VisitFloat32Max(OpIndex node) {
  VisitRRR(this, kPPC_MaxDouble, node);
}

void InstructionSelectorT::VisitFloat64Max(OpIndex node) {
  VisitRRR(this, kPPC_MaxDouble, node);
}

void InstructionSelectorT::VisitFloat64SilenceNaN(OpIndex node) {
  VisitRR(this, kPPC_Float64SilenceNaN, node);
}

void InstructionSelectorT::VisitFloat32Min(OpIndex node) {
  VisitRRR(this, kPPC_MinDouble, node);
}

void InstructionSelectorT::VisitFloat64Min(OpIndex node) {
  VisitRRR(this, kPPC_MinDouble, node);
}
void InstructionSelectorT::VisitFloat32Abs(OpIndex node) {
  VisitRR(this, kPPC_AbsDouble, node);
}

void InstructionSelectorT::VisitFloat64Abs(OpIndex node) {
  VisitRR(this, kPPC_AbsDouble, node);
}

void InstructionSelectorT::VisitFloat32Sqrt(OpIndex node) {
  VisitRR(this, kPPC_SqrtDouble, node);
}

void InstructionSelectorT::VisitFloat64Ieee754Unop(OpIndex node,
                                                   InstructionCode opcode) {
  PPCOperandGeneratorT g(this);
  Emit(opcode, g.DefineAsFixed(node, d1),
       g.UseFixed(this->input_at(node, 0), d1))
      ->MarkAsCall();
}

void InstructionSelectorT::VisitFloat64Ieee754Binop(OpIndex node,
                                                    InstructionCode opcode) {
  PPCOperandGeneratorT g(this);
  Emit(opcode, g.DefineAsFixed(node, d1),
       g.UseFixed(this->input_at(node, 0), d1),
       g.UseFixed(this->input_at(node, 1), d2))
      ->MarkAsCall();
}

void InstructionSelectorT::VisitFloat64Sqrt(OpIndex node) {
  VisitRR(this, kPPC_SqrtDouble, node);
}
void InstructionSelectorT::VisitFloat32RoundDown(OpIndex node) {
  VisitRR(this, kPPC_FloorDouble, node);
}

void InstructionSelectorT::VisitFloat64RoundDown(OpIndex node) {
  VisitRR(this, kPPC_FloorDouble, node);
}

void InstructionSelectorT::VisitFloat32RoundUp(OpIndex node) {
  VisitRR(this, kPPC_CeilDouble, node);
}

void InstructionSelectorT::VisitFloat64RoundUp(OpIndex node) {
  VisitRR(this, kPPC_CeilDouble, node);
}

void InstructionSelectorT::VisitFloat32RoundTruncate(OpIndex node) {
  VisitRR(this, kPPC_TruncateDouble, node);
}

void InstructionSelectorT::VisitFloat64RoundTruncate(OpIndex node) {
  VisitRR(this, kPPC_TruncateDouble, node);
}

void InstructionSelectorT::VisitFloat64RoundTiesAway(OpIndex node) {
  VisitRR(this, kPPC_RoundDouble, node);
}
void InstructionSelectorT::VisitFloat32Neg(OpIndex node) {
  VisitRR(this, kPPC_NegDouble, node);
}

void InstructionSelectorT::VisitFloat64Neg(OpIndex node) {
  VisitRR(this, kPPC_NegDouble, node);
}
void InstructionSelectorT::VisitInt32AddWithOverflow(OpIndex node) {
  OptionalOpIndex ovf = FindProjection(node, 1);
  if (ovf.valid()) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
    return VisitBinop(this, node, kPPC_AddWithOverflow32, kInt16Imm, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kPPC_AddWithOverflow32, kInt16Imm, &cont);
}

void InstructionSelectorT::VisitInt32SubWithOverflow(OpIndex node) {
  OptionalOpIndex ovf = FindProjection(node, 1);
  if (ovf.valid()) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
    return VisitBinop(this, node, kPPC_SubWithOverflow32, kInt16Imm_Negate,
                      &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kPPC_SubWithOverflow32, kInt16Imm_Negate, &cont);
}

void InstructionSelectorT::VisitInt64AddWithOverflow(OpIndex node) {
  OptionalOpIndex ovf = FindProjection(node, 1);
  if (ovf.valid()) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
    return VisitBinop(this, node, kPPC_Add64, kInt16Imm, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kPPC_Add64, kInt16Imm, &cont);
}

void InstructionSelectorT::VisitInt64SubWithOverflow(OpIndex node) {
  OptionalOpIndex ovf = FindProjection(node, 1);
  if (ovf.valid()) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
    return VisitBinop(this, node, kPPC_Sub, kInt16Imm_Negate, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kPPC_Sub, kInt16Imm_Negate, &cont);
}

void InstructionSelectorT::VisitInt64MulWithOverflow(OpIndex node) {
  OptionalOpIndex ovf = FindProjection(node, 1);
  if (ovf.valid()) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf.value());
    return EmitInt64MulWithOverflow(this, node, &cont);
  }
  FlagsContinuation cont;
  EmitInt64MulWithOverflow(this, node, &cont);
}
// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelectorT* selector, InstructionCode opcode,
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuationT* cont) {
  selector->EmitWithContinuation(opcode, left, right, cont);
}

// Whether the continuation's condition is an unsigned (logical) comparison.
static bool CompareLogical(FlagsContinuationT* cont) {
  switch (cont->condition()) {
    case kUnsignedLessThan:
    case kUnsignedGreaterThanOrEqual:
    case kUnsignedLessThanOrEqual:
    case kUnsignedGreaterThan:
      return true;
    default:
      return false;
  }
}

// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelectorT* selector, OpIndex node,
                      InstructionCode opcode, FlagsContinuationT* cont,
                      bool commutative, ImmediateMode immediate_mode) {
  PPCOperandGeneratorT g(selector);
  OpIndex lhs = selector->input_at(node, 0);
  OpIndex rhs = selector->input_at(node, 1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(rhs, immediate_mode)) {
    VisitCompare(selector, opcode, g.UseRegister(lhs), g.UseImmediate(rhs),
                 cont);
  } else if (g.CanBeImmediate(lhs, immediate_mode)) {
    if (!commutative) cont->Commute();
    VisitCompare(selector, opcode, g.UseRegister(rhs), g.UseImmediate(lhs),
                 cont);
  } else {
    VisitCompare(selector, opcode, g.UseRegister(lhs), g.UseRegister(rhs),
                 cont);
  }
}

void VisitWord32Compare(InstructionSelectorT* selector, OpIndex node,
                        FlagsContinuationT* cont) {
  ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
  VisitWordCompare(selector, node, kPPC_Cmp32, cont, false, mode);
}

void VisitWord64Compare(InstructionSelectorT* selector, OpIndex node,
                        FlagsContinuationT* cont) {
  ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
  VisitWordCompare(selector, node, kPPC_Cmp64, cont, false, mode);
}
// Shared routine for multiple float32 compare operations.
void VisitFloat32Compare(InstructionSelectorT* selector, OpIndex node,
                         FlagsContinuationT* cont) {
  PPCOperandGeneratorT g(selector);
  OpIndex lhs = selector->input_at(node, 0);
  OpIndex rhs = selector->input_at(node, 1);
  VisitCompare(selector, kPPC_CmpDouble, g.UseRegister(lhs),
               g.UseRegister(rhs), cont);
}

// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelectorT* selector, OpIndex node,
                         FlagsContinuationT* cont) {
  PPCOperandGeneratorT g(selector);
  OpIndex lhs = selector->input_at(node, 0);
  OpIndex rhs = selector->input_at(node, 1);
  VisitCompare(selector, kPPC_CmpDouble, g.UseRegister(lhs),
               g.UseRegister(rhs), cont);
}
// Shared routine for word comparisons against zero.
void InstructionSelectorT::VisitWordCompareZero(OpIndex user, OpIndex value,
                                                FlagsContinuation* cont) {
  // Try to combine with comparisons against 0 by simply inverting the branch.
  ConsumeEqualZero(&user, &value, cont);

  if (CanCover(user, value)) {
    const Operation& value_op = Get(value);
    if (const ComparisonOp* comparison = value_op.TryCast<ComparisonOp>()) {
      switch (comparison->rep.MapTaggedToWord().value()) {
        case RegisterRepresentation::Word32():
          cont->OverwriteAndNegateIfEqual(
              GetComparisonFlagCondition(*comparison));
          return VisitWord32Compare(this, value, cont);
        case RegisterRepresentation::Word64():
          cont->OverwriteAndNegateIfEqual(
              GetComparisonFlagCondition(*comparison));
          return VisitWord64Compare(this, value, cont);
        case RegisterRepresentation::Float32():
          switch (comparison->kind) {
            case ComparisonOp::Kind::kEqual:
              cont->OverwriteAndNegateIfEqual(kEqual);
              return VisitFloat32Compare(this, value, cont);
            case ComparisonOp::Kind::kSignedLessThan:
              cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
              return VisitFloat32Compare(this, value, cont);
            case ComparisonOp::Kind::kSignedLessThanOrEqual:
              cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
              return VisitFloat32Compare(this, value, cont);
            default:
              UNREACHABLE();
          }
        case RegisterRepresentation::Float64():
          switch (comparison->kind) {
            case ComparisonOp::Kind::kEqual:
              cont->OverwriteAndNegateIfEqual(kEqual);
              return VisitFloat64Compare(this, value, cont);
            case ComparisonOp::Kind::kSignedLessThan:
              cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
              return VisitFloat64Compare(this, value, cont);
            case ComparisonOp::Kind::kSignedLessThanOrEqual:
              cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
              return VisitFloat64Compare(this, value, cont);
            default:
              UNREACHABLE();
          }
        default:
          break;
      }
    } else if (const ProjectionOp* projection =
                   value_op.TryCast<ProjectionOp>()) {
      // Check if this is the overflow output projection of an
      // <Operation>WithOverflow node.
      if (projection->index == 1u) {
        // We cannot combine the <Operation>WithOverflow with this branch
        // unless the 0th projection (the result) is either unused or already
        // defined, which means it is scheduled *AFTER* this branch.
        OpIndex node = projection->input();
        if (const OverflowCheckedBinopOp* binop =
                Get(node).TryCast<OverflowCheckedBinopOp>();
            binop && CanDoBranchIfOverflowFusion(node)) {
          const bool is64 = binop->rep == WordRepresentation::Word64();
          switch (binop->kind) {
            case OverflowCheckedBinopOp::Kind::kSignedAdd:
              cont->OverwriteAndNegateIfEqual(kOverflow);
              return VisitBinop(this, node,
                                is64 ? kPPC_Add64 : kPPC_AddWithOverflow32,
                                kInt16Imm, cont);
            case OverflowCheckedBinopOp::Kind::kSignedSub:
              cont->OverwriteAndNegateIfEqual(kOverflow);
              return VisitBinop(this, node,
                                is64 ? kPPC_Sub : kPPC_SubWithOverflow32,
                                kInt16Imm_Negate, cont);
            case OverflowCheckedBinopOp::Kind::kSignedMul:
              if (is64) {
                cont->OverwriteAndNegateIfEqual(kNotEqual);
                return EmitInt64MulWithOverflow(this, node, cont);
              } else {
                cont->OverwriteAndNegateIfEqual(kNotEqual);
                return EmitInt32MulWithOverflow(this, node, cont);
              }
          }
        }
      }
    } else if (value_op.Is<Opmask::kWord32Sub>()) {
      return VisitWord32Compare(this, value, cont);
    } else if (value_op.Is<Opmask::kWord32BitwiseAnd>()) {
      return VisitWordCompare(this, value, kPPC_Tst32, cont, true,
                              kInt16Imm_Unsigned);
    } else if (value_op.Is<Opmask::kWord64Sub>()) {
      return VisitWord64Compare(this, value, cont);
    } else if (value_op.Is<Opmask::kWord64BitwiseAnd>()) {
      return VisitWordCompare(this, value, kPPC_Tst64, cont, true,
                              kInt16Imm_Unsigned);
    } else if (value_op.Is<StackPointerGreaterThanOp>()) {
      cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
      return VisitStackPointerGreaterThan(value, cont);
    }
  }

  // Branch could not be combined with a compare; emit a compare against 0.
  PPCOperandGeneratorT g(this);
  VisitCompare(this, kPPC_Cmp32, g.UseRegister(value), g.TempImmediate(0),
               cont);
}
void InstructionSelectorT::VisitSwitch(OpIndex node, const SwitchInfo& sw) {
  PPCOperandGeneratorT g(this);
  InstructionOperand value_operand = g.UseRegister(this->input_at(node, 0));

  // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
  if (enable_switch_jump_table_ ==
      InstructionSelector::kEnableSwitchJumpTable) {
    static const size_t kMaxTableSwitchValueRange = 2 << 16;
    size_t table_space_cost = 4 + sw.value_range();
    size_t table_time_cost = 3;
    size_t lookup_space_cost = 3 + 2 * sw.case_count();
    size_t lookup_time_cost = sw.case_count();
    if (sw.case_count() > 0 &&
        table_space_cost + 3 * table_time_cost <=
            lookup_space_cost + 3 * lookup_time_cost &&
        sw.min_value() > std::numeric_limits<int32_t>::min() &&
        sw.value_range() <= kMaxTableSwitchValueRange) {
      InstructionOperand index_operand = value_operand;
      if (sw.min_value()) {
        index_operand = g.TempRegister();
        Emit(kPPC_Sub, index_operand, value_operand,
             g.TempImmediate(sw.min_value()));
      }
      // Zero-extend, because the value is used as a 64-bit table index.
      InstructionOperand index_operand_zero_ext = g.TempRegister();
      Emit(kPPC_Uint32ToUint64, index_operand_zero_ext, index_operand);
      index_operand = index_operand_zero_ext;
      // Generate a table lookup.
      return EmitTableSwitch(sw, index_operand);
    }
  }

  // Generate a tree of conditional jumps.
  return EmitBinarySearchSwitch(sw, value_operand);
}
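// Illustrative numbers for the heuristic above: 10 cases spanning a value
// range of 12 give table cost (4 + 12) + 3 * 3 = 25 versus lookup cost
// (3 + 2 * 10) + 3 * 10 = 53, so the jump table wins; a sparse switch
// (say, range 10000) flips the comparison toward binary search.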
void InstructionSelectorT::VisitWord32Equal(OpIndex const node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  if (isolate() && (V8_STATIC_ROOTS_BOOL ||
                    (COMPRESS_POINTERS_BOOL && !isolate()->bootstrapper()))) {
    PPCOperandGeneratorT g(this);
    const RootsTable& roots_table = isolate()->roots_table();
    RootIndex root_index;
    Handle<HeapObject> right;
    // HeapConstants and CompressedHeapConstants can be treated as Smi values.
    OpIndex left = this->input_at(node, 0);
    if (MatchHeapConstant(node, &right) && !right.is_null() &&
        roots_table.IsRootHandle(right, &root_index)) {
      if (RootsTable::IsReadOnly(root_index)) {
        Tagged_t ptr =
            MacroAssemblerBase::ReadOnlyRootPtr(root_index, isolate());
        if (g.CanBeImmediate(ptr, kInt16Imm)) {
          return VisitCompare(this, kPPC_Cmp32, g.UseRegister(left),
                              g.TempImmediate(ptr), &cont);
        }
      }
    }
  }
  VisitWord32Compare(this, node, &cont);
}
void InstructionSelectorT::VisitInt32LessThan(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelectorT::VisitInt32LessThanOrEqual(OpIndex node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelectorT::VisitUint32LessThan(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelectorT::VisitUint32LessThanOrEqual(OpIndex node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelectorT::VisitWord64Equal(OpIndex const node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelectorT::VisitInt64LessThan(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelectorT::VisitInt64LessThanOrEqual(OpIndex node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelectorT::VisitUint64LessThan(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelectorT::VisitUint64LessThanOrEqual(OpIndex node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}
void InstructionSelectorT::VisitInt32MulWithOverflow(OpIndex node) {
  OptionalOpIndex ovf = FindProjection(node, 1);
  if (ovf.valid()) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf.value());
    return EmitInt32MulWithOverflow(this, node, &cont);
  }
  FlagsContinuation cont;
  EmitInt32MulWithOverflow(this, node, &cont);
}
void InstructionSelectorT::VisitFloat32Equal(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelectorT::VisitFloat32LessThan(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelectorT::VisitFloat32LessThanOrEqual(OpIndex node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelectorT::VisitFloat64Equal(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat64Compare(this, node, &cont);
}

void InstructionSelectorT::VisitFloat64LessThan(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}

void InstructionSelectorT::VisitFloat64LessThanOrEqual(OpIndex node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}
void InstructionSelectorT::EmitMoveParamToFPR(OpIndex node, int index) {}

void InstructionSelectorT::EmitMoveFPRToParam(InstructionOperand* op,
                                              LinkageLocation location) {}
void InstructionSelectorT::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments,
    const CallDescriptor* call_descriptor, OpIndex node) {
  PPCOperandGeneratorT g(this);

  // Prepare for C function call.
  if (call_descriptor->IsCFunctionCall()) {
    Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
                                         call_descriptor->ParameterCount())),
         0, nullptr, 0, nullptr);

    // Poke any stack arguments.
    int slot = kStackFrameExtraParamSlot;
    for (PushParameter input : (*arguments)) {
      if (!input.node.valid()) continue;
      Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
           g.TempImmediate(slot));
      ++slot;
    }
  } else {
    // Push any stack arguments.
    int stack_decrement = 0;
    for (PushParameter input : base::Reversed(*arguments)) {
      stack_decrement += kSystemPointerSize;
      // Skip any alignment holes in pushed nodes.
      if (!input.node.valid()) continue;
      InstructionOperand decrement = g.UseImmediate(stack_decrement);
      stack_decrement = 0;
      Emit(kPPC_Push, g.NoOutput(), decrement, g.UseRegister(input.node));
    }
  }
}
bool InstructionSelectorT::IsTailCallAddressImmediate() { return false; }
void InstructionSelectorT::VisitFloat64ExtractLowWord32(OpIndex node) {
  PPCOperandGeneratorT g(this);
  Emit(kPPC_DoubleExtractLowWord32, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));
}

void InstructionSelectorT::VisitFloat64ExtractHighWord32(OpIndex node) {
  PPCOperandGeneratorT g(this);
  Emit(kPPC_DoubleExtractHighWord32, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));
}
void InstructionSelectorT::VisitBitcastWord32PairToFloat64(OpIndex node) {
  PPCOperandGeneratorT g(this);
  const auto& bitcast = this->Cast<BitcastWord32PairToFloat64Op>(node);
  OpIndex hi = bitcast.high_word32();
  OpIndex lo = bitcast.low_word32();

  InstructionOperand temps[] = {g.TempRegister()};
  Emit(kPPC_DoubleFromWord32Pair, g.DefineAsRegister(node), g.UseRegister(hi),
       g.UseRegister(lo), arraysize(temps), temps);
}
void InstructionSelectorT::VisitFloat64InsertLowWord32(OpIndex node) {
  UNIMPLEMENTED();
}

void InstructionSelectorT::VisitFloat64InsertHighWord32(OpIndex node) {
  UNIMPLEMENTED();
}
void InstructionSelectorT::VisitMemoryBarrier(OpIndex node) {
  PPCOperandGeneratorT g(this);
  Emit(kPPC_Sync, g.NoOutput());
}
void InstructionSelectorT::VisitWord32AtomicLoad(OpIndex node) {
  auto load_view = this->load_view(node);
  VisitLoadCommon(this, node, kInt16Imm,
                  SelectLoadOpcode(load_view.ts_loaded_rep(),
                                   load_view.ts_result_rep()));
}

void InstructionSelectorT::VisitWord64AtomicLoad(OpIndex node) {
  auto load_view = this->load_view(node);
  VisitLoadCommon(this, node, kInt16Imm,
                  SelectLoadOpcode(load_view.ts_loaded_rep(),
                                   load_view.ts_result_rep()));
}

void InstructionSelectorT::VisitWord32AtomicStore(OpIndex node) {
  auto store = this->store_view(node);
  AtomicStoreParameters store_params(store.stored_rep().representation(),
                                     store.stored_rep().write_barrier_kind(),
                                     store.memory_order().value(),
                                     store.access_kind());
  VisitStoreCommon(this, node, store_params.store_representation(),
                   store_params.order());
}

void InstructionSelectorT::VisitWord64AtomicStore(OpIndex node) {
  auto store = this->store_view(node);
  AtomicStoreParameters store_params(store.stored_rep().representation(),
                                     store.stored_rep().write_barrier_kind(),
                                     store.memory_order().value(),
                                     store.access_kind());
  VisitStoreCommon(this, node, store_params.store_representation(),
                   store_params.order());
}
static void VisitAtomicExchange(InstructionSelectorT* selector, OpIndex node,
                                ArchOpcode opcode) {
  PPCOperandGeneratorT g(selector);
  const AtomicRMWOp& atomic_op =
      selector->Get(node).template Cast<AtomicRMWOp>();
  OpIndex base = atomic_op.base();
  OpIndex index = atomic_op.index();
  OpIndex value = atomic_op.value();

  AddressingMode addressing_mode = kMode_MRR;
  InstructionOperand inputs[3];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(value);
  InstructionOperand outputs[1];
  outputs[0] = g.DefineAsRegister(node);
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  selector->Emit(code, 1, outputs, input_count, inputs);
}
void InstructionSelectorT::VisitWord32AtomicExchange(OpIndex node) {
  ArchOpcode opcode;
  const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
  if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
    opcode = kAtomicExchangeInt8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
    opcode = kPPC_AtomicExchangeUint8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
    opcode = kAtomicExchangeInt16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
    opcode = kPPC_AtomicExchangeUint16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
             atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
    opcode = kPPC_AtomicExchangeWord32;
  } else {
    UNREACHABLE();
  }
  VisitAtomicExchange(this, node, opcode);
}
void InstructionSelectorT::VisitWord64AtomicExchange(OpIndex node) {
  ArchOpcode opcode;
  const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
  if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
    opcode = kPPC_AtomicExchangeUint8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
    opcode = kPPC_AtomicExchangeUint16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
    opcode = kPPC_AtomicExchangeWord32;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
    opcode = kPPC_AtomicExchangeWord64;
  } else {
    UNREACHABLE();
  }
  VisitAtomicExchange(this, node, opcode);
}
static void VisitAtomicCompareExchange(InstructionSelectorT* selector,
                                       OpIndex node, ArchOpcode opcode) {
  PPCOperandGeneratorT g(selector);
  const AtomicRMWOp& atomic_op =
      selector->Get(node).template Cast<AtomicRMWOp>();
  OpIndex base = atomic_op.base();
  OpIndex index = atomic_op.index();
  OpIndex old_value = atomic_op.expected().value();
  OpIndex new_value = atomic_op.value();

  AddressingMode addressing_mode = kMode_MRR;
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);

  InstructionOperand inputs[4];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(old_value);
  inputs[input_count++] = g.UseUniqueRegister(new_value);

  InstructionOperand outputs[1];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  selector->Emit(code, output_count, outputs, input_count, inputs);
}
void InstructionSelectorT::VisitWord32AtomicCompareExchange(OpIndex node) {
  ArchOpcode opcode;
  const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
  if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
    opcode = kAtomicCompareExchangeInt8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
    opcode = kPPC_AtomicCompareExchangeUint8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
    opcode = kAtomicCompareExchangeInt16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
    opcode = kPPC_AtomicCompareExchangeUint16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
             atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
    opcode = kPPC_AtomicCompareExchangeWord32;
  } else {
    UNREACHABLE();
  }
  VisitAtomicCompareExchange(this, node, opcode);
}
void InstructionSelectorT::VisitWord64AtomicCompareExchange(OpIndex node) {
  ArchOpcode opcode;
  const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
  if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
    opcode = kPPC_AtomicCompareExchangeUint8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
    opcode = kPPC_AtomicCompareExchangeUint16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
    opcode = kPPC_AtomicCompareExchangeWord32;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
    opcode = kPPC_AtomicCompareExchangeWord64;
  } else {
    UNREACHABLE();
  }
  VisitAtomicCompareExchange(this, node, opcode);
}
static void VisitAtomicBinaryOperation(
    InstructionSelectorT* selector, OpIndex node, ArchOpcode int8_op,
    ArchOpcode uint8_op, ArchOpcode int16_op, ArchOpcode uint16_op,
    ArchOpcode int32_op, ArchOpcode uint32_op, ArchOpcode int64_op,
    ArchOpcode uint64_op) {
  PPCOperandGeneratorT g(selector);
  const AtomicRMWOp& atomic_op =
      selector->Get(node).template Cast<AtomicRMWOp>();
  OpIndex base = atomic_op.base();
  OpIndex index = atomic_op.index();
  OpIndex value = atomic_op.value();

  ArchOpcode opcode;
  if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
    opcode = int8_op;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
    opcode = uint8_op;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
    opcode = int16_op;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
    opcode = uint16_op;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int32()) {
    opcode = int32_op;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
    opcode = uint32_op;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int64()) {
    opcode = int64_op;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
    opcode = uint64_op;
  } else {
    UNREACHABLE();
  }

  AddressingMode addressing_mode = kMode_MRR;
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  InstructionOperand inputs[3];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(value);
  InstructionOperand outputs[1];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);
  selector->Emit(code, output_count, outputs, input_count, inputs);
}
void InstructionSelectorT::VisitWord32AtomicBinaryOperation(
    OpIndex node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
    ArchOpcode uint16_op, ArchOpcode word32_op) {
  // Unused on PPC; atomic binops are selected via VisitAtomicBinaryOperation.
  UNREACHABLE();
}

void InstructionSelectorT::VisitWord64AtomicBinaryOperation(
    OpIndex node, ArchOpcode uint8_op, ArchOpcode uint16_op,
    ArchOpcode uint32_op, ArchOpcode uint64_op) {
  UNREACHABLE();
}
#define VISIT_ATOMIC_BINOP(op)                                      \
  void InstructionSelectorT::VisitWord32Atomic##op(OpIndex node) {  \
    VisitAtomicBinaryOperation(                                     \
        this, node, kPPC_Atomic##op##Int8, kPPC_Atomic##op##Uint8,  \
        kPPC_Atomic##op##Int16, kPPC_Atomic##op##Uint16,            \
        kPPC_Atomic##op##Int32, kPPC_Atomic##op##Uint32,            \
        kPPC_Atomic##op##Int64, kPPC_Atomic##op##Uint64);           \
  }                                                                 \
  void InstructionSelectorT::VisitWord64Atomic##op(OpIndex node) {  \
    VisitAtomicBinaryOperation(                                     \
        this, node, kPPC_Atomic##op##Int8, kPPC_Atomic##op##Uint8,  \
        kPPC_Atomic##op##Int16, kPPC_Atomic##op##Uint16,            \
        kPPC_Atomic##op##Int32, kPPC_Atomic##op##Uint32,            \
        kPPC_Atomic##op##Int64, kPPC_Atomic##op##Uint64);           \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
void InstructionSelectorT::VisitInt32AbsWithOverflow(OpIndex node) {
  UNREACHABLE();
}

void InstructionSelectorT::VisitInt64AbsWithOverflow(OpIndex node) {
  UNREACHABLE();
}
#define SIMD_TYPES(V)

#define SIMD_BINOP_LIST(V)     \
  V(I64x2ExtMulLowI32x4S)      \
  V(I64x2ExtMulHighI32x4S)     \
  V(I64x2ExtMulLowI32x4U)      \
  V(I64x2ExtMulHighI32x4U)     \
  V(I32x4ExtMulLowI16x8S)      \
  V(I32x4ExtMulHighI16x8S)     \
  V(I32x4ExtMulLowI16x8U)      \
  V(I32x4ExtMulHighI16x8U)     \
  V(I16x8SConvertI32x4)        \
  V(I16x8UConvertI32x4)        \
  V(I16x8RoundingAverageU)     \
  V(I16x8Q15MulRSatS)          \
  V(I16x8ExtMulLowI8x16S)      \
  V(I16x8ExtMulHighI8x16S)     \
  V(I16x8ExtMulLowI8x16U)      \
  V(I16x8ExtMulHighI8x16U)     \
  V(I8x16SConvertI16x8)        \
  V(I8x16UConvertI16x8)        \
  V(I8x16RoundingAverageU)

#define SIMD_UNOP_LIST(V)      \
  V(F64x2ConvertLowI32x4S)     \
  V(F64x2ConvertLowI32x4U)     \
  V(F64x2PromoteLowF32x4)      \
  V(F32x4SConvertI32x4)        \
  V(F32x4UConvertI32x4)        \
  V(F32x4DemoteF64x2Zero)      \
  V(I64x2SConvertI32x4Low)     \
  V(I64x2SConvertI32x4High)    \
  V(I64x2UConvertI32x4Low)     \
  V(I64x2UConvertI32x4High)    \
  V(I32x4SConvertF32x4)        \
  V(I32x4UConvertF32x4)        \
  V(I32x4SConvertI16x8Low)     \
  V(I32x4SConvertI16x8High)    \
  V(I32x4UConvertI16x8Low)     \
  V(I32x4UConvertI16x8High)    \
  V(I32x4ExtAddPairwiseI16x8S) \
  V(I32x4ExtAddPairwiseI16x8U) \
  V(I32x4TruncSatF64x2SZero)   \
  V(I32x4TruncSatF64x2UZero)   \
  V(I16x8SConvertI8x16Low)     \
  V(I16x8SConvertI8x16High)    \
  V(I16x8UConvertI8x16Low)     \
  V(I16x8UConvertI8x16High)    \
  V(I16x8ExtAddPairwiseI8x16S) \
  V(I16x8ExtAddPairwiseI8x16U)
#define SIMD_VISIT_SPLAT(Type, T, LaneSize)                                  \
  void InstructionSelectorT::Visit##Type##Splat(OpIndex node) {              \
    PPCOperandGeneratorT g(this);                                            \
    Emit(kPPC_##T##Splat | LaneSizeField::encode(LaneSize),                  \
         g.DefineAsRegister(node), g.UseRegister(this->input_at(node, 0))); \
  }
#undef SIMD_VISIT_SPLAT
#define SIMD_VISIT_EXTRACT_LANE(Type, T, Sign, LaneSize)                    \
  void InstructionSelectorT::Visit##Type##ExtractLane##Sign(OpIndex node) { \
    PPCOperandGeneratorT g(this);                                           \
    int32_t lane;                                                           \
    const Operation& op = this->Get(node);                                  \
    lane = op.template Cast<Simd128ExtractLaneOp>().lane;                   \
    Emit(kPPC_##T##ExtractLane##Sign | LaneSizeField::encode(LaneSize),     \
         g.DefineAsRegister(node), g.UseRegister(this->input_at(node, 0)),  \
         g.UseImmediate(lane));                                             \
  }
#undef SIMD_VISIT_EXTRACT_LANE
#define SIMD_VISIT_REPLACE_LANE(Type, T, LaneSize)                           \
  void InstructionSelectorT::Visit##Type##ReplaceLane(OpIndex node) {        \
    PPCOperandGeneratorT g(this);                                            \
    int32_t lane;                                                            \
    const Operation& op = this->Get(node);                                   \
    lane = op.template Cast<Simd128ReplaceLaneOp>().lane;                    \
    Emit(kPPC_##T##ReplaceLane | LaneSizeField::encode(LaneSize),            \
         g.DefineSameAsFirst(node), g.UseRegister(this->input_at(node, 0)),  \
         g.UseImmediate(lane), g.UseRegister(this->input_at(node, 1)));      \
  }
#undef SIMD_VISIT_REPLACE_LANE
#define SIMD_VISIT_BINOP(Opcode)                                            \
  void InstructionSelectorT::Visit##Opcode(OpIndex node) {                  \
    PPCOperandGeneratorT g(this);                                           \
    InstructionOperand temps[] = {g.TempRegister()};                        \
    Emit(kPPC_##Opcode, g.DefineAsRegister(node),                           \
         g.UseRegister(this->input_at(node, 0)),                            \
         g.UseRegister(this->input_at(node, 1)), arraysize(temps), temps);  \
  }
SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
#undef SIMD_BINOP_LIST
#define SIMD_VISIT_UNOP(Opcode)                            \
  void InstructionSelectorT::Visit##Opcode(OpIndex node) { \
    PPCOperandGeneratorT g(this);                          \
    Emit(kPPC_##Opcode, g.DefineAsRegister(node),          \
         g.UseRegister(this->input_at(node, 0)));          \
  }
SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
#undef SIMD_VISIT_UNOP
#undef SIMD_UNOP_LIST
#define SIMD_VISIT_QFMOP(Opcode)                           \
  void InstructionSelectorT::Visit##Opcode(OpIndex node) { \
    PPCOperandGeneratorT g(this);                          \
    Emit(kPPC_##Opcode, g.DefineSameAsFirst(node),         \
         g.UseRegister(this->input_at(node, 0)),           \
         g.UseRegister(this->input_at(node, 1)),           \
         g.UseRegister(this->input_at(node, 2)));          \
  }
SIMD_VISIT_QFMOP(F64x2Qfma)
SIMD_VISIT_QFMOP(F64x2Qfms)
SIMD_VISIT_QFMOP(F32x4Qfma)
SIMD_VISIT_QFMOP(F32x4Qfms)
#undef SIMD_VISIT_QFMOP
#define SIMD_RELAXED_OP_LIST(V)                           \
  V(F64x2RelaxedMin, F64x2Pmin)                           \
  V(F64x2RelaxedMax, F64x2Pmax)                           \
  V(F32x4RelaxedMin, F32x4Pmin)                           \
  V(F32x4RelaxedMax, F32x4Pmax)                           \
  V(I32x4RelaxedTruncF32x4S, I32x4SConvertF32x4)          \
  V(I32x4RelaxedTruncF32x4U, I32x4UConvertF32x4)          \
  V(I32x4RelaxedTruncF64x2SZero, I32x4TruncSatF64x2SZero) \
  V(I32x4RelaxedTruncF64x2UZero, I32x4TruncSatF64x2UZero) \
  V(I16x8RelaxedQ15MulRS, I16x8Q15MulRSatS)               \
  V(I8x16RelaxedLaneSelect, S128Select)                   \
  V(I16x8RelaxedLaneSelect, S128Select)                   \
  V(I32x4RelaxedLaneSelect, S128Select)                   \
  V(I64x2RelaxedLaneSelect, S128Select)

#define SIMD_VISIT_RELAXED_OP(name, op) \
  void InstructionSelectorT::Visit##name(OpIndex node) { Visit##op(node); }
SIMD_RELAXED_OP_LIST(SIMD_VISIT_RELAXED_OP)
#undef SIMD_VISIT_RELAXED_OP
#undef SIMD_RELAXED_OP_LIST
#define F16_OP_LIST(V)    \
  V(F16x8ExtractLane)     \
  V(F16x8ReplaceLane)     \
  V(F16x8NearestInt)      \
  V(F16x8SConvertI16x8)   \
  V(F16x8UConvertI16x8)   \
  V(I16x8SConvertF16x8)   \
  V(I16x8UConvertF16x8)   \
  V(F32x4PromoteLowF16x8) \
  V(F16x8DemoteF32x4Zero) \
  V(F16x8DemoteF64x2Zero)

#define VISIT_F16_OP(name) \
  void InstructionSelectorT::Visit##name(OpIndex node) { UNIMPLEMENTED(); }
F16_OP_LIST(VISIT_F16_OP)
#undef VISIT_F16_OP
#undef F16_OP_LIST
#if V8_ENABLE_WEBASSEMBLY
void InstructionSelectorT::VisitI8x16Shuffle(OpIndex node) {
  uint8_t shuffle[kSimd128Size];
  bool is_swizzle;
  auto view = this->simd_shuffle_view(node);
  CanonicalizeShuffle(view, shuffle, &is_swizzle);
  PPCOperandGeneratorT g(this);
  OpIndex input0 = view.input(0);
  OpIndex input1 = view.input(1);
  // Remap the shuffle indices to match IBM lane numbering.
  int max_index = 15;
  int total_lane_count = 2 * kSimd128Size;
  uint8_t shuffle_remapped[kSimd128Size];
  for (int i = 0; i < kSimd128Size; i++) {
    uint8_t current_index = shuffle[i];
    shuffle_remapped[i] = (current_index <= max_index
                               ? max_index - current_index
                               : total_lane_count - current_index + max_index);
  }
  Emit(kPPC_I8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
       g.UseRegister(input1),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped)),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped + 4)),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped + 8)),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped + 12)));
}
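// The remapping converts WebAssembly's little-endian lane numbering to the
// IBM (big-endian) order the hardware shuffle expects: lane i of the first
// input maps to 15 - i, and lanes 16..31 of the second input are mirrored
// within their own range the same way.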
void InstructionSelectorT::VisitSetStackPointer(OpIndex node) {
  OperandGenerator g(this);
  auto input = g.UseRegister(this->input_at(node, 0));
  Emit(kArchSetStackPointer, 0, nullptr, 1, &input);
}
#else
void InstructionSelectorT::VisitI8x16Shuffle(OpIndex node) { UNREACHABLE(); }
#endif  // V8_ENABLE_WEBASSEMBLY
void InstructionSelectorT::VisitS128Zero(OpIndex node) {
  PPCOperandGeneratorT g(this);
  Emit(kPPC_S128Zero, g.DefineAsRegister(node));
}

void InstructionSelectorT::VisitS128Select(OpIndex node) {
  PPCOperandGeneratorT g(this);
  Emit(kPPC_S128Select, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)),
       g.UseRegister(this->input_at(node, 1)),
       g.UseRegister(this->input_at(node, 2)));
}
// A local replica of SimdShuffle::Pack4Lanes; that function is unavailable
// on builds with WebAssembly disabled, but it is needed by other visitors
// such as VisitS128Const.
static int32_t Pack4Lanes(const uint8_t* shuffle) {
  int32_t result = 0;
  for (int i = 3; i >= 0; --i) {
    result <<= 8;
    result |= shuffle[i];
  }
  return result;
}

void InstructionSelectorT::VisitS128Const(OpIndex node) {
  PPCOperandGeneratorT g(this);
  uint32_t val[kSimd128Size / sizeof(uint32_t)];
  const Simd128ConstantOp& constant =
      this->Get(node).template Cast<Simd128ConstantOp>();
  memcpy(val, constant.value, kSimd128Size);
  // Avoid emitting a generic constant for all-zeros and all-ones vectors.
  bool all_zeros = !(val[0] || val[1] || val[2] || val[3]);
  bool all_ones = val[0] == UINT32_MAX && val[1] == UINT32_MAX &&
                  val[2] == UINT32_MAX && val[3] == UINT32_MAX;
  InstructionOperand dst = g.DefineAsRegister(node);
  if (all_zeros) {
    Emit(kPPC_S128Zero, dst);
  } else if (all_ones) {
    Emit(kPPC_S128AllOnes, dst);
  } else {
    // Pack4Lanes reverses the bytes (lanes) on big-endian targets and is a
    // no-op on little-endian ones.
    Emit(kPPC_S128Const, g.DefineAsRegister(node),
         g.UseImmediate(Pack4Lanes(reinterpret_cast<uint8_t*>(&val[0]))),
         g.UseImmediate(Pack4Lanes(reinterpret_cast<uint8_t*>(&val[0]) + 4)),
         g.UseImmediate(Pack4Lanes(reinterpret_cast<uint8_t*>(&val[0]) + 8)),
         g.UseImmediate(Pack4Lanes(reinterpret_cast<uint8_t*>(&val[0]) + 12)));
  }
}
void InstructionSelectorT::VisitI16x8DotI8x16I7x16S(OpIndex node) {
  PPCOperandGeneratorT g(this);
  Emit(kPPC_I16x8DotI8x16S, g.DefineAsRegister(node),
       g.UseUniqueRegister(this->input_at(node, 0)),
       g.UseUniqueRegister(this->input_at(node, 1)));
}

void InstructionSelectorT::VisitI32x4DotI8x16I7x16AddS(OpIndex node) {
  PPCOperandGeneratorT g(this);
  Emit(kPPC_I32x4DotI8x16AddS, g.DefineAsRegister(node),
       g.UseUniqueRegister(this->input_at(node, 0)),
       g.UseUniqueRegister(this->input_at(node, 1)),
       g.UseUniqueRegister(this->input_at(node, 2)));
}
void InstructionSelectorT::EmitPrepareResults(
    ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
    OpIndex node) {
  PPCOperandGeneratorT g(this);

  for (PushParameter output : *results) {
    if (!output.location.IsCallerFrameSlot()) continue;
    // Skip any alignment holes in nodes.
    if (output.node.valid()) {
      DCHECK(!call_descriptor->IsCFunctionCall());
      if (output.location.GetType() == MachineType::Float32()) {
        MarkAsFloat32(output.node);
      } else if (output.location.GetType() == MachineType::Float64()) {
        MarkAsFloat64(output.node);
      } else if (output.location.GetType() == MachineType::Simd128()) {
        MarkAsSimd128(output.node);
      }
      int offset = call_descriptor->GetOffsetToReturns();
      int reverse_slot = -output.location.GetLocation() - offset;
      Emit(kPPC_Peek, g.DefineAsRegister(output.node),
           g.UseImmediate(reverse_slot));
    }
  }
}
void InstructionSelectorT::VisitLoadLane(OpIndex node) {
  PPCOperandGeneratorT g(this);
  InstructionCode opcode = kArchNop;
  const Simd128LaneMemoryOp& load =
      this->Get(node).template Cast<Simd128LaneMemoryOp>();
  switch (load.lane_kind) {
    case Simd128LaneMemoryOp::LaneKind::k8:
      opcode = kPPC_S128Load8Lane;
      break;
    case Simd128LaneMemoryOp::LaneKind::k16:
      opcode = kPPC_S128Load16Lane;
      break;
    case Simd128LaneMemoryOp::LaneKind::k32:
      opcode = kPPC_S128Load32Lane;
      break;
    case Simd128LaneMemoryOp::LaneKind::k64:
      opcode = kPPC_S128Load64Lane;
      break;
  }
  Emit(opcode | AddressingModeField::encode(kMode_MRR),
       g.DefineSameAsFirst(node), g.UseRegister(load.value()),
       g.UseRegister(load.base()), g.UseRegister(load.index()),
       g.UseImmediate(load.lane));
}
void InstructionSelectorT::VisitLoadTransform(OpIndex node) {
  PPCOperandGeneratorT g(this);
  ArchOpcode opcode;
  const Simd128LoadTransformOp& op =
      this->Get(node).template Cast<Simd128LoadTransformOp>();
  OpIndex base = op.base();
  OpIndex index = op.index();
  switch (op.transform_kind) {
    case Simd128LoadTransformOp::TransformKind::k8Splat:
      opcode = kPPC_S128Load8Splat;
      break;
    case Simd128LoadTransformOp::TransformKind::k16Splat:
      opcode = kPPC_S128Load16Splat;
      break;
    case Simd128LoadTransformOp::TransformKind::k32Splat:
      opcode = kPPC_S128Load32Splat;
      break;
    case Simd128LoadTransformOp::TransformKind::k64Splat:
      opcode = kPPC_S128Load64Splat;
      break;
    case Simd128LoadTransformOp::TransformKind::k8x8S:
      opcode = kPPC_S128Load8x8S;
      break;
    case Simd128LoadTransformOp::TransformKind::k8x8U:
      opcode = kPPC_S128Load8x8U;
      break;
    case Simd128LoadTransformOp::TransformKind::k16x4S:
      opcode = kPPC_S128Load16x4S;
      break;
    case Simd128LoadTransformOp::TransformKind::k16x4U:
      opcode = kPPC_S128Load16x4U;
      break;
    case Simd128LoadTransformOp::TransformKind::k32x2S:
      opcode = kPPC_S128Load32x2S;
      break;
    case Simd128LoadTransformOp::TransformKind::k32x2U:
      opcode = kPPC_S128Load32x2U;
      break;
    case Simd128LoadTransformOp::TransformKind::k32Zero:
      opcode = kPPC_S128Load32Zero;
      break;
    case Simd128LoadTransformOp::TransformKind::k64Zero:
      opcode = kPPC_S128Load64Zero;
      break;
    default:
      UNREACHABLE();
  }
  Emit(opcode | AddressingModeField::encode(kMode_MRR),
       g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
}
void InstructionSelectorT::VisitStoreLane(OpIndex node) {
  PPCOperandGeneratorT g(this);
  ArchOpcode opcode;
  InstructionOperand inputs[4];
  const Simd128LaneMemoryOp& store =
      this->Get(node).template Cast<Simd128LaneMemoryOp>();
  switch (store.lane_kind) {
    case Simd128LaneMemoryOp::LaneKind::k8:
      opcode = kPPC_S128Store8Lane;
      break;
    case Simd128LaneMemoryOp::LaneKind::k16:
      opcode = kPPC_S128Store16Lane;
      break;
    case Simd128LaneMemoryOp::LaneKind::k32:
      opcode = kPPC_S128Store32Lane;
      break;
    case Simd128LaneMemoryOp::LaneKind::k64:
      opcode = kPPC_S128Store64Lane;
      break;
  }
  inputs[0] = g.UseRegister(store.value());
  inputs[1] = g.UseRegister(store.base());
  inputs[2] = g.UseRegister(store.index());
  inputs[3] = g.UseImmediate(store.lane);
  Emit(opcode | AddressingModeField::encode(kMode_MRR), 0, nullptr, 4, inputs);
}
void InstructionSelectorT::AddOutputToSelectContinuation(OperandGenerator* g,
                                                         int first_input_index,
                                                         OpIndex node) {
  UNREACHABLE();
}

void InstructionSelectorT::VisitFloat32RoundTiesEven(OpIndex node) {
  UNREACHABLE();
}

void InstructionSelectorT::VisitFloat64RoundTiesEven(OpIndex node) {
  UNREACHABLE();
}
void InstructionSelectorT::VisitF64x2NearestInt(OpIndex node) { UNREACHABLE(); }

void InstructionSelectorT::VisitF32x4NearestInt(OpIndex node) { UNREACHABLE(); }
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  return MachineOperatorBuilder::kFloat32RoundDown |
         MachineOperatorBuilder::kFloat64RoundDown |
         MachineOperatorBuilder::kFloat32RoundUp |
         MachineOperatorBuilder::kFloat64RoundUp |
         MachineOperatorBuilder::kFloat32RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTiesAway |
         MachineOperatorBuilder::kWord32Popcnt |
         MachineOperatorBuilder::kWord64Popcnt;
}

MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
  return MachineOperatorBuilder::AlignmentRequirements::
      FullUnalignedAccessSupport();
}
enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long mode(MIPS/PPC only)") DEFINE_BOOL(partial_constant_pool
#define VISIT_ATOMIC_BINOP(op)
#define SIMD_VISIT_SPLAT(Type)
#define SIMD_VISIT_EXTRACT_LANE(Type, Sign)
#define SIMD_VISIT_REPLACE_LANE(Type)
#define SIMD_VISIT_UNOP(Name, instruction)
#define SIMD_VISIT_BINOP(Name, instruction)
#define SIMD_RELAXED_OP_LIST(V)
#define SIMD_VISIT_RELAXED_OP(Name)
#define VISIT_F16_OP(name)
#define SIMD_VISIT_QFMOP(Opcode)
ZoneVector< RpoNumber > & result
#define SmiWordOffset(offset)
#define SIMD_UNOP_LIST(V)
#define SIMD_BINOP_LIST(V)
constexpr unsigned CountTrailingZeros64(uint64_t value)
constexpr unsigned CountTrailingZeros32(uint32_t value)
constexpr unsigned CountLeadingZeros64(uint64_t value)
constexpr unsigned CountPopulation(T value)
constexpr unsigned CountLeadingZeros32(uint32_t value)
constexpr bool IsInRange(T value, U lower_limit, U higher_limit)
WordBinopMask::For< WordBinopOp::Kind::kBitwiseAnd, WordRepresentation::Word64()> kWord64BitwiseAnd
ShiftMask::For< ShiftOp::Kind::kShiftLeft, WordRepresentation::Word64()> kWord64ShiftLeft
WordBinopMask::For< WordBinopOp::Kind::kBitwiseAnd, WordRepresentation::Word32()> kWord32BitwiseAnd
ChangeOpMask::For< ChangeOp::Kind::kSignedFloatTruncateOverflowToMin, ChangeOp::Assumption::kNoAssumption, RegisterRepresentation::Float32(), RegisterRepresentation::Word32()> kTruncateFloat32ToInt32OverflowToMin
ShiftMask::For< ShiftOp::Kind::kShiftRightLogical, WordRepresentation::Word64()> kWord64ShiftRightLogical
WordBinopMask::For< WordBinopOp::Kind::kSub, WordRepresentation::Word64()> kWord64Sub
WordBinopMask::For< WordBinopOp::Kind::kSub, WordRepresentation::Word32()> kWord32Sub
ChangeOpMask::For< ChangeOp::Kind::kUnsignedFloatTruncateOverflowToMin, ChangeOp::Assumption::kNoAssumption, RegisterRepresentation::Float32(), RegisterRepresentation::Word32()> kTruncateFloat32ToUint32OverflowToMin
ShiftMask::For< ShiftOp::Kind::kShiftLeft, WordRepresentation::Word32()> kWord32ShiftLeft
constexpr size_t input_count()
V8_INLINE const Operation & Get(const Graph &graph, OpIndex index)
static void VisitLogical(InstructionSelectorT *selector, Zone *zone, OpIndex node, WordRepresentation rep, ArchOpcode opcode, bool left_can_cover, bool right_can_cover, ImmediateMode imm_mode)
void VisitAtomicExchange(InstructionSelectorT *selector, OpIndex node, ArchOpcode opcode)
static bool IsContiguousMask32(uint32_t value, int *mb, int *me)
static void VisitRRO(InstructionSelectorT *selector, ArchOpcode opcode, OpIndex node)
static void VisitRR(InstructionSelectorT *selector, ArchOpcode opcode, OpIndex node)
@ kIndirectPointerWriteBarrier
ArchOpcode SelectLoadOpcode(MemoryRepresentation loaded_rep, RegisterRepresentation result_rep, ImmediateMode *mode)
void VisitRRR(InstructionSelectorT *selector, ArchOpcode opcode, OpIndex node)
void EmitInt64MulWithOverflow(InstructionSelectorT *selector, OpIndex node, FlagsContinuationT *cont)
static Instruction * VisitCompare(InstructionSelectorT *selector, InstructionCode opcode, InstructionOperand left, InstructionOperand right, FlagsContinuationT *cont)
void VisitAtomicBinaryOperation(InstructionSelectorT *selector, OpIndex node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op, ArchOpcode uint16_op, ArchOpcode int32_op, ArchOpcode uint32_op, ArchOpcode int64_op, ArchOpcode uint64_op)
static void VisitLoadCommon(InstructionSelectorT *selector, OpIndex node, ImmediateMode mode, InstructionCode opcode)
void VisitFloat32Compare(InstructionSelectorT *selector, OpIndex node, FlagsContinuationT *cont)
static int32_t Pack4Lanes(const uint8_t *shuffle)
void VisitStoreCommon(InstructionSelectorT *selector, OpIndex node, StoreRepresentation store_rep, std::optional< AtomicMemoryOrder > atomic_order)
static void VisitBinop(InstructionSelectorT *selector, turboshaft::OpIndex node, InstructionCode opcode, bool has_reverse_opcode, InstructionCode reverse_opcode, FlagsContinuationT *cont)
MachineType LoadRepresentation
Instruction * VisitWordCompare(InstructionSelectorT *selector, OpIndex node, InstructionCode opcode, FlagsContinuationT *cont, bool commutative)
@ kUnsignedLessThanOrEqual
@ kUnsignedGreaterThanOrEqual
static bool CompareLogical(FlagsContinuationT *cont)
void VisitFloat64Compare(InstructionSelectorT *selector, OpIndex node, FlagsContinuationT *cont)
RecordWriteMode WriteBarrierKindToRecordWriteMode(WriteBarrierKind write_barrier_kind)
static bool IsContiguousMask64(uint64_t value, int *mb, int *me)
void VisitAtomicCompareExchange(InstructionSelectorT *selector, OpIndex node, ArchOpcode opcode)
bool TryCast(Tagged< From > value, Tagged< To > *out)
constexpr int kSimd128Size
constexpr bool CanBeTaggedOrCompressedOrIndirectPointer(MachineRepresentation rep)
const int kStackFrameExtraParamSlot
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses 
enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long mode(MIPS/PPC only)") DEFINE_BOOL(partial_constant_pool
constexpr int kSystemPointerSize
constexpr bool SmiValuesAre31Bits()
V8_EXPORT_PRIVATE FlagValues v8_flags
#define I(name, number_of_args, result_size)
#define DCHECK_NE(v1, v2)
#define DCHECK_GE(v1, v2)
#define CHECK_EQ(lhs, rhs)
#define DCHECK(condition)
#define DCHECK_EQ(v1, v2)
bool is_load_root_register(turboshaft::OpIndex node) const
turboshaft::OpIndex input_at(turboshaft::OpIndex node, size_t index) const
base::Vector< const turboshaft::OpIndex > inputs(turboshaft::OpIndex node) const
LoadView load_view(turboshaft::OpIndex node)
turboshaft::Opcode opcode(turboshaft::OpIndex node) const
static turboshaft::OpIndex value(turboshaft::OptionalOpIndex node)
turboshaft::Graph * turboshaft_graph() const
StoreView store_view(turboshaft::OpIndex node)
V< WordPtr > index() const
OptionalOpIndex expected() const
V< WordPtr > base() const
MemoryRepresentation memory_rep
underlying_operation_t< Op > & Cast()
V< Word32 > right() const
V< WordType > left() const
V< WordType > right() const
#define V8_STATIC_ROOTS_BOOL