#if V8_ENABLE_WEBASSEMBLY
using namespace turboshaft;
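// Returns true when |node| is known to produce a compressed tagged value,
// e.g. a load whose loaded representation is compressed.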
bool IsCompressed(InstructionSelectorT* selector, OpIndex node) {
  if (!node.valid()) return false;
  if (selector->is_load(node)) {
    auto load = selector->load_view(node);
    return load.loaded_rep().IsCompressed();
  } else if (const PhiOp* phi = selector->TryCast<PhiOp>(node)) {
bool LhsIsNotOnlyConstant(Graph* graph, OpIndex left_idx, OpIndex right_idx) {
  const Operation& left = graph->Get(left_idx);
  const Operation& right = graph->Get(right_idx);
  if (right.Is<ConstantOp>()) {
  if (left.Is<ConstantOp>()) {
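// Note: int32_t min itself is excluded from the immediate range below,
// presumably so a displacement can still be negated without overflow.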
  constexpr int64_t kImmediateMin = std::numeric_limits<int32_t>::min() + 1;
  constexpr int64_t kImmediateMax = std::numeric_limits<int32_t>::max();
  static_assert(kImmediateMin ==
  static_assert(kImmediateMax ==
  return kImmediateMin <= value && value <= kImmediateMax;
  switch (constant.kind) {
    case ConstantOp::Kind::kCompressedHeapObject: {
    case ConstantOp::Kind::kWord32: {
      const int32_t value = constant.word32();
      return value != std::numeric_limits<int32_t>::min();
    case ConstantOp::Kind::kWord64: {
      const int64_t value = constant.word64();
    case ConstantOp::Kind::kSmi: {
      const int64_t value = constant.smi().ptr();
      const int32_t value = constant.smi().ptr();
      return value != std::numeric_limits<int32_t>::min();
    case ConstantOp::Kind::kNumber:
      return constant.number().get_bits() == 0;
  switch (constant.kind) {
    case ConstantOp::Kind::kWord32:
      return constant.word32();
    case ConstantOp::Kind::kWord64:
      return static_cast<int32_t>(constant.word64());
    case ConstantOp::Kind::kSmi:
      return static_cast<int32_t>(constant.smi().ptr());
    case ConstantOp::Kind::kNumber:
      DCHECK_EQ(constant.number().get_bits(), 0);
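// Matches index expressions that fit the x64 SIB byte: factors 1/2/4/8
// (scale 0-3), shift amounts 0-3, and the "power of two plus one" factors
// 3/5/9, which are expressible as index*scale + index.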
struct ScaledIndexMatch {
                      OpIndex* index, int* scale,
                      bool* power_of_two_plus_one) {
    if (constant == nullptr) return false;
    if (constant->kind != ConstantOp::Kind::kWord32 &&
        constant->kind != ConstantOp::Kind::kWord64) {
    uint64_t value = constant->integral();
    if (plus_one) *plus_one = false;
    if (value == 1) return (scale = 0), true;
    if (value == 2) return (scale = 1), true;
    if (value == 4) return (scale = 2), true;
    if (value == 8) return (scale = 3), true;
    if (plus_one == nullptr) return false;
    if (value == 3) return (scale = 1), true;
    if (value == 5) return (scale = 2), true;
    if (value == 9) return (scale = 3), true;
  if (binop->kind != WordBinopOp::Kind::kMul) return false;
  if (MatchScaleConstant(selector->Get(binop->right()), *scale,
                         power_of_two_plus_one)) {
    *index = binop->left();
  if (MatchScaleConstant(selector->Get(binop->left()), *scale,
                         power_of_two_plus_one)) {
    *index = binop->right();
  if (shift->kind != ShiftOp::Kind::kShiftLeft) return false;
  if (scale_value < 0 || scale_value > 3) return false;
  *index = shift->left();
  *scale = static_cast<int>(scale_value);
  if (power_of_two_plus_one) *power_of_two_plus_one = false;
                         bool allow_power_of_two_plus_one) {
  ScaledIndexMatch match;
  bool plus_one = false;
                       allow_power_of_two_plus_one ? &plus_one : nullptr)) {
    match.base = plus_one ? match.index : OpIndex{};
                         bool allow_power_of_two_plus_one) {
                         bool allow_power_of_two_plus_one) {
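// Decomposition of an address computation into the x64
// [base + index*scale + displacement] form.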
struct BaseWithScaledIndexAndDisplacementMatch {

std::optional<BaseWithScaledIndexAndDisplacementMatch>
    bool is_commutative);

std::optional<BaseWithScaledIndexAndDisplacementMatch>
  BaseWithScaledIndexAndDisplacementMatch result;
    result.base = load->base();
    result.index = load->index().value_or_invalid();
    result.scale = load->element_size_log2;
    result.displacement = load->offset;
    result.base = store->base();
    result.index = store->index().value_or_invalid();
    result.scale = store->element_size_log2;
    result.displacement = store->offset;
  } else if (op.Is<WordBinopOp>()) {
#ifdef V8_ENABLE_WEBASSEMBLY
  } else if (const Simd128LaneMemoryOp* lane_op =
                 op.TryCast<Simd128LaneMemoryOp>()) {
    result.base = lane_op->base();
    result.index = lane_op->index();
  } else if (const Simd128LoadTransformOp* load_transform_128 =
                 op.TryCast<Simd128LoadTransformOp>()) {
    result.base = load_transform_128->base();
    DCHECK_EQ(load_transform_128->offset, 0);
    result.index = load_transform_128->index();
    DCHECK(!load_transform_128->load_kind.tagged_base);
#if V8_ENABLE_WASM_SIMD256_REVEC
  } else if (const Simd256LoadTransformOp* load_transform_256 =
                 op.TryCast<Simd256LoadTransformOp>()) {
    result.base = load_transform_256->base();
    result.index = load_transform_256->index();
    DCHECK_EQ(load_transform_256->offset, 0);
    DCHECK(!load_transform_256->load_kind.tagged_base);
  const WordBinopOp& binop = op.Cast<WordBinopOp>();
      selector, left, right, binop.IsCommutative());
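// Tries the possible decompositions of an addition (index*scale plus a
// displacement, a base, or both), retrying with swapped operands when the
// operation is commutative.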
std::optional<BaseWithScaledIndexAndDisplacementMatch>
    bool is_commutative) {
      -> std::optional<BaseWithScaledIndexAndDisplacementMatch> {
    BaseWithScaledIndexAndDisplacementMatch result;
      if (right_binop->kind == WordBinopOp::Kind::kSub) {
        result.base = right_binop->left();
      if (right_binop->kind == WordBinopOp::Kind::kAdd) {
        result.base = right_binop->left();
                right_binop->left(), &result.displacement)) {
          result.base = right_binop->right();
      -> std::optional<BaseWithScaledIndexAndDisplacementMatch> {
           WordBinopOp::Kind::kAdd);
    BaseWithScaledIndexAndDisplacementMatch result;
      result.base = left_add_right;
  auto match_plus_plus = [&selector, &match_S_plus_plus](OpIndex left,
      -> std::optional<BaseWithScaledIndexAndDisplacementMatch> {
    BaseWithScaledIndexAndDisplacementMatch result;
        left_add && left_add->kind == WordBinopOp::Kind::kAdd) {
          match_S_plus_plus(left, right, left_add->left(), left_add->right());
      if (maybe_res) return maybe_res;
          match_S_plus_plus(left, right, left_add->right(), left_add->left());
      if (maybe_res) return maybe_res;
  auto maybe_res = match_S_plus(left, right);
  if (maybe_res) return maybe_res;
  if (is_commutative) {
    maybe_res = match_S_plus(right, left);
  maybe_res = match_plus_plus(left, right);
  if (maybe_res) return maybe_res;
  if (is_commutative) {
    maybe_res = match_plus_plus(right, left);
  BaseWithScaledIndexAndDisplacementMatch result;
std::optional<BaseWithScaledIndexAndDisplacementMatch>
    if (!selector()->CanCover(node, input)) return false;
    if (effect_level != selector()->GetEffectLevel(input)) {
      case ConstantOp::Kind::kWord32:
        return op->word32() == 0;
      case ConstantOp::Kind::kWord64:
        return op->word64() == 0;
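// Addressing-mode naming: M = memory, R = base register, 1/2/4/8 = index
// scale, I = immediate displacement; e.g. kMode_MR4I is
// [base + index*4 + displacement].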
  bool fold_base_into_displacement = false;
  int64_t fold_value = 0;
        fold_base_into_displacement = true;
      DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
        static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
                                                     kMode_MR4I, kMode_MR8I};
        mode = kMRnI_modes[scale_exponent];
                                                    kMode_MR4, kMode_MR8};
        mode = kMRn_modes[scale_exponent];
      DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
      if (fold_base_into_displacement) {
                                                    kMode_M4I, kMode_M8I};
        mode = kMnI_modes[scale_exponent];
        if (!index.valid()) {
                                                      kMode_M4I, kMode_M8I};
          mode = kMnI_modes[scale_exponent];
          mode = kMn_modes[scale_exponent];
          if (mode == kMode_MR1) {
struct LoadStoreView {
  explicit LoadStoreView(const Operation& op) {
      index = load->index();
    const StoreOp& store = op.Cast<StoreOp>();
    index = store.index();
  LoadStoreView load_or_store(op);
      !load_or_store.index.valid()) {
    if (selector()->CanAddressRelativeToRootsRegister(reference)) {
      const ptrdiff_t delta =
          load_or_store.offset +
      if (is_int32(delta)) {
  if (m->displacement != 0) {
    inputs[(*input_count)++] =
        m->displacement, m->displacement_mode, inputs, input_count, reg_kind);
  } else if (!m->base.valid() &&
838 switch (loaded_rep) {
868#ifdef V8_COMPRESS_POINTERS
875 return kX64MovqDecompressTagged;
881 return kX64MovqDecompressTaggedSigned;
884 case MemoryRepresentation::TaggedPointer():
885 case MemoryRepresentation::TaggedSigned():
890 case MemoryRepresentation::UncompressedTaggedPointer():
891 case MemoryRepresentation::UncompressedTaggedSigned():
896 return kX64MovqDecompressProtected;
900 return kX64MovqDecodeSandboxedPointer;
906 return kX64Movdqu256;
912 switch (load_rep.representation()) {
924 opcode = load_rep.IsSigned() ? kX64Movsxbl : kX64Movzxbl;
927 opcode = load_rep.IsSigned() ? kX64Movsxwl : kX64Movzxwl;
934#ifdef V8_COMPRESS_POINTERS
940#ifdef V8_COMPRESS_POINTERS
942 opcode = kX64MovqDecompressTaggedSigned;
946 opcode = kX64MovqDecompressTagged;
958 opcode = kX64MovqDecompressProtected;
961 opcode = kX64MovqDecodeSandboxedPointer;
967 opcode = kX64Movdqu256;
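// Maps a stored memory representation to the x64 store opcode; tagged stores
// go through kX64MovqCompressTagged.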
978ArchOpcode GetStoreOpcode(MemoryRepresentation stored_rep) {
979 switch (stored_rep) {
981 case MemoryRepresentation::Uint8():
984 case MemoryRepresentation::Uint16():
987 case MemoryRepresentation::Uint32():
990 case MemoryRepresentation::Uint64():
999 case MemoryRepresentation::TaggedPointer():
1000 case MemoryRepresentation::TaggedSigned():
1001 return kX64MovqCompressTagged;
1003 case MemoryRepresentation::UncompressedTaggedPointer():
1004 case MemoryRepresentation::UncompressedTaggedSigned():
1010 return kX64MovqStoreIndirectPointer;
1012 return kX64MovqEncodeSandboxedPointer;
1016 return kX64Movdqu256;
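// Sequentially consistent stores use the atomic store opcodes rather than
// plain moves.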
1020ArchOpcode GetSeqCstStoreOpcode(StoreRepresentation store_rep) {
1021 switch (store_rep.representation()) {
1023 return kAtomicStoreWord8;
1025 return kAtomicStoreWord16;
1027 return kAtomicStoreWord32;
1029 return kX64Word64AtomicStoreWord64;
1034 return kX64Word64AtomicStoreWord64;
1038 return kAtomicStoreWord32;
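// With AVX the result can go to a fresh register; otherwise the destination
// must alias the first input (DefineSameAsFirst).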
1045template <VectorLength vec_len>
void VisitMinOrMax(InstructionSelectorT* selector, OpIndex node,
1048 X64OperandGeneratorT g(selector);
1049 const Operation& op = selector->Get(node);
1051 InstructionOperand dst = selector->IsSupported(AVX)
1052 ? g.DefineAsRegister(node)
1053 : g.DefineSameAsFirst(node);
1054 InstructionCode instr_code = opcode | VectorLengthField::encode(vec_len);
1058 selector->Emit(instr_code, dst, g.UseRegister(op.input(1)),
1059 g.UseRegister(op.input(0)));
1061 selector->Emit(instr_code, dst, g.UseRegister(op.input(0)),
1062 g.UseRegister(op.input(1)));
void InstructionSelectorT::VisitTraceInstruction(OpIndex node) {

void InstructionSelectorT::VisitStackSlot(OpIndex node) {
                                 stack_slot.is_tagged);
  Emit(kArchStackSlot, g.DefineAsRegister(node),
       sequence()->AddImmediate(Constant(slot)), 0, nullptr);

void InstructionSelectorT::VisitAbortCSADcheck(OpIndex node) {
  X64OperandGeneratorT g(this);
  Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(check.message(), rdx));
1089#ifdef V8_ENABLE_WEBASSEMBLY
void InstructionSelectorT::VisitLoadLane(OpIndex node) {
  const Simd128LaneMemoryOp& load =
      this->Get(node).Cast<Simd128LaneMemoryOp>();
1093 switch (load.lane_kind) {
1094 case Simd128LaneMemoryOp::LaneKind::k8:
1097 case Simd128LaneMemoryOp::LaneKind::k16:
1100 case Simd128LaneMemoryOp::LaneKind::k32:
1103 case Simd128LaneMemoryOp::LaneKind::k64:
  X64OperandGeneratorT g(this);
  InstructionOperand outputs[] = {g.DefineAsRegister(node)};
  InstructionOperand inputs[5];
      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
1126 DCHECK(!load.kind.maybe_unaligned);
1127 if (load.kind.with_trap_handler) {
  const Simd128LoadTransformOp& op =
      this->Get(node).Cast<Simd128LoadTransformOp>();
1137 switch (op.transform_kind) {
1138 case Simd128LoadTransformOp::TransformKind::k8x8S:
1139 opcode = kX64S128Load8x8S;
1141 case Simd128LoadTransformOp::TransformKind::k8x8U:
1142 opcode = kX64S128Load8x8U;
1144 case Simd128LoadTransformOp::TransformKind::k16x4S:
1145 opcode = kX64S128Load16x4S;
1147 case Simd128LoadTransformOp::TransformKind::k16x4U:
1148 opcode = kX64S128Load16x4U;
1150 case Simd128LoadTransformOp::TransformKind::k32x2S:
1151 opcode = kX64S128Load32x2S;
1153 case Simd128LoadTransformOp::TransformKind::k32x2U:
1154 opcode = kX64S128Load32x2U;
1156 case Simd128LoadTransformOp::TransformKind::k8Splat:
1157 opcode = kX64S128Load8Splat;
1159 case Simd128LoadTransformOp::TransformKind::k16Splat:
1160 opcode = kX64S128Load16Splat;
1162 case Simd128LoadTransformOp::TransformKind::k32Splat:
1163 opcode = kX64S128Load32Splat;
1165 case Simd128LoadTransformOp::TransformKind::k64Splat:
1166 opcode = kX64S128Load64Splat;
1168 case Simd128LoadTransformOp::TransformKind::k32Zero:
1171 case Simd128LoadTransformOp::TransformKind::k64Zero:
1177 DCHECK(!op.load_kind.maybe_unaligned);
1179 if (op.load_kind.with_trap_handler) {
1185#if V8_ENABLE_WASM_SIMD256_REVEC
void InstructionSelectorT::VisitS256Const(OpIndex node) {
  X64OperandGeneratorT g(this);
  static const int kUint32Immediates = kSimd256Size / sizeof(uint32_t);
1189 uint32_t val[kUint32Immediates];
  bool all_zeros = std::all_of(std::begin(val), std::end(val),
                               [](uint32_t v) { return v == 0; });
  bool all_ones = std::all_of(std::begin(val), std::end(val),
                              [](uint32_t v) { return v == UINT32_MAX; });
1200 InstructionOperand dst = g.DefineAsRegister(node);
1202 Emit(kX64SZero | VectorLengthField::encode(kV256), dst);
  } else if (all_ones) {
1204 Emit(kX64SAllOnes | VectorLengthField::encode(kV256), dst);
1206 Emit(kX64S256Const, dst, g.UseImmediate(val[0]), g.UseImmediate(val[1]),
1207 g.UseImmediate(val[2]), g.UseImmediate(val[3]), g.UseImmediate(val[4]),
1208 g.UseImmediate(val[5]), g.UseImmediate(val[6]),
1209 g.UseImmediate(val[7]));
void InstructionSelectorT::VisitS256Zero(OpIndex node) {
  X64OperandGeneratorT g(this);
1215 Emit(kX64SZero | VectorLengthField::encode(kV256), g.DefineAsRegister(node));
void InstructionSelectorT::VisitSimd256LoadTransform(OpIndex node) {
  const Simd256LoadTransformOp& op =
      this->Get(node).Cast<Simd256LoadTransformOp>();
1222 switch (op.transform_kind) {
1223 case Simd256LoadTransformOp::TransformKind::k8x16S:
1224 opcode = kX64S256Load8x16S;
1226 case Simd256LoadTransformOp::TransformKind::k8x16U:
1227 opcode = kX64S256Load8x16U;
1229 case Simd256LoadTransformOp::TransformKind::k8x8U:
1230 opcode = kX64S256Load8x8U;
1232 case Simd256LoadTransformOp::TransformKind::k16x8S:
1233 opcode = kX64S256Load16x8S;
1235 case Simd256LoadTransformOp::TransformKind::k16x8U:
1236 opcode = kX64S256Load16x8U;
1238 case Simd256LoadTransformOp::TransformKind::k32x4S:
1239 opcode = kX64S256Load32x4S;
1241 case Simd256LoadTransformOp::TransformKind::k32x4U:
1242 opcode = kX64S256Load32x4U;
1244 case Simd256LoadTransformOp::TransformKind::k8Splat:
1245 opcode = kX64S256Load8Splat;
1247 case Simd256LoadTransformOp::TransformKind::k16Splat:
1248 opcode = kX64S256Load16Splat;
1250 case Simd256LoadTransformOp::TransformKind::k32Splat:
1251 opcode = kX64S256Load32Splat;
1253 case Simd256LoadTransformOp::TransformKind::k64Splat:
1254 opcode = kX64S256Load64Splat;
1259 DCHECK(!op.load_kind.maybe_unaligned);
1261 if (op.load_kind.with_trap_handler) {
void InstructionSelectorT::VisitF32x8RelaxedMin(OpIndex node) {
  VisitMinOrMax<kV256>(this, node, kX64Minps, false);

void InstructionSelectorT::VisitF32x8RelaxedMax(OpIndex node) {
  VisitMinOrMax<kV256>(this, node, kX64Maxps, false);

void InstructionSelectorT::VisitF64x4RelaxedMin(OpIndex node) {
  VisitMinOrMax<kV256>(this, node, kX64Minpd, false);

void InstructionSelectorT::VisitF64x4RelaxedMax(OpIndex node) {
  VisitMinOrMax<kV256>(this, node, kX64Maxpd, false);
1283#ifdef V8_TARGET_ARCH_X64
void InstructionSelectorT::VisitSimd256Shufd(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd256ShufdOp& shufd = Get(node).Cast<Simd256ShufdOp>();
  InstructionOperand dst = g.DefineAsRegister(node);
  InstructionOperand src = g.UseUniqueRegister(shufd.input());
  InstructionOperand imm = g.UseImmediate(shufd.control);
  InstructionOperand inputs[] = {src, imm};
void InstructionSelectorT::VisitSimd256Shufps(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd256ShufpsOp& shufps = Get(node).Cast<Simd256ShufpsOp>();
  InstructionOperand dst = g.DefineAsRegister(node);
  InstructionOperand src1 = g.UseUniqueRegister(shufps.left());
  InstructionOperand src2 = g.UseUniqueRegister(shufps.right());
  InstructionOperand imm = g.UseImmediate(shufps.control);
  InstructionOperand inputs[] = {src1, src2, imm};
void InstructionSelectorT::VisitSimd256Unpack(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd256UnpackOp& unpack = Get(node).Cast<Simd256UnpackOp>();
  InstructionOperand dst = g.DefineAsRegister(node);
  InstructionOperand src1 = g.UseUniqueRegister(unpack.left());
  InstructionOperand src2 = g.UseUniqueRegister(unpack.right());
  InstructionOperand inputs[] = {src1, src2};
1313 switch (unpack.kind) {
1314 case Simd256UnpackOp::Kind::k32x8High:
1315 code = kX64S32x8UnpackHigh;
1317 case Simd256UnpackOp::Kind::k32x8Low:
1318 code = kX64S32x8UnpackLow;
void InstructionSelectorT::VisitSimdPack128To256(OpIndex node) {
  X64OperandGeneratorT g(this);
  const SimdPack128To256Op& op = Get(node).Cast<SimdPack128To256Op>();
  constexpr int kHighLaneIndex = 1;
  InstructionOperand dst = g.DefineAsRegister(node);
  InstructionOperand src0 = g.UseUniqueRegister(input0);
  InstructionOperand src1 = g.UseUniqueRegister(input1);
  InstructionOperand imm = g.UseImmediate(kHighLaneIndex);
  InstructionOperand inputs[] = {src0, src1, imm};
  X64OperandGeneratorT g(this);
    InstructionOperand temps[] = {g.TempRegister()};
    InstructionOperand* temps = nullptr;
    size_t temp_count = 0;
  InstructionOperand outputs[] = {g.DefineAsRegister(node)};
  InstructionOperand inputs[3];
      g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count, reg_kind);
  if (load.is_protected(&traps_on_null)) {
    if (traps_on_null) {
  Emit(code, 1, outputs, input_count, inputs, temp_count, temps);

  TurboshaftAdapter::LoadView view = this->load_view(node);
          GetLoadOpcode(view.ts_loaded_rep(), view.ts_result_rep()));

void InstructionSelectorT::VisitProtectedLoad(OpIndex node) { VisitLoad(node); }
  const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(node);
  X64OperandGeneratorT g(selector);
  InstructionOperand inputs[] = {
      g.UseUniqueRegister(atomic_op.value()),
      g.UseUniqueRegister(atomic_op.base()),
      g.GetEffectiveIndexOperand(atomic_op.index(), &addressing_mode)};
  InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
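// Common store lowering: tagged stores into heap objects may need a
// record-write barrier (kArchStoreWithWriteBarrier, or the atomic variant for
// SeqCst), SeqCst stores use the atomic store opcodes, and everything else
// becomes a plain mov via GetStoreOpcode.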
                      const TurboshaftAdapter::StoreView& store) {
  X64OperandGeneratorT g(selector);
  OpIndex value = store.value();
  uint8_t element_size_log2 = store.element_size_log2();
  std::optional<AtomicMemoryOrder> atomic_order = store.memory_order();
  const StoreRepresentation store_rep = store.stored_rep();
  const bool is_seqcst =
  if (v8_flags.enable_unconditional_write_barriers &&
  const auto access_mode =
          ? (store.is_store_trap_on_null()
      !v8_flags.disable_write_barriers) {
    CHECK((store.ts_stored_rep() !=
          (store.ts_stored_rep() !=
          (store.ts_stored_rep() !=
    InstructionOperand inputs[5];
    addressing_mode = g.GenerateMemoryOperandInputs(
    inputs[input_count++] = g.UseUniqueRegister(value);
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
      code = kArchStoreIndirectWithWriteBarrier;
      inputs[input_count++] = g.UseImmediate64(static_cast<int64_t>(tag));
      code = is_seqcst ? kArchAtomicStoreWithWriteBarrier
                       : kArchStoreWithWriteBarrier;
    selector->Emit(code, 0, nullptr, input_count, inputs, arraysize(temps),
      InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
      InstructionOperand* temps = nullptr;
      size_t temp_count = 0;
      InstructionOperand inputs[4];
      inputs[input_count++] = g.UseUniqueRegister(value);
      inputs[input_count++] = g.UseUniqueRegister(base);
      if (index.valid()) {
        inputs[input_count++] = g.GetEffectiveIndexOperand(
            selector->value(index), &addressing_mode);
        addressing_mode = kMode_MRI;
        addressing_mode = kMode_MR;
      opcode = GetSeqCstStoreOpcode(store_rep);
          selector->MatchTruncateWord64ToWord32(value, &value64)) {
      addressing_mode = g.GetEffectiveAddressMemoryOperand(
          store, inputs, &input_count, reg_kind);
      InstructionOperand value_operand = g.CanBeImmediate(value)
                                             ? g.UseImmediate(value)
                                             : g.UseRegister(value, reg_kind);
      opcode = GetStoreOpcode(store.ts_stored_rep());
    selector->Emit(code, 0, static_cast<InstructionOperand*>(nullptr),
                   input_count, inputs, temp_count, temps);
void InstructionSelectorT::VisitStore(OpIndex node) {

void InstructionSelectorT::VisitProtectedStore(OpIndex node) {

#ifdef V8_ENABLE_WEBASSEMBLY
void InstructionSelectorT::VisitStoreLane(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd128LaneMemoryOp& store = Get(node).Cast<Simd128LaneMemoryOp>();
1573 switch (store.lane_kind) {
1574 case Simd128LaneMemoryOp::LaneKind::k8:
1577 case Simd128LaneMemoryOp::LaneKind::k16:
1580 case Simd128LaneMemoryOp::LaneKind::k32:
1581 opcode = kX64S128Store32Lane;
1583 case Simd128LaneMemoryOp::LaneKind::k64:
1584 opcode = kX64S128Store64Lane;
  InstructionOperand inputs[4];
      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
1594 if (store.kind.with_trap_handler) {
1598 InstructionOperand value_operand = g.UseRegister(store.value());
  auto left = binop.input(0);
  auto right = binop.input(1);
1617 std::swap(left, right);
1621 size_t input_count = 0;
1623 size_t output_count = 0;
1626 if (left == right) {
1635 inputs[input_count++] = input;
1636 inputs[input_count++] = input;
1645 std::swap(left, right);
    inputs[input_count++] = g.Use(right);
  bool allow_implicit_int64_truncation =
  if (auto* constant = selector->Get(node).TryCast<ConstantOp>()) {
    if (constant->kind == ConstantOp::Kind::kWord32) {
      return constant->word32();
    if (allow_implicit_int64_truncation &&
        constant->kind == ConstantOp::Kind::kWord64) {
      return static_cast<int32_t>(constant->word64());
  return std::nullopt;
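// x & 0xFF / x & 0xFFFF on a suitable unsigned load is emitted as a
// zero-extending move instead of an explicit AND.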
void InstructionSelectorT::VisitWord32And(OpIndex node) {
  X64OperandGeneratorT g(this);
      load_rep.IsUnsigned()) {
    Emit(kX64Movzxbl, g.DefineAsRegister(node), g.Use(left));
      load_rep.IsUnsigned()) {
    Emit(kX64Movzxwl, g.DefineAsRegister(node), g.Use(left));
1730 return std::nullopt;
void InstructionSelectorT::VisitWord64And(OpIndex node) {
  X64OperandGeneratorT g(this);
    if (*constant == 0xFF) {
      Emit(kX64Movzxbq, g.DefineAsRegister(node), g.Use(left));
    } else if (*constant == 0xFFFF) {
      Emit(kX64Movzxwq, g.DefineAsRegister(node), g.Use(left));
    } else if (*constant == 0xFFFFFFFF) {
      Emit(kX64Movl, g.DefineAsRegister(node), g.Use(left));
    } else if (std::numeric_limits<uint32_t>::min() <= *constant &&
               *constant <= std::numeric_limits<uint32_t>::max()) {
      Emit(kX64And32, g.DefineSameAsFirst(node), g.UseRegister(left),
           g.UseImmediate(static_cast<int32_t>(*constant)));
void InstructionSelectorT::VisitWord32Or(OpIndex node) {

void InstructionSelectorT::VisitWord64Or(OpIndex node) {

void InstructionSelectorT::VisitWord32Xor(OpIndex node) {
  X64OperandGeneratorT g(this);
  if (*constant == static_cast<uint64_t>(-1)) {
    Emit(kX64Not32, g.DefineSameAsFirst(node),
         g.UseRegister(Get(node).input(0)));
void InstructionSelectorT::VisitWord64Xor(OpIndex node) {
  X64OperandGeneratorT g(this);
  if (*constant == static_cast<uint64_t>(-1)) {
    Emit(kX64Not, g.DefineSameAsFirst(node),
         g.UseRegister(Get(node).input(0)));
    OpIndex node, FlagsContinuation* cont) {
  X64OperandGeneratorT g(this);
  OpIndex value = op.stack_limit();
  if (g.CanBeMemoryOperand(kX64Cmp, node, value, effect_level)) {
    static constexpr int kMaxInputCount = 3;
    InstructionOperand inputs[kMaxInputCount];
        g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
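// Variable shift counts must live in CL on x64, hence UseFixed(right, rcx)
// below; immediate counts are emitted directly.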
void VisitWord32Shift(InstructionSelectorT* selector, OpIndex node,
  X64OperandGeneratorT g(selector);
  auto left = op.left();
  auto right = op.right();
  if (V<Word64> left64; selector->MatchTruncateWord64ToWord32(left, &left64)) {
  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseImmediate(right));
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseFixed(right, rcx));
void VisitWord64Shift(InstructionSelectorT* selector, OpIndex node,
  X64OperandGeneratorT g(selector);
  auto left = op.left();
  auto right = op.right();
  if (g.CanBeImmediate(right)) {
    if (opcode == kX64Shr && m.left().IsChangeUint32ToUint64() &&
        m.right().HasResolvedValue() && m.right().ResolvedValue() < 32 &&
        m.right().ResolvedValue() >= 0) {
      left = left->InputAt(0);
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseImmediate(right));
    if (m.right().IsWord64And()) {
      if (mright.right().Is(0x3F)) {
        right = mright.left().node();
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseFixed(right, rcx));
bool TryVisitWordShift(InstructionSelectorT* selector, OpIndex node, int bits,
                       ArchOpcode opcode, FlagsContinuationT* cont) {
  DCHECK(bits == 32 || bits == 64);
  X64OperandGeneratorT g(selector);
  auto left = op.left();
  auto right = op.right();
  if (!g.CanBeImmediate(right) ||
      (g.GetImmediateIntegerValue(right) & (bits - 1)) == 0) {
  InstructionOperand output = g.DefineSameAsFirst(node);
  InstructionOperand inputs[2];
  inputs[0] = g.UseRegister(left);
  inputs[1] = g.UseImmediate(right);
  selector->EmitWithContinuation(opcode, 1, &output, 2, inputs, cont);

  X64OperandGeneratorT g(selector);
  InstructionOperand inputs[4];
  InstructionOperand outputs[1];
  outputs[0] = g.DefineAsRegister(result);
  selector->Emit(opcode, 1, outputs, input_count, inputs);
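// Shifts by small constants (and other base/index/scale patterns) are emitted
// as LEA so the result can land in a different register than the input.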
void InstructionSelectorT::VisitWord32Shl(OpIndex node) {
    EmitLea(this, kX64Lea32, node, index, scale, base, 0,
  VisitWord32Shift(this, node, kX64Shl32);

void InstructionSelectorT::VisitWord64Shl(OpIndex node) {
  X64OperandGeneratorT g(this);
  OpIndex right = shift.right();
      Emit(kX64Shl, g.DefineSameAsFirst(node),
           g.UseRegister(this->Get(left).input(0)), g.UseImmediate(right));
  VisitWord64Shift(this, node, kX64Shl);

void InstructionSelectorT::VisitWord32Shr(OpIndex node) {
  VisitWord32Shift(this, node, kX64Shr32);
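// A 64-bit load shifted right by 32 is replaced with a narrower load from
// address + 4 (the little-endian upper half), avoiding the shift.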
bool TryEmitLoadForLoadWord64AndShiftRight(InstructionSelectorT* selector,
  X64OperandGeneratorT g(selector);
  if (selector->CanCover(node, shift.left()) &&
      selector->Get(shift.left()).Is<LoadOp>() &&
      selector->MatchIntegralWord32Constant(shift.right(), 32)) {
    DCHECK_EQ(selector->GetEffectLevel(node),
              selector->GetEffectLevel(shift.left()));
    if (m.has_value() &&
      InstructionOperand temps[] = {g.TempRegister()};
      InstructionOperand* temps = nullptr;
      size_t temp_count = 0;
      InstructionOperand inputs[3];
          shift.left(), inputs, &input_count, reg_kind);
      if (m->displacement == 0) {
        mode = AddDisplacementToAddressingMode(mode);
        if (!inputs[input_count - 1].IsImmediate()) return false;
            static_cast<int32_t>(m->displacement) + 4);
      InstructionOperand outputs[] = {g.DefineAsRegister(node)};
      selector->Emit(code, 1, outputs, input_count, inputs, temp_count, temps);
void InstructionSelectorT::VisitWord64Shr(OpIndex node) {
  if (TryEmitLoadForLoadWord64AndShiftRight(this, node, kX64Movl)) return;
  VisitWord64Shift(this, node, kX64Shr);

void InstructionSelectorT::VisitWord32Sar(OpIndex node) {
  X64OperandGeneratorT g(this);
  if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
    if (mleft.right().Is(16) && m.right().Is(16)) {
      Emit(kX64Movsxwl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
    } else if (mleft.right().Is(24) && m.right().Is(24)) {
      Emit(kX64Movsxbl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
  VisitWord32Shift(this, node, kX64Sar32);

void InstructionSelectorT::VisitWord64Sar(OpIndex node) {
  if (TryEmitLoadForLoadWord64AndShiftRight(this, node, kX64Movsxlq)) return;
  VisitWord64Shift(this, node, kX64Sar);

void InstructionSelectorT::VisitWord32Rol(OpIndex node) {
  VisitWord32Shift(this, node, kX64Rol32);

void InstructionSelectorT::VisitWord64Rol(OpIndex node) {
  VisitWord64Shift(this, node, kX64Rol);

void InstructionSelectorT::VisitWord32Ror(OpIndex node) {
  VisitWord32Shift(this, node, kX64Ror32);

void InstructionSelectorT::VisitWord64Ror(OpIndex node) {
  VisitWord64Shift(this, node, kX64Ror);
void InstructionSelectorT::VisitWord32ReverseBits(OpIndex node) {

void InstructionSelectorT::VisitWord64ReverseBits(OpIndex node) {

void InstructionSelectorT::VisitWord64ReverseBytes(OpIndex node) {
  X64OperandGeneratorT g(this);
  Emit(kX64Bswap, g.DefineSameAsFirst(node), g.UseRegister(op.input()));

void InstructionSelectorT::VisitWord32ReverseBytes(OpIndex node) {
  X64OperandGeneratorT g(this);
  Emit(kX64Bswap32, g.DefineSameAsFirst(node), g.UseRegister(op.input()));

void InstructionSelectorT::VisitSimd128ReverseBytes(OpIndex node) {
void InstructionSelectorT::VisitInt32Add(OpIndex node) {
  X64OperandGeneratorT g(this);
  std::optional<BaseWithScaledIndexAndDisplacementMatch> m;
  if (m.has_value()) {
    EmitLea(this, kX64Lea32, node, m->index, m->scale, m->base,
            m->displacement, m->displacement_mode);

void InstructionSelectorT::VisitInt64Add(OpIndex node) {
  X64OperandGeneratorT g(this);
    EmitLea(this, kX64Lea, node, match->index, match->scale, match->base,
            match->displacement, match->displacement_mode);

void InstructionSelectorT::VisitInt64AddWithOverflow(OpIndex node) {
    return VisitBinop(this, node, kX64Add, &cont);
void InstructionSelectorT::VisitInt32Sub(OpIndex node) {
  X64OperandGeneratorT g(this);
  if (g.CanBeImmediate(right)) {
    int32_t imm = g.GetImmediateIntegerValue(right);
      if (this->Get(left).outputs_rep()[0] ==
        Emit(kX64Movl, g.DefineAsRegister(node), g.UseRegister(left));
           g.DefineAsRegister(node), g.UseRegister(left),
    Emit(kX64Neg32, g.DefineSameAsFirst(node), g.UseRegister(right));

void InstructionSelectorT::VisitInt64Sub(OpIndex node) {
  X64OperandGeneratorT g(this);
  const WordBinopOp& binop = this->Get(node).Cast<WordBinopOp>();
  DCHECK_EQ(binop.kind, WordBinopOp::Kind::kSub);
    Emit(kX64Neg, g.DefineSameAsFirst(node), g.UseRegister(binop.right()));
    int64_t immediate_value = -*constant;
         g.DefineAsRegister(node), g.UseRegister(binop.left()),
         g.TempImmediate(static_cast<int32_t>(immediate_value)));

void InstructionSelectorT::VisitInt64SubWithOverflow(OpIndex node) {
    return VisitBinop(this, node, kX64Sub, &cont);
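// Widening multiplies and divisions use the fixed rax/rdx register pair
// required by the x64 mul/div instructions.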
  X64OperandGeneratorT g(selector);
  auto [left, right] = selector->Inputs<WordBinopOp>(node);
  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left),
                   g.UseImmediate(right));
    if (g.CanBeBetterLeftOperand(right)) {
      std::swap(left, right);
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),

void VisitMulHigh(InstructionSelectorT* selector, OpIndex node,
  X64OperandGeneratorT g(selector);
  auto [left, right] = selector->Inputs<WordBinopOp>(node);
  if (selector->IsLive(left) && !selector->IsLive(right)) {
    std::swap(left, right);
  InstructionOperand temps[] = {g.TempRegister(rax)};
  selector->Emit(opcode, g.DefineAsFixed(node, rdx), g.UseFixed(left, rax),
                 g.UseUniqueRegister(right), arraysize(temps), temps);

  X64OperandGeneratorT g(selector);
  auto [left, right] = selector->Inputs<WordBinopOp>(node);
  InstructionOperand temps[] = {g.TempRegister(rdx)};
  selector->Emit(opcode, g.DefineAsFixed(node, rax), g.UseFixed(left, rax),
                 g.UseUniqueRegister(right), arraysize(temps), temps);

  X64OperandGeneratorT g(selector);
  auto [left, right] = selector->Inputs<WordBinopOp>(node);
  InstructionOperand temps[] = {g.TempRegister(rax)};
  selector->Emit(opcode, g.DefineAsFixed(node, rdx), g.UseFixed(left, rax),
                 g.UseUniqueRegister(right), arraysize(temps), temps);
void InstructionSelectorT::VisitInt32Mul(OpIndex node) {
    EmitLea(this, kX64Lea32, node, m->index, m->scale, m->base, 0,
  VisitMul(this, node, kX64Imul32);

void InstructionSelectorT::VisitInt32MulWithOverflow(OpIndex node) {
    return VisitBinop(this, node, kX64Imul32, &cont);

void InstructionSelectorT::VisitInt64Mul(OpIndex node) {
    EmitLea(this, kX64Lea, node, m->index, m->scale, m->base, 0,
  VisitMul(this, node, kX64Imul);

void InstructionSelectorT::VisitInt64MulWithOverflow(OpIndex node) {
    return VisitBinop(this, node, kX64Imul, &cont);

void InstructionSelectorT::VisitInt32MulHigh(OpIndex node) {
  VisitMulHigh(this, node, kX64ImulHigh32);

void InstructionSelectorT::VisitInt64MulHigh(OpIndex node) {
  VisitMulHigh(this, node, kX64ImulHigh64);

void InstructionSelectorT::VisitInt32Div(OpIndex node) {
  VisitDiv(this, node, kX64Idiv32);

void InstructionSelectorT::VisitInt64Div(OpIndex node) {
  VisitDiv(this, node, kX64Idiv);

void InstructionSelectorT::VisitUint32Div(OpIndex node) {
  VisitDiv(this, node, kX64Udiv32);

void InstructionSelectorT::VisitUint64Div(OpIndex node) {
  VisitDiv(this, node, kX64Udiv);

void InstructionSelectorT::VisitInt32Mod(OpIndex node) {
  VisitMod(this, node, kX64Idiv32);

void InstructionSelectorT::VisitInt64Mod(OpIndex node) {
  VisitMod(this, node, kX64Idiv);

void InstructionSelectorT::VisitUint32Mod(OpIndex node) {
  VisitMod(this, node, kX64Udiv32);

void InstructionSelectorT::VisitUint64Mod(OpIndex node) {
  VisitMod(this, node, kX64Udiv);

void InstructionSelectorT::VisitUint32MulHigh(OpIndex node) {
  VisitMulHigh(this, node, kX64UmulHigh32);

void InstructionSelectorT::VisitUint64MulHigh(OpIndex node) {
  VisitMulHigh(this, node, kX64UmulHigh64);
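// The TryTruncate* visitors optionally define a second output that reports
// whether the truncation succeeded; a temporary SIMD register is only needed
// when that success output is used.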
void InstructionSelectorT::VisitTryTruncateFloat32ToInt64(OpIndex node) {
  X64OperandGeneratorT g(this);
  InstructionOperand inputs[] = {g.UseRegister(op.input())};
  InstructionOperand outputs[2];
  InstructionOperand temps[1];
  size_t output_count = 0;
  size_t temp_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);
  if (success_output.valid()) {
    outputs[output_count++] = g.DefineAsRegister(success_output.value());
    temps[temp_count++] = g.TempSimd128Register();
  Emit(kSSEFloat32ToInt64, output_count, outputs, 1, inputs, temp_count, temps);

void InstructionSelectorT::VisitTryTruncateFloat64ToUint32(OpIndex node) {
  X64OperandGeneratorT g(this);
  InstructionOperand inputs[] = {g.UseRegister(op.input())};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);
  if (success_output.valid()) {
    outputs[output_count++] = g.DefineAsRegister(success_output.value());
  Emit(kSSEFloat64ToUint32, output_count, outputs, 1, inputs);

void InstructionSelectorT::VisitTryTruncateFloat32ToUint64(OpIndex node) {
  X64OperandGeneratorT g(this);
  InstructionOperand inputs[] = {g.UseRegister(op.input())};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);
  if (success_output.valid()) {
    outputs[output_count++] = g.DefineAsRegister(success_output.value());
  Emit(kSSEFloat32ToUint64, output_count, outputs, 1, inputs);

void InstructionSelectorT::VisitTryTruncateFloat64ToUint64(OpIndex node) {
  X64OperandGeneratorT g(this);
  InstructionOperand inputs[] = {g.UseRegister(op.input())};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);
  if (success_output.valid()) {
    outputs[output_count++] = g.DefineAsRegister(success_output.value());
  Emit(kSSEFloat64ToUint64, output_count, outputs, 1, inputs);

void InstructionSelectorT::VisitTryTruncateFloat64ToInt64(OpIndex node) {
  X64OperandGeneratorT g(this);
  InstructionOperand inputs[] = {g.UseRegister(op.input())};
  InstructionOperand outputs[2];
  InstructionOperand temps[1];
  size_t output_count = 0;
  size_t temp_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);
  if (success_output.valid()) {
    outputs[output_count++] = g.DefineAsRegister(success_output.value());
    temps[temp_count++] = g.TempSimd128Register();
  Emit(kSSEFloat64ToInt64, output_count, outputs, 1, inputs, temp_count, temps);

void InstructionSelectorT::VisitTryTruncateFloat64ToInt32(OpIndex node) {
  X64OperandGeneratorT g(this);
  InstructionOperand inputs[] = {g.UseRegister(op.input())};
  InstructionOperand outputs[2];
  InstructionOperand temps[1];
  size_t output_count = 0;
  size_t temp_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);
  if (success_output.valid()) {
    outputs[output_count++] = g.DefineAsRegister(success_output.value());
    temps[temp_count++] = g.TempSimd128Register();
  Emit(kSSEFloat64ToInt32, output_count, outputs, 1, inputs, temp_count, temps);
void InstructionSelectorT::VisitBitcastWord32ToWord64(OpIndex node) {

void InstructionSelectorT::VisitChangeInt32ToInt64(OpIndex node) {
  X64OperandGeneratorT g(this);
  auto value = op.input();
      opcode = load_rep.IsSigned() ? kX64Movsxbq : kX64Movzxbq;
      opcode = load_rep.IsSigned() ? kX64Movsxwq : kX64Movzxwq;
    InstructionOperand outputs[] = {g.DefineAsRegister(node)};
    InstructionOperand inputs[3];
        g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
    Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(value));
bool InstructionSelectorT::ZeroExtendsWord32ToWord64NoPhis(OpIndex node) {
2583 switch (op.opcode) {
2584 case Opcode::kWordBinop: {
2585 const auto& binop = op.Cast<WordBinopOp>();
2587 DCHECK(binop.kind == WordBinopOp::Kind::kBitwiseAnd ||
2588 binop.kind == WordBinopOp::Kind::kBitwiseOr ||
2589 binop.kind == WordBinopOp::Kind::kBitwiseXor ||
2590 binop.kind == WordBinopOp::Kind::kAdd ||
2591 binop.kind == WordBinopOp::Kind::kSub ||
2592 binop.kind == WordBinopOp::Kind::kMul ||
2593 binop.kind == WordBinopOp::Kind::kSignedDiv ||
2594 binop.kind == WordBinopOp::Kind::kUnsignedDiv ||
2595 binop.kind == WordBinopOp::Kind::kSignedMod ||
2596 binop.kind == WordBinopOp::Kind::kUnsignedMod ||
2597 binop.kind == WordBinopOp::Kind::kSignedMulOverflownBits ||
2598 binop.kind == WordBinopOp::Kind::kUnsignedMulOverflownBits);
2601 case Opcode::kShift: {
      const auto& shift = op.Cast<ShiftOp>();
2604 DCHECK(shift.kind == ShiftOp::Kind::kShiftLeft ||
2605 shift.kind == ShiftOp::Kind::kShiftRightLogical ||
2606 shift.kind == ShiftOp::Kind::kShiftRightArithmetic ||
2607 shift.kind == ShiftOp::Kind::kShiftRightArithmeticShiftOutZeros ||
2608 shift.kind == ShiftOp::Kind::kRotateLeft ||
2609 shift.kind == ShiftOp::Kind::kRotateRight);
2612 case Opcode::kComparison: {
2613 const auto& comparison = op.Cast<ComparisonOp>();
2614 DCHECK(comparison.kind == ComparisonOp::Kind::kEqual ||
2615 comparison.kind == ComparisonOp::Kind::kSignedLessThan ||
2616 comparison.kind == ComparisonOp::Kind::kSignedLessThanOrEqual ||
2617 comparison.kind == ComparisonOp::Kind::kUnsignedLessThan ||
2618 comparison.kind == ComparisonOp::Kind::kUnsignedLessThanOrEqual);
2621 case Opcode::kProjection: {
2622 const auto& projection = op.Cast<ProjectionOp>();
      if (const auto* binop =
              this->Get(projection.input()).TryCast<OverflowCheckedBinopOp>()) {
2625 DCHECK(binop->kind == OverflowCheckedBinopOp::Kind::kSignedAdd ||
2626 binop->kind == OverflowCheckedBinopOp::Kind::kSignedSub ||
2627 binop->kind == OverflowCheckedBinopOp::Kind::kSignedMul);
2632 case Opcode::kLoad: {
2633 const auto& load = op.Cast<LoadOp>();
2636 switch (load.loaded_rep.ToMachineType().representation()) {
2646 case Opcode::kConstant: {
      X64OperandGeneratorT g(this);
2651 if (g.CanBeImmediate(node)) {
2652 return g.GetImmediateIntegerValue(node) >= 0;
2656 case Opcode::kChange:
void InstructionSelectorT::VisitChangeUint32ToUint64(OpIndex node) {
  X64OperandGeneratorT g(this);
  if (ZeroExtendsWord32ToWord64(value)) {
  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
void VisitRO(InstructionSelectorT* selector, OpIndex node,
  X64OperandGeneratorT g(selector);
  const Operation& op = selector->Get(node);
  selector->Emit(opcode, g.DefineAsRegister(node), g.Use(op.input(0)));

  X64OperandGeneratorT g(selector);
  const Operation& op = selector->Get(node);
  selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(op.input(0)));

  X64OperandGeneratorT g(selector);
  const Operation& op = selector->Get(node);
  selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(op.input(0)),
                 g.Use(op.input(1)));
void VisitFloatBinop(InstructionSelectorT* selector, OpIndex node,
  X64OperandGeneratorT g(selector);
  const FloatBinopOp& op = selector->Cast<FloatBinopOp>(node);
  auto left = op.left();
  auto right = op.right();
  InstructionOperand inputs[8];
  InstructionOperand outputs[1];
  size_t output_count = 0;
  if (left == right) {
    InstructionOperand const input = g.UseRegister(left);
    int effect_level = selector->GetEffectLevel(node);
    if (selector->IsCommutative(node) &&
        (g.CanBeBetterLeftOperand(right) ||
         g.CanBeMemoryOperand(avx_opcode, node, left, effect_level)) &&
        (!g.CanBeBetterLeftOperand(left) ||
         !g.CanBeMemoryOperand(avx_opcode, node, right, effect_level))) {
      std::swap(left, right);
    if (g.CanBeMemoryOperand(avx_opcode, node, right, effect_level)) {
      g.GetEffectiveAddressMemoryOperand(right, inputs, &input_count);
      if (g.IsProtectedLoad(right) &&
          selector->CanCoverProtectedLoad(node, right)) {
        selector->MarkAsProtected(node);
        selector->SetProtectedLoadToRemove(right);
        trapping_load = right;
  InstructionCode code = selector->IsSupported(AVX) ? avx_opcode : sse_opcode;
  outputs[output_count++] = selector->IsSupported(AVX)
                                ? g.DefineAsRegister(node)
                                : g.DefineSameAsFirst(node);
  Instruction* instr =
      selector->Emit(code, output_count, outputs, input_count, inputs);
  if (trapping_load.valid()) {
    selector->UpdateSourcePosition(instr, trapping_load);
void VisitFloatUnop(InstructionSelectorT* selector, OpIndex node, OpIndex input,
  X64OperandGeneratorT g(selector);
  if (selector->IsSupported(AVX)) {
    selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(input));
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
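// One-to-one opcode tables: RO entries accept any operand via g.Use, RR
// entries require a register input.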
2791#define RO_OP_T_LIST(V) \
2792 V(Word64Clz, kX64Lzcnt) \
2793 V(Word32Clz, kX64Lzcnt32) \
2794 V(Word64Ctz, kX64Tzcnt) \
2795 V(Word32Ctz, kX64Tzcnt32) \
2796 V(Word64Popcnt, kX64Popcnt) \
2797 V(Word32Popcnt, kX64Popcnt32) \
2798 V(Float64Sqrt, kSSEFloat64Sqrt) \
2799 V(Float32Sqrt, kSSEFloat32Sqrt) \
2800 V(RoundFloat64ToInt32, kSSEFloat64ToInt32) \
2801 V(ChangeInt32ToFloat64, kSSEInt32ToFloat64) \
2802 V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32) \
2803 V(ChangeFloat32ToFloat64, kSSEFloat32ToFloat64) \
2804 V(ChangeFloat64ToInt32, kSSEFloat64ToInt32) \
2805 V(ChangeFloat64ToUint32, kSSEFloat64ToUint32 | MiscField::encode(1)) \
2806 V(ChangeFloat64ToInt64, kSSEFloat64ToInt64) \
2807 V(ChangeFloat64ToUint64, kSSEFloat64ToUint64) \
2808 V(RoundInt32ToFloat32, kSSEInt32ToFloat32) \
2809 V(RoundInt64ToFloat32, kSSEInt64ToFloat32) \
2810 V(RoundUint64ToFloat32, kSSEUint64ToFloat32) \
2811 V(RoundInt64ToFloat64, kSSEInt64ToFloat64) \
2812 V(RoundUint64ToFloat64, kSSEUint64ToFloat64) \
2813 V(RoundUint32ToFloat32, kSSEUint32ToFloat32) \
2814 V(ChangeInt64ToFloat64, kSSEInt64ToFloat64) \
2815 V(ChangeUint32ToFloat64, kSSEUint32ToFloat64) \
2816 V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32) \
2817 V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32) \
2818 V(BitcastFloat32ToInt32, kX64BitcastFI) \
2819 V(BitcastFloat64ToInt64, kX64BitcastDL) \
2820 V(BitcastInt32ToFloat32, kX64BitcastIF) \
2821 V(BitcastInt64ToFloat64, kX64BitcastLD) \
2822 V(SignExtendWord8ToInt32, kX64Movsxbl) \
2823 V(SignExtendWord16ToInt32, kX64Movsxwl) \
2824 V(SignExtendWord8ToInt64, kX64Movsxbq) \
2825 V(SignExtendWord16ToInt64, kX64Movsxwq) \
2826 V(TruncateFloat64ToInt64, kSSEFloat64ToInt64) \
2827 V(TruncateFloat32ToInt32, kSSEFloat32ToInt32) \
2828 V(TruncateFloat32ToUint32, kSSEFloat32ToUint32)
2830#ifdef V8_ENABLE_WEBASSEMBLY
2831#define RR_OP_T_LIST_WEBASSEMBLY(V) \
2832 V(F16x8Ceil, kX64F16x8Round | MiscField::encode(kRoundUp)) \
2833 V(F16x8Floor, kX64F16x8Round | MiscField::encode(kRoundDown)) \
2834 V(F16x8Trunc, kX64F16x8Round | MiscField::encode(kRoundToZero)) \
2835 V(F16x8NearestInt, kX64F16x8Round | MiscField::encode(kRoundToNearest)) \
2836 V(F32x4Ceil, kX64F32x4Round | MiscField::encode(kRoundUp)) \
2837 V(F32x4Floor, kX64F32x4Round | MiscField::encode(kRoundDown)) \
2838 V(F32x4Trunc, kX64F32x4Round | MiscField::encode(kRoundToZero)) \
2839 V(F32x4NearestInt, kX64F32x4Round | MiscField::encode(kRoundToNearest)) \
2840 V(F64x2Ceil, kX64F64x2Round | MiscField::encode(kRoundUp)) \
2841 V(F64x2Floor, kX64F64x2Round | MiscField::encode(kRoundDown)) \
2842 V(F64x2Trunc, kX64F64x2Round | MiscField::encode(kRoundToZero)) \
2843 V(F64x2NearestInt, kX64F64x2Round | MiscField::encode(kRoundToNearest))
2845#define RR_OP_T_LIST_WEBASSEMBLY(V)
2848#define RR_OP_T_LIST(V) \
2849 V(TruncateFloat64ToUint32, kSSEFloat64ToUint32 | MiscField::encode(0)) \
2850 V(SignExtendWord32ToInt64, kX64Movsxlq) \
2851 V(Float32RoundDown, kSSEFloat32Round | MiscField::encode(kRoundDown)) \
2852 V(Float64RoundDown, kSSEFloat64Round | MiscField::encode(kRoundDown)) \
2853 V(Float32RoundUp, kSSEFloat32Round | MiscField::encode(kRoundUp)) \
2854 V(Float64RoundUp, kSSEFloat64Round | MiscField::encode(kRoundUp)) \
2855 V(Float32RoundTruncate, kSSEFloat32Round | MiscField::encode(kRoundToZero)) \
2856 V(Float64RoundTruncate, kSSEFloat64Round | MiscField::encode(kRoundToZero)) \
2857 V(Float32RoundTiesEven, \
2858 kSSEFloat32Round | MiscField::encode(kRoundToNearest)) \
2859 V(Float64RoundTiesEven, \
2860 kSSEFloat64Round | MiscField::encode(kRoundToNearest)) \
2861 RR_OP_T_LIST_WEBASSEMBLY(V)
2863#define RO_VISITOR(Name, opcode) \
2864 void InstructionSelectorT::Visit##Name(OpIndex node) { \
2865 VisitRO(this, node, opcode); \
2871#define RR_VISITOR(Name, opcode) \
2872 void InstructionSelectorT::Visit##Name(OpIndex node) { \
2873 VisitRR(this, node, opcode); \
void InstructionSelectorT::VisitTruncateFloat64ToWord32(OpIndex node) {
  VisitRR(this, node, kArchTruncateDoubleToI);

void InstructionSelectorT::VisitTruncateFloat64ToFloat16RawBits(OpIndex node) {
  X64OperandGeneratorT g(this);
  InstructionOperand temps[] = {g.TempDoubleRegister(), g.TempRegister()};
  Emit(kSSEFloat64ToFloat16RawBits, g.DefineAsRegister(node),
       g.UseUniqueRegister(op.input()), arraysize(temps), temps);

void InstructionSelectorT::VisitChangeFloat16RawBitsToFloat64(OpIndex node) {
  X64OperandGeneratorT g(this);
  InstructionOperand temps[] = {g.TempDoubleRegister()};
  Emit(kSSEFloat16RawBitsToFloat64, g.DefineAsRegister(node),
       g.UseRegister(op.input()), arraysize(temps), temps);
void InstructionSelectorT::VisitTruncateInt64ToInt32(OpIndex node) {
  X64OperandGeneratorT g(this);
  bool can_cover = false;
  if (const TaggedBitcastOp* value_op =
    value = value_op->input();
    if (CanCover(value, shift->left()) &&
        TryEmitLoadForLoadWord64AndShiftRight(this, value, kX64Movl)) {
      Emit(kX64Shr, g.DefineSameAsFirst(node), g.UseRegister(shift->left()),
           g.TempImmediate(32));
  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
void InstructionSelectorT::VisitFloat32Add(OpIndex node) {
  VisitFloatBinop(this, node, kAVXFloat32Add, kSSEFloat32Add);

void InstructionSelectorT::VisitFloat32Sub(OpIndex node) {
  VisitFloatBinop(this, node, kAVXFloat32Sub, kSSEFloat32Sub);

void InstructionSelectorT::VisitFloat32Mul(OpIndex node) {
  VisitFloatBinop(this, node, kAVXFloat32Mul, kSSEFloat32Mul);

void InstructionSelectorT::VisitFloat32Div(OpIndex node) {
  VisitFloatBinop(this, node, kAVXFloat32Div, kSSEFloat32Div);

void InstructionSelectorT::VisitFloat32Abs(OpIndex node) {
  VisitFloatUnop(this, node, op.input(), kX64Float32Abs);

void InstructionSelectorT::VisitFloat32Max(OpIndex node) {
  VisitRRO(this, node, kSSEFloat32Max);

void InstructionSelectorT::VisitFloat32Min(OpIndex node) {
  VisitRRO(this, node, kSSEFloat32Min);

void InstructionSelectorT::VisitFloat64Add(OpIndex node) {
  VisitFloatBinop(this, node, kAVXFloat64Add, kSSEFloat64Add);

void InstructionSelectorT::VisitFloat64Sub(OpIndex node) {
  VisitFloatBinop(this, node, kAVXFloat64Sub, kSSEFloat64Sub);

void InstructionSelectorT::VisitFloat64Mul(OpIndex node) {
  VisitFloatBinop(this, node, kAVXFloat64Mul, kSSEFloat64Mul);

void InstructionSelectorT::VisitFloat64Div(OpIndex node) {
  VisitFloatBinop(this, node, kAVXFloat64Div, kSSEFloat64Div);
void InstructionSelectorT::VisitFloat64Mod(OpIndex node) {
  X64OperandGeneratorT g(this);
  InstructionOperand temps[] = {g.TempRegister(rax)};
  Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node), g.UseRegister(op.left()),
       g.UseRegister(op.right()), 1, temps);

void InstructionSelectorT::VisitFloat64Max(OpIndex node) {
  VisitRRO(this, node, kSSEFloat64Max);

void InstructionSelectorT::VisitFloat64Min(OpIndex node) {
  VisitRRO(this, node, kSSEFloat64Min);

void InstructionSelectorT::VisitFloat64Abs(OpIndex node) {
  VisitFloatUnop(this, node, op.input(), kX64Float64Abs);

void InstructionSelectorT::VisitFloat64RoundTiesAway(OpIndex node) {

void InstructionSelectorT::VisitFloat32Neg(OpIndex node) {
  VisitFloatUnop(this, node, op.input(), kX64Float32Neg);

void InstructionSelectorT::VisitFloat64Neg(OpIndex node) {
  VisitFloatUnop(this, node, op.input(), kX64Float64Neg);
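// These helpers use a fixed xmm0/xmm1 operand convention; the IEEE-754 math
// operations they emit are implemented as calls into C library routines.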
  X64OperandGeneratorT g(this);
  Emit(opcode, g.DefineAsFixed(node, xmm0), g.UseFixed(op.left(), xmm0),
       g.UseFixed(op.right(), xmm1))

  X64OperandGeneratorT g(this);
  Emit(opcode, g.DefineAsFixed(node, xmm0), g.UseFixed(op.input(), xmm0))
                                             LinkageLocation location) {}

    ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
  X64OperandGeneratorT g(this);
  if (call_descriptor->IsCFunctionCall()) {
                                          call_descriptor->ParameterCount())),
         0, nullptr, 0, nullptr);
    for (size_t n = 0; n < arguments->size(); ++n) {
      if (input.node.valid()) {
        int slot = static_cast<int>(n);
        InstructionOperand value = g.CanBeImmediate(input.node)
                                       ? g.UseImmediate(input.node)
                                       : g.UseRegister(input.node);
    int stack_decrement = 0;
      if (!input.node.valid()) continue;
      InstructionOperand decrement = g.UseImmediate(stack_decrement);
      stack_decrement = 0;
      if (g.CanBeImmediate(input.node)) {
        Emit(kX64Push, g.NoOutput(), decrement, g.UseImmediate(input.node));
        Emit(kX64Push, g.NoOutput(), decrement, g.UseRegister(input.node));
      } else if (g.CanBeMemoryOperand(kX64Push, node, input.node,
        InstructionOperand outputs[1];
        InstructionOperand inputs[5];
            input.node, inputs, &input_count);
        Emit(kX64Push, g.NoOutput(), decrement, g.UseAny(input.node));
    ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
  X64OperandGeneratorT g(this);
    if (!output.location.IsCallerFrameSlot()) continue;
    if (output.node.valid()) {
      DCHECK(!call_descriptor->IsCFunctionCall());
      InstructionOperand result = g.DefineAsRegister(output.node);
      int offset = call_descriptor->GetOffsetToReturns();
      int reverse_slot = -output.location.GetLocation() - offset;
      InstructionOperand slot = g.UseImmediate(reverse_slot);
void VisitCompareWithMemoryOperand(InstructionSelectorT* selector,
                                   InstructionOperand right,
                                   FlagsContinuationT* cont) {
  DCHECK(selector->IsLoadOrLoadImmutable(left));
  X64OperandGeneratorT g(selector);
  InstructionOperand inputs[6];
  g.GetEffectiveAddressMemoryOperand(left, inputs, &input_count);
  if (cont->IsSelect()) {
      inputs[input_count++] = g.UseRegister(cont->true_value());
      inputs[input_count++] = g.Use(cont->false_value());
      inputs[input_count++] = g.UseRegister(cont->false_value());
      inputs[input_count++] = g.Use(cont->true_value());
  selector->EmitWithContinuation(opcode, 0, nullptr, input_count, inputs, cont);
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuationT* cont) {
  if (cont->IsSelect()) {
    X64OperandGeneratorT g(selector);
    InstructionOperand inputs[4] = {left, right};
      inputs[2] = g.UseRegister(cont->true_value());
      inputs[3] = g.Use(cont->false_value());
      inputs[2] = g.UseRegister(cont->false_value());
      inputs[3] = g.Use(cont->true_value());
    selector->EmitWithContinuation(opcode, 0, nullptr, 4, inputs, cont);
  selector->EmitWithContinuation(opcode, left, right, cont);

  X64OperandGeneratorT g(selector);
  if (commutative && g.CanBeBetterLeftOperand(right)) {
    std::swap(left, right);
  VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
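// Comparison narrowing: when both operands are known to fit in a narrower
// type (via a load's representation or a small constant), the compare is
// shrunk to the cmp8/cmp16/cmp32 or test8/test32 forms.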
MachineType MachineTypeForNarrow(InstructionSelectorT* selector, OpIndex node,
  if (selector->IsLoadOrLoadImmutable(hint_node)) {
    MachineType hint = selector->load_view(hint_node).loaded_rep();
    if (selector->MatchSignedIntegralConstant(node, &constant)) {
        if (constant >= std::numeric_limits<int8_t>::min() &&
            constant <= std::numeric_limits<int8_t>::max()) {
        if (constant >= std::numeric_limits<uint8_t>::min() &&
            constant <= std::numeric_limits<uint8_t>::max()) {
        if (constant >= std::numeric_limits<int16_t>::min() &&
            constant <= std::numeric_limits<int16_t>::max()) {
        if (constant >= std::numeric_limits<uint16_t>::min() &&
            constant <= std::numeric_limits<uint16_t>::max()) {
        if (constant >= std::numeric_limits<int32_t>::min() &&
            constant <= std::numeric_limits<int32_t>::max()) {
        if (constant >= std::numeric_limits<uint32_t>::min() &&
            constant <= std::numeric_limits<uint32_t>::max())
  if (selector->IsLoadOrLoadImmutable(node)) {
    return selector->load_view(node).loaded_rep();
bool IsIntConstant(InstructionSelectorT* selector, OpIndex node) {
  if (auto constant = selector->Get(node).TryCast<ConstantOp>()) {
    return constant->kind == ConstantOp::Kind::kWord32 ||
           constant->kind == ConstantOp::Kind::kWord64;

bool IsWordAnd(InstructionSelectorT* selector, OpIndex node) {
  if (auto binop = selector->Get(node).TryCast<WordBinopOp>()) {
    return binop->kind == WordBinopOp::Kind::kBitwiseAnd;
MachineType MachineTypeForNarrowWordAnd(InstructionSelectorT* selector,
  const WordBinopOp& op = selector->Cast<WordBinopOp>(and_node);
  auto and_left = op.left();
  auto and_right = op.right();
  auto and_constant_node = IsIntConstant(selector, and_right) ? and_right
                           : IsIntConstant(selector, and_left) ? and_left
  if (and_constant_node.valid()) {
    int64_t and_constant, cmp_constant;
    selector->MatchSignedIntegralConstant(and_constant_node, &and_constant);
    selector->MatchSignedIntegralConstant(constant_node, &cmp_constant);
    if (and_constant >= 0 && cmp_constant >= 0) {
          and_constant > cmp_constant ? and_constant : cmp_constant;
      if (constant <= std::numeric_limits<int8_t>::max()) {
      } else if (constant <= std::numeric_limits<uint8_t>::max()) {
      } else if (constant <= std::numeric_limits<int16_t>::max()) {
      } else if (constant <= std::numeric_limits<uint16_t>::max()) {
      } else if (constant <= std::numeric_limits<int32_t>::max()) {
      } else if (constant <= std::numeric_limits<uint32_t>::max()) {
InstructionCode TryNarrowOpcodeSize(InstructionSelectorT* selector,
                                    InstructionCode opcode, OpIndex left,
                                    OpIndex right, FlagsContinuationT* cont) {
  MachineType left_type, right_type;
  if (IsWordAnd(selector, left) && IsIntConstant(selector, right)) {
    left_type = MachineTypeForNarrowWordAnd(selector, left, right);
    right_type = left_type;
  } else if (IsWordAnd(selector, right) && IsIntConstant(selector, left)) {
    right_type = MachineTypeForNarrowWordAnd(selector, right, left);
    left_type = right_type;
  } else {
    left_type = MachineTypeForNarrow(selector, left, right);
    right_type = MachineTypeForNarrow(selector, right, left);
  }
  if (left_type == right_type) {
    switch (left_type.representation()) {
      if (opcode == kX64Test || opcode == kX64Test32) return kX64Test8;
      if (opcode == kX64Cmp || opcode == kX64Cmp32) {
        cont->OverwriteUnsignedIfSigned();
      if (opcode == kX64Test) return kX64Test32;
      if (opcode == kX64Cmp) {
        cont->OverwriteUnsignedIfSigned();
#ifdef V8_COMPRESS_POINTERS
      if (opcode == kX64Cmp) return kX64Cmp32;
OpIndex RemoveUnnecessaryWordAnd(InstructionSelectorT* selector,
                                 InstructionCode opcode, OpIndex and_node) {
  int64_t mask = 0;
  if (opcode == kX64Cmp32 || opcode == kX64Test32) {
    mask = std::numeric_limits<uint32_t>::max();
  } else if (opcode == kX64Cmp16 || opcode == kX64Test16) {
    mask = std::numeric_limits<uint16_t>::max();
  } else if (opcode == kX64Cmp8 || opcode == kX64Test8) {
    mask = std::numeric_limits<uint8_t>::max();
  } else {
    return and_node;
  }
  const WordBinopOp& op = selector->Cast<WordBinopOp>(and_node);
  auto and_left = op.left();
  auto and_right = op.right();
  auto and_constant_node = OpIndex{};
  auto and_other_node = OpIndex{};
  if (IsIntConstant(selector, and_left)) {
    and_constant_node = and_left;
    and_other_node = and_right;
  } else if (IsIntConstant(selector, and_right)) {
    and_constant_node = and_right;
    and_other_node = and_left;
  }
  if (and_constant_node.valid()) {
    int64_t and_constant;
    selector->MatchSignedIntegralConstant(and_constant_node, &and_constant);
    if (and_constant == mask) return and_other_node;
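// The compare visitor below combines these helpers: it narrows the opcode
// where the operand types allow it, swaps operands so that an immediate or
// memory operand ends up on the right (committing the continuation where the
// operation is not commutative), and strips a bitwise-and whose mask already
// matches the compare width.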
3408 X64OperandGeneratorT g(selector);
3409 const Operation& op = selector->Get(node);
3411 auto left = op.input(0);
3412 auto right = op.input(1);
3416 if (opcode == kX64Cmp32 || opcode == kX64Test32) {
3418 selector->MatchTruncateWord64ToWord32(left, &left64)) {
3422 selector->MatchTruncateWord64ToWord32(right, &right64)) {
3427 opcode = TryNarrowOpcodeSize(selector, opcode, left, right, cont);
3431 int effect_level = selector->GetEffectLevel(node, cont);
3433 if ((!g.CanBeImmediate(right) && g.CanBeImmediate(left)) ||
3434 (g.CanBeMemoryOperand(opcode, node, right, effect_level) &&
3435 !g.CanBeMemoryOperand(opcode, node, left, effect_level))) {
3436 if (!selector->IsCommutative(node)) cont->Commute();
3437 std::swap(left, right);
3440 if (IsWordAnd(selector, left)) {
3441 left = RemoveUnnecessaryWordAnd(selector, opcode, left);
3445 if (g.CanBeImmediate(right)) {
3446 if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
3447 return VisitCompareWithMemoryOperand(selector, opcode, left,
3448 g.UseImmediate(right), cont);
3450 return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right),
3455 if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
3456 return VisitCompareWithMemoryOperand(selector, opcode, left,
3457 g.UseRegister(right), cont);
3460 return VisitCompare(selector, opcode, left, right, cont,
3461 selector->IsCommutative(node));
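// Equality against a heap-constant root can be checked against the roots
// register plus the root's offset instead of materializing the constant,
// provided the roots register is available.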
void VisitWord64EqualImpl(InstructionSelectorT* selector, OpIndex node,
                          FlagsContinuationT* cont) {
  if (selector->CanUseRootsRegister()) {
    X64OperandGeneratorT g(selector);
    const RootsTable& roots_table = selector->isolate()->roots_table();
    RootIndex root_index;
    const ComparisonOp& equal = selector->Cast<ComparisonOp>(node);
    Handle<HeapObject> object;
    if (selector->MatchHeapConstant(equal.right(), &object)) {
      if (roots_table.IsRootHandle(object, &root_index)) {
                     g.UseRegister(equal.left()), cont);

bool MatchHeapObjectEqual(InstructionSelectorT* selector, OpIndex node,
                          OpIndex* left, Handle<HeapObject>* right) {
  const ComparisonOp& equal = selector->Cast<ComparisonOp>(node);
  if (selector->MatchHeapConstant(equal.right(), right)) {
    *left = equal.left();
    return true;
  }
  return false;
}
3501void VisitWord32EqualImpl(InstructionSelectorT* selector,
OpIndex node,
3502 FlagsContinuationT* cont) {
3504 X64OperandGeneratorT g(selector);
3505 const RootsTable& roots_table = selector->isolate()->roots_table();
3508 Handle<HeapObject> right;
3512 if (MatchHeapObjectEqual(selector, node, &left, &right)) {
3513 if (roots_table.IsRootHandle(right, &root_index)) {
3518 selector, kX64Cmp32, g.UseRegister(left),
3520 root_index, selector->isolate())),
3523 if (selector->CanUseRootsRegister()) {
3531 g.UseRegister(left), cont);
3539void VisitCompareZero(InstructionSelectorT* selector,
OpIndex user,
3541 FlagsContinuationT* cont) {
3542 X64OperandGeneratorT g(selector);
3543 const Operation& op = selector->turboshaft_graph()->Get(node);
3544 if (cont->IsBranch() &&
    if (const WordBinopOp* binop = op.TryCast<WordBinopOp>()) {
3547 if (selector->IsOnlyUserOfNodeInSameBlock(user, node)) {
3549 switch (binop->kind) {
3550 case WordBinopOp::Kind::kAdd:
3551 return VisitBinop(selector, node, is64 ? kX64Add : kX64Add32, cont);
3552 case WordBinopOp::Kind::kSub:
3553 return VisitBinop(selector, node, is64 ? kX64Sub : kX64Sub32, cont);
3554 case WordBinopOp::Kind::kBitwiseAnd:
3555 return VisitBinop(selector, node, is64 ? kX64And : kX64And32, cont);
3556 case WordBinopOp::Kind::kBitwiseOr:
3557 return VisitBinop(selector, node, is64 ? kX64Or : kX64Or32, cont);
3563 if (selector->IsOnlyUserOfNodeInSameBlock(user, node)) {
3565 switch (shift->kind) {
3566 case ShiftOp::Kind::kShiftLeft:
3567 if (TryVisitWordShift(selector, node, is64 ? 64 : 32,
3568 is64 ? kX64Shl : kX64Shl32, cont)) {
3572 case ShiftOp::Kind::kShiftRightLogical:
3573 if (TryVisitWordShift(selector, node, is64 ? 64 : 32,
3574 is64 ? kX64Shr : kX64Shr32, cont)) {
3585 int effect_level = selector->GetEffectLevel(node, cont);
  if (const auto load = op.TryCast<LoadOp>()) {
    if (opcode == kX64Cmp32) {
    } else if (opcode == kX64Test32) {
    if (opcode == kX64Cmp32) {
    } else if (opcode == kX64Test32) {
      opcode = kX64Test16;
3603 if (g.CanBeMemoryOperand(opcode, user, node, effect_level)) {
3604 VisitCompareWithMemoryOperand(selector, opcode, node, g.TempImmediate(0),
3607 VisitCompare(selector, opcode, g.Use(node), g.TempImmediate(0), cont);
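// The float compare helpers below emit the comparison with the operands in
// (right, left) order and mark it as non-commutative; the callers pair this
// with unsigned-greater-than style conditions (see VisitFloat32LessThan
// further down), which is how less-than maps onto the x64 flags after the
// swap.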
                         FlagsContinuationT* cont) {
  const ComparisonOp& op = selector->Cast<ComparisonOp>(node);
  InstructionCode const opcode =
      selector->IsSupported(AVX) ? kAVXFloat32Cmp : kSSEFloat32Cmp;
  VisitCompare(selector, opcode, op.right(), op.left(), cont, false);

                         FlagsContinuationT* cont) {
  const ComparisonOp& op = selector->Cast<ComparisonOp>(node);
  InstructionCode const opcode =
      selector->IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
  VisitCompare(selector, opcode, op.right(), op.left(), cont, false);

  const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(node);
3634 X64OperandGeneratorT g(selector);
3636 InstructionOperand inputs[] = {
3637 g.UseUniqueRegister(atomic_op.value()),
3638 g.UseUniqueRegister(atomic_op.base()),
3639 g.GetEffectiveIndexOperand(atomic_op.index(), &addressing_mode)};
3640 InstructionOperand outputs[] = {g.DefineAsFixed(node, rax)};
3641 InstructionOperand temps[] = {g.TempRegister()};
  const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(node);
3656 X64OperandGeneratorT g(selector);
3658 InstructionOperand inputs[] = {
3659 g.UseFixed(atomic_op.expected().value(), rax),
3660 g.UseUniqueRegister(atomic_op.value()),
3661 g.UseUniqueRegister(atomic_op.base()),
3662 g.GetEffectiveIndexOperand(atomic_op.index(), &addressing_mode)};
3663 InstructionOperand outputs[] = {g.DefineAsFixed(node, rax)};
3676 FlagsContinuation* cont) {
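  // Tries to fuse the comparison (or an overflow projection, a stack-pointer
  // check, or a sub/and feeding a zero test) that produces `value` directly
  // into the branch or set continuation, instead of first materializing a
  // boolean result.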
  if (const ComparisonOp* comparison = value_op.TryCast<ComparisonOp>()) {
    if (comparison->kind == ComparisonOp::Kind::kEqual) {
      switch (comparison->rep.MapTaggedToWord().value()) {
        case RegisterRepresentation::Word32():
          cont->OverwriteAndNegateIfEqual(kEqual);
          return VisitWord32EqualImpl(this, value, cont);
        case RegisterRepresentation::Word64():
          cont->OverwriteAndNegateIfEqual(kEqual);
          if (this->MatchIntegralZero(comparison->right())) {
            if (CanCover(value, comparison->left())) {
              const Operation& left_op = this->Get(comparison->left());
              if (left_op.Is<Opmask::kWord64Sub>()) {
              } else if (left_op.Is<Opmask::kWord64BitwiseAnd>()) {
            return VisitCompareZero(this, value, comparison->left(), kX64Cmp,
                                    cont);
          return VisitWord64EqualImpl(this, value, cont);
3707 case RegisterRepresentation::Float32():
3710 case RegisterRepresentation::Float64(): {
3711 bool is_self_compare = comparison->left() == comparison->right();
3712 cont->OverwriteAndNegateIfEqual(is_self_compare ? kIsNotNaN
3720 switch (comparison->rep.MapTaggedToWord().value()) {
3721 case RegisterRepresentation::Word32(): {
3722 cont->OverwriteAndNegateIfEqual(
3723 GetComparisonFlagCondition(*comparison));
3726 case RegisterRepresentation::Word64(): {
3727 cont->OverwriteAndNegateIfEqual(
3728 GetComparisonFlagCondition(*comparison));
3731 case RegisterRepresentation::Float32():
3733 cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
3737 ComparisonOp::Kind::kSignedLessThanOrEqual);
3738 cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
3741 case RegisterRepresentation::Float64():
3743 if (MatchZero(comparison->left())) {
3744 const Operation& right = this->Get(comparison->right());
3745 if (right.Is<Opmask::kFloat64Abs>()) {
3754 cont->OverwriteAndNegateIfEqual(kNotEqual);
3755 InstructionCode const opcode =
3756 IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
3757 return VisitCompare(this, opcode, comparison->left(),
3758 right.Cast<FloatUnaryOp>().input(), cont,
3762 cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
3766 ComparisonOp::Kind::kSignedLessThanOrEqual);
3767 cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
  } else if (value_op.Is<Opmask::kWord32Sub>()) {
  } else if (value_op.Is<Opmask::kWord32BitwiseAnd>()) {
  } else if (const ProjectionOp* projection =
                 value_op.TryCast<ProjectionOp>()) {
    if (projection->index == 1u) {
      OpIndex node = projection->input();
      if (const OverflowCheckedBinopOp* binop =
              this->TryCast<OverflowCheckedBinopOp>(node);
          binop && CanDoBranchIfOverflowFusion(node)) {
        const bool is64 = binop->rep == WordRepresentation::Word64();
        cont->OverwriteAndNegateIfEqual(kOverflow);
        switch (binop->kind) {
          case OverflowCheckedBinopOp::Kind::kSignedAdd:
            return VisitBinop(this, node, is64 ? kX64Add : kX64Add32, cont);
          case OverflowCheckedBinopOp::Kind::kSignedSub:
            return VisitBinop(this, node, is64 ? kX64Sub : kX64Sub32, cont);
          case OverflowCheckedBinopOp::Kind::kSignedMul:
            return VisitBinop(this, node, is64 ? kX64Imul : kX64Imul32, cont);
  } else if (value_op.Is<StackPointerGreaterThanOp>()) {
    cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
    return VisitStackPointerGreaterThan(value, cont);
  VisitCompareZero(this, user, value, kX64Cmp32, cont);
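// VisitSwitch chooses between a jump table and a binary-search switch: a
// table is used only for more than 4 cases, when its space cost
// (4 + value_range) plus weighted time cost beats the lookup's
// (3 + 2 * case_count), and when the value range stays within
// kMaxTableSwitchValueRange (2 << 16).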
void InstructionSelectorT::VisitSwitch(OpIndex node, const SwitchInfo& sw) {
  X64OperandGeneratorT g(this);
  const SwitchOp& op = Cast<SwitchOp>(node);
  InstructionOperand value_operand = g.UseRegister(op.input());
  if (enable_switch_jump_table_ ==
      InstructionSelector::kEnableSwitchJumpTable) {
3824 static const size_t kMaxTableSwitchValueRange = 2 << 16;
3825 size_t table_space_cost = 4 + sw.value_range();
3826 size_t table_time_cost = 3;
3827 size_t lookup_space_cost = 3 + 2 * sw.case_count();
3828 size_t lookup_time_cost = sw.case_count();
3829 if (sw.case_count() > 4 &&
3830 table_space_cost + 3 * table_time_cost <=
3831 lookup_space_cost + 3 * lookup_time_cost &&
3832 sw.min_value() > std::numeric_limits<int32_t>::min() &&
3833 sw.value_range() <= kMaxTableSwitchValueRange) {
3834 InstructionOperand index_operand = g.TempRegister();
3835 if (sw.min_value()) {
3838 Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), index_operand,
3839 value_operand, g.TempImmediate(-sw.min_value()));
3842 if (ZeroExtendsWord32ToWord64(op.input())) {
3844 index_operand = value_operand;
3846 Emit(kX64Movl, index_operand, value_operand);
3850 return EmitTableSwitch(sw, index_operand);
3855 return EmitBinarySearchSwitch(sw, value_operand);
void InstructionSelectorT::VisitWord32Equal(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  const ComparisonOp& equal = Cast<ComparisonOp>(node);
  DCHECK(equal.rep == RegisterRepresentation::Word32() ||
         equal.rep == RegisterRepresentation::Tagged());
  if (MatchIntegralZero(equal.right())) {
    return VisitWordCompareZero(node, equal.left(), &cont);
  }
  VisitWord32EqualImpl(this, node, &cont);
}
3870void InstructionSelectorT::VisitInt32LessThan(OpIndex node) {
3871 FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
3875void InstructionSelectorT::VisitInt32LessThanOrEqual(OpIndex node) {
3876 FlagsContinuation cont =
3877 FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
3881void InstructionSelectorT::VisitUint32LessThan(OpIndex node) {
3882 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
3886void InstructionSelectorT::VisitUint32LessThanOrEqual(OpIndex node) {
3887 FlagsContinuation cont =
3888 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
void InstructionSelectorT::VisitWord64Equal(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  const ComparisonOp& equal = Cast<ComparisonOp>(node);
  DCHECK(equal.rep == RegisterRepresentation::Word64() ||
         equal.rep == RegisterRepresentation::Tagged());
  if (MatchIntegralZero(equal.right())) {
    if (CanCover(node, equal.left())) {
      const Operation& left_op = Get(equal.left());
      if (left_op.Is<Opmask::kWord64Sub>()) {
      } else if (left_op.Is<Opmask::kWord64BitwiseAnd>()) {
  VisitWord64EqualImpl(this, node, &cont);
}

void InstructionSelectorT::VisitInt32AddWithOverflow(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
  return VisitBinop(this, node, kX64Add32, &cont);
  FlagsContinuation cont;

void InstructionSelectorT::VisitInt32SubWithOverflow(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
  return VisitBinop(this, node, kX64Sub32, &cont);
  FlagsContinuation cont;
3931void InstructionSelectorT::VisitInt64LessThan(OpIndex node) {
3932 FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
3936void InstructionSelectorT::VisitInt64LessThanOrEqual(OpIndex node) {
3937 FlagsContinuation cont =
3938 FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
3942void InstructionSelectorT::VisitUint64LessThan(OpIndex node) {
3943 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
3947void InstructionSelectorT::VisitUint64LessThanOrEqual(OpIndex node) {
3948 FlagsContinuation cont =
3949 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
3953void InstructionSelectorT::VisitFloat32Equal(OpIndex node) {
3954 FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
3958void InstructionSelectorT::VisitFloat32LessThan(OpIndex node) {
3959 FlagsContinuation cont =
3960 FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
3964void InstructionSelectorT::VisitFloat32LessThanOrEqual(OpIndex node) {
3965 FlagsContinuation cont =
3966 FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
3970void InstructionSelectorT::VisitFloat64Equal(OpIndex node) {
3971 const ComparisonOp& op = Cast<ComparisonOp>(node);
3972 bool is_self_compare = op.left() == op.right();
3973 FlagsContinuation cont = FlagsContinuation::ForSet(
3974 is_self_compare ? kIsNotNaN : kUnorderedEqual, node);
void InstructionSelectorT::VisitFloat64LessThan(OpIndex node) {
  const ComparisonOp& cmp = Cast<ComparisonOp>(node);
  DCHECK_EQ(cmp.rep, RegisterRepresentation::Float64());
  DCHECK_EQ(cmp.kind, ComparisonOp::Kind::kSignedLessThan);
  if (MatchZero(cmp.left())) {
    if (const FloatUnaryOp* right_op =
            TryCast<Opmask::kFloat64Abs>(cmp.right())) {
      FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, node);
      InstructionCode const opcode =
          IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
      return VisitCompare(this, opcode, cmp.left(), right_op->input(), &cont,
                          false);
    }
  }
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
4005void InstructionSelectorT::VisitFloat64LessThanOrEqual(OpIndex node) {
4006 FlagsContinuation cont =
4007 FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
void InstructionSelectorT::VisitBitcastWord32PairToFloat64(OpIndex node) {
  X64OperandGeneratorT g(this);
  const auto& bitcast = Cast<BitcastWord32PairToFloat64Op>(node);
  OpIndex hi = bitcast.high_word32();
  OpIndex lo = bitcast.low_word32();
  InstructionOperand zero = sequence()->AddImmediate(Constant(0.0));
  InstructionOperand temp = g.TempDoubleRegister();
  Emit(kSSEFloat64InsertHighWord32, temp, zero, g.Use(hi));
  Emit(kSSEFloat64InsertLowWord32, g.DefineSameAsFirst(node), temp, g.Use(lo));
}

void InstructionSelectorT::VisitFloat64SilenceNaN(OpIndex node) {
  X64OperandGeneratorT g(this);
  const FloatUnaryOp& op = Cast<FloatUnaryOp>(node);
  Emit(kSSEFloat64SilenceNaN, g.DefineSameAsFirst(node),
       g.UseRegister(op.input()));
}

void InstructionSelectorT::VisitMemoryBarrier(OpIndex node) {
  AtomicMemoryOrder order = Cast<MemoryBarrierOp>(node).memory_order;
  if (order == AtomicMemoryOrder::kSeqCst) {
    X64OperandGeneratorT g(this);
    Emit(kX64MFence, g.NoOutput());
4042 DCHECK_EQ(AtomicMemoryOrder::kAcqRel, order);
4045void InstructionSelectorT::VisitWord32AtomicLoad(OpIndex node) {
4051 DCHECK_NE(load_rep.representation(), MachineRepresentation::kWord64);
4052 DCHECK(!load_rep.IsMapWord());
4056 VisitLoad(node, node, GetLoadOpcode(load_rep));
4059void InstructionSelectorT::VisitWord64AtomicLoad(OpIndex node) {
4061 DCHECK(!load_rep.IsMapWord());
4065 VisitLoad(node, node, GetLoadOpcode(load_rep));
4068void InstructionSelectorT::VisitWord32AtomicStore(OpIndex node) {
4069 auto store = this->store_view(node);
4070 DCHECK_NE(store.stored_rep().representation(),
4071 MachineRepresentation::kWord64);
4078void InstructionSelectorT::VisitWord64AtomicStore(OpIndex node) {
4079 auto store = this->store_view(node);
void InstructionSelectorT::VisitWord32AtomicExchange(OpIndex node) {
  const AtomicRMWOp& atomic_op = Cast<AtomicRMWOp>(node);
  ArchOpcode opcode;
  if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
    opcode = kAtomicExchangeInt8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
    opcode = kAtomicExchangeUint8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
    opcode = kAtomicExchangeInt16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
    opcode = kAtomicExchangeUint16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
             atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
    opcode = kAtomicExchangeWord32;
  } else {
    UNREACHABLE();
  }
  VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32,
                      atomic_op.memory_access_kind);
}

void InstructionSelectorT::VisitWord64AtomicExchange(OpIndex node) {
  const AtomicRMWOp& atomic_op = Cast<AtomicRMWOp>(node);
  ArchOpcode opcode;
  if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
    opcode = kAtomicExchangeUint8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
    opcode = kAtomicExchangeUint16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
    opcode = kAtomicExchangeWord32;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
    opcode = kX64Word64AtomicExchangeUint64;
  } else {
    UNREACHABLE();
  }
  VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64,
                      atomic_op.memory_access_kind);
4125void InstructionSelectorT::VisitWord32AtomicCompareExchange(OpIndex node) {
4126 const AtomicRMWOp& atomic_op = Cast<AtomicRMWOp>(node);
4128 if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
4129 opcode = kAtomicCompareExchangeInt8;
4130 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
4131 opcode = kAtomicCompareExchangeUint8;
4132 }
else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
4133 opcode = kAtomicCompareExchangeInt16;
4134 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
4135 opcode = kAtomicCompareExchangeUint16;
4136 }
else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
4137 atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
4138 opcode = kAtomicCompareExchangeWord32;
4143 atomic_op.memory_access_kind);
4146void InstructionSelectorT::VisitWord64AtomicCompareExchange(OpIndex node) {
4147 const AtomicRMWOp& atomic_op = Cast<AtomicRMWOp>(node);
4149 if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
4150 opcode = kAtomicCompareExchangeUint8;
4151 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
4152 opcode = kAtomicCompareExchangeUint16;
4153 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
4154 opcode = kAtomicCompareExchangeWord32;
4155 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
4156 opcode = kX64Word64AtomicCompareExchangeUint64;
4161 atomic_op.memory_access_kind);
4164void InstructionSelectorT::VisitWord32AtomicBinaryOperation(
4165 OpIndex node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
4166 ArchOpcode uint16_op, ArchOpcode word32_op) {
4167 const AtomicRMWOp& atomic_op = Cast<AtomicRMWOp>(node);
4169 if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
4171 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
4173 }
else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
4175 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
4177 }
else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
4178 atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
4184 atomic_op.memory_access_kind);
4187#define VISIT_ATOMIC_BINOP(op) \
4188 void InstructionSelectorT::VisitWord32Atomic##op(OpIndex node) { \
4189 VisitWord32AtomicBinaryOperation( \
4190 node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
4191 kAtomic##op##Uint16, kAtomic##op##Word32); \
4198#undef VISIT_ATOMIC_BINOP
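// The word64 atomic binops below reuse the narrow kAtomic*Uint8/Uint16/Word32
// opcodes for sub-word memory representations and only need the dedicated
// kX64Word64Atomic*Uint64 opcodes for the full 64-bit case.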
4200void InstructionSelectorT::VisitWord64AtomicBinaryOperation(
4201 OpIndex node, ArchOpcode uint8_op, ArchOpcode uint16_op,
4202 ArchOpcode uint32_op, ArchOpcode word64_op) {
4203 const AtomicRMWOp& atomic_op = Cast<AtomicRMWOp>(node);
4205 if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
4207 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
4209 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
4211 }
else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
4216 VisitAtomicBinop(
this, node, opcode, AtomicWidth::kWord64,
4217 atomic_op.memory_access_kind);
4220#define VISIT_ATOMIC_BINOP(op) \
4221 void InstructionSelectorT::VisitWord64Atomic##op(OpIndex node) { \
4222 VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
4223 kAtomic##op##Uint16, kAtomic##op##Word32, \
4224 kX64Word64Atomic##op##Uint64); \
4231#undef VISIT_ATOMIC_BINOP
4233#ifdef V8_ENABLE_WEBASSEMBLY
4234#define SIMD_BINOP_SSE_AVX_LIST(V) \
4235 V(I64x2ExtMulLowI32x4S) \
4236 V(I64x2ExtMulHighI32x4S) \
4237 V(I64x2ExtMulLowI32x4U) \
4238 V(I64x2ExtMulHighI32x4U) \
4240 V(I32x8DotI16x16S) \
4241 V(I32x4ExtMulLowI16x8S) \
4242 V(I32x4ExtMulHighI16x8S) \
4243 V(I32x4ExtMulLowI16x8U) \
4244 V(I32x4ExtMulHighI16x8U) \
4245 V(I16x8SConvertI32x4) \
4246 V(I16x8UConvertI32x4) \
4247 V(I16x8ExtMulLowI8x16S) \
4248 V(I16x8ExtMulHighI8x16S) \
4249 V(I16x8ExtMulLowI8x16U) \
4250 V(I16x8ExtMulHighI8x16U) \
4251 V(I16x8Q15MulRSatS) \
4252 V(I16x8RelaxedQ15MulRS) \
4253 V(I8x16SConvertI16x8) \
4254 V(I8x16UConvertI16x8) \
4255 V(I16x16SConvertI32x8) \
4256 V(I16x16UConvertI32x8) \
4257 V(I8x32SConvertI16x16) \
4258 V(I8x32UConvertI16x16) \
4259 V(I64x4ExtMulI32x4S) \
4260 V(I64x4ExtMulI32x4U) \
4261 V(I32x8ExtMulI16x8S) \
4262 V(I32x8ExtMulI16x8U) \
4263 V(I16x16ExtMulI8x16S) \
4264 V(I16x16ExtMulI8x16U)
4266#define SIMD_BINOP_SSE_AVX_LANE_SIZE_VECTOR_LENGTH_LIST(V) \
4267 V(F64x2Add, FAdd, kL64, kV128) \
4268 V(F64x4Add, FAdd, kL64, kV256) \
4269 V(F32x4Add, FAdd, kL32, kV128) \
4270 V(F32x8Add, FAdd, kL32, kV256) \
4271 V(I64x2Add, IAdd, kL64, kV128) \
4272 V(I64x4Add, IAdd, kL64, kV256) \
4273 V(I32x8Add, IAdd, kL32, kV256) \
4274 V(I16x16Add, IAdd, kL16, kV256) \
4275 V(I8x32Add, IAdd, kL8, kV256) \
4276 V(I32x4Add, IAdd, kL32, kV128) \
4277 V(I16x8Add, IAdd, kL16, kV128) \
4278 V(I8x16Add, IAdd, kL8, kV128) \
4279 V(F64x4Sub, FSub, kL64, kV256) \
4280 V(F64x2Sub, FSub, kL64, kV128) \
4281 V(F32x4Sub, FSub, kL32, kV128) \
4282 V(F32x8Sub, FSub, kL32, kV256) \
4283 V(I64x2Sub, ISub, kL64, kV128) \
4284 V(I64x4Sub, ISub, kL64, kV256) \
4285 V(I32x8Sub, ISub, kL32, kV256) \
4286 V(I16x16Sub, ISub, kL16, kV256) \
4287 V(I8x32Sub, ISub, kL8, kV256) \
4288 V(I32x4Sub, ISub, kL32, kV128) \
4289 V(I16x8Sub, ISub, kL16, kV128) \
4290 V(I8x16Sub, ISub, kL8, kV128) \
4291 V(F64x2Mul, FMul, kL64, kV128) \
4292 V(F32x4Mul, FMul, kL32, kV128) \
4293 V(F64x4Mul, FMul, kL64, kV256) \
4294 V(F32x8Mul, FMul, kL32, kV256) \
4295 V(I32x8Mul, IMul, kL32, kV256) \
4296 V(I16x16Mul, IMul, kL16, kV256) \
4297 V(I32x4Mul, IMul, kL32, kV128) \
4298 V(I16x8Mul, IMul, kL16, kV128) \
4299 V(F64x2Div, FDiv, kL64, kV128) \
4300 V(F32x4Div, FDiv, kL32, kV128) \
4301 V(F64x4Div, FDiv, kL64, kV256) \
4302 V(F32x8Div, FDiv, kL32, kV256) \
4303 V(I16x8AddSatS, IAddSatS, kL16, kV128) \
4304 V(I16x16AddSatS, IAddSatS, kL16, kV256) \
4305 V(I8x16AddSatS, IAddSatS, kL8, kV128) \
4306 V(I8x32AddSatS, IAddSatS, kL8, kV256) \
4307 V(I16x8SubSatS, ISubSatS, kL16, kV128) \
4308 V(I16x16SubSatS, ISubSatS, kL16, kV256) \
4309 V(I8x16SubSatS, ISubSatS, kL8, kV128) \
4310 V(I8x32SubSatS, ISubSatS, kL8, kV256) \
4311 V(I16x8AddSatU, IAddSatU, kL16, kV128) \
4312 V(I16x16AddSatU, IAddSatU, kL16, kV256) \
4313 V(I8x16AddSatU, IAddSatU, kL8, kV128) \
4314 V(I8x32AddSatU, IAddSatU, kL8, kV256) \
4315 V(I16x8SubSatU, ISubSatU, kL16, kV128) \
4316 V(I16x16SubSatU, ISubSatU, kL16, kV256) \
4317 V(I8x16SubSatU, ISubSatU, kL8, kV128) \
4318 V(I8x32SubSatU, ISubSatU, kL8, kV256) \
4319 V(F64x2Eq, FEq, kL64, kV128) \
4320 V(F32x4Eq, FEq, kL32, kV128) \
4321 V(F32x8Eq, FEq, kL32, kV256) \
4322 V(F64x4Eq, FEq, kL64, kV256) \
4323 V(I8x32Eq, IEq, kL8, kV256) \
4324 V(I16x16Eq, IEq, kL16, kV256) \
4325 V(I32x8Eq, IEq, kL32, kV256) \
4326 V(I64x4Eq, IEq, kL64, kV256) \
4327 V(I64x2Eq, IEq, kL64, kV128) \
4328 V(I32x4Eq, IEq, kL32, kV128) \
4329 V(I16x8Eq, IEq, kL16, kV128) \
4330 V(I8x16Eq, IEq, kL8, kV128) \
4331 V(F64x2Ne, FNe, kL64, kV128) \
4332 V(F32x4Ne, FNe, kL32, kV128) \
4333 V(F32x8Ne, FNe, kL32, kV256) \
4334 V(F64x4Ne, FNe, kL64, kV256) \
4335 V(I32x4GtS, IGtS, kL32, kV128) \
4336 V(I16x8GtS, IGtS, kL16, kV128) \
4337 V(I8x16GtS, IGtS, kL8, kV128) \
4338 V(I8x32GtS, IGtS, kL8, kV256) \
4339 V(I16x16GtS, IGtS, kL16, kV256) \
4340 V(I32x8GtS, IGtS, kL32, kV256) \
4341 V(I64x4GtS, IGtS, kL64, kV256) \
4342 V(F64x2Lt, FLt, kL64, kV128) \
4343 V(F32x4Lt, FLt, kL32, kV128) \
4344 V(F64x4Lt, FLt, kL64, kV256) \
4345 V(F32x8Lt, FLt, kL32, kV256) \
4346 V(F64x2Le, FLe, kL64, kV128) \
4347 V(F32x4Le, FLe, kL32, kV128) \
4348 V(F64x4Le, FLe, kL64, kV256) \
4349 V(F32x8Le, FLe, kL32, kV256) \
4350 V(I32x4MinS, IMinS, kL32, kV128) \
4351 V(I16x8MinS, IMinS, kL16, kV128) \
4352 V(I8x16MinS, IMinS, kL8, kV128) \
4353 V(I32x4MinU, IMinU, kL32, kV128) \
4354 V(I16x8MinU, IMinU, kL16, kV128) \
4355 V(I8x16MinU, IMinU, kL8, kV128) \
4356 V(I32x4MaxS, IMaxS, kL32, kV128) \
4357 V(I16x8MaxS, IMaxS, kL16, kV128) \
4358 V(I8x16MaxS, IMaxS, kL8, kV128) \
4359 V(I32x4MaxU, IMaxU, kL32, kV128) \
4360 V(I16x8MaxU, IMaxU, kL16, kV128) \
4361 V(I8x16MaxU, IMaxU, kL8, kV128) \
4362 V(I32x8MinS, IMinS, kL32, kV256) \
4363 V(I16x16MinS, IMinS, kL16, kV256) \
4364 V(I8x32MinS, IMinS, kL8, kV256) \
4365 V(I32x8MinU, IMinU, kL32, kV256) \
4366 V(I16x16MinU, IMinU, kL16, kV256) \
4367 V(I8x32MinU, IMinU, kL8, kV256) \
4368 V(I32x8MaxS, IMaxS, kL32, kV256) \
4369 V(I16x16MaxS, IMaxS, kL16, kV256) \
4370 V(I8x32MaxS, IMaxS, kL8, kV256) \
4371 V(I32x8MaxU, IMaxU, kL32, kV256) \
4372 V(I16x16MaxU, IMaxU, kL16, kV256) \
4373 V(I8x32MaxU, IMaxU, kL8, kV256) \
4374 V(I16x8RoundingAverageU, IRoundingAverageU, kL16, kV128) \
4375 V(I16x16RoundingAverageU, IRoundingAverageU, kL16, kV256) \
4376 V(I8x16RoundingAverageU, IRoundingAverageU, kL8, kV128) \
4377 V(I8x32RoundingAverageU, IRoundingAverageU, kL8, kV256) \
4378 V(S128And, SAnd, kL8, kV128) \
4379 V(S256And, SAnd, kL8, kV256) \
4380 V(S128Or, SOr, kL8, kV128) \
4381 V(S256Or, SOr, kL8, kV256) \
4382 V(S128Xor, SXor, kL8, kV128) \
4383 V(S256Xor, SXor, kL8, kV256)
4385#define SIMD_F16x8_BINOP_LIST(V) \
4397#define SIMD_BINOP_LANE_SIZE_VECTOR_LENGTH_LIST(V) \
4398 V(F64x2Min, FMin, kL64, kV128) \
4399 V(F32x4Min, FMin, kL32, kV128) \
4400 V(F64x4Min, FMin, kL64, kV256) \
4401 V(F32x8Min, FMin, kL32, kV256) \
4402 V(F64x2Max, FMax, kL64, kV128) \
4403 V(F32x4Max, FMax, kL32, kV128) \
4404 V(F64x4Max, FMax, kL64, kV256) \
4405 V(F32x8Max, FMax, kL32, kV256) \
4406 V(I64x2Ne, INe, kL64, kV128) \
4407 V(I32x4Ne, INe, kL32, kV128) \
4408 V(I16x8Ne, INe, kL16, kV128) \
4409 V(I8x16Ne, INe, kL8, kV128) \
4410 V(I64x4Ne, INe, kL64, kV256) \
4411 V(I32x8Ne, INe, kL32, kV256) \
4412 V(I16x16Ne, INe, kL16, kV256) \
4413 V(I8x32Ne, INe, kL8, kV256) \
4414 V(I32x4GtU, IGtU, kL32, kV128) \
4415 V(I16x8GtU, IGtU, kL16, kV128) \
4416 V(I8x16GtU, IGtU, kL8, kV128) \
4417 V(I32x8GtU, IGtU, kL32, kV256) \
4418 V(I16x16GtU, IGtU, kL16, kV256) \
4419 V(I8x32GtU, IGtU, kL8, kV256) \
4420 V(I32x4GeS, IGeS, kL32, kV128) \
4421 V(I16x8GeS, IGeS, kL16, kV128) \
4422 V(I8x16GeS, IGeS, kL8, kV128) \
4423 V(I32x8GeS, IGeS, kL32, kV256) \
4424 V(I16x16GeS, IGeS, kL16, kV256) \
4425 V(I8x32GeS, IGeS, kL8, kV256) \
4426 V(I32x4GeU, IGeU, kL32, kV128) \
4427 V(I16x8GeU, IGeU, kL16, kV128) \
4428 V(I8x16GeU, IGeU, kL8, kV128) \
4429 V(I32x8GeU, IGeU, kL32, kV256) \
4430 V(I16x16GeU, IGeU, kL16, kV256) \
4431 V(I8x32GeU, IGeU, kL8, kV256)
4433#define SIMD_UNOP_LIST(V) \
4434 V(F64x2ConvertLowI32x4S) \
4435 V(F64x4ConvertI32x4S) \
4436 V(F32x4SConvertI32x4) \
4437 V(F32x8SConvertI32x8) \
4438 V(F32x4DemoteF64x2Zero) \
4439 V(F32x4DemoteF64x4) \
4440 V(I16x8SConvertF16x8) \
4441 V(I16x8UConvertF16x8) \
4442 V(F16x8SConvertI16x8) \
4443 V(F16x8UConvertI16x8) \
4444 V(F16x8DemoteF32x4Zero) \
4445 V(F32x4PromoteLowF16x8) \
4446 V(I64x2SConvertI32x4Low) \
4447 V(I64x2SConvertI32x4High) \
4448 V(I64x4SConvertI32x4) \
4449 V(I64x2UConvertI32x4Low) \
4450 V(I64x2UConvertI32x4High) \
4451 V(I64x4UConvertI32x4) \
4452 V(I32x4SConvertI16x8Low) \
4453 V(I32x4SConvertI16x8High) \
4454 V(I32x8SConvertI16x8) \
4455 V(I32x4UConvertI16x8Low) \
4456 V(I32x4UConvertI16x8High) \
4457 V(I32x8UConvertI16x8) \
4458 V(I16x8SConvertI8x16Low) \
4459 V(I16x8SConvertI8x16High) \
4460 V(I16x16SConvertI8x16) \
4461 V(I16x8UConvertI8x16Low) \
4462 V(I16x8UConvertI8x16High) \
4463 V(I16x16UConvertI8x16)
4465#define SIMD_UNOP_LANE_SIZE_VECTOR_LENGTH_LIST(V) \
4466 V(F32x4Abs, FAbs, kL32, kV128) \
4467 V(I32x4Abs, IAbs, kL32, kV128) \
4468 V(F16x8Abs, FAbs, kL16, kV128) \
4469 V(I16x8Abs, IAbs, kL16, kV128) \
4470 V(I8x16Abs, IAbs, kL8, kV128) \
4471 V(F32x4Neg, FNeg, kL32, kV128) \
4472 V(I32x4Neg, INeg, kL32, kV128) \
4473 V(F16x8Neg, FNeg, kL16, kV128) \
4474 V(I16x8Neg, INeg, kL16, kV128) \
4475 V(I8x16Neg, INeg, kL8, kV128) \
4476 V(F64x2Sqrt, FSqrt, kL64, kV128) \
4477 V(F32x4Sqrt, FSqrt, kL32, kV128) \
4478 V(F16x8Sqrt, FSqrt, kL16, kV128) \
4479 V(I64x2BitMask, IBitMask, kL64, kV128) \
4480 V(I32x4BitMask, IBitMask, kL32, kV128) \
4481 V(I16x8BitMask, IBitMask, kL16, kV128) \
4482 V(I8x16BitMask, IBitMask, kL8, kV128) \
4483 V(I64x2AllTrue, IAllTrue, kL64, kV128) \
4484 V(I32x4AllTrue, IAllTrue, kL32, kV128) \
4485 V(I16x8AllTrue, IAllTrue, kL16, kV128) \
4486 V(I8x16AllTrue, IAllTrue, kL8, kV128) \
4487 V(S128Not, SNot, kL8, kV128) \
4488 V(F64x4Abs, FAbs, kL64, kV256) \
4489 V(F32x8Abs, FAbs, kL32, kV256) \
4490 V(I32x8Abs, IAbs, kL32, kV256) \
4491 V(I16x16Abs, IAbs, kL16, kV256) \
4492 V(I8x32Abs, IAbs, kL8, kV256) \
4493 V(F64x4Neg, FNeg, kL64, kV256) \
4494 V(F32x8Neg, FNeg, kL32, kV256) \
4495 V(I32x8Neg, INeg, kL32, kV256) \
4496 V(I16x16Neg, INeg, kL16, kV256) \
4497 V(I8x32Neg, INeg, kL8, kV256) \
4498 V(F64x4Sqrt, FSqrt, kL64, kV256) \
4499 V(F32x8Sqrt, FSqrt, kL32, kV256) \
4500 V(S256Not, SNot, kL8, kV256)
4502#define SIMD_SHIFT_LANE_SIZE_VECTOR_LENGTH_OPCODES(V) \
4503 V(I64x2Shl, IShl, kL64, kV128) \
4504 V(I32x4Shl, IShl, kL32, kV128) \
4505 V(I16x8Shl, IShl, kL16, kV128) \
4506 V(I32x4ShrS, IShrS, kL32, kV128) \
4507 V(I16x8ShrS, IShrS, kL16, kV128) \
4508 V(I64x2ShrU, IShrU, kL64, kV128) \
4509 V(I32x4ShrU, IShrU, kL32, kV128) \
4510 V(I16x8ShrU, IShrU, kL16, kV128) \
4511 V(I64x4Shl, IShl, kL64, kV256) \
4512 V(I32x8Shl, IShl, kL32, kV256) \
4513 V(I16x16Shl, IShl, kL16, kV256) \
4514 V(I32x8ShrS, IShrS, kL32, kV256) \
4515 V(I16x16ShrS, IShrS, kL16, kV256) \
4516 V(I64x4ShrU, IShrU, kL64, kV256) \
4517 V(I32x8ShrU, IShrU, kL32, kV256) \
4518 V(I16x16ShrU, IShrU, kL16, kV256)
4520#define SIMD_NARROW_SHIFT_LANE_SIZE_VECTOR_LENGTH_OPCODES(V) \
4521 V(I8x16Shl, IShl, kL8, kV128) \
4522 V(I8x16ShrS, IShrS, kL8, kV128) \
4523 V(I8x16ShrU, IShrU, kL8, kV128)
void InstructionSelectorT::VisitS128Const(OpIndex node) {
  X64OperandGeneratorT g(this);
  static const int kUint32Immediates = kSimd128Size / sizeof(uint32_t);
  uint32_t val[kUint32Immediates];
  const Simd128ConstantOp& constant = Cast<Simd128ConstantOp>(node);
  memcpy(val, constant.value, kSimd128Size);
  bool all_zeros = !(val[0] || val[1] || val[2] || val[3]);
  bool all_ones = val[0] == UINT32_MAX && val[1] == UINT32_MAX &&
                  val[2] == UINT32_MAX && val[3] == UINT32_MAX;
  InstructionOperand dst = g.DefineAsRegister(node);
  if (all_zeros) {
    Emit(kX64SZero | VectorLengthField::encode(kV128), dst);
  } else if (all_ones) {
    Emit(kX64SAllOnes | VectorLengthField::encode(kV128), dst);
  } else {
    Emit(kX64S128Const, dst, g.UseImmediate(val[0]), g.UseImmediate(val[1]),
         g.UseImmediate(val[2]), g.UseImmediate(val[3]));
  }
}

void InstructionSelectorT::VisitS128Zero(OpIndex node) {
  X64OperandGeneratorT g(this);
  Emit(kX64SZero | VectorLengthField::encode(kV128), g.DefineAsRegister(node));
4551#define SIMD_INT_TYPES_FOR_SPLAT(V) \
4552 V(I64x2, kL64, kV128) \
4553 V(I32x4, kL32, kV128) \
4554 V(I16x8, kL16, kV128) \
4555 V(I8x16, kL8, kV128) \
4556 V(I64x4, kL64, kV256) \
4557 V(I32x8, kL32, kV256) \
4558 V(I16x16, kL16, kV256) \
4559 V(I8x32, kL8, kV256)
4562#define VISIT_INT_SIMD_SPLAT(Type, LaneSize, VectorLength) \
4563 void InstructionSelectorT::Visit##Type##Splat(OpIndex node) { \
4564 X64OperandGeneratorT g(this); \
4565 const Operation& op = Get(node); \
4566 DCHECK_EQ(op.input_count, 1); \
4567 OpIndex input = op.input(0); \
4568 if (g.CanBeImmediate(input) && g.GetImmediateIntegerValue(input) == 0) { \
4569 Emit(kX64SZero | VectorLengthField::encode(VectorLength), \
4570 g.DefineAsRegister(node)); \
4572 Emit(kX64ISplat | LaneSizeField::encode(LaneSize) | \
4573 VectorLengthField::encode(VectorLength), \
4574 g.DefineAsRegister(node), g.Use(input)); \
4577SIMD_INT_TYPES_FOR_SPLAT(VISIT_INT_SIMD_SPLAT)
4578#undef VISIT_INT_SIMD_SPLAT
4579#undef SIMD_INT_TYPES_FOR_SPLAT
4581void InstructionSelectorT::VisitF64x2Splat(OpIndex node) {
4582 X64OperandGeneratorT g(
this);
4583 const Simd128SplatOp& op = Cast<Simd128SplatOp>(node);
4585 Emit(kX64FSplat | LaneSizeField::encode(kL64) |
4586 VectorLengthField::encode(kV128),
4587 g.DefineAsRegister(node), g.Use(op.input()));
4590void InstructionSelectorT::VisitF32x4Splat(OpIndex node) {
4591 X64OperandGeneratorT g(
this);
4592 const Simd128SplatOp& op = Cast<Simd128SplatOp>(node);
4594 Emit(kX64FSplat | LaneSizeField::encode(kL32) |
4595 VectorLengthField::encode(kV128),
4596 g.DefineAsRegister(node), g.UseRegister(op.input()));
4599void InstructionSelectorT::VisitF16x8Splat(OpIndex node) {
4600 X64OperandGeneratorT g(
this);
4601 const Simd128SplatOp& op = Cast<Simd128SplatOp>(node);
4603 Emit(kX64FSplat | LaneSizeField::encode(kL16) |
4604 VectorLengthField::encode(kV128),
4605 g.DefineAsRegister(node), g.UseRegister(op.input()));
4608void InstructionSelectorT::VisitF64x4Splat(OpIndex node) {
4609#ifdef V8_ENABLE_WASM_SIMD256_REVEC
4610 X64OperandGeneratorT g(
this);
4611 const Simd256SplatOp& op = Cast<Simd256SplatOp>(node);
4613 Emit(kX64FSplat | LaneSizeField::encode(kL64) |
4614 VectorLengthField::encode(kV256),
4615 g.DefineAsRegister(node), g.UseRegister(op.input()));
4621void InstructionSelectorT::VisitF32x8Splat(OpIndex node) {
4622#ifdef V8_ENABLE_WASM_SIMD256_REVEC
4623 X64OperandGeneratorT g(
this);
4624 const Simd256SplatOp& op = Cast<Simd256SplatOp>(node);
4626 Emit(kX64FSplat | LaneSizeField::encode(kL32) |
4627 VectorLengthField::encode(kV256),
4628 g.DefineAsRegister(node), g.UseRegister(op.input()));
4634#define SIMD_VISIT_EXTRACT_LANE(IF, Type, Sign, LaneSize, VectorLength) \
4635 void InstructionSelectorT::Visit##Type##ExtractLane##Sign(OpIndex node) { \
4636 X64OperandGeneratorT g(this); \
4637 const Simd128ExtractLaneOp& op = Cast<Simd128ExtractLaneOp>(node); \
4638 int32_t lane = op.lane; \
4639 Emit(kX64##IF##ExtractLane##Sign | LaneSizeField::encode(LaneSize) | \
4640 VectorLengthField::encode(VectorLength), \
4641 g.DefineAsRegister(node), g.UseRegister(op.input()), \
4642 g.UseImmediate(lane)); \
4652#undef SIMD_VISIT_EXTRACT_LANE
void InstructionSelectorT::VisitI16x8ExtractLaneU(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd128ExtractLaneOp& op = Cast<Simd128ExtractLaneOp>(node);
  Emit(kX64Pextrw, g.DefineAsRegister(node), g.UseRegister(op.input()),
       g.UseImmediate(static_cast<int32_t>(op.lane)));
}

void InstructionSelectorT::VisitI8x16ExtractLaneU(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd128ExtractLaneOp& op = Cast<Simd128ExtractLaneOp>(node);
  Emit(kX64Pextrb, g.DefineAsRegister(node), g.UseRegister(op.input()),
       g.UseImmediate(op.lane));
4668void InstructionSelectorT::VisitF16x8ReplaceLane(OpIndex node) {
4669 X64OperandGeneratorT g(
this);
4670 auto& op = Cast<Simd128ReplaceLaneOp>(node);
4671 Emit(kX64FReplaceLane | LaneSizeField::encode(kL16) |
4672 VectorLengthField::encode(kV128),
4673 g.DefineSameAsFirst(node), g.UseRegister(op.into()),
4674 g.UseImmediate(op.lane), g.Use(op.new_lane()));
4677void InstructionSelectorT::VisitF32x4ReplaceLane(OpIndex node) {
4678 X64OperandGeneratorT g(
this);
4679 const Simd128ReplaceLaneOp& op = Cast<Simd128ReplaceLaneOp>(node);
4680 Emit(kX64FReplaceLane | LaneSizeField::encode(kL32) |
4681 VectorLengthField::encode(kV128),
4682 g.DefineSameAsFirst(node), g.UseRegister(op.into()),
4683 g.UseImmediate(op.lane), g.Use(op.new_lane()));
4686void InstructionSelectorT::VisitF64x2ReplaceLane(OpIndex node) {
4687 X64OperandGeneratorT g(
this);
4689 InstructionOperand dst =
4690 IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
4691 const Simd128ReplaceLaneOp& op = Cast<Simd128ReplaceLaneOp>(node);
4692 Emit(kX64FReplaceLane | LaneSizeField::encode(kL64) |
4693 VectorLengthField::encode(kV128),
4694 dst, g.UseRegister(op.into()), g.UseImmediate(op.lane),
4695 g.UseRegister(op.new_lane()));
4698#define VISIT_SIMD_REPLACE_LANE(TYPE, OPCODE) \
4699 void InstructionSelectorT::Visit##TYPE##ReplaceLane(OpIndex node) { \
4700 X64OperandGeneratorT g(this); \
4701 const Simd128ReplaceLaneOp& op = Cast<Simd128ReplaceLaneOp>(node); \
4702 Emit(OPCODE, g.DefineAsRegister(node), g.UseRegister(op.into()), \
4703 g.UseImmediate(op.lane), g.Use(op.new_lane())); \
4706#define SIMD_TYPES_FOR_REPLACE_LANE(V) \
4707 V(I64x2, kX64Pinsrq) \
4708 V(I32x4, kX64Pinsrd) \
4709 V(I16x8, kX64Pinsrw) \
4710 V(I8x16, kX64Pinsrb)
4712SIMD_TYPES_FOR_REPLACE_LANE(VISIT_SIMD_REPLACE_LANE)
4713#undef SIMD_TYPES_FOR_REPLACE_LANE
4714#undef VISIT_SIMD_REPLACE_LANE
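// The shift visitors generated below take the shift count as an immediate
// when possible and otherwise in a register; the "narrow" 8-bit-lane
// variants also reserve a temporary SIMD register, since 8-bit lane shifts
// are not available natively and have to be synthesized.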
4716#define VISIT_SIMD_SHIFT_LANE_SIZE_VECTOR_LENGTH_OPCODES( \
4717 Name, Opcode, LaneSize, VectorLength) \
4718 void InstructionSelectorT::Visit##Name(OpIndex node) { \
4719 X64OperandGeneratorT g(this); \
4720 const Operation& op = Get(node); \
4721 DCHECK_EQ(op.input_count, 2); \
4722 InstructionOperand dst = IsSupported(AVX) ? g.DefineAsRegister(node) \
4723 : g.DefineSameAsFirst(node); \
4724 if (g.CanBeImmediate(op.input(1))) { \
4725 Emit(kX64##Opcode | LaneSizeField::encode(LaneSize) | \
4726 VectorLengthField::encode(VectorLength), \
4727 dst, g.UseRegister(op.input(0)), g.UseImmediate(op.input(1))); \
4729 Emit(kX64##Opcode | LaneSizeField::encode(LaneSize) | \
4730 VectorLengthField::encode(VectorLength), \
4731 dst, g.UseRegister(op.input(0)), g.UseRegister(op.input(1))); \
4734SIMD_SHIFT_LANE_SIZE_VECTOR_LENGTH_OPCODES(
4735 VISIT_SIMD_SHIFT_LANE_SIZE_VECTOR_LENGTH_OPCODES)
4737#undef VISIT_SIMD_SHIFT_LANE_SIZE_VECTOR_LENGTH_OPCODES
4738#undef SIMD_SHIFT_LANE_SIZE_VECTOR_LENGTH_OPCODES
4740#define VISIT_SIMD_NARROW_SHIFT_LANE_SIZE_VECTOR_LENGTH_OPCODES( \
4741 Name, Opcode, LaneSize, VectorLength) \
4742 void InstructionSelectorT::Visit##Name(OpIndex node) { \
4743 X64OperandGeneratorT g(this); \
4744 const Operation& op = Get(node); \
4745 DCHECK_EQ(op.input_count, 2); \
4746 InstructionOperand output = \
4747 IsSupported(AVX) ? g.UseRegister(node) : g.DefineSameAsFirst(node); \
4748 if (g.CanBeImmediate(op.input(1))) { \
4749 Emit(kX64##Opcode | LaneSizeField::encode(LaneSize) | \
4750 VectorLengthField::encode(VectorLength), \
4751 output, g.UseRegister(op.input(0)), g.UseImmediate(op.input(1))); \
4753 InstructionOperand temps[] = {g.TempSimd128Register()}; \
4754 Emit(kX64##Opcode | LaneSizeField::encode(LaneSize) | \
4755 VectorLengthField::encode(VectorLength), \
4756 output, g.UseUniqueRegister(op.input(0)), \
4757 g.UseUniqueRegister(op.input(1)), arraysize(temps), temps); \
4760SIMD_NARROW_SHIFT_LANE_SIZE_VECTOR_LENGTH_OPCODES(
4761 VISIT_SIMD_NARROW_SHIFT_LANE_SIZE_VECTOR_LENGTH_OPCODES)
4762#undef VISIT_SIMD_NARROW_SHIFT_LANE_SIZE_VECTOR_LENGTH_OPCODES
4763#undef SIMD_NARROW_SHIFT_LANE_SIZE_VECTOR_LENGTH_OPCODES
4765#define VISIT_SIMD_UNOP(Opcode) \
4766 void InstructionSelectorT::Visit##Opcode(OpIndex node) { \
4767 X64OperandGeneratorT g(this); \
4768 const Operation& op = Get(node); \
4769 DCHECK_EQ(op.input_count, 1); \
4770 Emit(kX64##Opcode, g.DefineAsRegister(node), g.UseRegister(op.input(0))); \
4773#undef VISIT_SIMD_UNOP
4774#undef SIMD_UNOP_LIST
4776#define VISIT_SIMD_UNOP_LANE_SIZE_VECTOR_LENGTH(Name, Opcode, LaneSize, \
4778 void InstructionSelectorT::Visit##Name(OpIndex node) { \
4779 X64OperandGeneratorT g(this); \
4780 const Operation& op = Get(node); \
4781 DCHECK_EQ(op.input_count, 1); \
4782 Emit(kX64##Opcode | LaneSizeField::encode(LaneSize) | \
4783 VectorLengthField::encode(VectorLength), \
4784 g.DefineAsRegister(node), g.UseRegister(op.input(0))); \
4787SIMD_UNOP_LANE_SIZE_VECTOR_LENGTH_LIST(VISIT_SIMD_UNOP_LANE_SIZE_VECTOR_LENGTH)
4789#undef VISIT_SIMD_UNOP_LANE_SIZE_VECTOR_LENGTH
4790#undef SIMD_UNOP_LANE_SIZE_VECTOR_LENGTH_LIST
4792#define VISIT_SIMD_BINOP_LANE_SIZE_VECTOR_LENGTH(Name, Opcode, LaneSize, \
4794 void InstructionSelectorT::Visit##Name(OpIndex node) { \
4795 X64OperandGeneratorT g(this); \
4796 const Operation& op = Get(node); \
4797 DCHECK_EQ(op.input_count, 2); \
4798 Emit(kX64##Opcode | LaneSizeField::encode(LaneSize) | \
4799 VectorLengthField::encode(VectorLength), \
4800 g.DefineSameAsFirst(node), g.UseRegister(op.input(0)), \
4801 g.UseRegister(op.input(1))); \
4804SIMD_BINOP_LANE_SIZE_VECTOR_LENGTH_LIST(
4805 VISIT_SIMD_BINOP_LANE_SIZE_VECTOR_LENGTH)
4807#undef VISIT_SIMD_BINOP_LANE_SIZE_VECTOR_LENGTH
4808#undef SIMD_BINOP_LANE_SIZE_VECTOR_LENGTH_LIST
4810#define VISIT_SIMD_BINOP(Opcode) \
4811 void InstructionSelectorT::Visit##Opcode(OpIndex node) { \
4812 X64OperandGeneratorT g(this); \
4813 const Operation& op = Get(node); \
4814 DCHECK_EQ(op.input_count, 2); \
4815 if (IsSupported(AVX)) { \
4816 Emit(kX64##Opcode, g.DefineAsRegister(node), g.UseRegister(op.input(0)), \
4817 g.UseRegister(op.input(1))); \
4819 Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
4820 g.UseRegister(op.input(0)), g.UseRegister(op.input(1))); \
4824SIMD_BINOP_SSE_AVX_LIST(VISIT_SIMD_BINOP)
4825#undef VISIT_SIMD_BINOP
4826#undef SIMD_BINOP_SSE_AVX_LIST
4828#define VISIT_SIMD_BINOP_LANE_SIZE_VECTOR_LENGTH(Name, Opcode, LaneSize, \
4830 void InstructionSelectorT::Visit##Name(OpIndex node) { \
4831 X64OperandGeneratorT g(this); \
4832 const Operation& op = Get(node); \
4833 DCHECK_EQ(op.input_count, 2); \
4834 if (IsSupported(AVX)) { \
4835 Emit(kX64##Opcode | LaneSizeField::encode(LaneSize) | \
4836 VectorLengthField::encode(VectorLength), \
4837 g.DefineAsRegister(node), g.UseRegister(op.input(0)), \
4838 g.UseRegister(op.input(1))); \
4840 Emit(kX64##Opcode | LaneSizeField::encode(LaneSize) | \
4841 VectorLengthField::encode(VectorLength), \
4842 g.DefineSameAsFirst(node), g.UseRegister(op.input(0)), \
4843 g.UseRegister(op.input(1))); \
4847SIMD_BINOP_SSE_AVX_LANE_SIZE_VECTOR_LENGTH_LIST(
4848 VISIT_SIMD_BINOP_LANE_SIZE_VECTOR_LENGTH)
4849#undef VISIT_SIMD_BINOP_LANE_SIZE_VECTOR_LENGTH
4850#undef SIMD_BINOP_SSE_AVX_LANE_SIZE_VECTOR_LENGTH_LIST
4852#define VISIT_SIMD_F16x8_BINOP(Name, Opcode) \
4853 void InstructionSelectorT::Visit##Name(OpIndex node) { \
4854 X64OperandGeneratorT g(this); \
4855 const Operation& op = Get(node); \
4856 DCHECK_EQ(op.input_count, 2); \
4857 InstructionOperand temps[] = {g.TempSimd256Register(), \
4858 g.TempSimd256Register()}; \
4859 size_t temp_count = arraysize(temps); \
4860 Emit(kX64##Opcode | LaneSizeField::encode(kL16) | \
4861 VectorLengthField::encode(kV128), \
4862 g.DefineAsRegister(node), g.UseUniqueRegister(op.input(0)), \
4863 g.UseUniqueRegister(op.input(1)), temp_count, temps); \
4866SIMD_F16x8_BINOP_LIST(VISIT_SIMD_F16x8_BINOP)
4867#undef VISIT_SIMD_F16x8_BINOP
4868#undef SIMD_F16x8_BINOP_LIST
void InstructionSelectorT::VisitV128AnyTrue(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd128TestOp& op = Cast<Simd128TestOp>(node);
  Emit(kX64V128AnyTrue, g.DefineAsRegister(node),
       g.UseUniqueRegister(op.input()));
}

static bool IsV128ZeroConst(InstructionSelectorT* selector, OpIndex node) {
  const Operation& op = selector->Get(node);
  if (auto constant = op.TryCast<Simd128ConstantOp>()) {
    return constant->IsZero();
  }
  return false;
}

static bool MatchSimd128Constant(InstructionSelectorT* selector, OpIndex node,
                                 std::array<uint8_t, kSimd128Size>* constant) {
  const Operation& op = selector->Get(node);
  if (auto c = op.TryCast<Simd128ConstantOp>()) {
    std::memcpy(constant, c->value, kSimd128Size);
    return true;
  }
  return false;
}
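// S128Select is strength-reduced below when one data input is all zeros:
// zero as the false value turns the select into an `and` with the mask, and
// zero as the true value turns it into an `andnot`.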
void InstructionSelectorT::VisitS128Select(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd128TernaryOp& op = Cast<Simd128TernaryOp>(node);
  InstructionOperand dst =
      IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
  if (IsV128ZeroConst(this, op.input(2))) {
    Emit(kX64SAnd | VectorLengthField::encode(kV128), dst,
         g.UseRegister(op.input(0)), g.UseRegister(op.input(1)));
  } else if (IsV128ZeroConst(this, op.input(1))) {
    Emit(kX64SAndNot | VectorLengthField::encode(kV128), dst,
         g.UseRegister(op.input(0)), g.UseRegister(op.input(2)));
  } else {
    Emit(kX64SSelect | VectorLengthField::encode(kV128), dst,
         g.UseRegister(op.input(0)), g.UseRegister(op.input(1)),
         g.UseRegister(op.input(2)));
  }
4923void InstructionSelectorT::VisitS256Select(OpIndex node) {
4924#ifdef V8_ENABLE_WASM_SIMD256_REVEC
4925 X64OperandGeneratorT g(
this);
4926 const Simd256TernaryOp& op = Cast<Simd256TernaryOp>(node);
4927 Emit(kX64SSelect | VectorLengthField::encode(kV256), g.DefineAsRegister(node),
4928 g.UseRegister(op.input(0)), g.UseRegister(op.input(1)),
4929 g.UseRegister(op.input(2)));
4935void InstructionSelectorT::VisitS128AndNot(OpIndex node) {
4936 X64OperandGeneratorT g(
this);
4937 const Simd128BinopOp& op = Cast<Simd128BinopOp>(node);
4940 Emit(kX64SAndNot | VectorLengthField::encode(kV128),
4941 IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node),
4942 g.UseRegister(op.right()), g.UseRegister(op.left()));
4945void InstructionSelectorT::VisitS256AndNot(OpIndex node) {
4946#ifdef V8_ENABLE_WASM_SIMD256_REVEC
4947 X64OperandGeneratorT g(
this);
4948 const Simd256BinopOp& op = Cast<Simd256BinopOp>(node);
4951 Emit(kX64SAndNot | VectorLengthField::encode(kV256),
4952 IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node),
4953 g.UseRegister(op.right()), g.UseRegister(op.left()));
4959void InstructionSelectorT::VisitF64x2Abs(OpIndex node) {
4960 const Simd128UnaryOp& op = Cast<Simd128UnaryOp>(node);
4962 VisitFloatUnop(
this, node, op.input(),
4963 kX64FAbs | LaneSizeField::encode(kL64) |
4964 VectorLengthField::encode(kV128));
4967void InstructionSelectorT::VisitF64x2Neg(OpIndex node) {
4968 const Simd128UnaryOp& op = Cast<Simd128UnaryOp>(node);
4970 VisitFloatUnop(
this, node, op.input(),
4971 kX64FNeg | LaneSizeField::encode(kL64) |
4972 VectorLengthField::encode(kV128));
void InstructionSelectorT::VisitF32x4UConvertI32x4(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd128UnaryOp& op = Cast<Simd128UnaryOp>(node);
  OpIndex value = op.input();
  bool can_use_sign_convert = false;
  if (const Simd128UnaryOp* unop =
          this->Get(value).template TryCast<Simd128UnaryOp>()) {
    if (unop->kind == Simd128UnaryOp::Kind::kI32x4UConvertI16x8Low ||
        unop->kind == Simd128UnaryOp::Kind::kI32x4UConvertI16x8High) {
      can_use_sign_convert = true;
    }
  }
  if (can_use_sign_convert) {
    Emit(kX64F32x4SConvertI32x4, g.DefineAsRegister(node),
         g.UseRegister(op.input()));
  } else {
    Emit(kX64F32x4UConvertI32x4, g.DefineSameAsFirst(node),
         g.UseRegister(op.input()));
5003#define VISIT_SIMD_QFMOP(Opcode) \
5004 void InstructionSelectorT::Visit##Opcode(OpIndex node) { \
5005 X64OperandGeneratorT g(this); \
5006 const Operation& op = Get(node); \
5007 DCHECK_EQ(op.input_count, 3); \
5008 Emit(kX64##Opcode, g.UseRegister(node), g.UseRegister(op.input(0)), \
5009 g.UseRegister(op.input(1)), g.UseRegister(op.input(2))); \
5016#ifdef V8_ENABLE_WASM_SIMD256_REVEC
5022#undef VISIT_SIMD_QFMOP
5024#define VISIT_SIMD_F16x8_QFMOP(Opcode) \
5025 void InstructionSelectorT::Visit##Opcode(OpIndex node) { \
5026 X64OperandGeneratorT g(this); \
5027 const Operation& op = Get(node); \
5028 DCHECK_EQ(op.input_count, 3); \
5029 InstructionOperand temps[] = {g.TempSimd256Register(), \
5030 g.TempSimd256Register()}; \
5031 Emit(kX64##Opcode, g.UseRegister(node), g.UseUniqueRegister(op.input(0)), \
5032 g.UseUniqueRegister(op.input(1)), g.UseUniqueRegister(op.input(2)), \
5033 arraysize(temps), temps); \
5036VISIT_SIMD_F16x8_QFMOP(F16x8Qfma) VISIT_SIMD_F16x8_QFMOP(F16x8Qfms)
5037#undef VISIT_SIMD_F16x8_QFMOP
void InstructionSelectorT::VisitI64x2Neg(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd128UnaryOp& op = Cast<Simd128UnaryOp>(node);
  InstructionOperand operand0 = IsSupported(AVX)
                                    ? g.UseRegister(op.input())
                                    : g.UseUniqueRegister(op.input());
  Emit(
      kX64INeg | LaneSizeField::encode(kL64) | VectorLengthField::encode(kV128),
      g.DefineAsRegister(node), operand0);
}

void InstructionSelectorT::VisitI64x2ShrS(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd128ShiftOp& op = Cast<Simd128ShiftOp>(node);
  InstructionOperand dst =
      IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
  if (g.CanBeImmediate(op.shift())) {
    Emit(kX64IShrS | LaneSizeField::encode(kL64) |
             VectorLengthField::encode(kV128),
         dst, g.UseRegister(op.input()), g.UseImmediate(op.shift()));
  } else {
    InstructionOperand temps[] = {g.TempSimd128Register()};
    Emit(kX64IShrS | LaneSizeField::encode(kL64) |
             VectorLengthField::encode(kV128),
         dst, g.UseUniqueRegister(op.input()), g.UseRegister(op.shift()),
         arraysize(temps), temps);
  }
}

void InstructionSelectorT::VisitI64x2Mul(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd128BinopOp& op = Cast<Simd128BinopOp>(node);
  InstructionOperand temps[] = {g.TempSimd128Register()};
  Emit(
      kX64IMul | LaneSizeField::encode(kL64) | VectorLengthField::encode(kV128),
      g.DefineAsRegister(node), g.UseUniqueRegister(op.left()),
      g.UseUniqueRegister(op.right()), arraysize(temps), temps);
5083void InstructionSelectorT::VisitI64x4Mul(OpIndex node) {
5084#ifdef V8_ENABLE_WASM_SIMD256_REVEC
5085 X64OperandGeneratorT g(
this);
5086 const Simd256BinopOp& op = Cast<Simd256BinopOp>(node);
5088 InstructionOperand temps[] = {g.TempSimd256Register()};
5090 kX64IMul | LaneSizeField::encode(kL64) | VectorLengthField::encode(kV256),
5091 g.DefineAsRegister(node), g.UseUniqueRegister(op.left()),
5092 g.UseUniqueRegister(op.right()),
arraysize(temps), temps);
5098void InstructionSelectorT::VisitI32x4SConvertF32x4(OpIndex node) {
5099 X64OperandGeneratorT g(
this);
5100 const Simd128UnaryOp& op = Cast<Simd128UnaryOp>(node);
5102 Emit(kX64I32x4SConvertF32x4,
5103 IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node),
5104 g.UseRegister(op.input()));
5107void InstructionSelectorT::VisitI32x8SConvertF32x8(OpIndex node) {
5108#ifdef V8_ENABLE_WASM_SIMD256_REVEC
5109 X64OperandGeneratorT g(
this);
5110 const Simd256UnaryOp& op = Cast<Simd256UnaryOp>(node);
5112 Emit(kX64I32x8SConvertF32x8, g.DefineAsRegister(node),
5113 g.UseRegister(op.input()));
5119void InstructionSelectorT::VisitI32x4UConvertF32x4(OpIndex node) {
5120 X64OperandGeneratorT g(
this);
5121 const Simd128UnaryOp& op = Cast<Simd128UnaryOp>(node);
5123 InstructionOperand temps[] = {g.TempSimd128Register(),
5124 g.TempSimd128Register()};
5125 Emit(kX64I32x4UConvertF32x4, g.DefineSameAsFirst(node),
5126 g.UseRegister(op.input()),
arraysize(temps), temps);
5129void InstructionSelectorT::VisitI32x8UConvertF32x8(OpIndex node) {
5130#ifdef V8_ENABLE_WASM_SIMD256_REVEC
5131 X64OperandGeneratorT g(
this);
5132 const Simd256UnaryOp& op = Cast<Simd256UnaryOp>(node);
5134 InstructionOperand temps[] = {g.TempSimd256Register(),
5135 g.TempSimd256Register()};
5136 Emit(kX64I32x8UConvertF32x8, g.DefineSameAsFirst(node),
5137 g.UseRegister(op.input()),
arraysize(temps), temps);
5143#if V8_ENABLE_WASM_SIMD256_REVEC
5144void InstructionSelectorT::VisitF32x8UConvertI32x8(OpIndex node) {
5145 X64OperandGeneratorT g(
this);
5146 const Simd256UnaryOp& op = Cast<Simd256UnaryOp>(node);
5152 bool can_use_sign_convert =
false;
5153 if (Is<Opmask::kSimd256I32x8UConvertI16x8>(value)) {
5154 can_use_sign_convert =
true;
5157 if (can_use_sign_convert) {
5158 Emit(kX64F32x8SConvertI32x8, g.DefineAsRegister(node),
5159 g.UseRegister(op.input()));
5161 Emit(kX64F32x8UConvertI32x8, g.DefineSameAsFirst(node),
5162 g.UseRegister(op.input()));
5166void InstructionSelectorT::VisitExtractF128(OpIndex node) {
5167 X64OperandGeneratorT g(
this);
5168 const Simd256Extract128LaneOp& op = Cast<Simd256Extract128LaneOp>(node);
5172 Emit(kX64ExtractF128, g.DefineAsRegister(node), g.UseRegister(op.input()),
5173 g.UseImmediate(op.lane));
void InstructionSelectorT::VisitI8x32Shuffle(OpIndex node) { UNREACHABLE(); }
5181void InstructionSelectorT::VisitInt32AbsWithOverflow(OpIndex node) {
5185void InstructionSelectorT::VisitInt64AbsWithOverflow(OpIndex node) {
5189#if V8_ENABLE_WEBASSEMBLY
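// Shuffle-matching helpers: a 16x8 "half shuffle" keeps every lane inside
// its own 64-bit half (expressible with low/high half shuffles plus an
// optional blend), and TryMatchShufps recognizes the shufps pattern where
// the two low lanes come from the first input and the two high lanes from
// the second.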
bool TryMatch16x8HalfShuffle(uint8_t* shuffle16x8, uint8_t* blend_mask) {
  *blend_mask = 0;
  for (int i = 0; i < 8; i++) {
    if ((shuffle16x8[i] & 0x4) != (i & 0x4)) return false;
    *blend_mask |= (shuffle16x8[i] > 7 ? 1 : 0) << i;
  }
  return true;
}

bool TryMatchShufps(const uint8_t* shuffle32x4) {
  return shuffle32x4[0] < 4 && shuffle32x4[1] < 4 && shuffle32x4[2] > 3 &&
         shuffle32x4[3] > 3;
}

static bool TryMatchOneInputIsZeros(InstructionSelectorT* selector,
                                    TurboshaftAdapter::SimdShuffleView& view,
                                    uint8_t* shuffle, bool* needs_swap) {
  *needs_swap = false;
  bool input0_is_zero = IsV128ZeroConst(selector, view.input(0));
  bool input1_is_zero = IsV128ZeroConst(selector, view.input(1));
  if (!input0_is_zero && !input1_is_zero) {
    return false;
  }
  if (input0_is_zero) {
void InstructionSelectorT::VisitI8x16Shuffle(OpIndex node) {
  uint8_t shuffle[kSimd128Size];
  bool is_swizzle;
  auto view = this->simd_shuffle_view(node);
  CanonicalizeShuffle(view, shuffle, &is_swizzle);

  static const int kMaxImms = 6;
  uint32_t imms[kMaxImms];
  int imm_count = 0;
  static const int kMaxTemps = 2;
  InstructionOperand temps[kMaxTemps];

  X64OperandGeneratorT g(this);
  bool no_same_as_first = is_swizzle;
  bool src0_needs_reg = true;
  bool src1_needs_reg = false;
5256 uint8_t shuffle32x4[4];
5257 uint8_t shuffle16x8[8];
5259 const wasm::ShuffleEntry<kSimd128Size>* arch_shuffle;
5261 if (wasm::SimdShuffle::TryMatchConcat(shuffle, &
offset)) {
5262 if (wasm::SimdShuffle::TryMatch32x4Rotate(shuffle, shuffle32x4,
5264 uint8_t shuffle_mask = wasm::SimdShuffle::PackShuffle4(shuffle32x4);
5265 opcode = kX64S32x4Rotate;
5266 imms[imm_count++] = shuffle_mask;
5269 SwapShuffleInputs(view);
5271 no_same_as_first = CpuFeatures::IsSupported(AVX);
5273 src1_needs_reg =
true;
5274 opcode = kX64S8x16Alignr;
5276 imms[imm_count++] =
offset;
5278 }
else if (wasm::SimdShuffle::TryMatchArchShuffle(shuffle, is_swizzle,
5280 opcode = arch_shuffle->opcode;
5281 src0_needs_reg = arch_shuffle->src0_needs_reg;
5284 src1_needs_reg = arch_shuffle->src1_needs_reg;
5286 IsSupported(AVX) && arch_shuffle->no_same_as_first_if_avx;
5287 }
else if (wasm::SimdShuffle::TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
5288 uint8_t shuffle_mask = wasm::SimdShuffle::PackShuffle4(shuffle32x4);
5290 if (wasm::SimdShuffle::TryMatchIdentity(shuffle)) {
5292 OpIndex input = view.input(0);
5295 MarkAsDefined(node);
5296 SetRename(node, input);
5300 opcode = kX64S32x4Swizzle;
5301 no_same_as_first =
true;
5305 src0_needs_reg =
true;
5306 imms[imm_count++] = shuffle_mask;
5311 if (wasm::SimdShuffle::TryMatchBlend(shuffle)) {
5312 opcode = kX64S16x8Blend;
5313 uint8_t blend_mask = wasm::SimdShuffle::PackBlend4(shuffle32x4);
5314 imms[imm_count++] = blend_mask;
5315 no_same_as_first = CpuFeatures::IsSupported(AVX);
5316 }
else if (TryMatchShufps(shuffle32x4)) {
5317 opcode = kX64Shufps;
5318 uint8_t
mask = wasm::SimdShuffle::PackShuffle4(shuffle32x4);
5319 imms[imm_count++] =
mask;
5320 src1_needs_reg =
true;
5321 no_same_as_first = IsSupported(AVX);
5323 opcode = kX64S32x4Shuffle;
5324 no_same_as_first =
true;
5328 src0_needs_reg =
true;
5329 src1_needs_reg =
true;
5330 imms[imm_count++] = shuffle_mask;
5331 uint8_t blend_mask = wasm::SimdShuffle::PackBlend4(shuffle32x4);
5332 imms[imm_count++] = blend_mask;
5335 }
else if (wasm::SimdShuffle::TryMatch16x8Shuffle(shuffle, shuffle16x8)) {
5337 if (wasm::SimdShuffle::TryMatchBlend(shuffle)) {
5338 opcode = kX64S16x8Blend;
5339 blend_mask = wasm::SimdShuffle::PackBlend8(shuffle16x8);
5340 imms[imm_count++] = blend_mask;
5341 no_same_as_first = CpuFeatures::IsSupported(AVX);
5342 }
else if (wasm::SimdShuffle::TryMatchSplat<8>(shuffle, &index)) {
5343 opcode = kX64S16x8Dup;
5344 src0_needs_reg =
false;
5345 imms[imm_count++] =
index;
5346 }
else if (TryMatch16x8HalfShuffle(shuffle16x8, &blend_mask)) {
5347 opcode = is_swizzle ? kX64S16x8HalfShuffle1 : kX64S16x8HalfShuffle2;
5349 no_same_as_first =
true;
5350 src0_needs_reg =
false;
5351 uint8_t mask_lo = wasm::SimdShuffle::PackShuffle4(shuffle16x8);
5352 uint8_t mask_hi = wasm::SimdShuffle::PackShuffle4(shuffle16x8 + 4);
5353 imms[imm_count++] = mask_lo;
5354 imms[imm_count++] = mask_hi;
5355 if (!is_swizzle) imms[imm_count++] = blend_mask;
5357 }
else if (wasm::SimdShuffle::TryMatchSplat<16>(shuffle, &index)) {
5358 opcode = kX64S8x16Dup;
5359 no_same_as_first =
false;
5360 src0_needs_reg =
true;
5361 imms[imm_count++] =
index;
5362 }
else if (TryMatchOneInputIsZeros(
this, view, shuffle, &needs_swap)) {
5366 SwapShuffleInputs(view);
5371 if (wasm::SimdShuffle::TryMatchByteToDwordZeroExtend(shuffle)) {
5372 opcode = kX64I32X4ShiftZeroExtendI8x16;
5373 no_same_as_first =
true;
5374 src0_needs_reg =
true;
5375 imms[imm_count++] = shuffle[0];
5382 if (shuffle[
i] >= kSimd128Size) {
5388 if (opcode == kX64I8x16Shuffle) {
5390 no_same_as_first = !is_swizzle;
5391 src0_needs_reg = !no_same_as_first;
5392 imms[imm_count++] = wasm::SimdShuffle::Pack4Lanes(shuffle);
5393 imms[imm_count++] = wasm::SimdShuffle::Pack4Lanes(shuffle + 4);
5394 imms[imm_count++] = wasm::SimdShuffle::Pack4Lanes(shuffle + 8);
5395 imms[imm_count++] = wasm::SimdShuffle::Pack4Lanes(shuffle + 12);
5396 temps[temp_count++] = g.TempSimd128Register();
5401 OpIndex input0 = view.input(0);
5402 InstructionOperand dst =
5403 no_same_as_first ? g.DefineAsRegister(view) : g.DefineSameAsFirst(view);
5406 InstructionOperand src0 =
5407 temp_count ? g.UseUniqueRegister(input0) : g.UseRegister(input0);
5408 USE(src0_needs_reg);
5411 InstructionOperand inputs[2 + kMaxImms + kMaxTemps];
5414 OpIndex input1 = view.input(1);
5418 temp_count ? g.UseUniqueRegister(input1) : g.UseRegister(input1);
5419 USE(src1_needs_reg);
5421 for (
int i = 0;
i < imm_count; ++
i) {
5424 Emit(opcode, 1, &dst, input_count, inputs, temp_count, temps);
void InstructionSelectorT::VisitI8x16Swizzle(OpIndex node) {
  const Simd128BinopOp& binop = Cast<Simd128BinopOp>(node);
  DCHECK(binop.kind == any_of(Simd128BinopOp::Kind::kI8x16Swizzle,
                              Simd128BinopOp::Kind::kI8x16RelaxedSwizzle));
  bool relaxed = binop.kind == Simd128BinopOp::Kind::kI8x16RelaxedSwizzle;
  OpIndex left = binop.left();
  OpIndex right = binop.right();

  InstructionCode opcode = kX64I8x16Swizzle;
  if (relaxed) {
    opcode |= MiscField::encode(true);
  } else {
    std::array<uint8_t, kSimd128Size> imms;
    if (MatchSimd128Constant(this, right, &imms)) {
      // If the index vector is a compile-time constant whose lanes are all in
      // range or have the top bit set, the out-of-range clamping otherwise
      // done by the code generator can be skipped.
      opcode |=
          MiscField::encode(wasm::SimdSwizzle::AllInRangeOrTopBitSet(imms));
    }
  }

  X64OperandGeneratorT g(this);
  Emit(opcode,
       IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node),
       g.UseRegister(left), g.UseRegister(right));
}
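
// The SSE4.1 variable-blend instructions (pblendvb/blendvps/blendvpd) take
// their selection mask implicitly in xmm0 and overwrite their first operand,
// which is why the non-AVX path below pins input(0) to xmm0 and defines the
// result same-as-first. The AVX VEX encodings have an explicit mask operand
// and a non-destructive destination.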
void VisitRelaxedLaneSelect(InstructionSelectorT* selector, OpIndex node,
                            InstructionCode code) {
  X64OperandGeneratorT g(selector);
  const Operation& op = selector->Get(node);
  if (selector->IsSupported(AVX)) {
    selector->Emit(code, g.DefineAsRegister(node), g.UseRegister(op.input(2)),
                   g.UseRegister(op.input(1)), g.UseRegister(op.input(0)));
    return;
  }
  selector->Emit(code, g.DefineSameAsFirst(node), g.UseRegister(op.input(2)),
                 g.UseRegister(op.input(1)), g.UseFixed(op.input(0), xmm0));
}
void InstructionSelectorT::VisitI8x16RelaxedLaneSelect(OpIndex node) {
  VisitRelaxedLaneSelect(this, node,
                         kX64Pblendvb | VectorLengthField::encode(kV128));
}

void InstructionSelectorT::VisitI16x8RelaxedLaneSelect(OpIndex node) {
  VisitRelaxedLaneSelect(this, node,
                         kX64Pblendvb | VectorLengthField::encode(kV128));
}

void InstructionSelectorT::VisitI32x4RelaxedLaneSelect(OpIndex node) {
  VisitRelaxedLaneSelect(this, node,
                         kX64Blendvps | VectorLengthField::encode(kV128));
}

void InstructionSelectorT::VisitI64x2RelaxedLaneSelect(OpIndex node) {
  VisitRelaxedLaneSelect(this, node,
                         kX64Blendvpd | VectorLengthField::encode(kV128));
}
#ifdef V8_ENABLE_WASM_SIMD256_REVEC
void InstructionSelectorT::VisitI8x32RelaxedLaneSelect(OpIndex node) {
  VisitRelaxedLaneSelect(this, node,
                         kX64Pblendvb | VectorLengthField::encode(kV256));
}

void InstructionSelectorT::VisitI16x16RelaxedLaneSelect(OpIndex node) {
  VisitRelaxedLaneSelect(this, node,
                         kX64Pblendvb | VectorLengthField::encode(kV256));
}

void InstructionSelectorT::VisitI32x8RelaxedLaneSelect(OpIndex node) {
  VisitRelaxedLaneSelect(this, node,
                         kX64Blendvps | VectorLengthField::encode(kV256));
}

void InstructionSelectorT::VisitI64x4RelaxedLaneSelect(OpIndex node) {
  VisitRelaxedLaneSelect(this, node,
                         kX64Blendvpd | VectorLengthField::encode(kV256));
}
#endif  // V8_ENABLE_WASM_SIMD256_REVEC
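
// For the pseudo-min/max operations the operands are emitted in reverse order
// (right before left): Wasm defines pmin(a, b) as (b < a) ? b : a, which is
// exactly what the x64 min/max instructions compute when b is the first
// (destination) operand, including the NaN and +/-0 behaviour.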
void InstructionSelectorT::VisitF16x8Pmin(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd128BinopOp& op = Cast<Simd128BinopOp>(node);
  InstructionOperand dst = g.DefineAsRegister(node);
  InstructionCode instr_code = kX64Minph | VectorLengthField::encode(kV128);
  InstructionOperand temps[] = {g.TempSimd256Register(),
                                g.TempSimd256Register()};
  size_t temp_count = arraysize(temps);
  Emit(instr_code, dst, g.UseUniqueRegister(op.right()),
       g.UseUniqueRegister(op.left()), temp_count, temps);
}

void InstructionSelectorT::VisitF16x8Pmax(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd128BinopOp& op = Cast<Simd128BinopOp>(node);
  InstructionOperand dst = g.DefineAsRegister(node);
  InstructionCode instr_code = kX64Maxph | VectorLengthField::encode(kV128);
  InstructionOperand temps[] = {g.TempSimd256Register(),
                                g.TempSimd256Register()};
  size_t temp_count = arraysize(temps);
  Emit(instr_code, dst, g.UseUniqueRegister(op.right()),
       g.UseUniqueRegister(op.left()), temp_count, temps);
}
void InstructionSelectorT::VisitF16x8DemoteF64x2Zero(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd128UnaryOp& op = Cast<Simd128UnaryOp>(node);
  InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register(),
                                g.TempSimd128Register()};
  size_t temp_count = arraysize(temps);
  Emit(kX64F16x8DemoteF64x2Zero, g.DefineAsRegister(node),
       g.UseUniqueRegister(op.input()), temp_count, temps);
}
void InstructionSelectorT::VisitF32x4Pmin(OpIndex node) {
  VisitMinOrMax<kV128>(this, node, kX64Minps, true);
}

void InstructionSelectorT::VisitF32x4Pmax(OpIndex node) {
  VisitMinOrMax<kV128>(this, node, kX64Maxps, true);
}

void InstructionSelectorT::VisitF64x2Pmin(OpIndex node) {
  VisitMinOrMax<kV128>(this, node, kX64Minpd, true);
}

void InstructionSelectorT::VisitF64x2Pmax(OpIndex node) {
  VisitMinOrMax<kV128>(this, node, kX64Maxpd, true);
}

void InstructionSelectorT::VisitF32x8Pmin(OpIndex node) {
  VisitMinOrMax<kV256>(this, node, kX64F32x8Pmin, true);
}

void InstructionSelectorT::VisitF32x8Pmax(OpIndex node) {
  VisitMinOrMax<kV256>(this, node, kX64F32x8Pmax, true);
}

void InstructionSelectorT::VisitF64x4Pmin(OpIndex node) {
  VisitMinOrMax<kV256>(this, node, kX64F64x4Pmin, true);
}

void InstructionSelectorT::VisitF64x4Pmax(OpIndex node) {
  VisitMinOrMax<kV256>(this, node, kX64F64x4Pmax, true);
}

void InstructionSelectorT::VisitF32x4RelaxedMin(OpIndex node) {
  VisitMinOrMax<kV128>(this, node, kX64Minps, false);
}

void InstructionSelectorT::VisitF32x4RelaxedMax(OpIndex node) {
  VisitMinOrMax<kV128>(this, node, kX64Maxps, false);
}

void InstructionSelectorT::VisitF64x2RelaxedMin(OpIndex node) {
  VisitMinOrMax<kV128>(this, node, kX64Minpd, false);
}

void InstructionSelectorT::VisitF64x2RelaxedMax(OpIndex node) {
  VisitMinOrMax<kV128>(this, node, kX64Maxpd, false);
}
void InstructionSelectorT::VisitI32x4ExtAddPairwiseI16x8S(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd128UnaryOp& op = Cast<Simd128UnaryOp>(node);
  InstructionOperand dst = CpuFeatures::IsSupported(AVX)
                               ? g.DefineAsRegister(node)
                               : g.DefineSameAsFirst(node);
  Emit(kX64I32x4ExtAddPairwiseI16x8S, dst, g.UseRegister(op.input()));
}

void InstructionSelectorT::VisitI32x8ExtAddPairwiseI16x16S(OpIndex node) {
#ifdef V8_ENABLE_WASM_SIMD256_REVEC
  X64OperandGeneratorT g(this);
  const Simd256UnaryOp& op = Cast<Simd256UnaryOp>(node);
  Emit(kX64I32x8ExtAddPairwiseI16x16S, g.DefineAsRegister(node),
       g.UseRegister(op.input()));
#else
  UNIMPLEMENTED();
#endif
}

void InstructionSelectorT::VisitI32x4ExtAddPairwiseI16x8U(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd128UnaryOp& op = Cast<Simd128UnaryOp>(node);
  InstructionOperand dst = CpuFeatures::IsSupported(AVX)
                               ? g.DefineAsRegister(node)
                               : g.DefineSameAsFirst(node);
  Emit(kX64I32x4ExtAddPairwiseI16x8U, dst, g.UseRegister(op.input()));
}

void InstructionSelectorT::VisitI32x8ExtAddPairwiseI16x16U(OpIndex node) {
#ifdef V8_ENABLE_WASM_SIMD256_REVEC
  X64OperandGeneratorT g(this);
  const Simd256UnaryOp& op = Cast<Simd256UnaryOp>(node);
  Emit(kX64I32x8ExtAddPairwiseI16x16U, g.DefineAsRegister(node),
       g.UseRegister(op.input()));
#else
  UNIMPLEMENTED();
#endif
}

void InstructionSelectorT::VisitI16x8ExtAddPairwiseI8x16S(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd128UnaryOp& op = Cast<Simd128UnaryOp>(node);
  Emit(kX64I16x8ExtAddPairwiseI8x16S, g.DefineAsRegister(node),
       g.UseUniqueRegister(op.input()));
}

void InstructionSelectorT::VisitI16x16ExtAddPairwiseI8x32S(OpIndex node) {
#ifdef V8_ENABLE_WASM_SIMD256_REVEC
  X64OperandGeneratorT g(this);
  const Simd256UnaryOp& op = Cast<Simd256UnaryOp>(node);
  Emit(kX64I16x16ExtAddPairwiseI8x32S, g.DefineAsRegister(node),
       g.UseUniqueRegister(op.input()));
#else
  UNIMPLEMENTED();
#endif
}

void InstructionSelectorT::VisitI16x8ExtAddPairwiseI8x16U(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd128UnaryOp& op = Cast<Simd128UnaryOp>(node);
  InstructionOperand dst = CpuFeatures::IsSupported(AVX)
                               ? g.DefineAsRegister(node)
                               : g.DefineSameAsFirst(node);
  Emit(kX64I16x8ExtAddPairwiseI8x16U, dst, g.UseRegister(op.input()));
}

void InstructionSelectorT::VisitI16x16ExtAddPairwiseI8x32U(OpIndex node) {
#ifdef V8_ENABLE_WASM_SIMD256_REVEC
  const Simd256UnaryOp& op = Cast<Simd256UnaryOp>(node);
  X64OperandGeneratorT g(this);
  Emit(kX64I16x16ExtAddPairwiseI8x32U, g.DefineAsRegister(node),
       g.UseUniqueRegister(op.input()));
#else
  UNIMPLEMENTED();
#endif
}
void InstructionSelectorT::VisitI8x16Popcnt(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd128UnaryOp& op = Cast<Simd128UnaryOp>(node);
  InstructionOperand temps[] = {g.TempSimd128Register()};
  Emit(kX64I8x16Popcnt, g.DefineAsRegister(node),
       g.UseUniqueRegister(op.input()), arraysize(temps), temps);
}

void InstructionSelectorT::VisitF64x2ConvertLowI32x4U(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd128UnaryOp& op = Cast<Simd128UnaryOp>(node);
  InstructionOperand dst =
      IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
  Emit(kX64F64x2ConvertLowI32x4U, dst, g.UseRegister(op.input()));
}
void InstructionSelectorT::VisitI32x4TruncSatF64x2SZero(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd128UnaryOp& op = Cast<Simd128UnaryOp>(node);
  if (CpuFeatures::IsSupported(AVX)) {
    Emit(kX64I32x4TruncSatF64x2SZero, g.DefineAsRegister(node),
         g.UseUniqueRegister(op.input()));
  } else {
    Emit(kX64I32x4TruncSatF64x2SZero, g.DefineSameAsFirst(node),
         g.UseRegister(op.input()));
  }
}

void InstructionSelectorT::VisitI32x4TruncSatF64x2UZero(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd128UnaryOp& op = Cast<Simd128UnaryOp>(node);
  InstructionOperand dst = CpuFeatures::IsSupported(AVX)
                               ? g.DefineAsRegister(node)
                               : g.DefineSameAsFirst(node);
  Emit(kX64I32x4TruncSatF64x2UZero, dst, g.UseRegister(op.input()));
}
void InstructionSelectorT::VisitI32x4RelaxedTruncF64x2SZero(OpIndex node) {
  const Simd128UnaryOp& op = Cast<Simd128UnaryOp>(node);
  VisitFloatUnop(this, node, op.input(), kX64Cvttpd2dq);
}

void InstructionSelectorT::VisitI32x4RelaxedTruncF64x2UZero(OpIndex node) {
  const Simd128UnaryOp& op = Cast<Simd128UnaryOp>(node);
  VisitFloatUnop(this, node, op.input(), kX64I32x4TruncF64x2UZero);
}

void InstructionSelectorT::VisitI32x4RelaxedTruncF32x4S(OpIndex node) {
  const Simd128UnaryOp& op = Cast<Simd128UnaryOp>(node);
  VisitFloatUnop(this, node, op.input(), kX64Cvttps2dq);
}

void InstructionSelectorT::VisitI32x4RelaxedTruncF32x4U(OpIndex node) {
  const Simd128UnaryOp& op = Cast<Simd128UnaryOp>(node);
  X64OperandGeneratorT g(this);
  InstructionOperand temps[] = {g.TempSimd128Register()};
  if (IsSupported(AVX)) {
    Emit(kX64I32x4TruncF32x4U, g.DefineAsRegister(node),
         g.UseRegister(op.input()), arraysize(temps), temps);
  } else {
    Emit(kX64I32x4TruncF32x4U, g.DefineSameAsFirst(node),
         g.UseRegister(op.input()), arraysize(temps), temps);
  }
}
void InstructionSelectorT::VisitI32x8RelaxedTruncF32x8S(OpIndex node) {
#ifdef V8_ENABLE_WASM_SIMD256_REVEC
  const Simd256UnaryOp& op = Cast<Simd256UnaryOp>(node);
  VisitFloatUnop(this, node, op.input(),
                 kX64Cvttps2dq | VectorLengthField::encode(kV256));
#else
  UNIMPLEMENTED();
#endif
}

void InstructionSelectorT::VisitI32x8RelaxedTruncF32x8U(OpIndex node) {
#ifdef V8_ENABLE_WASM_SIMD256_REVEC
  const Simd256UnaryOp& op = Cast<Simd256UnaryOp>(node);
  DCHECK(CpuFeatures::IsSupported(AVX) && CpuFeatures::IsSupported(AVX2));
  X64OperandGeneratorT g(this);
  InstructionOperand temps[] = {g.TempSimd256Register()};
  Emit(kX64I32x8TruncF32x8U, g.DefineAsRegister(node),
       g.UseRegister(op.input()), arraysize(temps), temps);
#else
  UNIMPLEMENTED();
#endif
}
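
// 64x2 signed comparisons only have direct hardware support with SSE4.2
// (pcmpgtq) or AVX; without either, the code generator expands them into a
// multi-instruction sequence, which is why the operands must then live in
// unique registers.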
void InstructionSelectorT::VisitI64x2GtS(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd128BinopOp& op = Cast<Simd128BinopOp>(node);
  if (CpuFeatures::IsSupported(AVX)) {
    Emit(kX64IGtS | LaneSizeField::encode(kL64) |
             VectorLengthField::encode(kV128),
         g.DefineAsRegister(node), g.UseRegister(op.left()),
         g.UseRegister(op.right()));
  } else if (CpuFeatures::IsSupported(SSE4_2)) {
    Emit(kX64IGtS | LaneSizeField::encode(kL64) |
             VectorLengthField::encode(kV128),
         g.DefineSameAsFirst(node), g.UseRegister(op.left()),
         g.UseRegister(op.right()));
  } else {
    Emit(kX64IGtS | LaneSizeField::encode(kL64) |
             VectorLengthField::encode(kV128),
         g.DefineAsRegister(node), g.UseUniqueRegister(op.left()),
         g.UseUniqueRegister(op.right()));
  }
}
void InstructionSelectorT::VisitI64x2GeS(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd128BinopOp& op = Cast<Simd128BinopOp>(node);
  if (CpuFeatures::IsSupported(AVX)) {
    Emit(kX64IGeS | LaneSizeField::encode(kL64) |
             VectorLengthField::encode(kV128),
         g.DefineAsRegister(node), g.UseRegister(op.left()),
         g.UseRegister(op.right()));
  } else if (CpuFeatures::IsSupported(SSE4_2)) {
    Emit(kX64IGeS | LaneSizeField::encode(kL64) |
             VectorLengthField::encode(kV128),
         g.DefineAsRegister(node), g.UseUniqueRegister(op.left()),
         g.UseRegister(op.right()));
  } else {
    Emit(kX64IGeS | LaneSizeField::encode(kL64) |
             VectorLengthField::encode(kV128),
         g.DefineAsRegister(node), g.UseUniqueRegister(op.left()),
         g.UseUniqueRegister(op.right()));
  }
}
void InstructionSelectorT::VisitI64x4GeS(OpIndex node) {
#ifdef V8_ENABLE_WASM_SIMD256_REVEC
  X64OperandGeneratorT g(this);
  const Simd256BinopOp& op = Cast<Simd256BinopOp>(node);
  DCHECK(CpuFeatures::IsSupported(AVX2));
  Emit(
      kX64IGeS | LaneSizeField::encode(kL64) | VectorLengthField::encode(kV256),
      g.DefineAsRegister(node), g.UseRegister(op.left()),
      g.UseRegister(op.right()));
#else
  UNIMPLEMENTED();
#endif
}
void InstructionSelectorT::VisitI64x2Abs(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd128UnaryOp& op = Cast<Simd128UnaryOp>(node);
  if (CpuFeatures::IsSupported(AVX)) {
    Emit(kX64IAbs | LaneSizeField::encode(kL64) |
             VectorLengthField::encode(kV128),
         g.DefineAsRegister(node), g.UseUniqueRegister(op.input()));
  } else {
    Emit(kX64IAbs | LaneSizeField::encode(kL64) |
             VectorLengthField::encode(kV128),
         g.DefineSameAsFirst(node), g.UseRegister(op.input()));
  }
}
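
// F64x2PromoteLowF32x4 only reads the low 64 bits of its input, so when that
// input is a 64-bit zero-extending load transform that this node fully covers,
// the load can be folded into the conversion's memory operand instead of being
// emitted separately.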
bool InstructionSelectorT::CanOptimizeF64x2PromoteLowF32x4(OpIndex node) {
  const Simd128UnaryOp& op = Cast<Opmask::kSimd128F64x2PromoteLowF32x4>(node);
  OpIndex input = op.input();
  return Is<Opmask::kSimd128LoadTransform64Zero>(input) &&
         CanCover(node, input);
}

void InstructionSelectorT::VisitF64x2PromoteLowF32x4(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd128UnaryOp& op = Cast<Simd128UnaryOp>(node);
  InstructionCode code = kX64F64x2PromoteLowF32x4;
  if (CanOptimizeF64x2PromoteLowF32x4(node)) {
    OpIndex input = op.input();
    const Simd128LoadTransformOp& load_transform =
        Cast<Simd128LoadTransformOp>(input);
    if (load_transform.load_kind.with_trap_handler) {
      code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
    }
    // Mark the load as already defined so it is not emitted a second time when
    // it is visited on its own.
    MarkAsDefined(input);
    VisitLoad(node, input, code);
    return;
  }
  VisitRR(this, code, node);
}
void InstructionSelectorT::VisitI16x8DotI8x16I7x16S(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd128BinopOp& op = Cast<Simd128BinopOp>(node);
  Emit(kX64I16x8DotI8x16I7x16S, g.DefineAsRegister(node),
       g.UseUniqueRegister(op.left()), g.UseRegister(op.right()));
}

void InstructionSelectorT::VisitI32x4DotI8x16I7x16AddS(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd128TernaryOp& op = Cast<Simd128TernaryOp>(node);
  if (CpuFeatures::IsSupported(AVX_VNNI)) {
    Emit(kX64I32x4DotI8x16I7x16AddS, g.DefineSameAsInput(node, 2),
         g.UseRegister(op.input(0)), g.UseRegister(op.input(1)),
         g.UseRegister(op.input(2)));
  } else {
    InstructionOperand temps[] = {g.TempSimd128Register()};
    Emit(kX64I32x4DotI8x16I7x16AddS, g.DefineSameAsInput(node, 2),
         g.UseUniqueRegister(op.input(0)), g.UseUniqueRegister(op.input(1)),
         g.UseUniqueRegister(op.input(2)), arraysize(temps), temps);
  }
}
#ifdef V8_ENABLE_WASM_SIMD256_REVEC
void InstructionSelectorT::VisitI16x16DotI8x32I7x32S(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd256BinopOp& op = Cast<Simd256BinopOp>(node);
  Emit(kX64I16x16DotI8x32I7x32S, g.DefineAsRegister(node),
       g.UseUniqueRegister(op.left()), g.UseRegister(op.right()));
}

void InstructionSelectorT::VisitI32x8DotI8x32I7x32AddS(OpIndex node) {
  X64OperandGeneratorT g(this);
  const Simd256TernaryOp& op = Cast<Simd256TernaryOp>(node);
  if (CpuFeatures::IsSupported(AVX_VNNI)) {
    Emit(kX64I32x8DotI8x32I7x32AddS, g.DefineSameAsInput(node, 2),
         g.UseRegister(op.input(0)), g.UseRegister(op.input(1)),
         g.UseRegister(op.input(2)));
  } else {
    InstructionOperand temps[] = {g.TempSimd256Register()};
    Emit(kX64I32x8DotI8x32I7x32AddS, g.DefineSameAsInput(node, 2),
         g.UseUniqueRegister(op.input(0)), g.UseUniqueRegister(op.input(1)),
         g.UseUniqueRegister(op.input(2)), arraysize(temps), temps);
  }
}
#endif  // V8_ENABLE_WASM_SIMD256_REVEC
void InstructionSelectorT::VisitSetStackPointer(OpIndex node) {
  X64OperandGeneratorT g(this);
  const SetStackPointerOp& op = Cast<SetStackPointerOp>(node);
  auto input = g.UseAny(op.value());
  Emit(kArchSetStackPointer, 0, nullptr, 1, &input);
}
#endif  // V8_ENABLE_WEBASSEMBLY

#ifndef V8_ENABLE_WEBASSEMBLY
#define VISIT_UNSUPPORTED_OP(op) \
  void InstructionSelectorT::Visit##op(OpIndex) { UNREACHABLE(); }
MACHINE_SIMD128_OP_LIST(VISIT_UNSUPPORTED_OP)
MACHINE_SIMD256_OP_LIST(VISIT_UNSUPPORTED_OP)
#endif
void InstructionSelectorT::AddOutputToSelectContinuation(OperandGenerator* g,
                                                         int first_input_index,
                                                         OpIndex node) {
  continuation_outputs_.push_back(
      g->DefineSameAsInput(node, first_input_index));
}
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  MachineOperatorBuilder::Flags flags =
      MachineOperatorBuilder::kWord32ShiftIsSafe |
      MachineOperatorBuilder::kWord32Ctz | MachineOperatorBuilder::kWord64Ctz |
      MachineOperatorBuilder::kWord32Rol | MachineOperatorBuilder::kWord64Rol |
      MachineOperatorBuilder::kWord32Select |
      MachineOperatorBuilder::kWord64Select;
  if (CpuFeatures::IsSupported(POPCNT)) {
    flags |= MachineOperatorBuilder::kWord32Popcnt |
             MachineOperatorBuilder::kWord64Popcnt;
  }
  if (CpuFeatures::IsSupported(SSE4_1)) {
    flags |= MachineOperatorBuilder::kFloat32RoundDown |
             MachineOperatorBuilder::kFloat64RoundDown |
             MachineOperatorBuilder::kFloat32RoundUp |
             MachineOperatorBuilder::kFloat64RoundUp |
             MachineOperatorBuilder::kFloat32RoundTruncate |
             MachineOperatorBuilder::kFloat64RoundTruncate |
             MachineOperatorBuilder::kFloat32RoundTiesEven |
             MachineOperatorBuilder::kFloat64RoundTiesEven;
  }
  if (CpuFeatures::IsSupported(F16C)) {
    flags |= MachineOperatorBuilder::kFloat16;
    if (CpuFeatures::IsSupported(AVX)) {
      flags |= MachineOperatorBuilder::kFloat16RawBitsConversion;
    }
  }
  return flags;
}
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
  return MachineOperatorBuilder::AlignmentRequirements::
      FullUnalignedAccessSupport();
}