37#ifndef V8_CODEGEN_X64_ASSEMBLER_X64_H_
38#define V8_CODEGEN_X64_ASSEMBLER_X64_H_
56#if defined(V8_OS_WIN_X64)
63class SafepointTableBuilder;
64class MaglevSafepointTableBuilder;
132 :
value_(static_cast<int32_t>(static_cast<intptr_t>(value.ptr()))) {
147 "Immediate must be small enough to pass it by value");
187 bool is_label_operand =
true;
195 bool is_label_operand =
false;
199 uint8_t buf[6] = {0};
208 static_assert(std::is_same<
decltype(LabelOperand::is_label_operand),
209 decltype(MemoryOperand::is_label_operand)>
::value);
210 static_assert(offsetof(
LabelOperand, is_label_operand) ==
212 static_assert(std::is_same<
decltype(LabelOperand::rex),
213 decltype(MemoryOperand::rex)>
::value);
217 "Length must have native word size to avoid spurious reloads "
218 "after writing it.");
220 "Length must be aligned for fast access.");
229 if (disp == 0 &&
base != rbp &&
base != r13) {
231 }
else if (is_int8(disp)) {
245 if (disp == 0 &&
base != rbp &&
base != r13) {
249 }
else if (is_int8(disp)) {
270 set_sib(
scale, index, rbp);
282 DCHECK(addend == 0 || (is_int8(addend) &&
label->is_bound()));
294 return memory_.is_label_operand;
301 V8_ASSUME(!memory_.is_label_operand || memory_.rex == 0);
306 DCHECK(!is_label_operand());
311 DCHECK(is_label_operand());
321 DCHECK(!is_label_operand());
323 memory_.buf[0] = mod << 6 | rm_reg.
low_bits();
335 memory_.buf[1] = (
scale << 6) | (index.low_bits() << 3) |
base.low_bits();
336 memory_.rex |= index.high_bit() << 1 |
base.high_bit();
341 V8_ASSUME(memory_.len == 1 || memory_.len == 2);
343 memory_.buf[memory_.len] = disp;
344 memory_.len +=
sizeof(int8_t);
348 V8_ASSUME(memory_.len == 1 || memory_.len == 2);
349 Address p =
reinterpret_cast<Address
>(&memory_.buf[memory_.len]);
350 WriteUnalignedValue(p, disp);
351 memory_.len +=
sizeof(int32_t);
383 "Operand must be small enough to pass it by value");
389#define ASSEMBLER_INSTRUCTION_LIST(V) \
415#define SHIFT_INSTRUCTION_LIST(V) \
  // Slack kept at the end of the code buffer; emission paths check remaining
  // space against this headroom (see the available_space() bookkeeping near the
  // end of this file) so the buffer can be grown before it is overrun.
  static constexpr int kGap = 32;
  // A freshly allocated buffer must be able to hold at least two gaps' worth
  // of code, otherwise growth logic could thrash — enforced at compile time.
  static_assert(AssemblerBase::kMinimalBufferSize >= 2 * kGap);
507 std::unique_ptr<AssemblerBuffer> = {});
510 std::unique_ptr<AssemblerBuffer> buffer = {})
516 static constexpr int kNoHandlerTable = 0;
521 int handler_table_offset);
527 GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
543 static inline Address target_address_at(Address
pc, Address constant_pool);
545 Address
pc, Address constant_pool, Address target,
548 static inline int32_t relative_target_offset(Address target, Address
pc);
557 Address instruction_payload);
571 Address
pc, Address constant_pool, uint32_t new_constant,
  // Size in bytes of a code-target slot; pc-relative targets on x64 are
  // 32-bit displacements (cf. relative_target_offset returning int32_t).
  static constexpr int kSpecialTargetSize = 4;
  // One-byte x86 opcodes used when inspecting/patching emitted code:
  // 0xA9 = "test eax, imm32".
  static constexpr uint8_t kTestEaxByte = 0xA9;
  // 0xA8 = "test al, imm8".
  static constexpr uint8_t kTestAlByte = 0xA8;
  // 0x90 = "nop".
  static constexpr uint8_t kNopByte = 0x90;
  // Short (rel8) conditional jumps encode as 0x70 | condition-code.
  static constexpr uint8_t kJccShortPrefix = 0x70;
  static constexpr uint8_t kJncShortOpcode = kJccShortPrefix | not_carry;
  static constexpr uint8_t kJcShortOpcode = kJccShortPrefix | carry;
  static constexpr uint8_t kJnzShortOpcode = kJccShortPrefix | not_zero;
  static constexpr uint8_t kJzShortOpcode = kJccShortPrefix | zero;
  // Fields of the VEX prefix (see Intel SDM Vol. 2, VEX prefix layout).
  // SIMDPrefix: the implied legacy prefix ("pp" field): none / 66 / F3 / F2.
  enum SIMDPrefix { kNoPrefix = 0x0, k66 = 0x1, kF3 = 0x2, kF2 = 0x3 };
  // VectorLength: the "L" bit selecting 128-bit vs 256-bit operation.
  // kLIG (length-ignored) and kLZ (length-must-be-zero) both encode as L128.
  enum VectorLength { kL128 = 0x0, kL256 = 0x4, kLIG = kL128, kLZ = kL128 };
  // VexW: the VEX.W bit at its in-byte position (0x80); kWIG = W-ignored.
  enum VexW { kW0 = 0x0, kW1 = 0x80, kWIG = kW0 };
615#define DECLARE_INSTRUCTION(instruction) \
616 template <typename... Ps> \
617 void instruction##_tagged(Ps... ps) { \
618 emit_##instruction(ps..., kTaggedSize); \
621 template <typename... Ps> \
622 void instruction##l(Ps... ps) { \
623 emit_##instruction(ps..., kInt32Size); \
626 template <typename... Ps> \
627 void instruction##q(Ps... ps) { \
628 emit_##instruction(ps..., kInt64Size); \
631#undef DECLARE_INSTRUCTION
672 static constexpr int kPushq32InstrSize = 5;
713 movq(dst,
Immediate64(
static_cast<int64_t
>(value)));
752 immediate_arithmetic_op_8(0x7, dst, src);
757 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
759 const int kMaxMacroFusionLength = 10;
760 AlignForJCCErratum(kMaxMacroFusionLength);
770 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
772 const int kMaxMacroFusionLength = 9;
773 AlignForJCCErratum(kMaxMacroFusionLength);
781 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
783 const int kMaxMacroFusionLength = 14;
784 AlignForJCCErratum(kMaxMacroFusionLength);
792 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
794 const int kMaxMacroFusionLength = 14;
795 AlignForJCCErratum(kMaxMacroFusionLength);
800 immediate_arithmetic_op_8(0x7, dst, src);
805 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
812 immediate_arithmetic_op_16(0x7, dst, src);
817 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
824 immediate_arithmetic_op_16(0x7, dst, src);
829 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
831 const int kMaxMacroFusionLength = 12;
832 AlignForJCCErratum(kMaxMacroFusionLength);
840 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
842 const int kMaxMacroFusionLength = 15;
843 AlignForJCCErratum(kMaxMacroFusionLength);
851 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
853 const int kMaxMacroFusionLength = 10;
854 AlignForJCCErratum(kMaxMacroFusionLength);
862 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
864 const int kMaxMacroFusionLength = 15;
865 AlignForJCCErratum(kMaxMacroFusionLength);
880 immediate_arithmetic_op_8(0x4, dst, src);
921#define DECLARE_SHIFT_INSTRUCTION(instruction, subcode) \
922 void instruction##l(Register dst, Immediate imm8) { \
923 shift(dst, imm8, subcode, kInt32Size); \
926 void instruction##q(Register dst, Immediate imm8) { \
927 shift(dst, imm8, subcode, kInt64Size); \
930 void instruction##l(Operand dst, Immediate imm8) { \
931 shift(dst, imm8, subcode, kInt32Size); \
934 void instruction##q(Operand dst, Immediate imm8) { \
935 shift(dst, imm8, subcode, kInt64Size); \
938 void instruction##l_cl(Register dst) { shift(dst, subcode, kInt32Size); } \
940 void instruction##q_cl(Register dst) { shift(dst, subcode, kInt64Size); } \
942 void instruction##l_cl(Operand dst) { shift(dst, subcode, kInt32Size); } \
944 void instruction##q_cl(Operand dst) { shift(dst, subcode, kInt64Size); }
946#undef DECLARE_SHIFT_INSTRUCTION
958 immediate_arithmetic_op_8(0x5, dst, src);
966 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
968 const int kMaxMacroFusionLength = 9;
969 AlignForJCCErratum(kMaxMacroFusionLength);
976 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
978 const int kMaxMacroFusionLength = 10;
979 AlignForJCCErratum(kMaxMacroFusionLength);
986 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
995 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
997 const int kMaxMacroFusionLength = 14;
998 AlignForJCCErratum(kMaxMacroFusionLength);
1005 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
1007 const int kMaxMacroFusionLength = 10;
1008 AlignForJCCErratum(kMaxMacroFusionLength);
1015 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
1017 const int kMaxMacroFusionLength = 12;
1018 AlignForJCCErratum(kMaxMacroFusionLength);
1025 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
1034 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
1036 const int kMaxMacroFusionLength = 15;
1037 AlignForJCCErratum(kMaxMacroFusionLength);
1075 vinstr(0x0, dst, ymm0, src, k66, k0F3A, kW1, AVX2);
1079 vinstr(0x0, dst, ymm0, src, k66, k0F3A, kW1, AVX2);
1107 static constexpr int kIntraSegmentJmpInstrSize = 5;
1126 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
1127 const int kInstLength = distance == Label::kFar ? 6 : 2;
1128 AlignForJCCErratum(kInstLength);
1148 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
1149 const int kInstLength = distance == Label::kFar ? 6 : 2;
1150 AlignForJCCErratum(kInstLength);
1261 template <
typename Reg1,
typename Reg2,
typename Op>
1269#define DECLARE_SSE_INSTRUCTION(instruction, escape, opcode) \
1270 void instruction(XMMRegister dst, XMMRegister src) { \
1271 sse_instr(dst, src, 0x##escape, 0x##opcode); \
1273 void instruction(XMMRegister dst, Operand src) { \
1274 sse_instr(dst, src, 0x##escape, 0x##opcode); \
1279#undef DECLARE_SSE_INSTRUCTION
1283 uint8_t escape, uint8_t opcode);
1286#define DECLARE_SSE2_INSTRUCTION(instruction, prefix, escape, opcode) \
1287 void instruction(XMMRegister dst, XMMRegister src) { \
1288 sse2_instr(dst, src, 0x##prefix, 0x##escape, 0x##opcode); \
1290 void instruction(XMMRegister dst, Operand src) { \
1291 sse2_instr(dst, src, 0x##prefix, 0x##escape, 0x##opcode); \
1299#undef DECLARE_SSE2_INSTRUCTION
1304 sse2_instr(ext_reg,
reg, prefix, escape, opcode);
1308#define DECLARE_SSE2_SHIFT_IMM(instruction, prefix, escape, opcode, extension) \
1309 void instruction(XMMRegister reg, uint8_t imm8) { \
1310 sse2_instr(reg, imm8, 0x##prefix, 0x##escape, 0x##opcode, 0x##extension); \
1313#undef DECLARE_SSE2_SHIFT_IMM
1315#define DECLARE_SSE2_AVX_INSTRUCTION(instruction, prefix, escape, opcode) \
1316 void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
1317 vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0); \
1319 void v##instruction(XMMRegister dst, XMMRegister src1, Operand src2) { \
1320 vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0); \
1323#define DECLARE_SSE2_PD_AVX_INSTRUCTION(instruction, prefix, escape, opcode) \
1324 DECLARE_SSE2_AVX_INSTRUCTION(instruction, prefix, escape, opcode) \
1325 void v##instruction(YMMRegister dst, YMMRegister src1, YMMRegister src2) { \
1326 vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0, AVX); \
1328 void v##instruction(YMMRegister dst, YMMRegister src1, Operand src2) { \
1329 vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0, AVX); \
1333#undef DECLARE_SSE2_PD_AVX_INSTRUCTION
1335#define DECLARE_SSE2_PI_AVX_INSTRUCTION(instruction, prefix, escape, opcode) \
1336 DECLARE_SSE2_AVX_INSTRUCTION(instruction, prefix, escape, opcode) \
1337 void v##instruction(YMMRegister dst, YMMRegister src1, YMMRegister src2) { \
1338 vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0, AVX2); \
1340 void v##instruction(YMMRegister dst, YMMRegister src1, Operand src2) { \
1341 vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0, AVX2); \
1345#undef DECLARE_SSE2_PI_AVX_INSTRUCTION
1347#define DECLARE_SSE2_SHIFT_AVX_INSTRUCTION(instruction, prefix, escape, \
1349 DECLARE_SSE2_AVX_INSTRUCTION(instruction, prefix, escape, opcode) \
1350 void v##instruction(YMMRegister dst, YMMRegister src1, XMMRegister src2) { \
1351 vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0, AVX2); \
1353 void v##instruction(YMMRegister dst, YMMRegister src1, Operand src2) { \
1354 vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0, AVX2); \
1358#undef DECLARE_SSE2_SHIFT_AVX_INSTRUCTION
1359#undef DECLARE_SSE2_AVX_INSTRUCTION
1361#define DECLARE_SSE2_UNOP_AVX_INSTRUCTION(instruction, prefix, escape, opcode) \
1362 void v##instruction(XMMRegister dst, XMMRegister src) { \
1363 vpd(0x##opcode, dst, xmm0, src); \
1365 void v##instruction(XMMRegister dst, Operand src) { \
1366 vpd(0x##opcode, dst, xmm0, src); \
1370#undef DECLARE_SSE2_UNOP_AVX_INSTRUCTION
1372#define DECLARE_SSE2_UNOP_AVX_YMM_INSTRUCTION( \
1373 instruction, opcode, DSTRegister, SRCRegister, MemOperand) \
1374 void v##instruction(DSTRegister dst, SRCRegister src) { \
1375 vpd(0x##opcode, dst, ymm0, src); \
1377 void v##instruction(DSTRegister dst, MemOperand src) { \
1378 vpd(0x##opcode, dst, ymm0, src); \
1388#undef DECLARE_SSE2_UNOP_AVX_YMM_INSTRUCTION
1398 uint8_t escape1, uint8_t escape2, uint8_t opcode);
1400 uint8_t escape1, uint8_t escape2, uint8_t opcode);
1402#define DECLARE_SSSE3_INSTRUCTION(instruction, prefix, escape1, escape2, \
1404 void instruction(XMMRegister dst, XMMRegister src) { \
1405 ssse3_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
1407 void instruction(XMMRegister dst, Operand src) { \
1408 ssse3_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
1413#undef DECLARE_SSSE3_INSTRUCTION
1417 uint8_t escape1, uint8_t escape2, uint8_t opcode,
1420 uint8_t escape2, uint8_t opcode, int8_t imm8);
1422 uint8_t escape1, uint8_t escape2, uint8_t opcode,
1425 uint8_t escape1, uint8_t escape2, uint8_t opcode);
1427 uint8_t escape2, uint8_t opcode);
1428#define DECLARE_SSE4_INSTRUCTION(instruction, prefix, escape1, escape2, \
1430 void instruction(XMMRegister dst, XMMRegister src) { \
1431 sse4_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
1433 void instruction(XMMRegister dst, Operand src) { \
1434 sse4_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
1442#undef DECLARE_SSE4_INSTRUCTION
1444#define DECLARE_SSE4_EXTRACT_INSTRUCTION(instruction, prefix, escape1, \
1446 void instruction(Register dst, XMMRegister src, uint8_t imm8) { \
1447 sse4_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode, \
1450 void instruction(Operand dst, XMMRegister src, uint8_t imm8) { \
1451 sse4_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode, \
1456#undef DECLARE_SSE4_EXTRACT_INSTRUCTION
1460 uint8_t escape1, uint8_t escape2, uint8_t opcode);
1462 uint8_t escape1, uint8_t escape2, uint8_t opcode);
1463#define DECLARE_SSE4_2_INSTRUCTION(instruction, prefix, escape1, escape2, \
1465 void instruction(XMMRegister dst, XMMRegister src) { \
1466 sse4_2_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
1468 void instruction(XMMRegister dst, Operand src) { \
1469 sse4_2_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
1473#undef DECLARE_SSE4_2_INSTRUCTION
1475#define DECLARE_SSE34_AVX_INSTRUCTION(instruction, prefix, escape1, escape2, \
1477 void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
1478 vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape1##escape2, kW0); \
1480 void v##instruction(XMMRegister dst, XMMRegister src1, Operand src2) { \
1481 vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape1##escape2, kW0); \
1483 void v##instruction(YMMRegister dst, YMMRegister src1, YMMRegister src2) { \
1484 vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape1##escape2, kW0, \
1487 void v##instruction(YMMRegister dst, YMMRegister src1, Operand src2) { \
1488 vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape1##escape2, kW0, \
1495#undef DECLARE_SSE34_AVX_INSTRUCTION
1497#define DECLARE_SSSE3_UNOP_AVX_INSTRUCTION(instruction, prefix, escape1, \
1499 void v##instruction(XMMRegister dst, XMMRegister src) { \
1500 vinstr(0x##opcode, dst, xmm0, src, k##prefix, k##escape1##escape2, kW0); \
1502 void v##instruction(XMMRegister dst, Operand src) { \
1503 vinstr(0x##opcode, dst, xmm0, src, k##prefix, k##escape1##escape2, kW0); \
1505 void v##instruction(YMMRegister dst, YMMRegister src) { \
1506 vinstr(0x##opcode, dst, ymm0, src, k##prefix, k##escape1##escape2, kW0); \
1508 void v##instruction(YMMRegister dst, Operand src) { \
1509 vinstr(0x##opcode, dst, ymm0, src, k##prefix, k##escape1##escape2, kW0); \
1513#undef DECLARE_SSSE3_UNOP_AVX_INSTRUCTION
1517 vinstr(0x4C, dst, src1, src2, k66, k0F3A, kW0);
1519 emit(
mask.code() << 4);
1523 vinstr(0x4C, dst, src1, src2, k66, k0F3A, kW0, AVX2);
1525 emit(
mask.code() << 4);
1530 vinstr(0x4A, dst, src1, src2, k66, k0F3A, kW0);
1532 emit(
mask.code() << 4);
1536 vinstr(0x4A, dst, src1, src2, k66, k0F3A, kW0, AVX);
1538 emit(
mask.code() << 4);
1543 vinstr(0x4B, dst, src1, src2, k66, k0F3A, kW0);
1545 emit(
mask.code() << 4);
1549 vinstr(0x4B, dst, src1, src2, k66, k0F3A, kW0, AVX);
1551 emit(
mask.code() << 4);
1554#define DECLARE_SSE4_PMOV_AVX_INSTRUCTION(instruction, prefix, escape1, \
1556 void v##instruction(XMMRegister dst, XMMRegister src) { \
1557 vinstr(0x##opcode, dst, xmm0, src, k##prefix, k##escape1##escape2, kW0); \
1559 void v##instruction(XMMRegister dst, Operand src) { \
1560 vinstr(0x##opcode, dst, xmm0, src, k##prefix, k##escape1##escape2, kW0); \
1563#undef DECLARE_SSE4_PMOV_AVX_INSTRUCTION
1565#define DECLARE_SSE4_PMOV_AVX2_INSTRUCTION(instruction, prefix, escape1, \
1567 void v##instruction(YMMRegister dst, XMMRegister src) { \
1568 vinstr(0x##opcode, dst, xmm0, src, k##prefix, k##escape1##escape2, kW0); \
1570 void v##instruction(YMMRegister dst, Operand src) { \
1571 vinstr(0x##opcode, dst, xmm0, src, k##prefix, k##escape1##escape2, kW0); \
1574#undef DECLARE_SSE4_PMOV_AVX2_INSTRUCTION
1577 vinstr(0x17, dst, ymm0, src, k66, k0F38, kW0, AVX);
1580 vinstr(0x17, dst, ymm0, src, k66, k0F38, kW0, AVX);
1583#define DECLARE_AVX_INSTRUCTION(instruction, prefix, escape1, escape2, opcode) \
1584 void v##instruction(Register dst, XMMRegister src, uint8_t imm8) { \
1585 XMMRegister idst = XMMRegister::from_code(dst.code()); \
1586 vinstr(0x##opcode, src, xmm0, idst, k##prefix, k##escape1##escape2, kW0); \
1589 void v##instruction(Operand dst, XMMRegister src, uint8_t imm8) { \
1590 vinstr(0x##opcode, src, xmm0, dst, k##prefix, k##escape1##escape2, kW0); \
1595#undef DECLARE_AVX_INSTRUCTION
// Declares instr##ps / instr##pd overloads (register and memory source) that
// emit cmpps / cmppd with a fixed comparison-predicate immediate imm8.
#define SSE_CMP_P(instr, imm8)                                                \
  void instr##ps(XMMRegister dst, XMMRegister src) { cmpps(dst, src, imm8); } \
  void instr##ps(XMMRegister dst, Operand src) { cmpps(dst, src, imm8); }     \
  void instr##pd(XMMRegister dst, XMMRegister src) { cmppd(dst, src, imm8); } \
  void instr##pd(XMMRegister dst, Operand src) { cmppd(dst, src, imm8); }
1714 sse_instr(dst, src, 0x0F, 0x12);
1717 sse_instr(dst, src, 0x0F, 0x16);
1740 template <
typename Reg1,
typename Reg2,
typename Op>
1744#define FMA(instr, prefix, escape1, escape2, extension, opcode) \
1745 void instr(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
1746 fma_instr(0x##opcode, dst, src1, src2, kL128, k##prefix, \
1747 k##escape1##escape2, k##extension); \
1749 void instr(XMMRegister dst, XMMRegister src1, Operand src2) { \
1750 fma_instr(0x##opcode, dst, src1, src2, kL128, k##prefix, \
1751 k##escape1##escape2, k##extension); \
1756#define DECLARE_FMA_YMM_INSTRUCTION(instr, prefix, escape1, escape2, \
1757 extension, opcode) \
1758 void instr(YMMRegister dst, YMMRegister src1, YMMRegister src2) { \
1759 fma_instr(0x##opcode, dst, src1, src2, kL256, k##prefix, \
1760 k##escape1##escape2, k##extension); \
1762 void instr(YMMRegister dst, YMMRegister src1, Operand src2) { \
1763 fma_instr(0x##opcode, dst, src1, src2, kL256, k##prefix, \
1764 k##escape1##escape2, k##extension); \
1768#undef DECLARE_FMA_YMM_INSTRUCTION
1778 vsd(0x10, dst, src1, src2);
1799#define AVX_SSE_UNOP(instr, escape, opcode) \
1800 void v##instr(XMMRegister dst, XMMRegister src2) { \
1801 vps(0x##opcode, dst, xmm0, src2); \
1803 void v##instr(XMMRegister dst, Operand src2) { \
1804 vps(0x##opcode, dst, xmm0, src2); \
1806 void v##instr(YMMRegister dst, YMMRegister src2) { \
1807 vps(0x##opcode, dst, ymm0, src2); \
1809 void v##instr(YMMRegister dst, Operand src2) { \
1810 vps(0x##opcode, dst, ymm0, src2); \
1815#define AVX_SSE_BINOP(instr, escape, opcode) \
1816 void v##instr(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
1817 vps(0x##opcode, dst, src1, src2); \
1819 void v##instr(XMMRegister dst, XMMRegister src1, Operand src2) { \
1820 vps(0x##opcode, dst, src1, src2); \
1822 void v##instr(YMMRegister dst, YMMRegister src1, YMMRegister src2) { \
1823 vps(0x##opcode, dst, src1, src2); \
1825 void v##instr(YMMRegister dst, YMMRegister src1, Operand src2) { \
1826 vps(0x##opcode, dst, src1, src2); \
1831#define AVX_3(instr, opcode, impl, SIMDRegister) \
1832 void instr(SIMDRegister dst, SIMDRegister src1, SIMDRegister src2) { \
1833 impl(opcode, dst, src1, src2); \
1835 void instr(SIMDRegister dst, SIMDRegister src1, Operand src2) { \
1836 impl(opcode, dst, src1, src2); \
  // vhaddps (horizontal add packed singles, opcode 0x7C) for both 128-bit and
  // 256-bit registers; routed through vsd, i.e. the F2-prefixed encoding path.
  AVX_3(vhaddps, 0x7c, vsd, XMMRegister)
  AVX_3(vhaddps, 0x7c, vsd, YMMRegister)
1842#define AVX_SCALAR(instr, prefix, escape, opcode) \
1843 void v##instr(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
1844 vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kWIG); \
1846 void v##instr(XMMRegister dst, XMMRegister src1, Operand src2) { \
1847 vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kWIG); \
1855#define AVX_SSE2_SHIFT_IMM(instr, prefix, escape, opcode, extension) \
1856 void v##instr(XMMRegister dst, XMMRegister src, uint8_t imm8) { \
1857 XMMRegister ext_reg = XMMRegister::from_code(extension); \
1858 vinstr(0x##opcode, ext_reg, dst, src, k##prefix, k##escape, kWIG); \
1862 void v##instr(YMMRegister dst, YMMRegister src, uint8_t imm8) { \
1863 YMMRegister ext_reg = YMMRegister::from_code(extension); \
1864 vinstr(0x##opcode, ext_reg, dst, src, k##prefix, k##escape, kWIG); \
1868#undef AVX_SSE2_SHIFT_IMM
1871 vinstr(0x16, dst, src1, src2, kNoPrefix, k0F, kWIG);
1874 vinstr(0x12, dst, src1, src2, kNoPrefix, k0F, kWIG);
1877 vinstr(0xe6, dst, xmm0, src, kF3, k0F, kWIG);
1880 vinstr(0xe6, dst, xmm0, src, kF3, k0F, kWIG, AVX);
1883 vinstr(0xe6, dst, xmm0, src, kF3, k0F, kWIG, AVX);
1886 vinstr(0x5b, dst, xmm0, src, kF3, k0F, kWIG);
1889 vinstr(0x5b, dst, ymm0, src, kF3, k0F, kWIG, AVX);
1892 vinstr(0x5b, dst, ymm0, src, kF3, k0F, kWIG, AVX);
1896 vinstr(0x2a, dst, src1, isrc2, kF2, k0F, kW0);
1899 vinstr(0x2a, dst, src1, src2, kF2, k0F, kW0);
1903 vinstr(0x2a, dst, src1, isrc2, kF3, k0F, kW0);
1906 vinstr(0x2a, dst, src1, src2, kF3, k0F, kW0);
1910 vinstr(0x2a, dst, src1, isrc2, kF3, k0F, kW1);
1913 vinstr(0x2a, dst, src1, src2, kF3, k0F, kW1);
1917 vinstr(0x2a, dst, src1, isrc2, kF2, k0F, kW1);
1920 vinstr(0x2a, dst, src1, src2, kF2, k0F, kW1);
1924 vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW0);
1928 vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW0);
1932 vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW0);
1936 vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW0);
1940 vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW1);
1944 vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW1);
1948 vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW1);
1952 vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW1);
1956 vinstr(0x2d, idst, xmm0, src, kF2, k0F, kW0);
1960 vinstr(0x0a, dst, src1, src2, k66, k0F3A, kWIG);
1961 emit(
static_cast<uint8_t
>(mode) | 0x8);
1965 vinstr(0x0a, dst, src1, src2, k66, k0F3A, kWIG);
1966 emit(
static_cast<uint8_t
>(mode) | 0x8);
1970 vinstr(0x0b, dst, src1, src2, k66, k0F3A, kWIG);
1971 emit(
static_cast<uint8_t
>(mode) | 0x8);
1975 vinstr(0x0b, dst, src1, src2, k66, k0F3A, kWIG);
1976 emit(
static_cast<uint8_t
>(mode) | 0x8);
1979 vinstr(0x08, dst, xmm0, src, k66, k0F3A, kWIG);
1980 emit(
static_cast<uint8_t
>(mode) | 0x8);
1983 vinstr(0x08, dst, ymm0, src, k66, k0F3A, kWIG, AVX);
1984 emit(
static_cast<uint8_t
>(mode) | 0x8);
1987 vinstr(0x09, dst, xmm0, src, k66, k0F3A, kWIG);
1988 emit(
static_cast<uint8_t
>(mode) | 0x8);
1991 vinstr(0x09, dst, ymm0, src, k66, k0F3A, kWIG, AVX);
1992 emit(
static_cast<uint8_t
>(mode) | 0x8);
1995 template <
typename Reg,
typename Op>
1996 void vsd(uint8_t op, Reg dst, Reg src1, Op src2) {
1997 vinstr(op, dst, src1, src2, kF2, k0F, kWIG, AVX);
2001 vss(0x10, dst, src1, src2);
2012 vps(0xC6, dst, src1, src2, imm8);
2016 vps(0xC6, dst, src1, src2, imm8);
2037 vps(0x50, idst, xmm0, src);
2041 vpd(0x50, idst, xmm0, src);
2045 vss(0xC2, dst, dst, src);
2049 vsd(0xC2, dst, dst, src);
2053 vps(0xC2, dst, src1, src2);
2057 vps(0xC2, dst, src1, src2);
2061 vps(0xC2, dst, src1, src2);
2065 vps(0xC2, dst, src1, src2);
2069 vpd(0xC2, dst, src1, src2);
2073 vpd(0xC2, dst, src1, src2);
2077 vpd(0xC2, dst, src1, src2);
2081 vpd(0xC2, dst, src1, src2);
2084#define AVX_CMP_P(instr, imm8, SIMDRegister) \
2085 void instr##ps(SIMDRegister dst, SIMDRegister src1, SIMDRegister src2) { \
2086 vcmpps(dst, src1, src2, imm8); \
2088 void instr##ps(SIMDRegister dst, SIMDRegister src1, Operand src2) { \
2089 vcmpps(dst, src1, src2, imm8); \
2091 void instr##pd(SIMDRegister dst, SIMDRegister src1, SIMDRegister src2) { \
2092 vcmppd(dst, src1, src2, imm8); \
2094 void instr##pd(SIMDRegister dst, SIMDRegister src1, Operand src2) { \
2095 vcmppd(dst, src1, src2, imm8); \
2118 vinstr(0xF0, dst, xmm0, src, kF2, k0F, kWIG);
2122 vinstr(0x21, dst, src1, src2, k66, k0F3A, kWIG);
2127 vinstr(0x21, dst, src1, src2, k66, k0F3A, kWIG);
2132 vinstr(0x16, src, xmm0, idst, k66, k0F3A, kW1);
2137 vinstr(0x20, dst, src1, isrc, k66, k0F3A, kW0);
2141 vinstr(0x20, dst, src1, src2, k66, k0F3A, kW0);
2146 vinstr(0xc4, dst, src1, isrc, k66, k0F, kW0);
2150 vinstr(0xc4, dst, src1, src2, k66, k0F, kW0);
2155 vinstr(0x22, dst, src1, isrc, k66, k0F3A, kW0);
2159 vinstr(0x22, dst, src1, src2, k66, k0F3A, kW0);
2164 vinstr(0x22, dst, src1, isrc, k66, k0F3A, kW1);
2168 vinstr(0x22, dst, src1, src2, k66, k0F3A, kW1);
2173 vinstr(0x70, dst, xmm0, src, k66, k0F, kWIG);
2177 vinstr(0x70, dst, ymm0, src, k66, k0F, kWIG);
2181 vinstr(0x70, dst, xmm0, src, k66, k0F, kWIG);
2185 vinstr(0x70, dst, ymm0, src, k66, k0F, kWIG);
2189 vinstr(0x70, dst, xmm0, src, kF2, k0F, kWIG);
2193 vinstr(0x70, dst, ymm0, src, kF2, k0F, kWIG);
2197 vinstr(0x70, dst, xmm0, src, kF2, k0F, kWIG);
2201 vinstr(0x70, dst, ymm0, src, kF2, k0F, kWIG);
2205 vinstr(0x70, dst, xmm0, src, kF3, k0F, kWIG);
2209 vinstr(0x70, dst, ymm0, src, kF3, k0F, kWIG);
2213 vinstr(0x70, dst, xmm0, src, kF3, k0F, kWIG);
2217 vinstr(0x70, dst, ymm0, src, kF3, k0F, kWIG);
2223 vinstr(0x0E, dst, src1, src2, k66, k0F3A, kWIG);
2228 vinstr(0x0E, dst, src1, src2, k66, k0F3A, kWIG);
2232 vinstr(0x0E, dst, src1, src2, k66, k0F3A, kWIG);
2236 vinstr(0x0E, dst, src1, src2, k66, k0F3A, kWIG);
2242 vinstr(0x0F, dst, src1, src2, k66, k0F3A, kWIG);
2247 vinstr(0x0F, dst, src1, src2, k66, k0F3A, kWIG);
2251 vinstr(0x0F, dst, src1, src2, k66, k0F3A, kWIG);
2255 vinstr(0x0F, dst, src1, src2, k66, k0F3A, kWIG);
2275#define AVX2_INSTRUCTION(instr, prefix, escape1, escape2, opcode) \
2276 template <typename Reg, typename Op> \
2277 void instr(Reg dst, Op src) { \
2278 vinstr(0x##opcode, dst, xmm0, src, k##prefix, k##escape1##escape2, kW0, \
2282#undef AVX2_INSTRUCTION
2292 vinstr(0x50, dst, src1, src2, k66, k0F38, kW0, AVX_VNNI);
2295 vinstr(0x50, dst, src1, src2, k66, k0F38, kW0, AVX_VNNI);
2300 vinstr(0x50, dst, src1, src2, kF2, k0F38, kW0, AVX_VNNI_INT8);
2303 vinstr(0x50, dst, src1, src2, kF2, k0F38, kW0, AVX_VNNI_INT8);
2308 bmi1q(0xf2, dst, src1, src2);
2311 bmi1q(0xf2, dst, src1, src2);
2314 bmi1l(0xf2, dst, src1, src2);
2317 bmi1l(0xf2, dst, src1, src2);
2320 bmi1q(0xf7, dst, src2, src1);
2323 bmi1q(0xf7, dst, src2, src1);
2326 bmi1l(0xf7, dst, src2, src1);
2329 bmi1l(0xf7, dst, src2, src1);
2359 bmi2q(kNoPrefix, 0xf5, dst, src2, src1);
2362 bmi2q(kNoPrefix, 0xf5, dst, src2, src1);
2365 bmi2l(kNoPrefix, 0xf5, dst, src2, src1);
2368 bmi2l(kNoPrefix, 0xf5, dst, src2, src1);
2371 bmi2q(kF2, 0xf6, dst1, dst2, src);
2374 bmi2q(kF2, 0xf6, dst1, dst2, src);
2377 bmi2l(kF2, 0xf6, dst1, dst2, src);
2380 bmi2l(kF2, 0xf6, dst1, dst2, src);
2383 bmi2q(kF2, 0xf5, dst, src1, src2);
2386 bmi2q(kF2, 0xf5, dst, src1, src2);
2389 bmi2l(kF2, 0xf5, dst, src1, src2);
2392 bmi2l(kF2, 0xf5, dst, src1, src2);
2395 bmi2q(kF3, 0xf5, dst, src1, src2);
2398 bmi2q(kF3, 0xf5, dst, src1, src2);
2401 bmi2l(kF3, 0xf5, dst, src1, src2);
2404 bmi2l(kF3, 0xf5, dst, src1, src2);
2407 bmi2q(kF3, 0xf7, dst, src2, src1);
2410 bmi2q(kF3, 0xf7, dst, src2, src1);
2413 bmi2l(kF3, 0xf7, dst, src2, src1);
2416 bmi2l(kF3, 0xf7, dst, src2, src1);
2419 bmi2q(k66, 0xf7, dst, src2, src1);
2422 bmi2q(k66, 0xf7, dst, src2, src1);
2425 bmi2l(k66, 0xf7, dst, src2, src1);
2428 bmi2l(k66, 0xf7, dst, src2, src1);
2431 bmi2q(kF2, 0xf7, dst, src2, src1);
2434 bmi2q(kF2, 0xf7, dst, src2, src1);
2437 bmi2l(kF2, 0xf7, dst, src2, src1);
2440 bmi2l(kF2, 0xf7, dst, src2, src1);
2466 void dp(uintptr_t data) { dq(data); }
2486 return static_cast<int>(reloc_info_writer.pos() -
pc_);
  // Hard upper bound on the size of the code buffer (512 MB).
  static constexpr int kMaximalBufferSize = 512 * MB;
2498#if defined(V8_OS_WIN_X64)
2499 win64_unwindinfo::BuiltinUnwindInfo GetUnwindInfo()
const;
2510 return reinterpret_cast<Address
>(buffer_start_ +
pos);
2513 return ReadUnalignedValue<uint32_t>(addr_at(
pos));
2516 WriteUnalignedValue(addr_at(
pos),
x);
2522 template <
typename T>
2523 static uint8_t*
emit(uint8_t* __restrict
pc, T t) {
2524 WriteUnalignedValue(
reinterpret_cast<Address
>(
pc), t);
2525 return pc +
sizeof(
T);
2534 if (!RelocInfo::IsNoInfo(
x.rmode_)) RecordRelocInfo(
x.rmode_);
2539 if (!RelocInfo::IsNoInfo(
x.rmode_)) RecordRelocInfo(
x.rmode_);
2540 emitq(
static_cast<uint64_t
>(
x.value_));
2564 inline void emit_rex_64(
Register rm_reg);
2571 inline void emit_rex_64(
Operand op);
2587 inline void emit_rex_32(
Register rm_reg);
2591 inline void emit_rex_32(
Operand op);
2621 inline void emit_optional_rex_32(
Register rm_reg);
2622 inline void emit_optional_rex_32(
XMMRegister rm_reg);
2626 inline void emit_optional_rex_32(
Operand op);
2649 emit_optional_rex_32(
p1);
2653 template <
class P1,
class P2>
2656 emit_rex_64(
p1,
p2);
2659 emit_optional_rex_32(
p1,
p2);
2670 inline void emit_vex3_byte2(VexW w,
XMMRegister v, VectorLength l,
2673 VectorLength l, SIMDPrefix pp, LeadingOpcode
m,
2676 VectorLength l, SIMDPrefix pp, LeadingOpcode
m,
2679 VectorLength l, SIMDPrefix pp, LeadingOpcode
m,
2682 VectorLength l, SIMDPrefix pp, LeadingOpcode
m,
2690 emit_operand(
reg.low_bits(), adr);
2704 emit(0xC0 |
reg.low_bits() << 3 | rm_reg.
low_bits());
2711 emit(0xC0 | code << 3 | rm_reg.
low_bits());
2766 arithmetic_op(0x03, dst, src, size);
2770 immediate_arithmetic_op(0x0, dst, src, size);
2774 arithmetic_op(0x03, dst, src, size);
2778 arithmetic_op(0x1, src, dst, size);
2782 immediate_arithmetic_op(0x0, dst, src, size);
2786 arithmetic_op(0x23, dst, src, size);
2790 arithmetic_op(0x23, dst, src, size);
2794 arithmetic_op(0x21, src, dst, size);
2798 immediate_arithmetic_op(0x4, dst, src, size);
2802 immediate_arithmetic_op(0x4, dst, src, size);
2806 arithmetic_op(0x3B, dst, src, size);
2811 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
2813 const int kMaxMacroFusionLength = 9;
2814 AlignForJCCErratum(kMaxMacroFusionLength);
2815 emit_cmp(dst, src, size);
2819 arithmetic_op(0x3B, dst, src, size);
2824 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
2826 const int kMaxMacroFusionLength = 14;
2827 AlignForJCCErratum(kMaxMacroFusionLength);
2828 emit_cmp(dst, src, size);
2832 arithmetic_op(0x39, src, dst, size);
2837 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
2839 const int kMaxMacroFusionLength = 14;
2840 AlignForJCCErratum(kMaxMacroFusionLength);
2841 emit_cmp(dst, src, size);
2845 immediate_arithmetic_op(0x7, dst, src, size);
2850 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
2853 const int kMaxMacroFusionLength = 9 +
size;
2854 AlignForJCCErratum(kMaxMacroFusionLength);
2855 emit_cmp(dst, src, size);
2859 immediate_arithmetic_op(0x7, dst, src, size);
2864 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
2867 emit_cmp(dst, src, size);
2917 arithmetic_op(0x0B, dst, src, size);
2921 arithmetic_op(0x0B, dst, src, size);
2925 arithmetic_op(0x9, src, dst, size);
2929 immediate_arithmetic_op(0x1, dst, src, size);
2933 immediate_arithmetic_op(0x1, dst, src, size);
2939 arithmetic_op(0x1b, dst, src, size);
2943 arithmetic_op(0x2B, dst, src, size);
2947 immediate_arithmetic_op(0x5, dst, src, size);
2951 arithmetic_op(0x2B, dst, src, size);
2955 arithmetic_op(0x29, src, dst, size);
2959 immediate_arithmetic_op(0x5, dst, src, size);
2965 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
2967 const int kMaxMacroFusionLength = 9;
2968 AlignForJCCErratum(kMaxMacroFusionLength);
2969 emit_test(dst, src, size);
2975 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
2978 const int kMaxMacroFusionLength = 9 +
size;
2979 AlignForJCCErratum(kMaxMacroFusionLength);
2986 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
2988 const int kMaxMacroFusionLength = 14;
2989 AlignForJCCErratum(kMaxMacroFusionLength);
2990 emit_test(op,
reg, size);
2996 DCHECK(CpuFeatures::IsSupported(INTEL_JCC_ERRATUM_MITIGATION));
2999 emit_test(op,
mask, size);
3003 return emit_test(op,
reg, size);
3008 return emit_aligned_test(op,
reg, size);
3020 arithmetic_op(0x33, dst, src, size);
3025 arithmetic_op(0x33, dst, src, size);
3029 immediate_arithmetic_op(0x6, dst, src, size);
3033 immediate_arithmetic_op(0x6, dst, src, size);
3037 arithmetic_op(0x31, src, dst, size);
3065 int safepoint_table_offset,
int handler_table_offset);
3076 std::deque<int> internal_reference_positions_;
3084#if defined(V8_OS_WIN_X64)
3085 std::unique_ptr<win64_unwindinfo::XdataEncoder> xdata_encoder_;
3093 VectorLength l, SIMDPrefix pp,
3094 LeadingOpcode
m, VexW w);
3100 VectorLength l, SIMDPrefix pp,
3101 LeadingOpcode
m, VexW w);
3106 VectorLength l, SIMDPrefix pp,
3107 LeadingOpcode
m, VexW w);
3112 VectorLength l, SIMDPrefix pp,
3113 LeadingOpcode
m, VexW w);
3119 LeadingOpcode
m, VexW w,
3125 LeadingOpcode
m, VexW w,
3130 SIMDPrefix pp, LeadingOpcode
m,
3136 LeadingOpcode
m, VexW w,
3141 SIMDPrefix pp, LeadingOpcode
m,
3147 LeadingOpcode
m, VexW w,
3159 space_before_ =
assembler_->available_space();
3165 int bytes_generated = space_before_ -
assembler_->available_space();
3166 DCHECK(bytes_generated < assembler_->kGap);
#define AVX2_INSTRUCTION(instr, prefix, escape1, escape2, opcode)
#define SSE_CMP_P(instr, imm8)
#define AVX_CMP_P(instr, imm8)
#define FMA(instr, length, prefix, escape1, escape2, extension, opcode)
#define DECLARE_SSE34_AVX_INSTRUCTION(instruction, prefix, escape1, escape2, opcode)
#define DECLARE_SSE2_INSTRUCTION(instruction, prefix, escape, opcode)
#define DECLARE_SSSE3_INSTRUCTION(instruction, prefix, escape1, escape2, opcode)
#define DECLARE_SSE4_INSTRUCTION(instruction, prefix, escape1, escape2, opcode)
#define DECLARE_AVX_INSTRUCTION(instruction, prefix, escape1, escape2, opcode)
#define DECLARE_SSE4_PMOV_AVX2_INSTRUCTION(instruction, prefix, escape1, escape2, opcode)
#define DECLARE_FMA_YMM_INSTRUCTION(instr, prefix, escape1, escape2, extension, opcode)
#define DECLARE_SSE4_PMOV_AVX_INSTRUCTION(instruction, prefix, escape1, escape2, opcode)
#define AVX_3(instr, opcode, impl, SIMDRegister)
#define DECLARE_SSE2_SHIFT_IMM(instruction, prefix, escape, opcode, extension)
#define AVX_SSE_UNOP(instr, escape, opcode)
#define DECLARE_SSE2_PD_AVX_INSTRUCTION(instruction, prefix, escape, opcode)
#define DECLARE_SSE2_PI_AVX_INSTRUCTION(instruction, prefix, escape, opcode)
#define AVX_SCALAR(instr, prefix, escape, opcode)
#define DECLARE_SSE2_UNOP_AVX_YMM_INSTRUCTION( instruction, opcode, DSTRegister, SRCRegister, MemOperand)
#define DECLARE_SSE4_EXTRACT_INSTRUCTION(instruction, prefix, escape1, escape2, opcode)
#define AVX_SSE_BINOP(instr, escape, opcode)
#define AVX_SSE2_SHIFT_IMM(instr, prefix, escape, opcode, extension)
#define ASSEMBLER_INSTRUCTION_LIST(V)
#define DECLARE_SSE2_SHIFT_AVX_INSTRUCTION(instruction, prefix, escape, opcode)
#define DECLARE_SSE2_UNOP_AVX_INSTRUCTION(instruction, prefix, escape, opcode)
#define DECLARE_SSE_INSTRUCTION(instruction, escape, opcode)
#define DECLARE_SSSE3_UNOP_AVX_INSTRUCTION(instruction, prefix, escape1, escape2, opcode)
#define DECLARE_SHIFT_INSTRUCTION(instruction, subcode)
#define DECLARE_SSE4_2_INSTRUCTION(instruction, prefix, escape1, escape2, opcode)
#define SHIFT_INSTRUCTION_LIST(V)
interpreter::OperandScale scale
void shrxl(Register dst, Register src1, Register src2)
void vss(uint8_t op, XMMRegister dst, XMMRegister src1, XMMRegister src2)
void bind_to(Label *L, int pos)
void vpinsrq(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8)
void emit_trace_instruction(Immediate markid)
void pinsrd(XMMRegister dst, Operand src, uint8_t imm8)
void cmpeqsd(XMMRegister dst, XMMRegister src)
void cmppd(XMMRegister dst, XMMRegister src, int8_t cmp)
void movdqu(XMMRegister dst, XMMRegister src)
void vpshuflw(YMMRegister dst, YMMRegister src, uint8_t imm8)
void emit_movzxb(Register dst, Operand src, int size)
void emit_neg(Operand dst, int size)
void palignr(XMMRegister dst, Operand src, uint8_t mask)
void vpblendvb(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask)
void load_rax(Address value, RelocInfo::Mode rmode)
void blsmskq(Register dst, Register src)
void vroundss(XMMRegister dst, XMMRegister src1, Operand src2, RoundingMode mode)
void vmovupd(Operand dst, XMMRegister src)
void arithmetic_op_16(uint8_t opcode, Register reg, Operand rm_reg)
void store_rax(ExternalReference ref)
void vmovhps(Operand dst, XMMRegister src)
void GetCode(LocalIsolate *isolate, CodeDesc *desc)
void movb(Operand dst, Immediate imm)
void near_j(Condition cc, intptr_t disp, RelocInfo::Mode rmode)
void shrd(Register dst, Register src)
void vpblendw(YMMRegister dst, YMMRegister src1, YMMRegister src2, uint8_t mask)
void emit_sse_operand(XMMRegister dst, XMMRegister src)
void aligned_jmp(Label *L, Label::Distance distance=Label::kFar)
void vperm2f128(YMMRegister dst, YMMRegister src1, YMMRegister src2, uint8_t lane)
void bmi1q(uint8_t op, Register reg, Register vreg, Register rm)
void movsd(XMMRegister dst, Operand src)
void sse4_instr(XMMRegister dst, Register src, uint8_t prefix, uint8_t escape1, uint8_t escape2, uint8_t opcode, int8_t imm8)
void emit_imul(Register src, int size)
void popcntl(Register dst, Operand src)
void movsxwq(Register dst, Register src)
void pinsrq(XMMRegister dst, Operand src, uint8_t imm8)
void cmpw(Register dst, Immediate src)
void vpermq(YMMRegister dst, YMMRegister src, uint8_t imm8)
void cmpb(Register dst, Register src)
void movapd(XMMRegister dst, XMMRegister src)
void bmi1l(uint8_t op, Register reg, Register vreg, Operand rm)
void vcmppd(YMMRegister dst, YMMRegister src1, YMMRegister src2, int8_t cmp)
void cvttss2si(Register dst, XMMRegister src)
void vmovapd(YMMRegister dst, YMMRegister src)
void cvtqsi2sd(XMMRegister dst, Operand src)
void emit_repmovs(int size)
void vmovd(XMMRegister dst, Operand src)
~Assembler() override=default
void emit_imul(Operand src, int size)
void vmovaps(YMMRegister dst, YMMRegister src)
void movb(Register dst, Immediate imm)
void pextl(Register dst, Register src1, Operand src2)
void emit_sse_operand(XMMRegister dst)
void arithmetic_op_8(uint8_t opcode, Register reg, Operand rm_reg)
void cvtlsi2sd(XMMRegister dst, Register src)
void emit_xor(Register dst, Immediate src, int size)
void bextrl(Register dst, Operand src1, Register src2)
void arithmetic_op(uint8_t opcode, Register reg, Operand rm_reg, int size)
void vmovlps(Operand dst, XMMRegister src)
void cmpxchgb(Operand dst, Register src)
void pextq(Register dst, Register src1, Register src2)
void movlhps(XMMRegister dst, XMMRegister src)
void pdepq(Register dst, Register src1, Register src2)
void bsrl(Register dst, Operand src)
void cmovl(Condition cc, Register dst, Operand src)
void ucomiss(XMMRegister dst, XMMRegister src)
void shlxl(Register dst, Register src1, Register src2)
void record_farjmp_position(Label *L, int pos)
void emit_sse_operand(Register reg, Operand adr)
void vpdpbusd(YMMRegister dst, YMMRegister src1, YMMRegister src2)
void vpblendvb(YMMRegister dst, YMMRegister src1, YMMRegister src2, YMMRegister mask)
void aligned_cmpb(Register dst, Operand src)
void vmovdqu(XMMRegister dst, XMMRegister src)
void movss(XMMRegister dst, Operand src)
void movupd(XMMRegister dst, Operand src)
void vmovss(XMMRegister dst, XMMRegister src1, XMMRegister src2)
void vroundsd(XMMRegister dst, XMMRegister src1, XMMRegister src2, RoundingMode mode)
void emit_add(Register dst, Operand src, int size)
void vmovdqa(YMMRegister dst, Operand src)
void vmovshdup(XMMRegister dst, XMMRegister src)
void emit_idiv(Register src, int size)
void vblendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask)
void vmovd(XMMRegister dst, Register src)
void vpshuflw(XMMRegister dst, Operand src, uint8_t imm8)
void near_call(Builtin buitin, RelocInfo::Mode rmode)
void vcvttsd2siq(Register dst, Operand src)
void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode)
static bool IsJmpRel(Address addr)
void vpd(uint8_t op, XMMRegister dst, YMMRegister src1, YMMRegister src2)
void emit_sse_operand(XMMRegister dst, Register src)
void vps(uint8_t op, XMMRegister dst, XMMRegister src1, Operand src2)
void vpshufhw(YMMRegister dst, YMMRegister src, uint8_t imm8)
void aligned_cmpw(Operand dst, Register src)
void movss(Operand dst, XMMRegister src)
void vroundps(XMMRegister dst, XMMRegister src, RoundingMode mode)
void vmovq(XMMRegister dst, Register src)
void vcvtdq2pd(YMMRegister dst, XMMRegister src)
void vmovshdup(YMMRegister dst, YMMRegister src)
void testw(Register reg, Immediate mask)
void movq(Register dst, XMMRegister src)
void vblendvpd(YMMRegister dst, YMMRegister src1, YMMRegister src2, YMMRegister mask)
void vcvtph2ps(YMMRegister dst, XMMRegister src)
void vpinsrq(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8)
void emit_inc(Register dst, int size)
void immediate_arithmetic_op(uint8_t subcode, Operand dst, Immediate src, int size)
void cmpb_al(Immediate src)
void vpshufd(XMMRegister dst, Operand src, uint8_t imm8)
void testb(Operand op, Register reg)
void blsiq(Register dst, Operand src)
void emit_movzxw(Register dst, Operand src, int size)
void vpinsrw(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8)
void emit_mov(Register dst, Operand src, int size)
void vinstr(uint8_t op, XMMRegister dst, XMMRegister src1, XMMRegister src2, SIMDPrefix pp, LeadingOpcode m, VexW w, CpuFeature feature=AVX)
void shift(Register dst, int subcode, int size)
void emit_xor(Register dst, Register src, int size)
Assembler(const MaybeAssemblerZone &, const AssemblerOptions &options, std::unique_ptr< AssemblerBuffer > buffer={})
void vpd(uint8_t op, XMMRegister dst, XMMRegister src1, Operand src2)
void palignr(XMMRegister dst, XMMRegister src, uint8_t mask)
void vmovups(XMMRegister dst, Operand src)
void cvtlsi2ss(XMMRegister dst, Register src)
void vps(uint8_t op, YMMRegister dst, YMMRegister src1, Operand src2)
void immediate_arithmetic_op_8(uint8_t subcode, Register dst, Immediate src)
static int deserialization_special_target_size(Address instruction_payload)
void vcmpps(YMMRegister dst, YMMRegister src1, Operand src2, int8_t cmp)
void vpinsrd(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8)
void bzhiq(Register dst, Register src1, Register src2)
void popcntq(Register dst, Operand src)
void emit_inc(Operand dst, int size)
void andnq(Register dst, Register src1, Operand src2)
void movmskps(Register dst, XMMRegister src)
void AllocateAndInstallRequestedHeapNumbers(LocalIsolate *isolate)
void vcvtqsi2sd(XMMRegister dst, XMMRegister src1, Register src2)
void vinstr(uint8_t op, XMMRegister dst, XMMRegister src1, Operand src2, SIMDPrefix pp, LeadingOpcode m, VexW w, CpuFeature feature=AVX)
void mulxq(Register dst1, Register dst2, Register src)
void vmovdqa(XMMRegister dst, XMMRegister src)
void movb(Operand dst, Register src)
void shlxl(Register dst, Operand src1, Register src2)
void vmovdqu(YMMRegister dst, YMMRegister src)
void aligned_testw(Register dst, Register src)
void vcmpps(YMMRegister dst, YMMRegister src1, YMMRegister src2, int8_t cmp)
void vmovlps(XMMRegister dst, XMMRegister src1, Operand src2)
void emit_code_relative_offset(Label *label)
void sse_instr(XMMRegister dst, XMMRegister src, uint8_t escape, uint8_t opcode)
void vmovsd(Operand dst, XMMRegister src)
void cmppd(XMMRegister dst, Operand src, int8_t cmp)
void emit_or(Register dst, Register src, int size)
void tzcntl(Register dst, Operand src)
void rorxq(Register dst, Register src, uint8_t imm8)
void popcntl(Register dst, Register src)
void testb(Register dst, Register src)
void pinsrd(XMMRegister dst, Register src, uint8_t imm8)
void vmovdqa(YMMRegister dst, YMMRegister src)
void bsrq(Register dst, Operand src)
void aligned_cmpw(Register dst, Immediate src)
void j(Condition cc, Address entry, RelocInfo::Mode rmode)
void set_byte_at(int pos, uint8_t value)
void vmovups(YMMRegister dst, Operand src)
void aligned_cmpw(Register dst, Register src)
void testw(Register reg, Operand op)
void aligned_cmpb(Operand dst, Register src)
static void deserialization_set_target_internal_reference_at(Address pc, Address target, WritableJitAllocation &jit_allocation, RelocInfo::Mode mode=RelocInfo::INTERNAL_REFERENCE)
void emit_aligned_cmp(Register dst, Register src, int size)
void emit_xchg(Register dst, Operand src, int size)
void vmovss(XMMRegister dst, Operand src)
void store_rax(Address dst, RelocInfo::Mode mode)
void j(Condition cc, Label *L, Label::Distance distance=Label::kFar)
void aligned_testb(Register reg, Immediate mask)
void mulxl(Register dst1, Register dst2, Operand src)
void sarxq(Register dst, Operand src1, Register src2)
void emit_not(Register dst, int size)
void xchgb(Register reg, Operand op)
void aligned_testb(Register reg, Operand op)
void lzcntq(Register dst, Operand src)
void movd(XMMRegister dst, Register src)
void vinstr(uint8_t op, XMMRegister dst, XMMRegister src1, XMMRegister src2, SIMDPrefix pp, LeadingOpcode m, VexW w, CpuFeature=AVX)
void vcvttss2siq(Register dst, XMMRegister src)
void shrxq(Register dst, Operand src1, Register src2)
void movsxwq(Register dst, Operand src)
void bmi2l(SIMDPrefix pp, uint8_t op, Register reg, Register vreg, Operand rm)
void emit_test(Operand op, Register reg, int size)
void emit_sub(Register dst, Register src, int size)
void emit_and(Register dst, Register src, int size)
void vbroadcastss(XMMRegister dst, XMMRegister src)
void bextrl(Register dst, Register src1, Register src2)
void rorxl(Register dst, Register src, uint8_t imm8)
void emit_or(Operand dst, Register src, int size)
void movsxlq(Register dst, Operand src)
void bsfq(Register dst, Register src)
void roundss(XMMRegister dst, Operand src, RoundingMode mode)
void emit_cmp(Operand dst, Register src, int size)
void vmovups(Operand dst, XMMRegister src)
void mulxq(Register dst1, Register dst2, Operand src)
void aligned_cmpb(Register dst, Register src)
void movups(XMMRegister dst, XMMRegister src)
void testb(Register reg, Immediate mask)
void vmovdqu(YMMRegister dst, Operand src)
void pinsrq(XMMRegister dst, Register src, uint8_t imm8)
void vmovupd(YMMRegister dst, Operand src)
void cmovq(Condition cc, Register dst, Register src)
void vcvtqsi2ss(XMMRegister dst, XMMRegister src1, Register src2)
int available_space() const
void vps(uint8_t op, YMMRegister dst, YMMRegister src1, YMMRegister src2)
void vmovups(YMMRegister dst, YMMRegister src)
void vmovhps(XMMRegister dst, XMMRegister src1, Operand src2)
void sse_instr(XMMRegister dst, Operand src, uint8_t escape, uint8_t opcode)
void vblendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask)
void cmpb(Operand dst, Register src)
void ssse3_instr(XMMRegister dst, XMMRegister src, uint8_t prefix, uint8_t escape1, uint8_t escape2, uint8_t opcode)
void emit_aligned_cmp(Operand dst, Immediate src, int size)
void blsrq(Register dst, Operand src)
void vmovddup(XMMRegister dst, Operand src)
void vcvtdq2pd(YMMRegister dst, Operand src)
void emit_aligned_test(Register dst, Register src, int size)
void vcmppd(YMMRegister dst, YMMRegister src1, Operand src2, int8_t cmp)
void emit_xchg(Register dst, Register src, int size)
void cmpeqss(XMMRegister dst, XMMRegister src)
void movw(Operand dst, Immediate imm)
void vcvttsd2si(Register dst, XMMRegister src)
void roundsd(XMMRegister dst, Operand src, RoundingMode mode)
void long_at_put(int pos, uint32_t x)
void pinsrw(XMMRegister dst, Operand src, uint8_t imm8)
void vmovsd(XMMRegister dst, Operand src)
void vmovmskps(Register dst, XMMRegister src)
void emit_imul(Register dst, Operand src, Immediate imm, int size)
void shufps(XMMRegister dst, XMMRegister src, uint8_t imm8)
void cmpw(Operand dst, Immediate src)
void vcmpps(XMMRegister dst, XMMRegister src1, XMMRegister src2, int8_t cmp)
void btq(Operand dst, Register src)
void sarxq(Register dst, Register src1, Register src2)
void emit_div(Register src, int size)
void movsxwl(Register dst, Operand src)
void roundpd(XMMRegister dst, XMMRegister src, RoundingMode mode)
void vpmovmskb(Register dst, XMMRegister src)
void vucomiss(XMMRegister dst, Operand src)
void shlxq(Register dst, Operand src1, Register src2)
void cmpb(Register dst, Operand src)
void vpshufhw(XMMRegister dst, Operand src, uint8_t imm8)
void movdqa(XMMRegister dst, Operand src)
void bsrq(Register dst, Register src)
void testw(Operand op, Register reg)
void ucomiss(XMMRegister dst, Operand src)
void sse4_2_instr(XMMRegister dst, Operand src, uint8_t prefix, uint8_t escape1, uint8_t escape2, uint8_t opcode)
void shrxq(Register dst, Register src1, Register src2)
void tzcntl(Register dst, Register src)
void emit_modrm(int code, Register rm_reg)
void vpalignr(YMMRegister dst, YMMRegister src1, Operand src2, uint8_t imm8)
void cvtqsi2sd(XMMRegister dst, Register src)
void vmovupd(XMMRegister dst, Operand src)
void sarxl(Register dst, Register src1, Register src2)
void vmovapd(XMMRegister dst, XMMRegister src)
void vcvtsd2si(Register dst, XMMRegister src)
void emit_mov(Register dst, Immediate64 value, int size)
void andnq(Register dst, Register src1, Register src2)
void vpd(uint8_t op, YMMRegister dst, YMMRegister src1, Operand src2)
void immediate_arithmetic_op_16(uint8_t subcode, Operand dst, Immediate src)
void cvttsd2si(Register dst, Operand src)
void pdepq(Register dst, Register src1, Operand src2)
void pshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle)
void emit_and(Register dst, Immediate src, int size)
void bmi1l(uint8_t op, Register reg, Register vreg, Register rm)
void blsiq(Register dst, Register src)
Operand256 Operand256 void lddqu(XMMRegister dst, Operand src)
void pextq(Register dst, Register src1, Operand src2)
void blsrq(Register dst, Register src)
void emit_imul(Register dst, Operand src, int size)
void vpblendw(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t mask)
void vextractf128(XMMRegister dst, YMMRegister src, uint8_t lane)
void insertps(XMMRegister dst, XMMRegister src, uint8_t imm8)
V8_NOINLINE V8_PRESERVE_MOST void GrowBuffer()
void vpinsrb(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8)
void vcvttss2si(Register dst, XMMRegister src)
void emit_and(Register dst, Operand src, int size)
void emit_test(Register reg, Immediate mask, int size)
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data=0)
void vpshuflw(XMMRegister dst, XMMRegister src, uint8_t imm8)
void bsfq(Register dst, Operand src)
void shift(Register dst, Immediate shift_amount, int subcode, int size)
void movddup(XMMRegister dst, XMMRegister src)
void aligned_testw(Operand op, Register reg)
void setcc(Condition cc, Register reg)
void emit_test(Register dst, Register src, int size)
void vmovss(Operand dst, XMMRegister src)
void vcvtlsi2ss(XMMRegister dst, XMMRegister src1, Operand src2)
void pushq(Immediate value)
void arithmetic_op(uint8_t opcode, Register reg, Register rm_reg, int size)
static uint32_t uint32_constant_at(Address pc, Address constant_pool)
void emit_mov(Operand dst, Register src, int size)
void movddup(XMMRegister dst, Operand src)
void emit_cmp(Register dst, Immediate src, int size)
void vbroadcastsd(YMMRegister dst, Operand src)
void testb(Operand op, Immediate mask)
void blsmskl(Register dst, Register src)
void movsxbl(Register dst, Operand src)
void movb(Register dst, Operand src)
void rorxq(Register dst, Operand src, uint8_t imm8)
void sse4_instr(XMMRegister dst, Operand src, uint8_t prefix, uint8_t escape1, uint8_t escape2, uint8_t opcode)
void vroundpd(YMMRegister dst, YMMRegister src, RoundingMode mode)
void emit_aligned_test(Operand op, Register reg, int size)
void load_rax(ExternalReference ext)
void bextrq(Register dst, Register src1, Register src2)
void sse4_instr(Operand dst, XMMRegister src, uint8_t prefix, uint8_t escape1, uint8_t escape2, uint8_t opcode, int8_t imm8)
void emit_dec(Register dst, int size)
void pblendw(XMMRegister dst, XMMRegister src, uint8_t mask)
void cmpb(Register dst, Immediate src)
void vpd(uint8_t op, YMMRegister dst, YMMRegister src1, YMMRegister src2)
void jmp(Handle< Code > target, RelocInfo::Mode rmode)
void pshufd(XMMRegister dst, Operand src, uint8_t shuffle)
void vroundsd(XMMRegister dst, XMMRegister src1, Operand src2, RoundingMode mode)
void vcmpeqsd(XMMRegister dst, XMMRegister src)
void emit_and(Operand dst, Register src, int size)
void bzhil(Register dst, Operand src1, Register src2)
void vmovddup(YMMRegister dst, Operand src)
void jmp(Register adr, bool notrack=false)
void vcvtqsi2ss(XMMRegister dst, XMMRegister src1, Operand src2)
void sse4_instr(XMMRegister dst, XMMRegister src, uint8_t prefix, uint8_t escape1, uint8_t escape2, uint8_t opcode)
BuiltinJumpTableInfoWriter builtin_jump_table_info_writer_
void vmovddup(YMMRegister dst, YMMRegister src)
void sse2_instr(XMMRegister reg, uint8_t imm8, uint8_t prefix, uint8_t escape, uint8_t opcode, int extension)
void movss(XMMRegister dst, XMMRegister src)
void cmpw(Register dst, Register src)
void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, Register src2)
void subb(Register dst, Immediate src)
void vshufps(YMMRegister dst, YMMRegister src1, YMMRegister src2, uint8_t imm8)
void blsil(Register dst, Register src)
static void set_uint32_constant_at(Address pc, Address constant_pool, uint32_t new_constant, WritableJitAllocation *jit_allocation=nullptr, ICacheFlushMode icache_flush_mode=FLUSH_ICACHE_IF_NEEDED)
uint32_t long_at(int pos)
void movsxwl(Register dst, Register src)
void cmpps(XMMRegister dst, Operand src, int8_t cmp)
void vcvttss2si(Register dst, Operand src)
void shift(Operand dst, Immediate shift_amount, int subcode, int size)
void vroundss(XMMRegister dst, XMMRegister src1, XMMRegister src2, RoundingMode mode)
void vps(uint8_t op, YMMRegister dst, YMMRegister src1, YMMRegister src2, uint8_t imm8)
void bswapq(Register dst)
void vinserti128(YMMRegister dst, YMMRegister src1, XMMRegister src2, uint8_t lane)
void vpshufhw(YMMRegister dst, Operand src, uint8_t imm8)
void pdepl(Register dst, Register src1, Register src2)
void rorxl(Register dst, Operand src, uint8_t imm8)
void vcvtps2ph(XMMRegister dst, YMMRegister src, uint8_t imm8)
void emit_xor(Operand dst, Register src, int size)
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode)
void vmovaps(YMMRegister dst, Operand src)
void emit_aligned_cmp(Register dst, Immediate src, int size)
void blsmskl(Register dst, Operand src)
void xaddl(Operand dst, Register src)
void sse2_instr(XMMRegister dst, Operand src, uint8_t prefix, uint8_t escape, uint8_t opcode)
void vlddqu(XMMRegister dst, Operand src)
void j(Condition cc, Handle< Code > target, RelocInfo::Mode rmode)
void vucomiss(XMMRegister dst, XMMRegister src)
bool is_optimizable_farjmp(int idx)
void vinstr(uint8_t op, Reg1 dst, Reg2 src1, Op src2, SIMDPrefix pp, LeadingOpcode m, VexW w, CpuFeature feature=AVX2)
void tzcntq(Register dst, Operand src)
void vps(uint8_t op, XMMRegister dst, XMMRegister src1, XMMRegister src2)
void btrq(Register dst, Immediate imm8)
void vmovd(Register dst, XMMRegister src)
void vbroadcastss(YMMRegister dst, XMMRegister src)
void movdqa(Operand dst, XMMRegister src)
static uint8_t * emit(uint8_t *__restrict pc, T t)
void vcvtqsi2sd(XMMRegister dst, XMMRegister src1, Operand src2)
void immediate_arithmetic_op_8(uint8_t subcode, Operand dst, Immediate src)
void cmpltsd(XMMRegister dst, XMMRegister src)
void emit_xor(Operand dst, Immediate src, int size)
void emit_imul(Register dst, Register src, int size)
void psrldq(XMMRegister dst, uint8_t shift)
void vcmppd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int8_t cmp)
void vblendvps(YMMRegister dst, YMMRegister src1, YMMRegister src2, YMMRegister mask)
void vmovdqu(Operand dst, XMMRegister src)
void movdqu(XMMRegister dst, Operand src)
void movsd(Operand dst, XMMRegister src)
void vpshuflw(YMMRegister dst, Operand src, uint8_t imm8)
int WriteBuiltinJumpTableInfos()
void fisttp_d(Operand adr)
void emit_aligned_test(Operand op, Immediate mask, int size)
void aligned_testb(Operand op, Immediate mask)
void AlignForJCCErratum(int inst_size)
void movsd(XMMRegister dst, XMMRegister src)
void pushq_imm32(int32_t imm32)
void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, Operand src2)
void emit_imul(Register dst, Register src, Immediate imm, int size)
void emit_rex(P1 p1, P2 p2, int size)
void movaps(XMMRegister dst, XMMRegister src)
void vcvtlsi2ss(XMMRegister dst, XMMRegister src1, Register src2)
void vpshufd(YMMRegister dst, Operand src, uint8_t imm8)
void movsxbq(Register dst, Operand src)
void btsq(Register dst, Immediate imm8)
void jmp(Label *L, Label::Distance distance=Label::kFar)
void blsil(Register dst, Operand src)
void haddps(XMMRegister dst, XMMRegister src)
void vcmpps(XMMRegister dst, XMMRegister src1, Operand src2, int8_t cmp)
void pdepl(Register dst, Register src1, Operand src2)
void GetCode(Isolate *isolate, CodeDesc *desc)
void movsxbl(Register dst, Register src)
void cvtlsi2ss(XMMRegister dst, Operand src)
void xaddb(Operand dst, Register src)
void btsq(Operand dst, Register src)
void aligned_cmpb(Register dst, Immediate src)
void vmovdqu(XMMRegister dst, Operand src)
void fisttp_s(Operand adr)
V8_NOINLINE void emit_label_operand(int rm, Label *label, int addend=0)
void arithmetic_op_16(uint8_t opcode, Register reg, Register rm_reg)
void emit_sse_operand(Register dst, XMMRegister src)
void aligned_testb(Operand op, Register reg)
static void set_target_address_at(Address pc, Address constant_pool, Address target, WritableJitAllocation *writable_jit_allocation=nullptr, ICacheFlushMode icache_flush_mode=FLUSH_ICACHE_IF_NEEDED)
void blsrl(Register dst, Register src)
void cvttsd2si(Register dst, XMMRegister src)
void andb(Register dst, Immediate src)
void movsxbq(Register dst, Register src)
void cvtqsi2ss(XMMRegister dst, Register src)
void emit_add(Operand dst, Immediate src, int size)
void vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8)
void emit_test(Register reg, Operand op, int size)
void bmi2q(SIMDPrefix pp, uint8_t op, Register reg, Register vreg, Operand rm)
void emit_or(Operand dst, Immediate src, int size)
void cvttsd2siq(Register dst, Operand src)
void cmovl(Condition cc, Register dst, Register src)
void shld(Register dst, Register src)
void arithmetic_op_8(uint8_t opcode, Register reg, Register rm_reg)
void aligned_cmpw(Operand dst, Immediate src)
void xaddw(Operand dst, Register src)
void movq_heap_number(Register dst, double value)
void MaybeEmitOutOfLineConstantPool()
void fistp_d(Operand adr)
void vmovaps(XMMRegister dst, Operand src)
void emit_sse_operand(XMMRegister reg, Operand adr)
void bsfl(Register dst, Operand src)
void emit_sub(Register dst, Immediate src, int size)
void cmpps(XMMRegister dst, XMMRegister src, int8_t cmp)
void pshufhw(XMMRegister dst, Operand src, uint8_t shuffle)
void vmovdqa(XMMRegister dst, Operand src)
void vpdpbssd(XMMRegister dst, XMMRegister src1, XMMRegister src2)
void GetCode(LocalIsolate *isolate, CodeDesc *desc, int safepoint_table_offset, int handler_table_offset)
void movshdup(XMMRegister dst, XMMRegister src)
void tzcntq(Register dst, Register src)
void andnl(Register dst, Register src1, Operand src2)
void cmovq(Condition cc, Register dst, Operand src)
void lzcntl(Register dst, Register src)
static bool IsNop(Address addr)
void pshuflw(XMMRegister dst, Operand src, uint8_t shuffle)
void pextrq(Register dst, XMMRegister src, int8_t imm8)
void emit_modrm(Register reg, Register rm_reg)
void cvtsd2si(Register dst, XMMRegister src)
void aligned_testw(Operand op, Immediate mask)
void vpshufd(XMMRegister dst, XMMRegister src, uint8_t imm8)
void haddps(XMMRegister dst, Operand src)
void pinsrb(XMMRegister dst, Register src, uint8_t imm8)
void vmovsd(XMMRegister dst, XMMRegister src1, XMMRegister src2)
void vcvtph2ps(XMMRegister dst, XMMRegister src)
void pinsrb(XMMRegister dst, Operand src, uint8_t imm8)
void emit_and(Operand dst, Immediate src, int size)
void aligned_testw(Register reg, Immediate mask)
void cvttps2dq(XMMRegister dst, XMMRegister src)
void vshufps(XMMRegister dst, XMMRegister src1, XMMRegister src2, uint8_t imm8)
void movw(Operand dst, Register src)
void emit_cmpxchg(Operand dst, Register src, int size)
void aligned_j(Condition cc, Label *L, Label::Distance distance=Label::kFar)
void vpd(uint8_t op, XMMRegister dst, YMMRegister src1, Operand src2)
void sse2_instr(XMMRegister dst, XMMRegister src, uint8_t prefix, uint8_t escape, uint8_t opcode)
void vpblendw(XMMRegister dst, XMMRegister src1, XMMRegister src2, uint8_t mask)
void shift(Operand dst, int subcode, int size)
void bzhiq(Register dst, Operand src1, Register src2)
void cmpxchgw(Operand dst, Register src)
void cmpw(Operand dst, Register src)
void pextl(Register dst, Register src1, Register src2)
void emit_cmp(Register dst, Register src, int size)
void jmp(Operand src, bool notrack=false)
void emit_aligned_cmp(Register dst, Operand src, int size)
void aligned_testw(Register reg, Operand op)
void movq(Register dst, uint64_t value)
void call(Operand operand)
void emit_dec(Operand dst, int size)
void cvttss2siq(Register dst, XMMRegister src)
void sarxl(Register dst, Operand src1, Register src2)
void lzcntq(Register dst, Register src)
void emit_farith(int b1, int b2, int i)
void bsrl(Register dst, Register src)
void immediate_arithmetic_op_16(uint8_t subcode, Register dst, Immediate src)
void vpalignr(YMMRegister dst, YMMRegister src1, YMMRegister src2, uint8_t imm8)
void cvtsd2siq(Register dst, XMMRegister src)
void GetCode(LocalIsolate *isolate, CodeDesc *desc, SafepointTableBuilderBase *safepoint_table_builder, int handler_table_offset)
void pblendw(XMMRegister dst, Operand src, uint8_t mask)
void emit_cmp(Operand dst, Immediate src, int size)
void vinsertps(XMMRegister dst, XMMRegister src1, XMMRegister src2, uint8_t imm8)
void mulxl(Register dst1, Register dst2, Register src)
void vmovaps(XMMRegister dst, XMMRegister src)
void vptest(YMMRegister dst, YMMRegister src)
void vcvttps2dq(YMMRegister dst, Operand src)
void emit_add(Operand dst, Register src, int size)
void vcvttss2siq(Register dst, Operand src)
void emit_sub(Register dst, Operand src, int size)
void vroundps(YMMRegister dst, YMMRegister src, RoundingMode mode)
void fisub_s(Operand adr)
void incsspq(Register number_of_words)
void vmovups(Operand dst, YMMRegister src)
void vpblendw(YMMRegister dst, YMMRegister src1, Operand src2, uint8_t mask)
void vmovq(XMMRegister dst, Operand src)
void cvttsd2siq(Register dst, XMMRegister src)
void emit_add(Register dst, Immediate src, int size)
void vps(uint8_t op, XMMRegister dst, XMMRegister src1, XMMRegister src2, uint8_t imm8)
void vcvttsd2siq(Register dst, XMMRegister src)
void emit_sub(Operand dst, Immediate src, int size)
void emit_aligned_test(Register reg, Operand op, int size)
void vroundpd(XMMRegister dst, XMMRegister src, RoundingMode mode)
void vpinsrw(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8)
void insertps(XMMRegister dst, Operand src, uint8_t imm8)
void emit_operand(Register reg, Operand adr)
void movdqa(XMMRegister dst, XMMRegister src)
void blsmskq(Register dst, Operand src)
void movq(Register dst, int64_t value)
void cvtqsi2ss(XMMRegister dst, Operand src)
void vcmpeqss(XMMRegister dst, XMMRegister src)
void testw(Operand op, Immediate mask)
void vpd(uint8_t op, XMMRegister dst, XMMRegister src1, XMMRegister src2)
void fma_instr(uint8_t op, XMMRegister dst, XMMRegister src1, XMMRegister src2, VectorLength l, SIMDPrefix pp, LeadingOpcode m, VexW w)
void vpinsrb(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8)
void vmovmskpd(Register dst, XMMRegister src)
void emit_mov(Register dst, Register src, int size)
void movq(XMMRegister dst, Register src)
void near_jmp(intptr_t disp, RelocInfo::Mode rmode)
void vpextrq(Register dst, XMMRegister src, int8_t imm8)
void sse4_2_instr(XMMRegister dst, XMMRegister src, uint8_t prefix, uint8_t escape1, uint8_t escape2, uint8_t opcode)
void emit_or(Register dst, Immediate src, int size)
void movmskpd(Register dst, XMMRegister src)
void popcntq(Register dst, Register src)
void lzcntl(Register dst, Operand src)
void movw(Register dst, Operand src)
void fistp_s(Operand adr)
void immediate_arithmetic_op(uint8_t subcode, Register dst, Immediate src, int size)
void emit_rex(P1 p1, int size)
void emit_sbb(Register dst, Register src, int size)
void movd(Register dst, XMMRegister src)
void emit_test(Operand op, Immediate mask, int size)
void emit_xor(Register dst, Operand src, int size)
void fma_instr(uint8_t op, Reg1 dst, Reg2 src1, Op src2, VectorLength l, SIMDPrefix pp, LeadingOpcode m, VexW w)
void xchgw(Register reg, Operand op)
void vinsertps(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8)
void movsxlq(Register dst, Register src)
void sse4_instr(Register dst, XMMRegister src, uint8_t prefix, uint8_t escape1, uint8_t escape2, uint8_t opcode, int8_t imm8)
void movhps(XMMRegister dst, Operand src)
void pmovmskb(Register dst, XMMRegister src)
void movd(XMMRegister dst, Operand src)
void movups(XMMRegister dst, Operand src)
void emit_add(Register dst, Register src, int size)
void andnl(Register dst, Register src1, Register src2)
void movl(Operand dst, Label *src)
void vpshufd(YMMRegister dst, YMMRegister src, uint8_t imm8)
void emit_aligned_test(Register reg, Immediate mask, int size)
void shrxl(Register dst, Operand src1, Register src2)
void emit_lea(Register dst, Operand src, int size)
void bmi1q(uint8_t op, Register reg, Register vreg, Operand rm)
void vpshufhw(XMMRegister dst, XMMRegister src, uint8_t imm8)
void bmi2q(SIMDPrefix pp, uint8_t op, Register reg, Register vreg, Register rm)
void vbroadcastss(YMMRegister dst, Operand src)
void emit_aligned_cmp(Operand dst, Register src, int size)
void roundps(XMMRegister dst, XMMRegister src, RoundingMode mode)
void vpalignr(XMMRegister dst, XMMRegister src1, XMMRegister src2, uint8_t imm8)
void vmovq(Register dst, XMMRegister src)
void testb(Register reg, Operand op)
DECLARE_SSE2_UNOP_AVX_YMM_INSTRUCTION(sqrtpd, 51, YMMRegister, YMMRegister, Operand) DECLARE_SSE2_UNOP_AVX_YMM_INSTRUCTION(cvtpd2ps
void cvttps2dq(XMMRegister dst, Operand src)
void vcmppd(XMMRegister dst, XMMRegister src1, Operand src2, int8_t cmp)
void sub_sp_32(uint32_t imm)
void vcvtdq2pd(XMMRegister dst, XMMRegister src)
void movhlps(XMMRegister dst, XMMRegister src)
Assembler(const AssemblerOptions &, std::unique_ptr< AssemblerBuffer >={})
void vbroadcastsd(YMMRegister dst, XMMRegister src)
void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle)
void cvtdq2pd(XMMRegister dst, XMMRegister src)
void emit_neg(Register dst, int size)
void cmpb(Operand dst, Immediate src)
void movdqu(Operand dst, XMMRegister src)
void pinsrw(XMMRegister dst, Register src, uint8_t imm8)
void emit_movzxb(Register dst, Register src, int size)
void movlps(Operand dst, XMMRegister src)
void vpermq(YMMRegister dst, Operand src, uint8_t imm8)
void movups(Operand dst, XMMRegister src)
void vcvtps2ph(XMMRegister dst, XMMRegister src, uint8_t imm8)
void vss(uint8_t op, XMMRegister dst, XMMRegister src1, Operand src2)
void FinalizeJumpOptimizationInfo()
void emit_cmp(Register dst, Operand src, int size)
void xaddq(Operand dst, Register src)
void testw(Register dst, Register src)
void bmi2l(SIMDPrefix pp, uint8_t op, Register reg, Register vreg, Register rm)
void vmovups(XMMRegister dst, XMMRegister src)
void emit_operand(int rm, Operand adr)
void cvttss2siq(Register dst, Operand src)
void call(Handle< Code > target, RelocInfo::Mode rmode=RelocInfo::CODE_TARGET)
void bsfl(Register dst, Register src)
bool buffer_overflow() const
void cmpw(Register dst, Operand src)
void vmovdqu(Operand dst, YMMRegister src)
void vptest(YMMRegister dst, Operand src)
void cvttss2si(Register dst, Operand src)
void vmovhlps(XMMRegister dst, XMMRegister src1, XMMRegister src2)
void vmovlhps(XMMRegister dst, XMMRegister src1, XMMRegister src2)
void near_call(intptr_t disp, RelocInfo::Mode rmode)
void movupd(Operand dst, XMMRegister src)
void blsrl(Register dst, Operand src)
void shlxq(Register dst, Register src1, Register src2)
void aligned_cmpw(Register dst, Operand src)
void movaps(XMMRegister dst, Operand src)
int SizeOfCodeGeneratedSince(Label *label)
void movq(XMMRegister dst, XMMRegister src)
void aligned_testb(Register dst, Register src)
DirectHandle< Code > code_target_object_handle_at(Address pc)
void vcvttps2dq(XMMRegister dst, XMMRegister src)
static bool UseConstPoolFor(RelocInfo::Mode rmode)
void vcvttps2dq(YMMRegister dst, YMMRegister src)
void vmovupd(Operand dst, YMMRegister src)
void RecordDeoptReason(DeoptimizeReason reason, uint32_t node_id, SourcePosition position, int id)
void movlps(XMMRegister dst, Operand src)
void emit_movzxw(Register dst, Register src, int size)
void vpalignr(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8)
void ssse3_instr(XMMRegister dst, Operand src, uint8_t prefix, uint8_t escape1, uint8_t escape2, uint8_t opcode)
void aligned_cmpb(Operand dst, Immediate src)
void bextrq(Register dst, Operand src1, Register src2)
void vsd(uint8_t op, Reg dst, Reg src1, Op src2)
void bzhil(Register dst, Register src1, Register src2)
void emit_sub(Operand dst, Register src, int size)
void vbroadcastss(XMMRegister dst, Operand src)
void vcvttsd2si(Register dst, Operand src)
void WriteBuiltinJumpTableEntry(Label *label, int table_pos)
void movhps(Operand dst, XMMRegister src)
void emit_mov(Register dst, Immediate value, int size)
void bswapl(Register dst)
void emit_not(Operand dst, int size)
void cvtlsi2sd(XMMRegister dst, Operand src)
void vmovddup(XMMRegister dst, XMMRegister src)
void vpdpbssd(YMMRegister dst, YMMRegister src1, YMMRegister src2)
static Builtin target_builtin_at(Address pc)
void movq(XMMRegister dst, Operand src)
void pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle)
void emit_mov(Operand dst, Immediate value, int size)
void emit_or(Register dst, Operand src, int size)
void movq_imm64(Register dst, int64_t value)
void vpdpbusd(XMMRegister dst, XMMRegister src1, XMMRegister src2)
bool IsMoveRipRelative(Address instr)
ConstPool(Assembler *assm)
static constexpr int kMoveRipRelativeDispOffset
static constexpr int kRipRelativeDispSize
static constexpr int kMoveImm64Offset
static constexpr uint32_t kMoveRipRelativeMask
bool AddSharedEntry(uint64_t data, int offset)
std::multimap< uint64_t, int > entries_
bool TryRecordEntry(intptr_t data, RelocInfo::Mode mode)
static constexpr uint32_t kMoveRipRelativeInstr
V8_INLINE EnsureSpace(Assembler *assembler)
V8_INLINE Operand256(Register base, int32_t disp)
V8_INLINE Operand256(Register base, Register index, ScaleFactor scale, int32_t disp)
V8_INLINE Operand256(Register index, ScaleFactor scale, int32_t disp)
Operand256(const Operand256 &) V8_NOEXCEPT=default
Operand256 & operator=(const Operand256 &) V8_NOEXCEPT=default
V8_INLINE void set_disp32(int disp)
V8_INLINE Operand(Register index, ScaleFactor scale, int32_t disp)
V8_INLINE constexpr bool is_label_operand() const
Operand(Operand base, int32_t offset)
V8_INLINE Operand(Label *label, int addend=0)
V8_INLINE constexpr void set_disp8(int disp)
Operand(const Operand &) V8_NOEXCEPT=default
V8_INLINE constexpr void set_sib(ScaleFactor scale, Register index, Register base)
V8_INLINE constexpr Operand(Register base, int32_t disp)
V8_INLINE const LabelOperand & label() const
V8_INLINE constexpr uint8_t rex() const
Operand & operator=(const Operand &) V8_NOEXCEPT=default
V8_INLINE const MemoryOperand & memory() const
V8_INLINE constexpr void set_modrm(int mod, Register rm_reg)
V8_INLINE Operand(Register base, Register index, ScaleFactor scale, int32_t disp)
bool AddressUsesRegister(Register reg) const
constexpr int8_t code() const
constexpr int low_bits() const
constexpr int high_bit() const
#define V8_ENABLE_SANDBOX_BOOL
#define DECLARE_INSTRUCTION(name, opcode_name, opcode_value)
BytecodeAssembler & assembler_
#define EXPORT_TEMPLATE_DECLARE(export)
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage * MB
#define FMA_INSTRUCTION_LIST(V)
#define FMA_PD_INSTRUCTION_LIST(V)
#define FMA_PS_INSTRUCTION_LIST(V)
#define SSE_UNOP_INSTRUCTION_LIST(V)
#define SSSE3_UNOP_INSTRUCTION_LIST(V)
#define SSE4_INSTRUCTION_LIST(V)
#define SSE2_INSTRUCTION_LIST(V)
#define SSE2_INSTRUCTION_LIST_SD(V)
#define SSSE3_INSTRUCTION_LIST(V)
#define AVX2_BROADCAST_LIST(V)
constexpr VFPRoundingMode kRoundToNearest
constexpr int kTaggedSize
bool operator!=(ExternalReference lhs, ExternalReference rhs)
@ kUnsignedGreaterThanEqual
std::variant< Zone *, AccountingAllocator * > MaybeAssemblerZone
constexpr int kSystemPointerSize
constexpr bool SmiValuesAre31Bits()
Condition NegateCondition(Condition cond)
constexpr VFPRoundingMode kRoundToZero
@ times_system_pointer_size
@ times_external_pointer_size
@ times_half_system_pointer_size
#define DCHECK_NOT_NULL(val)
#define DCHECK_NE(v1, v2)
#define DCHECK_GE(v1, v2)
#define DCHECK(condition)
#define DCHECK_LT(v1, v2)
#define DCHECK_EQ(v1, v2)
#define ASSERT_TRIVIALLY_COPYABLE(T)
#define V8_EXPORT_PRIVATE
#define V8_UNLIKELY(condition)
std::unique_ptr< ValueMirror > value
#define SSE4_EXTRACT_INSTRUCTION_LIST(V)
#define SSE_INSTRUCTION_LIST_SS(V)
#define SSE2_INSTRUCTION_LIST_PI(V)
#define SSE4_UNOP_INSTRUCTION_LIST_PMOV(V)
#define SSE_BINOP_INSTRUCTION_LIST(V)
#define SSE2_UNOP_INSTRUCTION_LIST(V)
#define SSE2_INSTRUCTION_LIST_SHIFT(V)
#define SSE2_INSTRUCTION_LIST_PD(V)
#define SSE4_UNOP_INSTRUCTION_LIST(V)
#define SSE4_2_INSTRUCTION_LIST(V)
#define SSE2_INSTRUCTION_LIST_SHIFT_IMM(V)