#ifndef V8_WASM_BASELINE_S390_LIFTOFF_ASSEMBLER_S390_INL_H_
#define V8_WASM_BASELINE_S390_LIFTOFF_ASSEMBLER_S390_INL_H_
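
// Liftoff is V8's WebAssembly baseline compiler; this header provides the
// s390x (z/Architecture) inline implementations of its assembler helpers.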
      assm->StoreV128(src.reg().fp(), dst, scratch);
  } else if (src.is_const()) {
    if (src.kind() == kI32) {
      assm->mov(scratch, Operand(src.i32_const()));
      assm->StoreU32(scratch, dst);
    } else {
      assm->mov(scratch, Operand(static_cast<int64_t>(src.i32_const())));
      assm->StoreU64(scratch, dst);
    }
  static_assert(std::find(std::begin(wasm::kGpParamRegisters),
                          std::end(wasm::kGpParamRegisters),
                          kLiftoffFrameSetupFunctionReg) ==
                std::end(wasm::kGpParamRegisters));
  Register scratch = ip;
  LoadConstant(LiftoffRegister(kLiftoffFrameSetupFunctionReg),
               WasmValue(declared_function_index));
void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
                                       int stack_param_delta) {
  Register scratch = r1;
  int slot_count = num_callee_stack_params + 2;
  for (int i = slot_count - 1; i >= 0; --i) {
void LiftoffAssembler::PatchPrepareStackFrame(
    int offset, SafepointTableBuilder* safepoint_table_builder,
    bool feedback_vector_slot, size_t stack_param_slots) {
  if (feedback_vector_slot) {
  constexpr int LayInstrSize = 6;
  patching_assembler.branchOnCond(al, jump_offset, true, true);
  if (frame_size < v8_flags.stack_size * 1024) {
    AddU64(stack_limit, Operand(frame_size));
  if (v8_flags.experimental_wasm_growable_stacks) {
    regs_to_save.set(WasmHandleStackOverflowDescriptor::FrameBaseRegister());
    AddS64(WasmHandleStackOverflowDescriptor::FrameBaseRegister(), fp,
    safepoint_table_builder->DefineSafepoint(this);
    Call(static_cast<Address>(Builtin::kWasmStackOverflow),
         RelocInfo::WASM_STUB_CALL);
    safepoint_table_builder->DefineSafepoint(this);
                                    const FreezeCacheState& frozen) {
           WasmTrustedInstanceData::kTieringBudgetArrayOffset);
  int budget_arr_offset = kInt32Size * declared_func_index;
  MemOperand budget_addr(budget_array, budget_arr_offset);
  SubS32(budget, Operand(budget_used));
  if (!v8_flags.experimental_wasm_growable_stacks) {
  Label done, call_runtime;
  mov(old_fp.gp(), fp);
  switch (value.type().kind()) {
    case kI32:
      mov(reg.gp(), Operand(value.to_i32()));
      break;
    case kI64:
      mov(reg.gp(), Operand(value.to_i64()));
      break;
                                         uint32_t* protected_load_pc,
  CHECK(is_int20(offset_imm));
  if (offset_reg != no_reg && shift_amount != 0) {
  if (protected_load_pc) *protected_load_pc = pc_offset();

                                        int32_t offset_imm) {
                                          int32_t offset_imm, Register src,
                                          uint32_t* protected_store_pc,
                                          SkipWriteBarrier skip_write_barrier) {
  if (protected_store_pc) *protected_store_pc = pc_offset();
  if (skip_write_barrier || v8_flags.disable_write_barriers) return;
                               StubCallMode::kCallWasmRuntimeStub);
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                            Register offset_reg, uintptr_t offset_imm,
                            LoadType type, uint32_t* protected_load_pc,
                            bool is_load_mem, bool i64_offset,
                            bool needs_shift) {
  if (offset_reg != no_reg && !i64_offset) {
    // Clear the upper 32 bits of the 64-bit offset register.
    llgfr(ip, offset_reg);
    offset_reg = ip;
  }
  unsigned shift_amount = needs_shift ? type.size_log_2() : 0;
  if (offset_reg != no_reg && shift_amount != 0) {
  if (!is_int20(offset_imm)) {
    if (offset_reg != no_reg) {
      mov(r0, Operand(offset_imm));
      AddS64(r0, offset_reg);
      mov(ip, r0);
    } else {
      mov(ip, Operand(offset_imm));
    }
    offset_reg = ip;
    offset_imm = 0;
  }
  if (protected_load_pc) *protected_load_pc = pc_offset();
  switch (type.value()) {
    case LoadType::kI32Load8U:
    case LoadType::kI64Load8U:
    case LoadType::kI32Load8S:
    case LoadType::kI64Load8S:
    case LoadType::kI32Load16U:
    case LoadType::kI64Load16U:
    case LoadType::kI32Load16S:
    case LoadType::kI64Load16S:
    case LoadType::kI64Load32U:
    case LoadType::kI32Load:
    case LoadType::kI64Load32S:
    case LoadType::kI64Load:
    case LoadType::kF32Load:
    case LoadType::kF64Load:
    case LoadType::kS128Load:
#define PREP_MEM_OPERAND(offset_reg, offset_imm, scratch)        \
  if (offset_reg != no_reg && !i64_offset) {                     \
    /* Clear the upper 32 bits of the 64-bit offset register. */ \
    llgfr(scratch, offset_reg);                                  \
    offset_reg = scratch;                                        \
  }                                                              \
  if (!is_int20(offset_imm)) {                                   \
    if (offset_reg != no_reg) {                                  \
      mov(r0, Operand(offset_imm));                              \
      AddS64(r0, offset_reg);                                    \
      mov(scratch, r0);                                          \
    } else {                                                     \
      mov(scratch, Operand(offset_imm));                         \
    }                                                            \
    offset_reg = scratch;                                        \
    offset_imm = 0;                                              \
  }
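
// PREP_MEM_OPERAND canonicalizes an (offset_reg, offset_imm) pair so that the
// final access fits an s390 20-bit displacement: a 32-bit index register is
// zero-extended into |scratch|, and an out-of-range immediate is folded into
// |scratch| as well, leaving offset_imm at zero. The memory helpers below
// invoke it as e.g. PREP_MEM_OPERAND(offset_reg, offset_imm, ip) before
// building their MemOperand.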
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
                             uintptr_t offset_imm, LiftoffRegister src,
                             StoreType type, LiftoffRegList /* pinned */,
                             uint32_t* protected_store_pc, bool is_store_mem,
                             bool i64_offset) {
  if (protected_store_pc) *protected_store_pc = pc_offset();
  switch (type.value()) {
    case StoreType::kI32Store8:
    case StoreType::kI64Store8:
    case StoreType::kI32Store16:
    case StoreType::kI64Store16:
    case StoreType::kI32Store:
    case StoreType::kI64Store32:
    case StoreType::kI64Store:
    case StoreType::kF32Store:
    case StoreType::kF64Store:
    case StoreType::kS128Store: {
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
                                  Register offset_reg, uintptr_t offset_imm,
                                  LoadType type, LiftoffRegList /* pinned */,
                                  bool i64_offset) {
  Load(dst, src_addr, offset_reg, offset_imm, type, nullptr, true, i64_offset);
}
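
// z/Architecture is strongly ordered, so an atomic wasm load can simply reuse
// the plain Load path above; no additional fencing is emitted.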
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
                                   uintptr_t offset_imm, LiftoffRegister src,
                                   StoreType type, LiftoffRegList /* pinned */,
                                   bool i64_offset) {
  switch (type.value()) {
    case StoreType::kI32Store8:
    case StoreType::kI64Store8: {
    case StoreType::kI32Store16:
    case StoreType::kI64Store16: {
#ifdef V8_TARGET_BIG_ENDIAN
    case StoreType::kI32Store:
    case StoreType::kI64Store32: {
#ifdef V8_TARGET_BIG_ENDIAN
    case StoreType::kI64Store: {
#ifdef V8_TARGET_BIG_ENDIAN
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
                                 uintptr_t offset_imm, LiftoffRegister value,
                                 LiftoffRegister result, StoreType type,
                                 bool i64_offset) {
  LiftoffRegList pinned = LiftoffRegList{dst_addr, value, result};
  if (offset_reg != no_reg) pinned.set(offset_reg);
  switch (type.value()) {
    case StoreType::kI32Store8:
    case StoreType::kI64Store8: {
      AddS32(tmp2, tmp1, value.gp());
    case StoreType::kI32Store16:
    case StoreType::kI64Store16: {
#ifdef V8_TARGET_BIG_ENDIAN
      AddS32(tmp2, tmp2, value.gp());
#else
      AddS32(tmp2, tmp1, value.gp());
#endif
#ifdef V8_TARGET_BIG_ENDIAN
    case StoreType::kI32Store:
    case StoreType::kI64Store32: {
#ifdef V8_TARGET_BIG_ENDIAN
      AddS32(tmp2, tmp2, value.gp());
#else
      AddS32(tmp2, tmp1, value.gp());
#endif
#ifdef V8_TARGET_BIG_ENDIAN
    case StoreType::kI64Store: {
#ifdef V8_TARGET_BIG_ENDIAN
      AddS64(tmp2, tmp2, value.gp());
#else
      AddS64(tmp2, tmp1, value.gp());
#endif
#ifdef V8_TARGET_BIG_ENDIAN
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
                                 uintptr_t offset_imm, LiftoffRegister value,
                                 LiftoffRegister result, StoreType type,
                                 bool i64_offset) {
  LiftoffRegList pinned = LiftoffRegList{dst_addr, value, result};
  if (offset_reg != no_reg) pinned.set(offset_reg);
  switch (type.value()) {
    case StoreType::kI32Store8:
    case StoreType::kI64Store8: {
      SubS32(tmp2, tmp1, value.gp());
    case StoreType::kI32Store16:
    case StoreType::kI64Store16: {
#ifdef V8_TARGET_BIG_ENDIAN
      SubS32(tmp2, tmp2, value.gp());
#else
      SubS32(tmp2, tmp1, value.gp());
#endif
#ifdef V8_TARGET_BIG_ENDIAN
    case StoreType::kI32Store:
    case StoreType::kI64Store32: {
#ifdef V8_TARGET_BIG_ENDIAN
      SubS32(tmp2, tmp2, value.gp());
#else
      SubS32(tmp2, tmp1, value.gp());
#endif
#ifdef V8_TARGET_BIG_ENDIAN
    case StoreType::kI64Store: {
#ifdef V8_TARGET_BIG_ENDIAN
      SubS64(tmp2, tmp2, value.gp());
#else
      SubS64(tmp2, tmp1, value.gp());
#endif
#ifdef V8_TARGET_BIG_ENDIAN
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
                                 uintptr_t offset_imm, LiftoffRegister value,
                                 LiftoffRegister result, StoreType type,
                                 bool i64_offset) {
  LiftoffRegList pinned = LiftoffRegList{dst_addr, value, result};
  if (offset_reg != no_reg) pinned.set(offset_reg);
  switch (type.value()) {
    case StoreType::kI32Store8:
    case StoreType::kI64Store8: {
      AndP(tmp2, tmp1, value.gp());
    case StoreType::kI32Store16:
    case StoreType::kI64Store16: {
#ifdef V8_TARGET_BIG_ENDIAN
      AndP(tmp2, tmp2, value.gp());
#else
      AndP(tmp2, tmp1, value.gp());
#endif
#ifdef V8_TARGET_BIG_ENDIAN
    case StoreType::kI32Store:
    case StoreType::kI64Store32: {
#ifdef V8_TARGET_BIG_ENDIAN
      AndP(tmp2, tmp2, value.gp());
#else
      AndP(tmp2, tmp1, value.gp());
#endif
#ifdef V8_TARGET_BIG_ENDIAN
    case StoreType::kI64Store: {
#ifdef V8_TARGET_BIG_ENDIAN
      AndP(tmp2, tmp2, value.gp());
#else
      AndP(tmp2, tmp1, value.gp());
#endif
#ifdef V8_TARGET_BIG_ENDIAN
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
                                uintptr_t offset_imm, LiftoffRegister value,
                                LiftoffRegister result, StoreType type,
                                bool i64_offset) {
  LiftoffRegList pinned = LiftoffRegList{dst_addr, value, result};
  if (offset_reg != no_reg) pinned.set(offset_reg);
  switch (type.value()) {
    case StoreType::kI32Store8:
    case StoreType::kI64Store8: {
      OrP(tmp2, tmp1, value.gp());
    case StoreType::kI32Store16:
    case StoreType::kI64Store16: {
#ifdef V8_TARGET_BIG_ENDIAN
      OrP(tmp2, tmp2, value.gp());
#else
      OrP(tmp2, tmp1, value.gp());
#endif
#ifdef V8_TARGET_BIG_ENDIAN
    case StoreType::kI32Store:
    case StoreType::kI64Store32: {
#ifdef V8_TARGET_BIG_ENDIAN
      OrP(tmp2, tmp2, value.gp());
#else
      OrP(tmp2, tmp1, value.gp());
#endif
#ifdef V8_TARGET_BIG_ENDIAN
    case StoreType::kI64Store: {
#ifdef V8_TARGET_BIG_ENDIAN
      OrP(tmp2, tmp2, value.gp());
#else
      OrP(tmp2, tmp1, value.gp());
#endif
#ifdef V8_TARGET_BIG_ENDIAN
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
                                 uintptr_t offset_imm, LiftoffRegister value,
                                 LiftoffRegister result, StoreType type,
                                 bool i64_offset) {
  LiftoffRegList pinned = LiftoffRegList{dst_addr, value, result};
  if (offset_reg != no_reg) pinned.set(offset_reg);
  switch (type.value()) {
    case StoreType::kI32Store8:
    case StoreType::kI64Store8: {
      XorP(tmp2, tmp1, value.gp());
    case StoreType::kI32Store16:
    case StoreType::kI64Store16: {
#ifdef V8_TARGET_BIG_ENDIAN
      XorP(tmp2, tmp2, value.gp());
#else
      XorP(tmp2, tmp1, value.gp());
#endif
#ifdef V8_TARGET_BIG_ENDIAN
    case StoreType::kI32Store:
    case StoreType::kI64Store32: {
#ifdef V8_TARGET_BIG_ENDIAN
      XorP(tmp2, tmp2, value.gp());
#else
      XorP(tmp2, tmp1, value.gp());
#endif
#ifdef V8_TARGET_BIG_ENDIAN
    case StoreType::kI64Store: {
#ifdef V8_TARGET_BIG_ENDIAN
      XorP(tmp2, tmp2, value.gp());
#else
      XorP(tmp2, tmp1, value.gp());
#endif
#ifdef V8_TARGET_BIG_ENDIAN
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
                                      uintptr_t offset_imm,
                                      LiftoffRegister value,
                                      LiftoffRegister result, StoreType type,
                                      bool i64_offset) {
  switch (type.value()) {
    case StoreType::kI32Store8:
    case StoreType::kI64Store8: {
    case StoreType::kI32Store16:
    case StoreType::kI64Store16: {
#ifdef V8_TARGET_BIG_ENDIAN
      lrvr(r1, value.gp());
#ifdef V8_TARGET_BIG_ENDIAN
    case StoreType::kI32Store:
    case StoreType::kI64Store32: {
#ifdef V8_TARGET_BIG_ENDIAN
      lrvr(r1, value.gp());
#ifdef V8_TARGET_BIG_ENDIAN
    case StoreType::kI64Store: {
#ifdef V8_TARGET_BIG_ENDIAN
      lrvgr(r1, value.gp());
#else
      mov(r1, value.gp());
#endif
#ifdef V8_TARGET_BIG_ENDIAN
void LiftoffAssembler::AtomicCompareExchange(
    Register dst_addr, Register offset_reg, uintptr_t offset_imm,
    LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
    StoreType type, bool i64_offset) {
  LiftoffRegList pinned = LiftoffRegList{dst_addr, expected, new_value, result};
  if (offset_reg != no_reg) pinned.set(offset_reg);
  switch (type.value()) {
    case StoreType::kI32Store8:
    case StoreType::kI64Store8: {
    case StoreType::kI32Store16:
    case StoreType::kI64Store16: {
#ifdef V8_TARGET_BIG_ENDIAN
      lrvr(tmp1, expected.gp());
      lrvr(tmp2, new_value.gp());
#else
      LoadU16(tmp2, new_value.gp());
#endif
#ifdef V8_TARGET_BIG_ENDIAN
    case StoreType::kI32Store:
    case StoreType::kI64Store32: {
#ifdef V8_TARGET_BIG_ENDIAN
      lrvr(tmp1, expected.gp());
      lrvr(tmp2, new_value.gp());
#else
      LoadU32(tmp2, new_value.gp());
#endif
#ifdef V8_TARGET_BIG_ENDIAN
    case StoreType::kI64Store: {
#ifdef V8_TARGET_BIG_ENDIAN
      lrvgr(tmp1, expected.gp());
      lrvgr(tmp2, new_value.gp());
#else
      mov(tmp1, expected.gp());
      mov(tmp2, new_value.gp());
#endif
#ifdef V8_TARGET_BIG_ENDIAN
                                           uint32_t caller_slot_idx,
#if defined(V8_TARGET_BIG_ENDIAN)
  Register scratch = temps.Acquire();

                                            uint32_t caller_slot_idx,
                                            Register frame_pointer) {
#if defined(V8_TARGET_BIG_ENDIAN)
  Register scratch = temps.Acquire();

#if defined(V8_TARGET_BIG_ENDIAN)
  Register scratch = temps.Acquire();

#ifdef V8_TARGET_BIG_ENDIAN
  dst_offset += (length == 4 ? stack_bias : 0);
  src_offset += (length == 4 ? stack_bias : 0);
  if (is_int20(dst_offset)) {
    mov(ip, Operand(-dst_offset));
  if (is_int20(src_offset)) {
    mov(r1, Operand(-src_offset));
  Register scratch = temps.Acquire();
  switch (value.type().kind()) {
    case kI32:
      mov(src, Operand(value.to_i32()));
      break;
    case kI64:
      mov(src, Operand(value.to_i64()));
      break;

  Register scratch = temps.Acquire();
  mov(r0, Operand(0));
#define SIGN_EXT(r) lgfr(r, r)
#define INT32_AND_WITH_1F(x) Operand(x & 0x1f)
#define INT32_AND_WITH_3F(x) Operand(x & 0x3f)
#define REGISTER_AND_WITH_1F    \
  ([&](Register rhs) {          \
    AndP(r1, rhs, Operand(31)); \
    return r1;                  \
  })

#define LFR_TO_REG(reg) reg.gp()
#define UNOP_LIST(V)                                                           \
  V(i32_popcnt, Popcnt32, Register, Register, , , USE, true, bool)             \
  V(i64_popcnt, Popcnt64, LiftoffRegister, LiftoffRegister, LFR_TO_REG,        \
    LFR_TO_REG, USE, true, bool)                                               \
  V(u32_to_uintptr, LoadU32, Register, Register, , , USE, , void)              \
  V(i32_signextend_i8, lbr, Register, Register, , , USE, , void)               \
  V(i32_signextend_i16, lhr, Register, Register, , , USE, , void)              \
  V(i64_signextend_i8, lgbr, LiftoffRegister, LiftoffRegister, LFR_TO_REG,     \
    LFR_TO_REG, USE, , void)                                                   \
  V(i64_signextend_i16, lghr, LiftoffRegister, LiftoffRegister, LFR_TO_REG,    \
    LFR_TO_REG, USE, , void)                                                   \
  V(i64_signextend_i32, LoadS32, LiftoffRegister, LiftoffRegister, LFR_TO_REG, \
    LFR_TO_REG, USE, , void)                                                   \
  V(i32_clz, CountLeadingZerosU32, Register, Register, , , USE, , void)        \
  V(i32_ctz, CountTrailingZerosU32, Register, Register, , , USE, , void)       \
  V(i64_clz, CountLeadingZerosU64, LiftoffRegister, LiftoffRegister,           \
    LFR_TO_REG, LFR_TO_REG, USE, , void)                                       \
  V(i64_ctz, CountTrailingZerosU64, LiftoffRegister, LiftoffRegister,          \
    LFR_TO_REG, LFR_TO_REG, USE, , void)                                       \
  V(f32_ceil, CeilF32, DoubleRegister, DoubleRegister, , , USE, true, bool)    \
  V(f32_floor, FloorF32, DoubleRegister, DoubleRegister, , , USE, true, bool)  \
  V(f32_trunc, TruncF32, DoubleRegister, DoubleRegister, , , USE, true, bool)  \
  V(f32_nearest_int, NearestIntF32, DoubleRegister, DoubleRegister, , , USE,   \
    true, bool)                                                                \
  V(f32_abs, lpebr, DoubleRegister, DoubleRegister, , , USE, , void)           \
  V(f32_neg, lcebr, DoubleRegister, DoubleRegister, , , USE, , void)           \
  V(f32_sqrt, sqebr, DoubleRegister, DoubleRegister, , , USE, , void)          \
  V(f64_ceil, CeilF64, DoubleRegister, DoubleRegister, , , USE, true, bool)    \
  V(f64_floor, FloorF64, DoubleRegister, DoubleRegister, , , USE, true, bool)  \
  V(f64_trunc, TruncF64, DoubleRegister, DoubleRegister, , , USE, true, bool)  \
  V(f64_nearest_int, NearestIntF64, DoubleRegister, DoubleRegister, , , USE,   \
    true, bool)                                                                \
  V(f64_abs, lpdbr, DoubleRegister, DoubleRegister, , , USE, , void)           \
  V(f64_neg, lcdbr, DoubleRegister, DoubleRegister, , , USE, , void)           \
  V(f64_sqrt, sqdbr, DoubleRegister, DoubleRegister, , , USE, , void)
#define EMIT_UNOP_FUNCTION(name, instr, dtype, stype, dcast, scast, rcast, \
                           ret, return_type)                               \
  return_type LiftoffAssembler::emit_##name(dtype dst, stype src) {       \
    auto _dst = dcast(dst);                                                \
    auto _src = scast(src);                                                \
    instr(_dst, _src);                                                     \
    rcast(_dst);                                                           \
    return ret;                                                            \
  }
UNOP_LIST(EMIT_UNOP_FUNCTION)
#undef EMIT_UNOP_FUNCTION
#undef UNOP_LIST
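
// For illustration, the i32_popcnt entry in UNOP_LIST expands (roughly) to:
//
//   bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
//     Popcnt32(dst, src);
//     return true;
//   }
//
// Empty dcast/scast slots drop out, USE marks the result as intentionally
// consumed, and the trailing (ret, return_type) pair supplies the emitter's
// return value and type.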
#define BINOP_LIST(V)                                                          \
  V(f32_min, FloatMin, DoubleRegister, DoubleRegister, DoubleRegister, , , ,   \
    USE, , void)                                                               \
  V(f32_max, FloatMax, DoubleRegister, DoubleRegister, DoubleRegister, , , ,   \
    USE, , void)                                                               \
  V(f64_min, DoubleMin, DoubleRegister, DoubleRegister, DoubleRegister, , , ,  \
    USE, , void)                                                               \
  V(f64_max, DoubleMax, DoubleRegister, DoubleRegister, DoubleRegister, , , ,  \
    USE, , void)                                                               \
  V(f64_add, AddF64, DoubleRegister, DoubleRegister, DoubleRegister, , , ,     \
    USE, , void)                                                               \
  V(f64_sub, SubF64, DoubleRegister, DoubleRegister, DoubleRegister, , , ,     \
    USE, , void)                                                               \
  V(f64_mul, MulF64, DoubleRegister, DoubleRegister, DoubleRegister, , , ,     \
    USE, , void)                                                               \
  V(f64_div, DivF64, DoubleRegister, DoubleRegister, DoubleRegister, , , ,     \
    USE, , void)                                                               \
  V(f32_add, AddF32, DoubleRegister, DoubleRegister, DoubleRegister, , , ,     \
    USE, , void)                                                               \
  V(f32_sub, SubF32, DoubleRegister, DoubleRegister, DoubleRegister, , , ,     \
    USE, , void)                                                               \
  V(f32_mul, MulF32, DoubleRegister, DoubleRegister, DoubleRegister, , , ,     \
    USE, , void)                                                               \
  V(f32_div, DivF32, DoubleRegister, DoubleRegister, DoubleRegister, , , ,     \
    USE, , void)                                                               \
  V(i32_shli, ShiftLeftU32, Register, Register, int32_t, , ,                   \
    INT32_AND_WITH_1F, SIGN_EXT, , void)                                       \
  V(i32_sari, ShiftRightS32, Register, Register, int32_t, , ,                  \
    INT32_AND_WITH_1F, SIGN_EXT, , void)                                       \
  V(i32_shri, ShiftRightU32, Register, Register, int32_t, , ,                  \
    INT32_AND_WITH_1F, SIGN_EXT, , void)                                       \
  V(i32_shl, ShiftLeftU32, Register, Register, Register, , ,                   \
    REGISTER_AND_WITH_1F, SIGN_EXT, , void)                                    \
  V(i32_sar, ShiftRightS32, Register, Register, Register, , ,                  \
    REGISTER_AND_WITH_1F, SIGN_EXT, , void)                                    \
  V(i32_shr, ShiftRightU32, Register, Register, Register, , ,                  \
    REGISTER_AND_WITH_1F, SIGN_EXT, , void)                                    \
  V(i32_addi, AddS32, Register, Register, int32_t, , , Operand, SIGN_EXT, ,    \
    void)                                                                      \
  V(i32_subi, SubS32, Register, Register, int32_t, , , Operand, SIGN_EXT, ,    \
    void)                                                                      \
  V(i32_andi, And, Register, Register, int32_t, , , Operand, SIGN_EXT, , void) \
  V(i32_ori, Or, Register, Register, int32_t, , , Operand, SIGN_EXT, , void)   \
  V(i32_xori, Xor, Register, Register, int32_t, , , Operand, SIGN_EXT, , void) \
  V(i32_add, AddS32, Register, Register, Register, , , , SIGN_EXT, , void)     \
  V(i32_sub, SubS32, Register, Register, Register, , , , SIGN_EXT, , void)     \
  V(i32_and, And, Register, Register, Register, , , , SIGN_EXT, , void)        \
  V(i32_or, Or, Register, Register, Register, , , , SIGN_EXT, , void)          \
  V(i32_xor, Xor, Register, Register, Register, , , , SIGN_EXT, , void)        \
  V(i32_mul, MulS32, Register, Register, Register, , , , SIGN_EXT, , void)     \
  V(i64_add, AddS64, LiftoffRegister, LiftoffRegister, LiftoffRegister,        \
    LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void)                           \
  V(i64_sub, SubS64, LiftoffRegister, LiftoffRegister, LiftoffRegister,        \
    LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void)                           \
  V(i64_mul, MulS64, LiftoffRegister, LiftoffRegister, LiftoffRegister,        \
    LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void)                           \
  V(i64_and, AndP, LiftoffRegister, LiftoffRegister, LiftoffRegister,          \
    LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void)                           \
  V(i64_or, OrP, LiftoffRegister, LiftoffRegister, LiftoffRegister,            \
    LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void)                           \
  V(i64_xor, XorP, LiftoffRegister, LiftoffRegister, LiftoffRegister,          \
    LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void)                           \
  V(i64_shl, ShiftLeftU64, LiftoffRegister, LiftoffRegister, Register,         \
    LFR_TO_REG, LFR_TO_REG, , USE, , void)                                     \
  V(i64_sar, ShiftRightS64, LiftoffRegister, LiftoffRegister, Register,        \
    LFR_TO_REG, LFR_TO_REG, , USE, , void)                                     \
  V(i64_shr, ShiftRightU64, LiftoffRegister, LiftoffRegister, Register,        \
    LFR_TO_REG, LFR_TO_REG, , USE, , void)                                     \
  V(i64_addi, AddS64, LiftoffRegister, LiftoffRegister, int64_t, LFR_TO_REG,   \
    LFR_TO_REG, Operand, USE, , void)                                          \
  V(i64_andi, AndP, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG,     \
    LFR_TO_REG, Operand, USE, , void)                                          \
  V(i64_ori, OrP, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG,       \
    LFR_TO_REG, Operand, USE, , void)                                          \
  V(i64_xori, XorP, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG,     \
    LFR_TO_REG, Operand, USE, , void)                                          \
  V(i64_shli, ShiftLeftU64, LiftoffRegister, LiftoffRegister, int32_t,         \
    LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void)                    \
  V(i64_sari, ShiftRightS64, LiftoffRegister, LiftoffRegister, int32_t,        \
    LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void)                    \
  V(i64_shri, ShiftRightU64, LiftoffRegister, LiftoffRegister, int32_t,        \
    LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void)
#define EMIT_BINOP_FUNCTION(name, instr, dtype, stype1, stype2, dcast, scast1, \
                            scast2, rcast, ret, return_type)                   \
  return_type LiftoffAssembler::emit_##name(dtype dst, stype1 lhs,             \
                                            stype2 rhs) {                      \
    auto _dst = dcast(dst);                                                    \
    auto _lhs = scast1(lhs);                                                   \
    auto _rhs = scast2(rhs);                                                   \
    instr(_dst, _lhs, _rhs);                                                   \
    rcast(_dst);                                                               \
    return ret;                                                                \
  }
BINOP_LIST(EMIT_BINOP_FUNCTION)
#undef EMIT_BINOP_FUNCTION
#undef BINOP_LIST
#undef INT32_AND_WITH_1F
#undef REGISTER_AND_WITH_1F
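
// Likewise, the i32_shl entry expands (roughly) to:
//
//   void LiftoffAssembler::emit_i32_shl(Register dst, Register lhs,
//                                       Register rhs) {
//     ShiftLeftU32(dst, lhs, [&](Register rhs) {
//       AndP(r1, rhs, Operand(31));
//       return r1;
//     }(rhs));
//     SIGN_EXT(dst);  // lgfr: sign-extend the 32-bit result to 64 bits.
//   }
//
// i.e. wasm's implicit shift-count masking (count mod 32) is applied via the
// REGISTER_AND_WITH_1F lambda before the target instruction is emitted.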
  Register scratch = temps.Acquire();

  Register scratch = temps.Acquire();
  AddU64(scratch, Operand(1));
                                     Label* trap_div_by_zero,
                                     Label* trap_div_unrepresentable) {
  b(eq, trap_div_by_zero);
  CmpS32(rhs, Operand(-1));
  b(eq, trap_div_unrepresentable);

                                     Label* trap_div_by_zero) {
  beq(trap_div_by_zero);

                                     Label* trap_div_by_zero) {
  Label trap_div_unrepresentable;
  beq(trap_div_by_zero);
  CmpS32(rhs, Operand(-1));
  beq(&trap_div_unrepresentable);
  bind(&trap_div_unrepresentable);
  mov(dst, Operand(0));
                                     Label* trap_div_by_zero) {
  beq(trap_div_by_zero);

                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero,
                                     Label* trap_div_unrepresentable) {
  constexpr int64_t kMinInt64 = static_cast<int64_t>(uint64_t{1} << 63);
  beq(trap_div_by_zero);
  CmpS64(rhs.gp(), Operand(-1));
  mov(r0, Operand(kMinInt64));
  b(eq, trap_div_unrepresentable);
  DivS64(dst.gp(), lhs.gp(), rhs.gp());

                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero) {
  b(eq, trap_div_by_zero);
  DivU64(dst.gp(), lhs.gp(), rhs.gp());

                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero) {
  constexpr int64_t kMinInt64 = static_cast<int64_t>(uint64_t{1} << 63);
  Label trap_div_unrepresentable;
  beq(trap_div_by_zero);
  CmpS64(rhs.gp(), Operand(-1));
  mov(r0, Operand(kMinInt64));
  beq(&trap_div_unrepresentable);
  ModS64(dst.gp(), lhs.gp(), rhs.gp());
  bind(&trap_div_unrepresentable);
  mov(dst.gp(), Operand(0));

                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero) {
  beq(trap_div_by_zero);
  ModU64(dst.gp(), lhs.gp(), rhs.gp());
  constexpr uint64_t kF64SignBit = uint64_t{1} << 63;
  Register scratch2 = temps.Acquire();
  AndP(r0, Operand(~kF64SignBit));
  AndP(scratch2, Operand(kF64SignBit));

  constexpr uint64_t kF64SignBit = uint64_t{1} << 63;
  Register scratch2 = temps.Acquire();
  AndP(r0, Operand(~kF64SignBit));
  AndP(scratch2, Operand(kF64SignBit));
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
                                            LiftoffRegister dst,
                                            LiftoffRegister src, Label* trap) {
  switch (opcode) {
    case kExprI32ConvertI64:
      lgfr(dst.gp(), src.gp());
    case kExprI32SConvertF32: {
    case kExprI32UConvertF32: {
    case kExprI32SConvertF64: {
    case kExprI32UConvertF64: {
    case kExprI32SConvertSatF32: {
      Label done, src_is_nan;
    case kExprI32UConvertSatF32: {
      Label done, src_is_nan;
    case kExprI32SConvertSatF64: {
      Label done, src_is_nan;
    case kExprI32UConvertSatF64: {
      Label done, src_is_nan;
    case kExprI32ReinterpretF32:
      lgdr(dst.gp(), src.fp());
      srlg(dst.gp(), dst.gp(), Operand(32));
    case kExprI64SConvertI32:
    case kExprI64UConvertI32:
      llgfr(dst.gp(), src.gp());
    case kExprI64ReinterpretF64:
      lgdr(dst.gp(), src.fp());
    case kExprF32SConvertI32: {
    case kExprF32UConvertI32: {
    case kExprF32ConvertF64:
      ledbr(dst.fp(), src.fp());
    case kExprF32ReinterpretI32: {
      sllg(r0, src.gp(), Operand(32));
    case kExprF64SConvertI32: {
    case kExprF64UConvertI32: {
    case kExprF64ConvertF32:
      ldebr(dst.fp(), src.fp());
    case kExprF64ReinterpretI64:
      ldgr(dst.fp(), src.gp());
    case kExprF64SConvertI64:
    case kExprF64UConvertI64:
    case kExprI64SConvertF32: {
    case kExprI64UConvertF32: {
    case kExprF32SConvertI64:
    case kExprF32UConvertI64:
    case kExprI64SConvertF64: {
    case kExprI64UConvertF64: {
    case kExprI64SConvertSatF32: {
      Label done, src_is_nan;
    case kExprI64UConvertSatF32: {
      Label done, src_is_nan;
    case kExprI64SConvertSatF64: {
      Label done, src_is_nan;
    case kExprI64UConvertSatF64: {
      Label done, src_is_nan;
                                      const FreezeCacheState& frozen) {
#if defined(V8_COMPRESS_POINTERS)

                                           Register lhs, int32_t imm,
                                           const FreezeCacheState& frozen) {
    CmpS32(lhs, Operand(imm));
    CmpU32(lhs, Operand(imm));

                                           Register lhs, int32_t imm,
                                           const FreezeCacheState& frozen) {
    CmpS64(lhs, Operand(imm));
    CmpU64(lhs, Operand(imm));
#define EMIT_EQZ(test, src) \
    mov(dst, Operand(1));   \
    mov(dst, Operand(0));

#define EMIT_SET_CONDITION(dst, cond) \
    lghi(dst, Operand(1));            \
    lghi(dst, Operand(0));
                                 Register lhs, Register rhs) {

                                 LiftoffRegister lhs,
                                 LiftoffRegister rhs) {
    CmpS64(lhs.gp(), rhs.gp());
    CmpU64(lhs.gp(), rhs.gp());

  mov(r0, Operand(imm));

                                      LiftoffRegister true_value,
                                      LiftoffRegister false_value,
                                      const FreezeCacheState& frozen) {
#define SIMD_BINOP_RR_LIST(V)                            \
  V(f64x2_add, F64x2Add)                                 \
  V(f64x2_sub, F64x2Sub)                                 \
  V(f64x2_mul, F64x2Mul)                                 \
  V(f64x2_div, F64x2Div)                                 \
  V(f64x2_min, F64x2Min)                                 \
  V(f64x2_max, F64x2Max)                                 \
  V(f64x2_eq, F64x2Eq)                                   \
  V(f64x2_ne, F64x2Ne)                                   \
  V(f64x2_lt, F64x2Lt)                                   \
  V(f64x2_le, F64x2Le)                                   \
  V(f64x2_pmin, F64x2Pmin)                               \
  V(f64x2_pmax, F64x2Pmax)                               \
  V(f32x4_add, F32x4Add)                                 \
  V(f32x4_sub, F32x4Sub)                                 \
  V(f32x4_mul, F32x4Mul)                                 \
  V(f32x4_div, F32x4Div)                                 \
  V(f32x4_min, F32x4Min)                                 \
  V(f32x4_max, F32x4Max)                                 \
  V(f32x4_eq, F32x4Eq)                                   \
  V(f32x4_ne, F32x4Ne)                                   \
  V(f32x4_lt, F32x4Lt)                                   \
  V(f32x4_le, F32x4Le)                                   \
  V(f32x4_pmin, F32x4Pmin)                               \
  V(f32x4_pmax, F32x4Pmax)                               \
  V(i64x2_add, I64x2Add)                                 \
  V(i64x2_sub, I64x2Sub)                                 \
  V(i64x2_eq, I64x2Eq)                                   \
  V(i64x2_ne, I64x2Ne)                                   \
  V(i64x2_gt_s, I64x2GtS)                                \
  V(i64x2_ge_s, I64x2GeS)                                \
  V(i32x4_add, I32x4Add)                                 \
  V(i32x4_sub, I32x4Sub)                                 \
  V(i32x4_mul, I32x4Mul)                                 \
  V(i32x4_eq, I32x4Eq)                                   \
  V(i32x4_ne, I32x4Ne)                                   \
  V(i32x4_gt_s, I32x4GtS)                                \
  V(i32x4_ge_s, I32x4GeS)                                \
  V(i32x4_gt_u, I32x4GtU)                                \
  V(i32x4_min_s, I32x4MinS)                              \
  V(i32x4_min_u, I32x4MinU)                              \
  V(i32x4_max_s, I32x4MaxS)                              \
  V(i32x4_max_u, I32x4MaxU)                              \
  V(i16x8_add, I16x8Add)                                 \
  V(i16x8_sub, I16x8Sub)                                 \
  V(i16x8_mul, I16x8Mul)                                 \
  V(i16x8_eq, I16x8Eq)                                   \
  V(i16x8_ne, I16x8Ne)                                   \
  V(i16x8_gt_s, I16x8GtS)                                \
  V(i16x8_ge_s, I16x8GeS)                                \
  V(i16x8_gt_u, I16x8GtU)                                \
  V(i16x8_min_s, I16x8MinS)                              \
  V(i16x8_min_u, I16x8MinU)                              \
  V(i16x8_max_s, I16x8MaxS)                              \
  V(i16x8_max_u, I16x8MaxU)                              \
  V(i16x8_rounding_average_u, I16x8RoundingAverageU)     \
  V(i8x16_add, I8x16Add)                                 \
  V(i8x16_sub, I8x16Sub)                                 \
  V(i8x16_eq, I8x16Eq)                                   \
  V(i8x16_ne, I8x16Ne)                                   \
  V(i8x16_gt_s, I8x16GtS)                                \
  V(i8x16_ge_s, I8x16GeS)                                \
  V(i8x16_gt_u, I8x16GtU)                                \
  V(i8x16_min_s, I8x16MinS)                              \
  V(i8x16_min_u, I8x16MinU)                              \
  V(i8x16_max_s, I8x16MaxS)                              \
  V(i8x16_max_u, I8x16MaxU)                              \
  V(i8x16_rounding_average_u, I8x16RoundingAverageU)     \
  V(s128_and, S128And)                                   \
  V(s128_or, S128Or)                                     \
  V(s128_xor, S128Xor)                                   \
  V(s128_and_not, S128AndNot)

#define EMIT_SIMD_BINOP_RR(name, op)                                           \
  void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
                                     LiftoffRegister rhs) {                    \
    op(dst.fp(), lhs.fp(), rhs.fp());                                          \
  }
SIMD_BINOP_RR_LIST(EMIT_SIMD_BINOP_RR)
#undef EMIT_SIMD_BINOP_RR
#undef SIMD_BINOP_RR_LIST
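
// Each list entry becomes a one-line emitter; f64x2_add, for instance,
// expands to:
//
//   void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst,
//                                         LiftoffRegister lhs,
//                                         LiftoffRegister rhs) {
//     F64x2Add(dst.fp(), lhs.fp(), rhs.fp());
//   }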
#define SIMD_SHIFT_RR_LIST(V) \
  V(i64x2_shl, I64x2Shl)      \
  V(i64x2_shr_s, I64x2ShrS)   \
  V(i64x2_shr_u, I64x2ShrU)   \
  V(i32x4_shl, I32x4Shl)      \
  V(i32x4_shr_s, I32x4ShrS)   \
  V(i32x4_shr_u, I32x4ShrU)   \
  V(i16x8_shl, I16x8Shl)      \
  V(i16x8_shr_s, I16x8ShrS)   \
  V(i16x8_shr_u, I16x8ShrU)   \
  V(i8x16_shl, I8x16Shl)      \
  V(i8x16_shr_s, I8x16ShrS)   \
  V(i8x16_shr_u, I8x16ShrU)

#define EMIT_SIMD_SHIFT_RR(name, op)                                           \
  void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
                                     LiftoffRegister rhs) {                    \
    op(dst.fp(), lhs.fp(), rhs.gp(), kScratchDoubleReg);                       \
  }
SIMD_SHIFT_RR_LIST(EMIT_SIMD_SHIFT_RR)
#undef EMIT_SIMD_SHIFT_RR
#undef SIMD_SHIFT_RR_LIST
#define SIMD_SHIFT_RI_LIST(V)   \
  V(i64x2_shli, I64x2Shl, 63)   \
  V(i64x2_shri_s, I64x2ShrS, 63) \
  V(i64x2_shri_u, I64x2ShrU, 63) \
  V(i32x4_shli, I32x4Shl, 31)   \
  V(i32x4_shri_s, I32x4ShrS, 31) \
  V(i32x4_shri_u, I32x4ShrU, 31) \
  V(i16x8_shli, I16x8Shl, 15)   \
  V(i16x8_shri_s, I16x8ShrS, 15) \
  V(i16x8_shri_u, I16x8ShrU, 15) \
  V(i8x16_shli, I8x16Shl, 7)    \
  V(i8x16_shri_s, I8x16ShrS, 7) \
  V(i8x16_shri_u, I8x16ShrU, 7)

#define EMIT_SIMD_SHIFT_RI(name, op, mask)                                     \
  void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
                                     int32_t rhs) {                            \
    op(dst.fp(), lhs.fp(), Operand(rhs & mask), r0, kScratchDoubleReg);        \
  }
SIMD_SHIFT_RI_LIST(EMIT_SIMD_SHIFT_RI)
#undef EMIT_SIMD_SHIFT_RI
#undef SIMD_SHIFT_RI_LIST
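
// The mask constant in each entry is the lane width minus one (63/31/15/7):
// wasm takes shift counts modulo the lane width, so for immediate shifts the
// masking is folded into the operand here rather than checked at runtime.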
#define SIMD_UNOP_LIST(V)                                              \
  V(f64x2_splat, F64x2Splat, fp, fp, , void)                           \
  V(f64x2_abs, F64x2Abs, fp, fp, , void)                               \
  V(f64x2_neg, F64x2Neg, fp, fp, , void)                               \
  V(f64x2_sqrt, F64x2Sqrt, fp, fp, , void)                             \
  V(f64x2_ceil, F64x2Ceil, fp, fp, true, bool)                         \
  V(f64x2_floor, F64x2Floor, fp, fp, true, bool)                       \
  V(f64x2_trunc, F64x2Trunc, fp, fp, true, bool)                       \
  V(f64x2_nearest_int, F64x2NearestInt, fp, fp, true, bool)            \
  V(f64x2_convert_low_i32x4_s, F64x2ConvertLowI32x4S, fp, fp, , void)  \
  V(f64x2_convert_low_i32x4_u, F64x2ConvertLowI32x4U, fp, fp, , void)  \
  V(f32x4_abs, F32x4Abs, fp, fp, , void)                               \
  V(f32x4_splat, F32x4Splat, fp, fp, , void)                           \
  V(f32x4_neg, F32x4Neg, fp, fp, , void)                               \
  V(f32x4_sqrt, F32x4Sqrt, fp, fp, , void)                             \
  V(f32x4_ceil, F32x4Ceil, fp, fp, true, bool)                         \
  V(f32x4_floor, F32x4Floor, fp, fp, true, bool)                       \
  V(f32x4_trunc, F32x4Trunc, fp, fp, true, bool)                       \
  V(f32x4_nearest_int, F32x4NearestInt, fp, fp, true, bool)            \
  V(i64x2_abs, I64x2Abs, fp, fp, , void)                               \
  V(i64x2_splat, I64x2Splat, fp, gp, , void)                           \
  V(i64x2_neg, I64x2Neg, fp, fp, , void)                               \
  V(i64x2_sconvert_i32x4_low, I64x2SConvertI32x4Low, fp, fp, , void)   \
  V(i64x2_sconvert_i32x4_high, I64x2SConvertI32x4High, fp, fp, , void) \
  V(i64x2_uconvert_i32x4_low, I64x2UConvertI32x4Low, fp, fp, , void)   \
  V(i64x2_uconvert_i32x4_high, I64x2UConvertI32x4High, fp, fp, , void) \
  V(i32x4_abs, I32x4Abs, fp, fp, , void)                               \
  V(i32x4_neg, I32x4Neg, fp, fp, , void)                               \
  V(i32x4_splat, I32x4Splat, fp, gp, , void)                           \
  V(i32x4_sconvert_i16x8_low, I32x4SConvertI16x8Low, fp, fp, , void)   \
  V(i32x4_sconvert_i16x8_high, I32x4SConvertI16x8High, fp, fp, , void) \
  V(i32x4_uconvert_i16x8_low, I32x4UConvertI16x8Low, fp, fp, , void)   \
  V(i32x4_uconvert_i16x8_high, I32x4UConvertI16x8High, fp, fp, , void) \
  V(i16x8_abs, I16x8Abs, fp, fp, , void)                               \
  V(i16x8_neg, I16x8Neg, fp, fp, , void)                               \
  V(i16x8_splat, I16x8Splat, fp, gp, , void)                           \
  V(i16x8_sconvert_i8x16_low, I16x8SConvertI8x16Low, fp, fp, , void)   \
  V(i16x8_sconvert_i8x16_high, I16x8SConvertI8x16High, fp, fp, , void) \
  V(i16x8_uconvert_i8x16_low, I16x8UConvertI8x16Low, fp, fp, , void)   \
  V(i16x8_uconvert_i8x16_high, I16x8UConvertI8x16High, fp, fp, , void) \
  V(i8x16_abs, I8x16Abs, fp, fp, , void)                               \
  V(i8x16_neg, I8x16Neg, fp, fp, , void)                               \
  V(i8x16_splat, I8x16Splat, fp, gp, , void)                           \
  V(i8x16_popcnt, I8x16Popcnt, fp, fp, , void)                         \
  V(s128_not, S128Not, fp, fp, , void)

#define EMIT_SIMD_UNOP(name, op, dtype, stype, return_val, return_type) \
  return_type LiftoffAssembler::emit_##name(LiftoffRegister dst,        \
                                            LiftoffRegister src) {      \
    op(dst.dtype(), src.stype());                                       \
    return return_val;                                                  \
  }
SIMD_UNOP_LIST(EMIT_SIMD_UNOP)
#undef EMIT_SIMD_UNOP
#undef SIMD_UNOP_LIST
#define SIMD_EXTRACT_LANE_LIST(V)                \
  V(f64x2_extract_lane, F64x2ExtractLane, fp)    \
  V(f32x4_extract_lane, F32x4ExtractLane, fp)    \
  V(i64x2_extract_lane, I64x2ExtractLane, gp)    \
  V(i32x4_extract_lane, I32x4ExtractLane, gp)    \
  V(i16x8_extract_lane_u, I16x8ExtractLaneU, gp) \
  V(i16x8_extract_lane_s, I16x8ExtractLaneS, gp) \
  V(i8x16_extract_lane_u, I8x16ExtractLaneU, gp) \
  V(i8x16_extract_lane_s, I8x16ExtractLaneS, gp)

#define EMIT_SIMD_EXTRACT_LANE(name, op, dtype)                                \
  void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
                                     uint8_t imm_lane_idx) {                   \
    op(dst.dtype(), src.fp(), imm_lane_idx, r0);                               \
  }
SIMD_EXTRACT_LANE_LIST(EMIT_SIMD_EXTRACT_LANE)
#undef EMIT_SIMD_EXTRACT_LANE
#undef SIMD_EXTRACT_LANE_LIST
#define SIMD_REPLACE_LANE_LIST(V)             \
  V(f64x2_replace_lane, F64x2ReplaceLane, fp) \
  V(f32x4_replace_lane, F32x4ReplaceLane, fp) \
  V(i64x2_replace_lane, I64x2ReplaceLane, gp) \
  V(i32x4_replace_lane, I32x4ReplaceLane, gp) \
  V(i16x8_replace_lane, I16x8ReplaceLane, gp) \
  V(i8x16_replace_lane, I8x16ReplaceLane, gp)

#define EMIT_SIMD_REPLACE_LANE(name, op, stype)                        \
  void LiftoffAssembler::emit_##name(                                  \
      LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, \
      uint8_t imm_lane_idx) {                                          \
    op(dst.fp(), src1.fp(), src2.stype(), imm_lane_idx, r0);           \
  }
SIMD_REPLACE_LANE_LIST(EMIT_SIMD_REPLACE_LANE)
#undef EMIT_SIMD_REPLACE_LANE
#undef SIMD_REPLACE_LANE_LIST
#define SIMD_EXT_MUL_LIST(V)                          \
  V(i64x2_extmul_low_i32x4_s, I64x2ExtMulLowI32x4S)   \
  V(i64x2_extmul_low_i32x4_u, I64x2ExtMulLowI32x4U)   \
  V(i64x2_extmul_high_i32x4_s, I64x2ExtMulHighI32x4S) \
  V(i64x2_extmul_high_i32x4_u, I64x2ExtMulHighI32x4U) \
  V(i32x4_extmul_low_i16x8_s, I32x4ExtMulLowI16x8S)   \
  V(i32x4_extmul_low_i16x8_u, I32x4ExtMulLowI16x8U)   \
  V(i32x4_extmul_high_i16x8_s, I32x4ExtMulHighI16x8S) \
  V(i32x4_extmul_high_i16x8_u, I32x4ExtMulHighI16x8U) \
  V(i16x8_extmul_low_i8x16_s, I16x8ExtMulLowI8x16S)   \
  V(i16x8_extmul_low_i8x16_u, I16x8ExtMulLowI8x16U)   \
  V(i16x8_extmul_high_i8x16_s, I16x8ExtMulHighI8x16S) \
  V(i16x8_extmul_high_i8x16_u, I16x8ExtMulHighI8x16U)

#define EMIT_SIMD_EXT_MUL(name, op)                                      \
  void LiftoffAssembler::emit_##name(                                    \
      LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
    op(dst.fp(), src1.fp(), src2.fp(), kScratchDoubleReg);               \
  }
SIMD_EXT_MUL_LIST(EMIT_SIMD_EXT_MUL)
#undef EMIT_SIMD_EXT_MUL
#undef SIMD_EXT_MUL_LIST
#define SIMD_ALL_TRUE_LIST(V)    \
  V(i64x2_alltrue, I64x2AllTrue) \
  V(i32x4_alltrue, I32x4AllTrue) \
  V(i16x8_alltrue, I16x8AllTrue) \
  V(i8x16_alltrue, I8x16AllTrue)

#define EMIT_SIMD_ALL_TRUE(name, op)                        \
  void LiftoffAssembler::emit_##name(LiftoffRegister dst,   \
                                     LiftoffRegister src) { \
    op(dst.gp(), src.fp(), r0, kScratchDoubleReg);          \
  }
SIMD_ALL_TRUE_LIST(EMIT_SIMD_ALL_TRUE)
#undef EMIT_SIMD_ALL_TRUE
#undef SIMD_ALL_TRUE_LIST
#define SIMD_ADD_SUB_SAT_LIST(V)   \
  V(i16x8_add_sat_s, I16x8AddSatS) \
  V(i16x8_sub_sat_s, I16x8SubSatS) \
  V(i16x8_add_sat_u, I16x8AddSatU) \
  V(i16x8_sub_sat_u, I16x8SubSatU) \
  V(i8x16_add_sat_s, I8x16AddSatS) \
  V(i8x16_sub_sat_s, I8x16SubSatS) \
  V(i8x16_add_sat_u, I8x16AddSatU) \
  V(i8x16_sub_sat_u, I8x16SubSatU)

#define EMIT_SIMD_ADD_SUB_SAT(name, op)                                        \
  void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
                                     LiftoffRegister rhs) {                    \
    Simd128Register src1 = lhs.fp();                                           \
    Simd128Register src2 = rhs.fp();                                           \
    Simd128Register dest = dst.fp();                                           \
    /* The op needs a destination distinct from both sources. */               \
    if (dest == src1 || dest == src2) {                                        \
      dest = GetUnusedRegister(kFpReg, LiftoffRegList{src1, src2}).fp();       \
    }                                                                          \
    Simd128Register temp =                                                     \
        GetUnusedRegister(kFpReg, LiftoffRegList{dest, src1, src2}).fp();      \
    op(dest, src1, src2, kScratchDoubleReg, temp);                             \
    if (dest != dst.fp()) {                                                    \
      vlr(dst.fp(), dest, Condition(0), Condition(0), Condition(0));           \
    }                                                                          \
  }
SIMD_ADD_SUB_SAT_LIST(EMIT_SIMD_ADD_SUB_SAT)
#undef EMIT_SIMD_ADD_SUB_SAT
#undef SIMD_ADD_SUB_SAT_LIST
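
// The register dance above exists because the saturating add/sub helpers
// cannot operate fully in place: if dst aliases an input, a fresh destination
// is picked, the operation runs there with an extra temp, and vlr copies the
// result back into dst afterwards.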
#define SIMD_EXT_ADD_PAIRWISE_LIST(V)                         \
  V(i32x4_extadd_pairwise_i16x8_s, I32x4ExtAddPairwiseI16x8S) \
  V(i32x4_extadd_pairwise_i16x8_u, I32x4ExtAddPairwiseI16x8U) \
  V(i16x8_extadd_pairwise_i8x16_s, I16x8ExtAddPairwiseI8x16S) \
  V(i16x8_extadd_pairwise_i8x16_u, I16x8ExtAddPairwiseI8x16U)

#define EMIT_SIMD_EXT_ADD_PAIRWISE(name, op)                         \
  void LiftoffAssembler::emit_##name(LiftoffRegister dst,            \
                                     LiftoffRegister src) {          \
    Simd128Register src1 = src.fp();                                 \
    Simd128Register dest = dst.fp();                                 \
    /* The op needs a destination distinct from the source. */       \
    if (dest == src1) {                                              \
      dest = GetUnusedRegister(kFpReg, LiftoffRegList{src1}).fp();   \
    }                                                                \
    Simd128Register temp =                                           \
        GetUnusedRegister(kFpReg, LiftoffRegList{dest, src1}).fp();  \
    op(dest, src1, kScratchDoubleReg, temp);                         \
    if (dest != dst.fp()) {                                          \
      vlr(dst.fp(), dest, Condition(0), Condition(0), Condition(0)); \
    }                                                                \
  }
SIMD_EXT_ADD_PAIRWISE_LIST(EMIT_SIMD_EXT_ADD_PAIRWISE)
#undef EMIT_SIMD_EXT_ADD_PAIRWISE
#undef SIMD_EXT_ADD_PAIRWISE_LIST
#define SIMD_QFM_LIST(V)   \
  V(f64x2_qfma, F64x2Qfma) \
  V(f64x2_qfms, F64x2Qfms) \
  V(f32x4_qfma, F32x4Qfma) \
  V(f32x4_qfms, F32x4Qfms)

#define EMIT_SIMD_QFM(name, op)                                        \
  void LiftoffAssembler::emit_##name(                                  \
      LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, \
      LiftoffRegister src3) {                                          \
    op(dst.fp(), src1.fp(), src2.fp(), src3.fp());                     \
  }
SIMD_QFM_LIST(EMIT_SIMD_QFM)
#undef EMIT_SIMD_QFM
#undef SIMD_QFM_LIST
#define SIMD_RELAXED_BINOP_LIST(V)        \
  V(i8x16_relaxed_swizzle, i8x16_swizzle) \
  V(f64x2_relaxed_min, f64x2_pmin)        \
  V(f64x2_relaxed_max, f64x2_pmax)        \
  V(f32x4_relaxed_min, f32x4_pmin)        \
  V(f32x4_relaxed_max, f32x4_pmax)        \
  V(i16x8_relaxed_q15mulr_s, i16x8_q15mulr_sat_s)

#define SIMD_VISIT_RELAXED_BINOP(name, op)                                     \
  void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
                                     LiftoffRegister rhs) {                    \
    emit_##op(dst, lhs, rhs);                                                  \
  }
SIMD_RELAXED_BINOP_LIST(SIMD_VISIT_RELAXED_BINOP)
#undef SIMD_VISIT_RELAXED_BINOP
#undef SIMD_RELAXED_BINOP_LIST
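
// On s390 the relaxed SIMD instructions have no cheaper encoding, so each one
// simply forwards to the corresponding fully-specified emitter (for example,
// i8x16_relaxed_swizzle delegates to emit_i8x16_swizzle).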
#define SIMD_RELAXED_UNOP_LIST(V)                                   \
  V(i32x4_relaxed_trunc_f32x4_s, i32x4_sconvert_f32x4)              \
  V(i32x4_relaxed_trunc_f32x4_u, i32x4_uconvert_f32x4)              \
  V(i32x4_relaxed_trunc_f64x2_s_zero, i32x4_trunc_sat_f64x2_s_zero) \
  V(i32x4_relaxed_trunc_f64x2_u_zero, i32x4_trunc_sat_f64x2_u_zero)

#define SIMD_VISIT_RELAXED_UNOP(name, op)                   \
  void LiftoffAssembler::emit_##name(LiftoffRegister dst,   \
                                     LiftoffRegister src) { \
    emit_##op(dst, src);                                    \
  }
SIMD_RELAXED_UNOP_LIST(SIMD_VISIT_RELAXED_UNOP)
#undef SIMD_VISIT_RELAXED_UNOP
#undef SIMD_RELAXED_UNOP_LIST
#define F16_UNOP_LIST(V)     \
  V(f16x8_nearest_int)       \
  V(i16x8_sconvert_f16x8)    \
  V(i16x8_uconvert_f16x8)    \
  V(f16x8_sconvert_i16x8)    \
  V(f16x8_uconvert_i16x8)    \
  V(f16x8_demote_f32x4_zero) \
  V(f32x4_promote_low_f16x8) \
  V(f16x8_demote_f64x2_zero)

#define VISIT_F16_UNOP(name)                                \
  bool LiftoffAssembler::emit_##name(LiftoffRegister dst,   \
                                     LiftoffRegister src) { \
    return false;                                           \
  }
F16_UNOP_LIST(VISIT_F16_UNOP)
#undef VISIT_F16_UNOP
#undef F16_UNOP_LIST
#define F16_BINOP_LIST(V) \

#define VISIT_F16_BINOP(name)                                                  \
  bool LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
                                     LiftoffRegister rhs) {                    \
    return false;                                                              \
  }
F16_BINOP_LIST(VISIT_F16_BINOP)
#undef VISIT_F16_BINOP
#undef F16_BINOP_LIST
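
// FP16 SIMD is not implemented on s390: each of these emitters returns false
// to signal that the operation is unsupported, letting the caller fall back
// to a non-f16 lowering.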
                                                LiftoffRegister lhs,
                                                uint8_t imm_lane_idx) {

                                                LiftoffRegister src1,
                                                LiftoffRegister src2,
                                                uint8_t imm_lane_idx) {

                                      LiftoffRegister src1,
                                      LiftoffRegister src2,
                                      LiftoffRegister src3) {

                                      LiftoffRegister src1,
                                      LiftoffRegister src2,
                                      LiftoffRegister src3) {
                                     Register offset_reg, uintptr_t offset_imm,
                                     uint32_t* protected_load_pc,
  if (!is_int20(offset_imm)) {
    mov(ip, Operand(offset_imm));
    if (offset_reg != no_reg) {
  MachineType memtype = type.mem_type();

                                Register addr, Register offset_reg,
                                uintptr_t offset_imm, LoadType type,
                                uint8_t laneidx, uint32_t* protected_load_pc,
  MachineType mem_type = type.mem_type();
  if (protected_load_pc) *protected_load_pc = pc_offset();

                                 uintptr_t offset_imm, LiftoffRegister src,
                                 StoreType type, uint8_t lane,
                                 uint32_t* protected_store_pc,
  if (protected_store_pc) *protected_store_pc = pc_offset();
                                      LiftoffRegister rhs) {
  I64x2Mul(dst.fp(), lhs.fp(), rhs.fp(), r0, r1, ip);

                                      LiftoffRegister rhs) {

                                      LiftoffRegister rhs) {

                                      LiftoffRegister rhs) {

                                      LiftoffRegister lhs,
                                      LiftoffRegister rhs) {

                                      LiftoffRegister src) {

                                      LiftoffRegister src) {

                                      LiftoffRegister src) {

                                      LiftoffRegister lhs,
                                      LiftoffRegister rhs) {

                                      LiftoffRegister src) {

                                      LiftoffRegister src1,
                                      LiftoffRegister src2) {

                                      LiftoffRegister lhs,
                                      LiftoffRegister rhs) {

                                      LiftoffRegister lhs,
                                      LiftoffRegister rhs,
                                      LiftoffRegister acc) {
    uint8_t current_index = shuffle[i];
    shuffle_remapped[i] = (current_index <= max_index
                               ? max_index - current_index
                               : total_lane_count - current_index + max_index);
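    // Lane numbering is mirrored on big-endian s390: wasm shuffle index i
    // selects byte (max_index - i) within the same 16-byte source vector,
    // for both the first and the second input, which is what the remapping
    // above computes.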
  memcpy(vals, shuffle_remapped, sizeof(shuffle_remapped));
#ifdef V8_TARGET_BIG_ENDIAN
  I8x16Shuffle(dst.fp(), lhs.fp(), rhs.fp(), vals[1], vals[0], r0, ip,
                                      LiftoffRegister src) {

                                      LiftoffRegister src) {

                                           const uint8_t imms[16]) {
  memcpy(vals, imms, sizeof(vals));
#ifdef V8_TARGET_BIG_ENDIAN
3066#ifdef V8_TARGET_BIG_ENDIAN
3074 LiftoffRegister src1,
3075 LiftoffRegister src2,
3076 LiftoffRegister
mask) {
3081 LiftoffRegister src) {
3086 LiftoffRegister src) {
3091 LiftoffRegister src) {
3096 LiftoffRegister src) {
3101 LiftoffRegister src) {
3106 LiftoffRegister lhs,
3107 LiftoffRegister rhs) {
3112 LiftoffRegister lhs,
3113 LiftoffRegister rhs) {
3118 LiftoffRegister lhs,
3119 LiftoffRegister rhs) {
3124 LiftoffRegister lhs,
3125 LiftoffRegister rhs) {
3130 LiftoffRegister src) {
3135 LiftoffRegister src) {
3140 LiftoffRegister src1,
3141 LiftoffRegister src2,
3142 LiftoffRegister
mask,
    SafepointTableBuilder::Safepoint& safepoint, LiftoffRegList all_spills,
    LiftoffRegList ref_spills, int spill_offset) {
  while (!gp_spills.is_empty()) {
    LiftoffRegister reg = gp_spills.GetLastRegSet();
    if (ref_spills.has(reg)) {
      safepoint.DefineTaggedStackSlot(spill_offset);
    }
    gp_spills.clear(reg);

  Drop(num_stack_slots);
void LiftoffAssembler::CallCWithStackBuffer(
    const std::initializer_list<VarState> args, const LiftoffRegister* rets,
    ValueKind return_kind, ValueKind out_argument_kind, int stack_bytes,
    ExternalReference ext_ref) {
  int size = RoundUp(stack_bytes, 8);
  constexpr int kNumCCallArgs = 1;
  const LiftoffRegister* result_reg = rets;
  if (return_kind != kVoid) {
    constexpr Register kReturnReg = r2;
    if (kReturnReg != rets->gp()) {
      Move(*rets, LiftoffRegister(kReturnReg), return_kind);
    }
  if (out_argument_kind != kVoid) {
    switch (out_argument_kind) {
                              ExternalReference ext_ref) {
  int num_args = static_cast<int>(args.size());
  ParallelMove parallel_move{this};
      parallel_move.LoadIntoRegister(LiftoffRegister{kCArgRegs[reg_args]},
                                     arg);
  parallel_move.Execute();
                                 compiler::CallDescriptor* call_descriptor,
  CallWasmCodePointer(target);

    compiler::CallDescriptor* call_descriptor, Register target) {
  Label return_nan, done;

                                            LiftoffRegister src,
                                            LiftoffRegister tmp_s128,
  Label return_nan, done;
  if (lane_kind == kF32) {
  mov(r0, Operand(1));
  int last_stack_slot = param_slots;
  for (auto& slot : slots_) {
    const int stack_slot = slot.dst_slot_;
    last_stack_slot = stack_slot;
    switch (src.loc()) {
        switch (src.kind()) {
            UseScratchRegisterScope temps(asm_);
            Register scratch = temps.Acquire();
            UseScratchRegisterScope temps(asm_);
            Register scratch = temps.Acquire();
        switch (src.kind()) {
            UseScratchRegisterScope temps(asm_);
            Register scratch = temps.Acquire();
        UseScratchRegisterScope temps(asm_);
        Register scratch = temps.Acquire();
        switch (src.kind()) {
            asm_->mov(scratch, Operand(src.i32_const()));
            asm_->mov(scratch, Operand(int64_t{slot.src_.i32_const()}));
V8_INLINE void RecordComment(const char *comment, const SourceLocation &loc=SourceLocation::Current())
void lzer(DoubleRegister r1)
void b(int branch_offset, Condition cond=al, RelocInfo::Mode rmode=RelocInfo::NO_INFO)
void bne(Register rj, Register rd, int32_t offset)
void branchOnCond(Condition c, int branch_offset, bool is_bound=false, bool force_long_branch=false)
void lzdr(DoubleRegister r1)
void blt(Register rj, Register rd, int32_t offset)
friend class UseScratchRegisterScope
static constexpr int kGap
uint64_t jump_offset(Label *L)
void bge(Register rj, Register rd, int32_t offset)
void bunordered(Label *L, CRegister cr=cr0, LKBit lk=LeaveLK)
void ldr(Register dst, const MemOperand &src, Condition cond=al)
Assembler(const AssemblerOptions &, std::unique_ptr< AssemblerBuffer >={})
void AbortedCodeGeneration() override
void stop(Condition cond=al, int32_t code=kDefaultStopCode)
void beq(Register rj, Register rd, int32_t offset)
static constexpr int kFixedFrameSizeAboveFp
static V8_EXPORT_PRIVATE ExternalReference isolate_address()
static constexpr MachineType Uint8()
static constexpr MachineType Int32()
static constexpr MachineType Uint32()
static constexpr MachineType Uint16()
static constexpr MachineType Int16()
static constexpr MachineType Int64()
static constexpr MachineType Int8()
void I16x8DotI8x16S(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch)
void F32x4UConvertI32x4(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Register scratch2)
void I32x4TruncSatF64x2SZero(Simd128Register dst, Simd128Register src, Simd128Register scratch)
void ConvertIntToFloat(Register src, DoubleRegister dst)
void LoadStackLimit(Register destination, StackLimitKind kind)
void StoreV128(Simd128Register src, const MemOperand &mem, Register scratch)
void ConvertIntToDouble(Register src, DoubleRegister dst)
void LoadS16LE(Register dst, const MemOperand &mem, Register scratch)
void Call(Register target, Condition cond=al)
void S128Select(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register mask)
void LoadAndSplat64x2LE(Simd128Register dst, const MemOperand &mem, Register scratch)
void AtomicExchangeU8(Register addr, Register value, Register output, Register scratch)
void LoadU64LE(Register dst, const MemOperand &mem, Register scratch)
void LoadU8(Register dst, const MemOperand &mem, Register scratch=no_reg)
void I16x8Q15MulRSatS(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch1, Simd128Register scratch2, Simd128Register scratch3)
void Drop(int count, Condition cond=al)
void XorP(Register dst, Register src)
void LoadAndSplat16x8LE(Simd128Register dst, const MemOperand &me, Register scratch)
void mov(Register rd, Register rj)
void ModS32(Register dst, Register src, Register value)
void ModS64(Register dst, Register src, Register value)
void I8x16UConvertI16x8(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch)
void SmiUntag(Register reg, SBit s=LeaveCC)
void TestIfSmi(Register value, Register scratch)
void I8x16GeU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch)
void ConvertFloat32ToUnsignedInt32(const Register result, const DoubleRegister double_input, FPRoundingMode rounding_mode=kRoundToZero)
void LoadLane32LE(Simd128Register dst, const MemOperand &mem, int lane, Register scratch1, Simd128Register scratch2)
void F32x4SConvertI32x4(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Register scratch2)
void StoreF64(DoubleRegister src, const MemOperand &mem, Register scratch=no_reg)
void LoadV64ZeroLE(Simd128Register dst, const MemOperand &mem, Register scratch1, Simd128Register scratch2)
void StoreU64LE(Register src, const MemOperand &mem, Register scratch)
void LoadF32(DoubleRegister dst, const MemOperand &mem, Register scratch=no_reg)
void ConvertUnsignedIntToFloat(Register src, DoubleRegister dst)
void ModU64(Register dst, Register src, Register value)
void LoadV32ZeroLE(Simd128Register dst, const MemOperand &mem, Register scratch1, Simd128Register scratch2)
void Move(Register dst, Tagged< Smi > smi)
void LoadAndExtend32x2SLE(Simd128Register dst, const MemOperand &mem, Register scratch)
void I32x4BitMask(Register dst, VRegister src)
void StoreF32LE(DoubleRegister src, const MemOperand &mem, Register scratch, Register scratch2)
void I32x4GeU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch)
void StoreU16LE(Register src, const MemOperand &mem, Register scratch)
void AtomicCmpExchangeU16(Register addr, Register output, Register old_value, Register new_value, Register temp0, Register temp1)
void JumpIfSmi(Register value, Label *smi_label)
void LoadU16(Register dst, const MemOperand &mem, Register scratch=no_reg)
void MultiPush(RegList regs)
void AssertUnreachable(AbortReason reason) NOOP_UNLESS_DEBUG_CODE
void I32x4UConvertF32x4(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Register scratch2)
void ConvertFloat32ToUnsignedInt64(const Register result, const DoubleRegister double_input, FPRoundingMode rounding_mode=kRoundToZero)
void DivS32(Register dst, Register src, Register value, OEBit s=LeaveOE, RCBit r=LeaveRC)
void LoadS32(Register dst, const MemOperand &mem, Register scratch=no_reg)
void LoadAndExtend8x8SLE(Simd128Register dst, const MemOperand &mem, Register scratch)
void I64x2Mul(Simd128Register dst, Simd128Register src1, Simd128Register src2, Register scratch1, Register scrahc2, Register scratch3, Simd128Register scratch4)
void I32x4SConvertF32x4(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Register scratch2)
void F64x2PromoteLowF32x4(QwNeonRegister dst, QwNeonRegister src)
void StoreLane8LE(Simd128Register src, const MemOperand &mem, int lane, Register scratch1, Simd128Register scratch2)
void I32x4DotI16x8S(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch)
void StoreV128LE(Simd128Register src, const MemOperand &mem, Register scratch1, Register scratch2)
void CmpAndSwap64(Register old_val, Register new_val, const MemOperand &opnd)
void LoadLane64LE(Simd128Register dst, const MemOperand &mem, int lane, Register scratch1, Simd128Register scratch2)
void LoadS32LE(Register dst, const MemOperand &mem, Register scratch)
void CmpU32(Register src1, const Operand &src2, Register scratch, CRegister cr=cr0)
void LoadU16LE(Register dst, const MemOperand &mem, Register scratch)
void ModU32(Register dst, Register src, Register value)
void SmiTag(Register reg, SBit s=LeaveCC)
void StoreLane32LE(Simd128Register src, const MemOperand &mem, int lane, Register scratch1, Simd128Register scratch2)
void MovDoubleToInt64(Register dst, DoubleRegister src)
void ConvertFloat32ToInt32(const Register result, const DoubleRegister double_input, FPRoundingMode rounding_mode)
void I8x16Shuffle(Simd128Register dst, Simd128Register src1, Simd128Register src2, uint64_t high, uint64_t low, Register scratch1, Register scratch2, Simd128Register scratch3)
void ConvertDoubleToUnsignedInt32(const Register dst, const DoubleRegister double_input, FPRoundingMode rounding_mode=kRoundToZero)
void AddS64(Register dst, Register src, const Operand &value, Register scratch=r0, OEBit s=LeaveOE, RCBit r=LeaveRC)
void ConvertUnsignedInt64ToFloat(Register src, DoubleRegister double_dst)
void SubS64(Register dst, Register src, const Operand &value, Register scratch=r0, OEBit s=LeaveOE, RCBit r=LeaveRC)
void ShiftRightU32(Register dst, Register src, const Operand &value, RCBit r=LeaveRC)
void StoreF64LE(DoubleRegister src, const MemOperand &mem, Register scratch, Register scratch2)
void PushCommonFrame(Register marker_reg=no_reg)
void StoreF32(DoubleRegister src, const MemOperand &mem, Register scratch=no_reg)
void V128AnyTrue(Register dst, Simd128Register src, Register scratch1, Register scratch2, Simd128Register scratch3)
void CmpU64(Register src1, const Operand &src2, Register scratch, CRegister cr=cr0)
void MultiPop(RegList regs)
void LoadF64(DoubleRegister dst, const MemOperand &mem, Register scratch=no_reg)
void AddS32(Register dst, Register src, const Operand &value, Register scratch=r0, RCBit r=LeaveRC)
void Jump(Register target, Condition cond=al)
void StoreU32(Register src, const MemOperand &mem, Register scratch)
void LoadAndSplat8x16LE(Simd128Register dst, const MemOperand &mem, Register scratch)
void MultiPushF64OrV128(DoubleRegList dregs, Register scratch, Register location=sp)
void StoreTaggedField(const Register &value, const MemOperand &dst_field_operand)
void MovInt64ToDouble(DoubleRegister dst, Register src)
void LoadF32LE(DoubleRegister dst, const MemOperand &mem, Register scratch, Register scratch2)
void LoadU32(Register dst, const MemOperand &mem, Register scratch=no_reg)
void LoadTaggedField(const Register &destination, const MemOperand &field_operand)
void S128Const(Simd128Register dst, uint64_t high, uint64_t low, Register scratch1, Register scratch2)
void ConvertUnsignedIntToDouble(Register src, DoubleRegister dst)
void ConvertUnsignedInt64ToDouble(Register src, DoubleRegister double_dst)
void AddU32(Register dst, Register src1, Register src2)
void LoadS8(Register dst, const MemOperand &mem, Register scratch=no_reg)
void ConvertDoubleToInt32(const Register dst, const DoubleRegister double_input, FPRoundingMode rounding_mode=kRoundToZero)
void LoadV128LE(DoubleRegister dst, const MemOperand &mem, Register scratch0, Register scratch1)
void CheckPageFlag(Register object, int mask, Condition cc, Label *condition_met)
void I16x8BitMask(Register dst, VRegister src)
void SubS32(Register dst, Register src, const Operand &value, Register scratch=r0, RCBit r=LeaveRC)
int CallCFunction(ExternalReference function, int num_arguments, SetIsolateDataSlots set_isolate_data_slots=SetIsolateDataSlots::kYes, Label *return_label=nullptr)
void I64x2BitMask(Register dst, QwNeonRegister src)
void LoadLane8LE(Simd128Register dst, const MemOperand &mem, int lane, Register scratch1, Simd128Register scratch2)
void LoadAndExtend8x8ULE(Simd128Register dst, const MemOperand &mem, Register scratch1, Simd128Register scratch2)
void ConvertDoubleToUnsignedInt64(const DoubleRegister double_input, const Register dst, const DoubleRegister double_dst, FPRoundingMode rounding_mode=kRoundToZero)
void DivU32(Register dst, Register src, Register value, OEBit s=LeaveOE, RCBit r=LeaveRC)
void I16x8UConvertI32x4(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch)
void I16x8SConvertI32x4(Simd128Register dst, Simd128Register src1, Simd128Register src2)
void MulS64(Register dst, Register src, const Operand &value, Register scratch=r0, OEBit s=LeaveOE, RCBit r=LeaveRC)
void LoadAndSplat32x4LE(Simd128Register dst, const MemOperand &mem, Register scratch)
void ShiftLeftU64(Register dst, Register src, const Operand &value, RCBit r=LeaveRC)
void CmpS64(Register src1, const Operand &src2, Register scratch, CRegister cr=cr0)
void DivU64(Register dst, Register src, Register value, OEBit s=LeaveOE, RCBit r=LeaveRC)
void F32x4DemoteF64x2Zero(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Register scratch2, Register scratch3, Register scratch4)
void LoadU32LE(Register dst, const MemOperand &mem, Register scratch)
void OrP(Register dst, Register src)
void I32x4DotI8x16AddS(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register src3)
void AllocateStackSpace(Register bytes)
void MoveChar(const MemOperand &opnd1, const MemOperand &opnd2, const Operand &length)
void LoadAndExtend16x4SLE(Simd128Register dst, const MemOperand &mem, Register scratch)
void LoadAndExtend32x2ULE(Simd128Register dst, const MemOperand &mem, Register scratch1, Simd128Register scratch2)
void CmpS32(Register src1, const Operand &src2, Register scratch, CRegister cr=cr0)
void StoreU64(Register src, const MemOperand &mem, Register scratch=no_reg)
void StoreU16(Register src, const MemOperand &mem, Register scratch)
void StoreU8(Register src, const MemOperand &mem, Register scratch)
void CallRecordWriteStubSaveRegisters(Register object, Operand offset, SaveFPRegsMode fp_mode, StubCallMode mode=StubCallMode::kCallBuiltinPointer)
void AddU64(Register dst, const Operand &imm)
void I8x16BitMask(Register dst, VRegister src, VRegister temp=NoVReg)
void DivS64(Register dst, Register src, Register value, OEBit s=LeaveOE, RCBit r=LeaveRC)
void I16x8GeU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch)
void PrepareCallCFunction(int num_reg_arguments, int num_double_registers=0, Register scratch=no_reg)
void LoadU64(Register dst, const MemOperand &mem, Register scratch=no_reg)
void ConvertFloat32ToInt64(const Register dst, const DoubleRegister double_input, FPRoundingMode rounding_mode=kRoundToZero)
void LoadF64LE(DoubleRegister dst, const MemOperand &mem, Register scratch, Register scratch2)
void LoadAndExtend16x4ULE(Simd128Register dst, const MemOperand &mem, Register scratch1, Simd128Register scratch2)
void StoreU32LE(Register src, const MemOperand &mem, Register scratch)
void CmpAndSwap(Register old_val, Register new_val, const MemOperand &opnd)
void MultiPopF64OrV128(DoubleRegList dregs, Register scratch, Register location=sp)
void I8x16SConvertI16x8(Simd128Register dst, Simd128Register src1, Simd128Register src2)
void AtomicCmpExchangeU8(Register addr, Register output, Register old_value, Register new_value, Register temp0, Register temp1)
void ConvertInt64ToDouble(Register src, DoubleRegister double_dst)
void ConvertInt64ToFloat(Register src, DoubleRegister double_dst)
void AtomicExchangeU16(Register addr, Register value, Register output, Register scratch)
void I32x4TruncSatF64x2UZero(Simd128Register dst, Simd128Register src, Simd128Register scratch)
void LoadS16(Register dst, const MemOperand &mem, Register scratch=no_reg)
void StoreLane16LE(Simd128Register src, const MemOperand &mem, int lane, Register scratch1, Simd128Register scratch2)
void ConvertDoubleToInt64(const DoubleRegister double_input, const Register dst, const DoubleRegister double_dst, FPRoundingMode rounding_mode=kRoundToZero)
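// Hedged note: on architectures where the FP-to-integer conversion lands in a
// floating-point register first (e.g. via fctidz on PPC), double_dst is that
// intermediate register and dst receives the final integer move. The
// rounding_mode defaults to truncation, matching wasm's trunc semantics.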
void LoadV128(Simd128Register dst, const MemOperand &mem, Register scratch)
void StoreLane64LE(Simd128Register src, const MemOperand &mem, int lane, Register scratch1, Simd128Register scratch2)
void I8x16Swizzle(Simd128Register dst, Simd128Register src1, Simd128Register src2, Register scratch1, Register scratch2, Simd128Register scratch3)
void AndP(Register dst, Register src)
void LoadLane16LE(Simd128Register dst, const MemOperand &mem, int lane, Register scratch1, Simd128Register scratch2)
static constexpr MainThreadFlags kPointersToHereAreInterestingMask
static constexpr MainThreadFlags kPointersFromHereAreInterestingMask
static V8_INLINE Operand Zero()
static constexpr Tagged< Smi > FromInt(int value)
static constexpr int32_t TypeToMarker(Type type)
static constexpr int kFrameTypeOffset
static constexpr Register GapRegister()
static constexpr int kInstanceDataOffset
static constexpr int kFeedbackVectorOffset
void emit_i8x16_swizzle(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_store_nonzero_if_nan(Register dst, DoubleRegister src, ValueKind kind)
void AtomicXor(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, bool i64_offset)
void emit_i64_shli(LiftoffRegister dst, LiftoffRegister src, int32_t amount)
void emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
bool emit_f16x8_qfms(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, LiftoffRegister src3)
bool emit_f16x8_extract_lane(LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx)
void emit_i32_rems(Register dst, Register lhs, Register rhs, Label *trap_rem_by_zero)
void emit_store_nonzero(Register dst)
void emit_i32_eqz(Register dst, Register src)
void emit_i32x4_uconvert_f32x4(LiftoffRegister dst, LiftoffRegister src)
void RecordUsedSpillOffset(int offset)
void emit_i8x16_uconvert_i16x8(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void FillI64Half(Register, int offset, RegPairHalf)
void emit_f32x4_demote_f64x2_zero(LiftoffRegister dst, LiftoffRegister src)
bool emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label *trap_rem_by_zero)
void CallCWithStackBuffer(const std::initializer_list< VarState > args, const LiftoffRegister *rets, ValueKind return_kind, ValueKind out_argument_kind, int stack_bytes, ExternalReference ext_ref)
void emit_i32x4_dot_i16x8_s(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i64_muli(LiftoffRegister dst, LiftoffRegister lhs, int32_t imm)
void emit_i16x8_dot_i8x16_i7x16_s(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2)
void TailCallNativeWasmCode(Address addr)
void SpillInstanceData(Register instance)
void RecordOolSpillSpaceSize(int size)
void AtomicAdd(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, bool i64_offset)
void AtomicSub(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, bool i64_offset)
void LoadTransform(LiftoffRegister dst, Register src_addr, Register offset_reg, uintptr_t offset_imm, LoadType type, LoadTransformationKind transform, uint32_t *protected_load_pc, bool i64_offset)
void emit_s128_store_nonzero_if_nan(Register dst, LiftoffRegister src, Register tmp_gp, LiftoffRegister tmp_s128, ValueKind lane_kind)
void AtomicAnd(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, bool i64_offset)
void emit_i16x8_sconvert_i32x4(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void AtomicCompareExchange(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister value, StoreType type, bool i64_offset)
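// Hedged illustration (registers hypothetical): the Atomic* read-modify-write
// helpers all share one shape -- base address, optional index register,
// immediate offset, input value, result register, access size, and whether
// the index register holds a full 64-bit offset. A 32-bit atomic add at
// [addr_reg + 16] might be emitted as:
//
//   AtomicAdd(addr_reg, no_reg, /*offset_imm=*/16,
//             LiftoffRegister(value_reg), LiftoffRegister(result_reg),
//             StoreType::kI32Store, /*i64_offset=*/false);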
void emit_i32x4_dot_i8x16_i7x16_add_s(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, LiftoffRegister acc)
void emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs)
void emit_i8x16_sconvert_i16x8(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_f64_set_cond(Condition condition, Register dst, DoubleRegister lhs, DoubleRegister rhs)
void Fill(LiftoffRegister, int offset, ValueKind)
void LoadFullPointer(Register dst, Register src_addr, int32_t offset_imm)
void DeallocateStackSlot(uint32_t size)
void StackCheck(Label *ool_code)
bool emit_f16x8_replace_lane(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, uint8_t imm_lane_idx)
void Store(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister src, StoreType type, LiftoffRegList pinned, uint32_t *protected_store_pc=nullptr, bool is_store_mem=false, bool i64_offset=false)
void Load(LiftoffRegister dst, Register src_addr, Register offset_reg, uintptr_t offset_imm, LoadType type, uint32_t *protected_load_pc=nullptr, bool is_load_mem=false, bool i64_offset=false, bool needs_shift=false)
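// Hedged usage sketch: for memory accesses guarded by the trap handler,
// protected_load_pc receives the pc of the potentially faulting instruction,
// so a hardware fault there can be translated back into a wasm out-of-bounds
// trap (names hypothetical):
//
//   uint32_t protected_pc = 0;
//   Load(LiftoffRegister(dst_reg), mem_base, index_reg, /*offset_imm=*/0,
//        LoadType::kI32Load, &protected_pc, /*is_load_mem=*/true);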
void emit_f32x4_uconvert_i32x4(LiftoffRegister dst, LiftoffRegister src)
void emit_i32_divs(Register dst, Register lhs, Register rhs, Label *trap_div_by_zero, Label *trap_div_unrepresentable)
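// Hedged note: signed division takes two trap labels because wasm i32.div_s
// traps both on a zero divisor and on the single unrepresentable quotient
// INT32_MIN / -1 (the mathematical result, 2^31, does not fit in i32);
// unsigned division and both remainder forms only need the zero check, as the
// emit_i32_remu/emit_i32_rems signatures above reflect.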
void emit_f64x2_promote_low_f32x4(LiftoffRegister dst, LiftoffRegister src)
void LoadLane(LiftoffRegister dst, LiftoffRegister src, Register addr, Register offset_reg, uintptr_t offset_imm, LoadType type, uint8_t lane, uint32_t *protected_load_pc, bool i64_offset)
void emit_i32x4_bitmask(LiftoffRegister dst, LiftoffRegister src)
void emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs)
void CallBuiltin(Builtin builtin)
void CallFrameSetupStub(int declared_function_index)
void emit_i32_remu(Register dst, Register lhs, Register rhs, Label *trap_rem_by_zero)
void LoadSpillAddress(Register dst, int offset, ValueKind kind)
void StoreCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx, ValueKind, Register frame_pointer)
void AtomicExchange(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, bool i64_offset)
void emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void Spill(VarState *slot)
void emit_s128_select(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, LiftoffRegister mask)
bool emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label *trap_rem_by_zero)
void AssertUnreachable(AbortReason reason)
void CallC(const std::initializer_list< VarState > args, ExternalReference ext_ref)
void emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst, LiftoffRegister src)
void emit_i64x2_bitmask(LiftoffRegister dst, LiftoffRegister src)
bool emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label *trap_div_by_zero)
void DropStackSlotsAndRet(uint32_t num_stack_slots)
void emit_s128_relaxed_laneselect(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, LiftoffRegister mask, int lane_width)
void emit_i32x4_sconvert_f32x4(LiftoffRegister dst, LiftoffRegister src)
void LoadConstant(LiftoffRegister, WasmValue)
int GetTotalFrameSize() const
void CallNativeWasmCode(Address addr)
void PrepareTailCall(int num_callee_stack_params, int stack_param_delta)
void emit_i32_divu(Register dst, Register lhs, Register rhs, Label *trap_div_by_zero)
void emit_cond_jump(Condition, Label *, ValueKind value, Register lhs, Register rhs, const FreezeCacheState &frozen)
void LoadFromInstance(Register dst, Register instance, int offset, int size)
void emit_smi_check(Register obj, Label *target, SmiCheckMode mode, const FreezeCacheState &frozen)
static bool NeedsAlignment(ValueKind kind)
static int SlotSizeForType(ValueKind kind)
void LoadProtectedPointer(Register dst, Register src_addr, int32_t offset)
bool supports_f16_mem_access()
void emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst, LiftoffRegister src)
void emit_f32_set_cond(Condition condition, Register dst, DoubleRegister lhs, DoubleRegister rhs)
void LoadInstanceDataFromFrame(Register dst)
void emit_i64_set_cond(Condition condition, Register dst, LiftoffRegister lhs, LiftoffRegister rhs)
void MoveStackValue(uint32_t dst_offset, uint32_t src_offset, ValueKind)
void Move(LiftoffRegister dst, LiftoffRegister src, ValueKind)
void emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i16x8_uconvert_i32x4(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void AtomicStore(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister src, StoreType type, LiftoffRegList pinned, bool i64_offset)
static constexpr int kStackSlotSize
void PatchPrepareStackFrame(int offset, SafepointTableBuilder *, bool feedback_vector_slot, size_t stack_param_slots)
void emit_f32x4_sconvert_i32x4(LiftoffRegister dst, LiftoffRegister src)
void emit_ptrsize_cond_jumpi(Condition, Label *, Register lhs, int32_t imm, const FreezeCacheState &frozen)
void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx, ValueKind)
CacheState * cache_state()
void emit_i32_set_cond(Condition, Register dst, Register lhs, Register rhs)
bool emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label *trap_div_by_zero, Label *trap_div_unrepresentable)
void TailCallIndirect(compiler::CallDescriptor *call_descriptor, Register target)
void emit_i16x8_q15mulr_sat_s(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2)
void emit_i16x8_bitmask(LiftoffRegister dst, LiftoffRegister src)
void AllocateStackSlot(Register addr, uint32_t size)
void LoadTaggedPointer(Register dst, Register src_addr, Register offset_reg, int32_t offset_imm, uint32_t *protected_load_pc=nullptr, bool offset_reg_needs_shift=false)
void PushRegisters(LiftoffRegList)
void emit_i32_cond_jumpi(Condition, Label *, Register lhs, int imm, const FreezeCacheState &frozen)
void emit_i64_eqz(Register dst, LiftoffRegister src)
void StoreTaggedPointer(Register dst_addr, Register offset_reg, int32_t offset_imm, Register src, LiftoffRegList pinned, uint32_t *protected_store_pc=nullptr, SkipWriteBarrier=kNoSkipWriteBarrier)
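// Hedged sketch (names hypothetical): when the stored value is statically
// known not to need a generational write barrier (e.g. it is a Smi), the
// barrier can be skipped explicitly:
//
//   StoreTaggedPointer(obj_reg, no_reg, field_offset, value_reg, pinned,
//                      /*protected_store_pc=*/nullptr, kSkipWriteBarrier);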
void bailout(LiftoffBailoutReason reason, const char *detail)
void IncrementSmi(LiftoffRegister dst, int offset)
void PopRegisters(LiftoffRegList)
LiftoffRegister GetUnusedRegister(RegClass rc, std::initializer_list< LiftoffRegister > try_first, LiftoffRegList pinned)
void emit_i8x16_shuffle(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, const uint8_t shuffle[16], bool is_swizzle)
void AtomicOr(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, bool i64_offset)
bool emit_select(LiftoffRegister dst, Register condition, LiftoffRegister true_value, LiftoffRegister false_value, ValueKind kind)
bool emit_type_conversion(WasmOpcode opcode, LiftoffRegister dst, LiftoffRegister src, Label *trap=nullptr)
void AtomicLoad(LiftoffRegister dst, Register src_addr, Register offset_reg, uintptr_t offset_imm, LoadType type, LiftoffRegList pinned, bool i64_offset)
void LoadReturnStackSlot(LiftoffRegister, int offset, ValueKind)
void emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void LoadTaggedPointerFromInstance(Register dst, Register instance, int offset)
void emit_v128_anytrue(LiftoffRegister dst, LiftoffRegister src)
void FillStackSlotsWithZero(int start, int size)
void emit_s128_const(LiftoffRegister dst, const uint8_t imms[16])
void StoreLane(Register dst, Register offset, uintptr_t offset_imm, LiftoffRegister src, StoreType type, uint8_t lane, uint32_t *protected_store_pc, bool i64_offset)
Register LoadOldFramePointer()
void CheckTierUp(int declared_func_index, int budget_used, Label *ool_label, const FreezeCacheState &frozen)
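// Hedged usage sketch (names hypothetical): a function prologue can charge
// its estimated cost against the per-function tiering budget and branch to
// out-of-line code that requests optimized compilation once the budget runs
// out, while the register cache state stays frozen:
//
//   CheckTierUp(func_index, /*budget_used=*/estimated_cost, &tier_up_ool,
//               frozen);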
void CallIndirect(const ValueKindSig *sig, compiler::CallDescriptor *call_descriptor, Register target)
void RecordSpillsInSafepoint(SafepointTableBuilder::Safepoint &safepoint, LiftoffRegList all_spills, LiftoffRegList ref_spills, int spill_offset)
bool emit_f16x8_qfma(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, LiftoffRegister src3)
void clear_i32_upper_half(Register dst)
void LoadTrustedPointer(Register dst, Register src_addr, int offset, IndirectPointerTag tag)
static constexpr int StaticStackFrameSize()
void emit_i8x16_bitmask(LiftoffRegister dst, LiftoffRegister src)
constexpr unsigned GetNumRegsSet() const
constexpr Register gp() const
base::SmallVector< Slot, 8 > slots_
static int SlotSizeInBytes(const Slot &slot)
void Construct(int param_slots)
LiftoffAssembler *const asm_
static constexpr int ToTagged(int offset)
#define EMIT_SIMD_UNOP(name)
#define EMIT_SIMD_QFM(name)
#define EMIT_SIMD_ALL_TRUE(name)
#define EMIT_SIMD_EXT_ADD_PAIRWISE(name)
#define SIMD_EXTRACT_LANE_LIST(V)
#define EMIT_SIMD_REPLACE_LANE(name, stype)
#define EMIT_SIMD_EXT_MUL(name)
#define EMIT_SIMD_ADD_SUB_SAT(name)
#define SIMD_REPLACE_LANE_LIST(V)
#define EMIT_SIMD_EXTRACT_LANE(name, dtype)
#define COMPRESS_POINTERS_BOOL
#define V8_ENABLE_SANDBOX_BOOL
#define EMIT_UNOP_FUNCTION(name, instr, dtype, stype, dcast, scast, rcast, ret, return_type)
#define SIMD_VISIT_RELAXED_BINOP(name, op)
#define SIMD_RELAXED_BINOP_LIST(V)
#define EMIT_BINOP_FUNCTION(name, instr, dtype, stype1, stype2, dcast, scast1, scast2, rcast, ret, return_type)
#define SIMD_RELAXED_UNOP_LIST(V)
#define EMIT_SIMD_SHIFT_RR(name, op)
#define SIMD_SHIFT_RI_LIST(V)
#define SIMD_VISIT_RELAXED_UNOP(name, op)
#define VISIT_F16_BINOP(name)
#define F16_BINOP_LIST(V)
#define VISIT_F16_UNOP(name)
#define EMIT_SIMD_SHIFT_RI(name, op, mask)
#define SIMD_SHIFT_RR_LIST(V)
#define EMIT_SIMD_BINOP_RR(name, op)
#define EMIT_SET_CONDITION(dst, cond)
#define PREP_MEM_OPERAND(offset_reg, offset_imm, scratch)
#define SIMD_BINOP_RR_LIST(V)
#define EMIT_EQZ(test, src)
MovableLabel continuation
LiftoffRegList regs_to_save
std::optional< OolTrapLabel > trap
#define SIMD_UNOP_LIST(V)
#define SIMD_ALL_TRUE_LIST(V)
#define SIMD_EXT_ADD_PAIRWISE_LIST(V)
#define SIMD_EXT_MUL_LIST(V)
#define SIMD_ADD_SUB_SAT_LIST(V)
constexpr bool IsPowerOfTwo(T value)
constexpr int WhichPowerOfTwo(T value)
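// Hedged example: these two helpers pair up for strength reduction --
// IsPowerOfTwo(8) is true and WhichPowerOfTwo(8) == 3, so a multiply by 8
// can instead be emitted as a left shift by 3 (e.g. via ShiftLeftU64).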
void StoreToMemory(LiftoffAssembler *assm, MemOperand dst, const LiftoffAssembler::VarState &src)
MemOperand GetStackSlot(int offset)
MemOperand GetInstanceDataOperand()
constexpr DoubleRegister kFpReturnRegisters[]
constexpr Register kGpParamRegisters[]
constexpr DoubleRegister kFpParamRegisters[]
constexpr Register kGpReturnRegisters[]
int declared_function_index(const WasmModule *module, int func_index)
constexpr int value_kind_size(ValueKind kind)
static constexpr LiftoffRegList kGpCacheRegList
static constexpr LiftoffRegList kFpCacheRegList
constexpr bool is_reference(ValueKind kind)
using ValueKindSig = LiftoffAssembler::ValueKindSig
constexpr Register no_reg
constexpr int kSimd128Size
using DoubleRegister = DwVfpRegister
constexpr DoubleRegister kScratchDoubleReg
const int kStackFrameExtraParamSlot
using Simd128Register = QwNeonRegister
constexpr int kSystemPointerSize
constexpr Register kReturnRegister0
constexpr bool SmiValuesAre31Bits()
V8_EXPORT_PRIVATE FlagValues v8_flags
constexpr VFPRoundingMode kRoundToZero
constexpr Register kCArgRegs[]
std::unique_ptr< AssemblerBuffer > ExternalAssemblerBuffer(void *start, int size)
constexpr int kDoubleSize
Condition to_condition(Condition cond)
bool is_signed(Condition cond)
static V ByteReverse(V value)
#define DCHECK_LE(v1, v2)
#define DCHECK_NE(v1, v2)
#define DCHECK(condition)
#define DCHECK_LT(v1, v2)
#define DCHECK_EQ(v1, v2)
constexpr T RoundUp(T x, intptr_t m)
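// Hedged example: RoundUp(13, 8) == 16; frame and spill sizes are typically
// rounded up to the stack-slot alignment this way before the prologue is
// patched.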
Register cached_instance_data
LiftoffRegList used_registers
#define V8_LIKELY(condition)