#ifndef V8_WASM_BASELINE_ARM64_LIFTOFF_ASSEMBLER_ARM64_INL_H_
#define V8_WASM_BASELINE_ARM64_LIFTOFF_ASSEMBLER_ARM64_INL_H_
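// Inline implementations of the LiftoffAssembler interface for arm64, used
// by Liftoff, V8's WebAssembly baseline compiler.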
if ((list.Count() & 1) != 0) list.set(fp_scratch);
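// The arm64 stack pointer must stay 16-byte aligned and register lists are
// pushed/popped in pairs, so an odd-length list is padded with fp_scratch,
// which serves purely as filler.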
                     bool i64_offset = false, unsigned shift_amount = 0) {
  assm->Add(effective_addr, addr.X(), offset_imm);

                            uintptr_t offset_imm, bool i64_offset = false) {
  if (!offset.is_valid() && offset_imm == 0) return addr;
  if (offset_imm != 0) assm->Add(tmp, addr, offset_imm);
template <ShiftDirection dir, ShiftSign sign = ShiftSign::kSigned>
  assm->Dup(tmp, shift);
  assm->Sshl(dst, lhs, tmp);
  assm->Ushl(dst, lhs, tmp);
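// NEON has no variable right-shift: Sshl/Ushl shift left by a per-lane
// signed amount and shift right when that amount is negative, so right
// shifts are emitted by negating the duplicated shift count first.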
template <VectorFormat format, ShiftSign sign>
  int32_t shift = rhs & mask;
  assm->Sshr(dst, lhs, rhs & mask);
  assm->Ushr(dst, lhs, rhs & mask);
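// Immediate shifts mask the count to the lane width first; Sshr/Ushr can
// only encode shift amounts from 1 up to the lane size, so a masked count
// of zero has to be special-cased as a plain move.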
  assm->Umaxp(temp, src.fp().V4S(), src.fp().V4S());
  assm->Cmp(dst.gp().X(), 0);

  assm->Umov(dst.gp().W(), temp, 0);
  assm->Cmp(dst.gp().W(), 0);
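// AnyTrue: Umaxp folds the vector pairwise, producing a 64-bit value that
// is non-zero iff any input lane was non-zero; Umov moves it to a core
// register, where a compare against zero materializes the boolean.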
  if (src.is_const()) {
    if (src.kind() == kI32) {
      if (src.i32_const() == 0) return wzr;
      assm->Mov(temp, src.i32_const());

      if (src.i32_const() == 0) return xzr;
      assm->Mov(temp, static_cast<int64_t>(src.i32_const()));

  if (src.kind() == kI16) {
    assm->Strh(src.reg().gp(), dst);
  assm->Str(src_reg, dst);
                kLiftoffFrameSetupFunctionReg) ==
  LoadConstant(LiftoffRegister(kLiftoffFrameSetupFunctionReg),
                                      int stack_param_delta) {
  temps.Exclude(x16, x17);
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
  Register scratch = temps.AcquireX();
  Sub(x16, x16, stack_param_delta * 8);
  int slot_count = num_callee_stack_params;
  for (int i = slot_count - 1; i >= 0; --i) {

                "kStackSlotSize must equal kXRegSize");
  frame_size += padding;
    int offset, SafepointTableBuilder* safepoint_table_builder,
    bool feedback_vector_slot, size_t stack_param_slots) {
  if (feedback_vector_slot) {
  PatchingAssembler patching_assembler(zone(), AssemblerOptions{},
  patching_assembler.PatchSubSp(frame_size);

  if (frame_size < v8_flags.stack_size * 1024) {
    Register stack_limit = temps.AcquireX();
    Add(stack_limit, stack_limit, Operand(frame_size));
    Cmp(sp, stack_limit);
  if (v8_flags.experimental_wasm_growable_stacks) {
    regs_to_save.set(WasmHandleStackOverflowDescriptor::FrameBaseRegister());
    Add(WasmHandleStackOverflowDescriptor::FrameBaseRegister(), fp,
    safepoint_table_builder->DefineSafepoint(this);
  Call(static_cast<Address>(Builtin::kWasmStackOverflow),
  safepoint_table_builder->DefineSafepoint(this);
  Claim(frame_size, 1);
                                         const FreezeCacheState& frozen) {
  Register budget_array = temps.AcquireX();
           WasmTrustedInstanceData::kTieringBudgetArrayOffset);
  int budget_arr_offset = kInt32Size * declared_func_index;
  Add(budget_array, budget_array, budget_arr_offset);
  budget_arr_offset = 0;
  MemOperand budget_addr{budget_array, budget_arr_offset};
  ldr(budget, budget_addr);
  if (budget_used > 0xFFF000) {
    budget_used = 0xFFF000;
  } else if (budget_used > 0xFFF) {
    budget_used &= 0xFFF000;
  str(budget, budget_addr);
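// The clamp keeps budget_used encodable as an AArch64 add/sub immediate:
// those immediates are 12 bits, optionally shifted left by 12, so anything
// above 0xFFF is rounded to a multiple of 0x1000 and capped at 0xFFF000.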
  if (!v8_flags.experimental_wasm_growable_stacks) {

  Label done, call_runtime;
  B(eq, &call_runtime);
  Mov(old_fp.gp(), fp);
  Register scratch = temps.AcquireX();
  if (options().enable_simulator_code) {
  switch (value.type().kind()) {
      Mov(reg.gp().W(), value.to_i32());
      Mov(reg.gp().X(), value.to_i64());
      Fmov(reg.fp().S(), value.to_f32());
      Fmov(reg.fp().D(), value.to_f64());
template <LoadOrStore kLoadOrStore, uint8_t kExtraEmittedInstructions = 0>
      uint32_t* protected_instruction_pc)

      uint32_t* protected_load_pc,
                offset_imm, false, shift_amount);
      collect_protected_load(this, protected_load_pc);
                                       int32_t offset_imm) {

                                     int32_t offset_imm) {
  Ldr(dst.X(), src_op);

#ifdef V8_ENABLE_SANDBOX
void LiftoffAssembler::LoadCodeEntrypointViaCodePointer(Register dst,
                                                        int32_t offset_imm) {
  MacroAssembler::LoadCodeEntrypointViaCodePointer(dst, src_op,
                                          int32_t offset_imm, Register src,
                                          uint32_t* protected_store_pc,
                                          SkipWriteBarrier skip_write_barrier) {
  Operand offset_op = offset_reg.is_valid() ? Operand(offset_reg.W(), UXTW)
                                            : Operand(offset_imm);
  if (offset_reg.is_valid() && offset_imm) {
    Register effective_offset = temps.AcquireX();
    Add(effective_offset.W(), offset_reg.W(), offset_imm);
    offset_op = effective_offset;
  GetProtectedInstruction<LoadOrStore::kStore> collect_protected_store(
      this, protected_store_pc);
  StoreTaggedField(src, MemOperand(dst_addr.X(), offset_op));
  if (skip_write_barrier || v8_flags.disable_write_barriers) return;
                   StubCallMode::kCallWasmRuntimeStub);
                            Register offset_reg, uintptr_t offset_imm,
                            LoadType type, uint32_t* protected_load_pc,
                            bool, bool i64_offset,
  unsigned shift_amount = needs_shift ? type.size_log_2() : 0;
                             offset_imm, i64_offset, shift_amount);
  DCHECK(!src_op.IsPostIndex());
  GetProtectedInstruction<LoadOrStore::kLoad> collect_protected_load(
      this, protected_load_pc);
  switch (type.value()) {
    case LoadType::kI32Load8U:
    case LoadType::kI64Load8U:
      Ldrb(dst.gp().W(), src_op);
      break;
    case LoadType::kI32Load8S:
      Ldrsb(dst.gp().W(), src_op);
      break;
    case LoadType::kI64Load8S:
      Ldrsb(dst.gp().X(), src_op);
      break;
    case LoadType::kI32Load16U:
    case LoadType::kI64Load16U:
      Ldrh(dst.gp().W(), src_op);
      break;
    case LoadType::kI32Load16S:
      Ldrsh(dst.gp().W(), src_op);
      break;
    case LoadType::kI64Load16S:
      Ldrsh(dst.gp().X(), src_op);
      break;
    case LoadType::kI32Load:
    case LoadType::kI64Load32U:
      Ldr(dst.gp().W(), src_op);
      break;
    case LoadType::kI64Load32S:
      Ldrsw(dst.gp().X(), src_op);
      break;
    case LoadType::kI64Load:
      Ldr(dst.gp().X(), src_op);
      break;
    case LoadType::kF32Load:
      Ldr(dst.fp().S(), src_op);
      break;
    case LoadType::kF32LoadF16: {
      CpuFeatureScope scope(this, FP16);
      Ldr(dst.fp().H(), src_op);
      Fcvt(dst.fp().S(), dst.fp().H());
      break;
    }
    case LoadType::kF64Load:
      Ldr(dst.fp().D(), src_op);
      break;
    case LoadType::kS128Load:
      Ldr(dst.fp().Q(), src_op);
      break;
                             uintptr_t offset_imm, LiftoffRegister src,
                             StoreType type, LiftoffRegList,
                             uint32_t* protected_store_pc,
                             bool, bool i64_offset) {
                             offset_imm, i64_offset);
  DCHECK(!dst_op.IsPostIndex());
  GetProtectedInstruction<LoadOrStore::kStore> collect_protected_store(
      this, protected_store_pc);
  switch (type.value()) {
    case StoreType::kI32Store8:
    case StoreType::kI64Store8:
      Strb(src.gp().W(), dst_op);
      break;
    case StoreType::kI32Store16:
    case StoreType::kI64Store16:
      Strh(src.gp().W(), dst_op);
      break;
    case StoreType::kI32Store:
    case StoreType::kI64Store32:
      Str(src.gp().W(), dst_op);
      break;
    case StoreType::kI64Store:
      Str(src.gp().X(), dst_op);
      break;
    case StoreType::kF32StoreF16: {
      CpuFeatureScope scope(this, FP16);
      Fcvt(src.fp().H(), src.fp().S());
      Str(src.fp().H(), dst_op);
      break;
    }
    case StoreType::kF32Store:
      Str(src.fp().S(), dst_op);
      break;
    case StoreType::kF64Store:
      Str(src.fp().D(), dst_op);
      break;
    case StoreType::kS128Store:
      Str(src.fp().Q(), dst_op);
      break;
                                 uintptr_t offset_imm) {
  if (offset_reg == no_reg && offset_imm == 0) return addr_reg;
  if (offset_reg == no_reg) {

                      Register offset_reg, uintptr_t offset_imm,
  if (offset_reg != no_reg) pinned.set(offset_reg);
  Register store_result =
      pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
         result.gp() != offset_reg);
      lasm, temps, dst_addr, offset_reg, offset_imm);
  switch (type.value()) {
    case StoreType::kI64Store8:
    case StoreType::kI32Store8: {
      __ mvn(temp, value.gp().W());
    case StoreType::kI64Store16:
    case StoreType::kI32Store16: {
      __ mvn(temp, value.gp().W());
    case StoreType::kI64Store32:
    case StoreType::kI32Store: {
      __ mvn(temp, value.gp().W());
    case StoreType::kI64Store: {
      __ mvn(temp, value.gp());

  switch (type.value()) {
    case StoreType::kI64Store8:
    case StoreType::kI32Store8: {
      __ neg(temp, value.gp().W());
    case StoreType::kI64Store16:
    case StoreType::kI32Store16: {
      __ neg(temp, value.gp().W());
    case StoreType::kI64Store32:
    case StoreType::kI32Store: {
      __ neg(temp, value.gp().W());
    case StoreType::kI64Store: {
      __ neg(temp, value.gp());
#define ATOMIC_BINOP_CASE(op, instr)                                           \
    switch (type.value()) {                                                    \
      case StoreType::kI64Store8:                                              \
      case StoreType::kI32Store8:                                              \
        __ instr##b(value.gp().W(), result.gp().W(), MemOperand(actual_addr)); \
        break;                                                                 \
      case StoreType::kI64Store16:                                             \
      case StoreType::kI32Store16:                                             \
        __ instr##h(value.gp().W(), result.gp().W(), MemOperand(actual_addr)); \
        break;                                                                 \
      case StoreType::kI64Store32:                                             \
      case StoreType::kI32Store:                                               \
        __ instr(value.gp().W(), result.gp().W(), MemOperand(actual_addr));    \
        break;                                                                 \
      case StoreType::kI64Store:                                               \
        __ instr(value.gp(), result.gp(), MemOperand(actual_addr));            \
        break;                                                                 \
#undef ATOMIC_BINOP_CASE
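// With the LSE extension (ARMv8.1-A) each binop is a single atomic
// instruction: the instr parameter names the mnemonic, with b/h suffixes
// for the byte and halfword variants. LSE has no atomic subtract and its
// "and" is a bit-clear, which is why the cases above negate (neg) or
// complement (mvn) the operand first. Without LSE, the fallback below emits
// a load-acquire-exclusive / store-release-exclusive retry loop.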
  switch (type.value()) {
    case StoreType::kI64Store8:
    case StoreType::kI32Store8:
      __ ldaxrb(result.gp().W(), actual_addr);
      break;
    case StoreType::kI64Store16:
    case StoreType::kI32Store16:
      __ ldaxrh(result.gp().W(), actual_addr);
      break;
    case StoreType::kI64Store32:
    case StoreType::kI32Store:
      __ ldaxr(result.gp().W(), actual_addr);
      break;
    case StoreType::kI64Store:
      __ ldaxr(result.gp().X(), actual_addr);
      break;

      __ add(temp, result.gp(), value.gp());
      __ sub(temp, result.gp(), value.gp());
      __ and_(temp, result.gp(), value.gp());
      __ orr(temp, result.gp(), value.gp());
      __ eor(temp, result.gp(), value.gp());
      __ mov(temp, value.gp());

  switch (type.value()) {
    case StoreType::kI64Store8:
    case StoreType::kI32Store8:
      __ stlxrb(store_result.W(), temp.W(), actual_addr);
      break;
    case StoreType::kI64Store16:
    case StoreType::kI32Store16:
      __ stlxrh(store_result.W(), temp.W(), actual_addr);
      break;
    case StoreType::kI64Store32:
    case StoreType::kI32Store:
      __ stlxr(store_result.W(), temp.W(), actual_addr);
      break;
    case StoreType::kI64Store:
      __ stlxr(store_result.W(), temp.X(), actual_addr);
      break;

  __ Cbnz(store_result.W(), &retry);
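// stlxr writes 0 to its status register on success and 1 if the exclusive
// monitor was lost between the paired ldaxr and stlxr, so the loop retries
// until the read-modify-write pair executed atomically.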
                                  Register offset_reg, uintptr_t offset_imm,
                                  LoadType type, LiftoffRegList,
                                       offset_reg, offset_imm);
  switch (type.value()) {
    case LoadType::kI32Load8U:
    case LoadType::kI64Load8U:
      Ldarb(dst.gp().W(), src_reg);
      break;
    case LoadType::kI32Load16U:
    case LoadType::kI64Load16U:
      Ldarh(dst.gp().W(), src_reg);
      break;
    case LoadType::kI32Load:
    case LoadType::kI64Load32U:
      Ldar(dst.gp().W(), src_reg);
      break;
    case LoadType::kI64Load:
      Ldar(dst.gp().X(), src_reg);
      break;
                                   uintptr_t offset_imm, LiftoffRegister src,
                                   StoreType type, LiftoffRegList,
                                        offset_reg, offset_imm);
  switch (type.value()) {
    case StoreType::kI64Store8:
    case StoreType::kI32Store8:
      Stlrb(src.gp().W(), dst_reg);
      break;
    case StoreType::kI64Store16:
    case StoreType::kI32Store16:
      Stlrh(src.gp().W(), dst_reg);
      break;
    case StoreType::kI64Store32:
    case StoreType::kI32Store:
      Stlr(src.gp().W(), dst_reg);
      break;
    case StoreType::kI64Store:
      Stlr(src.gp().X(), dst_reg);
      break;
                                 uintptr_t offset_imm, LiftoffRegister value,
                                 LiftoffRegister result, StoreType type,

                                 uintptr_t offset_imm, LiftoffRegister value,
                                 LiftoffRegister result, StoreType type,

                                 uintptr_t offset_imm, LiftoffRegister value,
                                 LiftoffRegister result, StoreType type,

                                 uintptr_t offset_imm, LiftoffRegister value,
                                 LiftoffRegister result, StoreType type,

                                 uintptr_t offset_imm, LiftoffRegister value,
                                 LiftoffRegister result, StoreType type,

                                      uintptr_t offset_imm,
                                      LiftoffRegister value,
                                      LiftoffRegister result, StoreType type,

    Register dst_addr, Register offset_reg, uintptr_t offset_imm,
    LiftoffRegister expected, LiftoffRegister new_value,
    LiftoffRegister result, StoreType type, bool) {
  LiftoffRegList pinned{dst_addr, expected, new_value};
  if (offset_reg != no_reg) pinned.set(offset_reg);
  if (pinned.has(result)) {
      this, temps, dst_addr, offset_reg, offset_imm);
    CpuFeatureScope scope(this, LSE);
    switch (type.value()) {
      case StoreType::kI64Store8:
      case StoreType::kI32Store8:
        if (result.gp() != expected.gp()) {
          mov(result.gp().W(), expected.gp().W());
      case StoreType::kI64Store16:
      case StoreType::kI32Store16:
        if (result.gp() != expected.gp()) {
          mov(result.gp().W(), expected.gp().W());
      case StoreType::kI64Store32:
      case StoreType::kI32Store:
        if (result.gp() != expected.gp()) {
          mov(result.gp().W(), expected.gp().W());
      case StoreType::kI64Store:
        if (result.gp() != expected.gp()) {
          mov(result.gp().X(), expected.gp().X());
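// The LSE casal family compares against and loads back into the same
// register, so the expected value is first copied into the result register,
// which the cas instruction then overwrites with the value actually read.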
  Register store_result = temps.AcquireW();
  switch (type.value()) {
    case StoreType::kI64Store8:
    case StoreType::kI32Store8:
      ldaxrb(result_reg.W(), actual_addr);
      stlxrb(store_result.W(), new_value.gp().W(), actual_addr);
      break;
    case StoreType::kI64Store16:
    case StoreType::kI32Store16:
      ldaxrh(result_reg.W(), actual_addr);
      stlxrh(store_result.W(), new_value.gp().W(), actual_addr);
      break;
    case StoreType::kI64Store32:
    case StoreType::kI32Store:
      ldaxr(result_reg.W(), actual_addr);
      stlxr(store_result.W(), new_value.gp().W(), actual_addr);
      break;
    case StoreType::kI64Store:
      ldaxr(result_reg.X(), actual_addr);
      stlxr(store_result.W(), new_value.gp().X(), actual_addr);
      break;
  Cbnz(store_result.W(), &retry);
  if (result_reg != result.gp()) {
                                           uint32_t caller_slot_idx,

                                            uint32_t caller_slot_idx,
                                            Register frame_pointer) {

      Mov(dst.W(), src.W());
      Mov(dst.X(), src.X());
      Fmov(dst.S(), src.S());
      Fmov(dst.D(), src.D());
      Mov(dst.Q(), src.Q());
  switch (value.type().kind()) {
      if (value.to_i32() == 0) {
        src = temps.AcquireW();
        Mov(src.W(), value.to_i32());
      if (value.to_i64() == 0) {
        src = temps.AcquireX();
        Mov(src.X(), value.to_i64());

  Register address_reg = temps.AcquireX();
  Sub(address_reg, fp, start + size);
  Register count_reg = temps.AcquireX();
  Mov(count_reg, size / 4);
  sub(count_reg, count_reg, 1);
  cbnz(count_reg, &loop);
  Add(dst.W(), lhs.W(), rhs.W());
  Add(dst.W(), lhs.W(), Immediate(imm));
  Sub(dst.W(), lhs.W(), rhs.W());
  Sub(dst.W(), lhs.W(), Immediate(imm));
  Mul(dst.W(), lhs.W(), rhs.W());
  And(dst.W(), lhs.W(), rhs.W());
  And(dst.W(), lhs.W(), Immediate(imm));
  Orr(dst.W(), lhs.W(), rhs.W());
  Orr(dst.W(), lhs.W(), Immediate(imm));
  Eor(dst.W(), lhs.W(), rhs.W());
  Eor(dst.W(), lhs.W(), Immediate(imm));
  Lsl(dst.W(), src.W(), amount.W());
  Lsl(dst.W(), src.W(), amount & 31);
  Asr(dst.W(), src.W(), amount.W());
  Asr(dst.W(), src.W(), amount & 31);
  Lsr(dst.W(), src.W(), amount.W());
  Lsr(dst.W(), src.W(), amount & 31);

                                     LiftoffRegister rhs) {
  Add(dst.gp().X(), lhs.gp().X(), rhs.gp().X());
                                     LiftoffRegister rhs) {
  Sub(dst.gp().X(), lhs.gp().X(), rhs.gp().X());
                                     LiftoffRegister rhs) {
  Mul(dst.gp().X(), lhs.gp().X(), rhs.gp().X());
  Mul(dst.gp().X(), lhs.gp().X(), scratch);
  Lsl(dst.gp().X(), src.gp().X(), amount.X());
  Lsl(dst.gp().X(), src.gp().X(), amount & 63);
  Asr(dst.gp().X(), src.gp().X(), amount.X());
  Asr(dst.gp().X(), src.gp().X(), amount & 63);
  Lsr(dst.gp().X(), src.gp().X(), amount.X());
  Lsr(dst.gp().X(), src.gp().X(), amount & 63);
  Fadd(dst.S(), lhs.S(), rhs.S());
  Fsub(dst.S(), lhs.S(), rhs.S());
  Fmul(dst.S(), lhs.S(), rhs.S());
  Fdiv(dst.S(), lhs.S(), rhs.S());
  Fmin(dst.S(), lhs.S(), rhs.S());
  Fmax(dst.S(), lhs.S(), rhs.S());
  Fabs(dst.S(), src.S());
  Fneg(dst.S(), src.S());
  Frintp(dst.S(), src.S());
  Frintm(dst.S(), src.S());
  Frintz(dst.S(), src.S());
  Frintn(dst.S(), src.S());
  Fsqrt(dst.S(), src.S());
  Fadd(dst.D(), lhs.D(), rhs.D());
  Fsub(dst.D(), lhs.D(), rhs.D());
  Fmul(dst.D(), lhs.D(), rhs.D());
  Fdiv(dst.D(), lhs.D(), rhs.D());
  Fmin(dst.D(), lhs.D(), rhs.D());
  Fmax(dst.D(), lhs.D(), rhs.D());
  Fabs(dst.D(), src.D());
  Fneg(dst.D(), src.D());
  Frintp(dst.D(), src.D());
  Frintm(dst.D(), src.D());
  Frintz(dst.D(), src.D());
  Frintn(dst.D(), src.D());
  Fsqrt(dst.D(), src.D());
  Add(dst.gp().X(), lhs.gp().X(), imm);
  Clz(dst.W(), src.W());
  Rbit(dst.W(), src.W());
  Clz(dst.W(), dst.W());
  Clz(dst.gp().X(), src.gp().X());
  Rbit(dst.gp().X(), src.gp().X());
  Clz(dst.gp().X(), dst.gp().X());
                                       LiftoffRegister src) {
  Register scratch = temps.AcquireW();
  Register scratch = temps.AcquireX();
  Add(scratch, scratch, Operand(1));

                                     Label* trap_div_by_zero,
                                     Label* trap_div_unrepresentable) {
  bool can_use_dst = !dst_w.Aliases(lhs_w) && !dst_w.Aliases(rhs_w);
    Sdiv(dst_w, lhs_w, rhs_w);
  Cbz(rhs_w, trap_div_by_zero);
  B(trap_div_unrepresentable, vs);
    Sdiv(dst_w, lhs_w, rhs_w);

                                     Label* trap_div_by_zero) {
  Cbz(rhs.W(), trap_div_by_zero);
  Udiv(dst.W(), lhs.W(), rhs.W());

                                     Label* trap_div_by_zero) {
  Register scratch = temps.AcquireW();
  Sdiv(scratch, lhs_w, rhs_w);
  Cbz(rhs_w, trap_div_by_zero);
  Msub(dst_w, scratch, rhs_w, lhs_w);

                                     Label* trap_div_by_zero) {
  Register scratch = temps.AcquireW();
  Udiv(scratch, lhs_w, rhs_w);
  Cbz(rhs_w, trap_div_by_zero);
  Msub(dst_w, scratch, rhs_w, lhs_w);

                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero,
                                     Label* trap_div_unrepresentable) {
  bool can_use_dst = !dst_x.Aliases(lhs_x) && !dst_x.Aliases(rhs_x);
    Sdiv(dst_x, lhs_x, rhs_x);
  Cbz(rhs_x, trap_div_by_zero);
  B(trap_div_unrepresentable, vs);
    Sdiv(dst_x, lhs_x, rhs_x);

                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero) {
  Cbz(rhs.gp().X(), trap_div_by_zero);
  Udiv(dst.gp().X(), lhs.gp().X(), rhs.gp().X());

                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero) {
  Register scratch = temps.AcquireX();
  Sdiv(scratch, lhs_x, rhs_x);
  Cbz(rhs_x, trap_div_by_zero);
  Msub(dst_x, scratch, rhs_x, lhs_x);

                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero) {
  Register scratch = temps.AcquireX();
  Udiv(scratch, lhs_x, rhs_x);
  Cbz(rhs_x, trap_div_by_zero);
  Msub(dst_x, scratch, rhs_x, lhs_x);
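// arm64 has no hardware remainder: rem is computed as
// lhs - (lhs / rhs) * rhs, which Msub does in one instruction after the
// Sdiv/Udiv. Division by zero is checked explicitly (Cbz) because Sdiv/Udiv
// return 0 instead of trapping.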
  Ushr(scratch.V2S(), rhs.V2S(), 31);
  Fmov(dst.S(), lhs.S());
  Sli(dst.V2S(), scratch.V2S(), 31);

  Ushr(scratch.V1D(), rhs.V1D(), 63);
  Fmov(dst.D(), lhs.D());
  Sli(dst.V1D(), scratch.V1D(), 63);
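// Copysign: Ushr moves the sign bit of rhs down to bit 0, and Sli
// (shift-left-insert) shifts it back up and inserts it as the top bit of
// dst without disturbing the magnitude bits copied from lhs.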
                                            LiftoffRegister dst,
                                            LiftoffRegister src, Label* trap) {
    case kExprI32ConvertI64:
      Mov(dst.gp().W(), src.gp().W());
    case kExprI32SConvertF32:
      Fcvtzs(dst.gp().W(), src.fp().S());
      Fcmp(src.fp().S(), static_cast<float>(INT32_MIN));
    case kExprI32UConvertF32:
      Fcvtzu(dst.gp().W(), src.fp().S());
      Fcmp(src.fp().S(), -1.0);
    case kExprI32SConvertF64: {
      VRegister fp_ref = temps.AcquireD();
      VRegister fp_cmp = temps.AcquireD();
      Fcvtzs(dst.gp().W(), src.fp().D());
      Frintz(fp_ref, src.fp().D());
      Scvtf(fp_cmp, dst.gp().W());
      Fcmp(fp_cmp, fp_ref);
    case kExprI32UConvertF64: {
      VRegister fp_ref = temps.AcquireD();
      VRegister fp_cmp = temps.AcquireD();
      Fcvtzu(dst.gp().W(), src.fp().D());
      Frintz(fp_ref, src.fp().D());
      Ucvtf(fp_cmp, dst.gp().W());
      Fcmp(fp_cmp, fp_ref);
    case kExprI32SConvertSatF32:
      Fcvtzs(dst.gp().W(), src.fp().S());
    case kExprI32UConvertSatF32:
      Fcvtzu(dst.gp().W(), src.fp().S());
    case kExprI32SConvertSatF64:
      Fcvtzs(dst.gp().W(), src.fp().D());
    case kExprI32UConvertSatF64:
      Fcvtzu(dst.gp().W(), src.fp().D());
    case kExprI64SConvertSatF32:
      Fcvtzs(dst.gp().X(), src.fp().S());
    case kExprI64UConvertSatF32:
      Fcvtzu(dst.gp().X(), src.fp().S());
    case kExprI64SConvertSatF64:
      Fcvtzs(dst.gp().X(), src.fp().D());
    case kExprI64UConvertSatF64:
      Fcvtzu(dst.gp().X(), src.fp().D());
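    // The *Sat conversions need no explicit checks: arm64's Fcvtzs/Fcvtzu
    // already clamp out-of-range inputs to the integer type's bounds and
    // convert NaN to 0, which matches Wasm's saturating semantics exactly.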
    case kExprI32ReinterpretF32:
      Fmov(dst.gp().W(), src.fp().S());
    case kExprI64SConvertI32:
      Sxtw(dst.gp().X(), src.gp().W());
    case kExprI64SConvertF32:
      Fcvtzs(dst.gp().X(), src.fp().S());
      Fcmp(src.fp().S(), static_cast<float>(INT64_MIN));
    case kExprI64UConvertF32:
      Fcvtzu(dst.gp().X(), src.fp().S());
      Fcmp(src.fp().S(), -1.0);
    case kExprI64SConvertF64:
      Fcvtzs(dst.gp().X(), src.fp().D());
      Fcmp(src.fp().D(), static_cast<double>(INT64_MIN));
    case kExprI64UConvertF64:
      Fcvtzu(dst.gp().X(), src.fp().D());
      Fcmp(src.fp().D(), -1.0);
    case kExprI64UConvertI32:
      Mov(dst.gp().W(), src.gp().W());
    case kExprI64ReinterpretF64:
      Fmov(dst.gp().X(), src.fp().D());
    case kExprF32SConvertI32:
      Scvtf(dst.fp().S(), src.gp().W());
    case kExprF32UConvertI32:
      Ucvtf(dst.fp().S(), src.gp().W());
    case kExprF32SConvertI64:
      Scvtf(dst.fp().S(), src.gp().X());
    case kExprF32UConvertI64:
      Ucvtf(dst.fp().S(), src.gp().X());
    case kExprF32ConvertF64:
      Fcvt(dst.fp().S(), src.fp().D());
    case kExprF32ReinterpretI32:
      Fmov(dst.fp().S(), src.gp().W());
    case kExprF64SConvertI32:
      Scvtf(dst.fp().D(), src.gp().W());
    case kExprF64UConvertI32:
      Ucvtf(dst.fp().D(), src.gp().W());
    case kExprF64SConvertI64:
      Scvtf(dst.fp().D(), src.gp().X());
    case kExprF64UConvertI64:
      Ucvtf(dst.fp().D(), src.gp().X());
    case kExprF64ConvertF32:
      Fcvt(dst.fp().D(), src.fp().S());
    case kExprF64ReinterpretI64:
      Fmov(dst.fp().D(), src.gp().X());
  sxtb(dst.W(), src.W());
  sxth(dst.W(), src.W());
                                              LiftoffRegister src) {
  sxtb(dst.gp(), src.gp());
                                              LiftoffRegister src) {
  sxth(dst.gp(), src.gp());
                                              LiftoffRegister src) {
  sxtw(dst.gp(), src.gp());

                                      const FreezeCacheState& frozen) {
    if (rhs.is_valid()) {
      Cmp(lhs.W(), rhs.W());
#if defined(V8_COMPRESS_POINTERS)
      Cmp(lhs.W(), rhs.W());
      Cmp(lhs.X(), rhs.X());
    if (rhs.is_valid()) {
      Cmp(lhs.X(), rhs.X());

                                         Register lhs, int32_t imm,
                                         const FreezeCacheState& frozen) {
  Cmp(lhs.W(), Operand(imm));

  Cmp(lhs.W(), rhs.W());
  Cset(dst.W(), cond);

  Cmp(src.gp().X(), xzr);

                                   LiftoffRegister lhs,
                                   LiftoffRegister rhs) {
  Cmp(lhs.gp().X(), rhs.gp().X());
  Cset(dst.W(), cond);

  Fcmp(lhs.S(), rhs.S());
  Cset(dst.W(), cond);
  Csel(dst.W(), wzr, dst.W(), vs);

  Fcmp(lhs.D(), rhs.D());
  Cset(dst.W(), cond);
  Csel(dst.W(), wzr, dst.W(), vs);
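// After Fcmp, the V flag is set when the comparison is unordered, i.e. when
// either operand is NaN; the Csel then forces the boolean result to 0,
// since every Wasm float comparison except "ne" is false in the presence
// of a NaN.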
                              LiftoffRegister true_value,
                              LiftoffRegister false_value,
      Csel(dst.gp().W(), true_value.gp().W(), false_value.gp().W(), ne);
      Csel(dst.gp().X(), true_value.gp().X(), false_value.gp().X(), ne);
      Fcsel(dst.fp().S(), true_value.fp().S(), false_value.fp().S(), ne);
      Fcsel(dst.fp().D(), true_value.fp().D(), false_value.fp().D(), ne);

                                 const FreezeCacheState& frozen) {
  JumpIfSmi(obj, smi_label, not_smi_label);
                                     Register offset_reg, uintptr_t offset_imm,
                                     uint32_t* protected_load_pc,
                                          offset_reg, offset_imm)};
  MachineType memtype = type.mem_type();
      Ldr(dst.fp().D(), src_op);
      Sxtl(dst.fp().V8H(), dst.fp().V8B());
      Ldr(dst.fp().D(), src_op);
      Uxtl(dst.fp().V8H(), dst.fp().V8B());
      Ldr(dst.fp().D(), src_op);
      Sxtl(dst.fp().V4S(), dst.fp().V4H());
      Ldr(dst.fp().D(), src_op);
      Uxtl(dst.fp().V4S(), dst.fp().V4H());
      Ldr(dst.fp().D(), src_op);
      Sxtl(dst.fp().V2D(), dst.fp().V2S());
      Ldr(dst.fp().D(), src_op);
      Uxtl(dst.fp().V2D(), dst.fp().V2S());
      Ldr(dst.fp().S(), src_op);
      Ldr(dst.fp().D(), src_op);
      ld1r(dst.fp().V16B(), src_op);
      ld1r(dst.fp().V8H(), src_op);
      ld1r(dst.fp().V4S(), src_op);
      ld1r(dst.fp().V2D(), src_op);
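// Load transforms: the extending variants load one 64-bit half-vector and
// widen it with Sxtl/Uxtl, while the splat variants use ld1r, which loads a
// single element and replicates it into every lane of dst.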
                                Register addr, Register offset_reg,
                                uintptr_t offset_imm, LoadType type,
                                uint8_t laneidx, uint32_t* protected_load_pc,
                                   offset_imm, i64_offset)};
  MachineType mem_type = type.mem_type();
  Mov(dst.fp().Q(), src.fp().Q());
    ld1(dst.fp().B(), laneidx, src_op);
    ld1(dst.fp().H(), laneidx, src_op);
    ld1(dst.fp().S(), laneidx, src_op);
    ld1(dst.fp().D(), laneidx, src_op);

                                 uintptr_t offset_imm, LiftoffRegister src,
                                 StoreType type, uint8_t lane,
                                 uint32_t* protected_store_pc,
                                    offset_imm, i64_offset)};
  if (protected_store_pc) *protected_store_pc = pc_offset();
    st1(src.fp().B(), lane, dst_op);
    st1(src.fp().H(), lane, dst_op);
    st1(src.fp().S(), lane, dst_op);
    st1(src.fp().D(), lane, dst_op);
                                          LiftoffRegister lhs,
                                          LiftoffRegister rhs) {
  Tbl(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());

                                                  LiftoffRegister lhs,
                                                  LiftoffRegister rhs) {
  Tbl(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());

                                                 LiftoffRegister src) {
  Fcvtzs(dst.fp().V4S(), src.fp().V4S());
                                                 LiftoffRegister src) {
  Fcvtzu(dst.fp().V4S(), src.fp().V4S());

    LiftoffRegister dst, LiftoffRegister src) {
  Fcvtzs(dst.fp().V2D(), src.fp().V2D());
  Sqxtn(dst.fp().V2S(), dst.fp().V2D());
    LiftoffRegister dst, LiftoffRegister src) {
  Fcvtzu(dst.fp().V2D(), src.fp().V2D());
  Uqxtn(dst.fp().V2S(), dst.fp().V2D());

                                          LiftoffRegister src1,
                                          LiftoffRegister src2,
                                          LiftoffRegister mask,
                                            LiftoffRegister src) {
  Dup(dst.fp().V2D(), src.fp().D(), 0);
                                                   LiftoffRegister lhs,
                                                   uint8_t imm_lane_idx) {
  Mov(dst.fp().D(), lhs.fp().V2D(), imm_lane_idx);
                                                   LiftoffRegister src1,
                                                   LiftoffRegister src2,
                                                   uint8_t imm_lane_idx) {
    Mov(dst.fp().V2D(), src1.fp().V2D());
  Mov(dst.fp().V2D(), imm_lane_idx, src2.fp().V2D(), 0);
                                          LiftoffRegister src) {
  Fabs(dst.fp().V2D(), src.fp().V2D());
                                          LiftoffRegister src) {
  Fneg(dst.fp().V2D(), src.fp().V2D());
                                           LiftoffRegister src) {
  Fsqrt(dst.fp().V2D(), src.fp().V2D());
                                          LiftoffRegister src) {
  Frintp(dst.fp().V2D(), src.fp().V2D());
                                           LiftoffRegister src) {
  Frintm(dst.fp().V2D(), src.fp().V2D());
                                           LiftoffRegister src) {
  Frintz(dst.fp().V2D(), src.fp().V2D());
                                                 LiftoffRegister src) {
  Frintn(dst.fp().V2D(), src.fp().V2D());
                                          LiftoffRegister rhs) {
  Fadd(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
                                          LiftoffRegister rhs) {
  Fsub(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
                                          LiftoffRegister rhs) {
  Fmul(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
                                          LiftoffRegister rhs) {
  Fdiv(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
                                          LiftoffRegister rhs) {
  Fmin(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
                                          LiftoffRegister rhs) {
  Fmax(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
                                           LiftoffRegister rhs) {
  VRegister tmp = dst.fp();
  if (dst == lhs || dst == rhs) {
  Fcmgt(tmp.V2D(), lhs.fp().V2D(), rhs.fp().V2D());
  Bsl(tmp.V16B(), rhs.fp().V16B(), lhs.fp().V16B());
  if (dst == lhs || dst == rhs) {
    Mov(dst.fp().V2D(), tmp);
                                           LiftoffRegister rhs) {
  VRegister tmp = dst.fp();
  if (dst == lhs || dst == rhs) {
  Fcmgt(tmp.V2D(), rhs.fp().V2D(), lhs.fp().V2D());
  Bsl(tmp.V16B(), rhs.fp().V16B(), lhs.fp().V16B());
  if (dst == lhs || dst == rhs) {
    Mov(dst.fp().V2D(), tmp);
                                                LiftoffRegister lhs,
                                                LiftoffRegister rhs) {
  Fmin(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
                                                LiftoffRegister lhs,
                                                LiftoffRegister rhs) {
  Fmax(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
                                                      LiftoffRegister src) {
  Sxtl(dst.fp().V2D(), src.fp().V2S());
  Scvtf(dst.fp().V2D(), dst.fp().V2D());
                                                      LiftoffRegister src) {
  Uxtl(dst.fp().V2D(), src.fp().V2S());
  Ucvtf(dst.fp().V2D(), dst.fp().V2D());
                                                      LiftoffRegister src) {
  Fcvtl(dst.fp().V2D(), src.fp().V2S());
2543 LiftoffRegister lhs,
2544 uint8_t imm_lane_idx) {
2545 Mov(dst.fp().S(), lhs.fp().V4S(), imm_lane_idx);
2549 LiftoffRegister src1,
2550 LiftoffRegister src2,
2551 uint8_t imm_lane_idx) {
2553 Mov(dst.fp().V4S(), src1.fp().V4S());
2555 Mov(dst.fp().V4S(), imm_lane_idx, src2.fp().V4S(), 0);
2559 LiftoffRegister src) {
2560 Fabs(dst.fp().V4S(), src.fp().V4S());
2564 LiftoffRegister src) {
2565 Fneg(dst.fp().V4S(), src.fp().V4S());
2569 LiftoffRegister src) {
2570 Fsqrt(dst.fp().V4S(), src.fp().V4S());
2574 LiftoffRegister src) {
2575 Frintp(dst.fp().V4S(), src.fp().V4S());
2580 LiftoffRegister src) {
2581 Frintm(dst.fp().V4S(), src.fp().V4S());
2586 LiftoffRegister src) {
2587 Frintz(dst.fp().V4S(), src.fp().V4S());
2592 LiftoffRegister src) {
2593 Frintn(dst.fp().V4S(), src.fp().V4S());
2598 LiftoffRegister rhs) {
2599 Fadd(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
2603 LiftoffRegister rhs) {
2604 Fsub(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
2608 LiftoffRegister rhs) {
2609 Fmul(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
2613 LiftoffRegister rhs) {
2614 Fdiv(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
2618 LiftoffRegister rhs) {
2619 Fmin(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
2623 LiftoffRegister rhs) {
2624 Fmax(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
2628 LiftoffRegister lhs,
2629 LiftoffRegister rhs) {
2630 Fmin(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
2634 LiftoffRegister lhs,
2635 LiftoffRegister rhs) {
2636 Fmax(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
2640 LiftoffRegister rhs) {
2643 VRegister tmp = dst.fp();
2644 if (dst == lhs || dst == rhs) {
2648 Fcmgt(tmp.V4S(), lhs.fp().V4S(), rhs.fp().V4S());
2649 Bsl(tmp.V16B(), rhs.fp().V16B(), lhs.fp().V16B());
2651 if (dst == lhs || dst == rhs) {
2652 Mov(dst.fp().V4S(), tmp);
2657 LiftoffRegister rhs) {
2660 VRegister tmp = dst.fp();
2661 if (dst == lhs || dst == rhs) {
2665 Fcmgt(tmp.V4S(), rhs.fp().V4S(), lhs.fp().V4S());
2666 Bsl(tmp.V16B(), rhs.fp().V16B(), lhs.fp().V16B());
2668 if (dst == lhs || dst == rhs) {
2669 Mov(dst.fp().V4S(), tmp);
2674 LiftoffRegister src) {
2675 Dup(dst.fp().V2D(), src.gp().X());
2679 LiftoffRegister lhs,
2680 uint8_t imm_lane_idx) {
2681 Mov(dst.gp().X(), lhs.fp().V2D(), imm_lane_idx);
2685 LiftoffRegister src1,
2686 LiftoffRegister src2,
2687 uint8_t imm_lane_idx) {
2689 Mov(dst.fp().V2D(), src1.fp().V2D());
2691 Mov(dst.fp().V2D(), imm_lane_idx, src2.gp().X());
2695 LiftoffRegister src) {
2696 Neg(dst.fp().V2D(), src.fp().V2D());
                                              LiftoffRegister src) {

                                          LiftoffRegister rhs) {
      this, dst.fp().V2D(), lhs.fp().V2D(), rhs.gp(), kFormat2D);
  Shl(dst.fp().V2D(), lhs.fp().V2D(), rhs & 63);
                                            LiftoffRegister lhs,
                                            LiftoffRegister rhs) {
      this, dst.fp().V2D(), lhs.fp().V2D(), rhs.gp(), kFormat2D);
                                             LiftoffRegister lhs, int32_t rhs) {
      this, dst.fp().V2D(), lhs.fp().V2D(), rhs);
                                            LiftoffRegister lhs,
                                            LiftoffRegister rhs) {
      this, dst.fp().V2D(), lhs.fp().V2D(), rhs.gp(), kFormat2D);
                                             LiftoffRegister lhs, int32_t rhs) {
      this, dst.fp().V2D(), lhs.fp().V2D(), rhs);
                                          LiftoffRegister rhs) {
  Add(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
                                          LiftoffRegister rhs) {
  Sub(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
                                          LiftoffRegister rhs) {
  VRegister tmp1 = temps.AcquireV(kFormat2D);
  VRegister tmp2 = temps.AcquireV(kFormat2D);
  Xtn(tmp1.V2S(), lhs.fp().V2D());
  Xtn(tmp2.V2S(), rhs.fp().V2D());
  Umull(tmp1.V2D(), tmp1.V2S(), tmp2.V2S());
  Rev64(tmp2.V4S(), rhs.fp().V4S());
  Mul(tmp2.V4S(), tmp2.V4S(), lhs.fp().V4S());
  Addp(tmp2.V4S(), tmp2.V4S(), tmp2.V4S());
  Shll(dst.fp().V2D(), tmp2.V2S(), 32);
  Add(dst.fp().V2D(), dst.fp().V2D(), tmp1.V2D());
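// i64x2.mul has no single NEON instruction. With a = aH:aL and b = bH:bL
// (32-bit halves), a*b mod 2^64 = aL*bL + ((aL*bH + aH*bL) << 32):
// Xtn+Umull computes the aL*bL products, while Rev64+Mul+Addp sums the two
// cross products per lane and Shll shifts that sum up by 32 bits.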
                                                LiftoffRegister src1,
                                                LiftoffRegister src2) {
  Smull(dst.fp().V2D(), src1.fp().V2S(), src2.fp().V2S());
                                                LiftoffRegister src1,
                                                LiftoffRegister src2) {
  Umull(dst.fp().V2D(), src1.fp().V2S(), src2.fp().V2S());
                                                 LiftoffRegister src1,
                                                 LiftoffRegister src2) {
  Smull2(dst.fp().V2D(), src1.fp().V4S(), src2.fp().V4S());
                                                 LiftoffRegister src1,
                                                 LiftoffRegister src2) {
  Umull2(dst.fp().V2D(), src1.fp().V4S(), src2.fp().V4S());
                                             LiftoffRegister src) {
                                                     LiftoffRegister src) {
  Sxtl(dst.fp().V2D(), src.fp().V2S());
                                                      LiftoffRegister src) {
  Sxtl2(dst.fp().V2D(), src.fp().V4S());
                                                     LiftoffRegister src) {
  Uxtl(dst.fp().V2D(), src.fp().V2S());
                                                      LiftoffRegister src) {
  Uxtl2(dst.fp().V2D(), src.fp().V4S());
                                            LiftoffRegister src) {
  Dup(dst.fp().V4S(), src.gp().W());
                                                   LiftoffRegister lhs,
                                                   uint8_t imm_lane_idx) {
  Mov(dst.gp().W(), lhs.fp().V4S(), imm_lane_idx);
                                                   LiftoffRegister src1,
                                                   LiftoffRegister src2,
                                                   uint8_t imm_lane_idx) {
    Mov(dst.fp().V4S(), src1.fp().V4S());
  Mov(dst.fp().V4S(), imm_lane_idx, src2.gp().W());
                                          LiftoffRegister src) {
  Neg(dst.fp().V4S(), src.fp().V4S());
                                              LiftoffRegister src) {
                                              LiftoffRegister src) {
                                          LiftoffRegister rhs) {
      this, dst.fp().V4S(), lhs.fp().V4S(), rhs.gp(), kFormat4S);
  Shl(dst.fp().V4S(), lhs.fp().V4S(), rhs & 31);
                                            LiftoffRegister lhs,
                                            LiftoffRegister rhs) {
      this, dst.fp().V4S(), lhs.fp().V4S(), rhs.gp(), kFormat4S);
                                             LiftoffRegister lhs, int32_t rhs) {
      this, dst.fp().V4S(), lhs.fp().V4S(), rhs);
                                            LiftoffRegister lhs,
                                            LiftoffRegister rhs) {
      this, dst.fp().V4S(), lhs.fp().V4S(), rhs.gp(), kFormat4S);
                                             LiftoffRegister lhs, int32_t rhs) {
      this, dst.fp().V4S(), lhs.fp().V4S(), rhs);
                                          LiftoffRegister rhs) {
  Add(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
                                          LiftoffRegister rhs) {
  Sub(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
                                          LiftoffRegister rhs) {
  Mul(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
                                              LiftoffRegister lhs,
                                              LiftoffRegister rhs) {
  Smin(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
                                              LiftoffRegister lhs,
                                              LiftoffRegister rhs) {
  Umin(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
                                              LiftoffRegister lhs,
                                              LiftoffRegister rhs) {
  Smax(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
                                              LiftoffRegister lhs,
                                              LiftoffRegister rhs) {
  Umax(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
                                                   LiftoffRegister lhs,
                                                   LiftoffRegister rhs) {
  VRegister tmp1 = scope.AcquireV(kFormat4S);
  VRegister tmp2 = scope.AcquireV(kFormat4S);
  Smull(tmp1, lhs.fp().V4H(), rhs.fp().V4H());
  Smull2(tmp2, lhs.fp().V8H(), rhs.fp().V8H());
  Addp(dst.fp().V4S(), tmp1, tmp2);
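// i32x4.dot_i16x8_s: Smull/Smull2 produce the widened products of the low
// and high i16 halves, and Addp adds adjacent pairs, leaving one i32 sum of
// two products per lane.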
                                                      LiftoffRegister src) {
  Saddlp(dst.fp().V4S(), src.fp().V8H());
                                                      LiftoffRegister src) {
  Uaddlp(dst.fp().V4S(), src.fp().V8H());
                                                LiftoffRegister src1,
                                                LiftoffRegister src2) {
  Smull(dst.fp().V4S(), src1.fp().V4H(), src2.fp().V4H());
                                                LiftoffRegister src1,
                                                LiftoffRegister src2) {
  Umull(dst.fp().V4S(), src1.fp().V4H(), src2.fp().V4H());
                                                 LiftoffRegister src1,
                                                 LiftoffRegister src2) {
  Smull2(dst.fp().V4S(), src1.fp().V8H(), src2.fp().V8H());
                                                 LiftoffRegister src1,
                                                 LiftoffRegister src2) {
  Umull2(dst.fp().V4S(), src1.fp().V8H(), src2.fp().V8H());
                                            LiftoffRegister src) {
  Dup(dst.fp().V8H(), src.gp().W());
                                                     LiftoffRegister lhs,
                                                     uint8_t imm_lane_idx) {
  Umov(dst.gp().W(), lhs.fp().V8H(), imm_lane_idx);
                                                     LiftoffRegister lhs,
                                                     uint8_t imm_lane_idx) {
  Smov(dst.gp().W(), lhs.fp().V8H(), imm_lane_idx);
                                                   LiftoffRegister src1,
                                                   LiftoffRegister src2,
                                                   uint8_t imm_lane_idx) {
    Mov(dst.fp().V8H(), src1.fp().V8H());
  Mov(dst.fp().V8H(), imm_lane_idx, src2.gp().W());
                                          LiftoffRegister src) {
  Neg(dst.fp().V8H(), src.fp().V8H());
                                              LiftoffRegister src) {
                                              LiftoffRegister src) {
                                          LiftoffRegister rhs) {
      this, dst.fp().V8H(), lhs.fp().V8H(), rhs.gp(), kFormat8H);
  Shl(dst.fp().V8H(), lhs.fp().V8H(), rhs & 15);
                                            LiftoffRegister lhs,
                                            LiftoffRegister rhs) {
      this, dst.fp().V8H(), lhs.fp().V8H(), rhs.gp(), kFormat8H);
                                             LiftoffRegister lhs, int32_t rhs) {
      this, dst.fp().V8H(), lhs.fp().V8H(), rhs);
                                            LiftoffRegister lhs,
                                            LiftoffRegister rhs) {
      this, dst.fp().V8H(), lhs.fp().V8H(), rhs.gp(), kFormat8H);
                                             LiftoffRegister lhs, int32_t rhs) {
      this, dst.fp().V8H(), lhs.fp().V8H(), rhs);
                                          LiftoffRegister rhs) {
  Add(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
                                                 LiftoffRegister lhs,
                                                 LiftoffRegister rhs) {
  Sqadd(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
                                          LiftoffRegister rhs) {
  Sub(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
                                                 LiftoffRegister lhs,
                                                 LiftoffRegister rhs) {
  Sqsub(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
                                                 LiftoffRegister lhs,
                                                 LiftoffRegister rhs) {
  Uqsub(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
                                          LiftoffRegister rhs) {
  Mul(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
                                                 LiftoffRegister lhs,
                                                 LiftoffRegister rhs) {
  Uqadd(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
                                              LiftoffRegister lhs,
                                              LiftoffRegister rhs) {
  Smin(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
                                              LiftoffRegister lhs,
                                              LiftoffRegister rhs) {
  Umin(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
                                              LiftoffRegister lhs,
                                              LiftoffRegister rhs) {
  Smax(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
                                              LiftoffRegister lhs,
                                              LiftoffRegister rhs) {
  Umax(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
                                          LiftoffRegister lhs,
                                          LiftoffRegister rhs,
                                          const uint8_t shuffle[16],
  VRegister src1 = lhs.fp();
  VRegister src2 = rhs.fp();
  VRegister temp = dst.fp();
  if (dst == lhs || dst == rhs) {
    Mov(src1.Q(), lhs.fp().Q());
    Mov(src2.Q(), rhs.fp().Q());
  int64_t imms[2] = {0, 0};
  for (int i = 7; i >= 0; i--) {
    imms[0] = (imms[0] << 8) | (shuffle[i]);
    imms[1] = (imms[1] << 8) | (shuffle[i + 8]);
           (lhs == rhs ? 0xF0F0F0F0F0F0F0F0 : 0xE0E0E0E0E0E0E0E0));
  Movi(temp.V16B(), imms[1], imms[0]);
    Tbl(dst.fp().V16B(), src1.V16B(), temp.V16B());
    Tbl(dst.fp().V16B(), src1.V16B(), src2.V16B(), temp.V16B());
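// Generic i8x16.shuffle via TBL: the 16 lane indices are packed into two
// 64-bit immediates (lowest lane in the lowest byte) and materialized with
// Movi. The 0xF0.../0xE0... masks above check that every index fits in
// 4 bits for the single-table case (lhs == rhs) or 5 bits for the
// two-register Tbl, since TBL yields 0 for any out-of-range index.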
                                           LiftoffRegister src) {
  Cnt(dst.fp().V16B(), src.fp().V16B());
                                            LiftoffRegister src) {
  Dup(dst.fp().V16B(), src.gp().W());
                                                     LiftoffRegister lhs,
                                                     uint8_t imm_lane_idx) {
  Umov(dst.gp().W(), lhs.fp().V16B(), imm_lane_idx);
                                                     LiftoffRegister lhs,
                                                     uint8_t imm_lane_idx) {
  Smov(dst.gp().W(), lhs.fp().V16B(), imm_lane_idx);
                                                   LiftoffRegister src1,
                                                   LiftoffRegister src2,
                                                   uint8_t imm_lane_idx) {
    Mov(dst.fp().V16B(), src1.fp().V16B());
  Mov(dst.fp().V16B(), imm_lane_idx, src2.gp().W());
                                          LiftoffRegister src) {
  Neg(dst.fp().V16B(), src.fp().V16B());
                                              LiftoffRegister src) {
                                              LiftoffRegister src) {
                                              LiftoffRegister src) {
                                          LiftoffRegister rhs) {
      this, dst.fp().V16B(), lhs.fp().V16B(), rhs.gp(), kFormat16B);
  Shl(dst.fp().V16B(), lhs.fp().V16B(), rhs & 7);
                                            LiftoffRegister lhs,
                                            LiftoffRegister rhs) {
      this, dst.fp().V16B(), lhs.fp().V16B(), rhs.gp(), kFormat16B);
                                             LiftoffRegister lhs, int32_t rhs) {
      this, dst.fp().V16B(), lhs.fp().V16B(), rhs);
                                            LiftoffRegister lhs,
                                            LiftoffRegister rhs) {
      this, dst.fp().V16B(), lhs.fp().V16B(), rhs.gp(), kFormat16B);
                                             LiftoffRegister lhs, int32_t rhs) {
      this, dst.fp().V16B(), lhs.fp().V16B(), rhs);
                                          LiftoffRegister rhs) {
  Add(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
                                                 LiftoffRegister lhs,
                                                 LiftoffRegister rhs) {
  Sqadd(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
                                          LiftoffRegister rhs) {
  Sub(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
                                                 LiftoffRegister lhs,
                                                 LiftoffRegister rhs) {
  Sqsub(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
                                                 LiftoffRegister lhs,
                                                 LiftoffRegister rhs) {
  Uqsub(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
                                                 LiftoffRegister lhs,
                                                 LiftoffRegister rhs) {
  Uqadd(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
                                              LiftoffRegister lhs,
                                              LiftoffRegister rhs) {
  Smin(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
                                              LiftoffRegister lhs,
                                              LiftoffRegister rhs) {
  Umin(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
                                              LiftoffRegister lhs,
                                              LiftoffRegister rhs) {
  Smax(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
                                              LiftoffRegister lhs,
                                              LiftoffRegister rhs) {
  Umax(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
                                         LiftoffRegister rhs) {
  Cmeq(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
                                         LiftoffRegister rhs) {
  Cmeq(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
  Mvn(dst.fp().V16B(), dst.fp().V16B());
                                           LiftoffRegister rhs) {
  Cmgt(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
                                           LiftoffRegister rhs) {
  Cmhi(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
                                           LiftoffRegister rhs) {
  Cmge(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
                                           LiftoffRegister rhs) {
  Cmhs(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
                                         LiftoffRegister rhs) {
  Cmeq(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
                                         LiftoffRegister rhs) {
  Cmeq(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
  Mvn(dst.fp().V8H(), dst.fp().V8H());
                                           LiftoffRegister rhs) {
  Cmgt(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
                                           LiftoffRegister rhs) {
  Cmhi(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
                                           LiftoffRegister rhs) {
  Cmge(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
                                           LiftoffRegister rhs) {
  Cmhs(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
                                         LiftoffRegister rhs) {
  Cmeq(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
                                         LiftoffRegister rhs) {
  Cmeq(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
  Mvn(dst.fp().V4S(), dst.fp().V4S());
                                           LiftoffRegister rhs) {
  Cmgt(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
                                           LiftoffRegister rhs) {
  Cmhi(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
                                           LiftoffRegister rhs) {
  Cmge(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
                                           LiftoffRegister rhs) {
  Cmhs(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
                                         LiftoffRegister rhs) {
  Cmeq(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
                                         LiftoffRegister rhs) {
  Cmeq(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
  Mvn(dst.fp().V2D(), dst.fp().V2D());
                                           LiftoffRegister rhs) {
  Cmgt(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
                                           LiftoffRegister rhs) {
  Cmge(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
                                         LiftoffRegister rhs) {
  Fcmeq(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
                                         LiftoffRegister rhs) {
  Fcmeq(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
  Mvn(dst.fp().V4S(), dst.fp().V4S());
                                         LiftoffRegister rhs) {
  Fcmgt(dst.fp().V4S(), rhs.fp().V4S(), lhs.fp().V4S());
                                         LiftoffRegister rhs) {
  Fcmge(dst.fp().V4S(), rhs.fp().V4S(), lhs.fp().V4S());
                                         LiftoffRegister rhs) {
  Fcmeq(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
                                         LiftoffRegister rhs) {
  Fcmeq(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
  Mvn(dst.fp().V2D(), dst.fp().V2D());
                                         LiftoffRegister rhs) {
  Fcmgt(dst.fp().V2D(), rhs.fp().V2D(), lhs.fp().V2D());
                                         LiftoffRegister rhs) {
  Fcmge(dst.fp().V2D(), rhs.fp().V2D(), lhs.fp().V2D());
                                       const uint8_t imms[16]) {
  uint64_t vals[2];
  memcpy(vals, imms, sizeof(vals));
  Movi(dst.fp().V16B(), vals[1], vals[0]);
  Mvn(dst.fp().V16B(), src.fp().V16B());
                                         LiftoffRegister rhs) {
  And(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
                                        LiftoffRegister rhs) {
  Orr(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
                                         LiftoffRegister rhs) {
  Eor(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
                                            LiftoffRegister src1,
                                            LiftoffRegister src2,
                                            LiftoffRegister mask) {
    Mov(dst.fp().V16B(), mask.fp().V16B());
  Bsl(dst.fp().V16B(), src1.fp().V16B(), src2.fp().V16B());
                                                 LiftoffRegister src) {
  Fcvtzs(dst.fp().V4S(), src.fp().V4S());
                                                 LiftoffRegister src) {
  Fcvtzu(dst.fp().V4S(), src.fp().V4S());
                                                 LiftoffRegister src) {
  Scvtf(dst.fp().V4S(), src.fp().V4S());
                                                 LiftoffRegister src) {
  Ucvtf(dst.fp().V4S(), src.fp().V4S());
                                                  LiftoffRegister src) {
  Fcvtn(dst.fp().V2S(), src.fp().V2D());
                                                 LiftoffRegister lhs,
                                                 LiftoffRegister rhs) {
  VRegister tmp = temps.AcquireV(kFormat8H);
  VRegister right = rhs.fp().V8H();
  Sqxtn(dst.fp().V8B(), lhs.fp().V8H());
  Sqxtn2(dst.fp().V16B(), right);
                                                 LiftoffRegister lhs,
                                                 LiftoffRegister rhs) {
  VRegister tmp = temps.AcquireV(kFormat8H);
  VRegister right = rhs.fp().V8H();
  Sqxtun(dst.fp().V8B(), lhs.fp().V8H());
  Sqxtun2(dst.fp().V16B(), right);
                                                 LiftoffRegister lhs,
                                                 LiftoffRegister rhs) {
  VRegister tmp = temps.AcquireV(kFormat4S);
  VRegister right = rhs.fp().V4S();
  Sqxtn(dst.fp().V4H(), lhs.fp().V4S());
  Sqxtn2(dst.fp().V8H(), right);
                                                 LiftoffRegister lhs,
                                                 LiftoffRegister rhs) {
  VRegister tmp = temps.AcquireV(kFormat4S);
  VRegister right = rhs.fp().V4S();
  Sqxtun(dst.fp().V4H(), lhs.fp().V4S());
  Sqxtun2(dst.fp().V8H(), right);
                                                      LiftoffRegister src) {
  Sxtl(dst.fp().V8H(), src.fp().V8B());
                                                      LiftoffRegister src) {
  Sxtl2(dst.fp().V8H(), src.fp().V16B());
                                                      LiftoffRegister src) {
  Uxtl(dst.fp().V8H(), src.fp().V8B());
                                                      LiftoffRegister src) {
  Uxtl2(dst.fp().V8H(), src.fp().V16B());
                                                      LiftoffRegister src) {
  Sxtl(dst.fp().V4S(), src.fp().V4H());
                                                      LiftoffRegister src) {
  Sxtl2(dst.fp().V4S(), src.fp().V8H());
                                                      LiftoffRegister src) {
  Uxtl(dst.fp().V4S(), src.fp().V4H());
                                                      LiftoffRegister src) {
  Uxtl2(dst.fp().V4S(), src.fp().V8H());
                                                          LiftoffRegister src) {
  Fcvtzs(dst.fp().V2D(), src.fp().V2D());
  Sqxtn(dst.fp().V2S(), dst.fp().V2D());
                                                          LiftoffRegister src) {
  Fcvtzu(dst.fp().V2D(), src.fp().V2D());
  Uqxtn(dst.fp().V2S(), dst.fp().V2D());
                                           LiftoffRegister lhs,
                                           LiftoffRegister rhs) {
  Bic(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
                                                LiftoffRegister lhs,
                                                LiftoffRegister rhs) {
  Urhadd(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
                                                LiftoffRegister lhs,
                                                LiftoffRegister rhs) {
  Urhadd(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
                                         LiftoffRegister src) {
  Abs(dst.fp().V16B(), src.fp().V16B());
                                         LiftoffRegister src) {
  Abs(dst.fp().V8H(), src.fp().V8H());
                                                      LiftoffRegister src) {
  Saddlp(dst.fp().V8H(), src.fp().V16B());
                                                      LiftoffRegister src) {
  Uaddlp(dst.fp().V8H(), src.fp().V16B());
                                                LiftoffRegister src1,
                                                LiftoffRegister src2) {
  Smull(dst.fp().V8H(), src1.fp().V8B(), src2.fp().V8B());
                                                LiftoffRegister src1,
                                                LiftoffRegister src2) {
  Umull(dst.fp().V8H(), src1.fp().V8B(), src2.fp().V8B());
                                                 LiftoffRegister src1,
                                                 LiftoffRegister src2) {
  Smull2(dst.fp().V8H(), src1.fp().V16B(), src2.fp().V16B());
                                                 LiftoffRegister src1,
                                                 LiftoffRegister src2) {
  Umull2(dst.fp().V8H(), src1.fp().V16B(), src2.fp().V16B());
                                                     LiftoffRegister src1,
                                                     LiftoffRegister src2) {
  Sqrdmulh(dst.fp().V8H(), src1.fp().V8H(), src2.fp().V8H());
                                                        LiftoffRegister src1,
                                                        LiftoffRegister src2) {
  Sqrdmulh(dst.fp().V8H(), src1.fp().V8H(), src2.fp().V8H());
                                                  LiftoffRegister lhs,
                                                  LiftoffRegister rhs) {
  VRegister tmp1 = scope.AcquireV(kFormat8H);
  VRegister tmp2 = scope.AcquireV(kFormat8H);
  Smull(tmp1, lhs.fp().V8B(), rhs.fp().V8B());
  Smull2(tmp2, lhs.fp().V16B(), rhs.fp().V16B());
  Addp(dst.fp().V8H(), tmp1, tmp2);
                                                      LiftoffRegister lhs,
                                                      LiftoffRegister rhs,
                                                      LiftoffRegister acc) {
  VRegister tmp1 = scope.AcquireV(kFormat8H);
  VRegister tmp2 = scope.AcquireV(kFormat8H);
  Smull(tmp1, lhs.fp().V8B(), rhs.fp().V8B());
  Smull2(tmp2, lhs.fp().V16B(), rhs.fp().V16B());
  Addp(tmp1, tmp1, tmp2);
  Saddlp(tmp1.V4S(), tmp1);
  Add(dst.fp().V4S(), tmp1.V4S(), acc.fp().V4S());
                                         LiftoffRegister src) {
  Abs(dst.fp().V4S(), src.fp().V4S());
                                         LiftoffRegister src) {
  Abs(dst.fp().V2D(), src.fp().V2D());
#define EMIT_QFMOP(instr, format)                                             \
  if (dst == src3) {                                                          \
    instr(dst.fp().V##format(), src1.fp().V##format(),                        \
          src2.fp().V##format());                                             \
  } else if (dst != src1 && dst != src2) {                                    \
    Mov(dst.fp().V##format(), src3.fp().V##format());                         \
    instr(dst.fp().V##format(), src1.fp().V##format(),                        \
          src2.fp().V##format());                                             \
  } else {                                                                    \
    DCHECK(dst == src1 || dst == src2);                                       \
    UseScratchRegisterScope temps(this);                                      \
    VRegister tmp = temps.AcquireV(kFormat##format);                          \
    Mov(tmp, src3.fp().V##format());                                          \
    instr(tmp, src1.fp().V##format(), src2.fp().V##format());                 \
    Mov(dst.fp().V##format(), tmp);                                           \
  }
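// Fused multiply ops take three inputs and dst may alias any of them: the
// macro accumulates in place when dst is the addend (src3), copies src3 into
// dst first when dst aliases neither multiplicand, and otherwise routes the
// operation through a scratch register so no input is clobbered early.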
                                           LiftoffRegister src1,
                                           LiftoffRegister src2,
                                           LiftoffRegister src3) {

                                           LiftoffRegister src1,
                                           LiftoffRegister src2,
                                           LiftoffRegister src3) {

                                           LiftoffRegister src1,
                                           LiftoffRegister src2,
                                           LiftoffRegister src3) {

                                           LiftoffRegister src1,
                                           LiftoffRegister src2,
                                           LiftoffRegister src3) {

                                           LiftoffRegister src1,
                                           LiftoffRegister src2,
                                           LiftoffRegister src3) {

                                           LiftoffRegister src1,
                                           LiftoffRegister src2,
                                           LiftoffRegister src3) {
                                           LiftoffRegister src) {
  Fcvt(dst.fp().H(), src.fp().S());
  Dup(dst.fp().V8H(), dst.fp().H(), 0);
                                                   LiftoffRegister lhs,
                                                   uint8_t imm_lane_idx) {
  Mov(dst.fp().H(), lhs.fp().V8H(), imm_lane_idx);
  Fcvt(dst.fp().S(), dst.fp().H());
                                                   LiftoffRegister src1,
                                                   LiftoffRegister src2,
                                                   uint8_t imm_lane_idx) {
    Mov(dst.fp().V8H(), src1.fp().V8H());
  VRegister tmp = temps.AcquireV(kFormat8H);
  Fcvt(tmp.H(), src2.fp().S());
  Mov(dst.fp().V8H(), imm_lane_idx, tmp.V8H(), 0);
                                          LiftoffRegister src) {
  Fabs(dst.fp().V8H(), src.fp().V8H());
                                          LiftoffRegister src) {
  Fneg(dst.fp().V8H(), src.fp().V8H());
                                           LiftoffRegister src) {
  Fsqrt(dst.fp().V8H(), src.fp().V8H());
                                          LiftoffRegister src) {
  Frintp(dst.fp().V8H(), src.fp().V8H());
                                           LiftoffRegister src) {
  Frintm(dst.fp().V8H(), src.fp().V8H());
                                           LiftoffRegister src) {
  Frintz(dst.fp().V8H(), src.fp().V8H());
                                                 LiftoffRegister src) {
  Frintn(dst.fp().V8H(), src.fp().V8H());
                                         LiftoffRegister rhs) {
  Fcmeq(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
                                         LiftoffRegister rhs) {
  Fcmeq(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
  Mvn(dst.fp().V8H(), dst.fp().V8H());
                                         LiftoffRegister rhs) {
  Fcmgt(dst.fp().V8H(), rhs.fp().V8H(), lhs.fp().V8H());
                                         LiftoffRegister rhs) {
  Fcmge(dst.fp().V8H(), rhs.fp().V8H(), lhs.fp().V8H());
                                          LiftoffRegister rhs) {
  Fadd(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
                                          LiftoffRegister rhs) {
  Fsub(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
                                          LiftoffRegister rhs) {
  Fmul(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
                                          LiftoffRegister rhs) {
  Fdiv(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
                                          LiftoffRegister rhs) {
  Fmin(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
                                          LiftoffRegister rhs) {
  Fmax(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
                                           LiftoffRegister rhs) {
  VRegister tmp = dst.fp();
  if (dst == lhs || dst == rhs) {
  Fcmgt(tmp.V8H(), lhs.fp().V8H(), rhs.fp().V8H());
  Bsl(tmp.V16B(), rhs.fp().V16B(), lhs.fp().V16B());
  if (dst == lhs || dst == rhs) {
    Mov(dst.fp().V8H(), tmp);
                                           LiftoffRegister rhs) {
  VRegister tmp = dst.fp();
  if (dst == lhs || dst == rhs) {
  Fcmgt(tmp.V8H(), rhs.fp().V8H(), lhs.fp().V8H());
  Bsl(tmp.V16B(), rhs.fp().V16B(), lhs.fp().V16B());
  if (dst == lhs || dst == rhs) {
    Mov(dst.fp().V8H(), tmp);
                                                 LiftoffRegister src) {
  Fcvtzs(dst.fp().V8H(), src.fp().V8H());
                                                 LiftoffRegister src) {
  Fcvtzu(dst.fp().V8H(), src.fp().V8H());
                                                 LiftoffRegister src) {
  Scvtf(dst.fp().V8H(), src.fp().V8H());
                                                 LiftoffRegister src) {
  Ucvtf(dst.fp().V8H(), src.fp().V8H());
                                                  LiftoffRegister src) {
  Fcvtn(dst.fp().V4H(), src.fp().V4S());
                                                  LiftoffRegister src) {
  Mov(fp_scratch.D(), src.fp().V2D(), 1);
  Fcvt(fp_scratch.H(), fp_scratch.D());
  Fcvt(dst.fp().H(), src.fp().D());
  Mov(dst.fp().V8H(), 1, fp_scratch.V8H(), 0);
                                                  LiftoffRegister src) {
  Fcvtl(dst.fp().V4S(), src.fp().V4H());
                                       Label* trap_label) {
  Cmp(index, max_index);

  Register limit_address = temps.AcquireX();
  Cmp(sp, limit_address);

    SafepointTableBuilder::Safepoint& safepoint, LiftoffRegList all_spills,
    LiftoffRegList ref_spills, int spill_offset) {
  bool needs_padding = (gp_spills.GetNumRegsSet() & 1) != 0;
  if (needs_padding) {
  while (!gp_spills.is_empty()) {
    LiftoffRegister reg = gp_spills.GetLastRegSet();
    if (ref_spills.has(reg)) {
      safepoint.DefineTaggedStackSlot(spill_offset);
    gp_spills.clear(reg);
                                 const std::initializer_list<VarState> args,
                                 const LiftoffRegister* rets,
                                 ExternalReference ext_ref) {
  Claim(total_size, 1);
  constexpr int kNumCCallArgs = 1;
  const LiftoffRegister* next_result_reg = rets;
  if (return_kind != kVoid) {
    constexpr Register kReturnReg = x0;
    if (kReturnReg != next_result_reg->gp()) {
      Move(*next_result_reg, LiftoffRegister(kReturnReg), return_kind);
  if (out_argument_kind != kVoid) {
    if (out_argument_kind == kI16) {
  Drop(total_size, 1);

                                 ExternalReference ext_ref) {
  const int num_args = static_cast<int>(args_list.size());
  ParallelMove parallel_move{this};
  for (int reg_arg = 0; reg_arg < num_args; ++reg_arg) {
    parallel_move.LoadIntoRegister(LiftoffRegister{kCArgRegs[reg_arg]},
  parallel_move.Execute();
                                    compiler::CallDescriptor* call_descriptor,
  DCHECK(target.is_valid());
  CallWasmCodePointer(target, call_descriptor->signature_hash());

    compiler::CallDescriptor* call_descriptor, Register target) {
  DCHECK(target.is_valid());
  CallWasmCodePointer(x17, call_descriptor->signature_hash(),

  Fcmp(src.S(), src.S());

  Fcmp(src.D(), src.D());
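// Comparing a register against itself is unordered exactly when the value
// is NaN, so Fcmp(src, src) sets the V flag only for NaN inputs; no constant
// needs to be materialized for the check.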
                                               LiftoffRegister src,
                                               LiftoffRegister tmp_s128,
  if (lane_kind == kF32) {
    Fmaxv(tmp_fp.S(), src.fp().V4S());
    Fmaxp(tmp_fp.D(), src.fp().V2D());

  for (auto& slot : slots_) {
    switch (slot.src_.loc()) {
        UseScratchRegisterScope temps(asm_);
        if (slot.src_.i32_const() == 0) {
          Register zero_reg = slot.src_.kind() == kI32 ? wzr : xzr;
          UseScratchRegisterScope temps(asm_);
              slot.src_.kind() == kI32 ? temps.AcquireW() : temps.AcquireX();
          asm_->Mov(scratch, int64_t{slot.src_.i32_const()});
void Sdiv(const Register &rd, const Register &rn, const Register &rm)
void Jump(Register target, Condition cond=al)
void Fadd(const VRegister &fd, const VRegister &fn, const VRegister &fm)
void Fcmp(const VRegister &fn, const VRegister &fm)
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg=false)
void Ucvtf(const VRegister &fd, const Register &rn, unsigned fbits=0)
void Csel(const Register &rd, const Register &rn, const Operand &operand, Condition cond)
void LoadTaggedField(const Register &destination, const MemOperand &field_operand)
void Rbit(const Register &rd, const Register &rn)
void LoadProtectedPointerField(Register destination, MemOperand field_operand)
void CheckPageFlag(Register object, int mask, Condition cc, Label *condition_met)
void PushCPURegList(CPURegList registers)
void Cmgt(const VRegister &vd, const VRegister &vn, int imm)
void Movi(const VRegister &vd, uint64_t imm, Shift shift=LSL, int shift_amount=0)
void I16x8BitMask(Register dst, VRegister src)
void Cset(const Register &rd, Condition cond)
int CallCFunction(ExternalReference function, int num_arguments, SetIsolateDataSlots set_isolate_data_slots=SetIsolateDataSlots::kYes, Label *return_label=nullptr)
void Dup(const VRegister &vd, const VRegister &vn, int index)
void I64x2BitMask(Register dst, QwNeonRegister src)
void Claim(int64_t count, uint64_t unit_size=kXRegSize)
void Tbl(const VRegister &vd, const VRegister &vn, const VRegister &vm)
void Fsub(const VRegister &fd, const VRegister &fn, const VRegister &fm)
void PopCPURegList(CPURegList registers)
void Fcvtzs(const Register &rd, const VRegister &fn)
void Mvn(const Register &rd, uint64_t imm)
void Ccmp(const Register &rn, const Operand &operand, StatusFlags nzcv, Condition cond)
void Sub(const Register &rd, const Register &rn, const Operand &operand)
void CallRecordWriteStubSaveRegisters(Register object, Operand offset, SaveFPRegsMode fp_mode, StubCallMode mode=StubCallMode::kCallBuiltinPointer)
void Fcsel(const VRegister &fd, const VRegister &fn, const VRegister &fm, Condition cond)
void I8x16BitMask(Register dst, VRegister src, VRegister temp=NoVReg)
void Cbnz(const Register &rt, Label *label)
void Cbz(const Register &rt, Label *label)
void Abs(const Register &rd, const Register &rm, Label *is_not_representable=nullptr, Label *is_representable=nullptr)
void Sxtw(const Register &rd, const Register &rn)
void Fmul(const VRegister &fd, const VRegister &fn, const VRegister &fm)
void Br(const Register &xn)
static constexpr MainThreadFlags kPointersToHereAreInterestingMask
static constexpr MainThreadFlags kPointersFromHereAreInterestingMask
constexpr void set(RegisterT reg)
constexpr unsigned Count() const
static constexpr Tagged< Smi > FromInt(int value)
static constexpr int32_t TypeToMarker(Type type)
static constexpr int kFrameTypeOffset
VRegister AcquireV(VectorFormat format)
QwNeonRegister AcquireQ()
static constexpr VRegister Create(int code, int size, int lane_count=1)
bool IsSameFormat(const VRegister &other) const
static constexpr Register GapRegister()
static constexpr int kInstanceDataOffset
static constexpr int kFeedbackVectorOffset
GetProtectedInstruction(LiftoffAssembler *assm, uint32_t *protected_instruction_pc)
MacroAssembler::BlockPoolsScope blocked_pools_scope_
~GetProtectedInstruction()
uint32_t * protected_instruction_pc_
static constexpr int kReservedInstructions
void emit_i8x16_swizzle(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst, LiftoffRegister src)
bool emit_f16x8_nearest_int(LiftoffRegister dst, LiftoffRegister src)
void emit_store_nonzero_if_nan(Register dst, DoubleRegister src, ValueKind kind)
bool emit_f32x4_floor(LiftoffRegister dst, LiftoffRegister src)
void emit_f64_div(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs)
void emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst, LiftoffRegister src)
void emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void set_trap_on_oob_mem64(Register index, uint64_t max_index, Label *trap_label)
void emit_i16x8_add_sat_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void AtomicXor(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, bool i64_offset)
void emit_i64_shl(LiftoffRegister dst, LiftoffRegister src, Register amount)
void emit_i64_ori(LiftoffRegister dst, LiftoffRegister lhs, int32_t imm)
void emit_i64_shli(LiftoffRegister dst, LiftoffRegister src, int32_t amount)
void emit_i8x16_add_sat_s(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i32x4_relaxed_trunc_f64x2_u_zero(LiftoffRegister dst, LiftoffRegister src)
void emit_i32x4_min_s(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i16x8_relaxed_q15mulr_s(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2)
void emit_i8x16_extract_lane_s(LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx)
void emit_f32_min(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs)
void emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
bool emit_f16x8_qfms(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, LiftoffRegister src3)
void emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
bool emit_f64_ceil(DoubleRegister dst, DoubleRegister src)
void emit_i16x8_extmul_high_i8x16_s(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2)
void emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
bool emit_f16x8_trunc(LiftoffRegister dst, LiftoffRegister src)
bool emit_f16x8_extract_lane(LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx)
void emit_i32_rems(Register dst, Register lhs, Register rhs, Label *trap_rem_by_zero)
void emit_i64x2_extmul_high_i32x4_u(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2)
void emit_i32_clz(Register dst, Register src)
void emit_i8x16_min_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i32_shri(Register dst, Register src, int32_t amount)
void emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_f64_mul(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs)
void emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_store_nonzero(Register dst)
void emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs, int32_t rhs)
void emit_i32_eqz(Register dst, Register src)
void emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst, LiftoffRegister src)
void emit_i32x4_uconvert_f32x4(LiftoffRegister dst, LiftoffRegister src)
void emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst, LiftoffRegister src)
bool emit_f32_floor(DoubleRegister dst, DoubleRegister src)
void emit_f32x4_neg(LiftoffRegister dst, LiftoffRegister src)
void RecordUsedSpillOffset(int offset)
void emit_i16x8_splat(LiftoffRegister dst, LiftoffRegister src)
void emit_f32_max(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs)
void emit_i8x16_uconvert_i16x8(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i16x8_shri_s(LiftoffRegister dst, LiftoffRegister lhs, int32_t rhs)
void FillI64Half(Register, int offset, RegPairHalf)
bool emit_f16x8_splat(LiftoffRegister dst, LiftoffRegister src)
void emit_f32x4_demote_f64x2_zero(LiftoffRegister dst, LiftoffRegister src)
void emit_i64_sari(LiftoffRegister dst, LiftoffRegister src, int32_t amount)
void emit_f64x2_splat(LiftoffRegister dst, LiftoffRegister src)
bool emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label *trap_rem_by_zero)
void CallCWithStackBuffer(const std::initializer_list< VarState > args, const LiftoffRegister *rets, ValueKind return_kind, ValueKind out_argument_kind, int stack_bytes, ExternalReference ext_ref)
void emit_f32_mul(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs)
void emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_f64x2_neg(LiftoffRegister dst, LiftoffRegister src)
void emit_i64x2_abs(LiftoffRegister dst, LiftoffRegister src)
void emit_i8x16_sub_sat_s(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
bool emit_f32_nearest_int(DoubleRegister dst, DoubleRegister src)
void emit_f64x2_replace_lane(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, uint8_t imm_lane_idx)
void emit_i32x4_dot_i16x8_s(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i64_muli(LiftoffRegister dst, LiftoffRegister lhs, int32_t imm)
void emit_i64_xori(LiftoffRegister dst, LiftoffRegister lhs, int32_t imm)
void emit_i16x8_extmul_low_i8x16_u(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2)
void emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i16x8_shr_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i64x2_shr_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst, LiftoffRegister src)
void emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i16x8_alltrue(LiftoffRegister dst, LiftoffRegister src)
void emit_i16x8_dot_i8x16_i7x16_s(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2)
void TailCallNativeWasmCode(Address addr)
void emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst, LiftoffRegister src)
void SpillInstanceData(Register instance)
void RecordOolSpillSpaceSize(int size)
void emit_f64x2_qfma(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, LiftoffRegister src3)
void emit_i32_subi(Register dst, Register lhs, int32_t imm)
void emit_i32_shli(Register dst, Register src, int32_t amount)
void emit_i32x4_extmul_low_i16x8_u(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2)
void AtomicAdd(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, bool i64_offset)
void AtomicSub(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, bool i64_offset)
void LoadTransform(LiftoffRegister dst, Register src_addr, Register offset_reg, uintptr_t offset_imm, LoadType type, LoadTransformationKind transform, uint32_t *protected_load_pc, bool i64_offset)
void emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst, LiftoffRegister src)
void emit_s128_store_nonzero_if_nan(Register dst, LiftoffRegister src, Register tmp_gp, LiftoffRegister tmp_s128, ValueKind lane_kind)
void emit_i32x4_splat(LiftoffRegister dst, LiftoffRegister src)
void emit_i32_sar(Register dst, Register src, Register amount)
void emit_i16x8_shri_u(LiftoffRegister dst, LiftoffRegister lhs, int32_t rhs)
bool emit_i16x8_sconvert_f16x8(LiftoffRegister dst, LiftoffRegister src)
void AtomicAnd(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, bool i64_offset)
void emit_i16x8_sconvert_i32x4(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i8x16_add_sat_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i16x8_add_sat_s(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void AtomicCompareExchange(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister value, StoreType type, bool i64_offset)
void emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_s128_not(LiftoffRegister dst, LiftoffRegister src)
void emit_i64_signextend_i16(LiftoffRegister dst, LiftoffRegister src)
void emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i32x4_dot_i8x16_i7x16_add_s(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, LiftoffRegister acc)
bool emit_f16x8_neg(LiftoffRegister dst, LiftoffRegister src)
void emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst, LiftoffRegister src)
void emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs)
void emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
bool emit_f16x8_pmax(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs, int32_t rhs)
void emit_f64_min(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs)
void emit_i64x2_shr_s(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i8x16_sconvert_i16x8(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
bool emit_f64_trunc(DoubleRegister dst, DoubleRegister src)
void emit_f64_set_cond(Condition condition, Register dst, DoubleRegister lhs, DoubleRegister rhs)
void emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_f64x2_abs(LiftoffRegister dst, LiftoffRegister src)
void Fill(LiftoffRegister, int offset, ValueKind)
void emit_i32_shr(Register dst, Register src, Register amount)
void emit_i64_signextend_i32(LiftoffRegister dst, LiftoffRegister src)
void emit_i64x2_extract_lane(LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx)
void emit_f32x4_replace_lane(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, uint8_t imm_lane_idx)
void LoadFullPointer(Register dst, Register src_addr, int32_t offset_imm)
void DeallocateStackSlot(uint32_t size)
void emit_i8x16_max_s(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i64x2_extmul_low_i32x4_u(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2)
void emit_i16x8_max_s(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
bool emit_f16x8_sub(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i32x4_abs(LiftoffRegister dst, LiftoffRegister src)
void emit_i32_andi(Register dst, Register lhs, int32_t imm)
bool emit_f16x8_abs(LiftoffRegister dst, LiftoffRegister src)
void emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void StackCheck(Label *ool_code)
void emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst, LiftoffRegister src)
bool emit_f16x8_replace_lane(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, uint8_t imm_lane_idx)
bool emit_f32x4_promote_low_f16x8(LiftoffRegister dst, LiftoffRegister src)
void Store(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister src, StoreType type, LiftoffRegList pinned, uint32_t *protected_store_pc=nullptr, bool is_store_mem=false, bool i64_offset=false)
bool emit_f16x8_mul(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void Load(LiftoffRegister dst, Register src_addr, Register offset_reg, uintptr_t offset_imm, LoadType type, uint32_t *protected_load_pc=nullptr, bool is_load_mem=false, bool i64_offset=false, bool needs_shift=false)
void emit_i64x2_shri_u(LiftoffRegister dst, LiftoffRegister lhs, int32_t rhs)
bool emit_f16x8_max(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i32x4_shr_s(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_f32x4_uconvert_i32x4(LiftoffRegister dst, LiftoffRegister src)
void emit_i64x2_extmul_high_i32x4_s(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2)
void emit_f32x4_relaxed_min(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i64x2_shri_s(LiftoffRegister dst, LiftoffRegister lhs, int32_t rhs)
void emit_f64_sub(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs)
int max_used_spill_offset_
void emit_i32_signextend_i16(Register dst, Register src)
void emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i32_divs(Register dst, Register lhs, Register rhs, Label *trap_div_by_zero, Label *trap_div_unrepresentable)
void emit_f32_neg(DoubleRegister dst, DoubleRegister src)
void emit_f64x2_promote_low_f32x4(LiftoffRegister dst, LiftoffRegister src)
void emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs, int64_t imm)
void LoadLane(LiftoffRegister dst, LiftoffRegister src, Register addr, Register offset_reg, uintptr_t offset_imm, LoadType type, uint8_t lane, uint32_t *protected_load_pc, bool i64_offset)
void emit_i16x8_sub_sat_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i32x4_bitmask(LiftoffRegister dst, LiftoffRegister src)
void emit_i32x4_alltrue(LiftoffRegister dst, LiftoffRegister src)
void emit_i32_and(Register dst, Register lhs, Register rhs)
void emit_i16x8_extract_lane_s(LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx)
void emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs)
bool emit_f64x2_trunc(LiftoffRegister dst, LiftoffRegister src)
void CallBuiltin(Builtin builtin)
void emit_i8x16_rounding_average_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i8x16_relaxed_swizzle(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i16x8_abs(LiftoffRegister dst, LiftoffRegister src)
bool emit_f16x8_add(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void CallFrameSetupStub(int declared_function_index)
void emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst, LiftoffRegister src)
void emit_f32_abs(DoubleRegister dst, DoubleRegister src)
void emit_i32_remu(Register dst, Register lhs, Register rhs, Label *trap_rem_by_zero)
bool emit_f64x2_ceil(LiftoffRegister dst, LiftoffRegister src)
bool emit_f16x8_eq(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void LoadSpillAddress(Register dst, int offset, ValueKind kind)
void emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2)
void emit_f32x4_qfms(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, LiftoffRegister src3)
void emit_f64_neg(DoubleRegister dst, DoubleRegister src)
void emit_i64x2_replace_lane(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, uint8_t imm_lane_idx)
void emit_i32_ctz(Register dst, Register src)
void StoreCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx, ValueKind, Register frame_pointer)
void emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs, int32_t rhs)
void AtomicExchange(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, bool i64_offset)
void emit_i32x4_min_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void Spill(VarState *slot)
void emit_i32_xor(Register dst, Register lhs, Register rhs)
void emit_s128_select(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, LiftoffRegister mask)
bool emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label *trap_rem_by_zero)
void emit_i8x16_replace_lane(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, uint8_t imm_lane_idx)
bool emit_f16x8_ne(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst, LiftoffRegister src)
void AssertUnreachable(AbortReason reason)
void CallC(const std::initializer_list< VarState > args, ExternalReference ext_ref)
void emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst, LiftoffRegister src)
void emit_i32_or(Register dst, Register lhs, Register rhs)
bool emit_f16x8_floor(LiftoffRegister dst, LiftoffRegister src)
void emit_i64_signextend_i8(LiftoffRegister dst, LiftoffRegister src)
void emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst, LiftoffRegister src)
void emit_i64x2_bitmask(LiftoffRegister dst, LiftoffRegister src)
void emit_i64x2_extmul_low_i32x4_s(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2)
bool emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label *trap_div_by_zero)
bool emit_f16x8_demote_f32x4_zero(LiftoffRegister dst, LiftoffRegister src)
void emit_f32x4_qfma(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, LiftoffRegister src3)
void emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i32x4_shri_s(LiftoffRegister dst, LiftoffRegister lhs, int32_t rhs)
bool emit_f64x2_nearest_int(LiftoffRegister dst, LiftoffRegister src)
void DropStackSlotsAndRet(uint32_t num_stack_slots)
void emit_f32x4_abs(LiftoffRegister dst, LiftoffRegister src)
void emit_s128_and_not(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_s128_relaxed_laneselect(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, LiftoffRegister mask, int lane_width)
void emit_i32x4_shri_u(LiftoffRegister dst, LiftoffRegister lhs, int32_t rhs)
void emit_f64x2_extract_lane(LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx)
void emit_i16x8_replace_lane(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, uint8_t imm_lane_idx)
void emit_i32x4_sconvert_f32x4(LiftoffRegister dst, LiftoffRegister src)
void emit_i8x16_shri_u(LiftoffRegister dst, LiftoffRegister lhs, int32_t rhs)
void emit_i32_sari(Register dst, Register src, int32_t amount)
void LoadConstant(LiftoffRegister, WasmValue)
void emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst, LiftoffRegister src)
int GetTotalFrameSize() const
void emit_i16x8_sub_sat_s(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i16x8_max_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void CallNativeWasmCode(Address addr)
bool emit_f32x4_trunc(LiftoffRegister dst, LiftoffRegister src)
void emit_i32x4_max_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void PrepareTailCall(int num_callee_stack_params, int stack_param_delta)
void emit_f32x4_relaxed_max(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
bool emit_f64_nearest_int(DoubleRegister dst, DoubleRegister src)
bool emit_f16x8_ceil(LiftoffRegister dst, LiftoffRegister src)
void emit_i32_divu(Register dst, Register lhs, Register rhs, Label *trap_div_by_zero)
void emit_cond_jump(Condition, Label *, ValueKind value, Register lhs, Register rhs, const FreezeCacheState &frozen)
void LoadFromInstance(Register dst, Register instance, int offset, int size)
void emit_i8x16_min_s(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_smi_check(Register obj, Label *target, SmiCheckMode mode, const FreezeCacheState &frozen)
void emit_f64_max(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs)
static bool NeedsAlignment(ValueKind kind)
void emit_f32x4_extract_lane(LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx)
static int SlotSizeForType(ValueKind kind)
void LoadProtectedPointer(Register dst, Register src_addr, int32_t offset)
void emit_i32x4_extmul_low_i16x8_s(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2)
void emit_f64_add(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs)
void emit_i64_sar(LiftoffRegister dst, LiftoffRegister src, Register amount)
void emit_i16x8_shr_s(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i32_ori(Register dst, Register lhs, int32_t imm)
void emit_f64x2_qfms(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, LiftoffRegister src3)
bool emit_f32x4_ceil(LiftoffRegister dst, LiftoffRegister src)
bool emit_i64_popcnt(LiftoffRegister dst, LiftoffRegister src)
void emit_u32_to_uintptr(Register dst, Register src)
bool supports_f16_mem_access()
void emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst, LiftoffRegister src)
void emit_i32_mul(Register dst, Register lhs, Register rhs)
void emit_i8x16_extract_lane_u(LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx)
bool emit_f64_floor(DoubleRegister dst, DoubleRegister src)
void emit_f32_set_cond(Condition condition, Register dst, DoubleRegister lhs, DoubleRegister rhs)
bool emit_f16x8_uconvert_i16x8(LiftoffRegister dst, LiftoffRegister src)
void emit_i16x8_min_s(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void LoadInstanceDataFromFrame(Register dst)
void emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
bool emit_f16x8_demote_f64x2_zero(LiftoffRegister dst, LiftoffRegister src)
bool emit_f16x8_div(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
bool emit_f32x4_nearest_int(LiftoffRegister dst, LiftoffRegister src)
void emit_i64_shri(LiftoffRegister dst, LiftoffRegister src, int32_t amount)
void emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst, LiftoffRegister src)
void emit_i8x16_shri_s(LiftoffRegister dst, LiftoffRegister lhs, int32_t rhs)
void emit_i64_set_cond(Condition condition, Register dst, LiftoffRegister lhs, LiftoffRegister rhs)
bool emit_f64x2_floor(LiftoffRegister dst, LiftoffRegister src)
void emit_i8x16_sub_sat_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i16x8_neg(LiftoffRegister dst, LiftoffRegister src)
void emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst, LiftoffRegister src)
void emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
bool emit_f16x8_le(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
bool emit_i32_popcnt(Register dst, Register src)
void emit_i16x8_rounding_average_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i32x4_replace_lane(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, uint8_t imm_lane_idx)
void MoveStackValue(uint32_t dst_offset, uint32_t src_offset, ValueKind)
void emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i16x8_min_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void Move(LiftoffRegister dst, LiftoffRegister src, ValueKind)
void emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i16x8_uconvert_i32x4(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
bool emit_f16x8_lt(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i8x16_popcnt(LiftoffRegister dst, LiftoffRegister src)
void AtomicStore(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister src, StoreType type, LiftoffRegList pinned, bool i64_offset)
void emit_f64_abs(DoubleRegister dst, DoubleRegister src)
static constexpr int kStackSlotSize
void emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
bool emit_f16x8_pmin(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void PatchPrepareStackFrame(int offset, SafepointTableBuilder *, bool feedback_vector_slot, size_t stack_param_slots)
void emit_f32x4_sconvert_i32x4(LiftoffRegister dst, LiftoffRegister src)
void emit_ptrsize_cond_jumpi(Condition, Label *, Register lhs, int32_t imm, const FreezeCacheState &frozen)
void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx, ValueKind)
void emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_f32_sqrt(DoubleRegister dst, DoubleRegister src)
void emit_i64x2_alltrue(LiftoffRegister dst, LiftoffRegister src)
void emit_f32_sub(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs)
void emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i32_xori(Register dst, Register lhs, int32_t imm)
CacheState * cache_state()
void emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src)
void emit_i32_set_cond(Condition, Register dst, Register lhs, Register rhs)
void emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
bool emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label *trap_div_by_zero, Label *trap_div_unrepresentable)
void emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst, LiftoffRegister src)
void emit_i32_sub(Register dst, Register lhs, Register rhs)
void TailCallIndirect(compiler::CallDescriptor *call_descriptor, Register target)
void emit_i16x8_q15mulr_sat_s(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2)
void emit_i64x2_neg(LiftoffRegister dst, LiftoffRegister src)
void emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs, int32_t rhs)
void emit_i64_shr(LiftoffRegister dst, LiftoffRegister src, Register amount)
void emit_i16x8_bitmask(LiftoffRegister dst, LiftoffRegister src)
void emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i32x4_max_s(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_f32_div(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs)
void emit_i64_andi(LiftoffRegister dst, LiftoffRegister lhs, int32_t imm)
bool emit_f16x8_min(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i32x4_relaxed_trunc_f32x4_s(LiftoffRegister dst, LiftoffRegister src)
void emit_i64x2_splat(LiftoffRegister dst, LiftoffRegister src)
void emit_f32x4_sqrt(LiftoffRegister dst, LiftoffRegister src)
void emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i8x16_shr_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2)
void emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void AllocateStackSlot(Register addr, uint32_t size)
void emit_i32x4_extract_lane(LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx)
void emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void LoadTaggedPointer(Register dst, Register src_addr, Register offset_reg, int32_t offset_imm, uint32_t *protected_load_pc=nullptr, bool offset_reg_needs_shift=false)
void PushRegisters(LiftoffRegList)
void emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i8x16_alltrue(LiftoffRegister dst, LiftoffRegister src)
void emit_i32x4_shr_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i8x16_abs(LiftoffRegister dst, LiftoffRegister src)
void emit_f32x4_splat(LiftoffRegister dst, LiftoffRegister src)
void emit_i8x16_shr_s(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i32_cond_jumpi(Condition, Label *, Register lhs, int imm, const FreezeCacheState &frozen)
void emit_i64_eqz(Register dst, LiftoffRegister src)
void StoreTaggedPointer(Register dst_addr, Register offset_reg, int32_t offset_imm, Register src, LiftoffRegList pinned, uint32_t *protected_store_pc=nullptr, SkipWriteBarrier=kNoSkipWriteBarrier)
void emit_i64_clz(LiftoffRegister dst, LiftoffRegister src)
void emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void IncrementSmi(LiftoffRegister dst, int offset)
void PopRegisters(LiftoffRegList)
LiftoffRegister GetUnusedRegister(RegClass rc, std::initializer_list< LiftoffRegister > try_first, LiftoffRegList pinned)
void emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i8x16_splat(LiftoffRegister dst, LiftoffRegister src)
bool emit_f16x8_sconvert_i16x8(LiftoffRegister dst, LiftoffRegister src)
void emit_f64x2_sqrt(LiftoffRegister dst, LiftoffRegister src)
void emit_i8x16_shuffle(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, const uint8_t shuffle[16], bool is_swizzle)
void emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst, LiftoffRegister src)
bool emit_f32_ceil(DoubleRegister dst, DoubleRegister src)
void emit_i32_addi(Register dst, Register lhs, int32_t imm)
void emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst, LiftoffRegister src)
void emit_i32x4_extmul_high_i16x8_s(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2)
void emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i8x16_max_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void AtomicOr(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, bool i64_offset)
bool emit_select(LiftoffRegister dst, Register condition, LiftoffRegister true_value, LiftoffRegister false_value, ValueKind kind)
bool emit_i16x8_uconvert_f16x8(LiftoffRegister dst, LiftoffRegister src)
bool emit_type_conversion(WasmOpcode opcode, LiftoffRegister dst, LiftoffRegister src, Label *trap=nullptr)
void emit_i8x16_neg(LiftoffRegister dst, LiftoffRegister src)
void AtomicLoad(LiftoffRegister dst, Register src_addr, Register offset_reg, uintptr_t offset_imm, LoadType type, LiftoffRegList pinned, bool i64_offset)
void emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void LoadReturnStackSlot(LiftoffRegister, int offset, ValueKind)
void emit_i32x4_neg(LiftoffRegister dst, LiftoffRegister src)
void emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i32x4_relaxed_trunc_f32x4_u(LiftoffRegister dst, LiftoffRegister src)
bool emit_f32_trunc(DoubleRegister dst, DoubleRegister src)
void LoadTaggedPointerFromInstance(Register dst, Register instance, int offset)
void emit_v128_anytrue(LiftoffRegister dst, LiftoffRegister src)
void FillStackSlotsWithZero(int start, int size)
void emit_i32_shl(Register dst, Register src, Register amount)
void emit_i32x4_extmul_high_i16x8_u(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2)
void emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_s128_const(LiftoffRegister dst, const uint8_t imms[16])
void emit_f64_sqrt(DoubleRegister dst, DoubleRegister src)
void emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void StoreLane(Register dst, Register offset, uintptr_t offset_imm, LiftoffRegister src, StoreType type, uint8_t lane, uint32_t *protected_store_pc, bool i64_offset)
Register LoadOldFramePointer()
void CheckTierUp(int declared_func_index, int budget_used, Label *ool_label, const FreezeCacheState &frozen)
void emit_i16x8_extract_lane_u(LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx)
void emit_f64x2_relaxed_min(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void CallIndirect(const ValueKindSig *sig, compiler::CallDescriptor *call_descriptor, Register target)
void RecordSpillsInSafepoint(SafepointTableBuilder::Safepoint &safepoint, LiftoffRegList all_spills, LiftoffRegList ref_spills, int spill_offset)
void emit_f64x2_relaxed_max(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
bool emit_f16x8_qfma(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, LiftoffRegister src3)
void emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void clear_i32_upper_half(Register dst)
void emit_f64x2_pmin(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_f32_add(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs)
bool emit_f16x8_sqrt(LiftoffRegister dst, LiftoffRegister src)
void LoadTrustedPointer(Register dst, Register src_addr, int offset, IndirectPointerTag tag)
void emit_i32_add(Register dst, Register lhs, Register rhs)
void emit_i32x4_relaxed_trunc_f64x2_s_zero(LiftoffRegister dst, LiftoffRegister src)
static constexpr int StaticStackFrameSize()
void emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i32_signextend_i8(Register dst, Register src)
void emit_i8x16_bitmask(LiftoffRegister dst, LiftoffRegister src)
constexpr Register set(Register reg)
constexpr unsigned GetNumRegsSet() const
constexpr Register gp() const
base::SmallVector< Slot, 8 > slots_
void Construct(int param_slots)
LiftoffAssembler *const asm_
static constexpr int ToTagged(int offset)
#define ATOMIC_BINOP_CASE(op, inst)
#define COMPRESS_POINTERS_BOOL
base::Vector< const DirectHandle< Object > > args
ZoneVector< RpoNumber > & result
#define EMIT_QFMOP(instr, format)
MovableLabel continuation
LiftoffRegList regs_to_save
std::optional< OolTrapLabel > trap
constexpr bool IsPowerOfTwo(T value)
constexpr int WhichPowerOfTwo(T value)
void EmitAllTrue(LiftoffAssembler *assm, LiftoffRegister dst, LiftoffRegister src, VectorFormat format)
void AtomicBinop(LiftoffAssembler *lasm, Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, Binop op)
Register CalculateActualAddress(LiftoffAssembler *assm, UseScratchRegisterScope *temps, Register addr_reg, Register offset_reg, uintptr_t offset_imm, Register result_reg=no_reg)
void EmitSimdShiftRightImmediate(LiftoffAssembler *assm, VRegister dst, VRegister lhs, int32_t rhs)
void StoreToMemory(LiftoffAssembler *assm, MemOperand dst, const LiftoffAssembler::VarState &src)
void EmitAnyTrue(LiftoffAssembler *assm, LiftoffRegister dst, LiftoffRegister src)
CPURegister LoadToRegister(LiftoffAssembler *assm, UseScratchRegisterScope *temps, const LiftoffAssembler::VarState &src)
void Add(LiftoffAssembler *lasm, Register dst, Register lhs, Register rhs)
MemOperand GetStackSlot(int offset)
void EmitSimdShift(LiftoffAssembler *assm, LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
CPURegister AcquireByType(UseScratchRegisterScope *temps, ValueKind kind)
Register GetEffectiveAddress(LiftoffAssembler *assm, UseScratchRegisterScope *temps, Register addr, Register offset, uintptr_t offset_imm, bool i64_offset=false)
CPURegList PadVRegList(DoubleRegList list)
MemOperand GetInstanceDataOperand()
MemOperand GetMemOp(LiftoffAssembler *assm, UseScratchRegisterScope *temps, Register addr, Register offset, int32_t offset_imm, unsigned shift_amount=0)
CPURegister GetRegFromType(const LiftoffRegister ®, ValueKind kind)
CPURegList PadRegList(RegList list)
constexpr DoubleRegister kFpReturnRegisters[]
constexpr Register kGpParamRegisters[]
constexpr DoubleRegister kFpParamRegisters[]
constexpr Register kGpReturnRegisters[]
int declared_function_index(const WasmModule *module, int func_index)
constexpr int value_kind_size(ValueKind kind)
static constexpr LiftoffRegList kGpCacheRegList
static constexpr LiftoffRegList kFpCacheRegList
constexpr bool is_reference(ValueKind kind)
LiftoffAssembler::ValueKindSig ValueKindSig
constexpr Register no_reg
V8_EXPORT_PRIVATE bool AreConsecutive(const CPURegister ®1, const CPURegister ®2, const CPURegister ®3=NoReg, const CPURegister ®4=NoReg)
constexpr unsigned kQuadWordSizeInBytes
constexpr int kSimd128Size
V8_EXPORT_PRIVATE int LaneCountFromFormat(VectorFormat vform)
@ kUnsignedGreaterThanEqual
DwVfpRegister DoubleRegister
kWasmInternalFunctionIndirectPointerTag instance_data
kWasmInternalFunctionIndirectPointerTag kProtectedInstanceDataOffset sig
constexpr uint8_t kInstrSizeLog2
constexpr int kSystemPointerSize
const Instr kImmExceptionIsSwitchStackLimit
constexpr Register kReturnRegister0
constexpr bool SmiValuesAre31Bits()
constexpr VRegister NoVReg
unsigned LaneSizeInBitsFromFormat(VectorFormat vform)
V8_EXPORT_PRIVATE FlagValues v8_flags
constexpr AddrMode PostIndex
constexpr int kXRegSizeInBits
constexpr uint8_t kInstrSize
constexpr int kQRegSizeInBits
constexpr Register kCArgRegs[]
constexpr int kXRegSizeLog2
VectorFormat ScalarFormatFromFormat(VectorFormat vform)
constexpr Register padreg
#define DCHECK_LE(v1, v2)
#define DCHECK_IMPLIES(v1, v2)
#define DCHECK_NE(v1, v2)
#define DCHECK_GE(v1, v2)
#define DCHECK(condition)
#define DCHECK_LT(v1, v2)
#define DCHECK_EQ(v1, v2)
constexpr T RoundUp(T x, intptr_t m)
Register cached_instance_data
LiftoffRegList used_registers
#define V8_LIKELY(condition)