#ifndef V8_WASM_BASELINE_ARM_LIFTOFF_ASSEMBLER_ARM_INL_H_
#define V8_WASM_BASELINE_ARM_LIFTOFF_ASSEMBLER_ARM_INL_H_
51 "Slot size should be twice the size of the 32 bit pointer.");
                           unsigned shift_amount = 0) {
  if (shift_amount == 0) {
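// Helper (excerpted): materializes addr_reg + offset_reg + offset_imm into a
// single register, reusing addr_reg unchanged when no offset is present.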
  if (offset_reg == no_reg && offset_imm == 0) {
    if (result_reg == addr_reg || result_reg == no_reg) return addr_reg;
    assm->mov(result_reg, addr_reg);
  }

  if (offset_reg == no_reg) {
    assm->add(result_reg, addr_reg, Operand(offset_imm));
  } else {
    assm->add(result_reg, addr_reg, Operand(offset_reg));
    if (offset_imm != 0) assm->add(result_reg, result_reg, Operand(offset_imm));
  }
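// A 64-bit immediate is split into two 32-bit words for the GP register
// pair; e.g. imm = 0x100000002 gives imm_low_word = 2 and imm_high_word = 1.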
  int32_t imm_low_word = static_cast<int32_t>(imm);
  int32_t imm_high_word = static_cast<int32_t>(imm >> 32);
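// 64-bit shift helper: a left shift writes dst_high before dst_low, a right
// shift writes dst_low first, so the destination half written first must not
// alias the source half that is still needed.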
  Register clobbered_dst_reg = is_left_shift ? dst_high : dst_low;

  Register* later_src_reg = is_left_shift ? &src_low : &src_high;
  if (*later_src_reg == clobbered_dst_reg) {
    assm->MacroAssembler::Move(*later_src_reg, clobbered_dst_reg);

  (assm->*op)(dst_low, dst_high, src_low, src_high, amount_capped);
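// Float min/max helper: delegates to MacroAssembler::FloatMin/FloatMax,
// which branch to the is_nan label when either operand is NaN.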
template <typename RegisterType>
                              RegisterType lhs, RegisterType rhs,
  DCHECK(RegisterType::kSizeInBytes == 4 || RegisterType::kSizeInBytes == 8);

    assm->MacroAssembler::Move(dst, lhs);

    assm->MacroAssembler::FloatMin(dst, lhs, rhs, &is_nan);

    assm->MacroAssembler::FloatMax(dst, lhs, rhs, &is_nan);
  assm->vadd(dst, lhs, rhs);

  if (reg != must_not_alias) return reg;
  if (cond == lt || cond == le) {
    assm->vmov(dest.low(), scratch, scratch);

  if (cond == lt || cond == le) {
    assm->vmov(dest.high(), scratch, scratch);
      assm->strh(src.gp(), dst);

      assm->str(src.gp(), dst);

      assm->vstr(src.fp(), dst);

      assm->ldrh(dst.gp(), src);

      assm->ldr(dst.gp(), src);

      assm->vldr(dst.fp(), src);
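// SIMD shift helper: the scalar shift amount is broadcast to every lane with
// vdup; since NEON register shifts are left shifts, right shifts (kRight)
// negate the amount with vneg and shift left by the negative value.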
template <ShiftDirection dir = kLeft, NeonDataType dt, NeonSize sz>

  assm->vdup(sz, tmp, shift);

    assm->vneg(sz, tmp, tmp);

template <ShiftDirection dir, NeonDataType dt>

  } else if (dst != lhs) {

  assm->vpmax(NeonU32, scratch, src.low_fp(), src.high_fp());
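// Scratch-register helper that first asks the Liftoff cache state for a
// register it knows is unused, so acquiring a temp avoids spilling live
// values where possible.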
class CacheStatePreservingTempRegisters {
      : assm_(assm), pinned_(pinned) {}

    if (assm_->cache_state()->has_unused_register(kGpReg, pinned_)) {
          assm_->cache_state()->unused_register(kGpReg, pinned_).gp());

    DCHECK(!available.is_empty());

              kLiftoffFrameSetupFunctionReg) ==
                                       int stack_param_delta) {

  int slot_count = num_callee_stack_params + 2;
  for (int i = slot_count - 1; i >= 0; --i) {
                                              bool feedback_vector_slot,
                                              size_t stack_param_slots) {
  if (feedback_vector_slot) {

  patching_assembler.sub(sp, sp, Operand(frame_size));
  patching_assembler.PadWithNops();

  patching_assembler.PadWithNops();
  if (frame_size < v8_flags.stack_size * 1024) {
    add(stack_limit, stack_limit, Operand(frame_size));
    cmp(sp, stack_limit);

  if (v8_flags.experimental_wasm_growable_stacks) {
    regs_to_save.set(WasmHandleStackOverflowDescriptor::FrameBaseRegister());
    add(WasmHandleStackOverflowDescriptor::FrameBaseRegister(), fp,

    Call(static_cast<Address>(Builtin::kWasmStackOverflow),
  int func_start_offset =

  Register budget_array = temps.Acquire();

          WasmTrustedInstanceData::kTieringBudgetArrayOffset);

  int budget_arr_offset = kInt32Size * declared_func_index;

    add(budget_array, budget_array, Operand{budget_arr_offset});
    budget_arr_offset = 0;

  MemOperand budget_addr{budget_array, budget_arr_offset};
  ldr(budget, budget_addr);
  str(budget, budget_addr);
  if (!v8_flags.experimental_wasm_growable_stacks) {

  Label done, call_runtime;
  b(&call_runtime, eq);
  mov(old_fp.gp(), fp);

  switch (value.type().kind()) {
      int32_t low_word = value.to_i64();
      int32_t high_word = value.to_i64() >> 32;
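// Memory access helper (excerpted below): when protected_load_pc is given,
// the pc offset of the emitted load is recorded so the trap handler can map
// a fault at that instruction back to an out-of-bounds wasm access.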
                         uint32_t* protected_load_pc = nullptr,
                         bool needs_shift = false) {
  unsigned shift_amount = needs_shift ? type.size_log_2() : 0;

  if (type.value() == LoadType::kF64Load ||
      type.value() == LoadType::kF32Load ||
      type.value() == LoadType::kS128Load) {
        lasm, &temps, src_addr, offset_reg, offset_imm);
    if (type.value() == LoadType::kF64Load) {
    } else if (type.value() == LoadType::kF32Load) {
          (actual_src_addr == src_addr) ? temps.Acquire() : actual_src_addr;

                                   offset_imm, shift_amount);
  if (protected_load_pc) *protected_load_pc = __ pc_offset();
  switch (type.value()) {
    case LoadType::kI32Load8U:
      __ ldrb(dst.gp(), src_op);

    case LoadType::kI64Load8U:

    case LoadType::kI32Load8S:
      __ ldrsb(dst.gp(), src_op);

    case LoadType::kI64Load8S:

    case LoadType::kI32Load16U:
      __ ldrh(dst.gp(), src_op);

    case LoadType::kI64Load16U:

    case LoadType::kI32Load16S:
      __ ldrsh(dst.gp(), src_op);

    case LoadType::kI32Load:
      __ ldr(dst.gp(), src_op);

    case LoadType::kI64Load16S:

    case LoadType::kI64Load32U:

    case LoadType::kI64Load32S:

    case LoadType::kI64Load:
                                         uint32_t* protected_load_pc,
                     offset_imm, LoadType::kI32Load, protected_load_pc,

                                        int32_t offset_imm) {

                                          uint32_t* protected_store_pc,
  Register actual_offset_reg = offset_reg;
  if (offset_reg != no_reg && offset_imm != 0) {
    actual_offset_reg = temps.Acquire();
    add(actual_offset_reg, offset_reg, Operand(offset_imm));

  if (protected_store_pc) *protected_store_pc = pc_offset();

  if (skip_write_barrier || v8_flags.disable_write_barriers) return;
                            Register offset_reg, uint32_t offset_imm,
                            LoadType type, uint32_t* protected_load_pc,
  DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
                       static_cast<int32_t>(offset_imm), type,
                       protected_load_pc, needs_shift);

                             uint32_t* protected_store_pc,
  DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
  if (type.value() == StoreType::kF64Store) {
        this, &temps, dst_addr, offset_reg, offset_imm);
  } else if (type.value() == StoreType::kS128Store) {
        this, &temps, dst_addr, offset_reg, offset_imm);
  } else if (type.value() == StoreType::kF32Store) {
        this, &temps, dst_addr, offset_reg, offset_imm);
        temps.CanAcquire() ? temps.Acquire() : liftoff_temps.Acquire();

  if (protected_store_pc) *protected_store_pc = pc_offset();
  switch (type.value()) {
    case StoreType::kI64Store8:

    case StoreType::kI32Store8:
      strb(src.gp(), dst_op);

    case StoreType::kI64Store16:

    case StoreType::kI32Store16:
      strh(src.gp(), dst_op);

    case StoreType::kI64Store32:

    case StoreType::kI32Store:
      str(src.gp(), dst_op);

    case StoreType::kI64Store:
      str(src.low_gp(), dst_op);

      if (temps.CanAcquire()) {

      str(src.high_gp(), dst_op);
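// Atomic read-modify-write helper: load the old value, apply the operation
// into a temp, then attempt the exclusive store; the enclosing retry loop is
// elided in this excerpt.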
             result.gp() != offset_reg);
      lasm, &temps, dst_addr, offset_reg, offset_imm);

  (lasm->*load)(result.gp(), actual_addr, al);
  op(lasm, temp, result.gp(), value.gp());
  (lasm->*store)(store_result, temp, actual_addr, al);

  __ add(dst, lhs, rhs);

  __ sub(dst, lhs, rhs);

  __ and_(dst, lhs, rhs);

  __ orr(dst, lhs, rhs);

  __ eor(dst, lhs, rhs);
                                  Register offset_reg, uint32_t offset_imm,
  if (offset_reg != no_reg) pinned.set(offset_reg);
  switch (type.value()) {
    case StoreType::kI64Store8:
      value = value.low();

    case StoreType::kI32Store8:

    case StoreType::kI64Store16:
      value = value.low();

    case StoreType::kI32Store16:

    case StoreType::kI64Store32:
      value = value.low();

    case StoreType::kI32Store:
                            Register offset_reg, uint32_t offset_imm,
                            std::optional<LiftoffRegister> result,

  Register value_low = value.low_gp();
  Register value_high = value.high_gp();
  auto regs_to_check = {&dst_addr, &offset_reg, &value_low, &value_high};
  auto re_pin = [regs_to_check, &pinned] {
    for (auto* reg : regs_to_check) {

  __ ClearRegister(dst_low, regs_to_check, pinned);
  __ ClearRegister(dst_high, regs_to_check, pinned);
  if (result.has_value()) {
    result_low = result.value().low_gp();
    if (pinned.has(result_low)) {
      result_low = __ GetUnusedRegister(kGpReg, pinned).gp();
    }
    pinned.set(result_low);
    result_high = result.value().high_gp();
    if (pinned.has(result_high)) {
      result_high = __ GetUnusedRegister(kGpReg, pinned).gp();
    }
    pinned.set(result_high);
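// 64-bit atomics use an ldrexd/strexd exclusive pair: ldrexd loads both
// words of the value, and strexd writes 0 to store_result only if the
// location was not touched by another agent in between.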
      lasm, &temps, dst_addr, offset_reg, offset_imm);

  __ ldrexd(dst_low, dst_high, actual_addr);
  if (result.has_value()) {
    __ mov(result_low, dst_low);
    __ mov(result_high, dst_high);

  __ strexd(store_result, dst_low, dst_high, actual_addr);

  if (result.has_value()) {
    if (result_low != result.value().low_gp()) {
      __ mov(result.value().low_gp(), result_low);
    }
    if (result_high != result.value().high_gp()) {
      __ mov(result.value().high_gp(), result_high);
    }
  }

  __ mov(dst.low_gp(), src.low_gp());
                                Register offset_reg, uint32_t offset_imm,
  if (type.value() != LoadType::kI64Load) {
    Load(dst, src_addr, offset_reg, offset_imm, type, nullptr, true);

      this, &temps, src_addr, offset_reg, offset_imm);
  ldrexd(dst_low, dst_high, actual_addr);
  if (type.value() == StoreType::kI64Store) {

  Store(dst_addr, offset_reg, offset_imm, src, type, pinned, nullptr, true);

  if (type.value() == StoreType::kI64Store) {

  if (type.value() == StoreType::kI64Store) {

  if (type.value() == StoreType::kI64Store) {

  if (type.value() == StoreType::kI64Store) {

  if (type.value() == StoreType::kI64Store) {

                                 uint32_t offset_imm,
  if (type.value() == StoreType::kI64Store) {

                                      uint32_t offset_imm,
  __ SpillRegisters(dst_addr, offset, result_low, result_high, new_value_low,
                    new_value_high, store_result, expected_low, expected_high);

  __ ParallelRegisterMove(
       {dst_addr, dst_addr_reg, kI32},

                              offset_imm, dst_addr);

  __ ldrexd(result_low, result_high, dst_addr);
  __ cmp(result_low, expected_low);
  __ cmp(result_high, expected_high);
  __ strexd(store_result, new_value_low, new_value_high, dst_addr);

  __ ParallelRegisterMove(

  if (type.value() == StoreType::kI64Store) {
        expected, new_value, result);
  if (offset_reg != no_reg) pinned.set(offset_reg);

  switch (type.value()) {
    case StoreType::kI64Store8:
      result_high = result.high_gp();
      new_value = new_value.low();
      expected = expected.low();

    case StoreType::kI32Store8:
      uxtb(expected.gp(), expected.gp());
    case StoreType::kI64Store16:
      result_high = result.high_gp();
      new_value = new_value.low();
      expected = expected.low();

    case StoreType::kI32Store16:
      uxth(expected.gp(), expected.gp());

    case StoreType::kI64Store32:
      result_high = result.high_gp();
      new_value = new_value.low();
      expected = expected.low();

    case StoreType::kI32Store:

  pinned.set(new_value);
  pinned.set(expected);
      this, &temps, dst_addr, offset_reg, offset_imm);

  (this->*load)(result.gp(), actual_addr, al);
  (this->*store)(store_result, new_value.gp(), actual_addr, al);

  if (result.gp() != result_reg) {

  if (result_high != no_reg) {
                                           uint32_t caller_slot_idx,

                                            uint32_t caller_slot_idx,

  Register scratch = temps.Acquire();
  const int kRegSize = 4;
  if (src_offset < dst_offset) {

    src = liftoff_temps.Acquire();

    src = assembler_temps.Acquire();

  switch (value.type().kind()) {
      int32_t low_word = value.to_i64();
      int32_t high_word = value.to_i64() >> 32;
  Register scratch = temps.Acquire();
  mul(dst, lhs, scratch);

  and_(dst, lhs, rhs);

  } else if (dst != src) {

  } else if (dst != src) {

  } else if (dst != src) {

  vadd(dst, lhs, rhs);

  vsub(dst, lhs, rhs);

  vmul(dst, lhs, rhs);

  vdiv(dst, lhs, rhs);
  if (src == scratch1) std::swap(scratch1, scratch2);
  assm->and_(scratch1, dst, scratch1);

                                     Label* trap_div_by_zero,
                                     Label* trap_div_unrepresentable) {
  bool speculative_sdiv = dst != lhs && dst != rhs;
  if (speculative_sdiv) {
    sdiv(dst, lhs, rhs);

  b(trap_div_by_zero, eq);

  b(trap_div_unrepresentable, eq);

  if (!speculative_sdiv) {
    sdiv(dst, lhs, rhs);
                                     Label* trap_div_by_zero) {
  b(trap_div_by_zero, eq);
  udiv(dst, lhs, rhs);

                                     Label* trap_div_by_zero) {
  sdiv(scratch, lhs, rhs);
  b(trap_div_by_zero, eq);
  mls(dst, scratch, rhs, lhs);

                                     Label* trap_div_by_zero) {
  udiv(scratch, lhs, rhs);
  b(trap_div_by_zero, eq);
  mls(dst, scratch, rhs, lhs);
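// ARM has no hardware remainder instruction: after sdiv/udiv computes the
// quotient into scratch, mls(dst, scratch, rhs, lhs) forms
// dst = lhs - scratch * rhs in a single instruction.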
                                     Label* trap_div_by_zero,
                                     Label* trap_div_unrepresentable) {

                                     Label* trap_div_by_zero) {

                                     Label* trap_div_by_zero) {

                                     Label* trap_div_by_zero) {

  b(&high_is_zero, eq);

  bind(&high_is_zero);

  b(&low_is_zero, eq);

  Register src1 = src.high_gp() == dst.low_gp() ? src.high_gp() : src.low_gp();
  Register src2 = src.high_gp() == dst.low_gp() ? src.low_gp() : src.high_gp();
  constexpr uint32_t kF32SignBit = uint32_t{1} << 31;
  orr(scratch, scratch, scratch2);

  constexpr uint32_t kF64SignBitHighWord = uint32_t{1} << 31;
  bic(scratch, scratch, Operand(kF64SignBitHighWord));
  and_(scratch2, scratch2, Operand(kF64SignBitHighWord));
  orr(scratch, scratch, scratch2);
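// copysign via bit manipulation: bic clears the sign bit of the magnitude
// operand, and_ isolates the sign bit of the sign operand, and orr merges
// them; for f64 only the high word of the register pair holds the sign bit.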
    case kExprI32ConvertI64:

    case kExprI32SConvertF32: {
      vmov(dst.gp(), scratch_f);
      vmov(scratch_f, Float32(static_cast<float>(INT32_MIN)));

    case kExprI32UConvertF32: {
      vmov(dst.gp(), scratch_f);

    case kExprI32SConvertF64: {
      vmov(dst.gp(), scratch_f);

    case kExprI32UConvertF64: {
      vmov(dst.gp(), scratch_f);

    case kExprI32SConvertSatF32: {
      vmov(dst.gp(), scratch_f);

    case kExprI32UConvertSatF32: {
      vmov(dst.gp(), scratch_f);

    case kExprI32SConvertSatF64: {
      vmov(dst.gp(), scratch_f);

    case kExprI32UConvertSatF64: {
      vmov(dst.gp(), scratch_f);
    case kExprI32ReinterpretF32:

    case kExprI64SConvertI32:

    case kExprI64UConvertI32:

    case kExprI64ReinterpretF64:

    case kExprF32SConvertI32: {
      vmov(dst_float, src.gp());

    case kExprF32UConvertI32: {
      vmov(dst_float, src.gp());

    case kExprF32ConvertF64:

    case kExprF32ReinterpretI32:

    case kExprF64SConvertI32: {

    case kExprF64UConvertI32: {

    case kExprF64ConvertF32:

    case kExprF64ReinterpretI64:
      vmov(dst.fp(), src.low_gp(), src.high_gp());

    case kExprF64SConvertI64:
    case kExprF64UConvertI64:
    case kExprI64SConvertF32:
    case kExprI64UConvertF32:
    case kExprI64SConvertSatF32:
    case kExprI64UConvertSatF32:
    case kExprF32SConvertI64:
    case kExprF32UConvertI64:
    case kExprI64SConvertF64:
    case kExprI64UConvertF64:
    case kExprI64SConvertSatF64:
    case kExprI64UConvertSatF64:
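// The i64<->float conversions grouped above have no direct 32-bit ARM
// lowering; presumably this excerpt's function returns false for them so the
// caller falls back to a C call (the return statement is elided here).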
  orr(dst, src.low_gp(), src.high_gp());

  if (speculative_move) {

  if (unsigned_cond == cond) {
    if (!speculative_move) {

    if (!speculative_move) {

    if (!speculative_move) {
                                     Register offset_reg, uintptr_t offset_imm,
                                     uint32_t* protected_load_pc,
      this, &temps, src_addr, offset_reg, offset_imm);

    vmov(dest.high(), 0);

                                uintptr_t offset_imm, LoadType type,
                                uint8_t laneidx, uint32_t* protected_load_pc,
      this, &temps, addr, offset_reg, offset_imm);

                                 uint32_t* protected_store_pc,

                                          uint8_t imm_lane_idx) {

                                          uint8_t imm_lane_idx) {
  if (dst != rhs) vmov(dest.low(), right.low(), mi);
  if (dst != rhs) vmov(dest.high(), right.high(), mi);

  if (dst != rhs) vmov(dest.low(), right.low(), gt);
  if (dst != rhs) vmov(dest.high(), right.high(), gt);
                                                 uint8_t imm_lane_idx) {

                                                 uint8_t imm_lane_idx) {
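// Min/max via compare-and-select: vcgt sets a per-lane mask where the first
// operand is greater, and vbsl uses that mask to pick each result lane from
// the appropriate input.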
  if (dst == lhs || dst == rhs) {
  vcgt(tmp, left, right);
  vbsl(tmp, right, left);
  if (dst == lhs || dst == rhs) {

  if (dst == lhs || dst == rhs) {
  vcgt(tmp, right, left);
  vbsl(tmp, right, left);
  if (dst == lhs || dst == rhs) {

                                                 uint8_t imm_lane_idx) {
              imm_lane_idx * 2 + 1);

                                                 uint8_t imm_lane_idx) {
              imm_lane_idx * 2 + 1);

  vmov(zero, uint64_t{0});
  if (used_plus_dst.has(lhs) && used_plus_dst.has(rhs)) {
  } else if (used_plus_dst.has(lhs)) {
  } else if (used_plus_dst.has(rhs)) {

  if (tmp2 != right) {

                                                 uint8_t imm_lane_idx) {

                                                 uint8_t imm_lane_idx) {

                                                 uint8_t imm_lane_idx) {

                                                 uint8_t imm_lane_idx) {

                                                 uint8_t imm_lane_idx) {
                                          const uint8_t shuffle[16],
  if ((src1 != src2) && src1.code() + 1 != src2.code()) {

                "This only works if q14-q15 (d28-d31) are not used.");
                "This only works if q14-q15 (d28-d31) are not used.");
                "This only works if q14-q15 (d28-d31) are not used.");
                "This only works if q14-q15 (d28-d31) are not used.");

  int table_size = src1 == src2 ? 2 : 4;

  int scratch_s_base = scratch.code() * 4;
  for (int j = 0; j < 4; j++) {
    for (int i = 3; i >= 0; i--) {
      imm = (imm << 8) | shuffle[j * 4 + i];

    DCHECK_EQ(0, imm & (table_size == 2 ? 0xF0F0F0F0 : 0xE0E0E0E0));
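// i8x16 shuffle: the 16 shuffle indices are packed four-at-a-time into
// 32-bit immediates (loaded via the scratch S-registers), and vtbl then
// looks up each destination byte in the table formed by the source
// registers.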
  if (dest != src1 && dest != src2) {
    vtbl(dest.low(), table, scratch.low());
    vtbl(dest.high(), table, scratch.high());
  } else {
    vtbl(scratch.low(), table, scratch.low());
    vtbl(scratch.high(), table, scratch.high());
    vmov(dest, scratch);
  }
                                                 uint8_t imm_lane_idx) {

                                                 uint8_t imm_lane_idx) {

                                                 uint8_t imm_lane_idx) {

  vpmin(NeonU8, scratch, src.low_fp(), src.high_fp());

                                           const uint8_t imms[16]) {
  memcpy(vals, imms, sizeof(vals));

                                                 uint8_t imm_lane_idx) {

                                                 uint8_t imm_lane_idx) {
  cmp(sp, limit_address);

    if ((code != last.code() + 1) || ((code - first.code() + 1) > 16)) break;

    if ((code != first.code() - 1) || ((last.code() - code + 1) > 16)) break;

      if (ref_spills.has(reg)) {

  Drop(num_stack_slots);

    } else if (arg.is_const()) {

  } while (--words > 0);
  constexpr int kNumCCallArgs = 1;

  if (return_kind != kVoid) {
    if (kReturnReg != rets->gp()) {

  if (out_argument_kind != kVoid) {

  int num_args = static_cast<int>(args.size());

    parallel_move.LoadIntoRegister(

    if (arg.is_const()) {

  CallWasmCodePointer(target);

  if (lane_kind == kF32) {
  int last_stack_slot = param_slots;
  for (auto& slot : slots_) {
    const int stack_slot = slot.dst_slot_;
    last_stack_slot = stack_slot;
    switch (src.loc()) {
        switch (src.kind()) {

        switch (src.kind()) {

                             : src.i32_const() >> 31));
void emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i8x16_splat(LiftoffRegister dst, LiftoffRegister src)
bool emit_f16x8_sconvert_i16x8(LiftoffRegister dst, LiftoffRegister src)
void emit_f64x2_sqrt(LiftoffRegister dst, LiftoffRegister src)
void emit_i8x16_shuffle(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, const uint8_t shuffle[16], bool is_swizzle)
void emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst, LiftoffRegister src)
bool emit_f32_ceil(DoubleRegister dst, DoubleRegister src)
void emit_i32_addi(Register dst, Register lhs, int32_t imm)
void emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst, LiftoffRegister src)
void emit_i32x4_extmul_high_i16x8_s(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2)
void emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i8x16_max_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void AtomicOr(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, bool i64_offset)
bool emit_select(LiftoffRegister dst, Register condition, LiftoffRegister true_value, LiftoffRegister false_value, ValueKind kind)
bool emit_i16x8_uconvert_f16x8(LiftoffRegister dst, LiftoffRegister src)
bool emit_type_conversion(WasmOpcode opcode, LiftoffRegister dst, LiftoffRegister src, Label *trap=nullptr)
void emit_i8x16_neg(LiftoffRegister dst, LiftoffRegister src)
void AtomicLoad(LiftoffRegister dst, Register src_addr, Register offset_reg, uintptr_t offset_imm, LoadType type, LiftoffRegList pinned, bool i64_offset)
void emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void LoadReturnStackSlot(LiftoffRegister, int offset, ValueKind)
void emit_i32x4_neg(LiftoffRegister dst, LiftoffRegister src)
void emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i32x4_relaxed_trunc_f32x4_u(LiftoffRegister dst, LiftoffRegister src)
bool emit_f32_trunc(DoubleRegister dst, DoubleRegister src)
void LoadTaggedPointerFromInstance(Register dst, Register instance, int offset)
void emit_v128_anytrue(LiftoffRegister dst, LiftoffRegister src)
void FillStackSlotsWithZero(int start, int size)
void emit_i32_shl(Register dst, Register src, Register amount)
void emit_i32x4_extmul_high_i16x8_u(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2)
void emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_s128_const(LiftoffRegister dst, const uint8_t imms[16])
void emit_f64_sqrt(DoubleRegister dst, DoubleRegister src)
void StoreLane(Register dst, Register offset, uintptr_t offset_imm, LiftoffRegister src, StoreType type, uint8_t lane, uint32_t *protected_store_pc, bool i64_offset)
Register LoadOldFramePointer()
void CheckTierUp(int declared_func_index, int budget_used, Label *ool_label, const FreezeCacheState &frozen)
void emit_i16x8_extract_lane_u(LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx)
void emit_f64x2_relaxed_min(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void CallIndirect(const ValueKindSig *sig, compiler::CallDescriptor *call_descriptor, Register target)
void RecordSpillsInSafepoint(SafepointTableBuilder::Safepoint &safepoint, LiftoffRegList all_spills, LiftoffRegList ref_spills, int spill_offset)
void emit_f64x2_relaxed_max(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
bool emit_f16x8_qfma(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, LiftoffRegister src3)
void emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
V8_NOINLINE V8_PRESERVE_MOST void SpillRegister(LiftoffRegister)
void emit_f64x2_pmin(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_f32_add(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs)
bool emit_f16x8_sqrt(LiftoffRegister dst, LiftoffRegister src)
void LoadTrustedPointer(Register dst, Register src_addr, int offset, IndirectPointerTag tag)
void emit_i32_add(Register dst, Register lhs, Register rhs)
void emit_i32x4_relaxed_trunc_f64x2_s_zero(LiftoffRegister dst, LiftoffRegister src)
static constexpr int StaticStackFrameSize()
void emit_i32_muli(Register dst, Register lhs, int32_t imm)
void emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i32_signextend_i8(Register dst, Register src)
void emit_i8x16_bitmask(LiftoffRegister dst, LiftoffRegister src)
constexpr Register set(Register reg)
LiftoffRegister GetLastRegSet() const
constexpr LiftoffRegister clear(LiftoffRegister reg)
bool has(LiftoffRegister reg) const
constexpr bool is_empty() const
constexpr unsigned GetNumRegsSet() const
LiftoffRegister GetFirstRegSet() const
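// The LiftoffRegList operations above are plain bitset operations over
// register codes. A minimal self-contained sketch of the idea, assuming
// register codes 0..31 and GCC/Clang builtins; illustrative only, not V8's
// implementation (the real list also spans separate GP and FP banks):
#include <cstdint>

class RegListSketch {
 public:
  constexpr void set(int code) { bits_ |= uint32_t{1} << code; }
  constexpr void clear(int code) { bits_ &= ~(uint32_t{1} << code); }
  constexpr bool has(int code) const { return (bits_ >> code) & 1; }
  constexpr bool is_empty() const { return bits_ == 0; }
  unsigned GetNumRegsSet() const { return __builtin_popcount(bits_); }
  // First/last set register falls out of counting trailing/leading zeros;
  // callers must check !is_empty() first (ctz/clz on 0 is undefined).
  int GetFirstRegSet() const { return __builtin_ctz(bits_); }
  int GetLastRegSet() const { return 31 - __builtin_clz(bits_); }

 private:
  uint32_t bits_ = 0;
};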
DoubleRegister high_fp() const
constexpr DoubleRegister fp() const
bool overlaps(const LiftoffRegister other) const
LiftoffRegister high() const
constexpr bool is_gp_pair() const
static LiftoffRegister ForPair(Register low, Register high)
LiftoffRegister low() const
DoubleRegister low_fp() const
constexpr Register gp() const
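// On 32-bit ARM an i64 value lives in a pair of GP registers, and
// LiftoffRegister can denote either a single register or such a pair
// (ForPair / low / high / is_gp_pair above). A hypothetical packed encoding
// that captures the idea; the tag bit, field layout, and names below are
// invented for illustration and are not V8's actual scheme:
#include <cassert>
#include <cstdint>

struct RegPairSketch {
  static constexpr uint32_t kPairTag = uint32_t{1} << 31;
  uint32_t code;

  static RegPairSketch ForPair(uint32_t low, uint32_t high) {
    assert(low != high);  // the two halves must be distinct registers
    return {kPairTag | low | (high << 5)};  // two 5-bit register codes
  }
  bool is_gp_pair() const { return (code & kPairTag) != 0; }
  uint32_t low() const { return code & 0x1f; }           // bits 0..31 of the i64
  uint32_t high() const { return (code >> 5) & 0x1f; }   // bits 32..63
};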
base::SmallVector< Slot, 8 > slots_
static int SlotSizeInBytes(const Slot &slot)
void Construct(int param_slots)
LiftoffAssembler *const asm_
static constexpr int ToTagged(int offset)
~CacheStatePreservingTempRegisters()
LiftoffAssembler *const assm_
CacheStatePreservingTempRegisters(LiftoffAssembler *assm, LiftoffRegList pinned={})
#define COMPRESS_POINTERS_BOOL
#define V8_ENABLE_SANDBOX_BOOL
base::Vector< const DirectHandle< Object > > args
ZoneVector< RpoNumber > & result
MovableLabel continuation
LiftoffRegList regs_to_save
std::optional< OolTrapLabel > trap
constexpr bool IsPowerOfTwo(T value)
constexpr int WhichPowerOfTwo(T value)
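// IsPowerOfTwo and WhichPowerOfTwo are the standard bit tricks; a
// self-contained sketch (hypothetical names, GCC/Clang builtin):
#include <cassert>
#include <cstdint>

// Exactly one bit set <=> clearing the lowest set bit yields zero.
constexpr bool IsPowerOfTwoSketch(uint32_t v) {
  return v != 0 && (v & (v - 1)) == 0;
}

// For a power of two, the exponent is the index of the single set bit,
// i.e. the number of trailing zeros.
inline int WhichPowerOfTwoSketch(uint32_t v) {
  assert(IsPowerOfTwoSketch(v));
  return __builtin_ctz(v);
}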
FloatWithBits< 32 > Float32
void EmitSimdShiftImmediate(LiftoffAssembler *assm, LiftoffRegister dst, LiftoffRegister lhs, int32_t rhs)
constexpr int MaskFromNeonDataType(NeonDataType dt)
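// Wasm defines SIMD shift counts modulo the lane width, so the assembler
// masks the count before emitting the shift. Since lane widths are powers
// of two (8/16/32/64 bits), the mask is simply lane_bits - 1. A simplified
// sketch keyed on the lane width directly rather than on NeonDataType:
constexpr int MaskFromLaneBitsSketch(int lane_bits) { return lane_bits - 1; }

static_assert(MaskFromLaneBitsSketch(8) == 7, "i8x16 shifts use count & 7");
static_assert(MaskFromLaneBitsSketch(64) == 63, "i64x2 shifts use count & 63");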
void Store(LiftoffAssembler *assm, LiftoffRegister src, MemOperand dst, ValueKind kind)
Condition MakeUnsigned(Condition cond)
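// MakeUnsigned maps a signed comparison condition to its unsigned ARM
// counterpart. This matters e.g. for i64 compares on register pairs: the
// high words compare signed, but once the high words are equal the low
// words must compare unsigned. Illustrative enum, not V8's Condition:
enum CondSketch { kEqS, kNeS, kLtS, kLeS, kGtS, kGeS, kLoU, kLsU, kHiU, kHsU };

constexpr CondSketch MakeUnsignedSketch(CondSketch c) {
  switch (c) {
    case kLtS: return kLoU;  // signed less-than      -> unsigned lower
    case kLeS: return kLsU;  // signed less-or-equal  -> unsigned lower-or-same
    case kGtS: return kHiU;  // signed greater-than   -> unsigned higher
    case kGeS: return kHsU;  // signed greater-or-eq  -> unsigned higher-or-same
    default: return c;       // eq/ne and unsigned conditions pass through
  }
}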
void Or(LiftoffAssembler *lasm, Register dst, Register lhs, Register rhs)
void I64Shiftop(LiftoffAssembler *assm, LiftoffRegister dst, LiftoffRegister src, Register amount)
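// I64Shiftop emits 64-bit shifts over a 32-bit register pair. The two-word
// arithmetic it has to realize, written in plain C++ for the left-shift
// case (a sketch of the math, not of the emitted instruction sequence):
#include <cstdint>

inline void ShiftLeftPairSketch(uint32_t* low, uint32_t* high, int amount) {
  amount &= 63;  // wasm masks i64 shift counts to 0..63
  if (amount == 0) return;
  if (amount < 32) {
    // Bits shifted out of the low word carry into the high word.
    *high = (*high << amount) | (*low >> (32 - amount));
    *low <<= amount;
  } else {
    // For counts >= 32 the low word moves entirely into the high word.
    *high = *low << (amount - 32);
    *low = 0;
  }
}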
Register CalculateActualAddress(LiftoffAssembler *assm, UseScratchRegisterScope *temps, Register addr_reg, Register offset_reg, uintptr_t offset_imm, Register result_reg=no_reg)
void EmitFloatMinOrMax(LiftoffAssembler *assm, RegisterType dst, RegisterType lhs, RegisterType rhs, MinOrMax min_or_max)
MemOperand GetHalfStackSlot(int offset, RegPairHalf half)
void Xor(LiftoffAssembler *lasm, Register dst, Register lhs, Register rhs)
void EmitAnyTrue(LiftoffAssembler *assm, LiftoffRegister dst, LiftoffRegister src)
void AtomicOp32(LiftoffAssembler *lasm, Register dst_addr, Register offset_reg, uint32_t offset_imm, LiftoffRegister value, LiftoffRegister result, LiftoffRegList pinned, void(Assembler::*load)(Register, Register, Condition), void(Assembler::*store)(Register, Register, Register, Condition), void(*op)(LiftoffAssembler *, Register, Register, Register))
void Add(LiftoffAssembler *lasm, Register dst, Register lhs, Register rhs)
void I64BinopI(LiftoffAssembler *assm, LiftoffRegister dst, LiftoffRegister lhs, int64_t imm)
void F64x2Compare(LiftoffAssembler *assm, LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Condition cond)
MemOperand GetStackSlot(int offset)
Register EnsureNoAlias(Assembler *assm, Register reg, Register must_not_alias, UseScratchRegisterScope *temps)
void AtomicOp64(LiftoffAssembler *lasm, Register dst_addr, Register offset_reg, uint32_t offset_imm, LiftoffRegister value, std::optional< LiftoffRegister > result, void(*op)(LiftoffAssembler *, LiftoffRegister, LiftoffRegister, LiftoffRegister))
void EmitSimdShift(LiftoffAssembler *assm, LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void Load(LiftoffAssembler *assm, LiftoffRegister dst, MemOperand src, ValueKind kind)
Simd128Register GetSimd128Register(DoubleRegister reg)
constexpr int32_t kPatchInstructionsRequired
FloatRegister GetFloatRegister(DoubleRegister reg)
void LoadInternal(LiftoffAssembler *lasm, LiftoffRegister dst, Register src_addr, Register offset_reg, int32_t offset_imm, LoadType type, uint32_t *protected_load_pc=nullptr, bool needs_shift=false)
void And(LiftoffAssembler *lasm, Register dst, Register lhs, Register rhs)
MemOperand GetInstanceDataOperand()
void Exchange(LiftoffAssembler *lasm, Register dst, Register lhs, Register rhs)
MemOperand GetMemOp(LiftoffAssembler *assm, UseScratchRegisterScope *temps, Register addr, Register offset, int32_t offset_imm, unsigned shift_amount=0)
constexpr int kHalfStackSlotSize
void I64Binop(LiftoffAssembler *assm, LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void AtomicBinop32(LiftoffAssembler *lasm, Register dst_addr, Register offset_reg, uint32_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, void(*op)(LiftoffAssembler *, Register, Register, Register))
void Sub(LiftoffAssembler *lasm, Register dst, Register lhs, Register rhs)
void I64Store(LiftoffAssembler *lasm, LiftoffRegister dst, LiftoffRegister, LiftoffRegister src)
void S128NarrowOp(LiftoffAssembler *assm, NeonDataType dt, NeonDataType sdt, LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void AtomicI64CompareExchange(LiftoffAssembler *lasm, Register dst_addr_reg, Register offset_reg, uint32_t offset_imm, LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result)
void GeneratePopCnt(Assembler *assm, Register dst, Register src, Register scratch1, Register scratch2)
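// ARMv7 has no scalar popcount instruction, which is why GeneratePopCnt
// takes two scratch registers: it must emit an instruction sequence for the
// count. One classic sequence is the SWAR reduction, shown here in plain
// C++ (the actual emitted code may differ, e.g. it could go via NEON vcnt):
#include <cstdint>

inline uint32_t PopCountSketch(uint32_t v) {
  v = v - ((v >> 1) & 0x55555555u);                  // sums of 2-bit fields
  v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);  // sums of 4-bit fields
  v = (v + (v >> 4)) & 0x0F0F0F0Fu;                  // sums of bytes
  return (v * 0x01010101u) >> 24;                    // fold byte sums together
}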
constexpr DoubleRegister kFpReturnRegisters[]
constexpr Register kGpParamRegisters[]
constexpr DoubleRegister kFpParamRegisters[]
static constexpr bool needs_gp_reg_pair(ValueKind kind)
constexpr DoubleRegList kLiftoffAssemblerFpCacheRegs
LiftoffBailoutReason kUnsupportedArchitecture
constexpr RegList kLiftoffAssemblerGpCacheRegs
constexpr Register kGpReturnRegisters[]
int declared_function_index(const WasmModule *module, int func_index)
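// Imported functions come first in a wasm module's function index space, so
// the "declared" index (used e.g. to index the tiering budget array) is the
// function index minus the import count. Sketch with a hypothetical
// parameter standing in for the module lookup:
inline int DeclaredFunctionIndexSketch(int num_imported_functions,
                                       int func_index) {
  return func_index - num_imported_functions;
}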
constexpr int value_kind_size(ValueKind kind)
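// value_kind_size reports the byte size of each value kind; on this 32-bit
// target, reference kinds are pointer-sized (4 bytes). Illustrative enum
// and mapping, not V8's actual ValueKind definition:
enum ValueKindSketch { kI32v, kI64v, kF32v, kF64v, kS128v, kRefv };

constexpr int ValueKindSizeSketch(ValueKindSketch kind) {
  switch (kind) {
    case kI32v: case kF32v: case kRefv: return 4;  // 32-bit GP/FP/pointer
    case kI64v: case kF64v: return 8;
    case kS128v: return 16;
  }
  return 0;  // not reached for valid kinds
}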
static constexpr LiftoffRegList kGpCacheRegList
static constexpr LiftoffRegList kFpCacheRegList
constexpr bool is_reference(ValueKind kind)
constexpr IndependentValueType kWasmI64
constexpr Register no_reg
constexpr int kTaggedSize
constexpr int kSimd128Size
constexpr NeonDataType NeonS16
constexpr NeonSize Neon32
Condition kUnsignedGreaterThanEqual
constexpr BlockAddrMode ia_w
constexpr BlockAddrMode db_w
constexpr NeonSize Neon64
constexpr NeonDataType NeonS8
constexpr NeonSize Neon16
constexpr int kSystemPointerSize
constexpr int kRegSizeInBitsLog2
constexpr NeonDataType NeonU8
constexpr Register kReturnRegister0
constexpr BarrierOption ISH
Condition NegateCondition(Condition cond)
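// ARM condition codes come in complementary pairs that differ only in the
// least significant bit of their 4-bit encoding (EQ/NE, HI/LS, GE/LT, ...),
// so negating a condition is a single XOR. 'al' (0b1110) has no complement
// and must not be negated. Sketch over the raw encoding bits:
#include <cassert>
#include <cstdint>

inline uint32_t NegateConditionSketch(uint32_t cond_bits) {
  assert(cond_bits != 0b1110u);  // 'al' is unconditional
  return cond_bits ^ 1u;
}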
V8_EXPORT_PRIVATE bool AreAliased(const CPURegister ®1, const CPURegister ®2, const CPURegister ®3=NoReg, const CPURegister ®4=NoReg, const CPURegister ®5=NoReg, const CPURegister ®6=NoReg, const CPURegister ®7=NoReg, const CPURegister ®8=NoReg)
constexpr NeonDataType NeonU16
constexpr NeonDataType NeonS32
constexpr LowDwVfpRegister kDoubleRegZero
V8_EXPORT_PRIVATE FlagValues v8_flags
constexpr NeonDataType NeonU32
constexpr AddrMode PostIndex
const intptr_t kSmiTagMask
constexpr NeonDataType NeonU64
constexpr uint8_t kInstrSize
constexpr NeonDataType NeonS64
constexpr Register kCArgRegs[]
constexpr int kDoubleSize
#define DCHECK_LE(v1, v2)
#define DCHECK_IMPLIES(v1, v2)
#define DCHECK_NE(v1, v2)
#define DCHECK(condition)
#define DCHECK_LT(v1, v2)
#define DCHECK_EQ(v1, v2)
constexpr bool IsAligned(T value, U alignment)
Register cached_instance_data
LiftoffRegList used_registers
#define V8_LIKELY(condition)