#ifndef V8_WASM_BASELINE_RISCV_LIFTOFF_ASSEMBLER_RISCV64_INL_H_
#define V8_WASM_BASELINE_RISCV_LIFTOFF_ASSEMBLER_RISCV64_INL_H_
                          bool i64_offset = false,
                          unsigned shift_amount = 0) {
  if (shift_amount != 0) {
  if (is_int31(offset_imm)) {
    int32_t offset_imm32 = static_cast<int32_t>(offset_imm);
      assm->Lw(dst.gp(), src);
      assm->Ld(dst.gp(), src);
      if (src.offset() != 0) {
        assm->AddWord(src_reg, src.rm(), src.offset());
      assm->vl(dst.fp().toV(), src_reg, 0, E8);
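      // RVV unit-stride loads/stores address memory through a base register
      // only, so a non-zero offset is folded into a scratch register before
      // issuing vl (and vs in the store path below).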
inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
      assm->Sw(src.gp(), dst);
      assm->Sd(src.gp(), dst);
      assm->StoreFloat(src.fp(), dst);
      assm->StoreDouble(src.fp(), dst);
      if (dst.offset() != 0) {
      assm->vs(src.fp().toV(), dst_reg, 0, VSew::E8);
      assm->push(reg.gp());
      assm->vs(reg.fp().toV(), sp, 0, VSew::E8);
  UseScratchRegisterScope temps(assm);
  if (src.is_const()) {
    if (src.i32_const() == 0) {
      src_reg = temps.Acquire();
      assm->li(src_reg, src.i32_const());
    assm->StoreWord(src_reg, dst);
  } else if (src.is_reg()) {
    switch (src.kind()) {
        return assm->Sw(src.reg().gp(), dst);
        return assm->Sd(src.reg().gp(), dst);
        return assm->StoreFloat(src.reg().fp(), dst);
        return assm->StoreDouble(src.reg().fp(), dst);
        assm->Add64(dst_reg, dst.rm(), dst.offset());
        assm->vs(src.reg().fp().toV(), dst_reg, 0, VSew::E8);
    switch (src.kind()) {
        assm->Add64(src_reg, sp, src.offset());
        assm->Add64(dst_reg, dst.rm(), dst.offset());
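  // StoreToMemory handles the three VarState locations: constants are
  // materialized into a scratch register and stored, register values are
  // stored directly by kind, and stack values are copied via an sp-relative
  // source address computed above.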
  switch (value.type().kind()) {
                       value.to_f32_boxed().get_bits());
                       value.to_f64_boxed().get_bits());
                                       uint32_t* protected_load_pc,
                                       false, shift_amount);
  Assembler::BlockPoolsScope blocked_pools_scope_(this, 4 * kInstrSize);
    if (protected_load_pc) *protected_load_pc = offset;
  if (protected_load_pc) {
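  // The recorded protected_load_pc lets the wasm trap handler map a faulting
  // access back to this instruction; the BlockPoolsScope keeps constant-pool
  // emission from separating the load from the pc that was recorded for it.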
                                        int32_t offset_imm) {
                                        int32_t offset_imm) {
  LoadWord(dst, src_op);
#ifdef V8_ENABLE_SANDBOX
void LiftoffAssembler::LoadCodeEntrypointViaCodePointer(Register dst,
                                                         int32_t offset_imm) {
  MacroAssembler::LoadCodeEntrypointViaCodePointer(dst, src_op,
                                          int32_t offset_imm, Register src,
                                          LiftoffRegList pinned,
                                          uint32_t* protected_store_pc,
                                          SkipWriteBarrier skip_write_barrier) {
      offset_reg.is_valid() ? Operand(offset_reg) : Operand(offset_imm);
  if (offset_reg.is_valid() && offset_imm) {
    Register effective_offset = temps.Acquire();
    AddWord(effective_offset, offset_reg, Operand(offset_imm));
    offset_op = Operand(effective_offset);
  auto trapper = [protected_store_pc](int offset) {
    if (protected_store_pc) *protected_store_pc = static_cast<uint32_t>(offset);
  if (offset_op.is_reg()) {
    AddWord(kScratchReg, dst_addr, offset_op.rm());
    StoreTaggedField(src, MemOperand(kScratchReg, 0), trapper);
    StoreTaggedField(src, MemOperand(dst_addr, offset_imm), trapper);
  if (protected_store_pc) {
  if (skip_write_barrier || v8_flags.disable_write_barriers) return;
                          StubCallMode::kCallWasmRuntimeStub);
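  // Unless the caller knows the barrier is unnecessary (skip_write_barrier)
  // or write barriers are globally disabled, the tagged store is followed by
  // the record-write stub, called here through the wasm runtime-stub path.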
                            Register offset_reg, uintptr_t offset_imm,
                            LoadType type, uint32_t* protected_load_pc,
                            bool is_load_mem, bool i64_offset,
  unsigned shift_amount = needs_shift ? type.size_log_2() : 0;
                                         i64_offset, shift_amount);
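  // When needs_shift is set, the index register is pre-scaled by log2 of the
  // access size; GetMemOp folds that shift into the effective address.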
  Assembler::BlockPoolsScope blocked_pools_scope_(this, 4 * kInstrSize);
  auto trapper = [protected_load_pc](int offset) {
    if (protected_load_pc) *protected_load_pc = static_cast<uint32_t>(offset);
  switch (type.value()) {
    case LoadType::kI32Load8U:
    case LoadType::kI64Load8U:
      Lbu(dst.gp(), src_op, trapper);
    case LoadType::kI32Load8S:
    case LoadType::kI64Load8S:
      Lb(dst.gp(), src_op, trapper);
    case LoadType::kI32Load16U:
    case LoadType::kI64Load16U:
      Lhu(dst.gp(), src_op, trapper);
    case LoadType::kI32Load16S:
    case LoadType::kI64Load16S:
      Lh(dst.gp(), src_op, trapper);
    case LoadType::kI64Load32U:
      Lwu(dst.gp(), src_op, trapper);
    case LoadType::kI32Load:
    case LoadType::kI64Load32S:
      Lw(dst.gp(), src_op, trapper);
    case LoadType::kI64Load:
      Ld(dst.gp(), src_op, trapper);
    case LoadType::kF32Load:
    case LoadType::kF64Load:
    case LoadType::kS128Load: {
      if (src_op.offset() != 0) {
        MacroAssembler::AddWord(src_reg, src_op.rm(), src_op.offset());
      vl(dst.fp().toV(), src_reg, 0, E8);
    case LoadType::kF32LoadF16:
  if (protected_load_pc) {
#if defined(V8_TARGET_BIG_ENDIAN)
    pinned.set(src_op.rm());
    liftoff::ChangeEndiannessLoad(this, dst, type, pinned);
                             uintptr_t offset_imm, LiftoffRegister src,
                             StoreType type, LiftoffRegList pinned,
                             uint32_t* protected_store_pc, bool is_store_mem,
#if defined(V8_TARGET_BIG_ENDIAN)
    pinned.set(dst_op.rm());
    Move(tmp, src, type.value_type());
    liftoff::ChangeEndiannessStore(this, src, type, pinned);
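    // Wasm memory is little-endian, so on big-endian targets the value is
    // byte-swapped (working on a temporary copy) before being stored.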
  Assembler::BlockPoolsScope blocked_pools_scope_(this, 4 * kInstrSize);
  auto trapper = [protected_store_pc](int offset) {
    if (protected_store_pc) *protected_store_pc = static_cast<uint32_t>(offset);
  switch (type.value()) {
    case StoreType::kI32Store8:
    case StoreType::kI64Store8:
      Sb(src.gp(), dst_op, trapper);
    case StoreType::kI32Store16:
    case StoreType::kI64Store16:
      Sh(src.gp(), dst_op, trapper);
    case StoreType::kI32Store:
    case StoreType::kI64Store32:
      Sw(src.gp(), dst_op, trapper);
    case StoreType::kI64Store:
      Sd(src.gp(), dst_op, trapper);
    case StoreType::kF32Store:
    case StoreType::kF64Store:
    case StoreType::kS128Store: {
      if (dst_op.offset() != 0) {
      vs(src.fp().toV(), dst_reg, 0, VSew::E8);
  if (protected_store_pc) {
                                       UseScratchRegisterScope& temps,
                                       Register addr_reg, Register offset_reg,
                                       uintptr_t offset_imm) {
  if (offset_reg == no_reg && offset_imm == 0) return addr_reg;
  Register result = temps.Acquire();
  if (offset_reg == no_reg) {
    __ AddWord(result, addr_reg, Operand(offset_imm));
    __ AddWord(result, addr_reg, Operand(offset_reg));
    if (offset_imm != 0) __ AddWord(result, result, Operand(offset_imm));
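// The atomic sequences below need the full effective address in a single
// register, because lr/sc address memory only through a base register.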
                         Register offset_reg, uintptr_t offset_imm,
  if (offset_reg != no_reg) pinned.set(offset_reg);
  Register store_result = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
  if (result_reg == value.gp() || result_reg == dst_addr ||
      result_reg == offset_reg) {
    result_reg = __ GetUnusedRegister(kGpReg, pinned).gp();
  UseScratchRegisterScope temps(lasm);
      lasm, temps, dst_addr, offset_reg, offset_imm);
  Register temp = temps.Acquire();
  switch (type.value()) {
    case StoreType::kI64Store8:
    case StoreType::kI32Store8:
      __ lbu(result_reg, actual_addr, 0);
    case StoreType::kI64Store16:
    case StoreType::kI32Store16:
      __ lhu(result_reg, actual_addr, 0);
    case StoreType::kI64Store32:
      __ lr_w(true, false, result_reg, actual_addr);
      __ ZeroExtendWord(result_reg, result_reg);
    case StoreType::kI32Store:
      __ lr_w(true, false, result_reg, actual_addr);
    case StoreType::kI64Store:
      __ lr_d(true, false, result_reg, actual_addr);
      __ add(temp, result_reg, value.gp());
      __ sub(temp, result_reg, value.gp());
      __ and_(temp, result_reg, value.gp());
      __ or_(temp, result_reg, value.gp());
      __ xor_(temp, result_reg, value.gp());
      __ mv(temp, value.gp());
  switch (type.value()) {
    case StoreType::kI64Store8:
    case StoreType::kI32Store8:
      __ sb(temp, actual_addr, 0);
      __ mv(store_result, zero_reg);
    case StoreType::kI64Store16:
    case StoreType::kI32Store16:
      __ sh(temp, actual_addr, 0);
      __ mv(store_result, zero_reg);
    case StoreType::kI64Store32:
    case StoreType::kI32Store:
      __ sc_w(false, true, store_result, actual_addr, temp);
    case StoreType::kI64Store:
      __ sc_d(false, true, store_result, actual_addr, temp);
  __ bnez(store_result, &retry);
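  // lr/sc retry loop: a non-zero store_result means the store-conditional
  // lost its reservation, so the load-modify-store sequence is replayed from
  // the retry label. The 8- and 16-bit variants shown here use plain
  // loads/stores and force store_result to zero, so they never loop.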
  if (result_reg != result.gp()) {
                                  Register offset_reg, uintptr_t offset_imm,
                                  LoadType type, LiftoffRegList pinned,
                                                 offset_reg, offset_imm);
  switch (type.value()) {
    case LoadType::kI32Load8U:
    case LoadType::kI64Load8U:
      lbu(dst.gp(), src_reg, 0);
    case LoadType::kI32Load16U:
    case LoadType::kI64Load16U:
      lhu(dst.gp(), src_reg, 0);
    case LoadType::kI32Load:
      lw(dst.gp(), src_reg, 0);
    case LoadType::kI64Load32U:
      lwu(dst.gp(), src_reg, 0);
    case LoadType::kI64Load:
      ld(dst.gp(), src_reg, 0);
                                   uintptr_t offset_imm, LiftoffRegister src,
                                   StoreType type, LiftoffRegList pinned,
                                                 offset_reg, offset_imm);
  switch (type.value()) {
    case StoreType::kI64Store8:
    case StoreType::kI32Store8:
      sb(src.gp(), dst_reg, 0);
    case StoreType::kI64Store16:
    case StoreType::kI32Store16:
      sh(src.gp(), dst_reg, 0);
    case StoreType::kI64Store32:
    case StoreType::kI32Store:
      sw(src.gp(), dst_reg, 0);
    case StoreType::kI64Store:
      sd(src.gp(), dst_reg, 0);
                                 uintptr_t offset_imm, LiftoffRegister value,
                                 LiftoffRegister result, StoreType type,
                                 uintptr_t offset_imm, LiftoffRegister value,
                                 LiftoffRegister result, StoreType type,
                                 uintptr_t offset_imm, LiftoffRegister value,
                                 LiftoffRegister result, StoreType type,
                                uintptr_t offset_imm, LiftoffRegister value,
                                LiftoffRegister result, StoreType type,
                                uintptr_t offset_imm, LiftoffRegister value,
                                LiftoffRegister result, StoreType type,
                                      uintptr_t offset_imm,
                                      LiftoffRegister value,
                                      LiftoffRegister result, StoreType type,
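// AtomicAdd, AtomicSub, AtomicAnd, AtomicOr, AtomicXor, and AtomicExchange
// all delegate to the liftoff::AtomicBinop helper above, passing the
// corresponding Binop value to select the add/sub/and/or/xor/exchange path.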
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked,           \
  Label compareExchange;                                                \
  bind(&compareExchange);                                               \
  load_linked(result.gp(), MemOperand(temp0, 0));                       \
  BranchShort(&exit, ne, expected.gp(), Operand(result.gp()));          \
  mv(temp2, new_value.gp());                                            \
  store_conditional(temp2, MemOperand(temp0, 0));                       \
  BranchShort(&compareExchange, ne, temp2, Operand(zero_reg));          \
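// Word-sized compare-exchange: load-reserve the current value, exit if it
// differs from `expected`, otherwise store-conditional `new_value` and retry
// for as long as the store-conditional reports failure (non-zero result).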
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(                   \
    load_linked, store_conditional, size, aligned)                      \
  Label compareExchange;                                                \
  andi(temp1, temp0, aligned);                                          \
  Sub64(temp0, temp0, Operand(temp1));                                  \
  Sll32(temp1, temp1, 3);                                               \
  bind(&compareExchange);                                               \
  load_linked(temp2, MemOperand(temp0, 0));                             \
  ExtractBits(result.gp(), temp2, temp1, size, false);                  \
  ExtractBits(temp2, expected.gp(), zero_reg, size, false);             \
  BranchShort(&exit, ne, temp2, Operand(result.gp()));                  \
  InsertBits(temp2, new_value.gp(), temp1, size);                       \
  store_conditional(temp2, MemOperand(temp0, 0));                       \
  BranchShort(&compareExchange, ne, temp2, Operand(zero_reg));          \
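// Sub-word compare-exchange: the address is aligned down to the containing
// word (temp0), the byte offset is turned into a bit offset (Sll32 by 3),
// and the old/expected/new fields are pulled out and re-inserted with
// ExtractBits/InsertBits so that lr/sc can operate on the whole word.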
    Register dst_addr, Register offset_reg, uintptr_t offset_imm,
    LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
    StoreType type, bool i64_offset) {
  LiftoffRegList pinned{dst_addr, expected, new_value, result};
  if (offset_reg != no_reg) pinned.set(offset_reg);
  Add64(temp0, dst_op.rm(), dst_op.offset());
  switch (type.value()) {
    case StoreType::kI64Store8:
    case StoreType::kI32Store8:
    case StoreType::kI64Store16:
    case StoreType::kI32Store16:
    case StoreType::kI64Store32:
    case StoreType::kI32Store:
    case StoreType::kI64Store:
#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT
                                            uint32_t caller_slot_idx,
                                             uint32_t caller_slot_idx,
                                             Register frame_pointer) {
  if (src.offset() != 0) {
    MacroAssembler::Add64(src_reg, src.rm(), src.offset());
  if (dst.offset() != 0) {
    MacroAssembler::vmv_vv(dst.toV(), src.toV());
  if (dst.offset() != 0) {
    vs(reg.fp().toV(), dst_reg, 0, VSew::E8);
  switch (value.type().kind()) {
  if (src.offset() != 0) {
    MacroAssembler::Add64(src_reg, src.rm(), src.offset());
  vl(reg.fp().toV(), src_reg, 0, E8);
  Add64(a0, fp, Operand(-start - size));
  Add64(a1, fp, Operand(-start));
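  // a0 and a1 bound the frame region to zero: [fp - start - size, fp - start).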
  MacroAssembler::Clz64(dst.gp(), src.gp());
  MacroAssembler::Ctz64(dst.gp(), src.gp());
                                       LiftoffRegister src) {
  MacroAssembler::Mul32(dst, lhs, rhs);
                                     Label* trap_div_by_zero,
                                     Label* trap_div_unrepresentable) {
  MacroAssembler::Div32(dst, lhs, rhs);
                                     Label* trap_div_by_zero) {
  MacroAssembler::Divu32(dst, lhs, rhs);
                                     Label* trap_div_by_zero) {
  MacroAssembler::Mod32(dst, lhs, rhs);
                                     Label* trap_div_by_zero) {
  MacroAssembler::Modu32(dst, lhs, rhs);
#define I32_BINOP(name, instruction)                                    \
  void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs,    \
    instruction(dst, lhs, rhs);                                         \
#define I32_BINOP_I(name, instruction)                                  \
  void LiftoffAssembler::emit_i32_##name##i(Register dst, Register lhs, \
    instruction(dst, lhs, Operand(imm));                                \
#define I32_SHIFTOP(name, instruction)                                  \
  void LiftoffAssembler::emit_i32_##name(Register dst, Register src,    \
                                         Register amount) {             \
    instruction(dst, src, amount);                                      \
#define I32_SHIFTOP_I(name, instruction)                                \
  void LiftoffAssembler::emit_i32_##name##i(Register dst, Register src, \
    instruction(dst, src, amount & 31);                                 \
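// Wasm defines i32 shift counts modulo 32, hence the `amount & 31` mask on
// the immediate shift form.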
                                     LiftoffRegister rhs) {
  MacroAssembler::Mul64(dst.gp(), lhs.gp(), rhs.gp());
  Register scratch = temps.Acquire();
  Mul64(dst.gp(), lhs.gp(), scratch);
                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero,
                                     Label* trap_div_unrepresentable) {
               Operand(std::numeric_limits<int64_t>::min()), ne);
  MacroAssembler::Div64(dst.gp(), lhs.gp(), rhs.gp());
                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero) {
  MacroAssembler::Divu64(dst.gp(), lhs.gp(), rhs.gp());
                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero) {
  MacroAssembler::Mod64(dst.gp(), lhs.gp(), rhs.gp());
                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero) {
  MacroAssembler::Modu64(dst.gp(), lhs.gp(), rhs.gp());
#define I64_BINOP(name, instruction)                                   \
  void LiftoffAssembler::emit_i64_##name(                              \
      LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
    instruction(dst.gp(), lhs.gp(), rhs.gp());                         \
#define I64_BINOP_I(name, instruction)                                 \
  void LiftoffAssembler::emit_i64_##name##i(                           \
      LiftoffRegister dst, LiftoffRegister lhs, int32_t imm) {         \
    instruction(dst.gp(), lhs.gp(), Operand(imm));                     \
#define I64_SHIFTOP(name, instruction)                                 \
  void LiftoffAssembler::emit_i64_##name(                              \
      LiftoffRegister dst, LiftoffRegister src, Register amount) {     \
    instruction(dst.gp(), src.gp(), amount);                           \
  if (is_uint6(amount)) {
    slli(dst.gp(), src.gp(), amount);
  if (is_uint6(amount)) {
    srai(dst.gp(), src.gp(), amount);
  if (is_uint6(amount)) {
    srli(dst.gp(), src.gp(), amount);
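  // The immediate shift forms are only usable when the amount fits the 6-bit
  // shamt field of the RV64 shift instructions (is_uint6).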
  MacroAssembler::Add64(dst.gp(), lhs.gp(), Operand(imm));
  ZeroExtendWord(dst, src);
#define FP_UNOP_RETURN_TRUE(name, instruction)                                 \
  bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
    instruction(dst, src, kScratchDoubleReg);                                  \
#undef FP_UNOP_RETURN_TRUE
                                            LiftoffRegister dst,
                                            LiftoffRegister src, Label* trap) {
    case kExprI32ConvertI64:
      MacroAssembler::SignExtendWord(dst.gp(), src.gp());
    case kExprI32SConvertF32:
    case kExprI32UConvertF32:
    case kExprI32SConvertF64:
    case kExprI32UConvertF64:
    case kExprI64SConvertF32:
    case kExprI64UConvertF32:
    case kExprI64SConvertF64:
    case kExprI64UConvertF64:
    case kExprF32ConvertF64: {
        case kExprI32SConvertF32:
        case kExprI32UConvertF32:
        case kExprI32SConvertF64:
        case kExprI32UConvertF64:
        case kExprI64SConvertF32:
        case kExprI64UConvertF32:
        case kExprI64SConvertF64:
        case kExprI64UConvertF64:
        case kExprF32ConvertF64:
      if (trap != nullptr) {
    case kExprI32ReinterpretF32:
    case kExprI64SConvertI32:
      MacroAssembler::SignExtendWord(dst.gp(), src.gp());
    case kExprI64UConvertI32:
      MacroAssembler::ZeroExtendWord(dst.gp(), src.gp());
    case kExprI64ReinterpretF64:
      fmv_x_d(dst.gp(), src.fp());
    case kExprF32SConvertI32: {
    case kExprF32UConvertI32:
    case kExprF32ReinterpretI32:
    case kExprF64SConvertI32: {
    case kExprF64UConvertI32:
    case kExprF64ConvertF32:
    case kExprF64ReinterpretI64:
      fmv_d_x(dst.fp(), src.gp());
    case kExprI32SConvertSatF32: {
    case kExprI32UConvertSatF32: {
    case kExprI32SConvertSatF64: {
    case kExprI32UConvertSatF64: {
    case kExprI64SConvertSatF32: {
      fcvt_l_s(dst.gp(), src.fp(), RTZ);
    case kExprI64UConvertSatF32: {
      fcvt_lu_s(dst.gp(), src.fp(), RTZ);
    case kExprI64SConvertSatF64: {
      fcvt_l_d(dst.gp(), src.fp(), RTZ);
    case kExprI64UConvertSatF64: {
      fcvt_lu_d(dst.gp(), src.fp(), RTZ);
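      // The saturating i64 conversions use fcvt with round-toward-zero (RTZ).
      // RISC-V fcvt already clamps out-of-range inputs to the min/max
      // representable value; NaN inputs still need separate handling, since
      // fcvt maps NaN to the maximum value while wasm requires 0.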
                                              LiftoffRegister lhs,
                                              uint8_t imm_lane_idx) {
  slliw(dst, src, 32 - 8);
  sraiw(dst, dst, 32 - 8);
  slliw(dst, src, 32 - 16);
  sraiw(dst, dst, 32 - 16);
                                              LiftoffRegister src) {
  slli(dst.gp(), src.gp(), 64 - 8);
  srai(dst.gp(), dst.gp(), 64 - 8);
                                               LiftoffRegister src) {
  slli(dst.gp(), src.gp(), 64 - 16);
  srai(dst.gp(), dst.gp(), 64 - 16);
                                               LiftoffRegister src) {
  slli(dst.gp(), src.gp(), 64 - 32);
  srai(dst.gp(), dst.gp(), 64 - 32);
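// Sign extension from an N-bit value uses a shift-left / arithmetic
// shift-right pair by (register width - N); the i32 variants use the
// w-suffixed instructions so the result stays canonically sign-extended
// on RV64.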
                                      const FreezeCacheState& frozen) {
    Register scratch0 = temps.Acquire();
    slliw(scratch0, lhs, 0);
                                       Register lhs, int32_t imm,
                                       const FreezeCacheState& frozen) {
                                           Register lhs, int32_t imm,
                                           const FreezeCacheState& frozen) {
  MacroAssembler::slliw(dst, src, 0);
  MacroAssembler::Sltu(dst, src, 1);
                                         Register lhs, Register rhs) {
  Register scratch0 = temps.Acquire();
  MacroAssembler::slliw(scratch0, lhs, 0);
  MacroAssembler::slliw(scratch1, rhs, 0);
  MacroAssembler::Sltu(dst, src.gp(), 1);
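// slliw with a shift amount of 0 acts as a cheap sext.w: it re-sign-extends
// the low 32 bits so that 32-bit comparisons on RV64 see canonical values.
// Sltu(dst, x, 1) then computes (x == 0) for the eqz operations.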
                                         LiftoffRegister lhs,
                                         LiftoffRegister rhs) {
  Register scratch = temps.Acquire();
  Register scratch = temps.Acquire();
  Add64(scratch, scratch, Operand(1));
                                     Register offset_reg, uintptr_t offset_imm,
                                     uint32_t* protected_load_pc,
  Register scratch = temps.Acquire();
  VRegister dst_v = dst.fp().toV();
  auto trapper = [protected_load_pc](int offset) {
    if (protected_load_pc) *protected_load_pc = static_cast<uint32_t>(offset);
  MachineType memtype = type.mem_type();
    Ld(scratch, src_op, trapper);
    vxor_vv(dst_v, dst_v, dst_v);
    Lwu(scratch, src_op, trapper);
    Ld(scratch, src_op, trapper);
      Lb(scratch, src_op, trapper);
      Lh(scratch, src_op, trapper);
      Lw(scratch, src_op, trapper);
      Ld(scratch, src_op, trapper);
  if (protected_load_pc) {
                                Register addr, Register offset_reg,
                                uintptr_t offset_imm, LoadType type,
                                uint8_t laneidx, uint32_t* protected_load_pc,
  MachineType mem_type = type.mem_type();
  Register scratch = temps.Acquire();
  auto trapper = [protected_load_pc](int offset) {
    if (protected_load_pc) *protected_load_pc = static_cast<uint32_t>(offset);
    Lbu(scratch, src_op, trapper);
    vmerge_vx(dst.fp().toV(), scratch, dst.fp().toV());
    Lhu(scratch, src_op, trapper);
    vmerge_vx(dst.fp().toV(), scratch, dst.fp().toV());
    Lwu(scratch, src_op, trapper);
    vmerge_vx(dst.fp().toV(), scratch, dst.fp().toV());
    Ld(scratch, src_op, trapper);
    vmerge_vx(dst.fp().toV(), scratch, dst.fp().toV());
  if (protected_load_pc) {
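  // The lane load goes through a scalar scratch register and is blended into
  // the destination vector with vmerge_vx, which writes the scalar only to
  // the lanes selected by the v0 mask (set up to pick the requested lane).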
                               uintptr_t offset_imm, LiftoffRegister src,
                               StoreType type, uint8_t lane,
                               uint32_t* protected_store_pc,
  auto trapper = [protected_store_pc](int offset) {
    if (protected_store_pc) *protected_store_pc = static_cast<uint32_t>(offset);
  if (protected_store_pc) {
                                     LiftoffRegister src) {
  vmv_vx(dst.fp().toV(), src.gp());
                                            LiftoffRegister src1,
                                            LiftoffRegister src2,
                                            uint8_t imm_lane_idx) {
  vmerge_vx(dst.fp().toV(), src2.gp(), src1.fp().toV());
                                  LiftoffRegister rhs) {
  const int64_t kNaN = 0x7ff8000000000000L;
  vmfeq_vv(v0, lhs.fp().toV(), lhs.fp().toV());
                                  LiftoffRegister rhs) {
  const int64_t kNaN = 0x7ff8000000000000L;
  vmfeq_vv(v0, lhs.fp().toV(), lhs.fp().toV());
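  // vmfeq_vv of a register with itself yields a mask of the non-NaN lanes;
  // f64x2.min/max use it to select lanes normally and to write the canonical
  // quiet NaN (kNaN = 0x7ff8000000000000) into lanes where an input was NaN.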
                                                     LiftoffRegister src) {
                                                     LiftoffRegister src) {
                                                     LiftoffRegister src) {
                                                     LiftoffRegister src) {
    const std::initializer_list<VarState> args, const LiftoffRegister* rets,
    ExternalReference ext_ref) {
  AddWord(sp, sp, Operand(-stack_bytes));
  constexpr Register kFirstArgReg = a0;
  mv(kFirstArgReg, sp);
  constexpr int kNumCCallArgs = 1;
  const LiftoffRegister* next_result_reg = rets;
  if (return_kind != kVoid) {
    constexpr Register kReturnReg = a0;
    if (kReturnReg != next_result_reg->gp()) {
      Move(*next_result_reg, LiftoffRegister(kReturnReg), return_kind);
  if (out_argument_kind != kVoid) {
  AddWord(sp, sp, Operand(stack_bytes));
                            ExternalReference ext_ref) {
  const int num_args = static_cast<int>(args_list.size());
  ParallelMove parallel_move{this};
  for (int reg_arg = 0; reg_arg < num_args; ++reg_arg) {
    parallel_move.LoadIntoRegister(LiftoffRegister{kCArgRegs[reg_arg]},
  parallel_move.Execute();
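// Plain CallC passes each argument in a C argument register: the ParallelMove
// stages a load of every VarState into kCArgRegs[i] and then executes all
// moves at once, so sources are not clobbered before they are consumed.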
  int last_stack_slot = param_slots;
  for (auto& slot : slots_) {
    const int stack_slot = slot.dst_slot_;
    last_stack_slot = stack_slot;
    switch (src.loc()) {
        if (src.kind() != kS128) {