#ifndef V8_WASM_BASELINE_PPC_LIFTOFF_ASSEMBLER_PPC_INL_H_
#define V8_WASM_BASELINE_PPC_LIFTOFF_ASSEMBLER_PPC_INL_H_
                           Register scratch, bool i64_offset = false,
                           unsigned shift_amount = 0) {
  if (shift_amount != 0) {
  if (is_int31(offset_imm)) {
    int32_t offset_imm32 = static_cast<int32_t>(offset_imm);
      assm->StoreU16(src.reg().gp(), dst, scratch1);
      assm->StoreU32(src.reg().gp(), dst, scratch1);
      assm->StoreU64(src.reg().gp(), dst, scratch1);
      assm->StoreF32(src.reg().fp(), dst, scratch1);
      assm->StoreF64(src.reg().fp(), dst, scratch1);
      assm->StoreSimd128(src.reg().fp().toSimd(), dst, scratch1);
  } else if (src.is_const()) {
    if (src.kind() == kI32) {
      assm->mov(scratch2, Operand(src.i32_const()));
      assm->StoreU32(scratch2, dst, scratch1);
      assm->mov(scratch2, Operand(static_cast<int64_t>(src.i32_const())));
      assm->StoreU64(scratch2, dst, scratch1);
    assm->StoreU32(scratch2, dst, scratch1);
    assm->StoreU64(scratch2, dst, scratch1);
                kLiftoffFrameSetupFunctionReg) ==
  Register scratch = ip;
  LoadConstant(LiftoffRegister(kLiftoffFrameSetupFunctionReg),
                                      int stack_param_delta) {
  Register scratch = ip;
  int slot_count = num_callee_stack_params + 2;
  for (int i = slot_count - 1; i >= 0; --i) {
    int offset, SafepointTableBuilder* safepoint_table_builder,
    bool feedback_vector_slot, size_t stack_param_slots) {
  if (feedback_vector_slot) {
  patching_assembler.addi(sp, sp, Operand(-frame_size));
  if (frame_size < v8_flags.stack_size * 1024) {
    AddS64(stack_limit, stack_limit, Operand(frame_size), r0);
  if (v8_flags.experimental_wasm_growable_stacks) {
    regs_to_save.set(WasmHandleStackOverflowDescriptor::FrameBaseRegister());
    AddS64(WasmHandleStackOverflowDescriptor::FrameBaseRegister(), fp,
    safepoint_table_builder->DefineSafepoint(this);
    Call(static_cast<Address>(Builtin::kWasmStackOverflow),
    safepoint_table_builder->DefineSafepoint(this);
                                      const FreezeCacheState& frozen) {
           WasmTrustedInstanceData::kTieringBudgetArrayOffset);
  int budget_arr_offset = kInt32Size * declared_func_index;
  MemOperand budget_addr(budget_array, budget_arr_offset);
  mov(r0, Operand(budget_used));
  if (!v8_flags.experimental_wasm_growable_stacks) {
  Label done, call_runtime;
  switch (value.type().kind()) {
      mov(reg.gp(), Operand(value.to_i32()));
      mov(reg.gp(), Operand(value.to_i64()));
      mov(scratch, Operand(value.to_f32_boxed().get_bits()));
      mov(scratch, Operand(value.to_f64_boxed().get_bits()));
                                       uint32_t* protected_load_pc,
  if (offset_reg != no_reg && shift_amount != 0) {
  if (protected_load_pc) *protected_load_pc = pc_offset();
                                      int32_t offset_imm) {
#ifdef V8_ENABLE_SANDBOX
void LiftoffAssembler::LoadCodeEntrypointViaCodePointer(Register dst,
                                                         int32_t offset_imm) {
  MacroAssembler::LoadCodeEntrypointViaCodePointer(dst, src_op, scratch);
                                          int32_t offset_imm, Register src,
                                          uint32_t* protected_store_pc,
                                          SkipWriteBarrier skip_write_barrier) {
  if (protected_store_pc) *protected_store_pc = pc_offset();
  if (skip_write_barrier || v8_flags.disable_write_barriers) return;
  mov(ip, Operand(offset_imm));
  add(ip, ip, dst_addr);
  if (offset_reg != no_reg) {
    add(ip, ip, offset_reg);
                          StubCallMode::kCallWasmRuntimeStub);
                            Register offset_reg, uintptr_t offset_imm,
                            LoadType type, uint32_t* protected_load_pc,
                            bool is_load_mem, bool i64_offset,
  if (!i64_offset && offset_reg != no_reg) {
  unsigned shift_amount = needs_shift ? type.size_log_2() : 0;
  if (offset_reg != no_reg && shift_amount != 0) {
  if (protected_load_pc) *protected_load_pc = pc_offset();
  switch (type.value()) {
    case LoadType::kI32Load8U:
    case LoadType::kI64Load8U:
    case LoadType::kI32Load8S:
    case LoadType::kI64Load8S:
    case LoadType::kI32Load16U:
    case LoadType::kI64Load16U:
    case LoadType::kI32Load16S:
    case LoadType::kI64Load16S:
    case LoadType::kI64Load32U:
    case LoadType::kI32Load:
    case LoadType::kI64Load32S:
    case LoadType::kI64Load:
    case LoadType::kF32Load:
      if (offset_reg == ip) {
      if (offset_reg == ip) {
    case LoadType::kF64Load:
      if (offset_reg == ip) {
      if (offset_reg == ip) {
    case LoadType::kS128Load:
                             uintptr_t offset_imm, LiftoffRegister src,
                             StoreType type, LiftoffRegList pinned,
                             uint32_t* protected_store_pc, bool is_store_mem,
  if (!i64_offset && offset_reg != no_reg) {
  if (protected_store_pc) *protected_store_pc = pc_offset();
  switch (type.value()) {
    case StoreType::kI32Store8:
    case StoreType::kI64Store8:
    case StoreType::kI32Store16:
    case StoreType::kI64Store16:
    case StoreType::kI32Store:
    case StoreType::kI64Store32:
    case StoreType::kI64Store:
    case StoreType::kF32Store:
    case StoreType::kF64Store:
    case StoreType::kS128Store: {
                                  Register offset_reg, uintptr_t offset_imm,
                                  LoadType type, LiftoffRegList,
  Load(dst, src_addr, offset_reg, offset_imm, type, nullptr, true, i64_offset);
                                   uintptr_t offset_imm, LiftoffRegister src,
                                   StoreType type, LiftoffRegList pinned,
  Store(dst_addr, offset_reg, offset_imm, src, type, pinned, nullptr, true,
#ifdef V8_TARGET_BIG_ENDIAN
constexpr bool is_be = true;
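
// ATOMIC_OP builds the body shared by the read-modify-write atomics below: it
// materializes the effective address from dst_addr/offset_reg/offset_imm and
// then dispatches on the store type. When is_be is set (big-endian target),
// the 16/32/64-bit cases byte-reverse the loaded value, apply the operation,
// and reverse the result back before it is stored.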
#define ATOMIC_OP(instr)                                                 \
    if (!i64_offset && offset_reg != no_reg) {                           \
      ZeroExtWord32(ip, offset_reg);                                     \
    Register offset = r0;                                                \
    if (offset_imm != 0) {                                               \
      mov(offset, Operand(offset_imm));                                  \
      if (offset_reg != no_reg) add(offset, offset, offset_reg);         \
    } else if (offset_reg != no_reg) {                                   \
      offset = offset_reg;                                               \
    MemOperand dst = MemOperand(offset, dst_addr);                       \
    switch (type.value()) {                                              \
      case StoreType::kI32Store8:                                        \
      case StoreType::kI64Store8: {                                      \
        auto op_func = [&](Register dst, Register lhs, Register rhs) {   \
          instr(dst, lhs, rhs);                                          \
        AtomicOps<uint8_t>(dst, value.gp(), result.gp(), r0, op_func);   \
      case StoreType::kI32Store16:                                       \
      case StoreType::kI64Store16: {                                     \
        auto op_func = [&](Register dst, Register lhs, Register rhs) {   \
          Register scratch = GetRegisterThatIsNotOneOf(lhs, rhs, dst);   \
          ByteReverseU16(dst, lhs, scratch);                             \
          instr(dst, dst, rhs);                                          \
          ByteReverseU16(dst, dst, scratch);                             \
          instr(dst, lhs, rhs);                                          \
        AtomicOps<uint16_t>(dst, value.gp(), result.gp(), r0, op_func);  \
        ByteReverseU16(result.gp(), result.gp(), ip);                    \
      case StoreType::kI32Store:                                         \
      case StoreType::kI64Store32: {                                     \
        auto op_func = [&](Register dst, Register lhs, Register rhs) {   \
          Register scratch = GetRegisterThatIsNotOneOf(lhs, rhs, dst);   \
          ByteReverseU32(dst, lhs, scratch);                             \
          instr(dst, dst, rhs);                                          \
          ByteReverseU32(dst, dst, scratch);                             \
          instr(dst, lhs, rhs);                                          \
        AtomicOps<uint32_t>(dst, value.gp(), result.gp(), r0, op_func);  \
        ByteReverseU32(result.gp(), result.gp(), ip);                    \
      case StoreType::kI64Store: {                                       \
        auto op_func = [&](Register dst, Register lhs, Register rhs) {   \
          ByteReverseU64(dst, lhs);                                      \
          instr(dst, dst, rhs);                                          \
          ByteReverseU64(dst, dst);                                      \
          instr(dst, lhs, rhs);                                          \
        AtomicOps<uint64_t>(dst, value.gp(), result.gp(), r0, op_func);  \
        ByteReverseU64(result.gp(), result.gp());                        \
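
// The Atomic{Add,Sub,And,Or,Xor} entry points below presumably expand
// ATOMIC_OP with the matching integer instruction.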
                              uintptr_t offset_imm, LiftoffRegister value,
                              LiftoffRegister result, StoreType type,
                              uintptr_t offset_imm, LiftoffRegister value,
                              LiftoffRegister result, StoreType type,
                              uintptr_t offset_imm, LiftoffRegister value,
                              LiftoffRegister result, StoreType type,
                              uintptr_t offset_imm, LiftoffRegister value,
                              LiftoffRegister result, StoreType type,
                              uintptr_t offset_imm, LiftoffRegister value,
                              LiftoffRegister result, StoreType type,
                                      uintptr_t offset_imm,
                                      LiftoffRegister value,
                                      LiftoffRegister result, StoreType type,
  if (!i64_offset && offset_reg != no_reg) {
  if (offset_imm != 0) {
  } else if (offset_reg != no_reg) {
  switch (type.value()) {
    case StoreType::kI32Store8:
    case StoreType::kI64Store8: {
    case StoreType::kI32Store16:
    case StoreType::kI64Store16: {
    case StoreType::kI32Store:
    case StoreType::kI64Store32: {
    case StoreType::kI64Store: {
    Register dst_addr, Register offset_reg, uintptr_t offset_imm,
    LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
    StoreType type, bool i64_offset) {
  if (!i64_offset && offset_reg != no_reg) {
  if (offset_imm != 0) {
  } else if (offset_reg != no_reg) {
  switch (type.value()) {
    case StoreType::kI32Store8:
    case StoreType::kI64Store8: {
          dst, expected.gp(), new_value.gp(), result.gp(), r0);
    case StoreType::kI32Store16:
    case StoreType::kI64Store16: {
        Push(new_value.gp(), expected.gp());
                       new_value.gp(), expected.gp(), result.gp());
            dst, expected.gp(), new_value.gp(), result.gp(), r0);
        Pop(new_value.gp(), expected.gp());
            dst, expected.gp(), new_value.gp(), result.gp(), r0);
    case StoreType::kI32Store:
    case StoreType::kI64Store32: {
        Push(new_value.gp(), expected.gp());
                       new_value.gp(), expected.gp(), result.gp());
            dst, expected.gp(), new_value.gp(), result.gp(), r0);
        Pop(new_value.gp(), expected.gp());
            dst, expected.gp(), new_value.gp(), result.gp(), r0);
    case StoreType::kI64Store: {
        Push(new_value.gp(), expected.gp());
            dst, expected.gp(), new_value.gp(), result.gp(), r0);
        Pop(new_value.gp(), expected.gp());
            dst, expected.gp(), new_value.gp(), result.gp(), r0);
                                           uint32_t caller_slot_idx,
#if defined(V8_TARGET_BIG_ENDIAN)
                                            uint32_t caller_slot_idx,
                                            Register frame_pointer) {
#if defined(V8_TARGET_BIG_ENDIAN)
#if defined(V8_TARGET_BIG_ENDIAN)
#ifdef V8_TARGET_BIG_ENDIAN
    vor(dst.toSimd(), src.toSimd(), src.toSimd());
  switch (value.type().kind()) {
      mov(src, Operand(value.to_i32()));
      mov(src, Operand(value.to_i64()));
#define SIGN_EXT(r) extsw(r, r)
#define ROUND_F64_TO_F32(fpr) frsp(fpr, fpr)
#define INT32_AND_WITH_1F(x) Operand(x & 0x1f)
#define INT32_AND_WITH_3F(x) Operand(x & 0x3f)
#define REGISTER_AND_WITH_1F    \
  ([&](Register rhs) {          \
    andi(r0, rhs, Operand(31)); \
#define REGISTER_AND_WITH_3F    \
  ([&](Register rhs) {          \
    andi(r0, rhs, Operand(63)); \
#define LFR_TO_REG(reg) reg.gp()
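
// Helper macros for the table-driven emitters below: SIGN_EXT and
// ROUND_F64_TO_F32 post-process results, INT32_AND_WITH_1F/3F and
// REGISTER_AND_WITH_1F/3F mask shift amounts to the operand width, and
// LFR_TO_REG unwraps a LiftoffRegister into its general-purpose register.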
#define UNOP_LIST(V)                                                          \
  V(f32_abs, fabs, DoubleRegister, DoubleRegister, , , USE, , void)           \
  V(f32_neg, fneg, DoubleRegister, DoubleRegister, , , USE, , void)           \
  V(f32_sqrt, fsqrt, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32, ,  \
  V(f32_floor, frim, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32,    \
  V(f32_ceil, frip, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32,     \
  V(f32_trunc, friz, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32,    \
  V(f64_abs, fabs, DoubleRegister, DoubleRegister, , , USE, , void)           \
  V(f64_neg, fneg, DoubleRegister, DoubleRegister, , , USE, , void)           \
  V(f64_sqrt, fsqrt, DoubleRegister, DoubleRegister, , , USE, , void)         \
  V(f64_floor, frim, DoubleRegister, DoubleRegister, , , USE, true, bool)     \
  V(f64_ceil, frip, DoubleRegister, DoubleRegister, , , USE, true, bool)      \
  V(f64_trunc, friz, DoubleRegister, DoubleRegister, , , USE, true, bool)     \
  V(i32_clz, CountLeadingZerosU32, Register, Register, , , USE, , void)       \
  V(i32_ctz, CountTrailingZerosU32, Register, Register, , , USE, , void)      \
  V(i64_clz, CountLeadingZerosU64, LiftoffRegister, LiftoffRegister,          \
    LFR_TO_REG, LFR_TO_REG, USE, , void)                                      \
  V(i64_ctz, CountTrailingZerosU64, LiftoffRegister, LiftoffRegister,         \
    LFR_TO_REG, LFR_TO_REG, USE, , void)                                      \
  V(u32_to_uintptr, ZeroExtWord32, Register, Register, , , USE, , void)       \
  V(i32_signextend_i8, extsb, Register, Register, , , USE, , void)            \
  V(i32_signextend_i16, extsh, Register, Register, , , USE, , void)           \
  V(i64_signextend_i8, extsb, LiftoffRegister, LiftoffRegister, LFR_TO_REG,   \
    LFR_TO_REG, USE, , void)                                                  \
  V(i64_signextend_i16, extsh, LiftoffRegister, LiftoffRegister, LFR_TO_REG,  \
    LFR_TO_REG, USE, , void)                                                  \
  V(i64_signextend_i32, extsw, LiftoffRegister, LiftoffRegister, LFR_TO_REG,  \
    LFR_TO_REG, USE, , void)                                                  \
  V(i32_popcnt, Popcnt32, Register, Register, , , USE, true, bool)            \
  V(i64_popcnt, Popcnt64, LiftoffRegister, LiftoffRegister, LFR_TO_REG,       \
    LFR_TO_REG, USE, true, bool)

#define EMIT_UNOP_FUNCTION(name, instr, dtype, stype, dcast, scast, rcast,    \
  return_type LiftoffAssembler::emit_##name(dtype dst, stype src) {           \
    auto _dst = dcast(dst);                                                   \
    auto _src = scast(src);                                                   \
    instr(_dst, _src);                                                        \
#undef EMIT_UNOP_FUNCTION
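
// BINOP_LIST/EMIT_BINOP_FUNCTION apply the same table-driven pattern to binary
// operations. For illustration (assuming the usual expansion of the macro),
// the i32_add entry yields roughly:
//
//   void LiftoffAssembler::emit_i32_add(Register dst, Register lhs,
//                                       Register rhs) {
//     AddS32(dst, lhs, rhs);
//   }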
#define BINOP_LIST(V)                                                          \
  V(f32_copysign, CopySignF64, DoubleRegister, DoubleRegister, DoubleRegister, \
    , , , USE, , void)                                                         \
  V(f64_copysign, CopySignF64, DoubleRegister, DoubleRegister, DoubleRegister, \
    , , , USE, , void)                                                         \
  V(f32_min, MinF64, DoubleRegister, DoubleRegister, DoubleRegister, , , ,     \
  V(f32_max, MaxF64, DoubleRegister, DoubleRegister, DoubleRegister, , , ,     \
  V(f64_min, MinF64, DoubleRegister, DoubleRegister, DoubleRegister, , , ,     \
  V(f64_max, MaxF64, DoubleRegister, DoubleRegister, DoubleRegister, , , ,     \
  V(i64_sub, SubS64, LiftoffRegister, LiftoffRegister, LiftoffRegister,        \
    LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void)                           \
  V(i64_add, AddS64, LiftoffRegister, LiftoffRegister, LiftoffRegister,        \
    LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void)                           \
  V(i64_addi, AddS64, LiftoffRegister, LiftoffRegister, int64_t, LFR_TO_REG,   \
    LFR_TO_REG, Operand, USE, , void)                                          \
  V(i32_sub, SubS32, Register, Register, Register, , , , USE, , void)          \
  V(i32_add, AddS32, Register, Register, Register, , , , USE, , void)          \
  V(i32_addi, AddS32, Register, Register, int32_t, , , Operand, USE, , void)   \
  V(i32_subi, SubS32, Register, Register, int32_t, , , Operand, USE, , void)   \
  V(i32_mul, MulS32, Register, Register, Register, , , , USE, , void)          \
  V(i64_mul, MulS64, LiftoffRegister, LiftoffRegister, LiftoffRegister,        \
    LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void)                           \
  V(i32_andi, AndU32, Register, Register, int32_t, , , Operand, USE, , void)   \
  V(i32_ori, OrU32, Register, Register, int32_t, , , Operand, USE, , void)     \
  V(i32_xori, XorU32, Register, Register, int32_t, , , Operand, USE, , void)   \
  V(i32_and, AndU32, Register, Register, Register, , , , USE, , void)          \
  V(i32_or, OrU32, Register, Register, Register, , , , USE, , void)            \
  V(i32_xor, XorU32, Register, Register, Register, , , , USE, , void)          \
  V(i64_and, AndU64, LiftoffRegister, LiftoffRegister, LiftoffRegister,        \
    LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void)                           \
  V(i64_or, OrU64, LiftoffRegister, LiftoffRegister, LiftoffRegister,          \
    LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void)                           \
  V(i64_xor, XorU64, LiftoffRegister, LiftoffRegister, LiftoffRegister,        \
    LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void)                           \
  V(i64_andi, AndU64, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG,   \
    LFR_TO_REG, Operand, USE, , void)                                          \
  V(i64_ori, OrU64, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG,     \
    LFR_TO_REG, Operand, USE, , void)                                          \
  V(i64_xori, XorU64, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG,   \
    LFR_TO_REG, Operand, USE, , void)                                          \
  V(i32_shli, ShiftLeftU32, Register, Register, int32_t, , ,                   \
    INT32_AND_WITH_1F, USE, , void)                                            \
  V(i32_sari, ShiftRightS32, Register, Register, int32_t, , ,                  \
    INT32_AND_WITH_1F, USE, , void)                                            \
  V(i32_shri, ShiftRightU32, Register, Register, int32_t, , ,                  \
    INT32_AND_WITH_1F, USE, , void)                                            \
  V(i32_shl, ShiftLeftU32, Register, Register, Register, , ,                   \
    REGISTER_AND_WITH_1F, USE, , void)                                         \
  V(i32_sar, ShiftRightS32, Register, Register, Register, , ,                  \
    REGISTER_AND_WITH_1F, USE, , void)                                         \
  V(i32_shr, ShiftRightU32, Register, Register, Register, , ,                  \
    REGISTER_AND_WITH_1F, USE, , void)                                         \
  V(i64_shl, ShiftLeftU64, LiftoffRegister, LiftoffRegister, Register,         \
    LFR_TO_REG, LFR_TO_REG, REGISTER_AND_WITH_3F, USE, , void)                 \
  V(i64_sar, ShiftRightS64, LiftoffRegister, LiftoffRegister, Register,        \
    LFR_TO_REG, LFR_TO_REG, REGISTER_AND_WITH_3F, USE, , void)                 \
  V(i64_shr, ShiftRightU64, LiftoffRegister, LiftoffRegister, Register,        \
    LFR_TO_REG, LFR_TO_REG, REGISTER_AND_WITH_3F, USE, , void)                 \
  V(i64_shli, ShiftLeftU64, LiftoffRegister, LiftoffRegister, int32_t,         \
    LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void)                    \
  V(i64_sari, ShiftRightS64, LiftoffRegister, LiftoffRegister, int32_t,        \
    LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void)                    \
  V(i64_shri, ShiftRightU64, LiftoffRegister, LiftoffRegister, int32_t,        \
    LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void)                    \
  V(f64_add, AddF64, DoubleRegister, DoubleRegister, DoubleRegister, , , ,     \
  V(f64_sub, SubF64, DoubleRegister, DoubleRegister, DoubleRegister, , , ,     \
  V(f64_mul, MulF64, DoubleRegister, DoubleRegister, DoubleRegister, , , ,     \
  V(f64_div, DivF64, DoubleRegister, DoubleRegister, DoubleRegister, , , ,     \
  V(f32_add, AddF32, DoubleRegister, DoubleRegister, DoubleRegister, , , ,     \
  V(f32_sub, SubF32, DoubleRegister, DoubleRegister, DoubleRegister, , , ,     \
  V(f32_mul, MulF32, DoubleRegister, DoubleRegister, DoubleRegister, , , ,     \
  V(f32_div, DivF32, DoubleRegister, DoubleRegister, DoubleRegister, , , ,     \

#define EMIT_BINOP_FUNCTION(name, instr, dtype, stype1, stype2, dcast, scast1, \
                            scast2, rcast, ret, return_type)                   \
  return_type LiftoffAssembler::emit_##name(dtype dst, stype1 lhs,             \
    auto _dst = dcast(dst);                                                    \
    auto _lhs = scast1(lhs);                                                   \
    auto _rhs = scast2(rhs);                                                   \
    instr(_dst, _lhs, _rhs);                                                   \
#undef EMIT_BINOP_FUNCTION
#undef INT32_AND_WITH_1F
#undef REGISTER_AND_WITH_1F
  Register scratch = temps.Acquire();
  Register scratch = temps.Acquire();
  AddS64(scratch, scratch, Operand(1));
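
// Integer division and remainder: each emitter compares the divisor against
// zero and branches to trap_div_by_zero; the signed 64-bit variants also
// compare lhs against kMinInt64 and branch to trap_div_unrepresentable before
// issuing DivS64/ModS64 (DivU64/ModU64 for the unsigned forms).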
                                     Label* trap_div_by_zero,
                                     Label* trap_div_unrepresentable) {
  b(eq, trap_div_by_zero);
  b(eq, trap_div_unrepresentable);
                                     Label* trap_div_by_zero) {
  beq(trap_div_by_zero);
                                     Label* trap_div_by_zero) {
  Label cont, done, trap_div_unrepresentable;
  beq(trap_div_by_zero);
  beq(&trap_div_unrepresentable);
  bind(&trap_div_unrepresentable);
  mov(dst, Operand(0));
                                     Label* trap_div_by_zero) {
  beq(trap_div_by_zero);
                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero,
                                     Label* trap_div_unrepresentable) {
  constexpr int64_t kMinInt64 = static_cast<int64_t>(1) << 63;
  beq(trap_div_by_zero);
  CmpS64(lhs.gp(), Operand(kMinInt64), r0);
  beq(trap_div_unrepresentable);
  DivS64(dst.gp(), lhs.gp(), rhs.gp());
                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero) {
  beq(trap_div_by_zero);
  DivU64(dst.gp(), lhs.gp(), rhs.gp());
                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero) {
  constexpr int64_t kMinInt64 = static_cast<int64_t>(1) << 63;
  Label trap_div_unrepresentable;
  beq(trap_div_by_zero);
  CmpS64(lhs.gp(), Operand(kMinInt64), r0);
  beq(&trap_div_unrepresentable);
  ModS64(dst.gp(), lhs.gp(), rhs.gp());
  bind(&trap_div_unrepresentable);
  mov(dst.gp(), Operand(0));
                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero) {
  beq(trap_div_by_zero);
  ModU64(dst.gp(), lhs.gp(), rhs.gp());
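
// emit_type_conversion dispatches on the wasm opcode: the integer
// widening/narrowing cases use extsw, f64<->f32 use frsp/fmr, and the
// *ConvertSat* cases route NaN inputs through a src_is_nan label so they
// saturate instead of trapping.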
                                            LiftoffRegister dst,
                                            LiftoffRegister src, Label* trap) {
    case kExprI32ConvertI64:
      extsw(dst.gp(), src.gp());
    case kExprI64SConvertI32:
      extsw(dst.gp(), src.gp());
    case kExprI64UConvertI32:
    case kExprF32ConvertF64:
      frsp(dst.fp(), src.fp());
    case kExprF64ConvertF32:
      fmr(dst.fp(), src.fp());
    case kExprF32SConvertI32: {
    case kExprF32UConvertI32: {
    case kExprF64SConvertI32: {
    case kExprF64UConvertI32: {
    case kExprF64SConvertI64: {
    case kExprF64UConvertI64: {
    case kExprF32SConvertI64: {
    case kExprF32UConvertI64: {
    case kExprI32SConvertF64:
    case kExprI32SConvertF32: {
    case kExprI32UConvertF64:
    case kExprI32UConvertF32: {
    case kExprI64SConvertF64:
    case kExprI64SConvertF32: {
    case kExprI64UConvertF64:
    case kExprI64UConvertF32: {
    case kExprI32SConvertSatF64:
    case kExprI32SConvertSatF32: {
      Label done, src_is_nan;
    case kExprI32UConvertSatF64:
    case kExprI32UConvertSatF32: {
      Label done, src_is_nan;
    case kExprI64SConvertSatF64:
    case kExprI64SConvertSatF32: {
      Label done, src_is_nan;
    case kExprI64UConvertSatF64:
    case kExprI64UConvertSatF32: {
      Label done, src_is_nan;
    case kExprI32ReinterpretF32: {
    case kExprI64ReinterpretF64: {
    case kExprF32ReinterpretI32: {
    case kExprF64ReinterpretI64: {
                                 const FreezeCacheState& frozen) {
#if defined(V8_COMPRESS_POINTERS)
                                           Register lhs, int32_t imm,
                                           const FreezeCacheState& frozen) {
                                           Register lhs, int32_t imm,
                                           const FreezeCacheState& frozen) {
  mov(dst, Operand(1));
                                         Register lhs, Register rhs) {
  mov(dst, Operand(1));
  cmpi(src.gp(), Operand(0));
  mov(dst, Operand(1));
                                         LiftoffRegister lhs,
                                         LiftoffRegister rhs) {
    CmpS64(lhs.gp(), rhs.gp());
    CmpU64(lhs.gp(), rhs.gp());
  mov(dst, Operand(1));
  fcmpu(lhs, rhs, cr0);
  mov(dst, Operand(1));
  mov(dst, Operand(1));
  mov(r0, Operand(imm));
                                      LiftoffRegister true_value,
                                      LiftoffRegister false_value,
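
// SIMD_BINOP_LIST maps each Liftoff SIMD binary op onto the corresponding
// MacroAssembler Simd128 helper; EMIT_SIMD_BINOP stamps out one emit_*
// wrapper per entry.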
#define SIMD_BINOP_LIST(V)                           \
  V(f64x2_add, F64x2Add)                             \
  V(f64x2_sub, F64x2Sub)                             \
  V(f64x2_mul, F64x2Mul)                             \
  V(f64x2_div, F64x2Div)                             \
  V(f64x2_eq, F64x2Eq)                               \
  V(f64x2_lt, F64x2Lt)                               \
  V(f64x2_le, F64x2Le)                               \
  V(f32x4_add, F32x4Add)                             \
  V(f32x4_sub, F32x4Sub)                             \
  V(f32x4_mul, F32x4Mul)                             \
  V(f32x4_div, F32x4Div)                             \
  V(f32x4_min, F32x4Min)                             \
  V(f32x4_max, F32x4Max)                             \
  V(f32x4_eq, F32x4Eq)                               \
  V(f32x4_lt, F32x4Lt)                               \
  V(f32x4_le, F32x4Le)                               \
  V(i64x2_add, I64x2Add)                             \
  V(i64x2_sub, I64x2Sub)                             \
  V(i64x2_eq, I64x2Eq)                               \
  V(i64x2_gt_s, I64x2GtS)                            \
  V(i32x4_add, I32x4Add)                             \
  V(i32x4_sub, I32x4Sub)                             \
  V(i32x4_mul, I32x4Mul)                             \
  V(i32x4_min_s, I32x4MinS)                          \
  V(i32x4_min_u, I32x4MinU)                          \
  V(i32x4_max_s, I32x4MaxS)                          \
  V(i32x4_max_u, I32x4MaxU)                          \
  V(i32x4_eq, I32x4Eq)                               \
  V(i32x4_gt_s, I32x4GtS)                            \
  V(i32x4_gt_u, I32x4GtU)                            \
  V(i32x4_dot_i16x8_s, I32x4DotI16x8S)               \
  V(i16x8_add, I16x8Add)                             \
  V(i16x8_sub, I16x8Sub)                             \
  V(i16x8_mul, I16x8Mul)                             \
  V(i16x8_min_s, I16x8MinS)                          \
  V(i16x8_min_u, I16x8MinU)                          \
  V(i16x8_max_s, I16x8MaxS)                          \
  V(i16x8_max_u, I16x8MaxU)                          \
  V(i16x8_eq, I16x8Eq)                               \
  V(i16x8_gt_s, I16x8GtS)                            \
  V(i16x8_gt_u, I16x8GtU)                            \
  V(i16x8_add_sat_s, I16x8AddSatS)                   \
  V(i16x8_sub_sat_s, I16x8SubSatS)                   \
  V(i16x8_add_sat_u, I16x8AddSatU)                   \
  V(i16x8_sub_sat_u, I16x8SubSatU)                   \
  V(i16x8_sconvert_i32x4, I16x8SConvertI32x4)        \
  V(i16x8_uconvert_i32x4, I16x8UConvertI32x4)        \
  V(i16x8_rounding_average_u, I16x8RoundingAverageU) \
  V(i16x8_q15mulr_sat_s, I16x8Q15MulRSatS)           \
  V(i8x16_add, I8x16Add)                             \
  V(i8x16_sub, I8x16Sub)                             \
  V(i8x16_min_s, I8x16MinS)                          \
  V(i8x16_min_u, I8x16MinU)                          \
  V(i8x16_max_s, I8x16MaxS)                          \
  V(i8x16_max_u, I8x16MaxU)                          \
  V(i8x16_eq, I8x16Eq)                               \
  V(i8x16_gt_s, I8x16GtS)                            \
  V(i8x16_gt_u, I8x16GtU)                            \
  V(i8x16_add_sat_s, I8x16AddSatS)                   \
  V(i8x16_sub_sat_s, I8x16SubSatS)                   \
  V(i8x16_add_sat_u, I8x16AddSatU)                   \
  V(i8x16_sub_sat_u, I8x16SubSatU)                   \
  V(i8x16_sconvert_i16x8, I8x16SConvertI16x8)        \
  V(i8x16_uconvert_i16x8, I8x16UConvertI16x8)        \
  V(i8x16_rounding_average_u, I8x16RoundingAverageU) \
  V(s128_and, S128And)                               \
  V(s128_or, S128Or)                                 \
  V(s128_xor, S128Xor)                               \
  V(s128_and_not, S128AndNot)

#define EMIT_SIMD_BINOP(name, op)                                              \
  void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
                                     LiftoffRegister rhs) {                    \
    op(dst.fp().toSimd(), lhs.fp().toSimd(), rhs.fp().toSimd());               \
#undef EMIT_SIMD_BINOP
#undef SIMD_BINOP_LIST
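
// Same pattern for SIMD binary ops whose helpers additionally require
// kScratchSimd128Reg.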
#define SIMD_BINOP_WITH_SCRATCH_LIST(V)                \
  V(f64x2_ne, F64x2Ne)                                 \
  V(f64x2_pmin, F64x2Pmin)                             \
  V(f64x2_pmax, F64x2Pmax)                             \
  V(f32x4_ne, F32x4Ne)                                 \
  V(f32x4_pmin, F32x4Pmin)                             \
  V(f32x4_pmax, F32x4Pmax)                             \
  V(i64x2_ne, I64x2Ne)                                 \
  V(i64x2_ge_s, I64x2GeS)                              \
  V(i64x2_extmul_low_i32x4_s, I64x2ExtMulLowI32x4S)    \
  V(i64x2_extmul_low_i32x4_u, I64x2ExtMulLowI32x4U)    \
  V(i64x2_extmul_high_i32x4_s, I64x2ExtMulHighI32x4S)  \
  V(i64x2_extmul_high_i32x4_u, I64x2ExtMulHighI32x4U)  \
  V(i32x4_ne, I32x4Ne)                                 \
  V(i32x4_ge_s, I32x4GeS)                              \
  V(i32x4_ge_u, I32x4GeU)                              \
  V(i32x4_extmul_low_i16x8_s, I32x4ExtMulLowI16x8S)    \
  V(i32x4_extmul_low_i16x8_u, I32x4ExtMulLowI16x8U)    \
  V(i32x4_extmul_high_i16x8_s, I32x4ExtMulHighI16x8S)  \
  V(i32x4_extmul_high_i16x8_u, I32x4ExtMulHighI16x8U)  \
  V(i16x8_ne, I16x8Ne)                                 \
  V(i16x8_ge_s, I16x8GeS)                              \
  V(i16x8_ge_u, I16x8GeU)                              \
  V(i16x8_extmul_low_i8x16_s, I16x8ExtMulLowI8x16S)    \
  V(i16x8_extmul_low_i8x16_u, I16x8ExtMulLowI8x16U)    \
  V(i16x8_extmul_high_i8x16_s, I16x8ExtMulHighI8x16S)  \
  V(i16x8_extmul_high_i8x16_u, I16x8ExtMulHighI8x16U)  \
  V(i16x8_dot_i8x16_i7x16_s, I16x8DotI8x16S)           \
  V(i8x16_ne, I8x16Ne)                                 \
  V(i8x16_ge_s, I8x16GeS)                              \
  V(i8x16_ge_u, I8x16GeU)                              \
  V(i8x16_swizzle, I8x16Swizzle)

#define EMIT_SIMD_BINOP_WITH_SCRATCH(name, op)                                 \
  void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
                                     LiftoffRegister rhs) {                    \
    op(dst.fp().toSimd(), lhs.fp().toSimd(), rhs.fp().toSimd(),                \
      kScratchSimd128Reg);                                                     \
#undef EMIT_SIMD_BINOP_WITH_SCRATCH
#undef SIMD_BINOP_WITH_SCRATCH_LIST
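
// Vector shifts: the RR variants take the shift amount in a register, the RI
// variants take an immediate and mask it to the lane width (63/31/15/7)
// before emitting.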
#define SIMD_SHIFT_RR_LIST(V) \
  V(i64x2_shl, I64x2Shl)      \
  V(i64x2_shr_s, I64x2ShrS)   \
  V(i64x2_shr_u, I64x2ShrU)   \
  V(i32x4_shl, I32x4Shl)      \
  V(i32x4_shr_s, I32x4ShrS)   \
  V(i32x4_shr_u, I32x4ShrU)   \
  V(i16x8_shl, I16x8Shl)      \
  V(i16x8_shr_s, I16x8ShrS)   \
  V(i16x8_shr_u, I16x8ShrU)   \
  V(i8x16_shl, I8x16Shl)      \
  V(i8x16_shr_s, I8x16ShrS)   \
  V(i8x16_shr_u, I8x16ShrU)

#define EMIT_SIMD_SHIFT_RR(name, op)                                           \
  void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
                                     LiftoffRegister rhs) {                    \
    op(dst.fp().toSimd(), lhs.fp().toSimd(), rhs.gp(), kScratchSimd128Reg);    \
#undef EMIT_SIMD_SHIFT_RR
#undef SIMD_SHIFT_RR_LIST
#define SIMD_SHIFT_RI_LIST(V)    \
  V(i64x2_shli, I64x2Shl, 63)    \
  V(i64x2_shri_s, I64x2ShrS, 63) \
  V(i64x2_shri_u, I64x2ShrU, 63) \
  V(i32x4_shli, I32x4Shl, 31)    \
  V(i32x4_shri_s, I32x4ShrS, 31) \
  V(i32x4_shri_u, I32x4ShrU, 31) \
  V(i16x8_shli, I16x8Shl, 15)    \
  V(i16x8_shri_s, I16x8ShrS, 15) \
  V(i16x8_shri_u, I16x8ShrU, 15) \
  V(i8x16_shli, I8x16Shl, 7)     \
  V(i8x16_shri_s, I8x16ShrS, 7)  \
  V(i8x16_shri_u, I8x16ShrU, 7)

#define EMIT_SIMD_SHIFT_RI(name, op, mask)                                     \
  void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
    op(dst.fp().toSimd(), lhs.fp().toSimd(), Operand(rhs & mask), r0,          \
       kScratchSimd128Reg);                                                    \
#undef EMIT_SIMD_SHIFT_RI
#undef SIMD_SHIFT_RI_LIST
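
// SIMD unary ops, split into plain helpers and ones that also need a scratch
// Simd128 register.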
#define SIMD_UNOP_LIST(V)                                      \
  V(f64x2_abs, F64x2Abs, , void)                               \
  V(f64x2_neg, F64x2Neg, , void)                               \
  V(f64x2_sqrt, F64x2Sqrt, , void)                             \
  V(f64x2_ceil, F64x2Ceil, true, bool)                         \
  V(f64x2_floor, F64x2Floor, true, bool)                       \
  V(f64x2_trunc, F64x2Trunc, true, bool)                       \
  V(f64x2_promote_low_f32x4, F64x2PromoteLowF32x4, , void)     \
  V(f32x4_abs, F32x4Abs, , void)                               \
  V(f32x4_neg, F32x4Neg, , void)                               \
  V(f32x4_sqrt, F32x4Sqrt, , void)                             \
  V(f32x4_ceil, F32x4Ceil, true, bool)                         \
  V(f32x4_floor, F32x4Floor, true, bool)                       \
  V(f32x4_trunc, F32x4Trunc, true, bool)                       \
  V(f32x4_sconvert_i32x4, F32x4SConvertI32x4, , void)          \
  V(f32x4_uconvert_i32x4, F32x4UConvertI32x4, , void)          \
  V(i64x2_neg, I64x2Neg, , void)                               \
  V(f64x2_convert_low_i32x4_s, F64x2ConvertLowI32x4S, , void)  \
  V(i64x2_sconvert_i32x4_low, I64x2SConvertI32x4Low, , void)   \
  V(i64x2_sconvert_i32x4_high, I64x2SConvertI32x4High, , void) \
  V(i32x4_neg, I32x4Neg, , void)                               \
  V(i32x4_sconvert_i16x8_low, I32x4SConvertI16x8Low, , void)   \
  V(i32x4_sconvert_i16x8_high, I32x4SConvertI16x8High, , void) \
  V(i32x4_uconvert_f32x4, I32x4UConvertF32x4, , void)          \
  V(i16x8_sconvert_i8x16_low, I16x8SConvertI8x16Low, , void)   \
  V(i16x8_sconvert_i8x16_high, I16x8SConvertI8x16High, , void) \
  V(i8x16_popcnt, I8x16Popcnt, , void)                         \
  V(s128_not, S128Not, , void)

#define EMIT_SIMD_UNOP(name, op, return_val, return_type) \
  return_type LiftoffAssembler::emit_##name(LiftoffRegister dst, \
                                            LiftoffRegister src) { \
    op(dst.fp().toSimd(), src.fp().toSimd()); \
    return return_val; \
#undef EMIT_SIMD_UNOP
#undef SIMD_UNOP_LIST
#define SIMD_UNOP_WITH_SCRATCH_LIST(V)                              \
  V(f32x4_demote_f64x2_zero, F32x4DemoteF64x2Zero, , void)          \
  V(i64x2_abs, I64x2Abs, , void)                                    \
  V(i32x4_abs, I32x4Abs, , void)                                    \
  V(i32x4_sconvert_f32x4, I32x4SConvertF32x4, , void)               \
  V(i32x4_trunc_sat_f64x2_s_zero, I32x4TruncSatF64x2SZero, , void)  \
  V(i32x4_trunc_sat_f64x2_u_zero, I32x4TruncSatF64x2UZero, , void)  \
  V(i16x8_abs, I16x8Abs, , void)                                    \
  V(i16x8_neg, I16x8Neg, , void)                                    \
  V(i8x16_abs, I8x16Abs, , void)                                    \
  V(i8x16_neg, I8x16Neg, , void)

#define EMIT_SIMD_UNOP_WITH_SCRATCH(name, op, return_val, return_type) \
  return_type LiftoffAssembler::emit_##name(LiftoffRegister dst, \
                                            LiftoffRegister src) { \
    op(dst.fp().toSimd(), src.fp().toSimd(), kScratchSimd128Reg); \
    return return_val; \
#undef EMIT_SIMD_UNOP_WITH_SCRATCH
#undef SIMD_UNOP_WITH_SCRATCH_LIST
#define SIMD_ALL_TRUE_LIST(V)    \
  V(i64x2_alltrue, I64x2AllTrue) \
  V(i32x4_alltrue, I32x4AllTrue) \
  V(i16x8_alltrue, I16x8AllTrue) \
  V(i8x16_alltrue, I8x16AllTrue)
#define EMIT_SIMD_ALL_TRUE(name, op) \
  void LiftoffAssembler::emit_##name(LiftoffRegister dst, \
                                     LiftoffRegister src) { \
    op(dst.gp(), src.fp().toSimd(), r0, ip, kScratchSimd128Reg); \
#undef EMIT_SIMD_ALL_TRUE
#undef SIMD_ALL_TRUE_LIST
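
// QFMA/QFMS: fused multiply-add and multiply-subtract on f32x4/f64x2,
// forwarded to the F32x4Qfma/F64x2Qfma (and Qfms) helpers with a scratch
// register, followed by the pairwise extending adds.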
#define SIMD_QFM_LIST(V)   \
  V(f64x2_qfma, F64x2Qfma) \
  V(f64x2_qfms, F64x2Qfms) \
  V(f32x4_qfma, F32x4Qfma) \
  V(f32x4_qfms, F32x4Qfms)

#define EMIT_SIMD_QFM(name, op) \
  void LiftoffAssembler::emit_##name( \
      LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, \
      LiftoffRegister src3) { \
    op(dst.fp().toSimd(), src1.fp().toSimd(), src2.fp().toSimd(), \
       src3.fp().toSimd(), kScratchSimd128Reg); \
#define SIMD_EXT_ADD_PAIRWISE_LIST(V)                         \
  V(i32x4_extadd_pairwise_i16x8_s, I32x4ExtAddPairwiseI16x8S) \
  V(i32x4_extadd_pairwise_i16x8_u, I32x4ExtAddPairwiseI16x8U) \
  V(i16x8_extadd_pairwise_i8x16_s, I16x8ExtAddPairwiseI8x16S) \
  V(i16x8_extadd_pairwise_i8x16_u, I16x8ExtAddPairwiseI8x16U)
#define EMIT_SIMD_EXT_ADD_PAIRWISE(name, op) \
  void LiftoffAssembler::emit_##name(LiftoffRegister dst, \
                                     LiftoffRegister src) { \
    op(dst.fp().toSimd(), src.fp().toSimd(), kScratchSimd128Reg, \
       kScratchSimd128Reg2); \
#undef EMIT_SIMD_EXT_ADD_PAIRWISE
#undef SIMD_EXT_ADD_PAIRWISE_LIST
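
// Relaxed SIMD operations have no PPC-specific fast path here; they simply
// reuse the corresponding fully-specified emitters.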
#define SIMD_RELAXED_BINOP_LIST(V)                       \
  V(i8x16_relaxed_swizzle, i8x16_swizzle)                \
  V(f64x2_relaxed_min, f64x2_pmin)                       \
  V(f64x2_relaxed_max, f64x2_pmax)                       \
  V(f32x4_relaxed_min, f32x4_pmin)                       \
  V(f32x4_relaxed_max, f32x4_pmax)                       \
  V(i16x8_relaxed_q15mulr_s, i16x8_q15mulr_sat_s)

#define SIMD_VISIT_RELAXED_BINOP(name, op)                                     \
  void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
                                     LiftoffRegister rhs) {                    \
    emit_##op(dst, lhs, rhs);                                                  \
#undef SIMD_VISIT_RELAXED_BINOP
#undef SIMD_RELAXED_BINOP_LIST
#define SIMD_RELAXED_UNOP_LIST(V)                                   \
  V(i32x4_relaxed_trunc_f32x4_s, i32x4_sconvert_f32x4)              \
  V(i32x4_relaxed_trunc_f32x4_u, i32x4_uconvert_f32x4)              \
  V(i32x4_relaxed_trunc_f64x2_s_zero, i32x4_trunc_sat_f64x2_s_zero) \
  V(i32x4_relaxed_trunc_f64x2_u_zero, i32x4_trunc_sat_f64x2_u_zero)

#define SIMD_VISIT_RELAXED_UNOP(name, op) \
  void LiftoffAssembler::emit_##name(LiftoffRegister dst, \
                                     LiftoffRegister src) { \
    emit_##op(dst, src); \
#undef SIMD_VISIT_RELAXED_UNOP
#undef SIMD_RELAXED_UNOP_LIST
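
// Float16 SIMD: each emit_f16x8_*/emit_*_f16x8 below is a bool-returning
// stub, presumably reporting whether the operation is supported on this
// target.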
#define F16_UNOP_LIST(V)     \
  V(f16x8_nearest_int)       \
  V(i16x8_sconvert_f16x8)    \
  V(i16x8_uconvert_f16x8)    \
  V(f16x8_sconvert_i16x8)    \
  V(f16x8_uconvert_i16x8)    \
  V(f16x8_demote_f32x4_zero) \
  V(f32x4_promote_low_f16x8) \
  V(f16x8_demote_f64x2_zero)

#define VISIT_F16_UNOP(name) \
  bool LiftoffAssembler::emit_##name(LiftoffRegister dst, \
                                     LiftoffRegister src) { \
#undef VISIT_F16_UNOP

#define F16_BINOP_LIST(V) \
#define VISIT_F16_BINOP(name) \
  bool LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
                                     LiftoffRegister rhs) { \
#undef VISIT_F16_BINOP
#undef F16_BINOP_LIST
                                        LiftoffRegister lhs,
                                        uint8_t imm_lane_idx) {
                                        LiftoffRegister src1,
                                        LiftoffRegister src2,
                                        uint8_t imm_lane_idx) {
                                        LiftoffRegister src1,
                                        LiftoffRegister src2,
                                        LiftoffRegister src3) {
                                        LiftoffRegister src1,
                                        LiftoffRegister src2,
                                        LiftoffRegister src3) {
                                        LiftoffRegister src) {
                                        LiftoffRegister src) {
                                        LiftoffRegister src) {
                                        LiftoffRegister src) {
                                        LiftoffRegister src) {
                                        LiftoffRegister src) {
                                        LiftoffRegister lhs,
                                        uint8_t imm_lane_idx) {
                                        LiftoffRegister lhs,
                                        uint8_t imm_lane_idx) {
                                        LiftoffRegister lhs,
                                        uint8_t imm_lane_idx) {
                                        LiftoffRegister lhs,
                                        uint8_t imm_lane_idx) {
                                        LiftoffRegister lhs,
                                        uint8_t imm_lane_idx) {
                                        LiftoffRegister lhs,
                                        uint8_t imm_lane_idx) {
                                        LiftoffRegister lhs,
                                        uint8_t imm_lane_idx) {
                                        LiftoffRegister lhs,
                                        uint8_t imm_lane_idx) {
                                        LiftoffRegister src1,
                                        LiftoffRegister src2,
                                        uint8_t imm_lane_idx) {
                                        LiftoffRegister src1,
                                        LiftoffRegister src2,
                                        uint8_t imm_lane_idx) {
                                        LiftoffRegister src1,
                                        LiftoffRegister src2,
                                        uint8_t imm_lane_idx) {
                                        LiftoffRegister src1,
                                        LiftoffRegister src2,
                                        uint8_t imm_lane_idx) {
                                        LiftoffRegister src1,
                                        LiftoffRegister src2,
                                        uint8_t imm_lane_idx) {
                                        LiftoffRegister src1,
                                        LiftoffRegister src2,
                                        uint8_t imm_lane_idx) {
                                      LiftoffRegister rhs) {
  I64x2Mul(dst.fp().toSimd(), lhs.fp().toSimd(), rhs.fp().toSimd(), ip, r0,
                                      LiftoffRegister rhs) {
  F64x2Min(dst.fp().toSimd(), lhs.fp().toSimd(), rhs.fp().toSimd(),
                                      LiftoffRegister rhs) {
  F64x2Max(dst.fp().toSimd(), lhs.fp().toSimd(), rhs.fp().toSimd(),
                                                     LiftoffRegister src) {
                                                     LiftoffRegister src) {
                                     Register offset_reg, uintptr_t offset_imm,
                                     uint32_t* protected_load_pc,
  MachineType memtype = type.mem_type();
                                     const FreezeCacheState& frozen) {
                                Register addr, Register offset_reg,
                                uintptr_t offset_imm, LoadType type,
                                uint8_t laneidx, uint32_t* protected_load_pc,
  if (!i64_offset && offset_reg != no_reg) {
  MachineType mem_type = type.mem_type();
    vor(dst.fp().toSimd(), src.fp().toSimd(), src.fp().toSimd());
  if (protected_load_pc) *protected_load_pc = pc_offset();
                                 uintptr_t offset_imm, LiftoffRegister src,
                                 StoreType type, uint8_t lane,
                                 uint32_t* protected_store_pc,
  if (protected_store_pc) *protected_store_pc = pc_offset();
                                          LiftoffRegister src1,
                                          LiftoffRegister src2,
                                          LiftoffRegister mask,
                                                      LiftoffRegister src) {
                                                      LiftoffRegister src) {
                                                      LiftoffRegister src) {
                                                      LiftoffRegister src) {
                                                      LiftoffRegister src) {
                                                      LiftoffRegister src) {
                                                    LiftoffRegister lhs,
                                                    LiftoffRegister rhs,
                                                    LiftoffRegister acc) {
                                          LiftoffRegister lhs,
                                          LiftoffRegister rhs,
                                          const uint8_t shuffle[16],
    uint8_t current_index = shuffle[i];
    shuffle_remapped[i] = (current_index <= max_index
                               ? max_index - current_index
                               : total_lane_count - current_index + max_index);
  memcpy(vals, shuffle_remapped, sizeof(shuffle_remapped));
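
  // The wasm shuffle indices are remapped above to match the PPC lane
  // numbering (lanes are counted from the opposite end); on big-endian builds
  // the two 64-bit halves of the pattern are swapped before calling
  // I8x16Shuffle (and likewise for S128Const below).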
#ifdef V8_TARGET_BIG_ENDIAN
  I8x16Shuffle(dst.fp().toSimd(), lhs.fp().toSimd(), rhs.fp().toSimd(), vals[1],
                                                 LiftoffRegister src) {
                                                 LiftoffRegister src) {
                                       const uint8_t imms[16]) {
  memcpy(vals, imms, sizeof(vals));
#ifdef V8_TARGET_BIG_ENDIAN
  S128Const(dst.fp().toSimd(), vals[1], vals[0], r0, ip);
                                      LiftoffRegister src1,
                                      LiftoffRegister src2,
                                      LiftoffRegister mask) {
  S128Select(dst.fp().toSimd(), src1.fp().toSimd(), src2.fp().toSimd(),
             mask.fp().toSimd());
                                                 LiftoffRegister src) {
                                                 LiftoffRegister src) {
                                                 LiftoffRegister src) {
                                                 LiftoffRegister src) {
    SafepointTableBuilder::Safepoint& safepoint, LiftoffRegList all_spills,
    LiftoffRegList ref_spills, int spill_offset) {
  while (!gp_spills.is_empty()) {
    LiftoffRegister reg = gp_spills.GetLastRegSet();
    if (ref_spills.has(reg)) {
      safepoint.DefineTaggedStackSlot(spill_offset);
    gp_spills.clear(reg);
  Drop(num_stack_slots);
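
// C-call helpers: CallCWithStackBuffer and CallC marshal the VarState
// arguments into the C argument registers (kCArgRegs) via ParallelMove, call
// the ExternalReference target, and move a scalar result out of r3
// (kReturnReg) when the caller expects one.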
    const std::initializer_list<VarState> args, const LiftoffRegister* rets,
    ExternalReference ext_ref) {
  constexpr int kNumCCallArgs = 1;
  const LiftoffRegister* result_reg = rets;
  if (return_kind != kVoid) {
    constexpr Register kReturnReg = r3;
    if (kReturnReg != rets->gp()) {
      Move(*rets, LiftoffRegister(kReturnReg), return_kind);
  if (out_argument_kind != kVoid) {
    switch (out_argument_kind) {
                              ExternalReference ext_ref) {
  int num_args = static_cast<int>(args.size());
  ParallelMove parallel_move{this};
    parallel_move.LoadIntoRegister(LiftoffRegister{kCArgRegs[reg_args]}, arg);
  parallel_move.Execute();
                                    compiler::CallDescriptor* call_descriptor,
  CallWasmCodePointer(target);
    compiler::CallDescriptor* call_descriptor, Register target) {
  Label return_nan, done;
                                                   LiftoffRegister src,
                                                   LiftoffRegister tmp_s128,
  if (lane_kind == kF32) {
    xvcmpeqsp(tmp_s128.fp().toSimd(), src.fp().toSimd(), src.fp().toSimd(),
    xvcmpeqdp(tmp_s128.fp().toSimd(), src.fp().toSimd(), src.fp().toSimd(),
  b(all_lanes_true, &done, cr6);
  li(tmp_gp, Operand(1));
  int last_stack_slot = param_slots;
  for (auto& slot : slots_) {
    const int stack_slot = slot.dst_slot_;
    last_stack_slot = stack_slot;
    switch (src.loc()) {
        switch (src.kind()) {
            UseScratchRegisterScope temps(asm_);
            Register scratch = temps.Acquire();
        switch (src.kind()) {
            UseScratchRegisterScope temps(asm_);
            Register scratch = temps.Acquire();
        switch (src.kind()) {
            asm_->mov(scratch, Operand(src.i32_const()));
            asm_->mov(scratch, Operand(int64_t{slot.src_.i32_const()}));
V8_INLINE void RecordComment(const char *comment, const SourceLocation &loc=SourceLocation::Current())
void addi(Register dst, Register src, const Operand &imm)
void mtfsb0(FPSCRBit bit, RCBit rc=LeaveRC)
void and_(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void mr(Register dst, Register src)
void extsw(Register rs, Register ra, RCBit rc=LeaveRC)
void b(int branch_offset, Condition cond=al, RelocInfo::Mode rmode=RelocInfo::NO_INFO)
void bne(Register rj, Register rd, int32_t offset)
void fctiduz(const DoubleRegister frt, const DoubleRegister frb, RCBit rc=LeaveRC)
void add(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void blt(Register rj, Register rd, int32_t offset)
void frsp(const DoubleRegister frt, const DoubleRegister frb, RCBit rc=LeaveRC)
void ble(Label *L, CRegister cr=cr0, LKBit lk=LeaveLK)
friend class UseScratchRegisterScope
void sub(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void xor_(Register dst, int32_t imm32)
static constexpr int kGap
void mcrfs(CRegister cr, FPSCRBit bit)
uint64_t jump_offset(Label *L)
void fcmpu(const DoubleRegister fra, const DoubleRegister frb, CRegister cr=cr0)
void fctiwz(const DoubleRegister frt, const DoubleRegister frb)
void fctiwuz(const DoubleRegister frt, const DoubleRegister frb)
void cmpi(Register src1, const Operand &src2, CRegister cr=cr0)
void fctidz(const DoubleRegister frt, const DoubleRegister frb, RCBit rc=LeaveRC)
void bge(Register rj, Register rd, int32_t offset)
void bunordered(Label *L, CRegister cr=cr0, LKBit lk=LeaveLK)
void fmr(const DoubleRegister frt, const DoubleRegister frb, RCBit rc=LeaveRC)
Assembler(const AssemblerOptions &, std::unique_ptr< AssemblerBuffer >={})
void boverflow(Label *L, CRegister cr=cr0, LKBit lk=LeaveLK)
void stop(Condition cond=al, int32_t code=kDefaultStopCode)
void beq(Register rj, Register rd, int32_t offset)
void bdnz(Label *L, LKBit lk=LeaveLK)
static constexpr int kFixedFrameSizeAboveFp
static V8_EXPORT_PRIVATE ExternalReference isolate_address()
static constexpr MachineType Uint8()
static constexpr MachineType Int32()
static constexpr MachineType Uint32()
static constexpr MachineType Uint16()
static constexpr MachineType Int16()
static constexpr MachineType Int64()
static constexpr MachineType Int8()
void F32x4ExtractLane(DoubleRegister dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch1, Register scratch2, Register scratch3)
void LoadSimd128LE(Simd128Register dst, const MemOperand &mem, Register scratch)
void Abort(AbortReason msg)
void ConvertIntToFloat(Register src, DoubleRegister dst)
void LoadStackLimit(Register destination, StackLimitKind kind)
void MultiPushF64AndV128(DoubleRegList dregs, Simd128RegList simd_regs, Register scratch1, Register scratch2, Register location=sp)
void ConvertIntToDouble(Register src, DoubleRegister dst)
void LoadS16LE(Register dst, const MemOperand &mem, Register scratch)
void Call(Register target, Condition cond=al)
void S128Select(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register mask)
void I64x2UConvertI32x4High(Simd128Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2)
void LoadAndSplat64x2LE(Simd128Register dst, const MemOperand &mem, Register scratch)
void LoadU64LE(Register dst, const MemOperand &mem, Register scratch)
void ByteReverseU64(Register dst, Register val, Register=r0)
void I16x8UConvertI8x16Low(Simd128Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2)
void LoadU8(Register dst, const MemOperand &mem, Register scratch=no_reg)
void Drop(int count, Condition cond=al)
void LoadAndSplat16x8LE(Simd128Register dst, const MemOperand &me, Register scratch)
void I32x4UConvertI16x8Low(Simd128Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2)
void mov(Register rd, Register rj)
void ModS32(Register dst, Register src, Register value)
void ModS64(Register dst, Register src, Register value)
void SmiUntag(Register reg, SBit s=LeaveCC)
void TestIfSmi(Register value, Register scratch)
void MultiPopF64AndV128(DoubleRegList dregs, Simd128RegList simd_regs, Register scratch1, Register scratch2, Register location=sp)
void LoadLane32LE(Simd128Register dst, const MemOperand &mem, int lane, Register scratch1, Simd128Register scratch2)
void StoreF64(DoubleRegister src, const MemOperand &mem, Register scratch=no_reg)
void I32x4ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Simd128Register scratch)
void I8x16ExtractLaneS(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch)
void I64x2ExtractLane(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch)
void LoadV64ZeroLE(Simd128Register dst, const MemOperand &mem, Register scratch1, Simd128Register scratch2)
void MovDoubleLowToInt(Register dst, DoubleRegister src)
void StoreU64WithUpdate(Register src, const MemOperand &mem, Register scratch=no_reg)
void StoreU64LE(Register src, const MemOperand &mem, Register scratch)
void LoadF32(DoubleRegister dst, const MemOperand &mem, Register scratch=no_reg)
void ConvertUnsignedIntToFloat(Register src, DoubleRegister dst)
void ModU64(Register dst, Register src, Register value)
void LoadV32ZeroLE(Simd128Register dst, const MemOperand &mem, Register scratch1, Simd128Register scratch2)
void Move(Register dst, Tagged< Smi > smi)
void LoadAndExtend32x2SLE(Simd128Register dst, const MemOperand &mem, Register scratch)
void I32x4BitMask(Register dst, VRegister src)
void StoreF32LE(DoubleRegister src, const MemOperand &mem, Register scratch, Register scratch2)
void StoreU16LE(Register src, const MemOperand &mem, Register scratch)
void ZeroExtWord32(Register dst, Register src)
void LoadTrustedPointerField(Register destination, MemOperand field_operand, IndirectPointerTag tag)
void I16x8Splat(Simd128Register dst, Register src)
void JumpIfSmi(Register value, Label *smi_label)
void LoadU16(Register dst, const MemOperand &mem, Register scratch=no_reg)
void MultiPush(RegList regs)
void AtomicExchange(MemOperand dst, Register new_value, Register output)
void DivS32(Register dst, Register src, Register value, OEBit s=LeaveOE, RCBit r=LeaveRC)
void LoadS32(Register dst, const MemOperand &mem, Register scratch=no_reg)
void LoadAndExtend8x8SLE(Simd128Register dst, const MemOperand &mem, Register scratch)
void I64x2Mul(Simd128Register dst, Simd128Register src1, Simd128Register src2, Register scratch1, Register scrahc2, Register scratch3, Simd128Register scratch4)
void LoadDoubleLiteral(DoubleRegister result, base::Double value, Register scratch)
void ExtractBitRange(Register dst, Register src, int rangeStart, int rangeEnd, RCBit rc=LeaveRC, bool test=false)
void F64x2Max(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch1, Simd128Register scratch2)
void StoreLane8LE(Simd128Register src, const MemOperand &mem, int lane, Register scratch1, Simd128Register scratch2)
void LoadLane64LE(Simd128Register dst, const MemOperand &mem, int lane, Register scratch1, Simd128Register scratch2)
void LoadS32LE(Register dst, const MemOperand &mem, Register scratch)
void CmpU32(Register src1, const Operand &src2, Register scratch, CRegister cr=cr0)
void LoadU16LE(Register dst, const MemOperand &mem, Register scratch)
void ModU32(Register dst, Register src, Register value)
void AtomicCompareExchange(MemOperand dst, Register old_value, Register new_value, Register output, Register scratch)
void SmiTag(Register reg, SBit s=LeaveCC)
void StoreSimd128(Simd128Register src, const MemOperand &mem, Register scratch)
void StoreLane32LE(Simd128Register src, const MemOperand &mem, int lane, Register scratch1, Simd128Register scratch2)
void I16x8ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Simd128Register scratch)
void MovDoubleToInt64(Register dst, DoubleRegister src)
void F32x4ReplaceLane(Simd128Register dst, Simd128Register src1, DoubleRegister src2, uint8_t imm_lane_idx, Register scratch1, DoubleRegister scratch2, Simd128Register scratch3)
void I8x16Shuffle(Simd128Register dst, Simd128Register src1, Simd128Register src2, uint64_t high, uint64_t low, Register scratch1, Register scratch2, Simd128Register scratch3)
void AddS64(Register dst, Register src, const Operand &value, Register scratch=r0, OEBit s=LeaveOE, RCBit r=LeaveRC)
void ConvertUnsignedInt64ToFloat(Register src, DoubleRegister double_dst)
void I16x8ExtractLaneS(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch)
void SubS64(Register dst, Register src, const Operand &value, Register scratch=r0, OEBit s=LeaveOE, RCBit r=LeaveRC)
void StoreF64LE(DoubleRegister src, const MemOperand &mem, Register scratch, Register scratch2)
void MovFloatToInt(Register dst, DoubleRegister src, DoubleRegister scratch)
void I16x8ExtractLaneU(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch)
void PushCommonFrame(Register marker_reg=no_reg)
void li(Register rd, Operand j, LiFlags mode=OPTIMIZE_SIZE)
void StoreF32(DoubleRegister src, const MemOperand &mem, Register scratch=no_reg)
void V128AnyTrue(Register dst, Simd128Register src, Register scratch1, Register scratch2, Simd128Register scratch3)
void CmpU64(Register src1, const Operand &src2, Register scratch, CRegister cr=cr0)
void MultiPop(RegList regs)
void LoadF64(DoubleRegister dst, const MemOperand &mem, Register scratch=no_reg)
void I32x4Splat(Simd128Register dst, Register src)
void LoadSimd128(Simd128Register dst, const MemOperand &mem, Register scratch)
void Jump(Register target, Condition cond=al)
void StoreU32(Register src, const MemOperand &mem, Register scratch)
void LoadAndSplat8x16LE(Simd128Register dst, const MemOperand &mem, Register scratch)
void I8x16ExtractLaneU(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch)
void StoreTaggedField(const Register &value, const MemOperand &dst_field_operand)
void MovInt64ToDouble(DoubleRegister dst, Register src)
void LoadF32LE(DoubleRegister dst, const MemOperand &mem, Register scratch, Register scratch2)
void LoadU32(Register dst, const MemOperand &mem, Register scratch=no_reg)
void F64x2ConvertLowI32x4U(QwNeonRegister dst, QwNeonRegister src)
void LoadTaggedField(const Register &destination, const MemOperand &field_operand)
void S128Const(Simd128Register dst, uint64_t high, uint64_t low, Register scratch1, Register scratch2)
void I64x2Splat(Simd128Register dst, Register src)
void ConvertUnsignedIntToDouble(Register src, DoubleRegister dst)
void ConvertUnsignedInt64ToDouble(Register src, DoubleRegister double_dst)
void I32x4UConvertI16x8High(Simd128Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2)
void LoadS8(Register dst, const MemOperand &mem, Register scratch=no_reg)
void F64x2Min(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch1, Simd128Register scratch2)
void CheckPageFlag(Register object, int mask, Condition cc, Label *condition_met)
void I16x8BitMask(Register dst, VRegister src)
void I32x4ExtractLane(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch)
int CallCFunction(ExternalReference function, int num_arguments, SetIsolateDataSlots set_isolate_data_slots=SetIsolateDataSlots::kYes, Label *return_label=nullptr)
void I64x2ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Simd128Register scratch)
void I64x2BitMask(Register dst, QwNeonRegister src)
void LoadLane8LE(Simd128Register dst, const MemOperand &mem, int lane, Register scratch1, Simd128Register scratch2)
void LoadAndExtend8x8ULE(Simd128Register dst, const MemOperand &mem, Register scratch1, Simd128Register scratch2)
void ConvertDoubleToUnsignedInt64(const DoubleRegister double_input, const Register dst, const DoubleRegister double_dst, FPRoundingMode rounding_mode=kRoundToZero)
void DivU32(Register dst, Register src, Register value, OEBit s=LeaveOE, RCBit r=LeaveRC)
void MulS64(Register dst, Register src, const Operand &value, Register scratch=r0, OEBit s=LeaveOE, RCBit r=LeaveRC)
void LoadAndSplat32x4LE(Simd128Register dst, const MemOperand &mem, Register scratch)
void ShiftLeftU64(Register dst, Register src, const Operand &value, RCBit r=LeaveRC)
void CmpS64(Register src1, const Operand &src2, Register scratch, CRegister cr=cr0)
void DivU64(Register dst, Register src, Register value, OEBit s=LeaveOE, RCBit r=LeaveRC)
void LoadU32LE(Register dst, const MemOperand &mem, Register scratch)
void MovIntToFloat(DoubleRegister dst, Register src, Register scratch)
void I32x4DotI8x16AddS(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register src3)
void F64x2ExtractLane(DoubleRegister dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch1, Register scratch2)
void AllocateStackSpace(Register bytes)
void LoadAndExtend16x4SLE(Simd128Register dst, const MemOperand &mem, Register scratch)
void I16x8UConvertI8x16High(Simd128Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2)
void LoadAndExtend32x2ULE(Simd128Register dst, const MemOperand &mem, Register scratch1, Simd128Register scratch2)
void CmpS32(Register src1, const Operand &src2, Register scratch, CRegister cr=cr0)
void StoreU64(Register src, const MemOperand &mem, Register scratch=no_reg)
void StoreU16(Register src, const MemOperand &mem, Register scratch)
void StoreU8(Register src, const MemOperand &mem, Register scratch)
void CallRecordWriteStubSaveRegisters(Register object, Operand offset, SaveFPRegsMode fp_mode, StubCallMode mode=StubCallMode::kCallBuiltinPointer)
void I8x16BitMask(Register dst, VRegister src, VRegister temp=NoVReg)
void DivS64(Register dst, Register src, Register value, OEBit s=LeaveOE, RCBit r=LeaveRC)
void PrepareCallCFunction(int num_reg_arguments, int num_double_registers=0, Register scratch=no_reg)
void LoadU64(Register dst, const MemOperand &mem, Register scratch=no_reg)
void I64x2UConvertI32x4Low(Simd128Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2)
void I8x16Splat(Simd128Register dst, Register src)
void LoadF64LE(DoubleRegister dst, const MemOperand &mem, Register scratch, Register scratch2)
void F32x4Splat(Simd128Register dst, DoubleRegister src, DoubleRegister scratch1, Register scratch2)
void ByteReverseU16(Register dst, Register val, Register scratch)
void LoadAndExtend16x4ULE(Simd128Register dst, const MemOperand &mem, Register scratch1, Simd128Register scratch2)
void StoreU32LE(Register src, const MemOperand &mem, Register scratch)
void I8x16ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Simd128Register scratch)
void F64x2ReplaceLane(Simd128Register dst, Simd128Register src1, DoubleRegister src2, uint8_t imm_lane_idx, Register scratch1, Simd128Register scratch2)
void ConvertInt64ToDouble(Register src, DoubleRegister double_dst)
void StoreSimd128LE(Simd128Register src, const MemOperand &mem, Register scratch1, Simd128Register scratch2)
void ConvertInt64ToFloat(Register src, DoubleRegister double_dst)
void F64x2Splat(Simd128Register dst, DoubleRegister src, Register scratch)
void LoadS16(Register dst, const MemOperand &mem, Register scratch=no_reg)
void StoreLane16LE(Simd128Register src, const MemOperand &mem, int lane, Register scratch1, Simd128Register scratch2)
void ByteReverseU32(Register dst, Register val, Register scratch)
void StoreLane64LE(Simd128Register src, const MemOperand &mem, int lane, Register scratch1, Simd128Register scratch2)
void LoadLane16LE(Simd128Register dst, const MemOperand &mem, int lane, Register scratch1, Simd128Register scratch2)
static constexpr MainThreadFlags kPointersToHereAreInterestingMask
static constexpr MainThreadFlags kPointersFromHereAreInterestingMask
static V8_INLINE Operand Zero()
static constexpr RegListBase FromBits()
static constexpr Tagged< Smi > FromInt(int value)
static constexpr int32_t TypeToMarker(Type type)
static constexpr int kFrameTypeOffset
static constexpr Register GapRegister()
static constexpr int kInstanceDataOffset
static constexpr int kFeedbackVectorOffset
void emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst, LiftoffRegister src)
void emit_store_nonzero_if_nan(Register dst, DoubleRegister src, ValueKind kind)
void AtomicXor(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, bool i64_offset)
void emit_i64_shli(LiftoffRegister dst, LiftoffRegister src, int32_t amount)
void emit_i8x16_extract_lane_s(LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx)
bool emit_f16x8_qfms(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, LiftoffRegister src3)
bool emit_f16x8_extract_lane(LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx)
void emit_i32_rems(Register dst, Register lhs, Register rhs, Label *trap_rem_by_zero)
void emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_store_nonzero(Register dst)
void emit_i32_eqz(Register dst, Register src)
void emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst, LiftoffRegister src)
void RecordUsedSpillOffset(int offset)
void emit_i16x8_splat(LiftoffRegister dst, LiftoffRegister src)
void FillI64Half(Register, int offset, RegPairHalf)
void emit_f64x2_splat(LiftoffRegister dst, LiftoffRegister src)
bool emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label *trap_rem_by_zero)
void CallCWithStackBuffer(const std::initializer_list< VarState > args, const LiftoffRegister *rets, ValueKind return_kind, ValueKind out_argument_kind, int stack_bytes, ExternalReference ext_ref)
bool emit_f32_nearest_int(DoubleRegister dst, DoubleRegister src)
void emit_f64x2_replace_lane(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, uint8_t imm_lane_idx)
void emit_i64_muli(LiftoffRegister dst, LiftoffRegister lhs, int32_t imm)
void emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst, LiftoffRegister src)
void TailCallNativeWasmCode(Address addr)
void SpillInstanceData(Register instance)
void RecordOolSpillSpaceSize(int size)
void AtomicAdd(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, bool i64_offset)
void AtomicSub(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, bool i64_offset)
void LoadTransform(LiftoffRegister dst, Register src_addr, Register offset_reg, uintptr_t offset_imm, LoadType type, LoadTransformationKind transform, uint32_t *protected_load_pc, bool i64_offset)
void emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst, LiftoffRegister src)
void emit_s128_store_nonzero_if_nan(Register dst, LiftoffRegister src, Register tmp_gp, LiftoffRegister tmp_s128, ValueKind lane_kind)
void emit_i32x4_splat(LiftoffRegister dst, LiftoffRegister src)
void AtomicAnd(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, bool i64_offset)
void AtomicCompareExchange(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister value, StoreType type, bool i64_offset)
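// A hedged sketch of the semantics AtomicCompareExchange provides, written
// with std::atomic instead of the lwarx/stwcx.-style retry loop the assembler
// actually emits; names here are illustrative only.
#include <atomic>
#include <cstdint>

inline uint32_t cas_u32_sketch(std::atomic<uint32_t>& cell, uint32_t expected,
                               uint32_t new_value) {
  // Stores |new_value| only if the cell still holds |expected|; either way the
  // value observed in memory ends up in |expected|, which is what Wasm's
  // cmpxchg returns.
  cell.compare_exchange_strong(expected, new_value);
  return expected;
}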
void emit_i32x4_dot_i8x16_i7x16_add_s(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, LiftoffRegister acc)
void emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst, LiftoffRegister src)
void emit_f64_set_cond(Condition condition, Register dst, DoubleRegister lhs, DoubleRegister rhs)
void Fill(LiftoffRegister, int offset, ValueKind)
void emit_i64x2_extract_lane(LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx)
void emit_f32x4_replace_lane(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, uint8_t imm_lane_idx)
void LoadFullPointer(Register dst, Register src_addr, int32_t offset_imm)
void DeallocateStackSlot(uint32_t size)
void StackCheck(Label *ool_code)
bool emit_f16x8_replace_lane(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, uint8_t imm_lane_idx)
void Store(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister src, StoreType type, LiftoffRegList pinned, uint32_t *protected_store_pc=nullptr, bool is_store_mem=false, bool i64_offset=false)
void Load(LiftoffRegister dst, Register src_addr, Register offset_reg, uintptr_t offset_imm, LoadType type, uint32_t *protected_load_pc=nullptr, bool is_load_mem=false, bool i64_offset=false, bool needs_shift=false)
void emit_i32_divs(Register dst, Register lhs, Register rhs, Label *trap_div_by_zero, Label *trap_div_unrepresentable)
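// A hedged sketch of the guards emit_i32_divs must emit before dividing; the
// real code branches to the two trap labels instead of returning a status.
#include <cstdint>
#include <limits>

enum class DivStatusSketch { kOk, kDivByZero, kUnrepresentable };

inline DivStatusSketch i32_divs_guards_sketch(int32_t lhs, int32_t rhs) {
  if (rhs == 0) return DivStatusSketch::kDivByZero;  // -> trap_div_by_zero
  if (lhs == std::numeric_limits<int32_t>::min() && rhs == -1) {
    return DivStatusSketch::kUnrepresentable;        // -> trap_div_unrepresentable
  }
  return DivStatusSketch::kOk;                       // safe to divide
}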
void LoadLane(LiftoffRegister dst, LiftoffRegister src, Register addr, Register offset_reg, uintptr_t offset_imm, LoadType type, uint8_t lane, uint32_t *protected_load_pc, bool i64_offset)
void emit_i32x4_bitmask(LiftoffRegister dst, LiftoffRegister src)
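// A hedged sketch of what i32x4.bitmask computes: the sign bit of each 32-bit
// lane packed into the low four bits of a scalar result (scalar model, not the
// vector sequence the assembler emits).
#include <cstdint>

inline uint32_t i32x4_bitmask_sketch(const int32_t lanes[4]) {
  uint32_t mask = 0;
  for (int i = 0; i < 4; ++i) {
    mask |= (static_cast<uint32_t>(lanes[i]) >> 31) << i;
  }
  return mask;
}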
void emit_i16x8_extract_lane_s(LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx)
void CallBuiltin(Builtin builtin)
void CallFrameSetupStub(int declared_function_index)
void emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst, LiftoffRegister src)
void emit_i32_remu(Register dst, Register lhs, Register rhs, Label *trap_rem_by_zero)
void LoadSpillAddress(Register dst, int offset, ValueKind kind)
void emit_i64x2_replace_lane(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, uint8_t imm_lane_idx)
void StoreCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx, ValueKind, Register frame_pointer)
void AtomicExchange(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, bool i64_offset)
void emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void Spill(VarState *slot)
void emit_s128_select(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, LiftoffRegister mask)
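// A hedged scalar sketch of s128.select (bitselect): bits come from src1 where
// the mask bit is 1 and from src2 where it is 0. Hardware does this in a single
// vector select; the loop below is only the definition.
#include <cstddef>
#include <cstdint>

inline void s128_select_sketch(uint8_t dst[16], const uint8_t src1[16],
                               const uint8_t src2[16], const uint8_t mask[16]) {
  for (size_t i = 0; i < 16; ++i) {
    dst[i] = (src1[i] & mask[i]) | (src2[i] & ~mask[i]);
  }
}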
bool emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label *trap_rem_by_zero)
void emit_i8x16_replace_lane(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, uint8_t imm_lane_idx)
void AssertUnreachable(AbortReason reason)
void CallC(const std::initializer_list< VarState > args, ExternalReference ext_ref)
void emit_i64x2_bitmask(LiftoffRegister dst, LiftoffRegister src)
bool emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label *trap_div_by_zero)
void emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
bool emit_f64x2_nearest_int(LiftoffRegister dst, LiftoffRegister src)
void DropStackSlotsAndRet(uint32_t num_stack_slots)
void emit_s128_relaxed_laneselect(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, LiftoffRegister mask, int lane_width)
void emit_f64x2_extract_lane(LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx)
void emit_i16x8_replace_lane(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, uint8_t imm_lane_idx)
void LoadConstant(LiftoffRegister, WasmValue)
void emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst, LiftoffRegister src)
int GetTotalFrameSize() const
void CallNativeWasmCode(Address addr)
void PrepareTailCall(int num_callee_stack_params, int stack_param_delta)
bool emit_f64_nearest_int(DoubleRegister dst, DoubleRegister src)
void emit_i32_divu(Register dst, Register lhs, Register rhs, Label *trap_div_by_zero)
void emit_cond_jump(Condition, Label *, ValueKind value, Register lhs, Register rhs, const FreezeCacheState &frozen)
void LoadFromInstance(Register dst, Register instance, int offset, int size)
void emit_smi_check(Register obj, Label *target, SmiCheckMode mode, const FreezeCacheState &frozen)
static bool NeedsAlignment(ValueKind kind)
void emit_f32x4_extract_lane(LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx)
static int SlotSizeForType(ValueKind kind)
void LoadProtectedPointer(Register dst, Register src_addr, int32_t offset)
bool supports_f16_mem_access()
void emit_i8x16_extract_lane_u(LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx)
void emit_f32_set_cond(Condition condition, Register dst, DoubleRegister lhs, DoubleRegister rhs)
void LoadInstanceDataFromFrame(Register dst)
bool emit_f32x4_nearest_int(LiftoffRegister dst, LiftoffRegister src)
void emit_i64_set_cond(Condition condition, Register dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i32x4_replace_lane(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, uint8_t imm_lane_idx)
void MoveStackValue(uint32_t dst_offset, uint32_t src_offset, ValueKind)
void Move(LiftoffRegister dst, LiftoffRegister src, ValueKind)
void AtomicStore(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister src, StoreType type, LiftoffRegList pinned, bool i64_offset)
static constexpr int kStackSlotSize
void PatchPrepareStackFrame(int offset, SafepointTableBuilder *, bool feedback_vector_slot, size_t stack_param_slots)
void emit_ptrsize_cond_jumpi(Condition, Label *, Register lhs, int32_t imm, const FreezeCacheState &frozen)
void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx, ValueKind)
CacheState * cache_state()
void emit_i32_set_cond(Condition, Register dst, Register lhs, Register rhs)
bool emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label *trap_div_by_zero, Label *trap_div_unrepresentable)
void TailCallIndirect(compiler::CallDescriptor *call_descriptor, Register target)
void emit_i16x8_bitmask(LiftoffRegister dst, LiftoffRegister src)
void emit_i64x2_splat(LiftoffRegister dst, LiftoffRegister src)
void AllocateStackSlot(Register addr, uint32_t size)
void emit_i32x4_extract_lane(LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx)
void LoadTaggedPointer(Register dst, Register src_addr, Register offset_reg, int32_t offset_imm, uint32_t *protected_load_pc=nullptr, bool offset_reg_needs_shift=false)
void PushRegisters(LiftoffRegList)
void emit_f32x4_splat(LiftoffRegister dst, LiftoffRegister src)
void emit_i32_cond_jumpi(Condition, Label *, Register lhs, int imm, const FreezeCacheState &frozen)
void emit_i64_eqz(Register dst, LiftoffRegister src)
void StoreTaggedPointer(Register dst_addr, Register offset_reg, int32_t offset_imm, Register src, LiftoffRegList pinned, uint32_t *protected_store_pc=nullptr, SkipWriteBarrier=kNoSkipWriteBarrier)
void bailout(LiftoffBailoutReason reason, const char *detail)
void IncrementSmi(LiftoffRegister dst, int offset)
void PopRegisters(LiftoffRegList)
LiftoffRegister GetUnusedRegister(RegClass rc, std::initializer_list< LiftoffRegister > try_first, LiftoffRegList pinned)
void emit_i8x16_splat(LiftoffRegister dst, LiftoffRegister src)
void emit_i8x16_shuffle(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, const uint8_t shuffle[16], bool is_swizzle)
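// A hedged scalar sketch of i8x16.shuffle: every output byte selects one of the
// 32 bytes of lhs ++ rhs by immediate index. PPC typically realizes this with a
// single permute (vperm-style) instruction; the loop is only the definition.
#include <cstddef>
#include <cstdint>

inline void i8x16_shuffle_sketch(uint8_t dst[16], const uint8_t lhs[16],
                                 const uint8_t rhs[16],
                                 const uint8_t shuffle[16]) {
  for (size_t i = 0; i < 16; ++i) {
    const uint8_t idx = shuffle[i];  // 0..31 over the concatenated inputs
    dst[i] = idx < 16 ? lhs[idx] : rhs[idx - 16];
  }
}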
void AtomicOr(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, bool i64_offset)
bool emit_select(LiftoffRegister dst, Register condition, LiftoffRegister true_value, LiftoffRegister false_value, ValueKind kind)
bool emit_type_conversion(WasmOpcode opcode, LiftoffRegister dst, LiftoffRegister src, Label *trap=nullptr)
void AtomicLoad(LiftoffRegister dst, Register src_addr, Register offset_reg, uintptr_t offset_imm, LoadType type, LiftoffRegList pinned, bool i64_offset)
void LoadReturnStackSlot(LiftoffRegister, int offset, ValueKind)
void LoadTaggedPointerFromInstance(Register dst, Register instance, int offset)
void emit_v128_anytrue(LiftoffRegister dst, LiftoffRegister src)
void FillStackSlotsWithZero(int start, int size)
void emit_s128_const(LiftoffRegister dst, const uint8_t imms[16])
void StoreLane(Register dst, Register offset, uintptr_t offset_imm, LiftoffRegister src, StoreType type, uint8_t lane, uint32_t *protected_store_pc, bool i64_offset)
Register LoadOldFramePointer()
void CheckTierUp(int declared_func_index, int budget_used, Label *ool_label, const FreezeCacheState &frozen)
void emit_i16x8_extract_lane_u(LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx)
void CallIndirect(const ValueKindSig *sig, compiler::CallDescriptor *call_descriptor, Register target)
void RecordSpillsInSafepoint(SafepointTableBuilder::Safepoint &safepoint, LiftoffRegList all_spills, LiftoffRegList ref_spills, int spill_offset)
bool emit_f16x8_qfma(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, LiftoffRegister src3)
void clear_i32_upper_half(Register dst)
void LoadTrustedPointer(Register dst, Register src_addr, int offset, IndirectPointerTag tag)
static constexpr int StaticStackFrameSize()
void emit_i8x16_bitmask(LiftoffRegister dst, LiftoffRegister src)
constexpr unsigned GetNumRegsSet() const
constexpr Register gp() const
base::SmallVector< Slot, 8 > slots_
static int SlotSizeInBytes(const Slot &slot)
void Construct(int param_slots)
LiftoffAssembler *const asm_
static constexpr int ToTagged(int offset)
#define EMIT_SIMD_BINOP_WITH_SCRATCH(name)
#define EMIT_SIMD_UNOP(name)
#define EMIT_SIMD_QFM(name)
#define EMIT_SIMD_ALL_TRUE(name)
#define EMIT_SIMD_BINOP(name)
#define EMIT_SIMD_EXT_ADD_PAIRWISE(name)
#define EMIT_SIMD_UNOP_WITH_SCRATCH(name)
#define COMPRESS_POINTERS_BOOL
#define V8_EMBEDDED_CONSTANT_POOL_BOOL
#define V8_ENABLE_SANDBOX_BOOL
#define EMIT_UNOP_FUNCTION(name, instr, dtype, stype, dcast, scast, rcast, ret, return_type)
#define SIMD_VISIT_RELAXED_BINOP(name, op)
#define SIMD_RELAXED_BINOP_LIST(V)
#define EMIT_BINOP_FUNCTION(name, instr, dtype, stype1, stype2, dcast, scast1, scast2, rcast, ret, return_type)
#define SIMD_RELAXED_UNOP_LIST(V)
#define EMIT_SIMD_SHIFT_RR(name, op)
#define SIMD_SHIFT_RI_LIST(V)
#define SIMD_VISIT_RELAXED_UNOP(name, op)
#define VISIT_F16_BINOP(name)
#define F16_BINOP_LIST(V)
#define VISIT_F16_UNOP(name)
#define EMIT_SIMD_SHIFT_RI(name, op, mask)
#define SIMD_SHIFT_RR_LIST(V)
MovableLabel continuation
LiftoffRegList regs_to_save
std::optional< OolTrapLabel > trap
#define ATOMIC_OP(op, type, kind)
#define SIMD_UNOP_LIST(V)
#define SIMD_ALL_TRUE_LIST(V)
#define SIMD_BINOP_WITH_SCRATCH_LIST(V)
#define SIMD_BINOP_LIST(V)
#define SIMD_UNOP_WITH_SCRATCH_LIST(V)
#define SIMD_EXT_ADD_PAIRWISE_LIST(V)
constexpr bool IsPowerOfTwo(T value)
constexpr int WhichPowerOfTwo(T value)
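// Hedged sketches of the two bit helpers above; V8's real versions live in
// base/bits.h and are templated, these cover only unsigned 32-bit values.
#include <cstdint>

constexpr bool is_power_of_two_sketch(uint32_t value) {
  return value != 0 && (value & (value - 1)) == 0;
}

constexpr int which_power_of_two_sketch(uint32_t value) {
  // Precondition: is_power_of_two_sketch(value); returns log2(value).
  int n = 0;
  while (value > 1u) {
    value >>= 1;
    ++n;
  }
  return n;
}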
void StoreToMemory(LiftoffAssembler *assm, MemOperand dst, const LiftoffAssembler::VarState &src)
MemOperand GetStackSlot(int offset)
MemOperand GetInstanceDataOperand()
MemOperand GetMemOp(LiftoffAssembler *assm, UseScratchRegisterScope *temps, Register addr, Register offset, int32_t offset_imm, unsigned shift_amount=0)
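// A hedged sketch of the effective address GetMemOp has to materialize:
// base + (index << shift) + immediate, where such helpers typically fall back
// to a scratch register when the immediate does not fit an instruction field.
#include <cstdint>

inline uintptr_t effective_address_sketch(uintptr_t addr, uintptr_t offset,
                                          int32_t offset_imm,
                                          unsigned shift_amount) {
  return addr + (offset << shift_amount) +
         static_cast<uintptr_t>(static_cast<intptr_t>(offset_imm));
}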
constexpr DoubleRegister kFpReturnRegisters[]
constexpr Register kGpParamRegisters[]
constexpr DoubleRegister kFpParamRegisters[]
constexpr Register r0
LiftoffBailoutReason kUnsupportedArchitecture
constexpr Register kGpReturnRegisters[]
int declared_function_index(const WasmModule *module, int func_index)
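// A hedged sketch: the "declared" index is the function index with imported
// functions stripped off; the struct below is a stand-in, not the real
// WasmModule.
struct ModuleSketch {
  int num_imported_functions;
};

inline int declared_function_index_sketch(const ModuleSketch* module,
                                           int func_index) {
  return func_index - module->num_imported_functions;
}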
constexpr int value_kind_size(ValueKind kind)
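// A hedged sketch of value_kind_size for the numeric kinds only (the
// enumerators below are stand-ins; reference kinds use the tagged pointer
// size, which depends on pointer compression).
enum class KindSketch { kI32, kF32, kI64, kF64, kS128 };

constexpr int value_kind_size_sketch(KindSketch kind) {
  switch (kind) {
    case KindSketch::kI32:
    case KindSketch::kF32:
      return 4;
    case KindSketch::kI64:
    case KindSketch::kF64:
      return 8;
    case KindSketch::kS128:
      return 16;
  }
  return 0;
}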
static constexpr LiftoffRegList kGpCacheRegList
static constexpr LiftoffRegList kFpCacheRegList
constexpr bool is_reference(ValueKind kind)
LiftoffAssembler::ValueKindSig ValueKindSig
constexpr Register no_reg
RegListBase< DoubleRegister > DoubleRegList
constexpr int kSimd128Size
DwVfpRegister DoubleRegister
constexpr DoubleRegister kScratchDoubleReg
constexpr Simd128Register kScratchSimd128Reg
const int kStackFrameExtraParamSlot
IndirectPointerTag kWasmInternalFunctionIndirectPointerTag
constexpr Register kScratchReg2
constexpr int kSystemPointerSize
constexpr Register kReturnRegister0
constexpr bool SmiValuesAre31Bits()
Condition NegateCondition(Condition cond)
V8_EXPORT_PRIVATE FlagValues v8_flags
constexpr VFPRoundingMode kRoundToZero
constexpr uint8_t kInstrSize
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2=no_reg, Register reg3=no_reg, Register reg4=no_reg, Register reg5=no_reg, Register reg6=no_reg)
constexpr Register kCArgRegs[]
std::unique_ptr< AssemblerBuffer > ExternalAssemblerBuffer(void *start, int size)
constexpr int kDoubleSize
constexpr Simd128Register kScratchSimd128Reg2
Condition to_condition(Condition cond)
bool is_signed(Condition cond)
static V ByteReverse(V value)
#define DCHECK_LE(v1, v2)
#define DCHECK_NE(v1, v2)
#define DCHECK(condition)
#define DCHECK_LT(v1, v2)
#define DCHECK_EQ(v1, v2)
constexpr T RoundUp(T x, intptr_t m)
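// A hedged sketch of RoundUp: round x up to the next multiple of m, as used
// here for stack and frame-size alignment.
#include <cstdint>

constexpr intptr_t round_up_sketch(intptr_t x, intptr_t m) {
  return ((x + m - 1) / m) * m;  // assumes x >= 0 and m > 0
}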
Register cached_instance_data
LiftoffRegList used_registers
#define V8_LIKELY(condition)