#ifndef V8_WASM_BASELINE_RISCV_LIFTOFF_ASSEMBLER_RISCV32_INL_H_
#define V8_WASM_BASELINE_RISCV_LIFTOFF_ASSEMBLER_RISCV32_INL_H_

#if defined(V8_TARGET_BIG_ENDIAN)

inline MemOperand GetMemOp(LiftoffAssembler* assm,
                           UseScratchRegisterScope* temps, Register addr,
                           Register offset, int32_t offset_imm,
                           unsigned shift_amount = 0) {
  if (is_uint31(offset_imm)) {
    int32_t offset_imm32 = static_cast<int32_t>(offset_imm);
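    // When the immediate fits in 31 bits it can be folded straight into the
    // MemOperand (after scaling the register offset if needed); otherwise
    // the combined offset is materialized in a scratch register.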
 
    if (shift_amount != 0) {

    if (shift_amount != 0) {
 
 
      assm->Lw(dst.gp(), src);
 
      if (src.offset() != 0) {
        assm->AddWord(src_reg, src.rm(), src.offset());
      }
      assm->vl(dst.fp().toV(), src_reg, 0, E8);
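      // RVV loads/stores have no offset field, so a non-zero offset must be
      // folded into a scratch base register before the vl.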
 
inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
                  LiftoffRegister src, ValueKind kind) {
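  // On riscv32 an i64 lives in a GP register pair and is stored as two
  // 32-bit words at kLowWordOffset / kHighWordOffset within the slot.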
 
      assm->Sw(src.gp(), dst);

      assm->Sw(src.low_gp(),
               MemOperand(base, offset + liftoff::kLowWordOffset));

      assm->Sw(src.high_gp(),
               MemOperand(base, offset + liftoff::kHighWordOffset));

      assm->StoreFloat(src.fp(), dst);

      assm->StoreDouble(src.fp(), dst);

      if (dst.offset() != 0) {
        assm->AddWord(kScratchReg, dst.rm(), dst.offset());
      }
      assm->vs(src.fp().toV(), dst_reg, 0, VSew::E8);
 
      assm->Push(reg.high_gp(), reg.low_gp());

      assm->vs(reg.fp().toV(), sp, 0, VSew::E8);
 
  switch (value.type().kind()) {

      int32_t low_word = value.to_i64();
      int32_t high_word = value.to_i64() >> 32;
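      // A 64-bit constant is materialized as two independent 32-bit loads,
      // one per register of the destination pair.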
 
                                       value.to_f32_boxed().get_bits());

                                       value.to_f64_boxed().get_bits());
 
                                       uint32_t* protected_load_pc,

  Load(LiftoffRegister(dst), src_addr, offset_reg,
       static_cast<uint32_t>(offset_imm), LoadType::kI32Load, protected_load_pc,
       false, false, needs_shift);
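  // Without pointer compression, a tagged pointer on riscv32 is a plain
  // 32-bit word, so the tagged load reuses the generic i32 load path.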
 
                                       int32_t offset_imm) {

  LoadWord(dst, src_op);
 
                                          int32_t offset_imm, Register src,
                                          LiftoffRegList pinned,
                                          uint32_t* protected_store_pc,
                                          SkipWriteBarrier skip_write_barrier) {

  Register actual_offset_reg = offset_reg;
  if (offset_reg != no_reg && offset_imm != 0) {
    if (cache_state()->is_used(LiftoffRegister(offset_reg))) {

      actual_offset_reg = temps.Acquire();

    Add32(actual_offset_reg, offset_reg, Operand(offset_imm));

  if (actual_offset_reg == no_reg) {

  auto trapper = [protected_store_pc](int offset) {
    if (protected_store_pc) *protected_store_pc = static_cast<uint32_t>(offset);
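  // The trapper callback records the pc of the emitted store so the
  // out-of-bounds trap handler can map a memory fault back to this access.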
 
  StoreWord(src, dst_op, trapper);
  if (protected_store_pc) {

  if (skip_write_barrier || v8_flags.disable_write_barriers) return;

      actual_offset_reg == no_reg ? Operand(offset_imm)
                                  : Operand(actual_offset_reg),
 
                            Register offset_reg, uintptr_t offset_imm,
                            LoadType type, uint32_t* protected_load_pc,

  unsigned shift_amount = needs_shift ? type.size_log_2() : 0;

  auto trapper = [protected_load_pc](int offset) {
    if (protected_load_pc) *protected_load_pc = static_cast<uint32_t>(offset);
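  // i64 loads below fill a register pair: the low register receives the
  // loaded value, the high register its zero- or sign-extension.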
 
  switch (type.value()) {
    case LoadType::kI32Load8U:
      Lbu(dst.gp(), src_op, trapper);
      break;
    case LoadType::kI64Load8U:
      Lbu(dst.low_gp(), src_op, trapper);
      mv(dst.high_gp(), zero_reg);
      break;
    case LoadType::kI32Load8S:
      Lb(dst.gp(), src_op, trapper);
      break;
    case LoadType::kI64Load8S:
      Lb(dst.low_gp(), src_op, trapper);
      srai(dst.high_gp(), dst.low_gp(), 31);
      break;
    case LoadType::kI32Load16U:
      Lhu(dst.gp(), src_op, trapper);
      break;
    case LoadType::kI64Load16U:
      Lhu(dst.low_gp(), src_op, trapper);
      mv(dst.high_gp(), zero_reg);
      break;
    case LoadType::kI32Load16S:
      Lh(dst.gp(), src_op, trapper);
      break;
    case LoadType::kI64Load16S:
      Lh(dst.low_gp(), src_op, trapper);
      srai(dst.high_gp(), dst.low_gp(), 31);
      break;
    case LoadType::kI64Load32U:
      Lw(dst.low_gp(), src_op, trapper);
      mv(dst.high_gp(), zero_reg);
      break;
    case LoadType::kI64Load32S:
      Lw(dst.low_gp(), src_op, trapper);
      srai(dst.high_gp(), dst.low_gp(), 31);
      break;
    case LoadType::kI32Load:
      Lw(dst.gp(), src_op, trapper);
      break;
    case LoadType::kI64Load: {
      Lw(dst.low_gp(), src_op, trapper);

      Lw(dst.high_gp(), src_op);
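      // Only the low-word access is registered with the trap handler; the
      // elided lines in between rebuild src_op to point at the high word.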
 
    case LoadType::kF32Load:
      LoadFloat(dst.fp(), src_op, trapper);
      break;
    case LoadType::kF64Load:
      LoadDouble(dst.fp(), src_op, trapper);
      break;
    case LoadType::kS128Load: {

      if (src_op.offset() != 0) {
        AddWord(src_reg, src_op.rm(), src_op.offset());
      }
      vl(dst.fp().toV(), src_reg, 0, E8);

    case LoadType::kF32LoadF16:

  if (protected_load_pc) {
 
#if defined(V8_TARGET_BIG_ENDIAN)
    pinned.set(src_op.rm());
    liftoff::ChangeEndiannessLoad(this, dst, type, pinned);
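    // Wasm memory is always little-endian, so big-endian targets must
    // byte-swap every value after loading (and before storing, below).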
 
                             uintptr_t offset_imm, LiftoffRegister src,
                             StoreType type, LiftoffRegList pinned,
                             uint32_t* protected_store_pc, bool is_store_mem,

#if defined(V8_TARGET_BIG_ENDIAN)
    pinned.set(dst_op.rm());

    Move(tmp, src, type.value_type());

    liftoff::ChangeEndiannessStore(this, src, type, pinned);
 
  auto trapper = [protected_store_pc](int offset) {
    if (protected_store_pc) *protected_store_pc = static_cast<uint32_t>(offset);

  switch (type.value()) {
    case StoreType::kI32Store8:
      Sb(src.gp(), dst_op, trapper);
      break;
    case StoreType::kI64Store8:
      Sb(src.low_gp(), dst_op, trapper);
      break;
    case StoreType::kI32Store16:
      Sh(src.gp(), dst_op, trapper);
      break;
    case StoreType::kI64Store16:
      Sh(src.low_gp(), dst_op, trapper);
      break;
    case StoreType::kI32Store:
      Sw(src.gp(), dst_op, trapper);
      break;
    case StoreType::kI64Store32:
      Sw(src.low_gp(), dst_op, trapper);
      break;
    case StoreType::kI64Store: {
      Sw(src.low_gp(), dst_op, trapper);

      Sw(src.high_gp(), dst_op, trapper);

    case StoreType::kF32Store:
      StoreFloat(src.fp(), dst_op, trapper);
      break;
    case StoreType::kF64Store:
      StoreDouble(src.fp(), dst_op, trapper);
      break;
    case StoreType::kS128Store: {

      if (dst_op.offset() != 0) {
        AddWord(kScratchReg, dst_op.rm(), dst_op.offset());
      }
      vs(src.fp().toV(), dst_reg, 0, VSew::E8);

  if (protected_store_pc) {
 
                                       uintptr_t offset_imm,

  if (offset_reg == no_reg && offset_imm == 0) {
    if (result_reg == addr_reg || result_reg == no_reg) return addr_reg;
    lasm->mv(result_reg, addr_reg);

  if (offset_reg == no_reg) {
    lasm->AddWord(result_reg, addr_reg, Operand(offset_imm));

    lasm->AddWord(result_reg, addr_reg, Operand(offset_reg));

      lasm->AddWord(result_reg, result_reg, Operand(offset_imm));
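  // The LR/SC and AMO sequences below need the final address in a single
  // register, so base, register offset, and immediate are combined here.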
 
 
                          Register offset_reg, uintptr_t offset_imm,

  __ MultiPush(c_params - result_list);

      extern_func_ref = ExternalReference::atomic_pair_add_function();

      extern_func_ref = ExternalReference::atomic_pair_sub_function();

      extern_func_ref = ExternalReference::atomic_pair_and_function();

      extern_func_ref = ExternalReference::atomic_pair_or_function();

      extern_func_ref = ExternalReference::atomic_pair_xor_function();

      extern_func_ref = ExternalReference::atomic_pair_exchange_function();

  __ CallCFunction(extern_func_ref, 3, 0);

  __ MultiPop(c_params - result_list);
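// riscv32 has no 64-bit atomic instructions, so 64-bit read-modify-write
// operations are routed through C runtime helpers that take the address
// and the value halves as arguments.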
 
 
                        Register offset_reg, uintptr_t offset_imm,

  if (offset_reg != no_reg) pinned.set(offset_reg);
  Register store_result = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();

  bool change_result = false;
  switch (type.value()) {
    case StoreType::kI64Store8:
    case StoreType::kI64Store16:
      __ LoadConstant(result.high(), WasmValue(0));
      result_reg = result.low_gp();
      value_reg = value.low_gp();

    case StoreType::kI32Store8:
    case StoreType::kI32Store16:

      value_reg = value.gp();

  if (result_reg == value_reg || result_reg == dst_addr ||
      result_reg == offset_reg) {
    result_reg = __ GetUnusedRegister(kGpReg, pinned).gp();
    change_result = true;

  UseScratchRegisterScope temps(lasm);

      lasm, temps, dst_addr, offset_reg, offset_imm);

  Register temp = temps.Acquire();

  switch (type.value()) {
    case StoreType::kI64Store8:
    case StoreType::kI32Store8:
      __ lbu(result_reg, actual_addr, 0);

    case StoreType::kI64Store16:
    case StoreType::kI32Store16:
      __ lhu(result_reg, actual_addr, 0);

    case StoreType::kI64Store32:
    case StoreType::kI32Store:
      __ lr_w(true, false, result_reg, actual_addr);

      __ add(temp, result_reg, value_reg);

      __ sub(temp, result_reg, value_reg);

      __ and_(temp, result_reg, value_reg);

      __ or_(temp, result_reg, value_reg);

      __ xor_(temp, result_reg, value_reg);

      __ mv(temp, value_reg);

  switch (type.value()) {
    case StoreType::kI64Store8:
    case StoreType::kI32Store8:

      __ sb(temp, actual_addr, 0);

      __ mv(store_result, zero_reg);

    case StoreType::kI64Store16:
    case StoreType::kI32Store16:

      __ sh(temp, actual_addr, 0);

      __ mv(store_result, zero_reg);

    case StoreType::kI64Store32:
    case StoreType::kI32Store:
      __ sc_w(false, true, store_result, actual_addr, temp);

  __ bnez(store_result, &retry);
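  // Word-sized cases form a real LR/SC loop: sc_w leaves zero in
  // store_result on success, so bnez retries until the exclusive store
  // lands. In the sub-word cases store_result is cleared unconditionally,
  // so the loop exits after one iteration.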
 
    switch (type.value()) {
      case StoreType::kI64Store8:
      case StoreType::kI64Store16:
      case StoreType::kI64Store32:

      case StoreType::kI32Store8:
      case StoreType::kI32Store16:
      case StoreType::kI32Store:
 
                                  Register offset_reg, uintptr_t offset_imm,
                                  LoadType type, LiftoffRegList pinned,

                                                     offset_reg, offset_imm);

  switch (type.value()) {
    case LoadType::kI32Load8U:
    case LoadType::kI32Load16U:
    case LoadType::kI32Load:

    case LoadType::kI64Load8U:
    case LoadType::kI64Load16U:
    case LoadType::kI64Load32U:
      dst_reg = dst.low_gp();
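      // Narrow atomic loads into an i64 destination only write the low
      // register of the pair; the high word is produced separately.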
 
  switch (type.value()) {
    case LoadType::kI32Load8U:
    case LoadType::kI64Load8U:

      lbu(dst_reg, src_reg, 0);

    case LoadType::kI32Load16U:
    case LoadType::kI64Load16U:

      lhu(dst_reg, src_reg, 0);

    case LoadType::kI32Load:
    case LoadType::kI64Load32U:

      lw(dst_reg, src_reg, 0);

    case LoadType::kI64Load:
 
                                   uintptr_t offset_imm, LiftoffRegister src,
                                   StoreType type, LiftoffRegList pinned,

                                                     offset_reg, offset_imm);

  switch (type.value()) {
    case StoreType::kI32Store8:
    case StoreType::kI32Store16:
    case StoreType::kI32Store:

    case StoreType::kI64Store8:
    case StoreType::kI64Store16:
    case StoreType::kI64Store32:
      src_reg = src.low_gp();

  switch (type.value()) {
    case StoreType::kI64Store8:
    case StoreType::kI32Store8:

      sb(src_reg, dst_reg, 0);

    case StoreType::kI64Store16:
    case StoreType::kI32Store16:

      sh(src_reg, dst_reg, 0);

    case StoreType::kI64Store32:
    case StoreType::kI32Store:

      sw(src_reg, dst_reg, 0);

    case StoreType::kI64Store:
                               uint32_t offset_imm, LiftoffRegister value,
                               LiftoffRegister result, StoreType type,

  if (type.value() == StoreType::kI64Store) {

  if (type.value() == StoreType::kI32Store ||
      type.value() == StoreType::kI64Store32) {

        this, temps, dst_addr, offset_reg, offset_imm);
    if (type.value() == StoreType::kI64Store32) {
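    // Fast path: word-sized operands can map onto a single RV32 AMO
    // instruction (amoadd_w and friends); the kI64Store32 branch
    // additionally produces the high word of the i64 result.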
 
                               uint32_t offset_imm, LiftoffRegister value,
                               LiftoffRegister result, StoreType type,

  if (type.value() == StoreType::kI64Store) {

  if (type.value() == StoreType::kI32Store ||
      type.value() == StoreType::kI64Store32) {

        this, temps, dst_addr, offset_reg, offset_imm);
    if (type.value() == StoreType::kI64Store32) {

                               uint32_t offset_imm, LiftoffRegister value,
                               LiftoffRegister result, StoreType type,

  if (type.value() == StoreType::kI64Store) {

  if (type.value() == StoreType::kI32Store ||
      type.value() == StoreType::kI64Store32) {

        this, temps, dst_addr, offset_reg, offset_imm);
    if (type.value() == StoreType::kI64Store32) {

                               uint32_t offset_imm, LiftoffRegister value,
                               LiftoffRegister result, StoreType type,

  if (type.value() == StoreType::kI64Store) {

  if (type.value() == StoreType::kI32Store ||
      type.value() == StoreType::kI64Store32) {

        this, temps, dst_addr, offset_reg, offset_imm);
    if (type.value() == StoreType::kI64Store32) {

                               uint32_t offset_imm, LiftoffRegister value,
                               LiftoffRegister result, StoreType type,

  if (type.value() == StoreType::kI64Store) {

  if (type.value() == StoreType::kI32Store ||
      type.value() == StoreType::kI64Store32) {

        this, temps, dst_addr, offset_reg, offset_imm);
    if (type.value() == StoreType::kI64Store32) {

                                      LiftoffRegister value,
                                      LiftoffRegister result, StoreType type,

  if (type.value() == StoreType::kI64Store) {

  if (type.value() == StoreType::kI32Store ||
      type.value() == StoreType::kI64Store32) {

        this, temps, dst_addr, offset_reg, offset_imm);
    if (type.value() == StoreType::kI64Store32) {
 
    Register dst_addr, Register offset_reg, uintptr_t offset_imm,
    LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
    StoreType type, bool i64_offset) {

  LiftoffRegList pinned{dst_addr, expected, new_value, result};
  if (offset_reg != no_reg) pinned.set(offset_reg);

  if (type.value() == StoreType::kI64Store) {

        this, temps, dst_addr, offset_reg, offset_imm, kScratchReg);

    Mv(a1, expected.low_gp());
    Mv(a2, expected.high_gp());
    Mv(a3, new_value.low_gp());
    Mv(a4, new_value.high_gp());
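    // The 64-bit compare-exchange helper takes five arguments: the address
    // plus the expected and new values, each split into 32-bit halves.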
 
    CallCFunction(ExternalReference::atomic_pair_compare_exchange_function(), 5,
 
  switch (type.value()) {
    case StoreType::kI64Store8:
    case StoreType::kI64Store16:
    case StoreType::kI64Store32:

      new_value = new_value.low();
      expected = expected.low();

    case StoreType::kI32Store8:
    case StoreType::kI32Store16:
    case StoreType::kI32Store:

      this, temps, dst_addr, offset_reg, offset_imm, kScratchReg);

  if (type.value() != StoreType::kI32Store &&
      type.value() != StoreType::kI64Store32) {
    And(temp1, actual_addr, 0x3);
    SubWord(temp0, actual_addr, Operand(temp1));
    SllWord(temp1, temp1, 3);
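    // Sub-word CAS: align the address down to the containing word and turn
    // the byte offset into a bit shift, so the target byte/halfword can be
    // extracted from and re-inserted into that word.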
 
  switch (type.value()) {
    case StoreType::kI64Store8:
    case StoreType::kI32Store8:
      lr_w(true, true, temp2, temp0);

      ExtractBits(temp2, expected.gp(), zero_reg, 8, false);

      sc_w(true, true, temp2, temp0, temp2);

    case StoreType::kI64Store16:
    case StoreType::kI32Store16:
      lr_w(true, true, temp2, temp0);

      ExtractBits(temp2, expected.gp(), zero_reg, 16, false);

      InsertBits(temp2, new_value.gp(), temp1, 16);
      sc_w(true, true, temp2, temp0, temp2);

    case StoreType::kI64Store32:
    case StoreType::kI32Store:

      sc_w(true, true, temp2, actual_addr, new_value.gp());

  bnez(temp2, &retry);
 
                                           uint32_t caller_slot_idx,

                                            uint32_t caller_slot_idx,
                                            Register frame_pointer) {

      if (src.offset() != 0) {
        MacroAssembler::AddWord(src_reg, src.rm(), src.offset());

      if (dst.offset() != 0) {

    MacroAssembler::vmv_vv(dst.toV(), src.toV());

      if (dst.offset() != 0) {

      vs(reg.fp().toV(), dst_reg, 0, VSew::E8);
 
  Register tmp = assembler_temps.Acquire();
  switch (value.type().kind()) {

      int32_t low_word = value.to_i64();
      int32_t high_word = value.to_i64() >> 32;

      if (src.offset() != 0) {
        MacroAssembler::AddWord(src_reg, src.rm(), src.offset());

      vl(reg.fp().toV(), src_reg, 0, E8);
 
    AddWord(a0, fp, Operand(-start - size));
    AddWord(a1, fp, Operand(-start));
 
  Branch(&high_is_zero, eq, src.high_gp(), Operand(zero_reg));

  Clz32(dst.low_gp(), src.high_gp());

  bind(&high_is_zero);
  Clz32(dst.low_gp(), src.low_gp());
  AddWord(dst.low_gp(), dst.low_gp(), Operand(32));
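  // 64-bit clz over a register pair: count in the high word when it is
  // nonzero, otherwise count in the low word and add 32.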
 
  mv(dst.high_gp(), zero_reg);  // High word of result is always 0.

  Branch(&low_is_zero, eq, src.low_gp(), Operand(zero_reg));

  Ctz32(dst.low_gp(), src.low_gp());

  Ctz32(dst.low_gp(), src.high_gp());
  AddWord(dst.low_gp(), dst.low_gp(), Operand(32));

  mv(dst.high_gp(), zero_reg);  // High word of result is always 0.
 
                                       LiftoffRegister src) {

  Register src1 = src.high_gp() == dst.low_gp() ? src.high_gp() : src.low_gp();
  Register src2 = src.high_gp() == dst.low_gp() ? src.low_gp() : src.high_gp();

  AddWord(dst.low_gp(), dst.low_gp(), dst.high_gp());
  mv(dst.high_gp(), zero_reg);
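  // popcnt(i64) = popcnt(low) + popcnt(high); src1/src2 are ordered so that
  // neither input is clobbered before it has been counted.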
 
  Register scratch = temps.Acquire();
  li(scratch, Operand{imm});
 
                                     Label* trap_div_by_zero,
                                     Label* trap_div_unrepresentable) {

  MacroAssembler::Div(dst, lhs, rhs);

                                     Label* trap_div_by_zero) {

  MacroAssembler::Divu(dst, lhs, rhs);

                                     Label* trap_div_by_zero) {

  MacroAssembler::Mod(dst, lhs, rhs);

                                     Label* trap_div_by_zero) {

  MacroAssembler::Modu(dst, lhs, rhs);
 
#define I32_BINOP(name, instruction)                                 \
  void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
    instruction(dst, lhs, rhs);                                      \

#define I32_BINOP_I(name, instruction)                                  \
  void LiftoffAssembler::emit_i32_##name##i(Register dst, Register lhs, \
    instruction(dst, lhs, Operand(imm));                                \

#define I32_SHIFTOP(name, instruction)                               \
  void LiftoffAssembler::emit_i32_##name(Register dst, Register src, \
                                         Register amount) {          \
    instruction(dst, src, amount);                                   \

#define I32_SHIFTOP_I(name, instruction)                                \
  void LiftoffAssembler::emit_i32_##name##i(Register dst, Register src, \
    instruction(dst, src, amount & 31);                                 \
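// The macros above stamp out one trivial emitter per i32 ALU/shift
// instruction; the immediate-shift variant masks the amount to the low
// five bits, matching Wasm's mod-32 semantics for 32-bit shifts.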
 
                                    LiftoffRegister rhs) {
  MacroAssembler::MulPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
                          lhs.high_gp(), rhs.low_gp(), rhs.high_gp(),

                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero,
                                     Label* trap_div_unrepresentable) {

                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero) {

                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero) {

                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero) {
 
  assm->And(amount_capped, amount, Operand(63));

    (assm->*emit_shift)(tmp.low_gp(), tmp.high_gp(), src.low_gp(),

    assm->MacroAssembler::Move(dst.high_gp(), tmp.high_gp());
    assm->MacroAssembler::Move(dst.low_gp(), tmp.low_gp());

    (assm->*emit_shift)(dst.low_gp(), dst.high_gp(), src.low_gp(),
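  // The shift amount is capped to 0..63; when dst would alias an input,
  // the shift is done into a temporary pair and moved into dst afterwards.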
 
 
                                    LiftoffRegister rhs) {
  MacroAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
                          lhs.high_gp(), rhs.low_gp(), rhs.high_gp(),

  LiftoffRegister imm_reg =

  int32_t imm_low_word = static_cast<int32_t>(imm);
  int32_t imm_high_word = static_cast<int32_t>(imm >> 32);
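  // AddPair has no immediate form, so the 64-bit constant is split into
  // halves and materialized into a scratch register pair first.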
 
  MacroAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
                          lhs.high_gp(), imm_reg.low_gp(), imm_reg.high_gp(),

                                    LiftoffRegister rhs) {
  MacroAssembler::SubPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
                          lhs.high_gp(), rhs.low_gp(), rhs.high_gp(),
 
  temps.Include(temp.gp());

  temps.Include(temp.gp());

  temps.Include(temp.gp());
 
#define FP_UNOP_RETURN_FALSE(name)                                             \
  bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \

#undef FP_UNOP_RETURN_FALSE

bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
                                            LiftoffRegister dst,
                                            LiftoffRegister src, Label* trap) {
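  // Dispatches each Wasm conversion opcode to a RISC-V convert/move
  // sequence; the trapping float-to-int conversions share a path that
  // checks `trap` below.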
 
    case kExprI32ConvertI64:

    case kExprI32SConvertF32:
    case kExprI32UConvertF32:
    case kExprI32SConvertF64:
    case kExprI32UConvertF64:
    case kExprI64SConvertF32:
    case kExprI64UConvertF32:
    case kExprI64SConvertF64:
    case kExprI64UConvertF64:
    case kExprF32ConvertF64: {

        case kExprI32SConvertF32:

        case kExprI32UConvertF32:

        case kExprI32SConvertF64:

        case kExprI32UConvertF64:

        case kExprF32ConvertF64:

        case kExprI64SConvertF32:
        case kExprI64UConvertF32:
        case kExprI64SConvertF64:
        case kExprI64UConvertF64:

      if (trap != nullptr) {

    case kExprI32ReinterpretF32:

    case kExprI64SConvertI32:

      srai(dst.high_gp(), dst.high_gp(), 31);

    case kExprI64UConvertI32:

    case kExprI64ReinterpretF64:
 
    case kExprF32SConvertI32: {

    case kExprF32UConvertI32:

    case kExprF32ReinterpretI32:

    case kExprF64SConvertI32: {

    case kExprF64UConvertI32:

    case kExprF64ConvertF32:

    case kExprF64ReinterpretI64:

    case kExprI32SConvertSatF32: {

    case kExprI32UConvertSatF32: {

    case kExprI32SConvertSatF64: {

    case kExprI32UConvertSatF64: {

    case kExprI64SConvertSatF32:
    case kExprI64UConvertSatF32:
    case kExprI64SConvertSatF64:
    case kExprI64UConvertSatF64:
 
                                               LiftoffRegister lhs,
                                               uint8_t imm_lane_idx) {

  slli(dst, src, 32 - 8);
  srai(dst, dst, 32 - 8);

  slli(dst, src, 32 - 16);
  srai(dst, dst, 32 - 16);
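  // Sign extension via shift pair: left-shift the value to the top of the
  // word, then arithmetic-shift it back down by (32 - width).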
 
                                              LiftoffRegister src) {

  srai(dst.high_gp(), dst.low_gp(), 31);

                                               LiftoffRegister src) {

  srai(dst.high_gp(), dst.low_gp(), 31);

                                               LiftoffRegister src) {
  mv(dst.low_gp(), src.low_gp());
  srai(dst.high_gp(), src.low_gp(), 31);
 
                                      const FreezeCacheState& frozen) {

                                           Register lhs, int32_t imm,
                                           const FreezeCacheState& frozen) {

  MacroAssembler::Sltu(dst, src, 1);

                                         Register lhs, Register rhs) {

  Sltu(tmp, src.low_gp(), 1);
  Sltu(dst, src.high_gp(), 1);
  and_(dst, dst, tmp);
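  // i64.eqz: Sltu(x, 1) computes (x == 0); both words of the pair must be
  // zero, so the two results are ANDed together.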
 
                                         LiftoffRegister lhs,
                                         LiftoffRegister rhs) {

  Branch(&low, eq, lhs.high_gp(), Operand(rhs.high_gp()));

  Branch(&cont, cond, lhs.high_gp(), Operand(rhs.high_gp()));

  if (unsigned_cond == cond) {
    Branch(&cont, cond, lhs.low_gp(), Operand(rhs.low_gp()));

    Branch(&lt_zero, lt, lhs.high_gp(), Operand(zero_reg));
    Branch(&cont, unsigned_cond, lhs.low_gp(), Operand(rhs.low_gp()));

    Branch(&cont, unsigned_cond, rhs.low_gp(), Operand(lhs.low_gp()));
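  // 64-bit set_cond: the high words decide the result unless they are
  // equal, in which case the low words are compared with the unsigned
  // counterpart of the condition.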
 
  Register scratch = temps.Acquire();

  AddWord(scratch, scratch, Operand(1));
 
                                     Register offset_reg, uintptr_t offset_imm,

                                     uint32_t* protected_load_pc,

  Register scratch = temps.Acquire();

  VRegister dst_v = dst.fp().toV();
  auto trapper = [protected_load_pc](int offset) {
    if (protected_load_pc) *protected_load_pc = static_cast<uint32_t>(offset);

  MachineType memtype = type.mem_type();

    vxor_vv(dst_v, dst_v, dst_v);
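    // vxor of a register with itself zeroes the destination vector before
    // the loaded scalar is inserted (the zero-extend transform).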
 
      Lw(scratch, src_op, trapper);

      Lb(scratch, src_op, trapper);

      Lh(scratch, src_op, trapper);

      Lw(scratch, src_op, trapper);

  if (protected_load_pc) {
 
                                Register addr, Register offset_reg,
                                uintptr_t offset_imm, LoadType type,
                                uint8_t laneidx, uint32_t* protected_load_pc,

  Register scratch = temps.Acquire();

  MachineType mem_type = type.mem_type();
  auto trapper = [protected_load_pc](int offset) {
    if (protected_load_pc) *protected_load_pc = static_cast<uint32_t>(offset);

    Lbu(scratch, src_op, trapper);

    vmerge_vx(dst.fp().toV(), scratch, dst.fp().toV());

    Lhu(scratch, src_op, trapper);

    vmerge_vx(dst.fp().toV(), scratch, dst.fp().toV());

    Lw(scratch, src_op, trapper);

    vmerge_vx(dst.fp().toV(), scratch, dst.fp().toV());
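  // LoadLane: the scalar is loaded into a GP register, then vmerge_vx
  // copies it into exactly the lanes selected by the v0 mask, leaving the
  // other lanes of dst untouched.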
 
  if (protected_load_pc) {

                                 uintptr_t offset_imm, LiftoffRegister src,
                                 StoreType type, uint8_t lane,
                                 uint32_t* protected_store_pc,

  auto trapper = [protected_store_pc](int offset) {
    if (protected_store_pc) *protected_store_pc = static_cast<uint32_t>(offset);

  if (protected_store_pc) {
 
                                        LiftoffRegister src) {

                                               LiftoffRegister src1,
                                               LiftoffRegister src2,
                                               uint8_t imm_lane_idx) {

                                      LiftoffRegister rhs) {

  vmfeq_vv(v0, lhs.fp().toV(), lhs.fp().toV());

                                      LiftoffRegister rhs) {

  vmfeq_vv(v0, lhs.fp().toV(), lhs.fp().toV());
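  // vmfeq_vv(x, x) is false exactly for NaN lanes, so v0 ends up masking
  // the non-NaN lanes for the f64x2 min/max NaN propagation.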
 
                                                          LiftoffRegister src) {

                                                          LiftoffRegister src) {

                                                          LiftoffRegister src) {

                                                          LiftoffRegister src) {
 
    const std::initializer_list<VarState> args, const LiftoffRegister* rets,

    ExternalReference ext_ref) {
  AddWord(sp, sp, Operand(-stack_bytes));

    } else if (arg.is_const()) {

      if (arg.i32_const() == 0) {

        src = temps.Acquire();
        li(src, arg.i32_const());

      StoreWord(src, dst);

      auto scratch = temps.Acquire();

  constexpr Register kFirstArgReg = a0;
  mv(kFirstArgReg, sp);
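  // All arguments were spilled into the stack buffer just allocated; the C
  // function receives a single pointer to that buffer in a0.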
 
  constexpr int kNumCCallArgs = 1;

  const LiftoffRegister* next_result_reg = rets;
  if (return_kind != kVoid) {
    constexpr Register kReturnReg = a0;
    if (kReturnReg != next_result_reg->gp()) {
      Move(*next_result_reg, LiftoffRegister(kReturnReg), return_kind);

  if (out_argument_kind != kVoid) {

  AddWord(sp, sp, Operand(stack_bytes));
 
                             ExternalReference ext_ref) {

  int num_args = static_cast<int>(args.size());

  ParallelMove parallel_move{this};

      parallel_move.LoadIntoRegister(

      parallel_move.LoadIntoRegister(LiftoffRegister{kCArgRegs[reg_args]}, arg);

      liftoff::Store(this, dst.rm(), dst.offset(), arg.reg(), arg.kind());

    Register scratch = temps.Acquire();
    if (arg.is_const()) {

      li(scratch, Operand(arg.i32_const()));

  parallel_move.Execute();
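  // ParallelMove resolves all argument moves as one set, so no source
  // register is overwritten before its value has been consumed.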
 
  int last_stack_slot = param_slots;
  for (auto& slot : slots_) {
    const int stack_slot = slot.dst_slot_;

    last_stack_slot = stack_slot;

    switch (src.loc()) {

        switch (src.kind()) {

            UseScratchRegisterScope temps(asm_);
            Register scratch = temps.Acquire();

        if (src.kind() == kI64) {
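          // Only the requested half of the i64 register pair is pushed;
          // each stack slot holds 32 bits on this target.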
 
              asm_, slot.half_ == kLowWord ? src.reg().low() : src.reg().high(),

#endif  // V8_WASM_BASELINE_RISCV_LIFTOFF_ASSEMBLER_RISCV32_INL_H_
 