    return rt.rm() == zero_reg;
 
 
  RegList exclusions = {exclusion1, exclusion2, exclusion3};
 
 
  RegList exclusions = {exclusion1, exclusion2, exclusion3};
 
 
  RegList exclusions = {exclusion1, exclusion2, exclusion3};
 
 
#define __ ACCESS_MASM(masm)

#ifndef V8_ENABLE_LEAPTIERING
static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
                                      Register optimized_code_entry,
                                      Register scratch1, Register scratch2) {

  Label heal_optimized_code_slot;

  __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
                   &heal_optimized_code_slot);

  __ LoadCodePointerField(
      optimized_code_entry,

  __ JumpIfCodeIsMarkedForDeoptimization(optimized_code_entry, scratch1,
                                         &heal_optimized_code_slot);

  __ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, a1);

  __ bind(&heal_optimized_code_slot);
  __ GenerateTailCallToReturnedCode(Runtime::kHealOptimizedCodeSlot);
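
// If the weak reference to the optimized Code object has been cleared, or the
// code has been marked for deoptimization, control reaches
// heal_optimized_code_slot, which calls the runtime to clear the stale slot
// and re-enter the closure via its current code.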
 
#ifdef V8_ENABLE_DEBUG_CODE
    Assert(eq, AbortReason::kExpectedFeedbackCell, scratch,
           Operand(FEEDBACK_CELL_TYPE));

    Assert(eq, AbortReason::kExpectedFeedbackVector, scratch,
           Operand(FEEDBACK_VECTOR_TYPE));

#ifdef V8_ENABLE_LEAPTIERING

#ifndef V8_ENABLE_LEAPTIERING
    Label* flags_need_processing) {
 
 
  Label maybe_has_optimized_code, maybe_needs_logging;

  bind(&maybe_needs_logging);

    And(scratch, flags, Operand(FeedbackVector::LogNextExecutionBit::kMask));

  bind(&maybe_has_optimized_code);

                                  FeedbackVector::kMaybeOptimizedCodeOffset));
  TailCallOptimizedCodeSlot(this, optimized_code_entry, temps.Acquire(),

#if V8_TARGET_ARCH_RISCV64
#ifdef V8_TARGET_ARCH_RISCV64

    Abort(AbortReason::kUnalignedCellInWriteBarrier);
 
 
#ifdef V8_ENABLE_SANDBOX

#ifdef V8_ENABLE_SANDBOX

#ifdef V8_ENABLE_SANDBOX
    Label is_trusted_pointer_handle, done;

    bind(&is_trusted_pointer_handle);

  } else if (tag == kCodeIndirectPointerTag) {

void MacroAssembler::ResolveTrustedPointerHandle(Register destination,

                             IsolateData::trusted_pointer_table_offset()});

void MacroAssembler::ResolveCodePointerHandle(Register destination,

  LoadCodePointerTableBase(table);

void MacroAssembler::LoadCodeEntrypointViaCodePointer(Register destination,

  LoadCodePointerTableBase(scratch);

    li(scratch, Operand(tag));

void MacroAssembler::LoadCodePointerTableBase(Register destination) {
#ifdef V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
       ExternalReference::code_pointer_table_base_address(isolate()));

  li(destination, ExternalReference::global_code_pointer_table_base_address());

#ifdef V8_ENABLE_SANDBOX
  if (isolate_root == no_reg) {

  LoadWord(external_table,
                      IsolateData::external_pointer_table_offset() +

  AddWord(external_table, external_table, destination);
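
// Under the sandbox, external pointer fields hold table handles rather than
// raw pointers: the handle is converted to an index into the isolate's
// external pointer table and the real pointer is fetched from that entry.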
 
 
#ifdef V8_TARGET_ARCH_RISCV64
#ifdef V8_ENABLE_SANDBOX
#ifdef V8_ENABLE_SANDBOX
  Register scratch = temps.Acquire();
     FieldMemOperand(value, ExposedTrustedObject::kSelfIndirectPointerOffset));
  Sw(scratch, dst_field_operand, std::forward<Trapper>(trapper));

  if (mode == StubCallMode::kCallWasmRuntimeStub) {

  if (dst_slot != object) {
    AddWord(dst_slot, object, offset);
    mv(dst_object, object);

  if (offset.IsImmediate() || (offset.rm() != dst_object)) {
    mv(dst_object, dst_slot);
    AddWord(dst_slot, dst_slot, offset);

  AddWord(dst_slot, dst_slot, dst_object);
  SubWord(dst_object, dst_slot, dst_object);
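
// Only the case (dst_object == dst_slot) && (offset.rm() == dst_object)
// remains here: the original object pointer is recovered without a scratch
// register by adding it into dst_slot and then subtracting it back out.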
 
 
    AddWord(temp, object, offset);
#ifdef V8_TARGET_ARCH_RISCV64
    Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, temp,

  if (v8_flags.disable_write_barriers) {

    AddWord(slot_address, object, offset);

#if V8_TARGET_ARCH_RISCV64
#ifdef V8_ENABLE_SANDBOX
  srli(value, value, kSandboxedPointerShift);

#ifdef V8_ENABLE_SANDBOX
  LoadWord(destination, field_operand, std::forward<Trapper>(trapper));

    Register value, const MemOperand& dst_field_operand, Trapper&& trapper) {
#ifdef V8_ENABLE_SANDBOX
  Register scratch = temps.Acquire();
  slli(scratch, scratch, kSandboxedPointerShift);
  StoreWord(scratch, dst_field_operand, std::forward<Trapper>(trapper));
 
void MacroAssembler::Add32(Register rd, Register rs, const Operand& rt) {
    if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
        ((rd.code() & 0b11000) == 0b01000) &&
        ((rt.rm().code() & 0b11000) == 0b01000)) {
      addw(rd, rs, rt.rm());

    if (v8_flags.riscv_c_extension && is_int6(rt.immediate()) &&
        (rd.code() == rs.code()) && (rd != zero_reg) &&
      c_addiw(rd, static_cast<int8_t>(rt.immediate()));
      addiw(rd, rs, static_cast<int32_t>(rt.immediate()));
    } else if ((-4096 <= rt.immediate() && rt.immediate() <= -2049) ||
               (2048 <= rt.immediate() && rt.immediate() <= 4094)) {
      addiw(rd, rs, rt.immediate() / 2);
      addiw(rd, rd, rt.immediate() - (rt.immediate() / 2));
      Li(scratch, rt.immediate());
      addw(rd, rs, scratch);
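
// Immediates just outside the int12 range are split across two addiw
// instructions (each half fits in 12 bits); anything larger is materialized
// into a scratch register with Li and combined with a register-register addw.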
 
void MacroAssembler::Sub32(Register rd, Register rs, const Operand& rt) {
    if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
        ((rd.code() & 0b11000) == 0b01000) &&
        ((rt.rm().code() & 0b11000) == 0b01000)) {
      subw(rd, rs, rt.rm());

    if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
        (rd != zero_reg) && is_int6(-rt.immediate()) &&

  } else if (is_int12(-rt.immediate()) && !MustUseReg(rt.rmode())) {

  } else if ((-4096 <= -rt.immediate() && -rt.immediate() <= -2049) ||
             (2048 <= -rt.immediate() && -rt.immediate() <= 4094)) {
    addiw(rd, rs, -rt.immediate() / 2);
    addiw(rd, rd, -rt.immediate() - (-rt.immediate() / 2));

      Li(scratch, -rt.immediate());
      addw(rd, rs, scratch);

      Li(scratch, rt.immediate());
      subw(rd, rs, scratch);

void MacroAssembler::AddWord(Register rd, Register rs, const Operand& rt) {

void MacroAssembler::SubWord(Register rd, Register rs, const Operand& rt) {

void MacroAssembler::Sub64(Register rd, Register rs, const Operand& rt) {
 
    if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
        ((rd.code() & 0b11000) == 0b01000) &&
        ((rt.rm().code() & 0b11000) == 0b01000)) {
      sub(rd, rs, rt.rm());

  } else if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
             (rd != zero_reg) && is_int6(-rt.immediate()) &&

  } else if (v8_flags.riscv_c_extension && is_int10(-rt.immediate()) &&
             (rt.immediate() != 0) && ((rt.immediate() & 0xf) == 0) &&
             (rd.code() == rs.code()) && (rd == sp) &&

  } else if (is_int12(-rt.immediate()) && !MustUseReg(rt.rmode())) {

  } else if ((-4096 <= -rt.immediate() && -rt.immediate() <= -2049) ||
             (2048 <= -rt.immediate() && -rt.immediate() <= 4094)) {
    addi(rd, rs, -rt.immediate() / 2);
    addi(rd, rd, -rt.immediate() - (-rt.immediate() / 2));

    if (li_neg_count < li_count && !MustUseReg(rt.rmode())) {
      DCHECK(rt.immediate() != std::numeric_limits<int32_t>::min());
      li(scratch, Operand(-rt.immediate()));
      add(rd, rs, scratch);

      sub(rd, rs, scratch);
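
// When no immediate form fits, Sub64 compares how many instructions it takes
// to materialize -imm versus imm (li_neg_count vs. li_count) and emits the
// cheaper of li(-imm); add or li(imm); sub.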
 
void MacroAssembler::Add64(Register rd, Register rs, const Operand& rt) {
    if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
        (rt.rm() != zero_reg) && (rs != zero_reg)) {
      add(rd, rs, rt.rm());

    if (v8_flags.riscv_c_extension && is_int6(rt.immediate()) &&
        (rd.code() == rs.code()) && (rd != zero_reg) && (rt.immediate() != 0) &&
      c_addi(rd, static_cast<int8_t>(rt.immediate()));
    } else if (v8_flags.riscv_c_extension && is_int10(rt.immediate()) &&
               (rt.immediate() != 0) && ((rt.immediate() & 0xf) == 0) &&
               (rd.code() == rs.code()) && (rd == sp) &&
    } else if (v8_flags.riscv_c_extension &&
               ((rd.code() & 0b11000) == 0b01000) && (rs == sp) &&
               is_uint10(rt.immediate()) && (rt.immediate() != 0) &&
    } else if ((-4096 <= rt.immediate() && rt.immediate() <= -2049) ||
               (2048 <= rt.immediate() && rt.immediate() <= 4094)) {
      addi(rd, rs, rt.immediate() / 2);
      addi(rd, rd, rt.immediate() - (rt.immediate() / 2));

      add(rd, rs, scratch);

void MacroAssembler::Mul32(Register rd, Register rs, const Operand& rt) {
    mulw(rd, rs, rt.rm());

    Li(scratch, rt.immediate());
    mulw(rd, rs, scratch);

void MacroAssembler::Mulh32(Register rd, Register rs, const Operand& rt) {
    mul(rd, rs, rt.rm());

    Register scratch = temps.Acquire();
    Li(scratch, rt.immediate());
    mul(rd, rs, scratch);

void MacroAssembler::Mulhu32(Register rd, Register rs, const Operand& rt,
                             Register rsz, Register rtz) {
    Li(rtz, rt.immediate() << 32);

  mulhu(rd, rsz, rtz);
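
// Mulhu32 (RV64) shifts both 32-bit operands into the upper word, so mulhu of
// the shifted values yields the full 64-bit unsigned product, from which the
// high half is then extracted.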
 
void MacroAssembler::Mul64(Register rd, Register rs, const Operand& rt) {
    mul(rd, rs, rt.rm());

    Register scratch = temps.Acquire();
    Li(scratch, rt.immediate());
    mul(rd, rs, scratch);

void MacroAssembler::Mulh64(Register rd, Register rs, const Operand& rt) {
    Register scratch = temps.Acquire();
    Li(scratch, rt.immediate());
    mulh(rd, rs, scratch);

void MacroAssembler::Mulhu64(Register rd, Register rs, const Operand& rt) {
    Register scratch = temps.Acquire();
    Li(scratch, rt.immediate());
    mulhu(rd, rs, scratch);

void MacroAssembler::Div32(Register res, Register rs, const Operand& rt) {
    Register scratch = temps.Acquire();
    Li(scratch, rt.immediate());
    divw(res, rs, scratch);

void MacroAssembler::Mod32(Register rd, Register rs, const Operand& rt) {
    remw(rd, rs, rt.rm());

    Register scratch = temps.Acquire();
    Li(scratch, rt.immediate());
    remw(rd, rs, scratch);

void MacroAssembler::Modu32(Register rd, Register rs, const Operand& rt) {
    remuw(rd, rs, rt.rm());

    Register scratch = temps.Acquire();
    Li(scratch, rt.immediate());
    remuw(rd, rs, scratch);

void MacroAssembler::Div64(Register rd, Register rs, const Operand& rt) {
    div(rd, rs, rt.rm());

    Register scratch = temps.Acquire();
    Li(scratch, rt.immediate());
    div(rd, rs, scratch);

void MacroAssembler::Divu32(Register res, Register rs, const Operand& rt) {
    divuw(res, rs, rt.rm());

    Register scratch = temps.Acquire();
    Li(scratch, rt.immediate());
    divuw(res, rs, scratch);

void MacroAssembler::Divu64(Register res, Register rs, const Operand& rt) {
    Register scratch = temps.Acquire();
    Li(scratch, rt.immediate());
    divu(res, rs, scratch);

void MacroAssembler::Mod64(Register rd, Register rs, const Operand& rt) {
    rem(rd, rs, rt.rm());

    Register scratch = temps.Acquire();
    Li(scratch, rt.immediate());
    rem(rd, rs, scratch);

void MacroAssembler::Modu64(Register rd, Register rs, const Operand& rt) {
    Register scratch = temps.Acquire();
    Li(scratch, rt.immediate());
    remu(rd, rs, scratch);
 
#elif V8_TARGET_ARCH_RISCV32
void MacroAssembler::AddWord(Register rd, Register rs, const Operand& rt) {

void MacroAssembler::Add32(Register rd, Register rs, const Operand& rt) {
    if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
        (rt.rm() != zero_reg) && (rs != zero_reg)) {
      add(rd, rs, rt.rm());

    if (v8_flags.riscv_c_extension && is_int6(rt.immediate()) &&
        (rd.code() == rs.code()) && (rd != zero_reg) && (rt.immediate() != 0) &&
      c_addi(rd, static_cast<int8_t>(rt.immediate()));
    } else if (v8_flags.riscv_c_extension && is_int10(rt.immediate()) &&
               (rt.immediate() != 0) && ((rt.immediate() & 0xf) == 0) &&
               (rd.code() == rs.code()) && (rd == sp) &&
    } else if (v8_flags.riscv_c_extension &&
               ((rd.code() & 0b11000) == 0b01000) && (rs == sp) &&
               is_uint10(rt.immediate()) && (rt.immediate() != 0) &&
    } else if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
    } else if ((-4096 <= rt.immediate() && rt.immediate() <= -2049) ||
               (2048 <= rt.immediate() && rt.immediate() <= 4094)) {
      addi(rd, rs, rt.immediate() / 2);
      addi(rd, rd, rt.immediate() - (rt.immediate() / 2));

      Register scratch = temps.Acquire();
      add(rd, rs, scratch);
 
void MacroAssembler::SubWord(Register rd, Register rs, const Operand& rt) {

void MacroAssembler::Sub32(Register rd, Register rs, const Operand& rt) {
    if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
        ((rd.code() & 0b11000) == 0b01000) &&
        ((rt.rm().code() & 0b11000) == 0b01000)) {
      sub(rd, rs, rt.rm());

  } else if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
             (rd != zero_reg) && is_int6(-rt.immediate()) &&
           static_cast<int8_t>(
  } else if (v8_flags.riscv_c_extension && is_int10(-rt.immediate()) &&
             (rt.immediate() != 0) && ((rt.immediate() & 0xf) == 0) &&
             (rd.code() == rs.code()) && (rd == sp) &&
  } else if (is_int12(-rt.immediate()) && !MustUseReg(rt.rmode())) {
  } else if ((-4096 <= -rt.immediate() && -rt.immediate() <= -2049) ||
             (2048 <= -rt.immediate() && -rt.immediate() <= 4094)) {
    addi(rd, rs, -rt.immediate() / 2);
    addi(rd, rd, -rt.immediate() - (-rt.immediate() / 2));

    if (li_neg_count < li_count && !MustUseReg(rt.rmode())) {
      DCHECK(rt.immediate() != std::numeric_limits<int32_t>::min());
      Register scratch = temps.Acquire();
      li(scratch, Operand(-rt.immediate()));
      add(rd, rs, scratch);

      Register scratch = temps.Acquire();
      sub(rd, rs, scratch);

void MacroAssembler::Mul32(Register rd, Register rs, const Operand& rt) {
    mul(rd, rs, rt.rm());

    Register scratch = temps.Acquire();
    Li(scratch, rt.immediate());
    mul(rd, rs, scratch);

void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
    Register scratch = temps.Acquire();
    Li(scratch, rt.immediate());
    mulh(rd, rs, scratch);

void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt,
                           Register rsz, Register rtz) {
    Register scratch = temps.Acquire();
    Li(scratch, rt.immediate());
    mulhu(rd, rs, scratch);

void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
    div(res, rs, rt.rm());

    Register scratch = temps.Acquire();
    Li(scratch, rt.immediate());
    div(res, rs, scratch);

void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
    rem(rd, rs, rt.rm());

    Register scratch = temps.Acquire();
    Li(scratch, rt.immediate());
    rem(rd, rs, scratch);

void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
    Register scratch = temps.Acquire();
    Li(scratch, rt.immediate());
    remu(rd, rs, scratch);

void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
    Register scratch = temps.Acquire();
    Li(scratch, rt.immediate());
    divu(res, rs, scratch);
 
        ((rd.code() & 0b11000) == 0b01000) &&
        ((rt.rm().code() & 0b11000) == 0b01000)) {

    if (v8_flags.riscv_c_extension && is_int6(rt.immediate()) &&
        ((rd.code() & 0b11000) == 0b01000)) {
      c_andi(rd, static_cast<int8_t>(rt.immediate()));
    } else if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
      andi(rd, rs, static_cast<int32_t>(rt.immediate()));

      Li(scratch, rt.immediate());
      and_(rd, rs, scratch);

        ((rd.code() & 0b11000) == 0b01000) &&
        ((rt.rm().code() & 0b11000) == 0b01000)) {
      or_(rd, rs, rt.rm());

      ori(rd, rs, static_cast<int32_t>(rt.immediate()));

      Li(scratch, rt.immediate());
      or_(rd, rs, scratch);

        ((rd.code() & 0b11000) == 0b01000) &&
        ((rt.rm().code() & 0b11000) == 0b01000)) {

      xori(rd, rs, static_cast<int32_t>(rt.immediate()));

      Li(scratch, rt.immediate());
      xor_(rd, rs, scratch);

    or_(rd, rs, rt.rm());

void MacroAssembler::Seqz(Register rd, const Operand& rt) {
    li(rd, rt.immediate() == 0);

void MacroAssembler::Snez(Register rd, const Operand& rt) {
    li(rd, rt.immediate() != 0);

void MacroAssembler::Seq(Register rd, Register rs, const Operand& rt) {
  if (rs == zero_reg) {
    SubWord(rd, rs, rt);

void MacroAssembler::Sne(Register rd, Register rs, const Operand& rt) {
  if (rs == zero_reg) {
    SubWord(rd, rs, rt);

void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
    slt(rd, rs, rt.rm());

      Register scratch = temps.Acquire();
      Li(scratch, rt.immediate());
      slt(rd, rs, scratch);

void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
      Register scratch = temps.Acquire();
      Li(scratch, rt.immediate());
      sltu(rd, rs, scratch);

void MacroAssembler::Sle(Register rd, Register rs, const Operand& rt) {
    slt(rd, rt.rm(), rs);

    Register scratch = temps.Acquire();
    Li(scratch, rt.immediate());
    slt(rd, scratch, rs);

void MacroAssembler::Sleu(Register rd, Register rs, const Operand& rt) {
    Register scratch = temps.Acquire();
    Li(scratch, rt.immediate());
    sltu(rd, scratch, rs);

void MacroAssembler::Sge(Register rd, Register rs, const Operand& rt) {

void MacroAssembler::Sgeu(Register rd, Register rs, const Operand& rt) {

void MacroAssembler::Sgt(Register rd, Register rs, const Operand& rt) {
    slt(rd, rt.rm(), rs);

    Register scratch = temps.Acquire();
    Li(scratch, rt.immediate());
    slt(rd, scratch, rs);

void MacroAssembler::Sgtu(Register rd, Register rs, const Operand& rt) {
    Register scratch = temps.Acquire();
    Li(scratch, rt.immediate());
    sltu(rd, scratch, rs);
 
#if V8_TARGET_ARCH_RISCV64
void MacroAssembler::Sll32(Register rd, Register rs, const Operand& rt) {
    sllw(rd, rs, rt.rm());

    uint8_t shamt = static_cast<uint8_t>(rt.immediate());
    slliw(rd, rs, shamt);

void MacroAssembler::Sra32(Register rd, Register rs, const Operand& rt) {
    sraw(rd, rs, rt.rm());

    uint8_t shamt = static_cast<uint8_t>(rt.immediate());
    sraiw(rd, rs, shamt);

void MacroAssembler::Srl32(Register rd, Register rs, const Operand& rt) {
    srlw(rd, rs, rt.rm());

    uint8_t shamt = static_cast<uint8_t>(rt.immediate());
    srliw(rd, rs, shamt);

void MacroAssembler::SraWord(Register rd, Register rs, const Operand& rt) {

void MacroAssembler::Sra64(Register rd, Register rs, const Operand& rt) {
    sra(rd, rs, rt.rm());
  } else if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
             ((rd.code() & 0b11000) == 0b01000) && is_int6(rt.immediate())) {
    uint8_t shamt = static_cast<uint8_t>(rt.immediate());

    uint8_t shamt = static_cast<uint8_t>(rt.immediate());
    srai(rd, rs, shamt);

void MacroAssembler::SrlWord(Register rd, Register rs, const Operand& rt) {

void MacroAssembler::Srl64(Register rd, Register rs, const Operand& rt) {
    srl(rd, rs, rt.rm());
  } else if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
             ((rd.code() & 0b11000) == 0b01000) && is_int6(rt.immediate())) {
    uint8_t shamt = static_cast<uint8_t>(rt.immediate());

    uint8_t shamt = static_cast<uint8_t>(rt.immediate());
    srli(rd, rs, shamt);

void MacroAssembler::SllWord(Register rd, Register rs, const Operand& rt) {

void MacroAssembler::Sll64(Register rd, Register rs, const Operand& rt) {
    sll(rd, rs, rt.rm());

    uint8_t shamt = static_cast<uint8_t>(rt.immediate());
    if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
        (rd != zero_reg) && (shamt != 0) && is_uint6(shamt)) {

      slli(rd, rs, shamt);
 
      rorw(rd, rs, rt.rm());

      int64_t ror_value = rt.immediate() % 32;
      if (ror_value < 0) {

      roriw(rd, rs, ror_value);

  Register scratch = temps.Acquire();

    sllw(scratch, rs, scratch);
    srlw(rd, rs, rt.rm());
    or_(rd, scratch, rd);

    int64_t ror_value = rt.immediate() % 32;
    if (ror_value == 0) {
    } else if (ror_value < 0) {

    srliw(scratch, rs, ror_value);
    slliw(rd, rs, 32 - ror_value);
    or_(rd, scratch, rd);
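
// With the Zbb rotate instructions available, Ror lowers to rorw/roriw
// directly; otherwise it is synthesized from a pair of opposite shifts ored
// together, after normalizing a negative rotate count into [0, 32).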
 
void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) {
      ror(rd, rs, rt.rm());

      int64_t dror_value = rt.immediate() % 64;
      if (dror_value < 0) {

      rori(rd, rs, dror_value);

  Register scratch = temps.Acquire();

    sll(scratch, rs, scratch);
    srl(rd, rs, rt.rm());
    or_(rd, scratch, rd);

    int64_t dror_value = rt.immediate() % 64;
    if (dror_value == 0) {
    } else if (dror_value < 0) {

    srli(scratch, rs, dror_value);
    slli(rd, rs, 64 - dror_value);
    or_(rd, scratch, rd);

#elif V8_TARGET_ARCH_RISCV32
void MacroAssembler::SllWord(Register rd, Register rs, const Operand& rt) {

void MacroAssembler::Sll32(Register rd, Register rs, const Operand& rt) {
    sll(rd, rs, rt.rm());

    uint8_t shamt = static_cast<uint8_t>(rt.immediate());
    slli(rd, rs, shamt);

void MacroAssembler::SraWord(Register rd, Register rs, const Operand& rt) {

void MacroAssembler::Sra32(Register rd, Register rs, const Operand& rt) {
    sra(rd, rs, rt.rm());

    uint8_t shamt = static_cast<uint8_t>(rt.immediate());
    srai(rd, rs, shamt);

void MacroAssembler::SrlWord(Register rd, Register rs, const Operand& rt) {

void MacroAssembler::Srl32(Register rd, Register rs, const Operand& rt) {
    srl(rd, rs, rt.rm());

    uint8_t shamt = static_cast<uint8_t>(rt.immediate());
    srli(rd, rs, shamt);

      ror(rd, rs, rt.rm());

      int32_t ror_value = rt.immediate() % 32;
      if (ror_value < 0) {

      rori(rd, rs, ror_value);

  Register scratch = temps.Acquire();

    neg(scratch, rt.rm());
    sll(scratch, rs, scratch);
    srl(rd, rs, rt.rm());
    or_(rd, scratch, rd);

    int32_t ror_value = rt.immediate() % 32;
    if (ror_value == 0) {
    } else if (ror_value < 0) {

    srli(scratch, rs, ror_value);
    slli(rd, rs, 32 - ror_value);
    or_(rd, scratch, rd);

void MacroAssembler::Li(Register rd, intptr_t imm) {
  if (v8_flags.riscv_c_extension && (rd != zero_reg) && is_int6(imm)) {

void MacroAssembler::Mv(Register rd, const Operand& rt) {
  if (v8_flags.riscv_c_extension && (rd != zero_reg) && (rt.rm() != zero_reg)) {

  DCHECK(sa >= 1 && sa <= 31);

  AddWord(rd, rt, tmp);

template <int NBYTES>
  DCHECK((rs != tmp1) && (rs != tmp2));
  DCHECK((rd != tmp1) && (rd != tmp2));

  andi(tmp1, rs, ByteMask);
  for (int nbyte = 1; nbyte < NBYTES - 1; nbyte++) {
    andi(tmp2, tmp2, ByteMask);
    or_(tmp1, tmp1, tmp2);

  andi(rd, rd, ByteMask);
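
// Generic byte reversal: each source byte is masked with ByteMask, shifted to
// its mirrored position, and ored into the accumulator, one byte per loop
// iteration.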
 
 
#if V8_TARGET_ARCH_RISCV64
  DCHECK(operand_size == 4 || operand_size == 8);
    if (operand_size == 4) {

  temps.Include(t4, t6);
  Register x0 = temps.Acquire();
  Register x1 = temps.Acquire();

  if (operand_size == 4) {
    DCHECK((rd != t6) && (rs != t6));

    DCHECK((rd != t6) && (rs != t6));
      li(x1, 0x0000FFFF0000FFFFl);
      li(x1, 0x00FF00FF00FF00FFl);

#elif V8_TARGET_ARCH_RISCV32
  DCHECK((rd != t6) && (rs != t6));

template <int NBYTES, bool LOAD_SIGNED>
  DCHECK(rd != rs.rm() && rd != scratch);

  slli(rd, rd, 8 * (NBYTES - 1));
  for (int i = (NBYTES - 2); i >= 0; i--) {
    if (i) slli(scratch, scratch, i * 8);
    or_(rd, rd, scratch);

template <int NBYTES, bool LOAD_SIGNED>
  DCHECK(rs.rm() != scratch0 && rs.rm() != scratch1 && scratch0 != scratch1);
    lb(scratch0, rs.rm(), rs.offset() + (NBYTES - 1));
    lbu(scratch0, rs.rm(), rs.offset() + (NBYTES - 1));

  slli(scratch0, scratch0, 8 * (NBYTES - 1));
  for (int i = (NBYTES - 2); i >= 0; i--) {
      slli(scratch1, scratch1, i * 8);
      or_(scratch0, scratch0, scratch1);
      or_(rs.rm(), scratch0, scratch1);
 
 
template <int NBYTES, bool IS_SIGNED>
    DCHECK(rd != source.rm());
    if (rd != rs.rm()) {

#if V8_TARGET_ARCH_RISCV64
template <int NBYTES>
  DCHECK(NBYTES == 4 || NBYTES == 8);
  Register scratch_base = temps.Acquire();

  temps.Include(t4, t6);
  Register scratch = temps.Acquire();
  Register scratch_other = temps.Acquire();
  DCHECK(scratch != rs.rm() && scratch_other != scratch &&
         scratch_other != rs.rm());

    fmv_d_x(frd, scratch);

#elif V8_TARGET_ARCH_RISCV32
template <int NBYTES>
  Register scratch_base = temps.Acquire();
    DCHECK(scratch_base != rs.rm());

  temps.Include(t4, t6);
  Register scratch = temps.Acquire();
  Register scratch_other = temps.Acquire();
  DCHECK(scratch != rs.rm() && scratch_other != scratch &&
         scratch_other != rs.rm());

void MacroAssembler::UnalignedDoubleHelper(FPURegister frd,
  Register scratch_base = temps.Acquire();
    DCHECK(scratch_base != rs.rm());

  temps.Include(t4, t6);
  Register scratch = temps.Acquire();
  Register scratch_other = temps.Acquire();
  DCHECK(scratch != rs.rm() && scratch_other != scratch &&
         scratch_other != rs.rm());

  source.set_offset(source.offset() + 4);

template <int NBYTES>
    DCHECK(scratch_base != rd && scratch_base != rs.rm());

  DCHECK(scratch_other != rd && scratch_other != rs.rm() &&
         scratch_other != source.rm());

  sb(rd, source.rm(), source.offset());
  for (size_t i = 1; i <= (NBYTES - 1); i++) {
    srli(scratch_other, rd, i * 8);
    sb(scratch_other, source.rm(), source.offset() + i);
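
// Unaligned stores are decomposed into byte stores: the low byte goes out
// first, then each remaining byte is shifted down into place and stored at
// the next offset.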
 
 
#if V8_TARGET_ARCH_RISCV64
template <int NBYTES>
  DCHECK(NBYTES == 8 || NBYTES == 4);
  Register scratch = temps.Acquire();
    fmv_x_d(scratch, frd);

#elif V8_TARGET_ARCH_RISCV32
template <int NBYTES>
  Register scratch = temps.Acquire();

void MacroAssembler::UnalignedDStoreHelper(FPURegister frd,
  Register scratch = temps.Acquire();
  source.set_offset(source.offset() + 4);

template <typename Reg_T, typename Func>
  generator(target, source);

template <typename Reg_T, typename Func>
    if (std::is_same<Reg_T, Register>::value) {
  generator(value, source);

#if V8_TARGET_ARCH_RISCV64
#if V8_TARGET_ARCH_RISCV64
  Register scratch = temps.Acquire();
  slli(scratch, scratch, 32);
  AddWord(rd, rd, scratch);

  Register scratch = temps.Acquire();
  srai(scratch, rd, 32);

#if V8_TARGET_ARCH_RISCV64
#elif V8_TARGET_ARCH_RISCV32
  UnalignedDoubleHelper(fd, rs);

#if V8_TARGET_ARCH_RISCV64
#elif V8_TARGET_ARCH_RISCV32
  UnalignedDStoreHelper(fd, rs);
 
 
    lb(target, source.rm(), source.offset());

    lbu(target, source.rm(), source.offset());

    sb(value, source.rm(), source.offset());

    lh(target, source.rm(), source.offset());

    lhu(target, source.rm(), source.offset());

    sh(value, source.rm(), source.offset());

    if (v8_flags.riscv_c_extension && ((target.code() & 0b11000) == 0b01000) &&
        ((source.rm().code() & 0b11000) == 0b01000) &&
        is_uint7(source.offset()) && ((source.offset() & 0x3) == 0)) {
      c_lw(target, source.rm(), source.offset());
    } else if (v8_flags.riscv_c_extension && (target != zero_reg) &&
               is_uint8(source.offset()) && (source.rm() == sp) &&
               ((source.offset() & 0x3) == 0)) {
      c_lwsp(target, source.offset());
      lw(target, source.rm(), source.offset());
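
// Loads and stores opportunistically pick the compressed encodings: c.lw
// needs both registers in x8-x15 (code & 0b11000 == 0b01000) and a small
// aligned offset, c.lwsp only needs an sp base; otherwise the full-width lw
// is emitted.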
 
 
#if V8_TARGET_ARCH_RISCV64
    lwu(target, source.rm(), source.offset());

    if (v8_flags.riscv_c_extension && ((value.code() & 0b11000) == 0b01000) &&
        ((source.rm().code() & 0b11000) == 0b01000) &&
        is_uint7(source.offset()) && ((source.offset() & 0x3) == 0)) {
      c_sw(value, source.rm(), source.offset());
    } else if (v8_flags.riscv_c_extension && (source.rm() == sp) &&
               is_uint8(source.offset()) && ((source.offset() & 0x3) == 0)) {
      c_swsp(value, source.offset());
      sw(value, source.rm(), source.offset());

#if V8_TARGET_ARCH_RISCV64
    if (v8_flags.riscv_c_extension && ((target.code() & 0b11000) == 0b01000) &&
        ((source.rm().code() & 0b11000) == 0b01000) &&
        is_uint8(source.offset()) && ((source.offset() & 0x7) == 0)) {
      c_ld(target, source.rm(), source.offset());
    } else if (v8_flags.riscv_c_extension && (target != zero_reg) &&
               is_uint9(source.offset()) && (source.rm() == sp) &&
               ((source.offset() & 0x7) == 0)) {
      c_ldsp(target, source.offset());
      ld(target, source.rm(), source.offset());

    if (v8_flags.riscv_c_extension && ((value.code() & 0b11000) == 0b01000) &&
        ((source.rm().code() & 0b11000) == 0b01000) &&
        is_uint8(source.offset()) && ((source.offset() & 0x7) == 0)) {
      c_sd(value, source.rm(), source.offset());
    } else if (v8_flags.riscv_c_extension && (source.rm() == sp) &&
               is_uint9(source.offset()) && ((source.offset() & 0x7) == 0)) {
      c_sdsp(value, source.offset());
      sd(value, source.rm(), source.offset());
 
    flw(target, source.rm(), source.offset());

    fsw(value, source.rm(), source.offset());

    if (v8_flags.riscv_c_extension && ((target.code() & 0b11000) == 0b01000) &&
        ((source.rm().code() & 0b11000) == 0b01000) &&
        is_uint8(source.offset()) && ((source.offset() & 0x7) == 0)) {
      c_fld(target, source.rm(), source.offset());
    } else if (v8_flags.riscv_c_extension && (source.rm() == sp) &&
               is_uint9(source.offset()) && ((source.offset() & 0x7) == 0)) {
      c_fldsp(target, source.offset());
      fld(target, source.rm(), source.offset());

    if (v8_flags.riscv_c_extension && ((value.code() & 0b11000) == 0b01000) &&
        ((source.rm().code() & 0b11000) == 0b01000) &&
        is_uint8(source.offset()) && ((source.offset() & 0x7) == 0)) {
      c_fsd(value, source.rm(), source.offset());
    } else if (v8_flags.riscv_c_extension && (source.rm() == sp) &&
               is_uint9(source.offset()) && ((source.offset() & 0x7) == 0)) {
      c_fsdsp(value, source.offset());
      fsd(value, source.rm(), source.offset());
 
 
  bool is_one_instruction = rs.offset() == 0;
  if (is_one_instruction) {
    lr_w(false, false, rd, rs.rm());

    AddWord(scratch, rs.rm(), rs.offset());
    lr_w(false, false, rd, scratch);

#if V8_TARGET_ARCH_RISCV64
  bool is_one_instruction = rs.offset() == 0;
  if (is_one_instruction) {
    lr_d(false, false, rd, rs.rm());

    Register scratch = temps.Acquire();
    AddWord(scratch, rs.rm(), rs.offset());
    lr_d(false, false, rd, scratch);

  bool is_one_instruction = rs.offset() == 0;
  if (is_one_instruction) {
    sc_w(false, false, rd, rs.rm(), rd);

    AddWord(scratch, rs.rm(), rs.offset());
    sc_w(false, false, rd, scratch, rd);

#if V8_TARGET_ARCH_RISCV64
  bool is_one_instruction = rs.offset() == 0;
  if (is_one_instruction) {
    sc_d(false, false, rd, rs.rm(), rd);

    Register scratch = temps.Acquire();
    AddWord(scratch, rs.rm(), rs.offset());
    sc_d(false, false, rd, scratch, rd);
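
// lr/sc take no offset operand, so a nonzero MemOperand offset must first be
// folded into a scratch register before the load-reserved/store-conditional
// instruction is emitted.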
 
    if (options().isolate_independent_code) {

  int64_t Hi20 = ((value + 0x800) >> 12);
  if (Hi20 == 0 || Lo12 == 0) {

  if (is_int32(value + 0x800)) {
    return RV_li_count(value);

  Li(rd, j.immediate());

    int reverse_count = RV_li_count(~j.immediate(), temps.CanAcquire());
    if (v8_flags.riscv_constant_pool && count >= 4 && reverse_count >= 4) {
#if V8_TARGET_ARCH_RISCV32
#elif V8_TARGET_ARCH_RISCV64
      if ((count - reverse_count) > 1) {
        Li(rd, ~j.immediate());

        Li(rd, j.immediate());

      DCHECK(is_int32(j.immediate()) || is_uint32(j.immediate()));
#if V8_TARGET_ARCH_RISCV64
      li_constant32(rd, int32_t(j.immediate()));
#elif V8_TARGET_ARCH_RISCV32
      li_constant(rd, int32_t(j.immediate()));

      DCHECK(is_uint32(index));
#if V8_TARGET_ARCH_RISCV64
      li_constant32(rd, static_cast<uint32_t>(index));
#elif V8_TARGET_ARCH_RISCV32
      li_constant(rd, index);

      if (j.IsHeapNumberRequest()) {
        immediate = j.immediate_for_heap_number_request();
        immediate = j.immediate();
#if V8_TARGET_ARCH_RISCV64
      if (RecordEntry(static_cast<uint64_t>(index), j.rmode()) ==
          RelocInfoStatus::kMustRecord) {
#elif V8_TARGET_ARCH_RISCV32
      li_constant(rd, immediate);

      immediate = j.immediate();

    li_ptr(rd, immediate);

    li_ptr(rd, j.immediate());

    li_ptr(rd, j.immediate());
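
// li weighs the instruction counts for materializing imm and ~imm; if both
// are expensive and the constant pool is enabled the value is recorded there
// instead, and otherwise the cheaper sequence (possibly loading the inverted
// value and flipping it) is emitted.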
 
 
static RegList s_regs = {s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11};

  int16_t num_to_push = regs.Count();

#define TEST_AND_PUSH_REG(reg)                    \
  if (regs.has(reg)) {                            \
    stack_offset -= kSystemPointerSize;           \
    StoreWord(reg, MemOperand(sp, stack_offset)); \
#define T_REGS(V) V(t6) V(t5) V(t4) V(t3) V(t2) V(t1) V(t0)
#define A_REGS(V) V(a7) V(a6) V(a5) V(a4) V(a3) V(a2) V(a1) V(a0)
  V(s11) V(s10) V(s9) V(s8) V(s7) V(s6) V(s5) V(s4) V(s3) V(s2) V(s1)

  SubWord(sp, sp, Operand(stack_offset));

#undef TEST_AND_PUSH_REG

  int16_t stack_offset = 0;

#define TEST_AND_POP_REG(reg)                    \
  if (regs.has(reg)) {                           \
    LoadWord(reg, MemOperand(sp, stack_offset)); \
    stack_offset += kSystemPointerSize;          \
#define T_REGS(V) V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6)
#define A_REGS(V) V(a0) V(a1) V(a2) V(a3) V(a4) V(a5) V(a6) V(a7)
  V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(s8) V(s9) V(s10) V(s11)

  addi(sp, sp, stack_offset);

#undef TEST_AND_POP_REG
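
// MultiPop walks the register lists in the opposite order from MultiPush
// (t0..t6, a0..a7, s1..s11 instead of the reverse), so registers come back
// off the stack in the same order they were pushed.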
 
  int16_t num_to_push = regs.Count();

  SubWord(sp, sp, Operand(stack_offset));
    if ((regs.bits() & (1 << i)) != 0) {

  int16_t stack_offset = 0;
    if ((regs.bits() & (1 << i)) != 0) {

  addi(sp, sp, stack_offset);

#if V8_TARGET_ARCH_RISCV32
  Register scratch3 = temps.Acquire();

  Add32(scratch1, left_low, right_low);
  Sltu(scratch3, scratch1, left_low);
  Add32(scratch2, left_high, right_high);
  Add32(dst_high, scratch2, scratch3);
  Move(dst_low, scratch1);
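
// AddPair recovers the carry out of the low word with an unsigned compare:
// (low_sum < left_low) is 1 exactly when the 32-bit addition wrapped, and is
// then added into the high word.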
 
void MacroAssembler::SubPair(Register dst_low, Register dst_high,
                             Register left_low, Register left_high,
                             Register right_low, Register right_high,
                             Register scratch1, Register scratch2) {
  Register scratch3 = temps.Acquire();

  Sltu(scratch3, left_low, right_low);
  Sub32(scratch1, left_low, right_low);
  Sub32(scratch2, left_high, right_high);
  Sub32(dst_high, scratch2, scratch3);
  Move(dst_low, scratch1);

void MacroAssembler::AndPair(Register dst_low, Register dst_high,
                             Register left_low, Register left_high,
                             Register right_low, Register right_high) {
  And(dst_low, left_low, right_low);
  And(dst_high, left_high, right_high);

void MacroAssembler::OrPair(Register dst_low, Register dst_high,
                            Register left_low, Register left_high,
                            Register right_low, Register right_high) {
  Or(dst_low, left_low, right_low);
  Or(dst_high, left_high, right_high);

void MacroAssembler::XorPair(Register dst_low, Register dst_high,
                             Register left_low, Register left_high,
                             Register right_low, Register right_high) {
  Xor(dst_low, left_low, right_low);
  Xor(dst_high, left_high, right_high);

void MacroAssembler::MulPair(Register dst_low, Register dst_high,
                             Register left_low, Register left_high,
                             Register right_low, Register right_high,
                             Register scratch1, Register scratch2) {
  Register scratch3 = temps.Acquire();

  if (dst_low == right_low) {
    mv(scratch1, right_low);

  Mul(scratch3, left_low, right_high);
  mulhu(scratch2, left_low, right_low);
  Mul(dst_low, left_low, right_low);
  Add32(scratch2, scratch2, scratch3);

  if (dst_low != right_low) {
    Mul(scratch3, left_high, right_low);
    Mul(scratch3, left_high, scratch1);

  Add32(dst_high, scratch2, scratch3);
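
// Standard 32x32 -> 64 decomposition: high(a * b) = mulhu(a_lo, b_lo)
// + a_lo * b_hi + a_hi * b_lo, with right_low saved in scratch1 up front when
// dst_low would otherwise clobber it.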
 
                             Register src_low, Register src_high,
                             Register shift, Register scratch1,
                             Register scratch2) {
  if (dst_low == src_low) {
    scratch3 = temps.Acquire();
    mv(scratch3, src_low);

  sll(dst_low, src_low, scratch1);
  sll(dst_high, src_high, scratch1);

  Sub32(scratch2, scratch2, scratch1);
  if (dst_low == src_low) {
    srl(scratch1, scratch3, scratch2);
    srl(scratch1, src_low, scratch2);

  Or(dst_high, dst_high, scratch1);

  Branch(&done, eq, scratch1, Operand(zero_reg));
  Move(dst_high, dst_low);
  Move(dst_low, zero_reg);

                             Register src_low, Register src_high, int32_t shift,
                             Register scratch1, Register scratch2) {
    Move(dst_high, src_high);
    Move(dst_low, src_low);
  } else if (shift == 32) {
    Move(dst_high, src_low);
    li(dst_low, Operand(0));
  } else if (shift > 32) {
    li(dst_low, Operand(0));

    Or(dst_high, dst_high, scratch1);

                             Register src_low, Register src_high,
                             Register shift, Register scratch1,
                             Register scratch2) {
  if (dst_high == src_high) {
    scratch3 = temps.Acquire();
    mv(scratch3, src_high);

  srl(dst_high, src_high, scratch1);
  srl(dst_low, src_low, scratch1);

  Sub32(scratch2, scratch2, scratch1);
  if (dst_high == src_high) {
    sll(scratch1, scratch3, scratch2);
    sll(scratch1, src_high, scratch2);

  Or(dst_low, dst_low, scratch1);

  Branch(&done, eq, scratch1, Operand(zero_reg));
  Move(dst_low, dst_high);
  Move(dst_high, zero_reg);
 
                             Register src_low, Register src_high, int32_t shift,
                             Register scratch1, Register scratch2) {
    mv(dst_low, src_high);
    li(dst_high, Operand(0));
  } else if (shift > 32) {
    li(dst_high, Operand(0));
  } else if (shift == 0) {
    Move(dst_low, src_low);
    Move(dst_high, src_high);

    Or(dst_low, dst_low, scratch1);

                             Register src_low, Register src_high,
                             Register shift, Register scratch1,
                             Register scratch2) {
  if (dst_high == src_high) {
    scratch3 = temps.Acquire();
    mv(scratch3, src_high);

  sra(dst_high, src_high, scratch1);
  srl(dst_low, src_low, scratch1);

  Sub32(scratch2, scratch2, scratch1);
  if (dst_high == src_high) {
    sll(scratch1, scratch3, scratch2);
    sll(scratch1, src_high, scratch2);

  Or(dst_low, dst_low, scratch1);

  Branch(&done, eq, scratch1, Operand(zero_reg));
  Move(dst_low, dst_high);
  Sra32(dst_high, dst_high, 31);

                             Register src_low, Register src_high, int32_t shift,
                             Register scratch1, Register scratch2) {
    mv(dst_low, src_low);
    mv(dst_high, src_high);
  } else if (shift < 32) {
    Or(dst_low, dst_low, scratch1);
  } else if (shift == 32) {
    srai(dst_high, src_high, 31);
    mv(dst_low, src_high);

    srai(dst_high, src_high, 31);
 
                                 uint16_t size, bool sign_extend) {
#if V8_TARGET_ARCH_RISCV64
  DCHECK(pos < 64 && 0 < size && size <= 64 && 0 < pos + size &&
#elif V8_TARGET_ARCH_RISCV32

#if V8_TARGET_ARCH_RISCV64
#elif V8_TARGET_ARCH_RISCV32

#if V8_TARGET_ARCH_RISCV64
#if V8_TARGET_ARCH_RISCV64
template <typename CvtFunc>
                                                 CvtFunc fcvt_generator) {
    fcvt_generator(this, rd, fs);

    fcvt_generator(this, rd, fs);

#if V8_TARGET_ARCH_RISCV64
    fcvt_l_d(rd, fs, RTZ);
    blt(rd, scratch, &bad);
    Sub32(scratch, scratch, 1);
    bgt(rd, scratch, &bad);

#if V8_TARGET_ARCH_RISCV64
        masm->fcvt_lu_d(dst, src, RTZ);

      rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
        masm->fcvt_l_d(dst, src, RTZ);

      rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
        masm->fcvt_lu_s(dst, src, RTZ);

void MacroAssembler::Trunc_l_s(Register rd, FPURegister fs, Register result) {
      rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
        masm->fcvt_l_s(dst, src, RTZ);
 
#if V8_TARGET_ARCH_RISCV64
template <typename F>
  Register scratch2 = temps.Acquire();

  DCHECK((std::is_same<float, F>::value) || (std::is_same<double, F>::value));
  DCHECK(!(dst == src && dst == fpu_scratch));

    Register scratch = temps2.Acquire();
    if (std::is_same<F, double>::value) {
      fmv_x_d(scratch, src);

    if (std::is_same<F, double>::value) {

    Register scratch = temps2.Acquire();
    if (std::is_same<F, double>::value) {
      feq_d(scratch, src, src);
      bnez(scratch, &not_NaN);

      feq_s(scratch, src, src);
      bnez(scratch, &not_NaN);

  FPURegister old_src = src;
    DCHECK(fpu_scratch != dst);
    Move(fpu_scratch, src);
    old_src = fpu_scratch;

    Register scratch = temps.Acquire();
    if (std::is_same<F, double>::value) {
      fcvt_l_d(scratch, src, frm);
      fcvt_d_l(dst, scratch, frm);

  if (std::is_same<F, double>::value) {

#elif V8_TARGET_ARCH_RISCV32
  Register scratch2 = temps.Acquire();

  DCHECK(!(dst == src && dst == fpu_scratch));

    Register scratch = temps2.Acquire();

    Register scratch = temps2.Acquire();
    feq_s(scratch, src, src);
    bnez(scratch, &not_NaN);

  FPURegister old_src = src;
    DCHECK(fpu_scratch != dst);
    Move(fpu_scratch, src);
    old_src = fpu_scratch;

    Register scratch = temps.Acquire();

template <typename F>
                                 bool keep_nan_same) {
  VU.set(scratch, std::is_same<F, float>::value ? E32 : E64, m1);

  vmv_vx(v_scratch, zero_reg);
  vsll_vx(v_scratch, src, scratch);
  vsrl_vx(v_scratch, v_scratch, scratch);
  vmslt_vx(v0, v_scratch, scratch);

    vmv_vv(v_scratch, src);
    vfsgnj_vv(dst, dst, v_scratch);
    vfsgnj_vv(dst, dst, src);

  if (!keep_nan_same) {
    vmfeq_vv(v0, src, src);
    if (std::is_same<F, float>::value) {
#ifdef V8_TARGET_ARCH_RISCV64
#elif V8_TARGET_ARCH_RISCV32
 
#if V8_TARGET_ARCH_RISCV64
                              FPURegister fpu_scratch) {

                               FPURegister fpu_scratch) {

                               FPURegister fpu_scratch) {

#if V8_TARGET_ARCH_RISCV64
#elif V8_TARGET_ARCH_RISCV32

#if V8_TARGET_ARCH_RISCV64
#elif V8_TARGET_ARCH_RISCV32

#if V8_TARGET_ARCH_RISCV64
#elif V8_TARGET_ARCH_RISCV32

#if V8_TARGET_ARCH_RISCV64
#elif V8_TARGET_ARCH_RISCV32

      feq_s(rd, cmp1, cmp2);

      feq_s(rd, cmp1, cmp2);
      flt_s(rd, cmp1, cmp2);
      fle_s(rd, cmp2, cmp1);
      fle_s(rd, cmp1, cmp2);
      flt_s(rd, cmp2, cmp1);

      feq_d(rd, cmp1, cmp2);

      feq_d(rd, cmp1, cmp2);
      flt_d(rd, cmp1, cmp2);
      fle_d(rd, cmp2, cmp1);
      fle_d(rd, cmp1, cmp2);
      flt_d(rd, cmp2, cmp1);

  feq_s(rd, cmp1, cmp1);
  feq_s(scratch, cmp2, cmp2);
  And(rd, rd, scratch);

  feq_d(rd, cmp1, cmp1);
  feq_d(scratch, cmp2, cmp2);
  And(rd, rd, scratch);
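
// CompareIsNanF32/F64 compute !isnan(x) for each input via feq(x, x) and
// combine the results with And, so rd is 1 exactly when neither operand is
// NaN.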
 
 
#if V8_TARGET_ARCH_RISCV64
  DCHECK(src_high != scratch2 && src_high != scratch);

  fmv_x_d(scratch, dst);
  slli(scratch2, src_high, 32);
  slli(scratch, scratch, 32);
  srli(scratch, scratch, 32);
  or_(scratch, scratch, scratch2);
  fmv_d_x(dst, scratch);
#elif V8_TARGET_ARCH_RISCV32

#if V8_TARGET_ARCH_RISCV64
  DCHECK(src_low != scratch && src_low != scratch2);
  fmv_x_d(scratch, dst);
  slli(scratch2, src_low, 32);
  srli(scratch2, scratch2, 32);
  srli(scratch, scratch, 32);
  slli(scratch, scratch, 32);
  or_(scratch, scratch, scratch2);
  fmv_d_x(dst, scratch);
#elif V8_TARGET_ARCH_RISCV32

        li(scratch, Operand(static_cast<int32_t>(src)));

#if V8_TARGET_ARCH_RISCV64
      fcvt_d_l(dst, zero_reg);

        fcvt_d_l(dst, zero_reg);

        fmv_d_x(dst, scratch);

#elif V8_TARGET_ARCH_RISCV32
        uint32_t low_32 = src & 0xffffffffull;
        uint32_t up_32 = src >> 32;

        li(scratch, Operand(static_cast<int32_t>(low_32)));

        li(scratch, Operand(static_cast<int32_t>(up_32)));

    neg(scratch, scratch);
    and_(dest, dest, scratch);

    neg(scratch, scratch);
    and_(dest, dest, scratch);
 
 
#if V8_TARGET_ARCH_RISCV64
    Label L0, L1, L2, L3, L4;

#if V8_TARGET_ARCH_RISCV64
#elif V8_TARGET_ARCH_RISCV32

#if V8_TARGET_ARCH_RISCV64
    Label L0, L1, L2, L3, L4, L5;

    Register y = temps.Acquire();
    Register n = temps.Acquire();

#if V8_TARGET_ARCH_RISCV64
      AddWord(scratch, rs, -1);
      Xor(rd, scratch, rs);
      And(rd, rd, scratch);

      Sub32(rd, scratch, rd);

#if V8_TARGET_ARCH_RISCV64
      Register scratch = temps.Acquire();
      AddWord(scratch, rs, -1);
      Xor(rd, scratch, rs);
      And(rd, rd, scratch);

      Register scratch = temps.Acquire();
      SubWord(rd, scratch, rd);
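
// Ctz via the identity ((rs - 1) ^ rs) & (rs - 1): this turns the trailing
// zeros into a block of trailing ones; counting that block's leading zeros
// and subtracting from the word width yields the trailing-zero count.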
 
#if V8_TARGET_ARCH_RISCV64
    uint32_t shift = 24;

    DCHECK((rd != value) && (rs != value));
    li(value, 0x01010101);
    li(scratch2, 0x55555555);
    Srl32(scratch, rs, 1);
    And(scratch, scratch, scratch2);
    Sub32(scratch, rs, scratch);
    li(scratch2, 0x33333333);
    slli(rd, scratch2, 4);
    or_(scratch2, scratch2, rd);
    And(rd, scratch, scratch2);
    Srl32(scratch, scratch, 2);
    And(scratch, scratch, scratch2);
    Add32(scratch, rd, scratch);
    Srl32(rd, scratch, 4);
    Add32(rd, rd, scratch);

    Mul32(scratch2, value, scratch2);
    And(rd, rd, scratch2);
    Mul32(rd, rd, value);
    Srl32(rd, rd, shift);
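
// Classic SWAR popcount in the Hacker's Delight style: pairwise bit sums,
// then 2-bit and 4-bit field sums, and a final multiply by 0x01010101 that
// accumulates the byte counts into the top byte, extracted by the shift.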
 
 
#if V8_TARGET_ARCH_RISCV64
    uint64_t shift = 24;

    Register scratch2 = temps.Acquire();
    Register value = temps.Acquire();
    DCHECK((rd != value) && (rs != value));
    li(value, 0x1111111111111111l);
    Mul64(scratch2, value, scratch2);
    Srl64(scratch, rs, 1);
    And(scratch, scratch, scratch2);
    SubWord(scratch, rs, scratch);
    Mul64(scratch2, value, scratch2);
    And(rd, scratch, scratch2);
    Srl64(scratch, scratch, 2);
    And(scratch, scratch, scratch2);
    AddWord(scratch, rd, scratch);
    Srl64(rd, scratch, 4);
    AddWord(rd, rd, scratch);

    li(value, 0x0101010101010101l);
    Mul64(scratch2, value, scratch2);
    And(rd, rd, scratch2);
    Mul64(rd, rd, value);

  fsd(double_input, sp, 0);
#if V8_ENABLE_WEBASSEMBLY
  if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {

#define BRANCH_ARGS_CHECK(cond, rs, rt)                                  \
  DCHECK((cond == cc_always && rs == zero_reg && rt.rm() == zero_reg) || \
         (cond != cc_always && (rs != zero_reg || rt.rm() != zero_reg)))
 
  if (L->is_bound()) {

  if (L->is_bound()) {

    Sll32(scratch0, r1, 0);
        Sll32(scratch1, r2.rm(), 0);
        Sll32(scratch1, scratch1, 0);

  if (!is_near(L, bits)) return false;

  if (!is_near(L, bits)) return false;

    if (rt.immediate() == 0) {

        if (rt.is_reg() && rs == rt.rm()) {
        if (rt.is_reg() && rs == rt.rm()) {
        if (rt.is_reg() && rs == rt.rm()) {
        if (rt.is_reg() && rs == rt.rm()) {
        if (rt.is_reg() && rs == rt.rm()) {
        if (rt.is_reg() && rs == rt.rm()) {
        if (rt.is_reg() && rs == rt.rm()) {
        if (rt.is_reg() && rs == rt.rm()) {
        if (rt.is_reg() && rs == rt.rm()) {
        if (rt.is_reg() && rs == rt.rm()) {

      case Condition::overflow:
      case Condition::no_overflow:
 
 
                                 const Operand& rt) {

void MacroAssembler::BranchAndLink(int32_t offset) {

void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
                                   const Operand& rt) {

void MacroAssembler::BranchAndLink(Label* L) {
  if (L->is_bound()) {

void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
                                   const Operand& rt) {
  if (L->is_bound()) {

  if (!is_near(L, OffsetSize::kOffset21)) return false;

                                            int constant_index) {

    if (options().enable_root_relative_access) {

  li(scratch, reference);

    li(t6, Operand(target, rmode));
    Jump(t6, al, zero_reg, Operand(zero_reg));

                          Register rs, const Operand& rt) {
  Jump(static_cast<intptr_t>(target), rmode, cond, rs, rt);

  if (isolate()->builtins()->IsBuiltinHandle(code, &builtin)) {
    Jump(code.address(), rmode, cond);

#if V8_TARGET_ARCH_RISCV64
    Register temp = temps.Acquire();

                                     unsigned higher_limit,
                                     Label* on_in_range) {
  if (lower_limit != 0) {
    SubWord(scratch, value, Operand(lower_limit));
           Operand(higher_limit - lower_limit));

           Operand(higher_limit - lower_limit));
 
 
 5126  offset -= 
reinterpret_cast<int64_t
>(
pc);
 
 
 5139    li(t6, Operand(
static_cast<intptr_t
>(target), rmode), 
ADDRESS_LOAD);
 
 5152  if (
isolate()->builtins()->IsBuiltinHandle(code, &builtin)) {
 
 5171    Call(code.address(), rmode);
 
 5177#if V8_TARGET_ARCH_RISCV64 
 5179#elif V8_TARGET_ARCH_RISCV32 
 5189           MemOperand(target, IsolateData::builtin_entry_table_offset()));
 
 
 5200  switch (
options().builtin_call_jump_mode) {
 
 5215      if (
options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
 
 5220                        static_cast<int32_t
>(index));
 
 
 5242  switch (
options().builtin_call_jump_mode) {
 
 5257      if (
options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
 
 5262                        static_cast<int32_t
>(index));
 
 
#if V8_TARGET_ARCH_RISCV64

#elif V8_TARGET_ARCH_RISCV32

  *reinterpret_cast<uintptr_t*>(pc_) = target;
  pc_ += sizeof(uintptr_t);

  int kNumInstructionsToJump = 5;
  if (v8_flags.riscv_c_extension) kNumInstructionsToJump = 4;

       (kNumInstructionsToJump + 1) *
 
 
#ifdef V8_TARGET_ARCH_RISCV32

  uint64_t frame_alignment_mask = ~(static_cast<uint64_t>(frame_alignment) - 1);

    int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
    int32_t Lo12 = (int32_t)offset << 20 >> 20;

    addi(dst, dst, Lo12);
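// A minimal sketch (not from this file) of the Hi20/Lo12 split above. RISC-V
// materializes a 32-bit offset as auipc/lui (upper 20 bits) plus addi (low
// 12 bits). Since addi sign-extends its immediate, 0x800 is added before
// shifting so the two halves recombine exactly:
#include <cstdint>
static inline void SplitHi20Lo12(int32_t offset, int32_t* hi20, int32_t* lo12) {
  *hi20 = (offset + 0x800) >> 12;  // upper 20 bits, rounded toward the carry
  *lo12 = (offset << 20) >> 20;    // low 12 bits, sign-extended
  // Invariant: (*hi20 << 12) + *lo12 == offset (barring int32 overflow at
  // the extreme positive end of the range).
}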
 
 
                            int case_value_base, Label** labels,

  Label fallthrough, jump_table;
  if (case_value_base != 0) {
    SubWord(value, value, Operand(case_value_base));

  Branch(&fallthrough, Condition::Ugreater_equal, value, Operand(num_labels));

  int instructions_per_label_area =

  for (int i = 0; i < num_labels; ++i) {
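// A minimal sketch (not from this file) of the dispatch pattern in the
// Switch fragments above: rebase the selector, bounds-check it with one
// unsigned compare, then jump through a table. Handler and the parameters
// are illustrative stand-ins.
#include <cstddef>
using Handler = void (*)();
static void Dispatch(int value, int case_value_base, Handler* labels,
                     size_t num_labels, Handler fallthrough) {
  size_t index = static_cast<size_t>(value - case_value_base);
  if (index >= num_labels) {
    fallthrough();  // out of range, like the Ugreater_equal branch above
    return;
  }
  labels[index]();  // jump-table entry
}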
 
 
  li(scratch, Operand(static_cast<uint32_t>(index.ptr())));

    Mv(scratch, zero_reg);

    AddWord(scratch, scratch, Operand(1));

    AddWord(scratch, scratch, Operand(-1));

  LoadWord(handler, MemOperand(handler_address));
 
 
  DCHECK(stack_overflow != nullptr || done != nullptr);

  SubWord(scratch1, sp, scratch1);

  if (stack_overflow != nullptr) {

  } else if (done != nullptr) {
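// A minimal sketch (not from this file) of the shape of the stack check
// above: SubWord computes the slack between sp and the stack limit, which
// is then compared against the space the arguments need. All names here
// are illustrative.
#include <cstdint>
static inline bool WouldOverflowStack(uintptr_t sp, uintptr_t stack_limit,
                                      uintptr_t num_args, uintptr_t slot_size) {
  uintptr_t slack = sp - stack_limit;  // bytes available before overflow
  return num_args * slot_size >= slack;
}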
 
 
  Label regular_invoke;

  DCHECK_EQ(expected_parameter_count, a2);

  SubWord(expected_parameter_count, expected_parameter_count,
          actual_parameter_count);
  Branch(&regular_invoke, le, expected_parameter_count, Operand(zero_reg));

  Label stack_overflow;

                       temps.Acquire(), &stack_overflow);

    Move(t0, actual_parameter_count);

  LoadRoot(t0, RootIndex::kUndefinedValue);

    SubWord(expected_parameter_count, expected_parameter_count, Operand(1));

  bind(&stack_overflow);

  bind(&regular_invoke);
 
 
    Register expected_parameter_count_or_dispatch_handle,

                     expected_parameter_count_or_dispatch_handle,
                     actual_parameter_count));

  SmiTag(expected_parameter_count_or_dispatch_handle);
  SmiTag(actual_parameter_count);
  Push(expected_parameter_count_or_dispatch_handle, actual_parameter_count);

  Pop(expected_parameter_count_or_dispatch_handle, actual_parameter_count);

  SmiUntag(expected_parameter_count_or_dispatch_handle);

#if defined(V8_ENABLE_LEAPTIERING) && defined(V8_TARGET_ARCH_RISCV64)

                     argument_adaption_mode);
 
    Register function, Register new_target, Register actual_parameter_count,

    Register function, Register new_target, Register actual_parameter_count,

  Label debug_hook, continue_after_hook;
  Register scratch = s1;

       ExternalReference::debug_hook_on_function_call_address(isolate()));

  bind(&continue_after_hook);

    LoadRoot(a3, RootIndex::kUndefinedValue);

    Register expected_parameter_count = a2;
    LoadParameterCountFromJSDispatchTable(expected_parameter_count,
                                          dispatch_handle, scratch);
    InvokePrologue(expected_parameter_count, actual_parameter_count, type);

                                    dispatch_handle, scratch);

                          actual_parameter_count);
  Branch(&continue_after_hook);

                     actual_parameter_count, type);
 
 
  Register expected_parameter_count = a2;

    Lhu(expected_parameter_count,

                        SharedFunctionInfo::kFormalParameterCountOffset));

                     actual_parameter_count, type);
 
 
  Label debug_hook, continue_after_hook;

       ExternalReference::debug_hook_on_function_call_address(isolate()));

  bind(&continue_after_hook);

    LoadRoot(a3, RootIndex::kUndefinedValue);

  InvokePrologue(expected_parameter_count, actual_parameter_count, type);

  constexpr int unused_argument_count = 0;

                          actual_parameter_count);
  Branch(&continue_after_hook);
 
 
  SubWord(range, type_reg, Operand(lower_limit));
 
 
  vmseq_vv(v0, lhs, rhs);

  vmsne_vv(v0, lhs, rhs);

  vmsle_vv(v0, rhs, lhs);

  vmsleu_vv(v0, rhs, lhs);

  vmslt_vv(v0, rhs, lhs);

  vmsltu_vv(v0, rhs, lhs);
 
 
#if V8_TARGET_ARCH_RISCV64

  memcpy(vals, imms, sizeof(vals));

#elif V8_TARGET_ARCH_RISCV32

  memcpy(vals, imms, sizeof(vals));
 
  } else if (ts == 16) {

  } else if (ts == 32) {
    Load32U(kScratchReg2, src, std::forward<Trapper>(trapper));

  } else if (ts == 64) {
#if V8_TARGET_ARCH_RISCV64
    LoadWord(kScratchReg2, src, std::forward<Trapper>(trapper));

#elif V8_TARGET_ARCH_RISCV32
 
  } else if (sz == 16) {

  } else if (sz == 32) {

#if V8_TARGET_ARCH_RISCV64

    StoreWord(kScratchReg, dst, std::forward<Trapper>(trapper));
#elif V8_TARGET_ARCH_RISCV32
 
#if V8_TARGET_ARCH_RISCV64
void MacroAssembler::AddOverflow64(Register dst, Register left,
                                   const Operand& right, Register overflow) {
  UseScratchRegisterScope temps(this);
  Register right_reg = no_reg;
  Register scratch = temps.Acquire();
  Register scratch2 = temps.Acquire();
  if (!right.is_reg()) {
    li(scratch, Operand(right));
    right_reg = scratch;
  } else {
    right_reg = right.rm();
  }
  DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
         overflow != scratch2);
  DCHECK(overflow != left && overflow != right_reg);
  if (dst == left || dst == right_reg) {
    add(scratch2, left, right_reg);
    xor_(overflow, scratch2, left);
    xor_(scratch, scratch2, right_reg);
    and_(overflow, overflow, scratch);
    Mv(dst, scratch2);
  } else {
    add(dst, left, right_reg);
    xor_(overflow, dst, left);
    xor_(scratch, dst, right_reg);
    and_(overflow, overflow, scratch);
  }
}
 
void MacroAssembler::SubOverflow64(Register dst, Register left,
                                   const Operand& right, Register overflow) {
  UseScratchRegisterScope temps(this);
  Register right_reg = no_reg;
  Register scratch = temps.Acquire();
  Register scratch2 = temps.Acquire();
  if (!right.is_reg()) {
    li(scratch, Operand(right));
    right_reg = scratch;
  } else {
    right_reg = right.rm();
  }
  DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
         overflow != scratch2);
  DCHECK(overflow != left && overflow != right_reg);
  if (dst == left || dst == right_reg) {
    sub(scratch2, left, right_reg);
    xor_(overflow, left, scratch2);
    xor_(scratch, left, right_reg);
    and_(overflow, overflow, scratch);
    Mv(dst, scratch2);
  } else {
    sub(dst, left, right_reg);
    xor_(overflow, left, dst);
    xor_(scratch, left, right_reg);
    and_(overflow, overflow, scratch);
  }
}
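// A minimal sketch (not from this file) of the branch-free overflow checks
// implemented above. For s = x + y, signed overflow occurred iff the sign of
// s differs from the signs of both operands; for d = x - y, iff the operands'
// signs differ and d's sign differs from x's. Both reduce to one sign-bit
// test, exactly the xor_/and_ sequences emitted by AddOverflow64 and
// SubOverflow64:
#include <cstdint>
static inline bool AddOverflows(int64_t x, int64_t y) {
  int64_t s = static_cast<int64_t>(
      static_cast<uint64_t>(x) + static_cast<uint64_t>(y));
  return ((s ^ x) & (s ^ y)) < 0;  // sign bit set => overflow
}
static inline bool SubOverflows(int64_t x, int64_t y) {
  int64_t d = static_cast<int64_t>(
      static_cast<uint64_t>(x) - static_cast<uint64_t>(y));
  return ((x ^ y) & (x ^ d)) < 0;  // sign bit set => overflow
}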
 
                                   const Operand& right, Register overflow,
                                   bool sign_extend_inputs) {

  Register scratch = temps.Acquire();
  if (!right.is_reg()) {
    if (!right.IsHeapNumberRequest()) {

      imm = static_cast<int32_t>(right.immediate() & 0xFFFFFFFFU);
      li(scratch, Operand(imm));

      li(scratch, Operand(right));

    right_reg = scratch;

    right_reg = right.rm();

  DCHECK(overflow != left && overflow != right_reg);
  if (sign_extend_inputs) {
    sext_w(overflow, left);
    if (right.is_reg()) {
      sext_w(scratch, right_reg);

    if (right.is_reg()) {

  mul(overflow, rs1, rs2);
  sext_w(dst, overflow);
  xor_(overflow, overflow, dst);
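// A minimal sketch (not from this file) of the check above: multiply in 64
// bits, truncate-and-sign-extend back to 32 (sext_w), and compare. The
// product fits in int32 exactly when the sign extension is lossless:
#include <cstdint>
static inline bool MulOverflows32(int32_t x, int32_t y) {
  int64_t product = static_cast<int64_t>(x) * static_cast<int64_t>(y);
  return product != static_cast<int32_t>(product);
}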
 
void MacroAssembler::MulOverflow64(Register dst, Register left,
                                   const Operand& right, Register overflow) {
  UseScratchRegisterScope temps(this);
  Register right_reg = no_reg;
  Register scratch = temps.Acquire();
  Register scratch2 = temps.Acquire();
  if (!right.is_reg()) {
    li(scratch, Operand(right));
    right_reg = scratch;
  } else {
    right_reg = right.rm();
  }
  DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
         overflow != scratch2);
  DCHECK(overflow != left && overflow != right_reg);
  // The product overflows iff the high 64 bits (mulh) are not just the sign
  // extension of the low 64 bits (mul).
  mulh(scratch2, left, right_reg);
  mul(dst, left, right_reg);
  srai(overflow, dst, 63);
  xor_(overflow, overflow, scratch2);
}
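// A minimal sketch (not from this file) of the mulh-based check above, using
// the compiler's 128-bit type in place of the paired mul/mulh instructions
// (__int128 is a GCC/Clang extension):
#include <cstdint>
static inline bool MulOverflows64(int64_t x, int64_t y) {
  __int128 product = static_cast<__int128>(x) * y;
  int64_t low = static_cast<int64_t>(product);
  int64_t high = static_cast<int64_t>(product >> 64);
  return high != (low >> 63);  // high word must be the low word's sign fill
}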
 
#elif V8_TARGET_ARCH_RISCV32
void MacroAssembler::AddOverflow(Register dst, Register left,
                                 const Operand& right, Register overflow) {
  UseScratchRegisterScope temps(this);
  Register right_reg = no_reg;
  Register scratch = temps.Acquire();
  Register scratch2 = temps.Acquire();
  if (!right.is_reg()) {
    li(scratch, Operand(right));
    right_reg = scratch;
  } else {
    right_reg = right.rm();
  }
  DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
         overflow != scratch2);
  DCHECK(overflow != left && overflow != right_reg);
  if (dst == left || dst == right_reg) {
    add(scratch2, left, right_reg);
    xor_(overflow, scratch2, left);
    xor_(scratch, scratch2, right_reg);
    and_(overflow, overflow, scratch);
    Mv(dst, scratch2);
  } else {
    add(dst, left, right_reg);
    xor_(overflow, dst, left);
    xor_(scratch, dst, right_reg);
    and_(overflow, overflow, scratch);
  }
}
 
void MacroAssembler::SubOverflow(Register dst, Register left,
                                 const Operand& right, Register overflow) {
  UseScratchRegisterScope temps(this);
  Register right_reg = no_reg;
  Register scratch = temps.Acquire();
  Register scratch2 = temps.Acquire();
  if (!right.is_reg()) {
    li(scratch, Operand(right));
    right_reg = scratch;
  } else {
    right_reg = right.rm();
  }
  DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
         overflow != scratch2);
  DCHECK(overflow != left && overflow != right_reg);
  if (dst == left || dst == right_reg) {
    sub(scratch2, left, right_reg);
    xor_(overflow, left, scratch2);
    xor_(scratch, left, right_reg);
    and_(overflow, overflow, scratch);
    Mv(dst, scratch2);
  } else {
    sub(dst, left, right_reg);
    xor_(overflow, left, dst);
    xor_(scratch, left, right_reg);
    and_(overflow, overflow, scratch);
  }
}
 
void MacroAssembler::MulOverflow32(Register dst, Register left,
                                   const Operand& right, Register overflow,
                                   bool sign_extend_inputs) {
  UseScratchRegisterScope temps(this);
  Register right_reg = no_reg;
  Register scratch = temps.Acquire();
  Register scratch2 = temps.Acquire();
  if (!right.is_reg()) {
    li(scratch, Operand(right));
    right_reg = scratch;
  } else {
    right_reg = right.rm();
  }
  DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
         overflow != scratch2);
  DCHECK(overflow != left && overflow != right_reg);
  mulh(overflow, left, right_reg);
  mul(dst, left, right_reg);
  srai(scratch2, dst, 31);
  xor_(overflow, overflow, scratch2);
}
 
                                 int num_arguments) {

  bool switch_to_central = options().is_wasm;

  if (function->nargs >= 0) {

                                             bool builtin_exit_frame) {
 
 
                                   Label* target_if_cleared) {

    Add32(scratch1, scratch1, Operand(value));

    Sub32(scratch1, scratch1, Operand(value));
 
 
  Branch(&ok, kEqual, map_tmp, RootIndex::kHeapNumberMap);

  Branch(&ok, kEqual, object, RootIndex::kUndefinedValue);

  Abort(abort_reason);
 
 
#ifdef V8_ENABLE_DEBUG_CODE

  if (!v8_flags.slow_debug_code) return;

  Assert(Condition::ule, AbortReason::k32BitValueInRegisterIsNotZeroExtended,

  if (!v8_flags.slow_debug_code) return;

  Assert(Condition::le, AbortReason::k32BitValueInRegisterIsNotSignExtended,
         int32_register, Operand(kMaxInt));
  Assert(Condition::ge, AbortReason::k32BitValueInRegisterIsNotSignExtended,
         int32_register, Operand(kMinInt));
 
                                 Register value, Register scratch,
                                 unsigned lower_limit, unsigned higher_limit) {

  BranchRange(&ok, cond, value, scratch, lower_limit, higher_limit);
 
    li(a0, Operand(static_cast<int>(reason)));
    li(a1, ExternalReference::abort_with_reason());

    static const int kExpectedAbortInstructions = 10;

    DCHECK_LE(abort_instructions, kExpectedAbortInstructions);
    while (abort_instructions++ < kExpectedAbortInstructions) {
 
 
    if (map == temp_type_reg) {

      temp_type_reg = temps.Acquire();

    Assert(eq, AbortReason::kUnexpectedValue, temp_type_reg, Operand(MAP_TYPE));

               dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
 
 
  Label fallthrough, clear_slot;

  LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);

    Load32U(scratch, FieldMemOperand(scratch_and_result, Code::kFlagsOffset));

    if (min_opt_level == CodeKind::TURBOFAN_JS) {

      Load32U(scratch, FieldMemOperand(scratch_and_result, Code::kFlagsOffset));

      DCHECK_EQ(min_opt_level, CodeKind::MAGLEV);

  Move(scratch_and_result, zero_reg);
 
 
#if V8_ENABLE_WEBASSEMBLY
  if (type == StackFrame::WASM || type == StackFrame::WASM_LIFTOFF_SETUP) {

  DCHECK(frame_type == StackFrame::EXIT ||
         frame_type == StackFrame::BUILTIN_EXIT ||
         frame_type == StackFrame::API_ACCESSOR_EXIT ||
         frame_type == StackFrame::API_CALLBACK_EXIT);
 
  ER c_entry_fp_address =
      ER::Create(IsolateAddressId::kCEntryFPAddress, isolate());

  ER context_address = ER::Create(IsolateAddressId::kContextAddress, isolate());

  if (frame_alignment > 0) {
 
 
  ER context_address = ER::Create(IsolateAddressId::kContextAddress, isolate());

  ER c_entry_fp_address =
      ER::Create(IsolateAddressId::kCEntryFPAddress, isolate());
 
 
#if V8_HOST_ARCH_RISCV32 || V8_HOST_ARCH_RISCV64

  return v8_flags.sim_stack_alignment;

    const int frame_alignment_mask = frame_alignment - 1;

      Label alignment_as_expected;

        andi(scratch, sp, frame_alignment_mask);

      bind(&alignment_as_expected);
 
 
  if (v8_flags.enable_slow_asserts) {

  Register scratch = temps.Acquire();

  Branch(smi_label, eq, scratch, Operand(zero_reg), distance);

  Register scratch = temps.Acquire();

    if (std::optional<RootIndex> expected =
 
 
  CHECK(cc == Condition::kUnsignedLessThan ||
        cc == Condition::kUnsignedGreaterThanEqual);

    LoadMap(scratch, heap_object);

    Branch(&ok, Condition::kUnsignedLessThanEqual, scratch,
           Operand(LAST_JS_RECEIVER_TYPE - FIRST_JS_RECEIVER_TYPE));

    LoadMap(scratch, heap_object);

    Branch(&ok, Condition::kUnsignedLessThanEqual, scratch,
           Operand(LAST_PRIMITIVE_HEAP_OBJECT_TYPE -
                   FIRST_PRIMITIVE_HEAP_OBJECT_TYPE));

    Abort(AbortReason::kInvalidReceiver);

    static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
 
 
    Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, scratch,

    And(scratch, scratch, Operand(Map::Bits1::IsConstructorBit::kMask));
    Check(ne, AbortReason::kOperandIsNotAConstructor, scratch,

    Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, scratch,

          Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));

  AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAFunction);

    Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, scratch,

    Check(eq, AbortReason::kOperandIsNotABoundFunction, scratch,
          Operand(JS_BOUND_FUNCTION_TYPE));
 
 
#ifdef V8_ENABLE_DEBUG_CODE

#if V8_TARGET_ARCH_RISCV64

  Push(object, zero_reg);

    Register scratch = temps.Acquire();

  Srl64(object, object, Operand(32));
  Sll64(object, object, Operand(32));

  Check(kEqual, AbortReason::kObjectNotTagged, object,

  Pop(object, zero_reg);

  Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, scratch,

      Uless_equal, AbortReason::kOperandIsNotAGeneratorObject, scratch,
      Operand(LAST_JS_GENERATOR_OBJECT_TYPE - FIRST_JS_GENERATOR_OBJECT_TYPE));
 
 
    Label done_checking;

    LoadRoot(scratch, RootIndex::kUndefinedValue);

    Assert(eq, AbortReason::kExpectedUndefinedOrCell, scratch,
           Operand(ALLOCATION_SITE_TYPE));
    bind(&done_checking);
 
 
template <typename F_TYPE>

  DCHECK((std::is_same<F_TYPE, float>::value) ||
         (std::is_same<F_TYPE, double>::value));

  if (src1 == src2 && dst != src1) {
    if (std::is_same<float, F_TYPE>::value) {

  if (std::is_same<float, F_TYPE>::value) {

    if (std::is_same<float, F_TYPE>::value) {

    if (std::is_same<float, F_TYPE>::value) {

  if (std::is_same<float, F_TYPE>::value) {
 
 
                                               int num_fp_arguments) {
  int stack_passed_dwords = 0;

  return stack_passed_dwords;

                                          int num_double_arguments,

  int stack_passed_arguments =
 
                                  int num_reg_arguments,
                                  int num_double_arguments,

                                  Label* return_location) {

                             set_isolate_data_slots, return_location);

                                  int num_double_arguments,

                                  Label* return_location) {

                             set_isolate_data_slots, return_location);

                                  Label* return_location) {
  return CallCFunction(function, num_arguments, 0, set_isolate_data_slots,

                                  Label* return_location) {
  return CallCFunction(function, num_arguments, 0, set_isolate_data_slots,
 
 
    Register function, int num_reg_arguments, int num_double_arguments,

#if V8_HOST_ARCH_RISCV32 || V8_HOST_ARCH_RISCV64

    int frame_alignment_mask = frame_alignment - 1;

      Label alignment_as_expected;

        And(scratch, sp, Operand(frame_alignment_mask));

      bind(&alignment_as_expected);

      if (function != t6) {

      StoreWord(pc_scratch,

  if (return_location) bind(return_location);

  int stack_passed_arguments =

  return call_pc_offset;
 
 
#undef BRANCH_ARGS_CHECK

                                   Label* condition_met) {

  RegList regs = {reg1, reg2, reg3, reg4, reg5, reg6};

    if (regs.has(candidate)) continue;

    SubWord(dst, dst, pc);
 
 
#ifdef V8_ENABLE_LEAPTIERING

    Label not_deoptimized;

    Abort(AbortReason::kInvalidDeoptimizedCode);
    bind(&not_deoptimized);

#ifdef V8_ENABLE_SANDBOX
  LoadCodeEntrypointViaCodePointer(

#ifdef V8_ENABLE_SANDBOX
 
#ifdef V8_TARGET_ARCH_RISCV64

                                    [[maybe_unused]] uint16_t argument_count) {

#ifdef V8_ENABLE_LEAPTIERING

  Register scratch = temps.Acquire();

  LoadEntrypointAndParameterCountFromJSDispatchTable(code, parameter_count,
                                                     dispatch_handle, scratch);

#elif V8_ENABLE_SANDBOX

  LoadCodeEntrypointViaCodePointer(

                                    uint16_t argument_count) {

#if V8_ENABLE_LEAPTIERING

  LoadEntrypointFromJSDispatchTable(code, dispatch_handle, scratch);

#if V8_ENABLE_LEAPTIERING

                                         uint16_t argument_count) {

  static_assert(!JSDispatchTable::kSupportsCompaction);
  LoadEntrypointFromJSDispatchTable(code, dispatch_handle, scratch);

#ifdef V8_ENABLE_LEAPTIERING

  LoadEntrypointFromJSDispatchTable(code, dispatch_handle, scratch);
 
 
#ifdef V8_ENABLE_WEBASSEMBLY
void MacroAssembler::ResolveWasmCodePointer(Register target,
                                            uint64_t signature_hash) {

      ExternalReference::wasm_code_pointer_table();

  Register scratch = temps.Acquire();
  li(scratch, global_jump_table);

#ifdef V8_ENABLE_SANDBOX

      MemOperand(target, wasm::WasmCodePointerTable::kOffsetOfSignatureHash));

  SbxCheck(Condition::kEqual, AbortReason::kWasmSignatureMismatch, scratch,

void MacroAssembler::CallWasmCodePointer(Register target,
                                         uint64_t signature_hash,

  ResolveWasmCodePointer(target, signature_hash);

void MacroAssembler::CallWasmCodePointerNoSignatureCheck(Register target) {
  ExternalReference global_jump_table =
      ExternalReference::wasm_code_pointer_table();

  Register scratch = temps.Acquire();
  li(scratch, global_jump_table);
  constexpr unsigned int kEntrySizeLog2 =
      std::bit_width(sizeof(wasm::WasmCodePointerTableEntry)) - 1;

void MacroAssembler::LoadWasmCodePointer(Register dst, MemOperand src) {
  static_assert(sizeof(WasmCodePointer) == 4);
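// A minimal sketch (not from this file) of the kEntrySizeLog2 computation
// above: for a power-of-two entry size, std::bit_width(n) - 1 is exactly
// log2(n), giving the shift amount for scaling an index into the table.
#include <bit>
static_assert(std::bit_width(8u) - 1 == 3);   // 8-byte entries  -> shift by 3
static_assert(std::bit_width(16u) - 1 == 4);  // 16-byte entries -> shift by 4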
 
#ifdef V8_ENABLE_LEAPTIERING
void MacroAssembler::LoadEntrypointFromJSDispatchTable(Register destination,
                                                       Register dispatch_handle,

  li(scratch, ExternalReference::js_dispatch_table_address());
#ifdef V8_TARGET_ARCH_RISCV32
  static_assert(kJSDispatchHandleShift == 0);

  srli(index, dispatch_handle, kJSDispatchHandleShift);

  AddWord(scratch, scratch, index);

           MemOperand(scratch, JSDispatchEntry::kEntrypointOffset));

void MacroAssembler::LoadEntrypointFromJSDispatchTable(

  li(scratch, ExternalReference::js_dispatch_table_address());

  static_assert(!JSDispatchTable::kSupportsCompaction);
  int offset = JSDispatchTable::OffsetOfEntry(dispatch_handle) +
               JSDispatchEntry::kEntrypointOffset;
 
#ifdef V8_TARGET_ARCH_RISCV64
void MacroAssembler::LoadParameterCountFromJSDispatchTable(
    Register destination, Register dispatch_handle, Register scratch) {

  srli(index, dispatch_handle, kJSDispatchHandleShift);

  li(scratch, ExternalReference::js_dispatch_table_address());
  AddWord(scratch, scratch, index);
  static_assert(JSDispatchEntry::kParameterCountMask == 0xffff);

void MacroAssembler::LoadEntrypointAndParameterCountFromJSDispatchTable(
    Register entrypoint, Register parameter_count, Register dispatch_handle,

  li(scratch, ExternalReference::js_dispatch_table_address());
  srli(index, dispatch_handle, kJSDispatchHandleShift);

  AddWord(scratch, scratch, index);
  LoadWord(entrypoint, MemOperand(scratch, JSDispatchEntry::kEntrypointOffset));
  static_assert(JSDispatchEntry::kParameterCountMask == 0xffff);
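// A minimal sketch (not from this file) of the dispatch-table lookup pattern
// above: a handle becomes a table index, the index is scaled to an entry
// address, and fields are read at fixed offsets. Entry, shift, and the field
// layout here are illustrative stand-ins, not the real JSDispatchEntry.
#include <cstdint>
struct Entry {
  uint64_t entrypoint;
  uint64_t encoded_word;  // parameter count lives in the low 16 bits
};
static inline uint64_t LookupEntrypoint(const Entry* table_base,
                                        uint32_t handle, unsigned shift) {
  uint32_t index = handle >> shift;  // drop the handle's low tag bits
  return table_base[index].entrypoint;
}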
 
#if V8_TARGET_ARCH_RISCV64

                     std::forward<Trapper>(trapper));

    Ld(destination, field_operand, std::forward<Trapper>(trapper));

    Sw(value, dst_field_operand, std::forward<Trapper>(trapper));

    Sd(value, dst_field_operand, std::forward<Trapper>(trapper));

  Register scratch = temps.Acquire();
  AddWord(scratch, dst.rm(), dst.offset());

    amoswap_w(true, true, zero_reg, src, scratch);

    amoswap_d(true, true, zero_reg, src, scratch);
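// A minimal sketch (not from this file) of what the amoswap above achieves:
// with aq and rl both set and the old value discarded (rd = zero_reg), the
// atomic swap acts as a fully ordered atomic store.
#include <atomic>
#include <cstdint>
static inline void AtomicStoreSeqCst(std::atomic<uint64_t>* slot,
                                     uint64_t value) {
  slot->exchange(value, std::memory_order_seq_cst);  // result discarded
}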
 
                                      const Register& source) {

#ifdef V8_ENABLE_SANDBOX

  Register scratch = temps.Acquire();
  Lw(destination, field_operand, std::forward<Trapper>(trapper));

  Lwu(dst, src, std::forward<Trapper>(trapper));

  Lwu(dst, src, std::forward<Trapper>(trapper));
 
                              int slots_to_drop_on_return,

      ER::handle_scope_next_address(isolate), no_reg);

      ER::handle_scope_limit_address(isolate), no_reg);

      ER::handle_scope_level_address(isolate), no_reg);

                     scratch, scratch2, prev_next_address_reg, prev_limit_reg));

                     scratch, scratch2, prev_next_address_reg, prev_limit_reg));

                     scratch, scratch2, prev_next_address_reg, prev_limit_reg));

                            "Allocate HandleScope in callee-save registers.");
    __ LoadWord(prev_next_address_reg, next_mem_op);
    __ LoadWord(prev_limit_reg, limit_mem_op);
    __ Lw(prev_level_reg, level_mem_op);
    __ Add32(scratch, prev_level_reg, Operand(1));
    __ Sw(scratch, level_mem_op);

  Label profiler_or_side_effects_check_enabled, done_api_call;
  if (with_profiling) {
    __ RecordComment("Check if profiler or side effects check is enabled");

    __ Branch(&profiler_or_side_effects_check_enabled, ne, scratch,

#ifdef V8_RUNTIME_CALL_STATS

    __ li(scratch, ER::address_of_runtime_stats_flag());

    __ Branch(&profiler_or_side_effects_check_enabled, ne, scratch,

  Label propagate_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;

  __ LoadWord(return_value, return_value_operand);

        "No more valid handles (the result handle was the last one)."
        "Restore previous handle scope.");
    __ StoreWord(prev_next_address_reg, next_mem_op);

      __ Lw(scratch, level_mem_op);

      __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, scratch,

    __ Sw(prev_level_reg, level_mem_op);
    __ LoadWord(scratch, limit_mem_op);

  __ bind(&leave_exit_frame);

  Register argc_reg = prev_limit_reg;
  if (argc_operand != nullptr) {

    __ LoadWord(argc_reg, *argc_operand);

                            "Check if the function scheduled an exception.");
    __ LoadRoot(scratch, RootIndex::kTheHoleValue);

                              ER::exception_address(isolate), no_reg));

                 AbortReason::kAPICallReturnedInvalidObject);

  if (argc_operand == nullptr) {

    if (slots_to_drop_on_return != 0) {

  if (with_profiling) {

    __ bind(&profiler_or_side_effects_check_enabled);

          IsolateFieldId::kApiCallbackThunkArgument);
      __ StoreWord(thunk_arg, thunk_arg_mem_op);

    __ li(scratch, thunk_ref);

  __ bind(&propagate_exception);

        masm, "HandleScope limit has changed. Delete allocated extensions.");
    __ bind(&delete_allocated_handles);
    __ StoreWord(prev_limit_reg, limit_mem_op);

    Register saved_result = prev_limit_reg;
    __ Move(saved_result, a0);
 
 
  LoadRoot(dst, RootIndex::kUndefinedValue);
 
 
                                 Register scratch, unsigned lower_limit,
                                 unsigned higher_limit,

  if (lower_limit != 0) {
    SubWord(scratch, value, Operand(lower_limit));
    Branch(L, cond, scratch, Operand(higher_limit - lower_limit), distance);

    Branch(L, cond, scratch, Operand(higher_limit - lower_limit), distance);
 
 
#define DEBUG_PRINTF(...)
#define Assert(condition)
static int ActivationFrameAlignment()
constexpr UnderlyingType & value() &
void RequestHeapNumber(HeapNumberRequest request)
EmbeddedObjectIndex AddEmbeddedObject(IndirectHandle< HeapObject > object)
size_t EmbeddedObjectIndex
V8_INLINE void RecordComment(const char *comment, const SourceLocation &loc=SourceLocation::Current())
const AssemblerOptions & options() const
void amoswap_w(bool aq, bool rl, Register rd, Register rs1, Register rs2)
void lr_w(bool aq, bool rl, Register rd, Register rs1)
void sh2add(Register rd, Register rs1, Register rs2)
void cpop(Register rd, Register rs)
void sh3add(Register rd, Register rs1, Register rs2)
void rev8(Register rd, Register rs)
void ctz(Register rd, Register rs)
void sh1add(Register rd, Register rs1, Register rs2)
void rori(Register rd, Register rs1, uint8_t shamt)
void c_fsdsp(FPURegister rs2, uint16_t uimm9)
void c_li(Register rd, int8_t imm6)
void c_srli(Register rs1, int8_t shamt6)
void c_addi4spn(Register rd, int16_t uimm10)
void c_add(Register rd, Register rs2)
void c_sub(Register rd, Register rs2)
void c_xor(Register rd, Register rs2)
void c_mv(Register rd, Register rs2)
void c_fld(FPURegister rd, Register rs1, uint16_t uimm8)
void c_or(Register rd, Register rs2)
void c_fsd(FPURegister rs2, Register rs1, uint16_t uimm8)
void c_lw(Register rd, Register rs1, uint16_t uimm7)
void c_and(Register rd, Register rs2)
void c_swsp(Register rs2, uint16_t uimm8)
void c_slli(Register rd, uint8_t shamt6)
void c_andi(Register rs1, int8_t imm6)
void c_lwsp(Register rd, uint16_t uimm8)
void c_srai(Register rs1, int8_t shamt6)
void c_addi16sp(int16_t imm10)
void c_fldsp(FPURegister rd, uint16_t uimm9)
void c_sw(Register rs2, Register rs1, uint16_t uimm7)
void c_addi(Register rd, int8_t imm6)
void fcvt_wu_d(Register rd, FPURegister rs1, FPURoundingMode frm=RNE)
void flt_d(Register rd, FPURegister rs1, FPURegister rs2)
void feq_d(Register rd, FPURegister rs1, FPURegister rs2)
void fcvt_w_d(Register rd, FPURegister rs1, FPURoundingMode frm=RNE)
void fle_d(Register rd, FPURegister rs1, FPURegister rs2)
void fsgnj_d(FPURegister rd, FPURegister rs1, FPURegister rs2)
void fcvt_d_w(FPURegister rd, Register rs1, FPURoundingMode frm=RNE)
void fsd(FPURegister source, Register base, int16_t imm12)
void fcvt_d_wu(FPURegister rd, Register rs1, FPURoundingMode frm=RNE)
void fmv_d(FPURegister rd, FPURegister rs)
void fmv_w_x(FPURegister rd, Register rs1)
void feq_s(Register rd, FPURegister rs1, FPURegister rs2)
void fsw(FPURegister source, Register base, int16_t imm12)
void fcvt_s_w(FPURegister rd, Register rs1, FPURoundingMode frm=RNE)
void fcvt_wu_s(Register rd, FPURegister rs1, FPURoundingMode frm=RNE)
void flt_s(Register rd, FPURegister rs1, FPURegister rs2)
void fsgnj_s(FPURegister rd, FPURegister rs1, FPURegister rs2)
void fmv_x_w(Register rd, FPURegister rs1)
void fle_s(Register rd, FPURegister rs1, FPURegister rs2)
void fcvt_w_s(Register rd, FPURegister rs1, FPURoundingMode frm=RNE)
void fcvt_s_wu(FPURegister rd, Register rs1, FPURoundingMode frm=RNE)
void fmv_s(FPURegister rd, FPURegister rs)
void flw(FPURegister rd, Register rs1, int16_t imm12)
void mv(Register rd, Register rs)
void snez(Register rd, Register rs)
void seqz(Register rd, Register rs)
void srai(Register rd, Register rs1, uint8_t shamt)
void srli(Register rd, Register rs1, uint8_t shamt)
void bleu(Register rs1, Register rs2, int16_t imm13)
void bgtu(Register rs1, Register rs2, int16_t imm13)
void slli(Register rd, Register rs1, uint8_t shamt)
void mulhu(Register rd, Register rs1, Register rs2)
void mulh(Register rd, Register rs1, Register rs2)
void rem(Register rd, Register rs1, Register rs2)
void remu(Register rd, Register rs1, Register rs2)
void vmerge_vx(VRegister vd, Register rs1, VRegister vs2)
void vmv_xs(Register rd, VRegister vs2)
void vfmerge_vf(VRegister vd, FPURegister fs1, VRegister vs2)
void vnot_vv(VRegister dst, VRegister src, MaskType mask=NoMask)
void vfmv_fs(FPURegister fd, VRegister vs2)
void vmv_sx(VRegister vd, Register rs1)
void vmv_vx(VRegister vd, Register rs1)
void czero_nez(Register rd, Register rs1, Register rs2)
void czero_eqz(Register rd, Register rs1, Register rs2)
void frflags(Register rd)
void csrrci(Register rd, ControlStatusReg csr, uint8_t imm5)
void set(Register rd, VSew sew, Vlmul lmul)
void ld(Register rd, const MemOperand &rs)
void sd(Register rd, const MemOperand &rs)
void fmsub_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa)
bool is_near(Label *L, OffsetSize bits)
void addi(Register dst, Register src, const Operand &imm)
void lbu(Register rd, const MemOperand &rs)
void break_(uint32_t code, bool break_as_stop=false)
void fmax_d(FPURegister fd, FPURegister fj, FPURegister fk)
void divw(Register dst, Register src1, Register src2, OEBit o=LeaveOE, RCBit r=LeaveRC)
void and_(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void bgeu(Register rj, Register rd, int32_t offset)
bool is_trampoline_emitted() const
void beqz(Register rj, int32_t offset)
void fneg_s(FPURegister fd, FPURegister fj)
void lb(Register rd, const MemOperand &rs)
void jalr(Register rs, Register rd=ra)
void sltiu(Register rd, Register rs, int32_t j)
void bne(Register rj, Register rd, int32_t offset)
friend class BlockTrampolinePoolScope
void j(Condition cc, Label *L, Label::Distance distance=Label::kFar)
void mul(Register dst, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
bool is_trampoline_pool_blocked() const
void slti(Register rd, Register rj, int32_t si12)
void auipc(Register rs, int16_t imm16)
void fsub_d(FPURegister fd, FPURegister fj, FPURegister fk)
int InstructionsGeneratedSince(Label *label)
void fmax_s(FPURegister fd, FPURegister fj, FPURegister fk)
void add(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void blt(Register rj, Register rd, int32_t offset)
void ble(Label *L, CRegister cr=cr0, LKBit lk=LeaveLK)
void neg(const Register &rd, const Operand &operand)
Simd128Register Simd128Register ra
uint64_t jump_address(Label *L)
void lw(Register rd, const MemOperand &rs)
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data=0)
RelocInfoStatus RecordEntry(uint32_t data, RelocInfo::Mode rmode)
static constexpr int kJumpOffsetBits
void EmitConstPoolWithJumpIfNeeded(size_t margin=0)
void xori(Register rd, Register rj, int32_t ui12)
friend class UseScratchRegisterScope
bool MustUseReg(RelocInfo::Mode rmode)
void bltu(Register rj, Register rd, int32_t offset)
void shift(Operand dst, Immediate shift_amount, int subcode, int size)
void clz(Register dst, Register src, Condition cond=al)
void or_(Register dst, int32_t imm32)
void fmin_d(FPURegister fd, FPURegister fj, FPURegister fk)
void sub(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void sb(Register rd, const MemOperand &rs)
void xor_(Register dst, int32_t imm32)
void andi(Register rd, Register rj, int32_t ui12)
void lh(Register rd, const MemOperand &rs)
void sc_d(Register rd, Register rj, int32_t si14)
void BlockTrampolinePoolFor(int instructions)
void sltu(Register rd, Register rj, Register rk)
void sc_w(Register rd, Register rj, int32_t si14)
void lwu(Register rd, const MemOperand &rs)
void srl(Register rd, Register rt, uint16_t sa)
void fmin_s(FPURegister fd, FPURegister fj, FPURegister fk)
void sll(Register rd, Register rt, uint16_t sa, bool coming_from_nop=false)
void fneg_d(FPURegister fd, FPURegister fj)
int32_t branch_offset_helper(Label *L, OffsetSize bits)
void not_(const VRegister &vd, const VRegister &vn)
void bge(Register rj, Register rd, int32_t offset)
void fmadd_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa)
void ori(Register rd, Register rj, int32_t ui12)
void lhu(Register rd, const MemOperand &rs)
void sw(Register rd, const MemOperand &rs)
uint64_t branch_long_offset(Label *L)
void AdjustBaseAndOffset(MemOperand *src)
void fadd_s(FPURegister fd, FPURegister fj, FPURegister fk)
void slt(Register rd, Register rj, Register rk)
void fadd_d(FPURegister fd, FPURegister fj, FPURegister fk)
void sh(Register rd, const MemOperand &rs)
bool NeedAdjustBaseAndOffset(const MemOperand &src, OffsetAccessType=OffsetAccessType::SINGLE_ACCESS, int second_Access_add_to_offset=4)
void ForceConstantPoolEmissionWithoutJump()
void bgt(Label *L, CRegister cr=cr0, LKBit lk=LeaveLK)
void bnez(Register rj, int32_t offset)
void divu(Register rs, Register rt)
void stop(Condition cond=al, int32_t code=kDefaultStopCode)
void CheckTrampolinePoolQuick(int extra_instructions=0)
int SizeOfCodeGeneratedSince(Label *label)
void fmadd_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa)
void beq(Register rj, Register rd, int32_t offset)
void sra(Register rt, Register rd, uint16_t sa)
void fmsub_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa)
static constexpr Builtin RecordWrite(SaveFPRegsMode fp_mode)
static bool IsIsolateIndependentBuiltin(Tagged< Code > code)
V8_EXPORT_PRIVATE Handle< Code > code_handle(Builtin builtin)
static constexpr Builtin RuntimeCEntry(int result_size, bool switch_to_central_stack=false)
static constexpr Builtin EphemeronKeyBarrier(SaveFPRegsMode fp_mode)
static constexpr Builtin IndirectPointerBarrier(SaveFPRegsMode fp_mode)
static constexpr Builtin CEntry(int result_size, ArgvMode argv_mode, bool builtin_exit_frame=false, bool switch_to_central_stack=false)
static const int kIsTurbofannedBit
static const int kMarkedForDeoptimizationBit
static constexpr int kCallerFPOffset
static constexpr int kCallerPCOffset
static const int kInvalidContext
static V8_INLINE constexpr int SlotOffset(int index)
static bool IsSupported(CpuFeature f)
static V8_EXPORT_PRIVATE const int kEagerDeoptExitSize
static V8_EXPORT_PRIVATE const int kLazyDeoptExitSize
static constexpr int kSPOffset
static constexpr int kCallerSPDisplacement
static V8_EXPORT_PRIVATE ExternalReference address_of_code_pointer_table_base_address()
int32_t offset_from_root_register() const
bool IsIsolateFieldId() const
static ExternalReference Create(const SCTableReference &table_ref)
static constexpr uint32_t kFlagsTieringStateIsAnyRequested
static constexpr uint32_t FlagMaskForNeedsProcessingCheckFrom(CodeKind code_kind)
static constexpr int OffsetOfElementAt(int index)
static constexpr int kHeaderSize
static constexpr int kMapOffset
static constexpr RegList ComputeSavedRegisters(Register object, Register slot_address=no_reg)
static constexpr Register IndirectPointerTagRegister()
static constexpr Register ObjectRegister()
static constexpr Register SlotAddressRegister()
static const int kExternalPointerTableBasePointerOffset
static constexpr int BuiltinEntrySlotOffset(Builtin id)
static constexpr int real_jslimit_offset()
static constexpr int jslimit_offset()
static IsolateGroup * current()
Address BuiltinEntry(Builtin builtin)
bool root_array_available_
static bool IsAddressableThroughRootRegister(Isolate *isolate, const ExternalReference &reference)
V8_INLINE std::string CommentForOffHeapTrampoline(const char *prefix, Builtin builtin)
static int32_t RootRegisterOffsetForExternalReferenceTableEntry(Isolate *isolate, const ExternalReference &reference)
static int32_t RootRegisterOffsetForRootIndex(RootIndex root_index)
Isolate * isolate() const
Tagged_t ReadOnlyRootPtr(RootIndex index)
bool root_array_available() const
void IndirectLoadConstant(Register destination, Handle< HeapObject > object)
static intptr_t RootRegisterOffsetForExternalReference(Isolate *isolate, const ExternalReference &reference)
bool should_abort_hard() const
void IndirectLoadExternalReference(Register destination, ExternalReference reference)
void Mul(const Register &rd, const Register &rn, const Register &rm)
void Abort(AbortReason msg)
void LoadStackLimit(Register destination, StackLimitKind kind)
void LoadReceiver(Register dest)
void GetObjectType(Register function, Register map, Register type_reg)
void Call(Register target, Condition cond=al)
void CallJSFunction(Register function_object, uint16_t argument_count)
void CallDebugOnFunctionCall(Register fun, Register new_target, Register expected_parameter_count, Register actual_parameter_count)
void Round_d(FPURegister fd, FPURegister fj)
void Lbu(Register rd, const MemOperand &rs)
void LoadAddress(Register destination, ExternalReference source)
void BranchAndLinkLong(Label *L, BranchDelaySlot bdslot)
void LoadNBytes(Register rd, const MemOperand &rs, Register scratch)
void LoadFloat(FPURegister fd, const MemOperand &src, Trapper &&trapper=[](int){})
void ExtractBits(Register dest, Register source, Register pos, int size, bool sign_extend=false)
void JumpIfIsInRange(Register value, Register scratch, unsigned lower_limit, unsigned higher_limit, Label *on_in_range)
void Trunc_uw_s(FPURegister fd, FPURegister fj, FPURegister scratch)
void AlignedStoreHelper(Reg_T value, const MemOperand &rs, Func generator)
void Clear_if_nan_s(Register rd, FPURegister fs)
void MultiPopFPU(DoubleRegList regs)
void DecompressTaggedSigned(const Register &destination, const MemOperand &field_operand)
void Drop(int count, Condition cond=al)
void Cvt_s_uw(FPURegister fd, FPURegister fs)
void MultiPushFPU(DoubleRegList regs)
void Neg(const Register &rd, const Operand &operand)
void MovFromFloatResult(DwVfpRegister dst)
void Trunc_l_d(FPURegister fd, FPURegister fs)
void Floor_d(FPURegister fd, FPURegister fj)
void Scd(Register rd, const MemOperand &rs)
void Sh(Register rd, const MemOperand &rs)
void SmiUntag(Register reg, SBit s=LeaveCC)
void AssertFunction(Register object) NOOP_UNLESS_DEBUG_CODE
void Ceil_f(VRegister dst, VRegister src, Register scratch, VRegister v_scratch)
void Neg_s(FPURegister fd, FPURegister fj)
void Round_f(VRegister dst, VRegister src, Register scratch, VRegister v_scratch)
void Clz32(Register rd, Register rs)
void AssertNotSmi(Register object, AbortReason reason=AbortReason::kOperandIsASmi) NOOP_UNLESS_DEBUG_CODE
void Float64Min(FPURegister dst, FPURegister src1, FPURegister src2, Label *out_of_line)
void LoadExternalPointerField(Register destination, MemOperand field_operand, ExternalPointerTagRange tag_range, Register isolate_root=Register::no_reg())
void AssertGeneratorObject(Register object) NOOP_UNLESS_DEBUG_CODE
void CompareIsNanF64(FPURegister cmp1, FPURegister cmp2, CFRegister cd=FCC0)
void near_call(int offset, RelocInfo::Mode rmode)
void Lb(Register rd, const MemOperand &rs)
void BranchRange(Label *L, Condition cond, Register value, Register scratch, unsigned lower_limit, unsigned higher_limit, Label::Distance distance=Label::kFar)
void CompareF32(FPURegister cmp1, FPURegister cmp2, FPUCondition cc, CFRegister cd=FCC0)
void SarPair(Register high, Register low, uint8_t imm8)
void LoadEntryFromBuiltin(Builtin builtin, Register destination)
void PushStandardFrame(Register function_reg)
void BranchFalseF(Label *target, CFRegister cc=FCC0)
void Uld(Register rd, const MemOperand &rs)
void AssertSignExtended(Register int32_register) NOOP_UNLESS_DEBUG_CODE
void UnalignedFStoreHelper(FPURegister frd, const MemOperand &rs)
void CompareRoot(Register obj, RootIndex index)
void MovFromFloatParameter(DwVfpRegister dst)
void LoadLane(NeonSize sz, NeonListOperand dst_list, uint8_t lane, NeonMemOperand src)
void RoundHelper(VRegister dst, VRegister src, Register scratch, VRegister v_scratch, FPURoundingMode frm, bool keep_nan_same=true)
void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot=PROTECT)
void Move(Register dst, Tagged< Smi > smi)
void SmiTst(Register value)
bool has_double_zero_reg_set_
void Assert(Condition cond, AbortReason reason) NOOP_UNLESS_DEBUG_CODE
void li_optimized(Register rd, Operand j, LiFlags mode=OPTIMIZE_SIZE)
void AtomicDecompressTaggedSigned(const Register &destination, const Register &base, const Register &index, const Register &temp)
void StoreReturnAddressAndCall(Register target)
void LoadZeroIfConditionZero(Register dest, Register condition)
void Float64Max(FPURegister dst, FPURegister src1, FPURegister src2, Label *out_of_line)
void StackOverflowCheck(Register num_args, Register scratch, Label *stack_overflow)
void AssertFeedbackVector(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE
void CallBuiltinByIndex(Register builtin_index, Register target)
int32_t GetOffset(Label *L, OffsetSize bits)
void LoadTrustedPointerField(Register destination, MemOperand field_operand, IndirectPointerTag tag)
void ReverseBytesHelper(Register rd, Register rs, Register tmp1, Register tmp2)
void LoadRootRelative(Register destination, int32_t offset) final
void JumpIfSmi(Register value, Label *smi_label)
void Round_d_d(FPURegister fd, FPURegister fs)
void JumpIfObjectType(Register object, Register map, Register type_reg, InstanceType type, Label *if_cond_pass, Condition cond=eq)
void BranchShort(Label *label, Condition cond, Register r1, const Operand &r2, bool need_link=false)
void Cvt_s_ul(FPURegister fd, FPURegister fs)
void UnalignedFLoadHelper(FPURegister frd, const MemOperand &rs)
void MultiPush(RegList regs)
void CallCodeObject(Register code_object)
void LoadSandboxedPointerField(Register destination, MemOperand field_operand)
void UnalignedStoreHelper(Register rd, const MemOperand &rs, Register scratch_other=no_reg)
void AssertUnreachable(AbortReason reason) NOOP_UNLESS_DEBUG_CODE
void Ceil_d_d(FPURegister fd, FPURegister fs)
void Lwu(Register rd, const MemOperand &rs)
int CallCFunctionHelper(Register function, int num_reg_arguments, int num_double_arguments, SetIsolateDataSlots set_isolate_data_slots=SetIsolateDataSlots::kYes, Label *return_location=nullptr)
void LoadRootRegisterOffset(Register destination, intptr_t offset) final
void MulOverflow32(Register dst, Register left, const Operand &right, Register overflow, bool sign_extend_inputs=true)
void Ror(const Register &rd, const Register &rs, unsigned shift)
void UStoreFloat(FPURegister fd, const MemOperand &rs)
void LoadCodeInstructionStart(Register destination, Register code_object, CodeEntrypointTag tag=kDefaultCodeEntrypointTag)
bool IsDoubleZeroRegSet()
void BranchFalseShortF(Label *target, CFRegister cc=FCC0)
void CompareTaggedRootAndBranch(const Register &with, RootIndex index, Condition cc, Label *target)
void InsertLowWordF64(FPURegister dst, Register src_low)
void NegateBool(Register rd, Register rs)
void AtomicDecompressTagged(const Register &destination, const Register &base, const Register &index, const Register &temp)
void LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(Register flags, Register feedback_vector, CodeKind current_code_kind, Label *flags_need_processing)
void Trunc_w_d(FPURegister fd, FPURegister fs)
void Ulwu(Register rd, const MemOperand &rs)
void LoadFeedbackVector(Register dst, Register closure, Register scratch, Label *fbv_undef)
bool BranchAndLinkShortCheck(int32_t offset, Label *L, Condition cond, Register rs, const Operand &rt, BranchDelaySlot bdslot)
void LoadFPRImmediate(FPURegister dst, float imm)
void EmitIncrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void Cvt_d_ul(FPURegister fd, FPURegister fs)
void near_jump(int offset, RelocInfo::Mode rmode)
void BranchTrueF(Label *target, CFRegister cc=FCC0)
void InvokeFunctionCode(Register function, Register new_target, Register expected_parameter_count, Register actual_parameter_count, InvokeType type)
void BailoutIfDeoptimized()
void Trunc_ul_s(FPURegister fd, FPURegister fs, FPURegister scratch, Register result=no_reg)
void DecodeSandboxedPointer(Register value)
void Sd(Register rd, const MemOperand &rs)
void BranchShortHelper(int16_t offset, Label *L, BranchDelaySlot bdslot)
int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1=no_reg, Register exclusion2=no_reg, Register exclusion3=no_reg) const
void CompareIsNotNanF64(Register rd, FPURegister cmp1, FPURegister cmp2)
void WasmRvvS128const(VRegister dst, const uint8_t imms[16])
void CompareTaggedRoot(Register with, RootIndex index)
void JumpIfJSAnyIsNotPrimitive(Register heap_object, Register scratch, Label *target, Label::Distance distance=Label::kFar, Condition condition=Condition::kUnsignedGreaterThanEqual)
void CompareTaggedAndBranch(const Register &lhs, const Operand &rhs, Condition cond, Label *label)
bool CanUseNearCallOrJump(RelocInfo::Mode rmode)
void Trunc_s_s(FPURegister fd, FPURegister fs)
void InvokePrologue(Register expected_parameter_count, Register actual_parameter_count, InvokeType type)
void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode)
void DropAndRet(int drop)
void Cvt_s_w(FPURegister fd, Register rs)
void EnforceStackAlignment()
void SmiTag(Register reg, SBit s=LeaveCC)
void CompareObjectTypeAndJump(Register heap_object, Register map, Register type_reg, InstanceType type, Condition cond, Label *target, Label::Distance distance)
void SbxCheck(Condition cc, AbortReason reason)
void LoadNBytesOverwritingBaseReg(const MemOperand &rs, Register scratch0, Register scratch1)
void PushArray(Register array, Register size, Register scratch, PushArrayOrder order=PushArrayOrder::kNormal)
void StoreDouble(FPURegister fs, const MemOperand &dst, Trapper &&trapper=[](int){})
void EnterExitFrame(Register scratch, int stack_space, StackFrame::Type frame_type)
void MovToFloatResult(DwVfpRegister src)
void RecordWriteField(Register object, int offset, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, SmiCheck smi_check=SmiCheck::kInline)
MemOperand ExternalReferenceAsOperand(ExternalReference reference, Register scratch)
void MovToFloatParameter(DwVfpRegister src)
void AssertSmi(Register object, AbortReason reason=AbortReason::kOperandIsNotASmi) NOOP_UNLESS_DEBUG_CODE
void PushCommonFrame(Register marker_reg=no_reg)
void LoadIndirectPointerField(Register destination, MemOperand field_operand, IndirectPointerTag tag)
void Trunc_d(FPURegister fd, FPURegister fj)
void CallIndirectPointerBarrier(Register object, Operand offset, SaveFPRegsMode fp_mode, IndirectPointerTag tag)
void ShlPair(Register high, Register low, uint8_t imm8)
int LeaveFrame(StackFrame::Type type)
void li(Register rd, Operand j, LiFlags mode=OPTIMIZE_SIZE)
int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1=no_reg, Register exclusion2=no_reg, Register exclusion3=no_reg)
void Sw(Register rd, const MemOperand &rs)
void And(Register dst, Register src1, const Operand &src2, Condition cond=al)
static int InstrCountForLi64Bit(int64_t value)
void ShrPair(Register high, Register low, uint8_t imm8)
void JumpIfMarking(Label *is_marking, Label::Distance condition_met_distance=Label::kFar)
void JumpToExternalReference(const ExternalReference &builtin, bool builtin_exit_frame=false)
Operand ClearedValue() const
void BranchAndLinkShortHelper(int16_t offset, Label *L, BranchDelaySlot bdslot)
void Lhu(Register rd, const MemOperand &rs)
void InsertBits(Register dest, Register source, Register pos, int size)
void StoreCodePointerField(Register value, MemOperand dst_field_operand)
void BranchLong(int32_t offset, BranchDelaySlot bdslot=PROTECT)
void ByteSwap(Register dest, Register src, int operand_size)
void MultiPop(RegList regs)
void Msub_d(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk)
void GenPCRelativeJumpAndLink(Register rd, int64_t offset)
void InvokeFunctionWithNewTarget(Register function, Register new_target, Register actual_parameter_count, InvokeType type)
void Jump(Register target, Condition cond=al)
void Usw(Register rd, const MemOperand &rs)
void LoadRoot(Register destination, RootIndex index) final
void Trunc_d_d(FPURegister fd, FPURegister fs)
void RecordWrite(Register object, Operand offset, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, SmiCheck smi_check=SmiCheck::kInline)
void DecompressProtected(const Register &destination, const MemOperand &field_operand)
void UnalignedLoadHelper(Register rd, const MemOperand &rs)
void Madd_d(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk)
void StoreTaggedField(const Register &value, const MemOperand &dst_field_operand)
void PrepareCEntryArgs(int num_args)
void ULoadDouble(FPURegister fd, const MemOperand &rs)
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg=false)
void InvokeFunction(Register function, Register expected_parameter_count, Register actual_parameter_count, InvokeType type)
void Floor_d_d(FPURegister fd, FPURegister fs)
void Round_w_s(Register rd, FPURegister fs, Register result=no_reg)
void Popcnt32(Register dst, Register src)
void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch)
void Ceil_w_d(FPURegister fd, FPURegister fs)
void Clear_if_nan_d(Register rd, FPURegister fs)
void PatchAndJump(Address target)
void LoadTaggedField(const Register &destination, const MemOperand &field_operand)
void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2, Label *out_of_line)
void Round_s_s(FPURegister fd, FPURegister fs)
void JumpCodeObject(Register code_object, JumpMode jump_mode=JumpMode::kJump)
void Floor_w_s(Register rd, FPURegister fs, Register result=no_reg)
void LoadZeroIfConditionNotZero(Register dest, Register condition)
void EmitDecrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void StoreFloat(FPURegister fs, const MemOperand &dst, Trapper &&trapper=[](int){})
static int ActivationFrameAlignment()
void StoreLane(NeonSize sz, NeonListOperand src_list, uint8_t lane, NeonMemOperand dst)
void WasmRvvEq(VRegister dst, VRegister lhs, VRegister rhs, VSew sew, Vlmul lmul)
void LoadFromConstantsTable(Register destination, int constant_index) final
void LoadProtectedPointerField(Register destination, MemOperand field_operand)
MemOperand EntryFromBuiltinAsOperand(Builtin builtin)
void PrepareCEntryFunction(const ExternalReference &ref)
void Cvt_d_w(FPURegister fd, Register rs)
void ComputeCodeStartAddress(Register dst)
void MaybeSaveRegisters(RegList registers)
void CheckPageFlag(Register object, int mask, Condition cc, Label *condition_met)
void LoadTaggedRoot(Register destination, RootIndex index)
void LoadWordPair(Register rd, const MemOperand &rs, Register scratch=at)
void Lld(Register rd, const MemOperand &rs)
void JumpIfNotSmi(Register value, Label *not_smi_label)
void StoreIndirectPointerField(Register value, MemOperand dst_field_operand)
void SmiToInt32(Register smi)
int CallCFunction(ExternalReference function, int num_arguments, SetIsolateDataSlots set_isolate_data_slots=SetIsolateDataSlots::kYes, Label *return_label=nullptr)
void JumpJSFunction(Register function_object, JumpMode jump_mode=JumpMode::kJump)
void Ulhu(Register rd, const MemOperand &rs)
void WasmRvvNe(VRegister dst, VRegister lhs, VRegister rhs, VSew sew, Vlmul lmul)
void Lw(Register rd, const MemOperand &rs)
void JumpIfNotMarking(Label *not_marking, Label::Distance condition_met_distance=Label::kFar)
void Neg_d(FPURegister fd, FPURegister fk)
void MoveObjectAndSlot(Register dst_object, Register dst_slot, Register object, Operand offset)
void WasmRvvGeU(VRegister dst, VRegister lhs, VRegister rhs, VSew sew, Vlmul lmul)
void AssertConstructor(Register object) NOOP_UNLESS_DEBUG_CODE
bool BranchShortCheck(int32_t offset, Label *L, Condition cond, Register rs, const Operand &rt, BranchDelaySlot bdslot)
void LoadCompressedMap(Register dst, Register object)
void CallRuntime(const Runtime::Function *f, int num_arguments)
void LoadWeakValue(Register out, Register in, Label *target_if_cleared)
void CallBuiltin(Builtin builtin, Condition cond=al)
void Ceil_d(FPURegister fd, FPURegister fj)
void Lh(Register rd, const MemOperand &rs)
void Popcnt64(Register dst, Register src)
void CompareIsNotNanF32(Register rd, FPURegister cmp1, FPURegister cmp2)
void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src)
void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id)
void TruncateDoubleToI(Isolate *isolate, Zone *zone, Register result, DwVfpRegister double_input, StubCallMode stub_mode)
void LoadCodePointerField(Register destination, MemOperand field_operand)
void InsertHighWordF64(FPURegister dst, Register src_high)
void AssertJSAny(Register object, Register map_tmp, Register tmp, AbortReason abort_reason) NOOP_UNLESS_DEBUG_CODE
void CompareI(Register rd, Register rs, const Operand &rt, Condition cond)
void Xor(Register dst, Register src)
void Cvt_d_uw(FPURegister fd, FPURegister fs)
std::function< void(int)> Trapper
void CallEphemeronKeyBarrier(Register object, Operand offset, SaveFPRegsMode fp_mode)
void LoadDouble(FPURegister fd, const MemOperand &src, Trapper &&trapper=[](int){})
void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2, Label *out_of_line)
void CmpTagged(const Register &r1, const Register &r2)
void Check(Condition cond, AbortReason reason)
void Usd(Register rd, const MemOperand &rs)
void AssertFeedbackCell(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE
void Or(Register dst, Register src)
bool CalculateOffset(Label *L, int32_t *offset, OffsetSize bits)
void CallForDeoptimization(Builtin target, int deopt_id, Label *exit, DeoptimizeKind kind, Label *ret, Label *jump_deoptimization_entry_label)
void FloatMinMaxHelper(FPURegister dst, FPURegister src1, FPURegister src2, MaxMinKind kind)
void Sc(Register rd, const MemOperand &rs)
void StoreTrustedPointerField(Register value, MemOperand dst_field_operand)
static int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode, uint8_t *pc)
void DropArgumentsAndPushNewReceiver(Register argc, Register receiver)
void StoreSandboxedPointerField(Register value, MemOperand dst_field_operand)
void JumpIfCodeIsMarkedForDeoptimization(Register code, Register scratch, Label *if_marked_for_deoptimization)
Register GetRtAsRegisterHelper(const Operand &rt, Register scratch)
void LoadEntryFromBuiltinIndex(Register builtin_index, Register target)
void RoundFloatingPointToInteger(Register rd, FPURegister fs, Register result, TruncFunc trunc)
void AssertZeroExtended(Register int32_register)
void Ll(Register rd, const MemOperand &rs)
void ReplaceClosureCodeWithOptimizedCode(Register optimized_code, Register closure)
void CompareRootAndBranch(const Register &obj, RootIndex index, Condition cc, Label *target, ComparisonMode mode=ComparisonMode::kDefault)
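CompareRootAndBranch above folds the root-table load, compare, and branch into one call. A minimal sketch, with placeholder names, testing against the undefined root:

    Label is_undefined;
    masm->CompareRootAndBranch(obj, RootIndex::kUndefinedValue, eq,
                               &is_undefined);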
void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE
void Floor_f(VRegister dst, VRegister src, Register scratch, VRegister v_scratch)
void AssertRange(Condition cond, AbortReason reason, Register value, Register scratch, unsigned lower_limit, unsigned higher_limit) NOOP_UNLESS_DEBUG_CODE
void CallRecordWriteStubSaveRegisters(Register object, Operand offset, SaveFPRegsMode fp_mode, StubCallMode mode=StubCallMode::kCallBuiltinPointer)
void Madd_s(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk)
void StoreWordPair(Register rd, const MemOperand &rs, Register scratch=at)
void Trunc_ul_d(FPURegister fd, FPURegister fs, FPURegister scratch, Register result=no_reg)
void Sb(Register rd, const MemOperand &rs)
void Branch(Label *label, bool need_link=false)
void Ld(Register rd, const MemOperand &rs)
void UStoreDouble(FPURegister fd, const MemOperand &rs)
void Ceil_w_s(Register rd, FPURegister fs, Register result=no_reg)
void Trunc_w_s(Register rd, FPURegister fs, Register result=no_reg)
void Ceil_s_s(FPURegister fd, FPURegister fs)
void GetInstanceTypeRange(Register map, Register type_reg, InstanceType lower_limit, Register range)
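GetInstanceTypeRange above loads the instance type out of 'map' and subtracts lower_limit into 'range', turning a [first, last] membership test into one unsigned compare. A sketch against the callable-JS-function bounds named near the end of this index; the Branch overload and condition name are assumptions:

    masm->GetInstanceTypeRange(map, type, FIRST_CALLABLE_JS_FUNCTION_TYPE,
                               range);
    // One unsigned compare of the biased type decides range membership.
    masm->Branch(&is_callable, kUnsignedLessThanEqual, range,
                 Operand(LAST_CALLABLE_JS_FUNCTION_TYPE -
                         FIRST_CALLABLE_JS_FUNCTION_TYPE));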
void Ulw(Register rd, const MemOperand &rs)
int CalculateStackPassedDWords(int num_gp_arguments, int num_fp_arguments)
void OptimizeCodeOrTailCallOptimizedCodeSlot(Register flags, Register feedback_vector)
void Ulh(Register rd, const MemOperand &rs)
void PrepareCallCFunction(int num_reg_arguments, int num_double_registers=0, Register scratch=no_reg)
void LoadIsolateField(Register dst, IsolateFieldId id)
void TryInlineTruncateDoubleToI(Register result, DwVfpRegister input, Label *done)
void CompareIsNanF32(FPURegister cmp1, FPURegister cmp2, CFRegister cd=FCC0)
void BranchTrueShortF(Label *target, CFRegister cc=FCC0)
void MaybeRestoreRegisters(RegList registers)
void MulOverflow64(Register dst, Register left, const Operand &right, Register overflow)
void CallRecordWriteStub(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode=StubCallMode::kCallBuiltinPointer)
void LoadCompressedTaggedRoot(Register destination, RootIndex index)
void Floor_s_s(FPURegister fd, FPURegister fs)
void WasmRvvGtU(VRegister dst, VRegister lhs, VRegister rhs, VSew sew, Vlmul lmul)
void TryLoadOptimizedOsrCode(Register scratch_and_result, CodeKind min_opt_level, Register feedback_vector, FeedbackSlot slot, Label *on_result, Label::Distance distance)
void Ctz32(Register rd, Register rs)
int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1=no_reg, Register exclusion2=no_reg, Register exclusion3=no_reg)
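PopCallerSaved above undoes a matching PushCallerSaved; the exclusion registers stay out of the saved set, so a value produced between the two calls survives the restore. A sketch, assuming the symmetric PushCallerSaved overload:

    masm->PushCallerSaved(SaveFPRegsMode::kSave, result_reg);
    // ... call that clobbers caller-saved registers ...
    masm->PopCallerSaved(SaveFPRegsMode::kSave, result_reg);  // result_reg survives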
void AssertUndefinedOrAllocationSite(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE
void Round_w_d(FPURegister fd, FPURegister fs)
static int SafepointRegisterStackIndex(int reg_code)
void AlignedLoadHelper(Reg_T target, const MemOperand &rs, Func generator)
void WasmRvvGeS(VRegister dst, VRegister lhs, VRegister rhs, VSew sew, Vlmul lmul)
void SmiUntagField(Register dst, const MemOperand &src)
void StubPrologue(StackFrame::Type type)
void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2)
void CalcScaledAddress(Register rd, Register rt, Register rs, uint8_t sa)
void DecompressTagged(const Register &destination, const MemOperand &field_operand)
void Trunc_f(VRegister dst, VRegister src, Register scratch, VRegister v_scratch)
void StoreRootRelative(int32_t offset, Register value) final
void LoadTaggedSignedField(const Register &destination, const MemOperand &field_operand)
void LoadMap(Register destination, Register object)
void AtomicStoreTaggedField(const Register &value, const Register &dst_base, const Register &dst_index, const Register &temp)
void CompareF64(FPURegister cmp1, FPURegister cmp2, FPUCondition cc, CFRegister cd=FCC0)
void AssertStackIsAligned() NOOP_UNLESS_DEBUG_CODE
void TailCallRuntime(Runtime::FunctionId fid)
void Swap(Register srcdst0, Register srcdst1)
void LoadNativeContextSlot(Register dst, int index)
void WasmRvvGtS(VRegister dst, VRegister lhs, VRegister rhs, VSew sew, Vlmul lmul)
void LoadTaggedFieldWithoutDecompressing(const Register &destination, const MemOperand &field_operand)
void TailCallBuiltin(Builtin builtin, Condition cond=al)
void Floor_w_d(FPURegister fd, FPURegister fs)
void ULoadFloat(FPURegister fd, const MemOperand &rs)
bool has_single_zero_reg_set_
void Msub_s(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk)
void Switch(Register scratch, Register value, int case_value_base, Label **labels, int num_labels)
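Switch above dispatches through a jump table indexed by value - case_value_base. A sketch covering three consecutive case values; fall-through on out-of-range input is an assumption to check against the implementation:

    Label case_a, case_b, case_c;
    Label* labels[] = {&case_a, &case_b, &case_c};
    masm->Switch(scratch, value, 10, labels, 3);  // handles value in [10, 12]
    // continues here when 'value' is outside the table (assumed)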
void AssertSmiOrHeapObjectInMainCompressionCage(Register object) NOOP_UNLESS_DEBUG_CODE
void DropArguments(Register count)
void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE
void GenPCRelativeJump(Register rd, int64_t offset)
void Ush(Register rd, const MemOperand &rs, Register scratch)
static constexpr MainThreadFlags kPointersToHereAreInterestingMask
static constexpr intptr_t FlagsOffset()
static constexpr MainThreadFlags kPointersFromHereAreInterestingMask
static constexpr intptr_t GetAlignmentMaskForAssembler()
bool is_reg(Register reg) const
int32_t immediate() const
constexpr bool is_empty() const
constexpr bool has(RegisterT reg) const
constexpr unsigned Count() const
constexpr storage_t bits() const
constexpr bool is_valid() const
static constexpr FPURegister from_code(int8_t code)
constexpr int8_t code() const
static const RegisterConfiguration * Default()
int num_allocatable_general_registers() const
int GetAllocatableGeneralCode(int index) const
static constexpr Register from_code(int code)
static constexpr bool IsWasmCanonicalSigId(Mode mode)
static constexpr bool IsCompressedEmbeddedObject(Mode mode)
static constexpr bool IsCodeTarget(Mode mode)
static constexpr bool IsJSDispatchHandle(Mode mode)
static constexpr bool IsWasmCodePointerTableEntry(Mode mode)
static constexpr bool IsFullEmbeddedObject(Mode mode)
static constexpr bool IsReadOnly(RootIndex root_index)
static constexpr bool IsImmortalImmovable(RootIndex root_index)
static V8_EXPORT_PRIVATE const Function * FunctionForId(FunctionId id)
bool contains_direct_pointer() const
static SlotDescriptor ForCodePointerSlot()
IndirectPointerTag indirect_pointer_tag() const
bool contains_indirect_pointer() const
static constexpr Tagged< Smi > FromInt(int value)
static constexpr Tagged< Smi > zero()
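Smi::FromInt and Smi::zero above yield tagged immediates that can be materialized directly, assuming the Operand type accepts a Smi (as it does elsewhere in V8):

    masm->li(dst, Operand(Smi::FromInt(42)));  // dst = tagged 42
    masm->li(dst, Operand(Smi::zero()));       // dst = tagged 0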
static constexpr int32_t TypeToMarker(Type type)
static bool IsJavaScript(Type t)
static const int kNextOffset
static constexpr int kContextOffset
V8_EXPORT_PRIVATE bool Enabled()
static constexpr int OffsetOfElementAt(int index)
static constexpr int kFixedFrameSizeFromFp
void Include(const Register &reg1, const Register &reg2=no_reg)
static constexpr Register ObjectRegister()
static constexpr RegList ComputeSavedRegisters(Register object, Register slot_address=no_reg)
static constexpr Register SlotAddressRegister()
static constexpr Builtin GetRecordWriteBuiltin(SaveFPRegsMode fp_mode)
#define ASM_CODE_COMMENT_STRING(asm,...)
#define ASM_CODE_COMMENT(asm)
#define V8_ENABLE_LEAPTIERING_BOOL
#define COMPRESS_POINTERS_BOOL
#define V8_ENABLE_SANDBOX_BOOL
DirectHandle< Object > new_target
ZoneVector< RpoNumber > &result
#define SmiWordOffset(offset)
#define TEST_AND_PUSH_REG(reg)
#define TEST_AND_POP_REG(reg)
#define BRANCH_ARGS_CHECK(cond, rs, rt)
RegListBase< RegisterT > registers
InstructionOperand source
InstructionOperand destination
constexpr bool IsPowerOfTwo(T value)
V8_INLINE Dest bit_cast(Source const &source)
constexpr bool IsInRange(T value, U lower_limit, U higher_limit)
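IsPowerOfTwo (two entries up) and IsInRange above are constexpr, so they also work in static contexts; the bounds are assumed inclusive:

    static_assert(IsPowerOfTwo(64u));
    static_assert(!IsPowerOfTwo(0u));    // zero is not a power of two
    static_assert(IsInRange(7, 0, 10));  // 0 <= 7 <= 10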
constexpr Tagged_t kNonJsReceiverMapLimit
V8_INLINE constexpr std::optional< RootIndex > UniqueMapOfInstanceType(InstanceType type)
constexpr Register no_reg
V8_INLINE IndirectHandle< T > handle(Tagged< T > object, Isolate *isolate)
constexpr Register kRootRegister
constexpr int kCodePointerTableEntrySizeLog2
constexpr int kTaggedSize
constexpr int kBitsPerByte
constexpr unsigned kFloatMantissaBits
kUnsignedGreaterThanEqual
const int kFloat64MantissaBits
static V8_INLINE constexpr bool IsSharedExternalPointerType(ExternalPointerTagRange tag_range)
constexpr DoubleRegister kScratchDoubleReg
kUnknownIndirectPointerTag
constexpr bool CodeKindCanTierUp(CodeKind kind)
constexpr Register kJavaScriptCallTargetRegister
constexpr int kCodePointerTableEntryCodeObjectOffset
const int kFloat64ExponentBias
static int InstrCountForLiLower32Bit(int64_t value)
constexpr int kTrustedPointerTableEntrySizeLog2
const Address kWeakHeapObjectMask
const int kFloat32ExponentBias
constexpr unsigned kFloatExponentBias
constexpr Register kJavaScriptCallArgCountRegister
constexpr Register kScratchReg2
constexpr int kSystemPointerSizeLog2
base::StrongAlias< JSDispatchHandleAliasTag, uint32_t > JSDispatchHandle
constexpr Register kScratchReg
static const int kRegisterPassedArguments
constexpr int kUIntptrSize
MemOperand FieldMemOperand(Register object, int offset)
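FieldMemOperand above compensates for the heap-object tag, so callers pass the raw byte offset from the object layout. A minimal sketch using SmiUntagField from this index; the register names are placeholders and kLengthOffset is a hypothetical layout constant:

    // Load and untag a Smi field at byte offset kLengthOffset of 'obj'.
    masm->SmiUntagField(len, FieldMemOperand(obj, kLengthOffset));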
constexpr uint8_t kInstrSizeLog2
constexpr DoubleRegister kSingleRegZero
constexpr unsigned kFloatExponentBits
constexpr int kSystemPointerSize
const char * GetAbortReason(AbortReason reason)
static constexpr int kMaxCParameters
constexpr uint32_t kDebugZapValue
constexpr Simd128Register kSimd128ScratchReg
constexpr uint32_t kZapValue
constexpr bool SmiValuesAre31Bits()
Condition NegateCondition(Condition cond)
constexpr bool is_intn(int64_t x, unsigned n)
LAST_CALLABLE_JS_FUNCTION_TYPE
FIRST_CALLABLE_JS_FUNCTION_TYPE
constexpr Register kWasmImplicitArgRegister
const DoubleRegList kCallerSavedFPU
V8_EXPORT_PRIVATE bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
constexpr uint32_t kTrustedPointerHandleShift
constexpr uint32_t kCodePointerHandleShift
const int kCArgsSlotsSize
Tagged< ClearedWeakValue > ClearedValue(PtrComprCageBase cage_base)
kExternalPointerNullTag
const int kFloat32ExponentBits
constexpr LowDwVfpRegister kDoubleRegZero
V8_EXPORT_PRIVATE FlagValues v8_flags
const RegList kJSCallerSaved
constexpr bool SmiValuesAre32Bits()
const int kFloat32MantissaBits
constexpr Register kJavaScriptCallCodeStartRegister
constexpr int kJSDispatchTableEntrySizeLog2
constexpr Register kPtrComprCageBaseRegister
const intptr_t kSmiTagMask
const int kSafepointRegisterStackIndexMap[kNumRegs]
void CallApiFunctionAndReturn(MacroAssembler *masm, bool with_profiling, Register function_address, ExternalReference thunk_ref, Register thunk_arg, int slots_to_drop_on_return, MemOperand *argc_operand, MemOperand return_value_operand)
constexpr uint8_t kInstrSize
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2=no_reg, Register reg3=no_reg, Register reg4=no_reg, Register reg5=no_reg, Register reg6=no_reg)
const int kFloat64ExponentBits
constexpr uint64_t kTrustedPointerTableMarkBit
constexpr Register kCArgRegs[]
constexpr int kDoubleSize
constexpr Register kJavaScriptCallDispatchHandleRegister
constexpr uint32_t kCodePointerHandleMarker
const uint32_t kClearedWeakHeapObjectLower32
kFirstStrongOrReadOnlyRoot
kLastStrongOrReadOnlyRoot
constexpr uint32_t kMaxUInt32
constexpr Register kJavaScriptCallNewTargetRegister
constexpr int kNumRegisters
constexpr bool PointerCompressionIsEnabled()
static bool IsZero(const Operand &rt)
#define DCHECK_LE(v1, v2)
#define DCHECK_IMPLIES(v1, v2)
#define DCHECK_NE(v1, v2)
#define DCHECK_GE(v1, v2)
#define CHECK_EQ(lhs, rhs)
#define DCHECK(condition)
#define DCHECK_LT(v1, v2)
#define DCHECK_EQ(v1, v2)
#define DCHECK_GT(v1, v2)
constexpr bool IsAligned(T value, U alignment)
#define V8_STATIC_ROOTS_BOOL