#define FREEZE_STATE(witness_name) FreezeCacheState witness_name(asm_)

#define TRACE(...)                                                \
  do {                                                            \
    if (v8_flags.trace_liftoff) PrintF("[liftoff] " __VA_ARGS__); \
  } while (false)

#define WASM_TRUSTED_INSTANCE_DATA_FIELD_OFFSET(name) \
  ObjectAccess::ToTagged(WasmTrustedInstanceData::k##name##Offset)
 
template <int expected_size, int actual_size>
struct assert_field_size {
  static_assert(expected_size == actual_size,
                "field in WasmInstance does not have the expected size");
  static constexpr int size = actual_size;
};
 
#define WASM_TRUSTED_INSTANCE_DATA_FIELD_SIZE(name) \
  FIELD_SIZE(WasmTrustedInstanceData::k##name##Offset)

#define LOAD_INSTANCE_FIELD(dst, name, load_size, pinned)            \
  __ LoadFromInstance(                                               \
      dst, LoadInstanceIntoRegister(pinned, dst),                    \
      WASM_TRUSTED_INSTANCE_DATA_FIELD_OFFSET(name),                 \
      assert_field_size<WASM_TRUSTED_INSTANCE_DATA_FIELD_SIZE(name), \
                        load_size>::size);
 
#define LOAD_TAGGED_PTR_INSTANCE_FIELD(dst, name, pinned)                  \
  static_assert(                                                           \
      WASM_TRUSTED_INSTANCE_DATA_FIELD_SIZE(name) == kTaggedSize,          \
      "field in WasmTrustedInstanceData does not have the expected size"); \
  __ LoadTaggedPointerFromInstance(                                        \
      dst, LoadInstanceIntoRegister(pinned, dst),                          \
      WASM_TRUSTED_INSTANCE_DATA_FIELD_OFFSET(name));

#define LOAD_PROTECTED_PTR_INSTANCE_FIELD(dst, name, pinned)                 \
  static_assert(                                                             \
      WASM_TRUSTED_INSTANCE_DATA_FIELD_SIZE(Protected##name) == kTaggedSize, \
      "field in WasmTrustedInstanceData does not have the expected size");   \
  __ LoadProtectedPointer(                                                   \
      dst, LoadInstanceIntoRegister(pinned, dst),                            \
      WASM_TRUSTED_INSTANCE_DATA_FIELD_OFFSET(Protected##name));
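// Usage sketch (illustrative only; the field name below is hypothetical):
//
//   Register budget = ...;
//   LOAD_INSTANCE_FIELD(budget, TieringBudgetArray, kSystemPointerSize,
//                       pinned);
//
// assert_field_size makes this fail to compile if {load_size} disagrees with
// the declared size of the field in WasmTrustedInstanceData, so size bugs are
// caught at build time rather than at runtime.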
 
#ifdef V8_CODE_COMMENTS
#define CODE_COMMENT(str) __ RecordComment(str, SourceLocation{})
#define SCOPED_CODE_COMMENT(str)                                \
  AssemblerBase::CodeComment CONCAT(scoped_comment_, __LINE__)( \
      &asm_, str, SourceLocation{})
#else
#define CODE_COMMENT(str) ((void)0)
#define SCOPED_CODE_COMMENT(str) ((void)0)
#endif
constexpr int kHeavyInstructionSteps = 1000;

using MakeSig = FixedSizeSignature<ValueKind>;
 
#if V8_TARGET_ARCH_ARM64
// On ARM64, the Assembler keeps track of pointers to Labels in order to
// resolve branches to distant targets. Moving labels would confuse the
// Assembler, so the label is allocated in the Zone.
class MovableLabel {
 public:
  explicit MovableLabel(Zone* zone) : label_(zone->New<Label>()) {}

  Label* get() { return label_; }

 private:
  Label* label_;
};
#else
// On all other platforms, the Label is stored inline.
class MovableLabel {
 public:
  explicit MovableLabel(Zone*) {}

  Label* get() { return &label_; }

 private:
  Label label_;
};
#endif
 
compiler::CallDescriptor* GetLoweredCallDescriptor(
    Zone* zone, compiler::CallDescriptor* call_desc) {
  return kSystemPointerSize == 4
             ? compiler::GetI32WasmCallDescriptor(zone, call_desc)
             : call_desc;
}
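// On 32-bit targets, every i64 value is split into a pair of i32 registers,
// so the call descriptor must be lowered to its i32 equivalent; on 64-bit
// targets the descriptor is used unchanged.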
 
class DebugSideTableBuilder {
  using Entry = DebugSideTable::Entry;
  using Value = Entry::Value;

 public:
  enum AssumeSpilling { kAssumeSpilling, kAllowRegisters, kDidSpill };

  class EntryBuilder {
   public:
    explicit EntryBuilder(int pc_offset, int stack_height,
                          std::vector<Value> changed_values)
        : pc_offset_(pc_offset),
          stack_height_(stack_height),
          changed_values_(std::move(changed_values)) {}
 
    Entry ToTableEntry() {
      return Entry{pc_offset_, stack_height_, std::move(changed_values_)};
    }

    void MinimizeBasedOnPreviousStack(const std::vector<Value>& last_values) {
      auto dst = changed_values_.begin();
      auto end = changed_values_.end();
      for (auto src = dst; src != end; ++src) {
        if (src->index < static_cast<int>(last_values.size()) &&
            *src == last_values[src->index]) {
          continue;
        }
        if (dst != src) *dst = *src;
        ++dst;
      }
      changed_values_.erase(dst, changed_values_.end());
    }

    void set_pc_offset(int new_pc_offset) { pc_offset_ = new_pc_offset; }
 
   private:
    int pc_offset_;
    int stack_height_;
    std::vector<Value> changed_values_;
  };

  // Adds a new entry in regular code.
  void NewEntry(int pc_offset,
                base::Vector<DebugSideTable::Entry::Value> values) {
    entries_.emplace_back(pc_offset, static_cast<int>(values.size()),
                          GetChangedStackValues(last_values_, values));
  }

  // Adds a new entry for out-of-line code; the pc offset is not known yet and
  // is patched in later via set_pc_offset().
  EntryBuilder* NewOOLEntry(base::Vector<DebugSideTable::Entry::Value> values) {
    constexpr int kNoPcOffsetYet = -1;
    ool_entries_.emplace_back(kNoPcOffsetYet, static_cast<int>(values.size()),
                              GetChangedStackValues(last_ool_values_, values));
    return &ool_entries_.back();
  }

  void SetNumLocals(int num_locals) {
    DCHECK_EQ(-1, num_locals_);
    DCHECK_LE(0, num_locals);
    num_locals_ = num_locals;
  }
 
  std::unique_ptr<DebugSideTable> GenerateDebugSideTable() {
    DCHECK_LE(0, num_locals_);

    // Connect the regular entries and the out-of-line entries by dropping
    // redundant stack information from the first out-of-line entry.
    if (!entries_.empty() && !ool_entries_.empty()) {
      ool_entries_.front().MinimizeBasedOnPreviousStack(last_values_);
    }

    std::vector<Entry> entries;
    entries.reserve(entries_.size() + ool_entries_.size());
    for (auto& entry : entries_) entries.push_back(entry.ToTableEntry());
    for (auto& entry : ool_entries_) entries.push_back(entry.ToTableEntry());
    DCHECK(std::is_sorted(
        entries.begin(), entries.end(),
        [](Entry& a, Entry& b) { return a.pc_offset() < b.pc_offset(); }));
    return std::make_unique<DebugSideTable>(num_locals_, std::move(entries));
  }
 
 private:
  static std::vector<Value> GetChangedStackValues(
      std::vector<Value>& last_values, base::Vector<Value> values) {
    std::vector<Value> changed_values;
    int old_stack_size = static_cast<int>(last_values.size());
    last_values.resize(values.size());

    int index = 0;
    for (const auto& value : values) {
      if (index >= old_stack_size || last_values[index] != value) {
        changed_values.push_back(value);
        last_values[index] = value;
      }
      ++index;
    }

    return changed_values;
  }

  int num_locals_ = -1;
  // Keep a std::list of entries so that pointers handed out by NewOOLEntry()
  // stay valid while more entries are added.
  std::list<EntryBuilder> entries_;
  std::list<EntryBuilder> ool_entries_;
  std::vector<Value> last_values_;
  std::vector<Value> last_ool_values_;
};
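// Worked example of the delta encoding above (schematic): if the previous
// entry recorded the stack as [a, b, c] and the current stack is [a, b', c],
// only {b'} (index 1) is stored as a changed value; consumers recover the
// unchanged slots from earlier entries.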
 
void CheckBailoutAllowed(LiftoffBailoutReason reason, const char* detail,
                         const CompilationEnv* env) {
  // With --liftoff-only, treat every bailout as a fatal error so that tests
  // actually exercise the Liftoff path.
  if (v8_flags.liftoff_only) {
    FATAL("--liftoff-only: treating bailout as fatal error. Cause: %s", detail);
  }

  // With --enable-testing-opcode-in-wasm, bailing out on the testing opcode
  // is expected.
  if (v8_flags.enable_testing_opcode_in_wasm &&
      strcmp(detail, "testing opcode") == 0) {
    return;
  }

  // Some externally maintained architectures do not fully support Liftoff
  // yet, so bailouts are allowed there.
#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_PPC64 || \
    V8_TARGET_ARCH_LOONG64
  return;
#endif

#if V8_TARGET_ARCH_ARM
  // Allow bailout for missing ARMv7 support.
  if (!CpuFeatures::IsSupported(ARMv7)) return;
#endif

#define LIST_FEATURE(name, ...) WasmEnabledFeature::name,
  constexpr WasmEnabledFeatures kExperimentalFeatures{
      FOREACH_WASM_EXPERIMENTAL_FEATURE_FLAG(LIST_FEATURE)};
#undef LIST_FEATURE

  // Bailing out is allowed if any experimental feature is enabled.
  if (env->enabled_features.contains_any(kExperimentalFeatures)) return;

  // Otherwise, bailout should not happen.
  FATAL("Liftoff bailout should not happen. Cause: %s\n", detail);
}
 
class TempRegisterScope {
 public:
  LiftoffRegister Acquire(RegClass rc) {
    LiftoffRegList candidates = free_temps_ & GetCacheRegList(rc);
    DCHECK(!candidates.is_empty());
    return free_temps_.clear(candidates.GetFirstRegSet());
  }

  void Return(LiftoffRegister&& temp) {
    DCHECK(!free_temps_.has(temp));
    free_temps_.set(temp);
  }

  void Return(Register&& temp) {
    Return(LiftoffRegister{temp});
    temp = no_reg;
  }

  LiftoffRegList AddTempRegisters(int count, RegClass rc,
                                  LiftoffAssembler* lasm,
                                  LiftoffRegList pinned) {
    LiftoffRegList temps;
    for (int i = 0; i < count; ++i) {
      temps.set(lasm->GetUnusedRegister(rc, pinned | temps));
    }
    free_temps_ = temps;
    return temps;
  }

 private:
  LiftoffRegList free_temps_;
};
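// Typical usage (sketch, not a verbatim call site): reserve the temps up
// front so they are part of the pinned set, then hand them out one by one:
//
//   TempRegisterScope temp_scope;
//   LiftoffRegList pinned =
//       temp_scope.AddTempRegisters(2, kGpReg, &asm_, LiftoffRegList{});
//   ScopedTempRegister temp1{temp_scope, kGpReg};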
 
class ScopedTempRegister {
 public:
  ScopedTempRegister(TempRegisterScope& temp_scope, RegClass rc)
      : reg_(temp_scope.Acquire(rc)), temp_scope_(&temp_scope) {}

  ScopedTempRegister(const ScopedTempRegister&) = delete;

  ScopedTempRegister(ScopedTempRegister&& other) V8_NOEXCEPT
      : reg_(other.reg_), temp_scope_(other.temp_scope_) {
    other.temp_scope_ = nullptr;
  }

  ScopedTempRegister& operator=(const ScopedTempRegister&) = delete;

  ~ScopedTempRegister() {
    if (temp_scope_) Reset();
  }

  LiftoffRegister reg() const {
    DCHECK_NOT_NULL(temp_scope_);
    return reg_;
  }

  void Reset() {
    DCHECK_NOT_NULL(temp_scope_);
    temp_scope_->Return(std::move(reg_));
    temp_scope_ = nullptr;
  }

 private:
  LiftoffRegister reg_;
  TempRegisterScope* temp_scope_;
};
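// RAII discipline: the register is handed back to the TempRegisterScope
// either explicitly via Reset() or implicitly in the destructor, so a temp
// register cannot leak out of the scope that reserved it.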
 
class LiftoffCompiler {
 public:
  using ValidationTag = Decoder::NoValidationTag;
  using Value = ValueBase<ValidationTag>;

  struct ElseState {
    explicit ElseState(Zone* zone) : label(zone), state(zone) {}
    MovableLabel label;
    LiftoffAssembler::CacheState state;
  };

  struct Control : public ControlBase<Value, ValidationTag> {
    ElseState* else_state = nullptr;
    LiftoffAssembler::CacheState label_state;
    MovableLabel label;
    TryInfo* try_info = nullptr;
    // Number of exception refs on the stack below this control.
    int num_exceptions = 0;

    template <typename... Args>
    explicit Control(Zone* zone, Args&&... args)
        : ControlBase(zone, std::forward<Args>(args)...),
          label_state(zone),
          label(zone) {}
  };

  using FullDecoder = WasmFullDecoder<ValidationTag, LiftoffCompiler>;
 
  // A signature stored as ValueKinds rather than ValueTypes.
  class MostlySmallValueKindSig : public Signature<ValueKind> {
   public:
    MostlySmallValueKindSig(Zone* zone, const FunctionSig* sig)
        : Signature<ValueKind>(sig->return_count(), sig->parameter_count(),
                               MakeKinds(inline_storage_, zone, sig)) {}

   private:
    static constexpr size_t kInlineStorage = 8;

    static ValueKind* MakeKinds(ValueKind* storage, Zone* zone,
                                const FunctionSig* sig) {
      const size_t size = sig->parameter_count() + sig->return_count();
      if (V8_UNLIKELY(size > kInlineStorage)) {
        storage = zone->AllocateArray<ValueKind>(size);
      }
      std::transform(sig->all().begin(), sig->all().end(), storage,
                     [](ValueType type) { return type.kind(); });
      return storage;
    }

    ValueKind inline_storage_[kInlineStorage];
  };
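  // Most wasm signatures are short, so the converted kinds usually fit into
  // the small inline buffer; only unusually long signatures pay for a zone
  // allocation.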
 
  // For debugging, registers are spilled to dedicated stack slots before a
  // breakpoint or stack check so that the inspector can read them.
  struct SpilledRegistersForInspection : public ZoneObject {
    struct Entry {
      int offset;
      LiftoffRegister reg;
      ValueKind kind;
    };
    ZoneVector<Entry> entries;

    explicit SpilledRegistersForInspection(Zone* zone) : entries(zone) {}
  };

  struct OutOfLineSafepointInfo {
    ZoneVector<int> slots;
    LiftoffRegList spills;

    explicit OutOfLineSafepointInfo(Zone* zone) : slots(zone) {}
  };
 
  struct OutOfLineCode {
    MovableLabel label;
    MovableLabel continuation;
    Builtin builtin;
    WasmCodePosition position;
    LiftoffRegList regs_to_save;
    Register cached_instance_data;
    OutOfLineSafepointInfo* safepoint_info;
    SpilledRegistersForInspection* spilled_registers;
    DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder;

    // Named constructors:
    static OutOfLineCode Trap(
        Zone* zone, Builtin builtin, WasmCodePosition pos,
        SpilledRegistersForInspection* spilled_registers,
        OutOfLineSafepointInfo* safepoint_info,
        DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder) {
      return {
          MovableLabel{zone},            // label
          MovableLabel{zone},            // continuation
          builtin,                       // builtin
          pos,                           // position
          {},                            // regs_to_save
          no_reg,                        // cached_instance_data
          safepoint_info,                // safepoint_info
          spilled_registers,             // spilled_registers
          debug_sidetable_entry_builder  // debug_sidetable_entry_builder
      };
    }
    static OutOfLineCode StackCheck(
        Zone* zone, WasmCodePosition pos, LiftoffRegList regs_to_save,
        Register cached_instance_data,
        SpilledRegistersForInspection* spilled_regs,
        OutOfLineSafepointInfo* safepoint_info,
        DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder) {
      Builtin stack_guard = Builtin::kWasmStackGuard;
      if (v8_flags.experimental_wasm_growable_stacks) {
        stack_guard = Builtin::kWasmGrowableStackGuard;
      }
      return {
          MovableLabel{zone},            // label
          MovableLabel{zone},            // continuation
          stack_guard,                   // builtin
          pos,                           // position
          regs_to_save,                  // regs_to_save
          cached_instance_data,          // cached_instance_data
          safepoint_info,                // safepoint_info
          spilled_regs,                  // spilled_registers
          debug_sidetable_entry_builder  // debug_sidetable_entry_builder
      };
    }
    static OutOfLineCode TierupCheck(
        Zone* zone, WasmCodePosition pos, LiftoffRegList regs_to_save,
        Register cached_instance_data,
        SpilledRegistersForInspection* spilled_regs,
        OutOfLineSafepointInfo* safepoint_info,
        DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder) {
      return {
          MovableLabel{zone},            // label
          MovableLabel{zone},            // continuation
          Builtin::kWasmTriggerTierUp,   // builtin
          pos,                           // position
          regs_to_save,                  // regs_to_save
          cached_instance_data,          // cached_instance_data
          safepoint_info,                // safepoint_info
          spilled_regs,                  // spilled_registers
          debug_sidetable_entry_builder  // debug_sidetable_entry_builder
      };
    }
  };
 
  LiftoffCompiler(compiler::CallDescriptor* call_descriptor,
                  CompilationEnv* env, Zone* zone,
                  std::unique_ptr<AssemblerBuffer> buffer,
                  DebugSideTableBuilder* debug_sidetable_builder,
                  const LiftoffOptions& options)
      : asm_(zone, std::move(buffer)),
        descriptor_(GetLoweredCallDescriptor(zone, call_descriptor)),

    DCHECK(options.is_initialized());
    // If there are no breakpoints, both pointers must be nullptr.
    DCHECK_IMPLIES(
        next_breakpoint_ptr_ == next_breakpoint_end_,
        next_breakpoint_ptr_ == nullptr && next_breakpoint_end_ == nullptr);
    DCHECK_IMPLIES(!for_debugging_, debug_sidetable_builder_ == nullptr);
  }
 
  void GetCode(CodeDesc* desc) {
    asm_.GetCode(nullptr, desc, &safepoint_table_builder_,
                 handler_table_offset_);
  }

  std::unique_ptr<AssemblerBuffer> ReleaseBuffer() {
    return asm_.ReleaseBuffer();
  }

  std::unique_ptr<LiftoffFrameDescriptionForDeopt> ReleaseFrameDescriptions() {
    return std::move(frame_description_);
  }

  base::OwnedVector<uint8_t> GetSourcePositionTable() {
    return source_position_table_builder_.ToSourcePositionTableVector();
  }

  base::OwnedVector<uint8_t> GetProtectedInstructionsData() const {

  uint32_t GetTotalFrameSlotCountForGC() const {
    return __ GetTotalFrameSlotCountForGC();
  }

  uint32_t OolSpillCount() const { return __ OolSpillCount(); }
 
  void unsupported(FullDecoder* decoder, LiftoffBailoutReason reason,
                   const char* detail) {
    if (did_bailout()) return;
    TRACE("unsupported: %s\n", detail);
    decoder->errorf(decoder->pc_offset(), "unsupported liftoff operation: %s",
                    detail);
    UnuseLabels(decoder);
    CheckBailoutAllowed(reason, detail, env_);
  }

  bool DidAssemblerBailout(FullDecoder* decoder) {
    if (decoder->failed() || !__ did_bailout()) return false;
    unsupported(decoder, __ bailout_reason(), __ bailout_detail());
    return true;
  }

  V8_INLINE bool CheckSupportedType(FullDecoder* decoder, ValueKind kind,
                                    const char* context) {
    if (V8_LIKELY(supported_types_.contains(kind))) return true;
    return MaybeBailoutForUnsupportedType(decoder, kind, context);
  }

  V8_NOINLINE bool MaybeBailoutForUnsupportedType(FullDecoder* decoder,
                                                  ValueKind kind,
                                                  const char* context) {
    DCHECK(!supported_types_.contains(kind));

    LiftoffBailoutReason bailout_reason;
    switch (kind) {
      case kS128:
        bailout_reason = kSimd;
        break;
      default:
        UNREACHABLE();
    }

    base::EmbeddedVector<char, 128> buffer;
    SNPrintF(buffer, "%s %s", name(kind), context);
    unsupported(decoder, bailout_reason, buffer.begin());
    return false;
  }
 
  void UnuseLabels(FullDecoder* decoder) {
    auto Unuse = [](Label* label) {
      label->Unuse();
      label->UnuseNear();
    };
    uint32_t control_depth = decoder ? decoder->control_depth() : 0;
    for (uint32_t i = 0; i < control_depth; ++i) {
      Control* c = decoder->control_at(i);
      Unuse(c->label.get());
      if (c->else_state) Unuse(c->else_state->label.get());
      if (c->try_info != nullptr) Unuse(&c->try_info->catch_label);
    }
    for (auto& ool : out_of_line_code_) Unuse(ool.label.get());
  }
 
  void StartFunction(FullDecoder* decoder) {
    if (v8_flags.trace_liftoff && !v8_flags.trace_wasm_decoder) {
      StdoutStream{} << "hint: add --trace-wasm-decoder to also see the wasm "
                        "instructions being decoded\n";
    }
    int num_locals = decoder->num_locals();
    __ set_num_locals(num_locals);
    for (int i = 0; i < num_locals; ++i) {
      ValueKind kind = decoder->local_type(i).kind();
      __ set_local_kind(i, kind);
    }
  }
 
  class ParameterProcessor {
   public:
    ParameterProcessor(LiftoffCompiler* compiler, uint32_t num_params)
        : compiler_(compiler), num_params_(num_params) {}

    void Process() {
      // First pass: collect the registers that incoming parameters occupy.
      while (NextParam()) {
        MaybeCollectRegister();
        if (needs_gp_pair_) {
          NextLocation();
          MaybeCollectRegister();
        }
      }

      // Second pass: move the parameters into registers or stack slots.
      while (NextParam()) {
        LiftoffRegister reg = LoadToReg(param_regs_);

#if V8_TARGET_ARCH_64_BIT

        if (needs_gp_pair_) {
          NextLocation();
          LiftoffRegister reg2 = LoadToReg(param_regs_ | LiftoffRegList{reg});

    bool NextParam() {
      if (param_idx_ >= num_params_) {

    void NextLocation() {

    LiftoffRegister CurrentRegister() {

    void MaybeCollectRegister() {

    LiftoffRegister LoadToReg(LiftoffRegList pinned) {

        LiftoffRegister reg = CurrentRegister();

      LiftoffRegister reg = compiler_->asm_.GetUnusedRegister(rc_, pinned);
 
  void StackCheck(FullDecoder* decoder, WasmCodePosition position) {
    CODE_COMMENT("stack check");
    if (!v8_flags.wasm_stack_checks) return;

    SpilledRegistersForInspection* spilled_regs = nullptr;

    __ cache_state()->GetTaggedSlotsForOOLCode(

    if (V8_UNLIKELY(for_debugging_)) {
      // When debugging, spill registers to their proper stack locations so
      // that they can be inspected; keep only the cached memory start in a
      // register.
      if (__ cache_state()->cached_mem_start != no_reg) {
        regs_to_save.set(__ cache_state()->cached_mem_start);
      }
      spilled_regs = GetSpilledRegistersForInspection();
    }
    out_of_line_code_.push_back(OutOfLineCode::StackCheck(
        zone_, position, regs_to_save, __ cache_state()->cached_instance_data,
        spilled_regs, safepoint_info, RegisterOOLDebugSideTableEntry(decoder)));
    OutOfLineCode& ool = out_of_line_code_.back();
    __ StackCheck(ool.label.get());
    __ bind(ool.continuation.get());
  }
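  // Note the pattern shared by the checks in this file: the slow path lives
  // in out-of-line code, the inline path is a single compare-and-branch to
  // ool.label, and execution resumes at ool.continuation after the builtin
  // returns.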
 
  void TierupCheck(FullDecoder* decoder, WasmCodePosition position,
                   int budget_used) {
    // Never spend the entire remaining budget on a single check.
    const int max_budget_use = std::max(1, v8_flags.wasm_tiering_budget / 4);
    if (budget_used > max_budget_use) budget_used = max_budget_use;

    SpilledRegistersForInspection* spilled_regs = nullptr;

    __ cache_state()->GetTaggedSlotsForOOLCode(

    out_of_line_code_.push_back(OutOfLineCode::TierupCheck(
        zone_, position, regs_to_save, __ cache_state()->cached_instance_data,
        spilled_regs, safepoint_info, RegisterOOLDebugSideTableEntry(decoder)));
    OutOfLineCode& ool = out_of_line_code_.back();
    FREEZE_STATE(tierup_check);
    __ CheckTierUp(declared_function_index(env_->module, func_index_),
                   budget_used, ool.label.get(), tierup_check);
    __ bind(ool.continuation.get());
  }
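  // Tier-up bookkeeping: the per-function budget is decremented by an
  // estimate of the code executed since the last check; once it goes
  // negative, the out-of-line path calls Builtin::kWasmTriggerTierUp.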
 
  bool SpillLocalsInitially(FullDecoder* decoder, uint32_t num_params) {
    int actual_locals = __ num_locals() - num_params;

    // With many locals, spill them to the stack initially; this avoids
    // repeatedly spilling them at merge points.
    if (actual_locals > kNumCacheRegisters / 2) return true;

    // If any local needs a register pair, spill all locals.
    for (uint32_t param_idx = num_params; param_idx < __ num_locals();
         ++param_idx) {
      if (needs_gp_reg_pair(__ local_kind(param_idx))) return true;
    }
    return false;
  }

  void TraceFunctionEntry(FullDecoder* decoder) {
    CODE_COMMENT("trace function entry");
    __ SpillAllRegisters();
    source_position_table_builder_.AddPosition(
        __ pc_offset(), SourcePosition(decoder->position()), false);
    __ CallBuiltin(Builtin::kWasmTraceEnter);
    DefineSafepoint();
  }
 
  bool dynamic_tiering() {
    return v8_flags.wasm_dynamic_tiering &&
           for_debugging_ == kNotForDebugging &&
           (v8_flags.wasm_tier_up_filter == -1 ||
            v8_flags.wasm_tier_up_filter == func_index_);
  }
 
  void StartFunctionBody(FullDecoder* decoder, Control* block) {
    for (uint32_t i = 0; i < __ num_locals(); ++i) {
      if (!CheckSupportedType(decoder, __ local_kind(i), "param")) return;
    }

    uint32_t num_params =
        static_cast<uint32_t>(decoder->sig_->parameter_count());

    if (v8_flags.wasm_inlining) {
      int declared_func_index =
          func_index_ - env_->module->num_imported_functions;

      __ CallFrameSetupStub(declared_func_index);
    } else {
      __ EnterFrame(StackFrame::WASM);
    }
    __ set_has_frame(true);

    if (DidAssemblerBailout(decoder)) return;

    // Input 0 is the call target, the trusted instance data is at 1.
    [[maybe_unused]] constexpr int kInstanceDataParameterIndex = 1;
    // Check that {kWasmImplicitArgRegister} matches our call descriptor.
    DCHECK_EQ(kWasmImplicitArgRegister,
              Register::from_code(
                  descriptor_->GetInputLocation(kInstanceDataParameterIndex)
                      .AsRegister()));

    {
      ParameterProcessor processor(this, num_params);
      processor.Process();
    }

    int params_size = __ TopSpillOffset();

    // Initialize locals beyond parameters.
    if (SpillLocalsInitially(decoder, num_params)) {
      bool has_refs = false;
      for (uint32_t param_idx = num_params; param_idx < __ num_locals();
           ++param_idx) {
        ValueKind kind = __ local_kind(param_idx);
        has_refs |= is_reference(kind);
      }

      int spill_size = __ TopSpillOffset() - params_size;
      __ FillStackSlotsWithZero(params_size, spill_size);

      // Initialize all reference-type locals with ref.null.
      if (has_refs) {
        LiftoffRegList pinned;
        Register null_ref_reg =
            pinned.set(__ GetUnusedRegister(kGpReg, pinned).gp());
        Register wasm_null_ref_reg =
            pinned.set(__ GetUnusedRegister(kGpReg, pinned).gp());

        for (uint32_t local_index = num_params; local_index < __ num_locals();
             ++local_index) {
          ValueType type = decoder->local_types_[local_index];
          if (type.is_reference()) {
            __ Spill(__ cache_state()->stack_state[local_index].offset(),
                     type.use_wasm_null() ? LiftoffRegister(wasm_null_ref_reg)
                                          : LiftoffRegister(null_ref_reg),
                     type.kind());
          }
        }
      }
    } else {
      for (uint32_t param_idx = num_params; param_idx < __ num_locals();
           ++param_idx) {
        ValueKind kind = __ local_kind(param_idx);
        // Anything which is not i32 or i64 requires spilling.
        DCHECK(kind == kI32 || kind == kI64);
        __ PushConstant(kind, int32_t{0});
      }
    }

    DCHECK_EQ(__ num_locals(), __ cache_state()->stack_height());

    if (V8_UNLIKELY(for_debugging_)) {
      __ ResetOSRTarget();
    }

    if (V8_UNLIKELY(max_steps_)) {
      // Generate the single out-of-line trap slot shared by all CheckMaxSteps
      // calls in this function.
      out_of_line_code_.push_back(OutOfLineCode::Trap(
          zone_, Builtin::kThrowWasmTrapUnreachable, decoder->position(),
          nullptr, nullptr, nullptr));
      // Charge 16 steps for the call itself, plus one per local.
      CheckMaxSteps(decoder, 16 + __ num_locals());
    }

    if (v8_flags.debug_code) {
      // Check that the trusted-instance-data parameter really is one.
      LiftoffRegList pinned;
      Register scratch = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
      Register instance = pinned.set(LoadInstanceIntoRegister(pinned, scratch));

      __ LoadMap(scratch, instance);
      __ Load(LiftoffRegister{scratch}, scratch, no_reg,
              wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset),
              LoadType::kI32Load16U);
      Label ok;
      FreezeCacheState frozen{asm_};
      __ emit_i32_cond_jumpi(kEqual, &ok, scratch,
                             WASM_TRUSTED_INSTANCE_DATA_TYPE, frozen);
      __ AssertUnreachable(AbortReason::kUnexpectedInstanceType);
      __ bind(&ok);
    }

    // Emit a stack check at the function entry.
    StackCheck(decoder, 0);
  }
 
  void GenerateOutOfLineCode(OutOfLineCode* ool) {
    CODE_COMMENT("out of line code");
    __ bind(ool->label.get());
    const bool is_stack_check =
        ool->builtin == Builtin::kWasmStackGuard ||
        ool->builtin == Builtin::kWasmGrowableStackGuard;
    const bool is_tierup = ool->builtin == Builtin::kWasmTriggerTierUp;

    if (!ool->regs_to_save.is_empty()) {
      __ PushRegisters(ool->regs_to_save);
    }
    if (V8_UNLIKELY(ool->spilled_registers != nullptr)) {
      for (auto& entry : ool->spilled_registers->entries) {
        // A register must not be both pushed and spilled.
        DCHECK(!ool->regs_to_save.has(entry.reg));
        __ Spill(entry.offset, entry.reg, entry.kind);
      }
    }

    if (ool->builtin == Builtin::kWasmGrowableStackGuard) {
      WasmGrowableStackGuardDescriptor descriptor;
      DCHECK_EQ(0, descriptor.GetStackParameterCount());
      DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
      Register param_reg = descriptor.GetRegisterParameter(0);
      __ LoadConstant(LiftoffRegister(param_reg),

    source_position_table_builder_.AddPosition(
        __ pc_offset(), SourcePosition(ool->position), true);
    __ CallBuiltin(ool->builtin);
    auto safepoint = safepoint_table_builder_.DefineSafepoint(&asm_);

    if (ool->safepoint_info) {
      for (auto index : ool->safepoint_info->slots) {
        safepoint.DefineTaggedStackSlot(index);
      }

      int total_frame_size = __ GetTotalFrameSize();

      __ RecordSpillsInSafepoint(safepoint, ool->regs_to_save,
                                 ool->safepoint_info->spills, index);
    }

    DCHECK_EQ(!debug_sidetable_builder_, !ool->debug_sidetable_entry_builder);
    if (V8_UNLIKELY(ool->debug_sidetable_entry_builder)) {
      ool->debug_sidetable_entry_builder->set_pc_offset(__ pc_offset());
    }
    DCHECK_EQ(ool->continuation.get()->is_bound(), is_stack_check || is_tierup);
    if (is_stack_check) {
      MaybeOSR();
    }
    if (!ool->regs_to_save.is_empty()) __ PopRegisters(ool->regs_to_save);
    if (is_stack_check || is_tierup) {
      if (V8_UNLIKELY(ool->spilled_registers != nullptr)) {
        DCHECK(for_debugging_);
        for (auto& entry : ool->spilled_registers->entries) {
          __ Fill(entry.reg, entry.offset, entry.kind);
        }
      }
      if (ool->cached_instance_data != no_reg) {
        __ LoadInstanceDataFromFrame(ool->cached_instance_data);
      }
      __ emit_jump(ool->continuation.get());
    } else {
      __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
    }
  }
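  // Summary of the control flow above: trap stubs never return and therefore
  // end in AssertUnreachable; stack checks and tier-up checks are the only
  // out-of-line stubs that restore spilled state and jump back to the
  // continuation.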
 
  void FinishFunction(FullDecoder* decoder) {
    if (DidAssemblerBailout(decoder)) return;
    __ AlignFrameSize();

    int frame_size = __ GetTotalFrameSize();

    for (OutOfLineCode& ool : out_of_line_code_) {
      GenerateOutOfLineCode(&ool);
    }
    DCHECK_EQ(frame_size, __ GetTotalFrameSize());
    __ PatchPrepareStackFrame(pc_offset_stack_frame_construction_,
                              &safepoint_table_builder_, v8_flags.wasm_inlining,

    // Emit the handler table.
    if (!handlers_.empty()) {
      handler_table_offset_ = HandlerTable::EmitReturnTableStart(&asm_);
      for (auto& handler : handlers_) {
        HandlerTable::EmitReturnEntry(&asm_, handler.pc_offset,
                                      handler.handler.get()->pos());
      }
    }
    __ MaybeEmitOutOfLineConstantPool();
    // The previous calls may have also generated a bailout.
    DidAssemblerBailout(decoder);

    if (dynamic_tiering()) {
      // Record the frame size and the collected call targets for feedback.
      TypeFeedbackStorage& type_feedback = env_->module->type_feedback;
      base::MutexGuard mutex_guard(&type_feedback.mutex);
      FunctionTypeFeedback& function_feedback =
          type_feedback.feedback_for_function[func_index_];
      function_feedback.liftoff_frame_size = __ GetTotalFrameSize();
      base::OwnedVector<uint32_t>& call_targets =
          function_feedback.call_targets;
      if (call_targets.empty()) {

    if (frame_description_) {

  void OnFirstError(FullDecoder* decoder) {

    UnuseLabels(decoder);
    asm_.AbortCompilation();
  }
 
#define FUZZER_HEAVY_INSTRUCTION                      \
  do {                                                \
    if (V8_UNLIKELY(max_steps_ != nullptr)) {         \
      CheckMaxSteps(decoder, kHeavyInstructionSteps); \
    }                                                 \
  } while (false)

  V8_NOINLINE void CheckMaxSteps(FullDecoder* decoder, int steps_done = 1) {
    SCOPED_CODE_COMMENT("check max steps");
    LiftoffRegList pinned;
    LiftoffRegister max_steps = pinned.set(__ GetUnusedRegister(kGpReg, {}));
    LiftoffRegister max_steps_addr =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    {
      FREEZE_STATE(frozen);

      __ Load(max_steps, max_steps_addr.gp(), no_reg, 0, LoadType::kI32Load);
      // Subtract first (and write the value back) so that the remaining
      // budget is already updated even if the function traps later on.
      __ emit_i32_subi(max_steps.gp(), max_steps.gp(), steps_done);
      __ Store(max_steps_addr.gp(), no_reg, 0, max_steps, StoreType::kI32Store,
               pinned);
      // Trap if the remaining budget dropped below zero.
      DCHECK_EQ(Builtin::kThrowWasmTrapUnreachable,
                out_of_line_code_.front().builtin);
      Label* trap_label = out_of_line_code_.front().label.get();
      __ emit_i32_cond_jumpi(kLessThan, trap_label, max_steps.gp(), 0, frozen);
    }
  }
 
  V8_NOINLINE void EmitDebuggingInfo(FullDecoder* decoder, WasmOpcode opcode) {
    DCHECK(for_debugging_);

    bool has_breakpoint = false;
    if (next_breakpoint_ptr_) {
      if (*next_breakpoint_ptr_ == 0) {
        // A single breakpoint at offset 0 indicates stepping.
        DCHECK_EQ(next_breakpoint_ptr_ + 1, next_breakpoint_end_);
        has_breakpoint = true;
      } else {
        while (next_breakpoint_ptr_ != next_breakpoint_end_ &&
               *next_breakpoint_ptr_ < decoder->position()) {
          // Skip unrequested breakpoints.
          ++next_breakpoint_ptr_;
        }
        if (next_breakpoint_ptr_ == next_breakpoint_end_) {
          next_breakpoint_ptr_ = next_breakpoint_end_ = nullptr;
        } else if (*next_breakpoint_ptr_ == decoder->position()) {
          has_breakpoint = true;
        }
      }
    }
    if (has_breakpoint) {

      EmitBreakpoint(decoder);

    } else if (!did_function_entry_break_checks_) {

      __ Load(LiftoffRegister{flag}, flag, no_reg, 0, LoadType::kI32Load8U, {});

      EmitBreakpoint(decoder);

    } else if (dead_breakpoint_ == decoder->position()) {
      DCHECK(!next_breakpoint_ptr_ ||
             *next_breakpoint_ptr_ != dead_breakpoint_);
      // The top frame is paused at this position, but the breakpoint was
      // removed. Emit a dead breakpoint so that the source position exists
      // and the offset to the return address stays unchanged.
      Label cont;
      __ emit_jump(&cont);
      EmitBreakpoint(decoder);
      __ bind(&cont);
    }
    if (V8_UNLIKELY(max_steps_ != nullptr)) {
      CheckMaxSteps(decoder);
    }
  }
 
  void NextInstruction(FullDecoder* decoder, WasmOpcode opcode) {
    TraceCacheState(decoder);
    CODE_COMMENT(WasmOpcodes::OpcodeName(
        WasmOpcodes::IsPrefixOpcode(opcode)
            ? decoder->read_prefixed_opcode<ValidationTag>(decoder->pc()).first
            : opcode));

    if (!has_outstanding_op() && decoder->control_at(0)->reachable()) {
      // The decoder stack and the liftoff stack must be in sync whenever the
      // current code is reachable.
      DCHECK_EQ(decoder->stack_size() + __ num_locals() + num_exceptions_,
                __ cache_state()->stack_state.size());
    }

    if (V8_UNLIKELY(for_debugging_)) EmitDebuggingInfo(decoder, opcode);
  }

  void EmitBreakpoint(FullDecoder* decoder) {
    DCHECK(for_debugging_);
    source_position_table_builder_.AddPosition(
        __ pc_offset(), SourcePosition(decoder->position()), true);
    __ CallBuiltin(Builtin::kWasmDebugBreak);
    DefineSafepointWithCalleeSavedRegisters();
    RegisterDebugSideTableEntry(decoder,
                                DebugSideTableBuilder::kAllowRegisters);
  }
 
  void PushControl(Control* block) {

  void Block(FullDecoder* decoder, Control* block) { PushControl(block); }

  void Loop(FullDecoder* decoder, Control* loop) {
    // Before entering a loop, spill all locals to the stack, in order to free
    // the cache registers and to avoid unnecessarily reloading them at the
    // loop header.
    __ SpillLoopArgs(loop->start_merge.arity);

    // Loop labels bind at the beginning of the loop.
    __ bind(loop->label.get());

    // Save the current cache state for the merge when jumping to this loop.
    loop->label_state.Split(*__ cache_state());

    PushControl(loop);

    if (!dynamic_tiering()) {
      // With dynamic tiering, the tier-up check in the loop header doubles as
      // the interrupt check; otherwise emit an explicit stack check here.
      StackCheck(decoder, decoder->position());
    }
  }

  void Try(FullDecoder* decoder, Control* block) {
    block->try_info = zone_->New<TryInfo>(zone_);
    PushControl(block);
  }
 
  LiftoffRegister GetExceptionProperty(const VarState& exception,
                                       RootIndex root_index) {
    DCHECK(root_index == RootIndex::kwasm_exception_tag_symbol ||
           root_index == RootIndex::kwasm_exception_values_symbol);

    LiftoffRegList pinned;
    LiftoffRegister tag_symbol_reg =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    LoadExceptionSymbol(tag_symbol_reg.gp(), pinned, root_index);
    LiftoffRegister context_reg =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned));

    CallBuiltin(Builtin::kWasmGetOwnProperty,

    return LiftoffRegister(kReturnRegister0);
  }
 
  void CatchException(FullDecoder* decoder, const TagIndexImmediate& imm,
                      Control* block, base::Vector<Value> values) {
    DCHECK(block->is_try_catch());
    __ emit_jump(block->label.get());

    __ bind(&block->try_info->catch_label);
    block->try_info->catch_label.Unuse();
    block->try_info->catch_label.UnuseNear();

    __ cache_state()->Split(block->try_info->catch_state);

    CODE_COMMENT("load caught exception tag");
    LiftoffRegister caught_tag =
        GetExceptionProperty(__ cache_state()->stack_state.back(),
                             RootIndex::kwasm_exception_tag_symbol);
    LiftoffRegList pinned;
    pinned.set(caught_tag);

    CODE_COMMENT("load expected exception tag");
    Register imm_tag = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
    LOAD_TAGGED_PTR_INSTANCE_FIELD(imm_tag, TagsTable, pinned);
    __ LoadTaggedPointer(
        imm_tag, imm_tag, no_reg,
        wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index));

    CODE_COMMENT("compare tags");
    if (imm.tag->sig->parameter_count() == 1 &&
        imm.tag->sig->GetParam(0) == kWasmExternRef) {
      // Special case: the tag may be WebAssembly.JSTag, in which case a plain
      // JS exception is caught and pushed on the operand stack as-is.
      LiftoffRegister undefined =
          pinned.set(__ GetUnusedRegister(kGpReg, pinned));

      LiftoffRegister js_tag = pinned.set(__ GetUnusedRegister(kGpReg, pinned));

      __ LoadTaggedPointer(
          js_tag.gp(), js_tag.gp(), no_reg,
          NativeContext::SlotOffset(Context::WASM_JS_TAG_INDEX));
      __ LoadTaggedPointer(
          js_tag.gp(), js_tag.gp(), no_reg,
          wasm::ObjectAccess::ToTagged(WasmTagObject::kTagOffset));
      {
        LiftoffAssembler::CacheState initial_state(zone_);
        LiftoffAssembler::CacheState end_state(zone_);
        Label js_exception;
        Label done;
        Label uncaught;
        initial_state.Split(*__ cache_state());
        {
          FREEZE_STATE(state_merged_explicitly);
          // If the caught tag is undefined, this is not a wasm exception;
          // check for a JS exception instead.
          __ emit_cond_jump(kEqual, &js_exception, kRefNull, caught_tag.gp(),
                            undefined.gp(), state_merged_explicitly);
          // Check whether the tags match.
          __ emit_cond_jump(kNotEqual, &uncaught, kRefNull, imm_tag,
                            caught_tag.gp(), state_merged_explicitly);
        }
        // The tags match: unpack the exception values.
        GetExceptionValues(decoder, __ cache_state()->stack_state.back(),
                           imm.tag);

        end_state.Steal(*__ cache_state());
        __ emit_jump(&done);

        __ bind(&js_exception);
        __ cache_state()->Split(initial_state);
        {
          FREEZE_STATE(state_merged_explicitly);
          __ emit_cond_jump(kNotEqual, &uncaught, kRefNull, imm_tag,
                            js_tag.gp(), state_merged_explicitly);
        }
        // Push the caught JS exception itself.
        LiftoffRegister exception = __ PeekToRegister(0, pinned);
        __ PushRegister(kRef, exception);

        __ MergeFullStackWith(end_state);
        __ emit_jump(&done);

        // Not caught by this handler: forward to the next one.
        __ bind(&uncaught);
        __ cache_state()->Steal(initial_state);
        __ MergeFullStackWith(block->try_info->catch_state);
        __ emit_jump(&block->try_info->catch_label);

        __ bind(&done);
        __ cache_state()->Steal(end_state);
      }
    } else {
      {
        FREEZE_STATE(frozen);
        Label caught;
        __ emit_cond_jump(kEqual, &caught, kRefNull, imm_tag, caught_tag.gp(),
                          frozen);
        // The tags don't match: forward to the next handler.
        __ MergeFullStackWith(block->try_info->catch_state);
        __ emit_jump(&block->try_info->catch_label);
        __ bind(&caught);
      }
      GetExceptionValues(decoder, __ cache_state()->stack_state.back(),
                         imm.tag);
    }
    if (!block->try_info->in_handler) {
      block->try_info->in_handler = true;
      num_exceptions_++;
    }
  }
 
  void Rethrow(FullDecoder* decoder, const VarState& exception) {
    CallBuiltin(Builtin::kWasmRethrow, MakeSig::Params(kRef), {exception},
                decoder->position());
  }
 
  void Delegate(FullDecoder* decoder, uint32_t depth, Control* block) {
    DCHECK_EQ(block, decoder->control_at(0));
    Control* target = decoder->control_at(depth);
    DCHECK(block->is_incomplete_try());
    __ bind(&block->try_info->catch_label);
    if (block->try_info->catch_reached) {
      __ cache_state()->Steal(block->try_info->catch_state);
      if (depth == decoder->control_depth() - 1) {
        // Delegate to the caller: rethrow, do not emit a landing pad.
        Rethrow(decoder, __ cache_state()->stack_state.back());
      } else {
        DCHECK(target->is_incomplete_try());
        if (target->try_info->catch_reached) {
          __ MergeStackWith(target->try_info->catch_state, 1,
                            LiftoffAssembler::kForwardJump);
        } else {
          target->try_info->catch_state = __ MergeIntoNewState(
              __ num_locals(), 1, target->stack_depth + target->num_exceptions);
          target->try_info->catch_reached = true;
        }
        __ emit_jump(&target->try_info->catch_label);
      }
    }
  }
 
  void Rethrow(FullDecoder* decoder, Control* try_block) {
    int index = try_block->try_info->catch_state.stack_height() - 1;
    auto& exception = __ cache_state()->stack_state[index];
    Rethrow(decoder, exception);
  }
 
  void CatchAll(FullDecoder* decoder, Control* block) {
    DCHECK(block->is_try_catchall() || block->is_try_catch());
    DCHECK_EQ(decoder->control_at(0), block);
    __ bind(&block->try_info->catch_label);
    __ cache_state()->Split(block->try_info->catch_state);
    if (!block->try_info->in_handler) {
      block->try_info->in_handler = true;
      num_exceptions_++;
    }
  }
 
  void TryTable(FullDecoder* decoder, Control* block) {
    block->try_info = zone_->New<TryInfo>(zone_);
    PushControl(block);
  }
 
  void CatchCase(FullDecoder* decoder, Control* block,
                 const CatchCase& catch_case, base::Vector<Value> values) {
    DCHECK(block->is_try_table());

    __ bind(&block->try_info->catch_label);
    block->try_info->catch_label.Unuse();
    block->try_info->catch_label.UnuseNear();
    __ cache_state()->Split(block->try_info->catch_state);

    if (catch_case.kind == kCatchAll || catch_case.kind == kCatchAllRef) {

      BrOrRet(decoder, catch_case.br_imm.depth);
      return;
    }

    CODE_COMMENT("load caught exception tag");
    LiftoffRegister caught_tag =
        GetExceptionProperty(__ cache_state()->stack_state.back(),
                             RootIndex::kwasm_exception_tag_symbol);
    LiftoffRegList pinned;
    pinned.set(caught_tag);

    CODE_COMMENT("load expected exception tag");
    Register imm_tag = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
    LOAD_TAGGED_PTR_INSTANCE_FIELD(imm_tag, TagsTable, pinned);
    __ LoadTaggedPointer(imm_tag, imm_tag, no_reg,
                         wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(
                             catch_case.maybe_tag.tag_imm.index));

    VarState exn = __ cache_state()->stack_state.back();

    CODE_COMMENT("compare tags");
    if (catch_case.maybe_tag.tag_imm.tag->sig->parameter_count() == 1 &&
        catch_case.maybe_tag.tag_imm.tag->sig->GetParam(0) == kWasmExternRef) {
      // Special case: the tag may be WebAssembly.JSTag, in which case a plain
      // JS exception is caught and pushed on the operand stack as-is.
      LiftoffRegister undefined =
          pinned.set(__ GetUnusedRegister(kGpReg, pinned));

      LiftoffRegister js_tag = pinned.set(__ GetUnusedRegister(kGpReg, pinned));

      __ LoadTaggedPointer(
          js_tag.gp(), js_tag.gp(), no_reg,
          NativeContext::SlotOffset(Context::WASM_JS_TAG_INDEX));
      __ LoadTaggedPointer(
          js_tag.gp(), js_tag.gp(), no_reg,
          wasm::ObjectAccess::ToTagged(WasmTagObject::kTagOffset));
      {
        LiftoffAssembler::CacheState initial_state(zone_);
        LiftoffAssembler::CacheState end_state(zone_);
        Label js_exception;
        Label done;
        Label uncaught;
        initial_state.Split(*__ cache_state());
        {
          FREEZE_STATE(state_merged_explicitly);
          __ emit_cond_jump(kEqual, &js_exception, kRefNull, caught_tag.gp(),
                            undefined.gp(), state_merged_explicitly);
          __ emit_cond_jump(kNotEqual, &uncaught, kRefNull, imm_tag,
                            caught_tag.gp(), state_merged_explicitly);
        }
        // The tags match: unpack the exception values.
        GetExceptionValues(decoder, __ cache_state()->stack_state.back(),
                           catch_case.maybe_tag.tag_imm.tag);

        end_state.Steal(*__ cache_state());
        __ emit_jump(&done);

        __ bind(&js_exception);
        __ cache_state()->Split(initial_state);
        {
          FREEZE_STATE(state_merged_explicitly);
          __ emit_cond_jump(kNotEqual, &uncaught, kRefNull, imm_tag,
                            js_tag.gp(), state_merged_explicitly);
        }
        // Push the caught JS exception itself.
        LiftoffRegister exception = __ PeekToRegister(0, pinned);

        __ MergeFullStackWith(end_state);
        __ emit_jump(&done);

        // Not caught by this handler: forward to the next one.
        __ bind(&uncaught);
        __ cache_state()->Steal(initial_state);
        __ MergeFullStackWith(block->try_info->catch_state);
        __ emit_jump(&block->try_info->catch_label);

        __ bind(&done);
        __ cache_state()->Steal(end_state);
      }
    } else {
      {
        FREEZE_STATE(frozen);
        Label caught;
        __ emit_cond_jump(kEqual, &caught, kRefNull, imm_tag, caught_tag.gp(),
                          frozen);
        // The tags don't match: forward to the next handler.
        __ MergeFullStackWith(block->try_info->catch_state);
        __ emit_jump(&block->try_info->catch_label);
        __ bind(&caught);
      }
      GetExceptionValues(decoder, __ cache_state()->stack_state.back(),
                         catch_case.maybe_tag.tag_imm.tag);
    }

    if (catch_case.kind == kCatchRef) {
      // Append the exception reference to the operand stack.
      auto rc = reg_class_for(kRefNull);
      LiftoffRegister reg = __ GetUnusedRegister(rc, pinned);
      __ Fill(reg, exn.offset(), kRefNull);
      __ PushRegister(kRefNull, reg);
    }
    BrOrRet(decoder, catch_case.br_imm.depth);
    bool is_last = &catch_case == &block->catch_cases.last();
    if (is_last && !decoder->HasCatchAll(block)) {
      // No catch-all: rethrow if no tag matched.
      __ bind(&block->try_info->catch_label);
      __ cache_state()->Steal(block->try_info->catch_state);
      ThrowRef(decoder, nullptr);
    }
  }
 
  void ThrowRef(FullDecoder* decoder, Value*) {

    CallBuiltin(Builtin::kWasmThrowRef, MakeSig::Params(kRef), {exn},
                decoder->position());
 
  void JumpIfFalse(FullDecoder* decoder, Label* false_dst,
                   std::optional<FreezeCacheState>& will_freeze) {
    DCHECK(!will_freeze.has_value());
    Condition cond =
        test_and_reset_outstanding_op(kExprI32Eqz) ? kNotZero : kZero;

    if (!has_outstanding_op()) {
      // Unary comparison.
      Register value = __ PopToRegister().gp();
      will_freeze.emplace(asm_);
      __ emit_cond_jump(cond, false_dst, kI32, value, no_reg, *will_freeze);
      return;
    }

    // Binary comparison of i32 values.
    cond = Negate(GetCompareCondition(outstanding_op_));
    outstanding_op_ = kNoOutstandingOp;
    VarState rhs_slot = __ cache_state()->stack_state.back();
    if (rhs_slot.is_const()) {
      // Compare to a constant.
      int32_t rhs_imm = rhs_slot.i32_const();
      __ cache_state()->stack_state.pop_back();
      Register lhs = __ PopToRegister().gp();
      will_freeze.emplace(asm_);
      __ emit_i32_cond_jumpi(cond, false_dst, lhs, rhs_imm, *will_freeze);
      return;
    }

    Register rhs = __ PopToRegister().gp();
    VarState lhs_slot = __ cache_state()->stack_state.back();
    if (lhs_slot.is_const()) {
      // Compare a constant to an arbitrary value.
      int32_t lhs_imm = lhs_slot.i32_const();
      __ cache_state()->stack_state.pop_back();
      will_freeze.emplace(asm_);
      __ emit_i32_cond_jumpi(Flip(cond), false_dst, rhs, lhs_imm, *will_freeze);
      return;
    }

    // Compare two arbitrary values.
    Register lhs = __ PopToRegister(LiftoffRegList{rhs}).gp();
    will_freeze.emplace(asm_);
    __ emit_cond_jump(cond, false_dst, kI32, lhs, rhs, *will_freeze);
  }
 
  void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
    DCHECK_EQ(if_block, decoder->control_at(0));
    DCHECK(if_block->is_if());

    // Allocate the else state.
    if_block->else_state = zone_->New<ElseState>(zone_);

    // Test the condition on the value stack; jump to else if zero.
    std::optional<FreezeCacheState> frozen;
    JumpIfFalse(decoder, if_block->else_state->label.get(), frozen);
    frozen.reset();

    // Store the state (after popping the value) for executing the else branch.
    if_block->else_state->state.Split(*__ cache_state());

    PushControl(if_block);
  }
 
  void FallThruTo(FullDecoder* decoder, Control* c) {
    if (c->end_merge.reached) {
      __ MergeStackWith(c->label_state, c->br_merge()->arity,
                        LiftoffAssembler::kForwardJump);
    } else {
      c->label_state = __ MergeIntoNewState(__ num_locals(), c->end_merge.arity,
                                            c->stack_depth + c->num_exceptions);
    }
    __ emit_jump(c->label.get());
    TraceCacheState(decoder);
  }
 
  void FinishOneArmedIf(FullDecoder* decoder, Control* c) {
    DCHECK(c->is_onearmed_if());
    if (c->end_merge.reached) {
      // Someone already merged to the end of the if. Merge both arms into
      // that.
      if (c->reachable()) {
        // Merge the if state into the end of the if.
        __ MergeFullStackWith(c->label_state);
        __ emit_jump(c->label.get());
      }
      // Merge the else state into the end of the if.
      __ bind(c->else_state->label.get());
      __ cache_state()->Steal(c->else_state->state);
      __ MergeFullStackWith(c->label_state);
      __ cache_state()->Steal(c->label_state);
    } else if (c->reachable()) {
      // No merge yet at the end of the if, but we need one for the two arms.
      // Init the merge point from the current state, then merge the else
      // state into that.
      DCHECK_EQ(c->start_merge.arity, c->end_merge.arity);
      c->label_state =
          __ MergeIntoNewState(__ num_locals(), c->start_merge.arity,
                               c->stack_depth + c->num_exceptions);
      __ emit_jump(c->label.get());
      // Merge the else state into the end state.
      __ bind(c->else_state->label.get());
      __ cache_state()->Steal(c->else_state->state);
      __ MergeFullStackWith(c->label_state);
      __ cache_state()->Steal(c->label_state);
    } else {
      // No merge needed; continue with the else state.
      __ bind(c->else_state->label.get());
      __ cache_state()->Steal(c->else_state->state);
    }
  }
 
  void FinishTry(FullDecoder* decoder, Control* c) {
    DCHECK(c->is_try_catch() || c->is_try_catchall() || c->is_try_table());
    if (!c->end_merge.reached) {
      if (c->try_info->catch_reached && !c->is_try_table()) {
        // Drop the implicit exception ref.
        __ DropExceptionValueAtOffset(__ num_locals() + c->stack_depth +
                                      c->num_exceptions);
      }
    } else {
      if (c->reachable()) {
        __ MergeStackWith(c->label_state, c->br_merge()->arity,
                          LiftoffAssembler::kForwardJump);
      }
      __ cache_state()->Steal(c->label_state);
    }
    if (c->try_info->catch_reached && !c->is_try_table()) {
      num_exceptions_--;
    }
  }
 
  void PopControl(FullDecoder* decoder, Control* c) {
    if (c->is_loop()) return;  // A loop just falls through.
    if (c->is_onearmed_if()) {
      // Special handling for one-armed ifs.
      FinishOneArmedIf(decoder, c);
    } else if (c->is_try_catch() || c->is_try_catchall() || c->is_try_table()) {
      FinishTry(decoder, c);
    } else if (c->end_merge.reached) {
      // A merge already exists: merge our state into that, then continue with
      // the merged state.
      if (c->reachable()) {
        __ MergeFullStackWith(c->label_state);
      }
      __ cache_state()->Steal(c->label_state);
    }

    if (!c->label.get()->is_bound()) __ bind(c->label.get());
  }
 
  LiftoffRegister GenerateCCall(ValueKind return_kind,
                                const std::initializer_list<VarState> args,
                                ExternalReference ext_ref) {
    SCOPED_CODE_COMMENT(
        std::string{"Call extref: "} +

    __ SpillAllRegisters();

  void GenerateCCallWithStackBuffer(const LiftoffRegister* result_regs,
                                    ValueKind return_kind,
                                    ValueKind out_argument_kind,
                                    const std::initializer_list<VarState> args,
                                    ExternalReference ext_ref) {
    SCOPED_CODE_COMMENT(
        std::string{"Call extref: "} +

    // Before making a call, spill all cache registers.
    __ SpillAllRegisters();

    // Store arguments on our stack, then align the stack for calling to C.
    int param_bytes = 0;
    for (const VarState& arg : args) {
      param_bytes += value_kind_size(arg.kind());
    }
    int out_arg_bytes =
        out_argument_kind == kVoid ? 0 : value_kind_size(out_argument_kind);
    int stack_bytes = std::max(param_bytes, out_arg_bytes);
    __ CallCWithStackBuffer(args, result_regs, return_kind, out_argument_kind,
                            stack_bytes, ext_ref);
  }
 
  template <typename EmitFn, typename... Args>
  void CallEmitFn(EmitFn fn, Args... args)
    requires(!std::is_member_function_pointer_v<EmitFn>)
  {
    fn(args...);
  }

  template <typename EmitFn, typename... Args>
  void CallEmitFn(EmitFn fn, Args... args)
    requires std::is_member_function_pointer_v<EmitFn>
  {
    (asm_.*fn)(ConvertAssemblerArg(args)...);
  }

  // Wrap a LiftoffRegister with implicit conversions to Register and
  // DoubleRegister.
  struct AssemblerRegisterConverter {
    LiftoffRegister reg;
    operator LiftoffRegister() { return reg; }
    operator Register() { return reg.gp(); }
    operator DoubleRegister() { return reg.fp(); }
  };

  // Convert a LiftoffRegister to the converter wrapper above; pass all other
  // types through unchanged.
  template <typename T>
  std::conditional_t<std::is_same_v<LiftoffRegister, T>,
                     AssemblerRegisterConverter, T>
  ConvertAssemblerArg(T t) {
    return {t};
  }

  template <typename EmitFn, typename ArgType>
  struct EmitFnWithFirstArg {
    EmitFn fn;
    ArgType first_arg;
  };

  template <typename EmitFn, typename ArgType>
  EmitFnWithFirstArg<EmitFn, ArgType> BindFirst(EmitFn fn, ArgType arg) {
    return {fn, arg};
  }

  template <typename EmitFn, typename T, typename... Args>
  void CallEmitFn(EmitFnWithFirstArg<EmitFn, T> bound_fn, Args... args) {
    CallEmitFn(bound_fn.fn, bound_fn.first_arg, ConvertAssemblerArg(args)...);
  }
 
  template <ValueKind src_kind, ValueKind result_kind,
            ValueKind result_lane_kind = kVoid, class EmitFn>
  void EmitUnOp(EmitFn fn) {
    constexpr RegClass src_rc = reg_class_for(src_kind);
    constexpr RegClass result_rc = reg_class_for(result_kind);
    LiftoffRegister src = __ PopToRegister();
    LiftoffRegister dst = src_rc == result_rc
                              ? __ GetUnusedRegister(result_rc, {src}, {})
                              : __ GetUnusedRegister(result_rc, {});
    CallEmitFn(fn, dst, src);
    if (V8_UNLIKELY(nondeterminism_)) {
      LiftoffRegList pinned{dst};
      if (result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64) {
        CheckNan(dst, pinned, result_kind);
      } else if (result_kind == ValueKind::kS128 &&
                 (result_lane_kind == kF32 || result_lane_kind == kF64)) {
        CheckS128Nan(dst, pinned, result_lane_kind);
      }
    }
    __ PushRegister(result_kind, dst);
  }
 
  template <ValueKind kind>
  void EmitFloatUnOpWithCFallback(
      bool (LiftoffAssembler::*emit_fn)(DoubleRegister, DoubleRegister),
      ExternalReference (*fallback_fn)()) {
    auto emit_with_c_fallback = [this, emit_fn, fallback_fn](
                                    LiftoffRegister dst, LiftoffRegister src) {
      // Try the architecture-specific instruction first; fall back to a C
      // call if it is unavailable.
      if ((asm_.*emit_fn)(dst.fp(), src.fp())) return;
      ExternalReference ext_ref = fallback_fn();
      GenerateCCallWithStackBuffer(&dst, kVoid, kind, {VarState{kind, src, 0}},
                                   ext_ref);
    };
    EmitUnOp<kind, kind>(emit_with_c_fallback);
  }

  enum TypeConversionTrapping : bool { kCanTrap = true, kNoTrap = false };
 
  template <ValueKind dst_kind, ValueKind src_kind,
            TypeConversionTrapping can_trap>
  void EmitTypeConversion(FullDecoder* decoder, WasmOpcode opcode,
                          ExternalReference (*fallback_fn)()) {
    static constexpr RegClass src_rc = reg_class_for(src_kind);
    static constexpr RegClass dst_rc = reg_class_for(dst_kind);
    LiftoffRegister src = __ PopToRegister();
    LiftoffRegister dst = src_rc == dst_rc
                              ? __ GetUnusedRegister(dst_rc, {src}, {})
                              : __ GetUnusedRegister(dst_rc, {});
    bool emitted = __ emit_type_conversion(
        opcode, dst, src,
        can_trap ? AddOutOfLineTrap(decoder,
                                    Builtin::kThrowWasmTrapFloatUnrepresentable)
                       .label()
                 : nullptr);
    if (!emitted) {
      // No native instruction available: call the C fallback.
      ExternalReference ext_ref = fallback_fn();
      if (can_trap) {
        // Potentially trapping conversions return an i32 status value.
        LiftoffRegister ret_reg =
            __ GetUnusedRegister(kGpReg, LiftoffRegList{dst});
        LiftoffRegister dst_regs[] = {ret_reg, dst};
        GenerateCCallWithStackBuffer(dst_regs, kI32, dst_kind,
                                     {VarState{src_kind, src, 0}}, ext_ref);
        OolTrapLabel trap = AddOutOfLineTrap(
            decoder, Builtin::kThrowWasmTrapFloatUnrepresentable);
        __ emit_cond_jump(kEqual, trap.label(), kI32, ret_reg.gp(), no_reg,
                          trap.frozen());
      } else {
        GenerateCCallWithStackBuffer(&dst, kVoid, dst_kind,
                                     {VarState{src_kind, src, 0}}, ext_ref);
      }
    }
    __ PushRegister(dst_kind, dst);
  }
 
  void EmitIsNull(WasmOpcode opcode, ValueType type) {
    LiftoffRegList pinned;
    LiftoffRegister ref = pinned.set(__ PopToRegister());
    LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
    LoadNullValueForCompare(null.gp(), pinned, type);
    // Prefer to overwrite one of the input registers with the result.
    LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {ref, null}, {});
#if defined(V8_COMPRESS_POINTERS)
    // With pointer compression, the null register only holds the tagged
    // pointer part, so compare only 32 bits.
    __ emit_i32_set_cond(opcode == kExprRefIsNull ? kEqual : kNotEqual,
                         dst.gp(), ref.gp(), null.gp());
#else
    __ emit_ptrsize_set_cond(opcode == kExprRefIsNull ? kEqual : kNotEqual,
                             dst.gp(), ref, null);
#endif
    __ PushRegister(kI32, dst);
  }
 
  void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value,
            Value* result) {
#define CASE_I32_UNOP(opcode, fn) \
  case kExpr##opcode:             \
    return EmitUnOp<kI32, kI32>(&LiftoffAssembler::emit_##fn);
#define CASE_I64_UNOP(opcode, fn) \
  case kExpr##opcode:             \
    return EmitUnOp<kI64, kI64>(&LiftoffAssembler::emit_##fn);
#define CASE_FLOAT_UNOP(opcode, kind, fn) \
  case kExpr##opcode:                     \
    return EmitUnOp<k##kind, k##kind>(&LiftoffAssembler::emit_##fn);
#define CASE_FLOAT_UNOP_WITH_CFALLBACK(opcode, kind, fn)                     \
  case kExpr##opcode:                                                        \
    return EmitFloatUnOpWithCFallback<k##kind>(&LiftoffAssembler::emit_##fn, \
                                               &ExternalReference::wasm_##fn);
#define CASE_TYPE_CONVERSION(opcode, dst_kind, src_kind, ext_ref, can_trap) \
  case kExpr##opcode:                                                       \
    return EmitTypeConversion<k##dst_kind, k##src_kind, can_trap>(          \
        decoder, kExpr##opcode, ext_ref);
    switch (opcode) {

      CASE_TYPE_CONVERSION(I64SConvertF32, I64, F32,
                           &ExternalReference::wasm_float32_to_int64, kCanTrap)
      CASE_TYPE_CONVERSION(I64UConvertF32, I64, F32,
                           &ExternalReference::wasm_float32_to_uint64, kCanTrap)
      CASE_TYPE_CONVERSION(I64SConvertF64, I64, F64,
                           &ExternalReference::wasm_float64_to_int64, kCanTrap)
      CASE_TYPE_CONVERSION(I64UConvertF64, I64, F64,
                           &ExternalReference::wasm_float64_to_uint64, kCanTrap)

      CASE_TYPE_CONVERSION(F32SConvertI64, F32, I64,
                           &ExternalReference::wasm_int64_to_float32, kNoTrap)
      CASE_TYPE_CONVERSION(F32UConvertI64, F32, I64,
                           &ExternalReference::wasm_uint64_to_float32, kNoTrap)

      CASE_TYPE_CONVERSION(F64SConvertI64, F64, I64,
                           &ExternalReference::wasm_int64_to_float64, kNoTrap)
      CASE_TYPE_CONVERSION(F64UConvertI64, F64, I64,
                           &ExternalReference::wasm_uint64_to_float64, kNoTrap)

      CASE_TYPE_CONVERSION(I64SConvertSatF32, I64, F32,
                           &ExternalReference::wasm_float32_to_int64_sat,
                           kNoTrap)
      CASE_TYPE_CONVERSION(I64UConvertSatF32, I64, F32,
                           &ExternalReference::wasm_float32_to_uint64_sat,
                           kNoTrap)
      CASE_TYPE_CONVERSION(I64SConvertSatF64, I64, F64,
                           &ExternalReference::wasm_float64_to_int64_sat,
                           kNoTrap)
      CASE_TYPE_CONVERSION(I64UConvertSatF64, I64, F64,
                           &ExternalReference::wasm_float64_to_uint64_sat,
                           kNoTrap)

      case kExprI32Eqz:
        DCHECK(decoder->lookahead(0, kExprI32Eqz));
        if ((decoder->lookahead(1, kExprBrIf) ||
             decoder->lookahead(1, kExprIf)) &&
            !for_debugging_) {
          DCHECK(!has_outstanding_op());
          // Fuse the eqz with the following conditional branch.
          outstanding_op_ = kExprI32Eqz;
          break;
        }
        return EmitUnOp<kI32, kI32>(&LiftoffAssembler::emit_i32_eqz);

      case kExprI32Popcnt:
        return EmitUnOp<kI32, kI32>(
            [this](LiftoffRegister dst, LiftoffRegister src) {
              if (__ emit_i32_popcnt(dst.gp(), src.gp())) return;
              LiftoffRegister result =
                  GenerateCCall(kI32, {VarState{kI32, src, 0}},
                                ExternalReference::wasm_word32_popcnt());
              if (result != dst) __ Move(dst, result, kI32);
            });

      case kExprI64Popcnt:
        return EmitUnOp<kI64, kI64>(
            [this](LiftoffRegister dst, LiftoffRegister src) {
              if (__ emit_i64_popcnt(dst, src)) return;
              // The C function returns an i32; zero-extend it to i64.
              LiftoffRegister result =
                  GenerateCCall(kI32, {VarState{kI64, src, 0}},
                                ExternalReference::wasm_word64_popcnt());
              __ emit_type_conversion(kExprI64UConvertI32, dst, result,
                                      nullptr);
            });

      case kExprRefIsNull:
      // We abuse ref.as_non_null, which isn't otherwise used in this switch,
      // as a sentinel for the negation of ref.is_null.
      case kExprRefAsNonNull:
        return EmitIsNull(opcode, value.type);

      case kExprAnyConvertExtern: {
        VarState input_state = __ cache_state()->stack_state.back();
        CallBuiltin(Builtin::kWasmAnyConvertExtern,
                    MakeSig::Returns(kRefNull).Params(kRefNull), {input_state},
                    decoder->position());

      case kExprExternConvertAny: {
        LiftoffRegList pinned;
        LiftoffRegister ref = pinned.set(__ PopToModifiableRegister(pinned));
        LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);

#undef CASE_I32_UNOP
#undef CASE_I64_UNOP
#undef CASE_FLOAT_UNOP
#undef CASE_FLOAT_UNOP_WITH_CFALLBACK
#undef CASE_TYPE_CONVERSION
  template <ValueKind src_kind, ValueKind result_kind, typename EmitFn,
            typename EmitFnImm>
  void EmitBinOpImm(EmitFn fn, EmitFnImm fnImm) {
    static constexpr RegClass src_rc = reg_class_for(src_kind);
    static constexpr RegClass result_rc = reg_class_for(result_kind);

    VarState rhs_slot = __ cache_state()->stack_state.back();
    // Check if the RHS is an immediate.
    if (rhs_slot.is_const()) {
      __ cache_state()->stack_state.pop_back();
      int32_t imm = rhs_slot.i32_const();

      LiftoffRegister lhs = __ PopToRegister();
      // Either reuse {lhs} for {dst}, or choose a register (pair) which does
      // not overlap, for easier code generation.
      LiftoffRegList pinned{lhs};
      LiftoffRegister dst = src_rc == result_rc
                                ? __ GetUnusedRegister(result_rc, {lhs}, pinned)
                                : __ GetUnusedRegister(result_rc, pinned);

      CallEmitFn(fnImm, dst, lhs, imm);
      static_assert(result_kind != kF32 && result_kind != kF64,
                    "Unhandled nondeterminism for fuzzing.");
      __ PushRegister(result_kind, dst);
    } else {
      // The RHS was not an immediate.
      EmitBinOp<src_kind, result_kind>(fn);
    }
  }
 
  template <ValueKind src_kind, ValueKind result_kind,
            bool swap_lhs_rhs = false, ValueKind result_lane_kind = kVoid,
            typename EmitFn>
  void EmitBinOp(EmitFn fn) {
    static constexpr RegClass src_rc = reg_class_for(src_kind);
    static constexpr RegClass result_rc = reg_class_for(result_kind);
    LiftoffRegister rhs = __ PopToRegister();
    LiftoffRegister lhs = __ PopToRegister(LiftoffRegList{rhs});
    LiftoffRegister dst = src_rc == result_rc
                              ? __ GetUnusedRegister(result_rc, {lhs, rhs}, {})
                              : __ GetUnusedRegister(result_rc, {});

    if (swap_lhs_rhs) std::swap(lhs, rhs);

    CallEmitFn(fn, dst, lhs, rhs);
    if (V8_UNLIKELY(nondeterminism_)) {
      LiftoffRegList pinned{dst};
      if (result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64) {
        CheckNan(dst, pinned, result_kind);
      } else if (result_kind == ValueKind::kS128 &&
                 (result_lane_kind == kF32 || result_lane_kind == kF64)) {
        CheckS128Nan(dst, pinned, result_lane_kind);
      }
    }
    __ PushRegister(result_kind, dst);
  }
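  // All binary operations share this register discipline: pop rhs, pop lhs
  // while pinning rhs, pick a dst that may alias the inputs only when source
  // and result register classes match, emit, then push dst.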
 
  void EmitI8x16Swizzle(bool relaxed) {
    static constexpr RegClass result_rc = reg_class_for(kS128);
    LiftoffRegister mask = __ PopToRegister();
    LiftoffRegister src = __ PopToRegister(LiftoffRegList{mask});
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
    LiftoffRegister dst =
        CpuFeatures::IsSupported(AVX)
            ? __ GetUnusedRegister(result_rc, {src, mask}, {})
            : __ GetUnusedRegister(result_rc, {src}, LiftoffRegList{mask});
#else
    LiftoffRegister dst = __ GetUnusedRegister(result_rc, {src, mask}, {});
#endif
    if (relaxed) {
      __ emit_i8x16_relaxed_swizzle(dst, src, mask);
    } else {
      __ emit_i8x16_swizzle(dst, src, mask);
    }
    __ PushRegister(kS128, dst);
  }
 
  void EmitDivOrRem64CCall(LiftoffRegister dst, LiftoffRegister lhs,
                           LiftoffRegister rhs, ExternalReference ext_ref,
                           Label* trap_by_zero,
                           Label* trap_unrepresentable = nullptr) {
    // No native instructions are available; build a C call.
    LiftoffRegister ret = __ GetUnusedRegister(kGpReg, LiftoffRegList{dst});
    LiftoffRegister result_regs[] = {ret, dst};
    GenerateCCallWithStackBuffer(result_regs, kI32, kI64,
                                 {{kI64, lhs, 0}, {kI64, rhs, 0}}, ext_ref);
    FREEZE_STATE(trapping);
    __ emit_i32_cond_jumpi(kEqual, trap_by_zero, ret.gp(), 0, trapping);
    if (trap_unrepresentable) {
      __ emit_i32_cond_jumpi(kEqual, trap_unrepresentable, ret.gp(), -1,
                             trapping);
    }
  }
  template <WasmOpcode opcode>
  void EmitI32CmpOp(FullDecoder* decoder) {
    DCHECK(decoder->lookahead(0, opcode));
    if ((decoder->lookahead(1, kExprBrIf) || decoder->lookahead(1, kExprIf)) &&
        !for_debugging_) {
      // Fuse the compare with the following conditional branch (see
      // JumpIfFalse).
      DCHECK(!has_outstanding_op());
      outstanding_op_ = opcode;
      return;
    }
    return EmitBinOp<kI32, kI32>(BindFirst(&LiftoffAssembler::emit_i32_set_cond,
                                           GetCompareCondition(opcode)));
  }

  template <ValueKind kind, ExternalReference(ExtRefFn)()>
  void EmitBitRotationCCall() {
    EmitBinOp<kind, kind>([this](LiftoffRegister dst, LiftoffRegister input,
                                 LiftoffRegister shift) {
      LiftoffRegister result =
          GenerateCCall(kind, {{kind, input, 0}, {kI32, shift, 0}}, ExtRefFn());
      if (dst != result) __ Move(dst, result, kind);
    });
  }
 
  template <typename EmitFn, typename EmitFnImm>
  void EmitI64Shift(EmitFn fn, EmitFnImm fnImm) {
    return EmitBinOpImm<kI64, kI64>(
        [this, fn](LiftoffRegister dst, LiftoffRegister src,
                   LiftoffRegister amount) {
          CallEmitFn(fn, dst, src,
                     amount.is_gp_pair() ? amount.low_gp() : amount.gp());
        },
        fnImm);
  }
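  // On 32-bit targets an i64 occupies a gp register pair; only the low half
  // of the shift amount matters, since wasm defines shift counts modulo 64.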
 
 2438             const Value& rhs, Value* 
result) {
 
 2457        return EmitI32CmpOp<kExprI32Eq>(decoder);
 
 2459        return EmitI32CmpOp<kExprI32Ne>(decoder);
 
 2461        return EmitI32CmpOp<kExprI32LtS>(decoder);
 
 2463        return EmitI32CmpOp<kExprI32LtU>(decoder);
 
 2465        return EmitI32CmpOp<kExprI32GtS>(decoder);
 
 2467        return EmitI32CmpOp<kExprI32GtU>(decoder);
 
 2469        return EmitI32CmpOp<kExprI32LeS>(decoder);
 
 2471        return EmitI32CmpOp<kExprI32LeU>(decoder);
 
 2473        return EmitI32CmpOp<kExprI32GeS>(decoder);
 
 2475        return EmitI32CmpOp<kExprI32GeU>(decoder);
 
 2493        return EmitBinOp<kI64, kI32>(
 
 2496        return EmitBinOp<kI64, kI32>(
 
 2499        return EmitBinOp<kI64, kI32>(
 
 2502        return EmitBinOp<kI64, kI32>(
 
 2505        return EmitBinOp<kI64, kI32>(
 
 2508        return EmitBinOp<kI64, kI32>(BindFirst(
 
 2511        return EmitBinOp<kI64, kI32>(
 
 2514        return EmitBinOp<kI64, kI32>(BindFirst(
 
 2517        return EmitBinOp<kI64, kI32>(
 
 2520        return EmitBinOp<kI64, kI32>(BindFirst(
 
 2523        return EmitBinOp<kF32, kI32>(
 
 2526        return EmitBinOp<kF32, kI32>(
 
 2529        return EmitBinOp<kF32, kI32>(
 
 2532        return EmitBinOp<kF32, kI32>(BindFirst(
 
 2535        return EmitBinOp<kF32, kI32>(BindFirst(
 
 2538        return EmitBinOp<kF32, kI32>(BindFirst(
 
 2541        return EmitBinOp<kF64, kI32>(
 
 2544        return EmitBinOp<kF64, kI32>(
 
 2547        return EmitBinOp<kF64, kI32>(
 
 2550        return EmitBinOp<kF64, kI32>(BindFirst(
 
 2553        return EmitBinOp<kF64, kI32>(BindFirst(
 
 2556        return EmitBinOp<kF64, kI32>(BindFirst(
 
 2568        return EmitBitRotationCCall<kI32, ExternalReference::wasm_word32_rol>();
 
 2570        return EmitBitRotationCCall<kI32, ExternalReference::wasm_word32_ror>();
 
 2581        return EmitBitRotationCCall<kI64, ExternalReference::wasm_word64_rol>();
 
        return EmitBitRotationCCall<kI64,
                                    ExternalReference::wasm_word64_ror>();
      case kExprF32CopySign:
        return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_copysign);
      case kExprF64CopySign:
        return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_copysign);
      case kExprI32DivS:
        return EmitBinOp<kI32, kI32>([this, decoder](LiftoffRegister dst,
                                                     LiftoffRegister lhs,
                                                     LiftoffRegister rhs) {
          AddOutOfLineTrapDeprecated(decoder, Builtin::kThrowWasmTrapDivByZero);
          // Adding the second trap might invalidate the pointer returned for
          // the first one, so get both pointers afterwards.
          AddOutOfLineTrapDeprecated(decoder,
                                     Builtin::kThrowWasmTrapDivUnrepresentable);
          Label* div_by_zero = out_of_line_code_.end()[-2].label.get();
          Label* div_unrepresentable = out_of_line_code_.end()[-1].label.get();
          __ emit_i32_divs(dst.gp(), lhs.gp(), rhs.gp(), div_by_zero,
                           div_unrepresentable);
        });
      case kExprI32DivU:
        return EmitBinOp<kI32, kI32>([this, decoder](LiftoffRegister dst,
                                                     LiftoffRegister lhs,
                                                     LiftoffRegister rhs) {
          Label* div_by_zero = AddOutOfLineTrapDeprecated(
              decoder, Builtin::kThrowWasmTrapDivByZero);
          __ emit_i32_divu(dst.gp(), lhs.gp(), rhs.gp(), div_by_zero);
        });
      case kExprI32RemS:
        return EmitBinOp<kI32, kI32>([this, decoder](LiftoffRegister dst,
                                                     LiftoffRegister lhs,
                                                     LiftoffRegister rhs) {
          Label* rem_by_zero = AddOutOfLineTrapDeprecated(
              decoder, Builtin::kThrowWasmTrapRemByZero);
          __ emit_i32_rems(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero);
        });
      case kExprI32RemU:
        return EmitBinOp<kI32, kI32>([this, decoder](LiftoffRegister dst,
                                                     LiftoffRegister lhs,
                                                     LiftoffRegister rhs) {
          Label* rem_by_zero = AddOutOfLineTrapDeprecated(
              decoder, Builtin::kThrowWasmTrapRemByZero);
          __ emit_i32_remu(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero);
        });
      case kExprI64DivS:
        return EmitBinOp<kI64, kI64>([this, decoder](LiftoffRegister dst,
                                                     LiftoffRegister lhs,
                                                     LiftoffRegister rhs) {
          AddOutOfLineTrapDeprecated(decoder, Builtin::kThrowWasmTrapDivByZero);
          // Adding the second trap might invalidate the pointer returned for
          // the first one, so get both pointers afterwards.
          AddOutOfLineTrapDeprecated(decoder,
                                     Builtin::kThrowWasmTrapDivUnrepresentable);
          Label* div_by_zero = out_of_line_code_.end()[-2].label.get();
          Label* div_unrepresentable = out_of_line_code_.end()[-1].label.get();
          if (!__ emit_i64_divs(dst, lhs, rhs, div_by_zero,
                                div_unrepresentable)) {
            ExternalReference ext_ref = ExternalReference::wasm_int64_div();
            EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, div_by_zero,
                                div_unrepresentable);
          }
        });
      case kExprI64DivU:
        return EmitBinOp<kI64, kI64>([this, decoder](LiftoffRegister dst,
                                                     LiftoffRegister lhs,
                                                     LiftoffRegister rhs) {
          Label* div_by_zero = AddOutOfLineTrapDeprecated(
              decoder, Builtin::kThrowWasmTrapDivByZero);
          if (!__ emit_i64_divu(dst, lhs, rhs, div_by_zero)) {
            ExternalReference ext_ref = ExternalReference::wasm_uint64_div();
            EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, div_by_zero);
          }
        });
      case kExprI64RemS:
        return EmitBinOp<kI64, kI64>([this, decoder](LiftoffRegister dst,
                                                     LiftoffRegister lhs,
                                                     LiftoffRegister rhs) {
          Label* rem_by_zero = AddOutOfLineTrapDeprecated(
              decoder, Builtin::kThrowWasmTrapRemByZero);
          if (!__ emit_i64_rems(dst, lhs, rhs, rem_by_zero)) {
            ExternalReference ext_ref = ExternalReference::wasm_int64_mod();
            EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, rem_by_zero);
          }
        });
      case kExprI64RemU:
        return EmitBinOp<kI64, kI64>([this, decoder](LiftoffRegister dst,
                                                     LiftoffRegister lhs,
                                                     LiftoffRegister rhs) {
          Label* rem_by_zero = AddOutOfLineTrapDeprecated(
              decoder, Builtin::kThrowWasmTrapRemByZero);
          if (!__ emit_i64_remu(dst, lhs, rhs, rem_by_zero)) {
            ExternalReference ext_ref = ExternalReference::wasm_uint64_mod();
            EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, rem_by_zero);
          }
        });
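      // Note: the 64-bit division and remainder cases above rely on the
      // emit_i64_* helpers returning false on platforms (e.g. 32-bit targets)
      // that have no native 64-bit divide; EmitDivOrRem64CCall then performs
      // the operation through a C function behind the given ExternalReference,
      // reusing the same out-of-line trap labels.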
 
      case kExprRefEq: {
#if defined(V8_COMPRESS_POINTERS)
        // With pointer compression, the upper 32 bits of full pointers are
        // not necessarily canonical, so only compare the lower half.
        return EmitBinOp<kRefNull, kI32>(
            BindFirst(&LiftoffAssembler::emit_i32_set_cond, kEqual));
#else
        return EmitBinOp<kRefNull, kI32>(
            BindFirst(&LiftoffAssembler::emit_ptrsize_set_cond, kEqual));
#endif
      }
      default:
        UNREACHABLE();
    }
  }

  void TraceInstruction(FullDecoder* decoder, uint32_t markid) {
#if V8_TARGET_ARCH_X64
    __ emit_trace_instruction(markid);
#endif
  }
 
  void I32Const(FullDecoder* decoder, Value* result, int32_t value) {
    __ PushConstant(kI32, value);
  }

  void I64Const(FullDecoder* decoder, Value* result, int64_t value) {
    // The {VarState} stores constant values as int32_t, thus we only store
    // 64-bit constants in this field if they fit in an int32_t. Larger values
    // cannot be used as immediate values anyway, so we can also just put them
    // in a register immediately.
    int32_t value_i32 = static_cast<int32_t>(value);
    if (value_i32 == value) {
      __ PushConstant(kI64, value_i32);
    } else {
      LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kI64), {});
      __ LoadConstant(reg, WasmValue(value));
      __ PushRegister(kI64, reg);
    }
  }

  void F32Const(FullDecoder* decoder, Value* result, float value) {
    LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {});
    __ LoadConstant(reg, WasmValue(value));
    __ PushRegister(kF32, reg);
  }

  void F64Const(FullDecoder* decoder, Value* result, double value) {
    LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {});
    __ LoadConstant(reg, WasmValue(value));
    __ PushRegister(kF64, reg);
  }
 
  void RefNull(FullDecoder* decoder, ValueType type, Value*) {
    LiftoffRegister null = __ GetUnusedRegister(kGpReg, {});
    LoadNullValue(null.gp(), type);
    __ PushRegister(type.kind(), null);
  }

  void RefFunc(FullDecoder* decoder, uint32_t function_index, Value* result) {
    CallBuiltin(Builtin::kWasmRefFunc, MakeSig::Returns(kRef).Params(kI32),
                {VarState{kI32, static_cast<int>(function_index), 0}},
                decoder->position());
    __ PushRegister(kRef, LiftoffRegister(kReturnRegister0));
  }
 
  void RefAsNonNull(FullDecoder* decoder, const Value& arg, Value* result) {
    // The decoder only calls this function if the type is nullable.
    DCHECK(arg.type.is_nullable());
    LiftoffRegList pinned;
    LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
    if (null_check_strategy_ == compiler::NullCheckStrategy::kExplicit ||
        IsSubtypeOf(kWasmI31Ref.AsNonNull(), arg.type.AsNonNull(),
                    decoder->module_) ||
        !arg.type.use_wasm_null()) {
      // Use an explicit null check if
      // (1) we cannot use the trap handler or
      // (2) the object might be a Smi or
      // (3) the object might be a JS object.
      MaybeEmitNullCheck(decoder, obj.gp(), pinned, arg.type);
    } else if (!v8_flags.experimental_wasm_skip_null_checks) {
      // Otherwise, load the word after the map word to let the trap handler
      // catch the null access; all Wasm objects are big enough for this load
      // to stay inside the object.
      static_assert(WasmStruct::kHeaderSize > kTaggedSize);
      static_assert(WasmArray::kHeaderSize > kTaggedSize);
      static_assert(WasmInternalFunction::kHeaderSize > kTaggedSize);
      LiftoffRegister dst = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
      uint32_t protected_load_pc = 0;
      __ Load(dst, obj.gp(), no_reg, wasm::ObjectAccess::ToTagged(kTaggedSize),
              LoadType::kI32Load, &protected_load_pc);
      RegisterProtectedInstruction(decoder, protected_load_pc);
    }
    __ PushRegister(kRef, obj);
  }

  void Drop(FullDecoder* decoder) { __ DropValues(1); }
 
  void TraceFunctionExit(FullDecoder* decoder) {
    CODE_COMMENT("trace function exit");
    // Before making the runtime call, spill all cache registers.
    __ SpillAllRegisters();

    // Store the return value if there is exactly one. Multiple return values
    // are not handled yet.
    size_t num_returns = decoder->sig_->return_count();
    // Put the parameter in its place.
    WasmTraceExitDescriptor descriptor;
    DCHECK_EQ(0, descriptor.GetStackParameterCount());
    DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
    Register param_reg = descriptor.GetRegisterParameter(0);
    if (num_returns == 1) {
      auto& return_slot = __ cache_state()->stack_state.back();
      if (return_slot.is_const()) {
        __ Spill(&return_slot);
      }
      DCHECK(return_slot.is_stack());
      __ LoadSpillAddress(param_reg, return_slot.offset(), return_slot.kind());
    } else {
      // Make sure to pass a "valid" parameter (Smi::zero()).
      LoadSmi(LiftoffRegister{param_reg}, 0);
    }

    source_position_table_builder_.AddPosition(
        __ pc_offset(), SourcePosition(decoder->position()), false);
    __ CallBuiltin(Builtin::kWasmTraceExit);
    DefineSafepoint();
  }

  void TierupCheckOnTailCall(FullDecoder* decoder) {
    if (!dynamic_tiering()) return;
    TierupCheck(decoder, decoder->position(),
                __ pc_offset() + kTierUpCostForFunctionEntry);
  }
 
  void DoReturn(FullDecoder* decoder, uint32_t /* drop_values */) {
    ReturnImpl(decoder);
  }

  void ReturnImpl(FullDecoder* decoder) {
    if (V8_UNLIKELY(v8_flags.trace_wasm)) TraceFunctionExit(decoder);
    // A function returning an uninhabitable type can't ever actually reach
    // a {return} statement.
    base::Vector<const ValueType> returns = decoder->sig_->returns();
    if (V8_UNLIKELY(std::any_of(
            returns.begin(), returns.end(),
            [](const ValueType type) { return type.is_uninhabited(); }))) {
      __ Abort(AbortReason::kUninhabitableType);
      return;
    }
    if (dynamic_tiering()) {
      TierupCheck(decoder, decoder->position(),
                  __ pc_offset() + kTierUpCostForFunctionEntry);
    }
    size_t num_returns = decoder->sig_->return_count();
    if (num_returns > 0) __ MoveToReturnLocations(decoder->sig_, descriptor_);
    if (v8_flags.experimental_wasm_growable_stacks) {
      __ CheckStackShrink();
    }
    __ LeaveFrame(StackFrame::WASM);
    __ DropStackSlotsAndRet(
        static_cast<uint32_t>(descriptor_->ParameterSlotCount()));
  }
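  // Note: the epilogue order above matters: return values are moved to their
  // ABI locations first, the (optional) growable-stack shrink check runs
  // while the frame is still valid, and only then is the WASM frame torn
  // down and the parameter stack slots popped together with the return.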
 
  void LocalGet(FullDecoder* decoder, Value* result,
                const IndexImmediate& imm) {
    auto local_slot = __ cache_state()->stack_state[imm.index];
    __ cache_state()->stack_state.emplace_back(
        local_slot.kind(), __ NextSpillOffset(local_slot.kind()));
    auto* slot = &__ cache_state()->stack_state.back();
    if (local_slot.is_reg()) {
      __ cache_state()->inc_used(local_slot.reg());
      slot->MakeRegister(local_slot.reg());
    } else if (local_slot.is_const()) {
      slot->MakeConstant(local_slot.i32_const());
    } else {
      DCHECK(local_slot.is_stack());
      RegClass rc = reg_class_for(local_slot.kind());
      LiftoffRegister reg = __ GetUnusedRegister(rc, {});
      __ cache_state()->inc_used(reg);
      slot->MakeRegister(reg);
      __ Fill(reg, local_slot.offset(), local_slot.kind());
    }
  }

  void LocalSetFromStackSlot(VarState* dst_slot, uint32_t local_index) {
    auto& state = *__ cache_state();
    auto& src_slot = state.stack_state.back();
    ValueKind kind = dst_slot->kind();
    if (dst_slot->is_reg()) {
      LiftoffRegister slot_reg = dst_slot->reg();
      if (state.get_use_count(slot_reg) == 1) {
        __ Fill(dst_slot->reg(), src_slot.offset(), kind);
        return;
      }
      state.dec_used(slot_reg);
      dst_slot->MakeStack();
    }
    DCHECK(CompatibleStackSlotTypes(kind, __ local_kind(local_index)));
    RegClass rc = reg_class_for(kind);
    LiftoffRegister dst_reg = __ GetUnusedRegister(rc, {});
    __ Fill(dst_reg, src_slot.offset(), kind);
    *dst_slot = VarState(kind, dst_reg, dst_slot->offset());
    __ cache_state()->inc_used(dst_reg);
  }

  void LocalSet(uint32_t local_index, bool is_tee) {
    auto& state = *__ cache_state();
    auto& source_slot = state.stack_state.back();
    auto& target_slot = state.stack_state[local_index];
    switch (source_slot.loc()) {
      case VarState::kRegister:
        if (target_slot.is_reg()) state.dec_used(target_slot.reg());
        target_slot.Copy(source_slot);
        if (is_tee) state.inc_used(target_slot.reg());
        break;
      case VarState::kIntConst:
        if (target_slot.is_reg()) state.dec_used(target_slot.reg());
        target_slot.Copy(source_slot);
        break;
      case VarState::kStack:
        LocalSetFromStackSlot(&target_slot, local_index);
        break;
    }
    if (!is_tee) __ cache_state()->stack_state.pop_back();
  }

  void LocalSet(FullDecoder* decoder, const Value& value,
                const IndexImmediate& imm) {
    LocalSet(imm.index, false);
  }

  void LocalTee(FullDecoder* decoder, const Value& value, Value* result,
                const IndexImmediate& imm) {
    LocalSet(imm.index, true);
  }
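  // Note: locals live in the same {stack_state} array as the value stack, so
  // local.get/set/tee are pure cache-state updates in the common case: they
  // copy a {VarState} and adjust register use counts; machine code is only
  // emitted when a value has to be filled from its stack slot.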
 
  Register GetGlobalBaseAndOffset(const WasmGlobal* global,
                                  LiftoffRegList* pinned, uint32_t* offset) {
    Register addr = pinned->set(__ GetUnusedRegister(kGpReg, {})).gp();
    if (global->mutability && global->imported) {
      LOAD_TAGGED_PTR_INSTANCE_FIELD(addr, ImportedMutableGlobals, *pinned);
      int field_offset =
          wasm::ObjectAccess::ElementOffsetInTaggedFixedAddressArray(
              global->index);
      __ LoadFullPointer(addr, addr, field_offset);
      *offset = 0;
#ifdef V8_ENABLE_SANDBOX
      __ DecodeSandboxedPointer(addr);
#endif
    } else {
      LOAD_INSTANCE_FIELD(addr, GlobalsStart, kSystemPointerSize, *pinned);
      *offset = global->offset;
    }
    return addr;
  }

  void GetBaseAndOffsetForImportedMutableExternRefGlobal(
      const WasmGlobal* global, LiftoffRegList* pinned, Register* base,
      Register* offset) {
    Register globals_buffer =
        pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp();
    LOAD_TAGGED_PTR_INSTANCE_FIELD(globals_buffer,
                                   ImportedMutableGlobalsBuffers, *pinned);
    *base = globals_buffer;
    __ LoadTaggedPointer(
        *base, globals_buffer, no_reg,
        wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(global->offset));

    // For the offset we need the index of this global in the buffer, then
    // calculate the actual offset from that index.
    Register imported_mutable_globals =
        pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp();
    LOAD_TAGGED_PTR_INSTANCE_FIELD(imported_mutable_globals,
                                   ImportedMutableGlobals, *pinned);
    *offset = imported_mutable_globals;
    int field_offset =
        wasm::ObjectAccess::ElementOffsetInTaggedFixedAddressArray(
            global->index);
    __ Load(LiftoffRegister(*offset), imported_mutable_globals, no_reg,
            field_offset, LoadType::kI32Load);
    __ emit_i32_shli(*offset, *offset, kTaggedSizeLog2);
    __ emit_i32_addi(*offset, *offset,
                     wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0));
  }
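  // Note: an imported mutable externref global is stored in a FixedArray
  // buffer owned by the exporting instance, so reading or writing it takes
  // two indirections: load the buffer from ImportedMutableGlobalsBuffers,
  // then load the element index from ImportedMutableGlobals and scale it to
  // a tagged offset into that buffer.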
 
  void GlobalGet(FullDecoder* decoder, Value* result,
                 const GlobalIndexImmediate& imm) {
    const auto* global = &env_->module->globals[imm.index];
    ValueKind kind = global->type.kind();
    if (!CheckSupportedType(decoder, kind, "global")) {
      return;
    }

    if (is_reference(kind)) {
      if (global->mutability && global->imported) {
        LiftoffRegList pinned;
        Register base = no_reg;
        Register offset = no_reg;
        GetBaseAndOffsetForImportedMutableExternRefGlobal(global, &pinned,
                                                          &base, &offset);
        __ LoadTaggedPointer(base, base, offset, 0);
        __ PushRegister(kind, LiftoffRegister(base));
        return;
      }

      LiftoffRegList pinned;
      Register globals_buffer =
          pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
      LOAD_TAGGED_PTR_INSTANCE_FIELD(globals_buffer, TaggedGlobalsBuffer,
                                     pinned);
      Register value = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
      __ LoadTaggedPointer(value, globals_buffer, no_reg,
                           wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(
                               imm.global->offset));
      __ PushRegister(kind, LiftoffRegister(value));
      return;
    }
    LiftoffRegList pinned;
    uint32_t offset = 0;
    Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset);
    LiftoffRegister value =
        pinned.set(__ GetUnusedRegister(reg_class_for(kind), pinned));
    LoadType type = LoadType::ForValueKind(kind);
    __ Load(value, addr, no_reg, offset, type, nullptr, false);
    __ PushRegister(kind, value);
  }

  void GlobalSet(FullDecoder* decoder, const Value&,
                 const GlobalIndexImmediate& imm) {
    auto* global = &env_->module->globals[imm.index];
    ValueKind kind = global->type.kind();
    if (!CheckSupportedType(decoder, kind, "global")) {
      return;
    }

    if (is_reference(kind)) {
      if (global->mutability && global->imported) {
        LiftoffRegList pinned;
        Register value = pinned.set(__ PopToRegister(pinned)).gp();
        Register base = no_reg;
        Register offset = no_reg;
        GetBaseAndOffsetForImportedMutableExternRefGlobal(global, &pinned,
                                                          &base, &offset);
        __ StoreTaggedPointer(base, offset, 0, value, pinned);
        return;
      }

      LiftoffRegList pinned;
      Register globals_buffer =
          pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
      LOAD_TAGGED_PTR_INSTANCE_FIELD(globals_buffer, TaggedGlobalsBuffer,
                                     pinned);
      Register value = pinned.set(__ PopToRegister(pinned)).gp();
      __ StoreTaggedPointer(globals_buffer, no_reg,
                            wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(
                                imm.global->offset),
                            value, pinned);
      return;
    }
    LiftoffRegList pinned;
    uint32_t offset = 0;
    Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset);
    LiftoffRegister reg = pinned.set(__ PopToRegister(pinned));
    StoreType type = StoreType::ForValueKind(kind);
    __ Store(addr, no_reg, offset, reg, type, {}, nullptr, false);
  }
 
  void TableGet(FullDecoder* decoder, const Value&, Value*,
                const TableIndexImmediate& imm) {
    Register index_high_word = no_reg;
    LiftoffRegList pinned;
    VarState table_index{kI32, static_cast<int>(imm.index), 0};

    // Convert the index to the table to an intptr.
    VarState index = PopIndexToVarState(&index_high_word, &pinned);
    // Trap if any bit in the high word was set.
    CheckHighWordEmptyForTableType(decoder, index_high_word, &pinned);

    ValueType type = imm.table->type;
    bool is_funcref = IsSubtypeOf(type, kWasmFuncRef, decoder->module_);
    auto stub =
        is_funcref ? Builtin::kWasmTableGetFuncRef : Builtin::kWasmTableGet;

    CallBuiltin(stub, MakeSig::Returns(type.kind()).Params(kI32, kIntPtrKind),
                {table_index, index}, decoder->position());

    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);

    __ PushRegister(type.kind(), LiftoffRegister(kReturnRegister0));
  }

  void TableSet(FullDecoder* decoder, const Value&, const Value&,
                const TableIndexImmediate& imm) {
    Register index_high_word = no_reg;
    LiftoffRegList pinned;
    VarState table_index{kI32, static_cast<int>(imm.index), 0};
    VarState extract_shared_part{kI32, 0, 0};

    VarState value = __ PopVarState();
    if (value.is_reg()) pinned.set(value.reg());
    // Convert the index to the table to an intptr.
    VarState index = PopIndexToVarState(&index_high_word, &pinned);
    // Trap if any bit in the high word was set.
    CheckHighWordEmptyForTableType(decoder, index_high_word, &pinned);

    bool is_funcref = IsSubtypeOf(imm.table->type, kWasmFuncRef,
                                  decoder->module_);
    auto stub =
        is_funcref ? Builtin::kWasmTableSetFuncRef : Builtin::kWasmTableSet;

    CallBuiltin(stub, MakeSig::Params(kI32, kI32, kIntPtrKind, kRefNull),
                {table_index, extract_shared_part, index, value},
                decoder->position());

    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
  }
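  // Note: table.get and table.set are not open-coded; both go through
  // builtins, with the table index passed as a constant {VarState} and the
  // element index converted to an intptr first. Liftoff therefore spills all
  // registers at these points (hence the kDidSpill debug side table entries).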
 
  Builtin GetBuiltinForTrapReason(TrapReason reason) {
    switch (reason) {
#define RUNTIME_STUB_FOR_TRAP(trap_reason) \
  case k##trap_reason:                     \
    return Builtin::kThrowWasm##trap_reason;

      FOREACH_WASM_TRAPREASON(RUNTIME_STUB_FOR_TRAP)
#undef RUNTIME_STUB_FOR_TRAP
      default:
        UNREACHABLE();
    }
  }

  void Trap(FullDecoder* decoder, TrapReason reason) {
    OolTrapLabel trap =
        AddOutOfLineTrap(decoder, GetBuiltinForTrapReason(reason));
    __ emit_jump(trap.label());
    __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
  }

  void AssertNullTypecheckImpl(FullDecoder* decoder, const Value& arg,
                               Value* result, Condition cond) {
    LiftoffRegList pinned;
    LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
    LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
    LoadNullValueForCompare(null.gp(), pinned, arg.type);
    {
      OolTrapLabel trap =
          AddOutOfLineTrap(decoder, Builtin::kThrowWasmTrapIllegalCast);
      __ emit_cond_jump(cond, trap.label(), kRefNull, obj.gp(), null.gp(),
                        trap.frozen());
    }
    __ PushRegister(kRefNull, obj);
  }

  void AssertNullTypecheck(FullDecoder* decoder, const Value& arg,
                           Value* result) {
    AssertNullTypecheckImpl(decoder, arg, result, kNotEqual);
  }

  void AssertNotNullTypecheck(FullDecoder* decoder, const Value& arg,
                              Value* result) {
    AssertNullTypecheckImpl(decoder, arg, result, kEqual);
  }

  void NopForTestingUnsupportedInLiftoff(FullDecoder* decoder) {
    unsupported(decoder, kOtherReason, "testing opcode");
  }
 
  void Select(FullDecoder* decoder, const Value& cond, const Value& fval,
              const Value& tval, Value* result) {
    LiftoffRegList pinned;
    Register condition = pinned.set(__ PopToRegister()).gp();
    ValueKind kind = __ cache_state()->stack_state.end()[-1].kind();
    DCHECK(CompatibleStackSlotTypes(
        kind, __ cache_state()->stack_state.end()[-2].kind()));
    LiftoffRegister false_value = pinned.set(__ PopToRegister(pinned));
    LiftoffRegister true_value = __ PopToRegister(pinned);
    LiftoffRegister dst = __ GetUnusedRegister(true_value.reg_class(),
                                               {true_value, false_value}, {});
    if (!__ emit_select(dst, condition, true_value, false_value, kind)) {
      FREEZE_STATE(frozen);
      // Emit generic code (using branches) instead.
      Label cont;
      Label case_false;
      __ emit_cond_jump(kEqual, &case_false, kI32, condition, no_reg, frozen);
      if (dst != true_value) __ Move(dst, true_value, kind);
      __ emit_jump(&cont);

      __ bind(&case_false);
      if (dst != false_value) __ Move(dst, false_value, kind);
      __ bind(&cont);
    }
    __ PushRegister(kind, dst);
  }
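  // Note: emit_select returns false on platforms without a conditional-move
  // style implementation for the given kind; the branch sequence above is
  // the generic fallback, emitted under a frozen cache state so both arms
  // see the same register assignment.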
 
  void BrImpl(FullDecoder* decoder, Control* target) {
    if (dynamic_tiering()) {
      if (target->is_loop()) {
        DCHECK(target->label.get()->is_bound());
        int jump_distance = __ pc_offset() - target->label.get()->pos();
        TierupCheck(decoder, decoder->position(), jump_distance);
      }
    }
    if (target->br_merge()->reached) {
      __ MergeStackWith(target->label_state, target->br_merge()->arity,
                        target->is_loop() ? LiftoffAssembler::kBackwardJump
                                          : LiftoffAssembler::kForwardJump);
    } else {
      target->label_state =
          __ MergeIntoNewState(__ num_locals(), target->br_merge()->arity,
                               target->stack_depth + target->num_exceptions);
    }
    __ jmp(target->label.get());
  }

  bool NeedsTierupCheck(FullDecoder* decoder, uint32_t br_depth) {
    if (!dynamic_tiering()) return false;
    return br_depth == decoder->control_depth() - 1 ||
           decoder->control_at(br_depth)->is_loop();
  }

  void BrOrRet(FullDecoder* decoder, uint32_t depth) {
    if (depth == decoder->control_depth() - 1) {
      ReturnImpl(decoder);
    } else {
      BrImpl(decoder, decoder->control_at(depth));
    }
  }

  void BrIf(FullDecoder* decoder, const Value& /* cond */, uint32_t depth) {
    // Avoid having sequences of branches do duplicate work.
    if (depth != decoder->control_depth() - 1) {
      __ PrepareForBranch(decoder->control_at(depth)->br_merge()->arity, {});
    }

    Label cont_false;
    std::optional<FreezeCacheState> frozen;
    JumpIfFalse(decoder, &cont_false, frozen);

    BrOrRet(decoder, depth);

    __ bind(&cont_false);
  }
 
  void GenerateBrCase(FullDecoder* decoder, uint32_t br_depth,
                      ZoneMap<uint32_t, MovableLabel>* br_targets) {
    auto [iterator, is_new_target] = br_targets->emplace(br_depth, zone_);
    Label* label = iterator->second.get();
    if (is_new_target) {
      __ bind(label);
      BrOrRet(decoder, br_depth);
    } else {
      __ jmp(label);
    }
  }

  void GenerateBrTable(FullDecoder* decoder, LiftoffRegister value,
                       uint32_t min, uint32_t max,
                       BranchTableIterator<ValidationTag>* table_iterator,
                       ZoneMap<uint32_t, MovableLabel>* br_targets,
                       const FreezeCacheState& frozen) {
    DCHECK_LT(min, max);
    // Check base case.
    if (max == min + 1) {
      DCHECK_EQ(min, table_iterator->cur_index());
      GenerateBrCase(decoder, table_iterator->next(), br_targets);
      return;
    }

    uint32_t split = min + (max - min) / 2;
    Label upper_half;
    __ emit_i32_cond_jumpi(kUnsignedGreaterThanEqual, &upper_half, value.gp(),
                           split, frozen);
    // Emit br table for lower half:
    GenerateBrTable(decoder, value, min, split, table_iterator, br_targets,
                    frozen);
    __ bind(&upper_half);
    // The table_iterator would trigger a DCHECK if we don't stop decoding now.
    if (did_bailout()) return;
    // Emit br table for upper half:
    GenerateBrTable(decoder, value, split, max, table_iterator, br_targets,
                    frozen);
  }

  void BrTable(FullDecoder* decoder, const BranchTableImmediate& imm,
               const Value& key) {
    LiftoffRegList pinned;
    LiftoffRegister value = pinned.set(__ PopToRegister());

    {
      // All targets must have the same arity (checked by validation), so
      // we can just sample any of them to find that arity.
      auto [sample_depth, unused_length] =
          decoder->read_u32v<Decoder::NoValidationTag>(imm.table,
                                                       "first depth");
      __ PrepareForBranch(decoder->control_at(sample_depth)->br_merge()->arity,
                          pinned);
    }

    BranchTableIterator<ValidationTag> table_iterator{decoder, imm};
    ZoneMap<uint32_t, MovableLabel> br_targets{zone_};

    if (imm.table_count > 0) {
      FREEZE_STATE(frozen);
      Label case_default;
      __ emit_i32_cond_jumpi(kUnsignedGreaterThanEqual, &case_default,
                             value.gp(), imm.table_count, frozen);

      GenerateBrTable(decoder, value, 0, imm.table_count, &table_iterator,
                      &br_targets, frozen);

      __ bind(&case_default);
      // The table_iterator would trigger a DCHECK if we don't stop now.
      if (did_bailout()) return;
    }

    // Generate the default case.
    GenerateBrCase(decoder, table_iterator.next(), &br_targets);
    DCHECK(!table_iterator.has_next());
  }
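  // Note: for br_table, Liftoff emits a balanced binary search over the key
  // instead of a jump table. Each level compares the key against the
  // midpoint and recurses; e.g. for targets [0,4) the emitted shape is
  // roughly:
  //   cmp key, 2; jae upper
  //   cmp key, 1; jae case1; jmp case0
  //   upper: cmp key, 3; jae case3; jmp case2
  // Branches to the same depth share one bound label via {br_targets}.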
 
  void Else(FullDecoder* decoder, Control* c) {
    if (c->reachable()) {
      if (c->end_merge.reached) {
        __ MergeFullStackWith(c->label_state);
      } else {
        c->label_state =
            __ MergeIntoNewState(__ num_locals(), c->end_merge.arity,
                                 c->stack_depth + c->num_exceptions);
      }
      __ emit_jump(c->label.get());
    }
    __ bind(c->else_state->label.get());
    __ cache_state()->Steal(c->else_state->state);
  }

  SpilledRegistersForInspection* GetSpilledRegistersForInspection() {
    DCHECK(for_debugging_);
    // If we are generating debugging code, we really need to spill all
    // registers to make them inspectable when stopping at the trap.
    auto* spilled = zone_->New<SpilledRegistersForInspection>(zone_);
    for (uint32_t i = 0, e = __ cache_state()->stack_height(); i < e; ++i) {
      auto& slot = __ cache_state()->stack_state[i];
      if (!slot.is_reg()) continue;
      spilled->entries.push_back(SpilledRegistersForInspection::Entry{
          slot.offset(), slot.reg(), slot.kind()});
      __ RecordUsedSpillOffset(slot.offset());
    }
    return spilled;
  }
 
  Label* AddOutOfLineTrapDeprecated(FullDecoder* decoder, Builtin builtin) {
    return AddOutOfLineTrap(decoder, builtin).label();
  }

  class OolTrapLabel {
   public:
    OolTrapLabel(LiftoffAssembler& assembler, Label* label)
        : label_(label), freeze_(assembler) {}

    Label* label() const { return label_; }
    const FreezeCacheState& frozen() const { return freeze_; }

   private:
    Label* label_;
    FreezeCacheState freeze_;
  };

  OolTrapLabel AddOutOfLineTrap(FullDecoder* decoder, Builtin builtin) {
    // Execution does not return after a trap, so no safepoint is needed to
    // preserve references on the stack; only debug code creates one, so the
    // reference stack slots can be inspected when stopping at the trap.
    OutOfLineSafepointInfo* safepoint_info = nullptr;
    if (V8_UNLIKELY(for_debugging_)) {
      safepoint_info = zone_->New<OutOfLineSafepointInfo>(zone_);
      __ cache_state()->GetTaggedSlotsForOOLCode(
          &safepoint_info->slots, &safepoint_info->spills,
          LiftoffAssembler::CacheState::SpillLocation::kStackSlots);
    }
    out_of_line_code_.push_back(OutOfLineCode::Trap(
        zone_, builtin, decoder->position(),
        V8_UNLIKELY(for_debugging_) ? GetSpilledRegistersForInspection()
                                    : nullptr,
        safepoint_info, RegisterOOLDebugSideTableEntry(decoder)));
    return OolTrapLabel(asm_, out_of_line_code_.back().label.get());
  }

  enum ForceCheck : bool { kDoForceCheck = true, kDontForceCheck = false };
  enum AlignmentCheck : bool {
    kCheckAlignment = true,
    kDontCheckAlignment = false,
  };
  // Returns the GP {index} register holding the ptr-sized index.
  Register BoundsCheckMem(FullDecoder* decoder, const WasmMemory* memory,
                          uint32_t access_size, uint64_t offset,
                          LiftoffRegister index, LiftoffRegList pinned,
                          ForceCheck force_check,
                          AlignmentCheck check_alignment) {
    // The decoder ensures that the access is not statically OOB.
    DCHECK(base::IsInBounds<uintptr_t>(offset, access_size,
                                       memory->max_memory_size));

    wasm::BoundsCheckStrategy bounds_checks = memory->bounds_checks;

    // The memory instruction needs a ptr-sized index; on 32-bit systems this
    // is the low word of the register pair (the high word is checked below).
    Register index_ptrsize =
        kNeedI64RegPair && index.is_gp_pair() ? index.low_gp() : index.gp();

    if (check_alignment) {
      AlignmentCheckMem(decoder, access_size, offset, index_ptrsize,
                        pinned | LiftoffRegList{index});
    }

    // Without bounds checks (testing only), just return the ptrsize index.
    if (V8_UNLIKELY(bounds_checks == kNoBoundsChecks)) {
      return index_ptrsize;
    }

    // We already checked that an access at {offset} is within the maximum
    // memory size.
    uintptr_t end_offset = offset + access_size - 1u;
    DCHECK_LT(end_offset, memory->max_memory_size);

    bool use_trap_handler = !force_check && bounds_checks == kTrapHandler;
    DCHECK_IMPLIES(
        memory->is_memory64() && !v8_flags.wasm_memory64_trap_handling,
        bounds_checks == kExplicitBoundsChecks);
#if V8_TRAP_HANDLER_SUPPORTED
    if (use_trap_handler) {
#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64
      if (memory->is_memory64()) {
        // Bounds-check the index against {max_memory_size - end_offset}, so
        // that {index + end_offset} stays below the maximum at runtime, where
        // the trap handler catches out-of-bounds accesses.
        OolTrapLabel trap =
            AddOutOfLineTrap(decoder, Builtin::kThrowWasmTrapMemOutOfBounds);
        __ set_trap_on_oob_mem64(index_ptrsize,
                                 memory->max_memory_size - end_offset,
                                 trap.label());
        return index_ptrsize;
      }
#else
      CHECK(!memory->is_memory64());
#endif
      // For memory32, the guard region catches out-of-bounds accesses, so no
      // explicit check is emitted.
      return index_ptrsize;
    }
#else
    CHECK(!use_trap_handler);
#endif

    SCOPED_CODE_COMMENT("bounds check memory");

    pinned.set(index_ptrsize);
    // Convert the index to ptr-size; for memory64 on 32-bit systems, trap if
    // any bit in the high word is set.
    if (!memory->is_memory64()) {
      __ emit_u32_to_uintptr(index_ptrsize, index_ptrsize);
    } else if (kSystemPointerSize == kInt32Size) {
      OolTrapLabel trap =
          AddOutOfLineTrap(decoder, Builtin::kThrowWasmTrapMemOutOfBounds);
      __ emit_cond_jump(kNotZero, trap.label(), kI32, index.high_gp(), no_reg,
                        trap.frozen());
    }

    LiftoffRegister end_offset_reg =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    LiftoffRegister mem_size = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    // Load the memory size.
    if (memory->index == 0) {
      LOAD_INSTANCE_FIELD(mem_size.gp(), Memory0Size, kSystemPointerSize,
                          pinned);
    } else {
      LOAD_TAGGED_PTR_INSTANCE_FIELD(mem_size.gp(), MemoryBasesAndSizes,
                                     pinned);
      int buffer_offset =
          wasm::ObjectAccess::ToTagged(OFFSET_OF_DATA_START(ByteArray)) +
          kSystemPointerSize * (memory->index * 2 + 1);
      __ LoadFullPointer(mem_size.gp(), mem_size.gp(), buffer_offset);
    }

    __ LoadConstant(end_offset_reg, WasmValue::ForUintPtr(end_offset));

    OolTrapLabel trap =
        AddOutOfLineTrap(decoder, Builtin::kThrowWasmTrapMemOutOfBounds);
    // If the end offset is larger than the smallest memory, dynamically check
    // the end offset against the actual memory size, which is not known at
    // compile time. Otherwise, only one check is required (see below).
    if (end_offset > memory->min_memory_size) {
      __ emit_cond_jump(kUnsignedGreaterThanEqual, trap.label(), kIntPtrKind,
                        end_offset_reg.gp(), mem_size.gp(), trap.frozen());
    }

    // Just reuse the end_offset register for computing the effective size
    // (which is guaranteed non-negative after the check above).
    LiftoffRegister effective_size_reg = end_offset_reg;
    __ emit_ptrsize_sub(effective_size_reg.gp(), mem_size.gp(),
                        end_offset_reg.gp());

    __ emit_cond_jump(kUnsignedGreaterThanEqual, trap.label(), kIntPtrKind,
                      index_ptrsize, effective_size_reg.gp(), trap.frozen());
    return index_ptrsize;
  }
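  // Note: the explicit bounds check above avoids computing index + end_offset
  // at runtime (which could overflow). Instead it checks
  //   end_offset < mem_size  and  index < mem_size - end_offset,
  // which together imply index + access_size <= mem_size; the first
  // comparison is skipped when end_offset already fits in the declared
  // minimum memory size.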
 
  void AlignmentCheckMem(FullDecoder* decoder, uint32_t access_size,
                         uintptr_t offset, Register index,
                         LiftoffRegList pinned) {
    DCHECK_NE(0, access_size);
    // For access size 1 there is no minimum alignment.
    if (access_size == 1) return;
    SCOPED_CODE_COMMENT("alignment check");
    Register address = __ GetUnusedRegister(kGpReg, pinned).gp();
    OolTrapLabel trap =
        AddOutOfLineTrap(decoder, Builtin::kThrowWasmTrapUnalignedAccess);

    const uint32_t align_mask = access_size - 1;
    if ((offset & align_mask) == 0) {
      // If {offset} is aligned, we can produce faster code.
      __ emit_i32_andi(address, index, align_mask);
      __ emit_cond_jump(kNotEqual, trap.label(), kI32, address, no_reg,
                        trap.frozen());
    } else {
      // For alignment checks we only look at the lower 32 bits of {offset}.
      __ emit_i32_addi(address, index, static_cast<uint32_t>(offset));
      __ emit_i32_andi(address, address, align_mask);
      __ emit_cond_jump(kNotEqual, trap.label(), kI32, address, no_reg,
                        trap.frozen());
    }
  }
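  // Note: for a power-of-two access size, (addr & (size - 1)) == 0 is the
  // alignment condition; when the static offset itself is aligned, checking
  // the index alone suffices, which saves the add of the offset.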
 
  void TraceMemoryOperation(bool is_store, MachineRepresentation rep,
                            Register index, uintptr_t offset,
                            WasmCodePosition position) {
    // Before making the runtime call, spill all cache registers.
    __ SpillAllRegisters();

    LiftoffRegList pinned;
    if (index != no_reg) pinned.set(index);
    // Get one register for computing the effective offset (offset + index).
    LiftoffRegister effective_offset =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned));

    // TODO(14259): Support multiple memories.
    const WasmMemory* memory = env_->module->memories.data();
    if (memory->is_memory64() && !kNeedI64RegPair) {
      __ LoadConstant(effective_offset,
                      WasmValue(static_cast<uint64_t>(offset)));
      if (index != no_reg) {
        __ emit_i64_add(effective_offset, effective_offset,
                        LiftoffRegister(index));
      }
    } else {
      // The offset is a 32-bit number when {kNeedI64RegPair} is true, so we
      // just use 32-bit operations on it.
      DCHECK_GE(kMaxUInt32, offset);
      __ LoadConstant(effective_offset,
                      WasmValue(static_cast<uint32_t>(offset)));
      if (index != no_reg) {
        __ emit_i32_add(effective_offset.gp(), effective_offset.gp(), index);
      }
    }

    // Get a register to hold the stack slot for MemoryTracingInfo.
    LiftoffRegister info = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    // Allocate the stack slot for the MemoryTracingInfo.
    __ AllocateStackSlot(info.gp(), sizeof(MemoryTracingInfo));

    // Reuse the {effective_offset} register for all information to be stored
    // in the MemoryTracingInfo struct.
    LiftoffRegister data = effective_offset;

    // Now store all information into the MemoryTracingInfo struct.
    if (kSystemPointerSize == 8 && !memory->is_memory64()) {
      // Zero-extend the effective offset to u64.
      CHECK(__ emit_type_conversion(kExprI64UConvertI32, data, effective_offset,
                                    nullptr));
    }
    __ Store(
        info.gp(), no_reg, offsetof(MemoryTracingInfo, offset), data,
        kSystemPointerSize == 8 ? StoreType::kI64Store : StoreType::kI32Store,
        pinned);
    __ LoadConstant(data, WasmValue(is_store ? 1 : 0));
    __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, is_store), data,
             StoreType::kI32Store8, pinned);
    __ LoadConstant(data, WasmValue(static_cast<int>(rep)));
    __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, mem_rep), data,
             StoreType::kI32Store8, pinned);

    WasmTraceMemoryDescriptor descriptor;
    DCHECK_EQ(0, descriptor.GetStackParameterCount());
    DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
    Register param_reg = descriptor.GetRegisterParameter(0);
    if (info.gp() != param_reg) {
      __ Move(param_reg, info.gp(), kIntPtrKind);
    }

    source_position_table_builder_.AddPosition(
        __ pc_offset(), SourcePosition(position), false);
    __ CallBuiltin(Builtin::kWasmTraceMemory);
    DefineSafepoint();

    __ DeallocateStackSlot(sizeof(MemoryTracingInfo));
  }
 
  bool IndexStaticallyInBounds(const WasmMemory* memory,
                               const VarState& index_slot, int access_size,
                               uintptr_t* offset) {
    if (!index_slot.is_const()) return false;

    // memory64: Sign-extend to restore the original index value.
    // memory32: Zero-extend the 32-bit value.
    const uintptr_t index =
        memory->is_memory64()
            ? static_cast<uintptr_t>(intptr_t{index_slot.i32_const()})
            : uintptr_t{static_cast<uint32_t>(index_slot.i32_const())};
    const uintptr_t effective_offset = index + *offset;

    if (effective_offset < index  // overflow
        || !base::IsInBounds<uintptr_t>(effective_offset, access_size,
                                        memory->min_memory_size)) {
      return false;
    }

    *offset = effective_offset;
    return true;
  }

  bool IndexStaticallyInBoundsAndAligned(const WasmMemory* memory,
                                         const VarState& index_slot,
                                         int access_size, uintptr_t* offset) {
    uintptr_t new_offset = *offset;
    if (IndexStaticallyInBounds(memory, index_slot, access_size,
                                &new_offset) &&
        IsAligned(new_offset, access_size)) {
      *offset = new_offset;
      return true;
    }
    return false;
  }

  V8_INLINE Register GetMemoryStart(int memory_index, LiftoffRegList pinned) {
    if (memory_index == __ cache_state()->cached_mem_index) {
      Register memory_start = __ cache_state()->cached_mem_start;
      DCHECK_NE(no_reg, memory_start);
      return memory_start;
    }
    return GetMemoryStart_Slow(memory_index, pinned);
  }

  V8_NOINLINE V8_PRESERVE_MOST Register
  GetMemoryStart_Slow(int memory_index, LiftoffRegList pinned) {
    // This method should only be called if we cannot use the cached memory
    // start.
    DCHECK_NE(memory_index, __ cache_state()->cached_mem_index);
    __ cache_state()->ClearCachedMemStartRegister();
    SCOPED_CODE_COMMENT("load memory start");
    Register memory_start = __ GetUnusedRegister(kGpReg, pinned).gp();
    if (memory_index == 0) {
      LOAD_INSTANCE_FIELD(memory_start, Memory0Start, kSystemPointerSize,
                          pinned);
    } else {
      LOAD_TAGGED_PTR_INSTANCE_FIELD(memory_start, MemoryBasesAndSizes, pinned);
      int buffer_offset =
          wasm::ObjectAccess::ToTagged(OFFSET_OF_DATA_START(ByteArray)) +
          kSystemPointerSize * memory_index * 2;
      __ LoadFullPointer(memory_start, memory_start, buffer_offset);
    }
    __ cache_state()->SetMemStartCacheRegister(memory_start, memory_index);
    return memory_start;
  }
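  // Note: GetMemoryStart keeps the base address of the most recently used
  // memory in a dedicated cache register (cached_mem_start / cached_mem_index
  // in the cache state); only a miss takes the out-of-line slow path that
  // reloads the base from the instance and re-establishes the cache.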
 
  void LoadMem(FullDecoder* decoder, LoadType type,
               const MemoryAccessImmediate& imm, const Value& index_val,
               Value* result) {
    bool needs_f16_to_f32_conv = false;
    if (type.value() == LoadType::kF32LoadF16 &&
        !asm_.supports_f16_mem_access()) {
      needs_f16_to_f32_conv = true;
      type = LoadType::kI32Load16U;
    }
    ValueKind kind = type.value_type().kind();
    if (!CheckSupportedType(decoder, kind, "load")) return;

    uintptr_t offset = imm.offset;
    Register index = no_reg;
    RegClass rc = reg_class_for(kind);

    // Only look at the slot, do not pop it yet (will happen in PopToRegister
    // below, if this is not a statically-in-bounds constant).
    auto& index_slot = __ cache_state()->stack_state.back();
    DCHECK_EQ(index_val.type.kind(), index_slot.kind());
    bool i64_offset = imm.memory->is_memory64();
    if (IndexStaticallyInBounds(imm.memory, index_slot, type.size(), &offset)) {
      __ cache_state()->stack_state.pop_back();
      SCOPED_CODE_COMMENT("load from memory (constant offset)");
      LiftoffRegList pinned;
      Register mem = pinned.set(GetMemoryStart(imm.memory->index, pinned));
      LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
      __ Load(value, mem, no_reg, offset, type, nullptr, true, i64_offset);
      if (needs_f16_to_f32_conv) {
        LiftoffRegister dst = __ GetUnusedRegister(kFpReg, {});
        auto conv_ref = ExternalReference::wasm_float16_to_float32();
        GenerateCCallWithStackBuffer(&dst, kVoid, kF32,
                                     {VarState{kI16, value, 0}}, conv_ref);
        __ PushRegister(kF32, dst);
      } else {
        __ PushRegister(kind, value);
      }
    } else {
      LiftoffRegister full_index = __ PopToRegister();
      index =
          BoundsCheckMem(decoder, imm.memory, type.size(), offset, full_index,
                         {}, kDontForceCheck, kDontCheckAlignment);

      SCOPED_CODE_COMMENT("load from memory");
      LiftoffRegList pinned{index};

      // Load the memory start address only now to reduce register pressure
      // (important on ia32).
      Register mem = pinned.set(GetMemoryStart(imm.memory->index, pinned));
      LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));

      uint32_t protected_load_pc = 0;
      __ Load(value, mem, index, offset, type, &protected_load_pc, true,
              i64_offset);
      if (imm.memory->bounds_checks == kTrapHandler) {
        RegisterProtectedInstruction(decoder, protected_load_pc);
      }
      if (needs_f16_to_f32_conv) {
        LiftoffRegister dst = __ GetUnusedRegister(kFpReg, {});
        auto conv_ref = ExternalReference::wasm_float16_to_float32();
        GenerateCCallWithStackBuffer(&dst, kVoid, kF32,
                                     {VarState{kI16, value, 0}}, conv_ref);
        __ PushRegister(kF32, dst);
      } else {
        __ PushRegister(kind, value);
      }
    }

    if (V8_UNLIKELY(v8_flags.trace_wasm_memory)) {
      // TODO(14259): Implement memory tracing for multiple memories.
      CHECK_EQ(0, imm.memory->index);
      TraceMemoryOperation(false, type.mem_type().representation(), index,
                           offset, decoder->position());
    }
  }
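  // Note: when the index is a compile-time constant that (together with the
  // static offset and access size) already fits in the declared minimum
  // memory size, LoadMem and StoreMem fold the whole address into the static
  // offset and skip both the index register and the bounds check.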
 
  void LoadTransform(FullDecoder* decoder, LoadType type,
                     LoadTransformationKind transform,
                     const MemoryAccessImmediate& imm, const Value& index_val,
                     Value* result) {
    CHECK(CheckSupportedType(decoder, kS128, "LoadTransform"));

    LiftoffRegister full_index = __ PopToRegister();
    // For load splats and load zero, LoadType is the size of the load, and for
    // load extends, LoadType is the size of the lane, and it always loads 8
    // bytes.
    uint32_t access_size =
        transform == LoadTransformationKind::kExtend ? 8 : type.size();
    Register index =
        BoundsCheckMem(decoder, imm.memory, access_size, imm.offset, full_index,
                       {}, kDontForceCheck, kDontCheckAlignment);

    uintptr_t offset = imm.offset;
    LiftoffRegList pinned{index};
    CODE_COMMENT("load with transformation");
    Register addr = GetMemoryStart(imm.mem_index, pinned);
    LiftoffRegister value = __ GetUnusedRegister(reg_class_for(kS128), {});
    uint32_t protected_load_pc = 0;
    bool i64_offset = imm.memory->is_memory64();
    __ LoadTransform(value, addr, index, offset, type, transform,
                     &protected_load_pc, i64_offset);

    if (imm.memory->bounds_checks == kTrapHandler) {
      protected_instructions_.emplace_back(
          trap_handler::ProtectedInstructionData{protected_load_pc});
      source_position_table_builder_.AddPosition(
          protected_load_pc, SourcePosition(decoder->position()), true);
      if (for_debugging_) {
        DefineSafepoint(protected_load_pc);
      }
    }
    __ PushRegister(kS128, value);

    if (V8_UNLIKELY(v8_flags.trace_wasm_memory)) {
      // Again load extend is different.
      MachineRepresentation mem_rep =
          transform == LoadTransformationKind::kExtend
              ? MachineRepresentation::kWord64
              : type.mem_type().representation();
      TraceMemoryOperation(false, mem_rep, index, offset, decoder->position());
    }
  }

  void LoadLane(FullDecoder* decoder, LoadType type, const Value& _value,
                const Value& _index, const MemoryAccessImmediate& imm,
                const uint8_t laneidx, Value* _result) {
    if (!CheckSupportedType(decoder, kS128, "LoadLane")) {
      return;
    }

    LiftoffRegList pinned;
    LiftoffRegister value = pinned.set(__ PopToRegister());
    LiftoffRegister full_index = __ PopToRegister();
    Register index =
        BoundsCheckMem(decoder, imm.memory, type.size(), imm.offset, full_index,
                       pinned, kDontForceCheck, kDontCheckAlignment);

    bool i64_offset = imm.memory->is_memory64();
    uintptr_t offset = imm.offset;
    pinned.set(index);
    CODE_COMMENT("load lane");
    Register addr = GetMemoryStart(imm.mem_index, pinned);
    LiftoffRegister result = __ GetUnusedRegister(reg_class_for(kS128), {});
    uint32_t protected_load_pc = 0;
    __ LoadLane(result, value, addr, index, offset, type, laneidx,
                &protected_load_pc, i64_offset);
    if (imm.memory->bounds_checks == kTrapHandler) {
      protected_instructions_.emplace_back(
          trap_handler::ProtectedInstructionData{protected_load_pc});
      source_position_table_builder_.AddPosition(
          protected_load_pc, SourcePosition(decoder->position()), true);
      if (for_debugging_) {
        DefineSafepoint(protected_load_pc);
      }
    }

    __ PushRegister(kS128, result);

    if (V8_UNLIKELY(v8_flags.trace_wasm_memory)) {
      TraceMemoryOperation(false, type.mem_type().representation(), index,
                           offset, decoder->position());
    }
  }
 
  void StoreMem(FullDecoder* decoder, StoreType type,
                const MemoryAccessImmediate& imm, const Value& index_val,
                const Value& value_val) {
    ValueKind kind = type.value_type().kind();
    if (!CheckSupportedType(decoder, kind, "store")) return;

    LiftoffRegList pinned;
    LiftoffRegister value = pinned.set(__ PopToRegister());

    if (type.value() == StoreType::kF32StoreF16 &&
        !asm_.supports_f16_mem_access()) {
      type = StoreType::kI32Store16;
      // {value} is always a float; convert it to f16 via a C call.
      LiftoffRegister i16 = pinned.set(__ GetUnusedRegister(kGpReg, {}));
      auto conv_ref = ExternalReference::wasm_float32_to_float16();
      GenerateCCallWithStackBuffer(&i16, kVoid, kI16,
                                   {VarState{kF32, value, 0}}, conv_ref);
      value = i16;
    }

    uintptr_t offset = imm.offset;
    Register index = no_reg;

    auto& index_slot = __ cache_state()->stack_state.back();
    DCHECK_EQ(index_val.type.kind(), index_slot.kind());
    bool i64_offset = imm.memory->is_memory64();
    if (IndexStaticallyInBounds(imm.memory, index_slot, type.size(), &offset)) {
      __ cache_state()->stack_state.pop_back();
      SCOPED_CODE_COMMENT("store to memory (constant offset)");
      Register mem = pinned.set(GetMemoryStart(imm.memory->index, pinned));
      __ Store(mem, no_reg, offset, value, type, pinned, nullptr, true,
               i64_offset);
    } else {
      LiftoffRegister full_index = __ PopToRegister(pinned);
      ForceCheck force_check =
          kPartialOOBWritesAreNoops ? kDontForceCheck : kDoForceCheck;
      index =
          BoundsCheckMem(decoder, imm.memory, type.size(), imm.offset,
                         full_index, pinned, force_check, kDontCheckAlignment);

      pinned.set(index);
      SCOPED_CODE_COMMENT("store to memory");
      uint32_t protected_store_pc = 0;
      // Load the memory start address only now to reduce register pressure
      // (important on ia32).
      Register mem = pinned.set(GetMemoryStart(imm.memory->index, pinned));
      LiftoffRegList outer_pinned;
      if (V8_UNLIKELY(v8_flags.trace_wasm_memory)) outer_pinned.set(index);
      __ Store(mem, index, offset, value, type, outer_pinned,
               &protected_store_pc, true, i64_offset);
      if (imm.memory->bounds_checks == kTrapHandler) {
        RegisterProtectedInstruction(decoder, protected_store_pc);
      }
    }

    if (V8_UNLIKELY(v8_flags.trace_wasm_memory)) {
      // TODO(14259): Implement memory tracing for multiple memories.
      CHECK_EQ(0, imm.memory->index);
      TraceMemoryOperation(true, type.mem_rep(), index, offset,
                           decoder->position());
    }
  }

  void StoreLane(FullDecoder* decoder, StoreType type,
                 const MemoryAccessImmediate& imm, const Value& _index,
                 const Value& _value, const uint8_t lane) {
    if (!CheckSupportedType(decoder, kS128, "StoreLane")) return;
    LiftoffRegList pinned;
    LiftoffRegister value = pinned.set(__ PopToRegister());
    LiftoffRegister full_index = __ PopToRegister(pinned);
    ForceCheck force_check =
        kPartialOOBWritesAreNoops ? kDontForceCheck : kDoForceCheck;
    Register index =
        BoundsCheckMem(decoder, imm.memory, type.size(), imm.offset, full_index,
                       pinned, force_check, kDontCheckAlignment);

    bool i64_offset = imm.memory->is_memory64();
    uintptr_t offset = imm.offset;
    pinned.set(index);
    CODE_COMMENT("store lane to memory");
    Register addr = pinned.set(GetMemoryStart(imm.mem_index, pinned));
    uint32_t protected_store_pc = 0;
    __ StoreLane(addr, index, offset, value, type, lane, &protected_store_pc,
                 i64_offset);
    if (imm.memory->bounds_checks == kTrapHandler) {
      protected_instructions_.emplace_back(
          trap_handler::ProtectedInstructionData{protected_store_pc});
      source_position_table_builder_.AddPosition(
          protected_store_pc, SourcePosition(decoder->position()), true);
      if (for_debugging_) {
        DefineSafepoint(protected_store_pc);
      }
    }
    if (V8_UNLIKELY(v8_flags.trace_wasm_memory)) {
      TraceMemoryOperation(true, type.mem_rep(), index, offset,
                           decoder->position());
    }
  }
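  // Note: stores force an explicit bounds check (kDoForceCheck) on platforms
  // where a partially out-of-bounds write could be observed before the trap
  // handler fires; where partial OOB writes are guaranteed to be no-ops, the
  // cheaper trap-handler strategy remains safe.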
 
  void CurrentMemoryPages(FullDecoder* /* decoder */,
                          const MemoryIndexImmediate& imm,
                          Value* /* result */) {
    LiftoffRegList pinned;
    LiftoffRegister mem_size = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    if (imm.memory->index == 0) {
      LOAD_INSTANCE_FIELD(mem_size.gp(), Memory0Size, kSystemPointerSize,
                          pinned);
    } else {
      LOAD_TAGGED_PTR_INSTANCE_FIELD(mem_size.gp(), MemoryBasesAndSizes,
                                     pinned);
      int buffer_offset =
          wasm::ObjectAccess::ToTagged(OFFSET_OF_DATA_START(ByteArray)) +
          kSystemPointerSize * (imm.memory->index * 2 + 1);
      __ LoadFullPointer(mem_size.gp(), mem_size.gp(), buffer_offset);
    }
    // Convert bytes to pages.
    __ emit_ptrsize_shri(mem_size.gp(), mem_size.gp(), kWasmPageSizeLog2);
    if (imm.memory->is_memory64() && kNeedI64RegPair) {
      LiftoffRegister high_word =
          __ GetUnusedRegister(kGpReg, LiftoffRegList{mem_size});
      // The high word is always 0 on 32-bit systems.
      __ LoadConstant(high_word, WasmValue{uint32_t{0}});
      mem_size = LiftoffRegister::ForPair(mem_size.gp(), high_word.gp());
    }
    __ PushRegister(imm.memory->is_memory64() ? kI64 : kI32, mem_size);
  }

  void MemoryGrow(FullDecoder* decoder, const MemoryIndexImmediate& imm,
                  const Value& value, Value* result_val) {
    // Pop the input, then spill all cache registers to make the runtime call.
    LiftoffRegList pinned;
    LiftoffRegister num_pages = pinned.set(__ PopToRegister());
    __ SpillAllRegisters();

    LiftoffRegister result = pinned.set(__ GetUnusedRegister(kGpReg, pinned));

    Label done;

    if (imm.memory->is_memory64()) {
      // If the high word is not 0, growing can never succeed (it would grow
      // by at least 256TB). The int32_t result is sign-extended below.
      __ LoadConstant(result, WasmValue(int32_t{-1}));
      if (kNeedI64RegPair) {
        FREEZE_STATE(all_spilled_anyway);
        __ emit_cond_jump(kNotEqual, &done, kI32, num_pages.high_gp(), no_reg,
                          all_spilled_anyway);
        num_pages = num_pages.low();
      } else {
        LiftoffRegister high_word = __ GetUnusedRegister(kGpReg, pinned);
        __ emit_i64_shri(high_word, num_pages, 32);
        FREEZE_STATE(all_spilled_anyway);
        __ emit_cond_jump(kNotEqual, &done, kI32, high_word.gp(), no_reg,
                          all_spilled_anyway);
      }
    }

    WasmMemoryGrowDescriptor descriptor;
    DCHECK_EQ(0, descriptor.GetStackParameterCount());
    DCHECK_EQ(2, descriptor.GetRegisterParameterCount());

    // Parameter 1: the number of pages to grow.
    Register num_pages_param_reg = descriptor.GetRegisterParameter(1);
    if (num_pages.gp() != num_pages_param_reg) {
      __ Move(num_pages_param_reg, num_pages.gp(), kI32);
    }

    // Parameter 0: the memory index.
    Register mem_index_param_reg = descriptor.GetRegisterParameter(0);
    __ LoadConstant(LiftoffRegister{mem_index_param_reg},
                    WasmValue(imm.memory->index));

    __ CallBuiltin(Builtin::kWasmMemoryGrow);
    DefineSafepoint();
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);

    if (kReturnRegister0 != result.gp()) {
      __ Move(result.gp(), kReturnRegister0, kI32);
    }

    __ bind(&done);

    if (imm.memory->is_memory64()) {
      LiftoffRegister result64 = result;
      if (kNeedI64RegPair) result64 = __ GetUnusedRegister(kGpRegPair, pinned);
      __ emit_type_conversion(kExprI64SConvertI32, result64, result, nullptr);
      __ PushRegister(kI64, result64);
    } else {
      __ PushRegister(kI32, result);
    }
  }
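  // Note: memory.grow returns -1 on failure, so for memory64 the compiler
  // can short-circuit: a non-zero high word in the page count can never
  // succeed, and the 32-bit builtin result is sign-extended to i64 at the
  // end, which also turns the -1 sentinel into the correct 64-bit value.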
 
  base::OwnedVector<ValueType> GetStackValueTypesForDebugging(
      FullDecoder* decoder) {
    DCHECK(for_debugging_);
    auto stack_value_types =
        base::OwnedVector<ValueType>::NewForOverwrite(decoder->stack_size());

    int depth = -1;
    for (ValueType& type : base::Reversed(stack_value_types)) {
      type = decoder->stack_value(++depth)->type;
    }
    return stack_value_types;
  }

  base::OwnedVector<DebugSideTable::Entry::Value>
  GetCurrentDebugSideTableEntries(
      FullDecoder* decoder,
      DebugSideTableBuilder::AssumeSpilling assume_spilling) {
    auto& stack_state = __ cache_state()->stack_state;

    // The value stack types were cached earlier and may still include call
    // arguments that Liftoff has already popped; use only the lower part.
    size_t expected_value_stack_size =
        stack_state.size() - num_exceptions_ - __ num_locals();
    DCHECK_LE(expected_value_stack_size,
              stack_value_types_for_debugging_.size());

    auto values =
        base::OwnedVector<DebugSideTable::Entry::Value>::NewForOverwrite(
            stack_state.size());

    int index = 0;
    ValueType* stack_value_type_ptr = stack_value_types_for_debugging_.begin();
    // Iterate the operand stack control block by control block, so that we
    // can handle the implicit exception value for try blocks.
    for (int j = decoder->control_depth() - 1; j >= 0; j--) {
      Control* control = decoder->control_at(j);
      Control* next_control = j > 0 ? decoder->control_at(j - 1) : nullptr;
      int end_index = next_control
                          ? next_control->stack_depth + __ num_locals() +
                                next_control->num_exceptions
                          : __ cache_state()->stack_height();
      bool exception_on_stack =
          control->is_try_catch() || control->is_try_catchall();
      for (; index < end_index; ++index) {
        const LiftoffVarState& slot = stack_state[index];
        DebugSideTable::Entry::Value& value = values[index];
        value.module = decoder->module_;
        value.index = index;
        if (exception_on_stack) {
          value.type = kWasmAnyRef;
          exception_on_stack = false;
        } else if (index < static_cast<int>(__ num_locals())) {
          value.type = decoder->local_type(index);
        } else {
          value.type = *stack_value_type_ptr++;
        }
        DCHECK(CompatibleStackSlotTypes(slot.kind(), value.type.kind()));
        switch (slot.loc()) {
          case VarState::kIntConst:
            value.storage = DebugSideTable::Entry::kConstant;
            value.i32_const = slot.i32_const();
            break;
          case VarState::kRegister:
            DCHECK_NE(DebugSideTableBuilder::kDidSpill, assume_spilling);
            if (assume_spilling == DebugSideTableBuilder::kAllowRegisters) {
              value.storage = DebugSideTable::Entry::kRegister;
              value.reg_code = slot.reg().liftoff_code();
              break;
            }
            DCHECK_EQ(DebugSideTableBuilder::kAssumeSpilling, assume_spilling);
            [[fallthrough]];
          case VarState::kStack:
            value.storage = DebugSideTable::Entry::kStack;
            value.stack_offset = slot.offset();
            break;
        }
      }
    }
    DCHECK_EQ(stack_value_types_for_debugging_.begin() +
                  expected_value_stack_size,
              stack_value_type_ptr);
    return values;
  }

  void RegisterDebugSideTableEntry(
      FullDecoder* decoder,
      DebugSideTableBuilder::AssumeSpilling assume_spilling) {
    if (V8_LIKELY(!debug_sidetable_builder_)) return;
    debug_sidetable_builder_->NewEntry(
        __ pc_offset(),
        GetCurrentDebugSideTableEntries(decoder, assume_spilling).as_vector());
  }

  DebugSideTableBuilder::EntryBuilder* RegisterOOLDebugSideTableEntry(
      FullDecoder* decoder) {
    if (V8_LIKELY(!debug_sidetable_builder_)) return nullptr;
    return debug_sidetable_builder_->NewOOLEntry(
        GetCurrentDebugSideTableEntries(decoder,
                                        DebugSideTableBuilder::kAssumeSpilling)
            .as_vector());
  }
 
  void CallDirect(FullDecoder* decoder, const CallFunctionImmediate& imm,
                  const Value args[], Value[]) {
    CallDirect(decoder, imm, args, nullptr, CallJumpMode::kCall);
  }

  void CallIndirect(FullDecoder* decoder, const Value& index_val,
                    const CallIndirectImmediate& imm, const Value args[],
                    Value returns[]) {
    CallIndirectImpl(decoder, imm, CallJumpMode::kCall);
  }

  void CallRef(FullDecoder* decoder, const Value& func_ref,
               const FunctionSig* sig, const Value args[], Value returns[]) {
    CallRefImpl(decoder, func_ref.type, sig, CallJumpMode::kCall);
  }

  void ReturnCall(FullDecoder* decoder, const CallFunctionImmediate& imm,
                  const Value args[]) {
    TierupCheckOnTailCall(decoder);
    CallDirect(decoder, imm, args, nullptr, CallJumpMode::kTailCall);
  }

  void ReturnCallIndirect(FullDecoder* decoder, const Value& index_val,
                          const CallIndirectImmediate& imm,
                          const Value args[]) {
    TierupCheckOnTailCall(decoder);
    CallIndirectImpl(decoder, imm, CallJumpMode::kTailCall);
  }

  void ReturnCallRef(FullDecoder* decoder, const Value& func_ref,
                     const FunctionSig* sig, const Value args[]) {
    TierupCheckOnTailCall(decoder);
    CallRefImpl(decoder, func_ref.type, sig, CallJumpMode::kTailCall);
  }
 
  void BrOnNull(FullDecoder* decoder, const Value& ref_object, uint32_t depth,
                bool pass_null_along_branch,
                Value* /* result_on_fallthrough */) {
    // Avoid having sequences of branches do duplicate work.
    if (depth != decoder->control_depth() - 1) {
      __ PrepareForBranch(decoder->control_at(depth)->br_merge()->arity, {});
    }

    Label cont_false;
    LiftoffRegList pinned;
    LiftoffRegister ref =
        pinned.set(pass_null_along_branch ? __ PeekToRegister(0, pinned)
                                          : __ PopToRegister(pinned));
    Register null = __ GetUnusedRegister(kGpReg, pinned).gp();
    LoadNullValueForCompare(null, pinned, ref_object.type);
    {
      FREEZE_STATE(frozen);
      __ emit_cond_jump(kNotEqual, &cont_false, ref_object.type.kind(),
                        ref.gp(), null, frozen);
      BrOrRet(decoder, depth);
    }
    __ bind(&cont_false);
    if (!pass_null_along_branch) {
      // We popped the value, so push it back.
      __ PushRegister(kRef, ref);
    }
  }

  void BrOnNonNull(FullDecoder* decoder, const Value& ref_object,
                   Value* /* result */, uint32_t depth,
                   bool drop_null_on_fallthrough) {
    // Avoid having sequences of branches do duplicate work.
    if (depth != decoder->control_depth() - 1) {
      __ PrepareForBranch(decoder->control_at(depth)->br_merge()->arity, {});
    }

    Label cont_false;
    LiftoffRegList pinned;
    LiftoffRegister ref = pinned.set(__ PeekToRegister(0, pinned));

    Register null = __ GetUnusedRegister(kGpReg, pinned).gp();
    LoadNullValueForCompare(null, pinned, ref_object.type);
    {
      FREEZE_STATE(frozen);
      __ emit_cond_jump(kEqual, &cont_false, ref_object.type.kind(), ref.gp(),
                        null, frozen);

      BrOrRet(decoder, depth);
    }
    if (drop_null_on_fallthrough) __ DropValues(1);
    __ bind(&cont_false);
  }
 
  template <ValueKind src_kind, ValueKind result_kind,
            ValueKind result_lane_kind = kVoid, typename EmitFn,
            typename... ExtraArgs>
  void EmitTerOp(EmitFn fn, LiftoffRegister dst, LiftoffRegister src1,
                 LiftoffRegister src2, LiftoffRegister src3,
                 ExtraArgs... extra_args) {
    CallEmitFn(fn, dst, src1, src2, src3, extra_args...);
    if (V8_UNLIKELY(detect_nondeterminism_)) {
      LiftoffRegList pinned{dst};
      if (result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64) {
        CheckNan(dst, pinned, result_kind);
      } else if (result_kind == ValueKind::kS128 &&
                 (result_lane_kind == kF32 || result_lane_kind == kF64)) {
        CheckS128Nan(dst, LiftoffRegList{src1, src2, src3, dst},
                     result_lane_kind);
      }
    }
    __ PushRegister(result_kind, dst);
  }

  template <ValueKind src_kind, ValueKind result_kind,
            ValueKind result_lane_kind = kVoid, typename EmitFn>
  void EmitTerOp(EmitFn fn) {
    LiftoffRegister src3 = __ PopToRegister();
    LiftoffRegister src2 = __ PopToRegister(LiftoffRegList{src3});
    LiftoffRegister src1 = __ PopToRegister(LiftoffRegList{src3, src2});
    static constexpr RegClass result_rc = reg_class_for(result_kind);
    // Reusing src1 and src2 can complicate codegen for some backends, so
    // only reuse src3 for the destination and keep src1/src2 pinned.
    LiftoffRegister dst =
        (src2 == src3 || src1 == src3)
            ? __ GetUnusedRegister(result_rc, LiftoffRegList{src1, src2})
            : __ GetUnusedRegister(result_rc, {src3},
                                   LiftoffRegList{src1, src2});
    EmitTerOp<src_kind, result_kind, result_lane_kind, EmitFn>(fn, dst, src1,
                                                               src2, src3);
  }
 
  void EmitRelaxedLaneSelect(int lane_width) {
    DCHECK(lane_width == 8 || lane_width == 32 || lane_width == 64);
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
    if (!CpuFeatures::IsSupported(AVX)) {
#if defined(V8_TARGET_ARCH_IA32)
      // On ia32, xmm0 is not a cached register.
      LiftoffRegister mask = LiftoffRegister::from_uncached(xmm0);
#else
      LiftoffRegister mask(xmm0);
#endif
      __ PopToFixedRegister(mask);
      LiftoffRegister src2 = __ PopToModifiableRegister(LiftoffRegList{mask});
      LiftoffRegister src1 = __ PopToRegister(LiftoffRegList{src2, mask});
      EmitTerOp<kS128, kS128>(&LiftoffAssembler::emit_s128_relaxed_laneselect,
                              src2, src1, src2, mask, lane_width);
      return;
    }
#endif
    LiftoffRegList pinned;
    LiftoffRegister mask = pinned.set(__ PopToRegister(pinned));
    LiftoffRegister src2 = pinned.set(__ PopToRegister(pinned));
    LiftoffRegister src1 = pinned.set(__ PopToRegister(pinned));
    LiftoffRegister dst =
        __ GetUnusedRegister(reg_class_for(kS128), {}, pinned);
    EmitTerOp<kS128, kS128>(&LiftoffAssembler::emit_s128_relaxed_laneselect,
                            dst, src1, src2, mask, lane_width);
  }
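  // Note: on ia32/x64 without AVX, the SSE4.1 blend instructions used for
  // the relaxed lane select implicitly read the mask from register xmm0, so
  // the mask is popped into that fixed register; with AVX (and on other
  // architectures) the generic path below can use any register.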
 
  template <typename EmitFn, typename EmitFnImm>
  void EmitSimdShiftOp(EmitFn fn, EmitFnImm fnImm) {
    static constexpr RegClass result_rc = reg_class_for(kS128);

    VarState rhs_slot = __ cache_state()->stack_state.back();
    // Check if the RHS is an immediate.
    if (rhs_slot.is_const()) {
      __ cache_state()->stack_state.pop_back();
      int32_t imm = rhs_slot.i32_const();
      LiftoffRegister operand = __ PopToRegister();
      LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {});

      CallEmitFn(fnImm, dst, operand, imm);
      __ PushRegister(kS128, dst);
    } else {
      LiftoffRegister count = __ PopToRegister();
      LiftoffRegister operand = __ PopToRegister();
      LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {});

      CallEmitFn(fn, dst, operand, count);
      __ PushRegister(kS128, dst);
    }
  }
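  // Note: SIMD shifts special-case a constant shift amount: if the value on
  // top of the stack is a known constant, the immediate variant (fnImm) is
  // emitted directly and no register is allocated for the count.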
 
  template <ValueKind result_lane_kind>
  void EmitSimdFloatRoundingOpWithCFallback(
      bool (LiftoffAssembler::*emit_fn)(LiftoffRegister, LiftoffRegister),
      ExternalReference (*ext_ref)()) {
    static constexpr RegClass rc = reg_class_for(kS128);
    LiftoffRegister src = __ PopToRegister();
    LiftoffRegister dst = __ GetUnusedRegister(rc, {src}, {});
    if (!(asm_.*emit_fn)(dst, src)) {
      // Return v128 via stack for ARM.
      GenerateCCallWithStackBuffer(&dst, kVoid, kS128,
                                   {VarState{kS128, src, 0}}, ext_ref());
    }
    if (V8_UNLIKELY(detect_nondeterminism_)) {
      LiftoffRegList pinned{dst};
      CheckS128Nan(dst, pinned, result_lane_kind);
    }
    __ PushRegister(kS128, dst);
  }

  template <ValueKind result_lane_kind, bool swap_lhs_rhs = false>
  void EmitSimdFloatBinOpWithCFallback(
      bool (LiftoffAssembler::*emit_fn)(LiftoffRegister, LiftoffRegister,
                                        LiftoffRegister),
      ExternalReference (*ext_ref)()) {
    static constexpr RegClass rc = reg_class_for(kS128);
    LiftoffRegister src2 = __ PopToRegister();
    LiftoffRegister src1 = __ PopToRegister(LiftoffRegList{src2});
    LiftoffRegister dst = __ GetUnusedRegister(rc, {src1, src2}, {});

    if (swap_lhs_rhs) std::swap(src1, src2);

    if (!(asm_.*emit_fn)(dst, src1, src2)) {
      // Return v128 via stack for ARM.
      GenerateCCallWithStackBuffer(
          &dst, kVoid, kS128,
          {VarState{kS128, src1, 0}, VarState{kS128, src2, 0}}, ext_ref());
    }
    if (V8_UNLIKELY(detect_nondeterminism_)) {
      LiftoffRegList pinned{dst};
      CheckS128Nan(dst, pinned, result_lane_kind);
    }
    __ PushRegister(kS128, dst);
  }
 
  template <ValueKind result_lane_kind, typename EmitFn>
  void EmitSimdFmaOp(EmitFn emit_fn) {
    LiftoffRegList pinned;
    LiftoffRegister src3 = pinned.set(__ PopToRegister(pinned));
    LiftoffRegister src2 = pinned.set(__ PopToRegister(pinned));
    LiftoffRegister src1 = pinned.set(__ PopToRegister(pinned));
    RegClass dst_rc = reg_class_for(kS128);
    LiftoffRegister dst = __ GetUnusedRegister(dst_rc, {});
    (asm_.*emit_fn)(dst, src1, src2, src3);
    if (V8_UNLIKELY(detect_nondeterminism_)) {
      LiftoffRegList pinned_inner{dst};
      CheckS128Nan(dst, pinned_inner, result_lane_kind);
    }
    __ PushRegister(kS128, dst);
  }

  template <ValueKind result_lane_kind, typename EmitFn>
  void EmitSimdFmaOpWithCFallback(EmitFn emit_fn,
                                  ExternalReference (*ext_ref)()) {
    LiftoffRegList pinned;
    LiftoffRegister src3 = pinned.set(__ PopToRegister(pinned));
    LiftoffRegister src2 = pinned.set(__ PopToRegister(pinned));
    LiftoffRegister src1 = pinned.set(__ PopToRegister(pinned));
    RegClass dst_rc = reg_class_for(kS128);
    LiftoffRegister dst = __ GetUnusedRegister(dst_rc, {});
    if (!(asm_.*emit_fn)(dst, src1, src2, src3)) {
      // Return v128 via stack for ARM.
      GenerateCCallWithStackBuffer(
          &dst, kVoid, kS128,
          {VarState{kS128, src1, 0}, VarState{kS128, src2, 0},
           VarState{kS128, src3, 0}},
          ext_ref());
    }
    if (V8_UNLIKELY(detect_nondeterminism_)) {
      LiftoffRegList pinned_inner{dst};
      CheckS128Nan(dst, pinned_inner, result_lane_kind);
    }
    __ PushRegister(kS128, dst);
  }
 
  void SimdOp(FullDecoder* decoder, WasmOpcode opcode, const Value* /* args */,
              Value* /* result */) {
    CHECK(CpuFeatures::SupportsWasmSimd128());
    switch (opcode) {
      case wasm::kExprI8x16Swizzle:
        return EmitI8x16Swizzle(false);
      case wasm::kExprI8x16RelaxedSwizzle:
        return EmitI8x16Swizzle(true);
      case wasm::kExprI8x16Popcnt:
        return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_popcnt);
      case wasm::kExprI8x16Splat:
        return EmitUnOp<kI32, kS128>(&LiftoffAssembler::emit_i8x16_splat);
      case wasm::kExprI16x8Splat:
        return EmitUnOp<kI32, kS128>(&LiftoffAssembler::emit_i16x8_splat);
      case wasm::kExprI32x4Splat:
        return EmitUnOp<kI32, kS128>(&LiftoffAssembler::emit_i32x4_splat);
      case wasm::kExprI64x2Splat:
        return EmitUnOp<kI64, kS128>(&LiftoffAssembler::emit_i64x2_splat);
      case wasm::kExprF16x8Splat: {
        auto emit_with_c_fallback = [this](LiftoffRegister dst,
                                           LiftoffRegister src) {
          if (asm_.emit_f16x8_splat(dst, src)) return;
          LiftoffRegister value = __ GetUnusedRegister(kGpReg, {});
          auto conv_ref = ExternalReference::wasm_float32_to_float16();
          GenerateCCallWithStackBuffer(&value, kVoid, kI16,
                                       {VarState{kF32, src, 0}}, conv_ref);
          __ emit_i16x8_splat(dst, value);
        };
        return EmitUnOp<kF32, kS128>(emit_with_c_fallback);
      }
 
      case wasm::kExprF32x4Splat:
        return EmitUnOp<kF32, kS128>(&LiftoffAssembler::emit_f32x4_splat);
      case wasm::kExprF64x2Splat:
        return EmitUnOp<kF64, kS128>(&LiftoffAssembler::emit_f64x2_splat);
      case wasm::kExprI8x16Eq:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_eq);
      case wasm::kExprI8x16Ne:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_ne);
      case wasm::kExprI8x16LtS:
        return EmitBinOp<kS128, kS128, true>(
            &LiftoffAssembler::emit_i8x16_gt_s);
      case wasm::kExprI8x16LtU:
        return EmitBinOp<kS128, kS128, true>(
            &LiftoffAssembler::emit_i8x16_gt_u);
      case wasm::kExprI8x16GtS:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_gt_s);
      case wasm::kExprI8x16GtU:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_gt_u);
      case wasm::kExprI8x16LeS:
        return EmitBinOp<kS128, kS128, true>(
            &LiftoffAssembler::emit_i8x16_ge_s);
      case wasm::kExprI8x16LeU:
        return EmitBinOp<kS128, kS128, true>(
            &LiftoffAssembler::emit_i8x16_ge_u);
      case wasm::kExprI8x16GeS:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_ge_s);
      case wasm::kExprI8x16GeU:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_ge_u);
      case wasm::kExprI16x8Eq:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_eq);
      case wasm::kExprI16x8Ne:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_ne);
      case wasm::kExprI16x8LtS:
        return EmitBinOp<kS128, kS128, true>(
            &LiftoffAssembler::emit_i16x8_gt_s);
      case wasm::kExprI16x8LtU:
        return EmitBinOp<kS128, kS128, true>(
            &LiftoffAssembler::emit_i16x8_gt_u);
      case wasm::kExprI16x8GtS:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_gt_s);
      case wasm::kExprI16x8GtU:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_gt_u);
      case wasm::kExprI16x8LeS:
        return EmitBinOp<kS128, kS128, true>(
            &LiftoffAssembler::emit_i16x8_ge_s);
      case wasm::kExprI16x8LeU:
        return EmitBinOp<kS128, kS128, true>(
            &LiftoffAssembler::emit_i16x8_ge_u);
      case wasm::kExprI16x8GeS:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_ge_s);
      case wasm::kExprI16x8GeU:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_ge_u);
      case wasm::kExprI32x4Eq:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_eq);
      case wasm::kExprI32x4Ne:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_ne);
      case wasm::kExprI32x4LtS:
        return EmitBinOp<kS128, kS128, true>(
            &LiftoffAssembler::emit_i32x4_gt_s);
      case wasm::kExprI32x4LtU:
        return EmitBinOp<kS128, kS128, true>(
            &LiftoffAssembler::emit_i32x4_gt_u);
      case wasm::kExprI32x4GtS:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_gt_s);
      case wasm::kExprI32x4GtU:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_gt_u);
      case wasm::kExprI32x4LeS:
        return EmitBinOp<kS128, kS128, true>(
            &LiftoffAssembler::emit_i32x4_ge_s);
      case wasm::kExprI32x4LeU:
        return EmitBinOp<kS128, kS128, true>(
            &LiftoffAssembler::emit_i32x4_ge_u);
      case wasm::kExprI32x4GeS:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_ge_s);
      case wasm::kExprI32x4GeU:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_ge_u);
      case wasm::kExprI64x2Eq:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_eq);
      case wasm::kExprI64x2Ne:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_ne);
      case wasm::kExprI64x2LtS:
        return EmitBinOp<kS128, kS128, true>(
            &LiftoffAssembler::emit_i64x2_gt_s);
      case wasm::kExprI64x2GtS:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_gt_s);
      case wasm::kExprI64x2LeS:
        return EmitBinOp<kS128, kS128, true>(
            &LiftoffAssembler::emit_i64x2_ge_s);
      case wasm::kExprI64x2GeS:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_ge_s);
      case wasm::kExprF16x8Eq:
        return EmitSimdFloatBinOpWithCFallback<kI16>(
            &LiftoffAssembler::emit_f16x8_eq,
            ExternalReference::wasm_f16x8_eq);
      case wasm::kExprF16x8Ne:
        return EmitSimdFloatBinOpWithCFallback<kI16>(
            &LiftoffAssembler::emit_f16x8_ne,
            ExternalReference::wasm_f16x8_ne);
      case wasm::kExprF16x8Lt:
        return EmitSimdFloatBinOpWithCFallback<kI16>(
            &LiftoffAssembler::emit_f16x8_lt,
            ExternalReference::wasm_f16x8_lt);
      case wasm::kExprF16x8Gt:
        return EmitSimdFloatBinOpWithCFallback<kI16, true>(
            &LiftoffAssembler::emit_f16x8_lt,
            ExternalReference::wasm_f16x8_lt);
      case wasm::kExprF16x8Le:
        return EmitSimdFloatBinOpWithCFallback<kI16>(
            &LiftoffAssembler::emit_f16x8_le,
            ExternalReference::wasm_f16x8_le);
      case wasm::kExprF16x8Ge:
        return EmitSimdFloatBinOpWithCFallback<kI16, true>(
            &LiftoffAssembler::emit_f16x8_le,
            ExternalReference::wasm_f16x8_le);
      case wasm::kExprF32x4Eq:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_eq);
      case wasm::kExprF32x4Ne:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_ne);
      case wasm::kExprF32x4Lt:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_lt);
      case wasm::kExprF32x4Gt:
        return EmitBinOp<kS128, kS128, true>(&LiftoffAssembler::emit_f32x4_lt);
      case wasm::kExprF32x4Le:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_le);
      case wasm::kExprF32x4Ge:
        return EmitBinOp<kS128, kS128, true>(&LiftoffAssembler::emit_f32x4_le);
      case wasm::kExprF64x2Eq:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_eq);
      case wasm::kExprF64x2Ne:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_ne);
      case wasm::kExprF64x2Lt:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_lt);
      case wasm::kExprF64x2Gt:
        return EmitBinOp<kS128, kS128, true>(&LiftoffAssembler::emit_f64x2_lt);
      case wasm::kExprF64x2Le:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_le);
      case wasm::kExprF64x2Ge:
        return EmitBinOp<kS128, kS128, true>(&LiftoffAssembler::emit_f64x2_le);
 
      case wasm::kExprS128Not:
        return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_s128_not);
      case wasm::kExprS128And:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_s128_and);
      case wasm::kExprS128Or:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_s128_or);
      case wasm::kExprS128Xor:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_s128_xor);
      case wasm::kExprS128Select:
        return EmitTerOp<kS128, kS128>(&LiftoffAssembler::emit_s128_select);
      case wasm::kExprI8x16Neg:
        return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_neg);
      case wasm::kExprV128AnyTrue:
        return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v128_anytrue);
      case wasm::kExprI8x16AllTrue:
        return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i8x16_alltrue);
      case wasm::kExprI8x16BitMask:
        return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i8x16_bitmask);
      case wasm::kExprI8x16Shl:
        return EmitSimdShiftOp(&LiftoffAssembler::emit_i8x16_shl,
                               &LiftoffAssembler::emit_i8x16_shli);
      case wasm::kExprI8x16ShrS:
        return EmitSimdShiftOp(&LiftoffAssembler::emit_i8x16_shr_s,
                               &LiftoffAssembler::emit_i8x16_shri_s);
      case wasm::kExprI8x16ShrU:
        return EmitSimdShiftOp(&LiftoffAssembler::emit_i8x16_shr_u,
                               &LiftoffAssembler::emit_i8x16_shri_u);
      case wasm::kExprI8x16Add:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_add);
      case wasm::kExprI8x16AddSatS:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_add_sat_s);
      case wasm::kExprI8x16AddSatU:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_add_sat_u);
      case wasm::kExprI8x16Sub:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_sub);
      case wasm::kExprI8x16SubSatS:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_sub_sat_s);
      case wasm::kExprI8x16SubSatU:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_sub_sat_u);
      case wasm::kExprI8x16MinS:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_min_s);
      case wasm::kExprI8x16MinU:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_min_u);
      case wasm::kExprI8x16MaxS:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_max_s);
      case wasm::kExprI8x16MaxU:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_max_u);
      case wasm::kExprI16x8Neg:
        return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_neg);
      case wasm::kExprI16x8AllTrue:
        return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i16x8_alltrue);
      case wasm::kExprI16x8BitMask:
        return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i16x8_bitmask);
      case wasm::kExprI16x8Shl:
        return EmitSimdShiftOp(&LiftoffAssembler::emit_i16x8_shl,
                               &LiftoffAssembler::emit_i16x8_shli);
      case wasm::kExprI16x8ShrS:
        return EmitSimdShiftOp(&LiftoffAssembler::emit_i16x8_shr_s,
                               &LiftoffAssembler::emit_i16x8_shri_s);
      case wasm::kExprI16x8ShrU:
        return EmitSimdShiftOp(&LiftoffAssembler::emit_i16x8_shr_u,
                               &LiftoffAssembler::emit_i16x8_shri_u);
      case wasm::kExprI16x8Add:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_add);
      case wasm::kExprI16x8AddSatS:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_add_sat_s);
      case wasm::kExprI16x8AddSatU:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_add_sat_u);
      case wasm::kExprI16x8Sub:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_sub);
      case wasm::kExprI16x8SubSatS:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_sub_sat_s);
      case wasm::kExprI16x8SubSatU:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_sub_sat_u);
      case wasm::kExprI16x8Mul:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_mul);
      case wasm::kExprI16x8MinS:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_min_s);
      case wasm::kExprI16x8MinU:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_min_u);
      case wasm::kExprI16x8MaxS:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_max_s);
      case wasm::kExprI16x8MaxU:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_max_u);
      case wasm::kExprI16x8ExtAddPairwiseI8x16S:
        return EmitUnOp<kS128, kS128>(
            &LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s);
      case wasm::kExprI16x8ExtAddPairwiseI8x16U:
        return EmitUnOp<kS128, kS128>(
            &LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u);
      case wasm::kExprI16x8ExtMulLowI8x16S:
        return EmitBinOp<kS128, kS128>(
            &LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s);
      case wasm::kExprI16x8ExtMulLowI8x16U:
        return EmitBinOp<kS128, kS128>(
            &LiftoffAssembler::emit_i16x8_extmul_low_i8x16_u);
      case wasm::kExprI16x8ExtMulHighI8x16S:
        return EmitBinOp<kS128, kS128>(
            &LiftoffAssembler::emit_i16x8_extmul_high_i8x16_s);
      case wasm::kExprI16x8ExtMulHighI8x16U:
        return EmitBinOp<kS128, kS128>(
            &LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u);
      case wasm::kExprI16x8Q15MulRSatS:
        return EmitBinOp<kS128, kS128>(
            &LiftoffAssembler::emit_i16x8_q15mulr_sat_s);
      case wasm::kExprI32x4Neg:
        return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_neg);
      case wasm::kExprI32x4AllTrue:
        return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i32x4_alltrue);
      case wasm::kExprI32x4BitMask:
        return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i32x4_bitmask);
      case wasm::kExprI32x4Shl:
        return EmitSimdShiftOp(&LiftoffAssembler::emit_i32x4_shl,
                               &LiftoffAssembler::emit_i32x4_shli);
      case wasm::kExprI32x4ShrS:
        return EmitSimdShiftOp(&LiftoffAssembler::emit_i32x4_shr_s,
                               &LiftoffAssembler::emit_i32x4_shri_s);
      case wasm::kExprI32x4ShrU:
        return EmitSimdShiftOp(&LiftoffAssembler::emit_i32x4_shr_u,
                               &LiftoffAssembler::emit_i32x4_shri_u);
      case wasm::kExprI32x4Add:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_add);
      case wasm::kExprI32x4Sub:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_sub);
      case wasm::kExprI32x4Mul:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_mul);
      case wasm::kExprI32x4MinS:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_min_s);
      case wasm::kExprI32x4MinU:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_min_u);
      case wasm::kExprI32x4MaxS:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_max_s);
      case wasm::kExprI32x4MaxU:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_max_u);
      case wasm::kExprI32x4DotI16x8S:
        return EmitBinOp<kS128, kS128>(
            &LiftoffAssembler::emit_i32x4_dot_i16x8_s);
      case wasm::kExprI32x4ExtAddPairwiseI16x8S:
        return EmitUnOp<kS128, kS128>(
            &LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s);
      case wasm::kExprI32x4ExtAddPairwiseI16x8U:
        return EmitUnOp<kS128, kS128>(
            &LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u);
      case wasm::kExprI32x4ExtMulLowI16x8S:
        return EmitBinOp<kS128, kS128>(
            &LiftoffAssembler::emit_i32x4_extmul_low_i16x8_s);
      case wasm::kExprI32x4ExtMulLowI16x8U:
        return EmitBinOp<kS128, kS128>(
            &LiftoffAssembler::emit_i32x4_extmul_low_i16x8_u);
      case wasm::kExprI32x4ExtMulHighI16x8S:
        return EmitBinOp<kS128, kS128>(
            &LiftoffAssembler::emit_i32x4_extmul_high_i16x8_s);
      case wasm::kExprI32x4ExtMulHighI16x8U:
        return EmitBinOp<kS128, kS128>(
            &LiftoffAssembler::emit_i32x4_extmul_high_i16x8_u);
      case wasm::kExprI64x2Neg:
        return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_neg);
      case wasm::kExprI64x2AllTrue:
        return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i64x2_alltrue);
      case wasm::kExprI64x2Shl:
        return EmitSimdShiftOp(&LiftoffAssembler::emit_i64x2_shl,
                               &LiftoffAssembler::emit_i64x2_shli);
      case wasm::kExprI64x2ShrS:
        return EmitSimdShiftOp(&LiftoffAssembler::emit_i64x2_shr_s,
                               &LiftoffAssembler::emit_i64x2_shri_s);
      case wasm::kExprI64x2ShrU:
        return EmitSimdShiftOp(&LiftoffAssembler::emit_i64x2_shr_u,
                               &LiftoffAssembler::emit_i64x2_shri_u);
      case wasm::kExprI64x2Add:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_add);
      case wasm::kExprI64x2Sub:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_sub);
      case wasm::kExprI64x2Mul:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_mul);
      case wasm::kExprI64x2ExtMulLowI32x4S:
        return EmitBinOp<kS128, kS128>(
            &LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s);
      case wasm::kExprI64x2ExtMulLowI32x4U:
        return EmitBinOp<kS128, kS128>(
            &LiftoffAssembler::emit_i64x2_extmul_low_i32x4_u);
      case wasm::kExprI64x2ExtMulHighI32x4S:
        return EmitBinOp<kS128, kS128>(
            &LiftoffAssembler::emit_i64x2_extmul_high_i32x4_s);
      case wasm::kExprI64x2ExtMulHighI32x4U:
        return EmitBinOp<kS128, kS128>(
            &LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u);
      case wasm::kExprI64x2BitMask:
        return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i64x2_bitmask);
      case wasm::kExprI64x2SConvertI32x4Low:
        return EmitUnOp<kS128, kS128>(
            &LiftoffAssembler::emit_i64x2_sconvert_i32x4_low);
      case wasm::kExprI64x2SConvertI32x4High:
        return EmitUnOp<kS128, kS128>(
            &LiftoffAssembler::emit_i64x2_sconvert_i32x4_high);
      case wasm::kExprI64x2UConvertI32x4Low:
        return EmitUnOp<kS128, kS128>(
            &LiftoffAssembler::emit_i64x2_uconvert_i32x4_low);
      case wasm::kExprI64x2UConvertI32x4High:
        return EmitUnOp<kS128, kS128>(
            &LiftoffAssembler::emit_i64x2_uconvert_i32x4_high);
 
 4828      case wasm::kExprF16x8Abs:
 
 4829        return EmitSimdFloatRoundingOpWithCFallback<kF16>(
 
 4831            &ExternalReference::wasm_f16x8_abs);
 
 4832      case wasm::kExprF16x8Neg:
 
 4833        return EmitSimdFloatRoundingOpWithCFallback<kF16>(
 
 4835            &ExternalReference::wasm_f16x8_neg);
 
 4836      case wasm::kExprF16x8Sqrt:
 
 4837        return EmitSimdFloatRoundingOpWithCFallback<kF16>(
 
 4839            &ExternalReference::wasm_f16x8_sqrt);
 
 4840      case wasm::kExprF16x8Ceil:
 
 4841        return EmitSimdFloatRoundingOpWithCFallback<kF16>(
 
 4843            &ExternalReference::wasm_f16x8_ceil);
 
 4844      case wasm::kExprF16x8Floor:
 
 4845        return EmitSimdFloatRoundingOpWithCFallback<kF16>(
 
 4847            ExternalReference::wasm_f16x8_floor);
 
 4848      case wasm::kExprF16x8Trunc:
 
 4849        return EmitSimdFloatRoundingOpWithCFallback<kF16>(
 
 4851            ExternalReference::wasm_f16x8_trunc);
 
 4852      case wasm::kExprF16x8NearestInt:
 
 4853        return EmitSimdFloatRoundingOpWithCFallback<kF16>(
 
 4855            ExternalReference::wasm_f16x8_nearest_int);
 
 4856      case wasm::kExprF16x8Add:
 
 4857        return EmitSimdFloatBinOpWithCFallback<kF16>(
 
 4859            ExternalReference::wasm_f16x8_add);
 
 4860      case wasm::kExprF16x8Sub:
 
 4861        return EmitSimdFloatBinOpWithCFallback<kF16>(
 
 4863            ExternalReference::wasm_f16x8_sub);
 
 4864      case wasm::kExprF16x8Mul:
 
 4865        return EmitSimdFloatBinOpWithCFallback<kF16>(
 
 4867            ExternalReference::wasm_f16x8_mul);
 
 4868      case wasm::kExprF16x8Div:
 
 4869        return EmitSimdFloatBinOpWithCFallback<kF16>(
 
 4871            ExternalReference::wasm_f16x8_div);
 
 4872      case wasm::kExprF16x8Min:
 
 4873        return EmitSimdFloatBinOpWithCFallback<kF16>(
 
 4875            ExternalReference::wasm_f16x8_min);
 
 4876      case wasm::kExprF16x8Max:
 
 4877        return EmitSimdFloatBinOpWithCFallback<kF16>(
 
 4879            ExternalReference::wasm_f16x8_max);
 
 4880      case wasm::kExprF16x8Pmin:
 
 4881        return EmitSimdFloatBinOpWithCFallback<kF16>(
 
 4883            ExternalReference::wasm_f16x8_pmin);
 
 4884      case wasm::kExprF16x8Pmax:
 
 4885        return EmitSimdFloatBinOpWithCFallback<kF16>(
 
 4887            ExternalReference::wasm_f16x8_pmax);
 
 4888      case wasm::kExprF32x4Abs:
 
 4890      case wasm::kExprF32x4Neg:
 
 4892      case wasm::kExprF32x4Sqrt:
 
 4894      case wasm::kExprF32x4Ceil:
 
 4895        return EmitSimdFloatRoundingOpWithCFallback<kF32>(
 
 4897            &ExternalReference::wasm_f32x4_ceil);
 
 4898      case wasm::kExprF32x4Floor:
 
 4899        return EmitSimdFloatRoundingOpWithCFallback<kF32>(
 
 4901            ExternalReference::wasm_f32x4_floor);
 
 4902      case wasm::kExprF32x4Trunc:
 
 4903        return EmitSimdFloatRoundingOpWithCFallback<kF32>(
 
 4905            ExternalReference::wasm_f32x4_trunc);
 
 4906      case wasm::kExprF32x4NearestInt:
 
 4907        return EmitSimdFloatRoundingOpWithCFallback<kF32>(
 
 4909            ExternalReference::wasm_f32x4_nearest_int);
 
 4910      case wasm::kExprF32x4Add:
 
 4911        return EmitBinOp<kS128, kS128, false, kF32>(
 
 4913      case wasm::kExprF32x4Sub:
 
 4914        return EmitBinOp<kS128, kS128, false, kF32>(
 
 4916      case wasm::kExprF32x4Mul:
 
 4917        return EmitBinOp<kS128, kS128, false, kF32>(
 
 4919      case wasm::kExprF32x4Div:
 
 4920        return EmitBinOp<kS128, kS128, false, kF32>(
 
 4922      case wasm::kExprF32x4Min:
 
 4923        return EmitBinOp<kS128, kS128, false, kF32>(
 
 4925      case wasm::kExprF32x4Max:
 
 4926        return EmitBinOp<kS128, kS128, false, kF32>(
 
 4928      case wasm::kExprF32x4Pmin:
 
 4929        return EmitBinOp<kS128, kS128, false, kF32>(
 
 4931      case wasm::kExprF32x4Pmax:
 
 4932        return EmitBinOp<kS128, kS128, false, kF32>(
 
 4934      case wasm::kExprF64x2Abs:
 
 4936      case wasm::kExprF64x2Neg:
 
 4938      case wasm::kExprF64x2Sqrt:
 
 4940      case wasm::kExprF64x2Ceil:
 
 4941        return EmitSimdFloatRoundingOpWithCFallback<kF64>(
 
 4943            &ExternalReference::wasm_f64x2_ceil);
 
 4944      case wasm::kExprF64x2Floor:
 
 4945        return EmitSimdFloatRoundingOpWithCFallback<kF64>(
 
 4947            ExternalReference::wasm_f64x2_floor);
 
 4948      case wasm::kExprF64x2Trunc:
 
 4949        return EmitSimdFloatRoundingOpWithCFallback<kF64>(
 
 4951            ExternalReference::wasm_f64x2_trunc);
 
 4952      case wasm::kExprF64x2NearestInt:
 
 4953        return EmitSimdFloatRoundingOpWithCFallback<kF64>(
 
 4955            ExternalReference::wasm_f64x2_nearest_int);
 
 4956      case wasm::kExprF64x2Add:
 
 4957        return EmitBinOp<kS128, kS128, false, kF64>(
 
 4959      case wasm::kExprF64x2Sub:
 
 4960        return EmitBinOp<kS128, kS128, false, kF64>(
 
 4962      case wasm::kExprF64x2Mul:
 
 4963        return EmitBinOp<kS128, kS128, false, kF64>(
 
 4965      case wasm::kExprF64x2Div:
 
 4966        return EmitBinOp<kS128, kS128, false, kF64>(
 
 4968      case wasm::kExprF64x2Min:
 
 4969        return EmitBinOp<kS128, kS128, false, kF64>(
 
 4971      case wasm::kExprF64x2Max:
 
 4972        return EmitBinOp<kS128, kS128, false, kF64>(
 
 4974      case wasm::kExprF64x2Pmin:
 
 4975        return EmitBinOp<kS128, kS128, false, kF64>(
 
 4977      case wasm::kExprF64x2Pmax:
 
 4978        return EmitBinOp<kS128, kS128, false, kF64>(
 
 4980      case wasm::kExprI32x4SConvertF32x4:
 
 4981        return EmitUnOp<kS128, kS128, kF32>(
 
 4983      case wasm::kExprI32x4UConvertF32x4:
 
 4984        return EmitUnOp<kS128, kS128, kF32>(
 
 4986      case wasm::kExprF32x4SConvertI32x4:
 
 4987        return EmitUnOp<kS128, kS128, kF32>(
 
 4989      case wasm::kExprF32x4UConvertI32x4:
 
 4990        return EmitUnOp<kS128, kS128, kF32>(
 
 4992      case wasm::kExprF32x4PromoteLowF16x8:
 
 4993        return EmitSimdFloatRoundingOpWithCFallback<kF32>(
 
 4995            &ExternalReference::wasm_f32x4_promote_low_f16x8);
 
 4996      case wasm::kExprF16x8DemoteF32x4Zero:
 
 4997        return EmitSimdFloatRoundingOpWithCFallback<kF16>(
 
 4999            &ExternalReference::wasm_f16x8_demote_f32x4_zero);
 
 5000      case wasm::kExprF16x8DemoteF64x2Zero:
 
 5001        return EmitSimdFloatRoundingOpWithCFallback<kF16>(
 
 5003            &ExternalReference::wasm_f16x8_demote_f64x2_zero);
 
 5004      case wasm::kExprI16x8SConvertF16x8:
 
 5005        return EmitSimdFloatRoundingOpWithCFallback<kI16>(
 
 5007            &ExternalReference::wasm_i16x8_sconvert_f16x8);
 
 5008      case wasm::kExprI16x8UConvertF16x8:
 
 5009        return EmitSimdFloatRoundingOpWithCFallback<kI16>(
 
 5011            &ExternalReference::wasm_i16x8_uconvert_f16x8);
 
 5012      case wasm::kExprF16x8SConvertI16x8:
 
 5013        return EmitSimdFloatRoundingOpWithCFallback<kF16>(
 
 5015            &ExternalReference::wasm_f16x8_sconvert_i16x8);
 
 5016      case wasm::kExprF16x8UConvertI16x8:
 
 5017        return EmitSimdFloatRoundingOpWithCFallback<kF16>(
 
 5019            &ExternalReference::wasm_f16x8_uconvert_i16x8);
 
 5020      case wasm::kExprI8x16SConvertI16x8:
 
 5021        return EmitBinOp<kS128, kS128>(
 
 5023      case wasm::kExprI8x16UConvertI16x8:
 
 5024        return EmitBinOp<kS128, kS128>(
 
 5026      case wasm::kExprI16x8SConvertI32x4:
 
 5027        return EmitBinOp<kS128, kS128>(
 
 5029      case wasm::kExprI16x8UConvertI32x4:
 
 5030        return EmitBinOp<kS128, kS128>(
 
 5032      case wasm::kExprI16x8SConvertI8x16Low:
 
 5033        return EmitUnOp<kS128, kS128>(
 
 5035      case wasm::kExprI16x8SConvertI8x16High:
 
 5036        return EmitUnOp<kS128, kS128>(
 
 5038      case wasm::kExprI16x8UConvertI8x16Low:
 
 5039        return EmitUnOp<kS128, kS128>(
 
 5041      case wasm::kExprI16x8UConvertI8x16High:
 
 5042        return EmitUnOp<kS128, kS128>(
 
 5044      case wasm::kExprI32x4SConvertI16x8Low:
 
 5045        return EmitUnOp<kS128, kS128>(
 
 5047      case wasm::kExprI32x4SConvertI16x8High:
 
 5048        return EmitUnOp<kS128, kS128>(
 
 5050      case wasm::kExprI32x4UConvertI16x8Low:
 
 5051        return EmitUnOp<kS128, kS128>(
 
 5053      case wasm::kExprI32x4UConvertI16x8High:
 
 5054        return EmitUnOp<kS128, kS128>(
 
 5056      case wasm::kExprS128AndNot:
 
 5058      case wasm::kExprI8x16RoundingAverageU:
 
 5059        return EmitBinOp<kS128, kS128>(
 
 5061      case wasm::kExprI16x8RoundingAverageU:
 
 5062        return EmitBinOp<kS128, kS128>(
 
 5064      case wasm::kExprI8x16Abs:
 
 5066      case wasm::kExprI16x8Abs:
 
 5068      case wasm::kExprI32x4Abs:
 
 5070      case wasm::kExprI64x2Abs:
 
 5072      case wasm::kExprF64x2ConvertLowI32x4S:
 
 5073        return EmitUnOp<kS128, kS128, kF64>(
 
 5075      case wasm::kExprF64x2ConvertLowI32x4U:
 
 5076        return EmitUnOp<kS128, kS128, kF64>(
 
 5078      case wasm::kExprF64x2PromoteLowF32x4:
 
 5079        return EmitUnOp<kS128, kS128, kF64>(
 
 5081      case wasm::kExprF32x4DemoteF64x2Zero:
 
 5082        return EmitUnOp<kS128, kS128, kF32>(
 
 5084      case wasm::kExprI32x4TruncSatF64x2SZero:
 
 5085        return EmitUnOp<kS128, kS128>(
 
 5087      case wasm::kExprI32x4TruncSatF64x2UZero:
 
 5088        return EmitUnOp<kS128, kS128>(
 
 5090      case wasm::kExprF16x8Qfma:
 
 5091        return EmitSimdFmaOpWithCFallback<kF16>(
 
 5093            &ExternalReference::wasm_f16x8_qfma);
 
 5094      case wasm::kExprF16x8Qfms:
 
 5095        return EmitSimdFmaOpWithCFallback<kF16>(
 
 5097            &ExternalReference::wasm_f16x8_qfms);
 
 5098      case wasm::kExprF32x4Qfma:
 
 5100      case wasm::kExprF32x4Qfms:
 
 5102      case wasm::kExprF64x2Qfma:
 
 5104      case wasm::kExprF64x2Qfms:
 
 5106      case wasm::kExprI16x8RelaxedLaneSelect:
 
 5107      case wasm::kExprI8x16RelaxedLaneSelect:
 
 5110        return EmitRelaxedLaneSelect(8);
 
 5111      case wasm::kExprI32x4RelaxedLaneSelect:
 
 5112        return EmitRelaxedLaneSelect(32);
 
 5113      case wasm::kExprI64x2RelaxedLaneSelect:
 
 5114        return EmitRelaxedLaneSelect(64);
 
 5115      case wasm::kExprF32x4RelaxedMin:
 
 5116        return EmitBinOp<kS128, kS128, false, kF32>(
 
 5118      case wasm::kExprF32x4RelaxedMax:
 
 5119        return EmitBinOp<kS128, kS128, false, kF32>(
 
 5121      case wasm::kExprF64x2RelaxedMin:
 
 5122        return EmitBinOp<kS128, kS128, false, kF64>(
 
 5124      case wasm::kExprF64x2RelaxedMax:
 
 5125        return EmitBinOp<kS128, kS128, false, kF64>(
 
 5127      case wasm::kExprI16x8RelaxedQ15MulRS:
 
 5128        return EmitBinOp<kS128, kS128>(
 
 5130      case wasm::kExprI32x4RelaxedTruncF32x4S:
 
 5131        return EmitUnOp<kS128, kS128>(
 
 5133      case wasm::kExprI32x4RelaxedTruncF32x4U:
 
 5134        return EmitUnOp<kS128, kS128>(
 
 5136      case wasm::kExprI32x4RelaxedTruncF64x2SZero:
 
 5137        return EmitUnOp<kS128, kS128>(
 
 5139      case wasm::kExprI32x4RelaxedTruncF64x2UZero:
 
 5140        return EmitUnOp<kS128, kS128>(
 
 5142      case wasm::kExprI16x8DotI8x16I7x16S:
 
 5143        return EmitBinOp<kS128, kS128>(
 
 5145      case wasm::kExprI32x4DotI8x16I7x16AddS: {
 
 5149        LiftoffRegList pinned;
 
 5150        LiftoffRegister acc = pinned.set(
__ PopToRegister(pinned));
 
 5151        LiftoffRegister rhs = pinned.set(
__ PopToRegister(pinned));
 
 5152        LiftoffRegister lhs = pinned.set(
__ PopToRegister(pinned));
 
 5153#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 
 5155        LiftoffRegister dst =
 
 5156            __ GetUnusedRegister(res_rc, {acc}, LiftoffRegList{lhs, rhs});
 
 5161        LiftoffRegister dst = 
__ GetUnusedRegister(res_rc, pinned);
 
 5164        __ emit_i32x4_dot_i8x16_i7x16_add_s(dst, lhs, rhs, acc);
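  // Helper for SIMD extract-lane opcodes: pops the S128 input, allocates a
  // result register (reusing the input when the register classes match), and
  // invokes the platform-specific emitter with the immediate lane index.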
 
  template <ValueKind src_kind, ValueKind result_kind, typename EmitFn>
  void EmitSimdExtractLaneOp(EmitFn fn, const SimdLaneImmediate& imm) {
    static constexpr RegClass src_rc = reg_class_for(src_kind);
    static constexpr RegClass result_rc = reg_class_for(result_kind);
    LiftoffRegister lhs = __ PopToRegister();
    LiftoffRegister dst = src_rc == result_rc
                              ? __ GetUnusedRegister(result_rc, {lhs}, {})
                              : __ GetUnusedRegister(result_rc, {});
    fn(dst, lhs, imm.lane);
    __ PushRegister(result_kind, dst);
  }
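  // Helper for SIMD replace-lane opcodes. The extra pinning keeps src2 alive
  // on platforms where an S128 value occupies a register pair.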
 
  template <ValueKind src2_kind, typename EmitFn>
  void EmitSimdReplaceLaneOp(EmitFn fn, const SimdLaneImmediate& imm) {
    static constexpr RegClass src1_rc = reg_class_for(kS128);
    static constexpr RegClass src2_rc = reg_class_for(src2_kind);
    static constexpr RegClass result_rc = reg_class_for(kS128);
    // On backends which need an fp register pair for S128, pin src2 when it
    // is a single fp register, since it can overlap with those pairs.
    static constexpr bool pin_src2 = kNeedS128RegPair && src2_rc == kFpReg;
    LiftoffRegister src2 = __ PopToRegister();
    LiftoffRegister src1 = (src1_rc == src2_rc || pin_src2)
                               ? __ PopToRegister(LiftoffRegList{src2})
                               : __ PopToRegister();
    LiftoffRegister dst =
        (src2_rc == result_rc || pin_src2)
            ? __ GetUnusedRegister(result_rc, {src1}, LiftoffRegList{src2})
            : __ GetUnusedRegister(result_rc, {src1}, {});
    fn(dst, src1, src2, imm.lane);
    __ PushRegister(kS128, dst);
  }
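  // Dispatches all extract-lane/replace-lane opcodes. F16x8 lanes fall back
  // to C calls (wasm_float16_to_float32 / wasm_float32_to_float16) when no
  // native half-precision support is available.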
 
  void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
                  const SimdLaneImmediate& imm,
                  base::Vector<const Value> inputs, Value* result) {
    switch (opcode) {
#define CASE_SIMD_EXTRACT_LANE_OP(opcode, kind, fn)      \
  case wasm::kExpr##opcode:                              \
    EmitSimdExtractLaneOp<kS128, k##kind>(               \
        [this](LiftoffRegister dst, LiftoffRegister lhs, \
               uint8_t imm_lane_idx) {                   \
          __ emit_##fn(dst, lhs, imm_lane_idx);          \
        },                                               \
        imm);                                            \
    break;
      // ...
#undef CASE_SIMD_EXTRACT_LANE_OP
      case wasm::kExprF16x8ExtractLane:
        EmitSimdExtractLaneOp<kS128, kF32>(
            [this](LiftoffRegister dst, LiftoffRegister lhs,
                   uint8_t imm_lane_idx) {
              if (asm_.emit_f16x8_extract_lane(dst, lhs, imm_lane_idx)) return;
              LiftoffRegister value = __ GetUnusedRegister(kGpReg, {});
              __ emit_i16x8_extract_lane_u(value, lhs, imm_lane_idx);
              auto conv_ref = ExternalReference::wasm_float16_to_float32();
              GenerateCCallWithStackBuffer(
                  &dst, kVoid, kF32, {VarState{kI16, value, 0}}, conv_ref);
            },
            imm);
        break;
#define CASE_SIMD_REPLACE_LANE_OP(opcode, kind, fn)          \
  case wasm::kExpr##opcode:                                  \
    EmitSimdReplaceLaneOp<k##kind>(                          \
        [this](LiftoffRegister dst, LiftoffRegister src1,    \
               LiftoffRegister src2, uint8_t imm_lane_idx) { \
          __ emit_##fn(dst, src1, src2, imm_lane_idx);       \
        },                                                   \
        imm);                                                \
    break;
      // ...
#undef CASE_SIMD_REPLACE_LANE_OP
      case wasm::kExprF16x8ReplaceLane: {
        EmitSimdReplaceLaneOp<kI32>(
            [this](LiftoffRegister dst, LiftoffRegister src1,
                   LiftoffRegister src2, uint8_t imm_lane_idx) {
              if (asm_.emit_f16x8_replace_lane(dst, src1, src2, imm_lane_idx)) {
                return;
              }
              __ PushRegister(kS128, src1);
              LiftoffRegister value = __ GetUnusedRegister(kGpReg, {});
              auto conv_ref = ExternalReference::wasm_float32_to_float16();
              GenerateCCallWithStackBuffer(&value, kVoid, kI16,
                                           {VarState{kF32, src2, 0}}, conv_ref);
              __ PopToFixedRegister(src1);
              __ emit_i16x8_replace_lane(dst, src1, value, imm_lane_idx);
            },
            imm);
        break;
      }
      default:
        UNREACHABLE();
    }
  }
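  // Materializes a v128 constant, special-casing the all-zeroes and all-ones
  // bit patterns, which can be produced without loading from memory.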
 
  void S128Const(FullDecoder* decoder, const Simd128Immediate& imm,
                 Value* result) {
    static constexpr RegClass result_rc = reg_class_for(kS128);
    LiftoffRegister dst = __ GetUnusedRegister(result_rc, {});
    bool all_zeroes = std::all_of(std::begin(imm.value), std::end(imm.value),
                                  [](uint8_t v) { return v == 0; });
    bool all_ones = std::all_of(std::begin(imm.value), std::end(imm.value),
                                [](uint8_t v) { return v == 0xff; });
    if (all_zeroes) {
      // ...
    } else if (all_ones) {
      // ...
    } else {
      // ...
    }
    __ PushRegister(kS128, dst);
  }
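  // Emits an i8x16.shuffle. The shuffle mask is canonicalized first, which
  // may require swapping the two inputs.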
 
  void Simd8x16ShuffleOp(FullDecoder* decoder, const Simd128Immediate& imm,
                         const Value& input0, const Value& input1,
                         Value* result) {
    static constexpr RegClass result_rc = reg_class_for(kS128);
    LiftoffRegList pinned;
    LiftoffRegister rhs = pinned.set(__ PopToRegister(pinned));
    LiftoffRegister lhs = pinned.set(__ PopToRegister(pinned));
    LiftoffRegister dst = __ GetUnusedRegister(result_rc, {lhs, rhs}, {});

    uint8_t shuffle[kSimd128Size];
    memcpy(shuffle, imm.value, sizeof(shuffle));
    bool is_swizzle;
    bool needs_swap;
    SimdShuffle::CanonicalizeShuffle(lhs == rhs, shuffle, &needs_swap,
                                     &is_swizzle);
    if (needs_swap) {
      std::swap(lhs, rhs);
    }
    __ emit_i8x16_shuffle(dst, lhs, rhs, shuffle, is_swizzle);
    __ PushRegister(kS128, dst);
  }
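  // Converts an untagged integer in `reg` into a Smi in place, by shifting
  // left by the Smi tag and shift size.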
 
  void ToSmi(Register reg) {
    if (COMPRESS_POINTERS_BOOL || kSystemPointerSize == 4) {
      __ emit_i32_shli(reg, reg, kSmiShiftSize + kSmiTagSize);
    } else {
      __ emit_i64_shli(LiftoffRegister{reg}, LiftoffRegister{reg},
                       kSmiShiftSize + kSmiTagSize);
    }
  }
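  // Exception values are encoded into a FixedArray as Smis, 16 bits per slot,
  // so that the GC never sees untagged raw words. The helpers below store and
  // load values in these 16-bit chunks.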
 
  void Store32BitExceptionValue(Register values_array, int* index_in_array,
                                Register value, LiftoffRegList pinned) {
    Register tmp_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
    // Get the lower half word into tmp_reg and convert it to a Smi.
    __ emit_i32_andi(tmp_reg, value, 0xffff);
    ToSmi(tmp_reg);
    __ StoreTaggedPointer(
        values_array, no_reg,
        ObjectAccess::ElementOffsetInTaggedFixedArray(*index_in_array),
        tmp_reg, pinned, nullptr, LiftoffAssembler::kSkipWriteBarrier);
    (*index_in_array)++;
    // Get the upper half word into tmp_reg and convert it to a Smi.
    __ emit_i32_shri(tmp_reg, value, 16);
    ToSmi(tmp_reg);
    __ StoreTaggedPointer(
        values_array, no_reg,
        ObjectAccess::ElementOffsetInTaggedFixedArray(*index_in_array),
        tmp_reg, pinned, nullptr, LiftoffAssembler::kSkipWriteBarrier);
    (*index_in_array)++;
  }

  void Store64BitExceptionValue(Register values_array, int* index_in_array,
                                LiftoffRegister value, LiftoffRegList pinned) {
    if (kNeedI64RegPair) {
      Store32BitExceptionValue(values_array, index_in_array, value.low_gp(),
                               pinned);
      Store32BitExceptionValue(values_array, index_in_array, value.high_gp(),
                               pinned);
    } else {
      Store32BitExceptionValue(values_array, index_in_array, value.gp(),
                               pinned);
      __ emit_i64_shri(value, value, 32);
      Store32BitExceptionValue(values_array, index_in_array, value.gp(),
                               pinned);
    }
  }

  void Load16BitExceptionValue(LiftoffRegister dst,
                               LiftoffRegister values_array, uint32_t* index,
                               LiftoffRegList pinned) {
    __ LoadSmiAsInt32(
        dst, values_array.gp(),
        ObjectAccess::ElementOffsetInTaggedFixedArray(*index));
    (*index)++;
  }

  void Load32BitExceptionValue(Register dst, LiftoffRegister values_array,
                               uint32_t* index, LiftoffRegList pinned) {
    LiftoffRegister upper = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    Load16BitExceptionValue(upper, values_array, index, pinned);
    __ emit_i32_shli(upper.gp(), upper.gp(), 16);
    Load16BitExceptionValue(LiftoffRegister(dst), values_array, index, pinned);
    __ emit_i32_or(dst, upper.gp(), dst);
  }

  void Load64BitExceptionValue(LiftoffRegister dst,
                               LiftoffRegister values_array, uint32_t* index,
                               LiftoffRegList pinned) {
    if (kNeedI64RegPair) {
      Load32BitExceptionValue(dst.high_gp(), values_array, index, pinned);
      Load32BitExceptionValue(dst.low_gp(), values_array, index, pinned);
    } else {
      Load16BitExceptionValue(dst, values_array, index, pinned);
      __ emit_i64_shli(dst, dst, 48);
      LiftoffRegister tmp_reg =
          pinned.set(__ GetUnusedRegister(kGpReg, pinned));
      Load16BitExceptionValue(tmp_reg, values_array, index, pinned);
      __ emit_i64_shli(tmp_reg, tmp_reg, 32);
      __ emit_i64_or(dst, tmp_reg, dst);
      Load16BitExceptionValue(tmp_reg, values_array, index, pinned);
      __ emit_i64_shli(tmp_reg, tmp_reg, 16);
      __ emit_i64_or(dst, tmp_reg, dst);
      Load16BitExceptionValue(tmp_reg, values_array, index, pinned);
      __ emit_i64_or(dst, tmp_reg, dst);
    }
  }
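  // Pops one value off the Liftoff value stack and encodes it into the
  // exception values array, dispatching on the value kind.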
 
  void StoreExceptionValue(ValueType type, Register values_array,
                           int* index_in_array, LiftoffRegList pinned) {
    LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
    switch (type.kind()) {
      case kI32:
        Store32BitExceptionValue(values_array, index_in_array, value.gp(),
                                 pinned);
        break;
      case kF32: {
        LiftoffRegister gp_reg =
            pinned.set(__ GetUnusedRegister(kGpReg, pinned));
        __ emit_type_conversion(kExprI32ReinterpretF32, gp_reg, value, nullptr);
        Store32BitExceptionValue(values_array, index_in_array, gp_reg.gp(),
                                 pinned);
        break;
      }
      case kI64:
        Store64BitExceptionValue(values_array, index_in_array, value, pinned);
        break;
      case kF64: {
        LiftoffRegister tmp_reg =
            pinned.set(__ GetUnusedRegister(reg_class_for(kI64), pinned));
        __ emit_type_conversion(kExprI64ReinterpretF64, tmp_reg, value,
                                nullptr);
        Store64BitExceptionValue(values_array, index_in_array, tmp_reg, pinned);
        break;
      }
      case kS128: {
        LiftoffRegister tmp_reg =
            pinned.set(__ GetUnusedRegister(kGpReg, pinned));
        for (int i : {3, 2, 1, 0}) {
          __ emit_i32x4_extract_lane(tmp_reg, value, i);
          Store32BitExceptionValue(values_array, index_in_array, tmp_reg.gp(),
                                   pinned);
        }
        break;
      }
      case wasm::kRef:
      case wasm::kRefNull: {
        --(*index_in_array);
        __ StoreTaggedPointer(
            values_array, no_reg,
            ObjectAccess::ElementOffsetInTaggedFixedArray(*index_in_array),
            value.gp(), pinned);
        break;
      }
      default:
        UNREACHABLE();
    }
  }
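  // Decodes one value from the exception values array and pushes it onto the
  // Liftoff value stack.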
 
  void LoadExceptionValue(ValueKind kind, LiftoffRegister values_array,
                          uint32_t* index, LiftoffRegList pinned) {
    RegClass rc = reg_class_for(kind);
    LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
    switch (kind) {
      case kI32:
        Load32BitExceptionValue(value.gp(), values_array, index, pinned);
        break;
      case kF32: {
        LiftoffRegister tmp_reg =
            pinned.set(__ GetUnusedRegister(kGpReg, pinned));
        Load32BitExceptionValue(tmp_reg.gp(), values_array, index, pinned);
        __ emit_type_conversion(kExprF32ReinterpretI32, value, tmp_reg,
                                nullptr);
        break;
      }
      case kI64:
        Load64BitExceptionValue(value, values_array, index, pinned);
        break;
      case kF64: {
        RegClass rc_i64 = reg_class_for(kI64);
        LiftoffRegister tmp_reg =
            pinned.set(__ GetUnusedRegister(rc_i64, pinned));
        Load64BitExceptionValue(tmp_reg, values_array, index, pinned);
        __ emit_type_conversion(kExprF64ReinterpretI64, value, tmp_reg,
                                nullptr);
        break;
      }
      case kS128: {
        LiftoffRegister tmp_reg =
            pinned.set(__ GetUnusedRegister(kGpReg, pinned));
        Load32BitExceptionValue(tmp_reg.gp(), values_array, index, pinned);
        __ emit_i32x4_splat(value, tmp_reg);
        for (int lane : {1, 2, 3}) {
          Load32BitExceptionValue(tmp_reg.gp(), values_array, index, pinned);
          __ emit_i32x4_replace_lane(value, value, tmp_reg, lane);
        }
        break;
      }
      case wasm::kRef:
      case wasm::kRefNull: {
        __ LoadTaggedPointer(
            value.gp(), values_array.gp(), no_reg,
            ObjectAccess::ElementOffsetInTaggedFixedArray(*index));
        (*index)++;
        break;
      }
      default:
        UNREACHABLE();
    }
    __ PushRegister(kind, value);
  }
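  // Unpacks all parameters of the tag's signature from a caught exception
  // package onto the value stack.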
 
  void GetExceptionValues(FullDecoder* decoder, const VarState& exception_var,
                          const WasmTag* tag) {
    LiftoffRegList pinned;
    LiftoffRegister values_array = GetExceptionProperty(
        exception_var, RootIndex::kwasm_exception_values_symbol);
    pinned.set(values_array);
    uint32_t index = 0;
    auto* sig = tag->sig;
    for (ValueType param : sig->parameters()) {
      LoadExceptionValue(param.kind(), values_array, &index, pinned);
    }
  }
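  // Emits the out-of-line landing pad for a throwing instruction inside a
  // try block: the handler merges the current state into the catch state and
  // jumps to the catch label.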
 
  void EmitLandingPad(FullDecoder* decoder, int handler_offset) {
    if (decoder->current_catch() == -1) return;
    MovableLabel handler{zone_};

    // If we return normally, skip the handler.
    Label skip_handler;
    __ emit_jump(&skip_handler);

    __ bind(handler.get());
    __ ExceptionHandler();
    handlers_.push_back({std::move(handler), handler_offset});
    Control* current_try =
        decoder->control_at(decoder->control_depth_of_current_catch());
    if (current_try->try_info->catch_reached) {
      __ MergeStackWith(current_try->try_info->catch_state, 1,
                        LiftoffAssembler::kForwardJump);
    } else {
      current_try->try_info->catch_state = __ MergeIntoNewState(
          __ num_locals(), 1,
          current_try->stack_depth + current_try->num_exceptions);
      current_try->try_info->catch_reached = true;
    }
    __ emit_jump(&current_try->try_info->catch_label);

    __ bind(&skip_handler);
  }
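  // `throw`: allocates a FixedArray, encodes the tag's arguments into it,
  // loads the tag from the instance's tags table, and calls the WasmThrow
  // builtin.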
 
  void Throw(FullDecoder* decoder, const TagIndexImmediate& imm,
             const Value* /* args */) {
    LiftoffRegList pinned;

    // Load the encoded size in a register and allocate the values array.
    int encoded_size = WasmExceptionPackage::GetEncodedSize(imm.tag);
    LiftoffRegister encoded_size_reg =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    __ LoadConstant(encoded_size_reg, WasmValue::ForUintPtr(encoded_size));
    CallBuiltin(Builtin::kWasmAllocateFixedArray,
                MakeSig::Returns(kIntPtrKind).Params(kIntPtrKind),
                {VarState{kIntPtrKind, LiftoffRegister{encoded_size_reg}, 0}},
                decoder->position());

    LiftoffRegister values_array{kReturnRegister0};
    pinned.set(values_array);

    // Store the exception values, from last to first, so they can simply be
    // popped from the value stack.
    int index = encoded_size;
    auto* sig = imm.tag->sig;
    for (size_t param_idx = sig->parameter_count(); param_idx > 0;
         --param_idx) {
      ValueType type = sig->GetParam(param_idx - 1);
      StoreExceptionValue(type, values_array.gp(), &index, pinned);
    }

    // Load the exception tag.
    LiftoffRegister exception_tag =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    LOAD_TAGGED_PTR_INSTANCE_FIELD(exception_tag.gp(), TagsTable, pinned);
    __ LoadTaggedPointer(
        exception_tag.gp(), exception_tag.gp(), no_reg,
        ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index));

    // Finally, call WasmThrow.
    CallBuiltin(Builtin::kWasmThrow, MakeSig::Params(kIntPtrKind, kIntPtrKind),
                {VarState{kIntPtrKind, exception_tag, 0},
                 VarState{kIntPtrKind, values_array, 0}},
                decoder->position());

    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    // ...
  }
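  // Atomic memory accesses. When the index is a statically-in-bounds (and
  // aligned) constant it is folded into the effective offset; otherwise an
  // explicit bounds-and-alignment check is emitted.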
 
  void AtomicStoreMem(FullDecoder* decoder, StoreType type,
                      const MemoryAccessImmediate& imm) {
    LiftoffRegList pinned;
    LiftoffRegister value = pinned.set(__ PopToRegister());
    bool i64_offset = imm.memory->is_memory64();
    auto& index_slot = __ cache_state()->stack_state.back();
    uintptr_t offset = imm.offset;
    LiftoffRegList outer_pinned;
    Register index = no_reg;
    if (IndexStaticallyInBoundsAndAligned(imm.memory, index_slot, type.size(),
                                          &offset)) {
      __ cache_state()->stack_state.pop_back();  // Pop index.
    } else {
      LiftoffRegister full_index = __ PopToRegister(pinned);
      index =
          BoundsCheckMem(decoder, imm.memory, type.size(), imm.offset,
                         full_index, pinned, kDoForceCheck, kCheckAlignment);
      pinned.set(index);
    }
    Register addr = pinned.set(GetMemoryStart(imm.mem_index, pinned));
    if (V8_UNLIKELY(v8_flags.trace_wasm_memory) && index != no_reg) {
      outer_pinned.set(index);
    }
    __ AtomicStore(addr, index, offset, value, type, outer_pinned, i64_offset);
    if (V8_UNLIKELY(v8_flags.trace_wasm_memory)) {
      TraceMemoryOperation(true, type.mem_rep(), index, offset,
                           decoder->position());
    }
  }

  void AtomicLoadMem(FullDecoder* decoder, LoadType type,
                     const MemoryAccessImmediate& imm) {
    ValueKind kind = type.value_type().kind();
    bool i64_offset = imm.memory->is_memory64();
    auto& index_slot = __ cache_state()->stack_state.back();
    uintptr_t offset = imm.offset;
    Register index = no_reg;
    LiftoffRegList pinned;
    if (IndexStaticallyInBoundsAndAligned(imm.memory, index_slot, type.size(),
                                          &offset)) {
      __ cache_state()->stack_state.pop_back();  // Pop index.
    } else {
      LiftoffRegister full_index = __ PopToRegister();
      index = BoundsCheckMem(decoder, imm.memory, type.size(), imm.offset,
                             full_index, {}, kDoForceCheck, kCheckAlignment);
      pinned.set(index);
    }
 
    Register addr = pinned.set(GetMemoryStart(imm.mem_index, pinned));
    RegClass rc = reg_class_for(kind);
    LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
    __ AtomicLoad(value, addr, index, offset, type, pinned, i64_offset);
    __ PushRegister(kind, value);
    if (V8_UNLIKELY(v8_flags.trace_wasm_memory)) {
      TraceMemoryOperation(false, type.mem_type().representation(), index,
                           offset, decoder->position());
    }
  }
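  // Shared lowering for all atomic read-modify-write opcodes (add, sub, and,
  // or, xor, exchange); `emit_fn` selects the LiftoffAssembler emitter.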
 
  void AtomicBinop(FullDecoder* decoder, StoreType type,
                   const MemoryAccessImmediate& imm,
                   void (LiftoffAssembler::*emit_fn)(Register, Register,
                                                     uintptr_t, LiftoffRegister,
                                                     LiftoffRegister, StoreType,
                                                     bool)) {
    ValueKind result_kind = type.value_type().kind();
    LiftoffRegList pinned;
    LiftoffRegister value = pinned.set(__ PopToRegister());
#ifdef V8_TARGET_ARCH_IA32
    // Reuse the value register as the result register to avoid running out
    // of registers on ia32.
    LiftoffRegister result = value;
    if (__ cache_state()->is_used(value)) {
      result = pinned.set(__ GetUnusedRegister(value.reg_class(), pinned));
      __ Move(result, value, result_kind);
      pinned.clear(value);
      value = result;
    }
#else
    LiftoffRegister result =
        pinned.set(__ GetUnusedRegister(value.reg_class(), pinned));
#endif
    auto& index_slot = __ cache_state()->stack_state.back();
    uintptr_t offset = imm.offset;
    bool i64_offset = imm.memory->is_memory64();
    Register index = no_reg;
    if (IndexStaticallyInBoundsAndAligned(imm.memory, index_slot, type.size(),
                                          &offset)) {
      __ cache_state()->stack_state.pop_back();  // Pop index.
    } else {
      LiftoffRegister full_index = __ PopToRegister(pinned);
      index =
          BoundsCheckMem(decoder, imm.memory, type.size(), imm.offset,
                         full_index, pinned, kDoForceCheck, kCheckAlignment);
      pinned.set(index);
    }
    Register addr = pinned.set(GetMemoryStart(imm.mem_index, pinned));
    (asm_.*emit_fn)(addr, index, offset, value, result, type, i64_offset);
    __ PushRegister(result_kind, result);
  }
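  // Compare-exchange needs special-casing on ia32: there are too few
  // registers to pop all operands first, so the effective address is fully
  // computed up front there.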
 
  void AtomicCompareExchange(FullDecoder* decoder, StoreType type,
                             const MemoryAccessImmediate& imm) {
#ifdef V8_TARGET_ARCH_IA32
    LiftoffRegister full_index = __ PeekToRegister(2, {});
    Register index =
        BoundsCheckMem(decoder, imm.memory, type.size(), imm.offset, full_index,
                       {}, kDoForceCheck, kCheckAlignment);
    LiftoffRegList pinned{index};

    uintptr_t offset = imm.offset;
    Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
    if (imm.memory->index == 0) {
      // ... (load the start of memory 0 from the instance data)
    } else {
      // ... (load the memory start from the memory bases and sizes array)
      __ LoadFullPointer(addr, addr, buffer_offset);
    }
    __ emit_i32_add(addr, addr, index);
    pinned.clear(LiftoffRegister(index));
    LiftoffRegister new_value = pinned.set(__ PopToRegister(pinned));
    LiftoffRegister expected = pinned.set(__ PopToRegister(pinned));

    // Pop the index from the stack.
    bool i64_offset = imm.memory->is_memory64();
    DCHECK_EQ(i64_offset ? kI64 : kI32,
              __ cache_state()->stack_state.back().kind());
    __ DropValues(1);

    LiftoffRegister result = expected;
    // ...
    __ PushRegister(type.value_type().kind(), result);
    return;
#else
    ValueKind result_kind = type.value_type().kind();
    LiftoffRegList pinned;
    LiftoffRegister new_value = pinned.set(__ PopToRegister(pinned));
    LiftoffRegister expected = pinned.set(__ PopToRegister(pinned));
    LiftoffRegister result =
        pinned.set(__ GetUnusedRegister(reg_class_for(result_kind), pinned));

    auto& index_slot = __ cache_state()->stack_state.back();
    uintptr_t offset = imm.offset;
    bool i64_offset = imm.memory->is_memory64();
    Register index = no_reg;
    if (IndexStaticallyInBoundsAndAligned(imm.memory, index_slot, type.size(),
                                          &offset)) {
      __ cache_state()->stack_state.pop_back();  // Pop index.
    } else {
      LiftoffRegister full_index = __ PopToRegister(pinned);
      index =
          BoundsCheckMem(decoder, imm.memory, type.size(), imm.offset,
                         full_index, pinned, kDoForceCheck, kCheckAlignment);
      pinned.set(index);
    }
    Register addr = pinned.set(GetMemoryStart(imm.mem_index, pinned));
    __ AtomicCompareExchange(addr, index, offset, expected, new_value, result,
                             type, i64_offset);
    __ PushRegister(result_kind, result);
#endif
  }
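  // Sets up a stub call descriptor for `builtin`, moves `params` into the
  // expected locations, and emits the call.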
 
  void CallBuiltin(Builtin builtin, const ValueKindSig& sig,
                   std::initializer_list<VarState> params, int position) {
    // ...
    auto interface_descriptor = Builtins::CallInterfaceDescriptorFor(builtin);
    auto* call_descriptor = compiler::Linkage::GetStubCallDescriptor(
        zone_,                                          // zone
        interface_descriptor,                           // descriptor
        interface_descriptor.GetStackParameterCount(),  // stack parameter count
        compiler::CallDescriptor::kNoFlags,             // flags
        compiler::Operator::kNoProperties,              // properties
        StubCallMode::kCallWasmRuntimeStub);            // stub call mode

    __ PrepareBuiltinCall(&sig, call_descriptor, params);
    // ... (record the source position)
    __ CallBuiltin(builtin);
    DefineSafepoint();
  }
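  // `atomic.wait`: computes the effective address, converts the timeout (and,
  // for i64, the expected value) to a BigInt via builtin calls, then calls
  // the WasmI32AtomicWait/WasmI64AtomicWait builtin.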
 
  void AtomicWait(FullDecoder* decoder, ValueKind kind,
                  const MemoryAccessImmediate& imm) {
    ValueKind index_kind;
    {
      LiftoffRegList pinned;
      LiftoffRegister full_index = __ PeekToRegister(2, pinned);
      Register index_reg =
          BoundsCheckMem(decoder, imm.memory, value_kind_size(kind), imm.offset,
                         full_index, pinned, kDoForceCheck, kCheckAlignment);
      pinned.set(index_reg);

      uintptr_t offset = imm.offset;
      Register index_plus_offset = index_reg;

      if (__ cache_state()->is_used(LiftoffRegister(index_reg))) {
        index_plus_offset =
            pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
        __ Move(index_plus_offset, index_reg, kIntPtrKind);
      }
      if (offset) {
        __ emit_ptrsize_addi(index_plus_offset, index_plus_offset, offset);
      }

      VarState& index = __ cache_state()->stack_state.end()[-3];

      // Replace the index on the value stack by the effective address, so
      // that the BigInt allocation below cannot clobber it.
      __ cache_state()->inc_used(LiftoffRegister(index_plus_offset));
      if (index.is_reg()) __ cache_state()->dec_used(index.reg());
      index_kind = index.kind() == kI32 ? kI32 : kIntPtrKind;
      index = VarState{index_kind, LiftoffRegister{index_plus_offset}, 0};
    }
    {
      // Convert the top value of the stack (the timeout) from I64 to a
      // BigInt, which we can then pass to the wait builtin.
      VarState i64_timeout = __ cache_state()->stack_state.back();
      CallBuiltin(kNeedI64RegPair ? Builtin::kI32PairToBigInt
                                  : Builtin::kI64ToBigInt,
                  MakeSig::Returns(kRef).Params(kI64), {i64_timeout},
                  decoder->position());
      __ DropValues(1);
      __ PushRegister(kRef, LiftoffRegister(kReturnRegister0));
    }

    Register expected = no_reg;
    ValueKind expected_kind = kind == kI32 ? kI32 : kRef;
    if (kind == kI32) {
      expected = __ PeekToRegister(1, {}).gp();
    } else {
      // Convert the expected value from I64 to a BigInt as well.
      VarState i64_expected = __ cache_state()->stack_state.end()[-2];
      CallBuiltin(kNeedI64RegPair ? Builtin::kI32PairToBigInt
                                  : Builtin::kI64ToBigInt,
                  MakeSig::Returns(kRef).Params(kI64), {i64_expected},
                  decoder->position());
      expected = kReturnRegister0;
    }

    VarState timeout = __ cache_state()->stack_state.end()[-1];
    VarState index = __ cache_state()->stack_state.end()[-3];

    auto target = kind == kI32 ? Builtin::kWasmI32AtomicWait
                               : Builtin::kWasmI64AtomicWait;

    CallBuiltin(target, MakeSig::Params(kI32, index_kind, expected_kind, kRef),
                {{kI32, static_cast<int32_t>(imm.memory->index), 0},
                 index,
                 {expected_kind, LiftoffRegister{expected}, 0},
                 timeout},
                decoder->position());

    // Pop the index, the expected value, and the timeout.
    __ DropValues(3);

    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
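  // `atomic.notify`: bounds-checks the index, computes the effective address,
  // and calls the C function wasm_atomic_notify.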
 
  void AtomicNotify(FullDecoder* decoder, const MemoryAccessImmediate& imm) {
    LiftoffRegList pinned;
    LiftoffRegister num_waiters_to_wake = pinned.set(__ PopToRegister(pinned));
    LiftoffRegister full_index = __ PopToRegister(pinned);
    Register index_reg =
        BoundsCheckMem(decoder, imm.memory, kInt32Size, imm.offset, full_index,
                       pinned, kDoForceCheck, kCheckAlignment);
    pinned.set(index_reg);

    uintptr_t offset = imm.offset;
    Register addr = index_reg;
    if (__ cache_state()->is_used(LiftoffRegister(index_reg))) {
      addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
      __ Move(addr, index_reg, kIntPtrKind);
    }
    if (offset) {
      __ emit_ptrsize_addi(addr, addr, offset);
    }

    Register mem_start = GetMemoryStart(imm.memory->index, pinned);
    __ emit_ptrsize_add(addr, addr, mem_start);

    LiftoffRegister result =
        GenerateCCall(kI32,
                      {{kIntPtrKind, LiftoffRegister{addr}, 0},
                       {kI32, num_waiters_to_wake, 0}},
                      ExternalReference::wasm_atomic_notify());

    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);

    __ PushRegister(kI32, result);
  }
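  // Opcode/StoreType tables that drive the AtomicOp dispatch below.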
 
#define ATOMIC_STORE_LIST(V)        \
  V(I32AtomicStore, kI32Store)      \
  V(I64AtomicStore, kI64Store)      \
  V(I32AtomicStore8U, kI32Store8)   \
  V(I32AtomicStore16U, kI32Store16) \
  V(I64AtomicStore8U, kI64Store8)   \
  V(I64AtomicStore16U, kI64Store16) \
  V(I64AtomicStore32U, kI64Store32)

#define ATOMIC_LOAD_LIST(V)        \
  V(I32AtomicLoad, kI32Load)       \
  V(I64AtomicLoad, kI64Load)       \
  V(I32AtomicLoad8U, kI32Load8U)   \
  V(I32AtomicLoad16U, kI32Load16U) \
  V(I64AtomicLoad8U, kI64Load8U)   \
  V(I64AtomicLoad16U, kI64Load16U) \
  V(I64AtomicLoad32U, kI64Load32U)

#define ATOMIC_BINOP_INSTRUCTION_LIST(V)         \
  V(Add, I32AtomicAdd, kI32Store)                \
  V(Add, I64AtomicAdd, kI64Store)                \
  V(Add, I32AtomicAdd8U, kI32Store8)             \
  V(Add, I32AtomicAdd16U, kI32Store16)           \
  V(Add, I64AtomicAdd8U, kI64Store8)             \
  V(Add, I64AtomicAdd16U, kI64Store16)           \
  V(Add, I64AtomicAdd32U, kI64Store32)           \
  V(Sub, I32AtomicSub, kI32Store)                \
  V(Sub, I64AtomicSub, kI64Store)                \
  V(Sub, I32AtomicSub8U, kI32Store8)             \
  V(Sub, I32AtomicSub16U, kI32Store16)           \
  V(Sub, I64AtomicSub8U, kI64Store8)             \
  V(Sub, I64AtomicSub16U, kI64Store16)           \
  V(Sub, I64AtomicSub32U, kI64Store32)           \
  V(And, I32AtomicAnd, kI32Store)                \
  V(And, I64AtomicAnd, kI64Store)                \
  V(And, I32AtomicAnd8U, kI32Store8)             \
  V(And, I32AtomicAnd16U, kI32Store16)           \
  V(And, I64AtomicAnd8U, kI64Store8)             \
  V(And, I64AtomicAnd16U, kI64Store16)           \
  V(And, I64AtomicAnd32U, kI64Store32)           \
  V(Or, I32AtomicOr, kI32Store)                  \
  V(Or, I64AtomicOr, kI64Store)                  \
  V(Or, I32AtomicOr8U, kI32Store8)               \
  V(Or, I32AtomicOr16U, kI32Store16)             \
  V(Or, I64AtomicOr8U, kI64Store8)               \
  V(Or, I64AtomicOr16U, kI64Store16)             \
  V(Or, I64AtomicOr32U, kI64Store32)             \
  V(Xor, I32AtomicXor, kI32Store)                \
  V(Xor, I64AtomicXor, kI64Store)                \
  V(Xor, I32AtomicXor8U, kI32Store8)             \
  V(Xor, I32AtomicXor16U, kI32Store16)           \
  V(Xor, I64AtomicXor8U, kI64Store8)             \
  V(Xor, I64AtomicXor16U, kI64Store16)           \
  V(Xor, I64AtomicXor32U, kI64Store32)           \
  V(Exchange, I32AtomicExchange, kI32Store)      \
  V(Exchange, I64AtomicExchange, kI64Store)      \
  V(Exchange, I32AtomicExchange8U, kI32Store8)   \
  V(Exchange, I32AtomicExchange16U, kI32Store16) \
  V(Exchange, I64AtomicExchange8U, kI64Store8)   \
  V(Exchange, I64AtomicExchange16U, kI64Store16) \
  V(Exchange, I64AtomicExchange32U, kI64Store32)

#define ATOMIC_COMPARE_EXCHANGE_LIST(V)       \
  V(I32AtomicCompareExchange, kI32Store)      \
  V(I64AtomicCompareExchange, kI64Store)      \
  V(I32AtomicCompareExchange8U, kI32Store8)   \
  V(I32AtomicCompareExchange16U, kI32Store16) \
  V(I64AtomicCompareExchange8U, kI64Store8)   \
  V(I64AtomicCompareExchange16U, kI64Store16) \
  V(I64AtomicCompareExchange32U, kI64Store32)

  void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, const Value args[],
                const size_t argc, const MemoryAccessImmediate& imm,
                Value* result) {
    switch (opcode) {
#define ATOMIC_STORE_OP(name, type)                \
  case wasm::kExpr##name:                          \
    AtomicStoreMem(decoder, StoreType::type, imm); \
    break;

      ATOMIC_STORE_LIST(ATOMIC_STORE_OP)
#undef ATOMIC_STORE_OP

#define ATOMIC_LOAD_OP(name, type)               \
  case wasm::kExpr##name:                        \
    AtomicLoadMem(decoder, LoadType::type, imm); \
    break;

      ATOMIC_LOAD_LIST(ATOMIC_LOAD_OP)
#undef ATOMIC_LOAD_OP

#define ATOMIC_BINOP_OP(op, name, type)                                        \
  case wasm::kExpr##name:                                                      \
    AtomicBinop(decoder, StoreType::type, imm, &LiftoffAssembler::Atomic##op); \
    break;

      ATOMIC_BINOP_INSTRUCTION_LIST(ATOMIC_BINOP_OP)
#undef ATOMIC_BINOP_OP

#define ATOMIC_COMPARE_EXCHANGE_OP(name, type)            \
  case wasm::kExpr##name:                                 \
    AtomicCompareExchange(decoder, StoreType::type, imm); \
    break;

      ATOMIC_COMPARE_EXCHANGE_LIST(ATOMIC_COMPARE_EXCHANGE_OP)
#undef ATOMIC_COMPARE_EXCHANGE_OP

      case kExprI32AtomicWait:
        AtomicWait(decoder, kI32, imm);
        break;
      case kExprI64AtomicWait:
        AtomicWait(decoder, kI64, imm);
        break;
      case kExprAtomicNotify:
        AtomicNotify(decoder, imm);
        break;
      default:
        UNREACHABLE();
    }
  }

#undef ATOMIC_STORE_LIST
#undef ATOMIC_LOAD_LIST
#undef ATOMIC_BINOP_INSTRUCTION_LIST
#undef ATOMIC_COMPARE_EXCHANGE_LIST

  void AtomicFence(FullDecoder* decoder) { __ AtomicFence(); }
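  // Pops an index off the stack and converts it to kIntPtrKind. On 32-bit
  // targets a 64-bit index's high word is ORed into *high_word so that a
  // single later check against zero covers all popped indices.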
 
  VarState PopIndexToVarState(Register* high_word, LiftoffRegList* pinned) {
    VarState slot = __ PopVarState();
    const bool is_64bit_value = slot.kind() == kI64;
    // For memory32 on a 32-bit system or memory64 on a 64-bit system, there
    // is nothing to do.
    if (Is64() == is_64bit_value) {
      if (slot.is_reg()) pinned->set(slot.reg());
      return slot;
    }

    // A 32-bit constant is always in range on a 64-bit system, and a
    // non-negative one also on a 32-bit system.
    if (slot.is_const() && (kIntPtrKind == kI32 || slot.i32_const() >= 0)) {
      return {kIntPtrKind, slot.i32_const(), 0};
    }

    // For memory32 on a 64-bit system, zero-extend the index.
    if constexpr (Is64()) {
      DCHECK(!is_64bit_value);
      LiftoffRegister reg = __ LoadToModifiableRegister(slot, *pinned);
      __ emit_u32_to_uintptr(reg.gp(), reg.gp());
      pinned->set(reg);
      return {kIntPtrKind, reg, 0};
    }

    // For memory64 on a 32-bit system, combine the high word into *high_word.
    DCHECK(is_64bit_value);
    LiftoffRegister reg = __ LoadToRegister(slot, *pinned);
    pinned->set(reg.low());
    if (*high_word == no_reg) {
      // Choose a register to hold the (combined) high word.
      *high_word =
          !pinned->has(reg.high()) && __ cache_state()->is_free(reg.high())
              ? reg.high_gp()
              : __ GetUnusedRegister(kGpReg, *pinned).gp();
      pinned->set(*high_word);
      if (*high_word != reg.high_gp()) {
        __ Move(*high_word, reg.high_gp(), kI32);
      }
    } else if (*high_word != reg.high_gp()) {
      // Combine the new high word into the existing one.
      __ emit_i32_or(*high_word, *high_word, reg.high_gp());
    }
    return {kIntPtrKind, reg.low(), 0};
  }
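  // On 32-bit targets, traps with "table out of bounds" if any collected high
  // word bits are set: a table index above 2^32 can never be in bounds.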
 
  void CheckHighWordEmptyForTableType(FullDecoder* decoder,
                                      const Register high_word,
                                      LiftoffRegList* pinned) {
    if constexpr (Is64()) {
      DCHECK_EQ(no_reg, high_word);
      return;
    }
    if (high_word == no_reg) return;

    OolTrapLabel trap =
        AddOutOfLineTrap(decoder, Builtin::kThrowWasmTrapTableOutOfBounds);
    __ emit_cond_jump(kNotZero, trap.label(), kI32, high_word, no_reg,
                      trap.frozen());
    pinned->clear(high_word);
  }
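  // Like PopIndexToVarState, but clamps a 64-bit index to kMaxUInt32 on
  // 32-bit targets instead of tracking the high word (saturating semantics).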
 
  VarState IndexToVarStateSaturating(int stack_index, LiftoffRegList* pinned) {
    DCHECK_LE(0, stack_index);
    DCHECK_LT(stack_index, __ cache_state()->stack_height());
    VarState& slot = __ cache_state()->stack_state.end()[-1 - stack_index];
    const bool is_mem64 = slot.kind() == kI64;
    // For memory32 on a 32-bit system or memory64 on a 64-bit system, there
    // is nothing to do.
    if (Is64() == is_mem64) {
      if (slot.is_reg()) pinned->set(slot.reg());
      return slot;
    }

    // A 32-bit constant is always in range on a 64-bit system, and a
    // non-negative one also on a 32-bit system.
    if (slot.is_const() && (kIntPtrKind == kI32 || slot.i32_const() >= 0)) {
      return {kIntPtrKind, slot.i32_const(), 0};
    }

    LiftoffRegister reg = __ LoadToModifiableRegister(slot, *pinned);
    // For memory32 on a 64-bit system, zero-extend the index.
    if constexpr (Is64()) {
      DCHECK(!is_mem64);  // Handled above.
      __ emit_u32_to_uintptr(reg.gp(), reg.gp());
      pinned->set(reg);
      return {kIntPtrKind, reg, 0};
    }

    // For memory64 on a 32-bit system, saturate the low word to kMaxUInt32 if
    // the high word is non-zero.
    pinned->set(reg.low());
    Label ok;
    FREEZE_STATE(frozen);
    __ emit_cond_jump(kZero, &ok, kI32, reg.high_gp(), no_reg, frozen);
    __ LoadConstant(reg.low(), WasmValue{kMaxUInt32});
    __ bind(&ok);
    return {kIntPtrKind, reg.low(), 0};
  }
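  // Saturating pop, plus debug-only helpers that assert the address type
  // (i32/i64) on the stack matches the memory or table being accessed.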
 
  VarState PopIndexToVarStateSaturating(LiftoffRegList* pinned) {
    VarState result = IndexToVarStateSaturating(0, pinned);
    __ DropValues(1);
    return result;
  }

  bool MatchingMemTypeOnTopOfStack(const WasmMemory* memory) {
    return MatchingAddressTypeOnTopOfStack(memory->is_memory64());
  }

  bool MatchingAddressTypeOnTopOfStack(bool expect_64bit_value) {
    DCHECK_LT(0, __ cache_state()->stack_height());
    ValueKind expected_kind = expect_64bit_value ? kI64 : kI32;
    DCHECK_EQ(expected_kind, __ cache_state()->stack_state.back().kind());
    return true;
  }

  bool MatchingMemType(const WasmMemory* memory, int stack_index) {
    DCHECK_LE(0, stack_index);
    DCHECK_LT(stack_index, __ cache_state()->stack_state.size());
    ValueKind expected_kind = memory->is_memory64() ? kI64 : kI32;
    DCHECK_EQ(expected_kind,
              __ cache_state()->stack_state.end()[-1 - stack_index].kind());
    return true;
  }
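  // `memory.init`: pops size/src/dst, checks the collected high word, then
  // calls the C function wasm_memory_init and traps on failure.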
 
  void MemoryInit(FullDecoder* decoder, const MemoryInitImmediate& imm,
                  const Value&, const Value&, const Value&) {
    Register mem_offsets_high_word = no_reg;
    LiftoffRegList pinned;
    VarState size = __ PopVarState();
    if (size.is_reg()) pinned.set(size.reg());
    VarState src = __ PopVarState();
    if (src.is_reg()) pinned.set(src.reg());
    DCHECK(MatchingMemTypeOnTopOfStack(imm.memory.memory));
    VarState dst = PopIndexToVarState(&mem_offsets_high_word, &pinned);

    Register instance_data = __ cache_state()->cached_instance_data;
    // ... (reload the instance data if it is not cached in a register)

    if (mem_offsets_high_word != no_reg) {
      // If the high word has any bit set, the index is out of bounds.
      OolTrapLabel trap =
          AddOutOfLineTrap(decoder, Builtin::kThrowWasmTrapMemOutOfBounds);
      __ emit_cond_jump(kNotZero, trap.label(), kI32, mem_offsets_high_word,
                        no_reg, trap.frozen());
      pinned.clear(mem_offsets_high_word);
    }

    LiftoffRegister result =
        GenerateCCall(kI32,
                      {{kIntPtrKind, LiftoffRegister{instance_data}, 0},
                       {kI32, static_cast<int32_t>(imm.memory.index), 0},
                       dst, src,
                       {kI32, static_cast<int32_t>(imm.data_segment.index), 0},
                       size},
                      ExternalReference::wasm_memory_init());
    OolTrapLabel trap =
        AddOutOfLineTrap(decoder, Builtin::kThrowWasmTrapMemOutOfBounds);
    __ emit_cond_jump(kEqual, trap.label(), kI32, result.gp(), no_reg,
                      trap.frozen());
  }
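  // `data.drop`: drops the segment by setting its entry in the data segment
  // sizes array to zero.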
 
  void DataDrop(FullDecoder* decoder, const IndexImmediate& imm) {
    LiftoffRegList pinned;

    Register seg_size_array =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
    // ... (load the data segment sizes array from the instance data)

    LiftoffRegister seg_index =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    // ... (load the scaled segment index)

    // Set the length of the segment to '0' to drop it.
    LiftoffRegister null_reg =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    __ LoadConstant(null_reg, WasmValue(0));
    __ Store(seg_size_array, seg_index.gp(), 0, null_reg, StoreType::kI32Store,
             pinned);
  }
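  // `memory.copy`: pops size/src/dst (using the smaller of the two memories'
  // address types for the size), then calls wasm_memory_copy and traps on an
  // out-of-bounds result.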
 
  void MemoryCopy(FullDecoder* decoder, const MemoryCopyImmediate& imm,
                  const Value&, const Value&, const Value&) {
    Register mem_offsets_high_word = no_reg;
    LiftoffRegList pinned;
    DCHECK(
        MatchingAddressTypeOnTopOfStack(imm.memory_dst.memory->is_memory64() &&
                                        imm.memory_src.memory->is_memory64()));
    VarState size = PopIndexToVarState(&mem_offsets_high_word, &pinned);
    DCHECK(MatchingMemTypeOnTopOfStack(imm.memory_src.memory));
    VarState src = PopIndexToVarState(&mem_offsets_high_word, &pinned);
    DCHECK(MatchingMemTypeOnTopOfStack(imm.memory_dst.memory));
    VarState dst = PopIndexToVarState(&mem_offsets_high_word, &pinned);

    Register instance_data = __ cache_state()->cached_instance_data;
    // ... (reload the instance data if it is not cached in a register)

    if (!Is64() && mem_offsets_high_word != no_reg) {
      // If any high word has bits set, the index is out of bounds.
      OolTrapLabel trap =
          AddOutOfLineTrap(decoder, Builtin::kThrowWasmTrapMemOutOfBounds);
      __ emit_cond_jump(kNotZero, trap.label(), kI32, mem_offsets_high_word,
                        no_reg, trap.frozen());
    }

    LiftoffRegister result =
        GenerateCCall(kI32,
                      {{kIntPtrKind, LiftoffRegister{instance_data}, 0},
                       {kI32, static_cast<int32_t>(imm.memory_dst.index), 0},
                       {kI32, static_cast<int32_t>(imm.memory_src.index), 0},
                       dst, src, size},
                      ExternalReference::wasm_memory_copy());
    OolTrapLabel trap =
        AddOutOfLineTrap(decoder, Builtin::kThrowWasmTrapMemOutOfBounds);
    __ emit_cond_jump(kEqual, trap.label(), kI32, result.gp(), no_reg,
                      trap.frozen());
  }
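  // `memory.fill`: analogous to memory.copy, calling wasm_memory_fill and
  // trapping on an out-of-bounds result.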
 
  void MemoryFill(FullDecoder* decoder, const MemoryIndexImmediate& imm,
                  const Value&, const Value&, const Value&) {
    Register mem_offsets_high_word = no_reg;
    LiftoffRegList pinned;
    DCHECK(MatchingMemTypeOnTopOfStack(imm.memory));
    VarState size = PopIndexToVarState(&mem_offsets_high_word, &pinned);
    VarState value = __ PopVarState();
    if (value.is_reg()) pinned.set(value.reg());
    DCHECK(MatchingMemTypeOnTopOfStack(imm.memory));
    VarState dst = PopIndexToVarState(&mem_offsets_high_word, &pinned);

    Register instance_data = __ cache_state()->cached_instance_data;
    // ... (reload the instance data if it is not cached in a register)

    if (mem_offsets_high_word != no_reg) {
      // If the high word has any bit set, the index is out of bounds.
      OolTrapLabel trap =
          AddOutOfLineTrap(decoder, Builtin::kThrowWasmTrapMemOutOfBounds);
      __ emit_cond_jump(kNotZero, trap.label(), kI32, mem_offsets_high_word,
                        no_reg, trap.frozen());
    }

    LiftoffRegister result =
        GenerateCCall(kI32,
                      {{kIntPtrKind, LiftoffRegister{instance_data}, 0},
                       {kI32, static_cast<int32_t>(imm.index), 0},
                       dst, value, size},
                      ExternalReference::wasm_memory_fill());
    OolTrapLabel trap =
        AddOutOfLineTrap(decoder, Builtin::kThrowWasmTrapMemOutOfBounds);
    __ emit_cond_jump(kEqual, trap.label(), kI32, result.gp(), no_reg,
                      trap.frozen());
  }
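  // Helpers to materialize Smi constants, either as an immediate (when Smis
  // fit in 32 bits) or in a register; used for builtin call parameters.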
 
  void LoadSmi(LiftoffRegister reg, int value) {
    intptr_t smi_value =
        static_cast<intptr_t>(value) << (kSmiTagSize + kSmiShiftSize);
    using smi_type = std::conditional_t<kSmiKind == kI32, int32_t, int64_t>;
    __ LoadConstant(reg, WasmValue{static_cast<smi_type>(smi_value)});
  }

  VarState LoadSmiConstant(int32_t constant, LiftoffRegList* pinned) {
    if constexpr (kSmiKind == kI32) {
      int32_t smi_const = static_cast<int32_t>(Smi::FromInt(constant).ptr());
      return VarState{kI32, smi_const, 0};
    } else {
      LiftoffRegister reg =
          pinned->set(__ GetUnusedRegister(kGpReg, *pinned));
      LoadSmi(reg, constant);
      return VarState{kSmiKind, reg, 0};
    }
  }
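  // `table.init`: pops size/src/dst, verifies the table64 high word, and
  // calls the WasmTableInit builtin.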
 
  void TableInit(FullDecoder* decoder, const TableInitImmediate& imm,
                 const Value&, const Value&, const Value&) {
    Register index_high_word = no_reg;
    LiftoffRegList pinned;

    VarState table_index = LoadSmiConstant(imm.table.index, &pinned);
    VarState segment_index =
        LoadSmiConstant(imm.element_segment.index, &pinned);
    VarState extract_shared_data = LoadSmiConstant(0, &pinned);

    VarState size = __ PopVarState();
    if (size.is_reg()) pinned.set(size.reg());
    VarState src = __ PopVarState();
    if (src.is_reg()) pinned.set(src.reg());
    VarState dst = PopIndexToVarState(&index_high_word, &pinned);

    // A table64 can only be initialized in bounds if the high word is zero.
    CheckHighWordEmptyForTableType(decoder, index_high_word, &pinned);

    CallBuiltin(
        Builtin::kWasmTableInit,
        MakeSig::Params(kIntPtrKind, kI32, kI32, kSmiKind, kSmiKind, kSmiKind),
        {dst, src, size, table_index, segment_index, extract_shared_data},
        decoder->position());

    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
  }
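  // `elem.drop`: marks the element segment as dropped by storing the empty
  // fixed array into its slot.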
 
  void ElemDrop(FullDecoder* decoder, const IndexImmediate& imm) {
    LiftoffRegList pinned;
    Register element_segments =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
    // ... (load the element segments array from the instance data)

    LiftoffRegister seg_index =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    // ... (load the scaled segment index)

    // Mark the segment as dropped by storing the empty fixed array in it.
    Register empty_fixed_array =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
    // ... (load the empty fixed array root)
    __ StoreTaggedPointer(element_segments, seg_index.gp(), 0,
                          empty_fixed_array, pinned);
  }
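  // `table.copy`: pops size/src/dst, verifies the table64 high words, and
  // calls the WasmTableCopy builtin.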
 
  void TableCopy(FullDecoder* decoder, const TableCopyImmediate& imm,
                 const Value&, const Value&, const Value&) {
    Register index_high_word = no_reg;
    LiftoffRegList pinned;

    VarState table_src_index = LoadSmiConstant(imm.table_src.index, &pinned);
    VarState table_dst_index = LoadSmiConstant(imm.table_dst.index, &pinned);
    VarState extract_shared_data = LoadSmiConstant(0, &pinned);

    VarState size = PopIndexToVarState(&index_high_word, &pinned);
    VarState src = PopIndexToVarState(&index_high_word, &pinned);
    VarState dst = PopIndexToVarState(&index_high_word, &pinned);

    // A table64 can only be accessed in bounds if the high word is zero.
    CheckHighWordEmptyForTableType(decoder, index_high_word, &pinned);

    CallBuiltin(
        Builtin::kWasmTableCopy,
        MakeSig::Params(kIntPtrKind, kIntPtrKind, kIntPtrKind, kSmiKind,
                        kSmiKind, kSmiKind),
        {dst, src, size, table_dst_index, table_src_index,
         extract_shared_data},
        decoder->position());

    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
  }
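  // `table.grow`: calls the WasmTableGrow builtin with a saturated delta; a
  // table64 result is widened to i64 before being pushed.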
 
  void TableGrow(FullDecoder* decoder, const TableIndexImmediate& imm,
                 const Value&, const Value&, Value* result) {
    LiftoffRegList pinned;

    LiftoffRegister table_index_reg =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    LoadSmi(table_index_reg, imm.index);
    VarState table_index(kSmiKind, table_index_reg, 0);
    // A saturated `delta` is good enough: if it is bigger than the table's
    // maximum, the builtin fails anyway.
    VarState delta = PopIndexToVarStateSaturating(&pinned);
    VarState value = __ PopVarState();
    VarState extract_shared_data{kI32, 0, 0};

    CallBuiltin(Builtin::kWasmTableGrow,
                MakeSig::Returns(kSmiKind).Params(kSmiKind, kIntPtrKind, kI32,
                                                  kRefNull),
                {table_index, delta, extract_shared_data, value},
                decoder->position());

    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    // ... (untag the Smi result)
    if (imm.table->is_table64()) {
      LiftoffRegister result64{kReturnRegister0};
      // ... (on 32-bit systems the i64 result occupies a register pair)
      __ emit_type_conversion(kExprI64SConvertI32, result64,
                              LiftoffRegister{kReturnRegister0}, nullptr);
      __ PushRegister(kI64, result64);
    } else {
      __ PushRegister(kI32, LiftoffRegister{kReturnRegister0});
    }
  }
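  // `table.size`: reads the current length directly from the WasmTableObject,
  // widening to i64 for table64.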
 
  void TableSize(FullDecoder* decoder, const TableIndexImmediate& imm, Value*) {
    // We have to look up instance->tables[table_index].length.
    LiftoffRegList pinned;
    Register tables = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
    // ... (load the tables array from the instance data)
    Register table = tables;
    __ LoadTaggedPointer(
        table, tables, no_reg,
        ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index));

    int length_field_size = WasmTableObject::kCurrentLengthOffsetEnd -
                            WasmTableObject::kCurrentLengthOffset + 1;
    Register result = table;
    __ Load(LiftoffRegister(result), table, no_reg,
            ObjectAccess::ToTagged(WasmTableObject::kCurrentLengthOffset),
            length_field_size == 4 ? LoadType::kI32Load : LoadType::kI64Load);
    // ... (untag the Smi length)

    if (imm.table->is_table64()) {
      LiftoffRegister result64 = LiftoffRegister(result);
      // ... (on 32-bit systems the i64 result occupies a register pair)
      __ emit_type_conversion(kExprI64SConvertI32, result64,
                              LiftoffRegister(result), nullptr);
      __ PushRegister(kI64, result64);
    } else {
      __ PushRegister(kI32, LiftoffRegister(result));
    }
  }
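  // `table.fill`: pops count/value/start, verifies the table64 high words,
  // and calls the WasmTableFill builtin.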
 
  void TableFill(FullDecoder* decoder, const TableIndexImmediate& imm,
                 const Value&, const Value&, const Value&) {
    Register high_words = no_reg;
    LiftoffRegList pinned;

    VarState table_index = LoadSmiConstant(imm.index, &pinned);
    VarState extract_shared_data{kI32, 0, 0};

    VarState count = PopIndexToVarState(&high_words, &pinned);
    VarState value = __ PopVarState();
    if (value.is_reg()) pinned.set(value.reg());
    VarState start = PopIndexToVarState(&high_words, &pinned);

    // A table64 can only be filled in bounds if the high words are zero.
    CheckHighWordEmptyForTableType(decoder, high_words, &pinned);

    CallBuiltin(
        Builtin::kWasmTableFill,
        MakeSig::Params(kIntPtrKind, kIntPtrKind, kI32, kSmiKind, kRefNull),
        {start, count, extract_shared_data, table_index, value},
        decoder->position());

    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
  }
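  // For a type with a custom descriptor, the RTT comes from the descriptor
  // value on the stack; otherwise the canonical RTT for the type is used.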
 
  LiftoffRegister GetRtt(FullDecoder* decoder, ModuleTypeIndex index,
                         const TypeDefinition& type,
                         const Value& descriptor_value) {
    if (!type.has_descriptor()) return RttCanon(index, {});
    return GetRttFromDescriptorOnStack(decoder, descriptor_value);
  }

  LiftoffRegister GetRttFromDescriptorOnStack(FullDecoder* decoder,
                                              const Value& descriptor_value) {
    LiftoffRegList pinned;
    LiftoffRegister descriptor = pinned.set(__ PopToRegister({}));
    auto [explicit_check, implicit_check] =
        null_checks_for_struct_op(descriptor_value.type, 0);
    if (explicit_check) {
      MaybeEmitNullCheck(decoder, descriptor.gp(), pinned,
                         descriptor_value.type);
    }
    LiftoffRegister rtt = __ GetUnusedRegister(kGpReg, pinned);
    LoadObjectField(decoder, rtt, descriptor.gp(), no_reg,
                    // ... (offset and kind of the described RTT field)
                    false, implicit_check, pinned);
    return rtt;
  }
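  // `struct.new` / `struct.new_default`: allocates the struct via a builtin,
  // then initializes all fields from last to first (so initial values can
  // simply be popped off the value stack).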
 
  void StructNew(FullDecoder* decoder, const StructIndexImmediate& imm,
                 const Value& descriptor, bool initial_values_on_stack) {
    const TypeDefinition& type = decoder->module_->type(imm.index);
    LiftoffRegister rtt = GetRtt(decoder, imm.index, type, descriptor);

    if (type.is_descriptor()) {
      CallBuiltin(Builtin::kWasmAllocateDescriptorStruct,
                  // ... (signature and arguments)
                  decoder->position());
    } else {
      CallBuiltin(Builtin::kWasmAllocateStructWithRtt,
                  // ... (signature and arguments)
                  decoder->position());
    }

    LiftoffRegister obj(kReturnRegister0);
    LiftoffRegList pinned{obj};

    for (uint32_t i = imm.struct_type->field_count(); i > 0;) {
      i--;
      int offset = StructFieldOffset(imm.struct_type, i);
      ValueType field_type = imm.struct_type->field(i);
      LiftoffRegister value = pinned.set(
          initial_values_on_stack
              ? __ PopToRegister(pinned)
              : __ GetUnusedRegister(reg_class_for(field_type.kind()),
                                     pinned));
      if (!initial_values_on_stack) {
        if (!CheckSupportedType(decoder, field_type.kind(), "default value")) {
          return;
        }
        SetDefaultValue(value, field_type);
      }

      StoreObjectField(decoder, obj.gp(), no_reg, offset, value, false, pinned,
                       field_type.kind());
      pinned.clear(value);
    }
    // If this assert fails, then initialization of padding fields might be
    // necessary.
    static_assert(Heap::kMinObjectSizeInTaggedWords == 2 &&
                      WasmStruct::kHeaderSize == 2 * kTaggedSize,
                  "empty struct might require initialization of padding field");
    __ PushRegister(kRef, obj);
  }

  void StructNew(FullDecoder* decoder, const StructIndexImmediate& imm,
                 const Value& descriptor, const Value args[], Value* result) {
    StructNew(decoder, imm, descriptor, true);
  }

  void StructNewDefault(FullDecoder* decoder, const StructIndexImmediate& imm,
                        const Value& descriptor, Value* result) {
    StructNew(decoder, imm, descriptor, false);
  }
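  // `struct.get` / `struct.set`: emit an explicit null check where required,
  // otherwise rely on the trap handler via the implicit-check flag.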
 
  void StructGet(FullDecoder* decoder, const Value& struct_obj,
                 const FieldImmediate& field, bool is_signed, Value* result) {
    const StructType* struct_type = field.struct_imm.struct_type;
    ValueKind field_kind = struct_type->field(field.field_imm.index).kind();
    if (!CheckSupportedType(decoder, field_kind, "field load")) return;
    int offset = StructFieldOffset(struct_type, field.field_imm.index);
    LiftoffRegList pinned;
    LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));

    auto [explicit_check, implicit_check] =
        null_checks_for_struct_op(struct_obj.type, field.field_imm.index);

    if (explicit_check) {
      MaybeEmitNullCheck(decoder, obj.gp(), pinned, struct_obj.type);
    }
    LiftoffRegister value =
        __ GetUnusedRegister(reg_class_for(unpacked(field_kind)), pinned);
    LoadObjectField(decoder, value, obj.gp(), no_reg, offset, field_kind,
                    is_signed, implicit_check, pinned);
    __ PushRegister(unpacked(field_kind), value);
  }

  void StructSet(FullDecoder* decoder, const Value& struct_obj,
                 const FieldImmediate& field, const Value& field_value) {
    const StructType* struct_type = field.struct_imm.struct_type;
    ValueKind field_kind = struct_type->field(field.field_imm.index).kind();
    int offset = StructFieldOffset(struct_type, field.field_imm.index);
    LiftoffRegList pinned;
    LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
    LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));

    auto [explicit_check, implicit_check] =
        null_checks_for_struct_op(struct_obj.type, field.field_imm.index);

    if (explicit_check) {
      MaybeEmitNullCheck(decoder, obj.gp(), pinned, struct_obj.type);
    }

    StoreObjectField(decoder, obj.gp(), no_reg, offset, value, implicit_check,
                     pinned, field_kind);
  }
 
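  // Shared implementation of array.new and array.new_default: traps if the
  // requested length is too large, allocates an uninitialized array via a
  // builtin, then delegates element initialization to ArrayFillImpl with
  // either the popped initial value or the element type's default value.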
  void ArrayNew(FullDecoder* decoder, const ArrayIndexImmediate& imm,
                bool initial_value_on_stack) {
      LiftoffRegister length =
          AddOutOfLineTrap(decoder, Builtin::kThrowWasmTrapArrayTooLarge);
    ValueType elem_type = imm.array_type->element_type();
      LiftoffRegister rtt = RttCanon(imm.index, {});
      CallBuiltin(Builtin::kWasmAllocateArray_Uninitialized,
                   __ cache_state()->stack_state.end()[-1],
                  decoder->position());
    LiftoffRegList pinned{obj};
    LiftoffRegister length = pinned.set(__ PopToModifiableRegister(pinned));
    LiftoffRegister value =
    if (initial_value_on_stack) {
      __ PopToFixedRegister(value);
    } else {
      if (!CheckSupportedType(decoder, elem_kind, "default value")) return;
      SetDefaultValue(value, elem_type);
    }
    LiftoffRegister index = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    __ LoadConstant(index, WasmValue(int32_t{0}));
    ArrayFillImpl(decoder, pinned, obj, index, value, length, elem_kind,
    __ PushRegister(kRef, obj);
  }

  void ArrayNew(FullDecoder* decoder, const ArrayIndexImmediate& imm,
                const Value& length_value, const Value& initial_value,
                Value* result) {
    ArrayNew(decoder, imm, true);
  }

  void ArrayNewDefault(FullDecoder* decoder, const ArrayIndexImmediate& imm,
                       const Value& length, Value* result) {
    ArrayNew(decoder, imm, false);
  }
 
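  // array.fill: peeks the operands to check index + length against the
  // array's length (trapping out of line on overflow or out-of-bounds),
  // then pops them and lets ArrayFillImpl emit the store loop.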
  void ArrayFill(FullDecoder* decoder, ArrayIndexImmediate& imm,
                 const Value& array, const Value&,
                 const Value&, const Value&) {
    {
      LiftoffRegList pinned;
      LiftoffRegister array_reg = pinned.set(__ PeekToRegister(3, pinned));
        MaybeEmitNullCheck(decoder, array_reg.gp(), pinned, array.type);
      LiftoffRegister array_length =
          pinned.set(__ GetUnusedRegister(kGpReg, pinned));
      bool implicit_null_check =
          array.type.is_nullable() &&
      LoadObjectField(decoder, array_length, array_reg.gp(), no_reg,
                      false, implicit_null_check, pinned);
      LiftoffRegister index = pinned.set(__ PeekToRegister(2, pinned));
      LiftoffRegister length = pinned.set(__ PeekToRegister(0, pinned));
      LiftoffRegister index_plus_length =
          pinned.set(__ GetUnusedRegister(kGpReg, pinned));
      DCHECK(index_plus_length != array_length);
      __ emit_i32_add(index_plus_length.gp(), length.gp(), index.gp());
          AddOutOfLineTrap(decoder, Builtin::kThrowWasmTrapArrayOutOfBounds);
                        index_plus_length.gp(), array_length.gp(),
                        index_plus_length.gp(), trap.frozen());
    }
    LiftoffRegList pinned;
    LiftoffRegister length = pinned.set(__ PopToModifiableRegister(pinned));
    LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
    LiftoffRegister index = pinned.set(__ PopToModifiableRegister(pinned));
    LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
    ArrayFillImpl(decoder, pinned, obj, index, value, length,
                  imm.array_type->element_type().kind(),
  }
 
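  // array.get / array.set / array.len: pop the operands, null- and
  // bounds-check the array, scale the index by the element size, and
  // access the element via LoadObjectField / StoreObjectField.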
  void ArrayGet(FullDecoder* decoder, const Value& array_obj,
                const ArrayIndexImmediate& imm, const Value& index_val,
                bool is_signed, Value* result) {
    LiftoffRegList pinned;
    LiftoffRegister index = pinned.set(__ PopToModifiableRegister(pinned));
    LiftoffRegister array = pinned.set(__ PopToRegister(pinned));
      MaybeEmitNullCheck(decoder, array.gp(), pinned, array_obj.type);
    bool implicit_null_check =
        array_obj.type.is_nullable() &&
    BoundsCheckArray(decoder, implicit_null_check, array, index, pinned);
    ValueKind elem_kind = imm.array_type->element_type().kind();
    if (!CheckSupportedType(decoder, elem_kind, "array load")) return;
    if (elem_size_shift != 0) {
      __ emit_i32_shli(index.gp(), index.gp(), elem_size_shift);
    }
    LiftoffRegister value =
    LoadObjectField(decoder, value, array.gp(), index.gp(),
  }

  void ArraySet(FullDecoder* decoder, const Value& array_obj,
                const ArrayIndexImmediate& imm, const Value& index_val,
                const Value& value_val) {
    LiftoffRegList pinned;
    LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
    LiftoffRegister index = pinned.set(__ PopToModifiableRegister(pinned));
    LiftoffRegister array = pinned.set(__ PopToRegister(pinned));
      MaybeEmitNullCheck(decoder, array.gp(), pinned, array_obj.type);
    bool implicit_null_check =
        array_obj.type.is_nullable() &&
    BoundsCheckArray(decoder, implicit_null_check, array, index, pinned);
    ValueKind elem_kind = imm.array_type->element_type().kind();
    if (elem_size_shift != 0) {
      __ emit_i32_shli(index.gp(), index.gp(), elem_size_shift);
    }
    StoreObjectField(decoder, array.gp(), index.gp(),
                     value, false, pinned, elem_kind);
  }

  void ArrayLen(FullDecoder* decoder, const Value& array_obj, Value* result) {
    LiftoffRegList pinned;
    LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
      MaybeEmitNullCheck(decoder, obj.gp(), pinned, array_obj.type);
    LiftoffRegister len = __ GetUnusedRegister(kGpReg, pinned);
    bool implicit_null_check =
        array_obj.type.is_nullable() &&
    LoadObjectField(decoder, len, obj.gp(), no_reg, kLengthOffset, kI32,
                    false, implicit_null_check, pinned);
    __ PushRegister(kI32, len);
  }
 
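  // array.copy: all five operands (dst, dst_index, src, src_index, length)
  // are still live on the Liftoff value stack, so they are handed to the
  // builtin directly as stack slots, reordered to match its parameter
  // order.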
  void ArrayCopy(FullDecoder* decoder, const Value& dst, const Value& dst_index,
                 const Value& src, const Value& src_index,
                 const ArrayIndexImmediate& src_imm, const Value& length) {
    CallBuiltin(Builtin::kWasmArrayCopy,
                {__ cache_state()->stack_state.end()[-4],
                 __ cache_state()->stack_state.end()[-2],
                 __ cache_state()->stack_state.end()[-1],
                 __ cache_state()->stack_state.end()[-5],
                 __ cache_state()->stack_state.end()[-3]},
                decoder->position());
  }

  void ArrayNewFixed(FullDecoder* decoder, const ArrayIndexImmediate& array_imm,
                     const IndexImmediate& length_imm,
                     const Value*, Value*) {
    LiftoffRegister rtt = RttCanon(array_imm.index, {});
    ValueKind elem_kind = array_imm.array_type->element_type().kind();
    int32_t elem_count = length_imm.index;
    CallBuiltin(Builtin::kWasmAllocateArray_Uninitialized,
                decoder->position());
    if (!CheckSupportedType(decoder, elem_kind, "array.new_fixed")) return;
    for (int i = elem_count - 1; i >= 0; i--) {
      LiftoffRegList pinned{array};
      LiftoffRegister element = pinned.set(__ PopToRegister(pinned));
      StoreObjectField(decoder, array.gp(), no_reg,
    }
    __ PushRegister(kRef, array);
  }
 
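  // array.new_data/elem and array.init_data/elem share one builtin each;
  // the segment index, an is-element flag, and an extract-shared-data flag
  // are passed as Smi arguments.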
  void ArrayNewSegment(FullDecoder* decoder,
                       const ArrayIndexImmediate& array_imm,
                       const IndexImmediate& segment_imm,
                       const Value&, const Value&,
                       Value* result) {
    LiftoffRegList pinned;
    LiftoffRegister rtt = pinned.set(RttCanon(array_imm.index, pinned));
    LiftoffRegister is_element_reg =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    LoadSmi(is_element_reg,
            array_imm.array_type->element_type().is_reference());
    LiftoffRegister extract_shared_data_reg =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    LoadSmi(extract_shared_data_reg, 0);
    CallBuiltin(
        Builtin::kWasmArrayNewSegment,
        {
            VarState{kI32, static_cast<int>(segment_imm.index), 0},
            __ cache_state()->stack_state.end()[-2],
            __ cache_state()->stack_state.end()[-1],
            VarState{kSmiKind, is_element_reg, 0},
            VarState{kSmiKind, extract_shared_data_reg, 0},
        },
        decoder->position());
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
  }

  void ArrayInitSegment(FullDecoder* decoder,
                        const ArrayIndexImmediate& array_imm,
                        const IndexImmediate& segment_imm,
                        const Value&, const Value&, const Value&,
                        const Value&) {
    LiftoffRegList pinned;
    LiftoffRegister segment_index_reg =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    LoadSmi(segment_index_reg, static_cast<int32_t>(segment_imm.index));
    LiftoffRegister is_element_reg =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    LoadSmi(is_element_reg,
            array_imm.array_type->element_type().is_reference());
    LiftoffRegister extract_shared_data_reg =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    LoadSmi(extract_shared_data_reg, 0);
    CallBuiltin(Builtin::kWasmArrayInitSegment,
                MakeSig::Params(kI32, kI32, kI32, kSmiKind, kSmiKind, kSmiKind,
                {__ cache_state()->stack_state.end()[-3],
                 __ cache_state()->stack_state.end()[-2],
                 __ cache_state()->stack_state.end()[-1],
                 VarState{kSmiKind, segment_index_reg, 0},
                 VarState{kSmiKind, is_element_reg, 0},
                 VarState{kSmiKind, extract_shared_data_reg, 0},
                 __ cache_state()->stack_state.end()[-4]},
                decoder->position());
  }
 
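  // ref.i31 and i31.get_s/get_u convert between a raw i32 and the Smi
  // encoding by shifting the payload across the Smi tag bit (with sign- or
  // zero-extension as appropriate).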
  void RefI31(FullDecoder* decoder, const Value& input, Value* result) {
    LiftoffRegister src = __ PopToRegister();
    LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {src}, {});
      __ emit_i64_sari(dst, dst, 1);
    __ PushRegister(kRef, dst);
  }

  void I31GetS(FullDecoder* decoder, const Value& input, Value* result) {
    LiftoffRegList pinned;
    LiftoffRegister src = pinned.set(__ PopToRegister());
    MaybeEmitNullCheck(decoder, src.gp(), pinned, input.type);
    LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {src}, {});
    __ PushRegister(kI32, dst);
  }

  void I31GetU(FullDecoder* decoder, const Value& input, Value* result) {
    LiftoffRegList pinned;
    LiftoffRegister src = pinned.set(__ PopToRegister());
    MaybeEmitNullCheck(decoder, src.gp(), pinned, input.type);
    LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {src}, {});
      __ emit_i64_shli(dst, src, 1);
    __ PushRegister(kI32, dst);
  }

  void RefGetDesc(FullDecoder* decoder, const Value& ref_val, Value* desc_val) {
    LiftoffRegList pinned;
    LiftoffRegister ref = pinned.set(__ PopToRegister());
    MaybeEmitNullCheck(decoder, ref.gp(), pinned, ref_val.type);
    LiftoffRegister value = __ GetUnusedRegister(kGpReg, pinned);
    __ LoadMap(value.gp(), ref.gp());
    LoadObjectField(
        decoder, value, value.gp(), no_reg,
        false, false, pinned);
    __ PushRegister(kRef, value);
  }
 
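  // Loads the canonical RTT (map) for the given type index; the result is
  // left in a freshly acquired GP register.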
  LiftoffRegister RttCanon(ModuleTypeIndex type_index, LiftoffRegList pinned) {
    LiftoffRegister rtt = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    __ LoadTaggedPointer(
        rtt.gp(), rtt.gp(), no_reg,
 
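  // Emits a type check of the object against rtt_reg, branching to no_match
  // on failure and falling through on success. Final or exact target types
  // only need a direct map comparison; otherwise the check walks the
  // supertypes list in the map's WasmTypeInfo, after first excluding
  // non-Wasm objects (by instance-type range) when casting from anyref.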
  enum NullSucceeds : bool {
    kNullSucceeds = true,
    kNullFails = false
  };

  void SubtypeCheck(const WasmModule* module, Register obj_reg,
                    ValueType obj_type, Register rtt_reg, HeapType target_type,
                    Register scratch_null, Register scratch2, Label* no_match,
                    NullSucceeds null_succeeds,
                    const FreezeCacheState& frozen) {
    if (module->type(target_type.ref_index()).is_final ||
        target_type.is_exact()) {
      __ emit_cond_jump(kEqual, &match, ValueKind::kRef, tmp1, rtt_reg, frozen);
      if (is_cast_from_any) {
                LoadType::kI32Load16U);
        __ emit_i32_subi(scratch2, scratch2, FIRST_WASM_OBJECT_TYPE);
                               LAST_WASM_OBJECT_TYPE - FIRST_WASM_OBJECT_TYPE,
          Map::kConstructorOrBackPointerOrNativeContextOffset);
      __ LoadTaggedPointer(tmp1, tmp1, no_reg, kTypeInfoOffset);
        LiftoffRegister list_length(scratch2);
        __ LoadSmiAsInt32(list_length, tmp1, offset);
                               list_length.gp(), rtt_depth, frozen);
      __ LoadTaggedPointer(
 
  void RefTest(FullDecoder* decoder, HeapType target_type, const Value& obj,
               Value* result_val, bool null_succeeds) {
    Label return_false, done;
    LiftoffRegList pinned;
    LiftoffRegister rtt_reg =
        pinned.set(RttCanon(target_type.ref_index(), pinned));
    LiftoffRegister obj_reg = pinned.set(__ PopToRegister(pinned));
    Register scratch_null =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
    LiftoffRegister result = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    if (obj.type.is_nullable()) {
      LoadNullValueForCompare(scratch_null, pinned, obj.type);
    }
      SubtypeCheck(decoder->module_, obj_reg.gp(), obj.type, rtt_reg.gp(),
                   target_type, scratch_null, result.gp(), &return_false,
      __ LoadConstant(result, WasmValue(1));
      __ emit_jump(&done);
      __ bind(&return_false);
      __ LoadConstant(result, WasmValue(0));
  }

  void RefTestAbstract(FullDecoder* decoder, const Value& obj, HeapType type,
                       Value* result_val, bool null_succeeds) {
    switch (type.representation()) {
        return AbstractTypeCheck<&LiftoffCompiler::EqCheck>(decoder, obj,
        return AbstractTypeCheck<&LiftoffCompiler::I31Check>(decoder, obj,
        return AbstractTypeCheck<&LiftoffCompiler::StructCheck>(decoder, obj,
        return AbstractTypeCheck<&LiftoffCompiler::ArrayCheck>(decoder, obj,
        return AbstractTypeCheck<&LiftoffCompiler::StringCheck>(decoder, obj,
        return EmitIsNull(kExprRefIsNull, obj.type);
 
  void RefCast(FullDecoder* decoder, const Value& obj, Value* result) {
    if (v8_flags.experimental_wasm_assume_ref_cast_succeeds) return;
    LiftoffRegister rtt = RttCanon(result->type.ref_index(), {});
    return RefCastImpl(decoder, result->type, obj, rtt);
  }

  void RefCastDesc(FullDecoder* decoder, const Value& obj, const Value& desc,
                   Value* result) {
    if (v8_flags.experimental_wasm_assume_ref_cast_succeeds) {
    LiftoffRegister rtt = GetRttFromDescriptorOnStack(decoder, desc);
    return RefCastImpl(decoder, result->type.AsExact(), obj, rtt);
  }

  void RefCastImpl(FullDecoder* decoder, ValueType target_type,
                   const Value& obj, LiftoffRegister rtt) {
    LiftoffRegList pinned{rtt};
    LiftoffRegister obj_reg = pinned.set(__ PopToRegister(pinned));
    Register scratch_null =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
    if (obj.type.is_nullable()) {
      LoadNullValueForCompare(scratch_null, pinned, obj.type);
    }
      NullSucceeds on_null =
          target_type.is_nullable() ? kNullSucceeds : kNullFails;
      OolTrapLabel trap =
          AddOutOfLineTrap(decoder, Builtin::kThrowWasmTrapIllegalCast);
      SubtypeCheck(decoder->module_, obj_reg.gp(), obj.type, rtt.gp(),
                   target_type.heap_type(), scratch_null, scratch2,
                   trap.label(), on_null, trap.frozen());
    __ PushRegister(obj.type.kind(), obj_reg);
  }

  void RefCastAbstract(FullDecoder* decoder, const Value& obj, HeapType type,
                       Value* result_val, bool null_succeeds) {
    switch (type.representation()) {
        return AssertNullTypecheck(decoder, obj, result_val);
 
  void BrOnCast(FullDecoder* decoder, HeapType target_type, const Value& obj,
                Value*, uint32_t depth, bool null_succeeds) {
    LiftoffRegister rtt = RttCanon(target_type.ref_index(), {});
    return BrOnCastImpl(decoder, target_type, obj, rtt, depth, null_succeeds);
  }

  void BrOnCastDesc(FullDecoder* decoder, HeapType target_type,
                    const Value& obj, const Value& descriptor,
                    Value*, uint32_t depth, bool null_succeeds) {
    LiftoffRegister rtt = GetRttFromDescriptorOnStack(decoder, descriptor);
    return BrOnCastImpl(decoder, target_type.AsExact(), obj, rtt, depth,
                        null_succeeds);
  }

  void BrOnCastImpl(FullDecoder* decoder, HeapType target_type,
                    const Value& obj, LiftoffRegister rtt, uint32_t depth,
                    bool null_succeeds) {
    LiftoffRegList pinned{rtt};
    if (depth != decoder->control_depth() - 1) {
      __ PrepareForBranch(decoder->control_at(depth)->br_merge()->arity,
                          pinned);
    }
    LiftoffRegister obj_reg = pinned.set(__ PeekToRegister(0, pinned));
    Register scratch_null =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
    if (obj.type.is_nullable()) {
      LoadNullValueForCompare(scratch_null, pinned, obj.type);
    }
    NullSucceeds null_handling = null_succeeds ? kNullSucceeds : kNullFails;
    SubtypeCheck(decoder->module_, obj_reg.gp(), obj.type, rtt.gp(),
                 target_type, scratch_null, scratch2, &cont_false,
                 null_handling, frozen);
    BrOrRet(decoder, depth);
    __ bind(&cont_false);
  }
 
  void BrOnCastFail(FullDecoder* decoder, HeapType target_type,
                    const Value& obj, Value*, uint32_t depth,
                    bool null_succeeds) {
    LiftoffRegister rtt = RttCanon(target_type.ref_index(), {});
    return BrOnCastFailImpl(decoder, target_type, obj, rtt, depth,
                            null_succeeds);
  }

  void BrOnCastDescFail(FullDecoder* decoder, HeapType target_type,
                        const Value& obj, const Value& descriptor,
                        Value*, uint32_t depth, bool null_succeeds) {
    LiftoffRegister rtt = GetRttFromDescriptorOnStack(decoder, descriptor);
    return BrOnCastFailImpl(decoder, target_type.AsExact(), obj, rtt, depth,
                            null_succeeds);
  }

  void BrOnCastFailImpl(FullDecoder* decoder, HeapType target_type,
                        const Value& obj, LiftoffRegister rtt, uint32_t depth,
                        bool null_succeeds) {
    LiftoffRegList pinned{rtt};
    if (depth != decoder->control_depth() - 1) {
      __ PrepareForBranch(decoder->control_at(depth)->br_merge()->arity,
                          pinned);
    }
    Label cont_branch, fallthrough;
    LiftoffRegister obj_reg = pinned.set(__ PeekToRegister(0, pinned));
    Register scratch_null =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
    if (obj.type.is_nullable()) {
      LoadNullValueForCompare(scratch_null, pinned, obj.type);
    }
    NullSucceeds null_handling = null_succeeds ? kNullSucceeds : kNullFails;
    SubtypeCheck(decoder->module_, obj_reg.gp(), obj.type, rtt.gp(),
                 target_type, scratch_null, scratch2, &cont_branch,
                 null_handling, frozen);
    __ emit_jump(&fallthrough);
    __ bind(&cont_branch);
    BrOrRet(decoder, depth);
    __ bind(&fallthrough);
  }
 
  void BrOnCastAbstract(FullDecoder* decoder, const Value& obj, HeapType type,
                        Value* result_on_branch, uint32_t depth,
                        bool null_succeeds) {
    switch (type.representation()) {
        return BrOnAbstractType<&LiftoffCompiler::EqCheck>(obj, decoder, depth,
        return BrOnAbstractType<&LiftoffCompiler::I31Check>(obj, decoder, depth,
        return BrOnAbstractType<&LiftoffCompiler::StructCheck>(
        return BrOnAbstractType<&LiftoffCompiler::ArrayCheck>(
        return BrOnAbstractType<&LiftoffCompiler::StringCheck>(
        return BrOnNull(decoder, obj, depth, true,
  }

  void BrOnCastFailAbstract(FullDecoder* decoder, const Value& obj,
                            HeapType type, Value* result_on_fallthrough,
                            uint32_t depth, bool null_succeeds) {
    switch (type.representation()) {
        return BrOnNonAbstractType<&LiftoffCompiler::EqCheck>(
        return BrOnNonAbstractType<&LiftoffCompiler::I31Check>(
        return BrOnNonAbstractType<&LiftoffCompiler::StructCheck>(
        return BrOnNonAbstractType<&LiftoffCompiler::ArrayCheck>(
        return BrOnNonAbstractType<&LiftoffCompiler::StringCheck>(
        return BrOnNonNull(decoder, obj, nullptr, depth,
 
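  // Common state for the abstract-type checks used by ref.test, ref.cast
  // and br_on_cast* with abstract target types. A single scratch register
  // serves both as the null sentinel (null_reg) and as the loaded instance
  // type (instance_type), since the two uses never overlap.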
    TypeCheck(ValueType obj_type, Label* no_match, bool null_succeeds,
    Register null_reg() { return tmp; }
    Register instance_type() { return tmp; }
  };

  enum PopOrPeek { kPop, kPeek };

  void Initialize(TypeCheck& check, FullDecoder* decoder, PopOrPeek pop_or_peek,
                  ValueType type) {
    LiftoffRegList pinned;
    if (pop_or_peek == kPop) {
      check.obj_reg = pinned.set(__ PopToRegister(pinned)).gp();
    } else {
      check.obj_reg = pinned.set(__ PeekToRegister(0, pinned)).gp();
    }
    check.tmp = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
    if (check.obj_type.is_nullable()) {
      LoadNullValue(check.null_reg(), type);
    }
    if (check.no_match == nullptr) {
      check.trap.emplace(AddOutOfLineTrap(decoder, check.no_match_trap));
      check.no_match = check.trap->label();
    }
  }

  void LoadInstanceType(TypeCheck& check, const FreezeCacheState& frozen,
                        Label* on_smi) {
    if (!check.null_succeeds && check.obj_type.is_nullable()) {
                        check.null_reg(), frozen);
    }
    __ LoadMap(check.instance_type(), check.obj_reg);
    __ Load(LiftoffRegister(check.instance_type()), check.instance_type(),
            LoadType::kI32Load16U);
  }

  void StructCheck(TypeCheck& check, const FreezeCacheState& frozen) {
    LoadInstanceType(check, frozen, check.no_match);
    LiftoffRegister instance_type(check.instance_type());
    __ emit_i32_cond_jumpi(kNotEqual, check.no_match, check.instance_type(),
                           WASM_STRUCT_TYPE, frozen);
  }

  void ArrayCheck(TypeCheck& check, const FreezeCacheState& frozen) {
    LoadInstanceType(check, frozen, check.no_match);
    LiftoffRegister instance_type(check.instance_type());
    __ emit_i32_cond_jumpi(kNotEqual, check.no_match, check.instance_type(),
                           WASM_ARRAY_TYPE, frozen);
  }

  void I31Check(TypeCheck& check, const FreezeCacheState& frozen) {
    __ emit_smi_check(check.obj_reg, check.no_match,
  }

  void EqCheck(TypeCheck& check, const FreezeCacheState& frozen) {
    LoadInstanceType(check, frozen, &match);
    Register tmp = check.instance_type();
    __ emit_i32_subi(tmp, tmp, FIRST_WASM_OBJECT_TYPE);
                           LAST_WASM_OBJECT_TYPE - FIRST_WASM_OBJECT_TYPE,
  }

  void StringCheck(TypeCheck& check, const FreezeCacheState& frozen) {
    LoadInstanceType(check, frozen, check.no_match);
    LiftoffRegister instance_type(check.instance_type());
  }

  using TypeChecker = void (LiftoffCompiler::*)(TypeCheck& check,
                                                const FreezeCacheState& frozen);
 
  template <TypeChecker type_checker>
  void AbstractTypeCheck(FullDecoder* decoder, const Value& object,
                         bool null_succeeds) {
    Initialize(check, decoder, kPop, object.type);
    LiftoffRegister result(check.tmp);
                          check.null_reg(), frozen);
      (this->*type_checker)(check, frozen);
      __ LoadConstant(result, WasmValue(1));
      __ emit_jump(&done);
      __ LoadConstant(result, WasmValue(0));
  }

  template <TypeChecker type_checker>
                    Builtin::kThrowWasmTrapIllegalCast);
    Initialize(check, decoder, kPeek, object.type);
                        check.null_reg(), frozen);
    (this->*type_checker)(check, frozen);
  }

  template <TypeChecker type_checker>
  void BrOnAbstractType(const Value& object, FullDecoder* decoder,
                        uint32_t br_depth, bool null_succeeds) {
    if (br_depth != decoder->control_depth() - 1) {
      __ PrepareForBranch(decoder->control_at(br_depth)->br_merge()->arity, {});
    }
    Initialize(check, decoder, kPeek, object.type);
                        check.null_reg(), frozen);
    (this->*type_checker)(check, frozen);
    BrOrRet(decoder, br_depth);
  }

  template <TypeChecker type_checker>
  void BrOnNonAbstractType(const Value& object, FullDecoder* decoder,
                           uint32_t br_depth, bool null_succeeds) {
    if (br_depth != decoder->control_depth() - 1) {
      __ PrepareForBranch(decoder->control_at(br_depth)->br_merge()->arity, {});
    }
    Initialize(check, decoder, kPeek, object.type);
    (this->*type_checker)(check, frozen);
    BrOrRet(decoder, br_depth);
  }
 
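  // The string.* operations below share one pattern: pop or peek the
  // operands (null-checking reference arguments), invoke the matching
  // builtin with the operands passed as VarStates, record a debug side
  // table entry for the spilled state, and push the builtin's result.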
  void StringNewWtf8(FullDecoder* decoder, const MemoryIndexImmediate& imm,
                     const unibrow::Utf8Variant variant, const Value& offset,
                     const Value& size, Value* result) {
    LiftoffRegList pinned;
    VarState memory_var{kI32, static_cast<int>(imm.index), 0};
    LiftoffRegister variant_reg =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    LoadSmi(variant_reg, static_cast<int32_t>(variant));
    VarState variant_var(kSmiKind, variant_reg, 0);
    VarState& size_var = __ cache_state()->stack_state.end()[-1];
    DCHECK(MatchingMemType(imm.memory, 1));
    VarState address = IndexToVarStateSaturating(1, &pinned);
    CallBuiltin(
        Builtin::kWasmStringNewWtf8,
        {address, size_var, memory_var, variant_var}, decoder->position());
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    __ PushRegister(kRef, result_reg);
  }

  void StringNewWtf8Array(FullDecoder* decoder,
                          const unibrow::Utf8Variant variant,
                          const Value& array, const Value& start,
                          const Value& end, Value* result) {
    LiftoffRegList pinned;
    LiftoffRegister array_reg = pinned.set(
    MaybeEmitNullCheck(decoder, array_reg.gp(), pinned, array.type);
    LiftoffRegister variant_reg =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    LoadSmi(variant_reg, static_cast<int32_t>(variant));
    VarState variant_var(kSmiKind, variant_reg, 0);
    CallBuiltin(Builtin::kWasmStringNewWtf8Array,
                    __ cache_state()->stack_state.end()[-2],
                    __ cache_state()->stack_state.end()[-1],
                decoder->position());
    __ cache_state()->stack_state.pop_back(3);
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    __ PushRegister(kRef, result_reg);
  }

  void StringNewWtf16(FullDecoder* decoder, const MemoryIndexImmediate& imm,
                      const Value& offset, const Value& size, Value* result) {
    VarState& size_var = __ cache_state()->stack_state.end()[-1];
    LiftoffRegList pinned;
    DCHECK(MatchingMemType(imm.memory, 1));
    VarState address = IndexToVarStateSaturating(1, &pinned);
    CallBuiltin(Builtin::kWasmStringNewWtf16,
                MakeSig::Returns(kRef).Params(kI32, kIntPtrKind, kI32),
                {memory_var, address, size_var}, decoder->position());
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    __ PushRegister(kRef, result_reg);
  }
 
  void StringNewWtf16Array(FullDecoder* decoder, const Value& array,
                           const Value& start, const Value& end,
                           Value* result) {
    LiftoffRegList pinned;
    LiftoffRegister array_reg = pinned.set(
    MaybeEmitNullCheck(decoder, array_reg.gp(), pinned, array.type);
    CallBuiltin(Builtin::kWasmStringNewWtf16Array,
                    __ cache_state()->stack_state.end()[-2],
                    __ cache_state()->stack_state.end()[-1],
                decoder->position());
    __ cache_state()->stack_state.pop_back(3);
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    __ PushRegister(kRef, result_reg);
  }

  void StringConst(FullDecoder* decoder, const StringConstImmediate& imm,
                   Value* result) {
    CallBuiltin(Builtin::kWasmStringConst, MakeSig::Returns(kRef).Params(kI32),
                {index_var}, decoder->position());
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    __ PushRegister(kRef, result_reg);
  }

  void StringMeasureWtf8(FullDecoder* decoder,
                         const unibrow::Utf8Variant variant, const Value& str,
                         Value* result) {
    LiftoffRegList pinned;
    LiftoffRegister string_reg = pinned.set(__ PopToRegister(pinned));
    MaybeEmitNullCheck(decoder, string_reg.gp(), pinned, str.type);
    Builtin builtin;
    switch (variant) {
      case unibrow::Utf8Variant::kUtf8:
        builtin = Builtin::kWasmStringMeasureUtf8;
        break;
      case unibrow::Utf8Variant::kWtf8:
        builtin = Builtin::kWasmStringMeasureWtf8;
        break;
      case unibrow::Utf8Variant::kUtf8NoTrap:
    }
    CallBuiltin(builtin, MakeSig::Returns(kI32).Params(kRef),
                decoder->position());
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    __ PushRegister(kI32, result_reg);
  }

  void StringMeasureWtf16(FullDecoder* decoder, const Value& str,
                          Value* result) {
    LiftoffRegList pinned;
    LiftoffRegister string_reg = pinned.set(__ PopToRegister(pinned));
    MaybeEmitNullCheck(decoder, string_reg.gp(), pinned, str.type);
    LiftoffRegister value = __ GetUnusedRegister(kGpReg, pinned);
    LoadObjectField(decoder, value, string_reg.gp(), no_reg,
                    ValueKind::kI32, false,
    __ PushRegister(kI32, value);
  }
 
  void StringEncodeWtf8(FullDecoder* decoder, const MemoryIndexImmediate& imm,
                        const unibrow::Utf8Variant variant, const Value& str,
                        const Value& offset, Value* result) {
    LiftoffRegList pinned;
    DCHECK(MatchingMemType(imm.memory, 0));
    VarState offset_var = IndexToVarStateSaturating(0, &pinned);
    LiftoffRegister string_reg = pinned.set(
    MaybeEmitNullCheck(decoder, string_reg.gp(), pinned, str.type);
    CallBuiltin(Builtin::kWasmStringEncodeWtf8,
                {offset_var, memory_var, variant_var, string_var},
                decoder->position());
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    __ PushRegister(kI32, result_reg);
  }

  void StringEncodeWtf8Array(FullDecoder* decoder,
                             const unibrow::Utf8Variant variant,
                             const Value& str, const Value& array,
                             const Value& start, Value* result) {
    LiftoffRegList pinned;
    LiftoffRegister array_reg = pinned.set(
    MaybeEmitNullCheck(decoder, array_reg.gp(), pinned, array.type);
    LiftoffRegister string_reg = pinned.set(
    MaybeEmitNullCheck(decoder, string_reg.gp(), pinned, str.type);
    VarState& start_var = __ cache_state()->stack_state.end()[-1];
    LiftoffRegister variant_reg =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    LoadSmi(variant_reg, static_cast<int32_t>(variant));
    VarState variant_var(kSmiKind, variant_reg, 0);
    CallBuiltin(Builtin::kWasmStringEncodeWtf8Array,
                decoder->position());
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    __ PushRegister(kI32, result_reg);
  }

  void StringEncodeWtf16(FullDecoder* decoder, const MemoryIndexImmediate& imm,
                         const Value& str, const Value& offset, Value* result) {
    LiftoffRegList pinned;
    DCHECK(MatchingMemType(imm.memory, 0));
    VarState offset_var = IndexToVarStateSaturating(0, &pinned);
    LiftoffRegister string_reg = pinned.set(
    MaybeEmitNullCheck(decoder, string_reg.gp(), pinned, str.type);
    CallBuiltin(Builtin::kWasmStringEncodeWtf16,
                MakeSig::Returns(kI32).Params(kRef, kIntPtrKind, kI32),
                {string_var, offset_var, memory_var}, decoder->position());
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    __ PushRegister(kI32, result_reg);
  }

  void StringEncodeWtf16Array(FullDecoder* decoder, const Value& str,
                              const Value& array, const Value& start,
                              Value* result) {
    LiftoffRegList pinned;
    LiftoffRegister array_reg = pinned.set(
    MaybeEmitNullCheck(decoder, array_reg.gp(), pinned, array.type);
    LiftoffRegister string_reg = pinned.set(
    MaybeEmitNullCheck(decoder, string_reg.gp(), pinned, str.type);
    VarState& start_var = __ cache_state()->stack_state.end()[-1];
    CallBuiltin(Builtin::kWasmStringEncodeWtf16Array,
                decoder->position());
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    __ PushRegister(kI32, result_reg);
  }
 
  void StringConcat(FullDecoder* decoder, const Value& head, const Value& tail,
                    Value* result) {
    LiftoffRegList pinned;
    LiftoffRegister tail_reg = pinned.set(__ PopToRegister(pinned));
    MaybeEmitNullCheck(decoder, tail_reg.gp(), pinned, tail.type);
    LiftoffRegister head_reg = pinned.set(__ PopToRegister(pinned));
    MaybeEmitNullCheck(decoder, head_reg.gp(), pinned, head.type);
    CallBuiltin(Builtin::kWasmStringConcat,
                decoder->position());
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    __ PushRegister(kRef, result_reg);
  }

  void StringEq(FullDecoder* decoder, const Value& a, const Value& b,
                Value* result) {
    LiftoffRegList pinned{result_reg};
    LiftoffRegister b_reg = pinned.set(__ PopToModifiableRegister(pinned));
    LiftoffRegister a_reg = pinned.set(__ PopToModifiableRegister(pinned));
    __ SpillAllRegisters();
      LiftoffRegister null = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
      bool check_for_null = a.type.is_nullable() || b.type.is_nullable();
      if (check_for_null) {
      __ LoadConstant(result_reg, WasmValue(int32_t{1}));
      if (check_for_null) {
        __ LoadConstant(result_reg, WasmValue(int32_t{0}));
        if (a.type.is_nullable()) {
        if (b.type.is_nullable()) {
    CallBuiltin(Builtin::kWasmStringEqual,
                decoder->position());
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    __ PushRegister(kI32, result_reg);
  }

  void StringIsUSVSequence(FullDecoder* decoder, const Value& str,
                           Value* result) {
    LiftoffRegList pinned;
    LiftoffRegister str_reg = pinned.set(__ PopToRegister(pinned));
    MaybeEmitNullCheck(decoder, str_reg.gp(), pinned, str.type);
    CallBuiltin(Builtin::kWasmStringIsUSVSequence,
                MakeSig::Returns(kI32).Params(kRef),
                decoder->position());
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    __ PushRegister(kI32, result_reg);
  }

  void StringAsWtf8(FullDecoder* decoder, const Value& str, Value* result) {
    LiftoffRegList pinned;
    LiftoffRegister str_reg = pinned.set(__ PopToRegister(pinned));
    MaybeEmitNullCheck(decoder, str_reg.gp(), pinned, str.type);
    CallBuiltin(Builtin::kWasmStringAsWtf8, MakeSig::Returns(kRef).Params(kRef),
                decoder->position());
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    __ PushRegister(kRef, result_reg);
  }
 
  void StringViewWtf8Advance(FullDecoder* decoder, const Value& view,
                             const Value& pos, const Value& bytes,
                             Value* result) {
    LiftoffRegList pinned;
    VarState& bytes_var = __ cache_state()->stack_state.end()[-1];
    VarState& pos_var = __ cache_state()->stack_state.end()[-2];
    LiftoffRegister view_reg = pinned.set(
    MaybeEmitNullCheck(decoder, view_reg.gp(), pinned, view.type);
    CallBuiltin(Builtin::kWasmStringViewWtf8Advance,
                decoder->position());
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    __ PushRegister(kI32, result_reg);
  }

  void StringViewWtf8Encode(FullDecoder* decoder,
                            const MemoryIndexImmediate& imm,
                            const unibrow::Utf8Variant variant,
                            const Value& view, const Value& addr,
                            const Value& pos, const Value& bytes,
                            Value* next_pos, Value* bytes_written) {
    LiftoffRegList pinned;
    VarState& bytes_var = __ cache_state()->stack_state.end()[-1];
    VarState& pos_var = __ cache_state()->stack_state.end()[-2];
    DCHECK(MatchingMemType(imm.memory, 2));
    VarState addr_var = IndexToVarStateSaturating(2, &pinned);
    LiftoffRegister view_reg = pinned.set(
    MaybeEmitNullCheck(decoder, view_reg.gp(), pinned, view.type);
    LiftoffRegister memory_reg =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    LoadSmi(memory_reg, imm.index);
    VarState memory_var(kSmiKind, memory_reg, 0);
    LiftoffRegister variant_reg =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    LoadSmi(variant_reg, static_cast<int32_t>(variant));
    VarState variant_var(kSmiKind, variant_reg, 0);
    CallBuiltin(
        Builtin::kWasmStringViewWtf8Encode,
            .Params(kIntPtrKind, kI32, kI32, kRef, kSmiKind, kSmiKind),
        {addr_var, pos_var, bytes_var, view_var, memory_var, variant_var},
        decoder->position());
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    __ PushRegister(kI32, next_pos_reg);
    __ PushRegister(kI32, bytes_written_reg);
  }

  void StringViewWtf8Slice(FullDecoder* decoder, const Value& view,
                           const Value& start, const Value& end,
                           Value* result) {
    LiftoffRegList pinned;
    VarState& end_var = __ cache_state()->stack_state.end()[-1];
    VarState& start_var = __ cache_state()->stack_state.end()[-2];
    LiftoffRegister view_reg = pinned.set(
    MaybeEmitNullCheck(decoder, view_reg.gp(), pinned, view.type);
    CallBuiltin(Builtin::kWasmStringViewWtf8Slice,
                decoder->position());
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    __ PushRegister(kRef, result_reg);
  }
 
  void StringAsWtf16(FullDecoder* decoder, const Value& str, Value* result) {
    LiftoffRegList pinned;
    LiftoffRegister str_reg = pinned.set(__ PopToRegister(pinned));
    MaybeEmitNullCheck(decoder, str_reg.gp(), pinned, str.type);
    CallBuiltin(Builtin::kWasmStringAsWtf16,
                MakeSig::Returns(kRef).Params(kRef),
                decoder->position());
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    __ PushRegister(kRef, result_reg);
  }

  void StringViewWtf16GetCodeUnit(FullDecoder* decoder, const Value& view,
                                  const Value& pos, Value* result) {
    LiftoffRegList pinned;
    LiftoffRegister pos_reg = pinned.set(__ PopToRegister(pinned));
    LiftoffRegister view_reg = pinned.set(__ PopToRegister(pinned));
    MaybeEmitNullCheck(decoder, view_reg.gp(), pinned, view.type);
    CallBuiltin(Builtin::kWasmStringViewWtf16GetCodeUnit,
                MakeSig::Returns(kI32).Params(kRef, kI32), {view_var, pos_var},
                decoder->position());
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    __ PushRegister(kI32, result_reg);
  }

  void StringViewWtf16Encode(FullDecoder* decoder,
                             const MemoryIndexImmediate& imm, const Value& view,
                             const Value& codeunits, Value* result) {
    LiftoffRegList pinned;
    VarState& codeunits_var = __ cache_state()->stack_state.end()[-1];
    VarState& pos_var = __ cache_state()->stack_state.end()[-2];
    DCHECK(MatchingMemType(imm.memory, 2));
    VarState offset_var = IndexToVarStateSaturating(2, &pinned);
    LiftoffRegister view_reg = pinned.set(
    MaybeEmitNullCheck(decoder, view_reg.gp(), pinned, view.type);
    LiftoffRegister memory_reg =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    LoadSmi(memory_reg, imm.index);
    VarState memory_var(kSmiKind, memory_reg, 0);
    CallBuiltin(
        Builtin::kWasmStringViewWtf16Encode,
        {offset_var, pos_var, codeunits_var, view_var, memory_var},
        decoder->position());
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    __ PushRegister(kI32, result_reg);
  }

  void StringViewWtf16Slice(FullDecoder* decoder, const Value& view,
                            const Value& start, const Value& end,
                            Value* result) {
    LiftoffRegList pinned;
    LiftoffRegister end_reg = pinned.set(__ PopToRegister(pinned));
    LiftoffRegister start_reg = pinned.set(__ PopToRegister(pinned));
    LiftoffRegister view_reg = pinned.set(__ PopToRegister(pinned));
    MaybeEmitNullCheck(decoder, view_reg.gp(), pinned, view.type);
    CallBuiltin(Builtin::kWasmStringViewWtf16Slice,
                decoder->position());
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    __ PushRegister(kRef, result_reg);
  }
 
  void StringAsIter(FullDecoder* decoder, const Value& str, Value* result) {
    LiftoffRegList pinned;
    LiftoffRegister str_reg = pinned.set(__ PopToRegister(pinned));
    MaybeEmitNullCheck(decoder, str_reg.gp(), pinned, str.type);
    CallBuiltin(Builtin::kWasmStringAsIter, MakeSig::Returns(kRef).Params(kRef),
                decoder->position());
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    __ PushRegister(kRef, result_reg);
  }

  void StringViewIterNext(FullDecoder* decoder, const Value& view,
                          Value* result) {
    LiftoffRegList pinned;
    LiftoffRegister view_reg = pinned.set(__ PopToRegister(pinned));
    MaybeEmitNullCheck(decoder, view_reg.gp(), pinned, view.type);
    CallBuiltin(Builtin::kWasmStringViewIterNext,
                MakeSig::Returns(kI32).Params(kRef),
                decoder->position());
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    __ PushRegister(kI32, result_reg);
  }

  void StringViewIterAdvance(FullDecoder* decoder, const Value& view,
                             const Value& codepoints, Value* result) {
    LiftoffRegList pinned;
    VarState& codepoints_var = __ cache_state()->stack_state.end()[-1];
    LiftoffRegister view_reg = pinned.set(
    MaybeEmitNullCheck(decoder, view_reg.gp(), pinned, view.type);
    CallBuiltin(Builtin::kWasmStringViewIterAdvance,
                decoder->position());
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    __ PushRegister(kI32, result_reg);
  }

  void StringViewIterRewind(FullDecoder* decoder, const Value& view,
                            const Value& codepoints, Value* result) {
    LiftoffRegList pinned;
    VarState& codepoints_var = __ cache_state()->stack_state.end()[-1];
    LiftoffRegister view_reg = pinned.set(
    MaybeEmitNullCheck(decoder, view_reg.gp(), pinned, view.type);
    CallBuiltin(Builtin::kWasmStringViewIterRewind,
                decoder->position());
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    __ PushRegister(kI32, result_reg);
  }

  void StringViewIterSlice(FullDecoder* decoder, const Value& view,
                           const Value& codepoints, Value* result) {
    LiftoffRegList pinned;
    VarState& codepoints_var = __ cache_state()->stack_state.end()[-1];
    LiftoffRegister view_reg = pinned.set(
    MaybeEmitNullCheck(decoder, view_reg.gp(), pinned, view.type);
    CallBuiltin(Builtin::kWasmStringViewIterSlice,
                decoder->position());
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    __ PushRegister(kRef, result_reg);
  }
 
  void StringCompare(FullDecoder* decoder, const Value& lhs, const Value& rhs,
                     Value* result) {
    LiftoffRegList pinned;
    LiftoffRegister rhs_reg = pinned.set(
    MaybeEmitNullCheck(decoder, rhs_reg.gp(), pinned, rhs.type);
    LiftoffRegister lhs_reg = pinned.set(
    MaybeEmitNullCheck(decoder, lhs_reg.gp(), pinned, lhs.type);
    CallBuiltin(Builtin::kStringCompare,
                MakeSig::Returns(kSmiKind).Params(kRef, kRef),
                {lhs_var, rhs_var}, decoder->position());
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    __ PushRegister(kI32, result_reg);
  }

  void StringFromCodePoint(FullDecoder* decoder, const Value& code_point,
                           Value* result) {
    VarState& codepoint_var = __ cache_state()->stack_state.end()[-1];
    CallBuiltin(Builtin::kWasmStringFromCodePoint,
                MakeSig::Returns(kRef).Params(kI32), {codepoint_var},
                decoder->position());
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    __ PushRegister(kRef, result_reg);
  }

  void StringHash(FullDecoder* decoder, const Value& string, Value* result) {
    LiftoffRegList pinned;
    LiftoffRegister string_reg = pinned.set(
    MaybeEmitNullCheck(decoder, string_reg.gp(), pinned, string.type);
    CallBuiltin(Builtin::kWasmStringHash, MakeSig::Returns(kI32).Params(kRef),
                {string_var}, decoder->position());
    __ PushRegister(kI32, result_reg);
  }

  void Forward(FullDecoder* decoder, const Value& from, Value* to) {
  }
 
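  // Direct calls: functions imported into the module are dispatched
  // through the dispatch table for imports (an indirect call), while
  // module-internal functions are called or tail-called natively; a
  // counter Smi in the feedback vector is also incremented for the
  // benefit of later inlining decisions.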
  void CallDirect(FullDecoder* decoder, const CallFunctionImmediate& imm,
                  const Value args[], Value returns[],
    MostlySmallValueKindSig sig(zone_, imm.sig);
      if (!CheckSupportedType(decoder, ret, "return")) return;
    bool needs_indirect_call =
        imm.index < env_->module->num_imported_functions;
    call_descriptor = GetLoweredCallDescriptor(zone_, call_descriptor);
    if (needs_indirect_call) {
      LiftoffRegList pinned;
          pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
                                          DispatchTableForImports, pinned);
        __ LoadProtectedPointer(
            implicit_arg, dispatch_table,
            target, dispatch_table,
      __ PrepareCall(&sig, call_descriptor, &target, implicit_arg);
            static_cast<int>(call_descriptor->ParameterSlotCount()),
                call_descriptor->GetStackParameterDelta(descriptor_)));
        __ TailCallIndirect(call_descriptor, target);
            __ pc_offset(), SourcePosition(decoder->position()), true);
        __ CallIndirect(&sig, call_descriptor, target);
        FinishCall(decoder, &sig, call_descriptor);
    } else {
        LiftoffRegister vector = __ GetUnusedRegister(kGpReg, {});
        __ IncrementSmi(vector,
                            static_cast<int>(vector_slot)));
      __ PrepareCall(&sig, call_descriptor);
            static_cast<int>(call_descriptor->ParameterSlotCount()),
                call_descriptor->GetStackParameterDelta(descriptor_)));
        __ TailCallNativeWasmCode(addr);
            __ pc_offset(), SourcePosition(decoder->position()), true);
        __ CallNativeWasmCode(addr);
        FinishCall(decoder, &sig, call_descriptor);
    }
  }
 
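  // call_indirect: after an optional deopt point, loads the dispatch
  // table, bounds-checks the table index, checks the entry's canonical
  // signature id (walking the supertype chain for non-final types) and/or
  // checks for a null entry, then loads target and implicit argument from
  // the dispatch table entry and emits the (tail) call.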
  void CallIndirectImpl(FullDecoder* decoder, const CallIndirectImmediate& imm,
    MostlySmallValueKindSig sig(zone_, imm.sig);
      if (!CheckSupportedType(decoder, ret, "return")) return;
    const WasmTable* table = imm.table_imm.table;
    if (deopt_info_bytecode_offset_ == decoder->pc_offset() &&
      EmitDeoptPoint(decoder);
    }
    LiftoffRegList pinned;
    VarState index_slot = IndexToVarStateSaturating(0, &pinned);
    const bool is_static_index = index_slot.is_const();
        table->has_maximum_size
    const bool statically_oob =
    TempRegisterScope temps;
    pinned |= temps.AddTempRegisters(3, kGpReg, &asm_, pinned);
    ScopedTempRegister dispatch_table{temps, kGpReg};
    if (imm.table_imm.index == 0) {
      LOAD_PROTECTED_PTR_INSTANCE_FIELD(dispatch_table.gp_reg(), DispatchTable0,
                                        pinned);
    } else {
      Register dispatch_tables = dispatch_table.gp_reg();
      LOAD_PROTECTED_PTR_INSTANCE_FIELD(dispatch_tables, DispatchTables,
                                        pinned);
      __ LoadProtectedPointer(dispatch_table.gp_reg(), dispatch_tables,
                              ObjectAccess::ElementOffsetInProtectedFixedArray(
                                  imm.table_imm.index));
    }
    {
      SCOPED_CODE_COMMENT("Check index is in-bounds");
      const bool needs_dynamic_size =
          !table->has_maximum_size ||
          table->maximum_size != table->initial_size;
      ScopedTempRegister table_size{temps, kGpReg};
      OolTrapLabel out_of_bounds =
          AddOutOfLineTrap(decoder, Builtin::kThrowWasmTrapTableOutOfBounds);
      if (statically_oob) {
        __ emit_jump(out_of_bounds.label());
      } else if (needs_dynamic_size) {
        __ Load(table_size.reg(), dispatch_table.gp_reg(), no_reg,
                LoadType::kI32Load);
        if (is_static_index) {
                                 table_size.gp_reg(), index_slot.i32_const(),
                                 out_of_bounds.frozen());
        } else {
          if (Is64() && table->is_table64()) {
            __ emit_u32_to_uintptr(table_size.gp_reg(), table_size.gp_reg());
            comparison_type = kIntPtrKind;
          }
                            comparison_type, table_size.gp_reg(), index_reg,
                            out_of_bounds.frozen());
        }
      } else {
        if (is_static_index) {
        } else if (Is64() && table->is_table64()) {
                                     out_of_bounds.label(), index_reg,
                                 out_of_bounds.label(), index_reg,
 
 8726    ScopedTempRegister dispatch_table_base{std::move(dispatch_table)};
 
 8727    int dispatch_table_offset = 0;
 
 8728    if (is_static_index) {
 
 8732      dispatch_table_offset =
 
 8744      bool index_reg_still_used =
 
 8745          __ cache_state() -> get_use_count(LiftoffRegister{index_reg}) > 1;
 
 8746      if (index_reg_still_used) entry_offset = temps.Acquire(
kGpReg).gp();
 
 8748      __ emit_u32_to_uintptr(entry_offset, index_reg);
 
 8750      __ emit_ptrsize_muli(entry_offset, entry_offset,
 
 8752      __ emit_ptrsize_add(dispatch_table_base.gp_reg(),
 
 8753                          dispatch_table_base.gp_reg(), entry_offset);
 
 8754      if (index_reg_still_used) temps.Return(std::move(entry_offset));
 
 8755      dispatch_table_offset =
 
 8760        table->type.AsNonNull(), 
ValueType::Ref(imm.sig_imm.heap_type()),
 
 8761        decoder->module_, decoder->module_);
 
 8762    bool needs_null_check = table->type.is_nullable();
 
 8767    if (needs_type_check || needs_null_check) {
 
 8769                                           : 
"Check for null entry");
 
 8770      ScopedTempRegister real_sig_id{temps, 
kGpReg};
 
 8773      __ Load(real_sig_id.reg(), dispatch_table_base.gp_reg(), 
no_reg,
 
              LoadType::kI32Load);

      CanonicalTypeIndex canonical_sig_id =
          decoder->module_->canonical_sig_id(imm.sig_imm.index);
      OolTrapLabel sig_mismatch =
          AddOutOfLineTrap(decoder, Builtin::kThrowWasmTrapFuncSigMismatch);

      if (!needs_type_check) {
        // No signature check is needed; only guard against uninitialized
        // (nulled) table entries, which store -1 as their signature id.
        DCHECK(needs_null_check);
        __ emit_i32_cond_jumpi(kEqual, sig_mismatch.label(),
                               real_sig_id.gp_reg(), -1, sig_mismatch.frozen());
      } else if (!decoder->module_->type(imm.sig_imm.index).is_final) {
        // Non-final type: an exact signature-id match is the fast path;
        // otherwise walk the supertype (RTT) list before giving up.
        Label success_label;
        __ emit_i32_cond_jumpi(kEqual, &success_label, real_sig_id.gp_reg(),
                               canonical_sig_id.index, sig_mismatch.frozen());
        if (needs_null_check) {
          __ emit_i32_cond_jumpi(kEqual, sig_mismatch.label(),
                                 real_sig_id.gp_reg(), -1,
                                 sig_mismatch.frozen());
        }
        ScopedTempRegister real_rtt{temps, kGpReg};
        // ...
        __ LoadTaggedPointer(
            real_rtt.gp_reg(), real_rtt.gp_reg(), real_sig_id.gp_reg(),
            /* ... */);
        // {real_sig_id} is not needed any more.
        real_sig_id.Reset();
        // Clear the weak-reference tag bit (32-bit and 64-bit variants):
        // ...
          __ emit_i32_andi(real_rtt.gp_reg(), real_rtt.gp_reg(),
                           /* ... */);
        // ...
          __ emit_i64_andi(real_rtt.reg(), real_rtt.reg(),
                           /* ... */);
        // ...
        // Load the WasmTypeInfo from the RTT's map.
        constexpr int kTypeInfoOffset = ObjectAccess::ToTagged(
            Map::kConstructorOrBackPointerOrNativeContextOffset);
        ScopedTempRegister type_info{std::move(real_rtt)};
        __ LoadTaggedPointer(type_info.gp_reg(), type_info.gp_reg(), no_reg,
                             kTypeInfoOffset);
        // Check the supertype list's length if needed.
        uint32_t rtt_depth = /* ... */;
        // ...
          ScopedTempRegister list_length{temps, kGpReg};
          // ...
          __ LoadSmiAsInt32(list_length.reg(), type_info.gp_reg(), offset);
          __ emit_i32_cond_jumpi(/* ... */, sig_mismatch.label(),
                                 list_length.gp_reg(), rtt_depth,
                                 sig_mismatch.frozen());
        // ...
        // Load the candidate supertype slot and compare it against the
        // expected RTT.
        ScopedTempRegister maybe_match{std::move(type_info)};
        __ LoadTaggedPointer(
            maybe_match.gp_reg(), maybe_match.gp_reg(), no_reg,
            /* ... */);
        ScopedTempRegister formal_rtt{temps, kGpReg};
        // ...
        __ LoadTaggedPointer(
            formal_rtt.gp_reg(), formal_rtt.gp_reg(), no_reg,
            ObjectAccess::ElementOffsetInTaggedFixedArray(
                imm.sig_imm.index.index));
        __ emit_cond_jump(kNotEqual, sig_mismatch.label(), kRtt,
                          formal_rtt.gp_reg(), maybe_match.gp_reg(),
                          sig_mismatch.frozen());

        __ bind(&success_label);
      } else {
        // Final type: an exact signature-id match is required.
        __ emit_i32_cond_jumpi(kNotEqual, sig_mismatch.label(),
                               real_sig_id.gp_reg(), canonical_sig_id.index,
                               sig_mismatch.frozen());
      }

      // Load the implicit argument (instance or import data) and the call
      // target from the dispatch table entry.
      __ LoadProtectedPointer(
          implicit_arg, dispatch_table_base.gp_reg(),
          /* ... */);
      // ... (the code target is loaded similarly, elided in this listing:)
      //         target, dispatch_table_base.gp_reg(), ...

      if (v8_flags.wasm_inlining_call_indirect) {
        // Feedback collection for speculative inlining of indirect calls.
        ScopedTempRegister vector{std::move(dispatch_table_base)};
        // ...
                      std::numeric_limits<uint32_t>::max() / 2);
        uint32_t vector_slot = /* ... */;
        // ...
        VarState implicit_arg_var(kRef, LiftoffRegister(implicit_arg), 0);
        // ...
        CallBuiltin(Builtin::kCallIndirectIC,
                    MakeSig::Returns(kIntPtrKind, kIntPtrKind)
                        .Params(/* ... */),
                    {vector_var, index_var, target_var, implicit_arg_var},
                    decoder->position());
        // ...
      }

      // ...
      call_descriptor = GetLoweredCallDescriptor(zone_, call_descriptor);
      // ...
      __ PrepareCall(&sig, call_descriptor, &target, implicit_arg);
      if (tail_call) {
        __ PrepareTailCall(
            static_cast<int>(call_descriptor->ParameterSlotCount()),
            static_cast<int>(
                call_descriptor->GetStackParameterDelta(descriptor_)));
        __ TailCallIndirect(call_descriptor, target);
      } else {
        source_position_table_builder_.AddPosition(
            __ pc_offset(), SourcePosition(decoder->position()), true);
        __ CallIndirect(&sig, call_descriptor, target);
        FinishCall(decoder, &sig, call_descriptor);
      }
    // ...
  }
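// ---------------------------------------------------------------------------
// [Editorial sketch] CallIndirectImpl above emits one of three signature
// checks before an indirect call: null-check only, exact match for final
// types, or exact-or-supertype for non-final types. The standalone C++ below
// mirrors that decision tree at the runtime level; the table/entry types and
// names are hypothetical stand-ins, not V8 API.
// ---------------------------------------------------------------------------
// #include <cstdint>
// #include <stdexcept>
//
// namespace sketch {
//
// struct DispatchEntry {
//   std::int32_t sig_id;  // canonical signature id; -1 marks an empty slot
//   void (*target)();     // code entry point
// };
//
// enum class SigCheck { kNullOnly, kExact, kExactOrSupertype };
//
// inline void CheckedIndirectCall(const DispatchEntry& entry,
//                                 std::int32_t expected_sig_id,
//                                 SigCheck mode) {
//   if (mode == SigCheck::kNullOnly) {
//     // Only an uninitialized slot traps (the -1 compare above).
//     if (entry.sig_id == -1) throw std::runtime_error("null entry");
//   } else if (entry.sig_id != expected_sig_id) {
//     if (mode == SigCheck::kExact) throw std::runtime_error("sig mismatch");
//     // kExactOrSupertype: the fast path failed; Liftoff then walks the
//     // supertype (RTT) list before deciding to trap. Omitted here.
//     throw std::runtime_error("sig mismatch (no supertype match)");
//   }
//   entry.target();
// }
//
// }  // namespace sketch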
 
  void StoreFrameDescriptionForDeopt(
      FullDecoder* decoder, uint32_t adapt_shadow_stack_pc_offset = 0) {
    // ...
        LiftoffFrameDescriptionForDeopt{
            decoder->pc_offset(), static_cast<uint32_t>(__ pc_offset()),
#ifdef V8_ENABLE_CET_SHADOW_STACK
            adapt_shadow_stack_pc_offset,
#endif  // V8_ENABLE_CET_SHADOW_STACK
            std::vector<LiftoffVarState>(__ cache_state()->stack_state.begin(),
                                         __ cache_state()->stack_state.end()),
            __ cache_state()->cached_instance_data});
  }
 
  void EmitDeoptPoint(FullDecoder* decoder) {
#if defined(DEBUG) and !defined(V8_TARGET_ARCH_ARM)
    // ...
              config->num_allocatable_simd128_registers());
      const int* end = config->allocatable_simd128_codes() +
                       config->num_allocatable_simd128_registers();
      DCHECK(std::find(config->allocatable_simd128_codes(), end, reg.code()) !=
             end);
    // ...
#endif

    LiftoffAssembler::CacheState initial_state(zone_);
    initial_state.Split(*__ cache_state());
    // ...
    __ emit_jump(&callref);
    __ bind(&deopt_point);
    uint32_t adapt_shadow_stack_pc_offset = __ pc_offset();
#ifdef V8_ENABLE_CET_SHADOW_STACK
    // ...
      __ CallBuiltin(Builtin::kAdaptShadowStackForDeopt);
    // ...
#endif  // V8_ENABLE_CET_SHADOW_STACK
    StoreFrameDescriptionForDeopt(decoder, adapt_shadow_stack_pc_offset);
    CallBuiltin(Builtin::kWasmLiftoffDeoptFinish, MakeSig(), {},
                /* ... */);
    // ...
    __ cache_state()->Steal(initial_state);
    // ...
  }
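// ---------------------------------------------------------------------------
// [Editorial sketch] StoreFrameDescriptionForDeopt/EmitDeoptPoint record
// enough state to rebuild the Liftoff frame when optimized code deopts back
// into it. A minimal snapshot of that idea, with invented field names; the
// key point is that the live cache state keeps mutating, so the snapshot must
// copy it by value (like the std::vector copy of stack_state above).
// ---------------------------------------------------------------------------
// #include <cstdint>
// #include <vector>
//
// namespace sketch {
//
// struct SlotState { int kind; int location; };  // stand-in for LiftoffVarState
//
// struct FrameSnapshot {
//   std::uint32_t bytecode_offset;      // wasm pc of the call site
//   std::uint32_t code_offset;          // machine-code pc offset
//   std::vector<SlotState> stack;       // copy of the value-stack layout
//   int cached_instance_register;       // register that caches the instance
// };
//
// inline FrameSnapshot Capture(std::uint32_t wasm_pc, std::uint32_t code_pc,
//                              const std::vector<SlotState>& live_stack,
//                              int instance_reg) {
//   return FrameSnapshot{wasm_pc, code_pc, live_stack, instance_reg};
// }
//
// }  // namespace sketch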
 
  void CallRefImpl(FullDecoder* decoder, ValueType func_ref_type,
                   const FunctionSig* type_sig, TailCall tail_call) {
    MostlySmallValueKindSig sig(zone_, type_sig);
    for (ValueKind ret : sig.returns()) {
      if (!CheckSupportedType(decoder, ret, "return")) return;
    }
    auto call_descriptor = compiler::GetWasmCallDescriptor(zone_, type_sig);
    call_descriptor = GetLoweredCallDescriptor(zone_, call_descriptor);
    // ...
        EmitDeoptPoint(decoder);
    // ...
      // Feedback-collection path: call through the CallRefIC builtin.
      LiftoffRegList pinned;
      LiftoffRegister func_ref = pinned.set(__ PopToRegister(pinned));
      LiftoffRegister vector = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
      MaybeEmitNullCheck(decoder, func_ref.gp(), pinned, func_ref_type);
      // ...
                    std::numeric_limits<uint32_t>::max() / 2);
      uint32_t vector_slot = /* ... */;
      // ...
      CallBuiltin(
          Builtin::kCallRefIC,
          MakeSig::Returns(kIntPtrKind, kIntPtrKind).Params(kRef, kI32, kRef),
          {vector_var, index_var, func_ref_var}, decoder->position());
      // ...
      // Generic path: load the implicit argument and the call target
      // directly from the function reference.
      __ SpillAllRegisters();
      // ...
      LiftoffRegList pinned;
      Register func_ref = pinned.set(__ PopToModifiableRegister(pinned)).gp();
      MaybeEmitNullCheck(decoder, func_ref, pinned, func_ref_type);
      implicit_arg_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
      target_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
      // ...
      Register internal_function = func_ref;
      __ LoadTrustedPointer(
          internal_function, func_ref,
          /* ... */,
          kWasmInternalFunctionIndirectPointerTag);
      // ...
      __ LoadProtectedPointer(
          implicit_arg_reg, internal_function,
          ObjectAccess::ToTagged(
              WasmInternalFunction::kProtectedImplicitArgOffset));
      __ LoadFullPointer(target_reg, internal_function,
                         ObjectAccess::ToTagged(
                             WasmInternalFunction::kRawCallTargetOffset));
    // ...

    __ PrepareCall(&sig, call_descriptor, &target_reg, implicit_arg_reg);
    if (tail_call) {
      __ PrepareTailCall(
          static_cast<int>(call_descriptor->ParameterSlotCount()),
          static_cast<int>(
              call_descriptor->GetStackParameterDelta(descriptor_)));
      __ TailCallIndirect(call_descriptor, target_reg);
    } else {
      source_position_table_builder_.AddPosition(
          __ pc_offset(), SourcePosition(decoder->position()), true);
      __ CallIndirect(&sig, call_descriptor, target_reg);
      FinishCall(decoder, &sig, call_descriptor);
    }
  }
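// ---------------------------------------------------------------------------
// [Editorial sketch] The generic (non-IC) path of CallRefImpl loads two
// fields from the function reference: the implicit argument (instance or
// import data) and the raw call target, then calls indirectly. A plain-C++
// analogue with hypothetical types:
// ---------------------------------------------------------------------------
// #include <stdexcept>
//
// namespace sketch {
//
// struct InternalFunction {
//   void* implicit_arg;          // passed in a fixed register at the call
//   void (*call_target)(void*);  // raw code entry point
// };
//
// inline void CallRef(const InternalFunction* func_ref) {
//   if (func_ref == nullptr) throw std::runtime_error("null dereference");
//   // Mirrors LoadProtectedPointer + LoadFullPointer, then the indirect call.
//   func_ref->call_target(func_ref->implicit_arg);
// }
//
// }  // namespace sketch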
 
  void LoadNullValue(Register null, ValueType type) {
    __ LoadFullPointer(
        null, kRootRegister,
        type.use_wasm_null()
            ? IsolateData::root_slot_offset(RootIndex::kWasmNull)
            : IsolateData::root_slot_offset(RootIndex::kNullValue));
  }

  void LoadNullValueForCompare(Register null, LiftoffRegList pinned,
                               ValueType type) {
#if V8_STATIC_ROOTS_BOOL
    uint32_t value = type.use_wasm_null() ? StaticReadOnlyRoot::kWasmNull
                                          : StaticReadOnlyRoot::kNullValue;
    __ LoadConstant(LiftoffRegister(null),
                    WasmValue(static_cast<uint32_t>(value)));
#else
    LoadNullValue(null, type);
#endif
  }
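// ---------------------------------------------------------------------------
// [Editorial sketch] With static roots, null is a compile-time 32-bit
// constant (a compressed pointer), so a null comparison becomes one
// register-vs-immediate compare instead of a load from the root table. The
// constants below are made up for illustration:
// ---------------------------------------------------------------------------
// #include <cstdint>
//
// namespace sketch {
//
// constexpr std::uint32_t kWasmNullCompressed = 0x11111111;   // made-up
// constexpr std::uint32_t kNullValueCompressed = 0x22222222;  // made-up
//
// inline bool IsNull(std::uint32_t compressed_ref, bool use_wasm_null) {
//   std::uint32_t null_value =
//       use_wasm_null ? kWasmNullCompressed : kNullValueCompressed;
//   return compressed_ref == null_value;  // one cmp-immediate, no memory load
// }
//
// }  // namespace sketch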
 
  void LoadExceptionSymbol(Register dst, LiftoffRegList pinned,
                           /* ... */) {
    // ...
  }

  void MaybeEmitNullCheck(FullDecoder* decoder, Register object,
                          LiftoffRegList pinned, ValueType type) {
    if (v8_flags.experimental_wasm_skip_null_checks || !type.is_nullable()) {
      return;
    }
    LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
    LoadNullValueForCompare(null.gp(), pinned, type);
    OolTrapLabel trap =
        AddOutOfLineTrap(decoder, Builtin::kThrowWasmTrapNullDereference);
    __ emit_cond_jump(kEqual, trap.label(), kRefNull, object, null.gp(),
                      trap.frozen());
  }

  void BoundsCheckArray(FullDecoder* decoder, bool implicit_null_check,
                        LiftoffRegister array, LiftoffRegister index,
                        LiftoffRegList pinned) {
    // ...
    LiftoffRegister length = __ GetUnusedRegister(kGpReg, pinned);
    constexpr int kLengthOffset =
        ObjectAccess::ToTagged(WasmArray::kLengthOffset);
    uint32_t protected_instruction_pc = 0;
    __ Load(length, array.gp(), no_reg, kLengthOffset, LoadType::kI32Load,
            implicit_null_check ? &protected_instruction_pc : nullptr);
    if (implicit_null_check) {
      RegisterProtectedInstruction(decoder, protected_instruction_pc);
    }
    OolTrapLabel trap =
        AddOutOfLineTrap(decoder, Builtin::kThrowWasmTrapArrayOutOfBounds);
    __ emit_cond_jump(kUnsignedGreaterThanEqual, trap.label(), kI32, index.gp(),
                      length.gp(), trap.frozen());
  }
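// ---------------------------------------------------------------------------
// [Editorial sketch] BoundsCheckArray relies on a single unsigned comparison:
// treating the (possibly negative) index as unsigned makes any negative value
// compare greater than every valid length, so kUnsignedGreaterThanEqual
// catches both "too large" and "negative" in one branch:
// ---------------------------------------------------------------------------
// #include <cstdint>
// #include <stdexcept>
//
// namespace sketch {
//
// inline void BoundsCheck(std::int32_t index, std::uint32_t length) {
//   if (static_cast<std::uint32_t>(index) >= length) {
//     throw std::runtime_error("array out of bounds");  // the OOL trap
//   }
// }
//
// }  // namespace sketch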
 
  int StructFieldOffset(const StructType* struct_type, int field_index) {
    return ObjectAccess::ToTagged(WasmStruct::kHeaderSize +
                                        struct_type->field_offset(field_index));
  }

  std::pair<bool, bool> null_checks_for_struct_op(ValueType struct_type,
                                                  int field_index) {
    bool explicit_null_check =
        struct_type.is_nullable() &&
        (null_check_strategy_ == compiler::NullCheckStrategy::kExplicit ||
         field_index > wasm::kMaxStructFieldIndexForImplicitNullCheck);
    bool implicit_null_check =
        struct_type.is_nullable() && !explicit_null_check;
    return {explicit_null_check, implicit_null_check};
  }
 
  void LoadObjectField(FullDecoder* decoder, LiftoffRegister dst, Register src,
                       Register offset_reg, int offset, ValueKind kind,
                       bool is_signed, bool trapping, LiftoffRegList pinned) {
    uint32_t protected_load_pc = 0;
    if (is_reference(kind)) {
      __ LoadTaggedPointer(dst.gp(), src, offset_reg, offset,
                           trapping ? &protected_load_pc : nullptr);
    } else {
      // Primitive kind.
      LoadType load_type = LoadType::ForValueKind(kind, is_signed);
      __ Load(dst, src, offset_reg, offset, load_type,
              trapping ? &protected_load_pc : nullptr);
    }
    if (trapping) RegisterProtectedInstruction(decoder, protected_load_pc);
  }

  void StoreObjectField(FullDecoder* decoder, Register obj, Register offset_reg,
                        int offset, LiftoffRegister value, bool trapping,
                        /* ... */) {
    uint32_t protected_load_pc = 0;
    if (is_reference(kind)) {
      __ StoreTaggedPointer(obj, offset_reg, offset, value.gp(), pinned,
                            trapping ? &protected_load_pc : nullptr,
                            skip_write_barrier);
    } else {
      // Primitive kind.
      StoreType store_type = StoreType::ForValueKind(kind);
      __ Store(obj, offset_reg, offset, value, store_type, pinned,
               trapping ? &protected_load_pc : nullptr);
    }
    if (trapping) RegisterProtectedInstruction(decoder, protected_load_pc);
  }
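// ---------------------------------------------------------------------------
// [Editorial sketch] null_checks_for_struct_op above picks between an
// explicit compare-and-branch and an implicit check, where the field access
// itself faults on null and the trap handler turns the fault into a wasm
// trap. The implicit form is only safe while the faulting offset stays inside
// the protected region, hence the field-index limit. The constant below is a
// stand-in for kMaxStructFieldIndexForImplicitNullCheck:
// ---------------------------------------------------------------------------
// #include <utility>
//
// namespace sketch {
//
// enum class NullCheckStrategy { kExplicit, kTrapHandler };
//
// constexpr int kMaxImplicitField = 4;  // hypothetical limit
//
// inline std::pair<bool, bool> NullChecksForStructOp(
//     bool nullable, NullCheckStrategy strategy, int field_index) {
//   bool explicit_check =
//       nullable && (strategy == NullCheckStrategy::kExplicit ||
//                    field_index > kMaxImplicitField);
//   bool implicit_check = nullable && !explicit_check;
//   return {explicit_check, implicit_check};
// }
//
// }  // namespace sketch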
 
  void SetDefaultValue(LiftoffRegister reg, ValueType type) {
    DCHECK(type.is_defaultable());
    switch (type.kind()) {
      case kI32:
        return __ LoadConstant(reg, WasmValue(int32_t{0}));
      case kI64:
        return __ LoadConstant(reg, WasmValue(int64_t{0}));
      case kF32:
        return __ LoadConstant(reg, WasmValue(float{0.0}));
      case kF64:
        return __ LoadConstant(reg, WasmValue(double{0.0}));
      // ...
      case kRefNull:
        return LoadNullValue(reg.gp(), type);
      // ...
    }
  }
 
  void FinishCall(FullDecoder* decoder, ValueKindSig* sig,
                  compiler::CallDescriptor* call_descriptor) {
    DefineSafepoint();
    RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
    // ...
      uint32_t adapt_shadow_stack_pc_offset = 0;
#ifdef V8_ENABLE_CET_SHADOW_STACK
      // ...
        __ emit_jump(&deopt_point);
        // ...
        __ CallBuiltin(Builtin::kAdaptShadowStackForDeopt);
        __ bind(&deopt_point);
      // ...
#endif  // V8_ENABLE_CET_SHADOW_STACK
      StoreFrameDescriptionForDeopt(decoder, adapt_shadow_stack_pc_offset);
    // ...
    __ FinishCall(sig, call_descriptor);
  }
 
  void CheckNan(LiftoffRegister src, LiftoffRegList pinned, ValueKind kind) {
    auto nondeterminism_addr = __ GetUnusedRegister(kGpReg, pinned);
    __ LoadConstant(nondeterminism_addr,
                    WasmValue::ForUintPtr(WasmEngine::GetNondeterminismAddr()));
    __ emit_store_nonzero_if_nan(nondeterminism_addr.gp(), src.fp(), kind);
  }

  void CheckS128Nan(LiftoffRegister dst, LiftoffRegList pinned,
                    ValueKind lane_kind) {
    RegClass rc = reg_class_for(kS128);
    LiftoffRegister tmp_gp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    LiftoffRegister tmp_s128 = pinned.set(__ GetUnusedRegister(rc, pinned));
    LiftoffRegister nondeterminism_addr =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    __ LoadConstant(nondeterminism_addr,
                    WasmValue::ForUintPtr(WasmEngine::GetNondeterminismAddr()));
    __ emit_s128_store_nonzero_if_nan(nondeterminism_addr.gp(), dst,
                                      tmp_gp.gp(), tmp_s128, lane_kind);
  }
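// ---------------------------------------------------------------------------
// [Editorial sketch] CheckNan implements NaN-nondeterminism detection (used
// for differential fuzzing): whenever a float result is NaN, a nonzero value
// is stored to a process-wide flag address. The core test is the classic
// self-inequality check; the global below stands in for the engine's address:
// ---------------------------------------------------------------------------
// #include <cstdint>
//
// namespace sketch {
//
// inline std::uint32_t g_nondeterminism_flag = 0;  // stand-in flag location
//
// template <typename Float>
// inline void StoreNonzeroIfNan(Float value) {
//   if (value != value) g_nondeterminism_flag = 1;  // NaN is never == itself
// }
//
// }  // namespace sketch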
 
  void ArrayFillImpl(FullDecoder* decoder, LiftoffRegList pinned,
                     LiftoffRegister obj, LiftoffRegister index,
                     LiftoffRegister value, LiftoffRegister length,
                     ValueKind elem_kind,
                     /* ... */) {
    // Compute the byte offset of the first element to fill:
    // offset = index << log2(element size).
    // ...
      __ emit_i32_shli(offset.gp(), index.gp(),
                       value_kind_size_log2(elem_kind));
    // ...
    // Compute the end offset, one past the last element to fill.
    LiftoffRegister end_offset = length;
    // ...
      __ emit_i32_shli(end_offset.gp(), length.gp(),
                       value_kind_size_log2(elem_kind));
    // ...
    __ emit_i32_add(end_offset.gp(), end_offset.gp(), offset.gp());
    // ...
    // Loop: store one element, bump the offset, and repeat until the end
    // offset is reached.
    // ...
                      end_offset.gp(), frozen_for_conditional_jumps);
    StoreObjectField(decoder, obj.gp(), offset.gp(), 0, value, false, pinned,
                     elem_kind, skip_write_barrier);
    // ...
    __ emit_jump(&loop);
    // ...
  }
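// ---------------------------------------------------------------------------
// [Editorial sketch] ArrayFillImpl turns index/length into byte offsets with
// shifts by log2(element size), then stores in a loop until the end offset is
// reached. Equivalent scalar logic, operating on a raw payload pointer:
// ---------------------------------------------------------------------------
// #include <cstdint>
// #include <cstring>
//
// namespace sketch {
//
// inline void ArrayFill(std::uint8_t* payload, std::uint32_t index,
//                       std::uint32_t length, const void* value,
//                       std::uint32_t elem_size_log2) {
//   std::uint32_t offset = index << elem_size_log2;  // first element
//   std::uint32_t end_offset = offset + (length << elem_size_log2);
//   std::uint32_t elem_size = std::uint32_t{1} << elem_size_log2;
//   while (offset < end_offset) {                    // loop + cond jump
//     std::memcpy(payload + offset, value, elem_size);  // StoreObjectField
//     offset += elem_size;
//   }
// }
//
// }  // namespace sketch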
 
  void RegisterProtectedInstruction(FullDecoder* decoder,
                                    uint32_t protected_instruction_pc) {
    protected_instructions_.emplace_back(
        trap_handler::ProtectedInstructionData{protected_instruction_pc});
    source_position_table_builder_.AddPosition(
        protected_instruction_pc, SourcePosition(decoder->position()), true);
    // ...
      DefineSafepoint(protected_instruction_pc);
    // ...
  }
 
  bool has_outstanding_op() const {
    return outstanding_op_ != kNoOutstandingOp;
  }

  bool test_and_reset_outstanding_op(WasmOpcode opcode) {
    if (outstanding_op_ != opcode) return false;
    outstanding_op_ = kNoOutstandingOp;
    return true;
  }
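// ---------------------------------------------------------------------------
// [Editorial sketch] The outstanding-op mechanism is a one-slot queue: an
// opcode is parked, and a later decode step either consumes exactly that
// opcode or finds the slot empty. Condensed, with int standing in for
// WasmOpcode:
// ---------------------------------------------------------------------------
// namespace sketch {
//
// class OutstandingOp {
//  public:
//   static constexpr int kNone = -1;
//   bool has_outstanding() const { return op_ != kNone; }
//   void set(int op) { op_ = op; }
//   bool test_and_reset(int op) {
//     if (op_ != op) return false;
//     op_ = kNone;  // consume the parked opcode
//     return true;
//   }
//
//  private:
//   int op_ = kNone;
// };
//
// }  // namespace sketch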
 
  void TraceCacheState(FullDecoder* decoder) const {
    if (!v8_flags.trace_liftoff) return;
    // Print the cache state of every control frame, innermost first.
    for (int control_depth = decoder->control_depth() - 1; control_depth >= -1;
         --control_depth) {
      auto* cache_state =
          control_depth == -1 ? __ cache_state()
                              : &decoder->control_at(control_depth)
                                     ->label_state;
      // ...
      if (control_depth != -1) PrintF("; ");
    }
    // ...
  }

  void DefineSafepoint(int pc_offset = 0) {
    // ...
    __ cache_state()->DefineSafepoint(safepoint);
  }
 
  void DefineSafepointWithCalleeSavedRegisters() {
    // ...
    __ cache_state()->DefineSafepointWithCalleeSavedRegisters(safepoint);
  }

  V8_INLINE Register LoadInstanceIntoRegister(LiftoffRegList pinned,
                                              Register fallback) {
    // Fast path: the instance data is already cached in a register.
    // ...
      instance = LoadInstanceIntoRegister_Slow(pinned, fallback);
    // ...
  }

  V8_NOINLINE Register
  LoadInstanceIntoRegister_Slow(LiftoffRegList pinned, Register fallback) {
    // ...
    Register instance = __ cache_state()->TrySetCachedInstanceRegister(
        pinned | LiftoffRegList{fallback});
    if (instance == no_reg) instance = fallback;
    __ LoadInstanceDataFromFrame(instance);
    return instance;
  }
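// ---------------------------------------------------------------------------
// [Editorial sketch] LoadInstanceIntoRegister is a classic fast/slow path
// split: the fast path reads a cached register; the out-of-line slow path
// reloads from the frame and tries to re-establish the cache. Shape of it,
// with -1 playing the role of no_reg and a hypothetical fallback loader:
// ---------------------------------------------------------------------------
// namespace sketch {
//
// struct CacheState {
//   int cached_instance_reg = -1;  // -1 == "not cached" (no_reg)
// };
//
// inline int LoadFromFrame() { return 7; }  // hypothetical register choice
//
// inline int LoadInstance(CacheState& state) {
//   int reg = state.cached_instance_reg;  // fast path: one register move
//   if (reg == -1) {                      // slow path, kept out of line
//     reg = LoadFromFrame();
//     state.cached_instance_reg = reg;    // TrySetCachedInstanceRegister
//   }
//   return reg;
// }
//
// }  // namespace sketch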
 
  struct HandlerInfo {
    // ...
  };

  // ...
};  // class LiftoffCompiler

constexpr WasmOpcode LiftoffCompiler::kNoOutstandingOp;
constexpr base::EnumSet<ValueKind> LiftoffCompiler::kUnconditionallySupported;
 
std::unique_ptr<AssemblerBuffer> NewLiftoffAssemblerBuffer(int func_body_size) {
  size_t code_size_estimate =
      WasmCodeManager::EstimateLiftoffCodeSize(func_body_size);
  // Allocate the initial buffer a bit larger to avoid reallocation during
  // code generation.
  int initial_buffer_size = static_cast<int>(128 + code_size_estimate * 4 / 3);
  return NewAssemblerBuffer(initial_buffer_size);
}
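// ---------------------------------------------------------------------------
// [Editorial sketch] The initial buffer is the code-size estimate plus a
// third, plus a small constant, trading a little memory for fewer
// reallocations. The cast to int may overflow for huge estimates, which is
// tolerated because a minimum buffer size applies and the buffer can grow:
// ---------------------------------------------------------------------------
// #include <cstddef>
//
// namespace sketch {
//
// inline int InitialBufferSize(std::size_t code_size_estimate) {
//   return static_cast<int>(128 + code_size_estimate * 4 / 3);
// }
//
// }  // namespace sketch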
 
WasmCompilationResult ExecuteLiftoffCompilation(
    CompilationEnv* env, const FunctionBody& func_body,
    const LiftoffOptions& compiler_options) {
  base::TimeTicks start_time;
  if (V8_UNLIKELY(v8_flags.trace_wasm_compilation_times)) {
    start_time = base::TimeTicks::Now();
  }
  int func_body_size = static_cast<int>(func_body.end - func_body.start);
  TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
               "wasm.CompileBaseline", "funcIndex", compiler_options.func_index,
               "bodySize", func_body_size);

  Zone zone(GetWasmEngine()->allocator(), "LiftoffCompilationZone");
  auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, func_body.sig);

  std::unique_ptr<DebugSideTableBuilder> debug_sidetable_builder;
  if (compiler_options.debug_sidetable) {
    debug_sidetable_builder = std::make_unique<DebugSideTableBuilder>();
  }

  WasmDetectedFeatures unused_detected_features;
  WasmFullDecoder<Decoder::NoValidationTag, LiftoffCompiler> decoder(
      &zone, env->module, env->enabled_features,
      compiler_options.detected_features ? compiler_options.detected_features
                                         : &unused_detected_features,
      func_body, call_descriptor, env, &zone,
      NewLiftoffAssemblerBuffer(func_body_size), debug_sidetable_builder.get(),
      compiler_options);
  decoder.Decode();
  LiftoffCompiler* compiler = &decoder.interface();
  if (decoder.failed()) compiler->OnFirstError(&decoder);

  if (auto* counters = compiler_options.counters) {
    // Check that the histogram for the bailout reasons has the correct size.
    DCHECK_EQ(0, counters->liftoff_bailout_reasons()->min());
    DCHECK_EQ(kNumBailoutReasons - 1,
              counters->liftoff_bailout_reasons()->max());
    DCHECK_EQ(kNumBailoutReasons,
              counters->liftoff_bailout_reasons()->num_buckets());
    // Register the bailout reason (can also be {kSuccess}).
    counters->liftoff_bailout_reasons()->AddSample(
        static_cast<int>(compiler->bailout_reason()));
  }

  // ...
  WasmCompilationResult result;
  compiler->GetCode(&result.code_desc);
  // ...
  result.source_positions = compiler->GetSourcePositionTable();
  result.protected_instructions_data = compiler->GetProtectedInstructionsData();
  result.frame_slot_count = compiler->GetTotalFrameSlotCountForGC();
  result.ool_spill_count = compiler->OolSpillCount();
  auto* lowered_call_desc = GetLoweredCallDescriptor(&zone, call_descriptor);
  result.tagged_parameter_slots = lowered_call_desc->GetTaggedParameterSlots();
  // ...
  result.result_tier = ExecutionTier::kLiftoff;
  // ...
  result.liftoff_frame_descriptions = compiler->ReleaseFrameDescriptions();
  if (auto* debug_sidetable = compiler_options.debug_sidetable) {
    *debug_sidetable = debug_sidetable_builder->GenerateDebugSideTable();
  }

  if (V8_UNLIKELY(v8_flags.trace_wasm_compilation_times)) {
    base::TimeDelta time = base::TimeTicks::Now() - start_time;
    int codesize = result.code_desc.body_size();
    StdoutStream{} << "Compiled function "
                   << reinterpret_cast<const void*>(env->module) << "#"
                   << compiler_options.func_index << " using Liftoff, took "
                   << time.InMilliseconds() << " ms and "
                   << zone.allocation_size() << " bytes; bodysize "
                   << func_body_size << " codesize " << codesize << std::endl;
  }

  return result;
}
 
 
std::unique_ptr<DebugSideTable> GenerateLiftoffDebugSideTable(
    const WasmCode* code) {
  auto* native_module = code->native_module();
  auto* function = &native_module->module()->functions[code->index()];
  // ...
  base::Vector<const uint8_t> function_bytes =
      wire_bytes.GetFunctionBytes(function);
  // ...
  bool is_shared = native_module->module()->type(function->sig_index).is_shared;
  // ...
  auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, function->sig);
  DebugSideTableBuilder debug_sidetable_builder;
  // For stepping, compile with a single breakpoint at offset 0, which forces
  // a breakable point before every instruction.
  constexpr int kSteppingBreakpoints[] = {0};
  base::Vector<const int> breakpoints =
      code->for_debugging() == kForStepping
          ? base::ArrayVector(kSteppingBreakpoints)
          : base::Vector<const int>{};
  WasmFullDecoder<Decoder::NoValidationTag, LiftoffCompiler> decoder(
      // ...
      func_body, call_descriptor, &env, &zone,
      // ...
      &debug_sidetable_builder,
      LiftoffOptions{}
          .set_func_index(code->index())
          .set_for_debugging(code->for_debugging())
          .set_breakpoints(breakpoints));
  decoder.Decode();
  // ...
  return debug_sidetable_builder.GenerateDebugSideTable();
}
 
 