#if V8_ENABLE_WEBASSEMBLY

    switch (constant.type()) {

        return Immediate(constant.ToInt32(), constant.rmode());

        return Immediate(constant.ToExternalReference());

        return Immediate(constant.ToHeapObject());
 
 
    static_assert(0 == static_cast<int>(times_1));
    static_assert(1 == static_cast<int>(times_2));
    static_assert(2 == static_cast<int>(times_4));
    static_assert(3 == static_cast<int>(times_8));
    int scale = static_cast<int>(mode - one);
 
 
    const int32_t disp = 4;
    if (mode == kMode_MR1) {

    } else if (mode == kMode_MRI) {

  if (op->IsImmediate() || op->IsConstant()) {
 
 
 
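// Out-of-line path that materializes a float32 NaN: xorps zeroes the result
// register and divss then computes 0.0f / 0.0f, which yields a quiet NaN.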
class OutOfLineLoadFloat32NaN final : public OutOfLineCode {

  OutOfLineLoadFloat32NaN(CodeGenerator* gen, XMMRegister result)

  void Generate() final {
    __ xorps(result_, result_);
    __ divss(result_, result_);
 
class OutOfLineLoadFloat64NaN final : public OutOfLineCode {

  OutOfLineLoadFloat64NaN(CodeGenerator* gen, XMMRegister result)

  void Generate() final {
    __ xorpd(result_, result_);
    __ divsd(result_, result_);
 
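// Slow path for double -> int32 truncation when the inline cvttsd2si
// overflows; it ends up calling the DoubleToI builtin (via wasm_call from
// within WebAssembly code).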
class OutOfLineTruncateDoubleToI final : public OutOfLineCode {

  OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,

      : OutOfLineCode(gen),

#if V8_ENABLE_WEBASSEMBLY
        stub_mode_(stub_mode),

  void Generate() final {

#if V8_ENABLE_WEBASSEMBLY
    if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {

      __ wasm_call(static_cast<Address>(Builtin::kDoubleToI),

      __ CallBuiltin(Builtin::kDoubleToI);
 
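// Out-of-line write barrier: skips Smi values, re-checks the page flags, and
// calls the appropriate RecordWrite stub, saving FP registers only when the
// frame actually allocated any.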
#if V8_ENABLE_WEBASSEMBLY

class OutOfLineRecordWrite final : public OutOfLineCode {

  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand operand,

      : OutOfLineCode(gen),

#if V8_ENABLE_WEBASSEMBLY
        stub_mode_(stub_mode),

  void Generate() final {

                     MemoryChunk::kPointersToHereAreInterestingMask, zero,

    SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
                                            ? SaveFPRegsMode::kSave
                                            : SaveFPRegsMode::kIgnore;
    if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {

#if V8_ENABLE_WEBASSEMBLY
    } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {

                                          StubCallMode::kCallWasmRuntimeStub);

#if V8_ENABLE_WEBASSEMBLY
  StubCallMode const stub_mode_;
 
#define ASSEMBLE_COMPARE(asm_instr)                              \
    if (HasAddressingMode(instr)) {                              \
      Operand left = i.MemoryOperand(&index);                    \
      if (HasImmediateInput(instr, index)) {                     \
        __ asm_instr(left, i.InputImmediate(index));             \
        __ asm_instr(left, i.InputRegister(index));              \
      if (HasImmediateInput(instr, 1)) {                         \
        if (HasRegisterInput(instr, 0)) {                        \
          __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
          __ asm_instr(i.InputOperand(0), i.InputImmediate(1));  \
        if (HasRegisterInput(instr, 1)) {                        \
          __ asm_instr(i.InputRegister(0), i.InputRegister(1));  \
          __ asm_instr(i.InputRegister(0), i.InputOperand(1));   \
 
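// The ia32 C ABI returns doubles on the x87 stack (ST0), so after the call
// the result is spilled with fstp_d and reloaded into an XMM register.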
#define ASSEMBLE_IEEE754_BINOP(name)                                     \
    __ PrepareCallCFunction(4, eax);                                     \
    __ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0));   \
    __ movsd(Operand(esp, 1 * kDoubleSize), i.InputDoubleRegister(1));   \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 4); \
    __ AllocateStackSpace(kDoubleSize);                                  \
    __ fstp_d(Operand(esp, 0));                                          \
    __ movsd(i.OutputDoubleRegister(), Operand(esp, 0));                 \
    __ add(esp, Immediate(kDoubleSize));                                 \
 
#define ASSEMBLE_IEEE754_UNOP(name)                                      \
    __ PrepareCallCFunction(2, eax);                                     \
    __ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0));   \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 2); \
    __ AllocateStackSpace(kDoubleSize);                                  \
    __ fstp_d(Operand(esp, 0));                                          \
    __ movsd(i.OutputDoubleRegister(), Operand(esp, 0));                 \
    __ add(esp, Immediate(kDoubleSize));                                 \
 
#define ASSEMBLE_BINOP(asm_instr)                             \
    if (HasAddressingMode(instr)) {                           \
      Operand right = i.MemoryOperand(&index);                \
      __ asm_instr(i.InputRegister(0), right);                \
      if (HasImmediateInput(instr, 1)) {                      \
        __ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \
        __ asm_instr(i.InputRegister(0), i.InputOperand(1));  \
 
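// Atomic read-modify-write via a compare-exchange loop: load the old value
// into eax, apply the operation on a temp, then cmpxchg and branch back to
// the (elided) binop label until the exchange succeeds.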
#define ASSEMBLE_ATOMIC_BINOP(bin_inst, mov_inst, cmpxchg_inst) \
    __ mov_inst(eax, i.MemoryOperand(1));                       \
    __ Move(i.TempRegister(0), eax);                            \
    __ bin_inst(i.TempRegister(0), i.InputRegister(0));         \
    __ cmpxchg_inst(i.MemoryOperand(1), i.TempRegister(0));     \
    __ j(not_equal, &binop);                                    \
 
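// 64-bit atomics on ia32 use cmpxchg8b, which compares against edx:eax and
// exchanges with ecx:ebx, so the operand pair is staged in those fixed
// registers around a retry loop.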
#define ASSEMBLE_I64ATOMIC_BINOP(instr1, instr2)                \
    __ mov(eax, i.MemoryOperand(2));                            \
    __ mov(edx, i.NextMemoryOperand(2));                        \
    frame_access_state()->IncreaseSPDelta(1);                   \
    i.MoveInstructionOperandToRegister(ebx, instr->InputAt(0)); \
    __ push(i.InputRegister(1));                                \
    __ instr1(ebx, eax);                                        \
    __ instr2(i.InputRegister(1), edx);                         \
    __ cmpxchg8b(i.MemoryOperand(2));                           \
    __ pop(i.InputRegister(1));                                 \
    frame_access_state()->IncreaseSPDelta(-1);                  \
    __ j(not_equal, &binop);                                    \
 
#define ASSEMBLE_MOVX(mov_instr)                            \
    if (HasAddressingMode(instr)) {                         \
      __ mov_instr(i.OutputRegister(), i.MemoryOperand());  \
    } else if (HasRegisterInput(instr, 0)) {                \
      __ mov_instr(i.OutputRegister(), i.InputRegister(0)); \
      __ mov_instr(i.OutputRegister(), i.InputOperand(0));  \
 
#define ASSEMBLE_SIMD_PUNPCK_SHUFFLE(opcode)                         \
    XMMRegister src0 = i.InputSimd128Register(0);                    \
    Operand src1 = i.InputOperand(instr->InputCount() == 2 ? 1 : 0); \
    if (CpuFeatures::IsSupported(AVX)) {                             \
      CpuFeatureScope avx_scope(masm(), AVX);                        \
      __ v##opcode(i.OutputSimd128Register(), src0, src1);           \
      DCHECK_EQ(i.OutputSimd128Register(), src0);                    \
      __ opcode(i.OutputSimd128Register(), src1);                    \
 
#define ASSEMBLE_SIMD_IMM_SHUFFLE(opcode, SSELevel, imm)               \
  if (CpuFeatures::IsSupported(AVX)) {                                 \
    CpuFeatureScope avx_scope(masm(), AVX);                            \
    __ v##opcode(i.OutputSimd128Register(), i.InputSimd128Register(0), \
                 i.InputOperand(1), imm);                              \
    CpuFeatureScope sse_scope(masm(), SSELevel);                       \
    DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));   \
    __ opcode(i.OutputSimd128Register(), i.InputOperand(1), imm);      \
 
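// all_true reduction: pcmpeq against a zeroed vector flags any zero lane,
// ptest sets ZF when nothing was flagged, and cmov then transfers the
// prestaged 1 into dst only in that case.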
#define ASSEMBLE_SIMD_ALL_TRUE(opcode)               \
    Register dst = i.OutputRegister();               \
    Operand src = i.InputOperand(0);                 \
    Register tmp = i.TempRegister(0);                \
    XMMRegister tmp_simd = i.TempSimd128Register(1); \
    __ mov(tmp, Immediate(1));                       \
    __ Pxor(tmp_simd, tmp_simd);                     \
    __ opcode(tmp_simd, src);                        \
    __ Ptest(tmp_simd, tmp_simd);                    \
    __ cmov(zero, dst, tmp);                         \
 
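// Wasm shift counts are taken modulo the lane width, so the register path
// masks the count before moving it into an XMM register for the vector
// shift.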
#define ASSEMBLE_SIMD_SHIFT(opcode, width)                \
    XMMRegister dst = i.OutputSimd128Register();          \
    DCHECK_EQ(dst, i.InputSimd128Register(0));            \
    if (HasImmediateInput(instr, 1)) {                    \
      __ opcode(dst, dst, uint8_t{i.InputInt##width(1)}); \
      XMMRegister tmp = i.TempSimd128Register(0);         \
      Register tmp_shift = i.TempRegister(1);             \
      constexpr int mask = (1 << width) - 1;              \
      __ mov(tmp_shift, i.InputRegister(1));              \
      __ and_(tmp_shift, Immediate(mask));                \
      __ Movd(tmp, tmp_shift);                            \
      __ opcode(dst, dst, tmp);                           \
 
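// Lane insert: the AVX form is non-destructive (separate src and dst), while
// the SSE form overwrites its first operand, hence the DCHECK that dst
// equals src on that path.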
#define ASSEMBLE_SIMD_PINSR(OPCODE, CPU_FEATURE)             \
    XMMRegister dst = i.OutputSimd128Register();             \
    XMMRegister src = i.InputSimd128Register(0);             \
    int8_t laneidx = i.InputInt8(1);                         \
    if (HasAddressingMode(instr)) {                          \
      if (CpuFeatures::IsSupported(AVX)) {                   \
        CpuFeatureScope avx_scope(masm(), AVX);              \
        __ v##OPCODE(dst, src, i.MemoryOperand(2), laneidx); \
        DCHECK_EQ(dst, src);                                 \
        CpuFeatureScope sse_scope(masm(), CPU_FEATURE);      \
        __ OPCODE(dst, i.MemoryOperand(2), laneidx);         \
      if (CpuFeatures::IsSupported(AVX)) {                   \
        CpuFeatureScope avx_scope(masm(), AVX);              \
        __ v##OPCODE(dst, src, i.InputOperand(2), laneidx);  \
        DCHECK_EQ(dst, src);                                 \
        CpuFeatureScope sse_scope(masm(), CPU_FEATURE);      \
        __ OPCODE(dst, i.InputOperand(2), laneidx);          \
 
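// Adjusts esp so the stack matches the slot layout the tail-called frame
// expects: positive deltas allocate stack, negative ones (when shrinkage is
// allowed) release it.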
void AdjustStackPointerForTailCall(MacroAssembler* masm,
                                   FrameAccessState* state,
                                   int new_slot_above_sp,
                                   bool allow_shrinkage = true) {
  int current_sp_offset = state->GetSPToFPSlotCount() +

  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
  if (stack_slot_delta > 0) {

    state->IncreaseSPDelta(stack_slot_delta);
  } else if (allow_shrinkage && stack_slot_delta < 0) {

    state->IncreaseSPDelta(stack_slot_delta);
 
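// Pair atomics go through cmpxchg8b, whose outputs are fixed in edx:eax;
// when result projections are unused, temp registers stand in for them.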
bool VerifyOutputOfAtomicPairInstr(IA32OperandConverter* converter,
                                   const Instruction* instr) {

    return (converter->OutputRegister(0) == eax &&
            converter->OutputRegister(1) == edx);

    return (converter->OutputRegister(0) == eax &&
            converter->TempRegister(0) == edx) ||
           (converter->OutputRegister(0) == edx &&
            converter->TempRegister(0) == eax);

  return (converter->TempRegister(0) == eax &&
          converter->TempRegister(1) == edx);
 
                                              int first_unused_slot_offset) {

  ZoneVector<MoveOperands*> pushes(zone());

  if (!pushes.empty() &&

       first_unused_slot_offset)) {
    IA32OperandConverter g(this, instr);
    for (auto move : pushes) {
      LocationOperand destination_location(

      InstructionOperand source(move->source());

                                    destination_location.index());
      if (source.IsStackSlot()) {

        __ push(g.SlotToOperand(source_location.index()));
      } else if (source.IsRegister()) {

        __ push(source_location.GetRegister());
      } else if (source.IsImmediate()) {
        __ Push(Immediate(ImmediateOperand::cast(source).inline_int32_value()));

                                first_unused_slot_offset, false);

                                             int first_unused_slot_offset) {

                                first_unused_slot_offset);
 
  __ ComputeCodeStartAddress(eax);

#ifdef V8_ENABLE_LEAPTIERING
void CodeGenerator::AssembleDispatchHandleRegisterCheck() {

#ifdef V8_ENABLE_LEAPTIERING

    __ Assert(zero, AbortReason::kInvalidDeoptimizedCode);

  __ TailCallBuiltin(Builtin::kCompileLazyDeoptimizedCode);
 
    Instruction* instr) {
  IA32OperandConverter i(this, instr);

  switch (arch_opcode) {
 
    case kArchCallCodeObject: {
      InstructionOperand* op = instr->InputAt(0);
      if (op->IsImmediate()) {

        __ CallCodeObject(reg);

    case kArchCallBuiltinPointer: {

      Register builtin_index = i.InputRegister(0);

      __ CallBuiltinByIndex(builtin_index, target);
 
#if V8_ENABLE_WEBASSEMBLY
    case kArchCallWasmFunction:
    case kArchCallWasmFunctionIndirect: {
      if (arch_opcode == kArchCallWasmFunction) {

        Constant constant = i.ToConstant(instr->InputAt(0));

          __ wasm_call(wasm_code, constant.rmode());

          __ call(wasm_code, constant.rmode());

        __ CallWasmCodePointer(i.InputRegister(0));

    case kArchTailCallWasm:
    case kArchTailCallWasmIndirect: {
      if (arch_opcode == kArchTailCallWasm) {

        Constant constant = i.ToConstant(instr->InputAt(0));

        __ jmp(wasm_code, constant.rmode());
 
    case kArchTailCallCodeObject: {

        __ JumpCodeObject(reg);

    case kArchTailCallAddress: {

    case kArchCallJSFunction: {

      uint32_t num_arguments =
          i.InputUint32(instr->JSCallArgumentCountInputIndex());
      __ CallJSFunction(func, num_arguments);
 
    case kArchPrepareCallCFunction: {

      __ PrepareCallCFunction(num_gp_parameters + num_fp_parameters,

    case kArchSaveCallerRegisters: {

    case kArchRestoreCallerRegisters: {

    case kArchPrepareTailCall:
 
    case kArchCallCFunctionWithFrameState:
    case kArchCallCFunction: {

      Label return_location;

#if V8_ENABLE_WEBASSEMBLY
      if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {

        __ LoadLabelAddress(scratch, &return_location);
        __ mov(MemOperand(ebp, WasmExitFrameConstants::kCallingPCOffset),

        ExternalReference ref = i.InputExternalReference(0);

                                     set_isolate_data_slots, &return_location);

                                     set_isolate_data_slots, &return_location);

      bool const needs_frame_state =
          (arch_opcode == kArchCallCFunctionWithFrameState);
      if (needs_frame_state) {
 
    case kArchBinarySearchSwitch:

    case kArchTableSwitch:

      __ RecordComment(reinterpret_cast<const char*>(i.InputInt32(0)),

    case kArchAbortCSADcheck:
      DCHECK(i.InputRegister(0) == edx);

        __ CallBuiltin(Builtin::kAbortCSADcheck);
 
    case kArchDebugBreak:

    case kArchThrowTerminator:

    case kArchDeoptimize: {
      DeoptimizationExit* exit =

      __ jmp(exit->label());
 
    case kArchFramePointer:
      __ mov(i.OutputRegister(), ebp);

    case kArchParentFramePointer:

        __ mov(i.OutputRegister(), Operand(ebp, 0));

        __ mov(i.OutputRegister(), ebp);
 
#if V8_ENABLE_WEBASSEMBLY
    case kArchStackPointer:
      __ mov(i.OutputRegister(), esp);

    case kArchSetStackPointer:
      if (instr->InputAt(0)->IsRegister()) {
        __ mov(esp, i.InputRegister(0));

        __ mov(esp, i.InputOperand(0));

    case kArchStackPointerGreaterThan: {

        lhs_register = i.TempRegister(0);

      constexpr size_t kValueIndex = 0;
      if (HasAddressingMode(instr)) {
        __ cmp(lhs_register, i.MemoryOperand(kValueIndex));

        __ cmp(lhs_register, i.InputRegister(kValueIndex));
 
    case kArchStackCheckOffset:
 
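    // cvttsd2si produces 0x80000000 for NaN and out-of-range inputs; the
    // overflow branch below (paired with an elided compare) diverts those
    // cases to the OutOfLineTruncateDoubleToI slow path.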
    case kArchTruncateDoubleToI: {
      auto result = i.OutputRegister();
      auto input = i.InputDoubleRegister(0);
      auto ool = zone()->New<OutOfLineTruncateDoubleToI>(

      __ cvttsd2si(result, Operand(input));

      __ j(overflow, ool->entry());
      __ bind(ool->exit());
 
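    // The value is stored with a plain mov, or with xchg for the atomic
    // variant (xchg's implicit lock prefix makes it sequentially consistent);
    // the page-flag check then decides whether the out-of-line RecordWrite
    // path runs.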
    case kArchStoreWithWriteBarrier:
    case kArchAtomicStoreWithWriteBarrier: {

      Operand operand = i.MemoryOperand(&index);

        __ Check(not_equal, AbortReason::kOperandIsCleared);

      auto ool = zone()->New<OutOfLineRecordWrite>(this, object, operand, value,
                                                   scratch0, scratch1, mode,

      if (arch_opcode == kArchStoreWithWriteBarrier) {
        __ mov(operand, value);

        __ mov(scratch0, value);
        __ xchg(scratch0, operand);

        __ JumpIfSmi(value, ool->exit());

      __ CheckPageFlag(object, scratch0,

      __ bind(ool->exit());

    case kArchStoreIndirectWithWriteBarrier:

    case kArchStackSlot: {

      __ lea(i.OutputRegister(), Operand(base, offset.offset()));
 
    case kIeee754Float64Acos:

    case kIeee754Float64Acosh:

    case kIeee754Float64Asin:

    case kIeee754Float64Asinh:

    case kIeee754Float64Atan:

    case kIeee754Float64Atanh:

    case kIeee754Float64Atan2:

    case kIeee754Float64Cbrt:

    case kIeee754Float64Cos:

    case kIeee754Float64Cosh:

    case kIeee754Float64Expm1:

    case kIeee754Float64Exp:

    case kIeee754Float64Log:

    case kIeee754Float64Log1p:

    case kIeee754Float64Log2:

    case kIeee754Float64Log10:

    case kIeee754Float64Pow:

    case kIeee754Float64Sin:

    case kIeee754Float64Sinh:

    case kIeee754Float64Tan:

    case kIeee754Float64Tanh:
 
        __ imul(i.OutputRegister(), i.InputOperand(0), i.InputInt32(1));

        __ imul(i.OutputRegister(), i.InputOperand(1));

      __ imul(i.InputRegister(1));

      __ mul(i.InputRegister(1));
 
      __ idiv(i.InputOperand(1));
 
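      // Unsigned division takes the dividend in edx:eax, so edx is zeroed to
      // divide just the 32-bit value in eax.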
      __ Move(edx, Immediate(0));
      __ div(i.InputOperand(1));
 
      __ not_(i.OutputOperand());

      __ neg(i.OutputOperand());

        __ shl(i.OutputOperand(), i.InputInt5(1));

        __ shl_cl(i.OutputOperand());

        __ shr(i.OutputOperand(), i.InputInt5(1));

        __ shr_cl(i.OutputOperand());

        __ sar(i.OutputOperand(), i.InputInt5(1));

        __ sar_cl(i.OutputOperand());
 
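    // Pair add: low words with add, high words with adc so the carry
    // propagates; a temp is used when the low output register aliases one of
    // the high-word inputs.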
    case kIA32AddPair: {

      bool use_temp = false;

           i.OutputRegister(0).code() == i.InputRegister(1).code()) ||
          i.OutputRegister(0).code() == i.InputRegister(3).code()) {

        __ Move(i.TempRegister(0), i.InputRegister(0));
        __ add(i.TempRegister(0), i.InputRegister(2));

        __ add(i.OutputRegister(0), i.InputRegister(2));

      i.MoveInstructionOperandToRegister(i.OutputRegister(1),

      __ adc(i.OutputRegister(1), Operand(i.InputRegister(3)));

        __ Move(i.OutputRegister(0), i.TempRegister(0));
 
    case kIA32SubPair: {

      bool use_temp = false;

           i.OutputRegister(0).code() == i.InputRegister(1).code()) ||
          i.OutputRegister(0).code() == i.InputRegister(3).code()) {

        __ Move(i.TempRegister(0), i.InputRegister(0));
        __ sub(i.TempRegister(0), i.InputRegister(2));

        __ sub(i.OutputRegister(0), i.InputRegister(2));

      i.MoveInstructionOperandToRegister(i.OutputRegister(1),

      __ sbb(i.OutputRegister(1), Operand(i.InputRegister(3)));

        __ Move(i.OutputRegister(0), i.TempRegister(0));
 
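    // Pair multiply builds the 64-bit product from 32-bit pieces: the two
    // cross products are accumulated into the high word around a widening
    // mul of the low words.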
    case kIA32MulPair: {
      __ imul(i.OutputRegister(1), i.InputOperand(0));
      i.MoveInstructionOperandToRegister(i.TempRegister(0), instr->InputAt(1));
      __ imul(i.TempRegister(0), i.InputOperand(2));
      __ add(i.OutputRegister(1), i.TempRegister(0));
      __ mov(i.OutputRegister(0), i.InputOperand(0));

      __ mul(i.InputRegister(2));
      __ add(i.OutputRegister(1), i.TempRegister(0));
 
        __ ShlPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));

        __ ShlPair_cl(i.InputRegister(1), i.InputRegister(0));

        __ ShrPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));

        __ ShrPair_cl(i.InputRegister(1), i.InputRegister(0));

        __ SarPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));

        __ SarPair_cl(i.InputRegister(1), i.InputRegister(0));
 
        __ rol(i.OutputOperand(), i.InputInt5(1));

        __ rol_cl(i.OutputOperand());

        __ ror(i.OutputOperand(), i.InputInt5(1));

        __ ror_cl(i.OutputOperand());

      __ Lzcnt(i.OutputRegister(), i.InputOperand(0));

      __ Tzcnt(i.OutputRegister(), i.InputOperand(0));

      __ Popcnt(i.OutputRegister(), i.InputOperand(0));

      __ bswap(i.OutputRegister());
 
    case kIA32Float32Cmp:
      __ Ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));

    case kIA32Float32Sqrt:
      __ Sqrtss(i.OutputDoubleRegister(), i.InputOperand(0));

    case kIA32Float32Round: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);

      __ Roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);

    case kIA32Float64Cmp:
      __ Ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
 
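    // Float max must propagate NaN and order -0 below +0, which a bare
    // ucomiss cannot express: the sign of the first operand decides whether
    // to swap, and unordered inputs branch to the NaN-loading out-of-line
    // path.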
    case kIA32Float32Max: {
      Label compare_swap, done_compare;
      if (instr->InputAt(1)->IsFPRegister()) {
        __ Ucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));

        __ Ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));

          zone()->New<OutOfLineLoadFloat32NaN>(this, i.OutputDoubleRegister());

      __ Movmskps(i.TempRegister(0), i.InputDoubleRegister(0));
      __ test(i.TempRegister(0), Immediate(1));

      __ bind(&compare_swap);
      if (instr->InputAt(1)->IsFPRegister()) {
        __ Movss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));

        __ Movss(i.InputDoubleRegister(0), i.InputOperand(1));

      __ bind(&done_compare);
      __ bind(ool->exit());
 
    case kIA32Float64Max: {
      Label compare_swap, done_compare;
      if (instr->InputAt(1)->IsFPRegister()) {
        __ Ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));

        __ Ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));

          zone()->New<OutOfLineLoadFloat64NaN>(this, i.OutputDoubleRegister());

      __ Movmskpd(i.TempRegister(0), i.InputDoubleRegister(0));
      __ test(i.TempRegister(0), Immediate(1));

      __ bind(&compare_swap);
      if (instr->InputAt(1)->IsFPRegister()) {
        __ Movsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));

        __ Movsd(i.InputDoubleRegister(0), i.InputOperand(1));

      __ bind(&done_compare);
      __ bind(ool->exit());
 
    case kIA32Float32Min: {
      Label compare_swap, done_compare;
      if (instr->InputAt(1)->IsFPRegister()) {
        __ Ucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));

        __ Ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));

          zone()->New<OutOfLineLoadFloat32NaN>(this, i.OutputDoubleRegister());

      if (instr->InputAt(1)->IsFPRegister()) {
        __ Movmskps(i.TempRegister(0), i.InputDoubleRegister(1));

      __ test(i.TempRegister(0), Immediate(1));

      __ bind(&compare_swap);
      if (instr->InputAt(1)->IsFPRegister()) {
        __ Movss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));

        __ Movss(i.InputDoubleRegister(0), i.InputOperand(1));

      __ bind(&done_compare);
      __ bind(ool->exit());
 
    case kIA32Float64Min: {
      Label compare_swap, done_compare;
      if (instr->InputAt(1)->IsFPRegister()) {
        __ Ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));

        __ Ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));

          zone()->New<OutOfLineLoadFloat64NaN>(this, i.OutputDoubleRegister());

      if (instr->InputAt(1)->IsFPRegister()) {
        __ Movmskpd(i.TempRegister(0), i.InputDoubleRegister(1));

      __ test(i.TempRegister(0), Immediate(1));

      __ bind(&compare_swap);
      if (instr->InputAt(1)->IsFPRegister()) {
        __ Movsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));

        __ Movsd(i.InputDoubleRegister(0), i.InputOperand(1));

      __ bind(&done_compare);
      __ bind(ool->exit());
 
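    // Float64Mod runs on the x87 stack: both operands are spilled and
    // fld_d'ed, an (elided) fprem loop iterates until the partial remainder
    // completes, and the result is stored back into an XMM register.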
    case kIA32Float64Mod: {

      __ Movsd(Operand(esp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(esp, 0));
      __ Movsd(Operand(esp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(esp, 0));

      __ fstp_d(Operand(esp, 0));
      __ Movsd(i.OutputDoubleRegister(), Operand(esp, 0));
 
    case kIA32Float64Sqrt:
      __ Sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));

    case kIA32Float64Round: {

      __ Roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
 
    case kIA32Float32ToFloat64:
      __ Cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));

    case kIA32Float64ToFloat32:
      __ Cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));

    case kIA32Float32ToInt32:
      __ Cvttss2si(i.OutputRegister(), i.InputOperand(0));

    case kIA32Float32ToUint32:
      __ Cvttss2ui(i.OutputRegister(), i.InputOperand(0),
                   i.TempSimd128Register(0));

    case kIA32Float64ToInt32:
      __ Cvttsd2si(i.OutputRegister(), i.InputOperand(0));

    case kIA32Float64ToUint32:
      __ Cvttsd2ui(i.OutputRegister(), i.InputOperand(0),
                   i.TempSimd128Register(0));
 
    case kSSEInt32ToFloat32:

      __ cvtsi2ss(i.OutputDoubleRegister(), i.InputOperand(0));

    case kIA32Uint32ToFloat32:
      __ Cvtui2ss(i.OutputDoubleRegister(), i.InputOperand(0),

    case kSSEInt32ToFloat64:

      __ cvtsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));

    case kIA32Uint32ToFloat64:
      __ Cvtui2sd(i.OutputDoubleRegister(), i.InputOperand(0),
 
    case kIA32Float64ExtractLowWord32:
      if (instr->InputAt(0)->IsFPStackSlot()) {
        __ mov(i.OutputRegister(), i.InputOperand(0));

        __ Movd(i.OutputRegister(), i.InputDoubleRegister(0));

    case kIA32Float64ExtractHighWord32:
      if (instr->InputAt(0)->IsFPStackSlot()) {

        __ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);

    case kIA32Float64InsertLowWord32:
      __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0);

    case kIA32Float64InsertHighWord32:
      __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1);

    case kIA32Float64FromWord32Pair:
      __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(0), 0);
      __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1);

    case kIA32Float64LoadLowWord32:
      __ Movd(i.OutputDoubleRegister(), i.InputOperand(0));
 
      __ Addss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),

      __ Subss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),

      __ Mulss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),

      __ Divss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),

      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
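      // The self-movaps is deliberate: it breaks a false dependency on the
      // previous value of the output register, which can help on some CPUs.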
 
      __ Addsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),

      __ Subsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),

      __ Mulsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),

      __ Divsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),

      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());

      __ Absps(i.OutputDoubleRegister(), i.InputDoubleRegister(0),

      __ Negps(i.OutputDoubleRegister(), i.InputDoubleRegister(0),

      __ Abspd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),

      __ Negpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
 
    case kIA32Float64SilenceNaN:
 
      Operand operand = i.MemoryOperand(&index);

        __ mov_b(operand, i.InputInt8(index));

        __ mov_b(operand, i.InputRegister(index));

      Operand operand = i.MemoryOperand(&index);

        __ mov_w(operand, i.InputInt16(index));

        __ mov_w(operand, i.InputRegister(index));
 
      if (instr->HasOutput()) {
        __ mov(i.OutputRegister(), i.MemoryOperand());

        Operand operand = i.MemoryOperand(&index);

          __ Move(operand, i.InputImmediate(index));

          __ mov(operand, i.InputRegister(index));

      if (instr->HasOutput()) {
        __ Movsd(i.OutputDoubleRegister(), i.MemoryOperand());

        Operand operand = i.MemoryOperand(&index);
        __ Movsd(operand, i.InputDoubleRegister(index));

      if (instr->HasOutput()) {
        __ Movss(i.OutputDoubleRegister(), i.MemoryOperand());

        Operand operand = i.MemoryOperand(&index);
        __ Movss(operand, i.InputDoubleRegister(index));

      if (instr->HasOutput()) {
        __ Movdqu(i.OutputSimd128Register(), i.MemoryOperand());

        Operand operand = i.MemoryOperand(&index);
        __ Movdqu(operand, i.InputSimd128Register(index));
 
    case kIA32BitcastFI:
      if (instr->InputAt(0)->IsFPStackSlot()) {
        __ mov(i.OutputRegister(), i.InputOperand(0));

        __ Movd(i.OutputRegister(), i.InputDoubleRegister(0));

    case kIA32BitcastIF:

        __ Movd(i.OutputDoubleRegister(), i.InputRegister(0));

        __ Movss(i.OutputDoubleRegister(), i.InputOperand(0));
 
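    // kIA32Lea strength-reduces simple addressing modes to add/sub/shl when
    // the output register already holds one of the inputs, and only falls
    // back to an actual lea otherwise.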
      if (mode == kMode_MI) {
        __ Move(i.OutputRegister(), Immediate(i.InputInt32(0)));
      } else if (i.InputRegister(0) == i.OutputRegister()) {
        if (mode == kMode_MRI) {
          int32_t constant_summand = i.InputInt32(1);
          if (constant_summand > 0) {
            __ add(i.OutputRegister(), Immediate(constant_summand));
          } else if (constant_summand < 0) {
            __ sub(i.OutputRegister(),

        } else if (mode == kMode_MR1) {
          if (i.InputRegister(1) == i.OutputRegister()) {
            __ shl(i.OutputRegister(), 1);

            __ add(i.OutputRegister(), i.InputRegister(1));

        } else if (mode == kMode_M2) {
          __ shl(i.OutputRegister(), 1);
        } else if (mode == kMode_M4) {
          __ shl(i.OutputRegister(), 2);
        } else if (mode == kMode_M8) {
          __ shl(i.OutputRegister(), 3);

          __ lea(i.OutputRegister(), i.MemoryOperand());

      } else if (mode == kMode_MR1 &&
                 i.InputRegister(1) == i.OutputRegister()) {
        __ add(i.OutputRegister(), i.InputRegister(0));

        __ lea(i.OutputRegister(), i.MemoryOperand());
 
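    // Push first claims any extra stack the instruction accounted for, then
    // stores the operand at esp using the store form that matches its
    // register class.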
      int stack_decrement = i.InputInt32(0);

      if (HasAddressingMode(instr)) {

        Operand operand = i.MemoryOperand(&index);

        InstructionOperand* input = instr->InputAt(1);
        if (input->IsRegister()) {

        } else if (input->IsFloatRegister()) {

          __ AllocateStackSpace(stack_decrement);
          __ Movss(Operand(esp, 0), i.InputDoubleRegister(1));
        } else if (input->IsDoubleRegister()) {

          __ AllocateStackSpace(stack_decrement);
          __ Movsd(Operand(esp, 0), i.InputDoubleRegister(1));
        } else if (input->IsSimd128Register()) {

          __ AllocateStackSpace(stack_decrement);

          __ Movups(Operand(esp, 0), i.InputSimd128Register(1));
        } else if (input->IsStackSlot() || input->IsFloatStackSlot()) {

        } else if (input->IsDoubleStackSlot()) {

          __ AllocateStackSpace(stack_decrement);

          DCHECK(input->IsSimd128StackSlot());

          __ AllocateStackSpace(stack_decrement);
 
      int reverse_slot = i.InputInt32(0);

      if (instr->OutputAt(0)->IsFPRegister()) {

          __ Movsd(i.OutputDoubleRegister(), Operand(ebp, offset));

          __ Movss(i.OutputFloatRegister(), Operand(ebp, offset));

          __ Movdqu(i.OutputSimd128Register(), Operand(ebp, offset));

        __ mov(i.OutputRegister(), Operand(ebp, offset));
 
    case kIA32F64x2Splat: {
      __ Movddup(i.OutputSimd128Register(), i.InputDoubleRegister(0));

    case kIA32F64x2ExtractLane: {
      __ F64x2ExtractLane(i.OutputDoubleRegister(), i.InputDoubleRegister(0),

    case kIA32F64x2ReplaceLane: {
      __ F64x2ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0),
                          i.InputDoubleRegister(2), i.InputInt8(1));

    case kIA32F64x2Sqrt: {
      __ Sqrtpd(i.OutputSimd128Register(), i.InputOperand(0));
 
    case kIA32F64x2Add: {
      __ Addpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),

    case kIA32F64x2Sub: {
      __ Subpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),

    case kIA32F64x2Mul: {
      __ Mulpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),

    case kIA32F64x2Div: {
      __ Divpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),

    case kIA32F64x2Min: {
      __ F64x2Min(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32F64x2Max: {
      __ F64x2Max(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32F64x2Eq: {
      __ Cmpeqpd(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32F64x2Ne: {
      __ Cmpneqpd(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32F64x2Lt: {
      __ Cmpltpd(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32F64x2Le: {
      __ Cmplepd(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32F64x2Qfma: {
      __ F64x2Qfma(i.OutputSimd128Register(), i.InputSimd128Register(0),
                   i.InputSimd128Register(1), i.InputSimd128Register(2),

    case kIA32F64x2Qfms: {
      __ F64x2Qfms(i.OutputSimd128Register(), i.InputSimd128Register(0),
                   i.InputSimd128Register(1), i.InputSimd128Register(2),

      __ Minpd(i.OutputSimd128Register(), i.InputSimd128Register(0),
               i.InputSimd128Register(1));

      __ Maxpd(i.OutputSimd128Register(), i.InputSimd128Register(0),
               i.InputSimd128Register(1));

    case kIA32F64x2Round: {

      __ Roundpd(i.OutputSimd128Register(), i.InputDoubleRegister(0), mode);
 
    case kIA32F64x2PromoteLowF32x4: {
      if (HasAddressingMode(instr)) {
        __ Cvtps2pd(i.OutputSimd128Register(), i.MemoryOperand());

        __ Cvtps2pd(i.OutputSimd128Register(), i.InputSimd128Register(0));

    case kIA32F32x4DemoteF64x2Zero: {
      __ Cvtpd2ps(i.OutputSimd128Register(), i.InputSimd128Register(0));

    case kIA32I32x4TruncSatF64x2SZero: {
      __ I32x4TruncSatF64x2SZero(i.OutputSimd128Register(),

    case kIA32I32x4TruncSatF64x2UZero: {
      __ I32x4TruncSatF64x2UZero(i.OutputSimd128Register(),

    case kIA32F64x2ConvertLowI32x4S: {
      __ Cvtdq2pd(i.OutputSimd128Register(), i.InputSimd128Register(0));

    case kIA32F64x2ConvertLowI32x4U: {
      __ F64x2ConvertLowI32x4U(i.OutputSimd128Register(),
                               i.InputSimd128Register(0), i.TempRegister(0));
 
    case kIA32I64x2ExtMulLowI32x4S: {
      __ I64x2ExtMul(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I64x2ExtMulHighI32x4S: {
      __ I64x2ExtMul(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I64x2ExtMulLowI32x4U: {
      __ I64x2ExtMul(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I64x2ExtMulHighI32x4U: {
      __ I64x2ExtMul(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I32x4ExtMulLowI16x8S: {
      __ I32x4ExtMul(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I32x4ExtMulHighI16x8S: {
      __ I32x4ExtMul(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I32x4ExtMulLowI16x8U: {
      __ I32x4ExtMul(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I32x4ExtMulHighI16x8U: {
      __ I32x4ExtMul(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I16x8ExtMulLowI8x16S: {
      __ I16x8ExtMulLow(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I16x8ExtMulHighI8x16S: {
      __ I16x8ExtMulHighS(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I16x8ExtMulLowI8x16U: {
      __ I16x8ExtMulLow(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I16x8ExtMulHighI8x16U: {
      __ I16x8ExtMulHighU(i.OutputSimd128Register(), i.InputSimd128Register(0),
 
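    // A 64-bit splat from a 32-bit pair: two pinsrd inserts build the low
    // qword, then pshufd with mask 0x44 copies lanes {0,1} into lanes {2,3}.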
    case kIA32I64x2SplatI32Pair: {
      XMMRegister dst = i.OutputSimd128Register();
      __ Pinsrd(dst, i.InputRegister(0), 0);
      __ Pinsrd(dst, i.InputOperand(1), 1);
      __ Pshufd(dst, dst, uint8_t{0x44});

    case kIA32I64x2ReplaceLaneI32Pair: {
      int8_t lane = i.InputInt8(1);
      __ Pinsrd(i.OutputSimd128Register(), i.InputOperand(2), lane * 2);
      __ Pinsrd(i.OutputSimd128Register(), i.InputOperand(3), lane * 2 + 1);
 
    case kIA32I64x2Abs: {
      __ I64x2Abs(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I64x2Neg: {
      __ I64x2Neg(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I64x2Shl: {

    case kIA32I64x2ShrS: {
      XMMRegister dst = i.OutputSimd128Register();
      XMMRegister src = i.InputSimd128Register(0);

                     i.TempSimd128Register(0), i.TempRegister(1));
 
    case kIA32I64x2Add: {
      __ Paddq(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I64x2Sub: {
      __ Psubq(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I64x2Mul: {
      __ I64x2Mul(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1), i.TempSimd128Register(0),
                  i.TempSimd128Register(1));

    case kIA32I64x2ShrU: {

    case kIA32I64x2BitMask: {
      __ Movmskpd(i.OutputRegister(), i.InputSimd128Register(0));
 
    case kIA32I64x2Eq: {
      __ Pcmpeqq(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I64x2Ne: {
      __ Pcmpeqq(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I64x2GtS: {
      __ I64x2GtS(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I64x2GeS: {
      __ I64x2GeS(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I64x2SConvertI32x4Low: {
      __ Pmovsxdq(i.OutputSimd128Register(), i.InputSimd128Register(0));

    case kIA32I64x2SConvertI32x4High: {
      __ I64x2SConvertI32x4High(i.OutputSimd128Register(),
                                i.InputSimd128Register(0));

    case kIA32I64x2UConvertI32x4Low: {
      __ Pmovzxdq(i.OutputSimd128Register(), i.InputSimd128Register(0));

    case kIA32I64x2UConvertI32x4High: {
      __ I64x2UConvertI32x4High(i.OutputSimd128Register(),
 
    case kIA32I32x4ExtAddPairwiseI16x8S: {
      __ I32x4ExtAddPairwiseI16x8S(i.OutputSimd128Register(),
                                   i.InputSimd128Register(0),

    case kIA32I32x4ExtAddPairwiseI16x8U: {
      __ I32x4ExtAddPairwiseI16x8U(i.OutputSimd128Register(),
                                   i.InputSimd128Register(0),

    case kIA32I16x8ExtAddPairwiseI8x16S: {
      __ I16x8ExtAddPairwiseI8x16S(i.OutputSimd128Register(),

    case kIA32I16x8ExtAddPairwiseI8x16U: {
      __ I16x8ExtAddPairwiseI8x16U(i.OutputSimd128Register(),
                                   i.InputSimd128Register(0),

    case kIA32I16x8Q15MulRSatS: {
      __ I16x8Q15MulRSatS(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I16x8RelaxedQ15MulRS: {
      __ Pmulhrsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));

    case kIA32I16x8DotI8x16I7x16S: {
      __ I16x8DotI8x16I7x16S(i.OutputSimd128Register(),
                             i.InputSimd128Register(0),
                             i.InputSimd128Register(1));

    case kIA32I32x4DotI8x16I7x16AddS: {
      __ I32x4DotI8x16I7x16AddS(
          i.OutputSimd128Register(), i.InputSimd128Register(0),
          i.InputSimd128Register(1), i.InputSimd128Register(2),
 
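    // insertps encodes the destination lane in bits 5:4 of its immediate,
    // hence the InputInt8(1) << 4 below.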
    case kIA32F32x4Splat: {
      __ F32x4Splat(i.OutputSimd128Register(), i.InputDoubleRegister(0));

    case kIA32F32x4ExtractLane: {
      __ F32x4ExtractLane(i.OutputFloatRegister(), i.InputSimd128Register(0),

    case kIA32Insertps: {

        CpuFeatureScope avx_scope(masm(), AVX);
        __ vinsertps(i.OutputSimd128Register(), i.InputSimd128Register(0),
                     i.InputOperand(2), i.InputInt8(1) << 4);

        DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
        CpuFeatureScope sse_scope(masm(), SSE4_1);
        __ insertps(i.OutputSimd128Register(), i.InputOperand(2),
                    i.InputInt8(1) << 4);
 
    case kIA32F32x4SConvertI32x4: {
      __ Cvtdq2ps(i.OutputSimd128Register(), i.InputOperand(0));

    case kIA32F32x4UConvertI32x4: {
      XMMRegister dst = i.OutputSimd128Register();
      XMMRegister src = i.InputSimd128Register(0);
 
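      // Unsigned-to-float trick: halve the value with psrld so it fits the
      // signed range, convert with cvtdq2ps, then double it with addps (the
      // elided lines above compensate for the dropped low bit).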
      __ Psrld(dst, dst, uint8_t{1});
      __ Cvtdq2ps(dst, dst);
      __ Addps(dst, dst, dst);
 
    case kIA32F32x4Sqrt: {
      __ Sqrtps(i.OutputSimd128Register(), i.InputSimd128Register(0));

    case kIA32F32x4Add: {
      __ Addps(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32F32x4Sub: {
      __ Subps(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32F32x4Mul: {
      __ Mulps(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32F32x4Div: {
      __ Divps(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32F32x4Min: {
      __ F32x4Min(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32F32x4Max: {
      __ F32x4Max(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32F32x4Eq: {
      __ Cmpeqps(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32F32x4Ne: {
      __ Cmpneqps(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32F32x4Lt: {
      __ Cmpltps(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32F32x4Le: {
      __ Cmpleps(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32F32x4Qfma: {
      __ F32x4Qfma(i.OutputSimd128Register(), i.InputSimd128Register(0),
                   i.InputSimd128Register(1), i.InputSimd128Register(2),

    case kIA32F32x4Qfms: {
      __ F32x4Qfms(i.OutputSimd128Register(), i.InputSimd128Register(0),
                   i.InputSimd128Register(1), i.InputSimd128Register(2),

      __ Minps(i.OutputSimd128Register(), i.InputSimd128Register(0),
               i.InputSimd128Register(1));

      __ Maxps(i.OutputSimd128Register(), i.InputSimd128Register(0),
               i.InputSimd128Register(1));

    case kIA32F32x4Round: {

      __ Roundps(i.OutputSimd128Register(), i.InputDoubleRegister(0), mode);
 
    case kIA32I32x4Splat: {
      XMMRegister dst = i.OutputSimd128Register();
      __ Movd(dst, i.InputOperand(0));
      __ Pshufd(dst, dst, uint8_t{0x0});

    case kIA32I32x4ExtractLane: {
      __ Pextrd(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
 
    case kIA32I32x4SConvertF32x4: {
      __ I32x4SConvertF32x4(i.OutputSimd128Register(),

    case kIA32I32x4SConvertI16x8Low: {
      __ Pmovsxwd(i.OutputSimd128Register(), i.InputOperand(0));

    case kIA32I32x4SConvertI16x8High: {
      __ I32x4SConvertI16x8High(i.OutputSimd128Register(),
                                i.InputSimd128Register(0));

    case kIA32I32x4Neg: {
      XMMRegister dst = i.OutputSimd128Register();
      Operand src = i.InputOperand(0);
      if (src.is_reg(dst)) {
 
    case kIA32I32x4Shl: {

    case kIA32I32x4ShrS: {

    case kIA32I32x4Add: {
      __ Paddd(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I32x4Sub: {
      __ Psubd(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I32x4Mul: {
      __ Pmulld(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I32x4MinS: {
      __ Pminsd(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I32x4MaxS: {
      __ Pmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
 
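    // SSE has no pcmpne/pcmpge for dwords: Ne is Eq followed by an xor with
    // all-ones, and GeS is synthesized as min(a, b) == b.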
    case kIA32I32x4Eq: {
      __ Pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I32x4Ne: {
      __ Pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(0),

      __ Pxor(i.OutputSimd128Register(), i.OutputSimd128Register(),

    case kIA32I32x4GtS: {
      __ Pcmpgtd(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I32x4GeS: {
      XMMRegister dst = i.OutputSimd128Register();
      XMMRegister src1 = i.InputSimd128Register(0);
      XMMRegister src2 = i.InputSimd128Register(1);

        CpuFeatureScope avx_scope(masm(), AVX);

        CpuFeatureScope sse_scope(masm(), SSE4_1);
        __ pminsd(dst, src2);
        __ pcmpeqd(dst, src2);
 
    case kSSEI32x4UConvertF32x4: {
      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      XMMRegister dst = i.OutputSimd128Register();
      XMMRegister tmp = i.TempSimd128Register(0);
      XMMRegister tmp2 = i.TempSimd128Register(1);
      __ I32x4TruncF32x4U(dst, dst, tmp, tmp2);

    case kAVXI32x4UConvertF32x4: {
      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
      CpuFeatureScope avx_scope(masm(), AVX);
      XMMRegister dst = i.OutputSimd128Register();
      XMMRegister tmp = i.TempSimd128Register(0);

      __ vcvttps2dq(tmp, tmp);

      __ vcvttps2dq(dst, dst);

      __ vpaddd(dst, dst, tmp);
 
    case kIA32I32x4UConvertI16x8Low: {
      __ Pmovzxwd(i.OutputSimd128Register(), i.InputOperand(0));

    case kIA32I32x4UConvertI16x8High: {
      __ I32x4UConvertI16x8High(i.OutputSimd128Register(),

    case kIA32I32x4ShrU: {

    case kIA32I32x4MinU: {
      __ Pminud(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I32x4MaxU: {
      __ Pmaxud(i.OutputSimd128Register(), i.InputSimd128Register(0),
 
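    // Unsigned compares are synthesized the same way: GeU is min(a, b) == b
    // via pminud/pcmpeqd, and GtU computes a <= b with pmaxud/pcmpeqd and
    // then flips the mask (flip elided here).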
    case kSSEI32x4GtU: {
      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      XMMRegister dst = i.OutputSimd128Register();
      Operand src = i.InputOperand(1);
      __ pmaxud(dst, src);
      __ pcmpeqd(dst, src);

    case kAVXI32x4GtU: {
      CpuFeatureScope avx_scope(masm(), AVX);
      XMMRegister dst = i.OutputSimd128Register();
      XMMRegister src1 = i.InputSimd128Register(0);
      Operand src2 = i.InputOperand(1);

    case kSSEI32x4GeU: {
      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      XMMRegister dst = i.OutputSimd128Register();
      Operand src = i.InputOperand(1);
      __ pminud(dst, src);
      __ pcmpeqd(dst, src);

    case kAVXI32x4GeU: {
      CpuFeatureScope avx_scope(masm(), AVX);
      XMMRegister src1 = i.InputSimd128Register(0);
      Operand src2 = i.InputOperand(1);
 
    case kIA32I32x4Abs: {
      __ Pabsd(i.OutputSimd128Register(), i.InputSimd128Register(0));

    case kIA32I32x4BitMask: {
      __ Movmskps(i.OutputRegister(), i.InputSimd128Register(0));

    case kIA32I32x4DotI16x8S: {
      __ Pmaddwd(i.OutputSimd128Register(), i.InputSimd128Register(0),
 
    case kIA32I16x8Splat: {
      if (instr->InputAt(0)->IsRegister()) {
        __ I16x8Splat(i.OutputSimd128Register(), i.InputRegister(0));

        __ I16x8Splat(i.OutputSimd128Register(), i.InputOperand(0));
 
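    // pextrw zero-extends the selected lane, so the signed extract follows
    // it with movsx_w to restore the sign.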
    case kIA32I16x8ExtractLaneS: {

      __ Pextrw(dst, i.InputSimd128Register(0), i.InputUint8(1));
      __ movsx_w(dst, dst);

    case kIA32I16x8SConvertI8x16Low: {
      __ Pmovsxbw(i.OutputSimd128Register(), i.InputOperand(0));

    case kIA32I16x8SConvertI8x16High: {
      __ I16x8SConvertI8x16High(i.OutputSimd128Register(),
                                i.InputSimd128Register(0));
 
    case kIA32I16x8Neg: {
      XMMRegister dst = i.OutputSimd128Register();
      Operand src = i.InputOperand(0);
      if (src.is_reg(dst)) {

    case kIA32I16x8Shl: {

    case kIA32I16x8ShrS: {

    case kIA32I16x8SConvertI32x4: {
      __ Packssdw(i.OutputSimd128Register(), i.InputSimd128Register(0),
 
    case kIA32I16x8Add: {
      __ Paddw(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I16x8AddSatS: {
      __ Paddsw(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I16x8Sub: {
      __ Psubw(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I16x8SubSatS: {
      __ Psubsw(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I16x8Mul: {
      __ Pmullw(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I16x8MinS: {
      __ Pminsw(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I16x8MaxS: {
      __ Pmaxsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
 
    case kIA32I16x8Eq: {
      __ Pcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(0),

      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
      __ pcmpeqw(i.OutputSimd128Register(), i.InputOperand(1));

      CpuFeatureScope avx_scope(masm(), AVX);
      __ vpcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(0),

      __ vpxor(i.OutputSimd128Register(), i.OutputSimd128Register(),

    case kIA32I16x8GtS: {
      __ Pcmpgtw(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kSSEI16x8GeS: {
      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
      XMMRegister dst = i.OutputSimd128Register();
      Operand src = i.InputOperand(1);
      __ pminsw(dst, src);
      __ pcmpeqw(dst, src);

    case kAVXI16x8GeS: {
      CpuFeatureScope avx_scope(masm(), AVX);
      XMMRegister src1 = i.InputSimd128Register(0);
      Operand src2 = i.InputOperand(1);
 
    case kIA32I16x8UConvertI8x16Low: {
      __ Pmovzxbw(i.OutputSimd128Register(), i.InputOperand(0));

    case kIA32I16x8UConvertI8x16High: {
      __ I16x8UConvertI8x16High(i.OutputSimd128Register(),

    case kIA32I16x8ShrU: {

    case kIA32I16x8UConvertI32x4: {
      __ Packusdw(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));

    case kIA32I16x8AddSatU: {
      __ Paddusw(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I16x8SubSatU: {
      __ Psubusw(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I16x8MinU: {
      __ Pminuw(i.OutputSimd128Register(), i.InputSimd128Register(0),

    case kIA32I16x8MaxU: {
      __ Pmaxuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
 
    case kSSEI16x8GtU: {
      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      XMMRegister dst = i.OutputSimd128Register();
      Operand src = i.InputOperand(1);
      __ pmaxuw(dst, src);
      __ pcmpeqw(dst, src);

    case kAVXI16x8GtU: {
      CpuFeatureScope avx_scope(masm(), AVX);
      XMMRegister dst = i.OutputSimd128Register();
      XMMRegister src1 = i.InputSimd128Register(0);
      Operand src2 = i.InputOperand(1);

    case kSSEI16x8GeU: {
      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      XMMRegister dst = i.OutputSimd128Register();
      Operand src = i.InputOperand(1);
      __ pminuw(dst, src);
      __ pcmpeqw(dst, src);

    case kAVXI16x8GeU: {
      CpuFeatureScope avx_scope(masm(), AVX);
      XMMRegister src1 = i.InputSimd128Register(0);
      Operand src2 = i.InputOperand(1);
 
 2681    case kIA32I16x8RoundingAverageU: {
 
 2682      __ Pavgw(
i.OutputSimd128Register(), 
i.InputSimd128Register(0),
 
 2686    case kIA32I16x8Abs: {
 
 2687      __ Pabsw(
i.OutputSimd128Register(), 
i.InputSimd128Register(0));
 
 2690    case kIA32I16x8BitMask: {
 
 2692      XMMRegister tmp = 
i.TempSimd128Register(0);
 
 2693      __ Packsswb(tmp, 
i.InputSimd128Register(0));
 
 2694      __ Pmovmskb(dst, tmp);
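      // pmovmskb can only harvest byte sign bits, so the eight words are
      // first narrowed with a saturating pack, which preserves each word's
      // sign in the corresponding byte; the elided tail shifts the 16-bit
      // pmovmskb result so only the 8 bits derived from the input remain.
      // Per-lane model (sketch): bit k of the mask is (lane[k] < 0) ? 1 : 0.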
 
      // ... (elided: shift the packed bitmask into the low byte)
    }
    case kIA32I8x16Splat: {
      if (instr->InputAt(0)->IsRegister()) {
        __ I8x16Splat(i.OutputSimd128Register(), i.InputRegister(0),
                      kScratchDoubleReg);
      } else {
        __ I8x16Splat(i.OutputSimd128Register(), i.InputOperand(0),
                      kScratchDoubleReg);
      }
      break;
    }
    case kIA32I8x16ExtractLaneS: {
      Register dst = i.OutputRegister();
      __ Pextrb(dst, i.InputSimd128Register(0), i.InputUint8(1));
      __ movsx_b(dst, dst);
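      // pextrb zero-extends the extracted byte, so the extra movsx_b yields
      // the signed extract_lane_s semantics. Scalar model (an illustrative
      // sketch; ExtractLaneS below is not a V8 helper):
      //
      //   int32_t ExtractLaneS(const uint8_t lanes[16], int idx) {
      //     return static_cast<int8_t>(lanes[idx]);  // pextrb + movsx_b
      //   }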
 
      break;
    }
    case kIA32Movlps: {
      if (instr->HasOutput()) {
        __ Movlps(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.MemoryOperand(2));
      } else {
        size_t index = 0;
        Operand dst = i.MemoryOperand(&index);
        __ Movlps(dst, i.InputSimd128Register(index));
      }
      break;
    }
    case kIA32Movhps: {
      if (instr->HasOutput()) {
        __ Movhps(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.MemoryOperand(2));
      } else {
        size_t index = 0;
        Operand dst = i.MemoryOperand(&index);
        __ Movhps(dst, i.InputSimd128Register(index));
      }
      break;
    }
    case kIA32Pextrb: {
      if (HasAddressingMode(instr)) {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ Pextrb(operand, i.InputSimd128Register(index),
                  i.InputUint8(index + 1));
      } else {
        Register dst = i.OutputRegister();
        __ Pextrb(dst, i.InputSimd128Register(0), i.InputUint8(1));
      }
      break;
    }
    case kIA32Pextrw: {
      if (HasAddressingMode(instr)) {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ Pextrw(operand, i.InputSimd128Register(index),
                  i.InputUint8(index + 1));
      } else {
        Register dst = i.OutputRegister();
        __ Pextrw(dst, i.InputSimd128Register(0), i.InputUint8(1));
      }
      break;
    }
    case kIA32S128Store32Lane: {
      size_t index = 0;
      Operand operand = i.MemoryOperand(&index);
      uint8_t laneidx = i.InputUint8(index + 1);
      __ S128Store32Lane(operand, i.InputSimd128Register(index), laneidx);
      break;
    }
    case kIA32I8x16SConvertI16x8: {
      __ Packsswb(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputOperand(1));
      break;
    }
    case kIA32I8x16Neg: {
      XMMRegister dst = i.OutputSimd128Register();
      Operand src = i.InputOperand(0);
      if (src.is_reg(dst)) {
        // ... (negate dst in place)
      } else {
        // ... (materialize zero, then subtract src)
      }
      break;
    }
    case kIA32I8x16Shl: {
      XMMRegister dst = i.OutputSimd128Register();
      XMMRegister src = i.InputSimd128Register(0);
      // ...
      if (HasImmediateInput(instr, 1)) {
        // ...
      } else {
        XMMRegister tmp_simd = i.TempSimd128Register(1);
        // ...
      }
      break;
    }
    case kIA32I8x16ShrS: {
      XMMRegister dst = i.OutputSimd128Register();
      XMMRegister src = i.InputSimd128Register(0);
      // ...
      if (HasImmediateInput(instr, 1)) {
        // ...
      } else {
        __ I8x16ShrS(dst, src, i.InputRegister(1), i.TempRegister(0),
                     kScratchDoubleReg, i.TempSimd128Register(1));
      }
      break;
    }
    case kIA32I8x16Add: {
      __ Paddb(i.OutputSimd128Register(), i.InputSimd128Register(0),
               i.InputOperand(1));
      break;
    }
    case kIA32I8x16AddSatS: {
      __ Paddsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputOperand(1));
      break;
    }
    case kIA32I8x16Sub: {
      __ Psubb(i.OutputSimd128Register(), i.InputSimd128Register(0),
               i.InputOperand(1));
      break;
    }
    case kIA32I8x16SubSatS: {
      __ Psubsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputOperand(1));
      break;
    }
    case kIA32I8x16MinS: {
      __ Pminsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputOperand(1));
      break;
    }
    case kIA32I8x16MaxS: {
      __ Pmaxsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputOperand(1));
      break;
    }
    case kIA32I8x16Eq: {
      __ Pcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputOperand(1));
      break;
    }
    case kSSEI8x16Ne: {
      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
      __ pcmpeqb(i.OutputSimd128Register(), i.InputOperand(1));
      // ... (elided: XOR with all-ones to invert the equality mask)
    }
    case kAVXI8x16Ne: {
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vpcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputOperand(1));
      // ... (elided: materialize all-ones in kScratchDoubleReg)
      __ vpxor(i.OutputSimd128Register(), i.OutputSimd128Register(),
               kScratchDoubleReg);
      break;
    }
    case kIA32I8x16GtS: {
      __ Pcmpgtb(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputOperand(1));
      break;
    }
    case kSSEI8x16GeS: {
      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      XMMRegister dst = i.OutputSimd128Register();
      Operand src = i.InputOperand(1);
      __ pminsb(dst, src);
      __ pcmpeqb(dst, src);
      break;
    }
    case kAVXI8x16GeS: {
      CpuFeatureScope avx_scope(masm(), AVX);
      XMMRegister src1 = i.InputSimd128Register(0);
      Operand src2 = i.InputOperand(1);
      // ... (elided: vpminsb into kScratchDoubleReg, then vpcmpeqb)
    }
    case kIA32I8x16UConvertI16x8: {
      __ Packuswb(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kIA32I8x16AddSatU: {
      __ Paddusb(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputOperand(1));
      break;
    }
    case kIA32I8x16SubSatU: {
      __ Psubusb(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputOperand(1));
      break;
    }
    case kIA32I8x16ShrU: {
      XMMRegister dst = i.OutputSimd128Register();
      XMMRegister src = i.InputSimd128Register(0);
      // ...
      if (HasImmediateInput(instr, 1)) {
        // ...
      } else {
        __ I8x16ShrU(dst, src, i.InputRegister(1), i.TempRegister(0),
                     kScratchDoubleReg, i.TempSimd128Register(1));
      }
      break;
    }
    case kIA32I8x16MinU: {
      __ Pminub(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputOperand(1));
      break;
    }
    case kIA32I8x16MaxU: {
      __ Pmaxub(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputOperand(1));
      break;
    }
    case kSSEI8x16GtU: {
      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
      XMMRegister dst = i.OutputSimd128Register();
      Operand src = i.InputOperand(1);
      __ pmaxub(dst, src);
      __ pcmpeqb(dst, src);
      // ... (elided: invert the mask, as in the I16x8 variant above)
    }
    case kAVXI8x16GtU: {
      CpuFeatureScope avx_scope(masm(), AVX);
      XMMRegister dst = i.OutputSimd128Register();
      XMMRegister src1 = i.InputSimd128Register(0);
      Operand src2 = i.InputOperand(1);
      // ... (elided: vpmaxub/vpcmpeqb, then invert)
    }
    case kSSEI8x16GeU: {
      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
      XMMRegister dst = i.OutputSimd128Register();
      Operand src = i.InputOperand(1);
      __ pminub(dst, src);
      __ pcmpeqb(dst, src);
      break;
    }
    case kAVXI8x16GeU: {
      CpuFeatureScope avx_scope(masm(), AVX);
      XMMRegister src1 = i.InputSimd128Register(0);
      Operand src2 = i.InputOperand(1);
      // ... (elided: vpminub into kScratchDoubleReg, then vpcmpeqb)
    }
    case kIA32I8x16RoundingAverageU: {
      __ Pavgb(i.OutputSimd128Register(), i.InputSimd128Register(0),
               i.InputOperand(1));
      break;
    }
    case kIA32I8x16Abs: {
      __ Pabsb(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kIA32I8x16BitMask: {
      __ Pmovmskb(i.OutputRegister(), i.InputSimd128Register(0));
      break;
    }
    case kIA32I8x16Popcnt: {
      __ I8x16Popcnt(i.OutputSimd128Register(), i.InputSimd128Register(0),
                     kScratchDoubleReg, i.TempSimd128Register(0),
                     i.TempRegister(1));
      break;
    }
    case kIA32S128Const: {
      XMMRegister dst = i.OutputSimd128Register();
      Register tmp = i.TempRegister(0);
      uint64_t low_qword = make_uint64(i.InputUint32(1), i.InputUint32(0));
      __ Move(dst, low_qword);
      __ Move(tmp, Immediate(i.InputUint32(2)));
      __ Pinsrd(dst, tmp, 2);
      __ Move(tmp, Immediate(i.InputUint32(3)));
      __ Pinsrd(dst, tmp, 3);
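      // The constant's four 32-bit words are committed in three steps: one
      // Move materializes the low quadword (make_uint64 takes (high, low), so
      // InputUint32(1) supplies bits 32..63 and InputUint32(0) bits 0..31),
      // then pinsrd patches lanes 2 and 3 in via the scratch GPR. Model
      // (sketch): bits [32k, 32k + 32) of the constant equal i.InputUint32(k).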
 
      break;
    }
    case kIA32S128Zero: {
      XMMRegister dst = i.OutputSimd128Register();
      __ Pxor(dst, dst);
      break;
    }
    case kIA32S128AllOnes: {
      XMMRegister dst = i.OutputSimd128Register();
      __ Pcmpeqd(dst, dst);
      break;
    }
    case kIA32S128Not: {
      __ S128Not(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 kScratchDoubleReg);
      break;
    }
    case kIA32S128And: {
      __ Pand(i.OutputSimd128Register(), i.InputSimd128Register(0),
              i.InputOperand(1));
      break;
    }
    case kIA32S128Or: {
      __ Por(i.OutputSimd128Register(), i.InputSimd128Register(0),
             i.InputOperand(1));
      break;
    }
    case kIA32S128Xor: {
      __ Pxor(i.OutputSimd128Register(), i.InputSimd128Register(0),
              i.InputOperand(1));
      break;
    }
    case kIA32S128Select: {
      __ S128Select(i.OutputSimd128Register(), i.InputSimd128Register(0),
                    i.InputSimd128Register(1), i.InputSimd128Register(2),
                    kScratchDoubleReg);
      break;
    }
    case kIA32S128AndNot: {
      // The inputs have been inverted by instruction selector, so we can call
      // andnps here without any modifications.
      __ Andnps(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kIA32I8x16Swizzle: {
      __ I8x16Swizzle(i.OutputSimd128Register(), i.InputSimd128Register(0),
                      /* ... */);
      break;
    }
    case kIA32I8x16Shuffle: {
      XMMRegister dst = i.OutputSimd128Register();
      Operand src0 = i.InputOperand(0);
      // ... (align esp so the shuffle control mask can be built on the stack)
      if (instr->InputCount() == 5) {  // only one input operand
        DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
        for (int j = 4; j > 0; j--) {
          uint32_t mask = i.InputUint32(j);
          __ push(Immediate(mask));
        }
        __ Pshufb(dst, Operand(esp, 0));
      } else {  // two input operands
        // ... (shuffle src0 into kScratchDoubleReg first)
        for (int j = 5; j > 1; j--) {
          uint32_t lanes = i.InputUint32(j);
          uint32_t mask = 0;
          for (int k = 0; k < 32; k += 8) {
            uint8_t lane = lanes >> k;
            // ... (keep lanes of the first source; others become 0x80)
          }
          __ push(Immediate(mask));
        }
        __ Pshufb(kScratchDoubleReg, Operand(esp, 0));
        Operand src1 = i.InputOperand(1);
        if (!src1.is_reg(dst)) __ Movups(dst, src1);
        for (int j = 5; j > 1; j--) {
          uint32_t lanes = i.InputUint32(j);
          uint32_t mask = 0;
          for (int k = 0; k < 32; k += 8) {
            uint8_t lane = lanes >> k;
            // ... (keep lanes of the second source; others become 0x80)
          }
          __ push(Immediate(mask));
        }
        __ Pshufb(dst, Operand(esp, 0));
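        // Each 32-bit immediate packs four byte-lane indices; the loops build
        // complementary pshufb control masks on the stack. Indices that pick
        // from the other source are replaced by 0x80, which makes pshufb
        // write zero to that byte, so the elided tail can simply OR the two
        // half-results together. Per-byte rule (sketch):
        //
        //   mask_byte = selects_this_source(lane) ? (lane & 0x0F) : 0x80;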
 
        // ... (combine the two half-shuffles and restore esp)
      }
      break;
    }
    case kIA32S128Load8Splat: {
      __ S128Load8Splat(i.OutputSimd128Register(), i.MemoryOperand(),
                        kScratchDoubleReg);
      break;
    }
    case kIA32S128Load16Splat: {
      __ S128Load16Splat(i.OutputSimd128Register(), i.MemoryOperand(),
                         kScratchDoubleReg);
      break;
    }
    case kIA32S128Load32Splat: {
      __ S128Load32Splat(i.OutputSimd128Register(), i.MemoryOperand());
      break;
    }
    case kIA32S128Load64Splat: {
      __ Movddup(i.OutputSimd128Register(), i.MemoryOperand());
      break;
    }
    case kIA32S128Load8x8S: {
      __ Pmovsxbw(i.OutputSimd128Register(), i.MemoryOperand());
      break;
    }
    case kIA32S128Load8x8U: {
      __ Pmovzxbw(i.OutputSimd128Register(), i.MemoryOperand());
      break;
    }
    case kIA32S128Load16x4S: {
      __ Pmovsxwd(i.OutputSimd128Register(), i.MemoryOperand());
      break;
    }
    case kIA32S128Load16x4U: {
      __ Pmovzxwd(i.OutputSimd128Register(), i.MemoryOperand());
      break;
    }
    case kIA32S128Load32x2S: {
      __ Pmovsxdq(i.OutputSimd128Register(), i.MemoryOperand());
      break;
    }
    case kIA32S128Load32x2U: {
      __ Pmovzxdq(i.OutputSimd128Register(), i.MemoryOperand());
      break;
    }
    case kIA32S32x4Rotate: {
      XMMRegister dst = i.OutputSimd128Register();
      XMMRegister src = i.InputSimd128Register(0);
      uint8_t mask = i.InputUint8(1);
      if (dst == src) {
        // 1-byte shorter encoding than pshufd.
        __ Shufps(dst, src, src, mask);
      } else {
        __ Pshufd(dst, src, mask);
      }
      break;
    }
    case kIA32S32x4Swizzle: {
      DCHECK_EQ(2, instr->InputCount());
      __ Pshufd(i.OutputSimd128Register(), i.InputOperand(0), i.InputUint8(1));
      break;
    }
    case kIA32S32x4Shuffle: {
      DCHECK_EQ(4, instr->InputCount());  // Swizzles should be handled above.
      uint8_t shuffle = i.InputUint8(2);
      // ... (shuffle the second source into kScratchDoubleReg)
      __ Pshufd(i.OutputSimd128Register(), i.InputOperand(0), shuffle);
      // ... (blend the two shuffled halves)
    }
    case kIA32S16x8Blend:
      ASSEMBLE_SIMD_IMM_SHUFFLE(pblendw, SSE4_1, i.InputInt8(2));
      break;
    case kIA32S16x8HalfShuffle1: {
      XMMRegister dst = i.OutputSimd128Register();
      __ Pshuflw(dst, i.InputOperand(0), i.InputUint8(1));
      __ Pshufhw(dst, dst, i.InputUint8(2));
      break;
    }
    case kIA32S16x8HalfShuffle2: {
      XMMRegister dst = i.OutputSimd128Register();
      // ... (shuffle the second source into kScratchDoubleReg first)
      __ Pshuflw(dst, i.InputOperand(0), i.InputUint8(2));
      __ Pshufhw(dst, dst, i.InputUint8(3));
      // ... (blend the two results)
    }
    case kIA32S8x16Alignr:
      ASSEMBLE_SIMD_IMM_SHUFFLE(palignr, SSSE3, i.InputInt8(2));
      break;
    case kIA32S16x8Dup: {
      XMMRegister dst = i.OutputSimd128Register();
      Operand src = i.InputOperand(0);
      uint8_t lane = i.InputUint8(1) & 0x7;
      uint8_t lane4 = lane & 0x3;
      uint8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
      if (lane < 4) {
        __ Pshuflw(dst, src, half_dup);
        __ Punpcklqdq(dst, dst);
      } else {
        __ Pshufhw(dst, src, half_dup);
        __ Punpckhqdq(dst, dst);
      }
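      // half_dup replicates the 2-bit in-half word index into all four fields
      // of the pshuflw/pshufhw immediate. Worked example (sketch): lane = 5
      // -> lane4 = 1 -> half_dup = 0b01010101, so every word of the shuffled
      // half selects word 1; the punpck*qdq then copies that half into both
      // quadwords of dst.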
 
      break;
    }
    case kIA32S8x16Dup: {
      XMMRegister dst = i.OutputSimd128Register();
      XMMRegister src = i.InputSimd128Register(0);
      uint8_t lane = i.InputUint8(1) & 0xf;
      if (CpuFeatures::IsSupported(AVX)) {
        CpuFeatureScope avx_scope(masm(), AVX);
        if (lane < 8) {
          __ vpunpcklbw(dst, src, src);
        } else {
          __ vpunpckhbw(dst, src, src);
        }
      } else {
        DCHECK_EQ(dst, src);
        if (lane < 8) {
          __ punpcklbw(dst, dst);
        } else {
          __ punpckhbw(dst, dst);
        }
      }
      lane &= 0x7;
      uint8_t lane4 = lane & 0x3;
      uint8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
      if (lane < 4) {
        __ Pshuflw(dst, dst, half_dup);
        __ Punpcklqdq(dst, dst);
      } else {
        __ Pshufhw(dst, dst, half_dup);
        __ Punpckhqdq(dst, dst);
      }
      break;
    }
    case kIA32S64x2UnpackHigh:
      ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpckhqdq);
      break;
    case kIA32S32x4UnpackHigh:
      ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpckhdq);
      break;
    case kIA32S16x8UnpackHigh:
      ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpckhwd);
      break;
    case kIA32S8x16UnpackHigh:
      ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpckhbw);
      break;
    case kIA32S64x2UnpackLow:
      ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpcklqdq);
      break;
    case kIA32S32x4UnpackLow:
      ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpckldq);
      break;
    case kIA32S16x8UnpackLow:
      ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpcklwd);
      break;
    case kIA32S8x16UnpackLow:
      ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpcklbw);
      break;
    case kSSES16x8UnzipHigh: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      XMMRegister dst = i.OutputSimd128Register();
      XMMRegister src2 = dst;
      // ...
      if (instr->InputCount() == 2) {
        // ... (prepare the second source in kScratchDoubleReg)
      }
      // ...
      __ packusdw(dst, src2);
      break;
    }
    case kAVXS16x8UnzipHigh: {
      CpuFeatureScope avx_scope(masm(), AVX);
      XMMRegister dst = i.OutputSimd128Register();
      XMMRegister src2 = dst;
      if (instr->InputCount() == 2) {
        // ... (prepare the second source in kScratchDoubleReg)
      }
      __ vpsrld(dst, i.InputSimd128Register(0), 16);
      __ vpackusdw(dst, dst, src2);
      break;
    }
    case kSSES16x8UnzipLow: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      XMMRegister dst = i.OutputSimd128Register();
      XMMRegister src2 = dst;
      // ...
      if (instr->InputCount() == 2) {
        // ...
      }
      // ...
      __ packusdw(dst, src2);
      break;
    }
    case kAVXS16x8UnzipLow: {
      CpuFeatureScope avx_scope(masm(), AVX);
      XMMRegister dst = i.OutputSimd128Register();
      XMMRegister src2 = dst;
      if (instr->InputCount() == 2) {
        // ...
      }
      // ...
      __ vpackusdw(dst, dst, src2);
      break;
    }
    case kSSES8x16UnzipHigh: {
      XMMRegister dst = i.OutputSimd128Register();
      XMMRegister src2 = dst;
      // ...
      if (instr->InputCount() == 2) {
        // ...
      }
      // ...
      __ packuswb(dst, src2);
      break;
    }
    case kAVXS8x16UnzipHigh: {
      CpuFeatureScope avx_scope(masm(), AVX);
      XMMRegister dst = i.OutputSimd128Register();
      XMMRegister src2 = dst;
      if (instr->InputCount() == 2) {
        // ...
      }
      __ vpsrlw(dst, i.InputSimd128Register(0), 8);
      __ vpackuswb(dst, dst, src2);
      break;
    }
    case kSSES8x16UnzipLow: {
      XMMRegister dst = i.OutputSimd128Register();
      XMMRegister src2 = dst;
      // ...
      if (instr->InputCount() == 2) {
        // ...
      }
      // ...
      __ packuswb(dst, src2);
      break;
    }
    case kAVXS8x16UnzipLow: {
      CpuFeatureScope avx_scope(masm(), AVX);
      XMMRegister dst = i.OutputSimd128Register();
      XMMRegister src2 = dst;
      if (instr->InputCount() == 2) {
        // ...
      }
      __ vpsllw(dst, i.InputSimd128Register(0), 8);
      __ vpsrlw(dst, dst, 8);
      __ vpackuswb(dst, dst, src2);
      break;
    }
    case kSSES8x16TransposeLow: {
      XMMRegister dst = i.OutputSimd128Register();
      // ...
      if (instr->InputCount() == 1) {
        // ...
      }
      // ... (mask each word to its low byte and merge)
    }
    case kAVXS8x16TransposeLow: {
      CpuFeatureScope avx_scope(masm(), AVX);
      XMMRegister dst = i.OutputSimd128Register();
      if (instr->InputCount() == 1) {
        // ...
      } else {
        // ...
        __ vpsllw(dst, i.InputSimd128Register(0), 8);
        __ vpsrlw(dst, dst, 8);
      }
      // ...
    }
    case kSSES8x16TransposeHigh: {
      XMMRegister dst = i.OutputSimd128Register();
      // ...
      if (instr->InputCount() == 1) {
        // ...
      }
      // ...
    }
    case kAVXS8x16TransposeHigh: {
      CpuFeatureScope avx_scope(masm(), AVX);
      XMMRegister dst = i.OutputSimd128Register();
      if (instr->InputCount() == 1) {
        __ vpsrlw(dst, i.InputSimd128Register(0), 8);
        // ...
      } else {
        // ...
        __ vpsrlw(dst, i.InputSimd128Register(0), 8);
        // ...
      }
      // ...
    }
    case kSSES8x8Reverse:
    case kSSES8x4Reverse:
    case kSSES8x2Reverse: {
      XMMRegister dst = i.OutputSimd128Register();
      // ...
      if (arch_opcode != kSSES8x2Reverse) {
        // First shuffle words into position.
        int8_t shuffle_mask = arch_opcode == kSSES8x4Reverse ? 0xB1 : 0x1B;
        __ pshuflw(dst, dst, shuffle_mask);
        __ pshufhw(dst, dst, shuffle_mask);
      }
      // ... (swap the bytes within each 16-bit lane)
    }
    case kAVXS8x2Reverse:
    case kAVXS8x4Reverse:
    case kAVXS8x8Reverse: {
      CpuFeatureScope avx_scope(masm(), AVX);
      XMMRegister dst = i.OutputSimd128Register();
      XMMRegister src = dst;
      if (arch_opcode != kAVXS8x2Reverse) {
        // First shuffle words into position.
        int8_t shuffle_mask = arch_opcode == kAVXS8x4Reverse ? 0xB1 : 0x1B;
        __ vpshuflw(dst, i.InputOperand(0), shuffle_mask);
        __ vpshufhw(dst, dst, shuffle_mask);
      } else {
        src = i.InputSimd128Register(0);
      }
      // Reverse each 16-bit lane.
      // ...
      __ vpsllw(dst, src, 8);
      // ... (OR the two byte-swapped halves together)
    }
    case kIA32S128AnyTrue: {
      Register dst = i.OutputRegister();
      XMMRegister src = i.InputSimd128Register(0);
      Register tmp = i.TempRegister(0);
      __ xor_(tmp, tmp);
      __ mov(dst, Immediate(1));
      // Check whether all bytes are zero.
      __ Ptest(src, src);
      __ cmov(zero, dst, tmp);
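      // Branch-free materialization of any_true: ptest sets ZF exactly when
      // src is all zeroes, so starting from dst = 1 and conditionally moving
      // tmp (== 0) over it computes, in scalar terms (sketch):
      //
      //   dst = (src != 0) ? 1 : 0;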
 
      break;
    }
    case kIA32I64x2AllTrue:
      ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqq);
      break;
    case kIA32I32x4AllTrue:
      ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqd);
      break;
    case kIA32I16x8AllTrue:
      ASSEMBLE_SIMD_ALL_TRUE(pcmpeqw);
      break;
    case kIA32I8x16AllTrue: {
      ASSEMBLE_SIMD_ALL_TRUE(pcmpeqb);
      break;
    }
    case kIA32Blendvpd: {
      __ Blendvpd(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1), i.InputSimd128Register(2));
      break;
    }
    case kIA32Blendvps: {
      __ Blendvps(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1), i.InputSimd128Register(2));
      break;
    }
    case kIA32Pblendvb: {
      __ Pblendvb(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1), i.InputSimd128Register(2));
      break;
    }
    case kIA32I32x4TruncF64x2UZero: {
      __ I32x4TruncSatF64x2UZero(i.OutputSimd128Register(),
                                 i.InputSimd128Register(0), kScratchDoubleReg,
                                 i.TempRegister(0));
      break;
    }
    case kIA32I32x4TruncF32x4U: {
      __ I32x4TruncF32x4U(i.OutputSimd128Register(), i.InputSimd128Register(0),
                          kScratchDoubleReg, i.TempSimd128Register(0));
      break;
    }
    case kIA32Cvttps2dq: {
      __ Cvttps2dq(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kIA32Cvttpd2dq: {
      __ Cvttpd2dq(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kIA32Word32AtomicPairLoad: {
      // ... (load the 8-byte pair in one movq and split it into the two
      //      output registers)
    }
    case kIA32Word32ReleasePairStore: {
      // ...
      i.MoveInstructionOperandToRegister(ebx, instr->InputAt(1));
      // ...
      i.MoveInstructionOperandToRegister(ebx, instr->InputAt(0));
      // ... (build the 64-bit value, then store it with a single movq)
    }
    case kIA32Word32SeqCstPairStore: {
      Label store;
      // Copy the memory operand into edx:eax.
      __ mov(eax, i.MemoryOperand(2));
      __ mov(edx, i.NextMemoryOperand(2));
      // ... (save ebx, then load the low input word into it)
      i.MoveInstructionOperandToRegister(ebx, instr->InputAt(0));
      __ bind(&store);
      __ lock();
      __ cmpxchg8b(i.MemoryOperand(2));
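      // ia32 has no 64-bit store that is also sequentially consistent, so the
      // store loops on lock cmpxchg8b: edx:eax holds the last observed value,
      // ecx:ebx the desired one. C++ model of the loop (an illustrative
      // sketch, not V8 code):
      //
      //   void SeqCstPairStore(std::atomic<uint64_t>* mem, uint64_t desired) {
      //     uint64_t expected = mem->load(std::memory_order_relaxed);
      //     while (!mem->compare_exchange_strong(expected, desired)) {
      //       // compare_exchange_strong refreshes `expected` on failure.
      //     }
      //   }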
 
      __ j(not_equal, &store);
      // ... (restore ebx)
    }
    case kAtomicExchangeInt8: {
      __ xchg_b(i.InputRegister(0), i.MemoryOperand(1));
      __ movsx_b(i.InputRegister(0), i.InputRegister(0));
      break;
    }
    case kAtomicExchangeUint8: {
      __ xchg_b(i.InputRegister(0), i.MemoryOperand(1));
      __ movzx_b(i.InputRegister(0), i.InputRegister(0));
      break;
    }
    case kAtomicExchangeInt16: {
      __ xchg_w(i.InputRegister(0), i.MemoryOperand(1));
      __ movsx_w(i.InputRegister(0), i.InputRegister(0));
      break;
    }
    case kAtomicExchangeUint16: {
      __ xchg_w(i.InputRegister(0), i.MemoryOperand(1));
      __ movzx_w(i.InputRegister(0), i.InputRegister(0));
      break;
    }
    case kAtomicExchangeWord32: {
      __ xchg(i.InputRegister(0), i.MemoryOperand(1));
      break;
    }
    case kIA32Word32AtomicPairExchange: {
      // ...
      Label exchange;
      __ mov(eax, i.MemoryOperand(2));
      __ mov(edx, i.NextMemoryOperand(2));
      // ... (save ebx, then load the low input word into it)
      i.MoveInstructionOperandToRegister(ebx, instr->InputAt(0));
      __ bind(&exchange);
      __ lock();
      __ cmpxchg8b(i.MemoryOperand(2));
      __ j(not_equal, &exchange);
      // ... (restore ebx)
    }
    case kAtomicCompareExchangeInt8: {
      __ lock();
      __ cmpxchg_b(i.MemoryOperand(2), i.InputRegister(1));
      __ movsx_b(eax, eax);
      break;
    }
    case kAtomicCompareExchangeUint8: {
      __ lock();
      __ cmpxchg_b(i.MemoryOperand(2), i.InputRegister(1));
      __ movzx_b(eax, eax);
      break;
    }
    case kAtomicCompareExchangeInt16: {
      __ lock();
      __ cmpxchg_w(i.MemoryOperand(2), i.InputRegister(1));
      __ movsx_w(eax, eax);
      break;
    }
    case kAtomicCompareExchangeUint16: {
      __ lock();
      __ cmpxchg_w(i.MemoryOperand(2), i.InputRegister(1));
      __ movzx_w(eax, eax);
      break;
    }
    case kAtomicCompareExchangeWord32: {
      __ lock();
      __ cmpxchg(i.MemoryOperand(2), i.InputRegister(1));
      break;
    }
    case kIA32Word32AtomicPairCompareExchange: {
      // ... (save ebx, then load the low word of the new value into it)
      i.MoveInstructionOperandToRegister(ebx, instr->InputAt(2));
      __ lock();
      __ cmpxchg8b(i.MemoryOperand(4));
      // ... (restore ebx)
    }
#define ATOMIC_BINOP_CASE(op, inst)                \
  case kAtomic##op##Int8: {                        \
    ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
    __ movsx_b(eax, eax);                          \
    break;                                         \
  }                                                \
  case kAtomic##op##Uint8: {                       \
    ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
    __ movzx_b(eax, eax);                          \
    break;                                         \
  }                                                \
  case kAtomic##op##Int16: {                       \
    ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
    __ movsx_w(eax, eax);                          \
    break;                                         \
  }                                                \
  case kAtomic##op##Uint16: {                      \
    ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
    __ movzx_w(eax, eax);                          \
    break;                                         \
  }                                                \
  case kAtomic##op##Word32: {                      \
    ASSEMBLE_ATOMIC_BINOP(inst, mov, cmpxchg);     \
    break;                                         \
  }
      // ... (instantiations of ATOMIC_BINOP_CASE elided)
#undef ATOMIC_BINOP_CASE
#define ATOMIC_BINOP_CASE(op, instr1, instr2)         \
  case kIA32Word32AtomicPair##op: {                   \
    DCHECK(VerifyOutputOfAtomicPairInstr(&i, instr)); \
    ASSEMBLE_I64ATOMIC_BINOP(instr1, instr2)          \
    break;                                            \
  }
      // ... (instantiations for the pair binops elided)
#undef ATOMIC_BINOP_CASE
    case kIA32Word32AtomicPairSub: {
      // ...
      Label binop;
      __ bind(&binop);
      // Move the memory operand into edx:eax.
      __ mov(eax, i.MemoryOperand(2));
      __ mov(edx, i.NextMemoryOperand(2));
      // ... (save ebx and InputRegister(1) on the stack, then load the low
      //      word of the input into ebx)
      i.MoveInstructionOperandToRegister(ebx, instr->InputAt(0));
      // Negate the 64-bit input held in InputRegister(1):ebx ...
      __ neg(ebx);
      __ adc(i.InputRegister(1), 0);
      __ neg(i.InputRegister(1));
      // ... then add the loaded value, so the pair holds old_value - input.
      __ add(ebx, eax);
      __ adc(i.InputRegister(1), edx);
      __ lock();
      __ cmpxchg8b(i.MemoryOperand(2));
      __ j(not_equal, &binop);
      // Restore registers.
      __ pop(i.InputRegister(1));
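      // cmpxchg8b only compares-and-swaps, so "pair subtract" is implemented
      // as negate-then-add: a two-word value is negated with
      //   neg low; adc high, 0; neg high;  // 64-bit -x == ~x + 1 with carry
      // which is why the adc/neg pair above precedes the add/adc chain that
      // folds in the loaded edx:eax.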
 
      // ... (restore ebx and rebalance the frame)
    }
    case kAtomicLoadInt8:
    case kAtomicLoadUint8:
    case kAtomicLoadInt16:
    case kAtomicLoadUint16:
    case kAtomicLoadWord32:
    case kAtomicStoreWord8:
    case kAtomicStoreWord16:
    case kAtomicStoreWord32:
      UNREACHABLE();  // Won't be generated by instruction selector.
  }
  return kSuccess;
}

// Assembles a branch after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr,
                                       BranchInfo* branch) {
  // ...
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  // ... (emit the condition jumps to tlabel)
  // Add a jump if not falling through to the next block.
  if (!branch->fallthru) __ jmp(flabel);
}

void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
                                            BranchInfo* branch) {
  // ...
}

#if V8_ENABLE_WEBASSEMBLY
void CodeGenerator::AssembleArchTrap(Instruction* instr,
                                     FlagsCondition condition) {
  class OutOfLineTrap final : public OutOfLineCode {
   public:
    OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
        : OutOfLineCode(gen), instr_(instr) {}

    void Generate() final {
      IA32OperandConverter i(gen_, instr_);
      TrapId trap_id =
          static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
      GenerateCallToTrap(trap_id);
    }

   private:
    void GenerateCallToTrap(TrapId trap_id) {
      gen_->AssembleSourcePosition(instr_);
      // A direct call to a wasm runtime stub defined in this module.
      // ...
      __ wasm_call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
      ReferenceMap* reference_map =
          gen_->zone()->New<ReferenceMap>(gen_->zone());
      gen_->RecordSafepoint(reference_map);
      __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
    }

    Instruction* instr_;
    CodeGenerator* gen_;
  };
  // ...
  Label* tlabel = ool->entry();
  // ... (branch to the out-of-line trap on the given condition)
}
#endif  // V8_ENABLE_WEBASSEMBLY

// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  IA32OperandConverter i(this, instr);
  // ... (the result register is the last output of the instruction)
  if (condition == kUnorderedEqual) {
    // ...
    __ Move(reg, Immediate(0));
    // ...
  } else if (condition == kUnorderedNotEqual) {
    // ...
    __ mov(reg, Immediate(1));
    // ...
  }
  // ...
  if (reg.is_byte_register()) {
    // setcc only works on byte registers (al, bl, cl, dl).
    // ...
  } else {
    // Emit a branch to set the register to either 1 or 0.
    // ...
    __ Move(reg, Immediate(0));
    // ...
    __ mov(reg, Immediate(1));
  }
  // ...
}

void CodeGenerator::AssembleArchConditionalBranch(Instruction* instr,
                                                  BranchInfo* branch) {
  UNREACHABLE();
}

void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
  IA32OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  std::vector<std::pair<int32_t, Label*>> cases;
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
  }
  AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
                                      cases.data() + cases.size());
}

void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  IA32OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  size_t const case_count = instr->InputCount() - 2;
  base::Vector<Label*> cases = zone()->AllocateVector<Label*>(case_count);
  for (size_t index = 0; index < case_count; ++index) {
    cases[index] = GetLabel(i.InputRpo(index + 2));
  }
  Label* const table = AddJumpTable(cases);
  __ cmp(input, Immediate(case_count));
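  // A single unsigned compare doubles as the negative-index check: treated as
  // uint32, any input >= case_count (including negative int32 values) goes to
  // the default block; everything else dispatches through the emitted jump
  // table. Rough model (sketch):
  //
  //   if (static_cast<uint32_t>(input) >= case_count) goto default_block;
  //   goto table[input];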
 
  __ j(above_equal, GetLabel(i.InputRpo(1)));
  __ jmp(Operand::JumpTable(input, times_system_pointer_size, table));
}

// ...

void CodeGenerator::FinishFrame(Frame* frame) {
  auto call_descriptor = linkage()->GetIncomingDescriptor();
  // ...
  const RegList saves = call_descriptor->CalleeSavedRegisters();
  if (!saves.is_empty()) {  // Save callee-saved registers.
    frame->AllocateSavedCalleeRegisterSlots(saves.Count());
  }
}

void CodeGenerator::AssembleConstructFrame() {
  auto call_descriptor = linkage()->GetIncomingDescriptor();
  if (frame_access_state()->has_frame()) {
    if (call_descriptor->IsCFunctionCall()) {
      __ push(ebp);
      __ mov(ebp, esp);
#if V8_ENABLE_WEBASSEMBLY
      if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
        // ... (push the C_WASM_ENTRY frame marker and reserve a slot)
      }
#endif  // V8_ENABLE_WEBASSEMBLY
    } else if (call_descriptor->IsJSFunctionCall()) {
      __ Prologue();
    } else {
      __ StubPrologue(info()->GetOutputStackFrameType());
#if V8_ENABLE_WEBASSEMBLY
      if (call_descriptor->IsAnyWasmFunctionCall() ||
          call_descriptor->IsWasmImportWrapper() ||
          call_descriptor->IsWasmCapiFunction()) {
        // ... (push the wasm implicit-arg register)
      }
      if (call_descriptor->IsWasmCapiFunction()) {
        // ... (reserve a slot for saving the PC later)
      }
#endif  // V8_ENABLE_WEBASSEMBLY
    }
  }

  int required_slots =
      frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();

  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
    // ...
    __ RecordComment("-- OSR entrypoint --");
    // ...
  }

  const RegList saves = call_descriptor->CalleeSavedRegisters();
  if (required_slots > 0) {
#if V8_ENABLE_WEBASSEMBLY
    // ... (for large wasm frames, check against the stack limit first;
    //      `scratch` is set up in the elided code)
        __ mov(scratch, esp);
    // ...
      if (v8_flags.experimental_wasm_growable_stacks) {
        // ... (save the clobbered registers, including
        //      WasmHandleStackOverflowDescriptor::FrameBaseRegister())
        __ mov(WasmHandleStackOverflowDescriptor::FrameBaseRegister(), ebp);
        __ add(WasmHandleStackOverflowDescriptor::FrameBaseRegister(),
               Immediate(static_cast<int32_t>(/* ... */)));
        __ CallBuiltin(Builtin::kWasmHandleStackOverflow);
        // ...
      } else {
        __ wasm_call(static_cast<Address>(Builtin::kWasmStackOverflow),
                     RelocInfo::WASM_STUB_CALL);
        // ...
        ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
        // ...
        __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
      }
#endif  // V8_ENABLE_WEBASSEMBLY

    // Skip callee-saved and return slots, which are created below.
    required_slots -= saves.Count();
    required_slots -= frame()->GetReturnSlotCount();
    if (required_slots > 0) {
      // ... (allocate the remaining stack space)
    }
  }

  if (!saves.is_empty()) {  // Save callee-saved registers.
    // ...
  }

  // Allocate return slots (located after callee-saved).
  if (frame()->GetReturnSlotCount() > 0) {
    // ...
  }

  // ... (zero-initialize the deopt-related spill slots; `offset` is computed
  //      in the elided code)
    __ mov(Operand(ebp, offset.offset()), Immediate(0));
  // ...
}

void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
  auto call_descriptor = linkage()->GetIncomingDescriptor();

  const RegList saves = call_descriptor->CalleeSavedRegisters();
  // Restore registers.
  if (!saves.is_empty()) {
    const int returns = frame()->GetReturnSlotCount();
    // ... (free the return slots, then pop each saved register)
  }

  IA32OperandConverter g(this, nullptr);
  int parameter_slots = static_cast<int>(call_descriptor->ParameterSlotCount());

  // {additional_pop_count} can only be greater than zero when
  // {parameter_slots} is zero.
  if (parameter_slots != 0) {
    if (additional_pop_count->IsImmediate()) {
      DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
    } else if (v8_flags.debug_code) {
      __ cmp(g.ToRegister(additional_pop_count), Immediate(0));
      __ Assert(equal, AbortReason::kUnexpectedAdditionalPopValue);
    }
  }

#if V8_ENABLE_WEBASSEMBLY
  if (call_descriptor->IsAnyWasmFunctionCall() &&
      v8_flags.experimental_wasm_growable_stacks) {
    // ... (if running on a grown stack segment, shrink it back)
    __ CallCFunction(ExternalReference::wasm_shrink_stack(), 1);
    // ...
  }
#endif  // V8_ENABLE_WEBASSEMBLY

  Register argc_reg = ecx;
  // Functions with JS linkage have at least one parameter (the receiver).
  const bool drop_jsargs = parameter_slots != 0 &&
                           frame_access_state()->has_frame() &&
                           call_descriptor->IsJSFunctionCall();
  if (call_descriptor->IsCFunctionCall()) {
    AssembleDeconstructFrame();
  } else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now if they always have the
    // same number of return args.
    if (additional_pop_count->IsImmediate() &&
        g.ToConstant(additional_pop_count).ToInt32() == 0) {
      // ... (jump to or bind the shared return label)
    }
    if (drop_jsargs) {
      // Get the actual argument count.
      __ mov(argc_reg, Operand(ebp, StandardFrameConstants::kArgCOffset));
      DCHECK(!call_descriptor->CalleeSavedRegisters().has(argc_reg));
    }
    AssembleDeconstructFrame();
  }

  if (drop_jsargs) {
    // We must pop all arguments from the stack (including the receiver).
    Label mismatch_return;
    Register scratch_reg = edx;
    DCHECK(!call_descriptor->CalleeSavedRegisters().has(argc_reg));
    DCHECK(!call_descriptor->CalleeSavedRegisters().has(scratch_reg));
    __ cmp(argc_reg, Immediate(parameter_slots));
    // ... (fast path: pop exactly parameter_slots and return)
    __ bind(&mismatch_return);
    __ DropArguments(argc_reg, scratch_reg);
    // We use a return instead of a jump for better return address prediction.
    __ Ret();
  } else if (additional_pop_count->IsImmediate()) {
    int additional_count = g.ToConstant(additional_pop_count).ToInt32();
    size_t pop_size =
        (parameter_slots + additional_count) * kSystemPointerSize;
    if (is_uint16(pop_size)) {
      // Avoid the additional scratch register; it might clobber the
      // callee-saved registers.
      __ ret(static_cast<int>(pop_size));
    } else {
      Register scratch_reg = ecx;
      DCHECK(!call_descriptor->CalleeSavedRegisters().has(scratch_reg));
      CHECK_LE(pop_size, static_cast<size_t>(std::numeric_limits<int>::max()));
      __ Ret(static_cast<int>(pop_size), scratch_reg);
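      // The "ret imm16" encoding can only pop a 16-bit byte count, hence the
      // is_uint16(pop_size) fast path above; larger pop sizes go through
      // MacroAssembler::Ret(int, Register), which needs a scratch register to
      // hold the return address while esp is adjusted.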
 
    }
  } else {
    Register pop_reg = g.ToRegister(additional_pop_count);
    Register scratch_reg = pop_reg == ecx ? edi : ecx;
    DCHECK(!call_descriptor->CalleeSavedRegisters().has(scratch_reg));
    DCHECK(!call_descriptor->CalleeSavedRegisters().has(pop_reg));
    int pop_size = static_cast<int>(parameter_slots * kSystemPointerSize);
    __ PopReturnAddressTo(scratch_reg);
    __ lea(esp, Operand(esp, pop_reg, times_system_pointer_size,
                        static_cast<int>(pop_size)));
    __ PushReturnAddressFrom(scratch_reg);
    __ Ret();
  }
}

// ...

void CodeGenerator::PrepareForDeoptimizationExits(
    ZoneDeque<DeoptimizationExit*>* exits) {}

AllocatedOperand CodeGenerator::Push(InstructionOperand* source) {
  auto rep = LocationOperand::cast(source)->representation();
  int new_slots = ElementSizeInPointers(rep);
  IA32OperandConverter g(this, nullptr);
  int last_frame_slot_id =
      frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
  int sp_delta = frame_access_state_->sp_delta();
  int slot_id = last_frame_slot_id + sp_delta + new_slots;
  AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
  if (source->IsRegister()) {
    __ push(g.ToRegister(source));
    frame_access_state()->IncreaseSPDelta(new_slots);
  } else if (source->IsStackSlot() || source->IsFloatStackSlot()) {
    __ push(g.ToOperand(source));
    frame_access_state()->IncreaseSPDelta(new_slots);
  } else {
    // No push instruction for this operand type. Bump the stack pointer and
    // assemble the move.
    // ...
  }
  // ...
  return stack_slot;
}

void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
  IA32OperandConverter g(this, nullptr);
  int new_slots = ElementSizeInPointers(rep);
  frame_access_state()->IncreaseSPDelta(-new_slots);
  if (dest->IsRegister()) {
    __ pop(g.ToRegister(dest));
  } else if (dest->IsStackSlot() || dest->IsFloatStackSlot()) {
    __ pop(g.ToOperand(dest));
  } else {
    int last_frame_slot_id =
        frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
    int sp_delta = frame_access_state_->sp_delta();
    int slot_id = last_frame_slot_id + sp_delta;
    // ... (assemble the move out of the slot, then free it)
  }
  // ...
}

// ...

void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
                                       MachineRepresentation rep) {
  // Must be kept in sync with {MoveTempLocationTo}.
  DCHECK(!source->IsImmediate());
  // ...
}

// ...

void CodeGenerator::SetPendingMove(MoveOperands* move) {
  InstructionOperand* source = &move->source();
  InstructionOperand* destination = &move->destination();
  // ...
    if (!source->IsStackSlot()) {
      // ...
    }
  // ...
}

void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  IA32OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds.
  switch (MoveType::InferMove(source, destination)) {
    case MoveType::kRegisterToRegister:
      if (source->IsRegister()) {
        __ mov(g.ToRegister(destination), g.ToRegister(source));
      } else {
        DCHECK(source->IsFPRegister());
        __ Movaps(g.ToDoubleRegister(destination), g.ToDoubleRegister(source));
      }
      return;
    case MoveType::kRegisterToStack: {
      Operand dst = g.ToOperand(destination);
      if (source->IsRegister()) {
        __ mov(dst, g.ToRegister(source));
      } else {
        DCHECK(source->IsFPRegister());
        XMMRegister src = g.ToDoubleRegister(source);
        // ... (Movss/Movsd for the scalar float representations)
          __ Movups(dst, src);
      }
      return;
    }
    case MoveType::kStackToRegister: {
      Operand src = g.ToOperand(source);
      if (source->IsStackSlot()) {
        __ mov(g.ToRegister(destination), src);
      } else {
        DCHECK(source->IsFPStackSlot());
        XMMRegister dst = g.ToDoubleRegister(destination);
        // ... (Movss/Movsd for the scalar float representations)
          __ Movups(dst, src);
      }
      return;
    }
    case MoveType::kStackToStack: {
      Operand src = g.ToOperand(source);
      if (source->IsStackSlot()) {
        // ... (move through push/pop)
      } else {
        // ... (move through kScratchDoubleReg)
      }
      return;
    }
    case MoveType::kConstantToRegister: {
      Constant src = g.ToConstant(source);
      if (destination->IsRegister()) {
        Register dst = g.ToRegister(destination);
        if (src.type() == Constant::kHeapObject) {
          __ Move(dst, src.ToHeapObject());
        } else if (src.type() == Constant::kExternalReference) {
          __ Move(dst, Immediate(src.ToExternalReference()));
        } else {
          __ Move(dst, g.ToImmediate(source));
        }
      } else {
        DCHECK(destination->IsFPRegister());
        XMMRegister dst = g.ToDoubleRegister(destination);
        if (src.type() == Constant::kFloat32) {
          __ Move(dst, src.ToFloat32AsInt());
        } else {
          DCHECK_EQ(src.type(), Constant::kFloat64);
          __ Move(dst, src.ToFloat64().AsUint64());
        }
      }
      return;
    }
    case MoveType::kConstantToStack: {
      Constant src = g.ToConstant(source);
      Operand dst = g.ToOperand(destination);
      if (destination->IsStackSlot()) {
        __ Move(dst, g.ToImmediate(source));
      } else {
        DCHECK(destination->IsFPStackSlot());
        if (src.type() == Constant::kFloat32) {
          __ Move(dst, Immediate(src.ToFloat32AsInt()));
        } else {
          DCHECK_EQ(src.type(), Constant::kFloat64);
          uint64_t constant_value = src.ToFloat64().AsUint64();
          uint32_t lower = static_cast<uint32_t>(constant_value);
          uint32_t upper = static_cast<uint32_t>(constant_value >> 32);
          Operand dst0 = dst;
          Operand dst1 = g.ToOperand(destination, kSystemPointerSize);
          __ Move(dst0, Immediate(lower));
          __ Move(dst1, Immediate(upper));
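          // ia32 cannot store a 64-bit immediate in one instruction, so the
          // float64 bit pattern is split into two 32-bit stores. Host-side
          // model (sketch):
          //
          //   lower = static_cast<uint32_t>(bits);        // slot + 0
          //   upper = static_cast<uint32_t>(bits >> 32);  // slot + 4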
 
        }
      }
      return;
    }
  }
  UNREACHABLE();
}

void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  IA32OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  switch (MoveType::InferSwap(source, destination)) {
    case MoveType::kRegisterToRegister: {
      if (source->IsRegister()) {
        Register src = g.ToRegister(source);
        // ... (swap the two GPRs through the stack)
      } else {
        DCHECK(source->IsFPRegister());
        XMMRegister src = g.ToDoubleRegister(source);
        XMMRegister dst = g.ToDoubleRegister(destination);
        __ Movaps(kScratchDoubleReg, src);
        __ Movaps(src, dst);
        __ Movaps(dst, kScratchDoubleReg);
      }
      return;
    }
    case MoveType::kRegisterToStack: {
      if (source->IsRegister()) {
        Register src = g.ToRegister(source);
        // ... (swap through push/pop)
      } else {
        DCHECK(source->IsFPRegister());
        XMMRegister src = g.ToDoubleRegister(source);
        Operand dst = g.ToOperand(destination);
        // ... (swap through kScratchDoubleReg; Movss/Movsd for floats)
          __ Movups(dst, src);
      }
      return;
    }
    case MoveType::kStackToStack: {
      if (source->IsStackSlot()) {
        // ... (swap the two word slots)
        Operand src1 = g.ToOperand(source);
        // ...
        Operand src2 = g.ToOperand(source);
        // ...
      } else {
        DCHECK(source->IsFPStackSlot());
        Operand src0 = g.ToOperand(source);
        // ... (swap the FP slots through the XMM scratch registers)
      }
      return;
    }
    default:
      UNREACHABLE();
  }
}

void CodeGenerator::AssembleJumpTable(base::Vector<Label*> targets) {
  for (auto target : targets) {
    __ dd(target);
  }
}

#undef kScratchDoubleReg
#undef ASSEMBLE_COMPARE
#undef ASSEMBLE_IEEE754_BINOP
#undef ASSEMBLE_IEEE754_UNOP
#undef ASSEMBLE_BINOP
#undef ASSEMBLE_ATOMIC_BINOP
#undef ASSEMBLE_I64ATOMIC_BINOP
// ...
#undef ASSEMBLE_SIMD_PUNPCK_SHUFFLE
#undef ASSEMBLE_SIMD_IMM_SHUFFLE
#undef ASSEMBLE_SIMD_ALL_TRUE
#undef ASSEMBLE_SIMD_SHIFT
#undef ASSEMBLE_SIMD_PINSR