#define TRACE(...) PrintF(__VA_ARGS__)
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kInt64:
        return Operand(constant.ToInt64());
      // ...
    }

  MemOperand MemoryOperand(size_t* first_index) {
    const size_t index = *first_index;
    // ...
  }
class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode, StubCallMode stub_mode)
      : OutOfLineCode(gen),
        object_(object),
        index_(index),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode)
#if V8_ENABLE_WEBASSEMBLY
        ,
        stub_mode_(stub_mode)
#endif  // V8_ENABLE_WEBASSEMBLY
  {
  }

  void Generate() final {
    // ...
    SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
                                            ? SaveFPRegsMode::kSave
                                            : SaveFPRegsMode::kIgnore;
    if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
      __ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode);
#if V8_ENABLE_WEBASSEMBLY
    } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
      // A direct call to a wasm runtime stub defined in this module.
      __ CallRecordWriteStubSaveRegisters(object_, scratch1_, save_fp_mode,
                                          StubCallMode::kCallWasmRuntimeStub);
#endif  // V8_ENABLE_WEBASSEMBLY
    } else {
      __ CallRecordWriteStubSaveRegisters(object_, scratch1_, save_fp_mode);
    }
    // ...
  }

 private:
  Register const object_;
  Register const index_;
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
#if V8_ENABLE_WEBASSEMBLY
  StubCallMode const stub_mode_;
#endif  // V8_ENABLE_WEBASSEMBLY
};
#define CREATE_OOL_CLASS(ool_name, tasm_ool_name, T)                 \
  class ool_name final : public OutOfLineCode {                      \
   public:                                                           \
    ool_name(CodeGenerator* gen, T dst, T src1, T src2)              \
        : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \
                                                                     \
    void Generate() final { __ tasm_ool_name(dst_, src1_, src2_); }  \
                                                                     \
   private:                                                          \
    T const dst_;                                                    \
    T const src1_;                                                   \
    T const src2_;                                                   \
  };

CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister)
CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister)
CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, FPURegister)
CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, FPURegister)

#undef CREATE_OOL_CLASS
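// The four instantiations above provide the out-of-line slow path for the
// kMips64Float32Max/Min and kMips64Float64Max/Min cases further below: the
// fast-path Float32Max etc. branches to ool->entry() when an input is NaN,
// and the out-of-line code materializes the canonical result before falling
// through to ool->exit().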
FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
                                             FlagsCondition condition) {
  // ...
}
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr)          \
  do {                                                   \
    __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
    __ sync();                                           \
  } while (0)

#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr)               \
  do {                                                         \
    __ sync();                                                 \
    __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \
    __ sync();                                                 \
  } while (0)
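// The read-modify-write macros below all follow the same LL/SC pattern: a
// load-linked (Ll/Lld) reads the old value, the new value is computed, and a
// store-conditional (Sc/Scd) publishes it. On MIPS the store-conditional
// writes 1 into its source register on success and 0 on failure, so the
// BranchShort back to the loop head retries while that register is zero. The
// sync() instructions on either side of the loop act as full memory fences.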
#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr)       \
  do {                                                                         \
    Label binop;                                                               \
    __ Daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));       \
    __ sync();                                                                 \
    __ bind(&binop);                                                           \
    __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0));     \
    __ bin_instr(i.TempRegister(1), i.OutputRegister(0),                       \
                 Operand(i.InputRegister(2)));                                 \
    __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
    __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg));          \
    __ sync();                                                                 \
  } while (0)
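// The _EXT variants implement 8- and 16-bit (and mixed-width) atomics on top
// of word-sized LL/SC: the address is aligned down to the enclosing 32- or
// 64-bit word (the andi/Dsubu pair), the byte offset is converted to a bit
// offset (sll by 3), and ExtractBits/InsertBits are used to read and write
// just the addressed sub-word field within the linked word.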
#define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, sign_extend, \
                                  size, bin_instr, representation)             \
  do {                                                                         \
    Label binop;                                                               \
    __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));       \
    if (representation == 32) {                                                \
      __ andi(i.TempRegister(3), i.TempRegister(0), 0x3);                      \
    } else {                                                                   \
      DCHECK_EQ(representation, 64);                                           \
      __ andi(i.TempRegister(3), i.TempRegister(0), 0x7);                      \
    }                                                                          \
    __ Dsubu(i.TempRegister(0), i.TempRegister(0),                             \
             Operand(i.TempRegister(3)));                                      \
    __ sll(i.TempRegister(3), i.TempRegister(3), 3);                           \
    __ sync();                                                                 \
    __ bind(&binop);                                                           \
    __ load_linked(i.TempRegister(1), MemOperand(i.TempRegister(0), 0));       \
    __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3),  \
                   size, sign_extend);                                         \
    __ bin_instr(i.TempRegister(2), i.OutputRegister(0),                       \
                 Operand(i.InputRegister(2)));                                 \
    __ InsertBits(i.TempRegister(1), i.TempRegister(2), i.TempRegister(3),     \
                  size);                                                       \
    __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
    __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg));          \
    __ sync();                                                                 \
  } while (0)
#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_linked, store_conditional)       \
  do {                                                                         \
    Label exchange;                                                            \
    __ sync();                                                                 \
    __ bind(&exchange);                                                        \
    __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));       \
    __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0));     \
    __ mov(i.TempRegister(1), i.InputRegister(2));                             \
    __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
    __ BranchShort(&exchange, eq, i.TempRegister(1), Operand(zero_reg));       \
    __ sync();                                                                 \
  } while (0)

#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(                                  \
    load_linked, store_conditional, sign_extend, size, representation)        \
  do {                                                                         \
    Label exchange;                                                            \
    __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));       \
    if (representation == 32) {                                                \
      __ andi(i.TempRegister(1), i.TempRegister(0), 0x3);                      \
    } else {                                                                   \
      DCHECK_EQ(representation, 64);                                           \
      __ andi(i.TempRegister(1), i.TempRegister(0), 0x7);                      \
    }                                                                          \
    __ Dsubu(i.TempRegister(0), i.TempRegister(0),                             \
             Operand(i.TempRegister(1)));                                      \
    __ sll(i.TempRegister(1), i.TempRegister(1), 3);                           \
    __ sync();                                                                 \
    __ bind(&exchange);                                                        \
    __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0));       \
    __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1),  \
                   size, sign_extend);                                         \
    __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1),    \
                  size);                                                       \
    __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
    __ BranchShort(&exchange, eq, i.TempRegister(2), Operand(zero_reg));       \
    __ sync();                                                                 \
  } while (0)
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked,                  \
                                                 store_conditional)            \
  do {                                                                         \
    Label compareExchange;                                                     \
    Label exit;                                                                \
    __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));       \
    __ sync();                                                                 \
    __ bind(&compareExchange);                                                 \
    __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0));     \
    __ BranchShort(&exit, ne, i.InputRegister(2),                              \
                   Operand(i.OutputRegister(0)));                              \
    __ mov(i.TempRegister(2), i.InputRegister(3));                             \
    __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
    __ BranchShort(&compareExchange, eq, i.TempRegister(2),                    \
                   Operand(zero_reg));                                         \
    __ bind(&exit);                                                            \
    __ sync();                                                                 \
  } while (0)
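// Compare-exchange differs from the plain binop loop in one way: if the
// loaded value does not match the expected value (input 2), the code branches
// to `exit` without attempting the store, leaving memory unchanged; only a
// failed store-conditional causes a retry.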
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(                          \
    load_linked, store_conditional, sign_extend, size, representation)        \
  do {                                                                         \
    Label compareExchange;                                                     \
    Label exit;                                                                \
    __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));       \
    if (representation == 32) {                                                \
      __ andi(i.TempRegister(1), i.TempRegister(0), 0x3);                      \
    } else {                                                                   \
      DCHECK_EQ(representation, 64);                                           \
      __ andi(i.TempRegister(1), i.TempRegister(0), 0x7);                      \
    }                                                                          \
    __ Dsubu(i.TempRegister(0), i.TempRegister(0),                             \
             Operand(i.TempRegister(1)));                                      \
    __ sll(i.TempRegister(1), i.TempRegister(1), 3);                           \
    __ sync();                                                                 \
    __ bind(&compareExchange);                                                 \
    __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0));       \
    __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1),  \
                   size, sign_extend);                                         \
    __ ExtractBits(i.TempRegister(2), i.InputRegister(2), zero_reg, size,      \
                   sign_extend);                                               \
    __ BranchShort(&exit, ne, i.TempRegister(2),                               \
                   Operand(i.OutputRegister(0)));                              \
    __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1),    \
                  size);                                                       \
    __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
    __ BranchShort(&compareExchange, eq, i.TempRegister(2),                    \
                   Operand(zero_reg));                                         \
    __ bind(&exit);                                                            \
    __ sync();                                                                 \
  } while (0)
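// Note the second ExtractBits above: the expected value is extracted at bit
// offset zero (zero_reg) so that it is truncated and extended exactly like
// the field loaded from memory, making the comparison width-correct.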
#define ASSEMBLE_IEEE754_BINOP(name)                                        \
  do {                                                                      \
    FrameScope scope(masm(), StackFrame::MANUAL);                           \
    __ PrepareCallCFunction(0, 2, kScratchReg);                             \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                       \
                            i.InputDoubleRegister(1));                      \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
    /* Move the result in the double result register. */                   \
    __ MovFromFloatResult(i.OutputDoubleRegister());                        \
  } while (0)

#define ASSEMBLE_IEEE754_UNOP(name)                                         \
  do {                                                                      \
    FrameScope scope(masm(), StackFrame::MANUAL);                           \
    __ PrepareCallCFunction(0, 1, kScratchReg);                             \
    __ MovToFloatParameter(i.InputDoubleRegister(0));                       \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
    /* Move the result in the double result register. */                   \
    __ MovFromFloatResult(i.OutputDoubleRegister());                        \
  } while (0)

#define ASSEMBLE_F64X2_ARITHMETIC_BINOP(op)                     \
  do {                                                          \
    __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \
          i.InputSimd128Register(1));                           \
  } while (0)
void AdjustStackPointerForTailCall(MacroAssembler* masm,
                                   FrameAccessState* state,
                                   int new_slot_above_sp,
                                   bool allow_shrinkage = true) {
  int current_sp_offset = state->GetSPToFPSlotCount() +
                          StandardFrameConstants::kFixedSlotCountAboveFp;
  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
  if (stack_slot_delta > 0) {
    masm->Dsubu(sp, sp, stack_slot_delta * kSystemPointerSize);
    state->IncreaseSPDelta(stack_slot_delta);
  } else if (allow_shrinkage && stack_slot_delta < 0) {
    masm->Daddu(sp, sp, -stack_slot_delta * kSystemPointerSize);
    state->IncreaseSPDelta(stack_slot_delta);
  }
}
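// Tail calls adjust the stack in two steps around the gap-resolution moves:
// before the gap the frame may only grow (allow_shrinkage == false below), so
// all source slots of the moves stay addressable; any shrinking happens in
// the after-gap hook once the moves have been emitted.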
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
                                              int first_unused_slot_offset) {
  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                first_unused_slot_offset, false);
}

void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
                                             int first_unused_slot_offset) {
  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                first_unused_slot_offset);
}

void CodeGenerator::AssembleCodeStartRegisterCheck() {
  __ ComputeCodeStartAddress(kScratchReg);
  __ Assert(eq, AbortReason::kWrongFunctionCodeStart,
            kJavaScriptCallCodeStartRegister, Operand(kScratchReg));
}
#ifdef V8_ENABLE_LEAPTIERING
void CodeGenerator::AssembleDispatchHandleRegisterCheck() {
  DCHECK(linkage()->GetIncomingDescriptor()->IsJSFunctionCall());
  // ...
  UseScratchRegisterScope temps(masm());
  Register actual_parameter_count = temps.Acquire();
  {
    UseScratchRegisterScope temps(masm());
    Register scratch = temps.Acquire();
    __ LoadParameterCountFromJSDispatchTable(
        actual_parameter_count, kJavaScriptCallDispatchHandleRegister, scratch);
  }
  __ Assert(eq, AbortReason::kWrongFunctionDispatchHandle,
            actual_parameter_count, Operand(parameter_count_));
}
#endif  // V8_ENABLE_LEAPTIERING
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
    Instruction* instr) {
  MipsOperandConverter i(this, instr);
  InstructionCode opcode = instr->opcode();
  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
  switch (arch_opcode) {
    case kArchCallCodeObject: {
      if (instr->InputAt(0)->IsImmediate()) {
        __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        CodeEntrypointTag tag =
            i.InputCodeEntrypointTag(instr->CodeEnrypointTagInputIndex());
        __ CallCodeObject(reg, tag);
      }
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchCallBuiltinPointer: {
      DCHECK(!instr->InputAt(0)->IsImmediate());
      Register builtin_index = i.InputRegister(0);
      Register target =
          instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister)
              ? kJavaScriptCallCodeStartRegister
              : builtin_index;
      __ CallBuiltinByIndex(builtin_index, target);
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
#if V8_ENABLE_WEBASSEMBLY
    case kArchCallWasmFunction:
    case kArchCallWasmFunctionIndirect: {
      if (instr->InputAt(0)->IsImmediate()) {
        DCHECK_EQ(arch_opcode, kArchCallWasmFunction);
        Constant constant = i.ToConstant(instr->InputAt(0));
        Address wasm_code = static_cast<Address>(constant.ToInt64());
        __ Call(wasm_code, constant.rmode());
      } else if (arch_opcode == kArchCallWasmFunctionIndirect) {
        __ CallWasmCodePointer(i.InputRegister(0));
      } else {
        __ Call(i.InputRegister(0));
      }
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallWasm:
    case kArchTailCallWasmIndirect: {
      if (instr->InputAt(0)->IsImmediate()) {
        DCHECK_EQ(arch_opcode, kArchTailCallWasm);
        Constant constant = i.ToConstant(instr->InputAt(0));
        Address wasm_code = static_cast<Address>(constant.ToInt64());
        __ Jump(wasm_code, constant.rmode());
      } else if (arch_opcode == kArchTailCallWasmIndirect) {
        __ CallWasmCodePointer(i.InputRegister(0), CallJumpMode::kTailCall);
      } else {
        __ Jump(i.InputRegister(0));
      }
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
#endif  // V8_ENABLE_WEBASSEMBLY
    case kArchTailCallCodeObject: {
      if (instr->InputAt(0)->IsImmediate()) {
        __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        CodeEntrypointTag tag =
            i.InputCodeEntrypointTag(instr->CodeEnrypointTagInputIndex());
        __ JumpCodeObject(reg, tag);
      }
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchTailCallAddress: {
      CHECK(!instr->InputAt(0)->IsImmediate());
      __ Jump(i.InputRegister(0));
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchCallJSFunction: {
      Register func = i.InputRegister(0);
      // ...
      uint32_t num_arguments =
          i.InputUint32(instr->JSCallArgumentCountInputIndex());
      __ CallJSFunction(func, num_arguments);
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchPrepareCallCFunction: {
      int const num_gp_parameters = ParamField::decode(instr->opcode());
      int const num_fp_parameters = FPParamField::decode(instr->opcode());
      __ PrepareCallCFunction(num_gp_parameters, num_fp_parameters,
                              kScratchReg);
      // Frame alignment requires using FP-relative frame addressing.
      frame_access_state()->SetFrameAccessToFP();
      break;
    }
    case kArchSaveCallerRegisters: {
      // ...
      break;
    }
    case kArchRestoreCallerRegisters: {
      // ...
      break;
    }
    case kArchPrepareTailCall:
      AssemblePrepareTailCall();
      break;
    case kArchCallCFunctionWithFrameState:
    case kArchCallCFunction: {
      int const num_gp_parameters = ParamField::decode(instr->opcode());
      int const num_fp_parameters = FPParamField::decode(instr->opcode());
      Label return_location;
      SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes;
#if V8_ENABLE_WEBASSEMBLY
      bool isWasmCapiFunction =
          linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
      if (isWasmCapiFunction) {
        // Put the return address in a stack slot.
        // ... (materialize the address of return_location in kScratchReg)
        __ sd(kScratchReg,
              MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
        set_isolate_data_slots = SetIsolateDataSlots::kNo;
      }
#endif  // V8_ENABLE_WEBASSEMBLY
      int pc_offset;
      if (instr->InputAt(0)->IsImmediate()) {
        ExternalReference ref = i.InputExternalReference(0);
        pc_offset = __ CallCFunction(ref, num_gp_parameters, num_fp_parameters,
                                     set_isolate_data_slots, &return_location);
      } else {
        Register func = i.InputRegister(0);
        pc_offset = __ CallCFunction(func, num_gp_parameters,
                                     num_fp_parameters, set_isolate_data_slots,
                                     &return_location);
      }
      RecordSafepoint(instr->reference_map(), pc_offset);

      bool const needs_frame_state =
          (arch_opcode == kArchCallCFunctionWithFrameState);
      if (needs_frame_state) {
        // ...
      }
      // ...
      break;
    }
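    // For wasm C-API functions the case above stores the address of
    // return_location into the exit frame's kCallingPCOffset slot before the
    // call, so the stack walker can attribute the C frame to this wasm exit
    // frame; the isolate data slots are then left alone
    // (SetIsolateDataSlots::kNo) because the C-API callee manages them.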
    case kArchBinarySearchSwitch:
      AssembleArchBinarySearchSwitch(instr);
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchAbortCSADcheck:
      DCHECK(i.InputRegister(0) == a0);
      {
        // Just claim there is a stack frame, without generating one.
        FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
        __ CallBuiltin(Builtin::kAbortCSADcheck);
      }
      __ stop();
      break;
    case kArchDebugBreak:
      __ DebugBreak();
      break;
    case kArchComment:
      __ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)),
                       SourceLocation());
      break;
    case kArchNop:
    case kArchThrowTerminator:
      // Don't emit code for nops.
      break;
    case kArchDeoptimize: {
      DeoptimizationExit* exit =
          BuildTranslation(instr, -1, 0, 0, OutputFrameStateCombine::Ignore());
      __ Branch(exit->label());
      break;
    }
#if V8_ENABLE_WEBASSEMBLY
    case kArchStackPointer:
      // The register allocator expects an allocatable register for the
      // output; it cannot be sp directly.
      __ mov(i.OutputRegister(), sp);
      break;
    case kArchSetStackPointer: {
      DCHECK(instr->InputAt(0)->IsRegister());
      __ mov(sp, i.InputRegister(0));
      break;
    }
#endif  // V8_ENABLE_WEBASSEMBLY
    case kArchStackPointerGreaterThan: {
      Register lhs_register = sp;
      uint32_t offset;
      if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
        lhs_register = i.TempRegister(1);
        __ Dsubu(lhs_register, sp, offset);
      }
      __ Sltu(i.TempRegister(0), i.InputRegister(0), lhs_register);
      break;
    }
    case kArchStackCheckOffset:
      __ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset()));
      break;
    case kArchFramePointer:
      __ mov(i.OutputRegister(), fp);
      break;
    case kArchParentFramePointer:
      if (frame_access_state()->has_frame()) {
        __ Ld(i.OutputRegister(), MemOperand(fp, 0));
      } else {
        __ mov(i.OutputRegister(), fp);
      }
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
                           i.InputDoubleRegister(0), DetermineStubCallMode());
      break;
    case kArchStoreWithWriteBarrier:
    case kArchAtomicStoreWithWriteBarrier: {
      RecordWriteMode mode = RecordWriteModeField::decode(instr->opcode());
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      Register scratch0 = i.TempRegister(0);
      Register scratch1 = i.TempRegister(1);
      auto ool = zone()->New<OutOfLineRecordWrite>(this, object, index, value,
                                                   scratch0, scratch1, mode,
                                                   DetermineStubCallMode());
      __ Daddu(kScratchReg, object, index);
      if (arch_opcode == kArchStoreWithWriteBarrier) {
        __ Sd(value, MemOperand(kScratchReg));
      } else {
        DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
        __ sync();
        __ Sd(value, MemOperand(kScratchReg));
        __ sync();
      }
      if (mode > RecordWriteMode::kValueIsPointer) {
        __ JumpIfSmi(value, ool->exit());
      }
      __ CheckPageFlag(object, scratch0,
                       MemoryChunk::kPointersFromHereAreInterestingMask, ne,
                       ool->entry());
      __ bind(ool->exit());
      break;
    }
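    // The write-barrier fast path above filters as much as possible inline:
    // Smi values can never need a barrier (JumpIfSmi skips to ool->exit()),
    // and CheckPageFlag only enters the OutOfLineRecordWrite slow path when
    // the object lives on a page whose outgoing pointers are interesting to
    // the GC.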
    case kArchStoreIndirectWithWriteBarrier:
      UNREACHABLE();
    case kArchStackSlot: {
      FrameOffset offset =
          frame_access_state()->GetFrameOffset(i.InputInt32(0));
      Register base_reg = offset.from_stack_pointer() ? sp : fp;
      __ Daddu(i.OutputRegister(), base_reg, Operand(offset.offset()));
      break;
    }
    case kIeee754Float64Acos:
      ASSEMBLE_IEEE754_UNOP(acos);
      break;
    case kIeee754Float64Acosh:
      ASSEMBLE_IEEE754_UNOP(acosh);
      break;
    case kIeee754Float64Asin:
      ASSEMBLE_IEEE754_UNOP(asin);
      break;
    case kIeee754Float64Asinh:
      ASSEMBLE_IEEE754_UNOP(asinh);
      break;
    case kIeee754Float64Atan:
      ASSEMBLE_IEEE754_UNOP(atan);
      break;
    case kIeee754Float64Atanh:
      ASSEMBLE_IEEE754_UNOP(atanh);
      break;
    case kIeee754Float64Atan2:
      ASSEMBLE_IEEE754_BINOP(atan2);
      break;
    case kIeee754Float64Cos:
      ASSEMBLE_IEEE754_UNOP(cos);
      break;
    case kIeee754Float64Cosh:
      ASSEMBLE_IEEE754_UNOP(cosh);
      break;
    case kIeee754Float64Cbrt:
      ASSEMBLE_IEEE754_UNOP(cbrt);
      break;
    case kIeee754Float64Exp:
      ASSEMBLE_IEEE754_UNOP(exp);
      break;
    case kIeee754Float64Expm1:
      ASSEMBLE_IEEE754_UNOP(expm1);
      break;
    case kIeee754Float64Log:
      ASSEMBLE_IEEE754_UNOP(log);
      break;
    case kIeee754Float64Log1p:
      ASSEMBLE_IEEE754_UNOP(log1p);
      break;
    case kIeee754Float64Log2:
      ASSEMBLE_IEEE754_UNOP(log2);
      break;
    case kIeee754Float64Log10:
      ASSEMBLE_IEEE754_UNOP(log10);
      break;
    case kIeee754Float64Pow:
      ASSEMBLE_IEEE754_BINOP(pow);
      break;
    case kIeee754Float64Sin:
      ASSEMBLE_IEEE754_UNOP(sin);
      break;
    case kIeee754Float64Sinh:
      ASSEMBLE_IEEE754_UNOP(sinh);
      break;
    case kIeee754Float64Tan:
      ASSEMBLE_IEEE754_UNOP(tan);
      break;
    case kIeee754Float64Tanh:
      ASSEMBLE_IEEE754_UNOP(tanh);
      break;
    case kMips64Add:
      __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dadd:
      __ Daddu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DaddOvf:
      __ DaddOverflow(i.OutputRegister(), i.InputRegister(0),
                      i.InputOperand(1), kScratchReg);
      break;
    case kMips64Sub:
      __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dsub:
      __ Dsubu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DsubOvf:
      __ DsubOverflow(i.OutputRegister(), i.InputRegister(0),
                      i.InputOperand(1), kScratchReg);
      break;
    case kMips64Mul:
      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64MulOvf:
      __ MulOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1),
                     kScratchReg);
      break;
    case kMips64DMulOvf:
      __ DMulOverflow(i.OutputRegister(), i.InputRegister(0),
                      i.InputOperand(1), kScratchReg);
      break;
    case kMips64MulHigh:
      __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64MulHighU:
      __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DMulHigh:
      __ Dmulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DMulHighU:
      __ Dmulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Div:
      __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (kArchVariant == kMips64r6) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMips64DivU:
      __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (kArchVariant == kMips64r6) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMips64Mod:
      __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64ModU:
      __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dmul:
      __ Dmul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Ddiv:
      __ Ddiv(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (kArchVariant == kMips64r6) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMips64DdivU:
      __ Ddivu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (kArchVariant == kMips64r6) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMips64Dmod:
      __ Dmod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DmodU:
      __ Dmodu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dlsa:
      DCHECK(instr->InputAt(2)->IsImmediate());
      __ Dlsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.InputInt8(2));
      break;
    case kMips64Lsa:
      DCHECK(instr->InputAt(2)->IsImmediate());
      __ Lsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.InputInt8(2));
      break;
    case kMips64And:
      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64And32:
      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Or:
      __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Or32:
      __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Nor:
      if (instr->InputAt(1)->IsRegister()) {
        __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      } else {
        DCHECK_EQ(0, i.InputOperand(1).immediate());
        __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
      }
      break;
    case kMips64Nor32:
      if (instr->InputAt(1)->IsRegister()) {
        __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      } else {
        DCHECK_EQ(0, i.InputOperand(1).immediate());
        __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
      }
      break;
    case kMips64Xor:
      __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Xor32:
      __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      __ sll(i.OutputRegister(), i.OutputRegister(), 0x0);
      break;
    case kMips64Clz:
      __ Clz(i.OutputRegister(), i.InputRegister(0));
      break;
    case kMips64Dclz:
      __ dclz(i.OutputRegister(), i.InputRegister(0));
      break;
    case kMips64Popcnt: {
      Register dst = i.OutputRegister();
      Register src = i.InputRegister(0);
      __ Popcnt(dst, src);
      break;
    }
    case kMips64Dpopcnt: {
      Register dst = i.OutputRegister();
      Register src = i.InputRegister(0);
      __ Dpopcnt(dst, src);
      break;
    }
    case kMips64Shl:
      if (instr->InputAt(1)->IsRegister()) {
        __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        __ sll(i.OutputRegister(), i.InputRegister(0),
               static_cast<uint16_t>(imm));
      }
      break;
    case kMips64Shr:
      if (instr->InputAt(1)->IsRegister()) {
        __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        __ srl(i.OutputRegister(), i.InputRegister(0),
               static_cast<uint16_t>(imm));
      }
      break;
    case kMips64Sar:
      if (instr->InputAt(1)->IsRegister()) {
        __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        __ sra(i.OutputRegister(), i.InputRegister(0),
               static_cast<uint16_t>(imm));
      }
      break;
    case kMips64Ext:
      __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
             i.InputInt8(2));
      break;
    case kMips64Ins:
      if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
        __ Ins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
      } else {
        __ Ins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
               i.InputInt8(2));
      }
      break;
    case kMips64Dext:
      __ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
              i.InputInt8(2));
      break;
    case kMips64Dins:
      if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
        __ Dins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
      } else {
        __ Dins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
                i.InputInt8(2));
      }
      break;
    case kMips64Dshl:
      if (instr->InputAt(1)->IsRegister()) {
        __ dsllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        if (imm < 32) {
          __ dsll(i.OutputRegister(), i.InputRegister(0), imm);
        } else {
          __ dsll32(i.OutputRegister(), i.InputRegister(0), imm - 32);
        }
      }
      break;
    case kMips64Dshr:
      if (instr->InputAt(1)->IsRegister()) {
        __ dsrlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        if (imm < 32) {
          __ dsrl(i.OutputRegister(), i.InputRegister(0), imm);
        } else {
          __ dsrl32(i.OutputRegister(), i.InputRegister(0), imm - 32);
        }
      }
      break;
    case kMips64Dsar:
      if (instr->InputAt(1)->IsRegister()) {
        __ dsrav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        if (imm < 32) {
          __ dsra(i.OutputRegister(), i.InputRegister(0), imm);
        } else {
          __ dsra32(i.OutputRegister(), i.InputRegister(0), imm - 32);
        }
      }
      break;
    case kMips64Ror:
      __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dror:
      __ Dror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Mov:
      if (instr->InputAt(0)->IsRegister()) {
        __ mov(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ li(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kMips64CmpS: {
      FPURegister left = i.InputOrZeroSingleRegister(0);
      FPURegister right = i.InputOrZeroSingleRegister(1);
      bool predicate;
      FPUCondition cc =
          FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
      if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
          !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ CompareF32(cc, left, right);
      break;
    }
    case kMips64AddS:
      __ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64SubS:
      __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64MulS:
      __ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64DivS:
      __ div_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64AbsS:
      __ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    case kMips64NegS:
      __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    case kMips64SqrtS: {
      __ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMips64MaxS:
      __ max_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64MinS:
      __ min_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64CmpD: {
      FPURegister left = i.InputOrZeroDoubleRegister(0);
      FPURegister right = i.InputOrZeroDoubleRegister(1);
      bool predicate;
      FPUCondition cc =
          FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
      if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
          !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ CompareF64(cc, left, right);
      break;
    }
    case kMips64AddD:
      __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64SubD:
      __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64MulD:
      __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64DivD:
      __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64ModD: {
      FrameScope scope(masm(), StackFrame::MANUAL);
      __ PrepareCallCFunction(0, 2, kScratchReg);
      __ MovToFloatParameters(i.InputDoubleRegister(0),
                              i.InputDoubleRegister(1));
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
      // Move the result in the double result register.
      __ MovFromFloatResult(i.OutputDoubleRegister());
      break;
    }
    case kMips64AbsD:
      __ abs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64NegD:
      __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64SqrtD: {
      __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMips64MaxD:
      __ max_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64MinD:
      __ min_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64Float64RoundDown: {
      __ Floor_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMips64Float32RoundDown: {
      __ Floor_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    }
    case kMips64Float64RoundTruncate: {
      __ Trunc_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMips64Float32RoundTruncate: {
      __ Trunc_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    }
    case kMips64Float64RoundUp: {
      __ Ceil_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMips64Float32RoundUp: {
      __ Ceil_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    }
    case kMips64Float64RoundTiesEven: {
      __ Round_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMips64Float32RoundTiesEven: {
      __ Round_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    }
    case kMips64Float32Max: {
      FPURegister dst = i.OutputSingleRegister();
      FPURegister src1 = i.InputSingleRegister(0);
      FPURegister src2 = i.InputSingleRegister(1);
      auto ool = zone()->New<OutOfLineFloat32Max>(this, dst, src1, src2);
      __ Float32Max(dst, src1, src2, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kMips64Float64Max: {
      FPURegister dst = i.OutputDoubleRegister();
      FPURegister src1 = i.InputDoubleRegister(0);
      FPURegister src2 = i.InputDoubleRegister(1);
      auto ool = zone()->New<OutOfLineFloat64Max>(this, dst, src1, src2);
      __ Float64Max(dst, src1, src2, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kMips64Float32Min: {
      FPURegister dst = i.OutputSingleRegister();
      FPURegister src1 = i.InputSingleRegister(0);
      FPURegister src2 = i.InputSingleRegister(1);
      auto ool = zone()->New<OutOfLineFloat32Min>(this, dst, src1, src2);
      __ Float32Min(dst, src1, src2, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kMips64Float64Min: {
      FPURegister dst = i.OutputDoubleRegister();
      FPURegister src1 = i.InputDoubleRegister(0);
      FPURegister src2 = i.InputDoubleRegister(1);
      auto ool = zone()->New<OutOfLineFloat64Min>(this, dst, src1, src2);
      __ Float64Min(dst, src1, src2, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kMips64Float64SilenceNaN:
      __ FPUCanonicalizeNaN(i.OutputDoubleRegister(),
                            i.InputDoubleRegister(0));
      break;
    case kMips64CvtSD:
      __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64CvtDS:
      __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
      break;
    case kMips64CvtDW: {
      FPURegister scratch = kScratchDoubleReg;
      __ mtc1(i.InputRegister(0), scratch);
      __ cvt_d_w(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMips64CvtSW: {
      FPURegister scratch = kScratchDoubleReg;
      __ mtc1(i.InputRegister(0), scratch);
      __ cvt_s_w(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMips64CvtSUw: {
      __ Cvt_s_uw(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
    }
    case kMips64CvtSL: {
      FPURegister scratch = kScratchDoubleReg;
      __ dmtc1(i.InputRegister(0), scratch);
      __ cvt_s_l(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMips64CvtDL: {
      FPURegister scratch = kScratchDoubleReg;
      __ dmtc1(i.InputRegister(0), scratch);
      __ cvt_d_l(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMips64CvtDUw: {
      __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
    }
    case kMips64CvtDUl: {
      __ Cvt_d_ul(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
    }
    case kMips64CvtSUl: {
      __ Cvt_s_ul(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
    }
    case kMips64FloorWD: {
      FPURegister scratch = kScratchDoubleReg;
      __ floor_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64CeilWD: {
      FPURegister scratch = kScratchDoubleReg;
      __ ceil_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64RoundWD: {
      FPURegister scratch = kScratchDoubleReg;
      __ round_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64TruncWD: {
      FPURegister scratch = kScratchDoubleReg;
      // Other arches use round-to-zero here, so we follow.
      __ trunc_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      if (instr->OutputCount() > 1) {
        // Check for inputs below INT32_MIN, above INT32_MAX, and NaN.
        __ li(i.OutputRegister(1), 1);
        __ Move(scratch, static_cast<double>(INT32_MIN));
        __ CompareF64(LE, scratch, i.InputDoubleRegister(0));
        __ LoadZeroIfNotFPUCondition(i.OutputRegister(1));
        __ Move(scratch, static_cast<double>(INT32_MAX) + 1);
        __ CompareF64(LE, scratch, i.InputDoubleRegister(0));
        __ LoadZeroIfFPUCondition(i.OutputRegister(1));
      }
      break;
    }
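    // When kMips64TruncWD has a second output it is a success flag: the flag
    // starts at 1 and is cleared when the input is below INT32_MIN, at or
    // above INT32_MAX + 1, or NaN (every CompareF64 against a NaN is false,
    // so LoadZeroIfNotFPUCondition already clears the flag on the first
    // check).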
    case kMips64FloorWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ floor_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64CeilWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ ceil_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64RoundWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ round_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64TruncWS: {
      FPURegister scratch = kScratchDoubleReg;
      bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode());
      __ trunc_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      if (set_overflow_to_min_i32) {
        // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead;
        // INT32_MIN allows easier out-of-bounds detection.
        // ...
      }
      break;
    }
    case kMips64TruncLS: {
      FPURegister scratch = kScratchDoubleReg;
      bool load_status = instr->OutputCount() > 1;
      // ...
      __ trunc_l_s(scratch, i.InputDoubleRegister(0));
      __ dmfc1(i.OutputRegister(), scratch);
      if (load_status) {
        // ...
      }
      break;
    }
    case kMips64TruncLD: {
      FPURegister scratch = kScratchDoubleReg;
      bool set_overflow_to_min_i64 = MiscField::decode(instr->opcode());
      bool load_status = instr->OutputCount() > 1;
      // ...
      __ trunc_l_d(scratch, i.InputDoubleRegister(0));
      __ dmfc1(i.OutputRegister(0), scratch);
      if (load_status) {
        // ...
      }
      if (set_overflow_to_min_i64) {
        // ...
      }
      break;
    }
    case kMips64TruncUwD: {
      FPURegister scratch = kScratchDoubleReg;
      __ Trunc_uw_d(i.OutputRegister(), i.InputDoubleRegister(0), scratch);
      if (instr->OutputCount() > 1) {
        __ li(i.OutputRegister(1), 1);
        __ Move(scratch, static_cast<double>(-1.0));
        __ CompareF64(LT, scratch, i.InputDoubleRegister(0));
        __ LoadZeroIfNotFPUCondition(i.OutputRegister(1));
        __ Move(scratch, static_cast<double>(UINT32_MAX) + 1);
        __ CompareF64(LE, scratch, i.InputDoubleRegister(0));
        __ LoadZeroIfFPUCondition(i.OutputRegister(1));
      }
      break;
    }
    case kMips64TruncUwS: {
      FPURegister scratch = kScratchDoubleReg;
      bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode());
      __ Trunc_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch);
      if (set_overflow_to_min_i32) {
        // ...
      }
      break;
    }
    case kMips64TruncUlS: {
      FPURegister scratch = kScratchDoubleReg;
      Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
      __ Trunc_ul_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch,
                    result);
      break;
    }
    case kMips64TruncUlD: {
      FPURegister scratch = kScratchDoubleReg;
      Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
      __ Trunc_ul_d(i.OutputRegister(0), i.InputDoubleRegister(0), scratch,
                    result);
      break;
    }
    case kMips64BitcastDL:
      __ dmfc1(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64BitcastLD:
      __ dmtc1(i.InputRegister(0), i.OutputDoubleRegister());
      break;
    case kMips64Float64ExtractLowWord32:
      __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64Float64ExtractHighWord32:
      __ FmoveHigh(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64Float64FromWord32Pair:
      __ Move(i.OutputDoubleRegister(), i.InputRegister(1),
              i.InputRegister(0));
      break;
    case kMips64Float64InsertLowWord32:
      __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1));
      break;
    case kMips64Float64InsertHighWord32:
      __ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1));
      break;
    case kMips64Seb:
      __ seb(i.OutputRegister(), i.InputRegister(0));
      break;
    case kMips64Seh:
      __ seh(i.OutputRegister(), i.InputRegister(0));
      break;
    case kMips64Lbu:
      __ Lbu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Lb:
      __ Lb(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Sb: {
      size_t index = 0;
      MemOperand mem = i.MemoryOperand(&index);
      __ Sb(i.InputOrZeroRegister(index), mem);
      break;
    }
    case kMips64Lhu:
      __ Lhu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Ulhu:
      __ Ulhu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Lh:
      __ Lh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Ulh:
      __ Ulh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Sh: {
      size_t index = 0;
      MemOperand mem = i.MemoryOperand(&index);
      __ Sh(i.InputOrZeroRegister(index), mem);
      break;
    }
    case kMips64Lw:
      __ Lw(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Ulw:
      __ Ulw(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Lwu:
      __ Lwu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Ulwu:
      __ Ulwu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Ld:
      __ Ld(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Uld:
      __ Uld(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Sw: {
      size_t index = 0;
      MemOperand mem = i.MemoryOperand(&index);
      __ Sw(i.InputOrZeroRegister(index), mem);
      break;
    }
    case kMips64Usw: {
      size_t index = 0;
      MemOperand mem = i.MemoryOperand(&index);
      __ Usw(i.InputOrZeroRegister(index), mem);
      break;
    }
    case kMips64Sd: {
      size_t index = 0;
      MemOperand mem = i.MemoryOperand(&index);
      __ Sd(i.InputOrZeroRegister(index), mem);
      break;
    }
    case kMips64Usd: {
      size_t index = 0;
      MemOperand mem = i.MemoryOperand(&index);
      __ Usd(i.InputOrZeroRegister(index), mem);
      break;
    }
    case kMips64Lwc1:
      __ Lwc1(i.OutputSingleRegister(), i.MemoryOperand());
      break;
    case kMips64Ulwc1: {
      __ Ulwc1(i.OutputSingleRegister(), i.MemoryOperand(), kScratchReg);
      break;
    }
    case kMips64Swc1: {
      size_t index = 0;
      MemOperand operand = i.MemoryOperand(&index);
      FPURegister ft = i.InputOrZeroSingleRegister(index);
      if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ Swc1(ft, operand);
      break;
    }
    case kMips64Uswc1: {
      size_t index = 0;
      MemOperand operand = i.MemoryOperand(&index);
      FPURegister ft = i.InputOrZeroSingleRegister(index);
      if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ Uswc1(ft, operand, kScratchReg);
      break;
    }
    case kMips64Ldc1:
      __ Ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
      break;
    case kMips64Sdc1: {
      size_t index = 0;
      MemOperand operand = i.MemoryOperand(&index);
      FPURegister ft = i.InputOrZeroDoubleRegister(index);
      if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ Sdc1(ft, operand);
      break;
    }
    case kMips64Usdc1: {
      size_t index = 0;
      MemOperand operand = i.MemoryOperand(&index);
      FPURegister ft = i.InputOrZeroDoubleRegister(index);
      if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ Usdc1(ft, operand, kScratchReg);
      break;
    }
    case kMips64Push:
      if (instr->InputAt(0)->IsFPRegister()) {
        // ...
      } else {
        // ...
      }
      break;
    case kMips64Peek: {
      int reverse_slot = i.InputInt32(0);
      int offset =
          FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
      if (instr->OutputAt(0)->IsFPRegister()) {
        LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
        if (op->representation() == MachineRepresentation::kFloat64) {
          __ Ldc1(i.OutputDoubleRegister(), MemOperand(fp, offset));
        } else if (op->representation() == MachineRepresentation::kFloat32) {
          __ Lwc1(i.OutputSingleRegister(0), MemOperand(fp, offset));
        } else {
          // ...
        }
      } else {
        __ Ld(i.OutputRegister(0), MemOperand(fp, offset));
      }
      break;
    }
    case kMips64StackClaim: {
      __ Dsubu(sp, sp, Operand(i.InputInt32(0)));
      frame_access_state()->IncreaseSPDelta(i.InputInt32(0) /
                                            kSystemPointerSize);
      break;
    }
    case kMips64StoreToStackSlot: {
      if (instr->InputAt(0)->IsFPRegister()) {
        if (instr->InputAt(0)->IsSimd128Register()) {
          CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
          __ st_b(i.InputSimd128Register(0), MemOperand(sp, i.InputInt32(1)));
        } else {
          __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
        }
      } else {
        __ Sd(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
      }
      break;
    }
    case kMips64ByteSwap64: {
      __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 8);
      break;
    }
    case kMips64ByteSwap32: {
      __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
      break;
    }
    case kMips64S128LoadSplat: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      auto sz = static_cast<MSASize>(MiscField::decode(instr->opcode()));
      __ LoadSplat(sz, i.OutputSimd128Register(), i.MemoryOperand());
      break;
    }
    case kMips64S128Load8x8S: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      Simd128Register scratch = kSimd128ScratchReg;
      __ Ld(kScratchReg, i.MemoryOperand());
      __ fill_d(dst, kScratchReg);
      __ clti_s_b(scratch, dst, 0);
      __ ilvr_b(dst, scratch, dst);
      break;
    }
    case kMips64S128Load8x8U: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      Simd128Register scratch = kSimd128ScratchReg;
      __ Ld(kScratchReg, i.MemoryOperand());
      __ fill_d(dst, kScratchReg);
      __ xor_v(scratch, scratch, scratch);
      __ ilvr_b(dst, scratch, dst);
      break;
    }
    case kMips64S128Load16x4S: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      Simd128Register scratch = kSimd128ScratchReg;
      __ Ld(kScratchReg, i.MemoryOperand());
      __ fill_d(dst, kScratchReg);
      __ clti_s_h(scratch, dst, 0);
      __ ilvr_h(dst, scratch, dst);
      break;
    }
    case kMips64S128Load16x4U: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      Simd128Register scratch = kSimd128ScratchReg;
      __ Ld(kScratchReg, i.MemoryOperand());
      __ fill_d(dst, kScratchReg);
      __ xor_v(scratch, scratch, scratch);
      __ ilvr_h(dst, scratch, dst);
      break;
    }
    case kMips64S128Load32x2S: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      Simd128Register scratch = kSimd128ScratchReg;
      __ Ld(kScratchReg, i.MemoryOperand());
      __ fill_d(dst, kScratchReg);
      __ clti_s_w(scratch, dst, 0);
      __ ilvr_w(dst, scratch, dst);
      break;
    }
    case kMips64S128Load32x2U: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      Simd128Register scratch = kSimd128ScratchReg;
      __ Ld(kScratchReg, i.MemoryOperand());
      __ fill_d(dst, kScratchReg);
      __ xor_v(scratch, scratch, scratch);
      __ ilvr_w(dst, scratch, dst);
      break;
    }
    case kMips64S128Load32Zero: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      __ xor_v(dst, dst, dst);
      __ Lwu(kScratchReg, i.MemoryOperand());
      __ insert_w(dst, 0, kScratchReg);
      break;
    }
    case kMips64S128Load64Zero: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      __ xor_v(dst, dst, dst);
      __ Ld(kScratchReg, i.MemoryOperand());
      __ insert_d(dst, 0, kScratchReg);
      break;
    }
    case kMips64S128LoadLane: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      DCHECK_EQ(dst, i.InputSimd128Register(0));
      auto sz = static_cast<MSASize>(MiscField::decode(instr->opcode()));
      __ LoadLane(sz, dst, i.InputUint8(1), i.MemoryOperand(2));
      break;
    }
    case kMips64S128StoreLane: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register src = i.InputSimd128Register(0);
      auto sz = static_cast<MSASize>(MiscField::decode(instr->opcode()));
      __ StoreLane(sz, src, i.InputUint8(1), i.MemoryOperand(2));
      break;
    }
    case kAtomicLoadInt8:
      DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
      ASSEMBLE_ATOMIC_LOAD_INTEGER(Lb);
      break;
    case kAtomicLoadUint8:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
      break;
    case kAtomicLoadInt16:
      DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
      ASSEMBLE_ATOMIC_LOAD_INTEGER(Lh);
      break;
    case kAtomicLoadUint16:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
      break;
    case kAtomicLoadWord32:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(Lw);
      break;
    case kMips64Word64AtomicLoadUint64:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld);
      break;
    case kAtomicStoreWord8:
      ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
      break;
    case kAtomicStoreWord16:
      ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
      break;
    case kAtomicStoreWord32:
      ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
      break;
    case kMips64StoreCompressTagged:
    case kMips64Word64AtomicStoreWord64:
      ASSEMBLE_ATOMIC_STORE_INTEGER(Sd);
      break;
    case kAtomicExchangeInt8:
      DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
      break;
    case kAtomicExchangeUint8:
      switch (AtomicWidthField::decode(opcode)) {
        case AtomicWidth::kWord32:
          ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
          break;
        case AtomicWidth::kWord64:
          ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
          break;
      }
      break;
    case kAtomicExchangeInt16:
      DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
      break;
    case kAtomicExchangeUint16:
      switch (AtomicWidthField::decode(opcode)) {
        case AtomicWidth::kWord32:
          ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
          break;
        case AtomicWidth::kWord64:
          ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
          break;
      }
      break;
    case kAtomicExchangeWord32:
      switch (AtomicWidthField::decode(opcode)) {
        case AtomicWidth::kWord32:
          ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
          break;
        case AtomicWidth::kWord64:
          ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
          break;
      }
      break;
    case kMips64Word64AtomicExchangeUint64:
      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Lld, Scd);
      break;
    case kAtomicCompareExchangeInt8:
      DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
      break;
    case kAtomicCompareExchangeUint8:
      switch (AtomicWidthField::decode(opcode)) {
        case AtomicWidth::kWord32:
          ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
          break;
        case AtomicWidth::kWord64:
          ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
          break;
      }
      break;
    case kAtomicCompareExchangeInt16:
      DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
      break;
    case kAtomicCompareExchangeUint16:
      switch (AtomicWidthField::decode(opcode)) {
        case AtomicWidth::kWord32:
          ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
          break;
        case AtomicWidth::kWord64:
          ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16,
                                                       64);
          break;
      }
      break;
    case kAtomicCompareExchangeWord32:
      switch (AtomicWidthField::decode(opcode)) {
        case AtomicWidth::kWord32:
          __ sll(i.InputRegister(2), i.InputRegister(2), 0);
          ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
          break;
        case AtomicWidth::kWord64:
          ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32,
                                                       64);
          break;
      }
      break;
    case kMips64Word64AtomicCompareExchangeUint64:
      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Lld, Scd);
      break;
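// ATOMIC_BINOP_CASE expands each operation into the five opcode cases below.
// The AtomicWidthField of the opcode selects the LL/SC width (Ll/Sc for
// 32-bit containers, Lld/Scd for 64-bit), the Int/Uint opcode flavor selects
// sign- vs. zero-extension of the loaded sub-word, and the size parameter is
// the field width in bits.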
#define ATOMIC_BINOP_CASE(op, inst32, inst64)                          \
  case kAtomic##op##Int8:                                              \
    DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
    ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst32, 32);            \
    break;                                                             \
  case kAtomic##op##Uint8:                                             \
    switch (AtomicWidthField::decode(opcode)) {                        \
      case AtomicWidth::kWord32:                                       \
        ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst32, 32);       \
        break;                                                         \
      case AtomicWidth::kWord64:                                       \
        ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst64, 64);     \
        break;                                                         \
    }                                                                  \
    break;                                                             \
  case kAtomic##op##Int16:                                             \
    DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
    ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst32, 32);           \
    break;                                                             \
  case kAtomic##op##Uint16:                                            \
    switch (AtomicWidthField::decode(opcode)) {                        \
      case AtomicWidth::kWord32:                                       \
        ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst32, 32);      \
        break;                                                         \
      case AtomicWidth::kWord64:                                       \
        ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst64, 64);    \
        break;                                                         \
    }                                                                  \
    break;                                                             \
  case kAtomic##op##Word32:                                            \
    switch (AtomicWidthField::decode(opcode)) {                        \
      case AtomicWidth::kWord32:                                       \
        ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst32);                         \
        break;                                                         \
      case AtomicWidth::kWord64:                                       \
        ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst64, 64);    \
        break;                                                         \
    }                                                                  \
    break;                                                             \
  case kMips64Word64Atomic##op##Uint64:                                \
    ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst64);                           \
    break;
      ATOMIC_BINOP_CASE(Add, Addu, Daddu)
      ATOMIC_BINOP_CASE(Sub, Subu, Dsubu)
      ATOMIC_BINOP_CASE(And, And, And)
      ATOMIC_BINOP_CASE(Or, Or, Or)
      ATOMIC_BINOP_CASE(Xor, Xor, Xor)
#undef ATOMIC_BINOP_CASE
    case kMips64AssertEqual:
      __ Assert(eq, static_cast<AbortReason>(i.InputOperand(2).immediate()),
                i.InputRegister(0), Operand(i.InputRegister(1)));
      break;
    case kMips64S128Const: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      uint64_t imm1 = make_uint64(i.InputUint32(1), i.InputUint32(0));
      uint64_t imm2 = make_uint64(i.InputUint32(3), i.InputUint32(2));
      __ li(kScratchReg, imm1);
      __ insert_d(dst, 0, kScratchReg);
      __ li(kScratchReg, imm2);
      __ insert_d(dst, 1, kScratchReg);
      break;
    }
    case kMips64S128Zero: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      __ xor_v(dst, dst, dst);
      break;
    }
    case kMips64S128AllOnes: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      __ ceq_d(dst, dst, dst);
      break;
    }
    case kMips64I32x4Splat: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ fill_w(i.OutputSimd128Register(), i.InputRegister(0));
      break;
    }
    case kMips64I32x4ExtractLane: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ copy_s_w(i.OutputRegister(), i.InputSimd128Register(0),
                  i.InputInt8(1));
      break;
    }
    case kMips64I32x4ReplaceLane: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register src = i.InputSimd128Register(0);
      Simd128Register dst = i.OutputSimd128Register();
      if (src != dst) {
        __ move_v(dst, src);
      }
      __ insert_w(dst, i.InputInt8(1), i.InputRegister(2));
      break;
    }
    case kMips64I32x4Add: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ addv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kMips64I32x4Sub: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ subv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kMips64F64x2Abs: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ bclri_d(i.OutputSimd128Register(), i.InputSimd128Register(0), 63);
      break;
    }
    case kMips64F64x2Neg: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ bnegi_d(i.OutputSimd128Register(), i.InputSimd128Register(0), 63);
      break;
    }
    case kMips64F64x2Sqrt: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ fsqrt_d(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kMips64F64x2Add: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      ASSEMBLE_F64X2_ARITHMETIC_BINOP(fadd_d);
      break;
    }
    case kMips64F64x2Sub: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      ASSEMBLE_F64X2_ARITHMETIC_BINOP(fsub_d);
      break;
    }
    case kMips64F64x2Mul: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      ASSEMBLE_F64X2_ARITHMETIC_BINOP(fmul_d);
      break;
    }
    case kMips64F64x2Div: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      ASSEMBLE_F64X2_ARITHMETIC_BINOP(fdiv_d);
      break;
    }
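    // MSA fmin_d/fmax_d alone do not give the Wasm semantics for NaN and
    // signed zero, so the two cases below pre-process the inputs: lanes that
    // compare equal are OR-ed (min) or AND-ed (max) so that -0.0 orders below
    // +0.0, lanes where src0 is NaN keep src0 so the NaN propagates, and the
    // final fmin_d/fmax_d on two identical operands quiets the result.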
    case kMips64F64x2Min: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      Simd128Register src0 = i.InputSimd128Register(0);
      Simd128Register src1 = i.InputSimd128Register(1);
      Simd128Register scratch0 = kSimd128RegZero;
      Simd128Register scratch1 = kSimd128ScratchReg;
      __ fseq_d(scratch0, src0, src1);
      __ bsel_v(scratch0, src1, src0);
      __ or_v(scratch1, scratch0, src1);
      // scratch0 = isNaN(src0) ? src0 : scratch1.
      __ fseq_d(scratch0, src0, src0);
      __ bsel_v(scratch0, src0, scratch1);
      // dst = (src0 < scratch0) ? src0 : scratch0.
      __ fslt_d(scratch1, src0, scratch0);
      __ bsel_v(scratch1, scratch0, src0);
      __ fmin_d(dst, scratch1, scratch1);
      break;
    }
    case kMips64F64x2Max: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      Simd128Register src0 = i.InputSimd128Register(0);
      Simd128Register src1 = i.InputSimd128Register(1);
      Simd128Register scratch0 = kSimd128RegZero;
      Simd128Register scratch1 = kSimd128ScratchReg;
      __ fseq_d(scratch0, src0, src1);
      __ bsel_v(scratch0, src1, src0);
      __ and_v(scratch1, scratch0, src1);
      // scratch0 = isNaN(src0) ? src0 : scratch1.
      __ fseq_d(scratch0, src0, src0);
      __ bsel_v(scratch0, src0, scratch1);
      // dst = (scratch0 < src0) ? src0 : scratch0.
      __ fslt_d(scratch1, scratch0, src0);
      __ bsel_v(scratch1, scratch0, src0);
      __ fmax_d(dst, scratch1, scratch1);
      break;
    }
    case kMips64F64x2Eq: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ fceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kMips64F64x2Ne: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ fcune_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kMips64F64x2Lt: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ fclt_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kMips64F64x2Le: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ fcle_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kMips64F64x2Splat: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ Move(kScratchReg, i.InputDoubleRegister(0));
      __ fill_d(i.OutputSimd128Register(), kScratchReg);
      break;
    }
    case kMips64F64x2ExtractLane: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ copy_s_d(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1));
      __ Move(i.OutputDoubleRegister(), kScratchReg);
      break;
    }
    case kMips64F64x2ReplaceLane: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register src = i.InputSimd128Register(0);
      Simd128Register dst = i.OutputSimd128Register();
      __ Move(kScratchReg, i.InputDoubleRegister(2));
      if (dst != src) {
        __ move_v(dst, src);
      }
      __ insert_d(dst, i.InputInt8(1), kScratchReg);
      break;
    }
    case kMips64I64x2Splat: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ fill_d(i.OutputSimd128Register(), i.InputRegister(0));
      break;
    }
    case kMips64I64x2ExtractLane: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ copy_s_d(i.OutputRegister(), i.InputSimd128Register(0),
                  i.InputInt8(1));
      break;
    }
    case kMips64F64x2Pmin: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      Simd128Register lhs = i.InputSimd128Register(0);
      Simd128Register rhs = i.InputSimd128Register(1);
      // dst = rhs < lhs ? rhs : lhs.
      __ fclt_d(dst, rhs, lhs);
      __ bsel_v(dst, lhs, rhs);
      break;
    }
    case kMips64F64x2Pmax: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      Simd128Register lhs = i.InputSimd128Register(0);
      Simd128Register rhs = i.InputSimd128Register(1);
      // dst = lhs < rhs ? rhs : lhs.
      __ fclt_d(dst, lhs, rhs);
      __ bsel_v(dst, lhs, rhs);
      break;
    }
    case kMips64F64x2Ceil: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ MSARoundD(i.OutputSimd128Register(), i.InputSimd128Register(0),
                   kRoundToPlusInf);
      break;
    }
    case kMips64F64x2Floor: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ MSARoundD(i.OutputSimd128Register(), i.InputSimd128Register(0),
                   kRoundToMinusInf);
      break;
    }
    case kMips64F64x2Trunc: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ MSARoundD(i.OutputSimd128Register(), i.InputSimd128Register(0),
                   kRoundToZero);
      break;
    }
    case kMips64F64x2NearestInt: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ MSARoundD(i.OutputSimd128Register(), i.InputSimd128Register(0),
                   kRoundToNearest);
      break;
    }
    case kMips64F64x2ConvertLowI32x4S: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      // ...
      break;
    }
    case kMips64F64x2ConvertLowI32x4U: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      // ...
      break;
    }
    case kMips64F64x2PromoteLowF32x4: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ fexupr_d(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kMips64I64x2ReplaceLane: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register src = i.InputSimd128Register(0);
      Simd128Register dst = i.OutputSimd128Register();
      if (src != dst) {
        __ move_v(dst, src);
      }
      __ insert_d(dst, i.InputInt8(1), i.InputRegister(2));
      break;
    }
    case kMips64I64x2Add: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ addv_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kMips64I64x2Sub: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ subv_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kMips64I64x2Mul: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ mulv_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kMips64I64x2Neg: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
      __ subv_d(i.OutputSimd128Register(), kSimd128RegZero,
                i.InputSimd128Register(0));
      break;
    }
    case kMips64I64x2Shl: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      if (instr->InputAt(1)->IsRegister()) {
        __ fill_d(kSimd128ScratchReg, i.InputRegister(1));
        __ sll_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 kSimd128ScratchReg);
      } else {
        __ slli_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputInt6(1));
      }
      break;
    }
    case kMips64I64x2ShrS: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      if (instr->InputAt(1)->IsRegister()) {
        __ fill_d(kSimd128ScratchReg, i.InputRegister(1));
        __ sra_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 kSimd128ScratchReg);
      } else {
        __ srai_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputInt6(1));
      }
      break;
    }
    case kMips64I64x2ShrU: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      if (instr->InputAt(1)->IsRegister()) {
        __ fill_d(kSimd128ScratchReg, i.InputRegister(1));
        __ srl_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 kSimd128ScratchReg);
      } else {
        __ srli_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputInt6(1));
      }
      break;
    }
    case kMips64I64x2BitMask: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Register dst = i.OutputRegister();
      Simd128Register src = i.InputSimd128Register(0);
      Simd128Register scratch0 = kSimd128RegZero;
      Simd128Register scratch1 = kSimd128ScratchReg;
      __ srli_d(scratch0, src, 63);
      __ shf_w(scratch1, scratch0, 0x02);
      __ slli_d(scratch1, scratch1, 1);
      __ or_v(scratch0, scratch0, scratch1);
      __ copy_u_b(dst, scratch0, 0);
      break;
    }
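    // The BitMask idiom above: srli_d moves each lane's sign bit down to bit
    // 0 of its lane, the shf_w/slli/or_v steps funnel the per-lane bits next
    // to each other, and copy_u_b reads the assembled mask out of the low
    // byte of the vector.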
    case kMips64I64x2Eq: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ ceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
               i.InputSimd128Register(1));
      break;
    }
    case kMips64I64x2Ne: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ ceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
               i.InputSimd128Register(1));
      __ nor_v(i.OutputSimd128Register(), i.OutputSimd128Register(),
               i.OutputSimd128Register());
      break;
    }
    case kMips64I64x2GtS: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ clt_s_d(i.OutputSimd128Register(), i.InputSimd128Register(1),
                 i.InputSimd128Register(0));
      break;
    }
    case kMips64I64x2GeS: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ cle_s_d(i.OutputSimd128Register(), i.InputSimd128Register(1),
                 i.InputSimd128Register(0));
      break;
    }
    case kMips64I64x2Abs: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
      __ add_a_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 kSimd128RegZero);
      break;
    }
    case kMips64I64x2SConvertI32x4Low: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      __ ilvr_w(dst, i.InputSimd128Register(0), i.InputSimd128Register(0));
      __ slli_d(dst, dst, 32);
      __ srai_d(dst, dst, 32);
      break;
    }
    case kMips64I64x2SConvertI32x4High: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      __ ilvl_w(dst, i.InputSimd128Register(0), i.InputSimd128Register(0));
      __ slli_d(dst, dst, 32);
      __ srai_d(dst, dst, 32);
      break;
    }
    case kMips64I64x2UConvertI32x4Low: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
      __ ilvr_w(i.OutputSimd128Register(), kSimd128RegZero,
                i.InputSimd128Register(0));
      break;
    }
    case kMips64I64x2UConvertI32x4High: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
      __ ilvl_w(i.OutputSimd128Register(), kSimd128RegZero,
                i.InputSimd128Register(0));
      break;
    }
    case kMips64ExtMulLow: {
      auto dt = static_cast<MSADataType>(MiscField::decode(instr->opcode()));
      __ ExtMulLow(dt, i.OutputSimd128Register(), i.InputSimd128Register(0),
                   i.InputSimd128Register(1));
      break;
    }
    case kMips64ExtMulHigh: {
      auto dt = static_cast<MSADataType>(MiscField::decode(instr->opcode()));
      __ ExtMulHigh(dt, i.OutputSimd128Register(), i.InputSimd128Register(0),
                    i.InputSimd128Register(1));
      break;
    }
    case kMips64ExtAddPairwise: {
      auto dt = static_cast<MSADataType>(MiscField::decode(instr->opcode()));
      __ ExtAddPairwise(dt, i.OutputSimd128Register(),
                        i.InputSimd128Register(0));
      break;
    }
    case kMips64F32x4Splat: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ FmoveLow(kScratchReg, i.InputSingleRegister(0));
      __ fill_w(i.OutputSimd128Register(), kScratchReg);
      break;
    }
    case kMips64F32x4ExtractLane: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ copy_u_w(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1));
      __ FmoveLow(i.OutputSingleRegister(), kScratchReg);
      break;
    }
    case kMips64F32x4ReplaceLane: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register src = i.InputSimd128Register(0);
      Simd128Register dst = i.OutputSimd128Register();
      __ FmoveLow(kScratchReg, i.InputSingleRegister(2));
      if (dst != src) {
        __ move_v(dst, src);
      }
      __ insert_w(dst, i.InputInt8(1), kScratchReg);
      break;
    }
    case kMips64F32x4SConvertI32x4: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ ffint_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kMips64F32x4UConvertI32x4: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ ffint_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kMips64I32x4Mul: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ mulv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kMips64I32x4MaxS: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ max_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kMips64I32x4MinS: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ min_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kMips64I32x4Eq: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ ceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
               i.InputSimd128Register(1));
      break;
    }
    case kMips64I32x4Ne: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      __ ceq_w(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
      __ nor_v(dst, dst, dst);
      break;
    }
    case kMips64I32x4Shl: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      if (instr->InputAt(1)->IsRegister()) {
        __ fill_w(kSimd128ScratchReg, i.InputRegister(1));
        __ sll_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 kSimd128ScratchReg);
      } else {
        __ slli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputInt5(1));
      }
      break;
    }
    case kMips64I32x4ShrS: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      if (instr->InputAt(1)->IsRegister()) {
        __ fill_w(kSimd128ScratchReg, i.InputRegister(1));
        __ sra_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 kSimd128ScratchReg);
      } else {
        __ srai_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputInt5(1));
      }
      break;
    }
    case kMips64I32x4ShrU: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      if (instr->InputAt(1)->IsRegister()) {
        __ fill_w(kSimd128ScratchReg, i.InputRegister(1));
        __ srl_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 kSimd128ScratchReg);
      } else {
        __ srli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputInt5(1));
      }
      break;
    }
    case kMips64I32x4MaxU: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ max_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kMips64I32x4MinU: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ min_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kMips64S128Select: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      DCHECK(i.OutputSimd128Register() == i.InputSimd128Register(0));
      __ bsel_v(i.OutputSimd128Register(), i.InputSimd128Register(2),
                i.InputSimd128Register(1));
      break;
    }
    case kMips64S128AndNot: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register scratch = kSimd128ScratchReg,
                      dst = i.OutputSimd128Register(),
                      src0 = i.InputSimd128Register(0),
                      src1 = i.InputSimd128Register(1);
      __ nor_v(scratch, src1, src1);
      __ and_v(dst, scratch, src0);
      break;
    }
    case kMips64F32x4Abs: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ bclri_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
      break;
    }
    case kMips64F32x4Neg: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ bnegi_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
      break;
    }
    case kMips64F32x4Add: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ fadd_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kMips64F32x4Sub: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ fsub_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kMips64F32x4Mul: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ fmul_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kMips64F32x4Div: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ fdiv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kMips64F32x4Max: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      Simd128Register src0 = i.InputSimd128Register(0);
      Simd128Register src1 = i.InputSimd128Register(1);
      Simd128Register scratch0 = kSimd128RegZero;
      Simd128Register scratch1 = kSimd128ScratchReg;
      __ fseq_w(scratch0, src0, src1);
      __ bsel_v(scratch0, src1, src0);
      __ and_v(scratch1, scratch0, src1);
      // scratch0 = isNaN(src0) ? src0 : scratch1.
      __ fseq_w(scratch0, src0, src0);
      __ bsel_v(scratch0, src0, scratch1);
      // dst = (scratch0 < src0) ? src0 : scratch0.
      __ fslt_w(scratch1, scratch0, src0);
      __ bsel_v(scratch1, scratch0, src0);
      __ fmax_w(dst, scratch1, scratch1);
      break;
    }
    case kMips64F32x4Min: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      Simd128Register src0 = i.InputSimd128Register(0);
      Simd128Register src1 = i.InputSimd128Register(1);
      Simd128Register scratch0 = kSimd128RegZero;
      Simd128Register scratch1 = kSimd128ScratchReg;
      __ fseq_w(scratch0, src0, src1);
      __ bsel_v(scratch0, src1, src0);
      __ or_v(scratch1, scratch0, src1);
      // scratch0 = isNaN(src0) ? src0 : scratch1.
      __ fseq_w(scratch0, src0, src0);
      __ bsel_v(scratch0, src0, scratch1);
      // dst = (src0 < scratch0) ? src0 : scratch0.
      __ fslt_w(scratch1, src0, scratch0);
      __ bsel_v(scratch1, scratch0, src0);
      __ fmin_w(dst, scratch1, scratch1);
      break;
    }
    case kMips64F32x4Eq: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ fceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kMips64F32x4Ne: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ fcune_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kMips64F32x4Lt: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ fclt_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kMips64F32x4Le: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ fcle_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kMips64F32x4Pmin: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      Simd128Register lhs = i.InputSimd128Register(0);
      Simd128Register rhs = i.InputSimd128Register(1);
      // dst = rhs < lhs ? rhs : lhs.
      __ fclt_w(dst, rhs, lhs);
      __ bsel_v(dst, lhs, rhs);
      break;
    }
    case kMips64F32x4Pmax: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      Simd128Register lhs = i.InputSimd128Register(0);
      Simd128Register rhs = i.InputSimd128Register(1);
      // dst = lhs < rhs ? rhs : lhs.
      __ fclt_w(dst, lhs, rhs);
      __ bsel_v(dst, lhs, rhs);
      break;
    }
    case kMips64F32x4Ceil: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ MSARoundW(i.OutputSimd128Register(), i.InputSimd128Register(0),
                   kRoundToPlusInf);
      break;
    }
    case kMips64F32x4Floor: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ MSARoundW(i.OutputSimd128Register(), i.InputSimd128Register(0),
                   kRoundToMinusInf);
      break;
    }
    case kMips64F32x4Trunc: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ MSARoundW(i.OutputSimd128Register(), i.InputSimd128Register(0),
                   kRoundToZero);
      break;
    }
    case kMips64F32x4NearestInt: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ MSARoundW(i.OutputSimd128Register(), i.InputSimd128Register(0),
                   kRoundToNearest);
      break;
    }
    case kMips64F32x4DemoteF64x2Zero: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
      __ fexdo_w(i.OutputSimd128Register(), kSimd128RegZero,
                 i.InputSimd128Register(0));
      break;
    }
    case kMips64I32x4SConvertF32x4: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kMips64I32x4UConvertF32x4: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ ftrunc_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kMips64F32x4Sqrt: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ fsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kMips64I32x4Neg: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
      __ subv_w(i.OutputSimd128Register(), kSimd128RegZero,
                i.InputSimd128Register(0));
      break;
    }
    case kMips64I32x4GtS: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ clt_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
                 i.InputSimd128Register(0));
      break;
    }
    case kMips64I32x4GeS: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ cle_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
                 i.InputSimd128Register(0));
      break;
    }
    case kMips64I32x4GtU: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ clt_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
                 i.InputSimd128Register(0));
      break;
    }
    case kMips64I32x4GeU: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ cle_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
                 i.InputSimd128Register(0));
      break;
    }
    case kMips64I32x4Abs: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
      __ asub_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  kSimd128RegZero);
      break;
    }
    case kMips64I32x4BitMask: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Register dst = i.OutputRegister();
      Simd128Register src = i.InputSimd128Register(0);
      Simd128Register scratch0 = kSimd128RegZero;
      Simd128Register scratch1 = kSimd128ScratchReg;
      __ srli_w(scratch0, src, 31);
      __ srli_d(scratch1, scratch0, 31);
      __ or_v(scratch0, scratch0, scratch1);
      __ shf_w(scratch1, scratch0, 0x0E);
      __ slli_d(scratch1, scratch1, 2);
      __ or_v(scratch0, scratch0, scratch1);
      __ copy_u_b(dst, scratch0, 0);
      break;
    }
    case kMips64I32x4DotI16x8S: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ dotp_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kMips64I32x4TruncSatF64x2SZero: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      // ...
      break;
    }
    case kMips64I32x4TruncSatF64x2UZero: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      // ...
      break;
    }
    case kMips64I16x8Splat: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
      break;
    }
    case kMips64I16x8ExtractLaneU: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ copy_u_h(i.OutputRegister(), i.InputSimd128Register(0),
                  i.InputInt8(1));
      break;
    }
    case kMips64I16x8ExtractLaneS: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ copy_s_h(i.OutputRegister(), i.InputSimd128Register(0),
                  i.InputInt8(1));
      break;
    }
    case kMips64I16x8ReplaceLane: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register src = i.InputSimd128Register(0);
      Simd128Register dst = i.OutputSimd128Register();
      if (src != dst) {
        __ move_v(dst, src);
      }
      __ insert_h(dst, i.InputInt8(1), i.InputRegister(2));
      break;
    }
    case kMips64I16x8Neg: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
      __ subv_h(i.OutputSimd128Register(), kSimd128RegZero,
                i.InputSimd128Register(0));
      break;
    }
    case kMips64I16x8Shl: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      if (instr->InputAt(1)->IsRegister()) {
        __ fill_h(kSimd128ScratchReg, i.InputRegister(1));
        __ sll_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 kSimd128ScratchReg);
      } else {
        __ slli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputInt4(1));
      }
      break;
    }
    case kMips64I16x8ShrS: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      if (instr->InputAt(1)->IsRegister()) {
        __ fill_h(kSimd128ScratchReg, i.InputRegister(1));
        __ sra_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 kSimd128ScratchReg);
      } else {
        __ srai_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputInt4(1));
      }
      break;
    }
    case kMips64I16x8ShrU: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      if (instr->InputAt(1)->IsRegister()) {
        __ fill_h(kSimd128ScratchReg, i.InputRegister(1));
        __ srl_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 kSimd128ScratchReg);
      } else {
        __ srli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputInt4(1));
      }
      break;
    }
3005 case kMips64I16x8Add: {
3006 CpuFeatureScope msa_scope(
masm(), MIPS_SIMD);
3007 __ addv_h(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
3008 i.InputSimd128Register(1));
3011 case kMips64I16x8AddSatS: {
3012 CpuFeatureScope msa_scope(
masm(), MIPS_SIMD);
3013 __ adds_s_h(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
3014 i.InputSimd128Register(1));
3017 case kMips64I16x8Sub: {
3018 CpuFeatureScope msa_scope(
masm(), MIPS_SIMD);
3019 __ subv_h(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
3020 i.InputSimd128Register(1));
3023 case kMips64I16x8SubSatS: {
3024 CpuFeatureScope msa_scope(
masm(), MIPS_SIMD);
3025 __ subs_s_h(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
3026 i.InputSimd128Register(1));
3029 case kMips64I16x8Mul: {
3030 CpuFeatureScope msa_scope(
masm(), MIPS_SIMD);
3031 __ mulv_h(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
3032 i.InputSimd128Register(1));
3035 case kMips64I16x8MaxS: {
3036 CpuFeatureScope msa_scope(
masm(), MIPS_SIMD);
3037 __ max_s_h(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
3038 i.InputSimd128Register(1));
3041 case kMips64I16x8MinS: {
3042 CpuFeatureScope msa_scope(
masm(), MIPS_SIMD);
3043 __ min_s_h(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
3044 i.InputSimd128Register(1));
3047 case kMips64I16x8Eq: {
3048 CpuFeatureScope msa_scope(
masm(), MIPS_SIMD);
3049 __ ceq_h(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
3050 i.InputSimd128Register(1));
3053 case kMips64I16x8Ne: {
3054 CpuFeatureScope msa_scope(
masm(), MIPS_SIMD);
3056 __ ceq_h(dst,
i.InputSimd128Register(0),
i.InputSimd128Register(1));
3057 __ nor_v(dst, dst, dst);
    case kMips64I16x8GtS: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      // Gt(a, b) is emitted as Lt(b, a), hence the swapped inputs.
      __ clt_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
                 i.InputSimd128Register(0));
      break;
    }
    case kMips64I16x8GeS: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ cle_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
                 i.InputSimd128Register(0));
      break;
    }
    case kMips64I16x8AddSatU: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ adds_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kMips64I16x8SubSatU: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ subs_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kMips64I16x8MaxU: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ max_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kMips64I16x8MinU: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ min_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kMips64I16x8GtU: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ clt_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
                 i.InputSimd128Register(0));
      break;
    }
    case kMips64I16x8GeU: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ cle_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
                 i.InputSimd128Register(0));
      break;
    }
    case kMips64I16x8RoundingAverageU: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ aver_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
                  i.InputSimd128Register(0));
      break;
    }
    case kMips64I16x8Abs: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
      // |x| is the absolute difference between x and zero.
      __ asub_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  kSimd128RegZero);
      break;
    }
    case kMips64I16x8BitMask: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Register dst = i.OutputRegister();
      Simd128Register src = i.InputSimd128Register(0);
      Simd128Register scratch0 = kSimd128RegZero;
      Simd128Register scratch1 = kSimd128ScratchReg;
      __ srli_h(scratch0, src, 15);
      __ srli_w(scratch1, scratch0, 15);
      __ or_v(scratch0, scratch0, scratch1);
      __ srli_d(scratch1, scratch0, 30);
      __ or_v(scratch0, scratch0, scratch1);
      __ shf_w(scratch1, scratch0, 0x0E);
      __ slli_d(scratch1, scratch1, 4);
      __ or_v(scratch0, scratch0, scratch1);
      __ copy_u_b(dst, scratch0, 0);
      break;
    }
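    // The I16x8BitMask sequence above gathers the sign bit of each of the
    // eight halfword lanes into one scalar byte: srli_h isolates bit 15 of
    // every lane, then successively wider shifts (srli_w, srli_d) OR-ed back
    // fold neighboring lanes' bits together, shf_w plus slli_d merges the two
    // 64-bit halves into bits 0..7, and copy_u_b extracts the finished mask.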
    case kMips64I16x8Q15MulRSatS: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ mulr_q_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kMips64I8x16Splat: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ fill_b(i.OutputSimd128Register(), i.InputRegister(0));
      break;
    }
    case kMips64I8x16ExtractLaneU: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ copy_u_b(i.OutputRegister(), i.InputSimd128Register(0),
                  i.InputInt8(1));
      break;
    }
    case kMips64I8x16ExtractLaneS: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ copy_s_b(i.OutputRegister(), i.InputSimd128Register(0),
                  i.InputInt8(1));
      break;
    }
    case kMips64I8x16ReplaceLane: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register src = i.InputSimd128Register(0);
      Simd128Register dst = i.OutputSimd128Register();
      if (src != dst) {
        __ move_v(dst, src);
      }
      __ insert_b(dst, i.InputInt8(1), i.InputRegister(2));
      break;
    }
    case kMips64I8x16Neg: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
      __ subv_b(i.OutputSimd128Register(), kSimd128RegZero,
                i.InputSimd128Register(0));
      break;
    }
    case kMips64I8x16Shl: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      if (instr->InputAt(1)->IsRegister()) {
        __ fill_b(kSimd128ScratchReg, i.InputRegister(1));
        __ sll_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 kSimd128ScratchReg);
      } else {
        __ slli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputInt3(1));
      }
      break;
    }
    case kMips64I8x16ShrS: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      if (instr->InputAt(1)->IsRegister()) {
        __ fill_b(kSimd128ScratchReg, i.InputRegister(1));
        __ sra_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 kSimd128ScratchReg);
      } else {
        __ srai_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputInt3(1));
      }
      break;
    }
    case kMips64I8x16Add: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ addv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kMips64I8x16AddSatS: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ adds_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kMips64I8x16Sub: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ subv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kMips64I8x16SubSatS: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ subs_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kMips64I8x16MaxS: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ max_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kMips64I8x16MinS: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ min_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kMips64I8x16Eq: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ ceq_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
               i.InputSimd128Register(1));
      break;
    }
    case kMips64I8x16Ne: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      __ ceq_b(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
      __ nor_v(dst, dst, dst);
      break;
    }
    case kMips64I8x16GtS: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ clt_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
                 i.InputSimd128Register(0));
      break;
    }
    case kMips64I8x16GeS: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ cle_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
                 i.InputSimd128Register(0));
      break;
    }
    case kMips64I8x16ShrU: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      if (instr->InputAt(1)->IsRegister()) {
        __ fill_b(kSimd128ScratchReg, i.InputRegister(1));
        __ srl_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 kSimd128ScratchReg);
      } else {
        __ srli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputInt3(1));
      }
      break;
    }
    case kMips64I8x16AddSatU: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ adds_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kMips64I8x16SubSatU: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ subs_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kMips64I8x16MaxU: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ max_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kMips64I8x16MinU: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ min_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kMips64I8x16GtU: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ clt_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
                 i.InputSimd128Register(0));
      break;
    }
    case kMips64I8x16GeU: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ cle_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
                 i.InputSimd128Register(0));
      break;
    }
    case kMips64I8x16RoundingAverageU: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ aver_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
                  i.InputSimd128Register(0));
      break;
    }
    case kMips64I8x16Abs: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
      __ asub_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  kSimd128RegZero);
      break;
    }
    case kMips64I8x16Popcnt: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ pcnt_b(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kMips64I8x16BitMask: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Register dst = i.OutputRegister();
      Simd128Register src = i.InputSimd128Register(0);
      Simd128Register scratch0 = kSimd128RegZero;
      Simd128Register scratch1 = kSimd128ScratchReg;
      __ srli_b(scratch0, src, 7);
      __ srli_h(scratch1, scratch0, 7);
      __ or_v(scratch0, scratch0, scratch1);
      __ srli_w(scratch1, scratch0, 14);
      __ or_v(scratch0, scratch0, scratch1);
      __ srli_d(scratch1, scratch0, 28);
      __ or_v(scratch0, scratch0, scratch1);
      __ shf_w(scratch1, scratch0, 0x0E);
      __ ilvev_b(scratch0, scratch1, scratch0);
      __ copy_u_h(dst, scratch0, 0);
      break;
    }
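    // Same reduction idea as I16x8BitMask, but sixteen lanes produce a 16-bit
    // mask: after folding, each 64-bit half holds 8 mask bits in its low byte,
    // so ilvev_b interleaves those two even bytes into adjacent positions and
    // copy_u_h reads out the full 16-bit result in one extraction.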
    case kMips64S128And: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ and_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
               i.InputSimd128Register(1));
      break;
    }
    case kMips64S128Or: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ or_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
              i.InputSimd128Register(1));
      break;
    }
    case kMips64S128Xor: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ xor_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
               i.InputSimd128Register(1));
      break;
    }
    case kMips64S128Not: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ nor_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
               i.InputSimd128Register(0));
      break;
    }
    case kMips64V128AnyTrue: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Register dst = i.OutputRegister();
      Label all_false;
      __ BranchMSA(&all_false, MSA_BRANCH_V, all_zero,
                   i.InputSimd128Register(0), USE_DELAY_SLOT);
      __ li(dst, 0l);  // branch delay slot
      __ li(dst, 1);
      __ bind(&all_false);
      break;
    }
    case kMips64I64x2AllTrue: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Register dst = i.OutputRegister();
      Label all_true;
      __ BranchMSA(&all_true, MSA_BRANCH_D, all_not_zero,
                   i.InputSimd128Register(0), USE_DELAY_SLOT);
      __ li(dst, 0l);  // branch delay slot
      __ li(dst, 1);
      __ bind(&all_true);
      break;
    }
    case kMips64I32x4AllTrue: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Register dst = i.OutputRegister();
      Label all_true;
      __ BranchMSA(&all_true, MSA_BRANCH_W, all_not_zero,
                   i.InputSimd128Register(0), USE_DELAY_SLOT);
      __ li(dst, 0l);  // branch delay slot
      __ li(dst, 1);
      __ bind(&all_true);
      break;
    }
    case kMips64I16x8AllTrue: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Register dst = i.OutputRegister();
      Label all_true;
      __ BranchMSA(&all_true, MSA_BRANCH_H, all_not_zero,
                   i.InputSimd128Register(0), USE_DELAY_SLOT);
      __ li(dst, 0l);  // branch delay slot
      __ li(dst, 1);
      __ bind(&all_true);
      break;
    }
    case kMips64I8x16AllTrue: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Register dst = i.OutputRegister();
      Label all_true;
      __ BranchMSA(&all_true, MSA_BRANCH_B, all_not_zero,
                   i.InputSimd128Register(0), USE_DELAY_SLOT);
      __ li(dst, 0l);  // branch delay slot
      __ li(dst, 1);
      __ bind(&all_true);
      break;
    }
    case kMips64MsaLd: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ ld_b(i.OutputSimd128Register(), i.MemoryOperand());
      break;
    }
    case kMips64MsaSt: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ st_b(i.InputSimd128Register(2), i.MemoryOperand());
      break;
    }
    case kMips64S32x4InterleaveRight: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register(),
                      src0 = i.InputSimd128Register(0),
                      src1 = i.InputSimd128Register(1);
      // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
      // dst = [5, 1, 4, 0]
      __ ilvr_w(dst, src1, src0);
      break;
    }
    case kMips64S32x4InterleaveLeft: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register(),
                      src0 = i.InputSimd128Register(0),
                      src1 = i.InputSimd128Register(1);
      // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
      // dst = [7, 3, 6, 2]
      __ ilvl_w(dst, src1, src0);
      break;
    }
    case kMips64S32x4PackEven: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register(),
                      src0 = i.InputSimd128Register(0),
                      src1 = i.InputSimd128Register(1);
      // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
      // dst = [6, 4, 2, 0]
      __ pckev_w(dst, src1, src0);
      break;
    }
    case kMips64S32x4PackOdd: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register(),
                      src0 = i.InputSimd128Register(0),
                      src1 = i.InputSimd128Register(1);
      // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
      // dst = [7, 5, 3, 1]
      __ pckod_w(dst, src1, src0);
      break;
    }
    case kMips64S32x4InterleaveEven: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register(),
                      src0 = i.InputSimd128Register(0),
                      src1 = i.InputSimd128Register(1);
      // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
      // dst = [6, 2, 4, 0]
      __ ilvev_w(dst, src1, src0);
      break;
    }
    case kMips64S32x4InterleaveOdd: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register(),
                      src0 = i.InputSimd128Register(0),
                      src1 = i.InputSimd128Register(1);
      // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
      // dst = [7, 3, 5, 1]
      __ ilvod_w(dst, src1, src0);
      break;
    }
    case kMips64S32x4Shuffle: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register(),
                      src0 = i.InputSimd128Register(0),
                      src1 = i.InputSimd128Register(1);

      int32_t shuffle = i.InputInt32(2);

      if (src0 == src1) {
        // Unary S32x4 shuffles are handled with the shf.w instruction.
        unsigned lane = shuffle & 0xFF;
        if (v8_flags.debug_code) {
          // For a unary shuffle, all four lane indices must be in range
          // [0..3].
          int32_t shuffle_helper = shuffle;
          for (int i = 0; i < 4; ++i) {
            lane = shuffle_helper & 0xFF;
            DCHECK_GT(4, lane);
            shuffle_helper >>= 8;
          }
        }
        uint32_t i8 = 0;
        for (int i = 0; i < 4; i++) {
          lane = shuffle & 0xFF;
          DCHECK_GT(4, lane);
          // Pack the four 8-bit lane indices into one 2-bits-per-lane
          // immediate.
          i8 |= lane << (2 * i);
          shuffle >>= 8;
        }
        __ shf_w(dst, src0, i8);
      } else {
        // Binary shuffles use the vshf.w instruction.
        if (dst == src0) {
          __ move_v(kSimd128ScratchReg, src0);
          src0 = kSimd128ScratchReg;
        } else if (dst == src1) {
          __ move_v(kSimd128ScratchReg, src1);
          src1 = kSimd128ScratchReg;
        }

        __ li(kScratchReg, i.InputInt32(2));
        __ insert_w(dst, 0, kScratchReg);
        __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
        __ ilvr_b(dst, kSimd128RegZero, dst);
        __ ilvr_h(dst, kSimd128RegZero, dst);
        __ vshf_w(dst, src1, src0);
      }
      break;
    }
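    // shf.w encodes the whole permutation in one immediate byte: output lane k
    // takes input lane ((i8 >> 2k) & 3). For example 0x1B (0b00'01'10'11)
    // selects lanes 3,2,1,0 and so reverses the vector, while 0xB1
    // (0b10'11'00'01) swaps adjacent pairs; the loop above packs the four
    // 8-bit shuffle indices into exactly this form. The binary (two-source)
    // path cannot use an immediate, so it widens the lane indices into a
    // vector register and uses vshf.w instead.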
    case kMips64S16x8InterleaveRight: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register(),
                      src0 = i.InputSimd128Register(0),
                      src1 = i.InputSimd128Register(1);
      // src1 = [15, ..., 8], src0 = [7, ..., 0]
      // dst = [11, 3, 10, 2, 9, 1, 8, 0]
      __ ilvr_h(dst, src1, src0);
      break;
    }
    case kMips64S16x8InterleaveLeft: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register(),
                      src0 = i.InputSimd128Register(0),
                      src1 = i.InputSimd128Register(1);
      // src1 = [15, ..., 8], src0 = [7, ..., 0]
      // dst = [15, 7, 14, 6, 13, 5, 12, 4]
      __ ilvl_h(dst, src1, src0);
      break;
    }
    case kMips64S16x8PackEven: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register(),
                      src0 = i.InputSimd128Register(0),
                      src1 = i.InputSimd128Register(1);
      // src1 = [15, ..., 8], src0 = [7, ..., 0]
      // dst = [14, 12, 10, 8, 6, 4, 2, 0]
      __ pckev_h(dst, src1, src0);
      break;
    }
    case kMips64S16x8PackOdd: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register(),
                      src0 = i.InputSimd128Register(0),
                      src1 = i.InputSimd128Register(1);
      // src1 = [15, ..., 8], src0 = [7, ..., 0]
      // dst = [15, 13, 11, 9, 7, 5, 3, 1]
      __ pckod_h(dst, src1, src0);
      break;
    }
    case kMips64S16x8InterleaveEven: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register(),
                      src0 = i.InputSimd128Register(0),
                      src1 = i.InputSimd128Register(1);
      // src1 = [15, ..., 8], src0 = [7, ..., 0]
      // dst = [14, 6, 12, 4, 10, 2, 8, 0]
      __ ilvev_h(dst, src1, src0);
      break;
    }
    case kMips64S16x8InterleaveOdd: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register(),
                      src0 = i.InputSimd128Register(0),
                      src1 = i.InputSimd128Register(1);
      // src1 = [15, ..., 8], src0 = [7, ..., 0]
      // dst = [15, 7, 13, 5, 11, 3, 9, 1]
      __ ilvod_h(dst, src1, src0);
      break;
    }
    case kMips64S16x4Reverse: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [4, 5, 6, 7, 0, 1, 2, 3]
      // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B
      __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B);
      break;
    }
    case kMips64S16x2Reverse: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [6, 7, 4, 5, 2, 3, 0, 1]
      // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1
      __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1);
      break;
    }
    case kMips64S8x16InterleaveRight: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register(),
                      src0 = i.InputSimd128Register(0),
                      src1 = i.InputSimd128Register(1);
      // src1 = [31, ..., 16], src0 = [15, ..., 0]
      // dst = [23, 7, 22, 6, ..., 17, 1, 16, 0]
      __ ilvr_b(dst, src1, src0);
      break;
    }
    case kMips64S8x16InterleaveLeft: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register(),
                      src0 = i.InputSimd128Register(0),
                      src1 = i.InputSimd128Register(1);
      // src1 = [31, ..., 16], src0 = [15, ..., 0]
      // dst = [31, 15, 30, 14, ..., 25, 9, 24, 8]
      __ ilvl_b(dst, src1, src0);
      break;
    }
    case kMips64S8x16PackEven: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register(),
                      src0 = i.InputSimd128Register(0),
                      src1 = i.InputSimd128Register(1);
      // src1 = [31, ..., 16], src0 = [15, ..., 0]
      // dst = [30, 28, ..., 2, 0]
      __ pckev_b(dst, src1, src0);
      break;
    }
    case kMips64S8x16PackOdd: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register(),
                      src0 = i.InputSimd128Register(0),
                      src1 = i.InputSimd128Register(1);
      // src1 = [31, ..., 16], src0 = [15, ..., 0]
      // dst = [31, 29, ..., 3, 1]
      __ pckod_b(dst, src1, src0);
      break;
    }
    case kMips64S8x16InterleaveEven: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register(),
                      src0 = i.InputSimd128Register(0),
                      src1 = i.InputSimd128Register(1);
      // src1 = [31, ..., 16], src0 = [15, ..., 0]
      // dst = [30, 14, 28, 12, ..., 18, 2, 16, 0]
      __ ilvev_b(dst, src1, src0);
      break;
    }
    case kMips64S8x16InterleaveOdd: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register(),
                      src0 = i.InputSimd128Register(0),
                      src1 = i.InputSimd128Register(1);
      // src1 = [31, ..., 16], src0 = [15, ..., 0]
      // dst = [31, 15, 29, 13, ..., 19, 3, 17, 1]
      __ ilvod_b(dst, src1, src0);
      break;
    }
    case kMips64S8x16Concat: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      DCHECK(dst == i.InputSimd128Register(0));
      __ sldi_b(dst, i.InputSimd128Register(1), i.InputInt4(2));
      break;
    }
    case kMips64I8x16Shuffle: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register(),
                      src0 = i.InputSimd128Register(0),
                      src1 = i.InputSimd128Register(1);

      if (dst == src0) {
        __ move_v(kSimd128ScratchReg, src0);
        src0 = kSimd128ScratchReg;
      } else if (dst == src1) {
        __ move_v(kSimd128ScratchReg, src1);
        src1 = kSimd128ScratchReg;
      }

      // Assemble the 16-byte shuffle control vector from the four 32-bit
      // immediates and load it into dst before vshf.b consumes it.
      int64_t control_low =
          static_cast<int64_t>(i.InputInt32(3)) << 32 | i.InputInt32(2);
      int64_t control_hi =
          static_cast<int64_t>(i.InputInt32(5)) << 32 | i.InputInt32(4);
      __ li(kScratchReg, control_low);
      __ insert_d(dst, 0, kScratchReg);
      __ li(kScratchReg, control_hi);
      __ insert_d(dst, 1, kScratchReg);
      __ vshf_b(dst, src1, src0);
      break;
    }
    case kMips64I8x16Swizzle: {
      Simd128Register dst = i.OutputSimd128Register(),
                      tbl = i.InputSimd128Register(0),
                      ctl = i.InputSimd128Register(1);
      DCHECK(dst != ctl && dst != tbl);
      Simd128Register zeroReg = i.TempSimd128Register(0);
      __ xor_v(zeroReg, zeroReg, zeroReg);
      __ move_v(dst, ctl);
      __ vshf_b(dst, zeroReg, tbl);
      break;
    }
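    // vshf.b selects each destination byte from the 32-byte concatenation of
    // the two source operands, indexed by the control byte already placed in
    // dst. Here indices 16-31 select from the zeroed register, so out-of-range
    // swizzle indices yield 0 as WebAssembly requires (MSA additionally zeroes
    // any element whose control byte has the top bits set).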
    case kMips64S8x8Reverse: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      // src = [15, 14, ..., 9, 8, 7, 6, ..., 1, 0]
      // dst = [8, 9, ..., 14, 15, 0, 1, ..., 6, 7]
      // [A B C D] => [B A D C]: shf.w imm field: 2 3 0 1 = 10110001 = 0xB1
      // C: [7, 6, 5, 4] => A': [4, 5, 6, 7]: shf.b imm field: 00011011 = 0x1B
      __ shf_w(kSimd128ScratchReg, i.InputSimd128Register(0), 0xB1);
      __ shf_b(i.OutputSimd128Register(), kSimd128ScratchReg, 0x1B);
      break;
    }
    case kMips64S8x4Reverse: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      // src = [15, 14, ..., 1, 0], dst = [12, 13, 14, 15, ..., 0, 1, 2, 3]
      // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B
      __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B);
      break;
    }
    case kMips64S8x2Reverse: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      // src = [15, 14, ..., 1, 0], dst = [14, 15, 12, 13, ..., 2, 3, 0, 1]
      // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1
      __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1);
      break;
    }
    case kMips64I32x4SConvertI16x8Low: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      __ ilvr_h(kSimd128ScratchReg, i.InputSimd128Register(0),
                i.InputSimd128Register(0));
      __ slli_w(dst, kSimd128ScratchReg, 16);
      __ srai_w(dst, dst, 16);
      break;
    }
    case kMips64I32x4SConvertI16x8High: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      __ ilvl_h(kSimd128ScratchReg, i.InputSimd128Register(0),
                i.InputSimd128Register(0));
      __ slli_w(dst, kSimd128ScratchReg, 16);
      __ srai_w(dst, dst, 16);
      break;
    }
    case kMips64I32x4UConvertI16x8Low: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
      __ ilvr_h(i.OutputSimd128Register(), kSimd128RegZero,
                i.InputSimd128Register(0));
      break;
    }
    case kMips64I32x4UConvertI16x8High: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
      __ ilvl_h(i.OutputSimd128Register(), kSimd128RegZero,
                i.InputSimd128Register(0));
      break;
    }
    case kMips64I16x8SConvertI8x16Low: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      __ ilvr_b(kSimd128ScratchReg, i.InputSimd128Register(0),
                i.InputSimd128Register(0));
      __ slli_h(dst, kSimd128ScratchReg, 8);
      __ srai_h(dst, dst, 8);
      break;
    }
    case kMips64I16x8SConvertI8x16High: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      __ ilvl_b(kSimd128ScratchReg, i.InputSimd128Register(0),
                i.InputSimd128Register(0));
      __ slli_h(dst, kSimd128ScratchReg, 8);
      __ srai_h(dst, dst, 8);
      break;
    }
    case kMips64I16x8SConvertI32x4: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      Simd128Register src0 = i.InputSimd128Register(0);
      Simd128Register src1 = i.InputSimd128Register(1);
      __ sat_s_w(kSimd128ScratchReg, src0, 15);
      __ sat_s_w(kSimd128RegZero, src1, 15);  // kSimd128RegZero as scratch
      __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
      break;
    }
    case kMips64I16x8UConvertI32x4: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      Simd128Register src0 = i.InputSimd128Register(0);
      Simd128Register src1 = i.InputSimd128Register(1);
      __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
      // Clamp negative lanes to zero before the unsigned saturation.
      __ max_s_w(kSimd128ScratchReg, kSimd128RegZero, src0);
      __ sat_u_w(kSimd128ScratchReg, kSimd128ScratchReg, 15);
      __ max_s_w(dst, kSimd128RegZero, src1);
      __ sat_u_w(dst, dst, 15);
      __ pckev_h(dst, dst, kSimd128ScratchReg);
      break;
    }
    case kMips64I16x8UConvertI8x16Low: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
      __ ilvr_b(i.OutputSimd128Register(), kSimd128RegZero,
                i.InputSimd128Register(0));
      break;
    }
    case kMips64I16x8UConvertI8x16High: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
      __ ilvl_b(i.OutputSimd128Register(), kSimd128RegZero,
                i.InputSimd128Register(0));
      break;
    }
    case kMips64I8x16SConvertI16x8: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      Simd128Register src0 = i.InputSimd128Register(0);
      Simd128Register src1 = i.InputSimd128Register(1);
      __ sat_s_h(kSimd128ScratchReg, src0, 7);
      __ sat_s_h(kSimd128RegZero, src1, 7);  // kSimd128RegZero as scratch
      __ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg);
      break;
    }
    case kMips64I8x16UConvertI16x8: {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      Simd128Register dst = i.OutputSimd128Register();
      Simd128Register src0 = i.InputSimd128Register(0);
      Simd128Register src1 = i.InputSimd128Register(1);
      __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
      __ max_s_h(kSimd128ScratchReg, kSimd128RegZero, src0);
      __ sat_u_h(kSimd128ScratchReg, kSimd128ScratchReg, 7);
      __ max_s_h(dst, kSimd128RegZero, src1);
      __ sat_u_h(dst, dst, 7);
      __ pckev_b(dst, dst, kSimd128ScratchReg);
      break;
    }
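    // The widening and narrowing conversions above are built from interleave
    // and shift/saturate primitives: sign extension interleaves a lane with
    // itself (ilvr/ilvl) and then arithmetic-shifts the doubled lane back,
    // zero extension interleaves with a zeroed register, and the narrowing
    // conversions saturate each wide lane (sat_s/sat_u) before packing the
    // even lanes of both sources together with pckev.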
#define UNSUPPORTED_COND(opcode, condition)                                    \
  StdoutStream{} << "Unsupported " << #opcode << " condition: \"" << condition \
                 << "\"";                                                      \
  UNIMPLEMENTED();

void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
                            Instruction* instr, FlagsCondition condition,
                            Label* tlabel, Label* flabel, bool fallthru) {
#undef __
#define __ masm->
  MipsOperandConverter i(gen, instr);

  // MIPS has no condition-code flags; the compare operations are pseudo
  // instructions whose inputs are forwarded here, where the actual
  // compare-and-branch is emitted.
  if (instr->arch_opcode() == kMips64Cmp) {
    Condition cc = FlagsConditionToConditionCmp(condition);
    __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
  } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
    Condition cc = FlagsConditionToConditionCmp(condition);
    DCHECK((cc == ls) || (cc == hi));
    if (cc == ls) {
      __ xori(i.TempRegister(0), i.TempRegister(0), 1);
    }
    __ Branch(tlabel, ne, i.TempRegister(0), Operand(zero_reg));
  } else if (instr->arch_opcode() == kMips64CmpS ||
             instr->arch_opcode() == kMips64CmpD) {
    bool predicate;
    FlagsConditionToConditionCmpFPU(&predicate, condition);
    if (predicate) {
      __ BranchTrueF(tlabel);
    } else {
      __ BranchFalseF(tlabel);
    }
  } else {
    PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
           instr->arch_opcode());
    UNIMPLEMENTED();
  }
  if (!fallthru) __ Branch(flabel);  // no fallthru to flabel.
#undef __
#define __ masm()->
}
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;

  AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel,
                         branch->fallthru);
}

#undef UNSUPPORTED_COND

void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
                                            BranchInfo* branch) {
  AssembleArchBranch(instr, branch);
}
#if V8_ENABLE_WEBASSEMBLY
void CodeGenerator::AssembleArchTrap(Instruction* instr,
                                     FlagsCondition condition) {
  class OutOfLineTrap final : public OutOfLineCode {
   public:
    OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
        : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
    void Generate() final {
      MipsOperandConverter i(gen_, instr_);
      TrapId trap_id =
          static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
      GenerateCallToTrap(trap_id);
    }

   private:
    void GenerateCallToTrap(TrapId trap_id) {
      gen_->AssembleSourcePosition(instr_);
      // A direct call to a wasm runtime stub defined in this module. Just
      // encode the stub index; this will be patched when the code is added
      // to the native module and copied into wasm code space.
      __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
      ReferenceMap* reference_map =
          gen_->zone()->New<ReferenceMap>(gen_->zone());
      gen_->RecordSafepoint(reference_map);
    }

    Instruction* instr_;
    CodeGenerator* gen_;
  };
  auto ool = zone()->New<OutOfLineTrap>(this, instr);
  Label* tlabel = ool->entry();
  AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true);
}
#endif  // V8_ENABLE_WEBASSEMBLY
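// Trap checks compile to a conditional branch into an out-of-line stub call,
// keeping the non-trapping fast path free of call sequences. The trap id
// travels as the instruction's last input and is encoded directly as a
// WASM_STUB_CALL relocation, to be patched when the code is copied into the
// wasm code space.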
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  MipsOperandConverter i(this, instr);

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  DCHECK_NE(0u, instr->OutputCount());
  Register result = i.OutputRegister(instr->OutputCount() - 1);
  // MIPS does not have condition code flags, so the compare operations emit
  // pseudo-instructions that are checked and handled here.
  if (instr->arch_opcode() == kMips64Tst) {
    Condition cc = FlagsConditionToConditionTst(condition);
    if (cc == eq) {
      __ Sltu(result, kScratchReg, 1);
    } else {
      __ Sltu(result, zero_reg, kScratchReg);
    }
    return;
  } else if (instr->arch_opcode() == kMips64Dadd ||
             instr->arch_opcode() == kMips64Dsub) {
    // Overflow check: compare the sign of the 64-bit result against the sign
    // of its low 32 bits.
    Condition cc = FlagsConditionToConditionOvf(condition);
    __ dsrl32(kScratchReg, i.OutputRegister(), 31);
    __ srl(kScratchReg2, i.OutputRegister(), 31);
    __ xor_(result, kScratchReg, kScratchReg2);
    if (cc == eq)  // Toggle result for "not overflow".
      __ xori(result, result, 1);
    return;
  } else if (instr->arch_opcode() == kMips64DaddOvf ||
             instr->arch_opcode() == kMips64DsubOvf) {
    // Overflow occurs if the overflow register is negative.
    __ slt(result, kScratchReg, zero_reg);
  } else if (instr->arch_opcode() == kMips64MulOvf ||
             instr->arch_opcode() == kMips64DMulOvf) {
    // Overflow occurs if the overflow register is not zero.
    __ Sgtu(result, kScratchReg, zero_reg);
  } else if (instr->arch_opcode() == kMips64Cmp) {
    Condition cc = FlagsConditionToConditionCmp(condition);
    __ CompareWord(cc, result, i.InputRegister(0), i.InputOperand(1));
    return;
  } else if (instr->arch_opcode() == kMips64CmpD ||
             instr->arch_opcode() == kMips64CmpS) {
    FPURegister left = i.InputOrZeroDoubleRegister(0);
    FPURegister right = i.InputOrZeroDoubleRegister(1);
    if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
        !__ IsDoubleZeroRegSet()) {
      __ Move(kDoubleRegZero, 0.0);
    }
    bool predicate;
    FlagsConditionToConditionCmpFPU(&predicate, condition);
    if (kArchVariant != kMips64r6) {
      __ li(result, Operand(1));
      if (predicate) {
        __ Movf(result, zero_reg);
      } else {
        __ Movt(result, zero_reg);
      }
    } else {
      // On r6 the comparison result lives in an FPU register; move it to the
      // GPR result.
      if (instr->arch_opcode() == kMips64CmpD) {
        __ dmfc1(result, kDoubleCompareReg);
      } else {
        DCHECK_EQ(kMips64CmpS, instr->arch_opcode());
        __ mfc1(result, kDoubleCompareReg);
      }
      if (predicate) {
        __ And(result, result, 1);  // cmp returns all 1's/0's; keep the LSB.
      } else {
        __ Addu(result, result, 1);  // Toggle result for "not equal".
      }
    }
    return;
  } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
    Condition cc = FlagsConditionToConditionCmp(condition);
    DCHECK((cc == ls) || (cc == hi));
    if (cc == ls) {
      __ xori(i.OutputRegister(), i.TempRegister(0), 1);
    }
    return;
  } else {
    PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
           instr->arch_opcode());
    TRACE("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__,
          __LINE__);
    UNIMPLEMENTED();
  }
}
void CodeGenerator::AssembleArchConditionalBranch(Instruction* instr,
                                                  BranchInfo* branch) {
  UNREACHABLE();
}

void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
  MipsOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  std::vector<std::pair<int32_t, Label*>> cases;
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
  }
  UseScratchRegisterScope temps(masm());
  Register scratch = temps.Acquire();
  // The input register may contain dirty data in its upper 32 bits, so
  // sign-extend it explicitly before the 32-bit comparisons.
  __ sll(scratch, input, 0);
  AssembleArchBinarySearchSwitchRange(scratch, i.InputRpo(1), cases.data(),
                                      cases.data() + cases.size());
}
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  MipsOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  size_t const case_count = instr->InputCount() - 2;

  UseScratchRegisterScope temps(masm());
  Register scratch = temps.Acquire();
  // Sign-extend the 32-bit input; its upper bits may be dirty.
  __ sll(scratch, input, 0);
  __ Branch(GetLabel(i.InputRpo(1)), hs, scratch, Operand(case_count));
  __ GenerateSwitchTable(scratch, case_count, [&i, this](size_t index) {
    return GetLabel(i.InputRpo(index + 2));
  });
}
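// The bounds check above routes any index >= case_count to the default label
// (i.InputRpo(1)) with an unsigned "hs" comparison, so the single branch also
// rejects negative inputs; GenerateSwitchTable then emits a jump table indexed
// by the remaining in-range values.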
void CodeGenerator::FinishFrame(Frame* frame) {
  auto call_descriptor = linkage()->GetIncomingDescriptor();

  const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
  if (!saves_fpu.is_empty()) {
    int count = saves_fpu.Count();
    DCHECK_EQ(kNumCalleeSavedFPU, count);
    frame->AllocateSavedCalleeRegisterSlots(count *
                                            (kDoubleSize / kSystemPointerSize));
  }

  const RegList saves = call_descriptor->CalleeSavedRegisters();
  if (!saves.is_empty()) {
    int count = saves.Count();
    frame->AllocateSavedCalleeRegisterSlots(count);
  }
}
void CodeGenerator::AssembleConstructFrame() {
  auto call_descriptor = linkage()->GetIncomingDescriptor();

  if (frame_access_state()->has_frame()) {
    if (call_descriptor->IsCFunctionCall()) {
#if V8_ENABLE_WEBASSEMBLY
      if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
        __ StubPrologue(StackFrame::C_WASM_ENTRY);
        // Reserve stack space for saving the c_entry_fp later.
        __ Dsubu(sp, sp, Operand(kSystemPointerSize));
      } else {
#endif  // V8_ENABLE_WEBASSEMBLY
        __ Push(ra, fp);
        __ mov(fp, sp);
#if V8_ENABLE_WEBASSEMBLY
      }
#endif  // V8_ENABLE_WEBASSEMBLY
    } else if (call_descriptor->IsJSFunctionCall()) {
      __ Prologue();
    } else {
      __ StubPrologue(info()->GetOutputStackFrameType());
#if V8_ENABLE_WEBASSEMBLY
      if (call_descriptor->IsAnyWasmFunctionCall() ||
          call_descriptor->IsWasmImportWrapper() ||
          call_descriptor->IsWasmCapiFunction()) {
        __ Push(kWasmImplicitArgRegister);
      }
      if (call_descriptor->IsWasmCapiFunction()) {
        // Reserve space for saving the PC later.
        __ Dsubu(sp, sp, Operand(kSystemPointerSize));
      }
#endif  // V8_ENABLE_WEBASSEMBLY
    }
  }

  int required_slots =
      frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();

  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the
    // unoptimized frame is still on the stack, so only the remaining stack
    // slots need to be allocated.
    __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    required_slots -= osr_helper()->UnoptimizedFrameSlots();
  }

  const RegList saves = call_descriptor->CalleeSavedRegisters();
  const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();

  if (required_slots > 0) {
    DCHECK(frame_access_state()->has_frame());
#if V8_ENABLE_WEBASSEMBLY
    if (info()->IsWasm() && required_slots * kSystemPointerSize > 4 * KB) {
      // For WebAssembly functions with big frames the stack overflow check is
      // done before the frame is constructed; otherwise there may not be
      // enough stack space left to call the runtime.
      Label done;
      if (required_slots * kSystemPointerSize < v8_flags.stack_size * KB) {
        __ LoadStackLimit(kScratchReg,
                          MacroAssembler::StackLimitKind::kRealStackLimit);
        __ Daddu(kScratchReg, kScratchReg,
                 Operand(required_slots * kSystemPointerSize));
        __ Branch(&done, uge, sp, Operand(kScratchReg));
      }

      __ Call(static_cast<intptr_t>(Builtin::kWasmStackOverflow),
              RelocInfo::WASM_STUB_CALL);
      // The call does not return, so an empty safepoint suffices.
      ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
      RecordSafepoint(reference_map);

      __ bind(&done);
    }
#endif  // V8_ENABLE_WEBASSEMBLY
  }

  const int returns = frame()->GetReturnSlotCount();

  // Skip callee-saved and return slots, which are created below.
  required_slots -= saves.Count();
  required_slots -= saves_fpu.Count();
  required_slots -= returns;
  if (required_slots > 0) {
    __ Dsubu(sp, sp, Operand(required_slots * kSystemPointerSize));
  }

  if (!saves_fpu.is_empty()) {
    // Save callee-saved FPU registers.
    __ MultiPushFPU(saves_fpu);
  }

  if (!saves.is_empty()) {
    // Save callee-saved registers.
    __ MultiPush(saves);
  }

  if (returns != 0) {
    // Create space for returns.
    __ Dsubu(sp, sp, Operand(returns * kSystemPointerSize));
  }
}
void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
  auto call_descriptor = linkage()->GetIncomingDescriptor();

  const int returns = frame()->GetReturnSlotCount();
  if (returns != 0) {
    __ Daddu(sp, sp, Operand(returns * kSystemPointerSize));
  }

  // Restore GP registers.
  const RegList saves = call_descriptor->CalleeSavedRegisters();
  if (!saves.is_empty()) {
    __ MultiPop(saves);
  }

  // Restore FPU registers.
  const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
  if (!saves_fpu.is_empty()) {
    __ MultiPopFPU(saves_fpu);
  }

  MipsOperandConverter g(this, nullptr);

  const int parameter_slots =
      static_cast<int>(call_descriptor->ParameterSlotCount());

  // {additional_pop_count} is only greater than zero if {parameter_slots} is
  // zero; see RawMachineAssembler::PopAndReturn.
  if (parameter_slots != 0) {
    if (additional_pop_count->IsImmediate()) {
      DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
    } else if (v8_flags.debug_code) {
      __ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue,
                g.ToRegister(additional_pop_count),
                Operand(static_cast<int64_t>(0)));
    }
  }

  // Functions with JS linkage have at least one parameter (the receiver).
  // If {parameter_slots} == 0, it is a builtin with
  // kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
  // itself.
  const bool drop_jsargs = frame_access_state()->has_frame() &&
                           call_descriptor->IsJSFunctionCall() &&
                           parameter_slots != 0;

  if (call_descriptor->IsCFunctionCall()) {
    AssembleDeconstructFrame();
  } else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now unless they have a
    // variable number of stack slot pops.
    if (additional_pop_count->IsImmediate() &&
        g.ToConstant(additional_pop_count).ToInt32() == 0) {
      if (return_label_.is_bound()) {
        __ Branch(&return_label_);
        return;
      } else {
        __ bind(&return_label_);
      }
    }
    if (drop_jsargs) {
      // Get the actual argument count.
      __ Ld(t0, MemOperand(fp, StandardFrameConstants::kArgCOffset));
    }
    AssembleDeconstructFrame();
  }
  if (drop_jsargs) {
    // Pop all arguments from the stack (including the receiver); the count
    // is max(argc, parameter_slots).
    if (parameter_slots > 1) {
      __ li(kScratchReg, parameter_slots);
      __ slt(kScratchReg2, t0, kScratchReg);
      __ movn(t0, kScratchReg, kScratchReg2);
    }
    __ Dlsa(sp, sp, t0, kSystemPointerSizeLog2);
  } else if (additional_pop_count->IsImmediate()) {
    int additional_count = g.ToConstant(additional_pop_count).ToInt32();
    __ Drop(parameter_slots + additional_count);
  } else {
    Register pop_reg = g.ToRegister(additional_pop_count);
    __ Drop(parameter_slots);
    __ Dlsa(sp, sp, pop_reg, kSystemPointerSizeLog2);
  }
  __ Ret();
}

void CodeGenerator::FinishCode() {}

void CodeGenerator::PrepareForDeoptimizationExits(
    ZoneDeque<DeoptimizationExit*>* exits) {}
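// For JS-linkage returns, the number of stack slots to drop is not a
// compile-time constant: the function may have been invoked with more actual
// arguments than declared parameters. Hence the max(argc, parameter_slots)
// computation in AssembleReturn, which pops whichever count is larger so that
// over-applied arguments are removed as well.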
AllocatedOperand CodeGenerator::Push(InstructionOperand* source) {
  auto rep = LocationOperand::cast(source)->representation();
  int new_slots = ElementSizeInPointers(rep);
  MipsOperandConverter g(this, nullptr);
  int last_frame_slot_id =
      frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
  int sp_delta = frame_access_state_->sp_delta();
  int slot_id = last_frame_slot_id + sp_delta + new_slots;
  AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
  if (source->IsRegister()) {
    __ Push(g.ToRegister(source));
    frame_access_state()->IncreaseSPDelta(new_slots);
  } else if (source->IsStackSlot()) {
    UseScratchRegisterScope temps(masm());
    Register scratch = temps.Acquire();
    __ Ld(scratch, g.ToMemOperand(source));
    __ Push(scratch);
    frame_access_state()->IncreaseSPDelta(new_slots);
  } else {
    // The push instruction covers all the cases that tail calls need to
    // spill to the stack.
    UNREACHABLE();
  }
  temp_slots_ += new_slots;
  return stack_slot;
}

void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
  MipsOperandConverter g(this, nullptr);
  int dropped_slots = ElementSizeInPointers(rep);
  if (dest->IsRegister()) {
    frame_access_state()->IncreaseSPDelta(-dropped_slots);
    __ Pop(g.ToRegister(dest));
  } else if (dest->IsStackSlot()) {
    frame_access_state()->IncreaseSPDelta(-dropped_slots);
    UseScratchRegisterScope temps(masm());
    Register scratch = temps.Acquire();
    __ Pop(scratch);
    __ Sd(scratch, g.ToMemOperand(dest));
  } else {
    int last_frame_slot_id =
        frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
    int sp_delta = frame_access_state_->sp_delta();
    int slot_id = last_frame_slot_id + sp_delta;
    AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
    AssembleMove(&stack_slot, dest);
  }
  temp_slots_ -= dropped_slots;
}
void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
                                       MachineRepresentation rep) {
  // Must be kept in sync with {MoveTempLocationTo}.
  DCHECK(!source->IsImmediate());
  move_cycle_.temps.emplace(masm());
  auto& temps = *move_cycle_.temps;
  // Temporarily exclude the reserved scratch registers while picking one to
  // resolve the move cycle, then re-include them for the move itself.
  temps.Exclude(move_cycle_.scratch_regs);
  if (!IsFloatingPoint(rep)) {
    if (temps.hasAvailable()) {
      Register scratch = move_cycle_.temps->Acquire();
      move_cycle_.scratch_reg = scratch;
    }
  }
  temps.Include(move_cycle_.scratch_regs);
  if (move_cycle_.scratch_reg.has_value()) {
    // A scratch register is available for this representation.
    AllocatedOperand scratch(LocationOperand::REGISTER, rep,
                             move_cycle_.scratch_reg->code());
    AssembleMove(source, &scratch);
  } else {
    // The scratch registers are blocked by pending moves; use the stack
    // instead.
    Push(source);
  }
}

void CodeGenerator::SetPendingMove(MoveOperands* move) {
  InstructionOperand* src = &move->source();
  InstructionOperand* dst = &move->destination();
  UseScratchRegisterScope temps(masm());
  if (src->IsConstant() && dst->IsFPLocationOperand()) {
    Register temp = temps.Acquire();
    move_cycle_.scratch_regs.set(temp);
  } else if (src->IsAnyStackSlot() || dst->IsAnyStackSlot()) {
    MipsOperandConverter g(this, nullptr);
    bool src_need_scratch = false;
    bool dst_need_scratch = false;
    if (src->IsAnyStackSlot()) {
      MemOperand src_mem = g.ToMemOperand(src);
      // Offsets that do not fit in int16, or unaligned offsets whose adjusted
      // form does not fit, require a scratch register.
      src_need_scratch =
          (!is_int16(src_mem.offset())) || (((src_mem.offset() & 0b111) != 0) &&
                                            !is_int16(src_mem.offset() + 4));
    }
    if (dst->IsAnyStackSlot()) {
      MemOperand dst_mem = g.ToMemOperand(dst);
      dst_need_scratch =
          (!is_int16(dst_mem.offset())) || (((dst_mem.offset() & 0b111) != 0) &&
                                            !is_int16(dst_mem.offset() + 4));
    }
    if (src_need_scratch || dst_need_scratch) {
      Register temp = temps.Acquire();
      move_cycle_.scratch_regs.set(temp);
    }
  }
}
namespace {

bool Is32BitOperand(InstructionOperand* operand) {
  DCHECK(operand->IsStackSlot() || operand->IsRegister());
  MachineRepresentation mr = LocationOperand::cast(operand)->representation();
  return mr == MachineRepresentation::kWord32 ||
         mr == MachineRepresentation::kCompressed ||
         mr == MachineRepresentation::kCompressedPointer;
}

// When only 32 bits are needed, move only 32 bits; otherwise the destination
// register's upper 32 bits may contain dirty data.
bool Use32BitMove(InstructionOperand* source, InstructionOperand* destination) {
  return Is32BitOperand(source) && Is32BitOperand(destination);
}

}  // namespace
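// A 32-bit stack slot only owns four bytes of the frame, so loading it with
// the 64-bit Ld could read past the value; Lw both stays within the slot and
// sign-extends into the upper half of the destination register, matching how
// 32-bit values are represented on MIPS64.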
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  MipsOperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(g.ToRegister(destination), src);
    } else {
      __ Sd(src, g.ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      if (Use32BitMove(source, destination)) {
        __ Lw(g.ToRegister(destination), src);
      } else {
        __ Ld(g.ToRegister(destination), src);
      }
    } else {
      Register temp = kScratchReg;
      __ Ld(temp, src);
      __ Sd(temp, g.ToMemOperand(destination));
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      switch (src.type()) {
        case Constant::kInt32:
          __ li(dst, Operand(src.ToInt32(), src.rmode()));
          break;
        case Constant::kInt64:
          __ li(dst, Operand(src.ToInt64(), src.rmode()));
          break;
        case Constant::kFloat32:
          __ li(dst, Operand::EmbeddedNumber(src.ToFloat32()));
          break;
        case Constant::kFloat64:
          __ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
          break;
        case Constant::kExternalReference:
          __ li(dst, src.ToExternalReference());
          break;
        case Constant::kHeapObject: {
          Handle<HeapObject> src_object = src.ToHeapObject();
          RootIndex index;
          if (IsMaterializableFromRoot(src_object, &index)) {
            __ LoadRoot(dst, index);
          } else {
            __ li(dst, src_object);
          }
          break;
        }
        default:
          UNREACHABLE();
      }
      if (destination->IsStackSlot()) __ Sd(dst, g.ToMemOperand(destination));
    } else if (src.type() == Constant::kFloat32) {
      if (destination->IsFPStackSlot()) {
        if (base::bit_cast<int32_t>(src.ToFloat32()) == 0) {
          __ Sd(zero_reg, g.ToMemOperand(destination));
        } else {
          __ li(kScratchReg, Operand(base::bit_cast<int32_t>(src.ToFloat32())));
          __ Sd(kScratchReg, g.ToMemOperand(destination));
        }
      } else {
        DCHECK(destination->IsFPRegister());
        FloatRegister dst = g.ToSingleRegister(destination);
        __ Move(dst, src.ToFloat32());
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      DoubleRegister dst = destination->IsFPRegister()
                               ? g.ToDoubleRegister(destination)
                               : kScratchDoubleReg;
      __ Move(dst, src.ToFloat64().value());
      if (destination->IsFPStackSlot()) {
        __ Sdc1(dst, g.ToMemOperand(destination));
      }
    }
  } else if (source->IsFPRegister()) {
    MachineRepresentation rep = LocationOperand::cast(source)->representation();
    if (rep == MachineRepresentation::kSimd128) {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      MSARegister src = g.ToSimd128Register(source);
      if (destination->IsSimd128Register()) {
        MSARegister dst = g.ToSimd128Register(destination);
        __ move_v(dst, src);
      } else {
        DCHECK(destination->IsSimd128StackSlot());
        __ st_b(src, g.ToMemOperand(destination));
      }
    } else {
      FPURegister src = g.ToDoubleRegister(source);
      if (destination->IsFPRegister()) {
        FPURegister dst = g.ToDoubleRegister(destination);
        __ Move(dst, src);
      } else {
        DCHECK(destination->IsFPStackSlot());
        __ Sdc1(src, g.ToMemOperand(destination));
      }
    }
  } else if (source->IsFPStackSlot()) {
    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
    MemOperand src = g.ToMemOperand(source);
    MachineRepresentation rep = LocationOperand::cast(source)->representation();
    if (rep == MachineRepresentation::kSimd128) {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      if (destination->IsSimd128Register()) {
        __ ld_b(g.ToSimd128Register(destination), src);
      } else {
        DCHECK(destination->IsSimd128StackSlot());
        MSARegister temp = kSimd128ScratchReg;
        __ ld_b(temp, src);
        __ st_b(temp, g.ToMemOperand(destination));
      }
    } else {
      if (destination->IsFPRegister()) {
        __ Ldc1(g.ToDoubleRegister(destination), src);
      } else {
        DCHECK(destination->IsFPStackSlot());
        FPURegister temp = kScratchDoubleReg;
        __ Ldc1(temp, src);
        __ Sdc1(temp, g.ToMemOperand(destination));
      }
    }
  } else {
    UNREACHABLE();
  }
}
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  MipsOperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ mov(temp, src);
      __ Ld(src, dst);
      __ Sd(temp, dst);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
    Register temp_0 = kScratchReg;
    Register temp_1 = kScratchReg2;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ Ld(temp_0, src);
    __ Ld(temp_1, dst);
    __ Sd(temp_0, dst);
    __ Sd(temp_1, src);
  } else if (source->IsFPRegister()) {
    MachineRepresentation rep = LocationOperand::cast(source)->representation();
    if (rep == MachineRepresentation::kSimd128) {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      MSARegister temp = kSimd128ScratchReg;
      MSARegister src = g.ToSimd128Register(source);
      if (destination->IsSimd128Register()) {
        MSARegister dst = g.ToSimd128Register(destination);
        __ move_v(temp, src);
        __ move_v(src, dst);
        __ move_v(dst, temp);
      } else {
        DCHECK(destination->IsSimd128StackSlot());
        MemOperand dst = g.ToMemOperand(destination);
        __ move_v(temp, src);
        __ ld_b(src, dst);
        __ st_b(temp, dst);
      }
    } else {
      FPURegister temp = kScratchDoubleReg;
      FPURegister src = g.ToDoubleRegister(source);
      if (destination->IsFPRegister()) {
        FPURegister dst = g.ToDoubleRegister(destination);
        __ Move(temp, src);
        __ Move(src, dst);
        __ Move(dst, temp);
      } else {
        DCHECK(destination->IsFPStackSlot());
        MemOperand dst = g.ToMemOperand(destination);
        __ Move(temp, src);
        __ Ldc1(src, dst);
        __ Sdc1(temp, dst);
      }
    }
  } else if (source->IsFPStackSlot()) {
    DCHECK(destination->IsFPStackSlot());
    Register temp_0 = kScratchReg;
    MemOperand src0 = g.ToMemOperand(source);
    MemOperand src1(src0.rm(), src0.offset() + kInt64Size);
    MemOperand dst0 = g.ToMemOperand(destination);
    MemOperand dst1(dst0.rm(), dst0.offset() + kInt64Size);
    MachineRepresentation rep = LocationOperand::cast(source)->representation();
    if (rep == MachineRepresentation::kSimd128) {
      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
      MSARegister temp_1 = kSimd128ScratchReg;
      __ ld_b(temp_1, dst0);  // Save destination in temp_1.
      __ Ld(temp_0, src0);    // Then use temp_0 to copy source to destination.
      __ Sd(temp_0, dst0);
      __ Ld(temp_0, src1);
      __ Sd(temp_0, dst1);
      __ st_b(temp_1, src0);
    } else {
      FPURegister temp_1 = kScratchDoubleReg;
      __ Ldc1(temp_1, dst0);  // Save destination in temp_1.
      __ Ld(temp_0, src0);    // Then use temp_0 to copy source to destination.
      __ Sdc1(temp_1, src0);
      __ Sd(temp_0, dst0);
    }
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}
#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
#undef ASSEMBLE_ATOMIC_STORE_INTEGER
#undef ASSEMBLE_ATOMIC_BINOP
#undef ASSEMBLE_ATOMIC_BINOP_EXT
#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT
#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT
#undef ASSEMBLE_IEEE754_BINOP
#undef ASSEMBLE_IEEE754_UNOP
#undef ASSEMBLE_F64X2_ARITHMETIC_BINOP