#if V8_ENABLE_WEBASSEMBLY
      case kMode_Operand2_R_LSL_I:
      case kMode_Operand2_R_LSR_I:
      case kMode_Operand2_R_ASR_I:
      case kMode_Operand2_R_ROR_I:
      case kMode_Operand2_R_UXTB:
      case kMode_Operand2_R_UXTH:
      case kMode_Operand2_R_SXTB:
      case kMode_Operand2_R_SXTH:
      case kMode_Operand2_R_SXTW:
      case kMode_Operand2_R_LSL_I:
      case kMode_Operand2_R_LSR_I:
      case kMode_Operand2_R_ASR_I:
      case kMode_Operand2_R_ROR_I:
      case kMode_Operand2_R_UXTB:
      case kMode_Operand2_R_UXTH:
      case kMode_Operand2_R_SXTB:
      case kMode_Operand2_R_SXTH:
      case kMode_Operand2_R_SXTW:
      case kMode_Operand2_R_LSR_I:
      case kMode_Operand2_R_ASR_I:
      case kMode_Operand2_R_ROR_I:
      case kMode_Operand2_R_UXTB:
      case kMode_Operand2_R_UXTH:
      case kMode_Operand2_R_SXTB:
      case kMode_Operand2_R_SXTH:
      case kMode_Operand2_R_SXTW:
      case kMode_Operand2_R_LSL_I:
    switch (constant.type()) {
        return Operand(constant.ToInt32(), constant.rmode());
        return Operand(constant.ToInt64(), constant.rmode());
        return Operand(constant.ToExternalReference());
        return Operand(constant.ToHeapObject());
        return Operand(constant.ToHeapObject());
  if (offset.from_frame_pointer()) {
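// Out-of-line code that calls the record-write (write barrier) stub for a
// preceding tagged store; it is only reached when the inline write-barrier
// checks of that store fall through to the slow path.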
class OutOfLineRecordWrite final : public OutOfLineCode {
  OutOfLineRecordWrite(
      UnwindingInfoWriter* unwinding_info_writer,
      : OutOfLineCode(gen),
#if V8_ENABLE_WEBASSEMBLY
        stub_mode_(stub_mode),
  void Generate() final {
      __ DecompressTagged(value_, value_);
    SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
    __ Push<MacroAssembler::kSignLR>(lr, padreg);
      __ CallEphemeronKeyBarrier(object_, offset_, save_fp_mode);
      __ CallIndirectPointerBarrier(object_, offset_, save_fp_mode,
#if V8_ENABLE_WEBASSEMBLY
    } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
      __ CallRecordWriteStubSaveRegisters(object_, offset_, save_fp_mode,
                                          StubCallMode::kCallWasmRuntimeStub);
      __ CallRecordWriteStubSaveRegisters(object_, offset_, save_fp_mode);
    __ Pop<MacroAssembler::kAuthLR>(padreg, lr);
#if V8_ENABLE_WEBASSEMBLY
#if V8_ENABLE_WEBASSEMBLY
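// Out-of-line trap handler for WebAssembly: emits the call to the trap
// builtin as a WASM_STUB_CALL and records a safepoint at the call site.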
class WasmOutOfLineTrap : public OutOfLineCode {
  WasmOutOfLineTrap(CodeGenerator* gen, Instruction* instr)
  void Generate() override {
    Arm64OperandConverter i(gen_, instr_);
        static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
    GenerateCallToTrap(trap_id);
  void GenerateWithTrapId(TrapId trap_id) { GenerateCallToTrap(trap_id); }
  void GenerateCallToTrap(TrapId trap_id) {
    gen_->AssembleSourcePosition(instr_);
    __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
    ReferenceMap* reference_map = gen_->zone()->New<ReferenceMap>(gen_->zone());
    gen_->RecordSafepoint(reference_map);
    __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
void RecordTrapInfoIfNeeded(Zone* zone, CodeGenerator* codegen,
    codegen->RecordProtectedInstruction(pc);
void RecordTrapInfoIfNeeded(Zone* zone, CodeGenerator* codegen,
template <typename Fn>
void EmitFpOrNeonUnop(MacroAssembler* masm, Fn fn, Instruction* instr,
  (masm->*fn)(output, input);
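// The ASSEMBLE_* macros below expand to the per-opcode code sequences used by
// AssembleArchInstruction. The atomic variants call RecordTrapInfoIfNeeded
// immediately before the instruction that may fault, so that Wasm
// out-of-bounds accesses can be recognized from the faulting pc.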
#define ASSEMBLE_SHIFT(asm_instr, width) \
  if (instr->InputAt(1)->IsRegister()) { \
    __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), \
                 i.InputRegister##width(1)); \
        static_cast<uint32_t>(i.InputOperand##width(1).ImmediateValue()); \
    __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), \
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr, reg) \
  __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
  RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
  __ asm_instr(i.Output##reg(), i.TempRegister(0)); \
#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, reg) \
  __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
  RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
  __ asm_instr(i.Input##reg(2), i.TempRegister(0)); \
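// The atomic read-modify-write helpers below choose between two code paths at
// run time: if ARMv8.1 LSE is available, a single atomic instruction
// (Swpal/Casal/Ldaddal/Ldclral/...) is emitted; otherwise they fall back to a
// load-acquire-exclusive / store-release-exclusive (ldaxr/stlxr) retry loop.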
#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(suffix, reg) \
  __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
  if (CpuFeatures::IsSupported(LSE)) { \
    CpuFeatureScope scope(masm(), LSE); \
    RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
    __ Swpal##suffix(i.Input##reg(2), i.Output##reg(), \
                     MemOperand(i.TempRegister(0))); \
    __ Bind(&exchange); \
    RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
    __ ldaxr##suffix(i.Output##reg(), i.TempRegister(0)); \
    __ stlxr##suffix(i.TempRegister32(1), i.Input##reg(2), \
                     i.TempRegister(0)); \
    __ Cbnz(i.TempRegister32(1), &exchange); \
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(suffix, ext, reg) \
  __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
  if (CpuFeatures::IsSupported(LSE)) { \
    DCHECK_EQ(i.OutputRegister(), i.InputRegister(2)); \
    CpuFeatureScope scope(masm(), LSE); \
    RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
    __ Casal##suffix(i.Output##reg(), i.Input##reg(3), \
                     MemOperand(i.TempRegister(0))); \
    Label compareExchange; \
    __ Bind(&compareExchange); \
    RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
    __ ldaxr##suffix(i.Output##reg(), i.TempRegister(0)); \
    __ Cmp(i.Output##reg(), Operand(i.Input##reg(2), ext)); \
    __ stlxr##suffix(i.TempRegister32(1), i.Input##reg(3), \
                     i.TempRegister(0)); \
    __ Cbnz(i.TempRegister32(1), &compareExchange); \
#define ASSEMBLE_ATOMIC_SUB(suffix, reg) \
  __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
  if (CpuFeatures::IsSupported(LSE)) { \
    CpuFeatureScope scope(masm(), LSE); \
    UseScratchRegisterScope temps(masm()); \
    Register scratch = temps.AcquireSameSizeAs(i.Input##reg(2)); \
    __ Neg(scratch, i.Input##reg(2)); \
    RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
    __ Ldaddal##suffix(scratch, i.Output##reg(), \
                       MemOperand(i.TempRegister(0))); \
    RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
    __ ldaxr##suffix(i.Output##reg(), i.TempRegister(0)); \
    __ Sub(i.Temp##reg(1), i.Output##reg(), Operand(i.Input##reg(2))); \
    __ stlxr##suffix(i.TempRegister32(2), i.Temp##reg(1), \
                     i.TempRegister(0)); \
    __ Cbnz(i.TempRegister32(2), &binop); \
#define ASSEMBLE_ATOMIC_AND(suffix, reg) \
  __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
  if (CpuFeatures::IsSupported(LSE)) { \
    CpuFeatureScope scope(masm(), LSE); \
    UseScratchRegisterScope temps(masm()); \
    Register scratch = temps.AcquireSameSizeAs(i.Input##reg(2)); \
    __ Mvn(scratch, i.Input##reg(2)); \
    RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
    __ Ldclral##suffix(scratch, i.Output##reg(), \
                       MemOperand(i.TempRegister(0))); \
    RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
    __ ldaxr##suffix(i.Output##reg(), i.TempRegister(0)); \
    __ And(i.Temp##reg(1), i.Output##reg(), Operand(i.Input##reg(2))); \
    __ stlxr##suffix(i.TempRegister32(2), i.Temp##reg(1), \
                     i.TempRegister(0)); \
    __ Cbnz(i.TempRegister32(2), &binop); \
#define ASSEMBLE_ATOMIC_BINOP(suffix, bin_instr, lse_instr, reg) \
  __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
  if (CpuFeatures::IsSupported(LSE)) { \
    CpuFeatureScope scope(masm(), LSE); \
    RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
    __ lse_instr##suffix(i.Input##reg(2), i.Output##reg(), \
                         MemOperand(i.TempRegister(0))); \
    RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
    __ ldaxr##suffix(i.Output##reg(), i.TempRegister(0)); \
    __ bin_instr(i.Temp##reg(1), i.Output##reg(), Operand(i.Input##reg(2))); \
    __ stlxr##suffix(i.TempRegister32(2), i.Temp##reg(1), \
                     i.TempRegister(0)); \
    __ Cbnz(i.TempRegister32(2), &binop); \
#define ASSEMBLE_IEEE754_BINOP(name) \
  FrameScope scope(masm(), StackFrame::MANUAL); \
  __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
#define ASSEMBLE_IEEE754_UNOP(name) \
  FrameScope scope(masm(), StackFrame::MANUAL); \
  __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
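// SIMD shift helpers: an immediate shift amount is encoded directly into the
// instruction, while a variable shift amount is masked to the lane width and
// broadcast into a vector register first (and negated for right shifts, since
// the variable NEON shift instructions only shift left).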
#define ASSEMBLE_SIMD_SHIFT_LEFT(asm_imm, width, format, asm_shl, gp) \
  if (instr->InputAt(1)->IsImmediate()) { \
    __ asm_imm(i.OutputSimd128Register().format(), \
               i.InputSimd128Register(0).format(), i.InputInt##width(1)); \
    UseScratchRegisterScope temps(masm()); \
    VRegister tmp = temps.AcquireQ(); \
    Register shift = temps.Acquire##gp(); \
    constexpr int mask = (1 << width) - 1; \
    __ And(shift, i.InputRegister32(1), mask); \
    __ Dup(tmp.format(), shift); \
    __ asm_shl(i.OutputSimd128Register().format(), \
               i.InputSimd128Register(0).format(), tmp.format()); \
#define ASSEMBLE_SIMD_SHIFT_RIGHT(asm_imm, width, format, asm_shl, gp) \
  if (instr->InputAt(1)->IsImmediate()) { \
    __ asm_imm(i.OutputSimd128Register().format(), \
               i.InputSimd128Register(0).format(), i.InputInt##width(1)); \
    UseScratchRegisterScope temps(masm()); \
    VRegister tmp = temps.AcquireQ(); \
    Register shift = temps.Acquire##gp(); \
    constexpr int mask = (1 << width) - 1; \
    __ And(shift, i.InputRegister32(1), mask); \
    __ Dup(tmp.format(), shift); \
    __ Neg(tmp.format(), tmp.format()); \
    __ asm_shl(i.OutputSimd128Register().format(), \
               i.InputSimd128Register(0).format(), tmp.format()); \
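// Adjusts sp before a tail call so the stack matches the slot count the
// callee expects; the stack is always grown when needed, but only shrunk when
// allow_shrinkage is set.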
void AdjustStackPointerForTailCall(MacroAssembler* masm,
                                   FrameAccessState* state,
                                   int new_slot_above_sp,
                                   bool allow_shrinkage = true) {
  int current_sp_offset = state->GetSPToFPSlotCount() +
  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
  if (stack_slot_delta > 0) {
    masm->Claim(stack_slot_delta);
    state->IncreaseSPDelta(stack_slot_delta);
  } else if (allow_shrinkage && stack_slot_delta < 0) {
    masm->Drop(-stack_slot_delta);
    state->IncreaseSPDelta(stack_slot_delta);
                                              int first_unused_slot_offset) {
                                first_unused_slot_offset, false);
                                              int first_unused_slot_offset) {
  DCHECK_EQ(first_unused_slot_offset % 2, 0);
                                first_unused_slot_offset);
    InstructionOperandConverter g(this, instr);
    int optional_padding_offset = g.InputInt32(instr->InputCount() - 2);
    if (optional_padding_offset % 2) {
  UseScratchRegisterScope temps(masm());
  Register scratch = temps.AcquireX();
  __ ComputeCodeStartAddress(scratch);
  __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
#ifdef V8_ENABLE_LEAPTIERING
void CodeGenerator::AssembleDispatchHandleRegisterCheck() {
  DCHECK(linkage()->GetIncomingDescriptor()->IsJSFunctionCall());
  UseScratchRegisterScope temps(masm());
  Register actual_parameter_count = temps.AcquireX();
  Register scratch = temps.AcquireX();
  __ LoadParameterCountFromJSDispatchTable(
  __ cmp(actual_parameter_count, scratch);
  __ Assert(eq, AbortReason::kWrongFunctionDispatchHandle);
int32_t GetLaneMask(int32_t lane_count) { return lane_count * 2 - 1; }
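// Helpers for lowering generic shuffles: lane indices are decoded from the
// packed shuffle immediate, indices beyond the last src0 lane select from the
// second source, and the result is assembled with Dup (splat) or per-lane Mov
// instructions.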
  int32_t shuffle = i.InputInt32(2);
  int32_t max_src0_lane = lane_count - 1;
  int lane = shuffle & lane_mask;
  VRegister src = (lane > max_src0_lane) ? src1 : src0;
  lane &= max_src0_lane;
  masm->Dup(dst, src, lane);
  if (dst == src0 || dst == src1) {
    masm->Mov(temp, src0);
  } else if (dst == src1) {
    masm->Mov(temp, src1);
  int32_t shuffle = i.InputInt32(2);
  int32_t max_src0_lane = lane_count - 1;
  for (int i = 0; i < 2; i++) {
    int lane = shuffle & lane_mask;
    if (lane > max_src0_lane) {
      lane &= max_src0_lane;
    masm->Mov(dst, i, src, lane);
  if (dst == src0 || dst == src1) {
    masm->Mov(temp, src0);
  } else if (dst == src1) {
    masm->Mov(temp, src1);
  int32_t shuffle = i.InputInt32(2);
  int32_t max_src0_lane = lane_count - 1;
  const std::array<int, 4> input_lanes{
      shuffle & lane_mask, shuffle >> 8 & lane_mask, shuffle >> 16 & lane_mask,
      shuffle >> 24 & lane_mask};
  std::array<int, 8> lane_counts = {0};
  for (int lane : input_lanes) {
  int duplicate_lane = -1;
  for (size_t lane = 0; lane < lane_counts.size(); ++lane) {
    if (lane_counts[lane] > 1) {
      duplicate_lane = static_cast<int>(lane);
  if (duplicate_lane > max_src0_lane) {
    masm->Dup(dst, src1, duplicate_lane & max_src0_lane);
    masm->Dup(dst, src0, duplicate_lane);
  for (int i = 0; i < 4; i++) {
    int lane = shuffle & lane_mask;
    if (lane == duplicate_lane) continue;
    if (lane > max_src0_lane) {
      lane &= max_src0_lane;
    masm->Mov(dst, i, src, lane);
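  // AssembleArchInstruction: emits the arm64 code for a single instruction by
  // dispatching on arch_opcode in the switch below.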
  Arm64OperandConverter i(this, instr);
  switch (arch_opcode) {
    case kArchCallCodeObject: {
      if (instr->InputAt(0)->IsImmediate()) {
        Register reg = i.InputRegister(0);
            i.InputCodeEntrypointTag(instr->CodeEnrypointTagInputIndex());
        __ CallCodeObject(reg, tag);
    case kArchCallBuiltinPointer: {
      Register builtin_index = i.InputRegister(0);
      __ CallBuiltinByIndex(builtin_index, target);
#if V8_ENABLE_WEBASSEMBLY
    case kArchCallWasmFunction:
    case kArchCallWasmFunctionIndirect: {
      if (instr->InputAt(0)->IsImmediate()) {
        DCHECK_EQ(arch_opcode, kArchCallWasmFunction);
        Constant constant = i.ToConstant(instr->InputAt(0));
        __ Call(wasm_code, constant.rmode());
      } else if (arch_opcode == kArchCallWasmFunctionIndirect) {
        __ CallWasmCodePointer(
            i.InputInt64(instr->WasmSignatureHashInputIndex()));
        __ Call(i.InputRegister(0));
    case kArchTailCallWasm:
    case kArchTailCallWasmIndirect: {
      if (instr->InputAt(0)->IsImmediate()) {
        DCHECK_EQ(arch_opcode, kArchTailCallWasm);
        Constant constant = i.ToConstant(instr->InputAt(0));
        __ Jump(wasm_code, constant.rmode());
        UseScratchRegisterScope temps(masm());
        if (arch_opcode == kArchTailCallWasmIndirect) {
          __ CallWasmCodePointer(
              x17, i.InputInt64(instr->WasmSignatureHashInputIndex()),
    case kArchTailCallCodeObject: {
      if (instr->InputAt(0)->IsImmediate()) {
            i.InputCodeEntrypointTag(instr->CodeEnrypointTagInputIndex());
        __ JumpCodeObject(reg, tag);
    case kArchTailCallAddress: {
      UseScratchRegisterScope temps(masm());
    case kArchCallJSFunction: {
      UseScratchRegisterScope scope(masm());
      __ LoadTaggedField(temp,
      __ Assert(eq, AbortReason::kWrongFunctionContext);
      uint32_t num_arguments =
          i.InputUint32(instr->JSCallArgumentCountInputIndex());
      __ CallJSFunction(func, num_arguments);
    case kArchPrepareCallCFunction:
    case kArchSaveCallerRegisters: {
    case kArchRestoreCallerRegisters: {
    case kArchPrepareTailCall:
    case kArchCallCFunctionWithFrameState:
    case kArchCallCFunction: {
      Label return_location;
#if V8_ENABLE_WEBASSEMBLY
      if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
        __ StoreReturnAddressInWasmExitFrame(&return_location);
      if (instr->InputAt(0)->IsImmediate()) {
        ExternalReference ref = i.InputExternalReference(0);
        pc_offset = __ CallCFunction(ref, num_gp_parameters, num_fp_parameters,
                                     set_isolate_data_slots, &return_location);
        pc_offset = __ CallCFunction(func, num_gp_parameters, num_fp_parameters,
                                     set_isolate_data_slots, &return_location);
      bool const needs_frame_state =
          (arch_opcode == kArchCallCFunctionWithFrameState);
      if (needs_frame_state) {
    case kArchTableSwitch:
    case kArchBinarySearchSwitch:
    case kArchAbortCSADcheck:
        __ CallBuiltin(Builtin::kAbortCSADcheck);
        __ Debug("kArchAbortCSADcheck", 0, BREAK);
    case kArchDebugBreak:
      __ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
    case kArchThrowTerminator:
    case kArchDeoptimize: {
      DeoptimizationExit* exit =
      __ B(exit->label());
    case kArchFramePointer:
      __ mov(i.OutputRegister(), fp);
    case kArchParentFramePointer:
        __ mov(i.OutputRegister(), fp);
#if V8_ENABLE_WEBASSEMBLY
    case kArchStackPointer:
      __ mov(i.OutputRegister(), sp);
    case kArchSetStackPointer: {
      __ RecordComment("-- Set simulator stack limit --");
      __ Mov(sp, i.InputRegister(0));
    case kArchStackPointerGreaterThan: {
        lhs_register = i.TempRegister(0);
      constexpr size_t kValueIndex = 0;
      DCHECK(instr->InputAt(kValueIndex)->IsRegister());
      __ Cmp(lhs_register, i.InputRegister(kValueIndex));
    case kArchStackCheckOffset:
    case kArchTruncateDoubleToI:
    case kArchStoreWithWriteBarrier: {
      if (addressing_mode == kMode_MRI) {
        offset = Operand(i.InputInt64(1));
        offset = Operand(i.InputRegister(1));
        __ Check(ne, AbortReason::kOperandIsCleared);
      auto ool = zone()->New<OutOfLineRecordWrite>(
      __ JumpIfSmi(value, ool->exit());
      __ Bind(ool->exit());
    case kArchAtomicStoreWithWriteBarrier: {
      auto ool = zone()->New<OutOfLineRecordWrite>(
      __ AtomicStoreTaggedField(value, object, offset, i.TempRegister(0));
      __ JumpIfSmi(value, ool->exit());
      __ Bind(ool->exit());
    case kArchStoreIndirectWithWriteBarrier: {
      if (addressing_mode == kMode_MRI) {
        offset = Operand(i.InputInt64(1));
        offset = Operand(i.InputRegister(1));
      auto ool = zone()->New<OutOfLineRecordWrite>(
      __ JumpIfMarking(ool->entry());
      __ Bind(ool->exit());
    case kArchStackSlot: {
      __ Add(i.OutputRegister(0), base, Operand(offset.offset()));
    case kIeee754Float64Acos:
    case kIeee754Float64Acosh:
    case kIeee754Float64Asin:
    case kIeee754Float64Asinh:
    case kIeee754Float64Atan:
    case kIeee754Float64Atanh:
    case kIeee754Float64Atan2:
    case kIeee754Float64Cos:
    case kIeee754Float64Cosh:
    case kIeee754Float64Cbrt:
    case kIeee754Float64Exp:
    case kIeee754Float64Expm1:
    case kIeee754Float64Log:
    case kIeee754Float64Log1p:
    case kIeee754Float64Log2:
    case kIeee754Float64Log10:
    case kIeee754Float64Pow:
    case kIeee754Float64Sin:
    case kIeee754Float64Sinh:
    case kIeee754Float64Tan:
    case kIeee754Float64Tanh:
    case kArm64Float16RoundDown:
    case kArm64Float32RoundDown:
    case kArm64Float64RoundDown:
    case kArm64Float16RoundUp:
    case kArm64Float32RoundUp:
    case kArm64Float64RoundUp:
    case kArm64Float64RoundTiesAway:
    case kArm64Float16RoundTruncate:
    case kArm64Float32RoundTruncate:
    case kArm64Float64RoundTruncate:
    case kArm64Float16RoundTiesEven:
    case kArm64Float32RoundTiesEven:
    case kArm64Float64RoundTiesEven:
        __ Adds(i.OutputRegister(), i.InputOrZeroRegister64(0),
                i.InputOperand2_64(1));
        __ Add(i.OutputRegister(), i.InputOrZeroRegister64(0),
               i.InputOperand2_64(1));
        __ Adds(i.OutputRegister32(), i.InputOrZeroRegister32(0),
                i.InputOperand2_32(1));
        __ Add(i.OutputRegister32(), i.InputOrZeroRegister32(0),
               i.InputOperand2_32(1));
        __ Ands(i.OutputRegister(), i.InputOrZeroRegister64(0),
                i.InputOperand2_64(1));
        __ And(i.OutputRegister(), i.InputOrZeroRegister64(0),
               i.InputOperand2_64(1));
        __ Ands(i.OutputRegister32(), i.InputOrZeroRegister32(0),
                i.InputOperand2_32(1));
        __ And(i.OutputRegister32(), i.InputOrZeroRegister32(0),
               i.InputOperand2_32(1));
      __ Bic(i.OutputRegister(), i.InputOrZeroRegister64(0),
             i.InputOperand2_64(1));
      __ Bic(i.OutputRegister32(), i.InputOrZeroRegister32(0),
             i.InputOperand2_32(1));
      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      __ Smulh(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      __ Umulh(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      __ Mul(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
#if V8_ENABLE_WEBASSEMBLY
      CpuFeatureScope scope(masm(), SHA3);
              i.OutputSimd128Register().V16B(), i.InputSimd128Register(0).V16B(),
              i.InputSimd128Register(1).V16B(), i.InputSimd128Register(2).V16B());
      CpuFeatureScope scope(masm(), SHA3);
              i.OutputSimd128Register().V16B(), i.InputSimd128Register(0).V16B(),
              i.InputSimd128Register(1).V16B(), i.InputSimd128Register(2).V16B());
    case kArm64Sadalp: {
      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
      __ Sadalp(i.OutputSimd128Register().Format(dst_f),
                i.InputSimd128Register(1).Format(src_f));
    case kArm64Saddlp: {
      __ Saddlp(i.OutputSimd128Register().Format(dst_f),
                i.InputSimd128Register(0).Format(src_f));
    case kArm64Uadalp: {
      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
      __ Uadalp(i.OutputSimd128Register().Format(dst_f),
                i.InputSimd128Register(1).Format(src_f));
    case kArm64Uaddlp: {
      __ Uaddlp(i.OutputSimd128Register().Format(dst_f),
                i.InputSimd128Register(0).Format(src_f));
    case kArm64ISplat: {
                         : i.InputRegister32(0);
      __ Dup(i.OutputSimd128Register().Format(f), src);
    case kArm64FSplat: {
        __ Fcvt(i.OutputFloat32Register(0).H(), i.InputFloat32Register(0));
        __ Dup(i.OutputSimd128Register().Format(dst_f),
               i.OutputSimd128Register().Format(src_f), 0);
        __ Dup(i.OutputSimd128Register().Format(dst_f),
               i.InputSimd128Register(0).Format(src_f), 0);
      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
      __ Smlal(i.OutputSimd128Register().Format(dst_f),
               i.InputSimd128Register(1).Format(src_f),
               i.InputSimd128Register(2).Format(src_f));
    case kArm64Smlal2: {
      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
      __ Smlal2(i.OutputSimd128Register().Format(dst_f),
                i.InputSimd128Register(1).Format(src_f),
                i.InputSimd128Register(2).Format(src_f));
      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
      __ Umlal(i.OutputSimd128Register().Format(dst_f),
               i.InputSimd128Register(1).Format(src_f),
               i.InputSimd128Register(2).Format(src_f));
    case kArm64Umlal2: {
      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
      __ Umlal2(i.OutputSimd128Register().Format(dst_f),
                i.InputSimd128Register(1).Format(src_f),
                i.InputSimd128Register(2).Format(src_f));
      if (instr->InputAt(0)->IsRegister()) {
        __ Smull(i.OutputRegister(), i.InputRegister32(0),
                 i.InputRegister32(1));
        __ Smull(i.OutputSimd128Register().Format(dst_f),
                 i.InputSimd128Register(0).Format(src_f),
                 i.InputSimd128Register(1).Format(src_f));
    case kArm64Smull2: {
      __ Smull2(i.OutputSimd128Register().Format(dst_f),
                i.InputSimd128Register(0).Format(src_f),
                i.InputSimd128Register(1).Format(src_f));
      if (instr->InputAt(0)->IsRegister()) {
        __ Umull(i.OutputRegister(), i.InputRegister32(0),
                 i.InputRegister32(1));
        __ Umull(i.OutputSimd128Register().Format(dst_f),
                 i.InputSimd128Register(0).Format(src_f),
                 i.InputSimd128Register(1).Format(src_f));
    case kArm64Umull2: {
      __ Umull2(i.OutputSimd128Register().Format(dst_f),
                i.InputSimd128Register(0).Format(src_f),
                i.InputSimd128Register(1).Format(src_f));
      __ Madd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.InputRegister(2));
      __ Madd(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1),
              i.InputRegister32(2));
      __ Msub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.InputRegister(2));
      __ Msub(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1),
              i.InputRegister32(2));
      __ Mneg(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      __ Mneg(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      __ Sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      __ Sdiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      __ Udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      __ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      UseScratchRegisterScope scope(masm());
      __ Sdiv(temp, i.InputRegister(0), i.InputRegister(1));
      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
    case kArm64Imod32: {
      UseScratchRegisterScope scope(masm());
      __ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1));
      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
              i.InputRegister32(0));
      UseScratchRegisterScope scope(masm());
      __ Udiv(temp, i.InputRegister(0), i.InputRegister(1));
      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
    case kArm64Umod32: {
      UseScratchRegisterScope scope(masm());
      __ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1));
      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
              i.InputRegister32(0));
      __ Mvn(i.OutputRegister(), i.InputOperand(0));
      __ Mvn(i.OutputRegister32(), i.InputOperand32(0));
      __ Orr(i.OutputRegister(), i.InputOrZeroRegister64(0),
             i.InputOperand2_64(1));
      __ Orr(i.OutputRegister32(), i.InputOrZeroRegister32(0),
             i.InputOperand2_32(1));
      __ Orn(i.OutputRegister(), i.InputOrZeroRegister64(0),
             i.InputOperand2_64(1));
      __ Orn(i.OutputRegister32(), i.InputOrZeroRegister32(0),
             i.InputOperand2_32(1));
      __ Eor(i.OutputRegister(), i.InputOrZeroRegister64(0),
             i.InputOperand2_64(1));
      __ Eor(i.OutputRegister32(), i.InputOrZeroRegister32(0),
             i.InputOperand2_32(1));
      __ Eon(i.OutputRegister(), i.InputOrZeroRegister64(0),
             i.InputOperand2_64(1));
      __ Eon(i.OutputRegister32(), i.InputOrZeroRegister32(0),
             i.InputOperand2_32(1));
        __ Subs(i.OutputRegister(), i.InputOrZeroRegister64(0),
                i.InputOperand2_64(1));
        __ Sub(i.OutputRegister(), i.InputOrZeroRegister64(0),
               i.InputOperand2_64(1));
        __ Subs(i.OutputRegister32(), i.InputOrZeroRegister32(0),
                i.InputOperand2_32(1));
        __ Sub(i.OutputRegister32(), i.InputOrZeroRegister32(0),
               i.InputOperand2_32(1));
      __ Mov(i.OutputRegister32(), i.InputRegister32(0));
      __ Sxtb(i.OutputRegister32(), i.InputRegister32(0));
      __ Sxth(i.OutputRegister32(), i.InputRegister32(0));
      __ Sxtb(i.OutputRegister(), i.InputRegister32(0));
      __ Sxth(i.OutputRegister(), i.InputRegister32(0));
      __ Sxtw(i.OutputRegister(), i.InputRegister32(0));
      __ Sbfx(i.OutputRegister(), i.InputRegister(0), i.InputInt6(1),
      __ Sbfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1),
      __ Ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt6(1),
      __ Ubfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1),
      __ Ubfiz(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1),
      __ Sbfiz(i.OutputRegister(), i.InputRegister(0), i.InputInt6(1),
      __ Bfi(i.OutputRegister(), i.InputRegister(1), i.InputInt6(2),
    case kArm64TestAndBranch32:
    case kArm64TestAndBranch:
    case kArm64CompareAndBranch32:
    case kArm64CompareAndBranch:
      int count = i.InputInt32(0);
      __ AssertSpAligned();
      if (instr->InputAt(0)->IsSimd128Register()) {
        __ Poke(i.InputSimd128Register(0), operand);
      } else if (instr->InputAt(0)->IsFPRegister()) {
        __ Poke(i.InputFloat64Register(0), operand);
        __ Poke(i.InputOrZeroRegister64(0), operand);
    case kArm64PokePair: {
      int slot = i.InputInt32(2) - 1;
      if (instr->InputAt(0)->IsFPRegister()) {
        __ PokePair(i.InputFloat64Register(1), i.InputFloat64Register(0),
        __ PokePair(i.InputRegister(1), i.InputRegister(0),
      int reverse_slot = i.InputInt32(0);
      if (instr->OutputAt(0)->IsFPRegister()) {
      __ Clz(i.OutputRegister64(), i.InputRegister64(0));
      __ Clz(i.OutputRegister32(), i.InputRegister32(0));
      __ Rbit(i.OutputRegister64(), i.InputRegister64(0));
      __ Rbit(i.OutputRegister32(), i.InputRegister32(0));
      __ Rev(i.OutputRegister64(), i.InputRegister64(0));
      __ Rev(i.OutputRegister32(), i.InputRegister32(0));
      __ Cmp(i.InputOrZeroRegister64(0), i.InputOperand2_64(1));
      __ Cmp(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
      __ Cmn(i.InputOrZeroRegister64(0), i.InputOperand2_64(1));
      __ Cmn(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
      __ PopcntHelper(i.OutputRegister32(), i.InputRegister32(0));
      __ PopcntHelper(i.OutputRegister64(), i.InputRegister64(0));
      __ Cnt(i.OutputSimd128Register().Format(f),
             i.InputSimd128Register(0).Format(f));
      __ Tst(i.InputOrZeroRegister64(0), i.InputOperand2_64(1));
      __ Tst(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
    case kArm64Float32Cmp:
      if (instr->InputAt(1)->IsFPRegister()) {
        __ Fcmp(i.InputFloat32OrFPZeroRegister(0), i.InputFloat32Register(1));
        __ Fcmp(i.InputFloat32Register(0), i.InputFloat32(1));
    case kArm64Float32Add:
      __ Fadd(i.OutputFloat32Register(), i.InputFloat32Register(0),
              i.InputFloat32Register(1));
    case kArm64Float32Sub:
      __ Fsub(i.OutputFloat32Register(), i.InputFloat32Register(0),
              i.InputFloat32Register(1));
    case kArm64Float32Mul:
      __ Fmul(i.OutputFloat32Register(), i.InputFloat32Register(0),
              i.InputFloat32Register(1));
    case kArm64Float32Div:
      __ Fdiv(i.OutputFloat32Register(), i.InputFloat32Register(0),
              i.InputFloat32Register(1));
    case kArm64Float32Abs:
      __ Fabs(i.OutputFloat32Register(), i.InputFloat32Register(0));
    case kArm64Float32Abd:
      __ Fabd(i.OutputFloat32Register(), i.InputFloat32Register(0),
              i.InputFloat32Register(1));
    case kArm64Float32Neg:
      __ Fneg(i.OutputFloat32Register(), i.InputFloat32Register(0));
    case kArm64Float32Sqrt:
      __ Fsqrt(i.OutputFloat32Register(), i.InputFloat32Register(0));
    case kArm64Float32Fnmul: {
      __ Fnmul(i.OutputFloat32Register(), i.InputFloat32Register(0),
               i.InputFloat32Register(1));
    case kArm64Float64Cmp:
      if (instr->InputAt(1)->IsFPRegister()) {
        __ Fcmp(i.InputFloat64OrFPZeroRegister(0), i.InputDoubleRegister(1));
        __ Fcmp(i.InputFloat64Register(0), i.InputDouble(1));
    case kArm64Float64Add:
      __ Fadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
    case kArm64Float64Sub:
      __ Fsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
    case kArm64Float64Mul:
      __ Fmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
    case kArm64Float64Div:
      __ Fdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
    case kArm64Float64Mod: {
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
    case kArm64Float32Max: {
      __ Fmax(i.OutputFloat32Register(), i.InputFloat32Register(0),
              i.InputFloat32Register(1));
    case kArm64Float64Max: {
      __ Fmax(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
    case kArm64Float32Min: {
      __ Fmin(i.OutputFloat32Register(), i.InputFloat32Register(0),
              i.InputFloat32Register(1));
    case kArm64Float64Min: {
      __ Fmin(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
    case kArm64Float64Abs:
      __ Fabs(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
    case kArm64Float64Abd:
      __ Fabd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
    case kArm64Float64Neg:
      __ Fneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
    case kArm64Float64Sqrt:
      __ Fsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
    case kArm64Float64Fnmul:
      __ Fnmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
    case kArm64Float32ToFloat64:
      __ Fcvt(i.OutputDoubleRegister(), i.InputDoubleRegister(0).S());
    case kArm64Float64ToFloat32:
      __ Fcvt(i.OutputDoubleRegister().S(), i.InputDoubleRegister(0));
    case kArm64Float64ToFloat16RawBits: {
      VRegister tmp_dst = i.TempDoubleRegister(0);
      __ Fcvt(tmp_dst.H(), i.InputDoubleRegister(0));
      __ Fmov(i.OutputRegister32(), tmp_dst.S());
    case kArm64Float16RawBitsToFloat64: {
      VRegister tmp_dst = i.TempDoubleRegister(0);
      __ Fmov(tmp_dst.S(), i.InputRegister32(0));
      __ Fcvt(i.OutputDoubleRegister(), tmp_dst.H());
    case kArm64Float32ToInt32: {
      __ Fcvtzs(i.OutputRegister32(), i.InputFloat32Register(0));
      if (set_overflow_to_min_i32) {
        __ Cmn(i.OutputRegister32(), 1);
        __ Csinc(i.OutputRegister32(), i.OutputRegister32(),
                 i.OutputRegister32(), vc);
    case kArm64Float64ToInt32:
      __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
      if (i.OutputCount() > 1) {
        __ Fcmp(i.InputDoubleRegister(0), static_cast<double>(INT32_MIN));
        __ Cset(i.OutputRegister(1).W(), ge);
        __ Fcmp(i.InputDoubleRegister(0), static_cast<double>(INT32_MAX) + 1);
        __ CmovX(i.OutputRegister(1), xzr, ge);
    case kArm64Float32ToUint32: {
      __ Fcvtzu(i.OutputRegister32(), i.InputFloat32Register(0));
      if (set_overflow_to_min_u32) {
        __ Cmn(i.OutputRegister32(), 1);
        __ Adc(i.OutputRegister32(), i.OutputRegister32(), Operand(0));
    case kArm64Float64ToUint32:
      __ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0));
      if (i.OutputCount() > 1) {
        __ Fcmp(i.InputDoubleRegister(0), -1.0);
        __ Cset(i.OutputRegister(1).W(), gt);
        __ Fcmp(i.InputDoubleRegister(0), static_cast<double>(UINT32_MAX) + 1);
        __ CmovX(i.OutputRegister(1), xzr, ge);
    case kArm64Float32ToInt64:
      __ Fcvtzs(i.OutputRegister64(), i.InputFloat32Register(0));
      if (i.OutputCount() > 1) {
        __ Fcmp(i.InputFloat32Register(0), static_cast<float>(INT64_MIN));
        __ Cset(i.OutputRegister(1), vc);
    case kArm64Float64ToInt64: {
      __ Fcvtzs(i.OutputRegister(0), i.InputDoubleRegister(0));
      if (set_overflow_to_min_i64) {
        __ Cmn(i.OutputRegister64(), 1);
        __ Csinc(i.OutputRegister64(), i.OutputRegister64(),
                 i.OutputRegister64(), vc);
      } else if (i.OutputCount() > 1) {
        __ Fcmp(i.InputDoubleRegister(0), static_cast<double>(INT64_MIN));
        __ Cset(i.OutputRegister(1), vc);
    case kArm64Float32ToUint64:
      __ Fcvtzu(i.OutputRegister64(), i.InputFloat32Register(0));
      if (i.OutputCount() > 1) {
        __ Fcmp(i.InputFloat32Register(0), -1.0);
        __ Cset(i.OutputRegister(1), ne);
    case kArm64Float64ToUint64:
      __ Fcvtzu(i.OutputRegister64(), i.InputDoubleRegister(0));
      if (i.OutputCount() > 1) {
        __ Fcmp(i.InputDoubleRegister(0), -1.0);
        __ Cset(i.OutputRegister(1), ne);
    case kArm64Int32ToFloat32:
      __ Scvtf(i.OutputFloat32Register(), i.InputRegister32(0));
    case kArm64Int32ToFloat64:
      __ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
    case kArm64Int64ToFloat32:
      __ Scvtf(i.OutputDoubleRegister().S(), i.InputRegister64(0));
    case kArm64Int64ToFloat64:
      __ Scvtf(i.OutputDoubleRegister(), i.InputRegister64(0));
    case kArm64Uint32ToFloat32:
      __ Ucvtf(i.OutputFloat32Register(), i.InputRegister32(0));
    case kArm64Uint32ToFloat64:
      __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
    case kArm64Uint64ToFloat32:
      __ Ucvtf(i.OutputDoubleRegister().S(), i.InputRegister64(0));
    case kArm64Uint64ToFloat64:
      __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister64(0));
    case kArm64Float64ExtractLowWord32:
      __ Fmov(i.OutputRegister32(), i.InputFloat32Register(0));
    case kArm64Float64ExtractHighWord32:
      __ Umov(i.OutputRegister32(), i.InputFloat64Register(0).V2S(), 1);
    case kArm64Float64InsertLowWord32:
      DCHECK_EQ(i.OutputFloat64Register(), i.InputFloat64Register(0));
      __ Ins(i.OutputFloat64Register().V2S(), 0, i.InputRegister32(1));
    case kArm64Float64InsertHighWord32:
      DCHECK_EQ(i.OutputFloat64Register(), i.InputFloat64Register(0));
      __ Ins(i.OutputFloat64Register().V2S(), 1, i.InputRegister32(1));
    case kArm64Float64MoveU64:
      __ Fmov(i.OutputFloat64Register(), i.InputRegister(0));
    case kArm64Float64SilenceNaN:
      __ CanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
    case kArm64U64MoveFloat64:
      __ Fmov(i.OutputRegister(), i.InputDoubleRegister(0));
      __ Ldrb(i.OutputRegister(), i.MemoryOperand());
      __ Ldrsb(i.OutputRegister(), i.MemoryOperand());
      __ Ldrsb(i.OutputRegister32(), i.MemoryOperand());
      __ Strb(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
      __ Ldrh(i.OutputRegister(), i.MemoryOperand());
      __ Ldrsh(i.OutputRegister(), i.MemoryOperand());
      __ Ldrsh(i.OutputRegister32(), i.MemoryOperand());
      __ Strh(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
      __ Ldrsw(i.OutputRegister(), i.MemoryOperand());
      __ Ldr(i.OutputRegister32(), i.MemoryOperand());
      __ Str(i.InputOrZeroRegister32(0), i.MemoryOperand(1));
    case kArm64StrWPair:
      __ Stp(i.InputOrZeroRegister32(0), i.InputOrZeroRegister32(1),
             i.MemoryOperand(2));
      __ Ldr(i.OutputRegister(), i.MemoryOperand());
    case kArm64LdrDecompressTaggedSigned:
      __ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
    case kArm64LdrDecompressTagged:
      __ DecompressTagged(i.OutputRegister(), i.MemoryOperand());
    case kArm64LdrDecompressProtected:
      __ DecompressProtected(i.OutputRegister(), i.MemoryOperand());
    case kArm64LdarDecompressTaggedSigned:
      __ AtomicDecompressTaggedSigned(i.OutputRegister(), i.InputRegister(0),
                                      i.InputRegister(1), i.TempRegister(0));
    case kArm64LdarDecompressTagged:
      __ AtomicDecompressTagged(i.OutputRegister(), i.InputRegister(0),
                                i.InputRegister(1), i.TempRegister(0));
    case kArm64LdrDecodeSandboxedPointer:
      __ LoadSandboxedPointerField(i.OutputRegister(), i.MemoryOperand());
      __ Str(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
      __ Stp(i.InputOrZeroRegister64(0), i.InputOrZeroRegister64(1),
             i.MemoryOperand(2));
    case kArm64StrCompressTagged:
      __ StoreTaggedField(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
    case kArm64StlrCompressTagged:
      __ AtomicStoreTaggedField(i.InputRegister(2), i.InputRegister(0),
                                i.InputRegister(1), i.TempRegister(0));
    case kArm64StrIndirectPointer:
      __ StoreIndirectPointerField(i.InputOrZeroRegister64(0),
                                   i.MemoryOperand(1));
    case kArm64StrEncodeSandboxedPointer:
      __ StoreSandboxedPointerField(i.InputOrZeroRegister64(0),
                                    i.MemoryOperand(1));
      __ Ldr(i.OutputDoubleRegister().H(), i.MemoryOperand());
      __ Fcvt(i.OutputDoubleRegister().S(), i.OutputDoubleRegister().H());
      __ Fcvt(i.InputFloat32OrZeroRegister(0).H(),
              i.InputFloat32OrZeroRegister(0).S());
      __ Str(i.InputFloat32OrZeroRegister(0).H(), i.MemoryOperand(1));
      __ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());
      __ Str(i.InputFloat32OrZeroRegister(0), i.MemoryOperand(1));
      __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
      __ Str(i.InputFloat64OrZeroRegister(0), i.MemoryOperand(1));
      __ Ldr(i.OutputSimd128Register(), i.MemoryOperand());
      __ Str(i.InputSimd128Register(0), i.MemoryOperand(1));
    case kAtomicLoadInt8:
      __ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
    case kAtomicLoadUint8:
    case kAtomicLoadInt16:
      __ Sxth(i.OutputRegister(0), i.OutputRegister(0));
    case kAtomicLoadUint16:
    case kAtomicLoadWord32:
    case kArm64Word64AtomicLoadUint64:
    case kAtomicStoreWord8:
    case kAtomicStoreWord16:
    case kAtomicStoreWord32:
    case kArm64Word64AtomicStoreWord64:
    case kAtomicExchangeInt8:
      __ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
    case kAtomicExchangeUint8:
    case kAtomicExchangeInt16:
      __ Sxth(i.OutputRegister(0), i.OutputRegister(0));
    case kAtomicExchangeUint16:
    case kAtomicExchangeWord32:
    case kArm64Word64AtomicExchangeUint64:
    case kAtomicCompareExchangeInt8:
      __ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
    case kAtomicCompareExchangeUint8:
    case kAtomicCompareExchangeInt16:
      __ Sxth(i.OutputRegister(0), i.OutputRegister(0));
    case kAtomicCompareExchangeUint16:
    case kAtomicCompareExchangeWord32:
    case kArm64Word64AtomicCompareExchangeUint64:
    case kAtomicSubInt8:
      __ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
    case kAtomicSubUint8:
    case kAtomicSubInt16:
      __ Sxth(i.OutputRegister(0), i.OutputRegister(0));
    case kAtomicSubUint16:
    case kAtomicSubWord32:
    case kArm64Word64AtomicSubUint64:
    case kAtomicAndInt8:
      __ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
    case kAtomicAndUint8:
    case kAtomicAndInt16:
      __ Sxth(i.OutputRegister(0), i.OutputRegister(0));
    case kAtomicAndUint16:
    case kAtomicAndWord32:
    case kArm64Word64AtomicAndUint64:
#define ATOMIC_BINOP_CASE(op, inst, lse_instr) \
  case kAtomic##op##Int8: \
    ASSEMBLE_ATOMIC_BINOP(b, inst, lse_instr, Register32); \
    __ Sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
  case kAtomic##op##Uint8: \
    ASSEMBLE_ATOMIC_BINOP(b, inst, lse_instr, Register32); \
  case kAtomic##op##Int16: \
    ASSEMBLE_ATOMIC_BINOP(h, inst, lse_instr, Register32); \
    __ Sxth(i.OutputRegister(0), i.OutputRegister(0)); \
  case kAtomic##op##Uint16: \
    ASSEMBLE_ATOMIC_BINOP(h, inst, lse_instr, Register32); \
  case kAtomic##op##Word32: \
    ASSEMBLE_ATOMIC_BINOP(, inst, lse_instr, Register32); \
  case kArm64Word64Atomic##op##Uint64: \
    ASSEMBLE_ATOMIC_BINOP(, inst, lse_instr, Register); \
#undef ATOMIC_BINOP_CASE
#undef ASSEMBLE_SHIFT
#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
#undef ASSEMBLE_ATOMIC_STORE_INTEGER
#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
#undef ASSEMBLE_ATOMIC_BINOP
#undef ASSEMBLE_IEEE754_BINOP
#undef ASSEMBLE_IEEE754_UNOP
#if V8_ENABLE_WEBASSEMBLY
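// The SIMD_*_CASE macros expand whole `case ...:` bodies. The LANE_SIZE
// variants derive the vector format from the LaneSizeField encoded in the
// opcode; the DESTRUCTIVE variants require (and DCHECK) that the destination
// register aliases one of the inputs.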
#define SIMD_UNOP_CASE(Op, Instr, FORMAT) \
    __ Instr(i.OutputSimd128Register().V##FORMAT(), \
             i.InputSimd128Register(0).V##FORMAT()); \
#define SIMD_UNOP_LANE_SIZE_CASE(Op, Instr) \
    VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode)); \
    __ Instr(i.OutputSimd128Register().Format(f), \
             i.InputSimd128Register(0).Format(f)); \
#define SIMD_BINOP_CASE(Op, Instr, FORMAT) \
    __ Instr(i.OutputSimd128Register().V##FORMAT(), \
             i.InputSimd128Register(0).V##FORMAT(), \
             i.InputSimd128Register(1).V##FORMAT()); \
#define SIMD_BINOP_LANE_SIZE_CASE(Op, Instr) \
    VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode)); \
    __ Instr(i.OutputSimd128Register().Format(f), \
             i.InputSimd128Register(0).Format(f), \
             i.InputSimd128Register(1).Format(f)); \
#define SIMD_FCM_L_CASE(Op, ImmOp, RegOp) \
    VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode)); \
    if (instr->InputCount() == 1) { \
      __ Fcm##ImmOp(i.OutputSimd128Register().Format(f), \
                    i.InputSimd128Register(0).Format(f), +0.0); \
      __ Fcm##RegOp(i.OutputSimd128Register().Format(f), \
                    i.InputSimd128Register(1).Format(f), \
                    i.InputSimd128Register(0).Format(f)); \
#define SIMD_FCM_G_CASE(Op, ImmOp) \
    VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode)); \
    DCHECK_EQ(instr->InputCount(), 1); \
    __ Fcm##ImmOp(i.OutputSimd128Register().Format(f), \
                  i.InputSimd128Register(0).Format(f), +0.0); \
#define SIMD_CM_L_CASE(Op, ImmOp) \
    VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode)); \
    DCHECK_EQ(instr->InputCount(), 1); \
    __ Cm##ImmOp(i.OutputSimd128Register().Format(f), \
                 i.InputSimd128Register(0).Format(f), 0); \
#define SIMD_CM_G_CASE(Op, CmOp) \
    VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode)); \
    if (instr->InputCount() == 1) { \
      __ Cm##CmOp(i.OutputSimd128Register().Format(f), \
                  i.InputSimd128Register(0).Format(f), 0); \
      __ Cm##CmOp(i.OutputSimd128Register().Format(f), \
                  i.InputSimd128Register(0).Format(f), \
                  i.InputSimd128Register(1).Format(f)); \
#define SIMD_DESTRUCTIVE_BINOP_CASE(Op, Instr, FORMAT) \
    VRegister dst = i.OutputSimd128Register().V##FORMAT(); \
    DCHECK_EQ(dst, i.InputSimd128Register(0).V##FORMAT()); \
    __ Instr(dst, i.InputSimd128Register(1).V##FORMAT(), \
             i.InputSimd128Register(2).V##FORMAT()); \
#define SIMD_DESTRUCTIVE_BINOP_LANE_SIZE_CASE(Op, Instr) \
    VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode)); \
    VRegister dst = i.OutputSimd128Register().Format(f); \
    DCHECK_EQ(dst, i.InputSimd128Register(0).Format(f)); \
    __ Instr(dst, i.InputSimd128Register(1).Format(f), \
             i.InputSimd128Register(2).Format(f)); \
#define SIMD_DESTRUCTIVE_RELAXED_FUSED_CASE(Op, Instr, FORMAT) \
    VRegister dst = i.OutputSimd128Register().V##FORMAT(); \
    DCHECK_EQ(dst, i.InputSimd128Register(2).V##FORMAT()); \
    __ Instr(dst, i.InputSimd128Register(0).V##FORMAT(), \
             i.InputSimd128Register(1).V##FORMAT()); \
      SIMD_BINOP_LANE_SIZE_CASE(kArm64FMin, Fmin);
      SIMD_BINOP_LANE_SIZE_CASE(kArm64FMax, Fmax);
      SIMD_UNOP_LANE_SIZE_CASE(kArm64FAbs, Fabs);
      SIMD_UNOP_LANE_SIZE_CASE(kArm64FSqrt, Fsqrt);
      SIMD_BINOP_LANE_SIZE_CASE(kArm64FAdd, Fadd);
      SIMD_BINOP_LANE_SIZE_CASE(kArm64FSub, Fsub);
      SIMD_BINOP_LANE_SIZE_CASE(kArm64FMul, Fmul);
      SIMD_BINOP_LANE_SIZE_CASE(kArm64FDiv, Fdiv);
      SIMD_UNOP_LANE_SIZE_CASE(kArm64FNeg, Fneg);
      SIMD_UNOP_LANE_SIZE_CASE(kArm64IAbs, Abs);
      SIMD_UNOP_LANE_SIZE_CASE(kArm64INeg, Neg);
      SIMD_BINOP_LANE_SIZE_CASE(kArm64RoundingAverageU, Urhadd);
      SIMD_BINOP_LANE_SIZE_CASE(kArm64IMinS, Smin);
      SIMD_BINOP_LANE_SIZE_CASE(kArm64IMaxS, Smax);
      SIMD_BINOP_LANE_SIZE_CASE(kArm64IMinU, Umin);
      SIMD_BINOP_LANE_SIZE_CASE(kArm64IMaxU, Umax);
      SIMD_DESTRUCTIVE_BINOP_LANE_SIZE_CASE(kArm64Mla, Mla);
      SIMD_DESTRUCTIVE_BINOP_LANE_SIZE_CASE(kArm64Mls, Mls);
      __ Sxtl(i.OutputSimd128Register().Format(wide),
              i.InputSimd128Register(0).Format(narrow));
      __ Sxtl2(i.OutputSimd128Register().Format(wide),
               i.InputSimd128Register(0).Format(narrow));
      __ Uxtl(i.OutputSimd128Register().Format(wide),
              i.InputSimd128Register(0).Format(narrow));
      __ Uxtl2(i.OutputSimd128Register().Format(wide),
               i.InputSimd128Register(0).Format(narrow));
    case kArm64F64x2ConvertLowI32x4S: {
      VRegister dst = i.OutputSimd128Register().V2D();
      __ Sxtl(dst, i.InputSimd128Register(0).V2S());
    case kArm64F64x2ConvertLowI32x4U: {
      VRegister dst = i.OutputSimd128Register().V2D();
      __ Uxtl(dst, i.InputSimd128Register(0).V2S());
    case kArm64I32x4TruncSatF64x2SZero: {
      VRegister dst = i.OutputSimd128Register();
      __ Fcvtzs(dst.V2D(), i.InputSimd128Register(0).V2D());
      __ Sqxtn(dst.V2S(), dst.V2D());
    case kArm64I32x4TruncSatF64x2UZero: {
      VRegister dst = i.OutputSimd128Register();
      __ Fcvtzu(dst.V2D(), i.InputSimd128Register(0).V2D());
      __ Uqxtn(dst.V2S(), dst.V2D());
    case kArm64F32x4DemoteF64x2Zero: {
      __ Fcvtn(i.OutputSimd128Register().V2S(),
               i.InputSimd128Register(0).V2D());
    case kArm64F64x2PromoteLowF32x4: {
      __ Fcvtl(i.OutputSimd128Register().V2D(),
               i.InputSimd128Register(0).V2S());
      SIMD_UNOP_CASE(kArm64F16x8SConvertI16x8, Scvtf, 8H);
      SIMD_UNOP_CASE(kArm64F16x8UConvertI16x8, Ucvtf, 8H);
      SIMD_UNOP_CASE(kArm64I16x8UConvertF16x8, Fcvtzu, 8H);
      SIMD_UNOP_CASE(kArm64I16x8SConvertF16x8, Fcvtzs, 8H);
    case kArm64F16x8DemoteF32x4Zero: {
      __ Fcvtn(i.OutputSimd128Register().V4H(),
               i.InputSimd128Register(0).V4S());
    case kArm64F16x8DemoteF64x2Zero: {
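      // There is no vector instruction that narrows f64 lanes directly to
      // f16, so the two double lanes are converted individually with scalar
      // Fcvt and then assembled into the low two half-precision lanes of the
      // result.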
      __ Mov(fp_scratch.D(), i.InputSimd128Register(0).V2D(), 1);
      __ Fcvt(fp_scratch.H(), fp_scratch.D());
      __ Fcvt(i.OutputSimd128Register().H(), i.InputSimd128Register(0).D());
      __ Mov(i.OutputSimd128Register().V8H(), 1, fp_scratch.V8H(), 0);
    case kArm64F32x4PromoteLowF16x8: {
      __ Fcvtl(i.OutputSimd128Register().V4S(),
               i.InputSimd128Register(0).V4H());
    case kArm64FExtractLane: {
      __ Mov(i.OutputSimd128Register().Format(dst_f),
             i.InputSimd128Register(0).Format(src_f), i.InputInt8(1));
        __ Fcvt(i.OutputSimd128Register().S(), i.OutputSimd128Register().H());
    case kArm64FReplaceLane: {
      VRegister dst = i.OutputSimd128Register().Format(f),
                src1 = i.InputSimd128Register(0).Format(f);
        UseScratchRegisterScope scope(masm());
        VRegister tmp = scope.AcquireV(kFormat8H);
        __ Fcvt(tmp.H(), i.InputSimd128Register(2).S());
        __ Mov(dst, i.InputInt8(1), tmp.Format(f), 0);
        __ Mov(dst, i.InputInt8(1), i.InputSimd128Register(2).Format(f), 0);
      SIMD_FCM_L_CASE(kArm64FEq, eq, eq);
      VRegister dst = i.OutputSimd128Register().Format(f);
      if (instr->InputCount() == 1) {
        __ Fcmeq(dst, i.InputSimd128Register(0).Format(f), +0.0);
        __ Fcmeq(dst, i.InputSimd128Register(0).Format(f),
                 i.InputSimd128Register(1).Format(f));
      SIMD_FCM_L_CASE(kArm64FLt, lt, gt);
      SIMD_FCM_L_CASE(kArm64FLe, le, ge);
      SIMD_FCM_G_CASE(kArm64FGt, gt);
      SIMD_FCM_G_CASE(kArm64FGe, ge);
      SIMD_DESTRUCTIVE_RELAXED_FUSED_CASE(kArm64F64x2Qfma, Fmla, 2D);
      SIMD_DESTRUCTIVE_RELAXED_FUSED_CASE(kArm64F64x2Qfms, Fmls, 2D);
    case kArm64F64x2Pmin: {
      VRegister dst = i.OutputSimd128Register().V2D();
      VRegister lhs = i.InputSimd128Register(0).V2D();
      VRegister rhs = i.InputSimd128Register(1).V2D();
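      // Pseudo-min/max lowering: Fcmgt produces a per-lane mask and Bsl then
      // selects rhs where the comparison is true and lhs otherwise, matching
      // the Wasm pmin/pmax "b < a ? b : a" selection semantics.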
      __ Fcmgt(dst, lhs, rhs);
      __ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B());
    case kArm64F64x2Pmax: {
      VRegister dst = i.OutputSimd128Register().V2D();
      VRegister lhs = i.InputSimd128Register(0).V2D();
      VRegister rhs = i.InputSimd128Register(1).V2D();
      __ Fcmgt(dst, rhs, lhs);
      __ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B());
      SIMD_UNOP_CASE(kArm64F32x4SConvertI32x4, Scvtf, 4S);
      SIMD_UNOP_CASE(kArm64F32x4UConvertI32x4, Ucvtf, 4S);
    case kArm64FMulElement: {
      __ Fmul(i.OutputSimd128Register().Format(v_f),
              i.InputSimd128Register(0).Format(v_f),
              i.InputSimd128Register(1).Format(s_f), i.InputInt8(2));
      SIMD_DESTRUCTIVE_RELAXED_FUSED_CASE(kArm64F32x4Qfma, Fmla, 4S);
      SIMD_DESTRUCTIVE_RELAXED_FUSED_CASE(kArm64F32x4Qfms, Fmls, 4S);
    case kArm64F32x4Pmin: {
      VRegister dst = i.OutputSimd128Register().V4S();
      VRegister lhs = i.InputSimd128Register(0).V4S();
      VRegister rhs = i.InputSimd128Register(1).V4S();
      __ Fcmgt(dst, lhs, rhs);
      __ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B());
    case kArm64F32x4Pmax: {
      VRegister dst = i.OutputSimd128Register().V4S();
      VRegister lhs = i.InputSimd128Register(0).V4S();
      VRegister rhs = i.InputSimd128Register(1).V4S();
      __ Fcmgt(dst, rhs, lhs);
      __ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B());
    case kArm64F16x8Pmin: {
      VRegister dst = i.OutputSimd128Register().V8H();
      VRegister lhs = i.InputSimd128Register(0).V8H();
      VRegister rhs = i.InputSimd128Register(1).V8H();
      __ Fcmgt(dst, lhs, rhs);
      __ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B());
    case kArm64F16x8Pmax: {
      VRegister dst = i.OutputSimd128Register().V8H();
      VRegister lhs = i.InputSimd128Register(0).V8H();
      VRegister rhs = i.InputSimd128Register(1).V8H();
      __ Fcmgt(dst, rhs, lhs);
      __ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B());
      SIMD_DESTRUCTIVE_RELAXED_FUSED_CASE(kArm64F16x8Qfma, Fmla, 8H);
      SIMD_DESTRUCTIVE_RELAXED_FUSED_CASE(kArm64F16x8Qfms, Fmls, 8H);
    case kArm64IExtractLane: {
          f == kFormat2D ? i.OutputRegister64() : i.OutputRegister32();
      __ Mov(dst, i.InputSimd128Register(0).Format(f), i.InputInt8(1));
    case kArm64IReplaceLane: {
      VRegister dst = i.OutputSimd128Register().Format(f),
                src1 = i.InputSimd128Register(0).Format(f);
          f == kFormat2D ? i.InputRegister64(2) : i.InputRegister32(2);
      __ Mov(dst, i.InputInt8(1), src2);
    case kArm64I64x2Shl: {
    case kArm64I64x2ShrS: {
      SIMD_BINOP_LANE_SIZE_CASE(kArm64IAdd, Add);
      SIMD_BINOP_LANE_SIZE_CASE(kArm64ISub, Sub);
    case kArm64I64x2Mul: {
      UseScratchRegisterScope scope(masm());
      VRegister dst = i.OutputSimd128Register();
      VRegister src1 = i.InputSimd128Register(0);
      VRegister src2 = i.InputSimd128Register(1);
      VRegister tmp1 = scope.AcquireSameSizeAs(dst);
      VRegister tmp2 = scope.AcquireSameSizeAs(dst);
      VRegister tmp3 = i.ToSimd128Register(instr->TempAt(0));
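      // arm64 has no 64x2 integer multiply, so the product is built from
      // 32x32->64 pieces: the cross terms come from a lane-swapped Mul plus a
      // pairwise add, shifted up by 32 with Shll, and the low 32x32 product
      // is accumulated on top with Umlal.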
      __ Rev64(tmp2.V4S(), src2.V4S());
      __ Mul(tmp2.V4S(), tmp2.V4S(), src1.V4S());
      __ Xtn(tmp1.V2S(), src1.V2D());
      __ Addp(tmp2.V4S(), tmp2.V4S(), tmp2.V4S());
      __ Xtn(tmp3.V2S(), src2.V2D());
      __ Shll(dst.V2D(), tmp2.V2S(), 32);
      __ Umlal(dst.V2D(), tmp3.V2S(), tmp1.V2S());
      SIMD_CM_G_CASE(kArm64IEq, eq);
      VRegister dst = i.OutputSimd128Register().Format(f);
      if (instr->InputCount() == 1) {
        __ Cmeq(dst, i.InputSimd128Register(0).Format(f), 0);
        __ Cmeq(dst, i.InputSimd128Register(0).Format(f),
                i.InputSimd128Register(1).Format(f));
      SIMD_CM_L_CASE(kArm64ILtS, lt);
      SIMD_CM_L_CASE(kArm64ILeS, le);
      SIMD_CM_G_CASE(kArm64IGtS, gt);
      SIMD_CM_G_CASE(kArm64IGeS, ge);
    case kArm64I64x2ShrU: {
    case kArm64I64x2BitMask: {
      __ I64x2BitMask(i.OutputRegister32(), i.InputSimd128Register(0));
      SIMD_UNOP_CASE(kArm64I32x4SConvertF32x4, Fcvtzs, 4S);
    case kArm64I32x4Shl: {
    case kArm64I32x4ShrS: {
      SIMD_BINOP_CASE(kArm64I32x4Mul, Mul, 4S);
      SIMD_UNOP_CASE(kArm64I32x4UConvertF32x4, Fcvtzu, 4S);
    case kArm64I32x4ShrU: {
      SIMD_BINOP_LANE_SIZE_CASE(kArm64IGtU, Cmhi);
      SIMD_BINOP_LANE_SIZE_CASE(kArm64IGeU, Cmhs);
    case kArm64I32x4BitMask: {
      __ I32x4BitMask(i.OutputRegister32(), i.InputSimd128Register(0));
    case kArm64I8x16Addv: {
      __ Addv(i.OutputSimd128Register().B(), i.InputSimd128Register(0).V16B());
    case kArm64I16x8Addv: {
      __ Addv(i.OutputSimd128Register().H(), i.InputSimd128Register(0).V8H());
    case kArm64I32x4Addv: {
      __ Addv(i.OutputSimd128Register().S(), i.InputSimd128Register(0).V4S());
    case kArm64I64x2AddPair: {
      __ Addp(i.OutputSimd128Register().D(), i.InputSimd128Register(0).V2D());
    case kArm64F32x4AddReducePairwise: {
      UseScratchRegisterScope scope(masm());
      VRegister tmp = scope.AcquireV(kFormat4S);
      __ Faddp(tmp.V4S(), i.InputSimd128Register(0).V4S(),
               i.InputSimd128Register(0).V4S());
      __ Faddp(i.OutputSimd128Register().S(), tmp.V2S());
    case kArm64F64x2AddPair: {
      __ Faddp(i.OutputSimd128Register().D(), i.InputSimd128Register(0).V2D());
    case kArm64I32x4DotI16x8S: {
      UseScratchRegisterScope scope(masm());
      VRegister lhs = i.InputSimd128Register(0);
      VRegister rhs = i.InputSimd128Register(1);
      VRegister tmp1 = scope.AcquireV(kFormat4S);
      VRegister tmp2 = scope.AcquireV(kFormat4S);
      __ Smull(tmp1, lhs.V4H(), rhs.V4H());
      __ Smull2(tmp2, lhs.V8H(), rhs.V8H());
      __ Addp(i.OutputSimd128Register().V4S(), tmp1, tmp2);
    case kArm64I16x8DotI8x16S: {
      UseScratchRegisterScope scope(masm());
      VRegister lhs = i.InputSimd128Register(0);
      VRegister rhs = i.InputSimd128Register(1);
      VRegister tmp1 = scope.AcquireV(kFormat8H);
      VRegister tmp2 = scope.AcquireV(kFormat8H);
      __ Smull(tmp1, lhs.V8B(), rhs.V8B());
      __ Smull2(tmp2, lhs.V16B(), rhs.V16B());
      __ Addp(i.OutputSimd128Register().V8H(), tmp1, tmp2);
    case kArm64I32x4DotI8x16AddS: {
        CpuFeatureScope scope(masm(), DOTPROD);
        DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(2));
        __ Sdot(i.InputSimd128Register(2).V4S(),
                i.InputSimd128Register(0).V16B(),
                i.InputSimd128Register(1).V16B());
        UseScratchRegisterScope scope(masm());
        VRegister lhs = i.InputSimd128Register(0);
        VRegister rhs = i.InputSimd128Register(1);
        VRegister tmp1 = scope.AcquireV(kFormat8H);
        VRegister tmp2 = scope.AcquireV(kFormat8H);
        __ Smull(tmp1, lhs.V8B(), rhs.V8B());
        __ Smull2(tmp2, lhs.V16B(), rhs.V16B());
        __ Addp(tmp1, tmp1, tmp2);
        __ Saddlp(tmp1.V4S(), tmp1);
        __ Add(i.OutputSimd128Register().V4S(), tmp1.V4S(),
               i.InputSimd128Register(2).V4S());
    case kArm64IExtractLaneU: {
      __ Umov(i.OutputRegister32(), i.InputSimd128Register(0).Format(f),
    case kArm64IExtractLaneS: {
      __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).Format(f),
    case kArm64I16x8Shl: {
    case kArm64I16x8ShrS: {
    case kArm64I16x8SConvertI32x4: {
      VRegister dst = i.OutputSimd128Register(),
                src0 = i.InputSimd128Register(0),
                src1 = i.InputSimd128Register(1);
      UseScratchRegisterScope scope(masm());
      VRegister temp = scope.AcquireV(kFormat4S);
      if (dst == src1) {
        __ Mov(temp, src1.V4S());
        src1 = temp;
      }
      __ Sqxtn(dst.V4H(), src0.V4S());
      __ Sqxtn2(dst.V8H(), src1.V4S());
      break;
    }
      SIMD_BINOP_LANE_SIZE_CASE(kArm64IAddSatS, Sqadd);
      SIMD_BINOP_LANE_SIZE_CASE(kArm64ISubSatS, Sqsub);
      SIMD_BINOP_CASE(kArm64I16x8Mul, Mul, 8H);
    case kArm64I16x8ShrU: {
    case kArm64I16x8UConvertI32x4: {
      VRegister dst = i.OutputSimd128Register(),
                src0 = i.InputSimd128Register(0),
                src1 = i.InputSimd128Register(1);
      UseScratchRegisterScope scope(masm());
      VRegister temp = scope.AcquireV(kFormat4S);
      if (dst == src1) {
        __ Mov(temp, src1.V4S());
        src1 = temp;
      }
      __ Sqxtun(dst.V4H(), src0.V4S());
      __ Sqxtun2(dst.V8H(), src1.V4S());
      break;
    }
      SIMD_BINOP_LANE_SIZE_CASE(kArm64IAddSatU, Uqadd);
      SIMD_BINOP_LANE_SIZE_CASE(kArm64ISubSatU, Uqsub);
      SIMD_BINOP_CASE(kArm64I16x8Q15MulRSatS, Sqrdmulh, 8H);
    case kArm64I16x8BitMask: {
      __ I16x8BitMask(i.OutputRegister32(), i.InputSimd128Register(0));
      break;
    }
    case kArm64I8x16Shl: {
    case kArm64I8x16ShrS: {
    case kArm64I8x16SConvertI16x8: {
      VRegister dst = i.OutputSimd128Register(),
                src0 = i.InputSimd128Register(0),
                src1 = i.InputSimd128Register(1);
      UseScratchRegisterScope scope(masm());
      VRegister temp = scope.AcquireV(kFormat8H);
      if (dst == src1) {
        __ Mov(temp, src1.V8H());
        src1 = temp;
      }
      __ Sqxtn(dst.V8B(), src0.V8H());
      __ Sqxtn2(dst.V16B(), src1.V8H());
      break;
    }
    case kArm64I8x16ShrU: {
    case kArm64I8x16UConvertI16x8: {
      VRegister dst = i.OutputSimd128Register(),
                src0 = i.InputSimd128Register(0),
                src1 = i.InputSimd128Register(1);
      UseScratchRegisterScope scope(masm());
      VRegister temp = scope.AcquireV(kFormat8H);
      if (dst == src1) {
        __ Mov(temp, src1.V8H());
        src1 = temp;
      }
      __ Sqxtun(dst.V8B(), src0.V8H());
      __ Sqxtun2(dst.V16B(), src1.V8H());
      break;
    }
    case kArm64I8x16BitMask: {
      VRegister temp = NoVReg;
      if (CpuFeatures::IsSupported(PMULL1Q)) {
        temp = i.TempSimd128Register(0);
      }
      __ I8x16BitMask(i.OutputRegister32(), i.InputSimd128Register(0), temp);
      break;
    }
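      // The *BitMask helpers implement the wasm bitmask semantics: the most
      // significant bit of every lane is gathered into the low bits of a
      // general-purpose register, lane 0 ending up in bit 0. A rough scalar
      // model for the 16-lane case:
      //   uint32_t mask = 0;
      //   for (int lane = 0; lane < 16; lane++) {
      //     mask |= ((uint32_t{input[lane]} >> 7) & 1) << lane;
      //   }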
    case kArm64S128Const: {
      uint64_t imm1 = make_uint64(i.InputUint32(1), i.InputUint32(0));
      uint64_t imm2 = make_uint64(i.InputUint32(3), i.InputUint32(2));
      __ Movi(i.OutputSimd128Register().V16B(), imm2, imm1);
      break;
    }
      SIMD_BINOP_CASE(kArm64S128And, And, 16B);
      SIMD_BINOP_CASE(kArm64S128Or, Orr, 16B);
      SIMD_BINOP_CASE(kArm64S128Xor, Eor, 16B);
      SIMD_UNOP_CASE(kArm64S128Not, Mvn, 16B);
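      // kArm64S128Const receives the 128-bit immediate as four 32-bit inputs;
      // make_uint64(high, low) packs them into two 64-bit halves so that Movi
      // can materialize the whole constant in one go.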
    case kArm64S128Dup: {
      VRegister dst = i.OutputSimd128Register(),
                src = i.InputSimd128Register(0);
      int lanes = i.InputInt32(1);
      int index = i.InputInt32(2);
      switch (lanes) {
        case 4:
          __ Dup(dst.V4S(), src.V4S(), index);
          break;
        case 8:
          __ Dup(dst.V8H(), src.V8H(), index);
          break;
        case 16:
          __ Dup(dst.V16B(), src.V16B(), index);
          break;
        default:
          UNREACHABLE();
      }
      break;
    }
      SIMD_DESTRUCTIVE_BINOP_CASE(kArm64S128Select, Bsl, 16B);
    case kArm64S128AndNot:
      if (instr->InputAt(1)->IsImmediate()) {
        VRegister dst = i.OutputSimd128Register().Format(f);
        DCHECK_EQ(dst, i.InputSimd128Register(0).Format(f));
        __ Bic(dst, i.InputInt32(1), i.InputInt8(2));
      } else {
        __ Bic(i.OutputSimd128Register().V16B(),
               i.InputSimd128Register(0).V16B(),
               i.InputSimd128Register(1).V16B());
      }
      break;
      int8_t mask = laneSize - 1;
      VRegister dst = i.OutputSimd128Register().Format(f);
      DCHECK_EQ(dst, i.InputSimd128Register(0).Format(f));
      __ Ssra(dst, i.InputSimd128Register(1).Format(f), i.InputInt8(2) & mask);

      int8_t mask = laneSize - 1;
      VRegister dst = i.OutputSimd128Register().Format(f);
      DCHECK_EQ(dst, i.InputSimd128Register(0).Format(f));
      __ Usra(dst, i.InputSimd128Register(1).Format(f), i.InputUint8(2) & mask);
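      // Shift amounts are masked with laneSize - 1 before use, matching the
      // wasm shift semantics where the count is taken modulo the lane width
      // (a 16-bit lane only looks at the low 4 bits of the count, and so on).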
    case kArm64S8x2Shuffle: {
    case kArm64S16x1Shuffle: {
    case kArm64S16x2Shuffle: {
    case kArm64S32x1Shuffle: {
    case kArm64S32x2Shuffle: {
    case kArm64S32x4Shuffle: {
    case kArm64S64x1Shuffle: {
    case kArm64S64x2Shuffle: {
      SIMD_BINOP_CASE(kArm64S64x2UnzipLeft, Uzp1, 2D);
      SIMD_BINOP_CASE(kArm64S64x2UnzipRight, Uzp2, 2D);
      SIMD_BINOP_CASE(kArm64S32x4ZipLeft, Zip1, 4S);
      SIMD_BINOP_CASE(kArm64S32x4ZipRight, Zip2, 4S);
      SIMD_BINOP_CASE(kArm64S32x4UnzipLeft, Uzp1, 4S);
      SIMD_BINOP_CASE(kArm64S32x4UnzipRight, Uzp2, 4S);
      SIMD_BINOP_CASE(kArm64S32x4TransposeLeft, Trn1, 4S);
      SIMD_BINOP_CASE(kArm64S32x4TransposeRight, Trn2, 4S);
      SIMD_BINOP_CASE(kArm64S16x8ZipLeft, Zip1, 8H);
      SIMD_BINOP_CASE(kArm64S16x8ZipRight, Zip2, 8H);
      SIMD_BINOP_CASE(kArm64S16x8UnzipLeft, Uzp1, 8H);
      SIMD_BINOP_CASE(kArm64S16x8UnzipRight, Uzp2, 8H);
      SIMD_BINOP_CASE(kArm64S16x8TransposeLeft, Trn1, 8H);
      SIMD_BINOP_CASE(kArm64S16x8TransposeRight, Trn2, 8H);
      SIMD_BINOP_CASE(kArm64S8x16ZipLeft, Zip1, 16B);
      SIMD_BINOP_CASE(kArm64S8x16ZipRight, Zip2, 16B);
      SIMD_BINOP_CASE(kArm64S8x16UnzipLeft, Uzp1, 16B);
      SIMD_BINOP_CASE(kArm64S8x16UnzipRight, Uzp2, 16B);
      SIMD_BINOP_CASE(kArm64S8x16TransposeLeft, Trn1, 16B);
      SIMD_BINOP_CASE(kArm64S8x16TransposeRight, Trn2, 16B);
    case kArm64S8x16Concat: {
      __ Ext(i.OutputSimd128Register().V16B(), i.InputSimd128Register(0).V16B(),
             i.InputSimd128Register(1).V16B(), i.InputInt4(2));
      break;
    }
    case kArm64I8x16Swizzle: {
      __ Tbl(i.OutputSimd128Register().V16B(), i.InputSimd128Register(0).V16B(),
             i.InputSimd128Register(1).V16B());
      break;
    }
    case kArm64I8x16Shuffle: {
      VRegister dst = i.OutputSimd128Register().V16B(),
                src0 = i.InputSimd128Register(0).V16B(),
                src1 = i.InputSimd128Register(1).V16B();
      int64_t imm1 = make_uint64(i.InputInt32(3), i.InputInt32(2));
      int64_t imm2 = make_uint64(i.InputInt32(5), i.InputInt32(4));
      DCHECK_EQ(0, (imm1 | imm2) & (src0 == src1 ? 0xF0F0F0F0F0F0F0F0
                                                 : 0xE0E0E0E0E0E0E0E0));
      UseScratchRegisterScope scope(masm());
      VRegister temp = scope.AcquireV(kFormat16B);
      __ Movi(temp, imm2, imm1);
      if (src0 == src1) {
        __ Tbl(dst, src0, temp.V16B());
      } else {
        __ Tbl(dst, src0, src1, temp.V16B());
      }
      break;
    }
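      // The shuffle is lowered to TBL with the 16 lane indices packed into a
      // 128-bit constant (imm2:imm1). TBL writes zero for out-of-range
      // indices, and the DCHECK above verifies that the selection only uses
      // the valid index bits (0-15 for a single-register table, 0-31 when two
      // registers are used).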
    case kArm64S32x4Reverse: {
      VRegister dst = i.OutputSimd128Register().V16B(),
                src = i.InputSimd128Register(0).V16B();
      __ Rev64(dst.V4S(), src.V4S());
      __ Ext(dst.V16B(), dst.V16B(), dst.V16B(), 8);
      break;
    }
      SIMD_UNOP_CASE(kArm64S32x2Reverse, Rev64, 4S);
      SIMD_UNOP_CASE(kArm64S16x4Reverse, Rev64, 8H);
      SIMD_UNOP_CASE(kArm64S16x2Reverse, Rev32, 8H);
      SIMD_UNOP_CASE(kArm64S8x8Reverse, Rev64, 16B);
      SIMD_UNOP_CASE(kArm64S8x4Reverse, Rev32, 16B);
      SIMD_UNOP_CASE(kArm64S8x2Reverse, Rev16, 16B);
    case kArm64LoadSplat: {
      __ ld1r(i.OutputSimd128Register().Format(f), i.MemoryOperand(0));
      break;
    }
    case kArm64LoadLane: {
      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
      int laneidx = i.InputInt8(1);
      __ ld1(i.OutputSimd128Register().Format(f), laneidx, i.MemoryOperand(2));
      break;
    }
    case kArm64StoreLane: {
      int laneidx = i.InputInt8(1);
      __ st1(i.InputSimd128Register(0).Format(f), laneidx, i.MemoryOperand(2));
      break;
    }
    case kArm64S128Load8x8S: {
      __ Ldr(i.OutputSimd128Register().V8B(), i.MemoryOperand(0));
      __ Sxtl(i.OutputSimd128Register().V8H(), i.OutputSimd128Register().V8B());
      break;
    }
    case kArm64S128Load8x8U: {
      __ Ldr(i.OutputSimd128Register().V8B(), i.MemoryOperand(0));
      __ Uxtl(i.OutputSimd128Register().V8H(), i.OutputSimd128Register().V8B());
      break;
    }
    case kArm64S128Load16x4S: {
      __ Ldr(i.OutputSimd128Register().V4H(), i.MemoryOperand(0));
      __ Sxtl(i.OutputSimd128Register().V4S(), i.OutputSimd128Register().V4H());
      break;
    }
    case kArm64S128Load16x4U: {
      __ Ldr(i.OutputSimd128Register().V4H(), i.MemoryOperand(0));
      __ Uxtl(i.OutputSimd128Register().V4S(), i.OutputSimd128Register().V4H());
      break;
    }
    case kArm64S128Load32x2S: {
      __ Ldr(i.OutputSimd128Register().V2S(), i.MemoryOperand(0));
      __ Sxtl(i.OutputSimd128Register().V2D(), i.OutputSimd128Register().V2S());
      break;
    }
    case kArm64S128Load32x2U: {
      __ Ldr(i.OutputSimd128Register().V2S(), i.MemoryOperand(0));
      __ Uxtl(i.OutputSimd128Register().V2D(), i.OutputSimd128Register().V2S());
      break;
    }
    case kArm64S128LoadPairDeinterleave: {
      __ Ld2(i.OutputSimd128Register(0).Format(f),
             i.OutputSimd128Register(1).Format(f), i.MemoryOperand(0));
      break;
    }
    case kArm64I64x2AllTrue: {
      __ I64x2AllTrue(i.OutputRegister32(), i.InputSimd128Register(0));
      break;
    }
    case kArm64V128AnyTrue: {
      UseScratchRegisterScope scope(masm());
      VRegister temp = scope.AcquireV(kFormat4S);
      __ Umaxp(temp, i.InputSimd128Register(0).V4S(),
               i.InputSimd128Register(0).V4S());
      __ Fmov(i.OutputRegister64(), temp.D());
      __ Cmp(i.OutputRegister64(), 0);
      __ Cset(i.OutputRegister32(), ne);
      break;
    }
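      // AnyTrue only needs to know whether any bit is set, so the lane format
      // does not matter: Umaxp folds the 128-bit value to 64 bits, Fmov moves
      // it to a general-purpose register, and Cmp/Cset materialize the
      // boolean result.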
    case kArm64S32x4OneLaneSwizzle: {
      VRegister dst = i.OutputSimd128Register().V4S(),
                src = i.InputSimd128Register(0).V4S();
      int from = i.InputInt32(1);
      int to = i.InputInt32(2);
      __ Mov(dst, to, src, from);
      break;
    }
#define SIMD_REDUCE_OP_CASE(Op, Instr, format, FORMAT)     \
  case Op: {                                               \
    UseScratchRegisterScope scope(masm());                 \
    VRegister temp = scope.AcquireV(format);               \
    __ Instr(temp, i.InputSimd128Register(0).V##FORMAT()); \
    __ Umov(i.OutputRegister32(), temp, 0);                \
    __ Cmp(i.OutputRegister32(), 0);                       \
    __ Cset(i.OutputRegister32(), ne);                     \
    break;                                                 \
  }
      SIMD_REDUCE_OP_CASE(kArm64I32x4AllTrue, Uminv, kFormatS, 4S);
      SIMD_REDUCE_OP_CASE(kArm64I16x8AllTrue, Uminv, kFormatH, 8H);
      SIMD_REDUCE_OP_CASE(kArm64I8x16AllTrue, Uminv, kFormatB, 16B);
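      // The AllTrue reductions use Uminv: the unsigned minimum across lanes is
      // non-zero exactly when every lane is non-zero, so the Cmp/Cset against
      // zero yields the wasm all_true result.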
#undef SIMD_UNOP_CASE
#undef SIMD_UNOP_LANE_SIZE_CASE
#undef SIMD_BINOP_CASE
#undef SIMD_BINOP_LANE_SIZE_CASE
#undef SIMD_DESTRUCTIVE_BINOP_CASE
#undef SIMD_DESTRUCTIVE_BINOP_LANE_SIZE_CASE
#undef SIMD_DESTRUCTIVE_RELAXED_FUSED_CASE
#undef SIMD_REDUCE_OP_CASE
#undef ASSEMBLE_SIMD_SHIFT_LEFT
#undef ASSEMBLE_SIMD_SHIFT_RIGHT
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  Arm64OperandConverter i(this, instr);
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  FlagsCondition condition = branch->condition;
  ArchOpcode opcode = instr->arch_opcode();

  if (opcode == kArm64CompareAndBranch32) {
    switch (condition) {
      case kEqual:
        __ Cbz(i.InputRegister32(0), tlabel);
        break;
      case kNotEqual:
        __ Cbnz(i.InputRegister32(0), tlabel);
        break;
      default:
        UNREACHABLE();
    }
  } else if (opcode == kArm64CompareAndBranch) {
    switch (condition) {
      case kEqual:
        __ Cbz(i.InputRegister64(0), tlabel);
        break;
      case kNotEqual:
        __ Cbnz(i.InputRegister64(0), tlabel);
        break;
      default:
        UNREACHABLE();
    }
  } else if (opcode == kArm64TestAndBranch32) {
    switch (condition) {
      case kEqual:
        __ Tbz(i.InputRegister32(0), i.InputInt5(1), tlabel);
        break;
      case kNotEqual:
        __ Tbnz(i.InputRegister32(0), i.InputInt5(1), tlabel);
        break;
      default:
        UNREACHABLE();
    }
  } else if (opcode == kArm64TestAndBranch) {
    switch (condition) {
      case kEqual:
        __ Tbz(i.InputRegister64(0), i.InputInt6(1), tlabel);
        break;
      case kNotEqual:
        __ Tbnz(i.InputRegister64(0), i.InputInt6(1), tlabel);
        break;
      default:
        UNREACHABLE();
    }
  }
  if (!branch->fallthru) __ B(flabel);
}
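// The fused compare-and-branch opcodes above map to single AArch64
// instructions: Cbz/Cbnz branch on the whole register being zero or non-zero,
// while Tbz/Tbnz branch on a single bit (selected by the immediate input)
// being clear or set, so no explicit compare is needed to set the flags.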
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
                                            BranchInfo* branch) {
  AssembleArchBranch(instr, branch);
}

#if V8_ENABLE_WEBASSEMBLY
void CodeGenerator::AssembleArchTrap(Instruction* instr,
                                     FlagsCondition condition) {
  auto ool = zone()->New<WasmOutOfLineTrap>(this, instr);
  Label* tlabel = ool->entry();
  Condition cc = FlagsConditionToCondition(condition);
  __ B(cc, tlabel);
}
#endif  // V8_ENABLE_WEBASSEMBLY

void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  Arm64OperandConverter i(this, instr);
void AssembleConditionalCompareChain(Instruction* instr, int64_t num_ccmps,
                                     size_t ccmp_base_index,
                                     CodeGenerator* gen) {
  Arm64OperandConverter i(gen, instr);
  for (unsigned n = 0; n < num_ccmps; ++n) {
    size_t default_condition_index =
        ccmp_base_index + kCcmpOffsetOfDefaultFlags;
    size_t compare_condition_index =
        ccmp_base_index + kCcmpOffsetOfCompareCondition;
    FlagsCondition default_condition = static_cast<FlagsCondition>(
        i.ToConstant(instr->InputAt(default_condition_index)).ToInt64());
    FlagsCondition compare_condition = static_cast<FlagsCondition>(
        i.ToConstant(instr->InputAt(compare_condition_index)).ToInt64());
    if (code == kArm64Cmp) {
      gen->masm()->Ccmp(i.InputRegister64(compare_lhs_index),
                        i.InputOperand64(compare_rhs_index), default_flags,
                        FlagsConditionToCondition(compare_condition));
    } else if (code == kArm64Cmp32) {
      gen->masm()->Ccmp(i.InputRegister32(compare_lhs_index),
                        i.InputOperand32(compare_rhs_index), default_flags,
                        FlagsConditionToCondition(compare_condition));
    } else if (code == kArm64Float64Cmp) {
      gen->masm()->Fccmp(i.InputFloat64OrFPZeroRegister(compare_lhs_index),
                         i.InputFloat64OrFPZeroRegister(compare_rhs_index),
                         default_flags,
                         FlagsConditionToCondition(compare_condition));
    } else {
      gen->masm()->Fccmp(i.InputFloat32OrFPZeroRegister(compare_lhs_index),
                         i.InputFloat32OrFPZeroRegister(compare_rhs_index),
                         default_flags,
                         FlagsConditionToCondition(compare_condition));
    }
  }
}
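// A conditional-compare chain flattens a short-circuit sequence of compares
// into straight-line code: each Ccmp/Fccmp only performs its compare when the
// previous condition held, and otherwise forces the flags to the supplied
// default NZCV value, so the final flags describe the whole chain without any
// branches.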
void CodeGenerator::AssembleArchConditionalBoolean(Instruction* instr) {
  Arm64OperandConverter i(this, instr);
  size_t num_ccmps_index =
      instr->InputCount() - kConditionalSetEndOffsetOfNumCcmps;
  size_t set_condition_index =
      instr->InputCount() - kConditionalSetEndOffsetOfCondition;
  int64_t num_ccmps = i.ToConstant(instr->InputAt(num_ccmps_index)).ToInt64();
  size_t ccmp_base_index = set_condition_index - kNumCcmpOperands * num_ccmps;
  FlagsCondition set_condition = static_cast<FlagsCondition>(
      i.ToConstant(instr->InputAt(set_condition_index)).ToInt64());
}
void CodeGenerator::AssembleArchConditionalBranch(Instruction* instr,
                                                  BranchInfo* branch) {
  Arm64OperandConverter i(this, instr);
  size_t num_ccmps_index =
      instr->InputCount() - kConditionalBranchEndOffsetOfNumCcmps;
  int64_t num_ccmps = i.ToConstant(instr->InputAt(num_ccmps_index)).ToInt64();
  size_t ccmp_base_index = instr->InputCount() -
  __ B(cc, branch->true_label);
  if (!branch->fallthru) __ B(branch->false_label);
}
void CodeGenerator::AssembleArchSelect(Instruction* instr,
                                       FlagsCondition condition) {
  Arm64OperandConverter i(this, instr);
  // The result register is always the last output of the instruction.
  size_t output_index = instr->OutputCount() - 1;
  MachineRepresentation rep =
      LocationOperand::cast(instr->OutputAt(output_index))->representation();
  Condition cc = FlagsConditionToCondition(condition);
  size_t true_value_index = instr->InputCount() - 2;
  size_t false_value_index = instr->InputCount() - 1;
  if (rep == MachineRepresentation::kFloat32) {
    __ Fcsel(i.OutputFloat32Register(output_index),
             i.InputFloat32OrFPZeroRegister(true_value_index),
             i.InputFloat32OrFPZeroRegister(false_value_index), cc);
  } else if (rep == MachineRepresentation::kFloat64) {
    __ Fcsel(i.OutputFloat64Register(output_index),
             i.InputFloat64OrFPZeroRegister(true_value_index),
             i.InputFloat64OrFPZeroRegister(false_value_index), cc);
  } else if (rep == MachineRepresentation::kWord32) {
    __ Csel(i.OutputRegister32(output_index),
            i.InputOrZeroRegister32(true_value_index),
            i.InputOrZeroRegister32(false_value_index), cc);
  } else {
    __ Csel(i.OutputRegister64(output_index),
            i.InputOrZeroRegister64(true_value_index),
            i.InputOrZeroRegister64(false_value_index), cc);
  }
}
void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
  Arm64OperandConverter i(this, instr);
  Register input = i.InputRegister32(0);
  std::vector<std::pair<int32_t, Label*>> cases;
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
  }
  AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
                                      cases.data() + cases.size());
}
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  Arm64OperandConverter i(this, instr);
  UseScratchRegisterScope scope(masm());
  Register input = i.InputRegister32(0);
  size_t const case_count = instr->InputCount() - 2;
  for (size_t index = 0; index < case_count; ++index) {
  Label* fallthrough = GetLabel(i.InputRpo(1));
  __ Cmp(input, Immediate(case_count));
  __ B(fallthrough, hs);

  const size_t jump_table_size = targets.size() * kInt32Size;
  MacroAssembler::BlockPoolsScope no_pool_inbetween(masm(), jump_table_size);
  for (auto* target : targets) {
    __ dc32(target->pos() - table_pos);
  }
}
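// The jump table is emitted inline as 32-bit entries, each holding the
// distance from the start of the table to its target label; BlockPoolsScope
// keeps the constant pool from being dumped in the middle of the table so the
// offsets stay contiguous.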
void CodeGenerator::FinishFrame(Frame* frame) {
  auto call_descriptor = linkage()->GetIncomingDescriptor();

  CPURegList saves_fp =
      CPURegList(kDRegSizeInBits, call_descriptor->CalleeSavedFPRegisters());
  int saved_count = saves_fp.Count();
  if (saved_count != 0) {
    frame->AllocateSavedCalleeRegisterSlots(saved_count *
                                            (kDoubleSize / kSystemPointerSize));
  }

  CPURegList saves =
      CPURegList(kXRegSizeInBits, call_descriptor->CalleeSavedRegisters());
  saved_count = saves.Count();
  if (saved_count != 0) {
    frame->AllocateSavedCalleeRegisterSlots(saved_count);
  }
  frame->AlignFrame(16);
}
void CodeGenerator::AssembleConstructFrame() {
  auto call_descriptor = linkage()->GetIncomingDescriptor();
  __ AssertSpAligned();

  int required_slots =
      frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
  CPURegList saves_fp =
      CPURegList(kDRegSizeInBits, call_descriptor->CalleeSavedFPRegisters());
  const int returns = frame()->GetReturnSlotCount();

  if (call_descriptor->IsJSFunctionCall()) {
#if V8_ENABLE_WEBASSEMBLY
  } else if (call_descriptor->IsAnyWasmFunctionCall() ||
             call_descriptor->IsWasmCapiFunction() ||
             call_descriptor->IsWasmImportWrapper() ||
             (call_descriptor->IsCFunctionCall() &&
              info()->GetOutputStackFrameType() ==
                  StackFrame::C_WASM_ENTRY)) {
    UseScratchRegisterScope temps(masm());
    Register scratch = temps.AcquireX();
    __ Add(fp, sp, kSPToFPDelta);
    if (call_descriptor->IsWasmCapiFunction()) {
      UseScratchRegisterScope temps(masm());
      Register scratch = temps.AcquireX();
      __ Add(fp, sp, kSPToFPDelta);
    }

  if (info()->is_osr()) {
    __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
    __ RecordComment("-- OSR entrypoint --");
#ifdef V8_ENABLE_SANDBOX_BOOL
    UseScratchRegisterScope temps(masm());
    uint32_t expected_frame_size =
    Register scratch = temps.AcquireX();
    __ Add(scratch, sp, expected_frame_size);
    __ Cmp(scratch, fp);
    __ SbxCheck(eq, AbortReason::kOsrUnexpectedStackSize);
    DCHECK(call_descriptor->IsJSFunctionCall());
    DCHECK_EQ(unoptimized_frame_slots % 2, 1);
#if V8_ENABLE_WEBASSEMBLY
    UseScratchRegisterScope temps(masm());
    Register stack_limit = temps.AcquireX();
    __ Cmp(sp, stack_limit);
    if (v8_flags.experimental_wasm_growable_stacks) {
          WasmHandleStackOverflowDescriptor::FrameBaseRegister());
      __ PushCPURegList(fp_regs_to_save);
             WasmHandleStackOverflowDescriptor::FrameBaseRegister(), fp,
      __ CallBuiltin(Builtin::kWasmHandleStackOverflow);
      __ PopCPURegList(fp_regs_to_save);
    } else {
      __ Call(static_cast<intptr_t>(Builtin::kWasmStackOverflow),
      ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
    }

    required_slots -= saves.Count();
    required_slots -= saves_fp.Count();
    required_slots -= returns;
    __ Claim(required_slots);
    __ PushCPURegList(saves_fp);
    __ PushCPURegList(saves);
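    // required_slots starts as the full frame size in slots; the slots used by
    // callee-saved registers, callee-saved FP registers and return slots are
    // subtracted because they are pushed separately below, and only the
    // remainder is claimed as raw stack space.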
void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
  auto call_descriptor = linkage()->GetIncomingDescriptor();

  const int returns = RoundUp(frame()->GetReturnSlotCount(), 2);
  CPURegList saves =
      CPURegList(kXRegSizeInBits, call_descriptor->CalleeSavedRegisters());
  __ PopCPURegList(saves);
  CPURegList saves_fp =
      CPURegList(kDRegSizeInBits, call_descriptor->CalleeSavedFPRegisters());
  __ PopCPURegList(saves_fp);
  const int parameter_slots =
      static_cast<int>(call_descriptor->ParameterSlotCount());
  Arm64OperandConverter g(this, nullptr);

  if (parameter_slots != 0) {
    if (additional_pop_count->IsImmediate()) {
      DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
    } else {
      __ cmp(g.ToRegister(additional_pop_count), Operand(0));
      __ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue);
    }
  }

#if V8_ENABLE_WEBASSEMBLY
  if (call_descriptor->IsAnyWasmFunctionCall() &&
      v8_flags.experimental_wasm_growable_stacks) {
    UseScratchRegisterScope temps{masm()};
    Register scratch = temps.AcquireX();
    __ PushCPURegList(fp_regs_to_save);
    __ CallCFunction(ExternalReference::wasm_shrink_stack(), 1);
    __ PopCPURegList(fp_regs_to_save);
    UseScratchRegisterScope temps{masm()};
  }
#endif  // V8_ENABLE_WEBASSEMBLY

  const bool drop_jsargs = parameter_slots != 0 &&
                           call_descriptor->IsJSFunctionCall();
  if (call_descriptor->IsCFunctionCall()) {
  if (additional_pop_count->IsImmediate() &&
      g.ToConstant(additional_pop_count).ToInt32() == 0) {
    DCHECK(!call_descriptor->CalleeSavedRegisters().has(argc_reg));

  if (drop_jsargs) {
    Label argc_reg_has_final_count;
    DCHECK(!call_descriptor->CalleeSavedRegisters().has(argc_reg));
    if (parameter_slots > 1) {
      __ Cmp(argc_reg, Operand(parameter_slots));
      __ B(&argc_reg_has_final_count, ge);
      __ Mov(argc_reg, Operand(parameter_slots));
      __ Bind(&argc_reg_has_final_count);
    }
    __ DropArguments(argc_reg);
  } else if (additional_pop_count->IsImmediate()) {
    int additional_count = g.ToConstant(additional_pop_count).ToInt32();
    __ DropArguments(parameter_slots + additional_count);
  } else if (parameter_slots == 0) {
    __ DropArguments(g.ToRegister(additional_pop_count));
  } else {
    __ DropArguments(parameter_slots);
  }
  __ AssertSpAligned();
  __ Ret();
}
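// On the return path, JS calls drop the larger of the dynamic argument count
// in argc_reg and the static parameter_slots, so over-application is cleaned
// up correctly; the other paths drop parameter_slots plus whatever
// additional_pop_count requests.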
void CodeGenerator::PrepareForDeoptimizationExits(
    ZoneDeque<DeoptimizationExit*>* exits) {
  __ ForceConstantPoolEmissionWithoutJump();
  bool saw_deopt_kind[kDeoptimizeKindCount] = {false};
  for (auto exit : *exits) {
    saw_deopt_kind[static_cast<int>(exit->kind())] = true;
  }
  UseScratchRegisterScope scope(masm());
  Register scratch = scope.AcquireX();
  for (int i = 0; i < kDeoptimizeKindCount; i++) {
    if (!saw_deopt_kind[i]) continue;
  }
}
AllocatedOperand CodeGenerator::Push(InstructionOperand* source) {
  Arm64OperandConverter g(this, nullptr);
  int last_frame_slot_id =
  int slot_id = last_frame_slot_id + sp_delta + new_slots;
  if (source->IsRegister()) {
  } else if (source->IsStackSlot()) {
    UseScratchRegisterScope temps(masm());
    Register scratch = temps.AcquireX();
    __ Ldr(scratch, g.ToMemOperand(source, masm()));
  }
}

void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
  Arm64OperandConverter g(this, nullptr);
  if (dest->IsRegister()) {
  } else if (dest->IsStackSlot()) {
    UseScratchRegisterScope temps(masm());
    Register scratch = temps.AcquireX();
    __ Str(scratch, g.ToMemOperand(dest, masm()));
  }
  int last_frame_slot_id =
  int slot_id = last_frame_slot_id + sp_delta;
}
void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
                                       MachineRepresentation rep) {
  DCHECK(!source->IsImmediate());
  if (temps.CanAcquire()) {
  } else if (temps.CanAcquireFP()) {
  }
                           scratch_reg.code());
  Arm64OperandConverter g(this, nullptr);
  if (source->IsStackSlot()) {
    __ Ldr(g.ToDoubleRegister(&scratch), g.ToMemOperand(source, masm()));
  } else {
    DCHECK(source->IsRegister());
    __ fmov(g.ToDoubleRegister(&scratch), g.ToRegister(source));
  }
}

void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest,
                                       MachineRepresentation rep) {
  Arm64OperandConverter g(this, nullptr);
  if (dest->IsStackSlot()) {
    __ Str(g.ToDoubleRegister(&scratch), g.ToMemOperand(dest, masm()));
  } else {
    DCHECK(dest->IsRegister());
    __ fmov(g.ToRegister(dest), g.ToDoubleRegister(&scratch));
  }
}
void CodeGenerator::SetPendingMove(MoveOperands* move) {
  Arm64OperandConverter g(this, nullptr);
  UseScratchRegisterScope temps(masm());
  if (move->source().IsSimd128StackSlot()) {
    VRegister temp = temps.AcquireQ();
  }
  int64_t src_offset = src.offset();
  int64_t dst_offset = dst.offset();
  if ((src.IsImmediateOffset() &&
       !masm()->IsImmLSScaled(src_offset, src_size_log2) &&
       !masm()->IsImmLSUnscaled(src_offset)) ||
      (dst.IsImmediateOffset() &&
       !masm()->IsImmLSScaled(dst_offset, dst_size_log2) &&
       !masm()->IsImmLSUnscaled(dst_offset))) {
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  Arm64OperandConverter g(this, nullptr);
  // Helper function to write the given constant to the dst register.
  auto MoveConstantToRegister = [&](Register dst, Constant src) {
    if (src.type() == Constant::kHeapObject) {
      Handle<HeapObject> src_object = src.ToHeapObject();
      RootIndex index;
      if (IsMaterializableFromRoot(src_object, &index)) {
        __ LoadRoot(dst, index);
      } else {
        __ Mov(dst, src_object);
      }
    } else if (src.type() == Constant::kCompressedHeapObject) {
      Handle<HeapObject> src_object = src.ToHeapObject();
      RootIndex index;
      if (IsMaterializableFromRoot(src_object, &index)) {
        __ LoadTaggedRoot(dst, index);
      }
    } else if (src.type() == Constant::kExternalReference) {
      __ Mov(dst, src.ToExternalReference());
    } else {
      Operand src_op = g.ToImmediate(source);
      __ Mov(dst, src_op);
    }
  };

  if (source->IsRegister()) {
  } else {
    DCHECK(source->IsSimd128Register() || source->IsFloatRegister() ||
           source->IsDoubleRegister());
    __ Mov(g.ToDoubleRegister(destination).Q(),
           g.ToDoubleRegister(source).Q());
  }

  if (source->IsRegister()) {
    __ Str(g.ToRegister(source), dst);
  } else {
    VRegister src = g.ToDoubleRegister(source);
    if (source->IsFloatRegister() || source->IsDoubleRegister()) {
    } else {
      DCHECK(source->IsSimd128Register());
      __ Str(src.Q(), dst);
    }
  }

  __ Ldr(dst.Q(), src);

  if (source->IsSimd128StackSlot()) {
    UseScratchRegisterScope scope(masm());
    VRegister temp = scope.AcquireQ();
  }
  UseScratchRegisterScope scope(masm());

  Constant src = g.ToConstant(source);
  MoveConstantToRegister(g.ToRegister(destination), src);
  __ Fmov(dst.S(), src.ToFloat32());
  __ Fmov(dst, src.ToFloat64().value());

  Constant src = g.ToConstant(source);
  UseScratchRegisterScope scope(masm());
  MoveConstantToRegister(temp, src);

  UseScratchRegisterScope scope(masm());
  VRegister temp = scope.AcquireS();
  __ Fmov(temp, src.ToFloat32());

  if (src.ToFloat64().AsUint64() == 0) {
  } else {
    UseScratchRegisterScope scope(masm());
    VRegister temp = scope.AcquireD();
    __ Fmov(temp, src.ToFloat64().value());
  }
}
4462 if (source->IsRegister()) {
4465 VRegister src = g.ToDoubleRegister(source);
4467 if (source->IsFloatRegister() || source->IsDoubleRegister()) {
4470 DCHECK(source->IsSimd128Register());
4471 __ Swap(src.Q(), dst.Q());
4476 UseScratchRegisterScope scope(
masm());
4478 if (source->IsRegister()) {
4480 Register src = g.ToRegister(source);
4485 UseScratchRegisterScope scope(
masm());
4486 VRegister src = g.ToDoubleRegister(source);
4487 if (source->IsFloatRegister() || source->IsDoubleRegister()) {
4488 VRegister temp = scope.AcquireD();
4493 DCHECK(source->IsSimd128Register());
4494 VRegister temp = scope.AcquireQ();
4495 __ Mov(temp, src.Q());
4496 __ Ldr(src.Q(), dst);
4503 UseScratchRegisterScope scope(
masm());
4506 VRegister temp_0 = scope.AcquireD();
4507 VRegister temp_1 = scope.AcquireD();
4508 if (source->IsSimd128StackSlot()) {
4509 __ Ldr(temp_0.Q(), src);
4510 __ Ldr(temp_1.Q(), dst);
4511 __ Str(temp_0.Q(), dst);
4512 __ Str(temp_1.Q(), src);
4514 __ Ldr(temp_0, src);
4515 __ Ldr(temp_1, dst);
4516 __ Str(temp_0, dst);
4517 __ Str(temp_1, src);
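// Register-register swaps go through MacroAssembler::Swap directly; a
// register-stack swap parks the register value in a scratch register (or a
// scratch Q register for SIMD) while the stack slot is reloaded, and a
// stack-stack swap loads both slots into two scratch D/Q registers before
// storing them back crosswise.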