#if V8_ENABLE_WEBASSEMBLY
#define TRACE(...) PrintF(__VA_ARGS__)
    switch (constant.type()) {
    switch (constant.type()) {
        return Operand(constant.ToInt32());
        return Operand(constant.ToInt64());
        return Operand(constant.ToHeapObject());
  const size_t index = *first_index;
class OutOfLineRecordWrite final : public OutOfLineCode {
  OutOfLineRecordWrite(
      : OutOfLineCode(gen),
#if V8_ENABLE_WEBASSEMBLY
        stub_mode_(stub_mode),
  void Generate() final {
#ifdef V8_TARGET_ARCH_RISCV64
      __ DecompressTagged(value_, value_);
    SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
      __ CallEphemeronKeyBarrier(object_, offset_, save_fp_mode);
      __ CallIndirectPointerBarrier(object_, offset_, save_fp_mode,
#if V8_ENABLE_WEBASSEMBLY
    } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
      __ CallRecordWriteStubSaveRegisters(object_, offset_, save_fp_mode,
                                          StubCallMode::kCallWasmRuntimeStub);
      __ CallRecordWriteStubSaveRegisters(object_, offset_, save_fp_mode);
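  // Slow path shared by the write-barrier instructions below: depending on
  // the record-write mode and (for Wasm code) stub_mode_, this dispatches to
  // the ephemeron-key barrier, the indirect-pointer barrier, the Wasm runtime
  // stub, or the regular record-write builtin. Callers keep the fast-path
  // checks inline (e.g. the JumpIfSmi(value, ool->exit()) pattern used by
  // kArchStoreWithWriteBarrier) and only branch here when a barrier is
  // actually required.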
#if V8_ENABLE_WEBASSEMBLY
#if V8_TARGET_ARCH_RISCV64
FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
#if V8_ENABLE_WEBASSEMBLY
class WasmOutOfLineTrap : public OutOfLineCode {
  WasmOutOfLineTrap(CodeGenerator* gen, Instruction* instr)
  void Generate() override {
    RiscvOperandConverter i(gen_, instr_);
        static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
    GenerateCallToTrap(trap_id);
  void GenerateWithTrapId(TrapId trap_id) { GenerateCallToTrap(trap_id); }
  void GenerateCallToTrap(TrapId trap_id) {
    gen_->AssembleSourcePosition(instr_);
      __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
    ReferenceMap* reference_map =
        gen_->zone()->New<ReferenceMap>(gen_->zone());
    gen_->RecordSafepoint(reference_map);
    __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
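  // The trap id is decoded from the instruction's last input, the trap stub
  // is reached through a WASM_STUB_CALL relocation, and a safepoint is
  // recorded at the call site; the AssertUnreachable documents that trap
  // stubs never return.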
void RecordTrapInfoIfNeeded(Zone* zone, CodeGenerator* codegen,
    codegen->RecordProtectedInstruction(pc);
void RecordTrapInfoIfNeeded(Zone* zone, CodeGenerator* codegen,
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr)                   \
    __ asm_instr(i.OutputRegister(), i.MemoryOperand(), trapper); \
#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr)                         \
    __ asm_instr(i.InputOrZeroRegister(0), i.MemoryOperand(1), trapper); \
#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr)       \
    __ AddWord(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));     \
    __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0),      \
    __ bin_instr(i.TempRegister(1), i.OutputRegister(0),                       \
                 Operand(i.InputRegister(2)));                                 \
    __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
    __ BranchShort(&binop, ne, i.TempRegister(1), Operand(zero_reg));          \
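// Rough sketch of the retry loop ASSEMBLE_ATOMIC_BINOP emits (illustrative
// instantiation with bin_instr = Add32; the memory fences and the `binop`
// label definition from the elided lines are not shown):
//   AddWord     temp0, base, index          // effective address
//   binop:
//   load_linked out, 0(temp0)               // e.g. Ll / Lld
//   Add32       temp1, out, value
//   store_cond  temp1, 0(temp0)             // temp1 == 0 on success
//   BranchShort binop if temp1 != zero_reg  // retry on SC failure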
422#define ASSEMBLE_ATOMIC64_LOGIC_BINOP(bin_instr, external) \
424 FrameScope scope(masm(), StackFrame::MANUAL); \
425 __ AddWord(a0, i.InputRegister(0), i.InputRegister(1)); \
426 __ PushCallerSaved(SaveFPRegsMode::kIgnore, a0, a1); \
427 __ PrepareCallCFunction(3, 0, kScratchReg); \
428 __ CallCFunction(ExternalReference::external(), 3, 0); \
429 __ PopCallerSaved(SaveFPRegsMode::kIgnore, a0, a1); \
432#define ASSEMBLE_ATOMIC64_ARITH_BINOP(bin_instr, external) \
434 FrameScope scope(masm(), StackFrame::MANUAL); \
435 __ AddWord(a0, i.InputRegister(0), i.InputRegister(1)); \
436 __ PushCallerSaved(SaveFPRegsMode::kIgnore, a0, a1); \
437 __ PrepareCallCFunction(3, 0, kScratchReg); \
438 __ CallCFunction(ExternalReference::external(), 3, 0); \
439 __ PopCallerSaved(SaveFPRegsMode::kIgnore, a0, a1); \
442#define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, sign_extend, \
443 size, bin_instr, representation) \
446 __ AddWord(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
447 if (representation == 32) { \
448 __ And(i.TempRegister(3), i.TempRegister(0), 0x3); \
450 DCHECK_EQ(representation, 64); \
451 __ And(i.TempRegister(3), i.TempRegister(0), 0x7); \
453 __ SubWord(i.TempRegister(0), i.TempRegister(0), \
454 Operand(i.TempRegister(3))); \
455 __ Sll32(i.TempRegister(3), i.TempRegister(3), 3); \
458 __ load_linked(i.TempRegister(1), MemOperand(i.TempRegister(0), 0), \
460 __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3), \
461 size, sign_extend); \
462 __ bin_instr(i.TempRegister(2), i.OutputRegister(0), \
463 Operand(i.InputRegister(2))); \
464 __ InsertBits(i.TempRegister(1), i.TempRegister(2), i.TempRegister(3), \
466 __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
467 __ BranchShort(&binop, ne, i.TempRegister(1), Operand(zero_reg)); \
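// The *_EXT variants implement 8/16-bit (and, on RV64, 32-bit) atomics on top
// of full-width LL/SC: the address is aligned down to the containing word
// (the And/SubWord pair), the byte offset is converted into a bit offset
// (Sll32 by 3), and ExtractBits/InsertBits splice the narrow value out of and
// back into the loaded word inside the retry loop.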
471#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_linked, store_conditional) \
475 __ bind(&exchange); \
476 __ AddWord(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
477 __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0), \
479 __ Move(i.TempRegister(1), i.InputRegister(2)); \
480 __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
481 __ BranchShort(&exchange, ne, i.TempRegister(1), Operand(zero_reg)); \
485#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT( \
486 load_linked, store_conditional, sign_extend, size, representation) \
489 __ AddWord(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
490 if (representation == 32) { \
491 __ And(i.TempRegister(1), i.TempRegister(0), 0x3); \
493 DCHECK_EQ(representation, 64); \
494 __ And(i.TempRegister(1), i.TempRegister(0), 0x7); \
496 __ SubWord(i.TempRegister(0), i.TempRegister(0), \
497 Operand(i.TempRegister(1))); \
498 __ Sll32(i.TempRegister(1), i.TempRegister(1), 3); \
500 __ bind(&exchange); \
501 __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0), \
503 __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
504 size, sign_extend); \
505 __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1), \
507 __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
508 __ BranchShort(&exchange, ne, i.TempRegister(2), Operand(zero_reg)); \
512#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked, \
515 Label compareExchange; \
517 __ AddWord(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
519 __ bind(&compareExchange); \
520 __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0), \
522 __ BranchShort(&exit, ne, i.InputRegister(2), \
523 Operand(i.OutputRegister(0))); \
524 __ Move(i.TempRegister(2), i.InputRegister(3)); \
525 __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
526 __ BranchShort(&compareExchange, ne, i.TempRegister(2), \
527 Operand(zero_reg)); \
532#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT( \
533 load_linked, store_conditional, sign_extend, size, representation) \
535 Label compareExchange; \
537 __ AddWord(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
538 if (representation == 32) { \
539 __ And(i.TempRegister(1), i.TempRegister(0), 0x3); \
541 DCHECK_EQ(representation, 64); \
542 __ And(i.TempRegister(1), i.TempRegister(0), 0x7); \
544 __ SubWord(i.TempRegister(0), i.TempRegister(0), \
545 Operand(i.TempRegister(1))); \
546 __ Sll32(i.TempRegister(1), i.TempRegister(1), 3); \
548 __ bind(&compareExchange); \
549 __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0), \
551 __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
552 size, sign_extend); \
553 __ ExtractBits(i.InputRegister(2), i.InputRegister(2), 0, size, \
555 __ BranchShort(&exit, ne, i.InputRegister(2), \
556 Operand(i.OutputRegister(0))); \
557 __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \
559 __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
560 __ BranchShort(&compareExchange, ne, i.TempRegister(2), \
561 Operand(zero_reg)); \
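// The compare-exchange loops bail out through the `exit` label as soon as the
// loaded value differs from the expected one; the *_EXT variant additionally
// sign/zero-extends the expected value to the operand width (the ExtractBits
// on InputRegister(2)) before comparing.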
566#define ASSEMBLE_IEEE754_BINOP(name) \
568 FrameScope scope(masm(), StackFrame::MANUAL); \
569 __ PrepareCallCFunction(0, 2, kScratchReg); \
570 __ MovToFloatParameters(i.InputDoubleRegister(0), \
571 i.InputDoubleRegister(1)); \
572 __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
574 __ MovFromFloatResult(i.OutputDoubleRegister()); \
577#define ASSEMBLE_IEEE754_UNOP(name) \
579 FrameScope scope(masm(), StackFrame::MANUAL); \
580 __ PrepareCallCFunction(0, 1, kScratchReg); \
581 __ MovToFloatParameter(i.InputDoubleRegister(0)); \
582 __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
584 __ MovFromFloatResult(i.OutputDoubleRegister()); \
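// Both IEEE754 helpers route through C library shims; e.g. (sketch, the
// concrete instantiations appear in the kIeee754Float64* cases below)
//   ASSEMBLE_IEEE754_UNOP(log)  -> CallCFunction(ieee754_log_function, 0, 1)
//   ASSEMBLE_IEEE754_BINOP(pow) -> CallCFunction(ieee754_pow_function, 0, 2)
// with the double arguments passed in FP parameter registers and the result
// read back via MovFromFloatResult.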
587#define ASSEMBLE_F64X2_ARITHMETIC_BINOP(op) \
589 __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \
590 i.InputSimd128Register(1)); \
593#define ASSEMBLE_RVV_BINOP_INTEGER(instr, OP) \
594 case kRiscvI8x16##instr: { \
595 __ VU.set(kScratchReg, E8, m1); \
596 __ OP(i.OutputSimd128Register(), i.InputSimd128Register(0), \
597 i.InputSimd128Register(1)); \
600 case kRiscvI16x8##instr: { \
601 __ VU.set(kScratchReg, E16, m1); \
602 __ OP(i.OutputSimd128Register(), i.InputSimd128Register(0), \
603 i.InputSimd128Register(1)); \
606 case kRiscvI32x4##instr: { \
607 __ VU.set(kScratchReg, E32, m1); \
608 __ OP(i.OutputSimd128Register(), i.InputSimd128Register(0), \
609 i.InputSimd128Register(1)); \
613#define ASSEMBLE_RVV_UNOP_INTEGER_VR(instr, OP) \
614 case kRiscvI8x16##instr: { \
615 __ VU.set(kScratchReg, E8, m1); \
616 __ OP(i.OutputSimd128Register(), i.InputRegister(0)); \
619 case kRiscvI16x8##instr: { \
620 __ VU.set(kScratchReg, E16, m1); \
621 __ OP(i.OutputSimd128Register(), i.InputRegister(0)); \
624 case kRiscvI32x4##instr: { \
625 __ VU.set(kScratchReg, E32, m1); \
626 __ OP(i.OutputSimd128Register(), i.InputRegister(0)); \
630#define ASSEMBLE_RVV_UNOP_INTEGER_VV(instr, OP) \
631 case kRiscvI8x16##instr: { \
632 __ VU.set(kScratchReg, E8, m1); \
633 __ OP(i.OutputSimd128Register(), i.InputSimd128Register(0)); \
636 case kRiscvI16x8##instr: { \
637 __ VU.set(kScratchReg, E16, m1); \
638 __ OP(i.OutputSimd128Register(), i.InputSimd128Register(0)); \
641 case kRiscvI32x4##instr: { \
642 __ VU.set(kScratchReg, E32, m1); \
643 __ OP(i.OutputSimd128Register(), i.InputSimd128Register(0)); \
646 case kRiscvI64x2##instr: { \
647 __ VU.set(kScratchReg, E64, m1); \
648 __ OP(i.OutputSimd128Register(), i.InputSimd128Register(0)); \
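// Each RVV helper first programs the vector unit with VU.set(kScratchReg,
// <SEW>, m1) to select the element width (E8/E16/E32/E64) implied by the lane
// type in the opcode, then issues a single vector instruction on the SIMD
// registers.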
void AdjustStackPointerForTailCall(MacroAssembler* masm,
                                   FrameAccessState* state,
                                   int new_slot_above_sp,
                                   bool allow_shrinkage = true) {
  int current_sp_offset = state->GetSPToFPSlotCount() +
  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
  if (stack_slot_delta > 0) {
    state->IncreaseSPDelta(stack_slot_delta);
  } else if (allow_shrinkage && stack_slot_delta < 0) {
    state->IncreaseSPDelta(stack_slot_delta);
                                       int first_unused_slot_offset) {
                                first_unused_slot_offset, false);
                                      int first_unused_slot_offset) {
                               first_unused_slot_offset);
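// The two tail-call gap hooks above differ only in whether shrinking is
// allowed: the before-gap variant passes allow_shrinkage = false so the stack
// can only grow before the gap moves run, while the after-gap variant uses
// the default and may shrink the stack back.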
  __ Assert(eq, AbortReason::kWrongFunctionCodeStart,
#ifdef V8_ENABLE_LEAPTIERING
void CodeGenerator::AssembleDispatchHandleRegisterCheck() {
#ifdef V8_TARGET_ARCH_RISCV32
  DCHECK(linkage()->GetIncomingDescriptor()->IsJSFunctionCall());
    UseScratchRegisterScope temps(masm());
    Register actual_parameter_count = temps.Acquire();
      UseScratchRegisterScope temps(masm());
      __ LoadParameterCountFromJSDispatchTable(
  __ Assert(eq, AbortReason::kWrongFunctionDispatchHandle,
    Instruction* instr) {
  RiscvOperandConverter i(this, instr);
  switch (arch_opcode) {
    case kArchCallCodeObject: {
      if (instr->InputAt(0)->IsImmediate()) {
            i.InputCodeEntrypointTag(instr->CodeEnrypointTagInputIndex());
        __ CallCodeObject(reg, tag);
    case kArchCallBuiltinPointer: {
      Register builtin_index = i.InputRegister(0);
      __ CallBuiltinByIndex(builtin_index, target);
    case kArchCallWasmFunction:
    case kArchCallWasmFunctionIndirect: {
      if (instr->InputAt(0)->IsImmediate()) {
        DCHECK_EQ(arch_opcode, kArchCallWasmFunction);
        Constant constant = i.ToConstant(instr->InputAt(0));
        __ Call(wasm_code, constant.rmode());
      } else if (arch_opcode == kArchCallWasmFunctionIndirect) {
        __ CallWasmCodePointer(
            i.InputInt64(instr->WasmSignatureHashInputIndex()));
        __ Call(i.InputRegister(0));
    case kArchTailCallCodeObject: {
      if (instr->InputAt(0)->IsImmediate()) {
            i.InputCodeEntrypointTag(instr->CodeEnrypointTagInputIndex());
        __ JumpCodeObject(reg, tag);
    case kArchTailCallWasm:
    case kArchTailCallWasmIndirect: {
      if (instr->InputAt(0)->IsImmediate()) {
        DCHECK_EQ(arch_opcode, kArchTailCallWasm);
        Constant constant = i.ToConstant(instr->InputAt(0));
        __ Jump(wasm_code, constant.rmode());
        if (arch_opcode == kArchTailCallWasmIndirect) {
          __ CallWasmCodePointer(
              i.InputInt64(instr->WasmSignatureHashInputIndex()),
    case kArchTailCallAddress: {
    case kArchCallJSFunction: {
      Register func = i.InputOrZeroRegister(0);
      uint32_t num_arguments =
          i.InputUint32(instr->JSCallArgumentCountInputIndex());
      __ CallJSFunction(func, num_arguments);
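      // kArchCallJSFunction takes the callee in the first input and reads the
      // argument count from the JSCallArgumentCountInputIndex() input; the
      // actual invocation (including any dispatch-table indirection) is left
      // to MacroAssembler::CallJSFunction.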
    case kArchPrepareCallCFunction: {
#ifdef V8_TARGET_ARCH_RISCV64
      __ PrepareCallCFunction(num_gp_parameters, num_fp_parameters,
    case kArchSaveCallerRegisters: {
    case kArchRestoreCallerRegisters: {
    case kArchPrepareTailCall:
    case kArchCallCFunctionWithFrameState:
    case kArchCallCFunction: {
      Label return_location;
#if V8_ENABLE_WEBASSEMBLY
      bool isWasmCapiFunction =
      if (isWasmCapiFunction) {
                 MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
      if (instr->InputAt(0)->IsImmediate()) {
        ExternalReference ref = i.InputExternalReference(0);
        pc_offset = __ CallCFunction(ref, num_gp_parameters, num_fp_parameters,
                                     set_isolate_data_slots, &return_location);
        pc_offset = __ CallCFunction(func, num_gp_parameters, num_fp_parameters,
                                     set_isolate_data_slots, &return_location);
      bool const needs_frame_state =
          (arch_opcode == kArchCallCFunctionWithFrameState);
      if (needs_frame_state) {
    case kArchBinarySearchSwitch:
    case kArchTableSwitch:
    case kArchAbortCSADcheck:
      DCHECK(i.InputRegister(0) == a0);
      __ CallBuiltin(Builtin::kAbortCSADcheck);
    case kArchDebugBreak:
      __ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)),
    case kArchThrowTerminator:
    case kArchDeoptimize: {
      DeoptimizationExit* exit =
      __ Branch(exit->label());
#if V8_ENABLE_WEBASSEMBLY
    case kArchStackPointer:
      __ Move(i.OutputRegister(), sp);
    case kArchSetStackPointer: {
      __ RecordComment("-- Set simulator stack limit --");
      __ Move(sp, i.InputRegister(0));
    case kArchStackPointerGreaterThan:
    case kArchStackCheckOffset:
    case kArchFramePointer:
      __ Move(i.OutputRegister(), fp);
    case kArchParentFramePointer:
      __ Move(i.OutputRegister(), fp);
    case kArchTruncateDoubleToI:
    case kArchStoreWithWriteBarrier: {
      auto ool = zone()->New<OutOfLineRecordWrite>(
      __ JumpIfSmi(value, ool->exit());
      __ bind(ool->exit());
    case kArchAtomicStoreWithWriteBarrier: {
#ifdef V8_TARGET_ARCH_RISCV64
      auto ool = zone()->New<OutOfLineRecordWrite>(
      __ JumpIfSmi(value, ool->exit());
      __ bind(ool->exit());
    case kArchStoreIndirectWithWriteBarrier: {
#ifdef V8_TARGET_ARCH_RISCV64
      auto ool = zone()->New<OutOfLineRecordWrite>(
      __ bind(ool->exit());
    case kArchStackSlot: {
      __ AddWord(i.OutputRegister(), base_reg, Operand(offset.offset()));
1127 case kIeee754Float64Acos:
1130 case kIeee754Float64Acosh:
1133 case kIeee754Float64Asin:
1136 case kIeee754Float64Asinh:
1139 case kIeee754Float64Atan:
1142 case kIeee754Float64Atanh:
1145 case kIeee754Float64Atan2:
1148 case kIeee754Float64Cos:
1151 case kIeee754Float64Cosh:
1154 case kIeee754Float64Cbrt:
1157 case kIeee754Float64Exp:
1160 case kIeee754Float64Expm1:
1163 case kIeee754Float64Log:
1166 case kIeee754Float64Log1p:
1169 case kIeee754Float64Log2:
1172 case kIeee754Float64Log10:
1175 case kIeee754Float64Pow:
1178 case kIeee754Float64Sin:
1181 case kIeee754Float64Sinh:
1184 case kIeee754Float64Tan:
1187 case kIeee754Float64Tanh:
1191 __ Add32(
i.OutputRegister(),
i.InputOrZeroRegister(0),
i.InputOperand(1));
1194 __ Sub32(
i.OutputRegister(),
i.InputOrZeroRegister(0),
i.InputOperand(1));
1197 __ Mul32(
i.OutputRegister(),
i.InputOrZeroRegister(0),
i.InputOperand(1));
1199 case kRiscvMulOvf32:
1200 __ MulOverflow32(
i.OutputRegister(),
i.InputOrZeroRegister(0),
1203#if V8_TARGET_ARCH_RISCV64
1205 __ AddWord(
i.OutputRegister(),
i.InputOrZeroRegister(0),
1208 case kRiscvAddOvf64:
1209 __ AddOverflow64(
i.OutputRegister(),
i.InputOrZeroRegister(0),
1213 __ Sub64(
i.OutputRegister(),
i.InputOrZeroRegister(0),
i.InputOperand(1));
1215 case kRiscvSubOvf64:
1216 __ SubOverflow64(
i.OutputRegister(),
i.InputOrZeroRegister(0),
1219 case kRiscvMulHigh32:
1220 __ Mulh32(
i.OutputRegister(),
i.InputOrZeroRegister(0),
1223 case kRiscvMulHighU32:
1224 __ Mulhu32(
i.OutputRegister(),
i.InputOrZeroRegister(0),
1227 case kRiscvMulHigh64:
1228 __ Mulh64(
i.OutputRegister(),
i.InputOrZeroRegister(0),
1231 case kRiscvMulHighU64:
1232 __ Mulhu64(
i.OutputRegister(),
i.InputOrZeroRegister(0),
1235 case kRiscvMulOvf64:
1236 __ MulOverflow64(
i.OutputRegister(),
i.InputOrZeroRegister(0),
1240 DCHECK_NE(
i.OutputRegister(),
i.InputRegister(1));
1241 __ Div32(
i.OutputRegister(),
i.InputOrZeroRegister(0),
i.InputOperand(1));
1243 __ LoadZeroIfConditionZero(
i.OutputRegister(),
i.InputRegister(1));
1246 case kRiscvDivU32: {
1247 DCHECK_NE(
i.OutputRegister(),
i.InputRegister(1));
1248 __ Divu32(
i.OutputRegister(),
i.InputOrZeroRegister(0),
1251 __ LoadZeroIfConditionZero(
i.OutputRegister(),
i.InputRegister(1));
1255 __ Mod32(
i.OutputRegister(),
i.InputOrZeroRegister(0),
i.InputOperand(1));
1258 __ Modu32(
i.OutputRegister(),
i.InputOrZeroRegister(0),
1262 __ Mul64(
i.OutputRegister(),
i.InputOrZeroRegister(0),
i.InputOperand(1));
1265 DCHECK_NE(
i.OutputRegister(),
i.InputRegister(1));
1266 __ Div64(
i.OutputRegister(),
i.InputOrZeroRegister(0),
i.InputOperand(1));
1268 __ LoadZeroIfConditionZero(
i.OutputRegister(),
i.InputRegister(1));
1271 case kRiscvDivU64: {
1272 DCHECK_NE(
i.OutputRegister(),
i.InputRegister(1));
1273 __ Divu64(
i.OutputRegister(),
i.InputOrZeroRegister(0),
1276 __ LoadZeroIfConditionZero(
i.OutputRegister(),
i.InputRegister(1));
1280 __ Mod64(
i.OutputRegister(),
i.InputOrZeroRegister(0),
i.InputOperand(1));
1283 __ Modu64(
i.OutputRegister(),
i.InputOrZeroRegister(0),
1286#elif V8_TARGET_ARCH_RISCV32
1288 __ AddOverflow(
i.OutputRegister(),
i.InputOrZeroRegister(0),
1292 __ SubOverflow(
i.OutputRegister(),
i.InputOrZeroRegister(0),
1295 case kRiscvMulHigh32:
1296 __ Mulh(
i.OutputRegister(),
i.InputOrZeroRegister(0),
i.InputOperand(1));
1298 case kRiscvMulHighU32:
1299 __ Mulhu(
i.OutputRegister(),
i.InputOrZeroRegister(0),
i.InputOperand(1),
1303 __ Div(
i.OutputRegister(),
i.InputOrZeroRegister(0),
i.InputOperand(1));
1305 __ LoadZeroIfConditionZero(
i.OutputRegister(),
i.InputRegister(1));
1308 case kRiscvDivU32: {
1309 __ Divu(
i.OutputRegister(),
i.InputOrZeroRegister(0),
i.InputOperand(1));
1311 __ LoadZeroIfConditionZero(
i.OutputRegister(),
i.InputRegister(1));
1315 __ Mod(
i.OutputRegister(),
i.InputOrZeroRegister(0),
i.InputOperand(1));
1318 __ Modu(
i.OutputRegister(),
i.InputOrZeroRegister(0),
i.InputOperand(1));
1322 __ And(
i.OutputRegister(),
i.InputOrZeroRegister(0),
i.InputOperand(1));
1325 __ And(
i.OutputRegister(),
i.InputOrZeroRegister(0),
i.InputOperand(1));
1326 __ Sll32(
i.OutputRegister(),
i.OutputRegister(), 0x0);
1329 __ Or(
i.OutputRegister(),
i.InputOrZeroRegister(0),
i.InputOperand(1));
1332 __ Or(
i.OutputRegister(),
i.InputOrZeroRegister(0),
i.InputOperand(1));
1333 __ Sll32(
i.OutputRegister(),
i.OutputRegister(), 0x0);
1336 __ Xor(
i.OutputRegister(),
i.InputOrZeroRegister(0),
i.InputOperand(1));
1339 __ Xor(
i.OutputRegister(),
i.InputOrZeroRegister(0),
i.InputOperand(1));
1340 __ Sll32(
i.OutputRegister(),
i.OutputRegister(), 0x0);
1343 __ Clz32(
i.OutputRegister(),
i.InputOrZeroRegister(0));
1345#if V8_TARGET_ARCH_RISCV64
1347 __ Clz64(
i.OutputRegister(),
i.InputOrZeroRegister(0));
1351 if (
instr->InputAt(1)->IsRegister()) {
1352 __ Sll32(
i.OutputRegister(),
i.InputRegister(0),
i.InputRegister(1));
1354 int64_t imm =
i.InputOperand(1).immediate();
1355 __ Sll32(
i.OutputRegister(),
i.InputRegister(0),
1360 if (
instr->InputAt(1)->IsRegister()) {
1361 __ Srl32(
i.OutputRegister(),
i.InputRegister(0),
i.InputRegister(1));
1363 int64_t imm =
i.InputOperand(1).immediate();
1364 __ Srl32(
i.OutputRegister(),
i.InputRegister(0),
1369 if (
instr->InputAt(1)->IsRegister()) {
1370 __ Sra32(
i.OutputRegister(),
i.InputRegister(0),
i.InputRegister(1));
1372 int64_t imm =
i.InputOperand(1).immediate();
1373 __ Sra32(
i.OutputRegister(),
i.InputRegister(0),
1377#if V8_TARGET_ARCH_RISCV64
1378 case kRiscvZeroExtendWord: {
1379 __ ZeroExtendWord(
i.OutputRegister(),
i.InputRegister(0));
1382 case kRiscvSignExtendWord: {
1383 __ SignExtendWord(
i.OutputRegister(),
i.InputRegister(0));
1387 __ Sll64(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
1390 __ Srl64(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
1393 __ Sra64(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
1396 __ Dror(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
1404 __ rev8(
i.OutputRegister(),
i.InputRegister(0));
1407 __ andn(
i.OutputRegister(),
i.InputRegister(0),
i.InputRegister(1));
1410 __ orn(
i.OutputRegister(),
i.InputRegister(0),
i.InputRegister(1));
1413 __ xnor(
i.OutputRegister(),
i.InputRegister(0),
i.InputRegister(1));
1416 __ clz(
i.OutputRegister(),
i.InputRegister(0));
1419 __ ctz(
i.OutputRegister(),
i.InputRegister(0));
1422 __ cpop(
i.OutputRegister(),
i.InputRegister(0));
1424#if V8_TARGET_ARCH_RISCV64
1426 __ clzw(
i.OutputRegister(),
i.InputRegister(0));
1429 __ ctzw(
i.OutputRegister(),
i.InputRegister(0));
1432 __ cpopw(
i.OutputRegister(),
i.InputRegister(0));
1436 __ max(
i.OutputRegister(),
i.InputRegister(0),
i.InputRegister(1));
1439 __ maxu(
i.OutputRegister(),
i.InputRegister(0),
i.InputRegister(1));
1442 __ min(
i.OutputRegister(),
i.InputRegister(0),
i.InputRegister(1));
1445 __ minu(
i.OutputRegister(),
i.InputRegister(0),
i.InputRegister(1));
1448 __ sextb(
i.OutputRegister(),
i.InputRegister(0));
1451 __ sexth(
i.OutputRegister(),
i.InputRegister(0));
1454 __ zexth(
i.OutputRegister(),
i.InputRegister(0));
1462 __ Ror(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
1465#ifdef V8_TARGET_ARCH_RISCV64
1467 case kRiscvCmpZero32:
1478 __ Move(
i.OutputRegister(),
i.InputRegister(0));
1480 __ li(
i.OutputRegister(),
i.InputOperand(0));
1485 FPURegister left =
i.InputOrZeroSingleRegister(0);
1486 FPURegister right =
i.InputOrZeroSingleRegister(1);
1489 FlagsConditionToConditionCmpFPU(&predicate,
instr->flags_condition());
1492 !
__ IsSingleZeroRegSet()) {
1500 __ fadd_s(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
1501 i.InputDoubleRegister(1));
1504 __ fsub_s(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
1505 i.InputDoubleRegister(1));
1509 __ fmul_s(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
1510 i.InputDoubleRegister(1));
1513 __ fdiv_s(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
1514 i.InputDoubleRegister(1));
1521 __ MovToFloatParameters(
i.InputDoubleRegister(0),
1522 i.InputDoubleRegister(1));
1524 __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
1526 __ MovFromFloatResult(
i.OutputSingleRegister());
1530 __ fabs_s(
i.OutputSingleRegister(),
i.InputSingleRegister(0));
1533 __ Neg_s(
i.OutputSingleRegister(),
i.InputSingleRegister(0));
1536 __ fsqrt_s(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0));
1540 __ fmax_s(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
1541 i.InputDoubleRegister(1));
1544 __ fmin_s(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
1545 i.InputDoubleRegister(1));
1548 FPURegister left =
i.InputOrZeroDoubleRegister(0);
1549 FPURegister right =
i.InputOrZeroDoubleRegister(1);
1552 FlagsConditionToConditionCmpFPU(&predicate,
instr->flags_condition());
1554 !
__ IsDoubleZeroRegSet()) {
1560#if V8_TARGET_ARCH_RISCV32
1562 __ AddPair(
i.OutputRegister(0),
i.OutputRegister(1),
i.InputRegister(0),
1563 i.InputRegister(1),
i.InputRegister(2),
i.InputRegister(3),
1567 __ SubPair(
i.OutputRegister(0),
i.OutputRegister(1),
i.InputRegister(0),
1568 i.InputRegister(1),
i.InputRegister(2),
i.InputRegister(3),
1572 __ AndPair(
i.OutputRegister(0),
i.OutputRegister(1),
i.InputRegister(0),
1573 i.InputRegister(1),
i.InputRegister(2),
i.InputRegister(3));
1576 __ OrPair(
i.OutputRegister(0),
i.OutputRegister(1),
i.InputRegister(0),
1577 i.InputRegister(1),
i.InputRegister(2),
i.InputRegister(3));
1580 __ XorPair(
i.OutputRegister(0),
i.OutputRegister(1),
i.InputRegister(0),
1581 i.InputRegister(1),
i.InputRegister(2),
i.InputRegister(3));
1584 __ MulPair(
i.OutputRegister(0),
i.OutputRegister(1),
i.InputRegister(0),
1585 i.InputRegister(1),
i.InputRegister(2),
i.InputRegister(3),
1588 case kRiscvShlPair: {
1590 instr->OutputCount() >= 2 ?
i.OutputRegister(1) :
i.TempRegister(0);
1591 if (
instr->InputAt(2)->IsRegister()) {
1592 __ ShlPair(
i.OutputRegister(0), second_output,
i.InputRegister(0),
1596 uint32_t imm =
i.InputOperand(2).immediate();
1597 __ ShlPair(
i.OutputRegister(0), second_output,
i.InputRegister(0),
1601 case kRiscvShrPair: {
1603 instr->OutputCount() >= 2 ?
i.OutputRegister(1) :
i.TempRegister(0);
1604 if (
instr->InputAt(2)->IsRegister()) {
1605 __ ShrPair(
i.OutputRegister(0), second_output,
i.InputRegister(0),
1609 uint32_t imm =
i.InputOperand(2).immediate();
1610 __ ShrPair(
i.OutputRegister(0), second_output,
i.InputRegister(0),
1614 case kRiscvSarPair: {
1616 instr->OutputCount() >= 2 ?
i.OutputRegister(1) :
i.TempRegister(0);
1617 if (
instr->InputAt(2)->IsRegister()) {
1618 __ SarPair(
i.OutputRegister(0), second_output,
i.InputRegister(0),
1622 uint32_t imm =
i.InputOperand(2).immediate();
1623 __ SarPair(
i.OutputRegister(0), second_output,
i.InputRegister(0),
1630 __ fadd_d(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
1631 i.InputDoubleRegister(1));
1634 __ fsub_d(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
1635 i.InputDoubleRegister(1));
1639 __ fmul_d(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
1640 i.InputDoubleRegister(1));
1643 __ fdiv_d(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
1644 i.InputDoubleRegister(1));
1651 __ MovToFloatParameters(
i.InputDoubleRegister(0),
1652 i.InputDoubleRegister(1));
1653 __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
1655 __ MovFromFloatResult(
i.OutputDoubleRegister());
1659 __ fabs_d(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0));
1662 __ Neg_d(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0));
1665 __ fsqrt_d(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0));
1669 __ fmax_d(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
1670 i.InputDoubleRegister(1));
1673 __ fmin_d(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
1674 i.InputDoubleRegister(1));
1676#if V8_TARGET_ARCH_RISCV64
1677 case kRiscvFloat64RoundDown: {
1678 __ Floor_d_d(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
1682 case kRiscvFloat64RoundTruncate: {
1683 __ Trunc_d_d(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
1687 case kRiscvFloat64RoundUp: {
1688 __ Ceil_d_d(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
1692 case kRiscvFloat64RoundTiesEven: {
1693 __ Round_d_d(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
1698 case kRiscvFloat32RoundDown: {
1699 __ Floor_s_s(
i.OutputSingleRegister(),
i.InputSingleRegister(0),
1703 case kRiscvFloat32RoundTruncate: {
1704 __ Trunc_s_s(
i.OutputSingleRegister(),
i.InputSingleRegister(0),
1708 case kRiscvFloat32RoundUp: {
1709 __ Ceil_s_s(
i.OutputSingleRegister(),
i.InputSingleRegister(0),
1713 case kRiscvFloat32RoundTiesEven: {
1714 __ Round_s_s(
i.OutputSingleRegister(),
i.InputSingleRegister(0),
1718 case kRiscvFloat32Max: {
1719 __ Float32Max(
i.OutputSingleRegister(),
i.InputSingleRegister(0),
1720 i.InputSingleRegister(1));
1723 case kRiscvFloat64Max: {
1724 __ Float64Max(
i.OutputSingleRegister(),
i.InputSingleRegister(0),
1725 i.InputSingleRegister(1));
1728 case kRiscvFloat32Min: {
1729 __ Float32Min(
i.OutputSingleRegister(),
i.InputSingleRegister(0),
1730 i.InputSingleRegister(1));
1733 case kRiscvFloat64Min: {
1734 __ Float64Min(
i.OutputSingleRegister(),
i.InputSingleRegister(0),
1735 i.InputSingleRegister(1));
1738 case kRiscvFloat64SilenceNaN:
1739 __ FPUCanonicalizeNaN(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0));
1743 __ feq_d(
kScratchReg,
i.InputDoubleRegister(0),
i.InputDoubleRegister(0));
1744#if V8_TARGET_ARCH_RISCV64
1746#elif V8_TARGET_ARCH_RISCV32
1747 __ StoreDouble(
i.InputDoubleRegister(0),
1750 __ fcvt_s_d(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0));
1752#if V8_TARGET_ARCH_RISCV64
1756#elif V8_TARGET_ARCH_RISCV32
1763 __ fsgnj_s(
i.OutputDoubleRegister(),
i.OutputDoubleRegister(),
1770 __ feq_s(
kScratchReg,
i.InputDoubleRegister(0),
i.InputDoubleRegister(0));
1771#if V8_TARGET_ARCH_RISCV64
1773#elif V8_TARGET_ARCH_RISCV32
1776 __ fcvt_d_s(
i.OutputDoubleRegister(),
i.InputSingleRegister(0));
1778#if V8_TARGET_ARCH_RISCV64
1782#elif V8_TARGET_ARCH_RISCV32
1786 __ fsgnj_d(
i.OutputDoubleRegister(),
i.OutputDoubleRegister(),
1792 __ fcvt_d_w(
i.OutputDoubleRegister(),
i.InputRegister(0));
1796 __ fcvt_s_w(
i.OutputDoubleRegister(),
i.InputRegister(0));
1799 case kRiscvCvtSUw: {
1800 __ Cvt_s_uw(
i.OutputDoubleRegister(),
i.InputRegister(0));
1803#if V8_TARGET_ARCH_RISCV64
1805 __ fcvt_s_l(
i.OutputDoubleRegister(),
i.InputRegister(0));
1809 __ fcvt_d_l(
i.OutputDoubleRegister(),
i.InputRegister(0));
1812 case kRiscvCvtDUl: {
1813 __ Cvt_d_ul(
i.OutputDoubleRegister(),
i.InputRegister(0));
1816 case kRiscvCvtSUl: {
1817 __ Cvt_s_ul(
i.OutputDoubleRegister(),
i.InputRegister(0));
1821 case kRiscvCvtDUw: {
1822 __ Cvt_d_uw(
i.OutputDoubleRegister(),
i.InputRegister(0));
1825 case kRiscvFloorWD: {
1827 __ Floor_w_d(
i.OutputRegister(),
i.InputDoubleRegister(0),
result);
1830 case kRiscvCeilWD: {
1832 __ Ceil_w_d(
i.OutputRegister(),
i.InputDoubleRegister(0),
result);
1835 case kRiscvRoundWD: {
1837 __ Round_w_d(
i.OutputRegister(),
i.InputDoubleRegister(0),
result);
1840 case kRiscvTruncWD: {
1842 __ Trunc_w_d(
i.OutputRegister(),
i.InputDoubleRegister(0),
result);
1845 case kRiscvFloorWS: {
1847 __ Floor_w_s(
i.OutputRegister(),
i.InputDoubleRegister(0),
result);
1850 case kRiscvCeilWS: {
1852 __ Ceil_w_s(
i.OutputRegister(),
i.InputDoubleRegister(0),
result);
1855 case kRiscvRoundWS: {
1857 __ Round_w_s(
i.OutputRegister(),
i.InputDoubleRegister(0),
result);
1860 case kRiscvTruncWS: {
1864 __ Trunc_w_s(
i.OutputRegister(),
i.InputDoubleRegister(0),
result);
1881 if (set_overflow_to_min_i32) {
1889#if V8_TARGET_ARCH_RISCV64
1890 case kRiscvTruncLS: {
1892 __ Trunc_l_s(
i.OutputRegister(),
i.InputDoubleRegister(0),
result);
1895 case kRiscvTruncLD: {
1899 __ Trunc_l_d(
i.OutputRegister(),
i.InputDoubleRegister(0),
result);
1900 if (set_overflow_to_min_i64) {
1909 case kRiscvTruncUwD: {
1911 __ Trunc_uw_d(
i.OutputRegister(),
i.InputDoubleRegister(0),
result);
1914 case kRiscvTruncUwS: {
1917 __ Trunc_uw_s(
i.OutputRegister(),
i.InputDoubleRegister(0),
result);
1930 if (set_overflow_to_min_u32) {
1937#if V8_TARGET_ARCH_RISCV64
1938 case kRiscvTruncUlS: {
1940 __ Trunc_ul_s(
i.OutputRegister(),
i.InputDoubleRegister(0),
result);
1943 case kRiscvTruncUlD: {
1945 __ Trunc_ul_d(
i.OutputRegister(0),
i.InputDoubleRegister(0),
result);
1948 case kRiscvBitcastDL:
1949 __ fmv_x_d(
i.OutputRegister(),
i.InputDoubleRegister(0));
1951 case kRiscvBitcastLD:
1952 __ fmv_d_x(
i.OutputDoubleRegister(),
i.InputRegister(0));
1955 case kRiscvBitcastInt32ToFloat32:
1956 __ fmv_w_x(
i.OutputDoubleRegister(),
i.InputRegister(0));
1958 case kRiscvBitcastFloat32ToInt32:
1959 __ fmv_x_w(
i.OutputRegister(),
i.InputDoubleRegister(0));
1961 case kRiscvFloat64ExtractLowWord32:
1962 __ ExtractLowWordFromF64(
i.OutputRegister(),
i.InputDoubleRegister(0));
1964 case kRiscvFloat64ExtractHighWord32:
1965 __ ExtractHighWordFromF64(
i.OutputRegister(),
i.InputDoubleRegister(0));
1967 case kRiscvFloat64InsertLowWord32:
1968 __ InsertLowWordF64(
i.OutputDoubleRegister(),
i.InputRegister(1));
1970 case kRiscvFloat64InsertHighWord32:
1971 __ InsertHighWordF64(
i.OutputDoubleRegister(),
i.InputRegister(1));
1975 case kRiscvSignExtendByte:
1976 __ SignExtendByte(
i.OutputRegister(),
i.InputRegister(0));
1978 case kRiscvSignExtendShort:
1979 __ SignExtendShort(
i.OutputRegister(),
i.InputRegister(0));
1982 __ Lbu(
i.OutputRegister(),
i.MemoryOperand(), trapper);
1985 __ Lb(
i.OutputRegister(),
i.MemoryOperand(), trapper);
1988 __ Sb(
i.InputOrZeroRegister(0),
i.MemoryOperand(1), trapper);
1991 __ Lhu(
i.OutputRegister(),
i.MemoryOperand(), trapper);
1994 __ Ulhu(
i.OutputRegister(),
i.MemoryOperand());
1997 __ Lh(
i.OutputRegister(),
i.MemoryOperand(), trapper);
2000 __ Ulh(
i.OutputRegister(),
i.MemoryOperand());
2003 __ Sh(
i.InputOrZeroRegister(0),
i.MemoryOperand(1), trapper);
2006 __ Ush(
i.InputOrZeroRegister(2),
i.MemoryOperand());
2009 __ Lw(
i.OutputRegister(),
i.MemoryOperand(), trapper);
2012 __ Ulw(
i.OutputRegister(),
i.MemoryOperand());
2014#if V8_TARGET_ARCH_RISCV64
2016 __ Lwu(
i.OutputRegister(),
i.MemoryOperand(), trapper);
2019 __ Ulwu(
i.OutputRegister(),
i.MemoryOperand());
2022 __ Ld(
i.OutputRegister(),
i.MemoryOperand(), trapper);
2025 __ Uld(
i.OutputRegister(),
i.MemoryOperand());
2028 __ Sd(
i.InputOrZeroRegister(0),
i.MemoryOperand(1), trapper);
2031 __ Usd(
i.InputOrZeroRegister(2),
i.MemoryOperand());
2035 __ Sw(
i.InputOrZeroRegister(0),
i.MemoryOperand(1), trapper);
2038 __ Usw(
i.InputOrZeroRegister(2),
i.MemoryOperand());
2040 case kRiscvLoadFloat: {
2041 __ LoadFloat(
i.OutputSingleRegister(),
i.MemoryOperand(), trapper);
2044 case kRiscvULoadFloat: {
2045 __ ULoadFloat(
i.OutputSingleRegister(),
i.MemoryOperand());
2048 case kRiscvStoreFloat: {
2050 FPURegister ft =
i.InputOrZeroSingleRegister(0);
2054 __ StoreFloat(ft, operand, trapper);
2057 case kRiscvUStoreFloat: {
2060 FPURegister ft =
i.InputOrZeroSingleRegister(index);
2064 __ UStoreFloat(ft, operand);
2067 case kRiscvLoadDouble:
2068 __ LoadDouble(
i.OutputDoubleRegister(),
i.MemoryOperand(), trapper);
2070 case kRiscvULoadDouble: {
2071 __ ULoadDouble(
i.OutputDoubleRegister(),
i.MemoryOperand());
2074 case kRiscvStoreDouble: {
2075 FPURegister ft =
i.InputOrZeroDoubleRegister(0);
2079 __ StoreDouble(ft,
i.MemoryOperand(1), trapper);
2082 case kRiscvUStoreDouble: {
2083 FPURegister ft =
i.InputOrZeroDoubleRegister(2);
2087 __ UStoreDouble(ft,
i.MemoryOperand());
2095 if (
instr->InputAt(0)->IsFPRegister()) {
2100 __ Push(
i.InputOrZeroRegister(0));
2105 int reverse_slot =
i.InputInt32(0);
2108 if (
instr->OutputAt(0)->IsFPRegister()) {
2115 i.OutputSingleRegister(0),
2123 case kRiscvStackClaim: {
2124 __ SubWord(sp, sp, Operand(
i.InputInt32(0)));
2129 case kRiscvStoreToStackSlot: {
2130 if (
instr->InputAt(0)->IsFPRegister()) {
2131 if (
instr->InputAt(0)->IsSimd128Register()) {
2133 if (
i.InputInt32(1) != 0) {
2138 __ vs(
i.InputSimd128Register(0), dst, 0, E8);
2140#if V8_TARGET_ARCH_RISCV64
2141 __ StoreDouble(
i.InputDoubleRegister(0),
2143#elif V8_TARGET_ARCH_RISCV32
2144 if (
instr->InputAt(0)->IsDoubleRegister()) {
2145 __ StoreDouble(
i.InputDoubleRegister(0),
2147 }
else if (
instr->InputAt(0)->IsFloatRegister()) {
2148 __ StoreFloat(
i.InputSingleRegister(0),
2154 __ StoreWord(
i.InputOrZeroRegister(0),
MemOperand(sp,
i.InputInt32(1)));
2158#if V8_TARGET_ARCH_RISCV64
2159 case kRiscvByteSwap64: {
2164 case kRiscvByteSwap32: {
2168 case kAtomicLoadInt8:
2169#if V8_TARGET_ARCH_RISCV64
2174 case kAtomicLoadUint8:
2177 case kAtomicLoadInt16:
2178#if V8_TARGET_ARCH_RISCV64
2183 case kAtomicLoadUint16:
2186 case kAtomicLoadWord32:
2187#if V8_TARGET_ARCH_RISCV64
2195#if V8_TARGET_ARCH_RISCV64
2196 case kRiscvWord64AtomicLoadUint64:
2199 case kRiscvWord64AtomicStoreWord64:
2203 case kAtomicStoreWord8:
2206 case kAtomicStoreWord16:
2209 case kAtomicStoreWord32:
2212#if V8_TARGET_ARCH_RISCV32
2213 case kRiscvWord32AtomicPairLoad: {
2215 __ AddWord(a0,
i.InputRegister(0),
i.InputRegister(1));
2218 __ CallCFunction(ExternalReference::atomic_pair_load_function(), 1, 0);
2222 case kRiscvWord32AtomicPairStore: {
2224 __ AddWord(a0,
i.InputRegister(0),
i.InputRegister(1));
2227 __ CallCFunction(ExternalReference::atomic_pair_store_function(), 3, 0);
2231#define ATOMIC64_BINOP_ARITH_CASE(op, instr, external) \
2232 case kRiscvWord32AtomicPair##op: \
2233 ASSEMBLE_ATOMIC64_ARITH_BINOP(instr, external); \
2235 ATOMIC64_BINOP_ARITH_CASE(Add, AddPair, atomic_pair_add_function)
2236 ATOMIC64_BINOP_ARITH_CASE(Sub, SubPair, atomic_pair_sub_function)
2237#undef ATOMIC64_BINOP_ARITH_CASE
2238#define ATOMIC64_BINOP_LOGIC_CASE(op, instr, external) \
2239 case kRiscvWord32AtomicPair##op: \
2240 ASSEMBLE_ATOMIC64_LOGIC_BINOP(instr, external); \
2242 ATOMIC64_BINOP_LOGIC_CASE(And, AndPair, atomic_pair_and_function)
2243 ATOMIC64_BINOP_LOGIC_CASE(Or, OrPair, atomic_pair_or_function)
2244 ATOMIC64_BINOP_LOGIC_CASE(Xor, XorPair, atomic_pair_xor_function)
2245 case kRiscvWord32AtomicPairExchange: {
2249 __ AddWord(a0,
i.InputRegister(0),
i.InputRegister(1));
2250 __ CallCFunction(ExternalReference::atomic_pair_exchange_function(), 3,
2255 case kRiscvWord32AtomicPairCompareExchange: {
2259 __ add(a0,
i.InputRegister(0),
i.InputRegister(1));
2261 ExternalReference::atomic_pair_compare_exchange_function(), 5, 0);
2266 case kAtomicExchangeInt8:
2270 case kAtomicExchangeUint8:
2276#if V8_TARGET_ARCH_RISCV64
2284 case kAtomicExchangeInt16:
2288 case kAtomicExchangeUint16:
2293#if V8_TARGET_ARCH_RISCV64
2302 case kAtomicExchangeWord32:
2307#if V8_TARGET_ARCH_RISCV64
2316#if V8_TARGET_ARCH_RISCV64
2317 case kRiscvWord64AtomicExchangeUint64:
2321 case kAtomicCompareExchangeInt8:
2325 case kAtomicCompareExchangeUint8:
2330#if V8_TARGET_ARCH_RISCV64
2339 case kAtomicCompareExchangeInt16:
2343 case kAtomicCompareExchangeUint16:
2348#if V8_TARGET_ARCH_RISCV64
2357 case kAtomicCompareExchangeWord32:
2360 __ Sll32(
i.InputRegister(2),
i.InputRegister(2), 0);
2363#if V8_TARGET_ARCH_RISCV64
2372#if V8_TARGET_ARCH_RISCV64
2373 case kRiscvWord64AtomicCompareExchangeUint64:
2376#define ATOMIC_BINOP_CASE(op, inst32, inst64) \
2377 case kAtomic##op##Int8: \
2378 DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
2379 ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst32, 32); \
2381 case kAtomic##op##Uint8: \
2382 switch (AtomicWidthField::decode(opcode)) { \
2383 case AtomicWidth::kWord32: \
2384 ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst32, 32); \
2386 case AtomicWidth::kWord64: \
2387 ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst64, 64); \
2391 case kAtomic##op##Int16: \
2392 DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
2393 ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst32, 32); \
2395 case kAtomic##op##Uint16: \
2396 switch (AtomicWidthField::decode(opcode)) { \
2397 case AtomicWidth::kWord32: \
2398 ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst32, 32); \
2400 case AtomicWidth::kWord64: \
2401 ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst64, 64); \
2405 case kAtomic##op##Word32: \
2406 switch (AtomicWidthField::decode(opcode)) { \
2407 case AtomicWidth::kWord32: \
2408 ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst32); \
2410 case AtomicWidth::kWord64: \
2411 ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst64, 64); \
2415 case kRiscvWord64Atomic##op##Uint64: \
2416 ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst64); \
2423#undef ATOMIC_BINOP_CASE
#elif V8_TARGET_ARCH_RISCV32
#define ATOMIC_BINOP_CASE(op, inst32, inst64, amoinst32)                   \
  case kAtomic##op##Int8:                                                  \
    ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst32, 32);                \
  case kAtomic##op##Uint8:                                                 \
    ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst32, 32);               \
  case kAtomic##op##Int16:                                                 \
    ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst32, 32);               \
  case kAtomic##op##Uint16:                                                \
    ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst32, 32);              \
  case kAtomic##op##Word32:                                                \
    __ AddWord(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
    __ amoinst32(true, true, i.OutputRegister(0), i.TempRegister(0),       \
                 i.InputRegister(2));                                      \
#undef ATOMIC_BINOP_CASE
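// Sketch of how the RISCV32 ATOMIC_BINOP_CASE is used: the Word32 case maps
// straight onto an AMO instruction with both ordering bits set (the two
// `true` arguments, presumably aq/rl), e.g. an instantiation along the lines
// of ATOMIC_BINOP_CASE(Add, Add32, Add64, amoadd_w); the sub-word cases still
// go through the LL/SC *_EXT loop defined earlier. The concrete
// instantiations live in the elided lines.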
2450 case kRiscvAssertEqual:
2452 i.InputRegister(0), Operand(
i.InputRegister(1)));
2454#if V8_TARGET_ARCH_RISCV64
2455 case kRiscvStoreCompressTagged: {
2457 __ StoreTaggedField(
i.InputOrZeroRegister(0), mem, trapper);
2460 case kRiscvLoadDecompressTaggedSigned: {
2464 __ DecompressTaggedSigned(
result, operand, trapper);
2467 case kRiscvLoadDecompressTagged: {
2471 __ DecompressTagged(
result, operand, trapper);
2474 case kRiscvLoadDecodeSandboxedPointer: {
2475 __ LoadSandboxedPointerField(
i.OutputRegister(),
i.MemoryOperand(),
2479 case kRiscvStoreEncodeSandboxedPointer: {
2481 __ StoreSandboxedPointerField(
i.InputOrZeroRegister(0), mem, trapper);
2484 case kRiscvStoreIndirectPointer: {
2486 __ StoreIndirectPointerField(
i.InputOrZeroRegister(0), mem, trapper);
2489 case kRiscvAtomicLoadDecompressTaggedSigned:
2490 __ AtomicDecompressTaggedSigned(
i.OutputRegister(),
i.MemoryOperand(),
2493 case kRiscvAtomicLoadDecompressTagged:
2494 __ AtomicDecompressTagged(
i.OutputRegister(),
i.MemoryOperand(), trapper);
2496 case kRiscvAtomicStoreCompressTagged: {
2499 __ AtomicStoreTaggedField(
i.InputOrZeroRegister(index), mem, trapper);
2502 case kRiscvLoadDecompressProtected: {
2503 __ DecompressProtected(
i.OutputRegister(),
i.MemoryOperand(), trapper);
2509 auto memOperand =
i.MemoryOperand(1);
2511 if (memOperand.offset() != 0) {
2512 __ AddWord(dst, memOperand.rm(), memOperand.offset());
2515 __ vs(
i.InputSimd128Register(0), dst, 0, VSew::E8);
2520 Register src =
i.MemoryOperand().offset() == 0 ?
i.MemoryOperand().rm()
2522 if (
i.MemoryOperand().offset() != 0) {
2523 __ AddWord(src,
i.MemoryOperand().rm(),
i.MemoryOperand().offset());
2526 __ vl(
i.OutputSimd128Register(), src, 0, VSew::E8);
2529 case kRiscvS128Zero: {
2532 __ vmv_vx(dst, zero_reg);
2535 case kRiscvS128Load32Zero: {
2542 case kRiscvS128Load64Zero: {
2545#if V8_TARGET_ARCH_RISCV64
2548#elif V8_TARGET_ARCH_RISCV32
2554 case kRiscvS128LoadLane: {
2558 __ LoadLane(sz, dst,
i.InputUint8(1),
i.MemoryOperand(2), trapper);
2561 case kRiscvS128StoreLane: {
2565 __ StoreLane(sz, src,
i.InputUint8(1),
i.MemoryOperand(2), trapper);
2568 case kRiscvS128Load64ExtendS: {
2570#if V8_TARGET_ARCH_RISCV64
2573#elif V8_TARGET_ARCH_RISCV32
2581 case kRiscvS128Load64ExtendU: {
2583#if V8_TARGET_ARCH_RISCV64
2586#elif V8_TARGET_ARCH_RISCV32
2594 case kRiscvS128LoadSplat: {
2596 switch (
i.InputInt8(2)) {
2610#if V8_TARGET_ARCH_RISCV64
2613#elif V8_TARGET_ARCH_RISCV32
2623 case kRiscvS128AllOnes: {
2625 __ vmv_vx(
i.OutputSimd128Register(), zero_reg);
2626 __ vnot_vv(
i.OutputSimd128Register(),
i.OutputSimd128Register());
2629 case kRiscvS128Select: {
2632 i.InputSimd128Register(0));
2642 __ vnot_vv(
i.OutputSimd128Register(),
i.InputSimd128Register(0));
    case kRiscvS128Const: {
      *reinterpret_cast<uint64_t*>(imm) =
      *(reinterpret_cast<uint64_t*>(imm) + 1) =
      __ WasmRvvS128const(dst, imm);
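      // kRiscvS128Const materializes the 16-byte immediate by writing it into
      // a small local buffer (`imm`, presumably a uint8_t[16]) as two 64-bit
      // halves and letting WasmRvvS128const load that buffer into the
      // destination vector register.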
2655 case kRiscvVrgather: {
2657 if (!(
instr->InputAt(1)->IsImmediate())) {
2658 index =
i.InputSimd128Register(1);
2660#if V8_TARGET_ARCH_RISCV64
2666#elif V8_TARGET_ARCH_RISCV32
2667 int64_t intput_int64 =
i.InputInt64(1);
2669 memcpy(input_int32, &intput_int64,
sizeof(intput_int64));
2679 if (
i.OutputSimd128Register() ==
i.InputSimd128Register(0)) {
2683 __ vrgather_vv(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2688 case kRiscvVslidedown: {
2690 if (
instr->InputAt(1)->IsImmediate()) {
2691 DCHECK(is_uint5(
i.InputInt32(1)));
2692 __ vslidedown_vi(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2695 __ vslidedown_vx(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2696 i.InputRegister(1));
2700 case kRiscvI8x16ExtractLaneU: {
2705 __ slli(
i.OutputRegister(),
i.OutputRegister(),
sizeof(
void*) * 8 - 8);
2706 __ srli(
i.OutputRegister(),
i.OutputRegister(),
sizeof(
void*) * 8 - 8);
2709 case kRiscvI8x16ExtractLaneS: {
2716 case kRiscvI16x8ExtractLaneU: {
2721 __ slli(
i.OutputRegister(),
i.OutputRegister(),
sizeof(
void*) * 8 - 16);
2722 __ srli(
i.OutputRegister(),
i.OutputRegister(),
sizeof(
void*) * 8 - 16);
2725 case kRiscvI16x8ExtractLaneS: {
2732 case kRiscvI8x16ShrU: {
2734 if (
instr->InputAt(1)->IsRegister()) {
2735 __ andi(
i.InputRegister(1),
i.InputRegister(1), 8 - 1);
2736 __ vsrl_vx(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2737 i.InputRegister(1));
2739 __ vsrl_vi(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2740 i.InputInt5(1) % 8);
2744 case kRiscvI16x8ShrU: {
2746 if (
instr->InputAt(1)->IsRegister()) {
2747 __ andi(
i.InputRegister(1),
i.InputRegister(1), 16 - 1);
2748 __ vsrl_vx(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2749 i.InputRegister(1));
2751 __ vsrl_vi(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2752 i.InputInt5(1) % 16);
2756 case kRiscvI32x4TruncSatF64x2SZero: {
2759 __ vmfeq_vv(v0,
i.InputSimd128Register(0),
i.InputSimd128Register(0));
2762 __ VU.set(FPURoundingMode::RTZ);
2767 case kRiscvI32x4TruncSatF64x2UZero: {
2770 __ vmfeq_vv(v0,
i.InputSimd128Register(0),
i.InputSimd128Register(0));
2773 __ VU.set(FPURoundingMode::RTZ);
2778 case kRiscvI32x4ShrU: {
2780 if (
instr->InputAt(1)->IsRegister()) {
2781 __ vsrl_vx(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2782 i.InputRegister(1));
2784 __ vsrl_vi(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2785 i.InputInt5(1) % 32);
2789 case kRiscvI64x2ShrU: {
2791 if (
instr->InputAt(1)->IsRegister()) {
2792 __ vsrl_vx(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2793 i.InputRegister(1));
2795 if (is_uint5(
i.InputInt6(1) % 64)) {
2796 __ vsrl_vi(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2797 i.InputInt6(1) % 64);
2800 __ vsrl_vx(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2806 case kRiscvI8x16ShrS: {
2808 if (
instr->InputAt(1)->IsRegister()) {
2809 __ vsra_vx(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2810 i.InputRegister(1));
2812 __ vsra_vi(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2813 i.InputInt5(1) % 8);
2817 case kRiscvI16x8ShrS: {
2819 if (
instr->InputAt(1)->IsRegister()) {
2820 __ vsra_vx(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2821 i.InputRegister(1));
2823 __ vsra_vi(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2824 i.InputInt5(1) % 16);
2828 case kRiscvI32x4ShrS: {
2830 if (
instr->InputAt(1)->IsRegister()) {
2831 __ vsra_vx(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2832 i.InputRegister(1));
2834 __ vsra_vi(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2835 i.InputInt5(1) % 32);
2839 case kRiscvI64x2ShrS: {
2841 if (
instr->InputAt(1)->IsRegister()) {
2842 __ vsra_vx(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2843 i.InputRegister(1));
2845 if (is_uint5(
i.InputInt6(1) % 64)) {
2846 __ vsra_vi(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2847 i.InputInt6(1) % 64);
2850 __ vsra_vx(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2856 case kRiscvI32x4ExtractLane: {
2857 __ WasmRvvExtractLane(
i.OutputRegister(),
i.InputSimd128Register(0),
2858 i.InputInt8(1), E32, m1);
2863 __ vmv_vv(
i.OutputSimd128Register(),
i.InputSimd128Register(0));
2864 __ vmslt_vx(v0,
i.InputSimd128Register(0), zero_reg);
2865 __ vneg_vv(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2869#if V8_TARGET_ARCH_RISCV64
2870 case kRiscvI64x2ExtractLane: {
2871 __ WasmRvvExtractLane(
i.OutputRegister(),
i.InputSimd128Register(0),
2872 i.InputInt8(1), E64, m1);
2875#elif V8_TARGET_ARCH_RISCV32
2876 case kRiscvI64x2ExtractLane: {
2877 uint8_t imm_lane_idx =
i.InputInt8(1);
2880 (imm_lane_idx << 0x1) + 1);
2883 (imm_lane_idx << 0x1));
2888 case kRiscvI8x16Shl: {
2890 if (
instr->InputAt(1)->IsRegister()) {
2891 __ vsll_vx(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2892 i.InputRegister(1));
2894 __ vsll_vi(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2895 i.InputInt5(1) % 8);
2899 case kRiscvI16x8Shl: {
2901 if (
instr->InputAt(1)->IsRegister()) {
2902 __ vsll_vx(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2903 i.InputRegister(1));
2905 __ vsll_vi(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2906 i.InputInt5(1) % 16);
2910 case kRiscvI32x4Shl: {
2912 if (
instr->InputAt(1)->IsRegister()) {
2913 __ vsll_vx(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2914 i.InputRegister(1));
2916 __ vsll_vi(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2917 i.InputInt5(1) % 32);
2921 case kRiscvI64x2Shl: {
2923 if (
instr->InputAt(1)->IsRegister()) {
2924 __ vsll_vx(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2925 i.InputRegister(1));
2927 if (is_int5(
i.InputInt6(1) % 64)) {
2928 __ vsll_vi(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2929 i.InputInt6(1) % 64);
2932 __ vsll_vx(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2938 case kRiscvI8x16ReplaceLane: {
2945 __ vmerge_vx(dst,
i.InputRegister(2), src);
2948 case kRiscvI16x8ReplaceLane: {
2954 __ vmerge_vx(dst,
i.InputRegister(2), src);
2957#if V8_TARGET_ARCH_RISCV64
2958 case kRiscvI64x2ReplaceLane: {
2964 __ vmerge_vx(dst,
i.InputRegister(2), src);
2967#elif V8_TARGET_ARCH_RISCV32
2968 case kRiscvI64x2ReplaceLaneI32Pair: {
2971 Register int64_low =
i.InputRegister(2);
2972 Register int64_high =
i.InputRegister(3);
2984 case kRiscvI32x4ReplaceLane: {
2990 __ vmerge_vx(dst,
i.InputRegister(2), src);
2993 case kRiscvV128AnyTrue: {
3001 __ beq(dst, zero_reg, &t);
3006 case kRiscvVAllTrue: {
3014 __ beqz(dst, ¬alltrue);
3016 __ bind(¬alltrue);
    case kRiscvI8x16Shuffle: {
      VRegister dst = i.OutputSimd128Register(),
                src0 = i.InputSimd128Register(0),
                src1 = i.InputSimd128Register(1);
#if V8_TARGET_ARCH_RISCV64
      int64_t imm1 = make_uint64(i.InputInt32(3), i.InputInt32(2));
      int64_t imm2 = make_uint64(i.InputInt32(5), i.InputInt32(4));
#elif V8_TARGET_ARCH_RISCV32
      } else if (dst == src1) {
3060 case kRiscvI8x16Popcnt: {
3061 VRegister dst =
i.OutputSimd128Register(),
3062 src =
i.InputSimd128Register(0);
3071 __ vadd_vi(dst, dst, 1,
Mask);
3079 case kRiscvF64x2NearestInt: {
3080 __ Round_d(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
3084 case kRiscvF64x2Trunc: {
3085 __ Trunc_d(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
3089 case kRiscvF64x2Sqrt: {
3091 __ vfsqrt_v(
i.OutputSimd128Register(),
i.InputSimd128Register(0));
3094 case kRiscvF64x2Abs: {
3096 __ vfabs_vv(
i.OutputSimd128Register(),
i.InputSimd128Register(0));
3099 case kRiscvF64x2Ceil: {
3100 __ Ceil_d(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
3104 case kRiscvF64x2Floor: {
3105 __ Floor_d(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
3109 case kRiscvF64x2ReplaceLane: {
3113 __ vfmerge_vf(
i.OutputSimd128Register(),
i.InputSingleRegister(2),
3114 i.InputSimd128Register(0));
3117 case kRiscvF64x2Pmax: {
3119 __ vmflt_vv(v0,
i.InputSimd128Register(0),
i.InputSimd128Register(1));
3120 __ vmerge_vv(
i.OutputSimd128Register(),
i.InputSimd128Register(1),
3121 i.InputSimd128Register(0));
3124 case kRiscvF64x2Pmin: {
3126 __ vmflt_vv(v0,
i.InputSimd128Register(1),
i.InputSimd128Register(0));
3127 __ vmerge_vv(
i.OutputSimd128Register(),
i.InputSimd128Register(1),
3128 i.InputSimd128Register(0));
3131 case kRiscvF64x2ExtractLane: {
3133 if (is_uint5(
i.InputInt8(1))) {
3144 case kRiscvF64x2PromoteLowF32x4: {
3146 if (
i.OutputSimd128Register() !=
i.InputSimd128Register(0)) {
3147 __ vfwcvt_f_f_v(
i.OutputSimd128Register(),
i.InputSimd128Register(0));
3155 case kRiscvF64x2ConvertLowI32x4S: {
3157 if (
i.OutputSimd128Register() !=
i.InputSimd128Register(0)) {
3158 __ vfwcvt_f_x_v(
i.OutputSimd128Register(),
i.InputSimd128Register(0));
3166 case kRiscvF64x2ConvertLowI32x4U: {
3168 if (
i.OutputSimd128Register() !=
i.InputSimd128Register(0)) {
3169 __ vfwcvt_f_xu_v(
i.OutputSimd128Register(),
i.InputSimd128Register(0));
3177 case kRiscvF64x2Qfma: {
3179 __ vfmadd_vv(
i.InputSimd128Register(0),
i.InputSimd128Register(1),
3180 i.InputSimd128Register(2));
3181 __ vmv_vv(
i.OutputSimd128Register(),
i.InputSimd128Register(0));
3184 case kRiscvF64x2Qfms: {
3186 __ vfnmsub_vv(
i.InputSimd128Register(0),
i.InputSimd128Register(1),
3187 i.InputSimd128Register(2));
3188 __ vmv_vv(
i.OutputSimd128Register(),
i.InputSimd128Register(0));
3191 case kRiscvF32x4ExtractLane: {
3198 case kRiscvF32x4Trunc: {
3199 __ Trunc_f(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
3203 case kRiscvF32x4NearestInt: {
3204 __ Round_f(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
3208 case kRiscvF32x4DemoteF64x2Zero: {
3210 __ vfncvt_f_f_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0));
3213 __ vmerge_vx(
i.OutputSimd128Register(), zero_reg,
3214 i.OutputSimd128Register());
3217 case kRiscvF32x4Abs: {
3219 __ vfabs_vv(
i.OutputSimd128Register(),
i.InputSimd128Register(0));
3222 case kRiscvF32x4Ceil: {
3223 __ Ceil_f(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
3227 case kRiscvF32x4Floor: {
3228 __ Floor_f(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
3232 case kRiscvF32x4UConvertI32x4: {
3234 __ VU.set(FPURoundingMode::RTZ);
3235 __ vfcvt_f_xu_v(
i.OutputSimd128Register(),
i.InputSimd128Register(0));
3238 case kRiscvF32x4SConvertI32x4: {
3240 __ VU.set(FPURoundingMode::RTZ);
3241 __ vfcvt_f_x_v(
i.OutputSimd128Register(),
i.InputSimd128Register(0));
3244 case kRiscvF32x4ReplaceLane: {
3250 i.InputSimd128Register(0));
3253 case kRiscvF32x4Pmax: {
3255 __ vmflt_vv(v0,
i.InputSimd128Register(0),
i.InputSimd128Register(1));
3256 __ vmerge_vv(
i.OutputSimd128Register(),
i.InputSimd128Register(1),
3257 i.InputSimd128Register(0));
3260 case kRiscvF32x4Pmin: {
3262 __ vmflt_vv(v0,
i.InputSimd128Register(1),
i.InputSimd128Register(0));
3263 __ vmerge_vv(
i.OutputSimd128Register(),
i.InputSimd128Register(1),
3264 i.InputSimd128Register(0));
3267 case kRiscvF32x4Sqrt: {
3269 __ vfsqrt_v(
i.OutputSimd128Register(),
i.InputSimd128Register(0));
3272 case kRiscvF32x4Qfma: {
3274 __ vfmadd_vv(
i.InputSimd128Register(0),
i.InputSimd128Register(1),
3275 i.InputSimd128Register(2));
3276 __ vmv_vv(
i.OutputSimd128Register(),
i.InputSimd128Register(0));
3279 case kRiscvF32x4Qfms: {
3281 __ vfnmsub_vv(
i.InputSimd128Register(0),
i.InputSimd128Register(1),
3282 i.InputSimd128Register(2));
3283 __ vmv_vv(
i.OutputSimd128Register(),
i.InputSimd128Register(0));
3286 case kRiscvI64x2SConvertI32x4Low: {
3293 case kRiscvI64x2SConvertI32x4High: {
3300 case kRiscvI64x2UConvertI32x4Low: {
3306 case kRiscvI64x2UConvertI32x4High: {
3313 case kRiscvI32x4SConvertF32x4: {
3315 __ VU.set(FPURoundingMode::RTZ);
3316 __ vmfeq_vv(v0,
i.InputSimd128Register(0),
i.InputSimd128Register(0));
3317 if (
i.OutputSimd128Register() !=
i.InputSimd128Register(0)) {
3318 __ vmv_vx(
i.OutputSimd128Register(), zero_reg);
3319 __ vfcvt_x_f_v(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
3328 case kRiscvI32x4UConvertF32x4: {
3330 __ VU.set(FPURoundingMode::RTZ);
3331 __ vmfeq_vv(v0,
i.InputSimd128Register(0),
i.InputSimd128Register(0));
3332 if (
i.OutputSimd128Register() !=
i.InputSimd128Register(0)) {
3333 __ vmv_vx(
i.OutputSimd128Register(), zero_reg);
3334 __ vfcvt_xu_f_v(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
3343#if V8_TARGET_ARCH_RISCV32
3344 case kRiscvI64x2SplatI32Pair: {
3346 __ vmv_vi(v0, 0b0101);
3348 __ vmerge_vx(
i.OutputSimd128Register(),
i.InputRegister(0),
3353 case kRiscvVwaddVv: {
3355 __ vwadd_vv(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
3356 i.InputSimd128Register(1));
3359 case kRiscvVwadduVv: {
3361 __ vwaddu_vv(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
3362 i.InputSimd128Register(1));
    case kRiscvVwadduWx: {
      // ...
      if (instr->InputAt(1)->IsRegister()) {
        __ vwaddu_wx(i.OutputSimd128Register(), i.InputSimd128Register(0),
                     i.InputRegister(1));
      } else {
        // ...
        __ vwaddu_wx(i.OutputSimd128Register(), i.InputSimd128Register(0),
                     kScratchReg);
      }
      break;
    }
    // ...
      if (instr->InputAt(1)->IsSimd128Register()) {
        __ vdivu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                    i.InputSimd128Register(1));
      } else if ((instr->InputAt(1)->IsRegister())) {
        __ vdivu_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
                    i.InputRegister(1));
      } else {
        // ...
        __ vdivu_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
                    kScratchReg);
      }
      break;
    }
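    // kRiscvVnclipu/kRiscvVnclip narrow each source element to half its width
    // with unsigned/signed saturation; the vv/vx/vi form is chosen from where
    // the shift-amount operand lives (vector register, scalar register, or
    // immediate).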
    case kRiscvVnclipu: {
      // ...
      if (instr->InputAt(1)->IsSimd128Register()) {
        __ vnclipu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                      i.InputSimd128Register(1));
      } else if (instr->InputAt(1)->IsRegister()) {
        __ vnclipu_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
                      i.InputRegister(1));
      } else {
        // ...
        __ vnclipu_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
                      i.InputInt8(1));
      }
      break;
    }
    case kRiscvVnclip: {
      // ...
      if (instr->InputAt(1)->IsSimd128Register()) {
        __ vnclip_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                     i.InputSimd128Register(1));
      } else if (instr->InputAt(1)->IsRegister()) {
        __ vnclip_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
                     i.InputRegister(1));
      } else {
        // ...
        __ vnclip_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
                     i.InputInt8(1));
      }
      break;
    }
    // ...
      __ vwmul_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kRiscvVwmulu: {
      // ...
      __ vwmulu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                   i.InputSimd128Register(1));
      break;
    }
    // ...
      if (instr->InputAt(0)->IsRegister()) {
        __ vmv_sx(i.OutputSimd128Register(), i.InputRegister(0));
      }
      // ...
      __ vmv_xs(i.OutputRegister(), i.InputSimd128Register(0));
      break;
    }
    case kRiscvVcompress: {
      // ...
      if (instr->InputAt(1)->IsSimd128Register()) {
        __ vcompress_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                        i.InputSimd128Register(1));
      } else {
        // ...
        __ vcompress_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                        v0);
      }
      break;
    }
    // ...
      if (instr->InputAt(1)->IsRegister()) {
        __ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
                   i.InputRegister(1));
      } else if (instr->InputAt(1)->IsSimd128Register()) {
        __ vsll_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                   i.InputSimd128Register(1));
      } else {
        if (is_int5(i.InputInt64(1))) {
          __ vsll_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
                     i.InputInt8(1));
        } else {
          // ...
          __ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
                     kScratchReg);
        }
      }
      break;
    }
    // ...
      if (i.InputInt8(4)) {
        DCHECK(i.OutputSimd128Register() != i.InputSimd128Register(0));
        __ vmv_vx(i.OutputSimd128Register(), zero_reg);
      }
      if (instr->InputAt(1)->IsRegister()) {
        __ vmslt_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
                    i.InputRegister(1));
      } else if (instr->InputAt(1)->IsSimd128Register()) {
        __ vmslt_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                    i.InputSimd128Register(1));
      } else {
        if (is_int5(i.InputInt64(1))) {
          __ vmslt_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
                      i.InputInt8(1));
        } else {
          // ...
          __ vmslt_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
                      kScratchReg);
        }
      }
      break;
    }
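    // The remaining kRiscvV* cases are mostly one-to-one mappings from the
    // generic SIMD opcodes onto single RVV instructions; element width and
    // register grouping are taken from static operands of the instruction.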
    case kRiscvVaddVv: {
      // ...
      __ vadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kRiscvVsubVv: {
      // ...
      __ vsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    // ...
      if (instr->InputAt(0)->IsSimd128Register()) {
        __ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
      } else if (instr->InputAt(0)->IsRegister()) {
        __ vmv_vx(i.OutputSimd128Register(), i.InputRegister(0));
      } else {
        if (i.ToConstant(instr->InputAt(0)).FitsInInt32() &&
            is_int8(i.InputInt32(0))) {
          __ vmv_vi(i.OutputSimd128Register(), i.InputInt8(0));
        } else {
          // ...
        }
      }
      break;
    }
    case kRiscvVfmvVf: {
      // ...
      __ vfmv_vf(i.OutputSimd128Register(), i.InputDoubleRegister(0));
      break;
    }
    case kRiscvVnegVv: {
      // ...
      __ vneg_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kRiscvVfnegVv: {
      // ...
      __ vfneg_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kRiscvVmaxuVv: {
      // ...
      __ vmaxu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    // ...
      if (instr->InputAt(1)->IsSimd128Register()) {
        __ vmax_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                   i.InputSimd128Register(1));
      } else if (instr->InputAt(1)->IsRegister()) {
        __ vmax_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
                   i.InputRegister(1));
      } else {
        // ...
        __ vmax_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
                   kScratchReg);
      }
      break;
    }
    case kRiscvVminuVv: {
      // ...
      __ vminu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kRiscvVminsVv: {
      // ...
      __ vmin_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kRiscvVmulVv: {
      // ...
      __ vmul_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kRiscvVgtsVv: {
      __ WasmRvvGtS(i.OutputSimd128Register(), i.InputSimd128Register(0),
                    i.InputSimd128Register(1), VSew(i.InputInt8(2)),
                    Vlmul(i.InputInt8(3)));
      break;
    }
    case kRiscvVgesVv: {
      __ WasmRvvGeS(i.OutputSimd128Register(), i.InputSimd128Register(0),
                    i.InputSimd128Register(1), VSew(i.InputInt8(2)),
                    Vlmul(i.InputInt8(3)));
      break;
    }
    case kRiscvVgeuVv: {
      __ WasmRvvGeU(i.OutputSimd128Register(), i.InputSimd128Register(0),
                    i.InputSimd128Register(1), VSew(i.InputInt8(2)),
                    Vlmul(i.InputInt8(3)));
      break;
    }
    case kRiscvVgtuVv: {
      __ WasmRvvGtU(i.OutputSimd128Register(), i.InputSimd128Register(0),
                    i.InputSimd128Register(1), VSew(i.InputInt8(2)),
                    Vlmul(i.InputInt8(3)));
      break;
    }
    // ...
      __ WasmRvvEq(i.OutputSimd128Register(), i.InputSimd128Register(0),
                   i.InputSimd128Register(1), VSew(i.InputInt8(2)),
                   Vlmul(i.InputInt8(3)));
      break;
    }
    // ...
      __ WasmRvvNe(i.OutputSimd128Register(), i.InputSimd128Register(0),
                   i.InputSimd128Register(1), VSew(i.InputInt8(2)),
                   Vlmul(i.InputInt8(3)));
      break;
    }
    case kRiscvVaddSatSVv: {
      // ...
      __ vsadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kRiscvVaddSatUVv: {
      // ...
      __ vsaddu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                   i.InputSimd128Register(1));
      break;
    }
    case kRiscvVsubSatSVv: {
      // ...
      __ vssub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kRiscvVsubSatUVv: {
      // ...
      __ vssubu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                   i.InputSimd128Register(1));
      break;
    }
    case kRiscvVfaddVv: {
      // ...
      __ vfadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kRiscvVfsubVv: {
      // ...
      __ vfsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kRiscvVfmulVv: {
      // ...
      __ vfmul_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kRiscvVfdivVv: {
      // ...
      __ vfdiv_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kRiscvVmfeqVv: {
      // ...
      __ vmfeq_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kRiscvVmfneVv: {
      // ...
      __ vmfne_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kRiscvVmfltVv: {
      // ...
      __ vmflt_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kRiscvVmfleVv: {
      // ...
      __ vmfle_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kRiscvVfminVv: {
      // ...
      __ vfmin_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1), MaskType(i.InputInt8(4)));
      break;
    }
    case kRiscvVfmaxVv: {
      // ...
      __ vfmax_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1), MaskType(i.InputInt8(4)));
      break;
    }
    case kRiscvVandVv: {
      // ...
      __ vand_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    // ...
      __ vor_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                i.InputSimd128Register(1));
      break;
    }
    case kRiscvVxorVv: {
      // ...
      __ vxor_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputSimd128Register(1));
      break;
    }
    case kRiscvVnotVv: {
      // ...
      __ vnot_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kRiscvVmergeVx: {
      // ...
      if (instr->InputAt(0)->IsRegister()) {
        __ vmerge_vx(i.OutputSimd128Register(), i.InputRegister(0),
                     i.InputSimd128Register(1));
      } else {
        DCHECK(is_int5(i.InputInt32(0)));
        __ vmerge_vi(i.OutputSimd128Register(), i.InputInt8(0),
                     i.InputSimd128Register(1));
      }
      break;
    }
    case kRiscvVsmulVv: {
      // ...
      __ vsmul_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1));
      break;
    }
    case kRiscvVredminuVs: {
      __ vredminu_vs(i.OutputSimd128Register(), i.InputSimd128Register(0),
                     i.InputSimd128Register(1));
      break;
    }
    case kRiscvVzextVf2: {
      // ...
      __ vzext_vf2(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kRiscvVsextVf2: {
      // ...
      __ vsext_vf2(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }
    case kRiscvEnableDebugTrace: {
      // ...
    }
    case kRiscvDisableDebugTrace: {
      // ...
    }
    // ...

  switch (arch_opcode) {
#define Print(name) \
  printf("k%s", #name);
    // ...

#define UNSUPPORTED_COND(opcode, condition)                                    \
  StdoutStream{} << "Unsupported " << #opcode << " condition: \"" << condition \
                 << "\"";                                                      \
  UNIMPLEMENTED();
// ...
                            Label* tlabel, Label* flabel, bool fallthru) {
  // ...
#if V8_TARGET_ARCH_RISCV64
  // ...
#elif V8_TARGET_ARCH_RISCV32
  // ...
#endif
  // ...
#if V8_TARGET_ARCH_RISCV64
  // ...
#elif V8_TARGET_ARCH_RISCV32
  // ...
#endif
  // ...
#if V8_TARGET_ARCH_RISCV64
  // ...
#elif V8_TARGET_ARCH_RISCV32
  // ...
#endif
  // ...
#if V8_TARGET_ARCH_RISCV64
  // ...
#elif V8_TARGET_ARCH_RISCV32
  // ...
#endif
    // ...
    Operand right = i.InputOperand(1);
#if V8_TARGET_ARCH_RISCV64
    // slliw by 0 sign-extends the low 32 bits of each operand before the
    // 32-bit compare.
      __ slliw(temp0, left, 0);
      // ...
      __ slliw(temp1, right.rm(), 0);
      right = Operand(temp1);
      // ...
#endif
    __ Branch(tlabel, cc, left, right);
    // ...
    } else if (i.InputOrZeroRegister(0) != zero_reg) {
      __ Branch(tlabel, cc, i.InputRegister(0), Operand(zero_reg));
    }
#ifdef V8_TARGET_ARCH_RISCV64
    // ...
    } else if (i.InputOrZeroRegister(0) != zero_reg) {
      __ slliw(temp0, i.InputRegister(0), 0);
      __ Branch(tlabel, cc, temp0, Operand(zero_reg));
    }
#endif
    // ...
      lhs_register = i.TempRegister(0);
      __ SubWord(lhs_register, sp, offset);
    // ...
    __ Branch(tlabel, cc, lhs_register, Operand(i.InputRegister(0)));
    // ...
    FlagsConditionToConditionCmpFPU(&predicate, condition);
    // ...
    std::cout << "AssembleArchBranch Unimplemented arch_opcode:"
    // ...
  if (!fallthru) __ Branch(flabel);
}
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  // ...
}

#undef UNSUPPORTED_COND

// ...
                                            BranchInfo* branch) {
  // ...
}
#if V8_ENABLE_WEBASSEMBLY
void CodeGenerator::AssembleArchTrap(Instruction* instr,
                                     FlagsCondition condition) {
  class OutOfLineTrap final : public OutOfLineCode {
   public:
    OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
        // ...
    void Generate() override {
      RiscvOperandConverter i(gen_, instr_);
      TrapId trap_id =
          static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
      GenerateCallToTrap(trap_id);
    }

   private:
    void GenerateCallToTrap(TrapId trap_id) {
      gen_->AssembleSourcePosition(instr_);
      // ...
      __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
      ReferenceMap* reference_map =
          gen_->zone()->New<ReferenceMap>(gen_->zone());
      gen_->RecordSafepoint(reference_map);
      // ...
    }
    Instruction* instr_;
    CodeGenerator* gen_;
  };
  // ...
  Label* tlabel = ool->entry();
  // ...
}
#endif  // V8_ENABLE_WEBASSEMBLY
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  RiscvOperandConverter i(this, instr);
  // ...
#if V8_TARGET_ARCH_RISCV64
  if (instr->arch_opcode() == kRiscvTst64 ||
      instr->arch_opcode() == kRiscvTst32) {
#elif V8_TARGET_ARCH_RISCV32
  if (instr->arch_opcode() == kRiscvTst32) {
#endif
    // ...
#if V8_TARGET_ARCH_RISCV64
  } else if (instr->arch_opcode() == kRiscvAdd64 ||
             instr->arch_opcode() == kRiscvSub64) {
    // ...
  } else if (instr->arch_opcode() == kRiscvAddOvf64 ||
             instr->arch_opcode() == kRiscvSubOvf64) {
#elif V8_TARGET_ARCH_RISCV32
  } else if (instr->arch_opcode() == kRiscvAddOvf ||
             instr->arch_opcode() == kRiscvSubOvf) {
#endif
    // ...
#if V8_TARGET_ARCH_RISCV64
  } else if (instr->arch_opcode() == kRiscvMulOvf32 ||
             instr->arch_opcode() == kRiscvMulOvf64) {
#elif V8_TARGET_ARCH_RISCV32
  } else if (instr->arch_opcode() == kRiscvMulOvf32) {
#endif
    // ...
#if V8_TARGET_ARCH_RISCV64
  } else if (instr->arch_opcode() == kRiscvCmp ||
             instr->arch_opcode() == kRiscvCmp32) {
#elif V8_TARGET_ARCH_RISCV32
  } else if (instr->arch_opcode() == kRiscvCmp) {
#endif
    // ...
    Operand right = i.InputOperand(1);
#if V8_TARGET_ARCH_RISCV64
    // ...
      __ slliw(temp0, left, 0);
      // ...
      __ slliw(temp1, right.rm(), 0);
      right = Operand(temp1);
      // ...
#endif
    // ...
        if (instr->InputAt(1)->IsImmediate()) {
          if (is_int12(-right.immediate())) {
            if (right.immediate() == 0) {
              // ...
            } else {
              __ AddWord(result, left, Operand(-right.immediate()));
              // ...
            }
            // ...
          if (is_uint12(right.immediate())) {
            // ...
          }
          // ...
        }
        // ...
        Register left = i.InputOrZeroRegister(0);
        Operand right = i.InputOperand(1);
        // ...
        Register left = i.InputOrZeroRegister(1);
        Operand right = i.InputOperand(0);
        // ...
        Register left = i.InputOrZeroRegister(0);
        Operand right = i.InputOperand(1);
        // ...
        Operand right = i.InputOperand(0);
        // ...
  } else if (instr->arch_opcode() == kRiscvCmpZero) {
    // ...
    Register left = i.InputOrZeroRegister(0);
    // ...
    Register left = i.InputOrZeroRegister(0);
    // ...
    Register left = i.InputOrZeroRegister(0);
    Operand right = Operand(zero_reg);
    // ...
    Operand left = i.InputOperand(0);
    // ...
    Register left = i.InputOrZeroRegister(0);
    Operand right = Operand(zero_reg);
    // ...
    Register left = zero_reg;
    Operand right = i.InputOperand(0);
    // ...
#ifdef V8_TARGET_ARCH_RISCV64
  } else if (instr->arch_opcode() == kRiscvCmpZero32) {
    auto trim_reg = [&](Register in) -> Register {
      // ...
      __ slliw(temp, in, 0);
      // ...
    };
    auto trim_op = [&](Operand in) -> Register {
      // ...
      __ slliw(temp, in.rm(), 0);
      // ...
      __ Li(temp, in.immediate());
      __ slliw(temp, temp, 0);
      // ...
    };
    // ...
    auto left = trim_reg(i.InputOrZeroRegister(0));
    // ...
    auto left = trim_reg(i.InputOrZeroRegister(0));
    // ...
    auto left = trim_reg(i.InputOrZeroRegister(0));
    // ...
    auto left = trim_op(i.InputOperand(0));
    // ...
    auto left = trim_reg(i.InputOrZeroRegister(0));
    // ...
    auto right = trim_op(i.InputOperand(0));
    // ...
  } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
    // ...
      lhs_register = i.TempRegister(0);
      __ SubWord(lhs_register, sp, offset);
    // ...
    __ Sgtu(result, lhs_register, Operand(i.InputRegister(0)));
    // ...
  } else if (instr->arch_opcode() == kRiscvCmpD ||
             instr->arch_opcode() == kRiscvCmpS) {
    if (instr->arch_opcode() == kRiscvCmpD) {
      FPURegister left = i.InputOrZeroDoubleRegister(0);
      FPURegister right = i.InputOrZeroDoubleRegister(1);
      if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
          !__ IsDoubleZeroRegSet()) {
        // ...
      }
    } else {
      FPURegister left = i.InputOrZeroSingleRegister(0);
      FPURegister right = i.InputOrZeroSingleRegister(1);
      if ((left == kSingleRegZero || right == kSingleRegZero) &&
          !__ IsSingleZeroRegSet()) {
        // ...
      }
    }
    // ...
    FlagsConditionToConditionCmpFPU(&predicate, condition);
    // ...
  } else {
    PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
           instr->arch_opcode());
    TRACE("UNIMPLEMENTED code_generator_riscv64: %s at line %d\n",
          __FUNCTION__, __LINE__);
    // ...
  }
}
// ...
                                            BranchInfo* branch) {
  // ...
}

void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
  RiscvOperandConverter i(this, instr);
  std::vector<std::pair<int32_t, Label*>> cases;
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
  }
  AssembleArchBinarySearchSwitchRange(i.InputRegister(0), i.InputRpo(1),
                                      cases.data(),
                                      cases.data() + cases.size());
}

void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  RiscvOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  size_t const case_count = instr->InputCount() - 2;
  __ Branch(GetLabel(i.InputRpo(1)), uge, input, Operand(case_count));
  __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) {
    return GetLabel(i.InputRpo(index + 2));
  });
}
void CodeGenerator::FinishFrame(Frame* frame) {
  auto call_descriptor = linkage()->GetIncomingDescriptor();

  const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
  if (!saves_fpu.is_empty()) {
    int count = saves_fpu.Count();
    frame->AllocateSavedCalleeRegisterSlots(
        count * (kDoubleSize / kSystemPointerSize));
  }

  const RegList saves = call_descriptor->CalleeSavedRegisters();
  if (!saves.is_empty()) {
    int count = saves.Count();
    frame->AllocateSavedCalleeRegisterSlots(count);
  }
}
void CodeGenerator::AssembleConstructFrame() {
  auto call_descriptor = linkage()->GetIncomingDescriptor();
  if (frame_access_state()->has_frame()) {
    if (call_descriptor->IsCFunctionCall()) {
      if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
        __ StubPrologue(StackFrame::C_WASM_ENTRY);
        // ...
      } else {
        // ...
      }
    } else if (call_descriptor->IsJSFunctionCall()) {
      // ...
    } else {
      __ StubPrologue(info()->GetOutputStackFrameType());
      if (call_descriptor->IsAnyWasmFunctionCall() ||
          call_descriptor->IsWasmImportWrapper() ||
          call_descriptor->IsWasmCapiFunction()) {
        // ...
      }
      if (call_descriptor->IsWasmCapiFunction()) {
        // ...
      }
    }
  }

  int required_slots =
      frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();

  if (info()->is_osr()) {
    __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
    // ...
    __ RecordComment("-- OSR entrypoint --");
    // ...
#ifdef V8_ENABLE_SANDBOX_BOOL
    // With the sandbox, check that the OSR frame has the expected size:
    // sp + expected_frame_size must equal fp.
    UseScratchRegisterScope temps(masm());
    uint32_t expected_frame_size =
        // ...
    Register scratch = temps.Acquire();
    __ AddWord(scratch, sp, expected_frame_size);
    __ SbxCheck(eq, AbortReason::kOsrUnexpectedStackSize, scratch, Operand(fp));
#endif
  }
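  // For large Wasm frames the prologue double-checks the real stack limit
  // before committing the allocation: with growable stacks it calls
  // Builtin::kWasmHandleStackOverflow, otherwise it traps through
  // Builtin::kWasmStackOverflow.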
  const RegList saves = call_descriptor->CalleeSavedRegisters();
  const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();

  if (required_slots > 0) {
    // ...
    if (info()->IsWasm() && required_slots > 128) {
      // ...
      UseScratchRegisterScope temps(masm());
      Register stack_limit = temps.Acquire();
      // ...
      __ AddWord(stack_limit, stack_limit,
                 // ...
      __ Branch(&done, uge, sp, Operand(stack_limit));
      // ...
      if (v8_flags.experimental_wasm_growable_stacks) {
        // ...
            WasmHandleStackOverflowDescriptor::FrameBaseRegister());
        // ...
        __ MultiPushFPU(fp_regs_to_save);
        // ...
            WasmHandleStackOverflowDescriptor::FrameBaseRegister(), fp,
            // ...
        __ CallBuiltin(Builtin::kWasmHandleStackOverflow);
        __ MultiPopFPU(fp_regs_to_save);
        // ...
      } else {
        __ Call(static_cast<intptr_t>(Builtin::kWasmStackOverflow),
                RelocInfo::WASM_STUB_CALL);
        // ...
        ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
        // ...
      }
      // ...
    }
  }

  const int returns = frame()->GetReturnSlotCount();

  // Skip callee-saved and return slots, which are created below.
  required_slots -= saves.Count();
  // ...
  required_slots -= returns;
  if (required_slots > 0) {
    __ SubWord(sp, sp, Operand(required_slots * kSystemPointerSize));
  }

  if (!saves_fpu.is_empty()) {
    // Save callee-saved FPU registers.
    __ MultiPushFPU(saves_fpu);
  }

  if (!saves.is_empty()) {
    // Save callee-saved registers.
    __ MultiPush(saves);
  }
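  // Return value slots are allocated last, at the lowest addresses of the
  // frame, after the callee-saved registers have been pushed.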
  const int returns = frame()->GetReturnSlotCount();
  if (returns != 0) {
    // ...
  }
}

void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
  auto call_descriptor = linkage()->GetIncomingDescriptor();

  const RegList saves = call_descriptor->CalleeSavedRegisters();
  if (!saves.is_empty()) {
    __ MultiPop(saves);
  }

  const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
  if (!saves_fpu.is_empty()) {
    __ MultiPopFPU(saves_fpu);
  }

  RiscvOperandConverter g(this, nullptr);

  const int parameter_slots =
      static_cast<int>(call_descriptor->ParameterSlotCount());

  // ...
  if (parameter_slots != 0) {
    if (additional_pop_count->IsImmediate()) {
      DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
    }
    // ...
      __ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue,
                g.ToRegister(additional_pop_count),
                Operand(static_cast<intptr_t>(0)));
    // ...
  }

#if V8_ENABLE_WEBASSEMBLY
  if (call_descriptor->IsAnyWasmFunctionCall() &&
      v8_flags.experimental_wasm_growable_stacks) {
    // ...
    {
      UseScratchRegisterScope temps{masm()};
      Register scratch = temps.Acquire();
      __ LoadWord(scratch,
                  // ...
    }
    // ...
    {
      UseScratchRegisterScope temps{masm()};
      Register scratch = temps.Acquire();
      __ PrepareCallCFunction(1, scratch);
      // ...
      __ CallCFunction(ExternalReference::wasm_shrink_stack(), 1);
    }
    // ...
  }
#endif  // V8_ENABLE_WEBASSEMBLY

  // ...
      call_descriptor->IsJSFunctionCall() && parameter_slots != 0;

  if (call_descriptor->IsCFunctionCall()) {
    // ...
  } else {
    // ...
    if (additional_pop_count->IsImmediate() &&
        g.ToConstant(additional_pop_count).ToInt32() == 0) {
      // ...
    }
    // ...
  }
  // ...
    if (parameter_slots > 1) {
      // ...
    }
    // ...
    __ AddWord(sp, sp, t0);
  } else if (additional_pop_count->IsImmediate()) {
    // ...
    int additional_count = g.ToConstant(additional_pop_count).ToInt32();
    __ Drop(parameter_slots + additional_count);
  } else {
    Register pop_reg = g.ToRegister(additional_pop_count);
    __ Drop(parameter_slots);
    // ...
    __ AddWord(sp, sp, pop_reg);
  }
  // ...
}
void CodeGenerator::PrepareForDeoptimizationExits(
    ZoneDeque<DeoptimizationExit*>* exits) {
  __ ForceConstantPoolEmissionWithoutJump();
  // ...
  __ CheckTrampolinePoolQuick(total_size);
}
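// The deoptimization exits are emitted back to back, so the constant pool and
// the trampoline pool are flushed/blocked up front; pool emission cannot then
// be interleaved with the exit sequences.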
void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
                                       MachineRepresentation rep) {
  DCHECK(!source->IsImmediate());
  // ...
  if (temps.CanAcquire()) {
    // ...
  }
  // ...
}

// ...

void CodeGenerator::SetPendingMove(MoveOperands* move) {
  InstructionOperand* src = &move->source();
  InstructionOperand* dst = &move->destination();
  UseScratchRegisterScope temps(masm());
  if (src->IsConstant() && dst->IsFPLocationOperand()) {
    // ...
  } else if (src->IsAnyStackSlot() || dst->IsAnyStackSlot()) {
    RiscvOperandConverter g(this, nullptr);
    bool src_need_scratch = false;
    bool dst_need_scratch = false;
    if (src->IsAnyStackSlot()) {
      MemOperand src_mem = g.ToMemOperand(src);
      src_need_scratch =
          (!is_int16(src_mem.offset())) ||
          (((src_mem.offset() & 0b111) != 0) && !is_int16(src_mem.offset() + 4));
    }
    if (dst->IsAnyStackSlot()) {
      MemOperand dst_mem = g.ToMemOperand(dst);
      dst_need_scratch =
          (!is_int16(dst_mem.offset())) ||
          (((dst_mem.offset() & 0b111) != 0) && !is_int16(dst_mem.offset() + 4));
    }
    if (src_need_scratch || dst_need_scratch) {
      // ...
    }
  }
}
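// AssembleMove below handles each (constant | register | stack slot) source
// for gp, fp and Simd128 destinations. Simd128 values go through vl/vs with
// E8 lanes, and the slot address is first materialized with AddWord whenever
// the MemOperand has a non-zero offset.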
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  RiscvOperandConverter g(this, nullptr);
  // ...
  if (source->IsRegister()) {
    // ...
    Register src = g.ToRegister(source);
    // ...
  } else if (source->IsStackSlot()) {
    // ...
    __ LoadWord(temp, src);
    // ...
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    // ...
      switch (src.type()) {
        // ...
          if (src.ToInt32() == 0 && destination->IsStackSlot() &&
              // ...
          __ li(dst, Operand(src.ToInt32(), src.rmode()));
        // ...
          if (src.ToInt64() == 0 && destination->IsStackSlot() &&
              // ...
          __ li(dst, Operand(src.ToInt64(), src.rmode()));
        // ...
          __ li(dst, src.ToExternalReference());
        // ...
          Handle<HeapObject> src_object = src.ToHeapObject();
          // ...
            __ LoadRoot(dst, index);
          // ...
            __ li(dst, src_object);
        // ...
          Handle<HeapObject> src_object = src.ToHeapObject();
          // ...
            __ LoadCompressedTaggedRoot(dst, index);
        // ...
      }
    // ...
      __ Sw(zero_reg, dst);
    // ...
      __ LoadFPRImmediate(dst, src.ToFloat32());
    // ...
      __ LoadFPRImmediate(dst, src.ToFloat64().value());
    // ...
  } else if (source->IsFPRegister()) {
    // ...
      VRegister src = g.ToSimd128Register(source);
      // ...
        __ vmv_vv(dst, src);
      // ...
        if (dst.offset() != 0) {
          // ...
          __ AddWord(dst_r, dst.rm(), dst.offset());
        }
        __ vs(src, dst_r, 0, E8);
    // ...
      FPURegister src = g.ToDoubleRegister(source);
      // ...
        FPURegister dst = g.ToDoubleRegister(destination);
        // ...
        __ MoveDouble(dst, src);
      // ...
  } else if (source->IsFPStackSlot()) {
    // ...
      if (src.offset() != 0) {
        // ...
        __ AddWord(src_r, src.rm(), src.offset());
      }
      // ...
      if (dst.offset() != 0) {
        // ...
        __ AddWord(dst_r, dst.rm(), dst.offset());
      }
      __ vl(temp, src_r, 0, E8);
      __ vs(temp, dst_r, 0, E8);
    // ...
      __ LoadFloat(temp, src);
    // ...
      __ LoadDouble(temp, src);
    // ...
  }
  // ...
}
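// AssembleSwap moves one side into a scratch register (kScratchReg,
// kScratchDoubleReg or a Simd128 scratch) and then exchanges the two
// locations; on RISCV32 a double stack slot is swapped as two 32-bit halves
// using Lw/Sw pairs.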
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  RiscvOperandConverter g(this, nullptr);
  // ...
  if (source->IsRegister()) {
    // ...
    Register src = g.ToRegister(source);
    // ...
  }
  // ...
    if (source->IsFloatRegister() || source->IsDoubleRegister()) {
      // ...
      FPURegister src = g.ToDoubleRegister(source);
      FPURegister dst = g.ToDoubleRegister(destination);
      // ...
    } else {
      DCHECK(source->IsSimd128Register());
      VRegister src = g.ToDoubleRegister(source).toV();
      VRegister dst = g.ToDoubleRegister(destination).toV();
      // ...
      __ vmv_vv(temp, src);
      __ vmv_vv(src, dst);
      __ vmv_vv(dst, temp);
    }
  // ...
  if (source->IsRegister()) {
    // ...
    Register src = g.ToRegister(source);
    // ...
    __ LoadWord(src, dst);
    __ StoreWord(temp, dst);
  }
  // ...
    if (source->IsFloatRegister()) {
      // ...
      __ fmv_s(temp, src);
      __ LoadFloat(src, dst);
      __ StoreFloat(temp, dst);
    } else if (source->IsDoubleRegister()) {
      // ...
      __ fmv_d(temp, src);
      __ LoadDouble(src, dst);
      __ StoreDouble(temp, dst);
    } else {
      DCHECK(source->IsSimd128Register());
      VRegister src = g.ToDoubleRegister(source).toV();
      // ...
      __ vmv_vv(temp, src);
      // ...
      if (dst.offset() != 0) {
        // ...
        __ AddWord(dst_v, dst.rm(), Operand(dst.offset()));
      }
      __ vl(src, dst_v, 0, E8);
      __ vs(temp, dst_v, 0, E8);
    }
  // ...
  if (source->IsSimd128StackSlot()) {
    // ...
    if (src.offset() != 0) {
      // ...
      __ AddWord(src_v, src.rm(), Operand(src.offset()));
    }
    // ...
    if (dst.offset() != 0) {
      // ...
      __ AddWord(dst_v, dst.rm(), Operand(dst.offset()));
    }
    // ...
  }
  // ...
#if V8_TARGET_ARCH_RISCV32
  if (source->IsFPStackSlot()) {
    // ...
    __ LoadDouble(temp_double, src);
    __ Lw(temp_word32, dst);
    __ Sw(temp_word32, src);
    __ Lw(temp_word32, dst_hi);
    __ Sw(temp_word32, src_hi);
    __ StoreDouble(temp_double, dst);
    // ...
  }
#endif
  // ...
    UseScratchRegisterScope scope(masm());
    // ...
    __ LoadWord(temp_0, src);
    __ LoadWord(temp_1, dst);
    __ StoreWord(temp_0, dst);
    __ StoreWord(temp_1, src);
  // ...
}
AllocatedOperand CodeGenerator::Push(InstructionOperand* source) {
  // ...
  RiscvOperandConverter g(this, nullptr);
  int last_frame_slot_id =
      frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
  // ...
  int slot_id = last_frame_slot_id + sp_delta + new_slots;
  // ...
  if (source->IsRegister()) {
    __ Push(g.ToRegister(source));
    // ...
  } else if (source->IsStackSlot()) {
    UseScratchRegisterScope temps(masm());
    Register scratch = temps.Acquire();
    __ LoadWord(scratch, g.ToMemOperand(source));
    // ...
  } else {
    // ...
  }
  // ...
}

void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
  RiscvOperandConverter g(this, nullptr);
  if (dest->IsRegister()) {
    // ...
    __ Pop(g.ToRegister(dest));
  } else if (dest->IsStackSlot()) {
    // ...
    UseScratchRegisterScope temps(masm());
    Register scratch = temps.Acquire();
    // ...
    __ StoreWord(scratch, g.ToMemOperand(dest));
  }
  // ...
  int last_frame_slot_id =
      frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
  // ...
  int slot_id = last_frame_slot_id + sp_delta;
  // ...
}
#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
#undef ASSEMBLE_ATOMIC_STORE_INTEGER
#undef ASSEMBLE_ATOMIC_BINOP
#undef ASSEMBLE_ATOMIC_BINOP_EXT
#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT
#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT
#undef ASSEMBLE_IEEE754_BINOP
#undef ASSEMBLE_IEEE754_UNOP