#define TRACE(...) PrintF(__VA_ARGS__)
  switch (constant.type()) {
    case Constant::kInt32:
      return Operand(constant.ToInt32());
    case Constant::kInt64:
      return Operand(constant.ToInt64());
    case Constant::kHeapObject:
      return Operand(constant.ToHeapObject());

  const size_t index = *first_index;
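// The fragments above belong to Loong64OperandConverter, the helper that turns
// InstructionOperands into macro-assembler Operands and MemOperands (register,
// immediate, or base+offset forms) for the code generation below.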
class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand offset,
                       Register value, RecordWriteMode mode,
                       StubCallMode stub_mode)
      : OutOfLineCode(gen),
#if V8_ENABLE_WEBASSEMBLY
        stub_mode_(stub_mode),

  void Generate() final {
      __ DecompressTagged(value_, value_);
    SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
                                            ? SaveFPRegsMode::kSave
                                            : SaveFPRegsMode::kIgnore;
      __ CallEphemeronKeyBarrier(object_, offset_, save_fp_mode);
      __ CallIndirectPointerBarrier(object_, offset_, save_fp_mode,
                                    indirect_pointer_tag_);
#if V8_ENABLE_WEBASSEMBLY
    } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
      __ CallRecordWriteStubSaveRegisters(object_, offset_, save_fp_mode,
                                          StubCallMode::kCallWasmRuntimeStub);
      __ CallRecordWriteStubSaveRegisters(object_, offset_, save_fp_mode);

#if V8_ENABLE_WEBASSEMBLY
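// OutOfLineRecordWrite is the slow path of the write barrier: depending on the
// RecordWriteMode it calls the ephemeron-key barrier, the indirect-pointer
// barrier, or the record-write stub, using the wasm runtime-stub call mode
// when generating WebAssembly code.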
#define CREATE_OOL_CLASS(ool_name, masm_ool_name, T)                 \
  class ool_name final : public OutOfLineCode {                      \
   public:                                                           \
    ool_name(CodeGenerator* gen, T dst, T src1, T src2)              \
        : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \
    void Generate() final { __ masm_ool_name(dst_, src1_, src2_); }  \
   private:                                                          \
    T const dst_;                                                    \
    T const src1_;                                                   \
    T const src2_;                                                   \
  };

CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister);
CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister);
CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, FPURegister);
CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, FPURegister);

#undef CREATE_OOL_CLASS
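// CREATE_OOL_CLASS stamps out the out-of-line helpers used by the NaN-aware
// Float32/Float64 Min and Max sequences further down: the inline fast path
// handles ordinary operands and branches to ool->entry() for NaN/signed-zero
// cases, which then defer to the MacroAssembler *OutOfLine routines.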
#if V8_ENABLE_WEBASSEMBLY
class WasmOutOfLineTrap : public OutOfLineCode {
 public:
  WasmOutOfLineTrap(CodeGenerator* gen, Instruction* instr)

  void Generate() override {
    Loong64OperandConverter i(gen_, instr_);
    TrapId trap_id =
        static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
    GenerateCallToTrap(trap_id);
  }

  void GenerateWithTrapId(TrapId trap_id) { GenerateCallToTrap(trap_id); }

  void GenerateCallToTrap(TrapId trap_id) {
    gen_->AssembleSourcePosition(instr_);
    __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
    ReferenceMap* reference_map = gen_->zone()->New<ReferenceMap>(gen_->zone());
    gen_->RecordSafepoint(reference_map);
    __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
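// WasmOutOfLineTrap materializes a wasm trap: it records the source position,
// calls the trap builtin via a WASM_STUB_CALL relocation, and records a
// safepoint so the trap PC maps back to the faulting instruction.
// RecordTrapInfoIfNeeded below does the bookkeeping for memory accesses that
// are protected by signal-handler based bounds/null checks.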
void RecordTrapInfoIfNeeded(Zone* zone, CodeGenerator* codegen,
                            InstructionCode opcode, Instruction* instr,
                            int pc) {
    ReferenceMap* reference_map =
        codegen->zone()->New<ReferenceMap>(codegen->zone());
    codegen->RecordSafepoint(reference_map, pc + 1);
    codegen->RecordProtectedInstruction(pc);

void RecordTrapInfoIfNeeded(Zone* zone, CodeGenerator* codegen,
FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
                                             FlagsCondition condition) {
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr)                          \
    RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
    __ asm_instr(i.OutputRegister(), i.MemoryOperand());

#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr)                         \
    RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
    __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand());
#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr)       \
    Label binop;                                                                \
    __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));        \
    __ bind(&binop);                                                            \
    RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset());        \
    __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0));      \
    __ bin_instr(i.TempRegister(1), i.OutputRegister(0),                        \
                 Operand(i.InputRegister(2)));                                  \
    __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0));  \
    __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg));
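// The ASSEMBLE_ATOMIC_BINOP* macros emit a classic LL/SC retry loop, roughly:
//
//   binop:
//     ll.w/ll.d   out, [addr]        ; load-linked the old value
//     <bin_instr> tmp, out, operand  ; compute the new value
//     sc.w/sc.d   tmp, [addr]        ; store-conditional, tmp := success flag
//     beqz        tmp, binop         ; retry if the store-conditional failed
//
// (Sketch only; the actual LoongArch instructions are produced by the
// MacroAssembler helpers named in the macro arguments.)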
#define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, sign_extend,  \
                                  size, bin_instr, representation)              \
    Label binop;                                                                \
    __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));        \
    if (representation == 32) {                                                 \
      __ andi(i.TempRegister(3), i.TempRegister(0), 0x3);                       \
    } else {                                                                    \
      DCHECK_EQ(representation, 64);                                            \
      __ andi(i.TempRegister(3), i.TempRegister(0), 0x7);                       \
    }                                                                           \
    __ Sub_d(i.TempRegister(0), i.TempRegister(0),                              \
             Operand(i.TempRegister(3)));                                       \
    __ slli_w(i.TempRegister(3), i.TempRegister(3), 3);                         \
    __ bind(&binop);                                                            \
    RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset());        \
    __ load_linked(i.TempRegister(1), MemOperand(i.TempRegister(0), 0));        \
    __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3),   \
                   size, sign_extend);                                          \
    __ bin_instr(i.TempRegister(2), i.OutputRegister(0),                        \
                 Operand(i.InputRegister(2)));                                  \
    __ InsertBits(i.TempRegister(1), i.TempRegister(2), i.TempRegister(3),      \
                  size);                                                        \
    __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0));  \
    __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg));
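// Sub-word (8/16-bit) atomics have no native LL/SC form, so the macro widens
// them: the address is aligned down to its containing 32/64-bit word
// (Sub_d of addr & 0x3 / addr & 0x7), the byte offset becomes a bit shift
// (offset * 8 via the slli_w by 3), and ExtractBits/InsertBits splice the
// narrow lane in and out of the full word inside the retry loop.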
#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(                                   \
    load_linked, store_conditional, sign_extend, size, representation)          \
    Label exchange;                                                             \
    __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));        \
    if (representation == 32) {                                                 \
      __ andi(i.TempRegister(1), i.TempRegister(0), 0x3);                       \
    } else {                                                                    \
      DCHECK_EQ(representation, 64);                                            \
      __ andi(i.TempRegister(1), i.TempRegister(0), 0x7);                       \
    }                                                                           \
    __ Sub_d(i.TempRegister(0), i.TempRegister(0),                              \
             Operand(i.TempRegister(1)));                                       \
    __ slli_w(i.TempRegister(1), i.TempRegister(1), 3);                         \
    __ bind(&exchange);                                                         \
    RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset());        \
    __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0));        \
    __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1),   \
                   size, sign_extend);                                          \
    __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1),     \
                  size);                                                        \
    __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0));  \
    __ BranchShort(&exchange, eq, i.TempRegister(2), Operand(zero_reg));
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked,                   \
                                                 store_conditional)             \
    Label compareExchange;                                                      \
    Label exit;                                                                 \
    __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));        \
    __ bind(&compareExchange);                                                  \
    RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset());        \
    __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0));      \
    __ BranchShort(&exit, ne, i.InputRegister(2),                               \
                   Operand(i.OutputRegister(0)));                               \
    __ mov(i.TempRegister(2), i.InputRegister(3));                              \
    __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0));  \
    __ BranchShort(&compareExchange, eq, i.TempRegister(2),                     \
                   Operand(zero_reg));                                          \
    __ bind(&exit);
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(                           \
    load_linked, store_conditional, sign_extend, size, representation)          \
    Label compareExchange;                                                      \
    Label exit;                                                                 \
    __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));        \
    if (representation == 32) {                                                 \
      __ andi(i.TempRegister(1), i.TempRegister(0), 0x3);                       \
    } else {                                                                    \
      DCHECK_EQ(representation, 64);                                            \
      __ andi(i.TempRegister(1), i.TempRegister(0), 0x7);                       \
    }                                                                           \
    __ Sub_d(i.TempRegister(0), i.TempRegister(0),                              \
             Operand(i.TempRegister(1)));                                       \
    __ slli_w(i.TempRegister(1), i.TempRegister(1), 3);                         \
    __ bind(&compareExchange);                                                  \
    RecordTrapInfoIfNeeded(zone(), this, opcode, instr, __ pc_offset());        \
    __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0));        \
    __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1),   \
                   size, sign_extend);                                          \
    __ ExtractBits(i.TempRegister(2), i.InputRegister(2), zero_reg, size,       \
                   sign_extend);                                                \
    __ BranchShort(&exit, ne, i.TempRegister(2),                                \
                   Operand(i.OutputRegister(0)));                               \
    __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1),     \
                  size);                                                        \
    __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0));  \
    __ BranchShort(&compareExchange, eq, i.TempRegister(2),                     \
                   Operand(zero_reg));                                          \
    __ bind(&exit);
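// Compare-exchange follows the same loop shape but bails out to &exit as soon
// as the loaded (and, for sub-word cases, extracted) old value differs from
// the expected value in input 2; only on a match is the new value from
// input 3 written back with a store-conditional.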
#define ASSEMBLE_IEEE754_BINOP(name)                                          \
    FrameScope scope(masm(), StackFrame::MANUAL);                             \
    UseScratchRegisterScope temps(masm());                                    \
    Register scratch = temps.Acquire();                                       \
    __ PrepareCallCFunction(0, 2, scratch);                                   \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2);

#define ASSEMBLE_IEEE754_UNOP(name)                                           \
    FrameScope scope(masm(), StackFrame::MANUAL);                             \
    UseScratchRegisterScope temps(masm());                                    \
    Register scratch = temps.Acquire();                                       \
    __ PrepareCallCFunction(0, 1, scratch);                                   \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1);
#define ASSEMBLE_F64X2_ARITHMETIC_BINOP(op)                     \
    __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \
          i.InputSimd128Register(1));
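// The kIeee754Float64* opcodes in the big switch below are all lowered the
// same way: ASSEMBLE_IEEE754_UNOP/BINOP set up a manual frame, reserve one or
// two FP argument slots, and call the matching ieee754_<name>_function C
// helper through CallCFunction.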
void AdjustStackPointerForTailCall(MacroAssembler* masm,
                                   FrameAccessState* state,
                                   int new_slot_above_sp,
                                   bool allow_shrinkage = true) {
  int current_sp_offset = state->GetSPToFPSlotCount() +
                          StandardFrameConstants::kFixedSlotCountAboveFp;
  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
  if (stack_slot_delta > 0) {
    state->IncreaseSPDelta(stack_slot_delta);
  } else if (allow_shrinkage && stack_slot_delta < 0) {
    state->IncreaseSPDelta(stack_slot_delta);
  }
}

void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
                                              int first_unused_slot_offset) {
  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                first_unused_slot_offset, false);
}

void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
                                             int first_unused_slot_offset) {
  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                first_unused_slot_offset);
}
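// Tail calls reuse the caller's frame, so before and after the gap-resolver
// moves the stack pointer is nudged until the callee's first unused slot lines
// up with the current SP; the matching delta is recorded via IncreaseSPDelta
// so later frame-slot addressing stays consistent.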
void CodeGenerator::AssembleCodeStartRegisterCheck() {
  UseScratchRegisterScope temps(masm());
  Register scratch = temps.Acquire();
  __ ComputeCodeStartAddress(scratch);
  __ Assert(eq, AbortReason::kWrongFunctionCodeStart,
            kJavaScriptCallCodeStartRegister, Operand(scratch));
}
#ifdef V8_ENABLE_LEAPTIERING
void CodeGenerator::AssembleDispatchHandleRegisterCheck() {
  DCHECK(linkage()->GetIncomingDescriptor()->IsJSFunctionCall());
  UseScratchRegisterScope temps(masm());
  Register actual_parameter_count = temps.Acquire();
  __ LoadParameterCountFromJSDispatchTable(
  __ Assert(eq, AbortReason::kWrongFunctionDispatchHandle,
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
    Instruction* instr) {
  Loong64OperandConverter i(this, instr);
  switch (arch_opcode) {
    case kArchCallCodeObject: {
      if (instr->InputAt(0)->IsImmediate()) {
      } else {
        CodeEntrypointTag tag =
            i.InputCodeEntrypointTag(instr->CodeEnrypointTagInputIndex());
        __ CallCodeObject(reg, tag);
    case kArchCallBuiltinPointer: {
      Register builtin_index = i.InputRegister(0);
      __ CallBuiltinByIndex(builtin_index, target);
#if V8_ENABLE_WEBASSEMBLY
    case kArchCallWasmFunction:
    case kArchCallWasmFunctionIndirect: {
      if (instr->InputAt(0)->IsImmediate()) {
        DCHECK_EQ(arch_opcode, kArchCallWasmFunction);
        Constant constant = i.ToConstant(instr->InputAt(0));
        __ Call(wasm_code, constant.rmode());
      } else if (arch_opcode == kArchCallWasmFunctionIndirect) {
        __ CallWasmCodePointer(
            i.InputRegister(0),
            i.InputInt64(instr->WasmSignatureHashInputIndex()));
      } else {
        __ Call(i.InputRegister(0));
    case kArchTailCallWasm:
    case kArchTailCallWasmIndirect: {
      if (instr->InputAt(0)->IsImmediate()) {
        DCHECK_EQ(arch_opcode, kArchTailCallWasm);
        Constant constant = i.ToConstant(instr->InputAt(0));
        __ Jump(wasm_code, constant.rmode());
      } else if (arch_opcode == kArchTailCallWasmIndirect) {
        __ CallWasmCodePointer(
            i.InputRegister(0),
            i.InputInt64(instr->WasmSignatureHashInputIndex()),
            CallJumpMode::kTailCall);
      } else {
        __ Jump(i.InputRegister(0));
    case kArchTailCallCodeObject: {
      if (instr->InputAt(0)->IsImmediate()) {
      } else {
        CodeEntrypointTag tag =
            i.InputCodeEntrypointTag(instr->CodeEnrypointTagInputIndex());
        __ JumpCodeObject(reg, tag);
    case kArchTailCallAddress: {
    case kArchCallJSFunction: {
      Register func = i.InputRegister(0);
      UseScratchRegisterScope temps(masm());
      Register scratch = temps.Acquire();
      __ LoadTaggedField(scratch,
                         FieldMemOperand(func, JSFunction::kContextOffset));
      __ Assert(eq, AbortReason::kWrongFunctionContext, cp, Operand(scratch));
      uint32_t num_arguments =
          i.InputUint32(instr->JSCallArgumentCountInputIndex());
      __ CallJSFunction(func, num_arguments);
    case kArchPrepareCallCFunction: {
      UseScratchRegisterScope temps(masm());
      __ PrepareCallCFunction(num_gp_parameters, num_fp_parameters, scratch);
    case kArchSaveCallerRegisters: {
    case kArchRestoreCallerRegisters: {
    case kArchPrepareTailCall:
    case kArchCallCFunctionWithFrameState:
    case kArchCallCFunction: {
      Label return_location;
#if V8_ENABLE_WEBASSEMBLY
      bool isWasmCapiFunction =
          linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
      if (isWasmCapiFunction) {
        UseScratchRegisterScope temps(masm());
        __ LoadLabelRelative(scratch, &return_location);
        __ St_d(scratch,
                MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
      }
      if (instr->InputAt(0)->IsImmediate()) {
        ExternalReference ref = i.InputExternalReference(0);
        pc_offset = __ CallCFunction(ref, num_gp_parameters, num_fp_parameters,
                                     set_isolate_data_slots, &return_location);
      } else {
        pc_offset = __ CallCFunction(func, num_gp_parameters, num_fp_parameters,
                                     set_isolate_data_slots, &return_location);
      }
      bool const needs_frame_state =
          (arch_opcode == kArchCallCFunctionWithFrameState);
      if (needs_frame_state) {
    case kArchBinarySearchSwitch:
    case kArchTableSwitch:
    case kArchAbortCSADcheck:
      DCHECK(i.InputRegister(0) == a0);
      __ CallBuiltin(Builtin::kAbortCSADcheck);
    case kArchDebugBreak:
      __ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)),
                       SourceLocation());
    case kArchThrowTerminator:
    case kArchDeoptimize: {
      DeoptimizationExit* exit =
          BuildTranslation(instr, -1, 0, 0, OutputFrameStateCombine::Ignore());
      __ Branch(exit->label());
#if V8_ENABLE_WEBASSEMBLY
    case kArchStackPointer:
      __ mov(i.OutputRegister(), sp);
    case kArchSetStackPointer: {
      __ mov(sp, i.InputRegister(0));
    case kArchStackPointerGreaterThan: {
      lhs_register = i.TempRegister(1);
      __ Sltu(i.TempRegister(0), i.InputRegister(0), lhs_register);
    case kArchStackCheckOffset:
    case kArchFramePointer:
      __ mov(i.OutputRegister(), fp);
    case kArchParentFramePointer:
      __ mov(i.OutputRegister(), fp);
    case kArchTruncateDoubleToI:
    case kArchStoreWithWriteBarrier: {
      if (addressing_mode == kMode_MRI) {
        auto ool = zone()->New<OutOfLineRecordWrite>(
            this, object, Operand(i.InputInt64(1)), value, mode,
            DetermineStubCallMode());
        __ StoreTaggedField(value, MemOperand(object, i.InputInt64(1)));
        __ JumpIfSmi(value, ool->exit());
        __ CheckPageFlag(object,
        __ bind(ool->exit());
      } else {
        auto ool = zone()->New<OutOfLineRecordWrite>(
            this, object, Operand(i.InputRegister(1)), value, mode,
            DetermineStubCallMode());
        __ StoreTaggedField(value, MemOperand(object, i.InputRegister(1)));
        __ JumpIfSmi(value, ool->exit());
        __ CheckPageFlag(object,
        __ bind(ool->exit());
    case kArchAtomicStoreWithWriteBarrier: {
      int64_t offset = i.InputInt64(1);
      auto ool = zone()->New<OutOfLineRecordWrite>(
      __ JumpIfSmi(value, ool->exit());
      __ bind(ool->exit());
    case kArchStoreIndirectWithWriteBarrier: {
      if (addressing_mode == kMode_MRI) {
        auto ool = zone()->New<OutOfLineRecordWrite>(
            this, object, Operand(i.InputInt32(1)), value, mode,
        __ StoreIndirectPointerField(value,
        __ CheckPageFlag(object,
        __ bind(ool->exit());
      } else {
        auto ool = zone()->New<OutOfLineRecordWrite>(
            this, object, Operand(i.InputRegister(1)), value, mode,
        __ StoreIndirectPointerField(value,
        __ CheckPageFlag(object,
        __ bind(ool->exit());
    case kArchStackSlot: {
      UseScratchRegisterScope temps(masm());
      Register scratch = temps.Acquire();
      __ Add_d(i.OutputRegister(), base_reg, Operand(offset.offset()));
      __ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, scratch,
    case kIeee754Float64Acos:
    case kIeee754Float64Acosh:
    case kIeee754Float64Asin:
    case kIeee754Float64Asinh:
    case kIeee754Float64Atan:
    case kIeee754Float64Atanh:
    case kIeee754Float64Atan2:
    case kIeee754Float64Cos:
    case kIeee754Float64Cosh:
    case kIeee754Float64Cbrt:
    case kIeee754Float64Exp:
    case kIeee754Float64Expm1:
    case kIeee754Float64Log:
    case kIeee754Float64Log1p:
    case kIeee754Float64Log2:
    case kIeee754Float64Log10:
    case kIeee754Float64Pow:
    case kIeee754Float64Sin:
    case kIeee754Float64Sinh:
    case kIeee754Float64Tan:
    case kIeee754Float64Tanh:
      __ Add_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      __ Add_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
    case kLoong64AddOvf_d: {
      UseScratchRegisterScope temps(masm());
      DCHECK(temps.hasAvailable());
      __ AddOverflow_d(i.OutputRegister(), i.InputRegister(0),
                       i.InputOperand(1), t8);
      __ Sub_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      __ Sub_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
    case kLoong64SubOvf_d:
      __ SubOverflow_d(i.OutputRegister(), i.InputRegister(0),
                       i.InputOperand(1), t8);
      __ Mul_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
    case kLoong64MulOvf_w: {
      UseScratchRegisterScope temps(masm());
      DCHECK(temps.hasAvailable());
      __ MulOverflow_w(i.OutputRegister(), i.InputRegister(0),
                       i.InputOperand(1), t8);
    case kLoong64MulOvf_d: {
      UseScratchRegisterScope temps(masm());
      DCHECK(temps.hasAvailable());
      __ MulOverflow_d(i.OutputRegister(), i.InputRegister(0),
                       i.InputOperand(1), t8);
    case kLoong64Mulh_w:
      __ Mulh_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
    case kLoong64Mulh_wu:
      __ Mulh_wu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
    case kLoong64Mulh_d:
      __ Mulh_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
    case kLoong64Mulh_du:
      __ Mulh_du(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      __ Div_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      __ maskeqz(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
    case kLoong64Div_wu:
      __ Div_wu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      __ maskeqz(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      __ Mod_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
    case kLoong64Mod_wu:
      __ Mod_wu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      __ Mul_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      __ Div_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      __ maskeqz(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
    case kLoong64Div_du:
      __ Div_du(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      __ maskeqz(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      __ Mod_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
    case kLoong64Mod_du:
      __ Mod_du(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
    case kLoong64Alsl_d:
      __ Alsl_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
    case kLoong64Alsl_w:
      __ Alsl_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (instr->InputAt(1)->IsRegister()) {
        __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      } else {
        __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
      }
      __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      __ clz_w(i.OutputRegister(), i.InputRegister(0));
      __ clz_d(i.OutputRegister(), i.InputRegister(0));
      if (instr->InputAt(1)->IsRegister()) {
        __ sll_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        __ slli_w(i.OutputRegister(), i.InputRegister(0),
      if (instr->InputAt(1)->IsRegister()) {
        __ srl_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        __ srli_w(i.OutputRegister(), i.InputRegister(0),
      if (instr->InputAt(1)->IsRegister()) {
        __ sra_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        __ srai_w(i.OutputRegister(), i.InputRegister(0),
    case kLoong64Bstrpick_w:
      __ bstrpick_w(i.OutputRegister(), i.InputRegister(0),
                    i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
    case kLoong64Bstrins_w:
      if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
        __ bstrins_w(i.OutputRegister(), zero_reg,
                     i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
      } else {
        __ bstrins_w(i.OutputRegister(), i.InputRegister(0),
                     i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
      }
    case kLoong64Bstrpick_d: {
      __ bstrpick_d(i.OutputRegister(), i.InputRegister(0),
                    i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
    case kLoong64Bstrins_d:
      if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
        __ bstrins_d(i.OutputRegister(), zero_reg,
                     i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
      } else {
        __ bstrins_d(i.OutputRegister(), i.InputRegister(0),
                     i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
      }
      if (instr->InputAt(1)->IsRegister()) {
        __ sll_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        __ slli_d(i.OutputRegister(), i.InputRegister(0),
      if (instr->InputAt(1)->IsRegister()) {
        __ srl_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        __ srli_d(i.OutputRegister(), i.InputRegister(0),
      if (instr->InputAt(1)->IsRegister()) {
        __ sra_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        __ srai_d(i.OutputRegister(), i.InputRegister(0), imm);
      }
    case kLoong64Rotr_w:
      __ Rotr_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
    case kLoong64Rotr_d:
      __ Rotr_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      UseScratchRegisterScope temps(masm());
      DCHECK(temps.hasAvailable());
      __ And(t8, i.InputRegister(0), i.InputOperand(1));
      __ mov(i.OutputRegister(), i.InputRegister(0));
      __ li(i.OutputRegister(), i.InputOperand(0));
    case kLoong64Float32Cmp: {
      FPURegister left = i.InputOrZeroSingleRegister(0);
      FPURegister right = i.InputOrZeroSingleRegister(1);
      bool predicate;
      FPUCondition cc =
          FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
      if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
          !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ CompareF32(left, right, cc);
    case kLoong64Float32Add:
      __ fadd_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputDoubleRegister(1));
    case kLoong64Float32Sub:
      __ fsub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputDoubleRegister(1));
    case kLoong64Float32Mul:
      __ fmul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputDoubleRegister(1));
    case kLoong64Float32Div:
      __ fdiv_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputDoubleRegister(1));
    case kLoong64Float32Abs:
      __ fabs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
    case kLoong64Float32Neg:
      __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
    case kLoong64Float32Sqrt: {
      __ fsqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
    case kLoong64Float32Min: {
      FPURegister dst = i.OutputSingleRegister();
      FPURegister src1 = i.InputSingleRegister(0);
      FPURegister src2 = i.InputSingleRegister(1);
      auto ool = zone()->New<OutOfLineFloat32Min>(this, dst, src1, src2);
      __ Float32Min(dst, src1, src2, ool->entry());
      __ bind(ool->exit());
    case kLoong64Float32Max: {
      FPURegister dst = i.OutputSingleRegister();
      FPURegister src1 = i.InputSingleRegister(0);
      FPURegister src2 = i.InputSingleRegister(1);
      auto ool = zone()->New<OutOfLineFloat32Max>(this, dst, src1, src2);
      __ Float32Max(dst, src1, src2, ool->entry());
      __ bind(ool->exit());
    case kLoong64Float64Cmp: {
      FPURegister left = i.InputOrZeroDoubleRegister(0);
      FPURegister right = i.InputOrZeroDoubleRegister(1);
      bool predicate;
      FPUCondition cc =
          FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
      if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
          !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ CompareF64(left, right, cc);
    case kLoong64Float64Add:
      __ fadd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputDoubleRegister(1));
    case kLoong64Float64Sub:
      __ fsub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputDoubleRegister(1));
    case kLoong64Float64Mul:
      __ fmul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputDoubleRegister(1));
    case kLoong64Float64Div:
      __ fdiv_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputDoubleRegister(1));
    case kLoong64Float64Mod: {
      UseScratchRegisterScope temps(masm());
      Register scratch = temps.Acquire();
      __ PrepareCallCFunction(0, 2, scratch);
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
    case kLoong64Float64Abs:
      __ fabs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
    case kLoong64Float64Neg:
      __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
    case kLoong64Float64Sqrt: {
      __ fsqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
    case kLoong64Float64Min: {
      FPURegister dst = i.OutputDoubleRegister();
      FPURegister src1 = i.InputDoubleRegister(0);
      FPURegister src2 = i.InputDoubleRegister(1);
      auto ool = zone()->New<OutOfLineFloat64Min>(this, dst, src1, src2);
      __ Float64Min(dst, src1, src2, ool->entry());
      __ bind(ool->exit());
    case kLoong64Float64Max: {
      FPURegister dst = i.OutputDoubleRegister();
      FPURegister src1 = i.InputDoubleRegister(0);
      FPURegister src2 = i.InputDoubleRegister(1);
      auto ool = zone()->New<OutOfLineFloat64Max>(this, dst, src1, src2);
      __ Float64Max(dst, src1, src2, ool->entry());
      __ bind(ool->exit());
    case kLoong64Float64RoundDown: {
      __ Floor_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
    case kLoong64Float32RoundDown: {
      __ Floor_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
    case kLoong64Float64RoundTruncate: {
      __ Trunc_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
    case kLoong64Float32RoundTruncate: {
      __ Trunc_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
    case kLoong64Float64RoundUp: {
      __ Ceil_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
    case kLoong64Float32RoundUp: {
      __ Ceil_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
    case kLoong64Float64RoundTiesEven: {
      __ Round_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
    case kLoong64Float32RoundTiesEven: {
      __ Round_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
    case kLoong64Float64SilenceNaN:
      __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
    case kLoong64Float64ToFloat32:
      __ fcvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
    case kLoong64Float32ToFloat64:
      __ fcvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
    case kLoong64Int32ToFloat64: {
      __ movgr2fr_w(scratch, i.InputRegister(0));
      __ ffint_d_w(i.OutputDoubleRegister(), scratch);
    case kLoong64Int32ToFloat32: {
      __ movgr2fr_w(scratch, i.InputRegister(0));
      __ ffint_s_w(i.OutputDoubleRegister(), scratch);
    case kLoong64Uint32ToFloat32: {
      __ Ffint_s_uw(i.OutputDoubleRegister(), i.InputRegister(0));
    case kLoong64Int64ToFloat32: {
      __ movgr2fr_d(scratch, i.InputRegister(0));
      __ ffint_s_l(i.OutputDoubleRegister(), scratch);
    case kLoong64Int64ToFloat64: {
      __ movgr2fr_d(scratch, i.InputRegister(0));
      __ ffint_d_l(i.OutputDoubleRegister(), scratch);
    case kLoong64Uint32ToFloat64: {
      __ Ffint_d_uw(i.OutputDoubleRegister(), i.InputRegister(0));
    case kLoong64Uint64ToFloat64: {
      __ Ffint_d_ul(i.OutputDoubleRegister(), i.InputRegister(0));
    case kLoong64Uint64ToFloat32: {
      __ Ffint_s_ul(i.OutputDoubleRegister(), i.InputRegister(0));
    case kLoong64Float64ToInt32: {
      __ ftintrz_w_d(scratch, i.InputDoubleRegister(0));
      __ movfr2gr_s(i.OutputRegister(), scratch);
      if (instr->OutputCount() > 1) {
        __ li(i.OutputRegister(1), 1);
        __ Move(scratch, static_cast<double>(INT32_MIN));
        __ CompareF64(scratch, i.InputDoubleRegister(0), CLE);
        __ LoadZeroIfNotFPUCondition(i.OutputRegister(1));
        __ Move(scratch, static_cast<double>(INT32_MAX) + 1);
        __ CompareF64(scratch, i.InputDoubleRegister(0), CLE);
        __ LoadZeroIfFPUCondition(i.OutputRegister(1));
    case kLoong64Float32ToInt32: {
      __ ftintrz_w_s(scratch_d, i.InputDoubleRegister(0));
      __ movfr2gr_s(i.OutputRegister(), scratch_d);
      if (set_overflow_to_min_i32) {
        UseScratchRegisterScope temps(masm());
        Register scratch = temps.Acquire();
        __ addi_w(scratch, i.OutputRegister(), 1);
        __ slt(scratch, scratch, i.OutputRegister());
        __ add_w(i.OutputRegister(), i.OutputRegister(), scratch);
    case kLoong64Float32ToInt64: {
      bool load_status = instr->OutputCount() > 1;
      __ ftintrz_l_s(scratch_d, i.InputDoubleRegister(0));
      __ movfr2gr_d(i.OutputRegister(), scratch_d);
      __ movfcsr2gr(output2, FCSR2);
      __ And(output2, output2,
      __ Slt(output2, zero_reg, output2);
      __ xori(output2, output2, 1);
    case kLoong64Float64ToInt64: {
      UseScratchRegisterScope temps(masm());
      Register scratch = temps.Acquire();
      bool load_status = instr->OutputCount() > 1;
      __ ftintrz_l_d(scratch_d, i.InputDoubleRegister(0));
      __ movfr2gr_d(i.OutputRegister(0), scratch_d);
      __ movfcsr2gr(output2, FCSR2);
      __ And(output2, output2,
      __ Slt(output2, zero_reg, output2);
      __ xori(output2, output2, 1);
      if (set_overflow_to_min_i64) {
        __ addi_d(scratch, i.OutputRegister(), 1);
        __ slt(scratch, scratch, i.OutputRegister());
        __ add_d(i.OutputRegister(), i.OutputRegister(), scratch);
    case kLoong64Float64ToUint32: {
      __ Ftintrz_uw_d(i.OutputRegister(), i.InputDoubleRegister(0), scratch);
      if (instr->OutputCount() > 1) {
        __ li(i.OutputRegister(1), 1);
        __ Move(scratch, static_cast<double>(-1.0));
        __ CompareF64(scratch, i.InputDoubleRegister(0), CLT);
        __ LoadZeroIfNotFPUCondition(i.OutputRegister(1));
        __ Move(scratch, static_cast<double>(UINT32_MAX) + 1);
        __ CompareF64(scratch, i.InputDoubleRegister(0), CLE);
        __ LoadZeroIfFPUCondition(i.OutputRegister(1));
    case kLoong64Float32ToUint32: {
      __ Ftintrz_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch);
      if (set_overflow_to_min_i32) {
        UseScratchRegisterScope temps(masm());
        Register scratch = temps.Acquire();
        __ addi_w(scratch, i.OutputRegister(), 1);
        __ Movz(i.OutputRegister(), zero_reg, scratch);
    case kLoong64Float32ToUint64: {
      __ Ftintrz_ul_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch,
    case kLoong64Float64ToUint64: {
      __ Ftintrz_ul_d(i.OutputRegister(0), i.InputDoubleRegister(0), scratch,
    case kLoong64BitcastDL:
      __ movfr2gr_d(i.OutputRegister(), i.InputDoubleRegister(0));
    case kLoong64BitcastLD:
      __ movgr2fr_d(i.OutputDoubleRegister(), i.InputRegister(0));
    case kLoong64Float64ExtractLowWord32:
      __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
    case kLoong64Float64ExtractHighWord32:
      __ movfrh2gr_s(i.OutputRegister(), i.InputDoubleRegister(0));
    case kLoong64Float64FromWord32Pair:
      __ movgr2fr_w(i.OutputDoubleRegister(), i.InputRegister(1));
      __ movgr2frh_w(i.OutputDoubleRegister(), i.InputRegister(0));
    case kLoong64Float64InsertLowWord32:
      __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1));
    case kLoong64Float64InsertHighWord32:
      __ movgr2frh_w(i.OutputDoubleRegister(), i.InputRegister(1));
    case kLoong64Ext_w_b:
      __ ext_w_b(i.OutputRegister(), i.InputRegister(0));
    case kLoong64Ext_w_h:
      __ ext_w_h(i.OutputRegister(), i.InputRegister(0));
      __ Ld_bu(i.OutputRegister(), i.MemoryOperand());
      __ Ld_b(i.OutputRegister(), i.MemoryOperand());
    case kLoong64St_b: {
      __ St_b(i.InputOrZeroRegister(index), mem);
      __ Ld_hu(i.OutputRegister(), i.MemoryOperand());
      __ Ld_h(i.OutputRegister(), i.MemoryOperand());
    case kLoong64St_h: {
      __ St_h(i.InputOrZeroRegister(index), mem);
      __ Ld_w(i.OutputRegister(), i.MemoryOperand());
      __ Ld_wu(i.OutputRegister(), i.MemoryOperand());
      __ Ld_d(i.OutputRegister(), i.MemoryOperand());
    case kLoong64St_w: {
      __ St_w(i.InputOrZeroRegister(index), mem);
    case kLoong64St_d: {
      __ St_d(i.InputOrZeroRegister(index), mem);
    case kLoong64LoadDecompressTaggedSigned:
      __ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
    case kLoong64LoadDecompressTagged:
      __ DecompressTagged(i.OutputRegister(), i.MemoryOperand());
    case kLoong64LoadDecompressProtected:
      __ DecompressProtected(i.OutputRegister(), i.MemoryOperand());
    case kLoong64StoreCompressTagged: {
      __ StoreTaggedField(i.InputOrZeroRegister(index), mem);
    case kLoong64LoadDecodeSandboxedPointer:
      __ LoadSandboxedPointerField(i.OutputRegister(), i.MemoryOperand());
    case kLoong64StoreEncodeSandboxedPointer: {
      __ StoreSandboxedPointerField(i.InputOrZeroRegister(index), mem);
    case kLoong64StoreIndirectPointer: {
      __ StoreIndirectPointerField(i.InputOrZeroRegister(index), mem);
    case kLoong64AtomicLoadDecompressTaggedSigned:
      __ AtomicDecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
    case kLoong64AtomicLoadDecompressTagged:
      __ AtomicDecompressTagged(i.OutputRegister(), i.MemoryOperand());
    case kLoong64AtomicStoreCompressTagged: {
      __ AtomicStoreTaggedField(i.InputOrZeroRegister(index), mem);
    case kLoong64Fld_s: {
      __ Fld_s(i.OutputSingleRegister(), i.MemoryOperand());
    case kLoong64Fst_s: {
      FPURegister ft = i.InputOrZeroSingleRegister(index);
      __ Fst_s(ft, operand);
      __ Fld_d(i.OutputDoubleRegister(), i.MemoryOperand());
    case kLoong64Fst_d: {
      FPURegister ft = i.InputOrZeroDoubleRegister(index);
      __ Fst_d(ft, operand);
    case kLoong64Dbar: {
      if (instr->InputAt(0)->IsFPRegister()) {
    case kLoong64Peek: {
      int reverse_slot = i.InputInt32(0);
      if (instr->OutputAt(0)->IsFPRegister()) {
    case kLoong64StackClaim: {
      __ Sub_d(sp, sp, Operand(i.InputInt32(0)));
    case kLoong64Poke: {
      if (instr->InputAt(0)->IsFPRegister()) {
        __ Fst_d(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
    case kLoong64ByteSwap64: {
      __ ByteSwap(i.OutputRegister(0), i.InputRegister(0), 8);
    case kLoong64ByteSwap32: {
      __ ByteSwap(i.OutputRegister(0), i.InputRegister(0), 4);
    case kAtomicLoadInt8:
    case kAtomicLoadUint8:
    case kAtomicLoadInt16:
    case kAtomicLoadUint16:
    case kAtomicLoadWord32:
    case kLoong64Word64AtomicLoadUint32:
    case kLoong64Word64AtomicLoadUint64:
    case kAtomicStoreWord8:
    case kAtomicStoreWord16:
    case kAtomicStoreWord32:
    case kLoong64Word64AtomicStoreWord64:
    case kAtomicExchangeInt8:
    case kAtomicExchangeUint8:
    case kAtomicExchangeInt16:
    case kAtomicExchangeUint16:
    case kAtomicExchangeWord32:
      __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
      __ amswap_db_w(i.OutputRegister(0), i.InputRegister(2),
                     i.TempRegister(0));
    case kLoong64Word64AtomicExchangeUint64:
      __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
      __ amswap_db_d(i.OutputRegister(0), i.InputRegister(2),
                     i.TempRegister(0));
    case kAtomicCompareExchangeInt8:
    case kAtomicCompareExchangeUint8:
    case kAtomicCompareExchangeInt16:
    case kAtomicCompareExchangeUint16:
    case kAtomicCompareExchangeWord32:
      __ slli_w(i.InputRegister(2), i.InputRegister(2), 0);
    case kLoong64Word64AtomicCompareExchangeUint64:
    case kAtomicAddWord32:
      __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
      __ amadd_db_w(i.OutputRegister(0), i.InputRegister(2),
                    i.TempRegister(0));
    case kAtomicSubWord32:
    case kAtomicAndWord32:
      __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
      __ amand_db_w(i.OutputRegister(0), i.InputRegister(2),
                    i.TempRegister(0));
    case kAtomicOrWord32:
      __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
      __ amor_db_w(i.OutputRegister(0), i.InputRegister(2),
                   i.TempRegister(0));
    case kAtomicXorWord32:
      __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
      __ amxor_db_w(i.OutputRegister(0), i.InputRegister(2),
                    i.TempRegister(0));
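// Word-sized atomic RMW operations do not need an LL/SC loop on LoongArch:
// the single-instruction AM* family (amswap_db, amadd_db, amand_db, amor_db,
// amxor_db) performs the read-modify-write with barrier (_db) semantics and
// returns the old value in the destination register. Sub-word and
// compare-exchange variants still go through the LL/SC macros defined above.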
#define ATOMIC_BINOP_CASE(op, inst32, inst64)                            \
  case kAtomic##op##Int8:                                                \
    DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);   \
    ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, true, 8, inst32, 32);          \
  case kAtomic##op##Uint8:                                               \
    switch (AtomicWidthField::decode(opcode)) {                          \
      case AtomicWidth::kWord32:                                         \
        ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, false, 8, inst32, 32);     \
      case AtomicWidth::kWord64:                                         \
        ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 8, inst64, 64);     \
  case kAtomic##op##Int16:                                               \
    DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);   \
    ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, true, 16, inst32, 32);         \
  case kAtomic##op##Uint16:                                              \
    switch (AtomicWidthField::decode(opcode)) {                          \
      case AtomicWidth::kWord32:                                         \
        ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, false, 16, inst32, 32);    \
      case AtomicWidth::kWord64:                                         \
        ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 16, inst64, 64);

#undef ATOMIC_BINOP_CASE
    case kLoong64Word64AtomicAddUint64:
      __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
      __ amadd_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0));
    case kLoong64Word64AtomicSubUint64:
    case kLoong64Word64AtomicAndUint64:
      __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
      __ amand_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0));
    case kLoong64Word64AtomicOrUint64:
      __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
      __ amor_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0));
    case kLoong64Word64AtomicXorUint64:
      __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
      __ amxor_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0));
#undef ATOMIC_BINOP_CASE
    case kLoong64S128Const:
    case kLoong64S128Zero:
    case kLoong64I32x4Splat:
    case kLoong64I32x4ExtractLane:
    case kLoong64I32x4Add:
    case kLoong64I32x4ReplaceLane:
    case kLoong64I32x4Sub:
    case kLoong64F64x2Abs:
#define UNSUPPORTED_COND(opcode, condition)                                    \
  StdoutStream{} << "Unsupported " << #opcode << " condition: \"" << condition

  bool need_signed = false;
    masm->slli_w(*temp0, *left, 0);
  if (need_signed && right->is_reg()) {
    masm->slli_w(*temp1, right->rm(), 0);
void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
                            Instruction* instr, FlagsCondition condition,
                            Label* tlabel, Label* flabel, bool fallthru) {
    __ srai_d(scratch, i.OutputRegister(), 32);
    __ srai_w(scratch2, i.OutputRegister(), 31);
    __ Branch(tlabel, cc, scratch2, Operand(scratch));
    __ Branch(tlabel, cc, left, right);
    __ xori(i.TempRegister(0), i.TempRegister(0), 1);
    __ Branch(tlabel, ne, i.TempRegister(0), Operand(zero_reg));
    FlagsConditionToConditionCmpFPU(&predicate, condition);
      __ BranchTrueF(tlabel);
      __ BranchFalseF(tlabel);
    PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
           instr->arch_opcode());
  if (!fallthru) __ Branch(flabel);
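// AssembleBranchToLabels is the common tail for branches: integer comparisons
// fold the condition directly into a conditional Branch, overflow checks
// compare the sign of the truncated low word against the full 64-bit result
// (the two srai shifts above), and FP comparisons test the FPU condition flag
// with BranchTrueF/BranchFalseF depending on the predicate.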
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;

                                              BranchInfo* branch) {

#undef UNSUPPORTED_COND

                                              BranchInfo* branch) {
#if V8_ENABLE_WEBASSEMBLY
void CodeGenerator::AssembleArchTrap(Instruction* instr,
                                     FlagsCondition condition) {
  auto ool = zone()->New<WasmOutOfLineTrap>(this, instr);
  Label* tlabel = ool->entry();

  Loong64OperandConverter i(this, instr);
  if (instr->arch_opcode() == kLoong64Tst) {
    UseScratchRegisterScope temps(masm());
  } else if (instr->arch_opcode() == kLoong64Add_d ||
             instr->arch_opcode() == kLoong64Sub_d) {
    UseScratchRegisterScope temps(masm());
    Register scratch = temps.Acquire();
    __ srli_d(scratch, i.OutputRegister(), 63);
    __ srli_w(result, i.OutputRegister(), 31);
  } else if (instr->arch_opcode() == kLoong64AddOvf_d ||
             instr->arch_opcode() == kLoong64SubOvf_d) {
    UseScratchRegisterScope temps(masm());
  } else if (instr->arch_opcode() == kLoong64MulOvf_w ||
             instr->arch_opcode() == kLoong64MulOvf_d) {
    UseScratchRegisterScope temps(masm());
  } else if (instr->arch_opcode() == kLoong64Cmp32 ||
             instr->arch_opcode() == kLoong64Cmp64) {
    Operand right = i.InputOperand(1);
  } else if (instr->arch_opcode() == kLoong64Float64Cmp ||
             instr->arch_opcode() == kLoong64Float32Cmp) {
    FPURegister left = i.InputOrZeroDoubleRegister(0);
    FPURegister right = i.InputOrZeroDoubleRegister(1);
    if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
        !__ IsDoubleZeroRegSet()) {
      __ Move(kDoubleRegZero, 0.0);
    }
    FlagsConditionToConditionCmpFPU(&predicate, condition);
  } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
    __ xori(i.OutputRegister(), i.TempRegister(0), 1);
  } else {
    PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
           instr->arch_opcode());
    TRACE("UNIMPLEMENTED code_generator_loong64: %s at line %d\n", __FUNCTION__,
          __LINE__);
void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
  Loong64OperandConverter i(this, instr);
  std::vector<std::pair<int32_t, Label*>> cases;
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
  }
  UseScratchRegisterScope temps(masm());
  Register scratch = temps.Acquire();
  __ slli_w(scratch, input, 0);
  AssembleArchBinarySearchSwitchRange(scratch, i.InputRpo(1), cases.data(),
                                      cases.data() + cases.size());
}

void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  Loong64OperandConverter i(this, instr);
  size_t const case_count = instr->InputCount() - 2;
  UseScratchRegisterScope temps(masm());
  Register scratch = temps.Acquire();
  __ slli_w(scratch, input, 0);
  __ Branch(GetLabel(i.InputRpo(1)), hs, scratch, Operand(case_count));
  __ GenerateSwitchTable(scratch, case_count, [&i, this](size_t index) {
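// For dense switches GenerateSwitchTable emits a jump table: the key is
// truncated to 32 bits, range-checked against case_count (out-of-range values
// branch to the default block via i.InputRpo(1)), and each table entry comes
// from the lambda, which maps a case index to the label of its target block.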
  const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
  if (!saves_fpu.is_empty()) {
    int count = saves_fpu.Count();
    frame->AllocateSavedCalleeRegisterSlots(count *
  const RegList saves = call_descriptor->CalleeSavedRegisters();
  if (!saves.is_empty()) {
    int count = saves.Count();
    frame->AllocateSavedCalleeRegisterSlots(count);
  if (call_descriptor->IsCFunctionCall()) {
#if V8_ENABLE_WEBASSEMBLY
    if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
      __ StubPrologue(StackFrame::C_WASM_ENTRY);
  } else if (call_descriptor->IsJSFunctionCall()) {
    __ StubPrologue(info()->GetOutputStackFrameType());
#if V8_ENABLE_WEBASSEMBLY
    if (call_descriptor->IsAnyWasmFunctionCall() ||
        call_descriptor->IsWasmImportWrapper() ||
        call_descriptor->IsWasmCapiFunction()) {
    if (call_descriptor->IsWasmCapiFunction()) {
  int required_slots =
      frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
  if (info()->is_osr()) {
    __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
    __ RecordComment("-- OSR entrypoint --");
  const RegList saves = call_descriptor->CalleeSavedRegisters();
  const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
  if (required_slots > 0) {
#if V8_ENABLE_WEBASSEMBLY
      UseScratchRegisterScope temps(masm());
      Register stack_limit = temps.Acquire();
      __ LoadStackLimit(stack_limit,
                        MacroAssembler::StackLimitKind::kRealStackLimit);
      __ Add_d(stack_limit, stack_limit,
      __ Branch(&done, uge, sp, Operand(stack_limit));
      if (v8_flags.experimental_wasm_growable_stacks) {
            WasmHandleStackOverflowDescriptor::FrameBaseRegister());
        __ MultiPushFPU(fp_regs_to_save);
            WasmHandleStackOverflowDescriptor::FrameBaseRegister(), fp,
        __ CallBuiltin(Builtin::kWasmHandleStackOverflow);
        __ MultiPopFPU(fp_regs_to_save);
        __ Call(static_cast<intptr_t>(Builtin::kWasmStackOverflow),
        ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
  const int returns = frame()->GetReturnSlotCount();
  required_slots -= saves.Count();
  required_slots -= saves_fpu.Count();
  required_slots -= returns;
  if (required_slots > 0) {
  if (!saves_fpu.is_empty()) {
    __ MultiPushFPU(saves_fpu);
  if (!saves.is_empty()) {
    __ MultiPush(saves);
  const int returns = frame()->GetReturnSlotCount();
  const RegList saves = call_descriptor->CalleeSavedRegisters();
  if (!saves.is_empty()) {
  const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
  if (!saves_fpu.is_empty()) {
    __ MultiPopFPU(saves_fpu);
  Loong64OperandConverter g(this, nullptr);
  const int parameter_slots =
      static_cast<int>(call_descriptor->ParameterSlotCount());
  if (parameter_slots != 0) {
    if (additional_pop_count->IsImmediate()) {
      DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
      __ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue,
                g.ToRegister(additional_pop_count),
                Operand(static_cast<int64_t>(0)));
#if V8_ENABLE_WEBASSEMBLY
  if (call_descriptor->IsAnyWasmFunctionCall() &&
      v8_flags.experimental_wasm_growable_stacks) {
    UseScratchRegisterScope temps{masm()};
    Register scratch = temps.Acquire();
    UseScratchRegisterScope temps{masm()};
    Register scratch = temps.Acquire();
    __ PrepareCallCFunction(1, scratch);
    __ CallCFunction(ExternalReference::wasm_shrink_stack(), 1);
      call_descriptor->IsJSFunctionCall() &&
      parameter_slots != 0;
  if (call_descriptor->IsCFunctionCall()) {
    if (additional_pop_count->IsImmediate() &&
        g.ToConstant(additional_pop_count).ToInt32() == 0) {
      if (parameter_slots > 1) {
        __ li(t1, parameter_slots);
        __ Movn(t0, t1, t2);
  } else if (additional_pop_count->IsImmediate()) {
    int additional_count = g.ToConstant(additional_pop_count).ToInt32();
    __ Drop(parameter_slots + additional_count);
    Register pop_reg = g.ToRegister(additional_pop_count);
    __ Drop(parameter_slots);
void CodeGenerator::PrepareForDeoptimizationExits(
    ZoneDeque<DeoptimizationExit*>* exits) {}
  Loong64OperandConverter g(this, nullptr);
  int last_frame_slot_id =
  int slot_id = last_frame_slot_id + sp_delta + new_slots;
  if (source->IsRegister()) {
    __ Push(g.ToRegister(source));
  } else if (source->IsStackSlot()) {
    UseScratchRegisterScope temps(masm());
    Register scratch = temps.Acquire();
    __ Ld_d(scratch, g.ToMemOperand(source));
  Loong64OperandConverter g(this, nullptr);
  if (dest->IsRegister()) {
    __ Pop(g.ToRegister(dest));
  } else if (dest->IsStackSlot()) {
    UseScratchRegisterScope temps(masm());
    Register scratch = temps.Acquire();
    __ St_d(scratch, g.ToMemOperand(dest));
  int last_frame_slot_id =
  int slot_id = last_frame_slot_id + sp_delta;
  DCHECK(!source->IsImmediate());
  if (temps.hasAvailable()) {
  } else if (temps.hasAvailableFp()) {
  DCHECK(temps.hasAvailableFp());
  Loong64OperandConverter g(this, nullptr);
  if (source->IsStackSlot()) {
    __ Fld_d(g.ToDoubleRegister(&scratch), g.ToMemOperand(source));
  } else {
    DCHECK(source->IsRegister());
    __ movgr2fr_d(g.ToDoubleRegister(&scratch), g.ToRegister(source));
  Loong64OperandConverter g(this, nullptr);
  if (dest->IsStackSlot()) {
    __ Fst_d(g.ToDoubleRegister(&scratch), g.ToMemOperand(dest));
  } else {
    DCHECK(dest->IsRegister());
    __ movfr2gr_d(g.ToRegister(dest), g.ToDoubleRegister(&scratch));
  InstructionOperand* src = &move->source();
  InstructionOperand* dst = &move->destination();
  UseScratchRegisterScope temps(masm());
  if (src->IsConstant() || (src->IsStackSlot() && dst->IsStackSlot())) {
  if (src->IsAnyStackSlot() || dst->IsAnyStackSlot()) {
    Loong64OperandConverter g(this, nullptr);
    bool src_need_scratch = false;
    bool dst_need_scratch = false;
    if (src->IsStackSlot()) {
      src_need_scratch =
          (!is_int16(src_mem.offset()) || (src_mem.offset() & 0b11) != 0) &&
          (!is_int12(src_mem.offset()) && !src_mem.hasIndexReg());
    } else if (src->IsFPStackSlot()) {
      src_need_scratch = !is_int12(src_mem.offset()) && !src_mem.hasIndexReg();
    }
    if (dst->IsStackSlot()) {
      dst_need_scratch =
          (!is_int16(dst_mem.offset()) || (dst_mem.offset() & 0b11) != 0) &&
          (!is_int12(dst_mem.offset()) && !dst_mem.hasIndexReg());
    } else if (dst->IsFPStackSlot()) {
      dst_need_scratch = !is_int12(dst_mem.offset()) && !dst_mem.hasIndexReg();
    }
    if (src_need_scratch || dst_need_scratch) {
bool Is32BitOperand(InstructionOperand* operand) {
  DCHECK(operand->IsStackSlot() || operand->IsRegister());

bool Use32BitMove(InstructionOperand* source, InstructionOperand* destination) {
  return Is32BitOperand(source) && Is32BitOperand(destination);
}
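// Use32BitMove reports whether both the source and destination hold 32-bit
// representations (e.g. compressed tagged values), which lets the move/swap
// logic below use word-sized transfers instead of full 64-bit ones.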
  Loong64OperandConverter g(this, nullptr);
  if (source->IsRegister()) {
    Register src = g.ToRegister(source);
  } else if (source->IsStackSlot()) {
    UseScratchRegisterScope temps(masm());
    Register scratch = temps.Acquire();
    __ Ld_d(scratch, src);
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    UseScratchRegisterScope temps(masm());
    Register scratch = temps.Acquire();
    switch (src.type()) {
        __ li(dst, Operand(src.ToInt32(), src.rmode()));
        __ li(dst, Operand(src.ToInt64(), src.rmode()));
        __ li(dst, src.ToExternalReference());
        Handle<HeapObject> src_object = src.ToHeapObject();
        __ LoadRoot(dst, index);
        __ li(dst, src_object);
        Handle<HeapObject> src_object = src.ToHeapObject();
        __ LoadTaggedRoot(dst, index);
        __ St_d(zero_reg, dst);
        UseScratchRegisterScope temps(masm());
        Register scratch = temps.Acquire();
        __ St_d(scratch, dst);
        __ Move(dst, src.ToFloat32());
        __ Move(dst, src.ToFloat64().value());
  } else if (source->IsFPRegister()) {
    FPURegister src = g.ToDoubleRegister(source);
    FPURegister dst = g.ToDoubleRegister(destination);
  } else if (source->IsFPStackSlot()) {
    __ Fld_d(temp, src);
  Loong64OperandConverter g(this, nullptr);
  if (source->IsRegister()) {
    UseScratchRegisterScope temps(masm());
    Register scratch = temps.Acquire();
    Register src = g.ToRegister(source);
    __ Move(scratch, src);
    __ Move(dst, scratch);
    __ mov(scratch, src);
    __ St_d(scratch, dst);
  } else if (source->IsStackSlot()) {
    UseScratchRegisterScope temps(masm());
    Register scratch = temps.Acquire();
    __ Ld_d(scratch, src);
    __ Fld_d(scratch_d, dst);
    __ St_d(scratch, dst);
    __ Fst_d(scratch_d, src);
  } else if (source->IsFPRegister()) {
    FPURegister src = g.ToDoubleRegister(source);
    FPURegister dst = g.ToDoubleRegister(destination);
    __ Move(scratch_d, src);
    __ Move(dst, scratch_d);
    __ Move(scratch_d, src);
    __ Fst_d(scratch_d, dst);
  } else if (source->IsFPStackSlot()) {
    UseScratchRegisterScope temps(masm());
    Register scratch = temps.Acquire();
    __ Fld_d(scratch_d, src);
    __ Ld_d(scratch, dst);
    __ Fst_d(scratch_d, dst);
    __ St_d(scratch, src);
#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
#undef ASSEMBLE_ATOMIC_STORE_INTEGER
#undef ASSEMBLE_ATOMIC_BINOP
#undef ASSEMBLE_ATOMIC_BINOP_EXT
#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT
#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT
#undef ASSEMBLE_IEEE754_BINOP
#undef ASSEMBLE_IEEE754_UNOP
#define Assert(condition)
static constexpr T decode(U value)
void slli_w(Register rd, Register rj, int32_t ui5)
static constexpr bool IsBuiltinId(Builtin builtin)
static constexpr int kCallerFPOffset
static constexpr int kCallerPCOffset
static constexpr int kFixedSlotCountAboveFp
static constexpr int kFixedFrameSizeAboveFp
static V8_EXPORT_PRIVATE ExternalReference isolate_address()
Bootstrapper * bootstrapper()
RootsTable & roots_table()
Tagged_t ReadOnlyRootPtr(RootIndex index)
static constexpr MainThreadFlags kPointersToHereAreInterestingMask
static constexpr MainThreadFlags kPointersFromHereAreInterestingMask
static Operand EmbeddedNumber(double number)
constexpr void set(RegisterT reg)
@ COMPRESSED_EMBEDDED_OBJECT
bool IsRootHandle(IndirectHandle< T > handle, RootIndex *index) const
static constexpr Tagged< Smi > FromInt(int value)
static constexpr int32_t TypeToMarker(Type type)
static constexpr int kArgCOffset
static constexpr int kFrameTypeOffset
void Include(const Register ®1, const Register ®2=no_reg)
static constexpr Register GapRegister()
void MoveToTempLocation(InstructionOperand *src, MachineRepresentation rep) final
void AssembleArchJump(RpoNumber target)
void AssembleTailCallAfterGap(Instruction *instr, int first_unused_stack_slot)
void AssembleReturn(InstructionOperand *pop)
void AssembleTailCallBeforeGap(Instruction *instr, int first_unused_stack_slot)
FrameAccessState * frame_access_state() const
void AssembleConstructFrame()
uint16_t parameter_count_
CodeGenResult AssembleArchInstruction(Instruction *instr)
void PopTempStackSlots() final
DeoptimizationExit * BuildTranslation(Instruction *instr, int pc_offset, size_t frame_state_offset, size_t immediate_args_count, OutputFrameStateCombine state_combine)
Isolate * isolate() const
void AssembleArchBinarySearchSwitch(Instruction *instr)
void AssembleArchJumpRegardlessOfAssemblyOrder(RpoNumber target)
FrameAccessState * frame_access_state_
void FinishFrame(Frame *frame)
Linkage * linkage() const
void AssembleArchBoolean(Instruction *instr, FlagsCondition condition)
void AssembleJumpTable(base::Vector< Label * > targets)
void AssembleArchBranch(Instruction *instr, BranchInfo *branch)
void AssembleMove(InstructionOperand *source, InstructionOperand *destination) final
void SetPendingMove(MoveOperands *move) final
void AssembleCodeStartRegisterCheck()
bool ShouldApplyOffsetToStackCheck(Instruction *instr, uint32_t *offset)
void RecordSafepoint(ReferenceMap *references, int pc_offset=0)
void AssembleArchBinarySearchSwitchRange(Register input, RpoNumber def_block, std::pair< int32_t, Label * > *begin, std::pair< int32_t, Label * > *end)
void PrepareForDeoptimizationExits(ZoneDeque< DeoptimizationExit * > *exits)
StubCallMode DetermineStubCallMode() const
bool caller_registers_saved_
void AssembleArchTableSwitch(Instruction *instr)
void BailoutIfDeoptimized()
bool IsMaterializableFromRoot(Handle< HeapObject > object, RootIndex *index_return)
void AssembleDeconstructFrame()
void AssembleArchConditionalBranch(Instruction *instr, BranchInfo *branch)
AllocatedOperand Push(InstructionOperand *src) final
void MoveTempLocationTo(InstructionOperand *dst, MachineRepresentation rep) final
uint32_t GetStackCheckOffset()
void AssembleArchDeoptBranch(Instruction *instr, BranchInfo *branch)
Label * GetLabel(RpoNumber rpo)
void RecordCallPosition(Instruction *instr)
void AssembleSwap(InstructionOperand *source, InstructionOperand *destination) final
MoveCycleState move_cycle_
void AssemblePrepareTailCall()
void AssembleArchConditionalBoolean(Instruction *instr)
void RecordDeoptInfo(Instruction *instr, int pc_offset)
OptimizedCompilationInfo * info() const
const Frame * frame() const
void AssembleArchSelect(Instruction *instr, FlagsCondition condition)
void Pop(InstructionOperand *src, MachineRepresentation rep) final
void SetFrameAccessToDefault()
void SetFrameAccessToSP()
void SetFrameAccessToFP()
FrameOffset GetFrameOffset(int spill_slot) const
const Frame * frame() const
void IncreaseSPDelta(int amount)
FrameAccessState * frame_access_state() const
DoubleRegister ToDoubleRegister(InstructionOperand *op)
int32_t InputInt32(size_t index)
Constant ToConstant(InstructionOperand *op) const
DoubleRegister InputDoubleRegister(size_t index)
Register InputRegister(size_t index) const
Register ToRegister(InstructionOperand *op) const
bool IsFPStackSlot() const
ArchOpcode arch_opcode() const
const InstructionOperand * OutputAt(size_t i) const
InstructionCode opcode() const
const InstructionOperand * InputAt(size_t i) const
CallDescriptor * GetIncomingDescriptor() const
MachineRepresentation representation() const
static LocationOperand * cast(InstructionOperand *op)
FloatRegister OutputSingleRegister(size_t index=0)
MemOperand SlotToMemOperand(int slot) const
Operand InputImmediate(size_t index)
MemOperand MemoryOperand(size_t *first_index)
Register InputOrZeroRegister(size_t index)
FloatRegister InputSingleRegister(size_t index)
Loong64OperandConverter(CodeGenerator *gen, Instruction *instr)
DoubleRegister InputOrZeroDoubleRegister(size_t index)
Operand InputOperand(size_t index)
DoubleRegister InputOrZeroSingleRegister(size_t index)
MemOperand MemoryOperand(size_t index=0)
MemOperand ToMemOperand(InstructionOperand *op) const
FloatRegister ToSingleRegister(InstructionOperand *op)
size_t UnoptimizedFrameSlots()
static OutputFrameStateCombine Ignore()
IndirectPointerTag indirect_pointer_tag_
#define ATOMIC_BINOP_CASE(op, inst)
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr)
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_instr, store_instr, cmp_reg)
#define ASSEMBLE_IEEE754_UNOP(name)
#define ASSEMBLE_IEEE754_BINOP(name)
#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, order)
#define ASSEMBLE_ATOMIC_BINOP(load_instr, store_instr, bin_instr)
RecordWriteMode const mode_
#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(load_linked, store_conditional, sign_extend, size, representation)
#define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, sign_extend, size, bin_instr, representation)
#define UNSUPPORTED_COND(opcode, condition)
#define CREATE_OOL_CLASS(ool_name, masm_ool_name, T)
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(load_linked, store_conditional, sign_extend, size, representation)
#define COMPRESS_POINTERS_BOOL
#define V8_JS_LINKAGE_INCLUDES_DISPATCH_HANDLE_BOOL
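// The ASSEMBLE_ATOMIC_* helpers listed above wrap LoongArch load-linked /
// store-conditional retry loops. Conceptual, hedged sketch of the code shape
// such a helper emits for a 32-bit atomic add; the Ll_w/Sc_w/Add_w/dbar
// wrappers are assumed names for the corresponding loong64 MacroAssembler
// instructions, and the function itself is illustrative only.
void EmitAtomicAddWord32Sketch(MacroAssembler* masm, Register addr,
                               Register value, Register result,
                               Register temp) {
  Label retry;
  masm->dbar(0);                                // barrier before the RMW loop
  masm->bind(&retry);
  masm->Ll_w(result, MemOperand(addr, 0));      // load-linked previous value
  masm->Add_w(temp, result, Operand(value));    // apply the binary operation
  masm->Sc_w(temp, MemOperand(addr, 0));        // store-conditional; 0 == failed
  masm->BranchShort(&retry, eq, temp, Operand(zero_reg));
  masm->dbar(0);                                // barrier after success
}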
ZoneVector< RpoNumber > & result
LiftoffRegList regs_to_save
InstructionOperand destination
v8::SourceLocation SourceLocation
V8_INLINE Dest bit_cast(Source const &source)
constexpr T ByteSwap(T value)
kValueIsIndirectPointer
void SignExtend(MacroAssembler *masm, Instruction *instr, Register *left, Operand *right, Register *temp0, Register *temp1)
static bool HasRegisterInput(Instruction *instr, size_t index)
void AssembleBranchToLabels(CodeGenerator *gen, MacroAssembler *masm, Instruction *instr, FlagsCondition condition, Label *tlabel, Label *flabel, bool fallthru)
kSignedGreaterThanOrEqual
kFloatGreaterThanOrEqualOrUnordered
kUnsignedLessThanOrEqual
kFloatLessThanOrEqualOrUnordered
kFloatGreaterThanOrUnordered
kFloatGreaterThanOrEqual
kUnsignedGreaterThanOrEqual
kFloatLessThanOrUnordered
kMemoryAccessProtectedMemOutOfBounds
kMemoryAccessProtectedNullDereference
void Or(LiftoffAssembler *lasm, Register dst, Register lhs, Register rhs)
void Xor(LiftoffAssembler *lasm, Register dst, Register lhs, Register rhs)
void And(LiftoffAssembler *lasm, Register dst, Register lhs, Register rhs)
constexpr Register kGpParamRegisters[]
constexpr DoubleRegister kFpParamRegisters[]
constexpr Register kGpReturnRegisters[]
constexpr Register no_reg
constexpr Register kRootRegister
RegListBase< DoubleRegister > DoubleRegList
V8_EXPORT_PRIVATE constexpr int ElementSizeInPointers(MachineRepresentation rep)
DwVfpRegister DoubleRegister
const uint32_t kFCSRInvalidOpCauseMask
void PrintF(const char *format,...)
constexpr DoubleRegister kScratchDoubleReg
kIndirectPointerNullTag
RegListBase< Register > RegList
constexpr FPUControlRegister FCSR2
V8_INLINE constexpr bool IsValidIndirectPointerTag(IndirectPointerTag tag)
constexpr int kSystemPointerSizeLog2
constexpr bool IsAnyTagged(MachineRepresentation rep)
constexpr bool IsAnyCompressed(MachineRepresentation rep)
MemOperand FieldMemOperand(Register object, int offset)
constexpr int kSystemPointerSize
constexpr bool IsFloatingPoint(MachineRepresentation rep)
constexpr Register kReturnRegister0
const uint32_t kFCSROverflowCauseMask
constexpr Register kWasmImplicitArgRegister
constexpr LowDwVfpRegister kDoubleRegZero
V8_EXPORT_PRIVATE FlagValues v8_flags
constexpr Register kJavaScriptCallCodeStartRegister
const int kNumCalleeSavedFPU
constexpr Register kCArgRegs[]
constexpr int kDoubleSize
constexpr Register kJavaScriptCallDispatchHandleRegister
static int FrameSlotToFPOffset(int slot)
SwVfpRegister FloatRegister
#define DCHECK_NOT_NULL(val)
#define DCHECK_IMPLIES(v1, v2)
#define DCHECK_NE(v1, v2)
#define DCHECK(condition)
#define DCHECK_EQ(v1, v2)
constexpr bool IsAligned(T value, U alignment)
std::optional< CPURegister > scratch_reg
DoubleRegList scratch_fpregs
std::optional< DoubleRegister > scratch_fpreg
std::optional< UseScratchRegisterScope > temps
#define V8_STATIC_ROOTS_BOOL