#ifndef V8_EXECUTION_RISCV_SIMULATOR_RISCV_H_
#define V8_EXECUTION_RISCV_SIMULATOR_RISCV_H_
// Returns the negative absolute value of its argument.
template <typename T,
          typename = typename std::enable_if<std::is_signed<T>::value>::type>
T Nabs(T a) {
  return a < 0 ? a : -a;
}
#if defined(USE_SIMULATOR)

typedef unsigned __uint128_t __attribute__((__mode__(__TI__)));
#ifdef V8_TARGET_ARCH_32_BIT
using sreg_t = int32_t;
using reg_t = uint32_t;
using freg_t = uint64_t;
using sfreg_t = int64_t;
#elif V8_TARGET_ARCH_64_BIT
using sreg_t = int64_t;
using reg_t = uint64_t;
using freg_t = uint64_t;
using sfreg_t = int64_t;
#else
#error "Cannot detect Riscv's bitwidth"
#endif
#define sext32(x) ((sreg_t)(int32_t)(x))
#define zext32(x) ((reg_t)(uint32_t)(x))
#ifdef V8_TARGET_ARCH_64_BIT
#define sext_xlen(x) (((sreg_t)(x) << (64 - xlen)) >> (64 - xlen))
#define zext_xlen(x) (((reg_t)(x) << (64 - xlen)) >> (64 - xlen))
#elif V8_TARGET_ARCH_32_BIT
#define sext_xlen(x) (((sreg_t)(x) << (32 - xlen)) >> (32 - xlen))
#define zext_xlen(x) (((reg_t)(x) << (32 - xlen)) >> (32 - xlen))
#endif
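// Illustrative example (an addition for clarity, not from the V8 source): on a
// 64-bit target these macros truncate to the operand width and sign- or
// zero-extend back to the native register width, e.g.
//   sreg_t r = sext32(0x80000000u);  // r == (sreg_t)0xFFFFFFFF80000000
//   reg_t  u = zext32(0x80000000u);  // u == 0x0000000080000000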
#define BIT(n) (0x1LL << n)
#define QUIET_BIT_S(nan) (base::bit_cast<int32_t>(nan) & BIT(22))
#define QUIET_BIT_D(nan) (base::bit_cast<int64_t>(nan) & BIT(51))
static inline bool isSnan(float fp) { return !QUIET_BIT_S(fp); }
static inline bool isSnan(double fp) { return !QUIET_BIT_D(fp); }
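// Illustrative note (addition, not from the V8 source): bit 22 (float) and
// bit 51 (double) are the "quiet" bits of a NaN payload, so a NaN with that
// bit clear is signaling:
//   bool s = isSnan(base::bit_cast<float>(0x7F800001u));  // true, quiet bit clear
// These helpers only distinguish NaN flavors; the caller must already know the
// value is a NaN.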
#ifdef V8_TARGET_ARCH_64_BIT
inline uint64_t mulhu(uint64_t a, uint64_t b) {
  __uint128_t full_result = ((__uint128_t)a) * ((__uint128_t)b);
  return full_result >> 64;
}

inline int64_t mulh(int64_t a, int64_t b) {
  __int128_t full_result = ((__int128_t)a) * ((__int128_t)b);
  return full_result >> 64;
}

inline int64_t mulhsu(int64_t a, uint64_t b) {
  __int128_t full_result = ((__int128_t)a) * ((__uint128_t)b);
  return full_result >> 64;
}
#elif V8_TARGET_ARCH_32_BIT
inline uint32_t mulhu(uint32_t a, uint32_t b) {
  uint64_t full_result = ((uint64_t)a) * ((uint64_t)b);
  uint64_t upper_part = full_result >> 32;
  return (uint32_t)upper_part;
}

inline int32_t mulh(int32_t a, int32_t b) {
  int64_t full_result = ((int64_t)a) * ((int64_t)b);
  int64_t upper_part = full_result >> 32;
  return (int32_t)upper_part;
}

inline int32_t mulhsu(int32_t a, uint32_t b) {
  int64_t full_result = ((int64_t)a) * ((uint64_t)b);
  int64_t upper_part = full_result >> 32;
  return (int32_t)upper_part;
}
#endif
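// Illustrative usage (addition, not from the V8 source): the mulh* helpers
// return the upper half of the double-width product, matching RISC-V
// MULH/MULHU/MULHSU semantics, e.g. on RV64:
//   uint64_t hi = mulhu(0x8000000000000000ULL, 4);  // hi == 2 (2^63 * 4 == 2^65)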
#define F32_SIGN ((uint32_t)1 << 31)
union u32_f32 {
  uint32_t u;
  float f;
};
inline float fsgnj32(float rs1, float rs2, bool n, bool x) {
  u32_f32 a = {.f = rs1}, b = {.f = rs2};
  u32_f32 res;
  res.u = (a.u & ~F32_SIGN) |
          ((((x) ? a.u : (n) ? F32_SIGN : 0) ^ b.u) & F32_SIGN);
  return res.f;
}

inline Float32 fsgnj32(Float32 rs1, Float32 rs2, bool n, bool x) {
  u32_f32 a = {.u = rs1.get_bits()}, b = {.u = rs2.get_bits()};
  u32_f32 res;
  if (x) {  // fsgnjx.s
    res.u = (a.u & ~F32_SIGN) | ((a.u ^ b.u) & F32_SIGN);
  } else if (n) {  // fsgnjn.s
    res.u = (a.u & ~F32_SIGN) | ((F32_SIGN ^ b.u) & F32_SIGN);
  } else {  // fsgnj.s
    res.u = (a.u & ~F32_SIGN) | ((0 ^ b.u) & F32_SIGN);
  }
  return Float32::FromBits(res.u);
}
#define F64_SIGN ((uint64_t)1 << 63)
union u64_f64 {
  uint64_t u;
  double d;
};
inline double fsgnj64(double rs1, double rs2, bool n, bool x) {
  u64_f64 a = {.d = rs1}, b = {.d = rs2};
  u64_f64 res;
  res.u = (a.u & ~F64_SIGN) |
          ((((x) ? a.u : (n) ? F64_SIGN : 0) ^ b.u) & F64_SIGN);
  return res.d;
}

inline Float64 fsgnj64(Float64 rs1, Float64 rs2, bool n, bool x) {
  u64_f64 a = {.u = rs1.get_bits()}, b = {.u = rs2.get_bits()};
  u64_f64 res;
  if (x) {  // fsgnjx.d
    res.u = (a.u & ~F64_SIGN) | ((a.u ^ b.u) & F64_SIGN);
  } else if (n) {  // fsgnjn.d
    res.u = (a.u & ~F64_SIGN) | ((F64_SIGN ^ b.u) & F64_SIGN);
  } else {  // fsgnj.d
    res.u = (a.u & ~F64_SIGN) | ((0 ^ b.u) & F64_SIGN);
  }
  return Float64::FromBits(res.u);
}
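// Illustrative example (addition, not from the V8 source): the sign-injection
// helpers keep rs1's magnitude and take the sign from rs2 (fsgnj), its
// negation (fsgnjn, n == true), or the XOR of both signs (fsgnjx, x == true):
//   float a = fsgnj32(-2.0f, 1.0f, false, false);  // a == 2.0f
//   float b = fsgnj32(2.0f, 1.0f, true, false);    // b == -2.0f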
inline bool is_boxed_float(int64_t v) { return (uint32_t)((v >> 32) + 1) == 0; }
inline int64_t box_float(float v) {
  return (0xFFFFFFFF00000000 | base::bit_cast<int32_t>(v));
}

inline uint64_t box_float(uint32_t v) { return (0xFFFFFFFF00000000 | v); }
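// Illustrative example (addition, not from the V8 source): narrower floats are
// NaN-boxed in the 64-bit FPU registers by setting the upper 32 bits to all
// ones; is_boxed_float() checks exactly that pattern:
//   uint64_t boxed = box_float(0x3F800000u);                 // bit pattern of 1.0f
//   bool ok = is_boxed_float(static_cast<int64_t>(boxed));   // true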
class CachePage {
 public:
  static const int LINE_VALID = 0;
  static const int LINE_INVALID = 1;

  static const int kPageShift = 12;
  static const int kPageSize = 1 << kPageShift;
  static const int kPageMask = kPageSize - 1;
  static const int kLineShift = 2;  // The cache line is only 4 bytes right now.
  static const int kLineLength = 1 << kLineShift;
  static const int kLineMask = kLineLength - 1;

  CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }

  char* ValidityByte(int offset) {
    return &validity_map_[offset >> kLineShift];
  }

  char* CachedData(int offset) { return &data_[offset]; }

 private:
  char data_[kPageSize];  // The cached data.
  static const int kValidityMapSize = kPageSize >> kLineShift;
  char validity_map_[kValidityMapSize];  // One byte per line.
};
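// Illustrative note (addition, not from the V8 source): the simulated ICache
// tracks validity per 4-byte line within each 4 KB page, so marking an
// instruction valid looks roughly like
//   *page->ValidityByte(offset & CachePage::kPageMask) = CachePage::LINE_VALID;
// where `page` and `offset` stand in for a cached page and an address offset.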
class SimInstructionBase : public InstructionBase {
 public:
  Type InstructionType() const { return type_; }
  inline Instruction* instr() const { return instr_; }
  inline int32_t operand() const { return operand_; }

 protected:
  SimInstructionBase() : operand_(-1), instr_(nullptr), type_(kUnsupported) {}
  explicit SimInstructionBase(Instruction* instr) {}

  int32_t operand_;
  Instruction* instr_;
  Type type_;

 private:
  DISALLOW_ASSIGN(SimInstructionBase);
};
class SimInstruction : public InstructionGetters<SimInstructionBase> {
 public:
  SimInstruction() {}

  explicit SimInstruction(Instruction* instr) { *this = instr; }

  SimInstruction& operator=(Instruction* instr) {
    operand_ = *reinterpret_cast<const int32_t*>(instr);
    instr_ = instr;
    type_ = InstructionBase::InstructionType();
    DCHECK(reinterpret_cast<void*>(&operand_) == this);
    return *this;
  }
};
class Simulator : public SimulatorBase {
 public:
  friend class RiscvDebugger;

  explicit Simulator(Isolate* isolate);
  // Accessors for register state.
  void set_register(int reg, sreg_t value);
  void set_register_word(int reg, int32_t value);
  sreg_t get_register(int reg) const;
  double get_double_from_register_pair(int reg);

  // Same for FPURegisters.
  void set_fpu_register(int fpureg, int64_t value);
  void set_fpu_register_word(int fpureg, int32_t value);
  void set_fpu_register_hi_word(int fpureg, int32_t value);
  void set_fpu_register_float(int fpureg, float value);
  void set_fpu_register_float(int fpureg, Float32 value);
  void set_fpu_register_double(int fpureg, double value);
  void set_fpu_register_double(int fpureg, Float64 value);
  int64_t get_fpu_register(int fpureg) const;
  int32_t get_fpu_register_word(int fpureg) const;
  int32_t get_fpu_register_signed_word(int fpureg) const;
  int32_t get_fpu_register_hi_word(int fpureg) const;
  float get_fpu_register_float(int fpureg) const;
  Float32 get_fpu_register_Float32(int fpureg, bool check_nanbox = true) const;
  double get_fpu_register_double(int fpureg) const;
  Float64 get_fpu_register_Float64(int fpureg) const;
  uint32_t read_csr_value(uint32_t csr);
  void write_csr_value(uint32_t csr, reg_t value);
  void set_csr_bits(uint32_t csr, reg_t flags);
  void clear_csr_bits(uint32_t csr, reg_t flags);

  void set_fflags(uint32_t flags) { set_csr_bits(csr_fflags, flags); }
  void clear_fflags(int32_t flags) { clear_csr_bits(csr_fflags, flags); }
#ifdef CAN_USE_RVV_INSTRUCTIONS
  // RVV vector state accessors.
  __int128_t get_vregister(int vreg) const;
  inline uint64_t rvv_vlen() const { return kRvvVLEN; }
  inline uint64_t rvv_vtype() const { return vtype_; }
  inline uint64_t rvv_vl() const { return vl_; }
  inline uint64_t rvv_vstart() const { return vstart_; }
  inline uint64_t rvv_vxsat() const { return vxsat_; }
  inline uint64_t rvv_vxrm() const { return vxrm_; }
  inline uint64_t rvv_vcsr() const { return vcsr_; }
  inline uint64_t rvv_vlenb() const { return vlenb_; }
  inline uint32_t rvv_zimm() const { return instr_.Rvvzimm(); }
  inline uint32_t rvv_vlmul() const { return (rvv_vtype() & 0x7); }
  inline float rvv_vflmul() const {
    if ((rvv_vtype() & 0b100) == 0) {
      return static_cast<float>(0x1 << (rvv_vtype() & 0x7));
    } else {
      return 1.0 / static_cast<float>(0x1 << (4 - (rvv_vtype() & 0x3)));
    }
  }
  inline uint32_t rvv_vsew() const { return ((rvv_vtype() >> 3) & 0x7); }
  inline const char* rvv_sew_s() const {
    uint32_t vsew = rvv_vsew();
#define CAST_VSEW(name) \
  case name:            \
    return #name;
    switch (vsew) { RVV_SEW(CAST_VSEW) default: return "unknown"; }
#undef CAST_VSEW
  }

  inline const char* rvv_lmul_s() const {
    uint32_t vlmul = rvv_vlmul();
#define CAST_VLMUL(name) \
  case name:             \
    return #name;
    switch (vlmul) { RVV_LMUL(CAST_VLMUL) default: return "unknown"; }
#undef CAST_VLMUL
  }
  // Returns the lane width in bits (8, 16, 32, 64).
  inline uint32_t rvv_sew() const {
    DCHECK_EQ(rvv_vsew() & (~0x7), 0x0);
    return (0x1 << rvv_vsew()) * 8;
  }
  inline uint64_t rvv_vlmax() const {
    if ((rvv_vlmul() & 0b100) != 0) {
      return (rvv_vlen() / rvv_sew()) >> (4 - (rvv_vlmul() & 0b11));
    } else {
      return ((rvv_vlen() << rvv_vlmul()) / rvv_sew());
    }
  }
#endif
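  // Illustrative example (addition, not from the V8 source): with VLEN = 128,
  // SEW = 32 (vsew encoding 0b010) and LMUL = 2 (vlmul encoding 0b001),
  // rvv_vlmax() evaluates (128 << 1) / 32 == 8 elements per register group.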
  inline uint32_t get_dynamic_rounding_mode();
  inline bool test_fflags_bits(uint32_t mask);

  float RoundF2FHelper(float input_val, int rmode);
  double RoundF2FHelper(double input_val, int rmode);
  template <typename I_TYPE, typename F_TYPE>
  I_TYPE RoundF2IHelper(F_TYPE original, int rmode);

  template <typename T>
  T FMaxMinHelper(T a, T b, MaxMinKind kind);

  template <typename T>
  bool CompareFHelper(T input1, T input2, FPUCondition cc);
  // Special case of set_register and get_register to access the raw PC value.
  void set_pc(sreg_t value);
  sreg_t get_pc() const;

  Address get_sp() const { return static_cast<Address>(get_register(sp)); }

  // Accessor to the internal simulator stack area.
  uintptr_t StackLimit(uintptr_t c_limit) const;
  uintptr_t StackBase() const;

  // Return central stack view, without additional safety margins.
  base::Vector<uint8_t> GetCentralStackView() const;

  void DoSwitchStackLimit(Instruction* instr);
  // Wraps an argument to a simulated call, tagged as integer or FP so it can
  // be placed in the matching argument register.
  class CallArgument {
   public:
    template <typename T>
    explicit CallArgument(T argument) {
      DCHECK(sizeof(argument) <= sizeof(bits_));
      bits_ = ConvertArg(argument);
      type_ = GP_ARG;
    }

    explicit CallArgument(double argument) {
      DCHECK(sizeof(argument) == sizeof(bits_));
      memcpy(&bits_, &argument, sizeof(argument));
      type_ = FP_ARG;
    }

    explicit CallArgument(float argument) {
      // Float arguments are not currently supported.
      UNIMPLEMENTED();
    }

    // This indicates the end of the arguments.
    static CallArgument End() { return CallArgument(); }
    int64_t bits() const { return bits_; }
    bool IsEnd() const { return type_ == NO_ARG; }
    bool IsGP() const { return type_ == GP_ARG; }
    bool IsFP() const { return type_ == FP_ARG; }
   private:
    enum CallArgumentType { GP_ARG, FP_ARG, NO_ARG };

    int64_t bits_;
    CallArgumentType type_;

    CallArgument() { type_ = NO_ARG; }
  };
  template <typename Return, typename... Args>
  Return Call(Address entry, Args... args) {
#ifdef V8_TARGET_ARCH_RISCV64
    // Convert all arguments to CallArgument.
    CallArgument call_args[] = {CallArgument(args)..., CallArgument::End()};
    CallImpl(entry, call_args);
    return ReadReturn<Return>();
#else
    return VariadicCall<Return>(this, &Simulator::CallImpl, entry, args...);
#endif
  }
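  // Illustrative usage (addition, not from the V8 source): generated code at
  // `entry` can be invoked like an ordinary function; `sim` and `entry` are
  // hypothetical names for a Simulator instance and a code start address:
  //   int64_t sum = sim->Call<int64_t>(entry, 2, 3);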
  double CallFP(Address entry, double d0, double d1);

  void set_last_debugger_input(char* input);
  char* last_debugger_input() { return last_debugger_input_; }
  // Redirection support.
  static void SetRedirectInstruction(Instruction* instruction);

  // ICache checking.
  static bool ICacheMatch(void* one, void* two);
  static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
                          size_t size);

  // Returns true if pc register contains one of the 'special_values' defined
  // below (bad_ra, end_sim_pc).
  bool has_bad_pc() const;
 private:
  enum special_values {
    bad_ra = -1,      // Known bad pc value; the simulator must not execute it.
    end_sim_pc = -2,  // Signals the simulator to stop execution.
    Unpredictable = 0xbadbeaf
  };
#ifdef V8_TARGET_ARCH_RISCV64
  void CallAnyCTypeFunction(Address target_address,
                            const EncodedCSignature& signature);

  // Read floating-point return values.
  template <typename T>
  typename std::enable_if<std::is_floating_point<T>::value, T>::type
  ReadReturn() {
    return static_cast<T>(get_fpu_register_double(fa0));
  }
  // Read non-float return values.
  template <typename T>
  typename std::enable_if<!std::is_floating_point<T>::value, T>::type
  ReadReturn() {
    return ConvertReturn<T>(get_register(a0));
  }
#endif
#ifdef V8_TARGET_ARCH_RISCV64
  void CallImpl(Address entry, CallArgument* args);
#else
  void CallImpl(Address entry, int argument_count, const intptr_t* arguments);
#endif

  // Unsupported instructions use Format to print an error and stop execution.
  void Format(Instruction* instr, const char* format);
  // Helpers for data value tracing.
  enum TraceType {
    BYTE,
    HALF,
    WORD,
#if V8_TARGET_ARCH_RISCV64
    DWORD,
#endif
    FLOAT,
    DOUBLE,
  };

  // Probe an address range; returns true if it can be accessed.
  bool ProbeMemory(uintptr_t address, uintptr_t access_size);
  template <typename T>
  T ReadMem(sreg_t addr, Instruction* instr);
  template <typename T>
  void WriteMem(sreg_t addr, T value, Instruction* instr);
  template <typename T, typename OP>
  T amo(sreg_t addr, OP f, Instruction* instr, TraceType t) {
    auto lhs = ReadMem<T>(addr, instr);
    WriteMem<T>(addr, (T)f(lhs), instr);
    return lhs;
  }
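  // Illustrative usage (addition, not from the V8 source): an AMOADD.W-style
  // read-modify-write passes a lambda computing the new memory value; amo()
  // returns the old value for the destination register:
  //   int32_t old = amo<int32_t>(addr,
  //                              [&](int32_t lhs) { return lhs + (int32_t)rs2(); },
  //                              instr_.instr(), WORD);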
  inline void DieOrDebug();

#if V8_TARGET_ARCH_RISCV32
  template <typename T>
  void TraceRegWr(T value, TraceType t = WORD);
#elif V8_TARGET_ARCH_RISCV64
  void TraceRegWr(sreg_t value, TraceType t = DWORD);
#endif
  void TraceMemWr(sreg_t addr, sreg_t value, TraceType t);
  template <typename T>
  void TraceMemRd(sreg_t addr, T value, sreg_t reg_value);
  void TraceMemRdDouble(sreg_t addr, double value, int64_t reg_value);
  void TraceMemRdDouble(sreg_t addr, Float64 value, int64_t reg_value);
  void TraceMemRdFloat(sreg_t addr, Float32 value, int64_t reg_value);

  template <typename T>
  void TraceMemWr(sreg_t addr, T value);
  void TraceMemWrDouble(sreg_t addr, double value);

  SimInstruction instr_;
  inline int32_t rs1_reg() const { return instr_.Rs1Value(); }
  inline sreg_t rs1() const { return get_register(rs1_reg()); }
  inline float frs1() const { return get_fpu_register_float(rs1_reg()); }
  inline double drs1() const { return get_fpu_register_double(rs1_reg()); }
  inline Float32 frs1_boxed() const {
    return get_fpu_register_Float32(rs1_reg());
  }
  inline Float64 drs1_boxed() const {
    return get_fpu_register_Float64(rs1_reg());
  }
  inline int32_t rs2_reg() const { return instr_.Rs2Value(); }
  inline sreg_t rs2() const { return get_register(rs2_reg()); }
  inline float frs2() const { return get_fpu_register_float(rs2_reg()); }
  inline double drs2() const { return get_fpu_register_double(rs2_reg()); }
  inline Float32 frs2_boxed() const {
    return get_fpu_register_Float32(rs2_reg());
  }
  inline Float64 drs2_boxed() const {
    return get_fpu_register_Float64(rs2_reg());
  }
  inline int32_t rs3_reg() const { return instr_.Rs3Value(); }
  inline sreg_t rs3() const { return get_register(rs3_reg()); }
  inline float frs3() const { return get_fpu_register_float(rs3_reg()); }
  inline double drs3() const { return get_fpu_register_double(rs3_reg()); }
  inline Float32 frs3_boxed() const {
    return get_fpu_register_Float32(rs3_reg());
  }
  inline Float64 drs3_boxed() const {
    return get_fpu_register_Float64(rs3_reg());
  }
  inline int32_t rd_reg() const { return instr_.RdValue(); }
  inline int32_t frd_reg() const { return instr_.RdValue(); }
  inline int32_t rvc_rs1_reg() const { return instr_.RvcRs1Value(); }
  inline sreg_t rvc_rs1() const { return get_register(rvc_rs1_reg()); }
  inline int32_t rvc_rs2_reg() const { return instr_.RvcRs2Value(); }
  inline sreg_t rvc_rs2() const { return get_register(rvc_rs2_reg()); }
  inline double rvc_drs2() const {
    return get_fpu_register_double(rvc_rs2_reg());
  }
  inline int32_t rvc_rs1s_reg() const { return instr_.RvcRs1sValue(); }
  inline sreg_t rvc_rs1s() const { return get_register(rvc_rs1s_reg()); }
  inline int32_t rvc_rs2s_reg() const { return instr_.RvcRs2sValue(); }
  inline sreg_t rvc_rs2s() const { return get_register(rvc_rs2s_reg()); }
  inline double rvc_drs2s() const {
    return get_fpu_register_double(rvc_rs2s_reg());
  }
  inline int32_t rvc_rd_reg() const { return instr_.RvcRdValue(); }
  inline int32_t rvc_frd_reg() const { return instr_.RvcRdValue(); }
  inline int16_t boffset() const { return instr_.BranchOffset(); }
  inline int16_t imm12() const { return instr_.Imm12Value(); }
  inline int32_t imm20J() const { return instr_.Imm20JValue(); }
  inline int32_t imm5CSR() const { return instr_.Rs1Value(); }
  inline int16_t csr_reg() const { return instr_.CsrValue(); }
  inline int16_t rvc_imm6() const { return instr_.RvcImm6Value(); }
  inline int16_t rvc_imm6_addi16sp() const {
    return instr_.RvcImm6Addi16spValue();
  }
  inline int16_t rvc_imm8_addi4spn() const {
    return instr_.RvcImm8Addi4spnValue();
  }
  inline int16_t rvc_imm6_lwsp() const { return instr_.RvcImm6LwspValue(); }
  inline int16_t rvc_imm6_ldsp() const { return instr_.RvcImm6LdspValue(); }
  inline int16_t rvc_imm6_swsp() const { return instr_.RvcImm6SwspValue(); }
  inline int16_t rvc_imm6_sdsp() const { return instr_.RvcImm6SdspValue(); }
  inline int16_t rvc_imm5_w() const { return instr_.RvcImm5WValue(); }
  inline int16_t rvc_imm5_d() const { return instr_.RvcImm5DValue(); }
  inline int16_t rvc_imm8_b() const { return instr_.RvcImm8BValue(); }
  inline void set_rd(sreg_t value, bool trace = true) {
    set_register(rd_reg(), value);
#if V8_TARGET_ARCH_RISCV64
    if (trace) TraceRegWr(get_register(rd_reg()), DWORD);
#elif V8_TARGET_ARCH_RISCV32
    if (trace) TraceRegWr(get_register(rd_reg()), WORD);
#endif
  }
  inline void set_frd(float value, bool trace = true) {
    set_fpu_register_float(rd_reg(), value);
    if (trace) TraceRegWr(get_fpu_register_word(rd_reg()), FLOAT);
  }
  inline void set_frd(Float32 value, bool trace = true) {
    set_fpu_register_float(rd_reg(), value);
    if (trace) TraceRegWr(get_fpu_register_word(rd_reg()), FLOAT);
  }
  inline void set_drd(double value, bool trace = true) {
    set_fpu_register_double(rd_reg(), value);
    if (trace) TraceRegWr(get_fpu_register(rd_reg()), DOUBLE);
  }
  inline void set_drd(Float64 value, bool trace = true) {
    set_fpu_register_double(rd_reg(), value);
    if (trace) TraceRegWr(get_fpu_register(rd_reg()), DOUBLE);
  }
  inline void set_rvc_rd(sreg_t value, bool trace = true) {
    set_register(rvc_rd_reg(), value);
#if V8_TARGET_ARCH_RISCV64
    if (trace) TraceRegWr(get_register(rvc_rd_reg()), DWORD);
#elif V8_TARGET_ARCH_RISCV32
    if (trace) TraceRegWr(get_register(rvc_rd_reg()), WORD);
#endif
  }
  inline void set_rvc_rs1s(sreg_t value, bool trace = true) {
    set_register(rvc_rs1s_reg(), value);
#if V8_TARGET_ARCH_RISCV64
    if (trace) TraceRegWr(get_register(rvc_rs1s_reg()), DWORD);
#elif V8_TARGET_ARCH_RISCV32
    if (trace) TraceRegWr(get_register(rvc_rs1s_reg()), WORD);
#endif
  }
  inline void set_rvc_rs2(sreg_t value, bool trace = true) {
    set_register(rvc_rs2_reg(), value);
#if V8_TARGET_ARCH_RISCV64
    if (trace) TraceRegWr(get_register(rvc_rs2_reg()), DWORD);
#elif V8_TARGET_ARCH_RISCV32
    if (trace) TraceRegWr(get_register(rvc_rs2_reg()), WORD);
#endif
  }
  inline void set_rvc_drd(double value, bool trace = true) {
    set_fpu_register_double(rvc_rd_reg(), value);
    if (trace) TraceRegWr(get_fpu_register(rvc_rd_reg()), DOUBLE);
  }
  inline void set_rvc_drd(Float64 value, bool trace = true) {
    set_fpu_register_double(rvc_rd_reg(), value);
    if (trace) TraceRegWr(get_fpu_register(rvc_rd_reg()), DOUBLE);
  }
  inline void set_rvc_frd(Float32 value, bool trace = true) {
    set_fpu_register_float(rvc_rd_reg(), value);
    if (trace) TraceRegWr(get_fpu_register(rvc_rd_reg()), DOUBLE);
  }
  inline void set_rvc_rs2s(sreg_t value, bool trace = true) {
    set_register(rvc_rs2s_reg(), value);
#if V8_TARGET_ARCH_RISCV64
    if (trace) TraceRegWr(get_register(rvc_rs2s_reg()), DWORD);
#elif V8_TARGET_ARCH_RISCV32
    if (trace) TraceRegWr(get_register(rvc_rs2s_reg()), WORD);
#endif
  }
  inline void set_rvc_drs2s(double value, bool trace = true) {
    set_fpu_register_double(rvc_rs2s_reg(), value);
    if (trace) TraceRegWr(get_fpu_register(rvc_rs2s_reg()), DOUBLE);
  }
  inline void set_rvc_drs2s(Float64 value, bool trace = true) {
    set_fpu_register_double(rvc_rs2s_reg(), value);
    if (trace) TraceRegWr(get_fpu_register(rvc_rs2s_reg()), DOUBLE);
  }
  inline void set_rvc_frs2s(Float32 value, bool trace = true) {
    set_fpu_register_float(rvc_rs2s_reg(), value);
    if (trace) TraceRegWr(get_fpu_register(rvc_rs2s_reg()), FLOAT);
  }
  inline int16_t shamt6() const { return (imm12() & 0x3F); }
  inline int16_t shamt5() const { return (imm12() & 0x1F); }
  inline int16_t rvc_shamt6() const { return instr_.RvcShamt6(); }
  inline int32_t s_imm12() const { return instr_.StoreOffset(); }
  inline int32_t u_imm20() const { return instr_.Imm20UValue() << 12; }
  inline int32_t rvc_u_imm6() const { return instr_.RvcImm6Value() << 12; }
  inline void require(bool check) {
    if (!check) {
      SignalException(kIllegalInstruction);
    }
  }
#ifdef CAN_USE_RVV_INSTRUCTIONS
  inline void rvv_trace_vd() {
    if (v8_flags.trace_sim) {
      __int128_t value = Vregister_[rvv_vd_reg()];
      SNPrintF(trace_buf_, "%016" PRIx64 "%016" PRIx64 " (%" PRId64 ")",
               *(reinterpret_cast<int64_t*>(&value) + 1),
               *reinterpret_cast<int64_t*>(&value), icount_);
    }
  }
  inline void rvv_trace_vs1() {
    if (v8_flags.trace_sim) {
      PrintF("\t%s:0x%016" PRIx64 "%016" PRIx64 "\n",
             v8::internal::VRegisters::Name(static_cast<int>(rvv_vs1_reg())),
             (uint64_t)(get_vregister(static_cast<int>(rvv_vs1_reg())) >> 64),
             (uint64_t)get_vregister(static_cast<int>(rvv_vs1_reg())));
    }
  }
  inline void rvv_trace_vs2() {
    if (v8_flags.trace_sim) {
      PrintF("\t%s:0x%016" PRIx64 "%016" PRIx64 "\n",
             v8::internal::VRegisters::Name(static_cast<int>(rvv_vs2_reg())),
             (uint64_t)(get_vregister(static_cast<int>(rvv_vs2_reg())) >> 64),
             (uint64_t)get_vregister(static_cast<int>(rvv_vs2_reg())));
    }
  }
  inline void rvv_trace_v0() {
    if (v8_flags.trace_sim) {
      PrintF("\t%s:0x%016" PRIx64 "%016" PRIx64 "\n",
             v8::internal::VRegisters::Name(v0),
             (uint64_t)(get_vregister(v0) >> 64), (uint64_t)get_vregister(v0));
    }
  }
  inline void rvv_trace_rs1() {
    if (v8_flags.trace_sim) {
      PrintF("\t%s:0x%016" PRIx64 "\n",
             v8::internal::Registers::Name(static_cast<int>(rs1_reg())),
             (uint64_t)(get_register(rs1_reg())));
    }
  }
  inline void rvv_trace_status() {
    if (v8_flags.trace_sim) {
      int i = 0;
      for (; i < trace_buf_.length(); i++) {
        if (trace_buf_[i] == '\0') break;
      }
      SNPrintF(trace_buf_.SubVector(i, trace_buf_.length()),
               " sew:%s lmul:%s vstart:%" PRId64 "vl:%" PRId64, rvv_sew_s(),
               rvv_lmul_s(), rvv_vstart(), rvv_vl());
    }
  }
  template <typename T>
  T& Rvvelt(reg_t vReg, uint64_t n, bool is_write = false) {
    CHECK_GT((rvv_vlen() >> 3) / sizeof(T), 0);
    reg_t elts_per_reg = (rvv_vlen() >> 3) / (sizeof(T));
    vReg += n / elts_per_reg;
    n = n % elts_per_reg;
    T* regStart = reinterpret_cast<T*>(reinterpret_cast<char*>(Vregister_) +
                                       vReg * (rvv_vlen() >> 3));
    return regStart[n];
  }
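  // Illustrative note (addition, not from the V8 source): with VLEN = 128 a
  // vector register holds 16 bytes, so Rvvelt<int32_t>(vd, 5) resolves to
  // element 1 of register vd + 1, since indices past one register spill into
  // the next register of the group.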
  inline int32_t rvv_vs1_reg() { return instr_.Vs1Value(); }
  inline int32_t rvv_vs2_reg() { return instr_.Vs2Value(); }
  inline int32_t rvv_vd_reg() { return instr_.VdValue(); }
  inline int32_t rvv_vs3_reg() { return instr_.VdValue(); }
  inline int32_t rvv_nf() {
    return (instr_.InstructionBits() & kRvvNfMask) >> kRvvNfShift;
  }
  inline void set_rvv_vtype(uint64_t value, bool trace = true) {
    vtype_ = value;
  }
  inline void set_rvv_vl(uint64_t value, bool trace = true) { vl_ = value; }
  inline void set_rvv_vstart(uint64_t value, bool trace = true) {
    vstart_ = value;
  }
  inline void set_rvv_vxsat(uint64_t value, bool trace = true) {
    vxsat_ = value;
  }
  inline void set_rvv_vxrm(uint64_t value, bool trace = true) { vxrm_ = value; }
  inline void set_rvv_vcsr(uint64_t value, bool trace = true) { vcsr_ = value; }
  inline void set_rvv_vlenb(uint64_t value, bool trace = true) {
    vlenb_ = value;
  }
#endif
  template <typename T, typename Func>
  inline T CanonicalizeFPUOpFMA(Func fn, T dst, T src1, T src2) {
    static_assert(std::is_floating_point<T>::value);
    auto alu_out = fn(dst, src1, src2);
    // If any input or the result is NaN, the result is the canonical quiet NaN.
    if (std::isnan(alu_out) || std::isnan(src1) || std::isnan(src2) ||
        std::isnan(dst)) {
      // A signaling NaN sets the kInvalidOperation bit.
      if (isSnan(alu_out) || isSnan(src1) || isSnan(src2) || isSnan(dst))
        set_fflags(kInvalidOperation);
      alu_out = std::numeric_limits<T>::quiet_NaN();
    }
    return alu_out;
  }
  template <typename T, typename Func>
  inline T CanonicalizeFPUOp3(Func fn) {
    static_assert(std::is_floating_point<T>::value);
    T src1 = std::is_same<float, T>::value ? frs1() : drs1();
    T src2 = std::is_same<float, T>::value ? frs2() : drs2();
    T src3 = std::is_same<float, T>::value ? frs3() : drs3();
    auto alu_out = fn(src1, src2, src3);
    // If any input or the result is NaN, the result is the canonical quiet NaN.
    if (std::isnan(alu_out) || std::isnan(src1) || std::isnan(src2) ||
        std::isnan(src3)) {
      // A signaling NaN sets the kInvalidOperation bit.
      if (isSnan(alu_out) || isSnan(src1) || isSnan(src2) || isSnan(src3))
        set_fflags(kInvalidOperation);
      alu_out = std::numeric_limits<T>::quiet_NaN();
    }
    return alu_out;
  }
  template <typename T, typename Func>
  inline T CanonicalizeFPUOp2(Func fn) {
    static_assert(std::is_floating_point<T>::value);
    T src1 = std::is_same<float, T>::value ? frs1() : drs1();
    T src2 = std::is_same<float, T>::value ? frs2() : drs2();
    auto alu_out = fn(src1, src2);
    // If any input or the result is NaN, the result is the canonical quiet NaN.
    if (std::isnan(alu_out) || std::isnan(src1) || std::isnan(src2)) {
      // A signaling NaN sets the kInvalidOperation bit.
      if (isSnan(alu_out) || isSnan(src1) || isSnan(src2))
        set_fflags(kInvalidOperation);
      alu_out = std::numeric_limits<T>::quiet_NaN();
    }
    return alu_out;
  }
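  // Illustrative usage (addition, not from the V8 source): a binary FP op such
  // as fadd.s can route its arithmetic through the canonicalization helper by
  // passing a lambda; NaN inputs then yield a quiet NaN and raise fflags:
  //   set_frd(CanonicalizeFPUOp2<float>([](float a, float b) { return a + b; }));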
  template <typename T, typename Func>
  inline T CanonicalizeFPUOp1(Func fn) {
    static_assert(std::is_floating_point<T>::value);
    T src1 = std::is_same<float, T>::value ? frs1() : drs1();
    auto alu_out = fn(src1);
    // If the input or the result is NaN, the result is the canonical quiet NaN.
    if (std::isnan(alu_out) || std::isnan(src1)) {
      // A signaling NaN sets the kInvalidOperation bit.
      if (isSnan(alu_out) || isSnan(src1)) set_fflags(kInvalidOperation);
      alu_out = std::numeric_limits<T>::quiet_NaN();
    }
    return alu_out;
  }
  template <typename Func>
  inline float CanonicalizeDoubleToFloatOperation(Func fn) {
    float alu_out = fn(drs1());
    if (std::isnan(alu_out) || std::isnan(drs1()))
      alu_out = std::numeric_limits<float>::quiet_NaN();
    return alu_out;
  }
  template <typename Func>
  inline float CanonicalizeDoubleToFloatOperation(Func fn, double frs) {
    float alu_out = fn(frs);
    if (std::isnan(alu_out) || std::isnan(drs1()))
      alu_out = std::numeric_limits<float>::quiet_NaN();
    return alu_out;
  }
  template <typename Func>
  inline float CanonicalizeFloatToDoubleOperation(Func fn, float frs) {
    double alu_out = fn(frs);
    if (std::isnan(alu_out) || std::isnan(frs1()))
      alu_out = std::numeric_limits<double>::quiet_NaN();
    return alu_out;
  }
  template <typename Func>
  inline float CanonicalizeFloatToDoubleOperation(Func fn) {
    double alu_out = fn(frs1());
    if (std::isnan(alu_out) || std::isnan(frs1()))
      alu_out = std::numeric_limits<double>::quiet_NaN();
    return alu_out;
  }
  void DecodeRVRType();
  void DecodeRVR4Type();
  void DecodeRVRFPType();
  void DecodeRVRAType();
  void DecodeRVIType();
  void DecodeRVSType();
  void DecodeRVBType();
  void DecodeRVUType();
  void DecodeRVJType();
  void DecodeCRType();
  void DecodeCAType();
  void DecodeCIType();
  void DecodeCIWType();
  void DecodeCSSType();
  void DecodeCLType();
  void DecodeCSType();
  void DecodeCJType();
  void DecodeCBType();
#ifdef CAN_USE_RVV_INSTRUCTIONS
  void DecodeRvvIVV();
  void DecodeRvvIVI();
  void DecodeRvvIVX();
  void DecodeRvvMVV();
  void DecodeRvvMVX();
  void DecodeRvvFVV();
  void DecodeRvvFVF();
#endif
  void SoftwareInterrupt();

  // Simulator breakpoints.
  struct Breakpoint {
    Instruction* location;
    bool enabled;
    bool is_tbreak;
  };
  std::vector<Breakpoint> breakpoints_;
  void SetBreakpoint(Instruction* breakpoint, bool is_tbreak);
  void ListBreakpoints();
  void CheckBreakpoints();
  bool IsWatchpoint(reg_t code);
  bool IsTracepoint(reg_t code);
  bool IsSwitchStackLimit(reg_t code);
  void PrintWatchpoint(reg_t code);
  void HandleStop(reg_t code);
  bool IsStopInstruction(Instruction* instr);
  bool IsEnabledStop(reg_t code);
  void EnableStop(reg_t code);
  void DisableStop(reg_t code);
  void IncreaseStopCounter(reg_t code);
  void PrintStopInfo(reg_t code);
  // Executes one instruction.
  void InstructionDecode(Instruction* instr);

  // ICache.
  static void CheckICache(base::CustomMatcherHashMap* i_cache,
                          Instruction* instr);
  static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start,
                           size_t size);
  static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
                                 void* page);
  enum Exception {
    none,
    kIntegerOverflow,
    kIntegerUnderflow,
    kDivideByZero,
    kNumExceptions,
    // RISC-V illegal instruction exception.
    kIllegalInstruction,
  };

  void SignalException(Exception e);
  void GetFpArgs(double* x, double* y, int32_t* z);
  void SetFpResult(const double& result);

  void CallInternal(Address entry);
#ifdef CAN_USE_RVV_INSTRUCTIONS
  // RVV registers.
  __int128_t Vregister_[kNumVRegisters];
  static_assert(sizeof(__int128_t) == kRvvVLEN / 8, "unmatch vlen");
  uint64_t vstart_, vxsat_, vxrm_, vcsr_, vtype_, vl_, vlenb_;
#endif
  // Simulator support for the stack.
  static const size_t kStackProtectionSize = KB;
  // This includes a protection margin at each end of the stack area.
  static size_t AllocatedStackSize() {
#if V8_TARGET_ARCH_RISCV64
    size_t stack_size = v8_flags.sim_stack_size * KB;
#else
    size_t stack_size = 1 * MB;
#endif
    return stack_size + (2 * kStackProtectionSize);
  }
  static size_t UsableStackSize() {
    return AllocatedStackSize() - kStackProtectionSize;
  }
  // Added in Simulator::StackLimit().
  static const int kAdditionalStackMargin = 4 * KB;
  sreg_t* watch_address_ = nullptr;
  sreg_t watch_value_ = 0;

  base::EmbeddedVector<char, 256> trace_buf_;

  char* last_debugger_input_;
  // Stop is disabled if bit 31 is set.
  static const uint32_t kStopDisabledBit = 1 << 31;

  struct StopCountAndDesc {
    uint32_t count;
    char* desc;
  };

  // Synchronization primitives for load-reserved / store-conditional.
  enum class MonitorAccess { Open, RMW };
  enum class TransactionSize { None = 0, Word = 4, DoubleWord = 8 };

  static const uintptr_t kExclusiveTaggedAddrMask = ~((1 << 3) - 1);
  class LocalMonitor {
   public:
    LocalMonitor();

    void NotifyLoadLinked(uintptr_t addr, TransactionSize size);
    bool NotifyStoreConditional(uintptr_t addr, TransactionSize size);

   private:
    MonitorAccess access_state_;
    uintptr_t tagged_addr_;
    TransactionSize size_;
  };
  class GlobalMonitor {
   public:
    class LinkedAddress {
     public:
      LinkedAddress();

     private:
      friend class GlobalMonitor;
      // These functions manage the state machine for the global monitor, but
      // do not actually perform loads and stores.
      void Clear_Locked();
      void NotifyLoadLinked_Locked(uintptr_t addr);
      void NotifyStore_Locked();
      bool NotifyStoreConditional_Locked(uintptr_t addr,
                                         bool is_requesting_thread);

      MonitorAccess access_state_;
      uintptr_t tagged_addr_;
      LinkedAddress* next_;
      LinkedAddress* prev_;
      // A store-conditional may fail spuriously; simulate this by failing once
      // after every kMaxFailureCounter exclusive stores.
      static const int kMaxFailureCounter = 5;
      int failure_counter_;
    };

    void NotifyLoadLinked_Locked(uintptr_t addr, LinkedAddress* linked_address);
    void NotifyStore_Locked(LinkedAddress* linked_address);
    bool NotifyStoreConditional_Locked(uintptr_t addr,
                                       LinkedAddress* linked_address);

    // Called when the simulator is destroyed.
    void RemoveLinkedAddress(LinkedAddress* linked_address);

    static GlobalMonitor* Get();

   private:
    // Private constructor. Call {GlobalMonitor::Get()} to get the singleton.
    GlobalMonitor() = default;
    friend class base::LeakyObject<GlobalMonitor>;

    bool IsProcessorInLinkedList_Locked(LinkedAddress* linked_address) const;
    void PrependProcessor_Locked(LinkedAddress* linked_address);

    LinkedAddress* head_ = nullptr;
  };

  LocalMonitor local_monitor_;
  GlobalMonitor::LinkedAddress global_monitor_thread_;
};

#endif  // defined(USE_SIMULATOR)
#endif  // V8_EXECUTION_RISCV_SIMULATOR_RISCV_H_