#ifndef V8_EXECUTION_LOONG64_SIMULATOR_LOONG64_H_
#define V8_EXECUTION_LOONG64_SIMULATOR_LOONG64_H_
// Returns the negative absolute value of its argument.
template <typename T,
          typename = typename std::enable_if<std::is_signed<T>::value>::type>
T Nabs(T a) {
  return a < 0 ? a : -a;
}
#if defined(USE_SIMULATOR)
class CachePage {
 public:
  static const int LINE_VALID = 0;
  static const int LINE_INVALID = 1;

  static const int kPageShift = 12;
  static const int kPageSize = 1 << kPageShift;
  static const int kPageMask = kPageSize - 1;
  static const int kLineShift = 2;  // The cache line is only 4 bytes right now.
  static const int kLineLength = 1 << kLineShift;
  static const int kLineMask = kLineLength - 1;

  CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }

  char* ValidityByte(int offset) {
    return &validity_map_[offset >> kLineShift];
  }

 private:
  static const int kValidityMapSize = kPageSize >> kLineShift;
  char validity_map_[kValidityMapSize];  // One byte per line.
};
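// Each CachePage tracks validity at cache-line granularity: with 4 KB pages
// (kPageShift = 12) and 4-byte lines (kLineShift = 2), validity_map_ holds
// kPageSize >> kLineShift = 1024 bytes, one per line.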
class SimInstructionBase : public InstructionBase {
 public:
  Type InstructionType() const { return type_; }
  inline Instruction* instr() const { return instr_; }

 protected:
  explicit SimInstructionBase(Instruction* instr) {}

  int32_t operand_;
  Instruction* instr_;
  Type type_;

 private:
  DISALLOW_ASSIGN(SimInstructionBase);
};
class SimInstruction : public InstructionGetters<SimInstructionBase> {
 public:
  SimInstruction() {}

  explicit SimInstruction(Instruction* instr) { *this = instr; }

  SimInstruction& operator=(Instruction* instr) {
    operand_ = *reinterpret_cast<const int32_t*>(instr);
    instr_ = instr;
    type_ = InstructionBase::InstructionType();
    DCHECK(reinterpret_cast<void*>(&operand_) == this);
    return *this;
  }
};
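// SimInstruction caches the raw bits, pointer, and decoded type of the
// instruction at the current pc, so the per-field getters below need not
// re-decode. Hypothetical dispatch sketch (the real loop lives in
// InstructionDecode):
//   instr_ = reinterpret_cast<Instruction*>(pc);
//   switch (instr_.InstructionType()) { /* DecodeTypeOp6(), ... */ }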
class Simulator : public SimulatorBase {
 public:
  friend class Loong64Debugger;

  explicit Simulator(Isolate* isolate);
  float ceil(float value);
  float floor(float value);
  float trunc(float value);
  double ceil(double value);
  double floor(double value);
  double trunc(double value);
  void set_register(int reg, int64_t value);
  void set_register_word(int reg, int32_t value);
  void set_dw_register(int dreg, const int* dbl);
  double get_double_from_register_pair(int reg);

  void set_fpu_register(int fpureg, int64_t value);
  void set_fpu_register_word(int fpureg, int32_t value);
  void set_fpu_register_hi_word(int fpureg, int32_t value);
  void set_fpu_register_float(int fpureg, float value);
  void set_fpu_register_double(int fpureg, double value);
  void set_fpu_register_invalid_result64(float original, float rounded);
  void set_fpu_register_invalid_result(float original, float rounded);
  void set_fpu_register_word_invalid_result(float original, float rounded);
  void set_fpu_register_invalid_result64(double original, double rounded);
  void set_fpu_register_invalid_result(double original, double rounded);
  void set_fpu_register_word_invalid_result(double original, double rounded);
  int64_t get_fpu_register(int fpureg) const;
  int32_t get_fpu_register_word(int fpureg) const;
  int32_t get_fpu_register_signed_word(int fpureg) const;
  int32_t get_fpu_register_hi_word(int fpureg) const;
  float get_fpu_register_float(int fpureg) const;
  double get_fpu_register_double(int fpureg) const;
  void set_cf_register(int cfreg, bool value);
  bool get_cf_register(int cfreg) const;
  void set_fcsr_rounding_mode(FPURoundingMode mode);
  unsigned int get_fcsr_rounding_mode();
  void set_fcsr_bit(uint32_t cc, bool value);
  bool test_fcsr_bit(uint32_t cc);
  bool set_fcsr_round_error(double original, double rounded);
  bool set_fcsr_round64_error(double original, double rounded);
  bool set_fcsr_round_error(float original, float rounded);
  bool set_fcsr_round64_error(float original, float rounded);
  void round_according_to_fcsr(double toRound, double* rounded,
                               int32_t* rounded_int);
  void round64_according_to_fcsr(double toRound, double* rounded,
                                 int64_t* rounded_int);
  void round_according_to_fcsr(float toRound, float* rounded,
                               int32_t* rounded_int);
  void round64_according_to_fcsr(float toRound, float* rounded,
                                 int64_t* rounded_int);
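  // The round*_according_to_fcsr helpers apply the dynamic rounding mode held
  // in the FCSR; e.g. under round-to-nearest (ties-to-even) 2.5 rounds to 2
  // and 3.5 rounds to 4. The set_fcsr_round*_error helpers raise the FCSR
  // cause/flag bits and return true when the rounded result is invalid (NaN
  // input or out of range), in which case the set_fpu_register_*invalid_result
  // helpers above store the architecture's invalid-result value.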
  void set_pc(int64_t value);

  Address get_sp() const { return static_cast<Address>(get_register(sp)); }

  uintptr_t StackLimit(uintptr_t c_limit) const;

  uintptr_t StackBase() const;

  base::Vector<uint8_t> GetCentralStackView() const;
  static constexpr int JSStackLimitMargin() { return kAdditionalStackMargin; }
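  // StackLimit(c_limit) is intended to mirror stack-overflow checks on real
  // hardware: while executing on the simulator stack it reports the simulator
  // stack limit (with kAdditionalStackMargin as headroom), otherwise it defers
  // to the C stack limit passed in, so whichever stack is closer to
  // exhaustion wins.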
  // CallArgument wraps a single argument for Call, tagging it as GP or FP so
  // CallImpl can route it to the right register file.
  class CallArgument {
   public:
    template <typename T>
    explicit CallArgument(T argument) {
      bits_ = 0;
      DCHECK(sizeof(argument) <= sizeof(bits_));
      bits_ = ConvertArg(argument);
      type_ = GP_ARG;
    }

    explicit CallArgument(double argument) {
      DCHECK(sizeof(argument) == sizeof(bits_));
      memcpy(&bits_, &argument, sizeof(argument));
      type_ = FP_ARG;
    }

    explicit CallArgument(float argument) {
      // TODO(all): CallArgument(float) is untested.
      UNIMPLEMENTED();
    }

    static CallArgument End() { return CallArgument(); }

    int64_t bits() const { return bits_; }
    bool IsEnd() const { return type_ == NO_ARG; }
    bool IsGP() const { return type_ == GP_ARG; }
    bool IsFP() const { return type_ == FP_ARG; }

   private:
    enum CallArgumentType { GP_ARG, FP_ARG, NO_ARG };

    int64_t bits_;
    CallArgumentType type_;

    CallArgument() { type_ = NO_ARG; }
  };
  // Call an address in generated code, converting the C++ arguments to
  // CallArgument and reading the register-based return value back.
  template <typename Return, typename... Args>
  Return Call(Address entry, Args... args) {
    CallArgument call_args[] = {CallArgument(args)..., CallArgument::End()};
    CallImpl(entry, call_args);
    return ReadReturn<Return>();
  }
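  // Minimal usage sketch (hypothetical call site; `entry` would be the
  // address of generated code for the current isolate):
  //   Simulator* sim = Simulator::current(isolate);
  //   int64_t r = sim->Call<int64_t>(entry, arg0, arg1);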
  double CallFP(Address entry, double d0, double d1);

  void set_last_debugger_input(char* input);
  char* last_debugger_input() { return last_debugger_input_; }
  static void SetRedirectInstruction(Instruction* instruction);

  // ICache checking.
  static bool ICacheMatch(void* one, void* two);
  static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
                          size_t size);
  // Returns true if pc register contains one of the 'special_values' below.
  bool has_bad_pc() const;

 private:
  enum special_values {
    bad_ra = -1,      // Known bad pc value.
    end_sim_pc = -2,  // Signals the simulator to stop execution.
    Unpredictable = 0xbadbeaf
  };
  void CallAnyCTypeFunction(Address target_address,
                            const EncodedCSignature& signature);
  template <typename T>
  typename std::enable_if<std::is_floating_point<T>::value, T>::type
  ReadReturn() {
    return static_cast<T>(get_fpu_register_double(f0));
  }

  template <typename T>
  typename std::enable_if<!std::is_floating_point<T>::value, T>::type
  ReadReturn() {
    return ConvertReturn<T>(get_register(a0));
  }
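  // The two overloads mirror the calling convention as modeled here:
  // floating-point results are read from FPU register f0, while integers,
  // pointers, and tagged values come back in general-purpose register a0.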
  // Unsupported instructions use Format to print an error and stop execution.
  void Format(Instruction* instr, const char* format);

  bool ProbeMemory(uintptr_t address, uintptr_t access_size);
  inline uint32_t ReadBU(int64_t addr);
  inline int32_t ReadB(int64_t addr);
  inline void WriteB(int64_t addr, uint8_t value);
  inline void WriteB(int64_t addr, int8_t value);

  inline void WriteH(int64_t addr, uint16_t value, Instruction* instr);
  inline void WriteH(int64_t addr, int16_t value, Instruction* instr);

  inline uint32_t ReadWU(int64_t addr, Instruction* instr);
  inline int32_t ReadW(int64_t addr, Instruction* instr, TraceType t = WORD);
  inline void WriteW(int64_t addr, int32_t value, Instruction* instr);
  void WriteConditionalW(int64_t addr, int32_t value, Instruction* instr,
                         int32_t* done);
  inline int64_t Read2W(int64_t addr, Instruction* instr);
  inline void Write2W(int64_t addr, int64_t value, Instruction* instr);
  inline void WriteConditional2W(int64_t addr, int64_t value,
                                 Instruction* instr, int32_t* done);

  inline double ReadD(int64_t addr, Instruction* instr);
  inline void WriteD(int64_t addr, double value, Instruction* instr);

  template <typename T>
  T ReadMem(int64_t addr, Instruction* instr);
  template <typename T>
  void WriteMem(int64_t addr, T value, Instruction* instr);
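  // WriteConditionalW/WriteConditional2W model the LL/SC pairs (ll.w/sc.w,
  // ll.d/sc.d): the store only happens if the monitor still holds the linked
  // address, and *done receives the success bit so the decoder can write the
  // outcome back to rd, as the architecture requires.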
  inline void DieOrDebug();

  void TraceRegWr(int64_t value, TraceType t = DWORD);
  void TraceMemWr(int64_t addr, int64_t value, TraceType t);
  void TraceMemRd(int64_t addr, int64_t value, TraceType t = DWORD);
  template <typename T>
  void TraceMemRd(int64_t addr, T value);
  template <typename T>
  void TraceMemWr(int64_t addr, T value);
  SimInstruction instr_;

  // Decode helpers, one per major-opcode length (in bits).
  void DecodeTypeOp6();
  void DecodeTypeOp7();
  void DecodeTypeOp8();
  void DecodeTypeOp10();
  void DecodeTypeOp12();
  void DecodeTypeOp14();
  void DecodeTypeOp17();
  void DecodeTypeOp22();
  inline int32_t rj_reg() const { return instr_.RjValue(); }
  inline int64_t rj() const { return get_register(rj_reg()); }
  inline uint64_t rj_u() const {
    return static_cast<uint64_t>(get_register(rj_reg()));
  }
  inline int32_t rk_reg() const { return instr_.RkValue(); }
  inline int64_t rk() const { return get_register(rk_reg()); }
  inline uint64_t rk_u() const {
    return static_cast<uint64_t>(get_register(rk_reg()));
  }
  inline int32_t rd_reg() const { return instr_.RdValue(); }
  inline int64_t rd() const { return get_register(rd_reg()); }
  inline uint64_t rd_u() const {
    return static_cast<uint64_t>(get_register(rd_reg()));
  }
  inline int32_t fa_reg() const { return instr_.FaValue(); }
  inline float fa_float() const { return get_fpu_register_float(fa_reg()); }
  inline double fa_double() const { return get_fpu_register_double(fa_reg()); }
  inline int32_t fj_reg() const { return instr_.FjValue(); }
  inline float fj_float() const { return get_fpu_register_float(fj_reg()); }
  inline double fj_double() const { return get_fpu_register_double(fj_reg()); }
  inline int32_t fk_reg() const { return instr_.FkValue(); }
  inline float fk_float() const { return get_fpu_register_float(fk_reg()); }
  inline double fk_double() const { return get_fpu_register_double(fk_reg()); }
  inline int32_t fd_reg() const { return instr_.FdValue(); }
  inline float fd_float() const { return get_fpu_register_float(fd_reg()); }
  inline double fd_double() const { return get_fpu_register_double(fd_reg()); }
  inline int32_t cj_reg() const { return instr_.CjValue(); }
  inline bool cj() const { return get_cf_register(cj_reg()); }
  inline int32_t cd_reg() const { return instr_.CdValue(); }
  inline bool cd() const { return get_cf_register(cd_reg()); }
  inline int32_t ca_reg() const { return instr_.CaValue(); }
  inline bool ca() const { return get_cf_register(ca_reg()); }
  inline uint32_t sa2() const { return instr_.Sa2Value(); }
  inline uint32_t sa3() const { return instr_.Sa3Value(); }
  inline uint32_t ui5() const { return instr_.Ui5Value(); }
  inline uint32_t ui6() const { return instr_.Ui6Value(); }
  inline uint32_t lsbw() const { return instr_.LsbwValue(); }
  inline uint32_t msbw() const { return instr_.MsbwValue(); }
  inline uint32_t lsbd() const { return instr_.LsbdValue(); }
  inline uint32_t msbd() const { return instr_.MsbdValue(); }
  inline uint32_t cond() const { return instr_.CondValue(); }
  inline int32_t si12() const { return (instr_.Si12Value() << 20) >> 20; }
  inline uint32_t ui12() const { return instr_.Ui12Value(); }
  inline int32_t si14() const { return (instr_.Si14Value() << 18) >> 18; }
  inline int32_t si16() const { return (instr_.Si16Value() << 16) >> 16; }
  inline int32_t si20() const { return (instr_.Si20Value() << 12) >> 12; }
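  // The (x << k) >> k pattern sign-extends an n-bit immediate held in the low
  // bits of a 32-bit int (k = 32 - n). For example, for si12 the raw field
  // 0x800 becomes (0x800 << 20) >> 20 = -2048.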
  inline void SetResult(const int32_t rd_reg, const int64_t alu_out) {
    set_register(rd_reg, alu_out);
    TraceRegWr(alu_out);
  }
  inline void SetFPUWordResult(int32_t fd_reg, int32_t alu_out) {
    set_fpu_register_word(fd_reg, alu_out);
    TraceRegWr(get_fpu_register(fd_reg), WORD);
  }

  inline void SetFPUWordResult2(int32_t fd_reg, int32_t alu_out) {
    set_fpu_register_word(fd_reg, alu_out);
    TraceRegWr(get_fpu_register(fd_reg));
  }

  inline void SetFPUResult(int32_t fd_reg, int64_t alu_out) {
    set_fpu_register(fd_reg, alu_out);
    TraceRegWr(get_fpu_register(fd_reg));
  }

  inline void SetFPUResult2(int32_t fd_reg, int64_t alu_out) {
    set_fpu_register(fd_reg, alu_out);
    TraceRegWr(get_fpu_register(fd_reg), DOUBLE);
  }

  inline void SetFPUFloatResult(int32_t fd_reg, float alu_out) {
    set_fpu_register_float(fd_reg, alu_out);
    TraceRegWr(get_fpu_register(fd_reg), FLOAT);
  }

  inline void SetFPUDoubleResult(int32_t fd_reg, double alu_out) {
    set_fpu_register_double(fd_reg, alu_out);
    TraceRegWr(get_fpu_register(fd_reg), DOUBLE);
  }
  void SoftwareInterrupt();

  // Stop helper functions.
  bool IsWatchpoint(uint64_t code);
  void PrintWatchpoint(uint64_t code);
  void HandleStop(uint64_t code, Instruction* instr);
  bool IsStopInstruction(Instruction* instr);
  bool IsEnabledStop(uint64_t code);
  void EnableStop(uint64_t code);
  void DisableStop(uint64_t code);
  void IncreaseStopCounter(uint64_t code);
  void PrintStopInfo(uint64_t code);
  // Executes one instruction.
  void InstructionDecode(Instruction* instr);

  // ICache.
  static void CheckICache(base::CustomMatcherHashMap* i_cache,
                          Instruction* instr);
  static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start,
                           size_t size);
  static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
                                 void* page);
  void SignalException(Exception e);

  // Handle arguments and return value for runtime FP functions.
  void GetFpArgs(double* x, double* y, int32_t* z);
  void SetFpResult(const double& result);

  void CallInternal(Address entry);
  bool CFregisters_[kNumCFRegisters];

  static const size_t kStackProtectionSize = KB;
  static size_t AllocatedStackSize() {
    return (v8_flags.sim_stack_size * KB) + (2 * kStackProtectionSize);
  }
  static size_t UsableStackSize() { return v8_flags.sim_stack_size * KB; }

  static const int kAdditionalStackMargin = 4 * KB;
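  // Example layout, assuming the default --sim-stack-size of 2048 (in KB):
  // 2 MB of usable stack plus one kStackProtectionSize (1 KB) guard region at
  // each end, i.e. AllocatedStackSize() = 2 MB + 2 KB.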
  base::EmbeddedVector<char, 128> trace_buf_;

  // Debugger input.
  char* last_debugger_input_;

  // Registered breakpoints.
  Instruction* break_pc_;
  // Stop is disabled if bit 31 is set.
  static const uint32_t kStopDisabledBit = 1 << 31;

  struct StopCountAndDesc {
    uint32_t count;
    char* desc;
  };
  enum class MonitorAccess {
    Open,
    RMW,
  };

  enum class TransactionSize {
    None = 0,
    Word = 4,
    DoubleWord = 8,
  };

  static const uintptr_t kExclusiveTaggedAddrMask = ~((1 << 3) - 1);
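  // Masking off the low three bits tags an 8-byte-aligned granule rather than
  // an exact address: e.g. addresses 0x1000 and 0x1004 both tag 0x1000, so a
  // reservation on either word conflicts with a store to the other.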
  class LocalMonitor {
   public:
    // Manages the state machine for this thread's LL/SC monitor; does not
    // itself perform the loads and stores.
    void NotifyLoadLinked(uintptr_t addr, TransactionSize size);
    bool NotifyStoreConditional(uintptr_t addr, TransactionSize size);

   private:
    MonitorAccess access_state_;
    uintptr_t tagged_addr_;
    TransactionSize size_;
  };
  class GlobalMonitor {
   public:
    class LinkedAddress {
     private:
      friend class GlobalMonitor;

      void NotifyLoadLinked_Locked(uintptr_t addr);
      void NotifyStore_Locked();
      bool NotifyStoreConditional_Locked(uintptr_t addr,
                                         bool is_requesting_thread);

      MonitorAccess access_state_;
      uintptr_t tagged_addr_;
      LinkedAddress* next_;
      LinkedAddress* prev_;
      static const int kMaxFailureCounter = 5;
      int failure_counter_;
    };
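    // The global monitor arbitrates LL/SC across simulator threads: each
    // thread registers a LinkedAddress, a store by any thread invalidates
    // matching reservations on the others, and a store-conditional is
    // deliberately failed once per kMaxFailureCounter exclusive stores to
    // mimic the spurious failures permitted on real hardware.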
    void NotifyLoadLinked_Locked(uintptr_t addr, LinkedAddress* linked_address);
    void NotifyStore_Locked(LinkedAddress* linked_address);
    bool NotifyStoreConditional_Locked(uintptr_t addr,
                                       LinkedAddress* linked_address);

    void RemoveLinkedAddress(LinkedAddress* linked_address);
    static GlobalMonitor* Get();

   private:
    GlobalMonitor() = default;
    friend class base::LeakyObject<GlobalMonitor>;
    bool IsProcessorInLinkedList_Locked(LinkedAddress* linked_address) const;
    void PrependProcessor_Locked(LinkedAddress* linked_address);

    LinkedAddress* head_ = nullptr;
  };
  LocalMonitor local_monitor_;
  GlobalMonitor::LinkedAddress global_monitor_thread_;
};

#endif  // defined(USE_SIMULATOR)
#endif  // V8_EXECUTION_LOONG64_SIMULATOR_LOONG64_H_