#ifndef V8_EXECUTION_MIPS64_SIMULATOR_MIPS64_H_
#define V8_EXECUTION_MIPS64_SIMULATOR_MIPS64_H_
// Returns the negative absolute value of its argument.
template <typename T,
          typename = typename std::enable_if<std::is_signed<T>::value>::type>
T Nabs(T a) {
  return a < 0 ? a : -a;
}
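// Usage sketch (editor's illustration, not part of the original header):
// unlike -std::abs(a), Nabs is well-defined even for the most negative value
// of a signed type, since that value is already negative and is returned
// unchanged:
//
//   Nabs(int64_t{5})                            // == -5
//   Nabs(int64_t{-5})                           // == -5
//   Nabs(std::numeric_limits<int64_t>::min())   // == INT64_MIN, no overflow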
#if defined(USE_SIMULATOR)
class CachePage {
 public:
  static const int LINE_VALID = 0;
  static const int LINE_INVALID = 1;

  static const int kPageShift = 12;
  static const int kPageSize = 1 << kPageShift;
  static const int kPageMask = kPageSize - 1;
  static const int kLineShift = 2;  // The cache line is only 4 bytes right now.
  static const int kLineLength = 1 << kLineShift;
  static const int kLineMask = kLineLength - 1;

  CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }

  char* ValidityByte(int offset) {
    return &validity_map_[offset >> kLineShift];
  }

 private:
  static const int kValidityMapSize = kPageSize >> kLineShift;
  char validity_map_[kValidityMapSize];  // One byte per line.
};
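// How the validity map is meant to be consulted (editor's sketch; `page` and
// `addr` are hypothetical locals, not names from this header). A page of
// cached code is tracked with one validity byte per kLineLength-byte line:
//
//   int offset = addr & CachePage::kPageMask;   // byte offset within page
//   char* valid = page->ValidityByte(offset);   // validity byte for the line
//   bool line_is_valid = (*valid == CachePage::LINE_VALID);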
class SimInstructionBase : public InstructionBase {
 public:
  Type InstructionType() const { return type_; }
  inline Instruction* instr() const { return instr_; }

 protected:
  explicit SimInstructionBase(Instruction* instr) {}

  Instruction* instr_;
  Type type_;
};
class SimInstruction : public InstructionGetters<SimInstructionBase> {
 public:
  explicit SimInstruction(Instruction* instr) { *this = instr; }

  SimInstruction& operator=(Instruction* instr) {
    instr_ = instr;
    type_ = InstructionBase::InstructionType();
    return *this;
  }
};
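// Usage sketch (editor's illustration): the simulator keeps a single
// SimInstruction member and re-targets it at each decoded instruction, so the
// instruction type is computed once per assignment:
//
//   instr_ = instr;                       // invokes operator=(Instruction*)
//   switch (instr_.InstructionType()) { /* dispatch on the cached type_ */ }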
class Simulator : public SimulatorBase {
 public:
  friend class MipsDebugger;

  explicit Simulator(Isolate* isolate);

  // Accessors for register state.
  void set_register(int reg, int64_t value);
  void set_register_word(int reg, int32_t value);
  void set_dw_register(int dreg, const int* dbl);
  int64_t get_register(int reg) const;
  double get_double_from_register_pair(int reg);
  void set_fpu_register(int fpureg, int64_t value);
  void set_fpu_register_word(int fpureg, int32_t value);
  void set_fpu_register_hi_word(int fpureg, int32_t value);
  void set_fpu_register_float(int fpureg, float value);
  void set_fpu_register_double(int fpureg, double value);
  void set_fpu_register_invalid_result64(float original, float rounded);
  void set_fpu_register_invalid_result(float original, float rounded);
  void set_fpu_register_word_invalid_result(float original, float rounded);
  void set_fpu_register_invalid_result64(double original, double rounded);
  void set_fpu_register_invalid_result(double original, double rounded);
  void set_fpu_register_word_invalid_result(double original, double rounded);
  int64_t get_fpu_register(int fpureg) const;
  int32_t get_fpu_register_word(int fpureg) const;
  int32_t get_fpu_register_signed_word(int fpureg) const;
  int32_t get_fpu_register_hi_word(int fpureg) const;
  float get_fpu_register_float(int fpureg) const;
  double get_fpu_register_double(int fpureg) const;
  template <typename T>
  void get_msa_register(int wreg, T* value);
  template <typename T>
  void set_msa_register(int wreg, const T* value);
  void set_fcsr_bit(uint32_t cc, bool value);
  bool test_fcsr_bit(uint32_t cc);
  bool set_fcsr_round_error(double original, double rounded);
  bool set_fcsr_round64_error(double original, double rounded);
  bool set_fcsr_round_error(float original, float rounded);
  bool set_fcsr_round64_error(float original, float rounded);
  void round_according_to_fcsr(double toRound, double* rounded,
                               int32_t* rounded_int, double fs);
  void round64_according_to_fcsr(double toRound, double* rounded,
                                 int64_t* rounded_int, double fs);
  void round_according_to_fcsr(float toRound, float* rounded,
                               int32_t* rounded_int, float fs);
  void round64_according_to_fcsr(float toRound, float* rounded,
                                 int64_t* rounded_int, float fs);
  template <typename T_fp, typename T_int>
  void round_according_to_msacsr(T_fp toRound, T_fp* rounded,
                                 T_int* rounded_int);
  void clear_fcsr_cause();
  void set_fcsr_rounding_mode(FPURoundingMode mode);
  void set_msacsr_rounding_mode(FPURoundingMode mode);
  unsigned int get_fcsr_rounding_mode();
  unsigned int get_msacsr_rounding_mode();
  void set_pc(int64_t value);

  Address get_sp() const { return static_cast<Address>(get_register(sp)); }

  uintptr_t StackLimit(uintptr_t c_limit) const;
  uintptr_t StackBase() const;

  base::Vector<uint8_t> GetCentralStackView() const;
  static constexpr int JSStackLimitMargin() { return kAdditionalStackMargin; }
  class CallArgument {
   public:
    template <typename T>
    explicit CallArgument(T argument) {
      DCHECK(sizeof(argument) <= sizeof(bits_));
      bits_ = ConvertArg(argument);
      type_ = GP_ARG;
    }

    explicit CallArgument(double argument) {
      DCHECK(sizeof(argument) == sizeof(bits_));
      memcpy(&bits_, &argument, sizeof(argument));
      type_ = FP_ARG;
    }

    explicit CallArgument(float argument) {
      // TODO(all): CallArgument(float) is untested.
      UNIMPLEMENTED();
    }

    // This indicates the end of the arguments list, so that CallArgument
    // objects can be passed into varargs functions.
    static CallArgument End() { return CallArgument(); }

    int64_t bits() const { return bits_; }
    bool IsEnd() const { return type_ == NO_ARG; }
    bool IsGP() const { return type_ == GP_ARG; }
    bool IsFP() const { return type_ == FP_ARG; }

   private:
    enum CallArgumentType { GP_ARG, FP_ARG, NO_ARG };

    int64_t bits_;
    CallArgumentType type_;

    CallArgument() { type_ = NO_ARG; }
  };
  template <typename Return, typename... Args>
  Return Call(Address entry, Args... args) {
    // Convert all arguments to CallArgument.
    CallArgument call_args[] = {CallArgument(args)..., CallArgument::End()};
    CallImpl(entry, call_args);
    return ReadReturn<Return>();
  }
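  // Usage sketch (editor's illustration; `sim` and `entry` are hypothetical):
  // each argument is wrapped in a CallArgument, the End() sentinel terminates
  // the array, and ReadReturn() reads v0 or f0 depending on the return type:
  //
  //   int64_t sum = sim->Call<int64_t>(entry, int64_t{1}, int64_t{2});
  //   double d = sim->Call<double>(entry, 3.5);  // FP args travel as FP_ARG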
  // Alternative: call a 2-argument double function.
  double CallFP(Address entry, double d0, double d1);

  // Debugger input.
  void set_last_debugger_input(char* input);
  char* last_debugger_input() { return last_debugger_input_; }

  // Redirection support.
  static void SetRedirectInstruction(Instruction* instruction);

  // ICache checking.
  static bool ICacheMatch(void* one, void* two);
  static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
                          size_t size);

  // Returns true if pc register contains one of the 'special_values' defined
  // below (bad_ra, end_sim_pc).
  bool has_bad_pc() const;
 private:
  enum special_values {
    // Known bad pc value to ensure that the simulator does not execute
    // without being properly setup.
    bad_ra = -1,
    // A pc value used to signal the simulator to stop execution. Generated
    // by accessing a memory location containing this value.
    end_sim_pc = -2,
    // Unpredictable value.
    Unpredictable = 0xbadbeaf
  };
  void CallAnyCTypeFunction(Address target_address,
                            const EncodedCSignature& signature);

  // Read floating point return values.
  template <typename T>
  typename std::enable_if<std::is_floating_point<T>::value, T>::type
  ReadReturn() {
    return static_cast<T>(get_fpu_register_double(f0));
  }
  // Read non-float return values.
  template <typename T>
  typename std::enable_if<!std::is_floating_point<T>::value, T>::type
  ReadReturn() {
    return ConvertReturn<T>(get_register(v0));
  }
  void Format(Instruction* instr, const char* format);

  enum MSADataFormat { MSA_VECT = 0, MSA_BYTE, MSA_HALF, MSA_WORD, MSA_DWORD };
  // Read and write memory.
  inline uint32_t ReadBU(int64_t addr);
  inline int32_t ReadB(int64_t addr);
  inline void WriteB(int64_t addr, uint8_t value);
  inline void WriteB(int64_t addr, int8_t value);

  inline void WriteH(int64_t addr, uint16_t value, Instruction* instr);
  inline void WriteH(int64_t addr, int16_t value, Instruction* instr);

  inline uint32_t ReadWU(int64_t addr, Instruction* instr);
  inline int32_t ReadW(int64_t addr, Instruction* instr, TraceType t = WORD);
  inline void WriteW(int64_t addr, int32_t value, Instruction* instr);
  void WriteConditionalW(int64_t addr, int32_t value, Instruction* instr,
                         int32_t rt_reg);
  inline int64_t Read2W(int64_t addr, Instruction* instr);
  inline void Write2W(int64_t addr, int64_t value, Instruction* instr);
  inline void WriteConditional2W(int64_t addr, int64_t value,
                                 Instruction* instr, int32_t rt_reg);

  inline double ReadD(int64_t addr, Instruction* instr);
  inline void WriteD(int64_t addr, double value, Instruction* instr);

  template <typename T>
  T ReadMem(int64_t addr, Instruction* instr);
  template <typename T>
  void WriteMem(int64_t addr, T value, Instruction* instr);
  inline void DieOrDebug();

  void TraceRegWr(int64_t value, TraceType t = DWORD);
  template <typename T>
  void TraceMSARegWr(T* value, TraceType t);
  template <typename T>
  void TraceMSARegWr(T* value);
  void TraceMemWr(int64_t addr, int64_t value, TraceType t);
  void TraceMemRd(int64_t addr, int64_t value, TraceType t = DWORD);
  template <typename T>
  void TraceMemRd(int64_t addr, T value);
  template <typename T>
  void TraceMemWr(int64_t addr, T value);
  inline int32_t GetDoubleHIW(double* addr);
  inline int32_t GetDoubleLOW(double* addr);

  inline int32_t SetDoubleHIW(double* addr);
  inline int32_t SetDoubleLOW(double* addr);

  SimInstruction instr_;
  void DecodeTypeRegisterCOP1();
  void DecodeTypeRegisterCOP1X();
  void DecodeTypeRegisterSPECIAL();
  void DecodeTypeRegisterSPECIAL2();
  void DecodeTypeRegisterSPECIAL3();

  void DecodeTypeRegisterSRsType();
  void DecodeTypeRegisterDRsType();
  void DecodeTypeRegisterWRsType();
  void DecodeTypeRegisterLRsType();

  int DecodeMsaDataFormat();
  void DecodeTypeMsaI8();
  void DecodeTypeMsaI5();
  void DecodeTypeMsaI10();
  void DecodeTypeMsaELM();
  void DecodeTypeMsaBIT();
  void DecodeTypeMsaMI10();
  void DecodeTypeMsa3R();
  void DecodeTypeMsa3RF();
  void DecodeTypeMsaVec();
  void DecodeTypeMsa2R();
  void DecodeTypeMsa2RF();
  template <typename T>
  T MsaI5InstrHelper(uint32_t opcode, T ws, int32_t i5);
  template <typename T>
  T MsaBitInstrHelper(uint32_t opcode, T wd, T ws, int32_t m);
  template <typename T>
  T Msa3RInstrHelper(uint32_t opcode, T wd, T ws, T wt);
  void DecodeTypeRegister();

  inline int32_t rs_reg() const { return instr_.RsValue(); }
  inline int64_t rs() const { return get_register(rs_reg()); }
  inline uint64_t rs_u() const {
    return static_cast<uint64_t>(get_register(rs_reg()));
  }
  inline int32_t rt_reg() const { return instr_.RtValue(); }
  inline int64_t rt() const { return get_register(rt_reg()); }
  inline uint64_t rt_u() const {
    return static_cast<uint64_t>(get_register(rt_reg()));
  }
  inline int32_t rd_reg() const { return instr_.RdValue(); }
  inline int32_t fr_reg() const { return instr_.FrValue(); }
  inline int32_t fs_reg() const { return instr_.FsValue(); }
  inline int32_t ft_reg() const { return instr_.FtValue(); }
  inline int32_t fd_reg() const { return instr_.FdValue(); }
  inline int32_t sa() const { return instr_.SaValue(); }
  inline int32_t lsa_sa() const { return instr_.LsaSaValue(); }
  inline int32_t ws_reg() const { return instr_.WsValue(); }
  inline int32_t wt_reg() const { return instr_.WtValue(); }
  inline int32_t wd_reg() const { return instr_.WdValue(); }
  inline void SetResult(const int32_t rd_reg, const int64_t alu_out) {
    set_register(rd_reg, alu_out);
    TraceRegWr(alu_out);
  }

  inline void SetFPUWordResult(int32_t fd_reg, int32_t alu_out) {
    set_fpu_register_word(fd_reg, alu_out);
    TraceRegWr(get_fpu_register(fd_reg), WORD);
  }

  inline void SetFPUWordResult2(int32_t fd_reg, int32_t alu_out) {
    set_fpu_register_word(fd_reg, alu_out);
    TraceRegWr(get_fpu_register(fd_reg));
  }

  inline void SetFPUResult(int32_t fd_reg, int64_t alu_out) {
    set_fpu_register(fd_reg, alu_out);
    TraceRegWr(get_fpu_register(fd_reg));
  }

  inline void SetFPUResult2(int32_t fd_reg, int64_t alu_out) {
    set_fpu_register(fd_reg, alu_out);
    TraceRegWr(get_fpu_register(fd_reg), DOUBLE);
  }

  inline void SetFPUFloatResult(int32_t fd_reg, float alu_out) {
    set_fpu_register_float(fd_reg, alu_out);
    TraceRegWr(get_fpu_register(fd_reg), FLOAT);
  }

  inline void SetFPUDoubleResult(int32_t fd_reg, double alu_out) {
    set_fpu_register_double(fd_reg, alu_out);
    TraceRegWr(get_fpu_register(fd_reg), DOUBLE);
  }
  void DecodeTypeImmediate();
  void DecodeTypeJump();

  void SoftwareInterrupt();
  // Compact branch instruction checking.
  void CheckForbiddenSlot(int64_t current_pc) {
    Instruction* instr_after_compact_branch =
        reinterpret_cast<Instruction*>(current_pc + kInstrSize);
    if (instr_after_compact_branch->IsForbiddenAfterBranch()) {
      FATAL(
          "Error: Unexpected instruction 0x%08x immediately after a "
          "compact branch instruction.",
          *reinterpret_cast<uint32_t*>(instr_after_compact_branch));
    }
  }
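  // Example of what this guards against (editor's sketch): on MIPSr6 a
  // compact branch has no delay slot, and the "forbidden slot" that follows
  // it must not contain another branch:
  //
  //   bc   target      // compact branch
  //   bc   elsewhere   // branch in the forbidden slot -> FATAL above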
  bool IsWatchpoint(uint64_t code);
  void PrintWatchpoint(uint64_t code);
  void HandleStop(uint64_t code, Instruction* instr);
  bool IsStopInstruction(Instruction* instr);
  bool IsEnabledStop(uint64_t code);
  void EnableStop(uint64_t code);
  void DisableStop(uint64_t code);
  void IncreaseStopCounter(uint64_t code);
  void PrintStopInfo(uint64_t code);
  // Executes one instruction.
  void InstructionDecode(Instruction* instr);
  // Execute one instruction placed in a branch delay slot.
  void BranchDelayInstructionDecode(Instruction* instr) {
    if (instr->InstructionBits() == nopInstr) {
      // Short-cut generic nop instructions. They are always valid and they
      // never change the simulator state.
      return;
    }

    if (instr->IsForbiddenAfterBranch()) {
      FATAL("Error: Unexpected %i opcode in a branch delay slot.",
            instr->OpcodeValue());
    }
    InstructionDecode(instr);
  }
  // ICache.
  static void CheckICache(base::CustomMatcherHashMap* i_cache,
                          Instruction* instr);
  static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start,
                           size_t size);
  static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
                                 void* page);

  void SignalException(Exception e);
  void GetFpArgs(double* x, double* y, int32_t* z);
  void SetFpResult(const double& result);

  void CallInternal(Address entry);
  static const size_t kStackProtectionSize = KB;
  static size_t AllocatedStackSize() {
    return (v8_flags.sim_stack_size * KB) + (2 * kStackProtectionSize);
  }
  static size_t UsableStackSize() { return v8_flags.sim_stack_size * KB; }

  static const int kAdditionalStackMargin = 4 * KB;
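  // Worked example (editor's illustration, assuming sim_stack_size is set to
  // 2048, i.e. a 2 MB usable stack): AllocatedStackSize() returns
  // 2048 * KB + 2 * KB = 2050 KB, with one kStackProtectionSize (1 KB) guard
  // region at each end, and UsableStackSize() returns 2048 KB.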
  base::EmbeddedVector<char, 128> trace_buf_;

  char* last_debugger_input_;

  Instruction* break_pc_;

  static const uint32_t kStopDisabledBit = 1 << 31;
  struct StopCountAndDesc {
    uint32_t count;
    char* desc;
  };
  enum class MonitorAccess {
    Open,
    RMW,
  };

  enum class TransactionSize {
    None = 0,
    Word = 4,
    DoubleWord = 8,
  };

  // The least-significant bits of the address are ignored; ll/sc reserves an
  // aligned 8-byte granule rather than a single byte.
  static const uintptr_t kExclusiveTaggedAddrMask = ~((1 << 3) - 1);
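  // Worked example (editor's illustration): the mask clears the low three
  // bits, so all addresses within the same 8-byte-aligned granule produce the
  // same tag:
  //
  //   0x1007 & kExclusiveTaggedAddrMask   // == 0x1000
  //   0x1008 & kExclusiveTaggedAddrMask   // == 0x1008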
  class LocalMonitor {
   public:
    // These functions manage the state machine for the local monitor, but do
    // not actually perform loads and stores.
    void NotifyLoadLinked(uintptr_t addr, TransactionSize size);
    bool NotifyStoreConditional(uintptr_t addr, TransactionSize size);

   private:
    MonitorAccess access_state_;
    uintptr_t tagged_addr_;
    TransactionSize size_;
  };
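  // Protocol sketch (editor's illustration): a simulated ll/sc pair drives
  // the local monitor roughly as follows; the store is only performed if the
  // conditional notification succeeds (and the global monitor also agrees):
  //
  //   local_monitor_.NotifyLoadLinked(addr, TransactionSize::Word);  // ll
  //   if (local_monitor_.NotifyStoreConditional(addr,
  //                                             TransactionSize::Word)) {
  //     // perform the store, then consult GlobalMonitor
  //   }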
  class GlobalMonitor {
   public:
    class LinkedAddress {
     private:
      friend class GlobalMonitor;
      // These functions manage the state machine for the global monitor, but
      // do not actually perform loads and stores.
      void NotifyLoadLinked_Locked(uintptr_t addr);
      void NotifyStore_Locked();
      bool NotifyStoreConditional_Locked(uintptr_t addr,
                                         bool is_requesting_thread);

      MonitorAccess access_state_;
      uintptr_t tagged_addr_;
      LinkedAddress* next_;
      LinkedAddress* prev_;
      // A store conditional can fail spuriously (e.g. due to background cache
      // evictions on real hardware); to exercise those paths the simulator
      // forces a failure once after every kMaxFailureCounter exclusive stores.
      static const int kMaxFailureCounter = 5;
      int failure_counter_;
    };

    void NotifyLoadLinked_Locked(uintptr_t addr, LinkedAddress* linked_address);
    void NotifyStore_Locked(LinkedAddress* linked_address);
    bool NotifyStoreConditional_Locked(uintptr_t addr,
                                       LinkedAddress* linked_address);

    // Called when the simulator is destroyed.
    void RemoveLinkedAddress(LinkedAddress* linked_address);

    static GlobalMonitor* Get();

   private:
    // Private constructor. Call {GlobalMonitor::Get()} to get the singleton.
    GlobalMonitor() = default;
    friend class base::LeakyObject<GlobalMonitor>;

    bool IsProcessorInLinkedList_Locked(LinkedAddress* linked_address) const;
    void PrependProcessor_Locked(LinkedAddress* linked_address);

    LinkedAddress* head_ = nullptr;
  };

  LocalMonitor local_monitor_;
  GlobalMonitor::LinkedAddress global_monitor_thread_;
};

#endif  // defined(USE_SIMULATOR)
#endif  // V8_EXECUTION_MIPS64_SIMULATOR_MIPS64_H_