v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
simulator-mips64.h
Go to the documentation of this file.
1// Copyright 2011 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_EXECUTION_MIPS64_SIMULATOR_MIPS64_H_
6#define V8_EXECUTION_MIPS64_SIMULATOR_MIPS64_H_
7
8// Declares a Simulator for MIPS instructions if we are not generating a native
9// MIPS binary. This Simulator allows us to run and debug MIPS code generation
10// on regular desktop machines.
11// V8 calls into generated code via the GeneratedCode wrapper,
12// which will start execution in the Simulator or forwards to the real entry
13// on a MIPS HW platform.
14
15// globals.h defines USE_SIMULATOR.
16#include "src/common/globals.h"
17
// Three-way comparison helper: returns -1 when a < b, 0 when a == b,
// and +1 otherwise (including unordered values such as NaN operands,
// for which both a == b and a < b are false).
template <typename T>
int Compare(const T& a, const T& b) {
  return (a == b) ? 0 : ((a < b) ? -1 : 1);
}
27
28// Returns the negative absolute value of its argument.
29template <typename T,
30 typename = typename std::enable_if<std::is_signed<T>::value>::type>
31T Nabs(T a) {
32 return a < 0 ? a : -a;
33}
34
35#if defined(USE_SIMULATOR)
36// Running with a simulator.
37
38#include "src/base/hashmap.h"
39#include "src/base/strings.h"
44
45namespace v8 {
46namespace internal {
47
48// -----------------------------------------------------------------------------
49// Utility functions
50
// One simulated instruction-cache page. Holds a byte-for-byte copy of a 4KB
// page of simulated code memory plus a per-line validity map, where each
// "line" covers 4 bytes. The simulator uses the validity map to detect stale
// cached code after the generated code has been patched.
class CachePage {
 public:
  static const int LINE_VALID = 0;
  static const int LINE_INVALID = 1;

  static const int kPageShift = 12;
  static const int kPageSize = 1 << kPageShift;
  static const int kPageMask = kPageSize - 1;
  static const int kLineShift = 2;  // The cache line is only 4 bytes right now.
  static const int kLineLength = 1 << kLineShift;
  static const int kLineMask = kLineLength - 1;

  // A freshly constructed page has every line marked invalid.
  CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }

  // Validity flag for the line that contains byte `offset` of this page.
  char* ValidityByte(int offset) { return &validity_map_[offset >> kLineShift]; }

  // Cached copy of the instruction bytes at `offset` within this page.
  char* CachedData(int offset) { return &data_[offset]; }

 private:
  char data_[kPageSize];  // The cached data.
  static const int kValidityMapSize = kPageSize >> kLineShift;
  char validity_map_[kValidityMapSize];  // One byte per line.
};
76
77class SimInstructionBase : public InstructionBase {
78 public:
79 Type InstructionType() const { return type_; }
80 inline Instruction* instr() const { return instr_; }
81 inline int32_t operand() const { return operand_; }
82
83 protected:
84 SimInstructionBase() : operand_(-1), instr_(nullptr), type_(kUnsupported) {}
85 explicit SimInstructionBase(Instruction* instr) {}
86
88 Instruction* instr_;
89 Type type_;
90
91 private:
92 DISALLOW_ASSIGN(SimInstructionBase);
93};
94
 95// Decoded-instruction value type used by the simulator's decode loop.
// Assigning an Instruction* snapshots its raw bits and decoded type so the
// getters inherited from InstructionGetters read the cached copy.
class SimInstruction : public InstructionGetters<SimInstructionBase> {
 96 public:
 97 SimInstruction() {}
 98
// Implemented in terms of operator= below.
 99 explicit SimInstruction(Instruction* instr) { *this = instr; }
 100
 101 SimInstruction& operator=(Instruction* instr) {
// Snapshot the raw 32-bit instruction word.
 102 operand_ = *reinterpret_cast<const int32_t*>(instr);
 103 instr_ = instr;
 104 type_ = InstructionBase::InstructionType();
// The inherited getters reinterpret `this` as the instruction bits, so
// operand_ must be the object's first member (offset 0).
 105 DCHECK(reinterpret_cast<void*>(&operand_) == this);
 106 return *this;
 107 }
 108};
109
// Software simulator for MIPS64 instructions, used when V8 is built for a
// non-MIPS host. Holds the complete simulated architectural state (GP/FPU/MSA
// registers, FCSR/MSACSR, pc, stack) and executes generated code one
// instruction at a time.
 110class Simulator : public SimulatorBase {
 111 public:
 112 friend class MipsDebugger;
 113
 114 // Registers are declared in order. See SMRL chapter 2.
 115 enum Register {
 116 no_reg = -1,
 117 zero_reg = 0,
 118 at,
 119 v0,
 120 v1,
 121 a0,
 122 a1,
 123 a2,
 124 a3,
 125 a4,
 126 a5,
 127 a6,
 128 a7,
 129 t0,
 130 t1,
 131 t2,
 132 t3,
 133 s0,
 134 s1,
 135 s2,
 136 s3,
 137 s4,
 138 s5,
 139 s6,
 140 s7,
 141 t8,
 142 t9,
 143 k0,
 144 k1,
 145 gp,
 146 sp,
 147 s8,
 148 ra,
 149 // LO, HI, and pc.
 150 LO,
 151 HI,
 152 pc, // pc must be the last register.
 // NOTE(review): an enumerator (presumably kNumSimuRegisters) was lost from
 // this listing during extraction — confirm against the upstream source.
 154 // aliases
 155 fp = s8
 156 };
 157
 158 // Coprocessor registers.
 159 // Generated code will always use doubles. So we will only use even registers.
 160 enum FPURegister {
 161 f0,
 162 f1,
 163 f2,
 164 f3,
 165 f4,
 166 f5,
 167 f6,
 168 f7,
 169 f8,
 170 f9,
 171 f10,
 172 f11,
 173 f12,
 174 f13,
 175 f14,
 176 f15, // f12 and f14 are arguments FPURegisters.
 177 f16,
 178 f17,
 179 f18,
 180 f19,
 181 f20,
 182 f21,
 183 f22,
 184 f23,
 185 f24,
 186 f25,
 187 f26,
 188 f27,
 189 f28,
 190 f29,
 191 f30,
 192 f31,
 // NOTE(review): an enumerator (presumably kNumFPURegisters) was lost from
 // this listing during extraction — confirm against the upstream source.
 194 };
 195
 196 // MSA registers
 197 enum MSARegister {
 198 w0,
 199 w1,
 200 w2,
 201 w3,
 202 w4,
 203 w5,
 204 w6,
 205 w7,
 206 w8,
 207 w9,
 208 w10,
 209 w11,
 210 w12,
 211 w13,
 212 w14,
 213 w15,
 214 w16,
 215 w17,
 216 w18,
 217 w19,
 218 w20,
 219 w21,
 220 w22,
 221 w23,
 222 w24,
 223 w25,
 224 w26,
 225 w27,
 226 w28,
 227 w29,
 228 w30,
 229 w31,
 // NOTE(review): an enumerator (presumably kNumMSARegisters) was lost from
 // this listing during extraction — confirm against the upstream source.
 231 };
 232
 233 explicit Simulator(Isolate* isolate);
 234 ~Simulator();
 235
 236 // The currently executing Simulator instance. Potentially there can be one
 237 // for each native thread.
 238 V8_EXPORT_PRIVATE static Simulator* current(v8::internal::Isolate* isolate);
 239
 240 // Accessors for register state. Reading the pc value adheres to the MIPS
 241 // architecture specification and is off by 8 from the currently executing
 242 // instruction.
 243 void set_register(int reg, int64_t value);
 244 void set_register_word(int reg, int32_t value);
 245 void set_dw_register(int dreg, const int* dbl);
 246 V8_EXPORT_PRIVATE int64_t get_register(int reg) const;
 247 double get_double_from_register_pair(int reg);
 248 // Same for FPURegisters.
 249 void set_fpu_register(int fpureg, int64_t value);
 250 void set_fpu_register_word(int fpureg, int32_t value);
 251 void set_fpu_register_hi_word(int fpureg, int32_t value);
 252 void set_fpu_register_float(int fpureg, float value);
 253 void set_fpu_register_double(int fpureg, double value);
 254 void set_fpu_register_invalid_result64(float original, float rounded);
 255 void set_fpu_register_invalid_result(float original, float rounded);
 256 void set_fpu_register_word_invalid_result(float original, float rounded);
 257 void set_fpu_register_invalid_result64(double original, double rounded);
 258 void set_fpu_register_invalid_result(double original, double rounded);
 259 void set_fpu_register_word_invalid_result(double original, double rounded);
 260 int64_t get_fpu_register(int fpureg) const;
 261 int32_t get_fpu_register_word(int fpureg) const;
 262 int32_t get_fpu_register_signed_word(int fpureg) const;
 263 int32_t get_fpu_register_hi_word(int fpureg) const;
 264 float get_fpu_register_float(int fpureg) const;
 265 double get_fpu_register_double(int fpureg) const;
 266 template <typename T>
 267 void get_msa_register(int wreg, T* value);
 268 template <typename T>
 269 void set_msa_register(int wreg, const T* value);
 // FCSR (floating-point control/status register) helpers.
 270 void set_fcsr_bit(uint32_t cc, bool value);
 271 bool test_fcsr_bit(uint32_t cc);
 272 bool set_fcsr_round_error(double original, double rounded);
 273 bool set_fcsr_round64_error(double original, double rounded);
 274 bool set_fcsr_round_error(float original, float rounded);
 275 bool set_fcsr_round64_error(float original, float rounded);
 276 void round_according_to_fcsr(double toRound, double* rounded,
 277 int32_t* rounded_int, double fs);
 278 void round64_according_to_fcsr(double toRound, double* rounded,
 279 int64_t* rounded_int, double fs);
 280 void round_according_to_fcsr(float toRound, float* rounded,
 281 int32_t* rounded_int, float fs);
 282 void round64_according_to_fcsr(float toRound, float* rounded,
 283 int64_t* rounded_int, float fs);
 284 template <typename T_fp, typename T_int>
 285 void round_according_to_msacsr(T_fp toRound, T_fp* rounded,
 286 T_int* rounded_int);
 287 void clear_fcsr_cause();
 288 void set_fcsr_rounding_mode(FPURoundingMode mode);
 289 void set_msacsr_rounding_mode(FPURoundingMode mode);
 290 unsigned int get_fcsr_rounding_mode();
 291 unsigned int get_msacsr_rounding_mode();
 292 // Special case of set_register and get_register to access the raw PC value.
 293 void set_pc(int64_t value);
 294 V8_EXPORT_PRIVATE int64_t get_pc() const;
 295
 296 Address get_sp() const { return static_cast<Address>(get_register(sp)); }
 297
 298 // Accessor to the internal simulator stack area. Adds a safety
 299 // margin to prevent overflows (kAdditionalStackMargin).
 300 uintptr_t StackLimit(uintptr_t c_limit) const;
 301
 302 uintptr_t StackBase() const;
 303
 304 // Return central stack view, without additional safety margins.
 305 // Users, for example wasm::StackMemory, can add their own.
 306 base::Vector<uint8_t> GetCentralStackView() const;
 307 static constexpr int JSStackLimitMargin() { return kAdditionalStackMargin; }
 308
 309 void IterateRegistersAndStack(::heap::base::StackVisitor* visitor);
 310
 311 // Executes MIPS instructions until the PC reaches end_sim_pc.
 312 void Execute();
 313
 314 // Only arguments up to 64 bits in size are supported.
 315 class CallArgument {
 316 public:
 317 template <typename T>
 318 explicit CallArgument(T argument) {
 319 bits_ = 0;
 320 DCHECK(sizeof(argument) <= sizeof(bits_));
 321 bits_ = ConvertArg(argument);
 322 type_ = GP_ARG;
 323 }
 324
 325 explicit CallArgument(double argument) {
 326 DCHECK(sizeof(argument) == sizeof(bits_));
 327 memcpy(&bits_, &argument, sizeof(argument));
 328 type_ = FP_ARG;
 329 }
 330
 331 explicit CallArgument(float argument) {
 332 // TODO(all): CallArgument(float) is untested.
 // NOTE(review): the constructor body was lost from this listing during
 // extraction (the upstream source aborts here via UNIMPLEMENTED()) —
 // confirm against the upstream source.
 334 }
 335
 336 // This indicates the end of the arguments list, so that CallArgument
 337 // objects can be passed into varargs functions.
 338 static CallArgument End() { return CallArgument(); }
 339
 340 int64_t bits() const { return bits_; }
 341 bool IsEnd() const { return type_ == NO_ARG; }
 342 bool IsGP() const { return type_ == GP_ARG; }
 343 bool IsFP() const { return type_ == FP_ARG; }
 344
 345 private:
 346 enum CallArgumentType { GP_ARG, FP_ARG, NO_ARG };
 347
 348 // All arguments are aligned to at least 64 bits and we don't support
 349 // passing bigger arguments, so the payload size can be fixed at 64 bits.
 350 int64_t bits_;
 351 CallArgumentType type_;
 352
 353 CallArgument() { type_ = NO_ARG; }
 354 };
 355
 // Call generated code at `entry` with the given arguments and read back the
 // return value from the simulated register state.
 356 template <typename Return, typename... Args>
 357 Return Call(Address entry, Args... args) {
 358 // Convert all arguments to CallArgument.
 359 CallArgument call_args[] = {CallArgument(args)..., CallArgument::End()};
 360 CallImpl(entry, call_args);
 361 return ReadReturn<Return>();
 362 }
 363
 364 // Alternative: call a 2-argument double function.
 365 double CallFP(Address entry, double d0, double d1);
 366
 367 // Push an address onto the JS stack.
 368 V8_EXPORT_PRIVATE uintptr_t PushAddress(uintptr_t address);
 369
 370 // Pop an address from the JS stack.
 371 V8_EXPORT_PRIVATE uintptr_t PopAddress();
 372
 373 // Debugger input.
 374 void set_last_debugger_input(char* input);
 375 char* last_debugger_input() { return last_debugger_input_; }
 376
 377 // Redirection support.
 378 static void SetRedirectInstruction(Instruction* instruction);
 379
 380 // ICache checking.
 381 static bool ICacheMatch(void* one, void* two);
 382 static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
 383 size_t size);
 384
 385 // Returns true if pc register contains one of the 'special_values' defined
 386 // below (bad_ra, end_sim_pc).
 387 bool has_bad_pc() const;
 388
 389 private:
 390 enum special_values {
 391 // Known bad pc value to ensure that the simulator does not execute
 392 // without being properly setup.
 393 bad_ra = -1,
 394 // A pc value used to signal the simulator to stop execution. Generally
 395 // the ra is set to this value on transition from native C code to
 396 // simulated execution, so that the simulator can "return" to the native
 397 // C code.
 398 end_sim_pc = -2,
 399 // Unpredictable value.
 400 Unpredictable = 0xbadbeaf
 401 };
 402
 403 V8_EXPORT_PRIVATE void CallImpl(Address entry, CallArgument* args);
 404
 405 void CallAnyCTypeFunction(Address target_address,
 406 const EncodedCSignature& signature);
 407
 408 // Read floating point return values.
 409 template <typename T>
 410 typename std::enable_if<std::is_floating_point<T>::value, T>::type
 411 ReadReturn() {
 412 return static_cast<T>(get_fpu_register_double(f0));
 413 }
 414 // Read non-float return values.
 415 template <typename T>
 416 typename std::enable_if<!std::is_floating_point<T>::value, T>::type
 417 ReadReturn() {
 418 return ConvertReturn<T>(get_register(v0));
 419 }
 420
 421 // Unsupported instructions use Format to print an error and stop execution.
 422 void Format(Instruction* instr, const char* format);
 423
 424 // Helpers for data value tracing.
 425 enum TraceType {
 426 BYTE,
 427 HALF,
 428 WORD,
 429 DWORD,
 430 FLOAT,
 431 DOUBLE,
 432 FLOAT_DOUBLE,
 433 WORD_DWORD
 434 };
 435
 436 // MSA Data Format
 437 enum MSADataFormat { MSA_VECT = 0, MSA_BYTE, MSA_HALF, MSA_WORD, MSA_DWORD };
 // View of one 128-bit MSA register, addressable by lane width.
 438 union msa_reg_t {
 439 int8_t b[kMSALanesByte];
 440 uint8_t ub[kMSALanesByte];
 // NOTE(review): the half-word and signed-word lane members appear to have
 // been lost from this listing during extraction — confirm against the
 // upstream source.
 444 uint32_t uw[kMSALanesWord];
 445 int64_t d[kMSALanesDword];
 446 uint64_t ud[kMSALanesDword];
 447 };
 448
 449 // Read and write memory.
 450 inline uint32_t ReadBU(int64_t addr);
 451 inline int32_t ReadB(int64_t addr);
 452 inline void WriteB(int64_t addr, uint8_t value);
 453 inline void WriteB(int64_t addr, int8_t value);
 454
 455 inline uint16_t ReadHU(int64_t addr, Instruction* instr);
 456 inline int16_t ReadH(int64_t addr, Instruction* instr);
 457 // Note: Overloaded on the sign of the value.
 458 inline void WriteH(int64_t addr, uint16_t value, Instruction* instr);
 459 inline void WriteH(int64_t addr, int16_t value, Instruction* instr);
 460
 461 inline uint32_t ReadWU(int64_t addr, Instruction* instr);
 462 inline int32_t ReadW(int64_t addr, Instruction* instr, TraceType t = WORD);
 463 inline void WriteW(int64_t addr, int32_t value, Instruction* instr);
 464 void WriteConditionalW(int64_t addr, int32_t value, Instruction* instr,
 465 int32_t rt_reg);
 466 inline int64_t Read2W(int64_t addr, Instruction* instr);
 467 inline void Write2W(int64_t addr, int64_t value, Instruction* instr);
 468 inline void WriteConditional2W(int64_t addr, int64_t value,
 469 Instruction* instr, int32_t rt_reg);
 470
 471 inline double ReadD(int64_t addr, Instruction* instr);
 472 inline void WriteD(int64_t addr, double value, Instruction* instr);
 473
 474 template <typename T>
 475 T ReadMem(int64_t addr, Instruction* instr);
 476 template <typename T>
 477 void WriteMem(int64_t addr, T value, Instruction* instr);
 478
 479 // Helper for debugging memory access.
 480 inline void DieOrDebug();
 481
 482 void TraceRegWr(int64_t value, TraceType t = DWORD);
 483 template <typename T>
 484 void TraceMSARegWr(T* value, TraceType t);
 485 template <typename T>
 486 void TraceMSARegWr(T* value);
 487 void TraceMemWr(int64_t addr, int64_t value, TraceType t);
 488 void TraceMemRd(int64_t addr, int64_t value, TraceType t = DWORD);
 489 template <typename T>
 490 void TraceMemRd(int64_t addr, T value);
 491 template <typename T>
 492 void TraceMemWr(int64_t addr, T value);
 493
 494 // Operations depending on endianness.
 495 // Get Double Higher / Lower word.
 496 inline int32_t GetDoubleHIW(double* addr);
 497 inline int32_t GetDoubleLOW(double* addr);
 498 // Set Double Higher / Lower word.
 499 inline int32_t SetDoubleHIW(double* addr);
 500 inline int32_t SetDoubleLOW(double* addr);
 501
 // The instruction currently being decoded/executed.
 502 SimInstruction instr_;
 503
 504 // functions called from DecodeTypeRegister.
 505 void DecodeTypeRegisterCOP1();
 506
 507 void DecodeTypeRegisterCOP1X();
 508
 509 void DecodeTypeRegisterSPECIAL();
 510
 511 void DecodeTypeRegisterSPECIAL2();
 512
 513 void DecodeTypeRegisterSPECIAL3();
 514
 515 void DecodeTypeRegisterSRsType();
 516
 517 void DecodeTypeRegisterDRsType();
 518
 519 void DecodeTypeRegisterWRsType();
 520
 521 void DecodeTypeRegisterLRsType();
 522
 523 int DecodeMsaDataFormat();
 524 void DecodeTypeMsaI8();
 525 void DecodeTypeMsaI5();
 526 void DecodeTypeMsaI10();
 527 void DecodeTypeMsaELM();
 528 void DecodeTypeMsaBIT();
 529 void DecodeTypeMsaMI10();
 530 void DecodeTypeMsa3R();
 531 void DecodeTypeMsa3RF();
 532 void DecodeTypeMsaVec();
 533 void DecodeTypeMsa2R();
 534 void DecodeTypeMsa2RF();
 535 template <typename T>
 536 T MsaI5InstrHelper(uint32_t opcode, T ws, int32_t i5);
 537 template <typename T>
 538 T MsaBitInstrHelper(uint32_t opcode, T wd, T ws, int32_t m);
 539 template <typename T>
 540 T Msa3RInstrHelper(uint32_t opcode, T wd, T ws, T wt);
 541
 542 // Executing is handled based on the instruction type.
 543 void DecodeTypeRegister();
 544
 // Convenience accessors for the fields of the current instruction (instr_).
 545 inline int32_t rs_reg() const { return instr_.RsValue(); }
 546 inline int64_t rs() const { return get_register(rs_reg()); }
 547 inline uint64_t rs_u() const {
 548 return static_cast<uint64_t>(get_register(rs_reg()));
 549 }
 550 inline int32_t rt_reg() const { return instr_.RtValue(); }
 551 inline int64_t rt() const { return get_register(rt_reg()); }
 552 inline uint64_t rt_u() const {
 553 return static_cast<uint64_t>(get_register(rt_reg()));
 554 }
 555 inline int32_t rd_reg() const { return instr_.RdValue(); }
 556 inline int32_t fr_reg() const { return instr_.FrValue(); }
 557 inline int32_t fs_reg() const { return instr_.FsValue(); }
 558 inline int32_t ft_reg() const { return instr_.FtValue(); }
 559 inline int32_t fd_reg() const { return instr_.FdValue(); }
 560 inline int32_t sa() const { return instr_.SaValue(); }
 561 inline int32_t lsa_sa() const { return instr_.LsaSaValue(); }
 562 inline int32_t ws_reg() const { return instr_.WsValue(); }
 563 inline int32_t wt_reg() const { return instr_.WtValue(); }
 564 inline int32_t wd_reg() const { return instr_.WdValue(); }
 565
 // Writes an ALU result to a GP register and traces the write.
 566 inline void SetResult(const int32_t rd_reg, const int64_t alu_out) {
 567 set_register(rd_reg, alu_out);
 568 TraceRegWr(alu_out);
 569 }
 570
 571 inline void SetFPUWordResult(int32_t fd_reg, int32_t alu_out) {
 572 set_fpu_register_word(fd_reg, alu_out);
 573 TraceRegWr(get_fpu_register(fd_reg), WORD);
 574 }
 575
 576 inline void SetFPUWordResult2(int32_t fd_reg, int32_t alu_out) {
 577 set_fpu_register_word(fd_reg, alu_out);
 578 TraceRegWr(get_fpu_register(fd_reg));
 579 }
 580
 581 inline void SetFPUResult(int32_t fd_reg, int64_t alu_out) {
 582 set_fpu_register(fd_reg, alu_out);
 583 TraceRegWr(get_fpu_register(fd_reg));
 584 }
 585
 586 inline void SetFPUResult2(int32_t fd_reg, int64_t alu_out) {
 587 set_fpu_register(fd_reg, alu_out);
 588 TraceRegWr(get_fpu_register(fd_reg), DOUBLE);
 589 }
 590
 591 inline void SetFPUFloatResult(int32_t fd_reg, float alu_out) {
 592 set_fpu_register_float(fd_reg, alu_out);
 593 TraceRegWr(get_fpu_register(fd_reg), FLOAT);
 594 }
 595
 596 inline void SetFPUDoubleResult(int32_t fd_reg, double alu_out) {
 597 set_fpu_register_double(fd_reg, alu_out);
 598 TraceRegWr(get_fpu_register(fd_reg), DOUBLE);
 599 }
 600
 601 void DecodeTypeImmediate();
 602 void DecodeTypeJump();
 603
 604 // Used for breakpoints and traps.
 605 void SoftwareInterrupt();
 606
 607 // Compact branch guard.
 608 void CheckForbiddenSlot(int64_t current_pc) {
 609 Instruction* instr_after_compact_branch =
 610 reinterpret_cast<Instruction*>(current_pc + kInstrSize);
 611 if (instr_after_compact_branch->IsForbiddenAfterBranch()) {
 612 FATAL(
 613 "Error: Unexpected instruction 0x%08x immediately after a "
 614 "compact branch instruction.",
 615 *reinterpret_cast<uint32_t*>(instr_after_compact_branch));
 616 }
 617 }
 618
 619 // Stop helper functions.
 620 bool IsWatchpoint(uint64_t code);
 621 void PrintWatchpoint(uint64_t code);
 622 void HandleStop(uint64_t code, Instruction* instr);
 623 bool IsStopInstruction(Instruction* instr);
 624 bool IsEnabledStop(uint64_t code);
 625 void EnableStop(uint64_t code);
 626 void DisableStop(uint64_t code);
 627 void IncreaseStopCounter(uint64_t code);
 628 void PrintStopInfo(uint64_t code);
 629
 630 // Executes one instruction.
 631 void InstructionDecode(Instruction* instr);
 632 // Execute one instruction placed in a branch delay slot.
 633 void BranchDelayInstructionDecode(Instruction* instr) {
 634 if (instr->InstructionBits() == nopInstr) {
 635 // Short-cut generic nop instructions. They are always valid and they
 636 // never change the simulator state.
 637 return;
 638 }
 639
 640 if (instr->IsForbiddenAfterBranch()) {
 // NOTE(review): "Eror:" typo in the runtime FATAL message below — fix it
 // upstream; a documentation-only pass must not alter runtime strings.
 641 FATAL("Eror:Unexpected %i opcode in a branch delay slot.",
 642 instr->OpcodeValue());
 643 }
 644 InstructionDecode(instr);
 645 SNPrintF(trace_buf_, " ");
 646 }
 647
 648 // ICache.
 649 static void CheckICache(base::CustomMatcherHashMap* i_cache,
 650 Instruction* instr);
 651 static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start,
 652 size_t size);
 653 static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
 654 void* page);
 655
 656 enum Exception {
 657 none,
 658 kIntegerOverflow,
 659 kIntegerUnderflow,
 // NOTE(review): an enumerator (Doxygen line 660) was lost from this listing
 // during extraction — confirm against the upstream source.
 661 kNumExceptions
 662 };
 663
 664 // Exceptions.
 665 void SignalException(Exception e);
 666
 667 // Handle arguments and return value for runtime FP functions.
 668 void GetFpArgs(double* x, double* y, int32_t* z);
 669 void SetFpResult(const double& result);
 670
 671 void CallInternal(Address entry);
 672
 673 // Architecture state.
 674 // Registers.
 // NOTE(review): the general-purpose register array declaration was lost from
 // this listing during extraction — confirm against the upstream source.
 676 // Coprocessor Registers.
 677 // Note: FPUregisters_[] array is increased to 64 * 8B = 32 * 16B in
 678 // order to support MSA registers
 679 int64_t FPUregisters_[kNumFPURegisters * 2];
 680 // FPU control register.
 681 uint32_t FCSR_;
 682 // MSA control register.
 683 uint32_t MSACSR_;
 684
 685 // Simulator support.
 686 uintptr_t stack_;
 687 static const size_t kStackProtectionSize = KB;
 688 // This includes a protection margin at each end of the stack area.
 689 static size_t AllocatedStackSize() {
 690 return (v8_flags.sim_stack_size * KB) + (2 * kStackProtectionSize);
 691 }
 692 static size_t UsableStackSize() { return v8_flags.sim_stack_size * KB; }
 693 uintptr_t stack_limit_;
 694 // Added in Simulator::StackLimit()
 695 static const int kAdditionalStackMargin = 4 * KB;
 696
 697 bool pc_modified_;
 698 int64_t icount_;
 699 int break_count_;
 700 base::EmbeddedVector<char, 128> trace_buf_;
 701
 702 // Debugger input.
 703 char* last_debugger_input_;
 704
 // NOTE(review): a member declaration (Doxygen line 705, presumably the
 // owning Isolate pointer) was lost from this listing during extraction —
 // confirm against the upstream source.
 706
 707 // Registered breakpoints.
 708 Instruction* break_pc_;
 709 Instr break_instr_;
 710
 711 // Stop is disabled if bit 31 is set.
 712 static const uint32_t kStopDisabledBit = 1 << 31;
 713
 714 // A stop is enabled, meaning the simulator will stop when meeting the
 715 // instruction, if bit 31 of watched_stops_[code].count is unset.
 716 // The value watched_stops_[code].count & ~(1 << 31) indicates how many times
 717 // the breakpoint was hit or gone through.
 718 struct StopCountAndDesc {
 719 uint32_t count;
 720 char* desc;
 721 };
 722 StopCountAndDesc watched_stops_[kMaxStopCode + 1];
 723
 724 // Synchronization primitives.
 725 enum class MonitorAccess {
 726 Open,
 727 RMW,
 728 };
 729
 730 enum class TransactionSize {
 731 None = 0,
 732 Word = 4,
 733 DoubleWord = 8,
 734 };
 735
 736 // The least-significant bits of the address are ignored. The number of bits
 737 // is implementation-defined, between 3 and minimum page size.
 738 static const uintptr_t kExclusiveTaggedAddrMask = ~((1 << 3) - 1);
 739
 740 class LocalMonitor {
 741 public:
 742 LocalMonitor();
 743
 744 // These functions manage the state machine for the local monitor, but do
 745 // not actually perform loads and stores. NotifyStoreConditional only
 746 // returns true if the store conditional is allowed; the global monitor will
 747 // still have to be checked to see whether the memory should be updated.
 748 void NotifyLoad();
 749 void NotifyLoadLinked(uintptr_t addr, TransactionSize size);
 750 void NotifyStore();
 751 bool NotifyStoreConditional(uintptr_t addr, TransactionSize size);
 752
 753 private:
 754 void Clear();
 755
 756 MonitorAccess access_state_;
 757 uintptr_t tagged_addr_;
 758 TransactionSize size_;
 759 };
 760
 761 class GlobalMonitor {
 762 public:
 763 class LinkedAddress {
 764 public:
 765 LinkedAddress();
 766
 767 private:
 768 friend class GlobalMonitor;
 769 // These functions manage the state machine for the global monitor, but do
 770 // not actually perform loads and stores.
 771 void Clear_Locked();
 772 void NotifyLoadLinked_Locked(uintptr_t addr);
 773 void NotifyStore_Locked();
 774 bool NotifyStoreConditional_Locked(uintptr_t addr,
 775 bool is_requesting_thread);
 776
 777 MonitorAccess access_state_;
 778 uintptr_t tagged_addr_;
 779 LinkedAddress* next_;
 780 LinkedAddress* prev_;
 781 // A scd can fail due to background cache evictions. Rather than
 782 // simulating this, we'll just occasionally introduce cases where a
 783 // store conditional fails. This will happen once after every
 784 // kMaxFailureCounter exclusive stores.
 785 static const int kMaxFailureCounter = 5;
 786 int failure_counter_;
 787 };
 788
 789 // Exposed so it can be accessed by Simulator::{Read,Write}Ex*.
 790 base::Mutex mutex;
 791
 792 void NotifyLoadLinked_Locked(uintptr_t addr, LinkedAddress* linked_address);
 793 void NotifyStore_Locked(LinkedAddress* linked_address);
 794 bool NotifyStoreConditional_Locked(uintptr_t addr,
 795 LinkedAddress* linked_address);
 796
 797 // Called when the simulator is destroyed.
 798 void RemoveLinkedAddress(LinkedAddress* linked_address);
 799
 800 static GlobalMonitor* Get();
 801
 802 private:
 803 // Private constructor. Call {GlobalMonitor::Get()} to get the singleton.
 804 GlobalMonitor() = default;
 805 friend class base::LeakyObject<GlobalMonitor>;
 806
 807 bool IsProcessorInLinkedList_Locked(LinkedAddress* linked_address) const;
 808 void PrependProcessor_Locked(LinkedAddress* linked_address);
 809
 810 LinkedAddress* head_ = nullptr;
 811 };
 812
 813 LocalMonitor local_monitor_;
 814 GlobalMonitor::LinkedAddress global_monitor_thread_;
 815};
816
817} // namespace internal
818} // namespace v8
819
820#endif // defined(USE_SIMULATOR)
821#endif // V8_EXECUTION_MIPS64_SIMULATOR_MIPS64_H_
Isolate * isolate_
#define T
#define one
uint8_t data_[MAX_STACK_LENGTH]
Operand const operand_
const int size_
Definition assembler.cc:132
const ObjectRef type_
int start
uint32_t count
LineAndColumn current
base::Vector< const DirectHandle< Object > > args
Definition execution.cc:74
refactor address components for immediate indexing make OptimizeMaglevOnNextCall optimize to turbofan instead of maglev filter for tracing turbofan compilation trace turbo cfg trace TurboFan s graph trimmer trace TurboFan s control equivalence trace TurboFan s register allocator trace stack load store counters for optimized code in run fuzzing &&concurrent_recompilation trace_turbo trace_turbo_scheduled trace_turbo_stack_accesses verify TurboFan machine graph of code stubs enable FixedArray bounds checks print TurboFan statistics of wasm compilations maximum cumulative size of bytecode considered for inlining scale factor of bytecode size used to calculate the inlining budget * KB
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long enable use of partial constant none
int32_t offset
std::optional< TNode< JSArray > > a
Instruction * instr
ZoneVector< RpoNumber > & result
LiftoffRegister reg
int y
int x
base::Mutex mutex
int m
Definition mul-fft.cc:294
constexpr size_t kPageSize
Definition globals.h:42
int int32_t
Definition unicode.cc:40
unsigned short uint16_t
Definition unicode.cc:39
signed short int16_t
Definition unicode.cc:38
int SNPrintF(Vector< char > str, const char *format,...)
Definition strings.cc:20
uintptr_t Address
Definition memory.h:13
V8_INLINE const Operation & Get(const Graph &graph, OpIndex index)
Definition graph.h:1231
UntaggedUnion< Word32, Word64 > Word
Definition index.h:535
constexpr Register no_reg
const int kNumMSARegisters
const int kMSALanesDword
constexpr uint32_t kMaxStopCode
const int kNumFPURegisters
const int kMSALanesWord
const int kNumSimuRegisters
const int kMSALanesByte
V8_EXPORT_PRIVATE FlagValues v8_flags
const int kMSALanesHalf
constexpr uint8_t kInstrSize
@ None
Definition v8-object.h:141
base::SmallVector< RegisterT, kStaticCapacity > registers_
const uintptr_t stack_limit_
Node * prev_
int Compare(const T &a, const T &b)
T Nabs(T a)
#define FATAL(...)
Definition logging.h:47
#define UNIMPLEMENTED()
Definition logging.h:66
#define DCHECK(condition)
Definition logging.h:482
#define DISALLOW_ASSIGN(TypeName)
Definition macros.h:125
#define V8_EXPORT_PRIVATE
Definition macros.h:460
unsigned long DWORD