v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
simulator-loong64.h
Go to the documentation of this file.
1// Copyright 2021 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_EXECUTION_LOONG64_SIMULATOR_LOONG64_H_
6#define V8_EXECUTION_LOONG64_SIMULATOR_LOONG64_H_
7
8// Declares a Simulator for loongisa instructions if we are not generating a
9// native loongisa binary. This Simulator allows us to run and debug loongisa
10// code generation on regular desktop machines. V8 calls into generated code via
11// the GeneratedCode wrapper, which will start execution in the Simulator or
12// forwards to the real entry on a loongisa HW platform.
13
14// globals.h defines USE_SIMULATOR.
15#include "src/common/globals.h"
16
// Three-way comparison helper: yields -1, 0, or +1 depending on how `a`
// orders relative to `b`. Requires T to provide `==` and `<`.
template <typename T>
int Compare(const T& a, const T& b) {
  if (a < b) return -1;
  if (a == b) return 0;
  return 1;
}
26
// Returns the negative absolute value of its argument. Only enabled for
// signed types; note that unlike abs(), this is well-defined for the most
// negative value of T (it is simply returned unchanged).
template <typename T,
          typename = typename std::enable_if<std::is_signed<T>::value>::type>
T Nabs(T a) {
  return a > 0 ? -a : a;
}
33
34#if defined(USE_SIMULATOR)
35// Running with a simulator.
36
#include "src/base/hashmap.h"
#include "src/base/strings.h"
#include "src/codegen/assembler.h"
#include "src/codegen/loong64/constants-loong64.h"
#include "src/execution/simulator-base.h"
#include "src/utils/allocation.h"
43
44namespace v8 {
45namespace internal {
46
47// -----------------------------------------------------------------------------
48// Utility functions
49
// One simulated instruction-cache page. Tracks, per cache line, whether the
// cached copy in data_ is still valid with respect to the simulated memory.
class CachePage {
 public:
  static const int LINE_VALID = 0;
  static const int LINE_INVALID = 1;

  static const int kPageShift = 12;
  static const int kPageSize = 1 << kPageShift;
  static const int kPageMask = kPageSize - 1;
  static const int kLineShift = 2;  // The cache line is only 4 bytes right now.
  static const int kLineLength = 1 << kLineShift;
  static const int kLineMask = kLineLength - 1;

  // All lines start out invalid; they become valid as code is cached.
  CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }

  // Validity byte for the cache line containing `offset` (offset in bytes
  // within the page).
  char* ValidityByte(int offset) { return &validity_map_[offset >> kLineShift]; }

  // Cached copy of the page contents starting at `offset`.
  char* CachedData(int offset) { return &data_[offset]; }

 private:
  char data_[kPageSize];  // The cached data.
  static const int kValidityMapSize = kPageSize >> kLineShift;
  char validity_map_[kValidityMapSize];  // One byte per line.
};
75
76class SimInstructionBase : public InstructionBase {
77 public:
78 Type InstructionType() const { return type_; }
79 inline Instruction* instr() const { return instr_; }
80 inline int32_t operand() const { return operand_; }
81
82 protected:
83 SimInstructionBase() : operand_(-1), instr_(nullptr), type_(kUnsupported) {}
84 explicit SimInstructionBase(Instruction* instr) {}
85
87 Instruction* instr_;
88 Type type_;
89
90 private:
91 DISALLOW_ASSIGN(SimInstructionBase);
92};
93
94class SimInstruction : public InstructionGetters<SimInstructionBase> {
95 public:
96 SimInstruction() {}
97
98 explicit SimInstruction(Instruction* instr) { *this = instr; }
99
100 SimInstruction& operator=(Instruction* instr) {
101 operand_ = *reinterpret_cast<const int32_t*>(instr);
102 instr_ = instr;
103 type_ = InstructionBase::InstructionType();
104 DCHECK(reinterpret_cast<void*>(&operand_) == this);
105 return *this;
106 }
107};
108
109class Simulator : public SimulatorBase {
110 public:
111 friend class Loong64Debugger;
112
113 // Registers are declared in order.
114 enum Register {
115 no_reg = -1,
116 zero_reg = 0,
117 ra,
118 gp,
119 sp,
120 a0,
121 a1,
122 a2,
123 a3,
124 a4,
125 a5,
126 a6,
127 a7,
128 t0,
129 t1,
130 t2,
131 t3,
132 t4,
133 t5,
134 t6,
135 t7,
136 t8,
137 tp,
138 fp,
139 s0,
140 s1,
141 s2,
142 s3,
143 s4,
144 s5,
145 s6,
146 s7,
147 s8,
148 pc, // pc must be the last register.
150 // aliases
151 v0 = a0,
152 v1 = a1
153 };
154
155 // Condition flag registers.
156 enum CFRegister {
157 fcc0,
158 fcc1,
159 fcc2,
160 fcc3,
161 fcc4,
162 fcc5,
163 fcc6,
164 fcc7,
165 kNumCFRegisters
166 };
167
168 // Floating point registers.
169 enum FPURegister {
170 f0,
171 f1,
172 f2,
173 f3,
174 f4,
175 f5,
176 f6,
177 f7,
178 f8,
179 f9,
180 f10,
181 f11,
182 f12,
183 f13,
184 f14,
185 f15,
186 f16,
187 f17,
188 f18,
189 f19,
190 f20,
191 f21,
192 f22,
193 f23,
194 f24,
195 f25,
196 f26,
197 f27,
198 f28,
199 f29,
200 f30,
201 f31,
203 };
204
205 explicit Simulator(Isolate* isolate);
206 ~Simulator();
207
208 // The currently executing Simulator instance. Potentially there can be one
209 // for each native thread.
210 V8_EXPORT_PRIVATE static Simulator* current(v8::internal::Isolate* isolate);
211
212 float ceil(float value);
213 float floor(float value);
214 float trunc(float value);
215 double ceil(double value);
216 double floor(double value);
217 double trunc(double value);
218
219 // Accessors for register state. Reading the pc value adheres to the LOONG64
220 // architecture specification and is off by a 8 from the currently executing
221 // instruction.
222 void set_register(int reg, int64_t value);
223 void set_register_word(int reg, int32_t value);
224 void set_dw_register(int dreg, const int* dbl);
225 V8_EXPORT_PRIVATE int64_t get_register(int reg) const;
226 double get_double_from_register_pair(int reg);
227 // Same for FPURegisters.
228 void set_fpu_register(int fpureg, int64_t value);
229 void set_fpu_register_word(int fpureg, int32_t value);
230 void set_fpu_register_hi_word(int fpureg, int32_t value);
231 void set_fpu_register_float(int fpureg, float value);
232 void set_fpu_register_double(int fpureg, double value);
233 void set_fpu_register_invalid_result64(float original, float rounded);
234 void set_fpu_register_invalid_result(float original, float rounded);
235 void set_fpu_register_word_invalid_result(float original, float rounded);
236 void set_fpu_register_invalid_result64(double original, double rounded);
237 void set_fpu_register_invalid_result(double original, double rounded);
238 void set_fpu_register_word_invalid_result(double original, double rounded);
239 int64_t get_fpu_register(int fpureg) const;
240 int32_t get_fpu_register_word(int fpureg) const;
241 int32_t get_fpu_register_signed_word(int fpureg) const;
242 int32_t get_fpu_register_hi_word(int fpureg) const;
243 float get_fpu_register_float(int fpureg) const;
244 double get_fpu_register_double(int fpureg) const;
245 void set_cf_register(int cfreg, bool value);
246 bool get_cf_register(int cfreg) const;
247 void set_fcsr_rounding_mode(FPURoundingMode mode);
248 unsigned int get_fcsr_rounding_mode();
249 void set_fcsr_bit(uint32_t cc, bool value);
250 bool test_fcsr_bit(uint32_t cc);
251 bool set_fcsr_round_error(double original, double rounded);
252 bool set_fcsr_round64_error(double original, double rounded);
253 bool set_fcsr_round_error(float original, float rounded);
254 bool set_fcsr_round64_error(float original, float rounded);
255 void round_according_to_fcsr(double toRound, double* rounded,
256 int32_t* rounded_int);
257 void round64_according_to_fcsr(double toRound, double* rounded,
258 int64_t* rounded_int);
259 void round_according_to_fcsr(float toRound, float* rounded,
260 int32_t* rounded_int);
261 void round64_according_to_fcsr(float toRound, float* rounded,
262 int64_t* rounded_int);
263 // Special case of set_register and get_register to access the raw PC value.
264 void set_pc(int64_t value);
265 V8_EXPORT_PRIVATE int64_t get_pc() const;
266
267 Address get_sp() const { return static_cast<Address>(get_register(sp)); }
268
269 // Accessor to the internal simulator stack area. Adds a safety
270 // margin to prevent overflows (kAdditionalStackMargin).
271 uintptr_t StackLimit(uintptr_t c_limit) const;
272
273 uintptr_t StackBase() const;
274
275 // Return central stack view, without additional safety margins.
276 // Users, for example wasm::StackMemory, can add their own.
277 base::Vector<uint8_t> GetCentralStackView() const;
278 static constexpr int JSStackLimitMargin() { return kAdditionalStackMargin; }
279
280 void IterateRegistersAndStack(::heap::base::StackVisitor* visitor);
281
282 // Executes LOONG64 instructions until the PC reaches end_sim_pc.
283 void Execute();
284
285 // Only arguments up to 64 bits in size are supported.
286 class CallArgument {
287 public:
288 template <typename T>
289 explicit CallArgument(T argument) {
290 bits_ = 0;
291 DCHECK(sizeof(argument) <= sizeof(bits_));
292 bits_ = ConvertArg(argument);
293 type_ = GP_ARG;
294 }
295
296 explicit CallArgument(double argument) {
297 DCHECK(sizeof(argument) == sizeof(bits_));
298 memcpy(&bits_, &argument, sizeof(argument));
299 type_ = FP_ARG;
300 }
301
302 explicit CallArgument(float argument) {
303 // TODO(all): CallArgument(float) is untested.
305 }
306
307 // This indicates the end of the arguments list, so that CallArgument
308 // objects can be passed into varargs functions.
309 static CallArgument End() { return CallArgument(); }
310
311 int64_t bits() const { return bits_; }
312 bool IsEnd() const { return type_ == NO_ARG; }
313 bool IsGP() const { return type_ == GP_ARG; }
314 bool IsFP() const { return type_ == FP_ARG; }
315
316 private:
317 enum CallArgumentType { GP_ARG, FP_ARG, NO_ARG };
318
319 // All arguments are aligned to at least 64 bits and we don't support
320 // passing bigger arguments, so the payload size can be fixed at 64 bits.
321 int64_t bits_;
322 CallArgumentType type_;
323
324 CallArgument() { type_ = NO_ARG; }
325 };
326
327 template <typename Return, typename... Args>
328 Return Call(Address entry, Args... args) {
329 // Convert all arguments to CallArgument.
330 CallArgument call_args[] = {CallArgument(args)..., CallArgument::End()};
331 CallImpl(entry, call_args);
332 return ReadReturn<Return>();
333 }
334
335 // Alternative: call a 2-argument double function.
336 double CallFP(Address entry, double d0, double d1);
337
338 // Push an address onto the JS stack.
339 V8_EXPORT_PRIVATE uintptr_t PushAddress(uintptr_t address);
340
341 // Pop an address from the JS stack.
342 V8_EXPORT_PRIVATE uintptr_t PopAddress();
343
344 // Debugger input.
345 void set_last_debugger_input(char* input);
346 char* last_debugger_input() { return last_debugger_input_; }
347
348 // Redirection support.
349 static void SetRedirectInstruction(Instruction* instruction);
350
351 // ICache checking.
352 static bool ICacheMatch(void* one, void* two);
353 static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
354 size_t size);
355
356 // Returns true if pc register contains one of the 'special_values' defined
357 // below (bad_ra, end_sim_pc).
358 bool has_bad_pc() const;
359
360 private:
361 enum special_values {
362 // Known bad pc value to ensure that the simulator does not execute
363 // without being properly setup.
364 bad_ra = -1,
365 // A pc value used to signal the simulator to stop execution. Generally
366 // the ra is set to this value on transition from native C code to
367 // simulated execution, so that the simulator can "return" to the native
368 // C code.
369 end_sim_pc = -2,
370 // Unpredictable value.
371 Unpredictable = 0xbadbeaf
372 };
373
374 V8_EXPORT_PRIVATE void CallImpl(Address entry, CallArgument* args);
375
376 void CallAnyCTypeFunction(Address target_address,
377 const EncodedCSignature& signature);
378
379 // Read floating point return values.
380 template <typename T>
381 typename std::enable_if<std::is_floating_point<T>::value, T>::type
382 ReadReturn() {
383 return static_cast<T>(get_fpu_register_double(f0));
384 }
385 // Read non-float return values.
386 template <typename T>
387 typename std::enable_if<!std::is_floating_point<T>::value, T>::type
388 ReadReturn() {
389 return ConvertReturn<T>(get_register(a0));
390 }
391
392 // Unsupported instructions use Format to print an error and stop execution.
393 void Format(Instruction* instr, const char* format);
394
395 // Helpers for data value tracing.
396 enum TraceType {
397 BYTE,
398 HALF,
399 WORD,
400 DWORD,
401 FLOAT,
402 DOUBLE,
403 FLOAT_DOUBLE,
404 WORD_DWORD
405 };
406
407 // "Probe" if an address range can be read. This is currently implemented
408 // by doing a 1-byte read of the last accessed byte, since the assumption is
409 // that if the last byte is accessible, also all lower bytes are accessible
410 // (which holds true for Wasm).
411 // Returns true if the access was successful, false if the access raised a
412 // signal which was then handled by the trap handler (also see
413 // {trap_handler::ProbeMemory}). If the access raises a signal which is not
414 // handled by the trap handler (e.g. because the current PC is not registered
415 // as a protected instruction), the signal will propagate and make the process
416 // crash. If no trap handler is available, this always returns true.
417 bool ProbeMemory(uintptr_t address, uintptr_t access_size);
418
419 // Read and write memory.
420 inline uint32_t ReadBU(int64_t addr);
421 inline int32_t ReadB(int64_t addr);
422 inline void WriteB(int64_t addr, uint8_t value);
423 inline void WriteB(int64_t addr, int8_t value);
424
425 inline uint16_t ReadHU(int64_t addr, Instruction* instr);
426 inline int16_t ReadH(int64_t addr, Instruction* instr);
427 // Note: Overloaded on the sign of the value.
428 inline void WriteH(int64_t addr, uint16_t value, Instruction* instr);
429 inline void WriteH(int64_t addr, int16_t value, Instruction* instr);
430
431 inline uint32_t ReadWU(int64_t addr, Instruction* instr);
432 inline int32_t ReadW(int64_t addr, Instruction* instr, TraceType t = WORD);
433 inline void WriteW(int64_t addr, int32_t value, Instruction* instr);
434 void WriteConditionalW(int64_t addr, int32_t value, Instruction* instr,
435 int32_t* done);
436 inline int64_t Read2W(int64_t addr, Instruction* instr);
437 inline void Write2W(int64_t addr, int64_t value, Instruction* instr);
438 inline void WriteConditional2W(int64_t addr, int64_t value,
439 Instruction* instr, int32_t* done);
440
441 inline double ReadD(int64_t addr, Instruction* instr);
442 inline void WriteD(int64_t addr, double value, Instruction* instr);
443
444 template <typename T>
445 T ReadMem(int64_t addr, Instruction* instr);
446 template <typename T>
447 void WriteMem(int64_t addr, T value, Instruction* instr);
448
449 // Helper for debugging memory access.
450 inline void DieOrDebug();
451
452 void TraceRegWr(int64_t value, TraceType t = DWORD);
453 void TraceMemWr(int64_t addr, int64_t value, TraceType t);
454 void TraceMemRd(int64_t addr, int64_t value, TraceType t = DWORD);
455 template <typename T>
456 void TraceMemRd(int64_t addr, T value);
457 template <typename T>
458 void TraceMemWr(int64_t addr, T value);
459
460 SimInstruction instr_;
461
462 // Executing is handled based on the instruction type.
463 void DecodeTypeOp6();
464 void DecodeTypeOp7();
465 void DecodeTypeOp8();
466 void DecodeTypeOp10();
467 void DecodeTypeOp12();
468 void DecodeTypeOp14();
469 void DecodeTypeOp17();
470 void DecodeTypeOp22();
471
472 inline int32_t rj_reg() const { return instr_.RjValue(); }
473 inline int64_t rj() const { return get_register(rj_reg()); }
474 inline uint64_t rj_u() const {
475 return static_cast<uint64_t>(get_register(rj_reg()));
476 }
477 inline int32_t rk_reg() const { return instr_.RkValue(); }
478 inline int64_t rk() const { return get_register(rk_reg()); }
479 inline uint64_t rk_u() const {
480 return static_cast<uint64_t>(get_register(rk_reg()));
481 }
482 inline int32_t rd_reg() const { return instr_.RdValue(); }
483 inline int64_t rd() const { return get_register(rd_reg()); }
484 inline uint64_t rd_u() const {
485 return static_cast<uint64_t>(get_register(rd_reg()));
486 }
487 inline int32_t fa_reg() const { return instr_.FaValue(); }
488 inline float fa_float() const { return get_fpu_register_float(fa_reg()); }
489 inline double fa_double() const { return get_fpu_register_double(fa_reg()); }
490 inline int32_t fj_reg() const { return instr_.FjValue(); }
491 inline float fj_float() const { return get_fpu_register_float(fj_reg()); }
492 inline double fj_double() const { return get_fpu_register_double(fj_reg()); }
493 inline int32_t fk_reg() const { return instr_.FkValue(); }
494 inline float fk_float() const { return get_fpu_register_float(fk_reg()); }
495 inline double fk_double() const { return get_fpu_register_double(fk_reg()); }
496 inline int32_t fd_reg() const { return instr_.FdValue(); }
497 inline float fd_float() const { return get_fpu_register_float(fd_reg()); }
498 inline double fd_double() const { return get_fpu_register_double(fd_reg()); }
499 inline int32_t cj_reg() const { return instr_.CjValue(); }
500 inline bool cj() const { return get_cf_register(cj_reg()); }
501 inline int32_t cd_reg() const { return instr_.CdValue(); }
502 inline bool cd() const { return get_cf_register(cd_reg()); }
503 inline int32_t ca_reg() const { return instr_.CaValue(); }
504 inline bool ca() const { return get_cf_register(ca_reg()); }
505 inline uint32_t sa2() const { return instr_.Sa2Value(); }
506 inline uint32_t sa3() const { return instr_.Sa3Value(); }
507 inline uint32_t ui5() const { return instr_.Ui5Value(); }
508 inline uint32_t ui6() const { return instr_.Ui6Value(); }
509 inline uint32_t lsbw() const { return instr_.LsbwValue(); }
510 inline uint32_t msbw() const { return instr_.MsbwValue(); }
511 inline uint32_t lsbd() const { return instr_.LsbdValue(); }
512 inline uint32_t msbd() const { return instr_.MsbdValue(); }
513 inline uint32_t cond() const { return instr_.CondValue(); }
514 inline int32_t si12() const { return (instr_.Si12Value() << 20) >> 20; }
515 inline uint32_t ui12() const { return instr_.Ui12Value(); }
516 inline int32_t si14() const { return (instr_.Si14Value() << 18) >> 18; }
517 inline int32_t si16() const { return (instr_.Si16Value() << 16) >> 16; }
518 inline int32_t si20() const { return (instr_.Si20Value() << 12) >> 12; }
519
520 inline void SetResult(const int32_t rd_reg, const int64_t alu_out) {
521 set_register(rd_reg, alu_out);
522 TraceRegWr(alu_out);
523 }
524
525 inline void SetFPUWordResult(int32_t fd_reg, int32_t alu_out) {
526 set_fpu_register_word(fd_reg, alu_out);
527 TraceRegWr(get_fpu_register(fd_reg), WORD);
528 }
529
530 inline void SetFPUWordResult2(int32_t fd_reg, int32_t alu_out) {
531 set_fpu_register_word(fd_reg, alu_out);
532 TraceRegWr(get_fpu_register(fd_reg));
533 }
534
535 inline void SetFPUResult(int32_t fd_reg, int64_t alu_out) {
536 set_fpu_register(fd_reg, alu_out);
537 TraceRegWr(get_fpu_register(fd_reg));
538 }
539
540 inline void SetFPUResult2(int32_t fd_reg, int64_t alu_out) {
541 set_fpu_register(fd_reg, alu_out);
542 TraceRegWr(get_fpu_register(fd_reg), DOUBLE);
543 }
544
545 inline void SetFPUFloatResult(int32_t fd_reg, float alu_out) {
546 set_fpu_register_float(fd_reg, alu_out);
547 TraceRegWr(get_fpu_register(fd_reg), FLOAT);
548 }
549
550 inline void SetFPUDoubleResult(int32_t fd_reg, double alu_out) {
551 set_fpu_register_double(fd_reg, alu_out);
552 TraceRegWr(get_fpu_register(fd_reg), DOUBLE);
553 }
554
555 // Used for breakpoints.
556 void SoftwareInterrupt();
557
558 // Stop helper functions.
559 bool IsWatchpoint(uint64_t code);
560 void PrintWatchpoint(uint64_t code);
561 void HandleStop(uint64_t code, Instruction* instr);
562 bool IsStopInstruction(Instruction* instr);
563 bool IsEnabledStop(uint64_t code);
564 void EnableStop(uint64_t code);
565 void DisableStop(uint64_t code);
566 void IncreaseStopCounter(uint64_t code);
567 void PrintStopInfo(uint64_t code);
568
569 // Executes one instruction.
570 void InstructionDecode(Instruction* instr);
571 // Execute one instruction placed in a branch delay slot.
572
573 // ICache.
574 static void CheckICache(base::CustomMatcherHashMap* i_cache,
575 Instruction* instr);
576 static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start,
577 size_t size);
578 static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
579 void* page);
580
581 enum Exception {
582 none,
583 kIntegerOverflow,
584 kIntegerUnderflow,
586 kNumExceptions
587 };
588
589 // Exceptions.
590 void SignalException(Exception e);
591
592 // Handle arguments and return value for runtime FP functions.
593 void GetFpArgs(double* x, double* y, int32_t* z);
594 void SetFpResult(const double& result);
595
596 void CallInternal(Address entry);
597
598 // Architecture state.
599 // Registers.
601 // Floating point Registers.
602 int64_t FPUregisters_[kNumFPURegisters];
603 // Condition flags Registers.
604 bool CFregisters_[kNumCFRegisters];
605 // FPU control register.
606 uint32_t FCSR_;
607
608 // Simulator support.
609 uintptr_t stack_;
610 static const size_t kStackProtectionSize = KB;
611 // This includes a protection margin at each end of the stack area.
612 static size_t AllocatedStackSize() {
613 return (v8_flags.sim_stack_size * KB) + (2 * kStackProtectionSize);
614 }
615 static size_t UsableStackSize() { return v8_flags.sim_stack_size * KB; }
616 uintptr_t stack_limit_;
617 // Added in Simulator::StackLimit()
618 static const int kAdditionalStackMargin = 4 * KB;
619
620 bool pc_modified_;
621 int64_t icount_;
622 int break_count_;
623 base::EmbeddedVector<char, 128> trace_buf_;
624
625 // Debugger input.
626 char* last_debugger_input_;
627
629
630 // Registered breakpoints.
631 Instruction* break_pc_;
632 Instr break_instr_;
633
634 // Stop is disabled if bit 31 is set.
635 static const uint32_t kStopDisabledBit = 1 << 31;
636
637 // A stop is enabled, meaning the simulator will stop when meeting the
638 // instruction, if bit 31 of watched_stops_[code].count is unset.
639 // The value watched_stops_[code].count & ~(1 << 31) indicates how many times
640 // the breakpoint was hit or gone through.
641 struct StopCountAndDesc {
642 uint32_t count;
643 char* desc;
644 };
645 StopCountAndDesc watched_stops_[kMaxStopCode + 1];
646
647 // Synchronization primitives.
648 enum class MonitorAccess {
649 Open,
650 RMW,
651 };
652
653 enum class TransactionSize {
654 None = 0,
655 Word = 4,
656 DoubleWord = 8,
657 };
658
659 // The least-significant bits of the address are ignored. The number of bits
660 // is implementation-defined, between 3 and minimum page size.
661 static const uintptr_t kExclusiveTaggedAddrMask = ~((1 << 3) - 1);
662
663 class LocalMonitor {
664 public:
665 LocalMonitor();
666
667 // These functions manage the state machine for the local monitor, but do
668 // not actually perform loads and stores. NotifyStoreConditional only
669 // returns true if the store conditional is allowed; the global monitor will
670 // still have to be checked to see whether the memory should be updated.
671 void NotifyLoad();
672 void NotifyLoadLinked(uintptr_t addr, TransactionSize size);
673 void NotifyStore();
674 bool NotifyStoreConditional(uintptr_t addr, TransactionSize size);
675
676 private:
677 void Clear();
678
679 MonitorAccess access_state_;
680 uintptr_t tagged_addr_;
681 TransactionSize size_;
682 };
683
684 class GlobalMonitor {
685 public:
686 class LinkedAddress {
687 public:
688 LinkedAddress();
689
690 private:
691 friend class GlobalMonitor;
692 // These functions manage the state machine for the global monitor, but do
693 // not actually perform loads and stores.
694 void Clear_Locked();
695 void NotifyLoadLinked_Locked(uintptr_t addr);
696 void NotifyStore_Locked();
697 bool NotifyStoreConditional_Locked(uintptr_t addr,
698 bool is_requesting_thread);
699
700 MonitorAccess access_state_;
701 uintptr_t tagged_addr_;
702 LinkedAddress* next_;
703 LinkedAddress* prev_;
704 // A scd can fail due to background cache evictions. Rather than
705 // simulating this, we'll just occasionally introduce cases where an
706 // store conditional fails. This will happen once after every
707 // kMaxFailureCounter exclusive stores.
708 static const int kMaxFailureCounter = 5;
709 int failure_counter_;
710 };
711
712 // Exposed so it can be accessed by Simulator::{Read,Write}Ex*.
713 base::Mutex mutex;
714
715 void NotifyLoadLinked_Locked(uintptr_t addr, LinkedAddress* linked_address);
716 void NotifyStore_Locked(LinkedAddress* linked_address);
717 bool NotifyStoreConditional_Locked(uintptr_t addr,
718 LinkedAddress* linked_address);
719
720 // Called when the simulator is destroyed.
721 void RemoveLinkedAddress(LinkedAddress* linked_address);
722
723 static GlobalMonitor* Get();
724
725 private:
726 // Private constructor. Call {GlobalMonitor::Get()} to get the singleton.
727 GlobalMonitor() = default;
728 friend class base::LeakyObject<GlobalMonitor>;
729
730 bool IsProcessorInLinkedList_Locked(LinkedAddress* linked_address) const;
731 void PrependProcessor_Locked(LinkedAddress* linked_address);
732
733 LinkedAddress* head_ = nullptr;
734 };
735
736 LocalMonitor local_monitor_;
737 GlobalMonitor::LinkedAddress global_monitor_thread_;
738};
739
740} // namespace internal
741} // namespace v8
742
743#endif // defined(USE_SIMULATOR)
744#endif // V8_EXECUTION_LOONG64_SIMULATOR_LOONG64_H_
Isolate * isolate_
#define T
#define one
uint8_t data_[MAX_STACK_LENGTH]
Operand const operand_
const int size_
Definition assembler.cc:132
const ObjectRef type_
int start
uint32_t count
LineAndColumn current
base::Vector< const DirectHandle< Object > > args
Definition execution.cc:74
refactor address components for immediate indexing make OptimizeMaglevOnNextCall optimize to turbofan instead of maglev filter for tracing turbofan compilation trace turbo cfg trace TurboFan s graph trimmer trace TurboFan s control equivalence trace TurboFan s register allocator trace stack load store counters for optimized code in run fuzzing &&concurrent_recompilation trace_turbo trace_turbo_scheduled trace_turbo_stack_accesses verify TurboFan machine graph of code stubs enable FixedArray bounds checks print TurboFan statistics of wasm compilations maximum cumulative size of bytecode considered for inlining scale factor of bytecode size used to calculate the inlining budget * KB
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long enable use of partial constant none
int32_t offset
std::optional< TNode< JSArray > > a
Instruction * instr
ZoneVector< RpoNumber > & result
LiftoffRegister reg
int y
int x
base::Mutex mutex
constexpr size_t kPageSize
Definition globals.h:42
int int32_t
Definition unicode.cc:40
unsigned short uint16_t
Definition unicode.cc:39
signed short int16_t
Definition unicode.cc:38
uintptr_t Address
Definition memory.h:13
V8_INLINE const Operation & Get(const Graph &graph, OpIndex index)
Definition graph.h:1231
UntaggedUnion< Word32, Word64 > Word
Definition index.h:535
constexpr Register no_reg
constexpr uint32_t kMaxStopCode
const int kNumFPURegisters
const int kNumSimuRegisters
V8_EXPORT_PRIVATE FlagValues v8_flags
@ None
Definition v8-object.h:141
base::SmallVector< RegisterT, kStaticCapacity > registers_
const uintptr_t stack_limit_
Node * prev_
int Compare(const T &a, const T &b)
T Nabs(T a)
#define UNIMPLEMENTED()
Definition logging.h:66
#define DCHECK(condition)
Definition logging.h:482
#define DISALLOW_ASSIGN(TypeName)
Definition macros.h:125
#define V8_EXPORT_PRIVATE
Definition macros.h:460
unsigned long DWORD