simulator-riscv.h
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Copyright (c) 2010-2017,
// The Regents of the University of California (Regents). All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 1. Redistributions of source code must retain the above copyright notice,
//    this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
// 3. Neither the name of the Regents nor the names of its contributors may
//    be used to endorse or promote products derived from this software
//    without specific prior written permission.
//
// IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
// SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS,
// ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
// REGENTS HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED
// TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
// HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
// MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.

// The original source code covered by the above license has been
// significantly modified by the V8 project authors.

// Declares a Simulator for RISC-V instructions if we are not generating a
// native RISC-V binary. This Simulator allows us to run and debug RISC-V code
// generation on regular desktop machines. V8 calls into generated code via
// the GeneratedCode wrapper, which will start execution in the Simulator or
// forward to the real entry on a RISC-V HW platform.

#ifndef V8_EXECUTION_RISCV_SIMULATOR_RISCV_H_
#define V8_EXECUTION_RISCV_SIMULATOR_RISCV_H_

// globals.h defines USE_SIMULATOR.
#include "src/common/globals.h"

template <typename T>
int Compare(const T& a, const T& b) {
  if (a == b)
    return 0;
  else if (a < b)
    return -1;
  else
    return 1;
}
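
// For example, Compare(3, 5) == -1, Compare(5, 5) == 0 and Compare(7, 5) == 1.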

// Returns the negative absolute value of its argument.
template <typename T,
          typename = typename std::enable_if<std::is_signed<T>::value>::type>
T Nabs(T a) {
  return a < 0 ? a : -a;
}
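
// For example, Nabs(5) == -5 and Nabs(-7) == -7. Unlike std::abs, this is
// well defined for the most negative value of T, since it never negates a
// negative input.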

#if defined(USE_SIMULATOR)
typedef signed __int128_t __attribute__((__mode__(__TI__)));
typedef unsigned __uint128_t __attribute__((__mode__(__TI__)));
// Running with a simulator.

#include "src/base/hashmap.h"

namespace heap::base {
class StackVisitor;
}

namespace v8 {
namespace internal {

// -----------------------------------------------------------------------------
// Utility types and functions for RISCV
#ifdef V8_TARGET_ARCH_32_BIT
using sreg_t = int32_t;
using reg_t = uint32_t;
using freg_t = uint64_t;
using sfreg_t = int64_t;
#elif V8_TARGET_ARCH_64_BIT
using sreg_t = int64_t;
using reg_t = uint64_t;
using freg_t = uint64_t;
using sfreg_t = int64_t;
#else
#error "Cannot detect Riscv's bitwidth"
#endif

#define sext32(x) ((sreg_t)(int32_t)(x))
#define zext32(x) ((reg_t)(uint32_t)(x))

#ifdef V8_TARGET_ARCH_64_BIT
#define sext_xlen(x) (((sreg_t)(x) << (64 - xlen)) >> (64 - xlen))
#define zext_xlen(x) (((reg_t)(x) << (64 - xlen)) >> (64 - xlen))
#elif V8_TARGET_ARCH_32_BIT
#define sext_xlen(x) (((sreg_t)(x) << (32 - xlen)) >> (32 - xlen))
#define zext_xlen(x) (((reg_t)(x) << (32 - xlen)) >> (32 - xlen))
#endif
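
// For example, with xlen == 64, sext32(0x80000000u) yields
// 0xFFFFFFFF80000000 (the 32-bit value sign-extended to the full register
// width), while zext32(0x80000000u) yields 0x0000000080000000.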

#define BIT(n) (0x1LL << n)
#define QUIET_BIT_S(nan) (base::bit_cast<int32_t>(nan) & BIT(22))
#define QUIET_BIT_D(nan) (base::bit_cast<int64_t>(nan) & BIT(51))
static inline bool isSnan(float fp) { return !QUIET_BIT_S(fp); }
static inline bool isSnan(double fp) { return !QUIET_BIT_D(fp); }
#undef QUIET_BIT_S
#undef QUIET_BIT_D
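// A NaN is signaling iff its most significant fraction bit (the quiet bit,
// bit 22 for floats, bit 51 for doubles) is clear: e.g. the float pattern
// 0x7F800001 is a signaling NaN, while 0x7FC00000 is the canonical quiet NaN.
// Note these helpers assume the argument is already known to be a NaN.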

#ifdef V8_TARGET_ARCH_64_BIT
inline uint64_t mulhu(uint64_t a, uint64_t b) {
  __uint128_t full_result = ((__uint128_t)a) * ((__uint128_t)b);
  return full_result >> 64;
}

inline int64_t mulh(int64_t a, int64_t b) {
  __int128_t full_result = ((__int128_t)a) * ((__int128_t)b);
  return full_result >> 64;
}

inline int64_t mulhsu(int64_t a, uint64_t b) {
  __int128_t full_result = ((__int128_t)a) * ((__uint128_t)b);
  return full_result >> 64;
}
#elif V8_TARGET_ARCH_32_BIT
inline uint32_t mulhu(uint32_t a, uint32_t b) {
  uint64_t full_result = ((uint64_t)a) * ((uint64_t)b);
  uint64_t upper_part = full_result >> 32;
  return (uint32_t)upper_part;
}

inline int32_t mulh(int32_t a, int32_t b) {
  int64_t full_result = ((int64_t)a) * ((int64_t)b);
  int64_t upper_part = full_result >> 32;
  return (int32_t)upper_part;
}

inline int32_t mulhsu(int32_t a, uint32_t b) {
  int64_t full_result = ((int64_t)a) * ((uint64_t)b);
  int64_t upper_part = full_result >> 32;
  return (int32_t)upper_part;
}
#endif
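
// Worked example for the 64-bit variants: mulhu(1ULL << 63, 2) forms the
// 128-bit product 2^64 and returns its upper half, 1, while the RISC-V MUL
// instruction would produce the lower half, 0.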

// Floating point helpers.
#define F32_SIGN ((uint32_t)1 << 31)
union u32_f32 {
  uint32_t u;
  float f;
};
inline float fsgnj32(float rs1, float rs2, bool n, bool x) {
  u32_f32 a = {.f = rs1}, b = {.f = rs2};
  u32_f32 res;
  res.u = (a.u & ~F32_SIGN) |
          ((((x) ? a.u : (n) ? F32_SIGN : 0) ^ b.u) & F32_SIGN);
  return res.f;
}
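
// Sign-injection examples with rs1 = 1.0f, rs2 = -2.0f: FSGNJ (n and x both
// false) copies rs2's sign onto rs1's magnitude, giving -1.0f; FSGNJN
// (n true) injects the negated sign of rs2, giving 1.0f; FSGNJX (x true)
// XORs the two sign bits, giving -1.0f.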

inline Float32 fsgnj32(Float32 rs1, Float32 rs2, bool n, bool x) {
  u32_f32 a = {.u = rs1.get_bits()}, b = {.u = rs2.get_bits()};
  u32_f32 res;
  if (x) {  // RO_FSGNJX_S
    res.u = (a.u & ~F32_SIGN) | ((a.u ^ b.u) & F32_SIGN);
  } else {
    if (n) {  // RO_FSGNJN_S
      res.u = (a.u & ~F32_SIGN) | ((F32_SIGN ^ b.u) & F32_SIGN);
    } else {  // RO_FSGNJ_S
      res.u = (a.u & ~F32_SIGN) | ((0 ^ b.u) & F32_SIGN);
    }
  }
  return Float32::FromBits(res.u);
}
#define F64_SIGN ((uint64_t)1 << 63)
union u64_f64 {
  uint64_t u;
  double d;
};
inline double fsgnj64(double rs1, double rs2, bool n, bool x) {
  u64_f64 a = {.d = rs1}, b = {.d = rs2};
  u64_f64 res;
  res.u = (a.u & ~F64_SIGN) |
          ((((x) ? a.u : (n) ? F64_SIGN : 0) ^ b.u) & F64_SIGN);
  return res.d;
}

inline Float64 fsgnj64(Float64 rs1, Float64 rs2, bool n, bool x) {
  u64_f64 a = {.u = rs1.get_bits()}, b = {.u = rs2.get_bits()};
  u64_f64 res;
  if (x) {  // RO_FSGNJX_D
    res.u = (a.u & ~F64_SIGN) | ((a.u ^ b.u) & F64_SIGN);
  } else {
    if (n) {  // RO_FSGNJN_D
      res.u = (a.u & ~F64_SIGN) | ((F64_SIGN ^ b.u) & F64_SIGN);
    } else {  // RO_FSGNJ_D
      res.u = (a.u & ~F64_SIGN) | ((0 ^ b.u) & F64_SIGN);
    }
  }
  return Float64::FromBits(res.u);
}
inline bool is_boxed_float(int64_t v) { return (uint32_t)((v >> 32) + 1) == 0; }
inline int64_t box_float(float v) {
  return (0xFFFFFFFF00000000 | base::bit_cast<int32_t>(v));
}

inline uint64_t box_float(uint32_t v) { return (0xFFFFFFFF00000000 | v); }
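
// These helpers implement RISC-V NaN boxing: a 32-bit float held in a 64-bit
// FPU register must have its upper 32 bits set to all ones so that, read as
// a double, it is a (quiet) NaN. For example, box_float(1.0f) yields
// 0xFFFFFFFF3F800000, and is_boxed_float() simply checks that the upper word
// is 0xFFFFFFFF.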

// -----------------------------------------------------------------------------
// Utility functions

class CachePage {
 public:
  static const int LINE_VALID = 0;
  static const int LINE_INVALID = 1;

  static const int kPageShift = 12;
  static const int kPageSize = 1 << kPageShift;
  static const int kPageMask = kPageSize - 1;
  static const int kLineShift = 2;  // The cache line is only 4 bytes right now.
  static const int kLineLength = 1 << kLineShift;
  static const int kLineMask = kLineLength - 1;

  CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }

  char* ValidityByte(int offset) {
    return &validity_map_[offset >> kLineShift];
  }

  char* CachedData(int offset) { return &data_[offset]; }

 private:
  char data_[kPageSize];  // The cached data.
  static const int kValidityMapSize = kPageSize >> kLineShift;
  char validity_map_[kValidityMapSize];  // One byte per line.
};
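
// For example, with kPageShift == 12 and kLineShift == 2, each 4 KB page is
// tracked at 4-byte line granularity, so offset 0x123 within a page maps to
// validity byte 0x123 >> 2 == 0x48.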

class SimInstructionBase : public InstructionBase {
 public:
  Type InstructionType() const { return type_; }
  inline Instruction* instr() const { return instr_; }
  inline int32_t operand() const { return operand_; }

 protected:
  SimInstructionBase() : operand_(-1), instr_(nullptr), type_(kUnsupported) {}
  explicit SimInstructionBase(Instruction* instr) {}

  int32_t operand_;
  Instruction* instr_;
  Type type_;

 private:
  DISALLOW_ASSIGN(SimInstructionBase);
};

class SimInstruction : public InstructionGetters<SimInstructionBase> {
 public:
  SimInstruction() {}

  explicit SimInstruction(Instruction* instr) { *this = instr; }

  SimInstruction& operator=(Instruction* instr) {
    operand_ = *reinterpret_cast<const int32_t*>(instr);
    instr_ = instr;
    type_ = InstructionBase::InstructionType();
    DCHECK(reinterpret_cast<void*>(&operand_) == this);
    return *this;
  }
};

class Simulator : public SimulatorBase {
 public:
  friend class RiscvDebugger;

  // Registers are declared in order. See SMRL chapter 2.
  enum Register {
    no_reg = -1,
    zero_reg = 0,
    ra,
    sp,
    gp,
    tp,
    t0,
    t1,
    t2,
    s0,
    s1,
    a0,
    a1,
    a2,
    a3,
    a4,
    a5,
    a6,
    a7,
    s2,
    s3,
    s4,
    s5,
    s6,
    s7,
    s8,
    s9,
    s10,
    s11,
    t3,
    t4,
    t5,
    t6,
    pc,  // pc must be the last register.
    kNumSimuRegisters,
    // aliases
    fp = s0
  };

  // Coprocessor registers.
  // Generated code will always use doubles, so we only use even registers.
  enum FPURegister {
    ft0,
    ft1,
    ft2,
    ft3,
    ft4,
    ft5,
    ft6,
    ft7,
    fs0,
    fs1,
    fa0,
    fa1,
    fa2,
    fa3,
    fa4,
    fa5,
    fa6,
    fa7,
    fs2,
    fs3,
    fs4,
    fs5,
    fs6,
    fs7,
    fs8,
    fs9,
    fs10,
    fs11,
    ft8,
    ft9,
    ft10,
    ft11,
    kNumFPURegisters
  };

  enum VRegister {
    v0,
    v1,
    v2,
    v3,
    v4,
    v5,
    v6,
    v7,
    v8,
    v9,
    v10,
    v11,
    v12,
    v13,
    v14,
    v15,
    v16,
    v17,
    v18,
    v19,
    v20,
    v21,
    v22,
    v23,
    v24,
    v25,
    v26,
    v27,
    v28,
    v29,
    v30,
    v31,
    kNumVRegisters
  };

  explicit Simulator(Isolate* isolate);
  ~Simulator();

  // The currently executing Simulator instance. Potentially there can be one
  // for each native thread.
  V8_EXPORT_PRIVATE static Simulator* current(v8::internal::Isolate* isolate);

  // Accessors for register state. Reading the pc value adheres to the RISC-V
  // architecture specification and is off by 8 from the currently executing
  // instruction.
  void set_register(int reg, sreg_t value);
  void set_register_word(int reg, int32_t value);
  V8_EXPORT_PRIVATE sreg_t get_register(int reg) const;
  double get_double_from_register_pair(int reg);

  // Same for FPURegisters.
  void set_fpu_register(int fpureg, int64_t value);
  void set_fpu_register_word(int fpureg, int32_t value);
  void set_fpu_register_hi_word(int fpureg, int32_t value);
  void set_fpu_register_float(int fpureg, float value);
  void set_fpu_register_float(int fpureg, Float32 value);
  void set_fpu_register_double(int fpureg, double value);
  void set_fpu_register_double(int fpureg, Float64 value);

  int64_t get_fpu_register(int fpureg) const;
  int32_t get_fpu_register_word(int fpureg) const;
  int32_t get_fpu_register_signed_word(int fpureg) const;
  int32_t get_fpu_register_hi_word(int fpureg) const;
  float get_fpu_register_float(int fpureg) const;
  Float32 get_fpu_register_Float32(int fpureg, bool check_nanbox = true) const;
  double get_fpu_register_double(int fpureg) const;
  Float64 get_fpu_register_Float64(int fpureg) const;

  // RV CSR manipulation
  uint32_t read_csr_value(uint32_t csr);
  void write_csr_value(uint32_t csr, reg_t value);
  void set_csr_bits(uint32_t csr, reg_t flags);
  void clear_csr_bits(uint32_t csr, reg_t flags);

  void set_fflags(uint32_t flags) { set_csr_bits(csr_fflags, flags); }
  void clear_fflags(int32_t flags) { clear_csr_bits(csr_fflags, flags); }

#ifdef CAN_USE_RVV_INSTRUCTIONS
  // RVV CSR
  __int128_t get_vregister(int vreg) const;
  inline uint64_t rvv_vlen() const { return kRvvVLEN; }
  inline uint64_t rvv_vtype() const { return vtype_; }
  inline uint64_t rvv_vl() const { return vl_; }
  inline uint64_t rvv_vstart() const { return vstart_; }
  inline uint64_t rvv_vxsat() const { return vxsat_; }
  inline uint64_t rvv_vxrm() const { return vxrm_; }
  inline uint64_t rvv_vcsr() const { return vcsr_; }
  inline uint64_t rvv_vlenb() const { return vlenb_; }
  inline uint32_t rvv_zimm() const { return instr_.Rvvzimm(); }
  inline uint32_t rvv_vlmul() const { return (rvv_vtype() & 0x7); }
  inline float rvv_vflmul() const {
    if ((rvv_vtype() & 0b100) == 0) {
      return static_cast<float>(0x1 << (rvv_vtype() & 0x7));
    } else {
      return 1.0 / static_cast<float>(0x1 << (4 - (rvv_vtype() & 0x3)));
    }
  }
  inline uint32_t rvv_vsew() const { return ((rvv_vtype() >> 3) & 0x7); }

  inline const char* rvv_sew_s() const {
    uint32_t vsew = rvv_vsew();
    switch (vsew) {
#define CAST_VSEW(name) \
  case name:            \
    return #name;
      RVV_SEW(CAST_VSEW)
      default:
        return "unknown";
#undef CAST_VSEW
    }
  }

  inline const char* rvv_lmul_s() const {
    uint32_t vlmul = rvv_vlmul();
    switch (vlmul) {
#define CAST_VLMUL(name) \
  case name:             \
    return #name;
      RVV_LMUL(CAST_VLMUL)
      default:
        return "unknown";
#undef CAST_VLMUL
    }
  }

  // Returns the lane size (SEW) in bits: 8, 16, 32, or 64.
  inline uint32_t rvv_sew() const {
    DCHECK_EQ(rvv_vsew() & (~0x7), 0x0);
    return (0x1 << rvv_vsew()) * 8;
  }
  inline uint64_t rvv_vlmax() const {
    if ((rvv_vlmul() & 0b100) != 0) {
      return (rvv_vlen() / rvv_sew()) >> (4 - (rvv_vlmul() & 0b11));
    } else {
      return ((rvv_vlen() << rvv_vlmul()) / rvv_sew());
    }
  }
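  // For example, with VLEN = 128, SEW = 32 and LMUL = 2 (vlmul == 1), VLMAX
  // is (128 << 1) / 32 == 8 elements; with fractional LMUL = 1/2
  // (vlmul == 0b111), it is (128 / 32) >> 1 == 2 elements.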
#endif

  inline uint32_t get_dynamic_rounding_mode();
  inline bool test_fflags_bits(uint32_t mask);

  float RoundF2FHelper(float input_val, int rmode);
  double RoundF2FHelper(double input_val, int rmode);
  template <typename I_TYPE, typename F_TYPE>
  I_TYPE RoundF2IHelper(F_TYPE original, int rmode);

  template <typename T>
  T FMaxMinHelper(T a, T b, MaxMinKind kind);

  template <typename T>
  bool CompareFHelper(T input1, T input2, FPUCondition cc);

  // Special case of set_register and get_register to access the raw PC value.
  void set_pc(sreg_t value);
  V8_EXPORT_PRIVATE sreg_t get_pc() const;

  Address get_sp() const { return static_cast<Address>(get_register(sp)); }

  // Accessor to the internal simulator stack area. Adds a safety
  // margin to prevent overflows (kAdditionalStackMargin).
  uintptr_t StackLimit(uintptr_t c_limit) const;
  uintptr_t StackBase() const;
  // Return central stack view, without additional safety margins.
  // Users, for example wasm::StackMemory, can add their own.
  base::Vector<uint8_t> GetCentralStackView() const;

  void IterateRegistersAndStack(::heap::base::StackVisitor* visitor);

  // Pseudo instruction for switching stack limit
  void DoSwitchStackLimit(Instruction* instr);

  // Executes RISC-V instructions until the PC reaches end_sim_pc.
  void Execute();

  // Only arguments up to 64 bits in size are supported.
  class CallArgument {
   public:
    template <typename T>
    explicit CallArgument(T argument) {
      bits_ = 0;
      DCHECK(sizeof(argument) <= sizeof(bits_));
      bits_ = ConvertArg(argument);
      type_ = GP_ARG;
    }
    explicit CallArgument(double argument) {
      DCHECK(sizeof(argument) == sizeof(bits_));
      memcpy(&bits_, &argument, sizeof(argument));
      type_ = FP_ARG;
    }
    explicit CallArgument(float argument) {
      // TODO(all): CallArgument(float) is untested.
      UNIMPLEMENTED();
    }
    // This indicates the end of the arguments list, so that CallArgument
    // objects can be passed into varargs functions.
    static CallArgument End() { return CallArgument(); }
    int64_t bits() const { return bits_; }
    bool IsEnd() const { return type_ == NO_ARG; }
    bool IsGP() const { return type_ == GP_ARG; }
    bool IsFP() const { return type_ == FP_ARG; }

   private:
    enum CallArgumentType { GP_ARG, FP_ARG, NO_ARG };
    // All arguments are aligned to at least 64 bits and we don't support
    // passing bigger arguments, so the payload size can be fixed at 64 bits.
    int64_t bits_;
    CallArgumentType type_;
    CallArgument() { type_ = NO_ARG; }
  };

  template <typename Return, typename... Args>
  Return Call(Address entry, Args... args) {
#ifdef V8_TARGET_ARCH_RISCV64
    // Convert all arguments to CallArgument.
    CallArgument call_args[] = {CallArgument(args)..., CallArgument::End()};
    CallImpl(entry, call_args);
    return ReadReturn<Return>();
#else
    return VariadicCall<Return>(this, &Simulator::CallImpl, entry, args...);
#endif
  }
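  // Illustrative usage from C++ test code (entry point and argument values
  // are hypothetical): on RISCV64,
  //   int64_t result = sim->Call<int64_t>(entry, int64_t{1}, int64_t{2});
  // wraps each argument in a CallArgument, appends CallArgument::End(), and
  // hands the array to CallImpl, which marshals the values into argument
  // registers before starting simulated execution.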
  // Alternative: call a 2-argument double function.
  double CallFP(Address entry, double d0, double d1);

  // Push an address onto the JS stack.
  V8_EXPORT_PRIVATE uintptr_t PushAddress(uintptr_t address);

  // Pop an address from the JS stack.
  V8_EXPORT_PRIVATE uintptr_t PopAddress();

  // Debugger input.
  void set_last_debugger_input(char* input);
  char* last_debugger_input() { return last_debugger_input_; }

  // Redirection support.
  static void SetRedirectInstruction(Instruction* instruction);

  // ICache checking.
  static bool ICacheMatch(void* one, void* two);
  static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
                          size_t size);

  // Returns true if the pc register contains one of the 'special_values'
  // defined below (bad_ra, end_sim_pc).
  bool has_bad_pc() const;

 private:
  enum special_values {
    // Known bad pc value to ensure that the simulator does not execute
    // without being properly set up.
    bad_ra = -1,
    // A pc value used to signal the simulator to stop execution. Generally
    // the ra is set to this value on transition from native C code to
    // simulated execution, so that the simulator can "return" to the native
    // C code.
    end_sim_pc = -2,
    // Unpredictable value.
    Unpredictable = 0xbadbeaf
  };

#ifdef V8_TARGET_ARCH_RISCV64
  V8_EXPORT_PRIVATE void CallImpl(Address entry, CallArgument* args);
  void CallAnyCTypeFunction(Address target_address,
                            const EncodedCSignature& signature);
  // Read floating-point return values.
  template <typename T>
  typename std::enable_if<std::is_floating_point<T>::value, T>::type
  ReadReturn() {
    return static_cast<T>(get_fpu_register_double(fa0));
  }
  // Read non-float return values.
  template <typename T>
  typename std::enable_if<!std::is_floating_point<T>::value, T>::type
  ReadReturn() {
    return ConvertReturn<T>(get_register(a0));
  }
#else
  V8_EXPORT_PRIVATE intptr_t CallImpl(Address entry, int argument_count,
                                      const intptr_t* arguments);
#endif
  // Unsupported instructions use Format to print an error and stop execution.
  void Format(Instruction* instr, const char* format);

  // Helpers for data value tracing.
  enum TraceType {
    BYTE,
    HALF,
    WORD,
#if V8_TARGET_ARCH_RISCV64
    DWORD,
#endif
    FLOAT,
    DOUBLE,
    // FLOAT_DOUBLE,
    // WORD_DWORD
  };

  // "Probe" if an address range can be read. This is currently implemented
  // by doing a 1-byte read of the last accessed byte, since the assumption
  // is that if the last byte is accessible, all lower bytes are also
  // accessible (which holds true for Wasm).
  // Returns true if the access was successful, false if the access raised a
  // signal which was then handled by the trap handler (also see
  // {trap_handler::ProbeMemory}). If the access raises a signal which is not
  // handled by the trap handler (e.g. because the current PC is not
  // registered as a protected instruction), the signal will propagate and
  // make the process crash. If no trap handler is available, this always
  // returns true.
  bool ProbeMemory(uintptr_t address, uintptr_t access_size);

  // RISCV Memory read/write methods
  template <typename T>
  T ReadMem(sreg_t addr, Instruction* instr);
  template <typename T>
  void WriteMem(sreg_t addr, T value, Instruction* instr);
  template <typename T, typename OP>
  T amo(sreg_t addr, OP f, Instruction* instr, TraceType t) {
    auto lhs = ReadMem<T>(addr, instr);
    // TODO(RISCV): trace memory read for AMO
    WriteMem<T>(addr, (T)f(lhs), instr);
    return lhs;
  }
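  // Sketch of how an AMO instruction can use this helper (operand wiring is
  // illustrative): AMOADD.W amounts to
  //   amo<int32_t>(addr, [&](int32_t lhs) { return lhs + (int32_t)rs2(); },
  //                instr, WORD);
  // which returns the old memory value while storing back the sum, i.e. the
  // read-modify-write semantics of the RISC-V A extension.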

  // Helper for debugging memory access.
  inline void DieOrDebug();

#if V8_TARGET_ARCH_RISCV32
  template <typename T>
  void TraceRegWr(T value, TraceType t = WORD);
#elif V8_TARGET_ARCH_RISCV64
  void TraceRegWr(sreg_t value, TraceType t = DWORD);
#endif
  void TraceMemWr(sreg_t addr, sreg_t value, TraceType t);
  template <typename T>
  void TraceMemRd(sreg_t addr, T value, sreg_t reg_value);
  void TraceMemRdDouble(sreg_t addr, double value, int64_t reg_value);
  void TraceMemRdDouble(sreg_t addr, Float64 value, int64_t reg_value);
  void TraceMemRdFloat(sreg_t addr, Float32 value, int64_t reg_value);

  template <typename T>
  void TraceMemWr(sreg_t addr, T value);
  void TraceMemWrDouble(sreg_t addr, double value);

  SimInstruction instr_;

  // RISCV utility API to access register values
  inline int32_t rs1_reg() const { return instr_.Rs1Value(); }
  inline sreg_t rs1() const { return get_register(rs1_reg()); }
  inline float frs1() const { return get_fpu_register_float(rs1_reg()); }
  inline double drs1() const { return get_fpu_register_double(rs1_reg()); }
  inline Float32 frs1_boxed() const {
    return get_fpu_register_Float32(rs1_reg());
  }
  inline Float64 drs1_boxed() const {
    return get_fpu_register_Float64(rs1_reg());
  }
  inline int32_t rs2_reg() const { return instr_.Rs2Value(); }
  inline sreg_t rs2() const { return get_register(rs2_reg()); }
  inline float frs2() const { return get_fpu_register_float(rs2_reg()); }
  inline double drs2() const { return get_fpu_register_double(rs2_reg()); }
  inline Float32 frs2_boxed() const {
    return get_fpu_register_Float32(rs2_reg());
  }
  inline Float64 drs2_boxed() const {
    return get_fpu_register_Float64(rs2_reg());
  }
  inline int32_t rs3_reg() const { return instr_.Rs3Value(); }
  inline sreg_t rs3() const { return get_register(rs3_reg()); }
  inline float frs3() const { return get_fpu_register_float(rs3_reg()); }
  inline double drs3() const { return get_fpu_register_double(rs3_reg()); }
  inline Float32 frs3_boxed() const {
    return get_fpu_register_Float32(rs3_reg());
  }
  inline Float64 drs3_boxed() const {
    return get_fpu_register_Float64(rs3_reg());
  }
  inline int32_t rd_reg() const { return instr_.RdValue(); }
  inline int32_t frd_reg() const { return instr_.RdValue(); }
  inline int32_t rvc_rs1_reg() const { return instr_.RvcRs1Value(); }
  inline sreg_t rvc_rs1() const { return get_register(rvc_rs1_reg()); }
  inline int32_t rvc_rs2_reg() const { return instr_.RvcRs2Value(); }
  inline sreg_t rvc_rs2() const { return get_register(rvc_rs2_reg()); }
  inline double rvc_drs2() const {
    return get_fpu_register_double(rvc_rs2_reg());
  }
  inline int32_t rvc_rs1s_reg() const { return instr_.RvcRs1sValue(); }
  inline sreg_t rvc_rs1s() const { return get_register(rvc_rs1s_reg()); }
  inline int32_t rvc_rs2s_reg() const { return instr_.RvcRs2sValue(); }
  inline sreg_t rvc_rs2s() const { return get_register(rvc_rs2s_reg()); }
  inline double rvc_drs2s() const {
    return get_fpu_register_double(rvc_rs2s_reg());
  }
  inline int32_t rvc_rd_reg() const { return instr_.RvcRdValue(); }
  inline int32_t rvc_frd_reg() const { return instr_.RvcRdValue(); }
  inline int16_t boffset() const { return instr_.BranchOffset(); }
  inline int16_t imm12() const { return instr_.Imm12Value(); }
  inline int32_t imm20J() const { return instr_.Imm20JValue(); }
  inline int32_t imm5CSR() const { return instr_.Rs1Value(); }
  inline int16_t csr_reg() const { return instr_.CsrValue(); }
  inline int16_t rvc_imm6() const { return instr_.RvcImm6Value(); }
  inline int16_t rvc_imm6_addi16sp() const {
    return instr_.RvcImm6Addi16spValue();
  }
  inline int16_t rvc_imm8_addi4spn() const {
    return instr_.RvcImm8Addi4spnValue();
  }
  inline int16_t rvc_imm6_lwsp() const { return instr_.RvcImm6LwspValue(); }
  inline int16_t rvc_imm6_ldsp() const { return instr_.RvcImm6LdspValue(); }
  inline int16_t rvc_imm6_swsp() const { return instr_.RvcImm6SwspValue(); }
  inline int16_t rvc_imm6_sdsp() const { return instr_.RvcImm6SdspValue(); }
  inline int16_t rvc_imm5_w() const { return instr_.RvcImm5WValue(); }
  inline int16_t rvc_imm5_d() const { return instr_.RvcImm5DValue(); }
  inline int16_t rvc_imm8_b() const { return instr_.RvcImm8BValue(); }

  inline void set_rd(sreg_t value, bool trace = true) {
    set_register(rd_reg(), value);
#if V8_TARGET_ARCH_RISCV64
    if (trace) TraceRegWr(get_register(rd_reg()), DWORD);
#elif V8_TARGET_ARCH_RISCV32
    if (trace) TraceRegWr(get_register(rd_reg()), WORD);
#endif
  }
  inline void set_frd(float value, bool trace = true) {
    set_fpu_register_float(rd_reg(), value);
    if (trace) TraceRegWr(get_fpu_register_word(rd_reg()), FLOAT);
  }
  inline void set_frd(Float32 value, bool trace = true) {
    set_fpu_register_float(rd_reg(), value);
    if (trace) TraceRegWr(get_fpu_register_word(rd_reg()), FLOAT);
  }
  inline void set_drd(double value, bool trace = true) {
    set_fpu_register_double(rd_reg(), value);
    if (trace) TraceRegWr(get_fpu_register(rd_reg()), DOUBLE);
  }
  inline void set_drd(Float64 value, bool trace = true) {
    set_fpu_register_double(rd_reg(), value);
    if (trace) TraceRegWr(get_fpu_register(rd_reg()), DOUBLE);
  }
  inline void set_rvc_rd(sreg_t value, bool trace = true) {
    set_register(rvc_rd_reg(), value);
#if V8_TARGET_ARCH_RISCV64
    if (trace) TraceRegWr(get_register(rvc_rd_reg()), DWORD);
#elif V8_TARGET_ARCH_RISCV32
    if (trace) TraceRegWr(get_register(rvc_rd_reg()), WORD);
#endif
  }
  inline void set_rvc_rs1s(sreg_t value, bool trace = true) {
    set_register(rvc_rs1s_reg(), value);
#if V8_TARGET_ARCH_RISCV64
    if (trace) TraceRegWr(get_register(rvc_rs1s_reg()), DWORD);
#elif V8_TARGET_ARCH_RISCV32
    if (trace) TraceRegWr(get_register(rvc_rs1s_reg()), WORD);
#endif
  }
  inline void set_rvc_rs2(sreg_t value, bool trace = true) {
    set_register(rvc_rs2_reg(), value);
#if V8_TARGET_ARCH_RISCV64
    if (trace) TraceRegWr(get_register(rvc_rs2_reg()), DWORD);
#elif V8_TARGET_ARCH_RISCV32
    if (trace) TraceRegWr(get_register(rvc_rs2_reg()), WORD);
#endif
  }
  inline void set_rvc_drd(double value, bool trace = true) {
    set_fpu_register_double(rvc_rd_reg(), value);
    if (trace) TraceRegWr(get_fpu_register(rvc_rd_reg()), DOUBLE);
  }
  inline void set_rvc_drd(Float64 value, bool trace = true) {
    set_fpu_register_double(rvc_rd_reg(), value);
    if (trace) TraceRegWr(get_fpu_register(rvc_rd_reg()), DOUBLE);
  }
  inline void set_rvc_frd(Float32 value, bool trace = true) {
    set_fpu_register_float(rvc_rd_reg(), value);
    if (trace) TraceRegWr(get_fpu_register(rvc_rd_reg()), DOUBLE);
  }
  inline void set_rvc_rs2s(sreg_t value, bool trace = true) {
    set_register(rvc_rs2s_reg(), value);
#if V8_TARGET_ARCH_RISCV64
    if (trace) TraceRegWr(get_register(rvc_rs2s_reg()), DWORD);
#elif V8_TARGET_ARCH_RISCV32
    if (trace) TraceRegWr(get_register(rvc_rs2s_reg()), WORD);
#endif
  }
  inline void set_rvc_drs2s(double value, bool trace = true) {
    set_fpu_register_double(rvc_rs2s_reg(), value);
    if (trace) TraceRegWr(get_fpu_register(rvc_rs2s_reg()), DOUBLE);
  }
  inline void set_rvc_drs2s(Float64 value, bool trace = true) {
    set_fpu_register_double(rvc_rs2s_reg(), value);
    if (trace) TraceRegWr(get_fpu_register(rvc_rs2s_reg()), DOUBLE);
  }

  inline void set_rvc_frs2s(Float32 value, bool trace = true) {
    set_fpu_register_float(rvc_rs2s_reg(), value);
    if (trace) TraceRegWr(get_fpu_register(rvc_rs2s_reg()), FLOAT);
  }
  inline int16_t shamt6() const { return (imm12() & 0x3F); }
  inline int16_t shamt5() const { return (imm12() & 0x1F); }
  inline int16_t rvc_shamt6() const { return instr_.RvcShamt6(); }
  inline int32_t s_imm12() const { return instr_.StoreOffset(); }
  inline int32_t u_imm20() const { return instr_.Imm20UValue() << 12; }
  inline int32_t rvc_u_imm6() const { return instr_.RvcImm6Value() << 12; }
  inline void require(bool check) {
    if (!check) {
      SignalException(kIllegalInstruction);
    }
  }

#ifdef CAN_USE_RVV_INSTRUCTIONS
  inline void rvv_trace_vd() {
    if (v8_flags.trace_sim) {
      __int128_t value = Vregister_[rvv_vd_reg()];
      SNPrintF(trace_buf_, "%016" PRIx64 "%016" PRIx64 " (%" PRId64 ")",
               *(reinterpret_cast<int64_t*>(&value) + 1),
               *reinterpret_cast<int64_t*>(&value), icount_);
    }
  }

  inline void rvv_trace_vs1() {
    if (v8_flags.trace_sim) {
      PrintF("\t%s:0x%016" PRIx64 "%016" PRIx64 "\n",
             v8::internal::VRegisters::Name(static_cast<int>(rvv_vs1_reg())),
             (uint64_t)(get_vregister(static_cast<int>(rvv_vs1_reg())) >> 64),
             (uint64_t)get_vregister(static_cast<int>(rvv_vs1_reg())));
    }
  }

  inline void rvv_trace_vs2() {
    if (v8_flags.trace_sim) {
      PrintF("\t%s:0x%016" PRIx64 "%016" PRIx64 "\n",
             v8::internal::VRegisters::Name(static_cast<int>(rvv_vs2_reg())),
             (uint64_t)(get_vregister(static_cast<int>(rvv_vs2_reg())) >> 64),
             (uint64_t)get_vregister(static_cast<int>(rvv_vs2_reg())));
    }
  }
  inline void rvv_trace_v0() {
    if (v8_flags.trace_sim) {
      PrintF("\t%s:0x%016" PRIx64 "%016" PRIx64 "\n",
             v8::internal::VRegisters::Name(static_cast<int>(v0)),
             (uint64_t)(get_vregister(v0) >> 64), (uint64_t)get_vregister(v0));
    }
  }

  inline void rvv_trace_rs1() {
    if (v8_flags.trace_sim) {
      PrintF("\t%s:0x%016" PRIx64 "\n",
             v8::internal::Registers::Name(static_cast<int>(rs1_reg())),
             (uint64_t)(get_register(rs1_reg())));
    }
  }

  inline void rvv_trace_status() {
    if (v8_flags.trace_sim) {
      int i = 0;
      for (; i < trace_buf_.length(); i++) {
        if (trace_buf_[i] == '\0') break;
      }
      SNPrintF(trace_buf_.SubVector(i, trace_buf_.length()),
               " sew:%s lmul:%s vstart:%" PRId64 " vl:%" PRId64, rvv_sew_s(),
               rvv_lmul_s(), rvv_vstart(), rvv_vl());
    }
  }

  template <class T>
  T& Rvvelt(reg_t vReg, uint64_t n, bool is_write = false) {
    CHECK_NE(rvv_sew(), 0);
    CHECK_GT((rvv_vlen() >> 3) / sizeof(T), 0);
    reg_t elts_per_reg = (rvv_vlen() >> 3) / (sizeof(T));
    vReg += n / elts_per_reg;
    n = n % elts_per_reg;
    T* regStart = reinterpret_cast<T*>(reinterpret_cast<char*>(Vregister_) +
                                       vReg * (rvv_vlen() >> 3));
    return regStart[n];
  }
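  // For example, with VLEN = 128 and T = uint32_t there are four elements
  // per vector register, so Rvvelt<uint32_t>(v2, 5) resolves to element 1 of
  // v3 (5 / 4 == 1 register forward, 5 % 4 == 1 within it).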

  inline int32_t rvv_vs1_reg() { return instr_.Vs1Value(); }
  inline reg_t rvv_vs1() { UNIMPLEMENTED(); }
  inline int32_t rvv_vs2_reg() { return instr_.Vs2Value(); }
  inline reg_t rvv_vs2() { UNIMPLEMENTED(); }
  inline int32_t rvv_vd_reg() { return instr_.VdValue(); }
  inline int32_t rvv_vs3_reg() { return instr_.VdValue(); }
  inline reg_t rvv_vd() { UNIMPLEMENTED(); }
  inline int32_t rvv_nf() {
    return (instr_.InstructionBits() & kRvvNfMask) >> kRvvNfShift;
  }

  inline void set_vrd() { UNIMPLEMENTED(); }

  inline void set_rvv_vtype(uint64_t value, bool trace = true) {
    vtype_ = value;
  }
  inline void set_rvv_vl(uint64_t value, bool trace = true) { vl_ = value; }
  inline void set_rvv_vstart(uint64_t value, bool trace = true) {
    vstart_ = value;
  }
  inline void set_rvv_vxsat(uint64_t value, bool trace = true) {
    vxsat_ = value;
  }
  inline void set_rvv_vxrm(uint64_t value, bool trace = true) { vxrm_ = value; }
  inline void set_rvv_vcsr(uint64_t value, bool trace = true) { vcsr_ = value; }
  inline void set_rvv_vlenb(uint64_t value, bool trace = true) {
    vlenb_ = value;
  }
#endif

  template <typename T, typename Func>
  inline T CanonicalizeFPUOpFMA(Func fn, T dst, T src1, T src2) {
    static_assert(std::is_floating_point<T>::value);
    auto alu_out = fn(dst, src1, src2);
    // If any input or result is NaN, the result is quiet_NaN.
    if (std::isnan(alu_out) || std::isnan(src1) || std::isnan(src2) ||
        std::isnan(dst)) {
      // A signaling NaN sets the kInvalidOperation bit.
      if (isSnan(alu_out) || isSnan(src1) || isSnan(src2) || isSnan(dst))
        set_fflags(kInvalidOperation);
      alu_out = std::numeric_limits<T>::quiet_NaN();
    }
    return alu_out;
  }

  template <typename T, typename Func>
  inline T CanonicalizeFPUOp3(Func fn) {
    static_assert(std::is_floating_point<T>::value);
    T src1 = std::is_same<float, T>::value ? frs1() : drs1();
    T src2 = std::is_same<float, T>::value ? frs2() : drs2();
    T src3 = std::is_same<float, T>::value ? frs3() : drs3();
    auto alu_out = fn(src1, src2, src3);
    // If any input or result is NaN, the result is quiet_NaN.
    if (std::isnan(alu_out) || std::isnan(src1) || std::isnan(src2) ||
        std::isnan(src3)) {
      // A signaling NaN sets the kInvalidOperation bit.
      if (isSnan(alu_out) || isSnan(src1) || isSnan(src2) || isSnan(src3))
        set_fflags(kInvalidOperation);
      alu_out = std::numeric_limits<T>::quiet_NaN();
    }
    return alu_out;
  }

  template <typename T, typename Func>
  inline T CanonicalizeFPUOp2(Func fn) {
    static_assert(std::is_floating_point<T>::value);
    T src1 = std::is_same<float, T>::value ? frs1() : drs1();
    T src2 = std::is_same<float, T>::value ? frs2() : drs2();
    auto alu_out = fn(src1, src2);
    // If any input or result is NaN, the result is quiet_NaN.
    if (std::isnan(alu_out) || std::isnan(src1) || std::isnan(src2)) {
      // A signaling NaN sets the kInvalidOperation bit.
      if (isSnan(alu_out) || isSnan(src1) || isSnan(src2))
        set_fflags(kInvalidOperation);
      alu_out = std::numeric_limits<T>::quiet_NaN();
    }
    return alu_out;
  }

  template <typename T, typename Func>
  inline T CanonicalizeFPUOp1(Func fn) {
    static_assert(std::is_floating_point<T>::value);
    T src1 = std::is_same<float, T>::value ? frs1() : drs1();
    auto alu_out = fn(src1);
    // If any input or result is NaN, the result is quiet_NaN.
    if (std::isnan(alu_out) || std::isnan(src1)) {
      // A signaling NaN sets the kInvalidOperation bit.
      if (isSnan(alu_out) || isSnan(src1)) set_fflags(kInvalidOperation);
      alu_out = std::numeric_limits<T>::quiet_NaN();
    }
    return alu_out;
  }
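
  // For example, an fadd.s whose input is a signaling NaN both raises the
  // invalid-operation flag in fflags and produces the canonical quiet NaN,
  // matching the RISC-V F/D NaN-canonicalization rules.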

  template <typename Func>
  inline float CanonicalizeDoubleToFloatOperation(Func fn) {
    float alu_out = fn(drs1());
    if (std::isnan(alu_out) || std::isnan(drs1()))
      alu_out = std::numeric_limits<float>::quiet_NaN();
    return alu_out;
  }

  template <typename Func>
  inline float CanonicalizeDoubleToFloatOperation(Func fn, double frs) {
    float alu_out = fn(frs);
    if (std::isnan(alu_out) || std::isnan(drs1()))
      alu_out = std::numeric_limits<float>::quiet_NaN();
    return alu_out;
  }

  template <typename Func>
  inline double CanonicalizeFloatToDoubleOperation(Func fn, float frs) {
    double alu_out = fn(frs);
    if (std::isnan(alu_out) || std::isnan(frs1()))
      alu_out = std::numeric_limits<double>::quiet_NaN();
    return alu_out;
  }

  template <typename Func>
  inline double CanonicalizeFloatToDoubleOperation(Func fn) {
    double alu_out = fn(frs1());
    if (std::isnan(alu_out) || std::isnan(frs1()))
      alu_out = std::numeric_limits<double>::quiet_NaN();
    return alu_out;
  }

  Builtin LookUp(Address pc);
  // RISCV decoding routines
  void DecodeRVRType();
  void DecodeRVR4Type();
  void DecodeRVRFPType();  // Special routine for R/OP_FP type
  void DecodeRVRAType();   // Special routine for R/AMO type
  void DecodeRVIType();
  void DecodeRVSType();
  void DecodeRVBType();
  void DecodeRVUType();
  void DecodeRVJType();
  void DecodeCRType();
  void DecodeCAType();
  void DecodeCIType();
  void DecodeCIWType();
  void DecodeCSSType();
  void DecodeCLType();
  void DecodeCSType();
  void DecodeCJType();
  void DecodeCBType();
#ifdef CAN_USE_RVV_INSTRUCTIONS
  void DecodeVType();
  void DecodeRvvIVV();
  void DecodeRvvIVI();
  void DecodeRvvIVX();
  void DecodeRvvMVV();
  void DecodeRvvMVX();
  void DecodeRvvFVV();
  void DecodeRvvFVF();
  bool DecodeRvvVL();
  bool DecodeRvvVS();
#endif

  // Used for breakpoints and traps.
  void SoftwareInterrupt();

  // Debug helpers

  // Simulator breakpoints.
  struct Breakpoint {
    Instruction* location;
    bool enabled;
    bool is_tbreak;
  };
  std::vector<Breakpoint> breakpoints_;
  void SetBreakpoint(Instruction* breakpoint, bool is_tbreak);
  void ListBreakpoints();
  void CheckBreakpoints();

  // Stop helper functions.
  bool IsWatchpoint(reg_t code);
  bool IsTracepoint(reg_t code);
  bool IsSwitchStackLimit(reg_t code);
  void PrintWatchpoint(reg_t code);
  void HandleStop(reg_t code);
  bool IsStopInstruction(Instruction* instr);
  bool IsEnabledStop(reg_t code);
  void EnableStop(reg_t code);
  void DisableStop(reg_t code);
  void IncreaseStopCounter(reg_t code);
  void PrintStopInfo(reg_t code);

  // Executes one instruction.
  void InstructionDecode(Instruction* instr);

  // ICache.
  static void CheckICache(base::CustomMatcherHashMap* i_cache,
                          Instruction* instr);
  static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start,
                           size_t size);
  static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
                                 void* page);

  enum Exception {
    none,
    kIntegerOverflow,
    kIntegerUnderflow,
    kNumExceptions,
    // RISC-V illegal instruction exception
    kIllegalInstruction,
  };

  // Exceptions.
  void SignalException(Exception e);

  // Handle arguments and return value for runtime FP functions.
  void GetFpArgs(double* x, double* y, int32_t* z);
  void SetFpResult(const double& result);

  void CallInternal(Address entry);

  // Architecture state.
  // Registers.
  sreg_t registers_[kNumSimuRegisters];
  // Coprocessor Registers.
  sfreg_t FPUregisters_[kNumFPURegisters];
  // Floating-point control and status register.
  uint32_t FCSR_;

#ifdef CAN_USE_RVV_INSTRUCTIONS
  // RVV registers
  __int128_t Vregister_[kNumVRegisters];
  static_assert(sizeof(__int128_t) == kRvvVLEN / 8, "mismatched vlen");
  uint64_t vstart_, vxsat_, vxrm_, vcsr_, vtype_, vl_, vlenb_;
#endif
  // Simulator support.
  // Allocate 1MB for stack.
  uintptr_t stack_;
  static const size_t kStackProtectionSize = 256 * kSystemPointerSize;
  // This includes a protection margin at each end of the stack area.
  static size_t AllocatedStackSize() {
#if V8_TARGET_ARCH_RISCV64
    size_t stack_size = v8_flags.sim_stack_size * KB;
#else
    size_t stack_size = 1 * MB;  // allocate 1MB for stack
#endif
    return stack_size + (2 * kStackProtectionSize);
  }
  static size_t UsableStackSize() {
    return AllocatedStackSize() - kStackProtectionSize;
  }
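  // For example, on RISCV32 this allocates 1 MB of stack plus a
  // kStackProtectionSize margin at each end, and UsableStackSize() excludes
  // one of those margins from the total.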

  uintptr_t stack_limit_;
  // Added in Simulator::StackLimit()
  static const int kAdditionalStackMargin = 4 * KB;

  bool pc_modified_;
  int64_t icount_;
  sreg_t* watch_address_ = nullptr;
  sreg_t watch_value_ = 0;
  int break_count_;
  base::EmbeddedVector<char, 256> trace_buf_;

  // Debugger input.
  char* last_debugger_input_;

  v8::internal::Isolate* isolate_;
  v8::internal::Builtins builtins_;

  // Stop is disabled if bit 31 is set.
  static const uint32_t kStopDisabledBit = 1 << 31;

  // A stop is enabled (meaning the simulator will stop when it reaches the
  // instruction) if bit 31 of watched_stops_[code].count is unset. The value
  // watched_stops_[code].count & ~(1 << 31) indicates how many times the
  // breakpoint was hit or gone through.
  struct StopCountAndDesc {
    uint32_t count;
    char* desc;
  };
  StopCountAndDesc watched_stops_[kMaxStopCode + 1];

  // Synchronization primitives.
  enum class MonitorAccess {
    Open,
    RMW,
  };

  enum class TransactionSize {
    None = 0,
    Word = 4,
    DoubleWord = 8,
  };

  // The least-significant bits of the address are ignored. The number of
  // bits is implementation-defined, between 3 and the minimum page size.
  static const uintptr_t kExclusiveTaggedAddrMask = ~((1 << 3) - 1);
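  // With the low 3 bits ignored, addresses 0x1000 and 0x1007 fall under the
  // same reservation tag, for example, while 0x1008 does not.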

  class LocalMonitor {
   public:
    LocalMonitor();

    // These functions manage the state machine for the local monitor, but do
    // not actually perform loads and stores. NotifyStoreConditional only
    // returns true if the store conditional is allowed; the global monitor
    // will still have to be checked to see whether the memory should be
    // updated.
    void NotifyLoad();
    void NotifyLoadLinked(uintptr_t addr, TransactionSize size);
    void NotifyStore();
    bool NotifyStoreConditional(uintptr_t addr, TransactionSize size);

   private:
    void Clear();

    MonitorAccess access_state_;
    uintptr_t tagged_addr_;
    TransactionSize size_;
  };

  class GlobalMonitor {
   public:
    class LinkedAddress {
     public:
      LinkedAddress();

     private:
      friend class GlobalMonitor;
      // These functions manage the state machine for the global monitor, but
      // do not actually perform loads and stores.
      void Clear_Locked();
      void NotifyLoadLinked_Locked(uintptr_t addr);
      void NotifyStore_Locked();
      bool NotifyStoreConditional_Locked(uintptr_t addr,
                                         bool is_requesting_thread);

      MonitorAccess access_state_;
      uintptr_t tagged_addr_;
      LinkedAddress* next_;
      LinkedAddress* prev_;
      // A sc.d can fail due to background cache evictions. Rather than
      // simulating this, we'll just occasionally introduce cases where a
      // store conditional fails. This will happen once after every
      // kMaxFailureCounter exclusive stores.
      static const int kMaxFailureCounter = 5;
      int failure_counter_;
    };

    // Exposed so it can be accessed by Simulator::{Read,Write}Ex*.
    base::Mutex mutex;

    void NotifyLoadLinked_Locked(uintptr_t addr, LinkedAddress* linked_address);
    void NotifyStore_Locked(LinkedAddress* linked_address);
    bool NotifyStoreConditional_Locked(uintptr_t addr,
                                       LinkedAddress* linked_address);

    // Called when the simulator is destroyed.
    void RemoveLinkedAddress(LinkedAddress* linked_address);

    static GlobalMonitor* Get();

   private:
    // Private constructor. Call {GlobalMonitor::Get()} to get the singleton.
    GlobalMonitor() = default;
    friend class base::LeakyObject<GlobalMonitor>;

    bool IsProcessorInLinkedList_Locked(LinkedAddress* linked_address) const;
    void PrependProcessor_Locked(LinkedAddress* linked_address);

    LinkedAddress* head_ = nullptr;
  };

  LocalMonitor local_monitor_;
  GlobalMonitor::LinkedAddress global_monitor_thread_;
};
}  // namespace internal
}  // namespace v8

#endif  // defined(USE_SIMULATOR)
#endif  // V8_EXECUTION_RISCV_SIMULATOR_RISCV_H_