46#if defined(USE_SIMULATOR)
66#if V8_ENABLE_WEBASSEMBLY
70#if V8_TARGET_ARCH_RISCV64
71#define REGIx_FORMAT PRIx64
72#define REGId_FORMAT PRId64
73#elif V8_TARGET_ARCH_RISCV32
74#define REGIx_FORMAT PRIx32
75#define REGId_FORMAT PRId32
104#ifdef CAN_USE_RVV_INSTRUCTIONS
105static inline bool is_aligned(
const unsigned val,
const unsigned pos) {
106 return pos ? (val & (
pos - 1)) == 0 : true;
109static inline bool is_overlapped(
const int astart,
int asize,
const int bstart,
111 asize = asize == 0 ? 1 : asize;
112 bsize = bsize == 0 ? 1 : bsize;
114 const int aend = astart + asize;
115 const int bend = bstart + bsize;
117 return std::max(aend, bend) - std::min(astart, bstart) < asize + bsize;
119static inline bool is_overlapped_widen(
const int astart,
int asize,
120 const int bstart,
int bsize) {
121 asize = asize == 0 ? 1 : asize;
122 bsize = bsize == 0 ? 1 : bsize;
124 const int aend = astart + asize;
125 const int bend = bstart + bsize;
127 if (astart < bstart && is_overlapped(astart, asize, bstart, bsize) &&
128 !is_overlapped(astart, asize, bstart + bsize, bsize)) {
131 return std::max(aend, bend) - std::min(astart, bstart) < asize + bsize;
136#define require_align(val, pos) \
137 if (!is_aligned(val, pos)) { \
138 std::cout << val << " " << pos << std::endl; \
140 CHECK_EQ(is_aligned(val, pos), true)
142#define require_align(val, pos) CHECK_EQ(is_aligned(val, pos), true)
175struct type_usew_t<8> {
176 using type = uint8_t;
180struct type_usew_t<16> {
185struct type_usew_t<32> {
186 using type = uint32_t;
190struct type_usew_t<64> {
191 using type = uint64_t;
195struct type_usew_t<128> {
196 using type = __uint128_t;
202struct type_sew_t<8> {
207struct type_sew_t<16> {
212struct type_sew_t<32> {
217struct type_sew_t<64> {
218 using type = int64_t;
222struct type_sew_t<128> {
223 using type = __int128_t;
226#define VV_PARAMS(x) \
227 type_sew_t<x>::type& vd = \
228 Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
229 type_sew_t<x>::type vs1 = Rvvelt<type_sew_t<x>::type>(rvv_vs1_reg(), i); \
230 type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
232#define VV_UPARAMS(x) \
233 type_usew_t<x>::type& vd = \
234 Rvvelt<type_usew_t<x>::type>(rvv_vd_reg(), i, true); \
235 type_usew_t<x>::type vs1 = Rvvelt<type_usew_t<x>::type>(rvv_vs1_reg(), i); \
236 type_usew_t<x>::type vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
238#define VX_PARAMS(x) \
239 type_sew_t<x>::type& vd = \
240 Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
241 type_sew_t<x>::type rs1 = (type_sew_t<x>::type)(get_register(rs1_reg())); \
242 type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
244#define VX_UPARAMS(x) \
245 type_usew_t<x>::type& vd = \
246 Rvvelt<type_usew_t<x>::type>(rvv_vd_reg(), i, true); \
247 type_usew_t<x>::type rs1 = (type_usew_t<x>::type)(get_register(rs1_reg())); \
248 type_usew_t<x>::type vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
250#define VI_PARAMS(x) \
251 type_sew_t<x>::type& vd = \
252 Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
253 type_sew_t<x>::type simm5 = (type_sew_t<x>::type)(instr_.RvvSimm5()); \
254 type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
256#define VI_UPARAMS(x) \
257 type_usew_t<x>::type& vd = \
258 Rvvelt<type_usew_t<x>::type>(rvv_vd_reg(), i, true); \
259 type_usew_t<x>::type uimm5 = (type_usew_t<x>::type)(instr_.RvvUimm5()); \
260 type_usew_t<x>::type vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
262#define VN_PARAMS(x) \
263 constexpr int half_x = x >> 1; \
264 type_sew_t<half_x>::type& vd = \
265 Rvvelt<type_sew_t<half_x>::type>(rvv_vd_reg(), i, true); \
266 type_sew_t<x>::type uimm5 = (type_sew_t<x>::type)(instr_.RvvUimm5()); \
267 type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
269#define VN_UPARAMS(x) \
270 constexpr int half_x = x >> 1; \
271 type_usew_t<half_x>::type& vd = \
272 Rvvelt<type_usew_t<half_x>::type>(rvv_vd_reg(), i, true); \
273 type_usew_t<x>::type uimm5 = (type_usew_t<x>::type)(instr_.RvvUimm5()); \
274 type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
276#define VXI_PARAMS(x) \
277 type_sew_t<x>::type& vd = \
278 Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
279 type_sew_t<x>::type vs1 = Rvvelt<type_sew_t<x>::type>(rvv_vs1_reg(), i); \
280 type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i); \
281 type_sew_t<x>::type rs1 = (type_sew_t<x>::type)(get_register(rs1_reg())); \
282 type_sew_t<x>::type simm5 = (type_sew_t<x>::type)(instr_.RvvSimm5());
284#define VI_XI_SLIDEDOWN_PARAMS(x, off) \
285 auto& vd = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
286 auto vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i + off);
288#define VI_XI_SLIDEUP_PARAMS(x, offset) \
289 auto& vd = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
290 auto vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i - offset);
292#define VX_SLIDE1DOWN_PARAMS(x, off) \
293 auto& vd = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
294 if ((i + off) == rvv_vlmax()) { \
295 type_sew_t<x>::type src = (type_sew_t<x>::type)(get_register(rs1_reg())); \
298 auto src = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i + off); \
302#define VX_SLIDE1UP_PARAMS(x, offset) \
303 auto& vd = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
304 if (i == 0 && rvv_vstart() == 0) { \
305 type_sew_t<x>::type src = (type_sew_t<x>::type)(get_register(rs1_reg())); \
308 auto src = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i - offset); \
312#define VF_SLIDE1DOWN_PARAMS(x, offset) \
313 auto& vd = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
314 if ((i + offset) == rvv_vlmax()) { \
315 auto src = base::bit_cast<type_sew_t<x>::type>( \
316 get_fpu_register_Float##x(rs1_reg()).get_bits()); \
319 auto src = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i + offset); \
323#define VF_SLIDE1UP_PARAMS(x, offset) \
324 auto& vd = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
325 if (i == rvv_vstart() && i == 0) { \
326 auto src = base::bit_cast<type_sew_t<x>::type>( \
327 get_fpu_register_Float##x(rs1_reg()).get_bits()); \
330 auto src = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i - offset); \
335#define VI_VIE_PARAMS(x, scale) \
336 if ((x / scale) < 8) UNREACHABLE(); \
337 auto& vd = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
338 auto vs2 = Rvvelt<type_sew_t<x / scale>::type>(rvv_vs2_reg(), i);
340#define VI_VIE_UPARAMS(x, scale) \
341 if ((x / scale) < 8) UNREACHABLE(); \
342 auto& vd = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
343 auto vs2 = Rvvelt<type_usew_t<x / scale>::type>(rvv_vs2_reg(), i);
345#define require_noover(astart, asize, bstart, bsize) \
346 CHECK_EQ(!is_overlapped(astart, asize, bstart, bsize), true)
347#define require_noover_widen(astart, asize, bstart, bsize) \
348 CHECK_EQ(!is_overlapped_widen(astart, asize, bstart, bsize), true)
350#define RVV_VI_GENERAL_LOOP_BASE \
351 for (uint64_t i = rvv_vstart(); i < rvv_vl(); i++) {
352#define RVV_VI_LOOP_END \
356#define RVV_VI_MASK_VARS \
357 const uint8_t midx = i / 64; \
358 const uint8_t mpos = i % 64;
360#define RVV_VI_LOOP_MASK_SKIP(BODY) \
362 if (instr_.RvvVM() == 0) { \
363 bool skip = ((Rvvelt<uint64_t>(0, midx) >> mpos) & 0x1) == 0; \
369#define RVV_VI_VV_LOOP(BODY) \
370 RVV_VI_GENERAL_LOOP_BASE \
371 RVV_VI_LOOP_MASK_SKIP() \
372 if (rvv_vsew() == E8) { \
375 } else if (rvv_vsew() == E16) { \
378 } else if (rvv_vsew() == E32) { \
381 } else if (rvv_vsew() == E64) { \
390#define RVV_VI_VV_ULOOP(BODY) \
391 RVV_VI_GENERAL_LOOP_BASE \
392 RVV_VI_LOOP_MASK_SKIP() \
393 if (rvv_vsew() == E8) { \
396 } else if (rvv_vsew() == E16) { \
399 } else if (rvv_vsew() == E32) { \
402 } else if (rvv_vsew() == E64) { \
411#define RVV_VI_VX_LOOP(BODY) \
412 RVV_VI_GENERAL_LOOP_BASE \
413 RVV_VI_LOOP_MASK_SKIP() \
414 if (rvv_vsew() == E8) { \
417 } else if (rvv_vsew() == E16) { \
420 } else if (rvv_vsew() == E32) { \
423 } else if (rvv_vsew() == E64) { \
432#define RVV_VI_VX_ULOOP(BODY) \
433 RVV_VI_GENERAL_LOOP_BASE \
434 RVV_VI_LOOP_MASK_SKIP() \
435 if (rvv_vsew() == E8) { \
438 } else if (rvv_vsew() == E16) { \
441 } else if (rvv_vsew() == E32) { \
444 } else if (rvv_vsew() == E64) { \
453#define RVV_VI_VI_LOOP(BODY) \
454 RVV_VI_GENERAL_LOOP_BASE \
455 RVV_VI_LOOP_MASK_SKIP() \
456 if (rvv_vsew() == E8) { \
459 } else if (rvv_vsew() == E16) { \
462 } else if (rvv_vsew() == E32) { \
465 } else if (rvv_vsew() == E64) { \
474#define RVV_VI_VI_ULOOP(BODY) \
475 RVV_VI_GENERAL_LOOP_BASE \
476 RVV_VI_LOOP_MASK_SKIP() \
477 if (rvv_vsew() == E8) { \
480 } else if (rvv_vsew() == E16) { \
483 } else if (rvv_vsew() == E32) { \
486 } else if (rvv_vsew() == E64) { \
497#define VI_WIDE_CHECK_COMMON \
498 CHECK_LE(rvv_vflmul(), 4); \
499 CHECK_LE(rvv_vsew() * 2, kRvvELEN); \
500 require_align(rvv_vd_reg(), rvv_vflmul() * 2); \
503#define VI_NARROW_CHECK_COMMON \
504 CHECK_LE(rvv_vflmul(), 4); \
505 CHECK_LE(rvv_vsew() * 2, kRvvELEN); \
506 require_align(rvv_vs2_reg(), rvv_vflmul() * 2); \
507 require_align(rvv_vd_reg(), rvv_vflmul()); \
510#define RVV_VI_CHECK_SLIDE(is_over) \
511 require_align(rvv_vs2_reg(), rvv_vflmul()); \
512 require_align(rvv_vd_reg(), rvv_vflmul()); \
514 if (is_over) require(rvv_vd_reg() != rvv_vs2_reg());
516#define RVV_VI_CHECK_DDS(is_rs) \
517 VI_WIDE_CHECK_COMMON; \
518 require_align(rvv_vs2_reg(), rvv_vflmul() * 2); \
520 require_align(rvv_vs1_reg(), rvv_vflmul()); \
521 if (rvv_vflmul() < 1) { \
522 require_noover(rvv_vd_reg(), rvv_vflmul() * 2, rvv_vs1_reg(), \
525 require_noover_widen(rvv_vd_reg(), rvv_vflmul() * 2, rvv_vs1_reg(), \
530#define RVV_VI_CHECK_DSS(is_vs1) \
531 VI_WIDE_CHECK_COMMON; \
532 require_align(rvv_vs2_reg(), rvv_vflmul()); \
533 if (rvv_vflmul() < 1) { \
534 require_noover(rvv_vd_reg(), rvv_vflmul() * 2, rvv_vs2_reg(), \
537 require_noover_widen(rvv_vd_reg(), rvv_vflmul() * 2, rvv_vs2_reg(), \
541 require_align(rvv_vs1_reg(), rvv_vflmul()); \
542 if (rvv_vflmul() < 1) { \
543 require_noover(rvv_vd_reg(), rvv_vflmul() * 2, rvv_vs1_reg(), \
546 require_noover_widen(rvv_vd_reg(), rvv_vflmul() * 2, rvv_vs1_reg(), \
551#define RVV_VI_CHECK_SDS(is_vs1) \
552 VI_NARROW_CHECK_COMMON; \
553 if (rvv_vd_reg() != rvv_vs2_reg()) \
554 require_noover(rvv_vd_reg(), rvv_vflmul(), rvv_vs2_reg(), \
556 if (is_vs1) require_align(rvv_vs1_reg(), rvv_vflmul());
558#define RVV_VI_VV_LOOP_WIDEN(BODY) \
559 RVV_VI_GENERAL_LOOP_BASE \
560 RVV_VI_LOOP_MASK_SKIP() \
561 if (rvv_vsew() == E8) { \
564 } else if (rvv_vsew() == E16) { \
567 } else if (rvv_vsew() == E32) { \
574#define RVV_VI_VX_LOOP_WIDEN(BODY) \
575 RVV_VI_GENERAL_LOOP_BASE \
576 if (rvv_vsew() == E8) { \
579 } else if (rvv_vsew() == E16) { \
582 } else if (rvv_vsew() == E32) { \
589#define VI_WIDE_OP_AND_ASSIGN(var0, var1, var2, op0, op1, sign) \
590 switch (rvv_vsew()) { \
592 Rvvelt<uint16_t>(rvv_vd_reg(), i, true) = \
593 op1((sign##16_t)(sign##8_t)var0 op0(sign##16_t)(sign##8_t) var1) + \
597 Rvvelt<uint32_t>(rvv_vd_reg(), i, true) = \
598 op1((sign##32_t)(sign##16_t)var0 op0(sign##32_t)(sign##16_t) var1) + \
602 Rvvelt<uint64_t>(rvv_vd_reg(), i, true) = \
603 op1((sign##64_t)(sign##32_t)var0 op0(sign##64_t)(sign##32_t) var1) + \
608#define VI_WIDE_WVX_OP(var0, op0, sign) \
609 switch (rvv_vsew()) { \
611 sign##16_t & vd_w = Rvvelt<sign##16_t>(rvv_vd_reg(), i, true); \
612 sign##16_t vs2_w = Rvvelt<sign##16_t>(rvv_vs2_reg(), i); \
613 vd_w = vs2_w op0(sign##16_t)(sign##8_t) var0; \
616 sign##32_t & vd_w = Rvvelt<sign##32_t>(rvv_vd_reg(), i, true); \
617 sign##32_t vs2_w = Rvvelt<sign##32_t>(rvv_vs2_reg(), i); \
618 vd_w = vs2_w op0(sign##32_t)(sign##16_t) var0; \
621 sign##64_t & vd_w = Rvvelt<sign##64_t>(rvv_vd_reg(), i, true); \
622 sign##64_t vs2_w = Rvvelt<sign##64_t>(rvv_vs2_reg(), i); \
623 vd_w = vs2_w op0(sign##64_t)(sign##32_t) var0; \
627#define RVV_VI_VVXI_MERGE_LOOP(BODY) \
628 RVV_VI_GENERAL_LOOP_BASE \
629 if (rvv_vsew() == E8) { \
632 } else if (rvv_vsew() == E16) { \
635 } else if (rvv_vsew() == E32) { \
638 } else if (rvv_vsew() == E64) { \
645#define VV_WITH_CARRY_PARAMS(x) \
646 type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i); \
647 type_sew_t<x>::type vs1 = Rvvelt<type_sew_t<x>::type>(rvv_vs1_reg(), i); \
648 type_sew_t<x>::type& vd = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true);
650#define XI_WITH_CARRY_PARAMS(x) \
651 type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i); \
652 type_sew_t<x>::type rs1 = (type_sew_t<x>::type)(get_register(rs1_reg())); \
653 type_sew_t<x>::type simm5 = (type_sew_t<x>::type)instr_.RvvSimm5(); \
654 type_sew_t<x>::type& vd = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true);
657#define RVV_VI_VV_LOOP_WITH_CARRY(BODY) \
658 CHECK_NE(rvv_vd_reg(), 0); \
659 RVV_VI_GENERAL_LOOP_BASE \
661 if (rvv_vsew() == E8) { \
662 VV_WITH_CARRY_PARAMS(8) \
664 } else if (rvv_vsew() == E16) { \
665 VV_WITH_CARRY_PARAMS(16) \
667 } else if (rvv_vsew() == E32) { \
668 VV_WITH_CARRY_PARAMS(32) \
670 } else if (rvv_vsew() == E64) { \
671 VV_WITH_CARRY_PARAMS(64) \
676#define RVV_VI_XI_LOOP_WITH_CARRY(BODY) \
677 CHECK_NE(rvv_vd_reg(), 0); \
678 RVV_VI_GENERAL_LOOP_BASE \
680 if (rvv_vsew() == E8) { \
681 XI_WITH_CARRY_PARAMS(8) \
683 } else if (rvv_vsew() == E16) { \
684 XI_WITH_CARRY_PARAMS(16) \
686 } else if (rvv_vsew() == E32) { \
687 XI_WITH_CARRY_PARAMS(32) \
689 } else if (rvv_vsew() == E64) { \
690 XI_WITH_CARRY_PARAMS(64) \
695#define VV_CMP_PARAMS(x) \
696 type_sew_t<x>::type vs1 = Rvvelt<type_sew_t<x>::type>(rvv_vs1_reg(), i); \
697 type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
699#define VX_CMP_PARAMS(x) \
700 type_sew_t<x>::type rs1 = (type_sew_t<x>::type)(get_register(rs1_reg())); \
701 type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
703#define VI_CMP_PARAMS(x) \
704 type_sew_t<x>::type simm5 = (type_sew_t<x>::type)instr_.RvvSimm5(); \
705 type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
707#define VV_UCMP_PARAMS(x) \
708 type_usew_t<x>::type vs1 = Rvvelt<type_usew_t<x>::type>(rvv_vs1_reg(), i); \
709 type_usew_t<x>::type vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
711#define VX_UCMP_PARAMS(x) \
712 type_usew_t<x>::type rs1 = \
713 (type_sew_t<x>::type)(get_register(rvv_vs1_reg())); \
714 type_usew_t<x>::type vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
716#define VI_UCMP_PARAMS(x) \
717 type_usew_t<x>::type uimm5 = (type_usew_t<x>::type)instr_.RvvUimm5(); \
718 type_usew_t<x>::type vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
720#define float32_t float
721#define float64_t double
723#define RVV_VI_LOOP_CMP_BASE \
724 CHECK(rvv_vsew() >= E8 && rvv_vsew() <= E64); \
725 for (reg_t i = rvv_vstart(); i < rvv_vl(); ++i) { \
726 RVV_VI_LOOP_MASK_SKIP(); \
727 uint64_t mmask = uint64_t(1) << mpos; \
728 uint64_t& vdi = Rvvelt<uint64_t>(rvv_vd_reg(), midx, true); \
731#define RVV_VI_LOOP_CMP_END \
732 vdi = (vdi & ~mmask) | (((res) << mpos) & mmask); \
738#define RVV_VI_VV_LOOP_CMP(BODY) \
739 RVV_VI_LOOP_CMP_BASE \
740 if (rvv_vsew() == E8) { \
743 } else if (rvv_vsew() == E16) { \
746 } else if (rvv_vsew() == E32) { \
749 } else if (rvv_vsew() == E64) { \
755#define RVV_VI_VX_LOOP_CMP(BODY) \
756 RVV_VI_LOOP_CMP_BASE \
757 if (rvv_vsew() == E8) { \
760 } else if (rvv_vsew() == E16) { \
763 } else if (rvv_vsew() == E32) { \
766 } else if (rvv_vsew() == E64) { \
772#define RVV_VI_VI_LOOP_CMP(BODY) \
773 RVV_VI_LOOP_CMP_BASE \
774 if (rvv_vsew() == E8) { \
777 } else if (rvv_vsew() == E16) { \
780 } else if (rvv_vsew() == E32) { \
783 } else if (rvv_vsew() == E64) { \
789#define RVV_VI_VV_ULOOP_CMP(BODY) \
790 RVV_VI_LOOP_CMP_BASE \
791 if (rvv_vsew() == E8) { \
794 } else if (rvv_vsew() == E16) { \
795 VV_UCMP_PARAMS(16); \
797 } else if (rvv_vsew() == E32) { \
798 VV_UCMP_PARAMS(32); \
800 } else if (rvv_vsew() == E64) { \
801 VV_UCMP_PARAMS(64); \
806#define RVV_VI_VX_ULOOP_CMP(BODY) \
807 RVV_VI_LOOP_CMP_BASE \
808 if (rvv_vsew() == E8) { \
811 } else if (rvv_vsew() == E16) { \
812 VX_UCMP_PARAMS(16); \
814 } else if (rvv_vsew() == E32) { \
815 VX_UCMP_PARAMS(32); \
817 } else if (rvv_vsew() == E64) { \
818 VX_UCMP_PARAMS(64); \
823#define RVV_VI_VI_ULOOP_CMP(BODY) \
824 RVV_VI_LOOP_CMP_BASE \
825 if (rvv_vsew() == E8) { \
828 } else if (rvv_vsew() == E16) { \
829 VI_UCMP_PARAMS(16); \
831 } else if (rvv_vsew() == E32) { \
832 VI_UCMP_PARAMS(32); \
834 } else if (rvv_vsew() == E64) { \
835 VI_UCMP_PARAMS(64); \
840#define RVV_VI_VF_MERGE_LOOP_BASE \
841 for (uint64_t i = rvv_vstart(); i < rvv_vl(); i++) {
842#define RVV_VI_VF_MERGE_LOOP_END \
846#define RVV_VI_VF_MERGE_LOOP(BODY16, BODY32, BODY64) \
847 RVV_VI_VF_MERGE_LOOP_BASE \
848 switch (rvv_vsew()) { \
853 int32_t& vd = Rvvelt<int32_t>(rvv_vd_reg(), i, true); \
854 int32_t fs1 = base::bit_cast<int32_t>( \
855 get_fpu_register_Float32(rs1_reg()).get_bits()); \
856 int32_t vs2 = Rvvelt<int32_t>(rvv_vs2_reg(), i); \
861 int64_t& vd = Rvvelt<int64_t>(rvv_vd_reg(), i, true); \
862 int64_t fs1 = base::bit_cast<int64_t>( \
863 get_fpu_register_Float64(rs1_reg()).get_bits()); \
864 int64_t vs2 = Rvvelt<int64_t>(rvv_vs2_reg(), i); \
872 RVV_VI_VF_MERGE_LOOP_END \
875#define RVV_VI_VFP_LOOP_BASE \
876 for (uint64_t i = rvv_vstart(); i < rvv_vl(); ++i) { \
877 RVV_VI_LOOP_MASK_SKIP();
879#define RVV_VI_VFP_LOOP_END \
883#define RVV_VI_VFP_VF_LOOP(BODY16, BODY32, BODY64) \
884 RVV_VI_VFP_LOOP_BASE \
885 switch (rvv_vsew()) { \
890 float& vd = Rvvelt<float>(rvv_vd_reg(), i, true); \
891 float fs1 = get_fpu_register_float(rs1_reg()); \
892 float vs2 = Rvvelt<float>(rvv_vs2_reg(), i); \
897 double& vd = Rvvelt<double>(rvv_vd_reg(), i, true); \
898 double fs1 = get_fpu_register_double(rs1_reg()); \
899 double vs2 = Rvvelt<double>(rvv_vs2_reg(), i); \
907 RVV_VI_VFP_LOOP_END \
910#define RVV_VI_VFP_VV_LOOP(BODY16, BODY32, BODY64) \
911 RVV_VI_VFP_LOOP_BASE \
912 switch (rvv_vsew()) { \
918 float& vd = Rvvelt<float>(rvv_vd_reg(), i, true); \
919 float vs1 = Rvvelt<float>(rvv_vs1_reg(), i); \
920 float vs2 = Rvvelt<float>(rvv_vs2_reg(), i); \
925 double& vd = Rvvelt<double>(rvv_vd_reg(), i, true); \
926 double vs1 = Rvvelt<double>(rvv_vs1_reg(), i); \
927 double vs2 = Rvvelt<double>(rvv_vs2_reg(), i); \
935 RVV_VI_VFP_LOOP_END \
938#define RVV_VFSGNJ_VV_VF_LOOP(BODY16, BODY32, BODY64) \
939 RVV_VI_VFP_LOOP_BASE \
940 switch (rvv_vsew()) { \
946 uint32_t& vd = Rvvelt<uint32_t>(rvv_vd_reg(), i, true); \
947 uint32_t vs1 = Rvvelt<uint32_t>(rvv_vs1_reg(), i); \
948 uint32_t vs2 = Rvvelt<uint32_t>(rvv_vs2_reg(), i); \
949 Float32 fs1 = get_fpu_register_Float32(rs1_reg()); \
954 uint64_t& vd = Rvvelt<uint64_t>(rvv_vd_reg(), i, true); \
955 uint64_t vs1 = Rvvelt<uint64_t>(rvv_vs1_reg(), i); \
956 uint64_t vs2 = Rvvelt<uint64_t>(rvv_vs2_reg(), i); \
957 Float64 fs1 = get_fpu_register_Float64(rs1_reg()); \
965 RVV_VI_VFP_LOOP_END \
968#define RVV_VI_VFP_VF_LOOP_WIDEN(BODY32, vs2_is_widen) \
969 RVV_VI_VFP_LOOP_BASE \
970 switch (rvv_vsew()) { \
977 double& vd = Rvvelt<double>(rvv_vd_reg(), i, true); \
978 double fs1 = static_cast<double>(get_fpu_register_float(rs1_reg())); \
979 double vs2 = vs2_is_widen \
980 ? Rvvelt<double>(rvv_vs2_reg(), i) \
981 : static_cast<double>(Rvvelt<float>(rvv_vs2_reg(), i)); \
982 double vs3 = Rvvelt<double>(rvv_vd_reg(), i); \
990 RVV_VI_VFP_LOOP_END \
993#define RVV_VI_VFP_VV_LOOP_WIDEN(BODY32, vs2_is_widen) \
994 RVV_VI_VFP_LOOP_BASE \
995 switch (rvv_vsew()) { \
1002 double& vd = Rvvelt<double>(rvv_vd_reg(), i, true); \
1003 double vs2 = vs2_is_widen \
1004 ? static_cast<double>(Rvvelt<double>(rvv_vs2_reg(), i)) \
1005 : static_cast<double>(Rvvelt<float>(rvv_vs2_reg(), i)); \
1006 double vs1 = static_cast<double>(Rvvelt<float>(rvv_vs1_reg(), i)); \
1007 double vs3 = Rvvelt<double>(rvv_vd_reg(), i); \
1015 RVV_VI_VFP_LOOP_END \
1018#define RVV_VI_VFP_VV_ARITH_CHECK_COMPUTE(type, check_fn, op) \
1019 auto fn = [this](type frs1, type frs2) { \
1020 if (check_fn(frs1, frs2)) { \
1021 this->set_fflags(kInvalidOperation); \
1022 return std::numeric_limits<type>::quiet_NaN(); \
1024 return frs2 op frs1; \
1027 auto alu_out = fn(vs1, vs2); \
1029 if (std::isnan(alu_out) || std::isnan(vs1) || std::isnan(vs2)) { \
1031 if (isSnan(alu_out) || isSnan(vs1) || isSnan(vs2)) \
1032 set_fflags(kInvalidOperation); \
1033 alu_out = std::numeric_limits<type>::quiet_NaN(); \
1037#define RVV_VI_VFP_VF_ARITH_CHECK_COMPUTE(type, check_fn, op) \
1038 auto fn = [this](type frs1, type frs2) { \
1039 if (check_fn(frs1, frs2)) { \
1040 this->set_fflags(kInvalidOperation); \
1041 return std::numeric_limits<type>::quiet_NaN(); \
1043 return frs2 op frs1; \
1046 auto alu_out = fn(fs1, vs2); \
1048 if (std::isnan(alu_out) || std::isnan(fs1) || std::isnan(vs2)) { \
1050 if (isSnan(alu_out) || isSnan(fs1) || isSnan(vs2)) \
1051 set_fflags(kInvalidOperation); \
1052 alu_out = std::numeric_limits<type>::quiet_NaN(); \
1056#define RVV_VI_VFP_FMA(type, _f1, _f2, _a) \
1057 auto fn = [](type f1, type f2, type a) { return std::fma(f1, f2, a); }; \
1058 vd = CanonicalizeFPUOpFMA<type>(fn, _f1, _f2, _a);
1060#define RVV_VI_VFP_FMA_VV_LOOP(BODY32, BODY64) \
1061 RVV_VI_VFP_LOOP_BASE \
1062 switch (rvv_vsew()) { \
1067 float& vd = Rvvelt<float>(rvv_vd_reg(), i, true); \
1068 float vs1 = Rvvelt<float>(rvv_vs1_reg(), i); \
1069 float vs2 = Rvvelt<float>(rvv_vs2_reg(), i); \
1074 double& vd = Rvvelt<double>(rvv_vd_reg(), i, true); \
1075 double vs1 = Rvvelt<double>(rvv_vs1_reg(), i); \
1076 double vs2 = Rvvelt<double>(rvv_vs2_reg(), i); \
1084 RVV_VI_VFP_LOOP_END \
1087#define RVV_VI_VFP_FMA_VF_LOOP(BODY32, BODY64) \
1088 RVV_VI_VFP_LOOP_BASE \
1089 switch (rvv_vsew()) { \
1094 float& vd = Rvvelt<float>(rvv_vd_reg(), i, true); \
1095 float fs1 = get_fpu_register_float(rs1_reg()); \
1096 float vs2 = Rvvelt<float>(rvv_vs2_reg(), i); \
1101 double& vd = Rvvelt<double>(rvv_vd_reg(), i, true); \
1102 float fs1 = get_fpu_register_float(rs1_reg()); \
1103 double vs2 = Rvvelt<double>(rvv_vs2_reg(), i); \
1111 RVV_VI_VFP_LOOP_END \
1114#define RVV_VI_VFP_LOOP_CMP_BASE \
1115 for (reg_t i = rvv_vstart(); i < rvv_vl(); ++i) { \
1116 RVV_VI_LOOP_MASK_SKIP(); \
1117 uint64_t mmask = uint64_t(1) << mpos; \
1118 uint64_t& vdi = Rvvelt<uint64_t>(rvv_vd_reg(), midx, true); \
1121#define RVV_VI_VFP_LOOP_CMP_END \
1122 switch (rvv_vsew()) { \
1126 vdi = (vdi & ~mmask) | (((res) << mpos) & mmask); \
1134 set_rvv_vstart(0); \
1137#define RVV_VI_VFP_LOOP_CMP(BODY16, BODY32, BODY64, is_vs1) \
1138 RVV_VI_VFP_LOOP_CMP_BASE \
1139 switch (rvv_vsew()) { \
1144 float vs2 = Rvvelt<float>(rvv_vs2_reg(), i); \
1145 float vs1 = Rvvelt<float>(rvv_vs1_reg(), i); \
1150 double vs2 = Rvvelt<double>(rvv_vs2_reg(), i); \
1151 double vs1 = Rvvelt<double>(rvv_vs1_reg(), i); \
1159 RVV_VI_VFP_LOOP_CMP_END
1162#define RVV_VI_LOOP_REDUCTION_BASE(x) \
1163 auto& vd_0_des = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), 0, true); \
1164 auto vd_0_res = Rvvelt<type_sew_t<x>::type>(rvv_vs1_reg(), 0); \
1165 for (uint64_t i = rvv_vstart(); i < rvv_vl(); ++i) { \
1166 RVV_VI_LOOP_MASK_SKIP(); \
1167 auto vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
1169#define RVV_VI_LOOP_REDUCTION_END(x) \
1171 if (rvv_vl() > 0) { \
1172 vd_0_des = vd_0_res; \
1176#define REDUCTION_LOOP(x, BODY) \
1177 RVV_VI_LOOP_REDUCTION_BASE(x) \
1179 RVV_VI_LOOP_REDUCTION_END(x)
1181#define RVV_VI_VV_LOOP_REDUCTION(BODY) \
1182 if (rvv_vsew() == E8) { \
1183 REDUCTION_LOOP(8, BODY) \
1184 } else if (rvv_vsew() == E16) { \
1185 REDUCTION_LOOP(16, BODY) \
1186 } else if (rvv_vsew() == E32) { \
1187 REDUCTION_LOOP(32, BODY) \
1188 } else if (rvv_vsew() == E64) { \
1189 REDUCTION_LOOP(64, BODY) \
1193#define VI_VFP_LOOP_REDUCTION_BASE(width) \
1194 float##width##_t vd_0 = Rvvelt<float##width##_t>(rvv_vd_reg(), 0); \
1195 float##width##_t vs1_0 = Rvvelt<float##width##_t>(rvv_vs1_reg(), 0); \
1198 for (reg_t i = rvv_vstart(); i < rvv_vl(); ++i) { \
1199 RVV_VI_LOOP_MASK_SKIP(); \
1200 float##width##_t vs2 = Rvvelt<float##width##_t>(rvv_vs2_reg(), i); \
1203#define VI_VFP_LOOP_REDUCTION_END(x) \
1205 set_rvv_vstart(0); \
1206 if (rvv_vl() > 0) { \
1207 Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), 0, true) = vd_0; \
1210#define RVV_VI_VFP_VV_LOOP_REDUCTION(BODY16, BODY32, BODY64) \
1211 if (rvv_vsew() == E16) { \
1213 } else if (rvv_vsew() == E32) { \
1214 VI_VFP_LOOP_REDUCTION_BASE(32) \
1216 VI_VFP_LOOP_REDUCTION_END(32) \
1217 } else if (rvv_vsew() == E64) { \
1218 VI_VFP_LOOP_REDUCTION_BASE(64) \
1220 VI_VFP_LOOP_REDUCTION_END(64) \
1225#define RVV_VI_ULOOP_REDUCTION_BASE(x) \
1226 auto& vd_0_des = Rvvelt<type_usew_t<x>::type>(rvv_vd_reg(), 0, true); \
1227 auto vd_0_res = Rvvelt<type_usew_t<x>::type>(rvv_vs1_reg(), 0); \
1228 for (reg_t i = rvv_vstart(); i < rvv_vl(); ++i) { \
1229 RVV_VI_LOOP_MASK_SKIP(); \
1230 auto vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
1232#define REDUCTION_ULOOP(x, BODY) \
1233 RVV_VI_ULOOP_REDUCTION_BASE(x) \
1235 RVV_VI_LOOP_REDUCTION_END(x)
1237#define RVV_VI_VV_ULOOP_REDUCTION(BODY) \
1238 if (rvv_vsew() == E8) { \
1239 REDUCTION_ULOOP(8, BODY) \
1240 } else if (rvv_vsew() == E16) { \
1241 REDUCTION_ULOOP(16, BODY) \
1242 } else if (rvv_vsew() == E32) { \
1243 REDUCTION_ULOOP(32, BODY) \
1244 } else if (rvv_vsew() == E64) { \
1245 REDUCTION_ULOOP(64, BODY) \
1249#define VI_STRIP(inx) reg_t vreg_inx = inx;
1251#define VI_ELEMENT_SKIP(inx) \
1254 } else if (inx < rvv_vstart()) { \
1257 RVV_VI_LOOP_MASK_SKIP(); \
1262 if (instr_.RvvVM() == 0) CHECK_NE(rvv_vd_reg(), 0); \
1265#define VI_CHECK_STORE(elt_width, is_mask_ldst) \
1266 reg_t veew = is_mask_ldst ? 1 : sizeof(elt_width##_t) * 8;
1273#define VI_CHECK_LOAD(elt_width, is_mask_ldst) \
1274 VI_CHECK_STORE(elt_width, is_mask_ldst); \
1278#define RVV_VI_LD(stride, offset, elt_width, is_mask_ldst) \
1279 const reg_t nf = rvv_nf() + 1; \
1280 const reg_t vl = is_mask_ldst ? ((rvv_vl() + 7) / 8) : rvv_vl(); \
1281 const int64_t baseAddr = rs1(); \
1282 for (reg_t i = 0; i < vl; ++i) { \
1283 VI_ELEMENT_SKIP(i); \
1285 set_rvv_vstart(i); \
1286 for (reg_t fn = 0; fn < nf; ++fn) { \
1287 auto addr = baseAddr + (stride) + (offset) * sizeof(elt_width##_t); \
1288 if (!ProbeMemory(addr, sizeof(elt_width##_t))) { \
1289 set_rvv_vstart(0); \
1292 auto val = ReadMem<elt_width##_t>(addr, instr_.instr()); \
1293 type_sew_t<sizeof(elt_width##_t) * 8>::type& vd = \
1294 Rvvelt<type_sew_t<sizeof(elt_width##_t) * 8>::type>(rvv_vd_reg(), \
1299 set_rvv_vstart(0); \
1300 if (v8_flags.trace_sim) { \
1301 __int128_t value = Vregister_[rvv_vd_reg()]; \
1302 SNPrintF(trace_buf_, \
1303 "%016" PRIx64 "%016" PRIx64 " (%" PRId64 ") vlen:%" PRId64 \
1304 " <-- [addr: %" REGIx_FORMAT "]", \
1305 *(reinterpret_cast<int64_t*>(&value) + 1), \
1306 *reinterpret_cast<int64_t*>(&value), icount_, rvv_vlen(), \
1307 (sreg_t)(get_register(rs1_reg()))); \
1310#define RVV_VI_ST(stride, offset, elt_width, is_mask_ldst) \
1311 const reg_t nf = rvv_nf() + 1; \
1312 const reg_t vl = is_mask_ldst ? ((rvv_vl() + 7) / 8) : rvv_vl(); \
1313 const int64_t baseAddr = rs1(); \
1314 for (reg_t i = 0; i < vl; ++i) { \
1316 VI_ELEMENT_SKIP(i); \
1317 set_rvv_vstart(i); \
1318 for (reg_t fn = 0; fn < nf; ++fn) { \
1319 auto addr = baseAddr + (stride) + (offset) * sizeof(elt_width##_t); \
1320 if (!ProbeMemory(addr, sizeof(elt_width##_t))) { \
1321 set_rvv_vstart(0); \
1324 elt_width##_t vs1 = Rvvelt<type_sew_t<sizeof(elt_width##_t) * 8>::type>( \
1325 rvv_vs3_reg(), vreg_inx); \
1326 WriteMem(addr, vs1, instr_.instr()); \
1329 set_rvv_vstart(0); \
1330 if (v8_flags.trace_sim) { \
1331 __int128_t value = Vregister_[rvv_vd_reg()]; \
1332 SNPrintF(trace_buf_, \
1333 "%016" PRIx64 "%016" PRIx64 " (%" PRId64 ") vlen:%" PRId64 \
1334 " --> [addr: %" REGIx_FORMAT "]", \
1335 *(reinterpret_cast<int64_t*>(&value) + 1), \
1336 *reinterpret_cast<int64_t*>(&value), icount_, rvv_vlen(), \
1337 (sreg_t)(get_register(rs1_reg()))); \
1340#define VI_VFP_LOOP_SCALE_BASE \
1342 for (reg_t i = rvv_vstart(); i < rvv_vl(); ++i) { \
1343 RVV_VI_LOOP_MASK_SKIP();
1345#define RVV_VI_VFP_CVT_SCALE(BODY8, BODY16, BODY32, CHECK8, CHECK16, CHECK32, \
1346 is_widen, eew_check) \
1348 RVV_VI_CHECK_DSS(false); \
1350 RVV_VI_CHECK_SDS(false); \
1353 switch (rvv_vsew()) { \
1356 VI_VFP_LOOP_SCALE_BASE \
1358 RVV_VI_VFP_LOOP_END \
1362 VI_VFP_LOOP_SCALE_BASE \
1364 RVV_VI_VFP_LOOP_END \
1368 VI_VFP_LOOP_SCALE_BASE \
1370 RVV_VI_VFP_LOOP_END \
1379static inline uint8_t get_round(
int vxrm, uint64_t v, uint8_t shift) {
1384 if (shift == 0 || shift > 64) {
1392 }
else if (vxrm == 1) {
1395 return d1 & ((D2 != 0) | d);
1399 }
else if (vxrm == 3) {
1400 return !d & (D1 != 0);
1405template <
typename Src,
typename Dst>
1406inline Dst signed_saturation(Src v, uint n) {
1407 Dst smax = (Dst)(INTPTR_MAX >> (
sizeof(intptr_t) * 8 - n));
1408 Dst smin = (Dst)(INTPTR_MIN >> (
sizeof(intptr_t) * 8 - n));
1409 return (v > smax) ? smax : ((v < smin) ? smin : (Dst)v);
1412template <
typename Src,
typename Dst>
1413inline Dst unsigned_saturation(Src v, uint n) {
1414 Dst umax = (Dst)(UINTPTR_MAX >> (
sizeof(uintptr_t) * 8 - n));
1415 return (v > umax) ? umax : ((v < 0) ? 0 : (Dst)v);
1418#define RVV_VN_CLIPU_VI_LOOP() \
1419 RVV_VI_GENERAL_LOOP_BASE \
1420 RVV_VI_LOOP_MASK_SKIP() \
1421 if (rvv_vsew() == E8) { \
1423 vd = unsigned_saturation<uint16_t, uint8_t>( \
1424 (static_cast<uint16_t>(vs2) >> uimm5) + \
1425 get_round(static_cast<int>(rvv_vxrm()), vs2, uimm5), \
1427 } else if (rvv_vsew() == E16) { \
1429 vd = unsigned_saturation<uint32_t, uint16_t>( \
1430 (static_cast<uint32_t>(vs2) >> uimm5) + \
1431 get_round(static_cast<int>(rvv_vxrm()), vs2, uimm5), \
1433 } else if (rvv_vsew() == E32) { \
1435 vd = unsigned_saturation<uint64_t, uint32_t>( \
1436 (static_cast<uint64_t>(vs2) >> uimm5) + \
1437 get_round(static_cast<int>(rvv_vxrm()), vs2, uimm5), \
1439 } else if (rvv_vsew() == E64) { \
1447#define RVV_VN_CLIP_VI_LOOP() \
1448 RVV_VI_GENERAL_LOOP_BASE \
1449 RVV_VI_LOOP_MASK_SKIP() \
1450 if (rvv_vsew() == E8) { \
1452 vd = signed_saturation<int16_t, int8_t>( \
1453 (vs2 >> uimm5) + get_round(static_cast<int>(rvv_vxrm()), vs2, uimm5), \
1455 } else if (rvv_vsew() == E16) { \
1457 vd = signed_saturation<int32_t, int16_t>( \
1458 (vs2 >> uimm5) + get_round(static_cast<int>(rvv_vxrm()), vs2, uimm5), \
1460 } else if (rvv_vsew() == E32) { \
1462 vd = signed_saturation<int64_t, int32_t>( \
1463 (vs2 >> uimm5) + get_round(static_cast<int>(rvv_vxrm()), vs2, uimm5), \
1465 } else if (rvv_vsew() == E64) { \
1473#define CHECK_EXT(div) \
1474 CHECK_NE(rvv_vd_reg(), rvv_vs2_reg()); \
1475 reg_t from = rvv_vsew() / div; \
1476 CHECK(from >= E8 && from <= E64); \
1477 CHECK_GE((float)rvv_vflmul() / div, 0.125); \
1478 CHECK_LE((float)rvv_vflmul() / div, 8); \
1479 require_align(rvv_vd_reg(), rvv_vflmul()); \
1480 require_align(rvv_vs2_reg(), rvv_vflmul() / div); \
1481 if ((rvv_vflmul() / div) < 1) { \
1482 require_noover(rvv_vd_reg(), rvv_vflmul(), rvv_vs2_reg(), \
1483 rvv_vflmul() / div); \
1485 require_noover_widen(rvv_vd_reg(), rvv_vflmul(), rvv_vs2_reg(), \
1486 rvv_vflmul() / div); \
1489#define RVV_VI_VIE_8_LOOP(signed) \
1491 RVV_VI_GENERAL_LOOP_BASE \
1492 RVV_VI_LOOP_MASK_SKIP() \
1493 if (rvv_vsew() == E64) { \
1495 VI_VIE_PARAMS(64, 8); \
1496 vd = static_cast<int64_t>(vs2); \
1498 VI_VIE_UPARAMS(64, 8); \
1499 vd = static_cast<uint64_t>(vs2); \
1507#define RVV_VI_VIE_4_LOOP(signed) \
1509 RVV_VI_GENERAL_LOOP_BASE \
1510 RVV_VI_LOOP_MASK_SKIP() \
1511 if (rvv_vsew() == E32) { \
1513 VI_VIE_PARAMS(32, 4); \
1514 vd = static_cast<int32_t>(vs2); \
1516 VI_VIE_UPARAMS(32, 4); \
1517 vd = static_cast<uint32_t>(vs2); \
1519 } else if (rvv_vsew() == E64) { \
1521 VI_VIE_PARAMS(64, 4); \
1522 vd = static_cast<int64_t>(vs2); \
1524 VI_VIE_UPARAMS(64, 4); \
1525 vd = static_cast<uint64_t>(vs2); \
1533#define RVV_VI_VIE_2_LOOP(signed) \
1535 RVV_VI_GENERAL_LOOP_BASE \
1536 RVV_VI_LOOP_MASK_SKIP() \
1537 if (rvv_vsew() == E16) { \
1539 VI_VIE_PARAMS(16, 2); \
1540 vd = static_cast<int16_t>(vs2); \
1542 VI_VIE_UPARAMS(16, 2); \
1543 vd = static_cast<uint16_t>(vs2); \
1545 } else if (rvv_vsew() == E32) { \
1547 VI_VIE_PARAMS(32, 2); \
1548 vd = static_cast<int32_t>(vs2); \
1550 VI_VIE_UPARAMS(32, 2); \
1551 vd = static_cast<uint32_t>(vs2); \
1553 } else if (rvv_vsew() == E64) { \
1555 VI_VIE_PARAMS(64, 2); \
1556 vd = static_cast<int64_t>(vs2); \
1558 VI_VIE_UPARAMS(64, 2); \
1559 vd = static_cast<uint64_t>(vs2); \
1572 Simulator::GlobalMonitor::Get)
1575inline bool HaveSameSign(int64_t a, int64_t b) {
return ((a ^ b) >= 0); }
1577uint32_t get_fcsr_condition_bit(uint32_t
cc) {
1587static inline int32_t get_ebreak_code(Instruction*
instr) {
1589 uint8_t* cur =
reinterpret_cast<uint8_t*
>(
instr);
1590 Instruction* next_instr =
reinterpret_cast<Instruction*
>(cur +
kInstrSize);
1591 if (next_instr->BaseOpcodeFieldRaw() ==
LUI)
1592 return (next_instr->Imm20UValue());
1601#define SScanF sscanf
1605class RiscvDebugger {
1607 explicit RiscvDebugger(Simulator* sim) : sim_(sim) {}
1611 void PrintRegs(
char name_prefix,
int start_index,
int end_index);
1612 void PrintAllRegs();
1613 void PrintAllRegsIncludingFPU();
1615 static const Instr kNopInstr = 0x0;
1620 sreg_t GetRegisterValue(
int regnum);
1621 int64_t GetFPURegisterValue(
int regnum);
1622 float GetFPURegisterValueFloat(
int regnum);
1623 double GetFPURegisterValueDouble(
int regnum);
1624#ifdef CAN_USE_RVV_INSTRUCTIONS
1625 __int128_t GetVRegisterValue(
int regnum);
1627 bool GetValue(
const char* desc, sreg_t* value);
1630#define UNSUPPORTED() \
1631 v8::base::EmbeddedVector<char, 256> buffer; \
1632 disasm::NameConverter converter; \
1633 disasm::Disassembler dasm(converter); \
1634 dasm.InstructionDecode(buffer, reinterpret_cast<uint8_t*>(&instr_)); \
1635 printf("Sim: Unsupported inst. Func:%s Line:%d PC:0x%" REGIx_FORMAT, \
1636 __FUNCTION__, __LINE__, get_pc()); \
1637 PrintF(" %-44s\n", buffer.begin()); \
1640sreg_t RiscvDebugger::GetRegisterValue(
int regnum) {
1642 return sim_->get_pc();
1644 return sim_->get_register(regnum);
1648int64_t RiscvDebugger::GetFPURegisterValue(
int regnum) {
1650 return sim_->get_pc();
1652 return sim_->get_fpu_register(regnum);
1656float RiscvDebugger::GetFPURegisterValueFloat(
int regnum) {
1658 return sim_->get_pc();
1660 return sim_->get_fpu_register_float(regnum);
1664double RiscvDebugger::GetFPURegisterValueDouble(
int regnum) {
1666 return sim_->get_pc();
1668 return sim_->get_fpu_register_double(regnum);
1672#ifdef CAN_USE_RVV_INSTRUCTIONS
1673__int128_t RiscvDebugger::GetVRegisterValue(
int regnum) {
1675 return sim_->get_pc();
1677 return sim_->get_vregister(regnum);
1682bool RiscvDebugger::GetValue(
const char* desc, sreg_t* value) {
1687 *value = GetRegisterValue(regnum);
1690 *value = GetFPURegisterValue(fpuregnum);
1692 }
else if (strncmp(desc,
"0x", 2) == 0) {
1693#if V8_TARGET_ARCH_RISCV64
1694 return SScanF(desc + 2,
"%" SCNx64,
reinterpret_cast<reg_t*
>(value)) == 1;
1695#elif V8_TARGET_ARCH_RISCV32
1696 return SScanF(desc + 2,
"%" SCNx32,
reinterpret_cast<reg_t*
>(value)) == 1;
1699#if V8_TARGET_ARCH_RISCV64
1700 return SScanF(desc,
"%" SCNu64,
reinterpret_cast<reg_t*
>(value)) == 1;
1701#elif V8_TARGET_ARCH_RISCV32
1702 return SScanF(desc,
"%" SCNu32,
reinterpret_cast<reg_t*
>(value)) == 1;
1707#define REG_INFO(name) \
1708 name, GetRegisterValue(Registers::Number(name)), \
1709 GetRegisterValue(Registers::Number(name))
1711void RiscvDebugger::PrintRegs(
char name_prefix,
int start_index,
1713 base::EmbeddedVector<char, 10> name1, name2;
1714 DCHECK(name_prefix ==
'a' || name_prefix ==
't' || name_prefix ==
's');
1715 DCHECK(start_index >= 0 && end_index <= 99);
1716 int num_registers = (end_index - start_index) + 1;
1717 for (
int i = 0;
i < num_registers / 2;
i++) {
1718 SNPrintF(name1,
"%c%d", name_prefix, start_index + 2 *
i);
1719 SNPrintF(name2,
"%c%d", name_prefix, start_index + 2 *
i + 1);
1720 PrintF(
"%3s: 0x%016" REGIx_FORMAT
" %14" REGId_FORMAT
1721 " \t%3s: 0x%016" REGIx_FORMAT
" %14" REGId_FORMAT
" \n",
1722 REG_INFO(name1.begin()), REG_INFO(name2.begin()));
1724 if (num_registers % 2 == 1) {
1725 SNPrintF(name1,
"%c%d", name_prefix, end_index);
1726 PrintF(
"%3s: 0x%016" REGIx_FORMAT
" %14" REGId_FORMAT
" \n",
1727 REG_INFO(name1.begin()));
1731void RiscvDebugger::PrintAllRegs() {
1734 PrintF(
"%3s: 0x%016" REGIx_FORMAT
" %14" REGId_FORMAT
1735 "\t%3s: 0x%016" REGIx_FORMAT
" %14" REGId_FORMAT
1736 "\t%3s: 0x%016" REGIx_FORMAT
" %14" REGId_FORMAT
"\n",
1737 REG_INFO(
"ra"), REG_INFO(
"sp"), REG_INFO(
"gp"));
1740 PrintF(
"%3s: 0x%016" REGIx_FORMAT
" %14" REGId_FORMAT
1741 "\t%3s: 0x%016" REGIx_FORMAT
" %14" REGId_FORMAT
1742 "\t%3s: 0x%016" REGIx_FORMAT
" %14" REGId_FORMAT
"\n",
1743 REG_INFO(
"tp"), REG_INFO(
"fp"), REG_INFO(
"pc"));
1746 PrintRegs(
'a', 0, 7);
1748 PrintRegs(
's', 1, 11);
1750 PrintRegs(
't', 0, 6);
1755void RiscvDebugger::PrintAllRegsIncludingFPU() {
1756#define FPU_REG_INFO(n) \
1757 FPURegisters::Name(n), GetFPURegisterValue(n), GetFPURegisterValueDouble(n)
1765 PrintF(
"%3s: 0x%016" PRIx64
" %16.4e \t%3s: 0x%016" PRIx64
" %16.4e\n",
1766 FPU_REG_INFO(
i), FPU_REG_INFO(
i + 1));
1770void RiscvDebugger::Debug() {
1771 intptr_t last_pc = -1;
1774#define COMMAND_SIZE 63
1778#define XSTR(a) STR(a)
1780 char cmd[COMMAND_SIZE + 1];
1781 char arg1[ARG_SIZE + 1];
1782 char arg2[ARG_SIZE + 1];
1783 char* argv[3] = {cmd, arg1, arg2};
1786 cmd[COMMAND_SIZE] = 0;
1790 while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) {
1791 if (last_pc != sim_->get_pc()) {
1796 const char* name = sim_->builtins_.Lookup((
Address)sim_->get_pc());
1797 if (name !=
nullptr) {
1798 PrintF(
"Call builtin: %s\n", name);
1800 dasm.InstructionDecode(buffer,
1801 reinterpret_cast<uint8_t*
>(sim_->get_pc()));
1802 PrintF(
" 0x%016" REGIx_FORMAT
" %s\n", sim_->get_pc(), buffer.
begin());
1803 last_pc = sim_->get_pc();
1806 if (line ==
nullptr) {
1809 char* last_input = sim_->last_debugger_input();
1810 if (strcmp(line,
"\n") == 0 && last_input !=
nullptr) {
1814 sim_->set_last_debugger_input(line);
1820 "%" XSTR(COMMAND_SIZE)
"s "
1821 "%" XSTR(ARG_SIZE)
"s "
1822 "%" XSTR(ARG_SIZE)
"s",
1824 if ((strcmp(cmd,
"si") == 0) || (strcmp(cmd,
"stepi") == 0)) {
1825 Instruction*
instr =
reinterpret_cast<Instruction*
>(sim_->get_pc());
1829 sim_->InstructionDecode(
1830 reinterpret_cast<Instruction*
>(sim_->get_pc()));
1833 PrintF(
"/!\\ Jumping over generated breakpoint.\n");
1836 }
else if ((strcmp(cmd,
"c") == 0) || (strcmp(cmd,
"cont") == 0)) {
1838 sim_->InstructionDecode(
reinterpret_cast<Instruction*
>(sim_->get_pc()));
1841 }
else if ((strcmp(cmd,
"p") == 0) || (strcmp(cmd,
"print") == 0)) {
1846 if (strcmp(arg1,
"all") == 0) {
1848 }
else if (strcmp(arg1,
"allf") == 0) {
1849 PrintAllRegsIncludingFPU();
1853#ifdef CAN_USE_RVV_INSTRUCTIONS
1857 value = GetRegisterValue(regnum);
1858 PrintF(
"%s: 0x%08" REGIx_FORMAT
" %" REGId_FORMAT
" \n", arg1,
1861 fvalue = GetFPURegisterValue(fpuregnum);
1862 dvalue = GetFPURegisterValueDouble(fpuregnum);
1863 PrintF(
"%3s: 0x%016" PRIx64
" %16.4e\n",
1865#ifdef CAN_USE_RVV_INSTRUCTIONS
1867 __int128_t v = GetVRegisterValue(vregnum);
1868 PrintF(
"\t%s:0x%016" PRIx64
"%016" PRIx64
"\n",
1873 PrintF(
"%s unrecognized\n", arg1);
1878 if (strcmp(arg2,
"single") == 0) {
1884 value = GetFPURegisterValue(fpuregnum);
1885 value &= 0xFFFFFFFFUL;
1886 fvalue = GetFPURegisterValueFloat(fpuregnum);
1887 PrintF(
"%s: 0x%08" PRIx64
" %11.4e\n", arg1, value, fvalue);
1889 PrintF(
"%s unrecognized\n", arg1);
1892 PrintF(
"print <fpu register> single\n");
1895 PrintF(
"print <register> or print <fpu register> single\n");
1898 }
else if ((strcmp(cmd,
"po") == 0) ||
1899 (strcmp(cmd,
"printobject") == 0)) {
1903 if (GetValue(arg1, &value)) {
1905 os << arg1 <<
": \n";
1910 os << Brief(obj) <<
"\n";
1913 os << arg1 <<
" unrecognized\n";
1916 PrintF(
"printobject <value>\n");
1918 }
else if (strcmp(cmd,
"stack") == 0 || strcmp(cmd,
"mem") == 0) {
1919 sreg_t* cur =
nullptr;
1920 sreg_t*
end =
nullptr;
1923 if (strcmp(cmd,
"stack") == 0) {
1924 cur =
reinterpret_cast<sreg_t*
>(sim_->get_register(Simulator::sp));
1927 PrintF(
"Need to specify <address> to mem command\n");
1931 if (!GetValue(arg1, &value)) {
1932 PrintF(
"%s unrecognized\n", arg1);
1935 cur =
reinterpret_cast<sreg_t*
>(
value);
1940 if (argc == next_arg) {
1943 if (!GetValue(argv[next_arg], &words)) {
1950 PrintF(
" 0x%012" PRIxPTR
" : 0x%016" REGIx_FORMAT
1951 " %14" REGId_FORMAT
" ",
1952 reinterpret_cast<intptr_t
>(cur), *cur, *cur);
1966 }
else if (strcmp(cmd,
"memhex") == 0) {
1967 sreg_t* cur =
nullptr;
1968 sreg_t*
end =
nullptr;
1971 PrintF(
"Need to specify <address> to memhex command\n");
1975 if (!GetValue(arg1, &value)) {
1976 PrintF(
"%s unrecognized\n", arg1);
1979 cur =
reinterpret_cast<sreg_t*
>(
value);
1983 if (argc == next_arg) {
1986 if (!GetValue(argv[next_arg], &words)) {
1993 PrintF(
" 0x%012" PRIxPTR
" : 0x%016" REGIx_FORMAT
1994 " %14" REGId_FORMAT
" ",
1995 reinterpret_cast<intptr_t
>(cur), *cur, *cur);
1999 }
else if ((strcmp(cmd,
"watch") == 0)) {
2001 PrintF(
"Need to specify <address> to mem command\n");
2005 if (!GetValue(arg1, &value)) {
2006 PrintF(
"%s unrecognized\n", arg1);
2009 sim_->watch_address_ =
reinterpret_cast<sreg_t*
>(
value);
2010 sim_->watch_value_ = *(sim_->watch_address_);
2011 }
else if ((strcmp(cmd,
"disasm") == 0) || (strcmp(cmd,
"dpc") == 0) ||
2012 (strcmp(cmd,
"di") == 0)) {
2018 uint8_t* cur =
nullptr;
2019 uint8_t*
end =
nullptr;
2022 cur =
reinterpret_cast<uint8_t*
>(sim_->get_pc());
2024 }
else if (argc == 2) {
2029 if (GetValue(arg1, &value)) {
2030 cur =
reinterpret_cast<uint8_t*
>(
value);
2037 if (GetValue(arg1, &value)) {
2038 cur =
reinterpret_cast<uint8_t*
>(sim_->get_pc());
2046 if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
2047 cur =
reinterpret_cast<uint8_t*
>(value1);
2053 dasm.InstructionDecode(buffer, cur);
2054 PrintF(
" 0x%08" PRIxPTR
" %s\n",
reinterpret_cast<intptr_t
>(cur),
2058 }
else if (strcmp(cmd,
"gdb") == 0) {
2059 PrintF(
"relinquishing control to gdb\n");
2061 PrintF(
"regaining control from gdb\n");
2062 }
else if (strcmp(cmd,
"trace") == 0) {
2063 PrintF(
"enable trace sim\n");
2065 }
else if (strcmp(cmd,
"break") == 0 || strcmp(cmd,
"b") == 0 ||
2066 strcmp(cmd,
"tbreak") == 0) {
2067 bool is_tbreak = strcmp(cmd,
"tbreak") == 0;
2070 if (GetValue(arg1, &value)) {
2071 sim_->SetBreakpoint(
reinterpret_cast<Instruction*
>(value),
2074 PrintF(
"%s unrecognized\n", arg1);
2077 sim_->ListBreakpoints();
2078 PrintF(
"Use `break <address>` to set or disable a breakpoint\n");
2080 "Use `tbreak <address>` to set or disable a temporary "
2083 }
else if (strcmp(cmd,
"flags") == 0) {
2084 PrintF(
"No flags on RISC-V !\n");
2085 }
else if (strcmp(cmd,
"stop") == 0) {
2089 if (strcmp(arg1,
"info") == 0) {
2090 if (strcmp(arg2,
"all") == 0) {
2091 PrintF(
"Stop information:\n");
2094 sim_->PrintStopInfo(
i);
2096 }
else if (GetValue(arg2, &value)) {
2097 sim_->PrintStopInfo(value);
2099 PrintF(
"Unrecognized argument.\n");
2101 }
else if (strcmp(arg1,
"enable") == 0) {
2103 if (strcmp(arg2,
"all") == 0) {
2106 sim_->EnableStop(
i);
2108 }
else if (GetValue(arg2, &value)) {
2109 sim_->EnableStop(value);
2111 PrintF(
"Unrecognized argument.\n");
2113 }
else if (strcmp(arg1,
"disable") == 0) {
2115 if (strcmp(arg2,
"all") == 0) {
2118 sim_->DisableStop(
i);
2120 }
else if (GetValue(arg2, &value)) {
2121 sim_->DisableStop(value);
2123 PrintF(
"Unrecognized argument.\n");
2127 PrintF(
"Wrong usage. Use help command for more information.\n");
2129 }
else if ((strcmp(cmd,
"stat") == 0) || (strcmp(cmd,
"st") == 0)) {
2139 uint8_t* cur =
nullptr;
2140 uint8_t*
end =
nullptr;
2143 cur =
reinterpret_cast<uint8_t*
>(sim_->get_pc());
2145 }
else if (argc == 2) {
2147 if (GetValue(arg1, &value)) {
2148 cur =
reinterpret_cast<uint8_t*
>(
value);
2155 if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
2156 cur =
reinterpret_cast<uint8_t*
>(value1);
2162 dasm.InstructionDecode(buffer, cur);
2163 PrintF(
" 0x%08" PRIxPTR
" %s\n",
reinterpret_cast<intptr_t
>(cur),
2167 }
else if ((strcmp(cmd,
"h") == 0) || (strcmp(cmd,
"help") == 0)) {
2168 PrintF(
"cont (alias 'c')\n");
2169 PrintF(
" Continue execution\n");
2170 PrintF(
"stepi (alias 'si')\n");
2171 PrintF(
" Step one instruction\n");
2172 PrintF(
"print (alias 'p')\n");
2173 PrintF(
" print <register>\n");
2174 PrintF(
" Print register content\n");
2175 PrintF(
" Use register name 'all' to print all GPRs\n");
2176 PrintF(
" Use register name 'allf' to print all GPRs and FPRs\n");
2177 PrintF(
"printobject (alias 'po')\n");
2178 PrintF(
" printobject <register>\n");
2179 PrintF(
" Print an object from a register\n");
2181 PrintF(
" stack [<words>]\n");
2182 PrintF(
" Dump stack content, default dump 10 words)\n");
2184 PrintF(
" mem <address> [<words>]\n");
2185 PrintF(
" Dump memory content, default dump 10 words)\n");
2187 PrintF(
" watch <address> \n");
2188 PrintF(
" watch memory content.)\n");
2190 PrintF(
" print flags\n");
2191 PrintF(
"disasm (alias 'di')\n");
2192 PrintF(
" disasm [<instructions>]\n");
2193 PrintF(
" disasm [<address/register>] (e.g., disasm pc) \n");
2194 PrintF(
" disasm [[<address/register>] <instructions>]\n");
2195 PrintF(
" Disassemble code, default is 10 instructions\n");
2198 PrintF(
" Return to gdb if the simulator was started with gdb\n");
2199 PrintF(
"break (alias 'b')\n");
2200 PrintF(
" break : list all breakpoints\n");
2201 PrintF(
" break <address> : set / enable / disable a breakpoint.\n");
2203 PrintF(
" tbreak : list all breakpoints\n");
2205 " tbreak <address> : set / enable / disable a temporary "
2207 PrintF(
" Set a breakpoint enabled only for one stop. \n");
2208 PrintF(
"stop feature:\n");
2209 PrintF(
" Description:\n");
2210 PrintF(
" Stops are debug instructions inserted by\n");
2211 PrintF(
" the Assembler::stop() function.\n");
2212 PrintF(
" When hitting a stop, the Simulator will\n");
2213 PrintF(
" stop and give control to the Debugger.\n");
2214 PrintF(
" All stop codes are watched:\n");
2215 PrintF(
" - They can be enabled / disabled: the Simulator\n");
2216 PrintF(
" will / won't stop when hitting them.\n");
2217 PrintF(
" - The Simulator keeps track of how many times they \n");
2218 PrintF(
" are met. (See the info command.) Going over a\n");
2219 PrintF(
" disabled stop still increases its counter. \n");
2221 PrintF(
" stop info all/<code> : print infos about number <code>\n");
2222 PrintF(
" or all stop(s).\n");
2223 PrintF(
" stop enable/disable all/<code> : enables / disables\n");
2224 PrintF(
" all or number <code> stop(s)\n");
2226 PrintF(
"Unknown command: %s\n", cmd);
2238void Simulator::SetBreakpoint(Instruction* location,
bool is_tbreak) {
2239 for (
unsigned i = 0;
i < breakpoints_.
size();
i++) {
2240 if (breakpoints_.at(
i).location == location) {
2241 if (breakpoints_.at(
i).is_tbreak != is_tbreak) {
2242 PrintF(
"Change breakpoint at %p to %s breakpoint\n",
2243 reinterpret_cast<void*
>(location),
2244 is_tbreak ?
"temporary" :
"regular");
2245 breakpoints_.at(
i).is_tbreak = is_tbreak;
2248 PrintF(
"Existing breakpoint at %p was %s\n",
2249 reinterpret_cast<void*
>(location),
2250 breakpoints_.at(
i).enabled ?
"disabled" :
"enabled");
2251 breakpoints_.at(
i).enabled = !breakpoints_.at(
i).enabled;
2255 Breakpoint new_breakpoint = {location,
true, is_tbreak};
2256 breakpoints_.push_back(new_breakpoint);
2257 PrintF(
"Set a %sbreakpoint at %p\n", is_tbreak ?
"temporary " :
"",
2258 reinterpret_cast<void*
>(location));
2261void Simulator::ListBreakpoints() {
2262 PrintF(
"Breakpoints:\n");
2263 for (
unsigned i = 0;
i < breakpoints_.
size();
i++) {
2265 reinterpret_cast<void*
>(breakpoints_.at(
i).location),
2266 breakpoints_.at(
i).enabled ?
"enabled" :
"disabled",
2267 breakpoints_.at(
i).is_tbreak ?
": temporary" :
"");
2271void Simulator::CheckBreakpoints() {
2272 bool hit_a_breakpoint =
false;
2273 bool is_tbreak =
false;
2274 Instruction*
pc_ =
reinterpret_cast<Instruction*
>(get_pc());
2275 for (
unsigned i = 0;
i < breakpoints_.
size();
i++) {
2276 if ((breakpoints_.at(
i).location ==
pc_) && breakpoints_.at(
i).enabled) {
2277 hit_a_breakpoint =
true;
2278 if (breakpoints_.at(
i).is_tbreak) {
2281 breakpoints_.at(
i).enabled =
false;
2286 if (hit_a_breakpoint) {
2287 PrintF(
"Hit %sa breakpoint at %p.\n", is_tbreak ?
"and disabled " :
"",
2288 reinterpret_cast<void*
>(
pc_));
2289 RiscvDebugger dbg(
this);
2294bool Simulator::ICacheMatch(
void*
one,
void* two) {
2295 DCHECK_EQ(
reinterpret_cast<intptr_t
>(
one) & CachePage::kPageMask, 0);
2296 DCHECK_EQ(
reinterpret_cast<intptr_t
>(two) & CachePage::kPageMask, 0);
2300static uint32_t ICacheHash(
void*
key) {
2301 return static_cast<uint32_t
>(
reinterpret_cast<uintptr_t
>(
key)) >> 2;
2304static bool AllOnOnePage(uintptr_t
start,
size_t size) {
2305 intptr_t start_page = (
start & ~CachePage::kPageMask);
2306 intptr_t end_page = ((
start +
size) & ~CachePage::kPageMask);
2307 return start_page == end_page;
2310void Simulator::set_last_debugger_input(
char* input) {
2312 last_debugger_input_ = input;
2315void Simulator::SetRedirectInstruction(Instruction* instruction) {
2320 void* start_addr,
size_t size) {
2321 int64_t
start =
reinterpret_cast<int64_t
>(start_addr);
2322 int64_t intra_line = (
start & CachePage::kLineMask);
2323 start -= intra_line;
2325 size = ((size - 1) | CachePage::kLineMask) + 1;
2327 while (!AllOnOnePage(
start, size - 1)) {
2328 int bytes_to_flush = CachePage::kPageSize -
offset;
2329 FlushOnePage(i_cache,
start, bytes_to_flush);
2330 start += bytes_to_flush;
2331 size -= bytes_to_flush;
2336 FlushOnePage(i_cache,
start, size);
2343 if (entry->value ==
nullptr) {
2344 CachePage* new_page =
new CachePage();
2345 entry->value = new_page;
2347 return reinterpret_cast<CachePage*
>(entry->value);
2352 intptr_t
start,
size_t size) {
2356 DCHECK_EQ(size & CachePage::kLineMask, 0);
2357 void* page =
reinterpret_cast<void*
>(
start & (~CachePage::kPageMask));
2359 CachePage* cache_page = GetCachePage(i_cache, page);
2360 char* valid_bytemap = cache_page->ValidityByte(
offset);
2361 memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
2365 Instruction*
instr) {
2366 sreg_t address =
reinterpret_cast<sreg_t
>(
instr);
2367 void* page =
reinterpret_cast<void*
>(address & (~CachePage::kPageMask));
2368 void* line =
reinterpret_cast<void*
>(address & (~CachePage::kLineMask));
2369 int offset = (address & CachePage::kPageMask);
2370 CachePage* cache_page = GetCachePage(i_cache, page);
2371 char* cache_valid_byte = cache_page->ValidityByte(
offset);
2372 bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
2373 char* cached_line = cache_page->CachedData(
offset & ~CachePage::kLineMask);
2380 memcpy(cached_line, line, CachePage::kLineLength);
2381 *cache_valid_byte = CachePage::LINE_VALID;
2385Simulator::Simulator(Isolate* isolate) :
isolate_(isolate), builtins_(isolate) {
2389 size_t stack_size = AllocatedStackSize();
2391 stack_ =
reinterpret_cast<uintptr_t
>(
new uint8_t[stack_size]());
2394 pc_modified_ =
false;
2398 breakpoints_.clear();
2409 FPUregisters_[
i] = 0;
2423 last_debugger_input_ =
nullptr;
2424#ifdef CAN_USE_RVV_INSTRUCTIONS
2439Simulator::~Simulator() {
2440 GlobalMonitor::Get()->RemoveLinkedAddress(&global_monitor_thread_);
2441 delete[]
reinterpret_cast<uint8_t*
>(stack_);
2445Simulator* Simulator::current(Isolate* isolate) {
2447 isolate->FindOrAllocatePerThreadDataForThisThread();
2450 Simulator* sim = isolate_data->simulator();
2451 if (sim ==
nullptr) {
2453 sim =
new Simulator(isolate);
2454 isolate_data->set_simulator(sim);
2461void Simulator::set_register(
int reg, sreg_t value) {
2464 pc_modified_ =
true;
2471void Simulator::set_fpu_register(
int fpureg, int64_t value) {
2472 DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
2473 FPUregisters_[fpureg] =
value;
2476void Simulator::set_fpu_register_word(
int fpureg, int32_t value) {
2478 DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
2481 pword =
reinterpret_cast<int32_t*
>(&FPUregisters_[fpureg]);
2483 pword =
reinterpret_cast<int32_t*
>(&FPUregisters_[fpureg]) + 1;
2488void Simulator::set_fpu_register_hi_word(
int fpureg, int32_t value) {
2490 DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
2493 phiword = (
reinterpret_cast<int32_t*
>(&FPUregisters_[fpureg])) + 1;
2495 phiword =
reinterpret_cast<int32_t*
>(&FPUregisters_[fpureg]);
2500void Simulator::set_fpu_register_float(
int fpureg,
float value) {
2501 DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
2502 FPUregisters_[fpureg] = box_float(value);
2505void Simulator::set_fpu_register_float(
int fpureg, Float32 value) {
2506 DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
2507 Float64 t = Float64::FromBits(box_float(value.get_bits()));
2508 memcpy(&FPUregisters_[fpureg], &t, 8);
2511void Simulator::set_fpu_register_double(
int fpureg,
double value) {
2512 DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
2513 FPUregisters_[fpureg] = base::bit_cast<int64_t>(value);
2516void Simulator::set_fpu_register_double(
int fpureg, Float64 value) {
2517 DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
2518 memcpy(&FPUregisters_[fpureg], &value, 8);
2522sreg_t Simulator::get_register(
int reg)
const {
2530double Simulator::get_double_from_register_pair(
int reg) {
2534 double dm_val = 0.0;
2539 memcpy(&dm_val, buffer,
sizeof(
registers_[0]));
2543int64_t Simulator::get_fpu_register(
int fpureg)
const {
2544 DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
2545 return FPUregisters_[fpureg];
2548int32_t Simulator::get_fpu_register_word(
int fpureg)
const {
2549 DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
2550 return static_cast<int32_t>(FPUregisters_[fpureg] & 0xFFFFFFFF);
2553int32_t Simulator::get_fpu_register_signed_word(
int fpureg)
const {
2554 DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
2555 return static_cast<int32_t>(FPUregisters_[fpureg] & 0xFFFFFFFF);
2558int32_t Simulator::get_fpu_register_hi_word(
int fpureg)
const {
2559 DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
2560 return static_cast<int32_t>((FPUregisters_[fpureg] >> 32) & 0xFFFFFFFF);
2563float Simulator::get_fpu_register_float(
int fpureg)
const {
2564 DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
2565 if (!is_boxed_float(FPUregisters_[fpureg])) {
2566 return std::numeric_limits<float>::quiet_NaN();
2568 return Float32::FromBits(FPUregisters_[fpureg] & 0xFFFF'FFFF).get_scalar();
2573Float32 Simulator::get_fpu_register_Float32(
int fpureg,
2574 bool check_nanbox)
const {
2575 DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
2576 if (check_nanbox && !is_boxed_float(FPUregisters_[fpureg])) {
2577 std::cout << std::hex << FPUregisters_[fpureg] << std::endl;
2578 return Float32::FromBits(0x7fc00000);
2580 return Float32::FromBits(FPUregisters_[fpureg] & 0xFFFF'FFFF);
2583double Simulator::get_fpu_register_double(
int fpureg)
const {
2584 DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
2585 return base::bit_cast<double>(FPUregisters_[fpureg]);
2588Float64 Simulator::get_fpu_register_Float64(
int fpureg)
const {
2589 DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
2590 return Float64::FromBits(FPUregisters_[fpureg]);
2593#ifdef CAN_USE_RVV_INSTRUCTIONS
2594__int128_t Simulator::get_vregister(
int vreg)
const {
2595 DCHECK((vreg >= 0) && (vreg < kNumVRegisters));
2596 return Vregister_[vreg];
2603void Simulator::GetFpArgs(
double*
x,
double*
y, int32_t*
z) {
2604 *
x = get_fpu_register_double(fa0);
2605 *
y = get_fpu_register_double(fa1);
2606 *
z =
static_cast<int32_t>(get_register(a0));
2610void Simulator::SetFpResult(
const double&
result) {
2611 set_fpu_register_double(fa0,
result);
uint32_t Simulator::read_csr_value(uint32_t csr) {
  switch (csr) {
    case csr_fflags:  // Floating-Point Accrued Exceptions (RW)
      return (FCSR_ & kFcsrFlagsMask);
    case csr_frm:  // Floating-Point Dynamic Rounding Mode (RW)
      return (FCSR_ & kFcsrFrmMask) >> kFcsrFrmShift;
    case csr_fcsr:  // Floating-Point Control and Status Register (RW)
      return (FCSR_ & kFcsrMask);
  }
}

uint32_t Simulator::get_dynamic_rounding_mode() {
  return read_csr_value(csr_frm);
}

void Simulator::write_csr_value(uint32_t csr, reg_t val) {
  uint32_t value = (uint32_t)val;
  switch (csr) {
    case csr_fflags:  // Floating-Point Accrued Exceptions (RW)
      DCHECK(value <= ((1 << kFcsrFlagsBits) - 1));
      FCSR_ = (FCSR_ & (~kFcsrFlagsMask)) | value;
      break;
    case csr_frm:  // Floating-Point Dynamic Rounding Mode (RW)
      DCHECK(value <= ((1 << kFcsrFrmBits) - 1));
      FCSR_ = (FCSR_ & (~kFcsrFrmMask)) | (value << kFcsrFrmShift);
      break;
    case csr_fcsr:  // Floating-Point Control and Status Register (RW)
      DCHECK(value <= ((1 << kFcsrBits) - 1));
      FCSR_ = (FCSR_ & (~kFcsrMask)) | value;
      break;
  }
}

void Simulator::set_csr_bits(uint32_t csr, reg_t val) {
  uint32_t value = (uint32_t)val;
  switch (csr) {
    case csr_fflags:
      DCHECK(value <= ((1 << kFcsrFlagsBits) - 1));
      FCSR_ = FCSR_ | value;
      break;
    case csr_frm:
      DCHECK(value <= ((1 << kFcsrFrmBits) - 1));
      FCSR_ = FCSR_ | (value << kFcsrFrmShift);
      break;
    case csr_fcsr:
      DCHECK(value <= ((1 << kFcsrBits) - 1));
      FCSR_ = FCSR_ | value;
      break;
  }
}

void Simulator::clear_csr_bits(uint32_t csr, reg_t val) {
  uint32_t value = (uint32_t)val;
  switch (csr) {
    case csr_fflags:
      DCHECK(value <= ((1 << kFcsrFlagsBits) - 1));
      FCSR_ = FCSR_ & (~value);
      break;
    case csr_frm:
      DCHECK(value <= ((1 << kFcsrFrmBits) - 1));
      FCSR_ = FCSR_ & (~(value << kFcsrFrmShift));
      break;
    case csr_fcsr:
      DCHECK(value <= ((1 << kFcsrBits) - 1));
      FCSR_ = FCSR_ & (~value);
      break;
  }
}
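
// Illustrative sketch (not part of the simulator source): the masks used by
// the CSR helpers above follow the standard RISC-V fcsr layout, where the
// accrued exception flags occupy bits [4:0] and the dynamic rounding mode
// bits [7:5]. In other words, constants consistent with the code above are:
//
//   constexpr uint32_t kFcsrFlagsBits = 5;
//   constexpr uint32_t kFcsrFrmBits = 3;
//   constexpr uint32_t kFcsrBits = kFcsrFlagsBits + kFcsrFrmBits;    // 8
//   constexpr uint32_t kFcsrFrmShift = kFcsrFlagsBits;               // 5
//   constexpr uint32_t kFcsrFlagsMask = (1 << kFcsrFlagsBits) - 1;   // 0x1f
//   constexpr uint32_t kFcsrFrmMask =
//       ((1 << kFcsrFrmBits) - 1) << kFcsrFrmShift;                  // 0xe0
//   constexpr uint32_t kFcsrMask = kFcsrFlagsMask | kFcsrFrmMask;    // 0xff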
bool Simulator::test_fflags_bits(uint32_t mask) {
  return (FCSR_ & kFcsrFlagsMask & mask) != 0;
}
template <typename T>
T Simulator::FMaxMinHelper(T a, T b, MaxMinKind kind) {
  // Signaling NaN inputs raise the invalid-operation flag.
  if ((a == std::numeric_limits<T>::signaling_NaN()) ||
      (b == std::numeric_limits<T>::signaling_NaN())) {
    set_csr_bits(csr_fflags, kInvalidOperation);
  }

  T result = 0;
  if (std::isnan(a) && std::isnan(b)) {
    result = std::numeric_limits<float>::quiet_NaN();
  } else if (std::isnan(a)) {
    result = b;
  } else if (std::isnan(b)) {
    result = a;
  } else if (b == a) {
    // Handle the -0.0 == +0.0 case: max prefers +0.0, min prefers -0.0.
    if (kind == MaxMinKind::kMax) {
      result = std::signbit(b) ? a : b;
    } else {
      result = std::signbit(b) ? b : a;
    }
  } else {
    result = (kind == MaxMinKind::kMax) ? fmax(a, b) : fmin(a, b);
  }

  return result;
}
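
// Illustrative usage (not part of the simulator source): RISC-V FMIN/FMAX
// return the non-NaN operand when exactly one input is NaN, the canonical
// NaN when both are NaN, and treat -0.0 as smaller than +0.0. Assuming a
// Simulator instance `sim`, the helper above therefore behaves like:
//
//   sim.FMaxMinHelper(1.0f, std::nanf(""), MaxMinKind::kMax);  // == 1.0f
//   sim.FMaxMinHelper(-0.0f, +0.0f, MaxMinKind::kMax);         // == +0.0f
//   sim.FMaxMinHelper(-0.0f, +0.0f, MaxMinKind::kMin);         // == -0.0f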
void Simulator::set_pc(sreg_t value) {
  pc_modified_ = true;
  registers_[pc] = value;
  DCHECK(has_bad_pc() || ((value % kInstrSize) == 0) ||
         ((value % kShortInstrSize) == 0));
}

bool Simulator::has_bad_pc() const {
  return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc));
}

sreg_t Simulator::get_pc() const { return registers_[pc]; }

void Simulator::DieOrDebug() {
  if (v8_flags.riscv_trap_to_simulator_debugger) {
    RiscvDebugger dbg(this);
    dbg.Debug();
  } else {
    base::OS::Abort();
  }
}
2753#if V8_TARGET_ARCH_RISCV64
2754void Simulator::TraceRegWr(int64_t value, TraceType t) {
2762 v.fmt_int64 =
value;
2767 "%016" REGIx_FORMAT
" (%" PRId64
") int32:%" PRId32
2769 v.fmt_int64, icount_, v.fmt_int32[0], v.fmt_int32[0]);
2773 "%016" REGIx_FORMAT
" (%" PRId64
") int64:%" REGId_FORMAT
2775 value, icount_, value, value);
2778 SNPrintF(trace_buf_,
"%016" REGIx_FORMAT
" (%" PRId64
") flt:%e",
2779 v.fmt_int64, icount_, v.fmt_float[0]);
2782 SNPrintF(trace_buf_,
"%016" REGIx_FORMAT
" (%" PRId64
") dbl:%e",
2783 v.fmt_int64, icount_, v.fmt_double);
2791#elif V8_TARGET_ARCH_RISCV32
2792template <
typename T>
2793void Simulator::TraceRegWr(T value, TraceType t) {
2801 v.fmt_int32 =
value;
2804 v.fmt_double =
value;
2809 "%016" REGIx_FORMAT
" (%" PRId64
") int32:%" REGId_FORMAT
2811 v.fmt_int32, icount_, v.fmt_int32, v.fmt_int32);
2814 SNPrintF(trace_buf_,
"%016" REGIx_FORMAT
" (%" PRId64
") flt:%e",
2815 v.fmt_int32, icount_, v.fmt_float);
2818 SNPrintF(trace_buf_,
"%016" PRIx64
" (%" PRId64
") dbl:%e",
2819 static_cast<int64_t
>(v.fmt_double), icount_, v.fmt_double);
2829template <
typename T>
2830void Simulator::TraceMemRd(sreg_t addr, T value, sreg_t reg_value) {
2832 if (std::is_integral<T>::value) {
2833 switch (
sizeof(T)) {
2836 "%016" REGIx_FORMAT
" (%" PRId64
") int8:%" PRId8
2837 " uint8:%" PRIu8
" <-- [addr: %" REGIx_FORMAT
"]",
2838 reg_value, icount_,
static_cast<int8_t
>(value),
2839 static_cast<uint8_t
>(value), addr);
2843 "%016" REGIx_FORMAT
" (%" PRId64
") int16:%" PRId16
2844 " uint16:%" PRIu16
" <-- [addr: %" REGIx_FORMAT
"]",
2845 reg_value, icount_,
static_cast<int16_t>(value),
2846 static_cast<uint16_t>(value), addr);
2850 "%016" REGIx_FORMAT
" (%" PRId64
") int32:%" PRId32
2851 " uint32:%" PRIu32
" <-- [addr: %" REGIx_FORMAT
"]",
2852 reg_value, icount_,
static_cast<int32_t>(value),
2853 static_cast<uint32_t
>(value), addr);
2857 "%016" REGIx_FORMAT
" (%" PRId64
") int64:%" PRId64
2858 " uint64:%" PRIu64
" <-- [addr: %" REGIx_FORMAT
"]",
2859 reg_value, icount_,
static_cast<int64_t
>(value),
2860 static_cast<uint64_t
>(value), addr);
2865 }
else if (std::is_same<float, T>::value) {
2867 "%016" REGIx_FORMAT
" (%" PRId64
2868 ") flt:%e <-- [addr: %" REGIx_FORMAT
"]",
2869 reg_value, icount_,
static_cast<float>(value), addr);
2870 }
else if (std::is_same<double, T>::value) {
2872 "%016" REGIx_FORMAT
" (%" PRId64
2873 ") dbl:%e <-- [addr: %" REGIx_FORMAT
"]",
2874 reg_value, icount_,
static_cast<double>(value), addr);
void Simulator::TraceMemRdFloat(sreg_t addr, Float32 value,
                                int64_t reg_value) {
  SNPrintF(trace_buf_,
           "%016" PRIx64 " (%" PRId64 ") flt:%e <-- [addr: %" REGIx_FORMAT
           "]",
           reg_value, icount_, static_cast<float>(value.get_scalar()), addr);
}

void Simulator::TraceMemRdDouble(sreg_t addr, double value,
                                 int64_t reg_value) {
  SNPrintF(trace_buf_,
           "%016" PRIx64 " (%" PRId64 ") dbl:%e <-- [addr: %" REGIx_FORMAT
           "]",
           reg_value, icount_, static_cast<double>(value), addr);
}

void Simulator::TraceMemRdDouble(sreg_t addr, Float64 value,
                                 int64_t reg_value) {
  SNPrintF(trace_buf_,
           "%016" PRIx64 " (%" PRId64 ") dbl:%e <-- [addr: %" REGIx_FORMAT
           "]",
           reg_value, icount_, static_cast<double>(value.get_scalar()), addr);
}
2909template <
typename T>
2910void Simulator::TraceMemWr(sreg_t addr, T value) {
2912 switch (
sizeof(T)) {
2915 " (%" PRIu64
") int8:%" PRId8
2916 " uint8:%" PRIu8
" --> [addr: %" REGIx_FORMAT
"]",
2917 icount_,
static_cast<int8_t
>(value),
2918 static_cast<uint8_t
>(value), addr);
2922 " (%" PRIu64
") int16:%" PRId16
2923 " uint16:%" PRIu16
" --> [addr: %" REGIx_FORMAT
"]",
2924 icount_,
static_cast<int16_t>(value),
2925 static_cast<uint16_t>(value), addr);
2928 if (std::is_integral<T>::value) {
2930 " (%" PRIu64
") int32:%" PRId32
2931 " uint32:%" PRIu32
" --> [addr: %" REGIx_FORMAT
"]",
2932 icount_,
static_cast<int32_t>(value),
2933 static_cast<uint32_t
>(value), addr);
2937 ") flt:%e bit:%x --> [addr: %" REGIx_FORMAT
"]",
2938 icount_,
static_cast<float>(value),
2939 base::bit_cast<int32_t, float>(value), addr);
2943 if (std::is_integral<T>::value) {
2945 " (%" PRIu64
") int64:%" PRId64
2946 " uint64:%" PRIu64
" --> [addr: %" REGIx_FORMAT
"]",
2947 icount_,
static_cast<int64_t
>(value),
2948 static_cast<uint64_t
>(value), addr);
2951 " (%" PRIu64
") dbl:%e bit:%" PRIx64
2952 " --> [addr: %" REGIx_FORMAT
"]",
2953 icount_,
static_cast<double>(value),
2954 base::bit_cast<int64_t, double>(value), addr);
void Simulator::TraceMemWrDouble(sreg_t addr, double value) {
  SNPrintF(trace_buf_,
           " (%" PRIu64 ") dbl:%e bit:%" PRIx64 "--> [addr: %" REGIx_FORMAT
           "]",
           icount_, value, base::bit_cast<int64_t, double>(value), addr);
}
bool Simulator::ProbeMemory(uintptr_t address, uintptr_t access_size) {
#if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED
  uintptr_t last_accessed_byte = address + access_size - 1;
  uintptr_t current_pc = registers_[pc];
  uintptr_t landing_pad =
      trap_handler::ProbeMemory(last_accessed_byte, current_pc);
  if (!landing_pad) return true;
  set_pc(landing_pad);
  return false;
#else
  return true;
#endif
}
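
// Illustrative sketch (not part of the simulator source): memory-touching
// instruction handlers later in this file use ProbeMemory() as an early-out,
// so that a WebAssembly out-of-bounds access redirects the simulated pc to
// the trap handler's landing pad instead of faulting inside ReadMem/WriteMem:
//
//   sreg_t addr = rs1() + imm12();
//   if (!ProbeMemory(addr, sizeof(int32_t))) return;  // pc now at landing pad
//   int32_t val = ReadMem<int32_t>(addr, instr_.instr());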
template <typename T>
T Simulator::ReadMem(sreg_t addr, Instruction* instr) {
  if (addr >= 0 && addr < 0x400) {
    // This has to be a nullptr dereference; drop into the debugger.
    PrintF("Memory read from bad address: 0x%08" REGIx_FORMAT
           " , pc=0x%08" PRIxPTR " \n",
           addr, reinterpret_cast<intptr_t>(instr));
    DieOrDebug();
  }
#if !defined(V8_COMPRESS_POINTERS) && defined(RISCV_HAS_NO_UNALIGNED)
  // Check for natural alignment unless the C extension is enabled.
  if (!v8_flags.riscv_c_extension && ((addr & (sizeof(T) - 1)) != 0)) {
    PrintF("Unaligned read at 0x%08" REGIx_FORMAT " , pc=0x%08" V8PRIxPTR "\n",
           addr, reinterpret_cast<intptr_t>(instr));
    DieOrDebug();
  }
#endif
  T* ptr = reinterpret_cast<T*>(addr);
  T value = *ptr;
  return value;
}
template <typename T>
void Simulator::WriteMem(sreg_t addr, T value, Instruction* instr) {
  if (addr >= 0 && addr < 0x400) {
    // This has to be a nullptr dereference; drop into the debugger.
    PrintF("Memory write to bad address: 0x%08" REGIx_FORMAT
           " , pc=0x%08" PRIxPTR " \n",
           addr, reinterpret_cast<intptr_t>(instr));
    DieOrDebug();
  }
#if !defined(V8_COMPRESS_POINTERS) && defined(RISCV_HAS_NO_UNALIGNED)
  // Check for natural alignment unless the C extension is enabled.
  if (!v8_flags.riscv_c_extension && ((addr & (sizeof(T) - 1)) != 0)) {
    PrintF("Unaligned write at 0x%08" REGIx_FORMAT " , pc=0x%08" V8PRIxPTR "\n",
           addr, reinterpret_cast<intptr_t>(instr));
    DieOrDebug();
  }
#endif
  T* ptr = reinterpret_cast<T*>(addr);
  if (!std::is_same<double, T>::value) {
    TraceMemWr(addr, value);
  } else {
    TraceMemWrDouble(addr, value);
  }
  *ptr = value;
}
void Simulator::WriteMem(sreg_t addr, Float32 value, Instruction* instr) {
  if (addr >= 0 && addr < 0x400) {
    // This has to be a nullptr dereference; drop into the debugger.
    PrintF("Memory write to bad address: 0x%08" REGIx_FORMAT
           " , pc=0x%08" PRIxPTR " \n",
           addr, reinterpret_cast<intptr_t>(instr));
    DieOrDebug();
  }
#if !defined(V8_COMPRESS_POINTERS) && defined(RISCV_HAS_NO_UNALIGNED)
  // Check for natural alignment unless the C extension is enabled.
  if (!v8_flags.riscv_c_extension && ((addr & (sizeof(float) - 1)) != 0)) {
    PrintF("Unaligned write at 0x%08" REGIx_FORMAT " , pc=0x%08" V8PRIxPTR "\n",
           addr, reinterpret_cast<intptr_t>(instr));
    DieOrDebug();
  }
#endif
  float* ptr = reinterpret_cast<float*>(addr);
  TraceMemWr(addr, value.get_scalar());
  memcpy(ptr, &value, 4);
}
void Simulator::WriteMem(sreg_t addr, Float64 value, Instruction* instr) {
  if (addr >= 0 && addr < 0x400) {
    // This has to be a nullptr dereference; drop into the debugger.
    PrintF("Memory write to bad address: 0x%08" REGIx_FORMAT
           " , pc=0x%08" PRIxPTR " \n",
           addr, reinterpret_cast<intptr_t>(instr));
    DieOrDebug();
  }
#if !defined(V8_COMPRESS_POINTERS) && defined(RISCV_HAS_NO_UNALIGNED)
  // Check for natural alignment unless the C extension is enabled.
  if (!v8_flags.riscv_c_extension && ((addr & (sizeof(double) - 1)) != 0)) {
    PrintF("Unaligned write at 0x%08" REGIx_FORMAT " , pc=0x%08" V8PRIxPTR "\n",
           addr, reinterpret_cast<intptr_t>(instr));
    DieOrDebug();
  }
#endif
  double* ptr = reinterpret_cast<double*>(addr);
  TraceMemWrDouble(addr, value.get_scalar());
  memcpy(ptr, &value, 8);
}
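
// Note on the stores above (sketch, not from the source): copying the raw
// bits with memcpy avoids both strict-aliasing violations and misaligned
// stores that a direct `*reinterpret_cast<float*>(addr) = ...` could produce
// on hosts that trap on unaligned access. The same pattern works for reads;
// the helper name below is hypothetical:
//
//   float ReadFloatBits(const void* p) {
//     float f;
//     memcpy(&f, p, sizeof(f));  // byte-wise copy, no aliasing assumptions
//     return f;
//   }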
3084uintptr_t Simulator::StackLimit(uintptr_t c_limit)
const {
3088 return reinterpret_cast<uintptr_t
>(get_sp());
uintptr_t Simulator::StackBase() const { return stack_ + UsableStackSize(); }
3098base::Vector<uint8_t> Simulator::GetCentralStackView()
const {
3102 return base::VectorOf(
3103 reinterpret_cast<uint8_t*
>(stack_ + kStackProtectionSize),
3112 visitor->
VisitPointer(
reinterpret_cast<const void*
>(get_register(
i)));
3114 for (
const void*
const* current =
3115 reinterpret_cast<const void* const*
>(get_sp());
3117 const void* address = *
current;
3118 if (address ==
nullptr) {
void Simulator::Format(Instruction* instr, const char* format) {
  PrintF("Simulator found unsupported instruction:\n 0x%08" PRIxPTR " : %s\n",
         reinterpret_cast<intptr_t>(instr), format);
}
#if V8_TARGET_ARCH_RISCV64
using SimulatorRuntimeCall = ObjectPair (*)(
#elif V8_TARGET_ARCH_RISCV32
using SimulatorRuntimeCall = int64_t (*)(
#endif
    sreg_t arg0, sreg_t arg1, sreg_t arg2, sreg_t arg3, sreg_t arg4,
    sreg_t arg5, sreg_t arg6, sreg_t arg7, sreg_t arg8, sreg_t arg9,
    sreg_t arg10, sreg_t arg11, sreg_t arg12, sreg_t arg13, sreg_t arg14,
    sreg_t arg15, sreg_t arg16, sreg_t arg17, sreg_t arg18, sreg_t arg19);

// These prototypes handle the four types of FP calls.
using SimulatorRuntimeCompareCall = int64_t (*)(double darg0, double darg1);
using SimulatorRuntimeFPFPCall = double (*)(double darg0, double darg1);
using SimulatorRuntimeFPCall = double (*)(double darg0);
using SimulatorRuntimeFPIntCall = double (*)(double darg0, int32_t arg0);
using SimulatorRuntimeIntFPCall = int32_t (*)(double darg0);

// This signature supports direct calls into an API function's native
// callback.
using SimulatorRuntimeDirectApiCall = void (*)(sreg_t arg0);

// This signature supports direct calls to accessor getter callbacks.
using SimulatorRuntimeDirectGetterCall = void (*)(sreg_t arg0, sreg_t arg1);

using SimulatorRuntimeFPTaggedCall = double (*)(int64_t arg0, int64_t arg1,
                                                int64_t arg2, int64_t arg3);
3167#ifdef V8_TARGET_ARCH_RISCV64
3168using MixedRuntimeCall_0 = AnyCType (*)();
3169#define BRACKETS(ident, N) ident[N]
3170#define REP_0(expr, FMT)
3171#define REP_1(expr, FMT) FMT(expr, 0)
3172#define REP_2(expr, FMT) REP_1(expr, FMT), FMT(expr, 1)
3173#define REP_3(expr, FMT) REP_2(expr, FMT), FMT(expr, 2)
3174#define REP_4(expr, FMT) REP_3(expr, FMT), FMT(expr, 3)
3175#define REP_5(expr, FMT) REP_4(expr, FMT), FMT(expr, 4)
3176#define REP_6(expr, FMT) REP_5(expr, FMT), FMT(expr, 5)
3177#define REP_7(expr, FMT) REP_6(expr, FMT), FMT(expr, 6)
3178#define REP_8(expr, FMT) REP_7(expr, FMT), FMT(expr, 7)
3179#define REP_9(expr, FMT) REP_8(expr, FMT), FMT(expr, 8)
3180#define REP_10(expr, FMT) REP_9(expr, FMT), FMT(expr, 9)
3181#define REP_11(expr, FMT) REP_10(expr, FMT), FMT(expr, 10)
3182#define REP_12(expr, FMT) REP_11(expr, FMT), FMT(expr, 11)
3183#define REP_13(expr, FMT) REP_12(expr, FMT), FMT(expr, 12)
3184#define REP_14(expr, FMT) REP_13(expr, FMT), FMT(expr, 13)
3185#define REP_15(expr, FMT) REP_14(expr, FMT), FMT(expr, 14)
3186#define REP_16(expr, FMT) REP_15(expr, FMT), FMT(expr, 15)
3187#define REP_17(expr, FMT) REP_16(expr, FMT), FMT(expr, 16)
3188#define REP_18(expr, FMT) REP_17(expr, FMT), FMT(expr, 17)
3189#define REP_19(expr, FMT) REP_18(expr, FMT), FMT(expr, 18)
3190#define REP_20(expr, FMT) REP_19(expr, FMT), FMT(expr, 19)
3191#define GEN_MAX_PARAM_COUNT(V) \
3213#define MIXED_RUNTIME_CALL(N) \
3214 using MixedRuntimeCall_##N = AnyCType (*)(REP_##N(AnyCType arg, CONCAT));
3215GEN_MAX_PARAM_COUNT(MIXED_RUNTIME_CALL)
3216#undef MIXED_RUNTIME_CALL
3217#define CALL_ARGS(N) REP_##N(args, BRACKETS)
3218#define CALL_TARGET_VARARG(N) \
3219 if (signature.ParameterCount() == N) { \
3220 MixedRuntimeCall_##N target = \
3221 reinterpret_cast<MixedRuntimeCall_##N>(target_address); \
3222 result = target(CALL_ARGS(N)); \
3224#define PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7
3225#define RETURN_REGISTER a0
3226#define FP_PARAM_REGISTERS fa0, fa1, fa2, fa3, fa4, fa5, fa6, fa7
3227#define FP_RETURN_REGISTER fa0
3228void Simulator::CallAnyCTypeFunction(Address target_address,
3229 const EncodedCSignature& signature) {
3230 const int64_t* stack_pointer =
reinterpret_cast<int64_t*
>(get_register(sp));
3231 const double* double_stack_pointer =
3232 reinterpret_cast<double*
>(get_register(sp));
3233 const Register kParamRegisters[] = {PARAM_REGISTERS};
3234 const FPURegister kFPParamRegisters[] = {FP_PARAM_REGISTERS};
3235 CHECK_LE(signature.ParameterCount(), kMaxCParameters);
3236 static_assert(
sizeof(AnyCType) == 8,
"AnyCType is assumed to be 64-bit.");
3237 AnyCType
args[kMaxCParameters];
3238 int num_gp_params = 0, num_fp_params = 0, num_stack_params = 0;
3239 for (
int i = 0;
i < signature.ParameterCount(); ++
i) {
3240 if (signature.IsFloat(
i)) {
3241 if (num_fp_params < 8) {
3242 args[
i].double_value =
3243 get_fpu_register_double(kFPParamRegisters[num_fp_params++]);
3245 args[
i].double_value = double_stack_pointer[num_stack_params++];
3248 if (num_gp_params < 8) {
3249 args[
i].int64_value = get_register(kParamRegisters[num_gp_params++]);
3251 args[
i].int64_value = stack_pointer[num_stack_params++];
3256 GEN_MAX_PARAM_COUNT(CALL_TARGET_VARARG)
3260 static_assert(20 == kMaxCParameters,
3261 "If you've changed kMaxCParameters, please change the "
3262 "GEN_MAX_PARAM_COUNT macro.");
3264 printf(
"CallAnyCTypeFunction end result \n");
3266#undef CALL_TARGET_VARARG
3268#undef GEN_MAX_PARAM_COUNT
3269 if (signature.IsReturnFloat()) {
3270 if (signature.IsReturnFloat64()) {
3271 set_fpu_register_double(FP_RETURN_REGISTER, result.double_value);
3273 set_fpu_register_float(FP_RETURN_REGISTER, result.float_value);
3276 set_register(RETURN_REGISTER,
result.int64_value);
3279#undef PARAM_REGISTERS
3280#undef RETURN_REGISTER
3281#undef FP_PARAM_REGISTERS
3282#undef FP_RETURN_REGISTER
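
// Illustrative expansion (not part of the simulator source): the REP_N
// helpers above stamp out one function-pointer type per parameter count.
// For example, MIXED_RUNTIME_CALL(3) expands (via REP_3 and CONCAT) roughly
// to:
//
//   using MixedRuntimeCall_3 = AnyCType (*)(AnyCType arg0, AnyCType arg1,
//                                           AnyCType arg2);
//
// and CALL_ARGS(3) expands (via BRACKETS) to `args[0], args[1], args[2]`, so
// CALL_TARGET_VARARG(3) casts target_address to MixedRuntimeCall_3 and calls
// it with the first three marshalled arguments when the signature declares
// exactly three parameters.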
3287void Simulator::SoftwareInterrupt() {
3291 int32_t func = instr_.Imm12Value();
3293 if (instr_.InstructionBits() == rtCallRedirInstr) {
3294 Redirection* redirection = Redirection::FromInstruction(instr_.instr());
3297 int64_t saved_ra = get_register(ra);
3299 reinterpret_cast<intptr_t
>(redirection->external_function());
3300#ifdef V8_TARGET_ARCH_RISCV64
3302 reinterpret_cast<Address>(redirection->external_function());
3303 SimulatorData* simulator_data =
isolate_->simulator_data();
3305 const EncodedCSignature& signature =
3306 simulator_data->GetSignatureForTarget(func_addr);
3307 if (signature.IsValid()) {
3308 CHECK_EQ(redirection->type(), ExternalReference::FAST_C_CALL);
3309 CallAnyCTypeFunction(external, signature);
3310 set_register(ra, saved_ra);
3311 set_pc(get_register(ra));
3316 sreg_t* stack_pointer =
reinterpret_cast<sreg_t*
>(get_register(sp));
3318 const sreg_t arg0 = get_register(a0);
3319 const sreg_t arg1 = get_register(a1);
3320 const sreg_t arg2 = get_register(a2);
3321 const sreg_t arg3 = get_register(a3);
3322 const sreg_t arg4 = get_register(a4);
3323 const sreg_t arg5 = get_register(a5);
3324 const sreg_t arg6 = get_register(a6);
3325 const sreg_t arg7 = get_register(a7);
3326 const sreg_t arg8 = stack_pointer[0];
3327 const sreg_t arg9 = stack_pointer[1];
3328 const sreg_t arg10 = stack_pointer[2];
3329 const sreg_t arg11 = stack_pointer[3];
3330 const sreg_t arg12 = stack_pointer[4];
3331 const sreg_t arg13 = stack_pointer[5];
3332 const sreg_t arg14 = stack_pointer[6];
3333 const sreg_t arg15 = stack_pointer[7];
3334 const sreg_t arg16 = stack_pointer[8];
3335 const sreg_t arg17 = stack_pointer[9];
3336 const sreg_t arg18 = stack_pointer[10];
3337 const sreg_t arg19 = stack_pointer[11];
3338 static_assert(kMaxCParameters == 20);
3341 (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
3342 (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
3343 (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
3344 (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL) ||
3345 (redirection->type() == ExternalReference::BUILTIN_INT_FP_CALL);
3347 sreg_t
pc = get_pc();
3350 double dval0, dval1;
3352 int64_t iresult = 0;
3354 GetFpArgs(&dval0, &dval1, &ival);
3355 SimulatorRuntimeCall generic_target =
3356 reinterpret_cast<SimulatorRuntimeCall
>(external);
3358 switch (redirection->type()) {
3359 case ExternalReference::BUILTIN_FP_FP_CALL:
3360 case ExternalReference::BUILTIN_COMPARE_CALL:
3361 PrintF(
"Call to host function %s at %p with args %f, %f",
3362 ExternalReferenceTable::NameOfIsolateIndependentAddress(
3363 pc, IsolateGroup::current()->external_ref_table()),
3367 case ExternalReference::BUILTIN_FP_CALL:
3368 PrintF(
"Call to host function %s at %p with arg %f",
3369 ExternalReferenceTable::NameOfIsolateIndependentAddress(
3370 pc, IsolateGroup::current()->external_ref_table()),
3374 case ExternalReference::BUILTIN_FP_INT_CALL:
3375 PrintF(
"Call to host function %s at %p with args %f, %d",
3376 ExternalReferenceTable::NameOfIsolateIndependentAddress(
3377 pc, IsolateGroup::current()->external_ref_table()),
3381 case ExternalReference::BUILTIN_INT_FP_CALL:
3382 PrintF(
"Call to host function %s at %p with args %f",
3383 ExternalReferenceTable::NameOfIsolateIndependentAddress(
3384 pc, IsolateGroup::current()->external_ref_table()),
3392 switch (redirection->type()) {
3393 case ExternalReference::BUILTIN_COMPARE_CALL: {
3394 SimulatorRuntimeCompareCall target =
3395 reinterpret_cast<SimulatorRuntimeCompareCall
>(external);
3396 iresult =
target(dval0, dval1);
3397 set_register(a0,
static_cast<sreg_t
>(iresult));
3401 case ExternalReference::BUILTIN_FP_FP_CALL: {
3402 SimulatorRuntimeFPFPCall target =
3403 reinterpret_cast<SimulatorRuntimeFPFPCall
>(external);
3404 dresult =
target(dval0, dval1);
3405 SetFpResult(dresult);
3408 case ExternalReference::BUILTIN_FP_CALL: {
3409 SimulatorRuntimeFPCall target =
3410 reinterpret_cast<SimulatorRuntimeFPCall
>(external);
3412 SetFpResult(dresult);
3415 case ExternalReference::BUILTIN_FP_INT_CALL: {
3416 SimulatorRuntimeFPIntCall target =
3417 reinterpret_cast<SimulatorRuntimeFPIntCall
>(external);
3418 dresult =
target(dval0, ival);
3419 SetFpResult(dresult);
3422 case ExternalReference::BUILTIN_INT_FP_CALL: {
3423 SimulatorRuntimeIntFPCall target =
3424 reinterpret_cast<SimulatorRuntimeIntFPCall
>(external);
3426 set_register(a0,
static_cast<int64_t
>(iresult));
3433 switch (redirection->type()) {
3434 case ExternalReference::BUILTIN_COMPARE_CALL:
3435 case ExternalReference::BUILTIN_INT_FP_CALL:
3438 case ExternalReference::BUILTIN_FP_FP_CALL:
3439 case ExternalReference::BUILTIN_FP_CALL:
3440 case ExternalReference::BUILTIN_FP_INT_CALL:
3441 PrintF(
"Returned %f\n", dresult);
3447 }
else if (redirection->type() ==
3448 ExternalReference::BUILTIN_FP_POINTER_CALL) {
3450 PrintF(
"Call to host function at %p args %08" REGIx_FORMAT
" \n",
3451 reinterpret_cast<void*
>(external), arg0);
3453 SimulatorRuntimeFPTaggedCall target =
3454 reinterpret_cast<SimulatorRuntimeFPTaggedCall
>(external);
3455 double dresult =
target(arg0, arg1, arg2, arg3);
3456 SetFpResult(dresult);
3458 PrintF(
"Returned %f\n", dresult);
3460 }
else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
3465 PrintF(
"Call to host function %s at %p args %08" REGIx_FORMAT
" \n",
3466 ExternalReferenceTable::NameOfIsolateIndependentAddress(
3467 pc, IsolateGroup::current()->external_ref_table()),
3468 reinterpret_cast<void*
>(external), arg0);
3470 SimulatorRuntimeDirectApiCall target =
3471 reinterpret_cast<SimulatorRuntimeDirectApiCall
>(external);
3473 }
else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
3478 PrintF(
"Call to host function at %p args %08" REGIx_FORMAT
3479 " %08" REGIx_FORMAT
" \n",
3480 reinterpret_cast<void*
>(external), arg0, arg1);
3482 SimulatorRuntimeDirectGetterCall target =
3483 reinterpret_cast<SimulatorRuntimeDirectGetterCall
>(external);
3486#ifdef V8_TARGET_ARCH_RISCV64
3487 DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
3488 redirection->type() == ExternalReference::BUILTIN_CALL_PAIR);
3500 DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
3501 redirection->type() == ExternalReference::BUILTIN_CALL_PAIR ||
3502 redirection->type() == ExternalReference::FAST_C_CALL);
3504 SimulatorRuntimeCall target =
3505 reinterpret_cast<SimulatorRuntimeCall
>(external);
3508 "Call to host function %s at %p "
3509 "args %08" REGIx_FORMAT
" , %08" REGIx_FORMAT
" , %08" REGIx_FORMAT
3510 " , %08" REGIx_FORMAT
" , %08" REGIx_FORMAT
" , %08" REGIx_FORMAT
3511 " , %08" REGIx_FORMAT
" , %08" REGIx_FORMAT
" , %08" REGIx_FORMAT
3512 " , %08" REGIx_FORMAT
" , %016" REGIx_FORMAT
" , %016" REGIx_FORMAT
3513 " , %016" REGIx_FORMAT
" , %016" REGIx_FORMAT
" , %016" REGIx_FORMAT
3514 " , %016" REGIx_FORMAT
" , %016" REGIx_FORMAT
" , %016" REGIx_FORMAT
3515 " , %016" REGIx_FORMAT
" , %016" REGIx_FORMAT
" \n",
3516 ExternalReferenceTable::NameOfIsolateIndependentAddress(
3517 pc, IsolateGroup::current()->external_ref_table()),
3518 reinterpret_cast<void*
>(
FUNCTION_ADDR(target)), arg0, arg1, arg2,
3519 arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12,
3520 arg13, arg14, arg15, arg16, arg17, arg18, arg19);
3522#if V8_TARGET_ARCH_RISCV64
3524 arg8, arg9, arg10, arg11, arg12, arg13, arg14,
3525 arg15, arg16, arg17, arg18, arg19);
3526 set_register(a0, (sreg_t)(
result.x));
3527 set_register(a1, (sreg_t)(
result.y));
3529#elif V8_TARGET_ARCH_RISCV32
3530 int64_t
result =
target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7,
3531 arg8, arg9, arg10, arg11, arg12, arg13, arg14,
3532 arg15, arg16, arg17, arg18, arg19);
3533 set_register(a0, (sreg_t)
result);
3534 set_register(a1, (sreg_t)(
result >> 32));
3538 PrintF(
"Returned %08" REGIx_FORMAT
" : %08" REGIx_FORMAT
" \n",
3539 get_register(a1), get_register(a0));
3541 set_register(ra, saved_ra);
3542 set_pc(get_register(ra));
3544 }
else if (func == 1) {
3545 int32_t code = get_ebreak_code(instr_.instr());
3546 set_pc(get_pc() + kInstrSize * 2);
3547 if (code != -1 &&
static_cast<uint32_t
>(code) <= kMaxStopCode) {
3548 if (IsWatchpoint(code)) {
3549 PrintWatchpoint(code);
3550 }
else if (IsTracepoint(code)) {
3552 PrintF(
"Add --debug-sim when tracepoint instruction is used.\n");
3555 Builtin builtin = LookUp((Address)get_pc());
3556 printf(
"%d %d %d %d\n", code, code & LOG_TRACE, code & LOG_REGS,
3557 code & kDebuggerTracingDirectivesMask);
        if (builtin != Builtin::kNoBuiltinId) {
          printf("Builtin: %s\n", builtins_.name(builtin));
        }
3561 switch (code & kDebuggerTracingDirectivesMask) {
3563 if (code & LOG_TRACE) {
3566 if (code & LOG_REGS) {
3567 RiscvDebugger dbg(
this);
3572 if (code & LOG_TRACE) {
3580 IncreaseStopCounter(code);
3583 }
else if (IsSwitchStackLimit(code)) {
3585 PrintF(
"Switching stack limit\n");
3587 DoSwitchStackLimit(instr_.instr());
3590 RiscvDebugger dbg(
this);
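
// Illustrative sketch (not part of the simulator source): the runtime-call
// path above works by having generated code jump to a special redirect
// instruction. SoftwareInterrupt() recognises it, recovers the host function
// pointer from the Redirection object, marshals simulated registers into
// host arguments, performs the native call through one of the
// SimulatorRuntime* function-pointer types, writes the results back into
// a0/a1 (or fa0 for FP results), and then resumes simulated execution:
//
//   int64_t saved_ra = get_register(ra);
//   // ... native call through the appropriate SimulatorRuntime* type ...
//   set_register(ra, saved_ra);
//   set_pc(get_register(ra));  // return to the simulated caller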
bool Simulator::IsWatchpoint(reg_t code) {
  return (code <= kMaxWatchpointCode);
}
3603bool Simulator::IsTracepoint(reg_t code) {
3607bool Simulator::IsSwitchStackLimit(reg_t code) {
void Simulator::PrintWatchpoint(reg_t code) {
  RiscvDebugger dbg(this);
  ++break_count_;
  PrintF("\n---- watchpoint %" REGId_FORMAT
         " marker: %3d (instr count: %8" PRId64
         ") ----------"
         "----------------------------------",
         code, break_count_, icount_);
  dbg.PrintAllRegs();
}
void Simulator::HandleStop(reg_t code) {
  // Stop if it is enabled, otherwise continue past the stop.
  if (IsEnabledStop(code)) {
    PrintF("Simulator hit stop (%" REGId_FORMAT ")\n", code);
    DieOrDebug();
  }
}
bool Simulator::IsStopInstruction(Instruction* instr) {
  if (instr->InstructionBits() != kBreakInstr) return false;
  int32_t code = get_ebreak_code(instr);
  return code != -1 && static_cast<uint32_t>(code) > kMaxWatchpointCode &&
         static_cast<uint32_t>(code) <= kMaxStopCode;
}
bool Simulator::IsEnabledStop(reg_t code) {
  return !(watched_stops_[code].count & kStopDisabledBit);
}

void Simulator::EnableStop(reg_t code) {
  if (!IsEnabledStop(code)) {
    watched_stops_[code].count &= ~kStopDisabledBit;
  }
}

void Simulator::DisableStop(reg_t code) {
  if (IsEnabledStop(code)) {
    watched_stops_[code].count |= kStopDisabledBit;
  }
}
void Simulator::IncreaseStopCounter(reg_t code) {
  if ((watched_stops_[code].count & ~(1 << 31)) == 0x7FFFFFFF) {
    PrintF("Stop counter for code %" REGId_FORMAT
           " has overflowed.\n"
           "Enabling this code and resetting the counter to 0.\n",
           code);
    watched_stops_[code].count = 0;
    EnableStop(code);
  } else {
    watched_stops_[code].count++;
  }
}
void Simulator::PrintStopInfo(reg_t code) {
  if (code <= kMaxWatchpointCode) {
    PrintF("That is a watchpoint, not a stop.\n");
    return;
  } else if (code > kMaxStopCode) {
    PrintF("Code too large, only %u stops can be used\n", kMaxStopCode + 1);
    return;
  }
  const char* state = IsEnabledStop(code) ? "Enabled" : "Disabled";
  int32_t count = watched_stops_[code].count & ~kStopDisabledBit;
  // Don't print the state of unused breakpoints.
  if (count != 0) {
    if (watched_stops_[code].desc) {
      PrintF("stop %" REGId_FORMAT " - 0x%" REGIx_FORMAT
             " : \t%s, \tcounter = %i, \t%s\n",
             code, code, state, count, watched_stops_[code].desc);
    } else {
      PrintF("stop %" REGId_FORMAT " - 0x%" REGIx_FORMAT
             " : \t%s, \tcounter = %i\n",
             code, code, state, count);
    }
  }
}
void Simulator::SignalException(Exception e) {
  FATAL("Error: Exception %i raised.", static_cast<int>(e));
}
3700void Simulator::DecodeRVRType() {
3701 switch (instr_.InstructionBits() & kRTypeMask) {
3703 set_rd(sext_xlen(rs1() + rs2()));
3707 set_rd(sext_xlen(rs1() - rs2()));
3711 set_rd(sext_xlen(rs1() << (rs2() & (
xlen - 1))));
3715 set_rd(sreg_t(rs1()) < sreg_t(rs2()));
3719 set_rd(reg_t(rs1()) < reg_t(rs2()));
3723 set_rd(rs1() ^ rs2());
3727 set_rd(sext_xlen(zext_xlen(rs1()) >> (rs2() & (
xlen - 1))));
3731 set_rd(sext_xlen(sext_xlen(rs1()) >> (rs2() & (
xlen - 1))));
3735 set_rd(rs1() | rs2());
3739 set_rd(rs1() & rs2());
3743 set_rd(rs1() & ~rs2());
3746 set_rd(rs1() | (~rs2()));
3749 set_rd((~rs1()) ^ (~rs2()));
3751#ifdef V8_TARGET_ARCH_RISCV64
3753 set_rd(sext32(rs1() + rs2()));
3757 set_rd(zext32(rs1()) + rs2());
3760 set_rd(sext32(rs1() - rs2()));
3764 set_rd(sext32(rs1() << (rs2() & 0x1F)));
3768 set_rd(sext32(uint32_t(rs1()) >> (rs2() & 0x1F)));
3772 set_rd(sext32(
int32_t(rs1()) >> (rs2() & 0x1F)));
3776 set_rd(rs2() + (zext32(rs1()) << 1));
3780 set_rd(rs2() + (zext32(rs1()) << 2));
3784 set_rd(rs2() + (zext32(rs1()) << 3));
3788 reg_t extz_rs1 = zext32(rs1());
3789 sreg_t shamt = rs2() & 31;
3790 set_rd(sext32((extz_rs1 << shamt) | (extz_rs1 >> (32 - shamt))));
3794 reg_t extz_rs1 = zext32(rs1());
3795 sreg_t shamt = rs2() & 31;
3796 set_rd(sext32((extz_rs1 >> shamt) | (extz_rs1 << (32 - shamt))));
3802 set_rd(rs1() * rs2());
3806 set_rd(mulh(rs1(), rs2()));
3810 set_rd(mulhsu(rs1(), rs2()));
3814 set_rd(mulhu(rs1(), rs2()));
3818 sreg_t lhs = sext_xlen(rs1());
3819 sreg_t rhs = sext_xlen(rs2());
3822 }
else if (lhs == INTPTR_MIN && rhs == -1) {
3825 set_rd(sext_xlen(lhs / rhs));
3830 reg_t lhs = zext_xlen(rs1());
3831 reg_t rhs = zext_xlen(rs2());
3833 set_rd(UINTPTR_MAX);
3835 set_rd(zext_xlen(lhs / rhs));
3840 sreg_t lhs = sext_xlen(rs1());
3841 sreg_t rhs = sext_xlen(rs2());
3844 }
else if (lhs == INTPTR_MIN && rhs == -1) {
3847 set_rd(sext_xlen(lhs % rhs));
3852 reg_t lhs = zext_xlen(rs1());
3853 reg_t rhs = zext_xlen(rs2());
3857 set_rd(zext_xlen(lhs % rhs));
3861#ifdef V8_TARGET_ARCH_RISCV64
3863 set_rd(sext32(sext32(rs1()) * sext32(rs2())));
3867 sreg_t lhs = sext32(rs1());
3868 sreg_t rhs = sext32(rs2());
3871 }
else if (lhs == INT32_MIN && rhs == -1) {
3874 set_rd(sext32(lhs / rhs));
3879 reg_t lhs = zext32(rs1());
3880 reg_t rhs = zext32(rs2());
3884 set_rd(zext32(lhs / rhs));
3889 sreg_t lhs = sext32(rs1());
3890 sreg_t rhs = sext32(rs2());
3893 }
else if (lhs == INT32_MIN && rhs == -1) {
3896 set_rd(sext32(lhs % rhs));
3901 reg_t lhs = zext32(rs1());
3902 reg_t rhs = zext32(rs2());
3904 set_rd(zext32(lhs));
3906 set_rd(zext32(lhs % rhs));
3912 set_rd(rs2() + (rs1() << 1));
3915 set_rd(rs2() + (rs1() << 2));
3918 set_rd(rs2() + (rs1() << 3));
3921 set_rd(rs1() < rs2() ? rs2() : rs1());
3924 set_rd(reg_t(rs1()) < reg_t(rs2()) ? rs2() : rs1());
3927 set_rd(rs1() < rs2() ? rs1() : rs2());
3930 set_rd(reg_t(rs1()) < reg_t(rs2()) ? rs1() : rs2());
3933 set_rd(zext_xlen(
uint16_t(rs1())));
3936 sreg_t shamt = rs2() & (
xlen - 1);
3937 set_rd((reg_t(rs1()) << shamt) | (reg_t(rs1()) >> (
xlen - shamt)));
3941 sreg_t shamt = rs2() & (
xlen - 1);
3942 set_rd((reg_t(rs1()) >> shamt) | (reg_t(rs1()) << (
xlen - shamt)));
3946 sreg_t index = rs2() & (
xlen - 1);
3947 set_rd(rs1() & ~(1l << index));
3951 sreg_t index = rs2() & (
xlen - 1);
3952 set_rd((rs1() >> index) & 1);
3956 sreg_t index = rs2() & (
xlen - 1);
3957 set_rd(rs1() ^ (1 << index));
3961 sreg_t index = rs2() & (
xlen - 1);
3962 set_rd(rs1() | (1 << index));
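
      // Illustrative note (not from the source): the ROL/ROR cases above rely
      // on the usual rotate identity. For a rotate-left by `shamt` on an
      // xlen-bit register, a hypothetical standalone helper would be:
      //
      //   reg_t rol(reg_t x, sreg_t shamt) {
      //     shamt &= (xlen - 1);
      //     if (shamt == 0) return x;  // avoid shifting by a full xlen
      //     return (x << shamt) | (x >> (xlen - shamt));
      //   }
      //
      // The simulator expresses the same computation inline via set_rd().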
3976 switch (instr_.BaseOpcode()) {
3990float Simulator::RoundF2FHelper(
float input_val,
int rmode) {
3991 if (rmode == DYN) rmode = get_dynamic_rounding_mode();
3996 rounded = floorf(input_val);
3997 float error = input_val - rounded;
4001 if ((-0.5 <= input_val) && (input_val < 0.0)) {
4006 }
else if ((error > 0.5) ||
4007 ((error == 0.5) && (std::fmod(rounded, 2) != 0))) {
4013 rounded = std::truncf(input_val);
4016 rounded = floorf(input_val);
4019 rounded = ceilf(input_val);
4022 rounded = std::roundf(input_val);
4031double Simulator::RoundF2FHelper(
double input_val,
int rmode) {
4032 if (rmode == DYN) rmode = get_dynamic_rounding_mode();
4037 rounded = std::floor(input_val);
4038 double error = input_val - rounded;
4042 if ((-0.5 <= input_val) && (input_val < 0.0)) {
4047 }
else if ((error > 0.5) ||
4048 ((error == 0.5) && (std::fmod(rounded, 2) != 0))) {
4054 rounded = std::trunc(input_val);
4057 rounded = std::floor(input_val);
4060 rounded = std::ceil(input_val);
4063 rounded = std::round(input_val);
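
// Illustrative sketch (not from the source): RoundF2FHelper dispatches on the
// RISC-V rounding mode, first resolving DYN through fcsr.frm via
// get_dynamic_rounding_mode(). The standard modes map onto the C library
// calls seen in the fragments above roughly as follows, with RNE additionally
// fixing up the [-0.5, -0.0) range and the halfway-to-even tie:
//
//   switch (rmode) {
//     case RNE: /* floor + tie-to-even fixup, preserves -0.0 */ break;
//     case RTZ: rounded = std::trunc(input_val); break;  // toward zero
//     case RDN: rounded = std::floor(input_val); break;  // toward -infinity
//     case RUP: rounded = std::ceil(input_val); break;   // toward +infinity
//     case RMM: rounded = std::round(input_val); break;  // ties to max magnitude
//   }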
template <typename I_TYPE, typename F_TYPE>
I_TYPE Simulator::RoundF2IHelper(F_TYPE original, int rmode) {
  DCHECK(std::is_integral<I_TYPE>::value);
  DCHECK((std::is_same<F_TYPE, float>::value ||
          std::is_same<F_TYPE, double>::value));

  I_TYPE max_i = std::numeric_limits<I_TYPE>::max();
  I_TYPE min_i = std::numeric_limits<I_TYPE>::min();

  if (!std::isfinite(original)) {
    set_fflags(kInvalidOperation);
    if (std::isnan(original) ||
        original == std::numeric_limits<F_TYPE>::infinity()) {
      return max_i;
    } else {
      DCHECK(original == -std::numeric_limits<F_TYPE>::infinity());
      return min_i;
    }
  }

  F_TYPE rounded = RoundF2FHelper(original, rmode);
  if (original != rounded) set_fflags(kInexact);

  if (!std::isfinite(rounded)) {
    set_fflags(kInvalidOperation);
    if (std::isnan(rounded) ||
        rounded == std::numeric_limits<F_TYPE>::infinity()) {
      return max_i;
    } else {
      DCHECK(rounded == -std::numeric_limits<F_TYPE>::infinity());
      return min_i;
    }
  }

  // max_i is not exactly representable as a float, so compare against
  // max_i + 1, which is (it is a power of two).
  float max_i_plus_1 =
      std::is_same<uint64_t, I_TYPE>::value
          ? std::pow(2.0f, 64)  // uint64 max + 1 wraps to 0, spell it out
          : static_cast<float>(static_cast<uint64_t>(max_i) + 1);
  if (rounded >= max_i_plus_1) {
    set_fflags(kFPUOverflow | kInvalidOperation);
    return max_i;
  }

  // min_i (0 for unsigned, -2^(n-1) for signed) is exactly representable and
  // can be compared against directly.
  if (rounded <= min_i) {
    if (rounded < min_i) set_fflags(kFPUOverflow | kInvalidOperation);
    return min_i;
  }

  F_TYPE underflow_fval =
      std::is_same<F_TYPE, float>::value ? FLT_MIN : DBL_MIN;
  if (rounded < underflow_fval && rounded > -underflow_fval && rounded != 0) {
    set_fflags(kUnderflow);
  }

  return static_cast<I_TYPE>(rounded);
}
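
// Illustrative usage (not part of the simulator source): FCVT saturates on
// out-of-range inputs and raises flags, which is what RoundF2IHelper
// implements. Assuming a Simulator instance `sim`:
//
//   sim.RoundF2IHelper<int32_t>(3.7e10, RTZ);   // == INT32_MAX, invalid set
//   sim.RoundF2IHelper<int32_t>(-1.5, RTZ);     // == -1, inexact set
//   sim.RoundF2IHelper<uint32_t>(-3.0, RTZ);    // == 0, invalid set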
template <typename T>
static int64_t FclassHelper(T value) {
  switch (std::fpclassify(value)) {
    case FP_INFINITE:
      return (std::signbit(value) ? kNegativeInfinity : kPositiveInfinity);
    case FP_NAN:
      return (isSnan(value) ? kSignalingNaN : kQuietNaN);
    case FP_NORMAL:
      return (std::signbit(value) ? kNegativeNormalNumber
                                  : kPositiveNormalNumber);
    case FP_SUBNORMAL:
      return (std::signbit(value) ? kNegativeSubnormalNumber
                                  : kPositiveSubnormalNumber);
    case FP_ZERO:
      return (std::signbit(value) ? kNegativeZero : kPositiveZero);
    default:
      UNREACHABLE();
  }
}
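
// Illustrative note (not from the source): FCLASS produces a one-hot result,
// and the constants returned above correspond to the bit positions the ISA
// defines:
//
//   bit 0: negative infinity      bit 5: positive subnormal
//   bit 1: negative normal        bit 6: positive normal
//   bit 2: negative subnormal     bit 7: positive infinity
//   bit 3: negative zero          bit 8: signaling NaN
//   bit 4: positive zero          bit 9: quiet NaN
//
// e.g. FclassHelper(-0.0) yields kNegativeZero, which the ISA encodes as
// bit 3.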
template <typename T>
bool Simulator::CompareFHelper(T input1, T input2, FPUCondition cc) {
  DCHECK(std::is_floating_point<T>::value);
  bool result = false;
  switch (cc) {
    case LT:
    case LE:
      // FLT/FLE are signaling comparisons: NaN inputs raise invalid
      // operation and compare false.
      if (std::isnan(input1) || std::isnan(input2)) {
        set_fflags(kInvalidOperation);
      } else {
        result = (cc == LT) ? (input1 < input2) : (input1 <= input2);
      }
      break;
    case EQ:
      if (std::numeric_limits<T>::signaling_NaN() == input1 ||
          std::numeric_limits<T>::signaling_NaN() == input2) {
        set_fflags(kInvalidOperation);
      }
      if (std::isnan(input1) || std::isnan(input2)) {
        result = false;
      } else {
        result = (input1 == input2);
      }
      break;
    case NE:
      if (std::numeric_limits<T>::signaling_NaN() == input1 ||
          std::numeric_limits<T>::signaling_NaN() == input2) {
        set_fflags(kInvalidOperation);
      }
      if (std::isnan(input1) || std::isnan(input2)) {
        result = true;
      } else {
        result = (input1 != input2);
      }
      break;
    default:
      UNREACHABLE();
  }
  return result;
}
template <typename T>
static inline bool is_invalid_fmul(T src1, T src2) {
  return (isinf(src1) && src2 == static_cast<T>(0.0)) ||
         (src1 == static_cast<T>(0.0) && isinf(src2));
}

template <typename T>
static inline bool is_invalid_fadd(T src1, T src2) {
  return (isinf(src1) && isinf(src2) &&
          std::signbit(src1) != std::signbit(src2));
}

template <typename T>
static inline bool is_invalid_fsub(T src1, T src2) {
  return (isinf(src1) && isinf(src2) &&
          std::signbit(src1) == std::signbit(src2));
}

template <typename T>
static inline bool is_invalid_fdiv(T src1, T src2) {
  return ((src1 == 0 && src2 == 0) || (isinf(src1) && isinf(src2)));
}

template <typename T>
static inline bool is_invalid_fsqrt(T src1) {
  return (src1 < 0);
}
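
// Illustrative usage (not part of the simulator source): these predicates
// capture the IEEE-754 invalid-operation cases that the FPU handlers below
// check before computing, so the result can be forced to a quiet NaN and the
// invalid flag raised. Each of the following returns true:
//
//   is_invalid_fmul(INFINITY, 0.0)        // inf * 0
//   is_invalid_fadd(INFINITY, -INFINITY)  // inf + (-inf)
//   is_invalid_fsub(INFINITY, INFINITY)   // inf - inf
//   is_invalid_fdiv(0.0, 0.0)             // 0 / 0 (also inf / inf)
//   is_invalid_fsqrt(-1.0)                // square root of a negative number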
4231void Simulator::DecodeRVRAType() {
4236 switch (instr_.InstructionBits() & kRATypeMask) {
4238 sreg_t addr = rs1();
4239 if (!ProbeMemory(addr,
sizeof(int32_t)))
return;
4241 base::MutexGuard lock_guard(&GlobalMonitor::Get()->
mutex);
4242 if ((addr & 0x3) != 0) {
4245 auto val = ReadMem<int32_t>(addr, instr_.instr());
4246 set_rd(sext32(val),
false);
4247 TraceMemRd(addr, val, get_register(rd_reg()));
4248 local_monitor_.NotifyLoadLinked(addr, TransactionSize::Word);
4249 GlobalMonitor::Get()->NotifyLoadLinked_Locked(addr,
4250 &global_monitor_thread_);
4255 sreg_t addr = rs1();
4256 if (!ProbeMemory(addr,
sizeof(int32_t)))
return;
4257 if ((addr & 0x3) != 0) {
4260 base::MutexGuard lock_guard(&GlobalMonitor::Get()->
mutex);
4261 if (local_monitor_.NotifyStoreConditional(addr, TransactionSize::Word) &&
4262 GlobalMonitor::Get()->NotifyStoreConditional_Locked(
4263 addr, &global_monitor_thread_)) {
4264 local_monitor_.NotifyStore();
4265 GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
4266 WriteMem<int32_t>(rs1(), (int32_t)rs2(), instr_.instr());
4274 if ((rs1() & 0x3) != 0) {
4277 set_rd(sext32(amo<uint32_t>(
4278 rs1(), [&](uint32_t lhs) {
return (uint32_t)rs2(); }, instr_.instr(),
4283 if ((rs1() & 0x3) != 0) {
4286 set_rd(sext32(amo<uint32_t>(
4287 rs1(), [&](uint32_t lhs) {
return lhs + (uint32_t)rs2(); },
4288 instr_.instr(), WORD)));
4292 if ((rs1() & 0x3) != 0) {
4295 set_rd(sext32(amo<uint32_t>(
4296 rs1(), [&](uint32_t lhs) {
return lhs ^ (uint32_t)rs2(); },
4297 instr_.instr(), WORD)));
4301 if ((rs1() & 0x3) != 0) {
4304 set_rd(sext32(amo<uint32_t>(
4305 rs1(), [&](uint32_t lhs) {
return lhs & (uint32_t)rs2(); },
4306 instr_.instr(), WORD)));
4310 if ((rs1() & 0x3) != 0) {
4313 set_rd(sext32(amo<uint32_t>(
4314 rs1(), [&](uint32_t lhs) {
return lhs | (uint32_t)rs2(); },
4315 instr_.instr(), WORD)));
4319 if ((rs1() & 0x3) != 0) {
4322 set_rd(sext32(amo<int32_t>(
4323 rs1(), [&](int32_t lhs) {
return std::min(lhs, (int32_t)rs2()); },
4324 instr_.instr(), WORD)));
4328 if ((rs1() & 0x3) != 0) {
4331 set_rd(sext32(amo<int32_t>(
4332 rs1(), [&](int32_t lhs) {
return std::max(lhs, (int32_t)rs2()); },
4333 instr_.instr(), WORD)));
4337 if ((rs1() & 0x3) != 0) {
4340 set_rd(sext32(amo<uint32_t>(
4341 rs1(), [&](uint32_t lhs) {
return std::min(lhs, (uint32_t)rs2()); },
4342 instr_.instr(), WORD)));
4346 if ((rs1() & 0x3) != 0) {
4349 set_rd(sext32(amo<uint32_t>(
4350 rs1(), [&](uint32_t lhs) {
return std::max(lhs, (uint32_t)rs2()); },
4351 instr_.instr(), WORD)));
4354#ifdef V8_TARGET_ARCH_RISCV64
4356 int64_t addr = rs1();
4357 if (!ProbeMemory(addr,
sizeof(int64_t)))
return;
4359 base::MutexGuard lock_guard(&GlobalMonitor::Get()->
mutex);
4360 auto val = ReadMem<int64_t>(addr, instr_.instr());
4362 TraceMemRd(addr, val, get_register(rd_reg()));
4363 local_monitor_.NotifyLoadLinked(addr, TransactionSize::DoubleWord);
4364 GlobalMonitor::Get()->NotifyLoadLinked_Locked(addr,
4365 &global_monitor_thread_);
4370 int64_t addr = rs1();
4371 if (!ProbeMemory(addr,
sizeof(int64_t)))
return;
4372 base::MutexGuard lock_guard(&GlobalMonitor::Get()->
mutex);
4373 if (local_monitor_.NotifyStoreConditional(addr,
4374 TransactionSize::DoubleWord) &&
4375 (GlobalMonitor::Get()->NotifyStoreConditional_Locked(
4376 addr, &global_monitor_thread_))) {
4377 GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
4378 WriteMem<int64_t>(rs1(), rs2(), instr_.instr());
4385 case RO_AMOSWAP_D: {
4386 set_rd(amo<int64_t>(
4387 rs1(), [&](int64_t lhs) {
return rs2(); }, instr_.instr(),
DWORD));
4391 set_rd(amo<int64_t>(
4392 rs1(), [&](int64_t lhs) {
return lhs + rs2(); }, instr_.instr(),
4397 set_rd(amo<int64_t>(
4398 rs1(), [&](int64_t lhs) {
return lhs ^ rs2(); }, instr_.instr(),
4403 set_rd(amo<int64_t>(
4404 rs1(), [&](int64_t lhs) {
return lhs & rs2(); }, instr_.instr(),
4409 set_rd(amo<int64_t>(
4410 rs1(), [&](int64_t lhs) {
return lhs | rs2(); }, instr_.instr(),
4415 set_rd(amo<int64_t>(
4416 rs1(), [&](int64_t lhs) {
return std::min(lhs, rs2()); },
4417 instr_.instr(),
DWORD));
4421 set_rd(amo<int64_t>(
4422 rs1(), [&](int64_t lhs) {
return std::max(lhs, rs2()); },
4423 instr_.instr(),
DWORD));
4426 case RO_AMOMINU_D: {
4427 set_rd(amo<uint64_t>(
4428 rs1(), [&](uint64_t lhs) {
return std::min(lhs, (uint64_t)rs2()); },
4429 instr_.instr(),
DWORD));
4432 case RO_AMOMAXU_D: {
4433 set_rd(amo<uint64_t>(
4434 rs1(), [&](uint64_t lhs) {
return std::max(lhs, (uint64_t)rs2()); },
4435 instr_.instr(),
DWORD));
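
    // Illustrative sketch (not part of the simulator source): every AMO case
    // above follows the same read-modify-write shape, differing only in the
    // lambda that combines the loaded value with rs2. Conceptually the amo<>
    // helper does something like the hypothetical stand-in below, with rd
    // receiving the old memory value:
    //
    //   template <typename T, typename Op>
    //   T AmoSketch(sreg_t addr, Op op) {
    //     T loaded = *reinterpret_cast<T*>(addr);    // old memory value
    //     *reinterpret_cast<T*>(addr) = op(loaded);  // store combined value
    //     return loaded;                             // written to rd
    //   }
    //
    // e.g. AMOADD.D uses the lambda [&](int64_t lhs) { return lhs + rs2(); }.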
4446void Simulator::DecodeRVRFPType() {
4451 switch (instr_.InstructionBits() & kRFPTypeMask) {
4455 auto fn = [
this](
float frs1,
float frs2) {
4456 if (is_invalid_fadd(frs1, frs2)) {
4457 this->set_fflags(kInvalidOperation);
4458 return std::numeric_limits<float>::quiet_NaN();
4463 set_frd(CanonicalizeFPUOp2<float>(
fn));
4468 auto fn = [
this](
float frs1,
float frs2) {
4469 if (is_invalid_fsub(frs1, frs2)) {
4470 this->set_fflags(kInvalidOperation);
4471 return std::numeric_limits<float>::quiet_NaN();
4476 set_frd(CanonicalizeFPUOp2<float>(
fn));
4481 auto fn = [
this](
float frs1,
float frs2) {
4482 if (is_invalid_fmul(frs1, frs2)) {
4483 this->set_fflags(kInvalidOperation);
4484 return std::numeric_limits<float>::quiet_NaN();
4489 set_frd(CanonicalizeFPUOp2<float>(
fn));
4494 auto fn = [
this](
float frs1,
float frs2) {
4495 if (is_invalid_fdiv(frs1, frs2)) {
4496 this->set_fflags(kInvalidOperation);
4497 return std::numeric_limits<float>::quiet_NaN();
4498 }
else if (frs2 == 0.0f) {
4499 this->set_fflags(kDivideByZero);
4500 return (std::signbit(frs1) == std::signbit(frs2)
4501 ? std::numeric_limits<float>::infinity()
4502 : -std::numeric_limits<float>::infinity());
4507 set_frd(CanonicalizeFPUOp2<float>(
fn));
4511 if (instr_.Rs2Value() == 0b00000) {
4513 auto fn = [
this](
float frs) {
4514 if (is_invalid_fsqrt(frs)) {
4515 this->set_fflags(kInvalidOperation);
4516 return std::numeric_limits<float>::quiet_NaN();
4518 return std::sqrt(frs);
4521 set_frd(CanonicalizeFPUOp1<float>(
fn));
4528 switch (instr_.Funct3Value()) {
4530 set_frd(fsgnj32(frs1_boxed(), frs2_boxed(),
false,
false));
4534 set_frd(fsgnj32(frs1_boxed(), frs2_boxed(),
true,
false));
4538 set_frd(fsgnj32(frs1_boxed(), frs2_boxed(),
false,
true));
4548 switch (instr_.Funct3Value()) {
4550 set_frd(FMaxMinHelper(frs1(), frs2(), MaxMinKind::kMin));
4554 set_frd(FMaxMinHelper(frs1(), frs2(), MaxMinKind::kMax));
4564 float original_val = frs1();
4565 switch (instr_.Rs2Value()) {
4567 set_rd(RoundF2IHelper<int32_t>(original_val, instr_.RoundMode()));
4572 RoundF2IHelper<uint32_t>(original_val, instr_.RoundMode())));
4575#ifdef V8_TARGET_ARCH_RISCV64
4577 set_rd(RoundF2IHelper<int64_t>(original_val, instr_.RoundMode()));
4581 set_rd(RoundF2IHelper<uint64_t>(original_val, instr_.RoundMode()));
4592 switch (instr_.Funct3Value()) {
4594 if (instr_.Rs2Value() == 0b00000) {
4596 set_rd(sext32(get_fpu_register_word(rs1_reg())));
4603 set_rd(FclassHelper(frs1()));
4613 switch (instr_.Funct3Value()) {
4615 set_rd(CompareFHelper(frs1(), frs2(), EQ));
4619 set_rd(CompareFHelper(frs1(), frs2(), LT));
4623 set_rd(CompareFHelper(frs1(), frs2(), LE));
4633 switch (instr_.Rs2Value()) {
4635 set_frd(
static_cast<float>((int32_t)rs1()));
4639 set_frd(
static_cast<float>((uint32_t)rs1()));
4642#ifdef V8_TARGET_ARCH_RISCV64
4644 set_frd(
static_cast<float>((int64_t)rs1()));
4648 set_frd(
static_cast<float>((uint64_t)rs1()));
4659 if (instr_.Funct3Value() == 0b000) {
4671 auto fn = [
this](
double drs1,
double drs2) {
4672 if (is_invalid_fadd(drs1, drs2)) {
4673 this->set_fflags(kInvalidOperation);
4674 return std::numeric_limits<double>::quiet_NaN();
4679 set_drd(CanonicalizeFPUOp2<double>(
fn));
4684 auto fn = [
this](
double drs1,
double drs2) {
4685 if (is_invalid_fsub(drs1, drs2)) {
4686 this->set_fflags(kInvalidOperation);
4687 return std::numeric_limits<double>::quiet_NaN();
4692 set_drd(CanonicalizeFPUOp2<double>(
fn));
4697 auto fn = [
this](
double drs1,
double drs2) {
4698 if (is_invalid_fmul(drs1, drs2)) {
4699 this->set_fflags(kInvalidOperation);
4700 return std::numeric_limits<double>::quiet_NaN();
4705 set_drd(CanonicalizeFPUOp2<double>(
fn));
4710 auto fn = [
this](
double drs1,
double drs2) {
4711 if (is_invalid_fdiv(drs1, drs2)) {
4712 this->set_fflags(kInvalidOperation);
4713 return std::numeric_limits<double>::quiet_NaN();
4714 }
else if (drs2 == 0.0) {
4715 this->set_fflags(kDivideByZero);
4716 return (std::signbit(drs1) == std::signbit(drs2)
4717 ? std::numeric_limits<double>::infinity()
4718 : -std::numeric_limits<double>::infinity());
4723 set_drd(CanonicalizeFPUOp2<double>(
fn));
4727 if (instr_.Rs2Value() == 0b00000) {
4729 auto fn = [
this](
double drs) {
4730 if (is_invalid_fsqrt(drs)) {
4731 this->set_fflags(kInvalidOperation);
4732 return std::numeric_limits<double>::quiet_NaN();
4734 return std::sqrt(drs);
4737 set_drd(CanonicalizeFPUOp1<double>(
fn));
4744 switch (instr_.Funct3Value()) {
4746 set_drd(fsgnj64(drs1_boxed(), drs2_boxed(),
false,
false));
4750 set_drd(fsgnj64(drs1_boxed(), drs2_boxed(),
true,
false));
4754 set_drd(fsgnj64(drs1_boxed(), drs2_boxed(),
false,
true));
4764 switch (instr_.Funct3Value()) {
4766 set_drd(FMaxMinHelper(drs1(), drs2(), MaxMinKind::kMin));
4770 set_drd(FMaxMinHelper(drs1(), drs2(), MaxMinKind::kMax));
4779 case (RO_FCVT_S_D & kRFPTypeMask): {
4780 if (instr_.Rs2Value() == 0b00001) {
4781 auto fn = [](
double drs) {
return static_cast<float>(drs); };
4782 set_frd(CanonicalizeDoubleToFloatOperation(
fn));
4789 if (instr_.Rs2Value() == 0b00000) {
4790 auto fn = [](
float frs) {
return static_cast<double>(frs); };
4791 set_drd(CanonicalizeFloatToDoubleOperation(
fn));
4798 switch (instr_.Funct3Value()) {
4800 set_rd(CompareFHelper(drs1(), drs2(), EQ));
4804 set_rd(CompareFHelper(drs1(), drs2(), LT));
4808 set_rd(CompareFHelper(drs1(), drs2(), LE));
4817 case (RO_FCLASS_D & kRFPTypeMask): {
4818 if (instr_.Rs2Value() != 0b00000) {
4821 switch (instr_.Funct3Value()) {
4823 set_rd(FclassHelper(drs1()));
4826#ifdef V8_TARGET_ARCH_RISCV64
4828 set_rd(base::bit_cast<int64_t>(drs1()));
4839 double original_val = drs1();
4840 switch (instr_.Rs2Value()) {
4842 set_rd(RoundF2IHelper<int32_t>(original_val, instr_.RoundMode()));
4847 RoundF2IHelper<uint32_t>(original_val, instr_.RoundMode())));
4850#ifdef V8_TARGET_ARCH_RISCV64
4852 set_rd(RoundF2IHelper<int64_t>(original_val, instr_.RoundMode()));
4856 set_rd(RoundF2IHelper<uint64_t>(original_val, instr_.RoundMode()));
4867 switch (instr_.Rs2Value()) {
4869 set_drd((int32_t)rs1());
4873 set_drd((uint32_t)rs1());
4876#ifdef V8_TARGET_ARCH_RISCV64
4878 set_drd((int64_t)rs1());
4882 set_drd((uint64_t)rs1());
4892#ifdef V8_TARGET_ARCH_RISCV64
4894 if (instr_.Funct3Value() == 0b000 && instr_.Rs2Value() == 0b00000) {
4896 set_drd(base::bit_cast<double>(rs1()));
4909void Simulator::DecodeRVR4Type() {
4910 switch (instr_.InstructionBits() & kR4TypeMask) {
4914 auto fn = [
this](
float frs1,
float frs2,
float frs3) {
4915 if (is_invalid_fmul(frs1, frs2) || is_invalid_fadd(frs1 * frs2, frs3)) {
4916 this->set_fflags(kInvalidOperation);
4917 return std::numeric_limits<float>::quiet_NaN();
4919 return std::fma(frs1, frs2, frs3);
4922 set_frd(CanonicalizeFPUOp3<float>(
fn));
4927 auto fn = [
this](
float frs1,
float frs2,
float frs3) {
4928 if (is_invalid_fmul(frs1, frs2) || is_invalid_fsub(frs1 * frs2, frs3)) {
4929 this->set_fflags(kInvalidOperation);
4930 return std::numeric_limits<float>::quiet_NaN();
4932 return std::fma(frs1, frs2, -frs3);
4935 set_frd(CanonicalizeFPUOp3<float>(
fn));
4940 auto fn = [
this](
float frs1,
float frs2,
float frs3) {
4941 if (is_invalid_fmul(frs1, frs2) || is_invalid_fsub(frs3, frs1 * frs2)) {
4942 this->set_fflags(kInvalidOperation);
4943 return std::numeric_limits<float>::quiet_NaN();
4945 return -std::fma(frs1, frs2, -frs3);
4948 set_frd(CanonicalizeFPUOp3<float>(
fn));
4953 auto fn = [
this](
float frs1,
float frs2,
float frs3) {
4954 if (is_invalid_fmul(frs1, frs2) || is_invalid_fadd(frs1 * frs2, frs3)) {
4955 this->set_fflags(kInvalidOperation);
4956 return std::numeric_limits<float>::quiet_NaN();
4958 return -std::fma(frs1, frs2, frs3);
4961 set_frd(CanonicalizeFPUOp3<float>(
fn));
4967 auto fn = [
this](
double drs1,
double drs2,
double drs3) {
4968 if (is_invalid_fmul(drs1, drs2) || is_invalid_fadd(drs1 * drs2, drs3)) {
4969 this->set_fflags(kInvalidOperation);
4970 return std::numeric_limits<double>::quiet_NaN();
4972 return std::fma(drs1, drs2, drs3);
4975 set_drd(CanonicalizeFPUOp3<double>(
fn));
4980 auto fn = [
this](
double drs1,
double drs2,
double drs3) {
4981 if (is_invalid_fmul(drs1, drs2) || is_invalid_fsub(drs1 * drs2, drs3)) {
4982 this->set_fflags(kInvalidOperation);
4983 return std::numeric_limits<double>::quiet_NaN();
4985 return std::fma(drs1, drs2, -drs3);
4988 set_drd(CanonicalizeFPUOp3<double>(
fn));
4993 auto fn = [
this](
double drs1,
double drs2,
double drs3) {
4994 if (is_invalid_fmul(drs1, drs2) || is_invalid_fsub(drs3, drs1 * drs2)) {
4995 this->set_fflags(kInvalidOperation);
4996 return std::numeric_limits<double>::quiet_NaN();
4998 return -std::fma(drs1, drs2, -drs3);
5001 set_drd(CanonicalizeFPUOp3<double>(
fn));
5006 auto fn = [
this](
double drs1,
double drs2,
double drs3) {
5007 if (is_invalid_fmul(drs1, drs2) || is_invalid_fadd(drs1 * drs2, drs3)) {
5008 this->set_fflags(kInvalidOperation);
5009 return std::numeric_limits<double>::quiet_NaN();
5011 return -std::fma(drs1, drs2, drs3);
5014 set_drd(CanonicalizeFPUOp3<double>(
fn));
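
// Illustrative note (not from the source): the four fused ops above differ
// only in where the negations go, matching the ISA definitions:
//
//   FMADD:    rs1 * rs2 + rs3   ->  std::fma(a, b, c)
//   FMSUB:    rs1 * rs2 - rs3   ->  std::fma(a, b, -c)
//   FNMSUB: -(rs1 * rs2) + rs3  -> -std::fma(a, b, -c)
//   FNMADD: -(rs1 * rs2) - rs3  -> -std::fma(a, b, c)
//
// Each lambda first checks is_invalid_fmul/is_invalid_fadd/is_invalid_fsub so
// that inf * 0 +/- x produces a quiet NaN with the invalid flag set, and the
// result is then passed through CanonicalizeFPUOp3 for NaN canonicalization.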
5022#ifdef CAN_USE_RVV_INSTRUCTIONS
5023bool Simulator::DecodeRvvVL() {
5024 uint32_t instr_temp =
5026 if (RO_V_VL == instr_temp) {
5027 if (!(instr_.InstructionBits() & (kRvvRs2Mask))) {
5028 switch (instr_.vl_vs_width()) {
5030 RVV_VI_LD(0, (
i * nf +
fn), int8,
false);
5034 RVV_VI_LD(0, (
i * nf +
fn), int16,
false);
5038 RVV_VI_LD(0, (
i * nf +
fn), int32,
false);
5042 RVV_VI_LD(0, (
i * nf +
fn), int64,
false);
5054 }
else if (RO_V_VLS == instr_temp) {
5057 }
else if (RO_V_VLX == instr_temp) {
5060 }
else if (RO_V_VLSEG2 == instr_temp || RO_V_VLSEG3 == instr_temp ||
5061 RO_V_VLSEG4 == instr_temp || RO_V_VLSEG5 == instr_temp ||
5062 RO_V_VLSEG6 == instr_temp || RO_V_VLSEG7 == instr_temp ||
5063 RO_V_VLSEG8 == instr_temp) {
5064 if (!(instr_.InstructionBits() & (kRvvRs2Mask))) {
5071 }
else if (RO_V_VLSSEG2 == instr_temp || RO_V_VLSSEG3 == instr_temp ||
5072 RO_V_VLSSEG4 == instr_temp || RO_V_VLSSEG5 == instr_temp ||
5073 RO_V_VLSSEG6 == instr_temp || RO_V_VLSSEG7 == instr_temp ||
5074 RO_V_VLSSEG8 == instr_temp) {
5077 }
else if (RO_V_VLXSEG2 == instr_temp || RO_V_VLXSEG3 == instr_temp ||
5078 RO_V_VLXSEG4 == instr_temp || RO_V_VLXSEG5 == instr_temp ||
5079 RO_V_VLXSEG6 == instr_temp || RO_V_VLXSEG7 == instr_temp ||
5080 RO_V_VLXSEG8 == instr_temp) {
5088bool Simulator::DecodeRvvVS() {
5089 uint32_t instr_temp =
5091 if (RO_V_VS == instr_temp) {
5092 if (!(instr_.InstructionBits() & (kRvvRs2Mask))) {
5093 switch (instr_.vl_vs_width()) {
5095 RVV_VI_ST(0, (
i * nf +
fn), uint8,
false);
5099 RVV_VI_ST(0, (
i * nf +
fn), uint16,
false);
5103 RVV_VI_ST(0, (
i * nf +
fn), uint32,
false);
5107 RVV_VI_ST(0, (
i * nf +
fn), uint64,
false);
5118 }
else if (RO_V_VSS == instr_temp) {
5121 }
else if (RO_V_VSX == instr_temp) {
5124 }
else if (RO_V_VSU == instr_temp) {
5127 }
else if (RO_V_VSSEG2 == instr_temp || RO_V_VSSEG3 == instr_temp ||
5128 RO_V_VSSEG4 == instr_temp || RO_V_VSSEG5 == instr_temp ||
5129 RO_V_VSSEG6 == instr_temp || RO_V_VSSEG7 == instr_temp ||
5130 RO_V_VSSEG8 == instr_temp) {
5133 }
else if (RO_V_VSSSEG2 == instr_temp || RO_V_VSSSEG3 == instr_temp ||
5134 RO_V_VSSSEG4 == instr_temp || RO_V_VSSSEG5 == instr_temp ||
5135 RO_V_VSSSEG6 == instr_temp || RO_V_VSSSEG7 == instr_temp ||
5136 RO_V_VSSSEG8 == instr_temp) {
5139 }
else if (RO_V_VSXSEG2 == instr_temp || RO_V_VSXSEG3 == instr_temp ||
5140 RO_V_VSXSEG4 == instr_temp || RO_V_VSXSEG5 == instr_temp ||
5141 RO_V_VSXSEG6 == instr_temp || RO_V_VSXSEG7 == instr_temp ||
5142 RO_V_VSXSEG8 == instr_temp) {
5152 for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
5156 return Builtin::kNoBuiltinId;
5159void Simulator::DecodeRVIType() {
5160 switch (instr_.InstructionBits() & kITypeMask) {
5162 set_rd(get_pc() + kInstrSize);
5164 sreg_t next_pc = (rs1() + imm12()) & ~sreg_t(1);
5167 Builtin builtin = LookUp((Address)get_pc());
5168 if (builtin != Builtin::kNoBuiltinId) {
5169 auto code = builtins_.code(builtin);
5170 if ((rs1_reg() != ra || imm12() != 0)) {
5171 if ((Address)get_pc() == code->instruction_start()) {
5172 sreg_t arg0 = get_register(a0);
5173 sreg_t arg1 = get_register(a1);
5174 sreg_t arg2 = get_register(a2);
5175 sreg_t arg3 = get_register(a3);
5176 sreg_t arg4 = get_register(a4);
5177 sreg_t arg5 = get_register(a5);
5178 sreg_t arg6 = get_register(a6);
5179 sreg_t arg7 = get_register(a7);
5180 sreg_t* stack_pointer =
5181 reinterpret_cast<sreg_t*
>(get_register(sp));
5182 sreg_t arg8 = stack_pointer[0];
5183 sreg_t arg9 = stack_pointer[1];
5185 "Call to Builtin at %s "
5186 "a0 %08" REGIx_FORMAT
" ,a1 %08" REGIx_FORMAT
5187 " ,a2 %08" REGIx_FORMAT
" ,a3 %08" REGIx_FORMAT
5188 " ,a4 %08" REGIx_FORMAT
" ,a5 %08" REGIx_FORMAT
5189 " ,a6 %08" REGIx_FORMAT
" ,a7 %08" REGIx_FORMAT
5190 " ,0(sp) %08" REGIx_FORMAT
" ,8(sp) %08" REGIx_FORMAT
5191 " ,sp %08" REGIx_FORMAT
",fp %08" REGIx_FORMAT
" \n",
5192 builtins_.name(builtin), arg0, arg1, arg2, arg3, arg4, arg5,
5193 arg6, arg7, arg8, arg9, get_register(sp), get_register(fp));
5195 }
else if (rd_reg() == zero_reg) {
5196 PrintF(
"Return to Builtin at %s \n", builtins_.name(builtin));
5203 sreg_t addr = rs1() + imm12();
5204 if (!ProbeMemory(addr,
sizeof(int8_t)))
return;
5205 int8_t val = ReadMem<int8_t>(addr, instr_.instr());
5206 set_rd(sext_xlen(val),
false);
5207 TraceMemRd(addr, val, get_register(rd_reg()));
5211 sreg_t addr = rs1() + imm12();
5212 if (!ProbeMemory(addr,
sizeof(int16_t)))
return;
5213 int16_t val = ReadMem<int16_t>(addr, instr_.instr());
5214 set_rd(sext_xlen(val),
false);
5215 TraceMemRd(addr, val, get_register(rd_reg()));
5219 sreg_t addr = rs1() + imm12();
5220 if (!ProbeMemory(addr,
sizeof(int32_t)))
return;
5221 int32_t val = ReadMem<int32_t>(addr, instr_.instr());
5222 set_rd(sext_xlen(val),
false);
5223 TraceMemRd(addr, val, get_register(rd_reg()));
5227 sreg_t addr = rs1() + imm12();
5228 if (!ProbeMemory(addr,
sizeof(int8_t)))
return;
5229 uint8_t val = ReadMem<uint8_t>(addr, instr_.instr());
5230 set_rd(zext_xlen(val),
false);
5231 TraceMemRd(addr, val, get_register(rd_reg()));
5235 sreg_t addr = rs1() + imm12();
5236 if (!ProbeMemory(addr,
sizeof(int16_t)))
return;
5237 uint16_t val = ReadMem<uint16_t>(addr, instr_.instr());
5238 set_rd(zext_xlen(val),
false);
5239 TraceMemRd(addr, val, get_register(rd_reg()));
5242#ifdef V8_TARGET_ARCH_RISCV64
5244 int64_t addr = rs1() + imm12();
5245 if (!ProbeMemory(addr,
sizeof(int32_t)))
return;
5246 uint32_t val = ReadMem<uint32_t>(addr, instr_.instr());
5247 set_rd(zext_xlen(val),
false);
5248 TraceMemRd(addr, val, get_register(rd_reg()));
5252 int64_t addr = rs1() + imm12();
5253 if (!ProbeMemory(addr,
sizeof(int64_t)))
return;
5254 int64_t val = ReadMem<int64_t>(addr, instr_.instr());
5255 set_rd(sext_xlen(val),
false);
5256 TraceMemRd(addr, val, get_register(rd_reg()));
5261 set_rd(sext_xlen(rs1() + imm12()));
5265 set_rd(sreg_t(rs1()) < sreg_t(imm12()));
5269 set_rd(reg_t(rs1()) < reg_t(imm12()));
5273 set_rd(imm12() ^ rs1());
5277 set_rd(imm12() | rs1());
5281 set_rd(imm12() & rs1());
5285 switch (instr_.Funct6FieldRaw() | OP_SHL) {
5287 require(shamt6() <
xlen);
5288 set_rd(sext_xlen(rs1() << shamt6()));
5291 require(shamt6() <
xlen);
5292 sreg_t index = shamt6() & (
xlen - 1);
5293 set_rd(rs1() & ~(1l << index));
5297 require(shamt6() <
xlen);
5298 sreg_t index = shamt6() & (
xlen - 1);
5299 set_rd(rs1() ^ (1l << index));
5303 require(shamt6() <
xlen);
5304 sreg_t index = shamt6() & (
xlen - 1);
5305 set_rd(rs1() | (1l << index));
5309 switch (instr_.Shamt()) {
5312 int highest_setbit = -1;
5313 for (
auto i =
xlen - 1;
i >= 0;
i--) {
5314 if ((
x & (1l <<
i))) {
5319 set_rd(
xlen - 1 - highest_setbit);
5324 int lowest_setbit =
xlen;
5325 for (
auto i = 0;
i <
xlen;
i++) {
5326 if ((
x & (1l <<
i))) {
5331 set_rd(lowest_setbit);
5345 set_rd(int8_t(rs1()));
5360 switch (instr_.Funct6FieldRaw() | OP_SHR) {
5362 require(shamt6() <
xlen);
5363 set_rd(sext_xlen(zext_xlen(rs1()) >> shamt6()));
5366 require(shamt6() <
xlen);
5367 set_rd(sext_xlen(sext_xlen(rs1()) >> shamt6()));
5370 require(shamt6() <
xlen);
5371 sreg_t index = shamt6() & (
xlen - 1);
5372 set_rd((rs1() >> index) & 1);
5376 reg_t rs1_val = rs1();
5380 for (reg_t
i = 0;
i <
xlen;
i += step) {
5381 if ((rs1_val &
mask) != 0) {
5390#ifdef V8_TARGET_ARCH_RISCV64
5395 set_rd((reg_t(rs1()) >> shamt) | (reg_t(rs1()) << (
xlen - shamt)));
5399 if (imm12() == RO_REV8_IMM12) {
5400 reg_t input = rs1();
5403 for (
int i = 0;
i <
xlen;
i += 8) {
5404 output |= ((input >> (j - 7)) & 0xff) <<
i;
5417#ifdef V8_TARGET_ARCH_RISCV64
5419 set_rd(sext32(rs1() + imm12()));
5423 switch (instr_.Funct7FieldRaw() | OP_SHLW) {
5425 set_rd(sext32(rs1() << shamt5()));
5428 set_rd(zext32(rs1()) << shamt6());
5431 switch (instr_.Shamt()) {
5434 int highest_setbit = -1;
5435 for (
auto i = 31;
i >= 0;
i--) {
5436 if ((
x & (1l <<
i))) {
5441 set_rd(31 - highest_setbit);
5446 int lowest_setbit = 32;
5447 for (
auto i = 0;
i < 32;
i++) {
5448 if ((
x & (1l <<
i))) {
5453 set_rd(lowest_setbit);
5476 switch (instr_.Funct7FieldRaw() | OP_SHRW) {
5478 set_rd(sext32(uint32_t(rs1()) >> shamt5()));
5481 set_rd(sext32(
int32_t(rs1()) >> shamt5()));
5484 reg_t extz_rs1 = zext32(rs1());
5486 set_rd(sext32((extz_rs1 >> shamt) | (extz_rs1 << (32 - shamt))));
5500 if (instr_.Imm12Value() == 0) {
5501 SoftwareInterrupt();
5502 }
else if (instr_.Imm12Value() == 1) {
5503 SoftwareInterrupt();
5516 if (rd_reg() != zero_reg) {
5517 set_rd(zext_xlen(read_csr_value(csr_reg())));
5519 write_csr_value(csr_reg(), rs1());
5523 set_rd(zext_xlen(read_csr_value(csr_reg())));
5524 if (rs1_reg() != zero_reg) {
5525 set_csr_bits(csr_reg(), rs1());
5530 set_rd(zext_xlen(read_csr_value(csr_reg())));
5531 if (rs1_reg() != zero_reg) {
5532 clear_csr_bits(csr_reg(), rs1());
5537 if (rd_reg() != zero_reg) {
5538 set_rd(zext_xlen(read_csr_value(csr_reg())));
5540 write_csr_value(csr_reg(), imm5CSR());
5544 set_rd(zext_xlen(read_csr_value(csr_reg())));
5545 if (imm5CSR() != 0) {
5546 set_csr_bits(csr_reg(), imm5CSR());
5551 set_rd(zext_xlen(read_csr_value(csr_reg())));
5552 if (imm5CSR() != 0) {
5553 clear_csr_bits(csr_reg(), imm5CSR());
      sreg_t addr = rs1() + imm12();
      if (!ProbeMemory(addr, sizeof(float))) return;
      uint32_t val = ReadMem<uint32_t>(addr, instr_.instr());
      set_frd(Float32::FromBits(val), false);
      TraceMemRdFloat(addr, Float32::FromBits(val),
                      get_fpu_register(frd_reg()));

      sreg_t addr = rs1() + imm12();
      if (!ProbeMemory(addr, sizeof(double))) return;
      uint64_t val = ReadMem<uint64_t>(addr, instr_.instr());
      set_drd(Float64::FromBits(val), false);
      TraceMemRdDouble(addr, Float64::FromBits(val),
                       get_fpu_register(frd_reg()));

#ifdef CAN_USE_RVV_INSTRUCTIONS
      if (!DecodeRvvVL()) {
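
// Illustrative sketch, not from the simulator sources: the CSR cases above all
// follow the same read-modify-write shape - read the old CSR value into rd
// first, then write/set/clear only when the source operand (rs1 or the 5-bit
// immediate) is non-zero. With a plain uint64_t standing in for a CSR, the
// set/clear variants reduce to roughly this (names are illustrative):
static inline uint64_t IllustrativeCsrrs(uint64_t* csr, uint64_t mask) {
  uint64_t old_value = *csr;     // old value goes to rd
  if (mask != 0) *csr |= mask;   // CSRRS sets bits only for a non-zero mask
  return old_value;
}
static inline uint64_t IllustrativeCsrrc(uint64_t* csr, uint64_t mask) {
  uint64_t old_value = *csr;
  if (mask != 0) *csr &= ~mask;  // CSRRC clears bits only for a non-zero mask
  return old_value;
}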
void Simulator::DecodeRVSType() {
  switch (instr_.InstructionBits() & kSTypeMask) {
      if (!ProbeMemory(rs1() + s_imm12(), sizeof(int8_t))) return;
      WriteMem<uint8_t>(rs1() + s_imm12(), (uint8_t)rs2(), instr_.instr());

      if (!ProbeMemory(rs1() + s_imm12(), sizeof(int16_t))) return;
      WriteMem<uint16_t>(rs1() + s_imm12(), (uint16_t)rs2(), instr_.instr());

      if (!ProbeMemory(rs1() + s_imm12(), sizeof(int32_t))) return;
      WriteMem<uint32_t>(rs1() + s_imm12(), (uint32_t)rs2(), instr_.instr());

#ifdef V8_TARGET_ARCH_RISCV64
      if (!ProbeMemory(rs1() + s_imm12(), sizeof(int64_t))) return;
      WriteMem<uint64_t>(rs1() + s_imm12(), (uint64_t)rs2(), instr_.instr());

      if (!ProbeMemory(rs1() + s_imm12(), sizeof(float))) return;
      WriteMem<Float32>(rs1() + s_imm12(),
                        get_fpu_register_Float32(rs2_reg(), false),

      if (!ProbeMemory(rs1() + s_imm12(), sizeof(double))) return;
      WriteMem<Float64>(rs1() + s_imm12(), get_fpu_register_Float64(rs2_reg()),

#ifdef CAN_USE_RVV_INSTRUCTIONS
      if (!DecodeRvvVS()) {
void Simulator::DecodeRVBType() {
  switch (instr_.InstructionBits() & kBTypeMask) {
      if (rs1() == rs2()) {
        int64_t next_pc = get_pc() + boffset();

      if (rs1() != rs2()) {
        int64_t next_pc = get_pc() + boffset();

      if (rs1() < rs2()) {
        int64_t next_pc = get_pc() + boffset();

      if (rs1() >= rs2()) {
        int64_t next_pc = get_pc() + boffset();

      if ((reg_t)rs1() < (reg_t)rs2()) {
        int64_t next_pc = get_pc() + boffset();

      if ((reg_t)rs1() >= (reg_t)rs2()) {
        int64_t next_pc = get_pc() + boffset();
void Simulator::DecodeRVUType() {
  switch (instr_.BaseOpcodeFieldRaw()) {
      set_rd(sext_xlen(u_imm20() + get_pc()));

void Simulator::DecodeRVJType() {
  switch (instr_.BaseOpcodeValue()) {
      set_rd(get_pc() + kInstrSize);
      int64_t next_pc = get_pc() + imm20J();
void Simulator::DecodeCRType() {
  switch (instr_.RvcFunct4Value()) {
      if (instr_.RvcRs1Value() != 0 && instr_.RvcRs2Value() == 0) {
      } else if (instr_.RvcRdValue() != 0 && instr_.RvcRs2Value() != 0) {
        set_rvc_rd(sext_xlen(rvc_rs2()));

      if (instr_.RvcRs1Value() == 0 && instr_.RvcRs2Value() == 0) {
      } else if (instr_.RvcRdValue() != 0 && instr_.RvcRs2Value() == 0) {
        set_register(ra, get_pc() + kShortInstrSize);
      } else if (instr_.RvcRdValue() != 0 && instr_.RvcRs2Value() != 0) {
        set_rvc_rd(sext_xlen(rvc_rs1() + rvc_rs2()));
void Simulator::DecodeCAType() {
  switch (instr_.InstructionBits() & kCATypeMask) {
      set_rvc_rs1s(sext_xlen(rvc_rs1s() - rvc_rs2s()));
      set_rvc_rs1s(rvc_rs1s() ^ rvc_rs2s());
      set_rvc_rs1s(rvc_rs1s() | rvc_rs2s());
      set_rvc_rs1s(rvc_rs1s() & rvc_rs2s());
#if V8_TARGET_ARCH_RISCV64
      set_rvc_rs1s(sext32(rvc_rs1s() - rvc_rs2s()));
      set_rvc_rs1s(sext32(rvc_rs1s() + rvc_rs2s()));
void Simulator::DecodeCIType() {
  switch (instr_.RvcOpcode()) {
      if (instr_.RvcRdValue() == 0)
      set_rvc_rd(sext_xlen(rvc_rs1() + rvc_imm6()));
#if V8_TARGET_ARCH_RISCV64
      set_rvc_rd(sext32(rvc_rs1() + rvc_imm6()));

      set_rvc_rd(sext_xlen(rvc_imm6()));

      if (instr_.RvcRdValue() == 2) {
        int64_t value = get_register(sp) + rvc_imm6_addi16sp();
        set_register(sp, value);
      } else if (instr_.RvcRdValue() != 0 && instr_.RvcRdValue() != 2) {
        set_rvc_rd(rvc_u_imm6());

      set_rvc_rd(sext_xlen(rvc_rs1() << rvc_shamt6()));

      sreg_t addr = get_register(sp) + rvc_imm6_ldsp();
      uint64_t val = ReadMem<uint64_t>(addr, instr_.instr());
      set_rvc_drd(Float64::FromBits(val), false);
      TraceMemRdDouble(addr, Float64::FromBits(val),
                       get_fpu_register(rvc_frd_reg()));

#if V8_TARGET_ARCH_RISCV64
      sreg_t addr = get_register(sp) + rvc_imm6_lwsp();
      int64_t val = ReadMem<int32_t>(addr, instr_.instr());
      set_rvc_rd(sext_xlen(val), false);
      TraceMemRd(addr, val, get_register(rvc_rd_reg()));

      sreg_t addr = get_register(sp) + rvc_imm6_ldsp();
      int64_t val = ReadMem<int64_t>(addr, instr_.instr());
      set_rvc_rd(sext_xlen(val), false);
      TraceMemRd(addr, val, get_register(rvc_rd_reg()));

#elif V8_TARGET_ARCH_RISCV32
      sreg_t addr = get_register(sp) + rvc_imm6_ldsp();
      uint32_t val = ReadMem<uint32_t>(addr, instr_.instr());
      set_rvc_frd(Float32::FromBits(val), false);
      TraceMemRdFloat(addr, Float32::FromBits(val),
                      get_fpu_register(rvc_frd_reg()));

      sreg_t addr = get_register(sp) + rvc_imm6_lwsp();
      int32_t val = ReadMem<int32_t>(addr, instr_.instr());
      set_rvc_rd(sext_xlen(val), false);
      TraceMemRd(addr, val, get_register(rvc_rd_reg()));
void Simulator::DecodeCIWType() {
  switch (instr_.RvcOpcode()) {
      set_rvc_rs2s(get_register(sp) + rvc_imm8_addi4spn());

void Simulator::DecodeCSSType() {
  switch (instr_.RvcOpcode()) {
      sreg_t addr = get_register(sp) + rvc_imm6_sdsp();
      WriteMem<Float64>(addr, get_fpu_register_Float64(rvc_rs2_reg()),

#if V8_TARGET_ARCH_RISCV32
      sreg_t addr = get_register(sp) + rvc_imm6_sdsp();
      WriteMem<Float32>(addr, get_fpu_register_Float32(rvc_rs2_reg(), false),

      sreg_t addr = get_register(sp) + rvc_imm6_swsp();
      WriteMem<int32_t>(addr, (int32_t)rvc_rs2(), instr_.instr());

#if V8_TARGET_ARCH_RISCV64
      sreg_t addr = get_register(sp) + rvc_imm6_sdsp();
      WriteMem<int64_t>(addr, (int64_t)rvc_rs2(), instr_.instr());
void Simulator::DecodeCLType() {
  switch (instr_.RvcOpcode()) {
      sreg_t addr = rvc_rs1s() + rvc_imm5_w();
      int64_t val = ReadMem<int32_t>(addr, instr_.instr());
      set_rvc_rs2s(sext_xlen(val), false);
      TraceMemRd(addr, val, get_register(rvc_rs2s_reg()));

      sreg_t addr = rvc_rs1s() + rvc_imm5_d();
      uint64_t val = ReadMem<uint64_t>(addr, instr_.instr());
      set_rvc_drs2s(Float64::FromBits(val), false);

#if V8_TARGET_ARCH_RISCV64
      sreg_t addr = rvc_rs1s() + rvc_imm5_d();
      int64_t val = ReadMem<int64_t>(addr, instr_.instr());
      set_rvc_rs2s(sext_xlen(val), false);
      TraceMemRd(addr, val, get_register(rvc_rs2s_reg()));

#elif V8_TARGET_ARCH_RISCV32
      sreg_t addr = rvc_rs1s() + rvc_imm5_d();
      uint32_t val = ReadMem<uint32_t>(addr, instr_.instr());
      set_rvc_frs2s(Float32::FromBits(val), false);

void Simulator::DecodeCSType() {
  switch (instr_.RvcOpcode()) {
      sreg_t addr = rvc_rs1s() + rvc_imm5_w();
      WriteMem<int32_t>(addr, (int32_t)rvc_rs2s(), instr_.instr());

#if V8_TARGET_ARCH_RISCV64
      sreg_t addr = rvc_rs1s() + rvc_imm5_d();
      WriteMem<int64_t>(addr, (int64_t)rvc_rs2s(), instr_.instr());

      sreg_t addr = rvc_rs1s() + rvc_imm5_d();
      WriteMem<double>(addr, static_cast<double>(rvc_drs2s()), instr_.instr());
void Simulator::DecodeCJType() {
  switch (instr_.RvcOpcode()) {
      set_pc(get_pc() + instr_.RvcImm11CJValue());

void Simulator::DecodeCBType() {
  switch (instr_.RvcOpcode()) {
      if (rvc_rs1() != 0) {
        sreg_t next_pc = get_pc() + rvc_imm8_b();

      if (rvc_rs1() == 0) {
        sreg_t next_pc = get_pc() + rvc_imm8_b();

      if (instr_.RvcFunct2BValue() == 0b00) {
        set_rvc_rs1s(sext_xlen(sext_xlen(rvc_rs1s()) >> rvc_shamt6()));
      } else if (instr_.RvcFunct2BValue() == 0b01) {
        require(rvc_shamt6() < xlen);
        set_rvc_rs1s(sext_xlen(sext_xlen(rvc_rs1s()) >> rvc_shamt6()));
      } else if (instr_.RvcFunct2BValue() == 0b10) {
        set_rvc_rs1s(rvc_imm6() & rvc_rs1s());
template <typename T, typename UT>
static inline T sat_add(T x, T y, bool& sat) {
  int sh = sizeof(T) * 8 - 1;
  ux = (ux >> sh) + (((UT)0x1 << sh) - 1);
  if ((T)((ux ^ uy) | ~(uy ^ res)) >= 0) {

template <typename T, typename UT>
static inline T sat_sub(T x, T y, bool& sat) {
  int sh = sizeof(T) * 8 - 1;
  ux = (ux >> sh) + (((UT)0x1 << sh) - 1);
  if ((T)((ux ^ uy) & (ux ^ res)) < 0) {

template <typename T>
T sat_addu(T x, T y, bool& sat) {

template <typename T>
T sat_subu(T x, T y, bool& sat) {
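
// Illustrative sketch, not from the simulator sources: sat_add/sat_sub above
// clamp a signed result to the type's extremes and report the clamping through
// the `sat` flag, which the vector loops fold into vxsat. A minimal
// stand-alone version of the same idea for int8_t, using plain widening
// arithmetic instead of the branch-free bit tricks above:
static inline int8_t IllustrativeSatAddInt8(int8_t x, int8_t y, bool& sat) {
  int wide = static_cast<int>(x) + static_cast<int>(y);  // cannot overflow int
  if (wide > INT8_MAX) { sat = true; return INT8_MAX; }
  if (wide < INT8_MIN) { sat = true; return INT8_MIN; }
  sat = false;
  return static_cast<int8_t>(wide);
}
// For example, IllustrativeSatAddInt8(120, 20, sat) yields 127 with sat true.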
#ifdef CAN_USE_RVV_INSTRUCTIONS
void Simulator::DecodeRvvIVV() {
  DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_IVV);
  switch (instr_.InstructionBits() & kVTypeMask) {
      RVV_VI_VV_LOOP({ vd = vs1 + vs2; });

      RVV_VI_GENERAL_LOOP_BASE
      switch (rvv_vsew()) {
          vd = sat_add<int8_t, uint8_t>(vs2, vs1, sat);
          vd = sat_add<int16_t, uint16_t>(vs2, vs1, sat);
          vd = sat_add<int32_t, uint32_t>(vs2, vs1, sat);
          vd = sat_add<int64_t, uint64_t>(vs2, vs1, sat);

      RVV_VI_VV_LOOP({ vd = vs2 - vs1; })

      RVV_VI_GENERAL_LOOP_BASE
      switch (rvv_vsew()) {
          vd = sat_sub<int8_t, uint8_t>(vs2, vs1, sat);
          vd = sat_sub<int16_t, uint16_t>(vs2, vs1, sat);
          vd = sat_sub<int32_t, uint32_t>(vs2, vs1, sat);
          vd = sat_sub<int64_t, uint64_t>(vs2, vs1, sat);

      RVV_VI_GENERAL_LOOP_BASE
      switch (rvv_vsew()) {
          vd = sat_subu<uint8_t>(vs2, vs1, sat);
          vd = sat_subu<uint16_t>(vs2, vs1, sat);
          vd = sat_subu<uint32_t>(vs2, vs1, sat);
          vd = sat_subu<uint64_t>(vs2, vs1, sat);

      RVV_VI_VV_LOOP({ vd = vs1 & vs2; })
      RVV_VI_VV_LOOP({ vd = vs1 | vs2; })
      RVV_VI_VV_LOOP({ vd = vs1 ^ vs2; })

      if (instr_.RvvVM()) {
        RVV_VI_VVXI_MERGE_LOOP({
        RVV_VI_VVXI_MERGE_LOOP({
          bool use_first = (Rvvelt<uint64_t>(0, (i / 64)) >> (i % 64)) & 0x1;
          vd = use_first ? vs1 : vs2;

      RVV_VI_VV_LOOP_CMP({ res = vs1 == vs2; })
      RVV_VI_VV_LOOP_CMP({ res = vs1 != vs2; })
      RVV_VI_VV_ULOOP_CMP({ res = vs2 < vs1; })
      RVV_VI_VV_LOOP_CMP({ res = vs2 < vs1; })
      RVV_VI_VV_LOOP_CMP({ res = vs2 <= vs1; })
      RVV_VI_VV_ULOOP_CMP({ res = vs2 <= vs1; })

      if (instr_.RvvVM()) {
        RVV_VI_VV_LOOP_WITH_CARRY({
          auto& v0 = Rvvelt<uint64_t>(0, midx);
          vd = vs1 + vs2 + (v0 >> mpos) & 0x1;

      RVV_VI_VV_LOOP({ vd = vs2 << (vs1 & (rvv_sew() - 1)); })
      RVV_VI_VV_ULOOP({ vd = vs2 >> (vs1 & (rvv_sew() - 1)); })
      RVV_VI_VV_LOOP({ vd = vs2 >> (vs1 & (rvv_sew() - 1)); })

      RVV_VI_GENERAL_LOOP_BASE
      RVV_VI_LOOP_MASK_SKIP()
      if (rvv_vsew() == E8) {
          uint8_t round = get_round(static_cast<int>(rvv_vxrm()), result, 7);
          vd = signed_saturation<int16_t, int8_t>(result, 8);
      } else if (rvv_vsew() == E16) {
          uint8_t round = get_round(static_cast<int>(rvv_vxrm()), result, 15);
          vd = signed_saturation<int32_t, int16_t>(result, 16);
      } else if (rvv_vsew() == E32) {
          int64_t result = (int64_t)vs1 * (int64_t)vs2;
          uint8_t round = get_round(static_cast<int>(rvv_vxrm()), result, 31);
          vd = signed_saturation<int64_t, int32_t>(result, 32);
      } else if (rvv_vsew() == E64) {
          __int128_t result = (__int128_t)vs1 * (__int128_t)vs2;
          uint8_t round = get_round(static_cast<int>(rvv_vxrm()), result, 63);
          vd = signed_saturation<__int128_t, int64_t>(result, 64);

      RVV_VI_GENERAL_LOOP_BASE
      CHECK_NE(rvv_vs1_reg(), rvv_vd_reg());
      CHECK_NE(rvv_vs2_reg(), rvv_vd_reg());
      switch (rvv_vsew()) {
          auto vs1 = Rvvelt<uint8_t>(rvv_vs1_reg(), i);
          Rvvelt<uint8_t>(rvv_vd_reg(), i, true) =
              vs1 >= rvv_vlmax() ? 0 : Rvvelt<uint8_t>(rvv_vs2_reg(), vs1);
          auto vs1 = Rvvelt<uint16_t>(rvv_vs1_reg(), i);
          Rvvelt<uint16_t>(rvv_vd_reg(), i, true) =
              vs1 >= rvv_vlmax() ? 0 : Rvvelt<uint16_t>(rvv_vs2_reg(), vs1);
          auto vs1 = Rvvelt<uint32_t>(rvv_vs1_reg(), i);
          Rvvelt<uint32_t>(rvv_vd_reg(), i, true) =
              vs1 >= rvv_vlmax() ? 0 : Rvvelt<uint32_t>(rvv_vs2_reg(), vs1);
          auto vs1 = Rvvelt<uint64_t>(rvv_vs1_reg(), i);
          Rvvelt<uint64_t>(rvv_vd_reg(), i, true) =
              vs1 >= rvv_vlmax() ? 0 : Rvvelt<uint64_t>(rvv_vs2_reg(), vs1);
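
// Illustrative sketch, not from the simulator sources: the vsmul path above
// forms a double-width product, asks get_round() for a rounding increment that
// depends on vxrm, and then narrows with signed_saturation(). The RVV
// fixed-point rounding rule for a value about to be shifted right by `shift`
// bits can be written as below; the function name and signature are mine.
static inline int64_t IllustrativeRvvRoundIncrement(int vxrm, int64_t v,
                                                    int shift) {
  if (shift <= 0) return 0;                // nothing is discarded
  int64_t lsb = (v >> shift) & 1;          // bit that survives the shift
  int64_t first = (v >> (shift - 1)) & 1;  // first discarded bit
  int64_t rest =
      shift > 1 ? (v & ((int64_t{1} << (shift - 1)) - 1)) : 0;  // lower bits
  switch (vxrm) {
    case 0: return first;                             // rnu: round-to-nearest-up
    case 1: return first & ((rest != 0) | lsb);       // rne: round-to-nearest-even
    case 2: return 0;                                 // rdn: truncate
    case 3: return (!lsb) & (first | (rest != 0));    // rod: round-to-odd
    default: return 0;
  }
}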
void Simulator::DecodeRvvIVI() {
  DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_IVI);
  switch (instr_.InstructionBits() & kVTypeMask) {
      RVV_VI_VI_LOOP({ vd = simm5 + vs2; })

      RVV_VI_GENERAL_LOOP_BASE
      switch (rvv_vsew()) {
          vd = sat_add<int8_t, uint8_t>(vs2, simm5, sat);
          vd = sat_add<int16_t, uint16_t>(vs2, simm5, sat);
          vd = sat_add<int32_t, uint32_t>(vs2, simm5, sat);
          vd = sat_add<int64_t, uint64_t>(vs2, simm5, sat);

      RVV_VI_VI_LOOP({ vd = simm5 - vs2; })
      RVV_VI_VI_LOOP({ vd = simm5 & vs2; })
      RVV_VI_VI_LOOP({ vd = simm5 | vs2; })
      RVV_VI_VI_LOOP({ vd = simm5 ^ vs2; })

      if (instr_.RvvVM()) {
        RVV_VI_VVXI_MERGE_LOOP({
        RVV_VI_VVXI_MERGE_LOOP({
          bool use_first = (Rvvelt<uint64_t>(0, (i / 64)) >> (i % 64)) & 0x1;
          vd = use_first ? simm5 : vs2;

      RVV_VI_VI_LOOP_CMP({ res = simm5 == vs2; })
      RVV_VI_VI_LOOP_CMP({ res = simm5 != vs2; })
      RVV_VI_VI_ULOOP_CMP({ res = vs2 <= uimm5; })
      RVV_VI_VI_LOOP_CMP({ res = vs2 <= simm5; })
      RVV_VI_VI_LOOP_CMP({ res = vs2 > simm5; })

      RVV_VI_CHECK_SLIDE(false);
      const uint8_t sh = instr_.RvvUimm5();
      RVV_VI_GENERAL_LOOP_BASE
      bool is_valid = (i + sh) < rvv_vlmax();
      switch (rvv_vsew()) {
          VI_XI_SLIDEDOWN_PARAMS(8, offset);
          vd = is_valid ? vs2 : 0;
          VI_XI_SLIDEDOWN_PARAMS(16, offset);
          vd = is_valid ? vs2 : 0;
          VI_XI_SLIDEDOWN_PARAMS(32, offset);
          vd = is_valid ? vs2 : 0;
          VI_XI_SLIDEDOWN_PARAMS(64, offset);
          vd = is_valid ? vs2 : 0;

      RVV_VI_CHECK_SLIDE(true);
      const uint8_t offset = instr_.RvvUimm5();
      RVV_VI_GENERAL_LOOP_BASE
      switch (rvv_vsew()) {
          VI_XI_SLIDEUP_PARAMS(8, offset);
          VI_XI_SLIDEUP_PARAMS(16, offset);
          VI_XI_SLIDEUP_PARAMS(32, offset);
          VI_XI_SLIDEUP_PARAMS(64, offset);

      RVV_VI_VI_ULOOP({ vd = vs2 >> (uimm5 & (rvv_sew() - 1)); })
      RVV_VI_VI_LOOP({ vd = vs2 >> (simm5 & (rvv_sew() - 1) & 0x1f); })
      RVV_VI_VI_ULOOP({ vd = vs2 << (uimm5 & (rvv_sew() - 1)); })

      if (instr_.RvvVM()) {
        RVV_VI_XI_LOOP_WITH_CARRY({
          auto& v0 = Rvvelt<uint64_t>(0, midx);
          vd = simm5 + vs2 + (v0 >> mpos) & 0x1;

      RVV_VN_CLIP_VI_LOOP()
    case RO_V_VNCLIPU_WI:
      RVV_VN_CLIPU_VI_LOOP()
void Simulator::DecodeRvvIVX() {
  DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_IVX);
  switch (instr_.InstructionBits() & kVTypeMask) {
      RVV_VI_VX_LOOP({ vd = rs1 + vs2; })

      RVV_VI_GENERAL_LOOP_BASE
      switch (rvv_vsew()) {
          vd = sat_add<int8_t, uint8_t>(vs2, rs1, sat);
          vd = sat_add<int16_t, uint16_t>(vs2, rs1, sat);
          vd = sat_add<int32_t, uint32_t>(vs2, rs1, sat);
          vd = sat_add<int64_t, uint64_t>(vs2, rs1, sat);

      RVV_VI_VX_LOOP({ vd = vs2 - rs1; })

      RVV_VI_GENERAL_LOOP_BASE
      switch (rvv_vsew()) {
          vd = sat_sub<int8_t, uint8_t>(vs2, rs1, sat);
          vd = sat_sub<int16_t, uint16_t>(vs2, rs1, sat);
          vd = sat_sub<int32_t, uint32_t>(vs2, rs1, sat);
          vd = sat_sub<int64_t, uint64_t>(vs2, rs1, sat);

      RVV_VI_VX_LOOP({ vd = rs1 - vs2; })
      RVV_VI_VX_LOOP({ vd = rs1 & vs2; })
      RVV_VI_VX_LOOP({ vd = rs1 | vs2; })
      RVV_VI_VX_LOOP({ vd = rs1 ^ vs2; })

      if (instr_.RvvVM()) {
        RVV_VI_VVXI_MERGE_LOOP({
        RVV_VI_VVXI_MERGE_LOOP({
          bool use_first = (Rvvelt<uint64_t>(0, (i / 64)) >> (i % 64)) & 0x1;
          vd = use_first ? rs1 : vs2;

      RVV_VI_VX_LOOP_CMP({ res = vs2 == rs1; })
      RVV_VI_VX_LOOP_CMP({ res = vs2 != rs1; })
      RVV_VI_VX_LOOP_CMP({ res = vs2 < rs1; })
      RVV_VI_VX_ULOOP_CMP({ res = vs2 < rs1; })
      RVV_VI_VX_LOOP_CMP({ res = vs2 <= rs1; })
      RVV_VI_VX_ULOOP_CMP({ res = vs2 <= rs1; })
      RVV_VI_VX_LOOP_CMP({ res = vs2 > rs1; })
      RVV_VI_VX_ULOOP_CMP({ res = vs2 > rs1; })

      RVV_VI_CHECK_SLIDE(false);
      const sreg_t sh = get_register(rs1_reg());
      RVV_VI_GENERAL_LOOP_BASE
      bool is_valid = (i + sh) < rvv_vlmax();
      switch (rvv_vsew()) {
          VI_XI_SLIDEDOWN_PARAMS(8, offset);
          vd = is_valid ? vs2 : 0;
          VI_XI_SLIDEDOWN_PARAMS(16, offset);
          vd = is_valid ? vs2 : 0;
          VI_XI_SLIDEDOWN_PARAMS(32, offset);
          vd = is_valid ? vs2 : 0;
          VI_XI_SLIDEDOWN_PARAMS(64, offset);
          vd = is_valid ? vs2 : 0;

      RVV_VI_CHECK_SLIDE(true);
      const reg_t offset = get_register(rs1_reg());
      RVV_VI_GENERAL_LOOP_BASE
      switch (rvv_vsew()) {
          VI_XI_SLIDEUP_PARAMS(8, offset);
          VI_XI_SLIDEUP_PARAMS(16, offset);
          VI_XI_SLIDEUP_PARAMS(32, offset);
          VI_XI_SLIDEUP_PARAMS(64, offset);

      if (instr_.RvvVM()) {
        RVV_VI_XI_LOOP_WITH_CARRY({
          auto& v0 = Rvvelt<uint64_t>(0, midx);
          vd = rs1 + vs2 + (v0 >> mpos) & 0x1;

      RVV_VI_VX_LOOP({ vd = vs2 << (rs1 & (rvv_sew() - 1)); })
      RVV_VI_VX_ULOOP({ vd = (vs2 >> (rs1 & (rvv_sew() - 1))); })
      RVV_VI_VX_LOOP({ vd = ((vs2) >> (rs1 & (rvv_sew() - 1))); })
void Simulator::DecodeRvvMVV() {
  DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_MVV);
  switch (instr_.InstructionBits() & kVTypeMask) {
      if (instr_.Vs1Value() == VID_V) {
        CHECK(rvv_vsew() >= E8 && rvv_vsew() <= E64);
        uint8_t rd_num = rvv_vd_reg();
        require_align(rd_num, rvv_vflmul());
        for (uint8_t i = rvv_vstart(); i < rvv_vl(); ++i) {
          RVV_VI_LOOP_MASK_SKIP();
          switch (rvv_vsew()) {
              Rvvelt<uint8_t>(rd_num, i, true) = i;
              Rvvelt<uint16_t>(rd_num, i, true) = i;
              Rvvelt<uint32_t>(rd_num, i, true) = i;
              Rvvelt<uint64_t>(rd_num, i, true) = i;

      RVV_VI_VV_LOOP({ vd = vs2 * vs1; })

      RVV_VI_CHECK_DSS(true);
      RVV_VI_VV_LOOP_WIDEN({
        VI_WIDE_OP_AND_ASSIGN(vs2, vs1, 0, *, +, int);

      RVV_VI_CHECK_DSS(true);
      RVV_VI_VV_LOOP_WIDEN({
        VI_WIDE_OP_AND_ASSIGN(vs2, vs1, 0, *, +, uint);

      RVV_VI_VV_LOOP({ vd = ((__uint128_t)vs2 * vs1) >> rvv_sew(); })
      RVV_VI_VV_LOOP({ vd = ((__int128_t)vs2 * vs1) >> rvv_sew(); })
      RVV_VI_VV_LOOP({ vd = vs2 / vs1; })
      RVV_VI_VV_LOOP({ vd = vs2 / vs1; })

      if (rvv_vs1_reg() == 0) {
        switch (rvv_vsew()) {
            set_rd(Rvvelt<type_sew_t<8>::type>(rvv_vs2_reg(), 0));
            set_rd(Rvvelt<type_sew_t<16>::type>(rvv_vs2_reg(), 0));
            set_rd(Rvvelt<type_sew_t<32>::type>(rvv_vs2_reg(), 0));
            set_rd(Rvvelt<type_sew_t<64>::type>(rvv_vs2_reg(), 0));
      } else if (rvv_vs1_reg() == 0b10000) {
        RVV_VI_GENERAL_LOOP_BASE
        RVV_VI_LOOP_MASK_SKIP()
        const uint8_t idx = i / 64;
        const uint8_t pos = i % 64;
        bool mask = (Rvvelt<uint64_t>(rvv_vs2_reg(), idx) >> pos) & 0x1;
        set_register(rd_reg(), cnt);
      } else if (rvv_vs1_reg() == 0b10001) {
        RVV_VI_GENERAL_LOOP_BASE
        RVV_VI_LOOP_MASK_SKIP()
        const uint8_t idx = i / 64;
        const uint8_t pos = i % 64;
        bool mask = (Rvvelt<uint64_t>(rvv_vs2_reg(), idx) >> pos) & 0x1;
        set_register(rd_reg(), index);

      dasm.InstructionDecode(buffer, reinterpret_cast<uint8_t*>(&instr_));
      PrintF("EXECUTING 0x%08" PRIxPTR " %-44s\n",
             reinterpret_cast<intptr_t>(&instr_), buffer.begin());

      RVV_VI_VV_ULOOP_REDUCTION(
          { vd_0_res = (vd_0_res >= vs2) ? vd_0_res : vs2; })
      RVV_VI_VV_LOOP_REDUCTION(
          { vd_0_res = (vd_0_res >= vs2) ? vd_0_res : vs2; })
      RVV_VI_VV_ULOOP_REDUCTION(
          { vd_0_res = (vd_0_res <= vs2) ? vd_0_res : vs2; })
      RVV_VI_VV_LOOP_REDUCTION(
          { vd_0_res = (vd_0_res <= vs2) ? vd_0_res : vs2; })

      if (rvv_vs1_reg() == 0b00010) {
        RVV_VI_VIE_8_LOOP(false);
      } else if (rvv_vs1_reg() == 0b00011) {
        RVV_VI_VIE_8_LOOP(true);
      } else if (rvv_vs1_reg() == 0b00100) {
        RVV_VI_VIE_4_LOOP(false);
      } else if (rvv_vs1_reg() == 0b00101) {
        RVV_VI_VIE_4_LOOP(true);
      } else if (rvv_vs1_reg() == 0b00110) {
        RVV_VI_VIE_2_LOOP(false);
      } else if (rvv_vs1_reg() == 0b00111) {
        RVV_VI_VIE_2_LOOP(true);

      RVV_VI_CHECK_DSS(true);
      RVV_VI_VV_LOOP_WIDEN({
        VI_WIDE_OP_AND_ASSIGN(vs2, vs1, 0, +, +, uint);

      RVV_VI_CHECK_DSS(true);
      RVV_VI_VV_LOOP_WIDEN({
        VI_WIDE_OP_AND_ASSIGN(vs2, vs1, 0, +, +, int);

      require_align(rvv_vd_reg(), rvv_vflmul());
      require_align(rvv_vs2_reg(), rvv_vflmul());
      require(rvv_vd_reg() != rvv_vs2_reg());
      require_noover(rvv_vd_reg(), rvv_vflmul(), rvv_vs1_reg(), 1);
      RVV_VI_GENERAL_LOOP_BASE
      const uint64_t midx = i / 64;
      const uint64_t mpos = i % 64;
      bool do_mask = (Rvvelt<uint64_t>(rvv_vs1_reg(), midx) >> mpos) & 0x1;
      switch (rvv_vsew()) {
          Rvvelt<uint8_t>(rvv_vd_reg(), pos, true) =
              Rvvelt<uint8_t>(rvv_vs2_reg(), i);
          Rvvelt<uint16_t>(rvv_vd_reg(), pos, true) =
              Rvvelt<uint16_t>(rvv_vs2_reg(), i);
          Rvvelt<uint32_t>(rvv_vd_reg(), pos, true) =
              Rvvelt<uint32_t>(rvv_vs2_reg(), i);
          Rvvelt<uint64_t>(rvv_vd_reg(), pos, true) =
              Rvvelt<uint64_t>(rvv_vs2_reg(), i);

      dasm.InstructionDecode(buffer, reinterpret_cast<uint8_t*>(&instr_));
      PrintF("EXECUTING 0x%08" PRIxPTR " %-44s\n",
             reinterpret_cast<intptr_t>(&instr_), buffer.begin());
void Simulator::DecodeRvvMVX() {
  DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_MVX);
  switch (instr_.InstructionBits() & kVTypeMask) {
      if (instr_.Vs2Value() == 0x0) {
        if (rvv_vl() > 0 && rvv_vstart() < rvv_vl()) {
          switch (rvv_vsew()) {
              Rvvelt<uint8_t>(rvv_vd_reg(), 0, true) =
                  (uint8_t)get_register(rs1_reg());
              Rvvelt<uint16_t>(rvv_vd_reg(), 0, true) =
              Rvvelt<uint32_t>(rvv_vd_reg(), 0, true) =
                  (uint32_t)get_register(rs1_reg());
              Rvvelt<uint64_t>(rvv_vd_reg(), 0, true) =
                  (uint64_t)get_register(rs1_reg());

      RVV_VI_VX_LOOP({ vd = vs2 / rs1; })
      RVV_VI_VX_ULOOP({ vd = vs2 / rs1; })
      RVV_VI_VX_LOOP({ vd = vs2 * rs1; })

      RVV_VI_CHECK_DDS(false);
      RVV_VI_VX_LOOP_WIDEN({
        VI_WIDE_WVX_OP(rs1, +, uint);

      RVV_VI_CHECK_SLIDE(false);
      RVV_VI_GENERAL_LOOP_BASE
      switch (rvv_vsew()) {
          VX_SLIDE1DOWN_PARAMS(8, 1);
          VX_SLIDE1DOWN_PARAMS(16, 1);
          VX_SLIDE1DOWN_PARAMS(32, 1);
          VX_SLIDE1DOWN_PARAMS(64, 1);

      RVV_VI_CHECK_SLIDE(true);
      RVV_VI_GENERAL_LOOP_BASE
      if (i < rvv_vstart()) continue;
      switch (rvv_vsew()) {
          VX_SLIDE1UP_PARAMS(8, 1);
          VX_SLIDE1UP_PARAMS(16, 1);
          VX_SLIDE1UP_PARAMS(32, 1);
          VX_SLIDE1UP_PARAMS(64, 1);

      dasm.InstructionDecode(buffer, reinterpret_cast<uint8_t*>(&instr_));
      PrintF("EXECUTING 0x%08" PRIxPTR " %-44s\n",
             reinterpret_cast<intptr_t>(&instr_), buffer.begin());
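
// Illustrative sketch, not from the simulator sources: vslide1up/vslide1down,
// handled above through the VX_SLIDE1*_PARAMS macros, shift every element of
// the source vector by one position and pull the scalar rs1 into the vacated
// slot. On a plain array the effect (ignoring masking and vstart) is roughly:
static inline void IllustrativeSlide1Down(const int32_t* src, int32_t* dst,
                                          size_t vl, int32_t rs1) {
  for (size_t i = 0; i + 1 < vl; ++i) dst[i] = src[i + 1];  // elements move down
  if (vl > 0) dst[vl - 1] = rs1;  // the scalar fills the last element
}
static inline void IllustrativeSlide1Up(const int32_t* src, int32_t* dst,
                                        size_t vl, int32_t rs1) {
  if (vl > 0) dst[0] = rs1;  // the scalar fills element 0
  for (size_t i = 1; i < vl; ++i) dst[i] = src[i - 1];  // elements move up
}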
void Simulator::DecodeRvvFVV() {
  DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_FVV);
  switch (instr_.InstructionBits() & kVTypeMask) {
      auto fn = [this](float vs1, float vs2) {
        if (is_invalid_fdiv(vs1, vs2)) {
          this->set_fflags(kInvalidOperation);
          return std::numeric_limits<float>::quiet_NaN();
        } else if (vs1 == 0.0f) {
          this->set_fflags(kDivideByZero);
          return (std::signbit(vs1) == std::signbit(vs2)
                      ? std::numeric_limits<float>::infinity()
                      : -std::numeric_limits<float>::infinity());
      auto alu_out = fn(vs1, vs2);
      if (std::isnan(alu_out) || std::isnan(vs1) || std::isnan(vs2)) {
        if (isSnan(alu_out) || isSnan(vs1) || isSnan(vs2))
          set_fflags(kInvalidOperation);
        alu_out = std::numeric_limits<float>::quiet_NaN();

      auto fn = [this](double vs1, double vs2) {
        if (is_invalid_fdiv(vs1, vs2)) {
          this->set_fflags(kInvalidOperation);
          return std::numeric_limits<double>::quiet_NaN();
        } else if (vs1 == 0.0f) {
          this->set_fflags(kDivideByZero);
          return (std::signbit(vs1) == std::signbit(vs2)
                      ? std::numeric_limits<double>::infinity()
                      : -std::numeric_limits<double>::infinity());
      auto alu_out = fn(vs1, vs2);
      if (std::isnan(alu_out) || std::isnan(vs1) || std::isnan(vs2)) {
        if (isSnan(alu_out) || isSnan(vs1) || isSnan(vs2))
          set_fflags(kInvalidOperation);
        alu_out = std::numeric_limits<double>::quiet_NaN();

      auto fn = [this](double drs1, double drs2) {
        if (is_invalid_fmul(drs1, drs2)) {
          this->set_fflags(kInvalidOperation);
          return std::numeric_limits<double>::quiet_NaN();
      auto alu_out = fn(vs1, vs2);
      if (std::isnan(alu_out) || std::isnan(vs1) || std::isnan(vs2)) {
        if (isSnan(alu_out) || isSnan(vs1) || isSnan(vs2))
          set_fflags(kInvalidOperation);
        alu_out = std::numeric_limits<float>::quiet_NaN();

      auto fn = [this](double drs1, double drs2) {
        if (is_invalid_fmul(drs1, drs2)) {
          this->set_fflags(kInvalidOperation);
          return std::numeric_limits<double>::quiet_NaN();
      auto alu_out = fn(vs1, vs2);
      if (std::isnan(alu_out) || std::isnan(vs1) || std::isnan(vs2)) {
        if (isSnan(alu_out) || isSnan(vs1) || isSnan(vs2))
          set_fflags(kInvalidOperation);
        alu_out = std::numeric_limits<double>::quiet_NaN();

      switch (instr_.Vs1Value()) {
          Rvvelt<int32_t>(rvv_vd_reg(), i) =
              RoundF2IHelper<int32_t>(vs2, read_csr_value(csr_frm));
          Rvvelt<int64_t>(rvv_vd_reg(), i) =
              RoundF2IHelper<int64_t>(vs2, read_csr_value(csr_frm));
          Rvvelt<uint32_t>(rvv_vd_reg(), i) =
              RoundF2IHelper<uint32_t>(vs2, read_csr_value(csr_frm));
          Rvvelt<uint64_t>(rvv_vd_reg(), i) =
              RoundF2IHelper<uint64_t>(vs2, read_csr_value(csr_frm));

          auto vs2_i = Rvvelt<uint32_t>(rvv_vs2_reg(), i);
          vd = static_cast<float>(vs2_i);
          auto vs2_i = Rvvelt<uint64_t>(rvv_vs2_reg(), i);
          vd = static_cast<double>(vs2_i);

          auto vs2_i = Rvvelt<int32_t>(rvv_vs2_reg(), i);
          vd = static_cast<float>(vs2_i);
          auto vs2_i = Rvvelt<int64_t>(rvv_vs2_reg(), i);
          vd = static_cast<double>(vs2_i);

          RVV_VI_VFP_CVT_SCALE(
              auto vs2 = Rvvelt<double>(rvv_vs2_reg(), i);
              Rvvelt<float>(rvv_vd_reg(), i, true) =
                  CanonicalizeDoubleToFloatOperation(
                      [](double drs) { return static_cast<float>(drs); },
              { ; }, { ; }, { ; }, false, (rvv_vsew() >= E16))

          RVV_VI_VFP_CVT_SCALE(
              auto vs2 = Rvvelt<double>(rvv_vs2_reg(), i);
              int32_t& vd = Rvvelt<int32_t>(rvv_vd_reg(), i, true);
              vd = RoundF2IHelper<int32_t>(vs2, read_csr_value(csr_frm));
              { ; }, { ; }, { ; }, false, (rvv_vsew() <= E32))

          RVV_VI_VFP_CVT_SCALE(
              auto vs2 = Rvvelt<double>(rvv_vs2_reg(), i);
              uint32_t& vd = Rvvelt<uint32_t>(rvv_vd_reg(), i, true);
              vd = RoundF2IHelper<uint32_t>(vs2, read_csr_value(csr_frm));
              { ; }, { ; }, { ; }, false, (rvv_vsew() <= E32))

              auto vs2 = Rvvelt<int16_t>(rvv_vs2_reg(), i);
              Rvvelt<float32_t>(rvv_vd_reg(), i, true) =
                  static_cast<float>(vs2);
              auto vs2 = Rvvelt<int32_t>(rvv_vs2_reg(), i);
              Rvvelt<double>(rvv_vd_reg(), i, true) =
                  static_cast<double>(vs2);
              { ; }, { ; }, { ; }, true, (rvv_vsew() >= E8))

              auto vs2 = Rvvelt<uint16_t>(rvv_vs2_reg(), i);
              Rvvelt<float32_t>(rvv_vd_reg(), i, true) =
                  static_cast<float>(vs2);
              auto vs2 = Rvvelt<uint32_t>(rvv_vs2_reg(), i);
              Rvvelt<double>(rvv_vd_reg(), i, true) =
                  static_cast<double>(vs2);
              { ; }, { ; }, { ; }, true, (rvv_vsew() >= E8))

              auto vs2 = Rvvelt<float32_t>(rvv_vs2_reg(), i);
              Rvvelt<uint64_t>(rvv_vd_reg(), i, true) =
                  static_cast<uint64_t>(vs2);
              { ; }, { ; }, { ; }, true, (rvv_vsew() >= E16))

              auto vs2 = Rvvelt<float32_t>(rvv_vs2_reg(), i);
              Rvvelt<int64_t>(rvv_vd_reg(), i, true) =
                  static_cast<int64_t>(vs2);
              { ; }, { ; }, { ; }, true, (rvv_vsew() >= E16))

              auto vs2 = Rvvelt<float32_t>(rvv_vs2_reg(), i);
              Rvvelt<double>(rvv_vd_reg(), i, true) =
                  static_cast<double>(vs2);
              { ; }, { ; }, { ; }, true, (rvv_vsew() >= E16))

      switch (instr_.Vs1Value()) {
            int32_t& vd_i = Rvvelt<int32_t>(rvv_vd_reg(), i, true);
            vd_i = int32_t(FclassHelper(vs2));
            int64_t& vd_i = Rvvelt<int64_t>(rvv_vd_reg(), i, true);
            vd_i = FclassHelper(vs2);
            vd = std::sqrt(vs2);
            vd = std::sqrt(vs2);
            vd = base::RecipSqrt(vs2);
            vd = base::RecipSqrt(vs2);
            vd = base::Recip(vs2);
            vd = base::Recip(vs2);

          { res = CompareFHelper(vs2, vs1, EQ); },
          { res = CompareFHelper(vs2, vs1, EQ); }, true)
          { res = CompareFHelper(vs2, vs1, NE); },
          { res = CompareFHelper(vs2, vs1, NE); }, true)
          { res = CompareFHelper(vs2, vs1, LT); },
          { res = CompareFHelper(vs2, vs1, LT); }, true)
          { res = CompareFHelper(vs2, vs1, LE); },
          { res = CompareFHelper(vs2, vs1, LE); }, true)

          { vd = FMaxMinHelper(vs2, vs1, MaxMinKind::kMax); },
          { vd = FMaxMinHelper(vs2, vs1, MaxMinKind::kMax); })
      RVV_VI_VFP_VV_LOOP_REDUCTION(
          { vd_0 = FMaxMinHelper(vd_0, vs2, MaxMinKind::kMax); },
          { vd_0 = FMaxMinHelper(vd_0, vs2, MaxMinKind::kMax); })
          { vd = FMaxMinHelper(vs2, vs1, MaxMinKind::kMin); },
          { vd = FMaxMinHelper(vs2, vs1, MaxMinKind::kMin); })

          vd = fsgnj32(Float32::FromBits(vs2), Float32::FromBits(vs1),
                       false, false)
          vd = fsgnj64(Float64::FromBits(vs2), Float64::FromBits(vs1),
                       false, false)
          vd = fsgnj32(Float32::FromBits(vs2), Float32::FromBits(vs1),
                       true, false)
          vd = fsgnj64(Float64::FromBits(vs2), Float64::FromBits(vs1),
                       true, false)
          vd = fsgnj32(Float32::FromBits(vs2), Float32::FromBits(vs1),
                       false, true)
          vd = fsgnj64(Float64::FromBits(vs2), Float64::FromBits(vs1),
                       false, true)

      auto fn = [this](float frs1, float frs2) {
        if (is_invalid_fadd(frs1, frs2)) {
          this->set_fflags(kInvalidOperation);
          return std::numeric_limits<float>::quiet_NaN();
      auto alu_out = fn(vs1, vs2);
      if (std::isnan(alu_out) || std::isnan(vs1) || std::isnan(vs2)) {
        if (isSnan(alu_out) || isSnan(vs1) || isSnan(vs2))
          set_fflags(kInvalidOperation);
        alu_out = std::numeric_limits<float>::quiet_NaN();

      auto fn = [this](double frs1, double frs2) {
        if (is_invalid_fadd(frs1, frs2)) {
          this->set_fflags(kInvalidOperation);
          return std::numeric_limits<double>::quiet_NaN();
      auto alu_out = fn(vs1, vs2);
      if (std::isnan(alu_out) || std::isnan(vs1) || std::isnan(vs2)) {
        if (isSnan(alu_out) || isSnan(vs1) || isSnan(vs2))
          set_fflags(kInvalidOperation);
        alu_out = std::numeric_limits<double>::quiet_NaN();

      auto fn = [this](float frs1, float frs2) {
        if (is_invalid_fsub(frs1, frs2)) {
          this->set_fflags(kInvalidOperation);
          return std::numeric_limits<float>::quiet_NaN();
      auto alu_out = fn(vs1, vs2);
      if (std::isnan(alu_out) || std::isnan(vs1) || std::isnan(vs2)) {
        if (isSnan(alu_out) || isSnan(vs1) || isSnan(vs2))
          set_fflags(kInvalidOperation);
        alu_out = std::numeric_limits<float>::quiet_NaN();

      auto fn = [this](double frs1, double frs2) {
        if (is_invalid_fsub(frs1, frs2)) {
          this->set_fflags(kInvalidOperation);
          return std::numeric_limits<double>::quiet_NaN();
      auto alu_out = fn(vs1, vs2);
      if (std::isnan(alu_out) || std::isnan(vs1) || std::isnan(vs2)) {
        if (isSnan(alu_out) || isSnan(vs1) || isSnan(vs2))
          set_fflags(kInvalidOperation);
        alu_out = std::numeric_limits<double>::quiet_NaN();

      RVV_VI_CHECK_DSS(true);
      RVV_VI_VFP_VV_LOOP_WIDEN(
          RVV_VI_VFP_VV_ARITH_CHECK_COMPUTE(double, is_invalid_fadd, +);
      RVV_VI_CHECK_DSS(true);
      RVV_VI_VFP_VV_LOOP_WIDEN(
          RVV_VI_VFP_VV_ARITH_CHECK_COMPUTE(double, is_invalid_fsub, -);
      RVV_VI_CHECK_DSS(true);
      RVV_VI_VFP_VV_LOOP_WIDEN(
          RVV_VI_VFP_VV_ARITH_CHECK_COMPUTE(double, is_invalid_fadd, +);
      RVV_VI_CHECK_DSS(true);
      RVV_VI_VFP_VV_LOOP_WIDEN(
          RVV_VI_VFP_VV_ARITH_CHECK_COMPUTE(double, is_invalid_fsub, -);
      RVV_VI_CHECK_DSS(true);
      RVV_VI_VFP_VV_LOOP_WIDEN(
          RVV_VI_VFP_VV_ARITH_CHECK_COMPUTE(double, is_invalid_fmul, *);

      RVV_VI_CHECK_DSS(true);
      switch (rvv_vsew()) {
          double& vd = Rvvelt<double>(rvv_vd_reg(), 0, true);
          double vs1 = Rvvelt<double>(rvv_vs1_reg(), 0);
          double alu_out = vs1;
          for (uint64_t i = rvv_vstart(); i < rvv_vl(); ++i) {
            double vs2 = static_cast<double>(Rvvelt<float>(rvv_vs2_reg(), i));
            if (is_invalid_fadd(alu_out, vs2)) {
              set_fflags(kInvalidOperation);
              alu_out = std::numeric_limits<float>::quiet_NaN();
            alu_out = alu_out + vs2;
            if (std::isnan(alu_out) || std::isnan(vs2)) {
              if (isSnan(alu_out) || isSnan(vs2)) set_fflags(kInvalidOperation);
              alu_out = std::numeric_limits<float>::quiet_NaN();

      RVV_VI_VFP_FMA_VV_LOOP({RVV_VI_VFP_FMA(float, vd, vs1, vs2)},
                             {RVV_VI_VFP_FMA(double, vd, vs1, vs2)})
      RVV_VI_VFP_FMA_VV_LOOP({RVV_VI_VFP_FMA(float, -vd, vs1, -vs2)},
                             {RVV_VI_VFP_FMA(double, -vd, vs1, -vs2)})
      RVV_VI_VFP_FMA_VV_LOOP({RVV_VI_VFP_FMA(float, vd, vs1, -vs2)},
                             {RVV_VI_VFP_FMA(double, vd, vs1, -vs2)})
      RVV_VI_VFP_FMA_VV_LOOP({RVV_VI_VFP_FMA(float, -vd, vs1, +vs2)},
                             {RVV_VI_VFP_FMA(double, -vd, vs1, +vs2)})
      RVV_VI_VFP_FMA_VV_LOOP({RVV_VI_VFP_FMA(float, vs2, vs1, vd)},
                             {RVV_VI_VFP_FMA(double, vs2, vs1, vd)})
      RVV_VI_VFP_FMA_VV_LOOP({RVV_VI_VFP_FMA(float, -vs2, vs1, -vd)},
                             {RVV_VI_VFP_FMA(double, -vs2, vs1, -vd)})
      RVV_VI_VFP_FMA_VV_LOOP({RVV_VI_VFP_FMA(float, vs2, vs1, -vd)},
                             {RVV_VI_VFP_FMA(double, vs2, vs1, -vd)})
      RVV_VI_VFP_FMA_VV_LOOP({RVV_VI_VFP_FMA(float, -vs2, vs1, +vd)},
                             {RVV_VI_VFP_FMA(double, -vs2, vs1, +vd)})

      RVV_VI_CHECK_DSS(true);
      RVV_VI_VFP_VV_LOOP_WIDEN({RVV_VI_VFP_FMA(double, vs2, vs1, vs3)}, false)
      RVV_VI_CHECK_DSS(true);
      RVV_VI_VFP_VV_LOOP_WIDEN({RVV_VI_VFP_FMA(double, -vs2, vs1, -vs3)}, false)
      RVV_VI_CHECK_DSS(true);
      RVV_VI_VFP_VV_LOOP_WIDEN({RVV_VI_VFP_FMA(double, vs2, vs1, -vs3)}, false)
      RVV_VI_CHECK_DSS(true);
      RVV_VI_VFP_VV_LOOP_WIDEN({RVV_VI_VFP_FMA(double, -vs2, vs1, +vs3)}, false)

      switch (rvv_vsew()) {
          uint32_t fs2 = Rvvelt<uint32_t>(rvv_vs2_reg(), 0);
          set_frd(Float32::FromBits(fs2));
          uint64_t fs2 = Rvvelt<uint64_t>(rvv_vs2_reg(), 0);
          set_drd(Float64::FromBits(fs2));
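
// Illustrative sketch, not from the simulator sources: the recurring pattern
// in the float cases above - run the arithmetic lambda, then replace any NaN
// result with the canonical quiet NaN and raise kInvalidOperation when a
// signaling NaN was involved - can be written stand-alone as below. The helper
// names are illustrative; isSnan() in the simulator plays the role of the
// quiet-bit test here.
static inline bool IllustrativeIsSignalingNaN(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));               // inspect the payload
  return std::isnan(f) && (bits & 0x00400000u) == 0;  // quiet bit is clear
}
static inline float IllustrativeCanonicalizeNaN(float a, float b, float out,
                                                bool* invalid) {
  if (std::isnan(out) || std::isnan(a) || std::isnan(b)) {
    *invalid = IllustrativeIsSignalingNaN(a) || IllustrativeIsSignalingNaN(b) ||
               IllustrativeIsSignalingNaN(out);
    return std::numeric_limits<float>::quiet_NaN();
  }
  *invalid = false;
  return out;
}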
void Simulator::DecodeRvvFVF() {
  DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_FVF);
  switch (instr_.InstructionBits() & kVTypeMask) {
      RVV_VFSGNJ_VV_VF_LOOP(
          vd = fsgnj32(Float32::FromBits(vs2), fs1, false, false).get_bits();
          vd = fsgnj64(Float64::FromBits(vs2), fs1, false, false).get_bits();
      RVV_VFSGNJ_VV_VF_LOOP(
          vd = fsgnj32(Float32::FromBits(vs2), fs1, true, false).get_bits();
          vd = fsgnj64(Float64::FromBits(vs2), fs1, true, false).get_bits();
      RVV_VFSGNJ_VV_VF_LOOP(
          vd = fsgnj32(Float32::FromBits(vs2), fs1, false, true).get_bits();
          vd = fsgnj64(Float64::FromBits(vs2), fs1, false, true).get_bits();

      if (instr_.RvvVM()) {
        RVV_VI_VF_MERGE_LOOP(
        RVV_VI_VF_MERGE_LOOP(
            (Rvvelt<uint64_t>(0, (i / 64)) >> (i % 64)) & 0x1;
            vd = use_first ? fs1 : vs2;
            (Rvvelt<uint64_t>(0, (i / 64)) >> (i % 64)) & 0x1;
            vd = use_first ? fs1 : vs2;

      auto fn = [this](float frs1, float frs2) {
        if (is_invalid_fadd(frs1, frs2)) {
          this->set_fflags(kInvalidOperation);
          return std::numeric_limits<float>::quiet_NaN();
      auto alu_out = fn(fs1, vs2);
      if (std::isnan(alu_out) || std::isnan(fs1) || std::isnan(vs2)) {
        if (isSnan(alu_out) || isSnan(fs1) || isSnan(vs2))
          set_fflags(kInvalidOperation);
        alu_out = std::numeric_limits<float>::quiet_NaN();

      auto fn = [this](double frs1, double frs2) {
        if (is_invalid_fadd(frs1, frs2)) {
          this->set_fflags(kInvalidOperation);
          return std::numeric_limits<double>::quiet_NaN();
      auto alu_out = fn(fs1, vs2);
      if (std::isnan(alu_out) || std::isnan(fs1) || std::isnan(vs2)) {
        if (isSnan(alu_out) || isSnan(fs1) || isSnan(vs2))
          set_fflags(kInvalidOperation);
        alu_out = std::numeric_limits<double>::quiet_NaN();

      RVV_VI_CHECK_DSS(true);
      RVV_VI_VFP_VF_LOOP_WIDEN(
          RVV_VI_VFP_VF_ARITH_CHECK_COMPUTE(double, is_invalid_fadd, +);
      RVV_VI_CHECK_DSS(true);
      RVV_VI_VFP_VF_LOOP_WIDEN(
          RVV_VI_VFP_VF_ARITH_CHECK_COMPUTE(double, is_invalid_fsub, -);
      RVV_VI_CHECK_DSS(true);
      RVV_VI_VFP_VF_LOOP_WIDEN(
          RVV_VI_VFP_VF_ARITH_CHECK_COMPUTE(double, is_invalid_fadd, +);
      RVV_VI_CHECK_DSS(true);
      RVV_VI_VFP_VF_LOOP_WIDEN(
          RVV_VI_VFP_VF_ARITH_CHECK_COMPUTE(double, is_invalid_fsub, -);
      RVV_VI_CHECK_DSS(true);
      RVV_VI_VFP_VF_LOOP_WIDEN(
          RVV_VI_VFP_VF_ARITH_CHECK_COMPUTE(double, is_invalid_fmul, *);

      RVV_VI_VFP_FMA_VF_LOOP({RVV_VI_VFP_FMA(float, vd, fs1, vs2)},
                             {RVV_VI_VFP_FMA(double, vd, fs1, vs2)})
      RVV_VI_VFP_FMA_VF_LOOP({RVV_VI_VFP_FMA(float, -vd, fs1, -vs2)},
                             {RVV_VI_VFP_FMA(double, -vd, fs1, -vs2)})
      RVV_VI_VFP_FMA_VF_LOOP({RVV_VI_VFP_FMA(float, vd, fs1, -vs2)},
                             {RVV_VI_VFP_FMA(double, vd, fs1, -vs2)})
      RVV_VI_VFP_FMA_VF_LOOP({RVV_VI_VFP_FMA(float, -vd, fs1, vs2)},
                             {RVV_VI_VFP_FMA(double, -vd, fs1, vs2)})
      RVV_VI_VFP_FMA_VF_LOOP({RVV_VI_VFP_FMA(float, vs2, fs1, vd)},
                             {RVV_VI_VFP_FMA(double, vs2, fs1, vd)})
      RVV_VI_VFP_FMA_VF_LOOP({RVV_VI_VFP_FMA(float, -vs2, fs1, -vd)},
                             {RVV_VI_VFP_FMA(double, -vs2, fs1, -vd)})
      RVV_VI_VFP_FMA_VF_LOOP({RVV_VI_VFP_FMA(float, vs2, fs1, -vd)},
                             {RVV_VI_VFP_FMA(double, vs2, fs1, -vd)})
      RVV_VI_VFP_FMA_VF_LOOP({RVV_VI_VFP_FMA(float, -vs2, fs1, vd)},
                             {RVV_VI_VFP_FMA(double, -vs2, fs1, vd)})

      RVV_VI_CHECK_DSS(true);
      RVV_VI_VFP_VF_LOOP_WIDEN({RVV_VI_VFP_FMA(double, vs2, fs1, vs3)}, false)
      RVV_VI_CHECK_DSS(true);
      RVV_VI_VFP_VF_LOOP_WIDEN({RVV_VI_VFP_FMA(double, -vs2, fs1, -vs3)}, false)
      RVV_VI_CHECK_DSS(true);
      RVV_VI_VFP_VF_LOOP_WIDEN({RVV_VI_VFP_FMA(double, vs2, fs1, -vs3)}, false)
      RVV_VI_CHECK_DSS(true);
      RVV_VI_VFP_VF_LOOP_WIDEN({RVV_VI_VFP_FMA(double, -vs2, fs1, vs3)}, false)

      if (instr_.Vs2Value() == 0x0) {
        if (rvv_vl() > 0 && rvv_vstart() < rvv_vl()) {
          switch (rvv_vsew()) {
              Rvvelt<uint32_t>(rvv_vd_reg(), 0, true) =
                  (uint32_t)(get_fpu_register_Float32(rs1_reg()).get_bits());
              Rvvelt<uint64_t>(rvv_vd_reg(), 0, true) =
                  (uint64_t)(get_fpu_register_Float64(rs1_reg()).get_bits());

      RVV_VI_CHECK_SLIDE(false);
      RVV_VI_GENERAL_LOOP_BASE
      switch (rvv_vsew()) {
          VF_SLIDE1DOWN_PARAMS(32, 1);
          VF_SLIDE1DOWN_PARAMS(64, 1);

      RVV_VI_CHECK_SLIDE(true);
      RVV_VI_GENERAL_LOOP_BASE
      if (i < rvv_vstart()) continue;
      switch (rvv_vsew()) {
          VF_SLIDE1UP_PARAMS(32, 1);
          VF_SLIDE1UP_PARAMS(64, 1);
void Simulator::DecodeVType() {
  switch (instr_.InstructionBits() & (kFunct3Mask | kBaseOpcodeMask)) {
      switch (instr_.InstructionBits() &
              (kBaseOpcodeMask | kFunct3Mask | 0x80000000)) {
          set_rvv_vtype(rvv_zimm());
          if (rs1_reg() != zero_reg) {
          } else if (rd_reg() != zero_reg) {
          avl = avl <= rvv_vlmax() ? avl : rvv_vlmax();
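
// Illustrative sketch, not from the simulator sources: the vsetvli/vsetvl
// handling in this function clamps the requested application vector length
// (avl) against vlmax; the fragments below additionally halve an avl that lies
// between vlmax and 2*vlmax. The branch taken for even larger avl values is
// not visible here, so capping at vlmax is an assumption in this sketch:
static inline uint64_t IllustrativeComputeVl(uint64_t avl, uint64_t vlmax) {
  if (avl <= vlmax) return avl;         // the whole request fits
  if (avl < 2 * vlmax) return avl / 2;  // split an oversized request in half
  return vlmax;                         // assumed cap for anything larger
}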
      if (!(instr_.InstructionBits() & 0x40000000)) {
        set_rvv_vtype(rs2());
        if (rs1_reg() != zero_reg) {
        } else if (rd_reg() != zero_reg) {
        avl = avl <= rvv_vlmax() ? avl
              : avl < (rvv_vlmax() * 2) ? avl / 2

              (kBaseOpcodeMask | kFunct3Mask | 0xC0000000),
        set_rvv_vtype(rvv_zimm());
        avl = instr_.Rvvuimm();
        avl = avl <= rvv_vlmax() ? avl
              : avl < (rvv_vlmax() * 2) ? avl / 2

      FATAL("Error: Unsupported on FILE:%s:%d.", __FILE__, __LINE__);
void Simulator::InstructionDecode(Instruction* instr) {
  CheckICache(i_cache(), instr);
  pc_modified_ = false;
  dasm.InstructionDecode(buffer, reinterpret_cast<uint8_t*>(instr));
  switch (instr_.InstructionType()) {
    case Instruction::kRType:
    case Instruction::kR4Type:
    case Instruction::kIType:
    case Instruction::kSType:
    case Instruction::kBType:
    case Instruction::kUType:
    case Instruction::kJType:
    case Instruction::kCRType:
    case Instruction::kCAType:
    case Instruction::kCJType:
    case Instruction::kCBType:
    case Instruction::kCIType:
    case Instruction::kCIWType:
    case Instruction::kCSSType:
    case Instruction::kCLType:
    case Instruction::kCSType:
#ifdef CAN_USE_RVV_INSTRUCTIONS
    case Instruction::kVType:
      std::cout << "Unrecognized instruction [@pc=0x" << std::hex
  PrintF(" 0x%012" PRIxPTR " %-44s\t%s\n",
         reinterpret_cast<intptr_t>(instr), buffer.begin(),
         trace_buf_.begin());
  if (!pc_modified_) {
    reinterpret_cast<sreg_t>(instr) + instr->InstructionSize());
  if (watch_address_ != nullptr) {
    PrintF(" 0x%012" PRIxPTR " : 0x%016" REGIx_FORMAT " %14" REGId_FORMAT
           reinterpret_cast<intptr_t>(watch_address_), *watch_address_,
    if (watch_value_ != *watch_address_) {
      RiscvDebugger dbg(this);
      watch_value_ = *watch_address_;
void Simulator::Execute() {
  sreg_t program_counter = get_pc();
  while (program_counter != end_sim_pc) {
    Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
    if (icount_ == static_cast<sreg_t>(v8_flags.stop_sim_at)) {
      RiscvDebugger dbg(this);
    InstructionDecode(instr);
    program_counter = get_pc();
void Simulator::CallInternal(Address entry) {
  isolate_->stack_guard()->AdjustStackLimitForSimulator();
  set_register(pc, static_cast<sreg_t>(entry));
  set_register(ra, end_sim_pc);

  sreg_t s0_val = get_register(s0);
  sreg_t s1_val = get_register(s1);
  sreg_t s2_val = get_register(s2);
  sreg_t s3_val = get_register(s3);
  sreg_t s4_val = get_register(s4);
  sreg_t s5_val = get_register(s5);
  sreg_t s6_val = get_register(s6);
  sreg_t s7_val = get_register(s7);
  sreg_t s8_val = get_register(s8);
  sreg_t s9_val = get_register(s9);
  sreg_t s10_val = get_register(s10);
  sreg_t s11_val = get_register(s11);
  sreg_t gp_val = get_register(gp);
  sreg_t sp_val = get_register(sp);

  sreg_t callee_saved_value = icount_ != 0 ? icount_ & ~kSmiTagMask : -1;
  set_register(s0, callee_saved_value);
  set_register(s1, callee_saved_value);
  set_register(s2, callee_saved_value);
  set_register(s3, callee_saved_value);
  set_register(s4, callee_saved_value);
  set_register(s5, callee_saved_value);
  set_register(s6, callee_saved_value);
  set_register(s7, callee_saved_value);
  set_register(s8, callee_saved_value);
  set_register(s9, callee_saved_value);
  set_register(s10, callee_saved_value);
  set_register(s11, callee_saved_value);
  set_register(gp, callee_saved_value);

  CHECK_EQ(callee_saved_value, get_register(s0));
  CHECK_EQ(callee_saved_value, get_register(s1));
  CHECK_EQ(callee_saved_value, get_register(s2));
  CHECK_EQ(callee_saved_value, get_register(s3));
  CHECK_EQ(callee_saved_value, get_register(s4));
  CHECK_EQ(callee_saved_value, get_register(s5));
  CHECK_EQ(callee_saved_value, get_register(s6));
  CHECK_EQ(callee_saved_value, get_register(s7));
  CHECK_EQ(callee_saved_value, get_register(s8));
  CHECK_EQ(callee_saved_value, get_register(s9));
  CHECK_EQ(callee_saved_value, get_register(s10));
  CHECK_EQ(callee_saved_value, get_register(s11));
  CHECK_EQ(callee_saved_value, get_register(gp));

  set_register(s0, s0_val);
  set_register(s1, s1_val);
  set_register(s2, s2_val);
  set_register(s3, s3_val);
  set_register(s4, s4_val);
  set_register(s5, s5_val);
  set_register(s6, s6_val);
  set_register(s7, s7_val);
  set_register(s8, s8_val);
  set_register(s9, s9_val);
  set_register(s10, s10_val);
  set_register(s11, s11_val);
  set_register(gp, gp_val);
  set_register(sp, sp_val);
#ifdef V8_TARGET_ARCH_RISCV64
void Simulator::CallImpl(Address entry, CallArgument* args) {
  std::vector<int64_t> stack_args(0);
  for (int i = 0; !args[i].IsEnd(); i++) {
    CallArgument arg = args[i];
    if (arg.IsGP() && (index_gp < 8)) {
      set_register(index_gp + kRegCode_a0, arg.bits());
    } else if (arg.IsFP() && (index_fp < 8)) {
      set_fpu_register(index_fp + kDoubleCode_fa0, arg.bits());
      DCHECK(arg.IsFP() || arg.IsGP());
      stack_args.push_back(arg.bits());

  std::cout << "CallImpl: reg_arg_count = " << index_fp + index_gp << std::hex
            << " entry-pc (JSEntry) = 0x" << entry
            << " a0 (Isolate-root) = 0x" << get_register(a0)
            << " a1 (orig_func/new_target) = 0x" << get_register(a1)
            << " a2 (func/target) = 0x" << get_register(a2)
            << " a3 (receiver) = 0x" << get_register(a3) << " a4 (argc) = 0x"
            << get_register(a4) << " a5 (argv) = 0x" << get_register(a5)

  int64_t original_stack = get_register(sp);
  int64_t stack_args_size =
      stack_args.size() * sizeof(stack_args[0]) + kCArgsSlotsSize;
  int64_t entry_stack = original_stack - stack_args_size;
  if (base::OS::ActivationFrameAlignment() != 0) {
    entry_stack &= -base::OS::ActivationFrameAlignment();
  char* stack_argument = reinterpret_cast<char*>(entry_stack);
  memcpy(stack_argument + kCArgSlotCount, stack_args.data(),
         stack_args.size() * sizeof(int64_t));
  set_register(sp, entry_stack);
  CallInternal(entry);
  CHECK_EQ(entry_stack, get_register(sp));
  set_register(sp, original_stack);
intptr_t Simulator::CallImpl(Address entry, int argument_count,
                             const intptr_t* arguments) {
  constexpr int kRegisterPassedArguments = 8;
  int reg_arg_count = std::min(kRegisterPassedArguments, argument_count);
  if (reg_arg_count > 0) set_register(a0, arguments[0]);
  if (reg_arg_count > 1) set_register(a1, arguments[1]);
  if (reg_arg_count > 2) set_register(a2, arguments[2]);
  if (reg_arg_count > 3) set_register(a3, arguments[3]);
  if (reg_arg_count > 4) set_register(a4, arguments[4]);
  if (reg_arg_count > 5) set_register(a5, arguments[5]);
  if (reg_arg_count > 6) set_register(a6, arguments[6]);
  if (reg_arg_count > 7) set_register(a7, arguments[7]);

  std::cout << "CallImpl: reg_arg_count = " << reg_arg_count << std::hex
            << " entry-pc (JSEntry) = 0x" << entry
            << " a0 (Isolate-root) = 0x" << get_register(a0)
            << " a1 (orig_func/new_target) = 0x" << get_register(a1)
            << " a2 (func/target) = 0x" << get_register(a2)
            << " a3 (receiver) = 0x" << get_register(a3) << " a4 (argc) = 0x"
            << get_register(a4) << " a5 (argv) = 0x" << get_register(a5)

  sreg_t original_stack = get_register(sp);
  int stack_args_count = argument_count - reg_arg_count;
  int stack_args_size = stack_args_count * sizeof(*arguments) + kCArgsSlotsSize;
  sreg_t entry_stack = original_stack - stack_args_size;
  if (base::OS::ActivationFrameAlignment() != 0) {
    entry_stack &= -base::OS::ActivationFrameAlignment();
  intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
  memcpy(stack_argument + kCArgSlotCount, arguments + reg_arg_count,
         stack_args_count * sizeof(*arguments));
  set_register(sp, entry_stack);
  CallInternal(entry);
  CHECK_EQ(entry_stack, get_register(sp));
  set_register(sp, original_stack);
  return get_register(a0);
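
// Illustrative sketch, not from the simulator sources: both CallImpl overloads
// above place the first eight arguments in a0..a7, spill the rest below the
// current sp together with the kCArgsSlotsSize shadow area, and round the
// resulting entry stack pointer down to the ABI frame alignment. The pointer
// arithmetic reduces to roughly this (names are illustrative):
static inline intptr_t IllustrativeEntrySp(intptr_t sp, size_t stack_arg_count,
                                           size_t slot_size, size_t shadow_size,
                                           intptr_t alignment) {
  intptr_t entry_sp =
      sp - static_cast<intptr_t>(stack_arg_count * slot_size + shadow_size);
  if (alignment != 0) entry_sp &= -alignment;  // round down to the alignment
  return entry_sp;
}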
double Simulator::CallFP(Address entry, double d0, double d1) {
  set_fpu_register_double(fa0, d0);
  set_fpu_register_double(fa1, d1);
  CallInternal(entry);
  return get_fpu_register_double(fa0);

uintptr_t Simulator::PushAddress(uintptr_t address) {
  int64_t new_sp = get_register(sp) - sizeof(uintptr_t);
  uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
  *stack_slot = address;
  set_register(sp, new_sp);

uintptr_t Simulator::PopAddress() {
  int64_t current_sp = get_register(sp);
  uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
  uintptr_t address = *stack_slot;
  set_register(sp, current_sp + sizeof(uintptr_t));
Simulator::LocalMonitor::LocalMonitor()
    : access_state_(MonitorAccess::Open),

void Simulator::LocalMonitor::Clear() {
  access_state_ = MonitorAccess::Open;
  size_ = TransactionSize::None;

void Simulator::LocalMonitor::NotifyLoad() {
  if (access_state_ == MonitorAccess::RMW) {

void Simulator::LocalMonitor::NotifyLoadLinked(uintptr_t addr,
                                               TransactionSize size) {
  access_state_ = MonitorAccess::RMW;
  tagged_addr_ = addr;

void Simulator::LocalMonitor::NotifyStore() {
  if (access_state_ == MonitorAccess::RMW) {

bool Simulator::LocalMonitor::NotifyStoreConditional(uintptr_t addr,
                                                     TransactionSize size) {
  if (access_state_ == MonitorAccess::RMW) {
    if (addr == tagged_addr_ && size_ == size) {
    DCHECK(access_state_ == MonitorAccess::Open);

Simulator::GlobalMonitor::LinkedAddress::LinkedAddress()
    : access_state_(MonitorAccess::Open),
      failure_counter_(0) {}

void Simulator::GlobalMonitor::LinkedAddress::Clear_Locked() {
  access_state_ = MonitorAccess::Open;

void Simulator::GlobalMonitor::LinkedAddress::NotifyLoadLinked_Locked(
  access_state_ = MonitorAccess::RMW;
  tagged_addr_ = addr;

void Simulator::GlobalMonitor::LinkedAddress::NotifyStore_Locked() {
  if (access_state_ == MonitorAccess::RMW) {

bool Simulator::GlobalMonitor::LinkedAddress::NotifyStoreConditional_Locked(
    uintptr_t addr, bool is_requesting_thread) {
  if (access_state_ == MonitorAccess::RMW) {
    if (is_requesting_thread) {
      if (addr == tagged_addr_) {
        if (failure_counter_++ >= kMaxFailureCounter) {
          failure_counter_ = 0;
    } else if ((addr & kExclusiveTaggedAddrMask) ==
               (tagged_addr_ & kExclusiveTaggedAddrMask)) {

void Simulator::GlobalMonitor::NotifyLoadLinked_Locked(
    uintptr_t addr, LinkedAddress* linked_address) {
  linked_address->NotifyLoadLinked_Locked(addr);
  PrependProcessor_Locked(linked_address);

void Simulator::GlobalMonitor::NotifyStore_Locked(
    LinkedAddress* linked_address) {
  for (LinkedAddress* iter = head_; iter; iter = iter->next_) {
    iter->NotifyStore_Locked();

bool Simulator::GlobalMonitor::NotifyStoreConditional_Locked(
    uintptr_t addr, LinkedAddress* linked_address) {
  DCHECK(IsProcessorInLinkedList_Locked(linked_address));
  if (linked_address->NotifyStoreConditional_Locked(addr, true)) {
    for (LinkedAddress* iter = head_; iter; iter = iter->next_) {
      if (iter != linked_address) {
        iter->NotifyStoreConditional_Locked(addr, false);

bool Simulator::GlobalMonitor::IsProcessorInLinkedList_Locked(
    LinkedAddress* linked_address) const {
  return head_ == linked_address || linked_address->next_ ||
         linked_address->prev_;

void Simulator::GlobalMonitor::PrependProcessor_Locked(
    LinkedAddress* linked_address) {
  if (IsProcessorInLinkedList_Locked(linked_address)) {
  head_->prev_ = linked_address;
  linked_address->prev_ = nullptr;
  linked_address->next_ = head_;
  head_ = linked_address;

void Simulator::GlobalMonitor::RemoveLinkedAddress(
    LinkedAddress* linked_address) {
  base::MutexGuard lock_guard(&mutex);
  if (!IsProcessorInLinkedList_Locked(linked_address)) {
  if (linked_address->prev_) {
    linked_address->prev_->next_ = linked_address->next_;
    head_ = linked_address->next_;
  if (linked_address->next_) {
    linked_address->next_->prev_ = linked_address->prev_;
  linked_address->prev_ = nullptr;
  linked_address->next_ = nullptr;
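
// Illustrative sketch, not from the simulator sources: the Local/Global
// monitors above model RISC-V LR/SC. A load-reserved opens an RMW window on an
// address; a store-conditional succeeds only if the window is still open for
// the same address (and, for the local monitor, the same access size), and any
// SC attempt closes the window. The core predicate is roughly:
struct IllustrativeMonitor {
  bool open = false;   // set by a load-reserved
  uintptr_t addr = 0;  // reserved address
  size_t size = 0;     // reserved access size
  bool StoreConditionalSucceeds(uintptr_t a, size_t s) {
    bool ok = open && a == addr && s == size;  // reservation still intact?
    open = false;                              // any SC attempt clears it
    return ok;
  }
};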
void Simulator::DoSwitchStackLimit(Instruction* instr) {
  stack_limit_ = static_cast<uintptr_t>(stack_limit - kAdditionalStackMargin);
constexpr Opcode RO_V_VSSUBU_VV
constexpr Opcode RO_V_VFWADD_VF
constexpr Opcode RO_V_VWMULU_VV
constexpr Opcode OP_COUNT
constexpr Opcode RO_FMSUB_S
constexpr Opcode RO_BINVI
constexpr Opcode RO_V_VFNMACC_VV
base::SmallVector< RegisterT, kStaticCapacity > registers_
const uintptr_t stack_limit_
#define DCHECK_LE(v1, v2)
#define CHECK_GE(lhs, rhs)
#define CHECK_LE(lhs, rhs)
#define DCHECK_NOT_NULL(val)
#define CHECK_NE(lhs, rhs)
#define CHECK_EQ(lhs, rhs)
#define DCHECK(condition)
#define DCHECK_EQ(v1, v2)
#define DCHECK_GT(v1, v2)
std::unique_ptr< ValueMirror > value