v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
extension-riscv-v.h
Go to the documentation of this file.
1// Copyright 2022 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_CODEGEN_RISCV_EXTENSION_RISCV_V_H_
6#define V8_CODEGEN_RISCV_EXTENSION_RISCV_V_H_
7
13
14namespace v8 {
15namespace internal {
16
18 public:
19 // RVV
// Packs the vtype immediate (zimm) used by vsetvli/vsetivli:
// vlmul in bits [2:0], vsew in bits [5:3], tail-agnostic (vta) bit at 6,
// mask-agnostic (vma) bit at 7 — matching the RVV vtype register layout.
// NOTE(review): the trailing MaskAgnosticType mask = mu parameter line is
// elided in this extraction — see the member summary for the full signature.
20 static int32_t GenZimm(VSew vsew, Vlmul vlmul, TailAgnosticType tail = tu,
22 return (mask << 7) | (tail << 6) | ((vsew & 0x7) << 3) | (vlmul & 0x7);
23 }
24
25 void vl(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
27 void vls(VRegister vd, Register rs1, Register rs2, VSew vsew,
29 void vlx(VRegister vd, Register rs1, VRegister vs3, VSew vsew,
31
32 void vs(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
34 void vss(VRegister vd, Register rs1, Register rs2, VSew vsew,
36 void vsx(VRegister vd, Register rs1, VRegister vs3, VSew vsew,
38
39 void vsu(VRegister vd, Register rs1, VRegister vs3, VSew vsew,
41
42#define SegInstr(OP) \
43 void OP##seg2(ARG); \
44 void OP##seg3(ARG); \
45 void OP##seg4(ARG); \
46 void OP##seg5(ARG); \
47 void OP##seg6(ARG); \
48 void OP##seg7(ARG); \
49 void OP##seg8(ARG);
50
51#define ARG \
52 VRegister vd, Register rs1, uint8_t lumop, VSew vsew, MaskType mask = NoMask
53
55#undef ARG
56
57#define ARG \
58 VRegister vd, Register rs1, Register rs2, VSew vsew, MaskType mask = NoMask
59
61#undef ARG
62
63#define ARG \
64 VRegister vd, Register rs1, VRegister rs2, VSew vsew, MaskType mask = NoMask
65
67#undef ARG
68#undef SegInstr
69
70 // RVV Vector Arithmetic Instruction
71 void vmv_vv(VRegister vd, VRegister vs1);
72 void vmv_vx(VRegister vd, Register rs1);
73 void vmv_vi(VRegister vd, uint8_t simm5);
74 void vmv_xs(Register rd, VRegister vs2);
75 void vmv_sx(VRegister vd, Register rs1);
77 void vmerge_vx(VRegister vd, Register rs1, VRegister vs2);
78 void vmerge_vi(VRegister vd, uint8_t imm5, VRegister vs2);
79
88
90 void vadc_vx(VRegister vd, Register rs1, VRegister vs2);
91 void vadc_vi(VRegister vd, uint8_t imm5, VRegister vs2);
92
94 void vmadc_vx(VRegister vd, Register rs1, VRegister vs2);
95 void vmadc_vi(VRegister vd, uint8_t imm5, VRegister vs2);
96
97 void vfmv_vf(VRegister vd, FPURegister fs1);
98 void vfmv_fs(FPURegister fd, VRegister vs2);
99 void vfmv_sf(VRegister vd, FPURegister fs);
100 void vfmerge_vf(VRegister vd, FPURegister fs1, VRegister vs2);
101
102 void vwaddu_wx(VRegister vd, VRegister vs2, Register rs1,
104 void vid_v(VRegister vd, MaskType mask = Mask);
105
106#define DEFINE_OPIVV(name, funct6) \
107 void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
108 MaskType mask = NoMask);
109
110#define DEFINE_OPIVX(name, funct6) \
111 void name##_vx(VRegister vd, VRegister vs2, Register rs1, \
112 MaskType mask = NoMask);
113
114#define DEFINE_OPIVI(name, funct6) \
115 void name##_vi(VRegister vd, VRegister vs2, int8_t imm5, \
116 MaskType mask = NoMask);
117
118#define DEFINE_OPMVV(name, funct6) \
119 void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
120 MaskType mask = NoMask);
121
122#define DEFINE_OPMVX(name, funct6) \
123 void name##_vx(VRegister vd, VRegister vs2, Register rs1, \
124 MaskType mask = NoMask);
125
126#define DEFINE_OPFVV(name, funct6) \
127 void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
128 MaskType mask = NoMask);
129
130#define DEFINE_OPFWV(name, funct6) \
131 void name##_wv(VRegister vd, VRegister vs2, VRegister vs1, \
132 MaskType mask = NoMask);
133
134#define DEFINE_OPFRED(name, funct6) \
135 void name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \
136 MaskType mask = NoMask);
137
138#define DEFINE_OPFVF(name, funct6) \
139 void name##_vf(VRegister vd, VRegister vs2, FPURegister fs1, \
140 MaskType mask = NoMask);
141
142#define DEFINE_OPFWF(name, funct6) \
143 void name##_wf(VRegister vd, VRegister vs2, FPURegister fs1, \
144 MaskType mask = NoMask);
145
146#define DEFINE_OPFVV_FMA(name, funct6) \
147 void name##_vv(VRegister vd, VRegister vs1, VRegister vs2, \
148 MaskType mask = NoMask);
149
150#define DEFINE_OPFVF_FMA(name, funct6) \
151 void name##_vf(VRegister vd, FPURegister fs1, VRegister vs2, \
152 MaskType mask = NoMask);
153
154#define DEFINE_OPMVV_VIE(name) \
155 void name(VRegister vd, VRegister vs2, MaskType mask = NoMask);
156
211
214 DEFINE_OPMVX(vslide1down, VSLIDEDOWN_FUNCT6)
215 DEFINE_OPFVF(vfslide1down, VSLIDEDOWN_FUNCT6)
218 DEFINE_OPMVX(vslide1up, VSLIDEUP_FUNCT6)
219 DEFINE_OPFVF(vfslide1up, VSLIDEUP_FUNCT6)
220
224
228
231
234
238
242
245
248
252
256
260
263
272
273 // Vector Widening Floating-Point Add/Subtract Instructions
282
283 // Vector Widening Floating-Point Reduction Instructions
286
287 // Vector Widening Floating-Point Multiply
290
295 DEFINE_OPFVV(vfmax, VMFMAX_FUNCT6)
296 DEFINE_OPFVV(vfmin, VMFMIN_FUNCT6)
298
305
306 // Vector Single-Width Floating-Point Fused Multiply-Add Instructions
323
324 // Vector Widening Floating-Point Fused Multiply-Add Instructions
333
334 // Vector Narrowing Fixed-Point Clip Instructions
341
342 // Vector Integer Extension
343 DEFINE_OPMVV_VIE(vzext_vf8)
344 DEFINE_OPMVV_VIE(vsext_vf8)
345 DEFINE_OPMVV_VIE(vzext_vf4)
346 DEFINE_OPMVV_VIE(vsext_vf4)
347 DEFINE_OPMVV_VIE(vzext_vf2)
348 DEFINE_OPMVV_VIE(vsext_vf2)
349
350#undef DEFINE_OPIVI
351#undef DEFINE_OPIVV
352#undef DEFINE_OPIVX
353#undef DEFINE_OPMVV
354#undef DEFINE_OPMVX
355#undef DEFINE_OPFVV
356#undef DEFINE_OPFWV
357#undef DEFINE_OPFVF
358#undef DEFINE_OPFWF
359#undef DEFINE_OPFVV_FMA
360#undef DEFINE_OPFVF_FMA
361#undef DEFINE_OPMVV_VIE
362#undef DEFINE_OPFRED
363
// DEFINE_VFUNARY(name, funct6, vs1): declares and inline-defines a
// single-operand vector floating-point unary instruction. The fixed
// vs1 value selects the concrete operation inside the funct6 group
// (e.g. sqrt vs. classify); encoding is emitted via GenInstrV with
// the OP_FVV opcode.
364#define DEFINE_VFUNARY(name, funct6, vs1) \
365 void name(VRegister vd, VRegister vs2, MaskType mask = NoMask) { \
366 GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
367 }
368
378
382
387#undef DEFINE_VFUNARY
388
390 vxor_vi(dst, src, -1, mask);
391 }
392
394 vrsub_vx(dst, src, zero_reg, mask);
395 }
396
398 vfsngjn_vv(dst, src, src, mask);
399 }
401 vfsngjx_vv(dst, src, src, mask);
402 }
404
405 void vcpop_m(Register rd, VRegister vs2, MaskType mask = NoMask);
406
// Pseudo-instruction: RVV has no vmslt.vi encoding, so the signed
// compare "vs1 < imm5" is synthesized as "vs1 <= imm5 - 1" using
// vmsle.vi. The DCHECK keeps imm5 - 1 inside the encodable simm5
// range [-16, 15], i.e. imm5 itself must lie in [-15, 16].
407 void vmslt_vi(VRegister vd, VRegister vs1, int8_t imm5,
408 MaskType mask = NoMask) {
409 DCHECK(imm5 >= -15 && imm5 <= 16);
410 vmsle_vi(vd, vs1, imm5 - 1, mask);
411 }
412
// Pseudo-instruction: the unsigned compare "vs1 < imm5" is
// synthesized as "vs1 <= imm5 - 1" using vmsleu.vi. imm5 must be
// at least 1 so the decremented immediate is still a meaningful
// unsigned bound (x < 0 is always false for unsigned x).
413 void vmsltu_vi(VRegister vd, VRegister vs1, int8_t imm5,
414 MaskType mask = NoMask) {
415 DCHECK(imm5 >= 1 && imm5 <= 16);
416 vmsleu_vi(vd, vs1, imm5 - 1, mask);
417 }
418
419 protected:
420 void vsetvli(Register rd, Register rs1, VSew vsew, Vlmul vlmul,
422
423 void vsetivli(Register rd, uint8_t uimm, VSew vsew, Vlmul vlmul,
425
426 inline void vsetvlmax(Register rd, VSew vsew, Vlmul vlmul,
427 TailAgnosticType tail = tu,
429 vsetvli(rd, zero_reg, vsew, vlmul, tu, mu);
430 }
431
432 inline void vsetvl(VSew vsew, Vlmul vlmul, TailAgnosticType tail = tu,
434 vsetvli(zero_reg, zero_reg, vsew, vlmul, tu, mu);
435 }
436
437 void vsetvl(Register rd, Register rs1, Register rs2);
438
439 // ----------------------------RVV------------------------------------------
440 // vsetvl
442 // vsetvli
443 void GenInstrV(Register rd, Register rs1, uint32_t zimm);
444 // OPIVV OPFVV OPMVV
445 void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, VRegister vs1,
447 void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, int8_t vs1,
449 void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, VRegister vs2,
451 // OPMVV OPFVV
452 void GenInstrV(uint8_t funct6, Opcode opcode, Register rd, VRegister vs1,
454 // OPFVV
455 void GenInstrV(uint8_t funct6, Opcode opcode, FPURegister fd, VRegister vs1,
457
458 // OPIVX OPMVX
459 void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, Register rs1,
461 // OPFVF
462 void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, FPURegister fs1,
464 // OPMVX
465 void GenInstrV(uint8_t funct6, Register rd, Register rs1, VRegister vs2,
467 // OPIVI
468 void GenInstrV(uint8_t funct6, VRegister vd, int8_t simm5, VRegister vs2,
470
471 // VL VS
472 void GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd, Register rs1,
473 uint8_t umop, MaskType mask, uint8_t IsMop, bool IsMew,
474 uint8_t Nf);
475
476 void GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd, Register rs1,
477 Register rs2, MaskType mask, uint8_t IsMop, bool IsMew,
478 uint8_t Nf);
479 // VL VS AMO
480 void GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd, Register rs1,
481 VRegister vs2, MaskType mask, uint8_t IsMop, bool IsMew,
482 uint8_t Nf);
483 // vmv_xs vcpop_m vfirst_m
484 void GenInstrV(uint8_t funct6, Opcode opcode, Register rd, uint8_t vs1,
485 VRegister vs2, MaskType mask);
486};
487
489 public:
490 int sz;
491 uint8_t laneidx;
492
494
495 private:
// Internal constructor: records the element size and wraps laneidx
// modulo the lane count so the stored index is always within range.
496 LoadStoreLaneParams(uint8_t laneidx, int sz, int lanes)
497 : sz(sz), laneidx(laneidx % lanes) {}
498};
499} // namespace internal
500} // namespace v8
501
502#endif // V8_CODEGEN_RISCV_EXTENSION_RISCV_V_H_
void vsetvlmax(Register rd, VSew vsew, Vlmul vlmul, TailAgnosticType tail=tu, MaskAgnosticType mask=mu)
void vsu(VRegister vd, Register rs1, VRegister vs3, VSew vsew, MaskType mask=NoMask)
void vmadc_vv(VRegister vd, VRegister vs1, VRegister vs2)
void vmerge_vx(VRegister vd, Register rs1, VRegister vs2)
void vls(VRegister vd, Register rs1, Register rs2, VSew vsew, MaskType mask=NoMask)
void vredmin_vs(VRegister vd, VRegister vs2, VRegister vs1, MaskType mask=NoMask)
void vmerge_vv(VRegister vd, VRegister vs1, VRegister vs2)
void GenInstrV(Register rd, Register rs1, uint32_t zimm)
void vmv_vi(VRegister vd, uint8_t simm5)
SegInstr(vl) SegInstr(vs) SegInstr(vls) SegInstr(vss) SegInstr(vsx) SegInstr(vlx) void vmv_vv(VRegister vd, VRegister vs1)
static int32_t GenZimm(VSew vsew, Vlmul vlmul, TailAgnosticType tail=tu, MaskAgnosticType mask=mu)
void vadc_vv(VRegister vd, VRegister vs1, VRegister vs2)
void vredmax_vs(VRegister vd, VRegister vs2, VRegister vs1, MaskType mask=NoMask)
void vmv_xs(Register rd, VRegister vs2)
void vfmerge_vf(VRegister vd, FPURegister fs1, VRegister vs2)
void vredminu_vs(VRegister vd, VRegister vs2, VRegister vs1, MaskType mask=NoMask)
void vmadc_vx(VRegister vd, Register rs1, VRegister vs2)
void vfabs_vv(VRegister dst, VRegister src, MaskType mask=NoMask)
void vredmaxu_vs(VRegister vd, VRegister vs2, VRegister vs1, MaskType mask=NoMask)
void vfmv_vf(VRegister vd, FPURegister fs1)
void vlx(VRegister vd, Register rs1, VRegister vs3, VSew vsew, MaskType mask=NoMask)
void vadc_vx(VRegister vd, Register rs1, VRegister vs2)
void vmerge_vi(VRegister vd, uint8_t imm5, VRegister vs2)
void vsetvli(Register rd, Register rs1, VSew vsew, Vlmul vlmul, TailAgnosticType tail=tu, MaskAgnosticType mask=mu)
void vfneg_vv(VRegister dst, VRegister src, MaskType mask=NoMask)
void vsetivli(Register rd, uint8_t uimm, VSew vsew, Vlmul vlmul, TailAgnosticType tail=tu, MaskAgnosticType mask=mu)
void vsx(VRegister vd, Register rs1, VRegister vs3, VSew vsew, MaskType mask=NoMask)
void vfirst_m(Register rd, VRegister vs2, MaskType mask=NoMask)
void vnot_vv(VRegister dst, VRegister src, MaskType mask=NoMask)
void vfmv_fs(FPURegister fd, VRegister vs2)
void vmslt_vi(VRegister vd, VRegister vs1, int8_t imm5, MaskType mask=NoMask)
void vmadc_vi(VRegister vd, uint8_t imm5, VRegister vs2)
void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, VRegister vs2, MaskType mask=NoMask)
void vsetvl(VSew vsew, Vlmul vlmul, TailAgnosticType tail=tu, MaskAgnosticType mask=mu)
void GenInstrV(Register rd, Register rs1, Register rs2)
void vwaddu_wx(VRegister vd, VRegister vs2, Register rs1, MaskType mask=NoMask)
void vmsltu_vi(VRegister vd, VRegister vs1, int8_t imm5, MaskType mask=NoMask)
void vl(VRegister vd, Register rs1, uint8_t lumop, VSew vsew, MaskType mask=NoMask)
void vcpop_m(Register rd, VRegister vs2, MaskType mask=NoMask)
void vfmv_sf(VRegister vd, FPURegister fs)
void vmv_sx(VRegister vd, Register rs1)
void vmv_vx(VRegister vd, Register rs1)
void vss(VRegister vd, Register rs1, Register rs2, VSew vsew, MaskType mask=NoMask)
void vid_v(VRegister vd, MaskType mask=Mask)
void vadc_vi(VRegister vd, uint8_t imm5, VRegister vs2)
void vneg_vv(VRegister dst, VRegister src, MaskType mask=NoMask)
LoadStoreLaneParams(MachineRepresentation rep, uint8_t laneidx)
LoadStoreLaneParams(uint8_t laneidx, int sz, int lanes)
#define DEFINE_OPFVV(name, funct6)
#define DEFINE_OPFWF(name, funct6)
#define DEFINE_OPFWV(name, funct6)
#define DEFINE_OPFVV_FMA(name, funct6)
#define DEFINE_OPIVV(name, funct6)
#define DEFINE_OPMVV(name, funct6)
#define DEFINE_OPMVX(name, funct6)
#define DEFINE_OPFVF(name, funct6)
#define DEFINE_OPMVV_VIE(name, vs1)
#define DEFINE_OPFRED(name, funct6)
#define DEFINE_OPFVF_FMA(name, funct6)
#define DEFINE_OPIVX(name, funct6)
#define DEFINE_OPIVI(name, funct6)
#define SegInstr(OP)
#define DEFINE_VFUNARY(name, funct6, vs1)
uint32_t const mask
constexpr Opcode VMAXU_FUNCT6
constexpr Opcode VFMACC_FUNCT6
constexpr Opcode VMUL_FUNCT6
constexpr Opcode VMSGTU_FUNCT6
constexpr Opcode VMULHU_FUNCT6
constexpr Opcode VWADDU_FUNCT6
constexpr Opcode VSMUL_FUNCT6
constexpr Opcode VFMADD_FUNCT6
constexpr Opcode VFDIV_FUNCT6
constexpr Opcode VFWMACC_FUNCT6
constexpr Opcode VFRSQRT7_V
constexpr Opcode VFWCVT_F_X_V
constexpr Opcode VMSLE_FUNCT6
constexpr Opcode VWADD_FUNCT6
constexpr Opcode VFSGNJ_FUNCT6
constexpr Opcode VRSUB_FUNCT6
constexpr Opcode VFMSUB_FUNCT6
constexpr Opcode VFSUB_FUNCT6
constexpr Opcode VMULH_FUNCT6
constexpr Opcode VFNCVT_XU_F_W
constexpr Opcode VFUNARY1_FUNCT6
constexpr Opcode VFWSUB_W_FUNCT6
constexpr Opcode VWMULU_FUNCT6
constexpr Opcode VFWNMACC_FUNCT6
constexpr Opcode VSLL_FUNCT6
constexpr Opcode VSSUB_FUNCT6
constexpr Opcode VXOR_FUNCT6
constexpr Opcode VFNCVT_X_F_W
constexpr Opcode VFCVT_X_F_V
constexpr Opcode VFSQRT_V
constexpr Opcode VWMUL_FUNCT6
constexpr Opcode VDIV_FUNCT6
constexpr Opcode VFREDMAX_FUNCT6
constexpr Opcode VMSLT_FUNCT6
constexpr Opcode VFCLASS_V
constexpr Opcode VFWCVT_F_F_V
constexpr Opcode VMSNE_FUNCT6
constexpr Opcode VFNMADD_FUNCT6
constexpr Opcode VSADDU_FUNCT6
constexpr Opcode VMAX_FUNCT6
constexpr Opcode VOR_FUNCT6
constexpr Opcode VAND_FUNCT6
constexpr Opcode VMSLTU_FUNCT6
constexpr Opcode VFWNMSAC_FUNCT6
constexpr Opcode VMFEQ_FUNCT6
constexpr Opcode VFMSAC_FUNCT6
constexpr Opcode VFADD_FUNCT6
constexpr Opcode VCOMPRESS_FUNCT6
constexpr Opcode VSLIDEDOWN_FUNCT6
constexpr Opcode VFSGNJX_FUNCT6
constexpr Opcode VMINU_FUNCT6
constexpr Opcode VFCVT_F_X_V
constexpr Opcode VSRL_FUNCT6
constexpr Opcode VMFLT_FUNCT6
constexpr Opcode VFWREDOSUM_FUNCT6
constexpr Opcode VFWCVT_X_F_V
constexpr Opcode VFUNARY0_FUNCT6
constexpr Opcode VFNMACC_FUNCT6
constexpr Opcode VFMUL_FUNCT6
constexpr Opcode VFNCVT_F_F_W
constexpr Opcode VDIVU_FUNCT6
constexpr Opcode VNCLIPU_FUNCT6
constexpr Opcode VFWCVT_F_XU_V
constexpr Opcode VFCVT_F_XU_V
constexpr Opcode VMIN_FUNCT6
constexpr Opcode VMSLEU_FUNCT6
constexpr Opcode VSLIDEUP_FUNCT6
constexpr Opcode VFWSUB_FUNCT6
constexpr Opcode VMSGT_FUNCT6
constexpr Opcode VSSUBU_FUNCT6
constexpr Opcode VADD_FUNCT6
constexpr Opcode VFNMSUB_FUNCT6
constexpr Opcode VFSGNJN_FUNCT6
constexpr Opcode VMFLE_FUNCT6
constexpr Opcode VFCVT_XU_F_V
constexpr Opcode VFREC7_V
constexpr Opcode VMULHSU_FUNCT6
constexpr Opcode VSUB_FUNCT6
constexpr Opcode VFNMSAC_FUNCT6
constexpr Opcode VMFNE_FUNCT6
constexpr Opcode VNCLIP_FUNCT6
constexpr Opcode VMSEQ_FUNCT6
constexpr Opcode VFWADD_FUNCT6
constexpr Opcode VFWMUL_FUNCT6
constexpr Opcode VFWADD_W_FUNCT6
constexpr Opcode VFWMSAC_FUNCT6
constexpr Opcode VFWCVT_XU_F_V
constexpr Opcode VSRA_FUNCT6
constexpr Opcode VFWREDUSUM_FUNCT6
constexpr Opcode VRGATHER_FUNCT6
constexpr Opcode VSADD_FUNCT6
#define DCHECK(condition)
Definition logging.h:482