v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.

extension-riscv-v.cc
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/codegen/riscv/extension-riscv-v.h"

namespace v8 {
namespace internal {

// RVV
void AssemblerRISCVV::vredmaxu_vs(VRegister vd, VRegister vs2, VRegister vs1,
                                  MaskType mask) {
  GenInstrV(VREDMAXU_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
}

void AssemblerRISCVV::vredmax_vs(VRegister vd, VRegister vs2, VRegister vs1,
                                 MaskType mask) {
  GenInstrV(VREDMAX_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
}

void AssemblerRISCVV::vredmin_vs(VRegister vd, VRegister vs2, VRegister vs1,
                                 MaskType mask) {
  GenInstrV(VREDMIN_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
}

void AssemblerRISCVV::vredminu_vs(VRegister vd, VRegister vs2, VRegister vs1,
                                  MaskType mask) {
  GenInstrV(VREDMINU_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
}

void AssemblerRISCVV::vmv_vv(VRegister vd, VRegister vs1) {
  GenInstrV(VMV_FUNCT6, OP_IVV, vd, vs1, v0, NoMask);
}

void AssemblerRISCVV::vmv_vx(VRegister vd, Register rs1) {
  GenInstrV(VMV_FUNCT6, OP_IVX, vd, rs1, v0, NoMask);
}

void AssemblerRISCVV::vmv_vi(VRegister vd, uint8_t simm5) {
  GenInstrV(VMV_FUNCT6, vd, simm5, v0, NoMask);
}

void AssemblerRISCVV::vmv_xs(Register rd, VRegister vs2) {
  GenInstrV(VWXUNARY0_FUNCT6, OP_MVV, rd, 0b00000, vs2, NoMask);
}

void AssemblerRISCVV::vmv_sx(VRegister vd, Register rs1) {
  GenInstrV(VRXUNARY0_FUNCT6, OP_MVX, vd, rs1, v0, NoMask);
}

void AssemblerRISCVV::vmerge_vv(VRegister vd, VRegister vs1, VRegister vs2) {
  GenInstrV(VMV_FUNCT6, OP_IVV, vd, vs1, vs2, Mask);
}

void AssemblerRISCVV::vmerge_vx(VRegister vd, Register rs1, VRegister vs2) {
  GenInstrV(VMV_FUNCT6, OP_IVX, vd, rs1, vs2, Mask);
}

void AssemblerRISCVV::vmerge_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
  GenInstrV(VMV_FUNCT6, vd, imm5, vs2, Mask);
}

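// Note (added for clarity, not from the original file): the vmerge, vadc, and
// vmadc emitters here pass Mask (vm = 0) because, per the RVV spec, these
// encodings are the forms that always read v0 as the merge/carry operand.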
void AssemblerRISCVV::vadc_vv(VRegister vd, VRegister vs1, VRegister vs2) {
  GenInstrV(VADC_FUNCT6, OP_IVV, vd, vs1, vs2, Mask);
}

void AssemblerRISCVV::vadc_vx(VRegister vd, Register rs1, VRegister vs2) {
  GenInstrV(VADC_FUNCT6, OP_IVX, vd, rs1, vs2, Mask);
}

void AssemblerRISCVV::vadc_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
  GenInstrV(VADC_FUNCT6, vd, imm5, vs2, Mask);
}

void AssemblerRISCVV::vmadc_vv(VRegister vd, VRegister vs1, VRegister vs2) {
  GenInstrV(VMADC_FUNCT6, OP_IVV, vd, vs1, vs2, Mask);
}

void AssemblerRISCVV::vmadc_vx(VRegister vd, Register rs1, VRegister vs2) {
  GenInstrV(VMADC_FUNCT6, OP_IVX, vd, rs1, vs2, Mask);
}

void AssemblerRISCVV::vmadc_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
  GenInstrV(VMADC_FUNCT6, vd, imm5, vs2, Mask);
}

void AssemblerRISCVV::vrgather_vv(VRegister vd, VRegister vs2, VRegister vs1,
                                  MaskType mask) {
  DCHECK_NE(vd, vs1);
  DCHECK_NE(vd, vs2);
  GenInstrV(VRGATHER_FUNCT6, OP_IVV, vd, vs1, vs2, mask);
}

void AssemblerRISCVV::vrgather_vi(VRegister vd, VRegister vs2, int8_t imm5,
                                  MaskType mask) {
  DCHECK_NE(vd, vs2);
  GenInstrV(VRGATHER_FUNCT6, vd, imm5, vs2, mask);
}

void AssemblerRISCVV::vrgather_vx(VRegister vd, VRegister vs2, Register rs1,
                                  MaskType mask) {
  DCHECK_NE(vd, vs2);
  GenInstrV(VRGATHER_FUNCT6, OP_IVX, vd, rs1, vs2, mask);
}

#define DEFINE_OPIVV(name, funct6)                                            \
  void AssemblerRISCVV::name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
                                  MaskType mask) {                            \
    GenInstrV(funct6, OP_IVV, vd, vs1, vs2, mask);                            \
  }

#define DEFINE_OPFVV(name, funct6)                                            \
  void AssemblerRISCVV::name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
                                  MaskType mask) {                            \
    GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask);                            \
  }

#define DEFINE_OPFWV(name, funct6)                                            \
  void AssemblerRISCVV::name##_wv(VRegister vd, VRegister vs2, VRegister vs1, \
                                  MaskType mask) {                            \
    GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask);                            \
  }

#define DEFINE_OPFRED(name, funct6)                                           \
  void AssemblerRISCVV::name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \
                                  MaskType mask) {                            \
    GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask);                            \
  }

#define DEFINE_OPIVX(name, funct6)                                            \
  void AssemblerRISCVV::name##_vx(VRegister vd, VRegister vs2, Register rs1,  \
                                  MaskType mask) {                            \
    GenInstrV(funct6, OP_IVX, vd, rs1, vs2, mask);                            \
  }

#define DEFINE_OPIVI(name, funct6)                                            \
  void AssemblerRISCVV::name##_vi(VRegister vd, VRegister vs2, int8_t imm5,   \
                                  MaskType mask) {                            \
    GenInstrV(funct6, vd, imm5, vs2, mask);                                   \
  }

#define DEFINE_OPMVV(name, funct6)                                            \
  void AssemblerRISCVV::name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
                                  MaskType mask) {                            \
    GenInstrV(funct6, OP_MVV, vd, vs1, vs2, mask);                            \
  }

// void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, Register
// rs1,
//               VRegister vs2, MaskType mask = NoMask);
#define DEFINE_OPMVX(name, funct6)                                            \
  void AssemblerRISCVV::name##_vx(VRegister vd, VRegister vs2, Register rs1,  \
                                  MaskType mask) {                            \
    GenInstrV(funct6, OP_MVX, vd, rs1, vs2, mask);                            \
  }

#define DEFINE_OPFVF(name, funct6)                                            \
  void AssemblerRISCVV::name##_vf(VRegister vd, VRegister vs2,                \
                                  FPURegister fs1, MaskType mask) {           \
    GenInstrV(funct6, OP_FVF, vd, fs1, vs2, mask);                            \
  }

#define DEFINE_OPFWF(name, funct6)                                            \
  void AssemblerRISCVV::name##_wf(VRegister vd, VRegister vs2,                \
                                  FPURegister fs1, MaskType mask) {           \
    GenInstrV(funct6, OP_FVF, vd, fs1, vs2, mask);                            \
  }

#define DEFINE_OPFVV_FMA(name, funct6)                                        \
  void AssemblerRISCVV::name##_vv(VRegister vd, VRegister vs1, VRegister vs2, \
                                  MaskType mask) {                            \
    GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask);                            \
  }

#define DEFINE_OPFVF_FMA(name, funct6)                                        \
  void AssemblerRISCVV::name##_vf(VRegister vd, FPURegister fs1,              \
                                  VRegister vs2, MaskType mask) {             \
    GenInstrV(funct6, OP_FVF, vd, fs1, vs2, mask);                            \
  }

// vector integer extension
#define DEFINE_OPMVV_VIE(name, vs1)                                           \
  void AssemblerRISCVV::name(VRegister vd, VRegister vs2, MaskType mask) {    \
    GenInstrV(VXUNARY0_FUNCT6, OP_MVV, vd, vs1, vs2, mask);                   \
  }

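// Illustrative sketch (not from the original file): an invocation such as
//   DEFINE_OPIVV(vadd, VADD_FUNCT6)
// expands to
//   void AssemblerRISCVV::vadd_vv(VRegister vd, VRegister vs2, VRegister vs1,
//                                 MaskType mask) {
//     GenInstrV(VADD_FUNCT6, OP_IVV, vd, vs1, vs2, mask);
//   }
// so each macro line below defines a complete one-body emitter for a
// vector-vector, vector-scalar, or vector-immediate instruction form.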
DEFINE_OPFVF(vfslide1down, VSLIDEDOWN_FUNCT6)
DEFINE_OPFVF(vfslide1up, VSLIDEUP_FUNCT6)

// Vector Widening Floating-Point Add/Subtract Instructions

// Vector Widening Floating-Point Reduction Instructions

// Vector Widening Floating-Point Multiply

// Vector Single-Width Floating-Point Fused Multiply-Add Instructions

// Vector Widening Floating-Point Fused Multiply-Add Instructions

// Vector Narrowing Fixed-Point Clip Instructions

// Vector Integer Extension
DEFINE_OPMVV_VIE(vzext_vf8, 0b00010)
DEFINE_OPMVV_VIE(vsext_vf8, 0b00011)
DEFINE_OPMVV_VIE(vzext_vf4, 0b00100)
DEFINE_OPMVV_VIE(vsext_vf4, 0b00101)
DEFINE_OPMVV_VIE(vzext_vf2, 0b00110)
DEFINE_OPMVV_VIE(vsext_vf2, 0b00111)

#undef DEFINE_OPIVI
#undef DEFINE_OPIVV
#undef DEFINE_OPIVX
#undef DEFINE_OPFVV
#undef DEFINE_OPFWV
#undef DEFINE_OPFVF
#undef DEFINE_OPFWF
#undef DEFINE_OPFVV_FMA
#undef DEFINE_OPFVF_FMA
#undef DEFINE_OPMVV_VIE

void AssemblerRISCVV::vsetvli(Register rd, Register rs1, VSew vsew,
                              Vlmul vlmul, TailAgnosticType tail,
                              MaskAgnosticType mask) {
  int32_t zimm = GenZimm(vsew, vlmul, tail, mask);
  Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) |
                ((rs1.code() & 0x1F) << kRvvRs1Shift) |
                (((uint32_t)zimm << kRvvZimmShift) & kRvvZimmMask) | 0x0 << 31;
  emit(instr);
}

void AssemblerRISCVV::vsetivli(Register rd, uint8_t uimm, VSew vsew,
                               Vlmul vlmul, TailAgnosticType tail,
                               MaskAgnosticType mask) {
  DCHECK(is_uint5(uimm));
  int32_t zimm = GenZimm(vsew, vlmul, tail, mask) & 0x3FF;
  Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) |
                ((uimm & 0x1F) << kRvvUimmShift) |
                (((uint32_t)zimm << kRvvZimmShift) & kRvvZimmMask) | 0x3 << 30;
  emit(instr);
}

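// Illustrative note (not from the original file): GenZimm is assumed to pack
// the vtype CSR fields per the RVV 1.0 layout {vma, vta, vsew[2:0],
// vlmul[2:0]}, so, for example, SEW=32 (0b010) and LMUL=2 (0b001) with the
// tail- and mask-agnostic bits set would yield zimm = 0b1101'0001.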
void AssemblerRISCVV::vsetvl(Register rd, Register rs1, Register rs2) {
  Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) |
                ((rs1.code() & 0x1F) << kRvvRs1Shift) |
                ((rs2.code() & 0x1F) << kRvvRs2Shift) | 0x40 << 25;
  emit(instr);
}

uint8_t vsew_switch(VSew vsew) {
  uint8_t width;
  switch (vsew) {
    case E8:
      width = 0b000;
      break;
    case E16:
      width = 0b101;
      break;
    case E32:
      width = 0b110;
      break;
    default:
      width = 0b111;
      break;
  }
  return width;
}

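// The width values above are the RVV load/store element-width encodings,
// which share the scalar FP load/store width space: 0b000 selects 8-bit
// elements, 0b101 16-bit, 0b110 32-bit, and 0b111 (the default arm) 64-bit.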
// OPIVV OPFVV OPMVV
void AssemblerRISCVV::GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd,
                                VRegister vs1, VRegister vs2, MaskType mask) {
  DCHECK(opcode == OP_MVV || opcode == OP_FVV || opcode == OP_IVV);
  Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
                ((vd.code() & 0x1F) << kRvvVdShift) |
                ((vs1.code() & 0x1F) << kRvvVs1Shift) |
                ((vs2.code() & 0x1F) << kRvvVs2Shift);
  emit(instr);
}

void AssemblerRISCVV::GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd,
                                int8_t vs1, VRegister vs2, MaskType mask) {
  DCHECK(opcode == OP_MVV || opcode == OP_FVV || opcode == OP_IVV);
  Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
                ((vd.code() & 0x1F) << kRvvVdShift) |
                ((vs1 & 0x1F) << kRvvVs1Shift) |
                ((vs2.code() & 0x1F) << kRvvVs2Shift);
  emit(instr);
}
// OPMVV OPFVV
void AssemblerRISCVV::GenInstrV(uint8_t funct6, Opcode opcode, Register rd,
                                VRegister vs1, VRegister vs2, MaskType mask) {
  DCHECK(opcode == OP_MVV || opcode == OP_FVV);
  Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
                ((rd.code() & 0x1F) << kRvvVdShift) |
                ((vs1.code() & 0x1F) << kRvvVs1Shift) |
                ((vs2.code() & 0x1F) << kRvvVs2Shift);
  emit(instr);
}

// OPFVV
void AssemblerRISCVV::GenInstrV(uint8_t funct6, Opcode opcode, FPURegister fd,
                                VRegister vs1, VRegister vs2, MaskType mask) {
  DCHECK(opcode == OP_FVV);
  Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
                ((fd.code() & 0x1F) << kRvvVdShift) |
                ((vs1.code() & 0x1F) << kRvvVs1Shift) |
                ((vs2.code() & 0x1F) << kRvvVs2Shift);
  emit(instr);
}

// OPIVX OPMVX
void AssemblerRISCVV::GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd,
                                Register rs1, VRegister vs2, MaskType mask) {
  DCHECK(opcode == OP_IVX || opcode == OP_MVX);
  Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
                ((vd.code() & 0x1F) << kRvvVdShift) |
                ((rs1.code() & 0x1F) << kRvvRs1Shift) |
                ((vs2.code() & 0x1F) << kRvvVs2Shift);
  emit(instr);
}

// OPFVF
void AssemblerRISCVV::GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd,
                                FPURegister fs1, VRegister vs2, MaskType mask) {
  DCHECK(opcode == OP_FVF);
  Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
                ((vd.code() & 0x1F) << kRvvVdShift) |
                ((fs1.code() & 0x1F) << kRvvRs1Shift) |
                ((vs2.code() & 0x1F) << kRvvVs2Shift);
  emit(instr);
}

// OPMVX
void AssemblerRISCVV::GenInstrV(uint8_t funct6, Register rd, Register rs1,
                                VRegister vs2, MaskType mask) {
  Instr instr = (funct6 << kRvvFunct6Shift) | OP_MVX | (mask << kRvvVmShift) |
                ((rd.code() & 0x1F) << kRvvVdShift) |
                ((rs1.code() & 0x1F) << kRvvRs1Shift) |
                ((vs2.code() & 0x1F) << kRvvVs2Shift);
  emit(instr);
}
// OPIVI
void AssemblerRISCVV::GenInstrV(uint8_t funct6, VRegister vd, int8_t imm5,
                                VRegister vs2, MaskType mask) {
  DCHECK(is_uint5(imm5) || is_int5(imm5));
  Instr instr = (funct6 << kRvvFunct6Shift) | OP_IVI | (mask << kRvvVmShift) |
                ((vd.code() & 0x1F) << kRvvVdShift) |
                (((uint32_t)imm5 << kRvvImm5Shift) & kRvvImm5Mask) |
                ((vs2.code() & 0x1F) << kRvvVs2Shift);
  emit(instr);
}

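// Illustrative note (not from the original file): assuming a vadd_vi emitter
// defined via DEFINE_OPIVI(vadd, VADD_FUNCT6) above, a call such as
//   vadd_vi(v1, v2, -5);
// packs funct6 = VADD_FUNCT6, the vm bit from the mask, vd = 1, the
// immediate -5 truncated to 5 bits by kRvvImm5Mask, and vs2 = 2 into a
// single 32-bit OP_IVI instruction word.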
// VL VS
void AssemblerRISCVV::GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd,
                                Register rs1, uint8_t umop, MaskType mask,
                                uint8_t IsMop, bool IsMew, uint8_t Nf) {
  DCHECK(opcode == LOAD_FP || opcode == STORE_FP);
  Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) |
                ((width << kRvvWidthShift) & kRvvWidthMask) |
                ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) |
                ((umop << kRvvRs2Shift) & kRvvRs2Mask) |
                ((mask << kRvvVmShift) & kRvvVmMask) |
                ((IsMop << kRvvMopShift) & kRvvMopMask) |
                ((IsMew << kRvvMewShift) & kRvvMewMask) |
                ((Nf << kRvvNfShift) & kRvvNfMask);
  emit(instr);
}
void AssemblerRISCVV::GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd,
                                Register rs1, Register rs2, MaskType mask,
                                uint8_t IsMop, bool IsMew, uint8_t Nf) {
  DCHECK(opcode == LOAD_FP || opcode == STORE_FP);
  Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) |
                ((width << kRvvWidthShift) & kRvvWidthMask) |
                ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) |
                ((rs2.code() << kRvvRs2Shift) & kRvvRs2Mask) |
                ((mask << kRvvVmShift) & kRvvVmMask) |
                ((IsMop << kRvvMopShift) & kRvvMopMask) |
                ((IsMew << kRvvMewShift) & kRvvMewMask) |
                ((Nf << kRvvNfShift) & kRvvNfMask);
  emit(instr);
}
// VL VS AMO
void AssemblerRISCVV::GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd,
                                Register rs1, VRegister vs2, MaskType mask,
                                uint8_t IsMop, bool IsMew, uint8_t Nf) {
  DCHECK(opcode == LOAD_FP || opcode == STORE_FP || opcode == AMO);
  Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) |
                ((width << kRvvWidthShift) & kRvvWidthMask) |
                ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) |
                ((vs2.code() << kRvvRs2Shift) & kRvvRs2Mask) |
                ((mask << kRvvVmShift) & kRvvVmMask) |
                ((IsMop << kRvvMopShift) & kRvvMopMask) |
                ((IsMew << kRvvMewShift) & kRvvMewMask) |
                ((Nf << kRvvNfShift) & kRvvNfMask);
  emit(instr);
}
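// Per the RVV 1.0 spec, the IsMop field above selects the memory addressing
// mode: 0b00 is unit-stride (umop picks the variant), 0b01 is
// unordered-indexed, 0b10 is strided (rs2 holds the byte stride), and 0b11
// is ordered-indexed (vs2 holds the index vector). These match the
// 0b00/0b01/0b10/0b11 literals passed by vl, vls, vlx, and vsu below.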
// vmv_xs vcpop_m vfirst_m
void AssemblerRISCVV::GenInstrV(uint8_t funct6, Opcode opcode, Register rd,
                                uint8_t vs1, VRegister vs2, MaskType mask) {
  DCHECK(opcode == OP_MVV);
  Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
                ((rd.code() & 0x1F) << kRvvVdShift) |
                ((vs1 & 0x1F) << kRvvVs1Shift) |
                ((vs2.code() & 0x1F) << kRvvVs2Shift);
  emit(instr);
}

void AssemblerRISCVV::vl(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
                         MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b000);
}
void AssemblerRISCVV::vls(VRegister vd, Register rs1, Register rs2, VSew vsew,
                          MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b000);
}
void AssemblerRISCVV::vlx(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
                          MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, vs2, mask, 0b11, 0, 0);
}

void AssemblerRISCVV::vs(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
                         MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b000);
}
void AssemblerRISCVV::vss(VRegister vs3, Register rs1, Register rs2, VSew vsew,
                          MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vs3, rs1, rs2, mask, 0b10, 0, 0b000);
}

void AssemblerRISCVV::vsx(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
                          MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, vs2, mask, 0b11, 0, 0b000);
}
void AssemblerRISCVV::vsu(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
                          MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, vs2, mask, 0b01, 0, 0b000);
}

void AssemblerRISCVV::vlseg2(VRegister vd, Register rs1, uint8_t lumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b001);
}

void AssemblerRISCVV::vlseg3(VRegister vd, Register rs1, uint8_t lumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b010);
}

void AssemblerRISCVV::vlseg4(VRegister vd, Register rs1, uint8_t lumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b011);
}

void AssemblerRISCVV::vlseg5(VRegister vd, Register rs1, uint8_t lumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b100);
}

void AssemblerRISCVV::vlseg6(VRegister vd, Register rs1, uint8_t lumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b101);
}

void AssemblerRISCVV::vlseg7(VRegister vd, Register rs1, uint8_t lumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b110);
}

void AssemblerRISCVV::vlseg8(VRegister vd, Register rs1, uint8_t lumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b111);
}
void AssemblerRISCVV::vsseg2(VRegister vd, Register rs1, uint8_t sumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b001);
}
void AssemblerRISCVV::vsseg3(VRegister vd, Register rs1, uint8_t sumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b010);
}
void AssemblerRISCVV::vsseg4(VRegister vd, Register rs1, uint8_t sumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b011);
}
void AssemblerRISCVV::vsseg5(VRegister vd, Register rs1, uint8_t sumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b100);
}
void AssemblerRISCVV::vsseg6(VRegister vd, Register rs1, uint8_t sumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b101);
}
void AssemblerRISCVV::vsseg7(VRegister vd, Register rs1, uint8_t sumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b110);
}
void AssemblerRISCVV::vsseg8(VRegister vd, Register rs1, uint8_t sumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b111);
}

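// For the segment loads/stores in this file, the final Nf argument encodes
// the RVV nf field, i.e. the number of fields per segment minus one: the
// seg2 variants pass 0b001 up through the seg8 variants passing 0b111.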
void AssemblerRISCVV::vlsseg2(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b001);
}
void AssemblerRISCVV::vlsseg3(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b010);
}
void AssemblerRISCVV::vlsseg4(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b011);
}
void AssemblerRISCVV::vlsseg5(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b100);
}
void AssemblerRISCVV::vlsseg6(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b101);
}
void AssemblerRISCVV::vlsseg7(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b110);
}
void AssemblerRISCVV::vlsseg8(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b111);
}
void AssemblerRISCVV::vssseg2(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b001);
}
void AssemblerRISCVV::vssseg3(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b010);
}
void AssemblerRISCVV::vssseg4(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b011);
}
void AssemblerRISCVV::vssseg5(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b100);
}
void AssemblerRISCVV::vssseg6(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b101);
}
void AssemblerRISCVV::vssseg7(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b110);
}
void AssemblerRISCVV::vssseg8(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b111);
}

void AssemblerRISCVV::vlxseg2(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b001);
}
void AssemblerRISCVV::vlxseg3(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b010);
}
void AssemblerRISCVV::vlxseg4(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b011);
}
void AssemblerRISCVV::vlxseg5(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b100);
}
void AssemblerRISCVV::vlxseg6(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b101);
}
void AssemblerRISCVV::vlxseg7(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b110);
}
void AssemblerRISCVV::vlxseg8(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b111);
}
void AssemblerRISCVV::vsxseg2(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b001);
}
void AssemblerRISCVV::vsxseg3(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b010);
}
void AssemblerRISCVV::vsxseg4(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b011);
}
void AssemblerRISCVV::vsxseg5(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b100);
}
void AssemblerRISCVV::vsxseg6(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b101);
}
void AssemblerRISCVV::vsxseg7(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b110);
}
void AssemblerRISCVV::vsxseg8(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b111);
}

LoadStoreLaneParams::LoadStoreLaneParams(MachineRepresentation rep,
                                         uint8_t laneidx) {
#ifdef CAN_USE_RVV_INSTRUCTIONS
  switch (rep) {
    case MachineRepresentation::kWord8:
      *this = LoadStoreLaneParams(laneidx, 8, kRvvVLEN / 16);
      break;
    case MachineRepresentation::kWord16:
      *this = LoadStoreLaneParams(laneidx, 16, kRvvVLEN / 8);
      break;
    case MachineRepresentation::kWord32:
      *this = LoadStoreLaneParams(laneidx, 32, kRvvVLEN / 4);
      break;
    case MachineRepresentation::kWord64:
      *this = LoadStoreLaneParams(laneidx, 64, kRvvVLEN / 2);
      break;
    default:
      UNREACHABLE();
  }
#else
  UNREACHABLE();
#endif
}

}  // namespace internal
}  // namespace v8
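
// Usage sketch (illustrative, not part of this file): emitting a small RVV
// sequence through an assembler that mixes in AssemblerRISCVV. The register
// names and the vadd_vv emitter (assumed to come from DEFINE_OPIVV above)
// are assumptions here; vsetvli's tail/mask arguments default to tu/mu.
//
//   assm.vsetvli(t0, a0, E32, m1);    // SEW = 32, LMUL = 1
//   assm.vl(v2, a1, 0b00000, E32);    // unit-stride load
//   assm.vadd_vv(v4, v2, v2);         // element-wise add, mask defaults
//   assm.vs(v4, a2, 0b00000, E32);    // unit-stride store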