instruction-codes-riscv.h
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_BACKEND_RISCV_INSTRUCTION_CODES_RISCV_H_
#define V8_COMPILER_BACKEND_RISCV_INSTRUCTION_CODES_RISCV_H_

namespace v8 {
namespace internal {
namespace compiler {

// RISC-V-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#if V8_TARGET_ARCH_RISCV64
// Opcodes that support a MemoryAccessMode.
#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
  V(RiscvLd) \
  V(RiscvSd) \
  V(RiscvLwu) \
  V(RiscvWord64AtomicLoadUint64) \
  V(RiscvWord64AtomicStoreWord64) \
  V(RiscvLb) \
  V(RiscvLbu) \
  V(RiscvSb) \
  V(RiscvLh) \
  V(RiscvLhu) \
  V(RiscvSh) \
  V(RiscvLw) \
  V(RiscvSw) \
  V(RiscvLoadDouble) \
  V(RiscvStoreDouble) \
  V(RiscvStoreFloat) \
  V(RiscvLoadFloat) \
  V(RiscvStoreCompressTagged) \
  V(RiscvLoadDecompressTaggedSigned) \
  V(RiscvLoadDecompressTagged) \
  V(RiscvS128LoadSplat) \
  V(RiscvS128Load64ExtendS) \
  V(RiscvS128Load64ExtendU) \
  V(RiscvS128Load64Zero) \
  V(RiscvS128Load32Zero) \
  V(RiscvS128LoadLane) \
  V(RiscvS128StoreLane) \
  V(RiscvRvvLd) \
  V(RiscvRvvSt)

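// Illustrative note (an assumption about usage, not part of this header): the
// opcode lists in this file are X-macros. A consumer supplies its own V(Name)
// definition and the preprocessor stamps it out once per entry, so the list
// above can, for example, drive a predicate that reports whether an opcode
// carries a MemoryAccessMode. A minimal sketch (helper name hypothetical):
//
//   inline bool RiscvHasMemoryAccessMode(ArchOpcode opcode) {
//     switch (opcode) {
//   #define CASE(Name) case k##Name:
//       TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(CASE)
//   #undef CASE
//       return true;
//       default:
//         return false;
//     }
//   }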
#define TARGET_ARCH_OPCODE_LIST_SPECAIL(V) \
  TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
  V(RiscvAdd64) \
  V(RiscvAddOvf64) \
  V(RiscvSub64) \
  V(RiscvSubOvf64) \
  V(RiscvMulHigh64) \
  V(RiscvMulHighU64) \
  V(RiscvMul64) \
  V(RiscvMulOvf64) \
  V(RiscvDiv64) \
  V(RiscvDivU64) \
  V(RiscvMod64) \
  V(RiscvModU64) \
  V(RiscvZeroExtendWord) \
  V(RiscvSignExtendWord) \
  V(RiscvClz64) \
  V(RiscvShl64) \
  V(RiscvShr64) \
  V(RiscvSar64) \
  V(RiscvRor64) \
  V(RiscvFloat64RoundDown) \
  V(RiscvFloat64RoundTruncate) \
  V(RiscvFloat64RoundUp) \
  V(RiscvFloat64RoundTiesEven) \
  V(RiscvTruncLS) \
  V(RiscvTruncLD) \
  V(RiscvTruncUlS) \
  V(RiscvTruncUlD) \
  V(RiscvCvtSL) \
  V(RiscvCvtSUl) \
  V(RiscvCvtDL) \
  V(RiscvCvtDUl) \
  V(RiscvUsd) \
  V(RiscvUlwu) \
  V(RiscvBitcastDL) \
  V(RiscvBitcastLD) \
  V(RiscvByteSwap64) \
  V(RiscvWord64AtomicAddUint64) \
  V(RiscvWord64AtomicSubUint64) \
  V(RiscvWord64AtomicAndUint64) \
  V(RiscvWord64AtomicOrUint64) \
  V(RiscvWord64AtomicXorUint64) \
  V(RiscvWord64AtomicExchangeUint64) \
  V(RiscvLoadDecodeSandboxedPointer) \
  V(RiscvStoreEncodeSandboxedPointer) \
  V(RiscvStoreIndirectPointer) \
  V(RiscvAtomicLoadDecompressTaggedSigned) \
  V(RiscvAtomicLoadDecompressTagged) \
  V(RiscvLoadDecompressProtected) \
  V(RiscvAtomicStoreCompressTagged) \
  V(RiscvWord64AtomicCompareExchangeUint64) \
  V(RiscvCmp32) \
  V(RiscvCmpZero32) \
  V(RiscvTst64)
#elif V8_TARGET_ARCH_RISCV32
#define TARGET_ARCH_OPCODE_LIST_SPECAIL(V) \
  V(RiscvAddOvf) \
  V(RiscvSubOvf) \
  V(RiscvAddPair) \
  V(RiscvSubPair) \
  V(RiscvMulPair) \
  V(RiscvAndPair) \
  V(RiscvOrPair) \
  V(RiscvXorPair) \
  V(RiscvShlPair) \
  V(RiscvShrPair) \
  V(RiscvSarPair) \
  V(RiscvWord32AtomicPairLoad) \
  V(RiscvWord32AtomicPairStore) \
  V(RiscvWord32AtomicPairAdd) \
  V(RiscvWord32AtomicPairSub) \
  V(RiscvWord32AtomicPairAnd) \
  V(RiscvWord32AtomicPairOr) \
  V(RiscvWord32AtomicPairXor) \
  V(RiscvWord32AtomicPairExchange) \
  V(RiscvWord32AtomicPairCompareExchange) \
  V(RiscvLb) \
  V(RiscvLbu) \
  V(RiscvSb) \
  V(RiscvLh) \
  V(RiscvLhu) \
  V(RiscvSh) \
  V(RiscvLw) \
  V(RiscvSw) \
  V(RiscvLoadDouble) \
  V(RiscvStoreDouble) \
  V(RiscvStoreFloat) \
  V(RiscvLoadFloat) \
  V(RiscvS128LoadSplat) \
  V(RiscvS128Load64ExtendS) \
  V(RiscvS128Load64ExtendU) \
  V(RiscvS128Load64Zero) \
  V(RiscvS128Load32Zero) \
  V(RiscvS128LoadLane) \
  V(RiscvS128StoreLane) \
  V(RiscvRvvLd) \
  V(RiscvRvvSt)
#endif

#define TARGET_ARCH_OPCODE_LIST_COMMON(V) \
  V(RiscvAdd32) \
  V(RiscvSub32) \
  V(RiscvMul32) \
  V(RiscvMulOvf32) \
  V(RiscvMulHigh32) \
  V(RiscvMulHighU32) \
  V(RiscvDiv32) \
  V(RiscvDivU32) \
  V(RiscvMod32) \
  V(RiscvModU32) \
  V(RiscvAnd) \
  V(RiscvAnd32) \
  V(RiscvOr) \
  V(RiscvOr32) \
  V(RiscvXor) \
  V(RiscvXor32) \
  V(RiscvClz32) \
  V(RiscvShl32) \
  V(RiscvShr32) \
  V(RiscvSar32) \
  V(RiscvRor32) \
  V(RiscvMov) \
  V(RiscvTst32) \
  V(RiscvCmp) \
  V(RiscvCmpZero) \
  V(RiscvCmpS) \
  V(RiscvAddS) \
  V(RiscvSubS) \
  V(RiscvMulS) \
  V(RiscvDivS) \
  V(RiscvModS) \
  V(RiscvAbsS) \
  V(RiscvNegS) \
  V(RiscvSqrtS) \
  V(RiscvMaxS) \
  V(RiscvMinS) \
  V(RiscvCmpD) \
  V(RiscvAddD) \
  V(RiscvSubD) \
  V(RiscvMulD) \
  V(RiscvDivD) \
  V(RiscvModD) \
  V(RiscvAbsD) \
  V(RiscvNegD) \
  V(RiscvSqrtD) \
  V(RiscvMaxD) \
  V(RiscvMinD) \
  V(RiscvFloat32RoundDown) \
  V(RiscvFloat32RoundTruncate) \
  V(RiscvFloat32RoundUp) \
  V(RiscvFloat32RoundTiesEven) \
  V(RiscvCvtSD) \
  V(RiscvCvtDS) \
  V(RiscvTruncWD) \
  V(RiscvRoundWD) \
  V(RiscvFloorWD) \
  V(RiscvCeilWD) \
  V(RiscvTruncWS) \
  V(RiscvRoundWS) \
  V(RiscvFloorWS) \
  V(RiscvCeilWS) \
  V(RiscvTruncUwD) \
  V(RiscvTruncUwS) \
  V(RiscvCvtDW) \
  V(RiscvCvtSW) \
  V(RiscvCvtSUw) \
  V(RiscvCvtDUw) \
  V(RiscvUlh) \
  V(RiscvUlhu) \
  V(RiscvUsh) \
  V(RiscvUld) \
  V(RiscvUlw) \
  V(RiscvUsw) \
  V(RiscvUStoreFloat) \
  V(RiscvULoadFloat) \
  V(RiscvULoadDouble) \
  V(RiscvUStoreDouble) \
  V(RiscvEnableDebugTrace) \
  V(RiscvDisableDebugTrace) \
  V(RiscvBitcastInt32ToFloat32) \
  V(RiscvBitcastFloat32ToInt32) \
  V(RiscvFloat64ExtractLowWord32) \
  V(RiscvFloat64ExtractHighWord32) \
  V(RiscvFloat64InsertLowWord32) \
  V(RiscvFloat64InsertHighWord32) \
  V(RiscvFloat32Max) \
  V(RiscvFloat64Max) \
  V(RiscvFloat32Min) \
  V(RiscvFloat64Min) \
  V(RiscvFloat64SilenceNaN) \
  V(RiscvPush) \
  V(RiscvPeek) \
  V(RiscvByteSwap32) \
  V(RiscvStoreToStackSlot) \
  V(RiscvStackClaim) \
  V(RiscvSignExtendByte) \
  V(RiscvSignExtendShort) \
  V(RiscvSync) \
  V(RiscvAssertEqual) \
  V(RiscvS128Const) \
  V(RiscvS128Zero) \
  V(RiscvS128AllOnes) \
  V(RiscvI32x4ExtractLane) \
  V(RiscvI32x4ReplaceLane) \
  V(RiscvF64x2Abs) \
  V(RiscvF32x4ExtractLane) \
  V(RiscvF32x4ReplaceLane) \
  V(RiscvF32x4SConvertI32x4) \
  V(RiscvF32x4UConvertI32x4) \
  V(RiscvI64x2SConvertI32x4Low) \
  V(RiscvI64x2SConvertI32x4High) \
  V(RiscvI64x2UConvertI32x4Low) \
  V(RiscvI64x2UConvertI32x4High) \
  V(RiscvI32x4Shl) \
  V(RiscvI32x4ShrS) \
  V(RiscvI32x4ShrU) \
  V(RiscvF64x2Sqrt) \
  V(RiscvF64x2ConvertLowI32x4S) \
  V(RiscvF64x2ConvertLowI32x4U) \
  V(RiscvF64x2PromoteLowF32x4) \
  V(RiscvF64x2ExtractLane) \
  V(RiscvF64x2ReplaceLane) \
  V(RiscvF64x2Pmin) \
  V(RiscvF64x2Pmax) \
  V(RiscvF64x2Ceil) \
  V(RiscvF64x2Floor) \
  V(RiscvF64x2Trunc) \
  V(RiscvF64x2NearestInt) \
  V(RiscvI64x2SplatI32Pair) \
  V(RiscvI64x2ExtractLane) \
  V(RiscvI64x2ReplaceLane) \
  V(RiscvI64x2ReplaceLaneI32Pair) \
  V(RiscvI64x2Shl) \
  V(RiscvI64x2ShrS) \
  V(RiscvI64x2ShrU) \
  V(RiscvF32x4Abs) \
  V(RiscvF32x4Sqrt) \
  V(RiscvF32x4Qfma) \
  V(RiscvF32x4Qfms) \
  V(RiscvF64x2Qfma) \
  V(RiscvF64x2Qfms) \
  V(RiscvF32x4Pmin) \
  V(RiscvF32x4Pmax) \
  V(RiscvF32x4DemoteF64x2Zero) \
  V(RiscvF32x4Ceil) \
  V(RiscvF32x4Floor) \
  V(RiscvF32x4Trunc) \
  V(RiscvF32x4NearestInt) \
  V(RiscvI32x4SConvertF32x4) \
  V(RiscvI32x4UConvertF32x4) \
  V(RiscvI32x4TruncSatF64x2SZero) \
  V(RiscvI32x4TruncSatF64x2UZero) \
  V(RiscvI16x8ExtractLaneU) \
  V(RiscvI16x8ExtractLaneS) \
  V(RiscvI16x8ReplaceLane) \
  V(RiscvI16x8Shl) \
  V(RiscvI16x8ShrS) \
  V(RiscvI16x8ShrU) \
  V(RiscvI8x16ExtractLaneU) \
  V(RiscvI8x16ExtractLaneS) \
  V(RiscvI8x16ReplaceLane) \
  V(RiscvI8x16Shl) \
  V(RiscvI8x16ShrS) \
  V(RiscvI8x16ShrU) \
  V(RiscvI8x16RoundingAverageU) \
  V(RiscvI8x16Popcnt) \
  V(RiscvVnot) \
  V(RiscvS128Select) \
  V(RiscvV128AnyTrue) \
  V(RiscvI8x16Shuffle) \
  V(RiscvVmv) \
  V(RiscvVandVv) \
  V(RiscvVnotVv) \
  V(RiscvVorVv) \
  V(RiscvVxorVv) \
  V(RiscvVwmul) \
  V(RiscvVwmulu) \
  V(RiscvVmvSx) \
  V(RiscvVmvXs) \
  V(RiscvVcompress) \
  V(RiscvVaddVv) \
  V(RiscvVsubVv) \
  V(RiscvVwaddVv) \
  V(RiscvVwadduVv) \
  V(RiscvVwadduWx) \
  V(RiscvVrgather) \
  V(RiscvVslidedown) \
  V(RiscvVAbs) \
  V(RiscvVsll) \
  V(RiscvVfmvVf) \
  V(RiscvVnegVv) \
  V(RiscvVfnegVv) \
  V(RiscvVmaxuVv) \
  V(RiscvVmax) \
  V(RiscvVminuVv) \
  V(RiscvVminsVv) \
  V(RiscvVmulVv) \
  V(RiscvVdivu) \
  V(RiscvVmslt) \
  V(RiscvVgtsVv) \
  V(RiscvVgesVv) \
  V(RiscvVgeuVv) \
  V(RiscvVgtuVv) \
  V(RiscvVeqVv) \
  V(RiscvVneVv) \
  V(RiscvVaddSatSVv) \
  V(RiscvVaddSatUVv) \
  V(RiscvVsubSatSVv) \
  V(RiscvVsubSatUVv) \
  V(RiscvVmfeqVv) \
  V(RiscvVmfneVv) \
  V(RiscvVmfleVv) \
  V(RiscvVmfltVv) \
  V(RiscvVfaddVv) \
  V(RiscvVfsubVv) \
  V(RiscvVfmulVv) \
  V(RiscvVfdivVv) \
  V(RiscvVfminVv) \
  V(RiscvVfmaxVv) \
  V(RiscvVmergeVx) \
  V(RiscvVsmulVv) \
  V(RiscvVnclipu) \
  V(RiscvVnclip) \
  V(RiscvVredminuVs) \
  V(RiscvVAllTrue) \
  V(RiscvVzextVf2) \
  V(RiscvVsextVf2)

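// Opcodes backed by the RISC-V Zbb (basic bit-manipulation) extension; the
// _32 list below holds the word-form (32-bit operand) variants that exist
// only on RV64.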
#define TARGET_ARCH_OPCODE_LIST_ZBB(V) \
  V(RiscvAndn) \
  V(RiscvOrn) \
  V(RiscvXnor) \
  V(RiscvClz) \
  V(RiscvCtz) \
  V(RiscvCpop) \
  V(RiscvMax) \
  V(RiscvMaxu) \
  V(RiscvMin) \
  V(RiscvMinu) \
  V(RiscvSextb) \
  V(RiscvSexth) \
  V(RiscvZexth) \
  V(RiscvRev8)

#ifdef V8_TARGET_ARCH_RISCV64
#define TARGET_ARCH_OPCODE_LIST_ZBB_32(V) \
  V(RiscvClzw) \
  V(RiscvCtzw) \
  V(RiscvCpopw)
#else
#define TARGET_ARCH_OPCODE_LIST_ZBB_32(V)
#endif

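// Opcodes backed by the RISC-V Zba (address generation) extension; the _32
// list below holds the .uw word-form variants that exist only on RV64.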
#define TARGET_ARCH_OPCODE_LIST_ZBA(V) \
  V(RiscvSh1add) \
  V(RiscvSh2add) \
  V(RiscvSh3add)

#ifdef V8_TARGET_ARCH_RISCV64
#define TARGET_ARCH_OPCODE_LIST_ZBA_32(V) \
  V(RiscvAdduw) \
  V(RiscvSh1adduw) \
  V(RiscvSh2adduw) \
  V(RiscvSh3adduw) \
  V(RiscvSlliuw)
#else
#define TARGET_ARCH_OPCODE_LIST_ZBA_32(V)
#endif

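// Opcodes backed by the RISC-V Zbs (single-bit manipulation) extension.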
#define TARGET_ARCH_OPCODE_LIST_ZBS(V) \
  V(RiscvBclr) \
  V(RiscvBclri) \
  V(RiscvBext) \
  V(RiscvBexti) \
  V(RiscvBinv) \
  V(RiscvBinvi) \
  V(RiscvBset) \
  V(RiscvBseti)

#define TARGET_ARCH_OPCODE_LIST(V) \
  TARGET_ARCH_OPCODE_LIST_COMMON(V) \
  TARGET_ARCH_OPCODE_LIST_SPECAIL(V) \
  TARGET_ARCH_OPCODE_LIST_ZBB(V) \
  TARGET_ARCH_OPCODE_LIST_ZBS(V) \
  TARGET_ARCH_OPCODE_LIST_ZBA(V) \
  TARGET_ARCH_OPCODE_LIST_ZBA_32(V) \
  TARGET_ARCH_OPCODE_LIST_ZBB_32(V)

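// Illustrative note (an assumption, not part of this header): the aggregated
// list above is typically expanded by the architecture-independent backend
// into an opcode enum, so every entry becomes an enumerator such as
// kRiscvAdd32. A minimal sketch of that expansion (enum name hypothetical):
//
//   #define DECLARE_OPCODE(Name) k##Name,
//   enum RiscvOpcodeSketch { TARGET_ARCH_OPCODE_LIST(DECLARE_OPCODE) };
//   #undef DECLARE_OPCODE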
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// R = register
// O = register or stack slot
// D = double register
// I = immediate (handle, external, int32)
// MRI = [register + immediate]
// MRR = [register + register]
// Root = [kRootRegister + immediate]
// TODO(plind): Add the new r6 address modes.
#define TARGET_ADDRESSING_MODE_LIST(V) \
  V(MRI)  /* [%r0 + K] */ \
  V(MRR)  /* [%r0 + %r1] */ \
  V(Root) /* [root + k] */

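// Illustrative note (an assumption, not part of this header): the selected
// addressing mode is packed into the instruction's InstructionCode next to
// the opcode via a bit field, and the code generator decodes it after
// register allocation to pick the assembler call. A minimal sketch (field
// name and bit positions hypothetical):
//
//   using AddressingModeBits = base::BitField<int, 9, 5>;
//   InstructionCode code = opcode | AddressingModeBits::encode(kMode_MRI);
//   int mode = AddressingModeBits::decode(code);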
}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_BACKEND_RISCV_INSTRUCTION_CODES_RISCV_H_