v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
instruction-codes-loong64.h
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_BACKEND_LOONG64_INSTRUCTION_CODES_LOONG64_H_
#define V8_COMPILER_BACKEND_LOONG64_INSTRUCTION_CODES_LOONG64_H_

namespace v8 {
namespace internal {
namespace compiler {

// LOONG64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.

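// A minimal sketch (not part of this header; the consumer macro name below is
// illustrative) of how such an X-macro list is typically expanded, for example
// into an enum of target-specific opcodes:
//
//   #define DECLARE_OPCODE(Name) k##Name,
//   enum SomeLoong64Opcode { TARGET_ARCH_OPCODE_LIST(DECLARE_OPCODE) };
//   #undef DECLARE_OPCODE
//
// Each V(Loong64Xyz) entry then becomes an enumerator kLoong64Xyz.
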
// Opcodes that support a MemoryAccessMode.
#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
  V(Loong64Ld_b) \
  V(Loong64Ld_bu) \
  V(Loong64St_b) \
  V(Loong64Ld_h) \
  V(Loong64Ld_hu) \
  V(Loong64St_h) \
  V(Loong64Ld_w) \
  V(Loong64Ld_wu) \
  V(Loong64St_w) \
  V(Loong64Ld_d) \
  V(Loong64St_d) \
  V(Loong64LoadDecompressTaggedSigned) \
  V(Loong64LoadDecompressTagged) \
  V(Loong64LoadDecompressProtected) \
  V(Loong64StoreCompressTagged) \
  V(Loong64Fld_s) \
  V(Loong64Fst_s) \
  V(Loong64Fld_d) \
  V(Loong64Fst_d) \
  V(Loong64LoadLane) \
  V(Loong64StoreLane) \
  V(Loong64S128LoadSplat) \
  V(Loong64S128Load8x8S) \
  V(Loong64S128Load8x8U) \
  V(Loong64S128Load16x4S) \
  V(Loong64S128Load16x4U) \
  V(Loong64S128Load32x2S) \
  V(Loong64S128Load32x2U) \
  V(Loong64Word64AtomicLoadUint32) \
  V(Loong64Word64AtomicLoadUint64) \
  V(Loong64Word64AtomicStoreWord64)

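// A hedged sketch (the field and enum names are assumed from the shared
// instruction-code layout, not defined in this header) of how an opcode from
// the list above might be tagged with a memory access mode when the access
// needs trap-handler protection:
//
//   InstructionCode code = kLoong64Ld_w;
//   code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
//
// Opcodes outside that list are not expected to carry such a tag.
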
#define TARGET_ARCH_OPCODE_LIST(V) \
  TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
  V(Loong64Add_d) \
  V(Loong64Add_w) \
  V(Loong64AddOvf_d) \
  V(Loong64Sub_d) \
  V(Loong64Sub_w) \
  V(Loong64SubOvf_d) \
  V(Loong64Mul_d) \
  V(Loong64MulOvf_w) \
  V(Loong64MulOvf_d) \
  V(Loong64Mulh_d) \
  V(Loong64Mulh_w) \
  V(Loong64Mulh_du) \
  V(Loong64Mulh_wu) \
  V(Loong64Mul_w) \
  V(Loong64Div_d) \
  V(Loong64Div_w) \
  V(Loong64Div_du) \
  V(Loong64Div_wu) \
  V(Loong64Mod_d) \
  V(Loong64Mod_w) \
  V(Loong64Mod_du) \
  V(Loong64Mod_wu) \
  V(Loong64And) \
  V(Loong64And32) \
  V(Loong64Or) \
  V(Loong64Or32) \
  V(Loong64Nor) \
  V(Loong64Nor32) \
  V(Loong64Xor) \
  V(Loong64Xor32) \
  V(Loong64Alsl_d) \
  V(Loong64Alsl_w) \
  V(Loong64Sll_d) \
  V(Loong64Sll_w) \
  V(Loong64Srl_d) \
  V(Loong64Srl_w) \
  V(Loong64Sra_d) \
  V(Loong64Sra_w) \
  V(Loong64Rotr_d) \
  V(Loong64Rotr_w) \
  V(Loong64Bstrpick_d) \
  V(Loong64Bstrpick_w) \
  V(Loong64Bstrins_d) \
  V(Loong64Bstrins_w) \
  V(Loong64ByteSwap64) \
  V(Loong64ByteSwap32) \
  V(Loong64Clz_d) \
  V(Loong64Clz_w) \
  V(Loong64Mov) \
  V(Loong64Tst) \
  V(Loong64Cmp32) \
  V(Loong64Cmp64) \
  V(Loong64Float32Cmp) \
  V(Loong64Float32Add) \
  V(Loong64Float32Sub) \
  V(Loong64Float32Mul) \
  V(Loong64Float32Div) \
  V(Loong64Float32Abs) \
  V(Loong64Float32Neg) \
  V(Loong64Float32Sqrt) \
  V(Loong64Float32Max) \
  V(Loong64Float32Min) \
  V(Loong64Float32ToFloat64) \
  V(Loong64Float32RoundDown) \
  V(Loong64Float32RoundUp) \
  V(Loong64Float32RoundTruncate) \
  V(Loong64Float32RoundTiesEven) \
  V(Loong64Float32ToInt32) \
  V(Loong64Float32ToInt64) \
  V(Loong64Float32ToUint32) \
  V(Loong64Float32ToUint64) \
  V(Loong64Float64Cmp) \
  V(Loong64Float64Add) \
  V(Loong64Float64Sub) \
  V(Loong64Float64Mul) \
  V(Loong64Float64Div) \
  V(Loong64Float64Mod) \
  V(Loong64Float64Abs) \
  V(Loong64Float64Neg) \
  V(Loong64Float64Sqrt) \
  V(Loong64Float64Max) \
  V(Loong64Float64Min) \
  V(Loong64Float64ToFloat32) \
  V(Loong64Float64RoundDown) \
  V(Loong64Float64RoundUp) \
  V(Loong64Float64RoundTruncate) \
  V(Loong64Float64RoundTiesEven) \
  V(Loong64Float64ToInt32) \
  V(Loong64Float64ToInt64) \
  V(Loong64Float64ToUint32) \
  V(Loong64Float64ToUint64) \
  V(Loong64Int32ToFloat32) \
  V(Loong64Int32ToFloat64) \
  V(Loong64Int64ToFloat32) \
  V(Loong64Int64ToFloat64) \
  V(Loong64Uint32ToFloat32) \
  V(Loong64Uint32ToFloat64) \
  V(Loong64Uint64ToFloat32) \
  V(Loong64Uint64ToFloat64) \
  V(Loong64Float64ExtractLowWord32) \
  V(Loong64Float64ExtractHighWord32) \
  V(Loong64Float64FromWord32Pair) \
  V(Loong64Float64InsertLowWord32) \
  V(Loong64Float64InsertHighWord32) \
  V(Loong64BitcastDL) \
  V(Loong64BitcastLD) \
  V(Loong64Float64SilenceNaN) \
  V(Loong64LoadDecodeSandboxedPointer) \
  V(Loong64StoreEncodeSandboxedPointer) \
  V(Loong64StoreIndirectPointer) \
  V(Loong64Push) \
  V(Loong64Peek) \
  V(Loong64Poke) \
  V(Loong64StackClaim) \
  V(Loong64Ext_w_b) \
  V(Loong64Ext_w_h) \
  V(Loong64Dbar) \
  V(Loong64S128Const) \
  V(Loong64S128Zero) \
  V(Loong64S128AllOnes) \
  V(Loong64I32x4Splat) \
  V(Loong64I32x4ExtractLane) \
  V(Loong64I32x4ReplaceLane) \
  V(Loong64I32x4Add) \
  V(Loong64I32x4Sub) \
  V(Loong64F64x2Abs) \
  V(Loong64F64x2Neg) \
  V(Loong64F32x4Splat) \
  V(Loong64F32x4ExtractLane) \
  V(Loong64F32x4ReplaceLane) \
  V(Loong64F32x4SConvertI32x4) \
  V(Loong64F32x4UConvertI32x4) \
  V(Loong64I32x4Mul) \
  V(Loong64I32x4MaxS) \
  V(Loong64I32x4MinS) \
  V(Loong64I32x4Eq) \
  V(Loong64I32x4Ne) \
  V(Loong64I32x4Shl) \
  V(Loong64I32x4ShrS) \
  V(Loong64I32x4ShrU) \
  V(Loong64I32x4MaxU) \
  V(Loong64I32x4MinU) \
  V(Loong64F64x2Sqrt) \
  V(Loong64F64x2Add) \
  V(Loong64F64x2Sub) \
  V(Loong64F64x2Mul) \
  V(Loong64F64x2Div) \
  V(Loong64F64x2Min) \
  V(Loong64F64x2Max) \
  V(Loong64F64x2Eq) \
  V(Loong64F64x2Ne) \
  V(Loong64F64x2Lt) \
  V(Loong64F64x2Le) \
  V(Loong64F64x2Splat) \
  V(Loong64F64x2ExtractLane) \
  V(Loong64F64x2ReplaceLane) \
  V(Loong64F64x2Pmin) \
  V(Loong64F64x2Pmax) \
  V(Loong64F64x2Ceil) \
  V(Loong64F64x2Floor) \
  V(Loong64F64x2Trunc) \
  V(Loong64F64x2NearestInt) \
  V(Loong64F64x2ConvertLowI32x4S) \
  V(Loong64F64x2ConvertLowI32x4U) \
  V(Loong64F64x2PromoteLowF32x4) \
  V(Loong64F64x2RelaxedMin) \
  V(Loong64F64x2RelaxedMax) \
  V(Loong64I64x2Splat) \
  V(Loong64I64x2ExtractLane) \
  V(Loong64I64x2ReplaceLane) \
  V(Loong64I64x2Add) \
  V(Loong64I64x2Sub) \
  V(Loong64I64x2Mul) \
  V(Loong64I64x2Neg) \
  V(Loong64I64x2Shl) \
  V(Loong64I64x2ShrS) \
  V(Loong64I64x2ShrU) \
  V(Loong64I64x2BitMask) \
  V(Loong64I64x2Eq) \
  V(Loong64I64x2Ne) \
  V(Loong64I64x2GtS) \
  V(Loong64I64x2GeS) \
  V(Loong64I64x2Abs) \
  V(Loong64I64x2SConvertI32x4Low) \
  V(Loong64I64x2SConvertI32x4High) \
  V(Loong64I64x2UConvertI32x4Low) \
  V(Loong64I64x2UConvertI32x4High) \
  V(Loong64ExtMulLow) \
  V(Loong64ExtMulHigh) \
  V(Loong64ExtAddPairwise) \
  V(Loong64F32x4Abs) \
  V(Loong64F32x4Neg) \
  V(Loong64F32x4Sqrt) \
  V(Loong64F32x4Add) \
  V(Loong64F32x4Sub) \
  V(Loong64F32x4Mul) \
  V(Loong64F32x4Div) \
  V(Loong64F32x4Max) \
  V(Loong64F32x4Min) \
  V(Loong64F32x4Eq) \
  V(Loong64F32x4Ne) \
  V(Loong64F32x4Lt) \
  V(Loong64F32x4Le) \
  V(Loong64F32x4Pmin) \
  V(Loong64F32x4Pmax) \
  V(Loong64F32x4Ceil) \
  V(Loong64F32x4Floor) \
  V(Loong64F32x4Trunc) \
  V(Loong64F32x4NearestInt) \
  V(Loong64F32x4DemoteF64x2Zero) \
  V(Loong64F32x4RelaxedMin) \
  V(Loong64F32x4RelaxedMax) \
  V(Loong64I32x4SConvertF32x4) \
  V(Loong64I32x4UConvertF32x4) \
  V(Loong64I32x4Neg) \
  V(Loong64I32x4GtS) \
  V(Loong64I32x4GeS) \
  V(Loong64I32x4GtU) \
  V(Loong64I32x4GeU) \
  V(Loong64I32x4Abs) \
  V(Loong64I32x4BitMask) \
  V(Loong64I32x4DotI16x8S) \
  V(Loong64I32x4TruncSatF64x2SZero) \
  V(Loong64I32x4TruncSatF64x2UZero) \
  V(Loong64I32x4RelaxedTruncF32x4S) \
  V(Loong64I32x4RelaxedTruncF32x4U) \
  V(Loong64I32x4RelaxedTruncF64x2SZero) \
  V(Loong64I32x4RelaxedTruncF64x2UZero) \
  V(Loong64I16x8Splat) \
  V(Loong64I16x8ExtractLaneU) \
  V(Loong64I16x8ExtractLaneS) \
  V(Loong64I16x8ReplaceLane) \
  V(Loong64I16x8Neg) \
  V(Loong64I16x8Shl) \
  V(Loong64I16x8ShrS) \
  V(Loong64I16x8ShrU) \
  V(Loong64I16x8Add) \
  V(Loong64I16x8AddSatS) \
  V(Loong64I16x8Sub) \
  V(Loong64I16x8SubSatS) \
  V(Loong64I16x8Mul) \
  V(Loong64I16x8MaxS) \
  V(Loong64I16x8MinS) \
  V(Loong64I16x8Eq) \
  V(Loong64I16x8Ne) \
  V(Loong64I16x8GtS) \
  V(Loong64I16x8GeS) \
  V(Loong64I16x8AddSatU) \
  V(Loong64I16x8SubSatU) \
  V(Loong64I16x8MaxU) \
  V(Loong64I16x8MinU) \
  V(Loong64I16x8GtU) \
  V(Loong64I16x8GeU) \
  V(Loong64I16x8RoundingAverageU) \
  V(Loong64I16x8Abs) \
  V(Loong64I16x8BitMask) \
  V(Loong64I16x8Q15MulRSatS) \
  V(Loong64I16x8RelaxedQ15MulRS) \
  V(Loong64I8x16Splat) \
  V(Loong64I8x16ExtractLaneU) \
  V(Loong64I8x16ExtractLaneS) \
  V(Loong64I8x16ReplaceLane) \
  V(Loong64I8x16Neg) \
  V(Loong64I8x16Shl) \
  V(Loong64I8x16ShrS) \
  V(Loong64I8x16Add) \
  V(Loong64I8x16AddSatS) \
  V(Loong64I8x16Sub) \
  V(Loong64I8x16SubSatS) \
  V(Loong64I8x16MaxS) \
  V(Loong64I8x16MinS) \
  V(Loong64I8x16Eq) \
  V(Loong64I8x16Ne) \
  V(Loong64I8x16GtS) \
  V(Loong64I8x16GeS) \
  V(Loong64I8x16ShrU) \
  V(Loong64I8x16AddSatU) \
  V(Loong64I8x16SubSatU) \
  V(Loong64I8x16MaxU) \
  V(Loong64I8x16MinU) \
  V(Loong64I8x16GtU) \
  V(Loong64I8x16GeU) \
  V(Loong64I8x16RoundingAverageU) \
  V(Loong64I8x16Abs) \
  V(Loong64I8x16Popcnt) \
  V(Loong64I8x16BitMask) \
  V(Loong64S128And) \
  V(Loong64S128Or) \
  V(Loong64S128Xor) \
  V(Loong64S128Not) \
  V(Loong64S128Select) \
  V(Loong64S128AndNot) \
  V(Loong64I64x2AllTrue) \
  V(Loong64I32x4AllTrue) \
  V(Loong64I16x8AllTrue) \
  V(Loong64I8x16AllTrue) \
  V(Loong64V128AnyTrue) \
  V(Loong64S32x4InterleaveRight) \
  V(Loong64S32x4InterleaveLeft) \
  V(Loong64S32x4PackEven) \
  V(Loong64S32x4PackOdd) \
  V(Loong64S32x4InterleaveEven) \
  V(Loong64S32x4InterleaveOdd) \
  V(Loong64S32x4Shuffle) \
  V(Loong64S16x8InterleaveRight) \
  V(Loong64S16x8InterleaveLeft) \
  V(Loong64S16x8PackEven) \
  V(Loong64S16x8PackOdd) \
  V(Loong64S16x8InterleaveEven) \
  V(Loong64S16x8InterleaveOdd) \
  V(Loong64S16x4Reverse) \
  V(Loong64S16x2Reverse) \
  V(Loong64S8x16InterleaveRight) \
  V(Loong64S8x16InterleaveLeft) \
  V(Loong64S8x16PackEven) \
  V(Loong64S8x16PackOdd) \
  V(Loong64S8x16InterleaveEven) \
  V(Loong64S8x16InterleaveOdd) \
  V(Loong64I8x16Shuffle) \
  V(Loong64I8x16Swizzle) \
  V(Loong64S8x16Concat) \
  V(Loong64S8x8Reverse) \
  V(Loong64S8x4Reverse) \
  V(Loong64S8x2Reverse) \
  V(Loong64S128Load32Zero) \
  V(Loong64S128Load64Zero) \
  V(Loong64I32x4SConvertI16x8Low) \
  V(Loong64I32x4SConvertI16x8High) \
  V(Loong64I32x4UConvertI16x8Low) \
  V(Loong64I32x4UConvertI16x8High) \
  V(Loong64I16x8SConvertI8x16Low) \
  V(Loong64I16x8SConvertI8x16High) \
  V(Loong64I16x8SConvertI32x4) \
  V(Loong64I16x8UConvertI32x4) \
  V(Loong64I16x8UConvertI8x16Low) \
  V(Loong64I16x8UConvertI8x16High) \
  V(Loong64I8x16SConvertI16x8) \
  V(Loong64I8x16UConvertI16x8) \
  V(Loong64AtomicLoadDecompressTaggedSigned) \
  V(Loong64AtomicLoadDecompressTagged) \
  V(Loong64AtomicStoreCompressTagged) \
  V(Loong64Word64AtomicAddUint64) \
  V(Loong64Word64AtomicSubUint64) \
  V(Loong64Word64AtomicAndUint64) \
  V(Loong64Word64AtomicOrUint64) \
  V(Loong64Word64AtomicXorUint64) \
  V(Loong64Word64AtomicExchangeUint64) \
  V(Loong64Word64AtomicCompareExchangeUint64)

// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// R = register
// O = register or stack slot
// D = double register
// I = immediate (handle, external, int32)
// MRI = [register + immediate]
// MRR = [register + register]
#define TARGET_ADDRESSING_MODE_LIST(V) \
  V(MRI) /* [%r0 + K] */ \
  V(MRR) /* [%r0 + %r1] */ \
  V(Root) /* [%rr + K] */
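
// A rough usage sketch (the AddressingModeField helper and kMode_MRI constant
// are assumed from the shared instruction-code layout, not defined here) of
// how an addressing mode is folded into an InstructionCode during instruction
// selection:
//
//   InstructionCode code =
//       kLoong64Ld_d | AddressingModeField::encode(kMode_MRI);
//
// The code generator later decodes the addressing mode and emits the
// [register + immediate] form of the load.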

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_BACKEND_LOONG64_INSTRUCTION_CODES_LOONG64_H_