v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
instruction-codes-arm64.h
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_BACKEND_ARM64_INSTRUCTION_CODES_ARM64_H_
#define V8_COMPILER_BACKEND_ARM64_INSTRUCTION_CODES_ARM64_H_

namespace v8 {
namespace internal {
namespace compiler {

// ARM64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.

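// Note (illustrative addition, not part of the original header): the macros
// below are X-macro lists. A consumer passes in a macro V that is applied to
// every opcode name; the shared backend header expands these lists into the
// ArchOpcode enum and related helpers, roughly along these lines:
//
//   #define DECLARE_ARCH_OPCODE(Name) k##Name,
//   enum ArchOpcode { TARGET_ARCH_OPCODE_LIST(DECLARE_ARCH_OPCODE) };
//   #undef DECLARE_ARCH_OPCODE
//
// The exact expansion lives in the shared instruction-codes.h and may differ.
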
// Opcodes that support a MemoryAccessMode.
#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
  V(Arm64Ldr) \
  V(Arm64Ldrb) \
  V(Arm64LdrD) \
  V(Arm64Ldrh) \
  V(Arm64LdrQ) \
  V(Arm64LdrS) \
  V(Arm64LdrH) \
  V(Arm64Ldrsb) \
  V(Arm64LdrsbW) \
  V(Arm64Ldrsh) \
  V(Arm64LdrshW) \
  V(Arm64Ldrsw) \
  V(Arm64LdrW) \
  IF_WASM(V, Arm64LoadLane) \
  IF_WASM(V, Arm64LoadSplat) \
  IF_WASM(V, Arm64S128Load16x4S) \
  IF_WASM(V, Arm64S128Load16x4U) \
  IF_WASM(V, Arm64S128Load32x2S) \
  IF_WASM(V, Arm64S128Load32x2U) \
  IF_WASM(V, Arm64S128Load8x8S) \
  IF_WASM(V, Arm64S128Load8x8U) \
  IF_WASM(V, Arm64StoreLane) \
  IF_WASM(V, Arm64S128LoadPairDeinterleave) \
  V(Arm64Str) \
  V(Arm64StrPair) \
  V(Arm64Strb) \
  V(Arm64StrD) \
  V(Arm64Strh) \
  V(Arm64StrQ) \
  V(Arm64StrS) \
  V(Arm64StrH) \
  V(Arm64StrW) \
  V(Arm64StrWPair) \
  V(Arm64LdrDecompressTaggedSigned) \
  V(Arm64LdrDecompressTagged) \
  V(Arm64LdrDecompressProtected) \
  V(Arm64StrCompressTagged) \
  V(Arm64Word64AtomicLoadUint64) \
  V(Arm64Word64AtomicStoreWord64)

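// Note (illustrative addition): opcodes in the list above may carry a
// MemoryAccessMode in their InstructionCode. The code generator reads it back
// to decide whether the emitted load/store needs trap-handler bookkeeping
// (e.g. for WebAssembly out-of-bounds accesses), roughly:
//
//   MemoryAccessMode mode = AccessModeField::decode(instr->opcode());
//   if (mode != MemoryAccessMode::kMemoryAccessDirect) {
//     // record the PC of the protected instruction for the trap handler
//   }
//
// The MemoryAccessMode enum and AccessModeField bit field are defined in the
// shared instruction-codes.h; the names above follow that header but this is
// only a sketch.
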
#define TARGET_ARCH_SIMD_OPCODE_LIST(V) \
  V(Arm64F64x2Qfma) \
  V(Arm64F64x2Qfms) \
  V(Arm64F64x2Pmin) \
  V(Arm64F64x2Pmax) \
  V(Arm64F64x2ConvertLowI32x4S) \
  V(Arm64F64x2ConvertLowI32x4U) \
  V(Arm64F64x2PromoteLowF32x4) \
  V(Arm64F32x4SConvertI32x4) \
  V(Arm64F32x4UConvertI32x4) \
  V(Arm64F32x4Qfma) \
  V(Arm64F32x4Qfms) \
  V(Arm64F32x4Pmin) \
  V(Arm64F32x4Pmax) \
  V(Arm64F32x4DemoteF64x2Zero) \
  V(Arm64F16x8Pmin) \
  V(Arm64F16x8Pmax) \
  V(Arm64F32x4PromoteLowF16x8) \
  V(Arm64F16x8SConvertI16x8) \
  V(Arm64F16x8UConvertI16x8) \
  V(Arm64F16x8DemoteF32x4Zero) \
  V(Arm64F16x8DemoteF64x2Zero) \
  V(Arm64I16x8SConvertF16x8) \
  V(Arm64I16x8UConvertF16x8) \
  V(Arm64F16x8Qfma) \
  V(Arm64F16x8Qfms) \
  V(Arm64I64x2ShrU) \
  V(Arm64I64x2BitMask) \
  V(Arm64I32x4SConvertF32x4) \
  V(Arm64I32x4Shl) \
  V(Arm64I32x4ShrS) \
  V(Arm64I32x4Mul) \
  V(Arm64I16x8Q15MulRSatS) \
  V(Arm64I16x8BitMask) \
  V(Arm64I8x16Shl) \
  V(Arm64I8x16ShrS) \
  V(Arm64I8x16SConvertI16x8) \
  V(Arm64I8x16ShrU) \
  V(Arm64I8x16UConvertI16x8) \
  V(Arm64I8x16BitMask) \
  V(Arm64S128Const) \
  V(Arm64S128Dup) \
  V(Arm64S128And) \
  V(Arm64S128Or) \
  V(Arm64S128Xor) \
  V(Arm64S128Not) \
  V(Arm64S128Select) \
  V(Arm64S128AndNot) \
  V(Arm64Ssra) \
  V(Arm64Usra) \
  V(Arm64S64x2UnzipLeft) \
  V(Arm64S64x2UnzipRight) \
  V(Arm64S32x4ZipLeft) \
  V(Arm64S32x4ZipRight) \
  V(Arm64S32x4UnzipLeft) \
  V(Arm64S32x4UnzipRight) \
  V(Arm64S32x4TransposeLeft) \
  V(Arm64S32x4TransposeRight) \
  V(Arm64S64x2Shuffle) \
  V(Arm64S64x1Shuffle) \
  V(Arm64S32x4Shuffle) \
  V(Arm64S32x2Shuffle) \
  V(Arm64S32x1Shuffle) \
  V(Arm64S16x2Shuffle) \
  V(Arm64S16x1Shuffle) \
  V(Arm64S8x2Shuffle) \
  V(Arm64S16x8ZipLeft) \
  V(Arm64S16x8ZipRight) \
  V(Arm64S16x8UnzipLeft) \
  V(Arm64S16x8UnzipRight) \
  V(Arm64S16x8TransposeLeft) \
  V(Arm64S16x8TransposeRight) \
  V(Arm64S8x16ZipLeft) \
  V(Arm64S8x16ZipRight) \
  V(Arm64S8x16UnzipLeft) \
  V(Arm64S8x16UnzipRight) \
  V(Arm64S8x16TransposeLeft) \
  V(Arm64S8x16TransposeRight) \
  V(Arm64S8x16Concat) \
  V(Arm64I8x16Swizzle) \
  V(Arm64I8x16Shuffle) \
  V(Arm64S32x4Reverse) \
  V(Arm64S32x4OneLaneSwizzle) \
  V(Arm64S32x2Reverse) \
  V(Arm64S16x4Reverse) \
  V(Arm64S16x2Reverse) \
  V(Arm64S8x8Reverse) \
  V(Arm64S8x4Reverse) \
  V(Arm64S8x2Reverse) \
  V(Arm64V128AnyTrue) \
  V(Arm64I64x2AllTrue) \
  V(Arm64I32x4AllTrue) \
  V(Arm64I16x8AllTrue) \
  V(Arm64I8x16AllTrue) \
  V(Arm64Sxtl) \
  V(Arm64Sxtl2) \
  V(Arm64Uxtl) \
  V(Arm64Uxtl2) \
  V(Arm64FSplat) \
  V(Arm64FAbs) \
  V(Arm64FSqrt) \
  V(Arm64FNeg) \
  V(Arm64FExtractLane) \
  V(Arm64FReplaceLane) \
  V(Arm64ISplat) \
  V(Arm64IAbs) \
  V(Arm64INeg) \
  V(Arm64IExtractLane) \
  V(Arm64IReplaceLane) \
  V(Arm64I64x2Shl) \
  V(Arm64I64x2ShrS) \
  V(Arm64I64x2Mul) \
  V(Arm64I32x4UConvertF32x4) \
  V(Arm64I32x4ShrU) \
  V(Arm64I32x4BitMask) \
  V(Arm64I32x4DotI16x8S) \
  V(Arm64I16x8DotI8x16S) \
  V(Arm64I32x4DotI8x16AddS) \
  V(Arm64I8x16Addv) \
  V(Arm64I16x8Addv) \
  V(Arm64I32x4Addv) \
  V(Arm64I64x2AddPair) \
  V(Arm64F32x4AddReducePairwise) \
  V(Arm64F64x2AddPair) \
  V(Arm64I32x4TruncSatF64x2SZero) \
  V(Arm64I32x4TruncSatF64x2UZero) \
  V(Arm64IExtractLaneU) \
  V(Arm64IExtractLaneS) \
  V(Arm64I16x8Shl) \
  V(Arm64I16x8ShrS) \
  V(Arm64I16x8SConvertI32x4) \
  V(Arm64I16x8Mul) \
  V(Arm64I16x8ShrU) \
  V(Arm64I16x8UConvertI32x4) \
  V(Arm64Mla) \
  V(Arm64Mls) \
  V(Arm64FAdd) \
  V(Arm64FSub) \
  V(Arm64FMul) \
  V(Arm64FMulElement) \
  V(Arm64FDiv) \
  V(Arm64FMin) \
  V(Arm64FMax) \
  V(Arm64FEq) \
  V(Arm64FNe) \
  V(Arm64FLt) \
  V(Arm64FLe) \
  V(Arm64FGt) \
  V(Arm64FGe) \
  V(Arm64IAdd) \
  V(Arm64ISub) \
  V(Arm64IEq) \
  V(Arm64INe) \
  V(Arm64IGtS) \
  V(Arm64IGeS) \
  V(Arm64ILtS) \
  V(Arm64ILeS) \
  V(Arm64IMinS) \
  V(Arm64IMaxS) \
  V(Arm64IMinU) \
  V(Arm64IMaxU) \
  V(Arm64IGtU) \
  V(Arm64IGeU) \
  V(Arm64IAddSatS) \
  V(Arm64ISubSatS) \
  V(Arm64IAddSatU) \
  V(Arm64ISubSatU) \
  V(Arm64RoundingAverageU) \
  V(Arm64Smlal) \
  V(Arm64Smlal2) \
  V(Arm64Sadalp) \
  V(Arm64Saddlp) \
  V(Arm64Bcax) \
  V(Arm64Eor3) \
  V(Arm64Uadalp) \
  V(Arm64Uaddlp) \
  V(Arm64Umlal) \
  V(Arm64Umlal2)

#define TARGET_ARCH_OPCODE_LIST(V) \
  TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
  V(Arm64Add) \
  V(Arm64Add32) \
  V(Arm64And) \
  V(Arm64And32) \
  V(Arm64Bic) \
  V(Arm64Bic32) \
  V(Arm64Clz) \
  V(Arm64Clz32) \
  V(Arm64Cmp) \
  V(Arm64Cmp32) \
  V(Arm64Cmn) \
  V(Arm64Cmn32) \
  V(Arm64Cnt) \
  V(Arm64Cnt32) \
  V(Arm64Cnt64) \
  V(Arm64Tst) \
  V(Arm64Tst32) \
  V(Arm64Or) \
  V(Arm64Or32) \
  V(Arm64Orn) \
  V(Arm64Orn32) \
  V(Arm64Eor) \
  V(Arm64Eor32) \
  V(Arm64Eon) \
  V(Arm64Eon32) \
  V(Arm64Sub) \
  V(Arm64Sub32) \
  V(Arm64Mul) \
  V(Arm64Mul32) \
  V(Arm64Smulh) \
  V(Arm64Smull) \
  V(Arm64Smull2) \
  V(Arm64Umull) \
  V(Arm64Umulh) \
  V(Arm64Umull2) \
  V(Arm64Madd) \
  V(Arm64Madd32) \
  V(Arm64Msub) \
  V(Arm64Msub32) \
  V(Arm64Mneg) \
  V(Arm64Mneg32) \
  V(Arm64Idiv) \
  V(Arm64Idiv32) \
  V(Arm64Udiv) \
  V(Arm64Udiv32) \
  V(Arm64Imod) \
  V(Arm64Imod32) \
  V(Arm64Umod) \
  V(Arm64Umod32) \
  V(Arm64Not) \
  V(Arm64Not32) \
  V(Arm64Lsl) \
  V(Arm64Lsl32) \
  V(Arm64Lsr) \
  V(Arm64Lsr32) \
  V(Arm64Asr) \
  V(Arm64Asr32) \
  V(Arm64Ror) \
  V(Arm64Ror32) \
  V(Arm64Mov32) \
  V(Arm64Sxtb32) \
  V(Arm64Sxth32) \
  V(Arm64Sxtb) \
  V(Arm64Sxth) \
  V(Arm64Sxtw) \
  V(Arm64Sbfx) \
  V(Arm64Sbfx32) \
  V(Arm64Ubfx) \
  V(Arm64Ubfx32) \
  V(Arm64Ubfiz32) \
  V(Arm64Sbfiz) \
  V(Arm64Bfi) \
  V(Arm64Rbit) \
  V(Arm64Rbit32) \
  V(Arm64Rev) \
  V(Arm64Rev32) \
  V(Arm64TestAndBranch32) \
  V(Arm64TestAndBranch) \
  V(Arm64CompareAndBranch32) \
  V(Arm64CompareAndBranch) \
  V(Arm64Claim) \
  V(Arm64Poke) \
  V(Arm64PokePair) \
  V(Arm64Peek) \
  V(Arm64Float16RoundDown) \
  V(Arm64Float16RoundUp) \
  V(Arm64Float16RoundTruncate) \
  V(Arm64Float16RoundTiesEven) \
  V(Arm64Float32Cmp) \
  V(Arm64Float32Add) \
  V(Arm64Float32Sub) \
  V(Arm64Float32Mul) \
  V(Arm64Float32Div) \
  V(Arm64Float32Abs) \
  V(Arm64Float32Abd) \
  V(Arm64Float32Neg) \
  V(Arm64Float32Sqrt) \
  V(Arm64Float32Fnmul) \
  V(Arm64Float32RoundDown) \
  V(Arm64Float32Max) \
  V(Arm64Float32Min) \
  V(Arm64Float64Cmp) \
  V(Arm64Float64Add) \
  V(Arm64Float64Sub) \
  V(Arm64Float64Mul) \
  V(Arm64Float64Div) \
  V(Arm64Float64Mod) \
  V(Arm64Float64Max) \
  V(Arm64Float64Min) \
  V(Arm64Float64Abs) \
  V(Arm64Float64Abd) \
  V(Arm64Float64Neg) \
  V(Arm64Float64Sqrt) \
  V(Arm64Float64Fnmul) \
  V(Arm64Float64RoundDown) \
  V(Arm64Float32RoundUp) \
  V(Arm64Float64RoundUp) \
  V(Arm64Float64RoundTiesAway) \
  V(Arm64Float32RoundTruncate) \
  V(Arm64Float64RoundTruncate) \
  V(Arm64Float32RoundTiesEven) \
  V(Arm64Float64RoundTiesEven) \
  V(Arm64Float64SilenceNaN) \
  V(Arm64Float32ToFloat64) \
  V(Arm64Float64ToFloat32) \
  V(Arm64Float64ToFloat16RawBits) \
  V(Arm64Float16RawBitsToFloat64) \
  V(Arm64Float32ToInt32) \
  V(Arm64Float64ToInt32) \
  V(Arm64Float32ToUint32) \
  V(Arm64Float64ToUint32) \
  V(Arm64Float32ToInt64) \
  V(Arm64Float64ToInt64) \
  V(Arm64Float32ToUint64) \
  V(Arm64Float64ToUint64) \
  V(Arm64Int32ToFloat32) \
  V(Arm64Int32ToFloat64) \
  V(Arm64Int64ToFloat32) \
  V(Arm64Int64ToFloat64) \
  V(Arm64Uint32ToFloat32) \
  V(Arm64Uint32ToFloat64) \
  V(Arm64Uint64ToFloat32) \
  V(Arm64Uint64ToFloat64) \
  V(Arm64Float64ExtractLowWord32) \
  V(Arm64Float64ExtractHighWord32) \
  V(Arm64Float64InsertLowWord32) \
  V(Arm64Float64InsertHighWord32) \
  V(Arm64Float64MoveU64) \
  V(Arm64U64MoveFloat64) \
  V(Arm64LdarDecompressTaggedSigned) \
  V(Arm64LdarDecompressTagged) \
  V(Arm64StlrCompressTagged) \
  V(Arm64StrIndirectPointer) \
  V(Arm64LdrDecodeSandboxedPointer) \
  V(Arm64StrEncodeSandboxedPointer) \
  V(Arm64DmbIsh) \
  V(Arm64DsbIsb) \
  V(Arm64Word64AtomicAddUint64) \
  V(Arm64Word64AtomicSubUint64) \
  V(Arm64Word64AtomicAndUint64) \
  V(Arm64Word64AtomicOrUint64) \
  V(Arm64Word64AtomicXorUint64) \
  V(Arm64Word64AtomicExchangeUint64) \
  V(Arm64Word64AtomicCompareExchangeUint64) \
  IF_WASM(TARGET_ARCH_SIMD_OPCODE_LIST, V)

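// Note (illustrative addition): IF_WASM conditionally includes list entries.
// It is defined in the shared V8 headers so that it forwards to V(...) when
// V8_ENABLE_WEBASSEMBLY is set and expands to nothing otherwise, roughly:
//
//   #if V8_ENABLE_WEBASSEMBLY
//   #define IF_WASM(V, ...) EXPAND(V(__VA_ARGS__))
//   #else
//   #define IF_WASM(V, ...)
//   #endif
//
// so the SIMD opcodes above only exist in builds with WebAssembly enabled.
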
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// R = register
// O = register or stack slot
// D = double register
// I = immediate (handle, external, int32)
// MRI = [register + immediate]
// MRR = [register + register]
#define TARGET_ADDRESSING_MODE_LIST(V) \
  V(MRI)              /* [%r0 + K] */ \
  V(MRR)              /* [%r0 + %r1] */ \
  V(Operand2_R_LSL_I) /* %r0 LSL K */ \
  V(Operand2_R_LSR_I) /* %r0 LSR K */ \
  V(Operand2_R_ASR_I) /* %r0 ASR K */ \
  V(Operand2_R_ROR_I) /* %r0 ROR K */ \
  V(Operand2_R_UXTB)  /* %r0 UXTB (unsigned extend byte) */ \
  V(Operand2_R_UXTH)  /* %r0 UXTH (unsigned extend halfword) */ \
  V(Operand2_R_SXTB)  /* %r0 SXTB (signed extend byte) */ \
  V(Operand2_R_SXTH)  /* %r0 SXTH (signed extend halfword) */ \
  V(Operand2_R_SXTW)  /* %r0 SXTW (signed extend word) */ \
  V(Root)             /* [%rr + K] */

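// Note (illustrative addition): an addressing mode is packed into the same
// InstructionCode word as the opcode. After register allocation, the code
// generator switches on it to pick the assembler form, roughly:
//
//   switch (AddressingModeField::decode(instr->opcode())) {
//     case kMode_MRI:  // e.g. Ldr(dst, MemOperand(base, immediate))
//       ...
//     case kMode_MRR:  // e.g. Ldr(dst, MemOperand(base, index))
//       ...
//   }
//
// AddressingModeField and the kMode_* constants come from the shared
// instruction-codes.h; this is a sketch, not the exact V8 code.
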
}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_BACKEND_ARM64_INSTRUCTION_CODES_ARM64_H_