v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
instruction-codes-x64.h
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_BACKEND_X64_INSTRUCTION_CODES_X64_H_
#define V8_COMPILER_BACKEND_X64_INSTRUCTION_CODES_X64_H_

namespace v8 {
namespace internal {
namespace compiler {

// X64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.

// Opcodes that support a MemoryAccessMode.
#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
  V(X64F64x2PromoteLowF32x4) \
  V(X64Movb) \
  V(X64Movdqu) \
  V(X64Movl) \
  V(X64Movq) \
  V(X64Movsd) \
  V(X64Movss) \
  V(X64Movsh) \
  V(X64Movsxbl) \
  V(X64Movsxbq) \
  V(X64Movsxlq) \
  V(X64Movsxwl) \
  V(X64Movsxwq) \
  V(X64Movw) \
  V(X64Movzxbl) \
  V(X64Movzxbq) \
  V(X64Movzxwl) \
  V(X64Movzxwq) \
  V(X64Pextrb) \
  V(X64Pextrw) \
  V(X64Pinsrb) \
  V(X64Pinsrd) \
  V(X64Pinsrq) \
  V(X64Pinsrw) \
  V(X64S128Load16Splat) \
  V(X64S128Load16x4S) \
  V(X64S128Load16x4U) \
  V(X64S128Load32Splat) \
  V(X64S128Load32x2S) \
  V(X64S128Load32x2U) \
  V(X64S128Load64Splat) \
  V(X64S128Load8Splat) \
  V(X64S128Load8x8S) \
  V(X64S128Load8x8U) \
  V(X64S128Store32Lane) \
  V(X64S128Store64Lane) \
  V(X64Word64AtomicStoreWord64) \
  V(X64Word64AtomicAddUint64) \
  V(X64Word64AtomicSubUint64) \
  V(X64Word64AtomicAndUint64) \
  V(X64Word64AtomicOrUint64) \
  V(X64Word64AtomicXorUint64) \
  V(X64Word64AtomicExchangeUint64) \
  V(X64Word64AtomicCompareExchangeUint64) \
  V(X64Movdqu256) \
  V(X64MovqDecompressTaggedSigned) \
  V(X64MovqDecompressTagged) \
  V(X64MovqCompressTagged) \
  V(X64MovqDecompressProtected) \
  V(X64S256Load8Splat) \
  V(X64S256Load16Splat) \
  V(X64S256Load32Splat) \
  V(X64S256Load64Splat) \
  V(X64S256Load8x16S) \
  V(X64S256Load8x16U) \
  V(X64S256Load8x8U) \
  V(X64S256Load16x8S) \
  V(X64S256Load16x8U) \
  V(X64S256Load32x4S) \
  V(X64S256Load32x4U) \
  V(SSEFloat32Add) \
  V(SSEFloat32Sub) \
  V(SSEFloat32Mul) \
  V(SSEFloat32Div) \
  V(SSEFloat64Add) \
  V(SSEFloat64Sub) \
  V(SSEFloat64Mul) \
  V(SSEFloat64Div) \
  V(AVXFloat32Add) \
  V(AVXFloat32Sub) \
  V(AVXFloat32Mul) \
  V(AVXFloat32Div) \
  V(AVXFloat64Add) \
  V(AVXFloat64Sub) \
  V(AVXFloat64Mul) \
  V(AVXFloat64Div)

#define TARGET_ARCH_OPCODE_LIST(V) \
  TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
  V(X64Add) \
  V(X64Add32) \
  V(X64And) \
  V(X64And32) \
  V(X64Cmp) \
  V(X64Cmp32) \
  V(X64Cmp16) \
  V(X64Cmp8) \
  V(X64Test) \
  V(X64Test32) \
  V(X64Test16) \
  V(X64Test8) \
  V(X64Or) \
  V(X64Or32) \
  V(X64Xor) \
  V(X64Xor32) \
  V(X64Sub) \
  V(X64Sub32) \
  V(X64Imul) \
  V(X64Imul32) \
  V(X64ImulHigh32) \
  V(X64ImulHigh64) \
  V(X64UmulHigh32) \
  V(X64UmulHigh64) \
  V(X64Idiv) \
  V(X64Idiv32) \
  V(X64Udiv) \
  V(X64Udiv32) \
  V(X64Not) \
  V(X64Not32) \
  V(X64Neg) \
  V(X64Neg32) \
  V(X64Shl) \
  V(X64Shl32) \
  V(X64Shr) \
  V(X64Shr32) \
  V(X64Sar) \
  V(X64Sar32) \
  V(X64Rol) \
  V(X64Rol32) \
  V(X64Ror) \
  V(X64Ror32) \
  V(X64Lzcnt) \
  V(X64Lzcnt32) \
  V(X64Tzcnt) \
  V(X64Tzcnt32) \
  V(X64Popcnt) \
  V(X64Popcnt32) \
  V(X64Bswap) \
  V(X64Bswap32) \
  V(X64MFence) \
  V(X64LFence) \
  V(SSEFloat32Cmp) \
  V(SSEFloat32Sqrt) \
  V(SSEFloat32ToFloat64) \
  V(SSEFloat32ToInt32) \
  V(SSEFloat32ToUint32) \
  V(SSEFloat32Round) \
  V(SSEFloat64Cmp) \
  V(SSEFloat64Mod) \
  V(SSEFloat64Sqrt) \
  V(SSEFloat64Round) \
  V(SSEFloat32Max) \
  V(SSEFloat64Max) \
  V(SSEFloat32Min) \
  V(SSEFloat64Min) \
  V(SSEFloat64ToFloat32) \
  V(SSEFloat64ToFloat16RawBits) \
  V(SSEFloat16RawBitsToFloat64) \
  V(SSEFloat64ToInt32) \
  V(SSEFloat64ToUint32) \
  V(SSEFloat32ToInt64) \
  V(SSEFloat64ToInt64) \
  V(SSEFloat32ToUint64) \
  V(SSEFloat64ToUint64) \
  V(SSEInt32ToFloat64) \
  V(SSEInt32ToFloat32) \
  V(SSEInt64ToFloat32) \
  V(SSEInt64ToFloat64) \
  V(SSEUint64ToFloat32) \
  V(SSEUint64ToFloat64) \
  V(SSEUint32ToFloat64) \
  V(SSEUint32ToFloat32) \
  V(SSEFloat64ExtractLowWord32) \
  V(SSEFloat64ExtractHighWord32) \
  V(SSEFloat64InsertLowWord32) \
  V(SSEFloat64InsertHighWord32) \
  V(SSEFloat64LoadLowWord32) \
  V(SSEFloat64SilenceNaN) \
  V(AVXFloat32Cmp) \
  V(AVXFloat64Cmp) \
  V(X64Float64Abs) \
  V(X64Float64Neg) \
  V(X64Float32Abs) \
  V(X64Float32Neg) \
  V(X64MovqStoreIndirectPointer) \
  V(X64MovqEncodeSandboxedPointer) \
  V(X64MovqDecodeSandboxedPointer) \
  V(X64BitcastFI) \
  V(X64BitcastDL) \
  V(X64BitcastIF) \
  V(X64BitcastLD) \
  V(X64Lea32) \
  V(X64Lea) \
  V(X64Dec32) \
  V(X64Inc32) \
  V(X64Push) \
  V(X64Poke) \
  V(X64Peek) \
  V(X64Cvttps2dq) \
  V(X64Cvttpd2dq) \
  V(X64I32x4TruncF64x2UZero) \
  V(X64I32x4TruncF32x4U) \
  V(X64I32x8TruncF32x8U) \
  V(X64FSplat) \
  V(X64FExtractLane) \
  V(X64FReplaceLane) \
  V(X64FAbs) \
  V(X64FNeg) \
  V(X64FSqrt) \
  V(X64FAdd) \
  V(X64FSub) \
  V(X64FMul) \
  V(X64FDiv) \
  V(X64FMin) \
  V(X64FMax) \
  V(X64FEq) \
  V(X64FNe) \
  V(X64FLt) \
  V(X64FLe) \
  V(X64F64x2Qfma) \
  V(X64F64x2Qfms) \
  V(X64Minpd) \
  V(X64Maxpd) \
  V(X64F64x2Round) \
  V(X64F64x2ConvertLowI32x4S) \
  V(X64F64x4ConvertI32x4S) \
  V(X64F64x2ConvertLowI32x4U) \
  V(X64F32x4SConvertI32x4) \
  V(X64F32x8SConvertI32x8) \
  V(X64F32x4UConvertI32x4) \
  V(X64F32x8UConvertI32x8) \
  V(X64F32x4Qfma) \
  V(X64F32x4Qfms) \
  V(X64Minps) \
  V(X64Maxps) \
  V(X64F32x4Round) \
  V(X64F32x4DemoteF64x2Zero) \
  V(X64F32x4DemoteF64x4) \
  V(X64F16x8Round) \
  V(X64I16x8SConvertF16x8) \
  V(X64I16x8UConvertF16x8) \
  V(X64F16x8SConvertI16x8) \
  V(X64F16x8UConvertI16x8) \
  V(X64F16x8DemoteF32x4Zero) \
  V(X64F16x8DemoteF64x2Zero) \
  V(X64F32x4PromoteLowF16x8) \
  V(X64F16x8Qfma) \
  V(X64F16x8Qfms) \
  V(X64Minph) \
  V(X64Maxph) \
  V(X64ISplat) \
  V(X64IExtractLane) \
  V(X64IAbs) \
  V(X64INeg) \
  V(X64IBitMask) \
  V(X64IShl) \
  V(X64IShrS) \
  V(X64IAdd) \
  V(X64ISub) \
  V(X64IMul) \
  V(X64IEq) \
  V(X64IGtS) \
  V(X64IGeS) \
  V(X64INe) \
  V(X64IShrU) \
  V(X64I64x2ExtMulLowI32x4S) \
  V(X64I64x2ExtMulHighI32x4S) \
  V(X64I64x2ExtMulLowI32x4U) \
  V(X64I64x2ExtMulHighI32x4U) \
  V(X64I64x2SConvertI32x4Low) \
  V(X64I64x2SConvertI32x4High) \
  V(X64I64x4SConvertI32x4) \
  V(X64I64x2UConvertI32x4Low) \
  V(X64I64x2UConvertI32x4High) \
  V(X64I64x4UConvertI32x4) \
  V(X64I32x4SConvertF32x4) \
  V(X64I32x8SConvertF32x8) \
  V(X64I32x4SConvertI16x8Low) \
  V(X64I32x4SConvertI16x8High) \
  V(X64I32x8SConvertI16x8) \
  V(X64IMinS) \
  V(X64IMaxS) \
  V(X64I32x4UConvertF32x4) \
  V(X64I32x8UConvertF32x8) \
  V(X64I32x4UConvertI16x8Low) \
  V(X64I32x4UConvertI16x8High) \
  V(X64I32x8UConvertI16x8) \
  V(X64IMinU) \
  V(X64IMaxU) \
  V(X64IGtU) \
  V(X64IGeU) \
  V(X64I32x4DotI16x8S) \
  V(X64I32x8DotI16x16S) \
  V(X64I32x4DotI8x16I7x16AddS) \
  V(X64I32x4ExtMulLowI16x8S) \
  V(X64I32x4ExtMulHighI16x8S) \
  V(X64I32x4ExtMulLowI16x8U) \
  V(X64I32x4ExtMulHighI16x8U) \
  V(X64I32x4ExtAddPairwiseI16x8S) \
  V(X64I32x8ExtAddPairwiseI16x16S) \
  V(X64I32x4ExtAddPairwiseI16x8U) \
  V(X64I32x8ExtAddPairwiseI16x16U) \
  V(X64I32x4TruncSatF64x2SZero) \
  V(X64I32x4TruncSatF64x2UZero) \
  V(X64I32X4ShiftZeroExtendI8x16) \
  V(X64IExtractLaneS) \
  V(X64I16x8SConvertI8x16Low) \
  V(X64I16x8SConvertI8x16High) \
  V(X64I16x16SConvertI8x16) \
  V(X64I16x8SConvertI32x4) \
  V(X64I16x16SConvertI32x8) \
  V(X64IAddSatS) \
  V(X64ISubSatS) \
  V(X64I16x8UConvertI8x16Low) \
  V(X64I16x8UConvertI8x16High) \
  V(X64I16x16UConvertI8x16) \
  V(X64I16x8UConvertI32x4) \
  V(X64I16x16UConvertI32x8) \
  V(X64IAddSatU) \
  V(X64ISubSatU) \
  V(X64IRoundingAverageU) \
  V(X64I16x8ExtMulLowI8x16S) \
  V(X64I16x8ExtMulHighI8x16S) \
  V(X64I16x8ExtMulLowI8x16U) \
  V(X64I16x8ExtMulHighI8x16U) \
  V(X64I16x8ExtAddPairwiseI8x16S) \
  V(X64I16x16ExtAddPairwiseI8x32S) \
  V(X64I16x8ExtAddPairwiseI8x16U) \
  V(X64I16x16ExtAddPairwiseI8x32U) \
  V(X64I16x8Q15MulRSatS) \
  V(X64I16x8RelaxedQ15MulRS) \
  V(X64I16x8DotI8x16I7x16S) \
  V(X64I8x16SConvertI16x8) \
  V(X64I8x32SConvertI16x16) \
  V(X64I8x16UConvertI16x8) \
  V(X64I8x32UConvertI16x16) \
  V(X64S128Const) \
  V(X64S256Const) \
  V(X64SZero) \
  V(X64SAllOnes) \
  V(X64SNot) \
  V(X64SAnd) \
  V(X64SOr) \
  V(X64SXor) \
  V(X64SSelect) \
  V(X64SAndNot) \
  V(X64I8x16Swizzle) \
  V(X64I8x16Shuffle) \
  V(X64Vpshufd) \
  V(X64I8x16Popcnt) \
  V(X64Shufps) \
  V(X64S32x4Rotate) \
  V(X64S32x4Swizzle) \
  V(X64S32x4Shuffle) \
  V(X64S16x8Blend) \
  V(X64S16x8HalfShuffle1) \
  V(X64S16x8HalfShuffle2) \
  V(X64S8x16Alignr) \
  V(X64S16x8Dup) \
  V(X64S8x16Dup) \
  V(X64S16x8UnzipHigh) \
  V(X64S16x8UnzipLow) \
  V(X64S8x16UnzipHigh) \
  V(X64S8x16UnzipLow) \
  V(X64S64x2UnpackHigh) \
  V(X64S32x4UnpackHigh) \
  V(X64S16x8UnpackHigh) \
  V(X64S8x16UnpackHigh) \
  V(X64S32x8UnpackHigh) \
  V(X64S64x2UnpackLow) \
  V(X64S32x4UnpackLow) \
  V(X64S16x8UnpackLow) \
  V(X64S8x16UnpackLow) \
  V(X64S32x8UnpackLow) \
  V(X64S8x16TransposeLow) \
  V(X64S8x16TransposeHigh) \
  V(X64S8x8Reverse) \
  V(X64S8x4Reverse) \
  V(X64S8x2Reverse) \
  V(X64V128AnyTrue) \
  V(X64IAllTrue) \
  V(X64Blendvpd) \
  V(X64Blendvps) \
  V(X64Pblendvb) \
  V(X64I64x4ExtMulI32x4S) \
  V(X64I64x4ExtMulI32x4U) \
  V(X64I32x8ExtMulI16x8S) \
  V(X64I32x8ExtMulI16x8U) \
  V(X64I16x16ExtMulI8x16S) \
  V(X64I16x16ExtMulI8x16U) \
  V(X64TraceInstruction) \
  V(X64F32x8Pmin) \
  V(X64F32x8Pmax) \
  V(X64F64x4Pmin) \
  V(X64F64x4Pmax) \
  V(X64ExtractF128) \
  V(X64F32x8Qfma) \
  V(X64F32x8Qfms) \
  V(X64F64x4Qfma) \
  V(X64F64x4Qfms) \
  V(X64InsertI128) \
  V(X64I32x8DotI8x32I7x32AddS) \
  V(X64I16x16DotI8x32I7x32S)

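// Illustrative sketch (not part of the original header): the lists above are
// X-macros; a consumer supplies V() to stamp out one item per opcode. The
// enum name and the k-prefix below are assumptions about how the shared,
// architecture-independent instruction-codes.h typically expands such a
// list, shown here only as an example of the pattern.
enum IllustrativeX64ArchOpcode {
#define ILLUSTRATIVE_DECLARE_OPCODE(Name) k##Name,
  TARGET_ARCH_OPCODE_LIST(ILLUSTRATIVE_DECLARE_OPCODE)
#undef ILLUSTRATIVE_DECLARE_OPCODE
};
// e.g. kX64Add, kX64Movl, kX64FAdd, ... become enumerators that the backend
// can switch over when emitting assembly.
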
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// M = memory operand
// R = base register
// N = index register * N for N in {1, 2, 4, 8}
// I = immediate displacement (32-bit signed integer)

#define TARGET_ADDRESSING_MODE_LIST(V)       \
  V(MR)   /* [%r1            ] */            \
  V(MRI)  /* [%r1         + K] */            \
  V(MR1)  /* [%r1 + %r2*1    ] */            \
  V(MR2)  /* [%r1 + %r2*2    ] */            \
  V(MR4)  /* [%r1 + %r2*4    ] */            \
  V(MR8)  /* [%r1 + %r2*8    ] */            \
  V(MR1I) /* [%r1 + %r2*1 + K] */            \
  V(MR2I) /* [%r1 + %r2*2 + K] */            \
  V(MR4I) /* [%r1 + %r2*4 + K] */            \
  V(MR8I) /* [%r1 + %r2*8 + K] */            \
  V(M1)   /* [      %r2*1    ] */            \
  V(M2)   /* [      %r2*2    ] */            \
  V(M4)   /* [      %r2*4    ] */            \
  V(M8)   /* [      %r2*8    ] */            \
  V(M1I)  /* [      %r2*1 + K] */            \
  V(M2I)  /* [      %r2*2 + K] */            \
  V(M4I)  /* [      %r2*4 + K] */            \
  V(M8I)  /* [      %r2*8 + K] */            \
  V(Root) /* [%root       + K] */            \
  V(MCR)  /* [%compressed_base + %r1] */     \
  V(MCRI) /* [%compressed_base + %r1 + K] */

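// Illustrative sketch (not part of the original header): per the comment
// above, the chosen addressing mode is packed into the InstructionCode next
// to the opcode. The enum, field offset, and helper below are assumptions
// used only to make that encoding concrete; the real bit-field layout lives
// in the shared instruction-codes.h and may differ.
enum IllustrativeAddressingMode : unsigned {
  kIllustrativeMode_None,
#define ILLUSTRATIVE_DECLARE_MODE(Name) kIllustrativeMode_##Name,
  TARGET_ADDRESSING_MODE_LIST(ILLUSTRATIVE_DECLARE_MODE)
#undef ILLUSTRATIVE_DECLARE_MODE
};

// Combine an opcode value with an addressing mode, assuming (hypothetically)
// that the low 9 bits hold the opcode and the next bits hold the mode.
constexpr unsigned IllustrativeEncodeInstructionCode(
    unsigned opcode, IllustrativeAddressingMode mode) {
  return opcode | (static_cast<unsigned>(mode) << 9);
}
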
}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_BACKEND_X64_INSTRUCTION_CODES_X64_H_