v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
constants-ppc.h
Go to the documentation of this file.
1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_CODEGEN_PPC_CONSTANTS_PPC_H_
6#define V8_CODEGEN_PPC_CONSTANTS_PPC_H_
7
8#include <stdint.h>
9
10#include "src/base/logging.h"
11#include "src/base/macros.h"
13#include "src/common/globals.h"
14
// UNIMPLEMENTED_ macro for PPC.
// In DEBUG builds, prints file/line/function of the unimplemented code path;
// in release builds it expands to nothing so call sites compile away.
#ifdef DEBUG
#define UNIMPLEMENTED_PPC()                                                  \
  v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n",   \
                       __FILE__, __LINE__, __func__)
#else
#define UNIMPLEMENTED_PPC()
#endif
23
// ABI characterization macros: each resolves to 0/1 (or a register number)
// describing the calling convention for the (host, target) combination.

// Function descriptors are used on AIX and on big-endian ELFv1
// (_CALL_ELF absent or == 1).
#if V8_HOST_ARCH_PPC64 &&                                        \
    (V8_OS_AIX || (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN && \
                   (!defined(_CALL_ELF) || _CALL_ELF == 1)))
#define ABI_USES_FUNCTION_DESCRIPTORS 1
#else
#define ABI_USES_FUNCTION_DESCRIPTORS 0
#endif

#if !V8_HOST_ARCH_PPC64 || V8_OS_AIX || V8_TARGET_ARCH_PPC64
#define ABI_PASSES_HANDLES_IN_REGS 1
#else
#define ABI_PASSES_HANDLES_IN_REGS 0
#endif

#if !V8_HOST_ARCH_PPC64 || !V8_TARGET_ARCH_PPC64 || V8_TARGET_LITTLE_ENDIAN || \
    (defined(_CALL_ELF) && _CALL_ELF == 2)
#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 1
#else
#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 0
#endif

#if !V8_HOST_ARCH_PPC64 ||   \
    (V8_TARGET_ARCH_PPC64 && \
     (V8_TARGET_LITTLE_ENDIAN || (defined(_CALL_ELF) && _CALL_ELF == 2)))
#define ABI_CALL_VIA_IP 1
#else
#define ABI_CALL_VIA_IP 0
#endif

// Register holding the TOC/constant-pool pointer: r2 on 64-bit ELF/AIX
// targets, r13 otherwise.
#if !V8_HOST_ARCH_PPC64 || V8_OS_AIX || V8_TARGET_ARCH_PPC64
#define ABI_TOC_REGISTER 2
#else
#define ABI_TOC_REGISTER 13
#endif
58namespace v8 {
59namespace internal {
60
// TODO(sigurds): Change this value once we use relative jumps.
constexpr size_t kMaxPCRelativeCodeRangeInMB = 0;

// Used to encode a boolean value when emitting 32 bit
// opcodes which will indicate the presence of function descriptors
// NOTE(review): this copy appears to be missing the constants that this
// comment documents (kHasFunctionDescriptorBitShift / ...BitMask in upstream
// V8) -- restore from upstream before relying on this section.

// Number of general-purpose registers.
const int kNumRegisters = 32;

// FP support.
const int kNumDoubleRegisters = 32;

// Sentinel meaning "no register assigned".
const int kNoRegister = -1;

// Used in embedded constant pool builder - max reach in bits for
// various load instructions (one less due to unsigned)
const int kLoadPtrMaxReachBits = 15;
// NOTE(review): restored from upstream V8 (missing in this copy) -- verify.
const int kLoadDoubleMaxReachBits = 15;

// The actual value of the kRootRegister is offset from the IsolateData's start
// to take advantage of negative displacement values.
constexpr int kRootRegisterBias = 128;
// sign-extend the least significant 5-bits of value <imm>
#define SIGN_EXT_IMM5(imm) ((static_cast<int>(imm) << 27) >> 27)

// sign-extend the least significant 16-bits of value <imm>
#define SIGN_EXT_IMM16(imm) ((static_cast<int>(imm) << 16) >> 16)

// sign-extend the least significant 18-bits of value <imm>
// (the original comment said 14-bits, but 32 - 14 keeps 18 bits,
// matching the macro's name)
#define SIGN_EXT_IMM18(imm) ((static_cast<int>(imm) << 14) >> 14)

// sign-extend the least significant 22-bits of value <imm>
#define SIGN_EXT_IMM22(imm) ((static_cast<int>(imm) << 10) >> 10)

// sign-extend the least significant 26-bits of value <imm>
#define SIGN_EXT_IMM26(imm) ((static_cast<int>(imm) << 6) >> 6)

// sign-extend the least significant 34-bits of prefix+suffix value <imm>
#define SIGN_EXT_IMM34(imm) ((static_cast<int64_t>(imm) << 30) >> 30)
104
105// -----------------------------------------------------------------------------
106// Conditions.
107
108// Defines constants and accessor classes to assemble, disassemble and
109// simulate PPC instructions.
110//
111// Section references in the code refer to the "PowerPC Microprocessor
112// Family: The Programmer's Reference Guide" from 10/95
113// https://www-01.ibm.com/chips/techlib/techlib.nsf/techdocs/852569B20050FF778525699600741775/$file/prg.pdf
114//
115
// Constants for specific fields are defined in their respective named enums.
// General constants are in an anonymous enum in class Instr.
enum Condition : int {
  kNoCondition = -1,
  eq = 0,         // Equal.
  ne = 1,         // Not equal.
  ge = 2,         // Greater or equal.
  lt = 3,         // Less than.
  gt = 4,         // Greater than.
  le = 5,         // Less then or equal
  unordered = 6,  // Floating-point unordered
  ordered = 7,    // NOTE(review): restored; missing from this copy.
  overflow = 8,   // Summary overflow
  nooverflow = 9,  // NOTE(review): restored; missing from this copy.
  al = 10,        // Always.

  // Unified cross-platform condition names/aliases.
  // Do not set unsigned constants equal to their signed variants.
  // We need to be able to differentiate between signed and unsigned enum
  // constants in order to emit the right instructions (i.e CmpS64 vs CmpU64).
  kEqual = eq,
  kNotEqual = ne,
  kLessThan = lt,
  // NOTE(review): the following aliases were missing from this copy and are
  // restored from upstream V8; they are referenced by is_signed() and
  // to_condition() below. Verify values against upstream.
  kGreaterThan = gt,
  kLessThanEqual = le,
  kGreaterThanEqual = ge,
  kOverflow = overflow,
  kNoOverflow = nooverflow,
  kUnsignedLessThan = 11,
  kUnsignedGreaterThan = 12,
  kUnsignedLessThanEqual = 13,
  kUnsignedGreaterThanEqual = 14,
  kZero = 15,
  kNotZero = 16,
};
151
153 switch (cond) {
155 return lt;
157 return gt;
159 return le;
161 return ge;
162 case kZero:
163 return eq;
164 case kNotZero:
165 return ne;
166 default:
167 break;
168 }
169 return cond;
170}
171
172inline bool is_signed(Condition cond) {
173 switch (cond) {
174 case kEqual:
175 case kNotEqual:
176 case kLessThan:
177 case kGreaterThan:
178 case kLessThanEqual:
180 case kOverflow:
181 case kNoOverflow:
182 case kZero:
183 case kNotZero:
184 return true;
185
190 return false;
191
192 default:
193 UNREACHABLE();
194 }
195}
196
198 DCHECK(cond != al);
199 return static_cast<Condition>(cond ^ ne);
200}
201
// -----------------------------------------------------------------------------
// Instructions encoding.

// Instr is merely used by the Assembler to distinguish 32bit integers
// representing instructions from usual 32 bit values.
// Instruction objects are pointers to 32bit values, and provide methods to
// access the various ISA fields.
using Instr = uint32_t;
210
// XX3-form VSX scalar opcodes: V(mnemonic, enum-name, opcode-bits).
#define PPC_XX3_OPCODE_SCALAR_LIST(V) \
  /* VSX Scalar Add Double-Precision */ \
  V(xsadddp, XSADDDP, 0xF0000100) \
  /* VSX Scalar Add Single-Precision */ \
  V(xsaddsp, XSADDSP, 0xF0000000) \
  /* VSX Scalar Compare Ordered Double-Precision */ \
  V(xscmpodp, XSCMPODP, 0xF0000158) \
  /* VSX Scalar Compare Unordered Double-Precision */ \
  V(xscmpudp, XSCMPUDP, 0xF0000118) \
  /* VSX Scalar Copy Sign Double-Precision */ \
  V(xscpsgndp, XSCPSGNDP, 0xF0000580) \
  /* VSX Scalar Divide Double-Precision */ \
  V(xsdivdp, XSDIVDP, 0xF00001C0) \
  /* VSX Scalar Divide Single-Precision */ \
  V(xsdivsp, XSDIVSP, 0xF00000C0) \
  /* VSX Scalar Multiply-Add Type-A Double-Precision */ \
  V(xsmaddadp, XSMADDADP, 0xF0000108) \
  /* VSX Scalar Multiply-Add Type-A Single-Precision */ \
  V(xsmaddasp, XSMADDASP, 0xF0000008) \
  /* VSX Scalar Multiply-Add Type-M Double-Precision */ \
  V(xsmaddmdp, XSMADDMDP, 0xF0000148) \
  /* VSX Scalar Multiply-Add Type-M Single-Precision */ \
  V(xsmaddmsp, XSMADDMSP, 0xF0000048) \
  /* VSX Scalar Maximum Double-Precision */ \
  V(xsmaxdp, XSMAXDP, 0xF0000500) \
  /* VSX Scalar Minimum Double-Precision */ \
  V(xsmindp, XSMINDP, 0xF0000540) \
  /* VSX Scalar Multiply-Subtract Type-A Double-Precision */ \
  V(xsmsubadp, XSMSUBADP, 0xF0000188) \
  /* VSX Scalar Multiply-Subtract Type-A Single-Precision */ \
  V(xsmsubasp, XSMSUBASP, 0xF0000088) \
  /* VSX Scalar Multiply-Subtract Type-M Double-Precision */ \
  V(xsmsubmdp, XSMSUBMDP, 0xF00001C8) \
  /* VSX Scalar Multiply-Subtract Type-M Single-Precision */ \
  V(xsmsubmsp, XSMSUBMSP, 0xF00000C8) \
  /* VSX Scalar Multiply Double-Precision */ \
  V(xsmuldp, XSMULDP, 0xF0000180) \
  /* VSX Scalar Multiply Single-Precision */ \
  V(xsmulsp, XSMULSP, 0xF0000080) \
  /* VSX Scalar Negative Multiply-Add Type-A Double-Precision */ \
  V(xsnmaddadp, XSNMADDADP, 0xF0000508) \
  /* VSX Scalar Negative Multiply-Add Type-A Single-Precision */ \
  V(xsnmaddasp, XSNMADDASP, 0xF0000408) \
  /* VSX Scalar Negative Multiply-Add Type-M Double-Precision */ \
  V(xsnmaddmdp, XSNMADDMDP, 0xF0000548) \
  /* VSX Scalar Negative Multiply-Add Type-M Single-Precision */ \
  V(xsnmaddmsp, XSNMADDMSP, 0xF0000448) \
  /* VSX Scalar Negative Multiply-Subtract Type-A Double-Precision */ \
  V(xsnmsubadp, XSNMSUBADP, 0xF0000588) \
  /* VSX Scalar Negative Multiply-Subtract Type-A Single-Precision */ \
  V(xsnmsubasp, XSNMSUBASP, 0xF0000488) \
  /* VSX Scalar Negative Multiply-Subtract Type-M Double-Precision */ \
  V(xsnmsubmdp, XSNMSUBMDP, 0xF00005C8) \
  /* VSX Scalar Negative Multiply-Subtract Type-M Single-Precision */ \
  V(xsnmsubmsp, XSNMSUBMSP, 0xF00004C8) \
  /* VSX Scalar Reciprocal Estimate Double-Precision */ \
  V(xsredp, XSREDP, 0xF0000168) \
  /* VSX Scalar Subtract Double-Precision */ \
  V(xssubdp, XSSUBDP, 0xF0000140) \
  /* VSX Scalar Subtract Single-Precision */ \
  V(xssubsp, XSSUBSP, 0xF0000040) \
  /* VSX Scalar Test for software Divide Double-Precision */ \
  V(xstdivdp, XSTDIVDP, 0xF00001E8)
274
// XX3-form VSX vector opcodes, A-form: V(mnemonic, enum-name, opcode-bits).
#define PPC_XX3_OPCODE_VECTOR_A_FORM_LIST(V) \
  /* VSX Vector Compare Equal To Single-Precision */ \
  V(xvcmpeqsp, XVCMPEQSP, 0xF0000218) \
  /* VSX Vector Compare Equal To Double-Precision */ \
  V(xvcmpeqdp, XVCMPEQDP, 0xF0000318)
280
// XX3-form VSX vector opcodes, B-form: V(mnemonic, enum-name, opcode-bits).
#define PPC_XX3_OPCODE_VECTOR_B_FORM_LIST(V) \
  /* VSX Vector Add Double-Precision */ \
  V(xvadddp, XVADDDP, 0xF0000300) \
  /* VSX Vector Add Single-Precision */ \
  V(xvaddsp, XVADDSP, 0xF0000200) \
  /* VSX Vector Compare Equal To Double-Precision & record CR6 */ \
  V(xvcmpeqdpx, XVCMPEQDPx, 0xF0000718) \
  /* VSX Vector Compare Equal To Single-Precision & record CR6 */ \
  V(xvcmpeqspx, XVCMPEQSPx, 0xF0000618) \
  /* VSX Vector Compare Greater Than or Equal To Double-Precision */ \
  V(xvcmpgedp, XVCMPGEDP, 0xF0000398) \
  /* VSX Vector Compare Greater Than or Equal To Double-Precision & record */ \
  /* CR6 */ \
  V(xvcmpgedpx, XVCMPGEDPx, 0xF0000798) \
  /* VSX Vector Compare Greater Than or Equal To Single-Precision */ \
  V(xvcmpgesp, XVCMPGESP, 0xF0000298) \
  /* VSX Vector Compare Greater Than or Equal To Single-Precision & record */ \
  /* CR6 */ \
  V(xvcmpgespx, XVCMPGESPx, 0xF0000698) \
  /* VSX Vector Compare Greater Than Double-Precision */ \
  V(xvcmpgtdp, XVCMPGTDP, 0xF0000358) \
  /* VSX Vector Compare Greater Than Double-Precision & record CR6 */ \
  V(xvcmpgtdpx, XVCMPGTDPx, 0xF0000758) \
  /* VSX Vector Compare Greater Than Single-Precision */ \
  V(xvcmpgtsp, XVCMPGTSP, 0xF0000258) \
  /* VSX Vector Compare Greater Than Single-Precision & record CR6 */ \
  V(xvcmpgtspx, XVCMPGTSPx, 0xF0000658) \
  /* VSX Vector Copy Sign Double-Precision */ \
  V(xvcpsgndp, XVCPSGNDP, 0xF0000780) \
  /* VSX Vector Copy Sign Single-Precision */ \
  V(xvcpsgnsp, XVCPSGNSP, 0xF0000680) \
  /* VSX Vector Divide Double-Precision */ \
  V(xvdivdp, XVDIVDP, 0xF00003C0) \
  /* VSX Vector Divide Single-Precision */ \
  V(xvdivsp, XVDIVSP, 0xF00002C0) \
  /* VSX Vector Multiply-Add Type-A Double-Precision */ \
  V(xvmaddadp, XVMADDADP, 0xF0000308) \
  /* VSX Vector Multiply-Add Type-A Single-Precision */ \
  V(xvmaddasp, XVMADDASP, 0xF0000208) \
  /* VSX Vector Multiply-Add Type-M Double-Precision */ \
  V(xvmaddmdp, XVMADDMDP, 0xF0000348) \
  /* VSX Vector Multiply-Add Type-M Single-Precision */ \
  V(xvmaddmsp, XVMADDMSP, 0xF0000248) \
  /* VSX Vector Maximum Double-Precision */ \
  V(xvmaxdp, XVMAXDP, 0xF0000700) \
  /* VSX Vector Maximum Single-Precision */ \
  V(xvmaxsp, XVMAXSP, 0xF0000600) \
  /* VSX Vector Minimum Double-Precision */ \
  V(xvmindp, XVMINDP, 0xF0000740) \
  /* VSX Vector Minimum Single-Precision */ \
  V(xvminsp, XVMINSP, 0xF0000640) \
  /* VSX Vector Multiply-Subtract Type-A Double-Precision */ \
  V(xvmsubadp, XVMSUBADP, 0xF0000388) \
  /* VSX Vector Multiply-Subtract Type-A Single-Precision */ \
  V(xvmsubasp, XVMSUBASP, 0xF0000288) \
  /* VSX Vector Multiply-Subtract Type-M Double-Precision */ \
  V(xvmsubmdp, XVMSUBMDP, 0xF00003C8) \
  /* VSX Vector Multiply-Subtract Type-M Single-Precision */ \
  V(xvmsubmsp, XVMSUBMSP, 0xF00002C8) \
  /* VSX Vector Multiply Double-Precision */ \
  V(xvmuldp, XVMULDP, 0xF0000380) \
  /* VSX Vector Multiply Single-Precision */ \
  V(xvmulsp, XVMULSP, 0xF0000280) \
  /* VSX Vector Negative Multiply-Add Type-A Double-Precision */ \
  V(xvnmaddadp, XVNMADDADP, 0xF0000708) \
  /* VSX Vector Negative Multiply-Add Type-A Single-Precision */ \
  V(xvnmaddasp, XVNMADDASP, 0xF0000608) \
  /* VSX Vector Negative Multiply-Add Type-M Double-Precision */ \
  V(xvnmaddmdp, XVNMADDMDP, 0xF0000748) \
  /* VSX Vector Negative Multiply-Add Type-M Single-Precision */ \
  V(xvnmaddmsp, XVNMADDMSP, 0xF0000648) \
  /* VSX Vector Negative Multiply-Subtract Type-A Double-Precision */ \
  V(xvnmsubadp, XVNMSUBADP, 0xF0000788) \
  /* VSX Vector Negative Multiply-Subtract Type-A Single-Precision */ \
  V(xvnmsubasp, XVNMSUBASP, 0xF0000688) \
  /* VSX Vector Negative Multiply-Subtract Type-M Double-Precision */ \
  V(xvnmsubmdp, XVNMSUBMDP, 0xF00007C8) \
  /* VSX Vector Negative Multiply-Subtract Type-M Single-Precision */ \
  V(xvnmsubmsp, XVNMSUBMSP, 0xF00006C8) \
  /* VSX Vector Reciprocal Estimate Double-Precision */ \
  V(xvredp, XVREDP, 0xF0000368) \
  /* VSX Vector Subtract Double-Precision */ \
  V(xvsubdp, XVSUBDP, 0xF0000340) \
  /* VSX Vector Subtract Single-Precision */ \
  V(xvsubsp, XVSUBSP, 0xF0000240) \
  /* VSX Vector Test for software Divide Double-Precision */ \
  V(xvtdivdp, XVTDIVDP, 0xF00003E8) \
  /* VSX Vector Test for software Divide Single-Precision */ \
  V(xvtdivsp, XVTDIVSP, 0xF00002E8) \
  /* VSX Logical AND */ \
  V(xxland, XXLAND, 0xF0000410) \
  /* VSX Logical AND with Complement */ \
  V(xxlandc, XXLANDC, 0xF0000450) \
  /* VSX Logical Equivalence */ \
  V(xxleqv, XXLEQV, 0xF00005D0) \
  /* VSX Logical NAND */ \
  V(xxlnand, XXLNAND, 0xF0000590) \
  /* VSX Logical NOR */ \
  V(xxlnor, XXLNOR, 0xF0000510) \
  /* VSX Logical OR */ \
  V(xxlor, XXLOR, 0xF0000490) \
  /* VSX Logical OR with Complement */ \
  V(xxlorc, XXLORC, 0xF0000550) \
  /* VSX Logical XOR */ \
  V(xxlxor, XXLXOR, 0xF00004D0) \
  /* VSX Merge High Word */ \
  V(xxmrghw, XXMRGHW, 0xF0000090) \
  /* VSX Merge Low Word */ \
  V(xxmrglw, XXMRGLW, 0xF0000190) \
  /* VSX Permute Doubleword Immediate */ \
  V(xxpermdi, XXPERMDI, 0xF0000050) \
  /* VSX Shift Left Double by Word Immediate */ \
  V(xxsldwi, XXSLDWI, 0xF0000010) \
  /* VSX Splat Word */ \
  V(xxspltw, XXSPLTW, 0xF0000290)
396
// All XX3-form VSX vector opcodes (A-form + B-form).
#define PPC_XX3_OPCODE_VECTOR_LIST(V)  \
  PPC_XX3_OPCODE_VECTOR_A_FORM_LIST(V) \
  PPC_XX3_OPCODE_VECTOR_B_FORM_LIST(V)
400
// Z23-form (decimal floating-point) opcodes: V(mnemonic, enum-name, bits).
#define PPC_Z23_OPCODE_LIST(V) \
  /* Decimal Quantize */ \
  V(dqua, DQUA, 0xEC000006) \
  /* Decimal Quantize Immediate */ \
  V(dquai, DQUAI, 0xEC000086) \
  /* Decimal Quantize Immediate Quad */ \
  V(dquaiq, DQUAIQ, 0xFC000086) \
  /* Decimal Quantize Quad */ \
  V(dquaq, DQUAQ, 0xFC000006) \
  /* Decimal Floating Round To FP Integer Without Inexact */ \
  V(drintn, DRINTN, 0xEC0001C6) \
  /* Decimal Floating Round To FP Integer Without Inexact Quad */ \
  V(drintnq, DRINTNQ, 0xFC0001C6) \
  /* Decimal Floating Round To FP Integer With Inexact */ \
  V(drintx, DRINTX, 0xEC0000C6) \
  /* Decimal Floating Round To FP Integer With Inexact Quad */ \
  V(drintxq, DRINTXQ, 0xFC0000C6) \
  /* Decimal Floating Reround */ \
  V(drrnd, DRRND, 0xEC000046) \
  /* Decimal Floating Reround Quad */ \
  V(drrndq, DRRNDQ, 0xFC000046)
422
// Z22-form (decimal floating-point) opcodes: V(mnemonic, enum-name, bits).
#define PPC_Z22_OPCODE_LIST(V) \
  /* Decimal Floating Shift Coefficient Left Immediate */ \
  V(dscli, DSCLI, 0xEC000084) \
  /* Decimal Floating Shift Coefficient Left Immediate Quad */ \
  V(dscliq, DSCLIQ, 0xFC000084) \
  /* Decimal Floating Shift Coefficient Right Immediate */ \
  V(dscri, DSCRI, 0xEC0000C4) \
  /* Decimal Floating Shift Coefficient Right Immediate Quad */ \
  V(dscriq, DSCRIQ, 0xFC0000C4) \
  /* Decimal Floating Test Data Class */ \
  V(dtstdc, DTSTDC, 0xEC000184) \
  /* Decimal Floating Test Data Class Quad */ \
  V(dtstdcq, DTSTDCQ, 0xFC000184) \
  /* Decimal Floating Test Data Group */ \
  V(dtstdg, DTSTDG, 0xEC0001C4) \
  /* Decimal Floating Test Data Group Quad */ \
  V(dtstdgq, DTSTDGQ, 0xFC0001C4)
440
// XX2-form VSX vector opcodes, A-form: V(mnemonic, enum-name, opcode-bits).
#define PPC_XX2_OPCODE_VECTOR_A_FORM_LIST(V) \
  /* VSX Vector Absolute Value Double-Precision */ \
  V(xvabsdp, XVABSDP, 0xF0000764) \
  /* VSX Vector Negate Double-Precision */ \
  V(xvnegdp, XVNEGDP, 0xF00007E4) \
  /* VSX Vector Square Root Double-Precision */ \
  V(xvsqrtdp, XVSQRTDP, 0xF000032C) \
  /* VSX Vector Absolute Value Single-Precision */ \
  V(xvabssp, XVABSSP, 0xF0000664) \
  /* VSX Vector Negate Single-Precision */ \
  V(xvnegsp, XVNEGSP, 0xF00006E4) \
  /* VSX Vector Reciprocal Estimate Single-Precision */ \
  V(xvresp, XVRESP, 0xF0000268) \
  /* VSX Vector Reciprocal Square Root Estimate Single-Precision */ \
  V(xvrsqrtesp, XVRSQRTESP, 0xF0000228) \
  /* VSX Vector Square Root Single-Precision */ \
  V(xvsqrtsp, XVSQRTSP, 0xF000022C) \
  /* VSX Vector Convert Single-Precision to Signed Fixed-Point Word */ \
  /* Saturate */ \
  V(xvcvspsxws, XVCVSPSXWS, 0xF0000260) \
  /* VSX Vector Convert Single-Precision to Unsigned Fixed-Point Word */ \
  /* Saturate */ \
  V(xvcvspuxws, XVCVSPUXWS, 0xF0000220) \
  /* VSX Vector Convert Signed Fixed-Point Word to Single-Precision */ \
  V(xvcvsxwsp, XVCVSXWSP, 0xF00002E0) \
  /* VSX Vector Convert Unsigned Fixed-Point Word to Single-Precision */ \
  V(xvcvuxwsp, XVCVUXWSP, 0xF00002A0) \
  /* VSX Vector Round to Double-Precision Integer toward +Infinity */ \
  V(xvrdpip, XVRDPIP, 0xF00003A4) \
  /* VSX Vector Round to Double-Precision Integer toward -Infinity */ \
  V(xvrdpim, XVRDPIM, 0xF00003E4) \
  /* VSX Vector Round to Double-Precision Integer toward Zero */ \
  V(xvrdpiz, XVRDPIZ, 0xF0000364) \
  /* VSX Vector Round to Double-Precision Integer */ \
  V(xvrdpi, XVRDPI, 0xF0000324) \
  /* VSX Vector Round to Single-Precision Integer toward +Infinity */ \
  V(xvrspip, XVRSPIP, 0xF00002A4) \
  /* VSX Vector Round to Single-Precision Integer toward -Infinity */ \
  V(xvrspim, XVRSPIM, 0xF00002E4) \
  /* VSX Vector Round to Single-Precision Integer toward Zero */ \
  V(xvrspiz, XVRSPIZ, 0xF0000264) \
  /* VSX Vector Round to Single-Precision Integer */ \
  V(xvrspi, XVRSPI, 0xF0000224) \
  /* VSX Vector Convert Signed Fixed-Point Doubleword to Double-Precision */ \
  V(xvcvsxddp, XVCVSXDDP, 0xF00007E0) \
  /* VSX Vector Convert Unsigned Fixed-Point Doubleword to Double- */ \
  /* Precision */ \
  V(xvcvuxddp, XVCVUXDDP, 0xF00007A0) \
  /* VSX Vector Convert Single-Precision to Double-Precision */ \
  V(xvcvspdp, XVCVSPDP, 0xF0000724) \
  /* VSX Vector Convert Double-Precision to Single-Precision */ \
  V(xvcvdpsp, XVCVDPSP, 0xF0000624) \
  /* VSX Vector Convert Double-Precision to Signed Fixed-Point Word */ \
  /* Saturate */ \
  V(xvcvdpsxws, XVCVDPSXWS, 0xF0000360) \
  /* VSX Vector Convert Double-Precision to Unsigned Fixed-Point Word */ \
  /* Saturate */ \
  V(xvcvdpuxws, XVCVDPUXWS, 0xF0000320)
499
// XX2-form VSX scalar opcodes, A-form: V(mnemonic, enum-name, opcode-bits).
#define PPC_XX2_OPCODE_SCALAR_A_FORM_LIST(V) \
  /* VSX Scalar Convert Double-Precision to Single-Precision format Non- */ \
  /* signalling */ \
  V(xscvdpspn, XSCVDPSPN, 0xF000042C) \
  /* VSX Scalar Convert Single-Precision to Double-Precision format Non- */ \
  /* signalling */ \
  V(xscvspdpn, XSCVSPDPN, 0xF000052C)
507
// XX2-form byte-reverse opcodes, B-form: V(mnemonic, enum-name, opcode-bits).
#define PPC_XX2_OPCODE_B_FORM_LIST(V) \
  /* Vector Byte-Reverse Quadword */ \
  V(xxbrq, XXBRQ, 0xF01F076C) \
  /* Vector Byte-Reverse Doubleword */ \
  V(xxbrd, XXBRD, 0xF017076C) \
  /* Vector Byte-Reverse Word */ \
  V(xxbrw, XXBRW, 0xF00F076C) \
  /* Vector Byte-Reverse Halfword */ \
  V(xxbrh, XXBRH, 0xF007076C)
517
// XX2-form opcodes currently unused by the assembler:
// V(mnemonic, enum-name, opcode-bits).
#define PPC_XX2_OPCODE_UNUSED_LIST(V) \
  /* VSX Scalar Square Root Double-Precision */ \
  V(xssqrtdp, XSSQRTDP, 0xF000012C) \
  /* VSX Scalar Reciprocal Estimate Single-Precision */ \
  V(xsresp, XSRESP, 0xF0000068) \
  /* VSX Scalar Reciprocal Square Root Estimate Single-Precision */ \
  V(xsrsqrtesp, XSRSQRTESP, 0xF0000028) \
  /* VSX Scalar Square Root Single-Precision */ \
  V(xssqrtsp, XSSQRTSP, 0xF000002C) \
  /* VSX Scalar Absolute Value Double-Precision */ \
  V(xsabsdp, XSABSDP, 0xF0000564) \
  /* VSX Scalar Convert Double-Precision to Single-Precision */ \
  V(xscvdpsp, XSCVDPSP, 0xF0000424) \
  /* VSX Scalar Convert Double-Precision to Signed Fixed-Point Doubleword */ \
  /* Saturate */ \
  V(xscvdpsxds, XSCVDPSXDS, 0xF0000560) \
  /* VSX Scalar Convert Double-Precision to Signed Fixed-Point Word */ \
  /* Saturate */ \
  V(xscvdpsxws, XSCVDPSXWS, 0xF0000160) \
  /* VSX Scalar Convert Double-Precision to Unsigned Fixed-Point */ \
  /* Doubleword Saturate */ \
  V(xscvdpuxds, XSCVDPUXDS, 0xF0000520) \
  /* VSX Scalar Convert Double-Precision to Unsigned Fixed-Point Word */ \
  /* Saturate */ \
  V(xscvdpuxws, XSCVDPUXWS, 0xF0000120) \
  /* VSX Scalar Convert Single-Precision to Double-Precision (p=1) */ \
  V(xscvspdp, XSCVSPDP, 0xF0000524) \
  /* VSX Scalar Convert Signed Fixed-Point Doubleword to Double-Precision */ \
  V(xscvsxddp, XSCVSXDDP, 0xF00005E0) \
  /* VSX Scalar Convert Signed Fixed-Point Doubleword to Single-Precision */ \
  V(xscvsxdsp, XSCVSXDSP, 0xF00004E0) \
  /* VSX Scalar Convert Unsigned Fixed-Point Doubleword to Double- */ \
  /* Precision */ \
  V(xscvuxddp, XSCVUXDDP, 0xF00005A0) \
  /* VSX Scalar Convert Unsigned Fixed-Point Doubleword to Single- */ \
  /* Precision */ \
  V(xscvuxdsp, XSCVUXDSP, 0xF00004A0) \
  /* VSX Scalar Negative Absolute Value Double-Precision */ \
  V(xsnabsdp, XSNABSDP, 0xF00005A4) \
  /* VSX Scalar Negate Double-Precision */ \
  V(xsnegdp, XSNEGDP, 0xF00005E4) \
  /* VSX Scalar Round to Double-Precision Integer */ \
  V(xsrdpi, XSRDPI, 0xF0000124) \
  /* VSX Scalar Round to Double-Precision Integer using Current rounding */ \
  /* mode */ \
  V(xsrdpic, XSRDPIC, 0xF00001AC) \
  /* VSX Scalar Round to Double-Precision Integer toward -Infinity */ \
  V(xsrdpim, XSRDPIM, 0xF00001E4) \
  /* VSX Scalar Round to Double-Precision Integer toward +Infinity */ \
  V(xsrdpip, XSRDPIP, 0xF00001A4) \
  /* VSX Scalar Round to Double-Precision Integer toward Zero */ \
  V(xsrdpiz, XSRDPIZ, 0xF0000164) \
  /* VSX Scalar Round to Single-Precision */ \
  V(xsrsp, XSRSP, 0xF0000464) \
  /* VSX Scalar Reciprocal Square Root Estimate Double-Precision */ \
  V(xsrsqrtedp, XSRSQRTEDP, 0xF0000128) \
  /* VSX Scalar Test for software Square Root Double-Precision */ \
  V(xstsqrtdp, XSTSQRTDP, 0xF00001A8) \
  /* VSX Vector Convert Double-Precision to Signed Fixed-Point Doubleword */ \
  /* Saturate */ \
  V(xvcvdpsxds, XVCVDPSXDS, 0xF0000760) \
  /* VSX Vector Convert Double-Precision to Unsigned Fixed-Point */ \
  /* Doubleword Saturate */ \
  V(xvcvdpuxds, XVCVDPUXDS, 0xF0000720) \
  /* VSX Vector Convert Single-Precision to Signed Fixed-Point Doubleword */ \
  /* Saturate */ \
  V(xvcvspsxds, XVCVSPSXDS, 0xF0000660) \
  /* VSX Vector Convert Single-Precision to Unsigned Fixed-Point */ \
  /* Doubleword Saturate */ \
  V(xvcvspuxds, XVCVSPUXDS, 0xF0000620) \
  /* VSX Vector Convert Signed Fixed-Point Doubleword to Single-Precision */ \
  V(xvcvsxdsp, XVCVSXDSP, 0xF00006E0) \
  /* VSX Vector Convert Signed Fixed-Point Word to Double-Precision */ \
  V(xvcvsxwdp, XVCVSXWDP, 0xF00003E0) \
  /* VSX Vector Convert Unsigned Fixed-Point Doubleword to Single- */ \
  /* Precision */ \
  V(xvcvuxdsp, XVCVUXDSP, 0xF00006A0) \
  /* VSX Vector Convert Unsigned Fixed-Point Word to Double-Precision */ \
  V(xvcvuxwdp, XVCVUXWDP, 0xF00003A0) \
  /* VSX Vector Negative Absolute Value Double-Precision */ \
  V(xvnabsdp, XVNABSDP, 0xF00007A4) \
  /* VSX Vector Negative Absolute Value Single-Precision */ \
  V(xvnabssp, XVNABSSP, 0xF00006A4) \
  /* VSX Vector Round to Double-Precision Integer using Current rounding */ \
  /* mode */ \
  V(xvrdpic, XVRDPIC, 0xF00003AC) \
  /* VSX Vector Round to Single-Precision Integer using Current rounding */ \
  /* mode */ \
  V(xvrspic, XVRSPIC, 0xF00002AC) \
  /* VSX Vector Reciprocal Square Root Estimate Double-Precision */ \
  V(xvrsqrtedp, XVRSQRTEDP, 0xF0000328) \
  /* VSX Vector Test for software Square Root Double-Precision */ \
  V(xvtsqrtdp, XVTSQRTDP, 0xF00003A8) \
  /* VSX Vector Test for software Square Root Single-Precision */ \
  V(xvtsqrtsp, XVTSQRTSP, 0xF00002A8) \
  /* Vector Splat Immediate Byte */ \
  V(xxspltib, XXSPLTIB, 0xF00002D0)
615
// All XX2-form opcodes (vector A-form + scalar A-form + B-form + unused).
#define PPC_XX2_OPCODE_LIST(V)         \
  PPC_XX2_OPCODE_VECTOR_A_FORM_LIST(V) \
  PPC_XX2_OPCODE_SCALAR_A_FORM_LIST(V) \
  PPC_XX2_OPCODE_B_FORM_LIST(V)        \
  PPC_XX2_OPCODE_UNUSED_LIST(V)
621
622#define PPC_EVX_OPCODE_LIST(V) \
623 /* Vector Load Double Word into Double Word by External PID Indexed */ \
624 V(evlddepx, EVLDDEPX, 0x7C00063E) \
625 /* Vector Store Double of Double by External PID Indexed */ \
626 V(evstddepx, EVSTDDEPX, 0x7C00073E) \
627 /* Bit Reversed Increment */ \
628 V(brinc, BRINC, 0x1000020F) \
629 /* Vector Absolute Value */ \
630 V(evabs, EVABS, 0x10000208) \
631 /* Vector Add Immediate Word */ \
632 V(evaddiw, EVADDIW, 0x10000202) \
633 /* Vector Add Signed, Modulo, Integer to Accumulator Word */ \
634 V(evaddsmiaaw, EVADDSMIAAW, 0x100004C9) \
635 /* Vector Add Signed, Saturate, Integer to Accumulator Word */ \
636 V(evaddssiaaw, EVADDSSIAAW, 0x100004C1) \
637 /* Vector Add Unsigned, Modulo, Integer to Accumulator Word */ \
638 V(evaddumiaaw, EVADDUMIAAW, 0x100004C8) \
639 /* Vector Add Unsigned, Saturate, Integer to Accumulator Word */ \
640 V(evaddusiaaw, EVADDUSIAAW, 0x100004C0) \
641 /* Vector Add Word */ \
642 V(evaddw, EVADDW, 0x10000200) \
643 /* Vector AND */ \
644 V(evand, EVAND, 0x10000211) \
645 /* Vector AND with Complement */ \
646 V(evandc, EVANDC, 0x10000212) \
647 /* Vector Compare Equal */ \
648 V(evcmpeq, EVCMPEQ, 0x10000234) \
649 /* Vector Compare Greater Than Signed */ \
650 V(evcmpgts, EVCMPGTS, 0x10000231) \
651 /* Vector Compare Greater Than Unsigned */ \
652 V(evcmpgtu, EVCMPGTU, 0x10000230) \
653 /* Vector Compare Less Than Signed */ \
654 V(evcmplts, EVCMPLTS, 0x10000233) \
655 /* Vector Compare Less Than Unsigned */ \
656 V(evcmpltu, EVCMPLTU, 0x10000232) \
657 /* Vector Count Leading Signed Bits Word */ \
658 V(evcntlsw, EVCNTLSW, 0x1000020E) \
659 /* Vector Count Leading Zeros Word */ \
660 V(evcntlzw, EVCNTLZW, 0x1000020D) \
661 /* Vector Divide Word Signed */ \
662 V(evdivws, EVDIVWS, 0x100004C6) \
663 /* Vector Divide Word Unsigned */ \
664 V(evdivwu, EVDIVWU, 0x100004C7) \
665 /* Vector Equivalent */ \
666 V(eveqv, EVEQV, 0x10000219) \
667 /* Vector Extend Sign Byte */ \
668 V(evextsb, EVEXTSB, 0x1000020A) \
669 /* Vector Extend Sign Half Word */ \
670 V(evextsh, EVEXTSH, 0x1000020B) \
671 /* Vector Load Double Word into Double Word */ \
672 V(evldd, EVLDD, 0x10000301) \
673 /* Vector Load Double Word into Double Word Indexed */ \
674 V(evlddx, EVLDDX, 0x10000300) \
675 /* Vector Load Double into Four Half Words */ \
676 V(evldh, EVLDH, 0x10000305) \
677 /* Vector Load Double into Four Half Words Indexed */ \
678 V(evldhx, EVLDHX, 0x10000304) \
679 /* Vector Load Double into Two Words */ \
680 V(evldw, EVLDW, 0x10000303) \
681 /* Vector Load Double into Two Words Indexed */ \
682 V(evldwx, EVLDWX, 0x10000302) \
683 /* Vector Load Half Word into Half Words Even and Splat */ \
684 V(evlhhesplat, EVLHHESPLAT, 0x10000309) \
685 /* Vector Load Half Word into Half Words Even and Splat Indexed */ \
686 V(evlhhesplatx, EVLHHESPLATX, 0x10000308) \
687 /* Vector Load Half Word into Half Word Odd Signed and Splat */ \
688 V(evlhhossplat, EVLHHOSSPLAT, 0x1000030F) \
689 /* Vector Load Half Word into Half Word Odd Signed and Splat Indexed */ \
690 V(evlhhossplatx, EVLHHOSSPLATX, 0x1000030E) \
691 /* Vector Load Half Word into Half Word Odd Unsigned and Splat */ \
692 V(evlhhousplat, EVLHHOUSPLAT, 0x1000030D) \
693 /* Vector Load Half Word into Half Word Odd Unsigned and Splat Indexed */ \
694 V(evlhhousplatx, EVLHHOUSPLATX, 0x1000030C) \
695 /* Vector Load Word into Two Half Words Even */ \
696 V(evlwhe, EVLWHE, 0x10000311) \
697 /* Vector Load Word into Two Half Words Odd Signed (with sign extension) */ \
698 V(evlwhos, EVLWHOS, 0x10000317) \
699 /* Vector Load Word into Two Half Words Odd Signed Indexed (with sign */ \
700 /* extension) */ \
701 V(evlwhosx, EVLWHOSX, 0x10000316) \
702 /* Vector Load Word into Two Half Words Odd Unsigned (zero-extended) */ \
703 V(evlwhou, EVLWHOU, 0x10000315) \
704 /* Vector Load Word into Two Half Words Odd Unsigned Indexed (zero- */ \
705 /* extended) */ \
706 V(evlwhoux, EVLWHOUX, 0x10000314) \
707 /* Vector Load Word into Two Half Words and Splat */ \
708 V(evlwhsplat, EVLWHSPLAT, 0x1000031D) \
709 /* Vector Load Word into Two Half Words and Splat Indexed */ \
710 V(evlwhsplatx, EVLWHSPLATX, 0x1000031C) \
711 /* Vector Load Word into Word and Splat */ \
712 V(evlwwsplat, EVLWWSPLAT, 0x10000319) \
713 /* Vector Load Word into Word and Splat Indexed */ \
714 V(evlwwsplatx, EVLWWSPLATX, 0x10000318) \
715 /* Vector Merge High */ \
716 V(evmergehi, EVMERGEHI, 0x1000022C) \
717 /* Vector Merge High/Low */ \
718 V(evmergehilo, EVMERGEHILO, 0x1000022E) \
719 /* Vector Merge Low */ \
720 V(evmergelo, EVMERGELO, 0x1000022D) \
721 /* Vector Merge Low/High */ \
722 V(evmergelohi, EVMERGELOHI, 0x1000022F) \
723 /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Fractional */ \
724 /* and Accumulate */ \
725 V(evmhegsmfaa, EVMHEGSMFAA, 0x1000052B) \
726 /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Fractional */ \
727 /* and Accumulate Negative */ \
728 V(evmhegsmfan, EVMHEGSMFAN, 0x100005AB) \
729 /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Integer */ \
730 /* and Accumulate */ \
731 V(evmhegsmiaa, EVMHEGSMIAA, 0x10000529) \
732 /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Integer */ \
733 /* and Accumulate Negative */ \
734 V(evmhegsmian, EVMHEGSMIAN, 0x100005A9) \
735 /* Vector Multiply Half Words, Even, Guarded, Unsigned, Modulo, Integer */ \
736 /* and Accumulate */ \
737 V(evmhegumiaa, EVMHEGUMIAA, 0x10000528) \
738 /* Vector Multiply Half Words, Even, Guarded, Unsigned, Modulo, Integer */ \
739 /* and Accumulate Negative */ \
740 V(evmhegumian, EVMHEGUMIAN, 0x100005A8) \
741 /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional */ \
742 V(evmhesmf, EVMHESMF, 0x1000040B) \
743 /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional to */ \
744 /* Accumulator */ \
745 V(evmhesmfa, EVMHESMFA, 0x1000042B) \
746 /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional and */ \
747 /* Accumulate into Words */ \
748 V(evmhesmfaaw, EVMHESMFAAW, 0x1000050B) \
749 /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional and */ \
750 /* Accumulate Negative into Words */ \
751 V(evmhesmfanw, EVMHESMFANW, 0x1000058B) \
752 /* Vector Multiply Half Words, Even, Signed, Modulo, Integer */ \
753 V(evmhesmi, EVMHESMI, 0x10000409) \
754 /* Vector Multiply Half Words, Even, Signed, Modulo, Integer to */ \
755 /* Accumulator */ \
756 V(evmhesmia, EVMHESMIA, 0x10000429) \
757 /* Vector Multiply Half Words, Even, Signed, Modulo, Integer and */ \
758 /* Accumulate into Words */ \
759 V(evmhesmiaaw, EVMHESMIAAW, 0x10000509) \
760 /* Vector Multiply Half Words, Even, Signed, Modulo, Integer and */ \
761 /* Accumulate Negative into Words */ \
762 V(evmhesmianw, EVMHESMIANW, 0x10000589) \
763 /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional */ \
764 V(evmhessf, EVMHESSF, 0x10000403) \
765 /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional to */ \
766 /* Accumulator */ \
767 V(evmhessfa, EVMHESSFA, 0x10000423) \
768 /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional and */ \
769 /* Accumulate into Words */ \
770 V(evmhessfaaw, EVMHESSFAAW, 0x10000503) \
771 /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional and */ \
772 /* Accumulate Negative into Words */ \
773 V(evmhessfanw, EVMHESSFANW, 0x10000583) \
774 /* Vector Multiply Half Words, Even, Signed, Saturate, Integer and */ \
775 /* Accumulate into Words */ \
776 V(evmhessiaaw, EVMHESSIAAW, 0x10000501) \
777 /* Vector Multiply Half Words, Even, Signed, Saturate, Integer and */ \
778 /* Accumulate Negative into Words */ \
779 V(evmhessianw, EVMHESSIANW, 0x10000581) \
780 /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer */ \
781 V(evmheumi, EVMHEUMI, 0x10000408) \
782 /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer to */ \
783 /* Accumulator */ \
784 V(evmheumia, EVMHEUMIA, 0x10000428) \
785 /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer and */ \
786 /* Accumulate into Words */ \
787 V(evmheumiaaw, EVMHEUMIAAW, 0x10000508) \
788 /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer and */ \
789 /* Accumulate Negative into Words */ \
790 V(evmheumianw, EVMHEUMIANW, 0x10000588) \
791 /* Vector Multiply Half Words, Even, Unsigned, Saturate, Integer and */ \
792 /* Accumulate into Words */ \
793 V(evmheusiaaw, EVMHEUSIAAW, 0x10000500) \
794 /* Vector Multiply Half Words, Even, Unsigned, Saturate, Integer and */ \
795 /* Accumulate Negative into Words */ \
796 V(evmheusianw, EVMHEUSIANW, 0x10000580) \
797 /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Fractional */ \
798 /* and Accumulate */ \
799 V(evmhogsmfaa, EVMHOGSMFAA, 0x1000052F) \
800 /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Fractional */ \
801 /* and Accumulate Negative */ \
802 V(evmhogsmfan, EVMHOGSMFAN, 0x100005AF) \
803 /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Integer, */ \
804 /* and Accumulate */ \
805 V(evmhogsmiaa, EVMHOGSMIAA, 0x1000052D) \
806 /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Integer and */ \
807 /* Accumulate Negative */ \
808 V(evmhogsmian, EVMHOGSMIAN, 0x100005AD) \
809 /* Vector Multiply Half Words, Odd, Guarded, Unsigned, Modulo, Integer */ \
810 /* and Accumulate */ \
811 V(evmhogumiaa, EVMHOGUMIAA, 0x1000052C) \
812 /* Vector Multiply Half Words, Odd, Guarded, Unsigned, Modulo, Integer */ \
813 /* and Accumulate Negative */ \
814 V(evmhogumian, EVMHOGUMIAN, 0x100005AC) \
815 /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional */ \
816 V(evmhosmf, EVMHOSMF, 0x1000040F) \
817 /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional to */ \
818 /* Accumulator */ \
819 V(evmhosmfa, EVMHOSMFA, 0x1000042F) \
820 /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional and */ \
821 /* Accumulate into Words */ \
822 V(evmhosmfaaw, EVMHOSMFAAW, 0x1000050F) \
823 /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional and */ \
824 /* Accumulate Negative into Words */ \
825 V(evmhosmfanw, EVMHOSMFANW, 0x1000058F) \
826 /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer */ \
827 V(evmhosmi, EVMHOSMI, 0x1000040D) \
828 /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer to */ \
829 /* Accumulator */ \
830 V(evmhosmia, EVMHOSMIA, 0x1000042D) \
831 /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer and */ \
832 /* Accumulate into Words */ \
833 V(evmhosmiaaw, EVMHOSMIAAW, 0x1000050D) \
834 /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer and */ \
835 /* Accumulate Negative into Words */ \
836 V(evmhosmianw, EVMHOSMIANW, 0x1000058D) \
837 /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional */ \
838 V(evmhossf, EVMHOSSF, 0x10000407) \
839 /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional to */ \
840 /* Accumulator */ \
841 V(evmhossfa, EVMHOSSFA, 0x10000427) \
842 /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional and */ \
843 /* Accumulate into Words */ \
844 V(evmhossfaaw, EVMHOSSFAAW, 0x10000507) \
845 /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional and */ \
846 /* Accumulate Negative into Words */ \
847 V(evmhossfanw, EVMHOSSFANW, 0x10000587) \
848 /* Vector Multiply Half Words, Odd, Signed, Saturate, Integer and */ \
849 /* Accumulate into Words */ \
850 V(evmhossiaaw, EVMHOSSIAAW, 0x10000505) \
851 /* Vector Multiply Half Words, Odd, Signed, Saturate, Integer and */ \
852 /* Accumulate Negative into Words */ \
853 V(evmhossianw, EVMHOSSIANW, 0x10000585) \
854 /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer */ \
855 V(evmhoumi, EVMHOUMI, 0x1000040C) \
856 /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer to */ \
857 /* Accumulator */ \
858 V(evmhoumia, EVMHOUMIA, 0x1000042C) \
859 /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer and */ \
860 /* Accumulate into Words */ \
861 V(evmhoumiaaw, EVMHOUMIAAW, 0x1000050C) \
862 /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer and */ \
863 /* Accumulate Negative into Words */ \
864 V(evmhoumianw, EVMHOUMIANW, 0x1000058C) \
865 /* Vector Multiply Half Words, Odd, Unsigned, Saturate, Integer and */ \
866 /* Accumulate into Words */ \
867 V(evmhousiaaw, EVMHOUSIAAW, 0x10000504) \
868 /* Vector Multiply Half Words, Odd, Unsigned, Saturate, Integer and */ \
869 /* Accumulate Negative into Words */ \
870 V(evmhousianw, EVMHOUSIANW, 0x10000584) \
871 /* Initialize Accumulator */ \
872 V(evmra, EVMRA, 0x100004C4) \
873 /* Vector Multiply Word High Signed, Modulo, Fractional */ \
874 V(evmwhsmf, EVMWHSMF, 0x1000044F) \
875 /* Vector Multiply Word High Signed, Modulo, Fractional to Accumulator */ \
876 V(evmwhsmfa, EVMWHSMFA, 0x1000046F) \
877 /* Vector Multiply Word High Signed, Modulo, Integer */ \
878 V(evmwhsmi, EVMWHSMI, 0x1000044D) \
879 /* Vector Multiply Word High Signed, Modulo, Integer to Accumulator */ \
880 V(evmwhsmia, EVMWHSMIA, 0x1000046D) \
881 /* Vector Multiply Word High Signed, Saturate, Fractional */ \
882 V(evmwhssf, EVMWHSSF, 0x10000447) \
883 /* Vector Multiply Word High Signed, Saturate, Fractional to Accumulator */ \
884 V(evmwhssfa, EVMWHSSFA, 0x10000467) \
885 /* Vector Multiply Word High Unsigned, Modulo, Integer */ \
886 V(evmwhumi, EVMWHUMI, 0x1000044C) \
887 /* Vector Multiply Word High Unsigned, Modulo, Integer to Accumulator */ \
888 V(evmwhumia, EVMWHUMIA, 0x1000046C) \
889 /* Vector Multiply Word Low Signed, Modulo, Integer and Accumulate in */ \
890 /* Words */ \
891 V(evmwlsmiaaw, EVMWLSMIAAW, 0x10000549) \
892 /* Vector Multiply Word Low Signed, Modulo, Integer and Accumulate */ \
893 /* Negative in Words */ \
894 V(evmwlsmianw, EVMWLSMIANW, 0x100005C9) \
895 /* Vector Multiply Word Low Signed, Saturate, Integer and Accumulate in */ \
896 /* Words */ \
897 V(evmwlssiaaw, EVMWLSSIAAW, 0x10000541) \
898 /* Vector Multiply Word Low Signed, Saturate, Integer and Accumulate */ \
899 /* Negative in Words */ \
900 V(evmwlssianw, EVMWLSSIANW, 0x100005C1) \
901 /* Vector Multiply Word Low Unsigned, Modulo, Integer */ \
902 V(evmwlumi, EVMWLUMI, 0x10000448) \
903 /* Vector Multiply Word Low Unsigned, Modulo, Integer to Accumulator */ \
904 V(evmwlumia, EVMWLUMIA, 0x10000468) \
905 /* Vector Multiply Word Low Unsigned, Modulo, Integer and Accumulate in */ \
906 /* Words */ \
907 V(evmwlumiaaw, EVMWLUMIAAW, 0x10000548) \
908 /* Vector Multiply Word Low Unsigned, Modulo, Integer and Accumulate */ \
909 /* Negative in Words */ \
910 V(evmwlumianw, EVMWLUMIANW, 0x100005C8) \
911 /* Vector Multiply Word Low Unsigned, Saturate, Integer and Accumulate */ \
912 /* in Words */ \
913 V(evmwlusiaaw, EVMWLUSIAAW, 0x10000540) \
914 /* Vector Multiply Word Low Unsigned, Saturate, Integer and Accumulate */ \
915 /* Negative in Words */ \
916 V(evmwlusianw, EVMWLUSIANW, 0x100005C0) \
917 /* Vector Multiply Word Signed, Modulo, Fractional */ \
918 V(evmwsmf, EVMWSMF, 0x1000045B) \
919 /* Vector Multiply Word Signed, Modulo, Fractional to Accumulator */ \
920 V(evmwsmfa, EVMWSMFA, 0x1000047B) \
921 /* Vector Multiply Word Signed, Modulo, Fractional and Accumulate */ \
922 V(evmwsmfaa, EVMWSMFAA, 0x1000055B) \
923 /* Vector Multiply Word Signed, Modulo, Fractional and Accumulate */ \
924 /* Negative */ \
925 V(evmwsmfan, EVMWSMFAN, 0x100005DB) \
926 /* Vector Multiply Word Signed, Modulo, Integer */ \
927 V(evmwsmi, EVMWSMI, 0x10000459) \
928 /* Vector Multiply Word Signed, Modulo, Integer to Accumulator */ \
929 V(evmwsmia, EVMWSMIA, 0x10000479) \
930 /* Vector Multiply Word Signed, Modulo, Integer and Accumulate */ \
931 V(evmwsmiaa, EVMWSMIAA, 0x10000559) \
932 /* Vector Multiply Word Signed, Modulo, Integer and Accumulate Negative */ \
933 V(evmwsmian, EVMWSMIAN, 0x100005D9) \
934 /* Vector Multiply Word Signed, Saturate, Fractional */ \
935 V(evmwssf, EVMWSSF, 0x10000453) \
936 /* Vector Multiply Word Signed, Saturate, Fractional to Accumulator */ \
937 V(evmwssfa, EVMWSSFA, 0x10000473) \
938 /* Vector Multiply Word Signed, Saturate, Fractional and Accumulate */ \
939 V(evmwssfaa, EVMWSSFAA, 0x10000553) \
940 /* Vector Multiply Word Signed, Saturate, Fractional and Accumulate */ \
941 /* Negative */ \
942 V(evmwssfan, EVMWSSFAN, 0x100005D3) \
943 /* Vector Multiply Word Unsigned, Modulo, Integer */ \
944 V(evmwumi, EVMWUMI, 0x10000458) \
945 /* Vector Multiply Word Unsigned, Modulo, Integer to Accumulator */ \
946 V(evmwumia, EVMWUMIA, 0x10000478) \
947 /* Vector Multiply Word Unsigned, Modulo, Integer and Accumulate */ \
948 V(evmwumiaa, EVMWUMIAA, 0x10000558) \
949 /* Vector Multiply Word Unsigned, Modulo, Integer and Accumulate */ \
950 /* Negative */ \
951 V(evmwumian, EVMWUMIAN, 0x100005D8) \
952 /* Vector NAND */ \
953 V(evnand, EVNAND, 0x1000021E) \
954 /* Vector Negate */ \
955 V(evneg, EVNEG, 0x10000209) \
956 /* Vector NOR */ \
957 V(evnor, EVNOR, 0x10000218) \
958 /* Vector OR */ \
959 V(evor, EVOR, 0x10000217) \
960 /* Vector OR with Complement */ \
961 V(evorc, EVORC, 0x1000021B) \
962 /* Vector Rotate Left Word */ \
963 V(evrlw, EVRLW, 0x10000228) \
964 /* Vector Rotate Left Word Immediate */ \
965 V(evrlwi, EVRLWI, 0x1000022A) \
966 /* Vector Round Word */ \
967 V(evrndw, EVRNDW, 0x1000020C) \
968 /* Vector Shift Left Word */ \
969 V(evslw, EVSLW, 0x10000224) \
970 /* Vector Shift Left Word Immediate */ \
971 V(evslwi, EVSLWI, 0x10000226) \
972 /* Vector Splat Fractional Immediate */ \
973 V(evsplatfi, EVSPLATFI, 0x1000022B) \
974 /* Vector Splat Immediate */ \
975 V(evsplati, EVSPLATI, 0x10000229) \
976 /* Vector Shift Right Word Immediate Signed */ \
977 V(evsrwis, EVSRWIS, 0x10000223) \
978 /* Vector Shift Right Word Immediate Unsigned */ \
979 V(evsrwiu, EVSRWIU, 0x10000222) \
980 /* Vector Shift Right Word Signed */ \
981 V(evsrws, EVSRWS, 0x10000221) \
982 /* Vector Shift Right Word Unsigned */ \
983 V(evsrwu, EVSRWU, 0x10000220) \
984 /* Vector Store Double of Double */ \
985 V(evstdd, EVSTDD, 0x10000321) \
986 /* Vector Store Double of Double Indexed */ \
987 V(evstddx, EVSTDDX, 0x10000320) \
988 /* Vector Store Double of Four Half Words */ \
989 V(evstdh, EVSTDH, 0x10000325) \
990 /* Vector Store Double of Four Half Words Indexed */ \
991 V(evstdhx, EVSTDHX, 0x10000324) \
992 /* Vector Store Double of Two Words */ \
993 V(evstdw, EVSTDW, 0x10000323) \
994 /* Vector Store Double of Two Words Indexed */ \
995 V(evstdwx, EVSTDWX, 0x10000322) \
996 /* Vector Store Word of Two Half Words from Even */ \
997 V(evstwhe, EVSTWHE, 0x10000331) \
998 /* Vector Store Word of Two Half Words from Even Indexed */ \
999 V(evstwhex, EVSTWHEX, 0x10000330) \
1000 /* Vector Store Word of Two Half Words from Odd */ \
1001 V(evstwho, EVSTWHO, 0x10000335) \
1002 /* Vector Store Word of Two Half Words from Odd Indexed */ \
1003 V(evstwhox, EVSTWHOX, 0x10000334) \
1004 /* Vector Store Word of Word from Even */ \
1005 V(evstwwe, EVSTWWE, 0x10000339) \
1006 /* Vector Store Word of Word from Even Indexed */ \
1007 V(evstwwex, EVSTWWEX, 0x10000338) \
1008 /* Vector Store Word of Word from Odd */ \
1009 V(evstwwo, EVSTWWO, 0x1000033D) \
1010 /* Vector Store Word of Word from Odd Indexed */ \
1011 V(evstwwox, EVSTWWOX, 0x1000033C) \
1012 /* Vector Subtract Signed, Modulo, Integer to Accumulator Word */ \
1013 V(evsubfsmiaaw, EVSUBFSMIAAW, 0x100004CB) \
1014 /* Vector Subtract Signed, Saturate, Integer to Accumulator Word */ \
1015 V(evsubfssiaaw, EVSUBFSSIAAW, 0x100004C3) \
1016 /* Vector Subtract Unsigned, Modulo, Integer to Accumulator Word */ \
1017 V(evsubfumiaaw, EVSUBFUMIAAW, 0x100004CA) \
1018 /* Vector Subtract Unsigned, Saturate, Integer to Accumulator Word */ \
1019 V(evsubfusiaaw, EVSUBFUSIAAW, 0x100004C2) \
1020 /* Vector Subtract from Word */ \
1021 V(evsubfw, EVSUBFW, 0x10000204) \
1022 /* Vector Subtract Immediate from Word */ \
1023 V(evsubifw, EVSUBIFW, 0x10000206) \
1024 /* Vector XOR */ \
1025 V(evxor, EVXOR, 0x10000216) \
1026 /* Floating-Point Double-Precision Absolute Value */ \
1027 V(efdabs, EFDABS, 0x100002E4) \
1028 /* Floating-Point Double-Precision Add */ \
1029 V(efdadd, EFDADD, 0x100002E0) \
1030 /* Floating-Point Double-Precision Convert from Single-Precision */ \
1031 V(efdcfs, EFDCFS, 0x100002EF) \
1032 /* Convert Floating-Point Double-Precision from Signed Fraction */ \
1033 V(efdcfsf, EFDCFSF, 0x100002F3) \
1034 /* Convert Floating-Point Double-Precision from Signed Integer */ \
1035 V(efdcfsi, EFDCFSI, 0x100002F1) \
1036 /* Convert Floating-Point Double-Precision from Signed Integer */ \
1037 /* Doubleword */ \
1038 V(efdcfsid, EFDCFSID, 0x100002E3) \
1039 /* Convert Floating-Point Double-Precision from Unsigned Fraction */ \
1040 V(efdcfuf, EFDCFUF, 0x100002F2) \
1041 /* Convert Floating-Point Double-Precision from Unsigned Integer */ \
1042 V(efdcfui, EFDCFUI, 0x100002F0) \
 1043 /* Convert Floating-Point Double-Precision from Unsigned Integer */ \
1044 /* Doubleword */ \
1045 V(efdcfuid, EFDCFUID, 0x100002E2) \
1046 /* Floating-Point Double-Precision Compare Equal */ \
1047 V(efdcmpeq, EFDCMPEQ, 0x100002EE) \
1048 /* Floating-Point Double-Precision Compare Greater Than */ \
1049 V(efdcmpgt, EFDCMPGT, 0x100002EC) \
1050 /* Floating-Point Double-Precision Compare Less Than */ \
1051 V(efdcmplt, EFDCMPLT, 0x100002ED) \
1052 /* Convert Floating-Point Double-Precision to Signed Fraction */ \
1053 V(efdctsf, EFDCTSF, 0x100002F7) \
1054 /* Convert Floating-Point Double-Precision to Signed Integer */ \
1055 V(efdctsi, EFDCTSI, 0x100002F5) \
1056 /* Convert Floating-Point Double-Precision to Signed Integer Doubleword */ \
1057 /* with Round toward Zero */ \
1058 V(efdctsidz, EFDCTSIDZ, 0x100002EB) \
1059 /* Convert Floating-Point Double-Precision to Signed Integer with Round */ \
1060 /* toward Zero */ \
1061 V(efdctsiz, EFDCTSIZ, 0x100002FA) \
1062 /* Convert Floating-Point Double-Precision to Unsigned Fraction */ \
1063 V(efdctuf, EFDCTUF, 0x100002F6) \
1064 /* Convert Floating-Point Double-Precision to Unsigned Integer */ \
1065 V(efdctui, EFDCTUI, 0x100002F4) \
1066 /* Convert Floating-Point Double-Precision to Unsigned Integer */ \
1067 /* Doubleword with Round toward Zero */ \
1068 V(efdctuidz, EFDCTUIDZ, 0x100002EA) \
1069 /* Convert Floating-Point Double-Precision to Unsigned Integer with */ \
1070 /* Round toward Zero */ \
1071 V(efdctuiz, EFDCTUIZ, 0x100002F8) \
1072 /* Floating-Point Double-Precision Divide */ \
1073 V(efddiv, EFDDIV, 0x100002E9) \
1074 /* Floating-Point Double-Precision Multiply */ \
1075 V(efdmul, EFDMUL, 0x100002E8) \
1076 /* Floating-Point Double-Precision Negative Absolute Value */ \
1077 V(efdnabs, EFDNABS, 0x100002E5) \
1078 /* Floating-Point Double-Precision Negate */ \
1079 V(efdneg, EFDNEG, 0x100002E6) \
1080 /* Floating-Point Double-Precision Subtract */ \
1081 V(efdsub, EFDSUB, 0x100002E1) \
1082 /* Floating-Point Double-Precision Test Equal */ \
1083 V(efdtsteq, EFDTSTEQ, 0x100002FE) \
1084 /* Floating-Point Double-Precision Test Greater Than */ \
1085 V(efdtstgt, EFDTSTGT, 0x100002FC) \
1086 /* Floating-Point Double-Precision Test Less Than */ \
1087 V(efdtstlt, EFDTSTLT, 0x100002FD) \
1088 /* Floating-Point Single-Precision Convert from Double-Precision */ \
1089 V(efscfd, EFSCFD, 0x100002CF) \
1090 /* Floating-Point Absolute Value */ \
1091 V(efsabs, EFSABS, 0x100002C4) \
1092 /* Floating-Point Add */ \
1093 V(efsadd, EFSADD, 0x100002C0) \
1094 /* Convert Floating-Point from Signed Fraction */ \
1095 V(efscfsf, EFSCFSF, 0x100002D3) \
1096 /* Convert Floating-Point from Signed Integer */ \
1097 V(efscfsi, EFSCFSI, 0x100002D1) \
1098 /* Convert Floating-Point from Unsigned Fraction */ \
1099 V(efscfuf, EFSCFUF, 0x100002D2) \
1100 /* Convert Floating-Point from Unsigned Integer */ \
1101 V(efscfui, EFSCFUI, 0x100002D0) \
1102 /* Floating-Point Compare Equal */ \
1103 V(efscmpeq, EFSCMPEQ, 0x100002CE) \
1104 /* Floating-Point Compare Greater Than */ \
1105 V(efscmpgt, EFSCMPGT, 0x100002CC) \
1106 /* Floating-Point Compare Less Than */ \
1107 V(efscmplt, EFSCMPLT, 0x100002CD) \
1108 /* Convert Floating-Point to Signed Fraction */ \
1109 V(efsctsf, EFSCTSF, 0x100002D7) \
1110 /* Convert Floating-Point to Signed Integer */ \
1111 V(efsctsi, EFSCTSI, 0x100002D5) \
1112 /* Convert Floating-Point to Signed Integer with Round toward Zero */ \
1113 V(efsctsiz, EFSCTSIZ, 0x100002DA) \
1114 /* Convert Floating-Point to Unsigned Fraction */ \
1115 V(efsctuf, EFSCTUF, 0x100002D6) \
1116 /* Convert Floating-Point to Unsigned Integer */ \
1117 V(efsctui, EFSCTUI, 0x100002D4) \
1118 /* Convert Floating-Point to Unsigned Integer with Round toward Zero */ \
1119 V(efsctuiz, EFSCTUIZ, 0x100002D8) \
1120 /* Floating-Point Divide */ \
1121 V(efsdiv, EFSDIV, 0x100002C9) \
1122 /* Floating-Point Multiply */ \
1123 V(efsmul, EFSMUL, 0x100002C8) \
1124 /* Floating-Point Negative Absolute Value */ \
1125 V(efsnabs, EFSNABS, 0x100002C5) \
1126 /* Floating-Point Negate */ \
1127 V(efsneg, EFSNEG, 0x100002C6) \
1128 /* Floating-Point Subtract */ \
1129 V(efssub, EFSSUB, 0x100002C1) \
1130 /* Floating-Point Test Equal */ \
1131 V(efststeq, EFSTSTEQ, 0x100002DE) \
1132 /* Floating-Point Test Greater Than */ \
1133 V(efststgt, EFSTSTGT, 0x100002DC) \
1134 /* Floating-Point Test Less Than */ \
1135 V(efststlt, EFSTSTLT, 0x100002DD) \
1136 /* Vector Floating-Point Absolute Value */ \
1137 V(evfsabs, EVFSABS, 0x10000284) \
1138 /* Vector Floating-Point Add */ \
1139 V(evfsadd, EVFSADD, 0x10000280) \
1140 /* Vector Convert Floating-Point from Signed Fraction */ \
1141 V(evfscfsf, EVFSCFSF, 0x10000293) \
1142 /* Vector Convert Floating-Point from Signed Integer */ \
1143 V(evfscfsi, EVFSCFSI, 0x10000291) \
1144 /* Vector Convert Floating-Point from Unsigned Fraction */ \
1145 V(evfscfuf, EVFSCFUF, 0x10000292) \
1146 /* Vector Convert Floating-Point from Unsigned Integer */ \
1147 V(evfscfui, EVFSCFUI, 0x10000290) \
1148 /* Vector Floating-Point Compare Equal */ \
1149 V(evfscmpeq, EVFSCMPEQ, 0x1000028E) \
1150 /* Vector Floating-Point Compare Greater Than */ \
1151 V(evfscmpgt, EVFSCMPGT, 0x1000028C) \
1152 /* Vector Floating-Point Compare Less Than */ \
1153 V(evfscmplt, EVFSCMPLT, 0x1000028D) \
1154 /* Vector Convert Floating-Point to Signed Fraction */ \
1155 V(evfsctsf, EVFSCTSF, 0x10000297) \
1156 /* Vector Convert Floating-Point to Signed Integer */ \
1157 V(evfsctsi, EVFSCTSI, 0x10000295) \
1158 /* Vector Convert Floating-Point to Signed Integer with Round toward */ \
1159 /* Zero */ \
1160 V(evfsctsiz, EVFSCTSIZ, 0x1000029A) \
1161 /* Vector Convert Floating-Point to Unsigned Fraction */ \
1162 V(evfsctuf, EVFSCTUF, 0x10000296) \
1163 /* Vector Convert Floating-Point to Unsigned Integer */ \
1164 V(evfsctui, EVFSCTUI, 0x10000294) \
1165 /* Vector Convert Floating-Point to Unsigned Integer with Round toward */ \
1166 /* Zero */ \
1167 V(evfsctuiz, EVFSCTUIZ, 0x10000298) \
1168 /* Vector Floating-Point Divide */ \
1169 V(evfsdiv, EVFSDIV, 0x10000289) \
1170 /* Vector Floating-Point Multiply */ \
1171 V(evfsmul, EVFSMUL, 0x10000288) \
1172 /* Vector Floating-Point Negative Absolute Value */ \
1173 V(evfsnabs, EVFSNABS, 0x10000285) \
1174 /* Vector Floating-Point Negate */ \
1175 V(evfsneg, EVFSNEG, 0x10000286) \
1176 /* Vector Floating-Point Subtract */ \
1177 V(evfssub, EVFSSUB, 0x10000281) \
1178 /* Vector Floating-Point Test Equal */ \
1179 V(evfststeq, EVFSTSTEQ, 0x1000029E) \
1180 /* Vector Floating-Point Test Greater Than */ \
1181 V(evfststgt, EVFSTSTGT, 0x1000029C) \
1182 /* Vector Floating-Point Test Less Than */ \
1183 V(evfststlt, EVFSTSTLT, 0x1000029D)
1184
// VC-form (vector compare) opcodes. The table pairs each assembler
// mnemonic with its enum name and full 32-bit encoding.
#define PPC_VC_OPCODE_LIST(V)                       \
  /* Vector Compare Bounds Single-Precision */      \
  V(vcmpbfp, VCMPBFP, 0x100003C6)                   \
  /* Vector Compare Equal To Single-Precision */    \
  V(vcmpeqfp, VCMPEQFP, 0x100000C6)                 \
  /* Vector Compare Equal To Unsigned Byte */       \
  V(vcmpequb, VCMPEQUB, 0x10000006)                 \
  /* Vector Compare Equal To Unsigned Doubleword */ \
  V(vcmpequd, VCMPEQUD, 0x100000C7)                 \
  /* Vector Compare Equal To Unsigned Halfword */   \
  V(vcmpequh, VCMPEQUH, 0x10000046)                 \
  /* Vector Compare Equal To Unsigned Word */       \
  V(vcmpequw, VCMPEQUW, 0x10000086)                 \
  /* Vector Compare Greater Than or Equal To */     \
  /* Single-Precision */                            \
  V(vcmpgefp, VCMPGEFP, 0x100001C6)                 \
  /* Vector Compare Greater Than Single-Precision */\
  V(vcmpgtfp, VCMPGTFP, 0x100002C6)                 \
  /* Vector Compare Greater Than Signed Byte */     \
  V(vcmpgtsb, VCMPGTSB, 0x10000306)                 \
  /* Vector Compare Greater Than Signed */          \
  /* Doubleword */                                  \
  V(vcmpgtsd, VCMPGTSD, 0x100003C7)                 \
  /* Vector Compare Greater Than Signed Halfword */ \
  V(vcmpgtsh, VCMPGTSH, 0x10000346)                 \
  /* Vector Compare Greater Than Signed Word */     \
  V(vcmpgtsw, VCMPGTSW, 0x10000386)                 \
  /* Vector Compare Greater Than Unsigned Byte */   \
  V(vcmpgtub, VCMPGTUB, 0x10000206)                 \
  /* Vector Compare Greater Than Unsigned */        \
  /* Doubleword */                                  \
  V(vcmpgtud, VCMPGTUD, 0x100002C7)                 \
  /* Vector Compare Greater Than Unsigned */        \
  /* Halfword */                                    \
  V(vcmpgtuh, VCMPGTUH, 0x10000246)                 \
  /* Vector Compare Greater Than Unsigned Word */   \
  V(vcmpgtuw, VCMPGTUW, 0x10000286)
1218
// X-form opcodes, group A: integer modulo instructions (ISA 3.0).
#define PPC_X_OPCODE_A_FORM_LIST(V) \
  /* Modulo Signed Dword */         \
  V(modsd, MODSD, 0x7C000612)       \
  /* Modulo Unsigned Dword */       \
  V(modud, MODUD, 0x7C000212)       \
  /* Modulo Signed Word */          \
  V(modsw, MODSW, 0x7C000616)      \
  /* Modulo Unsigned Word */        \
  V(moduw, MODUW, 0x7C000216)
1228
// X-form opcodes, group B: logical and shift instructions. Mnemonics that
// would collide with C++ keywords carry a trailing underscore (xor_, and_)
// or an 'x' suffix (orx).
#define PPC_X_OPCODE_B_FORM_LIST(V)  \
  /* XOR */                          \
  V(xor_, XORX, 0x7C000278)          \
  /* AND */                          \
  V(and_, ANDX, 0x7C000038)          \
  /* AND with Complement */          \
  V(andc, ANDCX, 0x7C000078)         \
  /* OR */                           \
  V(orx, ORX, 0x7C000378)            \
  /* OR with Complement */           \
  V(orc, ORC, 0x7C000338)            \
  /* NOR */                          \
  V(nor, NORX, 0x7C0000F8)           \
  /* Shift Right Word */             \
  V(srw, SRWX, 0x7C000430)           \
  /* Shift Left Word */              \
  V(slw, SLWX, 0x7C000030)           \
  /* Shift Right Algebraic Word */   \
  V(sraw, SRAW, 0x7C000630)          \
  /* Shift Left Doubleword */        \
  V(sld, SLDX, 0x7C000036)           \
  /* Shift Right Algebraic */        \
  /* Doubleword */                   \
  V(srad, SRAD, 0x7C000634)          \
  /* Shift Right Doubleword */       \
  V(srd, SRDX, 0x7C000436)
1254
// X-form opcodes, group C: bit-counting and sign-extension instructions.
// Fix: the cnttzw/cnttzd comments previously read "Count Tailing Zeros";
// the instructions count *trailing* zeros (Power ISA cnttzw/cnttzd).
#define PPC_X_OPCODE_C_FORM_LIST(V)    \
  /* Count Leading Zeros Word */       \
  V(cntlzw, CNTLZWX, 0x7C000034)       \
  /* Count Leading Zeros Doubleword */ \
  V(cntlzd, CNTLZDX, 0x7C000074)       \
  /* Count Trailing Zeros Word */      \
  V(cnttzw, CNTTZWX, 0x7C000434)       \
  /* Count Trailing Zeros */           \
  /* Doubleword */                     \
  V(cnttzd, CNTTZDX, 0x7C000474)       \
  /* Population Count Byte-wise */     \
  V(popcntb, POPCNTB, 0x7C0000F4)      \
  /* Population Count Words */         \
  V(popcntw, POPCNTW, 0x7C0002F4)      \
  /* Population Count Doubleword */    \
  V(popcntd, POPCNTD, 0x7C0003F4)      \
  /* Extend Sign Byte */               \
  V(extsb, EXTSB, 0x7C000774)          \
  /* Extend Sign Halfword */           \
  V(extsh, EXTSH, 0x7C000734)
1274
// X-form opcodes, group D: indexed loads and stores (integer, FP, vector,
// byte-reversed, and update variants).
#define PPC_X_OPCODE_D_FORM_LIST(V)                    \
  /* Load Halfword Byte-Reverse Indexed */             \
  V(lhbrx, LHBRX, 0x7C00062C)                          \
  /* Load Word Byte-Reverse Indexed */                 \
  V(lwbrx, LWBRX, 0x7C00042C)                          \
  /* Load Doubleword Byte-Reverse Indexed */           \
  V(ldbrx, LDBRX, 0x7C000428)                          \
  /* Load Byte and Zero Indexed */                     \
  V(lbzx, LBZX, 0x7C0000AE)                            \
  /* Load Byte and Zero with Update Indexed */         \
  V(lbzux, LBZUX, 0x7C0000EE)                          \
  /* Load Halfword and Zero Indexed */                 \
  V(lhzx, LHZX, 0x7C00022E)                            \
  /* Load Halfword and Zero with Update Indexed */     \
  V(lhzux, LHZUX, 0x7C00026E)                          \
  /* Load Halfword Algebraic Indexed */                \
  V(lhax, LHAX, 0x7C0002AE)                            \
  /* Load Word and Zero Indexed */                     \
  V(lwzx, LWZX, 0x7C00002E)                            \
  /* Load Word and Zero with Update Indexed */         \
  V(lwzux, LWZUX, 0x7C00006E)                          \
  /* Load Doubleword Indexed */                        \
  V(ldx, LDX, 0x7C00002A)                              \
  /* Load Doubleword with Update Indexed */            \
  V(ldux, LDUX, 0x7C00006A)                            \
  /* Load Floating-Point Double Indexed */             \
  V(lfdx, LFDX, 0x7C0004AE)                            \
  /* Load Floating-Point Single Indexed */             \
  V(lfsx, LFSX, 0x7C00042E)                            \
  /* Load Floating-Point Double with Update Indexed */ \
  V(lfdux, LFDUX, 0x7C0004EE)                          \
  /* Load Floating-Point Single with Update Indexed */ \
  V(lfsux, LFSUX, 0x7C00046E)                          \
  /* Store Byte with Update Indexed */                 \
  V(stbux, STBUX, 0x7C0001EE)                          \
  /* Store Byte Indexed */                             \
  V(stbx, STBX, 0x7C0001AE)                            \
  /* Store Halfword with Update Indexed */             \
  V(sthux, STHUX, 0x7C00036E)                          \
  /* Store Halfword Indexed */                         \
  V(sthx, STHX, 0x7C00032E)                            \
  /* Store Word with Update Indexed */                 \
  V(stwux, STWUX, 0x7C00016E)                          \
  /* Store Word Indexed */                             \
  V(stwx, STWX, 0x7C00012E)                            \
  /* Store Doubleword with Update Indexed */           \
  V(stdux, STDUX, 0x7C00016A)                          \
  /* Store Doubleword Indexed */                       \
  V(stdx, STDX, 0x7C00012A)                            \
  /* Store Floating-Point Double with Update */        \
  /* Indexed */                                        \
  V(stfdux, STFDUX, 0x7C0005EE)                        \
  /* Store Floating-Point Double Indexed */            \
  V(stfdx, STFDX, 0x7C0005AE)                          \
  /* Store Floating-Point Single with Update */        \
  /* Indexed */                                        \
  V(stfsux, STFSUX, 0x7C00056E)                        \
  /* Store Floating-Point Single Indexed */            \
  V(stfsx, STFSX, 0x7C00052E)                          \
  /* Store Doubleword Byte-Reverse Indexed */          \
  V(stdbrx, STDBRX, 0x7C000528)                        \
  /* Store Word Byte-Reverse Indexed */                \
  V(stwbrx, STWBRX, 0x7C00052C)                        \
  /* Store Halfword Byte-Reverse Indexed */            \
  V(sthbrx, STHBRX, 0x7C00072C)                        \
  /* Load Vector Indexed */                            \
  V(lvx, LVX, 0x7C0000CE)                              \
  /* Store Vector Indexed */                           \
  V(stvx, STVX, 0x7C0001CE)
1342
// X-form opcodes, group E: immediate-shift form.
#define PPC_X_OPCODE_E_FORM_LIST(V)        \
  /* Shift Right Algebraic Word Immediate */ \
  V(srawi, SRAWIX, 0x7C000670)
1346
// X-form opcodes, group F: fixed-point compare instructions.
#define PPC_X_OPCODE_F_FORM_LIST(V) \
  /* Compare */                     \
  V(cmp, CMP, 0x7C000000)           \
  /* Compare Logical */             \
  V(cmpl, CMPL, 0x7C000040)
1352
// X-form opcodes, group G: byte-reverse register instructions (ISA 3.1).
#define PPC_X_OPCODE_G_FORM_LIST(V) \
  /* Byte-Reverse Halfword */       \
  V(brh, BRH, 0x7C0001B6)           \
  /* Byte-Reverse Word */           \
  V(brw, BRW, 0x7C000136)           \
  /* Byte-Reverse Doubleword */     \
  V(brd, BRD, 0x7C000176)
1360
// X-form opcodes with an EH hint field: store-conditional instructions.
// All of these record CR0.
#define PPC_X_OPCODE_EH_S_FORM_LIST(V)    \
  /* Store Byte Conditional Indexed */    \
  V(stbcx, STBCX, 0x7C00056D)             \
  /* Store Halfword Conditional Indexed */\
  V(sthcx, STHCX, 0x7C0005AD)             \
  /* Store Word Conditional Indexed */    \
  V(stwcx, STWCX, 0x7C00012D)             \
  /* Store Doubleword Conditional */      \
  /* Indexed */                           \
  V(stdcx, STDCX, 0x7C0001AD)
1370
// X-form opcodes with an EH hint field: load-and-reserve instructions
// (the paired loads for the store-conditional list above).
#define PPC_X_OPCODE_EH_L_FORM_LIST(V)     \
  /* Load Byte And Reserve Indexed */      \
  V(lbarx, LBARX, 0x7C000068)              \
  /* Load Halfword And Reserve Indexed */  \
  V(lharx, LHARX, 0x7C0000E8)              \
  /* Load Word and Reserve Indexed */      \
  V(lwarx, LWARX, 0x7C000028)              \
  /* Load Doubleword And Reserve Indexed */\
  V(ldarx, LDARX, 0x7C0000A8)
1380
1381#define PPC_X_OPCODE_UNUSED_LIST(V) \
1382 /* Bit Permute Doubleword */ \
1383 V(bpermd, BPERMD, 0x7C0001F8) \
1384 /* Extend Sign Word */ \
1385 V(extsw, EXTSW, 0x7C0007B4) \
1386 /* Load Word Algebraic with Update Indexed */ \
1387 V(lwaux, LWAUX, 0x7C0002EA) \
1388 /* Load Word Algebraic Indexed */ \
1389 V(lwax, LWAX, 0x7C0002AA) \
1390 /* Parity Doubleword */ \
1391 V(prtyd, PRTYD, 0x7C000174) \
1392 /* Trap Doubleword */ \
1393 V(td, TD, 0x7C000088) \
1394 /* Branch Conditional to Branch Target Address Register */ \
1395 V(bctar, BCTAR, 0x4C000460) \
1396 /* Compare Byte */ \
1397 V(cmpb, CMPB, 0x7C0003F8) \
1398 /* Data Cache Block Flush */ \
1399 V(dcbf, DCBF, 0x7C0000AC) \
1400 /* Data Cache Block Store */ \
1401 V(dcbst, DCBST, 0x7C00006C) \
1402 /* Data Cache Block Touch */ \
1403 V(dcbt, DCBT, 0x7C00022C) \
1404 /* Data Cache Block Touch for Store */ \
1405 V(dcbtst, DCBTST, 0x7C0001EC) \
1406 /* Data Cache Block Zero */ \
1407 V(dcbz, DCBZ, 0x7C0007EC) \
1408 /* Equivalent */ \
1409 V(eqv, EQV, 0x7C000238) \
1410 /* Instruction Cache Block Invalidate */ \
1411 V(icbi, ICBI, 0x7C0007AC) \
1412 /* NAND */ \
1413 V(nand, NAND, 0x7C0003B8) \
1414 /* Parity Word */ \
1415 V(prtyw, PRTYW, 0x7C000134) \
1416 /* Synchronize */ \
1417 V(sync, SYNC, 0x7C0004AC) \
1418 /* Trap Word */ \
1419 V(tw, TW, 0x7C000008) \
 1420 /* Executed No Operation */ \
1421 V(xnop, XNOP, 0x68000000) \
1422 /* Convert Binary Coded Decimal To Declets */ \
1423 V(cbcdtd, CBCDTD, 0x7C000274) \
1424 /* Convert Declets To Binary Coded Decimal */ \
1425 V(cdtbcd, CDTBCD, 0x7C000234) \
1426 /* Decimal Floating Add */ \
1427 V(dadd, DADD, 0xEC000004) \
1428 /* Decimal Floating Add Quad */ \
1429 V(daddq, DADDQ, 0xFC000004) \
1430 /* Decimal Floating Convert From Fixed */ \
1431 V(dcffix, DCFFIX, 0xEC000644) \
1432 /* Decimal Floating Convert From Fixed Quad */ \
1433 V(dcffixq, DCFFIXQ, 0xFC000644) \
1434 /* Decimal Floating Compare Ordered */ \
1435 V(dcmpo, DCMPO, 0xEC000104) \
1436 /* Decimal Floating Compare Ordered Quad */ \
1437 V(dcmpoq, DCMPOQ, 0xFC000104) \
1438 /* Decimal Floating Compare Unordered */ \
1439 V(dcmpu, DCMPU, 0xEC000504) \
1440 /* Decimal Floating Compare Unordered Quad */ \
1441 V(dcmpuq, DCMPUQ, 0xFC000504) \
1442 /* Decimal Floating Convert To DFP Long */ \
1443 V(dctdp, DCTDP, 0xEC000204) \
1444 /* Decimal Floating Convert To Fixed */ \
1445 V(dctfix, DCTFIX, 0xEC000244) \
1446 /* Decimal Floating Convert To Fixed Quad */ \
1447 V(dctfixq, DCTFIXQ, 0xFC000244) \
1448 /* Decimal Floating Convert To DFP Extended */ \
1449 V(dctqpq, DCTQPQ, 0xFC000204) \
1450 /* Decimal Floating Decode DPD To BCD */ \
1451 V(ddedpd, DDEDPD, 0xEC000284) \
1452 /* Decimal Floating Decode DPD To BCD Quad */ \
1453 V(ddedpdq, DDEDPDQ, 0xFC000284) \
1454 /* Decimal Floating Divide */ \
1455 V(ddiv, DDIV, 0xEC000444) \
1456 /* Decimal Floating Divide Quad */ \
1457 V(ddivq, DDIVQ, 0xFC000444) \
1458 /* Decimal Floating Encode BCD To DPD */ \
1459 V(denbcd, DENBCD, 0xEC000684) \
1460 /* Decimal Floating Encode BCD To DPD Quad */ \
1461 V(denbcdq, DENBCDQ, 0xFC000684) \
1462 /* Decimal Floating Insert Exponent */ \
1463 V(diex, DIEX, 0xEC0006C4) \
1464 /* Decimal Floating Insert Exponent Quad */ \
1465 V(diexq, DIEXQ, 0xFC0006C4) \
1466 /* Decimal Floating Multiply */ \
1467 V(dmul, DMUL, 0xEC000044) \
1468 /* Decimal Floating Multiply Quad */ \
1469 V(dmulq, DMULQ, 0xFC000044) \
1470 /* Decimal Floating Round To DFP Long */ \
1471 V(drdpq, DRDPQ, 0xFC000604) \
1472 /* Decimal Floating Round To DFP Short */ \
1473 V(drsp, DRSP, 0xEC000604) \
1474 /* Decimal Floating Subtract */ \
1475 V(dsub, DSUB, 0xEC000404) \
1476 /* Decimal Floating Subtract Quad */ \
1477 V(dsubq, DSUBQ, 0xFC000404) \
1478 /* Decimal Floating Test Exponent */ \
1479 V(dtstex, DTSTEX, 0xEC000144) \
1480 /* Decimal Floating Test Exponent Quad */ \
1481 V(dtstexq, DTSTEXQ, 0xFC000144) \
1482 /* Decimal Floating Test Significance */ \
1483 V(dtstsf, DTSTSF, 0xEC000544) \
1484 /* Decimal Floating Test Significance Quad */ \
1485 V(dtstsfq, DTSTSFQ, 0xFC000544) \
1486 /* Decimal Floating Extract Exponent */ \
1487 V(dxex, DXEX, 0xEC0002C4) \
1488 /* Decimal Floating Extract Exponent Quad */ \
1489 V(dxexq, DXEXQ, 0xFC0002C4) \
1490 /* Decorated Storage Notify */ \
1491 V(dsn, DSN, 0x7C0003C6) \
1492 /* Load Byte with Decoration Indexed */ \
1493 V(lbdx, LBDX, 0x7C000406) \
1494 /* Load Doubleword with Decoration Indexed */ \
1495 V(lddx, LDDX, 0x7C0004C6) \
1496 /* Load Floating Doubleword with Decoration Indexed */ \
1497 V(lfddx, LFDDX, 0x7C000646) \
1498 /* Load Halfword with Decoration Indexed */ \
1499 V(lhdx, LHDX, 0x7C000446) \
1500 /* Load Word with Decoration Indexed */ \
1501 V(lwdx, LWDX, 0x7C000486) \
1502 /* Store Byte with Decoration Indexed */ \
1503 V(stbdx, STBDX, 0x7C000506) \
1504 /* Store Doubleword with Decoration Indexed */ \
1505 V(stddx, STDDX, 0x7C0005C6) \
1506 /* Store Floating Doubleword with Decoration Indexed */ \
1507 V(stfddx, STFDDX, 0x7C000746) \
1508 /* Store Halfword with Decoration Indexed */ \
1509 V(sthdx, STHDX, 0x7C000546) \
1510 /* Store Word with Decoration Indexed */ \
1511 V(stwdx, STWDX, 0x7C000586) \
1512 /* Data Cache Block Allocate */ \
1513 V(dcba, DCBA, 0x7C0005EC) \
1514 /* Data Cache Block Invalidate */ \
1515 V(dcbi, DCBI, 0x7C0003AC) \
1516 /* Instruction Cache Block Touch */ \
1517 V(icbt, ICBT, 0x7C00002C) \
1518 /* Move to Condition Register from XER */ \
1519 V(mcrxr, MCRXR, 0x7C000400) \
1520 /* TLB Invalidate Local Indexed */ \
1521 V(tlbilx, TLBILX, 0x7C000024) \
1522 /* TLB Invalidate Virtual Address Indexed */ \
1523 V(tlbivax, TLBIVAX, 0x7C000624) \
1524 /* TLB Read Entry */ \
1525 V(tlbre, TLBRE, 0x7C000764) \
1526 /* TLB Search Indexed */ \
1527 V(tlbsx, TLBSX, 0x7C000724) \
1528 /* TLB Write Entry */ \
1529 V(tlbwe, TLBWE, 0x7C0007A4) \
1530 /* Write External Enable */ \
1531 V(wrtee, WRTEE, 0x7C000106) \
1532 /* Write External Enable Immediate */ \
1533 V(wrteei, WRTEEI, 0x7C000146) \
1534 /* Data Cache Read */ \
1535 V(dcread, DCREAD, 0x7C00028C) \
1536 /* Instruction Cache Read */ \
1537 V(icread, ICREAD, 0x7C0007CC) \
1538 /* Data Cache Invalidate */ \
1539 V(dci, DCI, 0x7C00038C) \
1540 /* Instruction Cache Invalidate */ \
1541 V(ici, ICI, 0x7C00078C) \
1542 /* Move From Device Control Register User Mode Indexed */ \
1543 V(mfdcrux, MFDCRUX, 0x7C000246) \
1544 /* Move From Device Control Register Indexed */ \
1545 V(mfdcrx, MFDCRX, 0x7C000206) \
1546 /* Move To Device Control Register User Mode Indexed */ \
1547 V(mtdcrux, MTDCRUX, 0x7C000346) \
1548 /* Move To Device Control Register Indexed */ \
1549 V(mtdcrx, MTDCRX, 0x7C000306) \
1550 /* Return From Debug Interrupt */ \
1551 V(rfdi, RFDI, 0x4C00004E) \
1552 /* Data Cache Block Flush by External PID */ \
1553 V(dcbfep, DCBFEP, 0x7C0000FE) \
1554 /* Data Cache Block Store by External PID */ \
1555 V(dcbstep, DCBSTEP, 0x7C00007E) \
1556 /* Data Cache Block Touch by External PID */ \
1557 V(dcbtep, DCBTEP, 0x7C00027E) \
1558 /* Data Cache Block Touch for Store by External PID */ \
1559 V(dcbtstep, DCBTSTEP, 0x7C0001FE) \
1560 /* Data Cache Block Zero by External PID */ \
1561 V(dcbzep, DCBZEP, 0x7C0007FE) \
1562 /* Instruction Cache Block Invalidate by External PID */ \
1563 V(icbiep, ICBIEP, 0x7C0007BE) \
1564 /* Load Byte and Zero by External PID Indexed */ \
1565 V(lbepx, LBEPX, 0x7C0000BE) \
1566 /* Load Floating-Point Double by External PID Indexed */ \
1567 V(lfdepx, LFDEPX, 0x7C0004BE) \
1568 /* Load Halfword and Zero by External PID Indexed */ \
1569 V(lhepx, LHEPX, 0x7C00023E) \
1570 /* Load Vector by External PID Indexed */ \
1571 V(lvepx, LVEPX, 0x7C00024E) \
1572 /* Load Vector by External PID Indexed Last */ \
1573 V(lvepxl, LVEPXL, 0x7C00020E) \
1574 /* Load Word and Zero by External PID Indexed */ \
1575 V(lwepx, LWEPX, 0x7C00003E) \
1576 /* Store Byte by External PID Indexed */ \
1577 V(stbepx, STBEPX, 0x7C0001BE) \
1578 /* Store Floating-Point Double by External PID Indexed */ \
1579 V(stfdepx, STFDEPX, 0x7C0005BE) \
1580 /* Store Halfword by External PID Indexed */ \
1581 V(sthepx, STHEPX, 0x7C00033E) \
1582 /* Store Vector by External PID Indexed */ \
1583 V(stvepx, STVEPX, 0x7C00064E) \
1584 /* Store Vector by External PID Indexed Last */ \
1585 V(stvepxl, STVEPXL, 0x7C00060E) \
1586 /* Store Word by External PID Indexed */ \
1587 V(stwepx, STWEPX, 0x7C00013E) \
1588 /* Load Doubleword by External PID Indexed */ \
1589 V(ldepx, LDEPX, 0x7C00003A) \
1590 /* Store Doubleword by External PID Indexed */ \
1591 V(stdepx, STDEPX, 0x7C00013A) \
1592 /* TLB Search and Reserve Indexed */ \
1593 V(tlbsrx, TLBSRX, 0x7C0006A5) \
1594 /* External Control In Word Indexed */ \
1595 V(eciwx, ECIWX, 0x7C00026C) \
1596 /* External Control Out Word Indexed */ \
1597 V(ecowx, ECOWX, 0x7C00036C) \
1598 /* Data Cache Block Lock Clear */ \
1599 V(dcblc, DCBLC, 0x7C00030C) \
1600 /* Data Cache Block Lock Query */ \
1601 V(dcblq, DCBLQ, 0x7C00034D) \
1602 /* Data Cache Block Touch and Lock Set */ \
1603 V(dcbtls, DCBTLS, 0x7C00014C) \
1604 /* Data Cache Block Touch for Store and Lock Set */ \
1605 V(dcbtstls, DCBTSTLS, 0x7C00010C) \
1606 /* Instruction Cache Block Lock Clear */ \
1607 V(icblc, ICBLC, 0x7C0001CC) \
1608 /* Instruction Cache Block Lock Query */ \
1609 V(icblq, ICBLQ, 0x7C00018D) \
1610 /* Instruction Cache Block Touch and Lock Set */ \
1611 V(icbtls, ICBTLS, 0x7C0003CC) \
1612 /* Floating Compare Ordered */ \
1613 V(fcmpo, FCMPO, 0xFC000040) \
1614 /* Floating Compare Unordered */ \
1615 V(fcmpu, FCMPU, 0xFC000000) \
1616 /* Floating Test for software Divide */ \
1617 V(ftdiv, FTDIV, 0xFC000100) \
1618 /* Floating Test for software Square Root */ \
1619 V(ftsqrt, FTSQRT, 0xFC000140) \
1620 /* Load Floating-Point as Integer Word Algebraic Indexed */ \
1621 V(lfiwax, LFIWAX, 0x7C0006AE) \
1622 /* Load Floating-Point as Integer Word and Zero Indexed */ \
1623 V(lfiwzx, LFIWZX, 0x7C0006EE) \
1624 /* Move To Condition Register from FPSCR */ \
1625 V(mcrfs, MCRFS, 0xFC000080) \
1626 /* Store Floating-Point as Integer Word Indexed */ \
1627 V(stfiwx, STFIWX, 0x7C0007AE) \
1628 /* Load Floating-Point Double Pair Indexed */ \
1629 V(lfdpx, LFDPX, 0x7C00062E) \
1630 /* Store Floating-Point Double Pair Indexed */ \
1631 V(stfdpx, STFDPX, 0x7C00072E) \
1632 /* Floating Absolute Value */ \
1633 V(fabs, FABS, 0xFC000210) \
1634 /* Floating Convert From Integer Doubleword */ \
1635 V(fcfid, FCFID, 0xFC00069C) \
1636 /* Floating Convert From Integer Doubleword Single */ \
1637 V(fcfids, FCFIDS, 0xEC00069C) \
1638 /* Floating Convert From Integer Doubleword Unsigned */ \
1639 V(fcfidu, FCFIDU, 0xFC00079C) \
1640 /* Floating Convert From Integer Doubleword Unsigned Single */ \
1641 V(fcfidus, FCFIDUS, 0xEC00079C) \
1642 /* Floating Copy Sign */ \
1643 V(fcpsgn, FCPSGN, 0xFC000010) \
1644 /* Floating Convert To Integer Doubleword */ \
1645 V(fctid, FCTID, 0xFC00065C) \
1646 /* Floating Convert To Integer Doubleword Unsigned */ \
1647 V(fctidu, FCTIDU, 0xFC00075C) \
1648 /* Floating Convert To Integer Doubleword Unsigned with round toward */ \
1649 /* Zero */ \
1650 V(fctiduz, FCTIDUZ, 0xFC00075E) \
1651 /* Floating Convert To Integer Doubleword with round toward Zero */ \
1652 V(fctidz, FCTIDZ, 0xFC00065E) \
1653 /* Floating Convert To Integer Word */ \
1654 V(fctiw, FCTIW, 0xFC00001C) \
1655 /* Floating Convert To Integer Word Unsigned */ \
1656 V(fctiwu, FCTIWU, 0xFC00011C) \
1657 /* Floating Convert To Integer Word Unsigned with round toward Zero */ \
1658 V(fctiwuz, FCTIWUZ, 0xFC00011E) \
1659 /* Floating Convert To Integer Word with round to Zero */ \
1660 V(fctiwz, FCTIWZ, 0xFC00001E) \
1661 /* Floating Move Register */ \
1662 V(fmr, FMR, 0xFC000090) \
1663 /* Floating Negative Absolute Value */ \
1664 V(fnabs, FNABS, 0xFC000110) \
1665 /* Floating Negate */ \
1666 V(fneg, FNEG, 0xFC000050) \
1667 /* Floating Round to Single-Precision */ \
1668 V(frsp, FRSP, 0xFC000018) \
1669 /* Move From FPSCR */ \
1670 V(mffs, MFFS, 0xFC00048E) \
1671 /* Move To FPSCR Bit 0 */ \
1672 V(mtfsb0, MTFSB0, 0xFC00008C) \
1673 /* Move To FPSCR Bit 1 */ \
1674 V(mtfsb1, MTFSB1, 0xFC00004C) \
1675 /* Move To FPSCR Field Immediate */ \
1676 V(mtfsfi, MTFSFI, 0xFC00010C) \
1677 /* Floating Round To Integer Minus */ \
1678 V(frim, FRIM, 0xFC0003D0) \
1679 /* Floating Round To Integer Nearest */ \
1680 V(frin, FRIN, 0xFC000310) \
1681 /* Floating Round To Integer Plus */ \
1682 V(frip, FRIP, 0xFC000390) \
1683 /* Floating Round To Integer toward Zero */ \
1684 V(friz, FRIZ, 0xFC000350) \
1685 /* Multiply Cross Halfword to Word Signed */ \
1686 V(mulchw, MULCHW, 0x10000150) \
1687 /* Multiply Cross Halfword to Word Unsigned */ \
1688 V(mulchwu, MULCHWU, 0x10000110) \
1689 /* Multiply High Halfword to Word Signed */ \
1690 V(mulhhw, MULHHW, 0x10000050) \
1691 /* Multiply High Halfword to Word Unsigned */ \
1692 V(mulhhwu, MULHHWU, 0x10000010) \
1693 /* Multiply Low Halfword to Word Signed */ \
1694 V(mullhw, MULLHW, 0x10000350) \
1695 /* Multiply Low Halfword to Word Unsigned */ \
1696 V(mullhwu, MULLHWU, 0x10000310) \
1697 /* Determine Leftmost Zero Byte DQ 56 E0000000 P 58 LSQ lq Load Quadword */ \
1698 V(dlmzb, DLMZB, 0x7C00009C) \
1699 /* Load Quadword And Reserve Indexed */ \
1700 V(lqarx, LQARX, 0x7C000228) \
1701 /* Store Quadword Conditional Indexed and record CR0 */ \
1702 V(stqcx, STQCX, 0x7C00016D) \
1703 /* Load String Word Immediate */ \
1704 V(lswi, LSWI, 0x7C0004AA) \
1705 /* Load String Word Indexed */ \
1706 V(lswx, LSWX, 0x7C00042A) \
1707 /* Store String Word Immediate */ \
1708 V(stswi, STSWI, 0x7C0005AA) \
1709 /* Store String Word Indexed */ \
1710 V(stswx, STSWX, 0x7C00052A) \
1711 /* Clear BHRB */ \
1712 V(clrbhrb, CLRBHRB, 0x7C00035C) \
1713 /* Enforce In-order Execution of I/O */ \
1714 V(eieio, EIEIO, 0x7C0006AC) \
1715 /* Load Byte and Zero Caching Inhibited Indexed */ \
1716 V(lbzcix, LBZCIX, 0x7C0006AA) \
1717 /* Load Doubleword Caching Inhibited Indexed */ \
1718 V(ldcix, LDCIX, 0x7C0006EA) \
1719 /* Load Halfword and Zero Caching Inhibited Indexed */ \
1720 V(lhzcix, LHZCIX, 0x7C00066A) \
1721 /* Load Word and Zero Caching Inhibited Indexed */ \
1722 V(lwzcix, LWZCIX, 0x7C00062A) \
1723 /* Move From Segment Register */ \
1724 V(mfsr, MFSR, 0x7C0004A6) \
1725 /* Move From Segment Register Indirect */ \
1726 V(mfsrin, MFSRIN, 0x7C000526) \
1727 /* Move To Machine State Register Doubleword */ \
1728 V(mtmsrd, MTMSRD, 0x7C000164) \
1729 /* Move To Split Little Endian */ \
1730 V(mtsle, MTSLE, 0x7C000126) \
1731 /* Move To Segment Register */ \
1732 V(mtsr, MTSR, 0x7C0001A4) \
1733 /* Move To Segment Register Indirect */ \
1734 V(mtsrin, MTSRIN, 0x7C0001E4) \
1735 /* SLB Find Entry ESID */ \
1736 V(slbfee, SLBFEE, 0x7C0007A7) \
1737 /* SLB Invalidate All */ \
1738 V(slbia, SLBIA, 0x7C0003E4) \
1739 /* SLB Invalidate Entry */ \
1740 V(slbie, SLBIE, 0x7C000364) \
1741 /* SLB Move From Entry ESID */ \
1742 V(slbmfee, SLBMFEE, 0x7C000726) \
1743 /* SLB Move From Entry VSID */ \
1744 V(slbmfev, SLBMFEV, 0x7C0006A6) \
1745 /* SLB Move To Entry */ \
1746 V(slbmte, SLBMTE, 0x7C000324) \
1747 /* Store Byte Caching Inhibited Indexed */ \
1748 V(stbcix, STBCIX, 0x7C0007AA) \
1749 /* Store Doubleword Caching Inhibited Indexed */ \
1750 V(stdcix, STDCIX, 0x7C0007EA) \
1751 /* Store Halfword and Zero Caching Inhibited Indexed */ \
1752 V(sthcix, STHCIX, 0x7C00076A) \
1753 /* Store Word and Zero Caching Inhibited Indexed */ \
1754 V(stwcix, STWCIX, 0x7C00072A) \
1755 /* TLB Invalidate All */ \
1756 V(tlbia, TLBIA, 0x7C0002E4) \
1757 /* TLB Invalidate Entry */ \
1758 V(tlbie, TLBIE, 0x7C000264) \
1759 /* TLB Invalidate Entry Local */ \
1760 V(tlbiel, TLBIEL, 0x7C000224) \
1761 /* Message Clear Privileged */ \
1762 V(msgclrp, MSGCLRP, 0x7C00015C) \
1763 /* Message Send Privileged */ \
1764 V(msgsndp, MSGSNDP, 0x7C00011C) \
1765 /* Message Clear */ \
1766 V(msgclr, MSGCLR, 0x7C0001DC) \
1767 /* Message Send */ \
1768 V(msgsnd, MSGSND, 0x7C00019C) \
1769 /* Move From Machine State Register */ \
1770 V(mfmsr, MFMSR, 0x7C0000A6) \
1771 /* Move To Machine State Register */ \
1772 V(mtmsr, MTMSR, 0x7C000124) \
1773 /* TLB Synchronize */ \
1774 V(tlbsync, TLBSYNC, 0x7C00046C) \
1775 /* Transaction Abort */ \
1776 V(tabort, TABORT, 0x7C00071D) \
1777 /* Transaction Abort Doubleword Conditional */ \
1778 V(tabortdc, TABORTDC, 0x7C00065D) \
1779 /* Transaction Abort Doubleword Conditional Immediate */ \
1780 V(tabortdci, TABORTDCI, 0x7C0006DD) \
1781 /* Transaction Abort Word Conditional */ \
1782 V(tabortwc, TABORTWC, 0x7C00061D) \
1783 /* Transaction Abort Word Conditional Immediate */ \
1784 V(tabortwci, TABORTWCI, 0x7C00069D) \
1785 /* Transaction Begin */ \
1786 V(tbegin, TBEGIN, 0x7C00051D) \
1787 /* Transaction Check */ \
1788 V(tcheck, TCHECK, 0x7C00059C) \
1789 /* Transaction End */ \
1790 V(tend, TEND, 0x7C00055C) \
1791 /* Transaction Recheckpoint */ \
1792 V(trechkpt, TRECHKPT, 0x7C0007DD) \
1793 /* Transaction Reclaim */ \
1794 V(treclaim, TRECLAIM, 0x7C00075D) \
1795 /* Transaction Suspend or Resume */ \
1796 V(tsr, TSR, 0x7C0005DC) \
1797 /* Load Vector Element Byte Indexed */ \
1798 V(lvebx, LVEBX, 0x7C00000E) \
1799 /* Load Vector Element Halfword Indexed */ \
1800 V(lvehx, LVEHX, 0x7C00004E) \
1801 /* Load Vector Element Word Indexed */ \
1802 V(lvewx, LVEWX, 0x7C00008E) \
1803 /* Load Vector for Shift Left */ \
1804 V(lvsl, LVSL, 0x7C00000C) \
1805 /* Load Vector for Shift Right */ \
1806 V(lvsr, LVSR, 0x7C00004C) \
1807 /* Load Vector Indexed Last */ \
1808 V(lvxl, LVXL, 0x7C0002CE) \
1809 /* Store Vector Element Byte Indexed */ \
1810 V(stvebx, STVEBX, 0x7C00010E) \
1811 /* Store Vector Element Halfword Indexed */ \
1812 V(stvehx, STVEHX, 0x7C00014E) \
1813 /* Store Vector Element Word Indexed */ \
1814 V(stvewx, STVEWX, 0x7C00018E) \
1815 /* Store Vector Indexed Last */ \
1816 V(stvxl, STVXL, 0x7C0003CE) \
1817 /* Floating Merge Even Word */ \
1818 V(fmrgew, FMRGEW, 0xFC00078C) \
1819 /* Floating Merge Odd Word */ \
1820 V(fmrgow, FMRGOW, 0xFC00068C) \
1821 /* Wait for Interrupt */ \
1822 V(wait, WAIT, 0x7C00007C)
1823
// All X-form opcodes: the union of the per-form sub-lists defined above
// (A/B/C/D/E/F/G and EH_L forms, plus the entries the code generator does
// not currently emit).
#define PPC_X_OPCODE_LIST(V)     \
  PPC_X_OPCODE_A_FORM_LIST(V)    \
  PPC_X_OPCODE_B_FORM_LIST(V)    \
  PPC_X_OPCODE_C_FORM_LIST(V)    \
  PPC_X_OPCODE_D_FORM_LIST(V)    \
  PPC_X_OPCODE_E_FORM_LIST(V)    \
  PPC_X_OPCODE_F_FORM_LIST(V)    \
  PPC_X_OPCODE_G_FORM_LIST(V)    \
  PPC_X_OPCODE_EH_L_FORM_LIST(V) \
  PPC_X_OPCODE_UNUSED_LIST(V)
1834
// EVS-form opcodes (SPE embedded vector select).
#define PPC_EVS_OPCODE_LIST(V) \
  /* Vector Select */          \
  V(evsel, EVSEL, 0x10000278)
1838
// DS-form opcodes: loads/stores with a scaled 14-bit displacement.  The low
// two bits of the instruction word select the variant within a primary
// opcode, as the encodings show (LD 0xE8000000, LDU 0xE8000001,
// LWA 0xE8000002).
#define PPC_DS_OPCODE_LIST(V)                    \
  /* Load Doubleword */                          \
  V(ld, LD, 0xE8000000)                          \
  /* Load Doubleword with Update */              \
  V(ldu, LDU, 0xE8000001)                        \
  /* Load Word Algebraic */                      \
  V(lwa, LWA, 0xE8000002)                        \
  /* Store Doubleword */                         \
  V(std, STD, 0xF8000000)                        \
  /* Store Doubleword with Update */             \
  V(stdu, STDU, 0xF8000001)                      \
  /* Load Floating-Point Double Pair */          \
  V(lfdp, LFDP, 0xE4000000)                      \
  /* Store Floating-Point Double Pair */         \
  V(stfdp, STFDP, 0xF4000000)                    \
  /* Store Quadword */                           \
  V(stq, STQ, 0xF8000002)
1856
1857#define PPC_DQ_OPCODE_LIST(V) V(lsq, LSQ, 0xE0000000)
1858
// D-form opcodes: instructions taking a 16-bit immediate or displacement in
// the low half of the instruction word (hence all encodings below have zero
// low 16 bits).
#define PPC_D_OPCODE_LIST(V)                     \
  /* Trap Doubleword Immediate */                \
  V(tdi, TDI, 0x08000000)                        \
  /* Add Immediate */                            \
  V(addi, ADDI, 0x38000000)                      \
  /* Add Immediate Carrying */                   \
  V(addic, ADDIC, 0x30000000)                    \
  /* Add Immediate Carrying & record CR0 */      \
  V(addicx, ADDICx, 0x34000000)                  \
  /* Add Immediate Shifted */                    \
  V(addis, ADDIS, 0x3C000000)                    \
  /* AND Immediate & record CR0 */               \
  V(andix, ANDIx, 0x70000000)                    \
  /* AND Immediate Shifted & record CR0 */       \
  V(andisx, ANDISx, 0x74000000)                  \
  /* Compare Immediate */                        \
  V(cmpi, CMPI, 0x2C000000)                      \
  /* Compare Logical Immediate */                \
  V(cmpli, CMPLI, 0x28000000)                    \
  /* Load Byte and Zero */                       \
  V(lbz, LBZ, 0x88000000)                        \
  /* Load Byte and Zero with Update */           \
  V(lbzu, LBZU, 0x8C000000)                      \
  /* Load Halfword Algebraic */                  \
  V(lha, LHA, 0xA8000000)                        \
  /* Load Halfword Algebraic with Update */      \
  V(lhau, LHAU, 0xAC000000)                      \
  /* Load Halfword and Zero */                   \
  V(lhz, LHZ, 0xA0000000)                        \
  /* Load Halfword and Zero with Update */       \
  V(lhzu, LHZU, 0xA4000000)                      \
  /* Load Multiple Word */                       \
  V(lmw, LMW, 0xB8000000)                        \
  /* Load Word and Zero */                       \
  V(lwz, LWZ, 0x80000000)                        \
  /* Load Word and Zero with Update */           \
  V(lwzu, LWZU, 0x84000000)                      \
  /* Multiply Low Immediate */                   \
  V(mulli, MULLI, 0x1C000000)                    \
  /* OR Immediate */                             \
  V(ori, ORI, 0x60000000)                        \
  /* OR Immediate Shifted */                     \
  V(oris, ORIS, 0x64000000)                      \
  /* Store Byte */                               \
  V(stb, STB, 0x98000000)                        \
  /* Store Byte with Update */                   \
  V(stbu, STBU, 0x9C000000)                      \
  /* Store Halfword */                           \
  V(sth, STH, 0xB0000000)                        \
  /* Store Halfword with Update */               \
  V(sthu, STHU, 0xB4000000)                      \
  /* Store Multiple Word */                      \
  V(stmw, STMW, 0xBC000000)                      \
  /* Store Word */                               \
  V(stw, STW, 0x90000000)                        \
  /* Store Word with Update */                   \
  V(stwu, STWU, 0x94000000)                      \
  /* Subtract From Immediate Carrying */         \
  V(subfic, SUBFIC, 0x20000000)                  \
  /* Trap Word Immediate */                      \
  V(twi, TWI, 0x0C000000)                        \
  /* XOR Immediate */                            \
  V(xori, XORI, 0x68000000)                      \
  /* XOR Immediate Shifted */                    \
  V(xoris, XORIS, 0x6C000000)                    \
  /* Load Floating-Point Double */               \
  V(lfd, LFD, 0xC8000000)                        \
  /* Load Floating-Point Double with Update */   \
  V(lfdu, LFDU, 0xCC000000)                      \
  /* Load Floating-Point Single */               \
  V(lfs, LFS, 0xC0000000)                        \
  /* Load Floating-Point Single with Update */   \
  V(lfsu, LFSU, 0xC4000000)                      \
  /* Store Floating-Point Double */              \
  V(stfd, STFD, 0xD8000000)                      \
  /* Store Floating-Point Double with Update */  \
  V(stfdu, STFDU, 0xDC000000)                    \
  /* Store Floating-Point Single */              \
  V(stfs, STFS, 0xD0000000)                      \
  /* Store Floating-Point Single with Update */  \
  V(stfsu, STFSU, 0xD4000000)
1940
// XFL-form opcode (FPSCR field-masked move).
#define PPC_XFL_OPCODE_LIST(V) \
  /* Move To FPSCR Fields */   \
  V(mtfsf, MTFSF, 0xFC00058E)
1944
// XFX-form opcodes: moves between GPRs and special/condition/device
// registers.  Note MFOCRF/MTOCRF carry an extra set bit (0x00100000,
// the "one CR field" flag) relative to MFCR/MTCRF.
#define PPC_XFX_OPCODE_LIST(V)                             \
  /* Move From Condition Register */                       \
  V(mfcr, MFCR, 0x7C000026)                                \
  /* Move From One Condition Register Field */             \
  V(mfocrf, MFOCRF, 0x7C100026)                            \
  /* Move From Special Purpose Register */                 \
  V(mfspr, MFSPR, 0x7C0002A6)                              \
  /* Move To Condition Register Fields */                  \
  V(mtcrf, MTCRF, 0x7C000120)                              \
  /* Move To One Condition Register Field */               \
  V(mtocrf, MTOCRF, 0x7C100120)                            \
  /* Move To Special Purpose Register */                   \
  V(mtspr, MTSPR, 0x7C0003A6)                              \
  /* Debugger Notify Halt */                               \
  V(dnh, DNH, 0x4C00018C)                                  \
  /* Move From Device Control Register */                  \
  V(mfdcr, MFDCR, 0x7C000286)                              \
  /* Move To Device Control Register */                    \
  V(mtdcr, MTDCR, 0x7C000386)                              \
  /* Move from Performance Monitor Register */             \
  V(mfpmr, MFPMR, 0x7C00029C)                              \
  /* Move To Performance Monitor Register */               \
  V(mtpmr, MTPMR, 0x7C00039C)                              \
  /* Move From Branch History Rolling Buffer */            \
  V(mfbhrbe, MFBHRBE, 0x7C00025C)                          \
  /* Move From Time Base */                                \
  V(mftb, MFTB, 0x7C0002E6)
1972
// MDS-form opcodes: doubleword rotate with register-specified shift amount.
#define PPC_MDS_OPCODE_LIST(V)                   \
  /* Rotate Left Doubleword then Clear Left */   \
  V(rldcl, RLDCL, 0x78000010)                    \
  /* Rotate Left Doubleword then Clear Right */  \
  V(rldcr, RLDCR, 0x78000012)
1978
// A-form opcodes: four-register operations — integer select plus the
// floating-point arithmetic/multiply-add family.  Encodings starting 0xFC
// are double-precision, 0xEC single-precision counterparts.
#define PPC_A_OPCODE_LIST(V)                            \
  /* Integer Select */                                  \
  V(isel, ISEL, 0x7C00001E)                             \
  /* Floating Add */                                    \
  V(fadd, FADD, 0xFC00002A)                             \
  /* Floating Add Single */                             \
  V(fadds, FADDS, 0xEC00002A)                           \
  /* Floating Divide */                                 \
  V(fdiv, FDIV, 0xFC000024)                             \
  /* Floating Divide Single */                          \
  V(fdivs, FDIVS, 0xEC000024)                           \
  /* Floating Multiply-Add */                           \
  V(fmadd, FMADD, 0xFC00003A)                           \
  /* Floating Multiply-Add Single */                    \
  V(fmadds, FMADDS, 0xEC00003A)                         \
  /* Floating Multiply-Subtract */                      \
  V(fmsub, FMSUB, 0xFC000038)                           \
  /* Floating Multiply-Subtract Single */               \
  V(fmsubs, FMSUBS, 0xEC000038)                         \
  /* Floating Multiply */                               \
  V(fmul, FMUL, 0xFC000032)                             \
  /* Floating Multiply Single */                        \
  V(fmuls, FMULS, 0xEC000032)                           \
  /* Floating Negative Multiply-Add */                  \
  V(fnmadd, FNMADD, 0xFC00003E)                         \
  /* Floating Negative Multiply-Add Single */           \
  V(fnmadds, FNMADDS, 0xEC00003E)                       \
  /* Floating Negative Multiply-Subtract */             \
  V(fnmsub, FNMSUB, 0xFC00003C)                         \
  /* Floating Negative Multiply-Subtract Single */      \
  V(fnmsubs, FNMSUBS, 0xEC00003C)                       \
  /* Floating Reciprocal Estimate Single */             \
  V(fres, FRES, 0xEC000030)                             \
  /* Floating Reciprocal Square Root Estimate */        \
  V(frsqrte, FRSQRTE, 0xFC000034)                       \
  /* Floating Select */                                 \
  V(fsel, FSEL, 0xFC00002E)                             \
  /* Floating Square Root */                            \
  V(fsqrt, FSQRT, 0xFC00002C)                           \
  /* Floating Square Root Single */                     \
  V(fsqrts, FSQRTS, 0xEC00002C)                         \
  /* Floating Subtract */                               \
  V(fsub, FSUB, 0xFC000028)                             \
  /* Floating Subtract Single */                        \
  V(fsubs, FSUBS, 0xEC000028)                           \
  /* Floating Reciprocal Estimate */                    \
  V(fre, FRE, 0xFC000030)                               \
  /* Floating Reciprocal Square Root Estimate Single */ \
  V(frsqrtes, FRSQRTES, 0xEC000034)
2028
// VA-form opcodes (four vector operands) currently emitted by the code
// generator.
#define PPC_VA_OPCODE_A_FORM_LIST(V)                           \
  /* Vector Permute */                                         \
  V(vperm, VPERM, 0x1000002B)                                  \
  /* Vector Multiply-Low-Add Unsigned Halfword Modulo */       \
  V(vmladduhm, VMLADDUHM, 0x10000022)                          \
  /* Vector Select */                                          \
  V(vsel, VSEL, 0x1000002A)                                    \
  /* Vector Multiply-Sum Mixed Byte Modulo */                  \
  V(vmsummbm, VMSUMMBM, 0x10000025)                            \
  /* Vector Multiply-Sum Signed Halfword Modulo */             \
  V(vmsumshm, VMSUMSHM, 0x10000028)                            \
  /* Vector Multiply-High-Round-Add Signed Halfword Saturate */ \
  V(vmhraddshs, VMHRADDSHS, 0x10000021)
2042
// Remaining VA-form opcodes, decoded but not currently emitted.
#define PPC_VA_OPCODE_UNUSED_LIST(V)                            \
  /* Vector Add Extended & write Carry Unsigned Quadword */     \
  V(vaddecuq, VADDECUQ, 0x1000003D)                             \
  /* Vector Add Extended Unsigned Quadword Modulo */            \
  V(vaddeuqm, VADDEUQM, 0x1000003C)                             \
  /* Vector Multiply-Add Single-Precision */                    \
  V(vmaddfp, VMADDFP, 0x1000002E)                               \
  /* Vector Multiply-High-Add Signed Halfword Saturate */       \
  V(vmhaddshs, VMHADDSHS, 0x10000020)                           \
  /* Vector Multiply-Sum Signed Halfword Saturate */            \
  V(vmsumshs, VMSUMSHS, 0x10000029)                             \
  /* Vector Multiply-Sum Unsigned Byte Modulo */                \
  V(vmsumubm, VMSUMUBM, 0x10000024)                             \
  /* Vector Multiply-Sum Unsigned Halfword Modulo */            \
  V(vmsumuhm, VMSUMUHM, 0x10000026)                             \
  /* Vector Multiply-Sum Unsigned Halfword Saturate */          \
  V(vmsumuhs, VMSUMUHS, 0x10000027)                             \
  /* Vector Negative Multiply-Subtract Single-Precision */      \
  V(vnmsubfp, VNMSUBFP, 0x1000002F)                             \
  /* Vector Shift Left Double by Octet Immediate */             \
  V(vsldoi, VSLDOI, 0x1000002C)                                 \
  /* Vector Subtract Extended & write Carry Unsigned Quadword */ \
  V(vsubecuq, VSUBECUQ, 0x1000003F)                             \
  /* Vector Subtract Extended Unsigned Quadword Modulo */       \
  V(vsubeuqm, VSUBEUQM, 0x1000003E)                             \
  /* Vector Permute and Exclusive-OR */                         \
  V(vpermxor, VPERMXOR, 0x1000002D)
2070
// All VA-form opcodes: used plus unused sub-lists.
#define PPC_VA_OPCODE_LIST(V)  \
  PPC_VA_OPCODE_A_FORM_LIST(V) \
  PPC_VA_OPCODE_UNUSED_LIST(V)
2074
// XX1-form opcodes: VSX scalar/vector loads and stores and GPR<->VSR moves.
#define PPC_XX1_OPCODE_LIST(V)                                 \
  /* Load VSR Scalar Doubleword Indexed */                     \
  V(lxsdx, LXSDX, 0x7C000498)                                  \
  /* Load VSX Scalar as Integer Word Algebraic Indexed */      \
  V(lxsiwax, LXSIWAX, 0x7C000098)                              \
  /* Load VSX Scalar as Integer Byte & Zero Indexed */         \
  V(lxsibzx, LXSIBZX, 0x7C00061A)                              \
  /* Load VSX Scalar as Integer Halfword & Zero Indexed */     \
  V(lxsihzx, LXSIHZX, 0x7C00065A)                              \
  /* Load VSX Scalar as Integer Word and Zero Indexed */       \
  V(lxsiwzx, LXSIWZX, 0x7C000018)                              \
  /* Load VSX Scalar Single-Precision Indexed */               \
  V(lxsspx, LXSSPX, 0x7C000418)                                \
  /* Load VSR Vector Doubleword*2 Indexed */                   \
  V(lxvd, LXVD, 0x7C000698)                                    \
  /* Load VSX Vector Indexed */                                \
  V(lxvx, LXVX, 0x7C000218)                                    \
  /* Load VSR Vector Doubleword & Splat Indexed */             \
  V(lxvdsx, LXVDSX, 0x7C000298)                                \
  /* Load VSR Vector Word*4 Indexed */                         \
  V(lxvw, LXVW, 0x7C000618)                                    \
  /* Move To VSR Doubleword */                                 \
  V(mtvsrd, MTVSRD, 0x7C000166)                                \
  /* Move To VSR Double Doubleword */                          \
  V(mtvsrdd, MTVSRDD, 0x7C000366)                              \
  /* Move To VSR Word Algebraic */                             \
  V(mtvsrwa, MTVSRWA, 0x7C0001A6)                              \
  /* Move To VSR Word and Zero */                              \
  V(mtvsrwz, MTVSRWZ, 0x7C0001E6)                              \
  /* Move From VSR Doubleword */                               \
  V(mfvsrd, MFVSRD, 0x7C000066)                                \
  /* Move From VSR Word and Zero */                            \
  V(mfvsrwz, MFVSRWZ, 0x7C0000E6)                              \
  /* Store VSR Scalar Doubleword Indexed */                    \
  V(stxsdx, STXSDX, 0x7C000598)                                \
  /* Store VSX Scalar as Integer Word Indexed */               \
  V(stxsiwx, STXSIWX, 0x7C000118)                              \
  /* Store VSX Scalar as Integer Halfword Indexed */           \
  V(stxsihx, STXSIHX, 0x7C00075A)                              \
  /* Store VSX Scalar as Integer Byte Indexed */               \
  V(stxsibx, STXSIBX, 0x7C00071A)                              \
  /* Store VSR Scalar Word Indexed */                          \
  V(stxsspx, STXSSPX, 0x7C000518)                              \
  /* Store VSR Vector Doubleword*2 Indexed */                  \
  V(stxvd, STXVD, 0x7C000798)                                  \
  /* Store VSX Vector Indexed */                               \
  V(stxvx, STXVX, 0x7C000318)                                  \
  /* Store VSR Vector Word*4 Indexed */                        \
  V(stxvw, STXVW, 0x7C000718)
2124
// B-form opcode (conditional branch with 14-bit displacement).
#define PPC_B_OPCODE_LIST(V) \
  /* Branch Conditional */   \
  V(bc, BCX, 0x40000000)
2128
// XO-form opcodes: fixed-point arithmetic with OE/Rc bits.  Each plain/"o"
// pair differs only in the OE (overflow-enable) bit 0x400, as the encodings
// show (e.g. ADDX 0x7C000214 vs ADDO 0x7C000614).
#define PPC_XO_OPCODE_LIST(V)                                              \
  /* Divide Doubleword */                                                  \
  V(divd, DIVD, 0x7C0003D2)                                                \
  /* Divide Doubleword Extended */                                         \
  V(divde, DIVDE, 0x7C000352)                                              \
  /* Divide Doubleword Extended & record OV */                             \
  V(divdeo, DIVDEO, 0x7C000752)                                            \
  /* Divide Doubleword Extended Unsigned */                                \
  V(divdeu, DIVDEU, 0x7C000312)                                            \
  /* Divide Doubleword Extended Unsigned & record OV */                    \
  V(divdeuo, DIVDEUO, 0x7C000712)                                          \
  /* Divide Doubleword & record OV */                                      \
  V(divdo, DIVDO, 0x7C0007D2)                                              \
  /* Divide Doubleword Unsigned */                                         \
  V(divdu, DIVDU, 0x7C000392)                                              \
  /* Divide Doubleword Unsigned & record OV */                             \
  V(divduo, DIVDUO, 0x7C000792)                                            \
  /* Multiply High Doubleword */                                           \
  V(mulhd, MULHD, 0x7C000092)                                              \
  /* Multiply High Doubleword Unsigned */                                  \
  V(mulhdu, MULHDU, 0x7C000012)                                            \
  /* Multiply Low Doubleword */                                            \
  V(mulld, MULLD, 0x7C0001D2)                                              \
  /* Multiply Low Doubleword & record OV */                                \
  V(mulldo, MULLDO, 0x7C0005D2)                                            \
  /* Add */                                                                \
  V(add, ADDX, 0x7C000214)                                                 \
  /* Add Carrying */                                                       \
  V(addc, ADDCX, 0x7C000014)                                               \
  /* Add Carrying & record OV */                                           \
  V(addco, ADDCO, 0x7C000414)                                              \
  /* Add Extended */                                                       \
  V(adde, ADDEX, 0x7C000114)                                               \
  /* Add Extended & record OV */                                           \
  V(addeo, ADDEO, 0x7C000514)                                              \
  /* Add to Minus One Extended */                                          \
  V(addme, ADDME, 0x7C0001D4)                                              \
  /* Add to Minus One Extended & record OV */                              \
  V(addmeo, ADDMEO, 0x7C0005D4)                                            \
  /* Add & record OV */                                                    \
  V(addo, ADDO, 0x7C000614)                                                \
  /* Add to Zero Extended */                                               \
  V(addze, ADDZEX, 0x7C000194)                                             \
  /* Add to Zero Extended & record OV */                                   \
  V(addzeo, ADDZEO, 0x7C000594)                                            \
  /* Divide Word */                                                        \
  V(divw, DIVW, 0x7C0003D6)                                                \
  /* Divide Word Extended */                                               \
  V(divwe, DIVWE, 0x7C000356)                                              \
  /* Divide Word Extended & record OV */                                   \
  V(divweo, DIVWEO, 0x7C000756)                                            \
  /* Divide Word Extended Unsigned */                                      \
  V(divweu, DIVWEU, 0x7C000316)                                            \
  /* Divide Word Extended Unsigned & record OV */                          \
  V(divweuo, DIVWEUO, 0x7C000716)                                          \
  /* Divide Word & record OV */                                            \
  V(divwo, DIVWO, 0x7C0007D6)                                              \
  /* Divide Word Unsigned */                                               \
  V(divwu, DIVWU, 0x7C000396)                                              \
  /* Divide Word Unsigned & record OV */                                   \
  V(divwuo, DIVWUO, 0x7C000796)                                            \
  /* Multiply High Word */                                                 \
  V(mulhw, MULHWX, 0x7C000096)                                             \
  /* Multiply High Word Unsigned */                                        \
  V(mulhwu, MULHWUX, 0x7C000016)                                           \
  /* Multiply Low Word */                                                  \
  V(mullw, MULLW, 0x7C0001D6)                                              \
  /* Multiply Low Word & record OV */                                      \
  V(mullwo, MULLWO, 0x7C0005D6)                                            \
  /* Negate */                                                             \
  V(neg, NEGX, 0x7C0000D0)                                                 \
  /* Negate & record OV */                                                 \
  V(nego, NEGO, 0x7C0004D0)                                                \
  /* Subtract From */                                                      \
  V(subf, SUBFX, 0x7C000050)                                               \
  /* Subtract From Carrying */                                             \
  V(subfc, SUBFCX, 0x7C000010)                                             \
  /* Subtract From Carrying & record OV */                                 \
  V(subfco, SUBFCO, 0x7C000410)                                            \
  /* Subtract From Extended */                                             \
  V(subfe, SUBFEX, 0x7C000110)                                             \
  /* Subtract From Extended & record OV */                                 \
  V(subfeo, SUBFEO, 0x7C000510)                                            \
  /* Subtract From Minus One Extended */                                   \
  V(subfme, SUBFME, 0x7C0001D0)                                            \
  /* Subtract From Minus One Extended & record OV */                       \
  V(subfmeo, SUBFMEO, 0x7C0005D0)                                          \
  /* Subtract From & record OV */                                          \
  V(subfo, SUBFO, 0x7C000450)                                              \
  /* Subtract From Zero Extended */                                        \
  V(subfze, SUBFZE, 0x7C000190)                                            \
  /* Subtract From Zero Extended & record OV */                            \
  V(subfzeo, SUBFZEO, 0x7C000590)                                          \
  /* Add and Generate Sixes */                                             \
  V(addg, ADDG, 0x7C000094)                                                \
  /* Multiply Accumulate Cross Halfword to Word Modulo Signed */           \
  V(macchw, MACCHW, 0x10000158)                                            \
  /* Multiply Accumulate Cross Halfword to Word Saturate Signed */         \
  V(macchws, MACCHWS, 0x100001D8)                                          \
  /* Multiply Accumulate Cross Halfword to Word Saturate Unsigned */       \
  V(macchwsu, MACCHWSU, 0x10000198)                                        \
  /* Multiply Accumulate Cross Halfword to Word Modulo Unsigned */         \
  V(macchwu, MACCHWU, 0x10000118)                                          \
  /* Multiply Accumulate High Halfword to Word Modulo Signed */            \
  V(machhw, MACHHW, 0x10000058)                                            \
  /* Multiply Accumulate High Halfword to Word Saturate Signed */          \
  V(machhws, MACHHWS, 0x100000D8)                                          \
  /* Multiply Accumulate High Halfword to Word Saturate Unsigned */        \
  V(machhwsu, MACHHWSU, 0x10000098)                                        \
  /* Multiply Accumulate High Halfword to Word Modulo Unsigned */          \
  V(machhwu, MACHHWU, 0x10000018)                                          \
  /* Multiply Accumulate Low Halfword to Word Modulo Signed */             \
  V(maclhw, MACLHW, 0x10000358)                                            \
  /* Multiply Accumulate Low Halfword to Word Saturate Signed */           \
  V(maclhws, MACLHWS, 0x100003D8)                                          \
  /* Multiply Accumulate Low Halfword to Word Saturate Unsigned */         \
  V(maclhwsu, MACLHWSU, 0x10000398)                                        \
  /* Multiply Accumulate Low Halfword to Word Modulo Unsigned */           \
  V(maclhwu, MACLHWU, 0x10000318)                                          \
  /* Negative Multiply Accumulate Cross Halfword to Word Modulo Signed */  \
  V(nmacchw, NMACCHW, 0x1000015C)                                          \
  /* Negative Multiply Accumulate Cross Halfword to Word Saturate Signed */ \
  V(nmacchws, NMACCHWS, 0x100001DC)                                        \
  /* Negative Multiply Accumulate High Halfword to Word Modulo Signed */   \
  V(nmachhw, NMACHHW, 0x1000005C)                                          \
  /* Negative Multiply Accumulate High Halfword to Word Saturate Signed */ \
  V(nmachhws, NMACHHWS, 0x100000DC)                                        \
  /* Negative Multiply Accumulate Low Halfword to Word Modulo Signed */    \
  V(nmaclhw, NMACLHW, 0x1000035C)                                          \
  /* Negative Multiply Accumulate Low Halfword to Word Saturate Signed */  \
  V(nmaclhws, NMACLHWS, 0x100003DC)
2260
// XL-form opcodes: CR-field logic, register-indirect branches, and
// interrupt-return / power-management instructions.
#define PPC_XL_OPCODE_LIST(V)                       \
  /* Branch Conditional to Count Register */        \
  V(bcctr, BCCTRX, 0x4C000420)                      \
  /* Branch Conditional to Link Register */         \
  V(bclr, BCLRX, 0x4C000020)                        \
  /* Condition Register AND */                      \
  V(crand, CRAND, 0x4C000202)                       \
  /* Condition Register AND with Complement */      \
  V(crandc, CRANDC, 0x4C000102)                     \
  /* Condition Register Equivalent */               \
  V(creqv, CREQV, 0x4C000242)                       \
  /* Condition Register NAND */                     \
  V(crnand, CRNAND, 0x4C0001C2)                     \
  /* Condition Register NOR */                      \
  V(crnor, CRNOR, 0x4C000042)                       \
  /* Condition Register OR */                       \
  V(cror, CROR, 0x4C000382)                         \
  /* Condition Register OR with Complement */       \
  V(crorc, CRORC, 0x4C000342)                       \
  /* Condition Register XOR */                      \
  V(crxor, CRXOR, 0x4C000182)                       \
  /* Instruction Synchronize */                     \
  V(isync, ISYNC, 0x4C00012C)                       \
  /* Move Condition Register Field */               \
  V(mcrf, MCRF, 0x4C000000)                         \
  /* Return From Critical Interrupt */              \
  V(rfci, RFCI, 0x4C000066)                         \
  /* Return From Interrupt */                       \
  V(rfi, RFI, 0x4C000064)                           \
  /* Return From Machine Check Interrupt */         \
  V(rfmci, RFMCI, 0x4C00004C)                       \
  /* Embedded Hypervisor Privilege */               \
  V(ehpriv, EHPRIV, 0x7C00021C)                     \
  /* Return From Guest Interrupt */                 \
  V(rfgi, RFGI, 0x4C0000CC)                         \
  /* Doze */                                        \
  V(doze, DOZE, 0x4C000324)                         \
  /* Return From Interrupt Doubleword Hypervisor */ \
  V(hrfid, HRFID, 0x4C000224)                       \
  /* Nap */                                         \
  V(nap, NAP, 0x4C000364)                           \
  /* Return from Event Based Branch */              \
  V(rfebb, RFEBB, 0x4C000124)                       \
  /* Return from Interrupt Doubleword */            \
  V(rfid, RFID, 0x4C000024)                         \
  /* Rip Van Winkle */                              \
  V(rvwinkle, RVWINKLE, 0x4C0003E4)                 \
  /* Sleep */                                       \
  V(sleep, SLEEP, 0x4C0003A4)
2310
// XX4-form opcodes (VSX four-operand).
#define PPC_XX4_OPCODE_LIST(V) \
  /* VSX Select */ \
  V(xxsel, XXSEL, 0xF0000030)
2314
// I-form opcodes (unconditional branch with 24-bit displacement).
#define PPC_I_OPCODE_LIST(V) \
  /* Branch */ \
  V(b, BX, 0x48000000)
2318
// M-form opcodes (rotate-and-mask word instructions).
#define PPC_M_OPCODE_LIST(V) \
  /* Rotate Left Word Immediate then Mask Insert */ \
  V(rlwimi, RLWIMIX, 0x50000000) \
  /* Rotate Left Word Immediate then AND with Mask */ \
  V(rlwinm, RLWINMX, 0x54000000) \
  /* Rotate Left Word then AND with Mask */ \
  V(rlwnm, RLWNMX, 0x5C000000)
2326
// VX opcodes whose UIM/immediate operand occupies the "A" slot
// (splat / extract / insert with immediate-selected element).
#define PPC_VX_OPCODE_A_FORM_LIST(V) \
  /* Vector Splat Byte */ \
  V(vspltb, VSPLTB, 0x1000020C) \
  /* Vector Splat Word */ \
  V(vspltw, VSPLTW, 0x1000028C) \
  /* Vector Splat Halfword */ \
  V(vsplth, VSPLTH, 0x1000024C) \
  /* Vector Extract Unsigned Byte */ \
  V(vextractub, VEXTRACTUB, 0x1000020D) \
  /* Vector Extract Unsigned Halfword */ \
  V(vextractuh, VEXTRACTUH, 0x1000024D) \
  /* Vector Extract Unsigned Word */ \
  V(vextractuw, VEXTRACTUW, 0x1000028D) \
  /* Vector Extract Doubleword */ \
  V(vextractd, VEXTRACTD, 0x100002CD) \
  /* Vector Insert Byte */ \
  V(vinsertb, VINSERTB, 0x1000030D) \
  /* Vector Insert Halfword */ \
  V(vinserth, VINSERTH, 0x1000034D) \
  /* Vector Insert Word */ \
  V(vinsertw, VINSERTW, 0x1000038D) \
  /* Vector Insert Doubleword */ \
  V(vinsertd, VINSERTD, 0x100003CD)
2350
// VX opcodes with the standard VRT,VRA,VRB operand form ("B" form):
// vector logicals, modular/saturating add/sub, multiplies, packs,
// min/max, shifts and merges.
#define PPC_VX_OPCODE_B_FORM_LIST(V) \
  /* Vector Logical OR */ \
  V(vor, VOR, 0x10000484) \
  /* Vector Logical XOR */ \
  V(vxor, VXOR, 0x100004C4) \
  /* Vector Logical NOR */ \
  V(vnor, VNOR, 0x10000504) \
  /* Vector Shift Right by Octet */ \
  V(vsro, VSRO, 0x1000044C) \
  /* Vector Shift Left by Octet */ \
  V(vslo, VSLO, 0x1000040C) \
  /* Vector Add Unsigned Doubleword Modulo */ \
  V(vaddudm, VADDUDM, 0x100000C0) \
  /* Vector Add Unsigned Word Modulo */ \
  V(vadduwm, VADDUWM, 0x10000080) \
  /* Vector Add Unsigned Halfword Modulo */ \
  V(vadduhm, VADDUHM, 0x10000040) \
  /* Vector Add Unsigned Byte Modulo */ \
  V(vaddubm, VADDUBM, 0x10000000) \
  /* Vector Add Single-Precision */ \
  V(vaddfp, VADDFP, 0x1000000A) \
  /* Vector Subtract Single-Precision */ \
  V(vsubfp, VSUBFP, 0x1000004A) \
  /* Vector Subtract Unsigned Doubleword Modulo */ \
  V(vsubudm, VSUBUDM, 0x100004C0) \
  /* Vector Subtract Unsigned Word Modulo */ \
  V(vsubuwm, VSUBUWM, 0x10000480) \
  /* Vector Subtract Unsigned Halfword Modulo */ \
  V(vsubuhm, VSUBUHM, 0x10000440) \
  /* Vector Subtract Unsigned Byte Modulo */ \
  V(vsububm, VSUBUBM, 0x10000400) \
  /* Vector Multiply Unsigned Word Modulo */ \
  V(vmuluwm, VMULUWM, 0x10000089) \
  /* Vector Pack Unsigned Halfword Unsigned Modulo */ \
  V(vpkuhum, VPKUHUM, 0x1000000E) \
  /* Vector Multiply Even Signed Byte */ \
  V(vmulesb, VMULESB, 0x10000308) \
  /* Vector Multiply Even Unsigned Byte */ \
  V(vmuleub, VMULEUB, 0x10000208) \
  /* Vector Multiply Odd Signed Byte */ \
  V(vmulosb, VMULOSB, 0x10000108) \
  /* Vector Multiply Odd Unsigned Byte */ \
  V(vmuloub, VMULOUB, 0x10000008) \
  /* Vector Multiply Even Unsigned Halfword */ \
  V(vmuleuh, VMULEUH, 0x10000248) \
  /* Vector Multiply Even Signed Halfword */ \
  V(vmulesh, VMULESH, 0x10000348) \
  /* Vector Multiply Odd Unsigned Halfword */ \
  V(vmulouh, VMULOUH, 0x10000048) \
  /* Vector Multiply Odd Signed Halfword */ \
  V(vmulosh, VMULOSH, 0x10000148) \
  /* Vector Multiply Even Signed Word */ \
  V(vmulesw, VMULESW, 0x10000388) \
  /* Vector Multiply Even Unsigned Word */ \
  V(vmuleuw, VMULEUW, 0x10000288) \
  /* Vector Multiply Odd Signed Word */ \
  V(vmulosw, VMULOSW, 0x10000188) \
  /* Vector Multiply Odd Unsigned Word */ \
  V(vmulouw, VMULOUW, 0x10000088) \
  /* Vector Multiply Low Doubleword */ \
  V(vmulld, VMULLD, 0x100001C9) \
  /* Vector Sum across Quarter Signed Halfword Saturate */ \
  V(vsum4shs, VSUM4SHS, 0x10000648) \
  /* Vector Pack Unsigned Word Unsigned Saturate */ \
  V(vpkuwus, VPKUWUS, 0x100000CE) \
  /* Vector Sum across Half Signed Word Saturate */ \
  V(vsum2sws, VSUM2SWS, 0x10000688) \
  /* Vector Pack Unsigned Doubleword Unsigned Modulo */ \
  V(vpkudum, VPKUDUM, 0x1000044E) \
  /* Vector Maximum Signed Byte */ \
  V(vmaxsb, VMAXSB, 0x10000102) \
  /* Vector Maximum Unsigned Byte */ \
  V(vmaxub, VMAXUB, 0x10000002) \
  /* Vector Maximum Signed Doubleword */ \
  V(vmaxsd, VMAXSD, 0x100001C2) \
  /* Vector Maximum Unsigned Doubleword */ \
  V(vmaxud, VMAXUD, 0x100000C2) \
  /* Vector Maximum Signed Halfword */ \
  V(vmaxsh, VMAXSH, 0x10000142) \
  /* Vector Maximum Unsigned Halfword */ \
  V(vmaxuh, VMAXUH, 0x10000042) \
  /* Vector Maximum Signed Word */ \
  V(vmaxsw, VMAXSW, 0x10000182) \
  /* Vector Maximum Unsigned Word */ \
  V(vmaxuw, VMAXUW, 0x10000082) \
  /* Vector Minimum Signed Byte */ \
  V(vminsb, VMINSB, 0x10000302) \
  /* Vector Minimum Unsigned Byte */ \
  V(vminub, VMINUB, 0x10000202) \
  /* Vector Minimum Signed Doubleword */ \
  V(vminsd, VMINSD, 0x100003C2) \
  /* Vector Minimum Unsigned Doubleword */ \
  V(vminud, VMINUD, 0x100002C2) \
  /* Vector Minimum Signed Halfword */ \
  V(vminsh, VMINSH, 0x10000342) \
  /* Vector Minimum Unsigned Halfword */ \
  V(vminuh, VMINUH, 0x10000242) \
  /* Vector Minimum Signed Word */ \
  V(vminsw, VMINSW, 0x10000382) \
  /* Vector Minimum Unsigned Word */ \
  V(vminuw, VMINUW, 0x10000282) \
  /* Vector Shift Left Byte */ \
  V(vslb, VSLB, 0x10000104) \
  /* Vector Shift Left Word */ \
  V(vslw, VSLW, 0x10000184) \
  /* Vector Shift Left Halfword */ \
  V(vslh, VSLH, 0x10000144) \
  /* Vector Shift Left Doubleword */ \
  V(vsld, VSLD, 0x100005C4) \
  /* Vector Shift Right Byte */ \
  V(vsrb, VSRB, 0x10000204) \
  /* Vector Shift Right Word */ \
  V(vsrw, VSRW, 0x10000284) \
  /* Vector Shift Right Halfword */ \
  V(vsrh, VSRH, 0x10000244) \
  /* Vector Shift Right Doubleword */ \
  V(vsrd, VSRD, 0x100006C4) \
  /* Vector Shift Right Algebraic Byte */ \
  V(vsrab, VSRAB, 0x10000304) \
  /* Vector Shift Right Algebraic Word */ \
  V(vsraw, VSRAW, 0x10000384) \
  /* Vector Shift Right Algebraic Halfword */ \
  V(vsrah, VSRAH, 0x10000344) \
  /* Vector Shift Right Algebraic Doubleword */ \
  V(vsrad, VSRAD, 0x100003C4) \
  /* Vector Logical AND */ \
  V(vand, VAND, 0x10000404) \
  /* Vector Pack Signed Word Signed Saturate */ \
  V(vpkswss, VPKSWSS, 0x100001CE) \
  /* Vector Pack Signed Word Unsigned Saturate */ \
  V(vpkswus, VPKSWUS, 0x1000014E) \
  /* Vector Pack Signed Halfword Signed Saturate */ \
  V(vpkshss, VPKSHSS, 0x1000018E) \
  /* Vector Pack Signed Halfword Unsigned Saturate */ \
  V(vpkshus, VPKSHUS, 0x1000010E) \
  /* Vector Add Signed Halfword Saturate */ \
  V(vaddshs, VADDSHS, 0x10000340) \
  /* Vector Subtract Signed Halfword Saturate */ \
  V(vsubshs, VSUBSHS, 0x10000740) \
  /* Vector Add Unsigned Halfword Saturate */ \
  V(vadduhs, VADDUHS, 0x10000240) \
  /* Vector Subtract Unsigned Halfword Saturate */ \
  V(vsubuhs, VSUBUHS, 0x10000640) \
  /* Vector Add Signed Byte Saturate */ \
  V(vaddsbs, VADDSBS, 0x10000300) \
  /* Vector Subtract Signed Byte Saturate */ \
  V(vsubsbs, VSUBSBS, 0x10000700) \
  /* Vector Add Unsigned Byte Saturate */ \
  V(vaddubs, VADDUBS, 0x10000200) \
  /* Vector Subtract Unsigned Byte Saturate */ \
  V(vsububs, VSUBUBS, 0x10000600) \
  /* Vector Average Unsigned Byte */ \
  V(vavgub, VAVGUB, 0x10000402) \
  /* Vector Average Unsigned Halfword */ \
  V(vavguh, VAVGUH, 0x10000442) \
  /* Vector Logical AND with Complement */ \
  V(vandc, VANDC, 0x10000444) \
  /* Vector Minimum Single-Precision */ \
  V(vminfp, VMINFP, 0x1000044A) \
  /* Vector Maximum Single-Precision */ \
  V(vmaxfp, VMAXFP, 0x1000040A) \
  /* Vector Bit Permute Quadword */ \
  V(vbpermq, VBPERMQ, 0x1000054C) \
  /* Vector Merge High Byte */ \
  V(vmrghb, VMRGHB, 0x1000000C) \
  /* Vector Merge High Halfword */ \
  V(vmrghh, VMRGHH, 0x1000004C) \
  /* Vector Merge High Word */ \
  V(vmrghw, VMRGHW, 0x1000008C) \
  /* Vector Merge Low Byte */ \
  V(vmrglb, VMRGLB, 0x1000010C) \
  /* Vector Merge Low Halfword */ \
  V(vmrglh, VMRGLH, 0x1000014C) \
  /* Vector Merge Low Word */ \
  V(vmrglw, VMRGLW, 0x1000018C)
2526
// VX opcodes of the two-operand VRT,VRB form ("C" form): unpacks and
// byte population count.
#define PPC_VX_OPCODE_C_FORM_LIST(V) \
  /* Vector Unpack Low Signed Word */ \
  V(vupklsw, VUPKLSW, 0x100006CE) \
  /* Vector Unpack High Signed Word */ \
  V(vupkhsw, VUPKHSW, 0x1000064E) \
  /* Vector Unpack Low Signed Halfword */ \
  V(vupklsh, VUPKLSH, 0x100002CE) \
  /* Vector Unpack High Signed Halfword */ \
  V(vupkhsh, VUPKHSH, 0x1000024E) \
  /* Vector Unpack Low Signed Byte */ \
  V(vupklsb, VUPKLSB, 0x1000028E) \
  /* Vector Unpack High Signed Byte */ \
  V(vupkhsb, VUPKHSB, 0x1000020E) \
  /* Vector Population Count Byte */ \
  V(vpopcntb, VPOPCNTB, 0x10000703)
2542
// VX opcodes with a hard-coded value in the VRA field ("D" form).
#define PPC_VX_OPCODE_D_FORM_LIST(V) \
  /* Vector Negate Word */ \
  V(vnegw, VNEGW, 0x10060602) \
  /* Vector Negate Doubleword */ \
  V(vnegd, VNEGD, 0x10070602)
2548
// VX opcodes taking a signed immediate splat value ("E" form).
#define PPC_VX_OPCODE_E_FORM_LIST(V) \
  /* Vector Splat Immediate Signed Byte */ \
  V(vspltisb, VSPLTISB, 0x1000030C) \
  /* Vector Splat Immediate Signed Halfword */ \
  V(vspltish, VSPLTISH, 0x1000034C) \
  /* Vector Splat Immediate Signed Word */ \
  V(vspltisw, VSPLTISW, 0x1000038C)
2556
// VX mask-extract opcodes with a hard-coded VRA selector ("F" form).
#define PPC_VX_OPCODE_F_FORM_LIST(V) \
  /* Vector Extract Byte Mask */ \
  V(vextractbm, VEXTRACTBM, 0x10080642) \
  /* Vector Extract Halfword Mask */ \
  V(vextracthm, VEXTRACTHM, 0x10090642) \
  /* Vector Extract Word Mask */ \
  V(vextractwm, VEXTRACTWM, 0x100A0642) \
  /* Vector Extract Doubleword Mask */ \
  V(vextractdm, VEXTRACTDM, 0x100B0642)
2566
// VX insert-from-GPR opcodes using an immediate-specified index ("G" form).
// (The original comments were split across continuation lines by the
// extraction; rejoined here.)
#define PPC_VX_OPCODE_G_FORM_LIST(V) \
  /* Vector Insert Word from GPR using immediate-specified index */ \
  V(vinsw, VINSW, 0x100000CF) \
  /* Vector Insert Doubleword from GPR using immediate-specified index */ \
  V(vinsd, VINSD, 0x100001CF)
2574
// VX opcodes that are decoded but not currently emitted by the assembler.
#define PPC_VX_OPCODE_UNUSED_LIST(V) \
  /* Decimal Add Modulo */ \
  V(bcdadd, BCDADD, 0xF0000400) \
  /* Decimal Subtract Modulo */ \
  V(bcdsub, BCDSUB, 0xF0000440) \
  /* Move From Vector Status and Control Register */ \
  V(mfvscr, MFVSCR, 0x10000604) \
  /* Move To Vector Status and Control Register */ \
  V(mtvscr, MTVSCR, 0x10000644) \
  /* Vector Add & write Carry Unsigned Quadword */ \
  V(vaddcuq, VADDCUQ, 0x10000140) \
  /* Vector Add and Write Carry-Out Unsigned Word */ \
  V(vaddcuw, VADDCUW, 0x10000180) \
  /* Vector Add Signed Word Saturate */ \
  V(vaddsws, VADDSWS, 0x10000380) \
  /* Vector Add Unsigned Quadword Modulo */ \
  V(vadduqm, VADDUQM, 0x10000100) \
  /* Vector Add Unsigned Word Saturate */ \
  V(vadduws, VADDUWS, 0x10000280) \
  /* Vector Average Signed Byte */ \
  V(vavgsb, VAVGSB, 0x10000502) \
  /* Vector Average Signed Halfword */ \
  V(vavgsh, VAVGSH, 0x10000542) \
  /* Vector Average Signed Word */ \
  V(vavgsw, VAVGSW, 0x10000582) \
  /* Vector Average Unsigned Word */ \
  V(vavguw, VAVGUW, 0x10000482) \
  /* Vector Convert From Signed Fixed-Point Word To Single-Precision */ \
  V(vcfsx, VCFSX, 0x1000034A) \
  /* Vector Convert From Unsigned Fixed-Point Word To Single-Precision */ \
  V(vcfux, VCFUX, 0x1000030A) \
  /* Vector Count Leading Zeros Byte */ \
  V(vclzb, VCLZB, 0x10000702) \
  /* Vector Count Leading Zeros Doubleword */ \
  V(vclzd, VCLZD, 0x100007C2) \
  /* Vector Count Leading Zeros Halfword */ \
  V(vclzh, VCLZH, 0x10000742) \
  /* Vector Count Leading Zeros Word */ \
  V(vclzw, VCLZW, 0x10000782) \
  /* Vector Convert From Single-Precision To Signed Fixed-Point Word */ \
  /* Saturate */ \
  V(vctsxs, VCTSXS, 0x100003CA) \
  /* Vector Convert From Single-Precision To Unsigned Fixed-Point Word */ \
  /* Saturate */ \
  V(vctuxs, VCTUXS, 0x1000038A) \
  /* Vector Equivalence */ \
  V(veqv, VEQV, 0x10000684) \
  /* Vector 2 Raised to the Exponent Estimate Single-Precision */ \
  V(vexptefp, VEXPTEFP, 0x1000018A) \
  /* Vector Gather Bits by Byte by Doubleword */ \
  V(vgbbd, VGBBD, 0x1000050C) \
  /* Vector Log Base 2 Estimate Single-Precision */ \
  V(vlogefp, VLOGEFP, 0x100001CA) \
  /* Vector NAND */ \
  V(vnand, VNAND, 0x10000584) \
  /* Vector OR with Complement */ \
  V(vorc, VORC, 0x10000544) \
  /* Vector Pack Pixel */ \
  V(vpkpx, VPKPX, 0x1000030E) \
  /* Vector Pack Signed Doubleword Signed Saturate */ \
  V(vpksdss, VPKSDSS, 0x100005CE) \
  /* Vector Pack Signed Doubleword Unsigned Saturate */ \
  V(vpksdus, VPKSDUS, 0x1000054E) \
  /* Vector Pack Unsigned Doubleword Unsigned Saturate */ \
  V(vpkudus, VPKUDUS, 0x100004CE) \
  /* Vector Pack Unsigned Halfword Unsigned Saturate */ \
  V(vpkuhus, VPKUHUS, 0x1000008E) \
  /* Vector Pack Unsigned Word Unsigned Modulo */ \
  V(vpkuwum, VPKUWUM, 0x1000004E) \
  /* Vector Polynomial Multiply-Sum Byte */ \
  V(vpmsumb, VPMSUMB, 0x10000408) \
  /* Vector Polynomial Multiply-Sum Doubleword */ \
  V(vpmsumd, VPMSUMD, 0x100004C8) \
  /* Vector Polynomial Multiply-Sum Halfword */ \
  V(vpmsumh, VPMSUMH, 0x10000448) \
  /* Vector Polynomial Multiply-Sum Word */ \
  V(vpmsumw, VPMSUMW, 0x10000488) \
  /* Vector Population Count Doubleword */ \
  V(vpopcntd, VPOPCNTD, 0x100007C3) \
  /* Vector Population Count Halfword */ \
  V(vpopcnth, VPOPCNTH, 0x10000743) \
  /* Vector Population Count Word */ \
  V(vpopcntw, VPOPCNTW, 0x10000783) \
  /* Vector Reciprocal Estimate Single-Precision */ \
  V(vrefp, VREFP, 0x1000010A) \
  /* Vector Round to Single-Precision Integer toward -Infinity */ \
  V(vrfim, VRFIM, 0x100002CA) \
  /* Vector Round to Single-Precision Integer Nearest */ \
  V(vrfin, VRFIN, 0x1000020A) \
  /* Vector Round to Single-Precision Integer toward +Infinity */ \
  V(vrfip, VRFIP, 0x1000028A) \
  /* Vector Round to Single-Precision Integer toward Zero */ \
  V(vrfiz, VRFIZ, 0x1000024A) \
  /* Vector Rotate Left Byte */ \
  V(vrlb, VRLB, 0x10000004) \
  /* Vector Rotate Left Doubleword */ \
  V(vrld, VRLD, 0x100000C4) \
  /* Vector Rotate Left Halfword */ \
  V(vrlh, VRLH, 0x10000044) \
  /* Vector Rotate Left Word */ \
  V(vrlw, VRLW, 0x10000084) \
  /* Vector Reciprocal Square Root Estimate Single-Precision */ \
  V(vrsqrtefp, VRSQRTEFP, 0x1000014A) \
  /* Vector Shift Left */ \
  V(vsl, VSL, 0x100001C4) \
  /* Vector Shift Right */ \
  V(vsr, VSR, 0x100002C4) \
  /* Vector Subtract & write Carry Unsigned Quadword */ \
  V(vsubcuq, VSUBCUQ, 0x10000540) \
  /* Vector Subtract and Write Carry-Out Unsigned Word */ \
  V(vsubcuw, VSUBCUW, 0x10000580) \
  /* Vector Subtract Signed Word Saturate */ \
  V(vsubsws, VSUBSWS, 0x10000780) \
  /* Vector Subtract Unsigned Quadword Modulo */ \
  V(vsubuqm, VSUBUQM, 0x10000500) \
  /* Vector Subtract Unsigned Word Saturate */ \
  V(vsubuws, VSUBUWS, 0x10000680) \
  /* Vector Sum across Quarter Signed Byte Saturate */ \
  V(vsum4sbs, VSUM4SBS, 0x10000708) \
  /* Vector Sum across Quarter Unsigned Byte Saturate */ \
  V(vsum4bus, VSUM4BUS, 0x10000608) \
  /* Vector Sum across Signed Word Saturate */ \
  V(vsumsws, VSUMSWS, 0x10000788) \
  /* Vector Unpack High Pixel */ \
  V(vupkhpx, VUPKHPX, 0x1000034E) \
  /* Vector Unpack Low Pixel */ \
  V(vupklpx, VUPKLPX, 0x100003CE) \
  /* Vector AES Cipher */ \
  V(vcipher, VCIPHER, 0x10000508) \
  /* Vector AES Cipher Last */ \
  V(vcipherlast, VCIPHERLAST, 0x10000509) \
  /* Vector AES Inverse Cipher */ \
  V(vncipher, VNCIPHER, 0x10000548) \
  /* Vector AES Inverse Cipher Last */ \
  V(vncipherlast, VNCIPHERLAST, 0x10000549) \
  /* Vector AES S-Box */ \
  V(vsbox, VSBOX, 0x100005C8) \
  /* Vector SHA-512 Sigma Doubleword */ \
  V(vshasigmad, VSHASIGMAD, 0x100006C2) \
  /* Vector SHA-256 Sigma Word */ \
  V(vshasigmaw, VSHASIGMAW, 0x10000682) \
  /* Vector Merge Even Word */ \
  V(vmrgew, VMRGEW, 0x1000078C) \
  /* Vector Merge Odd Word */ \
  V(vmrgow, VMRGOW, 0x1000068C)
2720
// All VX-form opcodes: union of the per-form sub-lists above.
#define PPC_VX_OPCODE_LIST(V) \
  PPC_VX_OPCODE_A_FORM_LIST(V) \
  PPC_VX_OPCODE_B_FORM_LIST(V) \
  PPC_VX_OPCODE_C_FORM_LIST(V) \
  PPC_VX_OPCODE_D_FORM_LIST(V) \
  PPC_VX_OPCODE_E_FORM_LIST(V) \
  PPC_VX_OPCODE_F_FORM_LIST(V) \
  PPC_VX_OPCODE_G_FORM_LIST(V) \
  PPC_VX_OPCODE_UNUSED_LIST(V)
2730
// XS-form opcodes (shift with split 6-bit immediate).
#define PPC_XS_OPCODE_LIST(V) \
  /* Shift Right Algebraic Doubleword Immediate */ \
  V(sradi, SRADIX, 0x7C000674)
2734
// MD-form opcodes (64-bit rotate-and-mask with immediate shift).
#define PPC_MD_OPCODE_LIST(V) \
  /* Rotate Left Doubleword Immediate then Clear */ \
  V(rldic, RLDIC, 0x78000008) \
  /* Rotate Left Doubleword Immediate then Clear Left */ \
  V(rldicl, RLDICL, 0x78000000) \
  /* Rotate Left Doubleword Immediate then Clear Right */ \
  V(rldicr, RLDICR, 0x78000004) \
  /* Rotate Left Doubleword Immediate then Mask Insert */ \
  V(rldimi, RLDIMI, 0x7800000C)
2744
// SC-form opcodes (system call).
#define PPC_SC_OPCODE_LIST(V) \
  /* System Call */ \
  V(sc, SC, 0x44000002)
2748
// Prefixed (two-word) opcodes, prefix type 00: 8LS prefixed load/store forms.
#define PPC_PREFIX_OPCODE_TYPE_00_LIST(V) \
  V(pload_store_8ls, PLOAD_STORE_8LS, 0x4000000) \
  V(pplwa, PPLWA, 0xA4000000) \
  V(ppld, PPLD, 0xE4000000) \
  V(ppstd, PPSTD, 0xF4000000)
2754
// Prefixed (two-word) opcodes, prefix type 10: MLS prefixed load/store form.
#define PPC_PREFIX_OPCODE_TYPE_10_LIST(V) \
  V(pload_store_mls, PLOAD_STORE_MLS, 0x6000000)
2757
// Master opcode list: concatenation of every per-form opcode list in this
// header. Used to generate the Opcode enum and the decoder case tables.
#define PPC_OPCODE_LIST(V) \
  PPC_X_OPCODE_LIST(V) \
  PPC_X_OPCODE_EH_S_FORM_LIST(V) \
  PPC_XO_OPCODE_LIST(V) \
  PPC_DS_OPCODE_LIST(V) \
  PPC_DQ_OPCODE_LIST(V) \
  PPC_MDS_OPCODE_LIST(V) \
  PPC_MD_OPCODE_LIST(V) \
  PPC_XS_OPCODE_LIST(V) \
  PPC_D_OPCODE_LIST(V) \
  PPC_I_OPCODE_LIST(V) \
  PPC_B_OPCODE_LIST(V) \
  PPC_XL_OPCODE_LIST(V) \
  PPC_A_OPCODE_LIST(V) \
  PPC_XFX_OPCODE_LIST(V) \
  PPC_M_OPCODE_LIST(V) \
  PPC_SC_OPCODE_LIST(V) \
  PPC_Z23_OPCODE_LIST(V) \
  PPC_Z22_OPCODE_LIST(V) \
  PPC_EVX_OPCODE_LIST(V) \
  PPC_XFL_OPCODE_LIST(V) \
  PPC_EVS_OPCODE_LIST(V) \
  PPC_VX_OPCODE_LIST(V) \
  PPC_VA_OPCODE_LIST(V) \
  PPC_VC_OPCODE_LIST(V) \
  PPC_XX1_OPCODE_LIST(V) \
  PPC_XX2_OPCODE_LIST(V) \
  PPC_XX3_OPCODE_VECTOR_LIST(V) \
  PPC_XX3_OPCODE_SCALAR_LIST(V) \
  PPC_XX4_OPCODE_LIST(V) \
  PPC_PREFIX_OPCODE_TYPE_00_LIST(V) \
  PPC_PREFIX_OPCODE_TYPE_10_LIST(V)
// Opcode enumerates every PPC opcode constant. The DECLARE_INSTRUCTION
// X-macro turns each V(name, opcode_name, opcode_value) entry of
// PPC_OPCODE_LIST into an enumerator "opcode_name = opcode_value".
// NOTE(review): the "PPC_OPCODE_LIST(DECLARE_INSTRUCTION)" invocation
// (original line 2794) appears to have been lost in extraction, so no
// list-generated enumerators appear between the #define and #undef below;
// restore it from the canonical source before use.
2791enum Opcode : uint32_t {
2792#define DECLARE_INSTRUCTION(name, opcode_name, opcode_value) \
2793 opcode_name = opcode_value,
2795#undef DECLARE_INSTRUCTION
// Markers for the primary/extended opcode spaces used by the decoder.
2796 EXTP = 0x4000000, // Extended code set prefixed
2797 EXT0 = 0x10000000, // Extended code set 0
2798 EXT1 = 0x4C000000, // Extended code set 1
2799 EXT2 = 0x7C000000, // Extended code set 2
2800 EXT3 = 0xEC000000, // Extended code set 3
2801 EXT4 = 0xFC000000, // Extended code set 4
2802 EXT5 = 0x78000000, // Extended code set 5 - 64bit only
2803 EXT6 = 0xF0000000, // Extended code set 6
2804};
2805
// Instruction encoding bits and masks.
enum {
  // Instruction encoding bit
  B1 = 1 << 1,
  B2 = 1 << 2,
  B3 = 1 << 3,
  B4 = 1 << 4,
  B5 = 1 << 5,
  B7 = 1 << 7,
  B8 = 1 << 8,
  B9 = 1 << 9,
  B12 = 1 << 12,
  B18 = 1 << 18,
  B19 = 1 << 19,
  B20 = 1 << 20,
  B22 = 1 << 22,
  B23 = 1 << 23,
  B24 = 1 << 24,
  B25 = 1 << 25,
  B26 = 1 << 26,
  B27 = 1 << 27,
  B28 = 1 << 28,
  B6 = 1 << 6,
  B10 = 1 << 10,
  B11 = 1 << 11,
  B16 = 1 << 16,
  B17 = 1 << 17,
  B21 = 1 << 21,

  // Instruction bit masks
  kCondMask = 0x1F << 21,
  kOff12Mask = (1 << 12) - 1,
  kImm24Mask = (1 << 24) - 1,
  kOff16Mask = (1 << 16) - 1,
  kImm16Mask = (1 << 16) - 1,
  kImm18Mask = (1 << 18) - 1,
  kImm22Mask = (1 << 22) - 1,
  kImm26Mask = (1 << 26) - 1,
  kBOfieldMask = 0x1f << 21,
  kOpcodeMask = 0x3f << 26,
  kExt1OpcodeMask = 0x3ff << 1,
  kExt2OpcodeMask = 0x3ff << 1,
  // NOTE(review): two enumerators (original lines 2848-2849) were lost in
  // extraction here — likely further kExtN opcode masks; restore from the
  // canonical source.
  kBOMask = 0x1f << 21,
  kBIMask = 0x1F << 16,
  kBDMask = 0x14 << 2,  // NOTE(review): 0x14 looks odd for a mask over the
                        // 14-bit BD field — confirm against upstream.
  kAAMask = 0x01 << 1,
  kLKMask = 0x01,
  kRCMask = 0x01,
  kTOMask = 0x1f << 21
};
2858
// -----------------------------------------------------------------------------
// Addressing modes and instruction variants.

// Overflow Exception (OE bit, instruction bit 10).
enum OEBit {
  SetOE = 1 << 10,   // Set overflow exception
  LeaveOE = 0 << 10  // No overflow exception
};
// Record bit
enum RCBit {   // Bit 0
  SetRC = 1,   // LT,GT,EQ,SO
  LeaveRC = 0  // None
};  // NOTE(review): this closing brace was missing from the extracted text
    // (the following EHBit enum makes its placement unambiguous).

// Exclusive Access hint bit
enum EHBit {   // Bit 0
  SetEH = 1,   // Exclusive Access
  LeaveEH = 0  // Atomic Update
};
// Link bit
enum LKBit {   // Bit 0
  SetLK = 1,   // Load effective address of next instruction
  LeaveLK = 0  // No action
};
// Prefixed R bit.
enum PRBit { SetPR = 1, LeavePR = 0 };
// BO field of a conditional branch (bits 25-21): selects the combination of
// CTR decrement/test and condition-bit test.
enum BOfield {        // Bits 25-21
  DCBNZF = 0 << 21,   // Decrement CTR; branch if CTR != 0 and condition false
  DCBEZF = 2 << 21,   // Decrement CTR; branch if CTR == 0 and condition false
  BF = 4 << 21,       // Branch if condition false
  DCBNZT = 8 << 21,   // Decrement CTR; branch if CTR != 0 and condition true
  DCBEZT = 10 << 21,  // Decrement CTR; branch if CTR == 0 and condition true
  BT = 12 << 21,      // Branch if condition true
  DCBNZ = 16 << 21,   // Decrement CTR; branch if CTR != 0
  DCBEZ = 18 << 21,   // Decrement CTR; branch if CTR == 0
  BA = 20 << 21       // Branch always
};
2899
#if V8_OS_AIX
// AIX system headers define CR_LT/CR_GT/CR_EQ/CR_SO as macros; drop them so
// the enumerators below can use the canonical names.
#undef CR_LT
#undef CR_GT
#undef CR_EQ
#undef CR_SO
#endif

// Bit positions within a 4-bit condition-register field. CR_FU (floating
// "unordered") shares bit 3 with CR_SO.
enum CRBit { CR_LT = 0, CR_GT = 1, CR_EQ = 2, CR_SO = 3, CR_FU = 3 };
2908
// Width in bits of one condition-register field.
#define CRWIDTH 4

// These are the documented bit positions biased down by 32
// NOTE(review): the enum header line was lost in extraction; the name
// FPSCRBit is restored per upstream V8 — confirm, and check whether further
// FPSCR bit enumerators were also dropped.
enum FPSCRBit {
  VXSOFT = 21,  // 53: Software-Defined Condition
  VXSQRT = 22,  // 54: Invalid Square Root
  VXCVI = 23    // 55: Invalid Integer Convert
};
2917
// -----------------------------------------------------------------------------
// Supervisor Call (svc) specific support.

// Special Software Interrupt codes when used in the presence of the PPC
// simulator.
// svc (formerly swi) provides a 24bit immediate value. Use bits 22:0 for
// standard SoftwareInterrupCode. Bit 23 is reserved for the stop feature.
// NOTE(review): the "enum SoftwareInterruptCodes {" header line was lost in
// extraction; restored here to match its later use in Instruction::SvcValue().
enum SoftwareInterruptCodes {
  // transition to C code
  kCallRtRedirected = 0x10,
  // break point
  kBreakpoint = 0x821008,  // bits23-0 of 0x7d821008 = twge r2, r2
  // stop
  kStopCode = 1 << 23
};
// Every bit below the stop bit is available for encoding a stop code.
const uint32_t kStopCodeMask = kStopCode - 1;
const uint32_t kMaxStopCode = kStopCode - 1;
const int32_t kDefaultStopCode = -1;
// FP rounding modes.
enum FPRoundingMode {
  RN = 0,  // Round to Nearest.
  RZ = 1,  // Round towards zero.
  RP = 2,  // Round towards Plus Infinity.
  RM = 3,  // Round towards Minus Infinity.

  // Aliases.
  // NOTE(review): several alias enumerators (original lines 2945-2955) were
  // lost in extraction; kRoundToNearest/kRoundToPlusInf/kRoundToMinusInf are
  // restored here per upstream V8 — confirm against the canonical header.
  kRoundToNearest = RN,
  kRoundToZero = RZ,
  kRoundToPlusInf = RP,
  kRoundToMinusInf = RM
};
2957
2958// -----------------------------------------------------------------------------
2959// Specific instructions, constants, and masks.
2960// These constants are declared in assembler-arm.cc, as they use named registers
2961// and other constants.
2963// add(sp, sp, 4) instruction (aka Pop())
2964extern const Instr kPopInstruction;
2965
2966// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
2967// register r is not encoded.
2968extern const Instr kPushRegPattern;
2969
2970// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
2971// register r is not encoded.
2972extern const Instr kPopRegPattern;
2973
2974// use TWI to indicate redirection call for simulation mode
2975const Instr rtCallRedirInstr = TWI;
2976
// -----------------------------------------------------------------------------
// Instruction abstraction.

// The class Instruction enables access to individual fields defined in the PPC
// architecture instruction set encoding.
// Note that the Assembler uses typedef int32_t Instr.
//
// Example: Test whether the instruction at ptr does set the condition code
// bits.
//
// bool InstructionSetsConditionCodes(uint8_t* ptr) {
//   Instruction* instr = Instruction::At(ptr);
//   int type = instr->TypeValue();
//   return ((type == 0) || (type == 1)) && instr->HasS();
// }
//

// Every PPC instruction is a fixed 4 bytes wide.
constexpr uint8_t kInstrSize = 4;
constexpr uint8_t kInstrSizeLog2 = 2;
constexpr uint8_t kPcLoadDelta = 8;
2997
// Instruction: zero-size overlay type giving field-level access to a 32-bit
// PPC instruction in memory; obtain instances only via Instruction::At(pc).
// NOTE(review): this listing is a documentation extract — multiple lines were
// dropped (the macro case lists inside OpcodeBase(), the leading portion of
// the SetInstructionBits declaration, a closing brace of the static BitField
// helper, and the line preventing instance construction). It is not
// compilable as-is; reconcile with the canonical source before editing.
2998class Instruction {
2999 public:
3000// Helper macro to define static accessors.
3001// We use the cast to char* trick to bypass the strict anti-aliasing rules.
3002#define DECLARE_STATIC_TYPED_ACCESSOR(return_type, Name) \
3003 static inline return_type Name(Instr instr) { \
3004 char* temp = reinterpret_cast<char*>(&instr); \
3005 return reinterpret_cast<Instruction*>(temp)->Name(); \
3007
3008#define DECLARE_STATIC_ACCESSOR(Name) DECLARE_STATIC_TYPED_ACCESSOR(int, Name)
3010 // Get the raw instruction bits.
3011 inline Instr InstructionBits() const {
3012 return *reinterpret_cast<const Instr*>(this);
3013 }
3015 // Set the raw instruction bits to value.
// NOTE(review): the declaration header (return type and function name) of
// SetInstructionBits was lost in extraction; only its parameter list remains.
3017 Instr value, WritableJitAllocation* jit_allocation = nullptr);
3019 // Read one particular bit out of the instruction bits.
3020 inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; }
3022 // Read a bit field's value out of the instruction bits.
3023 inline int Bits(int hi, int lo) const {
3024 return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
3025 }
3027 // Read a bit field out of the instruction bits.
3028 inline uint32_t BitField(int hi, int lo) const {
3029 return InstructionBits() & (((2 << (hi - lo)) - 1) << lo);
3030 }
3031
3032 // Static support.
3034 // Read one particular bit out of the instruction bits.
3035 static inline int Bit(Instr instr, int nr) { return (instr >> nr) & 1; }
3037 // Read the value of a bit field out of the instruction bits.
3038 static inline int Bits(Instr instr, int hi, int lo) {
3039 return (instr >> lo) & ((2 << (hi - lo)) - 1);
3040 }
3042 // Read a bit field out of the instruction bits.
3043 static inline uint32_t BitField(Instr instr, int hi, int lo) {
3044 return instr & (((2 << (hi - lo)) - 1) << lo);
// NOTE(review): the closing brace of the static BitField helper (original
// line 3045) is missing from this extract.
// Register-field accessors: RS/RT share bits 25-21; RA is 20-16, RB 15-11,
// RC 10-6.
3047 inline int RSValue() const { return Bits(25, 21); }
3048 inline int RTValue() const { return Bits(25, 21); }
3049 inline int RAValue() const { return Bits(20, 16); }
3051 inline int RBValue() const { return Bits(15, 11); }
3053 inline int RCValue() const { return Bits(10, 6); }
3056 inline int OpcodeValue() const { return static_cast<Opcode>(Bits(31, 26)); }
3057 inline uint32_t OpcodeField() const {
3058 return static_cast<Opcode>(BitField(31, 26));
3059 }
3060 inline uint32_t PrefixOpcodeField() const {
3061 return static_cast<Opcode>(BitField(31, 25));
3063
3064#define OPCODE_CASES(name, opcode_name, opcode_value) case opcode_name:
3065
// Decode the full opcode by progressively OR-ing wider extended-opcode bit
// fields into the primary opcode and testing each candidate against the
// opcode tables. NOTE(review): every switch below originally contained
// OPCODE_CASES expansions of the per-form opcode lists; those case lines were
// dropped by the extraction, leaving only the return statements.
3066 inline Opcode OpcodeBase() const {
3067 uint32_t opcode = PrefixOpcodeField();
3068 uint32_t extcode = PrefixOpcodeField();
3069 // Check for prefix.
3070 switch (opcode) {
3073 return static_cast<Opcode>(opcode);
3074 }
3075 opcode = OpcodeField();
3076 extcode = OpcodeField();
3077 // Check for suffix.
3078 switch (opcode) {
3080 return static_cast<Opcode>(opcode);
3081 }
3082 switch (opcode) {
3087 return static_cast<Opcode>(opcode);
3088 }
3089 opcode = extcode | BitField(5, 0);
3090 switch (opcode) {
3092 return static_cast<Opcode>(opcode);
3093 }
3094 // Some VX opcodes have integers hard coded in the middle, handle those
3095 // first.
3096 opcode = extcode | BitField(20, 16) | BitField(10, 0);
3097 switch (opcode) {
3100 return static_cast<Opcode>(opcode);
3101 }
3102 opcode = extcode | BitField(10, 0);
3103 switch (opcode) {
3111 return static_cast<Opcode>(opcode);
3112 }
3113 opcode = extcode | BitField(9, 0);
3114 switch (opcode) {
3116 return static_cast<Opcode>(opcode);
3117 }
3118 opcode = extcode | BitField(10, 1) | BitField(20, 20);
3119 switch (opcode) {
3121 return static_cast<Opcode>(opcode);
3122 }
3123 // Some XX2 opcodes have integers hard coded in the middle, handle those
3124 // first.
3125 opcode = extcode | BitField(20, 16) | BitField(10, 2);
3126 switch (opcode) {
3128 return static_cast<Opcode>(opcode);
3129 }
3130 opcode = extcode | BitField(10, 2);
3131 switch (opcode) {
3135 return static_cast<Opcode>(opcode);
3136 }
3137 opcode = extcode | BitField(10, 1);
3138 switch (opcode) {
3144 return static_cast<Opcode>(opcode);
3145 }
3146 opcode = extcode | BitField(9, 1);
3147 switch (opcode) {
3150 return static_cast<Opcode>(opcode);
3151 }
3152 opcode = extcode | BitField(10, 2);
3153 switch (opcode) {
3155 return static_cast<Opcode>(opcode);
3156 }
3157 opcode = extcode | BitField(9, 3);
3158 switch (opcode) {
3160 return static_cast<Opcode>(opcode);
3161 }
3162 opcode = extcode | BitField(10, 3);
3163 switch (opcode) {
3167 return static_cast<Opcode>(opcode);
3168 }
3169 opcode = extcode | BitField(8, 1);
3170 switch (opcode) {
3172 return static_cast<Opcode>(opcode);
3173 }
3174 opcode = extcode | BitField(5, 1);
3175 switch (opcode) {
3177 return static_cast<Opcode>(opcode);
3178 }
3179 opcode = extcode | BitField(4, 1);
3180 switch (opcode) {
3182 return static_cast<Opcode>(opcode);
3183 }
3184 opcode = extcode | BitField(4, 2);
3185 switch (opcode) {
3187 return static_cast<Opcode>(opcode);
3188 }
3189 opcode = extcode | BitField(5, 4);
3190 switch (opcode) {
3192 return static_cast<Opcode>(opcode);
3193 }
3194 opcode = extcode | BitField(2, 0);
3195 switch (opcode) {
3197 return static_cast<Opcode>(opcode);
3198 }
3199 opcode = extcode | BitField(1, 0);
3200 switch (opcode) {
3202 return static_cast<Opcode>(opcode);
3203 }
3204 opcode = extcode | BitField(1, 1);
3205 switch (opcode) {
3207 return static_cast<Opcode>(opcode);
3208 }
// No table matched: unknown/unsupported encoding.
3209 UNIMPLEMENTED();
3210 return static_cast<Opcode>(0);
3211 }
3212
3213#undef OPCODE_CASES
3215 // Fields used in Software interrupt instructions
3216 inline SoftwareInterruptCodes SvcValue() const {
3217 return static_cast<SoftwareInterruptCodes>(Bits(23, 0));
3218 }
3219
3220 // Instructions are read of out a code stream. The only way to get a
3221 // reference to an instruction is to convert a pointer. There is no way
3222 // to allocate or create instances of class Instruction.
3223 // Use the At(pc) function to create references to Instruction.
3224 static Instruction* At(uint8_t* pc) {
3225 return reinterpret_cast<Instruction*>(pc);
3226 }
3227
3228 private:
3229 // We need to prevent the creation of instances of class Instruction.
// NOTE(review): the constructor-suppression line (original line 3230) was
// dropped in extraction.
3231};
3232
3233// Helper functions for converting between register numbers and names.
3234class Registers {
3235 public:
3236 // Lookup the register number for the name provided.
3237 static int Number(const char* name);
3238
3239 private:
3240 static const char* names_[kNumRegisters];
3241};
3242
3243// Helper functions for converting between FP register numbers and names.
3244class DoubleRegisters {
3245 public:
3246 // Lookup the register number for the name provided.
3247 static int Number(const char* name);
3249 private:
3250 static const char* names_[kNumDoubleRegisters];
3251};
3252} // namespace internal
3253} // namespace v8
// DWARF register numbers used for stack unwinding.
// NOTE(review): in this extract these constants appear after the closing of
// namespace v8::internal; confirm their intended namespace placement against
// the canonical source when merging.
static constexpr int kR0DwarfCode = 0;
static constexpr int kFpDwarfCode = 31;  // frame-pointer
static constexpr int kLrDwarfCode = 65;  // return-address(lr)
static constexpr int kSpDwarfCode = 1;   // stack-pointer (sp)
3259
3260#endif // V8_CODEGEN_PPC_CONSTANTS_PPC_H_
static const char * names_[kNumDoubleRegisters]
int BitField(int hi, int lo) const
SoftwareInterruptCodes SvcValue() const
int Bits(int hi, int lo) const
uint32_t PrefixOpcodeField() const
V8_EXPORT_PRIVATE void SetInstructionBits(Instr value, WritableJitAllocation *jit_allocation=nullptr)
static Instruction * At(Address pc)
static const char * names_[kNumRegisters]
#define DECLARE_STATIC_ACCESSOR(Name)
#define PPC_VX_OPCODE_A_FORM_LIST(V)
#define PPC_PREFIX_OPCODE_TYPE_00_LIST(V)
#define PPC_DS_OPCODE_LIST(V)
#define PPC_XX1_OPCODE_LIST(V)
#define PPC_VX_OPCODE_G_FORM_LIST(V)
#define PPC_SC_OPCODE_LIST(V)
#define PPC_VA_OPCODE_LIST(V)
#define DECLARE_INSTRUCTION(name, opcode_name, opcode_value)
static constexpr int kLrDwarfCode
#define PPC_VX_OPCODE_C_FORM_LIST(V)
#define PPC_VX_OPCODE_B_FORM_LIST(V)
#define PPC_VX_OPCODE_D_FORM_LIST(V)
#define PPC_EVS_OPCODE_LIST(V)
#define PPC_X_OPCODE_LIST(V)
#define PPC_I_OPCODE_LIST(V)
#define PPC_OPCODE_LIST(V)
static constexpr int kR0DwarfCode
static constexpr int kSpDwarfCode
#define PPC_XL_OPCODE_LIST(V)
#define PPC_XX3_OPCODE_SCALAR_LIST(V)
#define PPC_M_OPCODE_LIST(V)
#define PPC_XX2_OPCODE_VECTOR_A_FORM_LIST(V)
#define PPC_XFX_OPCODE_LIST(V)
#define PPC_XX3_OPCODE_VECTOR_B_FORM_LIST(V)
#define PPC_XX4_OPCODE_LIST(V)
#define PPC_XX3_OPCODE_VECTOR_A_FORM_LIST(V)
#define PPC_VX_OPCODE_E_FORM_LIST(V)
#define PPC_VC_OPCODE_LIST(V)
static constexpr int kFpDwarfCode
#define PPC_D_OPCODE_LIST(V)
#define PPC_VX_OPCODE_UNUSED_LIST(V)
#define PPC_X_OPCODE_EH_S_FORM_LIST(V)
#define PPC_VX_OPCODE_F_FORM_LIST(V)
#define PPC_XO_OPCODE_LIST(V)
#define PPC_B_OPCODE_LIST(V)
#define PPC_Z22_OPCODE_LIST(V)
#define PPC_DQ_OPCODE_LIST(V)
#define PPC_EVX_OPCODE_LIST(V)
#define PPC_Z23_OPCODE_LIST(V)
#define PPC_XX2_OPCODE_UNUSED_LIST(V)
#define PPC_XX2_OPCODE_B_FORM_LIST(V)
#define PPC_XX2_OPCODE_SCALAR_A_FORM_LIST(V)
#define PPC_A_OPCODE_LIST(V)
#define PPC_PREFIX_OPCODE_TYPE_10_LIST(V)
#define OPCODE_CASES(name, opcode_name, opcode_value)
#define PPC_XFL_OPCODE_LIST(V)
#define PPC_MDS_OPCODE_LIST(V)
#define PPC_MD_OPCODE_LIST(V)
#define PPC_XS_OPCODE_LIST(V)
Instruction * instr
constexpr int B6
constexpr VFPRoundingMode kRoundToNearest
const Instr kPopInstruction
constexpr int B18
constexpr uint8_t kPcLoadDelta
constexpr int32_t kDefaultStopCode
constexpr VFPRoundingMode kRoundToMinusInf
constexpr int B21
constexpr VFPRoundingMode RP
constexpr int B10
constexpr int B25
constexpr int B7
constexpr int B17
constexpr int kOff12Mask
@ kDontCheckForInexactConversion
constexpr int B16
constexpr SoftwareInterruptCodes kStopCode
const Instr rtCallRedirInstr
constexpr uint32_t kMaxStopCode
const int kImm18Mask
const int kLoadPtrMaxReachBits
constexpr int B4
const int kNumDoubleRegisters
constexpr int B26
constexpr SoftwareInterruptCodes kBreakpoint
constexpr int kImm16Mask
constexpr int B8
Union< Smi, HeapNumber > Number
Definition globals.h:1181
constexpr VFPRoundingMode RM
constexpr uint8_t kInstrSizeLog2
constexpr int B24
constexpr int B28
constexpr VFPRoundingMode kRoundToPlusInf
constexpr SoftwareInterruptCodes kCallRtRedirected
constexpr int kImm24Mask
const uint32_t kFPRoundingModeMask
constexpr int kNoRegister
Condition NegateCondition(Condition cond)
constexpr VFPRoundingMode RZ
constexpr size_t kMaxPCRelativeCodeRangeInMB
constexpr int kHasFunctionDescriptorBitShift
constexpr int B19
constexpr int B9
constexpr int kHasFunctionDescriptorBitMask
constexpr VFPRoundingMode kRoundToZero
constexpr int B5
constexpr int kCondMask
constexpr int B12
constexpr VFPRoundingMode RN
constexpr int B23
constexpr uint8_t kInstrSize
const int kOpcodeMask
constexpr int kRootRegisterBias
const Instr kPushRegPattern
const Instr kPopRegPattern
constexpr int B27
const int kLoadDoubleMaxReachBits
Condition to_condition(Condition cond)
bool is_signed(Condition cond)
constexpr uint32_t kStopCodeMask
constexpr int kNumRegisters
constexpr int B20
constexpr int B22
#define DCHECK(condition)
Definition logging.h:482
#define V8_EXPORT_PRIVATE
Definition macros.h:460
#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName)
Definition macros.h:130