instructions-arm64.h

V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_CODEGEN_ARM64_INSTRUCTIONS_ARM64_H_
#define V8_CODEGEN_ARM64_INSTRUCTIONS_ARM64_H_

#include "src/base/memory.h"
#include "src/codegen/arm64/constants-arm64.h"
#include "src/codegen/arm64/register-arm64.h"
#include "src/codegen/arm64/utils-arm64.h"
#include "src/common/code-memory-access.h"
#include "src/common/globals.h"
#include "src/utils/utils.h"

namespace v8 {
namespace internal {

struct AssemblerOptions;
class Zone;

// ISA constants. --------------------------------------------------------------

using Instr = uint32_t;

#if defined(V8_OS_WIN)
extern "C" {
#endif

extern const float16 kFP16PositiveInfinity;
extern const float16 kFP16NegativeInfinity;
V8_EXPORT_PRIVATE extern const float kFP32PositiveInfinity;
V8_EXPORT_PRIVATE extern const float kFP32NegativeInfinity;
V8_EXPORT_PRIVATE extern const double kFP64PositiveInfinity;
V8_EXPORT_PRIVATE extern const double kFP64NegativeInfinity;

// This value is a signalling NaN as both a double and as a float (taking the
// least-significant word).
V8_EXPORT_PRIVATE extern const double kFP64SignallingNaN;
V8_EXPORT_PRIVATE extern const float kFP32SignallingNaN;

// A similar value, but as a quiet NaN.
V8_EXPORT_PRIVATE extern const double kFP64QuietNaN;
V8_EXPORT_PRIVATE extern const float kFP32QuietNaN;

// The default NaN values (for FPCR.DN=1).
V8_EXPORT_PRIVATE extern const double kFP64DefaultNaN;
V8_EXPORT_PRIVATE extern const float kFP32DefaultNaN;
extern const float16 kFP16DefaultNaN;

#if defined(V8_OS_WIN)
}  // end of extern "C"
#endif

unsigned CalcLSDataSizeLog2(LoadStoreOp op);
unsigned CalcLSPairDataSize(LoadStorePairOp op);

enum ImmBranchType {
  UnknownBranchType = 0,
  CondBranchType = 1,
  UncondBranchType = 2,
  CompareBranchType = 3,
  TestBranchType = 4
};

enum AddrMode { Offset, PreIndex, PostIndex };

enum FPRounding {
  // The first four values are encodable directly by FPCR<RMode>.
  FPTieEven = 0x0,
  FPPositiveInfinity = 0x1,
  FPNegativeInfinity = 0x2,
  FPZero = 0x3,

  // The final rounding modes are only available when explicitly specified by
  // the instruction (such as with fcvta). They cannot be set in FPCR.
  FPTieAway,
  FPRoundOdd
};

enum Reg31Mode { Reg31IsStackPointer, Reg31IsZeroRegister };

// Instructions. ---------------------------------------------------------------

class Instruction {
 public:
  V8_INLINE Instr InstructionBits() const {
    // Usually this is aligned, but when de/serializing that's not guaranteed.
    return base::ReadUnalignedValue<Instr>(reinterpret_cast<Address>(this));
  }

  V8_EXPORT_PRIVATE void SetInstructionBits(
      Instr new_instr, WritableJitAllocation* jit_allocation = nullptr);

  int Bit(int pos) const { return (InstructionBits() >> pos) & 1; }

  uint32_t Bits(int msb, int lsb) const {
    return unsigned_bitextract_32(msb, lsb, InstructionBits());
  }

  int32_t SignedBits(int msb, int lsb) const {
    // Usually this is aligned, but when de/serializing that's not guaranteed.
    int32_t bits =
        base::ReadUnalignedValue<int32_t>(reinterpret_cast<Address>(this));
    return signed_bitextract_32(msb, lsb, bits);
  }

  Instr Mask(uint32_t mask) const { return InstructionBits() & mask; }

  V8_INLINE const Instruction* following(int count = 1) const {
    return InstructionAtOffset(count * static_cast<int>(kInstrSize));
  }

  V8_INLINE Instruction* following(int count = 1) {
    return InstructionAtOffset(count * static_cast<int>(kInstrSize));
  }

  V8_INLINE const Instruction* preceding(int count = 1) const {
    return following(-count);
  }

  V8_INLINE Instruction* preceding(int count = 1) { return following(-count); }

#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
  int32_t Name() const { return Func(HighBit, LowBit); }
  INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
#undef DEFINE_GETTER

  // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
  // formed from ImmPCRelLo and ImmPCRelHi.
  int ImmPCRel() const {
    DCHECK(IsPCRelAddressing());
    int offset = (static_cast<uint32_t>(ImmPCRelHi()) << ImmPCRelLo_width) |
                 ImmPCRelLo();
    int width = ImmPCRelLo_width + ImmPCRelHi_width;
    return signed_bitextract_32(width - 1, 0, offset);
  }

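  // For example, an adr that encodes a byte offset of +0x1234 stores the low
  // two bits (0b00) in ImmPCRelLo and the remaining bits (0x48D) in
  // ImmPCRelHi; ImmPCRel() reassembles and sign-extends them back to 0x1234.
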
  uint64_t ImmLogical();
  unsigned ImmNEONabcdefgh() const;
  float ImmFP32();
  double ImmFP64();
  float ImmNEONFP32() const;
  double ImmNEONFP64() const;

  unsigned SizeLS() const {
    return CalcLSDataSizeLog2(static_cast<LoadStoreOp>(Mask(LoadStoreMask)));
  }

  unsigned SizeLSPair() const {
    return CalcLSPairDataSize(
        static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
  }

  int NEONLSIndex(int access_size_shift) const {
    int q = NEONQ();
    int s = NEONS();
    int size = NEONLSSize();
    int index = (q << 3) | (s << 2) | size;
    return index >> access_size_shift;
  }

  // Helpers.
  bool IsCondBranchImm() const {
    return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
  }

  bool IsUncondBranchImm() const {
    return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
  }

  bool IsCompareBranch() const {
    return Mask(CompareBranchFMask) == CompareBranchFixed;
  }

  bool IsTestBranch() const { return Mask(TestBranchFMask) == TestBranchFixed; }

  bool IsImmBranch() const { return BranchType() != UnknownBranchType; }

  static float Imm8ToFP32(uint32_t imm8) {
    // Imm8: abcdefgh (8 bits)
    // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
    // where B is b ^ 1
    uint32_t bits = imm8;
    uint32_t bit7 = (bits >> 7) & 0x1;
    uint32_t bit6 = (bits >> 6) & 0x1;
    uint32_t bit5_to_0 = bits & 0x3f;
    uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);

    return base::bit_cast<float>(result);
  }

  static double Imm8ToFP64(uint32_t imm8) {
    // Imm8: abcdefgh (8 bits)
    // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
    //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
    // where B is b ^ 1
    uint32_t bits = imm8;
    uint64_t bit7 = (bits >> 7) & 0x1;
    uint64_t bit6 = (bits >> 6) & 0x1;
    uint64_t bit5_to_0 = bits & 0x3f;
    uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);

    return base::bit_cast<double>(result);
  }

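  // As a worked example of the two helpers above: imm8 = 0x70 (a = 0, b = 1,
  // c = 1, d = 1, efgh = 0000) expands to 0x3F800000 (1.0f) as a single and to
  // 0x3FF0000000000000 (1.0) as a double. The (32 - bit6) and (256 - bit6)
  // terms produce NOT(b) followed by copies of b in the high exponent bits.
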
  bool IsLdrLiteral() const {
    return Mask(LoadLiteralFMask) == LoadLiteralFixed;
  }

  bool IsLdrLiteralX() const { return Mask(LoadLiteralMask) == LDR_x_lit; }
  bool IsLdrLiteralW() const { return Mask(LoadLiteralMask) == LDR_w_lit; }

  bool IsPCRelAddressing() const {
    return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
  }

  bool IsAdr() const { return Mask(PCRelAddressingMask) == ADR; }

  bool IsBrk() const { return Mask(ExceptionMask) == BRK; }

  bool IsUnresolvedInternalReference() const {
    // Unresolved internal references are encoded as two consecutive brk
    // instructions.
    return IsBrk() && following()->IsBrk();
  }

  bool IsLogicalImmediate() const {
    return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
  }

  bool IsAddSubImmediate() const {
    return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
  }

  bool IsAddSubShifted() const {
    return Mask(AddSubShiftedFMask) == AddSubShiftedFixed;
  }

  bool IsAddSubExtended() const {
    return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
  }

  // Match any loads or stores, including pairs.
  bool IsLoadOrStore() const {
    return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
  }

  // Match any loads, including pairs.
  bool IsLoad() const;
  // Match any stores, including pairs.
  bool IsStore() const;

  // Indicate whether Rd can be the stack pointer or the zero register. This
  // does not check that the instruction actually has an Rd field.
  Reg31Mode RdMode() const {
    // The following instructions use sp or wsp as Rd:
    //  Add/sub (immediate) when not setting the flags.
    //  Add/sub (extended) when not setting the flags.
    //  Logical (immediate) when not setting the flags.
    // Otherwise, r31 is the zero register.
    if (IsAddSubImmediate() || IsAddSubExtended()) {
      if (Mask(AddSubSetFlagsBit)) {
        return Reg31IsZeroRegister;
      } else {
        return Reg31IsStackPointer;
      }
    }
    if (IsLogicalImmediate()) {
      // Of the logical (immediate) instructions, only ANDS (and its aliases)
      // can set the flags. The others can all write into sp.
      // Note that some logical operations are not available to
      // immediate-operand instructions, so we have to combine two masks here.
      if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
        return Reg31IsZeroRegister;
      } else {
        return Reg31IsStackPointer;
      }
    }
    return Reg31IsZeroRegister;
  }

  // Indicate whether Rn can be the stack pointer or the zero register. This
  // does not check that the instruction actually has an Rn field.
  Reg31Mode RnMode() const {
    // The following instructions use sp or wsp as Rn:
    //  All loads and stores.
    //  Add/sub (immediate).
    //  Add/sub (extended).
    // Otherwise, r31 is the zero register.
    if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
      return Reg31IsStackPointer;
    }
    return Reg31IsZeroRegister;
  }

  ImmBranchType BranchType() const {
    if (IsCondBranchImm()) {
      return CondBranchType;
    } else if (IsUncondBranchImm()) {
      return UncondBranchType;
    } else if (IsCompareBranch()) {
      return CompareBranchType;
    } else if (IsTestBranch()) {
      return TestBranchType;
    } else {
      return UnknownBranchType;
    }
  }

  static constexpr int ImmBranchRangeBitwidth(ImmBranchType branch_type) {
    switch (branch_type) {
      case UncondBranchType:
        return ImmUncondBranch_width;
      case CondBranchType:
        return ImmCondBranch_width;
      case CompareBranchType:
        return ImmCmpBranch_width;
      case TestBranchType:
        return ImmTestBranch_width;
      default:
        UNREACHABLE();
    }
  }

  // The range of the branch instruction, expressed as 'instr +- range'.
  static constexpr int32_t ImmBranchRange(ImmBranchType branch_type) {
    return (1 << (ImmBranchRangeBitwidth(branch_type) + kInstrSizeLog2)) / 2 -
           kInstrSize;
  }

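  // For instance, with kInstrSize == 4 and the usual ARM64 immediate widths
  // (26 bits unconditional, 19 bits conditional/compare, 14 bits test), this
  // evaluates to roughly +-128 MB for B and BL, +-1 MB for B.cond, CBZ and
  // CBNZ, and +-32 KB for TBZ and TBNZ (each minus one instruction).
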
  int ImmBranch() const {
    switch (BranchType()) {
      case CondBranchType:
        return ImmCondBranch();
      case UncondBranchType:
        return ImmUncondBranch();
      case CompareBranchType:
        return ImmCmpBranch();
      case TestBranchType:
        return ImmTestBranch();
      default:
        UNREACHABLE();
    }
    return 0;
  }

  int ImmUnresolvedInternalReference() const {
    DCHECK(IsUnresolvedInternalReference());
    // Unresolved references are encoded as two consecutive brk instructions.
    // The associated immediate is made of the two 16-bit payloads.
    int32_t high16 = ImmException();
    int32_t low16 = following()->ImmException();
    return (high16 << 16) | low16;
  }

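  // Sketch: an internal reference with value 0x00123456 would be encoded as
  // brk #0x0012 followed by brk #0x3456, and the two 16-bit payloads are
  // recombined here into the original 32-bit value.
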
  bool IsUnconditionalBranch() const {
    return Mask(UnconditionalBranchMask) == B;
  }

  bool IsBranchAndLink() const { return Mask(UnconditionalBranchMask) == BL; }

  bool IsBranchAndLinkToRegister() const {
    return Mask(UnconditionalBranchToRegisterMask) == BLR;
  }

  bool IsMovz() const {
    return (Mask(MoveWideImmediateMask) == MOVZ_x) ||
           (Mask(MoveWideImmediateMask) == MOVZ_w);
  }

  bool IsMovk() const {
    return (Mask(MoveWideImmediateMask) == MOVK_x) ||
           (Mask(MoveWideImmediateMask) == MOVK_w);
  }

  bool IsMovn() const {
    return (Mask(MoveWideImmediateMask) == MOVN_x) ||
           (Mask(MoveWideImmediateMask) == MOVN_w);
  }

  bool IsException() const { return Mask(ExceptionFMask) == ExceptionFixed; }

  bool IsPAuth() const { return Mask(SystemPAuthFMask) == SystemPAuthFixed; }

  bool IsBti() const {
    if (Mask(SystemHintFMask) == SystemHintFixed) {
      int imm_hint = ImmHint();
      switch (imm_hint) {
        case BTI:
        case BTI_c:
        case BTI_j:
        case BTI_jc:
          return true;
      }
    }
    return false;
  }

  bool IsNop(int n) {
    // A marking nop is an instruction
    //   mov r<n>, r<n>
    // which is encoded as
    //   orr r<n>, xzr, r<n>
    return (Mask(LogicalShiftedMask) == ORR_x) && (Rd() == Rm()) && (Rd() == n);
  }

  // Find the PC offset encoded in this instruction. 'this' may be a branch or
  // a PC-relative addressing instruction.
  // The offset returned is unscaled.
  V8_EXPORT_PRIVATE int64_t ImmPCOffset();

  // Find the target of this instruction. 'this' may be a branch or a
  // PC-relative addressing instruction.
  V8_EXPORT_PRIVATE Instruction* ImmPCOffsetTarget();

  // Check if the offset is in range of a given branch type. The offset is
  // a byte offset, unscaled.
  static constexpr bool IsValidImmPCOffset(ImmBranchType branch_type,
                                           ptrdiff_t offset) {
    DCHECK_EQ(offset % kInstrSize, 0);
    return is_intn(offset / kInstrSize, ImmBranchRangeBitwidth(branch_type));
  }

  bool IsTargetInImmPCOffsetRange(Instruction* target);
  // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
  // a PC-relative addressing instruction.
  void SetImmPCOffsetTarget(Zone* zone, AssemblerOptions options,
                            Instruction* target);
  void SetUnresolvedInternalReferenceImmTarget(Zone* zone,
                                               AssemblerOptions options,
                                               Instruction* target);
  // Patch a literal load instruction to load from 'source'.
  void SetImmLLiteral(Instruction* source);

  uintptr_t LiteralAddress() {
    int offset = ImmLLiteral() * kLoadLiteralScale;
    return reinterpret_cast<uintptr_t>(this) + offset;
  }

  enum CheckAlignment { NO_CHECK, CHECK_ALIGNMENT };

  V8_INLINE const Instruction* InstructionAtOffset(
      int64_t offset, CheckAlignment check = CHECK_ALIGNMENT) const {
    // The FUZZ_disasm test relies on no check being done.
    DCHECK(check == NO_CHECK || IsAligned(offset, kInstrSize));
    return this + offset;
  }

  V8_INLINE Instruction* InstructionAtOffset(
      int64_t offset, CheckAlignment check = CHECK_ALIGNMENT) {
    // The FUZZ_disasm test relies on no check being done.
    DCHECK(check == NO_CHECK || IsAligned(offset, kInstrSize));
    return this + offset;
  }

  template <typename T>
  V8_INLINE static Instruction* Cast(T src) {
    return reinterpret_cast<Instruction*>(src);
  }

  V8_INLINE ptrdiff_t DistanceTo(Instruction* target) {
    return reinterpret_cast<Address>(target) - reinterpret_cast<Address>(this);
  }

  static const int ImmPCRelRangeBitwidth = 21;
  static bool IsValidPCRelOffset(ptrdiff_t offset) { return is_int21(offset); }
  void SetPCRelImmTarget(Zone* zone, AssemblerOptions options,
                         Instruction* target);

  template <ImmBranchType branch_type>
  void SetBranchImmTarget(Instruction* target,
                          WritableJitAllocation* jit_allocation = nullptr) {
    DCHECK(IsAligned(DistanceTo(target), kInstrSize));
    DCHECK(IsValidImmPCOffset(branch_type, DistanceTo(target)));
    int offset = static_cast<int>(DistanceTo(target) >> kInstrSizeLog2);
    Instr branch_imm = 0;
    uint32_t imm_mask = 0;
    switch (branch_type) {
      case CondBranchType:
      case CompareBranchType:
        static_assert(ImmCondBranch_mask == ImmCmpBranch_mask);
        static_assert(ImmCondBranch_offset == ImmCmpBranch_offset);
        // We use a checked truncation here to catch certain bugs where we fail
        // to check whether a veneer is required. See e.g. crbug.com/1485829.
        branch_imm = checked_truncate_to_int19(offset) << ImmCondBranch_offset;
        imm_mask = ImmCondBranch_mask;
        break;
      case UncondBranchType:
        branch_imm = checked_truncate_to_int26(offset)
                     << ImmUncondBranch_offset;
        imm_mask = ImmUncondBranch_mask;
        break;
      case TestBranchType:
        branch_imm = checked_truncate_to_int14(offset) << ImmTestBranch_offset;
        imm_mask = ImmTestBranch_mask;
        break;
      default:
        UNREACHABLE();
    }
    SetInstructionBits(Mask(~imm_mask) | branch_imm, jit_allocation);
  }
};

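// A sketch of how the patching helpers above are typically used: given a
// conditional branch that must be redirected to a veneer within +-1 MB, only
// the immediate field is rewritten in place:
//
//   Instruction* branch = ...;  // an existing b.cond
//   Instruction* veneer = ...;  // the new target
//   branch->SetBranchImmTarget<CondBranchType>(veneer);
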
// Simulator/Debugger debug instructions ---------------------------------------
// Each debug marker is represented by a HLT instruction. The immediate comment
// field in the instruction is used to identify the type of debug marker. Each
// marker encodes arguments in a different way, as described below.

// Indicate to the Debugger that the instruction is a redirected call.
const Instr kImmExceptionIsRedirectedCall = 0xca11;

// Represent unreachable code. This is used as a guard in parts of the code that
// should not be reachable, such as in data encoded inline in the instructions.
const Instr kImmExceptionIsUnreachable = 0xdebf;

// Indicate that the stack is being switched, so the simulator must update its
// stack limit. The new stack limit is passed in x16.

// A pseudo 'printf' instruction. The arguments will be passed to the platform
// printf method.
const Instr kImmExceptionIsPrintf = 0xdeb1;
// Most parameters are stored in ARM64 registers as if the printf
// pseudo-instruction was a call to the real printf method:
//  x0: The format string.
//  x1-x7: Optional arguments.
//  d0-d7: Optional arguments.
//
// Also, the argument layout is described inline in the instructions:
//  - arg_count: The number of arguments.
//  - arg_pattern: A set of PrintfArgPattern values, packed into two-bit fields.
//
// Floating-point and integer arguments are passed in separate sets of registers
// in AAPCS64 (even for varargs functions), so it is not possible to determine
// the type of each argument without some information about the values that were
// passed in. This information could be retrieved from the printf format string,
// but the format string is not trivial to parse so we encode the relevant
// information with the HLT instruction.
const unsigned kPrintfArgCountOffset = 1 * kInstrSize;
const unsigned kPrintfArgPatternListOffset = 2 * kInstrSize;
const unsigned kPrintfLength = 3 * kInstrSize;

const unsigned kPrintfMaxArgCount = 4;

// The argument pattern is a set of two-bit-fields, each with one of the
// following values:
enum PrintfArgPattern {
  kPrintfArgW = 1,
  kPrintfArgX = 2,
  // There is no kPrintfArgS because floats are always converted to doubles in C
  // varargs calls.
  kPrintfArgD = 3
};
static const unsigned kPrintfArgPatternBits = 2;

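// One possible layout of the emitted pseudo-printf sequence (the exact
// emission is up to the macro-assembler):
//
//   hlt #kImmExceptionIsPrintf
//   .word arg_count          ; read at kPrintfArgCountOffset
//   .word arg_pattern_list   ; read at kPrintfArgPatternListOffset
//
// e.g. a call printing "%d %f" would describe one W-sized and one D-sized
// argument via two-bit kPrintfArgW and kPrintfArgD fields in the pattern list.
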
// A pseudo 'debug' instruction.
const Instr kImmExceptionIsDebug = 0xdeb0;
// Parameters are inlined in the code after a debug pseudo-instruction:
// - Debug code.
// - Debug parameters.
// - Debug message string. This is a nullptr-terminated ASCII string, padded to
//   kInstrSize so that subsequent instructions are correctly aligned.
// - A kImmExceptionIsUnreachable marker, to catch accidental execution of the
//   string data.
const unsigned kDebugCodeOffset = 1 * kInstrSize;
const unsigned kDebugParamsOffset = 2 * kInstrSize;
const unsigned kDebugMessageOffset = 3 * kInstrSize;

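// Sketch of the layout those offsets describe, relative to the marker (the
// message is padded to a multiple of kInstrSize):
//
//   hlt #kImmExceptionIsDebug
//   .word code                       ; at kDebugCodeOffset
//   .word parameters                 ; at kDebugParamsOffset
//   .asciz "message"                 ; at kDebugMessageOffset
//   hlt #kImmExceptionIsUnreachable  ; guards the inline string data
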
// Debug parameters.
// Used without a TRACE_ option, the Debugger will print the arguments only
// once. Otherwise TRACE_ENABLE and TRACE_DISABLE will enable or disable tracing
// before every instruction for the specified LOG_ parameters.
//
// TRACE_OVERRIDE enables the specified LOG_ parameters, and disables any
// others that were not specified.
//
// For example:
//
// __ debug("print registers and fp registers", 0, LOG_REGS | LOG_VREGS);
// will print the registers and fp registers only once.
//
// __ debug("trace disasm", 1, TRACE_ENABLE | LOG_DISASM);
// starts disassembling the code.
//
// __ debug("trace rets", 2, TRACE_ENABLE | LOG_REGS);
// adds the general purpose registers to the trace.
//
// __ debug("stop regs", 3, TRACE_DISABLE | LOG_REGS);
// stops tracing the registers.
const unsigned kDebuggerTracingDirectivesMask = 3 << 6;
enum DebugParameters {
  NO_PARAM = 0,
  BREAK = 1 << 0,
  LOG_DISASM = 1 << 1,    // Use only with TRACE. Disassemble the code.
  LOG_REGS = 1 << 2,      // Log general purpose registers.
  LOG_VREGS = 1 << 3,     // Log NEON and floating-point registers.
  LOG_SYS_REGS = 1 << 4,  // Log the status flags.
  LOG_WRITE = 1 << 5,     // Log any memory write.

  LOG_NONE = 0,
  LOG_STATE = LOG_REGS | LOG_VREGS | LOG_SYS_REGS,
  LOG_ALL = LOG_STATE | LOG_WRITE,

  // Trace control.
  TRACE_ENABLE = 1 << 6,
  TRACE_DISABLE = 2 << 6,
  TRACE_OVERRIDE = 3 << 6
};

enum NEONFormat {
  NF_UNDEF = 0,
  NF_8B = 1,
  NF_16B = 2,
  NF_4H = 3,
  NF_8H = 4,
  NF_2S = 5,
  NF_4S = 6,
  NF_1D = 7,
  NF_2D = 8,
  NF_B = 9,
  NF_H = 10,
  NF_S = 11,
  NF_D = 12
};

static const unsigned kNEONFormatMaxBits = 6;

struct NEONFormatMap {
  // The bit positions in the instruction to consider.
  uint8_t bits[kNEONFormatMaxBits];

  // Mapping from concatenated bits to format.
  NEONFormat map[1 << kNEONFormatMaxBits];
};

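// Each map lists the instruction bit positions to sample, most-significant
// index bit first; NEONFormatDecoder::PickBits concatenates those bits and the
// result indexes 'map'. For instance, with bits = {23, 22, 30} (size<1:0>:Q,
// as in the integer map below), an instruction with size = 00 and Q = 1
// produces index 0b001 and selects the 16B format.
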
class NEONFormatDecoder {
 public:
  enum SubstitutionMode { kPlaceholder, kFormat };

  // Construct a format decoder with increasingly specific format maps for each
  // substitution. If no format map is specified, the default is the integer
  // format map.
  explicit NEONFormatDecoder(const Instruction* instr);
  NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format);
  NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format0,
                    const NEONFormatMap* format1);
  NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format0,
                    const NEONFormatMap* format1, const NEONFormatMap* format2);

  // Set the format mapping for all or individual substitutions.
  void SetFormatMaps(const NEONFormatMap* format0,
                     const NEONFormatMap* format1 = nullptr,
                     const NEONFormatMap* format2 = nullptr);
  void SetFormatMap(unsigned index, const NEONFormatMap* format);

  // Substitute %s in the input string with the placeholder string for each
  // register, i.e. "'B", "'H", etc.
  const char* SubstitutePlaceholders(const char* string);

  // Substitute %s in the input string with a new string based on the
  // substitution mode.
  const char* Substitute(const char* string, SubstitutionMode mode0 = kFormat,
                         SubstitutionMode mode1 = kFormat,
                         SubstitutionMode mode2 = kFormat,
                         SubstitutionMode mode3 = kFormat);

  // Append a "2" to a mnemonic string based on the state of the Q bit.
  const char* Mnemonic(const char* mnemonic);

  VectorFormat GetVectorFormat(int format_index = 0);
  VectorFormat GetVectorFormat(const NEONFormatMap* format_map);

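  // Typical disassembler use (sketch; the exact format strings vary by
  // instruction class):
  //
  //   NEONFormatDecoder nfd(instr);
  //   // For an "add v0.8h, v1.8h, v2.8h" encoding, each %s expands to "8h".
  //   const char* form = nfd.Substitute("'Vd.%s, 'Vn.%s, 'Vm.%s");
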
  // Built in mappings for common cases.

  // The integer format map uses three bits (Q, size<1:0>) to encode the
  // "standard" set of NEON integer vector formats.
  static const NEONFormatMap* IntegerFormatMap() {
    static const NEONFormatMap map = {
        {23, 22, 30},
        {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_UNDEF, NF_2D}};
    return &map;
  }

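  // Worked example: for "add v2.4s, v0.4s, v1.4s", size is 10 and Q is 1, so
  // PickBits({23, 22, 30}) yields 0b101 = 5 and the map above selects NF_4S.
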
  // The long integer format map uses two bits (size<1:0>) to encode the
  // long set of NEON integer vector formats. These are used in narrow, wide
  // and long operations.
  static const NEONFormatMap* LongIntegerFormatMap() {
    static const NEONFormatMap map = {{23, 22}, {NF_8H, NF_4S, NF_2D}};
    return &map;
  }

  // The FP format map uses two bits (Q, size<0>) to encode the NEON FP vector
  // formats: NF_2S, NF_4S, NF_2D.
  static const NEONFormatMap* FPFormatMap() {
    // The FP format map assumes two bits (Q, size<0>) are used to encode the
    // NEON FP vector formats: NF_2S, NF_4S, NF_2D.
    static const NEONFormatMap map = {{22, 30},
                                      {NF_2S, NF_4S, NF_UNDEF, NF_2D}};
    return &map;
  }

  // The FP half-precision format map uses one Q bit to encode the
  // NEON FP vector formats: NF_4H, NF_8H.
  static const NEONFormatMap* FPHPFormatMap() {
    static const NEONFormatMap map = {{30}, {NF_4H, NF_8H}};
    return &map;
  }

  // The load/store format map uses three bits (Q, 11, 10) to encode the
  // set of NEON vector formats.
  static const NEONFormatMap* LoadStoreFormatMap() {
    static const NEONFormatMap map = {
        {11, 10, 30},
        {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}};
    return &map;
  }

  // The logical format map uses one bit (Q) to encode the NEON vector format:
  // NF_8B, NF_16B.
  static const NEONFormatMap* LogicalFormatMap() {
    static const NEONFormatMap map = {{30}, {NF_8B, NF_16B}};
    return &map;
  }

  // The triangular format map uses between two and five bits to encode the NEON
  // vector format:
  // xxx10->8B, xxx11->16B, xx100->4H, xx101->8H
  // x1000->2S, x1001->4S, 10001->2D, all others undefined.
  static const NEONFormatMap* TriangularFormatMap() {
    static const NEONFormatMap map = {
        {19, 18, 17, 16, 30},
        {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
         NF_2S,    NF_4S,    NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
         NF_UNDEF, NF_2D,    NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
         NF_2S,    NF_4S,    NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B}};
    return &map;
  }

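  // Worked example: an immh:Q pattern of 00101 matches xx101 above, so
  // PickBits({19, 18, 17, 16, 30}) returns 5 and map[5] is NF_8H.
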
  // The scalar format map uses two bits (size<1:0>) to encode the NEON scalar
  // formats: NF_B, NF_H, NF_S, NF_D.
  static const NEONFormatMap* ScalarFormatMap() {
    static const NEONFormatMap map = {{23, 22}, {NF_B, NF_H, NF_S, NF_D}};
    return &map;
  }

  // The long scalar format map uses two bits (size<1:0>) to encode the longer
  // NEON scalar formats: NF_H, NF_S, NF_D.
  static const NEONFormatMap* LongScalarFormatMap() {
    static const NEONFormatMap map = {{23, 22}, {NF_H, NF_S, NF_D}};
    return &map;
  }

  // The FP scalar format map assumes one bit (size<0>) is used to encode the
  // NEON FP scalar formats: NF_S, NF_D.
  static const NEONFormatMap* FPScalarFormatMap() {
    static const NEONFormatMap map = {{22}, {NF_S, NF_D}};
    return &map;
  }

  // The triangular scalar format map uses between one and four bits to encode
  // the NEON FP scalar formats:
  // xxx1->B, xx10->H, x100->S, 1000->D, all others undefined.
  static const NEONFormatMap* TriangularScalarFormatMap() {
    static const NEONFormatMap map = {
        {19, 18, 17, 16},
        {NF_UNDEF, NF_B, NF_H, NF_B, NF_S, NF_B, NF_H, NF_B, NF_D, NF_B, NF_H,
         NF_B, NF_S, NF_B, NF_H, NF_B}};
    return &map;
  }

 private:
  // Get a pointer to a string that represents the format or placeholder for
  // the specified substitution index, based on the format map and instruction.
  const char* GetSubstitute(int index, SubstitutionMode mode);

  // Get the NEONFormat enumerated value for bits obtained from the
  // instruction based on the specified format mapping.
  NEONFormat GetNEONFormat(const NEONFormatMap* format_map);

  // Convert a NEONFormat into a string.
  static const char* NEONFormatAsString(NEONFormat format);

  // Convert a NEONFormat into a register placeholder string.
  static const char* NEONFormatAsPlaceholder(NEONFormat format);

  // Select bits from instrbits_ defined by the bits array, concatenate them,
  // and return the value.
  uint8_t PickBits(const uint8_t bits[]);

  Instr instrbits_;
  const NEONFormatMap* formats_[4];
  char form_buffer_[64];
  char mne_buffer_[16];
};

}  // namespace internal
}  // namespace v8

#endif  // V8_CODEGEN_ARM64_INSTRUCTIONS_ARM64_H_