v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
instructions-arm64.cc
Go to the documentation of this file.
1// Copyright 2013 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#if V8_TARGET_ARCH_ARM64
6
8
11
12namespace v8 {
13namespace internal {
14
// Tail of Instruction::SetInstructionBits(Instr new_instr,
// WritableJitAllocation* jit_allocation): overwrites this instruction with
// `new_instr`, going through the JIT allocation when one is supplied,
// otherwise writing directly. Unaligned writes are used because, per the
// comment below, de/serialization does not guarantee alignment.
// NOTE(review): the signature line (original line 15) is missing from this
// extract — restore it from the upstream source before compiling.
16 WritableJitAllocation* jit_allocation) {
17 // Usually this is aligned, but when de/serializing that's not guaranteed.
18 if (jit_allocation) {
19 jit_allocation->WriteUnalignedValue(reinterpret_cast<Address>(this),
20 new_instr);
21 } else {
22 base::WriteUnalignedValue(reinterpret_cast<Address>(this), new_instr);
23 }
24}
25
// Returns true if this instruction is a register load: either the load half
// of the load/store-pair space (LoadStorePairLBit set) or one of the
// single-register LDR* encodings enumerated in the switch below.
// NOTE(review): original lines 27 and 31 are missing from this extract —
// they presumably hold the guards that reject non-load/store encodings and
// select the pair-instruction path. Restore from upstream before use.
26bool Instruction::IsLoad() const {
28 return false;
29 }
30
32 return Mask(LoadStorePairLBit) != 0;
33 } else {
34 LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
35 switch (op) {
36 case LDRB_w:
37 case LDRH_w:
38 case LDR_w:
39 case LDR_x:
40 case LDRSB_w:
41 case LDRSB_x:
42 case LDRSH_w:
43 case LDRSH_x:
44 case LDRSW_x:
45 case LDR_b:
46 case LDR_h:
47 case LDR_s:
48 case LDR_d:
49 case LDR_q:
50 return true;
51 default:
52 return false;
53 }
54 }
55}
56
// Returns true if this instruction is a register store: either the store
// half of the load/store-pair space (LoadStorePairLBit clear) or one of the
// single-register STR* encodings enumerated in the switch below.
// NOTE(review): original lines 58 and 62 are missing from this extract —
// they presumably hold the guards that reject non-load/store encodings and
// select the pair-instruction path. Restore from upstream before use.
57bool Instruction::IsStore() const {
59 return false;
60 }
61
63 return Mask(LoadStorePairLBit) == 0;
64 } else {
65 LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
66 switch (op) {
67 case STRB_w:
68 case STRH_w:
69 case STR_w:
70 case STR_x:
71 case STR_b:
72 case STR_h:
73 case STR_s:
74 case STR_d:
75 case STR_q:
76 return true;
77 default:
78 return false;
79 }
80 }
81}
82
83static uint64_t RotateRight(uint64_t value, unsigned int rotate,
84 unsigned int width) {
85 DCHECK_LE(width, 64);
86 rotate &= 63;
87 if (rotate == 0) return value;
88 return ((value & ((1ULL << rotate) - 1ULL)) << (width - rotate)) |
89 (value >> rotate);
90}
91
92static uint64_t RepeatBitsAcrossReg(unsigned reg_size, uint64_t value,
93 unsigned width) {
94 DCHECK((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
95 (width == 32));
96 DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
97 uint64_t result = value & ((1ULL << width) - 1ULL);
98 for (unsigned i = width; i < reg_size; i *= 2) {
99 result |= (result << i);
100 }
101 return result;
102}
103
104// Logical immediates can't encode zero, so a return value of zero is used to
105// indicate a failure case. Specifically, where the constraints on imm_s are not
106// met.
107uint64_t Instruction::ImmLogical() {
108 unsigned reg_size = SixtyFourBits() ? kXRegSizeInBits : kWRegSizeInBits;
109 int32_t n = BitN();
110 int32_t imm_s = ImmSetBits();
111 int32_t imm_r = ImmRotate();
112
113 // An integer is constructed from the n, imm_s and imm_r bits according to
114 // the following table:
115 //
116 // N imms immr size S R
117 // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
118 // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
119 // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
120 // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
121 // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
122 // 0 11110s xxxxxr 2 UInt(s) UInt(r)
123 // (s bits must not be all set)
124 //
125 // A pattern is constructed of size bits, where the least significant S+1
126 // bits are set. The pattern is rotated right by R, and repeated across a
127 // 32 or 64-bit value, depending on destination register width.
128 //
129
130 if (n == 1) {
131 if (imm_s == 0x3F) {
132 return 0;
133 }
134 uint64_t bits = (1ULL << (imm_s + 1)) - 1;
135 return RotateRight(bits, imm_r, 64);
136 } else {
137 if ((imm_s >> 1) == 0x1F) {
138 return 0;
139 }
140 for (int width = 0x20; width >= 0x2; width >>= 1) {
141 if ((imm_s & width) == 0) {
142 int mask = width - 1;
143 if ((imm_s & mask) == mask) {
144 return 0;
145 }
146 uint64_t bits = (1ULL << ((imm_s & mask) + 1)) - 1;
147 return RepeatBitsAcrossReg(
148 reg_size, RotateRight(bits, imm_r & mask, width), width);
149 }
150 }
151 }
152 UNREACHABLE();
153}
154
155uint32_t Instruction::ImmNEONabcdefgh() const {
156 return ImmNEONabc() << 5 | ImmNEONdefgh();
157}
158
159float Instruction::ImmFP32() { return Imm8ToFP32(ImmFP()); }
160
161double Instruction::ImmFP64() { return Imm8ToFP64(ImmFP()); }
162
163float Instruction::ImmNEONFP32() const { return Imm8ToFP32(ImmNEONabcdefgh()); }
164
165double Instruction::ImmNEONFP64() const {
166 return Imm8ToFP64(ImmNEONabcdefgh());
167}
168
169unsigned CalcLSDataSizeLog2(LoadStoreOp op) {
170 DCHECK_EQ(LSSize_offset + LSSize_width, kInstrSize * 8);
171 unsigned size_log2 = static_cast<Instr>(op) >> LSSize_offset;
172 if ((op & LSVector_mask) != 0) {
173 // Vector register memory operations encode the access size in the "size"
174 // and "opc" fields.
175 if (size_log2 == 0 && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
176 size_log2 = kQRegSizeLog2;
177 }
178 }
179 return size_log2;
180}
181
// Body of CalcLSPairDataSize(LoadStorePairOp op): returns log2 of the
// per-register access size in bytes for a load/store-pair operation
// (Q registers, X/D registers, or W/S registers for everything else).
// NOTE(review): the signature line (original line 182) is missing from this
// extract — restore it from the upstream source before compiling.
183 static_assert(kXRegSize == kDRegSize, "X and D registers must be same size.");
184 static_assert(kWRegSize == kSRegSize, "W and S registers must be same size.");
185 switch (op) {
186 case STP_q:
187 case LDP_q:
188 return kQRegSizeLog2;
189 case STP_x:
190 case LDP_x:
191 case STP_d:
192 case LDP_d:
193 return kXRegSizeLog2;
194 default:
195 return kWRegSizeLog2;
196 }
197}
198
// Returns the PC-relative byte offset encoded in this instruction,
// distinguishing ADR addressing, PC-relative branches, unresolved internal
// references, and literal loads (whose offset is scaled by kInstrSize).
// NOTE(review): original lines 207, 210 and 213 are missing from this
// extract — the `offset = ...` assignments for the branch and
// internal-reference cases (and likely a DCHECK before the literal case).
// Restore from upstream before use.
199int64_t Instruction::ImmPCOffset() {
200 int64_t offset;
201 if (IsPCRelAddressing()) {
202 // PC-relative addressing. Only ADR is supported.
203 offset = ImmPCRel();
204 } else if (BranchType() != UnknownBranchType) {
205 // All PC-relative branches.
206 // Relative branch offsets are instruction-size-aligned.
208 } else if (IsUnresolvedInternalReference()) {
209 // Internal references are always word-aligned.
211 } else {
212 // Load literal (offset from PC).
214 // The offset is always shifted by 2 bits, even for loads to 64-bits
215 // registers.
216 offset = ImmLLiteral() * kInstrSize;
217 }
218 return offset;
219}
220
// Returns the instruction this one's PC-relative immediate points at.
// NOTE(review): the body (original line 222) is missing from this extract —
// presumably it resolves ImmPCOffset() relative to `this` (e.g. via
// InstructionAtOffset). Restore from upstream before compiling.
221Instruction* Instruction::ImmPCOffsetTarget() {
223}
224
225bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
226 return IsValidImmPCOffset(BranchType(), DistanceTo(target));
227}
228
// Re-points this instruction's PC-relative immediate at `target`,
// dispatching on the instruction kind (ADR, the branch families,
// unresolved internal references, or literal loads).
// NOTE(review): original lines 234, 236, 238 and 240 are missing from this
// extract — the calls that patch the conditional, unconditional, compare
// and test branch immediates (presumably SetBranchImmTarget). Restore from
// upstream before use.
229void Instruction::SetImmPCOffsetTarget(Zone* zone, AssemblerOptions options,
230 Instruction* target) {
231 if (IsPCRelAddressing()) {
232 SetPCRelImmTarget(zone, options, target);
233 } else if (IsCondBranchImm()) {
235 } else if (IsUncondBranchImm()) {
237 } else if (IsCompareBranch()) {
239 } else if (IsTestBranch()) {
241 } else if (IsUnresolvedInternalReference()) {
242 SetUnresolvedInternalReferenceImmTarget(zone, options, target);
243 } else {
244 // Load literal (offset from PC).
245 SetImmLLiteral(target);
246 }
247}
248
// Patches an ADR instruction to address `target`: in-range offsets are
// re-encoded directly; out-of-range offsets are rewritten via
// PatchingAssembler::PatchAdrFar.
// NOTE(review): original lines 258 and 261 are missing from this extract —
// the write of the re-encoded instruction bits (using `imm`) and the
// patcher's instruction-count argument (likely kAdrFarPatchableNInstrs).
// Restore from upstream before use.
249void Instruction::SetPCRelImmTarget(Zone* zone, AssemblerOptions options,
250 Instruction* target) {
251 // ADRP is not supported, so 'this' must point to an ADR instruction.
252 DCHECK(IsAdr());
253
254 ptrdiff_t target_offset = DistanceTo(target);
255 Instr imm;
256 if (Instruction::IsValidPCRelOffset(target_offset)) {
257 imm = Assembler::ImmPCRelAddress(static_cast<int>(target_offset));
259 } else {
260 PatchingAssembler patcher(zone, options, reinterpret_cast<uint8_t*>(this),
262 patcher.PatchAdrFar(target_offset);
263 }
264}
265
// Tail of Instruction::SetUnresolvedInternalReferenceImmTarget: encodes the
// instruction-count distance to `target` as two brk instructions carrying
// the high and low 16 bits of the offset.
// NOTE(review): original lines 266 (start of the signature) and 268-269
// (likely DCHECKs on the instruction kind/alignment) are missing from this
// extract — restore from upstream before compiling.
267 Zone* zone, AssemblerOptions options, Instruction* target) {
270 DCHECK(is_int32(DistanceTo(target) >> kInstrSizeLog2));
271 int32_t target_offset =
272 static_cast<int32_t>(DistanceTo(target) >> kInstrSizeLog2);
273 uint32_t high16 = unsigned_bitextract_32(31, 16, target_offset);
274 uint32_t low16 = unsigned_bitextract_32(15, 0, target_offset);
275
276 PatchingAssembler patcher(zone, options, reinterpret_cast<uint8_t*>(this), 2);
277 patcher.brk(high16);
278 patcher.brk(low16);
279}
280
// Re-encodes this literal-load instruction's immediate so it references
// `source`, scaling the distance by the literal-load granularity.
// NOTE(review): original lines 282-285 and 289 are missing from this
// extract — likely DCHECKs, the computation of the new immediate from the
// scaled distance, and the masked write of the instruction bits. Restore
// from upstream before compiling.
281void Instruction::SetImmLLiteral(Instruction* source) {
286 static_cast<int>(DistanceTo(source) >> kLoadLiteralScaleLog2));
287 Instr mask = ImmLLiteral_mask;
288
290}
291
// NEONFormatDecoder constructors: each captures the raw instruction bits
// and installs the supplied format map(s) via SetFormatMaps (one, two, or
// three maps; the no-map form presumably defaults to IntegerFormatMap()).
// NOTE(review): the constructor signature lines (original lines 292, 297,
// 303 and 310) and the default constructor's SetFormatMaps call (line 294)
// are missing from this extract — restore from upstream before compiling.
293 instrbits_ = instr->InstructionBits();
295}
296
298 const NEONFormatMap* format) {
299 instrbits_ = instr->InstructionBits();
300 SetFormatMaps(format);
301}
302
304 const NEONFormatMap* format0,
305 const NEONFormatMap* format1) {
306 instrbits_ = instr->InstructionBits();
307 SetFormatMaps(format0, format1);
308}
309
311 const NEONFormatMap* format0,
312 const NEONFormatMap* format1,
313 const NEONFormatMap* format2) {
314 instrbits_ = instr->InstructionBits();
315 SetFormatMaps(format0, format1, format2);
316}
317
318void NEONFormatDecoder::SetFormatMaps(const NEONFormatMap* format0,
319 const NEONFormatMap* format1,
320 const NEONFormatMap* format2) {
321 DCHECK_NOT_NULL(format0);
322 formats_[0] = format0;
323 formats_[1] = (format1 == nullptr) ? formats_[0] : format1;
324 formats_[2] = (format2 == nullptr) ? formats_[1] : format2;
325 // Support four parameters form (e.i. ld4r)
326 // to avoid using positional arguments in DisassemblingDecoder.
327 // See: https://crbug.com/v8/10365
328 formats_[3] = formats_[2];
329}
330
// Replaces the format map at `index` with `format`.
// NOTE(review): original lines 333 and 335 are missing from this extract —
// likely a bounds DCHECK on `index` and the `formats_[index] = format`
// assignment. Restore from upstream before compiling.
331void NEONFormatDecoder::SetFormatMap(unsigned index,
332 const NEONFormatMap* format) {
334 DCHECK_NOT_NULL(format);
336}
337
// Expands `string` using placeholder substitution for every operand.
// NOTE(review): the body (original lines 339-340) is missing from this
// extract — presumably a call to Substitute with kPlaceholder modes.
// Restore from upstream before compiling.
338const char* NEONFormatDecoder::SubstitutePlaceholders(const char* string) {
341}
342
343const char* NEONFormatDecoder::Substitute(const char* string,
344 SubstitutionMode mode0,
345 SubstitutionMode mode1,
346 SubstitutionMode mode2,
347 SubstitutionMode mode3) {
348 snprintf(form_buffer_, sizeof(form_buffer_), string, GetSubstitute(0, mode0),
349 GetSubstitute(1, mode1), GetSubstitute(2, mode2),
350 GetSubstitute(3, mode3));
351 return form_buffer_;
352}
353
354const char* NEONFormatDecoder::Mnemonic(const char* mnemonic) {
355 if ((instrbits_ & NEON_Q) != 0) {
356 snprintf(mne_buffer_, sizeof(mne_buffer_), "%s2", mnemonic);
357 return mne_buffer_;
358 }
359 return mnemonic;
360}
361
// Body of GetVectorFormat(int format_index): forwards to the map-based
// overload using the stored format map for that operand slot.
// NOTE(review): the signature line (original line 362) is missing from
// this extract — restore it from the upstream source before compiling.
363 return GetVectorFormat(formats_[format_index]);
364}
365
// Tail of GetVectorFormat(const NEONFormatMap*): translates the NEON
// format selected by `format_map` into a VectorFormat via a lookup table.
// NOTE(review): original lines 366 (start of the signature) and 369-371
// (the entries of the `vform` initializer) are missing from this extract —
// restore from upstream before compiling.
367 const NEONFormatMap* format_map) {
368 static const VectorFormat vform[] = {
372 DCHECK_LT(GetNEONFormat(format_map), arraysize(vform));
373 return vform[GetNEONFormat(format_map)];
374}
375
// Returns the substitution string for operand `index`: a format string in
// kFormat mode, otherwise a placeholder (kPlaceholder is the only other
// mode, per the DCHECK).
// NOTE(review): original lines 378 and 381 — the two return statements
// (presumably NEONFormatAsString / NEONFormatAsPlaceholder over the
// operand's format) — are missing from this extract. Restore from upstream
// before compiling.
376const char* NEONFormatDecoder::GetSubstitute(int index, SubstitutionMode mode) {
377 if (mode == kFormat) {
379 }
380 DCHECK_EQ(mode, kPlaceholder);
382}
383
384NEONFormat NEONFormatDecoder::GetNEONFormat(const NEONFormatMap* format_map) {
385 return format_map->map[PickBits(format_map->bits)];
386}
387
// Body of NEONFormatAsString(NEONFormat format): maps a NEONFormat
// enumerator to its textual vector-arrangement name ("8b", "4s", ...).
// NOTE(review): the signature line (original line 388) is missing from
// this extract — restore it from the upstream source before compiling.
389 static const char* formats[] = {"undefined", "8b", "16b", "4h", "8h",
390 "2s", "4s", "1d", "2d", "b",
391 "h", "s", "d"};
392 DCHECK_LT(format, arraysize(formats));
393 return formats[format];
394}
395
// Body of NEONFormatAsPlaceholder(NEONFormat format): maps a scalar
// NEONFormat (B/H/S/D, or NF_UNDEF per the DCHECK) to its placeholder
// token ("'B", "'H", ...).
// NOTE(review): the signature line (original line 396) is missing from
// this extract — restore it from the upstream source before compiling.
397 DCHECK((format == NF_B) || (format == NF_H) || (format == NF_S) ||
398 (format == NF_D) || (format == NF_UNDEF));
399 static const char* formats[] = {
400 "undefined", "undefined", "undefined", "undefined", "undefined",
401 "undefined", "undefined", "undefined", "undefined", "'B",
402 "'H", "'S", "'D"};
403 return formats[format];
404}
405
406uint8_t NEONFormatDecoder::PickBits(const uint8_t bits[]) {
407 uint8_t result = 0;
408 for (unsigned b = 0; b < kNEONFormatMaxBits; b++) {
409 if (bits[b] == 0) break;
410 result <<= 1;
411 result |= ((instrbits_ & (1 << bits[b])) == 0) ? 0 : 1;
412 }
413 return result;
414}
415} // namespace internal
416} // namespace v8
417
418#endif // V8_TARGET_ARCH_ARM64
friend Zone
Definition asm-types.cc:195
static Instr ImmPCRelAddress(int imm21)
static Instr ImmLLiteral(int imm19)
static bool IsImmLLiteral(int64_t offset)
V8_INLINE const Instruction * InstructionAtOffset(int64_t offset, CheckAlignment check=CHECK_ALIGNMENT) const
static bool IsValidPCRelOffset(ptrdiff_t offset)
double ImmNEONFP64() const
static constexpr bool IsValidImmPCOffset(ImmBranchType branch_type, ptrdiff_t offset)
void SetPCRelImmTarget(Zone *zone, AssemblerOptions options, Instruction *target)
void SetUnresolvedInternalReferenceImmTarget(Zone *zone, AssemblerOptions options, Instruction *target)
static float Imm8ToFP32(uint32_t imm8)
unsigned ImmNEONabcdefgh() const
void SetImmPCOffsetTarget(Zone *zone, AssemblerOptions options, Instruction *target)
V8_EXPORT_PRIVATE int64_t ImmPCOffset()
V8_EXPORT_PRIVATE Instruction * ImmPCOffsetTarget()
bool IsTargetInImmPCOffsetRange(Instruction *target)
void SetImmLLiteral(Instruction *source)
V8_EXPORT_PRIVATE void SetInstructionBits(Instr value, WritableJitAllocation *jit_allocation=nullptr)
static double Imm8ToFP64(uint32_t imm8)
ImmBranchType BranchType() const
V8_INLINE ptrdiff_t DistanceTo(Instruction *target)
void SetBranchImmTarget(Instruction *target, WritableJitAllocation *jit_allocation=nullptr)
uint8_t PickBits(const uint8_t bits[])
void SetFormatMap(unsigned index, const NEONFormatMap *format)
VectorFormat GetVectorFormat(int format_index=0)
const char * SubstitutePlaceholders(const char *string)
void SetFormatMaps(const NEONFormatMap *format0, const NEONFormatMap *format1=nullptr, const NEONFormatMap *format2=nullptr)
NEONFormat GetNEONFormat(const NEONFormatMap *format_map)
NEONFormatDecoder(const Instruction *instr)
static const char * NEONFormatAsPlaceholder(NEONFormat format)
static const NEONFormatMap * IntegerFormatMap()
const char * GetSubstitute(int index, SubstitutionMode mode)
static const char * NEONFormatAsString(NEONFormat format)
const NEONFormatMap * formats_[4]
const char * Substitute(const char *string, SubstitutionMode mode0=kFormat, SubstitutionMode mode1=kFormat, SubstitutionMode mode2=kFormat, SubstitutionMode mode3=kFormat)
const char * Mnemonic(const char *mnemonic)
static constexpr int kAdrFarPatchableNInstrs
int32_t offset
Instruction * instr
ZoneVector< RpoNumber > & result
uint32_t const mask
int int32_t
Definition unicode.cc:40
static void WriteUnalignedValue(Address p, V value)
Definition memory.h:41
constexpr NEONFormatField NEON_Q
constexpr LoadStoreAnyOp LoadStoreAnyFixed
constexpr GenericInstrField SixtyFourBits
static const unsigned kNEONFormatMaxBits
constexpr uint8_t kLoadLiteralScaleLog2
constexpr int kSRegSize
constexpr int kWRegSizeLog2
constexpr int kWRegSizeInBits
constexpr LoadStoreAnyOp LoadStoreAnyFMask
constexpr int ImmPCRel_mask
constexpr LoadStorePairAnyOp LoadStorePairAnyFixed
constexpr LoadStorePairOp LoadStorePairLBit
constexpr uint8_t kInstrSizeLog2
constexpr int kQRegSizeLog2
unsigned CalcLSDataSizeLog2(LoadStoreOp op)
uint32_t LoadStorePairOp
constexpr LoadStorePairAnyOp LoadStorePairAnyFMask
constexpr int kWRegSize
constexpr LoadStoreOp LoadStoreMask
return value
Definition map-inl.h:893
constexpr int kXRegSizeInBits
constexpr uint8_t kInstrSize
constexpr int kDRegSize
uint32_t LoadStoreOp
constexpr int kXRegSizeLog2
constexpr int kXRegSize
uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x)
Definition utils.h:555
V8_EXPORT_PRIVATE void V8_EXPORT_PRIVATE void const char * format
Definition utils.h:715
unsigned CalcLSPairDataSize(LoadStorePairOp op)
#define DCHECK_LE(v1, v2)
Definition logging.h:490
#define DCHECK_NOT_NULL(val)
Definition logging.h:492
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
constexpr bool IsAligned(T value, U alignment)
Definition macros.h:403
#define arraysize(array)
Definition macros.h:67