assembler-arm64-inl.h
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_CODEGEN_ARM64_ASSEMBLER_ARM64_INL_H_
#define V8_CODEGEN_ARM64_ASSEMBLER_ARM64_INL_H_

#include "src/codegen/arm64/assembler-arm64.h"
// Include the non-inl header before the rest of the headers.

#include <type_traits>

#include "src/base/memory.h"
#include "src/codegen/assembler.h"
#include "src/codegen/flush-instruction-cache.h"
#include "src/debug/debug.h"
#include "src/heap/heap-layout-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "src/objects/tagged.h"

namespace v8 {
namespace internal {

bool CpuFeatures::SupportsOptimizer() { return true; }

void WritableRelocInfo::apply(intptr_t delta) {
  // On arm64 only internal references and immediate branches need extra work.
  if (RelocInfo::IsInternalReference(rmode_)) {
    // Absolute code pointer inside code object moves with the code object.
    intptr_t internal_ref = ReadUnalignedValue<intptr_t>(pc_);
    internal_ref += delta;  // Relocate entry.
    jit_allocation_.WriteUnalignedValue<intptr_t>(pc_, internal_ref);
  } else {
    Instruction* instr = reinterpret_cast<Instruction*>(pc_);
    if (instr->IsBranchAndLink() || instr->IsUnconditionalBranch()) {
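      // The branch immediate is PC-relative, and pc_ already points at the
      // moved instruction, so ImmPCOffsetTarget() yields the original target
      // shifted by delta; subtracting delta re-aims the branch at the
      // original absolute target.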
      Address old_target =
          reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
      Address new_target = old_target - delta;
      instr->SetBranchImmTarget<UncondBranchType>(
          reinterpret_cast<Instruction*>(new_target), &jit_allocation_);
    }
  }
}

inline bool CPURegister::IsSameSizeAndType(const CPURegister& other) const {
  return (reg_size_ == other.reg_size_) && (reg_type_ == other.reg_type_);
}

inline bool CPURegister::IsZero() const {
  DCHECK(is_valid());
  return IsRegister() && (code() == kZeroRegCode);
}

inline bool CPURegister::IsSP() const {
  DCHECK(is_valid());
  return IsRegister() && (code() == kSPRegInternalCode);
}

inline void CPURegList::Combine(const CPURegList& other) {
  DCHECK(other.type() == type_);
  DCHECK(other.RegisterSizeInBits() == size_);
  list_ |= other.list_;
}

inline void CPURegList::Remove(const CPURegList& other) {
  if (other.type() == type_) {
    list_ &= ~other.list_;
  }
}

inline void CPURegList::Combine(const CPURegister& other) {
  DCHECK(other.type() == type_);
  DCHECK(other.SizeInBits() == size_);
  Combine(other.code());
}

inline void CPURegList::Remove(const CPURegister& other1,
                               const CPURegister& other2,
                               const CPURegister& other3,
                               const CPURegister& other4) {
  if (!other1.IsNone() && (other1.type() == type_)) Remove(other1.code());
  if (!other2.IsNone() && (other2.type() == type_)) Remove(other2.code());
  if (!other3.IsNone() && (other3.type() == type_)) Remove(other3.code());
  if (!other4.IsNone() && (other4.type() == type_)) Remove(other4.code());
}

inline void CPURegList::Combine(int code) {
  DCHECK(CPURegister::Create(code, size_, type_).is_valid());
  list_ |= (1ULL << code);
}

inline void CPURegList::Remove(int code) {
  DCHECK(CPURegister::Create(code, size_, type_).is_valid());
  list_ &= ~(1ULL << code);
}

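// General registers use a 5-bit code in which 31 encodes either the zero
// register or the stack pointer depending on context; V8 disambiguates by
// giving sp/wsp a separate out-of-band internal code (kSPRegInternalCode).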
inline Register Register::XRegFromCode(unsigned code) {
  if (code == kSPRegInternalCode) {
    return sp;
  } else {
    DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
    return Register::Create(code, kXRegSizeInBits);
  }
}

inline Register Register::WRegFromCode(unsigned code) {
  if (code == kSPRegInternalCode) {
    return wsp;
  } else {
    DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
    return Register::Create(code, kWRegSizeInBits);
  }
}

inline VRegister VRegister::BRegFromCode(unsigned code) {
  DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
  return VRegister::Create(code, kBRegSizeInBits);
}

inline VRegister VRegister::HRegFromCode(unsigned code) {
  DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
  return VRegister::Create(code, kHRegSizeInBits);
}

inline VRegister VRegister::SRegFromCode(unsigned code) {
  DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
  return VRegister::Create(code, kSRegSizeInBits);
}

inline VRegister VRegister::DRegFromCode(unsigned code) {
  DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
  return VRegister::Create(code, kDRegSizeInBits);
}

inline VRegister VRegister::QRegFromCode(unsigned code) {
  DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
  return VRegister::Create(code, kQRegSizeInBits);
}

inline VRegister VRegister::VRegFromCode(unsigned code) {
  DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
  return VRegister::Create(code, kVRegSizeInBits);
}

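// The accessors below reinterpret a CPURegister as a differently sized view
// of the same register code: W/X for general registers, B/H/S/D/Q for vector
// registers.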
inline Register CPURegister::W() const {
  DCHECK(IsRegister());
  return Register::WRegFromCode(code());
}

inline Register CPURegister::Reg() const {
  DCHECK(IsRegister());
  return Register::Create(code(), reg_size_);
}

inline VRegister CPURegister::VReg() const {
  DCHECK(IsVRegister());
  return VRegister::Create(code(), reg_size_);
}

inline Register CPURegister::X() const {
  DCHECK(IsRegister());
  return Register::XRegFromCode(code());
}

inline VRegister CPURegister::V() const {
  DCHECK(IsVRegister());
  return VRegister::VRegFromCode(code());
}

inline VRegister CPURegister::B() const {
  DCHECK(IsVRegister());
  return VRegister::BRegFromCode(code());
}

inline VRegister CPURegister::H() const {
  DCHECK(IsVRegister());
  return VRegister::HRegFromCode(code());
}

inline VRegister CPURegister::S() const {
  DCHECK(IsVRegister());
  return VRegister::SRegFromCode(code());
}

inline VRegister CPURegister::D() const {
  DCHECK(IsVRegister());
  return VRegister::DRegFromCode(code());
}

inline VRegister CPURegister::Q() const {
  DCHECK(IsVRegister());
  return VRegister::QRegFromCode(code());
}

// Immediate.
// Default initializer is for int types
template <typename T>
struct ImmediateInitializer {
  static inline RelocInfo::Mode rmode_for(T) { return RelocInfo::NO_INFO; }
  static inline int64_t immediate_for(T t) {
    static_assert(sizeof(T) <= 8);
    static_assert(std::is_integral<T>::value || std::is_enum<T>::value);
    return t;
  }
};

template <>
struct ImmediateInitializer<Tagged<Smi>> {
  static inline RelocInfo::Mode rmode_for(Tagged<Smi>) {
    return RelocInfo::NO_INFO;
  }
  static inline int64_t immediate_for(Tagged<Smi> t) {
    return static_cast<int64_t>(t.ptr());
  }
};

template <>
struct ImmediateInitializer<ExternalReference> {
  static inline RelocInfo::Mode rmode_for(ExternalReference t) {
    return RelocInfo::EXTERNAL_REFERENCE;
  }
  static inline int64_t immediate_for(ExternalReference t) {
    return static_cast<int64_t>(t.raw());
  }
};

template <typename T>
Immediate::Immediate(Handle<T> handle, RelocInfo::Mode mode)
    : value_(static_cast<intptr_t>(handle.address())), rmode_(mode) {
  DCHECK(RelocInfo::IsEmbeddedObjectMode(mode));
}

template <typename T>
Immediate::Immediate(T t)
    : value_(ImmediateInitializer<T>::immediate_for(t)),
      rmode_(ImmediateInitializer<T>::rmode_for(t)) {}

template <typename T>
Immediate::Immediate(T t, RelocInfo::Mode rmode)
    : value_(ImmediateInitializer<T>::immediate_for(t)), rmode_(rmode) {
  static_assert(std::is_integral<T>::value);
}

template <typename T>
Operand::Operand(T t) : immediate_(t), reg_(NoReg) {}

template <typename T>
Operand::Operand(T t, RelocInfo::Mode rmode)
    : immediate_(t, rmode), reg_(NoReg) {}

Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
    : immediate_(0),
      reg_(reg),
      shift_(shift),
      extend_(NO_EXTEND),
      shift_amount_(shift_amount) {
  DCHECK(reg.Is64Bits() || (shift_amount < kWRegSizeInBits));
  DCHECK(reg.Is32Bits() || (shift_amount < kXRegSizeInBits));
  DCHECK_IMPLIES(reg.IsSP(), shift_amount == 0);
}

Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
    : immediate_(0),
      reg_(reg),
      shift_(NO_SHIFT),
      extend_(extend),
      shift_amount_(shift_amount) {
  DCHECK(reg.is_valid());
  DCHECK_LE(shift_amount, 4);
  DCHECK(!reg.IsSP());

  // Extend modes SXTX and UXTX require a 64-bit register.
  DCHECK(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
}

bool Operand::IsHeapNumberRequest() const {
  DCHECK_IMPLIES(heap_number_request_.has_value(), reg_ == NoReg);
  DCHECK_IMPLIES(heap_number_request_.has_value(),
                 immediate_.rmode() == RelocInfo::FULL_EMBEDDED_OBJECT ||
                     immediate_.rmode() == RelocInfo::CODE_TARGET);
  return heap_number_request_.has_value();
}

HeapNumberRequest Operand::heap_number_request() const {
  DCHECK(IsHeapNumberRequest());
  return *heap_number_request_;
}

bool Operand::IsImmediate() const {
  return reg_ == NoReg && !IsHeapNumberRequest();
}

bool Operand::IsShiftedRegister() const {
  return reg_.is_valid() && (shift_ != NO_SHIFT);
}

bool Operand::IsExtendedRegister() const {
  return reg_.is_valid() && (extend_ != NO_EXTEND);
}

bool Operand::IsZero() const {
  if (IsImmediate()) {
    return ImmediateValue() == 0;
  } else {
    return reg().IsZero();
  }
}

Operand Operand::ToExtendedRegister() const {
  DCHECK(IsShiftedRegister());
  DCHECK((shift_ == LSL) && (shift_amount_ <= 4));
  return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
}

Operand Operand::ToW() const {
  if (IsShiftedRegister()) {
    DCHECK(reg_.Is64Bits());
    return Operand(reg_.W(), shift(), shift_amount());
  } else if (IsExtendedRegister()) {
    DCHECK(reg_.Is64Bits());
    return Operand(reg_.W(), extend(), shift_amount());
  }
  DCHECK(IsImmediate());
  return *this;
}

Immediate Operand::immediate_for_heap_number_request() const {
  DCHECK(immediate_.rmode() == RelocInfo::FULL_EMBEDDED_OBJECT);
  return immediate_;
}

Immediate Operand::immediate() const {
  DCHECK(!IsHeapNumberRequest());
  return immediate_;
}

int64_t Operand::ImmediateValue() const {
  DCHECK(!IsHeapNumberRequest());
  return immediate_.value();
}

RelocInfo::Mode Operand::ImmediateRMode() const {
  DCHECK(IsImmediate() || IsHeapNumberRequest());
  return immediate_.rmode();
}

Register Operand::reg() const {
  DCHECK(IsShiftedRegister() || IsExtendedRegister());
  return reg_;
}

Shift Operand::shift() const {
  DCHECK(IsShiftedRegister());
  return shift_;
}

Extend Operand::extend() const {
  DCHECK(IsExtendedRegister());
  return extend_;
}

unsigned Operand::shift_amount() const {
  DCHECK(IsShiftedRegister() || IsExtendedRegister());
  return shift_amount_;
}

MemOperand::MemOperand()
    : base_(NoReg),
      regoffset_(NoReg),
      offset_(0),
      addrmode_(Offset),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {}

MemOperand::MemOperand(Register base, int64_t offset, AddrMode addrmode)
    : base_(base),
      regoffset_(NoReg),
      offset_(offset),
      addrmode_(addrmode),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {
  DCHECK(base.Is64Bits() && !base.IsZero());
}

MemOperand::MemOperand(Register base, Register regoffset, Extend extend,
                       unsigned shift_amount)
    : base_(base),
      regoffset_(regoffset),
      offset_(0),
      addrmode_(Offset),
      shift_(NO_SHIFT),
      extend_(extend),
      shift_amount_(shift_amount) {
  DCHECK(base.Is64Bits() && !base.IsZero());
  DCHECK(!regoffset.IsSP());
  DCHECK((extend == UXTW) || (extend == SXTW) || (extend == SXTX));

  // SXTX extend mode requires a 64-bit offset register.
  DCHECK(regoffset.Is64Bits() || (extend != SXTX));
}

MemOperand::MemOperand(Register base, Register regoffset, Shift shift,
                       unsigned shift_amount)
    : base_(base),
      regoffset_(regoffset),
      offset_(0),
      addrmode_(Offset),
      shift_(shift),
      extend_(NO_EXTEND),
      shift_amount_(shift_amount) {
  DCHECK(base.Is64Bits() && !base.IsZero());
  DCHECK(regoffset.Is64Bits() && !regoffset.IsSP());
  DCHECK(shift == LSL);
}

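// Illustrative examples of the constructors above: MemOperand(x0, 8) encodes
// [x0, #8]; MemOperand(x0, x1, LSL, 3) encodes [x0, x1, lsl #3]; and
// MemOperand(x0, w1, UXTW, 2) encodes [x0, w1, uxtw #2].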
MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
    : base_(base), regoffset_(NoReg), addrmode_(addrmode) {
  DCHECK(base.Is64Bits() && !base.IsZero());

  if (offset.IsImmediate()) {
    offset_ = offset.ImmediateValue();
  } else if (offset.IsShiftedRegister()) {
    DCHECK((addrmode == Offset) || (addrmode == PostIndex));

    regoffset_ = offset.reg();
    shift_ = offset.shift();
    shift_amount_ = offset.shift_amount();

    extend_ = NO_EXTEND;
    offset_ = 0;

    // These assertions match those in the shifted-register constructor.
    DCHECK(regoffset_.Is64Bits() && !regoffset_.IsSP());
    DCHECK(shift_ == LSL);
  } else {
    DCHECK(offset.IsExtendedRegister());
    DCHECK(addrmode == Offset);

    regoffset_ = offset.reg();
    extend_ = offset.extend();
    shift_amount_ = offset.shift_amount();

    shift_ = NO_SHIFT;
    offset_ = 0;

    // These assertions match those in the extended-register constructor.
    DCHECK(!regoffset_.IsSP());
    DCHECK((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
    DCHECK(regoffset_.Is64Bits() || (extend_ != SXTX));
  }
}

bool MemOperand::IsImmediateOffset() const {
  return (addrmode_ == Offset) && regoffset_ == NoReg;
}

bool MemOperand::IsRegisterOffset() const {
  return (addrmode_ == Offset) && regoffset_ != NoReg;
}

bool MemOperand::IsPreIndex() const { return addrmode_ == PreIndex; }

bool MemOperand::IsPostIndex() const { return addrmode_ == PostIndex; }

void Assembler::Unreachable() { debug("UNREACHABLE", __LINE__, BREAK); }

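// A code target on arm64 is either encoded directly in a PC-relative branch
// (B/BL) or loaded from the constant pool via an LDR literal instruction; the
// accessors below dispatch on the instruction form found at pc.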
Address Assembler::target_pointer_address_at(Address pc) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  DCHECK(instr->IsLdrLiteralX() || instr->IsLdrLiteralW());
  return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
}

// Read/Modify the code target address in the branch/call instruction at pc.
Address Assembler::target_address_at(Address pc, Address constant_pool) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  if (instr->IsLdrLiteralX()) {
    return Memory<Address>(target_pointer_address_at(pc));
  } else {
    DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
    return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
  }
}

Tagged_t Assembler::target_compressed_address_at(Address pc,
                                                 Address constant_pool) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  CHECK(instr->IsLdrLiteralW());
  return Memory<Tagged_t>(target_pointer_address_at(pc));
}

Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  if (instr->IsLdrLiteralX()) {
    return Handle<Code>(reinterpret_cast<Address*>(
        Assembler::target_address_at(pc, 0 /* unused */)));
  } else {
    DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
    DCHECK_EQ(instr->ImmPCOffset() % kInstrSize, 0);
    return Cast<Code>(
        GetEmbeddedObject(instr->ImmPCOffset() >> kInstrSizeLog2));
  }
}

EmbeddedObjectIndex Assembler::embedded_object_index_referenced_from(
    Address pc) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  if (instr->IsLdrLiteralX()) {
    static_assert(sizeof(EmbeddedObjectIndex) == sizeof(intptr_t));
    return Memory<EmbeddedObjectIndex>(target_pointer_address_at(pc));
  } else {
    DCHECK(instr->IsLdrLiteralW());
    return Memory<uint32_t>(target_pointer_address_at(pc));
  }
}

void Assembler::set_embedded_object_index_referenced_from(
    Address pc, EmbeddedObjectIndex data) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  if (instr->IsLdrLiteralX()) {
    Memory<EmbeddedObjectIndex>(target_pointer_address_at(pc)) = data;
  } else {
    DCHECK(instr->IsLdrLiteralW());
    DCHECK(is_uint32(data));
    WriteUnalignedValue<uint32_t>(target_pointer_address_at(pc),
                                  static_cast<uint32_t>(data));
  }
}

Handle<HeapObject> Assembler::target_object_handle_at(Address pc) {
  return GetEmbeddedObject(embedded_object_index_referenced_from(pc));
}

Builtin Assembler::target_builtin_at(Address pc) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
  DCHECK_EQ(instr->ImmPCOffset() % kInstrSize, 0);
  int builtin_id = static_cast<int>(instr->ImmPCOffset() / kInstrSize);
  DCHECK(Builtins::IsBuiltinId(builtin_id));
  return static_cast<Builtin>(builtin_id);
}

int Assembler::deserialization_special_target_size(Address location) {
  Instruction* instr = reinterpret_cast<Instruction*>(location);
  if (instr->IsBranchAndLink() || instr->IsUnconditionalBranch()) {
    return kSpecialTargetSize;
  } else {
    DCHECK_EQ(instr->InstructionBits(), 0);
    return kSystemPointerSize;
  }
}

void Assembler::deserialization_set_target_internal_reference_at(
    Address pc, Address target, WritableJitAllocation& jit_allocation,
    RelocInfo::Mode mode) {
  jit_allocation.WriteUnalignedValue<Address>(pc, target);
}

void Assembler::set_target_address_at(Address pc, Address constant_pool,
                                      Address target,
                                      WritableJitAllocation* jit_allocation,
                                      ICacheFlushMode icache_flush_mode) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  if (instr->IsLdrLiteralX()) {
    if (jit_allocation) {
      jit_allocation->WriteValue<Address>(target_pointer_address_at(pc),
                                          target);
    } else {
      Memory<Address>(target_pointer_address_at(pc)) = target;
    }
    // Intuitively, we would think it is necessary to always flush the
    // instruction cache after patching a target address in the code. However,
    // in this case, only the constant pool contents change. The instruction
    // accessing the constant pool remains unchanged, so a flush is not
    // required.
  } else {
    DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
    if (target == 0) {
      // We are simply wiping the target out for serialization. Set the offset
      // to zero instead.
      target = pc;
    }
    instr->SetBranchImmTarget<UncondBranchType>(
        reinterpret_cast<Instruction*>(target), jit_allocation);
    if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
      FlushInstructionCache(pc, kInstrSize);
    }
  }
}

void Assembler::set_target_compressed_address_at(
    Address pc, Address constant_pool, Tagged_t target,
    WritableJitAllocation* jit_allocation, ICacheFlushMode icache_flush_mode) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  CHECK(instr->IsLdrLiteralW());
  if (jit_allocation) {
    jit_allocation->WriteValue(target_pointer_address_at(pc), target);
  } else {
    Memory<Tagged_t>(target_pointer_address_at(pc)) = target;
  }
}

int RelocInfo::target_address_size() {
  if (IsCodedSpecially()) {
    return Assembler::kSpecialTargetSize;
  } else {
    Instruction* instr = reinterpret_cast<Instruction*>(pc_);
    DCHECK(instr->IsLdrLiteralX() || instr->IsLdrLiteralW());
    return instr->IsLdrLiteralW() ? kTaggedSize : kSystemPointerSize;
  }
}

Address RelocInfo::target_address() {
  DCHECK(IsCodeTarget(rmode_) || IsWasmCall(rmode_) || IsWasmStubCall(rmode_));
  return Assembler::target_address_at(pc_, constant_pool_);
}

Address RelocInfo::target_address_address() {
  DCHECK(HasTargetAddressAddress());
  Instruction* instr = reinterpret_cast<Instruction*>(pc_);
  // Read the address of the word containing the target_address in an
  // instruction stream.
  // The only architecture-independent user of this function is the serializer.
  // The serializer uses it to find out how many raw bytes of instruction to
  // output before the next target.
  // For an instruction like B/BL, where the target bits are mixed into the
  // instruction bits, the size of the target will be zero, indicating that the
  // serializer should not step forward in memory after a target is resolved
  // and written.
  // For LDR literal instructions, we can skip up to the constant pool entry
  // address. We make sure that RelocInfo is ordered by the
  // target_address_address so that we do not skip over any relocatable
  // instruction sequences.
  if (instr->IsLdrLiteralX()) {
    return constant_pool_entry_address();
  } else {
    DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
    return pc_;
  }
}

Address RelocInfo::constant_pool_entry_address() {
  DCHECK(IsInConstantPool());
  return Assembler::target_pointer_address_at(pc_);
}

Tagged<HeapObject> RelocInfo::target_object(PtrComprCageBase cage_base) {
  DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
  if (IsCompressedEmbeddedObject(rmode_)) {
    Tagged_t compressed =
        Assembler::target_compressed_address_at(pc_, constant_pool_);
    DCHECK(!HAS_SMI_TAG(compressed));
    Tagged<Object> obj(
        V8HeapCompressionScheme::DecompressTagged(cage_base, compressed));
    return Cast<HeapObject>(obj);
  } else {
    return Cast<HeapObject>(
        Tagged<Object>(Assembler::target_address_at(pc_, constant_pool_)));
  }
}

DirectHandle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
  if (IsEmbeddedObjectMode(rmode_)) {
    return origin->target_object_handle_at(pc_);
  } else {
    DCHECK(IsCodeTarget(rmode_));
    return origin->code_target_object_handle_at(pc_);
  }
}

void WritableRelocInfo::set_target_object(Tagged<HeapObject> target,
                                          ICacheFlushMode icache_flush_mode) {
  DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
  if (IsCompressedEmbeddedObject(rmode_)) {
    DCHECK(COMPRESS_POINTERS_BOOL);
    // We must not compress pointers to objects outside of the main pointer
    // compression cage as we wouldn't be able to decompress them with the
    // correct cage base.
    DCHECK_IMPLIES(V8_ENABLE_SANDBOX_BOOL, !HeapLayout::InTrustedSpace(target));
    DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL,
                   !HeapLayout::InCodeSpace(target));
    Assembler::set_target_compressed_address_at(
        pc_, constant_pool_,
        V8HeapCompressionScheme::CompressObject(target.ptr()),
        &jit_allocation_, icache_flush_mode);
  } else {
    DCHECK(IsFullEmbeddedObject(rmode_));
    Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
                                     &jit_allocation_, icache_flush_mode);
  }
}

Address RelocInfo::target_external_reference() {
  DCHECK(rmode_ == EXTERNAL_REFERENCE);
  return Assembler::target_address_at(pc_, constant_pool_);
}

void WritableRelocInfo::set_target_external_reference(
    Address target, ICacheFlushMode icache_flush_mode) {
  DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
  Assembler::set_target_address_at(pc_, constant_pool_, target,
                                   &jit_allocation_, icache_flush_mode);
}

WasmCodePointer RelocInfo::wasm_code_pointer_table_entry() const {
  DCHECK(rmode_ == WASM_CODE_POINTER_TABLE_ENTRY);
  return WasmCodePointer{Assembler::uint32_constant_at(pc_, constant_pool_)};
}

void WritableRelocInfo::set_wasm_code_pointer_table_entry(
    WasmCodePointer target, ICacheFlushMode icache_flush_mode) {
  DCHECK(rmode_ == RelocInfo::WASM_CODE_POINTER_TABLE_ENTRY);
  Assembler::set_uint32_constant_at(pc_, constant_pool_, target.value(),
                                    &jit_allocation_, icache_flush_mode);
}

Address RelocInfo::target_internal_reference() {
  DCHECK(rmode_ == INTERNAL_REFERENCE);
  return ReadUnalignedValue<Address>(pc_);
}

Address RelocInfo::target_internal_reference_address() {
  DCHECK(rmode_ == INTERNAL_REFERENCE);
  return pc_;
}

JSDispatchHandle RelocInfo::js_dispatch_handle() {
  DCHECK(rmode_ == JS_DISPATCH_HANDLE);
  return JSDispatchHandle(Assembler::uint32_constant_at(pc_, constant_pool_));
}

Builtin RelocInfo::target_builtin_at(Assembler* origin) {
  DCHECK(IsNearBuiltinEntry(rmode_));
  return Assembler::target_builtin_at(pc_);
}

Address RelocInfo::target_off_heap_target() {
  DCHECK(IsOffHeapTarget(rmode_));
  return Assembler::target_address_at(pc_, constant_pool_);
}

uint32_t Assembler::uint32_constant_at(Address pc, Address constant_pool) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  CHECK(instr->IsLdrLiteralW());
  return ReadUnalignedValue<uint32_t>(target_pointer_address_at(pc));
}

void Assembler::set_uint32_constant_at(Address pc, Address constant_pool,
                                       uint32_t new_constant,
                                       WritableJitAllocation* jit_allocation,
                                       ICacheFlushMode icache_flush_mode) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  CHECK(instr->IsLdrLiteralW());
  if (jit_allocation) {
    jit_allocation->WriteUnalignedValue<uint32_t>(target_pointer_address_at(pc),
                                                  new_constant);
  } else {
    WriteUnalignedValue<uint32_t>(target_pointer_address_at(pc), new_constant);
  }
  // Icache flushing not needed for Ldr via the constant pool.
}

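// Map a CPURegister to the matching load/store opcode: X/W variants for
// general registers, B/H/S/D/Q variants for vector registers.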
LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
  DCHECK(rt.is_valid());
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDR_x : LDR_w;
  } else {
    DCHECK(rt.IsVRegister());
    switch (rt.SizeInBits()) {
      case kBRegSizeInBits:
        return LDR_b;
      case kHRegSizeInBits:
        return LDR_h;
      case kSRegSizeInBits:
        return LDR_s;
      case kDRegSizeInBits:
        return LDR_d;
      default:
        DCHECK(rt.IsQ());
        return LDR_q;
    }
  }
}

LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
  DCHECK(rt.is_valid());
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STR_x : STR_w;
  } else {
    DCHECK(rt.IsVRegister());
    switch (rt.SizeInBits()) {
      case kBRegSizeInBits:
        return STR_b;
      case kHRegSizeInBits:
        return STR_h;
      case kSRegSizeInBits:
        return STR_s;
      case kDRegSizeInBits:
        return STR_d;
      default:
        DCHECK(rt.IsQ());
        return STR_q;
    }
  }
}

LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
                                         const CPURegister& rt2) {
  DCHECK_EQ(STP_w | LoadStorePairLBit, LDP_w);
  return static_cast<LoadStorePairOp>(StorePairOpFor(rt, rt2) |
                                      LoadStorePairLBit);
}

LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
                                          const CPURegister& rt2) {
  DCHECK(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STP_x : STP_w;
  } else {
    DCHECK(rt.IsVRegister());
    switch (rt.SizeInBits()) {
      case kSRegSizeInBits:
        return STP_s;
      case kDRegSizeInBits:
        return STP_d;
      default:
        DCHECK(rt.IsQ());
        return STP_q;
    }
  }
}

LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDR_x_lit : LDR_w_lit;
  } else {
    DCHECK(rt.IsVRegister());
    return rt.Is64Bits() ? LDR_d_lit : LDR_s_lit;
  }
}

void Assembler::LoadStoreScaledImmOffset(Instr memop, int offset,
                                         unsigned size) {
  Emit(LoadStoreUnsignedOffsetFixed | memop | ImmLSUnsigned(offset >> size));
}

void Assembler::LoadStoreUnscaledImmOffset(Instr memop, int offset) {
  Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset));
}

void Assembler::LoadStoreWRegOffset(Instr memop, const Register& regoffset) {
  Emit(LoadStoreRegisterOffsetFixed | memop | Rm(regoffset) | ExtendMode(UXTW));
}

inline void Assembler::DataProcPlainRegister(const Register& rd,
                                             const Register& rn,
                                             const Register& rm, Instr op) {
  DCHECK(AreSameSizeAndType(rd, rn, rm));
  Emit(SF(rd) | AddSubShiftedFixed | op | Rm(rm) | Rn(rn) | Rd(rd));
}

inline void Assembler::CmpPlainRegister(const Register& rn,
                                        const Register& rm) {
  DCHECK(AreSameSizeAndType(rn, rm));
  Emit(SF(rn) | AddSubShiftedFixed | SUB | Flags(SetFlags) | Rm(rm) | Rn(rn) |
       Rd(xzr));
}

inline void Assembler::DataProcImmediate(const Register& rd, const Register& rn,
                                         int immediate, Instr op) {
  DCHECK(AreSameSizeAndType(rd, rn));
  DCHECK(IsImmAddSub(immediate));
  Emit(SF(rd) | AddSubImmediateFixed | op | ImmAddSub(immediate) | RdSP(rd) |
       RnSP(rn));
}

Instr Assembler::Flags(FlagsUpdate S) {
  if (S == SetFlags) {
    return 1 << FlagsUpdate_offset;
  } else if (S == LeaveFlags) {
    return 0 << FlagsUpdate_offset;
  }
  UNREACHABLE();
}

Instr Assembler::Cond(Condition cond) { return cond << Condition_offset; }

Instr Assembler::ImmPCRelAddress(int imm21) {
  Instr imm = static_cast<Instr>(checked_truncate_to_int21(imm21));
  Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
  Instr immlo = imm << ImmPCRelLo_offset;
  return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
}

Instr Assembler::ImmUncondBranch(int imm26) {
  return checked_truncate_to_int26(imm26) << ImmUncondBranch_offset;
}

Instr Assembler::ImmCondBranch(int imm19) {
  return checked_truncate_to_int19(imm19) << ImmCondBranch_offset;
}

Instr Assembler::ImmCmpBranch(int imm19) {
  return checked_truncate_to_int19(imm19) << ImmCmpBranch_offset;
}

Instr Assembler::ImmTestBranch(int imm14) {
  return checked_truncate_to_int14(imm14) << ImmTestBranch_offset;
}

Instr Assembler::ImmTestBranchBit(unsigned bit_pos) {
  DCHECK(is_uint6(bit_pos));
  // Subtract five from the shift offset, as we need bit 5 from bit_pos.
  unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
  unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
  b5 &= ImmTestBranchBit5_mask;
  b40 &= ImmTestBranchBit40_mask;
  return b5 | b40;
}

Instr Assembler::SF(Register rd) {
  return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
}

Instr Assembler::ImmAddSub(int imm) {
  DCHECK(IsImmAddSub(imm));
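  // Add/sub immediates are encoded as a 12-bit value, optionally shifted left
  // by 12: use the value directly when it fits in 12 bits, otherwise encode
  // (imm >> 12) and set the shift bit.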
  if (is_uint12(imm)) {  // No shift required.
    imm <<= ImmAddSub_offset;
  } else {
    imm = ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
  }
  return imm;
}

Instr Assembler::ImmS(unsigned imms, unsigned reg_size) {
  DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(imms)) ||
         ((reg_size == kWRegSizeInBits) && is_uint5(imms)));
  USE(reg_size);
  return imms << ImmS_offset;
}

Instr Assembler::ImmR(unsigned immr, unsigned reg_size) {
  DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
         ((reg_size == kWRegSizeInBits) && is_uint5(immr)));
  USE(reg_size);
  DCHECK(is_uint6(immr));
  return immr << ImmR_offset;
}

Instr Assembler::ImmSetBits(unsigned imms, unsigned reg_size) {
  DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
  DCHECK(is_uint6(imms));
  DCHECK((reg_size == kXRegSizeInBits) || is_uint6(imms + 3));
  USE(reg_size);
  return imms << ImmSetBits_offset;
}

Instr Assembler::ImmRotate(unsigned immr, unsigned reg_size) {
  DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
  DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
         ((reg_size == kWRegSizeInBits) && is_uint5(immr)));
  USE(reg_size);
  return immr << ImmRotate_offset;
}

Instr Assembler::ImmLLiteral(int imm19) {
  return checked_truncate_to_int19(imm19) << ImmLLiteral_offset;
}

Instr Assembler::BitN(unsigned bitn, unsigned reg_size) {
  DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
  DCHECK((reg_size == kXRegSizeInBits) || (bitn == 0));
  USE(reg_size);
  return bitn << BitN_offset;
}

Instr Assembler::ShiftDP(Shift shift) {
  DCHECK(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
  return shift << ShiftDP_offset;
}

Instr Assembler::ImmDPShift(unsigned amount) {
  DCHECK(is_uint6(amount));
  return amount << ImmDPShift_offset;
}

Instr Assembler::ExtendMode(Extend extend) {
  return extend << ExtendMode_offset;
}

Instr Assembler::ImmExtendShift(unsigned left_shift) {
  DCHECK_LE(left_shift, 4);
  return left_shift << ImmExtendShift_offset;
}

Instr Assembler::ImmCondCmp(unsigned imm) {
  DCHECK(is_uint5(imm));
  return imm << ImmCondCmp_offset;
}

Instr Assembler::Nzcv(StatusFlags nzcv) {
  return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;
}

Instr Assembler::ImmLSUnsigned(int imm12) {
  DCHECK(is_uint12(imm12));
  return imm12 << ImmLSUnsigned_offset;
}

Instr Assembler::ImmLS(int imm9) {
  return checked_truncate_to_int9(imm9) << ImmLS_offset;
}

Instr Assembler::ImmLSPair(int imm7, unsigned size) {
  DCHECK_EQ(imm7,
            static_cast<int>(static_cast<uint32_t>(imm7 >> size) << size));
  int scaled_imm7 = imm7 >> size;
  return checked_truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
}

Instr Assembler::ImmShiftLS(unsigned shift_amount) {
  DCHECK(is_uint1(shift_amount));
  return shift_amount << ImmShiftLS_offset;
}

Instr Assembler::ImmException(int imm16) {
  DCHECK(is_uint16(imm16));
  return imm16 << ImmException_offset;
}

Instr Assembler::ImmSystemRegister(int imm15) {
  DCHECK(is_uint15(imm15));
  return imm15 << ImmSystemRegister_offset;
}

Instr Assembler::ImmHint(int imm7) {
  DCHECK(is_uint7(imm7));
  return imm7 << ImmHint_offset;
}

Instr Assembler::ImmBarrierDomain(int imm2) {
  DCHECK(is_uint2(imm2));
  return imm2 << ImmBarrierDomain_offset;
}

Instr Assembler::ImmBarrierType(int imm2) {
  DCHECK(is_uint2(imm2));
  return imm2 << ImmBarrierType_offset;
}

unsigned Assembler::CalcLSDataSizeLog2(LoadStoreOp op) {
  DCHECK((LSSize_offset + LSSize_width) == (kInstrSize * 8));
  unsigned size_log2 = static_cast<Instr>(op >> LSSize_offset);
  if ((op & LSVector_mask) != 0) {
    // Vector register memory operations encode the access size in the "size"
    // and "opc" fields.
    if (size_log2 == 0 && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
      size_log2 = kQRegSizeLog2;
    }
  }
  return size_log2;
}

Instr Assembler::ImmMoveWide(int imm) {
  DCHECK(is_uint16(imm));
  return imm << ImmMoveWide_offset;
}

Instr Assembler::ShiftMoveWide(int shift) {
  DCHECK(is_uint2(shift));
  return shift << ShiftMoveWide_offset;
}

Instr Assembler::FPType(VRegister fd) { return fd.Is64Bits() ? FP64 : FP32; }

Instr Assembler::FPScale(unsigned scale) {
  DCHECK(is_uint6(scale));
  return scale << FPScale_offset;
}

const Register& Assembler::AppropriateZeroRegFor(const CPURegister& reg) const {
  return reg.Is64Bits() ? xzr : wzr;
}

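// EnsureSpace is constructed at the top of emitting code paths: it opens a
// scope that blocks constant/veneer pool emission and calls CheckBufferSpace()
// so the buffer is grown before it runs out of room.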
EnsureSpace::EnsureSpace(Assembler* assembler) : block_pools_scope_(assembler) {
  assembler->CheckBufferSpace();
}

}  // namespace internal
}  // namespace v8

#endif  // V8_CODEGEN_ARM64_ASSEMBLER_ARM64_INL_H_