#if V8_TARGET_ARCH_ARM64
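// Maps the --sim-arm64-optional-features flag onto a CpuFeatures bitmask for
// the simulator; only the values "none" and "all" are accepted.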
unsigned SimulatorFeaturesFromCommandLine() {
  if (strcmp(v8_flags.sim_arm64_optional_features, "none") == 0) {
    return 0;
  }
  if (strcmp(v8_flags.sim_arm64_optional_features, "all") == 0) {
    return (1u << NUMBER_OF_CPU_FEATURES) - 1;
  }
  fprintf(
      stderr,
      "Error: unrecognised value for --sim-arm64-optional-features ('%s').\n",
      v8_flags.sim_arm64_optional_features.value());
  fprintf(stderr,
          "Supported values are: none\n"
          "                      all\n");
  FATAL("sim-arm64-optional-features");
}
constexpr unsigned CpuFeaturesFromCompiler() {
  unsigned features = 0;
#if defined(__ARM_FEATURE_JCVT) && !defined(V8_TARGET_OS_IOS)
  features |= 1u << JSCVT;
#endif
#if defined(__ARM_FEATURE_DOTPROD)
  features |= 1u << DOTPROD;
#endif
#if defined(__ARM_FEATURE_SHA3)
  features |= 1u << SHA3;
#endif
#if defined(__ARM_FEATURE_ATOMICS)
  features |= 1u << LSE;
#endif
#if defined(__ARM_FEATURE_AES)
  features |= 1u << PMULL1Q;
#endif
  return features;
}
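// Features that can be assumed from the target OS alone (for example, every
// Apple Silicon Mac supports these), without probing the CPU at runtime.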
constexpr unsigned CpuFeaturesFromTargetOS() {
  unsigned features = 0;
#if defined(V8_TARGET_OS_MACOS) && !defined(V8_TARGET_OS_IOS)
  features |= 1u << JSCVT;
  features |= 1u << DOTPROD;
  features |= 1u << LSE;
  features |= 1u << PMULL1Q;
#endif
  return features;
}
  supported_ |= SimulatorFeaturesFromCommandLine();
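  // Probe the host CPU for the optional features that are actually present
  // at runtime.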
  unsigned runtime = 0;
  if (cpu.has_jscvt()) {
    runtime |= 1u << JSCVT;
  }
  if (cpu.has_dot_prod()) {
    runtime |= 1u << DOTPROD;
  }
  if (cpu.has_sha3()) {
    runtime |= 1u << SHA3;
  }
  if (cpu.has_lse()) {
    runtime |= 1u << LSE;
  }
  if (cpu.has_pmull1q()) {
    runtime |= 1u << PMULL1Q;
  }
  if (cpu.has_fp16()) {
    runtime |= 1u << FP16;
  }
  if (Count() % 2 != 0) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc_);
  if (instr->IsLdrLiteralX()) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc_);
  return instr->IsLdrLiteralX() ||
  Instruction* instr = reinterpret_cast<Instruction*>(pc_);
  if (instr->IsLdrLiteralX()) {
    return static_cast<uint32_t>(
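// Returns true if any two of the (up to eight) given registers alias, i.e.
// share a register code within the same bank (general-purpose or vector).
// Invalid (NoReg) arguments are ignored.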
bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
                const CPURegister& reg3, const CPURegister& reg4,
                const CPURegister& reg5, const CPURegister& reg6,
                const CPURegister& reg7, const CPURegister& reg8) {
  int number_of_valid_regs = 0;
  int number_of_valid_fpregs = 0;

  uint64_t unique_regs = 0;
  uint64_t unique_fpregs = 0;

  const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};

  for (size_t i = 0; i < arraysize(regs); i++) {
    if (regs[i].IsRegister()) {
      number_of_valid_regs++;
      unique_regs |= (uint64_t{1} << regs[i].code());
    } else if (regs[i].IsVRegister()) {
      number_of_valid_fpregs++;
      unique_fpregs |= (uint64_t{1} << regs[i].code());
    }
  }

  int number_of_unique_regs = base::bits::CountPopulation(unique_regs);
  int number_of_unique_fpregs = base::bits::CountPopulation(unique_fpregs);

  DCHECK(number_of_valid_regs >= number_of_unique_regs);
  DCHECK(number_of_valid_fpregs >= number_of_unique_fpregs);

  return (number_of_valid_regs != number_of_unique_regs) ||
         (number_of_valid_fpregs != number_of_unique_fpregs);
}
bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
                        const CPURegister& reg3, const CPURegister& reg4,
                        const CPURegister& reg5, const CPURegister& reg6,
                        const CPURegister& reg7, const CPURegister& reg8) {
  DCHECK(reg1.is_valid());
  bool match = true;
  match &= !reg2.is_valid() || reg2.IsSameSizeAndType(reg1);
  match &= !reg3.is_valid() || reg3.IsSameSizeAndType(reg1);
  match &= !reg4.is_valid() || reg4.IsSameSizeAndType(reg1);
  match &= !reg5.is_valid() || reg5.IsSameSizeAndType(reg1);
  match &= !reg6.is_valid() || reg6.IsSameSizeAndType(reg1);
  match &= !reg7.is_valid() || reg7.IsSameSizeAndType(reg1);
  match &= !reg8.is_valid() || reg8.IsSameSizeAndType(reg1);
  return match;
}
bool AreSameFormat(const Register& reg1, const Register& reg2,
                   const Register& reg3, const Register& reg4) {
  return (!reg2.is_valid() || reg2.IsSameSizeAndType(reg1)) &&
         (!reg3.is_valid() || reg3.IsSameSizeAndType(reg1)) &&
         (!reg4.is_valid() || reg4.IsSameSizeAndType(reg1));
}
bool AreSameFormat(const VRegister& reg1, const VRegister& reg2,
                   const VRegister& reg3, const VRegister& reg4) {
  return (!reg2.is_valid() || reg2.IsSameFormat(reg1)) &&
         (!reg3.is_valid() || reg3.IsSameFormat(reg1)) &&
         (!reg4.is_valid() || reg4.IsSameFormat(reg1));
}
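// Returns true if the valid registers among reg1..reg4 have consecutive
// register codes, wrapping around at the top of the register file.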
bool AreConsecutive(const CPURegister& reg1, const CPURegister& reg2,
                    const CPURegister& reg3, const CPURegister& reg4) {
  DCHECK(reg1.is_valid());

  if (!reg2.is_valid()) {
    DCHECK(!reg3.is_valid() && !reg4.is_valid());
    return true;
  } else if (reg2.code() != ((reg1.code() + 1) % (reg1.MaxCode() + 1))) {
    return false;
  }

  if (!reg3.is_valid()) {
    DCHECK(!reg4.is_valid());
    return true;
  } else if (reg3.code() != ((reg2.code() + 1) % (reg1.MaxCode() + 1))) {
    return false;
  }

  if (!reg4.is_valid()) {
    return true;
  } else if (reg4.code() != ((reg3.code() + 1) % (reg1.MaxCode() + 1))) {
    return false;
  }

  return true;
}
bool AreEven(const CPURegister& reg1, const CPURegister& reg2,
             const CPURegister& reg3, const CPURegister& reg4,
             const CPURegister& reg5, const CPURegister& reg6,
             const CPURegister& reg7, const CPURegister& reg8) {
  DCHECK(reg1.is_valid());
  bool even = reg1.IsEven();
  even &= !reg2.is_valid() || reg2.IsEven();
  even &= !reg3.is_valid() || reg3.IsEven();
  even &= !reg4.is_valid() || reg4.IsEven();
  even &= !reg5.is_valid() || reg5.IsEven();
  even &= !reg6.is_valid() || reg6.IsEven();
  even &= !reg7.is_valid() || reg7.IsEven();
  even &= !reg8.is_valid() || reg8.IsEven();
  return even;
}
  return assembler->options().record_reloc_info_for_serialization;
                     const AssemblerOptions& options,
                     std::unique_ptr<AssemblerBuffer> buffer)
    : AssemblerBase(options, std::move(buffer)),
      unresolved_branches_(zone_.get()),
      constpool_(this) {
  Reset();

#if defined(V8_OS_WIN)
  if (options.collect_win64_unwind_info) {
    xdata_encoder_ = std::make_unique<win64_unwindinfo::XdataEncoder>(*this);
  }
#endif
}
Assembler::~Assembler() {
  DCHECK(constpool_.IsEmpty());
  DCHECK_EQ(veneer_pool_blocked_nesting_, 0);
}
void Assembler::AbortedCodeGeneration() { constpool_.Clear(); }

void Assembler::Reset() {
  DCHECK_EQ(veneer_pool_blocked_nesting_, 0);
  DCHECK(unresolved_branches_.empty());
  memset(buffer_start_, 0, pc_ - buffer_start_);

  reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
  constpool_.SetNextCheckIn(ConstantPool::kCheckInterval);
  next_veneer_pool_check_ = kMaxInt;
}
#if defined(V8_OS_WIN)
win64_unwindinfo::BuiltinUnwindInfo Assembler::GetUnwindInfo() const {
  return xdata_encoder_->unwinding_info();
}
#endif
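// Heap numbers requested via ldr(reg, Operand::EmbeddedNumber(...)) are
// allocated here once an isolate is available, and the resulting embedded
// object indices are recorded at the pcs noted in the requests.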
void Assembler::AllocateAndInstallRequestedHeapNumbers(LocalIsolate* isolate) {
  DCHECK_IMPLIES(isolate == nullptr, heap_number_requests_.empty());
  for (auto& request : heap_number_requests_) {
    Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
    Handle<HeapObject> object =
        isolate->factory()->NewHeapNumber<AllocationType::kOld>(
            request.heap_number());
    EmbeddedObjectIndex index = AddEmbeddedObject(object);
    set_embedded_object_index_referenced_from(pc, index);
  }
}
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
  GetCode(isolate->main_thread_local_isolate(), desc);
}
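// The metadata tables (safepoints, handlers, constant pool, code comments,
// jump-table info) are laid out back to front from the end of the generated
// instructions, so the offsets below are computed by successive subtraction.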
void Assembler::GetCode(LocalIsolate* isolate, CodeDesc* desc,
                        SafepointTableBuilderBase* safepoint_table_builder,
                        int handler_table_offset) {
  DataAlign(InstructionStream::kMetadataAlignment);

  ForceConstantPoolEmissionWithoutJump();
  DCHECK(constpool_.IsEmpty());

  int code_comments_size = WriteCodeComments();

  AllocateAndInstallRequestedHeapNumbers(isolate);

  static constexpr int kConstantPoolSize = 0;
  static constexpr int kBuiltinJumpTableInfoSize = 0;
  const int instruction_size = pc_offset();
  const int builtin_jump_table_info_offset =
      instruction_size - kBuiltinJumpTableInfoSize;
  const int code_comments_offset =
      builtin_jump_table_info_offset - code_comments_size;
  const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
  const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
                                        ? constant_pool_offset
                                        : handler_table_offset;
  const int safepoint_table_offset =
      (safepoint_table_builder == kNoSafepointTable)
          ? handler_table_offset2
          : safepoint_table_builder->safepoint_table_offset();
  const int reloc_info_offset =
      static_cast<int>(reloc_info_writer.pos() - buffer_->start());
  CodeDesc::Initialize(desc, this, safepoint_table_offset,
                       handler_table_offset2, constant_pool_offset,
                       code_comments_offset, builtin_jump_table_info_offset,
                       reloc_info_offset);
}
void Assembler::Align(int m) {
  // The modulus must be a power of two and at least one instruction wide.
  DCHECK(m >= kInstrSize && base::bits::IsPowerOfTwo(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}
void Assembler::CodeTargetAlign() {
  // Preferred alignment of jump targets on some ARM chips.
#if !defined(V8_TARGET_OS_MACOS)
  Align(8);
#endif
}
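// Unbound labels are linked to their uses through a chain threaded via the
// branch instructions' own immediate fields; the chain ends at an instruction
// whose offset points back to itself. The helper below walks that chain and
// sanity-checks it, bounded to avoid quadratic behaviour on long chains.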
void Assembler::CheckLabelLinkChain(Label const* label) {
  if (label->is_linked()) {
    static const int kMaxLinksToCheck = 64;
    int links_checked = 0;
    int64_t linkoffset = label->pos();
    bool end_of_chain = false;
    while (!end_of_chain) {
      if (++links_checked > kMaxLinksToCheck) break;
      Instruction* link = InstructionAt(linkoffset);
      int64_t linkpcoffset = link->ImmPCOffset();
      int64_t prevlinkoffset = linkoffset + linkpcoffset;

      end_of_chain = (linkoffset == prevlinkoffset);
      linkoffset = linkoffset + linkpcoffset;
    }
  }
}
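// Unlinks a single branch from a label's link chain, redirecting the
// remaining links through label_veneer when the previous link can no longer
// reach the next one directly.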
void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
                                               Label* label,
                                               Instruction* label_veneer) {
  CheckLabelLinkChain(label);

  Instruction* link = InstructionAt(label->pos());
  Instruction* prev_link = link;
  Instruction* next_link;

  if (link != branch) {
    int i = static_cast<int>(InstructionOffset(branch));
    // The back-edge map tells us which link precedes the branch being removed.
    DCHECK(branch_link_chain_back_edge_.contains(i));
    prev_link = InstructionAt(branch_link_chain_back_edge_.at(i));
  }
  next_link = branch->ImmPCOffsetTarget();

  if (branch == prev_link) {
    // The branch is the first instruction in the chain.
    if (branch == next_link) {
      // It is also the last instruction in the chain, so it was the only
      // branch referring to this label.
      label->Unuse();
    } else {
      label->link_to(static_cast<int>(InstructionOffset(next_link)));
      branch_link_chain_back_edge_.erase(
          static_cast<int>(InstructionOffset(next_link)));
    }
  } else if (branch == next_link) {
    // The branch is the last (but not also the first) instruction in the
    // chain.
    prev_link->SetImmPCOffsetTarget(zone(), options(), prev_link);
    branch_link_chain_back_edge_.erase(
        static_cast<int>(InstructionOffset(branch)));
  } else {
    // The branch is in the middle of the chain.
    int n = static_cast<int>(InstructionOffset(next_link));
    if (branch_link_chain_back_edge_.contains(n)) {
      // The back edge of the next link must now point at the link preceding
      // the removed branch.
      branch_link_chain_back_edge_[n] =
          static_cast<int>(InstructionOffset(prev_link));
      branch_link_chain_back_edge_.erase(
          static_cast<int>(InstructionOffset(branch)));
    }

    if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
      prev_link->SetImmPCOffsetTarget(zone(), options(), next_link);
    } else if (label_veneer != nullptr) {
      // The removed branch cannot simply be skipped: redirect the remaining
      // links in the chain to the veneer.
      prev_link->SetImmPCOffsetTarget(zone(), options(), prev_link);

      bool end_of_chain = false;
      link = next_link;
      while (!end_of_chain) {
        next_link = link->ImmPCOffsetTarget();
        end_of_chain = (link == next_link);
        link->SetImmPCOffsetTarget(zone(), options(), label_veneer);
        // The link is now resolved to the veneer, so drop its entry from the
        // unresolved-branch bookkeeping.
        if (link->IsCondBranchImm() || link->IsCompareBranch()) {
          static_assert(Instruction::ImmBranchRange(CondBranchType) ==
                        Instruction::ImmBranchRange(CompareBranchType));
          int max_reachable_pc = static_cast<int>(InstructionOffset(link)) +
                                 Instruction::ImmBranchRange(CondBranchType);
          unresolved_branches_.erase(max_reachable_pc);
        } else if (link->IsTestBranch()) {
          int max_reachable_pc = static_cast<int>(InstructionOffset(link)) +
                                 Instruction::ImmBranchRange(TestBranchType) +
                                 1;
          unresolved_branches_.erase(max_reachable_pc);
        }
        link = next_link;
      }
    } else {
      // Without a veneer, the previous link must still be able to reach the
      // next one directly.
      CHECK(prev_link->IsTargetInImmPCOffsetRange(next_link));
    }
  }

  CheckLabelLinkChain(label);
}
void Assembler::bind(Label* label) {
  // Bind the label to the current pc. All branches linked to this label are
  // patched to point here.
  DeleteUnresolvedBranchInfoForLabel(label);

  while (label->is_linked()) {
    int linkoffset = label->pos();
    Instruction* link = InstructionAt(linkoffset);
    int prevlinkoffset = linkoffset + static_cast<int>(link->ImmPCOffset());

    CheckLabelLinkChain(label);

    DCHECK((linkoffset > prevlinkoffset) ||
           (linkoffset - prevlinkoffset == kStartOfLabelLinkChain));

    // Update the link to point to the label.
    if (link->IsUnresolvedInternalReference()) {
      // Internal references are resolved to an absolute address rather than a
      // pc-relative instruction offset.
      internal_reference_positions_.push_back(linkoffset);
      memcpy(link, &pc_, kSystemPointerSize);
    } else {
      link->SetImmPCOffsetTarget(zone(), options(),
                                 reinterpret_cast<Instruction*>(pc_));

      branch_link_chain_back_edge_.erase(
          static_cast<int>(InstructionOffset(link)));
    }

    // Move on to the next link in the chain, or stop if this was the first
    // link that was emitted.
    if (linkoffset - prevlinkoffset == kStartOfLabelLinkChain) {
      label->Unuse();
    } else {
      label->link_to(prevlinkoffset);
    }
  }
  label->bind_to(pc_offset());
}
int Assembler::LinkAndGetByteOffsetTo(Label* label) {
  int offset;
  CheckLabelLinkChain(label);

  if (label->is_bound()) {
    // The label is bound, so it does not need to be updated: the offset is
    // simply the distance back to the bound position.
    offset = label->pos() - pc_offset();
  } else {
    if (label->is_linked()) {
      // The label is linked: this instruction becomes the new head of the
      // link chain and refers to the previous head.
      offset = label->pos() - pc_offset();
    } else {
      // The label is unused: this instruction starts a new link chain.
      offset = kStartOfLabelLinkChain;
    }
    // The instruction at pc is now the last link in the label's chain.
    label->link_to(pc_offset());
  }

  return offset;
}
void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) {
  DCHECK(label->is_linked());
  CheckLabelLinkChain(label);

  int link_offset = label->pos();
  int link_pcoffset;
  bool end_of_chain = false;

  while (!end_of_chain) {
    Instruction* link = InstructionAt(link_offset);
    int max_reachable_pc = static_cast<int>(InstructionOffset(link));

    // Range-limited branches are tracked in unresolved_branches_, keyed by
    // the farthest pc they can reach; remove those entries for every link in
    // the chain.
    if (link->IsCondBranchImm() || link->IsCompareBranch()) {
      static_assert(Instruction::ImmBranchRange(CondBranchType) ==
                    Instruction::ImmBranchRange(CompareBranchType));
      max_reachable_pc += Instruction::ImmBranchRange(CondBranchType);
      unresolved_branches_.erase(max_reachable_pc);
      link_pcoffset = link->ImmCondBranch() * kInstrSize;
    } else if (link->IsTestBranch()) {
      max_reachable_pc += Instruction::ImmBranchRange(TestBranchType) + 1;
      unresolved_branches_.erase(max_reachable_pc);
      link_pcoffset = link->ImmTestBranch() * kInstrSize;
    } else if (link->IsUncondBranchImm()) {
      link_pcoffset = link->ImmUncondBranch() * kInstrSize;
    } else {
      link_pcoffset = static_cast<int>(link->ImmPCOffset());
    }

    end_of_chain = (link_pcoffset == 0);
    link_offset = link_offset + link_pcoffset;
  }
}
void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
  if (unresolved_branches_.empty()) {
    DCHECK_EQ(next_veneer_pool_check_, kMaxInt);
    return;
  }

  if (label->is_linked()) {
    // Branches to this label will be patched when the label is bound, so no
    // veneer is needed for them; drop their tracking info.
    DeleteUnresolvedBranchInfoForLabelTraverse(label);
  }
  if (unresolved_branches_.empty()) {
    next_veneer_pool_check_ = kMaxInt;
  } else {
    next_veneer_pool_check_ =
        unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
  }
}
bool Assembler::IsConstantPoolAt(Instruction* instr) {
  // A constant pool is marked by 'ldr xzr, #<pool size>' followed by
  // 'blr xzr', a sequence the JIT never emits for real code. Checking the
  // first instruction identifies the pool; the DCHECK verifies the marker is
  // complete.
  bool result = instr->IsLdrLiteralX() && (instr->Rt() == kZeroRegCode);
  DCHECK(!result || (instr->following()->IsBranchAndLinkToRegister() &&
                     instr->following()->Rn() == kZeroRegCode));
  return result;
}
int Assembler::ConstantPoolSizeAt(Instruction* instr) {
#ifdef USE_SIMULATOR
  // Assembler::debug() embeds constants directly into the instruction stream.
  // Although this is not a genuine constant pool, treat it like one to avoid
  // disassembling the message text.
  if ((instr->Mask(ExceptionMask) == HLT) &&
      (instr->ImmException() == kImmExceptionIsDebug)) {
    const char* message = reinterpret_cast<const char*>(
        instr->InstructionAtOffset(kDebugMessageOffset));
    int size = static_cast<int>(kDebugMessageOffset + strlen(message) + 1);
    return RoundUp(size, kInstrSize) / kInstrSize;
  }
  // Same for printf support; see MacroAssembler::CallPrintf().
  if ((instr->Mask(ExceptionMask) == HLT) &&
      (instr->ImmException() == kImmExceptionIsPrintf)) {
    return kPrintfLength / kInstrSize;
  }
#endif
  if (IsConstantPoolAt(instr)) {
    return instr->ImmLLiteral();
  } else {
    return -1;
  }
}
void Assembler::EmitPoolGuard() {
  // We must generate only one instruction as this is used in scopes that
  // control the size of the code generated.
  Emit(BLR | Rn(xzr));
}
void Assembler::StartBlockVeneerPool() { ++veneer_pool_blocked_nesting_; }

void Assembler::EndBlockVeneerPool() {
  if (--veneer_pool_blocked_nesting_ == 0) {
    // Check the veneer pool hasn't been blocked for too long.
    DCHECK(unresolved_branches_.empty() ||
           (pc_offset() < unresolved_branches_first_limit()));
  }
}
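// Branch and test-branch emitters. The Label overloads link the new branch
// into the label's chain via LinkAndGetBranchInstructionOffsetTo() and emit
// the resulting instruction-granularity offset.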
void Assembler::br(const Register& xn) {
  DCHECK(xn.Is64Bits());
  Emit(BR | Rn(xn));
}

void Assembler::blr(const Register& xn) {
  DCHECK(xn.Is64Bits());
  // The pattern 'blr xzr' is used as a guard to detect when execution falls
  // through the constant pool. It should not be emitted.
  DCHECK_NE(xn, xzr);
  Emit(BLR | Rn(xn));
}

void Assembler::ret(const Register& xn) {
  DCHECK(xn.Is64Bits());
  Emit(RET | Rn(xn));
}

void Assembler::b(int imm26) { Emit(B | ImmUncondBranch(imm26)); }

void Assembler::b(Label* label) {
  b(LinkAndGetBranchInstructionOffsetTo(label));
}

void Assembler::b(int imm19, Condition cond) {
  Emit(B_cond | ImmCondBranch(imm19) | cond);
}

void Assembler::b(Label* label, Condition cond) {
  b(LinkAndGetBranchInstructionOffsetTo(label), cond);
}

void Assembler::bl(int imm26) { Emit(BL | ImmUncondBranch(imm26)); }

void Assembler::bl(Label* label) {
  bl(LinkAndGetBranchInstructionOffsetTo(label));
}

void Assembler::cbz(const Register& rt, int imm19) {
  Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
}

void Assembler::cbz(const Register& rt, Label* label) {
  cbz(rt, LinkAndGetBranchInstructionOffsetTo(label));
}

void Assembler::cbnz(const Register& rt, int imm19) {
  Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
}

void Assembler::cbnz(const Register& rt, Label* label) {
  cbnz(rt, LinkAndGetBranchInstructionOffsetTo(label));
}

void Assembler::tbz(const Register& rt, unsigned bit_pos, int imm14) {
  DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
  Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}

void Assembler::tbz(const Register& rt, unsigned bit_pos, Label* label) {
  tbz(rt, bit_pos, LinkAndGetBranchInstructionOffsetTo(label));
}

void Assembler::tbnz(const Register& rt, unsigned bit_pos, int imm14) {
  DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
  Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}

void Assembler::tbnz(const Register& rt, unsigned bit_pos, Label* label) {
  tbnz(rt, bit_pos, LinkAndGetBranchInstructionOffsetTo(label));
}

void Assembler::adr(const Register& rd, int imm21) {
  DCHECK(rd.Is64Bits());
  Emit(ADR | ImmPCRelAddress(imm21) | Rd(rd));
}

void Assembler::adr(const Register& rd, Label* label) {
  adr(rd, LinkAndGetByteOffsetTo(label));
}

void Assembler::nop(NopMarkerTypes n) {
  DCHECK((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
  mov(Register::XRegFromCode(n), Register::XRegFromCode(n));
}
void Assembler::add(const Register& rd, const Register& rn,
                    const Operand& operand) {
  AddSub(rd, rn, operand, LeaveFlags, ADD);
}

void Assembler::adds(const Register& rd, const Register& rn,
                     const Operand& operand) {
  AddSub(rd, rn, operand, SetFlags, ADD);
}

void Assembler::cmn(const Register& rn, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rn);
  adds(zr, rn, operand);
}

void Assembler::sub(const Register& rd, const Register& rn,
                    const Operand& operand) {
  AddSub(rd, rn, operand, LeaveFlags, SUB);
}

void Assembler::subs(const Register& rd, const Register& rn,
                     const Operand& operand) {
  AddSub(rd, rn, operand, SetFlags, SUB);
}

void Assembler::cmp(const Register& rn, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rn);
  subs(zr, rn, operand);
}

void Assembler::neg(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sub(rd, zr, operand);
}

void Assembler::negs(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  subs(rd, zr, operand);
}

void Assembler::adc(const Register& rd, const Register& rn,
                    const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC);
}

void Assembler::adcs(const Register& rd, const Register& rn,
                     const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, SetFlags, ADC);
}

void Assembler::sbc(const Register& rd, const Register& rn,
                    const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC);
}

void Assembler::sbcs(const Register& rd, const Register& rn,
                     const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, SetFlags, SBC);
}

void Assembler::ngc(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sbc(rd, zr, operand);
}

void Assembler::ngcs(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sbcs(rd, zr, operand);
}

void Assembler::and_(const Register& rd, const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, AND);
}

void Assembler::ands(const Register& rd, const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, ANDS);
}

void Assembler::tst(const Register& rn, const Operand& operand) {
  ands(AppropriateZeroRegFor(rn), rn, operand);
}

void Assembler::bic(const Register& rd, const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, BIC);
}

void Assembler::bics(const Register& rd, const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, BICS);
}

void Assembler::orr(const Register& rd, const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, ORR);
}

void Assembler::orn(const Register& rd, const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, ORN);
}

void Assembler::eor(const Register& rd, const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, EOR);
}

void Assembler::eon(const Register& rd, const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, EON);
}
1059void Assembler::lslv(
const Register& rd,
const Register& rn,
1060 const Register& rm) {
1061 DCHECK(rd.SizeInBits() == rn.SizeInBits());
1062 DCHECK(rd.SizeInBits() == rm.SizeInBits());
1063 Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
1066void Assembler::lsrv(
const Register& rd,
const Register& rn,
1067 const Register& rm) {
1068 DCHECK(rd.SizeInBits() == rn.SizeInBits());
1069 DCHECK(rd.SizeInBits() == rm.SizeInBits());
1070 Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
1073void Assembler::asrv(
const Register& rd,
const Register& rn,
1074 const Register& rm) {
1075 DCHECK(rd.SizeInBits() == rn.SizeInBits());
1076 DCHECK(rd.SizeInBits() == rm.SizeInBits());
1077 Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
1080void Assembler::rorv(
const Register& rd,
const Register& rn,
1081 const Register& rm) {
1082 DCHECK(rd.SizeInBits() == rn.SizeInBits());
1083 DCHECK(rd.SizeInBits() == rm.SizeInBits());
1084 Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
1088void Assembler::bfm(
const Register& rd,
const Register& rn,
int immr,
1090 DCHECK(rd.SizeInBits() == rn.SizeInBits());
1092 Emit(SF(rd) | BFM | N | ImmR(immr, rd.SizeInBits()) |
1093 ImmS(imms, rn.SizeInBits()) | Rn(rn) | Rd(rd));
1096void Assembler::sbfm(
const Register& rd,
const Register& rn,
int immr,
1098 DCHECK(rd.Is64Bits() || rn.Is32Bits());
1100 Emit(SF(rd) | SBFM | N | ImmR(immr, rd.SizeInBits()) |
1101 ImmS(imms, rn.SizeInBits()) | Rn(rn) | Rd(rd));
1104void Assembler::ubfm(
const Register& rd,
const Register& rn,
int immr,
1106 DCHECK(rd.SizeInBits() == rn.SizeInBits());
1108 Emit(SF(rd) | UBFM | N | ImmR(immr, rd.SizeInBits()) |
1109 ImmS(imms, rn.SizeInBits()) | Rn(rn) | Rd(rd));
1112void Assembler::extr(
const Register& rd,
const Register& rn,
const Register& rm,
1114 DCHECK(rd.SizeInBits() == rn.SizeInBits());
1115 DCHECK(rd.SizeInBits() == rm.SizeInBits());
1117 Emit(SF(rd) | EXTR | N | Rm(rm) | ImmS(lsb, rn.SizeInBits()) | Rn(rn) |
1121void Assembler::csel(
const Register& rd,
const Register& rn,
const Register& rm,
1123 ConditionalSelect(rd, rn, rm, cond, CSEL);
1126void Assembler::csinc(
const Register& rd,
const Register& rn,
1127 const Register& rm, Condition cond) {
1128 ConditionalSelect(rd, rn, rm, cond, CSINC);
1131void Assembler::csinv(
const Register& rd,
const Register& rn,
1132 const Register& rm, Condition cond) {
1133 ConditionalSelect(rd, rn, rm, cond, CSINV);
1136void Assembler::csneg(
const Register& rd,
const Register& rn,
1137 const Register& rm, Condition cond) {
1138 ConditionalSelect(rd, rn, rm, cond, CSNEG);
1141void Assembler::cset(
const Register& rd, Condition cond) {
1142 DCHECK((cond != al) && (cond != nv));
1143 Register zr = AppropriateZeroRegFor(rd);
1147void Assembler::csetm(
const Register& rd, Condition cond) {
1148 DCHECK((cond != al) && (cond != nv));
1149 Register zr = AppropriateZeroRegFor(rd);
1153void Assembler::cinc(
const Register& rd,
const Register& rn, Condition cond) {
1154 DCHECK((cond != al) && (cond != nv));
1158void Assembler::cinv(
const Register& rd,
const Register& rn, Condition cond) {
1159 DCHECK((cond != al) && (cond != nv));
1163void Assembler::cneg(
const Register& rd,
const Register& rn, Condition cond) {
1164 DCHECK((cond != al) && (cond != nv));
1168void Assembler::ConditionalSelect(
const Register& rd,
const Register& rn,
1169 const Register& rm, Condition cond,
1170 ConditionalSelectOp op) {
1171 DCHECK(rd.SizeInBits() == rn.SizeInBits());
1172 DCHECK(rd.SizeInBits() == rm.SizeInBits());
1173 Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
1176void Assembler::ccmn(
const Register& rn,
const Operand& operand,
1177 StatusFlags nzcv, Condition cond) {
1178 ConditionalCompare(rn, operand, nzcv, cond, CCMN);
1181void Assembler::ccmp(
const Register& rn,
const Operand& operand,
1182 StatusFlags nzcv, Condition cond) {
1183 ConditionalCompare(rn, operand, nzcv, cond, CCMP);
1186void Assembler::DataProcessing3Source(
const Register& rd,
const Register& rn,
1187 const Register& rm,
const Register& ra,
1188 DataProcessing3SourceOp op) {
1189 Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
1192void Assembler::mul(
const Register& rd,
const Register& rn,
1193 const Register& rm) {
1195 Register zr = AppropriateZeroRegFor(rn);
1196 DataProcessing3Source(rd, rn, rm, zr, MADD);
1199void Assembler::madd(
const Register& rd,
const Register& rn,
const Register& rm,
1200 const Register& ra) {
1202 DataProcessing3Source(rd, rn, rm, ra, MADD);
1205void Assembler::mneg(
const Register& rd,
const Register& rn,
1206 const Register& rm) {
1208 Register zr = AppropriateZeroRegFor(rn);
1209 DataProcessing3Source(rd, rn, rm, zr, MSUB);
1212void Assembler::msub(
const Register& rd,
const Register& rn,
const Register& rm,
1213 const Register& ra) {
1215 DataProcessing3Source(rd, rn, rm, ra, MSUB);
1218void Assembler::smaddl(
const Register& rd,
const Register& rn,
1219 const Register& rm,
const Register& ra) {
1220 DCHECK(rd.Is64Bits() && ra.Is64Bits());
1221 DCHECK(rn.Is32Bits() && rm.Is32Bits());
1222 DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);
1225void Assembler::smsubl(
const Register& rd,
const Register& rn,
1226 const Register& rm,
const Register& ra) {
1227 DCHECK(rd.Is64Bits() && ra.Is64Bits());
1228 DCHECK(rn.Is32Bits() && rm.Is32Bits());
1229 DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);
1232void Assembler::umaddl(
const Register& rd,
const Register& rn,
1233 const Register& rm,
const Register& ra) {
1234 DCHECK(rd.Is64Bits() && ra.Is64Bits());
1235 DCHECK(rn.Is32Bits() && rm.Is32Bits());
1236 DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);
1239void Assembler::umsubl(
const Register& rd,
const Register& rn,
1240 const Register& rm,
const Register& ra) {
1241 DCHECK(rd.Is64Bits() && ra.Is64Bits());
1242 DCHECK(rn.Is32Bits() && rm.Is32Bits());
1243 DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);
1246void Assembler::smull(
const Register& rd,
const Register& rn,
1247 const Register& rm) {
1249 DCHECK(rn.Is32Bits() && rm.Is32Bits());
1250 DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);
1253void Assembler::smulh(
const Register& rd,
const Register& rn,
1254 const Register& rm) {
1258 DataProcessing3Source(rd, rn, rm, xzr, SMULH_x);
1261void Assembler::umulh(
const Register& rd,
const Register& rn,
1262 const Register& rm) {
1266 DataProcessing3Source(rd, rn, rm, xzr, UMULH_x);
1269void Assembler::sdiv(
const Register& rd,
const Register& rn,
1270 const Register& rm) {
1271 DCHECK(rd.SizeInBits() == rn.SizeInBits());
1272 DCHECK(rd.SizeInBits() == rm.SizeInBits());
1273 Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
1276void Assembler::udiv(
const Register& rd,
const Register& rn,
1277 const Register& rm) {
1278 DCHECK(rd.SizeInBits() == rn.SizeInBits());
1279 DCHECK(rd.SizeInBits() == rm.SizeInBits());
1280 Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
1283void Assembler::rbit(
const Register& rd,
const Register& rn) {
1284 DataProcessing1Source(rd, rn, RBIT);
1287void Assembler::rev16(
const Register& rd,
const Register& rn) {
1288 DataProcessing1Source(rd, rn, REV16);
1291void Assembler::rev32(
const Register& rd,
const Register& rn) {
1293 DataProcessing1Source(rd, rn, REV);
1296void Assembler::rev(
const Register& rd,
const Register& rn) {
1297 DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
1300void Assembler::clz(
const Register& rd,
const Register& rn) {
1301 DataProcessing1Source(rd, rn, CLZ);
1304void Assembler::cls(
const Register& rd,
const Register& rn) {
1305 DataProcessing1Source(rd, rn, CLS);
1308void Assembler::pacib1716() { Emit(PACIB1716); }
1309void Assembler::autib1716() { Emit(AUTIB1716); }
1310void Assembler::pacibsp() { Emit(PACIBSP); }
1311void Assembler::autibsp() { Emit(AUTIBSP); }
1313void Assembler::bti(BranchTargetIdentifier
id) {
1316 case BranchTargetIdentifier::kBti:
1319 case BranchTargetIdentifier::kBtiCall:
1322 case BranchTargetIdentifier::kBtiJump:
1325 case BranchTargetIdentifier::kBtiJumpCall:
1328 case BranchTargetIdentifier::kNone:
1329 case BranchTargetIdentifier::kPacibsp:
1337void Assembler::ldp(
const CPURegister& rt,
const CPURegister& rt2,
1338 const MemOperand& src) {
1339 LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
1342void Assembler::stp(
const CPURegister& rt,
const CPURegister& rt2,
1343 const MemOperand& dst) {
1344 LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
1346#if defined(V8_OS_WIN)
1347 if (xdata_encoder_ && rt == x29 && rt2 == lr && dst.base().IsSP()) {
1348 xdata_encoder_->onSaveFpLr();
1353void Assembler::ldpsw(
const Register& rt,
const Register& rt2,
1354 const MemOperand& src) {
1356 LoadStorePair(rt, rt2, src, LDPSW_x);
1359void Assembler::LoadStorePair(
const CPURegister& rt,
const CPURegister& rt2,
1360 const MemOperand& addr, LoadStorePairOp op) {
1362 DCHECK(((op & LoadStorePairLBit) == 0) || rt != rt2);
1365 int offset =
static_cast<int>(addr.offset());
1367 Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
1371 if (addr.IsImmediateOffset()) {
1378 if (addr.IsPreIndex()) {
1381 DCHECK(addr.IsPostIndex());
1385 Emit(addrmodeop | memop);
1389void Assembler::ldrb(
const Register& rt,
const MemOperand& src) {
1390 LoadStore(rt, src, LDRB_w);
1393void Assembler::strb(
const Register& rt,
const MemOperand& dst) {
1394 LoadStore(rt, dst, STRB_w);
1397void Assembler::ldrsb(
const Register& rt,
const MemOperand& src) {
1398 LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w);
1401void Assembler::ldrh(
const Register& rt,
const MemOperand& src) {
1402 LoadStore(rt, src, LDRH_w);
1405void Assembler::strh(
const Register& rt,
const MemOperand& dst) {
1406 LoadStore(rt, dst, STRH_w);
1409void Assembler::ldrsh(
const Register& rt,
const MemOperand& src) {
1410 LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w);
1413void Assembler::ldr(
const CPURegister& rt,
const MemOperand& src) {
1414 LoadStore(rt, src, LoadOpFor(rt));
1417void Assembler::str(
const CPURegister& rt,
const MemOperand& src) {
1418 LoadStore(rt, src, StoreOpFor(rt));
1421void Assembler::ldrsw(
const Register& rt,
const MemOperand& src) {
1423 LoadStore(rt, src, LDRSW_x);
1426void Assembler::ldr_pcrel(
const CPURegister& rt,
int imm19) {
1430 Emit(LoadLiteralOpFor(rt) | ImmLLiteral(imm19) | Rt(rt));
1433Operand Operand::EmbeddedNumber(
double number) {
1436 return Operand(Immediate(Smi::FromInt(smi)));
1438 return EmbeddedHeapNumber(number);
1441Operand Operand::EmbeddedHeapNumber(
double number) {
1442 Operand
result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
1443 result.heap_number_request_.emplace(number);
1448void Assembler::ldr(
const CPURegister& rt,
const Operand& operand) {
1449 if (operand.IsHeapNumberRequest()) {
1450 BlockPoolsScope no_pool_before_ldr_of_heap_number_request(
this);
1451 RequestHeapNumber(operand.heap_number_request());
1452 ldr(rt, operand.immediate_for_heap_number_request());
1454 ldr(rt, operand.immediate());
1458void Assembler::ldr(
const CPURegister& rt,
const Immediate& imm) {
1459 BlockPoolsScope no_pool_before_ldr_pcrel_instr(
this);
1460 RecordRelocInfo(imm.rmode(), imm.value());
1466void Assembler::ldar(
const Register& rt,
const Register& rn) {
1469 Emit(op | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
1472void Assembler::ldaxr(
const Register& rt,
const Register& rn) {
1475 Emit(op | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
1478void Assembler::stlr(
const Register& rt,
const Register& rn) {
1481 Emit(op | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
1484void Assembler::stlxr(
const Register& rs,
const Register& rt,
1485 const Register& rn) {
1487 DCHECK(rs != rt && rs != rn);
1489 Emit(op | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
1492void Assembler::ldarb(
const Register& rt,
const Register& rn) {
1495 Emit(LDAR_b | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
1498void Assembler::ldaxrb(
const Register& rt,
const Register& rn) {
1501 Emit(LDAXR_b | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
1504void Assembler::stlrb(
const Register& rt,
const Register& rn) {
1507 Emit(STLR_b | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
1510void Assembler::stlxrb(
const Register& rs,
const Register& rt,
1511 const Register& rn) {
1515 DCHECK(rs != rt && rs != rn);
1516 Emit(STLXR_b | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
1519void Assembler::ldarh(
const Register& rt,
const Register& rn) {
1522 Emit(LDAR_h | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
1525void Assembler::ldaxrh(
const Register& rt,
const Register& rn) {
1528 Emit(LDAXR_h | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
1531void Assembler::stlrh(
const Register& rt,
const Register& rn) {
1534 Emit(STLR_h | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
1537void Assembler::stlxrh(
const Register& rs,
const Register& rt,
1538 const Register& rn) {
1542 DCHECK(rs != rt && rs != rn);
1543 Emit(STLXR_h | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
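// Compare-and-swap emitters, generated from the lists below. All of them
// require the LSE (Large System Extensions) feature and an immediate offset
// of zero.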
#define COMPARE_AND_SWAP_W_X_LIST(V) \
  V(cas, CAS)                        \
  V(casa, CASA)                      \
  V(casl, CASL)                      \
  V(casal, CASAL)

#define DEFINE_ASM_FUNC(FN, OP)                                     \
  void Assembler::FN(const Register& rs, const Register& rt,        \
                     const MemOperand& src) {                       \
    DCHECK(IsEnabled(LSE));                                         \
    DCHECK(src.IsImmediateOffset() && (src.offset() == 0));         \
    LoadStoreAcquireReleaseOp op = rt.Is64Bits() ? OP##_x : OP##_w; \
    Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(src.base()));       \
  }
COMPARE_AND_SWAP_W_X_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC
1563#define COMPARE_AND_SWAP_W_LIST(V) \
1573#define DEFINE_ASM_FUNC(FN, OP) \
1574 void Assembler::FN(const Register& rs, const Register& rt, \
1575 const MemOperand& src) { \
1576 DCHECK(IsEnabled(LSE)); \
1577 DCHECK(src.IsImmediateOffset() && (src.offset() == 0)); \
1578 Emit(OP | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(src.base())); \
1580COMPARE_AND_SWAP_W_LIST(DEFINE_ASM_FUNC)
1581#undef DEFINE_ASM_FUNC
1583#define COMPARE_AND_SWAP_PAIR_LIST(V) \
1589#define DEFINE_ASM_FUNC(FN, OP) \
1590 void Assembler::FN(const Register& rs, const Register& rs1, \
1591 const Register& rt, const Register& rt1, \
1592 const MemOperand& src) { \
1593 DCHECK(IsEnabled(LSE)); \
1594 DCHECK(src.IsImmediateOffset() && (src.offset() == 0)); \
1595 DCHECK(AreEven(rs, rt)); \
1596 DCHECK(AreConsecutive(rs, rs1)); \
1597 DCHECK(AreConsecutive(rt, rt1)); \
1598 DCHECK(AreSameFormat(rs, rs1, rt, rt1)); \
1599 LoadStoreAcquireReleaseOp op = rt.Is64Bits() ? OP##_x : OP##_w; \
1600 Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(src.base())); \
1602COMPARE_AND_SWAP_PAIR_LIST(DEFINE_ASM_FUNC)
1603#undef DEFINE_ASM_FUNC
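// Atomic read-modify-write emitters (ld<op>/st<op>/swp and their
// acquire/release, byte and halfword variants), also generated from lists and
// gated on LSE.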
1609#define ATOMIC_MEMORY_SIMPLE_OPERATION_LIST(V, DEF) \
1610 V(DEF, add, LDADD) \
1611 V(DEF, clr, LDCLR) \
1612 V(DEF, eor, LDEOR) \
1613 V(DEF, set, LDSET) \
1614 V(DEF, smax, LDSMAX) \
1615 V(DEF, smin, LDSMIN) \
1616 V(DEF, umax, LDUMAX) \
1617 V(DEF, umin, LDUMIN)
1619#define ATOMIC_MEMORY_STORE_MODES(V, NAME, OP) \
1620 V(NAME, OP##_x, OP##_w) \
1621 V(NAME##l, OP##L_x, OP##L_w) \
1622 V(NAME##b, OP##B, OP##B) \
1623 V(NAME##lb, OP##LB, OP##LB) \
1624 V(NAME##h, OP##H, OP##H) \
1625 V(NAME##lh, OP##LH, OP##LH)
1627#define ATOMIC_MEMORY_LOAD_MODES(V, NAME, OP) \
1628 ATOMIC_MEMORY_STORE_MODES(V, NAME, OP) \
1629 V(NAME##a, OP##A_x, OP##A_w) \
1630 V(NAME##al, OP##AL_x, OP##AL_w) \
1631 V(NAME##ab, OP##AB, OP##AB) \
1632 V(NAME##alb, OP##ALB, OP##ALB) \
1633 V(NAME##ah, OP##AH, OP##AH) \
1634 V(NAME##alh, OP##ALH, OP##ALH)
1636#define DEFINE_ASM_LOAD_FUNC(FN, OP_X, OP_W) \
1637 void Assembler::ld##FN(const Register& rs, const Register& rt, \
1638 const MemOperand& src) { \
1639 DCHECK(IsEnabled(LSE)); \
1640 DCHECK(src.IsImmediateOffset() && (src.offset() == 0)); \
1641 AtomicMemoryOp op = rt.Is64Bits() ? OP_X : OP_W; \
1642 Emit(op | Rs(rs) | Rt(rt) | RnSP(src.base())); \
1644#define DEFINE_ASM_STORE_FUNC(FN, OP_X, OP_W) \
1645 void Assembler::st##FN(const Register& rs, const MemOperand& src) { \
1646 DCHECK(IsEnabled(LSE)); \
1647 ld##FN(rs, AppropriateZeroRegFor(rs), src); \
1650ATOMIC_MEMORY_SIMPLE_OPERATION_LIST(ATOMIC_MEMORY_LOAD_MODES,
1651 DEFINE_ASM_LOAD_FUNC)
1652ATOMIC_MEMORY_SIMPLE_OPERATION_LIST(ATOMIC_MEMORY_STORE_MODES,
1653 DEFINE_ASM_STORE_FUNC)
1655#define DEFINE_ASM_SWP_FUNC(FN, OP_X, OP_W) \
1656 void Assembler::FN(const Register& rs, const Register& rt, \
1657 const MemOperand& src) { \
1658 DCHECK(IsEnabled(LSE)); \
1659 DCHECK(src.IsImmediateOffset() && (src.offset() == 0)); \
1660 AtomicMemoryOp op = rt.Is64Bits() ? OP_X : OP_W; \
1661 Emit(op | Rs(rs) | Rt(rt) | RnSP(src.base())); \
1664ATOMIC_MEMORY_LOAD_MODES(DEFINE_ASM_SWP_FUNC, swp, SWP)
1666#undef DEFINE_ASM_LOAD_FUNC
1667#undef DEFINE_ASM_STORE_FUNC
1668#undef DEFINE_ASM_SWP_FUNC
1670void Assembler::sdot(
const VRegister& vd,
const VRegister& vn,
1671 const VRegister& vm) {
1672 DCHECK(IsEnabled(DOTPROD));
1673 DCHECK((vn.Is16B() && vd.Is4S()) || (vn.Is8B() && vd.Is2S()));
1675 Emit(VFormat(vd) | NEON_SDOT | Rm(vm) | Rn(vn) | Rd(vd));
1678void Assembler::NEON3DifferentL(
const VRegister& vd,
const VRegister& vn,
1679 const VRegister& vm, NEON3DifferentOp vop) {
1681 DCHECK((vn.Is1H() && vd.Is1S()) || (vn.Is1S() && vd.Is1D()) ||
1682 (vn.Is8B() && vd.Is8H()) || (vn.Is4H() && vd.Is4S()) ||
1683 (vn.Is2S() && vd.Is2D()) || (vn.Is16B() && vd.Is8H()) ||
1684 (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D()));
1685 Instr format, op = vop;
1686 if (vd.IsScalar()) {
1688 format = SFormat(vn);
1690 format = VFormat(vn);
1692 Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
1695void Assembler::NEON3DifferentW(
const VRegister& vd,
const VRegister& vn,
1696 const VRegister& vm, NEON3DifferentOp vop) {
1698 DCHECK((vm.Is8B() && vd.Is8H()) || (vm.Is4H() && vd.Is4S()) ||
1699 (vm.Is2S() && vd.Is2D()) || (vm.Is16B() && vd.Is8H()) ||
1700 (vm.Is8H() && vd.Is4S()) || (vm.Is4S() && vd.Is2D()));
1701 Emit(VFormat(vm) | vop | Rm(vm) | Rn(vn) | Rd(vd));
1704void Assembler::NEON3DifferentHN(
const VRegister& vd,
const VRegister& vn,
1705 const VRegister& vm, NEON3DifferentOp vop) {
1707 DCHECK((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) ||
1708 (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) ||
1709 (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D()));
1710 Emit(VFormat(vd) | vop | Rm(vm) | Rn(vn) | Rd(vd));
1713#define NEON_3DIFF_LONG_LIST(V) \
1714 V(saddl, NEON_SADDL, vn.IsVector() && vn.IsD()) \
1715 V(saddl2, NEON_SADDL2, vn.IsVector() && vn.IsQ()) \
1716 V(sabal, NEON_SABAL, vn.IsVector() && vn.IsD()) \
1717 V(sabal2, NEON_SABAL2, vn.IsVector() && vn.IsQ()) \
1718 V(uabal, NEON_UABAL, vn.IsVector() && vn.IsD()) \
1719 V(uabal2, NEON_UABAL2, vn.IsVector() && vn.IsQ()) \
1720 V(sabdl, NEON_SABDL, vn.IsVector() && vn.IsD()) \
1721 V(sabdl2, NEON_SABDL2, vn.IsVector() && vn.IsQ()) \
1722 V(uabdl, NEON_UABDL, vn.IsVector() && vn.IsD()) \
1723 V(uabdl2, NEON_UABDL2, vn.IsVector() && vn.IsQ()) \
1724 V(smlal, NEON_SMLAL, vn.IsVector() && vn.IsD()) \
1725 V(smlal2, NEON_SMLAL2, vn.IsVector() && vn.IsQ()) \
1726 V(umlal, NEON_UMLAL, vn.IsVector() && vn.IsD()) \
1727 V(umlal2, NEON_UMLAL2, vn.IsVector() && vn.IsQ()) \
1728 V(smlsl, NEON_SMLSL, vn.IsVector() && vn.IsD()) \
1729 V(smlsl2, NEON_SMLSL2, vn.IsVector() && vn.IsQ()) \
1730 V(umlsl, NEON_UMLSL, vn.IsVector() && vn.IsD()) \
1731 V(umlsl2, NEON_UMLSL2, vn.IsVector() && vn.IsQ()) \
1732 V(smull, NEON_SMULL, vn.IsVector() && vn.IsD()) \
1733 V(smull2, NEON_SMULL2, vn.IsVector() && vn.IsQ()) \
1734 V(umull, NEON_UMULL, vn.IsVector() && vn.IsD()) \
1735 V(umull2, NEON_UMULL2, vn.IsVector() && vn.IsQ()) \
1736 V(ssubl, NEON_SSUBL, vn.IsVector() && vn.IsD()) \
1737 V(ssubl2, NEON_SSUBL2, vn.IsVector() && vn.IsQ()) \
1738 V(uaddl, NEON_UADDL, vn.IsVector() && vn.IsD()) \
1739 V(uaddl2, NEON_UADDL2, vn.IsVector() && vn.IsQ()) \
1740 V(usubl, NEON_USUBL, vn.IsVector() && vn.IsD()) \
1741 V(usubl2, NEON_USUBL2, vn.IsVector() && vn.IsQ()) \
1742 V(sqdmlal, NEON_SQDMLAL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \
1743 V(sqdmlal2, NEON_SQDMLAL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \
1744 V(sqdmlsl, NEON_SQDMLSL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \
1745 V(sqdmlsl2, NEON_SQDMLSL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \
1746 V(sqdmull, NEON_SQDMULL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \
1747 V(sqdmull2, NEON_SQDMULL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S())
1749#define DEFINE_ASM_FUNC(FN, OP, AS) \
1750 void Assembler::FN(const VRegister& vd, const VRegister& vn, \
1751 const VRegister& vm) { \
1753 NEON3DifferentL(vd, vn, vm, OP); \
1755NEON_3DIFF_LONG_LIST(DEFINE_ASM_FUNC)
1756#undef DEFINE_ASM_FUNC
1758#define NEON_3DIFF_HN_LIST(V) \
1759 V(addhn, NEON_ADDHN, vd.IsD()) \
1760 V(addhn2, NEON_ADDHN2, vd.IsQ()) \
1761 V(raddhn, NEON_RADDHN, vd.IsD()) \
1762 V(raddhn2, NEON_RADDHN2, vd.IsQ()) \
1763 V(subhn, NEON_SUBHN, vd.IsD()) \
1764 V(subhn2, NEON_SUBHN2, vd.IsQ()) \
1765 V(rsubhn, NEON_RSUBHN, vd.IsD()) \
1766 V(rsubhn2, NEON_RSUBHN2, vd.IsQ())
1768#define DEFINE_ASM_FUNC(FN, OP, AS) \
1769 void Assembler::FN(const VRegister& vd, const VRegister& vn, \
1770 const VRegister& vm) { \
1772 NEON3DifferentHN(vd, vn, vm, OP); \
1774NEON_3DIFF_HN_LIST(DEFINE_ASM_FUNC)
1775#undef DEFINE_ASM_FUNC
1777void Assembler::NEONPerm(
const VRegister& vd,
const VRegister& vn,
1778 const VRegister& vm, NEONPermOp op) {
1781 Emit(VFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
1784void Assembler::trn1(
const VRegister& vd,
const VRegister& vn,
1785 const VRegister& vm) {
1786 NEONPerm(vd, vn, vm, NEON_TRN1);
1789void Assembler::trn2(
const VRegister& vd,
const VRegister& vn,
1790 const VRegister& vm) {
1791 NEONPerm(vd, vn, vm, NEON_TRN2);
1794void Assembler::uzp1(
const VRegister& vd,
const VRegister& vn,
1795 const VRegister& vm) {
1796 NEONPerm(vd, vn, vm, NEON_UZP1);
1799void Assembler::uzp2(
const VRegister& vd,
const VRegister& vn,
1800 const VRegister& vm) {
1801 NEONPerm(vd, vn, vm, NEON_UZP2);
1804void Assembler::zip1(
const VRegister& vd,
const VRegister& vn,
1805 const VRegister& vm) {
1806 NEONPerm(vd, vn, vm, NEON_ZIP1);
1809void Assembler::zip2(
const VRegister& vd,
const VRegister& vn,
1810 const VRegister& vm) {
1811 NEONPerm(vd, vn, vm, NEON_ZIP2);
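// NEON shift-by-immediate helpers. The shift amount is encoded in the
// combined immh:immb field: left shifts use (lane size + shift) and right
// shifts use (2 * lane size - shift), as computed by the helpers below.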
1814void Assembler::NEONShiftImmediate(
const VRegister& vd,
const VRegister& vn,
1815 NEONShiftImmediateOp op,
int immh_immb) {
1818 if (vn.IsScalar()) {
1822 q = vd.IsD() ? 0 :
NEON_Q;
1825 Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd));
1828void Assembler::NEONShiftLeftImmediate(
const VRegister& vd,
const VRegister& vn,
1829 int shift, NEONShiftImmediateOp op) {
1830 int laneSizeInBits = vn.LaneSizeInBits();
1831 DCHECK((shift >= 0) && (shift < laneSizeInBits));
1832 NEONShiftImmediate(vd, vn, op, (laneSizeInBits + shift) << 16);
1835void Assembler::NEONShiftRightImmediate(
const VRegister& vd,
1836 const VRegister& vn,
int shift,
1837 NEONShiftImmediateOp op) {
1838 int laneSizeInBits = vn.LaneSizeInBits();
1839 DCHECK((shift >= 1) && (shift <= laneSizeInBits));
1840 NEONShiftImmediate(vd, vn, op, ((2 * laneSizeInBits) - shift) << 16);
1843void Assembler::NEONShiftImmediateL(
const VRegister& vd,
const VRegister& vn,
1844 int shift, NEONShiftImmediateOp op) {
1845 int laneSizeInBits = vn.LaneSizeInBits();
1846 DCHECK((shift >= 0) && (shift < laneSizeInBits));
1847 int immh_immb = (laneSizeInBits + shift) << 16;
1849 DCHECK((vn.Is8B() && vd.Is8H()) || (vn.Is4H() && vd.Is4S()) ||
1850 (vn.Is2S() && vd.Is2D()) || (vn.Is16B() && vd.Is8H()) ||
1851 (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D()));
1853 q = vn.IsD() ? 0 :
NEON_Q;
1854 Emit(q | op | immh_immb | Rn(vn) | Rd(vd));
1857void Assembler::NEONShiftImmediateN(
const VRegister& vd,
const VRegister& vn,
1858 int shift, NEONShiftImmediateOp op) {
1860 int laneSizeInBits = vd.LaneSizeInBits();
1861 DCHECK((shift >= 1) && (shift <= laneSizeInBits));
1862 int immh_immb = (2 * laneSizeInBits - shift) << 16;
1864 if (vn.IsScalar()) {
1865 DCHECK((vd.Is1B() && vn.Is1H()) || (vd.Is1H() && vn.Is1S()) ||
1866 (vd.Is1S() && vn.Is1D()));
1870 DCHECK((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) ||
1871 (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) ||
1872 (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D()));
1874 q = vd.IsD() ? 0 :
NEON_Q;
1876 Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd));
1879void Assembler::shl(
const VRegister& vd,
const VRegister& vn,
int shift) {
1880 DCHECK(vd.IsVector() || vd.Is1D());
1881 NEONShiftLeftImmediate(vd, vn, shift, NEON_SHL);
1884void Assembler::sli(
const VRegister& vd,
const VRegister& vn,
int shift) {
1885 DCHECK(vd.IsVector() || vd.Is1D());
1886 NEONShiftLeftImmediate(vd, vn, shift, NEON_SLI);
1889void Assembler::sqshl(
const VRegister& vd,
const VRegister& vn,
int shift) {
1890 NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHL_imm);
1893void Assembler::sqshlu(
const VRegister& vd,
const VRegister& vn,
int shift) {
1894 NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHLU);
1897void Assembler::uqshl(
const VRegister& vd,
const VRegister& vn,
int shift) {
1898 NEONShiftLeftImmediate(vd, vn, shift, NEON_UQSHL_imm);
1901void Assembler::sshll(
const VRegister& vd,
const VRegister& vn,
int shift) {
1903 NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL);
1906void Assembler::sshll2(
const VRegister& vd,
const VRegister& vn,
int shift) {
1908 NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL);
1911void Assembler::sxtl(
const VRegister& vd,
const VRegister& vn) {
1915void Assembler::sxtl2(
const VRegister& vd,
const VRegister& vn) {
1919void Assembler::ushll(
const VRegister& vd,
const VRegister& vn,
int shift) {
1921 NEONShiftImmediateL(vd, vn, shift, NEON_USHLL);
1924void Assembler::ushll2(
const VRegister& vd,
const VRegister& vn,
int shift) {
1926 NEONShiftImmediateL(vd, vn, shift, NEON_USHLL);
1929void Assembler::uxtl(
const VRegister& vd,
const VRegister& vn) {
1933void Assembler::uxtl2(
const VRegister& vd,
const VRegister& vn) {
1937void Assembler::sri(
const VRegister& vd,
const VRegister& vn,
int shift) {
1938 DCHECK(vd.IsVector() || vd.Is1D());
1939 NEONShiftRightImmediate(vd, vn, shift, NEON_SRI);
1942void Assembler::sshr(
const VRegister& vd,
const VRegister& vn,
int shift) {
1943 DCHECK(vd.IsVector() || vd.Is1D());
1944 NEONShiftRightImmediate(vd, vn, shift, NEON_SSHR);
1947void Assembler::ushr(
const VRegister& vd,
const VRegister& vn,
int shift) {
1948 DCHECK(vd.IsVector() || vd.Is1D());
1949 NEONShiftRightImmediate(vd, vn, shift, NEON_USHR);
1952void Assembler::srshr(
const VRegister& vd,
const VRegister& vn,
int shift) {
1953 DCHECK(vd.IsVector() || vd.Is1D());
1954 NEONShiftRightImmediate(vd, vn, shift, NEON_SRSHR);
1957void Assembler::urshr(
const VRegister& vd,
const VRegister& vn,
int shift) {
1958 DCHECK(vd.IsVector() || vd.Is1D());
1959 NEONShiftRightImmediate(vd, vn, shift, NEON_URSHR);
1962void Assembler::ssra(
const VRegister& vd,
const VRegister& vn,
int shift) {
1963 DCHECK(vd.IsVector() || vd.Is1D());
1964 NEONShiftRightImmediate(vd, vn, shift, NEON_SSRA);
1967void Assembler::usra(
const VRegister& vd,
const VRegister& vn,
int shift) {
1968 DCHECK(vd.IsVector() || vd.Is1D());
1969 NEONShiftRightImmediate(vd, vn, shift, NEON_USRA);
1972void Assembler::srsra(
const VRegister& vd,
const VRegister& vn,
int shift) {
1973 DCHECK(vd.IsVector() || vd.Is1D());
1974 NEONShiftRightImmediate(vd, vn, shift, NEON_SRSRA);
1977void Assembler::ursra(
const VRegister& vd,
const VRegister& vn,
int shift) {
1978 DCHECK(vd.IsVector() || vd.Is1D());
1979 NEONShiftRightImmediate(vd, vn, shift, NEON_URSRA);
1982void Assembler::shrn(
const VRegister& vd,
const VRegister& vn,
int shift) {
1983 DCHECK(vn.IsVector() && vd.IsD());
1984 NEONShiftImmediateN(vd, vn, shift, NEON_SHRN);
1987void Assembler::shrn2(
const VRegister& vd,
const VRegister& vn,
int shift) {
1988 DCHECK(vn.IsVector() && vd.IsQ());
1989 NEONShiftImmediateN(vd, vn, shift, NEON_SHRN);
1992void Assembler::rshrn(
const VRegister& vd,
const VRegister& vn,
int shift) {
1993 DCHECK(vn.IsVector() && vd.IsD());
1994 NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN);
1997void Assembler::rshrn2(
const VRegister& vd,
const VRegister& vn,
int shift) {
1998 DCHECK(vn.IsVector() && vd.IsQ());
1999 NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN);
2002void Assembler::sqshrn(
const VRegister& vd,
const VRegister& vn,
int shift) {
2003 DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
2004 NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN);
2007void Assembler::sqshrn2(
const VRegister& vd,
const VRegister& vn,
int shift) {
2008 DCHECK(vn.IsVector() && vd.IsQ());
2009 NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN);
2012void Assembler::sqrshrn(
const VRegister& vd,
const VRegister& vn,
int shift) {
2013 DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
2014 NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN);
2017void Assembler::sqrshrn2(
const VRegister& vd,
const VRegister& vn,
int shift) {
2018 DCHECK(vn.IsVector() && vd.IsQ());
2019 NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN);
2022void Assembler::sqshrun(
const VRegister& vd,
const VRegister& vn,
int shift) {
2023 DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
2024 NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN);
2027void Assembler::sqshrun2(
const VRegister& vd,
const VRegister& vn,
int shift) {
2028 DCHECK(vn.IsVector() && vd.IsQ());
2029 NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN);
2032void Assembler::sqrshrun(
const VRegister& vd,
const VRegister& vn,
int shift) {
2033 DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
2034 NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN);
2037void Assembler::sqrshrun2(
const VRegister& vd,
const VRegister& vn,
int shift) {
2038 DCHECK(vn.IsVector() && vd.IsQ());
2039 NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN);
2042void Assembler::uqshrn(
const VRegister& vd,
const VRegister& vn,
int shift) {
2043 DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
2044 NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN);
2047void Assembler::uqshrn2(
const VRegister& vd,
const VRegister& vn,
int shift) {
2048 DCHECK(vn.IsVector() && vd.IsQ());
2049 NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN);
2052void Assembler::uqrshrn(
const VRegister& vd,
const VRegister& vn,
int shift) {
2053 DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
2054 NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN);
2057void Assembler::uqrshrn2(
const VRegister& vd,
const VRegister& vn,
int shift) {
2058 DCHECK(vn.IsVector() && vd.IsQ());
2059 NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN);
2062void Assembler::uaddw(
const VRegister& vd,
const VRegister& vn,
2063 const VRegister& vm) {
2065 NEON3DifferentW(vd, vn, vm, NEON_UADDW);
2068void Assembler::uaddw2(
const VRegister& vd,
const VRegister& vn,
2069 const VRegister& vm) {
2071 NEON3DifferentW(vd, vn, vm, NEON_UADDW2);
2074void Assembler::saddw(
const VRegister& vd,
const VRegister& vn,
2075 const VRegister& vm) {
2077 NEON3DifferentW(vd, vn, vm, NEON_SADDW);
2080void Assembler::saddw2(
const VRegister& vd,
const VRegister& vn,
2081 const VRegister& vm) {
2083 NEON3DifferentW(vd, vn, vm, NEON_SADDW2);
2086void Assembler::usubw(
const VRegister& vd,
const VRegister& vn,
2087 const VRegister& vm) {
2089 NEON3DifferentW(vd, vn, vm, NEON_USUBW);
2092void Assembler::usubw2(
const VRegister& vd,
const VRegister& vn,
2093 const VRegister& vm) {
2095 NEON3DifferentW(vd, vn, vm, NEON_USUBW2);
2098void Assembler::ssubw(
const VRegister& vd,
const VRegister& vn,
2099 const VRegister& vm) {
2101 NEON3DifferentW(vd, vn, vm, NEON_SSUBW);
2104void Assembler::ssubw2(
const VRegister& vd,
const VRegister& vn,
2105 const VRegister& vm) {
2107 NEON3DifferentW(vd, vn, vm, NEON_SSUBW2);
2110void Assembler::mov(
const Register& rd,
const Register& rm) {
2114 if (rd.IsSP() || rm.IsSP()) {
2117 orr(rd, AppropriateZeroRegFor(rd), rm);
2121void Assembler::ins(
const VRegister& vd,
int vd_index,
const Register& rn) {
2124 int lane_size = vd.LaneSizeInBytes();
2126 switch (lane_size) {
2146 DCHECK((0 <= vd_index) &&
2148 Emit(NEON_INS_GENERAL | ImmNEON5(format, vd_index) | Rn(rn) | Rd(vd));
2151void Assembler::mov(
const Register& rd,
const VRegister& vn,
int vn_index) {
2153 umov(rd, vn, vn_index);
2156void Assembler::smov(
const Register& rd,
const VRegister& vn,
int vn_index) {
2159 int lane_size = vn.LaneSizeInBytes();
2162 switch (lane_size) {
2175 q = rd.IsW() ? 0 :
NEON_Q;
2176 DCHECK((0 <= vn_index) &&
2178 Emit(q | NEON_SMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd));
2181void Assembler::cls(
const VRegister& vd,
const VRegister& vn) {
2183 DCHECK(!vd.Is1D() && !vd.Is2D());
2184 Emit(VFormat(vn) | NEON_CLS | Rn(vn) | Rd(vd));
2187void Assembler::clz(
const VRegister& vd,
const VRegister& vn) {
2189 DCHECK(!vd.Is1D() && !vd.Is2D());
2190 Emit(VFormat(vn) | NEON_CLZ | Rn(vn) | Rd(vd));
2193void Assembler::cnt(
const VRegister& vd,
const VRegister& vn) {
2195 DCHECK(vd.Is8B() || vd.Is16B());
2196 Emit(VFormat(vn) | NEON_CNT | Rn(vn) | Rd(vd));
2199void Assembler::rev16(
const VRegister& vd,
const VRegister& vn) {
2201 DCHECK(vd.Is8B() || vd.Is16B());
2202 Emit(VFormat(vn) | NEON_REV16 | Rn(vn) | Rd(vd));
2205void Assembler::rev32(
const VRegister& vd,
const VRegister& vn) {
2207 DCHECK(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H());
2208 Emit(VFormat(vn) | NEON_REV32 | Rn(vn) | Rd(vd));
2211void Assembler::rev64(
const VRegister& vd,
const VRegister& vn) {
2213 DCHECK(!vd.Is1D() && !vd.Is2D());
2214 Emit(VFormat(vn) | NEON_REV64 | Rn(vn) | Rd(vd));
2217void Assembler::ursqrte(
const VRegister& vd,
const VRegister& vn) {
2219 DCHECK(vd.Is2S() || vd.Is4S());
2220 Emit(VFormat(vn) | NEON_URSQRTE | Rn(vn) | Rd(vd));
2223void Assembler::urecpe(
const VRegister& vd,
const VRegister& vn) {
2225 DCHECK(vd.Is2S() || vd.Is4S());
2226 Emit(VFormat(vn) | NEON_URECPE | Rn(vn) | Rd(vd));
2229void Assembler::NEONAddlp(
const VRegister& vd,
const VRegister& vn,
2230 NEON2RegMiscOp op) {
2231 DCHECK((op == NEON_SADDLP) || (op == NEON_UADDLP) || (op == NEON_SADALP) ||
2232 (op == NEON_UADALP));
2234 DCHECK((vn.Is8B() && vd.Is4H()) || (vn.Is4H() && vd.Is2S()) ||
2235 (vn.Is2S() && vd.Is1D()) || (vn.Is16B() && vd.Is8H()) ||
2236 (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D()));
2237 Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
2240void Assembler::saddlp(
const VRegister& vd,
const VRegister& vn) {
2241 NEONAddlp(vd, vn, NEON_SADDLP);
2244void Assembler::uaddlp(
const VRegister& vd,
const VRegister& vn) {
2245 NEONAddlp(vd, vn, NEON_UADDLP);
2248void Assembler::sadalp(
const VRegister& vd,
const VRegister& vn) {
2249 NEONAddlp(vd, vn, NEON_SADALP);
2252void Assembler::uadalp(
const VRegister& vd,
const VRegister& vn) {
2253 NEONAddlp(vd, vn, NEON_UADALP);
2256void Assembler::NEONAcrossLanesL(
const VRegister& vd,
const VRegister& vn,
2257 NEONAcrossLanesOp op) {
2258 DCHECK((vn.Is8B() && vd.Is1H()) || (vn.Is16B() && vd.Is1H()) ||
2259 (vn.Is4H() && vd.Is1S()) || (vn.Is8H() && vd.Is1S()) ||
2260 (vn.Is4S() && vd.Is1D()));
2261 Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
2264void Assembler::saddlv(
const VRegister& vd,
const VRegister& vn) {
2265 NEONAcrossLanesL(vd, vn, NEON_SADDLV);
2268void Assembler::uaddlv(
const VRegister& vd,
const VRegister& vn) {
2269 NEONAcrossLanesL(vd, vn, NEON_UADDLV);
2272void Assembler::NEONAcrossLanes(
const VRegister& vd,
const VRegister& vn,
2273 NEONAcrossLanesOp op) {
2274 DCHECK((vn.Is8B() && vd.Is1B()) || (vn.Is16B() && vd.Is1B()) ||
2275 (vn.Is4H() && vd.Is1H()) || (vn.Is8H() && vd.Is1H()) ||
2276 (vn.Is4S() && vd.Is1S()));
2277 if ((op & NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) {
2278 Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd));
2280 Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
2284#define NEON_ACROSSLANES_LIST(V) \
2285 V(fmaxv, NEON_FMAXV, vd.Is1S()) \
2286 V(fminv, NEON_FMINV, vd.Is1S()) \
2287 V(fmaxnmv, NEON_FMAXNMV, vd.Is1S()) \
2288 V(fminnmv, NEON_FMINNMV, vd.Is1S()) \
2289 V(addv, NEON_ADDV, true) \
2290 V(smaxv, NEON_SMAXV, true) \
2291 V(sminv, NEON_SMINV, true) \
2292 V(umaxv, NEON_UMAXV, true) \
2293 V(uminv, NEON_UMINV, true)
2295#define DEFINE_ASM_FUNC(FN, OP, AS) \
2296 void Assembler::FN(const VRegister& vd, const VRegister& vn) { \
2298 NEONAcrossLanes(vd, vn, OP); \
2300NEON_ACROSSLANES_LIST(DEFINE_ASM_FUNC)
2301#undef DEFINE_ASM_FUNC
2303void Assembler::mov(
const VRegister& vd,
int vd_index,
const Register& rn) {
2304 ins(vd, vd_index, rn);
2307void Assembler::umov(
const Register& rd,
const VRegister& vn,
int vn_index) {
2310 int lane_size = vn.LaneSizeInBytes();
2313 switch (lane_size) {
2334 DCHECK((0 <= vn_index) &&
2336 Emit(q | NEON_UMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd));
2339void Assembler::mov(
const VRegister& vd,
const VRegister& vn,
int vn_index) {
2341 dup(vd, vn, vn_index);
2344void Assembler::dup(
const VRegister& vd,
const Register& rn) {
2348 Emit(q | NEON_DUP_GENERAL | ImmNEON5(VFormat(vd), 0) | Rn(rn) | Rd(vd));
2351void Assembler::ins(
const VRegister& vd,
int vd_index,
const VRegister& vn,
2356 int lane_size = vd.LaneSizeInBytes();
2358 switch (lane_size) {
2374 DCHECK((0 <= vd_index) &&
2376 DCHECK((0 <= vn_index) &&
2378 Emit(NEON_INS_ELEMENT | ImmNEON5(format, vd_index) |
2379 ImmNEON4(format, vn_index) | Rn(vn) | Rd(vd));
void Assembler::NEONTable(const VRegister& vd, const VRegister& vn,
                          const VRegister& vm, NEONTableOp op) {
  DCHECK(vd.Is16B() || vd.Is8B());
  Emit(op | (vd.IsQ() ? NEON_Q : 0) | Rm(vm) | Rn(vn) | Rd(vd));
}

void Assembler::tbl(const VRegister& vd, const VRegister& vn,
                    const VRegister& vm) {
  NEONTable(vd, vn, vm, NEON_TBL_1v);
}

void Assembler::tbl(const VRegister& vd, const VRegister& vn,
                    const VRegister& vn2, const VRegister& vm) {
  NEONTable(vd, vn, vm, NEON_TBL_2v);
}

void Assembler::tbl(const VRegister& vd, const VRegister& vn,
                    const VRegister& vn2, const VRegister& vn3,
                    const VRegister& vm) {
  NEONTable(vd, vn, vm, NEON_TBL_3v);
}

void Assembler::tbl(const VRegister& vd, const VRegister& vn,
                    const VRegister& vn2, const VRegister& vn3,
                    const VRegister& vn4, const VRegister& vm) {
  NEONTable(vd, vn, vm, NEON_TBL_4v);
}

void Assembler::tbx(const VRegister& vd, const VRegister& vn,
                    const VRegister& vm) {
  NEONTable(vd, vn, vm, NEON_TBX_1v);
}

void Assembler::tbx(const VRegister& vd, const VRegister& vn,
                    const VRegister& vn2, const VRegister& vm) {
  NEONTable(vd, vn, vm, NEON_TBX_2v);
}

void Assembler::tbx(const VRegister& vd, const VRegister& vn,
                    const VRegister& vn2, const VRegister& vn3,
                    const VRegister& vm) {
  NEONTable(vd, vn, vm, NEON_TBX_3v);
}

void Assembler::tbx(const VRegister& vd, const VRegister& vn,
                    const VRegister& vn2, const VRegister& vn3,
                    const VRegister& vn4, const VRegister& vm) {
  NEONTable(vd, vn, vm, NEON_TBX_4v);
}
void Assembler::mov(const VRegister& vd, int vd_index, const VRegister& vn,
                    int vn_index) {
  ins(vd, vd_index, vn, vn_index);
}

void Assembler::mvn(const Register& rd, const Operand& operand) {
  orn(rd, AppropriateZeroRegFor(rd), operand);
}

void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
  Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
}

void Assembler::msr(SystemRegister sysreg, const Register& rt) {
  Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
}

void Assembler::hint(SystemHint code) { Emit(HINT | ImmHint(code) | Rt(xzr)); }
2480Instr Assembler::LoadStoreStructAddrModeField(
const MemOperand& addr) {
2481 Instr addr_field = RnSP(addr.base());
2483 if (addr.IsPostIndex()) {
2487 "Opcodes must match for NEON post index memop.");
2490 if (addr.offset() == 0) {
2491 addr_field |= RmNot31(addr.regoffset());
2495 addr_field |= (0x1F << Rm_offset);
2498 DCHECK(addr.IsImmediateOffset() && (addr.offset() == 0));
2503void Assembler::LoadStoreStructVerify(
const VRegister& vt,
2504 const MemOperand& addr, Instr op) {
2509 if (addr.IsImmediateOffset()) {
2512 int offset = vt.SizeInBytes();
2562 DCHECK(addr.regoffset() != NoReg || addr.offset() ==
offset);
2571void Assembler::LoadStoreStruct(
const VRegister& vt,
const MemOperand& addr,
2572 NEONLoadStoreMultiStructOp op) {
2573 LoadStoreStructVerify(vt, addr, op);
2574 DCHECK(vt.IsVector() || vt.Is1D());
2575 Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt));
2578void Assembler::LoadStoreStructSingleAllLanes(
const VRegister& vt,
2579 const MemOperand& addr,
2580 NEONLoadStoreSingleStructOp op) {
2581 LoadStoreStructVerify(vt, addr, op);
2582 Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt));
2585void Assembler::ld1(
const VRegister& vt,
const MemOperand& src) {
2586 LoadStoreStruct(vt, src, NEON_LD1_1v);
2589void Assembler::ld1(
const VRegister& vt,
const VRegister& vt2,
2590 const MemOperand& src) {
2594 LoadStoreStruct(vt, src, NEON_LD1_2v);
2597void Assembler::ld1(
const VRegister& vt,
const VRegister& vt2,
2598 const VRegister& vt3,
const MemOperand& src) {
2603 LoadStoreStruct(vt, src, NEON_LD1_3v);
2606void Assembler::ld1(
const VRegister& vt,
const VRegister& vt2,
2607 const VRegister& vt3,
const VRegister& vt4,
2608 const MemOperand& src) {
2614 LoadStoreStruct(vt, src, NEON_LD1_4v);
2617void Assembler::ld2(
const VRegister& vt,
const VRegister& vt2,
2618 const MemOperand& src) {
2622 LoadStoreStruct(vt, src, NEON_LD2);
2625void Assembler::ld2(
const VRegister& vt,
const VRegister& vt2,
int lane,
2626 const MemOperand& src) {
2630 LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad2);
2633void Assembler::ld2r(
const VRegister& vt,
const VRegister& vt2,
2634 const MemOperand& src) {
2638 LoadStoreStructSingleAllLanes(vt, src, NEON_LD2R);
2641void Assembler::ld3(
const VRegister& vt,
const VRegister& vt2,
2642 const VRegister& vt3,
const MemOperand& src) {
2647 LoadStoreStruct(vt, src, NEON_LD3);
2650void Assembler::ld3(
const VRegister& vt,
const VRegister& vt2,
2651 const VRegister& vt3,
int lane,
const MemOperand& src) {
2656 LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad3);
2659void Assembler::ld3r(
const VRegister& vt,
const VRegister& vt2,
2660 const VRegister& vt3,
const MemOperand& src) {
2665 LoadStoreStructSingleAllLanes(vt, src, NEON_LD3R);
2668void Assembler::ld4(
const VRegister& vt,
const VRegister& vt2,
2669 const VRegister& vt3,
const VRegister& vt4,
2670 const MemOperand& src) {
2676 LoadStoreStruct(vt, src, NEON_LD4);
2679void Assembler::ld4(
const VRegister& vt,
const VRegister& vt2,
2680 const VRegister& vt3,
const VRegister& vt4,
int lane,
2681 const MemOperand& src) {
2687 LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad4);
2690void Assembler::ld4r(
const VRegister& vt,
const VRegister& vt2,
2691 const VRegister& vt3,
const VRegister& vt4,
2692 const MemOperand& src) {
2698 LoadStoreStructSingleAllLanes(vt, src, NEON_LD4R);
2701void Assembler::st1(
const VRegister& vt,
const MemOperand& src) {
2702 LoadStoreStruct(vt, src, NEON_ST1_1v);
2705void Assembler::st1(
const VRegister& vt,
const VRegister& vt2,
2706 const MemOperand& src) {
2710 LoadStoreStruct(vt, src, NEON_ST1_2v);
2713void Assembler::st1(
const VRegister& vt,
const VRegister& vt2,
2714 const VRegister& vt3,
const MemOperand& src) {
2719 LoadStoreStruct(vt, src, NEON_ST1_3v);
2722void Assembler::st1(
const VRegister& vt,
const VRegister& vt2,
2723 const VRegister& vt3,
const VRegister& vt4,
2724 const MemOperand& src) {
2730 LoadStoreStruct(vt, src, NEON_ST1_4v);
2733void Assembler::st2(
const VRegister& vt,
const VRegister& vt2,
2734 const MemOperand& dst) {
2738 LoadStoreStruct(vt, dst, NEON_ST2);
2741void Assembler::st2(
const VRegister& vt,
const VRegister& vt2,
int lane,
2742 const MemOperand& dst) {
2746 LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore2);
2749void Assembler::st3(
const VRegister& vt,
const VRegister& vt2,
2750 const VRegister& vt3,
const MemOperand& dst) {
2755 LoadStoreStruct(vt, dst, NEON_ST3);
2758void Assembler::st3(
const VRegister& vt,
const VRegister& vt2,
2759 const VRegister& vt3,
int lane,
const MemOperand& dst) {
2764 LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore3);
2767void Assembler::st4(
const VRegister& vt,
const VRegister& vt2,
2768 const VRegister& vt3,
const VRegister& vt4,
2769 const MemOperand& dst) {
2775 LoadStoreStruct(vt, dst, NEON_ST4);
2778void Assembler::st4(
const VRegister& vt,
const VRegister& vt2,
2779 const VRegister& vt3,
const VRegister& vt4,
int lane,
2780 const MemOperand& dst) {
2786 LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore4);
2789void Assembler::LoadStoreStructSingle(
const VRegister& vt, uint32_t lane,
2790 const MemOperand& addr,
2791 NEONLoadStoreSingleStructOp op) {
2792 LoadStoreStructVerify(vt, addr, op);
2796 unsigned lane_size = vt.LaneSizeInBytes();
2806 if (lane_size == 8) lane++;
2808 Instr size = (lane << NEONLSSize_offset) & NEONLSSize_mask;
2809 Instr s = (lane << (NEONS_offset - 2)) & NEONS_mask;
2810 Instr q = (lane << (NEONQ_offset - 3)) & NEONQ_mask;
2813 switch (lane_size) {
2828 Emit(
instr | LoadStoreStructAddrModeField(addr) | q | size | s | Rt(vt));
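
// Illustrative note: in the single-element forms (e.g. ld1 {vt.h}[lane]), the
// lane index has no field of its own. It is folded into the Q, S and size
// bits together with the lane width, which is why the code above scales
// `lane` and then slices it into three separate instruction fields. Wider
// lanes need fewer index bits, so the low "size" bits are reused to encode
// the element size instead.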
2831void Assembler::ld1(
const VRegister& vt,
int lane,
const MemOperand& src) {
2832 LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad1);
2835void Assembler::ld1r(
const VRegister& vt,
const MemOperand& src) {
2836 LoadStoreStructSingleAllLanes(vt, src, NEON_LD1R);
2839void Assembler::st1(
const VRegister& vt,
int lane,
const MemOperand& dst) {
2840 LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore1);
void Assembler::dmb(BarrierDomain domain, BarrierType type) {
  Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
}

void Assembler::dsb(BarrierDomain domain, BarrierType type) {
  Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
}

void Assembler::isb() {
  Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
}

void Assembler::csdb() { hint(CSDB); }
2857void Assembler::fmov(
const VRegister& vd,
double imm) {
2858 if (vd.IsScalar()) {
2860 Emit(FMOV_d_imm | Rd(vd) | ImmFP(imm));
2864 Emit(NEON_Q | op | ImmNEONFP(imm) | NEONCmode(0xF) | Rd(vd));
2868void Assembler::fmov(
const VRegister& vd,
float imm) {
2869 if (vd.IsScalar()) {
2871 Emit(FMOV_s_imm | Rd(vd) | ImmFP(imm));
2873 DCHECK(vd.Is2S() || vd.Is4S());
2876 Emit(q | op | ImmNEONFP(imm) | NEONCmode(0xF) | Rd(vd));
2880void Assembler::fmov(
const Register& rd,
const VRegister&
fn) {
2883 Emit(op | Rd(rd) | Rn(
fn));
2886void Assembler::fmov(
const VRegister& vd,
const Register& rn) {
2887 DCHECK_EQ(vd.SizeInBits(), rn.SizeInBits());
2889 Emit(op | Rd(vd) | Rn(rn));
2892void Assembler::fmov(
const VRegister& vd,
const VRegister& vn) {
2893 DCHECK_EQ(vd.SizeInBits(), vn.SizeInBits());
2894 Emit(FPType(vd) | FMOV | Rd(vd) | Rn(vn));
2897void Assembler::fmov(
const VRegister& vd,
int index,
const Register& rn) {
2898 DCHECK((index == 1) && vd.Is1D() && rn.IsX());
2900 Emit(FMOV_d1_x | Rd(vd) | Rn(rn));
2903void Assembler::fmov(
const Register& rd,
const VRegister& vn,
int index) {
2904 DCHECK((index == 1) && vn.Is1D() && rd.IsX());
2906 Emit(FMOV_x_d1 | Rd(rd) | Rn(vn));
void Assembler::fmadd(const VRegister& fd, const VRegister& fn,
                      const VRegister& fm, const VRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMADD_s : FMADD_d);
}

void Assembler::fmsub(const VRegister& fd, const VRegister& fn,
                      const VRegister& fm, const VRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMSUB_s : FMSUB_d);
}

void Assembler::fnmadd(const VRegister& fd, const VRegister& fn,
                       const VRegister& fm, const VRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMADD_s : FNMADD_d);
}

void Assembler::fnmsub(const VRegister& fd, const VRegister& fn,
                       const VRegister& fm, const VRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMSUB_s : FNMSUB_d);
}

void Assembler::fnmul(const VRegister& vd, const VRegister& vn,
                      const VRegister& vm) {
  Instr op = vd.Is1S() ? FNMUL_s : FNMUL_d;
  Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
}
void Assembler::fcmp(const VRegister& fn, const VRegister& fm) {
  Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn));
}

void Assembler::fcmp(const VRegister& fn, double value) {
  // The compare-with-zero form can only compare against +0.0, so the value is
  // not encoded; the operand exists only to make the intent explicit.
  DCHECK_EQ(value, 0.0);
  USE(value);
  Emit(FPType(fn) | FCMP_zero | Rn(fn));
}

void Assembler::fccmp(const VRegister& fn, const VRegister& fm,
                      StatusFlags nzcv, Condition cond) {
  Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv));
}

void Assembler::fcsel(const VRegister& fd, const VRegister& fn,
                      const VRegister& fm, Condition cond) {
  DCHECK_EQ(fd.SizeInBits(), fm.SizeInBits());
  Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd));
}
2963void Assembler::NEONFPConvertToInt(
const Register& rd,
const VRegister& vn,
2965 Emit(SF(rd) | FPType(vn) | op | Rn(vn) | Rd(rd));
2968void Assembler::NEONFPConvertToInt(
const VRegister& vd,
const VRegister& vn,
2970 if (vn.IsScalar()) {
2971 DCHECK((vd.Is1S() && vn.Is1S()) || (vd.Is1D() && vn.Is1D()));
2974 Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd));
2977void Assembler::fcvt(
const VRegister& vd,
const VRegister& vn) {
2980 DCHECK(vn.Is1S() || vn.Is1H());
2982 }
else if (vd.Is1S()) {
2983 DCHECK(vn.Is1D() || vn.Is1H());
2987 DCHECK(vn.Is1D() || vn.Is1S());
2990 FPDataProcessing1Source(vd, vn, op);
void Assembler::fcvtl(const VRegister& vd, const VRegister& vn) {
  DCHECK((vd.Is4S() && vn.Is4H()) || (vd.Is2D() && vn.Is2S()));
  Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0;
  Emit(format | NEON_FCVTL | Rn(vn) | Rd(vd));
}

void Assembler::fcvtl2(const VRegister& vd, const VRegister& vn) {
  DCHECK((vd.Is4S() && vn.Is8H()) || (vd.Is2D() && vn.Is4S()));
  Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0;
  Emit(NEON_Q | format | NEON_FCVTL | Rn(vn) | Rd(vd));
}

void Assembler::fcvtn(const VRegister& vd, const VRegister& vn) {
  DCHECK((vn.Is4S() && vd.Is4H()) || (vn.Is2D() && vd.Is2S()));
  Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0;
  Emit(format | NEON_FCVTN | Rn(vn) | Rd(vd));
}

void Assembler::fcvtn2(const VRegister& vd, const VRegister& vn) {
  DCHECK((vn.Is4S() && vd.Is8H()) || (vn.Is2D() && vd.Is4S()));
  Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0;
  Emit(NEON_Q | format | NEON_FCVTN | Rn(vn) | Rd(vd));
}

void Assembler::fcvtxn(const VRegister& vd, const VRegister& vn) {
  Instr format = 1 << NEONSize_offset;
  if (vd.IsScalar()) {
    DCHECK(vd.Is1S() && vn.Is1D());
    Emit(format | NEON_FCVTXN_scalar | Rn(vn) | Rd(vd));
  } else {
    DCHECK(vd.Is2S() && vn.Is2D());
    Emit(format | NEON_FCVTXN | Rn(vn) | Rd(vd));
  }
}

void Assembler::fcvtxn2(const VRegister& vd, const VRegister& vn) {
  DCHECK(vd.Is4S() && vn.Is2D());
  Instr format = 1 << NEONSize_offset;
  Emit(NEON_Q | format | NEON_FCVTXN | Rn(vn) | Rd(vd));
}

void Assembler::fjcvtzs(const Register& rd, const VRegister& vn) {
  DCHECK(rd.IsW() && vn.Is1D());
  Emit(FJCVTZS | Rn(vn) | Rd(rd));
}
#define NEON_FP2REGMISC_FCVT_LIST(V) \
  V(fcvtnu, NEON_FCVTNU, FCVTNU)     \
  V(fcvtns, NEON_FCVTNS, FCVTNS)     \
  V(fcvtpu, NEON_FCVTPU, FCVTPU)     \
  V(fcvtps, NEON_FCVTPS, FCVTPS)     \
  V(fcvtmu, NEON_FCVTMU, FCVTMU)     \
  V(fcvtms, NEON_FCVTMS, FCVTMS)     \
  V(fcvtau, NEON_FCVTAU, FCVTAU)     \
  V(fcvtas, NEON_FCVTAS, FCVTAS)

#define DEFINE_ASM_FUNCS(FN, VEC_OP, SCA_OP)                      \
  void Assembler::FN(const Register& rd, const VRegister& vn) {   \
    NEONFPConvertToInt(rd, vn, SCA_OP);                           \
  }                                                               \
  void Assembler::FN(const VRegister& vd, const VRegister& vn) {  \
    NEONFPConvertToInt(vd, vn, VEC_OP);                           \
  }
NEON_FP2REGMISC_FCVT_LIST(DEFINE_ASM_FUNCS)
#undef DEFINE_ASM_FUNCS
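
// Illustrative note on the conversions defined above: the mnemonic suffix
// encodes rounding mode and signedness. fcvtn* rounds to nearest (ties to
// even), fcvta* rounds to nearest (ties away from zero), fcvtp* rounds toward
// +infinity, fcvtm* rounds toward -infinity, and the trailing s/u selects a
// signed or unsigned integer result. A typical use is fcvtns(x0, d1), the
// signed round-to-nearest-even conversion of d1.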
3059void Assembler::scvtf(
const VRegister& vd,
const VRegister& vn,
int fbits) {
3062 NEONFP2RegMisc(vd, vn, NEON_SCVTF, 0.0);
3064 DCHECK(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
3065 NEONShiftRightImmediate(vd, vn, fbits, NEON_SCVTF_imm);
3069void Assembler::ucvtf(
const VRegister& vd,
const VRegister& vn,
int fbits) {
3072 NEONFP2RegMisc(vd, vn, NEON_UCVTF, 0.0);
3074 DCHECK(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
3075 NEONShiftRightImmediate(vd, vn, fbits, NEON_UCVTF_imm);
3079void Assembler::scvtf(
const VRegister& vd,
const Register& rn,
int fbits) {
3082 Emit(SF(rn) | FPType(vd) | SCVTF | Rn(rn) | Rd(vd));
3084 Emit(SF(rn) | FPType(vd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
3089void Assembler::ucvtf(
const VRegister& fd,
const Register& rn,
int fbits) {
3092 Emit(SF(rn) | FPType(fd) | UCVTF | Rn(rn) | Rd(fd));
3094 Emit(SF(rn) | FPType(fd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
3099void Assembler::NEON3Same(
const VRegister& vd,
const VRegister& vn,
3100 const VRegister& vm, NEON3SameOp vop) {
3102 DCHECK(vd.IsVector() || !vd.IsQ());
3104 Instr format, op = vop;
3105 if (vd.IsScalar()) {
3107 format = SFormat(vd);
3109 format = VFormat(vd);
3112 Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
3115void Assembler::NEONFP3Same(
const VRegister& vd,
const VRegister& vn,
3116 const VRegister& vm, Instr op) {
3118 if (vd.Is4H() || vd.Is8H()) {
3122 Emit(FPFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
3125#define NEON_FP2REGMISC_LIST(V) \
3126 V(fabs, NEON_FABS, FABS) \
3127 V(fneg, NEON_FNEG, FNEG) \
3128 V(fsqrt, NEON_FSQRT, FSQRT) \
3129 V(frintn, NEON_FRINTN, FRINTN) \
3130 V(frinta, NEON_FRINTA, FRINTA) \
3131 V(frintp, NEON_FRINTP, FRINTP) \
3132 V(frintm, NEON_FRINTM, FRINTM) \
3133 V(frintx, NEON_FRINTX, FRINTX) \
3134 V(frintz, NEON_FRINTZ, FRINTZ) \
3135 V(frinti, NEON_FRINTI, FRINTI) \
3136 V(frsqrte, NEON_FRSQRTE, NEON_FRSQRTE_scalar) \
3137 V(frecpe, NEON_FRECPE, NEON_FRECPE_scalar)
3139#define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP) \
3140 void Assembler::FN(const VRegister& vd, const VRegister& vn) { \
3141 if (vd.IsScalar()) { \
3142 DCHECK(vd.Is1S() || vd.Is1D()); \
3143 NEONFP2RegMisc(vd, vn, SCA_OP); \
3145 NEONFP2RegMisc(vd, vn, VEC_OP, 0.0); \
3148NEON_FP2REGMISC_LIST(DEFINE_ASM_FUNC)
3149#undef DEFINE_ASM_FUNC
3151void Assembler::shll(
const VRegister& vd,
const VRegister& vn,
int shift) {
3152 DCHECK((vd.Is8H() && vn.Is8B() && shift == 8) ||
3153 (vd.Is4S() && vn.Is4H() && shift == 16) ||
3154 (vd.Is2D() && vn.Is2S() && shift == 32));
3156 Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd));
3159void Assembler::shll2(
const VRegister& vd,
const VRegister& vn,
int shift) {
3161 DCHECK((vd.Is8H() && vn.Is16B() && shift == 8) ||
3162 (vd.Is4S() && vn.Is8H() && shift == 16) ||
3163 (vd.Is2D() && vn.Is4S() && shift == 32));
3164 Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd));
3167void Assembler::NEONFP2RegMisc(
const VRegister& vd,
const VRegister& vn,
3168 NEON2RegMiscOp vop,
double value) {
3174 if (vd.IsScalar()) {
3175 DCHECK(vd.Is1S() || vd.Is1D());
3177 }
else if (vd.Is4H() || vd.Is8H()) {
3180 DCHECK(vd.Is2S() || vd.Is2D() || vd.Is4S());
3183 Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd));
3186void Assembler::fcmeq(
const VRegister& vd,
const VRegister& vn,
double value) {
3187 NEONFP2RegMisc(vd, vn, NEON_FCMEQ_zero, value);
3190void Assembler::fcmge(
const VRegister& vd,
const VRegister& vn,
double value) {
3191 NEONFP2RegMisc(vd, vn, NEON_FCMGE_zero, value);
3194void Assembler::fcmgt(
const VRegister& vd,
const VRegister& vn,
double value) {
3195 NEONFP2RegMisc(vd, vn, NEON_FCMGT_zero, value);
3198void Assembler::fcmle(
const VRegister& vd,
const VRegister& vn,
double value) {
3199 NEONFP2RegMisc(vd, vn, NEON_FCMLE_zero, value);
3202void Assembler::fcmlt(
const VRegister& vd,
const VRegister& vn,
double value) {
3203 NEONFP2RegMisc(vd, vn, NEON_FCMLT_zero, value);
3206void Assembler::frecpx(
const VRegister& vd,
const VRegister& vn) {
3209 DCHECK(vd.Is1S() || vd.Is1D());
3210 Emit(FPFormat(vd) | NEON_FRECPX_scalar | Rn(vn) | Rd(vd));
3213void Assembler::fcvtzs(
const Register& rd,
const VRegister& vn,
int fbits) {
3214 DCHECK(vn.Is1S() || vn.Is1D());
3215 DCHECK((fbits >= 0) && (fbits <= rd.SizeInBits()));
3217 Emit(SF(rd) | FPType(vn) | FCVTZS | Rn(vn) | Rd(rd));
3219 Emit(SF(rd) | FPType(vn) | FCVTZS_fixed | FPScale(64 - fbits) | Rn(vn) |
3224void Assembler::fcvtzs(
const VRegister& vd,
const VRegister& vn,
int fbits) {
3227 NEONFP2RegMisc(vd, vn, NEON_FCVTZS, 0.0);
3229 DCHECK(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
3230 NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZS_imm);
3234void Assembler::fcvtzu(
const Register& rd,
const VRegister& vn,
int fbits) {
3235 DCHECK(vn.Is1S() || vn.Is1D());
3236 DCHECK((fbits >= 0) && (fbits <= rd.SizeInBits()));
3238 Emit(SF(rd) | FPType(vn) | FCVTZU | Rn(vn) | Rd(rd));
3240 Emit(SF(rd) | FPType(vn) | FCVTZU_fixed | FPScale(64 - fbits) | Rn(vn) |
3245void Assembler::fcvtzu(
const VRegister& vd,
const VRegister& vn,
int fbits) {
3248 NEONFP2RegMisc(vd, vn, NEON_FCVTZU, 0.0);
3250 DCHECK(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
3251 NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZU_imm);
3255void Assembler::NEONFP2RegMisc(
const VRegister& vd,
const VRegister& vn,
3258 Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd));
3261void Assembler::NEON2RegMisc(
const VRegister& vd,
const VRegister& vn,
3262 NEON2RegMiscOp vop,
int value) {
3267 Instr format, op = vop;
3268 if (vd.IsScalar()) {
3270 format = SFormat(vd);
3272 format = VFormat(vd);
3275 Emit(format | op | Rn(vn) | Rd(vd));
3278void Assembler::cmeq(
const VRegister& vd,
const VRegister& vn,
int value) {
3279 DCHECK(vd.IsVector() || vd.Is1D());
3280 NEON2RegMisc(vd, vn, NEON_CMEQ_zero, value);
3283void Assembler::cmge(
const VRegister& vd,
const VRegister& vn,
int value) {
3284 DCHECK(vd.IsVector() || vd.Is1D());
3285 NEON2RegMisc(vd, vn, NEON_CMGE_zero, value);
3288void Assembler::cmgt(
const VRegister& vd,
const VRegister& vn,
int value) {
3289 DCHECK(vd.IsVector() || vd.Is1D());
3290 NEON2RegMisc(vd, vn, NEON_CMGT_zero, value);
3293void Assembler::cmle(
const VRegister& vd,
const VRegister& vn,
int value) {
3294 DCHECK(vd.IsVector() || vd.Is1D());
3295 NEON2RegMisc(vd, vn, NEON_CMLE_zero, value);
3298void Assembler::cmlt(
const VRegister& vd,
const VRegister& vn,
int value) {
3299 DCHECK(vd.IsVector() || vd.Is1D());
3300 NEON2RegMisc(vd, vn, NEON_CMLT_zero, value);
3303#define NEON_3SAME_LIST(V) \
3304 V(add, NEON_ADD, vd.IsVector() || vd.Is1D()) \
3305 V(addp, NEON_ADDP, vd.IsVector() || vd.Is1D()) \
3306 V(sub, NEON_SUB, vd.IsVector() || vd.Is1D()) \
3307 V(cmeq, NEON_CMEQ, vd.IsVector() || vd.Is1D()) \
3308 V(cmge, NEON_CMGE, vd.IsVector() || vd.Is1D()) \
3309 V(cmgt, NEON_CMGT, vd.IsVector() || vd.Is1D()) \
3310 V(cmhi, NEON_CMHI, vd.IsVector() || vd.Is1D()) \
3311 V(cmhs, NEON_CMHS, vd.IsVector() || vd.Is1D()) \
3312 V(cmtst, NEON_CMTST, vd.IsVector() || vd.Is1D()) \
3313 V(sshl, NEON_SSHL, vd.IsVector() || vd.Is1D()) \
3314 V(ushl, NEON_USHL, vd.IsVector() || vd.Is1D()) \
3315 V(srshl, NEON_SRSHL, vd.IsVector() || vd.Is1D()) \
3316 V(urshl, NEON_URSHL, vd.IsVector() || vd.Is1D()) \
3317 V(sqdmulh, NEON_SQDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS()) \
3318 V(sqrdmulh, NEON_SQRDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS()) \
3319 V(shadd, NEON_SHADD, vd.IsVector() && !vd.IsLaneSizeD()) \
3320 V(uhadd, NEON_UHADD, vd.IsVector() && !vd.IsLaneSizeD()) \
3321 V(srhadd, NEON_SRHADD, vd.IsVector() && !vd.IsLaneSizeD()) \
3322 V(urhadd, NEON_URHADD, vd.IsVector() && !vd.IsLaneSizeD()) \
3323 V(shsub, NEON_SHSUB, vd.IsVector() && !vd.IsLaneSizeD()) \
3324 V(uhsub, NEON_UHSUB, vd.IsVector() && !vd.IsLaneSizeD()) \
3325 V(smax, NEON_SMAX, vd.IsVector() && !vd.IsLaneSizeD()) \
3326 V(smaxp, NEON_SMAXP, vd.IsVector() && !vd.IsLaneSizeD()) \
3327 V(smin, NEON_SMIN, vd.IsVector() && !vd.IsLaneSizeD()) \
3328 V(sminp, NEON_SMINP, vd.IsVector() && !vd.IsLaneSizeD()) \
3329 V(umax, NEON_UMAX, vd.IsVector() && !vd.IsLaneSizeD()) \
3330 V(umaxp, NEON_UMAXP, vd.IsVector() && !vd.IsLaneSizeD()) \
3331 V(umin, NEON_UMIN, vd.IsVector() && !vd.IsLaneSizeD()) \
3332 V(uminp, NEON_UMINP, vd.IsVector() && !vd.IsLaneSizeD()) \
3333 V(saba, NEON_SABA, vd.IsVector() && !vd.IsLaneSizeD()) \
3334 V(sabd, NEON_SABD, vd.IsVector() && !vd.IsLaneSizeD()) \
3335 V(uaba, NEON_UABA, vd.IsVector() && !vd.IsLaneSizeD()) \
3336 V(uabd, NEON_UABD, vd.IsVector() && !vd.IsLaneSizeD()) \
3337 V(mla, NEON_MLA, vd.IsVector() && !vd.IsLaneSizeD()) \
3338 V(mls, NEON_MLS, vd.IsVector() && !vd.IsLaneSizeD()) \
3339 V(mul, NEON_MUL, vd.IsVector() && !vd.IsLaneSizeD()) \
3340 V(and_, NEON_AND, vd.Is8B() || vd.Is16B()) \
3341 V(orr, NEON_ORR, vd.Is8B() || vd.Is16B()) \
3342 V(orn, NEON_ORN, vd.Is8B() || vd.Is16B()) \
3343 V(eor, NEON_EOR, vd.Is8B() || vd.Is16B()) \
3344 V(bic, NEON_BIC, vd.Is8B() || vd.Is16B()) \
3345 V(bit, NEON_BIT, vd.Is8B() || vd.Is16B()) \
3346 V(bif, NEON_BIF, vd.Is8B() || vd.Is16B()) \
3347 V(bsl, NEON_BSL, vd.Is8B() || vd.Is16B()) \
3348 V(pmul, NEON_PMUL, vd.Is8B() || vd.Is16B()) \
3349 V(uqadd, NEON_UQADD, true) \
3350 V(sqadd, NEON_SQADD, true) \
3351 V(uqsub, NEON_UQSUB, true) \
3352 V(sqsub, NEON_SQSUB, true) \
3353 V(sqshl, NEON_SQSHL, true) \
3354 V(uqshl, NEON_UQSHL, true) \
3355 V(sqrshl, NEON_SQRSHL, true) \
3356 V(uqrshl, NEON_UQRSHL, true)
3358#define DEFINE_ASM_FUNC(FN, OP, AS) \
3359 void Assembler::FN(const VRegister& vd, const VRegister& vn, \
3360 const VRegister& vm) { \
3362 NEON3Same(vd, vn, vm, OP); \
3364NEON_3SAME_LIST(DEFINE_ASM_FUNC)
3365#undef DEFINE_ASM_FUNC
3367#define NEON_FP3SAME_LIST_V2(V) \
3368 V(fadd, NEON_FADD, FADD) \
3369 V(fsub, NEON_FSUB, FSUB) \
3370 V(fmul, NEON_FMUL, FMUL) \
3371 V(fdiv, NEON_FDIV, FDIV) \
3372 V(fmax, NEON_FMAX, FMAX) \
3373 V(fmaxnm, NEON_FMAXNM, FMAXNM) \
3374 V(fmin, NEON_FMIN, FMIN) \
3375 V(fminnm, NEON_FMINNM, FMINNM) \
3376 V(fmulx, NEON_FMULX, NEON_FMULX_scalar) \
3377 V(frecps, NEON_FRECPS, NEON_FRECPS_scalar) \
3378 V(frsqrts, NEON_FRSQRTS, NEON_FRSQRTS_scalar) \
3379 V(fabd, NEON_FABD, NEON_FABD_scalar) \
3380 V(fmla, NEON_FMLA, 0) \
3381 V(fmls, NEON_FMLS, 0) \
3382 V(facge, NEON_FACGE, NEON_FACGE_scalar) \
3383 V(facgt, NEON_FACGT, NEON_FACGT_scalar) \
3384 V(fcmeq, NEON_FCMEQ, NEON_FCMEQ_scalar) \
3385 V(fcmge, NEON_FCMGE, NEON_FCMGE_scalar) \
3386 V(fcmgt, NEON_FCMGT, NEON_FCMGT_scalar) \
3387 V(faddp, NEON_FADDP, 0) \
3388 V(fmaxp, NEON_FMAXP, 0) \
3389 V(fminp, NEON_FMINP, 0) \
3390 V(fmaxnmp, NEON_FMAXNMP, 0) \
3391 V(fminnmp, NEON_FMINNMP, 0)
3393#define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP) \
3394 void Assembler::FN(const VRegister& vd, const VRegister& vn, \
3395 const VRegister& vm) { \
3397 if ((SCA_OP != 0) && vd.IsScalar()) { \
3398 DCHECK(vd.Is1S() || vd.Is1D()); \
3401 DCHECK(vd.IsVector()); \
3402 DCHECK(vd.Is2S() || vd.Is2D() || vd.Is4S() || vd.Is4H() || vd.Is8H()); \
3405 NEONFP3Same(vd, vn, vm, op); \
3407NEON_FP3SAME_LIST_V2(DEFINE_ASM_FUNC)
3408#undef DEFINE_ASM_FUNC
void Assembler::bcax(const VRegister& vd, const VRegister& vn,
                     const VRegister& vm, const VRegister& va) {
  DCHECK(vd.Is16B() && vn.Is16B() && vm.Is16B());
  Emit(NEON_BCAX | Rd(vd) | Rn(vn) | Rm(vm) | Ra(va));
}

void Assembler::eor3(const VRegister& vd, const VRegister& vn,
                     const VRegister& vm, const VRegister& va) {
  DCHECK(vd.Is16B() && vn.Is16B() && vm.Is16B() && va.Is16B());
  Emit(NEON_EOR3 | Rd(vd) | Rn(vn) | Rm(vm) | Ra(va));
}

void Assembler::addp(const VRegister& vd, const VRegister& vn) {
  DCHECK((vd.Is1D() && vn.Is2D()));
  Emit(SFormat(vd) | NEON_ADDP_scalar | Rn(vn) | Rd(vd));
}

void Assembler::faddp(const VRegister& vd, const VRegister& vn) {
  DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
  Emit(FPFormat(vd) | NEON_FADDP_scalar | Rn(vn) | Rd(vd));
}

void Assembler::fmaxp(const VRegister& vd, const VRegister& vn) {
  DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
  Emit(FPFormat(vd) | NEON_FMAXP_scalar | Rn(vn) | Rd(vd));
}

void Assembler::fminp(const VRegister& vd, const VRegister& vn) {
  DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
  Emit(FPFormat(vd) | NEON_FMINP_scalar | Rn(vn) | Rd(vd));
}

void Assembler::fmaxnmp(const VRegister& vd, const VRegister& vn) {
  DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
  Emit(FPFormat(vd) | NEON_FMAXNMP_scalar | Rn(vn) | Rd(vd));
}

void Assembler::fminnmp(const VRegister& vd, const VRegister& vn) {
  DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
  Emit(FPFormat(vd) | NEON_FMINNMP_scalar | Rn(vn) | Rd(vd));
}
3454void Assembler::orr(
const VRegister& vd,
const int imm8,
const int left_shift) {
3455 NEONModifiedImmShiftLsl(vd, imm8, left_shift, NEONModifiedImmediate_ORR);
3458void Assembler::mov(
const VRegister& vd,
const VRegister& vn) {
3461 orr(vd.V8B(), vn.V8B(), vn.V8B());
3464 orr(vd.V16B(), vn.V16B(), vn.V16B());
3468void Assembler::bic(
const VRegister& vd,
const int imm8,
const int left_shift) {
3469 NEONModifiedImmShiftLsl(vd, imm8, left_shift, NEONModifiedImmediate_BIC);
3472void Assembler::movi(
const VRegister& vd,
const uint64_t imm, Shift shift,
3473 const int shift_amount) {
3474 DCHECK((shift == LSL) || (shift == MSL));
3475 if (vd.Is2D() || vd.Is1D()) {
3478 for (
int i = 0;
i < 8; ++
i) {
3479 int byte = (imm >> (
i * 8)) & 0xFF;
3480 DCHECK((
byte == 0) || (
byte == 0xFF));
3486 Emit(q | NEONModImmOp(1) | NEONModifiedImmediate_MOVI |
3487 ImmNEONabcdefgh(imm8) | NEONCmode(0xE) | Rd(vd));
3488 }
else if (shift == LSL) {
3490 NEONModifiedImmShiftLsl(vd,
static_cast<int>(imm), shift_amount,
3491 NEONModifiedImmediate_MOVI);
3494 NEONModifiedImmShiftMsl(vd,
static_cast<int>(imm), shift_amount,
3495 NEONModifiedImmediate_MOVI);
3499void Assembler::mvn(
const VRegister& vd,
const VRegister& vn) {
3502 not_(vd.V8B(), vn.V8B());
3505 not_(vd.V16B(), vn.V16B());
3509void Assembler::mvni(
const VRegister& vd,
const int imm8, Shift shift,
3510 const int shift_amount) {
3511 DCHECK((shift == LSL) || (shift == MSL));
3513 NEONModifiedImmShiftLsl(vd, imm8, shift_amount, NEONModifiedImmediate_MVNI);
3515 NEONModifiedImmShiftMsl(vd, imm8, shift_amount, NEONModifiedImmediate_MVNI);
3519void Assembler::NEONFPByElement(
const VRegister& vd,
const VRegister& vn,
3520 const VRegister& vm,
int vm_index,
3521 NEONByIndexedElementOp vop) {
3523 DCHECK((vd.Is2S() && vm.Is1S()) || (vd.Is4S() && vm.Is1S()) ||
3524 (vd.Is1S() && vm.Is1S()) || (vd.Is2D() && vm.Is1D()) ||
3525 (vd.Is1D() && vm.Is1D()));
3526 DCHECK((vm.Is1S() && (vm_index < 4)) || (vm.Is1D() && (vm_index < 2)));
3529 int index_num_bits = vm.Is1S() ? 2 : 1;
3530 if (vd.IsScalar()) {
3534 Emit(FPFormat(vd) | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) |
3538void Assembler::NEONByElement(
const VRegister& vd,
const VRegister& vn,
3539 const VRegister& vm,
int vm_index,
3540 NEONByIndexedElementOp vop) {
3542 DCHECK((vd.Is4H() && vm.Is1H()) || (vd.Is8H() && vm.Is1H()) ||
3543 (vd.Is1H() && vm.Is1H()) || (vd.Is2S() && vm.Is1S()) ||
3544 (vd.Is4S() && vm.Is1S()) || (vd.Is1S() && vm.Is1S()));
3545 DCHECK((vm.Is1H() && (vm.code() < 16) && (vm_index < 8)) ||
3546 (vm.Is1S() && (vm_index < 4)));
3548 Instr format, op = vop;
3549 int index_num_bits = vm.Is1H() ? 3 : 2;
3550 if (vd.IsScalar()) {
3552 format = SFormat(vn);
3554 format = VFormat(vn);
3556 Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) |
3560void Assembler::NEONByElementL(
const VRegister& vd,
const VRegister& vn,
3561 const VRegister& vm,
int vm_index,
3562 NEONByIndexedElementOp vop) {
3563 DCHECK((vd.Is4S() && vn.Is4H() && vm.Is1H()) ||
3564 (vd.Is4S() && vn.Is8H() && vm.Is1H()) ||
3565 (vd.Is1S() && vn.Is1H() && vm.Is1H()) ||
3566 (vd.Is2D() && vn.Is2S() && vm.Is1S()) ||
3567 (vd.Is2D() && vn.Is4S() && vm.Is1S()) ||
3568 (vd.Is1D() && vn.Is1S() && vm.Is1S()));
3570 DCHECK((vm.Is1H() && (vm.code() < 16) && (vm_index < 8)) ||
3571 (vm.Is1S() && (vm_index < 4)));
3573 Instr format, op = vop;
3574 int index_num_bits = vm.Is1H() ? 3 : 2;
3575 if (vd.IsScalar()) {
3577 format = SFormat(vn);
3579 format = VFormat(vn);
3581 Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) |
3585#define NEON_BYELEMENT_LIST(V) \
3586 V(mul, NEON_MUL_byelement, vn.IsVector()) \
3587 V(mla, NEON_MLA_byelement, vn.IsVector()) \
3588 V(mls, NEON_MLS_byelement, vn.IsVector()) \
3589 V(sqdmulh, NEON_SQDMULH_byelement, true) \
3590 V(sqrdmulh, NEON_SQRDMULH_byelement, true)
3592#define DEFINE_ASM_FUNC(FN, OP, AS) \
3593 void Assembler::FN(const VRegister& vd, const VRegister& vn, \
3594 const VRegister& vm, int vm_index) { \
3596 NEONByElement(vd, vn, vm, vm_index, OP); \
3598NEON_BYELEMENT_LIST(DEFINE_ASM_FUNC)
3599#undef DEFINE_ASM_FUNC
3601#define NEON_FPBYELEMENT_LIST(V) \
3602 V(fmul, NEON_FMUL_byelement) \
3603 V(fmla, NEON_FMLA_byelement) \
3604 V(fmls, NEON_FMLS_byelement) \
3605 V(fmulx, NEON_FMULX_byelement)
3607#define DEFINE_ASM_FUNC(FN, OP) \
3608 void Assembler::FN(const VRegister& vd, const VRegister& vn, \
3609 const VRegister& vm, int vm_index) { \
3610 NEONFPByElement(vd, vn, vm, vm_index, OP); \
3612NEON_FPBYELEMENT_LIST(DEFINE_ASM_FUNC)
3613#undef DEFINE_ASM_FUNC
3615#define NEON_BYELEMENT_LONG_LIST(V) \
3616 V(sqdmull, NEON_SQDMULL_byelement, vn.IsScalar() || vn.IsD()) \
3617 V(sqdmull2, NEON_SQDMULL_byelement, vn.IsVector() && vn.IsQ()) \
3618 V(sqdmlal, NEON_SQDMLAL_byelement, vn.IsScalar() || vn.IsD()) \
3619 V(sqdmlal2, NEON_SQDMLAL_byelement, vn.IsVector() && vn.IsQ()) \
3620 V(sqdmlsl, NEON_SQDMLSL_byelement, vn.IsScalar() || vn.IsD()) \
3621 V(sqdmlsl2, NEON_SQDMLSL_byelement, vn.IsVector() && vn.IsQ()) \
3622 V(smull, NEON_SMULL_byelement, vn.IsVector() && vn.IsD()) \
3623 V(smull2, NEON_SMULL_byelement, vn.IsVector() && vn.IsQ()) \
3624 V(umull, NEON_UMULL_byelement, vn.IsVector() && vn.IsD()) \
3625 V(umull2, NEON_UMULL_byelement, vn.IsVector() && vn.IsQ()) \
3626 V(smlal, NEON_SMLAL_byelement, vn.IsVector() && vn.IsD()) \
3627 V(smlal2, NEON_SMLAL_byelement, vn.IsVector() && vn.IsQ()) \
3628 V(umlal, NEON_UMLAL_byelement, vn.IsVector() && vn.IsD()) \
3629 V(umlal2, NEON_UMLAL_byelement, vn.IsVector() && vn.IsQ()) \
3630 V(smlsl, NEON_SMLSL_byelement, vn.IsVector() && vn.IsD()) \
3631 V(smlsl2, NEON_SMLSL_byelement, vn.IsVector() && vn.IsQ()) \
3632 V(umlsl, NEON_UMLSL_byelement, vn.IsVector() && vn.IsD()) \
3633 V(umlsl2, NEON_UMLSL_byelement, vn.IsVector() && vn.IsQ())
3635#define DEFINE_ASM_FUNC(FN, OP, AS) \
3636 void Assembler::FN(const VRegister& vd, const VRegister& vn, \
3637 const VRegister& vm, int vm_index) { \
3639 NEONByElementL(vd, vn, vm, vm_index, OP); \
3641NEON_BYELEMENT_LONG_LIST(DEFINE_ASM_FUNC)
3642#undef DEFINE_ASM_FUNC
void Assembler::suqadd(const VRegister& vd, const VRegister& vn) {
  NEON2RegMisc(vd, vn, NEON_SUQADD);
}

void Assembler::usqadd(const VRegister& vd, const VRegister& vn) {
  NEON2RegMisc(vd, vn, NEON_USQADD);
}

void Assembler::abs(const VRegister& vd, const VRegister& vn) {
  DCHECK(vd.IsVector() || vd.Is1D());
  NEON2RegMisc(vd, vn, NEON_ABS);
}

void Assembler::sqabs(const VRegister& vd, const VRegister& vn) {
  NEON2RegMisc(vd, vn, NEON_SQABS);
}

void Assembler::neg(const VRegister& vd, const VRegister& vn) {
  DCHECK(vd.IsVector() || vd.Is1D());
  NEON2RegMisc(vd, vn, NEON_NEG);
}

void Assembler::sqneg(const VRegister& vd, const VRegister& vn) {
  NEON2RegMisc(vd, vn, NEON_SQNEG);
}
void Assembler::NEONXtn(const VRegister& vd, const VRegister& vn,
                        NEON2RegMiscOp vop) {
  Instr format, op = vop;
  if (vd.IsScalar()) {
    DCHECK((vd.Is1B() && vn.Is1H()) || (vd.Is1H() && vn.Is1S()) ||
           (vd.Is1S() && vn.Is1D()));
    op |= NEON_Q | NEONScalar;
    format = SFormat(vd);
  } else {
    DCHECK((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) ||
           (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) ||
           (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D()));
    format = VFormat(vd);
  }
  Emit(format | op | Rn(vn) | Rd(vd));
}

void Assembler::xtn(const VRegister& vd, const VRegister& vn) {
  DCHECK(vd.IsVector() && vd.IsD());
  NEONXtn(vd, vn, NEON_XTN);
}

void Assembler::xtn2(const VRegister& vd, const VRegister& vn) {
  DCHECK(vd.IsVector() && vd.IsQ());
  NEONXtn(vd, vn, NEON_XTN);
}

void Assembler::sqxtn(const VRegister& vd, const VRegister& vn) {
  DCHECK(vd.IsScalar() || vd.IsD());
  NEONXtn(vd, vn, NEON_SQXTN);
}

void Assembler::sqxtn2(const VRegister& vd, const VRegister& vn) {
  DCHECK(vd.IsVector() && vd.IsQ());
  NEONXtn(vd, vn, NEON_SQXTN);
}

void Assembler::sqxtun(const VRegister& vd, const VRegister& vn) {
  DCHECK(vd.IsScalar() || vd.IsD());
  NEONXtn(vd, vn, NEON_SQXTUN);
}

void Assembler::sqxtun2(const VRegister& vd, const VRegister& vn) {
  DCHECK(vd.IsVector() && vd.IsQ());
  NEONXtn(vd, vn, NEON_SQXTUN);
}

void Assembler::uqxtn(const VRegister& vd, const VRegister& vn) {
  DCHECK(vd.IsScalar() || vd.IsD());
  NEONXtn(vd, vn, NEON_UQXTN);
}

void Assembler::uqxtn2(const VRegister& vd, const VRegister& vn) {
  DCHECK(vd.IsVector() && vd.IsQ());
  NEONXtn(vd, vn, NEON_UQXTN);
}
3728void Assembler::not_(
const VRegister& vd,
const VRegister& vn) {
3730 DCHECK(vd.Is8B() || vd.Is16B());
3731 Emit(VFormat(vd) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
3734void Assembler::rbit(
const VRegister& vd,
const VRegister& vn) {
3736 DCHECK(vd.Is8B() || vd.Is16B());
3737 Emit(VFormat(vn) | (1 << NEONSize_offset) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
3740void Assembler::ext(
const VRegister& vd,
const VRegister& vn,
3741 const VRegister& vm,
int index) {
3743 DCHECK(vd.Is8B() || vd.Is16B());
3744 DCHECK((0 <= index) && (index < vd.LaneCount()));
3745 Emit(VFormat(vd) | NEON_EXT | Rm(vm) | ImmNEONExt(index) | Rn(vn) | Rd(vd));
3748void Assembler::dup(
const VRegister& vd,
const VRegister& vn,
int vn_index) {
3753 int lane_size = vn.LaneSizeInBytes();
3755 switch (lane_size) {
3771 if (vd.IsScalar()) {
3776 q = vd.IsD() ? 0 :
NEON_Q;
3779 Emit(q | scalar | NEON_DUP_ELEMENT | ImmNEON5(format, vn_index) | Rn(vn) |
3783void Assembler::dcptr(Label*
label) {
3784 BlockPoolsScope no_pool_inbetween(
this);
3785 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
3786 if (
label->is_bound()) {
3792 internal_reference_positions_.push_back(
pc_offset());
3793 dc64(
reinterpret_cast<uintptr_t
>(buffer_start_ +
label->pos()));
3796 if (
label->is_linked()) {
3807 offset = kStartOfLabelLinkChain;
uint32_t Assembler::FPToImm8(double imm) {
  uint64_t bits = base::bit_cast<uint64_t>(imm);
  // Bit 7 (a): the sign bit.
  uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
  // Bit 6 (b): the second-highest exponent bit.
  uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
  // Bits 5..0 (cdefgh): the low exponent bits and the top fraction bits.
  uint64_t bit5_to_0 = (bits >> 48) & 0x3F;
  return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
}

Instr Assembler::ImmFP(double imm) { return FPToImm8(imm) << ImmFP_offset; }

Instr Assembler::ImmNEONFP(double imm) {
  return ImmNEONabcdefgh(FPToImm8(imm));
}
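
// Illustrative note: FPToImm8 assumes the operand is already encodable as an
// ARM "FP8" immediate, i.e. a value of the form (-1)^s * (m / 16) * 2^e with
// 16 <= m <= 31 and -3 <= e <= 4 (see IsImmFP32/IsImmFP64 below). For example
// 0.5, -1.0, 2.5 and 31.0 are encodable, while 0.1 or 1.0e10 are not and have
// to be materialized another way, typically via a literal load.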
void Assembler::MoveWide(const Register& rd, uint64_t imm, int shift,
                         MoveWideImmediateOp mov_op) {
  // Ignore the top 32 bits of an immediate if we're moving to a W register.
  if (rd.Is32Bits()) {
    // The top 32 bits must be zero (a positive 32-bit value) or the top
    // 33 bits must be one (a negative 32-bit value, sign-extended to 64 bits).
    DCHECK(((imm >> kWRegSizeInBits) == 0) ||
           ((imm >> (kWRegSizeInBits - 1)) == 0x1FFFFFFFF));
    imm &= kWRegMask;
  }

  if (shift >= 0) {
    // An explicit shift was specified.
    DCHECK((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48));
    DCHECK(rd.Is64Bits() || (shift == 0) || (shift == 16));
    shift /= 16;
  } else {
    // No shift was specified; derive the immediate/shift combination from the
    // position of the single non-zero halfword.
    shift = 0;
    if ((imm & ~0xFFFFULL) == 0) {
      // Nothing to do.
    } else if ((imm & ~(0xFFFFULL << 16)) == 0) {
      imm >>= 16;
      shift = 1;
    } else if ((imm & ~(0xFFFFULL << 32)) == 0) {
      DCHECK(rd.Is64Bits());
      imm >>= 32;
      shift = 2;
    } else if ((imm & ~(0xFFFFULL << 48)) == 0) {
      DCHECK(rd.Is64Bits());
      imm >>= 48;
      shift = 3;
    }
  }

  DCHECK(is_uint16(imm));

  Emit(SF(rd) | MoveWideImmediateFixed | mov_op | Rd(rd) |
       ImmMoveWide(static_cast<int>(imm)) | ShiftMoveWide(shift));
}
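
// Illustrative example: MoveWide only ever emits a single MOVZ/MOVN/MOVK with
// one 16-bit payload and a shift of 0, 16, 32 or 48. A call such as
// movz(x0, 0xABCD0000) with the shift argument left at its default is encoded
// as "movz x0, #0xabcd, lsl #16", whereas a constant spanning several
// halfwords, like 0x123400005678, cannot be expressed here and is synthesized
// by callers (typically the macro assembler's Mov) as a movz followed by one
// or more movk instructions.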
3892void Assembler::AddSub(
const Register& rd,
const Register& rn,
3893 const Operand& operand, FlagsUpdate S, AddSubOp op) {
3894 DCHECK_EQ(rd.SizeInBits(), rn.SizeInBits());
3895 DCHECK(!operand.NeedsRelocation(
this));
3896 if (operand.IsImmediate()) {
3897 int64_t immediate = operand.ImmediateValue();
3898 DCHECK(IsImmAddSub(immediate));
3900 Emit(SF(rd) | AddSubImmediateFixed | op |
Flags(S) |
3901 ImmAddSub(
static_cast<int>(immediate)) | dest_reg | RnSP(rn));
3902 }
else if (operand.IsShiftedRegister()) {
3903 DCHECK_EQ(operand.reg().SizeInBits(), rd.SizeInBits());
3913 if (rn.IsSP() || rd.IsSP()) {
3914 DCHECK(!(rd.IsSP() && (S == SetFlags)));
3915 DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S,
3916 AddSubExtendedFixed | op);
3918 DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
3921 DCHECK(operand.IsExtendedRegister());
3922 DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
3926void Assembler::AddSubWithCarry(
const Register& rd,
const Register& rn,
3927 const Operand& operand, FlagsUpdate S,
3928 AddSubWithCarryOp op) {
3929 DCHECK_EQ(rd.SizeInBits(), rn.SizeInBits());
3930 DCHECK_EQ(rd.SizeInBits(), operand.reg().SizeInBits());
3931 DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
3932 DCHECK(!operand.NeedsRelocation(
this));
3933 Emit(SF(rd) | op |
Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
void Assembler::hlt(int code) {
  Emit(HLT | ImmException(code));
}

void Assembler::brk(int code) {
  Emit(BRK | ImmException(code));
}
3946void Assembler::EmitStringData(
const char*
string) {
3947 size_t len = strlen(
string) + 1;
3949 EmitData(
string,
static_cast<int>(len));
3951 const char pad[] = {
'\0',
'\0',
'\0',
'\0'};
3953 "Size of padding must match instruction size.");
3957void Assembler::debug(
const char* message, uint32_t code, Instr params) {
3958 if (
options().enable_simulator_code) {
3959 size_t size_of_debug_sequence =
3964 BlockPoolsScope scope(
this, size_of_debug_sequence);
3971 hlt(kImmExceptionIsDebug);
3976 DCHECK_EQ(SizeOfCodeGeneratedSince(&
start), kDebugMessageOffset);
3977 EmitStringData(message);
3978 hlt(kImmExceptionIsUnreachable);
3979 DCHECK_EQ(SizeOfCodeGeneratedSince(&
start), size_of_debug_sequence);
3984 if (params &
BREAK) {
3989void Assembler::Logical(
const Register& rd,
const Register& rn,
3990 const Operand& operand, LogicalOp op) {
3991 DCHECK(rd.SizeInBits() == rn.SizeInBits());
3992 DCHECK(!operand.NeedsRelocation(
this));
3993 if (operand.IsImmediate()) {
3994 int64_t immediate = operand.ImmediateValue();
3995 unsigned reg_size = rd.SizeInBits();
3999 DCHECK(rd.Is64Bits() || is_uint32(immediate));
4002 if ((op & NOT) == NOT) {
4004 immediate = rd.Is64Bits() ? ~immediate : (~immediate &
kWRegMask);
4007 unsigned n, imm_s, imm_r;
4008 if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
4010 LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
4016 DCHECK(operand.IsShiftedRegister());
4017 DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
4019 DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
4023void Assembler::LogicalImmediate(
const Register& rd,
const Register& rn,
4024 unsigned n,
unsigned imm_s,
unsigned imm_r,
4026 unsigned reg_size = rd.SizeInBits();
4027 Instr dest_reg = (op ==
ANDS) ? Rd(rd) : RdSP(rd);
4028 Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) |
4029 ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg |
4033void Assembler::ConditionalCompare(
const Register& rn,
const Operand& operand,
4034 StatusFlags nzcv, Condition cond,
4035 ConditionalCompareOp op) {
4037 DCHECK(!operand.NeedsRelocation(
this));
4038 if (operand.IsImmediate()) {
4039 int64_t immediate = operand.ImmediateValue();
4040 DCHECK(IsImmConditionalCompare(immediate));
4042 ImmCondCmp(
static_cast<unsigned>(immediate));
4044 DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
4047 Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
4050void Assembler::DataProcessing1Source(
const Register& rd,
const Register& rn,
4051 DataProcessing1SourceOp op) {
4052 DCHECK(rd.SizeInBits() == rn.SizeInBits());
4053 Emit(SF(rn) | op | Rn(rn) | Rd(rd));
4056void Assembler::FPDataProcessing1Source(
const VRegister& vd,
4057 const VRegister& vn,
4058 FPDataProcessing1SourceOp op) {
4059 Emit(FPType(vn) | op | Rn(vn) | Rd(vd));
4062void Assembler::FPDataProcessing2Source(
const VRegister& fd,
4063 const VRegister&
fn,
4064 const VRegister& fm,
4065 FPDataProcessing2SourceOp op) {
4066 DCHECK(fd.SizeInBits() ==
fn.SizeInBits());
4067 DCHECK(fd.SizeInBits() == fm.SizeInBits());
4068 Emit(FPType(fd) | op | Rm(fm) | Rn(
fn) | Rd(fd));
4071void Assembler::FPDataProcessing3Source(
const VRegister& fd,
4072 const VRegister&
fn,
4073 const VRegister& fm,
4074 const VRegister& fa,
4075 FPDataProcessing3SourceOp op) {
4077 Emit(FPType(fd) | op | Rm(fm) | Rn(
fn) | Rd(fd) | Ra(fa));
4080void Assembler::NEONModifiedImmShiftLsl(
const VRegister& vd,
const int imm8,
4081 const int left_shift,
4082 NEONModifiedImmediateOp op) {
4083 DCHECK(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H() || vd.Is2S() ||
4085 DCHECK((left_shift == 0) || (left_shift == 8) || (left_shift == 16) ||
4086 (left_shift == 24));
4089 int cmode_1, cmode_2, cmode_3;
4090 if (vd.Is8B() || vd.Is16B()) {
4091 DCHECK_EQ(op, NEONModifiedImmediate_MOVI);
4096 cmode_1 = (left_shift >> 3) & 1;
4097 cmode_2 = left_shift >> 4;
4099 if (vd.Is4H() || vd.Is8H()) {
4100 DCHECK((left_shift == 0) || (left_shift == 8));
4104 int cmode = (cmode_3 << 3) | (cmode_2 << 2) | (cmode_1 << 1);
4108 Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd));
4111void Assembler::NEONModifiedImmShiftMsl(
const VRegister& vd,
const int imm8,
4112 const int shift_amount,
4113 NEONModifiedImmediateOp op) {
4114 DCHECK(vd.Is2S() || vd.Is4S());
4115 DCHECK((shift_amount == 8) || (shift_amount == 16));
4118 int cmode_0 = (shift_amount >> 4) & 1;
4119 int cmode = 0xC | cmode_0;
4123 Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd));
4126void Assembler::EmitShift(
const Register& rd,
const Register& rn, Shift shift,
4127 unsigned shift_amount) {
4130 lsl(rd, rn, shift_amount);
4133 lsr(rd, rn, shift_amount);
4136 asr(rd, rn, shift_amount);
4139 ror(rd, rn, shift_amount);
4146void Assembler::EmitExtendShift(
const Register& rd,
const Register& rn,
4147 Extend extend,
unsigned left_shift) {
4148 DCHECK(rd.SizeInBits() >= rn.SizeInBits());
4149 unsigned reg_size = rd.SizeInBits();
4151 Register rn_ = Register::Create(rn.code(), rd.SizeInBits());
4153 unsigned high_bit = (8 << (extend & 0x3)) - 1;
4155 unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1);
4157 if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) {
4162 ubfm(rd, rn_, non_shift_bits, high_bit);
4167 sbfm(rd, rn_, non_shift_bits, high_bit);
4171 DCHECK_EQ(rn.SizeInBits(), kXRegSizeInBits);
4173 lsl(rd, rn_, left_shift);
4181 lsl(rd, rn_, left_shift);
4185void Assembler::DataProcShiftedRegister(
const Register& rd,
const Register& rn,
4186 const Operand& operand, FlagsUpdate S,
4188 DCHECK(operand.IsShiftedRegister());
4189 DCHECK(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
4190 DCHECK(!operand.NeedsRelocation(
this));
4191 Emit(SF(rd) | op |
Flags(S) | ShiftDP(operand.shift()) |
4192 ImmDPShift(operand.shift_amount()) | Rm(operand.reg()) | Rn(rn) |
4196void Assembler::DataProcExtendedRegister(
const Register& rd,
const Register& rn,
4197 const Operand& operand, FlagsUpdate S,
4199 DCHECK(!operand.NeedsRelocation(
this));
4201 Emit(SF(rd) | op |
Flags(S) | Rm(operand.reg()) |
4202 ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) |
4203 dest_reg | RnSP(rn));
4206void Assembler::LoadStore(
const CPURegister& rt,
const MemOperand& addr,
4208 Instr memop = op | Rt(rt) | RnSP(addr.base());
4210 if (addr.IsImmediateOffset()) {
4212 int offset =
static_cast<int>(addr.offset());
4213 if (IsImmLSScaled(addr.offset(), size_log2)) {
4214 LoadStoreScaledImmOffset(memop,
offset, size_log2);
4216 DCHECK(IsImmLSUnscaled(addr.offset()));
4217 LoadStoreUnscaledImmOffset(memop,
offset);
4219 }
else if (addr.IsRegisterOffset()) {
4220 Extend ext = addr.extend();
4221 Shift shift = addr.shift();
4222 unsigned shift_amount = addr.shift_amount();
4232 Emit(LoadStoreRegisterOffsetFixed | memop | Rm(addr.regoffset()) |
4233 ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0));
4236 DCHECK(IsImmLSUnscaled(addr.offset()));
4238 int offset =
static_cast<int>(addr.offset());
4239 if (addr.IsPreIndex()) {
4240 Emit(LoadStorePreIndexFixed | memop | ImmLS(
offset));
4242 DCHECK(addr.IsPostIndex());
4243 Emit(LoadStorePostIndexFixed | memop | ImmLS(
offset));
void Assembler::pmull(const VRegister& vd, const VRegister& vn,
                      const VRegister& vm) {
  DCHECK((vn.Is8B() && vd.Is8H()) || (vn.Is1D() && vd.Is1Q()));
  DCHECK(IsEnabled(PMULL1Q) || vd.Is8H());
  Emit(VFormat(vn) | NEON_PMULL | Rm(vm) | Rn(vn) | Rd(vd));
}

void Assembler::pmull2(const VRegister& vd, const VRegister& vn,
                       const VRegister& vm) {
  DCHECK((vn.Is16B() && vd.Is8H()) || (vn.Is2D() && vd.Is1Q()));
  DCHECK(IsEnabled(PMULL1Q) || vd.Is8H());
  Emit(VFormat(vn) | NEON_PMULL2 | Rm(vm) | Rn(vn) | Rd(vd));
}
bool Assembler::IsImmLSPair(int64_t offset, unsigned size) {
  bool offset_is_size_multiple =
      (static_cast<int64_t>(static_cast<uint64_t>(offset >> size) << size) ==
       offset);
  return offset_is_size_multiple && is_int7(offset >> size);
}

bool Assembler::IsImmLLiteral(int64_t offset) {
  int inst_size = static_cast<int>(kInstrSizeLog2);
  bool offset_is_inst_multiple =
      (static_cast<int64_t>(static_cast<uint64_t>(offset >> inst_size)
                            << inst_size) == offset);
  DCHECK_GT(offset, 0);
  offset >>= kLoadLiteralScaleLog2;
  return offset_is_inst_multiple && is_intn(offset, ImmLLiteral_width);
}
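
// Illustrative note: the logical-immediate encoding checked by IsImmLogical
// below (N:imms:immr) can only describe a contiguous run of set bits, rotated
// and then replicated across the register in 2, 4, 8, 16, 32 or 64 bit
// elements. So 0x00FF00FF00FF00FF (eight ones replicated every 16 bits) and
// 0x0000FFFF0000FFFF are encodable, but an arbitrary value such as 0x12345678
// is not and has to go through MoveWide or a literal load instead.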
4288bool Assembler::IsImmLogical(uint64_t value,
unsigned width,
unsigned* n,
4289 unsigned* imm_s,
unsigned* imm_r) {
4290 DCHECK((n !=
nullptr) && (imm_s !=
nullptr) && (imm_r !=
nullptr));
4291 DCHECK((width == kWRegSizeInBits) || (width == kXRegSizeInBits));
4293 bool negate =
false;
4332 if (width == kWRegSizeInBits) {
4361 uint64_t a = LargestPowerOf2Divisor(value);
4362 uint64_t value_plus_a = value +
a;
4363 uint64_t b = LargestPowerOf2Divisor(value_plus_a);
4364 uint64_t value_plus_a_minus_b = value_plus_a - b;
4365 uint64_t c = LargestPowerOf2Divisor(value_plus_a_minus_b);
4367 int d, clz_a, out_n;
4378 mask = ((uint64_t{1} << d) - 1);
4399 mask = ~uint64_t{0};
4405 if (!base::bits::IsPowerOfTwo(d)) {
4409 if (((b - a) & ~
mask) != 0) {
4422 static const uint64_t multipliers[] = {
4423 0x0000000000000001UL, 0x0000000100000001UL, 0x0001000100010001UL,
4424 0x0101010101010101UL, 0x1111111111111111UL, 0x5555555555555555UL,
4428 DCHECK((multiplier_idx >= 0) &&
4429 (
static_cast<size_t>(multiplier_idx) <
arraysize(multipliers)));
4430 uint64_t multiplier = multipliers[multiplier_idx];
4431 uint64_t candidate = (b -
a) * multiplier;
4433 if (value != candidate) {
4446 int s = clz_a - clz_b;
4457 r = (clz_b + 1) & (d - 1);
4459 r = (clz_a + 1) & (d - 1);
4476 *imm_s = ((-d * 2) | (s - 1)) & 0x3F;
bool Assembler::IsImmFP32(uint32_t bits) {
  // Valid values have the form aBbb.bbbc.defg.h000.0000.0000.0000.0000.
  // The low 19 bits must be zero.
  if ((bits & 0x7FFFF) != 0) {
    return false;
  }
  // bits[29..25] must be all set or all cleared.
  uint32_t b_pattern = (bits >> 16) & 0x3E00;
  if (b_pattern != 0 && b_pattern != 0x3E00) {
    return false;
  }
  // bit[30] and bit[29] must be opposite.
  if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
    return false;
  }
  return true;
}

bool Assembler::IsImmFP64(uint64_t bits) {
  // Valid values have the form aBbb.bbbb.bbcd.efgh followed by 48 zero bits.
  // The low 48 bits must be zero.
  if ((bits & 0xFFFFFFFFFFFFL) != 0) {
    return false;
  }
  // bits[61..54] must be all set or all cleared.
  uint32_t b_pattern = (bits >> 48) & 0x3FC0;
  if (b_pattern != 0 && b_pattern != 0x3FC0) {
    return false;
  }
  // bit[62] and bit[61] must be opposite.
  if (((bits ^ (bits << 1)) & 0x4000000000000000L) == 0) {
    return false;
  }
  return true;
}
void Assembler::GrowBuffer() {
  // Compute the new buffer size: double the buffer, but grow by at most 1 MB
  // at a time.
  int old_size = buffer_->size();
  int new_size = std::min(2 * old_size, old_size + 1 * MB);

  if (new_size > kMaximalBufferSize) {
    V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
  }

  // Set up the new buffer.
  std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
  DCHECK_EQ(new_size, new_buffer->size());
  uint8_t* new_start = new_buffer->start();

  // Copy the data.
  intptr_t pc_delta = new_start - buffer_start_;
  intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
  size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
  memmove(new_start, buffer_start_, pc_offset());
  memmove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          reloc_size);

  // Switch buffers.
  buffer_ = std::move(new_buffer);
  buffer_start_ = new_start;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Relocate internal references: they hold absolute addresses into the old
  // buffer and must be rebased onto the new allocation.
  for (auto pos : internal_reference_positions_) {
    Address address = reinterpret_cast<intptr_t>(buffer_start_) + pos;
    intptr_t internal_ref = ReadUnalignedValue<intptr_t>(address);
    internal_ref += pc_delta;
    WriteUnalignedValue<intptr_t>(address, internal_ref);
  }
}
4573void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
4574 ConstantPoolMode constant_pool_mode) {
4575 if (rmode == RelocInfo::INTERNAL_REFERENCE ||
4576 rmode == RelocInfo::CONST_POOL || rmode == RelocInfo::VENEER_POOL ||
4577 rmode == RelocInfo::DEOPT_SCRIPT_OFFSET ||
4578 rmode == RelocInfo::DEOPT_INLINING_ID ||
4579 rmode == RelocInfo::DEOPT_REASON || rmode == RelocInfo::DEOPT_ID ||
4580 rmode == RelocInfo::DEOPT_NODE_ID) {
4582 DCHECK(RelocInfo::IsDeoptReason(rmode) || RelocInfo::IsDeoptId(rmode) ||
4583 RelocInfo::IsDeoptNodeId(rmode) ||
4584 RelocInfo::IsDeoptPosition(rmode) ||
4585 RelocInfo::IsInternalReference(rmode) ||
4586 RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode));
4588 }
else if (constant_pool_mode == NEEDS_POOL_ENTRY) {
4589 if (RelocInfo::IsEmbeddedObjectMode(rmode)) {
4590 Handle<HeapObject>
handle(
reinterpret_cast<Address*
>(data));
4591 data = AddEmbeddedObject(handle);
4593 if (rmode == RelocInfo::COMPRESSED_EMBEDDED_OBJECT) {
4594 if (constpool_.RecordEntry(
static_cast<uint32_t
>(data), rmode) ==
4595 RelocInfoStatus::kMustOmitForDuplicate) {
4599 if (constpool_.RecordEntry(
static_cast<uint64_t
>(data), rmode) ==
4600 RelocInfoStatus::kMustOmitForDuplicate) {
4608 if (!ShouldRecordRelocInfo(rmode))
return;
4612 DCHECK(constpool_.IsBlocked());
4615 RelocInfo rinfo(
reinterpret_cast<Address>(
pc_), rmode, data);
4616 DCHECK_GE(buffer_space(), kMaxRelocSize);
4617 reloc_info_writer.Write(&rinfo);
4620void Assembler::near_jump(
int offset, RelocInfo::Mode rmode) {
4621 BlockPoolsScope no_pool_before_b_instr(
this);
4622 if (!RelocInfo::IsNoInfo(rmode))
4623 RecordRelocInfo(rmode,
offset, NO_POOL_ENTRY);
4627void Assembler::near_call(
int offset, RelocInfo::Mode rmode) {
4628 BlockPoolsScope no_pool_before_bl_instr(
this);
4629 if (!RelocInfo::IsNoInfo(rmode))
4630 RecordRelocInfo(rmode,
offset, NO_POOL_ENTRY);
4634void Assembler::near_call(HeapNumberRequest request) {
4635 BlockPoolsScope no_pool_before_bl_instr(
this);
4636 RequestHeapNumber(request);
4637 EmbeddedObjectIndex index = AddEmbeddedObject(Handle<Code>());
4638 RecordRelocInfo(RelocInfo::CODE_TARGET, index, NO_POOL_ENTRY);
4640 bl(
static_cast<int>(index));
void ConstantPool::EmitPrologue(Alignment require_alignment) {
  // The pool size is recorded in the marker as a number of 32-bit words,
  // excluding the marker instruction itself.
  const int marker_size = 1;
  int word_count =
      ComputeSize(Jump::kOmitted, require_alignment) / kInt32Size - marker_size;
  assm_->Emit(LDR_x_lit | Assembler::ImmLLiteral(word_count) |
              Assembler::Rt(xzr));
  assm_->EmitPoolGuard();
}
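
// Illustrative note: the prologue emitted above is a marker rather than a
// real load. An LDR-literal whose destination is xzr discards its result, and
// its literal offset field is reused to hold the pool size in 32-bit words,
// so code walking the instruction stream can recognise the pool and skip over
// the embedded constants.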
4657int ConstantPool::PrologueSize(Jump require_jump)
const {
4662 int prologue_size = require_jump == Jump::kRequired ?
kInstrSize : 0;
4664 return prologue_size;
4667void ConstantPool::SetLoadOffsetToConstPoolEntry(
int load_offset,
4668 Instruction* entry_offset,
4669 const ConstantPoolKey&
key) {
4670 Instruction*
instr = assm_->InstructionAt(load_offset);
4673 instr->SetImmPCOffsetTarget(assm_->zone(), assm_->options(), entry_offset);
4676void ConstantPool::Check(Emission force_emit, Jump require_jump,
4682 DCHECK_EQ(force_emit, Emission::kIfNeeded);
4690 if (!IsEmpty() && (force_emit == Emission::kForced ||
4691 ShouldEmitNow(require_jump, margin))) {
4694 int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired);
4695 assm_->CheckVeneerPool(
false, require_jump == Jump::kRequired,
4696 assm_->kVeneerDistanceMargin + worst_case_size +
4697 static_cast<int>(margin));
4701 int needed_space = worst_case_size + assm_->kGap;
4702 while (assm_->buffer_space() <= needed_space) {
4703 assm_->GrowBuffer();
4706 EmitAndClear(require_jump);
4710 SetNextCheckIn(ConstantPool::kCheckInterval);
4716const size_t ConstantPool::kMaxDistToPool32 = 1 *
MB;
4717const size_t ConstantPool::kMaxDistToPool64 = 1 *
MB;
4718const size_t ConstantPool::kCheckInterval = 128 *
kInstrSize;
4719const size_t ConstantPool::kApproxDistToPool32 = 64 *
KB;
4720const size_t ConstantPool::kApproxDistToPool64 = kApproxDistToPool32;
4722const size_t ConstantPool::kOpportunityDistToPool32 = 64 *
KB;
4723const size_t ConstantPool::kOpportunityDistToPool64 = 64 *
KB;
4724const size_t ConstantPool::kApproxMaxEntryCount = 512;
4726intptr_t Assembler::MaxPCOffsetAfterVeneerPoolIfEmittedNow(
size_t margin) {
4728 static constexpr int kBranchSizeInBytes =
kInstrSize;
4729 static constexpr int kGuardSizeInBytes =
kInstrSize;
4730 const size_t max_veneer_size_in_bytes =
4731 unresolved_branches_.size() * kVeneerCodeSize;
4732 return static_cast<intptr_t
>(
pc_offset() + kBranchSizeInBytes +
4733 kGuardSizeInBytes + max_veneer_size_in_bytes +
4737void Assembler::RecordVeneerPool(
int location_offset,
int size) {
4738 Assembler::BlockPoolsScope block_pools(
this, PoolEmissionCheck::kSkip);
4739 RelocInfo rinfo(
reinterpret_cast<Address>(buffer_start_) + location_offset,
4740 RelocInfo::VENEER_POOL,
static_cast<intptr_t
>(size));
4741 reloc_info_writer.Write(&rinfo);
void Assembler::EmitVeneers(bool force_emit, bool need_protection,
                            size_t margin) {
  BlockPoolsScope scope(this, PoolEmissionCheck::kSkip);

  // Remember the position of the veneer pool so that its size can be recorded
  // in the relocation information once all veneers have been emitted.
  Label size_check;
  bind(&size_check);
  int veneer_pool_relocinfo_loc = pc_offset();

  Label end;
  if (need_protection) {
    // Protect the pool with a branch over it, so that fall-through execution
    // never runs into the veneers.
    b(&end);
  }

  const intptr_t max_pc_after_veneers =
      MaxPCOffsetAfterVeneerPoolIfEmittedNow(margin);

  // Emit a veneer for every unresolved branch that would go out of range
  // before the next opportunity to emit veneers.
  auto it = unresolved_branches_.begin();
  while (it != unresolved_branches_.end()) {
    const int max_reachable_pc = it->first & ~1;
    if (!force_emit && max_reachable_pc > max_pc_after_veneers) break;

    // Recover the branch location from the key: bit 0 marks tbz/tbnz-style
    // test branches, which have a shorter range.
    int pc_offset = it->first;
    if (pc_offset & 1) {
      pc_offset -= (Instruction::ImmBranchRange(TestBranchType) + 1);
    } else {
      static_assert(Instruction::ImmBranchRange(CondBranchType) ==
                    Instruction::ImmBranchRange(CompareBranchType));
      pc_offset -= Instruction::ImmBranchRange(CondBranchType);
    }

    Label veneer_size_check;
    bind(&veneer_size_check);

    // Redirect the original branch to the veneer, and make the veneer an
    // unconditional branch to the label.
    Label* label = it->second;
    Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
    Instruction* branch = InstructionAt(pc_offset);
    RemoveBranchFromLabelLinkChain(branch, label, veneer);
    branch->SetImmPCOffsetTarget(zone(), options(), veneer);
    b(label);

    DCHECK_EQ(SizeOfCodeGeneratedSince(&veneer_size_check),
              static_cast<uint64_t>(kVeneerCodeSize));
    it = unresolved_branches_.erase(it);
  }

  if (unresolved_branches_.empty()) {
    next_veneer_pool_check_ = kMaxInt;
  } else {
    next_veneer_pool_check_ =
        unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
  }

  // Record the veneer pool's size in the relocation information.
  int pool_size = static_cast<int>(SizeOfCodeGeneratedSince(&size_check));
  RecordVeneerPool(veneer_pool_relocinfo_loc, pool_size);

  bind(&end);
}
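// Illustrative sketch with hypothetical helper names, not in the source: the
// unresolved_branches_ key packs the branch's maximum reachable pc (a multiple
// of kInstrSize, so bit 0 is free) and uses bit 0 to mark tbz/tbnz-style test
// branches, which is what the "& ~1" and "+ 1" arithmetic above undoes.
namespace veneer_key_sketch {
constexpr int MakeKey(int max_reachable_pc, bool is_test_branch) {
  return max_reachable_pc | (is_test_branch ? 1 : 0);
}
constexpr int MaxReachablePc(int key) { return key & ~1; }
constexpr bool IsTestBranch(int key) { return (key & 1) != 0; }
static_assert(MaxReachablePc(MakeKey(4096, true)) == 4096);
static_assert(IsTestBranch(MakeKey(4096, true)));
static_assert(!IsTestBranch(MakeKey(4096, false)));
}  // namespace veneer_key_sketch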
void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
                                size_t margin) {
  // There is nothing to do if there are no pending veneer pool entries.
  if (unresolved_branches_.empty()) {
    DCHECK_EQ(next_veneer_pool_check_, kMaxInt);
    return;
  }

  // Some instruction sequences must not be broken up by veneer pool emission;
  // while the pool is blocked, postpone the check.
  if (is_veneer_pool_blocked()) {
    return;
  }

  if (!require_jump) {
    // The pool would already be protected by existing control flow, so be
    // more eager to emit now by growing the margin.
    margin *= kVeneerNoProtectionFactor;
  }

  if (force_emit || ShouldEmitVeneers(margin)) {
    EmitVeneers(force_emit, require_jump, margin);
  } else {
    next_veneer_pool_check_ =
        unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
  }
}
int Assembler::buffer_space() const {
  return static_cast<int>(reloc_info_writer.pos() - pc_);
}
void Assembler::RecordConstPool(int size) {
  // Recording the constant pool size is only needed for debugger support, so
  // that offsets in the code can be computed correctly.
  Assembler::BlockPoolsScope block_pools(this);
  RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
}
void PatchingAssembler::PatchAdrFar(int64_t target_offset) {
  // The code at the current instruction should be:
  //   adr  rd, 0
  //   nop  (ADR_FAR_NOP), repeated kAdrFarPatchableNNops times
  //   movz scratch, 0

  // Verify the expected code template.
  Instruction* expected_adr = InstructionAt(0);
  CHECK(expected_adr->IsAdr() && (expected_adr->ImmPCRel() == 0));
  int rd_code = expected_adr->Rd();
  for (int i = 0; i < kAdrFarPatchableNNops; ++i) {
    CHECK(InstructionAt((i + 1) * kInstrSize)->IsNop(ADR_FAR_NOP));
  }
  Instruction* expected_movz =
      InstructionAt((kAdrFarPatchableNInstrs - 1) * kInstrSize);
  CHECK(expected_movz->IsMovz() && (expected_movz->ImmMoveWide() == 0) &&
        (expected_movz->ShiftMoveWide() == 0));
  int scratch_code = expected_movz->Rd();

  // Patch the sequence to load the full target offset: adr supplies the low
  // 16 bits, movz/movk build the upper bits in the scratch register, and the
  // final add combines them.
  Register rd = Register::XRegFromCode(rd_code);
  Register scratch = Register::XRegFromCode(scratch_code);
  adr(rd, target_offset & 0xFFFF);
  movz(scratch, (target_offset >> 16) & 0xFFFF, 16);
  movk(scratch, (target_offset >> 32) & 0xFFFF, 32);
  add(rd, rd, scratch);
}
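// Illustrative sketch, not part of the source: the patched sequence splits the
// (at most 48-bit, non-negative) offset into the low 16 bits loaded by adr and
// bits [47:16] built in the scratch register by movz/movk; the final add
// reassembles the original value. The helper name is hypothetical.
constexpr int64_t ReassembleAdrFarOffset(int64_t target_offset) {
  int64_t adr_part = target_offset & 0xFFFF;                 // adr rd, #lo16
  int64_t scratch = ((target_offset >> 16) & 0xFFFF) << 16;  // movz scratch, #mid16, lsl #16
  scratch |= ((target_offset >> 32) & 0xFFFF) << 32;         // movk scratch, #hi16, lsl #32
  return adr_part + scratch;                                 // add rd, rd, scratch
}
static_assert(ReassembleAdrFarOffset(0x0000'1234'5678'9ABC) ==
              0x0000'1234'5678'9ABC);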
void PatchingAssembler::PatchSubSp(uint32_t immediate) {
  // The code at the current instruction should be an add/sub-immediate
  // placeholder:
  //   add sp, sp, #0
  Instruction* expected_adr = InstructionAt(0);
  CHECK(expected_adr->IsAddSubImmediate());
  sub(sp, sp, immediate);
}
#undef NEON_3DIFF_LONG_LIST
#undef NEON_3DIFF_HN_LIST
#undef NEON_ACROSSLANES_LIST
#undef NEON_FP2REGMISC_FCVT_LIST
#undef NEON_FP2REGMISC_LIST
#undef NEON_3SAME_LIST
#undef NEON_FP3SAME_LIST_V2
#undef NEON_BYELEMENT_LIST
#undef NEON_FPBYELEMENT_LIST
#undef NEON_BYELEMENT_LONG_LIST