static const unsigned kArmv6 = 0u;
static const unsigned kArmv7 = kArmv6 | (1u << ARMv7);
static const unsigned kArmv7WithSudiv = kArmv7 | (1u << ARMv7_SUDIV);
static const unsigned kArmv8 = kArmv7WithSudiv | (1u << ARMv8);
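
// Note that the four feature sets above form a strict chain: each constant is
// a superset of the one before it, so a single mask test answers "does this
// configuration include ARMv7?". Illustrative check (not from the original
// source):
//
//   bool includes_armv7 = (features & (1u << ARMv7)) != 0;
//   // true for kArmv7, kArmv7WithSudiv and kArmv8; false for kArmv6.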
static unsigned CpuFeaturesFromCommandLine() {
  unsigned result;
  const char* arm_arch = v8_flags.arm_arch;
  if (strcmp(arm_arch, "armv8") == 0) {
    result = kArmv8;
  } else if (strcmp(arm_arch, "armv7+sudiv") == 0) {
    result = kArmv7WithSudiv;
  } else if (strcmp(arm_arch, "armv7") == 0) {
    result = kArmv7;
  } else if (strcmp(arm_arch, "armv6") == 0) {
    result = kArmv6;
  } else {
    fprintf(stderr, "Error: unrecognised value for --arm-arch ('%s').\n",
            arm_arch);
    fprintf(stderr,
            "Supported values are: armv8\n"
            "                      armv7+sudiv\n"
            "                      armv7\n"
            "                      armv6\n");
    FATAL("arm-arch");
  }

  std::optional<bool> maybe_enable_armv7 = v8_flags.enable_armv7;
  std::optional<bool> maybe_enable_vfp3 = v8_flags.enable_vfp3;
  std::optional<bool> maybe_enable_32dregs = v8_flags.enable_32dregs;
  std::optional<bool> maybe_enable_neon = v8_flags.enable_neon;
  std::optional<bool> maybe_enable_sudiv = v8_flags.enable_sudiv;
  std::optional<bool> maybe_enable_armv8 = v8_flags.enable_armv8;
  if (maybe_enable_armv7.has_value() || maybe_enable_vfp3.has_value() ||
      maybe_enable_32dregs.has_value() || maybe_enable_neon.has_value() ||
      maybe_enable_sudiv.has_value() || maybe_enable_armv8.has_value()) {
    bool enable_armv7 = (result & (1u << ARMv7)) != 0;
    bool enable_vfp3 = (result & (1u << ARMv7)) != 0;
    bool enable_32dregs = (result & (1u << ARMv7)) != 0;
    bool enable_neon = (result & (1u << ARMv7)) != 0;
    bool enable_sudiv = (result & (1u << ARMv7_SUDIV)) != 0;
    bool enable_armv8 = (result & (1u << ARMv8)) != 0;
    if (maybe_enable_armv7.has_value()) {
      fprintf(stderr,
              "Warning: --enable_armv7 is deprecated. "
              "Use --arm_arch instead.\n");
      enable_armv7 = maybe_enable_armv7.value();
    }
    if (maybe_enable_vfp3.has_value()) {
      fprintf(stderr,
              "Warning: --enable_vfp3 is deprecated. "
              "Use --arm_arch instead.\n");
      enable_vfp3 = maybe_enable_vfp3.value();
    }
    if (maybe_enable_32dregs.has_value()) {
      fprintf(stderr,
              "Warning: --enable_32dregs is deprecated. "
              "Use --arm_arch instead.\n");
      enable_32dregs = maybe_enable_32dregs.value();
    }
    if (maybe_enable_neon.has_value()) {
      fprintf(stderr,
              "Warning: --enable_neon is deprecated. "
              "Use --arm_arch instead.\n");
      enable_neon = maybe_enable_neon.value();
    }
    if (maybe_enable_sudiv.has_value()) {
      fprintf(stderr,
              "Warning: --enable_sudiv is deprecated. "
              "Use --arm_arch instead.\n");
      enable_sudiv = maybe_enable_sudiv.value();
    }
    if (maybe_enable_armv8.has_value()) {
      fprintf(stderr,
              "Warning: --enable_armv8 is deprecated. "
              "Use --arm_arch instead.\n");
      enable_armv8 = maybe_enable_armv8.value();
    }

    // Emulate the old feature implications.
    if (enable_armv8) {
      enable_vfp3 = true;
      enable_32dregs = true;
      enable_neon = true;
      enable_sudiv = true;
    }

    // Select the best configuration that the enabled features allow.
    result = kArmv6;
    if (enable_armv7 && enable_vfp3 && enable_32dregs && enable_neon) {
      result = kArmv7;
      if (enable_sudiv) {
        result = kArmv7WithSudiv;
        if (enable_armv8) {
          result = kArmv8;
        }
      }
    }
  }
  return result;
}
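
// Worked example for CpuFeaturesFromCommandLine(): --arm_arch=armv7+sudiv
// starts from kArmv7WithSudiv; if the deprecated --enable_armv8 flag is also
// passed, the implication step enables the remaining features and the final
// selection upgrades the result to kArmv8.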
static constexpr unsigned CpuFeaturesFromCompiler() {
#if defined(CAN_USE_ARMV8_INSTRUCTIONS) && !defined(CAN_USE_ARMV7_INSTRUCTIONS)
#error "CAN_USE_ARMV8_INSTRUCTIONS should imply CAN_USE_ARMV7_INSTRUCTIONS"
#endif
#if defined(CAN_USE_ARMV8_INSTRUCTIONS) && !defined(CAN_USE_SUDIV)
#error "CAN_USE_ARMV8_INSTRUCTIONS should imply CAN_USE_SUDIV"
#endif
#if defined(CAN_USE_ARMV7_INSTRUCTIONS) != defined(CAN_USE_VFP3_INSTRUCTIONS)
#error "CAN_USE_ARMV7_INSTRUCTIONS should match CAN_USE_VFP3_INSTRUCTIONS"
#endif
#if defined(CAN_USE_NEON) && !defined(CAN_USE_ARMV7_INSTRUCTIONS)
#error "CAN_USE_NEON should imply CAN_USE_ARMV7_INSTRUCTIONS"
#endif

#if defined(CAN_USE_ARMV8_INSTRUCTIONS) &&                           \
    defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(CAN_USE_SUDIV) && \
    defined(CAN_USE_NEON) && defined(CAN_USE_VFP3_INSTRUCTIONS)
  return kArmv8;
#elif defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(CAN_USE_SUDIV) && \
    defined(CAN_USE_NEON) && defined(CAN_USE_VFP3_INSTRUCTIONS)
  return kArmv7WithSudiv;
#elif defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(CAN_USE_NEON) && \
    defined(CAN_USE_VFP3_INSTRUCTIONS)
  return kArmv7;
#else
  return kArmv6;
#endif
}
void CpuFeatures::ProbeImpl(bool cross_compile) {
  unsigned command_line = CpuFeaturesFromCommandLine();
  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) {
    supported_ |= command_line & CpuFeaturesFromCompiler();
    return;
  }

  // Probe for additional features at runtime.
  base::CPU cpu;
  unsigned runtime = kArmv6;
  if (cpu.has_neon() && cpu.has_vfp3_d32()) {
    runtime |= kArmv7;
    if (cpu.has_idiva()) {
      runtime |= kArmv7WithSudiv;
      if (cpu.architecture() >= 8) {
        runtime |= kArmv8;
      }
    }
  }

  // Use the best of the features found by CPU detection and those inferred
  // from the build system.
  supported_ |= command_line & CpuFeaturesFromCompiler();
  supported_ |= command_line & runtime;
}
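
// Note the intersection semantics above: a feature ends up in supported_ only
// if the command line permits it AND it is provided either by the compiler
// configuration or by the runtime CPU probe. The command line therefore acts
// as a filter over what is actually available.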
void CpuFeatures::PrintTarget() {
  const char* arm_arch = nullptr;
  const char* arm_target_type = "";
  const char* arm_no_probe = "";
  const char* arm_fpu = "";
  const char* arm_thumb = "";
  const char* arm_float_abi = nullptr;

#if !defined __arm__
  arm_target_type = " simulator";
#endif

#if defined ARM_TEST_NO_FEATURE_PROBE
  arm_no_probe = " noprobe";
#endif

#if defined CAN_USE_ARMV8_INSTRUCTIONS
  arm_arch = "arm v8";
#elif defined CAN_USE_ARMV7_INSTRUCTIONS
  arm_arch = "arm v7";
#else
  arm_arch = "arm v6";
#endif

#if defined CAN_USE_NEON
  arm_fpu = " neon";
#elif defined CAN_USE_VFP3_INSTRUCTIONS
#if defined CAN_USE_VFP32DREGS
  arm_fpu = " vfp3";
#else
  arm_fpu = " vfp3-d16";
#endif
#else
  arm_fpu = " vfp2";
#endif

#ifdef __arm__
  arm_float_abi = base::OS::ArmUsingHardFloat() ? "hard" : "softfp";
#elif USE_EABI_HARDFLOAT
  arm_float_abi = "hard";
#else
  arm_float_abi = "softfp";
#endif

#if defined __arm__ && (defined __thumb__) || (defined __thumb2__)
  arm_thumb = " thumb";
#endif

  printf("target%s%s %s%s%s %s\n", arm_target_type, arm_no_probe, arm_arch,
         arm_fpu, arm_thumb, arm_float_abi);
}
void CpuFeatures::PrintFeatures() {
  printf("ARMv8=%d ARMv7=%d VFPv3=%d VFP32DREGS=%d NEON=%d SUDIV=%d",
         CpuFeatures::IsSupported(ARMv8), CpuFeatures::IsSupported(ARMv7),
         CpuFeatures::IsSupported(VFPv3), CpuFeatures::IsSupported(VFP32DREGS),
         CpuFeatures::IsSupported(NEON), CpuFeatures::IsSupported(SUDIV));
#ifdef __arm__
  bool eabi_hardfloat = base::OS::ArmUsingHardFloat();
#elif USE_EABI_HARDFLOAT
  bool eabi_hardfloat = true;
#else
  bool eabi_hardfloat = false;
#endif
  printf(" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat);
}
uint32_t RelocInfo::wasm_call_tag() const {
  DCHECK(rmode_ == RelocInfo::WASM_CALL || rmode_ == RelocInfo::WASM_STUB_CALL);
  return static_cast<uint32_t>(
      Assembler::target_address_at(pc_, constant_pool_));
}
Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm)
    : rm_(rm), shift_op_(shift_op), shift_imm_(shift_imm & 31) {
  DCHECK(is_uint5(shift_imm));
}
Operand Operand::EmbeddedNumber(double value) {
  int32_t smi;
  if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
  Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
  result.is_heap_number_request_ = true;
  result.value_.heap_number_request = HeapNumberRequest(value);
  return result;
}
MemOperand::MemOperand(Register rn, Register rm, AddrMode am)
    : rn_(rn), rm_(rm), shift_op_(LSL), shift_imm_(0), am_(am) {}

MemOperand::MemOperand(Register rn, Register rm, ShiftOp shift_op,
                       int shift_imm, AddrMode am)
    : rn_(rn),
      rm_(rm),
      shift_op_(shift_op),
      shift_imm_(shift_imm & 31),
      am_(am) {
  DCHECK(is_uint5(shift_imm));
}
NeonMemOperand::NeonMemOperand(Register rn, AddrMode am, int align)
    : rn_(rn), rm_(am == Offset ? pc : sp) {
  DCHECK((am == Offset) || (am == PostIndex));
  SetAlignment(align);
}

NeonMemOperand::NeonMemOperand(Register rn, Register rm, int align)
    : rn_(rn), rm_(rm) {
  SetAlignment(align);
}

void NeonMemOperand::SetAlignment(int align) {
  switch (align) {
    case 0:
      align_ = 0;
      break;
    case 64:
      align_ = 1;
      break;
    case 128:
      align_ = 2;
      break;
    case 256:
      align_ = 3;
      break;
    default:
      UNREACHABLE();
  }
}
void Assembler::AllocateAndInstallRequestedHeapNumbers(LocalIsolate* isolate) {
  DCHECK_IMPLIES(isolate == nullptr, heap_number_requests_.empty());
  for (auto& request : heap_number_requests_) {
    Handle<HeapObject> object =
        isolate->factory()->NewHeapNumber<AllocationType::kOld>(
            request.heap_number());
    Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
    Memory<Address>(constant_pool_entry_address(pc, 0 /* unused */)) =
        object.address();
  }
}
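
// Each HeapNumberRequest records the pc offset of a constant-pool load that
// was emitted before the heap number existed; the loop above allocates the
// number and patches the corresponding constant pool slot with its address,
// so the previously emitted load picks up the real object at runtime.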
const Instr kBOrBlPCImmedMask = 0xE * B24;
const Instr kBOrBlPCImmedPattern = 0xA * B24;
const Instr kBlxRegMask =
    15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern = B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
const Instr kBlxIp = al | kBlxRegPattern | ip.code();
const Instr kMovMvnPattern = 0xD * B21;
const Instr kMovLeaveCCMask = 0xDFF * B16;
const Instr kMovLeaveCCPattern = 0x1A0 * B16;
const Instr kMovwPattern = 0x30 * B20;
const Instr kMovtPattern = 0x34 * B20;
const Instr kMovwLeaveCCFlip = 0x5 * B21;
const Instr kMovImmedMask = 0x7F * B21;
const Instr kMovImmedPattern = 0x1D * B21;
const Instr kOrrImmedMask = 0x7F * B21;
const Instr kOrrImmedPattern = 0x1C * B21;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kLdrRegFpOffsetPattern = al | B26 | L | Offset | fp.code() * B16;
const Instr kStrRegFpOffsetPattern = al | B26 | Offset | fp.code() * B16;
const Instr kLdrRegFpNegOffsetPattern =
    al | B26 | L | NegOffset | fp.code() * B16;
const Instr kStrRegFpNegOffsetPattern = al | B26 | NegOffset | fp.code() * B16;
const Instr kLdrStrInstrTypeMask = 0xFFFF0000;
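
// These masks and patterns are consumed by the Is* predicates defined below
// (IsPush, IsLdrRegFpOffset, IsBlxReg, ...) to recognise previously emitted
// instructions when inspecting or patching code. For example, an instruction
// word matches kLdrRegFpOffsetPattern exactly when it has the shape
//   ldr <Rd>, [fp, #+offset]   (with the al condition).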
Assembler::Assembler(const AssemblerOptions& options,
                     std::unique_ptr<AssemblerBuffer> buffer)
    : AssemblerBase(options, std::move(buffer)),
      pending_32_bit_constants_(),
      scratch_register_list_(DefaultTmpList()),
      scratch_vfp_register_list_(DefaultFPTmpList()) {
  reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
  constant_pool_deadline_ = kMaxInt;
  const_pool_blocked_nesting_ = 0;
  no_const_pool_before_ = 0;
  first_const_pool_32_use_ = -1;
  last_bound_pos_ = 0;
  if (CpuFeatures::IsSupported(VFP32DREGS)) {
    // Register objects tend to be abstracted and survive between scopes, so
    // it's awkward to use CpuFeatures::VFP32DREGS with CpuFeatureScope. To
    // make its use consistent with other features, we always enable it if we
    // can.
    EnableCpuFeature(VFP32DREGS);
  }
}

Assembler::~Assembler() {
  DCHECK_EQ(const_pool_blocked_nesting_, 0);
}

RegList Assembler::DefaultTmpList() { return {ip}; }

VfpRegList Assembler::DefaultFPTmpList() {
  if (CpuFeatures::IsSupported(VFP32DREGS)) {
    // Make sure we pick two D registers which alias a Q register. This way, we
    // can use a Q as a scratch if NEON is supported.
    return d14.ToVfpRegList() | d15.ToVfpRegList();
  }
  return d14.ToVfpRegList();
}
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
  GetCode(isolate->main_thread_local_isolate(), desc);
}

void Assembler::GetCode(LocalIsolate* isolate, CodeDesc* desc,
                        SafepointTableBuilderBase* safepoint_table_builder,
                        int handler_table_offset) {
  // As a crutch to avoid having to add manual Align calls wherever we use a
  // raw workflow to create InstructionStream objects (mostly in tests), add
  // another Align call here.
  DataAlign(InstructionStream::kMetadataAlignment);

  // Emit the constant pool if necessary.
  CheckConstPool(true, false);
  DCHECK(pending_32_bit_constants_.empty());

  int code_comments_size = WriteCodeComments();

  AllocateAndInstallRequestedHeapNumbers(isolate);

  // Set up the code descriptor.
  static constexpr int kConstantPoolSize = 0;
  static constexpr int kBuiltinJumpTableInfoSize = 0;
  const int instruction_size = pc_offset();
  const int builtin_jump_table_info_offset =
      instruction_size - kBuiltinJumpTableInfoSize;
  const int code_comments_offset =
      builtin_jump_table_info_offset - code_comments_size;
  const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
  const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
                                        ? constant_pool_offset
                                        : handler_table_offset;
  const int safepoint_table_offset =
      (safepoint_table_builder == kNoSafepointTable)
          ? handler_table_offset2
          : safepoint_table_builder->safepoint_table_offset();
  const int reloc_info_offset =
      static_cast<int>(reloc_info_writer.pos() - buffer_->start());
  CodeDesc::Initialize(desc, this, safepoint_table_offset,
                       handler_table_offset2, constant_pool_offset,
                       code_comments_offset, builtin_jump_table_info_offset,
                       reloc_info_offset);
}
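
// The offset arithmetic above encodes the layout of the finished code object;
// reading from low to high addresses:
//
//   [ instructions | safepoint table | handler table | constant pool
//     | code comments | builtin jump table info ]
//
// On this target the constant pool and jump table info sections are empty
// (see kConstantPoolSize and kBuiltinJumpTableInfoSize above), since ARM
// emits its constant pools inline within the instruction area.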
void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
  DCHECK_EQ(pc_offset() & (kInstrSize - 1), 0);
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}

void Assembler::CodeTargetAlign() {
  // Preferred alignment of jump targets on some ARM chips.
  Align(8);
}
Condition Assembler::GetCondition(Instr instr) {
  return Instruction::ConditionField(instr);
}
bool Assembler::IsLdrRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
}

bool Assembler::IsVldrDRegisterImmediate(Instr instr) {
  return (instr & (15 * B24 | 3 * B20 | 15 * B8)) == (13 * B24 | B20 | 11 * B8);
}
int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
  DCHECK(IsLdrRegisterImmediate(instr));
  bool positive = (instr & B23) == B23;
  int offset = instr & kOff12Mask;  // Zero extended offset.
  return positive ? offset : -offset;
}

int Assembler::GetVldrDRegisterImmediateOffset(Instr instr) {
  DCHECK(IsVldrDRegisterImmediate(instr));
  bool positive = (instr & B23) == B23;
  int offset = instr & kOff8Mask;  // Zero extended offset.
  offset <<= 2;
  return positive ? offset : -offset;
}

Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
  DCHECK(IsLdrRegisterImmediate(instr));
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  DCHECK(is_uint12(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset.
  return (instr & ~kOff12Mask) | offset;
}

Instr Assembler::SetVldrDRegisterImmediateOffset(Instr instr, int offset) {
  DCHECK(IsVldrDRegisterImmediate(instr));
  DCHECK((offset & ~3) == offset);  // Must be 64-bit aligned.
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  DCHECK(is_uint10(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset. Its bottom 2 bits are zero.
  return (instr & ~kOff8Mask) | (offset >> 2);
}
bool Assembler::IsStrRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
}

Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
  DCHECK(IsStrRegisterImmediate(instr));
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  DCHECK(is_uint12(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset.
  return (instr & ~kOff12Mask) | offset;
}
bool Assembler::IsAddRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23);
}

Register Assembler::GetRd(Instr instr) {
  return Register::from_code(Instruction::RdValue(instr));
}

Register Assembler::GetRn(Instr instr) {
  return Register::from_code(Instruction::RnValue(instr));
}

Register Assembler::GetRm(Instr instr) {
  return Register::from_code(Instruction::RmValue(instr));
}
bool Assembler::IsPush(Instr instr) {
  return ((instr & ~kRdMask) == kPushRegPattern);
}

bool Assembler::IsPop(Instr instr) {
  return ((instr & ~kRdMask) == kPopRegPattern);
}

bool Assembler::IsStrRegFpOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
}

bool Assembler::IsLdrRegFpOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
}

bool Assembler::IsStrRegFpNegOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
}

bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
}

bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
  // Check the instruction is indeed a ldr<cond> <Rd>, [pc +/- offset_12].
  return (instr & kLdrPCImmedMask) == kLdrPCImmedPattern;
}
bool Assembler::IsBOrBlPcImmediateOffset(Instr instr) {
  return (instr & kBOrBlPCImmedMask) == kBOrBlPCImmedPattern;
}

bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
  // Check the instruction is indeed a vldr<cond> <Dd>, [pc +/- offset_10].
  return (instr & kVldrDPCMask) == kVldrDPCPattern;
}

bool Assembler::IsBlxReg(Instr instr) {
  // Check the instruction is indeed a blxcc <Rm>.
  return (instr & kBlxRegMask) == kBlxRegPattern;
}

bool Assembler::IsBlxIp(Instr instr) {
  // Check the instruction is indeed a blx ip.
  return instr == kBlxIp;
}

bool Assembler::IsTstImmediate(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) == (I | TST | S);
}
bool Assembler::IsCmpRegister(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==
         (CMP | S);
}

bool Assembler::IsCmpImmediate(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) == (I | CMP | S);
}
int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return instr & kOff12Mask;
}
int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  if (is_uint24(instr)) {
    // Emitted link to a label, not part of a branch.
    return instr;
  }
  DCHECK_EQ(5 * B25, instr & 7 * B25);  // b, bl, or blx imm24
  int imm26 = ((instr & kImm24Mask) << 8) >> 6;
  if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
      ((instr & B24) != 0)) {
    // blx uses bit 24 to encode bit 2 of imm26.
    imm26 += 2;
  }
  return pos + Instruction::kPcLoadDelta + imm26;
}
void Assembler::target_at_put(int pos, int target_pos) {
  Instr instr = instr_at(pos);
  if (is_uint24(instr)) {
    DCHECK(target_pos == pos || target_pos >= 0);
    // Emitted link to a label, not part of a branch. The existing code is a
    // single 24-bit label chain link, followed by nops encoding the
    // destination register. See mov_label_offset.

    // Extract the destination register from the first nop.
    Register dst =
        Register::from_code(Instruction::RmValue(instr_at(pos + kInstrSize)));
    // In addition to the 24-bit label chain link, we expect to find one nop if
    // ARMv7 is supported, or two nops otherwise.
    DCHECK(IsNop(instr_at(pos + kInstrSize), dst.code()));
    if (!CpuFeatures::IsSupported(ARMv7)) {
      DCHECK(IsNop(instr_at(pos + 2 * kInstrSize), dst.code()));
    }

    uint32_t target24 =
        target_pos + (InstructionStream::kHeaderSize - kHeapObjectTag);
    CHECK(is_uint24(target24));
    if (is_uint8(target24)) {
      // If the target fits in a byte then only patch with a mov instruction.
      PatchingAssembler patcher(
          options(), reinterpret_cast<uint8_t*>(buffer_start_ + pos), 1);
      patcher.mov(dst, Operand(target24));
    } else {
      uint16_t target16_0 = target24 & kImm16Mask;
      uint16_t target16_1 = target24 >> 16;
      if (CpuFeatures::IsSupported(ARMv7)) {
        // Patch with movw/movt.
        if (target16_1 == 0) {
          PatchingAssembler patcher(
              options(), reinterpret_cast<uint8_t*>(buffer_start_ + pos), 1);
          CpuFeatureScope scope(&patcher, ARMv7);
          patcher.movw(dst, target16_0);
        } else {
          PatchingAssembler patcher(
              options(), reinterpret_cast<uint8_t*>(buffer_start_ + pos), 2);
          CpuFeatureScope scope(&patcher, ARMv7);
          patcher.movw(dst, target16_0);
          patcher.movt(dst, target16_1);
        }
      } else {
        // Patch with a sequence of mov/orr/orr instructions.
        uint8_t target8_0 = target16_0 & kImm8Mask;
        uint8_t target8_1 = target16_0 >> 8;
        uint8_t target8_2 = target16_1 & kImm8Mask;
        if (target8_2 == 0) {
          PatchingAssembler patcher(
              options(), reinterpret_cast<uint8_t*>(buffer_start_ + pos), 2);
          patcher.mov(dst, Operand(target8_0));
          patcher.orr(dst, dst, Operand(target8_1 << 8));
        } else {
          PatchingAssembler patcher(
              options(), reinterpret_cast<uint8_t*>(buffer_start_ + pos), 3);
          patcher.mov(dst, Operand(target8_0));
          patcher.orr(dst, dst, Operand(target8_1 << 8));
          patcher.orr(dst, dst, Operand(target8_2 << 16));
        }
      }
    }
    return;
  }
  int imm26 = target_pos - (pos + Instruction::kPcLoadDelta);
  DCHECK_EQ(5 * B25, instr & 7 * B25);  // b, bl, or blx imm24
  if (Instruction::ConditionField(instr) == kSpecialCondition) {
    // blx uses bit 24 to encode bit 2 of imm26.
    DCHECK_EQ(0, imm26 & 1);
    instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1) * B24;
  } else {
    DCHECK_EQ(0, imm26 & 3);
    instr &= ~kImm24Mask;
  }
  int imm24 = imm26 >> 2;
  CHECK(is_int24(imm24));
  instr_at_put(pos, instr | (imm24 & kImm24Mask));
}
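
// A note on the linking scheme used by target_at and target_at_put: while a
// label is unbound, each branch referring to it stores the position of the
// previous reference in its imm24 field, forming a chain through the code.
// Binding the label (see bind_to below) walks this chain and rewrites every
// entry either into a real pc-relative branch or, for mov_label_offset links,
// into a movw/movt (ARMv7) or mov/orr/orr (pre-ARMv7) sequence that loads the
// label's offset.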
void Assembler::print(const Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l;
    l.link_to(L->pos());
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm24Mask) == 0) {
        PrintF("value\n");
      } else {
        DCHECK_EQ(instr & 7 * B25, 5 * B25);  // b, bl, or blx
        Condition cond = Instruction::ConditionField(instr);
        const char* b;
        if (cond == kSpecialCondition) {
          b = "blx";
        } else if ((instr & B24) != 0) {
          b = "bl";
        } else {
          b = "b";
        }
        PrintF("%s\n", b);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}
void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    next(L);  // call next before overwriting link with target at fixup_pos
    target_at_put(fixup_pos, pos);
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we can eliminate unnecessary jumps
  // to the last bound label.
  if (pos > last_bound_pos_) last_bound_pos_ = pos;
}

void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}
void Assembler::next(Label* L) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos());
  if (link == L->pos()) {
    // Branch target points to the same instruction. This is the end of the
    // link chain.
    L->Unuse();
  } else {
    DCHECK_GE(link, 0);
    L->link_to(link);
  }
}
// Low-level code emission routines depend on whether a 32-bit immediate can
// be encoded as an ARM shifter operand: an 8-bit value combined with an even
// right-rotation.
bool FitsShifter(uint32_t imm32, uint32_t* rotate_imm, uint32_t* immed_8,
                 Instr* instr) {
  // Check three categories which cover all possible shifter fits.

  // 1. 0x000000FF: The value is already 8-bit.
  if (imm32 <= 0xFF) {
    *rotate_imm = 0;
    *immed_8 = imm32;
    return true;
  }

  // 2. 0x000FF000: The 8-bit value is somewhere in the middle of the 32-bit
  //    value.
  int half_trailing_zeros = base::bits::CountTrailingZerosNonZero(imm32) / 2;
  uint32_t imm8 = imm32 >> (half_trailing_zeros * 2);
  if (imm8 <= 0xFF) {
    // Rotating right by 2 * half_trailing_zeros is equivalent to rotating
    // left by 32 - 2 * half_trailing_zeros.
    *rotate_imm = (16 - half_trailing_zeros);
    *immed_8 = imm8;
    return true;
  }

  // 3. 0xF000000F: The 8-bit value is split over the beginning and end of
  //    the 32-bit value.
  uint32_t imm32_rot16 = base::bits::RotateLeft32(imm32, 16);
  half_trailing_zeros =
      base::bits::CountTrailingZerosNonZero(imm32_rot16) / 2;
  imm8 = imm32_rot16 >> (half_trailing_zeros * 2);
  if (imm8 <= 0xFF) {
    *rotate_imm = 8 - half_trailing_zeros;
    *immed_8 = imm8;
    return true;
  }

  // If the opcode is one with a complementary version and the complementary
  // immediate fits, change the opcode.
  if (instr != nullptr) {
    if ((*instr & kMovMvnMask) == kMovMvnPattern) {
      if (FitsShifter(~imm32, rotate_imm, immed_8, nullptr)) {
        *instr ^= kMovMvnFlip;
        return true;
      } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
        if (CpuFeatures::IsSupported(ARMv7)) {
          if (imm32 < 0x10000) {
            *instr ^= kMovwLeaveCCFlip;
            *instr |= Assembler::EncodeMovwImmediate(imm32);
            *rotate_imm = *immed_8 = 0;  // Not used for movw.
            return true;
          }
        }
      }
    } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
      if (FitsShifter(-static_cast<int>(imm32), rotate_imm, immed_8,
                      nullptr)) {
        *instr ^= kCmpCmnFlip;
        return true;
      }
    } else {
      Instr alu_insn = (*instr & kALUMask);
      if (alu_insn == ADD || alu_insn == SUB) {
        if (FitsShifter(-static_cast<int>(imm32), rotate_imm, immed_8,
                        nullptr)) {
          *instr ^= kAddSubFlip;
          return true;
        }
      } else if (alu_insn == AND || alu_insn == BIC) {
        if (FitsShifter(~imm32, rotate_imm, immed_8, nullptr)) {
          *instr ^= kAndBicFlip;
          return true;
        }
      }
    }
  }
  return false;
}
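
// Worked example: an ARM data-processing immediate is an 8-bit value rotated
// right by 2 * rotate_imm. For imm32 = 0x00000A00 there are 9 trailing zeros,
// so half_trailing_zeros = 4 and imm8 = 0x0A, giving the encoding
//   immed_8 = 0x0A, rotate_imm = 16 - 4 = 12   (0x0A ror 24 == 0x00000A00).
// A value like 0x00000101 spans more than 8 bits under every even rotation,
// so FitsShifter returns false and callers fall back to a multi-instruction
// sequence or a constant pool load.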
bool MustOutputRelocInfo(RelocInfo::Mode rmode, const Assembler* assembler) {
  if (RelocInfo::IsOnlyForSerializer(rmode)) {
    if (assembler->predictable_code_size()) return true;
    return assembler->options().record_reloc_info_for_serialization;
  } else if (RelocInfo::IsNoInfo(rmode)) {
    return false;
  }
  return true;
}
bool UseMovImmediateLoad(const Operand& x, const Assembler* assembler) {
  DCHECK_NOT_NULL(assembler);
  if (x.MustOutputRelocInfo(assembler)) {
    // Prefer the constant pool if data is likely to be patched.
    return false;
  }
  // Otherwise, use movw / movt on ARMv7.
  return CpuFeatures::IsSupported(ARMv7);
}
bool Operand::MustOutputRelocInfo(const Assembler* assembler) const {
  return v8::internal::MustOutputRelocInfo(rmode_, assembler);
}
int Operand::InstructionsRequired(const Assembler* assembler,
                                  Instr instr) const {
  DCHECK_NOT_NULL(assembler);
  if (rm_.is_valid()) return 1;
  uint32_t dummy1, dummy2;
  if (MustOutputRelocInfo(assembler) ||
      !FitsShifter(immediate(), &dummy1, &dummy2, &instr)) {
    // The immediate operand cannot be encoded as a shifter operand, or use of
    // the constant pool is required. First account for the instructions
    // required for the constant pool or immediate load.
    int instructions;
    if (UseMovImmediateLoad(*this, assembler)) {
      DCHECK(CpuFeatures::IsSupported(ARMv7));
      // A movw / movt immediate load.
      instructions = 2;
    } else {
      // A small constant pool load.
      instructions = 1;
    }
    if ((instr & ~kCondMask) != 13 * B21) {  // mov, S not set
      // For a mov or mvn instruction which doesn't set the condition code,
      // the constant pool or immediate load is enough, otherwise we need to
      // account for the actual instruction being requested.
      instructions += 1;
    }
    return instructions;
  }
  // No use of the constant pool and the immediate operand can be encoded as
  // a shifter operand.
  return 1;
}
void Assembler::Move32BitImmediate(Register rd, const Operand& x,
                                   Condition cond) {
  if (UseMovImmediateLoad(x, this)) {
    CpuFeatureScope scope(this, ARMv7);
    // UseMovImmediateLoad should return false when we need to output
    // relocation info, since we prefer the constant pool for values that can
    // be patched.
    DCHECK(!x.MustOutputRelocInfo(this));
    UseScratchRegisterScope temps(this);
    // Re-use the destination register as a scratch if possible.
    Register target = rd != pc && rd != sp ? rd : temps.Acquire();
    uint32_t imm32 = static_cast<uint32_t>(x.immediate());
    movw(target, imm32 & 0xFFFF, cond);
    movt(target, imm32 >> 16, cond);
    if (target.code() != rd.code()) {
      mov(rd, target, LeaveCC, cond);
    }
  } else {
    int32_t immediate;
    if (x.IsHeapNumberRequest()) {
      RequestHeapNumber(x.heap_number_request());
      immediate = 0;
    } else {
      immediate = x.immediate();
    }
    ConstantPoolAddEntry(pc_offset(), x.rmode_, immediate);
    ldr_pcrel(rd, 0, cond);
  }
}
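
// In effect, Move32BitImmediate emits one of two shapes (sketch):
//
//   movw rd, #(imm & 0xFFFF)   ; ARMv7, value not patched later
//   movt rd, #(imm >> 16)
//
// or a single pc-relative load whose word lives in the constant pool:
//
//   ldr rd, [pc, #offset]      ; pre-ARMv7, or values needing reloc info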
void Assembler::AddrMode1(Instr instr, Register rd, Register rn,
                          const Operand& x) {
  CheckBuffer();
  Condition cond = Instruction::ConditionField(instr);
  Opcode opcode = static_cast<Opcode>(instr & kOpCodeMask);
  bool set_flags = (instr & S) != 0;
  DCHECK((opcode == ADC) || (opcode == ADD) || (opcode == AND) ||
         (opcode == BIC) || (opcode == EOR) || (opcode == ORR) ||
         (opcode == RSB) || (opcode == RSC) || (opcode == SBC) ||
         (opcode == SUB) || (opcode == CMN) || (opcode == CMP) ||
         (opcode == TEQ) || (opcode == TST) || (opcode == MOV) ||
         (opcode == MVN));
  // For comparison instructions, rd is not defined.
  DCHECK(rd.is_valid() || (opcode == CMN) || (opcode == CMP) ||
         (opcode == TEQ) || (opcode == TST));
  // For move instructions, rn is not defined.
  DCHECK(rn.is_valid() || (opcode == MOV) || (opcode == MVN));
  DCHECK(rd.is_valid() || rn.is_valid());
  if (!AddrMode1TryEncodeOperand(&instr, x)) {
    DCHECK(x.IsImmediate());
    // Upon failure to encode, the opcode should not have changed.
    DCHECK(opcode == static_cast<Opcode>(instr & kOpCodeMask));
    UseScratchRegisterScope temps(this);
    if ((opcode == MOV) && !set_flags) {
      // Generate a sequence of mov instructions or a load from the constant
      // pool only for a MOV instruction which does not set the flags.
      DCHECK(!rn.is_valid());
      Move32BitImmediate(rd, x, cond);
    } else if ((opcode == ADD || opcode == SUB) && !set_flags && (rd == rn) &&
               !temps.CanAcquire()) {
      // Split the operation into a sequence of additions if we cannot use a
      // scratch register.
      uint32_t imm = x.immediate();
      do {
        // The immediate encoding format is composed of 8 bits of data at an
        // even bit position; peel off one encodable chunk at a time.
        int trailing_zeroes = base::bits::CountTrailingZeros(imm) & ~1u;
        uint32_t mask = (0xFF << trailing_zeroes);
        if (opcode == ADD) {
          add(rd, rd, Operand(imm & mask), LeaveCC, cond);
        } else {
          DCHECK_EQ(opcode, SUB);
          sub(rd, rd, Operand(imm & mask), LeaveCC, cond);
        }
        imm = imm & ~mask;
      } while (!ImmediateFitsAddrMode1Instruction(imm));
      if (opcode == ADD) {
        add(rd, rd, Operand(imm), LeaveCC, cond);
      } else {
        DCHECK_EQ(opcode, SUB);
        sub(rd, rd, Operand(imm), LeaveCC, cond);
      }
    } else {
      // The immediate operand cannot be encoded as a shifter operand, so load
      // it first to a scratch register and change the original instruction to
      // use it. Reuse the destination register if possible.
      Register scratch = (rd.is_valid() && rd != rn && rd != pc && rd != sp)
                             ? rd
                             : temps.Acquire();
      mov(scratch, x, LeaveCC, cond);
      AddrMode1(instr, rd, rn, Operand(scratch));
    }
    return;
  }
  if (!rd.is_valid()) {
    // Emit a comparison instruction.
    emit(instr | rn.code() * B16);
  } else if (!rn.is_valid()) {
    // Emit a move instruction. If the operand is a register-shifted register,
    // then prevent the destination from being pc as the instruction would
    // then be UNPREDICTABLE.
    DCHECK(!x.IsRegisterShiftedRegister() || rd != pc);
    emit(instr | rd.code() * B12);
  } else {
    emit(instr | rn.code() * B16 | rd.code() * B12);
  }
  if (rn == pc || x.rm_ == pc) {
    // Block constant pool emission for one instruction after reading pc.
    BlockConstPoolFor(1);
  }
}
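
// Example of the ADD/SUB splitting path above: with no scratch register
// available, "add r0, r0, #0x12345" is emitted as
//   add r0, r0, #0x45
//   add r0, r0, #0x2300
//   add r0, r0, #0x10000
// where every chunk is an 8-bit value at an even bit position and therefore
// a valid shifter operand.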
bool Assembler::AddrMode1TryEncodeOperand(Instr* instr, const Operand& x) {
  if (x.IsImmediate()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (x.MustOutputRelocInfo(this) ||
        !FitsShifter(x.immediate(), &rotate_imm, &immed_8, instr)) {
      // Let the caller handle generating multiple instructions.
      return false;
    }
    *instr |= I | rotate_imm * B8 | immed_8;
  } else if (x.IsImmediateShiftedRegister()) {
    *instr |= x.shift_imm_ * B7 | x.shift_op_ | x.rm_.code();
  } else {
    DCHECK(x.IsRegisterShiftedRegister());
    // It is unpredictable to use the PC in this case.
    DCHECK(x.rm_ != pc && x.rs_ != pc);
    *instr |= x.rs_.code() * B8 | x.shift_op_ | B4 | x.rm_.code();
  }

  return true;
}
void Assembler::AddrMode2(Instr instr, Register rd, const MemOperand& x) {
  // This method does not handle pc-relative addresses. ldr_pcrel() should be
  // used instead.
  DCHECK(x.rn_ != pc);
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_12 = x.offset_;
    if (offset_12 < 0) {
      offset_12 = -offset_12;
      am ^= U;
    }
    if (!is_uint12(offset_12)) {
      // Immediate offset cannot be encoded, load it first to a scratch
      // register.
      UseScratchRegisterScope temps(this);
      // Allow re-using rd for load instructions if possible.
      bool is_load = (instr & L) == L;
      Register scratch = (is_load && rd != x.rn_ && rd != pc && rd != sp)
                             ? rd
                             : temps.Acquire();
      mov(scratch, Operand(x.offset_), LeaveCC,
          Instruction::ConditionField(instr));
      AddrMode2(instr, rd, MemOperand(x.rn_, scratch, x.am_));
      return;
    }
    DCHECK_GE(offset_12, 0);  // no masking needed
    instr |= offset_12;
  } else {
    // Register offset (shift_imm_ and shift_op_ are 0) or scaled register
    // offset; the constructors make sure that both shift_imm_ and shift_op_
    // are initialized.
    DCHECK(x.rm_ != pc);
    instr |= B25 | x.shift_imm_ * B7 | x.shift_op_ | x.rm_.code();
  }
  emit(instr | am | x.rn_.code() * B16 | rd.code() * B12);
}
void Assembler::AddrMode3(Instr instr, Register rd, const MemOperand& x) {
  DCHECK((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
  // This method does not handle pc-relative addresses.
  DCHECK(x.rn_ != pc);
  int am = x.am_;
  bool is_load = (instr & L) == L;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_8 = x.offset_;
    if (offset_8 < 0) {
      offset_8 = -offset_8;
      am ^= U;
    }
    if (!is_uint8(offset_8)) {
      // Immediate offset cannot be encoded, load it first to a scratch
      // register.
      UseScratchRegisterScope temps(this);
      // Allow re-using rd for load instructions if possible.
      Register scratch = (is_load && rd != x.rn_ && rd != pc && rd != sp)
                             ? rd
                             : temps.Acquire();
      mov(scratch, Operand(x.offset_), LeaveCC,
          Instruction::ConditionField(instr));
      AddrMode3(instr, rd, MemOperand(x.rn_, scratch, x.am_));
      return;
    }
    DCHECK_GE(offset_8, 0);  // no masking needed
    instr |= B | (offset_8 >> 4) * B8 | (offset_8 & 0xF);
  } else if (x.shift_imm_ != 0) {
    // Scaled register offsets are not supported, compute the offset
    // separately to a scratch register.
    UseScratchRegisterScope temps(this);
    // Allow re-using rd for load instructions if possible.
    Register scratch =
        (is_load && rd != x.rn_ && rd != pc && rd != sp) ? rd : temps.Acquire();
    mov(scratch, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
        Instruction::ConditionField(instr));
    AddrMode3(instr, rd, MemOperand(x.rn_, scratch, x.am_));
    return;
  } else {
    // Register offset.
    DCHECK(x.rm_ != pc);
    instr |= x.rm_.code();
  }
  emit(instr | am | x.rn_.code() * B16 | rd.code() * B12);
}
void Assembler::AddrMode4(Instr instr, Register rn, RegList rl) {
  emit(instr | rn.code() * B16 | rl.bits());
}
void Assembler::AddrMode5(Instr instr, CRegister crd, const MemOperand& x) {
  // Unindexed addressing is not encoded by this function.
  DCHECK_EQ((B27 | B26),
            (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
  DCHECK(x.rn_.is_valid() && !x.rm_.is_valid());
  int am = x.am_;
  int offset_8 = x.offset_;
  DCHECK_EQ(offset_8 & 3, 0);  // offset must be an aligned word offset
  offset_8 >>= 2;
  if (offset_8 < 0) {
    offset_8 = -offset_8;
    am ^= U;
  }
  DCHECK(is_uint8(offset_8));  // unsigned word offset must fit in a byte

  // Post-indexed addressing requires W == 1; different than in AddrMode2/3.
  if ((am & P) == 0) am |= W;

  emit(instr | am | x.rn_.code() * B16 | crd.code() * B12 | offset_8);
}
int Assembler::branch_offset(Label* L) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      // Point to previous instruction that uses the link.
      target_pos = L->pos();
    } else {
      // First entry of the link chain points to itself.
      target_pos = pc_offset();
    }
    L->link_to(pc_offset());
  }

  return target_pos - (pc_offset() + Instruction::kPcLoadDelta);
}
void Assembler::b(int branch_offset, Condition cond, RelocInfo::Mode rmode) {
  if (!RelocInfo::IsNoInfo(rmode)) RecordRelocInfo(rmode);
  DCHECK_EQ(branch_offset & 3, 0);
  int imm24 = branch_offset >> 2;
  const bool b_imm_check = is_int24(imm24);
  CHECK(b_imm_check);

  // Block the emission of the constant pool before the next instruction;
  // otherwise the passed-in branch offset would be off.
  BlockConstPoolFor(1);

  emit(cond | B27 | B25 | (imm24 & kImm24Mask));

  if (cond == al) {
    // Dead code is a good location to emit the constant pool.
    CheckConstPool(false, false);
  }
}
void Assembler::bl(int branch_offset, Condition cond, RelocInfo::Mode rmode) {
  if (!RelocInfo::IsNoInfo(rmode)) RecordRelocInfo(rmode);
  DCHECK_EQ(branch_offset & 3, 0);
  int imm24 = branch_offset >> 2;
  const bool bl_imm_check = is_int24(imm24);
  CHECK(bl_imm_check);

  // Block the emission of the constant pool before the next instruction;
  // otherwise the passed-in branch offset would be off.
  BlockConstPoolFor(1);

  emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
}
void Assembler::blx(int branch_offset) {
  DCHECK_EQ(branch_offset & 1, 0);
  int h = ((branch_offset & 2) >> 1) * B24;
  int imm24 = branch_offset >> 2;
  const bool blx_imm_check = is_int24(imm24);
  CHECK(blx_imm_check);

  // Block the emission of the constant pool before the next instruction;
  // otherwise the passed-in branch offset would be off.
  BlockConstPoolFor(1);

  emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
}
void Assembler::blx(Register target, Condition cond) {
  DCHECK(target != pc);
  emit(cond | B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX | target.code());
}

void Assembler::bx(Register target, Condition cond) {
  DCHECK(target != pc);  // use of pc is actually allowed
  emit(cond | B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BX | target.code());
}

void Assembler::b(Label* L, Condition cond) {
  CheckBuffer();
  b(branch_offset(L), cond);
}

void Assembler::bl(Label* L, Condition cond) {
  CheckBuffer();
  bl(branch_offset(L), cond);
}

void Assembler::blx(Label* L) {
  CheckBuffer();
  blx(branch_offset(L));
}
void Assembler::and_(Register dst, Register src1, const Operand& src2, SBit s,
                     Condition cond) {
  AddrMode1(cond | AND | s, dst, src1, src2);
}

void Assembler::and_(Register dst, Register src1, Register src2, SBit s,
                     Condition cond) {
  and_(dst, src1, Operand(src2), s, cond);
}

void Assembler::eor(Register dst, Register src1, const Operand& src2, SBit s,
                    Condition cond) {
  AddrMode1(cond | EOR | s, dst, src1, src2);
}

void Assembler::eor(Register dst, Register src1, Register src2, SBit s,
                    Condition cond) {
  AddrMode1(cond | EOR | s, dst, src1, Operand(src2));
}

void Assembler::sub(Register dst, Register src1, const Operand& src2, SBit s,
                    Condition cond) {
  AddrMode1(cond | SUB | s, dst, src1, src2);
}

void Assembler::sub(Register dst, Register src1, Register src2, SBit s,
                    Condition cond) {
  sub(dst, src1, Operand(src2), s, cond);
}

void Assembler::rsb(Register dst, Register src1, const Operand& src2, SBit s,
                    Condition cond) {
  AddrMode1(cond | RSB | s, dst, src1, src2);
}

void Assembler::add(Register dst, Register src1, const Operand& src2, SBit s,
                    Condition cond) {
  AddrMode1(cond | ADD | s, dst, src1, src2);
}

void Assembler::add(Register dst, Register src1, Register src2, SBit s,
                    Condition cond) {
  add(dst, src1, Operand(src2), s, cond);
}

void Assembler::adc(Register dst, Register src1, const Operand& src2, SBit s,
                    Condition cond) {
  AddrMode1(cond | ADC | s, dst, src1, src2);
}

void Assembler::sbc(Register dst, Register src1, const Operand& src2, SBit s,
                    Condition cond) {
  AddrMode1(cond | SBC | s, dst, src1, src2);
}

void Assembler::rsc(Register dst, Register src1, const Operand& src2, SBit s,
                    Condition cond) {
  AddrMode1(cond | RSC | s, dst, src1, src2);
}

void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
  AddrMode1(cond | TST | S, no_reg, src1, src2);
}

void Assembler::tst(Register src1, Register src2, Condition cond) {
  tst(src1, Operand(src2), cond);
}

void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
  AddrMode1(cond | TEQ | S, no_reg, src1, src2);
}

void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
  AddrMode1(cond | CMP | S, no_reg, src1, src2);
}

void Assembler::cmp(Register src1, Register src2, Condition cond) {
  cmp(src1, Operand(src2), cond);
}

void Assembler::cmp_raw_immediate(Register src, int raw_immediate,
                                  Condition cond) {
  DCHECK(is_uint12(raw_immediate));
  emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
}

void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
  AddrMode1(cond | CMN | S, no_reg, src1, src2);
}

void Assembler::orr(Register dst, Register src1, const Operand& src2, SBit s,
                    Condition cond) {
  AddrMode1(cond | ORR | s, dst, src1, src2);
}

void Assembler::orr(Register dst, Register src1, Register src2, SBit s,
                    Condition cond) {
  orr(dst, src1, Operand(src2), s, cond);
}

void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
  // Don't allow nop instructions in the form mov rn, rn to be generated using
  // the mov instruction.
  DCHECK(!(src.IsRegister() && src.rm() == dst && s == LeaveCC && cond == al));
  AddrMode1(cond | MOV | s, dst, no_reg, src);
}

void Assembler::mov(Register dst, Register src, SBit s, Condition cond) {
  mov(dst, Operand(src), s, cond);
}
void Assembler::mov_label_offset(Register dst, Label* label) {
  if (label->is_bound()) {
    mov(dst, Operand(label->pos() +
                     (InstructionStream::kHeaderSize - kHeapObjectTag)));
  } else {
    // Emit the link to the label in the code stream followed by extra nop
    // instructions. If the label is not linked, then start a new link chain
    // by linking it to itself, emitting pc_offset().
    int link = label->is_linked() ? label->pos() : pc_offset();
    label->link_to(pc_offset());

    // When the label is bound, these instructions will be patched with a
    // sequence of movw/movt or mov/orr/orr instructions. They will load the
    // destination register with the position of the label from the beginning
    // of the code. See target_at_put.
    CHECK(is_uint24(link));
    BlockConstPoolScope block_const_pool(this);
    emit(link);
    nop(dst.code());
    if (!CpuFeatures::IsSupported(ARMv7)) {
      nop(dst.code());
    }
  }
}
void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
  DCHECK(IsEnabled(ARMv7));
  emit(cond | 0x30 * B20 | reg.code() * B12 | EncodeMovwImmediate(immediate));
}

void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
  DCHECK(IsEnabled(ARMv7));
  emit(cond | 0x34 * B20 | reg.code() * B12 | EncodeMovwImmediate(immediate));
}
void Assembler::bic(Register dst, Register src1, const Operand& src2, SBit s,
                    Condition cond) {
  AddrMode1(cond | BIC | s, dst, src1, src2);
}

void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
  AddrMode1(cond | MVN | s, dst, no_reg, src);
}

void Assembler::asr(Register dst, Register src1, const Operand& src2, SBit s,
                    Condition cond) {
  if (src2.IsRegister()) {
    mov(dst, Operand(src1, ASR, src2.rm()), s, cond);
  } else {
    mov(dst, Operand(src1, ASR, src2.immediate()), s, cond);
  }
}

void Assembler::lsl(Register dst, Register src1, const Operand& src2, SBit s,
                    Condition cond) {
  if (src2.IsRegister()) {
    mov(dst, Operand(src1, LSL, src2.rm()), s, cond);
  } else {
    mov(dst, Operand(src1, LSL, src2.immediate()), s, cond);
  }
}

void Assembler::lsr(Register dst, Register src1, const Operand& src2, SBit s,
                    Condition cond) {
  if (src2.IsRegister()) {
    mov(dst, Operand(src1, LSR, src2.rm()), s, cond);
  } else {
    mov(dst, Operand(src1, LSR, src2.immediate()), s, cond);
  }
}
// Multiply instructions.
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
                    SBit s, Condition cond) {
  emit(cond | A | s | dst.code() * B16 | srcA.code() * B12 | src2.code() * B8 |
       B7 | B4 | src1.code());
}

void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
                    Condition cond) {
  DCHECK(IsEnabled(ARMv7));
  emit(cond | B22 | B21 | dst.code() * B16 | srcA.code() * B12 |
       src2.code() * B8 | B7 | B4 | src1.code());
}

void Assembler::sdiv(Register dst, Register src1, Register src2,
                     Condition cond) {
  DCHECK(IsEnabled(SUDIV));
  emit(cond | B26 | B25 | B24 | B20 | dst.code() * B16 | 0xF * B12 |
       src2.code() * B8 | B4 | src1.code());
}

void Assembler::udiv(Register dst, Register src1, Register src2,
                     Condition cond) {
  DCHECK(IsEnabled(SUDIV));
  emit(cond | B26 | B25 | B24 | B21 | B20 | dst.code() * B16 | 0xF * B12 |
       src2.code() * B8 | B4 | src1.code());
}

void Assembler::mul(Register dst, Register src1, Register src2, SBit s,
                    Condition cond) {
  emit(cond | s | dst.code() * B16 | src2.code() * B8 | B7 | B4 | src1.code());
}

void Assembler::smmla(Register dst, Register src1, Register src2,
                      Register srcA, Condition cond) {
  emit(cond | B26 | B25 | B24 | B22 | B20 | dst.code() * B16 |
       srcA.code() * B12 | src2.code() * B8 | B4 | src1.code());
}

void Assembler::smmul(Register dst, Register src1, Register src2,
                      Condition cond) {
  emit(cond | B26 | B25 | B24 | B22 | B20 | dst.code() * B16 | 0xF * B12 |
       src2.code() * B8 | B4 | src1.code());
}

void Assembler::smlal(Register dstL, Register dstH, Register src1,
                      Register src2, SBit s, Condition cond) {
  emit(cond | B23 | B22 | A | s | dstH.code() * B16 | dstL.code() * B12 |
       src2.code() * B8 | B7 | B4 | src1.code());
}

void Assembler::smull(Register dstL, Register dstH, Register src1,
                      Register src2, SBit s, Condition cond) {
  emit(cond | B23 | B22 | s | dstH.code() * B16 | dstL.code() * B12 |
       src2.code() * B8 | B7 | B4 | src1.code());
}

void Assembler::umlal(Register dstL, Register dstH, Register src1,
                      Register src2, SBit s, Condition cond) {
  emit(cond | B23 | A | s | dstH.code() * B16 | dstL.code() * B12 |
       src2.code() * B8 | B7 | B4 | src1.code());
}

void Assembler::umull(Register dstL, Register dstH, Register src1,
                      Register src2, SBit s, Condition cond) {
  emit(cond | B23 | s | dstH.code() * B16 | dstL.code() * B12 |
       src2.code() * B8 | B7 | B4 | src1.code());
}
// Miscellaneous arithmetic instructions.
void Assembler::clz(Register dst, Register src, Condition cond) {
  emit(cond | B24 | B22 | B21 | 15 * B16 | dst.code() * B12 | 15 * B8 | CLZ |
       src.code());
}

// Unsigned saturate.
void Assembler::usat(Register dst, int satpos, const Operand& src,
                     Condition cond) {
  DCHECK((satpos >= 0) && (satpos <= 31));
  DCHECK(src.IsImmediateShiftedRegister());
  DCHECK((src.shift_op_ == ASR) || (src.shift_op_ == LSL));

  int sh = 0;
  if (src.shift_op_ == ASR) {
    sh = 1;
  }

  emit(cond | 0x6 * B24 | 0xE * B20 | satpos * B16 | dst.code() * B12 |
       src.shift_imm_ * B7 | sh * B6 | 0x1 * B4 | src.rm_.code());
}
// Bitfield manipulation instructions.
void Assembler::ubfx(Register dst, Register src, int lsb, int width,
                     Condition cond) {
  DCHECK(IsEnabled(ARMv7));
  DCHECK(dst != pc && src != pc);
  DCHECK((lsb >= 0) && (lsb <= 31));
  DCHECK((width >= 1) && (width <= (32 - lsb)));
  emit(cond | 0xF * B23 | B22 | B21 | (width - 1) * B16 | dst.code() * B12 |
       lsb * B7 | B6 | B4 | src.code());
}

void Assembler::sbfx(Register dst, Register src, int lsb, int width,
                     Condition cond) {
  DCHECK(IsEnabled(ARMv7));
  DCHECK(dst != pc && src != pc);
  DCHECK((lsb >= 0) && (lsb <= 31));
  DCHECK((width >= 1) && (width <= (32 - lsb)));
  emit(cond | 0xF * B23 | B21 | (width - 1) * B16 | dst.code() * B12 |
       lsb * B7 | B6 | B4 | src.code());
}

void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
  DCHECK(IsEnabled(ARMv7));
  DCHECK(dst != pc);
  DCHECK((lsb >= 0) && (lsb <= 31));
  DCHECK((width >= 1) && (width <= (32 - lsb)));
  int msb = lsb + width - 1;
  emit(cond | 0x1F * B22 | msb * B16 | dst.code() * B12 | lsb * B7 | B4 | 0xF);
}

void Assembler::bfi(Register dst, Register src, int lsb, int width,
                    Condition cond) {
  DCHECK(IsEnabled(ARMv7));
  DCHECK(dst != pc && src != pc);
  DCHECK((lsb >= 0) && (lsb <= 31));
  DCHECK((width >= 1) && (width <= (32 - lsb)));
  int msb = lsb + width - 1;
  emit(cond | 0x1F * B22 | msb * B16 | dst.code() * B12 | lsb * B7 | B4 |
       src.code());
}
void Assembler::pkhbt(Register dst, Register src1, const Operand& src2,
                      Condition cond) {
  DCHECK(src2.IsImmediateShiftedRegister());
  DCHECK((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31));
  DCHECK(src2.shift_op() == LSL);
  emit(cond | 0x68 * B20 | src1.code() * B16 | dst.code() * B12 |
       src2.shift_imm_ * B7 | B4 | src2.rm().code());
}

void Assembler::pkhtb(Register dst, Register src1, const Operand& src2,
                      Condition cond) {
  DCHECK(src2.IsImmediateShiftedRegister());
  DCHECK((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32));
  DCHECK(src2.shift_op() == ASR);
  int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_;
  emit(cond | 0x68 * B20 | src1.code() * B16 | dst.code() * B12 | asr * B7 |
       B6 | B4 | src2.rm().code());
}
void Assembler::sxtb(Register dst, Register src, int rotate, Condition cond) {
  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
  emit(cond | 0x6A * B20 | 0xF * B16 | dst.code() * B12 |
       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
}

void Assembler::sxtab(Register dst, Register src1, Register src2, int rotate,
                      Condition cond) {
  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
  emit(cond | 0x6A * B20 | src1.code() * B16 | dst.code() * B12 |
       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
}

void Assembler::sxth(Register dst, Register src, int rotate, Condition cond) {
  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
  emit(cond | 0x6B * B20 | 0xF * B16 | dst.code() * B12 |
       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
}

void Assembler::sxtah(Register dst, Register src1, Register src2, int rotate,
                      Condition cond) {
  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
  emit(cond | 0x6B * B20 | src1.code() * B16 | dst.code() * B12 |
       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
}

void Assembler::uxtb(Register dst, Register src, int rotate, Condition cond) {
  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
  emit(cond | 0x6E * B20 | 0xF * B16 | dst.code() * B12 |
       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
}

void Assembler::uxtab(Register dst, Register src1, Register src2, int rotate,
                      Condition cond) {
  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
  emit(cond | 0x6E * B20 | src1.code() * B16 | dst.code() * B12 |
       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
}

void Assembler::uxtb16(Register dst, Register src, int rotate,
                       Condition cond) {
  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
  emit(cond | 0x6C * B20 | 0xF * B16 | dst.code() * B12 |
       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
}

void Assembler::uxth(Register dst, Register src, int rotate, Condition cond) {
  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
  emit(cond | 0x6F * B20 | 0xF * B16 | dst.code() * B12 |
       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
}

void Assembler::uxtah(Register dst, Register src1, Register src2, int rotate,
                      Condition cond) {
  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
  emit(cond | 0x6F * B20 | src1.code() * B16 | dst.code() * B12 |
       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
}
void Assembler::rbit(Register dst, Register src, Condition cond) {
  DCHECK(IsEnabled(ARMv7));
  emit(cond | 0x6FF * B16 | dst.code() * B12 | 0xF3 * B4 | src.code());
}

void Assembler::rev(Register dst, Register src, Condition cond) {
  emit(cond | 0x6BF * B16 | dst.code() * B12 | 0xF3 * B4 | src.code());
}
// Status register access instructions.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
  DCHECK(dst != pc);
  emit(cond | B24 | s | 15 * B16 | dst.code() * B12);
}

void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
                    Condition cond) {
  DCHECK(((fields & 0xFFF0FFFF) == CPSR) || ((fields & 0xFFF0FFFF) == SPSR));
  Instr instr;
  if (src.IsImmediate()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (src.MustOutputRelocInfo(this) ||
        !FitsShifter(src.immediate(), &rotate_imm, &immed_8, nullptr)) {
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      // Immediate operand cannot be encoded, load it first to a scratch
      // register.
      Move32BitImmediate(scratch, src);
      msr(fields, Operand(scratch), cond);
      return;
    }
    instr = I | rotate_imm * B8 | immed_8;
  } else {
    DCHECK(src.IsRegister());
    instr = src.rm_.code();
  }
  emit(cond | instr | B24 | B21 | fields | 15 * B12);
}
// Load/Store instructions.
void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
  AddrMode2(cond | B26 | L, dst, src);
}

void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
  AddrMode2(cond | B26, src, dst);
}

void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
  AddrMode2(cond | B26 | B | L, dst, src);
}

void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
  AddrMode2(cond | B26 | B, src, dst);
}

void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
  AddrMode3(cond | L | B7 | H | B4, dst, src);
}

void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
  AddrMode3(cond | B7 | H | B4, src, dst);
}

void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
  AddrMode3(cond | L | B7 | S6 | B4, dst, src);
}

void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
  AddrMode3(cond | L | B7 | S6 | H | B4, dst, src);
}

void Assembler::ldrd(Register dst1, Register dst2, const MemOperand& src,
                     Condition cond) {
  DCHECK(src.rm() == no_reg);
  DCHECK_EQ(dst1.code() + 1, dst2.code());
  AddrMode3(cond | B7 | B6 | B4, dst1, src);
}

void Assembler::strd(Register src1, Register src2, const MemOperand& dst,
                     Condition cond) {
  DCHECK(dst.rm() == no_reg);
  DCHECK_EQ(src1.code() + 1, src2.code());
  AddrMode3(cond | B7 | B6 | B5 | B4, src1, dst);
}

void Assembler::ldr_pcrel(Register dst, int imm12, Condition cond) {
  AddrMode am = Offset;
  if (imm12 < 0) {
    imm12 = -imm12;
    am = NegOffset;
  }
  DCHECK(is_uint12(imm12));
  emit(cond | B26 | am | L | pc.code() * B16 | dst.code() * B12 | imm12);
}
// Load/Store exclusive instructions.
void Assembler::ldrex(Register dst, Register src, Condition cond) {
  emit(cond | B24 | B23 | B20 | src.code() * B16 | dst.code() * B12 | 0xF9F);
}

void Assembler::strex(Register src1, Register src2, Register dst,
                      Condition cond) {
  emit(cond | B24 | B23 | dst.code() * B16 | src1.code() * B12 | 0xF9 * B4 |
       src2.code());
}

void Assembler::ldrexb(Register dst, Register src, Condition cond) {
  emit(cond | B24 | B23 | B22 | B20 | src.code() * B16 | dst.code() * B12 |
       0xF9F);
}

void Assembler::strexb(Register src1, Register src2, Register dst,
                       Condition cond) {
  emit(cond | B24 | B23 | B22 | dst.code() * B16 | src1.code() * B12 |
       0xF9 * B4 | src2.code());
}

void Assembler::ldrexh(Register dst, Register src, Condition cond) {
  emit(cond | B24 | B23 | B22 | B21 | B20 | src.code() * B16 |
       dst.code() * B12 | 0xF9F);
}

void Assembler::strexh(Register src1, Register src2, Register dst,
                       Condition cond) {
  emit(cond | B24 | B23 | B22 | B21 | dst.code() * B16 | src1.code() * B12 |
       0xF9 * B4 | src2.code());
}

void Assembler::ldrexd(Register dst1, Register dst2, Register src,
                       Condition cond) {
  DCHECK_EQ(dst1.code() + 1, dst2.code());
  emit(cond | B24 | B23 | B21 | B20 | src.code() * B16 | dst1.code() * B12 |
       0xF9F);
}

void Assembler::strexd(Register res, Register src1, Register src2,
                       Register dst, Condition cond) {
  DCHECK_EQ(src1.code() + 1, src2.code());
  emit(cond | B24 | B23 | B21 | dst.code() * B16 | res.code() * B12 |
       0xF9 * B4 | src1.code());
}
// Preload instructions.
void Assembler::pld(const MemOperand& address) {
  DCHECK(address.rm() == no_reg);
  DCHECK(address.am() == Offset);
  int U = B23;
  int offset = address.offset();
  if (offset < 0) {
    offset = -offset;
    U = 0;
  }
  DCHECK_LT(offset, 4096);
  emit(kSpecialCondition | B26 | B24 | U | B22 | B20 |
       address.rn().code() * B16 | 0xF * B12 | offset);
}
// Load/Store multiple instructions.
void Assembler::ldm(BlockAddrMode am, Register base, RegList dst,
                    Condition cond) {
  // ABI stack constraint: ldmxx base, {..sp..}; base != sp is not restartable.
  DCHECK(base == sp || !dst.has(sp));

  AddrMode4(cond | B27 | am | L, base, dst);

  // Emit the constant pool after a function return implemented by
  // ldm ..{..pc}.
  if (cond == al && dst.has(pc)) {
    // There is a slight chance that the ldm instruction was actually a call,
    // in which case it would be wrong to return into the constant pool; we
    // recognize this case by checking if the emission of the pool was blocked
    // at the pc of the ldm instruction by a mov lr, pc instruction; if this
    // is the case, we emit a jump over the pool.
    CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
  }
}

void Assembler::stm(BlockAddrMode am, Register base, RegList src,
                    Condition cond) {
  AddrMode4(cond | B27 | am, base, src);
}
// Exception-generating instructions and debugging support.
void Assembler::stop(Condition cond, int32_t code) {
  BlockConstPoolScope block_const_pool(this);
  if (code >= 0) {
    svc(kStopCode + code, cond);
  } else {
    svc(kStopCode + kMaxStopCode, cond);
  }
}
void Assembler::bkpt(uint32_t imm16) {
  DCHECK(is_uint16(imm16));
  emit(al | B24 | B21 | (imm16 >> 4) * B8 | BKPT | (imm16 & 0xF));
}

void Assembler::svc(uint32_t imm24, Condition cond) {
  CHECK(is_uint24(imm24));
  emit(cond | 15 * B24 | imm24);
}
void Assembler::dmb(BarrierOption option) {
  if (CpuFeatures::IsSupported(ARMv7)) {
    // Details available in ARM DDI 0406C.b, A8-378.
    emit(kSpecialCondition | 0x57FF * B12 | 5 * B4 | option);
  } else {
    // CP15DMB: CRn=c7, opc1=0, CRm=c10, opc2=5, Rt is ignored.
    mcr(p15, 0, r0, cr7, cr10, 5);
  }
}

void Assembler::dsb(BarrierOption option) {
  if (CpuFeatures::IsSupported(ARMv7)) {
    emit(kSpecialCondition | 0x57FF * B12 | 4 * B4 | option);
  } else {
    // CP15DSB: CRn=c7, opc1=0, CRm=c10, opc2=4, Rt is ignored.
    mcr(p15, 0, r0, cr7, cr10, 4);
  }
}

void Assembler::isb(BarrierOption option) {
  if (CpuFeatures::IsSupported(ARMv7)) {
    emit(kSpecialCondition | 0x57FF * B12 | 6 * B4 | option);
  } else {
    // CP15ISB: CRn=c7, opc1=0, CRm=c5, opc2=4, Rt is ignored.
    mcr(p15, 0, r0, cr7, cr5, 4);
  }
}

void Assembler::csdb() {
  // CSDB hint encoding; details in the Arm Cache Speculation Side-channels
  // white paper.
  emit(0xE320F014);
}
// Coprocessor instructions.
void Assembler::cdp(Coprocessor coproc, int opcode_1, CRegister crd,
                    CRegister crn, CRegister crm, int opcode_2,
                    Condition cond) {
  DCHECK(is_uint4(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 15) * B20 | crn.code() * B16 |
       crd.code() * B12 | coproc * B8 | (opcode_2 & 7) * B5 | crm.code());
}

void Assembler::cdp2(Coprocessor coproc, int opcode_1, CRegister crd,
                     CRegister crn, CRegister crm, int opcode_2) {
  cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
}

void Assembler::mcr(Coprocessor coproc, int opcode_1, Register rd,
                    CRegister crn, CRegister crm, int opcode_2,
                    Condition cond) {
  DCHECK(is_uint3(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7) * B21 | crn.code() * B16 |
       rd.code() * B12 | coproc * B8 | (opcode_2 & 7) * B5 | B4 | crm.code());
}

void Assembler::mcr2(Coprocessor coproc, int opcode_1, Register rd,
                     CRegister crn, CRegister crm, int opcode_2) {
  mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
}

void Assembler::mrc(Coprocessor coproc, int opcode_1, Register rd,
                    CRegister crn, CRegister crm, int opcode_2,
                    Condition cond) {
  DCHECK(is_uint3(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7) * B21 | L | crn.code() * B16 |
       rd.code() * B12 | coproc * B8 | (opcode_2 & 7) * B5 | B4 | crm.code());
}

void Assembler::mrc2(Coprocessor coproc, int opcode_1, Register rd,
                     CRegister crn, CRegister crm, int opcode_2) {
  mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
}

void Assembler::ldc(Coprocessor coproc, CRegister crd, const MemOperand& src,
                    LFlag l, Condition cond) {
  AddrMode5(cond | B27 | B26 | l | L | coproc * B8, crd, src);
}

void Assembler::ldc(Coprocessor coproc, CRegister crd, Register rn, int option,
                    LFlag l, Condition cond) {
  // Unindexed addressing.
  DCHECK(is_uint8(option));
  emit(cond | B27 | B26 | U | l | L | rn.code() * B16 | crd.code() * B12 |
       coproc * B8 | (option & 255));
}

void Assembler::ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src,
                     LFlag l) {
  ldc(coproc, crd, src, l, kSpecialCondition);
}

void Assembler::ldc2(Coprocessor coproc, CRegister crd, Register rn,
                     int option, LFlag l) {
  ldc(coproc, crd, rn, option, l, kSpecialCondition);
}
// Support for VFP.

void Assembler::vldr(const DwVfpRegister dst, const Register base, int offset,
                     const Condition cond) {
  // Ddst = MEM(Rbase + offset).
  DCHECK(VfpRegisterIsAvailable(dst));
  int u = 1;
  if (offset < 0) {
    CHECK_NE(offset, kMinInt);
    offset = -offset;
    u = 0;
  }
  int vd, d;
  dst.split_code(&vd, &d);

  DCHECK_GE(offset, 0);
  if ((offset % 4) == 0 && (offset / 4) < 256) {
    emit(cond | 0xD * B24 | u * B23 | d * B22 | B20 | base.code() * B16 |
         vd * B12 | 0xB * B8 | ((offset / 4) & 255));
  } else {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    // Larger offsets must be handled by computing the correct address in the
    // scratch register.
    DCHECK(base != scratch);
    if (u == 1) {
      add(scratch, base, Operand(offset));
    } else {
      sub(scratch, base, Operand(offset));
    }
    emit(cond | 0xD * B24 | d * B22 | B20 | scratch.code() * B16 | vd * B12 |
         0xB * B8);
  }
}
void Assembler::vldr(const DwVfpRegister dst, const MemOperand& operand,
                     const Condition cond) {
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(operand.am_ == Offset);
  if (operand.rm().is_valid()) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    add(scratch, operand.rn(),
        Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
    vldr(dst, scratch, 0, cond);
  } else {
    vldr(dst, operand.rn(), operand.offset(), cond);
  }
}
void Assembler::vldr(const SwVfpRegister dst, const Register base, int offset,
                     const Condition cond) {
  // Sdst = MEM(Rbase + offset).
  int u = 1;
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }
  int sd, d;
  dst.split_code(&sd, &d);
  DCHECK_GE(offset, 0);

  if ((offset % 4) == 0 && (offset / 4) < 256) {
    emit(cond | u * B23 | d * B22 | 0xD1 * B20 | base.code() * B16 | sd * B12 |
         0xA * B8 | ((offset / 4) & 255));
  } else {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    // Larger offsets must be handled by computing the correct address in the
    // scratch register.
    DCHECK(base != scratch);
    if (u == 1) {
      add(scratch, base, Operand(offset));
    } else {
      sub(scratch, base, Operand(offset));
    }
    emit(cond | d * B22 | 0xD1 * B20 | scratch.code() * B16 | sd * B12 |
         0xA * B8);
  }
}
void Assembler::vldr(const SwVfpRegister dst, const MemOperand& operand,
                     const Condition cond) {
  DCHECK(operand.am_ == Offset);
  if (operand.rm().is_valid()) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    add(scratch, operand.rn(),
        Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
    vldr(dst, scratch, 0, cond);
  } else {
    vldr(dst, operand.rn(), operand.offset(), cond);
  }
}
void Assembler::vstr(const DwVfpRegister src, const Register base, int offset,
                     const Condition cond) {
  // MEM(Rbase + offset) = Dsrc.
  DCHECK(VfpRegisterIsAvailable(src));
  int u = 1;
  if (offset < 0) {
    CHECK_NE(offset, kMinInt);
    offset = -offset;
    u = 0;
  }
  DCHECK_GE(offset, 0);
  int vd, d;
  src.split_code(&vd, &d);

  if ((offset % 4) == 0 && (offset / 4) < 256) {
    emit(cond | 0xD * B24 | u * B23 | d * B22 | base.code() * B16 | vd * B12 |
         0xB * B8 | ((offset / 4) & 255));
  } else {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    // Larger offsets must be handled by computing the correct address in the
    // scratch register.
    DCHECK(base != scratch);
    if (u == 1) {
      add(scratch, base, Operand(offset));
    } else {
      sub(scratch, base, Operand(offset));
    }
    emit(cond | 0xD * B24 | d * B22 | scratch.code() * B16 | vd * B12 |
         0xB * B8);
  }
}
void Assembler::vstr(const DwVfpRegister src, const MemOperand& operand,
                     const Condition cond) {
  DCHECK(VfpRegisterIsAvailable(src));
  DCHECK(operand.am_ == Offset);
  if (operand.rm().is_valid()) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    add(scratch, operand.rn(),
        Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
    vstr(src, scratch, 0, cond);
  } else {
    vstr(src, operand.rn(), operand.offset(), cond);
  }
}
void Assembler::vstr(const SwVfpRegister src, const Register base, int offset,
                     const Condition cond) {
  // MEM(Rbase + offset) = SSrc.
  int u = 1;
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }
  int sd, d;
  src.split_code(&sd, &d);
  DCHECK_GE(offset, 0);

  if ((offset % 4) == 0 && (offset / 4) < 256) {
    emit(cond | u * B23 | d * B22 | 0xD0 * B20 | base.code() * B16 | sd * B12 |
         0xA * B8 | ((offset / 4) & 255));
  } else {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    // Larger offsets must be handled by computing the correct address in the
    // scratch register.
    DCHECK(base != scratch);
    if (u == 1) {
      add(scratch, base, Operand(offset));
    } else {
      sub(scratch, base, Operand(offset));
    }
    emit(cond | d * B22 | 0xD0 * B20 | scratch.code() * B16 | sd * B12 |
         0xA * B8);
  }
}
void Assembler::vstr(const SwVfpRegister src, const MemOperand& operand,
                     const Condition cond) {
  DCHECK(operand.am_ == Offset);
  if (operand.rm().is_valid()) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    add(scratch, operand.rn(),
        Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
    vstr(src, scratch, 0, cond);
  } else {
    vstr(src, operand.rn(), operand.offset(), cond);
  }
}
void Assembler::vldm(BlockAddrMode am, Register base, DwVfpRegister first,
                     DwVfpRegister last, Condition cond) {
  DCHECK_LE(first.code(), last.code());
  DCHECK(VfpRegisterIsAvailable(last));
  DCHECK(am == ia || am == ia_w || am == db_w);
  DCHECK(base != pc);

  int sd, d;
  first.split_code(&sd, &d);
  int count = last.code() - first.code() + 1;
  DCHECK_LE(count, 16);
  emit(cond | B27 | B26 | am | d * B22 | B20 | base.code() * B16 | sd * B12 |
       0xB * B8 | count * 2);
}

void Assembler::vstm(BlockAddrMode am, Register base, DwVfpRegister first,
                     DwVfpRegister last, Condition cond) {
  DCHECK_LE(first.code(), last.code());
  DCHECK(VfpRegisterIsAvailable(last));
  DCHECK(am == ia || am == ia_w || am == db_w);
  DCHECK(base != pc);

  int sd, d;
  first.split_code(&sd, &d);
  int count = last.code() - first.code() + 1;
  DCHECK_LE(count, 16);
  emit(cond | B27 | B26 | am | d * B22 | base.code() * B16 | sd * B12 |
       0xB * B8 | count * 2);
}

void Assembler::vldm(BlockAddrMode am, Register base, SwVfpRegister first,
                     SwVfpRegister last, Condition cond) {
  DCHECK_LE(first.code(), last.code());
  DCHECK(am == ia || am == ia_w || am == db_w);
  DCHECK(base != pc);

  int sd, d;
  first.split_code(&sd, &d);
  int count = last.code() - first.code() + 1;
  emit(cond | B27 | B26 | am | d * B22 | B20 | base.code() * B16 | sd * B12 |
       0xA * B8 | count);
}

void Assembler::vstm(BlockAddrMode am, Register base, SwVfpRegister first,
                     SwVfpRegister last, Condition cond) {
  DCHECK_LE(first.code(), last.code());
  DCHECK(am == ia || am == ia_w || am == db_w);
  DCHECK(base != pc);

  int sd, d;
  first.split_code(&sd, &d);
  int count = last.code() - first.code() + 1;
  emit(cond | B27 | B26 | am | d * B22 | base.code() * B16 | sd * B12 |
       0xA * B8 | count);
}
static void DoubleAsTwoUInt32(base::Double d, uint32_t* lo, uint32_t* hi) {
  uint64_t i = d.AsUint64();

  *lo = i & 0xFFFFFFFF;
  *hi = i >> 32;
}
static void WriteVmovIntImmEncoding(uint8_t imm, uint32_t* encoding) {
  // Integer promotion from uint8_t to int makes these all okay.
  *encoding = ((imm & 0x80) << (24 - 7));   // a
  *encoding |= ((imm & 0x70) << (16 - 4));  // bcd
  *encoding |= (imm & 0x0f);                // efgh
}
// Only works for little endian floating point formats.
static bool FitsVmovIntImm(uint64_t imm, uint32_t* encoding, uint8_t* cmode) {
  uint32_t lo = imm & 0xFFFFFFFF;
  uint32_t hi = imm >> 32;
  if ((lo == hi && ((lo & 0xffffff00) == 0))) {
    WriteVmovIntImmEncoding(imm & 0xff, encoding);
    *cmode = 0;
    return true;
  } else if ((lo == hi) && ((lo & 0xffff) == (lo >> 16)) &&
             ((lo & 0xff) == (lo >> 24))) {
    // Check that all bytes in imm are the same.
    WriteVmovIntImmEncoding(imm & 0xff, encoding);
    *cmode = 0xe;
    return true;
  }

  return false;
}
void Assembler::vmov(const DwVfpRegister dst, uint64_t imm) {
  uint32_t enc;
  uint8_t cmode;
  uint8_t op = 0;
  if (CpuFeatures::IsSupported(NEON) && FitsVmovIntImm(imm, &enc, &cmode)) {
    CpuFeatureScope scope(this, NEON);
    // Instruction details available in ARM DDI 0406C.b, A8-937.
    // 001i1(27-23) | D(22) | 000(21-19) | imm3(18-16) | Vd(15-12) |
    // cmode(11-8) | 0(7) | 0(6) | op(5) | 4(1) | imm4(3-0)
    int vd, d;
    dst.split_code(&vd, &d);
    emit(kSpecialCondition | 0x05 * B23 | d * B22 | vd * B12 | cmode * B8 |
         op * B5 | 0x1 * B4 | enc);
  } else {
    UNIMPLEMENTED();
  }
}

void Assembler::vmov(const QwNeonRegister dst, uint64_t imm) {
  uint32_t enc;
  uint8_t cmode;
  uint8_t op = 0;
  if (CpuFeatures::IsSupported(NEON) && FitsVmovIntImm(imm, &enc, &cmode)) {
    CpuFeatureScope scope(this, NEON);
    // Instruction details available in ARM DDI 0406C.b, A8-937.
    int vd, d;
    dst.split_code(&vd, &d);
    emit(kSpecialCondition | 0x05 * B23 | d * B22 | vd * B12 | cmode * B8 |
         0x1 * B6 | op * B5 | 0x1 * B4 | enc);
  } else {
    UNIMPLEMENTED();
  }
}
// Only works for little endian floating point formats.
static bool FitsVmovFPImmediate(base::Double d, uint32_t* encoding) {
  // VMOV can accept an immediate of the form:
  //
  //  +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
  //
  // The immediate is encoded using an 8-bit quantity [abcdefgh], where the
  // imm64 equivalent has the form:
  //
  //  [aBbbbbbb,bbcdefgh,00000000,...,00000000]  with B = ~b.
  uint32_t lo, hi;
  DoubleAsTwoUInt32(d, &lo, &hi);

  // The most obvious constraint is the long block of zeroes.
  if ((lo != 0) || ((hi & 0xFFFF) != 0)) {
    return false;
  }

  // Bits 61:54 must be all clear or all set.
  if (((hi & 0x3FC00000) != 0) && ((hi & 0x3FC00000) != 0x3FC00000)) {
    return false;
  }

  // Bit 62 must be NOT bit 61.
  if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
    return false;
  }

  // Create the encoded immediate in the form:
  //  [00000000,0000abcd,00000000,0000efgh]
  *encoding = (hi >> 16) & 0xF;       // Low nybble.
  *encoding |= (hi >> 4) & 0x70000;   // Low three bits of the high nybble.
  *encoding |= (hi >> 12) & 0x80000;  // Top bit of the high nybble.

  return true;
}
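
// Worked example: 1.0 is 0x3FF00000'00000000. The low word and the low 16
// bits of the high word are zero, bits 61:54 are all set, and bit 62 differs
// from bit 61, so the value is encodable; the code above produces
// *encoding == 0x70000 (abcd = 0111, efgh = 0000).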
void Assembler::vmov(const SwVfpRegister dst, Float32 imm) {
  uint32_t enc;
  if (CpuFeatures::IsSupported(VFPv3) &&
      FitsVmovFPImmediate(base::Double(imm.get_scalar()), &enc)) {
    CpuFeatureScope scope(this, VFPv3);
    // The float can be encoded in the instruction.
    //
    // Sd = immediate.
    // Instruction details available in ARM DDI 0406C.b, A8-936.
    int vd, d;
    dst.split_code(&vd, &d);
    emit(al | 0x1D * B23 | d * B22 | 0x3 * B20 | vd * B12 | 0x5 * B9 | enc);
  } else {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    mov(scratch, Operand(imm.get_bits()));
    vmov(dst, scratch);
  }
}
void Assembler::vmov(const DwVfpRegister dst, base::Double imm,
                     const Register extra_scratch) {
  DCHECK(VfpRegisterIsAvailable(dst));
  uint32_t enc;
  if (CpuFeatures::IsSupported(VFPv3) && FitsVmovFPImmediate(imm, &enc)) {
    CpuFeatureScope scope(this, VFPv3);
    // The double can be encoded in the instruction.
    //
    // Dd = immediate.
    // Instruction details available in ARM DDI 0406C.b, A8-936.
    int vd, d;
    dst.split_code(&vd, &d);
    emit(al | 0x1D * B23 | d * B22 | 0x3 * B20 | vd * B12 | 0x5 * B9 | B8 |
         enc);
  } else {
    // Synthesise the double from ARM immediates.
    uint32_t lo, hi;
    DoubleAsTwoUInt32(imm, &lo, &hi);
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();

    if (lo == hi) {
      // Move the low and high parts of the double to a D register in one
      // instruction.
      mov(scratch, Operand(lo));
      vmov(dst, scratch, scratch);
    } else if (extra_scratch == no_reg) {
      // We only have one spare scratch register.
      mov(scratch, Operand(lo));
      vmov(NeonS32, dst, 0, scratch);
      if (((lo & 0xFFFF) == (hi & 0xFFFF)) &&
          CpuFeatures::IsSupported(ARMv7)) {
        CpuFeatureScope scope(this, ARMv7);
        movt(scratch, hi >> 16);
      } else {
        mov(scratch, Operand(hi));
      }
      vmov(NeonS32, dst, 1, scratch);
    } else {
      // Move the low and high parts of the double to a D register in one
      // instruction.
      mov(scratch, Operand(lo));
      mov(extra_scratch, Operand(hi));
      vmov(dst, scratch, extra_scratch);
    }
  }
}
void Assembler::vmov(const SwVfpRegister dst, const SwVfpRegister src,
                     const Condition cond) {
  // Sd = Sm.
  int sd, d, sm, m;
  dst.split_code(&sd, &d);
  src.split_code(&sm, &m);
  emit(cond | 0xE * B24 | d * B22 | 0xB * B20 | sd * B12 | 0xA * B8 | B6 |
       m * B5 | sm);
}

void Assembler::vmov(const DwVfpRegister dst, const DwVfpRegister src,
                     const Condition cond) {
  // Dd = Dm.
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(VfpRegisterIsAvailable(src));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | vd * B12 | 0x5 * B9 | B8 |
       B6 | m * B5 | vm);
}

void Assembler::vmov(const DwVfpRegister dst, const Register src1,
                     const Register src2, const Condition cond) {
  // Dm = <Rt,Rt2>.
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(src1 != pc && src2 != pc);
  int vm, m;
  dst.split_code(&vm, &m);
  emit(cond | 0xC * B24 | B22 | src2.code() * B16 | src1.code() * B12 |
       0xB * B8 | m * B5 | B4 | vm);
}

void Assembler::vmov(const Register dst1, const Register dst2,
                     const DwVfpRegister src, const Condition cond) {
  // <Rt,Rt2> = Dm.
  DCHECK(VfpRegisterIsAvailable(src));
  DCHECK(dst1 != pc && dst2 != pc);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0xC * B24 | B22 | B20 | dst2.code() * B16 | dst1.code() * B12 |
       0xB * B8 | m * B5 | B4 | vm);
}

void Assembler::vmov(const SwVfpRegister dst, const Register src,
                     const Condition cond) {
  // Sn = Rt.
  DCHECK(src != pc);
  int sn, n;
  dst.split_code(&sn, &n);
  emit(cond | 0xE * B24 | sn * B16 | src.code() * B12 | 0xA * B8 | n * B7 | B4);
}

void Assembler::vmov(const Register dst, const SwVfpRegister src,
                     const Condition cond) {
  // Rt = Sn.
  DCHECK(dst != pc);
  int sn, n;
  src.split_code(&sn, &n);
  emit(cond | 0xE * B24 | B20 | sn * B16 | dst.code() * B12 | 0xA * B8 |
       n * B7 | B4);
}
// Type of data to read from or write to the VFP register.
// Used as a specifier in the generic vcvt instruction.
enum VFPType { S32, U32, F32, F64 };

static bool IsSignedVFPType(VFPType type) {
  switch (type) {
    case S32:
      return true;
    case U32:
      return false;
    default:
      UNREACHABLE();
  }
}

static bool IsIntegerVFPType(VFPType type) {
  switch (type) {
    case S32:
    case U32:
      return true;
    case F32:
    case F64:
      return false;
    default:
      UNREACHABLE();
  }
}

static bool IsDoubleVFPType(VFPType type) {
  switch (type) {
    case F32:
      return false;
    case F64:
      return true;
    default:
      UNREACHABLE();
  }
}

// Split five bit reg_code based on the size of reg_type.
//  32-bit register codes are Vm:M
//  64-bit register codes are M:Vm.
static void SplitRegCode(VFPType reg_type, int reg_code, int* vm, int* m) {
  DCHECK((reg_code >= 0) && (reg_code <= 31));
  if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
    SwVfpRegister::split_code(reg_code, vm, m);
  } else {
    DwVfpRegister::split_code(reg_code, vm, m);
  }
}
// Encode the vcvt.src_type.dst_type instruction.
static Instr EncodeVCVT(const VFPType dst_type, const int dst_code,
                        const VFPType src_type, const int src_code,
                        VFPConversionMode mode, const Condition cond) {
  DCHECK(src_type != dst_type);
  int D, Vd, M, Vm;
  SplitRegCode(src_type, src_code, &Vm, &M);
  SplitRegCode(dst_type, dst_code, &Vd, &D);

  if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
    // Conversion between IEEE floating point and 32-bit integer.
    // Instruction details available in ARM DDI 0406B, A8.6.295.
    DCHECK(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));

    int sz, opc2, op;

    if (IsIntegerVFPType(dst_type)) {
      opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
      sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
      op = mode;
    } else {
      DCHECK(IsIntegerVFPType(src_type));
      opc2 = 0x0;
      sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
      op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
    }

    return (cond | 0xE * B24 | B23 | D * B22 | 0x3 * B20 | B19 | opc2 * B16 |
            Vd * B12 | 0x5 * B9 | sz * B8 | op * B7 | B6 | M * B5 | Vm);
  } else {
    // Conversion between IEEE double and single precision.
    // Instruction details available in ARM DDI 0406B, A8.6.298.
    int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
    return (cond | 0xE * B24 | B23 | D * B22 | 0x3 * B20 | 0x7 * B16 |
            Vd * B12 | 0x5 * B9 | sz * B8 | B7 | B6 | M * B5 | Vm);
  }
}
void Assembler::vcvt_f64_s32(const DwVfpRegister dst, const SwVfpRegister src,
                             VFPConversionMode mode, const Condition cond) {
  DCHECK(VfpRegisterIsAvailable(dst));
  emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
}

void Assembler::vcvt_f32_s32(const SwVfpRegister dst, const SwVfpRegister src,
                             VFPConversionMode mode, const Condition cond) {
  emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
}

void Assembler::vcvt_f64_u32(const DwVfpRegister dst, const SwVfpRegister src,
                             VFPConversionMode mode, const Condition cond) {
  DCHECK(VfpRegisterIsAvailable(dst));
  emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
}

void Assembler::vcvt_f32_u32(const SwVfpRegister dst, const SwVfpRegister src,
                             VFPConversionMode mode, const Condition cond) {
  emit(EncodeVCVT(F32, dst.code(), U32, src.code(), mode, cond));
}

void Assembler::vcvt_s32_f32(const SwVfpRegister dst, const SwVfpRegister src,
                             VFPConversionMode mode, const Condition cond) {
  emit(EncodeVCVT(S32, dst.code(), F32, src.code(), mode, cond));
}

void Assembler::vcvt_u32_f32(const SwVfpRegister dst, const SwVfpRegister src,
                             VFPConversionMode mode, const Condition cond) {
  emit(EncodeVCVT(U32, dst.code(), F32, src.code(), mode, cond));
}

void Assembler::vcvt_s32_f64(const SwVfpRegister dst, const DwVfpRegister src,
                             VFPConversionMode mode, const Condition cond) {
  DCHECK(VfpRegisterIsAvailable(src));
  emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
}

void Assembler::vcvt_u32_f64(const SwVfpRegister dst, const DwVfpRegister src,
                             VFPConversionMode mode, const Condition cond) {
  DCHECK(VfpRegisterIsAvailable(src));
  emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
}

void Assembler::vcvt_f64_f32(const DwVfpRegister dst, const SwVfpRegister src,
                             VFPConversionMode mode, const Condition cond) {
  DCHECK(VfpRegisterIsAvailable(dst));
  emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
}

void Assembler::vcvt_f32_f64(const SwVfpRegister dst, const DwVfpRegister src,
                             VFPConversionMode mode, const Condition cond) {
  DCHECK(VfpRegisterIsAvailable(src));
  emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
}
void Assembler::vcvt_f64_s32(const DwVfpRegister dst, int fraction_bits,
                             const Condition cond) {
  // Converts a 32-bit signed fixed-point value (in place in Dd) to f64.
  DCHECK(IsEnabled(VFPv3));
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(fraction_bits > 0 && fraction_bits <= 32);
  int vd, d;
  dst.split_code(&vd, &d);
  int imm5 = 32 - fraction_bits;
  int i = imm5 & 0x1;
  int imm4 = (imm5 >> 1) & 0xF;
  emit(cond | 0xE * B24 | B23 | d * B22 | 0x3 * B20 | B19 | 0x2 * B16 |
       vd * B12 | 0x5 * B9 | B8 | B7 | B6 | i * B5 | imm4);
}
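// Example: fraction_bits = 16 gives imm5 = 32 - 16 = 16, so i = 0 and
// imm4 = 8; the five-bit immediate is stored split, with its low bit at
// instruction bit 5 and the remaining four bits at bits 3:0.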
void Assembler::vneg(const DwVfpRegister dst, const DwVfpRegister src,
                     const Condition cond) {
  // Dd = -Dm.
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(VfpRegisterIsAvailable(src));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | B16 | vd * B12 | 0x5 * B9 |
       B8 | B6 | m * B5 | vm);
}

void Assembler::vneg(const SwVfpRegister dst, const SwVfpRegister src,
                     const Condition cond) {
  // Sd = -Sm.
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | B16 | vd * B12 | 0x5 * B9 |
       B6 | m * B5 | vm);
}

void Assembler::vabs(const DwVfpRegister dst, const DwVfpRegister src,
                     const Condition cond) {
  // Dd = |Dm|.
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(VfpRegisterIsAvailable(src));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | vd * B12 | 0x5 * B9 | B8 | B7 |
       B6 | m * B5 | vm);
}

void Assembler::vabs(const SwVfpRegister dst, const SwVfpRegister src,
                     const Condition cond) {
  // Sd = |Sm|.
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | vd * B12 | 0x5 * B9 | B7 | B6 |
       m * B5 | vm);
}
void Assembler::vadd(const DwVfpRegister dst, const DwVfpRegister src1,
                     const DwVfpRegister src2, const Condition cond) {
  // Dd = Dn + Dm.
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(VfpRegisterIsAvailable(src1));
  DCHECK(VfpRegisterIsAvailable(src2));
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C * B23 | d * B22 | 0x3 * B20 | vn * B16 | vd * B12 |
       0x5 * B9 | B8 | n * B7 | m * B5 | vm);
}

void Assembler::vadd(const SwVfpRegister dst, const SwVfpRegister src1,
                     const SwVfpRegister src2, const Condition cond) {
  // Sd = Sn + Sm.
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C * B23 | d * B22 | 0x3 * B20 | vn * B16 | vd * B12 |
       0x5 * B9 | n * B7 | m * B5 | vm);
}

void Assembler::vsub(const DwVfpRegister dst, const DwVfpRegister src1,
                     const DwVfpRegister src2, const Condition cond) {
  // Dd = Dn - Dm.
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(VfpRegisterIsAvailable(src1));
  DCHECK(VfpRegisterIsAvailable(src2));
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C * B23 | d * B22 | 0x3 * B20 | vn * B16 | vd * B12 |
       0x5 * B9 | B8 | n * B7 | B6 | m * B5 | vm);
}

void Assembler::vsub(const SwVfpRegister dst, const SwVfpRegister src1,
                     const SwVfpRegister src2, const Condition cond) {
  // Sd = Sn - Sm.
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C * B23 | d * B22 | 0x3 * B20 | vn * B16 | vd * B12 |
       0x5 * B9 | n * B7 | B6 | m * B5 | vm);
}

void Assembler::vmul(const DwVfpRegister dst, const DwVfpRegister src1,
                     const DwVfpRegister src2, const Condition cond) {
  // Dd = Dn * Dm.
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(VfpRegisterIsAvailable(src1));
  DCHECK(VfpRegisterIsAvailable(src2));
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C * B23 | d * B22 | 0x2 * B20 | vn * B16 | vd * B12 |
       0x5 * B9 | B8 | n * B7 | m * B5 | vm);
}

void Assembler::vmul(const SwVfpRegister dst, const SwVfpRegister src1,
                     const SwVfpRegister src2, const Condition cond) {
  // Sd = Sn * Sm.
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C * B23 | d * B22 | 0x2 * B20 | vn * B16 | vd * B12 |
       0x5 * B9 | n * B7 | m * B5 | vm);
}

void Assembler::vmla(const DwVfpRegister dst, const DwVfpRegister src1,
                     const DwVfpRegister src2, const Condition cond) {
  // Dd += Dn * Dm.
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(VfpRegisterIsAvailable(src1));
  DCHECK(VfpRegisterIsAvailable(src2));
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C * B23 | d * B22 | vn * B16 | vd * B12 | 0x5 * B9 | B8 |
       n * B7 | m * B5 | vm);
}

void Assembler::vmla(const SwVfpRegister dst, const SwVfpRegister src1,
                     const SwVfpRegister src2, const Condition cond) {
  // Sd += Sn * Sm.
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C * B23 | d * B22 | vn * B16 | vd * B12 | 0x5 * B9 | n * B7 |
       m * B5 | vm);
}

void Assembler::vmls(const DwVfpRegister dst, const DwVfpRegister src1,
                     const DwVfpRegister src2, const Condition cond) {
  // Dd -= Dn * Dm.
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(VfpRegisterIsAvailable(src1));
  DCHECK(VfpRegisterIsAvailable(src2));
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C * B23 | d * B22 | vn * B16 | vd * B12 | 0x5 * B9 | B8 |
       n * B7 | B6 | m * B5 | vm);
}

void Assembler::vmls(const SwVfpRegister dst, const SwVfpRegister src1,
                     const SwVfpRegister src2, const Condition cond) {
  // Sd -= Sn * Sm.
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C * B23 | d * B22 | vn * B16 | vd * B12 | 0x5 * B9 | n * B7 |
       B6 | m * B5 | vm);
}

void Assembler::vdiv(const DwVfpRegister dst, const DwVfpRegister src1,
                     const DwVfpRegister src2, const Condition cond) {
  // Dd = Dn / Dm.
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(VfpRegisterIsAvailable(src1));
  DCHECK(VfpRegisterIsAvailable(src2));
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1D * B23 | d * B22 | vn * B16 | vd * B12 | 0x5 * B9 | B8 |
       n * B7 | m * B5 | vm);
}

void Assembler::vdiv(const SwVfpRegister dst, const SwVfpRegister src1,
                     const SwVfpRegister src2, const Condition cond) {
  // Sd = Sn / Sm.
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1D * B23 | d * B22 | vn * B16 | vd * B12 | 0x5 * B9 | n * B7 |
       m * B5 | vm);
}
void Assembler::vcmp(const DwVfpRegister src1, const DwVfpRegister src2,
                     const Condition cond) {
  // vcmp(Dd, Dm) double precision floating point comparison.
  DCHECK(VfpRegisterIsAvailable(src1));
  DCHECK(VfpRegisterIsAvailable(src2));
  int vd, d;
  src1.split_code(&vd, &d);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x4 * B16 | vd * B12 |
       0x5 * B9 | B8 | B6 | m * B5 | vm);
}

void Assembler::vcmp(const SwVfpRegister src1, const SwVfpRegister src2,
                     const Condition cond) {
  // vcmp(Sd, Sm) single precision floating point comparison.
  int vd, d;
  src1.split_code(&vd, &d);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x4 * B16 | vd * B12 |
       0x5 * B9 | B6 | m * B5 | vm);
}

void Assembler::vcmp(const DwVfpRegister src1, const double src2,
                     const Condition cond) {
  // vcmp(Dd, #0.0) double precision floating point comparison with zero.
  DCHECK(VfpRegisterIsAvailable(src1));
  DCHECK_EQ(src2, 0.0);
  int vd, d;
  src1.split_code(&vd, &d);
  emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x5 * B16 | vd * B12 |
       0x5 * B9 | B8 | B6);
}

void Assembler::vcmp(const SwVfpRegister src1, const float src2,
                     const Condition cond) {
  // vcmp(Sd, #0.0) single precision floating point comparison with zero.
  DCHECK_EQ(src2, 0.0);
  int vd, d;
  src1.split_code(&vd, &d);
  emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x5 * B16 | vd * B12 |
       0x5 * B9 | B6);
}
void Assembler::vmaxnm(const DwVfpRegister dst, const DwVfpRegister src1,
                       const DwVfpRegister src2) {
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(kSpecialCondition | 0x1D * B23 | d * B22 | vn * B16 | vd * B12 |
       0x5 * B9 | B8 | n * B7 | m * B5 | vm);
}

void Assembler::vmaxnm(const SwVfpRegister dst, const SwVfpRegister src1,
                       const SwVfpRegister src2) {
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(kSpecialCondition | 0x1D * B23 | d * B22 | vn * B16 | vd * B12 |
       0x5 * B9 | n * B7 | m * B5 | vm);
}

void Assembler::vminnm(const DwVfpRegister dst, const DwVfpRegister src1,
                       const DwVfpRegister src2) {
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(kSpecialCondition | 0x1D * B23 | d * B22 | vn * B16 | vd * B12 |
       0x5 * B9 | B8 | n * B7 | B6 | m * B5 | vm);
}

void Assembler::vminnm(const SwVfpRegister dst, const SwVfpRegister src1,
                       const SwVfpRegister src2) {
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(kSpecialCondition | 0x1D * B23 | d * B22 | vn * B16 | vd * B12 |
       0x5 * B9 | n * B7 | B6 | m * B5 | vm);
}
void Assembler::vsel(Condition cond, const DwVfpRegister dst,
                     const DwVfpRegister src1, const DwVfpRegister src2) {
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  int sz = 1;

  // VSEL can only encode eq, ge, gt and vs directly; the remaining conditions
  // share the same top condition bits and are implemented by swapping the
  // source operands.
  int vsel_cond = (cond >> 30) & 0x3;
  if ((cond != eq) && (cond != ge) && (cond != gt) && (cond != vs)) {
    DCHECK((cond == ne) | (cond == lt) | (cond == le) | (cond == vc));
    std::swap(vn, vm);
    std::swap(n, m);
  }

  emit(kSpecialCondition | 0x1C * B23 | d * B22 | vsel_cond * B20 | vn * B16 |
       vd * B12 | 0x5 * B9 | sz * B8 | n * B7 | m * B5 | vm);
}

void Assembler::vsel(Condition cond, const SwVfpRegister dst,
                     const SwVfpRegister src1, const SwVfpRegister src2) {
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  int sz = 0;

  // See the double precision variant above for the condition handling.
  int vsel_cond = (cond >> 30) & 0x3;
  if ((cond != eq) && (cond != ge) && (cond != gt) && (cond != vs)) {
    DCHECK((cond == ne) | (cond == lt) | (cond == le) | (cond == vc));
    std::swap(vn, vm);
    std::swap(n, m);
  }

  emit(kSpecialCondition | 0x1C * B23 | d * B22 | vsel_cond * B20 | vn * B16 |
       vd * B12 | 0x5 * B9 | sz * B8 | n * B7 | m * B5 | vm);
}
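// Example: vsel(ne, d0, d1, d2) emits the same instruction as
// vsel(eq, d0, d2, d1). Only eq, ge, gt and vs fit in the two vsel condition
// bits; their inverses share the encoding and differ only by the operand swap.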
void Assembler::vsqrt(const DwVfpRegister dst, const DwVfpRegister src,
                      const Condition cond) {
  // Dd = sqrt(Dm).
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(VfpRegisterIsAvailable(src));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | B16 | vd * B12 | 0x5 * B9 |
       B8 | 0x3 * B6 | m * B5 | vm);
}

void Assembler::vsqrt(const SwVfpRegister dst, const SwVfpRegister src,
                      const Condition cond) {
  // Sd = sqrt(Sm).
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | B16 | vd * B12 | 0x5 * B9 |
       0x3 * B6 | m * B5 | vm);
}
void Assembler::vmsr(Register dst, Condition cond) {
  // Write the FPSCR from an ARM core register.
  emit(cond | 0xE * B24 | 0xE * B20 | B16 | dst.code() * B12 | 0xA * B8 | B4);
}

void Assembler::vmrs(Register dst, Condition cond) {
  // Read the FPSCR into an ARM core register.
  emit(cond | 0xE * B24 | 0xF * B20 | B16 | dst.code() * B12 | 0xA * B8 | B4);
}
void Assembler::vrinta(const SwVfpRegister dst, const SwVfpRegister src) {
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | vd * B12 |
       0x5 * B9 | B6 | m * B5 | vm);
}

void Assembler::vrinta(const DwVfpRegister dst, const DwVfpRegister src) {
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | vd * B12 |
       0x5 * B9 | B8 | B6 | m * B5 | vm);
}

void Assembler::vrintn(const SwVfpRegister dst, const SwVfpRegister src) {
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x1 * B16 |
       vd * B12 | 0x5 * B9 | B6 | m * B5 | vm);
}

void Assembler::vrintn(const DwVfpRegister dst, const DwVfpRegister src) {
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x1 * B16 |
       vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
}

void Assembler::vrintp(const SwVfpRegister dst, const SwVfpRegister src) {
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x2 * B16 |
       vd * B12 | 0x5 * B9 | B6 | m * B5 | vm);
}

void Assembler::vrintp(const DwVfpRegister dst, const DwVfpRegister src) {
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x2 * B16 |
       vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
}

void Assembler::vrintm(const SwVfpRegister dst, const SwVfpRegister src) {
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x3 * B16 |
       vd * B12 | 0x5 * B9 | B6 | m * B5 | vm);
}

void Assembler::vrintm(const DwVfpRegister dst, const DwVfpRegister src) {
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x3 * B16 |
       vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
}

void Assembler::vrintz(const SwVfpRegister dst, const SwVfpRegister src,
                       const Condition cond) {
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x3 * B17 | vd * B12 |
       0x5 * B9 | B7 | B6 | m * B5 | vm);
}

void Assembler::vrintz(const DwVfpRegister dst, const DwVfpRegister src,
                       const Condition cond) {
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x3 * B17 | vd * B12 |
       0x5 * B9 | B8 | B7 | B6 | m * B5 | vm);
}
void Assembler::vld1(NeonSize size, const NeonListOperand& dst,
                     const NeonMemOperand& src) {
  DCHECK(IsEnabled(NEON));
  int vd, d;
  dst.base().split_code(&vd, &d);
  emit(0xFU * B28 | 4 * B24 | d * B22 | 2 * B20 | src.rn().code() * B16 |
       vd * B12 | dst.type() * B8 | size * B6 | src.align() * B4 |
       src.rm().code());
}

// vld1s (single element to one lane).
void Assembler::vld1s(NeonSize size, const NeonListOperand& dst, uint8_t index,
                      const NeonMemOperand& src) {
  DCHECK(IsEnabled(NEON));
  uint8_t index_align = index << (size + 1);
  int vd, d;
  dst.base().split_code(&vd, &d);
  emit(0xFU * B28 | 4 * B24 | 1 * B23 | d * B22 | 2 * B20 |
       src.rn().code() * B16 | vd * B12 | size * B10 | index_align * B4 |
       src.rm().code());
}

// vld1r (replicate a single element to all lanes).
void Assembler::vld1r(NeonSize size, const NeonListOperand& dst,
                      const NeonMemOperand& src) {
  DCHECK(IsEnabled(NEON));
  int vd, d;
  dst.base().split_code(&vd, &d);
  emit(0xFU * B28 | 4 * B24 | 1 * B23 | d * B22 | 2 * B20 |
       src.rn().code() * B16 | vd * B12 | 0xC * B8 | size * B6 |
       dst.length() * B5 | src.rm().code());
}

void Assembler::vst1(NeonSize size, const NeonListOperand& src,
                     const NeonMemOperand& dst) {
  DCHECK(IsEnabled(NEON));
  int vd, d;
  src.base().split_code(&vd, &d);
  emit(0xFU * B28 | 4 * B24 | d * B22 | dst.rn().code() * B16 | vd * B12 |
       src.type() * B8 | size * B6 | dst.align() * B4 | dst.rm().code());
}

// vst1s (single element from one lane).
void Assembler::vst1s(NeonSize size, const NeonListOperand& src, uint8_t index,
                      const NeonMemOperand& dst) {
  DCHECK(IsEnabled(NEON));
  uint8_t index_align = index << (size + 1);
  int vd, d;
  src.base().split_code(&vd, &d);
  emit(0xFU * B28 | 9 * B23 | d * B22 | dst.rn().code() * B16 | vd * B12 |
       size * B10 | index_align * B4 | dst.rm().code());
}
void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
  // Qd = vmovl(Dm) SIMD lengthening move.
  DCHECK(IsEnabled(NEON));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  int U = NeonU(dt);
  int imm3 = 1 << NeonSz(dt);
  emit(0xFU * B28 | B25 | U * B24 | B23 | d * B22 | imm3 * B19 | vd * B12 |
       0xA * B8 | m * B5 | B4 | vm);
}

void Assembler::vqmovn(NeonDataType dst_dt, NeonDataType src_dt,
                       DwVfpRegister dst, QwNeonRegister src) {
  // vqmovn.<type><size> Dd, Qm: SIMD saturating narrowing move.
  DCHECK(IsEnabled(NEON));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  int size = NeonSz(dst_dt);
  int op = NeonU(src_dt) ? 0b11 : NeonU(dst_dt) ? 0b01 : 0b10;
  emit(0x1E7U * B23 | d * B22 | 0x3 * B20 | size * B18 | 0x2 * B16 | vd * B12 |
       0x2 * B8 | op * B6 | m * B5 | vm);
}
static int EncodeScalar(NeonDataType dt, int index) {
  int opc1_opc2 = 0;
  DCHECK_LE(0, index);
  switch (dt) {
    case NeonS8: case NeonU8:
      DCHECK_GT(8, index);
      opc1_opc2 = 0x8 | index;
      break;
    case NeonS16: case NeonU16:
      DCHECK_GT(4, index);
      opc1_opc2 = 0x1 | (index << 1);
      break;
    case NeonS32: case NeonU32:
      DCHECK_GT(2, index);
      opc1_opc2 = index << 2;
      break;
    default:
      UNREACHABLE();
  }
  return (opc1_opc2 >> 2) * B21 | (opc1_opc2 & 0x3) * B5;
}
void Assembler::vmov(NeonDataType dt, DwVfpRegister dst, int index,
                     Register src) {
  // vmov ARM core register to scalar.
  DCHECK(dt == NeonS32 || dt == NeonU32 || IsEnabled(NEON));
  int vd, d;
  dst.split_code(&vd, &d);
  int opc1_opc2 = EncodeScalar(dt, index);
  emit(0xEEu * B24 | vd * B16 | src.code() * B12 | 0xB * B8 | d * B7 | B4 |
       opc1_opc2);
}

void Assembler::vmov(NeonDataType dt, Register dst, DwVfpRegister src,
                     int index) {
  // vmov ARM scalar to core register.
  DCHECK(dt == NeonS32 || dt == NeonU32 || IsEnabled(NEON));
  int vn, n;
  src.split_code(&vn, &n);
  int opc1_opc2 = EncodeScalar(dt, index);
  // 32-bit scalar moves are full-width, so both NeonS32 and NeonU32 use U = 0.
  int u = NeonDataTypeToSize(dt) == Neon32 ? 0 : NeonU(dt);
  emit(0xEEu * B24 | u * B23 | B20 | vn * B16 | dst.code() * B12 | 0xB * B8 |
       n * B7 | B4 | opc1_opc2);
}

void Assembler::vmov(QwNeonRegister dst, QwNeonRegister src) {
  // Qd = Qm, encoded as vorr with identical sources.
  vorr(dst, src, src);
}

void Assembler::vdup(NeonSize size, QwNeonRegister dst, Register src) {
  DCHECK(IsEnabled(NEON));
  // Duplicate an ARM core register into all lanes of Qd.
  int B = 0, E = 0;
  switch (size) {
    case Neon8:
      B = 1;
      break;
    case Neon16:
      E = 1;
      break;
    case Neon32:
      break;
    default:
      UNREACHABLE();
  }
  int vd, d;
  dst.split_code(&vd, &d);

  emit(al | 0x1D * B23 | B * B22 | B21 | vd * B16 | src.code() * B12 |
       0xB * B8 | d * B7 | E * B5 | B4);
}
void NeonSplitCode(NeonRegType type, int code, int* vm, int* m,
                   int* encoding) {
  if (type == NEON_D) {
    DwVfpRegister::split_code(code, vm, m);
  } else {
    DCHECK_EQ(type, NEON_Q);
    QwNeonRegister::split_code(code, vm, m);
    *encoding |= B6;  // Set the Q bit.
  }
}
static Instr EncodeNeonDupOp(NeonSize size, NeonRegType reg_type, int dst_code,
                             DwVfpRegister src, int index) {
  int sz = static_cast<int>(size);
  DCHECK_GT(kSimd128Size / (1 << sz), index);
  int imm4 = (1 << sz) | ((index << (sz + 1)) & 0xF);
  int qbit = 0;
  int vd, d;
  NeonSplitCode(reg_type, dst_code, &vd, &d, &qbit);
  int vm, m;
  src.split_code(&vm, &m);

  return 0x1E7U * B23 | d * B22 | 0x3 * B20 | imm4 * B16 | vd * B12 |
         0x18 * B7 | qbit | m * B5 | vm;
}
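// Example: for Neon16 (sz = 1) and index 2, imm4 = (1 << 1) | (2 << 2) = 0xA;
// the low bits of imm4 encode the element size and the remaining bits select
// the lane to duplicate.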
void Assembler::vdup(NeonSize size, DwVfpRegister dst, DwVfpRegister src,
                     int index) {
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonDupOp(size, NEON_D, dst.code(), src, index));
}

void Assembler::vdup(NeonSize size, QwNeonRegister dst, DwVfpRegister src,
                     int index) {
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonDupOp(size, NEON_Q, dst.code(), src, index));
}
static Instr EncodeNeonVCVT(VFPType dst_type, QwNeonRegister dst,
                            VFPType src_type, QwNeonRegister src) {
  DCHECK(src_type != dst_type);
  DCHECK(src_type == F32 || dst_type == F32);
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);

  int op = 0;
  if (src_type == F32) {
    DCHECK(dst_type == S32 || dst_type == U32);
    op = dst_type == U32 ? 3 : 2;
  } else {
    DCHECK(src_type == S32 || src_type == U32);
    op = src_type == U32 ? 1 : 0;
  }

  return 0x1E7U * B23 | d * B22 | 0x3B * B16 | vd * B12 | 0x3 * B9 | op * B7 |
         B6 | m * B5 | vm;
}
void Assembler::vcvt_f32_s32(QwNeonRegister dst, QwNeonRegister src) {
  DCHECK(IsEnabled(NEON));
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(VfpRegisterIsAvailable(src));
  emit(EncodeNeonVCVT(F32, dst, S32, src));
}

void Assembler::vcvt_f32_u32(QwNeonRegister dst, QwNeonRegister src) {
  DCHECK(IsEnabled(NEON));
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(VfpRegisterIsAvailable(src));
  emit(EncodeNeonVCVT(F32, dst, U32, src));
}

void Assembler::vcvt_s32_f32(QwNeonRegister dst, QwNeonRegister src) {
  DCHECK(IsEnabled(NEON));
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(VfpRegisterIsAvailable(src));
  emit(EncodeNeonVCVT(S32, dst, F32, src));
}

void Assembler::vcvt_u32_f32(QwNeonRegister dst, QwNeonRegister src) {
  DCHECK(IsEnabled(NEON));
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(VfpRegisterIsAvailable(src));
  emit(EncodeNeonVCVT(U32, dst, F32, src));
}
static Instr EncodeNeonUnaryOp(UnaryOp op, NeonRegType reg_type, NeonSize size,
                               int dst_code, int src_code) {
  int op_encoding = 0;
  switch (op) {
    case VMVN:
      DCHECK_EQ(Neon8, size);  // size == 0 for vmvn.
      op_encoding = B10 | 0x3 * B7;
      break;
    case VSWP:
      DCHECK_EQ(Neon8, size);  // size == 0 for vswp.
      op_encoding = B17;
      break;
    case VABS:
      op_encoding = B16 | 0x6 * B7;
      break;
    case VABSF:
      DCHECK_EQ(Neon32, size);
      op_encoding = B16 | B10 | 0x6 * B7;
      break;
    case VNEG:
      op_encoding = B16 | 0x7 * B7;
      break;
    case VNEGF:
      DCHECK_EQ(Neon32, size);
      op_encoding = B16 | B10 | 0x7 * B7;
      break;
    case VRINTM:
      op_encoding = B17 | 0xD * B7;
      break;
    case VRINTN:
      op_encoding = B17 | 0x8 * B7;
      break;
    case VRINTP:
      op_encoding = B17 | 0xF * B7;
      break;
    case VRINTZ:
      op_encoding = B17 | 0xB * B7;
      break;
    case VZIP:
      op_encoding = 0x2 * B16 | 0x3 * B7;
      break;
    case VUZP:
      op_encoding = 0x2 * B16 | 0x2 * B7;
      break;
    case VREV16:
      op_encoding = 0x2 * B7;
      break;
    case VREV32:
      op_encoding = 0x1 * B7;
      break;
    case VREV64:
      // op_encoding is 0.
      break;
    case VTRN:
      op_encoding = 0x2 * B16 | B7;
      break;
    case VRECPE:
      // Only support floating point.
      op_encoding = 0x3 * B16 | 0xA * B7;
      break;
    case VRSQRTE:
      // Only support floating point.
      op_encoding = 0x3 * B16 | 0xB * B7;
      break;
    case VPADAL_S:
      op_encoding = 0xC * B7;
      break;
    case VPADAL_U:
      op_encoding = 0xD * B7;
      break;
    case VPADDL_S:
      op_encoding = 0x4 * B7;
      break;
    case VPADDL_U:
      op_encoding = 0x5 * B7;
      break;
    case VCEQ0:
      // Only support integers.
      op_encoding = 0x1 * B16 | 0x2 * B7;
      break;
    case VCLT0:
      // Only support signed integers.
      op_encoding = 0x1 * B16 | 0x4 * B7;
      break;
    case VCNT:
      op_encoding = 0xA * B7;
      break;
  }
  int vd, d;
  NeonSplitCode(reg_type, dst_code, &vd, &d, &op_encoding);
  int vm, m;
  NeonSplitCode(reg_type, src_code, &vm, &m, &op_encoding);

  return 0x1E7U * B23 | d * B22 | 0x3 * B20 | size * B18 | vd * B12 |
         op_encoding | m * B5 | vm;
}
void Assembler::vmvn(QwNeonRegister dst, QwNeonRegister src) {
  // Qd = vmvn(Qm) SIMD bitwise negate.
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonUnaryOp(VMVN, NEON_Q, Neon8, dst.code(), src.code()));
}

void Assembler::vswp(DwVfpRegister dst, DwVfpRegister src) {
  // Dd = vswp(Dm) SIMD d-register swap.
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonUnaryOp(VSWP, NEON_D, Neon8, dst.code(), src.code()));
}

void Assembler::vswp(QwNeonRegister dst, QwNeonRegister src) {
  // Qd = vswp(Qm) SIMD q-register swap.
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonUnaryOp(VSWP, NEON_Q, Neon8, dst.code(), src.code()));
}

void Assembler::vabs(QwNeonRegister dst, QwNeonRegister src) {
  // Qd = vabs.f32(Qm) SIMD floating point absolute value.
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonUnaryOp(VABSF, NEON_Q, Neon32, dst.code(), src.code()));
}

void Assembler::vabs(NeonSize size, QwNeonRegister dst, QwNeonRegister src) {
  // Qd = vabs.s<size>(Qm) SIMD integer absolute value.
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonUnaryOp(VABS, NEON_Q, size, dst.code(), src.code()));
}

void Assembler::vneg(QwNeonRegister dst, QwNeonRegister src) {
  // Qd = vneg.f32(Qm) SIMD floating point negate.
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonUnaryOp(VNEGF, NEON_Q, Neon32, dst.code(), src.code()));
}

void Assembler::vneg(NeonSize size, QwNeonRegister dst, QwNeonRegister src) {
  // Qd = vneg.s<size>(Qm) SIMD integer negate.
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonUnaryOp(VNEG, NEON_Q, size, dst.code(), src.code()));
}
enum BinaryBitwiseOp { VAND, VBIC, VBIF, VBIT, VBSL, VEOR, VORR, VORN };

static Instr EncodeNeonBinaryBitwiseOp(BinaryBitwiseOp op, NeonRegType reg_type,
                                       int dst_code, int src_code1,
                                       int src_code2) {
  int op_encoding = 0;
  switch (op) {
    case VAND:
      // op_encoding is 0.
      break;
    case VBIC:
      op_encoding = 0x1 * B20;
      break;
    case VBIF:
      op_encoding = B24 | 0x3 * B20;
      break;
    case VBIT:
      op_encoding = B24 | 0x2 * B20;
      break;
    case VBSL:
      op_encoding = B24 | 0x1 * B20;
      break;
    case VEOR:
      op_encoding = B24;
      break;
    case VORR:
      op_encoding = 0x2 * B20;
      break;
    case VORN:
      op_encoding = 0x3 * B20;
      break;
  }
  int vd, d;
  NeonSplitCode(reg_type, dst_code, &vd, &d, &op_encoding);
  int vn, n;
  NeonSplitCode(reg_type, src_code1, &vn, &n, &op_encoding);
  int vm, m;
  NeonSplitCode(reg_type, src_code2, &vm, &m, &op_encoding);

  return 0x1E4U * B23 | op_encoding | d * B22 | vn * B16 | vd * B12 | B8 |
         n * B7 | m * B5 | B4 | vm;
}
void Assembler::vand(QwNeonRegister dst, QwNeonRegister src1,
                     QwNeonRegister src2) {
  // Qd = vand(Qn, Qm) SIMD AND.
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonBinaryBitwiseOp(VAND, NEON_Q, dst.code(), src1.code(),
                                 src2.code()));
}

void Assembler::vbic(QwNeonRegister dst, QwNeonRegister src1,
                     QwNeonRegister src2) {
  // Qd = vbic(Qn, Qm) SIMD AND NOT.
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonBinaryBitwiseOp(VBIC, NEON_Q, dst.code(), src1.code(),
                                 src2.code()));
}

void Assembler::vbsl(QwNeonRegister dst, QwNeonRegister src1,
                     QwNeonRegister src2) {
  // Qd = vbsl(Qn, Qm) SIMD bitwise select.
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonBinaryBitwiseOp(VBSL, NEON_Q, dst.code(), src1.code(),
                                 src2.code()));
}

void Assembler::veor(DwVfpRegister dst, DwVfpRegister src1,
                     DwVfpRegister src2) {
  // Dd = veor(Dn, Dm) SIMD exclusive OR.
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonBinaryBitwiseOp(VEOR, NEON_D, dst.code(), src1.code(),
                                 src2.code()));
}

void Assembler::veor(QwNeonRegister dst, QwNeonRegister src1,
                     QwNeonRegister src2) {
  // Qd = veor(Qn, Qm) SIMD exclusive OR.
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonBinaryBitwiseOp(VEOR, NEON_Q, dst.code(), src1.code(),
                                 src2.code()));
}

void Assembler::vorr(QwNeonRegister dst, QwNeonRegister src1,
                     QwNeonRegister src2) {
  // Qd = vorr(Qn, Qm) SIMD OR.
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonBinaryBitwiseOp(VORR, NEON_Q, dst.code(), src1.code(),
                                 src2.code()));
}

void Assembler::vorn(QwNeonRegister dst, QwNeonRegister src1,
                     QwNeonRegister src2) {
  // Qd = vorn(Qn, Qm) SIMD OR NOT.
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonBinaryBitwiseOp(VORN, NEON_Q, dst.code(), src1.code(),
                                 src2.code()));
}
static Instr EncodeNeonBinOp(FPBinOp op, QwNeonRegister dst,
                             QwNeonRegister src1, QwNeonRegister src2) {
  int op_encoding = 0;
  switch (op) {
    case VADDF:
      op_encoding = 0xD * B8;
      break;
    case VSUBF:
      op_encoding = B21 | 0xD * B8;
      break;
    case VMULF:
      op_encoding = B24 | 0xD * B8 | B4;
      break;
    case VMINF:
      op_encoding = B21 | 0xF * B8;
      break;
    case VMAXF:
      op_encoding = 0xF * B8;
      break;
    case VRECPS:
      op_encoding = 0xF * B8 | B4;
      break;
    case VRSQRTS:
      op_encoding = B21 | 0xF * B8 | B4;
      break;
    case VCEQF:
      op_encoding = 0xE * B8;
      break;
    case VCGEF:
      op_encoding = B24 | 0xE * B8;
      break;
    case VCGTF:
      op_encoding = B24 | B21 | 0xE * B8;
      break;
  }
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  return 0x1E4U * B23 | op_encoding | d * B22 | vn * B16 | vd * B12 | n * B7 |
         B6 | m * B5 | vm;
}
static Instr EncodeNeonDataTypeBinOp(IntegerBinOp op, NeonDataType dt,
                                     QwNeonRegister dst, QwNeonRegister src1,
                                     QwNeonRegister src2) {
  int op_encoding = 0;
  switch (op) {
    case VADD:
      op_encoding = 0x8 * B8;
      break;
    case VQADD:
      op_encoding = B4;
      break;
    case VSUB:
      op_encoding = B24 | 0x8 * B8;
      break;
    case VQSUB:
      op_encoding = 0x2 * B8 | B4;
      break;
    case VMUL:
      op_encoding = 0x9 * B8 | B4;
      break;
    case VMIN:
      op_encoding = 0x6 * B8 | B4;
      break;
    case VMAX:
      op_encoding = 0x6 * B8;
      break;
    case VTST:
      op_encoding = 0x8 * B8 | B4;
      break;
    case VCEQ:
      op_encoding = B24 | 0x8 * B8 | B4;
      break;
    case VCGE:
      op_encoding = 0x3 * B8 | B4;
      break;
    case VCGT:
      op_encoding = 0x3 * B8;
      break;
    case VRHADD:
      op_encoding = B8;
      break;
    case VQRDMULH:
      op_encoding = B24 | 0xB * B8;
      break;
  }
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  int size = NeonSz(dt);
  int u = NeonU(dt);
  return 0x1E4U * B23 | u * B24 | d * B22 | size * B20 | vn * B16 | vd * B12 |
         n * B7 | B6 | m * B5 | vm | op_encoding;
}

static Instr EncodeNeonSizeBinOp(IntegerBinOp op, NeonSize size,
                                 QwNeonRegister dst, QwNeonRegister src1,
                                 QwNeonRegister src2) {
  // Map NeonSize values to the signed values in NeonDataType, so the U bit
  // will be 0.
  return EncodeNeonDataTypeBinOp(op, static_cast<NeonDataType>(size), dst,
                                 src1, src2);
}
void Assembler::vadd(QwNeonRegister dst, QwNeonRegister src1,
                     QwNeonRegister src2) {
  // Qd = vadd(Qn, Qm) SIMD floating point addition.
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonBinOp(VADDF, dst, src1, src2));
}

void Assembler::vadd(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
                     QwNeonRegister src2) {
  // Qd = vadd(Qn, Qm) SIMD integer addition.
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonSizeBinOp(VADD, size, dst, src1, src2));
}

void Assembler::vqadd(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
                      QwNeonRegister src2) {
  // Qd = vqadd(Qn, Qm) SIMD integer saturating addition.
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonDataTypeBinOp(VQADD, dt, dst, src1, src2));
}

void Assembler::vsub(QwNeonRegister dst, QwNeonRegister src1,
                     QwNeonRegister src2) {
  // Qd = vsub(Qn, Qm) SIMD floating point subtraction.
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonBinOp(VSUBF, dst, src1, src2));
}

void Assembler::vsub(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
                     QwNeonRegister src2) {
  // Qd = vsub(Qn, Qm) SIMD integer subtraction.
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonSizeBinOp(VSUB, size, dst, src1, src2));
}

void Assembler::vqsub(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
                      QwNeonRegister src2) {
  // Qd = vqsub(Qn, Qm) SIMD integer saturating subtraction.
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonDataTypeBinOp(VQSUB, dt, dst, src1, src2));
}
void Assembler::vmlal(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src1,
                      DwVfpRegister src2) {
  // Qd = vmlal(Dn, Dm) SIMD multiply accumulate long.
  DCHECK(IsEnabled(NEON));
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  int size = NeonSz(dt);
  int u = NeonU(dt);
  emit(0xFU * B28 | B25 | u * B24 | B23 | d * B22 | size * B20 | vn * B16 |
       vd * B12 | 0x8 * B8 | n * B7 | m * B5 | vm);
}
void Assembler::vmul(QwNeonRegister dst, QwNeonRegister src1,
                     QwNeonRegister src2) {
  // Qd = vmul(Qn, Qm) SIMD floating point multiply.
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonBinOp(VMULF, dst, src1, src2));
}

void Assembler::vmul(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
                     QwNeonRegister src2) {
  // Qd = vmul(Qn, Qm) SIMD integer multiply.
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonSizeBinOp(VMUL, size, dst, src1, src2));
}
void Assembler::vmull(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src1,
                      DwVfpRegister src2) {
  // Qd = vmull(Dn, Dm) SIMD multiply long.
  DCHECK(IsEnabled(NEON));
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  int size = NeonSz(dt);
  int u = NeonU(dt);
  emit(0xFU * B28 | B25 | u * B24 | B23 | d * B22 | size * B20 | vn * B16 |
       vd * B12 | 0xC * B8 | n * B7 | m * B5 | vm);
}
void Assembler::vmin(QwNeonRegister dst, QwNeonRegister src1,
                     QwNeonRegister src2) {
  // Qd = vmin(Qn, Qm) SIMD floating point MIN.
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonBinOp(VMINF, dst, src1, src2));
}

void Assembler::vmin(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
                     QwNeonRegister src2) {
  // Qd = vmin(Qn, Qm) SIMD integer MIN.
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonDataTypeBinOp(VMIN, dt, dst, src1, src2));
}

void Assembler::vmax(QwNeonRegister dst, QwNeonRegister src1,
                     QwNeonRegister src2) {
  // Qd = vmax(Qn, Qm) SIMD floating point MAX.
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonBinOp(VMAXF, dst, src1, src2));
}

void Assembler::vmax(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
                     QwNeonRegister src2) {
  // Qd = vmax(Qn, Qm) SIMD integer MAX.
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonDataTypeBinOp(VMAX, dt, dst, src1, src2));
}
enum NeonShiftOp { VSHL, VSHR, VSLI, VSRI, VSRA };

static Instr EncodeNeonShiftRegisterOp(NeonShiftOp op, NeonDataType dt,
                                       NeonRegType reg_type, int dst_code,
                                       int src_code, int shift_code) {
  DCHECK_EQ(op, VSHL);
  int op_encoding = 0;
  int vd, d;
  NeonSplitCode(reg_type, dst_code, &vd, &d, &op_encoding);
  int vm, m;
  NeonSplitCode(reg_type, src_code, &vm, &m, &op_encoding);
  int vn, n;
  NeonSplitCode(reg_type, shift_code, &vn, &n, &op_encoding);
  int size = NeonSz(dt);
  int u = NeonU(dt);

  return 0x1E4U * B23 | u * B24 | d * B22 | size * B20 | vn * B16 | vd * B12 |
         0x4 * B8 | n * B7 | m * B5 | vm | op_encoding;
}
static Instr EncodeNeonShiftOp(NeonShiftOp op, NeonSize size, bool is_unsigned,
                               NeonRegType reg_type, int dst_code, int src_code,
                               int shift) {
  int size_in_bits = kBitsPerByte << static_cast<int>(size);
  int op_encoding = 0, imm6 = 0, L = 0;
  switch (op) {
    case VSHL: {
      DCHECK(shift >= 0 && size_in_bits > shift);
      imm6 = size_in_bits + shift;
      op_encoding = 0x5 * B8;
      break;
    }
    case VSHR: {
      DCHECK(shift > 0 && size_in_bits >= shift);
      imm6 = 2 * size_in_bits - shift;
      if (is_unsigned) op_encoding |= B24;
      break;
    }
    case VSLI: {
      DCHECK(shift >= 0 && size_in_bits > shift);
      imm6 = size_in_bits + shift;
      op_encoding = B24 | 0x5 * B8;
      break;
    }
    case VSRI: {
      DCHECK(shift > 0 && size_in_bits >= shift);
      imm6 = 2 * size_in_bits - shift;
      op_encoding = B24 | 0x4 * B8;
      break;
    }
    case VSRA: {
      DCHECK(shift > 0 && size_in_bits >= shift);
      imm6 = 2 * size_in_bits - shift;
      op_encoding |= B8;
      if (is_unsigned) op_encoding |= B24;
      break;
    }
  }

  // For 64-bit elements the immediate overflows six bits; the seventh bit
  // becomes the L bit of the encoding.
  L = imm6 >> 6;
  imm6 &= 0x3F;

  int vd, d;
  NeonSplitCode(reg_type, dst_code, &vd, &d, &op_encoding);
  int vm, m;
  NeonSplitCode(reg_type, src_code, &vm, &m, &op_encoding);

  return 0x1E5U * B23 | op_encoding | d * B22 | imm6 * B16 | vd * B12 |
         L * B7 | m * B5 | B4 | vm;
}
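// Example: a right shift (VSHR/VSRI/VSRA) by 5 on 32-bit elements encodes
// imm6 = 2 * 32 - 5 = 59, while a left shift (VSHL/VSLI) by 5 encodes
// imm6 = 32 + 5 = 37; the decoder recovers both the element size and the
// shift amount from this single field.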
void Assembler::vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src,
                     int shift) {
  DCHECK(IsEnabled(NEON));
  // Qd = vshl(Qm, bits) SIMD shift left immediate.
  emit(EncodeNeonShiftOp(VSHL, NeonDataTypeToSize(dt), false, NEON_Q,
                         dst.code(), src.code(), shift));
}

void Assembler::vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src,
                     QwNeonRegister shift) {
  DCHECK(IsEnabled(NEON));
  // Qd = vshl(Qm, Qn) SIMD shift left by register.
  emit(EncodeNeonShiftRegisterOp(VSHL, dt, NEON_Q, dst.code(), src.code(),
                                 shift.code()));
}

void Assembler::vshr(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src,
                     int shift) {
  DCHECK(IsEnabled(NEON));
  // Dd = vshr(Dm, bits) SIMD shift right immediate.
  emit(EncodeNeonShiftOp(VSHR, NeonDataTypeToSize(dt), NeonU(dt), NEON_D,
                         dst.code(), src.code(), shift));
}

void Assembler::vshr(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src,
                     int shift) {
  DCHECK(IsEnabled(NEON));
  // Qd = vshr(Qm, bits) SIMD shift right immediate.
  emit(EncodeNeonShiftOp(VSHR, NeonDataTypeToSize(dt), NeonU(dt), NEON_Q,
                         dst.code(), src.code(), shift));
}

void Assembler::vsli(NeonSize size, DwVfpRegister dst, DwVfpRegister src,
                     int shift) {
  DCHECK(IsEnabled(NEON));
  // Dd = vsli(Dm, bits) SIMD shift left and insert.
  emit(EncodeNeonShiftOp(VSLI, size, false, NEON_D, dst.code(), src.code(),
                         shift));
}

void Assembler::vsri(NeonSize size, DwVfpRegister dst, DwVfpRegister src,
                     int shift) {
  DCHECK(IsEnabled(NEON));
  // Dd = vsri(Dm, bits) SIMD shift right and insert.
  emit(EncodeNeonShiftOp(VSRI, size, false, NEON_D, dst.code(), src.code(),
                         shift));
}

void Assembler::vsra(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src,
                     int imm) {
  DCHECK(IsEnabled(NEON));
  // Dd = vsra(Dm, imm) SIMD shift right and accumulate.
  emit(EncodeNeonShiftOp(VSRA, NeonDataTypeToSize(dt), NeonU(dt), NEON_D,
                         dst.code(), src.code(), imm));
}
void Assembler::vrecpe(QwNeonRegister dst, QwNeonRegister src) {
  DCHECK(IsEnabled(NEON));
  // Qd = vrecpe(Qm) SIMD reciprocal estimate.
  emit(EncodeNeonUnaryOp(VRECPE, NEON_Q, Neon32, dst.code(), src.code()));
}

void Assembler::vrsqrte(QwNeonRegister dst, QwNeonRegister src) {
  DCHECK(IsEnabled(NEON));
  // Qd = vrsqrte(Qm) SIMD reciprocal square root estimate.
  emit(EncodeNeonUnaryOp(VRSQRTE, NEON_Q, Neon32, dst.code(), src.code()));
}

void Assembler::vrecps(QwNeonRegister dst, QwNeonRegister src1,
                       QwNeonRegister src2) {
  DCHECK(IsEnabled(NEON));
  // Qd = vrecps(Qn, Qm) SIMD reciprocal refinement step.
  emit(EncodeNeonBinOp(VRECPS, dst, src1, src2));
}

void Assembler::vrsqrts(QwNeonRegister dst, QwNeonRegister src1,
                        QwNeonRegister src2) {
  DCHECK(IsEnabled(NEON));
  // Qd = vrsqrts(Qn, Qm) SIMD reciprocal square root refinement step.
  emit(EncodeNeonBinOp(VRSQRTS, dst, src1, src2));
}
enum NeonPairwiseOp { VPADD, VPMIN, VPMAX };

static Instr EncodeNeonPairwiseOp(NeonPairwiseOp op, NeonDataType dt,
                                  DwVfpRegister dst, DwVfpRegister src1,
                                  DwVfpRegister src2) {
  int op_encoding = 0;
  switch (op) {
    case VPADD:
      op_encoding = 0xB * B8 | B4;
      break;
    case VPMIN:
      op_encoding = 0xA * B8 | B4;
      break;
    case VPMAX:
      op_encoding = 0xA * B8;
      break;
  }
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  int size = NeonSz(dt);
  int u = NeonU(dt);
  return 0x1E4U * B23 | u * B24 | d * B22 | size * B20 | vn * B16 | vd * B12 |
         n * B7 | m * B5 | vm | op_encoding;
}
void Assembler::vpadd(DwVfpRegister dst, DwVfpRegister src1,
                      DwVfpRegister src2) {
  DCHECK(IsEnabled(NEON));
  // Dd = vpadd(Dn, Dm) SIMD floating point pairwise ADD.
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);

  emit(0x1E6U * B23 | d * B22 | vn * B16 | vd * B12 | 0xD * B8 | n * B7 |
       m * B5 | vm);
}

void Assembler::vpadd(NeonSize size, DwVfpRegister dst, DwVfpRegister src1,
                      DwVfpRegister src2) {
  DCHECK(IsEnabled(NEON));
  // Dd = vpadd(Dn, Dm) SIMD integer pairwise ADD.
  emit(EncodeNeonPairwiseOp(VPADD, NeonSizeToDataType(size), dst, src1, src2));
}

void Assembler::vpmin(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
                      DwVfpRegister src2) {
  DCHECK(IsEnabled(NEON));
  // Dd = vpmin(Dn, Dm) SIMD integer pairwise MIN.
  emit(EncodeNeonPairwiseOp(VPMIN, dt, dst, src1, src2));
}

void Assembler::vpmax(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
                      DwVfpRegister src2) {
  DCHECK(IsEnabled(NEON));
  // Dd = vpmax(Dn, Dm) SIMD integer pairwise MAX.
  emit(EncodeNeonPairwiseOp(VPMAX, dt, dst, src1, src2));
}
void Assembler::vrintm(NeonDataType dt, const QwNeonRegister dst,
                       const QwNeonRegister src) {
  // SIMD vector round floating-point to integer towards -Infinity.
  DCHECK(IsEnabled(ARMv8));
  emit(EncodeNeonUnaryOp(VRINTM, NEON_Q, NeonSize(dt), dst.code(),
                         src.code()));
}

void Assembler::vrintn(NeonDataType dt, const QwNeonRegister dst,
                       const QwNeonRegister src) {
  // SIMD vector round floating-point to integer to nearest.
  DCHECK(IsEnabled(ARMv8));
  emit(EncodeNeonUnaryOp(VRINTN, NEON_Q, NeonSize(dt), dst.code(),
                         src.code()));
}

void Assembler::vrintp(NeonDataType dt, const QwNeonRegister dst,
                       const QwNeonRegister src) {
  // SIMD vector round floating-point to integer towards +Infinity.
  DCHECK(IsEnabled(ARMv8));
  emit(EncodeNeonUnaryOp(VRINTP, NEON_Q, NeonSize(dt), dst.code(),
                         src.code()));
}

void Assembler::vrintz(NeonDataType dt, const QwNeonRegister dst,
                       const QwNeonRegister src) {
  // SIMD vector round floating-point to integer towards zero.
  DCHECK(IsEnabled(ARMv8));
  emit(EncodeNeonUnaryOp(VRINTZ, NEON_Q, NeonSize(dt), dst.code(),
                         src.code()));
}
void Assembler::vtst(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
                     QwNeonRegister src2) {
  DCHECK(IsEnabled(NEON));
  // Qd = vtst(Qn, Qm) SIMD test integer operands.
  emit(EncodeNeonSizeBinOp(VTST, size, dst, src1, src2));
}

void Assembler::vceq(QwNeonRegister dst, QwNeonRegister src1,
                     QwNeonRegister src2) {
  DCHECK(IsEnabled(NEON));
  // Qd = vceq(Qn, Qm) SIMD floating point compare equal.
  emit(EncodeNeonBinOp(VCEQF, dst, src1, src2));
}

void Assembler::vceq(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
                     QwNeonRegister src2) {
  DCHECK(IsEnabled(NEON));
  // Qd = vceq(Qn, Qm) SIMD integer compare equal.
  emit(EncodeNeonSizeBinOp(VCEQ, size, dst, src1, src2));
}

void Assembler::vceq(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
                     int value) {
  DCHECK(IsEnabled(NEON));
  DCHECK_EQ(0, value);
  // Qd = vceq(Qn, #0) SIMD vector compare equal to zero.
  emit(EncodeNeonUnaryOp(VCEQ0, NEON_Q, size, dst.code(), src1.code()));
}

void Assembler::vcge(QwNeonRegister dst, QwNeonRegister src1,
                     QwNeonRegister src2) {
  DCHECK(IsEnabled(NEON));
  // Qd = vcge(Qn, Qm) SIMD floating point compare greater or equal.
  emit(EncodeNeonBinOp(VCGEF, dst, src1, src2));
}

void Assembler::vcge(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
                     QwNeonRegister src2) {
  DCHECK(IsEnabled(NEON));
  // Qd = vcge(Qn, Qm) SIMD integer compare greater or equal.
  emit(EncodeNeonDataTypeBinOp(VCGE, dt, dst, src1, src2));
}

void Assembler::vcgt(QwNeonRegister dst, QwNeonRegister src1,
                     QwNeonRegister src2) {
  DCHECK(IsEnabled(NEON));
  // Qd = vcgt(Qn, Qm) SIMD floating point compare greater than.
  emit(EncodeNeonBinOp(VCGTF, dst, src1, src2));
}

void Assembler::vcgt(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
                     QwNeonRegister src2) {
  DCHECK(IsEnabled(NEON));
  // Qd = vcgt(Qn, Qm) SIMD integer compare greater than.
  emit(EncodeNeonDataTypeBinOp(VCGT, dt, dst, src1, src2));
}

void Assembler::vclt(NeonSize size, QwNeonRegister dst, QwNeonRegister src,
                     int value) {
  DCHECK(IsEnabled(NEON));
  DCHECK_EQ(0, value);
  // vclt.<size>(Qn, #0) SIMD vector compare less than zero.
  emit(EncodeNeonUnaryOp(VCLT0, NEON_Q, size, dst.code(), src.code()));
}

void Assembler::vrhadd(NeonDataType dt, QwNeonRegister dst,
                       QwNeonRegister src1, QwNeonRegister src2) {
  DCHECK(IsEnabled(NEON));
  // Qd = vrhadd(Qn, Qm) SIMD integer rounding halving add.
  emit(EncodeNeonDataTypeBinOp(VRHADD, dt, dst, src1, src2));
}
void Assembler::vext(QwNeonRegister dst, QwNeonRegister src1,
                     QwNeonRegister src2, int bytes) {
  DCHECK(IsEnabled(NEON));
  // Qd = vext(Qn, Qm) SIMD byte extract.
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  DCHECK_GT(16, bytes);
  emit(0x1E5U * B23 | d * B22 | 0x3 * B20 | vn * B16 | vd * B12 | bytes * B8 |
       n * B7 | B6 | m * B5 | vm);
}
void Assembler::vzip(NeonSize size, DwVfpRegister src1, DwVfpRegister src2) {
  if (size == Neon32) {  // vzip.32 Dd, Dm is a pseudo-op for vtrn.32 Dd, Dm.
    vtrn(size, src1, src2);
  } else {
    DCHECK(IsEnabled(NEON));
    // vzip.<size>(Dn, Dm) SIMD zip (interleave).
    emit(EncodeNeonUnaryOp(VZIP, NEON_D, size, src1.code(), src2.code()));
  }
}

void Assembler::vzip(NeonSize size, QwNeonRegister src1, QwNeonRegister src2) {
  DCHECK(IsEnabled(NEON));
  // vzip.<size>(Qn, Qm) SIMD zip (interleave).
  emit(EncodeNeonUnaryOp(VZIP, NEON_Q, size, src1.code(), src2.code()));
}

void Assembler::vuzp(NeonSize size, DwVfpRegister src1, DwVfpRegister src2) {
  if (size == Neon32) {  // vuzp.32 Dd, Dm is a pseudo-op for vtrn.32 Dd, Dm.
    vtrn(size, src1, src2);
  } else {
    DCHECK(IsEnabled(NEON));
    // vuzp.<size>(Dn, Dm) SIMD un-zip (de-interleave).
    emit(EncodeNeonUnaryOp(VUZP, NEON_D, size, src1.code(), src2.code()));
  }
}

void Assembler::vuzp(NeonSize size, QwNeonRegister src1, QwNeonRegister src2) {
  DCHECK(IsEnabled(NEON));
  // vuzp.<size>(Qn, Qm) SIMD un-zip (de-interleave).
  emit(EncodeNeonUnaryOp(VUZP, NEON_Q, size, src1.code(), src2.code()));
}

void Assembler::vrev16(NeonSize size, QwNeonRegister dst, QwNeonRegister src) {
  DCHECK(IsEnabled(NEON));
  // Qd = vrev16.<size>(Qm) SIMD element reverse.
  emit(EncodeNeonUnaryOp(VREV16, NEON_Q, size, dst.code(), src.code()));
}

void Assembler::vrev32(NeonSize size, QwNeonRegister dst, QwNeonRegister src) {
  DCHECK(IsEnabled(NEON));
  // Qd = vrev32.<size>(Qm) SIMD element reverse.
  emit(EncodeNeonUnaryOp(VREV32, NEON_Q, size, dst.code(), src.code()));
}

void Assembler::vrev64(NeonSize size, QwNeonRegister dst, QwNeonRegister src) {
  DCHECK(IsEnabled(NEON));
  // Qd = vrev64.<size>(Qm) SIMD element reverse.
  emit(EncodeNeonUnaryOp(VREV64, NEON_Q, size, dst.code(), src.code()));
}

void Assembler::vtrn(NeonSize size, DwVfpRegister src1, DwVfpRegister src2) {
  DCHECK(IsEnabled(NEON));
  // vtrn.<size>(Dn, Dm) SIMD element transpose.
  emit(EncodeNeonUnaryOp(VTRN, NEON_D, size, src1.code(), src2.code()));
}

void Assembler::vtrn(NeonSize size, QwNeonRegister src1, QwNeonRegister src2) {
  DCHECK(IsEnabled(NEON));
  // vtrn.<size>(Qn, Qm) SIMD element transpose.
  emit(EncodeNeonUnaryOp(VTRN, NEON_Q, size, src1.code(), src2.code()));
}
void Assembler::vpadal(NeonDataType dt, QwNeonRegister dst,
                       QwNeonRegister src) {
  DCHECK(IsEnabled(NEON));
  // vpadal.<dt>(Qd, Qm) SIMD vector pairwise add and accumulate long.
  emit(EncodeNeonUnaryOp(NeonU(dt) ? VPADAL_U : VPADAL_S, NEON_Q,
                         NeonDataTypeToSize(dt), dst.code(), src.code()));
}

void Assembler::vpaddl(NeonDataType dt, QwNeonRegister dst,
                       QwNeonRegister src) {
  DCHECK(IsEnabled(NEON));
  // vpaddl.<dt>(Qd, Qm) SIMD vector pairwise add long.
  emit(EncodeNeonUnaryOp(NeonU(dt) ? VPADDL_U : VPADDL_S, NEON_Q,
                         NeonDataTypeToSize(dt), dst.code(), src.code()));
}

void Assembler::vqrdmulh(NeonDataType dt, QwNeonRegister dst,
                         QwNeonRegister src1, QwNeonRegister src2) {
  DCHECK(IsEnabled(NEON));
  DCHECK(dt == NeonS16 || dt == NeonS32);
  emit(EncodeNeonDataTypeBinOp(VQRDMULH, dt, dst, src1, src2));
}

void Assembler::vcnt(QwNeonRegister dst, QwNeonRegister src) {
  // Qd = vcnt(Qm) SIMD vector count set bits.
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonUnaryOp(VCNT, NEON_Q, Neon8, dst.code(), src.code()));
}
static Instr EncodeNeonVTB(DwVfpRegister dst, const NeonListOperand& list,
                           DwVfpRegister index, bool vtbx) {
  // Dd = vtbl(table, Dm) SIMD vector permute, zero at out of range indices.
  // Dd = vtbx(table, Dm) SIMD vector permute, skip out of range indices.
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  list.base().split_code(&vn, &n);
  int vm, m;
  index.split_code(&vm, &m);
  int op = vtbx ? 1 : 0;  // vtbl = 0, vtbx = 1.
  return 0x1E7U * B23 | d * B22 | 0x3 * B20 | vn * B16 | vd * B12 | 0x2 * B10 |
         list.length() * B8 | n * B7 | op * B6 | m * B5 | vm;
}

void Assembler::vtbl(DwVfpRegister dst, const NeonListOperand& list,
                     DwVfpRegister index) {
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonVTB(dst, list, index, false));
}

void Assembler::vtbx(DwVfpRegister dst, const NeonListOperand& list,
                     DwVfpRegister index) {
  DCHECK(IsEnabled(NEON));
  emit(EncodeNeonVTB(dst, list, index, true));
}
void Assembler::nop(int type) {
  // ARMv6{K/T2} and v7 have an actual NOP instruction but it serializes
  // some of the CPU's pipeline and has to issue. Older ARM chips simply used
  // MOV Rx, Rx as NOP and it performs better even in newer CPUs. We therefore
  // use MOV Rx, Rx, even on newer CPUs, and use Rx to encode a type.
  DCHECK(0 <= type && type <= 14);  // mov pc, pc isn't a nop.
  emit(al | 13 * B21 | type * B12 | type);
}

void Assembler::pop() { add(sp, sp, Operand(kPointerSize)); }
bool Assembler::IsMovT(Instr instr) {
  instr &= ~(((kNumberOfConditions - 1) << 28) |  // Mask off conditions.
             ((kNumRegisters - 1) * B12) |        // Mask out register.
             EncodeMovwImmediate(0xFFFF));        // Mask out immediate value.
  return instr == kMovtPattern;
}

bool Assembler::IsMovW(Instr instr) {
  instr &= ~(((kNumberOfConditions - 1) << 28) |  // Mask off conditions.
             ((kNumRegisters - 1) * B12) |        // Mask out register.
             EncodeMovwImmediate(0xFFFF));        // Mask out immediate value.
  return instr == kMovwPattern;
}

Instr Assembler::GetMovTPattern() { return kMovtPattern; }

Instr Assembler::GetMovWPattern() { return kMovwPattern; }
Instr Assembler::EncodeMovwImmediate(uint32_t immediate) {
  DCHECK_LT(immediate, 0x10000);
  return ((immediate & 0xF000) << 4) | (immediate & 0xFFF);
}

Instr Assembler::PatchMovwImmediate(Instr instruction, uint32_t immediate) {
  instruction &= ~EncodeMovwImmediate(0xFFFF);
  return instruction | EncodeMovwImmediate(immediate);
}
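// Example: EncodeMovwImmediate(0xABCD) returns 0xA0BCD, i.e. imm4 (0xA) in
// instruction bits 19:16 and imm12 (0xBCD) in bits 11:0, matching the
// movw/movt immediate split. PatchMovwImmediate() clears exactly those bits
// before OR-ing in the new value.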
int Assembler::DecodeShiftImm(Instr instr) {
  int rotate = Instruction::RotateValue(instr) * 2;
  int immed8 = Instruction::Immed8Value(instr);
  return base::bits::RotateRight32(immed8, rotate);
}

Instr Assembler::PatchShiftImm(Instr instr, int immed) {
  uint32_t rotate_imm = 0;
  uint32_t immed_8 = 0;
  bool immed_fits = FitsShifter(immed, &rotate_imm, &immed_8, nullptr);
  DCHECK(immed_fits);
  USE(immed_fits);
  return (instr & ~kOff12Mask) | (rotate_imm << 8) | immed_8;
}
bool Assembler::IsNop(Instr instr, int type) {
  DCHECK(0 <= type && type <= 14);  // mov pc, pc isn't a nop.
  // Check for mov rx, rx where x = type.
  return instr == (al | 13 * B21 | type * B12 | type);
}

bool Assembler::IsMovImmed(Instr instr) {
  return (instr & kMovImmedMask) == kMovImmedPattern;
}

bool Assembler::IsOrrImmed(Instr instr) {
  return (instr & kOrrImmedMask) == kOrrImmedPattern;
}
bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
  uint32_t dummy1;
  uint32_t dummy2;
  return FitsShifter(imm32, &dummy1, &dummy2, nullptr);
}

bool Assembler::ImmediateFitsAddrMode2Instruction(int32_t imm32) {
  return is_uint12(abs(imm32));
}
void Assembler::RecordConstPool(int size) {
  // We only need this for debugger support, to correctly compute offsets in
  // the code.
  RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
}
void Assembler::GrowBuffer() {
  DCHECK_EQ(buffer_start_, buffer_->start());

  // Compute new buffer size.
  int old_size = buffer_->size();
  int new_size = std::min(2 * old_size, old_size + 1 * MB);

  // Some internal data structures overflow for very large buffers,
  // they must ensure that kMaximalBufferSize is not too large.
  if (new_size > kMaximalBufferSize) {
    V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
  }

  // Set up new buffer.
  std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
  DCHECK_EQ(new_size, new_buffer->size());
  uint8_t* new_start = new_buffer->start();

  // Copy the data.
  int pc_delta = new_start - buffer_start_;
  int rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
  size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
  MemMove(new_start, buffer_start_, pc_offset());
  uint8_t* new_reloc_start = reinterpret_cast<uint8_t*>(
      reinterpret_cast<Address>(reloc_info_writer.pos()) + rc_delta);
  MemMove(new_reloc_start, reloc_info_writer.pos(), reloc_size);

  // Switch buffers.
  buffer_ = std::move(new_buffer);
  buffer_start_ = new_start;
  pc_ = reinterpret_cast<uint8_t*>(reinterpret_cast<Address>(pc_) + pc_delta);
  uint8_t* new_last_pc = reinterpret_cast<uint8_t*>(
      reinterpret_cast<Address>(reloc_info_writer.last_pc()) + pc_delta);
  reloc_info_writer.Reposition(new_reloc_start, new_last_pc);

  // None of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no
  // need to relocate any emitted relocation entries.
}
void Assembler::db(uint8_t data) {
  // db is used to write raw data; the constant pool should be emitted or
  // blocked before using db.
  DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}

void Assembler::dd(uint32_t data) {
  // dd is used to write raw data; the constant pool should be emitted or
  // blocked before using dd.
  DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
  CheckBuffer();
  base::WriteUnalignedValue(reinterpret_cast<Address>(pc_), data);
  pc_ += sizeof(uint32_t);
}

void Assembler::dq(uint64_t value) {
  // dq is used to write raw data; the constant pool should be emitted or
  // blocked before using dq.
  DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
  CheckBuffer();
  base::WriteUnalignedValue(reinterpret_cast<Address>(pc_), value);
  pc_ += sizeof(uint64_t);
}
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  if (!ShouldRecordRelocInfo(rmode)) return;
  DCHECK_GE(buffer_space(), kMaxRelocSize);  // Too late to grow buffer here.
  RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data);
  reloc_info_writer.Write(&rinfo);
}
void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
                                     intptr_t value) {
  DCHECK(rmode != RelocInfo::CONST_POOL);
  // We can share CODE_TARGETs and embedded objects, but we must make sure we
  // only emit one reloc info for them (thus delta patching will apply the
  // delta only once). At the moment, we do not deduplicate heap object
  // requests, which are indicated by value == 0.
  bool sharing_ok = RelocInfo::IsShareableRelocMode(rmode) ||
                    (rmode == RelocInfo::CODE_TARGET && value != 0) ||
                    (RelocInfo::IsEmbeddedObjectMode(rmode) && value != 0);
  DCHECK_LT(pending_32_bit_constants_.size(), kMaxNumPending32Constants);
  if (first_const_pool_32_use_ < 0) {
    DCHECK(pending_32_bit_constants_.empty());
    DCHECK_EQ(constant_pool_deadline_, kMaxInt);
    first_const_pool_32_use_ = position;
    constant_pool_deadline_ = position + kCheckPoolDeadline;
  } else {
    DCHECK(!pending_32_bit_constants_.empty());
  }
  ConstantPoolEntry entry(position, value, sharing_ok, rmode);

  bool shared = false;
  if (sharing_ok) {
    // Merge the constant, if possible.
    for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) {
      ConstantPoolEntry& current_entry = pending_32_bit_constants_[i];
      if (!current_entry.sharing_ok()) continue;
      if (entry.value() == current_entry.value() &&
          entry.rmode() == current_entry.rmode()) {
        entry.set_merged_index(i);
        shared = true;
        break;
      }
    }
  }

  pending_32_bit_constants_.emplace_back(entry);

  // Make sure the constant pool is not emitted in place of the next
  // instruction for which we just recorded relocation info.
  BlockConstPoolFor(1);

  // Emit relocation info.
  if (MustOutputRelocInfo(rmode, this) && !shared) {
    RecordRelocInfo(rmode);
  }
}
void Assembler::BlockConstPoolFor(int instructions) {
  int pc_limit = pc_offset() + instructions * kInstrSize;
  if (no_const_pool_before_ < pc_limit) {
    no_const_pool_before_ = pc_limit;
  }

  // If we're due a const pool check before the block finishes, move it to just
  // after the block.
  if (constant_pool_deadline_ < no_const_pool_before_) {
    // Make sure that the new deadline isn't too late (including a jump and the
    // constant pool marker).
    DCHECK_LE(no_const_pool_before_,
              first_const_pool_32_use_ + kMaxDistToIntPool);
    constant_pool_deadline_ = no_const_pool_before_;
  }
}
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  // Some short sequences of instructions must not be broken up by constant
  // pool emission; such sequences are protected by calls to BlockConstPoolFor
  // and BlockConstPoolScope.
  if (is_const_pool_blocked()) {
    // Something is wrong if emission is forced and blocked at the same time.
    DCHECK(!force_emit);
    return;
  }

  // There is nothing to do if there are no pending constant pool entries.
  if (pending_32_bit_constants_.empty()) {
    // We should only fall into this case when we're either forcing emission
    // or opportunistically checking after a jump.
    DCHECK(force_emit || !require_jump);
    return;
  }

  // We emit a constant pool when forced to, or once the deadline set when the
  // first pending entry was recorded comes close enough.
  if (!force_emit) {
    DCHECK_NE(first_const_pool_32_use_, -1);
    int dist32 = pc_offset() - first_const_pool_32_use_;
    if (require_jump) {
      // We should only be on this path if we've exceeded our deadline.
      DCHECK_GE(dist32, kCheckPoolDeadline);
    } else if (dist32 < kCheckPoolDeadline / 2) {
      return;
    }
  }

  int size_after_marker = pending_32_bit_constants_.size() * kPointerSize;

  // Deduplicate constants.
  for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) {
    ConstantPoolEntry& entry = pending_32_bit_constants_[i];
    if (entry.is_merged()) size_after_marker -= kPointerSize;
  }

  // Check that the code buffer is large enough before emitting the constant
  // pool (include the jump over the pool and the constant pool marker).
  int jump_instr = require_jump ? kInstrSize : 0;
  int size_up_to_marker = jump_instr + kInstrSize;
  int size = size_up_to_marker + size_after_marker;
  int needed_space = size + kGap;
  while (buffer_space() <= needed_space) GrowBuffer();

  {
    // Block recursive calls to CheckConstPool.
    BlockConstPoolScope block_const_pool(this);
    RecordConstPool(size);

    Label size_check;
    bind(&size_check);

    // Emit a jump over the constant pool if necessary.
    Label after_pool;
    if (require_jump) {
      b(&after_pool);
    }

    // Put down the constant pool marker, an "undefined instruction". The data
    // size helps disassembly know what to print.
    emit(kConstantPoolMarker |
         EncodeConstantPoolLength(size_after_marker / kPointerSize));

    // The first entry in the pool should also be the first use of the pool.
    CHECK_EQ(first_const_pool_32_use_, pending_32_bit_constants_[0].position());
    CHECK(!pending_32_bit_constants_[0].is_merged());

    // Make sure we're not emitting the constant too late.
    CHECK_LE(pc_offset(),
             first_const_pool_32_use_ + kMaxDistToPcRelativeConstant);

    // Check that the code buffer is large enough for the pool entries
    // (including the gap to the relocation information).
    int needed_space = pending_32_bit_constants_.size() * kPointerSize + kGap;
    while (buffer_space() <= needed_space) {
      GrowBuffer();
    }

    // Emit 32-bit constant pool entries.
    for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) {
      ConstantPoolEntry& entry = pending_32_bit_constants_[i];
      Instr instr = instr_at(entry.position());

      // Instructions to patch must be pc-relative loads with a zero offset.
      DCHECK(IsLdrPcImmediateOffset(instr) &&
             GetLdrRegisterImmediateOffset(instr) == 0);

      int delta = pc_offset() - entry.position() - Instruction::kPcLoadDelta;
      DCHECK(is_uint12(delta));

      if (entry.is_merged()) {
        DCHECK(entry.sharing_ok());
        ConstantPoolEntry& merged =
            pending_32_bit_constants_[entry.merged_index()];
        DCHECK(entry.value() == merged.value());
        DCHECK_LT(merged.position(), entry.position());
        Instr merged_instr = instr_at(merged.position());
        DCHECK(IsLdrPcImmediateOffset(merged_instr));
        delta = GetLdrRegisterImmediateOffset(merged_instr);
        delta += merged.position() - entry.position();
      }
      instr_at_put(entry.position(),
                   SetLdrRegisterImmediateOffset(instr, delta));
      if (!entry.is_merged()) {
        emit(entry.value());
      }
    }

    pending_32_bit_constants_.clear();

    first_const_pool_32_use_ = -1;

    DCHECK_EQ(size, SizeOfCodeGeneratedSince(&size_check));

    if (after_pool.is_linked()) {
      bind(&after_pool);
    }
  }

  // Since a constant pool was just emitted, we don't need another check until
  // the next constant pool entry is added.
  constant_pool_deadline_ = kMaxInt;
}
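// The emitted pool layout is: an optional branch over the pool, the marker
// word (an undefined-instruction encoding that also records the pool length),
// then the 32-bit entries. Each pending ldr is back-patched with the
// pc-relative offset of its entry; merged entries are patched to point at an
// earlier duplicate instead of emitting a new word.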
PatchingAssembler::PatchingAssembler(const AssemblerOptions& options,
                                     uint8_t* address, int instructions)
    : Assembler(options, ExternalAssemblerBuffer(
                             address, instructions * kInstrSize + kGap)) {
  DCHECK_EQ(reloc_info_writer.pos(), buffer_start_ + buffer_->size());
}

PatchingAssembler::~PatchingAssembler() {
  // Check that we don't have any pending constant pools.
  DCHECK(pending_32_bit_constants_.empty());

  // Check that the code was patched as expected.
  DCHECK_EQ(pc_, buffer_start_ + buffer_->size() - kGap);
  DCHECK_EQ(reloc_info_writer.pos(), buffer_start_ + buffer_->size());
}

void PatchingAssembler::Emit(Address addr) { emit(static_cast<Instr>(addr)); }

void PatchingAssembler::PadWithNops() {
  DCHECK_LE(pc_, buffer_start_ + buffer_->size() - kGap);
  while (pc_ < buffer_start_ + buffer_->size() - kGap) {
    nop();
  }
}
LoadStoreLaneParams::LoadStoreLaneParams(MachineRepresentation rep,
                                         uint8_t laneidx) {
  if (rep == MachineRepresentation::kWord8) {
    *this = LoadStoreLaneParams(laneidx, Neon8, 8);
  } else if (rep == MachineRepresentation::kWord16) {
    *this = LoadStoreLaneParams(laneidx, Neon16, 4);
  } else if (rep == MachineRepresentation::kWord32) {
    *this = LoadStoreLaneParams(laneidx, Neon32, 2);
  } else if (rep == MachineRepresentation::kWord64) {
    *this = LoadStoreLaneParams(laneidx, Neon64, 1);
  } else {
    UNREACHABLE();
  }
}