#include <sys/utsname.h>

#if V8_TARGET_ARCH_PPC64

#if V8_ENABLE_WEBASSEMBLY

  if (cross_compile) return;

  memset(reinterpret_cast<void*>(&uts), 0, sizeof(uts));
  int rel = atoi(uts.release);

  const char* ppc_arch = nullptr;
  printf("target %s\n", ppc_arch);

  const Register kRegisters[] = {r0, sp, r2, r3, r4, r5, r6, r7,
                                 r8, r9, r10, r11, ip, r13, r14, r15,
                                 r16, r17, r18, r19, r20, r21, r22, r23,
                                 r24, r25, r26, r27, r28, r29, r30, fp};
  return kRegisters[num];

  return static_cast<uint32_t>(

  result.is_heap_number_request_ = true;
  result.value_.heap_number_request = HeapNumberRequest(value);

MemOperand::MemOperand(Register rn, int64_t offset)

MemOperand::MemOperand(Register ra, Register rb)
    : ra_(ra), offset_(0), rb_(rb) {}

MemOperand::MemOperand(Register ra, Register rb, int64_t offset)

void Assembler::AllocateAndInstallRequestedHeapNumbers(LocalIsolate* isolate) {
  DCHECK_IMPLIES(isolate == nullptr, heap_number_requests_.empty());
  for (auto& request : heap_number_requests_) {
    Handle<HeapObject> object =
        isolate->factory()->NewHeapNumber<AllocationType::kOld>(
            request.heap_number());
    set_target_address_at(pc, constant_pool, object.address(), nullptr,
Assembler::Assembler(const AssemblerOptions& options,
                     std::unique_ptr<AssemblerBuffer> buffer)
    : AssemblerBase(options, std::move(buffer)),
      scratch_register_list_({ip}),
      constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits) {
  reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  constant_pool_entry_sharing_blocked_nesting_ = 0;
  next_trampoline_check_ = kMaxInt;
  internal_trampoline_exception_ = false;
  optimizable_cmpi_pos_ = -1;
  trampoline_emitted_ = v8_flags.force_long_branches;
  tracked_branch_count_ = 0;
  relocations_.reserve(128);
}
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
  GetCode(isolate->main_thread_local_isolate(), desc);
}

void Assembler::GetCode(LocalIsolate* isolate, CodeDesc* desc,
                        SafepointTableBuilderBase* safepoint_table_builder,
                        int handler_table_offset) {
  DataAlign(InstructionStream::kMetadataAlignment);

  int constant_pool_size = EmitConstantPool();

  int code_comments_size = WriteCodeComments();

  AllocateAndInstallRequestedHeapNumbers(isolate);

  static constexpr int kBuiltinJumpTableInfoSize = 0;
  const int instruction_size = pc_offset();
  const int builtin_jump_table_info_offset =
      instruction_size - kBuiltinJumpTableInfoSize;
  const int code_comments_offset =
      builtin_jump_table_info_offset - code_comments_size;
  const int constant_pool_offset = code_comments_offset - constant_pool_size;
  const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
                                        ? constant_pool_offset
                                        : handler_table_offset;
  const int safepoint_table_offset =
      (safepoint_table_builder == kNoSafepointTable)
          ? handler_table_offset2
          : safepoint_table_builder->safepoint_table_offset();
  const int reloc_info_offset =
      static_cast<int>(reloc_info_writer.pos() - buffer_->start());
  CodeDesc::Initialize(desc, this, safepoint_table_offset,
                       handler_table_offset2, constant_pool_offset,
                       code_comments_offset, builtin_jump_table_info_offset,
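
// Rough picture of what the arithmetic above implies: the metadata sections
// are appended after the instructions and each offset is derived by
// subtracting the size of the section that follows it, giving a layout of
//   [instructions | safepoint table | handler table | constant pool |
//    code comments | builtin jump table info]
// This is a reading of the offset computations here, not an authoritative
// description of the CodeDesc contract.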
void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));

void Assembler::CodeTargetAlign() { Align(8); }

  switch (instr & kCondMask) {

bool Assembler::IsLis(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIS) && GetRA(instr) == r0;
}

bool Assembler::IsLi(Instr instr) {
  return ((instr & kOpcodeMask) == ADDI) && GetRA(instr) == r0;
}

bool Assembler::IsAddic(Instr instr) { return (instr & kOpcodeMask) == ADDIC; }

bool Assembler::IsOri(Instr instr) { return (instr & kOpcodeMask) == ORI; }

bool Assembler::IsBranch(Instr instr) { return ((instr & kOpcodeMask) == BCX); }

  return Register::from_code(Instruction::RAValue(instr));

  return Register::from_code(Instruction::RBValue(instr));

bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3,
                                   Instr instr4, Instr instr5) {
  return (((instr1 >> 16) == 0x3D80) && ((instr2 >> 16) == 0x618C) &&
          (instr3 == 0x798C07C6) && ((instr4 >> 16) == 0x658C) &&
          ((instr5 >> 16) == 0x618C));
}
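
// These raw constants appear to match the five-instruction r12 constant-load
// sequence that bitwise_mov() emits further below: 0x3D80 is the upper half
// of `addis r12, r0, ...` (lis), 0x618C that of `ori r12, r12, ...`,
// 0x798C07C6 is `rldicr r12, r12, 32, 31` (a shift left by 32), and 0x658C
// is the upper half of `oris r12, r12, ...` -- an interpretation of the
// masks rather than a guaranteed reading.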
bool Assembler::IsCmpRegister(Instr instr) {
  return (((instr & kOpcodeMask) == EXT2) &&
          ((EXT2 | (instr & kExt2OpcodeMask)) == CMP));
}

bool Assembler::IsRlwinm(Instr instr) {
  return ((instr & kOpcodeMask) == RLWINMX);
}

bool Assembler::IsAndi(Instr instr) { return ((instr & kOpcodeMask) == ANDIx); }

bool Assembler::IsRldicl(Instr instr) {
  return (((instr & kOpcodeMask) == EXT5) &&
          ((EXT5 | (instr & kExt5OpcodeMask)) == RLDICL));
}

bool Assembler::IsCmpImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == CMPI);
}

bool Assembler::IsCrSet(Instr instr) {
  return (((instr & kOpcodeMask) == EXT1) &&
          ((EXT1 | (instr & kExt1OpcodeMask)) == CREQV));
}

int Assembler::GetCmpImmediateRawImmediate(Instr instr) {

  kUnboundMovLabelOffsetOpcode = 0 << 26,
  kUnboundAddLabelOffsetOpcode = 1 << 26,
  kUnboundAddLabelLongOffsetOpcode = 2 << 26,
  kUnboundMovLabelAddrOpcode = 3 << 26,
  kUnboundJumpTableEntryOpcode = 4 << 26
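
// These pseudo-opcodes occupy the primary opcode field (bits 26 and up) of
// placeholder instructions emitted while a label is still unbound; the low
// 26 bits hold the link to the previous use of the same label.
// target_at_put() below switches on them to decide how a placeholder is
// rewritten once the label is bound.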
int Assembler::target_at(int pos) {
    case kUnboundMovLabelOffsetOpcode:
    case kUnboundAddLabelOffsetOpcode:
    case kUnboundAddLabelLongOffsetOpcode:
    case kUnboundMovLabelAddrOpcode:
    case kUnboundJumpTableEntryOpcode:

void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
  if (is_branch != nullptr) {
    *is_branch = (opcode == BX || opcode == BCX);

      int imm26 = target_pos - pos;
      CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
      if (imm26 == kInstrSize && !(instr & kLKMask)) {

      int imm16 = target_pos - pos;
      CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
      if (imm16 == kInstrSize && !(instr & kLKMask)) {

    case kUnboundMovLabelOffsetOpcode: {
      Register dst = Register::from_code(instr_at(pos + kInstrSize));
      PatchingAssembler patcher(
          options(), reinterpret_cast<uint8_t*>(buffer_start_ + pos), 2);
      patcher.bitwise_mov32(dst, offset);

    case kUnboundAddLabelLongOffsetOpcode:
    case kUnboundAddLabelOffsetOpcode: {
      Instr operands = instr_at(pos + kInstrSize);
      Register dst = Register::from_code((operands >> 27) & 0x1F);
      Register base = Register::from_code((operands >> 22) & 0x1F);
      int32_t delta = (opcode == kUnboundAddLabelLongOffsetOpcode)
                          ? static_cast<int32_t>(instr_at(pos + 2 * kInstrSize))
      PatchingAssembler patcher(
          options(), reinterpret_cast<uint8_t*>(buffer_start_ + pos),
          2 + static_cast<int32_t>(opcode == kUnboundAddLabelLongOffsetOpcode));
      if (opcode == kUnboundAddLabelLongOffsetOpcode) patcher.nop();

    case kUnboundMovLabelAddrOpcode: {
      Register dst = Register::from_code(instr_at(pos + kInstrSize));
      PatchingAssembler patcher(options(),
                                reinterpret_cast<uint8_t*>(buffer_start_ + pos),
                                kMovInstructionsNoConstantPool);
      patcher.bitwise_mov(dst, target_pos);

    case kUnboundJumpTableEntryOpcode: {
      PatchingAssembler patcher(options(),
                                reinterpret_cast<uint8_t*>(buffer_start_ + pos),
                                kSystemPointerSize / kInstrSize);
      patcher.dp(target_pos);
int Assembler::max_reach_from(int pos) {
    case kUnboundMovLabelOffsetOpcode:
    case kUnboundAddLabelOffsetOpcode:
    case kUnboundMovLabelAddrOpcode:
    case kUnboundJumpTableEntryOpcode:

void Assembler::bind_to(Label* L, int pos) {
  int32_t trampoline_pos = kInvalidSlotPos;
  bool is_branch = false;
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    int maxReach = max_reach_from(fixup_pos);
      if (trampoline_pos == kInvalidSlotPos) {
        trampoline_pos = get_trampoline_entry();
        CHECK_NE(trampoline_pos, kInvalidSlotPos);
        target_at_put(trampoline_pos, pos);
      target_at_put(fixup_pos, trampoline_pos);
      target_at_put(fixup_pos, pos, &is_branch);

  if (!trampoline_emitted_ && is_branch) {

  if (pos > last_bound_pos_) last_bound_pos_ = pos;
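
// Roughly, bind_to() walks the label's fixup chain: a fixup whose reach
// (max_reach_from) covers the bound position is patched to point at it
// directly, while an out-of-range fixup is redirected to a trampoline slot
// that in turn branches to the target. This summarizes the control flow
// visible here; elided lines may refine it.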
void Assembler::bind(Label* L) {

void Assembler::next(Label* L) {
  int link = target_at(L->pos());
  if (link == kEndOfChain) {

bool Assembler::is_near(Label* L, Condition cond) {
  if (L->is_bound() == false) return false;

  int maxReach = ((cond == al) ? 26 : 16);

void Assembler::a_form(Instr instr, DoubleRegister frt, DoubleRegister fra,
                       DoubleRegister frb, RCBit r) {
  emit(instr | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 | r);
}

void Assembler::d_form(Instr instr, Register rt, Register ra,
                       const intptr_t val, bool signed_disp) {
    if (!is_int16(val)) {
    CHECK(is_int16(val));
    if (!is_uint16(val)) {
             ", is_unsigned_imm16(val)=%d, kImm16Mask=0x%x\n",
             val, val, is_uint16(val), kImm16Mask);
    CHECK(is_uint16(val));
  emit(instr | rt.code() * B21 | ra.code() * B16 | (kImm16Mask & val));

void Assembler::xo_form(Instr instr, Register rt, Register ra, Register rb,
  emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | o | r);

void Assembler::md_form(Instr instr, Register ra, Register rs, int shift,
                        int maskbit, RCBit r) {
  int sh0_4 = shift & 0x1F;
  int sh5 = (shift >> 5) & 0x1;
  int m0_4 = maskbit & 0x1F;
  int m5 = (maskbit >> 5) & 0x1;
  emit(instr | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 | m0_4 * B6 |
       m5 * B5 | sh5 * B1 | r);
}

void Assembler::mds_form(Instr instr, Register ra, Register rs, Register rb,
                         int maskbit, RCBit r) {
  int m0_4 = maskbit & 0x1F;
  int m5 = (maskbit >> 5) & 0x1;
  emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | m0_4 * B6 |
int32_t Assembler::get_trampoline_entry() {
  int32_t trampoline_entry = kInvalidSlotPos;

  if (!internal_trampoline_exception_) {
    trampoline_entry = trampoline_.take_slot();

    if (kInvalidSlotPos == trampoline_entry) {
      internal_trampoline_exception_ = true;

  return trampoline_entry;

int Assembler::link(Label* L) {
  if (L->is_linked()) {

void Assembler::bclr(BOfield bo, int condition_bit, LKBit lk) {
  emit(EXT1 | static_cast<uint32_t>(bo) | condition_bit * B16 | BCLRX | lk);
}

void Assembler::bcctr(BOfield bo, int condition_bit, LKBit lk) {
  emit(EXT1 | static_cast<uint32_t>(bo) | condition_bit * B16 | BCCTRX | lk);
}

void Assembler::blr() { bclr(BA, 0, LeaveLK); }

void Assembler::bctr() { bcctr(BA, 0, LeaveLK); }

void Assembler::bctrl() { bcctr(BA, 0, SetLK); }

void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) {
  int imm16 = branch_offset;
  CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
  emit(BCX | static_cast<uint32_t>(bo) | condition_bit * B16 |
       (imm16 & kImm16Mask) | lk);
}

void Assembler::b(int branch_offset, LKBit lk) {
  int imm26 = branch_offset;
  CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
  emit(BX | (imm26 & kImm26Mask) | lk);
}
void Assembler::xori(Register dst, Register src, const Operand& imm) {
  d_form(XORI, src, dst, imm.immediate(), false);
}

void Assembler::xoris(Register ra, Register rs, const Operand& imm) {
  d_form(XORIS, rs, ra, imm.immediate(), false);
}

void Assembler::rlwinm(Register ra, Register rs, int sh, int mb, int me,
  emit(RLWINMX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |

void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me,
  emit(RLWNMX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | mb * B6 |

void Assembler::rlwimi(Register ra, Register rs, int sh, int mb, int me,
  emit(RLWIMIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |

void Assembler::slwi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
  rlwinm(dst, src, val.immediate(), 0, 31 - val.immediate(), rc);
}

void Assembler::srwi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
  rlwinm(dst, src, 32 - val.immediate(), val.immediate(), 31, rc);
}

void Assembler::clrrwi(Register dst, Register src, const Operand& val,
  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
  rlwinm(dst, src, 0, 0, 31 - val.immediate(), rc);

void Assembler::clrlwi(Register dst, Register src, const Operand& val,
  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
  rlwinm(dst, src, 0, val.immediate(), 31, rc);

void Assembler::rotlw(Register ra, Register rs, Register rb, RCBit r) {
  rlwnm(ra, rs, rb, 0, 31, r);
}

void Assembler::rotlwi(Register ra, Register rs, int sh, RCBit r) {
  rlwinm(ra, rs, sh, 0, 31, r);
}

void Assembler::rotrwi(Register ra, Register rs, int sh, RCBit r) {
  rlwinm(ra, rs, 32 - sh, 0, 31, r);
}

void Assembler::subi(Register dst, Register src, const Operand& imm) {
  addi(dst, src, Operand(-(imm.immediate())));
}

void Assembler::addc(Register dst, Register src1, Register src2, OEBit o,
  xo_form(EXT2 | ADDCX, dst, src1, src2, o, r);

void Assembler::adde(Register dst, Register src1, Register src2, OEBit o,
  xo_form(EXT2 | ADDEX, dst, src1, src2, o, r);

void Assembler::addze(Register dst, Register src1, OEBit o, RCBit r) {
  emit(EXT2 | ADDZEX | dst.code() * B21 | src1.code() * B16 | o | r);
}

void Assembler::sub(Register dst, Register src1, Register src2, OEBit o,
  xo_form(EXT2 | SUBFX, dst, src2, src1, o, r);

void Assembler::subc(Register dst, Register src1, Register src2, OEBit o,
  xo_form(EXT2 | SUBFCX, dst, src2, src1, o, r);

void Assembler::sube(Register dst, Register src1, Register src2, OEBit o,
  xo_form(EXT2 | SUBFEX, dst, src2, src1, o, r);

void Assembler::subfic(Register dst, Register src, const Operand& imm) {
  d_form(SUBFIC, dst, src, imm.immediate(), true);
}

void Assembler::add(Register dst, Register src1, Register src2, OEBit o,
  xo_form(EXT2 | ADDX, dst, src1, src2, o, r);

void Assembler::mullw(Register dst, Register src1, Register src2, OEBit o,
  xo_form(EXT2 | MULLW, dst, src1, src2, o, r);

void Assembler::mulli(Register dst, Register src, const Operand& imm) {
  d_form(MULLI, dst, src, imm.immediate(), true);
}

void Assembler::mulhd(Register dst, Register src1, Register src2, RCBit r) {
  xo_form(EXT2 | MULHD, dst, src1, src2, LeaveOE, r);
}

void Assembler::mulhdu(Register dst, Register src1, Register src2, RCBit r) {
  xo_form(EXT2 | MULHDU, dst, src1, src2, LeaveOE, r);
}

void Assembler::mulhw(Register dst, Register src1, Register src2, RCBit r) {
  xo_form(EXT2 | MULHWX, dst, src1, src2, LeaveOE, r);
}

void Assembler::mulhwu(Register dst, Register src1, Register src2, RCBit r) {
  xo_form(EXT2 | MULHWUX, dst, src1, src2, LeaveOE, r);
}

void Assembler::divw(Register dst, Register src1, Register src2, OEBit o,
  xo_form(EXT2 | DIVW, dst, src1, src2, o, r);

void Assembler::divwu(Register dst, Register src1, Register src2, OEBit o,
  xo_form(EXT2 | DIVWU, dst, src1, src2, o, r);
void Assembler::addi(Register dst, Register src, const Operand& imm) {
  d_form(ADDI, dst, src, imm.immediate(), true);
}

void Assembler::addis(Register dst, Register src, const Operand& imm) {
  d_form(ADDIS, dst, src, imm.immediate(), true);
}

void Assembler::addic(Register dst, Register src, const Operand& imm) {
  d_form(ADDIC, dst, src, imm.immediate(), true);
}

void Assembler::andi(Register ra, Register rs, const Operand& imm) {
  d_form(ANDIx, rs, ra, imm.immediate(), false);
}

void Assembler::andis(Register ra, Register rs, const Operand& imm) {
  d_form(ANDISx, rs, ra, imm.immediate(), false);
}

void Assembler::ori(Register ra, Register rs, const Operand& imm) {
  d_form(ORI, rs, ra, imm.immediate(), false);
}

void Assembler::oris(Register dst, Register src, const Operand& imm) {
  d_form(ORIS, src, dst, imm.immediate(), false);
}

void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.immediate();
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
}

void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.immediate();
  DCHECK(is_uint16(uimm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
}

void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.immediate();
  DCHECK(cr.code() >= 0 && cr.code() <= 7);

  if (imm16 == 0 && pos > 0 && last_bound_pos_ != pos) {
    optimizable_cmpi_pos_ = pos;
  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);

void Assembler::cmplwi(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.immediate();
  DCHECK(is_uint16(uimm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
}

void Assembler::isel(Register rt, Register ra, Register rb, int cb) {
  emit(EXT2 | ISEL | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |

void Assembler::li(Register dst, const Operand& imm) {
  d_form(ADDI, dst, r0, imm.immediate(), true);
}

void Assembler::lis(Register dst, const Operand& imm) {
  d_form(ADDIS, dst, r0, imm.immediate(), true);
}
void Assembler::mr(Register dst, Register src) {

void Assembler::lbz(Register dst, const MemOperand& src) {
  d_form(LBZ, dst, src.ra(), src.offset(), true);
}

void Assembler::lhz(Register dst, const MemOperand& src) {
  d_form(LHZ, dst, src.ra(), src.offset(), true);
}

void Assembler::lwz(Register dst, const MemOperand& src) {
  d_form(LWZ, dst, src.ra(), src.offset(), true);
}

void Assembler::lwzu(Register dst, const MemOperand& src) {
  d_form(LWZU, dst, src.ra(), src.offset(), true);
}

void Assembler::lha(Register dst, const MemOperand& src) {
  d_form(LHA, dst, src.ra(), src.offset(), true);
}

void Assembler::lwa(Register dst, const MemOperand& src) {
  int offset = src.offset();
  emit(LD | dst.code() * B21 | src.ra().code() * B16 | offset | 2);
}

void Assembler::stb(Register dst, const MemOperand& src) {
  d_form(STB, dst, src.ra(), src.offset(), true);
}

void Assembler::sth(Register dst, const MemOperand& src) {
  d_form(STH, dst, src.ra(), src.offset(), true);
}

void Assembler::stw(Register dst, const MemOperand& src) {
  d_form(STW, dst, src.ra(), src.offset(), true);
}

void Assembler::stwu(Register dst, const MemOperand& src) {
  d_form(STWU, dst, src.ra(), src.offset(), true);
}

void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) {
  emit(EXT2 | NEGX | rt.code() * B21 | ra.code() * B16 | o | r);
}

void Assembler::ld(Register rd, const MemOperand& src) {
  int offset = src.offset();
  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset);
}

void Assembler::ldu(Register rd, const MemOperand& src) {
  int offset = src.offset();
  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset | 1);
}

void Assembler::std(Register rs, const MemOperand& src) {
  int offset = src.offset();
  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset);
}

void Assembler::stdu(Register rs, const MemOperand& src) {
  int offset = src.offset();
  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset | 1);
}
void Assembler::rldic(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIC, ra, rs, sh, mb, r);
}

void Assembler::rldicl(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDICL, ra, rs, sh, mb, r);
}

void Assembler::rldcl(Register ra, Register rs, Register rb, int mb, RCBit r) {
  mds_form(EXT5 | RLDCL, ra, rs, rb, mb, r);
}

void Assembler::rldicr(Register ra, Register rs, int sh, int me, RCBit r) {
  md_form(EXT5 | RLDICR, ra, rs, sh, me, r);
}

void Assembler::sldi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
  rldicr(dst, src, val.immediate(), 63 - val.immediate(), rc);
}

void Assembler::srdi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
  rldicl(dst, src, 64 - val.immediate(), val.immediate(), rc);
}

void Assembler::clrrdi(Register dst, Register src, const Operand& val,
  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
  rldicr(dst, src, 0, 63 - val.immediate(), rc);

void Assembler::clrldi(Register dst, Register src, const Operand& val,
  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
  rldicl(dst, src, 0, val.immediate(), rc);

void Assembler::rldimi(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIMI, ra, rs, sh, mb, r);
}

void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) {
  int sh0_4 = sh & 0x1F;
  int sh5 = (sh >> 5) & 0x1;
  emit(EXT2 | SRADIX | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 |

void Assembler::rotld(Register ra, Register rs, Register rb, RCBit r) {
  rldcl(ra, rs, rb, 0, r);
}

void Assembler::rotldi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, sh, 0, r);
}

void Assembler::rotrdi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, 64 - sh, 0, r);
}

void Assembler::mulld(Register dst, Register src1, Register src2, OEBit o,
  xo_form(EXT2 | MULLD, dst, src1, src2, o, r);

void Assembler::divd(Register dst, Register src1, Register src2, OEBit o,
  xo_form(EXT2 | DIVD, dst, src1, src2, o, r);

void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
  xo_form(EXT2 | DIVDU, dst, src1, src2, o, r);

#define GENERATE_PREFIX_SUFFIX_BITS(immediate, prefix, suffix) \
  CHECK(is_int34(immediate));                                  \
      SIGN_EXT_IMM18((immediate >> 16) & kImm18Mask);          \
  int16_t suffix = immediate & kImm16Mask;                     \
  DCHECK(is_int18(prefix));
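
// Sketch of the split performed by this macro, assuming the usual prefixed
// instruction layout: the 34-bit signed immediate is cut into a sign-extended
// 18-bit upper part (bits 16..33) that goes into the prefix word and a 16-bit
// lower part (bits 0..15) that goes into the suffix instruction. For example,
// an immediate of 0x1'2345'6789 would yield a prefix of 0x12345 and a suffix
// of 0x6789.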
void Assembler::paddi(Register dst, Register src, const Operand& imm) {
  CHECK(CpuFeatures::IsSupported(PPC_10_PLUS));
  intptr_t immediate = imm.immediate();
  GENERATE_PREFIX_SUFFIX_BITS(immediate, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));
  addi(dst, src, Operand(lo));
}

void Assembler::pli(Register dst, const Operand& imm) {
  CHECK(CpuFeatures::IsSupported(PPC_10_PLUS));
  intptr_t immediate = imm.immediate();
  GENERATE_PREFIX_SUFFIX_BITS(immediate, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));
  li(dst, Operand(lo));
}

void Assembler::psubi(Register dst, Register src, const Operand& imm) {
  paddi(dst, src, Operand(-(imm.immediate())));
}

void Assembler::plbz(Register dst, const MemOperand& src) {
  int64_t offset = src.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));

void Assembler::plhz(Register dst, const MemOperand& src) {
  int64_t offset = src.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));

void Assembler::plha(Register dst, const MemOperand& src) {
  int64_t offset = src.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));

void Assembler::plwz(Register dst, const MemOperand& src) {
  int64_t offset = src.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));

void Assembler::plwa(Register dst, const MemOperand& src) {
  int64_t offset = src.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_8ls(Operand(hi));
  emit(PPLWA | dst.code() * B21 | src.ra().code() * B16 | (lo & kImm16Mask));
}

void Assembler::pld(Register dst, const MemOperand& src) {
  int64_t offset = src.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_8ls(Operand(hi));
  emit(PPLD | dst.code() * B21 | src.ra().code() * B16 | (lo & kImm16Mask));
}

void Assembler::plfs(DoubleRegister dst, const MemOperand& src) {
  int64_t offset = src.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));

void Assembler::plfd(DoubleRegister dst, const MemOperand& src) {
  int64_t offset = src.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));

void Assembler::pstb(Register src, const MemOperand& dst) {
  int64_t offset = dst.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));

void Assembler::psth(Register src, const MemOperand& dst) {
  int64_t offset = dst.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));

void Assembler::pstw(Register src, const MemOperand& dst) {
  int64_t offset = dst.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));

void Assembler::pstd(Register src, const MemOperand& dst) {
  int64_t offset = dst.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_8ls(Operand(hi));
  emit(PPSTD | src.code() * B21 | dst.ra().code() * B16 | (lo & kImm16Mask));
}

void Assembler::pstfs(const DoubleRegister src, const MemOperand& dst) {
  int64_t offset = dst.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));

void Assembler::pstfd(const DoubleRegister src, const MemOperand& dst) {
  int64_t offset = dst.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));

#undef GENERATE_PREFIX_SUFFIX_BITS
int Assembler::instructions_required_for_mov(Register dst,
                                             const Operand& src) const {
      !(src.must_output_reloc_info(this) || is_trampoline_pool_blocked());
  if (use_constant_pool_for_mov(dst, src, canOptimize)) {
    if (ConstantPoolAccessIsInOverflow()) {
      return kMovInstructionsConstantPool + 1;
    return kMovInstructionsConstantPool;
  return kMovInstructionsNoConstantPool;

bool Assembler::use_constant_pool_for_mov(Register dst, const Operand& src,
                                          bool canOptimize) const {
  intptr_t value = src.immediate();
  bool allowOverflow = !((canOptimize && is_int32(value)) || dst == r0);
       (CpuFeatures::IsSupported(PPC_10_PLUS) && is_int34(value)))) {
  if (!allowOverflow && ConstantPoolAccessIsInOverflow()) {

void Assembler::EnsureSpaceFor(int space_needed) {
  if (buffer_space() <= (kGap + space_needed)) {
    GrowBuffer(space_needed);

bool Operand::must_output_reloc_info(const Assembler* assembler) const {
  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
    if (assembler != nullptr && assembler->predictable_code_size()) return true;
    return assembler->options().record_reloc_info_for_serialization;
  } else if (RelocInfo::IsNoInfo(rmode_)) {
void Assembler::mov(Register dst, const Operand& src) {
  if (src.IsHeapNumberRequest()) {
    RequestHeapNumber(src.heap_number_request());
    value = src.immediate();

  bool relocatable = src.must_output_reloc_info(this);
        (is_trampoline_pool_blocked() &&
         (!is_int16(value) ||
          !(CpuFeatures::IsSupported(PPC_10_PLUS) && is_int34(value)))));

  if (!src.IsHeapNumberRequest() &&
      use_constant_pool_for_mov(dst, src, canOptimize)) {
    DCHECK(is_constant_pool_available());
      RecordRelocInfo(src.rmode_);
    ConstantPoolEntry::Access access = ConstantPoolAddEntry(src.rmode_, value);
    if (access == ConstantPoolEntry::OVERFLOWED) {
      addis(dst, kConstantPoolRegister, Operand::Zero());
      ld(dst, MemOperand(kConstantPoolRegister, 0));

    if (is_int16(value)) {
      li(dst, Operand(value));
    } else if (CpuFeatures::IsSupported(PPC_10_PLUS) && is_int34(value)) {
      pli(dst, Operand(value));
      if (is_int32(value)) {
        lis(dst, Operand(value >> 16));
        if (is_int48(value)) {
          li(dst, Operand(value >> 32));
          lis(dst, Operand(value >> 48));
          u16 = ((value >> 32) & 0xFFFF);
            ori(dst, dst, Operand(u16));
        sldi(dst, dst, Operand(32));
        u16 = ((value >> 16) & 0xFFFF);
          oris(dst, dst, Operand(u16));
      u16 = (value & 0xFFFF);
        ori(dst, dst, Operand(u16));

    RecordRelocInfo(src.rmode_);
  bitwise_mov(dst, value);

void Assembler::bitwise_mov(Register dst, intptr_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  int hi_word = static_cast<int>(hi_32 >> 16);
  int lo_word = static_cast<int>(hi_32 & 0xFFFF);
  ori(dst, dst, Operand(lo_word));
  sldi(dst, dst, Operand(32));
  hi_word = static_cast<int>(((lo_32 >> 16) & 0xFFFF));
  lo_word = static_cast<int>(lo_32 & 0xFFFF);
  oris(dst, dst, Operand(hi_word));
  ori(dst, dst, Operand(lo_word));
}
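
// Worked example for the fixed sequence sketched above, using the
// hypothetical value 0x0123'4567'89AB'CDEF: the upper 32 bits are built
// first (0x0123 into the high halfword, then ori with 0x4567), the register
// is shifted left by 32, and the lower 32 bits are merged with oris
// (0x89AB) and ori (0xCDEF).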
void Assembler::bitwise_mov32(Register dst, int32_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  int hi_word = static_cast<int>(value >> 16);
  int lo_word = static_cast<int>(value & 0xFFFF);
  ori(dst, dst, Operand(lo_word));
}

void Assembler::bitwise_add32(Register dst, Register src, int32_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (is_int16(value)) {
    addi(dst, src, Operand(value));
    int hi_word = static_cast<int>(value >> 16);
    int lo_word = static_cast<int>(value & 0xFFFF);
    if (lo_word & 0x8000) hi_word++;
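
// The hi_word++ adjustment above compensates for sign extension of the low
// halfword: when bit 15 of the value is set, the low 16 bits presumably act
// as a negative displacement in the sign-extending 16-bit add that follows,
// so the high part must be one larger for src + value to come out right.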
void Assembler::patch_pc_address(Register dst, int pc_offset,
                                 int return_address_offset) {
  DCHECK(is_int16(return_address_offset));
  Assembler patching_assembler(
  patching_assembler.addi(dst, dst, Operand(return_address_offset));

void Assembler::mov_label_offset(Register dst, Label* label) {
  if (label->is_bound()) {
        Operand(position + InstructionStream::kHeaderSize - kHeapObjectTag));

    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundMovLabelOffsetOpcode | (link & kImm26Mask));

void Assembler::add_label_offset(Register dst, Register base, Label* label,
  if (label->is_bound()) {

    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit((is_int22(delta) ? kUnboundAddLabelOffsetOpcode
                          : kUnboundAddLabelLongOffsetOpcode) |
         (link & kImm26Mask));
    emit(dst.code() * B27 | base.code() * B22 | (delta & kImm22Mask));
    if (!is_int22(delta)) {

void Assembler::mov_label_addr(Register dst, Label* label) {
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
  if (label->is_bound()) {

    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundMovLabelAddrOpcode | (link & kImm26Mask));
    DCHECK_GE(kMovInstructionsNoConstantPool, 2);
    for (int i = 0; i < kMovInstructionsNoConstantPool - 2; i++) nop();

void Assembler::emit_label_addr(Label* label) {
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  if (label->is_bound()) {

    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundJumpTableEntryOpcode | (link & kImm26Mask));

void Assembler::crxor(int bt, int ba, int bb) {
  emit(EXT1 | CRXOR | bt * B21 | ba * B16 | bb * B11);
}

void Assembler::creqv(int bt, int ba, int bb) {
  emit(EXT1 | CREQV | bt * B21 | ba * B16 | bb * B11);
}

void Assembler::mflr(Register dst) {
  emit(EXT2 | MFSPR | dst.code() * B21 | 256 << 11);
}

void Assembler::mtlr(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 256 << 11);
}

void Assembler::mtctr(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 288 << 11);
}

void Assembler::mtxer(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 32 << 11);
}
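
// The literal constants in these mfspr/mtspr encodings are the special
// purpose register numbers with their two 5-bit halves swapped, as the SPR
// field of the instruction requires: LR is SPR 8 (encoded as 256), CTR is
// SPR 9 (encoded as 288), and XER is SPR 1 (encoded as 32).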
void Assembler::mcrfs(CRegister cr, FPSCRBit bit) {
  emit(EXT4 | MCRFS | bf * B23 | bfa * B18);
}

void Assembler::mfcr(Register dst) { emit(EXT2 | MFCR | dst.code() * B21); }

void Assembler::mtcrf(Register src, uint8_t FXM) {
  emit(MTCRF | src.code() * B21 | FXM * B12);
}

void Assembler::mffprd(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRD | src.code() * B21 | dst.code() * B16);
}

void Assembler::mffprwz(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRWZ | src.code() * B21 | dst.code() * B16);
}

void Assembler::mtfprd(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRD | dst.code() * B21 | src.code() * B16);
}

void Assembler::mtfprwz(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWZ | dst.code() * B21 | src.code() * B16);
}

void Assembler::mtfprwa(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWA | dst.code() * B21 | src.code() * B16);
}

void Assembler::stop(Condition cond, int32_t code, CRegister cr) {

void Assembler::bkpt(uint32_t imm16) { emit(0x7D821008); }

void Assembler::dcbf(Register ra, Register rb) {
  emit(EXT2 | DCBF | ra.code() * B16 | rb.code() * B11);
}

void Assembler::sync() { emit(EXT2 | SYNC); }

void Assembler::lwsync() { emit(EXT2 | SYNC | 1 * B21); }

void Assembler::icbi(Register ra, Register rb) {
  emit(EXT2 | ICBI | ra.code() * B16 | rb.code() * B11);
}

void Assembler::isync() { emit(EXT1 | ISYNC); }

void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  emit(LFD | frt.code() * B21 | ra.code() * B16 | imm16);
}

void Assembler::lfdu(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  emit(LFDU | frt.code() * B21 | ra.code() * B16 | imm16);
}

void Assembler::lfs(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  emit(LFS | frt.code() * B21 | ra.code() * B16 | imm16);
}

void Assembler::lfsu(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  emit(LFSU | frt.code() * B21 | ra.code() * B16 | imm16);
}

void Assembler::stfd(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  emit(STFD | frs.code() * B21 | ra.code() * B16 | imm16);
}

void Assembler::stfdu(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  emit(STFDU | frs.code() * B21 | ra.code() * B16 | imm16);
}

void Assembler::stfs(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  emit(STFS | frs.code() * B21 | ra.code() * B16 | imm16);
}

void Assembler::stfsu(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  emit(STFSU | frs.code() * B21 | ra.code() * B16 | imm16);
}
void Assembler::fsub(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FSUB, frt, fra, frb, rc);
}

void Assembler::fadd(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FADD, frt, fra, frb, rc);
}

void Assembler::fmul(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frc, RCBit rc) {
  emit(EXT4 | FMUL | frt.code() * B21 | fra.code() * B16 | frc.code() * B6 |

void Assembler::fcpsgn(const DoubleRegister frt, const DoubleRegister fra,
                       const DoubleRegister frb, RCBit rc) {
  emit(EXT4 | FCPSGN | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |

void Assembler::fdiv(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FDIV, frt, fra, frb, rc);
}

void Assembler::fcmpu(const DoubleRegister fra, const DoubleRegister frb,
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT4 | FCMPU | cr.code() * B23 | fra.code() * B16 | frb.code() * B11);

void Assembler::fmr(const DoubleRegister frt, const DoubleRegister frb,
  emit(EXT4 | FMR | frt.code() * B21 | frb.code() * B11 | rc);

void Assembler::fctiwz(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIWZ | frt.code() * B21 | frb.code() * B11);
}

void Assembler::fctiw(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIW | frt.code() * B21 | frb.code() * B11);
}

void Assembler::fctiwuz(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIWUZ | frt.code() * B21 | frb.code() * B11);
}

void Assembler::frin(const DoubleRegister frt, const DoubleRegister frb,
  emit(EXT4 | FRIN | frt.code() * B21 | frb.code() * B11 | rc);

void Assembler::friz(const DoubleRegister frt, const DoubleRegister frb,
  emit(EXT4 | FRIZ | frt.code() * B21 | frb.code() * B11 | rc);

void Assembler::frip(const DoubleRegister frt, const DoubleRegister frb,
  emit(EXT4 | FRIP | frt.code() * B21 | frb.code() * B11 | rc);

void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb,
  emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11 | rc);

void Assembler::frsp(const DoubleRegister frt, const DoubleRegister frb,
  emit(EXT4 | FRSP | frt.code() * B21 | frb.code() * B11 | rc);

void Assembler::fcfid(const DoubleRegister frt, const DoubleRegister frb,
  emit(EXT4 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);

void Assembler::fcfidu(const DoubleRegister frt, const DoubleRegister frb,
  emit(EXT4 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);

void Assembler::fcfidus(const DoubleRegister frt, const DoubleRegister frb,
  emit(EXT3 | FCFIDUS | frt.code() * B21 | frb.code() * B11 | rc);

void Assembler::fcfids(const DoubleRegister frt, const DoubleRegister frb,
  emit(EXT3 | FCFIDS | frt.code() * B21 | frb.code() * B11 | rc);

void Assembler::fctid(const DoubleRegister frt, const DoubleRegister frb,
  emit(EXT4 | FCTID | frt.code() * B21 | frb.code() * B11 | rc);

void Assembler::fctidz(const DoubleRegister frt, const DoubleRegister frb,
  emit(EXT4 | FCTIDZ | frt.code() * B21 | frb.code() * B11 | rc);

void Assembler::fctidu(const DoubleRegister frt, const DoubleRegister frb,
  emit(EXT4 | FCTIDU | frt.code() * B21 | frb.code() * B11 | rc);

void Assembler::fctiduz(const DoubleRegister frt, const DoubleRegister frb,
  emit(EXT4 | FCTIDUZ | frt.code() * B21 | frb.code() * B11 | rc);

void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frc, const DoubleRegister frb,
  emit(EXT4 | FSEL | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);

void Assembler::fneg(const DoubleRegister frt, const DoubleRegister frb,
  emit(EXT4 | FNEG | frt.code() * B21 | frb.code() * B11 | rc);

void Assembler::mtfsb0(FPSCRBit bit, RCBit rc) {
  emit(EXT4 | MTFSB0 | bt * B21 | rc);
}

void Assembler::mtfsb1(FPSCRBit bit, RCBit rc) {
  emit(EXT4 | MTFSB1 | bt * B21 | rc);
}

void Assembler::mtfsfi(int bf, int immediate, RCBit rc) {
  emit(EXT4 | MTFSFI | bf * B23 | immediate * B12 | rc);
}

void Assembler::mffs(const DoubleRegister frt, RCBit rc) {
  emit(EXT4 | MFFS | frt.code() * B21 | rc);
}

void Assembler::mtfsf(const DoubleRegister frb, bool L, int FLM, bool W,
  emit(EXT4 | MTFSF | frb.code() * B11 | W * B16 | FLM * B17 | L * B25 | rc);

void Assembler::fsqrt(const DoubleRegister frt, const DoubleRegister frb,
  emit(EXT4 | FSQRT | frt.code() * B21 | frb.code() * B11 | rc);

void Assembler::fabs(const DoubleRegister frt, const DoubleRegister frb,
  emit(EXT4 | FABS | frt.code() * B21 | frb.code() * B11 | rc);

void Assembler::fmadd(const DoubleRegister frt, const DoubleRegister fra,
                      const DoubleRegister frc, const DoubleRegister frb,
  emit(EXT4 | FMADD | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);

void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra,
                      const DoubleRegister frc, const DoubleRegister frb,
  emit(EXT4 | FMSUB | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
void Assembler::mfvsrd(const Register ra, const Simd128Register rs) {
  emit(MFVSRD | rs.code() * B21 | ra.code() * B16 | SX);
}

void Assembler::mfvsrwz(const Register ra, const Simd128Register rs) {
  emit(MFVSRWZ | rs.code() * B21 | ra.code() * B16 | SX);
}

void Assembler::mtvsrd(const Simd128Register rt, const Register ra) {
  emit(MTVSRD | rt.code() * B21 | ra.code() * B16 | TX);
}

void Assembler::mtvsrdd(const Simd128Register rt, const Register ra,
                        const Register rb) {
  emit(MTVSRDD | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | TX);
}

void Assembler::lxvd(const Simd128Register rt, const MemOperand& src) {
  CHECK(src.rb().is_valid());
  emit(LXVD | rt.code() * B21 | src.ra().code() * B16 | src.rb().code() * B11 |

void Assembler::lxvx(const Simd128Register rt, const MemOperand& src) {
  CHECK(src.rb().is_valid());
  emit(LXVX | rt.code() * B21 | src.ra().code() * B16 | src.rb().code() * B11 |

void Assembler::lxsdx(const Simd128Register rt, const MemOperand& src) {
  CHECK(src.rb().is_valid());
  emit(LXSDX | rt.code() * B21 | src.ra().code() * B16 | src.rb().code() * B11 |

void Assembler::lxsibzx(const Simd128Register rt, const MemOperand& src) {
  CHECK(src.rb().is_valid());
  emit(LXSIBZX | rt.code() * B21 | src.ra().code() * B16 |
       src.rb().code() * B11 | TX);
}

void Assembler::lxsihzx(const Simd128Register rt, const MemOperand& src) {
  CHECK(src.rb().is_valid());
  emit(LXSIHZX | rt.code() * B21 | src.ra().code() * B16 |
       src.rb().code() * B11 | TX);
}

void Assembler::lxsiwzx(const Simd128Register rt, const MemOperand& src) {
  CHECK(src.rb().is_valid());
  emit(LXSIWZX | rt.code() * B21 | src.ra().code() * B16 |
       src.rb().code() * B11 | TX);
}

void Assembler::stxsdx(const Simd128Register rs, const MemOperand& dst) {
  CHECK(dst.rb().is_valid());
  emit(STXSDX | rs.code() * B21 | dst.ra().code() * B16 |
       dst.rb().code() * B11 | SX);
}

void Assembler::stxsibx(const Simd128Register rs, const MemOperand& dst) {
  CHECK(dst.rb().is_valid());
  emit(STXSIBX | rs.code() * B21 | dst.ra().code() * B16 |
       dst.rb().code() * B11 | SX);
}

void Assembler::stxsihx(const Simd128Register rs, const MemOperand& dst) {
  CHECK(dst.rb().is_valid());
  emit(STXSIHX | rs.code() * B21 | dst.ra().code() * B16 |
       dst.rb().code() * B11 | SX);
}

void Assembler::stxsiwx(const Simd128Register rs, const MemOperand& dst) {
  CHECK(dst.rb().is_valid());
  emit(STXSIWX | rs.code() * B21 | dst.ra().code() * B16 |
       dst.rb().code() * B11 | SX);
}

void Assembler::stxvd(const Simd128Register rt, const MemOperand& dst) {
  CHECK(dst.rb().is_valid());
  emit(STXVD | rt.code() * B21 | dst.ra().code() * B16 | dst.rb().code() * B11 |

void Assembler::stxvx(const Simd128Register rt, const MemOperand& dst) {
  CHECK(dst.rb().is_valid());
  emit(STXVX | rt.code() * B21 | dst.ra().code() * B16 | dst.rb().code() * B11 |

void Assembler::xxspltib(const Simd128Register rt, const Operand& imm) {
  CHECK(is_uint8(imm.immediate()));
  emit(XXSPLTIB | (rt.code() & 0x1F) * B21 | (imm.immediate() & 0xFF) * B11 |
void Assembler::nop(int type) {
    case NON_MARKING_NOP:
    case GROUP_ENDING_NOP:
    case DEBUG_BREAK_NOP:

  ori(reg, reg, Operand::Zero());

bool Assembler::IsNop(Instr instr, int type) {
    case NON_MARKING_NOP:
    case GROUP_ENDING_NOP:
    case DEBUG_BREAK_NOP:
void Assembler::GrowBuffer(int needed) {
  int old_size = buffer_->size();
  int new_size = std::min(2 * old_size, old_size + 1 * MB);
  int space = buffer_space() + (new_size - old_size);
  new_size += (space < needed) ? needed - space : 0;

  if (new_size > kMaximalBufferSize) {
    V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");

  std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
  DCHECK_EQ(new_size, new_buffer->size());
  uint8_t* new_start = new_buffer->start();

  intptr_t pc_delta = new_start - buffer_start_;
  intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
  size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
  MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),

  buffer_ = std::move(new_buffer);
  buffer_start_ = new_start;

  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);
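
// Two distinct deltas are used because, as the arithmetic above suggests,
// the buffer grows at its end: generated code is anchored to the start of
// the buffer and shifts by pc_delta, while the relocation info is written
// backwards from the end of the buffer and so shifts by rc_delta, which
// also includes the size increase. The MemMove above relocates the existing
// reloc data accordingly.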
void Assembler::db(uint8_t data) {
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}

void Assembler::dd(uint32_t data) {
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}

void Assembler::dq(uint64_t value) {
  *reinterpret_cast<uint64_t*>(pc_) = value;
  pc_ += sizeof(uint64_t);
}

void Assembler::dp(uintptr_t data) {
  *reinterpret_cast<uintptr_t*>(pc_) = data;
  pc_ += sizeof(uintptr_t);
}

void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  if (!ShouldRecordRelocInfo(rmode)) return;
  DeferredRelocInfo rinfo(pc_offset(), rmode, data);
  relocations_.push_back(rinfo);
}

void Assembler::EmitRelocations() {
  EnsureSpaceFor(relocations_.size() * kMaxRelocSize);

  for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
       it != relocations_.end(); it++) {
    RelocInfo::Mode rmode = it->rmode();
    RelocInfo rinfo(pc, rmode, it->data());

    if (RelocInfo::IsInternalReference(rmode)) {
      intptr_t pos = static_cast<intptr_t>(Memory<Address>(pc));
      Memory<Address>(pc) = reinterpret_cast<Address>(buffer_start_) + pos;
    } else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
      intptr_t pos = static_cast<intptr_t>(target_address_at(pc, kNullAddress));
      set_target_address_at(pc, 0,
                            reinterpret_cast<Address>(buffer_start_) + pos,
                            nullptr, SKIP_ICACHE_FLUSH);

    reloc_info_writer.Write(&rinfo);

void Assembler::BlockTrampolinePoolFor(int instructions) {
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}

void Assembler::CheckTrampolinePool() {
  if (trampoline_pool_blocked_nesting_ > 0) return;
  if (pc_offset() < no_trampoline_pool_before_) {
    next_trampoline_check_ = no_trampoline_pool_before_;

  DCHECK(!trampoline_emitted_);
  if (tracked_branch_count_ > 0) {
    int size = tracked_branch_count_ * kInstrSize;

    trampoline_emitted_ = true;
    next_trampoline_check_ = kMaxInt;

    b(size + kInstrSize, LeaveLK);

    trampoline_ = Trampoline(pc_offset() - size, tracked_branch_count_);
PatchingAssembler::PatchingAssembler(const AssemblerOptions& options,
                                     uint8_t* address, int instructions)
                                 address, instructions * kInstrSize + kGap)) {
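
// PatchingAssembler appears to point the assembler at a fixed, already
// emitted code address so that exactly `instructions` existing instructions
// can be rewritten in place; target_at_put() above uses it to replace the
// unbound-label placeholders once a label is bound. The kGap term mirrors
// the assembler's usual buffer-slack convention rather than adding room for
// extra instructions.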
PatchingAssembler::~PatchingAssembler() {