#if defined(__riscv_f) && defined(__riscv_d)
  answer |= 1u << FPU;
#endif
#if (defined __riscv_vector) && (__riscv_v >= 1000000)
  answer |= 1u << RISCV_SIMD;
#endif
#if (defined __riscv_zba)
  answer |= 1u << ZBA;
#endif
#if (defined __riscv_zbb)
  answer |= 1u << ZBB;
#endif
#if (defined __riscv_zbs)
  answer |= 1u << ZBS;
#endif
#if (defined __riscv_zicond)
  answer |= 1u << ZICOND;
#endif

#ifdef _RISCV_TARGET_SIMULATOR
static unsigned SimulatorFeatures() {
  unsigned answer = 0;
  answer |= 1u << RISCV_SIMD;
  answer |= 1u << ZICOND;
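// The feature bits above are plain bitmask positions in the CpuFeature enum,
// so a query such as CpuFeatures::IsSupported(ZBA) is a single mask test.
// A minimal sketch of the idiom (illustrative only; the real enum values and
// storage live elsewhere in V8):
//
//   enum Feature { FPU, RISCV_SIMD, ZBA, ZBB, ZBS, ZICOND };
//   static unsigned supported = 0;  // filled in by ProbeImpl() below
//   static bool IsSupported(Feature f) { return (supported & (1u << f)) != 0; }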
#ifdef _RISCV_TARGET_SIMULATOR

  if (cross_compile) return;

  if (cpu.has_rvv()) supported_ |= 1u << RISCV_SIMD;

  supported_ |= (1u << ZBA) | (1u << ZBB) | (1u << ZBS);

#ifdef V8_COMPRESS_POINTERS
  FATAL("SV57 is not supported");

  printf("RISC-V Extension zba=%d,zbb=%d,zbs=%d,ZICOND=%d\n",
  const int kNumbers[] = {
      // ... (table contents elided) ...
  };
  return kNumbers[reg.code()];

  const Register kRegisters[] = {
      zero_reg, ra, sp, gp, tp, t0, t1, t2, fp, s1, a0, a1, a2, a3, a4, a5,
      a6, a7, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, t3, t4, t5, t6};
  return kRegisters[num];
  return static_cast<uint32_t>(

    : rm_(no_reg), rmode_(rmode) {

  result.is_heap_number_request_ = true;

      request.heap_number());
#ifdef V8_TARGET_ARCH_RISCV64

                     std::unique_ptr<AssemblerBuffer> buffer)

      scratch_register_list_(DefaultTmpList()),
      scratch_double_register_list_(DefaultFPTmpList()),

  GetCode(isolate->main_thread_local_isolate(), desc);
                        int handler_table_offset) {

  static constexpr int kConstantPoolSize = 0;
  static constexpr int kBuiltinJumpTableInfoSize = 0;

  const int builtin_jump_table_info_offset =

  const int code_comments_offset =
      builtin_jump_table_info_offset - code_comments_size;
  const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
  const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
                                        ? constant_pool_offset
                                        : handler_table_offset;
  const int safepoint_table_offset =
          ? handler_table_offset2

  const int reloc_info_offset =

  CodeDesc::Initialize(desc, this, safepoint_table_offset,
                       handler_table_offset2, constant_pool_offset,
                       code_comments_offset, builtin_jump_table_info_offset,
                       reloc_info_offset);
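  // Layout note: the metadata tables are computed back to front, each offset
  // being the following table's offset minus that table's size, so an empty
  // table (size 0) collapses onto its neighbour. For instance, with 16 bytes
  // of code comments and no constant pool, a builtin_jump_table_info_offset
  // of 256 gives code_comments_offset == 240 and constant_pool_offset == 240,
  // and with no handler table handler_table_offset2 also becomes 240.
  // (Numbers are illustrative only.)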
  uintptr_t address = *p;

  uintptr_t instr_address = reinterpret_cast<uintptr_t>(p);
  DCHECK(instr_address - address < INT_MAX);
  int delta = static_cast<int>(instr_address - address);
  switch (instruction->InstructionOpcodeType()) {

      int32_t imm12 = instr >> 20;

      uintptr_t instr_address =

      uintptr_t imm = reinterpret_cast<uintptr_t>(pc);

      DCHECK(instr_address - imm < INT_MAX);
      int32_t delta = static_cast<int32_t>(instr_address - imm);
      int32_t offset = instruction->RvcImm11CJValue();

      int32_t offset = instruction->RvcImm8BValue();

      return (imm18 + pos);

  int32_t imm = target_pos - pos;

  instr &= ~kBImm12Mask;
  int32_t imm12 = ((imm & 0x800) >> 4) | ((imm & 0x1e) << 7) |
                  ((imm & 0x7e0) << 20) | ((imm & 0x1000) << 19);
#if V8_TARGET_ARCH_RISCV64
#elif V8_TARGET_ARCH_RISCV32

  instr &= ~kImm12Mask;

  int32_t imm = target_pos - pos;

  instr &= ~kImm20Mask;
  int32_t imm20 = (imm & 0xff000) | ((imm & 0x800) << 9) |
                  ((imm & 0x7fe) << 20) | ((imm & 0x100000) << 11);
  int32_t imm = target_pos - pos;

  instr &= ~kImm11Mask;
  int16_t imm11 = ((imm & 0x800) >> 1) | ((imm & 0x400) >> 4) |
                  ((imm & 0x300) >> 1) | ((imm & 0x80) >> 3) |
                  ((imm & 0x40) >> 1) | ((imm & 0x20) >> 5) |
                  ((imm & 0x10) << 5) | (imm & 0xe);

  int32_t imm = target_pos - pos;

  instr &= ~kRvcBImm8Mask;
  int32_t imm8 = ((imm & 0x20) >> 5) | ((imm & 0x6)) | ((imm & 0xc0) >> 3) |
                 ((imm & 0x18) << 2) | ((imm & 0x100) >> 1);
  imm8 = ((imm8 & 0x1f) << 2) | ((imm8 & 0xe0) << 5);
  disasm.InstructionDecode(disasm_buffer, pc);

  uintptr_t imm = reinterpret_cast<uintptr_t>(buffer_start_) + target_pos;
  switch (instruction->InstructionOpcodeType()) {

          (instruction->RdValue() << kRdShift);

      int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
      int32_t Lo12 = (int32_t)offset << 20 >> 20;

          (instruction->RdValue() == instruction_I->Rs1Value())) {

      int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
      int32_t Lo12 = (int32_t)offset << 20 >> 20;

      const int kImm31_20Mask = ((1 << 12) - 1) << 20;
      const int kImm11_0Mask = ((1 << 12) - 1);
      instr_I = (instr_I & ~kImm31_20Mask) | ((Lo12 & kImm11_0Mask) << 20);
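      // The Hi20/Lo12 split used throughout this file follows the standard
      // RISC-V auipc/lui + addi pattern: the low 12 bits are sign-extended by
      // the consuming addi/load, so adding 0x800 before the shift rounds the
      // upper part up whenever the low part is negative. A self-contained
      // sketch of the invariant (names illustrative):
      //
      //   int32_t hi20 = (offset + 0x800) >> 12;   // upper part
      //   int32_t lo12 = (offset << 20) >> 20;     // sign-extended low 12 bits
      //   // (hi20 << 12) + lo12 == offset; e.g. offset 0x1FFF gives
      //   // hi20 = 2, lo12 = -1, and (2 << 12) + (-1) == 0x1FFF.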
  if (instruction->InstructionOpcodeType() == AUIPC) {

  if (L->is_unused()) {

  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {

    while (l.is_linked()) {

    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);

  bool is_internal = false;

  while (L->is_linked()) {
    int fixup_pos = L->pos();
    int dist = pos - fixup_pos;

    next(L, is_internal);

          fixup_pos = trampoline_pos;

        fixup_pos = trampoline_pos;
  if (L == nullptr || !L->is_bound()) return true;

  int32_t imm13 = ((instr & 0xf00) >> 7) | ((instr & 0x7e000000) >> 20) |
                  ((instr & 0x80) << 4) | ((instr & 0x80000000) >> 19);
  imm13 = imm13 << 19 >> 19;

  int32_t imm12 = static_cast<int32_t>(instr_I & kImm12Mask) >> 20;
  int32_t offset = imm12 + imm_auipc;

  int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
  int32_t Lo12 = (int32_t)offset << 20 >> 20;

  return trampoline_entry;
    target_pos = L->pos();

    if (L->is_linked()) {
      target_pos = L->pos();

  uintptr_t imm = reinterpret_cast<uintptr_t>(buffer_start_) + target_pos;

    target_pos = L->pos();

    if (L->is_linked()) {
      target_pos = L->pos();

  return static_cast<int32_t>(offset);

    target_pos = L->pos();

    if (L->is_linked()) {
      target_pos = L->pos();

    target_pos = L->pos();

    if (L->is_linked()) {
      target_pos = L->pos();
      int32_t imm18 = target_pos - at_offset;

      int32_t imm16 = imm18 >> 2;

    L->link_to(at_offset);
  return int64_t(V << (64 - N)) >> (64 - N);

#if V8_TARGET_ARCH_RISCV64
void Assembler::RV_li(Register rd, int64_t imm) {

  if (RecursiveLiCount(imm) > GeneralLiCount(imm, temps.CanAcquire())) {

    RecursiveLi(rd, imm);

int Assembler::RV_li_count(int64_t imm, bool is_get_temp_reg) {
  if (RecursiveLiCount(imm) > GeneralLiCount(imm, is_get_temp_reg)) {
    return GeneralLiCount(imm, is_get_temp_reg);

  return RecursiveLiCount(imm);
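// RV_li and RV_li_count choose between the two materialization strategies by
// comparing predicted instruction counts: GeneralLi (the lui/addi/slli based
// path below) when it is no worse, otherwise the recursive splitter
// RecursiveLi near the end of this file. The *_count helpers mirror the
// emitters without writing any instructions.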
void Assembler::GeneralLi(Register rd, int64_t imm) {

  if (is_int32(imm + 0x800)) {

    int64_t high_20 = ((imm + 0x800) >> 12);
    int64_t low_12 = imm << 52 >> 52;

      lui(rd, (int32_t)high_20);

        addi(rd, rd, low_12);

      addi(rd, zero_reg, low_12);

    int64_t up_32 = imm >> 32;
    int64_t low_32 = imm & 0xffffffffull;

    if (up_32 == 0 || low_32 == 0) {

    temp_reg = temps.CanAcquire() ? temps.Acquire() : no_reg;

    if (temp_reg != no_reg) {

      int64_t sim_low = 0;

        int64_t high_20 = ((low_32 + 0x800) >> 12);
        int64_t low_12 = low_32 & 0xfff;

          sim_low = ((high_20 << 12) << 32) >> 32;
          lui(rd, (int32_t)high_20);

          sim_low += (low_12 << 52 >> 52) | low_12;
          addi(rd, rd, low_12);

        ori(rd, zero_reg, low_12);

      if (sim_low & 0x100000000) {

        up_32 = (up_32 - 0xffffffff) & 0xffffffff;

        int64_t high_20 = (up_32 + 0x800) >> 12;
        int64_t low_12 = up_32 & 0xfff;

          lui(temp_reg, (int32_t)high_20);

            addi(temp_reg, temp_reg, low_12);

          ori(temp_reg, zero_reg, low_12);

        slli(temp_reg, temp_reg, 32);

        add(rd, rd, temp_reg);

      int64_t high_20 = (up_32 + 0x800) >> 12;
      int64_t low_12 = up_32 & 0xfff;

        lui(rd, (int32_t)high_20);

          addi(rd, rd, low_12);

        ori(rd, zero_reg, low_12);

      uint32_t mask = 0x80000000;

      for (i = 0; i < 32; i++) {
        if ((low_32 & mask) == 0) {

          slli(rd, rd, shift_val);

          if ((i + 11) < 32) {

            part = ((uint32_t)(low_32 << i) >> i) >> (32 - (i + 11));
            slli(rd, rd, shift_val + 11);

            part = (uint32_t)(low_32 << i) >> i;
            slli(rd, rd, shift_val + (32 - i));
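  // GeneralLi, as the fragments above show, splits a 64-bit immediate into
  // up_32/low_32. When a scratch register can be acquired, the two halves are
  // materialized independently (lui/addi into rd and temp_reg), the upper
  // half is shifted left by 32 and the halves are combined with add; the
  // sim_low bookkeeping compensates for the sign extension of the lower half.
  // Without a scratch register, the upper half is built in rd and the lower
  // 32 bits are shifted in from most- to least-significant, in chunks of at
  // most 11 bits per step (the check on (i + 11) above), since a 12-bit
  // signed immediate only carries 11 payload bits for a positive chunk.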
void Assembler::li_ptr(Register rd, int64_t imm) {
#ifdef RISCV_USE_SV39

  DCHECK_EQ((imm & 0xffffff8000000000ll), 0);
  int64_t a8 = imm & 0xff;
  int64_t high_31 = (imm >> 8) & 0x7fffffff;
  int64_t high_20 = ((high_31 + 0x800) >> 12);
  int64_t low_12 = high_31 & 0xfff;
  lui(rd, (int32_t)high_20);
  addi(rd, rd, low_12);

  DCHECK_EQ((imm & 0xfff0000000000000ll), 0);
  int64_t a6 = imm & 0x3f;
  int64_t b11 = (imm >> 6) & 0x7ff;
  int64_t high_31 = (imm >> 17) & 0x7fffffff;
  int64_t high_20 = ((high_31 + 0x800) >> 12);
  int64_t low_12 = high_31 & 0xfff;
  lui(rd, (int32_t)high_20);
  addi(rd, rd, low_12);

void Assembler::li_constant(Register rd, int64_t imm) {

  lui(rd, (imm + (1LL << 47) + (1LL << 35) + (1LL << 23) + (1LL << 11)) >>

      (imm + (1LL << 35) + (1LL << 23) + (1LL << 11)) << 16 >>

  addi(rd, rd, (imm + (1LL << 23) + (1LL << 11)) << 28 >> 52);

  addi(rd, rd, (imm + (1LL << 11)) << 40 >> 52);

  addi(rd, rd, imm << 52 >> 52);
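  // li_constant emits a fixed-length lui/addi sequence (presumably with shift
  // steps between the addis that this excerpt does not show), so the encoded
  // constant can be patched in place later. Each rounding term (1LL << 47,
  // 1LL << 35, ...) pre-compensates for the sign extension of a later 12-bit
  // chunk, the same +0x800 trick applied once per chunk.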
void Assembler::li_constant32(Register rd, int32_t imm) {

  int32_t high_20 = ((imm + 0x800) >> 12);

  addi(rd, rd, low_12);

#elif V8_TARGET_ARCH_RISCV32
void Assembler::RV_li(Register rd, int32_t imm) {
  int32_t high_20 = ((imm + 0x800) >> 12);

    addi(rd, rd, low_12);

    addi(rd, zero_reg, low_12);

int Assembler::RV_li_count(int32_t imm, bool is_get_temp_reg) {

  int32_t high_20 = ((imm + 0x800) >> 12);

void Assembler::li_ptr(Register rd, int32_t imm) {

  int32_t high_20 = ((imm + 0x800) >> 12);

  addi(rd, rd, low_12);

void Assembler::li_constant(Register rd, int32_t imm) {

  int32_t high_20 = ((imm + 0x800) >> 12);

  addi(rd, rd, low_12);
  lui(zero_reg, code);

#if defined(V8_HOST_ARCH_RISCV64) || defined(V8_HOST_ARCH_RISCV32)

                                          int second_access_add_to_offset) {
  bool two_accesses = static_cast<bool>(access_type);
  DCHECK_LE(second_access_add_to_offset, 7);

  if (is_int12(src.offset()) &&
      (!two_accesses || is_int12(static_cast<int32_t>(
                            src.offset() + second_access_add_to_offset)))) {

                                       int second_Access_add_to_offset) {

  constexpr int32_t kMinOffsetForSimpleAdjustment = 0x7F8;
  constexpr int32_t kMaxOffsetForSimpleAdjustment =
      2 * kMinOffsetForSimpleAdjustment;
  if (0 <= src->offset() && src->offset() <= kMaxOffsetForSimpleAdjustment) {
    addi(scratch, src->rm(), kMinOffsetForSimpleAdjustment);
    src->offset_ -= kMinOffsetForSimpleAdjustment;
  } else if (-kMaxOffsetForSimpleAdjustment <= src->offset() &&
             src->offset() < 0) {
    addi(scratch, src->rm(), -kMinOffsetForSimpleAdjustment);
    src->offset_ += kMinOffsetForSimpleAdjustment;

    RV_li(scratch, (static_cast<intptr_t>(src->offset()) + 0x800) >> 12 << 12);
    add(scratch, scratch, src->rm());
    src->offset_ = src->offset() << 20 >> 20;

    RV_li(scratch, src->offset());
    add(scratch, scratch, src->rm());
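  // AdjustBaseAndOffset folds an out-of-range offset into a scratch base
  // register so the final load/store still uses a 12-bit displacement. For
  // offsets within +/- 2 * 0x7F8 a single addi of +/- 0x7F8 is enough; larger
  // offsets round the offset to a multiple of 4096 via RV_li, add it to the
  // base, and keep only the sign-extended low 12 bits. Illustrative example:
  // an offset of 0x12345 becomes scratch = base + 0x12000 (from
  // (0x12345 + 0x800) >> 12 << 12) with a remaining displacement of 0x345.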
                                           intptr_t pc_delta) {

  intptr_t* p = reinterpret_cast<intptr_t*>(pc);

  DEBUG_PRINTF("\ttarget_address 0x%" PRIxPTR "\n", target_address);

#if V8_TARGET_ARCH_RISCV64
#ifdef RISCV_USE_SV39

#elif V8_TARGET_ARCH_RISCV32

                                           intptr_t pc_delta) {

  int old_size = buffer_->size();
  int new_size = std::min(2 * old_size, old_size + 1 * MB);

  std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
  DCHECK_EQ(new_size, new_buffer->size());
  uint8_t* new_start = new_buffer->start();

  intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);

  buffer_ = std::move(new_buffer);
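  // GrowBuffer doubles the assembler buffer but never grows it by more than
  // 1 MB at a time. After the contents are copied, rc_delta (the shift of the
  // buffer's tail, where relocation info is written backwards) repositions
  // the reloc writer, and previously emitted pc-relative and internal
  // references are fixed up by the corresponding deltas.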
  if (label->is_bound()) {

  DEBUG_PRINTF("\tBlockTrampolinePoolFor %d", instructions);

  CHECK(is_int32(imm + 0x800));
  int32_t Hi20 = (((int32_t)imm + 0x800) >> 12);
  int32_t Lo12 = (int32_t)imm << 20 >> 20;
#if V8_TARGET_ARCH_RISCV64
  if (IsLd(*reinterpret_cast<Instr*>(pc + 4))) {
#elif V8_TARGET_ARCH_RISCV32

  if (jit_allocation) {

    Memory<Address>(reinterpret_cast<Address>(pc + Hi20 + Lo12)) = target;

  intptr_t imm = (intptr_t)target - (intptr_t)pc;

  DCHECK(is_int32(imm + 0x800));

#if V8_TARGET_ARCH_RISCV64
  if (IsLd(*reinterpret_cast<Instr*>(pc + 4))) {
#elif V8_TARGET_ARCH_RISCV32

    return Memory<Address>(pc + Hi20 + Lo12);

  return pc + Hi20 + Lo12;
#if V8_TARGET_ARCH_RISCV64

#ifdef RISCV_USE_SV39

  if (IsLui(*reinterpret_cast<Instr*>(instr0)) &&

    int64_t addr = (int64_t)(instr0->Imm20UValue() << kImm20Shift) +
                   (int64_t)instr1->Imm12Value();

    addr |= (int64_t)instr3->Imm12Value();

  if (IsLui(*reinterpret_cast<Instr*>(instr0)) &&

    int64_t addr = (int64_t)(instr0->Imm20UValue() << kImm20Shift) +
                   (int64_t)instr1->Imm12Value();

    addr |= (int64_t)instr3->Imm12Value();

    addr |= (int64_t)instr5->Imm12Value();

    return static_cast<Address>(addr);
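  // target_constant_address_at is the inverse of li_ptr: it recognizes the
  // lui/addi(/ori) sequence li_ptr emitted and reassembles the pointer from
  // the Imm20U/Imm12 fields of consecutive instructions (instr0..instr3 for
  // the shorter SV39 form, up to instr5 for the longer one).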
                                    WritableJitAllocation* jit_allocation,

  DEBUG_PRINTF("\tset_target_value_at: pc: %" PRIxPTR "\ttarget: %" PRIx64
               "\told: %" PRIx64 "\n",

  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
#ifdef RISCV_USE_SV39
  DCHECK_EQ((target & 0xffffff8000000000ll), 0);

  int64_t a8 = target & 0xff;
  int64_t high_31 = (target >> 8) & 0x7fffffff;
  int64_t high_20 = ((high_31 + 0x800) >> 12);
  int64_t low_12 = high_31 & 0xfff;
  instr_at_put(pc, (*p & 0xfff) | ((int32_t)high_20 << 12), jit_allocation);
               (*(p + 1) & 0xfffff) | ((int32_t)low_12 << 20), jit_allocation);

  DCHECK_EQ((target & 0xffff000000000000ll), 0);

  int64_t a6 = target & 0x3f;
  int64_t b11 = (target >> 6) & 0x7ff;
  int64_t high_31 = (target >> 17) & 0x7fffffff;
  int64_t high_20 = ((high_31 + 0x800) >> 12);
  int64_t low_12 = high_31 & 0xfff;
  instr_at_put(pc, (*p & 0xfff) | ((int32_t)high_20 << 12), jit_allocation);
               (*(p + 1) & 0xfffff) | ((int32_t)low_12 << 20), jit_allocation);

#elif V8_TARGET_ARCH_RISCV32

  return static_cast<Address>(addr);

                                    WritableJitAllocation* jit_allocation,

  DEBUG_PRINTF("\tset_target_value_at: pc: %x\ttarget: %x\n", pc, target);
#if V8_TARGET_ARCH_RISCV64
  bool result = IsLd(instr_value) &&
                (instr->Rs1Value() == kRegCode_zero_reg) &&
                (instr->RdValue() == kRegCode_zero_reg);
#elif V8_TARGET_ARCH_RISCV32
  bool result = IsLw(instr_value) &&
                (instr->Rs1Value() == kRegCode_zero_reg) &&
                (instr->RdValue() == kRegCode_zero_reg);

           instr_following->RdValue() == kRegCode_zero_reg));

    return instr->Imm12Value();

template <typename T>
  *reinterpret_cast<T*>(pc_) = x;
  if (jit_allocation) {

  if (jit_allocation) {

  if (jit_allocation) {

void ConstantPool::EmitPrologue(Alignment require_alignment) {

  const int marker_size = 1;

      ComputeSize(Jump::kOmitted, require_alignment) / kInt32Size - marker_size;
#if V8_TARGET_ARCH_RISCV64
  assm_->ld(zero_reg, zero_reg, word_count);
#elif V8_TARGET_ARCH_RISCV32
  assm_->lw(zero_reg, zero_reg, word_count);

  assm_->EmitPoolGuard();

int ConstantPool::PrologueSize(Jump require_jump) const {

  int prologue_size = require_jump == Jump::kRequired ? kInstrSize : 0;

  return prologue_size;

void ConstantPool::SetLoadOffsetToConstPoolEntry(int load_offset,
                                                 Instruction* entry_offset,
                                                 const ConstantPoolKey& key) {
  Instr instr_auipc = assm_->instr_at(load_offset);
  Instr instr_load = assm_->instr_at(load_offset + 4);

  DCHECK(assm_->IsAuipc(instr_auipc));
#if V8_TARGET_ARCH_RISCV64
  DCHECK(assm_->IsLd(instr_load));
#elif V8_TARGET_ARCH_RISCV32
  DCHECK(assm_->IsLw(instr_load));

  DCHECK_EQ(assm_->LoadOffset(instr_load), 1);
  DCHECK_EQ(assm_->AuipcOffset(instr_auipc), 0);
  int32_t distance = static_cast<int32_t>(
      reinterpret_cast<Address>(entry_offset) -
      reinterpret_cast<Address>(assm_->toAddress(load_offset)));
  CHECK(is_int32(distance + 0x800));
  int32_t Hi20 = (((int32_t)distance + 0x800) >> 12);
  int32_t Lo12 = (int32_t)distance << 20 >> 20;
  assm_->instr_at_put(load_offset, SetHi20Offset(Hi20, instr_auipc));
  assm_->instr_at_put(load_offset + 4, SetLoadOffset(Lo12, instr_load));
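// A constant pool load is emitted as an auipc (pc-relative hi20, initially 0)
// followed by an ld/lw with a placeholder offset of 1, as the DCHECKs above
// verify. Once the pool is laid out, SetLoadOffsetToConstPoolEntry computes
// the byte distance from the auipc to the pool entry, splits it into
// Hi20/Lo12 as elsewhere in this file, and patches both instructions in
// place via SetHi20Offset and SetLoadOffset.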
void ConstantPool::Check(Emission force_emit, Jump require_jump,

    DCHECK_EQ(force_emit, Emission::kIfNeeded);

  if (!IsEmpty() && (force_emit == Emission::kForced ||
                     ShouldEmitNow(require_jump, margin))) {

    int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired);

    int needed_space = worst_case_size + assm_->kGap;
    while (assm_->buffer_space() <= needed_space) {
      assm_->GrowBuffer();

    EmitAndClear(require_jump);

  SetNextCheckIn(ConstantPool::kCheckInterval);

const size_t ConstantPool::kMaxDistToPool32 = 1 * MB;
const size_t ConstantPool::kMaxDistToPool64 = 1 * MB;
const size_t ConstantPool::kCheckInterval = 128 * kInstrSize;
const size_t ConstantPool::kApproxDistToPool32 = 64 * KB;
const size_t ConstantPool::kApproxDistToPool64 = kApproxDistToPool32;

const size_t ConstantPool::kOpportunityDistToPool32 = 64 * KB;
const size_t ConstantPool::kOpportunityDistToPool64 = 64 * KB;
const size_t ConstantPool::kApproxMaxEntryCount = 512;
#if defined(V8_TARGET_ARCH_RISCV64)

void Assembler::RecursiveLi(Register rd, int64_t val) {
  if (val > 0 && RecursiveLiImplCount(val) > 2) {

    uint64_t ShiftedVal = (uint64_t)val << LeadingZeros;
    int countFillZero = RecursiveLiImplCount(ShiftedVal) + 1;
    if (countFillZero < RecursiveLiImplCount(val)) {
      RecursiveLiImpl(rd, ShiftedVal);
      srli(rd, rd, LeadingZeros);

  RecursiveLiImpl(rd, val);

int Assembler::RecursiveLiCount(int64_t val) {
  if (val > 0 && RecursiveLiImplCount(val) > 2) {

    uint64_t ShiftedVal = (uint64_t)val << LeadingZeros;

    int countFillZero = RecursiveLiImplCount(ShiftedVal) + 1;
    if (countFillZero < RecursiveLiImplCount(val)) {
      return countFillZero;

  return RecursiveLiImplCount(val);

void Assembler::RecursiveLiImpl(Register rd, int64_t Val) {
  if (is_int32(Val)) {

    int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
    int64_t Lo12 = Val << 52 >> 52;

      lui(rd, (int32_t)Hi20);

    if (Lo12 || Hi20 == 0) {

        addiw(rd, rd, Lo12);

        addi(rd, zero_reg, Lo12);

  int64_t Lo12 = Val << 52 >> 52;
  int64_t Hi52 = ((uint64_t)Val + 0x800ull) >> 12;

  Hi52 = signExtend(Hi52 >> (ShiftAmount - 12), 64 - ShiftAmount);

  if (ShiftAmount > 12 && !is_int12(Hi52)) {
    if (is_int32((uint64_t)Hi52 << 12)) {

      Hi52 = (uint64_t)Hi52 << 12;

  RecursiveLi(rd, Hi52);

  slli(rd, rd, ShiftAmount);

int Assembler::RecursiveLiImplCount(int64_t Val) {

  if (is_int32(Val)) {

    int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
    int64_t Lo12 = Val << 52 >> 52;

    if (Lo12 || Hi20 == 0) {

  int64_t Lo12 = Val << 52 >> 52;
  int64_t Hi52 = ((uint64_t)Val + 0x800ull) >> 12;

  Hi52 = signExtend(Hi52 >> (ShiftAmount - 12), 64 - ShiftAmount);

  if (ShiftAmount > 12 && !is_int12(Hi52)) {
    if (is_int32((uint64_t)Hi52 << 12)) {

      Hi52 = (uint64_t)Hi52 << 12;

  count += RecursiveLiImplCount(Hi52);
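// RecursiveLi follows the recursive splitting scheme visible above: a 32-bit
// value is emitted as lui + addiw, otherwise the value is divided into a
// sign-extended low part (Lo12) and the remaining high part (Hi52), the high
// part is materialized recursively, shifted left by ShiftAmount, and the low
// part is added back. The leading-zero check at the top trades one extra
// srli for a shifted value that needs fewer steps. RecursiveLiImplCount
// replays the same decisions purely to count instructions.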
int Assembler::GeneralLiCount(int64_t imm, bool is_get_temp_reg) {

  if (is_int32(imm + 0x800)) {

    int64_t high_20 = ((imm + 0x800) >> 12);
    int64_t low_12 = imm << 52 >> 52;

    int64_t up_32 = imm >> 32;
    int64_t low_32 = imm & 0xffffffffull;

    if (is_get_temp_reg) {

      int64_t sim_low = 0;

        int64_t high_20 = ((low_32 + 0x800) >> 12);
        int64_t low_12 = low_32 & 0xfff;

          sim_low = ((high_20 << 12) << 32) >> 32;

          sim_low += (low_12 << 52 >> 52) | low_12;

      if (sim_low & 0x100000000) {

        up_32 = (up_32 - 0xffffffff) & 0xffffffff;

        int64_t high_20 = (up_32 + 0x800) >> 12;
        int64_t low_12 = up_32 & 0xfff;

      int64_t high_20 = (up_32 + 0x800) >> 12;
      int64_t low_12 = up_32 & 0xfff;

      uint32_t mask = 0x80000000;

      for (i = 0; i < 32; i++) {
        if ((low_32 & mask) == 0) {

          if ((i + 11) < 32) {