#if defined(V8_TARGET_ARCH_PPC64)
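// PPC64 build: ConstantPoolBuilder collects pointer-sized and double-sized
// constants for an out-of-line constant pool that is emitted into the code
// object, then patches each access instruction with the entry's final offset.
// (On PPC64 the pool is typically addressed through a dedicated constant-pool
// register; that detail is outside this excerpt.)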
ConstantPoolBuilder::ConstantPoolBuilder(int ptr_reach_bits,
                                         int double_reach_bits) {
  const PerTypeEntryInfo& info = info_[type];
  if (!is_uintn(dbl_offset, info.regular_reach_bits) ||
  if (!is_uintn(ptr_offset, info.regular_reach_bits)) {
  DCHECK(!emitted_label_.is_bound());
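
  // Entries that allow sharing are deduplicated: the new entry is compared by
  // value against the already-recorded shared entries and, on a match, it
  // simply remembers the index of the entry it merged with.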
  if (entry->sharing_ok()) {
    std::vector<ConstantPoolEntry>::iterator it = info.shared_entries.begin();
    int end = static_cast<int>(info.shared_entries.size());
    for (int i = 0; i < end; i++, it++) {
              ? entry->value() == it->value()
              : entry->value64() == it->value64()) {
        entry->set_merged_index(i);
  DCHECK(!merged || entry->merged_index() < info.regular_count);
    info.shared_entries.push_back(*entry);
    info.entries.push_back(*entry);
  if (merged || info.overflow()) return access;
  info.overflow_start = static_cast<int>(info.entries.size()) - 1;
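
// Shared entries are emitted first so that merged regular entries can later
// reuse their offsets. Each slot is written with dp()/dq() and the
// corresponding access instruction is patched with the final offset.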
void ConstantPoolBuilder::EmitSharedEntries(Assembler* assm,
  std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
  int base = emitted_label_.pos();
  int shared_end = static_cast<int>(shared_entries.size());
  std::vector<ConstantPoolEntry>::iterator shared_it = shared_entries.begin();
  for (int i = 0; i < shared_end; i++, shared_it++) {
    shared_it->set_offset(offset);
      assm->dp(shared_it->value());
      assm->dq(shared_it->value64());
    assm->PatchConstantPoolAccessInstruction(shared_it->position(), offset,
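
// EmitGroup writes one access class of one entry type: either the in-reach
// ("regular") portion or the overflow tail that starts at overflow_start.
// Merged entries emit no data of their own; they are patched with the offset
// of the shared entry they were merged into.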
void ConstantPoolBuilder::EmitGroup(Assembler* assm,
  const bool overflow = info.overflow();
  std::vector<ConstantPoolEntry>& entries = info.entries;
  std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
  int base = emitted_label_.pos();
    EmitSharedEntries(assm, type);
    end = overflow ? info.overflow_start : static_cast<int>(entries.size());
    if (!overflow) return;
    begin = info.overflow_start;
  std::vector<ConstantPoolEntry>::iterator it = entries.begin();
  if (begin > 0) std::advance(it, begin);
  for (int i = begin; i < end; i++, it++) {
    if (!it->is_merged()) {
      entry_access = access;
        assm->dp(it->value());
        assm->dq(it->value64());
      offset = shared_entries[it->merged_index()].offset();
    assm->PatchConstantPoolAccessInstruction(it->position(), offset,
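
// Emit binds the pool's start label and returns the number of bytes emitted
// (0 when the pool is empty).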
int ConstantPoolBuilder::Emit(Assembler* assm) {
  bool emitted = emitted_label_.is_bound();
  bool empty = IsEmpty();
    assm->bind(&emitted_label_);
  return !empty ? (assm->pc_offset() - emitted_label_.pos()) : 0;

#if defined(V8_TARGET_ARCH_ARM64)
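// On ARM64 the constant pool is emitted inline in the instruction stream.
// ConstantPool tracks 32-bit and 64-bit entries keyed by (value, rmode),
// deduplicates them where RelocInfo allows, and decides when the pool must be
// emitted so that every PC-relative load stays within range.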
ConstantPool::ConstantPool(Assembler* assm) : assm_(assm) {}
ConstantPool::~ConstantPool() { DCHECK_EQ(blocked_nesting_, 0); }

RelocInfoStatus ConstantPool::RecordEntry(uint32_t data,
                                          RelocInfo::Mode rmode) {
  ConstantPoolKey key(data, rmode);
  return RecordKey(std::move(key), assm_->pc_offset());

RelocInfoStatus ConstantPool::RecordEntry(uint64_t data,
                                          RelocInfo::Mode rmode) {
  ConstantPoolKey key(data, rmode);
  return RecordKey(std::move(key), assm_->pc_offset());
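
// RecordKey remembers the pc offset of the first 32-bit and first 64-bit use
// (needed later for the range checks) and, once the pool grows past
// kApproxMaxEntryCount entries, requests an earlier emission check.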
RelocInfoStatus ConstantPool::RecordKey(ConstantPoolKey key, int offset) {
  RelocInfoStatus write_reloc_info = GetRelocInfoStatusFor(key);
  if (write_reloc_info == RelocInfoStatus::kMustRecord) {
    if (key.is_value32()) {
      if (entry32_count_ == 0) first_use_32_ = offset;
      if (entry64_count_ == 0) first_use_64_ = offset;
  if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
  return write_reloc_info;

RelocInfoStatus ConstantPool::GetRelocInfoStatusFor(
    const ConstantPoolKey& key) {
  if (key.AllowsDeduplication()) {
      return RelocInfoStatus::kMustOmitForDuplicate;
  return RelocInfoStatus::kMustRecord;
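
// EmitAndClear blocks nested pool checks, optionally emits a branch over the
// pool when execution could otherwise fall into it, records and aligns the
// data, and finally verifies that exactly ComputeSize() bytes were generated.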
void ConstantPool::EmitAndClear(Jump require_jump) {
  Assembler::BlockPoolsScope block_pools(assm_, PoolEmissionCheck::kSkip);
  Alignment require_alignment =
      IsAlignmentRequiredIfEmittedAt(require_jump, assm_->pc_offset());
  int size = ComputeSize(require_jump, require_alignment);
  assm_->bind(&size_check);
  assm_->RecordConstPool(size);
  if (require_jump == Jump::kRequired) assm_->b(&after_pool);
  assm_->RecordComment("[ Constant Pool");
  EmitPrologue(require_alignment);
  if (require_alignment == Alignment::kRequired) assm_->Align(kInt64Size);
  assm_->RecordComment("]");
  if (after_pool.is_linked()) assm_->bind(&after_pool);
  DCHECK_EQ(assm_->SizeOfCodeGeneratedSince(&size_check), size);
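
// Clear() resets the pool's bookkeeping after emission. Emission can also be
// blocked around multi-instruction sequences that must stay contiguous:
// blocking nests, the outermost StartBlock saves the scheduled check
// position, and the matching EndBlock restores it, asserting that the
// deferred pool would still be in range if emitted immediately.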
void ConstantPool::Clear() {

void ConstantPool::StartBlock() {
  if (blocked_nesting_ == 0) {
    old_next_check_ = next_check_;

void ConstantPool::EndBlock() {
  if (blocked_nesting_ == 0) {
    DCHECK(IsInImmRangeIfEmittedAt(assm_->pc_offset()));
    next_check_ = std::min(next_check_, old_next_check_);

bool ConstantPool::IsBlocked() const { return blocked_nesting_ > 0; }

void ConstantPool::SetNextCheckIn(size_t instructions) {
      assm_->pc_offset() + static_cast<int>(instructions * kInstrSize);
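
// EmitEntries walks the recorded keys. equal_range() groups all loads that
// recorded the same key; every load gets its offset patched, but a shared
// key's data is emitted only once, after the inner loop.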
void ConstantPool::EmitEntries() {
    auto range = entries_.equal_range(iter->first);
    bool shared = iter->first.AllowsDeduplication();
    for (auto it = range.first; it != range.second; ++it) {
      SetLoadOffsetToConstPoolEntry(it->second, assm_->pc(), it->first);
      if (!shared) Emit(it->first);
    if (shared) Emit(iter->first);

void ConstantPool::Emit(const ConstantPoolKey& key) {
  if (key.is_value32()) {
    assm_->dd(key.value32());
    assm_->dq(key.value64());
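
// Emission heuristic: compute where the pool would end in the worst case
// (jump plus alignment), then look at the distance from the first recorded
// use of each entry width. Emit if waiting for the next periodic check could
// push a load out of range, if the distance has grown past the "approximate"
// threshold, or if a pool can be dropped in cheaply without a jump.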
bool ConstantPool::ShouldEmitNow(Jump require_jump, size_t margin) const {
  if (IsEmpty()) return false;
  if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
  int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired);
  size_t pool_end_32 = assm_->pc_offset() + margin + worst_case_size;
  size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
  if (Entry64Count() != 0) {
    size_t dist64 = pool_end_64 - first_use_64_;
    bool next_check_too_late = dist64 + 2 * kCheckInterval >= kMaxDistToPool64;
    bool opportune_emission_without_jump =
        require_jump == Jump::kOmitted && (dist64 >= kOpportunityDistToPool64);
    bool approximate_distance_exceeded = dist64 >= kApproxDistToPool64;
    if (next_check_too_late || opportune_emission_without_jump ||
        approximate_distance_exceeded) {
  if (Entry32Count() != 0) {
    size_t dist32 = pool_end_32 - first_use_32_;
    bool next_check_too_late = dist32 + 2 * kCheckInterval >= kMaxDistToPool32;
    bool opportune_emission_without_jump =
        require_jump == Jump::kOmitted && (dist32 >= kOpportunityDistToPool32);
    bool approximate_distance_exceeded = dist32 >= kApproxDistToPool32;
    if (next_check_too_late || opportune_emission_without_jump ||
        approximate_distance_exceeded) {
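
// ComputeSize: prologue (with or without the jump over the pool), optional
// alignment padding when 64-bit entries are present, then the entry data.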
int ConstantPool::ComputeSize(Jump require_jump,
                              Alignment require_alignment) const {
  int size_up_to_marker = PrologueSize(require_jump);
  int alignment = require_alignment == Alignment::kRequired ? kInstrSize : 0;
  size_t size_after_marker =
  return size_up_to_marker + static_cast<int>(size_after_marker);

Alignment ConstantPool::IsAlignmentRequiredIfEmittedAt(Jump require_jump,
  int size_up_to_marker = PrologueSize(require_jump);
  if (Entry64Count() != 0 &&
    return Alignment::kRequired;
  return Alignment::kOmitted;

bool ConstantPool::IsInImmRangeIfEmittedAt(int pc_offset) {
  Alignment require_alignment =
      IsAlignmentRequiredIfEmittedAt(Jump::kRequired, pc_offset);
      pc_offset + ComputeSize(Jump::kRequired, require_alignment);
  size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
  bool entries_in_range_32 =
      Entry32Count() == 0 || (pool_end_32 < first_use_32_ + kMaxDistToPool32);
  bool entries_in_range_64 =
      Entry64Count() == 0 || (pool_end_64 < first_use_64_ + kMaxDistToPool64);
  return entries_in_range_32 && entries_in_range_64;
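
// BlockScope is RAII around a blocked region: the margin-taking constructor
// first emits the pool (with a jump over it) if the extra margin would push
// entries out of range, while the PoolEmissionCheck::kSkip variant blocks
// without that check. Both unblock in the destructor.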
ConstantPool::BlockScope::BlockScope(Assembler* assm, size_t margin)
    : pool_(&assm->constpool_) {
  pool_->assm_->EmitConstPoolWithJumpIfNeeded(margin);

ConstantPool::BlockScope::BlockScope(Assembler* assm, PoolEmissionCheck check)
    : pool_(&assm->constpool_) {
  DCHECK_EQ(check, PoolEmissionCheck::kSkip);

ConstantPool::BlockScope::~BlockScope() { pool_->EndBlock(); }

void ConstantPool::MaybeCheck() {
  if (assm_->pc_offset() >= next_check_) {
    Check(Emission::kIfNeeded, Jump::kRequired);
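
// A minimal usage sketch (assumed call pattern; the real call sites live in
// the ARM64 Assembler/MacroAssembler and are not part of this excerpt):
//
//   {
//     // Keep a pc-sensitive sequence contiguous: no pool emission inside.
//     Assembler::BlockPoolsScope scope(assm, PoolEmissionCheck::kSkip);
//     // ... emit the sequence ...
//   }  // ~BlockScope() -> EndBlock()
//   assm->constpool_.MaybeCheck();  // periodic check; emits only if needed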

#if defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_RISCV32)
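// The RISC-V implementation mirrors the ARM64 ConstantPool above. The visible
// differences in this excerpt: DataAlign() instead of Align() for the 64-bit
// alignment padding, an unconditional bind of after_pool, and a size check
// that tolerates a few bytes of slack (DCHECK_LE ... 3) rather than requiring
// an exact match.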
ConstantPool::ConstantPool(Assembler* assm) : assm_(assm) {}
ConstantPool::~ConstantPool() { DCHECK_EQ(blocked_nesting_, 0); }

RelocInfoStatus ConstantPool::RecordEntry(uint32_t data,
                                          RelocInfo::Mode rmode) {
  ConstantPoolKey key(data, rmode);
  return RecordKey(std::move(key), assm_->pc_offset());

RelocInfoStatus ConstantPool::RecordEntry(uint64_t data,
                                          RelocInfo::Mode rmode) {
  ConstantPoolKey key(data, rmode);
  return RecordKey(std::move(key), assm_->pc_offset());

RelocInfoStatus ConstantPool::RecordKey(ConstantPoolKey key, int offset) {
  RelocInfoStatus write_reloc_info = GetRelocInfoStatusFor(key);
  if (write_reloc_info == RelocInfoStatus::kMustRecord) {
    if (key.is_value32()) {
      if (entry32_count_ == 0) first_use_32_ = offset;
      if (entry64_count_ == 0) first_use_64_ = offset;
  if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
  return write_reloc_info;

RelocInfoStatus ConstantPool::GetRelocInfoStatusFor(
    const ConstantPoolKey& key) {
  if (key.AllowsDeduplication()) {
      return RelocInfoStatus::kMustOmitForDuplicate;
  return RelocInfoStatus::kMustRecord;

void ConstantPool::EmitAndClear(Jump require_jump) {
  Assembler::BlockPoolsScope block_pools(assm_, PoolEmissionCheck::kSkip);
  Alignment require_alignment =
      IsAlignmentRequiredIfEmittedAt(require_jump, assm_->pc_offset());
  int size = ComputeSize(require_jump, require_alignment);
  assm_->bind(&size_check);
  assm_->RecordConstPool(size);
  if (require_jump == Jump::kRequired) assm_->b(&after_pool);
  assm_->RecordComment("[ Constant Pool");
  EmitPrologue(require_alignment);
  if (require_alignment == Alignment::kRequired) assm_->DataAlign(kInt64Size);
  assm_->RecordComment("]");
  assm_->bind(&after_pool);
  DCHECK_LE(assm_->SizeOfCodeGeneratedSince(&size_check) - size, 3);
void ConstantPool::Clear() {

void ConstantPool::StartBlock() {
  if (blocked_nesting_ == 0) {

void ConstantPool::EndBlock() {
  if (blocked_nesting_ == 0) {
    DCHECK(IsInImmRangeIfEmittedAt(assm_->pc_offset()));

bool ConstantPool::IsBlocked() const { return blocked_nesting_ > 0; }

void ConstantPool::SetNextCheckIn(size_t instructions) {
      assm_->pc_offset() + static_cast<int>(instructions * kInstrSize);

void ConstantPool::EmitEntries() {
    auto range = entries_.equal_range(iter->first);
    bool shared = iter->first.AllowsDeduplication();
    for (auto it = range.first; it != range.second; ++it) {
      SetLoadOffsetToConstPoolEntry(it->second, assm_->pc(), it->first);
      if (!shared) Emit(it->first);
    if (shared) Emit(iter->first);

void ConstantPool::Emit(const ConstantPoolKey& key) {
  if (key.is_value32()) {
    assm_->dd(key.value32());
    assm_->dq(key.value64());

bool ConstantPool::ShouldEmitNow(Jump require_jump, size_t margin) const {
  if (IsEmpty()) return false;
  if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
  int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired);
  size_t pool_end_32 = assm_->pc_offset() + margin + worst_case_size;
  size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
  if (Entry64Count() != 0) {
    size_t dist64 = pool_end_64 - first_use_64_;
    bool next_check_too_late = dist64 + 2 * kCheckInterval >= kMaxDistToPool64;
    bool opportune_emission_without_jump =
        require_jump == Jump::kOmitted && (dist64 >= kOpportunityDistToPool64);
    bool approximate_distance_exceeded = dist64 >= kApproxDistToPool64;
    if (next_check_too_late || opportune_emission_without_jump ||
        approximate_distance_exceeded) {
  if (Entry32Count() != 0) {
    size_t dist32 = pool_end_32 - first_use_32_;
    bool next_check_too_late = dist32 + 2 * kCheckInterval >= kMaxDistToPool32;
    bool opportune_emission_without_jump =
        require_jump == Jump::kOmitted && (dist32 >= kOpportunityDistToPool32);
    bool approximate_distance_exceeded = dist32 >= kApproxDistToPool32;
    if (next_check_too_late || opportune_emission_without_jump ||
        approximate_distance_exceeded) {
int ConstantPool::ComputeSize(Jump require_jump,
                              Alignment require_alignment) const {
  int size_up_to_marker = PrologueSize(require_jump);
  int alignment = require_alignment == Alignment::kRequired ? kInstrSize : 0;
  size_t size_after_marker =
  return size_up_to_marker + static_cast<int>(size_after_marker);

Alignment ConstantPool::IsAlignmentRequiredIfEmittedAt(Jump require_jump,
  int size_up_to_marker = PrologueSize(require_jump);
  if (Entry64Count() != 0 &&
    return Alignment::kRequired;
  return Alignment::kOmitted;

bool ConstantPool::IsInImmRangeIfEmittedAt(int pc_offset) {
  Alignment require_alignment =
      IsAlignmentRequiredIfEmittedAt(Jump::kRequired, pc_offset);
      pc_offset + ComputeSize(Jump::kRequired, require_alignment);
  size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
  bool entries_in_range_32 =
      Entry32Count() == 0 || (pool_end_32 < first_use_32_ + kMaxDistToPool32);
  bool entries_in_range_64 =
      Entry64Count() == 0 || (pool_end_64 < first_use_64_ + kMaxDistToPool64);
  return entries_in_range_32 && entries_in_range_64;

ConstantPool::BlockScope::BlockScope(Assembler* assm, size_t margin)
    : pool_(&assm->constpool_) {
  pool_->assm_->EmitConstPoolWithJumpIfNeeded(margin);

ConstantPool::BlockScope::BlockScope(Assembler* assm, PoolEmissionCheck check)
    : pool_(&assm->constpool_) {
  DCHECK_EQ(check, PoolEmissionCheck::kSkip);

ConstantPool::BlockScope::~BlockScope() { pool_->EndBlock(); }

void ConstantPool::MaybeCheck() {
  if (assm_->pc_offset() >= next_check_) {
    Check(Emission::kIfNeeded, Jump::kRequired);