v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
constant-pool.cc
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/codegen/constant-pool.h"

#include "src/codegen/assembler-arch.h"
#include "src/codegen/assembler-inl.h"

namespace v8 {
namespace internal {

#if defined(V8_TARGET_ARCH_PPC64)

ConstantPoolBuilder::ConstantPoolBuilder(int ptr_reach_bits,
                                         int double_reach_bits) {
  info_[ConstantPoolEntry::INTPTR].entries.reserve(64);
  info_[ConstantPoolEntry::INTPTR].regular_reach_bits = ptr_reach_bits;
  info_[ConstantPoolEntry::DOUBLE].regular_reach_bits = double_reach_bits;
}

ConstantPoolEntry::Access ConstantPoolBuilder::NextAccess(
    ConstantPoolEntry::Type type) const {
  const PerTypeEntryInfo& info = info_[type];

  if (info.overflow()) return ConstantPoolEntry::OVERFLOWED;

  int dbl_count = info_[ConstantPoolEntry::DOUBLE].regular_count;
  int dbl_offset = dbl_count * kDoubleSize;
  int ptr_count = info_[ConstantPoolEntry::INTPTR].regular_count;
  int ptr_offset = ptr_count * kSystemPointerSize + dbl_offset;

  if (type == ConstantPoolEntry::DOUBLE) {
    // Double overflow detection must take into account the reach for both
    // types.
    int ptr_reach_bits = info_[ConstantPoolEntry::INTPTR].regular_reach_bits;
    if (!is_uintn(dbl_offset, info.regular_reach_bits) ||
        (ptr_count > 0 &&
         !is_uintn(ptr_offset + kSystemPointerSize + kDoubleSize,
                   ptr_reach_bits))) {
      return ConstantPoolEntry::OVERFLOWED;
    }
  } else {
    if (!is_uintn(ptr_offset, info.regular_reach_bits)) {
      return ConstantPoolEntry::OVERFLOWED;
    }
  }

  return ConstantPoolEntry::REGULAR;
}
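
// Worked illustration (informal, not part of the original file): on PPC64 both
// kDoubleSize and kSystemPointerSize are 8. With 3 regular DOUBLE entries and
// 2 regular INTPTR entries already accepted, dbl_offset == 24 and
// ptr_offset == 40. A new DOUBLE entry stays REGULAR only if 24 fits in the
// double reach and 40 + 8 + 8 == 56 still fits in the pointer reach (the check
// conservatively accounts for the pointer section being pushed back by the new
// double); a new INTPTR entry stays REGULAR only if 40 fits in the pointer
// reach.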

ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry(
    ConstantPoolEntry* entry, ConstantPoolEntry::Type type) {
  DCHECK(!emitted_label_.is_bound());
  PerTypeEntryInfo& info = info_[type];
  const int entry_size = ConstantPoolEntry::size(type);
  bool merged = false;

  if (entry->sharing_ok()) {
    // Try to merge entries
    std::vector<ConstantPoolEntry>::iterator it = info.shared_entries.begin();
    int end = static_cast<int>(info.shared_entries.size());
    for (int i = 0; i < end; i++, it++) {
      if ((entry_size == kSystemPointerSize)
              ? entry->value() == it->value()
              : entry->value64() == it->value64()) {
        // Merge with found entry.
        entry->set_merged_index(i);
        merged = true;
        break;
      }
    }
  }

  // By definition, merged entries have regular access.
  DCHECK(!merged || entry->merged_index() < info.regular_count);
  ConstantPoolEntry::Access access =
      (merged ? ConstantPoolEntry::REGULAR : NextAccess(type));

  // Enforce an upper bound on search time by limiting the search to
  // unique sharable entries which fit in the regular section.
  if (entry->sharing_ok() && !merged && access == ConstantPoolEntry::REGULAR) {
    info.shared_entries.push_back(*entry);
  } else {
    info.entries.push_back(*entry);
  }

  // We're done if we found a match or have already triggered the
  // overflow state.
  if (merged || info.overflow()) return access;

  if (access == ConstantPoolEntry::REGULAR) {
    info.regular_count++;
  } else {
    info.overflow_start = static_cast<int>(info.entries.size()) - 1;
  }

  return access;
}
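
// Illustration (informal, not part of the original file): if the same sharable
// INTPTR value is added twice, the first call stores it in shared_entries and
// the second call only records a merged_index() pointing at it. EmitGroup()
// below then patches the second load to the first entry's offset instead of
// emitting the value again, so the pool holds the constant once.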

void ConstantPoolBuilder::EmitSharedEntries(Assembler* assm,
                                            ConstantPoolEntry::Type type) {
  PerTypeEntryInfo& info = info_[type];
  std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
  const int entry_size = ConstantPoolEntry::size(type);
  int base = emitted_label_.pos();
  DCHECK_GT(base, 0);
  int shared_end = static_cast<int>(shared_entries.size());
  std::vector<ConstantPoolEntry>::iterator shared_it = shared_entries.begin();
  for (int i = 0; i < shared_end; i++, shared_it++) {
    int offset = assm->pc_offset() - base;
    shared_it->set_offset(offset);  // Save offset for merged entries.
    if (entry_size == kSystemPointerSize) {
      assm->dp(shared_it->value());
    } else {
      assm->dq(shared_it->value64());
    }
    DCHECK(is_uintn(offset, info.regular_reach_bits));

    // Patch load sequence with correct offset.
    assm->PatchConstantPoolAccessInstruction(shared_it->position(), offset,
                                             ConstantPoolEntry::REGULAR, type);
  }
}

void ConstantPoolBuilder::EmitGroup(Assembler* assm,
                                    ConstantPoolEntry::Access access,
                                    ConstantPoolEntry::Type type) {
  PerTypeEntryInfo& info = info_[type];
  const bool overflow = info.overflow();
  std::vector<ConstantPoolEntry>& entries = info.entries;
  std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
  const int entry_size = ConstantPoolEntry::size(type);
  int base = emitted_label_.pos();
  DCHECK_GT(base, 0);
  int begin;
  int end;

  if (access == ConstantPoolEntry::REGULAR) {
    // Emit any shared entries first
    EmitSharedEntries(assm, type);
  }

  if (access == ConstantPoolEntry::REGULAR) {
    begin = 0;
    end = overflow ? info.overflow_start : static_cast<int>(entries.size());
  } else {
    DCHECK(access == ConstantPoolEntry::OVERFLOWED);
    if (!overflow) return;
    begin = info.overflow_start;
    end = static_cast<int>(entries.size());
  }

  std::vector<ConstantPoolEntry>::iterator it = entries.begin();
  if (begin > 0) std::advance(it, begin);
  for (int i = begin; i < end; i++, it++) {
    // Update constant pool if necessary and get the entry's offset.
    int offset;
    ConstantPoolEntry::Access entry_access;
    if (!it->is_merged()) {
      // Emit new entry
      offset = assm->pc_offset() - base;
      entry_access = access;
      if (entry_size == kSystemPointerSize) {
        assm->dp(it->value());
      } else {
        assm->dq(it->value64());
      }
    } else {
      // Retrieve offset from shared entry.
      offset = shared_entries[it->merged_index()].offset();
      entry_access = ConstantPoolEntry::REGULAR;
    }

    DCHECK(entry_access == ConstantPoolEntry::OVERFLOWED ||
           is_uintn(offset, info.regular_reach_bits));

    // Patch load sequence with correct offset.
    assm->PatchConstantPoolAccessInstruction(it->position(), offset,
                                             entry_access, type);
  }
}

// Emit and return size of pool.
int ConstantPoolBuilder::Emit(Assembler* assm) {
  bool emitted = emitted_label_.is_bound();
  bool empty = IsEmpty();

  if (!emitted) {
    // Mark start of constant pool. Align if necessary.
    if (!empty) assm->DataAlign(kDoubleSize);
    assm->bind(&emitted_label_);
    if (!empty) {
      // Emit in groups based on access and type.
      // Emit doubles first for alignment purposes.
      EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::DOUBLE);
      EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::INTPTR);
      if (info_[ConstantPoolEntry::DOUBLE].overflow()) {
        assm->DataAlign(kDoubleSize);
        EmitGroup(assm, ConstantPoolEntry::OVERFLOWED,
                  ConstantPoolEntry::DOUBLE);
      }
      if (info_[ConstantPoolEntry::INTPTR].overflow()) {
        EmitGroup(assm, ConstantPoolEntry::OVERFLOWED,
                  ConstantPoolEntry::INTPTR);
      }
    }
  }

  return !empty ? (assm->pc_offset() - emitted_label_.pos()) : 0;
}
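
// Emission order produced above (informal summary): regular DOUBLE entries,
// then regular INTPTR entries, then, after re-aligning to kDoubleSize, any
// overflowed DOUBLE entries, and finally overflowed INTPTR entries. Doubles
// are emitted first so the initial DataAlign(kDoubleSize) keeps them 8-byte
// aligned.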

#endif  // defined(V8_TARGET_ARCH_PPC64)

#if defined(V8_TARGET_ARCH_ARM64)

// Constant Pool.

ConstantPool::ConstantPool(Assembler* assm) : assm_(assm) {}
ConstantPool::~ConstantPool() { DCHECK_EQ(blocked_nesting_, 0); }

RelocInfoStatus ConstantPool::RecordEntry(uint32_t data,
                                          RelocInfo::Mode rmode) {
  ConstantPoolKey key(data, rmode);
  CHECK(key.is_value32());
  return RecordKey(std::move(key), assm_->pc_offset());
}

RelocInfoStatus ConstantPool::RecordEntry(uint64_t data,
                                          RelocInfo::Mode rmode) {
  ConstantPoolKey key(data, rmode);
  CHECK(!key.is_value32());
  return RecordKey(std::move(key), assm_->pc_offset());
}

RelocInfoStatus ConstantPool::RecordKey(ConstantPoolKey key, int offset) {
  RelocInfoStatus write_reloc_info = GetRelocInfoStatusFor(key);
  if (write_reloc_info == RelocInfoStatus::kMustRecord) {
    if (key.is_value32()) {
      if (entry32_count_ == 0) first_use_32_ = offset;
      ++entry32_count_;
    } else {
      if (entry64_count_ == 0) first_use_64_ = offset;
      ++entry64_count_;
    }
  }
  entries_.insert(std::make_pair(key, offset));

  if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
    // Request constant pool emission after the next instruction.
    SetNextCheckIn(1);
  }

  return write_reloc_info;
}
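
// Note (informal): first_use_32_ and first_use_64_ record the pc offset of the
// first load that refers to a 32-bit or 64-bit entry; ShouldEmitNow() below
// uses them to bound the distance between that load and the future pool
// location.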

RelocInfoStatus ConstantPool::GetRelocInfoStatusFor(
    const ConstantPoolKey& key) {
  if (key.AllowsDeduplication()) {
    auto existing = entries_.find(key);
    if (existing != entries_.end()) {
      return RelocInfoStatus::kMustOmitForDuplicate;
    }
  }
  return RelocInfoStatus::kMustRecord;
}

void ConstantPool::EmitAndClear(Jump require_jump) {
  DCHECK(!IsBlocked());
  // Prevent recursive pool emission.
  Assembler::BlockPoolsScope block_pools(assm_, PoolEmissionCheck::kSkip);
  Alignment require_alignment =
      IsAlignmentRequiredIfEmittedAt(require_jump, assm_->pc_offset());
  int size = ComputeSize(require_jump, require_alignment);
  Label size_check;
  assm_->bind(&size_check);
  assm_->RecordConstPool(size);

  // Emit the constant pool. It is preceded by an optional branch if
  // {require_jump} and a header which will:
  //  1) Encode the size of the constant pool, for use by the disassembler.
  //  2) Terminate the program, to try to prevent execution from accidentally
  //     flowing into the constant pool.
  //  3) align the 64bit pool entries to 64-bit.
  // TODO(all): Make the alignment part less fragile. Currently code is
  // allocated as a byte array so there are no guarantees the alignment will
  // be preserved on compaction. Currently it works as allocation seems to be
  // 64-bit aligned.

  Label after_pool;
  if (require_jump == Jump::kRequired) assm_->b(&after_pool);

  assm_->RecordComment("[ Constant Pool");
  EmitPrologue(require_alignment);
  if (require_alignment == Alignment::kRequired) assm_->Align(kInt64Size);
  EmitEntries();
  assm_->RecordComment("]");

  if (after_pool.is_linked()) assm_->bind(&after_pool);

  DCHECK_EQ(assm_->SizeOfCodeGeneratedSince(&size_check), size);
  Clear();
}
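
// Layout sketch (informal, derived from ComputeSize() and EmitEntries()):
//   [unconditional branch over the pool]   - only if require_jump
//   [prologue / marker that encodes the pool size]
//   [up to one instruction of padding]     - only if 64-bit alignment needed
//   [all 64-bit entries]
//   [all 32-bit entries]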

void ConstantPool::Clear() {
  entries_.clear();
  first_use_32_ = -1;
  first_use_64_ = -1;
  entry32_count_ = 0;
  entry64_count_ = 0;
  next_check_ = 0;
  old_next_check_ = 0;
}

void ConstantPool::StartBlock() {
  if (blocked_nesting_ == 0) {
    // Prevent constant pool checks from happening by setting the next check to
    // the biggest possible offset.
    old_next_check_ = next_check_;
    next_check_ = kMaxInt;
  }
  ++blocked_nesting_;
}

void ConstantPool::EndBlock() {
  --blocked_nesting_;
  if (blocked_nesting_ == 0) {
    DCHECK(IsInImmRangeIfEmittedAt(assm_->pc_offset()));
    // Restore the old next_check_ value if it's less than the current
    // next_check_. This accounts for any attempt to emit pools sooner whilst
    // pools were blocked.
    next_check_ = std::min(next_check_, old_next_check_);
  }
}

bool ConstantPool::IsBlocked() const { return blocked_nesting_ > 0; }

void ConstantPool::SetNextCheckIn(size_t instructions) {
  next_check_ =
      assm_->pc_offset() + static_cast<int>(instructions * kInstrSize);
}

void ConstantPool::EmitEntries() {
  for (auto iter = entries_.begin(); iter != entries_.end();) {
    DCHECK(iter->first.is_value32() || IsAligned(assm_->pc_offset(), 8));
    auto range = entries_.equal_range(iter->first);
    bool shared = iter->first.AllowsDeduplication();
    for (auto it = range.first; it != range.second; ++it) {
      SetLoadOffsetToConstPoolEntry(it->second, assm_->pc(), it->first);
      if (!shared) Emit(it->first);
    }
    if (shared) Emit(iter->first);
    iter = range.second;
  }
}
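
// Note (informal): for a key that allows deduplication and was recorded at N
// load sites, the inner loop patches all N loads to the single constant that
// is emitted once after the loop; keys that do not allow deduplication are
// emitted once per recorded use.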

void ConstantPool::Emit(const ConstantPoolKey& key) {
  if (key.is_value32()) {
    assm_->dd(key.value32());
  } else {
    assm_->dq(key.value64());
  }
}

bool ConstantPool::ShouldEmitNow(Jump require_jump, size_t margin) const {
  if (IsEmpty()) return false;
  if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
    return true;
  }
  // We compute {dist32/64}, i.e. the distance from the first instruction
  // accessing a 32bit/64bit entry in the constant pool to any of the
  // 32bit/64bit constant pool entries, respectively. This is required because
  // we do not guarantee that entries are emitted in order of reference, i.e. it
  // is possible that the entry with the earliest reference is emitted last.
  // The constant pool should be emitted if either of the following is true:
  // (A) {dist32/64} will be out of range at the next check in.
  // (B) Emission can be done behind an unconditional branch and {dist32/64}
  //     exceeds {kOpportunityDist*}.
  // (C) {dist32/64} exceeds the desired approximate distance to the pool.
  int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired);
  size_t pool_end_32 = assm_->pc_offset() + margin + worst_case_size;
  size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
  if (Entry64Count() != 0) {
    // The 64-bit constants are always emitted before the 32-bit constants, so
    // we subtract the size of the 32-bit constants from {size}.
    size_t dist64 = pool_end_64 - first_use_64_;
    bool next_check_too_late = dist64 + 2 * kCheckInterval >= kMaxDistToPool64;
    bool opportune_emission_without_jump =
        require_jump == Jump::kOmitted && (dist64 >= kOpportunityDistToPool64);
    bool approximate_distance_exceeded = dist64 >= kApproxDistToPool64;
    if (next_check_too_late || opportune_emission_without_jump ||
        approximate_distance_exceeded) {
      return true;
    }
  }
  if (Entry32Count() != 0) {
    size_t dist32 = pool_end_32 - first_use_32_;
    bool next_check_too_late = dist32 + 2 * kCheckInterval >= kMaxDistToPool32;
    bool opportune_emission_without_jump =
        require_jump == Jump::kOmitted && (dist32 >= kOpportunityDistToPool32);
    bool approximate_distance_exceeded = dist32 >= kApproxDistToPool32;
    if (next_check_too_late || opportune_emission_without_jump ||
        approximate_distance_exceeded) {
      return true;
    }
  }
  return false;
}
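
// Worked illustration (made-up numbers; the k* limits are architecture
// constants defined elsewhere): with pc_offset() == 1000, margin == 0 and a
// worst-case pool size of 40 bytes, pool_end_32 == 1040; with two 32-bit
// entries pending, pool_end_64 == 1040 - 2 * 4 == 1032. If the first 64-bit
// use was recorded at offset 200, dist64 == 832, and the pool is emitted now
// if 832 + 2 * kCheckInterval >= kMaxDistToPool64, if the jump can be omitted
// and 832 >= kOpportunityDistToPool64, or if 832 >= kApproxDistToPool64.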

int ConstantPool::ComputeSize(Jump require_jump,
                              Alignment require_alignment) const {
  int size_up_to_marker = PrologueSize(require_jump);
  int alignment = require_alignment == Alignment::kRequired ? kInstrSize : 0;
  size_t size_after_marker =
      Entry32Count() * kInt32Size + alignment + Entry64Count() * kInt64Size;
  return size_up_to_marker + static_cast<int>(size_after_marker);
}
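
// Example (informal): with 3 pending 32-bit entries, 2 pending 64-bit entries
// and alignment required, size_after_marker == 3 * 4 + 4 + 2 * 8 == 32 bytes
// on ARM64 (kInt32Size == 4, kInstrSize == 4, kInt64Size == 8), on top of the
// prologue size returned by PrologueSize(require_jump).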

Alignment ConstantPool::IsAlignmentRequiredIfEmittedAt(Jump require_jump,
                                                       int pc_offset) const {
  int size_up_to_marker = PrologueSize(require_jump);
  if (Entry64Count() != 0 &&
      !IsAligned(pc_offset + size_up_to_marker, kInt64Size)) {
    return Alignment::kRequired;
  }
  return Alignment::kOmitted;
}

bool ConstantPool::IsInImmRangeIfEmittedAt(int pc_offset) {
  // Check that all entries are in range if the pool is emitted at {pc_offset}.
  // This ignores kPcLoadDelta (conservatively, since all offsets are positive),
  // and over-estimates the last entry's address with the pool's end.
  Alignment require_alignment =
      IsAlignmentRequiredIfEmittedAt(Jump::kRequired, pc_offset);
  size_t pool_end_32 =
      pc_offset + ComputeSize(Jump::kRequired, require_alignment);
  size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
  bool entries_in_range_32 =
      Entry32Count() == 0 || (pool_end_32 < first_use_32_ + kMaxDistToPool32);
  bool entries_in_range_64 =
      Entry64Count() == 0 || (pool_end_64 < first_use_64_ + kMaxDistToPool64);
  return entries_in_range_32 && entries_in_range_64;
}

ConstantPool::BlockScope::BlockScope(Assembler* assm, size_t margin)
    : pool_(&assm->constpool_) {
  pool_->assm_->EmitConstPoolWithJumpIfNeeded(margin);
  pool_->StartBlock();
}

ConstantPool::BlockScope::BlockScope(Assembler* assm, PoolEmissionCheck check)
    : pool_(&assm->constpool_) {
  DCHECK_EQ(check, PoolEmissionCheck::kSkip);
  pool_->StartBlock();
}

ConstantPool::BlockScope::~BlockScope() { pool_->EndBlock(); }

void ConstantPool::MaybeCheck() {
  if (assm_->pc_offset() >= next_check_) {
    Check(Emission::kIfNeeded, Jump::kRequired);
  }
}

#endif  // defined(V8_TARGET_ARCH_ARM64)

#if defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_RISCV32)

// Constant Pool.

ConstantPool::ConstantPool(Assembler* assm) : assm_(assm) {}
ConstantPool::~ConstantPool() { DCHECK_EQ(blocked_nesting_, 0); }

RelocInfoStatus ConstantPool::RecordEntry(uint32_t data,
                                          RelocInfo::Mode rmode) {
  ConstantPoolKey key(data, rmode);
  CHECK(key.is_value32());
  return RecordKey(std::move(key), assm_->pc_offset());
}

RelocInfoStatus ConstantPool::RecordEntry(uint64_t data,
                                          RelocInfo::Mode rmode) {
  ConstantPoolKey key(data, rmode);
  CHECK(!key.is_value32());
  return RecordKey(std::move(key), assm_->pc_offset());
}

RelocInfoStatus ConstantPool::RecordKey(ConstantPoolKey key, int offset) {
  RelocInfoStatus write_reloc_info = GetRelocInfoStatusFor(key);
  if (write_reloc_info == RelocInfoStatus::kMustRecord) {
    if (key.is_value32()) {
      if (entry32_count_ == 0) first_use_32_ = offset;
      ++entry32_count_;
    } else {
      if (entry64_count_ == 0) first_use_64_ = offset;
      ++entry64_count_;
    }
  }
  entries_.insert(std::make_pair(key, offset));

  if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
    // Request constant pool emission after the next instruction.
    SetNextCheckIn(1);
  }

  return write_reloc_info;
}

RelocInfoStatus ConstantPool::GetRelocInfoStatusFor(
    const ConstantPoolKey& key) {
  if (key.AllowsDeduplication()) {
    auto existing = entries_.find(key);
    if (existing != entries_.end()) {
      return RelocInfoStatus::kMustOmitForDuplicate;
    }
  }
  return RelocInfoStatus::kMustRecord;
}

void ConstantPool::EmitAndClear(Jump require_jump) {
  DCHECK(!IsBlocked());
  // Prevent recursive pool emission.
  Assembler::BlockPoolsScope block_pools(assm_, PoolEmissionCheck::kSkip);
  Alignment require_alignment =
      IsAlignmentRequiredIfEmittedAt(require_jump, assm_->pc_offset());
  int size = ComputeSize(require_jump, require_alignment);
  Label size_check;
  assm_->bind(&size_check);
  assm_->RecordConstPool(size);

  // Emit the constant pool. It is preceded by an optional branch if
  // {require_jump} and a header which will:
  //  1) Encode the size of the constant pool, for use by the disassembler.
  //  2) Terminate the program, to try to prevent execution from accidentally
  //     flowing into the constant pool.
  //  3) align the 64bit pool entries to 64-bit.
  // TODO(all): Make the alignment part less fragile. Currently code is
  // allocated as a byte array so there are no guarantees the alignment will
  // be preserved on compaction. Currently it works as allocation seems to be
  // 64-bit aligned.
  DEBUG_PRINTF("\tConstant Pool start\n")
  Label after_pool;
  if (require_jump == Jump::kRequired) assm_->b(&after_pool);

  assm_->RecordComment("[ Constant Pool");

  EmitPrologue(require_alignment);
  if (require_alignment == Alignment::kRequired) assm_->DataAlign(kInt64Size);
  EmitEntries();
  assm_->RecordComment("]");
  assm_->bind(&after_pool);
  DEBUG_PRINTF("\tConstant Pool end\n")

  DCHECK_LE(assm_->SizeOfCodeGeneratedSince(&size_check) - size, 3);
  Clear();
}

void ConstantPool::Clear() {
  entries_.clear();
  first_use_32_ = -1;
  first_use_64_ = -1;
  entry32_count_ = 0;
  entry64_count_ = 0;
  next_check_ = 0;
}

void ConstantPool::StartBlock() {
  if (blocked_nesting_ == 0) {
    // Prevent constant pool checks from happening by setting the next check to
    // the biggest possible offset.
    next_check_ = kMaxInt;
  }
  ++blocked_nesting_;
}

void ConstantPool::EndBlock() {
  --blocked_nesting_;
  if (blocked_nesting_ == 0) {
    DCHECK(IsInImmRangeIfEmittedAt(assm_->pc_offset()));
    // Make sure a check happens quickly after getting unblocked.
    next_check_ = 0;
  }
}

bool ConstantPool::IsBlocked() const { return blocked_nesting_ > 0; }

void ConstantPool::SetNextCheckIn(size_t instructions) {
  next_check_ =
      assm_->pc_offset() + static_cast<int>(instructions * kInstrSize);
}

void ConstantPool::EmitEntries() {
  for (auto iter = entries_.begin(); iter != entries_.end();) {
    DCHECK(iter->first.is_value32() || IsAligned(assm_->pc_offset(), 8));
    auto range = entries_.equal_range(iter->first);
    bool shared = iter->first.AllowsDeduplication();
    for (auto it = range.first; it != range.second; ++it) {
      SetLoadOffsetToConstPoolEntry(it->second, assm_->pc(), it->first);
      if (!shared) Emit(it->first);
    }
    if (shared) Emit(iter->first);
    iter = range.second;
  }
}

void ConstantPool::Emit(const ConstantPoolKey& key) {
  if (key.is_value32()) {
    assm_->dd(key.value32());
  } else {
    assm_->dq(key.value64());
  }
}

bool ConstantPool::ShouldEmitNow(Jump require_jump, size_t margin) const {
  if (IsEmpty()) return false;
  if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
    return true;
  }
  // We compute {dist32/64}, i.e. the distance from the first instruction
  // accessing a 32bit/64bit entry in the constant pool to any of the
  // 32bit/64bit constant pool entries, respectively. This is required because
  // we do not guarantee that entries are emitted in order of reference, i.e. it
  // is possible that the entry with the earliest reference is emitted last.
  // The constant pool should be emitted if either of the following is true:
  // (A) {dist32/64} will be out of range at the next check in.
  // (B) Emission can be done behind an unconditional branch and {dist32/64}
  //     exceeds {kOpportunityDist*}.
  // (C) {dist32/64} exceeds the desired approximate distance to the pool.
  int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired);
  size_t pool_end_32 = assm_->pc_offset() + margin + worst_case_size;
  size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
  if (Entry64Count() != 0) {
    // The 64-bit constants are always emitted before the 32-bit constants, so
    // we subtract the size of the 32-bit constants from {size}.
    size_t dist64 = pool_end_64 - first_use_64_;
    bool next_check_too_late = dist64 + 2 * kCheckInterval >= kMaxDistToPool64;
    bool opportune_emission_without_jump =
        require_jump == Jump::kOmitted && (dist64 >= kOpportunityDistToPool64);
    bool approximate_distance_exceeded = dist64 >= kApproxDistToPool64;
    if (next_check_too_late || opportune_emission_without_jump ||
        approximate_distance_exceeded) {
      return true;
    }
  }
  if (Entry32Count() != 0) {
    size_t dist32 = pool_end_32 - first_use_32_;
    bool next_check_too_late = dist32 + 2 * kCheckInterval >= kMaxDistToPool32;
    bool opportune_emission_without_jump =
        require_jump == Jump::kOmitted && (dist32 >= kOpportunityDistToPool32);
    bool approximate_distance_exceeded = dist32 >= kApproxDistToPool32;
    if (next_check_too_late || opportune_emission_without_jump ||
        approximate_distance_exceeded) {
      return true;
    }
  }
  return false;
}

int ConstantPool::ComputeSize(Jump require_jump,
                              Alignment require_alignment) const {
  int size_up_to_marker = PrologueSize(require_jump);
  int alignment = require_alignment == Alignment::kRequired ? kInstrSize : 0;
  size_t size_after_marker =
      Entry32Count() * kInt32Size + alignment + Entry64Count() * kInt64Size;
  return size_up_to_marker + static_cast<int>(size_after_marker);
}

Alignment ConstantPool::IsAlignmentRequiredIfEmittedAt(Jump require_jump,
                                                       int pc_offset) const {
  int size_up_to_marker = PrologueSize(require_jump);
  if (Entry64Count() != 0 &&
      !IsAligned(pc_offset + size_up_to_marker, kInt64Size)) {
    return Alignment::kRequired;
  }
  return Alignment::kOmitted;
}

bool ConstantPool::IsInImmRangeIfEmittedAt(int pc_offset) {
  // Check that all entries are in range if the pool is emitted at {pc_offset}.
  // This ignores kPcLoadDelta (conservatively, since all offsets are positive),
  // and over-estimates the last entry's address with the pool's end.
  Alignment require_alignment =
      IsAlignmentRequiredIfEmittedAt(Jump::kRequired, pc_offset);
  size_t pool_end_32 =
      pc_offset + ComputeSize(Jump::kRequired, require_alignment);
  size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
  bool entries_in_range_32 =
      Entry32Count() == 0 || (pool_end_32 < first_use_32_ + kMaxDistToPool32);
  bool entries_in_range_64 =
      Entry64Count() == 0 || (pool_end_64 < first_use_64_ + kMaxDistToPool64);
  return entries_in_range_32 && entries_in_range_64;
}

ConstantPool::BlockScope::BlockScope(Assembler* assm, size_t margin)
    : pool_(&assm->constpool_) {
  pool_->assm_->EmitConstPoolWithJumpIfNeeded(margin);
  pool_->StartBlock();
}

ConstantPool::BlockScope::BlockScope(Assembler* assm, PoolEmissionCheck check)
    : pool_(&assm->constpool_) {
  DCHECK_EQ(check, PoolEmissionCheck::kSkip);
  pool_->StartBlock();
}

ConstantPool::BlockScope::~BlockScope() { pool_->EndBlock(); }

void ConstantPool::MaybeCheck() {
  if (assm_->pc_offset() >= next_check_) {
    Check(Emission::kIfNeeded, Jump::kRequired);
  }
}

#endif  // defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_RISCV32)

}  // namespace internal
}  // namespace v8