v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
register-allocator.cc
1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/compiler/backend/register-allocator.h"
6
7#include <iomanip>
8#include <optional>
9
10#include "src/base/iterator.h"
12#include "src/base/vector.h"
20
21namespace v8 {
22namespace internal {
23namespace compiler {
24
25#define TRACE(...) \
26 do { \
27 if (v8_flags.trace_turbo_alloc) PrintF(__VA_ARGS__); \
28 } while (false)
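// TRACE is this file's debug-logging helper: it prints via PrintF only when
// v8_flags.trace_turbo_alloc is set, so allocation decisions can be traced
// without extra cost in regular runs beyond the flag check.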
29
30namespace {
31
32static constexpr int kFloat32Bit =
33    RepresentationBit(MachineRepresentation::kFloat32);
34static constexpr int kSimd128Bit =
35    RepresentationBit(MachineRepresentation::kSimd128);
36
37const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence,
38 const InstructionBlock* block) {
39 RpoNumber index = block->loop_header();
40 if (!index.IsValid()) return nullptr;
41 return sequence->InstructionBlockAt(index);
42}
43
44const InstructionBlock* GetInstructionBlock(const InstructionSequence* code,
45 LifetimePosition pos) {
46 return code->GetInstructionBlock(pos.ToInstructionIndex());
47}
48
49Instruction* GetLastInstruction(InstructionSequence* code,
50 const InstructionBlock* block) {
51 return code->InstructionAt(block->last_instruction_index());
52}
53
54} // namespace
55
56using DelayedInsertionMapKey = std::pair<ParallelMove*, InstructionOperand>;
57
58struct DelayedInsertionMapCompare {
59  bool operator()(const DelayedInsertionMapKey& a,
60                  const DelayedInsertionMapKey& b) const {
61 if (a.first == b.first) {
62 return a.second.Compare(b.second);
63 }
64 return a.first < b.first;
65 }
66};
67
68using DelayedInsertionMap = ZoneMap<DelayedInsertionMapKey, InstructionOperand,
69                                    DelayedInsertionMapCompare>;
70
71UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
72                         void* hint, UsePositionHintType hint_type)
73 : operand_(operand), hint_(hint), pos_(pos), flags_(0) {
75  bool register_beneficial = true;
76  UsePositionType type = UsePositionType::kRegisterOrSlot;
77  if (operand_ != nullptr && operand_->IsUnallocated()) {
78    const UnallocatedOperand* unalloc = UnallocatedOperand::cast(operand_);
79    if (unalloc->HasRegisterPolicy()) {
80      type = UsePositionType::kRequiresRegister;
81    } else if (unalloc->HasSlotPolicy()) {
82      type = UsePositionType::kRequiresSlot;
83      register_beneficial = false;
84    } else if (unalloc->HasRegisterOrSlotOrConstantPolicy()) {
85      type = UsePositionType::kRegisterOrSlotOrConstant;
86      register_beneficial = false;
87    } else {
88      register_beneficial = !unalloc->HasRegisterOrSlotPolicy();
89    }
90  }
91  flags_ = TypeField::encode(type) |
92           RegisterBeneficialField::encode(register_beneficial) |
93           HintTypeField::encode(hint_type) |
94           AssignedRegisterField::encode(kUnassignedRegister);
95}
96
97bool UsePosition::HasHint() const {
98  int hint_register;
99 return HintRegister(&hint_register);
100}
101
102bool UsePosition::HintRegister(int* register_code) const {
103 if (hint_ == nullptr) return false;
104 switch (HintTypeField::decode(flags_)) {
105    case UsePositionHintType::kNone:
106    case UsePositionHintType::kUnresolved:
107      return false;
108    case UsePositionHintType::kUsePos: {
109      UsePosition* use_pos = reinterpret_cast<UsePosition*>(hint_);
110      int assigned_register = AssignedRegisterField::decode(use_pos->flags_);
111      if (assigned_register == kUnassignedRegister) return false;
112      *register_code = assigned_register;
113      return true;
114    }
115    case UsePositionHintType::kOperand: {
116      InstructionOperand* operand =
117          reinterpret_cast<InstructionOperand*>(hint_);
118      *register_code = LocationOperand::cast(operand)->register_code();
119      return true;
120    }
121    case UsePositionHintType::kPhi: {
122      RegisterAllocationData::PhiMapValue* phi =
123          reinterpret_cast<RegisterAllocationData::PhiMapValue*>(hint_);
124 int assigned_register = phi->assigned_register();
125 if (assigned_register == kUnassignedRegister) return false;
126 *register_code = assigned_register;
127 return true;
128 }
129 }
130 UNREACHABLE();
131}
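// As the switch above shows, a register hint can be resolved from three
// sources: another UsePosition whose register is already assigned, a concrete
// InstructionOperand, or the PhiMapValue of a phi that has been allocated.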
132
154
160
167
176
177void LifetimePosition::Print() const { StdoutStream{} << *this << std::endl; }
178
179LiveRange::LiveRange(int relative_id, MachineRepresentation rep,
180                     TopLevelLiveRange* top_level)
181 : relative_id_(relative_id),
182 bits_(0),
183 intervals_(),
184 positions_span_(),
185 top_level_(top_level),
186 next_(nullptr),
187 current_interval_(intervals_.begin()) {
192}
193
194#ifdef DEBUG
195void LiveRange::VerifyPositions() const {
196 SLOW_DCHECK(std::is_sorted(positions().begin(), positions().end(),
198
199 // Verify that each `UsePosition` is covered by a `UseInterval`.
200 UseIntervalVector::const_iterator interval = intervals().begin();
201 for (UsePosition* pos : positions()) {
202 DCHECK_LE(Start(), pos->pos());
203 DCHECK_LE(pos->pos(), End());
204 DCHECK_NE(interval, intervals().end());
205 // NOTE: Even though `UseInterval`s are conceptually half-open (e.g., when
206 // splitting), we still regard the `UsePosition` that coincides with
207 // the end of an interval as covered by that interval.
208 while (!interval->Contains(pos->pos()) && interval->end() != pos->pos()) {
209 ++interval;
210 DCHECK_NE(interval, intervals().end());
211 }
212 }
213}
214
215void LiveRange::VerifyIntervals() const {
216 DCHECK(!intervals().empty());
217 DCHECK_EQ(intervals().front().start(), Start());
218 // The `UseInterval`s must be sorted and disjoint.
219 LifetimePosition last_end = intervals().front().end();
220 for (UseIntervalVector::const_iterator interval = intervals().begin() + 1;
221 interval != intervals().end(); ++interval) {
222 DCHECK_LE(last_end, interval->start());
223 last_end = interval->end();
224 }
225 DCHECK_EQ(last_end, End());
226}
227#endif
228
233
238
241
242 // Update cache for `TopLevelLiveRange::GetChildCovers()`.
243 auto& children = TopLevel()->children_;
244 children.erase(std::lower_bound(children.begin(), children.end(), next_,
246
247 // Merge use intervals.
249 // `start_` doesn't change.
250 end_ = next_->end_;
251
252 // Merge use positions.
256 positions_span_.size() + next_->positions_span_.size());
257
258 // Join linked lists of live ranges.
259 LiveRange* old_next = next_;
260 next_ = next_->next_;
261 old_next->next_ = nullptr;
262}
263
269
271 DCHECK(!spilled());
272 DCHECK(!TopLevel()->HasNoSpillType());
273 set_spilled(true);
275}
276
286
287bool LiveRange::RegisterFromFirstHint(int* register_index) {
290 return false;
291 }
293 positions_span_.first()->pos());
295
296 bool needs_revisit = false;
298 for (; pos_it != positions_span_.end(); ++pos_it) {
299 if ((*pos_it)->HintRegister(register_index)) {
300 break;
301 }
302 // Phi and use position hints can be assigned during allocation which
303 // would invalidate the cached hint position. Make sure we revisit them.
304 needs_revisit = needs_revisit ||
305 (*pos_it)->hint_type() == UsePositionHintType::kPhi ||
306 (*pos_it)->hint_type() == UsePositionHintType::kUsePos;
307 }
308 if (!needs_revisit) {
309    current_hint_position_index_ =
310        std::distance(positions_span_.begin(), pos_it);
311 }
312#ifdef DEBUG
313 UsePosition** pos_check_it =
314 std::find_if(positions_span_.begin(), positions_span_.end(),
315 [](UsePosition* pos) { return pos->HasHint(); });
316 CHECK_EQ(pos_it, pos_check_it);
317#endif
318 return pos_it != positions_span_.end();
319}
320
321UsePosition* const* LiveRange::NextUsePosition(LifetimePosition start) const {
322  return std::lower_bound(positions_span_.cbegin(), positions_span_.cend(),
323                          start, [](const UsePosition* use, LifetimePosition start) {
324                            return use->pos() < start;
325 });
326}
327
328UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
329    LifetimePosition start) const {
330  UsePosition* const* use_pos_it = std::find_if(
331      NextUsePosition(start), positions_span_.cend(),
332      [](const UsePosition* pos) { return pos->RegisterIsBeneficial(); });
333 return use_pos_it == positions_span_.cend() ? nullptr : *use_pos_it;
334}
335
336LifetimePosition LiveRange::NextLifetimePositionRegisterIsBeneficial(
337    const LifetimePosition& start) const {
338  UsePosition* next_use = NextUsePositionRegisterIsBeneficial(start);
339  if (next_use == nullptr) return End();
340 return next_use->pos();
341}
342
343UsePosition* LiveRange::NextUsePositionSpillDetrimental(
344    LifetimePosition start) const {
345  UsePosition* const* use_pos_it =
346 std::find_if(NextUsePosition(start), positions_span_.cend(),
347 [](const UsePosition* pos) {
348 return pos->type() == UsePositionType::kRequiresRegister ||
349 pos->SpillDetrimental();
350 });
351 return use_pos_it == positions_span_.cend() ? nullptr : *use_pos_it;
352}
353
354UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) const {
355  UsePosition* const* use_pos_it =
356 std::find_if(NextUsePosition(start), positions_span_.cend(),
357 [](const UsePosition* pos) {
358 return pos->type() == UsePositionType::kRequiresRegister;
359 });
360 return use_pos_it == positions_span_.cend() ? nullptr : *use_pos_it;
361}
362
363bool LiveRange::CanBeSpilled(LifetimePosition pos) const {
364  // We cannot spill a live range that has a use requiring a register
365  // at the current or the immediate next position.
366  UsePosition* use_pos = NextRegisterPosition(pos);
367  if (use_pos == nullptr) return true;
368 return use_pos->pos() > pos.NextStart().End();
369}
370
371bool LiveRange::IsTopLevel() const { return top_level_ == this; }
372
373InstructionOperand LiveRange::GetAssignedOperand() const {
374  DCHECK(!IsEmpty());
375  if (HasRegisterAssigned()) {
376    DCHECK(!spilled());
377    return AllocatedOperand(LocationOperand::REGISTER, representation(),
378                            assigned_register());
379  }
380  DCHECK(spilled());
382  if (TopLevel()->HasSpillOperand()) {
383    InstructionOperand* op = TopLevel()->GetSpillOperand();
384    DCHECK(!op->IsUnallocated());
385 return *op;
386 }
387 return TopLevel()->GetSpillRangeOperand();
388}
389
394 current_interval_ = std::lower_bound(
395        intervals_.begin(), intervals_.end(), position,
396        [](const UseInterval& interval, LifetimePosition position) {
397 return interval.end() < position;
398 });
399 }
400 return current_interval_;
401}
402
403void LiveRange::AdvanceLastProcessedMarker(
404    UseIntervalVector::iterator to_start_of, LifetimePosition but_not_past) {
405 DCHECK_LE(intervals_.begin(), to_start_of);
406 DCHECK_LT(to_start_of, intervals_.end());
408 if (to_start_of->start() > but_not_past) return;
409 if (to_start_of->start() > current_interval_->start()) {
410 current_interval_ = to_start_of;
411 }
412}
413
414LiveRange* LiveRange::SplitAt(LifetimePosition position, Zone* zone) {
415  DCHECK(Start() < position);
416  DCHECK(End() > position);
417
418  int new_id = TopLevel()->GetNextChildId();
419  LiveRange* result =
420      zone->New<LiveRange>(new_id, representation(), TopLevel());
421
422 // Partition original use intervals to the two live ranges.
423
424 // Find the first interval that ends after the position. (This either needs
425 // to be split or completely belongs to the split-off LiveRange.)
426 UseIntervalVector::iterator split_interval = std::upper_bound(
427      intervals_.begin(), intervals_.end(), position,
428      [](LifetimePosition position, const UseInterval& interval) {
429 return position < interval.end();
430 });
431 DCHECK_NE(split_interval, intervals_.end());
432
433 bool split_at_start = false;
434 if (split_interval->start() == position) {
435 split_at_start = true;
436 } else if (split_interval->Contains(position)) {
437 UseInterval new_interval = split_interval->SplitAt(position);
438 split_interval = intervals_.insert(zone, split_interval + 1, new_interval);
439 }
440 result->intervals_ = intervals_.SplitAt(split_interval);
442 DCHECK(!result->intervals_.empty());
443
444 result->start_ = result->intervals_.front().start();
445 result->end_ = end_;
446 end_ = intervals_.back().end();
447
448 // Partition use positions.
449 UsePosition** split_position_it;
450 if (split_at_start) {
451 // The split position coincides with the beginning of a use interval
452    // (the end of a lifetime hole). A use at this position should be attributed
453    // to the split child, because the split child owns the use interval covering it.
454 split_position_it = std::lower_bound(
455        positions_span_.begin(), positions_span_.end(), position,
456        [](const UsePosition* use_pos, LifetimePosition pos) {
457 return use_pos->pos() < pos;
458 });
459 } else {
460 split_position_it = std::lower_bound(
461        positions_span_.begin(), positions_span_.end(), position,
462        [](const UsePosition* use_pos, LifetimePosition pos) {
463 return use_pos->pos() <= pos;
464 });
465 }
466
467 size_t result_size = std::distance(split_position_it, positions_span_.end());
468 result->positions_span_ = base::VectorOf(split_position_it, result_size);
469 positions_span_.Truncate(positions_span_.size() - result_size);
470
471 // Update or discard cached iteration state to make sure it does not point
472 // to use positions and intervals that no longer belong to this live range.
474 result->current_hint_position_index_ =
477 }
478
479  current_interval_ = intervals_.begin();
480  result->current_interval_ = result->intervals_.begin();
481
482#ifdef DEBUG
483 VerifyChildStructure();
484 result->VerifyChildStructure();
485#endif
486
487 result->top_level_ = TopLevel();
488 result->next_ = next_;
489 next_ = result;
490
491 // Update cache for `TopLevelLiveRange::GetChildCovers()`.
492 auto& children = TopLevel()->children_;
493 children.insert(std::upper_bound(children.begin(), children.end(), result,
495 1, result);
496 return result;
497}
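// Rough illustration of SplitAt (made-up positions): a range covering [10, 50)
// with uses at 12 and 40, split at 30, keeps the interval [10, 30) and the use
// at 12, while the returned child owns [30, 50) and the use at 40. If 30 falls
// strictly inside a use interval, that interval is cut in two first; if 30
// coincides with an interval start, the whole interval moves to the child.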
498
499void LiveRange::ConvertUsesToOperand(const InstructionOperand& op,
500                                     const InstructionOperand& spill_op) {
501  for (UsePosition* pos : positions()) {
502    DCHECK(Start() <= pos->pos() && pos->pos() <= End());
503    if (!pos->HasOperand()) continue;
504    switch (pos->type()) {
505      case UsePositionType::kRequiresSlot:
506        DCHECK(spill_op.IsStackSlot() || spill_op.IsFPStackSlot());
507        InstructionOperand::ReplaceWith(pos->operand(), &spill_op);
508        break;
509      case UsePositionType::kRequiresRegister:
510        DCHECK(op.IsRegister() || op.IsFPRegister());
511        [[fallthrough]];
512      case UsePositionType::kRegisterOrSlot:
513      case UsePositionType::kRegisterOrSlotOrConstant:
514        InstructionOperand::ReplaceWith(pos->operand(), &op);
515        break;
516 }
517 }
518}
519
520// This implements an ordering on live ranges so that they are ordered by their
521// start positions. This is needed for the correctness of the register
522// allocation algorithm. If two live ranges start at the same offset then there
523// is a tie breaker based on where the value is first used. This part of the
524// ordering is merely a heuristic.
525bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
526  LifetimePosition start = Start();
527  LifetimePosition other_start = other->Start();
528 if (start == other_start) {
529    // Prefer the range that has a control-flow hint to make sure it gets
530    // allocated first. This allows the control-flow-aware allocation to
531 // just put ranges back into the queue without other ranges interfering.
532 if (controlflow_hint() < other->controlflow_hint()) {
533 return true;
534 }
535 // The other has a smaller hint.
536 if (controlflow_hint() > other->controlflow_hint()) {
537 return false;
538 }
539 // Both have the same hint or no hint at all. Use first use position.
540 // To make the order total, handle the case where both positions are null.
541 if (positions_span_.empty() && other->positions_span_.empty()) {
542 return TopLevel()->vreg() < other->TopLevel()->vreg();
543 }
544 if (positions_span_.empty()) return false;
545 if (other->positions_span_.empty()) return true;
546    UsePosition* pos = positions_span_.first();
547    UsePosition* other_pos = other->positions_span_.first();
548 // To make the order total, handle the case where both positions are equal.
549 if (pos->pos() == other_pos->pos())
550 return TopLevel()->vreg() < other->TopLevel()->vreg();
551 return pos->pos() < other_pos->pos();
552 }
553 return start < other_start;
554}
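// Worked example of the ordering (illustrative numbers): a range starting at
// position 8 is allocated before one starting at 12; if both start at 8, the
// smaller control-flow hint wins, then the earlier first use position, and
// finally the smaller virtual register number keeps the order total.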
555
556void LiveRange::SetUseHints(int register_index) {
557  for (UsePosition* pos : positions()) {
558    if (!pos->HasOperand()) continue;
559    switch (pos->type()) {
560      case UsePositionType::kRequiresSlot:
561        break;
562      case UsePositionType::kRequiresRegister:
563      case UsePositionType::kRegisterOrSlot:
564      case UsePositionType::kRegisterOrSlotOrConstant:
565        pos->set_assigned_register(register_index);
566 break;
567 }
568 }
569}
570
571bool LiveRange::CanCover(LifetimePosition position) const {
572  if (IsEmpty()) return false;
573  return Start() <= position && position < End();
574}
575
576bool LiveRange::Covers(LifetimePosition position) {
577  if (!CanCover(position)) return false;
578 bool covers = false;
581 while (interval != intervals().end() && interval->start() <= position) {
582 // The list of `UseInterval`s shall be sorted.
583 DCHECK(interval + 1 == intervals().end() ||
584 interval[1].start() >= interval->start());
585 if (interval->Contains(position)) {
586 covers = true;
587 break;
588 }
589 ++interval;
590 }
591 if (!covers && interval > intervals_.begin()) {
592 // To ensure that we advance {current_interval_} below, move back to the
593 // last interval starting before position.
594 interval--;
595 DCHECK_LE(interval->start(), position);
596 }
598 return covers;
599}
600
602 // NOTE: A binary search was measured to be slower, e.g., on the binary from
603 // https://crbug.com/v8/9529.
604 UseIntervalVector::iterator interval = std::find_if(
606 [=](const UseInterval& interval) { return interval.end() >= position; });
607 DCHECK_NE(interval, intervals().end());
608 return interval->end();
609}
610
612 // NOTE: A binary search was measured to be slower, e.g., on the binary from
613 // https://crbug.com/v8/9529.
616 [=](const UseInterval& interval) {
617 return interval.start() >= position;
618 });
619 DCHECK_NE(interval, intervals().end());
620 next_start_ = interval->start();
621 return next_start_;
622}
623
624LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
625  if (IsEmpty() || other->IsEmpty() || other->Start() > End() ||
626      Start() > other->End())
627    return LifetimePosition::Invalid();
628
629 LifetimePosition min_end = std::min(End(), other->End());
630 UseIntervalVector::iterator b = other->intervals_.begin();
631 LifetimePosition advance_last_processed_up_to = b->start();
633 while (a != intervals().end() && b != other->intervals().end()) {
634 if (a->start() > min_end || b->start() > min_end) break;
635 LifetimePosition cur_intersection = a->Intersect(*b);
636 if (cur_intersection.IsValid()) {
637 return cur_intersection;
638 }
639 if (a->start() < b->start()) {
640 ++a;
641 if (a == intervals().end() || a->start() > other->End()) break;
642 AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
643 } else {
644 ++b;
645 }
646 }
647  return LifetimePosition::Invalid();
648}
649
650void LiveRange::Print(const RegisterConfiguration* config,
651                      bool with_children) const {
652 StdoutStream os;
653 PrintableLiveRange wrapper;
654 wrapper.register_configuration_ = config;
655 for (const LiveRange* i = this; i != nullptr; i = i->next()) {
656 wrapper.range_ = i;
657 os << wrapper << std::endl;
658 if (!with_children) break;
659 }
660}
661
662void LiveRange::Print(bool with_children) const {
663 Print(RegisterConfiguration::Default(), with_children);
664}
665
666bool LiveRange::RegisterFromBundle(int* hint) const {
667 LiveRangeBundle* bundle = TopLevel()->get_bundle();
668 if (bundle == nullptr || bundle->reg() == kUnassignedRegister) return false;
669 *hint = bundle->reg();
670 return true;
671}
672
674 LiveRangeBundle* bundle = TopLevel()->get_bundle();
675 if (bundle == nullptr || bundle->reg() != kUnassignedRegister) return;
676 bundle->set_reg(reg);
677}
678
687
688TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineRepresentation rep,
689                                     Zone* zone)
690 : LiveRange(0, rep, this),
691 vreg_(vreg),
692 last_child_id_(0),
693 spill_operand_(nullptr),
694 spill_move_insertion_locations_(nullptr),
695 children_({this}, zone),
696 spilled_in_deferred_blocks_(false),
697 has_preassigned_slot_(false),
698 spill_start_index_(kMaxInt) {
699 bits_ |= SpillTypeField::encode(SpillType::kNoSpillType);
700}
701
708
709void TopLevelLiveRange::CommitSpillMoves(RegisterAllocationData* data,
710                                         const InstructionOperand& op) {
711 DCHECK_IMPLIES(op.IsConstant(),
712 GetSpillMoveInsertionLocations(data) == nullptr);
713
714 if (HasGeneralSpillRange()) {
716 }
717
718 InstructionSequence* sequence = data->code();
719 Zone* zone = sequence->zone();
720
721  for (SpillMoveInsertionList* to_spill = GetSpillMoveInsertionLocations(data);
722       to_spill != nullptr; to_spill = to_spill->next) {
723 Instruction* instr = sequence->InstructionAt(to_spill->gap_index);
724 ParallelMove* move =
725 instr->GetOrCreateParallelMove(Instruction::START, zone);
726 move->AddMove(*to_spill->operand, op);
727 instr->block()->mark_needs_frame();
728 }
729}
730
731void TopLevelLiveRange::FilterSpillMoves(RegisterAllocationData* data,
732                                         const InstructionOperand& op) {
733 DCHECK_IMPLIES(op.IsConstant(),
734 GetSpillMoveInsertionLocations(data) == nullptr);
735 bool might_be_duplicated = has_slot_use() || spilled();
736 InstructionSequence* sequence = data->code();
737
738  SpillMoveInsertionList* previous = nullptr;
739  for (SpillMoveInsertionList* to_spill = GetSpillMoveInsertionLocations(data);
740       to_spill != nullptr; previous = to_spill, to_spill = to_spill->next) {
741 Instruction* instr = sequence->InstructionAt(to_spill->gap_index);
742 ParallelMove* move = instr->GetParallelMove(Instruction::START);
743 // Skip insertion if it's possible that the move exists already as a
744 // constraint move from a fixed output register to a slot.
745 bool found = false;
746 if (move != nullptr && (might_be_duplicated || has_preassigned_slot())) {
747 for (MoveOperands* move_op : *move) {
748 if (move_op->IsEliminated()) continue;
749 if (move_op->source().Equals(*to_spill->operand) &&
750 move_op->destination().Equals(op)) {
751 found = true;
752 if (has_preassigned_slot()) move_op->Eliminate();
753 break;
754 }
755 }
756 }
757 if (found || has_preassigned_slot()) {
758 // Remove the item from the list.
759 if (previous == nullptr) {
760        spill_move_insertion_locations_ = to_spill->next;
761      } else {
762 previous->next = to_spill->next;
763 }
764 // Even though this location doesn't need a spill instruction, the
765 // block does require a frame.
766 instr->block()->mark_needs_frame();
767 }
768 }
769}
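// Division of labor, as read from the two functions above: CommitSpillMoves
// emits the recorded spill moves into the START gap of each recorded location,
// while FilterSpillMoves prunes locations whose move already exists as a
// fixed-output constraint move or that are covered by a preassigned slot.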
770
773 DCHECK(!operand->IsUnallocated() && !operand->IsImmediate());
775 spill_operand_ = operand;
776}
777
780 DCHECK(spill_range);
781 spill_range_ = spill_range;
782}
783
789
790LiveRange* TopLevelLiveRange::GetChildCovers(LifetimePosition pos) {
791#ifdef DEBUG
792 // Make sure the cache contains the correct, actual children.
793 LiveRange* child = this;
794 for (LiveRange* cached_child : children_) {
795 DCHECK_EQ(cached_child, child);
796 child = child->next();
797 }
798 DCHECK_NULL(child);
799#endif
800
801 auto child_it =
802 std::lower_bound(children_.begin(), children_.end(), pos,
803 [](const LiveRange* range, LifetimePosition pos) {
804 return range->End() <= pos;
805 });
806 return child_it == children_.end() || !(*child_it)->Covers(pos) ? nullptr
807 : *child_it;
808}
809
810#ifdef DEBUG
811void TopLevelLiveRange::Verify() const {
812 VerifyChildrenInOrder();
813 for (const LiveRange* child = this; child != nullptr; child = child->next()) {
814 VerifyChildStructure();
815 }
816}
817
818void TopLevelLiveRange::VerifyChildrenInOrder() const {
819 LifetimePosition last_end = End();
820 for (const LiveRange* child = this->next(); child != nullptr;
821 child = child->next()) {
822 DCHECK(last_end <= child->Start());
823 last_end = child->End();
824 }
825}
826#endif
827
828void TopLevelLiveRange::ShortenTo(LifetimePosition start) {
829  TRACE("Shorten live range %d to [%d\n", vreg(), start.value());
830 DCHECK(!IsEmpty());
833 start_ = start;
834}
835
836void TopLevelLiveRange::EnsureInterval(LifetimePosition start,
837                                       LifetimePosition end, Zone* zone) {
838 TRACE("Ensure live range %d in interval [%d %d[\n", vreg(), start.value(),
839 end.value());
840 DCHECK(!IsEmpty());
841
842 // Drop front intervals until intervals_.front().start() > end.
843 LifetimePosition new_end = end;
844 while (!intervals_.empty() && intervals_.front().start() <= end) {
845 if (intervals_.front().end() > end) {
846 new_end = intervals_.front().end();
847 }
848    intervals_.pop_front();
849  }
850 intervals_.push_front(zone, UseInterval(start, new_end));
852 if (end_ < new_end) {
853 end_ = new_end;
854 }
855 if (start_ > start) {
856 start_ = start;
857 }
858}
859
860void TopLevelLiveRange::AddUseInterval(LifetimePosition start,
861                                       LifetimePosition end, Zone* zone) {
862 TRACE("Add to live range %d interval [%d %d[\n", vreg(), start.value(),
863 end.value());
864 if (intervals_.empty()) {
865    intervals_.push_front(zone, UseInterval(start, end));
866    start_ = start;
867 end_ = end;
868 } else {
869 UseInterval& first_interval = intervals_.front();
870 if (end == first_interval.start()) {
871 // Coalesce directly adjacent intervals.
872 first_interval.set_start(start);
873 start_ = start;
874 } else if (end < first_interval.start()) {
875      intervals_.push_front(zone, UseInterval(start, end));
876      start_ = start;
877 } else {
878      // The order of instruction processing (see ProcessInstructions) guarantees
879 // that each new use interval either precedes, intersects with or touches
880 // the last added interval.
881 DCHECK(intervals_.size() == 1 || end <= intervals_.begin()[1].start());
882 first_interval.set_start(std::min(start, first_interval.start()));
883 first_interval.set_end(std::max(end, first_interval.end()));
884 if (start_ > start) {
885 start_ = start;
886 }
887 if (end_ < end) {
888 end_ = end;
889 }
890 }
891 }
893}
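// Coalescing example for AddUseInterval (illustrative positions): with an
// existing first interval [4, 6), adding [2, 4) extends it to [2, 6); adding
// [1, 5) merges to [1, 6); adding [0, 1), which neither touches nor overlaps,
// becomes a separate interval in front.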
894
895void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos, Zone* zone) {
896  TRACE("Add to live range %d use position %d\n", vreg(),
897 use_pos->pos().value());
898 // Since we `ProcessInstructions` in reverse, the `use_pos` is almost always
899 // inserted at the front of `positions_`, hence (i) use linear instead of
900 // binary search and (ii) grow towards the `kFront` exclusively on `insert`.
901 UsePositionVector::iterator insert_it = std::find_if(
902 positions_.begin(), positions_.end(), [=](const UsePosition* pos) {
903 return UsePosition::Ordering()(use_pos, pos);
904 });
905 positions_.insert<kFront>(zone, insert_it, use_pos);
906
908 // We must not have child `LiveRange`s yet (e.g. from splitting), otherwise we
909 // would have to adjust their `positions_span_` as well.
911}
912
913std::ostream& operator<<(std::ostream& os,
914 const PrintableLiveRange& printable_range) {
915 const LiveRange* range = printable_range.range_;
916 os << "Range: " << range->TopLevel()->vreg() << ":" << range->relative_id()
917 << " ";
918 if (range->TopLevel()->is_phi()) os << "phi ";
919 if (range->TopLevel()->is_non_loop_phi()) os << "nlphi ";
920
921 os << "{" << std::endl;
922 for (UsePosition* use_pos : range->positions()) {
923 if (use_pos->HasOperand()) {
924 os << *use_pos->operand() << use_pos->pos() << " ";
925 }
926 }
927 os << std::endl;
928
929 for (const UseInterval& interval : range->intervals()) {
930 interval.PrettyPrint(os);
931 os << std::endl;
932 }
933 os << "}";
934 return os;
935}
936
937namespace {
938void PrintBlockRow(std::ostream& os, const InstructionBlocks& blocks) {
939 os << " ";
940 for (auto block : blocks) {
941 LifetimePosition start_pos = LifetimePosition::GapFromInstructionIndex(
942 block->first_instruction_index());
943 LifetimePosition end_pos = LifetimePosition::GapFromInstructionIndex(
944 block->last_instruction_index())
945 .NextFullStart();
946 int length = end_pos.value() - start_pos.value();
947 constexpr int kMaxPrefixLength = 32;
948 char buffer[kMaxPrefixLength];
949 int rpo_number = block->rpo_number().ToInt();
950 const char* deferred_marker = block->IsDeferred() ? "(deferred)" : "";
951 int max_prefix_length = std::min(length, kMaxPrefixLength);
952 int prefix = snprintf(buffer, max_prefix_length, "[-B%d-%s", rpo_number,
953 deferred_marker);
954 os << buffer;
955 int remaining = length - std::min(prefix, max_prefix_length) - 1;
956 for (int i = 0; i < remaining; ++i) os << '-';
957 os << ']';
958 }
959 os << '\n';
960}
961} // namespace
962
964 const TopLevelLiveRange* toplevel) {
965 int position = 0;
966 os << std::setw(3) << toplevel->vreg() << ": ";
967
968 const char* kind_string;
969 switch (toplevel->spill_type()) {
971 kind_string = "ss";
972 break;
974 kind_string = "sd";
975 break;
977 kind_string = "so";
978 break;
979 default:
980 kind_string = "s?";
981 }
982
983 for (const LiveRange* range = toplevel; range != nullptr;
984 range = range->next()) {
985 for (const UseInterval& interval : range->intervals()) {
986 LifetimePosition start = interval.start();
987 LifetimePosition end = interval.end();
988 CHECK_GE(start.value(), position);
989 for (; start.value() > position; position++) {
990 os << ' ';
991 }
992 int length = end.value() - start.value();
993 constexpr int kMaxPrefixLength = 32;
994 char buffer[kMaxPrefixLength];
995 int max_prefix_length = std::min(length + 1, kMaxPrefixLength);
996 int prefix;
997 if (range->spilled()) {
998 prefix = snprintf(buffer, max_prefix_length, "|%s", kind_string);
999 } else {
1000 prefix = snprintf(buffer, max_prefix_length, "|%s",
1001 RegisterName(range->assigned_register()));
1002 }
1003 os << buffer;
1004 position += std::min(prefix, max_prefix_length - 1);
1005 CHECK_GE(end.value(), position);
1006 const char line_style = range->spilled() ? '-' : '=';
1007 for (; end.value() > position; position++) {
1008 os << line_style;
1009 }
1010 }
1011 }
1012 os << '\n';
1013}
1014
1016 std::ostringstream os;
1017 PrintBlockRow(os, code()->instruction_blocks());
1018 for (auto const toplevel : data()->fixed_live_ranges()) {
1019 if (toplevel == nullptr) continue;
1020 PrintRangeRow(os, toplevel);
1021 }
1022 int rowcount = 0;
1023 for (auto toplevel : data()->live_ranges()) {
1024 if (!CanProcessRange(toplevel)) continue;
1025 if (rowcount++ % 10 == 0) PrintBlockRow(os, code()->instruction_blocks());
1026 PrintRangeRow(os, toplevel);
1027 }
1028 PrintF("%s\n", os.str().c_str());
1029}
1030
1031SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone)
1032    : ranges_(zone),
1033 intervals_(zone),
1034 assigned_slot_(kUnassignedSlot),
1035 byte_width_(ByteWidthForStackSlot(parent->representation())) {
1036 DCHECK(!parent->IsEmpty());
1037
1038 // Spill ranges are created for top level. This is so that, when merging
1039 // decisions are made, we consider the full extent of the virtual register,
1040 // and avoid clobbering it.
1041  LifetimePosition last_end = LifetimePosition::MaxPosition();
1042  for (const LiveRange* range = parent; range != nullptr;
1043 range = range->next()) {
1044 // Deep copy the `UseInterval`s, since the `LiveRange`s are subsequently
1045 // modified, so just storing those has correctness issues.
1046 for (UseInterval interval : range->intervals()) {
1047 DCHECK_NE(LifetimePosition::MaxPosition(), interval.start());
1048 bool can_coalesce = last_end == interval.start();
1049 if (can_coalesce) {
1050 intervals_.back().set_end(interval.end());
1051 } else {
1052 intervals_.push_back(interval);
1053 }
1054 last_end = interval.end();
1055 }
1056 }
1057 ranges_.push_back(parent);
1058 parent->SetSpillRange(this);
1059}
1060
1061// Checks if the `UseInterval`s in `a` intersect with those in `b`.
1062// Returns the two intervals that intersected, or `std::nullopt` if none did.
1063static std::optional<std::pair<UseInterval, UseInterval>>
1064AreUseIntervalsIntersecting(base::Vector<const UseInterval> a,
1065                            base::Vector<const UseInterval> b) {
1066  SLOW_DCHECK(std::is_sorted(a.begin(), a.end()) &&
1067 std::is_sorted(b.begin(), b.end()));
1068 if (a.empty() || b.empty() || a.last().end() <= b.first().start() ||
1069 b.last().end() <= a.first().start()) {
1070 return {};
1071 }
1072
1073  // `a` shall have fewer intervals than `b`.
1074 if (a.size() > b.size()) {
1075 std::swap(a, b);
1076 }
1077
1078 auto a_it = a.begin();
1079 // Advance `b` already to the interval that ends at or after `a_start`.
1080 LifetimePosition a_start = a.first().start();
1081 auto b_it = std::lower_bound(
1082 b.begin(), b.end(), a_start,
1083 [](const UseInterval& interval, LifetimePosition position) {
1084 return interval.end() < position;
1085 });
1086 while (a_it != a.end() && b_it != b.end()) {
1087 if (a_it->end() <= b_it->start()) {
1088 ++a_it;
1089 } else if (b_it->end() <= a_it->start()) {
1090 ++b_it;
1091 } else {
1092 return std::make_pair(*a_it, *b_it);
1093 }
1094 }
1095 return {};
1096}
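// Intersection example (made-up positions): {[1, 3), [7, 9)} and
// {[3, 5), [8, 10)} intersect, and the pair ([7, 9), [8, 10)) is returned;
// {[1, 3)} and {[3, 5)} do not intersect, since interval ends are exclusive.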
1097
1098// Used by `LiveRangeBundle`s and `SpillRange`s, hence allow passing different
1099// containers of `UseInterval`s, as long as they can be converted to a
1100// `base::Vector` (which is essentially just a memory span).
1101template <typename ContainerA, typename ContainerB>
1102std::optional<std::pair<UseInterval, UseInterval>> AreUseIntervalsIntersecting(
1103 const ContainerA& a, const ContainerB& b) {
1104  return AreUseIntervalsIntersecting(base::VectorOf(a),
1105                                     base::VectorOf(b));
1106}
1107
1108bool SpillRange::TryMerge(SpillRange* other) {
1109  if (HasSlot() || other->HasSlot()) return false;
1110 if (byte_width() != other->byte_width()) return false;
1111 if (AreUseIntervalsIntersecting(intervals_, other->intervals_)) return false;
1112
1113 // Merge vectors of `UseInterval`s.
1114 intervals_.reserve(intervals_.size() + other->intervals_.size());
1115 for (UseInterval interval : other->intervals_) {
1116 UseInterval* insert_it =
1117 std::lower_bound(intervals_.begin(), intervals_.end(), interval);
1118 // Since the intervals didn't intersect, they should also be unique.
1119 DCHECK_IMPLIES(insert_it != intervals_.end(), *insert_it != interval);
1120 intervals_.insert(insert_it, 1, interval);
1121 }
1122 other->intervals_.clear();
1123
1124 // Merge vectors of `TopLevelLiveRange`s.
1125 for (TopLevelLiveRange* range : other->ranges_) {
1126 DCHECK(range->GetSpillRange() == other);
1127 range->SetSpillRange(this);
1128 }
1129 ranges_.insert(ranges_.end(), other->ranges_.begin(), other->ranges_.end());
1130 other->ranges_.clear();
1131
1132 return true;
1133}
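// TryMerge succeeds only when neither spill range has been assigned a slot,
// both have the same byte width, and their use intervals do not intersect;
// the other range's intervals and live ranges are then folded into this one,
// so all of them can share a single stack slot.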
1134
1135void SpillRange::Print() const {
1136 StdoutStream os;
1137 os << "{" << std::endl;
1138 for (const TopLevelLiveRange* range : ranges_) {
1139 os << range->vreg() << " ";
1140 }
1141 os << std::endl;
1142
1143 for (const UseInterval& interval : intervals_) {
1144 interval.PrettyPrint(os);
1145 os << std::endl;
1146 }
1147 os << "}" << std::endl;
1148}
1149
1150RegisterAllocationData::PhiMapValue::PhiMapValue(PhiInstruction* phi,
1151                                                 const InstructionBlock* block,
1152 Zone* zone)
1153 : phi_(phi),
1154 block_(block),
1155 incoming_operands_(zone),
1156 assigned_register_(kUnassignedRegister) {
1157 incoming_operands_.reserve(phi->operands().size());
1158}
1159
1160void RegisterAllocationData::PhiMapValue::AddOperand(
1161    InstructionOperand* operand) {
1162 incoming_operands_.push_back(operand);
1163}
1164
1165void RegisterAllocationData::PhiMapValue::CommitAssignment(
1166    const InstructionOperand& assigned) {
1167 for (InstructionOperand* operand : incoming_operands_) {
1168 InstructionOperand::ReplaceWith(operand, &assigned);
1169 }
1170}
1171
1175 const char* debug_name)
1176 : allocation_zone_(zone),
1177 frame_(frame),
1178 code_(code),
1180 config_(config),
1182 live_in_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
1183 live_out_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
1184 live_ranges_(code->VirtualRegisterCount(), nullptr, allocation_zone()),
1186 this->config()->num_general_registers(),
1187 nullptr, allocation_zone()),
1190 this->config()->num_double_registers(),
1191 nullptr, allocation_zone()),
1194 assigned_registers_(nullptr),
1196 virtual_register_count_(code->VirtualRegisterCount()),
1198 spill_state_(code->InstructionBlockCount(), ZoneVector<LiveRange*>(zone),
1199 zone),
1201 slot_for_const_range_(zone) {
1204 kNumberOfFixedRangesPerRegister * this->config()->num_float_registers(),
1205 nullptr);
1208 this->config()->num_simd128_registers(),
1209 nullptr);
1213 this->config()->num_simd128_registers(),
1214 nullptr);
1215 }
1216
1217 // Eagerly initialize live ranges to avoid repeated null checks.
1218 DCHECK_EQ(code->VirtualRegisterCount(), live_ranges_.size());
1219 for (int i = 0; i < code->VirtualRegisterCount(); ++i) {
1221 }
1222
1224 this->config()->num_general_registers(), code_zone());
1226 this->config()->num_double_registers(), code_zone());
1228 this->config()->num_general_registers(), code_zone());
1230 this->config()->num_double_registers(), code_zone());
1233 this->config()->num_simd128_registers(), code_zone());
1235 this->config()->num_simd128_registers(), code_zone());
1236 }
1237
1239 this->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);
1240}
1241
1242MoveOperands* RegisterAllocationData::AddGapMove(
1243    int index, Instruction::GapPosition position,
1244    const InstructionOperand& from, const InstructionOperand& to) {
1245 Instruction* instr = code()->InstructionAt(index);
1246 ParallelMove* moves = instr->GetOrCreateParallelMove(position, code_zone());
1247 return moves->AddMove(from, to);
1248}
1249
1250MachineRepresentation RegisterAllocationData::RepresentationFor(
1251    int virtual_register) {
1252 DCHECK_LT(virtual_register, code()->VirtualRegisterCount());
1253 return code()->GetRepresentation(virtual_register);
1254}
1255
1262
1268
1269RegisterAllocationData::PhiMapValue* RegisterAllocationData::InitializePhiMap(
1270    const InstructionBlock* block, PhiInstruction* phi) {
1271  RegisterAllocationData::PhiMapValue* map_value =
1272      allocation_zone()->New<RegisterAllocationData::PhiMapValue>(
1273          phi, block, allocation_zone());
1274 auto res =
1275 phi_map_.insert(std::make_pair(phi->virtual_register(), map_value));
1276 DCHECK(res.second);
1277 USE(res);
1278 return map_value;
1279}
1280
1282 int virtual_register) {
1283 auto it = phi_map_.find(virtual_register);
1284 DCHECK(it != phi_map_.end());
1285 return it->second;
1286}
1287
1292
1293bool RegisterAllocationData::ExistsUseWithoutDefinition() {
1294  bool found = false;
1295 for (int operand_index : *live_in_sets()[0]) {
1296 found = true;
1297 PrintF("Register allocator error: live v%d reached first block.\n",
1298 operand_index);
1299 LiveRange* range = GetLiveRangeFor(operand_index);
1300 PrintF(" (first use is at position %d in instruction %d)\n",
1301 range->positions().first()->pos().value(),
1302 range->positions().first()->pos().ToInstructionIndex());
1303 if (debug_name() == nullptr) {
1304 PrintF("\n");
1305 } else {
1306 PrintF(" (function: %s)\n", debug_name());
1307 }
1308 }
1309 return found;
1310}
1311
1312// If a range is defined in a deferred block, we can expect the entire range
1313// to cover only positions in deferred blocks.
1314// hot path would be dominated by a deferred block, meaning it is unreachable
1315// without passing through the deferred block, which is contradictory.
1316// In particular, when such a range contributes a result back on the hot
1317// path, it will be as one of the inputs of a phi. In that case, the value
1318// will be transferred via a move in the Gap::END's of the last instruction
1319// of a deferred block.
1320bool RegisterAllocationData::RangesDefinedInDeferredStayInDeferred() {
1321  const size_t live_ranges_size = live_ranges().size();
1322 for (const TopLevelLiveRange* range : live_ranges()) {
1323 CHECK_EQ(live_ranges_size,
1324 live_ranges().size()); // TODO(neis): crbug.com/831822
1325 DCHECK_NOT_NULL(range);
1326 if (range->IsEmpty() ||
1327 !code()
1328 ->GetInstructionBlock(range->Start().ToInstructionIndex())
1329 ->IsDeferred()) {
1330 continue;
1331 }
1332 for (const UseInterval& interval : range->intervals()) {
1333 int first = interval.FirstGapIndex();
1334 int last = interval.LastGapIndex();
1335 for (int instr = first; instr <= last;) {
1336      const InstructionBlock* block = code()->GetInstructionBlock(instr);
1337      if (!block->IsDeferred()) return false;
1338 instr = block->last_instruction_index() + 1;
1339 }
1340 }
1341 }
1342 return true;
1343}
1344
1345SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange(
1346    TopLevelLiveRange* range, SpillMode spill_mode) {
1347 using SpillType = TopLevelLiveRange::SpillType;
1348 DCHECK(!range->HasSpillOperand());
1349
1350 SpillRange* spill_range = range->GetAllocatedSpillRange();
1351 if (spill_range == nullptr) {
1352 spill_range = allocation_zone()->New<SpillRange>(range, allocation_zone());
1353 }
1354 if (spill_mode == SpillMode::kSpillDeferred &&
1355 (range->spill_type() != SpillType::kSpillRange)) {
1356 range->set_spill_type(SpillType::kDeferredSpillRange);
1357 } else {
1358 range->set_spill_type(SpillType::kSpillRange);
1359 }
1360
1361 return spill_range;
1362}
1363
1364void RegisterAllocationData::MarkFixedUse(MachineRepresentation rep,
1365                                          int index) {
1366 switch (rep) {
1377 } else {
1379 }
1380 } else {
1381 int alias_base_index = -1;
1382 int aliases = config()->GetAliases(
1383 rep, index, MachineRepresentation::kFloat64, &alias_base_index);
1384 DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
1385 while (aliases--) {
1386 int aliased_reg = alias_base_index + aliases;
1387 fixed_fp_register_use_->Add(aliased_reg);
1388 }
1389 }
1390 break;
1393 break;
1394 default:
1395 DCHECK(!IsFloatingPoint(rep));
1396 fixed_register_use_->Add(index);
1397 break;
1398 }
1399}
1400
1402 switch (rep) {
1408 return fixed_fp_register_use_->Contains(index);
1412 return fixed_fp_register_use_->Contains(index);
1413 } else {
1415 }
1416 } else {
1417 int alias_base_index = -1;
1418 int aliases = config()->GetAliases(
1419 rep, index, MachineRepresentation::kFloat64, &alias_base_index);
1420 DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
1421 bool result = false;
1422 while (aliases-- && !result) {
1423 int aliased_reg = alias_base_index + aliases;
1424 result |= fixed_fp_register_use_->Contains(aliased_reg);
1425 }
1426 return result;
1427 }
1428 }
1430 return fixed_fp_register_use_->Contains(index);
1431 default:
1432 DCHECK(!IsFloatingPoint(rep));
1433 return fixed_register_use_->Contains(index);
1434 }
1435}
1436
1437void RegisterAllocationData::MarkAllocated(MachineRepresentation rep,
1438                                           int index) {
1439 switch (rep) {
1450 } else {
1452 }
1453 } else {
1454 int alias_base_index = -1;
1455 int aliases = config()->GetAliases(
1456 rep, index, MachineRepresentation::kFloat64, &alias_base_index);
1457 DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
1458 while (aliases--) {
1459 int aliased_reg = alias_base_index + aliases;
1460 assigned_double_registers_->Add(aliased_reg);
1461 }
1462 }
1463 break;
1466 break;
1467 default:
1468 DCHECK(!IsFloatingPoint(rep));
1469 assigned_registers_->Add(index);
1470 break;
1471 }
1472}
1473
1475 return pos.IsFullStart() &&
1476 (static_cast<size_t>(pos.ToInstructionIndex()) ==
1477 code()->instructions().size() ||
1478 code()->GetInstructionBlock(pos.ToInstructionIndex())->code_start() ==
1479 pos.ToInstructionIndex());
1480}
1481
1484
1485InstructionOperand* ConstraintBuilder::AllocateFixed(
1486    UnallocatedOperand* operand, int pos, bool is_tagged, bool is_input,
1487 bool is_output) {
1488 TRACE("Allocating fixed reg for op %d\n", operand->virtual_register());
1489 DCHECK(operand->HasFixedPolicy());
1490 InstructionOperand allocated;
1492 int virtual_register = operand->virtual_register();
1493 if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
1494 rep = data()->RepresentationFor(virtual_register);
1495 }
1496 if (operand->HasFixedSlotPolicy()) {
1498 operand->fixed_slot_index());
1499 } else if (operand->HasFixedRegisterPolicy()) {
1500 DCHECK(!IsFloatingPoint(rep));
1501 DCHECK_IMPLIES(is_input || is_output,
1502 data()->config()->IsAllocatableGeneralCode(
1503 operand->fixed_register_index()));
1505 operand->fixed_register_index());
1506 } else if (operand->HasFixedFPRegisterPolicy()) {
1507 DCHECK(IsFloatingPoint(rep));
1511 DCHECK_IMPLIES(is_input || is_output,
1512 data()->config()->IsAllocatableFloatCode(
1513 operand->fixed_register_index()));
1514 } else if (rep == MachineRepresentation::kFloat64) {
1515 DCHECK_IMPLIES(is_input || is_output,
1516 data()->config()->IsAllocatableDoubleCode(
1517 operand->fixed_register_index()));
1518 } else if (rep == MachineRepresentation::kSimd128) {
1519 DCHECK_IMPLIES(is_input || is_output,
1520 data()->config()->IsAllocatableSimd128Code(
1521 operand->fixed_register_index()));
1522 } else {
1523 UNREACHABLE();
1524 }
1526 operand->fixed_register_index());
1527 } else {
1528 UNREACHABLE();
1529 }
1530 if (is_input && allocated.IsAnyRegister()) {
1531 data()->MarkFixedUse(rep, operand->fixed_register_index());
1532 }
1533 InstructionOperand::ReplaceWith(operand, &allocated);
1534 if (is_tagged) {
1535 TRACE("Fixed reg is tagged at %d\n", pos);
1537 if (instr->HasReferenceMap()) {
1538 instr->reference_map()->RecordReference(*AllocatedOperand::cast(operand));
1539 }
1540 }
1541 return operand;
1542}
1543
1544void ConstraintBuilder::MeetRegisterConstraints() {
1545  for (InstructionBlock* block : code()->instruction_blocks()) {
1548 }
1549}
1550
1551void ConstraintBuilder::MeetRegisterConstraints(const InstructionBlock* block) {
1552  int start = block->first_instruction_index();
1553  int end = block->last_instruction_index();
1554  DCHECK_NE(-1, start);
1555  for (int i = start; i <= end; ++i) {
1556    MeetConstraintsBefore(i);
1557    if (i != end) MeetConstraintsAfter(i);
1558  }
1559  // Meet register constraints for the instruction in the end.
1560  MeetRegisterConstraintsForLastInstructionInBlock(block);
1561}
1562
1563void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
1564    const InstructionBlock* block) {
1565 int end = block->last_instruction_index();
1566 Instruction* last_instruction = code()->InstructionAt(end);
1567 for (size_t i = 0; i < last_instruction->OutputCount(); i++) {
1568 InstructionOperand* output_operand = last_instruction->OutputAt(i);
1569 DCHECK(!output_operand->IsConstant());
1570 UnallocatedOperand* output = UnallocatedOperand::cast(output_operand);
1571 int output_vreg = output->virtual_register();
1572 TopLevelLiveRange* range = data()->GetLiveRangeFor(output_vreg);
1573 bool assigned = false;
1574 if (output->HasFixedPolicy()) {
1575 AllocateFixed(output, -1, false, false, true);
1576 // This value is produced on the stack, we never need to spill it.
1577 if (output->IsStackSlot()) {
1579 data()->frame()->GetSpillSlotCount());
1580 range->SetSpillOperand(LocationOperand::cast(output));
1581 range->SetSpillStartIndex(end);
1582 assigned = true;
1583 }
1584
1585 for (const RpoNumber& succ : block->successors()) {
1586 const InstructionBlock* successor = code()->InstructionBlockAt(succ);
1587 DCHECK_EQ(1, successor->PredecessorCount());
1588 int gap_index = successor->first_instruction_index();
1589 // Create an unconstrained operand for the same virtual register
1590 // and insert a gap move from the fixed output to the operand.
1592 output_vreg);
1593 data()->AddGapMove(gap_index, Instruction::START, *output, output_copy);
1594 }
1595 }
1596
1597 if (!assigned) {
1598 for (const RpoNumber& succ : block->successors()) {
1599 const InstructionBlock* successor = code()->InstructionBlockAt(succ);
1600 DCHECK_EQ(1, successor->PredecessorCount());
1601 int gap_index = successor->first_instruction_index();
1602 range->RecordSpillLocation(allocation_zone(), gap_index, output);
1603 range->SetSpillStartIndex(gap_index);
1604 }
1605 }
1606 }
1607}
1608
1609void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
1610  Instruction* first = code()->InstructionAt(instr_index);
1611 // Handle fixed temporaries.
1612 for (size_t i = 0; i < first->TempCount(); i++) {
1613 UnallocatedOperand* temp = UnallocatedOperand::cast(first->TempAt(i));
1614 if (temp->HasFixedPolicy())
1615 AllocateFixed(temp, instr_index, false, false, false);
1616 }
1617 // Handle constant/fixed output operands.
1618 for (size_t i = 0; i < first->OutputCount(); i++) {
1619 InstructionOperand* output = first->OutputAt(i);
1620 if (output->IsConstant()) {
1621 int output_vreg = ConstantOperand::cast(output)->virtual_register();
1622 TopLevelLiveRange* range = data()->GetLiveRangeFor(output_vreg);
1623 range->SetSpillStartIndex(instr_index + 1);
1624 range->SetSpillOperand(output);
1625 continue;
1626 }
1627 UnallocatedOperand* first_output = UnallocatedOperand::cast(output);
1628 TopLevelLiveRange* range =
1629 data()->GetLiveRangeFor(first_output->virtual_register());
1630 bool assigned = false;
1631 if (first_output->HasFixedPolicy()) {
1632 int output_vreg = first_output->virtual_register();
1634 output_vreg);
1635 bool is_tagged = code()->IsReference(output_vreg);
1636 if (first_output->HasSecondaryStorage()) {
1637 range->MarkHasPreassignedSlot();
1639 std::make_pair(range, first_output->GetSecondaryStorage()));
1640 }
1641 AllocateFixed(first_output, instr_index, is_tagged, false, true);
1642
1643 // This value is produced on the stack, we never need to spill it.
1644 if (first_output->IsStackSlot()) {
1645 DCHECK(LocationOperand::cast(first_output)->index() <
1646 data()->frame()->GetTotalFrameSlotCount());
1647 range->SetSpillOperand(LocationOperand::cast(first_output));
1648 range->SetSpillStartIndex(instr_index + 1);
1649 assigned = true;
1650 }
1651 data()->AddGapMove(instr_index + 1, Instruction::START, *first_output,
1652 output_copy);
1653 }
1654 // Make sure we add a gap move for spilling (if we have not done
1655 // so already).
1656 if (!assigned) {
1657 range->RecordSpillLocation(allocation_zone(), instr_index + 1,
1658 first_output);
1659 range->SetSpillStartIndex(instr_index + 1);
1660 }
1661 }
1662}
1663
1664void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
1665  Instruction* second = code()->InstructionAt(instr_index);
1666 // Handle fixed input operands of second instruction.
1667 ZoneVector<TopLevelLiveRange*>* spilled_consts = nullptr;
1668 for (size_t i = 0; i < second->InputCount(); i++) {
1669 InstructionOperand* input = second->InputAt(i);
1670 if (input->IsImmediate()) {
1671 continue; // Ignore immediates.
1672 }
1673 UnallocatedOperand* cur_input = UnallocatedOperand::cast(input);
1674 if (cur_input->HasSlotPolicy()) {
1675 TopLevelLiveRange* range =
1676 data()->GetLiveRangeFor(cur_input->virtual_register());
1677 if (range->HasSpillOperand() && range->GetSpillOperand()->IsConstant()) {
1678 bool already_spilled = false;
1679 if (spilled_consts == nullptr) {
1680 spilled_consts =
1682 allocation_zone());
1683 } else {
1684 auto it =
1685 std::find(spilled_consts->begin(), spilled_consts->end(), range);
1686 already_spilled = it != spilled_consts->end();
1687 }
1688 auto it = data()->slot_for_const_range().find(range);
1689 if (it == data()->slot_for_const_range().end()) {
1690 DCHECK(!already_spilled);
1691 int width = ByteWidthForStackSlot(range->representation());
1692 int index = data()->frame()->AllocateSpillSlot(width);
1695 range->representation(), index);
1696 it = data()->slot_for_const_range().emplace(range, slot).first;
1697 }
1698 if (!already_spilled) {
1699 auto* slot = it->second;
1700 int input_vreg = cur_input->virtual_register();
1702 input_vreg);
1703 // Spill at every use position for simplicity, this case is very rare.
1704 data()->AddGapMove(instr_index, Instruction::END, input_copy, *slot);
1705 spilled_consts->push_back(range);
1706 }
1707 }
1708 }
1709 if (cur_input->HasFixedPolicy()) {
1710 int input_vreg = cur_input->virtual_register();
1712 input_vreg);
1713 bool is_tagged = code()->IsReference(input_vreg);
1714 AllocateFixed(cur_input, instr_index, is_tagged, true, false);
1715 data()->AddGapMove(instr_index, Instruction::END, input_copy, *cur_input);
1716 }
1717 }
1718 // Handle "output same as input" for second instruction.
1719 for (size_t i = 0; i < second->OutputCount(); i++) {
1720 InstructionOperand* output = second->OutputAt(i);
1721 if (!output->IsUnallocated()) continue;
1722 UnallocatedOperand* second_output = UnallocatedOperand::cast(output);
1723 if (!second_output->HasSameAsInputPolicy()) continue;
1724 DCHECK_EQ(0, i); // Only valid for first output.
1725 UnallocatedOperand* cur_input =
1726 UnallocatedOperand::cast(second->InputAt(second_output->input_index()));
1727 int output_vreg = second_output->virtual_register();
1728 int input_vreg = cur_input->virtual_register();
1730 input_vreg);
1731 *cur_input =
1732 UnallocatedOperand(*cur_input, second_output->virtual_register());
1733 MoveOperands* gap_move = data()->AddGapMove(instr_index, Instruction::END,
1734 input_copy, *cur_input);
1735 DCHECK_NOT_NULL(gap_move);
1736 if (code()->IsReference(input_vreg) && !code()->IsReference(output_vreg)) {
1737 if (second->HasReferenceMap()) {
1738 RegisterAllocationData::DelayedReference delayed_reference = {
1739 second->reference_map(), &gap_move->source()};
1740 data()->delayed_references().push_back(delayed_reference);
1741 }
1742 }
1743 }
1744}
1745
1746void ConstraintBuilder::ResolvePhis() {
1747  // Process the blocks in reverse order.
1748 for (InstructionBlock* block : base::Reversed(code()->instruction_blocks())) {
1750 ResolvePhis(block);
1751 }
1752}
1753
1754void ConstraintBuilder::ResolvePhis(const InstructionBlock* block) {
1755  for (PhiInstruction* phi : block->phis()) {
1756 int phi_vreg = phi->virtual_register();
1757    RegisterAllocationData::PhiMapValue* map_value =
1758        data()->InitializePhiMap(block, phi);
1759 InstructionOperand& output = phi->output();
1760 // Map the destination operands, so the commitment phase can find them.
1761 for (size_t i = 0; i < phi->operands().size(); ++i) {
1762 InstructionBlock* cur_block =
1763 code()->InstructionBlockAt(block->predecessors()[i]);
1765 phi->operands()[i]);
1766 MoveOperands* move = data()->AddGapMove(
1767 cur_block->last_instruction_index(), Instruction::END, input, output);
1768 map_value->AddOperand(&move->destination());
1769 DCHECK(!code()
1770 ->InstructionAt(cur_block->last_instruction_index())
1771 ->HasReferenceMap());
1772 }
1773 TopLevelLiveRange* live_range = data()->GetLiveRangeFor(phi_vreg);
1774 int gap_index = block->first_instruction_index();
1775 live_range->RecordSpillLocation(allocation_zone(), gap_index, &output);
1776 live_range->SetSpillStartIndex(gap_index);
1777 // We use the phi-ness of some nodes in some later heuristics.
1778 live_range->set_is_phi(true);
1779 live_range->set_is_non_loop_phi(!block->IsLoopHeader());
1780 }
1781}
1782
1783LiveRangeBuilder::LiveRangeBuilder(RegisterAllocationData* data,
1784                                   Zone* local_zone)
1785 : data_(data), phi_hints_(local_zone) {}
1786
1787SparseBitVector* LiveRangeBuilder::ComputeLiveOut(
1788    const InstructionBlock* block, RegisterAllocationData* data) {
1789 size_t block_index = block->rpo_number().ToSize();
1790 SparseBitVector* live_out = data->live_out_sets()[block_index];
1791 if (live_out == nullptr) {
1792 // Compute live out for the given block, except not including backward
1793 // successor edges.
1794 Zone* zone = data->allocation_zone();
1795 const InstructionSequence* code = data->code();
1796
1797 live_out = zone->New<SparseBitVector>(zone);
1798
1799 // Process all successor blocks.
1800 for (const RpoNumber& succ : block->successors()) {
1801 // Add values live on entry to the successor.
1802 if (succ <= block->rpo_number()) continue;
1803 SparseBitVector* live_in = data->live_in_sets()[succ.ToSize()];
1804 if (live_in != nullptr) live_out->Union(*live_in);
1805
1806 // All phi input operands corresponding to this successor edge are live
1807 // out from this block.
1808 const InstructionBlock* successor = code->InstructionBlockAt(succ);
1809 size_t index = successor->PredecessorIndexOf(block->rpo_number());
1810 DCHECK(index < successor->PredecessorCount());
1811 for (PhiInstruction* phi : successor->phis()) {
1812 live_out->Add(phi->operands()[index]);
1813 }
1814 }
1815 data->live_out_sets()[block_index] = live_out;
1816 }
1817 return live_out;
1818}
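// Live-out for a block is computed lazily and cached: it is the union of the
// live-in sets of forward successors plus the phi inputs flowing along each
// successor edge; backward (loop) edges are deliberately skipped here.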
1819
1820void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block,
1821                                           SparseBitVector* live_out) {
1822  // Add an interval that includes the entire block to the live range for
1823  // each live_out value.
1824  LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
1825      block->first_instruction_index());
1826  LifetimePosition end = LifetimePosition::GapFromInstructionIndex(
1827                             block->last_instruction_index())
1828                             .NextStart();
1829  for (int operand_index : *live_out) {
1830    TopLevelLiveRange* range = data()->GetLiveRangeFor(operand_index);
1831    range->AddUseInterval(start, end, allocation_zone());
1832  }
1833}
1834
1835int LiveRangeBuilder::FixedFPLiveRangeID(int index, MachineRepresentation rep) {
1836  int result = -index - 1;
1837 switch (rep) {
1839 result -=
1840 kNumberOfFixedRangesPerRegister * config()->num_simd128_registers();
1841 [[fallthrough]];
1843 result -=
1844 kNumberOfFixedRangesPerRegister * config()->num_float_registers();
1845 [[fallthrough]];
1847 result -=
1848 kNumberOfFixedRangesPerRegister * config()->num_double_registers();
1849 [[fallthrough]];
1851 result -=
1852 kNumberOfFixedRangesPerRegister * config()->num_general_registers();
1853 break;
1854 default:
1855 UNREACHABLE();
1856 }
1857 return result;
1858}
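// The fall-through chain above keeps fixed FP live-range ids disjoint:
// starting from -index - 1, each case also subtracts the id blocks of every
// representation below it in the chain (down to the general registers), so a
// fixed range id encodes both the register index and its representation.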
1859
1860TopLevelLiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index,
1861                                                       SpillMode spill_mode) {
1862  int offset = spill_mode == SpillMode::kSpillAtDefinition
1863                   ? 0
1864                   : config()->num_general_registers();
1865  DCHECK(index < config()->num_general_registers());
1866  TopLevelLiveRange* result = data()->fixed_live_ranges()[offset + index];
1867  if (result == nullptr) {
1868    MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
1869    result = data()->NewLiveRange(FixedLiveRangeID(offset + index), rep);
1870    DCHECK(result->IsFixed());
1871    result->set_assigned_register(index);
1872    data()->MarkAllocated(rep, index);
1873    if (spill_mode == SpillMode::kSpillDeferred) {
1874      result->set_deferred_fixed();
1875    }
1876    data()->fixed_live_ranges()[offset + index] = result;
1877  }
1878 return result;
1879}
1880
1881TopLevelLiveRange* LiveRangeBuilder::FixedFPLiveRangeFor(
1882    int index, MachineRepresentation rep, SpillMode spill_mode) {
1883 int num_regs = config()->num_double_registers();
1884 ZoneVector<TopLevelLiveRange*>* live_ranges =
1887 switch (rep) {
1890 num_regs = config()->num_float_registers();
1891 live_ranges = &data()->fixed_float_live_ranges();
1892 break;
1894 num_regs = config()->num_simd128_registers();
1895 live_ranges = &data()->fixed_simd128_live_ranges();
1896 break;
1897 default:
1898 break;
1899 }
1900 }
1901
1902 int offset = spill_mode == SpillMode::kSpillAtDefinition ? 0 : num_regs;
1903
1904 DCHECK(index < num_regs);
1905 USE(num_regs);
1906 TopLevelLiveRange* result = (*live_ranges)[offset + index];
1907 if (result == nullptr) {
1908 result = data()->NewLiveRange(FixedFPLiveRangeID(offset + index, rep), rep);
1909 DCHECK(result->IsFixed());
1910 result->set_assigned_register(index);
1911 data()->MarkAllocated(rep, index);
1912 if (spill_mode == SpillMode::kSpillDeferred) {
1913 result->set_deferred_fixed();
1914 }
1915 (*live_ranges)[offset + index] = result;
1916 }
1917 return result;
1918}
1919
1920TopLevelLiveRange* LiveRangeBuilder::FixedSIMD128LiveRangeFor(
1921    int index, SpillMode spill_mode) {
1923 int num_regs = config()->num_simd128_registers();
1924 ZoneVector<TopLevelLiveRange*>* live_ranges =
1926 int offset = spill_mode == SpillMode::kSpillAtDefinition ? 0 : num_regs;
1927
1928 DCHECK(index < num_regs);
1929 USE(num_regs);
1930 TopLevelLiveRange* result = (*live_ranges)[offset + index];
1931 if (result == nullptr) {
1935 DCHECK(result->IsFixed());
1936 result->set_assigned_register(index);
1938 if (spill_mode == SpillMode::kSpillDeferred) {
1939 result->set_deferred_fixed();
1940 }
1941 (*live_ranges)[offset + index] = result;
1942 }
1943 return result;
1944}
1945
1946TopLevelLiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand,
1947                                                  SpillMode spill_mode) {
1948 if (operand->IsUnallocated()) {
1949 return data()->GetLiveRangeFor(
1950 UnallocatedOperand::cast(operand)->virtual_register());
1951 } else if (operand->IsConstant()) {
1952 return data()->GetLiveRangeFor(
1953 ConstantOperand::cast(operand)->virtual_register());
1954 } else if (operand->IsRegister()) {
1955 return FixedLiveRangeFor(
1956 LocationOperand::cast(operand)->GetRegister().code(), spill_mode);
1957 } else if (operand->IsFPRegister()) {
1961 return FixedSIMD128LiveRangeFor(op->register_code(), spill_mode);
1962 }
1964 spill_mode);
1965 } else {
1966 return nullptr;
1967 }
1968}
1969
1970UsePosition* LiveRangeBuilder::NewUsePosition(LifetimePosition pos,
1971                                              InstructionOperand* operand,
1972 void* hint,
1973 UsePositionHintType hint_type) {
1974 return allocation_zone()->New<UsePosition>(pos, operand, hint, hint_type);
1975}
1976
1977UsePosition* LiveRangeBuilder::Define(LifetimePosition position,
1978                                      InstructionOperand* operand, void* hint,
1979 UsePositionHintType hint_type,
1980 SpillMode spill_mode) {
1981 TopLevelLiveRange* range = LiveRangeFor(operand, spill_mode);
1982 if (range == nullptr) return nullptr;
1983
1984 if (range->IsEmpty() || range->Start() > position) {
1985 // Can happen if there is a definition without use.
1986 range->AddUseInterval(position, position.NextStart(), allocation_zone());
1987 range->AddUsePosition(NewUsePosition(position.NextStart()),
1988 allocation_zone());
1989 } else {
1990 range->ShortenTo(position);
1991 }
1992 if (!operand->IsUnallocated()) return nullptr;
1993 UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
1994 UsePosition* use_pos =
1995 NewUsePosition(position, unalloc_operand, hint, hint_type);
1996 range->AddUsePosition(use_pos, allocation_zone());
1997 return use_pos;
1998}
1999
2000UsePosition* LiveRangeBuilder::Use(LifetimePosition block_start,
2001                                   LifetimePosition position,
2002                                   InstructionOperand* operand, void* hint,
2003 UsePositionHintType hint_type,
2004 SpillMode spill_mode) {
2005 TopLevelLiveRange* range = LiveRangeFor(operand, spill_mode);
2006 if (range == nullptr) return nullptr;
2007 UsePosition* use_pos = nullptr;
2008 if (operand->IsUnallocated()) {
2009 UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
2010 use_pos = NewUsePosition(position, unalloc_operand, hint, hint_type);
2011 range->AddUsePosition(use_pos, allocation_zone());
2012 }
2013 range->AddUseInterval(block_start, position, allocation_zone());
2014 return use_pos;
2015}
2016
2017void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
2018                                           SparseBitVector* live) {
2019 int block_start = block->first_instruction_index();
2020 LifetimePosition block_start_position =
2021      LifetimePosition::GapFromInstructionIndex(block_start);
2022  bool fixed_float_live_ranges = false;
2023 bool fixed_simd128_live_ranges = false;
2025 int mask = data()->code()->representation_mask();
2026 fixed_float_live_ranges = (mask & kFloat32Bit) != 0;
2027 fixed_simd128_live_ranges = (mask & kSimd128Bit) != 0;
2029 int mask = data()->code()->representation_mask();
2030 fixed_simd128_live_ranges = (mask & kSimd128Bit) != 0;
2031 }
2032 SpillMode spill_mode = SpillModeForBlock(block);
2033
2034 for (int index = block->last_instruction_index(); index >= block_start;
2035 index--) {
2036 LifetimePosition curr_position =
2037        LifetimePosition::InstructionFromInstructionIndex(index);
2038    Instruction* instr = code()->InstructionAt(index);
2040 DCHECK(curr_position.IsInstructionPosition());
2041 // Process output, inputs, and temps of this instruction.
2042 for (size_t i = 0; i < instr->OutputCount(); i++) {
2043 InstructionOperand* output = instr->OutputAt(i);
2044 if (output->IsUnallocated()) {
2045 // Unsupported.
2046 DCHECK(!UnallocatedOperand::cast(output)->HasSlotPolicy());
2047 int out_vreg = UnallocatedOperand::cast(output)->virtual_register();
2048 live->Remove(out_vreg);
2049 } else if (output->IsConstant()) {
2050 int out_vreg = ConstantOperand::cast(output)->virtual_register();
2051 live->Remove(out_vreg);
2052 }
2053 if (block->IsHandler() && index == block_start && output->IsAllocated() &&
2054 output->IsRegister() &&
2055 AllocatedOperand::cast(output)->GetRegister() ==
2056 v8::internal::kReturnRegister0) {
2057 // The register defined here is blocked from the gap start - it is the
2058 // exception value.
2059 // TODO(mtrofin): should we explore an explicit opcode for
2060 // the first instruction in the handler?
2061 Define(LifetimePosition::GapFromInstructionIndex(index), output,
2062 spill_mode);
2063 } else {
2064 Define(curr_position, output, spill_mode);
2065 }
2066 }
2067
2068 if (instr->ClobbersRegisters()) {
2069 for (int i = 0; i < config()->num_allocatable_general_registers(); ++i) {
2070 // Create a UseInterval at this instruction for all fixed registers,
2071 // (including the instruction outputs). Adding another UseInterval here
2072 // is OK because AddUseInterval will just merge it with the existing
2073 // one at the end of the range.
2074 int code = config()->GetAllocatableGeneralCode(i);
2075 TopLevelLiveRange* range = FixedLiveRangeFor(code, spill_mode);
2076 range->AddUseInterval(curr_position, curr_position.End(),
2077 allocation_zone());
2078 }
2079 }
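// In effect, a register-clobbering instruction (typically a call) occupies
// every allocatable general register for this one instruction position, so
// any virtual register that is live across it will conflict with the fixed
// ranges and end up split or spilled around the call.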
2080
2081 if (instr->ClobbersDoubleRegisters()) {
2082 for (int i = 0; i < config()->num_allocatable_double_registers(); ++i) {
2083 // Add a UseInterval for all DoubleRegisters. See comment above for
2084 // general registers.
2085 int code = config()->GetAllocatableDoubleCode(i);
2086 TopLevelLiveRange* range = FixedFPLiveRangeFor(
2087 code, MachineRepresentation::kFloat64, spill_mode);
2088 range->AddUseInterval(curr_position, curr_position.End(),
2089 allocation_zone());
2090 }
2091 // Clobber fixed float registers on archs with non-simple aliasing.
2092 if (kFPAliasing == AliasingKind::kCombine) {
2093 if (fixed_float_live_ranges) {
2094 for (int i = 0; i < config()->num_allocatable_float_registers();
2095 ++i) {
2096 // Add a UseInterval for all FloatRegisters. See comment above for
2097 // general registers.
2098 int code = config()->GetAllocatableFloatCode(i);
2099 TopLevelLiveRange* range = FixedFPLiveRangeFor(
2100 code, MachineRepresentation::kFloat32, spill_mode);
2101 range->AddUseInterval(curr_position, curr_position.End(),
2102 allocation_zone());
2103 }
2104 }
2105 if (fixed_simd128_live_ranges) {
2106 for (int i = 0; i < config()->num_allocatable_simd128_registers();
2107 ++i) {
2108 int code = config()->GetAllocatableSimd128Code(i);
2109 TopLevelLiveRange* range = FixedFPLiveRangeFor(
2110 code, MachineRepresentation::kSimd128, spill_mode);
2111 range->AddUseInterval(curr_position, curr_position.End(),
2112 allocation_zone());
2113 }
2114 }
2115 } else if (kFPAliasing == AliasingKind::kIndependent) {
2116 if (fixed_simd128_live_ranges) {
2117 for (int i = 0; i < config()->num_allocatable_simd128_registers();
2118 ++i) {
2119 int code = config()->GetAllocatableSimd128Code(i);
2120 TopLevelLiveRange* range =
2121 FixedSIMD128LiveRangeFor(code, spill_mode);
2122 range->AddUseInterval(curr_position, curr_position.End(),
2123 allocation_zone());
2124 }
2125 }
2126 }
2127 }
2128
2129 for (size_t i = 0; i < instr->InputCount(); i++) {
2130 InstructionOperand* input = instr->InputAt(i);
2131 if (input->IsImmediate()) {
2132 continue; // Ignore immediates.
2133 }
2134 LifetimePosition use_pos;
2135 if (input->IsUnallocated() &&
2136 UnallocatedOperand::cast(input)->IsUsedAtStart()) {
2137 use_pos = curr_position;
2138 } else {
2139 use_pos = curr_position.End();
2140 }
2141
2142 if (input->IsUnallocated()) {
2143 UnallocatedOperand* unalloc = UnallocatedOperand::cast(input);
2144 int vreg = unalloc->virtual_register();
2145 live->Add(vreg);
2146 if (unalloc->HasSlotPolicy()) {
2147 data()->GetLiveRangeFor(vreg)->register_slot_use(
2148 block->IsDeferred()
2149 ? TopLevelLiveRange::SlotUseKind::kDeferredSlotUse
2150 : TopLevelLiveRange::SlotUseKind::kGeneralSlotUse);
2151 }
2152 }
2153 Use(block_start_position, use_pos, input, spill_mode);
2154 }
2155
2156 for (size_t i = 0; i < instr->TempCount(); i++) {
2157 InstructionOperand* temp = instr->TempAt(i);
2158 // Unsupported.
2159 DCHECK_IMPLIES(temp->IsUnallocated(),
2160 !UnallocatedOperand::cast(temp)->HasSlotPolicy());
2161 if (instr->ClobbersTemps()) {
2162 if (temp->IsRegister()) continue;
2163 if (temp->IsUnallocated()) {
2164 UnallocatedOperand* temp_unalloc = UnallocatedOperand::cast(temp);
2165 if (temp_unalloc->HasFixedPolicy()) {
2166 continue;
2167 }
2168 }
2169 }
2170 Use(block_start_position, curr_position.End(), temp, spill_mode);
2171 Define(curr_position, temp, spill_mode);
2172 }
2173
2174 // Process the moves of the instruction's gaps, making their sources live.
2175 const Instruction::GapPosition kPositions[] = {Instruction::END,
2176 Instruction::START};
2177 curr_position = curr_position.PrevStart();
2178 DCHECK(curr_position.IsGapPosition());
2179 for (const Instruction::GapPosition& position : kPositions) {
2180 ParallelMove* move = instr->GetParallelMove(position);
2181 if (move == nullptr) continue;
2182 if (position == Instruction::END) {
2183 curr_position = curr_position.End();
2184 } else {
2185 curr_position = curr_position.Start();
2186 }
2187 for (MoveOperands* cur : *move) {
2188 InstructionOperand& from = cur->source();
2189 InstructionOperand& to = cur->destination();
2190 void* hint = &to;
2191 UsePositionHintType hint_type = UsePosition::HintTypeForOperand(to);
2192 UsePosition* to_use = nullptr;
2193 int phi_vreg = -1;
2194 if (to.IsUnallocated()) {
2195 int to_vreg = UnallocatedOperand::cast(to).virtual_register();
2196 TopLevelLiveRange* to_range = data()->GetLiveRangeFor(to_vreg);
2197 if (to_range->is_phi()) {
2198 phi_vreg = to_vreg;
2199 if (to_range->is_non_loop_phi()) {
2200 hint = to_range->current_hint_position();
2201 hint_type = hint == nullptr ? UsePositionHintType::kNone
2202 : UsePositionHintType::kUnresolved;
2203 } else {
2204 hint_type = UsePositionHintType::kPhi;
2205 hint = data()->GetPhiMapValueFor(to_vreg);
2206 }
2207 } else {
2208 if (live->Contains(to_vreg)) {
2209 to_use =
2210 Define(curr_position, &to, &from,
2211 UsePosition::HintTypeForOperand(from), spill_mode);
2212 live->Remove(to_vreg);
2213 } else {
2214 cur->Eliminate();
2215 continue;
2216 }
2217 }
2218 } else {
2219 Define(curr_position, &to, spill_mode);
2220 }
2221 UsePosition* from_use = Use(block_start_position, curr_position, &from,
2222 hint, hint_type, spill_mode);
2223 // Mark range live.
2224 if (from.IsUnallocated()) {
2225 live->Add(UnallocatedOperand::cast(from).virtual_register());
2226 }
2227 // When the value is moved to a register to meet input constraints,
2228 // we should treat this use like a register use in the backward spilling
2229 // heuristics, even though this use is not register beneficial at the
2230 // AllocateBlockedReg stage.
2231 if (to.IsAnyRegister() ||
2232 (to.IsUnallocated() &&
2233 UnallocatedOperand::cast(&to)->HasRegisterPolicy())) {
2234 from_use->set_spill_detrimental();
2235 }
2236 // Resolve use position hints just created.
2237 if (to_use != nullptr && from_use != nullptr) {
2238 to_use->ResolveHint(from_use);
2239 from_use->ResolveHint(to_use);
2240 }
2241 DCHECK_IMPLIES(to_use != nullptr, to_use->IsResolved());
2242 DCHECK_IMPLIES(from_use != nullptr, from_use->IsResolved());
2243 // Potentially resolve phi hint.
2244 if (phi_vreg != -1) ResolvePhiHint(&from, from_use);
2245 }
2246 }
2247 }
2248}
2249
2251 SparseBitVector* live) {
2252 for (PhiInstruction* phi : block->phis()) {
2253 // The live range interval already ends at the first instruction of the
2254 // block.
2255 int phi_vreg = phi->virtual_register();
2256 live->Remove(phi_vreg);
2257 // Select a hint from a predecessor block that precedes this block in the
2258 // rpo order. In order of priority:
2259 // - Avoid hints from deferred blocks.
2260 // - Prefer hints from allocated (or explicit) operands.
2261 // - Prefer hints from empty blocks (containing just parallel moves and a
2262 // jump). In these cases, if we can elide the moves, the jump threader
2263 // is likely to be able to elide the jump.
2264 // The enforcement of hinting in rpo order is required because hint
2265 // resolution that happens later in the compiler pipeline visits
2266 // instructions in reverse rpo order, relying on the fact that phis are
2267 // encountered before their hints.
2268 InstructionOperand* hint = nullptr;
2269 int hint_preference = 0;
2270
2271 // The cost of hinting increases with the number of predecessors. At the
2272 // same time, the typical benefit decreases, since this hinting only
2273 // optimises the execution path through one predecessor. A limit of 2 is
2274 // sufficient to hit the common if/else pattern.
2275 int predecessor_limit = 2;
2276
2277 for (RpoNumber predecessor : block->predecessors()) {
2278 const InstructionBlock* predecessor_block =
2279 code()->InstructionBlockAt(predecessor);
2280 DCHECK_EQ(predecessor_block->rpo_number(), predecessor);
2281
2282 // Only take hints from earlier rpo numbers.
2283 if (predecessor >= block->rpo_number()) continue;
2284
2285 // Look up the predecessor instruction.
2286 const Instruction* predecessor_instr =
2287 GetLastInstruction(code(), predecessor_block);
2288 InstructionOperand* predecessor_hint = nullptr;
2289 // Phis are assigned in the END position of the last instruction in each
2290 // predecessor block.
2291 for (MoveOperands* move :
2292 *predecessor_instr->GetParallelMove(Instruction::END)) {
2293 InstructionOperand& to = move->destination();
2294 if (to.IsUnallocated() &&
2295 UnallocatedOperand::cast(to).virtual_register() == phi_vreg) {
2296 predecessor_hint = &move->source();
2297 break;
2298 }
2299 }
2300 DCHECK_NOT_NULL(predecessor_hint);
2301
2302 // For each predecessor, generate a score according to the priorities
2303 // described above, and pick the best one. Flags in higher-order bits have
2304 // a higher priority than those in lower-order bits.
2305 int predecessor_hint_preference = 0;
2306 const int kNotDeferredBlockPreference = (1 << 2);
2307 const int kMoveIsAllocatedPreference = (1 << 1);
2308 const int kBlockIsEmptyPreference = (1 << 0);
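// Worked example of the scoring: a non-deferred, empty predecessor whose
// hint comes from an allocated operand scores 0b111 = 7; a non-deferred,
// non-empty one with an unallocated hint scores 0b100 = 4; a deferred
// predecessor can score at most 0b011 = 3, so it only wins if no
// non-deferred candidate is available.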
2309
2310 // - Avoid hints from deferred blocks.
2311 if (!predecessor_block->IsDeferred()) {
2312 predecessor_hint_preference |= kNotDeferredBlockPreference;
2313 }
2314
2315 // - Prefer hints from allocated operands.
2316 //
2317 // Already-allocated operands are typically assigned using the parallel
2318 // moves on the last instruction. For example:
2319 //
2320 // gap (v101 = [x0|R|w32]) (v100 = v101)
2321 // ArchJmp
2322 // ...
2323 // phi: v100 = v101 v102
2324 //
2325 // We have already found the END move, so look for a matching START move
2326 // from an allocated operand.
2327 //
2328 // Note that we cannot simply look up data()->live_ranges()[vreg] here
2329 // because the live ranges are still being built when this function is
2330 // called.
2331 // TODO(v8): Find a way to separate hinting from live range analysis in
2332 // BuildLiveRanges so that we can use the O(1) live-range look-up.
2333 auto moves = predecessor_instr->GetParallelMove(Instruction::START);
2334 if (moves != nullptr) {
2335 for (MoveOperands* move : *moves) {
2336 InstructionOperand& to = move->destination();
2337 if (predecessor_hint->Equals(to)) {
2338 if (move->source().IsAllocated()) {
2339 predecessor_hint_preference |= kMoveIsAllocatedPreference;
2340 }
2341 break;
2342 }
2343 }
2344 }
2345
2346 // - Prefer hints from empty blocks.
2347 if (predecessor_block->last_instruction_index() ==
2348 predecessor_block->first_instruction_index()) {
2349 predecessor_hint_preference |= kBlockIsEmptyPreference;
2350 }
2351
2352 if ((hint == nullptr) ||
2353 (predecessor_hint_preference > hint_preference)) {
2354 // Take the hint from this predecessor.
2355 hint = predecessor_hint;
2356 hint_preference = predecessor_hint_preference;
2357 }
2358
2359 if (--predecessor_limit <= 0) break;
2360 }
2361 DCHECK_NOT_NULL(hint);
2362
2363 LifetimePosition block_start = LifetimePosition::GapFromInstructionIndex(
2364 block->first_instruction_index());
2365 UsePosition* use_pos = Define(block_start, &phi->output(), hint,
2366 UsePosition::HintTypeForOperand(*hint),
2367 SpillModeForBlock(block));
2368 MapPhiHint(hint, use_pos);
2369 }
2370}
2371
2372void LiveRangeBuilder::ProcessLoopHeader(const InstructionBlock* block,
2373 SparseBitVector* live) {
2374 DCHECK(block->IsLoopHeader());
2375 // Add a live range stretching from the first loop instruction to the last
2376 // for each value live on entry to the header.
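// A value that is live on entry to the header may still be needed on the
// back edge, so it has to be treated as live for the whole loop body; the
// same values are therefore also added to the live-in sets of every block
// inside the loop below.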
2377 LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
2378 block->first_instruction_index());
2379 LifetimePosition end = LifetimePosition::GapFromInstructionIndex(
2380 code()->LastLoopInstructionIndex(block))
2381 .NextFullStart();
2382 for (int operand_index : *live) {
2383 TopLevelLiveRange* range = data()->GetLiveRangeFor(operand_index);
2384 range->EnsureInterval(start, end, allocation_zone());
2385 }
2386 // Insert all values into the live in sets of all blocks in the loop.
2387 for (int i = block->rpo_number().ToInt() + 1; i < block->loop_end().ToInt();
2388 ++i) {
2389 live_in_sets()[i]->Union(*live);
2390 }
2391}
2392
2393void LiveRangeBuilder::BuildLiveRanges() {
2394 // Process the blocks in reverse order.
2395 for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
2396 --block_id) {
2398 InstructionBlock* block =
2399 code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
2400 SparseBitVector* live = ComputeLiveOut(block, data());
2401 // Initially consider all live_out values live for the entire block. We
2402 // will shorten these intervals if necessary.
2403 AddInitialIntervals(block, live);
2404 // Process the instructions in reverse order, generating and killing
2405 // live values.
2406 ProcessInstructions(block, live);
2407 // All phi output operands are killed by this block.
2408 ProcessPhis(block, live);
2409 // Now live contains the live_in set for this block, excluding values
2410 // live out on backward successor edges.
2411 if (block->IsLoopHeader()) ProcessLoopHeader(block, live);
2412 live_in_sets()[block_id] = live;
2413 }
2414 // Postprocess the ranges.
2415 const size_t live_ranges_size = data()->live_ranges().size();
2416 for (TopLevelLiveRange* range : data()->live_ranges()) {
2418 CHECK_EQ(live_ranges_size,
2419 data()->live_ranges().size()); // TODO(neis): crbug.com/831822
2420 DCHECK_NOT_NULL(range);
2421 // Give slots to all ranges with a non fixed slot use.
2422 if (range->has_slot_use() && range->HasNoSpillType()) {
2423 SpillMode spill_mode =
2424 range->slot_use_kind() ==
2425 TopLevelLiveRange::SlotUseKind::kDeferredSlotUse
2426 ? SpillMode::kSpillDeferred
2427 : SpillMode::kSpillAtDefinition;
2428 data()->AssignSpillRangeToLiveRange(range, spill_mode);
2429 }
2430 // TODO(bmeurer): This is a horrible hack to make sure that for constant
2431 // live ranges, every use requires the constant to be in a register.
2432 // Without this hack, all uses with "any" policy would get the constant
2433 // operand assigned.
2434 if (range->HasSpillOperand() && range->GetSpillOperand()->IsConstant()) {
2435 for (UsePosition* pos : range->positions()) {
2436 if (pos->type() == UsePositionType::kRequiresSlot ||
2437 pos->type() == UsePositionType::kRegisterOrSlotOrConstant) {
2438 continue;
2439 }
2440 UsePositionType new_type = UsePositionType::kRegisterOrSlot;
2441 // Can't mark phis as needing a register.
2442 if (!pos->pos().IsGapPosition()) {
2443 new_type = UsePositionType::kRequiresRegister;
2444 }
2445 pos->set_type(new_type, true);
2446 }
2447 }
2448 range->ResetCurrentHintPosition();
2449 }
2450 for (auto preassigned : data()->preassigned_slot_ranges()) {
2451 TopLevelLiveRange* range = preassigned.first;
2452 int slot_id = preassigned.second;
2453 SpillRange* spill = range->HasSpillRange()
2454 ? range->GetSpillRange()
2455 : data()->AssignSpillRangeToLiveRange(
2456 range, SpillMode::kSpillAtDefinition);
2457 spill->set_assigned_slot(slot_id);
2458 }
2459#ifdef DEBUG
2460 Verify();
2461#endif
2462}
2463
2464void LiveRangeBuilder::MapPhiHint(InstructionOperand* operand,
2465 UsePosition* use_pos) {
2466 DCHECK(!use_pos->IsResolved());
2467 auto res = phi_hints_.insert(std::make_pair(operand, use_pos));
2468 DCHECK(res.second);
2469 USE(res);
2470}
2471
2472void LiveRangeBuilder::ResolvePhiHint(InstructionOperand* operand,
2473 UsePosition* use_pos) {
2474 auto it = phi_hints_.find(operand);
2475 if (it == phi_hints_.end()) return;
2476 DCHECK(!it->second->IsResolved());
2477 it->second->ResolveHint(use_pos);
2478}
2479
2480#ifdef DEBUG
2481void LiveRangeBuilder::Verify() const {
2482 for (auto& hint : phi_hints_) {
2483 DCHECK(hint.second->IsResolved());
2484 }
2485 for (TopLevelLiveRange* current : data()->live_ranges()) {
2486 DCHECK_NOT_NULL(current);
2487 if (!current->IsEmpty()) {
2488 // New LiveRanges should not be split.
2489 DCHECK_NULL(current->next());
2490 // General integrity check.
2491 current->Verify();
2492 if (current->intervals().size() < 2) continue;
2493
2494 // Consecutive intervals should not end and start in the same block,
2495 // otherwise the intervals should have been joined, because the
2496 // variable is live throughout that block.
2497 UseIntervalVector::const_iterator interval = current->intervals().begin();
2498 UseIntervalVector::const_iterator next_interval = interval + 1;
2499 DCHECK(NextIntervalStartsInDifferentBlocks(*interval, *next_interval));
2500
2501 for (++interval; interval != current->intervals().end(); ++interval) {
2502 // Except for the first interval, the other intervals must start at
2503 // a block boundary, otherwise data wouldn't flow to them.
2504 // You might trigger this CHECK if your SSA is not valid. For instance,
2505 // if the inputs of a Phi node are in the wrong order.
2506 DCHECK(IntervalStartsAtBlockBoundary(*interval));
2507 // The last instruction of the predecessors of the block the interval
2508 // starts must be covered by the range.
2509 DCHECK(IntervalPredecessorsCoveredByRange(*interval, current));
2510 next_interval = interval + 1;
2511 if (next_interval != current->intervals().end()) {
2512 // Check the consecutive intervals property, except for the last
2513 // interval, where it doesn't apply.
2514 DCHECK(
2515 NextIntervalStartsInDifferentBlocks(*interval, *next_interval));
2516 }
2517 }
2518 }
2519 }
2520}
2521
2522bool LiveRangeBuilder::IntervalStartsAtBlockBoundary(
2523 UseInterval interval) const {
2524 LifetimePosition start = interval.start();
2525 if (!start.IsFullStart()) return false;
2526 int instruction_index = start.ToInstructionIndex();
2527 const InstructionBlock* block =
2528 data()->code()->GetInstructionBlock(instruction_index);
2529 return block->first_instruction_index() == instruction_index;
2530}
2531
2532bool LiveRangeBuilder::IntervalPredecessorsCoveredByRange(
2533 UseInterval interval, TopLevelLiveRange* range) const {
2534 LifetimePosition start = interval.start();
2535 int instruction_index = start.ToInstructionIndex();
2536 const InstructionBlock* block =
2537 data()->code()->GetInstructionBlock(instruction_index);
2538 for (RpoNumber pred_index : block->predecessors()) {
2539 const InstructionBlock* predecessor =
2540 data()->code()->InstructionBlockAt(pred_index);
2541 LifetimePosition last_pos = LifetimePosition::GapFromInstructionIndex(
2542 predecessor->last_instruction_index());
2543 last_pos = last_pos.NextStart().End();
2544 if (!range->Covers(last_pos)) return false;
2545 }
2546 return true;
2547}
2548
2549bool LiveRangeBuilder::NextIntervalStartsInDifferentBlocks(
2550 UseInterval interval, UseInterval next) const {
2551 LifetimePosition end = interval.end();
2552 LifetimePosition next_start = next.start();
2553 // Since end is not covered, but the previous position is, move back a
2554 // position.
2555 end = end.IsStart() ? end.PrevStart().End() : end.Start();
2556 int last_covered_index = end.ToInstructionIndex();
2557 const InstructionBlock* block =
2558 data()->code()->GetInstructionBlock(last_covered_index);
2559 const InstructionBlock* next_block =
2560 data()->code()->GetInstructionBlock(next_start.ToInstructionIndex());
2561 return block->rpo_number() < next_block->rpo_number();
2562}
2563#endif
2564
2565void BundleBuilder::BuildBundles() {
2566 TRACE("Build bundles\n");
2567 // Process the blocks in reverse order.
2568 for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
2569 --block_id) {
2570 InstructionBlock* block =
2571 code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
2572 TRACE("Block B%d\n", block_id);
2573 for (auto phi : block->phis()) {
2574 TopLevelLiveRange* out_range =
2575 data()->GetLiveRangeFor(phi->virtual_register());
2576 LiveRangeBundle* out = out_range->get_bundle();
2577 if (out == nullptr) {
2578 out = data()->allocation_zone()->New<LiveRangeBundle>(
2579 data()->allocation_zone(), next_bundle_id_++);
2580 out->TryAddRange(out_range);
2581 }
2582 TRACE("Processing phi for v%d with %d:%d\n", phi->virtual_register(),
2583 out_range->TopLevel()->vreg(), out_range->relative_id());
2584 bool phi_interferes_with_backedge_input = false;
2585 for (auto input : phi->operands()) {
2586 TopLevelLiveRange* input_range = data()->GetLiveRangeFor(input);
2587 TRACE("Input value v%d with range %d:%d\n", input,
2588 input_range->TopLevel()->vreg(), input_range->relative_id());
2589 LiveRangeBundle* input_bundle = input_range->get_bundle();
2590 if (input_bundle != nullptr) {
2591 TRACE("Merge\n");
2592 LiveRangeBundle* merged =
2593 LiveRangeBundle::TryMerge(out, input_bundle);
2594 if (merged != nullptr) {
2595 DCHECK_EQ(out_range->get_bundle(), merged);
2596 DCHECK_EQ(input_range->get_bundle(), merged);
2597 out = merged;
2598 TRACE("Merged %d and %d to %d\n", phi->virtual_register(), input,
2599 out->id());
2600 } else if (input_range->Start() > out_range->Start()) {
2601 // We are only interested in values defined after the phi, because
2602 // those are values that will go over a back-edge.
2603 phi_interferes_with_backedge_input = true;
2604 }
2605 } else {
2606 TRACE("Add\n");
2607 if (out->TryAddRange(input_range)) {
2608 TRACE("Added %d and %d to %d\n", phi->virtual_register(), input,
2609 out->id());
2610 } else if (input_range->Start() > out_range->Start()) {
2611 // We are only interested in values defined after the phi, because
2612 // those are values that will go over a back-edge.
2613 phi_interferes_with_backedge_input = true;
2614 }
2615 }
2616 }
2617 // Spilling the phi at the loop header is not beneficial if there is
2618 // a back-edge with an input for the phi that interferes with the phi's
2619 // value, because in case that input gets spilled it might introduce
2620 // a stack-to-stack move at the back-edge.
2621 if (phi_interferes_with_backedge_input)
2623 }
2624 TRACE("Done block B%d\n", block_id);
2625 }
2626}
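// Bundling groups a phi's output range with its input ranges. Ranges in the
// same bundle are later given a common spill slot (see
// MergeSpillRangesAndClear() below), so that a phi and its inputs can be
// spilled without requiring extra moves between distinct stack slots.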
2627
2628bool LiveRangeBundle::TryAddRange(TopLevelLiveRange* range) {
2629 DCHECK_NULL(range->get_bundle());
2630 // We may only add a new live range if its use intervals do not
2631 // overlap with existing intervals in the bundle.
2633 return false;
2634 AddRange(range);
2635 return true;
2636}
2637
2638void LiveRangeBundle::AddRange(TopLevelLiveRange* range) {
2639 TopLevelLiveRange** range_insert_it = std::lower_bound(
2640 ranges_.begin(), ranges_.end(), range, LiveRangeOrdering());
2641 DCHECK_IMPLIES(range_insert_it != ranges_.end(), *range_insert_it != range);
2642 // TODO(dlehmann): We might save some memory by using
2643 // `DoubleEndedSplitVector::insert<kFront>()` here: Since we add ranges
2644 // mostly backwards, ranges with an earlier `Start()` are inserted mostly
2645 // at the front.
2646 ranges_.insert(range_insert_it, 1, range);
2647 range->set_bundle(this);
2648
2649 // We also tried `std::merge`ing the sorted vectors of `intervals_` directly,
2650 // but it turns out the (always happening) copies are more expensive
2651 // than the (apparently seldom) copies due to insertion in the middle.
2652 for (UseInterval interval : range->intervals()) {
2653 UseInterval* interval_insert_it =
2654 std::lower_bound(intervals_.begin(), intervals_.end(), interval);
2655 DCHECK_IMPLIES(interval_insert_it != intervals_.end(),
2656 *interval_insert_it != interval);
2657 intervals_.insert(interval_insert_it, 1, interval);
2658 }
2659}
2660
2661LiveRangeBundle* LiveRangeBundle::TryMerge(LiveRangeBundle* lhs,
2662 LiveRangeBundle* rhs) {
2663 if (rhs == lhs) return lhs;
2664
2665 if (auto found =
2667 auto [interval1, interval2] = *found;
2668 TRACE("No merge %d:%d %d:%d\n", interval1.start().value(),
2669 interval1.end().value(), interval2.start().value(),
2670 interval2.end().value());
2671 return nullptr;
2672 }
2673 // Uses are disjoint, merging is possible.
2674
2675 // Merge the smaller bundle into the bigger.
2676 if (lhs->intervals_.size() < rhs->intervals_.size()) {
2677 std::swap(lhs, rhs);
2678 }
2679 for (TopLevelLiveRange* range : rhs->ranges_) {
2680 lhs->AddRange(range);
2681 }
2682
2683 rhs->ranges_.clear();
2684 rhs->intervals_.clear();
2685
2686 return lhs;
2687}
2688
2689void LiveRangeBundle::MergeSpillRangesAndClear() {
2690 DCHECK_IMPLIES(ranges_.empty(), intervals_.empty());
2691 SpillRange* target = nullptr;
2692 for (auto range : ranges_) {
2693 if (range->TopLevel()->HasSpillRange()) {
2694 SpillRange* current = range->TopLevel()->GetSpillRange();
2695 if (target == nullptr) {
2696 target = current;
2697 } else if (target != current) {
2698 target->TryMerge(current);
2699 }
2700 }
2701 }
2702 // Clear the fields so that we don't try to merge the spill ranges again when
2703 // we hit the same bundle from a different LiveRange in AssignSpillSlots.
2704 // LiveRangeBundles are not used after this.
2705 ranges_.clear();
2706 intervals_.clear();
2707}
2708
2709RegisterAllocator::RegisterAllocator(RegisterAllocationData* data,
2710 RegisterKind kind)
2711 : data_(data),
2712 mode_(kind),
2713 num_registers_(GetRegisterCount(data->config(), kind)),
2714 num_allocatable_registers_(
2715 GetAllocatableRegisterCount(data->config(), kind)),
2716 allocatable_register_codes_(
2717 GetAllocatableRegisterCodes(data->config(), kind)),
2718 check_fp_aliasing_(false) {
2719 if (kFPAliasing == AliasingKind::kCombine && kind == RegisterKind::kDouble) {
2720 check_fp_aliasing_ = (data->code()->representation_mask() &
2721 (kFloat32Bit | kSimd128Bit)) != 0;
2722 }
2723}
2724
2725LifetimePosition RegisterAllocator::GetSplitPositionForInstruction(
2726 const LiveRange* range, int instruction_index) {
2727 LifetimePosition ret = LifetimePosition::Invalid();
2728
2729 ret = LifetimePosition::GapFromInstructionIndex(instruction_index);
2730 if (range->Start() >= ret || ret >= range->End()) {
2731 return LifetimePosition::Invalid();
2732 }
2733 return ret;
2734}
2735
2736void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
2737 size_t initial_range_count = data()->live_ranges().size();
2738 for (size_t i = 0; i < initial_range_count; ++i) {
2739 CHECK_EQ(initial_range_count,
2740 data()->live_ranges().size()); // TODO(neis): crbug.com/831822
2741 TopLevelLiveRange* range = data()->live_ranges()[i];
2742 if (!CanProcessRange(range)) continue;
2743 // Only assume defined by memory operand if we are guaranteed to spill it or
2744 // it has a spill operand.
2745 if (range->HasNoSpillType() ||
2746 (range->HasSpillRange() && !range->has_non_deferred_slot_use())) {
2747 continue;
2748 }
2749 LifetimePosition start = range->Start();
2750 TRACE("Live range %d:%d is defined by a spill operand.\n",
2751 range->TopLevel()->vreg(), range->relative_id());
2752 LifetimePosition next_pos = start;
2753 if (next_pos.IsGapPosition()) {
2754 next_pos = next_pos.NextStart();
2755 }
2756
2757 UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
2758 // If the range already has a spill operand and it doesn't need a
2759 // register immediately, split it and spill the first part of the range.
2760 if (pos == nullptr) {
2761 Spill(range, SpillMode::kSpillAtDefinition);
2762 } else if (pos->pos() > range->Start().NextStart()) {
2763 // Do not spill the live range eagerly if the use position that can
2764 // benefit from the register is too close to the start of the live range.
2765 LifetimePosition split_pos = GetSplitPositionForInstruction(
2766 range, pos->pos().ToInstructionIndex());
2767 // There is no place to split, so we can't split and spill.
2768 if (!split_pos.IsValid()) continue;
2769
2770 split_pos =
2771 FindOptimalSplitPos(range->Start().NextFullStart(), split_pos);
2772
2773 SplitRangeAt(range, split_pos);
2774 Spill(range, SpillMode::kSpillAtDefinition);
2775 }
2776 }
2777}
2778
2779LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
2780 LifetimePosition pos) {
2781 DCHECK(!range->TopLevel()->IsFixed());
2782 TRACE("Splitting live range %d:%d at %d\n", range->TopLevel()->vreg(),
2783 range->relative_id(), pos.value());
2784
2785 if (pos <= range->Start()) return range;
2786
2787 // We can't properly connect live ranges if splitting occurred at the end
2788 // of a block.
2789 DCHECK(pos.IsStart() || pos.IsGapPosition() ||
2790 (GetInstructionBlock(code(), pos)->last_instruction_index() !=
2791 pos.ToInstructionIndex()));
2792
2793 LiveRange* result = range->SplitAt(pos, allocation_zone());
2794 return result;
2795}
2796
2797LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
2798 LifetimePosition start,
2799 LifetimePosition end) {
2800 DCHECK(!range->TopLevel()->IsFixed());
2801 TRACE("Splitting live range %d:%d in position between [%d, %d]\n",
2802 range->TopLevel()->vreg(), range->relative_id(), start.value(),
2803 end.value());
2804
2805 LifetimePosition split_pos = FindOptimalSplitPos(start, end);
2806 DCHECK(split_pos >= start);
2807 return SplitRangeAt(range, split_pos);
2808}
2809
2810LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
2811 LifetimePosition end) {
2812 int start_instr = start.ToInstructionIndex();
2813 int end_instr = end.ToInstructionIndex();
2814 DCHECK_LE(start_instr, end_instr);
2815
2816 // We have no choice: start and end fall within the same instruction.
2817 if (start_instr == end_instr) return end;
2818
2819 const InstructionBlock* start_block = GetInstructionBlock(code(), start);
2820 const InstructionBlock* end_block = GetInstructionBlock(code(), end);
2821
2822 if (end_block == start_block) {
2823 // The interval is split in the same basic block. Split at the latest
2824 // possible position.
2825 return end;
2826 }
2827
2828 const InstructionBlock* block = end_block;
2829 // Find header of outermost loop.
2830 do {
2831 const InstructionBlock* loop = GetContainingLoop(code(), block);
2832 if (loop == nullptr ||
2833 loop->rpo_number().ToInt() <= start_block->rpo_number().ToInt()) {
2834 // No more loops or loop starts before the lifetime start.
2835 break;
2836 }
2837 block = loop;
2838 } while (true);
2839
2840 // We did not find any suitable outer loop. Split at the latest possible
2841 // position unless end_block is a loop header itself.
2842 if (block == end_block && !end_block->IsLoopHeader()) return end;
2843
2845 block->first_instruction_index());
2846}
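// Example: if the range starts before a loop and the latest acceptable
// split position lies inside that loop, the walk above hoists the split to
// the start of the outermost such loop header, so the assignment does not
// change in the middle of the loop (which could introduce moves that
// execute on every iteration).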
2847
2848LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
2849 LiveRange* range, LifetimePosition pos, SpillMode spill_mode,
2850 LiveRange** begin_spill_out) {
2851 *begin_spill_out = range;
2852 // TODO(herhut): Be more clever here as long as we do not move pos out of
2853 // deferred code.
2854 if (spill_mode == SpillMode::kSpillDeferred) return pos;
2855 const InstructionBlock* block = GetInstructionBlock(code(), pos.Start());
2856 const InstructionBlock* loop_header =
2857 block->IsLoopHeader() ? block : GetContainingLoop(code(), block);
2858 if (loop_header == nullptr) return pos;
2859
2860 while (loop_header != nullptr) {
2861 // We are going to spill live range inside the loop.
2862 // If possible try to move spilling position backwards to loop header.
2863 // This will reduce the number of memory moves on the back edge.
2864 LifetimePosition loop_start = LifetimePosition::GapFromInstructionIndex(
2865 loop_header->first_instruction_index());
2866 // Stop if we moved to a loop header before the value is defined, or to
2867 // its definition position when spilling there is not beneficial.
2868 if (range->TopLevel()->Start() > loop_start ||
2869 (range->TopLevel()->Start() == loop_start &&
2870 range->TopLevel()->SpillAtLoopHeaderNotBeneficial()))
2871 return pos;
2872
2873 LiveRange* live_at_header = range->TopLevel()->GetChildCovers(loop_start);
2874
2875 if (live_at_header != nullptr && !live_at_header->spilled()) {
2876 for (const LiveRange* check_use = live_at_header;
2877 check_use != nullptr && check_use->Start() < pos;
2878 check_use = check_use->next()) {
2879 // If we find a use for which spilling is detrimental, don't spill
2880 // at the loop header
2881 UsePosition* next_use =
2882 check_use->NextUsePositionSpillDetrimental(loop_start);
2883 // A UsePosition at the end of a UseInterval may
2884 // have the same value as the start of the next range.
2885 if (next_use != nullptr && next_use->pos() <= pos) {
2886 return pos;
2887 }
2888 }
2889 // No register beneficial use inside the loop before the pos.
2890 *begin_spill_out = live_at_header;
2891 pos = loop_start;
2892 }
2893
2894 // Try hoisting out to an outer loop.
2895 loop_header = GetContainingLoop(code(), loop_header);
2896 }
2897 return pos;
2898}
2899
2900void RegisterAllocator::Spill(LiveRange* range, SpillMode spill_mode) {
2901 DCHECK(!range->spilled());
2902 DCHECK(spill_mode == SpillMode::kSpillAtDefinition ||
2903 GetInstructionBlock(code(), range->Start())->IsDeferred());
2904 TopLevelLiveRange* first = range->TopLevel();
2905 TRACE("Spilling live range %d:%d mode %d\n", first->vreg(),
2906 range->relative_id(), spill_mode);
2907
2908 TRACE("Starting spill type is %d\n", static_cast<int>(first->spill_type()));
2909 if (first->HasNoSpillType()) {
2910 TRACE("New spill range needed\n");
2911 data()->AssignSpillRangeToLiveRange(first, spill_mode);
2912 }
2913 // Upgrade the spillmode, in case this was only spilled in deferred code so
2914 // far.
2915 if ((spill_mode == SpillMode::kSpillAtDefinition) &&
2916 (first->spill_type() ==
2917 TopLevelLiveRange::SpillType::kDeferredSpillRange)) {
2918 TRACE("Upgrading\n");
2919 first->set_spill_type(TopLevelLiveRange::SpillType::kSpillRange);
2920 }
2921 TRACE("Final spill type is %d\n", static_cast<int>(first->spill_type()));
2922 range->Spill();
2923}
2924
2925const char* RegisterAllocator::RegisterName(int register_code) const {
2926 if (register_code == kUnassignedRegister) return "unassigned";
2927 switch (mode()) {
2928 case RegisterKind::kGeneral:
2929 return i::RegisterName(Register::from_code(register_code));
2930 case RegisterKind::kDouble:
2931 return i::RegisterName(DoubleRegister::from_code(register_code));
2932 case RegisterKind::kSimd128:
2933 return i::RegisterName(Simd128Register::from_code(register_code));
2934 }
2935}
2936
2937LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data,
2938 RegisterKind kind, Zone* local_zone)
2939 : RegisterAllocator(data, kind),
2940 unhandled_live_ranges_(local_zone),
2941 active_live_ranges_(local_zone),
2942 inactive_live_ranges_(num_registers(), InactiveLiveRangeQueue(local_zone),
2943 local_zone),
2944 next_active_ranges_change_(LifetimePosition::Invalid()),
2945 next_inactive_ranges_change_(LifetimePosition::Invalid()) {
2946 active_live_ranges().reserve(8);
2947}
2948
2949void LinearScanAllocator::MaybeSpillPreviousRanges(LiveRange* begin_range,
2950 LifetimePosition begin_pos,
2951 LiveRange* end_range) {
2952 // Spill begin_range after begin_pos, then spill every live range of this
2953 // virtual register until but excluding end_range.
2954 DCHECK(begin_range->Covers(begin_pos));
2955 DCHECK_EQ(begin_range->TopLevel(), end_range->TopLevel());
2956
2957 if (begin_range != end_range) {
2958 DCHECK_LE(begin_range->End(), end_range->Start());
2959 if (!begin_range->spilled()) {
2960 SpillAfter(begin_range, begin_pos, SpillMode::kSpillAtDefinition);
2961 }
2962 for (LiveRange* range = begin_range->next(); range != end_range;
2963 range = range->next()) {
2964 if (!range->spilled()) {
2965 range->Spill();
2966 }
2967 }
2968 }
2969}
2970
2971void LinearScanAllocator::MaybeUndoPreviousSplit(LiveRange* range, Zone* zone) {
2972 if (range->next() != nullptr && range->next()->ShouldRecombine()) {
2973 LiveRange* to_remove = range->next();
2974 TRACE("Recombining %d:%d with %d\n", range->TopLevel()->vreg(),
2975 range->relative_id(), to_remove->relative_id());
2976
2977 // Remove the range from unhandled, as attaching it will change its
2978 // state and hence ordering in the unhandled set.
2979 auto removed_cnt = unhandled_live_ranges().erase(to_remove);
2980 DCHECK_EQ(removed_cnt, 1);
2981 USE(removed_cnt);
2982
2983 range->AttachToNext(zone);
2984 } else if (range->next() != nullptr) {
2985 TRACE("No recombine for %d:%d to %d\n", range->TopLevel()->vreg(),
2986 range->relative_id(), range->next()->relative_id());
2987 }
2988}
2989
2990void LinearScanAllocator::SpillNotLiveRanges(RangeRegisterSmallMap& to_be_live,
2991 LifetimePosition position,
2992 SpillMode spill_mode) {
2993 for (auto it = active_live_ranges().begin();
2994 it != active_live_ranges().end();) {
2995 LiveRange* active_range = *it;
2996 TopLevelLiveRange* toplevel = (*it)->TopLevel();
2997 auto found = to_be_live.find(toplevel);
2998 if (found == to_be_live.end()) {
2999 // Is not contained in {to_be_live}, spill it.
3000 // Fixed registers are exempt from this. They might have been
3001 // added from inactive at the block boundary but we know that
3002 // they cannot conflict as they are built before register
3003 // allocation starts. It would be algorithmically fine to split
3004 // them and reschedule but the code does not allow to do this.
3005 if (toplevel->IsFixed()) {
3006 TRACE("Keeping reactivated fixed range for %s\n",
3007 RegisterName(toplevel->assigned_register()));
3008 ++it;
3009 } else {
3010 // When spilling a previously spilled/reloaded range, we add back the
3011 // tail that we might have split off when we reloaded/spilled it
3012 // previously. Otherwise we might keep generating small split-offs.
3013 MaybeUndoPreviousSplit(active_range, allocation_zone());
3014 TRACE("Putting back %d:%d\n", toplevel->vreg(),
3015 active_range->relative_id());
3016 LiveRange* split = SplitRangeAt(active_range, position);
3017 DCHECK_NE(split, active_range);
3018
3019 // Make sure we revisit this range once it has a use that requires
3020 // a register.
3021 UsePosition* next_use = split->NextRegisterPosition(position);
3022 if (next_use != nullptr) {
3023 // Move to the start of the gap before the use so that we have space
3024 // to perform the potential reload. Otherwise, do not spill but add
3025 // to unhandled for reallocation.
3026 LifetimePosition revisit_at = next_use->pos().FullStart();
3027 TRACE("Next use at %d\n", revisit_at.value());
3028 if (!data()->IsBlockBoundary(revisit_at)) {
3029 // Leave some space so we have enough gap room.
3030 revisit_at = revisit_at.PrevStart().FullStart();
3031 }
3032 // If this range became live right at the block boundary that we are
3033 // currently processing, we do not need to split it. Instead move it
3034 // to unhandled right away.
3035 if (position < revisit_at) {
3036 LiveRange* third_part = SplitRangeAt(split, revisit_at);
3037 DCHECK_NE(split, third_part);
3038 Spill(split, spill_mode);
3039 TRACE("Marking %d:%d to recombine\n", toplevel->vreg(),
3040 third_part->relative_id());
3041 third_part->SetRecombine();
3042 AddToUnhandled(third_part);
3043 } else {
3044 AddToUnhandled(split);
3045 }
3046 } else {
3047 Spill(split, spill_mode);
3048 }
3049 it = ActiveToHandled(it);
3050 }
3051 } else {
3052 // This range is contained in {to_be_live}, so we can keep it.
3053 int expected_register = found->second;
3054 to_be_live.erase(found);
3055 if (expected_register == active_range->assigned_register()) {
3056 // Was live and in the correct register; simply pass through.
3057 TRACE("Keeping %d:%d in %s\n", toplevel->vreg(),
3058 active_range->relative_id(),
3059 RegisterName(active_range->assigned_register()));
3060 ++it;
3061 } else {
3062 // Was live but in the wrong register. Split and schedule for
3063 // allocation.
3064 TRACE("Scheduling %d:%d\n", toplevel->vreg(),
3065 active_range->relative_id());
3066 LiveRange* split = SplitRangeAt(active_range, position);
3067 split->set_controlflow_hint(expected_register);
3068 AddToUnhandled(split);
3069 it = ActiveToHandled(it);
3070 }
3071 }
3072 }
3073}
3074
3075LiveRange* LinearScanAllocator::AssignRegisterOnReload(LiveRange* range,
3076 int reg) {
3077 // We know the register is currently free but it might be in
3078 // use by a currently inactive range. So we might not be able
3079 // to reload for the full distance. In such a case, split here.
3080 // TODO(herhut):
3081 // It might be better if we could use the normal unhandled queue and
3082 // give reloading registers precedence. That way we would compute the
3083 // intersection for the entire future.
3084 LifetimePosition new_end = range->End();
3085 for (int cur_reg = 0; cur_reg < num_registers(); ++cur_reg) {
3087 cur_reg != reg) {
3088 continue;
3089 }
3091 for (LiveRange* cur_inactive : inactive_live_ranges(cur_reg)) {
3093 !data()->config()->AreAliases(cur_inactive->representation(), cur_reg,
3094 range->representation(), reg)) {
3095 continue;
3096 }
3097 if (new_end <= cur_inactive->NextStart()) {
3098 // Inactive ranges are sorted by their next start, so the remaining
3099 // ranges cannot contribute to new_end.
3100 break;
3101 }
3102 auto next_intersection = cur_inactive->FirstIntersection(range);
3103 if (!next_intersection.IsValid()) continue;
3104 new_end = std::min(new_end, next_intersection);
3105 }
3106 }
3107 if (new_end != range->End()) {
3108 TRACE("Found new end for %d:%d at %d\n", range->TopLevel()->vreg(),
3109 range->relative_id(), new_end.value());
3110 LiveRange* tail = SplitRangeAt(range, new_end);
3111 AddToUnhandled(tail);
3112 }
3114 return range;
3115}
3116
3117void LinearScanAllocator::ReloadLiveRanges(
3118 RangeRegisterSmallMap const& to_be_live, LifetimePosition position) {
3119 // Assumption: All ranges in {to_be_live} are currently spilled and there are
3120 // no conflicting registers in the active ranges.
3121 // The former is ensured by SpillNotLiveRanges, the latter is by construction
3122 // of the to_be_live set.
3123 for (auto [range, reg] : to_be_live) {
3124 LiveRange* to_resurrect = range->GetChildCovers(position);
3125 if (to_resurrect == nullptr) {
3126 // While the range was live until the end of the predecessor block, it is
3127 // not live in this block. Either there is a lifetime gap or the range
3128 // died.
3129 TRACE("No candidate for %d at %d\n", range->vreg(), position.value());
3130 } else {
3131 // We might be resurrecting a range that we spilled until its next use
3132 // before. In such cases, we have to unsplit it before processing as
3133 // otherwise we might get register changes from one range to the other
3134 // in the middle of blocks.
3135 // If there is a gap between this range and the next, we can just keep
3136 // it as a register change won't hurt.
3137 MaybeUndoPreviousSplit(to_resurrect, allocation_zone());
3138 if (to_resurrect->Start() == position) {
3139 // This range already starts at this block. It might have been spilled,
3140 // so we have to unspill it. Otherwise, it is already in the unhandled
3141 // queue waiting for processing.
3142 DCHECK(!to_resurrect->HasRegisterAssigned());
3143 TRACE("Reload %d:%d starting at %d itself\n", range->vreg(),
3144 to_resurrect->relative_id(), position.value());
3145 if (to_resurrect->spilled()) {
3146 to_resurrect->Unspill();
3147 to_resurrect->set_controlflow_hint(reg);
3148 AddToUnhandled(to_resurrect);
3149 } else {
3150 // Assign the preassigned register if we know it. Otherwise there is
3151 // nothing to do, as the range is already in the unhandled queue.
3152 if (reg != kUnassignedRegister) {
3153 auto erased_cnt = unhandled_live_ranges().erase(to_resurrect);
3154 DCHECK_EQ(erased_cnt, 1);
3155 USE(erased_cnt);
3156 // We know that there is no conflict with active ranges, so just
3157 // assign the register to the range.
3158 to_resurrect = AssignRegisterOnReload(to_resurrect, reg);
3159 AddToActive(to_resurrect);
3160 }
3161 }
3162 } else {
3163 // This range was spilled before. We have to split it and schedule the
3164 // second part for allocation (or assign the register if we know).
3165 DCHECK(to_resurrect->spilled());
3166 LiveRange* split = SplitRangeAt(to_resurrect, position);
3167 TRACE("Reload %d:%d starting at %d as %d\n", range->vreg(),
3168 to_resurrect->relative_id(), split->Start().value(),
3169 split->relative_id());
3170 DCHECK_NE(split, to_resurrect);
3171 if (reg != kUnassignedRegister) {
3172 // We know that there is no conflict with active ranges, so just
3173 // assign the register to the range.
3174 split = AssignRegisterOnReload(split, reg);
3175 AddToActive(split);
3176 } else {
3177 // Let normal register assignment find a suitable register.
3178 split->set_controlflow_hint(reg);
3179 AddToUnhandled(split);
3180 }
3181 }
3182 }
3183 }
3184}
3185
3186RpoNumber LinearScanAllocator::ChooseOneOfTwoPredecessorStates(
3187 InstructionBlock* current_block, LifetimePosition boundary) {
3188 // Pick the state that would generate the least spill/reloads.
3189 // Compute vectors of ranges with use counts for both sides.
3190 // We count uses only for live ranges that are unique to either the left or
3191 // the right predecessor since many live ranges are shared between both.
3192 // Shared ranges don't influence the decision anyway and this is faster.
3193 auto& left = data()->GetSpillState(current_block->predecessors()[0]);
3194 auto& right = data()->GetSpillState(current_block->predecessors()[1]);
3195
3196 // Build a set of the `TopLevelLiveRange`s in the left predecessor.
3197 // Usually this set is very small, e.g., for JetStream2 at most 3 ranges in
3198 // ~72% of the cases and at most 8 ranges in ~93% of the cases. In those cases
3199 // `SmallMap` is backed by inline storage and uses fast linear search.
3200 // In some pathological cases the set grows large (e.g. the Wasm binary of
3201 // v8:9529) and then `SmallMap` gives us O(log n) worst case lookup when
3202 // intersecting with the right predecessor below. The set is encoded as a
3203 // `SmallMap` to `Dummy` values, since we don't have an equivalent `SmallSet`.
3204 struct Dummy {};
3206 for (LiveRange* range : left) {
3207 TopLevelLiveRange* parent = range->TopLevel();
3208 auto [_, inserted] = left_set.emplace(parent, Dummy{});
3209 // The `LiveRange`s in `left` come from the spill state, which is just the
3210 // list of active `LiveRange`s at the end of the block (see
3211 // `RememberSpillState`). Since at most one `LiveRange` out of a
3212 // `TopLevelLiveRange` can be active at the same time, there should never be
3213 // the same `TopLevelLiveRange` twice in `left_set`, hence this check.
3214 DCHECK(inserted);
3215 USE(inserted);
3216 }
3217
3218 // Now build a list of ranges unique to either the left or right predecessor.
3219 struct RangeUseCount {
3220 // The set above contains `TopLevelLiveRange`s, but ultimately we want to
3221 // count uses of the child `LiveRange` covering `boundary`.
3222 // The lookup in `GetChildCovers` is O(log n), so do it only once when
3223 // inserting into this list.
3224 LiveRange* range;
3225 // +1 if used in the left predecessor, -1 if used in the right predecessor.
3226 int use_count_delta;
3227 };
3228 SmallZoneVector<RangeUseCount, 16> unique_range_use_counts(allocation_zone());
3229 for (LiveRange* range : right) {
3230 TopLevelLiveRange* parent = range->TopLevel();
3231 auto left_it = left_set.find(parent);
3232 bool range_is_shared_left_and_right = left_it != left_set.end();
3233 if (range_is_shared_left_and_right) {
3234 left_set.erase(left_it);
3235 } else {
3236 // This range is unique to the right predecessor, so insert into the list.
3237 LiveRange* child = parent->GetChildCovers(boundary);
3238 if (child != nullptr) {
3239 unique_range_use_counts.push_back({child, -1});
3240 }
3241 }
3242 }
3243 // So far `unique_range_use_counts` contains only the ranges unique in the
3244 // right predecessor. Now also add the ranges from the left predecessor.
3245 for (auto [parent, _] : left_set) {
3246 LiveRange* child = parent->GetChildCovers(boundary);
3247 if (child != nullptr) {
3248 unique_range_use_counts.push_back({child, +1});
3249 }
3250 }
3251
3252 // Finally, count the uses for each range.
3253 int use_count_difference = 0;
3254 for (auto [range, use_count] : unique_range_use_counts) {
3255 if (range->NextUsePositionRegisterIsBeneficial(boundary) != nullptr) {
3256 use_count_difference += use_count;
3257 }
3258 }
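// At this point use_count_difference is (+1 per left-only range with a
// register-beneficial use at the boundary) minus (+1 per such right-only
// range). For example, two qualifying ranges unique to the left and one
// unique to the right give +2 - 1 = +1, so the left predecessor's spill
// state is chosen below.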
3259 if (use_count_difference == 0) {
3260 // There is a tie in beneficial register uses. Now look at any use at all:
3261 // we do not account for every kind of use (e.g. a value flowing into a
3262 // phi), so we simply check whether the ranges are still live.
3263 TRACE("Looking at only uses\n");
3264 for (auto [range, use_count] : unique_range_use_counts) {
3265 if (range->NextUsePosition(boundary) != range->positions().end()) {
3266 use_count_difference += use_count;
3267 }
3268 }
3269 }
3270 TRACE("Left predecessor has %d more uses than right\n", use_count_difference);
3271 return use_count_difference > 0 ? current_block->predecessors()[0]
3272 : current_block->predecessors()[1];
3273}
3274
3275bool LinearScanAllocator::CheckConflict(
3276 MachineRepresentation rep, int reg,
3277 const RangeRegisterSmallMap& to_be_live) {
3278 for (auto [range, expected_reg] : to_be_live) {
3279 if (data()->config()->AreAliases(range->representation(), expected_reg, rep,
3280 reg)) {
3281 return true;
3282 }
3283 }
3284 return false;
3285}
3286
3287void LinearScanAllocator::ComputeStateFromManyPredecessors(
3288 InstructionBlock* current_block, RangeRegisterSmallMap& to_be_live) {
3289 struct Vote {
3290 size_t count;
3291 int used_registers[RegisterConfiguration::kMaxRegisters];
3292 explicit Vote(int reg) : count(1), used_registers{0} {
3293 used_registers[reg] = 1;
3294 }
3295 };
3296 struct TopLevelLiveRangeComparator {
3297 bool operator()(const TopLevelLiveRange* lhs,
3298 const TopLevelLiveRange* rhs) const {
3299 return lhs->vreg() < rhs->vreg();
3300 }
3301 };
3302 // Typically this map is very small, e.g., on JetStream2 it has at most 3
3303 // elements ~80% of the time and at most 8 elements ~94% of the time.
3304 // Thus use a `SmallZoneMap` to avoid allocations and because linear search
3305 // in an array is faster than map lookup for such small sizes.
3306 // We don't want too many inline elements though since `Vote` is pretty large.
3307 using RangeVoteMap =
3309 static_assert(sizeof(RangeVoteMap) < 4096, "too large stack allocation");
3310 RangeVoteMap counts(data()->allocation_zone());
3311
3312 int deferred_blocks = 0;
3313 for (RpoNumber pred : current_block->predecessors()) {
3314 if (!ConsiderBlockForControlFlow(current_block, pred)) {
3315 // Back edges of a loop count as deferred here too.
3316 deferred_blocks++;
3317 continue;
3318 }
3319 const auto& pred_state = data()->GetSpillState(pred);
3320 for (LiveRange* range : pred_state) {
3321 // We might have spilled the register backwards, so the range we
3322 // stored might have lost its register. Ignore those.
3323 if (!range->HasRegisterAssigned()) continue;
3324 TopLevelLiveRange* toplevel = range->TopLevel();
3325 auto [it, inserted] =
3326 counts.try_emplace(toplevel, range->assigned_register());
3327 if (!inserted) {
3328 it->second.count++;
3329 it->second.used_registers[range->assigned_register()]++;
3330 }
3331 }
3332 }
3333
3334 // Choose the live ranges from the majority.
3335 const size_t majority =
3336 (current_block->PredecessorCount() + 2 - deferred_blocks) / 2;
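// Example of the majority computation: with 3 predecessors and none
// deferred, majority = (3 + 2) / 2 = 2, so a range must carry a register
// in at least 2 of them; with 1 of the 3 deferred, majority =
// (3 + 2 - 1) / 2 = 2, i.e. both remaining predecessors must agree.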
3337 bool taken_registers[RegisterConfiguration::kMaxRegisters] = {false};
3338 DCHECK(to_be_live.empty());
3339 auto assign_to_live = [this, majority, &counts](
3340 std::function<bool(TopLevelLiveRange*)> filter,
3341 RangeRegisterSmallMap& to_be_live,
3342 bool* taken_registers) {
3343 bool check_aliasing =
3344 kFPAliasing == AliasingKind::kCombine && check_fp_aliasing();
3345 for (const auto& val : counts) {
3346 if (!filter(val.first)) continue;
3347 if (val.second.count >= majority) {
3348 int reg = kUnassignedRegister;
3349 int register_max = 0;
3350 bool conflict = false;
3351 int num_regs = num_registers();
3352 int num_codes = num_allocatable_registers();
3353 const int* codes = allocatable_register_codes();
3354 MachineRepresentation rep = val.first->representation();
3355 if (check_aliasing && (rep == MachineRepresentation::kFloat32 ||
3358 GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
3359 for (int idx = 0; idx < num_regs; idx++) {
3360 int uses = val.second.used_registers[idx];
3361 if (uses == 0) continue;
3362 if (uses > register_max || (conflict && uses == register_max)) {
3363 reg = idx;
3364 register_max = uses;
3365 conflict = check_aliasing ? CheckConflict(rep, reg, to_be_live)
3366 : taken_registers[reg];
3367 }
3368 }
3369 if (conflict) {
3370 reg = kUnassignedRegister;
3371 } else if (!check_aliasing) {
3372 taken_registers[reg] = true;
3373 }
3374 TRACE("Reset %d as live due to vote %zu in %s\n", val.first->vreg(),
3375 val.second.count, RegisterName(reg));
3376 auto [_, inserted] = to_be_live.emplace(val.first, reg);
3377 DCHECK(inserted);
3378 USE(inserted);
3379 }
3380 }
3381 };
3382 // First round, process fixed registers, as these have precedence.
3383 // There is only one fixed range per register, so we cannot have
3384 // conflicts.
3385 assign_to_live([](TopLevelLiveRange* r) { return r->IsFixed(); }, to_be_live,
3386 taken_registers);
3387 // Second round, process the rest.
3388 assign_to_live([](TopLevelLiveRange* r) { return !r->IsFixed(); }, to_be_live,
3389 taken_registers);
3390}
3391
3392bool LinearScanAllocator::ConsiderBlockForControlFlow(
3393 InstructionBlock* current_block, RpoNumber predecessor) {
3394 // We ignore predecessors on back edges when looking for control flow effects,
3395 // as those lie in the future of allocation and we have no data yet. Also,
3396 // deferred blocks are ignored on deferred to non-deferred boundaries, as we
3397 // do not want them to influence allocation of non-deferred code.
3398 return (predecessor < current_block->rpo_number()) &&
3399 (current_block->IsDeferred() ||
3400 !code()->InstructionBlockAt(predecessor)->IsDeferred());
3401}
3402
3403void LinearScanAllocator::UpdateDeferredFixedRanges(SpillMode spill_mode,
3404 InstructionBlock* block) {
3405 if (spill_mode == SpillMode::kSpillDeferred) {
3408 // Adds range back to inactive, resolving resulting conflicts.
3409 auto add_to_inactive = [this, max](LiveRange* range) {
3410 AddToInactive(range);
3411 // Splits other if it conflicts with range. Other is placed in unhandled
3412 // for later reallocation.
3413 auto split_conflicting = [this, max](LiveRange* range, LiveRange* other,
3414 std::function<void(LiveRange*)>
3415 update_caches) {
3416 if (other->TopLevel()->IsFixed()) return;
3417 int reg = range->assigned_register();
3419 if (other->assigned_register() != reg) {
3420 return;
3421 }
3422 } else {
3423 if (!data()->config()->AreAliases(range->representation(), reg,
3424 other->representation(),
3425 other->assigned_register())) {
3426 return;
3427 }
3428 }
3429 // The inactive range might conflict, so check whether we need to
3430 // split and spill. We can look for the first intersection, as there
3431 // cannot be any intersections in the past, as those would have been a
3432 // conflict then.
3433 LifetimePosition next_start = range->FirstIntersection(other);
3434 if (!next_start.IsValid() || (next_start > max)) {
3435 // There is no conflict or the conflict is outside of the current
3436 // stretch of deferred code. In either case we can ignore the
3437 // inactive range.
3438 return;
3439 }
3440 // They overlap. So we need to split active and reschedule it
3441 // for allocation.
3442 TRACE("Resolving conflict of %d with deferred fixed for register %s\n",
3443 other->TopLevel()->vreg(),
3444 RegisterName(other->assigned_register()));
3445 LiveRange* split_off =
3446 other->SplitAt(next_start, data()->allocation_zone());
3447 // Try to get the same register after the deferred block.
3448 split_off->set_controlflow_hint(other->assigned_register());
3449 DCHECK_NE(split_off, other);
3450 AddToUnhandled(split_off);
3451 update_caches(other);
3452 };
3453 // Now check for conflicts in active and inactive ranges. We might have
3454 // conflicts in inactive, as we do not do this check on every block
3455 // boundary but only on deferred/non-deferred changes but inactive
3456 // live ranges might become live on any block boundary.
3457 for (auto active : active_live_ranges()) {
3458 split_conflicting(range, active, [this](LiveRange* updated) {
3459 next_active_ranges_change_ =
3460 std::min(updated->End(), next_active_ranges_change_);
3461 });
3462 }
3463 for (int reg = 0; reg < num_registers(); ++reg) {
3465 reg != range->assigned_register()) {
3466 continue;
3467 }
3469 for (auto inactive : inactive_live_ranges(reg)) {
3470 if (inactive->NextStart() > max) break;
3471 split_conflicting(range, inactive, [this](LiveRange* updated) {
3472 next_inactive_ranges_change_ =
3473 std::min(updated->End(), next_inactive_ranges_change_);
3474 });
3475 }
3476 }
3477 };
3478 if (mode() == RegisterKind::kGeneral) {
3479 for (TopLevelLiveRange* current : data()->fixed_live_ranges()) {
3480 if (current != nullptr) {
3481 if (current->IsDeferredFixed()) {
3482 add_to_inactive(current);
3483 }
3484 }
3485 }
3486 } else if (mode() == RegisterKind::kDouble) {
3487 for (TopLevelLiveRange* current : data()->fixed_double_live_ranges()) {
3488 if (current != nullptr) {
3489 if (current->IsDeferredFixed()) {
3490 add_to_inactive(current);
3491 }
3492 }
3493 }
3495 for (TopLevelLiveRange* current : data()->fixed_float_live_ranges()) {
3496 if (current != nullptr) {
3497 if (current->IsDeferredFixed()) {
3498 add_to_inactive(current);
3499 }
3500 }
3501 }
3502 for (TopLevelLiveRange* current : data()->fixed_simd128_live_ranges()) {
3503 if (current != nullptr) {
3504 if (current->IsDeferredFixed()) {
3505 add_to_inactive(current);
3506 }
3507 }
3508 }
3509 }
3510 } else {
3512 for (TopLevelLiveRange* current : data()->fixed_simd128_live_ranges()) {
3513 if (current != nullptr) {
3514 if (current->IsDeferredFixed()) {
3515 add_to_inactive(current);
3516 }
3517 }
3518 }
3519 }
3520 } else {
3521 // Remove all ranges.
3522 for (int reg = 0; reg < num_registers(); ++reg) {
3523 for (auto it = inactive_live_ranges(reg).begin();
3524 it != inactive_live_ranges(reg).end();) {
3525 if ((*it)->TopLevel()->IsDeferredFixed()) {
3526 it = inactive_live_ranges(reg).erase(it);
3527 } else {
3528 ++it;
3529 }
3530 }
3531 }
3532 }
3533}
3534
3535bool LinearScanAllocator::BlockIsDeferredOrImmediatePredecessorIsNotDeferred(
3536 const InstructionBlock* block) {
3537 if (block->IsDeferred()) return true;
3538 if (block->PredecessorCount() == 0) return true;
3539 bool pred_is_deferred = false;
3540 for (auto pred : block->predecessors()) {
3541 if (pred.IsNext(block->rpo_number())) {
3542 pred_is_deferred = code()->InstructionBlockAt(pred)->IsDeferred();
3543 break;
3544 }
3545 }
3546 return !pred_is_deferred;
3547}
3548
3549bool LinearScanAllocator::HasNonDeferredPredecessor(InstructionBlock* block) {
3550 for (auto pred : block->predecessors()) {
3551 InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
3552 if (!pred_block->IsDeferred()) return true;
3553 }
3554 return false;
3555}
3556
3557void LinearScanAllocator::AllocateRegisters() {
3558 DCHECK(unhandled_live_ranges().empty());
3559 DCHECK(active_live_ranges().empty());
3560 for (int reg = 0; reg < num_registers(); ++reg) {
3561 DCHECK(inactive_live_ranges(reg).empty());
3562 }
3563
3565 data()->ResetSpillState();
3566
3567 if (v8_flags.trace_turbo_alloc) {
3569 }
3570
3571 const size_t live_ranges_size = data()->live_ranges().size();
3572 for (TopLevelLiveRange* range : data()->live_ranges()) {
3573 CHECK_EQ(live_ranges_size,
3574 data()->live_ranges().size()); // TODO(neis): crbug.com/831822
3575 if (!CanProcessRange(range)) continue;
3576 for (LiveRange* to_add = range; to_add != nullptr;
3577 to_add = to_add->next()) {
3578 if (!to_add->spilled()) {
3579 AddToUnhandled(to_add);
3580 }
3581 }
3582 }
3583
3584 if (mode() == RegisterKind::kGeneral) {
3585 for (TopLevelLiveRange* current : data()->fixed_live_ranges()) {
3586 if (current != nullptr) {
3587 if (current->IsDeferredFixed()) continue;
3588 AddToInactive(current);
3589 }
3590 }
3591 } else if (mode() == RegisterKind::kDouble) {
3592 for (TopLevelLiveRange* current : data()->fixed_double_live_ranges()) {
3593 if (current != nullptr) {
3594 if (current->IsDeferredFixed()) continue;
3595 AddToInactive(current);
3596 }
3597 }
3599 for (TopLevelLiveRange* current : data()->fixed_float_live_ranges()) {
3600 if (current != nullptr) {
3601 if (current->IsDeferredFixed()) continue;
3602 AddToInactive(current);
3603 }
3604 }
3605 for (TopLevelLiveRange* current : data()->fixed_simd128_live_ranges()) {
3606 if (current != nullptr) {
3607 if (current->IsDeferredFixed()) continue;
3608 AddToInactive(current);
3609 }
3610 }
3611 }
3612 } else {
3614 for (TopLevelLiveRange* current : data()->fixed_simd128_live_ranges()) {
3615 if (current != nullptr) {
3616 if (current->IsDeferredFixed()) continue;
3617 AddToInactive(current);
3618 }
3619 }
3620 }
3621
3622 RpoNumber last_block = RpoNumber::FromInt(0);
3623 RpoNumber max_blocks =
3624 RpoNumber::FromInt(code()->InstructionBlockCount() - 1);
3625 LifetimePosition next_block_boundary =
3627 data()
3628 ->code()
3629 ->InstructionBlockAt(last_block)
3630 ->last_instruction_index())
3631 .NextFullStart();
3632 SpillMode spill_mode = SpillMode::kSpillAtDefinition;
3633
3634 // Process all ranges. We also need to ensure that we have seen all block
3635 // boundaries. Linear scan might have assigned and spilled ranges before
3636 // reaching the last block and hence we would ignore control flow effects for
3637 // those. Not only does this produce a potentially bad assignment, it also
3638 // breaks the invariant that we undo spills that happen in deferred code
3639 // when crossing a deferred/non-deferred boundary.
3640 while (!unhandled_live_ranges().empty() || last_block < max_blocks) {
3642 LiveRange* current = unhandled_live_ranges().empty()
3643 ? nullptr
3644 : *unhandled_live_ranges().begin();
3646 current ? current->Start() : next_block_boundary;
3647#ifdef DEBUG
3648 allocation_finger_ = position;
3649#endif
3650 // Check whether we just moved across a block boundary. This will trigger
3651 // for the first range that is past the current boundary.
3652 if (position >= next_block_boundary) {
3653 TRACE("Processing boundary at %d leaving %d\n",
3654 next_block_boundary.value(), last_block.ToInt());
3655
3656 // Forward state to before block boundary
3657 LifetimePosition end_of_block = next_block_boundary.PrevStart().End();
3658 ForwardStateTo(end_of_block);
3659
3660 // Remember this state.
3661 InstructionBlock* current_block = data()->code()->GetInstructionBlock(
3662 next_block_boundary.ToInstructionIndex());
3663
3664 // Store current spill state (as the state at end of block). For
3665 // simplicity, we store the active ranges, i.e., the live ranges that
3666 // are not spilled.
3667 data()->RememberSpillState(last_block, active_live_ranges());
3668
3669 // Only reset the state if this was not a direct fallthrough. Otherwise
3670 // control flow resolution will get confused (it does not expect changes
3671 // across fallthrough edges).
3672 bool fallthrough =
3673 (current_block->PredecessorCount() == 1) &&
3674 current_block->predecessors()[0].IsNext(current_block->rpo_number());
3675
3676 // When crossing a deferred/non-deferred boundary, we have to load or
3677 // remove the deferred fixed ranges from inactive.
3678 if ((spill_mode == SpillMode::kSpillDeferred) !=
3679 current_block->IsDeferred()) {
3680 // Update spill mode.
3681 spill_mode = current_block->IsDeferred()
3682 ? SpillMode::kSpillDeferred
3683 : SpillMode::kSpillAtDefinition;
3684
3685 ForwardStateTo(next_block_boundary);
3686
3687#ifdef DEBUG
3688 // Allow allocation at current position.
3689 allocation_finger_ = next_block_boundary;
3690#endif
3691 UpdateDeferredFixedRanges(spill_mode, current_block);
3692 }
3693
3694 // Allocation relies on the fact that each non-deferred block has at
3695 // least one non-deferred predecessor. Check this invariant here.
3696 DCHECK_IMPLIES(!current_block->IsDeferred(),
3697 HasNonDeferredPredecessor(current_block));
3698
3699 if (!fallthrough) {
3700#ifdef DEBUG
3701 // Allow allocation at current position.
3702 allocation_finger_ = next_block_boundary;
3703#endif
3704
3705 // We are currently at next_block_boundary - 1. Move the state to the
3706 // actual block boundary position. In particular, we have to
3707 // reactivate inactive ranges so that they get rescheduled for
3708 // allocation if they were not live at the predecessors.
3709 ForwardStateTo(next_block_boundary);
3710
3712
3713 // If we end up deciding to use the state of the immediate
3714 // predecessor, it is better not to perform a change. It would lead to
3715 // the same outcome anyway.
3716 // This may never happen on boundaries between deferred and
3717 // non-deferred code, as we rely on explicit respill to ensure we
3718 // spill at definition.
3719 bool no_change_required = false;
3720
3721 auto pick_state_from = [this, current_block](
3722 RpoNumber pred,
3723 RangeRegisterSmallMap& to_be_live) -> bool {
3724 TRACE("Using information from B%d\n", pred.ToInt());
3725 // If this is a fall-through that is not across a deferred
3726 // boundary, there is nothing to do.
3727 bool is_noop = pred.IsNext(current_block->rpo_number());
3728 if (!is_noop) {
3729 auto& spill_state = data()->GetSpillState(pred);
3730 TRACE("Not a fallthrough. Adding %zu elements...\n",
3731 spill_state.size());
3732 LifetimePosition pred_end =
3734 this->code()->InstructionBlockAt(pred)->code_end());
3735 DCHECK(to_be_live.empty());
3736 for (const auto range : spill_state) {
3737 // Filter out ranges that were split or had their register
3738 // stolen by backwards working spill heuristics. These have
3739 // been spilled after the fact, so ignore them.
3740 if (range->End() < pred_end || !range->HasRegisterAssigned())
3741 continue;
3742 auto [_, inserted] = to_be_live.emplace(
3743 range->TopLevel(), range->assigned_register());
3744 DCHECK(inserted);
3745 USE(inserted);
3746 }
3747 }
3748 return is_noop;
3749 };
3750
3751 // Multiple cases here:
3752 // 1) We have a single predecessor => this is a control flow split, so
3753 // just restore the predecessor state.
3754 // 2) We have two predecessors => this is a conditional, so break ties
3755 // based on forward uses, trying to benefit
3756 // the same branch if in doubt (make one path fast).
3757 // 3) We have many predecessors => this is a switch. Compute union
3758 // based on majority, break ties by looking forward.
3759 if (current_block->PredecessorCount() == 1) {
3760 TRACE("Single predecessor for B%d\n",
3761 current_block->rpo_number().ToInt());
3762 no_change_required =
3763 pick_state_from(current_block->predecessors()[0], to_be_live);
3764 } else if (current_block->PredecessorCount() == 2) {
3765 TRACE("Two predecessors for B%d\n",
3766 current_block->rpo_number().ToInt());
3767 // If one of the branches does not contribute any information,
4768 // e.g. because it is deferred or a back edge, we can short-cut
3769 // here right away.
3770 RpoNumber chosen_predecessor = RpoNumber::Invalid();
3771 if (!ConsiderBlockForControlFlow(current_block,
3772 current_block->predecessors()[0])) {
3773 chosen_predecessor = current_block->predecessors()[1];
3774 } else if (!ConsiderBlockForControlFlow(
3775 current_block, current_block->predecessors()[1])) {
3776 chosen_predecessor = current_block->predecessors()[0];
3777 } else {
3778 chosen_predecessor = ChooseOneOfTwoPredecessorStates(
3779 current_block, next_block_boundary);
3780 }
3781 no_change_required = pick_state_from(chosen_predecessor, to_be_live);
3782
3783 } else {
3784 // Merge at the end of, e.g., a switch.
3785 ComputeStateFromManyPredecessors(current_block, to_be_live);
3786 }
3787
3788 if (!no_change_required) {
3789 SpillNotLiveRanges(to_be_live, next_block_boundary, spill_mode);
3790 ReloadLiveRanges(to_be_live, next_block_boundary);
3791 }
3792 }
3793 // Update block information
3794 last_block = current_block->rpo_number();
3796 current_block->last_instruction_index())
3797 .NextFullStart();
3798
3799 // We might have created new unhandled live ranges, so cycle around the
3800 // loop to make sure we pick the top most range in unhandled for
3801 // processing.
3802 continue;
3803 }
3804
3805 DCHECK_NOT_NULL(current);
3806
3807 TRACE("Processing interval %d:%d start=%d\n", current->TopLevel()->vreg(),
3808 current->relative_id(), position.value());
3809
3810 // Now we can erase current, as we are sure to process it.
3812
3813 if (current->IsTopLevel() && TryReuseSpillForPhi(current->TopLevel()))
3814 continue;
3815
3817
3818 DCHECK(!current->HasRegisterAssigned() && !current->spilled());
3819
3820 ProcessCurrentRange(current, spill_mode);
3821 }
3822
3823 if (v8_flags.trace_turbo_alloc) {
3825 }
3826}
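// A minimal standalone sketch, not from register-allocator.cc, of the three
// predecessor cases enumerated in the "Multiple cases here" comment inside the
// main allocation loop above: a single predecessor is copied, with two
// predecessors a non-contributing one (deferred block or back edge) is
// skipped, and with more predecessors the state is merged by majority
// (ComputeStateFromManyPredecessors in the real code). `contributes` and
// `choose_by_forward_uses` are assumptions of this sketch, not real interfaces.
#include <functional>
#include <vector>

namespace boundary_state_sketch {

int ChoosePredecessor(
    const std::vector<int>& preds,
    const std::function<bool(int)>& contributes,
    const std::function<int(int, int)>& choose_by_forward_uses) {
  if (preds.size() == 1) return preds[0];
  if (preds.size() == 2) {
    if (!contributes(preds[0])) return preds[1];
    if (!contributes(preds[1])) return preds[0];
    return choose_by_forward_uses(preds[0], preds[1]);
  }
  return -1;  // many predecessors: merge by majority instead of picking one
}

}  // namespace boundary_state_sketch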
3827
3829 int reg) {
3830 data()->MarkAllocated(range->representation(), reg);
3831 range->set_assigned_register(reg);
3832 range->SetUseHints(reg);
3833 range->UpdateBundleRegister(reg);
3834 if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
3835 data()->GetPhiMapValueFor(range->TopLevel())->set_assigned_register(reg);
3836 }
3837}
3838
3840 TRACE("Add live range %d:%d in %s to active\n", range->TopLevel()->vreg(),
3841 range->relative_id(), RegisterName(range->assigned_register()));
3842 active_live_ranges().push_back(range);
3844 std::min(next_active_ranges_change_, range->NextEndAfter(range->Start()));
3845}
3846
3848 TRACE("Add live range %d:%d to inactive\n", range->TopLevel()->vreg(),
3849 range->relative_id());
3851 next_inactive_ranges_change_, range->NextStartAfter(range->Start()));
3852 DCHECK(range->HasRegisterAssigned());
3853 // Keep `inactive_live_ranges` sorted.
3854 inactive_live_ranges(range->assigned_register())
3855 .insert(std::upper_bound(
3856 inactive_live_ranges(range->assigned_register()).begin(),
3857 inactive_live_ranges(range->assigned_register()).end(), range,
3859 1, range);
3860}
3861
3863 if (range == nullptr || range->IsEmpty()) return;
3864 DCHECK(!range->HasRegisterAssigned() && !range->spilled());
3865 DCHECK(allocation_finger_ <= range->Start());
3866
3867 TRACE("Add live range %d:%d to unhandled\n", range->TopLevel()->vreg(),
3868 range->relative_id());
3869 unhandled_live_ranges().insert(range);
3870}
3871
3874 TRACE("Moving live range %d:%d from active to handled\n",
3875 (*it)->TopLevel()->vreg(), (*it)->relative_id());
3876 return active_live_ranges().erase(it);
3877}
3878
3881 LiveRange* range = *it;
3882 TRACE("Moving live range %d:%d from active to inactive\n",
3883 (range)->TopLevel()->vreg(), range->relative_id());
3884 LifetimePosition next_active = range->NextStartAfter(position);
3886 std::min(next_inactive_ranges_change_, next_active);
3887 DCHECK(range->HasRegisterAssigned());
3888 // Keep `inactive_live_ranges` sorted.
3889 inactive_live_ranges(range->assigned_register())
3890 .insert(std::upper_bound(
3891 inactive_live_ranges(range->assigned_register()).begin(),
3892 inactive_live_ranges(range->assigned_register()).end(), range,
3894 1, range);
3895 return active_live_ranges().erase(it);
3896}
3897
3900 LiveRange* range = *it;
3901 TRACE("Moving live range %d:%d from inactive to handled\n",
3902 range->TopLevel()->vreg(), range->relative_id());
3903 int reg = range->assigned_register();
3904 // This must keep the order of `inactive_live_ranges` intact since one of its
3905 // callers `SplitAndSpillIntersecting` relies on it being sorted.
3906 return inactive_live_ranges(reg).erase(it);
3907}
3908
3912 LiveRange* range = *it;
3913 active_live_ranges().push_back(range);
3914 TRACE("Moving live range %d:%d from inactive to active\n",
3915 range->TopLevel()->vreg(), range->relative_id());
3917 std::min(next_active_ranges_change_, range->NextEndAfter(position));
3918 int reg = range->assigned_register();
3919 // Remove the element without copying O(n) subsequent elements.
3920 // The order of `inactive_live_ranges` is established afterwards by sorting in
3921 // `ForwardStateTo`, which is the only caller.
3922 std::swap(*it, inactive_live_ranges(reg).back());
3924 return it;
3925}
3926
3930 for (auto it = active_live_ranges().begin();
3931 it != active_live_ranges().end();) {
3932 LiveRange* cur_active = *it;
3933 if (cur_active->End() <= position) {
3934 it = ActiveToHandled(it);
3935 } else if (!cur_active->Covers(position)) {
3936 it = ActiveToInactive(it, position);
3937 } else {
3938 next_active_ranges_change_ = std::min(
3940 ++it;
3941 }
3942 }
3943 }
3944
3947 for (int reg = 0; reg < num_registers(); ++reg) {
3948 for (auto it = inactive_live_ranges(reg).begin();
3949 it != inactive_live_ranges(reg).end();) {
3950 LiveRange* cur_inactive = *it;
3951 if (cur_inactive->End() <= position) {
3952 it = InactiveToHandled(it);
3953 } else if (cur_inactive->Covers(position)) {
3954 it = InactiveToActive(it, position);
3955 } else {
3958 // This modifies `cur_inactive.next_start_` and thus
3959 // invalidates the ordering of `inactive_live_ranges(reg)`.
3960 cur_inactive->NextStartAfter(position));
3961 ++it;
3962 }
3963 }
3964 std::sort(inactive_live_ranges(reg).begin(),
3966 }
3967 }
3968
3969 for (int reg = 0; reg < num_registers(); ++reg) {
3971 }
3972}
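// A minimal standalone sketch, not from register-allocator.cc, of the
// bookkeeping ForwardStateTo performs, with plain integer positions standing
// in for LifetimePosition/LiveRange. It only illustrates the three-way
// decision: a range whose end has passed is retired, a range with a lifetime
// hole at the current position becomes inactive, and a range covering the
// position is (or stays) active.
#include <vector>

namespace forward_state_sketch {

struct Interval {
  int start, end;  // half-open [start, end)
};

struct FakeRange {
  std::vector<Interval> intervals;  // sorted and disjoint
  int End() const { return intervals.back().end; }
  bool Covers(int pos) const {
    for (const Interval& i : intervals) {
      if (i.start <= pos && pos < i.end) return true;
    }
    return false;
  }
};

enum class State { kActive, kInactive, kHandled };

State Classify(const FakeRange& range, int position) {
  if (range.End() <= position) return State::kHandled;
  return range.Covers(position) ? State::kActive : State::kInactive;
}

}  // namespace forward_state_sketch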
3973
3975 DCHECK(start->IsDeferred());
3976 RpoNumber last_block =
3977 RpoNumber::FromInt(code()->InstructionBlockCount() - 1);
3978 while ((start->rpo_number() < last_block)) {
3979 InstructionBlock* next =
3980 code()->InstructionBlockAt(start->rpo_number().Next());
3981 if (!next->IsDeferred()) break;
3982 start = next;
3983 }
3984 return start->last_instruction_index();
3985}
3986
3988 int* num_regs, int* num_codes,
3989 const int** codes) const {
3992 *num_regs = data()->config()->num_float_registers();
3993 *num_codes = data()->config()->num_allocatable_float_registers();
3994 *codes = data()->config()->allocatable_float_codes();
3995 } else if (rep == MachineRepresentation::kSimd128) {
3996 *num_regs = data()->config()->num_simd128_registers();
3997 *num_codes = data()->config()->num_allocatable_simd128_registers();
3998 *codes = data()->config()->allocatable_simd128_codes();
3999 } else if (rep == MachineRepresentation::kSimd256) {
4000 *num_regs = data()->config()->num_simd256_registers();
4001 *num_codes = data()->config()->num_allocatable_simd256_registers();
4002 *codes = data()->config()->allocatable_simd256_codes();
4003 } else {
4004 UNREACHABLE();
4005 }
4006}
4007
4008void LinearScanAllocator::GetSIMD128RegisterSet(int* num_regs, int* num_codes,
4009 const int** codes) const {
4011
4012 *num_regs = data()->config()->num_simd128_registers();
4013 *num_codes = data()->config()->num_allocatable_simd128_registers();
4014 *codes = data()->config()->allocatable_simd128_codes();
4015}
4016
4018 LiveRange* range, base::Vector<LifetimePosition> positions) {
4019 int num_regs = num_registers();
4020 int num_codes = num_allocatable_registers();
4021 const int* codes = allocatable_register_codes();
4022 MachineRepresentation rep = range->representation();
4026 GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
4029 GetSIMD128RegisterSet(&num_regs, &num_codes, &codes);
4030 }
4031 DCHECK_GE(positions.length(), num_regs);
4032
4033 for (int i = 0; i < num_regs; ++i) {
4034 positions[i] = LifetimePosition::MaxPosition();
4035 }
4036
4037 for (LiveRange* cur_active : active_live_ranges()) {
4038 int cur_reg = cur_active->assigned_register();
4040 positions[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
4041 TRACE("Register %s is free until pos %d (1) due to %d\n",
4042 RegisterName(cur_reg),
4044 cur_active->TopLevel()->vreg());
4045 } else {
4046 int alias_base_index = -1;
4047 int aliases = data()->config()->GetAliases(
4048 cur_active->representation(), cur_reg, rep, &alias_base_index);
4049 DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
4050 while (aliases--) {
4051 int aliased_reg = alias_base_index + aliases;
4052 positions[aliased_reg] = LifetimePosition::GapFromInstructionIndex(0);
4053 }
4054 }
4055 }
4056
4057 for (int cur_reg = 0; cur_reg < num_regs; ++cur_reg) {
4059 for (LiveRange* cur_inactive : inactive_live_ranges(cur_reg)) {
4060 DCHECK_GT(cur_inactive->End(), range->Start());
4061 DCHECK_EQ(cur_inactive->assigned_register(), cur_reg);
4062 // No need to carry out intersections when this register won't be
4063 // interesting to this range anyway.
4064 // TODO(mtrofin): extend to aliased ranges, too.
4066 (positions[cur_reg] <= cur_inactive->NextStart() ||
4067 range->End() <= cur_inactive->NextStart())) {
4068 break;
4069 }
4070 LifetimePosition next_intersection =
4071 cur_inactive->FirstIntersection(range);
4072 if (!next_intersection.IsValid()) continue;
4074 positions[cur_reg] = std::min(positions[cur_reg], next_intersection);
4075 TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
4076 positions[cur_reg].value());
4077 } else {
4078 int alias_base_index = -1;
4079 int aliases = data()->config()->GetAliases(
4080 cur_inactive->representation(), cur_reg, rep, &alias_base_index);
4081 DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
4082 while (aliases--) {
4083 int aliased_reg = alias_base_index + aliases;
4084 positions[aliased_reg] =
4085 std::min(positions[aliased_reg], next_intersection);
4086 }
4087 }
4088 }
4089 }
4090}
4091
4092// High-level register allocation summary:
4093//
4094// We attempt to first allocate the preferred (hint) register. If that is not
4095// possible, we find a register that's free, and allocate that. If that's not
4096// possible, we search for a register to steal from a range that was allocated.
4097// The goal is to optimize for throughput by avoiding register-to-memory moves,
4098// which are expensive.
4100 SpillMode spill_mode) {
4102 free_until_pos;
4103 FindFreeRegistersForRange(current, free_until_pos);
4104 if (!TryAllocatePreferredReg(current, free_until_pos)) {
4105 if (!TryAllocateFreeReg(current, free_until_pos)) {
4106 AllocateBlockedReg(current, spill_mode);
4107 }
4108 }
4109 if (current->HasRegisterAssigned()) {
4110 AddToActive(current);
4111 }
4112}
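// A minimal sketch, not from register-allocator.cc, of the three-tier fallback
// described in the high-level summary comment above: try the hinted register,
// then any free register, then steal a blocked one. The std::function
// parameters stand in for TryAllocatePreferredReg / TryAllocateFreeReg /
// AllocateBlockedReg and are assumptions of this sketch, not the real
// interfaces.
#include <functional>
#include <optional>

namespace allocation_cascade_sketch {

struct FakeRange {
  int vreg;
};

std::optional<int> AllocateOne(
    const FakeRange& range,
    const std::function<std::optional<int>(const FakeRange&)>& try_preferred,
    const std::function<std::optional<int>(const FakeRange&)>& try_any_free,
    const std::function<std::optional<int>(const FakeRange&)>& steal_blocked) {
  if (auto reg = try_preferred(range)) return reg;  // 1. hint, if free enough
  if (auto reg = try_any_free(range)) return reg;   // 2. any free register
  return steal_blocked(range);  // 3. may split/spill others, or spill `range`
}

}  // namespace allocation_cascade_sketch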
4113
4115 LiveRange* current, base::Vector<const LifetimePosition> free_until_pos) {
4116 int hint_register;
4117 if (current->RegisterFromControlFlow(&hint_register) ||
4118 current->RegisterFromFirstHint(&hint_register) ||
4119 current->RegisterFromBundle(&hint_register)) {
4120 TRACE(
4121 "Found reg hint %s (free until [%d) for live range %d:%d (end %d[).\n",
4122 RegisterName(hint_register), free_until_pos[hint_register].value(),
4123 current->TopLevel()->vreg(), current->relative_id(),
4124 current->End().value());
4125
4126 // The desired register is free until the end of the current live range.
4127 if (free_until_pos[hint_register] >= current->End()) {
4128 TRACE("Assigning preferred reg %s to live range %d:%d\n",
4129 RegisterName(hint_register), current->TopLevel()->vreg(),
4130 current->relative_id());
4131 SetLiveRangeAssignedRegister(current, hint_register);
4132 return true;
4133 }
4134 }
4135 return false;
4136}
4137
4139 LiveRange* current, int hint_reg,
4140 base::Vector<const LifetimePosition> free_until_pos) {
4141 int num_regs = 0; // used only for the call to GetFPRegisterSet.
4142 int num_codes = num_allocatable_registers();
4143 const int* codes = allocatable_register_codes();
4144 MachineRepresentation rep = current->representation();
4148 GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
4151 GetSIMD128RegisterSet(&num_regs, &num_codes, &codes);
4152 }
4153
4154 DCHECK_GE(free_until_pos.length(), num_codes);
4155
4156 // Find the register which stays free for the longest time. Check for
4157 // the hinted register first, as we might want to use that one. Only
4158 // count full instructions for free ranges, as an instruction's internal
4159 // positions do not help but might shadow a hinted register. This is
4160 // typically the case for function calls, where all registers are
4161 // clobbered after the call except for the argument registers, which are
4162 // set before the call. Hence, the argument registers always get ignored,
4163 // as their available time is shorter.
4164 int reg = (hint_reg == kUnassignedRegister) ? codes[0] : hint_reg;
4165 int current_free = free_until_pos[reg].ToInstructionIndex();
4166 for (int i = 0; i < num_codes; ++i) {
4167 int code = codes[i];
4168 // Prefer registers that have no fixed uses to avoid blocking later hints.
4169 // We use the first register that has no fixed uses to ensure we use
4170 // byte addressable registers in ia32 first.
4171 int candidate_free = free_until_pos[code].ToInstructionIndex();
4172 TRACE("Register %s in free until %d\n", RegisterName(code), candidate_free);
4173 if ((candidate_free > current_free) ||
4174 (candidate_free == current_free && reg != hint_reg &&
4175 (data()->HasFixedUse(current->representation(), reg) &&
4176 !data()->HasFixedUse(current->representation(), code)))) {
4177 reg = code;
4178 current_free = candidate_free;
4179 }
4180 }
4181
4182 return reg;
4183}
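// A standalone sketch, not from register-allocator.cc, of the selection loop
// above on plain vectors: prefer the register that stays free the longest,
// keep the hint on ties, and on a tie away from the hint prefer a register
// without fixed uses. `has_fixed_use` stands in for data()->HasFixedUse() and
// is an assumption of this sketch.
#include <vector>

namespace pick_register_sketch {

int PickLongestFree(const std::vector<int>& free_until,      // per register
                    const std::vector<bool>& has_fixed_use,  // per register
                    int hint_reg) {                          // -1 if no hint
  int reg = (hint_reg == -1) ? 0 : hint_reg;
  int current_free = free_until[reg];
  for (int code = 0; code < static_cast<int>(free_until.size()); ++code) {
    int candidate_free = free_until[code];
    if (candidate_free > current_free ||
        (candidate_free == current_free && reg != hint_reg &&
         has_fixed_use[reg] && !has_fixed_use[code])) {
      reg = code;
      current_free = candidate_free;
    }
  }
  return reg;
}

}  // namespace pick_register_sketch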
4184
4186 LiveRange* current, base::Vector<const LifetimePosition> free_until_pos) {
4187 // Compute register hint, if such exists.
4188 int hint_reg = kUnassignedRegister;
4189 current->RegisterFromControlFlow(&hint_reg) ||
4190 current->RegisterFromFirstHint(&hint_reg) ||
4191 current->RegisterFromBundle(&hint_reg);
4192
4193 int reg =
4194 PickRegisterThatIsAvailableLongest(current, hint_reg, free_until_pos);
4195
4196 LifetimePosition pos = free_until_pos[reg];
4197
4198 if (pos <= current->Start()) {
4199 // All registers are blocked.
4200 return false;
4201 }
4202
4203 if (pos < current->End()) {
4204 // Register reg is available at the range start but becomes blocked before
4205 // the range end. Split current before the position where it becomes
4206 // blocked. Shift the split position to the last gap position. This is to
4207 // ensure that if a connecting move is needed, that move coincides with the
4208 // start of the range that it defines. See crbug.com/1182985.
4209 LifetimePosition gap_pos =
4210 pos.IsGapPosition() ? pos : pos.FullStart().End();
4211 if (gap_pos <= current->Start()) return false;
4212 LiveRange* tail = SplitRangeAt(current, gap_pos);
4213 AddToUnhandled(tail);
4214
4215 // Try to allocate preferred register once more.
4216 if (TryAllocatePreferredReg(current, free_until_pos)) return true;
4217 }
4218
4219 // Register reg is available at the range start and is free until the range
4220 // end.
4221 DCHECK_GE(pos, current->End());
4222 TRACE("Assigning free reg %s to live range %d:%d\n", RegisterName(reg),
4223 current->TopLevel()->vreg(), current->relative_id());
4225
4226 return true;
4227}
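// A small sketch, not from register-allocator.cc, of the decision made above,
// with positions reduced to integers: a register blocked before the range even
// starts is useless, a register that becomes blocked mid-range means we split
// and assign only the head, and otherwise the whole range gets the register.
namespace free_reg_decision_sketch {

enum class Outcome { kAllBlocked, kSplitAndAssignHead, kAssignWholeRange };

Outcome Decide(int range_start, int range_end, int free_until) {
  if (free_until <= range_start) return Outcome::kAllBlocked;
  if (free_until < range_end) return Outcome::kSplitAndAssignHead;
  return Outcome::kAssignWholeRange;
}

}  // namespace free_reg_decision_sketch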
4228
4230 SpillMode spill_mode) {
4231 UsePosition* register_use = current->NextRegisterPosition(current->Start());
4232 if (register_use == nullptr) {
4233 // There is no use in the current live range that requires a register.
4234 // We can just spill it.
4235 LiveRange* begin_spill = nullptr;
4237 current, current->Start(), spill_mode, &begin_spill);
4238 MaybeSpillPreviousRanges(begin_spill, spill_pos, current);
4239 Spill(current, spill_mode);
4240 return;
4241 }
4242
4243 MachineRepresentation rep = current->representation();
4244
4245 // use_pos keeps track of positions a register/alias is used at.
4246 // block_pos keeps track of positions where a register/alias is blocked
4247 // from.
4251 block_pos(LifetimePosition::MaxPosition());
4252
4253 for (LiveRange* range : active_live_ranges()) {
4254 int cur_reg = range->assigned_register();
4255 bool is_fixed_or_cant_spill =
4256 range->TopLevel()->IsFixed() || !range->CanBeSpilled(current->Start());
4258 if (is_fixed_or_cant_spill) {
4259 block_pos[cur_reg] = use_pos[cur_reg] =
4261 } else {
4263 block_pos[cur_reg]);
4264 use_pos[cur_reg] =
4265 range->NextLifetimePositionRegisterIsBeneficial(current->Start());
4266 }
4267 } else {
4268 int alias_base_index = -1;
4269 int aliases = data()->config()->GetAliases(
4270 range->representation(), cur_reg, rep, &alias_base_index);
4271 DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
4272 while (aliases--) {
4273 int aliased_reg = alias_base_index + aliases;
4274 if (is_fixed_or_cant_spill) {
4275 block_pos[aliased_reg] = use_pos[aliased_reg] =
4277 } else {
4278 use_pos[aliased_reg] =
4279 std::min(block_pos[aliased_reg],
4280 range->NextLifetimePositionRegisterIsBeneficial(
4281 current->Start()));
4282 }
4283 }
4284 }
4285 }
4286
4287 for (int cur_reg = 0; cur_reg < num_registers(); ++cur_reg) {
4289 for (LiveRange* range : inactive_live_ranges(cur_reg)) {
4290 DCHECK(range->End() > current->Start());
4291 DCHECK_EQ(range->assigned_register(), cur_reg);
4292 bool is_fixed = range->TopLevel()->IsFixed();
4293
4294 // Don't perform costly intersections if they are guaranteed to not update
4295 // block_pos or use_pos.
4296 // TODO(mtrofin): extend to aliased ranges, too.
4298 DCHECK_LE(use_pos[cur_reg], block_pos[cur_reg]);
4299 if (block_pos[cur_reg] <= range->NextStart()) break;
4300 if (!is_fixed && use_pos[cur_reg] <= range->NextStart()) continue;
4301 }
4302
4303 LifetimePosition next_intersection = range->FirstIntersection(current);
4304 if (!next_intersection.IsValid()) continue;
4305
4307 if (is_fixed) {
4308 block_pos[cur_reg] = std::min(block_pos[cur_reg], next_intersection);
4309 use_pos[cur_reg] = std::min(block_pos[cur_reg], use_pos[cur_reg]);
4310 } else {
4311 use_pos[cur_reg] = std::min(use_pos[cur_reg], next_intersection);
4312 }
4313 } else {
4314 int alias_base_index = -1;
4315 int aliases = data()->config()->GetAliases(
4316 range->representation(), cur_reg, rep, &alias_base_index);
4317 DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
4318 while (aliases--) {
4319 int aliased_reg = alias_base_index + aliases;
4320 if (is_fixed) {
4321 block_pos[aliased_reg] =
4322 std::min(block_pos[aliased_reg], next_intersection);
4323 use_pos[aliased_reg] =
4324 std::min(block_pos[aliased_reg], use_pos[aliased_reg]);
4325 } else {
4326 use_pos[aliased_reg] =
4327 std::min(use_pos[aliased_reg], next_intersection);
4328 }
4329 }
4330 }
4331 }
4332 }
4333
4334 // Compute register hint if it exists.
4335 int hint_reg = kUnassignedRegister;
4336 current->RegisterFromControlFlow(&hint_reg) ||
4337 register_use->HintRegister(&hint_reg) ||
4338 current->RegisterFromBundle(&hint_reg);
4339 int reg = PickRegisterThatIsAvailableLongest(current, hint_reg, use_pos);
4340
4341 if (use_pos[reg] < register_use->pos()) {
4342 // If there is a gap position before the next register use, we can
4343 // spill until there. The gap position will then fit the fill move.
4344 if (LifetimePosition::ExistsGapPositionBetween(current->Start(),
4345 register_use->pos())) {
4346 SpillBetween(current, current->Start(), register_use->pos(), spill_mode);
4347 return;
4348 }
4349 }
4350
4351 // When in deferred spilling mode, avoid stealing registers beyond the current
4352 // deferred region. This is required as we otherwise might spill an inactive
4353 // range with a start outside of deferred code and that would not be reloaded.
4354 LifetimePosition new_end = current->End();
4355 if (spill_mode == SpillMode::kSpillDeferred) {
4356 InstructionBlock* deferred_block =
4357 code()->GetInstructionBlock(current->Start().ToInstructionIndex());
4358 new_end =
4360 LastDeferredInstructionIndex(deferred_block)));
4361 }
4362
4363 // We couldn't spill until the next register use. Split before the register
4364 // is blocked, if applicable.
4365 if (block_pos[reg] < new_end) {
4366 // Register becomes blocked before the current range end. Split before that
4367 // position.
4368 new_end = block_pos[reg].Start();
4369 }
4370
4371 // If there is no register available at all, we can only spill this range.
4372 // Happens for instance on entry to deferred code where registers might
4373 // become blocked yet we aim to reload ranges.
4374 if (new_end == current->Start()) {
4375 SpillBetween(current, new_end, register_use->pos(), spill_mode);
4376 return;
4377 }
4378
4379 // Split at the new end if we found one.
4380 if (new_end != current->End()) {
4381 LiveRange* tail = SplitBetween(current, current->Start(), new_end);
4382 AddToUnhandled(tail);
4383 }
4384
4385 // Register reg is not blocked for the whole range.
4386 DCHECK(block_pos[reg] >= current->End());
4387 TRACE("Assigning blocked reg %s to live range %d:%d\n", RegisterName(reg),
4388 current->TopLevel()->vreg(), current->relative_id());
4390
4391 // This register was not free. Thus we need to find and spill
4392 // parts of active and inactive live regions that use the same register
4393 // at the same lifetime positions as current.
4394 SplitAndSpillIntersecting(current, spill_mode);
4395}
4396
4398 SpillMode spill_mode) {
4399 DCHECK(current->HasRegisterAssigned());
4400 int reg = current->assigned_register();
4401 LifetimePosition split_pos = current->Start();
4402 for (auto it = active_live_ranges().begin();
4403 it != active_live_ranges().end();) {
4404 LiveRange* range = *it;
4406 if (range->assigned_register() != reg) {
4407 ++it;
4408 continue;
4409 }
4410 } else {
4411 if (!data()->config()->AreAliases(current->representation(), reg,
4412 range->representation(),
4413 range->assigned_register())) {
4414 ++it;
4415 continue;
4416 }
4417 }
4418
4419 UsePosition* next_pos = range->NextRegisterPosition(current->Start());
4420 LiveRange* begin_spill = nullptr;
4421 LifetimePosition spill_pos =
4422 FindOptimalSpillingPos(range, split_pos, spill_mode, &begin_spill);
4423 MaybeSpillPreviousRanges(begin_spill, spill_pos, range);
4424 if (next_pos == nullptr) {
4425 SpillAfter(range, spill_pos, spill_mode);
4426 } else {
4427 // When spilling between spill_pos and next_pos ensure that the range
4428 // remains spilled at least until the start of the current live range.
4429 // This guarantees that we will not introduce new unhandled ranges that
4430 // start before the current range as this violates allocation invariants
4431 // and will lead to an inconsistent state of active and inactive
4432 // live-ranges: ranges are allocated in order of their start positions,
4433 // ranges are retired from active/inactive when the start of the
4434 // current live-range is larger than their end.
4436 next_pos->pos()));
4437 SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos(),
4438 spill_mode);
4439 }
4440 it = ActiveToHandled(it);
4441 }
4442
4443 for (int cur_reg = 0; cur_reg < num_registers(); ++cur_reg) {
4445 if (cur_reg != reg) continue;
4446 }
4448 for (auto it = inactive_live_ranges(cur_reg).begin();
4449 it != inactive_live_ranges(cur_reg).end();) {
4450 LiveRange* range = *it;
4452 !data()->config()->AreAliases(current->representation(), reg,
4453 range->representation(), cur_reg)) {
4454 ++it;
4455 continue;
4456 }
4457 DCHECK(range->End() > current->Start());
4458 if (range->TopLevel()->IsFixed()) {
4459 ++it;
4460 continue;
4461 }
4462
4463 LifetimePosition next_intersection = range->FirstIntersection(current);
4464 if (next_intersection.IsValid()) {
4465 UsePosition* next_pos = range->NextRegisterPosition(current->Start());
4466 if (next_pos == nullptr) {
4467 SpillAfter(range, split_pos, spill_mode);
4468 } else {
4469 next_intersection = std::min(next_intersection, next_pos->pos());
4470 SpillBetween(range, split_pos, next_intersection, spill_mode);
4471 }
4472 it = InactiveToHandled(it);
4473 } else {
4474 ++it;
4475 }
4476 }
4477 }
4478}
4479
4481 if (!range->is_phi()) return false;
4482
4483 DCHECK(!range->HasSpillOperand());
4484 // Check how many operands belong to the same bundle as the output.
4485 LiveRangeBundle* out_bundle = range->get_bundle();
4487 data()->GetPhiMapValueFor(range);
4488 const PhiInstruction* phi = phi_map_value->phi();
4489 const InstructionBlock* block = phi_map_value->block();
4490 // Count the number of spilled operands.
4491 size_t spilled_count = 0;
4492 for (size_t i = 0; i < phi->operands().size(); i++) {
4493 int op = phi->operands()[i];
4494 TopLevelLiveRange* op_range = data()->GetLiveRangeFor(op);
4495 if (!op_range->HasSpillRange() || op_range->get_bundle() != out_bundle)
4496 continue;
4497 const InstructionBlock* pred =
4498 code()->InstructionBlockAt(block->predecessors()[i]);
4499 LifetimePosition pred_end =
4501 pred->last_instruction_index());
4502 LiveRange* op_range_child = op_range->GetChildCovers(pred_end);
4503 if (op_range_child != nullptr && op_range_child->spilled()) {
4504 spilled_count++;
4505 }
4506 }
4507
4508 // Only continue if more than half of the operands are spilled to the same
4509 // slot (because part of same bundle).
4510 if (spilled_count * 2 <= phi->operands().size()) {
4511 return false;
4512 }
4513
4514 // If the range does not need a register soon, spill it to the merged
4515 // spill range.
4516 LifetimePosition next_pos = range->Start();
4517 if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart();
4518 UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
4519 if (pos == nullptr) {
4520 Spill(range, SpillMode::kSpillAtDefinition);
4521 return true;
4522 } else if (pos->pos() > range->Start().NextStart()) {
4523 SpillBetween(range, range->Start(), pos->pos(),
4524 SpillMode::kSpillAtDefinition);
4525 return true;
4526 }
4527 return false;
4528}
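// A tiny worked example, not from register-allocator.cc, of the majority test
// above: the phi output reuses the merged spill slot only if strictly more
// than half of its operands are already spilled to that slot. With 5 operands
// and 3 such operands, 3 * 2 = 6 > 5, so the slot is reused; with 2 of 4,
// 2 * 2 = 4 is not greater than 4, so it is not.
#include <cstddef>

namespace phi_spill_majority_sketch {

bool MajoritySpilledToSharedSlot(size_t spilled_count, size_t operand_count) {
  return spilled_count * 2 > operand_count;
}

}  // namespace phi_spill_majority_sketch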
4529
4531 SpillMode spill_mode) {
4532 LiveRange* second_part = SplitRangeAt(range, pos);
4533 Spill(second_part, spill_mode);
4534}
4535
4538 SpillMode spill_mode) {
4539 SpillBetweenUntil(range, start, start, end, spill_mode);
4540}
4541
4544 LifetimePosition until,
4546 SpillMode spill_mode) {
4547 CHECK(start < end);
4548 LiveRange* second_part = SplitRangeAt(range, start);
4549
4550 if (second_part->Start() < end) {
4551 // The split result intersects with [start, end[.
4552 // Split it at a position in ]start+1, end[, spill the middle part,
4553 // and add the rest to unhandled.
4554
4555 // Make sure that the third part always starts after the start of the
4556 // second part, as that likely is the current position of the register
4557 // allocator and we cannot add ranges to unhandled that start before
4558 // the current position.
4559 LifetimePosition split_start = std::max(second_part->Start().End(), until);
4560
4561 // If end is an actual use (which it typically is), we have to split
4562 // so that there is a gap before it, leaving space for moving the
4563 // value into its position.
4564 // However, if we have no choice, split right where asked.
4565 LifetimePosition third_part_end =
4566 std::max(split_start, end.PrevStart().End());
4567 // Instead of splitting right after or even before the block boundary,
4568 // split on the boundary to avoid extra moves.
4569 if (data()->IsBlockBoundary(end.Start())) {
4570 third_part_end = std::max(split_start, end.Start());
4571 }
4572
4573 LiveRange* third_part =
4574 SplitBetween(second_part, split_start, third_part_end);
4575 if (GetInstructionBlock(data()->code(), second_part->Start())
4576 ->IsDeferred()) {
4577 // Try to use the same register as before.
4578 TRACE("Setting control flow hint for %d:%d to %s\n",
4579 third_part->TopLevel()->vreg(), third_part->relative_id(),
4580 RegisterName(range->controlflow_hint()));
4581 third_part->set_controlflow_hint(range->controlflow_hint());
4582 }
4583
4584 AddToUnhandled(third_part);
4585 // This can happen, even if we checked for start < end above, as we fiddle
4586 // with the end location. However, we are guaranteed to be after or at
4587 // until, so this is fine.
4588 if (third_part != second_part) {
4589 Spill(second_part, spill_mode);
4590 }
4591 } else {
4592 // The split result does not intersect with [start, end[.
4593 // Nothing to spill. Just put it to unhandled as whole.
4594 AddToUnhandled(second_part);
4595 }
4596}
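// A sketch, not from register-allocator.cc, of how the split points chosen in
// SpillBetweenUntil relate, with gap/instruction sub-positions approximated by
// +/-1 on plain integers. The middle part up to `third_part_start` is spilled;
// the tail starting there goes back to unhandled and is expected in a register
// again near `end`.
#include <algorithm>

namespace spill_between_sketch {

struct SplitPoints {
  int third_part_start;  // == split_start in the code above
  int third_part_end;
};

SplitPoints Choose(int second_part_start, int until, int end,
                   bool end_is_block_boundary) {
  // Never start the tail before `until` or before the allocator's position.
  int split_start = std::max(second_part_start + 1, until);
  // Leave a gap before an actual use at `end` for the reload move, but split
  // exactly on a block boundary to avoid an extra move.
  int third_part_end = end_is_block_boundary ? std::max(split_start, end)
                                             : std::max(split_start, end - 1);
  return {split_start, third_part_end};
}

}  // namespace spill_between_sketch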
4597
4599
4601 for (auto range : data()->live_ranges()) {
4603 DCHECK_NOT_NULL(range);
4604 if (range->IsSpilledOnlyInDeferredBlocks(data())) {
4605 // If the range is spilled only in deferred blocks and starts in
4606 // a non-deferred block, we transition its representation here so
4607 // that the LiveRangeConnector processes them correctly. If,
4608 // however, they start in a deferred block, we upgrade them to
4609 // spill at definition, as that definition is in a deferred block
4610 // anyway. While this is an optimization, the code in LiveRangeConnector
4611 // relies on it!
4612 if (GetInstructionBlock(data()->code(), range->Start())->IsDeferred()) {
4613 TRACE("Live range %d is spilled and alive in deferred code only\n",
4614 range->vreg());
4615 range->TransitionRangeToSpillAtDefinition();
4616 } else {
4617 TRACE("Live range %d is spilled deferred code only but alive outside\n",
4618 range->vreg());
4619 range->TransitionRangeToDeferredSpill(data()->allocation_zone());
4620 }
4621 }
4622 }
4623}
4624
4626 ZoneVector<SpillRange*> spill_ranges(data()->allocation_zone());
4627 for (const TopLevelLiveRange* range : data()->live_ranges()) {
4628 DCHECK_NOT_NULL(range);
4629 if (range->HasSpillRange()) {
4630 DCHECK_NOT_NULL(range->GetSpillRange());
4631 spill_ranges.push_back(range->GetSpillRange());
4632 }
4633 }
4634 // At this point, the `SpillRange`s for all `TopLevelLiveRange`s should be
4635 // unique, since none have been merged yet.
4636 DCHECK_EQ(spill_ranges.size(),
4637 std::set(spill_ranges.begin(), spill_ranges.end()).size());
4638
4639 // Merge all `SpillRange`s that belong to the same `LiveRangeBundle`.
4640 for (const TopLevelLiveRange* range : data()->live_ranges()) {
4642 DCHECK_NOT_NULL(range);
4643 if (range->get_bundle() != nullptr) {
4644 range->get_bundle()->MergeSpillRangesAndClear();
4645 }
4646 }
4647
4648 // Now merge *all* disjoint, non-empty `SpillRange`s.
4649 // Formerly, this merging was O(n^2) in the number of `SpillRange`s, which
4650 // then dominated compile time (>40%) for some pathological cases,
4651 // e.g., https://crbug.com/v8/14133.
4652 // Now, we allow only `kMaxRetries` unsuccessful merges with directly
4653 // following `SpillRange`s. After each `kMaxRetries`, we exponentially
4654 // increase the stride, which limits the inner loop to O(log n) and thus
4655 // the overall merging to O(n * log n).
4656
4657 // The merging above may have left some `SpillRange`s empty, remove them.
4658 SpillRange** end_nonempty =
4659 std::remove_if(spill_ranges.begin(), spill_ranges.end(),
4660 [](const SpillRange* range) { return range->IsEmpty(); });
4661 for (SpillRange** range_it = spill_ranges.begin(); range_it < end_nonempty;
4662 ++range_it) {
4664 SpillRange* range = *range_it;
4665 DCHECK(!range->IsEmpty());
4666 constexpr size_t kMaxRetries = 1000;
4667 size_t retries = kMaxRetries;
4668 size_t stride = 1;
4669 for (SpillRange** other_it = range_it + 1; other_it < end_nonempty;
4670 other_it += stride) {
4671 SpillRange* other = *other_it;
4672 DCHECK(!other->IsEmpty());
4673 if (range->TryMerge(other)) {
4674 DCHECK(other->IsEmpty());
4675 std::iter_swap(other_it, --end_nonempty);
4676 } else if (--retries == 0) {
4677 retries = kMaxRetries;
4678 stride *= 2;
4679 }
4680 }
4681 }
4682 spill_ranges.erase(end_nonempty, spill_ranges.end());
4683
4684 // Allocate slots for the merged spill ranges.
4685 for (SpillRange* range : spill_ranges) {
4687 DCHECK(!range->IsEmpty());
4688 if (!range->HasSlot()) {
4689 // Allocate a new operand referring to the spill slot, aligned to the
4690 // operand size.
4691 int width = range->byte_width();
4692 int index = data()->frame()->AllocateSpillSlot(width, width);
4693 range->set_assigned_slot(index);
4694 }
4695 }
4696}
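// A standalone sketch, not from register-allocator.cc, of the bounded-retry
// merging above: after kMaxRetries consecutive failed merges the stride
// doubles, so each range attempts only O(log n) merges instead of O(n). Spill
// ranges are modeled as sets of positions that can merge iff they are
// disjoint; all names here are illustrative, not the SpillRange API.
#include <algorithm>
#include <cstddef>
#include <set>
#include <vector>

namespace spill_merge_sketch {

using FakeSpillRange = std::set<int>;

bool TryMerge(FakeSpillRange& dst, FakeSpillRange& src) {
  for (int pos : src) {
    if (dst.count(pos)) return false;  // overlapping: cannot share a slot
  }
  dst.insert(src.begin(), src.end());
  src.clear();  // a merged-away range becomes empty
  return true;
}

void MergeAll(std::vector<FakeSpillRange>& ranges) {
  auto end_nonempty =
      std::remove_if(ranges.begin(), ranges.end(),
                     [](const FakeSpillRange& r) { return r.empty(); });
  for (auto range_it = ranges.begin(); range_it < end_nonempty; ++range_it) {
    constexpr size_t kMaxRetries = 1000;
    size_t retries = kMaxRetries;
    size_t stride = 1;
    for (auto other_it = range_it + 1; other_it < end_nonempty;
         other_it += stride) {
      if (TryMerge(*range_it, *other_it)) {
        std::iter_swap(other_it, --end_nonempty);
      } else if (--retries == 0) {
        retries = kMaxRetries;
        stride *= 2;
      }
    }
  }
  ranges.erase(end_nonempty, ranges.end());
}

}  // namespace spill_merge_sketch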
4697
4699 const size_t live_ranges_size = data()->live_ranges().size();
4700 for (TopLevelLiveRange* top_range : data()->live_ranges()) {
4702 CHECK_EQ(live_ranges_size,
4703 data()->live_ranges().size()); // TODO(neis): crbug.com/831822
4704 DCHECK_NOT_NULL(top_range);
4705 if (top_range->IsEmpty()) continue;
4706 InstructionOperand spill_operand;
4707 if (top_range->HasSpillOperand()) {
4708 auto it = data()->slot_for_const_range().find(top_range);
4709 if (it != data()->slot_for_const_range().end()) {
4710 spill_operand = *it->second;
4711 } else {
4712 spill_operand = *top_range->GetSpillOperand();
4713 }
4714 } else if (top_range->HasSpillRange()) {
4715 spill_operand = top_range->GetSpillRangeOperand();
4716 }
4717 if (top_range->is_phi()) {
4719 top_range->GetAssignedOperand());
4720 }
4721 for (LiveRange* range = top_range; range != nullptr;
4722 range = range->next()) {
4723 InstructionOperand assigned = range->GetAssignedOperand();
4724 DCHECK(!assigned.IsUnallocated());
4725 range->ConvertUsesToOperand(assigned, spill_operand);
4726 }
4727
4728 if (!spill_operand.IsInvalid()) {
4729 // If this top level range has a child spilled in a deferred block, we use
4730 // the range and control flow connection mechanism instead of spilling at
4731 // definition. Refer to the ConnectLiveRanges and ResolveControlFlow
4732 // phases. Normally, when we spill at definition, we do not insert a
4733 // connecting move when a successor child range is spilled - because the
4734 // spilled range picks up its value from the slot which was assigned at
4735 // definition. For ranges that are determined to spill only in deferred
4736 // blocks, we let ConnectLiveRanges and ResolveControlFlow find the blocks
4737 // where a spill operand is expected, and then finalize by inserting the
4738 // spills in the deferred blocks' dominators.
4739 if (!top_range->IsSpilledOnlyInDeferredBlocks(data()) &&
4740 !top_range->HasGeneralSpillRange()) {
4741 // Spill at definition if the range isn't spilled in a way that will be
4742 // handled later.
4743 top_range->FilterSpillMoves(data(), spill_operand);
4744 top_range->CommitSpillMoves(data(), spill_operand);
4745 }
4746 }
4747 }
4748}
4749
4752
4754 int safe_point = 0;
4755 for (ReferenceMap* map : *data()->code()->reference_maps()) {
4756 if (safe_point > map->instruction_position()) return false;
4757 safe_point = map->instruction_position();
4758 }
4759 return true;
4760}
4761
4764 // Map all delayed references.
4765 for (RegisterAllocationData::DelayedReference& delayed_reference :
4766 data()->delayed_references()) {
4767 delayed_reference.map->RecordReference(
4768 AllocatedOperand::cast(*delayed_reference.operand));
4769 }
4770 // Iterate over all safe point positions and record a pointer
4771 // for all spilled live ranges at this point.
4772 int last_range_start = 0;
4773 const ReferenceMaps* reference_maps = data()->code()->reference_maps();
4774 ReferenceMaps::const_iterator first_it = reference_maps->begin();
4775 const size_t live_ranges_size = data()->live_ranges().size();
4776 // Select subset of `TopLevelLiveRange`s to process, sort them by their start.
4777 ZoneVector<TopLevelLiveRange*> candidate_ranges(data()->allocation_zone());
4778 candidate_ranges.reserve(data()->live_ranges().size());
4779 for (TopLevelLiveRange* range : data()->live_ranges()) {
4780 CHECK_EQ(live_ranges_size,
4781 data()->live_ranges().size()); // TODO(neis): crbug.com/831822
4782 DCHECK_NOT_NULL(range);
4783 // Skip non-reference values.
4784 if (!data()->code()->IsReference(range->vreg())) continue;
4785 // Skip empty live ranges.
4786 if (range->IsEmpty()) continue;
4787 if (range->has_preassigned_slot()) continue;
4788 candidate_ranges.push_back(range);
4789 }
4790 std::sort(candidate_ranges.begin(), candidate_ranges.end(),
4792 for (TopLevelLiveRange* range : candidate_ranges) {
4793 // Find the extent of the range and its children.
4794 int start = range->Start().ToInstructionIndex();
4795 int end = range->Children().back()->End().ToInstructionIndex();
4796
4797 // Ranges should be sorted, so that the first reference map in the current
4798 // live range has to be after {first_it}.
4799 DCHECK_LE(last_range_start, start);
4800 last_range_start = start;
4801 USE(last_range_start);
4802
4803 // Step across all the safe points that are before the start of this range,
4804 // recording how far we step in order to save doing this for the next range.
4805 for (; first_it != reference_maps->end(); ++first_it) {
4806 ReferenceMap* map = *first_it;
4807 if (map->instruction_position() >= start) break;
4808 }
4809
4810 InstructionOperand spill_operand;
4811 if (((range->HasSpillOperand() &&
4812 !range->GetSpillOperand()->IsConstant()) ||
4813 range->HasSpillRange())) {
4814 if (range->HasSpillOperand()) {
4815 spill_operand = *range->GetSpillOperand();
4816 } else {
4817 spill_operand = range->GetSpillRangeOperand();
4818 }
4819 DCHECK(spill_operand.IsStackSlot());
4821 AllocatedOperand::cast(spill_operand).representation()));
4822 }
4823
4824 LiveRange* cur = nullptr;
4825 // Step through the safe points to see whether they are in the range.
4826 for (auto it = first_it; it != reference_maps->end(); ++it) {
4827 ReferenceMap* map = *it;
4828 int safe_point = map->instruction_position();
4829
4830 // The safe points are sorted so we can stop searching here.
4831 if (safe_point - 1 > end) break;
4832
4833 // Advance to the next active range that covers the current
4834 // safe point position.
4835 LifetimePosition safe_point_pos =
4837
4838 // Search for the child range (cur) that covers safe_point_pos. If we
4839 // don't find it before the children pass safe_point_pos, keep cur at
4840 // the last child, because the next safe_point_pos may be covered by cur.
4841 // This may happen if cur has more than one interval, and the current
4842 // safe_point_pos is in between intervals.
4843 // For that reason, cur may be at most the last child.
4844 // Use binary search for the first iteration, then linear search after.
4845 bool found = false;
4846 if (cur == nullptr) {
4847 cur = range->GetChildCovers(safe_point_pos);
4848 found = cur != nullptr;
4849 } else {
4850 while (!found) {
4851 if (cur->Covers(safe_point_pos)) {
4852 found = true;
4853 } else {
4854 LiveRange* next = cur->next();
4855 if (next == nullptr || next->Start() > safe_point_pos) {
4856 break;
4857 }
4858 cur = next;
4859 }
4860 }
4861 }
4862
4863 if (!found) {
4864 continue;
4865 }
4866
4867 // Check if the live range is spilled and the safe point is after
4868 // the spill position.
4869 int spill_index = range->IsSpilledOnlyInDeferredBlocks(data()) ||
4870 range->LateSpillingSelected()
4871 ? cur->Start().ToInstructionIndex()
4872 : range->spill_start_index();
4873
4874 if (!spill_operand.IsInvalid() && safe_point >= spill_index) {
4875 TRACE("Pointer for range %d (spilled at %d) at safe point %d\n",
4876 range->vreg(), spill_index, safe_point);
4877 map->RecordReference(AllocatedOperand::cast(spill_operand));
4878 }
4879
4880 if (!cur->spilled()) {
4881 TRACE(
4882 "Pointer in register for range %d:%d (start at %d) "
4883 "at safe point %d\n",
4884 range->vreg(), cur->relative_id(), cur->Start().value(),
4885 safe_point);
4886 InstructionOperand operand = cur->GetAssignedOperand();
4887 DCHECK(!operand.IsStackSlot());
4889 AllocatedOperand::cast(operand).representation()));
4890 map->RecordReference(AllocatedOperand::cast(operand));
4891 }
4892 }
4893 }
4894}
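// A small sketch, not from register-allocator.cc, of the per-safe-point
// decision above: if the range has a spill slot and the safe point is at or
// after the position where the value is spilled, the slot holds a tagged
// pointer and must be recorded; if the child covering the safe point is in a
// register, that register is recorded as well (both can apply). Names are
// illustrative, not the ReferenceMap API.
namespace reference_map_sketch {

struct SafePointRecording {
  bool record_stack_slot;
  bool record_register;
};

SafePointRecording Classify(bool has_spill_slot, int spill_start_index,
                            int safe_point, bool covering_child_in_register) {
  return SafePointRecording{
      has_spill_slot && safe_point >= spill_start_index,
      covering_child_in_register};
}

}  // namespace reference_map_sketch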
4895
4898
4900 const InstructionBlock* block) const {
4901 if (block->PredecessorCount() != 1) return false;
4902 return block->predecessors()[0].IsNext(block->rpo_number());
4903}
4904
4906 ZoneVector<SparseBitVector*>& live_in_sets = data()->live_in_sets();
4907 for (const InstructionBlock* block : code()->instruction_blocks()) {
4908 if (CanEagerlyResolveControlFlow(block)) continue;
4909 SparseBitVector* live = live_in_sets[block->rpo_number().ToInt()];
4910 for (int vreg : *live) {
4912 TopLevelLiveRange* live_range = data()->live_ranges()[vreg];
4914 block->first_instruction_index());
4915 LiveRange* cur_range = live_range->GetChildCovers(cur_start);
4916 DCHECK_NOT_NULL(cur_range);
4917 if (cur_range->spilled()) continue;
4918
4919 for (const RpoNumber& pred : block->predecessors()) {
4920 // Find ranges that may need to be connected.
4921 const InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
4922 LifetimePosition pred_end =
4924 pred_block->last_instruction_index());
4925 // We don't need to perform the O(log n) search if we already know it
4926 // will be the same range.
4927 if (cur_range->CanCover(pred_end)) continue;
4928 LiveRange* pred_range = live_range->GetChildCovers(pred_end);
4929 // This search should always succeed because the `vreg` associated with
4930 // this `live_range` must be live out in all predecessor blocks.
4931 DCHECK_NOT_NULL(pred_range);
4932 // Since the `cur_range` did not cover `pred_end` earlier, the found
4933 // `pred_range` must be different.
4934 DCHECK_NE(cur_range, pred_range);
4935
4936 InstructionOperand pred_op = pred_range->GetAssignedOperand();
4937 InstructionOperand cur_op = cur_range->GetAssignedOperand();
4938 if (pred_op.Equals(cur_op)) continue;
4939
4940 if (!pred_op.IsAnyRegister() && cur_op.IsAnyRegister()) {
4941 // We're doing a reload.
4942 // We don't need to, if:
4943 // 1) there's no register use in this block, and
4944 // 2) the range ends before the block does, and
4945 // 3) we don't have a successor, or the successor is spilled.
4946 LifetimePosition block_start =
4947 LifetimePosition::GapFromInstructionIndex(block->code_start());
4948 LifetimePosition block_end =
4950 // Note that this is not the successor if we have control flow!
4951 // However, in the following condition, we only refer to it if it
4952 // begins in the current block, in which case we can safely declare it
4953 // to be the successor.
4954 const LiveRange* successor = cur_range->next();
4955 if (cur_range->End() < block_end &&
4956 (successor == nullptr || successor->spilled())) {
4957 // verify point 1: no register use. We can go to the end of the
4958 // range, since it's all within the block.
4959
4960 bool uses_reg = false;
4961 for (UsePosition* const* use_pos_it =
4962 cur_range->NextUsePosition(block_start);
4963 use_pos_it != cur_range->positions().end(); ++use_pos_it) {
4964 if ((*use_pos_it)->operand()->IsAnyRegister()) {
4965 uses_reg = true;
4966 break;
4967 }
4968 }
4969 if (!uses_reg) continue;
4970 }
4971 if (cur_range->TopLevel()->IsSpilledOnlyInDeferredBlocks(data()) &&
4972 pred_block->IsDeferred()) {
4973 // The spill location should be defined in pred_block, so add
4974 // pred_block to the list of blocks requiring a spill operand.
4975 TRACE("Adding B%d to list of spill blocks for %d\n",
4976 pred_block->rpo_number().ToInt(),
4977 cur_range->TopLevel()->vreg());
4978 cur_range->TopLevel()
4980 ->Add(pred_block->rpo_number().ToInt());
4981 }
4982 }
4983 int move_loc = ResolveControlFlow(block, cur_op, pred_block, pred_op);
4984 USE(move_loc);
4986 cur_range->TopLevel()->IsSpilledOnlyInDeferredBlocks(data()) &&
4987 !(pred_op.IsAnyRegister() && cur_op.IsAnyRegister()) &&
4988 move_loc != -1,
4989 code()->GetInstructionBlock(move_loc)->IsDeferred());
4990 }
4991 }
4992 }
4993
4994 // At this stage, we collected blocks needing a spill operand due to reloads
4995 // from ConnectRanges and from ResolveControlFlow. Time to commit the spills
4996 // for deferred blocks. This is a convenient time to commit spills for general
4997 // spill ranges also, because they need to use the LiveRangeFinder.
4998 const size_t live_ranges_size = data()->live_ranges().size();
4999 SpillPlacer spill_placer(data(), local_zone);
5000 for (TopLevelLiveRange* top : data()->live_ranges()) {
5001 CHECK_EQ(live_ranges_size,
5002 data()->live_ranges().size()); // TODO(neis): crbug.com/831822
5003 DCHECK_NOT_NULL(top);
5004 if (top->IsEmpty()) continue;
5005 if (top->IsSpilledOnlyInDeferredBlocks(data())) {
5006 CommitSpillsInDeferredBlocks(top, local_zone);
5007 } else if (top->HasGeneralSpillRange()) {
5008 spill_placer.Add(top);
5009 }
5010 }
5011}
5012
5014 const InstructionOperand& cur_op,
5015 const InstructionBlock* pred,
5016 const InstructionOperand& pred_op) {
5017 DCHECK(!pred_op.Equals(cur_op));
5018 int gap_index;
5020 if (block->PredecessorCount() == 1) {
5021 gap_index = block->first_instruction_index();
5023 } else {
5025 // The connecting move might invalidate uses of the destination operand in
5026 // the deoptimization call. See crbug.com/v8/12218. Omitting the move is
5027 // safe since the deopt call exits the current code.
5028 if (last->IsDeoptimizeCall()) {
5029 return -1;
5030 }
5031 // In every other case the last instruction should not participate in
5032 // register allocation, or it could interfere with the connecting move.
5033 for (size_t i = 0; i < last->InputCount(); ++i) {
5034 DCHECK(last->InputAt(i)->IsImmediate());
5035 }
5036 DCHECK_EQ(1, pred->SuccessorCount());
5037 DCHECK(!code()
5038 ->InstructionAt(pred->last_instruction_index())
5039 ->HasReferenceMap());
5040 gap_index = pred->last_instruction_index();
5042 }
5043 data()->AddGapMove(gap_index, position, pred_op, cur_op);
5044 return gap_index;
5045}
5046
5048 DelayedInsertionMap delayed_insertion_map(local_zone);
5049 const size_t live_ranges_size = data()->live_ranges().size();
5050 for (TopLevelLiveRange* top_range : data()->live_ranges()) {
5051 CHECK_EQ(live_ranges_size,
5052 data()->live_ranges().size()); // TODO(neis): crbug.com/831822
5053 DCHECK_NOT_NULL(top_range);
5054 bool connect_spilled = top_range->IsSpilledOnlyInDeferredBlocks(data());
5055 LiveRange* first_range = top_range;
5056 for (LiveRange *second_range = first_range->next(); second_range != nullptr;
5057 first_range = second_range, second_range = second_range->next()) {
5058 LifetimePosition pos = second_range->Start();
5059 // Add gap move if the two live ranges touch and there is no block
5060 // boundary.
5061 if (second_range->spilled()) continue;
5062 if (first_range->End() != pos) continue;
5063 if (data()->IsBlockBoundary(pos) &&
5064 !CanEagerlyResolveControlFlow(GetInstructionBlock(code(), pos))) {
5065 continue;
5066 }
5067 InstructionOperand prev_operand = first_range->GetAssignedOperand();
5068 InstructionOperand cur_operand = second_range->GetAssignedOperand();
5069 if (prev_operand.Equals(cur_operand)) continue;
5070 bool delay_insertion = false;
5072 int gap_index = pos.ToInstructionIndex();
5073 if (connect_spilled && !prev_operand.IsAnyRegister() &&
5074 cur_operand.IsAnyRegister()) {
5075 const InstructionBlock* block = code()->GetInstructionBlock(gap_index);
5076 DCHECK(block->IsDeferred());
5077 // Performing a reload in this block, meaning the spill operand must
5078 // be defined here.
5079 top_range->GetListOfBlocksRequiringSpillOperands(data())->Add(
5080 block->rpo_number().ToInt());
5081 }
5082
5083 if (pos.IsGapPosition()) {
5084 gap_pos = pos.IsStart() ? Instruction::START : Instruction::END;
5085 } else {
5086 if (pos.IsStart()) {
5087 delay_insertion = true;
5088 } else {
5089 gap_index++;
5090 }
5091 gap_pos = delay_insertion ? Instruction::END : Instruction::START;
5092 }
5093 // Reloads or spills for spilled in deferred blocks ranges must happen
5094 // only in deferred blocks.
5095 DCHECK_IMPLIES(connect_spilled && !(prev_operand.IsAnyRegister() &&
5096 cur_operand.IsAnyRegister()),
5097 code()->GetInstructionBlock(gap_index)->IsDeferred());
5098
5099 ParallelMove* move =
5101 gap_pos, code_zone());
5102 if (!delay_insertion) {
5103 move->AddMove(prev_operand, cur_operand);
5104 } else {
5105 delayed_insertion_map.insert(
5106 std::make_pair(std::make_pair(move, prev_operand), cur_operand));
5107 }
5108 }
5109 }
5110 if (delayed_insertion_map.empty()) return;
5111 // Insert all the moves which should occur after the stored move.
5112 ZoneVector<MoveOperands*> to_insert(local_zone);
5113 ZoneVector<MoveOperands*> to_eliminate(local_zone);
5114 to_insert.reserve(4);
5115 to_eliminate.reserve(4);
5116 ParallelMove* moves = delayed_insertion_map.begin()->first.first;
5117 for (auto it = delayed_insertion_map.begin();; ++it) {
5118 bool done = it == delayed_insertion_map.end();
5119 if (done || it->first.first != moves) {
5120 // Commit the MoveOperands for current ParallelMove.
5121 for (MoveOperands* move : to_eliminate) {
5122 move->Eliminate();
5123 }
5124 for (MoveOperands* move : to_insert) {
5125 moves->push_back(move);
5126 }
5127 if (done) break;
5128 // Reset state.
5129 to_eliminate.clear();
5130 to_insert.clear();
5131 moves = it->first.first;
5132 }
5133 // Gather all MoveOperands for a single ParallelMove.
5134 MoveOperands* move =
5135 code_zone()->New<MoveOperands>(it->first.second, it->second);
5136 moves->PrepareInsertAfter(move, &to_eliminate);
5137 to_insert.push_back(move);
5138 }
5139}
5140
5142 Zone* temp_zone) {
5143 DCHECK(range->IsSpilledOnlyInDeferredBlocks(data()));
5144 DCHECK(!range->spilled());
5145
5146 InstructionSequence* code = data()->code();
5147 InstructionOperand spill_operand = range->GetSpillRangeOperand();
5148
5149 TRACE("Live Range %d will be spilled only in deferred blocks.\n",
5150 range->vreg());
5151 // If we have ranges that aren't spilled but require the operand on the stack,
5152 // make sure we insert the spill.
5153 for (const LiveRange* child = range; child != nullptr;
5154 child = child->next()) {
5155 for (const UsePosition* pos : child->positions()) {
5156 if (pos->type() != UsePositionType::kRequiresSlot && !child->spilled())
5157 continue;
5158 range->AddBlockRequiringSpillOperand(
5159 code->GetInstructionBlock(pos->pos().ToInstructionIndex())
5160 ->rpo_number(),
5161 data());
5162 }
5163 }
5164
5165 ZoneQueue<int> worklist(temp_zone);
5166
5167 for (int block_id : *range->GetListOfBlocksRequiringSpillOperands(data())) {
5168 worklist.push(block_id);
5169 }
5170
5171 ZoneSet<std::pair<RpoNumber, int>> done_moves(temp_zone);
5172 // Seek the deferred blocks that dominate locations requiring spill operands,
5173 // and spill there. We only need to spill at the start of such blocks.
5174 SparseBitVector done_blocks(temp_zone);
5175 while (!worklist.empty()) {
5176 int block_id = worklist.front();
5177 worklist.pop();
5178 if (done_blocks.Contains(block_id)) continue;
5179 done_blocks.Add(block_id);
5180 InstructionBlock* spill_block =
5181 code->InstructionBlockAt(RpoNumber::FromInt(block_id));
5182
5183 for (const RpoNumber& pred : spill_block->predecessors()) {
5184 const InstructionBlock* pred_block = code->InstructionBlockAt(pred);
5185
5186 if (pred_block->IsDeferred()) {
5187 worklist.push(pred_block->rpo_number().ToInt());
5188 } else {
5189 LifetimePosition pred_end =
5190 LifetimePosition::InstructionFromInstructionIndex(
5191 pred_block->last_instruction_index());
5192
5193 LiveRange* child_range = range->GetChildCovers(pred_end);
5194 DCHECK_NOT_NULL(child_range);
5195
5196 InstructionOperand pred_op = child_range->GetAssignedOperand();
5197
5198 RpoNumber spill_block_number = spill_block->rpo_number();
5199 if (done_moves.find(std::make_pair(
5200 spill_block_number, range->vreg())) == done_moves.end()) {
5201 TRACE("Spilling deferred spill for range %d at B%d\n", range->vreg(),
5202 spill_block_number.ToInt());
5203 data()->AddGapMove(spill_block->first_instruction_index(),
5204 Instruction::GapPosition::START, pred_op,
5205 spill_operand);
5206 done_moves.insert(std::make_pair(spill_block_number, range->vreg()));
5207 spill_block->mark_needs_frame();
5208 }
5209 }
5210 }
5211 }
5212}
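
The worklist in CommitSpillsInDeferredBlocks effectively walks backwards through deferred code from every block that needs the value on the stack: deferred predecessors are pushed so the search keeps climbing, and every edge from non-deferred code into a deferred block records one spill move at that deferred block's entry. A standalone sketch of the traversal over a toy control-flow graph follows; Block and the spill bookkeeping are illustrative stand-ins, not the V8 types.

// Standalone sketch of the deferred-spill worklist above: starting from blocks
// that require the value on the stack, walk deferred predecessors and record
// one spill at the entry of each deferred block reached from non-deferred code.
#include <iostream>
#include <queue>
#include <set>
#include <vector>

struct Block {
  int id;
  bool deferred;
  std::vector<int> predecessors;
};

int main() {
  // Toy CFG: B0 (hot) -> B1 (deferred) -> B2 (deferred); B2 needs the spill.
  std::vector<Block> blocks = {
      {0, false, {}}, {1, true, {0}}, {2, true, {1}}};

  std::queue<int> worklist;
  worklist.push(2);  // block requiring the spill operand

  std::set<int> done_blocks;
  std::set<int> spill_at;  // deferred blocks whose entry gets the spill move
  while (!worklist.empty()) {
    int id = worklist.front();
    worklist.pop();
    if (!done_blocks.insert(id).second) continue;  // already visited
    for (int pred : blocks[id].predecessors) {
      if (blocks[pred].deferred) {
        worklist.push(pred);  // keep climbing through deferred code
      } else {
        spill_at.insert(id);  // hot -> deferred edge: spill at this entry
      }
    }
  }

  for (int id : spill_at) std::cout << "spill at entry of B" << id << "\n";
  // Prints: spill at entry of B1
}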
5213
5214#undef TRACE
5215
5216} // namespace compiler
5217} // namespace internal
5218} // namespace v8
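
As a side note on the gap selection in ConnectRanges (lines 5083-5092 above): a gap position keeps its own instruction index and chooses the START or END parallel move directly, an instruction-start position stays at the same gap but is delayed into the END set, and an instruction-end position falls through to the START set of the following gap. The sketch below restates that mapping as a pure function; the Pos encoding and names are illustrative only, not V8's LifetimePosition.

// Illustrative restatement of the gap-position selection in ConnectRanges:
// decide which instruction gap and which parallel-move set (START or END)
// receives the connecting move, and whether insertion must be delayed.
#include <cassert>

enum class GapSet { kStart, kEnd };

struct Pos {
  int instruction_index;
  bool is_gap;    // gap half vs. instruction half of the slot
  bool is_start;  // first vs. second half
};

struct Placement {
  int gap_index;
  GapSet set;
  bool delayed;  // must be inserted after already-scheduled moves
};

Placement PlaceConnectingMove(Pos pos) {
  Placement p{pos.instruction_index, GapSet::kStart, false};
  if (pos.is_gap) {
    // Gap positions use their own gap, picking START or END directly.
    p.set = pos.is_start ? GapSet::kStart : GapSet::kEnd;
  } else if (pos.is_start) {
    // Instruction-start: stay in the same gap's END set, but delay insertion.
    p.set = GapSet::kEnd;
    p.delayed = true;
  } else {
    // Instruction-end: fall through to the START set of the next gap.
    p.gap_index++;
  }
  return p;
}

int main() {
  assert(PlaceConnectingMove({7, true, true}).set == GapSet::kStart);
  Placement d = PlaceConnectingMove({7, false, true});
  assert(d.set == GapSet::kEnd && d.delayed);
  assert(PlaceConnectingMove({7, false, false}).gap_index == 8);
  return 0;
}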