v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
register-allocator.h
1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_COMPILER_BACKEND_REGISTER_ALLOCATOR_H_
6#define V8_COMPILER_BACKEND_REGISTER_ALLOCATOR_H_
7
8#include "src/base/bits.h"
11#include "src/common/globals.h"
14#include "src/flags/flags.h"
15#include "src/utils/ostreams.h"
18
19namespace v8 {
20namespace internal {
21
22class TickCounter;
23
24namespace compiler {
25
27
28// This class represents a single point of an InstructionOperand's lifetime. For
29// each instruction there are four lifetime positions:
30//
31// [[START, END], [START, END]]
32//
33// Where the first half position corresponds to
34//
35// [GapPosition::START, GapPosition::END]
36//
37// and the second half position corresponds to
38//
39// [Lifetime::USED_AT_START, Lifetime::USED_AT_END]
40//
41class LifetimePosition final {
42 public:
43 // Return the lifetime position that corresponds to the beginning of
44 // the gap with the given index.
46 return LifetimePosition(index * kStep);
47 }
48 // Return the lifetime position that corresponds to the beginning of
49 // the instruction with the given index.
53
55 LifetimePosition pos2) {
56 if (pos1 > pos2) std::swap(pos1, pos2);
57 LifetimePosition next(pos1.value_ + 1);
58 if (next.IsGapPosition()) return next < pos2;
59 return next.NextFullStart() < pos2;
60 }
61
62 // Returns a numeric representation of this lifetime position.
63 int value() const { return value_; }
64
65 // Returns the index of the instruction to which this lifetime position
66 // corresponds.
67 int ToInstructionIndex() const {
68 DCHECK(IsValid());
69 return value_ / kStep;
70 }
71
72 // Returns true if this lifetime position corresponds to a START value
73 bool IsStart() const { return (value_ & (kHalfStep - 1)) == 0; }
74 // Returns true if this lifetime position corresponds to an END value
75 bool IsEnd() const { return (value_ & (kHalfStep - 1)) == 1; }
76 // Returns true if this lifetime position corresponds to a gap START value
77 bool IsFullStart() const { return (value_ & (kStep - 1)) == 0; }
78
79 bool IsGapPosition() const { return (value_ & 0x2) == 0; }
80 bool IsInstructionPosition() const { return !IsGapPosition(); }
81
82 // Returns the lifetime position for the current START.
84 DCHECK(IsValid());
85 return LifetimePosition(value_ & ~(kHalfStep - 1));
86 }
87
88 // Returns the lifetime position for the current gap START.
90 DCHECK(IsValid());
91 return LifetimePosition(value_ & ~(kStep - 1));
92 }
93
94 // Returns the lifetime position for the current END.
96 DCHECK(IsValid());
97 return LifetimePosition(Start().value_ + kHalfStep / 2);
98 }
99
100 // Returns the lifetime position for the beginning of the next START.
105
106 // Returns the lifetime position for the beginning of the next gap START.
111
112 // Returns the lifetime position for the beginning of the previous START.
118
119 // Constructs the lifetime position which does not correspond to any
120 // instruction.
122
123 // Returns true if this lifetime position corresponds to some
124 // instruction.
125 bool IsValid() const { return value_ != -1; }
126
127 bool operator<(const LifetimePosition& that) const {
128 return this->value_ < that.value_;
129 }
130
131 bool operator<=(const LifetimePosition& that) const {
132 return this->value_ <= that.value_;
133 }
134
135 bool operator==(const LifetimePosition& that) const {
136 return this->value_ == that.value_;
137 }
138
139 bool operator!=(const LifetimePosition& that) const {
140 return this->value_ != that.value_;
141 }
142
143 bool operator>(const LifetimePosition& that) const {
144 return this->value_ > that.value_;
145 }
146
147 bool operator>=(const LifetimePosition& that) const {
148 return this->value_ >= that.value_;
149 }
150
151 // APIs to aid debugging. For general-stream APIs, use operator<<.
152 void Print() const;
153
154 static inline LifetimePosition Invalid() { return LifetimePosition(); }
155
157 // We have to use this kind of getter instead of a static member due to a
158 // crash bug in GDB.
160 }
161
162 static inline LifetimePosition FromInt(int value) {
163 return LifetimePosition(value);
164 }
165
166 private:
167 static const int kHalfStep = 2;
168 static const int kStep = 2 * kHalfStep;
169
170 static_assert(base::bits::IsPowerOfTwo(kHalfStep),
171 "Code relies on kStep and kHalfStep being a power of two");
172
173 explicit LifetimePosition(int value) : value_(value) {}
174
176};
177
178inline std::ostream& operator<<(std::ostream& os, const LifetimePosition pos) {
179 os << '@' << pos.ToInstructionIndex();
180 if (pos.IsGapPosition()) {
181 os << 'g';
182 } else {
183 os << 'i';
184 }
185 if (pos.IsStart()) {
186 os << 's';
187 } else {
188 os << 'e';
189 }
190 return os;
191}
192
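// Example (a minimal sketch, using the kHalfStep == 2 / kStep == 4 encoding
// above; the body of InstructionFromInstructionIndex is elided in this
// listing and is assumed to mirror GapFromInstructionIndex, i.e.
// index * kStep + kHalfStep):
//
//   LifetimePosition g = LifetimePosition::GapFromInstructionIndex(3);
//   // g.value() == 12, g.IsGapPosition(), g.IsFullStart(),
//   // g.ToInstructionIndex() == 3; printed as "@3gs" by operator<< above.
//   LifetimePosition i = LifetimePosition::InstructionFromInstructionIndex(3);
//   // i.value() == 14, i.IsInstructionPosition(), i.IsStart();
//   // i.End().value() == 15 and i.End().IsEnd(); printed as "@3is" / "@3ie".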
193class SpillRange;
194class LiveRange;
195class TopLevelLiveRange;
196
197class RegisterAllocationData final : public ZoneObject {
198 public:
201
202 // Encodes whether a spill happens in deferred code (kSpillDeferred) or
203 // regular code (kSpillAtDefinition).
205
206 static constexpr int kNumberOfFixedRangesPerRegister = 2;
207
208 class PhiMapValue : public ZoneObject {
209 public:
210 PhiMapValue(PhiInstruction* phi, const InstructionBlock* block, Zone* zone);
211
212 const PhiInstruction* phi() const { return phi_; }
213 const InstructionBlock* block() const { return block_; }
214
215 // For hinting.
216 int assigned_register() const { return assigned_register_; }
217 void set_assigned_register(int register_code) {
219 assigned_register_ = register_code;
220 }
222
223 void AddOperand(InstructionOperand* operand);
224 void CommitAssignment(const InstructionOperand& operand);
225
226 private:
231 };
233
241
245 const char* debug_name = nullptr);
246
248 return live_ranges_;
249 }
278 InstructionSequence* code() const { return code_; }
279 // This zone is for data structures only needed during register allocation
280 // phases.
282 // This zone is for InstructionOperands and moves that live beyond register
283 // allocation.
284 Zone* code_zone() const { return code()->zone(); }
285 Frame* frame() const { return frame_; }
286 const char* debug_name() const { return debug_name_; }
287 const RegisterConfiguration* config() const { return config_; }
288
289 MachineRepresentation RepresentationFor(int virtual_register);
290
292 // Creates a new live range.
294
296 SpillMode spill_mode);
298
300 const InstructionOperand& from,
301 const InstructionOperand& to);
302
305
306 void MarkFixedUse(MachineRepresentation rep, int index);
307 bool HasFixedUse(MachineRepresentation rep, int index);
308
309 void MarkAllocated(MachineRepresentation rep, int index);
310
312 PhiInstruction* phi);
314 PhiMapValue* GetPhiMapValueFor(int virtual_register);
316
320
322 const ZoneVector<LiveRange*>& state) {
323 spill_state_[block.ToSize()] = state;
324 }
325
327 auto& result = spill_state_[block.ToSize()];
328 return result;
329 }
330
332 for (auto& state : spill_state_) {
333 state.clear();
334 }
335 }
336
338
342
343 private:
345 Frame* const frame_;
347 const char* const debug_name_;
369};
370
371// Representation of the non-empty interval [start,end[.
372// This is a value class given that it only contains two (32-bit) positions.
373class UseInterval final {
374 public:
379
380 LifetimePosition start() const { return start_; }
385 LifetimePosition end() const { return end_; }
388 end_ = end;
389 }
390
391 // Split this interval at the given position without affecting the
392 // live range that owns it. The interval must contain the position.
394 DCHECK(Contains(pos) && pos != start());
395 UseInterval after(pos, end_);
396 end_ = pos;
397 return after;
398 }
399
400 // If this interval intersects with `other`, returns the smallest position
401 // that belongs to both of them.
403 LifetimePosition intersection_start = std::max(start_, other.start_);
404 LifetimePosition intersection_end = std::min(end_, other.end_);
405 if (intersection_start < intersection_end) return intersection_start;
407 }
408
409 bool Contains(LifetimePosition point) const {
410 return start_ <= point && point < end_;
411 }
412
413 // Returns the index of the first gap covered by this interval.
414 int FirstGapIndex() const {
415 int ret = start_.ToInstructionIndex();
417 ++ret;
418 }
419 return ret;
420 }
421
422 // Returns the index of the last gap covered by this interval.
423 int LastGapIndex() const {
424 int ret = end_.ToInstructionIndex();
425 if (end_.IsGapPosition() && end_.IsStart()) {
426 --ret;
427 }
428 return ret;
429 }
430
431 bool operator==(const UseInterval& other) const {
432 return std::tie(start_, end_) == std::tie(other.start_, other.end_);
433 }
434 bool operator!=(const UseInterval& other) const { return !(*this == other); }
435
436 bool operator<(const UseInterval& other) const {
437 return start_ < other.start_;
438 }
439
440 void PrettyPrint(std::ostream& os) const {
441 os << '[' << start() << ", " << end() << ')';
442 }
443
444 private:
447};
448
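// Example (a minimal sketch of the half-open interval semantics above,
// assuming a UseInterval(start, end) constructor, whose declaration is
// elided in this listing):
//
//   UseInterval a(LifetimePosition::GapFromInstructionIndex(2),
//                 LifetimePosition::GapFromInstructionIndex(4));  // [@2gs, @4gs)
//   UseInterval b(LifetimePosition::GapFromInstructionIndex(3),
//                 LifetimePosition::GapFromInstructionIndex(6));  // [@3gs, @6gs)
//   a.Contains(LifetimePosition::GapFromInstructionIndex(3));     // true
//   a.Intersect(b);                      // @3gs, the first common position
//   UseInterval tail = a.SplitAt(LifetimePosition::GapFromInstructionIndex(3));
//   // a is now [@2gs, @3gs) and tail is [@3gs, @4gs); the owning live range
//   // is not updated by SplitAt itself.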
455
456enum class UsePositionHintType : uint8_t {
457 kNone,
458 kOperand,
459 kUsePos,
460 kPhi,
462};
463
464// Representation of a use position.
466 : public NON_EXPORTED_BASE(ZoneObject) {
467 public:
469 UsePositionHintType hint_type);
470 UsePosition(const UsePosition&) = delete;
472
474 bool HasOperand() const { return operand_ != nullptr; }
475
476 bool RegisterIsBeneficial() const {
477 return RegisterBeneficialField::decode(flags_);
478 }
479 bool SpillDetrimental() const {
480 return SpillDetrimentalField::decode(flags_);
481 }
482
483 UsePositionType type() const { return TypeField::decode(flags_); }
484 void set_type(UsePositionType type, bool register_beneficial);
485
486 LifetimePosition pos() const { return pos_; }
487
488 // For hinting only.
489 void set_assigned_register(int register_code) {
490 flags_ = AssignedRegisterField::update(flags_, register_code);
491 }
493 flags_ = SpillDetrimentalField::update(flags_, true);
494 }
495
497 return HintTypeField::decode(flags_);
498 }
499 bool HasHint() const;
500 bool HintRegister(int* register_code) const;
501 void SetHint(UsePosition* use_pos);
502 void ResolveHint(UsePosition* use_pos);
503 bool IsResolved() const {
504 return hint_type() != UsePositionHintType::kUnresolved;
505 }
506 static UsePositionHintType HintTypeForOperand(const InstructionOperand& op);
507
508 struct Ordering {
509 bool operator()(const UsePosition* left, const UsePosition* right) const {
510 return left->pos() < right->pos();
511 }
512 };
513
514 private:
520
522 void* hint_;
524 uint32_t flags_;
525};
526
527class SpillRange;
529class LiveRangeBundle;
530
532
533// A data structure that:
534// - Allocates its elements in the Zone.
535// - Has O(1) random access.
536// - Inserts at the front are O(1) (asymptotically).
537// - Can be split efficiently into two halves, and merged again efficiently
538// if those were not modified in the meantime.
539// - Has empty storage at the front and back, such that split halves both
540// can perform inserts without reallocating.
541template <typename T>
543 public:
544 using value_type = T;
545 using iterator = T*;
546 using const_iterator = const T*;
547
548 // This allows us to skip calling destructors and use simple copies,
549 // which is sufficient for the exclusive use here in the register allocator.
551 static_assert(std::is_trivially_destructible<T>::value);
552
553 size_t size() const { return data_end_ - data_begin_; }
554 bool empty() const { return size() == 0; }
555 size_t capacity() const { return storage_end_ - storage_begin_; }
556
557 T* data() const { return data_begin_; }
558
560
561 T& operator[](size_t position) {
563 return data_begin_[position];
564 }
565 const T& operator[](size_t position) const {
567 return data_begin_[position];
568 }
569
571 const_iterator begin() const { return data_begin_; }
572 iterator end() { return data_end_; }
573 const_iterator end() const { return data_end_; }
574
575 T& front() {
576 DCHECK(!empty());
577 return *begin();
578 }
579 const T& front() const {
580 DCHECK(!empty());
581 return *begin();
582 }
583 T& back() {
584 DCHECK(!empty());
585 return *std::prev(end());
586 }
587 const T& back() const {
588 DCHECK(!empty());
589 return *std::prev(end());
590 }
591
592 void push_front(Zone* zone, const T& value) {
594 --data_begin_;
596 }
597 void pop_front() {
598 DCHECK(!empty());
599 ++data_begin_;
600 }
601
602 // This can be configured to arrange the data in the middle of the backing
603 // store (`kFrontOrBack`, default), or at the end of the backing store, if
604 // subsequent inserts are mostly at the front (`kFront`).
605 template <GrowthDirection direction = kFrontOrBack>
606 iterator insert(Zone* zone, const_iterator position, const T& value) {
609 size_t old_size = size();
610
611 size_t insert_index = position - data_begin_;
613
614 // Make space for the insertion.
615 // Copy towards whichever end has more remaining space, so that over time
616 // the data stays roughly centered, which is beneficial in case of splitting.
617 if (direction == kFront || space_at_front() >= space_at_back()) {
618 // Copy to the left.
620 T* copy_src_begin = data_begin_;
621 T* copy_src_end = data_begin_ + insert_index;
622 --data_begin_;
623 std::copy(copy_src_begin, copy_src_end, data_begin_);
624 } else {
625 // Copy to the right.
627 T* copy_src_begin = data_begin_ + insert_index;
628 T* copy_src_end = data_end_;
629 ++data_end_;
630 std::copy_backward(copy_src_begin, copy_src_end, data_end_);
631 }
632
633 T* insert_position = data_begin_ + insert_index;
634 *insert_position = value;
635
636#ifdef DEBUG
637 Verify();
638#endif
639 DCHECK_LE(begin(), insert_position);
640 DCHECK_LT(insert_position, end());
641 DCHECK_EQ(size(), old_size + 1);
642 USE(old_size);
643
644 return insert_position;
645 }
646
647 // Returns a split-off vector from `split_begin` to `end()`.
648 // Afterwards, `this` ends just before `split_begin`.
649 // This does not allocate; it instead splits the backing store in two halves.
651 iterator split_begin = const_cast<iterator>(split_begin_const);
652
653 DCHECK_LE(data_begin_, split_begin);
654 DCHECK_LE(split_begin, data_end_);
655 size_t old_size = size();
656
657 // NOTE: The split-off allocation might no longer fulfill the alignment
658 // requirements of the Zone allocator, so do not delete it!
659 DoubleEndedSplitVector split_off;
660 split_off.storage_begin_ = split_begin;
661 split_off.data_begin_ = split_begin;
662 split_off.data_end_ = data_end_;
663 split_off.storage_end_ = storage_end_;
664 data_end_ = split_begin;
665 storage_end_ = split_begin;
666
667#ifdef DEBUG
668 Verify();
669 split_off.Verify();
670#endif
671 DCHECK_EQ(size() + split_off.size(), old_size);
672 USE(old_size);
673
674 return split_off;
675 }
676
677 // Appends the elements from `other` after the end of `this`.
678 // In particular if `other` is directly adjacent to `this`, it does not
679 // allocate or copy.
681 if (data_end_ == other.data_begin_) {
682 // The elements of `other` are directly adjacent to ours, so just extend
683 // our storage to encompass them.
684 // This could happen if `other` comes from an earlier `this->SplitAt()`.
685 // For the usage here in the register allocator, this is always the case.
686 DCHECK_EQ(other.storage_begin_, other.data_begin_);
688 data_end_ = other.data_end_;
689 storage_end_ = other.storage_end_;
690 return;
691 }
692
693 // General case: Copy into newly allocated vector.
694 // TODO(dlehmann): One could check if `this` or `other` has enough capacity
695 // such that one can avoid the allocation, but currently we never reach
696 // this path anyway.
698 size_t merged_size = this->size() + other.size();
699 result.GrowAt<kFront>(zone, merged_size);
700
701 result.data_begin_ -= merged_size;
702 std::copy(this->begin(), this->end(), result.data_begin_);
703 std::copy(other.begin(), other.end(), result.data_begin_ + this->size());
704 DCHECK_EQ(result.data_begin_ + merged_size, result.data_end_);
705
706 *this = std::move(result);
707
708#ifdef DEBUG
709 Verify();
710#endif
711 DCHECK_EQ(size(), merged_size);
712 }
713
714 private:
715 static constexpr size_t kMinCapacity = 2;
716
717 size_t space_at_front() const { return data_begin_ - storage_begin_; }
718 size_t space_at_back() const { return storage_end_ - data_end_; }
719
720 template <GrowthDirection direction>
722 if constexpr (direction == kFront) {
723 if (V8_LIKELY(space_at_front() > 0)) return;
724 GrowAt<kFront>(zone, capacity() * 2);
726 } else {
727 if (V8_LIKELY(space_at_front() > 0 || space_at_back() > 0)) return;
728 GrowAt<kFrontOrBack>(zone, capacity() * 2);
729 DCHECK(space_at_front() > 0 || space_at_back() > 0);
730 }
731 }
732
733 template <GrowthDirection direction>
735 size_t new_minimum_capacity) {
736 DoubleEndedSplitVector<T> old = std::move(*this);
737
738 size_t new_capacity = std::max(kMinCapacity, new_minimum_capacity);
739 storage_begin_ = zone->AllocateArray<T>(new_capacity);
740 storage_end_ = storage_begin_ + new_capacity;
741
742 size_t remaining_capacity = new_capacity - old.size();
743 size_t remaining_capacity_front =
744 direction == kFront ? remaining_capacity : remaining_capacity / 2;
745
746 data_begin_ = storage_begin_ + remaining_capacity_front;
747 data_end_ = data_begin_ + old.size();
748 std::copy(old.begin(), old.end(), data_begin_);
749
750#ifdef DEBUG
751 Verify();
752#endif
753 DCHECK_EQ(size(), old.size());
754 }
755
756#ifdef DEBUG
757 void Verify() const {
761 }
762#endif
763
764 // Do not store a pointer to the `Zone` to save memory when there are very
765 // many `LiveRange`s (which each contain this vector twice).
766 // It makes the API a bit cumbersome, because the Zone has to be explicitly
767 // passed around, but it is worth the 1-3% reduction in max zone memory.
768
769 T* storage_begin_ = nullptr;
770 T* data_begin_ = nullptr;
771 T* data_end_ = nullptr;
772 T* storage_end_ = nullptr;
773};
774
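// Example (a minimal sketch, assuming a `Zone* zone` is available and using
// only the operations declared above):
//
//   DoubleEndedSplitVector<int> v;
//   v.insert(zone, v.begin(), 1);
//   v.insert(zone, v.end(), 2);          // v == {1, 2}
//   v.push_front(zone, 0);               // v == {0, 1, 2}; front inserts are cheap
//   DoubleEndedSplitVector<int> tail = v.SplitAt(v.begin() + 1);
//   // v == {0}, tail == {1, 2}; the backing store is split, nothing is copied.
//   v.Append(zone, std::move(tail));     // adjacent halves are re-joined in place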
777
778// Representation of SSA values' live ranges as a collection of (continuous)
779// intervals over the instruction ordering.
781 public:
782 LiveRange(const LiveRange&) = delete;
783 LiveRange& operator=(const LiveRange&) = delete;
784
785 const UseIntervalVector& intervals() const { return intervals_; }
786 base::Vector<UsePosition*> positions() const { return positions_span_; }
787
788 TopLevelLiveRange* TopLevel() { return top_level_; }
789 const TopLevelLiveRange* TopLevel() const { return top_level_; }
790
791 bool IsTopLevel() const;
792
793 LiveRange* next() const { return next_; }
794
795 int relative_id() const { return relative_id_; }
796
797 bool IsEmpty() const { return intervals_.empty(); }
798
799 InstructionOperand GetAssignedOperand() const;
800
802 return RepresentationField::decode(bits_);
803 }
804
805 int assigned_register() const { return AssignedRegisterField::decode(bits_); }
806 bool HasRegisterAssigned() const {
807 return assigned_register() != kUnassignedRegister;
808 }
809 void set_assigned_register(int reg);
810 void UnsetAssignedRegister();
811
812 bool ShouldRecombine() const { return RecombineField::decode(bits_); }
813
814 void SetRecombine() { bits_ = RecombineField::update(bits_, true); }
816 bits_ = ControlFlowRegisterHint::update(bits_, reg);
817 }
818 int controlflow_hint() const {
819 return ControlFlowRegisterHint::decode(bits_);
820 }
822 int hint = controlflow_hint();
823 if (hint != kUnassignedRegister) {
824 *reg = hint;
825 return true;
826 }
827 return false;
828 }
829 bool spilled() const { return SpilledField::decode(bits_); }
830 void AttachToNext(Zone* zone);
831 void Unspill();
832 void Spill();
833
834 RegisterKind kind() const;
835
836 // Returns the use position in this live range that follows both start
837 // and the last processed use position.
838 UsePosition* const* NextUsePosition(LifetimePosition start) const;
839
840 // Returns the use position for which a register is required in this live
841 // range and which follows both start and the last processed use position.
842 UsePosition* NextRegisterPosition(LifetimePosition start) const;
843
844 // Returns the use position for which a register is beneficial in this live
845 // range and which follows both start and the last processed use position.
846 UsePosition* NextUsePositionRegisterIsBeneficial(
847 LifetimePosition start) const;
848
849 // Returns the lifetime position for which a register is beneficial in this
850 // live range and which follows both start and the last processed use position.
851 LifetimePosition NextLifetimePositionRegisterIsBeneficial(
852 const LifetimePosition& start) const;
853
854 // Returns the use position for which spilling is detrimental in this live
855 // range and which follows both start and the last processed use position.
856 UsePosition* NextUsePositionSpillDetrimental(LifetimePosition start) const;
857
858 // Can this live range be spilled at this position?
859 bool CanBeSpilled(LifetimePosition pos) const;
860
861 // Splits this live range and links the resulting ranges together.
862 // Returns the child, which starts at position.
863 // All uses following the given position will be moved from this
864 // live range to the result live range.
865 // The current range will terminate at position, while result will start from
866 // position.
867 LiveRange* SplitAt(LifetimePosition position, Zone* zone);
868
869 // Returns false when no register is hinted, otherwise sets register_index.
870 // Uses {current_hint_position_} as a cache, and tries to update it.
871 bool RegisterFromFirstHint(int* register_index);
872
874 return positions_span_[current_hint_position_index_];
875 }
876
878 DCHECK(!IsEmpty());
879 DCHECK_EQ(start_, intervals_.front().start());
880 return start_;
881 }
882
884 DCHECK(!IsEmpty());
885 DCHECK_EQ(end_, intervals_.back().end());
886 return end_;
887 }
888
889 bool ShouldBeAllocatedBefore(const LiveRange* other) const;
890 bool CanCover(LifetimePosition position) const;
891 bool Covers(LifetimePosition position);
894 LifetimePosition FirstIntersection(LiveRange* other);
895 LifetimePosition NextStart() const { return next_start_; }
896
897#ifdef DEBUG
898 void VerifyChildStructure() const {
899 VerifyIntervals();
900 VerifyPositions();
901 }
902#endif
903
904 void ConvertUsesToOperand(const InstructionOperand& op,
905 const InstructionOperand& spill_op);
906 void SetUseHints(int register_index);
907 void UnsetUseHints() { SetUseHints(kUnassignedRegister); }
908 void ResetCurrentHintPosition() { current_hint_position_index_ = 0; }
909
910 void Print(const RegisterConfiguration* config, bool with_children) const;
911 void Print(bool with_children) const;
912
913 bool RegisterFromBundle(int* hint) const;
914 void UpdateBundleRegister(int reg) const;
915
916 private:
917 friend class TopLevelLiveRange;
918 friend Zone;
919
920 explicit LiveRange(int relative_id, MachineRepresentation rep,
921 TopLevelLiveRange* top_level);
922
923 void set_spilled(bool value) { bits_ = SpilledField::update(bits_, value); }
924
925 UseIntervalVector::iterator FirstSearchIntervalForPosition(
927 void AdvanceLastProcessedMarker(UseIntervalVector::iterator to_start_of,
928 LifetimePosition but_not_past);
929
930#ifdef DEBUG
931 void VerifyPositions() const;
932 void VerifyIntervals() const;
933#endif
934
936 // Bits (1,7[ are used by TopLevelLiveRange.
941 // Bits 28-31 are used by TopLevelLiveRange.
942
943 // Unique among children of the same virtual register.
945 uint32_t bits_;
946
948 // This is a view into the `positions_` owned by the `TopLevelLiveRange`.
949 // This allows cheap splitting and merging of `LiveRange`s.
951
953 // TODO(dlehmann): Remove linked list fully and instead use only the
954 // `TopLevelLiveRange::children_` vector. This requires API changes to
955 // `SplitAt` and `AttachToNext`, as they need access to a vector iterator.
957
958 // This is used as a cache in `FirstSearchIntervalForPosition`.
960 // This is used as a cache in `BuildLiveRanges` and during register
961 // allocation.
962 size_t current_hint_position_index_ = 0;
963
964 // Next interval start, relative to the current linear scan position.
966
967 // Just a cache for `Start()` and `End()` that improves locality
968 // (i.e., one less pointer indirection).
971};
972
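// Example (a minimal sketch, assuming a `Zone* zone` and a LiveRange* `range`
// that covers instructions 2 through 10):
//
//   LifetimePosition split = LifetimePosition::GapFromInstructionIndex(5);
//   LiveRange* tail = range->SplitAt(split, zone);
//   // `range` now ends at @5gs and `tail` starts there; use positions after
//   // the split move to `tail`, range->next() == tail, and both children
//   // still share the same TopLevel() range.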
974 bool operator()(const LiveRange* left, const LiveRange* right) const {
975 return left->Start() < right->Start();
976 }
977};
978// Bundle live ranges that are connected by phis and do not overlap. This tries
979// to restore some pre-SSA information and is used as a hint to allocate the
980// same spill slot or reuse the same register for connected live ranges.
982 public:
983 explicit LiveRangeBundle(Zone* zone, int id)
984 : ranges_(zone), intervals_(zone), id_(id) {}
985
986 int id() const { return id_; }
987
988 int reg() const { return reg_; }
989 void set_reg(int reg) {
991 reg_ = reg;
992 }
993
995 bool TryAddRange(TopLevelLiveRange* range);
996 // If merging is possible, merge either {lhs} into {rhs} or {rhs} into
997 // {lhs}, clear the source and return the result. Otherwise return nullptr.
999
1000 private:
1001 void AddRange(TopLevelLiveRange* range);
1002
1003 // A flat set, sorted by `LiveRangeOrdering`.
1005 // A flat set, sorted by their `start()` position.
1007
1008 int id_;
1010};
1011
1012// Register allocation splits LiveRanges so it can make more fine-grained
1013// allocation and spilling decisions. The LiveRanges that belong to the same
1014// virtual register form a linked-list, and the head of this list is a
1015// TopLevelLiveRange.
1017 public:
1018 explicit TopLevelLiveRange(int vreg, MachineRepresentation rep, Zone* zone);
1021
1022 int spill_start_index() const { return spill_start_index_; }
1023
1024 bool IsFixed() const { return vreg_ < 0; }
1025
1026 bool IsDeferredFixed() const { return DeferredFixedField::decode(bits_); }
1027 void set_deferred_fixed() { bits_ = DeferredFixedField::update(bits_, true); }
1028 bool is_phi() const { return IsPhiField::decode(bits_); }
1029 void set_is_phi(bool value) { bits_ = IsPhiField::update(bits_, value); }
1030
1031 bool is_non_loop_phi() const { return IsNonLoopPhiField::decode(bits_); }
1032 bool is_loop_phi() const { return is_phi() && !is_non_loop_phi(); }
1033 void set_is_non_loop_phi(bool value) {
1034 bits_ = IsNonLoopPhiField::update(bits_, value);
1035 }
1037 return SpillAtLoopHeaderNotBeneficialField::decode(bits_);
1038 }
1040 bits_ = SpillAtLoopHeaderNotBeneficialField::update(bits_, true);
1041 }
1042
1043 enum SlotUseKind { kNoSlotUse, kDeferredSlotUse, kGeneralSlotUse };
1044
1045 bool has_slot_use() const {
1046 return slot_use_kind() > SlotUseKind::kNoSlotUse;
1047 }
1048
1050 return slot_use_kind() == SlotUseKind::kGeneralSlotUse;
1051 }
1052
1054 bits_ = HasSlotUseField::update(bits_, SlotUseKind::kNoSlotUse);
1055 }
1057 bits_ = HasSlotUseField::update(bits_, std::max(slot_use_kind(), value));
1058 }
1059 SlotUseKind slot_use_kind() const { return HasSlotUseField::decode(bits_); }
1060
1061 // Add a new interval or a new use position to this live range.
1062 void EnsureInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
1063 void AddUseInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
1064 void AddUsePosition(UsePosition* pos, Zone* zone);
1065
1066 // Shorten the most recently added interval by setting a new start.
1067 void ShortenTo(LifetimePosition start);
1068
1069 // Spill range management.
1070 void SetSpillRange(SpillRange* spill_range);
1071
1072 // Encodes whether a range is also available from a memory location:
1073 // kNoSpillType: not available in a memory location.
1074 // kSpillOperand: computed in a memory location at range start.
1075 // kSpillRange: copied (spilled) to memory location at the definition,
1076 // or at the beginning of some later blocks if
1077 // LateSpillingSelected() is true.
1078 // kDeferredSpillRange: copied (spilled) to memory location at entry
1079 // to deferred blocks that have a use from memory.
1080 //
1081 // Ranges either start out at kSpillOperand, which is also their final
1082 // state, or kNoSpillType. When spilled only in deferred code, a range
1083 // ends up with kDeferredSpillRange, while when spilled in regular code,
1084 // a range will be tagged as kSpillRange.
1085 enum class SpillType {
1086 kNoSpillType,
1087 kSpillOperand,
1088 kSpillRange,
1089 kDeferredSpillRange
1090 };
1092 bits_ = SpillTypeField::update(bits_, value);
1093 }
1094 SpillType spill_type() const { return SpillTypeField::decode(bits_); }
1096 DCHECK_EQ(SpillType::kSpillOperand, spill_type());
1097 return spill_operand_;
1098 }
1099
1101 DCHECK_NE(SpillType::kSpillOperand, spill_type());
1102 return spill_range_;
1103 }
1104
1106 DCHECK_GE(spill_type(), SpillType::kSpillRange);
1107 return spill_range_;
1108 }
1109 bool HasNoSpillType() const {
1110 return spill_type() == SpillType::kNoSpillType;
1111 }
1112 bool HasSpillOperand() const {
1113 return spill_type() == SpillType::kSpillOperand;
1114 }
1115 bool HasSpillRange() const { return spill_type() >= SpillType::kSpillRange; }
1117 return spill_type() == SpillType::kSpillRange;
1118 }
1119 AllocatedOperand GetSpillRangeOperand() const;
1120
1121 void RecordSpillLocation(Zone* zone, int gap_index,
1122 InstructionOperand* operand);
1123 void SetSpillOperand(InstructionOperand* operand);
1125 spill_start_index_ = std::min(start, spill_start_index_);
1126 }
1127
1128 // Omits any moves from spill_move_insertion_locations_ that can be skipped.
1129 void FilterSpillMoves(RegisterAllocationData* data,
1130 const InstructionOperand& operand);
1131
1132 // Writes all moves from spill_move_insertion_locations_ to the schedule.
1133 void CommitSpillMoves(RegisterAllocationData* data,
1134 const InstructionOperand& operand);
1135
1136 // If all the children of this range are spilled in deferred blocks, and if
1137 // for any non-spilled child with a use position requiring a slot, that range
1138 // is contained in a deferred block, mark the range as
1139 // IsSpilledOnlyInDeferredBlocks, so that we avoid spilling at definition,
1140 // and instead let the LiveRangeConnector perform the spills within the
1141 // deferred blocks. If so, we insert spills here for non-spilled ranges
1142 // with slot use positions.
1144 spill_start_index_ = -1;
1145 spilled_in_deferred_blocks_ = true;
1146 spill_move_insertion_locations_ = nullptr;
1147 list_of_blocks_requiring_spill_operands_ = zone->New<SparseBitVector>(zone);
1148 }
1149
1150 // Updates internal data structures to reflect that this range is not
1151 // spilled at definition but instead spilled in some blocks only.
1153 spill_start_index_ = -1;
1154 spill_move_insertion_locations_ = nullptr;
1155 list_of_blocks_requiring_spill_operands_ = zone->New<SparseBitVector>(zone);
1156 }
1157
1158 // Promotes this range to spill at definition if it was marked for spilling
1159 // in deferred blocks before.
1161 DCHECK_NOT_NULL(spill_move_insertion_locations_);
1162 if (spill_type() == SpillType::kDeferredSpillRange) {
1163 set_spill_type(SpillType::kSpillRange);
1164 }
1165 }
1166
1168 return !HasSpillOperand() && spill_range_ == nullptr;
1169 }
1171 int vreg() const { return vreg_; }
1172
1173#ifdef DEBUG
1174 void Verify() const;
1175 void VerifyChildrenInOrder() const;
1176#endif
1177
1178 // Returns the child `LiveRange` covering the given position, or `nullptr`
1179 // if no such range exists. Uses a binary search.
1180 LiveRange* GetChildCovers(LifetimePosition pos);
1181
1182 const ZoneVector<LiveRange*>& Children() const { return children_; }
1183
1184 int GetNextChildId() { return ++last_child_id_; }
1185
1187 return spill_type() == SpillType::kDeferredSpillRange;
1188 }
1189
1190 struct SpillMoveInsertionList;
1191
1193 const RegisterAllocationData* data) const {
1194 DCHECK(!IsSpilledOnlyInDeferredBlocks(data));
1195 return spill_move_insertion_locations_;
1196 }
1197
1198 void MarkHasPreassignedSlot() { has_preassigned_slot_ = true; }
1199 bool has_preassigned_slot() const { return has_preassigned_slot_; }
1200
1201 // Late spilling refers to spilling at places after the definition. These
1202 // spills are guaranteed to cover at least all of the sub-ranges where the
1203 // register allocator chose to evict the value from a register.
1204 void SetLateSpillingSelected(bool late_spilling_selected) {
1205 DCHECK(spill_type() == SpillType::kSpillRange);
1206 SpillRangeMode new_mode = late_spilling_selected
1207 ? SpillRangeMode::kSpillLater
1208 : SpillRangeMode::kSpillAtDefinition;
1209 // A single TopLevelLiveRange should never be used in both modes.
1210 DCHECK(SpillRangeModeField::decode(bits_) == SpillRangeMode::kNotSet ||
1211 SpillRangeModeField::decode(bits_) == new_mode);
1212 bits_ = SpillRangeModeField::update(bits_, new_mode);
1213 }
1215 // Nobody should be reading this value until it's been decided.
1216 DCHECK_IMPLIES(HasGeneralSpillRange(), SpillRangeModeField::decode(bits_) !=
1217 SpillRangeMode::kNotSet);
1218 return SpillRangeModeField::decode(bits_) == SpillRangeMode::kSpillLater;
1219 }
1220
1222 const RegisterAllocationData* data) {
1223 DCHECK(IsSpilledOnlyInDeferredBlocks(data));
1224 GetListOfBlocksRequiringSpillOperands(data)->Add(block_id.ToInt());
1225 }
1226
1228 const RegisterAllocationData* data) const {
1229 DCHECK(IsSpilledOnlyInDeferredBlocks(data));
1230 return list_of_blocks_requiring_spill_operands_;
1231 }
1232
1233 LiveRangeBundle* get_bundle() const { return bundle_; }
1234 void set_bundle(LiveRangeBundle* bundle) { bundle_ = bundle; }
1235
1236 private:
1237 friend class LiveRange;
1238
1239 // If spill type is kSpillRange, then this value indicates whether we've
1240 // chosen to spill at the definition or at some later points.
1241 enum class SpillRangeMode : uint8_t {
1242 kNotSet,
1243 kSpillAtDefinition,
1244 kSpillLater,
1245 };
1246
1254
1257 union {
1258 // Correct value determined by spill_type()
1261 };
1262
1263 union {
1266 };
1267
1268 LiveRangeBundle* bundle_ = nullptr;
1269
1271
1272 // This is a cache for the binary search in `GetChildCovers`.
1273 // The `LiveRange`s are sorted by their `Start()` position.
1275
1276 // TODO(mtrofin): generalize spilling after definition, currently specialized
1277 // just for spill in a single deferred block.
1280
1282};
1283
1288
1289std::ostream& operator<<(std::ostream& os,
1290 const PrintableLiveRange& printable_range);
1291
1292 // Represents the spill operand of a LiveRange and its use intervals. After
1293// register allocation, disjoint spill ranges are merged and they get assigned
1294// the same spill slot by OperandAssigner::AssignSpillSlots().
1295 // TODO(dlehmann): `SpillRange`s seem awfully similar to `LiveRangeBundle`s,
1296// especially since both store `ranges_` and `intervals_` and check for
1297// intersection in exactly the same way. I wonder if we can merge those two
1298// concepts and save a bunch of memory by not storing ranges and intervals
1299// twice.
1300class SpillRange final : public ZoneObject {
1301 public:
1302 static const int kUnassignedSlot = -1;
1303
1304 SpillRange(TopLevelLiveRange* range, Zone* zone);
1305 SpillRange(const SpillRange&) = delete;
1307
1308 bool IsEmpty() const { return ranges_.empty(); }
1309 bool TryMerge(SpillRange* other);
1310 bool HasSlot() const { return assigned_slot_ != kUnassignedSlot; }
1311
1316 int assigned_slot() const {
1318 return assigned_slot_;
1319 }
1320
1321 // Spill slots can be 4, 8, or 16 bytes wide.
1322 int byte_width() const { return byte_width_; }
1323
1324 void Print() const;
1325
1326 private:
1328 // A flat set, sorted by their `start()` position.
1330
1333};
1334
1335class ConstraintBuilder final : public ZoneObject {
1336 public:
1340
1341 // Phase 1: insert moves to account for fixed register operands.
1343
1344 // Phase 2: deconstruct SSA by inserting moves in successors and the headers
1345 // of blocks containing phis.
1346 void ResolvePhis();
1347
1348 private:
1349 RegisterAllocationData* data() const { return data_; }
1350 InstructionSequence* code() const { return data()->code(); }
1351 Zone* allocation_zone() const { return data()->allocation_zone(); }
1352
1354 bool is_tagged, bool is_input,
1355 bool is_output);
1356 void MeetRegisterConstraints(const InstructionBlock* block);
1357 void MeetConstraintsBefore(int index);
1358 void MeetConstraintsAfter(int index);
1360 const InstructionBlock* block);
1361 void ResolvePhis(const InstructionBlock* block);
1362
1364};
1365
1366class LiveRangeBuilder final : public ZoneObject {
1367 public:
1368 explicit LiveRangeBuilder(RegisterAllocationData* data, Zone* local_zone);
1371
1372 // Phase 3: compute liveness of all virtual registers.
1373 void BuildLiveRanges();
1374 static SparseBitVector* ComputeLiveOut(const InstructionBlock* block,
1376
1377 private:
1379 static constexpr int kNumberOfFixedRangesPerRegister =
1381
1382 RegisterAllocationData* data() const { return data_; }
1383 InstructionSequence* code() const { return data()->code(); }
1384 Zone* allocation_zone() const { return data()->allocation_zone(); }
1385 Zone* code_zone() const { return code()->zone(); }
1386 const RegisterConfiguration* config() const { return data()->config(); }
1388 return data()->live_in_sets();
1389 }
1390
1391#ifdef DEBUG
1392 // Verification.
1393 void Verify() const;
1394 bool IntervalStartsAtBlockBoundary(UseInterval interval) const;
1395 bool IntervalPredecessorsCoveredByRange(UseInterval interval,
1396 TopLevelLiveRange* range) const;
1397 bool NextIntervalStartsInDifferentBlocks(UseInterval interval,
1398 UseInterval next) const;
1399#endif
1400
1401 // Liveness analysis support.
1402 void AddInitialIntervals(const InstructionBlock* block,
1403 SparseBitVector* live_out);
1404 void ProcessInstructions(const InstructionBlock* block,
1405 SparseBitVector* live);
1406 void ProcessPhis(const InstructionBlock* block, SparseBitVector* live);
1407 void ProcessLoopHeader(const InstructionBlock* block, SparseBitVector* live);
1408
1409 static int FixedLiveRangeID(int index) { return -index - 1; }
1410 int FixedFPLiveRangeID(int index, MachineRepresentation rep);
1411 TopLevelLiveRange* FixedLiveRangeFor(int index, SpillMode spill_mode);
1413 SpillMode spill_mode);
1415
1416 void MapPhiHint(InstructionOperand* operand, UsePosition* use_pos);
1417 void ResolvePhiHint(InstructionOperand* operand, UsePosition* use_pos);
1418
1420 void* hint, UsePositionHintType hint_type);
1425 SpillMode spill_mode);
1426 // Helper methods for building intervals.
1428 void* hint, UsePositionHintType hint_type,
1429 SpillMode spill_mode);
1431 SpillMode spill_mode) {
1432 Define(position, operand, nullptr, UsePositionHintType::kNone, spill_mode);
1433 }
1435 InstructionOperand* operand, void* hint,
1436 UsePositionHintType hint_type, SpillMode spill_mode);
1438 InstructionOperand* operand, SpillMode spill_mode) {
1439 Use(block_start, position, operand, nullptr, UsePositionHintType::kNone,
1440 spill_mode);
1441 }
1443 return block->IsDeferred() ? SpillMode::kSpillDeferred
1444 : SpillMode::kSpillAtDefinition;
1445 }
1448};
1449
1450class BundleBuilder final : public ZoneObject {
1451 public:
1452 explicit BundleBuilder(RegisterAllocationData* data) : data_(data) {}
1453
1454 void BuildBundles();
1455
1456 private:
1457 RegisterAllocationData* data() const { return data_; }
1458 InstructionSequence* code() const { return data_->code(); }
1461};
1462
1464 public:
1468
1469 protected:
1471 RegisterAllocationData* data() const { return data_; }
1472 InstructionSequence* code() const { return data()->code(); }
1473 RegisterKind mode() const { return mode_; }
1474 int num_registers() const { return num_registers_; }
1476 const int* allocatable_register_codes() const {
1478 }
1479 // Returns true iff we must check float register aliasing.
1480 bool check_fp_aliasing() const { return check_fp_aliasing_; }
1481
1482 // TODO(mtrofin): explain why splitting in gap START is always OK.
1484 int instruction_index);
1485
1486 Zone* allocation_zone() const { return data()->allocation_zone(); }
1487
1488 // Find the optimal split for ranges defined by a memory operand, e.g.
1489 // constants or function parameters passed on the stack.
1491
1492 // Split the given range at the given position.
1493 // If the range starts at or after the given position, then the
1494 // original range is returned.
1495 // Otherwise returns the live range that starts at pos and contains
1496 // all uses from the original range that follow pos. Uses at pos will
1497 // still be owned by the original range after splitting.
1499
1500 bool CanProcessRange(LiveRange* range) const {
1501 return range != nullptr && !range->IsEmpty() && range->kind() == mode();
1502 }
1503
1504 // Split the given range in a position from the interval [start, end].
1507
1508 // Find a lifetime position in the interval [start, end] which
1509 // is optimal for splitting: it is either the header of the outermost
1510 // loop covered by this interval or the latest possible position.
1513
1514 void Spill(LiveRange* range, SpillMode spill_mode);
1515
1516 // If we are trying to spill a range inside a loop, try to
1517 // hoist the spill position out to the point just before the loop.
1520 SpillMode spill_mode,
1521 LiveRange** begin_spill_out);
1522
1524 const char* RegisterName(int allocation_index) const;
1525
1526 private:
1533
1534 private:
1536};
1537
1538// A map from `TopLevelLiveRange`s to their expected physical register.
1539// Typically this is very small, e.g., on JetStream2 it has 3 elements or less
1540// >50% of the times it is queried, 8 elements or less >90% of the times,
1541// and never more than 15 elements. Hence this is backed by a `SmallZoneMap`.
1543 SmallZoneMap<TopLevelLiveRange*, /* expected_register */ int, 16>;
1544
1546 public:
1548 Zone* local_zone);
1551
1552 // Phase 4: compute register assignments.
1553 void AllocateRegisters();
1554
1555 private:
1556 void MaybeSpillPreviousRanges(LiveRange* begin_range,
1557 LifetimePosition begin_pos,
1558 LiveRange* end_range);
1559 void MaybeUndoPreviousSplit(LiveRange* range, Zone* zone);
1561 LifetimePosition position, SpillMode spill_mode);
1563 void ReloadLiveRanges(RangeRegisterSmallMap const& to_be_live,
1565
1566 void UpdateDeferredFixedRanges(SpillMode spill_mode, InstructionBlock* block);
1568 const InstructionBlock* block);
1570
1572 bool operator()(const LiveRange* a, const LiveRange* b) const {
1573 return a->ShouldBeAllocatedBefore(b);
1574 }
1575 };
1576
1578 bool operator()(const LiveRange* a, const LiveRange* b) const {
1579 return a->NextStart() < b->NextStart();
1580 }
1581 };
1582
1583 // NOTE: We also tried a sorted ZoneVector instead of a `ZoneMultiset`
1584 // (like for `InactiveLiveRangeQueue`), but it does not improve performance
1585 // or max memory usage.
1586 // TODO(dlehmann): Try `std::priority_queue`/`std::make_heap` instead.
1589 // Sorted by `InactiveLiveRangeOrdering`.
1590 // TODO(dlehmann): Try `std::priority_queue`/`std::make_heap` instead.
1599 // At several places in the register allocator we rely on inactive live ranges
1600 // being sorted. Previously, this was always true by using a std::multiset.
1601 // But to improve performance and in particular reduce memory usage, we
1602 // switched to a sorted vector.
1603 // Insert this to ensure we don't violate the sorted assumption, and to
1604 // document where we actually rely on inactive live ranges being sorted.
1610
1611 void SetLiveRangeAssignedRegister(LiveRange* range, int reg);
1612
1613 // Helper methods for updating the live range lists.
1614 void AddToActive(LiveRange* range);
1615 void AddToInactive(LiveRange* range);
1616 void AddToUnhandled(LiveRange* range);
1625
1627
1629
1630 // Helper methods for choosing state after control flow events.
1631
1633 RpoNumber predecessor);
1635 LifetimePosition boundary);
1637 const RangeRegisterSmallMap& to_be_live);
1639 RangeRegisterSmallMap& to_be_live);
1640
1641 // Helper methods for allocating registers.
1642
1643 // Spilling a phi at range start can be beneficial when the phi input is
1644 // already spilled and shares the same spill slot. This function tries to
1645 // guess if spilling the phi is beneficial based on live range bundles and
1646 // spilled phi inputs.
1649 LiveRange* current, int hint_reg,
1651 bool TryAllocateFreeReg(LiveRange* range,
1654 LiveRange* range, base::Vector<const LifetimePosition> free_until_pos);
1655 void GetFPRegisterSet(MachineRepresentation rep, int* num_regs,
1656 int* num_codes, const int** codes) const;
1657 void GetSIMD128RegisterSet(int* num_regs, int* num_codes,
1658 const int** codes) const;
1660 base::Vector<LifetimePosition> free_until_pos);
1661 void ProcessCurrentRange(LiveRange* current, SpillMode spill_mode);
1662 void AllocateBlockedReg(LiveRange* range, SpillMode spill_mode);
1663
1664 // Spill the given live range after position pos.
1665 void SpillAfter(LiveRange* range, LifetimePosition pos, SpillMode spill_mode);
1666
1667 // Spill the given live range after position [start] and up to position [end].
1669 LifetimePosition end, SpillMode spill_mode);
1670
1671 // Spill the given live range after position [start] and up to position [end].
1672 // The range is guaranteed to be spilled at least until position [until].
1675 SpillMode spill_mode);
1676 void SplitAndSpillIntersecting(LiveRange* range, SpillMode spill_mode);
1677
1678 void PrintRangeRow(std::ostream& os, const TopLevelLiveRange* toplevel);
1679
1680 void PrintRangeOverview();
1681
1685
1686 // Approximation of the position at which the set of ranges will change next.
1687 // Used to avoid scanning for updates even if none are present.
1690
1691#ifdef DEBUG
1692 LifetimePosition allocation_finger_;
1693#endif
1694};
1695
1696class OperandAssigner final : public ZoneObject {
1697 public:
1701
1702 // Phase 5: final decision on spilling mode.
1703 void DecideSpillingMode();
1704
1705 // Phase 6: assign spill slots.
1706 void AssignSpillSlots();
1707
1708 // Phase 7: commit assignment.
1709 void CommitAssignment();
1710
1711 private:
1712 RegisterAllocationData* data() const { return data_; }
1713
1715};
1716
1717class ReferenceMapPopulator final : public ZoneObject {
1718 public:
1722
1723 // Phase 10: compute values for pointer maps.
1724 void PopulateReferenceMaps();
1725
1726 private:
1727 RegisterAllocationData* data() const { return data_; }
1728
1729 bool SafePointsAreInOrder() const;
1730
1732};
1733
1734class LiveRangeBoundArray;
1735// Insert moves of the form
1736//
1737// Operand(child_(k+1)) = Operand(child_k)
1738//
1739// where child_k and child_(k+1) are consecutive children of a range (so
1740// child_k->next() == child_(k+1)), and Operand(...) refers to the
1741// assigned operand, be it a register or a slot.
1742class LiveRangeConnector final : public ZoneObject {
1743 public:
1747
1748 // Phase 8: reconnect split ranges with moves, when the control flow
1749 // between the ranges is trivial (no branches).
1750 void ConnectRanges(Zone* local_zone);
1751
1752 // Phase 9: insert moves to connect ranges across basic blocks, when the
1753 // control flow between them cannot be trivially resolved, such as joining
1754 // branches. Also determines whether to spill at the definition or later, and
1755 // adds spill moves to the gaps in the schedule.
1756 void ResolveControlFlow(Zone* local_zone);
1757
1758 private:
1759 RegisterAllocationData* data() const { return data_; }
1760 InstructionSequence* code() const { return data()->code(); }
1761 Zone* code_zone() const { return code()->zone(); }
1762
1763 bool CanEagerlyResolveControlFlow(const InstructionBlock* block) const;
1764 int ResolveControlFlow(const InstructionBlock* block,
1765 const InstructionOperand& cur_op,
1766 const InstructionBlock* pred,
1767 const InstructionOperand& pred_op);
1768
1769 void CommitSpillsInDeferredBlocks(TopLevelLiveRange* range, Zone* temp_zone);
1770
1772};
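// Sketch of the phase ordering implied by the classes above (the actual
// driver lives elsewhere in the compiler pipeline; constructor arguments are
// abbreviated, and the phase 1 entry point, whose declaration is not shown in
// this listing, is assumed to be MeetRegisterConstraints()):
//
//   ConstraintBuilder builder(data);
//   builder.MeetRegisterConstraints();                    // phase 1
//   builder.ResolvePhis();                                // phase 2
//   LiveRangeBuilder(data, zone).BuildLiveRanges();       // phase 3
//   LinearScanAllocator(data, kind, zone).AllocateRegisters();  // phase 4
//   OperandAssigner assigner(data);
//   assigner.DecideSpillingMode();                        // phase 5
//   assigner.AssignSpillSlots();                          // phase 6
//   assigner.CommitAssignment();                          // phase 7
//   LiveRangeConnector connector(data);
//   connector.ConnectRanges(zone);                        // phase 8
//   connector.ResolveControlFlow(zone);                   // phase 9
//   ReferenceMapPopulator(data).PopulateReferenceMaps();  // phase 10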
1773
1774} // namespace compiler
1775} // namespace internal
1776} // namespace v8
1777
1778#endif // V8_COMPILER_BACKEND_REGISTER_ALLOCATOR_H_