graph.h
1// Copyright 2022 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_COMPILER_TURBOSHAFT_GRAPH_H_
6#define V8_COMPILER_TURBOSHAFT_GRAPH_H_
7
8#include <algorithm>
9#include <iterator>
10#include <limits>
11#include <memory>
12#include <tuple>
13#include <type_traits>
14
15#include "src/base/iterator.h"
16#include "src/base/logging.h"
18#include "src/base/vector.h"
24
26
27template <class Reducers>
29
31
32// `OperationBuffer` is a growable, Zone-allocated buffer to store Turboshaft
33// operations. It is part of a `Graph`.
34// The buffer can be seen as an array of 8-byte `OperationStorageSlot` values.
35// The structure is append-only, that is, we only add operations at the end.
36// There are rare cases (i.e., loop phis) where we overwrite an existing
37// operation, but only if we can guarantee that the new operation is not bigger
38// than the operation we overwrite.
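// As an illustration of the slot-based layout (a sketch, not code from this
// header; `zone` and `MyOp` are assumed to exist, with `MyOp` standing for any
// Turboshaft operation type):
//
//   OperationBuffer buffer(zone, /*initial_capacity=*/64);
//   OperationStorageSlot* slots =
//       buffer.Allocate(sizeof(MyOp) / sizeof(OperationStorageSlot));
//   const MyOp* op = new (slots) MyOp(...);
//   OpIndex idx = buffer.Index(*op);              // id derived from the byte offset
//   uint16_t slot_count = buffer.SlotCount(idx);  // 8-byte slots used by `op`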
39class OperationBuffer {
40 public:
41 // A `ReplaceScope` is used to overwrite an existing operation.
42 // It moves the end-pointer temporarily so that the next emitted operation
43 // overwrites an old one.
44 class ReplaceScope {
45 public:
46 ReplaceScope(OperationBuffer* buffer, OpIndex replaced)
47 : buffer_(buffer),
48 replaced_(replaced),
49 old_end_(buffer->end_),
50 old_slot_count_(buffer->SlotCount(replaced)) {
51 buffer_->end_ = buffer_->Get(replaced);
52 }
53
54 ~ReplaceScope() {
55 buffer_->end_ = old_end_;
56 // Preserve the original operation size in case it has become smaller.
57 buffer_->operation_sizes_[replaced_.id()] = old_slot_count_;
58 buffer_->operation_sizes_[OpIndex(replaced_.offset() +
59 static_cast<uint32_t>(old_slot_count_) *
60 sizeof(OperationStorageSlot))
61 .id() -
62 1] = old_slot_count_;
63 }
64
65 ReplaceScope(const ReplaceScope&) = delete;
66 ReplaceScope& operator=(const ReplaceScope&) = delete;
67
68 private:
69 OperationBuffer* buffer_;
70 OpIndex replaced_;
71 OperationStorageSlot* old_end_;
72 uint16_t old_slot_count_;
73 };
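// Sketch of the intended use of `ReplaceScope` (see `Graph::Replace` further
// down for the real call site; `NewOp` is a placeholder for the replacement
// operation type):
//
//   {
//     OperationBuffer::ReplaceScope scope(&buffer, index_of_old_op);
//     new (buffer.Allocate(slot_count_of_new_op)) NewOp(...);
//   }  // The destructor restores the end pointer and the recorded slot count.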
74
75 explicit OperationBuffer(Zone* zone, size_t initial_capacity) : zone_(zone) {
76 DCHECK_NE(initial_capacity, 0);
77 begin_ = end_ =
78 zone_->AllocateArray<OperationStorageSlot>(initial_capacity);
79 operation_sizes_ =
80 zone_->AllocateArray<uint16_t>((initial_capacity + 1) / kSlotsPerId);
81 end_cap_ = begin_ + initial_capacity;
82 }
83
84 OperationStorageSlot* Allocate(size_t slot_count) {
85 if (V8_UNLIKELY(static_cast<size_t>(end_cap_ - end_) < slot_count)) {
86 Grow(capacity() + slot_count);
87 DCHECK(slot_count <= static_cast<size_t>(end_cap_ - end_));
88 }
89 OperationStorageSlot* result = end_;
90 end_ += slot_count;
91 OpIndex idx = Index(result);
92 // Store the size both for the first and the last id corresponding to the
93 // new operation. This enables iteration in both directions. The two ids
94 // are the same if the operation is small.
95 operation_sizes_[idx.id()] = slot_count;
96 operation_sizes_[OpIndex(idx.offset() + static_cast<uint32_t>(slot_count) *
97 sizeof(OperationStorageSlot))
98 .id() -
99 1] = slot_count;
100 return result;
101 }
102
103 void RemoveLast() {
104 size_t slot_count = operation_sizes_[EndIndex().id() - 1];
105 end_ -= slot_count;
107 }
108
109 OpIndex Index(const Operation& op) const {
110 return Index(reinterpret_cast<const OperationStorageSlot*>(&op));
111 }
112 OpIndex Index(const OperationStorageSlot* ptr) const {
113 DCHECK(begin_ <= ptr && ptr <= end_);
114 return OpIndex(static_cast<uint32_t>(reinterpret_cast<Address>(ptr) -
115 reinterpret_cast<Address>(begin_)));
116 }
117
118 OperationStorageSlot* Get(OpIndex idx) {
119 DCHECK_LT(idx.offset() / sizeof(OperationStorageSlot), size());
120 return reinterpret_cast<OperationStorageSlot*>(
121 reinterpret_cast<Address>(begin_) + idx.offset());
122 }
123 uint16_t SlotCount(OpIndex idx) {
124 DCHECK_LT(idx.offset() / sizeof(OperationStorageSlot), size());
125 return operation_sizes_[idx.id()];
126 }
127
128 const OperationStorageSlot* Get(OpIndex idx) const {
129 DCHECK_LT(idx.offset(), capacity() * sizeof(OperationStorageSlot));
130 return reinterpret_cast<const OperationStorageSlot*>(
131 reinterpret_cast<Address>(begin_) + idx.offset());
132 }
133
134 OpIndex Next(OpIndex idx) const {
135 DCHECK_GT(operation_sizes_[idx.id()], 0);
136 OpIndex result = OpIndex(idx.offset() + operation_sizes_[idx.id()] *
137 sizeof(OperationStorageSlot));
138 DCHECK_LT(0, result.offset());
139 DCHECK_LE(result.offset(), capacity() * sizeof(OperationStorageSlot));
140 return result;
141 }
142 OpIndex Previous(OpIndex idx) const {
143 DCHECK_GT(idx.id(), 0);
144 DCHECK_GT(operation_sizes_[idx.id() - 1], 0);
145 OpIndex result = OpIndex(idx.offset() - operation_sizes_[idx.id() - 1] *
146 sizeof(OperationStorageSlot));
147 DCHECK_LE(0, result.offset());
148 DCHECK_LT(result.offset(), capacity() * sizeof(OperationStorageSlot));
149 return result;
150 }
151
152 // Offset of the first operation.
153 OpIndex BeginIndex() const { return OpIndex(0); }
154 // One-past-the-end offset.
155 OpIndex EndIndex() const { return Index(end_); }
156
157 uint32_t size() const { return static_cast<uint32_t>(end_ - begin_); }
158 uint32_t capacity() const { return static_cast<uint32_t>(end_cap_ - begin_); }
159
160 void Grow(size_t min_capacity) {
161 size_t size = this->size();
162 size_t capacity = this->capacity();
163 size_t new_capacity = 2 * capacity;
164 while (new_capacity < min_capacity) new_capacity *= 2;
165 CHECK_LT(new_capacity, std::numeric_limits<uint32_t>::max() /
166 sizeof(OperationStorageSlot));
167
168 OperationStorageSlot* new_buffer =
169 zone_->AllocateArray<OperationStorageSlot>(new_capacity);
170 memcpy(new_buffer, begin_, size * sizeof(OperationStorageSlot));
171
172 uint16_t* new_operation_sizes =
173 zone_->AllocateArray<uint16_t>(new_capacity / kSlotsPerId);
174 memcpy(new_operation_sizes, operation_sizes_,
175 size / kSlotsPerId * sizeof(uint16_t));
176
177 begin_ = new_buffer;
178 end_ = new_buffer + size;
179 end_cap_ = new_buffer + new_capacity;
180 operation_sizes_ = new_operation_sizes;
181 }
182
183 void Reset() { end_ = begin_; }
184
185 private:
186 Zone* zone_;
187 OperationStorageSlot* begin_;
188 OperationStorageSlot* end_;
189 OperationStorageSlot* end_cap_;
190 uint16_t* operation_sizes_;
191};
192
193template <class Derived>
194class DominatorForwardTreeNode;
195template <class Derived>
196class RandomAccessStackDominatorNode;
197
198template <class Derived>
199class DominatorForwardTreeNode {
200 // A class storing a forward representation of the dominator tree, since the
201 // regular dominator tree is represented as pointers from the children to
202 // parents rather than parents to children.
203 public:
204 void AddChild(Derived* next) {
205 DCHECK_EQ(static_cast<Derived*>(this)->len_ + 1, next->len_);
206 next->neighboring_child_ = last_child_;
207 last_child_ = next;
208 }
209
210 Derived* LastChild() const { return last_child_; }
211 Derived* NeighboringChild() const { return neighboring_child_; }
212 bool HasChildren() const { return last_child_ != nullptr; }
213
214 base::SmallVector<Derived*, 8> Children() const {
215 base::SmallVector<Derived*, 8> result;
216 for (Derived* child = last_child_; child != nullptr;
217 child = child->neighboring_child_) {
218 result.push_back(child);
219 }
220 std::reverse(result.begin(), result.end());
221 return result;
222 }
223
224 private:
225#ifdef DEBUG
226 friend class RandomAccessStackDominatorNode<Derived>;
227#endif
228 Derived* neighboring_child_ = nullptr;
229 Derived* last_child_ = nullptr;
230};
231
232template <class Derived>
233class RandomAccessStackDominatorNode
234 : public DominatorForwardTreeNode<Derived> {
235 // This class represents a node of a dominator tree implemented using Myers'
236 // Random-Access Stack (see
237 // https://publications.mpi-cbg.de/Myers_1983_6328.pdf). This datastructure
238 // enables searching for a predecessor of a node in log(h) time, where h is
239 // the height of the dominator tree.
240 public:
241 void SetDominator(Derived* dominator);
242 void SetAsDominatorRoot();
243 Derived* GetDominator() const { return nxt_; }
244
245 // Returns the lowest common dominator of {this} and {other}.
246 Derived* GetCommonDominator(
247 RandomAccessStackDominatorNode<Derived>* other) const;
248
249 bool IsDominatedBy(const Derived* other) const {
250 // TODO(dmercadier): we don't have to call GetCommonDominator and could
251 // determine quicker that {this} isn't dominated by {other}.
252 return GetCommonDominator(other) == other;
253 }
254
255 int Depth() const { return len_; }
256
257 private:
258 friend class DominatorForwardTreeNode<Derived>;
259#ifdef DEBUG
260 friend class Block;
261#endif
262
263 // Myers' original datastructure requires to often check jmp_->len_, which is
264 // not so great on modern computers (memory access, caches & co). To speed up
265 // things a bit, we store here jmp_len_.
266 int jmp_len_ = 0;
267
268 int len_ = 0;
269 Derived* nxt_ = nullptr;
270 Derived* jmp_ = nullptr;
271};
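// Typical dominator queries enabled by this node type (a sketch assuming `a`
// and `b` are `Block*` belonging to the same graph):
//
//   Block* idom = a->GetDominator();        // immediate dominator
//   Block* lca = a->GetCommonDominator(b);  // lowest common dominator, O(log h)
//   bool dominated = a->IsDominatedBy(b);
//   int depth = a->Depth();                 // depth in the dominator tree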
272
273// A simple iterator to walk over the predecessors of a block. Note that the
274// iteration order is reversed.
275class PredecessorIterator {
276 public:
277 explicit PredecessorIterator(const Block* block) : current_(block) {}
278
279 PredecessorIterator& operator++();
280 constexpr bool operator==(const PredecessorIterator& other) const {
281 return current_ == other.current_;
282 }
283 constexpr bool operator!=(const PredecessorIterator& other) const {
284 return !(*this == other);
285 }
286
287 const Block* operator*() const { return current_; }
288
289 private:
290 const Block* current_;
291};
292
293// An iterable wrapper for the predecessors of a block.
294class NeighboringPredecessorIterable {
295 public:
296 explicit NeighboringPredecessorIterable(const Block* begin) : begin_(begin) {}
297
298 PredecessorIterator begin() const { return PredecessorIterator(begin_); }
299 PredecessorIterator end() const { return PredecessorIterator(nullptr); }
300
301 private:
302 const Block* begin_;
303};
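// Iterating over a block's predecessors (sketch). The linked-list order is the
// reverse of the order in which the predecessors were added:
//
//   for (const Block* pred : block->PredecessorsIterable()) { ... }
//   // In insertion order, at the cost of building a SmallVector:
//   for (Block* pred : block->Predecessors()) { ... }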
304
305// A basic block
306class Block : public RandomAccessStackDominatorNode<Block> {
307 public:
308 enum class Kind : uint8_t { kMerge, kLoopHeader, kBranchTarget };
309
310 explicit Block(Kind kind) : kind_(kind) {}
311
312 bool IsLoopOrMerge() const { return IsLoop() || IsMerge(); }
313 bool IsLoop() const { return kind_ == Kind::kLoopHeader; }
314 bool IsMerge() const { return kind_ == Kind::kMerge; }
315 bool IsBranchTarget() const { return kind_ == Kind::kBranchTarget; }
316
317 Kind kind() const { return kind_; }
319
320 BlockIndex index() const { return index_; }
321
322 bool Contains(OpIndex op_idx) const {
323 return begin_ <= op_idx && op_idx < end_;
324 }
325
326 bool IsBound() const { return index_ != BlockIndex::Invalid(); }
327
328 base::SmallVector<Block*, 8> Predecessors() const {
329 base::SmallVector<Block*, 8> result;
330 for (Block* pred = last_predecessor_; pred != nullptr;
331 pred = pred->neighboring_predecessor_) {
332 result.push_back(pred);
333 }
334 std::reverse(result.begin(), result.end());
335 return result;
336 }
337
338 // Returns an iterable object (defining begin() and end()) to iterate over the
339 // block's predecessors.
340 NeighboringPredecessorIterable PredecessorsIterable() const {
341 return NeighboringPredecessorIterable(last_predecessor_);
342 }
343
344 int PredecessorCount() const {
345#ifdef DEBUG
346 CheckPredecessorCount();
347#endif
348 return predecessor_count_;
349 }
350
351#ifdef DEBUG
352 // Checks that the {predecessor_count_} is equal to the number of predecessors
353 // reachable through {last_predecessor_}.
354 void CheckPredecessorCount() const {
355 int count = 0;
356 for (Block* pred = last_predecessor_; pred != nullptr;
357 pred = pred->neighboring_predecessor_) {
358 count++;
359 }
360 DCHECK_EQ(count, predecessor_count_);
361 }
362#endif
363
364 static constexpr int kInvalidPredecessorIndex = -1;
365
366 // Returns the index of {target} in the predecessors of the current Block.
367 // If {target} is not a direct predecessor, returns -1.
368 int GetPredecessorIndex(const Block* target) const {
369 int pred_count = 0;
370 int pred_reverse_index = -1;
371 for (Block* pred = last_predecessor_; pred != nullptr;
372 pred = pred->neighboring_predecessor_) {
373 if (pred == target) {
374 DCHECK_EQ(pred_reverse_index, -1);
375 pred_reverse_index = pred_count;
376 }
377 pred_count++;
378 }
379 if (pred_reverse_index == -1) {
380 return kInvalidPredecessorIndex;
381 }
382 return pred_count - pred_reverse_index - 1;
383 }
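// E.g. if the predecessors were added in the order P0, P1, P2, the list rooted
// at `last_predecessor_` is P2 -> P1 -> P0. For target P1 this finds
// pred_reverse_index == 1 with pred_count == 3, so the returned
// insertion-order index is 3 - 1 - 1 == 1.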
384
385 Block* LastPredecessor() const { return last_predecessor_; }
386 Block* NeighboringPredecessor() const { return neighboring_predecessor_; }
387 bool HasPredecessors() const {
389 return last_predecessor_ != nullptr;
390 }
392 last_predecessor_ = nullptr;
394 }
396 Block* pred = last_predecessor_;
397 last_predecessor_ = nullptr;
398 while (pred->neighboring_predecessor_) {
399 Block* tmp = pred->neighboring_predecessor_;
400 pred->neighboring_predecessor_ = nullptr;
401 pred = tmp;
402 }
404 }
405
416
417 // The block from the previous graph which produced the current block. This
418 // has to be updated to be the last block that contributed operations to the
419 // current block to ensure that phi nodes are created correctly.
420 void SetOrigin(const Block* origin) {
421 DCHECK_IMPLIES(origin != nullptr,
422 origin->graph_generation_ + 1 == graph_generation_);
423 origin_ = origin;
424 }
425 // The block from the input graph that is equivalent as a predecessor. It is
426 // only available for bound blocks and it does *not* refer to an equivalent
427 // block as a branch destination.
428 const Block* OriginForBlockEnd() const {
429 DCHECK(IsBound());
430 return origin_;
431 }
432 const Block* OriginForLoopHeader() const {
433 DCHECK(IsLoop());
434 return origin_;
435 }
436
437 bool IsComplete() const { return end_.valid(); }
438 OpIndex begin() const {
440 return begin_;
441 }
442 OpIndex end() const {
443 DCHECK(end_.valid());
444 return end_;
445 }
446
447 // Returns an approximation of the number of operations contained in this
448 // block, by counting how many slots it contains. Depending on the size of the
449 // operations it contains, this could be exactly how many operations it
450 // contains, or it could be less.
451 int OpCountUpperBound() const { return end().id() - begin().id(); }
452
453 const Operation& FirstOperation(const Graph& graph) const;
454 const Operation& LastOperation(const Graph& graph) const;
455 Operation& LastOperation(Graph& graph) const;
456
457 bool EndsWithBranchingOp(const Graph& graph) const {
458 switch (LastOperation(graph).opcode) {
459 case Opcode::kBranch:
460 case Opcode::kSwitch:
461 case Opcode::kCheckException:
462 return true;
463 default:
464 DCHECK_LE(SuccessorBlocks(*this, graph).size(), 1);
465 return false;
466 }
467 }
468
469 bool HasPhis(const Graph& graph) const;
470
471 bool HasBackedge(const Graph& graph) const {
472 if (const GotoOp* gto = LastOperation(graph).TryCast<GotoOp>()) {
473 return gto->destination->index().id() <= index().id();
474 }
475 return false;
476 }
477
478#ifdef DEBUG
479 // {has_peeled_iteration_} is currently only updated for loops peeled in
480 // Turboshaft (it is true only for loop headers of loops that have had their
481 // first iteration peeled). So be aware that while Turbofan loop peeling is
482 // enabled, this is not a reliable way to check if a loop has a peeled
483 // iteration.
484 bool has_peeled_iteration() const {
485 DCHECK(IsLoop());
486 return has_peeled_iteration_;
487 }
488 void set_has_peeled_iteration() {
489 DCHECK(IsLoop());
490 has_peeled_iteration_ = true;
491 }
492#endif
493
494 // Computes the dominator of this block, assuming that the dominators of
495 // its predecessors are already computed. Returns the depth of the current
496 // block in the dominator tree.
497 uint32_t ComputeDominator();
498
499 void PrintDominatorTree(
500 std::vector<const char*> tree_symbols = std::vector<const char*>(),
501 bool has_next = false) const;
502
503 enum class CustomDataKind {
504 kUnset, // No custom data has been set for this block.
507 };
508
509 void set_custom_data(uint32_t data, CustomDataKind kind_for_debug_check) {
510 custom_data_ = data;
511#ifdef DEBUG
512 custom_data_kind_for_debug_check_ = kind_for_debug_check;
513#endif
514 }
515
516 uint32_t get_custom_data(CustomDataKind kind_for_debug_check) const {
517 DCHECK_EQ(custom_data_kind_for_debug_check_, kind_for_debug_check);
518 return custom_data_;
519 }
520
522 custom_data_ = 0;
523#ifdef DEBUG
524 custom_data_kind_for_debug_check_ = CustomDataKind::kUnset;
525#endif
526 }
527
528 private:
529 // AddPredecessor should never be called directly except from Assembler's
530 // AddPredecessor and SplitEdge methods, which take care of maintaining
531 // split-edge form.
532 void AddPredecessor(Block* predecessor) {
533 DCHECK(!IsBound() ||
534 (Predecessors().size() == 1 && kind_ == Kind::kLoopHeader));
535 DCHECK_EQ(predecessor->neighboring_predecessor_, nullptr);
536 predecessor->neighboring_predecessor_ = last_predecessor_;
537 last_predecessor_ = predecessor;
538 ++predecessor_count_;
539 }
540
541
542 Kind kind_;
543 OpIndex begin_ = OpIndex::Invalid();
544 OpIndex end_ = OpIndex::Invalid();
545 BlockIndex index_ = BlockIndex::Invalid();
546 Block* last_predecessor_ = nullptr;
547 Block* neighboring_predecessor_ = nullptr;
549 uint32_t predecessor_count_ = 0;
550 const Block* origin_ = nullptr;
551 // The {custom_data_} field can be used by algorithms to temporarily store
552 // block-specific data. This field is not preserved when constructing a new
553 // output graph and algorithms cannot rely on this field being properly reset
554 // after previous uses.
555 uint32_t custom_data_ = 0;
556#ifdef DEBUG
557 CustomDataKind custom_data_kind_for_debug_check_ = CustomDataKind::kUnset;
558 size_t graph_generation_ = 0;
559 // True if this is a loop header of a loop with a peeled iteration.
560 bool has_peeled_iteration_ = false;
561#endif
562
563 friend class Graph;
564 template <class Reducers>
565 friend class Assembler;
566 template <class Assembler>
567 friend class GraphVisitor;
568};
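// A sketch of common block-level queries (assuming a bound `const Block& b`
// that lives in `const Graph& g`):
//
//   for (const Operation& op : g.operations(b)) { /* visit b's operations */ }
//   bool in_b = b.Contains(some_op_index);
//   const Operation& last = b.LastOperation(g);
//   bool ends_in_branch = b.EndsWithBranchingOp(g);
//   int upper_bound = b.OpCountUpperBound();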
569
570V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, const Block* b);
571
572inline PredecessorIterator& PredecessorIterator::operator++() {
573 DCHECK_NE(current_, nullptr);
574 current_ = current_->NeighboringPredecessor();
575 return *this;
576}
577
578class Graph {
579 public:
580 // A big initial capacity prevents many growing steps. It also makes sense
581 // because the graph and its memory is recycled for following phases.
582 explicit Graph(Zone* graph_zone, size_t initial_capacity = 2048)
583 : operations_(graph_zone, initial_capacity),
585 all_blocks_(),
592#ifdef DEBUG
593 block_type_refinement_(graph_zone),
594#endif
596 }
597
598 // Reset the graph to recycle its memory.
599 void Reset() {
600 operations_.Reset();
601 bound_blocks_.clear();
602 // No need to explicitly reset `all_blocks_`, since we will placement-new
603 // new blocks into it, reusing the already allocated backing storage.
604 next_block_ = 0;
605 op_to_block_.Reset();
606 block_permutation_.clear();
607 source_positions_.Reset();
608 operation_origins_.Reset();
609 operation_types_.Reset();
611#ifdef DEBUG
612 block_type_refinement_.Reset();
613 // Do not reset graph_created_from_turbofan_, as it is propagated along
614 // the phases.
615#endif
616 }
617
618 V8_INLINE const Operation& Get(OpIndex i) const {
619 DCHECK(i.valid());
620 DCHECK(BelongsToThisGraph(i));
621 // `Operation` contains const fields and can be overwritten with placement
622 // new. Therefore, std::launder is necessary to avoid undefined behavior.
623 const Operation* ptr =
624 std::launder(reinterpret_cast<const Operation*>(operations_.Get(i)));
625 // Detect invalid memory by checking if opcode is valid.
626 DCHECK_LT(OpcodeIndex(ptr->opcode), kNumberOfOpcodes);
627 return *ptr;
628 }
629 V8_INLINE Operation& Get(OpIndex i) {
630 DCHECK(i.valid());
631 DCHECK(BelongsToThisGraph(i));
632 // `Operation` contains const fields and can be overwritten with placement
633 // new. Therefore, std::launder is necessary to avoid undefined behavior.
634 Operation* ptr =
635 std::launder(reinterpret_cast<Operation*>(operations_.Get(i)));
636 // Detect invalid memory by checking if opcode is valid.
637 DCHECK_LT(OpcodeIndex(ptr->opcode), kNumberOfOpcodes);
638 return *ptr;
639 }
640
642
643 Block& StartBlock() { return Get(BlockIndex(0)); }
644 const Block& StartBlock() const { return Get(BlockIndex(0)); }
645
646 Block& Get(BlockIndex i) {
647 DCHECK_LT(i.id(), bound_blocks_.size());
648 return *bound_blocks_[i.id()];
649 }
650 const Block& Get(BlockIndex i) const {
651 DCHECK_LT(i.id(), bound_blocks_.size());
652 return *bound_blocks_[i.id()];
653 }
654
655 OpIndex Index(const Operation& op) const {
656 OpIndex result = operations_.Index(op);
657#ifdef DEBUG
658 result.set_generation_mod2(generation_mod2());
659#endif
660 return result;
661 }
662 BlockIndex BlockOf(OpIndex index) const {
663 ZoneVector<Block*>::const_iterator it;
664 if (block_permutation_.empty()) {
665 it = std::upper_bound(
666 bound_blocks_.begin(), bound_blocks_.end(), index,
667 [](OpIndex value, const Block* b) { return value < b->begin_; });
668 DCHECK_NE(it, bound_blocks_.begin());
669 } else {
670 it = std::upper_bound(
671 block_permutation_.begin(), block_permutation_.end(), index,
672 [](OpIndex value, const Block* b) { return value < b->begin_; });
673 DCHECK_NE(it, block_permutation_.begin());
674 }
675 it = std::prev(it);
676 DCHECK((*it)->Contains(index));
677 return (*it)->index();
678 }
679
680 void SetBlockOf(BlockIndex block, OpIndex op) { op_to_block_[op] = block; }
681
682 BlockIndex BlockIndexOf(OpIndex op) const { return op_to_block_[op]; }
683
684 BlockIndex BlockIndexOf(const Operation& op) const {
685 return op_to_block_[Index(op)];
686 }
687
688 OpIndex NextIndex(const OpIndex idx) const {
689 OpIndex next = operations_.Next(idx);
690#ifdef DEBUG
691 next.set_generation_mod2(generation_mod2());
692#endif
693 return next;
694 }
695 OpIndex PreviousIndex(const OpIndex idx) const {
696 OpIndex prev = operations_.Previous(idx);
697#ifdef DEBUG
698 prev.set_generation_mod2(generation_mod2());
699#endif
700 return prev;
701 }
705
706 OperationStorageSlot* Allocate(size_t slot_count) {
707 return operations_.Allocate(slot_count);
708 }
709
710 void RemoveLast() {
711 DecrementInputUses(Get(PreviousIndex(EndIndex())));
712 operations_.RemoveLast();
713#ifdef DEBUG
714 if (v8_flags.turboshaft_trace_emitted) {
715 std::cout << "/!\\ Removed last emitted operation /!\\\n";
716 }
717#endif
718 }
719
720 template <class Op, class... Args>
721 V8_INLINE Op& Add(Args... args) {
722#ifdef DEBUG
723 OpIndex result = next_operation_index();
724#endif // DEBUG
725 Op& op = Op::New(this, args...);
726 IncrementInputUses(op);
727
728 DCHECK_EQ(result, Index(op));
729#ifdef DEBUG
730 for (OpIndex input : op.inputs()) {
731 DCHECK_LT(input, result);
732 DCHECK(BelongsToThisGraph(input));
733 }
734
735 if (v8_flags.turboshaft_trace_emitted) {
736 std::cout << "Emitted: " << result << " => " << op << "\n";
737 }
738
739#endif // DEBUG
740
741 return op;
742 }
743
744 template <class Op, class... Args>
745 void Replace(OpIndex replaced, Args... args) {
746 static_assert((std::is_base_of<Operation, Op>::value));
747 static_assert(std::is_trivially_destructible<Op>::value);
748
749 const Operation& old_op = Get(replaced);
750 DecrementInputUses(old_op);
751 auto old_uses = old_op.saturated_use_count;
752 Op* new_op;
753 {
754 OperationBuffer::ReplaceScope replace_scope(&operations_, replaced);
755 new_op = &Op::New(this, args...);
756 }
757 if (!std::is_same_v<Op, DeadOp>) {
758 new_op->saturated_use_count = old_uses;
759 }
760 IncrementInputUses(*new_op);
761 }
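// Emitting and rewriting operations through the Graph (sketch; the concrete
// operation types and constructor arguments are illustrative only):
//
//   Block* b = graph.NewBlock();
//   graph.Add(b);                                    // bind the block
//   SomeOp& op = graph.Add<SomeOp>(/* inputs... */); // append an operation
//   OpIndex idx = graph.Index(op);
//   graph.Replace<DeadOp>(idx);                      // overwrite it in place
//   graph.Finalize(b);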
762
763 V8_INLINE Block* NewLoopHeader(const Block* origin = nullptr) {
764 return NewBlock(Block::Kind::kLoopHeader, origin);
765 }
766 V8_INLINE Block* NewBlock(const Block* origin = nullptr) {
767 return NewBlock(Block::Kind::kMerge, origin);
768 }
769
770 V8_INLINE Block* NewBlock(Block::Kind kind, const Block* origin = nullptr) {
771 if (V8_UNLIKELY(next_block_ == all_blocks_.size())) {
772 AllocateNewBlocks();
773 }
774 Block* result = all_blocks_[next_block_++];
775 new (result) Block(kind);
776#ifdef DEBUG
777 result->graph_generation_ = generation_;
778#endif
779 result->SetOrigin(origin);
780 return result;
781 }
782
783 V8_INLINE bool Add(Block* block) {
784 DCHECK_EQ(block->graph_generation_, generation_);
785 if (!bound_blocks_.empty() && !block->HasPredecessors()) return false;
786
787 DCHECK(!block->begin_.valid());
788 block->begin_ = next_operation_index();
789 DCHECK_EQ(block->index_, BlockIndex::Invalid());
790 block->index_ = next_block_index();
791 bound_blocks_.push_back(block);
792 uint32_t depth = block->ComputeDominator();
793 dominator_tree_depth_ = std::max<uint32_t>(dominator_tree_depth_, depth);
794
795#ifdef DEBUG
796 if (v8_flags.turboshaft_trace_emitted) {
797 std::cout << "\nBound: " << block->index() << " [predecessors: ";
798 auto preds = block->Predecessors();
799 if (preds.size() >= 1) std::cout << preds[0]->index();
800 for (size_t i = 1; i < preds.size(); i++) {
801 std::cout << ", " << preds[i]->index();
802 }
803 std::cout << "]\n";
804 }
805#endif
806
807 return true;
808 }
809
810 void Finalize(Block* block) {
811 DCHECK(!block->end_.valid());
812 block->end_ = next_operation_index();
813 // Updating the mapping from Operations to Blocks for the Operations in {block}.
814 for (const Operation& op : operations(*block)) {
815 SetBlockOf(block->index(), Index(op));
816 }
817 }
818
819 void TurnLoopIntoMerge(Block* loop) {
820 DCHECK(loop->IsLoop());
821 DCHECK_EQ(loop->PredecessorCount(), 1);
822 loop->kind_ = Block::Kind::kMerge;
823 for (Operation& op : operations(*loop)) {
824 if (auto* pending_phi = op.TryCast<PendingLoopPhiOp>()) {
825 Replace<PhiOp>(Index(*pending_phi),
826 base::VectorOf({pending_phi->first()}),
827 pending_phi->rep);
828 }
829 }
830 }
831
833 BlockIndex next_block_index() const {
834 return BlockIndex(static_cast<uint32_t>(bound_blocks_.size()));
835 }
836
837 Block* last_block() { return bound_blocks_.back(); }
838
839 Zone* graph_zone() const { return graph_zone_; }
840 uint32_t block_count() const {
841 return static_cast<uint32_t>(bound_blocks_.size());
842 }
843 uint32_t op_id_count() const {
844 return (operations_.size() + (kSlotsPerId - 1)) / kSlotsPerId;
845 }
847 uint32_t number_of_operations = 0;
848 for ([[maybe_unused]] auto& op : AllOperations()) {
849 ++number_of_operations;
850 }
851 return number_of_operations;
852 }
853 uint32_t op_id_capacity() const {
854 return operations_.capacity() / kSlotsPerId;
855 }
856
857 OpIndex BeginIndex() const {
858 OpIndex begin = operations_.BeginIndex();
859#ifdef DEBUG
860 begin.set_generation_mod2(generation_mod2());
861#endif
862 return begin;
863 }
864 OpIndex EndIndex() const {
865 OpIndex end = operations_.EndIndex();
866#ifdef DEBUG
867 end.set_generation_mod2(generation_mod2());
868#endif
869 return end;
870 }
871
872 class OpIndexIterator
873 : public base::iterator<std::bidirectional_iterator_tag, OpIndex,
874 std::ptrdiff_t, OpIndex*, OpIndex> {
875 public:
877
878 explicit OpIndexIterator(OpIndex index, const Graph* graph)
879 : index_(index), graph_(graph) {}
880 value_type operator*() const { return index_; }
881 OpIndexIterator& operator++() {
882 index_ = graph_->NextIndex(index_);
883 return *this;
884 }
885 OpIndexIterator& operator--() {
886 index_ = graph_->PreviousIndex(index_);
887 return *this;
888 }
889 bool operator!=(OpIndexIterator other) const {
890 DCHECK_EQ(graph_, other.graph_);
891 return index_ != other.index_;
892 }
893 bool operator==(OpIndexIterator other) const { return !(*this != other); }
894
895 private:
896 OpIndex index_;
897 const Graph* const graph_;
898 };
899
900 template <class OperationT, typename GraphT>
901 class OperationIterator
902 : public base::iterator<std::bidirectional_iterator_tag, OperationT> {
903 public:
904 static_assert(std::is_same_v<std::remove_const_t<OperationT>, Operation> &&
905 std::is_same_v<std::remove_const_t<GraphT>, Graph>);
907
908 explicit OperationIterator(OpIndex index, GraphT* graph)
909 : index_(index), graph_(graph) {}
910 value_type& operator*() { return graph_->Get(index_); }
911 value_type* operator->() { return &graph_->Get(index_); }
912 OperationIterator& operator++() {
913 DCHECK_NE(index_, graph_->EndIndex());
914 index_ = graph_->NextIndex(index_);
915 return *this;
916 }
917 OperationIterator& operator--() {
918 DCHECK_NE(index_, graph_->BeginIndex());
919 index_ = graph_->PreviousIndex(index_);
920 return *this;
921 }
922 bool operator!=(OperationIterator other) const {
923 DCHECK_EQ(graph_, other.graph_);
924 return index_ != other.index_;
925 }
926 bool operator==(OperationIterator other) const { return !(*this != other); }
927
928 private:
929 OpIndex index_;
930 GraphT* const graph_;
931 };
932
933 using MutableOperationIterator = OperationIterator<Operation, Graph>;
934 using ConstOperationIterator =
935 OperationIterator<const Operation, const Graph>;
936
937 base::iterator_range<MutableOperationIterator> AllOperations() {
938 return operations(BeginIndex(), EndIndex());
939 }
940 base::iterator_range<ConstOperationIterator> AllOperations() const {
941 return operations(BeginIndex(), EndIndex());
942 }
943
944 base::iterator_range<OpIndexIterator> AllOperationIndices() const {
945 return OperationIndices(BeginIndex(), EndIndex());
946 }
947
948 base::iterator_range<MutableOperationIterator> operations(
949 const Block& block) {
950 return operations(block.begin_, block.end_);
951 }
952 base::iterator_range<ConstOperationIterator> operations(
953 const Block& block) const {
954 return operations(block.begin_, block.end_);
955 }
956
957 base::iterator_range<OpIndexIterator> OperationIndices(
958 const Block& block) const {
959 return OperationIndices(block.begin_, block.end_);
960 }
961
962 base::iterator_range<ConstOperationIterator> operations(OpIndex begin,
963 OpIndex end) const {
964 DCHECK(begin.valid());
965 DCHECK(end.valid());
966 return {ConstOperationIterator(begin, this),
967 ConstOperationIterator(end, this)};
968 }
969 base::iterator_range<MutableOperationIterator> operations(OpIndex begin,
970 OpIndex end) {
971 DCHECK(begin.valid());
972 DCHECK(end.valid());
973 return {MutableOperationIterator(begin, this),
974 MutableOperationIterator(end, this)};
975 }
976
977 base::iterator_range<OpIndexIterator> OperationIndices(OpIndex begin,
978 OpIndex end) const {
979 DCHECK(begin.valid());
980 DCHECK(end.valid());
981 return {OpIndexIterator(begin, this), OpIndexIterator(end, this)};
982 }
983
984 base::iterator_range<base::DerefPtrIterator<Block>> blocks() {
985 return {base::DerefPtrIterator<Block>(bound_blocks_.data()),
986 base::DerefPtrIterator<Block>(bound_blocks_.data() +
987 bound_blocks_.size())};
988 }
989 base::iterator_range<base::DerefPtrIterator<const Block>> blocks() const {
990 return {base::DerefPtrIterator<const Block>(bound_blocks_.data()),
991 base::DerefPtrIterator<const Block>(bound_blocks_.data() +
992 bound_blocks_.size())};
993 }
994 const ZoneVector<Block*>& blocks_vector() const { return bound_blocks_; }
995
996 bool IsLoopBackedge(const GotoOp& op) const {
998 return op.destination->begin() <= Index(op);
999 }
1000
1001 bool IsValid(OpIndex i) const { return i < next_operation_index(); }
1002
1009
1016
1017 uint32_t DominatorTreeDepth() const { return dominator_tree_depth_; }
1018
1023#ifdef DEBUG
1024 // Store refined types per block here for --trace-turbo printing.
1025 // TODO(nicohartmann@): Remove this once we have a proper way to print
1026 // type information inside the reducers.
1027 using TypeRefinements = std::vector<std::pair<OpIndex, Type>>;
1028 const GrowingBlockSidetable<TypeRefinements>& block_type_refinement() const {
1029 return block_type_refinement_;
1030 }
1031 GrowingBlockSidetable<TypeRefinements>& block_type_refinement() {
1032 return block_type_refinement_;
1033 }
1034#endif // DEBUG
1035
1036 void ReorderBlocks(base::Vector<uint32_t> permutation) {
1037 DCHECK_EQ(permutation.size(), bound_blocks_.size());
1038 block_permutation_.resize(bound_blocks_.size());
1039 std::copy(bound_blocks_.begin(), bound_blocks_.end(),
1040 block_permutation_.begin());
1041 for (size_t i = 0; i < permutation.size(); ++i) {
1042 DCHECK_LE(0, permutation[i]);
1043 DCHECK_LT(permutation[i], block_permutation_.size());
1044 bound_blocks_[i] = block_permutation_[permutation[i]];
1045 bound_blocks_[i]->index_ = BlockIndex(static_cast<uint32_t>(i));
1046 }
1047 }
1048
1049 Graph& GetOrCreateCompanion() {
1050 if (!companion_) {
1052#ifdef DEBUG
1053 companion_->generation_ = generation_ + 1;
1054 if (IsCreatedFromTurbofan()) companion_->SetCreatedFromTurbofan();
1055#endif // DEBUG
1056 }
1057 return *companion_;
1058 }
1059
1060 // Swap the graph with its companion graph to turn the output of one phase
1061 // into the input of the next phase.
1062 void SwapWithCompanion() {
1063 Graph& companion = GetOrCreateCompanion();
1064 std::swap(operations_, companion.operations_);
1065 std::swap(bound_blocks_, companion.bound_blocks_);
1066 std::swap(all_blocks_, companion.all_blocks_);
1067 std::swap(next_block_, companion.next_block_);
1068 std::swap(block_permutation_, companion.block_permutation_);
1069 std::swap(graph_zone_, companion.graph_zone_);
1070 op_to_block_.SwapData(companion.op_to_block_);
1071 source_positions_.SwapData(companion.source_positions_);
1072 operation_origins_.SwapData(companion.operation_origins_);
1073 operation_types_.SwapData(companion.operation_types_);
1074#ifdef DEBUG
1075 std::swap(block_type_refinement_, companion.block_type_refinement_);
1076 // Update generation index.
1077 DCHECK_EQ(generation_ + 1, companion.generation_);
1078 generation_ = companion.generation_++;
1079#endif // DEBUG
1080 // Reseting phase-specific fields.
1081 loop_unrolling_analyzer_ = nullptr;
1083 }
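// How a phase typically recycles graphs (sketch): the reducer writes its
// output into the companion graph and then swaps, so the freshly built graph
// becomes the input of the next phase while the old storage is reused.
//
//   Graph& output = input.GetOrCreateCompanion();
//   // ... build the new graph into `output` ...
//   input.SwapWithCompanion();  // `input` now holds the newly built graph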
1084
1085#ifdef DEBUG
1086 size_t generation() const { return generation_; }
1087 int generation_mod2() const { return generation_ % 2; }
1088
1089 bool BelongsToThisGraph(OpIndex idx) const {
1090 return idx.generation_mod2() == generation_mod2();
1091 }
1092
1093 void SetCreatedFromTurbofan() { graph_created_from_turbofan_ = true; }
1094 bool IsCreatedFromTurbofan() const { return graph_created_from_turbofan_; }
1095#endif // DEBUG
1096
1107#ifdef DEBUG
1108 bool has_loop_unrolling_analyzer() const {
1109 return loop_unrolling_analyzer_ != nullptr;
1110 }
1111#endif
1112
1120
1121 private:
1122 bool InputsValid(const Operation& op) const {
1123 for (OpIndex i : op.inputs()) {
1124 if (!IsValid(i)) return false;
1125 }
1126 return true;
1127 }
1128
1129 template <class Op>
1130 void IncrementInputUses(const Op& op) {
1131 for (OpIndex input : op.inputs()) {
1132 // Tuples should never be used as input, except in other tuples (which is
1133 // used for instance in Int64Lowering::LowerCall).
1134 DCHECK_IMPLIES(Get(input).Is<TupleOp>(), op.template Is<TupleOp>());
1135 Get(input).saturated_use_count.Incr();
1136 }
1137 }
1138
1139 template <class Op>
1140 void DecrementInputUses(const Op& op) {
1141 for (OpIndex input : op.inputs()) {
1142 // Tuples should never be used as input, except in other tuples (which is
1143 // used for instance in Int64Lowering::LowerCall).
1144 DCHECK_IMPLIES(Get(input).Is<TupleOp>(), op.template Is<TupleOp>());
1145 Get(input).saturated_use_count.Decr();
1146 }
1147 }
1148
1149 // Allocates pointer-stable storage for new blocks, and appends the pointers
1150 // to that storage to `all_blocks_`. Initialization of the blocks is deferred
1151 // to when they are actually constructed in `NewBlock`.
1152 V8_NOINLINE V8_PRESERVE_MOST void AllocateNewBlocks() {
1153 constexpr size_t kMinCapacity = 32;
1154 size_t next_capacity = std::max(kMinCapacity, all_blocks_.size() * 2);
1155 size_t new_block_count = next_capacity - all_blocks_.size();
1156 DCHECK_GT(new_block_count, 0);
1157 base::Vector<Block> block_storage =
1158 graph_zone_->AllocateVector<Block>(new_block_count);
1159 base::Vector<Block*> new_all_blocks =
1160 graph_zone_->AllocateVector<Block*>(next_capacity);
1161 DCHECK_EQ(new_all_blocks.size(), all_blocks_.size() + new_block_count);
1162 std::copy(all_blocks_.begin(), all_blocks_.end(), new_all_blocks.begin());
1163 Block** insert_begin = new_all_blocks.begin() + all_blocks_.size();
1164 DCHECK_EQ(insert_begin + new_block_count, new_all_blocks.end());
1165 for (size_t i = 0; i < new_block_count; ++i) {
1166 insert_begin[i] = &block_storage[i];
1167 }
1168 base::Vector<Block*> old_all_blocks = all_blocks_;
1169 all_blocks_ = new_all_blocks;
1170 if (!old_all_blocks.empty()) {
1171 graph_zone_->DeleteArray(old_all_blocks.data(), old_all_blocks.length());
1172 }
1173
1174 // Eventually most new blocks will be bound anyway, so pre-allocate as well.
1175 DCHECK_LE(bound_blocks_.size(), all_blocks_.size());
1176 bound_blocks_.reserve(all_blocks_.size());
1177 }
1178
1179 OperationBuffer operations_;
1180 ZoneVector<Block*> bound_blocks_;
1181 // The next two fields essentially form a `ZoneVector` but with pointer
1182 // stability for the `Block` elements. That is, `all_blocks_` contains
1183 // pointers to (potentially non-contiguous) Zone-allocated `Block`s.
1184 // Each pointer in `all_blocks_` points to already allocated space, but they
1185 // are only properly value-initialized up to index `next_block_`.
1186 base::Vector<Block*> all_blocks_;
1187 size_t next_block_ = 0;
1188 GrowingOpIndexSidetable<BlockIndex> op_to_block_;
1189 // When `ReorderBlocks` is called, `block_permutation_` contains the original
1190 // order of blocks in order to provide a proper OpIndex->Block mapping for
1191 // `BlockOf`. In non-reordered graphs, this vector is empty.
1192 ZoneVector<Block*> block_permutation_;
1193 Zone* graph_zone_;
1194 GrowingOpIndexSidetable<SourcePosition> source_positions_;
1195 GrowingOpIndexSidetable<OpIndex> operation_origins_;
1196 uint32_t dominator_tree_depth_ = 0;
1197 GrowingOpIndexSidetable<Type> operation_types_;
1198#ifdef DEBUG
1199 GrowingBlockSidetable<TypeRefinements> block_type_refinement_;
1200 bool graph_created_from_turbofan_ = false;
1201#endif
1202
1203 Graph* companion_ = nullptr;
1204#ifdef DEBUG
1205 size_t generation_ = 1;
1206#endif // DEBUG
1207
1208 // Phase specific data.
1209 // For some reducers/phases, we use the graph to pass data around. These data
1210 // should always be invalidated at the end of the graph copy.
1211
1212 LoopUnrollingAnalyzer* loop_unrolling_analyzer_ = nullptr;
1213
1214 // {stack_checks_to_remove_} contains the BlockIndex of loop headers whose
1215 // stack checks should be removed.
1216 // TODO(dmercadier): using the Zone for a resizable structure is not great
1217 // (because it tends to waste memory), but using a newed/malloced structure in
1218 // the Graph means that we have to remember to delete/free it, which isn't
1219 // convenient, because Zone memory typically isn't manually deleted (and the
1220 // Graph thus isn't). Still, it's probably not a big deal, because
1221 // {stack_checks_to_remove_} should never contain more than a handful of
1222 // items, and thus shouldn't waste too much memory.
1223 ZoneAbslFlatHashSet<uint32_t> stack_checks_to_remove_;
1224};
1225
1226V8_INLINE OperationStorageSlot* AllocateOpStorage(Graph* graph,
1227 size_t slot_count) {
1228 return graph->Allocate(slot_count);
1229}
1230
1231V8_INLINE const Operation& Get(const Graph& graph, OpIndex index) {
1232 return graph.Get(index);
1233}
1234
1235V8_INLINE const Operation& Block::FirstOperation(const Graph& graph) const {
1236 DCHECK_EQ(graph_generation_, graph.generation());
1237 DCHECK(begin_.valid());
1238 DCHECK(end_.valid());
1239 return graph.Get(begin_);
1240}
1241
1242V8_INLINE const Operation& Block::LastOperation(const Graph& graph) const {
1243 DCHECK_EQ(graph_generation_, graph.generation());
1244 return graph.Get(graph.PreviousIndex(end()));
1245}
1246
1247V8_INLINE Operation& Block::LastOperation(Graph& graph) const {
1248 DCHECK_EQ(graph_generation_, graph.generation());
1249 return graph.Get(graph.PreviousIndex(end()));
1250}
1251
1252V8_INLINE bool Block::HasPhis(const Graph& graph) const {
1253 // TODO(dmercadier): consider re-introducing the invariant that Phis are
1254 // always at the beginning of a block to speed up such functions. Currently,
1255 // in practice, Phis do not appear after the first non-FrameState non-Constant
1256 // operation, but this is not enforced.
1257 DCHECK_EQ(graph_generation_, graph.generation());
1258 for (const auto& op : graph.operations(*this)) {
1259 if (op.Is<PhiOp>()) return true;
1260 }
1261 return false;
1262}
1263
1264struct PrintAsBlockHeader {
1265 const Block& block;
1266 BlockIndex block_id;
1267
1268 explicit PrintAsBlockHeader(const Block& block)
1269 : block(block), block_id(block.index()) {}
1270 PrintAsBlockHeader(const Block& block, BlockIndex block_id)
1271 : block(block), block_id(block_id) {}
1272};
1273V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
1274 PrintAsBlockHeader block);
1275V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
1276 const Graph& graph);
1277V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
1278 const Block::Kind& kind);
1279
1280inline uint32_t Block::ComputeDominator() {
1281 if (V8_UNLIKELY(LastPredecessor() == nullptr)) {
1282 // If the block has no predecessors, then it's the start block. We create a
1283 // jmp_ edge to itself, so that the SetDominator algorithm does not need a
1284 // special case for when the start block is reached.
1285 SetAsDominatorRoot();
1286 } else {
1287 // If the block has one or more predecessors, the dominator is the lowest
1288 // common ancestor (LCA) of all of the predecessors.
1289
1290 // Note that for BranchTarget, there is a single predecessor. This doesn't
1291 // change the logic: the loop won't be entered, and the first (and only)
1292 // predecessor is set as the dominator.
1293 // Similarly, since we compute dominators on the fly, when we reach a
1294 // kLoopHeader, we haven't visited its body yet, and it should only have one
1295 // predecessor (the backedge is not here yet), which is its dominator.
1297
1298 Block* dominator = LastPredecessor();
1299 for (Block* pred = dominator->NeighboringPredecessor(); pred != nullptr;
1300 pred = pred->NeighboringPredecessor()) {
1301 dominator = dominator->GetCommonDominator(pred);
1302 }
1303 SetDominator(dominator);
1304 }
1305 DCHECK_NE(jmp_, nullptr);
1306 DCHECK_IMPLIES(nxt_ == nullptr, LastPredecessor() == nullptr);
1307 DCHECK_IMPLIES(len_ == 0, LastPredecessor() == nullptr);
1308 return Depth();
1309}
1310
1311template <class Derived>
1312void RandomAccessStackDominatorNode<Derived>::SetAsDominatorRoot() {
1313 jmp_ = static_cast<Derived*>(this);
1314 nxt_ = nullptr;
1315 len_ = 0;
1316 jmp_len_ = 0;
1317}
1318
1319template <class Derived>
1320void RandomAccessStackDominatorNode<Derived>::SetDominator(
1321 Derived* dominator) {
1322 DCHECK_NOT_NULL(dominator);
1323 DCHECK_NULL(static_cast<Block*>(this)->neighboring_child_);
1324 DCHECK_NULL(static_cast<Block*>(this)->last_child_);
1325 // Determining the jmp pointer
1326 Derived* t = dominator->jmp_;
1327 if (dominator->len_ - t->len_ == t->len_ - t->jmp_len_) {
1328 t = t->jmp_;
1329 } else {
1330 t = dominator;
1331 }
1332 // Initializing fields
1333 nxt_ = dominator;
1334 jmp_ = t;
1335 len_ = dominator->len_ + 1;
1336 jmp_len_ = jmp_->len_;
1337 dominator->AddChild(static_cast<Derived*>(this));
1338}
1339
1340template <class Derived>
1341Derived* RandomAccessStackDominatorNode<Derived>::GetCommonDominator(
1342 RandomAccessStackDominatorNode<Derived>* other) const {
1343 const RandomAccessStackDominatorNode* a = this;
1344 const RandomAccessStackDominatorNode* b = other;
1345 if (b->len_ > a->len_) {
1346 // Swapping |a| and |b| so that |a| always has a greater length.
1347 std::swap(a, b);
1348 }
1349 DCHECK_GE(a->len_, 0);
1350 DCHECK_GE(b->len_, 0);
1351
1352 // Going up the dominators of |a| in order to reach the level of |b|.
1353 while (a->len_ != b->len_) {
1354 DCHECK_GE(a->len_, 0);
1355 if (a->jmp_len_ >= b->len_) {
1356 a = a->jmp_;
1357 } else {
1358 a = a->nxt_;
1359 }
1360 }
1361
1362 // Going up the dominators of |a| and |b| simultaneously until |a| == |b|
1363 while (a != b) {
1364 DCHECK_EQ(a->len_, b->len_);
1365 DCHECK_GE(a->len_, 0);
1366 if (a->jmp_ == b->jmp_) {
1367 // We found a common dominator, but we actually want to find the smallest
1368 // one, so we go down in the current subtree.
1369 a = a->nxt_;
1370 b = b->nxt_;
1371 } else {
1372 a = a->jmp_;
1373 b = b->jmp_;
1374 }
1375 }
1376
1377 return static_cast<Derived*>(
1378 const_cast<RandomAccessStackDominatorNode<Derived>*>(a));
1379}
1380
1381} // namespace v8::internal::compiler::turboshaft
1382
1383// MSVC needs this definition to know how to deal with the PredecessorIterator.
1384template <>
1385class std::iterator_traits<
1386 v8::internal::compiler::turboshaft::PredecessorIterator> {
1387 public:
1388 using iterator_category = std::forward_iterator_tag;
1389};
1390
1391#endif // V8_COMPILER_TURBOSHAFT_GRAPH_H_