v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
scheduler.cc
1// Copyright 2013 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/compiler/scheduler.h"
6
7#include <iomanip>
8#include <optional>
9
10#include "src/base/iterator.h"
17#include "src/compiler/node.h"
21
22namespace v8 {
23namespace internal {
24namespace compiler {
25
26#define TRACE(...) \
27 do { \
28 if (v8_flags.trace_turbo_scheduler) PrintF(__VA_ARGS__); \
29 } while (false)
30
31Scheduler::Scheduler(Zone* zone, TFGraph* graph, Schedule* schedule,
32 Flags flags, size_t node_count_hint,
33 TickCounter* tick_counter,
34 const ProfileDataFromFile* profile_data)
35 : zone_(zone),
36 graph_(graph),
37 schedule_(schedule),
38 flags_(flags),
39 scheduled_nodes_(zone),
40 schedule_root_nodes_(zone),
41 schedule_queue_(zone),
42 node_data_(zone),
43 tick_counter_(tick_counter),
44 profile_data_(profile_data),
45 common_dominator_cache_(zone) {
46 node_data_.reserve(node_count_hint);
47 node_data_.resize(graph->NodeCount(), DefaultSchedulerData());
48}
49
50Schedule* Scheduler::ComputeSchedule(Zone* zone, TFGraph* graph, Flags flags,
51 TickCounter* tick_counter,
52 const ProfileDataFromFile* profile_data) {
53 Zone* schedule_zone =
54 (flags & Scheduler::kTempSchedule) ? zone : graph->zone();
55
56 // Reserve 10% more space for nodes if node splitting is enabled to try to
57 // avoid resizing the vector since that would triple its zone memory usage.
58 float node_hint_multiplier = (flags & Scheduler::kSplitNodes) ? 1.1 : 1;
59 size_t node_count_hint = node_hint_multiplier * graph->NodeCount();
60
61 Schedule* schedule =
62 schedule_zone->New<Schedule>(schedule_zone, node_count_hint);
63 Scheduler scheduler(zone, graph, schedule, flags, node_count_hint,
64 tick_counter, profile_data);
65
66 scheduler.BuildCFG();
67 scheduler.ComputeSpecialRPONumbering();
68 scheduler.GenerateDominatorTree();
69
70 scheduler.PrepareUses();
71 scheduler.ScheduleEarly();
72 scheduler.ScheduleLate();
73
74 scheduler.SealFinalSchedule();
75
76 return schedule;
77}
78
79Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
80 SchedulerData def = {schedule_->start(), 0, kUnknown};
81 return def;
82}
83
84
85Scheduler::SchedulerData* Scheduler::GetData(Node* node) {
86 return &node_data_[node->id()];
87}
88
89Scheduler::Placement Scheduler::InitializePlacement(Node* node) {
90 SchedulerData* data = GetData(node);
91 if (data->placement_ == kFixed) {
92 // Nothing to do for control nodes that have been already fixed in
93 // the schedule.
94 return data->placement_;
95 }
96 DCHECK_EQ(kUnknown, data->placement_);
97 switch (node->opcode()) {
98 case IrOpcode::kParameter:
99 case IrOpcode::kOsrValue:
100 // Parameters and OSR values are always fixed to the start block.
101 data->placement_ = kFixed;
102 break;
103 case IrOpcode::kPhi:
104 case IrOpcode::kEffectPhi: {
105 // Phis and effect phis are fixed if their control inputs are, whereas
106 // otherwise they are coupled to a floating control node.
107 Placement p = GetPlacement(NodeProperties::GetControlInput(node));
108 data->placement_ = (p == kFixed ? kFixed : kCoupled);
109 break;
110 }
111 default:
112 // Control nodes that were not control-reachable from end may float.
113 data->placement_ = kSchedulable;
114 break;
115 }
116 return data->placement_;
117}
118
119Scheduler::Placement Scheduler::GetPlacement(Node* node) {
120 return GetData(node)->placement_;
121}
122
123bool Scheduler::IsLive(Node* node) { return GetPlacement(node) != kUnknown; }
124
125void Scheduler::UpdatePlacement(Node* node, Placement placement) {
126 SchedulerData* data = GetData(node);
127 if (data->placement_ == kUnknown) {
128 // We only update control nodes from {kUnknown} to {kFixed}. Ideally, we
129 // should check that {node} is a control node (including exceptional calls),
130 // but that is expensive.
131 DCHECK_EQ(Scheduler::kFixed, placement);
132 data->placement_ = placement;
133 return;
134 }
135
136 switch (node->opcode()) {
137 case IrOpcode::kParameter:
138 // Parameters are fixed once and for all.
139 UNREACHABLE();
140 case IrOpcode::kPhi:
141 case IrOpcode::kEffectPhi: {
142 // Phis and effect phis are coupled to their respective blocks.
143 DCHECK_EQ(Scheduler::kCoupled, data->placement_);
144 DCHECK_EQ(Scheduler::kFixed, placement);
145 Node* control = NodeProperties::GetControlInput(node);
146 BasicBlock* block = schedule_->block(control);
147 schedule_->AddNode(block, node);
148 break;
149 }
150#define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
151 CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
152#undef DEFINE_CONTROL_CASE
153 {
154 // Control nodes force coupled uses to be placed.
155 for (auto use : node->uses()) {
156 if (GetPlacement(use) == Scheduler::kCoupled) {
158 UpdatePlacement(use, placement);
159 }
160 }
161 break;
162 }
163 default:
164 DCHECK_EQ(Scheduler::kSchedulable, data->placement_);
166 break;
167 }
168 // Reduce the use count of the node's inputs to potentially make them
169 // schedulable. If all the uses of a node have been scheduled, then the node
170 // itself can be scheduled.
171 std::optional<int> coupled_control_edge = GetCoupledControlEdge(node);
172 for (Edge const edge : node->input_edges()) {
173 DCHECK_EQ(node, edge.from());
174 if (edge.index() != coupled_control_edge) {
175 DecrementUnscheduledUseCount(edge.to(), node);
176 }
177 }
178 data->placement_ = placement;
179}
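The use-count bookkeeping above is what drives the scheduling worklist: a node becomes eligible for placement only once every one of its uses has been placed. Below is a minimal standalone sketch of that idea, assuming plain integer node ids and an inputs adjacency list instead of V8's Node graph (illustrative only, not part of scheduler.cc):

#include <queue>
#include <vector>

// Illustrative sketch: nodes are plain ints and inputs[n] lists the inputs of
// node n. A node is pushed once all of its uses have been placed, mirroring
// what DecrementUnscheduledUseCount does with schedule_queue_ above.
std::vector<int> ScheduleLateOrder(const std::vector<std::vector<int>>& inputs,
                                   const std::vector<int>& roots) {
  std::vector<int> unscheduled_uses(inputs.size(), 0);
  for (const auto& node_inputs : inputs) {
    for (int input : node_inputs) ++unscheduled_uses[input];
  }
  std::queue<int> ready;
  for (int root : roots) ready.push(root);  // e.g. the graph's end node
  std::vector<int> order;
  while (!ready.empty()) {
    int node = ready.front();
    ready.pop();
    order.push_back(node);
    for (int input : inputs[node]) {
      // Placing `node` retires one use of each of its inputs.
      if (--unscheduled_uses[input] == 0) ready.push(input);
    }
  }
  return order;  // every node appears only after all of its uses
}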
180
181std::optional<int> Scheduler::GetCoupledControlEdge(Node* node) {
182 if (GetPlacement(node) == kCoupled) {
183 return NodeProperties::FirstControlIndex(node);
184 }
185 return {};
186}
187
188void Scheduler::IncrementUnscheduledUseCount(Node* node, Node* from) {
189 // Tracking use counts for fixed nodes is useless.
190 if (GetPlacement(node) == kFixed) return;
191
192 // Use count for coupled nodes is summed up on their control.
193 if (GetPlacement(node) == kCoupled) {
195 Node* control = NodeProperties::GetControlInput(node);
196 return IncrementUnscheduledUseCount(control, from);
197 }
198
199 ++(GetData(node)->unscheduled_count_);
200 if (v8_flags.trace_turbo_scheduler) {
201 TRACE(" Use count of #%d:%s (used by #%d:%s)++ = %d\n", node->id(),
202 node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
203 GetData(node)->unscheduled_count_);
204 }
205}
206
207void Scheduler::DecrementUnscheduledUseCount(Node* node, Node* from) {
208 // Tracking use counts for fixed nodes is useless.
209 if (GetPlacement(node) == kFixed) return;
210
211 // Use count for coupled nodes is summed up on their control.
212 if (GetPlacement(node) == kCoupled) {
214 Node* control = NodeProperties::GetControlInput(node);
215 return DecrementUnscheduledUseCount(control, from);
216 }
217
218 DCHECK_LT(0, GetData(node)->unscheduled_count_);
219 --(GetData(node)->unscheduled_count_);
220 if (v8_flags.trace_turbo_scheduler) {
221 TRACE(" Use count of #%d:%s (used by #%d:%s)-- = %d\n", node->id(),
222 node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
223 GetData(node)->unscheduled_count_);
224 }
225 if (GetData(node)->unscheduled_count_ == 0) {
226 TRACE(" newly eligible #%d:%s\n", node->id(), node->op()->mnemonic());
227 schedule_queue_.push(node);
228 }
229}
230
231// -----------------------------------------------------------------------------
232// Phase 1: Build control-flow graph.
233
234
235// Internal class to build a control flow graph (i.e the basic blocks and edges
236// between them within a Schedule) from the node graph. Visits control edges of
237// the graph backwards from an end node in order to find the connected control
238// subgraph, needed for scheduling.
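As a rough sketch of the traversal described above, assuming an integer-id graph where control_inputs[n] lists the control inputs of node n (an illustrative stand-in for walking NodeProperties::FirstControlIndex..PastControlIndex on real nodes); the CFGBuilder class below performs the same walk on the actual node graph and additionally builds and connects basic blocks:

#include <queue>
#include <set>
#include <vector>

// Illustrative sketch: collect every control node reachable backwards from
// `end` via control inputs, breadth-first, like CFGBuilder::Run() below.
std::vector<int> CollectControlSubgraph(
    const std::vector<std::vector<int>>& control_inputs, int end) {
  std::vector<int> control;  // discovery order, like the control_ vector
  std::set<int> queued;      // stands in for the queued_ NodeMarker
  std::queue<int> queue;
  queue.push(end);
  queued.insert(end);
  control.push_back(end);
  while (!queue.empty()) {  // breadth-first backwards traversal
    int node = queue.front();
    queue.pop();
    for (int input : control_inputs[node]) {
      if (queued.insert(input).second) {
        queue.push(input);
        control.push_back(input);
      }
    }
  }
  return control;
}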
239class CFGBuilder : public ZoneObject {
240 public:
241 CFGBuilder(Zone* zone, Scheduler* scheduler)
242 : zone_(zone),
243 scheduler_(scheduler),
244 schedule_(scheduler->schedule_),
245 queued_(scheduler->graph_, 2),
246 queue_(zone),
247 control_(zone),
248 component_entry_(nullptr),
249 component_start_(nullptr),
250 component_end_(nullptr) {}
251
252 // Run the control flow graph construction algorithm by walking the graph
253 // backwards from end through control edges, building and connecting the
254 // basic blocks for control nodes.
255 void Run() {
256 ResetDataStructures();
257 Queue(scheduler_->graph_->end());
258
259 while (!queue_.empty()) { // Breadth-first backwards traversal.
261 Node* node = queue_.front();
262 queue_.pop();
263 int max = NodeProperties::PastControlIndex(node);
264 for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
265 Queue(node->InputAt(i));
266 }
267 }
268
269 for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
270 ConnectBlocks(*i); // Connect block to its predecessor/successors.
271 }
272 }
273
274 // Run the control flow graph construction for a minimal control-connected
275 // component ending in {exit} and merge that component into an existing
276 // control flow graph at the bottom of {block}.
277 void Run(BasicBlock* block, Node* exit) {
279 Queue(exit);
280
281 component_entry_ = nullptr;
285 while (!queue_.empty()) { // Breadth-first backwards traversal.
287 Node* node = queue_.front();
288 queue_.pop();
289
290 // Use control dependence equivalence to find a canonical single-entry
291 // single-exit region that makes up a minimal component to be scheduled.
292 if (IsSingleEntrySingleExitRegion(node, exit)) {
293 TRACE("Found SESE at #%d:%s\n", node->id(), node->op()->mnemonic());
296 continue;
297 }
298
299 int max = NodeProperties::PastControlIndex(node);
300 for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
301 Queue(node->InputAt(i));
302 }
303 }
305
306 for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
307 ConnectBlocks(*i); // Connect block to its predecessor/successors.
308 }
309 }
310
311 private:
313 friend class Scheduler;
314
315 void FixNode(BasicBlock* block, Node* node) {
316 schedule_->AddNode(block, node);
317 scheduler_->UpdatePlacement(node, Scheduler::kFixed);
318 }
319
320 void Queue(Node* node) {
321 // Mark the connected control nodes as they are queued.
322 if (!queued_.Get(node)) {
323 BuildBlocks(node);
324 queue_.push(node);
325 queued_.Set(node, true);
326 control_.push_back(node);
327 }
328 }
329
330 void BuildBlocks(Node* node) {
331 switch (node->opcode()) {
332 case IrOpcode::kEnd:
333 FixNode(schedule_->end(), node);
334 break;
335 case IrOpcode::kStart:
336 FixNode(schedule_->start(), node);
337 break;
338 case IrOpcode::kLoop:
339 case IrOpcode::kMerge:
340 BuildBlockForNode(node);
341 break;
342 case IrOpcode::kTerminate: {
343 // Put Terminate in the loop to which it refers.
344 Node* loop = NodeProperties::GetControlInput(node);
345 BasicBlock* block = BuildBlockForNode(loop);
346 FixNode(block, node);
347 break;
348 }
349 case IrOpcode::kBranch:
350 case IrOpcode::kSwitch:
351 BuildBlocksForSuccessors(node);
352 break;
353#define BUILD_BLOCK_JS_CASE(Name, ...) case IrOpcode::k##Name:
355// JS opcodes are just like calls => fall through.
356#undef BUILD_BLOCK_JS_CASE
357 case IrOpcode::kCall:
358 case IrOpcode::kFastApiCall:
361 }
362 break;
363 default:
364 break;
365 }
366 }
367
368 void ConnectBlocks(Node* node) {
369 switch (node->opcode()) {
370 case IrOpcode::kLoop:
371 case IrOpcode::kMerge:
372 ConnectMerge(node);
373 break;
374 case IrOpcode::kBranch:
376 ConnectBranch(node);
377 break;
378 case IrOpcode::kSwitch:
380 ConnectSwitch(node);
381 break;
382 case IrOpcode::kDeoptimize:
384 ConnectDeoptimize(node);
385 break;
386 case IrOpcode::kTailCall:
388 ConnectTailCall(node);
389 break;
390 case IrOpcode::kReturn:
392 ConnectReturn(node);
393 break;
394 case IrOpcode::kThrow:
396 ConnectThrow(node);
397 break;
398#define CONNECT_BLOCK_JS_CASE(Name, ...) case IrOpcode::k##Name:
400// JS opcodes are just like calls => fall through.
401#undef CONNECT_BLOCK_JS_CASE
402 case IrOpcode::kCall:
403 case IrOpcode::kFastApiCall:
406 ConnectCall(node);
407 }
408 break;
409 default:
410 break;
411 }
412 }
413
414 BasicBlock* BuildBlockForNode(Node* node) {
415 BasicBlock* block = schedule_->block(node);
416 if (block == nullptr) {
417 block = schedule_->NewBasicBlock();
418 TRACE("Create block id:%d for #%d:%s\n", block->id().ToInt(), node->id(),
419 node->op()->mnemonic());
420 FixNode(block, node);
421 }
422 return block;
423 }
424
425 void BuildBlocksForSuccessors(Node* node) {
426 size_t const successor_cnt = node->op()->ControlOutputCount();
427 Node** successors = zone_->AllocateArray<Node*>(successor_cnt);
428 NodeProperties::CollectControlProjections(node, successors, successor_cnt);
429 for (size_t index = 0; index < successor_cnt; ++index) {
430 BuildBlockForNode(successors[index]);
431 }
432 }
433
434 void CollectSuccessorBlocks(Node* node, BasicBlock** successor_blocks,
435 size_t successor_cnt) {
436 Node** successors = reinterpret_cast<Node**>(successor_blocks);
437 NodeProperties::CollectControlProjections(node, successors, successor_cnt);
438 for (size_t index = 0; index < successor_cnt; ++index) {
439 successor_blocks[index] = schedule_->block(successors[index]);
440 }
441 }
442
443 BasicBlock* FindPredecessorBlock(Node* node) {
444 BasicBlock* predecessor_block = nullptr;
445 while (true) {
446 predecessor_block = schedule_->block(node);
447 if (predecessor_block != nullptr) break;
448 node = NodeProperties::GetControlInput(node);
449 }
450 return predecessor_block;
451 }
452
453 void ConnectCall(Node* call) {
454 BasicBlock* successor_blocks[2];
455 CollectSuccessorBlocks(call, successor_blocks, arraysize(successor_blocks));
456
457 // Consider the exception continuation to be deferred.
458 successor_blocks[1]->set_deferred(true);
459
460 Node* call_control = NodeProperties::GetControlInput(call);
461 BasicBlock* call_block = FindPredecessorBlock(call_control);
462 TraceConnect(call, call_block, successor_blocks[0]);
463 TraceConnect(call, call_block, successor_blocks[1]);
464 schedule_->AddCall(call_block, call, successor_blocks[0],
465 successor_blocks[1]);
466 }
467
468 void ConnectBranch(Node* branch) {
469 BasicBlock* successor_blocks[2];
470 CollectSuccessorBlocks(branch, successor_blocks,
471 arraysize(successor_blocks));
472
473 BranchHint hint_from_profile = BranchHint::kNone;
474 if (const ProfileDataFromFile* profile_data = scheduler_->profile_data()) {
475 hint_from_profile =
476 profile_data->GetHint(successor_blocks[0]->id().ToSize(),
477 successor_blocks[1]->id().ToSize());
478 }
479
480 // Consider branch hints.
481 switch (hint_from_profile) {
482 case BranchHint::kNone:
483 switch (BranchHintOf(branch->op())) {
484 case BranchHint::kNone:
485 break;
486 case BranchHint::kTrue:
487 successor_blocks[1]->set_deferred(true);
488 break;
489 case BranchHint::kFalse:
490 successor_blocks[0]->set_deferred(true);
491 break;
492 }
493 break;
494 case BranchHint::kTrue:
495 successor_blocks[1]->set_deferred(true);
496 break;
497 case BranchHint::kFalse:
498 successor_blocks[0]->set_deferred(true);
499 break;
500 }
501
502 if (branch == component_entry_) {
503 TraceConnect(branch, component_start_, successor_blocks[0]);
504 TraceConnect(branch, component_start_, successor_blocks[1]);
505 schedule_->InsertBranch(component_start_, component_end_, branch,
506 successor_blocks[0], successor_blocks[1]);
507 } else {
508 Node* branch_control = NodeProperties::GetControlInput(branch);
509 BasicBlock* branch_block = FindPredecessorBlock(branch_control);
510 TraceConnect(branch, branch_block, successor_blocks[0]);
511 TraceConnect(branch, branch_block, successor_blocks[1]);
512 schedule_->AddBranch(branch_block, branch, successor_blocks[0],
513 successor_blocks[1]);
514 }
515 }
516
517 void ConnectSwitch(Node* sw) {
518 size_t const successor_count = sw->op()->ControlOutputCount();
519 BasicBlock** successor_blocks =
520 zone_->AllocateArray<BasicBlock*>(successor_count);
521 CollectSuccessorBlocks(sw, successor_blocks, successor_count);
522
523 if (sw == component_entry_) {
524 for (size_t index = 0; index < successor_count; ++index) {
525 TraceConnect(sw, component_start_, successor_blocks[index]);
526 }
527 schedule_->InsertSwitch(component_start_, component_end_, sw,
528 successor_blocks, successor_count);
529 } else {
530 Node* switch_control = NodeProperties::GetControlInput(sw);
531 BasicBlock* switch_block = FindPredecessorBlock(switch_control);
532 for (size_t index = 0; index < successor_count; ++index) {
533 TraceConnect(sw, switch_block, successor_blocks[index]);
534 }
535 schedule_->AddSwitch(switch_block, sw, successor_blocks, successor_count);
536 }
537 for (size_t index = 0; index < successor_count; ++index) {
538 if (BranchHintOf(successor_blocks[index]->front()->op()) ==
539 BranchHint::kFalse) {
540 successor_blocks[index]->set_deferred(true);
541 }
542 }
543 }
544
545 void ConnectMerge(Node* merge) {
546 // Don't connect the special merge at the end to its predecessors.
547 if (IsFinalMerge(merge)) return;
548
549 BasicBlock* block = schedule_->block(merge);
550 DCHECK_NOT_NULL(block);
551 // For all of the merge's control inputs, add a goto at the end to the
552 // merge's basic block.
553 for (Node* const input : merge->inputs()) {
554 BasicBlock* predecessor_block = FindPredecessorBlock(input);
555 TraceConnect(merge, predecessor_block, block);
556 schedule_->AddGoto(predecessor_block, block);
557 }
558 }
559
560 void ConnectTailCall(Node* call) {
561 Node* call_control = NodeProperties::GetControlInput(call);
562 BasicBlock* call_block = FindPredecessorBlock(call_control);
563 TraceConnect(call, call_block, nullptr);
564 schedule_->AddTailCall(call_block, call);
565 }
566
567 void ConnectReturn(Node* ret) {
568 Node* return_control = NodeProperties::GetControlInput(ret);
569 BasicBlock* return_block = FindPredecessorBlock(return_control);
570 TraceConnect(ret, return_block, nullptr);
571 schedule_->AddReturn(return_block, ret);
572 }
573
574 void ConnectDeoptimize(Node* deopt) {
575 Node* deoptimize_control = NodeProperties::GetControlInput(deopt);
576 BasicBlock* deoptimize_block = FindPredecessorBlock(deoptimize_control);
577 TraceConnect(deopt, deoptimize_block, nullptr);
578 schedule_->AddDeoptimize(deoptimize_block, deopt);
579 }
580
581 void ConnectThrow(Node* thr) {
582 Node* throw_control = NodeProperties::GetControlInput(thr);
583 BasicBlock* throw_block = FindPredecessorBlock(throw_control);
584 TraceConnect(thr, throw_block, nullptr);
585 schedule_->AddThrow(throw_block, thr);
586 }
587
588 void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
589 DCHECK_NOT_NULL(block);
590 if (succ == nullptr) {
591 TRACE("Connect #%d:%s, id:%d -> end\n", node->id(),
592 node->op()->mnemonic(), block->id().ToInt());
593 } else {
594 TRACE("Connect #%d:%s, id:%d -> id:%d\n", node->id(),
595 node->op()->mnemonic(), block->id().ToInt(), succ->id().ToInt());
596 }
597 }
598
599 bool IsFinalMerge(Node* node) {
600 return (node->opcode() == IrOpcode::kMerge &&
601 node == scheduler_->graph_->end()->InputAt(0));
602 }
603
604 bool IsSingleEntrySingleExitRegion(Node* entry, Node* exit) const {
605 size_t entry_class = scheduler_->equivalence_->ClassOf(entry);
606 size_t exit_class = scheduler_->equivalence_->ClassOf(exit);
607 return entry != exit && entry_class == exit_class;
608 }
609
611 control_.clear();
612 DCHECK(queue_.empty());
614 }
615
619 NodeMarker<bool> queued_; // Mark indicating whether node is queued.
620 ZoneQueue<Node*> queue_; // Queue used for breadth-first traversal.
621 NodeVector control_; // List of encountered control nodes.
622 Node* component_entry_; // Component single-entry node.
623 BasicBlock* component_start_; // Component single-entry block.
624 BasicBlock* component_end_; // Component single-exit block.
625};
626
627
628void Scheduler::BuildCFG() {
629 TRACE("--- CREATING CFG -------------------------------------------\n");
630
631 // Instantiate a new control equivalence algorithm for the graph.
632 equivalence_ = zone_->New<ControlEquivalence>(zone_, graph_);
633
634 // Build a control-flow graph for the main control-connected component that
635 // is being spanned by the graph's start and end nodes.
636 control_flow_builder_ = zone_->New<CFGBuilder>(zone_, this);
637 control_flow_builder_->Run();
638
639 // Initialize per-block data.
640 // Reserve an extra 10% to avoid resizing vector when fusing floating control.
643}
644
645
646// -----------------------------------------------------------------------------
647// Phase 2: Compute special RPO and dominator tree.
648
649
650// Compute the special reverse-post-order block ordering, which is essentially
651// a RPO of the graph where loop bodies are contiguous. Properties:
652// 1. If block A is a predecessor of B, then A appears before B in the order,
653// unless B is a loop header and A is in the loop headed at B
654// (i.e. A -> B is a backedge).
655// => If block A dominates block B, then A appears before B in the order.
656// => If block A is a loop header, A appears before all blocks in the loop
657// headed at A.
658// 2. All loops are contiguous in the order (i.e. no intervening blocks that
659// do not belong to the loop.)
660// Note a simple RPO traversal satisfies (1) but not (2).
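To see why property (2) needs the special handling below, consider the CFG B0 -> B1, B1 -> {B2, B3}, B2 -> B1 (backedge): a plain DFS-based RPO that happens to visit B2 before B3 yields B0 B1 B3 B2, so B3 splits the loop {B1, B2}, while the special RPO yields B0 B1 B2 B3. A minimal sketch of the ordinary RPO for comparison, using integer block ids (illustrative only; the SpecialRPONumberer class below computes the loop-aware order on real basic blocks):

#include <vector>

// Illustrative sketch of an ordinary DFS-based RPO. For the CFG
//   B0 -> B1, B1 -> {B2, B3}, B2 -> B1 (backedge)
// a DFS that happens to follow B2 before B3 finishes B2 first (its only
// successor B1 is still on the stack), giving post-order B2 B3 B1 B0 and
// thus RPO B0 B1 B3 B2: B3 splits the loop {B1, B2}, violating property (2).
void Dfs(int block, const std::vector<std::vector<int>>& succ,
         std::vector<bool>& visited, std::vector<int>& postorder) {
  visited[block] = true;
  for (int s : succ[block]) {
    if (!visited[s]) Dfs(s, succ, visited, postorder);
  }
  postorder.push_back(block);
}

std::vector<int> PlainRPO(const std::vector<std::vector<int>>& succ,
                          int entry) {
  std::vector<bool> visited(succ.size(), false);
  std::vector<int> postorder;
  Dfs(entry, succ, visited, postorder);
  return {postorder.rbegin(), postorder.rend()};
}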
661class SpecialRPONumberer : public ZoneObject {
662 public:
664 : zone_(zone),
666 order_(nullptr),
667 beyond_end_(nullptr),
668 loops_(zone),
669 backedges_(zone),
670 stack_(zone),
672 empty_(0, zone) {}
673
674 // Computes the special reverse-post-order for the main control flow graph,
675 // that is for the graph spanned between the schedule's start and end blocks.
678 DCHECK(!order_); // Main order does not exist yet.
680 }
681
682 // Computes the special reverse-post-order for a partial control flow graph,
683 // that is for the graph spanned between the given {entry} and {end} blocks,
684 // then updates the existing ordering with this new information.
686 DCHECK(order_); // Main order to be updated is present.
688 }
689
690 // Serialize the previously computed order as a special reverse-post-order
691 // numbering for basic blocks into the final schedule.
693 int32_t number = 0;
694 for (BasicBlock* b = order_; b != nullptr; b = b->rpo_next()) {
695 b->set_rpo_number(number++);
697 }
699 }
700
701 // Print and verify the special reverse-post-order.
703#if DEBUG
704 if (v8_flags.trace_turbo_scheduler) PrintRPO();
705 VerifySpecialRPO();
706#endif
707 }
708
710 if (HasLoopNumber(block)) {
711 LoopInfo const& loop = loops_[GetLoopNumber(block)];
712 if (loop.outgoing) return *loop.outgoing;
713 }
714 return empty_;
715 }
716
717 bool HasLoopBlocks() const { return !loops_.empty(); }
718
719 private:
720 using Backedge = std::pair<BasicBlock*, size_t>;
721
722 // Numbering for BasicBlock::rpo_number for this block traversal:
723 static const int kBlockOnStack = -2;
724 static const int kBlockVisited1 = -3;
725 static const int kBlockVisited2 = -4;
726 static const int kBlockUnvisited1 = -1;
728
733
749
750 int Push(int depth, BasicBlock* child, int unvisited) {
751 if (child->rpo_number() == unvisited) {
752 stack_[depth].block = child;
753 stack_[depth].index = 0;
755 return depth + 1;
756 }
757 return depth;
758 }
759
761 block->set_rpo_next(head);
762 return block;
763 }
764
765 static int GetLoopNumber(BasicBlock* block) { return block->loop_number(); }
766 static void SetLoopNumber(BasicBlock* block, int loop_number) {
767 return block->set_loop_number(loop_number);
768 }
769 static bool HasLoopNumber(BasicBlock* block) {
770 return block->loop_number() >= 0;
771 }
772
773 // We only need this special sentinel because some tests use the schedule's
774 // end block in actual control flow (e.g. with end having successors).
776 if (beyond_end_ == nullptr) {
779 }
780 return beyond_end_;
781 }
782
783 // Compute special RPO for the control flow graph between {entry} and {end},
784 // mutating any existing order so that the result is still valid.
786 // RPO should not have been serialized for this schedule yet.
789 CHECK_EQ(0, static_cast<int>(schedule_->rpo_order()->size()));
790
791 // Find correct insertion point within existing order.
792 BasicBlock* insertion_point = entry->rpo_next();
793 BasicBlock* order = insertion_point;
794
795 // Perform an iterative RPO traversal using an explicit stack,
796 // recording backedges that form cycles. O(|B|).
800 int stack_depth = Push(0, entry, kBlockUnvisited1);
801 int num_loops = static_cast<int>(loops_.size());
802
803 while (stack_depth > 0) {
804 int current = stack_depth - 1;
806
807 if (frame->block != end &&
808 frame->index < frame->block->SuccessorCount()) {
809 // Process the next successor.
810 BasicBlock* succ = frame->block->SuccessorAt(frame->index++);
811 if (succ->rpo_number() == kBlockVisited1) continue;
812 if (succ->rpo_number() == kBlockOnStack) {
813 // The successor is on the stack, so this is a backedge (cycle).
814 backedges_.push_back(Backedge(frame->block, frame->index - 1));
815 if (!HasLoopNumber(succ)) {
816 // Assign a new loop number to the header if it doesn't have one.
817 SetLoopNumber(succ, num_loops++);
818 }
819 } else {
820 // Push the successor onto the stack.
822 stack_depth = Push(stack_depth, succ, kBlockUnvisited1);
823 }
824 } else {
825 // Finished with all successors; pop the stack and add the block.
826 order = PushFront(order, frame->block);
828 stack_depth--;
829 }
830 }
831
832 // If no loops were encountered, then the order we computed was correct.
833 if (num_loops > static_cast<int>(loops_.size())) {
834 // Otherwise, compute the loop information from the backedges in order
835 // to perform a traversal that groups loop bodies together.
836 ComputeLoopInfo(&stack_, num_loops, &backedges_);
837
838 // Initialize the "loop stack". Note the entry could be a loop header.
839 LoopInfo* loop =
840 HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : nullptr;
841 order = insertion_point;
842
843 // Perform an iterative post-order traversal, visiting loop bodies before
844 // edges that lead out of loops. Visits each block once, but linking loop
845 // sections together is linear in the loop size, so overall is
846 // O(|B| + max(loop_depth) * max(|loop|))
847 stack_depth = Push(0, entry, kBlockUnvisited2);
848 while (stack_depth > 0) {
849 SpecialRPOStackFrame* frame = &stack_[stack_depth - 1];
850 BasicBlock* block = frame->block;
851 BasicBlock* succ = nullptr;
852
853 if (block != end && frame->index < block->SuccessorCount()) {
854 // Process the next normal successor.
855 succ = block->SuccessorAt(frame->index++);
856 } else if (HasLoopNumber(block)) {
857 // Process additional outgoing edges from the loop header.
858 if (block->rpo_number() == kBlockOnStack) {
859 // Finish the loop body the first time the header is left on the
860 // stack.
861 DCHECK(loop != nullptr && loop->header == block);
862 loop->start = PushFront(order, block);
863 order = loop->end;
865 // Pop the loop stack and continue visiting outgoing edges within
866 // the context of the outer loop, if any.
867 loop = loop->prev;
868 // We leave the loop header on the stack; the rest of this iteration
869 // and later iterations will go through its outgoing edges list.
870 }
871
872 // Use the next outgoing edge if there are any.
873 size_t outgoing_index = frame->index - block->SuccessorCount();
874 LoopInfo* info = &loops_[GetLoopNumber(block)];
875 DCHECK(loop != info);
876 if (block != entry && info->outgoing != nullptr &&
877 outgoing_index < info->outgoing->size()) {
878 succ = info->outgoing->at(outgoing_index);
879 frame->index++;
880 }
881 }
882
883 if (succ != nullptr) {
884 // Process the next successor.
885 if (succ->rpo_number() == kBlockOnStack) continue;
886 if (succ->rpo_number() == kBlockVisited2) continue;
888 if (loop != nullptr && !loop->members->Contains(succ->id().ToInt())) {
889 // The successor is not in the current loop or any nested loop.
890 // Add it to the outgoing edges of this loop and visit it later.
891 loop->AddOutgoing(zone_, succ);
892 } else {
893 // Push the successor onto the stack.
894 stack_depth = Push(stack_depth, succ, kBlockUnvisited2);
895 if (HasLoopNumber(succ)) {
896 // Push the inner loop onto the loop stack.
897 DCHECK(GetLoopNumber(succ) < num_loops);
898 LoopInfo* next = &loops_[GetLoopNumber(succ)];
899 next->end = order;
900 next->prev = loop;
901 loop = next;
902 }
903 }
904 } else {
905 // Finished with all successors of the current block.
906 if (HasLoopNumber(block)) {
907 // If we are going to pop a loop header, then add its entire body.
908 LoopInfo* info = &loops_[GetLoopNumber(block)];
909 for (BasicBlock* b = info->start; true; b = b->rpo_next()) {
910 if (b->rpo_next() == info->end) {
911 b->set_rpo_next(order);
912 info->end = order;
913 break;
914 }
915 }
916 order = info->start;
917 } else {
918 // Pop a single node off the stack and add it to the order.
919 order = PushFront(order, block);
920 block->set_rpo_number(kBlockVisited2);
921 }
922 stack_depth--;
923 }
924 }
925 }
926
927 // Publish new order the first time.
928 if (order_ == nullptr) order_ = order;
929
930 // Compute the correct loop headers and set the correct loop ends.
931 LoopInfo* current_loop = nullptr;
932 BasicBlock* current_header = entry->loop_header();
933 int32_t loop_depth = entry->loop_depth();
934 if (entry->IsLoopHeader()) --loop_depth; // Entry might be a loop header.
935 for (BasicBlock* b = order; b != insertion_point; b = b->rpo_next()) {
936 BasicBlock* current = b;
937
938 // Reset BasicBlock::rpo_number again.
940
941 // Finish the previous loop(s) if we just exited them.
942 while (current_header != nullptr &&
943 current == current_header->loop_end()) {
944 DCHECK(current_header->IsLoopHeader());
945 DCHECK_NOT_NULL(current_loop);
946 current_loop = current_loop->prev;
947 current_header =
948 current_loop == nullptr ? nullptr : current_loop->header;
949 --loop_depth;
950 }
951 current->set_loop_header(current_header);
952
953 // Push a new loop onto the stack if this loop is a loop header.
954 if (HasLoopNumber(current)) {
955 ++loop_depth;
956 current_loop = &loops_[GetLoopNumber(current)];
957 BasicBlock* loop_end = current_loop->end;
958 current->set_loop_end(loop_end == nullptr ? BeyondEndSentinel()
959 : loop_end);
960 current_header = current_loop->header;
961 TRACE("id:%d is a loop header, increment loop depth to %d\n",
962 current->id().ToInt(), loop_depth);
963 }
964
965 current->set_loop_depth(loop_depth);
966
967 if (current->loop_header() == nullptr) {
968 TRACE("id:%d is not in a loop (depth == %d)\n", current->id().ToInt(),
969 current->loop_depth());
970 } else {
971 TRACE("id:%d has loop header id:%d, (depth == %d)\n",
972 current->id().ToInt(), current->loop_header()->id().ToInt(),
973 current->loop_depth());
974 }
975 }
976 }
977
978 // Computes loop membership from the backedges of the control flow graph.
980 size_t num_loops, ZoneVector<Backedge>* backedges) {
981 // Extend existing loop membership vectors.
982 for (LoopInfo& loop : loops_) {
983 loop.members->Resize(static_cast<int>(schedule_->BasicBlockCount()),
984 zone_);
985 }
986
987 // Extend loop information vector.
988 loops_.resize(num_loops, LoopInfo());
989
990 // Compute loop membership starting from backedges.
991 // O(max(loop_depth) * max(|loop|)
992 for (size_t i = 0; i < backedges->size(); i++) {
993 BasicBlock* member = backedges->at(i).first;
994 BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
995 size_t loop_num = GetLoopNumber(header);
996 if (loops_[loop_num].header == nullptr) {
997 loops_[loop_num].header = header;
998 loops_[loop_num].members = zone_->New<BitVector>(
999 static_cast<int>(schedule_->BasicBlockCount()), zone_);
1000 }
1001
1002 int queue_length = 0;
1003 if (member != header) {
1004 // As long as the header doesn't have a backedge to itself,
1005 // Push the member onto the queue and process its predecessors.
1006 if (!loops_[loop_num].members->Contains(member->id().ToInt())) {
1007 loops_[loop_num].members->Add(member->id().ToInt());
1008 }
1009 (*queue)[queue_length++].block = member;
1010 }
1011
1012 // Propagate loop membership backwards. All predecessors of M up to the
1013 // loop header H are members of the loop too. O(|blocks between M and H|).
1014 while (queue_length > 0) {
1015 BasicBlock* block = (*queue)[--queue_length].block;
1016 for (size_t j = 0; j < block->PredecessorCount(); j++) {
1017 BasicBlock* pred = block->PredecessorAt(j);
1018 if (pred != header) {
1019 if (!loops_[loop_num].members->Contains(pred->id().ToInt())) {
1020 loops_[loop_num].members->Add(pred->id().ToInt());
1021 (*queue)[queue_length++].block = pred;
1022 }
1023 }
1024 }
1025 }
1026 }
1027 }
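A compact sketch of the membership propagation performed by ComputeLoopInfo above: for a backedge member -> header, every block that reaches member without passing through header belongs to the loop. Integer block ids and std::set stand in for V8's BitVector (illustrative only):

#include <set>
#include <vector>

// Illustrative sketch: given a backedge member -> header, every block that
// reaches `member` without passing through `header` is a loop member. The
// header itself is not added, matching the BitVector usage above.
std::set<int> LoopMembers(const std::vector<std::vector<int>>& predecessors,
                          int header, int member) {
  std::set<int> members;
  std::vector<int> worklist;
  if (member != header) {
    members.insert(member);
    worklist.push_back(member);
  }
  while (!worklist.empty()) {
    int block = worklist.back();
    worklist.pop_back();
    for (int pred : predecessors[block]) {
      if (pred != header && members.insert(pred).second) {
        worklist.push_back(pred);
      }
    }
  }
  return members;
}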
1028
1029#if DEBUG
1030 void PrintRPO() {
1031 StdoutStream os;
1032 os << "RPO with " << loops_.size() << " loops";
1033 if (loops_.size() > 0) {
1034 os << " (";
1035 for (size_t i = 0; i < loops_.size(); i++) {
1036 if (i > 0) os << " ";
1037 os << "id:" << loops_[i].header->id();
1038 }
1039 os << ")";
1040 }
1041 os << ":\n";
1042
1043 for (BasicBlock* block = order_; block != nullptr;
1044 block = block->rpo_next()) {
1045 os << std::setw(5) << "B" << block->rpo_number() << ":";
1046 for (size_t i = 0; i < loops_.size(); i++) {
1047 bool range = loops_[i].header->LoopContains(block);
1048 bool membership = loops_[i].header != block && range;
1049 os << (membership ? " |" : " ");
1050 os << (range ? "x" : " ");
1051 }
1052 os << " id:" << block->id() << ": ";
1053 if (block->loop_end() != nullptr) {
1054 os << " range: [B" << block->rpo_number() << ", B"
1055 << block->loop_end()->rpo_number() << ")";
1056 }
1057 if (block->loop_header() != nullptr) {
1058 os << " header: id:" << block->loop_header()->id();
1059 }
1060 if (block->loop_depth() > 0) {
1061 os << " depth: " << block->loop_depth();
1062 }
1063 os << "\n";
1064 }
1065 }
1066
1067 void VerifySpecialRPO() {
1069 DCHECK_LT(0, order->size());
1070 DCHECK_EQ(0, (*order)[0]->id().ToInt()); // entry should be first.
1071
1072 for (size_t i = 0; i < loops_.size(); i++) {
1073 LoopInfo* loop = &loops_[i];
1074 BasicBlock* header = loop->header;
1075 BasicBlock* end = header->loop_end();
1076
1077 DCHECK_NOT_NULL(header);
1078 DCHECK_LE(0, header->rpo_number());
1079 DCHECK_LT(header->rpo_number(), order->size());
1081 DCHECK_LE(end->rpo_number(), order->size());
1082 DCHECK_GT(end->rpo_number(), header->rpo_number());
1083 DCHECK_NE(header->loop_header(), header);
1084
1085 // Verify the start ... end list relationship.
1086 int links = 0;
1087 BasicBlock* block = loop->start;
1088 DCHECK_EQ(header, block);
1089 bool end_found;
1090 while (true) {
1091 if (block == nullptr || block == loop->end) {
1092 end_found = (loop->end == block);
1093 break;
1094 }
1095 // The list should be in same order as the final result.
1096 DCHECK(block->rpo_number() == links + header->rpo_number());
1097 links++;
1098 block = block->rpo_next();
1099 DCHECK_LT(links, static_cast<int>(2 * order->size())); // cycle?
1100 }
1101 DCHECK_LT(0, links);
1102 DCHECK_EQ(links, end->rpo_number() - header->rpo_number());
1103 DCHECK(end_found);
1104
1105 // Check loop depth of the header.
1106 int loop_depth = 0;
1107 for (LoopInfo* outer = loop; outer != nullptr; outer = outer->prev) {
1108 loop_depth++;
1109 }
1110 DCHECK_EQ(loop_depth, header->loop_depth());
1111
1112 // Check the contiguousness of loops.
1113 int count = 0;
1114 for (int j = 0; j < static_cast<int>(order->size()); j++) {
1115 block = order->at(j);
1116 DCHECK_EQ(block->rpo_number(), j);
1117 if (j < header->rpo_number() || j >= end->rpo_number()) {
1118 DCHECK(!header->LoopContains(block));
1119 } else {
1120 DCHECK(header->LoopContains(block));
1121 DCHECK_GE(block->loop_depth(), loop_depth);
1122 count++;
1123 }
1124 }
1125 DCHECK_EQ(links, count);
1126 }
1127 }
1128#endif // DEBUG
1129
1139};
1140
1141
1142BasicBlockVector* Scheduler::ComputeSpecialRPO(Zone* zone, Schedule* schedule) {
1143 SpecialRPONumberer numberer(zone, schedule);
1144 numberer.ComputeSpecialRPO();
1145 numberer.SerializeRPOIntoSchedule();
1146 numberer.PrintAndVerifySpecialRPO();
1147 return schedule->rpo_order();
1148}
1149
1150
1151void Scheduler::ComputeSpecialRPONumbering() {
1152 TRACE("--- COMPUTING SPECIAL RPO ----------------------------------\n");
1153
1154 // Compute the special reverse-post-order for basic blocks.
1155 special_rpo_ = zone_->New<SpecialRPONumberer>(zone_, schedule_);
1156 special_rpo_->ComputeSpecialRPO();
1157}
1158
1159BasicBlock* Scheduler::GetCommonDominatorIfCached(BasicBlock* b1,
1160 BasicBlock* b2) {
1161 auto entry1 = common_dominator_cache_.find(b1->id().ToInt());
1162 if (entry1 == common_dominator_cache_.end()) return nullptr;
1163 auto entry2 = entry1->second->find(b2->id().ToInt());
1164 if (entry2 == entry1->second->end()) return nullptr;
1165 return entry2->second;
1166}
1167
1168BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
1169 // A very common fast case:
1170 if (b1 == b2) return b1;
1171 // Try to find the common dominator by walking, if there is a chance of
1172 // finding it quickly.
1173 constexpr int kCacheGranularity = 63;
1174 static_assert((kCacheGranularity & (kCacheGranularity + 1)) == 0);
1175 int depth_difference = b1->dominator_depth() - b2->dominator_depth();
1176 if (depth_difference > -kCacheGranularity &&
1177 depth_difference < kCacheGranularity) {
1178 for (int i = 0; i < kCacheGranularity; i++) {
1179 if (b1->dominator_depth() < b2->dominator_depth()) {
1180 b2 = b2->dominator();
1181 } else {
1182 b1 = b1->dominator();
1183 }
1184 if (b1 == b2) return b1;
1185 }
1186 // We might fall out of the loop here if the dominator tree has several
1187 // deep "parallel" subtrees.
1188 }
1189 // If it'd be a long walk, take the bus instead (i.e. use the cache).
1190 // To keep memory consumption low, there'll be a bus stop every 64 blocks.
1191 // First, walk to the nearest bus stop.
1192 if (b1->dominator_depth() < b2->dominator_depth()) std::swap(b1, b2);
1193 while ((b1->dominator_depth() & kCacheGranularity) != 0) {
1194 if (V8_LIKELY(b1->dominator_depth() > b2->dominator_depth())) {
1195 b1 = b1->dominator();
1196 } else {
1197 b2 = b2->dominator();
1198 }
1199 if (b1 == b2) return b1;
1200 }
1201 // Then, walk from bus stop to bus stop until we either find a bus (i.e. an
1202 // existing cache entry) or the result. Make a list of any empty bus stops
1203 // we'd like to populate for next time.
1204 constexpr int kMaxNewCacheEntries = 2 * 50; // Must be even.
1205 // This array stores a flattened list of pairs, e.g. if after finding the
1206 // {result}, we want to cache [(B11, B12) -> result, (B21, B22) -> result],
1207 // then we store [11, 12, 21, 22] here.
1208 int new_cache_entries[kMaxNewCacheEntries];
1209 // Next free slot in {new_cache_entries}.
1210 int new_cache_entries_cursor = 0;
1211 while (b1 != b2) {
1212 if ((b1->dominator_depth() & kCacheGranularity) == 0) {
1213 BasicBlock* maybe_cache_hit = GetCommonDominatorIfCached(b1, b2);
1214 if (maybe_cache_hit != nullptr) {
1215 b1 = b2 = maybe_cache_hit;
1216 break;
1217 } else if (new_cache_entries_cursor < kMaxNewCacheEntries) {
1218 new_cache_entries[new_cache_entries_cursor++] = b1->id().ToInt();
1219 new_cache_entries[new_cache_entries_cursor++] = b2->id().ToInt();
1220 }
1221 }
1222 if (V8_LIKELY(b1->dominator_depth() > b2->dominator_depth())) {
1223 b1 = b1->dominator();
1224 } else {
1225 b2 = b2->dominator();
1226 }
1227 }
1228 // Lastly, create new cache entries we noted down earlier.
1229 BasicBlock* result = b1;
1230 for (int i = 0; i < new_cache_entries_cursor;) {
1231 int id1 = new_cache_entries[i++];
1232 int id2 = new_cache_entries[i++];
1234 auto entry = common_dominator_cache_.find(id1);
1235 if (entry == common_dominator_cache_.end()) {
1237 common_dominator_cache_[id1] = mapping;
1238 } else {
1239 mapping = entry->second;
1240 }
1241 // If there was an existing entry, we would have found it earlier.
1242 DCHECK_EQ(mapping->find(id2), mapping->end());
1243 mapping->insert({id2, result});
1244 }
1245 return result;
1246}
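The cache above only accelerates what is otherwise a plain depth-based walk to the lowest common dominator. A minimal uncached version of that walk, assuming an illustrative Block struct with dominator and dominator_depth fields rather than V8's BasicBlock API:

// Illustrative type only; V8's BasicBlock exposes dominator() and
// dominator_depth() accessors instead. Assumes both blocks live in the same
// dominator tree, i.e. they share the start block as a common ancestor.
struct Block {
  Block* dominator;
  int dominator_depth;
};

Block* CommonDominator(Block* b1, Block* b2) {
  while (b1 != b2) {
    // Step whichever block sits deeper in the dominator tree up to its
    // immediate dominator until the two walks meet.
    if (b1->dominator_depth < b2->dominator_depth) {
      b2 = b2->dominator;
    } else {
      b1 = b1->dominator;
    }
  }
  return b1;
}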
1247
1248void Scheduler::PropagateImmediateDominators(BasicBlock* block) {
1249 for (/*nop*/; block != nullptr; block = block->rpo_next()) {
1250 auto pred = block->predecessors().begin();
1251 auto end = block->predecessors().end();
1252 DCHECK(pred != end); // All blocks except start have predecessors.
1253 BasicBlock* dominator = *pred;
1254 bool deferred = dominator->deferred();
1255 // For multiple predecessors, walk up the dominator tree until a common
1256 // dominator is found. Visitation order guarantees that all predecessors
1257 // except for backwards edges have been visited.
1258 // We use a one-element cache for previously-seen dominators. This gets
1259 // hit a lot for functions that have long chains of diamonds, and in
1260 // those cases turns quadratic into linear complexity.
1261 BasicBlock* cache = nullptr;
1262 for (++pred; pred != end; ++pred) {
1263 // Don't examine backwards edges.
1264 if ((*pred)->dominator_depth() < 0) continue;
1265 if ((*pred)->dominator_depth() > 3 &&
1266 ((*pred)->dominator()->dominator() == cache ||
1267 (*pred)->dominator()->dominator()->dominator() == cache)) {
1268 // Nothing to do, the last iteration covered this case.
1269 DCHECK_EQ(dominator, BasicBlock::GetCommonDominator(dominator, *pred));
1270 } else {
1271 dominator = BasicBlock::GetCommonDominator(dominator, *pred);
1272 }
1273 cache = (*pred)->dominator();
1274 deferred = deferred & (*pred)->deferred();
1275 }
1276 block->set_dominator(dominator);
1277 block->set_dominator_depth(dominator->dominator_depth() + 1);
1278 block->set_deferred(deferred | block->deferred());
1279 TRACE("Block id:%d's idom is id:%d, depth = %d\n", block->id().ToInt(),
1280 dominator->id().ToInt(), block->dominator_depth());
1281 }
1282}
1283
1284void Scheduler::GenerateDominatorTree(Schedule* schedule) {
1285 // Seed start block to be the first dominator.
1286 schedule->start()->set_dominator_depth(0);
1287
1288 // Build the block dominator tree resulting from the above seed.
1289 PropagateImmediateDominators(schedule->start()->rpo_next());
1290}
1291
1292void Scheduler::GenerateDominatorTree() {
1293 TRACE("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n");
1294 GenerateDominatorTree(schedule_);
1295}
1296
1297// -----------------------------------------------------------------------------
1298// Phase 3: Prepare use counts for nodes.
1299
1300
1301class PrepareUsesVisitor {
1302 public:
1303 explicit PrepareUsesVisitor(Scheduler* scheduler, TFGraph* graph, Zone* zone)
1304 : scheduler_(scheduler),
1305 schedule_(scheduler->schedule_),
1306 graph_(graph),
1307 visited_(static_cast<int>(graph_->NodeCount()), zone),
1308 stack_(zone) {}
1309
1310 void Run() {
1312 while (!stack_.empty()) {
1313 Node* node = stack_.top();
1314 stack_.pop();
1315 VisitInputs(node);
1316 }
1317 }
1318
1319 private:
1321 TRACE("Pre #%d:%s\n", node->id(), node->op()->mnemonic());
1322 DCHECK(!Visited(node));
1324 // Fixed nodes are always roots for schedule late.
1326 if (!schedule_->IsScheduled(node)) {
1327 // Make sure root nodes are scheduled in their respective blocks.
1328 TRACE("Scheduling fixed position node #%d:%s\n", node->id(),
1329 node->op()->mnemonic());
1330 IrOpcode::Value opcode = node->opcode();
1331 BasicBlock* block =
1332 opcode == IrOpcode::kParameter
1333 ? schedule_->start()
1335 DCHECK_NOT_NULL(block);
1336 schedule_->AddNode(block, node);
1337 }
1338 }
1339 stack_.push(node);
1340 visited_.Add(node->id());
1341 }
1342
1343 void VisitInputs(Node* node) {
1345 bool is_scheduled = schedule_->IsScheduled(node);
1346 std::optional<int> coupled_control_edge =
1348 for (auto edge : node->input_edges()) {
1349 Node* to = edge.to();
1350 DCHECK_EQ(node, edge.from());
1351 if (!Visited(to)) {
1353 }
1354 TRACE("PostEdge #%d:%s->#%d:%s\n", node->id(), node->op()->mnemonic(),
1355 to->id(), to->op()->mnemonic());
1357 if (!is_scheduled && edge.index() != coupled_control_edge) {
1359 }
1360 }
1361 }
1362
1363 bool Visited(Node* node) { return visited_.Contains(node->id()); }
1364
1370};
1371
1372
1373void Scheduler::PrepareUses() {
1374 TRACE("--- PREPARE USES -------------------------------------------\n");
1375
1376 // Count the uses of every node, which is used to ensure that all of a
1377 // node's uses are scheduled before the node itself.
1378 PrepareUsesVisitor prepare_uses(this, graph_, zone_);
1379 prepare_uses.Run();
1380}
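A simplified sketch of the counting that PrepareUses performs, assuming integer node ids and an inputs adjacency list (illustrative only): every node reachable from the end node is visited once, and each traversed input edge bumps the input's unscheduled-use counter.

#include <stack>
#include <vector>

// Illustrative sketch: each node reachable from `end` is visited exactly
// once, and each traversed input edge bumps the input's use counter, just as
// PrepareUsesVisitor calls IncrementUnscheduledUseCount per post-visited edge.
std::vector<int> CountLiveUses(const std::vector<std::vector<int>>& inputs,
                               int end) {
  std::vector<int> use_count(inputs.size(), 0);
  std::vector<bool> visited(inputs.size(), false);
  std::stack<int> stack;
  stack.push(end);
  visited[end] = true;
  while (!stack.empty()) {
    int node = stack.top();
    stack.pop();
    for (int input : inputs[node]) {
      ++use_count[input];
      if (!visited[input]) {
        visited[input] = true;
        stack.push(input);
      }
    }
  }
  return use_count;
}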
1381
1382
1383// -----------------------------------------------------------------------------
1384// Phase 4: Schedule nodes early.
1385
1386
1387class ScheduleEarlyNodeVisitor {
1388 public:
1390 : scheduler_(scheduler), schedule_(scheduler->schedule_), queue_(zone) {}
1391
1392 // Run the schedule early algorithm on a set of fixed root nodes.
1393 void Run(NodeVector* roots) {
1394 for (Node* const root : *roots) {
1395 queue_.push(root);
1396 }
1397
1398 while (!queue_.empty()) {
1400 VisitNode(queue_.front());
1401 queue_.pop();
1402 }
1403 }
1404
1405 private:
1406 // Visits one node from the queue and propagates its current schedule early
1407 // position to all uses. This in turn might push more nodes onto the queue.
1408 void VisitNode(Node* node) {
1410
1411 // Fixed nodes already know their schedule early position.
1413 data->minimum_block_ = schedule_->block(node);
1414 TRACE("Fixing #%d:%s minimum_block = id:%d, dominator_depth = %d\n",
1415 node->id(), node->op()->mnemonic(),
1416 data->minimum_block_->id().ToInt(),
1417 data->minimum_block_->dominator_depth());
1418 }
1419
1420 // No need to propagate unconstrained schedule early positions.
1421 if (data->minimum_block_ == schedule_->start()) return;
1422
1423 // Propagate schedule early position.
1424 DCHECK_NOT_NULL(data->minimum_block_);
1425 for (auto use : node->uses()) {
1426 if (scheduler_->IsLive(use)) {
1427 PropagateMinimumPositionToNode(data->minimum_block_, use);
1428 }
1429 }
1430 }
1431
1432 // Propagates {block} as another minimum position into the given {node}. This
1433 // has the net effect of computing the minimum dominator block of {node} that
1434 // still post-dominates all inputs to {node} when the queue is processed.
1437
1438 // No need to propagate to fixed node, it's guaranteed to be a root.
1439 if (scheduler_->GetPlacement(node) == Scheduler::kFixed) return;
1440
1441 // Coupled nodes influence schedule early position of their control.
1443 Node* control = NodeProperties::GetControlInput(node);
1444 PropagateMinimumPositionToNode(block, control);
1445 }
1446
1447 // Propagate new position if it is deeper down the dominator tree than the
1448 // current. Note that all inputs need to have minimum block position inside
1449 // the dominator chain of {node}'s minimum block position.
1450 DCHECK(InsideSameDominatorChain(block, data->minimum_block_));
1451 if (block->dominator_depth() > data->minimum_block_->dominator_depth()) {
1452 data->minimum_block_ = block;
1453 queue_.push(node);
1454 TRACE("Propagating #%d:%s minimum_block = id:%d, dominator_depth = %d\n",
1455 node->id(), node->op()->mnemonic(),
1456 data->minimum_block_->id().ToInt(),
1457 data->minimum_block_->dominator_depth());
1458 }
1459 }
1460
1461#if DEBUG
1462 bool InsideSameDominatorChain(BasicBlock* b1, BasicBlock* b2) {
1463 BasicBlock* dominator = BasicBlock::GetCommonDominator(b1, b2);
1464 return dominator == b1 || dominator == b2;
1465 }
1466#endif
1467
1471};
1472
1473
1474void Scheduler::ScheduleEarly() {
1475 if (!special_rpo_->HasLoopBlocks()) {
1476 TRACE("--- NO LOOPS SO SKIPPING SCHEDULE EARLY --------------------\n");
1477 return;
1478 }
1479
1480 TRACE("--- SCHEDULE EARLY -----------------------------------------\n");
1481 if (v8_flags.trace_turbo_scheduler) {
1482 TRACE("roots: ");
1483 for (Node* node : schedule_root_nodes_) {
1484 TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
1485 }
1486 TRACE("\n");
1487 }
1488
1489 // Compute the minimum block for each node thereby determining the earliest
1490 // position each node could be placed within a valid schedule.
1491 ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
1492 schedule_early_visitor.Run(&schedule_root_nodes_);
1493}
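The essence of schedule-early is a maximum over dominator depths: a node's earliest legal block is the deepest of its inputs' minimum blocks, and all candidates lie on a single dominator chain. A hedged sketch with an illustrative BlockInfo type (not V8's BasicBlock):

#include <vector>

// Illustrative type only; V8 tracks this as SchedulerData::minimum_block_.
struct BlockInfo {
  int dominator_depth = 0;
};

// The earliest legal block for a node is the deepest (by dominator depth)
// minimum block among its inputs; unconstrained nodes stay at the start block.
BlockInfo* EarliestBlock(const std::vector<BlockInfo*>& input_min_blocks,
                         BlockInfo* start_block) {
  BlockInfo* minimum = start_block;
  for (BlockInfo* candidate : input_min_blocks) {
    if (candidate->dominator_depth > minimum->dominator_depth) {
      minimum = candidate;
    }
  }
  return minimum;
}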
1494
1495
1496// -----------------------------------------------------------------------------
1497// Phase 5: Schedule nodes late.
1498
1499
1500class ScheduleLateNodeVisitor {
1501 public:
1503 : zone_(zone),
1504 scheduler_(scheduler),
1506 marking_queue_(scheduler->zone_) {}
1507
1508 // Run the schedule late algorithm on a set of fixed root nodes.
1509 void Run(NodeVector* roots) {
1510 for (Node* const root : *roots) {
1511 ProcessQueue(root);
1512 }
1513 }
1514
1515 private:
1516 void ProcessQueue(Node* root) {
1518 for (Node* node : root->inputs()) {
1519 // Don't schedule coupled nodes on their own.
1522 }
1523
1524 // Test schedulability condition by looking at unscheduled use count.
1525 if (scheduler_->GetData(node)->unscheduled_count_ != 0) continue;
1526
1527 queue->push(node);
1528 do {
1530 Node* const n = queue->front();
1531 queue->pop();
1532 VisitNode(n);
1533 } while (!queue->empty());
1534 }
1535 }
1536
1537 // Visits one node from the queue of schedulable nodes and determines its
1538 // schedule late position. Also hoists nodes out of loops to find a more
1539 // optimal scheduling position.
1540 void VisitNode(Node* node) {
1542
1543 // Don't schedule nodes that are already scheduled.
1544 if (schedule_->IsScheduled(node)) return;
1546
1547 // Determine the dominating block for all of the uses of this node. It is
1548 // the latest block that this node can be scheduled in.
1549 TRACE("Scheduling #%d:%s\n", node->id(), node->op()->mnemonic());
1550 BasicBlock* block = GetCommonDominatorOfUses(node);
1551 DCHECK_NOT_NULL(block);
1552
1553 // The schedule early block dominates the schedule late block.
1554 BasicBlock* min_block = scheduler_->GetData(node)->minimum_block_;
1555 DCHECK_EQ(min_block, BasicBlock::GetCommonDominator(block, min_block));
1556
1557 TRACE(
1558 "Schedule late of #%d:%s is id:%d at loop depth %d, minimum = id:%d\n",
1559 node->id(), node->op()->mnemonic(), block->id().ToInt(),
1560 block->loop_depth(), min_block->id().ToInt());
1561
1562 // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
1563 // into enclosing loop pre-headers until they would precede their schedule
1564 // early position.
1565 BasicBlock* hoist_block = GetHoistBlock(block);
1566 if (hoist_block &&
1567 hoist_block->dominator_depth() >= min_block->dominator_depth()) {
1569 do {
1570 TRACE(" hoisting #%d:%s to block id:%d\n", node->id(),
1571 node->op()->mnemonic(), hoist_block->id().ToInt());
1572 DCHECK_LT(hoist_block->loop_depth(), block->loop_depth());
1573 block = hoist_block;
1574 hoist_block = GetHoistBlock(hoist_block);
1575 } while (hoist_block &&
1576 hoist_block->dominator_depth() >= min_block->dominator_depth());
1578 // Split the {node} if beneficial and return the new {block} for it.
1579 block = SplitNode(block, node);
1580 }
1581
1582 // Schedule the node or a floating control structure.
1583 if (IrOpcode::IsMergeOpcode(node->opcode())) {
1584 ScheduleFloatingControl(block, node);
1585 } else if (node->opcode() == IrOpcode::kFinishRegion) {
1586 ScheduleRegion(block, node);
1587 } else {
1588 ScheduleNode(block, node);
1589 }
1590 }
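The hoisting loop in VisitNode above can be summarized as the following walk, assuming an illustrative LoopBlock type in which hoist_candidate plays the role of GetHoistBlock (a sketch, not the real API): keep moving to the enclosing pre-header while it still lies at or below the schedule-early block.

// Illustrative types only; in the real code GetHoistBlock() supplies the
// enclosing loop pre-header and minimum_block_ is the schedule-early block.
struct LoopBlock {
  LoopBlock* hoist_candidate;  // enclosing loop pre-header, or nullptr
  int dominator_depth;
};

// Walks the placement out of loops while the candidate pre-header still lies
// at or below the node's schedule-early block in the dominator tree.
LoopBlock* HoistOutOfLoops(LoopBlock* latest, LoopBlock* minimum) {
  LoopBlock* block = latest;
  LoopBlock* hoist = block->hoist_candidate;
  while (hoist != nullptr &&
         hoist->dominator_depth >= minimum->dominator_depth) {
    block = hoist;
    hoist = hoist->hoist_candidate;
  }
  return block;
}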
1591
1592 bool IsMarked(BasicBlock* block) const {
1593 return marked_.Contains(block->id().ToInt());
1594 }
1595
1596 void Mark(BasicBlock* block) { marked_.Add(block->id().ToInt()); }
1597
1598 // Mark {block} and push its non-marked predecessor on the marking queue.
1599 void MarkBlock(BasicBlock* block) {
1600 Mark(block);
1601 for (BasicBlock* pred_block : block->predecessors()) {
1602 if (IsMarked(pred_block)) continue;
1603 marking_queue_.push_back(pred_block);
1604 }
1605 }
1606
1608 // For now, we limit splitting to pure nodes.
1609 if (!node->op()->HasProperty(Operator::kPure)) return block;
1610 // TODO(titzer): fix the special case of splitting of projections.
1611 if (node->opcode() == IrOpcode::kProjection) return block;
1612
1613 // The {block} is common dominator of all uses of {node}, so we cannot
1614 // split anything unless the {block} has at least two successors.
1615 DCHECK_EQ(block, GetCommonDominatorOfUses(node));
1616 if (block->SuccessorCount() < 2) return block;
1617
1618 // Clear marking bits.
1619 DCHECK(marking_queue_.empty());
1620 marked_.Clear();
1621 int new_size = static_cast<int>(schedule_->BasicBlockCount() + 1);
1622 if (marked_.length() < new_size) {
1623 marked_.Resize(new_size, scheduler_->zone_);
1624 }
1625
1626 // Check if the {node} has uses in {block}.
1627 for (Edge edge : node->use_edges()) {
1628 if (!scheduler_->IsLive(edge.from())) continue;
1629 BasicBlock* use_block = GetBlockForUse(edge);
1630 if (use_block == nullptr || IsMarked(use_block)) continue;
1631 if (use_block == block) {
1632 TRACE(" not splitting #%d:%s, it is used in id:%d\n", node->id(),
1633 node->op()->mnemonic(), block->id().ToInt());
1634 marking_queue_.clear();
1635 return block;
1636 }
1637 MarkBlock(use_block);
1638 }
1639
1640 // Compute transitive marking closure; a block is marked if all its
1641 // successors are marked.
1642 do {
1643 BasicBlock* top_block = marking_queue_.front();
1644 marking_queue_.pop_front();
1645 if (IsMarked(top_block)) continue;
1646 bool marked = true;
1647 if (top_block->loop_depth() == block->loop_depth()) {
1648 for (BasicBlock* successor : top_block->successors()) {
1649 if (!IsMarked(successor)) {
1650 marked = false;
1651 break;
1652 }
1653 }
1654 }
1655 if (marked) MarkBlock(top_block);
1656 } while (!marking_queue_.empty());
1657
1658 // If the (common dominator) {block} is marked, we know that all paths from
1659 // {block} to the end contain at least one use of {node}, and hence there's
1660 // no point in splitting the {node} in this case.
1661 if (IsMarked(block)) {
1662 TRACE(" not splitting #%d:%s, its common dominator id:%d is perfect\n",
1663 node->id(), node->op()->mnemonic(), block->id().ToInt());
1664 return block;
1665 }
1666
1667 // Split {node} for uses according to the previously computed marking
1668 // closure. Every marking partition has a unique dominator, which get's a
1669 // copy of the {node} with the exception of the first partition, which get's
1670 // the {node} itself.
1672 for (Edge edge : node->use_edges()) {
1673 if (!scheduler_->IsLive(edge.from())) continue;
1674 BasicBlock* use_block = GetBlockForUse(edge);
1675 if (use_block == nullptr) continue;
1676 while (IsMarked(use_block->dominator())) {
1677 use_block = use_block->dominator();
1678 }
1679 auto& use_node = dominators[use_block];
1680 if (use_node == nullptr) {
1681 if (dominators.size() == 1u) {
1682 // Place the {node} at {use_block}.
1683 block = use_block;
1684 use_node = node;
1685 TRACE(" pushing #%d:%s down to id:%d\n", node->id(),
1686 node->op()->mnemonic(), block->id().ToInt());
1687 } else {
1688 // Place a copy of {node} at {use_block}.
1689 use_node = CloneNode(node);
1690 TRACE(" cloning #%d:%s for id:%d\n", use_node->id(),
1691 use_node->op()->mnemonic(), use_block->id().ToInt());
1692 scheduler_->schedule_queue_.push(use_node);
1693 }
1694 }
1695 edge.UpdateTo(use_node);
1696 }
1697 return block;
1698 }
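The heart of SplitNode above is the marking closure: starting from the blocks that contain uses, a block becomes marked once all of its successors are marked, and unmarked dominators are where split copies may be placed. A sketch of just that closure with integer block ids (illustrative only; the real code also restricts the closure by loop depth):

#include <deque>
#include <set>
#include <vector>

// Illustrative sketch of the marking closure: blocks containing uses are
// marked first, then any block all of whose successors are marked is marked
// too. (The real code additionally limits this to matching loop depths.)
std::set<int> MarkingClosure(const std::vector<std::vector<int>>& successors,
                             const std::vector<std::vector<int>>& predecessors,
                             const std::vector<int>& use_blocks) {
  std::set<int> marked;
  std::deque<int> queue;
  for (int block : use_blocks) {
    marked.insert(block);
    for (int pred : predecessors[block]) queue.push_back(pred);
  }
  while (!queue.empty()) {
    int block = queue.front();
    queue.pop_front();
    if (marked.count(block)) continue;
    bool all_successors_marked = true;
    for (int succ : successors[block]) {
      if (!marked.count(succ)) {
        all_successors_marked = false;
        break;
      }
    }
    if (all_successors_marked) {
      marked.insert(block);
      for (int pred : predecessors[block]) queue.push_back(pred);
    }
  }
  return marked;
}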
1699
1701 if (!scheduler_->special_rpo_->HasLoopBlocks()) return nullptr;
1702 if (block->IsLoopHeader()) return block->dominator();
1703 // We have to check to make sure that the {block} dominates all
1704 // of the outgoing blocks. If it doesn't, then there is a path
1705 // out of the loop which does not execute this {block}, so we
1706 // can't hoist operations from this {block} out of the loop, as
1707 // that would introduce additional computations.
1708 if (BasicBlock* header_block = block->loop_header()) {
1709 for (BasicBlock* outgoing_block :
1710 scheduler_->special_rpo_->GetOutgoingBlocks(header_block)) {
1711 if (scheduler_->GetCommonDominator(block, outgoing_block) != block) {
1712 return nullptr;
1713 }
1714 }
1715 return header_block->dominator();
1716 }
1717 return nullptr;
1718 }
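GetHoistBlock's safety condition can be restated as: hoist out of a loop only if the candidate block dominates every block the loop exits to, otherwise the hoisted node could execute on paths that previously skipped it. A small sketch under that assumption, with an illustrative Blk type rather than V8's BasicBlock:

#include <vector>

// Illustrative types only; V8 checks this with GetCommonDominator against the
// loop's outgoing blocks recorded by the special RPO.
struct Blk {
  Blk* dominator = nullptr;
};

bool Dominates(Blk* a, Blk* b) {
  for (Blk* walk = b; walk != nullptr; walk = walk->dominator) {
    if (walk == a) return true;
  }
  return false;
}

// Returns the block to hoist into (the loop header's dominator), or nullptr
// if the candidate block does not dominate every block the loop exits to.
Blk* HoistTarget(Blk* block, Blk* loop_header,
                 const std::vector<Blk*>& loop_exit_blocks) {
  for (Blk* exit : loop_exit_blocks) {
    if (!Dominates(block, exit)) return nullptr;
  }
  return loop_header->dominator;
}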
1719
1721 BasicBlock* block = nullptr;
1722 for (Edge edge : node->use_edges()) {
1723 if (!scheduler_->IsLive(edge.from())) continue;
1724 BasicBlock* use_block = GetBlockForUse(edge);
1725 block = block == nullptr
1726 ? use_block
1727 : use_block == nullptr
1728 ? block
1729 : scheduler_->GetCommonDominator(block, use_block);
1730 }
1731 return block;
1732 }
1733
1737
1739 // TODO(titzer): ignore uses from dead nodes (not visited in PrepareUses()).
1740 // Dead uses only occur if the graph is not trimmed before scheduling.
1741 Node* use = edge.from();
1742 if (IrOpcode::IsPhiOpcode(use->opcode())) {
1743 // If the use is from a coupled (i.e. floating) phi, compute the common
1744 // dominator of its uses. This will not recurse more than one level.
1746 TRACE(" inspecting uses of coupled #%d:%s\n", use->id(),
1747 use->op()->mnemonic());
1748 // TODO(titzer): reenable once above TODO is addressed.
1749 // DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use));
1750 return GetCommonDominatorOfUses(use);
1751 }
1752 // If the use is from a fixed (i.e. non-floating) phi, we use the
1753 // predecessor block of the corresponding control input to the merge.
1755 TRACE(" input@%d into a fixed phi #%d:%s\n", edge.index(), use->id(),
1756 use->op()->mnemonic());
1757 Node* merge = NodeProperties::GetControlInput(use, 0);
1759 Node* input = NodeProperties::GetControlInput(merge, edge.index());
1760 return FindPredecessorBlock(input);
1761 }
1762 } else if (IrOpcode::IsMergeOpcode(use->opcode())) {
1763 // If the use is from a fixed (i.e. non-floating) merge, we use the
1764 // predecessor block of the current input to the merge.
1766 TRACE(" input@%d into a fixed merge #%d:%s\n", edge.index(), use->id(),
1767 use->op()->mnemonic());
1768 return FindPredecessorBlock(edge.to());
1769 }
1770 }
1772 if (result == nullptr) return nullptr;
1773 TRACE(" must dominate use #%d:%s in id:%d\n", use->id(),
1774 use->op()->mnemonic(), result->id().ToInt());
1775 return result;
1776 }
1777
1779 scheduler_->FuseFloatingControl(block, node);
1780 }
1781
1782 void ScheduleRegion(BasicBlock* block, Node* region_end) {
1783 // We only allow regions of instructions connected into a linear
1784 // effect chain. The only value allowed to be produced by a node
1785 // in the chain must be the value consumed by the FinishRegion node.
1786
1787 // We schedule back to front; we first schedule FinishRegion.
1788 CHECK_EQ(IrOpcode::kFinishRegion, region_end->opcode());
1789 ScheduleNode(block, region_end);
1790
1791 // Schedule the chain.
1792 Node* node = NodeProperties::GetEffectInput(region_end);
1793 while (node->opcode() != IrOpcode::kBeginRegion) {
1795 DCHECK_EQ(1, node->op()->EffectInputCount());
1796 DCHECK_EQ(1, node->op()->EffectOutputCount());
1797 DCHECK_EQ(0, node->op()->ControlOutputCount());
1798 // The value output (if there is any) must be consumed
1799 // by the EndRegion node.
1800 DCHECK(node->op()->ValueOutputCount() == 0 ||
1801 node == region_end->InputAt(0));
1802 ScheduleNode(block, node);
1803 node = NodeProperties::GetEffectInput(node);
1804 }
1805 // Schedule the BeginRegion node.
1807 ScheduleNode(block, node);
1808 }
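ScheduleRegion above walks a linear effect chain back to front and pins every node in it to the same block. A stripped-down sketch of that walk with an illustrative ChainNode type and a placement callback (not V8's Node/Schedule API):

#include <functional>

// Illustrative types only; V8 walks NodeProperties::GetEffectInput on Nodes.
struct ChainNode {
  ChainNode* effect_input;  // the single effect input of this node
  bool is_region_begin;     // true for the BeginRegion node
};

// Schedules a linear effect region back to front: the region end first, then
// the chain, finally the BeginRegion node, all into the same block.
void ScheduleEffectChain(ChainNode* region_end,
                         const std::function<void(ChainNode*)>& place) {
  place(region_end);
  ChainNode* node = region_end->effect_input;
  while (!node->is_region_begin) {
    place(node);
    node = node->effect_input;
  }
  place(node);  // the BeginRegion node itself
}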
1809
1810 void ScheduleNode(BasicBlock* block, Node* node) {
1811 schedule_->PlanNode(block, node);
1812 size_t block_id = block->id().ToSize();
1813 if (!scheduler_->scheduled_nodes_[block_id]) {
1815 }
1816 scheduler_->scheduled_nodes_[block_id]->push_back(node);
1818 }
1819
1821 int const input_count = node->InputCount();
1822 std::optional<int> coupled_control_edge =
1824 for (int index = 0; index < input_count; ++index) {
1825 if (index != coupled_control_edge) {
1826 Node* const input = node->InputAt(index);
1827 scheduler_->IncrementUnscheduledUseCount(input, node);
1828 }
1829 }
1830 Node* const copy = scheduler_->graph_->CloneNode(node);
1831 TRACE(("clone #%d:%s -> #%d\n"), node->id(), node->op()->mnemonic(),
1832 copy->id());
1833 scheduler_->node_data_.resize(copy->id() + 1,
1834 scheduler_->DefaultSchedulerData());
1835 scheduler_->node_data_[copy->id()] = scheduler_->node_data_[node->id()];
1836 return copy;
1837 }
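// The clone receives a fresh node id, so node_data_ may need to grow before
// the original's scheduler data (placement, use counts) is copied over. Each
// input, except a coupled control edge, is re-counted as an unscheduled use
// so the split copy keeps the scheduler's bookkeeping consistent.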
1838
1839 Zone* zone_;
1840 Scheduler* scheduler_;
1841 Schedule* schedule_;
1842 BoolVector marked_;
1843 ZoneDeque<BasicBlock*> marking_queue_;
1844};
1845
1846
1847void Scheduler::ScheduleLate() {
1848 TRACE("--- SCHEDULE LATE ------------------------------------------\n");
1849 if (v8_flags.trace_turbo_scheduler) {
1850 TRACE("roots: ");
1851 for (Node* node : schedule_root_nodes_) {
1852 TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
1853 }
1854 TRACE("\n");
1855 }
1856
1857 // Schedule: Places each node in the common dominator block of all its uses.
1858 ScheduleLateNodeVisitor schedule_late_visitor(zone_, this);
1859 schedule_late_visitor.Run(&schedule_root_nodes_);
1860}
1861
1862
1863// -----------------------------------------------------------------------------
1864// Phase 6: Seal the final schedule.
1865
1866
1867void Scheduler::SealFinalSchedule() {
1868 TRACE("--- SEAL FINAL SCHEDULE ------------------------------------\n");
1869
1870 // Serialize the assembly order and reverse-post-order numbering.
1871 special_rpo_->SerializeRPOIntoSchedule();
1872 special_rpo_->PrintAndVerifySpecialRPO();
1873
1874 // Add collected nodes for basic blocks to their blocks in the right order.
1875 int block_num = 0;
1876 for (NodeVector* nodes : scheduled_nodes_) {
1877 BasicBlock::Id id = BasicBlock::Id::FromInt(block_num++);
1878 BasicBlock* block = schedule_->GetBlockById(id);
1879 if (nodes) {
1880 for (Node* node : base::Reversed(*nodes)) {
1881 schedule_->AddNode(block, node);
1882 }
1883 }
1884 }
1885#ifdef LOG_BUILTIN_BLOCK_COUNT
1886 if (const ProfileDataFromFile* profile_data = this->profile_data()) {
1887 for (BasicBlock* block : *schedule_->all_blocks()) {
1888 uint64_t executed_count =
1889 profile_data->GetExecutedCount(block->id().ToSize());
1890 block->set_pgo_execution_count(executed_count);
1891 }
1892 }
1893#endif
1894}
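// The per-block node vectors were filled in roughly use-before-definition
// order during late scheduling, so base::Reversed yields a
// definition-before-use order when the nodes are appended to their blocks.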
1895
1896
1897// -----------------------------------------------------------------------------
1898
1899
1900void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
1901 TRACE("--- FUSE FLOATING CONTROL ----------------------------------\n");
1902 if (v8_flags.trace_turbo_scheduler) {
1903 StdoutStream{} << "Schedule before control flow fusion:\n" << *schedule_;
1904 }
1905
1906 // Iterate on phase 1: Build control-flow graph.
1907 control_flow_builder_->Run(block, node);
1908
1909 // Iterate on phase 2: Compute special RPO and dominator tree.
1910 special_rpo_->UpdateSpecialRPO(block, schedule_->block(node));
1911 // TODO(turbofan): Currently "iterate on" means "re-run". Fix that.
1912 for (BasicBlock* b = block->rpo_next(); b != nullptr; b = b->rpo_next()) {
1913 b->set_dominator_depth(-1);
1914 b->set_dominator(nullptr);
1915 }
1916 PropagateImmediateDominators(block->rpo_next());
1917
1918 // Iterate on phase 4: Schedule nodes early.
1919 // TODO(turbofan): The following loop gathering the propagation roots is a
1920 // temporary solution and should be merged into the rest of the scheduler as
1921 // soon as the approach has settled for all floating loops.
1922 NodeVector propagation_roots(control_flow_builder_->control_);
1923 for (Node* control : control_flow_builder_->control_) {
1924 for (Node* use : control->uses()) {
1925 if (NodeProperties::IsPhi(use) && IsLive(use)) {
1926 propagation_roots.push_back(use);
1927 }
1928 }
1929 }
1930 if (v8_flags.trace_turbo_scheduler) {
1931 TRACE("propagation roots: ");
1932 for (Node* r : propagation_roots) {
1933 TRACE("#%d:%s ", r->id(), r->op()->mnemonic());
1934 }
1935 TRACE("\n");
1936 }
1937 ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
1938 schedule_early_visitor.Run(&propagation_roots);
1939
1940 // Move previously planned nodes.
1941 // TODO(turbofan): Improve that by supporting bulk moves.
1942 scheduled_nodes_.resize(schedule_->BasicBlockCount());
1943 MovePlannedNodes(block, schedule_->block(node));
1944
1945 if (v8_flags.trace_turbo_scheduler) {
1946 StdoutStream{} << "Schedule after control flow fusion:\n" << *schedule_;
1947 }
1948}
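// Fusing floating control effectively re-runs the earlier phases on the
// affected region: the CFG builder creates blocks for the floating control,
// special RPO and dominator information is recomputed from {block} onwards,
// minimum positions are re-propagated from the new control nodes and their
// phis, and nodes already planned into {block} are moved to the block now
// holding {node}.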
1949
1950
1951void Scheduler::MovePlannedNodes(BasicBlock* from, BasicBlock* to) {
1952 TRACE("Move planned nodes from id:%d to id:%d\n", from->id().ToInt(),
1953 to->id().ToInt());
1954 NodeVector* from_nodes = scheduled_nodes_[from->id().ToSize()];
1955 NodeVector* to_nodes = scheduled_nodes_[to->id().ToSize()];
1956 if (!from_nodes) return;
1957
1958 for (Node* const node : *from_nodes) {
1959 schedule_->SetBlockForNode(to, node);
1960 }
1961 if (to_nodes) {
1962 to_nodes->insert(to_nodes->end(), from_nodes->begin(), from_nodes->end());
1963 from_nodes->clear();
1964 } else {
1965 std::swap(scheduled_nodes_[from->id().ToSize()],
1966 scheduled_nodes_[to->id().ToSize()]);
1967 }
1968}
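// If the destination block has no node list yet, the whole source vector is
// handed over with std::swap; otherwise the nodes are appended and the source
// vector is cleared. In both cases SetBlockForNode has already re-pointed
// every moved node at {to}.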
1969
1970#undef TRACE
1971
1972} // namespace compiler
1973} // namespace internal
1974} // namespace v8