#define TRACE(...)                                                  \
  do {                                                              \
    if (v8_flags.trace_turbo_scheduler) PrintF(__VA_ARGS__);        \
  } while (false)
Scheduler::Scheduler(Zone* zone, TFGraph* graph, Schedule* schedule,
                     Flags flags, size_t node_count_hint,
                     TickCounter* tick_counter,
                     const ProfileDataFromFile* profile_data)
    : zone_(zone),
      graph_(graph),
      schedule_(schedule),
      flags_(flags),
      scheduled_nodes_(zone),
      schedule_root_nodes_(zone),
      schedule_queue_(zone),
      node_data_(zone),
      tick_counter_(tick_counter),
      profile_data_(profile_data),
      common_dominator_cache_(zone) {
  node_data_.reserve(node_count_hint);
  node_data_.resize(graph->NodeCount(), DefaultSchedulerData());
}
  // Reserve 10% more space for nodes if node splitting is enabled to prevent
  // unnecessary reallocation.
  size_t node_hint_multiplier = (flags & Scheduler::kSplitNodes) ? 1.1 : 1;
  size_t node_count_hint = node_hint_multiplier * graph->NodeCount();

  Schedule* schedule =
      schedule_zone->New<Schedule>(schedule_zone, node_count_hint);
  Scheduler scheduler(temp_zone, graph, schedule, flags, node_count_hint,
                      tick_counter, profile_data);

  scheduler.BuildCFG();
  scheduler.ComputeSpecialRPONumbering();
  scheduler.GenerateDominatorTree();

  scheduler.PrepareUses();
  scheduler.ScheduleEarly();
  scheduler.ScheduleLate();

  scheduler.SealFinalSchedule();

  return schedule;
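
// The scheduler runs in phases: build the control-flow graph from the fixed
// control nodes, compute a special reverse-post-order that keeps loop bodies
// contiguous, derive the dominator tree from it, then place the remaining
// floating nodes between their earliest position (schedule early, driven by
// dominance over inputs) and their latest position (schedule late, driven by
// the common dominator of uses), and finally seal the per-block node lists.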
Scheduler::Placement Scheduler::InitializePlacement(Node* node) {
  SchedulerData* data = GetData(node);
  if (data->placement_ == kFixed) {
    // Nothing to do for control nodes that have already been fixed in
    // the schedule.
    return data->placement_;
  }
  switch (node->opcode()) {
    case IrOpcode::kParameter:
    case IrOpcode::kOsrValue:
      // Parameters and OSR values are always fixed to the start block.
      data->placement_ = kFixed;
      break;
    case IrOpcode::kPhi:
    case IrOpcode::kEffectPhi: {
      // ...
    }
    // ...
  }
  return data->placement_;
}
void Scheduler::UpdatePlacement(Node* node, Placement placement) {
  SchedulerData* data = GetData(node);
  if (data->placement_ == kUnknown) {
    // We only update control nodes from {kUnknown} to {kFixed}.
    DCHECK_EQ(kFixed, placement);
    data->placement_ = placement;
    return;
  }
  switch (node->opcode()) {
    case IrOpcode::kParameter:
      // Parameters are fixed once and for all.
      UNREACHABLE();
    case IrOpcode::kPhi:
    case IrOpcode::kEffectPhi: {
      // ...
    }
#define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
      CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
#undef DEFINE_CONTROL_CASE
    {
      // Control nodes force coupled uses to be placed as well.
      for (auto use : node->uses()) {
        // ...
      }
      break;
    }
    // ...
  }
  // Reduce the use counts of the node's inputs to potentially make them
  // schedulable; the coupled control edge does not count as a use.
  std::optional<int> coupled_control_edge = GetCoupledControlEdge(node);
  for (Edge const edge : node->input_edges()) {
    if (edge.index() != coupled_control_edge) {
      DecrementUnscheduledUseCount(edge.to(), node);
    }
  }
  data->placement_ = placement;
}
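
// Placement forms a small lattice: nodes start out kUnknown and become either
// kFixed (control and control-dependent nodes pinned to a block), kCoupled
// (phis that follow their control node), or kSchedulable, which eventually
// transitions to kScheduled once a block has been chosen.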
void Scheduler::IncrementUnscheduledUseCount(Node* node, Node* from) {
  ++(GetData(node)->unscheduled_count_);
  if (v8_flags.trace_turbo_scheduler) {
    TRACE("  Use count of #%d:%s (used by #%d:%s)++ = %d\n", node->id(),
          node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
          GetData(node)->unscheduled_count_);
  }
}
void Scheduler::DecrementUnscheduledUseCount(Node* node, Node* from) {
  DCHECK_LT(0, GetData(node)->unscheduled_count_);
  --(GetData(node)->unscheduled_count_);
  if (v8_flags.trace_turbo_scheduler) {
    TRACE("  Use count of #%d:%s (used by #%d:%s)-- = %d\n", node->id(),
          node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
          GetData(node)->unscheduled_count_);
  }
  if (GetData(node)->unscheduled_count_ == 0) {
    TRACE("  newly eligible #%d:%s\n", node->id(), node->op()->mnemonic());
    schedule_queue_.push(node);
  }
}
    TRACE("Found SESE at #%d:%s\n", node->id(), node->op()->mnemonic());
void CFGBuilder::BuildBlocks(Node* node) {
  switch (node->opcode()) {
    case IrOpcode::kStart:
      FixNode(schedule_->start(), node);
      break;
    case IrOpcode::kLoop:
    case IrOpcode::kMerge:
      BuildBlockForNode(node);
      break;
    case IrOpcode::kTerminate: {
      // Put the Terminate node into the loop to which it refers.
      // ...
    }
    case IrOpcode::kBranch:
    case IrOpcode::kSwitch:
      BuildBlocksForSuccessors(node);
      break;
#define BUILD_BLOCK_JS_CASE(Name, ...) case IrOpcode::k##Name:
      JS_OP_LIST(BUILD_BLOCK_JS_CASE)
#undef BUILD_BLOCK_JS_CASE
    case IrOpcode::kCall:
    case IrOpcode::kFastApiCall:
      if (NodeProperties::IsExceptionalCall(node)) {
        BuildBlocksForSuccessors(node);
      }
      break;
    // ...
  }
}
void CFGBuilder::ConnectBlocks(Node* node) {
  switch (node->opcode()) {
    case IrOpcode::kLoop:
    case IrOpcode::kMerge:
      ConnectMerge(node);
      break;
    // Each of the following cases also fixes the node's placement before
    // connecting it.
    case IrOpcode::kBranch:
      ConnectBranch(node);
      break;
    case IrOpcode::kSwitch:
      ConnectSwitch(node);
      break;
    case IrOpcode::kDeoptimize:
      ConnectDeoptimize(node);
      break;
    case IrOpcode::kTailCall:
      ConnectTailCall(node);
      break;
    case IrOpcode::kReturn:
      ConnectReturn(node);
      break;
    case IrOpcode::kThrow:
      ConnectThrow(node);
      break;
#define CONNECT_BLOCK_JS_CASE(Name, ...) case IrOpcode::k##Name:
      JS_OP_LIST(CONNECT_BLOCK_JS_CASE)
#undef CONNECT_BLOCK_JS_CASE
    case IrOpcode::kCall:
    case IrOpcode::kFastApiCall:
      if (NodeProperties::IsExceptionalCall(node)) ConnectCall(node);
      break;
    // ...
  }
}
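
// Block creation and connection are separate passes: BuildBlocks() allocates
// blocks for block-starting nodes (merges, loops, and the control projections
// of branches, switches, and exceptional calls), while ConnectBlocks() wires
// up the predecessor/successor edges for the fixed control nodes.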
BasicBlock* CFGBuilder::BuildBlockForNode(Node* node) {
  BasicBlock* block = schedule_->block(node);
  if (block == nullptr) {
    block = schedule_->NewBasicBlock();
    TRACE("Create block id:%d for #%d:%s\n", block->id().ToInt(), node->id(),
          node->op()->mnemonic());
    FixNode(block, node);
  }
  return block;
}
void CFGBuilder::BuildBlocksForSuccessors(Node* node) {
  size_t const successor_cnt = node->op()->ControlOutputCount();
  Node** successors = zone_->AllocateArray<Node*>(successor_cnt);
  NodeProperties::CollectControlProjections(node, successors, successor_cnt);
  for (size_t index = 0; index < successor_cnt; ++index) {
    BuildBlockForNode(successors[index]);
  }
}
void CFGBuilder::CollectSuccessorBlocks(Node* node,
                                        BasicBlock** successor_blocks,
                                        size_t successor_cnt) {
  Node** successors = reinterpret_cast<Node**>(successor_blocks);
  NodeProperties::CollectControlProjections(node, successors, successor_cnt);
  for (size_t index = 0; index < successor_cnt; ++index) {
    successor_blocks[index] = schedule_->block(successors[index]);
  }
}
BasicBlock* CFGBuilder::FindPredecessorBlock(Node* node) {
  BasicBlock* predecessor_block = nullptr;
  while (true) {
    predecessor_block = schedule_->block(node);
    if (predecessor_block != nullptr) break;
    node = NodeProperties::GetControlInput(node);
  }
  return predecessor_block;
}
void CFGBuilder::ConnectBranch(Node* branch) {
  BasicBlock* successor_blocks[2];
  CollectSuccessorBlocks(branch, successor_blocks,
                         arraysize(successor_blocks));

  BranchHint hint_from_profile = BranchHint::kNone;
  if (const ProfileDataFromFile* profile_data = scheduler_->profile_data()) {
    hint_from_profile =
        profile_data->GetHint(successor_blocks[0]->id().ToSize(),
                              successor_blocks[1]->id().ToSize());
  }
  switch (hint_from_profile) {
    // Mark the successor that is unlikely according to the hint as deferred.
    // ...
  }
  // ...
  Node* branch_control = NodeProperties::GetControlInput(branch);
  BasicBlock* branch_block = FindPredecessorBlock(branch_control);
  TraceConnect(branch, branch_block, successor_blocks[0]);
  TraceConnect(branch, branch_block, successor_blocks[1]);
  schedule_->AddBranch(branch_block, branch, successor_blocks[0],
                       successor_blocks[1]);
}
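
// When profile data from a previous run is available, its hint wins over the
// static BranchHint baked into the branch operator; the unlikely successor is
// marked deferred so it is placed out of the hot code path.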
void CFGBuilder::ConnectSwitch(Node* sw) {
  size_t const successor_count = sw->op()->ControlOutputCount();
  BasicBlock** successor_blocks =
      zone_->AllocateArray<BasicBlock*>(successor_count);
  CollectSuccessorBlocks(sw, successor_blocks, successor_count);
  // ...
  Node* switch_control = NodeProperties::GetControlInput(sw);
  BasicBlock* switch_block = FindPredecessorBlock(switch_control);
  for (size_t index = 0; index < successor_count; ++index) {
    TraceConnect(sw, switch_block, successor_blocks[index]);
  }
  schedule_->AddSwitch(switch_block, sw, successor_blocks, successor_count);

  for (size_t index = 0; index < successor_count; ++index) {
    if (BranchHintOf(successor_blocks[index]->front()->op()) ==
        BranchHint::kFalse) {
      successor_blocks[index]->set_deferred(true);
    }
  }
}
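
// Switch successors whose first node carries a kFalse branch hint are
// likewise marked deferred.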
void CFGBuilder::TraceConnect(Node* node, BasicBlock* block,
                              BasicBlock* succ) {
  DCHECK_NOT_NULL(block);
  if (succ == nullptr) {
    TRACE("Connect #%d:%s, id:%d -> end\n", node->id(),
          node->op()->mnemonic(), block->id().ToInt());
  } else {
    TRACE("Connect #%d:%s, id:%d -> id:%d\n", node->id(),
          node->op()->mnemonic(), block->id().ToInt(), succ->id().ToInt());
  }
}
bool CFGBuilder::IsFinalMerge(Node* node) {
  return (node->opcode() == IrOpcode::kMerge &&
          node == scheduler_->graph_->end()->InputAt(0));
}

bool CFGBuilder::IsSingleEntrySingleExitRegion(Node* entry, Node* exit) const {
  size_t entry_class = scheduler_->equivalence_->ClassOf(entry);
  size_t exit_class = scheduler_->equivalence_->ClassOf(exit);
  return entry != exit && entry_class == exit_class;
}
  TRACE("--- CREATING CFG -------------------------------------------\n");
  // SpecialRPONumberer::SerializeRPOIntoSchedule
  int32_t number = 0;
  for (BasicBlock* b = order_; b != nullptr; b = b->rpo_next()) {
    b->set_rpo_number(number++);
    // ...
  }

  // SpecialRPONumberer::PrintAndVerifySpecialRPO
  if (v8_flags.trace_turbo_scheduler) PrintRPO();
  // SpecialRPONumberer::Push
  stack_[depth].block = child;

  // Loop numbers are stored directly on the blocks during RPO computation.
  static void SetLoopNumber(BasicBlock* block, int loop_number) {
    return block->set_loop_number(loop_number);
  }
  static bool HasLoopNumber(BasicBlock* block) {
    return block->loop_number() >= 0;
  }
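
// The special RPO computation is an iterative DFS over the successor edges;
// loop headers are identified by the backedges found during the walk and get
// a loop number, which the helpers above store directly on the block.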
  // SpecialRPONumberer::ComputeAndInsertSpecialRPO
  int num_loops = static_cast<int>(loops_.size());

  // Phase 1: perform an iterative DFS, numbering blocks and collecting
  // backedges.
  while (stack_depth > 0) {
    int current = stack_depth - 1;
    // ...
  }

  // If no loops were encountered, the computed order is already correct.
  // Otherwise, compute loop information from the backedges and traverse
  // again, grouping loop bodies together.
  if (num_loops > static_cast<int>(loops_.size())) {
    // ...
  }
  order = insertion_point;
  // Phase 2: traverse again, this time keeping loop bodies contiguous.
  while (stack_depth > 0) {
    SpecialRPOStackFrame* frame = &stack_[stack_depth - 1];
    BasicBlock* block = frame->block;
    BasicBlock* succ = nullptr;
    if (block != end && frame->index < block->SuccessorCount()) {
      // Process the next normal successor.
      // ...
    } else if (HasLoopNumber(block)) {
      // Process additional outgoing edges of the loop header.
      size_t outgoing_index = frame->index - block->SuccessorCount();
      LoopInfo* info = &loops_[GetLoopNumber(block)];
      if (block != entry && info->outgoing != nullptr &&
          outgoing_index < info->outgoing->size()) {
        succ = info->outgoing->at(outgoing_index);
      }
    }
    if (succ != nullptr) {
      // ...
    }
  }

  // When a finished loop is popped, splice its body into the emerging order.
  if (b->rpo_next() == info->end) {
    b->set_rpo_next(order);
    // ...
  }
    // Finish the previous loop(s) if we just exited them.
    while (current_header != nullptr &&
           current == current_header->loop_end()) {
      current_loop = current_loop->prev;
      current_header =
          current_loop == nullptr ? nullptr : current_loop->header;
      --loop_depth;
    }
    current->set_loop_header(current_header);

    // Push a new loop onto the stack if this block is a loop header.
    if (HasLoopNumber(current)) {
      ++loop_depth;
      current_loop = &loops_[GetLoopNumber(current)];
      current_header = current_loop->header;
      TRACE("id:%d is a loop header, increment loop depth to %d\n",
            current->id().ToInt(), loop_depth);
    }

    current->set_loop_depth(loop_depth);

    if (current->loop_header() == nullptr) {
      TRACE("id:%d is not in a loop (depth == %d)\n", current->id().ToInt(),
            current->loop_depth());
    } else {
      TRACE("id:%d has loop header id:%d, (depth == %d)\n",
            current->id().ToInt(), current->loop_header()->id().ToInt(),
            current->loop_depth());
    }
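
// The loop depth and loop header recorded here are what ScheduleLate() later
// consults when deciding whether a node can be hoisted out of a loop.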
  // SpecialRPONumberer::ComputeLoopInfo
  // Compute loop membership from the backedges of the control flow graph.
  for (size_t i = 0; i < backedges->size(); i++) {
    BasicBlock* member = backedges->at(i).first;
    BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
    size_t loop_num = GetLoopNumber(header);
    if (loops_[loop_num].header == nullptr) {
      loops_[loop_num].header = header;
      // ... (allocate the members bit vector for this loop)
    }

    int queue_length = 0;
    if (member != header) {
      // As long as the header doesn't have a backedge to itself, push the
      // member onto the queue and process its predecessors.
      if (!loops_[loop_num].members->Contains(member->id().ToInt())) {
        loops_[loop_num].members->Add(member->id().ToInt());
      }
      (*queue)[queue_length++].block = member;
    }

    // Propagate loop membership backwards. All predecessors of M up to the
    // loop header H are members of the loop too.
    while (queue_length > 0) {
      BasicBlock* block = (*queue)[--queue_length].block;
      for (size_t j = 0; j < block->PredecessorCount(); j++) {
        BasicBlock* pred = block->PredecessorAt(j);
        if (pred != header) {
          if (!loops_[loop_num].members->Contains(pred->id().ToInt())) {
            loops_[loop_num].members->Add(pred->id().ToInt());
            (*queue)[queue_length++].block = pred;
          }
        }
      }
    }
  }
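
// Loop membership is propagated backwards from each backedge: every
// predecessor reachable from the backedge source without passing the header
// belongs to the loop. The per-loop bit vector keyed by block id makes the
// Contains() checks O(1).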
  void PrintRPO() {
    StdoutStream os;
    os << "RPO with " << loops_.size() << " loops";
    if (loops_.size() > 0) {
      os << " (";
      for (size_t i = 0; i < loops_.size(); i++) {
        if (i > 0) os << " ";
        os << "id:" << loops_[i].header->id();
      }
      os << ")";
    }
    os << ":\n";

    for (BasicBlock* block = order_; block != nullptr;
         block = block->rpo_next()) {
      os << std::setw(5) << "B" << block->rpo_number() << ":";
      for (size_t i = 0; i < loops_.size(); i++) {
        bool range = loops_[i].header->LoopContains(block);
        bool membership = loops_[i].header != block && range;
        os << (membership ? " |" : "  ");
        os << (range ? "x" : " ");
      }
      os << "  id:" << block->id() << ": ";
      if (block->loop_end() != nullptr) {
        os << " range: [B" << block->rpo_number() << ", B"
           << block->loop_end()->rpo_number() << ")";
      }
      if (block->loop_header() != nullptr) {
        os << " header: id:" << block->loop_header()->id();
      }
      if (block->loop_depth() > 0) {
        os << " depth: " << block->loop_depth();
      }
      os << "\n";
    }
  }
  void VerifySpecialRPO() {
    BasicBlockVector* order = schedule_->rpo_order();
    DCHECK_LT(0, order->size());
    DCHECK_EQ(0, (*order)[0]->id().ToInt());  // entry should be first.

    for (size_t i = 0; i < loops_.size(); i++) {
      LoopInfo* loop = &loops_[i];
      BasicBlock* header = loop->header;
      BasicBlock* end = header->loop_end();

      DCHECK_NOT_NULL(header);
      DCHECK_LE(0, header->rpo_number());
      DCHECK_LT(header->rpo_number(), order->size());
      DCHECK_NOT_NULL(end);
      DCHECK_LE(end->rpo_number(), order->size());
      DCHECK_GT(end->rpo_number(), header->rpo_number());
      DCHECK_NE(header->loop_header(), header);

      // Verify that the loop body occupies consecutive RPO positions.
      int links = 0;
      BasicBlock* block = loop->start;
      bool end_found;
      while (true) {
        if (block == nullptr || block == loop->end) {
          end_found = (loop->end == block);
          break;
        }
        DCHECK(block->rpo_number() == links + header->rpo_number());
        links++;
        block = block->rpo_next();
        DCHECK_LT(links, static_cast<int>(2 * order->size()));  // cycle?
      }
      DCHECK(end_found);
      DCHECK_EQ(links, end->rpo_number() - header->rpo_number());

      // Check the loop depth of the header.
      int loop_depth = 0;
      for (LoopInfo* outer = loop; outer != nullptr; outer = outer->prev) {
        loop_depth++;
      }
      DCHECK_EQ(loop_depth, header->loop_depth());

      // Check that loops are contiguous in RPO.
      for (int j = 0; j < static_cast<int>(order->size()); j++) {
        block = order->at(j);
        if (j < header->rpo_number() || j >= end->rpo_number()) {
          DCHECK(!header->LoopContains(block));
        } else {
          DCHECK(header->LoopContains(block));
          DCHECK_GE(block->loop_depth(), loop_depth);
        }
      }
    }
  }
  TRACE("--- COMPUTING SPECIAL RPO ----------------------------------\n");
BasicBlock* Scheduler::GetCommonDominatorIfCached(BasicBlock* b1,
                                                  BasicBlock* b2) {
  auto entry1 = common_dominator_cache_.find(b1->id().ToInt());
  if (entry1 == common_dominator_cache_.end()) return nullptr;
  auto entry2 = entry1->second->find(b2->id().ToInt());
  if (entry2 == entry1->second->end()) return nullptr;
  return entry2->second;
}
BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
  // A very common fast case:
  if (b1 == b2) return b1;
  // Try to find the common dominator by walking, if there is a chance of
  // finding it quickly.
  constexpr int kCacheGranularity = 63;
  static_assert((kCacheGranularity & (kCacheGranularity + 1)) == 0);
  int depth_difference = b1->dominator_depth() - b2->dominator_depth();
  if (depth_difference > -kCacheGranularity &&
      depth_difference < kCacheGranularity) {
    for (int i = 0; i < kCacheGranularity; i++) {
      if (b1->dominator_depth() < b2->dominator_depth()) {
        b2 = b2->dominator();
      } else {
        b1 = b1->dominator();
      }
      if (b1 == b2) return b1;
    }
    // We might fall out of the loop here if the dominator tree has several
    // long, parallel chains.
  }
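
  // When the two blocks are within kCacheGranularity dominator-tree levels of
  // each other, a plain upward walk is cheap enough and the cache is skipped.
  // The granularity is one less than a power of two (checked by the
  // static_assert above), so depths can be aligned to cache rows with cheap
  // bitwise operations.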
  // Otherwise, walk both blocks upwards in lockstep, consulting and filling
  // the common-dominator cache along the way.
  if (b1 == b2) return b1;
  constexpr int kMaxNewCacheEntries = 2 * 50;
  // Collects the (b1, b2) pairs whose common dominator gets cached below.
  int new_cache_entries[kMaxNewCacheEntries];
  int new_cache_entries_cursor = 0;
  while (b1 != b2) {
    BasicBlock* maybe_cache_hit = GetCommonDominatorIfCached(b1, b2);
    if (maybe_cache_hit != nullptr) {
      b1 = b2 = maybe_cache_hit;
      break;
    } else if (new_cache_entries_cursor < kMaxNewCacheEntries) {
      new_cache_entries[new_cache_entries_cursor++] = b1->id().ToInt();
      new_cache_entries[new_cache_entries_cursor++] = b2->id().ToInt();
    }
    // ...
  }
  BasicBlock* result = b1;
  // Record the result for each of the collected pairs.
  for (int i = 0; i < new_cache_entries_cursor;) {
    int id1 = new_cache_entries[i++];
    int id2 = new_cache_entries[i++];
    CommonDominatorCache::mapped_type mapping;
    auto entry = common_dominator_cache_.find(id1);
    if (entry == common_dominator_cache_.end()) {
      // First entry for {id1}: allocate a fresh inner map.
      // ...
    } else {
      mapping = entry->second;
    }
    DCHECK_EQ(mapping->find(id2), mapping->end());
    mapping->insert({id2, result});
  }
  return result;
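
// At most kMaxNewCacheEntries / 2 = 50 (b1, b2) pairs discovered during the
// walk are recorded, bounding both the stack buffer above and the amount of
// zone memory a single query can add to the cache.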
void Scheduler::PropagateImmediateDominators(BasicBlock* block) {
  for (; block != nullptr; block = block->rpo_next()) {
    auto pred = block->predecessors().begin();
    auto end = block->predecessors().end();
    DCHECK(pred != end);  // All blocks except start have predecessors.
    BasicBlock* dominator = *pred;
    bool deferred = dominator->deferred();
    // For multiple predecessors, walk up the dominator tree until a common
    // dominator is found.
    BasicBlock* cache = nullptr;
    for (++pred; pred != end; ++pred) {
      // Don't examine backedges.
      if ((*pred)->dominator_depth() < 0) continue;
      if ((*pred)->dominator_depth() > 3 &&
          ((*pred)->dominator()->dominator() == cache ||
           (*pred)->dominator()->dominator()->dominator() == cache)) {
        // Nothing to do, the previous iteration already covered this case.
      } else {
        dominator = BasicBlock::GetCommonDominator(dominator, *pred);
      }
      cache = (*pred)->dominator();
      deferred = deferred & (*pred)->deferred();
    }
    block->set_dominator(dominator);
    block->set_dominator_depth(dominator->dominator_depth() + 1);
    block->set_deferred(deferred | block->deferred());
    TRACE("Block id:%d's idom is id:%d, depth = %d\n", block->id().ToInt(),
          dominator->id().ToInt(), block->dominator_depth());
  }
}
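
// Immediate dominators are computed in RPO, so every non-backedge predecessor
// has already been processed when a block is visited. The one-element {cache}
// of the previously seen dominator short-circuits long chains of diamonds,
// which would otherwise make this loop quadratic.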
  // Scheduler::GenerateDominatorTree
  // Seed the start block as the first dominator, then propagate from there.
  schedule->start()->set_dominator_depth(0);
  // ...
  TRACE("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n");
  // PrepareUsesVisitor::Run
  while (!stack_.empty()) {
    Node* node = stack_.top();
    stack_.pop();
    VisitInputs(node);
  }

  // PrepareUsesVisitor
  TRACE("Pre #%d:%s\n", node->id(), node->op()->mnemonic());
  // ...
  // Make sure root nodes are scheduled in their respective blocks.
  TRACE("Scheduling fixed position node #%d:%s\n", node->id(),
        node->op()->mnemonic());
  IrOpcode::Value opcode = node->opcode();
  BasicBlock* block =
      opcode == IrOpcode::kParameter
          ? schedule_->start()
          : schedule_->block(NodeProperties::GetControlInput(node));
  schedule_->AddNode(block, node);
  // PrepareUsesVisitor::VisitInputs
  std::optional<int> coupled_control_edge =
      scheduler_->GetCoupledControlEdge(node);
  for (auto edge : node->input_edges()) {
    Node* to = edge.to();
    DCHECK_EQ(node, edge.from());
    TRACE("PostEdge #%d:%s->#%d:%s\n", node->id(), node->op()->mnemonic(),
          to->id(), to->op()->mnemonic());
    // ...
    if (!is_scheduled && edge.index() != coupled_control_edge) {
      scheduler_->IncrementUnscheduledUseCount(to, node);
    }
  }
  TRACE("--- PREPARE USES -------------------------------------------\n");
  // ScheduleEarlyNodeVisitor::Run
  for (Node* const root : *roots) {
    queue_.push(root);
    while (!queue_.empty()) {
      scheduler_->tick_counter_->TickAndMaybeEnterSafepoint();
      VisitNode(queue_.front());
      queue_.pop();
    }
  }
  // ScheduleEarlyNodeVisitor::VisitNode
  Scheduler::SchedulerData* data = scheduler_->GetData(node);
  // Fixed nodes already know their schedule early position.
  if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
    data->minimum_block_ = schedule_->block(node);
    TRACE("Fixing #%d:%s minimum_block = id:%d, dominator_depth = %d\n",
          node->id(), node->op()->mnemonic(),
          data->minimum_block_->id().ToInt(),
          data->minimum_block_->dominator_depth());
  }
  // Propagate the schedule early position to all uses.
  for (auto use : node->uses()) {
    PropagateMinimumPositionToNode(data->minimum_block_, use);
  }

  // ScheduleEarlyNodeVisitor::PropagateMinimumPositionToNode
  // Propagate the new position if it is deeper down the dominator tree than
  // the current one.
  DCHECK(InsideSameDominatorChain(block, data->minimum_block_));
  if (block->dominator_depth() > data->minimum_block_->dominator_depth()) {
    data->minimum_block_ = block;
    queue_.push(node);
    TRACE("Propagating #%d:%s minimum_block = id:%d, dominator_depth = %d\n",
          node->id(), node->op()->mnemonic(),
          data->minimum_block_->id().ToInt(),
          data->minimum_block_->dominator_depth());
  }
  bool InsideSameDominatorChain(BasicBlock* b1, BasicBlock* b2) {
    BasicBlock* dominator = BasicBlock::GetCommonDominator(b1, b2);
    return dominator == b1 || dominator == b2;
  }
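
// Schedule early: the minimum block of a node is the deepest block (by
// dominator depth) at which all of the node's inputs are available; it is
// propagated forward from fixed roots to their transitive uses.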
void Scheduler::ScheduleEarly() {
  if (!special_rpo_->HasLoopBlocks()) {
    TRACE("--- NO LOOPS SO SKIPPING SCHEDULE EARLY --------------------\n");
    return;
  }

  TRACE("--- SCHEDULE EARLY -----------------------------------------\n");
  if (v8_flags.trace_turbo_scheduler) {
    TRACE("roots: ");
    for (Node* node : schedule_root_nodes_) {
      TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
    }
    TRACE("\n");
  }

  // Compute the minimum block for each node, thereby determining the earliest
  // position each node could be placed within a valid schedule.
  ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
  schedule_early_visitor.Run(&schedule_root_nodes_);
}
  // ScheduleLateNodeVisitor::Run
  for (Node* const root : *roots) {
    ProcessQueue(root);
  }

  // ScheduleLateNodeVisitor::ProcessQueue
  ZoneQueue<Node*>* queue = &(scheduler_->schedule_queue_);
  // ...
  do {
    scheduler_->tick_counter_->TickAndMaybeEnterSafepoint();
    Node* const n = queue->front();
    queue->pop();
    VisitNode(n);
  } while (!queue->empty());
  // ScheduleLateNodeVisitor::VisitNode
  // Determine the dominating block for all of the uses of this node. It is
  // the latest block that this node can be scheduled in.
  TRACE("Scheduling #%d:%s\n", node->id(), node->op()->mnemonic());
  BasicBlock* block = GetCommonDominatorOfUses(node);
  DCHECK_NOT_NULL(block);

  // The schedule early block dominates the schedule late block.
  BasicBlock* min_block = scheduler_->GetData(node)->minimum_block_;
  TRACE(
      "Schedule late of #%d:%s is id:%d at loop depth %d, minimum = id:%d\n",
      node->id(), node->op()->mnemonic(), block->id().ToInt(),
      block->loop_depth(), min_block->id().ToInt());

  // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
  // into enclosing loop pre-headers until they would precede their schedule
  // early position.
  BasicBlock* hoist_block = GetHoistBlock(block);
  if (hoist_block &&
      hoist_block->dominator_depth() >= min_block->dominator_depth()) {
    do {
      TRACE("  hoisting #%d:%s to block id:%d\n", node->id(),
            node->op()->mnemonic(), hoist_block->id().ToInt());
      DCHECK_LT(hoist_block->loop_depth(), block->loop_depth());
      block = hoist_block;
      hoist_block = GetHoistBlock(block);
    } while (hoist_block &&
             hoist_block->dominator_depth() >= min_block->dominator_depth());
  } else if (scheduler_->flags_ & Scheduler::kSplitNodes) {
    // Split the {node} if beneficial and return the new {block} for it.
    block = SplitNode(block, node);
  }

  // Schedule the node or the region of nodes it ends.
  if (node->opcode() == IrOpcode::kFinishRegion) {
    ScheduleRegion(block, node);
  } else {
    ScheduleNode(block, node);
  }
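
// Schedule late places a node at the common dominator of its uses, then
// hoists it iteratively into enclosing loop pre-headers as long as that does
// not move it above its schedule-early minimum block.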
  // ScheduleLateNodeVisitor::MarkBlock: queue unmarked predecessors so the
  // marking closure below can propagate backwards.
  // ...
  if (IsMarked(pred_block)) continue;
  marking_queue_.push_back(pred_block);
BasicBlock* ScheduleLateNodeVisitor::SplitNode(BasicBlock* block, Node* node) {
  // Splitting is currently limited to pure nodes; projections are skipped.
  if (node->opcode() == IrOpcode::kProjection) return block;

  // {block} is the common dominator of all uses of {node}; splitting only
  // helps if it has at least two successors.
  if (block->SuccessorCount() < 2) return block;
  // ...

  // Check whether the {node} has uses in all successors of {block}.
  for (Edge edge : node->use_edges()) {
    BasicBlock* use_block = GetBlockForUse(edge);
    if (use_block == nullptr || IsMarked(use_block)) continue;
    if (use_block == block) {
      TRACE("  not splitting #%d:%s, it is used in id:%d\n", node->id(),
            node->op()->mnemonic(), block->id().ToInt());
      marking_queue_.clear();
      return block;
    }
    MarkBlock(use_block);
  }

  // Compute the marking closure over the blocks between the uses and the
  // common dominator.
  // ...
  if (top_block->loop_depth() == block->loop_depth()) {
    // ...
  }
  // ...

  // If the common dominator itself is marked, every path from it to the end
  // already contains a use, so splitting cannot help.
  if (IsMarked(block)) {
    TRACE("  not splitting #%d:%s, its common dominator id:%d is perfect\n",
          node->id(), node->op()->mnemonic(), block->id().ToInt());
    return block;
  }
  // Split {node} for uses according to the previously computed marking
  // closure. Every marking partition has a unique dominator, which gets a
  // copy of the {node}, except for the first partition, which gets the
  // {node} itself.
  ZoneMap<BasicBlock*, Node*> dominators(scheduler_->zone_);
  for (Edge edge : node->use_edges()) {
    BasicBlock* use_block = GetBlockForUse(edge);
    if (use_block == nullptr) continue;
    while (IsMarked(use_block->dominator())) {
      use_block = use_block->dominator();
    }
    auto& use_node = dominators[use_block];
    if (use_node == nullptr) {
      if (dominators.size() == 1u) {
        // Place the {node} at {use_block}.
        block = use_block;
        use_node = node;
        TRACE("  pushing #%d:%s down to id:%d\n", node->id(),
              node->op()->mnemonic(), block->id().ToInt());
      } else {
        // Place a copy of {node} at {use_block}.
        use_node = CloneNode(node);
        TRACE("  cloning #%d:%s for id:%d\n", use_node->id(),
              use_node->op()->mnemonic(), use_block->id().ToInt());
        scheduler_->schedule_queue_.push(use_node);
      }
    }
    edge.UpdateTo(use_node);
  }
  return block;
}
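
// Node splitting trades duplication for cheaper paths: instead of computing a
// value once above a branch and keeping it live everywhere, each partition of
// uses receives its own copy placed closer to those uses, so paths that never
// need the value no longer compute it.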
BasicBlock* ScheduleLateNodeVisitor::GetHoistBlock(BasicBlock* block) {
  if (block->IsLoopHeader()) return block->dominator();
  // {block} must dominate all of the loop's outgoing blocks; otherwise some
  // path out of the loop would not execute {block}, and hoisting would
  // introduce extra computation on that path.
  if (BasicBlock* header_block = block->loop_header()) {
    // ...
    return header_block->dominator();
  }
  return nullptr;
}
BasicBlock* ScheduleLateNodeVisitor::GetCommonDominatorOfUses(Node* node) {
  BasicBlock* block = nullptr;
  for (Edge edge : node->use_edges()) {
    BasicBlock* use_block = GetBlockForUse(edge);
    block = block == nullptr
                ? use_block
                : use_block == nullptr
                      ? block
                      : scheduler_->GetCommonDominator(block, use_block);
  }
  return block;
}
BasicBlock* ScheduleLateNodeVisitor::GetBlockForUse(Edge edge) {
  Node* use = edge.from();
  if (IrOpcode::IsPhiOpcode(use->opcode())) {
    // If the use is from a coupled (i.e. floating) phi, compute the common
    // dominator of its uses; this recurses at most one level.
    if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) {
      TRACE("  inspecting uses of coupled #%d:%s\n", use->id(),
            use->op()->mnemonic());
      return GetCommonDominatorOfUses(use);
    }
    // If the use is from a fixed (i.e. non-floating) phi, use the block of
    // the corresponding control input of the merge.
    if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
      TRACE("  input@%d into a fixed phi #%d:%s\n", edge.index(), use->id(),
            use->op()->mnemonic());
      Node* merge = NodeProperties::GetControlInput(use, 0);
      Node* input = NodeProperties::GetControlInput(merge, edge.index());
      return FindPredecessorBlock(input);
    }
  } else if (IrOpcode::IsMergeOpcode(use->opcode())) {
    // If the use is from a fixed merge, use the predecessor block of the
    // corresponding control input.
    if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
      TRACE("  input@%d into a fixed merge #%d:%s\n", edge.index(), use->id(),
            use->op()->mnemonic());
      return FindPredecessorBlock(edge.to());
    }
  }
  BasicBlock* result = schedule_->block(use);
  if (result == nullptr) return nullptr;
  TRACE("  must dominate use #%d:%s in id:%d\n", use->id(),
        use->op()->mnemonic(), result->id().ToInt());
  return result;
}
  // ScheduleLateNodeVisitor::ScheduleRegion
  // Walk the effect chain backwards from the region end to its kBeginRegion,
  // scheduling every node of the region into the same block.
  while (node->opcode() != IrOpcode::kBeginRegion) {
    DCHECK_EQ(1, node->op()->EffectInputCount());
    DCHECK_EQ(1, node->op()->EffectOutputCount());
    DCHECK_EQ(0, node->op()->ControlOutputCount());
    // The only value produced inside the region must be the one consumed by
    // the region end.
    DCHECK(node->op()->ValueOutputCount() == 0 ||
           node == region_end->InputAt(0));
    ScheduleNode(block, node);
    node = NodeProperties::GetEffectInput(node);
  }
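
// Regions delimit effect chains (e.g. an allocation followed by its
// initializing stores) that must not be interleaved with other effects; the
// whole chain is therefore planned into one block instead of node by node.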
  // ScheduleLateNodeVisitor::ScheduleNode plans {node} into {block} and
  // records it in the per-block list consumed by SealFinalSchedule().
  size_t block_id = block->id().ToSize();
  // ...
  // ScheduleLateNodeVisitor::CloneNode
  int const input_count = node->InputCount();
  std::optional<int> coupled_control_edge =
      scheduler_->GetCoupledControlEdge(node);
  for (int index = 0; index < input_count; ++index) {
    if (index != coupled_control_edge) {
      Node* const input = node->InputAt(index);
      scheduler_->IncrementUnscheduledUseCount(input, node);
    }
  }
  Node* const copy = scheduler_->graph_->CloneNode(node);
  TRACE(("clone #%d:%s -> #%d\n"), node->id(), node->op()->mnemonic(),
        copy->id());
  // ...
  return copy;
void Scheduler::ScheduleLate() {
  TRACE("--- SCHEDULE LATE ------------------------------------------\n");
  if (v8_flags.trace_turbo_scheduler) {
    TRACE("roots: ");
    for (Node* node : schedule_root_nodes_) {
      TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
    }
    TRACE("\n");
  }

  // Places nodes in the dominator block of all of their uses.
  ScheduleLateNodeVisitor schedule_late_visitor(zone_, this);
  schedule_late_visitor.Run(&schedule_root_nodes_);
}
void Scheduler::SealFinalSchedule() {
  TRACE("--- SEAL FINAL SCHEDULE ------------------------------------\n");

  // Serialize the assembly order and reverse-post-order numbering, then add
  // the collected nodes to their blocks.
  // ...
#ifdef LOG_BUILTIN_BLOCK_COUNT
  // Attach per-block execution counts from the builtins profile.
  if (const ProfileDataFromFile* profile_data = this->profile_data()) {
    for (BasicBlock* block : *schedule_->all_blocks()) {
      uint64_t executed_count =
          profile_data->GetExecutedCount(block->id().ToSize());
      block->set_pgo_execution_count(executed_count);
    }
  }
#endif
}
void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
  TRACE("--- FUSE FLOATING CONTROL ----------------------------------\n");
  if (v8_flags.trace_turbo_scheduler) {
    StdoutStream{} << "Schedule before control flow fusion:\n" << *schedule_;
  }

  // Iterate on phase 1: Build control-flow graph.
  control_flow_builder_->Run(block, node);

  // Iterate on phase 2: Compute special RPO and dominator tree.
  special_rpo_->UpdateSpecialRPO(block, schedule_->block(node));
  for (BasicBlock* b = block->rpo_next(); b != nullptr; b = b->rpo_next()) {
    b->set_dominator_depth(-1);
    b->set_dominator(nullptr);
  }
  PropagateImmediateDominators(block->rpo_next());

  // Iterate on phase 4: Schedule nodes early. The propagation roots are the
  // new control nodes plus any phis hanging off them.
  NodeVector propagation_roots(control_flow_builder_->control_);
  for (Node* control : control_flow_builder_->control_) {
    for (Node* use : control->uses()) {
      if (NodeProperties::IsPhi(use)) {
        propagation_roots.push_back(use);
      }
    }
  }
  if (v8_flags.trace_turbo_scheduler) {
    TRACE("propagation roots: ");
    for (Node* r : propagation_roots) {
      TRACE("#%d:%s ", r->id(), r->op()->mnemonic());
    }
    TRACE("\n");
  }
  ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
  schedule_early_visitor.Run(&propagation_roots);

  // Move previously planned nodes.
  // ...
  if (v8_flags.trace_turbo_scheduler) {
    StdoutStream{} << "Schedule after control flow fusion:\n" << *schedule_;
  }
}
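
// FuseFloatingControl re-runs the affected phases once a floating control
// structure (e.g. a floating loop) is discovered during schedule late: the
// CFG is extended, special RPO and dominators are recomputed for the affected
// blocks, and schedule-early positions are re-propagated from the new control
// nodes and their phis.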
void Scheduler::MovePlannedNodes(BasicBlock* from, BasicBlock* to) {
  TRACE("Move planned nodes from id:%d to id:%d\n", from->id().ToInt(),
        to->id().ToInt());
  NodeVector* from_nodes = scheduled_nodes_[from->id().ToSize()];
  NodeVector* to_nodes = scheduled_nodes_[to->id().ToSize()];
  if (!from_nodes) return;

  for (Node* const node : *from_nodes) {
    schedule_->SetBlockForNode(to, node);
  }
  if (to_nodes) {
    to_nodes->insert(to_nodes->end(), from_nodes->begin(), from_nodes->end());
    from_nodes->clear();
  } else {
    std::swap(scheduled_nodes_[from->id().ToSize()],
              scheduled_nodes_[to->id().ToSize()]);
  }
}