v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
loop-analysis.cc
Go to the documentation of this file.
1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
12#include "src/compiler/node.h"
14#include "src/zone/zone.h"
15
16namespace v8 {
17namespace internal {
18
19class TickCounter;
20
21namespace compiler {
22
// Loop membership marks are stored as bit vectors packed into 32-bit words:
// INDEX(i) selects the word for loop bit {i}, OFFSET(i) the bit position
// within that word, and BIT(i) the corresponding single-bit mask.
#define OFFSET(x) ((x)&0x1F)
#define BIT(x) (1u << OFFSET(x))
#define INDEX(x) ((x) >> 5)
26
27// Temporary information for each node during marking.
28struct NodeInfo {
30 NodeInfo* next; // link in chaining loop members
32};
33
34
35// Temporary loop info needed during traversal and building the loop tree.
43
44// Encapsulation of the loop finding algorithm.
45// -----------------------------------------------------------------------------
46// Conceptually, the contents of a loop are those nodes that are "between" the
47// loop header and the backedges of the loop. Graphs in the soup of nodes can
48// form improper cycles, so standard loop finding algorithms that work on CFGs
49// aren't sufficient. However, in valid TurboFan graphs, all cycles involve
50// either a {Loop} node or a phi. The {Loop} node itself and its accompanying
51// phis are treated together as a set referred to here as the loop header.
52// This loop finding algorithm works by traversing the graph in two directions,
53// first from nodes to their inputs, starting at {end}, then in the reverse
54// direction, from nodes to their uses, starting at loop headers.
// One bit per loop per node per direction is required during the marking phase.
56// To handle nested loops correctly, the algorithm must filter some reachability
57// marks on edges into/out-of the loop header nodes.
58// Note: this algorithm assumes there are no unreachable loop header nodes
59// (including loop phis).
61 public:
  // {graph} is only traversed; results are written into {loop_tree}.
  // {zone} holds temporary data (queues, per-node info, mark matrices) that
  // is discarded after the analysis.
  LoopFinderImpl(TFGraph* graph, LoopTree* loop_tree, TickCounter* tick_counter,
                 Zone* zone)
      : zone_(zone),
        end_(graph->end()),
        queue_(zone),
        queued_(graph, 2),  // two marker states per node: not-queued / queued
        info_(graph->NodeCount(), {nullptr, nullptr, false}, zone),
        loops_(zone),
        loop_num_(graph->NodeCount(), -1, zone),
        loop_tree_(loop_tree),
        loops_found_(0),
        width_(0),  // mark matrices start empty; grown as loops are found
        backward_(nullptr),
        forward_(nullptr),
        tick_counter_(tick_counter) {}
77
78 void Run() {
82 }
83
84 void Print() {
85 // Print out the results.
86 for (NodeInfo& ni : info_) {
87 if (ni.node == nullptr) continue;
88 for (int i = 1; i <= loops_found_; i++) {
89 int index = ni.node->id() * width_ + INDEX(i);
90 bool marked_forward = forward_[index] & BIT(i);
91 bool marked_backward = backward_[index] & BIT(i);
92 if (marked_forward && marked_backward) {
93 PrintF("X");
94 } else if (marked_forward) {
95 PrintF(">");
96 } else if (marked_backward) {
97 PrintF("<");
98 } else {
99 PrintF(" ");
100 }
101 }
102 PrintF(" #%d:%s\n", ni.node->id(), ni.node->op()->mnemonic());
103 }
104
105 int i = 0;
106 for (TempLoopInfo& li : loops_) {
107 PrintF("Loop %d headed at #%d\n", i, li.header->id());
108 i++;
109 }
110
111 for (LoopTree::Loop* loop : loop_tree_->outer_loops_) {
112 PrintLoop(loop);
113 }
114 }
115
116 private:
127 uint32_t* backward_;
128 uint32_t* forward_;
130
  // Number of nodes the analysis was sized for (the length of the loop
  // tree's per-node loop-number table, fixed at construction).
  int num_nodes() {
    return static_cast<int>(loop_tree_->node_to_loop_num_.size());
  }
134
135 // Tb = Tb | (Fb - loop_filter)
136 bool PropagateBackwardMarks(Node* from, Node* to, int loop_filter) {
137 if (from == to) return false;
138 uint32_t* fp = &backward_[from->id() * width_];
139 uint32_t* tp = &backward_[to->id() * width_];
140 bool change = false;
141 for (int i = 0; i < width_; i++) {
142 uint32_t mask = i == INDEX(loop_filter) ? ~BIT(loop_filter) : 0xFFFFFFFF;
143 uint32_t prev = tp[i];
144 uint32_t next = prev | (fp[i] & mask);
145 tp[i] = next;
146 if (!change && (prev != next)) change = true;
147 }
148 return change;
149 }
150
151 // Tb = Tb | B
152 bool SetBackwardMark(Node* to, int loop_num) {
153 uint32_t* tp = &backward_[to->id() * width_ + INDEX(loop_num)];
154 uint32_t prev = tp[0];
155 uint32_t next = prev | BIT(loop_num);
156 tp[0] = next;
157 return next != prev;
158 }
159
160 // Tf = Tf | B
161 bool SetForwardMark(Node* to, int loop_num) {
162 uint32_t* tp = &forward_[to->id() * width_ + INDEX(loop_num)];
163 uint32_t prev = tp[0];
164 uint32_t next = prev | BIT(loop_num);
165 tp[0] = next;
166 return next != prev;
167 }
168
169 // Tf = Tf | (Ff & Tb)
170 bool PropagateForwardMarks(Node* from, Node* to) {
171 if (from == to) return false;
172 bool change = false;
173 int findex = from->id() * width_;
174 int tindex = to->id() * width_;
175 for (int i = 0; i < width_; i++) {
176 uint32_t marks = backward_[tindex + i] & forward_[findex + i];
177 uint32_t prev = forward_[tindex + i];
178 uint32_t next = prev | marks;
179 forward_[tindex + i] = next;
180 if (!change && (prev != next)) change = true;
181 }
182 return change;
183 }
184
185 bool IsInLoop(Node* node, int loop_num) {
186 int offset = node->id() * width_ + INDEX(loop_num);
187 return backward_[offset] & forward_[offset] & BIT(loop_num);
188 }
189
190 // Propagate marks backward from loop headers.
194 Queue(end_);
195
196 while (!queue_.empty()) {
198 Node* node = queue_.front();
199 info(node).backwards_visited = true;
200 queue_.pop_front();
201 queued_.Set(node, false);
202
203 int loop_num = -1;
204 // Setup loop headers first.
205 if (node->opcode() == IrOpcode::kLoop) {
206 // found the loop node first.
207 loop_num = CreateLoopInfo(node);
208 } else if (NodeProperties::IsPhi(node)) {
209 // found a phi first.
210 Node* merge = node->InputAt(node->InputCount() - 1);
211 if (merge->opcode() == IrOpcode::kLoop) {
212 loop_num = CreateLoopInfo(merge);
213 }
214 } else if (node->opcode() == IrOpcode::kLoopExit) {
215 // Intentionally ignore return value. Loop exit node marks
216 // are propagated normally.
217 CreateLoopInfo(node->InputAt(1));
218 } else if (node->opcode() == IrOpcode::kLoopExitValue ||
219 node->opcode() == IrOpcode::kLoopExitEffect) {
220 Node* loop_exit = NodeProperties::GetControlInput(node);
221 // Intentionally ignore return value. Loop exit node marks
222 // are propagated normally.
223 CreateLoopInfo(loop_exit->InputAt(1));
224 }
225
226 // Propagate marks backwards from this node.
227 for (int i = 0; i < node->InputCount(); i++) {
228 Node* input = node->InputAt(i);
229 if (IsBackedge(node, i)) {
230 // Only propagate the loop mark on backedges.
231 if (SetBackwardMark(input, loop_num) ||
232 !info(input).backwards_visited) {
233 Queue(input);
234 }
235 } else {
236 // Entry or normal edge. Propagate all marks except loop_num.
237 // TODO(manoskouk): Add test that needs backwards_visited to function
238 // correctly, probably using wasm loop unrolling when it is available.
239 if (PropagateBackwardMarks(node, input, loop_num) ||
240 !info(input).backwards_visited) {
241 Queue(input);
242 }
243 }
244 }
245 }
246 }
247
  // Make a new loop if necessary for the given node.
  // Returns the (1-based) loop number assigned to the {Loop} node {node}.
  int CreateLoopInfo(Node* node) {
    DCHECK_EQ(IrOpcode::kLoop, node->opcode());
    int loop_num = LoopNum(node);
    // Already assigned to a loop; nothing to create.
    if (loop_num > 0) return loop_num;

    loop_num = ++loops_found_;
    // Grow the backward-mark matrix when this loop's bit needs a new word.
    if (INDEX(loop_num) >= width_) ResizeBackwardMarks();

    // Create a new loop.
    loops_.push_back({node, nullptr, nullptr, nullptr, nullptr});
    SetLoopMarkForLoopHeader(node, loop_num);
    return loop_num;
  }
263
  // Records {node} as belonging to loop {loop_num}: ensures it has a
  // NodeInfo entry, sets its backward mark, and stores the loop number in
  // the loop tree's per-node table.
  void SetLoopMark(Node* node, int loop_num) {
    info(node);  // create the NodeInfo
    SetBackwardMark(node, loop_num);
    loop_tree_->node_to_loop_num_[node->id()] = loop_num;
  }
269
270 void SetLoopMarkForLoopHeader(Node* node, int loop_num) {
271 DCHECK_EQ(IrOpcode::kLoop, node->opcode());
272 SetLoopMark(node, loop_num);
273 for (Node* use : node->uses()) {
274 if (NodeProperties::IsPhi(use)) {
275 SetLoopMark(use, loop_num);
276 }
277
278 // Do not keep the loop alive if it does not have any backedges.
279 if (node->InputCount() <= 1) continue;
280
281 if (use->opcode() == IrOpcode::kLoopExit) {
282 SetLoopMark(use, loop_num);
283 for (Node* exit_use : use->uses()) {
284 if (exit_use->opcode() == IrOpcode::kLoopExitValue ||
285 exit_use->opcode() == IrOpcode::kLoopExitEffect) {
286 SetLoopMark(exit_use, loop_num);
287 }
288 }
289 }
290 }
291 }
292
294 int new_width = width_ + 1;
295 int max = num_nodes();
296 uint32_t* new_backward = zone_->AllocateArray<uint32_t>(new_width * max);
297 memset(new_backward, 0, new_width * max * sizeof(uint32_t));
298 if (width_ > 0) { // copy old matrix data.
299 for (int i = 0; i < max; i++) {
300 uint32_t* np = &new_backward[i * new_width];
301 uint32_t* op = &backward_[i * width_];
302 for (int j = 0; j < width_; j++) np[j] = op[j];
303 }
304 }
305 width_ = new_width;
306 backward_ = new_backward;
307 }
308
310 int max = num_nodes();
311 forward_ = zone_->AllocateArray<uint32_t>(width_ * max);
312 memset(forward_, 0, width_ * max * sizeof(uint32_t));
313 }
314
315 // Propagate marks forward from loops.
318 for (TempLoopInfo& li : loops_) {
319 SetForwardMark(li.header, LoopNum(li.header));
320 Queue(li.header);
321 }
322 // Propagate forward on paths that were backward reachable from backedges.
323 while (!queue_.empty()) {
325 Node* node = queue_.front();
326 queue_.pop_front();
327 queued_.Set(node, false);
328 for (Edge edge : node->use_edges()) {
329 Node* use = edge.from();
330 if (!IsBackedge(use, edge.index())) {
331 if (PropagateForwardMarks(node, use)) Queue(use);
332 }
333 }
334 }
335 }
336
337 bool IsLoopHeaderNode(Node* node) {
338 return node->opcode() == IrOpcode::kLoop || NodeProperties::IsPhi(node);
339 }
340
341 bool IsLoopExitNode(Node* node) {
342 return node->opcode() == IrOpcode::kLoopExit ||
343 node->opcode() == IrOpcode::kLoopExitValue ||
344 node->opcode() == IrOpcode::kLoopExitEffect;
345 }
346
  // Returns true if input {index} of {use} is a backedge of {use}'s loop.
  // Only nodes already assigned a loop number (loop headers) can have
  // backedges; input {kAssumedLoopEntryIndex} is treated as the loop entry.
  bool IsBackedge(Node* use, int index) {
    if (LoopNum(use) <= 0) return false;
    if (NodeProperties::IsPhi(use)) {
      // For a phi, the control input is also not a backedge.
      return index != NodeProperties::FirstControlIndex(use) &&
             index != kAssumedLoopEntryIndex;
    } else if (use->opcode() == IrOpcode::kLoop) {
      return index != kAssumedLoopEntryIndex;
    }
    return false;
  }
358
359 int LoopNum(Node* node) { return loop_tree_->node_to_loop_num_[node->id()]; }
360
362 NodeInfo& i = info_[node->id()];
363 if (i.node == nullptr) i.node = node;
364 return i;
365 }
366
367 void Queue(Node* node) {
368 if (!queued_.Get(node)) {
369 queue_.push_back(node);
370 queued_.Set(node, true);
371 }
372 }
373
374 void AddNodeToLoop(NodeInfo* node_info, TempLoopInfo* loop, int loop_num) {
375 if (LoopNum(node_info->node) == loop_num) {
376 if (IsLoopHeaderNode(node_info->node)) {
377 node_info->next = loop->header_list;
378 loop->header_list = node_info;
379 } else {
380 DCHECK(IsLoopExitNode(node_info->node));
381 node_info->next = loop->exit_list;
382 loop->exit_list = node_info;
383 }
384 } else {
385 node_info->next = loop->body_list;
386 loop->body_list = node_info;
387 }
388 }
389
391 DCHECK(loops_found_ == static_cast<int>(loops_.size()));
392 DCHECK(loops_found_ == static_cast<int>(loop_tree_->all_loops_.size()));
393
394 // Degenerate cases.
395 if (loops_found_ == 0) return;
396 if (loops_found_ == 1) return FinishSingleLoop();
397
398 for (int i = 1; i <= loops_found_; i++) ConnectLoopTree(i);
399
400 size_t count = 0;
401 // Place the node into the innermost nested loop of which it is a member.
402 for (NodeInfo& ni : info_) {
403 if (ni.node == nullptr) continue;
404
405 TempLoopInfo* innermost = nullptr;
406 int innermost_index = 0;
407 int pos = ni.node->id() * width_;
408 // Search the marks word by word.
409 for (int i = 0; i < width_; i++) {
410 uint32_t marks = backward_[pos + i] & forward_[pos + i];
411
412 for (int j = 0; j < 32; j++) {
413 if (marks & (1u << j)) {
414 int loop_num = i * 32 + j;
415 if (loop_num == 0) continue;
416 TempLoopInfo* loop = &loops_[loop_num - 1];
417 if (innermost == nullptr ||
418 loop->loop->depth_ > innermost->loop->depth_) {
419 innermost = loop;
420 innermost_index = loop_num;
421 }
422 }
423 }
424 }
425 if (innermost == nullptr) continue;
426
427 // Return statements should never be found by forward or backward walk.
428 CHECK(ni.node->opcode() != IrOpcode::kReturn);
429
430 AddNodeToLoop(&ni, innermost, innermost_index);
431 count++;
432 }
433
434 // Serialize the node lists for loops into the loop tree.
436 for (LoopTree::Loop* loop : loop_tree_->outer_loops_) {
437 SerializeLoop(loop);
438 }
439 }
440
441 // Handle the simpler case of a single loop (no checks for nesting necessary).
443 // Place nodes into the loop header and body.
444 TempLoopInfo* li = &loops_[0];
445 li->loop = &loop_tree_->all_loops_[0];
446 loop_tree_->SetParent(nullptr, li->loop);
447 size_t count = 0;
448 for (NodeInfo& ni : info_) {
449 if (ni.node == nullptr || !IsInLoop(ni.node, 1)) continue;
450
451 // Return statements should never be found by forward or backward walk.
452 CHECK(ni.node->opcode() != IrOpcode::kReturn);
453
454 AddNodeToLoop(&ni, li, 1);
455 count++;
456 }
457
458 // Serialize the node lists for the loop into the loop tree.
460 SerializeLoop(li->loop);
461 }
462
463 // Recursively serialize the list of header nodes and body nodes
464 // so that nested loops occupy nested intervals.
466 int loop_num = loop_tree_->LoopNum(loop);
467 TempLoopInfo& li = loops_[loop_num - 1];
468
469 // Serialize the header.
470 loop->header_start_ = static_cast<int>(loop_tree_->loop_nodes_.size());
471 for (NodeInfo* ni = li.header_list; ni != nullptr; ni = ni->next) {
473 loop_tree_->node_to_loop_num_[ni->node->id()] = loop_num;
474 }
475
476 // Serialize the body.
477 loop->body_start_ = static_cast<int>(loop_tree_->loop_nodes_.size());
478 for (NodeInfo* ni = li.body_list; ni != nullptr; ni = ni->next) {
480 loop_tree_->node_to_loop_num_[ni->node->id()] = loop_num;
481 }
482
483 // Serialize nested loops.
484 for (LoopTree::Loop* child : loop->children_) SerializeLoop(child);
485
486 // Serialize the exits.
487 loop->exits_start_ = static_cast<int>(loop_tree_->loop_nodes_.size());
488 for (NodeInfo* ni = li.exit_list; ni != nullptr; ni = ni->next) {
490 loop_tree_->node_to_loop_num_[ni->node->id()] = loop_num;
491 }
492
493 loop->exits_end_ = static_cast<int>(loop_tree_->loop_nodes_.size());
494 }
495
496 // Connect the LoopTree loops to their parents recursively.
498 TempLoopInfo& li = loops_[loop_num - 1];
499 if (li.loop != nullptr) return li.loop;
500
501 NodeInfo& ni = info(li.header);
502 LoopTree::Loop* parent = nullptr;
503 for (int i = 1; i <= loops_found_; i++) {
504 if (i == loop_num) continue;
505 if (IsInLoop(ni.node, i)) {
506 // recursively create potential parent loops first.
508 if (parent == nullptr || upper->depth_ > parent->depth_) {
509 parent = upper;
510 }
511 }
512 }
513 li.loop = &loop_tree_->all_loops_[loop_num - 1];
514 loop_tree_->SetParent(parent, li.loop);
515 return li.loop;
516 }
517
519 for (int i = 0; i < loop->depth_; i++) PrintF(" ");
520 PrintF("Loop depth = %d ", loop->depth_);
521 int i = loop->header_start_;
522 while (i < loop->body_start_) {
523 PrintF(" H#%d", loop_tree_->loop_nodes_[i++]->id());
524 }
525 while (i < loop->exits_start_) {
526 PrintF(" B#%d", loop_tree_->loop_nodes_[i++]->id());
527 }
528 while (i < loop->exits_end_) {
529 PrintF(" E#%d", loop_tree_->loop_nodes_[i++]->id());
530 }
531 PrintF("\n");
532 for (LoopTree::Loop* child : loop->children_) PrintLoop(child);
533 }
534};
535
537 Zone* zone) {
538 LoopTree* loop_tree =
539 graph->zone()->New<LoopTree>(graph->NodeCount(), graph->zone());
540 LoopFinderImpl finder(graph, loop_tree, tick_counter, zone);
541 finder.Run();
542 if (v8_flags.trace_turbo_loop) {
543 finder.Print();
544 }
545 return loop_tree;
546}
547
548#if V8_ENABLE_WEBASSEMBLY
// static
// Collects the nodes of the innermost loop headed by {loop_header} by walking
// uses forward from the header. Returns nullptr when the loop is not a
// suitable candidate for {purpose}: it contains a nested loop, grows beyond
// {max_size} nodes, or (for unrolling) contains a call that is not one of a
// small allow-list of wasm builtins. For peeling, additionally requires at
// least one instruction for which peeling is expected to pay off.
ZoneUnorderedSet<Node*>* LoopFinder::FindSmallInnermostLoopFromHeader(
    Node* loop_header, AllNodes& all_nodes, Zone* zone, size_t max_size,
    Purpose purpose) {
  auto* visited = zone->New<ZoneUnorderedSet<Node*>>(zone);
  std::vector<Node*> queue;

  DCHECK_EQ(loop_header->opcode(), IrOpcode::kLoop);

  queue.push_back(loop_header);
  visited->insert(loop_header);

// Enqueues every not-yet-visited use of {node} that satisfies {condition}.
#define ENQUEUE_USES(use_name, condition)             \
  for (Node * use_name : node->uses()) {              \
    if (condition && visited->count(use_name) == 0) { \
      visited->insert(use_name);                      \
      queue.push_back(use_name);                      \
    }                                                 \
  }
  bool has_instruction_worth_peeling = false;
  while (!queue.empty()) {
    Node* node = queue.back();
    queue.pop_back();
    if (node->opcode() == IrOpcode::kEnd) {
      // We reached the end of the graph. The end node is not part of the loop.
      visited->erase(node);
      continue;
    }
    if (visited->size() > max_size) return nullptr;
    switch (node->opcode()) {
      case IrOpcode::kLoop:
        // Found nested loop.
        if (node != loop_header) return nullptr;
        ENQUEUE_USES(use, true);
        break;
      case IrOpcode::kLoopExit:
        // Found nested loop.
        if (node->InputAt(1) != loop_header) return nullptr;
        // LoopExitValue/Effect uses are inside the loop. The rest are not.
        ENQUEUE_USES(use, (use->opcode() == IrOpcode::kLoopExitEffect ||
                           use->opcode() == IrOpcode::kLoopExitValue))
        break;
      case IrOpcode::kLoopExitEffect:
      case IrOpcode::kLoopExitValue:
        if (NodeProperties::GetControlInput(node)->InputAt(1) != loop_header) {
          // Found nested loop.
          return nullptr;
        }
        // All uses are outside the loop, do nothing.
        break;
      // If unrolling, call nodes are considered to have unbounded size,
      // i.e. >max_size, with the exception of certain wasm builtins.
      case IrOpcode::kTailCall:
      case IrOpcode::kJSWasmCall:
      case IrOpcode::kJSCall:
        if (purpose == Purpose::kLoopUnrolling) return nullptr;
        ENQUEUE_USES(use, true)
        break;
      case IrOpcode::kCall: {
        if (purpose == Purpose::kLoopPeeling) {
          ENQUEUE_USES(use, true);
          break;
        }
        // For unrolling, only calls to a known-constant target can be
        // accepted, and only if the target is an allow-listed builtin.
        Node* callee = node->InputAt(0);
        if (callee->opcode() != IrOpcode::kRelocatableInt32Constant &&
            callee->opcode() != IrOpcode::kRelocatableInt64Constant) {
          return nullptr;
        }
        Builtin builtin = static_cast<Builtin>(
            OpParameter<RelocatablePtrConstantInfo>(callee->op()).value());
        constexpr Builtin unrollable_builtins[] = {
            // Exists in every stack check.
            Builtin::kWasmStackGuard,
            // Fast table operations.
            Builtin::kWasmTableGet, Builtin::kWasmTableSet,
            Builtin::kWasmTableGetFuncRef, Builtin::kWasmTableSetFuncRef,
            Builtin::kWasmTableGrow,
            // Atomics.
            Builtin::kWasmI32AtomicWait, Builtin::kWasmI64AtomicWait,
            // Exceptions.
            Builtin::kWasmAllocateFixedArray, Builtin::kWasmThrow,
            Builtin::kWasmRethrow, Builtin::kWasmRethrowExplicitContext,
            // Fast wasm-gc operations.
            Builtin::kWasmRefFunc,
            // While a built-in call, this is the slow path, so it should not
            // prevent loop unrolling for stringview_wtf16.get_codeunit.
            Builtin::kWasmStringViewWtf16GetCodeUnit};
        if (std::count(std::begin(unrollable_builtins),
                       std::end(unrollable_builtins), builtin) == 0) {
          return nullptr;
        }
        ENQUEUE_USES(use, true)
        break;
      }
      case IrOpcode::kWasmStructGet: {
        // When a chained load occurs in the loop, assume that peeling might
        // help.
        Node* object = node->InputAt(0);
        if (object->opcode() == IrOpcode::kWasmStructGet &&
            visited->find(object) != visited->end()) {
          has_instruction_worth_peeling = true;
        }
        ENQUEUE_USES(use, true);
        break;
      }
      case IrOpcode::kWasmArrayGet:
        // Rationale for array.get: loops that contain an array.get also
        // contain a bounds check, which needs to load the array's length,
        // which benefits from load elimination after peeling.
      case IrOpcode::kStringPrepareForGetCodeunit:
        // Rationale for PrepareForGetCodeunit: this internal operation is
        // specifically designed for being hoisted out of loops.
        has_instruction_worth_peeling = true;
        [[fallthrough]];
      default:
        ENQUEUE_USES(use, true)
        break;
    }
  }

  // Check that there is no floating control other than direct nodes to start().
  // We do this by checking that all non-start control inputs of loop nodes are
  // also in the loop.
  // TODO(manoskouk): This is a safety check. Consider making it DEBUG-only when
  // we are confident there is no incompatible floating control generated in
  // wasm.
  for (Node* node : *visited) {
    // The loop header is allowed to point outside the loop.
    if (node == loop_header) continue;

    if (!all_nodes.IsLive(node)) continue;

    for (Edge edge : node->input_edges()) {
      Node* input = edge.to();
      if (NodeProperties::IsControlEdge(edge) && visited->count(input) == 0 &&
          input->opcode() != IrOpcode::kStart) {
        FATAL(
            "Floating control detected in wasm turbofan graph: Node #%d:%s is "
            "inside loop headed by #%d, but its control dependency #%d:%s is "
            "outside",
            node->id(), node->op()->mnemonic(), loop_header->id(), input->id(),
            input->op()->mnemonic());
      }
    }
  }

  // Only peel functions containing instructions for which loop peeling is known
  // to be useful. TODO(14034): Add more instructions to get more benefits out
  // of loop peeling.
  if (purpose == Purpose::kLoopPeeling && !has_instruction_worth_peeling) {
    return nullptr;
  }
  return visited;
}
703#endif // V8_ENABLE_WEBASSEMBLY
704
706 const LoopTree::Loop* loop) {
707 // Look for returns and if projections that are outside the loop but whose
708 // control input is inside the loop.
709 Node* loop_node = loop_tree->GetLoopControl(loop);
710 for (Node* node : loop_tree->LoopNodes(loop)) {
711 for (Node* use : node->uses()) {
712 if (!loop_tree->Contains(loop, use)) {
713 bool unmarked_exit;
714 switch (node->opcode()) {
715 case IrOpcode::kLoopExit:
716 unmarked_exit = (node->InputAt(1) != loop_node);
717 break;
718 case IrOpcode::kLoopExitValue:
719 case IrOpcode::kLoopExitEffect:
720 unmarked_exit = (node->InputAt(1)->InputAt(1) != loop_node);
721 break;
722 default:
723 unmarked_exit = (use->opcode() != IrOpcode::kTerminate);
724 }
725 if (unmarked_exit) {
726 if (v8_flags.trace_turbo_loop) {
727 PrintF(
728 "Cannot peel loop %i. Loop exit without explicit mark: Node %i "
729 "(%s) is inside loop, but its use %i (%s) is outside.\n",
730 loop_node->id(), node->id(), node->op()->mnemonic(), use->id(),
731 use->op()->mnemonic());
732 }
733 return false;
734 }
735 }
736 }
737 }
738 return true;
739}
740
742 Node* first = *HeaderNodes(loop).begin();
743 if (first->opcode() == IrOpcode::kLoop) return first;
744 DCHECK(IrOpcode::IsPhiOpcode(first->opcode()));
745 Node* header = NodeProperties::GetControlInput(first);
746 DCHECK_EQ(IrOpcode::kLoop, header->opcode());
747 return header;
748}
749
750Node* NodeCopier::map(Node* node, uint32_t copy_index) {
751 DCHECK_LT(copy_index, copy_count_);
752 if (node_map_.Get(node) == 0) return node;
753 return copies_->at(node_map_.Get(node) + copy_index);
754}
755
// Registers {original} together with its {copy_count_} copies. The copies
// vector stores the original followed by its copies; the marker records the
// original's slot offset by one so that 0 can mean "not registered".
void NodeCopier::Insert(Node* original, const NodeVector& new_copies) {
  DCHECK_EQ(new_copies.size(), copy_count_);
  node_map_.Set(original, copies_->size() + 1);
  copies_->push_back(original);
  copies_->insert(copies_->end(), new_copies.begin(), new_copies.end());
}
762
// Single-copy convenience overload: registers {original} and its one {copy}.
// The +1 in the marker value keeps 0 available to mean "not registered".
void NodeCopier::Insert(Node* original, Node* copy) {
  DCHECK_EQ(copy_count_, 1);
  node_map_.Set(original, copies_->size() + 1);
  copies_->push_back(original);
  copies_->push_back(copy);
}
769
770} // namespace compiler
771} // namespace internal
772} // namespace v8
SourcePosition pos
void reserve(size_t new_cap)
void push_back(const T &value)
T * AllocateArray(size_t length)
Definition zone.h:127
T * New(Args &&... args)
Definition zone.h:114
bool IsLive(const Node *node) const
Definition all-nodes.h:28
static bool IsPhiOpcode(Value value)
Definition opcodes.h:1400
void PrintLoop(LoopTree::Loop *loop)
bool PropagateForwardMarks(Node *from, Node *to)
void AddNodeToLoop(NodeInfo *node_info, TempLoopInfo *loop, int loop_num)
void SetLoopMark(Node *node, int loop_num)
bool IsInLoop(Node *node, int loop_num)
bool PropagateBackwardMarks(Node *from, Node *to, int loop_filter)
void SetLoopMarkForLoopHeader(Node *node, int loop_num)
bool IsBackedge(Node *use, int index)
LoopTree::Loop * ConnectLoopTree(int loop_num)
LoopFinderImpl(TFGraph *graph, LoopTree *loop_tree, TickCounter *tick_counter, Zone *zone)
ZoneVector< TempLoopInfo > loops_
bool SetBackwardMark(Node *to, int loop_num)
void SerializeLoop(LoopTree::Loop *loop)
bool SetForwardMark(Node *to, int loop_num)
static bool HasMarkedExits(LoopTree *loop_tree, const LoopTree::Loop *loop)
static LoopTree * BuildLoopTree(TFGraph *graph, TickCounter *tick_counter, Zone *temp_zone)
Node * GetLoopControl(const Loop *loop)
ZoneVector< Loop * > outer_loops_
int LoopNum(const Loop *loop) const
ZoneVector< Node * > loop_nodes_
void SetParent(Loop *parent, Loop *child)
Node * HeaderNode(const Loop *loop)
NodeRange LoopNodes(const Loop *loop)
bool Contains(const Loop *loop, Node *node)
Node * map(Node *node, uint32_t copy_index)
void Insert(Node *original, const NodeVector &new_copies)
V8_INLINE void Set(Node *node, State state)
Definition node-marker.h:69
V8_INLINE State Get(const Node *node)
Definition node-marker.h:65
static Node * GetControlInput(Node *node, int index=0)
constexpr IrOpcode::Value opcode() const
Definition node.h:52
Node * InputAt(int index) const
Definition node.h:70
NodeId id() const
Definition node.h:57
Handle< SharedFunctionInfo > info
int end
int32_t offset
Node * node
#define INDEX(x)
#define BIT(x)
uint32_t const mask
static const int kAssumedLoopEntryIndex
T const & OpParameter(const Operator *op)
Definition operator.h:214
void PrintF(const char *format,...)
Definition utils.cc:39
V8_EXPORT_PRIVATE FlagValues v8_flags
#define CHECK(condition)
Definition logging.h:124
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485