v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
loop-peeling-reducer.h
Go to the documentation of this file.
1// Copyright 2023 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_COMPILER_TURBOSHAFT_LOOP_PEELING_REDUCER_H_
6#define V8_COMPILER_TURBOSHAFT_LOOP_PEELING_REDUCER_H_
7
8#include "src/base/logging.h"
16
18
20
// Debug-only tracing for this reducer, gated on the
// --turboshaft-trace-peeling flag; expands to nothing in non-DEBUG builds.
// It is #undef'd at the end of this header so it does not leak to includers.
21#ifdef DEBUG
22#define TRACE(x) \
23 do { \
24 if (v8_flags.turboshaft_trace_peeling) StdoutStream() << x << std::endl; \
25 } while (false)
26#else
27#define TRACE(x)
28#endif
29
// Forward declaration; only needed by the static_assert inside
// LoopPeelingReducer that prevents loop peeling and loop unrolling from
// being part of the same reducer stack (see the comment in pipeline.cc).
30template <class Next>
31class LoopUnrollingReducer;
32
33// LoopPeeling "peels" the first iteration of innermost loops (= it extracts the
34// first iteration from the loop). The goal of this is mainly to hoist checks
35// out of the loop (such as Smi-checks, type-checks, bound-checks, etc).
36
37template <class Next>
38class LoopPeelingReducer : public Next {
39 public:
 // NOTE(review): this listing is a doxygen extraction; the leading numbers
 // are the original source line numbers, and gaps in them mean lines were
 // dropped — including the TURBOSHAFT_REDUCER_BOILERPLATE line and several
 // method signatures (some are recoverable from the symbol index appended at
 // the bottom of the page). Confirm against the upstream header before
 // relying on this text.
41
42#if defined(__clang__)
43 // LoopUnrolling and LoopPeeling shouldn't be performed in the same phase, see
44 // the comment in pipeline.cc where LoopUnrolling is triggered.
45 static_assert(
46 !reducer_list_contains<ReducerList, LoopUnrollingReducer>::value);
47#endif
48
 // ReduceInputGraphGoto (its signature — original line 49 — was dropped by
 // the extraction): triggers peeling when a forward edge reaches a peelable
 // loop header, and skips the loop backedge while the peeled iteration is
 // being emitted.
50 // Note that the "ShouldSkipOptimizationStep" is placed in the part of
51 // this Reduce method triggering the peeling rather than at the beginning.
52 // This is because the backedge skipping is not an optimization but a
53 // mandatory lowering when peeling is being performed.
54 LABEL_BLOCK(no_change) { return Next::ReduceInputGraphGoto(ig_idx, gto); }
55
56 const Block* dst = gto.destination;
57 if (dst->IsLoop() && !gto.is_backedge && CanPeelLoop(dst)) {
58 if (ShouldSkipOptimizationStep()) goto no_change;
 // NOTE(review): original line 59 is missing here — presumably the call
 // that starts the peeling (PeelFirstIteration(dst)); confirm upstream.
60 return {};
61 } else if (IsEmittingPeeledIteration() && dst == current_loop_header_) {
62 // We skip the backedge of the loop: PeelFirstIteration will instead emit a
63 // forward edge to the non-peeled header.
64 return {};
65 }
66
67 goto no_change;
68 }
69
70 // TODO(dmercadier): remove once StackCheckOp are kept in the pipeline until
71 // the very end (which should happen when we have a SimplifiedLowering in
72 // Turboshaft).
 // ReduceInputGraphCall (the opening of its signature — original line 73 —
 // was dropped): removes calls that are stack checks from the peeled
 // iteration.
74 const CallOp& call) {
75 LABEL_BLOCK(no_change) { return Next::ReduceInputGraphCall(ig_idx, call); }
76 if (ShouldSkipOptimizationStep()) goto no_change;
77
 // NOTE(review): original lines 78 and 80 are missing — this is the interior
 // of a condition, presumably of the shape
 // `if (IsEmittingPeeledIteration() && call.IsStackCheck(...))`; confirm
 // upstream.
79 call.IsStackCheck(__ input_graph(), broker_,
81 // We remove the stack check of the peeled iteration.
82 return {};
83 }
84
85 goto no_change;
86 }
87
 // ReduceInputGraphJSStackCheck (its signature — original line 88 — was
 // dropped; per the symbol index it takes (V<None> ig_idx, const
 // JSStackCheckOp& stack_check)). Original line 90, the guard condition for
 // the early no-change return below, is also missing.
89 const JSStackCheckOp& stack_check) {
91 return Next::ReduceInputGraphJSStackCheck(ig_idx, stack_check);
92 }
93
94 // We remove the stack check of the peeled iteration.
95 return V<None>::Invalid();
96 }
97
98#if V8_ENABLE_WEBASSEMBLY
 // Wasm flavor of the stack-check removal above. NOTE(review): original line
 // 101 (the guard condition for the early no-change return) is missing from
 // this extraction.
99 V<None> REDUCE_INPUT_GRAPH(WasmStackCheck)(
100 V<None> ig_idx, const WasmStackCheckOp& stack_check) {
102 return Next::ReduceInputGraphWasmStackCheck(ig_idx, stack_check);
103 }
104
105 // We remove the stack check of the peeled iteration.
106 return V<None>::Invalid();
107 }
108#endif
109
 // ReduceInputGraphPhi (its signature — original line 110 — was dropped; per
 // the symbol index: OpIndex REDUCE_INPUT_GRAPH(Phi)(OpIndex ig_idx, const
 // PhiOp& phi)). Rewires the loop phis of the unpeeled loop header.
111 if (!IsEmittingUnpeeledBody() ||
112 __ current_input_block() != current_loop_header_) {
113 return Next::ReduceInputGraphPhi(ig_idx, phi);
114 }
115
116 // The 1st input of the loop phis of the unpeeled loop header should be the
117 // 2nd input of the original loop phis, since with the peeling, they
118 // actually come from the backedge of the peeled iteration.
119 return __ PendingLoopPhi(
120 __ MapToNewGraph(phi.input(PhiOp::kLoopPhiBackEdgeIndex)), phi.rep);
121 }
122
123 private:
 // Loops containing more than this many operations are not peeled
 // (enforced in CanPeelLoop below).
124 static constexpr int kMaxSizeForPeeling = 1000;
 // NOTE(review): original lines 125-129 are missing — the {peeling_} state
 // referenced by the comments below (e.g. "EmittingPeeledLoop") is presumably
 // declared there; confirm upstream.
130
 // Emits the peeled (first) iteration of the loop headed at {header}, then —
 // unless the 2nd iteration turned out to be unreachable — the regular
 // unpeeled loop body.
131 void PeelFirstIteration(const Block* header) {
132 TRACE("LoopPeeling: peeling loop at " << header->index());
 // NOTE(review): original lines 133-135 are missing here.
136 current_loop_header_ = header;
137
138 // Emitting the peeled iteration.
139 auto loop_body = loop_finder_.GetLoopBody(header);
140 // Note that this call to CloneSubGraph will not emit the backedge because
141 // we'll skip it in ReduceInputGraphGoto (above). The next CloneSubGraph
142 // call will start with a forward Goto to the header (like all
143 // CloneSubGraphs do), and will end by emitting the backedge, because this
144 // time {peeling_} won't be EmittingPeeledLoop, and the backedge Goto will
145 // thus be emitted.
146 TRACE("> Emitting peeled iteration");
147 __ CloneSubGraph(loop_body, /* keep_loop_kinds */ false);
148
149 if (__ generating_unreachable_operations()) {
150 // While peeling, we realized that the 2nd iteration of the loop is not
151 // reachable.
152 TRACE("> Second iteration is not reachable, stopping now");
153 return;
154 }
155
156 // We now emit the regular unpeeled loop.
 // NOTE(review): original line 157 is missing here.
158 TRACE("> Emitting unpeeled loop body");
159 __ CloneSubGraph(loop_body, /* keep_loop_kinds */ true,
160 /* is_loop_after_peeling */ true);
161 }
162
 // Returns true if the loop headed at {header} is eligible for peeling: we
 // must not already be peeling another loop, the loop must be innermost (no
 // inner loops), and it must not exceed kMaxSizeForPeeling operations.
163 bool CanPeelLoop(const Block* header) {
164 TRACE("LoopPeeling: considering " << header->index());
165 if (IsPeeling()) {
166 TRACE("> Cannot peel because we're already peeling a loop");
167 return false;
168 }
169 auto info = loop_finder_.GetLoopInfo(header);
170 if (info.has_inner_loops) {
171 TRACE("> Cannot peel because it has inner loops");
172 return false;
173 }
174 if (info.op_count > kMaxSizeForPeeling) {
175 TRACE("> Cannot peel because it contains too many operations");
176 return false;
177 }
178 return true;
179 }
180
181 bool IsPeeling() const {
 // NOTE(review): original line 182 (the body of IsPeeling) is missing —
 // presumably a query of the peeling state; confirm upstream.
183 }
 // NOTE(review): original lines 184-189 and 191 are missing — they
 // presumably define IsEmittingPeeledIteration(), IsEmittingUnpeeledBody()
 // and the {peeling_} member referenced above; confirm upstream.
190
 // Input-graph header block of the loop currently being peeled; set by
 // PeelFirstIteration and compared against in the Goto/Phi reducers.
192 const Block* current_loop_header_ = nullptr;
193
194 LoopFinder loop_finder_{__ phase_zone(), &__ modifiable_input_graph()};
196};
197
198#undef TRACE
199
201
202} // namespace v8::internal::compiler::turboshaft
203
204#endif // V8_COMPILER_TURBOSHAFT_LOOP_PEELING_REDUCER_H_
#define REDUCE_INPUT_GRAPH(operation)
union v8::internal::@341::BuiltinMetadata::KindSpecificData data
LoopInfo GetLoopInfo(const Block *block) const
Definition loop-finder.h:74
ZoneSet< const Block *, BlockCmp > GetLoopBody(const Block *loop_header)
V< None > REDUCE_INPUT_GRAPH JSStackCheck(V< None > ig_idx, const JSStackCheckOp &stack_check)
OpIndex REDUCE_INPUT_GRAPH Phi(OpIndex ig_idx, const PhiOp &phi)
V< None > REDUCE_INPUT_GRAPH Goto(V< None > ig_idx, const GotoOp &gto)
#define TURBOSHAFT_REDUCER_BOILERPLATE(Name)
Definition assembler.h:823
#define LABEL_BLOCK(label)
Definition assembler.h:910
JSHeapBroker * broker
#define TRACE(...)
V8_EXPORT_PRIVATE bool ShouldSkipOptimizationStep()
Definition utils.h:84
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
static constexpr size_t kLoopPhiBackEdgeIndex