v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
maglev-inlining.h
// Copyright 2025 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_MAGLEV_MAGLEV_INLINING_H_
#define V8_MAGLEV_MAGLEV_INLINING_H_

#include <algorithm>

#include "src/base/logging.h"
#include "src/compiler/js-heap-broker.h"
#include "src/maglev/maglev-basic-block.h"
#include "src/maglev/maglev-compilation-info.h"
#include "src/maglev/maglev-compilation-unit.h"
#include "src/maglev/maglev-graph-builder.h"
#include "src/maglev/maglev-ir.h"

namespace v8::internal::maglev {

class MaglevInliner {
 public:
  MaglevInliner(MaglevCompilationInfo* compilation_info, Graph* graph)
      : compilation_info_(compilation_info), graph_(graph) {}

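  // Runs the inlining loop: repeatedly picks the most promising remaining
  // call site and inlines it, until the cumulative inlined-bytecode budget
  // (--max-maglev-inlined-bytecode-size-cumulative) is exhausted or no
  // candidate remains.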
  void Run(bool is_tracing_maglev_graphs_enabled) {
    if (graph_->inlined_functions().empty()) return;

    while (true) {
      if (graph_->total_inlined_bytecode_size() >
          v8_flags.max_maglev_inlined_bytecode_size_cumulative) {
        // No more inlining.
        break;
      }
      MaglevCallSiteInfo* call_site = ChooseNextCallSite();
      if (!call_site) break;

      MaybeReduceResult result = BuildInlineFunction(call_site);
      if (result.IsFail()) continue;
      // With --trace-maglev-inlining-verbose, we print the graph after each
      // inlining step/call.
      if (is_tracing_maglev_graphs_enabled && v8_flags.print_maglev_graphs &&
          v8_flags.trace_maglev_inlining_verbose) {
        std::cout << "\nAfter inlining "
                  << call_site->generic_call_node->shared_function_info()
                  << std::endl;
        PrintGraph(std::cout, compilation_info_, graph_);
      }
    }

    // Otherwise we print just once at the end.
    if (is_tracing_maglev_graphs_enabled && v8_flags.print_maglev_graphs &&
        !v8_flags.trace_maglev_inlining_verbose) {
      std::cout << "\nAfter inlining" << std::endl;
      PrintGraph(std::cout, compilation_info_, graph_);
    }
  }

 private:
  MaglevCompilationInfo* compilation_info_;
  Graph* graph_;

  compiler::JSHeapBroker* broker() const {
    return compilation_info_->broker();
  }
  Zone* zone() const { return compilation_info_->zone(); }

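  // Returns the next call site to inline, or nullptr if none is left:
  // either the first remaining candidate (eager order) or the one with the
  // highest score, depending on --maglev-inlining-following-eager-order.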
  MaglevCallSiteInfo* ChooseNextCallSite() {
    auto it =
        v8_flags.maglev_inlining_following_eager_order
            ? std::ranges::find_if(graph_->inlineable_calls(),
                                   [](auto* site) { return site != nullptr; })
            : std::ranges::max_element(
                  graph_->inlineable_calls(),
                  [](const MaglevCallSiteInfo* info1,
                     const MaglevCallSiteInfo* info2) {
                    if (info1 == nullptr || info2 == nullptr) {
                      return info2 != nullptr;
                    }
                    return info1->score < info2->score;
                  });
    if (it == graph_->inlineable_calls().end()) return nullptr;
    MaglevCallSiteInfo* call_site = *it;
    *it = nullptr;  // Erase call site.
    return call_site;
  }

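  // Inlines the function called at `call_site` in place: splits the caller
  // block at the generic call node, builds the callee graph into the
  // truncated block, and overwrites the call node with the inlined return
  // value. Fails without mutating the graph if the call block is dead.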
  MaybeReduceResult BuildInlineFunction(MaglevCallSiteInfo* call_site) {
    CallKnownJSFunction* call_node = call_site->generic_call_node;
    BasicBlock* call_block = call_node->owner();
    MaglevCallerDetails* caller_details = &call_site->caller_details;
    DeoptFrame* caller_deopt_frame = caller_details->deopt_frame;
    const MaglevCompilationUnit* caller_unit =
        &caller_deopt_frame->GetCompilationUnit();
    compiler::SharedFunctionInfoRef shared = call_node->shared_function_info();

    if (!call_block || call_block->is_dead()) {
      // The block containing the call is unreachable, and it was previously
      // removed. Do not try to inline the call.
      return ReduceResult::Fail();
    }

    if (v8_flags.trace_maglev_inlining) {
      std::cout << "  non-eager inlining " << shared << std::endl;
    }

    // Truncate the basic block and remove the generic call node.
    ExceptionHandlerInfo::List rem_handlers_in_call_block;
    call_block->exception_handlers().TruncateAt(
        &rem_handlers_in_call_block, call_node->exception_handler_info());
    ZoneVector<Node*> rem_nodes_in_call_block =
        call_block->Split(call_node, zone());

    // Create a new compilation unit.
    MaglevCompilationUnit* inner_unit = MaglevCompilationUnit::NewInner(
        zone(), caller_unit, shared, call_site->feedback_cell);

    compiler::BytecodeArrayRef bytecode = shared.GetBytecodeArray(broker());
    graph_->add_inlined_bytecode_size(bytecode.length());

    // Create a new graph builder for the inlined function.
    LocalIsolate* local_isolate = broker()->local_isolate_or_isolate();
    MaglevGraphBuilder inner_graph_builder(local_isolate, inner_unit, graph_,
                                           &call_site->caller_details);

    // Update the caller deopt frame with the inlined arguments.
    caller_details->deopt_frame =
        inner_graph_builder.AddInlinedArgumentsToDeoptFrame(
            caller_deopt_frame, inner_unit, call_node->closure().node(),
            call_site->caller_details.arguments);

    // We truncate the graph to build the function in-place, preserving the
    // invariant that all jumps move forward (except JumpLoop).
    std::vector<BasicBlock*> saved_bb = TruncateGraphAt(call_block);
    ControlNode* control_node = call_block->reset_control_node();

    // Set the inner graph builder to build in the truncated call block.
    inner_graph_builder.set_current_block(call_block);

    ReduceResult result = inner_graph_builder.BuildInlineFunction(
        caller_deopt_frame->GetSourcePosition(), call_node->context().node(),
        call_node->closure().node(), call_node->new_target().node());

    if (result.IsDoneWithAbort()) {
      // Since the rest of the block is dead, these nodes don't belong to any
      // basic block anymore.
      for (auto node : rem_nodes_in_call_block) {
        node->set_owner(nullptr);
      }
      // Restore the rest of the graph.
      for (auto bb : saved_bb) {
        graph_->Add(bb);
      }
      RemovePredecessorFollowing(control_node, call_block);
      // TODO(victorgomes): We probably don't need to iterate the whole graph
      // to remove unreachable blocks, but only the successors of
      // control_node in saved_bbs.
      RemoveUnreachableBlocks();
      return result;
    }

    DCHECK(result.IsDoneWithValue());
    ValueNode* returned_value =
        EnsureTagged(inner_graph_builder, result.value());

    // Resume execution using the final block of the inner builder: add the
    // remaining nodes to it and reuse the control flow of the old call
    // block.
    BasicBlock* final_block = inner_graph_builder.FinishInlinedBlockForCaller(
        control_node, rem_nodes_in_call_block);
    DCHECK_NOT_NULL(final_block);
    final_block->exception_handlers().Append(
        std::move(rem_handlers_in_call_block));

    // Update the predecessors of the successors of {final_block}, which
    // previously pointed to {call_block}.
    final_block->ForEachSuccessor(
        [call_block, final_block](BasicBlock* successor) {
          UpdatePredecessorsOf(successor, call_block, final_block);
        });

    // Restore the rest of the graph.
    for (auto bb : saved_bb) {
      graph_->Add(bb);
    }

    if (auto alloc = returned_value->TryCast<InlinedAllocation>()) {
      // TODO(victorgomes): Support eliding VOs.
      alloc->ForceEscaping();
#ifdef DEBUG
      alloc->set_is_returned_value_from_inline_call();
#endif  // DEBUG
    }
    call_node->OverwriteWithIdentityTo(returned_value);
    return ReduceResult::Done();
  }

  // Truncates the graph at the given basic block `block`. All blocks
  // following `block` (exclusive) are removed from the graph and returned.
  // `block` itself is removed from the graph and not returned.
  std::vector<BasicBlock*> TruncateGraphAt(BasicBlock* block) {
    // TODO(victorgomes): Consider using a linked list of basic blocks in
    // Maglev instead of a vector.
    auto it =
        std::find(graph_->blocks().begin(), graph_->blocks().end(), block);
    CHECK_NE(it, graph_->blocks().end());
    size_t index = std::distance(graph_->blocks().begin(), it);
    std::vector<BasicBlock*> saved_bb(graph_->blocks().begin() + index + 1,
                                      graph_->blocks().end());
    graph_->blocks().resize(index);
    return saved_bb;
  }

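  // Allocates a new node and appends it to the end of the block currently
  // under construction in `builder`. Only nodes that can neither eager- nor
  // lazy-deopt are allowed, since no deopt frame is attached.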
  template <class Node, typename... Args>
  ValueNode* AddNodeAtBlockEnd(MaglevGraphBuilder& builder,
                               std::initializer_list<ValueNode*> inputs,
                               Args&&... args) {
    ValueNode* node =
        NodeBase::New<Node>(zone(), inputs, std::forward<Args>(args)...);
    DCHECK(!node->properties().can_eager_deopt());
    DCHECK(!node->properties().can_lazy_deopt());
    builder.node_buffer().push_back(node);
    RegisterNode(builder, node);
    return node;
  }

  void RegisterNode(MaglevGraphBuilder& builder, Node* node) {
    if (builder.has_graph_labeller()) {
      builder.graph_labeller()->RegisterNode(node);
    }
  }

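  // Ensures the inlined function's return value has a tagged
  // representation, inserting a conversion node at the end of the current
  // block if needed.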
  ValueNode* EnsureTagged(MaglevGraphBuilder& builder, ValueNode* node) {
    // TODO(victorgomes): Use KNA to create better conversion nodes?
    switch (node->value_representation()) {
      case ValueRepresentation::kInt32:
        return AddNodeAtBlockEnd<Int32ToNumber>(builder, {node});
      case ValueRepresentation::kUint32:
        return AddNodeAtBlockEnd<Uint32ToNumber>(builder, {node});
      case ValueRepresentation::kFloat64:
        return AddNodeAtBlockEnd<Float64ToTagged>(
            builder, {node},
            Float64ToTagged::ConversionMode::kCanonicalizeSmi);
      case ValueRepresentation::kHoleyFloat64:
        return AddNodeAtBlockEnd<HoleyFloat64ToTagged>(
            builder, {node},
            HoleyFloat64ToTagged::ConversionMode::kCanonicalizeSmi);
      case ValueRepresentation::kIntPtr:
        return AddNodeAtBlockEnd<IntPtrToNumber>(builder, {node});
      case ValueRepresentation::kTagged:
        return node;
    }
  }

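  // Replaces `prev_pred` with `new_pred` in `block`'s predecessor list. A
  // block without a merge state has exactly one predecessor.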
  static void UpdatePredecessorsOf(BasicBlock* block, BasicBlock* prev_pred,
                                   BasicBlock* new_pred) {
    if (!block->has_state()) {
      DCHECK_EQ(block->predecessor(), prev_pred);
      block->set_predecessor(new_pred);
      return;
    }
    for (int i = 0; i < block->predecessor_count(); i++) {
      if (block->predecessor_at(i) == prev_pred) {
        block->state()->set_predecessor_at(i, new_pred);
        break;
      }
    }
  }

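  // Removes `call_block` from the predecessor lists of all successors
  // reachable through `control`. A loop whose backedge came from
  // `call_block` is demoted to a regular block.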
268
270 BasicBlock* call_block) {
272 if (!succ->has_state()) return;
273 if (succ->is_loop() && succ->backedge_predecessor() == call_block) {
275 return;
276 }
277 for (int i = succ->predecessor_count() - 1; i >= 0; i--) {
278 if (succ->predecessor_at(i) == call_block) {
279 succ->state()->RemovePredecessorAt(i);
280 }
281 }
282 });
283 }
  void RemoveUnreachableBlocks() {
    std::vector<bool> reachable_blocks(graph_->max_block_id(), false);
    std::vector<BasicBlock*> worklist;

    DCHECK(!graph_->blocks().empty());
    BasicBlock* initial_bb = graph_->blocks().front();
    worklist.push_back(initial_bb);
    reachable_blocks[initial_bb->id()] = true;
    DCHECK(!initial_bb->is_loop());

    while (!worklist.empty()) {
      BasicBlock* current = worklist.back();
      worklist.pop_back();

      for (auto handler : current->exception_handlers()) {
        if (!handler->HasExceptionHandler()) continue;
        if (handler->ShouldLazyDeopt()) continue;
        BasicBlock* catch_block = handler->catch_block();
        if (!reachable_blocks[catch_block->id()]) {
          reachable_blocks[catch_block->id()] = true;
          worklist.push_back(catch_block);
        }
      }

      current->ForEachSuccessor([&](BasicBlock* succ) {
        if (!reachable_blocks[succ->id()]) {
          reachable_blocks[succ->id()] = true;
          worklist.push_back(succ);
        }
      });
    }

    // Sweep dead blocks and remove unreachable predecessors.
    graph_->IterateGraphAndSweepDeadBlocks([&](BasicBlock* bb) {
      if (!reachable_blocks[bb->id()]) return true;
      // If the block doesn't have a merge state, it has only one
      // predecessor, so it must be the reachable one.
      if (!bb->has_state()) return false;
      if (bb->is_loop() &&
          !reachable_blocks[bb->backedge_predecessor()->id()]) {
        // If the backedge predecessor is not reachable, we can turn the loop
        // into a regular block.
        bb->state()->TurnLoopIntoRegularBlock();
      }
      for (int i = bb->predecessor_count() - 1; i >= 0; i--) {
        if (!reachable_blocks[bb->predecessor_at(i)->id()]) {
          bb->state()->RemovePredecessorAt(i);
        }
      }
      return false;
    });
  }
};

}  // namespace v8::internal::maglev

#endif  // V8_MAGLEV_MAGLEV_INLINING_H_