v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
maglev-pre-regalloc-codegen-processors.h
// Copyright 2024 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_MAGLEV_MAGLEV_PRE_REGALLOC_CODEGEN_PROCESSORS_H_
#define V8_MAGLEV_MAGLEV_PRE_REGALLOC_CODEGEN_PROCESSORS_H_

#include "src/codegen/register-configuration.h"
#include "src/maglev/maglev-compilation-info.h"
#include "src/maglev/maglev-graph-processor.h"
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-ir.h"

namespace v8::internal::maglev {

class ValueLocationConstraintProcessor {
 public:
  void PreProcessGraph(Graph* graph) {}
  void PostProcessGraph(Graph* graph) {}
  BlockProcessResult PreProcessBasicBlock(BasicBlock* block) {
    return BlockProcessResult::kContinue;
  }
  void PostProcessBasicBlock(BasicBlock* block) {}
  void PostPhiProcessing() {}

#define DEF_PROCESS_NODE(NAME)                                      \
  ProcessResult Process(NAME* node, const ProcessingState& state) { \
    node->InitTemporaries();                                        \
    node->SetValueLocationConstraints();                            \
    return ProcessResult::kContinue;                                \
  }
  NODE_BASE_LIST(DEF_PROCESS_NODE)
#undef DEF_PROCESS_NODE
};
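
// For illustration: applied to a hypothetical node type Foo, the
// DEF_PROCESS_NODE macro above expands to an overload equivalent to
//
//   ProcessResult Process(Foo* node, const ProcessingState& state) {
//     node->InitTemporaries();
//     node->SetValueLocationConstraints();
//     return ProcessResult::kContinue;
//   }
//
// and NODE_BASE_LIST stamps out one such overload per Maglev node class, so
// the graph processor can dispatch on the concrete node type.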

class DecompressedUseMarkingProcessor {
 public:
  void PreProcessGraph(Graph* graph) {}
  void PostProcessGraph(Graph* graph) {}
  BlockProcessResult PreProcessBasicBlock(BasicBlock* block) {
    return BlockProcessResult::kContinue;
  }
  void PostProcessBasicBlock(BasicBlock* block) {}
  void PostPhiProcessing() {}

  template <typename NodeT>
  ProcessResult Process(NodeT* node, const ProcessingState& state) {
#ifdef V8_COMPRESS_POINTERS
    node->MarkTaggedInputsAsDecompressing();
#endif
    return ProcessResult::kContinue;
  }
};

class MaxCallDepthProcessor {
 public:
  void PreProcessGraph(Graph* graph) {}
  void PostProcessGraph(Graph* graph) {
    graph->set_max_call_stack_args(max_call_stack_args_);
    graph->set_max_deopted_stack_size(max_deopted_stack_size_);
  }
  BlockProcessResult PreProcessBasicBlock(BasicBlock* block) {
    return BlockProcessResult::kContinue;
  }
  void PostProcessBasicBlock(BasicBlock* block) {}
  void PostPhiProcessing() {}

  template <typename NodeT>
  ProcessResult Process(NodeT* node, const ProcessingState& state) {
    if constexpr (NodeT::kProperties.is_call() ||
                  NodeT::kProperties.needs_register_snapshot()) {
      int node_stack_args = node->MaxCallStackArgs();
      if constexpr (NodeT::kProperties.needs_register_snapshot()) {
        // Pessimistically assume that we'll push all registers in deferred
        // calls.
        node_stack_args += kAllocatableGeneralRegisterCount +
                           kAllocatableDoubleRegisterCount;
      }
      max_call_stack_args_ = std::max(max_call_stack_args_, node_stack_args);
    }
    if constexpr (NodeT::kProperties.can_eager_deopt()) {
      UpdateMaxDeoptedStackSize(node->eager_deopt_info());
    }
    if constexpr (NodeT::kProperties.can_lazy_deopt()) {
      UpdateMaxDeoptedStackSize(node->lazy_deopt_info());
    }
    return ProcessResult::kContinue;
  }

 private:
  void UpdateMaxDeoptedStackSize(DeoptInfo* deopt_info) {
    const DeoptFrame* deopt_frame = &deopt_info->top_frame();
    int frame_size = 0;
    if (deopt_frame->type() == DeoptFrame::FrameType::kInterpretedFrame) {
      if (&deopt_frame->as_interpreted().unit() == last_seen_unit_) return;
      last_seen_unit_ = &deopt_frame->as_interpreted().unit();
      frame_size = deopt_frame->as_interpreted().unit().max_arguments() *
                   kSystemPointerSize;
    }

    do {
      frame_size += ConservativeFrameSize(deopt_frame);
      deopt_frame = deopt_frame->parent();
    } while (deopt_frame != nullptr);
    max_deopted_stack_size_ = std::max(frame_size, max_deopted_stack_size_);
  }
  int ConservativeFrameSize(const DeoptFrame* deopt_frame) {
    switch (deopt_frame->type()) {
      case DeoptFrame::FrameType::kInterpretedFrame: {
        auto info = UnoptimizedFrameInfo::Conservative(
            deopt_frame->as_interpreted().unit().parameter_count(),
            deopt_frame->as_interpreted().unit().register_count());
        return info.frame_size_in_bytes();
      }
      case DeoptFrame::FrameType::kConstructInvokeStubFrame: {
        return FastConstructStubFrameInfo::Conservative().frame_size_in_bytes();
      }
      case DeoptFrame::FrameType::kInlinedArgumentsFrame: {
        return std::max(
            0,
            static_cast<int>(
                deopt_frame->as_inlined_arguments().arguments().size() -
                deopt_frame->as_inlined_arguments().unit().parameter_count()) *
                kSystemPointerSize);
      }
      case DeoptFrame::FrameType::kBuiltinContinuationFrame: {
        // PC + FP + Closure + Params + Context
        const RegisterConfiguration* config = RegisterConfiguration::Default();
        auto info = BuiltinContinuationFrameInfo::Conservative(
            deopt_frame->as_builtin_continuation().parameters().length(),
            Builtins::CallInterfaceDescriptorFor(
                deopt_frame->as_builtin_continuation().builtin_id()),
            config);
        return info.frame_size_in_bytes();
      }
    }
  }

  int max_call_stack_args_ = 0;
  int max_deopted_stack_size_ = 0;
  // Optimize UpdateMaxDeoptedStackSize to not re-calculate if it sees the same
  // compilation unit multiple times in a row.
  const MaglevCompilationUnit* last_seen_unit_ = nullptr;
};
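
// Summary of the two bounds computed above, for illustration (the register
// counts are architecture-dependent constants, so no concrete numbers are
// assumed here):
//
//   max_call_stack_args_  = max over all call / register-snapshot nodes of
//       node->MaxCallStackArgs()
//       + (needs_register_snapshot ? kAllocatableGeneralRegisterCount +
//                                    kAllocatableDoubleRegisterCount
//                                  : 0)
//
//   max_deopted_stack_size_ = max over all deopt points of the sum of
//       ConservativeFrameSize(frame) over the frame and its parents, plus
//       max_arguments() * kSystemPointerSize when the top frame is an
//       interpreted frame.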

class LiveRangeAndNextUseProcessor {
 public:
  LiveRangeAndNextUseProcessor(MaglevCompilationInfo* compilation_info,
                               Graph* graph, RegallocInfo* regalloc_info)
      : compilation_info_(compilation_info), regalloc_info_(regalloc_info) {}

  void PreProcessGraph(Graph* graph) {}
  void PostProcessGraph(Graph* graph) { DCHECK(loop_used_nodes_.empty()); }
  BlockProcessResult PreProcessBasicBlock(BasicBlock* block) {
    if (!block->has_state()) return BlockProcessResult::kContinue;
    if (block->state()->is_loop()) {
      loop_used_nodes_.push_back(
          LoopUsedNodes{{}, kInvalidNodeId, kInvalidNodeId, block});
    }
    return BlockProcessResult::kContinue;
  }
  void PostProcessBasicBlock(BasicBlock* block) {}
  void PostPhiProcessing() {}

  template <typename NodeT>
  ProcessResult Process(NodeT* node, const ProcessingState& state) {
    node->set_id(next_node_id_++);
    LoopUsedNodes* loop_used_nodes = GetCurrentLoopUsedNodes();
    if (loop_used_nodes && node->properties().is_call() &&
        loop_used_nodes->header->has_state()) {
      if (loop_used_nodes->first_call == kInvalidNodeId) {
        loop_used_nodes->first_call = node->id();
      }
      loop_used_nodes->last_call = node->id();
    }
    MarkInputUses(node, state);
    return ProcessResult::kContinue;
  }

  template <typename NodeT>
  void MarkInputUses(NodeT* node, const ProcessingState& state) {
    LoopUsedNodes* loop_used_nodes = GetCurrentLoopUsedNodes();
    // Mark input uses in the same order as inputs are assigned in the register
    // allocator (see StraightForwardRegisterAllocator::AssignInputs).
    node->ForAllInputsInRegallocAssignmentOrder(
        [&](NodeBase::InputAllocationPolicy, Input* input) {
          MarkUse(input->node(), node->id(), input, loop_used_nodes);
        });
    if constexpr (NodeT::kProperties.can_eager_deopt()) {
      MarkCheckpointNodes(node, node->eager_deopt_info(), loop_used_nodes,
                          state);
    }
    if constexpr (NodeT::kProperties.can_lazy_deopt()) {
      MarkCheckpointNodes(node, node->lazy_deopt_info(), loop_used_nodes,
                          state);
    }
  }

  void MarkInputUses(Phi* node, const ProcessingState& state) {
    // Don't mark Phi uses when visiting the node, because of loop phis.
    // Instead, they'll be visited while processing Jump/JumpLoop.
  }

  // Specialize the two unconditional jumps to extend their Phis' inputs' live
  // ranges.

  void MarkInputUses(JumpLoop* node, const ProcessingState& state) {
    int predecessor_id = state.block()->predecessor_id();
    BasicBlock* target = node->target();
    uint32_t use = node->id();

    DCHECK(!loop_used_nodes_.empty());
    LoopUsedNodes loop_used_nodes = std::move(loop_used_nodes_.back());
    loop_used_nodes_.pop_back();

    LoopUsedNodes* outer_loop_used_nodes = GetCurrentLoopUsedNodes();

    if (target->has_phi()) {
      for (Phi* phi : *target->phis()) {
        DCHECK(phi->is_used());
        ValueNode* input = phi->input(predecessor_id).node();
        MarkUse(input, use, &phi->input(predecessor_id), outer_loop_used_nodes);
      }
    }

    DCHECK_EQ(loop_used_nodes.header, target);
    if (!loop_used_nodes.used_nodes.empty()) {
      // Try to avoid unnecessary reloads or spills across the back-edge based
      // on use positions and calls inside the loop.
      RegallocLoopInfo& loop_info =
          regalloc_info_->loop_info_
              .emplace(loop_used_nodes.header->id(), compilation_info_->zone())
              .first->second;
      for (auto p : loop_used_nodes.used_nodes) {
        // If the node is used before the first call and after the last call,
        // keep it in a register across the back-edge.
        if (p.second.first_register_use != kInvalidNodeId &&
            (loop_used_nodes.first_call == kInvalidNodeId ||
             (p.second.first_register_use <= loop_used_nodes.first_call &&
              p.second.last_register_use > loop_used_nodes.last_call))) {
          loop_info.reload_hints_.Add(p.first, compilation_info_->zone());
        }
        // If the node is not used, or used after the first call and before the
        // last call, keep it spilled across the back-edge.
        if (p.second.first_register_use == kInvalidNodeId ||
            (loop_used_nodes.first_call != kInvalidNodeId &&
             p.second.first_register_use > loop_used_nodes.first_call &&
             p.second.last_register_use <= loop_used_nodes.last_call)) {
          loop_info.spill_hints_.Add(p.first, compilation_info_->zone());
        }
      }

      // Uses of nodes in this loop may need to propagate to an outer loop, so
      // that their lifetime is extended there too.
      // TODO(leszeks): We only need to extend the lifetime in one outermost
      // loop, allow nodes to be "moved" between lifetime extensions.
      base::Vector<Input> used_node_inputs =
          compilation_info_->zone()->AllocateVector<Input>(
              loop_used_nodes.used_nodes.size());
      int i = 0;
      for (auto& [used_node, info] : loop_used_nodes.used_nodes) {
        Input* input = new (&used_node_inputs[i++]) Input(used_node);
        MarkUse(used_node, use, input, outer_loop_used_nodes);
      }
      node->set_used_nodes(used_node_inputs);
    }
  }
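
  // Worked example of the reload/spill hint rules above, with made-up node
  // ids purely for illustration: in a loop whose first call has id 20 and
  // last call has id 30, a loop-invariant value with register uses at ids 15
  // and 35 is used before the first call and after the last call, so it gets
  // a reload hint (keep it in a register across the back-edge). A value whose
  // only register uses are at ids 22 and 28 lies entirely between the calls,
  // so it gets a spill hint instead.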
  void MarkInputUses(Jump* node, const ProcessingState& state) {
    MarkJumpInputUses(node->id(), node->target(), state);
  }
  void MarkInputUses(CheckpointedJump* node, const ProcessingState& state) {
    MarkJumpInputUses(node->id(), node->target(), state);
  }
  void MarkJumpInputUses(uint32_t use, BasicBlock* target,
                         const ProcessingState& state) {
    int i = state.block()->predecessor_id();
    if (!target->has_phi()) return;
    LoopUsedNodes* loop_used_nodes = GetCurrentLoopUsedNodes();
    Phi::List& phis = *target->phis();
    for (auto it = phis.begin(); it != phis.end();) {
      Phi* phi = *it;
      if (!phi->is_used()) {
        // Skip unused phis -- we're processing phis out of order with the dead
        // node sweeping processor, so we will still observe unused phis here.
        // We can eagerly remove them while we're at it so that the dead node
        // sweeping processor doesn't have to revisit them.
        it = phis.RemoveAt(it);
      } else {
        ValueNode* input = phi->input(i).node();
        MarkUse(input, use, &phi->input(i), loop_used_nodes);
        ++it;
      }
    }
  }

 private:
  struct NodeUse {
    // First and last register use inside a loop.
    NodeIdT first_register_use;
    NodeIdT last_register_use;
  };

  struct LoopUsedNodes {
    std::map<ValueNode*, NodeUse> used_nodes;
    NodeIdT first_call;
    NodeIdT last_call;
    BasicBlock* header;
  };

  LoopUsedNodes* GetCurrentLoopUsedNodes() {
    if (loop_used_nodes_.empty()) return nullptr;
    return &loop_used_nodes_.back();
  }

  void MarkUse(ValueNode* node, uint32_t use_id, InputLocation* input,
               LoopUsedNodes* loop_used_nodes) {
    DCHECK(!node->Is<Identity>());

    node->record_next_use(use_id, input);

    // If we are in a loop, loop_used_nodes is non-null. In this case, check if
    // the incoming node is from outside the loop, and make sure to extend its
    // lifetime to the loop end if yes.
    if (loop_used_nodes) {
      // If the node's id is smaller than the smallest id inside the loop, then
      // it must have been created before the loop. This means that it's alive
      // on loop entry, and therefore has to be alive across the loop back edge
      // too.
      if (node->id() < loop_used_nodes->header->first_id()) {
        auto [it, info] = loop_used_nodes->used_nodes.emplace(
            node, NodeUse{kInvalidNodeId, kInvalidNodeId});
        if (input->operand().IsUnallocated()) {
          const auto& operand =
              compiler::UnallocatedOperand::cast(input->operand());
          if (operand.HasRegisterPolicy() || operand.HasFixedRegisterPolicy() ||
              operand.HasFixedFPRegisterPolicy()) {
            if (it->second.first_register_use == kInvalidNodeId) {
              it->second.first_register_use = use_id;
            }
            it->second.last_register_use = use_id;
          }
        }
      }
    }
  }

  template <typename DeoptInfoT>
  void MarkCheckpointNodes(NodeBase* node, DeoptInfoT* deopt_info,
                           LoopUsedNodes* loop_used_nodes,
                           const ProcessingState& state) {
    int use_id = node->id();
    if (!deopt_info->has_input_locations()) {
      size_t count = 0;
      deopt_info->ForEachInput([&](ValueNode*) { count++; });
      deopt_info->InitializeInputLocations(compilation_info_->zone(), count);
    }
    InputLocation* input = deopt_info->input_locations();
    deopt_info->ForEachInput([&](ValueNode* node) {
      MarkUse(node, use_id, input, loop_used_nodes);
      input++;
    });
  }

  MaglevCompilationInfo* compilation_info_;
  uint32_t next_node_id_ = kFirstValidNodeId;
  std::vector<LoopUsedNodes> loop_used_nodes_;
  RegallocInfo* regalloc_info_;
};

}  // namespace v8::internal::maglev

#endif  // V8_MAGLEV_MAGLEV_PRE_REGALLOC_CODEGEN_PROCESSORS_H_
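
All four classes follow the processor protocol that Maglev's graph-processor driver expects: PreProcessGraph, PreProcessBasicBlock for each block, a Process overload per node, then PostProcessGraph. The sketch below hand-rolls that protocol for a single pass purely to show how the hooks fit together; it is illustrative only. In the real pipeline these classes are composed and run by the GraphProcessor machinery from maglev-graph-processor.h, and the helper name, block iteration, and node dispatch shown here are assumptions rather than code taken from V8.

// Hypothetical driver for one processor (not part of the V8 sources).
void RunMaxCallDepthPass(Graph* graph) {
  MaxCallDepthProcessor processor;
  processor.PreProcessGraph(graph);
  for (BasicBlock* block : *graph) {  // assumes Graph iterates its blocks
    if (processor.PreProcessBasicBlock(block) !=
        BlockProcessResult::kContinue) {
      continue;
    }
    // ... dispatch each node in the block to the matching
    // processor.Process(node, state) overload, as the real driver does ...
  }
  // Publishes max_call_stack_args / max_deopted_stack_size on the graph.
  processor.PostProcessGraph(graph);
}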