v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
maglev-regalloc.cc
1// Copyright 2022 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
7#include <sstream>
8#include <type_traits>
9
10#include "src/base/bits.h"
11#include "src/base/logging.h"
14#include "src/codegen/reglist.h"
29
30#ifdef V8_TARGET_ARCH_ARM
32#elif V8_TARGET_ARCH_ARM64
34#elif V8_TARGET_ARCH_RISCV64
36#elif V8_TARGET_ARCH_X64
38#elif V8_TARGET_ARCH_S390X
40#else
41#error "Maglev does not support this architecture."
42#endif
43
44namespace v8 {
45namespace internal {
46
47namespace maglev {
48
49namespace {
50
51constexpr RegisterStateFlags initialized_node{true, false};
52constexpr RegisterStateFlags initialized_merge{true, true};
53
54using BlockReverseIterator = std::vector<BasicBlock>::reverse_iterator;
55
56// A target is a fallthrough of a control node if its ID is the next ID
57// after the control node.
58//
59// TODO(leszeks): Consider using the block iterator instead.
60bool IsTargetOfNodeFallthrough(ControlNode* node, BasicBlock* target) {
61 return node->id() + 1 == target->first_id();
62}
63
64ControlNode* NearestPostDominatingHole(ControlNode* node) {
65 // Conditional control nodes don't cause holes themselves. So, the nearest
66 // post-dominating hole is the conditional control node's next post-dominating
67 // hole.
68 if (node->Is<BranchControlNode>()) {
69 return node->next_post_dominating_hole();
70 }
71
72 // If the node is a Jump, it may be a hole, but only if it is not a
73 // fallthrough (jump to the immediately next block). Otherwise, it will point
74 // to the nearest post-dominating hole in its own "next" field.
75 if (node->Is<Jump>() || node->Is<CheckpointedJump>()) {
76 BasicBlock* target;
77 if (auto jmp = node->TryCast<Jump>()) {
78 target = jmp->target();
79 } else {
80 target = node->Cast<CheckpointedJump>()->target();
81 }
82 if (IsTargetOfNodeFallthrough(node, target)) {
83 return node->next_post_dominating_hole();
84 }
85 }
86
87 // If the node is a Switch, it can only have a hole if there is no
88 // fallthrough.
89 if (Switch* _switch = node->TryCast<Switch>()) {
90 if (_switch->has_fallthrough()) {
91 return _switch->next_post_dominating_hole();
92 }
93 }
94
95 return node;
96}
97
98ControlNode* HighestPostDominatingHole(ControlNode* first,
99 ControlNode* second) {
100 // Either find the merge-point of both branches, or the highest reachable
101 // control-node of the longest branch after the last node of the shortest
102 // branch.
103
104 // As long as there's no merge-point.
105 while (first != second) {
106 // Walk the highest branch to find where it goes.
107 if (first->id() > second->id()) std::swap(first, second);
108
109 // If the first branch terminates or jumps back, we've found the highest
110 // reachable control-node of the longest branch (the second control
111 // node).
112 if (first->Is<TerminalControlNode>() || first->Is<JumpLoop>()) {
113 return second;
114 }
115
116 // Continue one step along the highest branch. This may cross over the
117 // lowest branch in case it returns or loops. If labelled blocks are
118 // involved, such swapping of which branch is the highest can occur
119 // multiple times until a return/jumploop/merge is discovered.
120 first = first->next_post_dominating_hole();
121 }
122
123 // Once the branches have merged, we've found the gap-chain that's
124 // relevant for the control node.
125 return first;
126}
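
The pairwise merge above always advances whichever branch currently sits higher in the graph (the one with the smaller id), one hole at a time, until both branches land on the same node or the higher branch terminates. A minimal standalone sketch of the same convergence idea, using a hypothetical ToyNode rather than V8's ControlNode:

#include <algorithm>

// Hypothetical stand-in for ControlNode: an id plus the next post-dominating
// hole this node unconditionally reaches (nullptr if it terminates).
struct ToyNode {
  int id;
  ToyNode* next;
};

// Same idea as HighestPostDominatingHole(first, second): keep `a` as the node
// higher in the graph (smaller id) and advance it hole by hole; stop when the
// two branches meet or the higher branch terminates.
ToyNode* MeetingPoint(ToyNode* a, ToyNode* b) {
  while (a != b) {
    if (a->id > b->id) std::swap(a, b);
    if (a->next == nullptr) return b;  // like hitting a Return/JumpLoop
    a = a->next;
  }
  return a;
}
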
127
128template <size_t kSize>
129ControlNode* HighestPostDominatingHole(
130 base::SmallVector<ControlNode*, kSize>& holes) {
131 // Sort the holes from highest id to lowest id.
132 std::sort(holes.begin(), holes.end(),
133 [](ControlNode* first, ControlNode* second) {
134 return first->id() > second->id();
135 });
136 DCHECK_GT(holes.size(), 1);
137 // Find the highest post dominating hole.
138 ControlNode* post_dominating_hole = holes.back();
139 holes.pop_back();
140 while (holes.size() > 0) {
141 ControlNode* next_hole = holes.back();
142 holes.pop_back();
143 post_dominating_hole =
144 HighestPostDominatingHole(post_dominating_hole, next_hole);
145 }
146 return post_dominating_hole;
147}
148
149bool IsLiveAtTarget(ValueNode* node, ControlNode* source, BasicBlock* target) {
150 DCHECK_NOT_NULL(node);
151 DCHECK(!node->has_no_more_uses());
152
153 // If we're looping, a value can only be live if it was live before the loop.
154 if (target->control_node()->id() <= source->id()) {
155 // Gap moves may already be inserted in the target, so skip over those.
156 return node->id() < target->FirstNonGapMoveId();
157 }
158
159 // Drop all values on resumable loop headers.
160 if (target->is_loop() && target->state()->is_resumable_loop()) return false;
161
162 // TODO(verwaest): This should be true but isn't because we don't yet
163 // eliminate dead code.
164 // DCHECK_GT(node->next_use, source->id());
165 // TODO(verwaest): Since we don't support deopt yet we can only deal with
166 // direct branches. Add support for holes.
167 return node->live_range().end >= target->first_id();
168}
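
As a worked illustration of the rule above (names here are illustrative, not V8's): on a backward edge the value must have been defined before the loop header, while on a forward edge its live range simply has to reach the target.

#include <cstdint>

// Toy mirror of IsLiveAtTarget's two cases, using plain ids.
struct ToyValue {
  uint32_t id;              // id at which the value is defined
  uint32_t live_range_end;  // last id at which the value is used
};

bool ToyIsLiveAtTarget(const ToyValue& v, uint32_t source_id,
                       uint32_t target_first_id) {
  if (target_first_id <= source_id) {
    // Backward edge (loop): only values defined before the loop header can
    // still be live there.
    return v.id < target_first_id;
  }
  // Forward edge: live iff the live range extends into the target.
  return v.live_range_end >= target_first_id;
}
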
169
170bool IsDeadNodeToSkip(Node* node) {
171 if (!node->Is<ValueNode>()) return false;
172 ValueNode* value = node->Cast<ValueNode>();
173 return value->has_no_more_uses() &&
174 !value->properties().is_required_when_unused();
175}
176
177} // namespace
178
180 // TODO(verwaest): Perhaps don't actually merge these in but let the code
181 // generator pick up the gap moves from a separate list.
182 size_t diff = patches_.size();
183 if (diff == 0) return;
184 block->nodes().resize(block->nodes().size() + diff);
185 auto patches_it = patches_.end() - 1;
186 for (auto node_it = block->nodes().end() - 1 - diff;
187 node_it >= block->nodes().begin(); --node_it) {
188 *(node_it + diff) = *node_it;
189 for (; patches_it->diff == (node_it - block->nodes().begin());
190 --patches_it) {
191 --diff;
192 *(node_it + diff) = patches_it->new_node;
193 if (diff == 0) {
194 patches_.resize(0);
195 return;
196 }
197 }
198 }
199 UNREACHABLE();
200}
201
203 MaglevCompilationInfo* compilation_info, Graph* graph,
204 RegallocInfo* regalloc_info)
205 : compilation_info_(compilation_info),
206 graph_(graph),
207 patches_(compilation_info_->zone()),
208 regalloc_info_(regalloc_info) {
211 uint32_t tagged_stack_slots = tagged_.top;
212 uint32_t untagged_stack_slots = untagged_.top;
213 if (graph_->is_osr()) {
214 // Fix our stack frame to be compatible with the source stack frame of this
215 // OSR transition:
216 // 1) Ensure the section with tagged slots is big enough to receive all
217 // live OSR-in values.
218 for (auto val : graph_->osr_values()) {
219 if (val->result().operand().IsAllocated() &&
220 val->stack_slot() >= tagged_stack_slots) {
221 tagged_stack_slots = val->stack_slot() + 1;
222 }
223 }
224 // 2) Ensure we never have to shrink stack frames when OSR'ing into Maglev.
225 // We don't grow the tagged slots, since they might end up being uninitialized.
226 uint32_t source_frame_size =
228 uint32_t target_frame_size = tagged_stack_slots + untagged_stack_slots;
229 if (source_frame_size > target_frame_size) {
230 untagged_stack_slots += source_frame_size - target_frame_size;
231 }
232 }
233#ifdef V8_TARGET_ARCH_ARM64
234 // Due to alignment constraints, we add one untagged slot if
235 // stack_slots + fixed_slot_count is odd.
236 static_assert(StandardFrameConstants::kFixedSlotCount % 2 == 1);
237 if ((tagged_stack_slots + untagged_stack_slots) % 2 == 0) {
238 untagged_stack_slots++;
239 }
240#endif // V8_TARGET_ARCH_ARM64
241 graph_->set_tagged_stack_slots(tagged_stack_slots);
242 graph_->set_untagged_stack_slots(untagged_stack_slots);
243}
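
A small worked sketch of the arm64 padding rule above, assuming (as the static_assert states) that the fixed part of the frame contributes an odd number of slots: the allocated slots must then also be odd in total so that the whole frame stays 16-byte aligned with 8-byte slots. The helper name below is illustrative only.

#include <cstdint>

// Pad the untagged slot count so that tagged + untagged is odd; together with
// an odd fixed slot count this makes the total frame size even (aligned).
uint32_t PadForFrameAlignment(uint32_t tagged, uint32_t untagged) {
  if ((tagged + untagged) % 2 == 0) untagged++;
  return untagged;
}
// Example: tagged = 3, untagged = 1 -> sum already odd, no padding;
//          tagged = 2, untagged = 2 -> padded to untagged = 3 (sum 5).
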
244
246
247// Compute, for all forward control nodes (i.e. excluding Return and JumpLoop),
248// a tree of post-dominating control flow holes.
249//
250// Control flow which interrupts linear control flow fallthrough for basic
251// blocks is considered to introduce a control flow "hole".
252//
253// A──────┐ │
254// │ Jump │ │
255// └──┬───┘ │
256// { │ B──────┐ │
257// Control flow { │ │ Jump │ │ Linear control flow
258// hole after A { │ └─┬────┘ │
259// { ▼ ▼ Fallthrough │
260// C──────┐ │
261// │Return│ │
262// └──────┘ ▼
263//
264// It is interesting, for each such hole, to know what the next hole will be
265// that we will unconditionally reach on our way to an exit node. Such
266// subsequent holes are in "post-dominators" of the current block.
267//
268// As an example, consider the following CFG, with the annotated holes. The
269// post-dominating hole tree is the transitive closure of the post-dominator
270// tree, up to nodes which are holes (in this example, A, D, F and H).
271//
272// CFG Immediate Post-dominating
273// post-dominators holes
274// A──────┐
275// │ Jump │ A A
276// └──┬───┘ │ │
277// { │ B──────┐ │ │
278// Control flow { │ │ Jump │ │ B │ B
279// hole after A { │ └─┬────┘ │ │ │ │
280// { ▼ ▼ │ │ │ │
281// C──────┐ │ │ │ │
282// │Branch│ └►C◄┘ │ C │
283// └┬────┬┘ │ │ │ │
284// ▼ │ │ │ │ │
285// D──────┐│ │ │ │ │
286// │ Jump ││ D │ │ D │ │
287// └──┬───┘▼ │ │ │ │ │ │
288// { │ E──────┐ │ │ │ │ │ │
289// Control flow { │ │ Jump │ │ │ E │ │ │ E │
290// hole after D { │ └─┬────┘ │ │ │ │ │ │ │ │
291// { ▼ ▼ │ │ │ │ │ │ │ │
292// F──────┐ │ ▼ │ │ │ ▼ │ │
293// │ Jump │ └►F◄┘ └─┴►F◄┴─┘
294// └─────┬┘ │ │
295// { │ G──────┐ │ │
296// Control flow { │ │ Jump │ │ G │ G
297// hole after F { │ └─┬────┘ │ │ │ │
298// { ▼ ▼ │ │ │ │
299// H──────┐ ▼ │ ▼ │
300// │Return│ H◄┘ H◄┘
301// └──────┘
302//
303// Since we only care about forward control, loop jumps are treated the same as
304// returns -- they terminate the post-dominating hole chain.
305//
307 // For all blocks, find the list of jumps that jump over code unreachable from
308 // the block. Such a list of jumps terminates in return or jumploop.
309 for (BasicBlock* block : base::Reversed(*graph_)) {
310 ControlNode* control = block->control_node();
311 if (auto unconditional_control =
312 control->TryCast<UnconditionalControlNode>()) {
313 // If the current control node is a jump, prepend it to the list of jumps
314 // at the target.
315 control->set_next_post_dominating_hole(NearestPostDominatingHole(
316 unconditional_control->target()->control_node()));
317 } else if (auto branch = control->TryCast<BranchControlNode>()) {
318 ControlNode* first =
319 NearestPostDominatingHole(branch->if_true()->control_node());
321 NearestPostDominatingHole(branch->if_false()->control_node());
323 HighestPostDominatingHole(first, second));
324 } else if (auto switch_node = control->TryCast<Switch>()) {
325 int num_targets =
326 switch_node->size() + (switch_node->has_fallthrough() ? 1 : 0);
327 if (num_targets == 1) {
328 // If we have a single target, the next post dominating hole
329 // is the same one as the target's.
330 DCHECK(!switch_node->has_fallthrough());
331 control->set_next_post_dominating_hole(NearestPostDominatingHole(
332 switch_node->targets()[0].block_ptr()->control_node()));
333 continue;
334 }
335 // Calculate the post dominating hole for each target.
336 base::SmallVector<ControlNode*, 16> holes(num_targets);
337 for (int i = 0; i < switch_node->size(); i++) {
338 holes[i] = NearestPostDominatingHole(
339 switch_node->targets()[i].block_ptr()->control_node());
340 }
341 if (switch_node->has_fallthrough()) {
342 holes[switch_node->size()] = NearestPostDominatingHole(
343 switch_node->fallthrough()->control_node());
344 }
345 control->set_next_post_dominating_hole(HighestPostDominatingHole(holes));
346 }
347 }
348}
349
351 bool first = true;
352 auto print = [&](auto reg, ValueNode* node) {
353 if (first) {
354 first = false;
355 } else {
356 printing_visitor_->os() << ", ";
357 }
358 printing_visitor_->os() << reg << "=v" << node->id();
359 };
360 general_registers_.ForEachUsedRegister(print);
361 double_registers_.ForEachUsedRegister(print);
362}
363
365 if (v8_flags.trace_maglev_regalloc) {
367 compilation_info_->graph_labeller(), std::cout));
368 printing_visitor_->PreProcessGraph(graph_);
369 }
370
371 for (const auto& [ref, constant] : graph_->constants()) {
372 constant->SetConstantLocation();
373 USE(ref);
374 }
375 for (const auto& [index, constant] : graph_->root()) {
376 constant->SetConstantLocation();
377 USE(index);
378 }
379 for (const auto& [value, constant] : graph_->smi()) {
380 constant->SetConstantLocation();
381 USE(value);
382 }
383 for (const auto& [value, constant] : graph_->tagged_index()) {
384 constant->SetConstantLocation();
385 USE(value);
386 }
387 for (const auto& [value, constant] : graph_->int32()) {
388 constant->SetConstantLocation();
389 USE(value);
390 }
391 for (const auto& [value, constant] : graph_->uint32()) {
392 constant->SetConstantLocation();
393 USE(value);
394 }
395 for (const auto& [value, constant] : graph_->float64()) {
396 constant->SetConstantLocation();
397 USE(value);
398 }
399 for (const auto& [address, constant] : graph_->external_references()) {
400 constant->SetConstantLocation();
401 USE(address);
402 }
403 for (const auto& [ref, constant] : graph_->trusted_constants()) {
404 constant->SetConstantLocation();
405 USE(ref);
406 }
407
408 for (block_it_ = graph_->begin(); block_it_ != graph_->end(); ++block_it_) {
409 BasicBlock* block = *block_it_;
410 current_node_ = nullptr;
411
412 // Restore mergepoint state.
413 if (block->has_state()) {
414 if (block->state()->is_exception_handler() ||
415 block->state()->IsUnreachableByForwardEdge()) {
416 // Exceptions and loops only reachable from a JumpLoop (i.e., resumable
417 // loops with no fall-through edge) start with a blank state of register
418 // values.
420 } else {
421 InitializeRegisterValues(block->state()->register_state());
422 }
423 } else if (block->is_edge_split_block()) {
424 InitializeRegisterValues(block->edge_split_block_register_state());
425 }
426
427 if (v8_flags.trace_maglev_regalloc) {
428 printing_visitor_->PreProcessBasicBlock(block);
429 printing_visitor_->os() << "live regs: ";
431
432 ControlNode* control = NearestPostDominatingHole(block->control_node());
433 if (!control->Is<JumpLoop>()) {
434 printing_visitor_->os() << "\n[holes:";
435 while (true) {
436 if (control->Is<JumpLoop>()) {
437 printing_visitor_->os() << " " << control->id() << "↰";
438 break;
439 } else if (control->Is<UnconditionalControlNode>()) {
440 BasicBlock* target =
441 control->Cast<UnconditionalControlNode>()->target();
443 << " " << control->id() << "-" << target->first_id();
444 control = control->next_post_dominating_hole();
445 DCHECK_NOT_NULL(control);
446 continue;
447 } else if (control->Is<Switch>()) {
448 Switch* _switch = control->Cast<Switch>();
449 DCHECK(!_switch->has_fallthrough());
450 DCHECK_GE(_switch->size(), 1);
451 BasicBlock* first_target = _switch->targets()[0].block_ptr();
453 << " " << control->id() << "-" << first_target->first_id();
454 control = control->next_post_dominating_hole();
455 DCHECK_NOT_NULL(control);
456 continue;
457 } else if (control->Is<Return>()) {
458 printing_visitor_->os() << " " << control->id() << ".";
459 break;
460 } else if (control->Is<Deopt>() || control->Is<Abort>()) {
461 printing_visitor_->os() << " " << control->id() << "✖️";
462 break;
463 }
464 UNREACHABLE();
465 }
466 printing_visitor_->os() << "]";
467 }
468 printing_visitor_->os() << std::endl;
469 }
470
471 // Activate phis.
472 if (block->has_phi()) {
473 Phi::List& phis = *block->phis();
474 // Firstly, make the phi live, and try to assign it to an input
475 // location.
476 for (auto phi_it = phis.begin(); phi_it != phis.end();) {
477 Phi* phi = *phi_it;
478 if (!phi->has_valid_live_range()) {
479 // We might still have left over dead Phis, due to phis being kept
480 // alive by deopts that the representation analysis dropped. Clear
481 // them out now.
482 phi_it = phis.RemoveAt(phi_it);
483 } else {
484 DCHECK(phi->has_valid_live_range());
485 phi->SetNoSpill();
487 ++phi_it;
488 }
489 }
490 if (block->is_exception_handler_block()) {
491 // If we are in an exception handler block, then we find the ExceptionPhi
492 // (the first one by default) that is marked with the
493 // virtual_accumulator and force kReturnRegister0. This corresponds to
494 // the exception message object.
495 for (Phi* phi : phis) {
496 DCHECK_EQ(phi->input_count(), 0);
497 DCHECK(phi->is_exception_phi());
498 if (phi->owner() == interpreter::Register::virtual_accumulator()) {
499 if (!phi->has_no_more_uses()) {
500 phi->result().SetAllocated(ForceAllocate(kReturnRegister0, phi));
501 if (v8_flags.trace_maglev_regalloc) {
503 printing_visitor_->os() << "phi (exception message object) "
504 << phi->result().operand() << std::endl;
505 }
506 }
507 } else if (phi->owner().is_parameter() &&
508 phi->owner().is_receiver()) {
509 // The receiver is a special case for a fairly silly reason:
510 // OptimizedJSFrame::Summarize requires the receiver (and the
511 // function) to be in a stack slot, since its value must be
512 // available even though we're not deoptimizing (and thus register
513 // states are not available).
514 //
515 // TODO(leszeks):
516 // For inlined functions / nested graph generation, this a) doesn't
517 // work (there's no receiver stack slot); and b) isn't necessary
518 // (Summarize only looks at noninlined functions).
526 phi->result().SetAllocated(phi->spill_slot());
527 // Break once both accumulator and receiver have been processed.
528 break;
529 }
530 }
531 }
532 // Secondly try to assign the phi to a free register.
533 for (Phi* phi : phis) {
534 DCHECK(phi->has_valid_live_range());
535 if (phi->result().operand().IsAllocated()) continue;
536 if (phi->use_double_register()) {
537 if (!double_registers_.UnblockedFreeIsEmpty()) {
538 compiler::AllocatedOperand allocation =
539 double_registers_.AllocateRegister(phi, phi->hint());
540 phi->result().SetAllocated(allocation);
541 SetLoopPhiRegisterHint(phi, allocation.GetDoubleRegister());
542 if (v8_flags.trace_maglev_regalloc) {
545 << "phi (new reg) " << phi->result().operand() << std::endl;
546 }
547 }
548 } else {
549 // We'll use a general purpose register for this Phi.
550 if (!general_registers_.UnblockedFreeIsEmpty()) {
551 compiler::AllocatedOperand allocation =
552 general_registers_.AllocateRegister(phi, phi->hint());
553 phi->result().SetAllocated(allocation);
554 SetLoopPhiRegisterHint(phi, allocation.GetRegister());
555 if (v8_flags.trace_maglev_regalloc) {
558 << "phi (new reg) " << phi->result().operand() << std::endl;
559 }
560 }
561 }
562 }
563 // Finally just use a stack slot.
564 for (Phi* phi : phis) {
565 DCHECK(phi->has_valid_live_range());
566 if (phi->result().operand().IsAllocated()) continue;
568 // TODO(verwaest): Will this be used at all?
569 phi->result().SetAllocated(phi->spill_slot());
570 if (v8_flags.trace_maglev_regalloc) {
573 << "phi (stack) " << phi->result().operand() << std::endl;
574 }
575 }
576
577 if (v8_flags.trace_maglev_regalloc) {
578 printing_visitor_->os() << "live regs: ";
580 printing_visitor_->os() << std::endl;
581 }
582 general_registers_.clear_blocked();
583 double_registers_.clear_blocked();
584 }
585 DCHECK(AllUsedRegistersLiveAt(block));
587
588 node_it_ = block->nodes().begin();
589 for (; node_it_ != block->nodes().end(); ++node_it_) {
590 Node* node = *node_it_;
591 if (node == nullptr) continue;
592
593 if (IsDeadNodeToSkip(node)) {
594 // We remove unused pure nodes.
595 if (v8_flags.trace_maglev_regalloc) {
597 << "Removing unused node "
598 << PrintNodeLabel(graph_labeller(), node) << "\n";
599 }
600
601 if (!node->Is<Identity>()) {
602 // Updating the uses of the inputs in order to free dead input
603 // registers. We don't do this for Identity nodes, because they were
604 // skipped during use marking, and their inputs are thus not aware
605 // that they were used by this node.
606 DCHECK(!node->properties().can_deopt());
607 node->ForAllInputsInRegallocAssignmentOrder(
609 UpdateUse(input);
610 });
611 }
612
613 *node_it_ = nullptr;
614 continue;
615 }
616
617 AllocateNode(node);
618 }
619 AllocateControlNode(block->control_node(), block);
620 ApplyPatches(block);
621 }
622}
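
Condensed, the loop above applies a fixed per-block discipline: dead pure nodes are dropped in place, every other node is allocated in order, and the block ends with its control node. A highly simplified sketch of that driver shape, using hypothetical toy types rather than the Maglev classes:

#include <vector>

// Hypothetical stand-ins used only to show the shape of the per-block loop.
struct ToyNode {
  bool is_dead_pure;  // unused and not required when unused
};
struct ToyBlock {
  std::vector<ToyNode*> nodes;
  ToyNode* control;
};

template <typename Allocator>
void AllocateBlock(ToyBlock& block, Allocator& alloc) {
  for (ToyNode*& node : block.nodes) {
    if (node == nullptr) continue;
    if (node->is_dead_pure) {
      node = nullptr;  // drop unused pure nodes instead of allocating them
      continue;
    }
    alloc.AllocateNode(node);  // inputs, result, then deopt inputs
  }
  alloc.AllocateControlNode(block.control);
}
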
623
625 if (node->use_double_register()) {
626 double_registers_.FreeRegistersUsedBy(node);
627 } else {
628 general_registers_.FreeRegistersUsedBy(node);
629 }
630}
631
633 ValueNode* node, InputLocation* input_location) {
634 if (v8_flags.trace_maglev_regalloc) {
636 << "Using " << PrintNodeLabel(graph_labeller(), node) << "...\n";
637 }
638
639 DCHECK(!node->has_no_more_uses());
640
641 // Update the next use.
642 node->advance_next_use(input_location->next_use_id());
643
644 if (!node->has_no_more_uses()) return;
645
646 if (v8_flags.trace_maglev_regalloc) {
648 << " freeing " << PrintNodeLabel(graph_labeller(), node) << "\n";
649 }
650
651 // If a value is dead, make sure it's cleared.
653
654 // If the stack slot is a local slot, free it so it can be reused.
655 if (node->is_spilled()) {
656 compiler::AllocatedOperand slot = node->spill_slot();
657 if (slot.index() > 0) {
658 SpillSlots& slots =
660 : untagged_;
662 slots.free_slots.size() > 0,
663 slots.free_slots.back().freed_at_position <= node->live_range().end);
664 bool double_slot =
665 IsDoubleRepresentation(node->properties().value_representation());
666 slots.free_slots.emplace_back(slot.index(), node->live_range().end,
667 double_slot);
668 }
669 }
670}
671
673 const EagerDeoptInfo& deopt_info) {
674 InputLocation* input = deopt_info.input_locations();
675 deopt_info.ForEachInput([&](ValueNode* node) {
676 DCHECK(!node->Is<Identity>());
677 // We might have dropped this node without spilling it. Spill it now.
678 if (!node->has_register() && !node->is_loadable()) {
679 Spill(node);
680 }
681 input->InjectLocation(node->allocation());
682 UpdateUse(node, input);
683 input++;
684 });
685}
686
688 const LazyDeoptInfo& deopt_info) {
689 InputLocation* input = deopt_info.input_locations();
690 deopt_info.ForEachInput([&](ValueNode* node) {
691 DCHECK(!node->Is<Identity>());
692 // Lazy deopts always need spilling, and should
693 // always be loaded from their loadable slot.
694 Spill(node);
695 input->InjectLocation(node->loadable_slot());
696 UpdateUse(node, input);
697 input++;
698 });
699}
700
701#ifdef DEBUG
702namespace {
703#define GET_NODE_RESULT_REGISTER_T(RegisterT, AssignedRegisterT) \
704 RegisterT GetNodeResult##RegisterT(Node* node) { \
705 ValueNode* value_node = node->TryCast<ValueNode>(); \
706 if (!value_node) return RegisterT::no_reg(); \
707 if (!value_node->result().operand().Is##RegisterT()) { \
708 return RegisterT::no_reg(); \
709 } \
710 return value_node->result().AssignedRegisterT(); \
711 }
712GET_NODE_RESULT_REGISTER_T(Register, AssignedGeneralRegister)
713GET_NODE_RESULT_REGISTER_T(DoubleRegister, AssignedDoubleRegister)
714#undef GET_NODE_RESULT_REGISTER_T
715} // namespace
716#endif // DEBUG
717
719 // We shouldn't be visiting any gap moves during allocation; we should only
720 // have inserted gap moves in past visits.
721 DCHECK(!node->Is<GapMove>());
722 DCHECK(!node->Is<ConstantGapMove>());
723
725 if (v8_flags.trace_maglev_regalloc) {
727 << "Allocating " << PrintNodeLabel(graph_labeller(), node)
728 << " inputs...\n";
729 }
730 AssignInputs(node);
731 VerifyInputs(node);
732
733 if (node->properties().is_call()) SpillAndClearRegisters();
734
735 // Allocate node output.
736 if (node->Is<ValueNode>()) {
737 if (v8_flags.trace_maglev_regalloc) {
738 printing_visitor_->os() << "Allocating result...\n";
739 }
740 AllocateNodeResult(node->Cast<ValueNode>());
741 }
742
743 // Eager deopts might happen after the node result has been set, so allocate
744 // them after result allocation.
745 if (node->properties().can_eager_deopt()) {
746 if (v8_flags.trace_maglev_regalloc) {
747 printing_visitor_->os() << "Allocating eager deopt inputs...\n";
748 }
749 AllocateEagerDeopt(*node->eager_deopt_info());
750 }
751
752 // Lazy deopts are semantically after the node, so allocate them last.
753 if (node->properties().can_lazy_deopt()) {
754 if (v8_flags.trace_maglev_regalloc) {
755 printing_visitor_->os() << "Allocating lazy deopt inputs...\n";
756 }
757 // Ensure all values live from a throwing node across its catch block are
758 // spilled so they can properly be merged after the catch block.
759 if (node->properties().can_throw()) {
760 ExceptionHandlerInfo* info = node->exception_handler_info();
761 if (info->HasExceptionHandler() && !info->ShouldLazyDeopt() &&
762 !node->properties().is_call()) {
763 BasicBlock* block = info->catch_block();
764 auto spill = [&](auto reg, ValueNode* node) {
765 if (node->live_range().end < block->first_id()) return;
766 Spill(node);
767 };
768 general_registers_.ForEachUsedRegister(spill);
769 double_registers_.ForEachUsedRegister(spill);
770 }
771 }
772 AllocateLazyDeopt(*node->lazy_deopt_info());
773 }
774
775 // Make sure to save the snapshot after allocating the eager deopt registers.
776 if (node->properties().needs_register_snapshot()) SaveRegisterSnapshot(node);
777
778 if (v8_flags.trace_maglev_regalloc) {
780 printing_visitor_->os() << "live regs: ";
782 printing_visitor_->os() << "\n";
783 }
784
785 // Result register should not be in temporaries.
786 DCHECK_IMPLIES(GetNodeResultRegister(node) != Register::no_reg(),
787 !node->general_temporaries().has(GetNodeResultRegister(node)));
789 GetNodeResultDoubleRegister(node) != DoubleRegister::no_reg(),
790 !node->double_temporaries().has(GetNodeResultDoubleRegister(node)));
791
792 // All the temporaries should be free by the end.
793 DCHECK_EQ(general_registers_.free() | node->general_temporaries(),
794 general_registers_.free());
795 DCHECK_EQ(double_registers_.free() | node->double_temporaries(),
796 double_registers_.free());
797 general_registers_.clear_blocked();
798 double_registers_.clear_blocked();
800}
801
802template <typename RegisterT>
804 RegisterT reg, bool force_spill) {
806 list.unblock(reg);
807 if (!list.free().has(reg)) {
808 ValueNode* node = list.GetValue(reg);
809 // If the register is not live after the current node, just remove its
810 // value.
811 if (IsCurrentNodeLastUseOf(node)) {
812 node->RemoveRegister(reg);
813 } else {
814 DropRegisterValue(list, reg, force_spill);
815 }
816 list.AddToFree(reg);
817 }
818}
819
821 DCHECK(!node->Is<Phi>());
822
823 node->SetNoSpill();
824
826 compiler::UnallocatedOperand::cast(node->result().operand());
827
829 DCHECK(node->Is<InitialValue>());
830 DCHECK_IMPLIES(!graph_->is_osr(), operand.fixed_slot_index() < 0);
831 // Set the stack slot to exactly where the value is.
833 node->GetMachineRepresentation(),
834 operand.fixed_slot_index());
835 node->result().SetAllocated(location);
836 node->Spill(location);
837
838 int idx = operand.fixed_slot_index();
839 if (idx > 0) {
840 // Reserve this slot by increasing the top and also marking slots below as
841 // free. Assumes slots are processed in increasing order.
842 CHECK(node->is_tagged());
843 CHECK_GE(idx, tagged_.top);
844 for (int i = tagged_.top; i < idx; ++i) {
845 bool double_slot =
846 IsDoubleRepresentation(node->properties().value_representation());
847 tagged_.free_slots.emplace_back(i, node->live_range().start,
848 double_slot);
849 }
850 tagged_.top = idx + 1;
851 }
852 return;
853 }
854
855 switch (operand.extended_policy()) {
859 node->result().SetAllocated(ForceAllocate(r, node));
860 break;
861 }
862
864 node->result().SetAllocated(AllocateRegisterAtEnd(node));
865 break;
866
868 Input& input = node->input(operand.input_index());
869 node->result().SetAllocated(ForceAllocate(input, node));
870 // Clear any hint that (probably) comes from this constraint.
871 if (node->has_hint()) input.node()->ClearHint();
872 break;
873 }
874
879 node->result().SetAllocated(ForceAllocate(r, node));
880 break;
881 }
882
884 DCHECK(IsConstantNode(node->opcode()));
885 break;
886
890 UNREACHABLE();
891 }
892
893 // Immediately kill the register use if the node doesn't have a valid
894 // live-range.
895 // TODO(verwaest): Remove once we can avoid allocating such registers.
896 if (!node->has_valid_live_range() &&
897 node->result().operand().IsAnyRegister()) {
898 DCHECK(node->has_register());
900 DCHECK(!node->has_register());
901 DCHECK(node->has_no_more_uses());
902 }
903}
904
905template <typename RegisterT>
907 RegisterFrameState<RegisterT>& registers, RegisterT reg, bool force_spill) {
908 // The register should not already be free.
909 DCHECK(!registers.free().has(reg));
910
911 ValueNode* node = registers.GetValue(reg);
912
913 if (v8_flags.trace_maglev_regalloc) {
914 printing_visitor_->os() << " dropping " << reg << " value "
915 << PrintNodeLabel(graph_labeller(), node) << "\n";
916 }
917
919
920 // Remove the register from the node's list.
921 node->RemoveRegister(reg);
922 // Return if the removed value already has another register or is loadable
923 // from memory.
924 if (node->has_register() || node->is_loadable()) return;
925 // Try to move the value to another register. Do so without blocking that
926 // register, as we may still want to use it elsewhere.
927 if (!registers.UnblockedFreeIsEmpty() && !force_spill) {
928 RegisterT target_reg = registers.unblocked_free().first();
929 RegisterT hint_reg = node->GetRegisterHint<RegisterT>();
930 if (hint_reg.is_valid() && registers.unblocked_free().has(hint_reg)) {
931 target_reg = hint_reg;
932 }
933 registers.RemoveFromFree(target_reg);
934 registers.SetValueWithoutBlocking(target_reg, node);
935 // Emit a gapmove.
937 mach_repr, reg.code());
939 mach_repr, target_reg.code());
940 AddMoveBeforeCurrentNode(node, source, target);
941 return;
942 }
943
944 // If all else fails, spill the value.
945 Spill(node);
946}
947
951
955
957 int predecessor_id, BasicBlock* target) {
958 DCHECK(!target->is_edge_split_block());
959
960 if (!target->has_phi()) return;
961
962 // Phi moves are emitted by resolving all phi moves as a single parallel move,
963 // which means we shouldn't update register state as we go (as if we were
964 // emitting a series of serialised moves) but rather take 'old' register
965 // state as the phi input.
966 Phi::List& phis = *target->phis();
967 for (auto phi_it = phis.begin(); phi_it != phis.end();) {
968 Phi* phi = *phi_it;
969 if (!phi->has_valid_live_range()) {
970 // We might still have left over dead Phis, due to phis being kept
971 // alive by deopts that the representation analysis dropped. Clear
972 // them out now.
973 phi_it = phis.RemoveAt(phi_it);
974 } else {
975 Input& input = phi->input(predecessor_id);
976 input.InjectLocation(input.node()->allocation());
977 ++phi_it;
978 }
979 }
980}
981
982#ifdef DEBUG
983
984bool StraightForwardRegisterAllocator::AllUsedRegistersLiveAt(
985 ConditionalControlNode* control_node, BasicBlock* target) {
986 auto ForAllRegisters = [&](const auto& registers) {
987 for (auto reg : registers.used()) {
988 if (!IsLiveAtTarget(registers.GetValue(reg), control_node, target)) {
989 return false;
990 }
991 }
992 return true;
993 };
994 return ForAllRegisters(general_registers_) &&
995 ForAllRegisters(double_registers_);
996}
997
998bool StraightForwardRegisterAllocator::AllUsedRegistersLiveAt(
999 BasicBlock* target) {
1000 auto ForAllRegisters = [&](const auto& registers) {
1001 for (auto reg : registers.used()) {
1002 if (registers.GetValue(reg)->live_range().end < target->first_id()) {
1003 return false;
1004 }
1005 }
1006 return true;
1007 };
1008 return ForAllRegisters(general_registers_) &&
1009 ForAllRegisters(double_registers_);
1010}
1011
1012#endif // DEBUG
1013
1015 ConditionalControlNode* control_node, BasicBlock* target) {
1016 DCHECK(!target->has_phi());
1017
1018 if (target->has_state()) {
1019 // Not a fall-through branch, copy the state over.
1020 return InitializeBranchTargetRegisterValues(control_node, target);
1021 }
1022 if (target->is_edge_split_block()) {
1023 return InitializeEmptyBlockRegisterValues(control_node, target);
1024 }
1025
1026 DCHECK_EQ(control_node->id() + 1, target->first_id());
1027 DCHECK(AllUsedRegistersLiveAt(control_node, target));
1028}
1029
1031 BasicBlock* block) {
1033
1034 // Control nodes can't lazy deopt at the moment.
1035 DCHECK(!node->properties().can_lazy_deopt());
1036
1037 if (node->Is<Abort>()) {
1038 // Do nothing.
1039 DCHECK(node->general_temporaries().is_empty());
1040 DCHECK(node->double_temporaries().is_empty());
1041 DCHECK_EQ(node->num_temporaries_needed<Register>(), 0);
1042 DCHECK_EQ(node->num_temporaries_needed<DoubleRegister>(), 0);
1043 DCHECK_EQ(node->input_count(), 0);
1044 // Either there are no special properties, or there's a call but it doesn't
1045 // matter because we'll abort anyway.
1047 node->properties() != OpProperties(0),
1048 node->properties() == OpProperties::Call() && node->Is<Abort>());
1049
1050 if (v8_flags.trace_maglev_regalloc) {
1052 }
1053 } else if (node->Is<Deopt>()) {
1054 // No temporaries.
1055 DCHECK(node->general_temporaries().is_empty());
1056 DCHECK(node->double_temporaries().is_empty());
1057 DCHECK_EQ(node->num_temporaries_needed<Register>(), 0);
1058 DCHECK_EQ(node->num_temporaries_needed<DoubleRegister>(), 0);
1059 DCHECK_EQ(node->input_count(), 0);
1060 DCHECK_EQ(node->properties(), OpProperties::EagerDeopt());
1061
1062 AllocateEagerDeopt(*node->eager_deopt_info());
1063
1064 if (v8_flags.trace_maglev_regalloc) {
1066 }
1067 } else if (auto unconditional = node->TryCast<UnconditionalControlNode>()) {
1068 // No temporaries.
1069 DCHECK(node->general_temporaries().is_empty());
1070 DCHECK(node->double_temporaries().is_empty());
1071 DCHECK_EQ(node->num_temporaries_needed<Register>(), 0);
1072 DCHECK_EQ(node->num_temporaries_needed<DoubleRegister>(), 0);
1073 DCHECK_EQ(node->input_count(), 0);
1074 DCHECK(!node->properties().can_eager_deopt());
1075 DCHECK(!node->properties().can_lazy_deopt());
1076 DCHECK(!node->properties().needs_register_snapshot());
1077 DCHECK(!node->properties().is_call());
1078
1079 auto predecessor_id = block->predecessor_id();
1080 auto target = unconditional->target();
1081
1082 InitializeBranchTargetPhis(predecessor_id, target);
1083 MergeRegisterValues(unconditional, target, predecessor_id);
1084 if (target->has_phi()) {
1085 for (Phi* phi : *target->phis()) {
1086 UpdateUse(&phi->input(predecessor_id));
1087 }
1088 }
1089
1090 // For JumpLoops, now update the uses of any node used in, but not defined
1091 // in the loop. This makes sure that such nodes' lifetimes are extended to
1092 // the entire body of the loop. This must be after phi initialisation so
1093 // that value dropping in the phi initialisation doesn't think these
1094 // extended lifetime nodes are dead.
1095 if (auto jump_loop = node->TryCast<JumpLoop>()) {
1096 for (Input& input : jump_loop->used_nodes()) {
1097 if (!input.node()->has_register() && !input.node()->is_loadable()) {
1098 // If the value isn't loadable by the end of a loop (this can happen
1099 // e.g. when a deferred throw doesn't spill it, and an exception
1100 // handler drops the value), spill it now.
1101 Spill(input.node());
1102 }
1103 UpdateUse(&input);
1104 }
1105 }
1106
1107 if (v8_flags.trace_maglev_regalloc) {
1109 }
1110 } else {
1111 DCHECK(node->Is<ConditionalControlNode>() || node->Is<Return>());
1112 AssignInputs(node);
1113 VerifyInputs(node);
1114
1115 DCHECK(!node->properties().can_eager_deopt());
1116 DCHECK(!node->properties().can_lazy_deopt());
1117
1118 if (node->properties().is_call()) SpillAndClearRegisters();
1119
1120 DCHECK(!node->properties().needs_register_snapshot());
1121
1122 DCHECK_EQ(general_registers_.free() | node->general_temporaries(),
1123 general_registers_.free());
1124 DCHECK_EQ(double_registers_.free() | node->double_temporaries(),
1125 double_registers_.free());
1126
1127 general_registers_.clear_blocked();
1128 double_registers_.clear_blocked();
1130
1131 if (v8_flags.trace_maglev_regalloc) {
1133 }
1134
1135 // Finally, initialize the merge states of branch targets, including the
1136 // fallthrough, with the final state after all allocation.
1137 if (auto conditional = node->TryCast<BranchControlNode>()) {
1138 InitializeConditionalBranchTarget(conditional, conditional->if_true());
1139 InitializeConditionalBranchTarget(conditional, conditional->if_false());
1140 } else if (Switch* control_node = node->TryCast<Switch>()) {
1141 const BasicBlockRef* targets = control_node->targets();
1142 for (int i = 0; i < control_node->size(); i++) {
1143 InitializeConditionalBranchTarget(control_node, targets[i].block_ptr());
1144 }
1145 if (control_node->has_fallthrough()) {
1147 control_node->fallthrough());
1148 }
1149 }
1150 }
1151
1153}
1154
1155template <typename RegisterT>
1157 RegisterT reg) {
1159 std::is_same_v<RegisterT, Register>
1162 reg.code(), kNoVreg);
1163 for (Input& input : *phi) {
1164 if (input.node()->id() > phi->id()) {
1165 input.node()->SetHint(hint);
1166 }
1167 }
1168}
1169
1171 // Try allocate phis to a register used by any of the inputs.
1172 for (Input& input : *phi) {
1173 if (input.operand().IsRegister()) {
1174 // We assume Phi nodes only point to tagged values, and so they use a
1175 // general register.
1176 Register reg = input.AssignedGeneralRegister();
1177 if (general_registers_.unblocked_free().has(reg)) {
1178 phi->result().SetAllocated(ForceAllocate(reg, phi));
1180 DCHECK_EQ(general_registers_.GetValue(reg), phi);
1181 if (v8_flags.trace_maglev_regalloc) {
1183 printing_visitor_->os()
1184 << "phi (reuse) " << input.operand() << std::endl;
1185 }
1186 return;
1187 }
1188 }
1189 }
1190}
1191
1195 Node* gap_move;
1196 if (source.IsConstant()) {
1197 DCHECK(IsConstantNode(node->opcode()));
1198 if (v8_flags.trace_maglev_regalloc) {
1199 printing_visitor_->os()
1200 << " constant gap move: " << target << " ← "
1201 << PrintNodeLabel(graph_labeller(), node) << std::endl;
1202 }
1203 gap_move =
1205 } else {
1206 if (v8_flags.trace_maglev_regalloc) {
1207 printing_visitor_->os() << " gap move: " << target << " ← "
1208 << PrintNodeLabel(graph_labeller(), node) << ":"
1209 << source << std::endl;
1210 }
1211 gap_move =
1213 compiler::AllocatedOperand::cast(source), target);
1214 }
1215 gap_move->InitTemporaries();
1217 graph_labeller()->RegisterNode(gap_move);
1218 }
1219 BasicBlock* block = *block_it_;
1220 if (node_it_ == block->nodes().end()) {
1222 // We're at the control node, so append instead.
1223 block->nodes().push_back(gap_move);
1224 node_it_ = block->nodes().end();
1225 } else {
1226 // We should not add any gap move before a GetSecondReturnedValue.
1227 patches_.emplace_back(node_it_ - block->nodes().begin(), gap_move);
1228 }
1229}
1230
1232 if (node->is_loadable()) return;
1233 AllocateSpillSlot(node);
1234 if (v8_flags.trace_maglev_regalloc) {
1235 printing_visitor_->os()
1236 << " spill: " << node->spill_slot() << " ← "
1237 << PrintNodeLabel(graph_labeller(), node) << std::endl;
1238 }
1239}
1240
1243 compiler::UnallocatedOperand::cast(input.operand());
1244 ValueNode* node = input.node();
1245 compiler::InstructionOperand location = node->allocation();
1246
1247 switch (operand.extended_policy()) {
1249 // Allocated in AssignArbitraryRegisterInput.
1250 if (v8_flags.trace_maglev_regalloc) {
1251 printing_visitor_->os()
1252 << "- " << PrintNodeLabel(graph_labeller(), input.node())
1253 << " has arbitrary register\n";
1254 }
1255 return;
1256
1258 // Allocated in AssignAnyInput.
1259 if (v8_flags.trace_maglev_regalloc) {
1260 printing_visitor_->os()
1261 << "- " << PrintNodeLabel(graph_labeller(), input.node())
1262 << " has arbitrary location\n";
1263 }
1264 return;
1265
1268 input.SetAllocated(ForceAllocate(reg, node));
1269 break;
1270 }
1271
1275 input.SetAllocated(ForceAllocate(reg, node));
1276 break;
1277 }
1278
1283 UNREACHABLE();
1284 }
1285 if (v8_flags.trace_maglev_regalloc) {
1286 printing_visitor_->os()
1287 << "- " << PrintNodeLabel(graph_labeller(), input.node())
1288 << " in forced " << input.operand() << "\n";
1289 }
1290
1291 compiler::AllocatedOperand allocated =
1292 compiler::AllocatedOperand::cast(input.operand());
1293 if (location != allocated) {
1294 AddMoveBeforeCurrentNode(node, location, allocated);
1295 }
1296 UpdateUse(&input);
1297 // Clear any hint that (probably) comes from this fixed use.
1298 input.node()->ClearHint();
1299}
1300
1302 ValueNode* node, const compiler::AllocatedOperand& location) {
1303 if (node->use_double_register()) {
1305 DCHECK(double_registers_.is_blocked(reg));
1307 double_registers_.AddToFree(reg);
1308 } else {
1309 Register reg = location.GetRegister();
1310 DCHECK(general_registers_.is_blocked(reg));
1312 general_registers_.AddToFree(reg);
1313 }
1314}
1315
1316namespace {
1317
1318#ifdef DEBUG
1319bool IsInRegisterLocation(ValueNode* node,
1321 DCHECK(location.IsAnyRegister());
1322 compiler::AllocatedOperand allocation =
1324 DCHECK_IMPLIES(node->use_double_register(), allocation.IsDoubleRegister());
1325 DCHECK_IMPLIES(!node->use_double_register(), allocation.IsRegister());
1326 if (node->use_double_register()) {
1327 return node->is_in_register(allocation.GetDoubleRegister());
1328 } else {
1329 return node->is_in_register(allocation.GetRegister());
1330 }
1331}
1332#endif // DEBUG
1333
1334bool SameAsInput(ValueNode* node, Input& input) {
1335 auto operand = compiler::UnallocatedOperand::cast(node->result().operand());
1336 return operand.HasSameAsInputPolicy() &&
1337 &input == &node->input(operand.input_index());
1338}
1339
1340compiler::InstructionOperand InputHint(NodeBase* node, Input& input) {
1341 ValueNode* value_node = node->TryCast<ValueNode>();
1342 if (!value_node) return input.node()->hint();
1343 DCHECK(value_node->result().operand().IsUnallocated());
1344 if (SameAsInput(value_node, input)) {
1345 return value_node->hint();
1346 } else {
1347 return input.node()->hint();
1348 }
1349}
1350
1351} // namespace
1352
1354 NodeBase* result_node, Input& input) {
1355 // Already assigned in AssignFixedInput
1356 if (!input.operand().IsUnallocated()) return;
1357
1359 compiler::UnallocatedOperand::cast(input.operand());
1360 if (operand.extended_policy() ==
1362 // Allocated in AssignAnyInput.
1363 return;
1364 }
1365
1366 DCHECK_EQ(operand.extended_policy(),
1368
1369 ValueNode* node = input.node();
1370 bool is_clobbered = input.Cloberred();
1371
1372 compiler::AllocatedOperand location = ([&] {
1373 compiler::InstructionOperand existing_register_location;
1374 auto hint = InputHint(result_node, input);
1375 if (is_clobbered) {
1376 // For clobbered inputs, we want to pick a different register than
1377 // non-clobbered inputs, so that we don't clobber those.
1378 existing_register_location =
1379 node->use_double_register()
1380 ? double_registers_.TryChooseUnblockedInputRegister(node)
1381 : general_registers_.TryChooseUnblockedInputRegister(node);
1382 } else {
1383 ValueNode* value_node = result_node->TryCast<ValueNode>();
1384 // Only use the hint if it helps with the result's allocation due to
1385 // same-as-input policy. Otherwise this doesn't affect regalloc.
1386 auto result_hint = value_node && SameAsInput(value_node, input)
1387 ? value_node->hint()
1389 existing_register_location =
1390 node->use_double_register()
1391 ? double_registers_.TryChooseInputRegister(node, result_hint)
1392 : general_registers_.TryChooseInputRegister(node, result_hint);
1393 }
1394
1395 // Reuse an existing register if possible.
1396 if (existing_register_location.IsAnyLocationOperand()) {
1397 if (v8_flags.trace_maglev_regalloc) {
1398 printing_visitor_->os()
1399 << "- " << PrintNodeLabel(graph_labeller(), input.node()) << " in "
1400 << (is_clobbered ? "clobbered " : "") << existing_register_location
1401 << "\n";
1402 }
1403 return compiler::AllocatedOperand::cast(existing_register_location);
1404 }
1405
1406 // Otherwise, allocate a register for the node and load it in from there.
1407 compiler::InstructionOperand existing_location = node->allocation();
1408 compiler::AllocatedOperand allocation = AllocateRegister(node, hint);
1409 DCHECK_NE(existing_location, allocation);
1410 AddMoveBeforeCurrentNode(node, existing_location, allocation);
1411
1412 if (v8_flags.trace_maglev_regalloc) {
1413 printing_visitor_->os()
1414 << "- " << PrintNodeLabel(graph_labeller(), input.node()) << " in "
1415 << (is_clobbered ? "clobbered " : "") << allocation << " ← "
1416 << node->allocation() << "\n";
1417 }
1418 return allocation;
1419 })();
1420
1421 input.SetAllocated(location);
1422
1423 UpdateUse(&input);
1424 // Only need to mark the location as clobbered if the node wasn't already
1425 // killed by UpdateUse.
1426 if (is_clobbered && !node->has_no_more_uses()) {
1427 MarkAsClobbered(node, location);
1428 }
1429 // Clobbered inputs should no longer be in the allocated location, as far as
1430 // the register allocator is concerned. This will happen either via
1431 // clobbering, or via this being the last use.
1432 DCHECK_IMPLIES(is_clobbered, !IsInRegisterLocation(node, location));
1433}
1434
1436 // Already assigned in AssignFixedInput or AssignArbitraryRegisterInput.
1437 if (!input.operand().IsUnallocated()) return;
1438
1439 DCHECK_EQ(
1440 compiler::UnallocatedOperand::cast(input.operand()).extended_policy(),
1442
1443 ValueNode* node = input.node();
1444 compiler::InstructionOperand location = node->allocation();
1445
1446 input.InjectLocation(location);
1447 if (location.IsAnyRegister()) {
1448 compiler::AllocatedOperand allocation =
1450 if (allocation.IsDoubleRegister()) {
1451 double_registers_.block(allocation.GetDoubleRegister());
1452 } else {
1453 general_registers_.block(allocation.GetRegister());
1454 }
1455 }
1456 if (v8_flags.trace_maglev_regalloc) {
1457 printing_visitor_->os()
1458 << "- " << PrintNodeLabel(graph_labeller(), input.node())
1459 << " in original " << location << "\n";
1460 }
1461 UpdateUse(&input);
1462}
1463
1465 // We allocate arbitrary register inputs after fixed inputs, since the fixed
1466 // inputs may clobber the arbitrarily chosen ones. Finally we assign the
1467 // location for the remaining inputs. Since inputs can alias a node, one of
1468 // the inputs could be assigned a register in AssignArbitraryRegisterInput
1469 // (and respectively its node location); therefore we wait until all
1470 // registers are allocated before assigning any location for these inputs.
1471 // TODO(dmercadier): consider using `ForAllInputsInRegallocAssignmentOrder` to
1472 // iterate the inputs. Since UseMarkingProcessor uses this helper to iterate
1473 // inputs, and it has to iterate them in the same order as this function,
1474 // using the iteration helper in both places would be better.
1475 for (Input& input : *node) AssignFixedInput(input);
1477 for (Input& input : *node) AssignArbitraryRegisterInput(node, input);
1479 for (Input& input : *node) AssignAnyInput(input);
1480}
1481
1483#ifdef DEBUG
1484 for (Input& input : *node) {
1485 if (input.operand().IsRegister()) {
1486 Register reg =
1488 if (general_registers_.GetValueMaybeFreeButBlocked(reg) != input.node()) {
1489 FATAL("Input node n%d is not in expected register %s",
1490 graph_labeller()->NodeId(input.node()), RegisterName(reg));
1491 }
1492 } else if (input.operand().IsDoubleRegister()) {
1495 if (double_registers_.GetValueMaybeFreeButBlocked(reg) != input.node()) {
1496 FATAL("Input node n%d is not in expected register %s",
1497 graph_labeller()->NodeId(input.node()), RegisterName(reg));
1498 }
1499 } else {
1500 if (input.operand() != input.node()->allocation()) {
1501 std::stringstream ss;
1502 ss << input.operand();
1503 FATAL("Input node n%d is not in operand %s",
1504 graph_labeller()->NodeId(input.node()), ss.str().c_str());
1505 }
1506 }
1507 }
1508#endif
1509}
1510
1512#ifdef DEBUG
1513 // We shouldn't have any blocked registers by now.
1514 DCHECK(general_registers_.blocked().is_empty());
1515 DCHECK(double_registers_.blocked().is_empty());
1516
1517 auto NodeNameForFatal = [&](ValueNode* node) {
1518 std::stringstream ss;
1521 } else {
1522 ss << "<" << node << ">";
1523 }
1524 return ss.str();
1525 };
1526
1527 for (Register reg : general_registers_.used()) {
1528 ValueNode* node = general_registers_.GetValue(reg);
1529 if (!node->is_in_register(reg)) {
1530 FATAL("Node %s doesn't think it is in register %s",
1531 NodeNameForFatal(node).c_str(), RegisterName(reg));
1532 }
1533 }
1534 for (DoubleRegister reg : double_registers_.used()) {
1535 ValueNode* node = double_registers_.GetValue(reg);
1536 if (!node->is_in_register(reg)) {
1537 FATAL("Node %s doesn't think it is in register %s",
1538 NodeNameForFatal(node).c_str(), RegisterName(reg));
1539 }
1540 }
1541
1542 auto ValidateValueNode = [this, NodeNameForFatal](ValueNode* node) {
1543 if (node->use_double_register()) {
1544 for (DoubleRegister reg : node->result_registers<DoubleRegister>()) {
1545 if (double_registers_.unblocked_free().has(reg)) {
1546 FATAL("Node %s thinks it's in register %s but it's free",
1547 NodeNameForFatal(node).c_str(), RegisterName(reg));
1548 } else if (double_registers_.GetValue(reg) != node) {
1549 FATAL("Node %s thinks it's in register %s but it contains %s",
1550 NodeNameForFatal(node).c_str(), RegisterName(reg),
1551 NodeNameForFatal(double_registers_.GetValue(reg)).c_str());
1552 }
1553 }
1554 } else {
1555 for (Register reg : node->result_registers<Register>()) {
1556 if (general_registers_.unblocked_free().has(reg)) {
1557 FATAL("Node %s thinks it's in register %s but it's free",
1558 NodeNameForFatal(node).c_str(), RegisterName(reg));
1559 } else if (general_registers_.GetValue(reg) != node) {
1560 FATAL("Node %s thinks it's in register %s but it contains %s",
1561 NodeNameForFatal(node).c_str(), RegisterName(reg),
1562 NodeNameForFatal(general_registers_.GetValue(reg)).c_str());
1563 }
1564 }
1565 }
1566 };
1567
1568 for (BasicBlock* block : *graph_) {
1569 if (block->has_phi()) {
1570 for (Phi* phi : *block->phis()) {
1571 ValidateValueNode(phi);
1572 }
1573 }
1574 for (Node* node : block->nodes()) {
1575 if (node == nullptr) continue;
1576 if (ValueNode* value_node = node->TryCast<ValueNode>()) {
1577 if (node->Is<Identity>()) continue;
1578 ValidateValueNode(value_node);
1579 }
1580 }
1581 }
1582
1583#endif
1584}
1585
1587 auto spill = [&](auto reg, ValueNode* node) { Spill(node); };
1588 general_registers_.ForEachUsedRegister(spill);
1589 double_registers_.ForEachUsedRegister(spill);
1590}
1591
1592template <typename RegisterT>
1595 while (registers.used() != registers.empty()) {
1596 RegisterT reg = registers.used().first();
1597 ValueNode* node = registers.GetValue(reg);
1598 if (v8_flags.trace_maglev_regalloc) {
1599 printing_visitor_->os() << " clearing registers with "
1600 << PrintNodeLabel(graph_labeller(), node) << "\n";
1601 }
1602 Spill(node);
1603 registers.FreeRegistersUsedBy(node);
1604 DCHECK(!registers.used().has(reg));
1605 }
1606}
1607
1612
1614 RegisterSnapshot snapshot;
1615 general_registers_.ForEachUsedRegister([&](Register reg, ValueNode* node) {
1616 if (node->properties().value_representation() ==
1618 snapshot.live_tagged_registers.set(reg);
1619 }
1620 });
1621 snapshot.live_registers = general_registers_.used();
1622 snapshot.live_double_registers = double_registers_.used();
1623 // If a value node, then the result register is removed from the snapshot.
1624 if (ValueNode* value_node = node->TryCast<ValueNode>()) {
1625 if (value_node->use_double_register()) {
1626 snapshot.live_double_registers.clear(
1627 ToDoubleRegister(value_node->result()));
1628 } else {
1629 Register reg = ToRegister(value_node->result());
1630 snapshot.live_registers.clear(reg);
1631 snapshot.live_tagged_registers.clear(reg);
1632 }
1633 }
1634 if (node->properties().can_eager_deopt()) {
1635 // If we eagerly deopt after a deferred call, the registers saved by the
1636 // runtime call might not include the inputs into the eager deopt. Here, we
1637 // make sure that all the eager deopt registers are included in the
1638 // snapshot.
1639 InputLocation* input = node->eager_deopt_info()->input_locations();
1640 node->eager_deopt_info()->ForEachInput([&](ValueNode* node) {
1641 if (input->IsAnyRegister()) {
1642 if (input->IsDoubleRegister()) {
1643 snapshot.live_double_registers.set(input->AssignedDoubleRegister());
1644 } else {
1645 snapshot.live_registers.set(input->AssignedGeneralRegister());
1646 if (node->is_tagged()) {
1647 snapshot.live_tagged_registers.set(
1648 input->AssignedGeneralRegister());
1649 }
1650 }
1651 }
1652 input++;
1653 });
1654 }
1655 node->set_register_snapshot(snapshot);
1656}
1657
1659 DCHECK(!node->is_loadable());
1660 uint32_t free_slot;
1661 bool is_tagged = (node->properties().value_representation() ==
1663 uint32_t slot_size = 1;
1664 bool double_slot =
1665 IsDoubleRepresentation(node->properties().value_representation());
1666 if constexpr (kDoubleSize != kSystemPointerSize) {
1667 if (double_slot) {
1668 slot_size = kDoubleSize / kSystemPointerSize;
1669 }
1670 }
1671 SpillSlots& slots = is_tagged ? tagged_ : untagged_;
1672 MachineRepresentation representation = node->GetMachineRepresentation();
1673 // TODO(victorgomes): We don't currently reuse double slots on arm.
1674 if (!v8_flags.maglev_reuse_stack_slots || slot_size > 1 ||
1675 slots.free_slots.empty()) {
1676 free_slot = slots.top + slot_size - 1;
1677 slots.top += slot_size;
1678 } else {
1679 NodeIdT start = node->live_range().start;
1680 auto it =
1681 std::upper_bound(slots.free_slots.begin(), slots.free_slots.end(),
1682 start, [](NodeIdT s, const SpillSlotInfo& slot_info) {
1683 return slot_info.freed_at_position >= s;
1684 });
1685 // {it} points to the first invalid slot. Decrement it to get to the last
1686 // valid slot freed before {start}.
1687 if (it != slots.free_slots.begin()) {
1688 --it;
1689 }
1690
1691 // TODO(olivf): Currently we cannot mix double and normal stack slots since
1692 // the gap resolver treats them independently and cannot detect cycles via
1693 // shared slots.
1694 while (it != slots.free_slots.begin()) {
1695 if (it->double_slot == double_slot) break;
1696 --it;
1697 }
1698
1699 if (it != slots.free_slots.begin()) {
1700 CHECK_EQ(double_slot, it->double_slot);
1701 CHECK_GT(start, it->freed_at_position);
1702 free_slot = it->slot_index;
1703 slots.free_slots.erase(it);
1704 } else {
1705 free_slot = slots.top++;
1706 }
1707 }
1709 representation, free_slot));
1710}
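
The reuse path above relies on free_slots being ordered by the position at which each slot was freed: the upper_bound finds the first slot that is still too "young", and the element before it is the most recently freed slot that is safe to reuse. A compact standalone sketch of that lookup, with a hypothetical SlotInfo type standing in for SpillSlotInfo:

#include <algorithm>
#include <cstdint>
#include <optional>
#include <vector>

// Hypothetical mirror of SpillSlotInfo: a slot index plus the position at
// which the slot became free. Entries are kept sorted by freed_at_position.
struct SlotInfo {
  uint32_t slot_index;
  uint32_t freed_at_position;
};

// Return a slot freed strictly before `start` (the start of the new value's
// live range), or nullopt if none exists.
std::optional<uint32_t> FindReusableSlot(std::vector<SlotInfo>& free_slots,
                                         uint32_t start) {
  auto it = std::upper_bound(free_slots.begin(), free_slots.end(), start,
                             [](uint32_t s, const SlotInfo& info) {
                               return info.freed_at_position >= s;
                             });
  if (it == free_slots.begin()) return std::nullopt;
  --it;  // last slot freed before `start`
  uint32_t index = it->slot_index;
  free_slots.erase(it);
  return index;
}
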
1711
1712template <typename RegisterT>
1714 RegListBase<RegisterT> reserved) {
1716 if (v8_flags.trace_maglev_regalloc) {
1717 printing_visitor_->os() << " need to free a register... ";
1718 }
1719 int furthest_use = 0;
1720 RegisterT best = RegisterT::no_reg();
1721 for (RegisterT reg : (registers.used() - reserved)) {
1722 ValueNode* value = registers.GetValue(reg);
1723
1724 // The cheapest register to clear is a register containing a value that's
1725 // contained in another register as well. Since we found the register while
1726 // looping over unblocked registers, we can simply use this register.
1727 if (value->num_registers() > 1) {
1728 best = reg;
1729 break;
1730 }
1731 int use = value->current_next_use();
1732 if (use > furthest_use) {
1733 furthest_use = use;
1734 best = reg;
1735 }
1736 }
1737 if (v8_flags.trace_maglev_regalloc) {
1738 printing_visitor_->os()
1739 << " chose " << best << " with next use " << furthest_use << "\n";
1740 }
1741 return best;
1742}
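
The eviction choice above is essentially Belady's heuristic: a value that already lives in a second register is free to drop, otherwise evict the value whose next use is furthest away. A toy version over a plain vector, with illustrative names:

#include <cstddef>
#include <vector>

// Hypothetical description of what a register currently holds.
struct ToyRegState {
  int next_use;       // position of the held value's next use
  int num_registers;  // how many registers hold this value
};

size_t ToyPickRegisterToFree(const std::vector<ToyRegState>& regs) {
  size_t best = 0;
  int furthest_use = -1;
  for (size_t i = 0; i < regs.size(); ++i) {
    // A value that also lives in another register is the cheapest to drop.
    if (regs[i].num_registers > 1) return i;
    if (regs[i].next_use > furthest_use) {
      furthest_use = regs[i].next_use;
      best = i;
    }
  }
  return best;
}
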
1743
1744template <typename RegisterT>
1746 RegListBase<RegisterT> reserved) {
1748 RegisterT best =
1749 PickRegisterToFree<RegisterT>(registers.blocked() | reserved);
1750 DCHECK(best.is_valid());
1751 DCHECK(!registers.is_blocked(best));
1753 registers.AddToFree(best);
1754 return best;
1755}
1756
1758 ValueNode* node, const compiler::InstructionOperand& hint) {
1760 if (node->use_double_register()) {
1761 if (double_registers_.UnblockedFreeIsEmpty()) {
1763 }
1764 return double_registers_.AllocateRegister(node, hint);
1765 } else {
1766 if (general_registers_.UnblockedFreeIsEmpty()) {
1768 }
1769 return general_registers_.AllocateRegister(node, hint);
1770 }
1771}
1772
1773namespace {
1774template <typename RegisterT>
1775static RegisterT GetRegisterHint(const compiler::InstructionOperand& hint) {
1776 if (hint.IsInvalid()) return RegisterT::no_reg();
1777 DCHECK(hint.IsUnallocated());
1778 return RegisterT::from_code(
1779 compiler::UnallocatedOperand::cast(hint).fixed_register_index());
1780}
1781
1782} // namespace
1783
1785 return node->live_range().end == current_node_->id();
1786}
1787
1788template <typename RegisterT>
1790 const compiler::InstructionOperand& hint) {
1792 // If we still have free registers, pick one of those.
1793 if (!registers.unblocked_free().is_empty()) return;
1794
1795 // If the current node is a last use of an input, pick a register containing
1796 // the input. Prefer the hint register if available.
1797 RegisterT hint_reg = GetRegisterHint<RegisterT>(hint);
1798 if (!registers.free().has(hint_reg) && registers.blocked().has(hint_reg) &&
1799 IsCurrentNodeLastUseOf(registers.GetValue(hint_reg))) {
1800 DropRegisterValueAtEnd(hint_reg);
1801 return;
1802 }
1803 // Only search in the used-blocked list, since we don't want to assign the
1804 // result register to a temporary (free + blocked).
1805 for (RegisterT reg : (registers.blocked() - registers.free())) {
1806 if (IsCurrentNodeLastUseOf(registers.GetValue(reg))) {
1807 DropRegisterValueAtEnd(reg);
1808 return;
1809 }
1810 }
1811
1812 // Pick any input-blocked register based on regular heuristics.
1813 RegisterT reg = hint.IsInvalid()
1815 : GetRegisterHint<RegisterT>(hint);
1816 DropRegisterValueAtEnd(reg);
1817}
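// EnsureFreeRegisterAtEnd tries, in order: an already unblocked-free
// register, a register whose value dies at the current node (preferring the
// hinted register), and finally an eviction picked either by the regular
// heuristic or by the hint itself.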
1818
1819compiler::AllocatedOperand
1820StraightForwardRegisterAllocator::AllocateRegisterAtEnd(ValueNode* node) {
1821 if (node->use_double_register()) {
1822 EnsureFreeRegisterAtEnd<DoubleRegister>(node->hint());
1823 return double_registers_.AllocateRegister(node, node->hint());
1824 } else {
1825 EnsureFreeRegisterAtEnd<Register>(node->hint());
1826 return general_registers_.AllocateRegister(node, node->hint());
1827 }
1828}
1829
1830template <typename RegisterT>
1831compiler::AllocatedOperand StraightForwardRegisterAllocator::ForceAllocate(
1832 RegisterFrameState<RegisterT>& registers, RegisterT reg, ValueNode* node) {
1833 DCHECK(!registers.is_blocked(reg));
1834 if (v8_flags.trace_maglev_regalloc) {
1835 printing_visitor_->os()
1836 << " forcing " << reg << " to "
1837 << PrintNodeLabel(graph_labeller(), node) << "...\n";
1838 }
1839 if (registers.free().has(reg)) {
1840 // If it's already free, remove it from the free list.
1841 registers.RemoveFromFree(reg);
1842 } else if (registers.GetValue(reg) == node) {
1843 registers.block(reg);
1844 return compiler::AllocatedOperand(compiler::LocationOperand::REGISTER,
1845 node->GetMachineRepresentation(),
1846 reg.code());
1847 } else {
1848 DCHECK(!registers.is_blocked(reg));
1849 DropRegisterValue(registers, reg);
1850 }
1851 DCHECK(!registers.free().has(reg));
1852 registers.unblock(reg);
1853 registers.SetValue(reg, node);
1854 return compiler::AllocatedOperand(compiler::LocationOperand::REGISTER,
1855 node->GetMachineRepresentation(),
1856 reg.code());
1857}
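// ForceAllocate pins {node} into one specific register: a free register is
// simply claimed, a register that already holds {node} is merely blocked,
// and any other occupant is first dropped via DropRegisterValue before the
// register is reassigned to {node}.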
1858
1864
1870
1871compiler::AllocatedOperand StraightForwardRegisterAllocator::ForceAllocate(
1872 const Input& input, ValueNode* node) {
1873 if (input.IsDoubleRegister()) {
1874 DoubleRegister reg = input.AssignedDoubleRegister();
1876 return ForceAllocate(reg, node);
1877 } else {
1878 Register reg = input.AssignedGeneralRegister();
1880 return ForceAllocate(reg, node);
1881 }
1882}
1883
1884namespace {
1885template <typename RegisterT>
1886compiler::AllocatedOperand OperandForNodeRegister(ValueNode* node,
1887 RegisterT reg) {
1888 return compiler::AllocatedOperand(compiler::LocationOperand::REGISTER,
1889 node->GetMachineRepresentation(),
1890 reg.code());
1891}
1892} // namespace
1893
1894template <typename RegisterT>
1895compiler::InstructionOperand
1896RegisterFrameState<RegisterT>::TryChooseInputRegister(
1897 ValueNode* node, const compiler::InstructionOperand& hint) {
1898 RegTList result_registers = node->result_registers<RegisterT>();
1899 if (result_registers.is_empty()) return compiler::InstructionOperand();
1900
1901 // Prefer to return an existing blocked register.
1902 RegTList blocked_result_registers = result_registers & blocked_;
1903 if (!blocked_result_registers.is_empty()) {
1904 RegisterT reg = GetRegisterHint<RegisterT>(hint);
1905 if (!blocked_result_registers.has(reg)) {
1906 reg = blocked_result_registers.first();
1907 }
1908 return OperandForNodeRegister(node, reg);
1909 }
1910
1911 RegisterT reg = result_registers.first();
1912 block(reg);
1913 return OperandForNodeRegister(node, reg);
1914}
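// TryChooseInputRegister prefers one of the node's registers that is already
// blocked, so no additional register has to be blocked for this input;
// otherwise it blocks the first register currently holding the node.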
1915
1916template <typename RegisterT>
1917compiler::InstructionOperand
1918RegisterFrameState<RegisterT>::TryChooseUnblockedInputRegister(
1919 ValueNode* node) {
1920 RegTList result_excl_blocked = node->result_registers<RegisterT>() - blocked_;
1921 if (result_excl_blocked.is_empty()) return compiler::InstructionOperand();
1922 RegisterT reg = result_excl_blocked.first();
1923 block(reg);
1924 return OperandForNodeRegister(node, reg);
1925}
1926
1927template <typename RegisterT>
1928compiler::AllocatedOperand RegisterFrameState<RegisterT>::AllocateRegister(
1929 ValueNode* node, const compiler::InstructionOperand& hint) {
1930 DCHECK(!unblocked_free().is_empty());
1931 RegisterT reg = GetRegisterHint<RegisterT>(hint);
1932 if (!unblocked_free().has(reg)) {
1933 reg = unblocked_free().first();
1934 }
1935 RemoveFromFree(reg);
1936
1937 // Allocation succeeded. This might have found an existing allocation.
1938 // Simply update the state anyway.
1939 SetValue(reg, node);
1940 return OperandForNodeRegister(node, reg);
1941}
1942
1943template <typename RegisterT>
1944void StraightForwardRegisterAllocator::AssignFixedTemporaries(
1945 RegisterFrameState<RegisterT>& registers, NodeBase* node) {
1946 RegListBase<RegisterT> fixed_temporaries = node->temporaries<RegisterT>();
1947
1948 // Make sure that any initially set temporaries are definitely free.
1949 for (RegisterT reg : fixed_temporaries) {
1950 DCHECK(!registers.is_blocked(reg));
1951 if (!registers.free().has(reg)) {
1952 DropRegisterValue(registers, reg);
1953 registers.AddToFree(reg);
1954 }
1955 registers.block(reg);
1956 }
1957
1958 if (v8_flags.trace_maglev_regalloc && !fixed_temporaries.is_empty()) {
1959 if constexpr (std::is_same_v<RegisterT, Register>) {
1960 printing_visitor_->os()
1961 << "Fixed Temporaries: " << fixed_temporaries << "\n";
1962 } else {
1963 printing_visitor_->os()
1964 << "Fixed Double Temporaries: " << fixed_temporaries << "\n";
1965 }
1966 }
1967
1968 // After allocating the specific/fixed temporary registers, we empty the node
1969 // set, so that it is used to allocate only the arbitrary/available temporary
1970 // register that is going to be inserted in the scratch scope.
1971 node->temporaries<RegisterT>() = {};
1972}
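// Fixed temporaries are registers the node explicitly requested: whatever
// value currently occupies them is dropped and the register is blocked for
// the duration of the node, so neither inputs nor the result are allocated
// into them.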
1973
1978
1979namespace {
1980template <typename RegisterT>
1981RegListBase<RegisterT> GetReservedRegisters(NodeBase* node_base) {
1982 if (!node_base->Is<ValueNode>()) return RegListBase<RegisterT>();
1983 ValueNode* node = node_base->Cast<ValueNode>();
1984 compiler::UnallocatedOperand operand =
1985 compiler::UnallocatedOperand::cast(node->result().operand());
1986 RegListBase<RegisterT> reserved = {node->GetRegisterHint<RegisterT>()};
1987 if (operand.basic_policy() == compiler::UnallocatedOperand::FIXED_SLOT) {
1988 DCHECK(node->Is<InitialValue>());
1989 return reserved;
1990 }
1991 if constexpr (std::is_same_v<RegisterT, Register>) {
1992 if (operand.extended_policy() ==
1993 compiler::UnallocatedOperand::FIXED_REGISTER) {
1994 reserved.set(Register::from_code(operand.fixed_register_index()));
1995 }
1996 } else {
1997 static_assert(std::is_same_v<RegisterT, DoubleRegister>);
1998 if (operand.extended_policy() ==
1999 compiler::UnallocatedOperand::FIXED_FP_REGISTER) {
2000 reserved.set(DoubleRegister::from_code(operand.fixed_register_index()));
2001 }
2002 }
2003 return reserved;
2004}
2005} // namespace
2006
2007template <typename RegisterT>
2008void StraightForwardRegisterAllocator::AssignArbitraryTemporaries(
2009 RegisterFrameState<RegisterT>& registers, NodeBase* node) {
2010 int num_temporaries_needed = node->num_temporaries_needed<RegisterT>();
2011 if (num_temporaries_needed == 0) return;
2012
2013 DCHECK_GT(num_temporaries_needed, 0);
2014 RegListBase<RegisterT> temporaries = node->temporaries<RegisterT>();
2015 DCHECK(temporaries.is_empty());
2016 int remaining_temporaries_needed = num_temporaries_needed;
2017
2018 // If the node is a ValueNode with a fixed result register, we should not
2019 // assign a temporary to the result register, nor its hint.
2020 RegListBase<RegisterT> reserved = GetReservedRegisters<RegisterT>(node);
2021 for (RegisterT reg : (registers.unblocked_free() - reserved)) {
2022 registers.block(reg);
2023 DCHECK(!temporaries.has(reg));
2024 temporaries.set(reg);
2025 if (--remaining_temporaries_needed == 0) break;
2026 }
2027
2028 // Free extra registers if necessary.
2029 for (int i = 0; i < remaining_temporaries_needed; ++i) {
2030 DCHECK((registers.unblocked_free() - reserved).is_empty());
2031 RegisterT reg = FreeUnblockedRegister<RegisterT>(reserved);
2032 registers.block(reg);
2033 DCHECK(!temporaries.has(reg));
2034 temporaries.set(reg);
2035 }
2036
2037 DCHECK_GE(temporaries.Count(), num_temporaries_needed);
2038
2039 node->assign_temporaries(temporaries);
2040 if (v8_flags.trace_maglev_regalloc) {
2041 if constexpr (std::is_same_v<RegisterT, Register>) {
2042 printing_visitor_->os() << "Temporaries: " << temporaries << "\n";
2043 } else {
2044 printing_visitor_->os() << "Double Temporaries: " << temporaries << "\n";
2045 }
2046 }
2047}
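// Arbitrary temporaries are satisfied in two steps: first from registers
// that are still unblocked and free (excluding any register reserved for a
// fixed result), and only if that does not cover the demand are further
// registers evicted via FreeUnblockedRegister.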
2048
2054
2055template <typename Function>
2056void StraightForwardRegisterAllocator::ForEachMergePointRegisterState(
2057 MergePointRegisterState& merge_point_state, Function&& f) {
2058 merge_point_state.ForEachGeneralRegister(
2059 [&](Register reg, RegisterState& state) {
2060 f(general_registers_, reg, state);
2061 });
2062 merge_point_state.ForEachDoubleRegister(
2063 [&](DoubleRegister reg, RegisterState& state) {
2064 f(double_registers_, reg, state);
2065 });
2066}
2067
2068void StraightForwardRegisterAllocator::ClearRegisterValues() {
2069 auto ClearRegisterState = [&](auto& registers) {
2070 while (!registers.used().is_empty()) {
2071 auto reg = registers.used().first();
2072 ValueNode* node = registers.GetValue(reg);
2073 registers.FreeRegistersUsedBy(node);
2074 DCHECK(!registers.used().has(reg));
2075 }
2076 };
2077
2078 ClearRegisterState(general_registers_);
2079 ClearRegisterState(double_registers_);
2080
2081 // All registers should be free by now.
2082 DCHECK_EQ(general_registers_.unblocked_free(),
2083 MaglevAssembler::GetAllocatableRegisters());
2084 DCHECK_EQ(double_registers_.unblocked_free(),
2085 MaglevAssembler::GetAllocatableDoubleRegisters());
2086}
2087
2088void StraightForwardRegisterAllocator::InitializeRegisterValues(
2089 MergePointRegisterState& target_state) {
2090 // First clear the register state.
2091 ClearRegisterValues();
2092
2093 // Then fill it in with target information.
2094 auto fill = [&](auto& registers, auto reg, RegisterState& state) {
2095 ValueNode* node;
2096 RegisterMerge* merge;
2097 LoadMergeState(state, &node, &merge);
2098 if (node != nullptr) {
2099 registers.RemoveFromFree(reg);
2100 registers.SetValue(reg, node);
2101 } else {
2102 DCHECK(!state.GetPayload().is_merge);
2103 }
2104 };
2105 ForEachMergePointRegisterState(target_state, fill);
2106
2107 // SetValue will have blocked registers, unblock them.
2108 general_registers_.clear_blocked();
2109 double_registers_.clear_blocked();
2110}
2111
2112#ifdef DEBUG
2113
2114bool StraightForwardRegisterAllocator::IsInRegister(
2115 MergePointRegisterState& target_state, ValueNode* incoming) {
2116 bool found = false;
2117 auto find = [&found, &incoming](auto reg, RegisterState& state) {
2118 ValueNode* node;
2119 RegisterMerge* merge;
2120 LoadMergeState(state, &node, &merge);
2121 if (node == incoming) found = true;
2122 };
2123 if (incoming->use_double_register()) {
2124 target_state.ForEachDoubleRegister(find);
2125 } else {
2126 target_state.ForEachGeneralRegister(find);
2127 }
2128 return found;
2129}
2130
2131// Returns true if {first_id} or {last_id} is forward-reachable from {start_block}.
2132bool StraightForwardRegisterAllocator::IsForwardReachable(
2133 BasicBlock* start_block, NodeIdT first_id, NodeIdT last_id) {
2134 ZoneQueue<BasicBlock*> queue(compilation_info_->zone());
2135 ZoneSet<BasicBlock*> seen(compilation_info_->zone());
2136 while (!queue.empty()) {
2137 BasicBlock* curr = queue.front();
2138 queue.pop();
2139
2140 if (curr->contains_node_id(first_id) || curr->contains_node_id(last_id)) {
2141 return true;
2142 }
2143
2144 if (curr->control_node()->Is<JumpLoop>()) {
2145 // A JumpLoop will have a backward edge. Since we are only interested in
2146 // checking forward reachability, we ignore its successors.
2147 continue;
2148 }
2149
2150 for (BasicBlock* succ : curr->successors()) {
2151 if (seen.insert(succ).second) {
2152 queue.push(succ);
2153 }
2154 // Since we skipped JumpLoop, only forward edges should remain.
2155 DCHECK_GT(succ->first_id(), curr->first_id());
2156 }
2157 }
2158
2159 return false;
2160}
2161
2162#endif // DEBUG
2163
2164// If a node needs a register before the first call and after the last call of
2165// the loop, initialize the merge state with a register for this node to avoid
2166// an unnecessary spill + reload on every iteration.
2167template <typename RegisterT>
2168void StraightForwardRegisterAllocator::HoistLoopReloads(
2169 BasicBlock* target, RegisterFrameState<RegisterT>& registers) {
2170 auto info = regalloc_info_->loop_info_.find(target->id());
2171 if (info == regalloc_info_->loop_info_.end()) return;
2172 for (ValueNode* node : info->second.reload_hints_) {
2173 DCHECK(general_registers_.blocked().is_empty());
2174 if (registers.free().is_empty()) break;
2175 if (node->has_register()) continue;
2176 // The value is in a liveness hole, don't try to reload it.
2177 if (!node->is_loadable()) continue;
2178 if ((node->use_double_register() && std::is_same_v<RegisterT, Register>) ||
2179 (!node->use_double_register() &&
2180 std::is_same_v<RegisterT, DoubleRegister>)) {
2181 continue;
2182 }
2183 RegisterT target_reg = node->GetRegisterHint<RegisterT>();
2184 if (!registers.free().has(target_reg)) {
2185 target_reg = registers.free().first();
2186 }
2187 compiler::AllocatedOperand target_operand(
2188 compiler::LocationOperand::REGISTER, node->GetMachineRepresentation(),
2189 target_reg.code());
2190 registers.RemoveFromFree(target_reg);
2191 registers.SetValueWithoutBlocking(target_reg, node);
2192 AddMoveBeforeCurrentNode(node, node->loadable_slot(), target_operand);
2193 }
2194}
2195
2196// Same as above with spills: if the node does not need a register before the
2197// first call and after the last call of the loop, keep it spilled in the merge
2198// state to avoid an unnecessary reload + spill on every iteration.
2199void StraightForwardRegisterAllocator::HoistLoopSpills(BasicBlock* target) {
2200 auto info = regalloc_info_->loop_info_.find(target->id());
2201 if (info == regalloc_info_->loop_info_.end()) return;
2202 for (ValueNode* node : info->second.spill_hints_) {
2203 if (!node->has_register()) continue;
2204 // Do not move to a different register, the goal is to keep the value
2205 // spilled on the back-edge.
2206 const bool kForceSpill = true;
2207 if (node->use_double_register()) {
2208 for (DoubleRegister reg : node->result_registers<DoubleRegister>()) {
2209 DropRegisterValueAtEnd(reg, kForceSpill);
2210 }
2211 } else {
2212 for (Register reg : node->result_registers<Register>()) {
2213 DropRegisterValueAtEnd(reg, kForceSpill);
2214 }
2215 }
2216 }
2217}
2218
2219void StraightForwardRegisterAllocator::InitializeBranchTargetRegisterValues(
2220 ControlNode* source, BasicBlock* target) {
2221 MergePointRegisterState& target_state = target->state()->register_state();
2222 DCHECK(!target_state.is_initialized());
2223 auto init = [&](auto& registers, auto reg, RegisterState& state) {
2224 ValueNode* node = nullptr;
2225 DCHECK(registers.blocked().is_empty());
2226 if (!registers.free().has(reg)) {
2227 node = registers.GetValue(reg);
2228 if (!IsLiveAtTarget(node, source, target)) node = nullptr;
2229 }
2230 state = {node, initialized_node};
2231 };
2232 HoistLoopReloads(target, general_registers_);
2233 HoistLoopReloads(target, double_registers_);
2234 HoistLoopSpills(target);
2235 ForEachMergePointRegisterState(target_state, init);
2236}
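// A branch target's initial register state is the live subset of the current
// assignment: values that are dead at the target are simply not recorded,
// and the loop-specific reload/spill hints are applied first so that
// loop-carried values enter the loop body in the location they will be
// needed in.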
2237
2238void StraightForwardRegisterAllocator::InitializeEmptyBlockRegisterValues(
2239 ControlNode* source, BasicBlock* target) {
2240 DCHECK(target->is_edge_split_block());
2241 MergePointRegisterState* register_state =
2242 compilation_info_->zone()->New<MergePointRegisterState>();
2243
2244 DCHECK(!register_state->is_initialized());
2245 auto init = [&](auto& registers, auto reg, RegisterState& state) {
2246 ValueNode* node = nullptr;
2247 DCHECK(registers.blocked().is_empty());
2248 if (!registers.free().has(reg)) {
2249 node = registers.GetValue(reg);
2250 if (!IsLiveAtTarget(node, source, target)) node = nullptr;
2251 }
2252 state = {node, initialized_node};
2253 };
2254 ForEachMergePointRegisterState(*register_state, init);
2255
2256 target->set_edge_split_block_register_state(register_state);
2257}
2258
2259void StraightForwardRegisterAllocator::MergeRegisterValues(ControlNode* control,
2260 BasicBlock* target,
2261 int predecessor_id) {
2262 if (target->is_edge_split_block()) {
2263 return InitializeEmptyBlockRegisterValues(control, target);
2264 }
2265
2266 MergePointRegisterState& target_state = target->state()->register_state();
2267 if (!target_state.is_initialized()) {
2268 // This is the first block we're merging, initialize the values.
2269 return InitializeBranchTargetRegisterValues(control, target);
2270 }
2271
2272 if (v8_flags.trace_maglev_regalloc) {
2273 printing_visitor_->os() << "Merging registers...\n";
2274 }
2275
2276 int predecessor_count = target->state()->predecessor_count();
2277 auto merge = [&](auto& registers, auto reg, RegisterState& state) {
2278 ValueNode* node;
2279 RegisterMerge* merge;
2280 LoadMergeState(state, &node, &merge);
2281
2282 // This isn't quite the right machine representation for Int32 nodes, but
2283 // those are stored in the same registers as Tagged nodes so in this case it
2284 // doesn't matter.
2285 MachineRepresentation mach_repr = std::is_same_v<decltype(reg), Register>
2286 ? MachineRepresentation::kTagged
2287 : MachineRepresentation::kFloat64;
2288 compiler::AllocatedOperand register_info = {
2289 compiler::LocationOperand::REGISTER, mach_repr, reg.code()};
2290
2291 ValueNode* incoming = nullptr;
2292 DCHECK(registers.blocked().is_empty());
2293 if (!registers.free().has(reg)) {
2294 incoming = registers.GetValue(reg);
2295 if (!IsLiveAtTarget(incoming, control, target)) {
2296 if (v8_flags.trace_maglev_regalloc) {
2297 printing_visitor_->os() << " " << reg << " - incoming node "
2298 << PrintNodeLabel(graph_labeller(), incoming)
2299 << " dead at target\n";
2300 }
2301 incoming = nullptr;
2302 }
2303 }
2304
2305 if (incoming == node) {
2306 // We're using the same register as the target already has. If registers
2307 // are merged, add input information.
2308 if (v8_flags.trace_maglev_regalloc) {
2309 if (node) {
2310 printing_visitor_->os()
2311 << " " << reg << " - incoming node same as node: "
2312 << PrintNodeLabel(graph_labeller(), node) << "\n";
2313 }
2314 }
2315 if (merge) merge->operand(predecessor_id) = register_info;
2316 return;
2317 }
2318
2319 if (node == nullptr) {
2320 // Don't load new nodes at loop headers.
2321 if (control->Is<JumpLoop>()) return;
2322 } else if (!node->is_loadable() && !node->has_register()) {
2323 // If we have a node already, but can't load it here, we must be in a
2324 // liveness hole for it, so nuke the merge state.
2325 // This can only happen for conversion nodes, as they can split and take
2326 // over the liveness of the node they are converting.
2327 // TODO(v8:7700): Overeager DCHECK.
2328 // DCHECK(node->properties().is_conversion());
2329 if (v8_flags.trace_maglev_regalloc) {
2330 printing_visitor_->os() << " " << reg << " - can't load "
2331 << PrintNodeLabel(graph_labeller(), node)
2332 << ", dropping the merge\n";
2333 }
2334 // We always need to be able to restore values on JumpLoop since the value
2335 // is definitely live at the loop header.
2336 CHECK(!control->Is<JumpLoop>());
2337 state = {nullptr, initialized_node};
2338 return;
2339 }
2340
2341 if (merge) {
2342 // The register is already occupied with a different node. Figure out
2343 // where that node is allocated on the incoming branch.
2344 merge->operand(predecessor_id) = node->allocation();
2345 if (v8_flags.trace_maglev_regalloc) {
2346 printing_visitor_->os() << " " << reg << " - merge: loading "
2347 << PrintNodeLabel(graph_labeller(), node)
2348 << " from " << node->allocation() << " \n";
2349 }
2350
2351 if (incoming != nullptr) {
2352 // If {incoming} isn't loadable or available in a register, then we are
2353 // in a liveness hole, and none of its uses should be reachable from
2354 // {target} (for simplicity/speed, we only check the first and last use
2355 // though).
2357 !incoming->is_loadable() && !IsInRegister(target_state, incoming),
2358 !IsForwardReachable(target, incoming->current_next_use(),
2359 incoming->live_range().end));
2360 }
2361
2362 return;
2363 }
2364
2365 DCHECK_IMPLIES(node == nullptr, incoming != nullptr);
2366 if (node == nullptr && !incoming->is_loadable()) {
2367 // If the register is unallocated at the merge point, and the incoming
2368 // value isn't spilled, that means we must have seen it already in a
2369 // different register.
2370 // This may not be true for conversion nodes, as they can split and take
2371 // over the liveness of the node they are converting.
2372 // TODO(v8:7700): This DCHECK is overeager, {incoming} can be a Phi node
2373 // containing conversion nodes.
2374 // DCHECK_IMPLIES(!IsInRegister(target_state, incoming),
2375 // incoming->properties().is_conversion());
2376 if (v8_flags.trace_maglev_regalloc) {
2377 printing_visitor_->os()
2378 << " " << reg << " - can't load incoming "
2379 << PrintNodeLabel(graph_labeller(), incoming) << ", bailing out\n";
2380 }
2381 return;
2382 }
2383
2384 const size_t size = sizeof(RegisterMerge) +
2385 predecessor_count * sizeof(compiler::AllocatedOperand);
2386 void* buffer = compilation_info_->zone()->Allocate<void*>(size);
2387 merge = new (buffer) RegisterMerge();
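// {RegisterMerge} is allocated with a trailing array of one operand per
// predecessor (sized manually above); merge->operand(i) indexes into that
// tail, recording where this register's value lives on each incoming edge.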
2388 merge->node = node == nullptr ? incoming : node;
2389
2390 // If the register is unallocated at the merge point, allocation so far
2391 // is the loadable slot for the incoming value. Otherwise all incoming
2392 // branches agree that the current node is in the register info.
2393 compiler::InstructionOperand info_so_far =
2394 node == nullptr ? incoming->loadable_slot() : register_info;
2395
2396 // Initialize the entire array with info_so_far since we don't know in
2397 // which order we've seen the predecessors so far. Predecessors we
2398 // haven't seen yet will simply overwrite their entry later.
2399 for (int i = 0; i < predecessor_count; i++) {
2400 merge->operand(i) = info_so_far;
2401 }
2402 // If the register is unallocated at the merge point, fill in the
2403 // incoming value. Otherwise find the merge-point node in the incoming
2404 // state.
2405 if (node == nullptr) {
2406 merge->operand(predecessor_id) = register_info;
2407 if (v8_flags.trace_maglev_regalloc) {
2408 printing_visitor_->os() << " " << reg << " - new merge: loading new "
2409 << PrintNodeLabel(graph_labeller(), incoming)
2410 << " from " << register_info << " \n";
2411 }
2412 } else {
2413 merge->operand(predecessor_id) = node->allocation();
2414 if (v8_flags.trace_maglev_regalloc) {
2415 printing_visitor_->os() << " " << reg << " - new merge: loading "
2416 << PrintNodeLabel(graph_labeller(), node)
2417 << " from " << node->allocation() << " \n";
2418 }
2419 }
2420 state = {merge, initialized_merge};
2421 };
2422 ForEachMergePointRegisterState(target_state, merge);
2423}
2424
2425} // namespace maglev
2426} // namespace internal
2427} // namespace v8