v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
maglev-phi-representation-selector.cc
Go to the documentation of this file.
1// Copyright 2023 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
7#include <optional>
8
9#include "src/base/enum-set.h"
10#include "src/base/logging.h"
13#include "src/flags/flags.h"
18
19namespace v8 {
20namespace internal {
21namespace maglev {
22
// Debug-trace helper for the phi-untagging pass: streams its arguments to
// stdout (terminated with std::endl, so one flushed line per invocation) only
// when the --trace-maglev-phi-untagging flag is set; otherwise the flag check
// makes it effectively a no-op. The do { } while (false) wrapper makes the
// macro expand to a single statement, so it composes safely with unbraced
// if/else at call sites.
23#define TRACE_UNTAGGING(...) \
 24 do { \
 25 if (v8_flags.trace_maglev_phi_untagging) { \
 26 StdoutStream{} << __VA_ARGS__ << std::endl; \
 27 } \
 28 } while (false)
29
31 BasicBlock* block) {
34
35 if (block->has_phi()) {
36 auto& phis = *block->phis();
37
38 auto first_retry = phis.begin();
39 auto end_retry = first_retry;
40 bool any_change = false;
41
42 for (auto it = phis.begin(); it != phis.end(); ++it) {
43 Phi* phi = *it;
44 switch (ProcessPhi(phi)) {
46 break;
48 any_change = true;
49 break;
51 if (end_retry == first_retry) {
52 first_retry = it;
53 }
54 end_retry = it;
55 ++end_retry;
56 break;
57 }
58 }
59 // Give it one more shot in case an earlier phi has a later one as input.
60 if (any_change) {
61 for (auto it = first_retry; it != end_retry; ++it) {
62 ProcessPhi(*it);
63 }
64 }
65 }
66
68}
69
71 if (block->successors().size() != 1) return false;
72 BasicBlock* next = block->successors()[0];
73 // To be able to hoist above resumable loops we would have to be able to
74 // convert during resumption.
75 return !next->state()->is_resumable_loop();
76}
77
80 if (node->value_representation() != ValueRepresentation::kTagged) {
82 }
83
84 if (node->is_exception_phi()) {
85 // Exception phis have no inputs (or, at least, none accessible through
86 // `node->input(...)`), so we don't know if the inputs could be untagged or
87 // not, so we just keep those Phis tagged.
89 }
90
92 "Considering for untagging: " << PrintNodeLabel(graph_labeller(), node));
93
94 // {input_mask} represents the ValueRepresentation that {node} could have,
95 // based on the ValueRepresentation of its inputs.
96 ValueRepresentationSet input_reprs;
97 HoistTypeList hoist_untagging;
98 hoist_untagging.resize(node->input_count(), HoistType::kNone);
99
100 bool has_tagged_phi_input = false;
101 for (int i = 0; i < node->input_count(); i++) {
102 ValueNode* input = node->input(i).node();
103 if (input->Is<SmiConstant>()) {
104 // Could be any representation. We treat such inputs as Int32, since we
105 // later allow ourselves to promote Int32 to Float64 if needed (but we
106 // never downgrade Float64 to Int32, as it could cause deopt loops).
107 input_reprs.Add(ValueRepresentation::kInt32);
108 } else if (Constant* constant = input->TryCast<Constant>()) {
109 if (constant->object().IsHeapNumber()) {
111 } else {
112 // Not a Constant that we can untag.
113 // TODO(leszeks): Consider treating 'undefined' as a potential
114 // HoleyFloat64.
115 input_reprs.RemoveAll();
116 break;
117 }
118 } else if (input->properties().is_conversion()) {
119 DCHECK_EQ(input->input_count(), 1);
120 // The graph builder tags all Phi inputs, so this conversion should
121 // produce a tagged value.
122 DCHECK_EQ(input->value_representation(), ValueRepresentation::kTagged);
123 // If we want to untag {node}, then we'll drop the conversion and use its
124 // input instead.
125 input_reprs.Add(
126 input->input(0).node()->properties().value_representation());
127 } else if (Phi* input_phi = input->TryCast<Phi>()) {
128 if (input_phi->value_representation() != ValueRepresentation::kTagged) {
129 input_reprs.Add(input_phi->value_representation());
130 } else {
131 // An untagged phi is an input of the current phi.
132 if (node->is_backedge_offset(i) &&
133 node->merge_state()->is_loop_with_peeled_iteration()) {
134 // This is the backedge of a loop that has a peeled iteration. We
135 // ignore it and speculatively assume that it will be the same as the
136 // 1st input.
137 DCHECK_EQ(node->input_count(), 2);
138 DCHECK_EQ(i, 1);
139 break;
140 }
141 has_tagged_phi_input = true;
142 input_reprs.RemoveAll();
143 break;
144 }
145 } else {
146 // This is the case where we don't have an existing conversion to attach
147 // the untagging to. In the general case we give up, however in the
148 // special case of the value originating from the loop entry branch, we
149 // can try to hoist untagging out of the loop.
150 if (builder_->graph()->is_osr() &&
151 v8_flags.maglev_hoist_osr_value_phi_untagging &&
152 input->Is<InitialValue>() &&
154 hoist_untagging[i] = HoistType::kPrologue;
155 continue;
156 }
157 if (node->is_loop_phi() && !node->is_backedge_offset(i)) {
158 BasicBlock* pred = node->merge_state()->predecessor_at(i);
159 if (CanHoistUntaggingTo(pred)) {
160 auto static_type = StaticTypeForNode(
161 builder_->broker(), builder_->local_isolate(), input);
162 if (NodeTypeIs(static_type, NodeType::kSmi)) {
163 input_reprs.Add(ValueRepresentation::kInt32);
164 hoist_untagging[i] = HoistType::kLoopEntryUnchecked;
165 continue;
166 }
167 if (NodeTypeIs(static_type, NodeType::kNumber)) {
169 hoist_untagging[i] = HoistType::kLoopEntryUnchecked;
170 continue;
171 }
172
173 // TODO(olivf): Unless we untag OSR values, speculatively untagging
174 // could end us in deopt loops. To enable this by default we need to
175 // add some feedback to be able to back off. Or, ideally find the
176 // respective checked conversion from within the loop to wire up the
177 // feedback collection.
178 if (v8_flags.maglev_speculative_hoist_phi_untagging) {
179 // TODO(olivf): Currently there is no hard guarantee that the phi
180 // merge state has a checkpointed jump.
181 if (pred->control_node()->Is<CheckpointedJump>()) {
182 DCHECK(!node->merge_state()->is_resumable_loop());
183 hoist_untagging[i] = HoistType::kLoopEntry;
184 continue;
185 }
186 }
187 }
188 }
189
190 // This input is tagged, didn't require a tagging operation to be
191 // tagged and we decided not to hosit; we won't untag {node}.
192 // TODO(dmercadier): this is a bit suboptimal, because some nodes start
193 // tagged, and later become untagged (parameters for instance). Such nodes
194 // will have their untagged alternative passed to {node} without any
195 // explicit conversion, and we thus won't untag {node} even though we
196 // could have.
197 input_reprs.RemoveAll();
198 break;
199 }
200 }
201 ProcessPhiResult default_result = has_tagged_phi_input
204
205 UseRepresentationSet use_reprs;
206 if (node->is_loop_phi() && !node->get_same_loop_uses_repr_hints().empty()) {
207 // {node} is a loop phi that has uses inside the loop; we will tag/untag
208 // based on those uses, ignoring uses after the loop.
209 use_reprs = node->get_same_loop_uses_repr_hints();
210 TRACE_UNTAGGING(" + use_reprs : " << use_reprs << " (same loop only)");
211 } else {
212 use_reprs = node->get_uses_repr_hints();
213 TRACE_UNTAGGING(" + use_reprs : " << use_reprs << " (all uses)");
214 }
215
216 TRACE_UNTAGGING(" + input_reprs: " << input_reprs);
217
218 if (use_reprs.contains(UseRepresentation::kTagged) ||
219 use_reprs.contains(UseRepresentation::kUint32) || use_reprs.empty()) {
220 // We don't untag phis that are used as tagged (because we'd have to retag
221 // them later). We also ignore phis that are used as Uint32, because this is
222 // a fairly rare case and supporting it doesn't improve performance all that
223 // much but will increase code complexity.
224 // TODO(dmercadier): consider taking into account where those Tagged uses
225 // are: Tagged uses outside of a loop or for a Return could probably be
226 // ignored.
227 TRACE_UNTAGGING(" => Leaving tagged [incompatible uses]");
229 return default_result;
230 }
231
232 if (input_reprs.contains(ValueRepresentation::kTagged) ||
234 input_reprs.empty()) {
235 TRACE_UNTAGGING(" => Leaving tagged [tagged or intptr inputs]");
237 return default_result;
238 }
239
240 // Only allowed to have Uint32, Int32, Float64 and HoleyFloat64 inputs from
241 // here.
242 DCHECK_EQ(input_reprs -
248
249 DCHECK_EQ(
255
256 // The rules for untagging are that we can only widen input representations,
257 // i.e. promote Int32 -> Float64 -> HoleyFloat64. We cannot convert from Int32
258 // to Uint32 and vise versa, but both can be converted to Float64.
259 //
260 // Inputs can always be used as more generic uses, and tighter uses always
261 // block more generic inputs. So, we can find the minimum generic use and
262 // maximum generic input, extend inputs upwards, uses downwards, and convert
263 // to the least generic use in the intersection.
264 //
265 // Of interest is the fact that we don't want to insert conversions which
266 // reduce genericity, e.g. Float64->Int32 conversions, since they could deopt
267 // and lead to deopt loops. The above logic ensures that if a Phi has Float64
268 // inputs and Int32 uses, we simply don't untag it.
269 //
270 // TODO(leszeks): The above logic could be implemented with bit magic if the
271 // representations were contiguous.
272
273 ValueRepresentationSet possible_inputs;
275 possible_inputs = {ValueRepresentation::kHoleyFloat64};
276 } else if (input_reprs.contains(ValueRepresentation::kFloat64) ||
278 possible_inputs = {ValueRepresentation::kFloat64,
280 } else {
282 possible_inputs = {ValueRepresentation::kInt32,
285 }
286
287 ValueRepresentationSet allowed_inputs_for_uses;
288 if (use_reprs.contains(UseRepresentation::kInt32)) {
289 allowed_inputs_for_uses = {ValueRepresentation::kInt32};
290 } else if (use_reprs.contains(UseRepresentation::kFloat64)) {
291 allowed_inputs_for_uses = {ValueRepresentation::kInt32,
293 } else {
294 DCHECK(!use_reprs.empty() &&
295 use_reprs.is_subset_of({UseRepresentation::kHoleyFloat64,
296 UseRepresentation::kTruncatedInt32}));
297 allowed_inputs_for_uses = {ValueRepresentation::kInt32,
300 }
301
302 // When hoisting we must ensure that we don't turn a tagged flowing into
303 // CheckedSmiUntag into a float64. This would cause us to loose the smi check
304 // which in turn can invalidate assumptions on aliasing values.
305 if (hoist_untagging.size() && node->uses_require_31_bit_value()) {
306 TRACE_UNTAGGING(" => Leaving tagged [depends on smi check]");
308 return default_result;
309 }
310
311 auto intersection = possible_inputs & allowed_inputs_for_uses;
312
313 TRACE_UNTAGGING(" + intersection reprs: " << intersection);
314 if (intersection.contains(ValueRepresentation::kInt32) &&
316 UseRepresentation::kInt32, UseRepresentation::kTruncatedInt32})) {
317 TRACE_UNTAGGING(" => Untagging to Int32");
318 ConvertTaggedPhiTo(node, ValueRepresentation::kInt32, hoist_untagging);
320 } else if (intersection.contains(ValueRepresentation::kFloat64)) {
321 TRACE_UNTAGGING(" => Untagging to kFloat64");
324 } else if (intersection.contains(ValueRepresentation::kHoleyFloat64)) {
325 TRACE_UNTAGGING(" => Untagging to HoleyFloat64");
327 hoist_untagging);
329 }
330
331 DCHECK(intersection.empty());
332 // We don't untag the Phi.
333 TRACE_UNTAGGING(" => Leaving tagged [incompatible inputs/uses]");
335 return default_result;
336}
337
339 // Since we are untagging some Phis, it's possible that one of the inputs of
340 // {phi} is an untagged Phi. However, if this function is called, then we've
341 // decided that {phi} is going to stay tagged, and thus, all of its inputs
342 // should be tagged. We'll thus insert tagging operation on the untagged phi
343 // inputs of {phi}.
344
345 const int skip_backedge = phi->is_loop_phi() ? 1 : 0;
346 for (int i = 0; i < phi->input_count() - skip_backedge; i++) {
347 ValueNode* input = phi->input(i).node();
348 if (Phi* phi_input = input->TryCast<Phi>()) {
349 phi->change_input(
350 i, EnsurePhiTagged(phi_input, phi->predecessor_at(i),
352 } else {
353 // Inputs of Phis that aren't Phi should always be tagged (except for the
354 // phis untagged by this class, but {phi} isn't one of them).
355 DCHECK(input->is_tagged());
356 }
357 }
358}
359
360namespace {
361
362Opcode GetOpcodeForConversion(ValueRepresentation from, ValueRepresentation to,
363 bool truncating) {
366
367 switch (from) {
369 switch (to) {
371 return Opcode::kCheckedInt32ToUint32;
374 return Opcode::kChangeInt32ToFloat64;
375
379 UNREACHABLE();
380 }
382 switch (to) {
384 return Opcode::kCheckedUint32ToInt32;
385
388 return Opcode::kChangeUint32ToFloat64;
389
393 UNREACHABLE();
394 }
396 switch (to) {
398 if (truncating) {
399 return Opcode::kTruncateFloat64ToInt32;
400 }
401 return Opcode::kCheckedTruncateFloat64ToInt32;
403 // The graph builder never inserts Tagged->Uint32 conversions, so we
404 // don't have to handle this case.
405 UNREACHABLE();
407 return Opcode::kIdentity;
408
412 UNREACHABLE();
413 }
415 switch (to) {
417 // Holes are NaNs, so we can truncate them to int32 same as real NaNs.
418 if (truncating) {
419 return Opcode::kTruncateFloat64ToInt32;
420 }
421 return Opcode::kCheckedTruncateFloat64ToInt32;
423 // The graph builder never inserts Tagged->Uint32 conversions, so we
424 // don't have to handle this case.
425 UNREACHABLE();
427 return Opcode::kHoleyFloat64ToMaybeNanFloat64;
428
432 UNREACHABLE();
433 }
434
437 UNREACHABLE();
438 }
439 UNREACHABLE();
440}
441
442} // namespace
443
445 Phi* phi, ValueRepresentation repr, const HoistTypeList& hoist_untagging) {
446 // We currently only support Int32, Float64, and HoleyFloat64 untagged phis.
450 phi->change_representation(repr);
451 // Re-initialise register data, since we might have changed from integer
452 // registers to floating registers.
453 phi->InitializeRegisterData();
454
455 for (int input_index = 0; input_index < phi->input_count(); input_index++) {
456 ValueNode* input = phi->input(input_index).node();
457#define TRACE_INPUT_LABEL \
458 " @ Input " << input_index << " (" \
459 << PrintNodeLabel(graph_labeller(), input) << ")"
460
461 if (input->Is<SmiConstant>()) {
462 switch (repr) {
464 TRACE_UNTAGGING(TRACE_INPUT_LABEL << ": Making Int32 instead of Smi");
465 phi->change_input(input_index,
467 input->Cast<SmiConstant>()->value().value()));
468 break;
472 << ": Making Float64 instead of Smi");
473 phi->change_input(input_index,
475 input->Cast<SmiConstant>()->value().value()));
476 break;
479 default:
480 UNREACHABLE();
481 }
482 } else if (Constant* constant = input->TryCast<Constant>()) {
484 << ": Making Float64 instead of Constant");
485 DCHECK(constant->object().IsHeapNumber());
488 phi->change_input(input_index,
490 constant->object().AsHeapNumber().value()));
491 } else if (input->properties().is_conversion()) {
492 // Unwrapping the conversion.
493 DCHECK_EQ(input->value_representation(), ValueRepresentation::kTagged);
494 // Needs to insert a new conversion.
495 ValueNode* bypassed_input = input->input(0).node();
496 ValueRepresentation from_repr = bypassed_input->value_representation();
497 ValueNode* new_input;
498 if (from_repr == repr) {
499 TRACE_UNTAGGING(TRACE_INPUT_LABEL << ": Bypassing conversion");
500 new_input = bypassed_input;
501 } else {
502 Opcode conv_opcode =
503 GetOpcodeForConversion(from_repr, repr, /*truncating*/ false);
504 switch (conv_opcode) {
505 case Opcode::kChangeInt32ToFloat64: {
506 new_input =
508 input, phi, input_index);
509 break;
510 }
511 case Opcode::kChangeUint32ToFloat64: {
512 new_input =
514 input, phi, input_index);
515 break;
516 }
517 case Opcode::kIdentity:
518 TRACE_UNTAGGING(TRACE_INPUT_LABEL << ": Bypassing conversion");
519 new_input = bypassed_input;
520 break;
521 default:
522 UNREACHABLE();
523 }
524 }
525 phi->change_input(input_index, new_input);
526 } else if (Phi* input_phi = input->TryCast<Phi>()) {
527 ValueRepresentation from_repr = input_phi->value_representation();
528 if (from_repr == ValueRepresentation::kTagged) {
529 // We allow speculative untagging of the backedge for loop phis from
530 // loops that have been peeled.
531 // This can lead to deopt loops (eg, if after the last iteration of a
532 // loop, a loop Phi has a specific representation that it never has in
533 // the loop), but this case should (hopefully) be rare.
534
535 // We know that we are on the backedge input of a peeled loop, because
536 // if it wasn't the case, then Process(Phi*) would not have decided to
537 // untag this Phi, and this function would not have been called (because
538 // except for backedges of peeled loops, tagged inputs prevent phi
539 // untagging).
540 DCHECK(phi->merge_state()->is_loop_with_peeled_iteration());
541 DCHECK(phi->is_backedge_offset(input_index));
542
543 DeoptFrame* deopt_frame = phi->merge_state()->backedge_deopt_frame();
544 switch (repr) {
546 phi->change_input(
547 input_index,
549 builder_->zone(), {input_phi}),
550 phi->predecessor_at(input_index),
551 deopt_frame));
552 break;
553 }
555 phi->change_input(
556 input_index,
559 builder_->zone(), {input_phi},
561 phi->predecessor_at(input_index), deopt_frame));
562 break;
563 }
565 phi->change_input(
566 input_index,
569 builder_->zone(), {input_phi},
571 phi->predecessor_at(input_index), deopt_frame));
572 break;
573 }
577 UNREACHABLE();
578 }
580 << ": Eagerly untagging Phi on backedge");
581 } else if (from_repr != repr &&
582 from_repr == ValueRepresentation::kInt32) {
583 // We allow widening of Int32 inputs to Float64, which can lead to the
584 // current Phi having a Float64 representation but having some Int32
585 // inputs, which will require an Int32ToFloat64 conversion.
588 phi->change_input(input_index,
590 builder_->zone(), {input_phi}),
591 phi->predecessor_at(input_index)));
594 << ": Converting phi input with a ChangeInt32ToFloat64");
595 } else {
596 // We allow Float64 to silently be used as HoleyFloat64.
597 DCHECK_IMPLIES(from_repr != repr,
598 from_repr == ValueRepresentation::kFloat64 &&
601 << ": Keeping untagged Phi input as-is");
602 }
603 } else if (hoist_untagging[input_index] != HoistType::kNone) {
604 CHECK_EQ(input->value_representation(), ValueRepresentation::kTagged);
606 DeoptFrame* deopt_frame;
607 auto GetDeoptFrame = [](BasicBlock* block) {
608 return &block->control_node()
609 ->Cast<CheckpointedJump>()
610 ->eager_deopt_info()
611 ->top_frame();
612 };
613 switch (hoist_untagging[input_index]) {
615 block = phi->merge_state()->predecessor_at(input_index);
616 deopt_frame = nullptr;
617 break;
619 block = phi->merge_state()->predecessor_at(input_index);
620 deopt_frame = GetDeoptFrame(block);
621 break;
623 block = *builder_->graph()->begin();
624 deopt_frame = GetDeoptFrame(block);
625 break;
626 case HoistType::kNone:
627 UNREACHABLE();
628 }
629 // Ensure the hoisted value is actually live at the hoist location.
630 CHECK(input->Is<InitialValue>() ||
631 (phi->is_loop_phi() && !phi->is_backedge_offset(input_index)));
632 ValueNode* untagged;
633 switch (repr) {
635 if (!deopt_frame) {
636 DCHECK(
638 builder_->local_isolate(), input),
639 NodeType::kSmi));
640 untagged = AddNodeAtBlockEnd(
642 block);
643
644 } else {
645 untagged = AddNodeAtBlockEnd(
647 builder_->zone(), {input},
649 block, deopt_frame);
650 untagged =
652 builder_->zone(), {untagged}),
653 block, deopt_frame);
654 }
655 break;
658 if (!deopt_frame) {
659 DCHECK(
661 builder_->local_isolate(), input),
662 NodeType::kNumber));
663 untagged = AddNodeAtBlockEnd(
665 builder_->zone(), {input},
667 block);
668 } else {
669 DCHECK(!phi->uses_require_31_bit_value());
670 untagged = AddNodeAtBlockEnd(
672 builder_->zone(), {input},
674 block, deopt_frame);
676 untagged =
678 builder_->zone(), {untagged}),
679 block, deopt_frame);
680 }
681 }
682 break;
686 UNREACHABLE();
687 }
688 phi->change_input(input_index, untagged);
689 } else {
690 TRACE_UNTAGGING(TRACE_INPUT_LABEL << ": Invalid input for untagged phi");
691 UNREACHABLE();
692 }
693 }
694}
695
696template <class NodeT>
698 ValueNode* input, Phi* phi, uint32_t input_index) {
700 << ": Replacing old conversion with a ChangeInt32ToFloat64");
701 ValueNode* new_node =
702 NodeBase::New<NodeT>(builder_->zone(), {input->input(0).node()});
703 return AddNodeAtBlockEnd(new_node, phi->predecessor_at(input_index));
704}
705
707 switch (op) {
708 case Opcode::kCheckedSmiUntag:
709 case Opcode::kUnsafeSmiUntag:
710 case Opcode::kCheckedNumberToInt32:
711 case Opcode::kCheckedObjectToIndex:
712 case Opcode::kCheckedTruncateNumberOrOddballToInt32:
713 case Opcode::kTruncateNumberOrOddballToInt32:
714 case Opcode::kCheckedNumberOrOddballToFloat64:
715 case Opcode::kUncheckedNumberOrOddballToFloat64:
716 case Opcode::kCheckedNumberOrOddballToHoleyFloat64:
717 return true;
718 default:
719 return false;
720 }
721}
722
724 Phi* phi, ValueNode* old_untagging) {
725 DCHECK_EQ(old_untagging->input_count(), 1);
726 DCHECK(old_untagging->input(0).node()->Is<Phi>());
727
728 ValueRepresentation from_repr =
729 old_untagging->input(0).node()->value_representation();
730 ValueRepresentation to_repr = old_untagging->value_representation();
731
732 // Since initially Phis are tagged, it would make not sense for
733 // {old_conversion} to convert a Phi to a Tagged value.
735 // The graph builder never inserts Tagged->Uint32 conversions (and thus, we
736 // don't handle them in GetOpcodeForCheckedConversion).
738
739 if (from_repr == ValueRepresentation::kTagged) {
740 // The Phi hasn't been untagged, so we leave the conversion as it is.
741 return;
742 }
743
744 if (from_repr == to_repr) {
745 if (from_repr == ValueRepresentation::kInt32) {
746 if (phi->uses_require_31_bit_value() &&
747 old_untagging->Is<CheckedSmiUntag>()) {
748 old_untagging->OverwriteWith<CheckedSmiSizedInt32>();
749 return;
750 }
751 }
752 old_untagging->OverwriteWith<Identity>();
753 return;
754 }
755
756 if (old_untagging->Is<UnsafeSmiUntag>()) {
757 // UnsafeSmiTag are only inserted when the node is a known Smi. If the
758 // current phi has a Float64/Uint32 representation, then we can safely
759 // truncate it to Int32, because we know that the Float64/Uint32 fits in a
760 // Smi, and therefore in an Int32.
761 if (from_repr == ValueRepresentation::kFloat64 ||
763 old_untagging->OverwriteWith<UnsafeTruncateFloat64ToInt32>();
764 } else if (from_repr == ValueRepresentation::kUint32) {
765 old_untagging->OverwriteWith<UnsafeTruncateUint32ToInt32>();
766 } else {
768 old_untagging->OverwriteWith<Identity>();
769 }
770 return;
771 }
772
773 // The graph builder inserts 3 kind of Tagged->Int32 conversions that can have
774 // heap number as input: CheckedTruncateNumberToInt32, which truncates its
775 // input (and deopts if it's not a HeapNumber), TruncateNumberToInt32, which
776 // truncates its input (assuming that it's indeed a HeapNumber) and
777 // CheckedSmiTag, which deopts on non-smi inputs. The first 2 cannot deopt if
778 // we have Float64 phi and will happily truncate it, but the 3rd one should
779 // deopt if it cannot be converted without loss of precision.
780 bool conversion_is_truncating_float64 =
781 old_untagging->Is<CheckedTruncateNumberOrOddballToInt32>() ||
782 old_untagging->Is<TruncateNumberOrOddballToInt32>();
783
784 Opcode needed_conversion = GetOpcodeForConversion(
785 from_repr, to_repr, conversion_is_truncating_float64);
786
787 if (CheckedNumberOrOddballToFloat64* number_untagging =
788 old_untagging->TryCast<CheckedNumberOrOddballToFloat64>()) {
789 if (from_repr == ValueRepresentation::kHoleyFloat64 &&
790 number_untagging->conversion_type() !=
792 // {phi} is a HoleyFloat64 (and thus, it could be a hole), but the
793 // original untagging did not allow holes.
794 needed_conversion = Opcode::kCheckedHoleyFloat64ToFloat64;
795 }
796 }
797
798 if (needed_conversion != old_untagging->opcode()) {
799 old_untagging->OverwriteWith(needed_conversion);
800 }
801}
802
804 CheckSmi* node, Phi* phi, int input_index, const ProcessingState* state) {
805 DCHECK_EQ(input_index, 0);
806
807 switch (phi->value_representation()) {
810
812 if (!SmiValuesAre32Bits()) {
813 node->OverwriteWith<CheckInt32IsSmi>();
815 } else {
817 }
818
821 node->OverwriteWith<CheckHoleyFloat64IsSmi>();
823
826 UNREACHABLE();
827 }
828}
829
831 CheckNumber* node, Phi* phi, int input_index,
832 const ProcessingState* state) {
833 switch (phi->value_representation()) {
836 // The phi was untagged to a Int32 or Float64, so we know that it's a
837 // number. We thus remove this CheckNumber from the graph.
840 // We need to check that the phi is not the hole nan.
841 node->OverwriteWith<CheckHoleyFloat64NotHole>();
844 // {phi} wasn't untagged, so we don't need to do anything.
848 UNREACHABLE();
849 }
850}
851
853 size_t diff = new_nodes_at_start_.size();
854 if (diff == 0) return;
855 size_t old_size = block->nodes().size();
856 block->nodes().resize(old_size + new_nodes_at_start_.size());
857 auto begin = block->nodes().begin();
858 std::copy_backward(begin, begin + old_size, block->nodes().end());
859 std::copy(new_nodes_at_start_.begin(), new_nodes_at_start_.end(), begin);
861}
862
863// If the input of a StoreTaggedFieldNoWriteBarrier was a Phi that got
864// untagged, then we need to retag it, and we might need to actually use a write
865// barrier.
867 StoreTaggedFieldNoWriteBarrier* node, Phi* phi, int input_index,
868 const ProcessingState* state) {
870 // The 1st input of a Store should usually not be untagged. However, it is
871 // possible to write `let x = a ? 4 : 2; x.c = 10`, which will produce a
872 // store whose receiver could be an untagged Phi. So, for such cases, we use
873 // the generic UpdateNodePhiInput method to tag `phi` if needed.
874 return UpdateNodePhiInput(static_cast<NodeBase*>(node), phi, input_index,
875 state);
876 }
878
879 if (phi->value_representation() != ValueRepresentation::kTagged) {
880 // We need to tag {phi}. However, this could turn it into a HeapObject
881 // rather than a Smi (either because {phi} is a Float64 phi, or because it's
882 // an Int32/Uint32 phi that doesn't fit on 31 bits), so we need the write
883 // barrier.
884 node->change_input(
885 input_index,
892 node->OverwriteWith<StoreTaggedFieldWithWriteBarrier>();
893 }
894
896}
897
898// If the input of a StoreFixedArrayElementNoWriteBarrier was a Phi that got
899// untagged, then we need to retag it, and we might need to actually use a write
900// barrier.
902 StoreFixedArrayElementNoWriteBarrier* node, Phi* phi, int input_index,
903 const ProcessingState* state) {
905 return UpdateNodePhiInput(static_cast<NodeBase*>(node), phi, input_index,
906 state);
907 }
908
909 if (phi->value_representation() != ValueRepresentation::kTagged) {
910 // We need to tag {phi}. However, this could turn it into a HeapObject
911 // rather than a Smi (either because {phi} is a Float64 phi, or because it's
912 // an Int32/Uint32 phi that doesn't fit on 31 bits), so we need the write
913 // barrier.
914 node->change_input(
915 input_index,
924 node->OverwriteWith<StoreFixedArrayElementWithWriteBarrier>();
925 }
926
928}
929
930// When a BranchIfToBooleanTrue has an untagged Int32/Float64 Phi as input, we
931// convert it to a BranchIfInt32ToBooleanTrue/BranchIfFloat6ToBooleanTrue to
932// avoid retagging the Phi.
934 BranchIfToBooleanTrue* node, Phi* phi, int input_index,
935 const ProcessingState* state) {
936 DCHECK_EQ(input_index, 0);
937
938 switch (phi->value_representation()) {
940 node->OverwriteWith<BranchIfInt32ToBooleanTrue>();
942
947
950
953 UNREACHABLE();
954 }
955}
956
957// {node} was using {phi} without any untagging, which means that it was using
958// {phi} as a tagged value, so, if we've untagged {phi}, we need to re-tag it
959// for {node}.
961 NodeBase* node, Phi* phi, int input_index, const ProcessingState* state) {
962 if (node->properties().is_conversion()) {
963 // {node} can't be an Untagging if we reached this point (because
964 // UpdateNodePhiInput is not called on untagging nodes).
965 DCHECK(!IsUntagging(node->opcode()));
966 // So, {node} has to be a conversion that takes an input an untagged node,
967 // and this input happens to be {phi}, which means that {node} is aware that
968 // {phi} isn't tagged. This means that {node} was inserted during the
969 // current phase. In this case, we don't do anything.
970 DCHECK_NE(phi->value_representation(), ValueRepresentation::kTagged);
971 DCHECK_NE(new_nodes_.find(node), new_nodes_.end());
972 } else {
973 node->change_input(
974 input_index,
977 }
979}
980
982 Phi* phi, BasicBlock* block, NewNodePosition pos,
983 const ProcessingState* state, std::optional<int> predecessor_index) {
984 DCHECK_IMPLIES(state == nullptr, pos == NewNodePosition::kEndOfBlock);
985
986 if (phi->value_representation() == ValueRepresentation::kTagged) {
987 return phi;
988 }
989
990 // Try to find an existing Tagged conversion for {phi} in {phi_taggings_}.
991 if (phi->has_key()) {
992 if (predecessor_index.has_value()) {
994 phi->key(), predecessor_index.value())) {
995 return tagging;
996 }
997 } else {
998 if (ValueNode* tagging = phi_taggings_.Get(phi->key())) {
999 return tagging;
1000 }
1001 }
1002 }
1003
1004 // We didn't already Tag {phi} on the current path; creating this tagging now.
1005 ValueNode* tagged = nullptr;
1006 switch (phi->value_representation()) {
1008 // It's important to use kCanonicalizeSmi for Float64ToTagged, as
1009 // otherwise, we could end up storing HeapNumbers in Smi fields.
1011 builder_->zone(), {phi},
1013 block, pos, state);
1014 break;
1016 // It's important to use kCanonicalizeSmi for HoleyFloat64ToTagged, as
1017 // otherwise, we could end up storing HeapNumbers in Smi fields.
1018 tagged =
1020 builder_->zone(), {phi},
1022 block, pos, state);
1023 break;
1026 block, pos, state);
1027 break;
1030 block, pos, state);
1031 break;
1033 // Already handled at the begining of this function.
1035 UNREACHABLE();
1036 }
1037
1038 if (predecessor_index.has_value()) {
1039 // We inserted the new tagging node in a predecessor of the current block,
1040 // so we shouldn't update the snapshot table for the current block (and we
1041 // can't update it for the predecessor either since its snapshot is sealed).
1043 block->is_loop() && block->successors().size() == 1 &&
1044 block->successors().at(0) == block);
1045 return tagged;
1046 }
1047
1048 if (phi->has_key()) {
1049 // The Key already existed, but wasn't set on the current path.
1050 phi_taggings_.Set(phi->key(), tagged);
1051 } else {
1052 // The Key didn't already exist, so we create it now.
1053 auto key = phi_taggings_.NewKey();
1054 phi->set_key(key);
1055 phi_taggings_.Set(key, tagged);
1056 }
1057 return tagged;
1058}
1059
1061 // TODO(dmercadier): it would be interesting to compute a fix point for loop
1062 // phis, or at least to go over the loop header twice.
1063 if (!block->has_phi()) return;
1064 for (Phi* phi : *block->phis()) {
1065 int last_input_idx = phi->input_count() - 1;
1066 ValueNode* backedge = phi->input(last_input_idx).node();
1067 if (phi->value_representation() == ValueRepresentation::kTagged) {
1068 // If the backedge is a Phi that was untagged, but {phi} is tagged, then
1069 // we need to retag the backedge.
1070
1071 // Identity nodes are used to replace outdated untagging nodes after a phi
1072 // has been untagged. Here, since the backedge was initially tagged, it
1073 // couldn't have been such an untagging node, so it shouldn't be an
1074 // Identity node now.
1075 DCHECK(!backedge->Is<Identity>());
1076
1078 // Since all Phi inputs are initially tagged, the fact that the backedge
1079 // is not tagged means that it's a Phi that we recently untagged.
1080 DCHECK(backedge->Is<Phi>());
1081 phi->change_input(
1082 last_input_idx,
1083 EnsurePhiTagged(backedge->Cast<Phi>(), current_block_,
1084 NewNodePosition::kEndOfBlock, /*state*/ nullptr));
1085 }
1086 } else {
1087 // If {phi} was untagged and its backedge became Identity, then we need to
1088 // unwrap it.
1089 DCHECK_NE(phi->value_representation(), ValueRepresentation::kTagged);
1090 if (backedge->Is<Identity>()) {
1091 // {backedge} should have the same representation as {phi}, although if
1092 // {phi} has HoleyFloat64 representation, the backedge is allowed to
1093 // have Float64 representation rather than HoleyFloat64.
1094 DCHECK((backedge->input(0).node()->value_representation() ==
1095 phi->value_representation()) ||
1096 (backedge->input(0).node()->value_representation() ==
1098 phi->value_representation() ==
1100 phi->change_input(last_input_idx, backedge->input(0).node());
1101 }
1102 }
1103 }
1104}
1105
1107 ValueNode* node, BasicBlock* block, DeoptFrame* deopt_frame) {
1108 return AddNode(node, block, NewNodePosition::kEndOfBlock, nullptr,
1109 deopt_frame);
1110}
1111
1113 ValueNode* node, BasicBlock* block, NewNodePosition pos,
1114 const ProcessingState* state, DeoptFrame* deopt_frame) {
1115 if (node->properties().can_eager_deopt()) {
1116 DCHECK_NOT_NULL(deopt_frame);
1117 node->SetEagerDeoptInfo(builder_->zone(), *deopt_frame);
1118 }
1119
1121 DCHECK_EQ(block, current_block_);
1122 DCHECK_NOT_NULL(state);
1123 // If the node iterator is currently on the 1st node of the block, we need
1124 // to use `InsertBefore` to insert a new node. Otherwise, we can use
1125 // `AddFront`.
1127 } else {
1128 block->nodes().push_back(node);
1129 }
1130
1131 RegisterNewNode(node);
1132 return node;
1133}
1134
1138 }
1139#ifdef DEBUG
1140 new_nodes_.insert(node);
1141#endif
1142}
1143
1145 BasicBlock* old_block, const BasicBlock* new_block) {
1146 // Sealing and saving current snapshot
1147 if (phi_taggings_.IsSealed()) {
1149 return;
1150 }
1151 snapshots_.emplace(old_block->id(), phi_taggings_.Seal());
1152
1153 // Setting up new snapshot
1154 predecessors_.clear();
1155
1156 if (!new_block->is_merge_block()) {
1157 BasicBlock* pred = new_block->predecessor();
1158 predecessors_.push_back(snapshots_.at(pred->id()));
1159 } else {
1160 int skip_backedge = new_block->is_loop();
1161 for (int i = 0; i < new_block->predecessor_count() - skip_backedge; i++) {
1162 BasicBlock* pred = new_block->predecessor_at(i);
1163 predecessors_.push_back(snapshots_.at(pred->id()));
1164 }
1165 }
1166
1167 auto merge_taggings =
1168 [&](Key key, base::Vector<ValueNode* const> predecessors) -> ValueNode* {
1169 for (ValueNode* node : predecessors) {
1170 if (node == nullptr) {
1171 // There is a predecessor that doesn't have this Tagging, so we'll
1172 // return nullptr, and if we need it in the future, we'll have to
1173 // recreate it. An alternative would be to eagerly insert this Tagging
1174 // in all of the other predecessors, but it's possible that it's not used
1175 // anymore or not on all future paths, so this could also introduce
1176 // unnecessary tagging.
1177 return static_cast<Phi*>(nullptr);
1178 }
1179 }
1180
1181 // Only merge blocks should require Phis.
1182 DCHECK(new_block->is_merge_block());
1183
1184 // We create a Phi to merge all of the existing taggings.
1185 int predecessor_count = new_block->predecessor_count();
1186 Phi* phi = Node::New<Phi>(builder_->zone(), predecessor_count,
1187 new_block->state(), interpreter::Register());
1188 for (int i = 0; static_cast<size_t>(i) < predecessors.size(); i++) {
1189 phi->set_input(i, predecessors[i]);
1190 }
1191 if (predecessors.size() != static_cast<size_t>(predecessor_count)) {
1192 // The backedge is omitted from {predecessors}. We set the Phi as its
1193 // own backedge.
1194 DCHECK(new_block->is_loop());
1195 phi->set_input(predecessor_count - 1, phi);
1196 }
1197 RegisterNewNode(phi);
1198 new_block->AddPhi(phi);
1199
1200 return phi;
1201 };
1202
1204}
1205
1206} // namespace maglev
1207} // namespace internal
1208} // namespace v8
SourcePosition pos
constexpr bool is_subset_of(EnumSet set) const
Definition enum-set.h:47
constexpr void Add(E element)
Definition enum-set.h:50
constexpr bool contains_only(E element) const
Definition enum-set.h:44
constexpr bool empty() const
Definition enum-set.h:34
constexpr bool contains_any(EnumSet set) const
Definition enum-set.h:41
constexpr void RemoveAll()
Definition enum-set.h:54
constexpr bool contains(E element) const
Definition enum-set.h:35
size_t size() const
void resize(size_t new_size)
V8_INLINE constexpr int32_t value() const
Definition tagged.h:427
void resize(size_t new_size)
void push_back(const T &value)
Key NewKey(KeyData data, Value initial_value=Value{})
const Value & GetPredecessorValue(Key key, int predecessor_index)
void StartNewSnapshot(base::Vector< const Snapshot > predecessors, const ChangeCallback &change_callback={})
base::SmallVector< BasicBlock *, 2 > successors() const
BasicBlock * predecessor_at(int i) const
MergePointInterpreterFrameState * state() const
BlockConstIterator begin() const
ValueNode * node() const
Definition maglev-ir.h:1300
MaglevGraphLabeller * graph_labeller() const
Float64Constant * GetFloat64Constant(double constant)
compiler::JSHeapBroker * broker() const
Int32Constant * GetInt32Constant(int32_t constant)
void RegisterNode(const NodeBase *node, const MaglevCompilationUnit *unit, BytecodeOffset bytecode_offset, SourcePosition position)
ValueNode * EnsurePhiTagged(Phi *phi, BasicBlock *block, NewNodePosition pos, const ProcessingState *state, std::optional< int > predecessor_index=std::nullopt)
void ConvertTaggedPhiTo(Phi *phi, ValueRepresentation repr, const HoistTypeList &hoist_untagging)
ValueNode * AddNode(ValueNode *node, BasicBlock *block, NewNodePosition pos, const ProcessingState *state, DeoptFrame *deopt_frame=nullptr)
void PreparePhiTaggings(BasicBlock *old_block, const BasicBlock *new_block)
ProcessResult UpdateNodePhiInput(CheckSmi *node, Phi *phi, int input_index, const ProcessingState *state)
ValueNode * AddNodeAtBlockEnd(ValueNode *new_node, BasicBlock *block, DeoptFrame *deopt_frame=nullptr)
ValueNode * GetReplacementForPhiInputConversion(ValueNode *conversion_node, Phi *phi, uint32_t input_index)
constexpr bool Is() const
Definition maglev-ir.h:2362
void change_input(int index, ValueNode *node)
Definition maglev-ir.h:2727
static Derived * New(Zone *zone, std::initializer_list< ValueNode * > inputs, Args &&... args)
Definition maglev-ir.h:1912
constexpr Input & input(int index)
Definition maglev-ir.h:1978
constexpr int input_count() const
Definition maglev-ir.h:1973
constexpr Opcode opcode() const
Definition maglev-ir.h:1939
Tagged< Smi > value() const
Definition maglev-ir.h:5200
constexpr ValueRepresentation value_representation() const
Definition maglev-ir.h:2577
Node * node
RpoNumber block
#define TRACE_INPUT_LABEL
#define TRACE_UNTAGGING(...)
constexpr Vector< T > VectorOf(T *start, size_t size)
Definition vector.h:360
base::EnumSet< UseRepresentation, int8_t > UseRepresentationSet
Definition maglev-ir.h:9402
base::EnumSet< ValueRepresentation, int8_t > ValueRepresentationSet
Definition maglev-ir.h:9401
constexpr bool NodeTypeIs(NodeType type, NodeType to_check)
Definition maglev-ir.h:669
NodeType StaticTypeForNode(compiler::JSHeapBroker *broker, LocalIsolate *isolate, ValueNode *node)
V8_EXPORT_PRIVATE FlagValues v8_flags
constexpr bool SmiValuesAre32Bits()
#define CHECK(condition)
Definition logging.h:124
#define DCHECK_NOT_NULL(val)
Definition logging.h:492
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define CHECK_EQ(lhs, rhs)
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_EQ(v1, v2)
Definition logging.h:485