// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/maglev/maglev-interpreter-frame-state.h"

#include "src/base/logging.h"

namespace v8 {
namespace internal {
namespace maglev {

namespace {

NodeType GetNodeType(compiler::JSHeapBroker* broker, LocalIsolate* isolate,
                     const KnownNodeAspects& aspects, ValueNode* node) {
  // We first check the KnownNodeAspects in order to return the most precise
  // type possible.
  NodeType type = aspects.NodeTypeFor(node);
  if (type != NodeType::kUnknown) {
    return type;
  }
  // If this node has no NodeInfo (or no known type in its NodeInfo), we fall
  // back to its static type.
  return StaticTypeForNode(broker, isolate, node);
}

}  // namespace

void KnownNodeAspects::Merge(const KnownNodeAspects& other, Zone* zone) {
  bool any_merged_map_is_unstable = false;
  DestructivelyIntersect(node_infos, other.node_infos,
                         [&](NodeInfo& lhs, const NodeInfo& rhs) {
                           lhs.MergeWith(rhs, zone, any_merged_map_is_unstable);
                           return !lhs.no_info_available();
                         });

  if (effect_epoch_ != other.effect_epoch_) {
    effect_epoch_ = std::max(effect_epoch_, other.effect_epoch_) + 1;
  }
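  // Note: when the two sides disagree, picking an epoch strictly greater than
  // both means no impure cached expression from either side can satisfy the
  // `effect_epoch >= effect_epoch_` filter below; presumably only pure
  // instructions (tagged with kEffectEpochForPureInstructions) survive such a
  // merge.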
  DestructivelyIntersect(
      available_expressions, other.available_expressions,
      [&](const AvailableExpression& lhs, const AvailableExpression& rhs) {
        DCHECK_IMPLIES(lhs.node == rhs.node,
                       lhs.effect_epoch == rhs.effect_epoch);
        DCHECK_NE(lhs.effect_epoch, kEffectEpochOverflow);
        DCHECK_EQ(Node::needs_epoch_check(lhs.node->opcode()),
                  lhs.effect_epoch != kEffectEpochForPureInstructions);

        return lhs.node == rhs.node && lhs.effect_epoch >= effect_epoch_;
      });

  this->any_map_for_any_node_is_unstable = any_merged_map_is_unstable;

  auto merge_loaded_properties =
      [&](ZoneMap<ValueNode*, ValueNode*>& lhs,
          const ZoneMap<ValueNode*, ValueNode*>& rhs) {
        // Loaded properties are maps of maps, so just do the destructive
        // intersection recursively.
        DestructivelyIntersect(lhs, rhs);
        return !lhs.empty();
      };
  DestructivelyIntersect(loaded_constant_properties,
                         other.loaded_constant_properties,
                         merge_loaded_properties);
  DestructivelyIntersect(loaded_properties, other.loaded_properties,
                         merge_loaded_properties);
  DestructivelyIntersect(loaded_context_constants,
                         other.loaded_context_constants);
  if (may_have_aliasing_contexts() != other.may_have_aliasing_contexts()) {
    if (may_have_aliasing_contexts() == ContextSlotLoadsAlias::None) {
      may_have_aliasing_contexts_ = other.may_have_aliasing_contexts_;
    } else if (other.may_have_aliasing_contexts() !=
               ContextSlotLoadsAlias::None) {
      may_have_aliasing_contexts_ = ContextSlotLoadsAlias::Yes;
    }
  }
  DestructivelyIntersect(loaded_context_slots, other.loaded_context_slots);
}

namespace {

template <typename Key>
bool NextInIgnoreList(typename ZoneSet<Key>::const_iterator& ignore,
                      typename ZoneSet<Key>::const_iterator& ignore_end,
                      const Key& cur) {
  while (ignore != ignore_end && *ignore < cur) {
    ++ignore;
  }
  return ignore != ignore_end && *ignore == cur;
}
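// Note: this is a linear merge-walk. Both the ignore set and the keys the
// callers pass in are visited in ascending order, so `ignore` only ever
// advances and a whole scan over both sequences stays O(n + m).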

}  // namespace

void KnownNodeAspects::ClearUnstableNodeAspects() {
  if (v8_flags.trace_maglev_graph_building) {
    std::cout << "  ! Clearing unstable node aspects" << std::endl;
  }
  ClearUnstableMaps();
  // Side-effects can change object contents, so we have to clear
  // our known loaded properties -- however, constant properties are known
  // to not change (and we added a dependency on this), so we don't have to
  // clear those.
  loaded_properties.clear();
  loaded_context_slots.clear();
  may_have_aliasing_contexts_ = ContextSlotLoadsAlias::None;
}

KnownNodeAspects* KnownNodeAspects::CloneForLoopHeader(
    bool optimistic, LoopEffects* loop_effects, Zone* zone) const {
  return zone->New<KnownNodeAspects>(*this, optimistic, loop_effects, zone);
}

KnownNodeAspects::KnownNodeAspects(const KnownNodeAspects& other,
                                   bool optimistic_initial_state,
                                   LoopEffects* loop_effects, Zone* zone)
    : any_map_for_any_node_is_unstable(false),
      loaded_constant_properties(other.loaded_constant_properties),
      loaded_properties(zone),
      loaded_context_constants(other.loaded_context_constants),
      loaded_context_slots(zone),
      available_expressions(zone),
      may_have_aliasing_contexts_(ContextSlotLoadsAlias::None),
      effect_epoch_(other.effect_epoch_),
      node_infos(zone) {
  if (!other.any_map_for_any_node_is_unstable) {
    node_infos = other.node_infos;
#ifdef DEBUG
    for (const auto& it : node_infos) {
      DCHECK(!it.second.any_map_is_unstable());
    }
#endif
  } else if (optimistic_initial_state &&
             !loop_effects->unstable_aspects_cleared) {
    node_infos = other.node_infos;
    any_map_for_any_node_is_unstable = other.any_map_for_any_node_is_unstable;
  } else {
    for (const auto& it : other.node_infos) {
      node_infos.emplace(it.first,
                         NodeInfo::ClearUnstableMapsOnCopy{it.second});
    }
  }
  if (optimistic_initial_state && !loop_effects->unstable_aspects_cleared) {
    // IMPORTANT: Whatever we clone here needs to be checked for consistency
    // when we try to terminate the loop in `IsCompatibleWithLoopHeader`.
    if (loop_effects->objects_written.empty() &&
        loop_effects->keys_cleared.empty()) {
      loaded_properties = other.loaded_properties;
    } else {
      auto cleared_key = loop_effects->keys_cleared.begin();
      auto cleared_keys_end = loop_effects->keys_cleared.end();
      auto cleared_obj = loop_effects->objects_written.begin();
      auto cleared_objs_end = loop_effects->objects_written.end();
      for (auto loaded_key : other.loaded_properties) {
        if (NextInIgnoreList(cleared_key, cleared_keys_end, loaded_key.first)) {
          continue;
        }
        auto& props_for_key =
            loaded_properties.try_emplace(loaded_key.first, zone).first->second;
        for (auto loaded_obj : loaded_key.second) {
          if (!NextInIgnoreList(cleared_obj, cleared_objs_end,
                                loaded_obj.first)) {
            props_for_key.emplace(loaded_obj);
          }
        }
      }
    }
    if (loop_effects->context_slot_written.empty()) {
      loaded_context_slots = other.loaded_context_slots;
    } else {
      auto slot_written = loop_effects->context_slot_written.begin();
      auto slot_written_end = loop_effects->context_slot_written.end();
      for (auto loaded : other.loaded_context_slots) {
        if (!NextInIgnoreList(slot_written, slot_written_end, loaded.first)) {
          loaded_context_slots.emplace(loaded);
        }
      }
    }
    if (!loaded_context_slots.empty()) {
      if (loop_effects->may_have_aliasing_contexts) {
        may_have_aliasing_contexts_ = ContextSlotLoadsAlias::Yes;
      } else {
        may_have_aliasing_contexts_ = other.may_have_aliasing_contexts();
      }
    }
  }

  // To account for the back-jump we must not allow effects to be reshuffled
  // across loop headers.
  // TODO(olivf): Only do this if the loop contains write effects.
  increment_effect_epoch();
  for (const auto& e : other.available_expressions) {
    if (e.second.effect_epoch >= effect_epoch()) {
      available_expressions.emplace(e);
    }
  }
}

namespace {

// Takes two ordered maps and ensures that every element in `as` is
// * also present in `bs`, and
// * `Compare(a, b)` holds for each value.
template <typename As, typename Bs, typename CompareFunction,
          typename IsEmptyFunction = std::nullptr_t>
bool AspectIncludes(const As& as, const Bs& bs, const CompareFunction& Compare,
                    const IsEmptyFunction IsEmpty = nullptr) {
  typename As::const_iterator a = as.begin();
  typename Bs::const_iterator b = bs.begin();
  while (a != as.end()) {
    if constexpr (!std::is_same_v<IsEmptyFunction, std::nullptr_t>) {
      if (IsEmpty(a->second)) {
        ++a;
        continue;
      }
    }
    if (b == bs.end()) return false;
    while (b->first < a->first) {
      ++b;
      if (b == bs.end()) return false;
    }
    if (!(a->first == b->first)) return false;
    if (!Compare(a->second, b->second)) {
      return false;
    }
    ++a;
    ++b;
  }
  return true;
}
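// Worked example: with as = {1 -> A, 3 -> C} and bs = {1 -> A', 2 -> B,
// 3 -> C'}, the walk skips bs's extra key 2 and returns true iff
// Compare(A, A') and Compare(C, C') both hold; a key of `as` that is missing
// from `bs` fails immediately.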

// Same as above but allows `as` to contain empty collections as values, which
// do not need to be present in `bs`.
template <typename As, typename Bs, typename Function>
bool MaybeEmptyAspectIncludes(const As& as, const Bs& bs,
                              const Function& Compare) {
  return AspectIncludes<As, Bs, Function>(as, bs, Compare,
                                          [](auto x) { return x.empty(); });
}

template <typename As, typename Bs, typename Function>
bool MaybeNullAspectIncludes(const As& as, const Bs& bs,
                             const Function& Compare) {
  return AspectIncludes<As, Bs, Function>(as, bs, Compare,
                                          [](auto x) { return x == nullptr; });
}

bool NodeInfoIncludes(const NodeInfo& before, const NodeInfo& after) {
  if (!NodeTypeIs(after.type(), before.type())) {
    return false;
  }
  if (before.possible_maps_are_known() && before.any_map_is_unstable()) {
    if (!after.possible_maps_are_known()) {
      return false;
    }
    if (!before.possible_maps().contains(after.possible_maps())) {
      return false;
    }
  }
  return true;
}

bool NodeInfoIsEmpty(const NodeInfo& info) {
  return info.type() == NodeType::kUnknown && !info.possible_maps_are_known();
}

bool NodeInfoTypeIs(const NodeInfo& before, const NodeInfo& after) {
  return NodeTypeIs(after.type(), before.type());
}

bool SameValue(ValueNode* before, ValueNode* after) { return before == after; }

}  // namespace

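// Checks that this state (the state at the loop backedge) still implies
// everything the optimistically-cloned loop header state assumed: node types,
// unstable map sets, loaded properties and loaded context slots. If it does
// not, the caller has to abandon the merge and peel the loop instead (see
// TryMergeLoop).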
bool KnownNodeAspects::IsCompatibleWithLoopHeader(
    const KnownNodeAspects& loop_header) const {
  // Needs to be in sync with `CloneForLoopHeader(zone, true)`.

  // Analysis state can change with loads.
  if (!loop_header.loaded_context_slots.empty() &&
      loop_header.may_have_aliasing_contexts() != ContextSlotLoadsAlias::Yes &&
      loop_header.may_have_aliasing_contexts() !=
          may_have_aliasing_contexts() &&
      may_have_aliasing_contexts() != ContextSlotLoadsAlias::None) {
    if (V8_UNLIKELY(v8_flags.trace_maglev_loop_speeling)) {
      std::cout << "KNA after loop has incompatible "
                   "loop_header.may_have_aliasing_contexts\n";
    }
    return false;
  }

  bool had_effects = effect_epoch() != loop_header.effect_epoch();

  if (!had_effects) {
    if (!AspectIncludes(loop_header.node_infos, node_infos, NodeInfoTypeIs,
                        NodeInfoIsEmpty)) {
      if (V8_UNLIKELY(v8_flags.trace_maglev_loop_speeling)) {
        std::cout << "KNA after effectless loop has incompatible node_infos\n";
      }
      return false;
    }
    // In debug builds we do a full comparison to ensure that without an effect
    // epoch change all unstable properties still hold.
#ifndef DEBUG
    return true;
#endif
  }

  if (!AspectIncludes(loop_header.node_infos, node_infos, NodeInfoIncludes,
                      NodeInfoIsEmpty)) {
    if (V8_UNLIKELY(v8_flags.trace_maglev_loop_speeling)) {
      std::cout << "KNA after loop has incompatible node_infos\n";
    }
    DCHECK(had_effects);
    return false;
  }

  if (!MaybeEmptyAspectIncludes(
          loop_header.loaded_properties, loaded_properties,
          [](auto a, auto b) { return AspectIncludes(a, b, SameValue); })) {
    if (V8_UNLIKELY(v8_flags.trace_maglev_loop_speeling)) {
      std::cout << "KNA after loop has incompatible loaded_properties\n";
    }
    DCHECK(had_effects);
    return false;
  }

  if (!MaybeNullAspectIncludes(loop_header.loaded_context_slots,
                               loaded_context_slots, SameValue)) {
    if (V8_UNLIKELY(v8_flags.trace_maglev_loop_speeling)) {
      std::cout << "KNA after loop has incompatible loaded_context_slots\n";
    }
    DCHECK(had_effects);
    return false;
  }

  return true;
}

// static
MergePointInterpreterFrameState* MergePointInterpreterFrameState::New(
    const MaglevCompilationUnit& info, const InterpreterFrameState& state,
    int merge_offset, int predecessor_count, BasicBlock* predecessor,
    const compiler::BytecodeLivenessState* liveness) {
  MergePointInterpreterFrameState* merge_state =
      info.zone()->New<MergePointInterpreterFrameState>(
          info, merge_offset, predecessor_count, 1,
          info.zone()->AllocateArray<BasicBlock*>(predecessor_count),
          BasicBlockType::kDefault, liveness);
  int i = 0;
  merge_state->frame_state_.ForEachValue(
      info, [&](ValueNode*& entry, interpreter::Register reg) {
        entry = state.get(reg);
        // Initialise the alternatives list and cache the alternative
        // representations of the node.
        Alternatives::List* per_predecessor_alternatives =
            new (&merge_state->per_predecessor_alternatives_[i])
                Alternatives::List();
        per_predecessor_alternatives->Add(info.zone()->New<Alternatives>(
            state.known_node_aspects()->TryGetInfoFor(entry)));
        i++;
      });
  merge_state->predecessors_[0] = predecessor;
  merge_state->known_node_aspects_ =
      state.known_node_aspects()->Clone(info.zone());
  state.virtual_objects().Snapshot();
  merge_state->set_virtual_objects(state.virtual_objects());
  return merge_state;
}

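// Note: loop headers pre-create Phis only for registers that the bytecode's
// assignment analysis says the loop body writes; other slots stay nullptr
// (or, for resumable loops, copy the initial values) until the forward edge
// is merged. Resumable loops additionally get a context Phi, see the comment
// in the body below.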
// static
MergePointInterpreterFrameState* MergePointInterpreterFrameState::NewForLoop(
    const InterpreterFrameState& start_state, const MaglevCompilationUnit& info,
    int merge_offset, int predecessor_count,
    const compiler::BytecodeLivenessState* liveness,
    const compiler::LoopInfo* loop_info, bool has_been_peeled) {
  MergePointInterpreterFrameState* state =
      info.zone()->New<MergePointInterpreterFrameState>(
          info, merge_offset, predecessor_count, 0,
          info.zone()->AllocateArray<BasicBlock*>(predecessor_count),
          BasicBlockType::kLoopHeader, liveness);
  state->bitfield_ =
      kIsLoopWithPeeledIterationBit::update(state->bitfield_, has_been_peeled);
  state->loop_metadata_ = LoopMetadata{loop_info, nullptr};
  if (loop_info->resumable()) {
    state->known_node_aspects_ =
        info.zone()->New<KnownNodeAspects>(info.zone());
    state->bitfield_ = kIsResumableLoopBit::update(state->bitfield_, true);
  }
  auto& assignments = loop_info->assignments();
  auto& frame_state = state->frame_state_;
  int i = 0;
  frame_state.ForEachParameter(
      info, [&](ValueNode*& entry, interpreter::Register reg) {
        entry = nullptr;
        if (assignments.ContainsParameter(reg.ToParameterIndex())) {
          entry = state->NewLoopPhi(info.zone(), reg);
        } else if (state->is_resumable_loop()) {
          // Copy initial values out of the start state.
          entry = start_state.get(reg);
          // Initialise the alternatives list for this value.
          new (&state->per_predecessor_alternatives_[i]) Alternatives::List();
          DCHECK(entry->Is<InitialValue>());
        }
        ++i;
      });
  frame_state.context(info) = nullptr;
  if (state->is_resumable_loop()) {
    // While contexts are always the same at specific locations, resumable
    // loops do have different nodes to set the context across resume points.
    // Create a phi for them.
    frame_state.context(info) = state->NewLoopPhi(
        info.zone(), interpreter::Register::current_context());
  }
  frame_state.ForEachLocal(
      info, [&](ValueNode*& entry, interpreter::Register reg) {
        entry = nullptr;
        if (assignments.ContainsLocal(reg.index())) {
          entry = state->NewLoopPhi(info.zone(), reg);
        }
      });
  DCHECK(!frame_state.liveness()->AccumulatorIsLive());
  return state;
}

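// Note: catch-block merge states have no predecessor list (it is nullptr,
// with a predecessor count of 0); live values get exception Phis. Throwing
// sites are merged via MergeThrow, which merges the known node aspects and
// virtual objects but does not record per-predecessor Phi inputs (exception
// phis are special-cased in MergeValue below).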
// static
MergePointInterpreterFrameState*
MergePointInterpreterFrameState::NewForCatchBlock(
    const MaglevCompilationUnit& unit,
    const compiler::BytecodeLivenessState* liveness, int handler_offset,
    bool was_used, interpreter::Register context_register, Graph* graph) {
  Zone* const zone = unit.zone();
  MergePointInterpreterFrameState* state =
      zone->New<MergePointInterpreterFrameState>(
          unit, handler_offset, 0, 0, nullptr,
          was_used ? BasicBlockType::kExceptionHandlerStart
                   : BasicBlockType::kUnusedExceptionHandlerStart,
          liveness);
  auto& frame_state = state->frame_state_;
  // If the accumulator is live, the ExceptionPhi associated to it is the
  // first one in the block. That ensures it gets kReturnValue0 in the
  // register allocator. See
  // StraightForwardRegisterAllocator::AllocateRegisters.
  if (frame_state.liveness()->AccumulatorIsLive()) {
    frame_state.accumulator(unit) = state->NewExceptionPhi(
        zone, interpreter::Register::virtual_accumulator());
  }
  frame_state.ForEachRegister(
      unit,
      [&](ValueNode*& entry, interpreter::Register reg) { entry = nullptr; });
  state->catch_block_context_register_ = context_register;
  return state;
}

MergePointInterpreterFrameState::MergePointInterpreterFrameState(
    const MaglevCompilationUnit& info, int merge_offset, int predecessor_count,
    int predecessors_so_far, BasicBlock** predecessors, BasicBlockType type,
    const compiler::BytecodeLivenessState* liveness)
    : merge_offset_(merge_offset),
      predecessor_count_(predecessor_count),
      predecessors_so_far_(predecessors_so_far),
      bitfield_(kBasicBlockTypeBits::encode(type)),
      predecessors_(predecessors),
      frame_state_(info, liveness),
      per_predecessor_alternatives_(
          type == BasicBlockType::kExceptionHandlerStart
              ? nullptr
              : info.zone()->AllocateArray<Alternatives::List>(
                    frame_state_.size(info))) {}

namespace {
void PrintBeforeMerge(const MaglevCompilationUnit& compilation_unit,
                      ValueNode* current_value, ValueNode* unmerged_value,
                      interpreter::Register reg, KnownNodeAspects* kna) {
  if (!v8_flags.trace_maglev_graph_building) return;
  std::cout << "  " << reg.ToString() << ": "
            << PrintNodeLabel(compilation_unit.graph_labeller(), current_value)
            << "<";
  if (kna) {
    if (auto cur_info = kna->TryGetInfoFor(current_value)) {
      std::cout << cur_info->type();
      if (cur_info->possible_maps_are_known()) {
        std::cout << " " << cur_info->possible_maps().size();
      }
    }
  }
  std::cout << "> <- "
            << PrintNodeLabel(compilation_unit.graph_labeller(), unmerged_value)
            << "<";
  if (kna) {
    if (auto in_info = kna->TryGetInfoFor(unmerged_value)) {
      std::cout << in_info->type();
      if (in_info->possible_maps_are_known()) {
        std::cout << " " << in_info->possible_maps().size();
      }
    }
  }
  std::cout << ">";
}
void PrintAfterMerge(const MaglevCompilationUnit& compilation_unit,
                     ValueNode* merged_value, KnownNodeAspects* kna) {
  if (!v8_flags.trace_maglev_graph_building) return;
  std::cout << " => "
            << PrintNodeLabel(compilation_unit.graph_labeller(), merged_value)
            << ": "
            << PrintNode(compilation_unit.graph_labeller(), merged_value)
            << "<";

  if (kna) {
    if (auto out_info = kna->TryGetInfoFor(merged_value)) {
      std::cout << out_info->type();
      if (out_info->possible_maps_are_known()) {
        std::cout << " " << out_info->possible_maps().size();
      }
    }
  }

  std::cout << ">" << std::endl;
}
}  // namespace

void MergePointInterpreterFrameState::Merge(MaglevGraphBuilder* builder,
                                            InterpreterFrameState& unmerged,
                                            BasicBlock* predecessor) {
  Merge(builder, *builder->compilation_unit(), unmerged, predecessor);
}

void MergePointInterpreterFrameState::MergePhis(
    MaglevGraphBuilder* builder, MaglevCompilationUnit& compilation_unit,
    InterpreterFrameState& unmerged, BasicBlock* predecessor,
    bool optimistic_loop_phis) {
  int i = 0;
  frame_state_.ForEachValue(
      compilation_unit, [&](ValueNode*& value, interpreter::Register reg) {
        PrintBeforeMerge(compilation_unit, value, unmerged.get(reg), reg,
                         known_node_aspects_);
        value = MergeValue(builder, reg, *unmerged.known_node_aspects(), value,
                           unmerged.get(reg), &per_predecessor_alternatives_[i],
                           optimistic_loop_phis);
        PrintAfterMerge(compilation_unit, value, known_node_aspects_);
        ++i;
      });
}

void MergePointInterpreterFrameState::MergeVirtualObject(
    MaglevGraphBuilder* builder, const VirtualObjectList unmerged_vos,
    const KnownNodeAspects& unmerged_aspects, VirtualObject* merged,
    VirtualObject* unmerged) {
  if (merged == unmerged) {
    // No need to merge.
    return;
  }
  DCHECK(unmerged->compatible_for_merge(merged));

  if (v8_flags.trace_maglev_graph_building) {
    std::cout << " - Merging VOS: "
              << PrintNodeLabel(builder->compilation_unit()->graph_labeller(),
                                merged)
              << "(merged) and "
              << PrintNodeLabel(builder->compilation_unit()->graph_labeller(),
                                unmerged)
              << "(unmerged)" << std::endl;
  }

  auto maybe_result = merged->Merge(
      unmerged, builder->NewObjectId(), builder->zone(),
      [&](ValueNode* a, ValueNode* b) {
        return MergeVirtualObjectValue(builder, unmerged_aspects, a, b);
      });
  if (!maybe_result) {
    return unmerged->allocation()->ForceEscaping();
  }
  VirtualObject* result = *maybe_result;
  result->set_allocation(unmerged->allocation());
  result->Snapshot();
  unmerged->allocation()->UpdateObject(result);
  frame_state_.virtual_objects().Add(result);
}

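// Virtual objects are matched across the two states by the InlinedAllocation
// that backs them. Both lists are walked in reverse allocation order only up
// to the first object the states share, so the work is bounded by the objects
// that actually diverged since the states split.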
void MergePointInterpreterFrameState::MergeVirtualObjects(
    MaglevGraphBuilder* builder, MaglevCompilationUnit& compilation_unit,
    const VirtualObjectList unmerged_vos,
    const KnownNodeAspects& unmerged_aspects) {
  if (frame_state_.virtual_objects().is_empty()) return;
  if (unmerged_vos.is_empty()) return;

  frame_state_.virtual_objects().Snapshot();

  PrintVirtualObjects(compilation_unit, unmerged_vos, "VOs before merge:");

  SmallZoneMap<InlinedAllocation*, VirtualObject*, 10> unmerged_map(
      builder->zone());
  SmallZoneMap<InlinedAllocation*, VirtualObject*, 10> merged_map(
      builder->zone());

  // We iterate both lists in reverse order of ids, collecting the unmerged
  // objects into the map, until we find a common virtual object.
  VirtualObjectList::WalkUntilCommon(
      frame_state_.virtual_objects(), unmerged_vos,
      [&](VirtualObject* vo, VirtualObjectList vos) {
        // If we have a version in the map, it should be the most up-to-date,
        // since the list is in reverse order.
        auto& map = unmerged_vos == vos ? unmerged_map : merged_map;
        map.emplace(vo->allocation(), vo);
      });

  // Walk the merged map (values from the merged state) and merge values.
  for (auto [_, merged] : merged_map) {
    VirtualObject* unmerged = nullptr;
    auto it = unmerged_map.find(merged->allocation());
    if (it != unmerged_map.end()) {
      unmerged = it->second;
      unmerged_map.erase(it);
    } else {
      unmerged = unmerged_vos.FindAllocatedWith(merged->allocation());
    }
    if (unmerged != nullptr) {
      MergeVirtualObject(builder, unmerged_vos, unmerged_aspects, merged,
                         unmerged);
    }
  }

  // Walk the unmerged map (values from the interpreter frame state) and merge
  // values. If the value was already merged, we would have removed it from
  // the unmerged_map.
  for (auto [_, unmerged] : unmerged_map) {
    VirtualObject* merged = nullptr;
    auto it = merged_map.find(unmerged->allocation());
    if (it != merged_map.end()) {
      merged = it->second;
    } else {
      merged = frame_state_.virtual_objects().FindAllocatedWith(
          unmerged->allocation());
    }
    if (merged != nullptr) {
      MergeVirtualObject(builder, unmerged_vos, unmerged_aspects, merged,
                         unmerged);
    }
  }

  PrintVirtualObjects(compilation_unit, unmerged_vos, "VOs after merge:");
}

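// A loop header is initialized from its single forward predecessor. With
// optimistic peeling, the cloned aspects keep any facts that the recorded
// LoopEffects of the peeled body cannot invalidate; whether that speculation
// held is verified later via IsCompatibleWithLoopHeader.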
void MergePointInterpreterFrameState::InitializeLoop(
    MaglevGraphBuilder* builder, MaglevCompilationUnit& compilation_unit,
    InterpreterFrameState& unmerged, BasicBlock* predecessor,
    bool optimistic_initial_state, LoopEffects* loop_effects) {
  DCHECK_IMPLIES(optimistic_initial_state,
                 v8_flags.maglev_optimistic_peeled_loops);
  predecessors_[predecessors_so_far_] = predecessor;

  DCHECK_NULL(known_node_aspects_);
  known_node_aspects_ = unmerged.known_node_aspects()->CloneForLoopHeader(
      optimistic_initial_state, loop_effects, builder->zone());
  unmerged.virtual_objects().Snapshot();
  set_virtual_objects(unmerged.virtual_objects());
  if (v8_flags.trace_maglev_graph_building) {
    std::cout << "Initializing "
              << (optimistic_initial_state ? "optimistic " : "")
              << "loop state..." << std::endl;
  }

  MergePhis(builder, compilation_unit, unmerged, predecessor,
            optimistic_initial_state);

  predecessors_so_far_ = 1;
}

void MergePointInterpreterFrameState::InitializeWithBasicBlock(
    BasicBlock* block) {
  for (Phi* phi : phis_) {
    phi->set_owner(block);
  }
}

void MergePointInterpreterFrameState::Merge(
    MaglevGraphBuilder* builder, MaglevCompilationUnit& compilation_unit,
    InterpreterFrameState& unmerged, BasicBlock* predecessor) {
  predecessors_[predecessors_so_far_] = predecessor;

  if (known_node_aspects_ == nullptr) {
    return InitializeLoop(builder, compilation_unit, unmerged, predecessor);
  }

  known_node_aspects_->Merge(*unmerged.known_node_aspects(), builder->zone());
  if (v8_flags.trace_maglev_graph_building) {
    std::cout << "Merging..." << std::endl;
  }

  MergeVirtualObjects(builder, compilation_unit, unmerged.virtual_objects(),
                      *unmerged.known_node_aspects());
  MergePhis(builder, compilation_unit, unmerged, predecessor, false);

  predecessors_so_far_++;
  DCHECK_LE(predecessors_so_far_, predecessor_count_);
}

void MergePointInterpreterFrameState::MergeLoop(
    MaglevGraphBuilder* builder, InterpreterFrameState& loop_end_state,
    BasicBlock* loop_end_block) {
  MergeLoop(builder, *builder->compilation_unit(), loop_end_state,
            loop_end_block);
}

void MergePointInterpreterFrameState::MergeLoop(
    MaglevGraphBuilder* builder, MaglevCompilationUnit& compilation_unit,
    InterpreterFrameState& loop_end_state, BasicBlock* loop_end_block) {
  // This should be the last predecessor we try to merge.
  DCHECK_EQ(predecessors_so_far_, predecessor_count_ - 1);
  predecessors_[predecessor_count_ - 1] = loop_end_block;

  backedge_deopt_frame_ =
      builder->zone()->New<DeoptFrame>(builder->GetLatestCheckpointedFrame());

  if (v8_flags.trace_maglev_graph_building) {
    std::cout << "Merging loop backedge..." << std::endl;
  }
  frame_state_.ForEachValue(
      compilation_unit, [&](ValueNode* value, interpreter::Register reg) {
        PrintBeforeMerge(compilation_unit, value, loop_end_state.get(reg), reg,
                         known_node_aspects_);
        MergeLoopValue(builder, reg, *loop_end_state.known_node_aspects(),
                       value, loop_end_state.get(reg));
        PrintAfterMerge(compilation_unit, value, known_node_aspects_);
      });
  predecessors_so_far_++;
  DCHECK_EQ(predecessors_so_far_, predecessor_count_);

  // We have to clear the LoopInfo (which is used to record more precise use
  // hints for Phis) for 2 reasons:
  //
  // - Phi::RecordUseReprHint checks if a use is inside the loop defining the
  //   Phi by checking if the LoopInfo of the loop Phi "Contains" the current
  //   bytecode offset, but this will be wrong if the Phi is in a function that
  //   was inlined (because the LoopInfo contains the first and last bytecode
  //   offset of the loop **in its own function**).
  //
  // - LoopInfo is obtained from the {header_to_info_} member of
  //   BytecodeAnalysis, but the BytecodeAnalysis is a member of the
  //   MaglevGraphBuilder, and thus gets destructed when the MaglevGraphBuilder
  //   created for inlining is destructed. LoopInfo would then become a stale
  //   pointer.
  loop_metadata_->loop_info = nullptr;
}

bool MergePointInterpreterFrameState::TryMergeLoop(
    MaglevGraphBuilder* builder, InterpreterFrameState& loop_end_state,
    const std::function<BasicBlock*()>& FinishBlock) {
  // This should be the last predecessor we try to merge.
  DCHECK_EQ(predecessors_so_far_, predecessor_count_ - 1);

  backedge_deopt_frame_ =
      builder->zone()->New<DeoptFrame>(builder->GetLatestCheckpointedFrame());

  auto& compilation_unit = *builder->compilation_unit();

  DCHECK(v8_flags.maglev_optimistic_peeled_loops);

  // TODO(olivf): This could be done faster by consulting loop_effects_
  if (!loop_end_state.known_node_aspects()->IsCompatibleWithLoopHeader(
          *known_node_aspects_)) {
    if (v8_flags.trace_maglev_graph_building) {
      std::cout << "Merging failed, peeling loop instead... " << std::endl;
    }
    return false;
  }

  bool phis_can_merge = true;
  frame_state_.ForEachValue(compilation_unit, [&](ValueNode* value,
                                                  interpreter::Register reg) {
    if (!value->Is<Phi>()) return;
    Phi* phi = value->Cast<Phi>();
    if (!phi->is_loop_phi()) return;
    if (phi->merge_state() != this) return;
    NodeType old_type = GetNodeType(builder->broker(), builder->local_isolate(),
                                    *known_node_aspects_, phi);
    if (old_type != NodeType::kUnknown) {
      NodeType new_type = GetNodeType(
          builder->broker(), builder->local_isolate(),
          *loop_end_state.known_node_aspects(), loop_end_state.get(reg));
      if (!NodeTypeIs(new_type, old_type)) {
        if (v8_flags.trace_maglev_loop_speeling) {
          std::cout << "Cannot merge " << new_type << " into " << old_type
                    << " for r" << reg.index() << "\n";
        }
        phis_can_merge = false;
      }
    }
  });
  if (!phis_can_merge) {
    return false;
  }

  BasicBlock* loop_end_block = FinishBlock();
  int input = predecessor_count_ - 1;
  loop_end_block->set_predecessor_id(input);
  predecessors_[input] = loop_end_block;

  if (v8_flags.trace_maglev_graph_building) {
    std::cout << "Next peeling not needed due to compatible state" << std::endl;
  }

  frame_state_.ForEachValue(
      compilation_unit, [&](ValueNode* value, interpreter::Register reg) {
        PrintBeforeMerge(compilation_unit, value, loop_end_state.get(reg), reg,
                         known_node_aspects_);
        MergeLoopValue(builder, reg, *loop_end_state.known_node_aspects(),
                       value, loop_end_state.get(reg));
        PrintAfterMerge(compilation_unit, value, known_node_aspects_);
      });
  predecessors_so_far_++;
  DCHECK_EQ(predecessors_so_far_, predecessor_count_);
  loop_metadata_->loop_info = nullptr;
  return true;
}

void MergePointInterpreterFrameState::set_loop_effects(
    LoopEffects* loop_effects) {
  DCHECK(is_loop());
  DCHECK(loop_metadata_.has_value());
  loop_metadata_->loop_effects = loop_effects;
}

const LoopEffects* MergePointInterpreterFrameState::loop_effects() {
  DCHECK(is_loop());
  DCHECK(loop_metadata_.has_value());
  return loop_metadata_->loop_effects;
}

void MergePointInterpreterFrameState::MergeThrow(
    MaglevGraphBuilder* builder, const MaglevCompilationUnit* handler_unit,
    const KnownNodeAspects& known_node_aspects,
    const VirtualObjectList virtual_objects) {
  // We don't count total predecessors on exception handlers, but we do want
  // to special case the first predecessor, so we do count predecessors_so_far.

  DCHECK_EQ(builder->compilation_unit(), handler_unit);

  const InterpreterFrameState& builder_frame =
      builder->current_interpreter_frame();

  if (v8_flags.trace_maglev_graph_building) {
    std::cout << "- Merging into exception handler @" << this << std::endl;
    PrintVirtualObjects(*handler_unit, virtual_objects);
  }

  if (known_node_aspects_ == nullptr) {
    known_node_aspects_ = known_node_aspects.Clone(builder->zone());
    virtual_objects.Snapshot();
    frame_state_.set_virtual_objects(virtual_objects);
  } else {
    known_node_aspects_->Merge(known_node_aspects, builder->zone());
    MergeVirtualObjects(builder, *builder->compilation_unit(), virtual_objects,
                        known_node_aspects);
  }

  frame_state_.ForEachParameter(
      *handler_unit, [&](ValueNode*& value, interpreter::Register reg) {
        PrintBeforeMerge(*handler_unit, value, builder_frame.get(reg), reg,
                         known_node_aspects_);
        value = MergeValue(builder, reg, known_node_aspects, value,
                           builder_frame.get(reg), nullptr);
        PrintAfterMerge(*handler_unit, value, known_node_aspects_);
      });
  frame_state_.ForEachLocal(
      *handler_unit, [&](ValueNode*& value, interpreter::Register reg) {
        PrintBeforeMerge(*handler_unit, value, builder_frame.get(reg), reg,
                         known_node_aspects_);
        value = MergeValue(builder, reg, known_node_aspects, value,
                           builder_frame.get(reg), nullptr);
        PrintAfterMerge(*handler_unit, value, known_node_aspects_);
      });

  // Pick out the context value from the incoming registers.
  // TODO(leszeks): This should be the same for all incoming states, but we
  // lose the identity for generator-restored context. If generator value
  // restores were handled differently, we could avoid emitting a Phi here.
  ValueNode*& context = frame_state_.context(*handler_unit);
  PrintBeforeMerge(*handler_unit, context,
                   builder_frame.get(catch_block_context_register_),
                   catch_block_context_register_, known_node_aspects_);
  context = MergeValue(
      builder, catch_block_context_register_, known_node_aspects, context,
      builder_frame.get(catch_block_context_register_), nullptr);
  PrintAfterMerge(*handler_unit, context, known_node_aspects_);

  predecessors_so_far_++;
}

namespace {

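// The FromXToTagged helpers below materialize a tagged version of an untagged
// value and append the conversion to the end of the *predecessor* block, so
// that Phi inputs are always tagged. Which conversion is emitted depends on
// the value representation and on what is statically known, e.g. a value
// known to be a Smi can be tagged without a Smi check.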
ValueNode* FromInt32ToTagged(const MaglevGraphBuilder* builder,
                             NodeType node_type, ValueNode* value,
                             BasicBlock* predecessor) {
  DCHECK_EQ(value->properties().value_representation(),
            ValueRepresentation::kInt32);
  DCHECK(!value->properties().is_conversion());

  ValueNode* tagged;
  if (value->Is<Int32Constant>()) {
    int32_t constant = value->Cast<Int32Constant>()->value();
    if (Smi::IsValid(constant)) {
      return builder->GetSmiConstant(constant);
    }
  }

  if (value->Is<StringLength>() ||
      value->Is<BuiltinStringPrototypeCharCodeOrCodePointAt>()) {
    static_assert(String::kMaxLength <= kSmiMaxValue,
                  "String length must fit into a Smi");
    tagged = Node::New<UnsafeSmiTagInt32>(builder->zone(), {value});
  } else if (NodeTypeIsSmi(node_type)) {
    // For known Smis, we can tag without a check.
    tagged = Node::New<UnsafeSmiTagInt32>(builder->zone(), {value});
  } else {
    tagged = Node::New<Int32ToNumber>(builder->zone(), {value});
  }

  predecessor->nodes().push_back(tagged);
  builder->compilation_unit()->RegisterNodeInGraphLabeller(tagged);
  return tagged;
}

ValueNode* FromUint32ToTagged(const MaglevGraphBuilder* builder,
                              NodeType node_type, ValueNode* value,
                              BasicBlock* predecessor) {
  DCHECK_EQ(value->properties().value_representation(),
            ValueRepresentation::kUint32);
  DCHECK(!value->properties().is_conversion());

  ValueNode* tagged;
  if (NodeTypeIsSmi(node_type)) {
    tagged = Node::New<UnsafeSmiTagUint32>(builder->zone(), {value});
  } else {
    tagged = Node::New<Uint32ToNumber>(builder->zone(), {value});
  }

  predecessor->nodes().push_back(tagged);
  builder->compilation_unit()->RegisterNodeInGraphLabeller(tagged);
  return tagged;
}

ValueNode* FromIntPtrToTagged(const MaglevGraphBuilder* builder,
                              NodeType node_type, ValueNode* value,
                              BasicBlock* predecessor) {
  DCHECK_EQ(value->properties().value_representation(),
            ValueRepresentation::kIntPtr);
  DCHECK(!value->properties().is_conversion());

  ValueNode* tagged = Node::New<IntPtrToNumber>(builder->zone(), {value});

  predecessor->nodes().push_back(tagged);
  builder->compilation_unit()->RegisterNodeInGraphLabeller(tagged);
  return tagged;
}

ValueNode* FromFloat64ToTagged(const MaglevGraphBuilder* builder,
                               NodeType node_type, ValueNode* value,
                               BasicBlock* predecessor) {
  DCHECK_EQ(value->properties().value_representation(),
            ValueRepresentation::kFloat64);
  DCHECK(!value->properties().is_conversion());

  // Create a tagged version, and insert it at the end of the predecessor.
  ValueNode* tagged = Node::New<Float64ToTagged>(
      builder->zone(), {value},
      Float64ToTagged::ConversionMode::kCanonicalizeSmi);

  predecessor->nodes().push_back(tagged);
  builder->compilation_unit()->RegisterNodeInGraphLabeller(tagged);
  return tagged;
}

ValueNode* FromHoleyFloat64ToTagged(const MaglevGraphBuilder* builder,
                                    NodeType node_type, ValueNode* value,
                                    BasicBlock* predecessor) {
  DCHECK_EQ(value->properties().value_representation(),
            ValueRepresentation::kHoleyFloat64);
  DCHECK(!value->properties().is_conversion());

  // Create a tagged version, and insert it at the end of the predecessor.
  ValueNode* tagged = Node::New<HoleyFloat64ToTagged>(
      builder->zone(), {value},
      HoleyFloat64ToTagged::ConversionMode::kForceHeapNumber);

  predecessor->nodes().push_back(tagged);
  builder->compilation_unit()->RegisterNodeInGraphLabeller(tagged);
  return tagged;
}

ValueNode* NonTaggedToTagged(const MaglevGraphBuilder* builder,
                             NodeType node_type, ValueNode* value,
                             BasicBlock* predecessor) {
  switch (value->properties().value_representation()) {
    case ValueRepresentation::kTagged:
      UNREACHABLE();
    case ValueRepresentation::kInt32:
      return FromInt32ToTagged(builder, node_type, value, predecessor);
    case ValueRepresentation::kUint32:
      return FromUint32ToTagged(builder, node_type, value, predecessor);
    case ValueRepresentation::kIntPtr:
      return FromIntPtrToTagged(builder, node_type, value, predecessor);
    case ValueRepresentation::kFloat64:
      return FromFloat64ToTagged(builder, node_type, value, predecessor);
    case ValueRepresentation::kHoleyFloat64:
      return FromHoleyFloat64ToTagged(builder, node_type, value, predecessor);
  }
}
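// EnsureTagged returns the value unchanged if it is already tagged, then
// prefers a tagged alternative cached in the value's NodeInfo, and only as a
// last resort emits a fresh conversion into the predecessor block.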
ValueNode* EnsureTagged(const MaglevGraphBuilder* builder,
                        const KnownNodeAspects& known_node_aspects,
                        ValueNode* value, BasicBlock* predecessor) {
  if (value->properties().value_representation() ==
      ValueRepresentation::kTagged) {
    return value;
  }

  auto info_it = known_node_aspects.FindInfo(value);
  const NodeInfo* info =
      known_node_aspects.IsValid(info_it) ? &info_it->second : nullptr;
  if (info) {
    if (auto alt = info->alternative().tagged()) {
      return alt;
    }
  }
  return NonTaggedToTagged(builder, info ? info->type() : NodeType::kUnknown,
                           value, predecessor);
}

}  // namespace

NodeType MergePointInterpreterFrameState::AlternativeType(
    const Alternatives* alt) {
  if (!alt) return NodeType::kUnknown;
  return alt->node_type();
}

ValueNode* MergePointInterpreterFrameState::MergeValue(
    const MaglevGraphBuilder* builder, interpreter::Register owner,
    const KnownNodeAspects& unmerged_aspects, ValueNode* merged,
    ValueNode* unmerged, Alternatives::List* per_predecessor_alternatives,
    bool optimistic_loop_phis) {
  // If the merged node is null, this is a pre-created loop header merge
  // frame with null values for anything that isn't a loop Phi.
  if (merged == nullptr) {
    DCHECK(is_exception_handler() || is_unmerged_loop());
    // Initialise the alternatives list and cache the alternative
    // representations of the node.
    if (per_predecessor_alternatives) {
      new (per_predecessor_alternatives) Alternatives::List();
      per_predecessor_alternatives->Add(builder->zone()->New<Alternatives>(
          unmerged_aspects.TryGetInfoFor(unmerged)));
    } else {
      DCHECK(is_exception_handler());
    }
    return unmerged;
  }

  auto UpdateLoopPhiType = [&](Phi* result, NodeType unmerged_type) {
    DCHECK(result->is_loop_phi());
    if (predecessors_so_far_ == 0) {
      // For loop Phis, `type` is always Unknown until the backedge has been
      // bound, so there is no point in updating it here.
      result->set_post_loop_type(unmerged_type);
      if (optimistic_loop_phis) {
        // In the case of optimistic loop headers we try to speculatively use
        // the type of the incoming argument as the phi type. We verify if
        // that happened to be true before allowing the loop to conclude in
        // `TryMergeLoop`. Some types which are known to cause issues are
        // generalized here.
        NodeType initial_optimistic_type = unmerged_type;
        if (!IsEmptyNodeType(CombineType(unmerged_type, NodeType::kString))) {
          // Make sure we don't depend on something being an internalized
          // string in particular, by making the type cover all String
          // subtypes.
          initial_optimistic_type =
              IntersectType(unmerged_type, NodeType::kString);
        }
        result->set_type(initial_optimistic_type);
      }
    } else {
      if (optimistic_loop_phis) {
        if (NodeInfo* node_info = known_node_aspects_->TryGetInfoFor(result)) {
          node_info->IntersectType(unmerged_type);
        }
        result->merge_type(unmerged_type);
      }
      result->merge_post_loop_type(unmerged_type);
    }
  };

  Phi* result = merged->TryCast<Phi>();
  if (result != nullptr && result->merge_state() == this) {
    // It's possible that merged == unmerged at this point since loop-phis are
    // not dropped if they are only assigned to themselves in the loop.
    DCHECK_EQ(result->owner(), owner);
    // Don't set inputs on exception phis.
    DCHECK_EQ(result->is_exception_phi(), is_exception_handler());
    if (is_exception_handler()) {
      // If an inlined allocation flows to an exception phi, we should
      // consider it as a use.
      if (unmerged->Is<InlinedAllocation>()) {
        unmerged->add_use();
      }
      return result;
    }

    NodeType unmerged_type =
        GetNodeType(builder->broker(), builder->local_isolate(),
                    unmerged_aspects, unmerged);
    if (result->is_loop_phi()) {
      UpdateLoopPhiType(result, unmerged_type);
    } else {
      result->merge_type(unmerged_type);
    }
    unmerged = EnsureTagged(builder, unmerged_aspects, unmerged,
                            predecessors_[predecessors_so_far_]);
    result->set_input(predecessors_so_far_, unmerged);

    return result;
  }

  if (merged == unmerged) {
    // Cache the alternative representations of the unmerged node.
    if (per_predecessor_alternatives) {
      DCHECK_EQ(per_predecessor_alternatives->LengthForTest(),
                predecessors_so_far_);
      per_predecessor_alternatives->Add(builder->zone()->New<Alternatives>(
          unmerged_aspects.TryGetInfoFor(unmerged)));
    } else {
      DCHECK(is_exception_handler());
    }
    return merged;
  }

  // We should always statically know what the context is, so we should never
  // create Phis for it. The exception is resumable functions and OSR, where
  // the context should be statically known but we lose that static
  // information across the resume / OSR entry.
  DCHECK_IMPLIES(
      owner == interpreter::Register::current_context(),
      IsResumableFunction(builder->compilation_unit()
                              ->info()
                              ->toplevel_compilation_unit()
                              ->shared_function_info()
                              .kind()) ||
          builder->compilation_unit()->info()->toplevel_is_osr());

  // Up to this point all predecessors had the same value for this interpreter
  // frame slot. Now that we find a distinct value, insert a copy of the first
  // value for each predecessor seen so far, in addition to the new value.
  // TODO(verwaest): Unclear whether we want this for Maglev: Instead of
  // letting the register allocator remove phis, we could always merge through
  // the frame slot. In that case we only need the inputs for representation
  // selection, and hence could remove duplicate inputs. We'd likely need to
  // attach the interpreter register to the phi in that case?

  // For exception phis, just allocate exception handlers.
  if (is_exception_handler()) {
    // ... and add a use if the inputs are inlined allocations.
    if (merged->Is<InlinedAllocation>()) {
      merged->add_use();
    }
    if (unmerged->Is<InlinedAllocation>()) {
      unmerged->add_use();
    }
    return NewExceptionPhi(builder->zone(), owner);
  }

  result = Node::New<Phi>(builder->zone(), predecessor_count_, this, owner);
  if (v8_flags.trace_maglev_graph_building) {
    for (uint32_t i = 0; i < predecessor_count_; i++) {
      result->initialize_input_null(i);
    }
  }

  NodeType merged_type =
      StaticTypeForNode(builder->broker(), builder->local_isolate(), merged);

  bool is_tagged = merged->properties().value_representation() ==
                   ValueRepresentation::kTagged;
  NodeType type = merged_type != NodeType::kUnknown
                      ? merged_type
                      : AlternativeType(per_predecessor_alternatives->first());
  int i = 0;
  for (const Alternatives* alt : *per_predecessor_alternatives) {
    ValueNode* tagged = is_tagged ? merged : alt->tagged_alternative();
    if (tagged == nullptr) {
      DCHECK_NOT_NULL(alt);
      tagged = NonTaggedToTagged(builder, alt->node_type(), merged,
                                 predecessors_[i]);
    }
    result->set_input(i, tagged);
    type = IntersectType(type, merged_type != NodeType::kUnknown
                                   ? merged_type
                                   : AlternativeType(alt));
    i++;
  }
  DCHECK_EQ(i, predecessors_so_far_);

  // Note: it's better to call GetNodeType on {unmerged} before updating it
  // with EnsureTagged, since untagged nodes have a higher chance of having a
  // StaticType.
  NodeType unmerged_type = GetNodeType(
      builder->broker(), builder->local_isolate(), unmerged_aspects, unmerged);
  unmerged = EnsureTagged(builder, unmerged_aspects, unmerged,
                          predecessors_[predecessors_so_far_]);
  result->set_input(predecessors_so_far_, unmerged);

  if (result->is_loop_phi()) {
    DCHECK(result->is_unmerged_loop_phi());
    UpdateLoopPhiType(result, type);
  } else {
    result->set_type(IntersectType(type, unmerged_type));
  }

  phis_.Add(result);
  return result;
}

std::optional<ValueNode*>
MergePointInterpreterFrameState::MergeVirtualObjectValue(
    const MaglevGraphBuilder* builder, const KnownNodeAspects& unmerged_aspects,
    ValueNode* merged, ValueNode* unmerged) {
  DCHECK_NOT_NULL(merged);
  DCHECK_NOT_NULL(unmerged);

  Phi* result = merged->TryCast<Phi>();
  if (result != nullptr && result->merge_state() == this) {
    NodeType unmerged_type =
        GetNodeType(builder->broker(), builder->local_isolate(),
                    unmerged_aspects, unmerged);
    unmerged = EnsureTagged(builder, unmerged_aspects, unmerged,
                            predecessors_[predecessors_so_far_]);
    for (uint32_t i = predecessors_so_far_; i < predecessor_count_; i++) {
      result->change_input(i, unmerged);
    }
    result->merge_type(unmerged_type);
    result->merge_post_loop_type(unmerged_type);
    return result;
  }

  if (merged == unmerged) {
    return merged;
  }

  if (InlinedAllocation* merged_nested_alloc =
          merged->TryCast<InlinedAllocation>()) {
    if (InlinedAllocation* unmerged_nested_alloc =
            unmerged->TryCast<InlinedAllocation>()) {
      // If a nested allocation doesn't point to the same object in both
      // objects, then we currently give up merging them and escape the
      // allocation.
      if (merged_nested_alloc != unmerged_nested_alloc) {
        return {};
      }
    }
  }

  // We don't support exception phis inside a virtual object.
  if (is_exception_handler()) {
    return {};
  }

  // We don't have LoopPhis inside a VirtualObject, but this can happen if the
  // block is a diamond-merge and a loop entry at the same time. For now, we
  // should escape.
  if (is_loop()) return {};

  result = Node::New<Phi>(builder->zone(), predecessor_count_, this,
                          interpreter::Register::invalid_value());
  if (v8_flags.trace_maglev_graph_building) {
    for (uint32_t i = 0; i < predecessor_count_; i++) {
      result->initialize_input_null(i);
    }
  }

  NodeType merged_type =
      StaticTypeForNode(builder->broker(), builder->local_isolate(), merged);

  // We must have seen the same value so far.
  DCHECK_NOT_NULL(known_node_aspects_);
  for (uint32_t i = 0; i < predecessors_so_far_; i++) {
    ValueNode* tagged_merged =
        EnsureTagged(builder, *known_node_aspects_, merged, predecessors_[i]);
    result->set_input(i, tagged_merged);
  }

  NodeType unmerged_type = GetNodeType(
      builder->broker(), builder->local_isolate(), unmerged_aspects, unmerged);
  unmerged = EnsureTagged(builder, unmerged_aspects, unmerged,
                          predecessors_[predecessors_so_far_]);
  for (uint32_t i = predecessors_so_far_; i < predecessor_count_; i++) {
    result->set_input(i, unmerged);
  }

  result->set_type(IntersectType(merged_type, unmerged_type));

  phis_.Add(result);
  return result;
}

void MergePointInterpreterFrameState::MergeLoopValue(
    MaglevGraphBuilder* builder, interpreter::Register owner,
    const KnownNodeAspects& unmerged_aspects, ValueNode* merged,
    ValueNode* unmerged) {
  Phi* result = merged->TryCast<Phi>();
  if (result == nullptr || result->merge_state() != this) {
    // Not a loop phi, we don't have to do anything.
    return;
  }
  DCHECK_EQ(result->owner(), owner);
  NodeType type = GetNodeType(builder->broker(), builder->local_isolate(),
                              unmerged_aspects, unmerged);
  unmerged = EnsureTagged(builder, unmerged_aspects, unmerged,
                          predecessors_[predecessor_count_ - 1]);
  result->set_input(predecessor_count_ - 1, unmerged);

  result->merge_post_loop_type(type);
  // We've just merged the backedge, which means that future uses of this Phi
  // will be after the loop, so we can now promote `post_loop_type` to the
  // regular `type`.
  result->promote_post_loop_type();

  if (Phi* unmerged_phi = unmerged->TryCast<Phi>()) {
    // Propagating the `uses_repr` from {result} to {unmerged_phi}.
    builder->RecordUseReprHint(unmerged_phi, result->get_uses_repr_hints());

    // Soundness of the loop phi Smi type relies on the back-edge's static
    // type Smi-ness.
    if (result->uses_require_31_bit_value()) {
      unmerged_phi->SetUseRequires31BitValue();
    }
  }
}

ValueNode* MergePointInterpreterFrameState::NewLoopPhi(
    Zone* zone, interpreter::Register reg) {
  DCHECK_EQ(predecessors_so_far_, 0);
  // Create a new loop phi, which for now is empty.
  Phi* result = Node::New<Phi>(zone, predecessor_count_, this, reg);

  if (v8_flags.trace_maglev_graph_building) {
    for (uint32_t i = 0; i < predecessor_count_; i++) {
      result->initialize_input_null(i);
    }
  }
  phis_.Add(result);
  return result;
}

void MergePointInterpreterFrameState::ReducePhiPredecessorCount(unsigned num) {
  for (Phi* phi : phis_) {
    phi->reduce_input_count(num);
    if (predecessors_so_far_ == predecessor_count_ - 1 &&
        predecessor_count_ > 1 && phi->is_loop_phi()) {
      phi->promote_post_loop_type();
    }
  }
}

bool MergePointInterpreterFrameState::IsUnreachableByForwardEdge() const {
  DCHECK_IMPLIES(
      is_loop(),
      predecessor_at(predecessor_count_ - 1)->control_node()->Is<JumpLoop>());
  switch (predecessor_count_) {
    case 0:
      // This happens after the back-edge of a resumable loop died at which
      // point we mark it non-looping.
      DCHECK(!is_loop());
      return true;
    case 1:
      return is_loop();
    default:
      return false;
  }
}

void MergePointInterpreterFrameState::RemovePredecessorAt(int predecessor_id) {
  // Only call this function if we have already processed all merge points.
  DCHECK_EQ(predecessors_so_far_, predecessor_count_);
  // Shift predecessors_ by 1.
  for (uint32_t i = predecessor_id; i < predecessor_count_ - 1; i++) {
    predecessors_[i] = predecessors_[i + 1];
    // Update cache in unconditional control node.
    ControlNode* control = predecessors_[i]->control_node();
    if (auto unconditional_control =
            control->TryCast<UnconditionalControlNode>()) {
      DCHECK_EQ(unconditional_control->predecessor_id(), i + 1);
      unconditional_control->set_predecessor_id(i);
    }
  }
  // Remove Phi input of index predecessor_id.
  for (Phi* phi : *phis()) {
    DCHECK_EQ(phi->input_count(), predecessor_count_);
    // Shift phi inputs by 1.
    for (int i = predecessor_id; i < phi->input_count() - 1; i++) {
      phi->change_input(i, phi->input(i + 1).node());
    }
    phi->reduce_input_count(1);
  }
  predecessor_count_--;
  predecessors_so_far_--;
}

}  // namespace maglev
}  // namespace internal
}  // namespace v8