v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
maglev-graph-builder.h
1// Copyright 2022 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_MAGLEV_MAGLEV_GRAPH_BUILDER_H_
6#define V8_MAGLEV_MAGLEV_GRAPH_BUILDER_H_
7
8#include <cmath>
9#include <iomanip>
10#include <map>
11#include <optional>
12#include <type_traits>
13#include <utility>
14
15#include "src/base/hashing.h"
16#include "src/base/logging.h"
17#include "src/base/vector.h"
20#include "src/common/globals.h"
29#include "src/flags/flags.h"
44#include "src/objects/string.h"
45#include "src/utils/memcopy.h"
46
47namespace v8 {
48namespace internal {
49namespace maglev {
50
51class CallArguments;
52
53class ReduceResult;
54class MaybeReduceResult {
55 public:
56 enum Kind {
57 kDoneWithValue = 0, // No need to mask while returning the pointer.
58 kDoneWithAbort,
59 kDoneWithoutValue,
60 kFail,
61 };
62
63 MaybeReduceResult() : payload_(kFail) {}
64
65 // NOLINTNEXTLINE
66 MaybeReduceResult(ValueNode* value) : payload_(value) {
67 DCHECK_NOT_NULL(value);
68 }
69
71
74
75 ValueNode* value() const {
76 DCHECK(HasValue());
77 return payload_.GetPointerWithKnownPayload(kDoneWithValue);
78 }
79 bool HasValue() const { return kind() == kDoneWithValue; }
80
81 // Either DoneWithValue, DoneWithoutValue or DoneWithAbort.
82 bool IsDone() const { return !IsFail(); }
83
84 // MaybeReduceResult failed.
85 bool IsFail() const { return kind() == kFail; }
86
87 // Done with a ValueNode.
88 bool IsDoneWithValue() const { return HasValue(); }
89
90 // Done without producing a ValueNode.
91 bool IsDoneWithoutValue() const { return kind() == kDoneWithoutValue; }
92
93 // Done with an abort (unconditional deopt, infinite loop in an inlined
94 // function, etc)
95 bool IsDoneWithAbort() const { return kind() == kDoneWithAbort; }
96
97 Kind kind() const { return payload_.GetPayload(); }
98
99 inline ReduceResult Checked();
100
101 base::PointerWithPayload<ValueNode, Kind, 3> GetPayload() const {
102 return payload_;
103 }
104
105 protected:
106 explicit MaybeReduceResult(Kind kind) : payload_(kind) {}
107 explicit MaybeReduceResult(
108 base::PointerWithPayload<ValueNode, Kind, 3> payload)
109 : payload_(payload) {}
110 base::PointerWithPayload<ValueNode, Kind, 3> payload_;
111};
112
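// Illustrative note (hedged): payload_ packs the ValueNode pointer together
// with a Kind tag in the pointer's low alignment bits. Because kDoneWithValue
// is 0, value() can return the stored pointer without masking the tag, e.g.:
//
//   MaybeReduceResult r(node);  // kind() == kDoneWithValue, value() == node
//   MaybeReduceResult f;        // default-constructed, kind() == kFail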
113class ReduceResult : public MaybeReduceResult {
114 public:
115 // NOLINTNEXTLINE
116 ReduceResult(ValueNode* value) : MaybeReduceResult(value) {}
117
118 explicit ReduceResult(const MaybeReduceResult& other)
119 : MaybeReduceResult(other.GetPayload()) {
120 CHECK(!IsFail());
121 }
122
123 static ReduceResult Done(ValueNode* value) { return ReduceResult(value); }
124 static ReduceResult Done() { return ReduceResult(kDoneWithoutValue); }
125 static ReduceResult DoneWithAbort() { return ReduceResult(kDoneWithAbort); }
126
127 bool IsFail() const { return false; }
128 ReduceResult Checked() { return *this; }
129
130 protected:
131 explicit ReduceResult(Kind kind) : MaybeReduceResult(kind) {}
132};
133
135
136#define RETURN_IF_DONE(result) \
137 do { \
138 auto res = (result); \
139 if (res.IsDone()) { \
140 return res.Checked(); \
141 } \
142 } while (false)
143
144#define RETURN_IF_ABORT(result) \
145 do { \
146 if ((result).IsDoneWithAbort()) { \
147 return ReduceResult::DoneWithAbort(); \
148 } \
149 } while (false)
150
151#define PROCESS_AND_RETURN_IF_DONE(result, value_processor) \
152 do { \
153 auto res = (result); \
154 if (res.IsDone()) { \
155 if (res.IsDoneWithValue()) { \
156 value_processor(res.value()); \
157 } \
158 return res.Checked(); \
159 } \
160 } while (false)
161
162#define GET_VALUE_OR_ABORT(variable, result) \
163 do { \
164 MaybeReduceResult res = (result); \
165 if (res.IsDoneWithAbort()) { \
166 return ReduceResult::DoneWithAbort(); \
167 } \
168 DCHECK(res.IsDoneWithValue()); \
169 using T = std::remove_pointer_t<std::decay_t<decltype(variable)>>; \
170 variable = res.value()->Cast<T>(); \
171 } while (false)
172
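// Illustrative sketch (hedged; TryReduceFoo, BuildGenericFoo and VisitFoo are
// hypothetical names, not declared in this header): reducers typically thread
// results through the macros above like this:
//
//   MaybeReduceResult TryReduceFoo(CallArguments& args);
//
//   ReduceResult VisitFoo(CallArguments& args) {
//     // Return early if the reduction produced a value or aborted; on kFail,
//     // fall through to a generic lowering.
//     RETURN_IF_DONE(TryReduceFoo(args));
//     return BuildGenericFoo(args);
//   }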
174
176 LocalIsolate* isolate, ValueNode* node);
177
183
196
203
205 public:
206 class DeoptFrameScope;
207
225
228 Graph* graph,
230
231 void Build() {
232 DCHECK(!is_inline());
233
236 compilation_unit_->shared_function_info().StartPosition(),
238
240 for (int i = 0; i < parameter_count(); i++) {
241 // TODO(v8:7700): Consider creating InitialValue nodes lazily.
244 DCHECK_EQ(graph()->parameters().size(), static_cast<size_t>(i));
245 graph()->parameters().push_back(v);
246 SetArgument(i, v);
247 }
248
250
251 // Don't use the AddNewNode helper for the function entry stack check, so
252 // that we can set a custom deopt frame on it.
253 FunctionEntryStackCheck* function_entry_stack_check =
255 new (function_entry_stack_check->lazy_deopt_info()) LazyDeoptInfo(
258 AddInitializedNodeToGraph(function_entry_stack_check);
259
261 EndPrologue();
262 in_prologue_ = false;
263
264 compiler::ScopeInfoRef scope_info =
266 if (scope_info.HasOuterScopeInfo()) {
267 scope_info = scope_info.OuterScopeInfo(broker());
268 CHECK(scope_info.HasContext());
269 graph()->record_scope_info(GetContext(), scope_info);
270 }
271 if (compilation_unit_->is_osr()) {
273 }
274
275 BuildBody();
276 }
277
279 ValueNode* context, ValueNode* function,
281
282 void StartPrologue();
283 void SetArgument(int i, ValueNode* value);
285 ValueNode* GetArgument(int i);
287 void BuildRegisterFrameInitialization(ValueNode* context = nullptr,
288 ValueNode* closure = nullptr,
289 ValueNode* new_target = nullptr);
290 void BuildMergeStates();
292 void PeelLoop();
293 void BuildLoopForPeeling();
294
295 void OsrAnalyzePrequel();
296
320
321 SmiConstant* GetSmiConstant(int constant) const {
322 DCHECK(Smi::IsValid(constant));
323 auto it = graph_->smi().find(constant);
324 if (it == graph_->smi().end()) {
325 SmiConstant* node =
327 graph_->smi().emplace(constant, node);
328 return node;
329 }
330 return it->second;
331 }
332
334 DCHECK(TaggedIndex::IsValid(constant));
335 auto it = graph_->tagged_index().find(constant);
336 if (it == graph_->tagged_index().end()) {
338 0, TaggedIndex::FromIntptr(constant));
339 graph_->tagged_index().emplace(constant, node);
340 return node;
341 }
342 return it->second;
343 }
344
345 Int32Constant* GetInt32Constant(int32_t constant) {
346 auto it = graph_->int32().find(constant);
347 if (it == graph_->int32().end()) {
349 graph_->int32().emplace(constant, node);
350 return node;
351 }
352 return it->second;
353 }
354
356 auto it = graph_->uint32().find(constant);
357 if (it == graph_->uint32().end()) {
359 graph_->uint32().emplace(constant, node);
360 return node;
361 }
362 return it->second;
363 }
364
366 return GetFloat64Constant(
367 Float64::FromBits(base::double_to_uint64(constant)));
368 }
369
371 auto it = graph_->float64().find(constant.get_bits());
372 if (it == graph_->float64().end()) {
373 Float64Constant* node =
375 graph_->float64().emplace(constant.get_bits(), node);
376 return node;
377 }
378 return it->second;
379 }
380
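// Illustrative note (hedged): the Get*Constant helpers above canonicalize
// constants per graph, so repeated requests return the same node:
//
//   Int32Constant* a = GetInt32Constant(42);
//   Int32Constant* b = GetInt32Constant(42);
//   DCHECK_EQ(a, b);  // same cached node, keyed by value in graph_->int32()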
381 ValueNode* GetNumberConstant(double constant);
382
383 static compiler::OptionalHeapObjectRef TryGetConstant(
385
386 Graph* graph() const { return graph_; }
387 Zone* zone() const { return compilation_unit_->zone(); }
395 }
398
399 bool has_graph_labeller() const {
401 }
405
406 // True when this graph builder is building the subgraph of an inlined
407 // function.
408 bool is_inline() const { return caller_details_ != nullptr; }
410
411 bool is_eager_inline() const {
412 DCHECK(is_inline());
414 v8_flags.maglev_non_eager_inlining ||
415 v8_flags.turbolev_non_eager_inlining);
417 }
418
420
422 return v8_flags.maglev_speculative_hoist_phi_untagging ||
423 v8_flags.maglev_licm;
424 }
425
427
429 phi->RecordUseReprHint(reprs);
430 }
435 if (Phi* phi = node->TryCast<Phi>()) {
436 RecordUseReprHint(phi, repr);
437 }
438 }
439
442 ControlNode* control_node, ZoneVector<Node*> rem_nodes_in_call_block);
443
445
446 uint32_t NewObjectId() { return graph_->NewObjectId(); }
447
448 bool is_turbolev() const { return is_turbolev_; }
449
451 if (is_turbolev()) {
452 return v8_flags.turbolev_non_eager_inlining;
453 }
454 return v8_flags.maglev_non_eager_inlining;
455 }
456
457 // Inlining configuration. For Maglev, we use the Maglev flags, and for
458 // Turbolev, we use the Turbofan flags.
460 if (is_turbolev()) {
461 return v8_flags.max_inlined_bytecode_size;
462 } else {
463 return v8_flags.max_maglev_inlined_bytecode_size;
464 }
465 }
467 if (is_turbolev()) {
468 return v8_flags.max_inlined_bytecode_size_small;
469 } else {
470 return v8_flags.max_maglev_inlined_bytecode_size_small;
471 }
472 }
474 if (is_turbolev()) {
475 return v8_flags.min_inlining_frequency;
476 } else {
477 return v8_flags.min_maglev_inlining_frequency;
478 }
479 }
481 if (is_turbolev()) {
482 return v8_flags.max_inlined_bytecode_size_cumulative;
483 } else {
484 return v8_flags.max_maglev_inlined_bytecode_size_cumulative;
485 }
486 }
487
490 ValueNode* closure,
492
493 private:
494 // Helper class for building a subgraph with its own control flow that is not
495 // attached to any bytecode.
496 //
497 // It does this by creating a fake dummy compilation unit and frame state, and
498 // wrapping up all the places where it pretends to be interpreted but isn't.
500 public:
501 class Variable;
502 class Label;
503 class LoopLabel;
504
505 MaglevSubGraphBuilder(MaglevGraphBuilder* builder, int variable_count);
506 LoopLabel BeginLoop(std::initializer_list<Variable*> loop_vars);
507 template <typename ControlNodeT, typename... Args>
508 void GotoIfTrue(Label* true_target,
509 std::initializer_list<ValueNode*> control_inputs,
510 Args&&... args);
511 template <typename ControlNodeT, typename... Args>
512 void GotoIfFalse(Label* false_target,
513 std::initializer_list<ValueNode*> control_inputs,
514 Args&&... args);
515 void GotoOrTrim(Label* label);
516 void Goto(Label* label);
517 void ReducePredecessorCount(Label* label, unsigned num = 1);
518 void EndLoop(LoopLabel* loop_label);
519 void Bind(Label* label);
521 void set(Variable& var, ValueNode* value);
522 ValueNode* get(const Variable& var) const;
523
524 template <typename FCond, typename FTrue, typename FFalse>
525 ReduceResult Branch(std::initializer_list<Variable*> vars, FCond cond,
526 FTrue if_true, FFalse if_false);
527
528 void MergeIntoLabel(Label* label, BasicBlock* predecessor);
529
530 private:
534
538 };
539
540 // TODO(olivf): Currently identifying dead code relies on the fact that loops
541 // must be entered through the loop header by at least one of the
542 // predecessors. We might want to re-evaluate this in case we want to be able
543 // to OSR into nested loops while compiling the full continuation.
544 static constexpr bool kLoopsMustBeEnteredThroughHeader = true;
545
546 class CallSpeculationScope;
548
549 bool CheckStaticType(ValueNode* node, NodeType type, NodeType* old = nullptr);
550 bool CheckType(ValueNode* node, NodeType type, NodeType* old = nullptr);
551 NodeType CheckTypes(ValueNode* node, std::initializer_list<NodeType> types);
552 bool EnsureType(ValueNode* node, NodeType type, NodeType* old = nullptr);
558
559 // Returns true if we statically know that {lhs} and {rhs} have disjoint
560 // types.
561 bool HaveDisjointTypes(ValueNode* lhs, ValueNode* rhs);
562 bool HasDisjointType(ValueNode* lhs, NodeType rhs_type);
563
564 template <typename Function>
565 bool EnsureType(ValueNode* node, NodeType type, Function ensure_new_type);
567
568 void SetKnownValue(ValueNode* node, compiler::ObjectRef constant,
569 NodeType new_node_type);
571 if (is_inline()) {
572 return false;
573 }
574 if (is_turbolev()) {
575 // As the top-tier compiler, Turboshaft doesn't need interrupt budget
576 // checks.
577 return false;
578 }
579 return v8_flags.force_emit_interrupt_budget_checks || v8_flags.turbofan;
580 }
582 if (!v8_flags.turbofan || !v8_flags.use_osr || !v8_flags.osr_from_maglev)
583 return false;
584 if (!graph_->is_osr() && !v8_flags.always_osr_from_maglev) {
585 return false;
586 }
587 // TODO(olivf) OSR from maglev requires lazy recompilation (see
588 // CompileOptimizedOSRFromMaglev for details). Without this we end up in
589 // deopt loops, e.g., in chromium content_unittests.
591 return false;
592 }
593 // TODO(olivf) OSR'ing from inlined loops is something we might want, but
594 // can't with our current osr-from-maglev implementation. The reason is that
595 // we OSR up by first going down to the interpreter. For inlined loops this
596 // means we would deoptimize to the caller and then probably end up in the
597 // same maglev osr code again, before reaching the turbofan OSR code in the
598 // callee. The solution is to support osr from maglev without
599 // deoptimization.
600 return !(graph_->is_osr() && is_inline());
601 }
602 bool MaglevIsTopTier() const { return !v8_flags.turbofan && v8_flags.maglev; }
604 BasicBlock* predecessor) {
605 if (v8_flags.trace_maglev_graph_building) {
606 std::cout << "== New empty block ==" << std::endl;
608 }
610 current_block_ = zone()->New<BasicBlock>(nullptr, zone());
611 BasicBlock* result = FinishBlock<Jump>({}, &jump_targets);
612 result->set_edge_split_block(predecessor);
613#ifdef DEBUG
614 new_nodes_.clear();
615#endif
616 return result;
617 }
618
621
623 DCHECK_EQ(merge_state.predecessor_count(), 0);
624
625 // Copy state.
627 // Expressions would have to be explicitly preserved across exceptions.
628 // However, at this point we do not know which ones might be used.
631
632 // Merges aren't simple fallthroughs, so we should reset the checkpoint
633 // validity.
635
636 // Register exception phis.
637 if (has_graph_labeller()) {
638 for (Phi* phi : *merge_states_[offset]->phis()) {
642 if (v8_flags.trace_maglev_graph_building) {
643 std::cout << " " << phi << " "
644 << PrintNodeLabel(graph_labeller(), phi) << ": "
645 << PrintNode(graph_labeller(), phi) << std::endl;
646 }
647 }
648 }
649 }
650
651 void ProcessMergePoint(int offset, bool preserve_known_node_aspects) {
652 // First copy the merge state to be the current state.
655 preserve_known_node_aspects, zone());
656
658 }
659
660 // Splits incoming critical edges and labels predecessors.
663 BasicBlockRef& jump_targets) {
664 // TODO(olivf): Support allocation folding across control flow.
666
667 // Merges aren't simple fallthroughs, so we should reset state which is
668 // cached directly on the builder instead of on the merge states.
670
671 if (merge_state.is_loop()) {
672 DCHECK_EQ(merge_state.predecessors_so_far(),
673 merge_state.predecessor_count() - 1);
674 } else {
675 DCHECK_EQ(merge_state.predecessors_so_far(),
676 merge_state.predecessor_count());
677 }
678
679 if (merge_state.predecessor_count() == 1) return;
680
681 // Set up edge-split.
682 int predecessor_index = merge_state.predecessor_count() - 1;
683 if (merge_state.is_loop()) {
684 // For loops, the JumpLoop block hasn't been generated yet, and so isn't
686 // in the list of jump targets. It's the last predecessor, so drop the
686 // index by one.
687 DCHECK(merge_state.is_unmerged_loop());
688 predecessor_index--;
689 }
690 BasicBlockRef* old_jump_targets = jump_targets.Reset();
691 while (old_jump_targets != nullptr) {
692 BasicBlock* predecessor = merge_state.predecessor_at(predecessor_index);
693 CHECK(predecessor);
694 ControlNode* control = predecessor->control_node();
695 if (control->Is<ConditionalControlNode>()) {
696 // CreateEmptyBlock automatically registers itself with the offset.
697 predecessor = CreateEdgeSplitBlock(jump_targets, predecessor);
698 // Set the old predecessor's (the conditional block) reference to
699 // point to the new empty predecessor block.
700 old_jump_targets =
701 old_jump_targets->SetToBlockAndReturnNext(predecessor);
702 merge_state.set_predecessor_at(predecessor_index, predecessor);
703 } else {
704 // Re-register the block in the offset's ref list.
705 old_jump_targets = old_jump_targets->MoveToRefList(&jump_targets);
706 }
707 // We only set the predecessor id after splitting critical edges, to make
708 // sure the edge split blocks pick up the correct predecessor index.
709 predecessor->set_predecessor_id(predecessor_index--);
710 }
711 DCHECK_EQ(predecessor_index, -1);
713 }
714
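// Illustrative sketch (hedged) of the edge splitting above: a conditional
// predecessor jumping straight into a merge point is a critical edge, so an
// empty edge-split block (ending in a Jump) is interposed:
//
//     Branch                    Branch
//      /   \                     /   \
//    ...   Merge      =>       ...   [edge-split block: Jump]
//                                          |
//                                        Merge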
716 MergePointInterpreterFrameState& merge_state) {
717 if (!has_graph_labeller()) return;
718
719 for (Phi* phi : *merge_state.phis()) {
721 if (v8_flags.trace_maglev_graph_building) {
722 std::cout << " " << phi << " "
723 << PrintNodeLabel(graph_labeller(), phi) << ": "
724 << PrintNode(graph_labeller(), phi) << std::endl;
725 }
726 }
727 }
728
729 // Return true if the given offset is a merge point, i.e. there are jumps
730 // targeting it.
732 return merge_states_[offset] != nullptr;
733 }
734
735 ValueNode* GetContextAtDepth(ValueNode* context, size_t depth);
736 bool CheckContextExtensions(size_t depth);
737
738 // Called when a block is killed by an unconditional eager deopt.
741 // Create a block rather than calling finish, since we don't yet know the
742 // next block's offset before the loop skipping the rest of the bytecodes.
743 FinishBlock<Deopt>({}, reason);
745 }
746
747 void KillPeeledLoopTargets(int peelings) {
748 DCHECK_EQ(iterator_.current_bytecode(), interpreter::Bytecode::kJumpLoop);
749 int target = iterator_.GetJumpTargetOffset();
750 // Since we ended up not peeling, we must kill all the doubly-accounted
751 // jumps out of the loop.
752 interpreter::BytecodeArrayIterator iterator(bytecode().object());
753 for (iterator.SetOffset(target);
755 iterator.Advance()) {
757 DCHECK_NE(bc, interpreter::Bytecode::kJumpLoop);
758 int kill = -1;
761 kill = iterator.GetJumpTargetOffset();
762 } else if (is_inline() && interpreter::Bytecodes::Returns(bc)) {
763 kill = inline_exit_offset();
764 }
765 if (kill != -1) {
766 if (merge_states_[kill]) {
767 for (int i = 0; i < peelings; ++i) {
769 }
770 }
771 UpdatePredecessorCount(kill, -peelings);
772 }
773 }
774 }
775
778 if (v8_flags.trace_maglev_graph_building) {
779 std::cout << "== Dead ==\n"
780 << std::setw(4) << iterator_.current_offset() << " : ";
783 std::cout << std::endl;
784 }
785
786 // If the current bytecode is a jump to elsewhere, then this jump is
787 // also dead and we should make sure to merge it as a dead predecessor.
790 // Jumps merge into their target, and conditional jumps also merge into
791 // the fallthrough.
795 }
796 } else if (bytecode == interpreter::Bytecode::kJumpLoop) {
797 // JumpLoop merges into its loop header, which has to be treated
798 // specially by the merge.
801 }
802 } else if (interpreter::Bytecodes::IsSwitch(bytecode)) {
803 // Switches merge into their targets, and into the fallthrough.
805 MergeDeadIntoFrameState(offset.target_offset);
806 }
808 } else if (!interpreter::Bytecodes::Returns(bytecode) &&
810 // Any other bytecode that doesn't return or throw will merge into the
811 // fallthrough.
813 } else if (interpreter::Bytecodes::Returns(bytecode) && is_inline()) {
815 }
816
817 // TODO(leszeks): We could now continue iterating the bytecode
818 }
819
831
833 if (!v8_flags.trace_maglev_graph_building) return;
835 std::cout, "* VOs (Interpreter Frame State): ",
837 }
838
840 if (v8_flags.trace_maglev_graph_building) {
841 std::cout << std::setw(4) << iterator_.current_offset() << " : ";
844 std::cout << std::endl;
845 }
846
849
851 if (V8_UNLIKELY(merge_state != nullptr)) {
852 bool preserve_known_node_aspects = in_optimistic_peeling_iteration() &&
854 if (merge_state->is_resumable_loop()) {
856 }
857 if (current_block_ != nullptr) {
858 DCHECK(!preserve_known_node_aspects);
859 // TODO(leszeks): Re-evaluate this DCHECK; we might hit it if the only
860 // bytecodes in this basic block were register juggling.
861 // DCHECK(!node_buffer().empty());
862 BasicBlock* predecessor;
863 if (merge_state->is_loop() && !merge_state->is_resumable_loop() &&
865 predecessor =
867 } else {
868 predecessor = FinishBlock<Jump>({}, &jump_targets_[offset]);
869 }
871 predecessor);
872 }
873 if (v8_flags.trace_maglev_graph_building) {
874 auto detail = merge_state->is_exception_handler() ? "exception handler"
875 : merge_state->is_loop() ? "loop header"
876 : "merge";
877 std::cout << "== New block (" << detail << " @" << merge_state
878 << ") at "
880 << "==" << std::endl;
882 }
883
884 if (V8_UNLIKELY(merge_state->is_exception_handler())) {
886 // If we have no reference to this block, then the exception handler is
887 // dead.
888 if (!jump_targets_[offset].has_ref() ||
889 !merge_state->exception_handler_was_used()) {
891 return;
892 }
894 } else if (merge_state->is_unmerged_unreachable_loop()) {
895 // We encountered a loop header that is only reachable by the JumpLoop
896 // back-edge, but the bytecode_analysis didn't notice upfront. This can
897 // e.g. be a loop that is entered on a dead fall-through.
900 return;
901 } else {
902 ProcessMergePoint(offset, preserve_known_node_aspects);
903 }
904
905 if (is_loop_effect_tracking_enabled() && merge_state->is_loop()) {
907 }
908 // We pass nullptr for the `predecessor` argument of StartNewBlock because
909 // this block is guaranteed to have a merge_state_, and hence to not have
910 // a `predecessor_` field.
911 StartNewBlock(offset, /*predecessor*/ nullptr);
912 } else if (V8_UNLIKELY(current_block_ == nullptr)) {
913 // If we don't have a current block, the bytecode must be dead (because of
914 // some earlier deopt). Mark this bytecode dead too and return.
915 // TODO(leszeks): Merge these two conditions by marking dead states with
916 // a sentinel value.
917 if (predecessor_count(offset) == 1) {
918 CHECK_NULL(merge_state);
919 CHECK(bytecode_analysis().IsLoopHeader(offset));
920 } else {
922 }
924 return;
925 }
926
927 // Handle exceptions if we have a table.
928 if (bytecode().handler_table_size() > 0) {
929 // Pop all entries where offset >= end.
930 while (IsInsideTryBlock()) {
932 if (offset < entry.end) break;
933 catch_block_stack_.pop();
934 }
935 // Push new entries from interpreter handler table where offset >= start
936 // && offset < end.
937 HandlerTable table(*bytecode().object());
938 while (next_handler_table_index_ < table.NumberOfRangeEntries()) {
939 int start = table.GetRangeStart(next_handler_table_index_);
940 if (offset < start) break;
941 int end = table.GetRangeEnd(next_handler_table_index_);
942 if (offset >= end) {
944 continue;
945 }
946 int handler = table.GetRangeHandler(next_handler_table_index_);
947 catch_block_stack_.push({end, handler});
950 }
951 }
952
954#ifdef DEBUG
955 // Clear new nodes for the next VisitFoo
956 new_nodes_.clear();
957#endif
958
959 if (iterator_.current_bytecode() == interpreter::Bytecode::kJumpLoop &&
962 CHECK(EmitUnconditionalDeopt(DeoptimizeReason::kOSREarlyExit)
963 .IsDoneWithAbort());
965 return;
966 }
967
968 switch (iterator_.current_bytecode()) {
969#define BYTECODE_CASE(name, ...) \
970 case interpreter::Bytecode::k##name: { \
971 if (Visit##name().IsDoneWithAbort()) { \
972 MarkBytecodeDead(); \
973 } \
974 break; \
975 }
977#undef BYTECODE_CASE
978 }
979 }
980
981#define BYTECODE_VISITOR(name, ...) ReduceResult Visit##name();
983#undef BYTECODE_VISITOR
984
985#define DECLARE_VISITOR(name, ...) \
986 ReduceResult VisitIntrinsic##name(interpreter::RegisterList args);
988#undef DECLARE_VISITOR
989
991 // VirtualObjects should never be added to the Maglev graph.
992 DCHECK(!node->Is<VirtualObject>());
993 node_buffer().push_back(node);
994 node->set_owner(current_block_);
995 if (has_graph_labeller())
999 if (v8_flags.trace_maglev_graph_building) {
1000 std::cout << " " << node << " "
1001 << PrintNodeLabel(graph_labeller(), node) << ": "
1002 << PrintNode(graph_labeller(), node) << std::endl;
1003 }
1004#ifdef DEBUG
1005 new_nodes_.insert(node);
1006#endif
1007 }
1008
1009 // Add a new node with a dynamic set of inputs which are initialized by the
1010 // `post_create_input_initializer` function before the node is added to the
1011 // graph.
1012 template <typename NodeT, typename Function, typename... Args>
1013 NodeT* AddNewNode(size_t input_count,
1014 Function&& post_create_input_initializer, Args&&... args) {
1015 NodeT* node =
1016 NodeBase::New<NodeT>(zone(), input_count, std::forward<Args>(args)...);
1017 post_create_input_initializer(node);
1018 return AttachExtraInfoAndAddToGraph(node);
1019 }
1020
1021 template <typename NodeT, typename... Args>
1022 NodeT* AddNewNodeOrGetEquivalent(std::initializer_list<ValueNode*> raw_inputs,
1023 Args&&... args) {
1024 DCHECK(v8_flags.maglev_cse);
1025 static constexpr Opcode op = Node::opcode_of<NodeT>;
1026 static_assert(Node::participate_in_cse(op));
1027 using options_result =
1028 typename std::invoke_result<decltype(&NodeT::options),
1029 const NodeT>::type;
1030 static_assert(
1031 std::is_assignable<options_result, std::tuple<Args...>>::value,
1032 "Instruction participating in CSE needs options() returning "
1033 "a tuple matching the constructor arguments");
1034 static_assert(IsFixedInputNode<NodeT>());
1035 static_assert(NodeT::kInputCount <= 3);
1036
1037 std::array<ValueNode*, NodeT::kInputCount> inputs;
1038 // Nodes with zero input count don't have kInputTypes defined.
1039 if constexpr (NodeT::kInputCount > 0) {
1040 int i = 0;
1042 for (ValueNode* raw_input : raw_inputs) {
1043 inputs[i] = ConvertInputTo<hint>(raw_input, NodeT::kInputTypes[i]);
1044 i++;
1045 }
1047 static_assert(NodeT::kInputCount == 2);
1048 if ((IsConstantNode(inputs[0]->opcode()) || inputs[0] > inputs[1]) &&
1049 !IsConstantNode(inputs[1]->opcode())) {
1050 std::swap(inputs[0], inputs[1]);
1051 }
1052 }
1053 }
1054
1055 uint32_t value_number;
1056 {
1057 size_t tmp_value_number = base::hash_value(op);
1058 (
1059 [&] {
1060 tmp_value_number =
1061 fast_hash_combine(tmp_value_number, gvn_hash_value(args));
1062 }(),
1063 ...);
1064 for (const auto& inp : inputs) {
1065 tmp_value_number =
1066 fast_hash_combine(tmp_value_number, base::hash_value(inp));
1067 }
1068 value_number = static_cast<uint32_t>(tmp_value_number);
1069 }
1070
1071 auto exists = known_node_aspects().available_expressions.find(value_number);
1072 if (exists != known_node_aspects().available_expressions.end()) {
1073 auto candidate = exists->second.node;
1074 const bool sanity_check =
1075 candidate->Is<NodeT>() &&
1076 static_cast<size_t>(candidate->input_count()) == inputs.size();
1077 DCHECK_IMPLIES(sanity_check,
1079 candidate->properties()) == candidate->properties());
1080 const bool epoch_check =
1082 known_node_aspects().effect_epoch() <= exists->second.effect_epoch;
1083 if (sanity_check && epoch_check) {
1084 if (static_cast<NodeT*>(candidate)->options() ==
1085 std::tuple{std::forward<Args>(args)...}) {
1086 int i = 0;
1087 for (const auto& inp : inputs) {
1088 if (inp != candidate->input(i).node()) {
1089 break;
1090 }
1091 i++;
1092 }
1093 if (static_cast<size_t>(i) == inputs.size()) {
1094 return static_cast<NodeT*>(candidate);
1095 }
1096 }
1097 }
1098 if (!epoch_check) {
1100 }
1101 }
1102 NodeT* node = NodeBase::New<NodeT>(zone(), inputs.size(),
1103 std::forward<Args>(args)...);
1104 int i = 0;
1105 for (ValueNode* input : inputs) {
1106 DCHECK_NOT_NULL(input);
1107 node->set_input(i++, input);
1108 }
1109 DCHECK_EQ(node->options(), std::tuple{std::forward<Args>(args)...});
1110 uint32_t epoch = Node::needs_epoch_check(op)
1114 known_node_aspects().available_expressions[value_number] = {node, epoch};
1115 }
1116 return AttachExtraInfoAndAddToGraph(node);
1117 }
1118
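// Illustrative sketch (hedged; Int32AddWithOverflow is just an example of a
// fixed-input node that participates in CSE): with --maglev-cse, requesting
// the same pure operation twice on the same inputs yields the cached node
// rather than a duplicate:
//
//   ValueNode* a = AddNewNode<Int32AddWithOverflow>({left, right});
//   ValueNode* b = AddNewNode<Int32AddWithOverflow>({left, right});
//   // a == b, provided the effect epoch has not advanced in between.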
1119 // Add a new node with a static set of inputs.
1120 template <typename NodeT, typename... Args>
1121 NodeT* AddNewNode(std::initializer_list<ValueNode*> inputs, Args&&... args) {
1122 static_assert(IsFixedInputNode<NodeT>());
1124 if (v8_flags.maglev_cse) {
1126 std::forward<Args>(args)...);
1127 }
1128 }
1129 NodeT* node = NodeBase::New<NodeT>(zone(), inputs.size(),
1130 std::forward<Args>(args)...);
1131 SetNodeInputs(node, inputs);
1132 return AttachExtraInfoAndAddToGraph(node);
1133 }
1134
1135 template <typename NodeT, typename... Args>
1137 static_assert(IsConstantNode(Node::opcode_of<NodeT>));
1138 NodeT* node = NodeBase::New<NodeT>(zone(), std::forward<Args>(args)...);
1139 static_assert(!NodeT::kProperties.can_eager_deopt());
1140 static_assert(!NodeT::kProperties.can_lazy_deopt());
1141 static_assert(!NodeT::kProperties.can_throw());
1142 static_assert(!NodeT::kProperties.can_write());
1144 if (v8_flags.trace_maglev_graph_building) {
1145 std::cout << " " << node << " "
1146 << PrintNodeLabel(graph_labeller(), node) << ": "
1147 << PrintNode(graph_labeller(), node) << std::endl;
1148 }
1149 return node;
1150 }
1151
1152 template <typename NodeT>
1154 static_assert(NodeT::kProperties.is_deopt_checkpoint() +
1155 NodeT::kProperties.can_eager_deopt() +
1156 NodeT::kProperties.can_lazy_deopt() <=
1157 1);
1160 AttachLazyDeoptInfo(node);
1164 return node;
1165 }
1166
1167 template <typename NodeT>
1169 if constexpr (NodeT::kProperties.is_deopt_checkpoint()) {
1170 node->SetEagerDeoptInfo(zone(), GetLatestCheckpointedFrame());
1171 }
1172 }
1173
1174 template <typename NodeT>
1176 if constexpr (NodeT::kProperties.can_eager_deopt()) {
1177 node->SetEagerDeoptInfo(zone(), GetLatestCheckpointedFrame(),
1179 }
1180 }
1181
1182 template <typename NodeT>
1184 if constexpr (NodeT::kProperties.can_lazy_deopt()) {
1185 interpreter::Register result_location;
1186 int result_size;
1190 } else {
1191 std::tie(result_location, result_size) = GetResultLocationAndSize();
1192 }
1193 new (node->lazy_deopt_info()) LazyDeoptInfo(
1194 zone(), GetDeoptFrameForLazyDeopt(result_location, result_size),
1195 result_location, result_size, current_speculation_feedback_);
1196 }
1197 }
1198
1199 template <typename NodeT>
1201 if constexpr (NodeT::kProperties.can_throw()) {
1203 if (catch_block.ref) {
1204 if (!catch_block.exception_handler_was_used) {
1205 // Attach an empty live exception handler to mark that there's a
1206 // matching catch but we'll lazy deopt if we ever throw.
1207 new (node->exception_handler_info())
1209 DCHECK(node->exception_handler_info()->HasExceptionHandler());
1210 DCHECK(node->exception_handler_info()->ShouldLazyDeopt());
1211 if constexpr (std::is_same_v<NodeT, CallKnownJSFunction>) {
1213 // Ensure that we always have the handler of inline call
1214 // candidates.
1216 node->exception_handler_info());
1217 }
1218 }
1219 return;
1220 }
1221
1223 if (!IsInsideTryBlock() && !is_eager_inline()) {
1224 // If we are inlining a function non-eagerly and we are not inside a
1225 // try block, then the catch block already exists.
1226 new (node->exception_handler_info()) ExceptionHandlerInfo(
1227 catch_block.ref->block_ptr(), catch_block.deopt_frame_distance);
1228 } else {
1229 // If we are inside a try block for the current builder or if we are
1230 // inside an eager inlined call inside a try block, the catch basic
1231 // block doesn't exist yet, use the ref-list mechanism.
1232 new (node->exception_handler_info()) ExceptionHandlerInfo(
1233 catch_block.ref, catch_block.deopt_frame_distance);
1234 }
1235
1236 DCHECK(node->exception_handler_info()->HasExceptionHandler());
1237 DCHECK(!node->exception_handler_info()->ShouldLazyDeopt());
1238
1239 current_block_->AddExceptionHandler(node->exception_handler_info());
1240
1241 if (IsInsideTryBlock()) {
1242 // Merge the current state into the handler state.
1243 auto state = GetCatchBlockFrameState();
1244 DCHECK_NOT_NULL(state);
1245 state->MergeThrow(this, compilation_unit_,
1248 }
1249 } else {
1250 // Patch no exception handler marker.
1251 // TODO(victorgomes): Avoid allocating exception handler data in this
1252 // case.
1253 new (node->exception_handler_info()) ExceptionHandlerInfo();
1254 DCHECK(!node->exception_handler_info()->HasExceptionHandler());
1255 if constexpr (std::is_same_v<NodeT, CallKnownJSFunction>) {
1257 // Ensure that we always have the handler of inline call candidates.
1258 current_block_->AddExceptionHandler(node->exception_handler_info());
1259 }
1260 }
1261 }
1262 }
1263 }
1264
1265 // True if the bytecode iterator of the current graph builder is inside a
1266 // try-block region.
1267 bool IsInsideTryBlock() const { return catch_block_stack_.size() > 0; }
1268
1273
1275 if (IsInsideTryBlock()) {
1276 // Inside a try-block.
1277 int offset = catch_block_stack_.top().handler;
1278 return {&jump_targets_[offset],
1280 }
1281 if (!is_inline()) {
1282 return CatchBlockDetails{};
1283 }
1285 }
1286
1288 if (IsInsideTryBlock()) {
1289 return {info->catch_block_ref_address(), !info->ShouldLazyDeopt(), 0};
1290 }
1291 if (!is_inline()) {
1292 return CatchBlockDetails{};
1293 }
1295 }
1296
1297 bool ContextMayAlias(ValueNode* context,
1298 compiler::OptionalScopeInfoRef scope_info);
1301 ValueNode* context, int slot_index,
1302 ContextSlotMutability slot_mutability);
1305 ContextSlotMutability slot_mutability,
1306 ContextKind context_kind);
1308 int index,
1309 ValueNode* value,
1310 Node** store);
1312 ValueNode* value,
1313 ContextKind context_kind);
1315 void MinimizeContextChainDepth(ValueNode** context, size_t* depth);
1316 void EscapeContext();
1317 void BuildLoadContextSlot(ValueNode* context, size_t depth, int slot_index,
1318 ContextSlotMutability slot_mutability,
1319 ContextKind context_kind);
1320 ReduceResult BuildStoreContextSlot(ValueNode* context, size_t depth,
1321 int slot_index, ValueNode* value,
1322 ContextKind context_kind);
1323
1324 void BuildStoreMap(ValueNode* object, compiler::MapRef map,
1326
1329 ValueNode* property_array);
1330
1331 template <Builtin kBuiltin>
1332 CallBuiltin* BuildCallBuiltin(std::initializer_list<ValueNode*> inputs) {
1334 if constexpr (Descriptor::HasContextParameter()) {
1336 inputs.size() + 1,
1337 [&](CallBuiltin* call_builtin) {
1338 int arg_index = 0;
1339 for (auto* input : inputs) {
1340 call_builtin->set_arg(arg_index++, input);
1341 }
1342 },
1343 kBuiltin, GetContext());
1344 } else {
1346 inputs.size(),
1347 [&](CallBuiltin* call_builtin) {
1348 int arg_index = 0;
1349 for (auto* input : inputs) {
1350 call_builtin->set_arg(arg_index++, input);
1351 }
1352 },
1353 kBuiltin);
1354 }
1355 }
1356
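// Illustrative sketch (hedged; the builtin and inputs are examples only):
// callers typically build a builtin call and then store its result, e.g.
//
//   CallBuiltin* call = BuildCallBuiltin<Builtin::kToString>({value});
//   SetAccumulator(call);
//
// When the builtin's descriptor declares a context parameter, the current
// context from GetContext() is appended automatically.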
1357 template <Builtin kBuiltin>
1359 std::initializer_list<ValueNode*> inputs,
1360 compiler::FeedbackSource const& feedback,
1361 CallBuiltin::FeedbackSlotType slot_type = CallBuiltin::kTaggedIndex) {
1362 CallBuiltin* call_builtin = BuildCallBuiltin<kBuiltin>(inputs);
1363 call_builtin->set_feedback(feedback, slot_type);
1364#ifdef DEBUG
1365 // Check that the last parameters are kSlot and kVector.
1367 int slot_index = call_builtin->InputCountWithoutContext();
1368 int vector_index = slot_index + 1;
1369 DCHECK_EQ(slot_index, Descriptor::kSlot);
1370 // TODO(victorgomes): Rename all kFeedbackVector parameters in the builtins
1371 // to kVector.
1372 DCHECK_EQ(vector_index, Descriptor::kVector);
1373#endif // DEBUG
1374 return call_builtin;
1375 }
1376
1378 Builtin builtin, ValueNode* target, ValueNode* new_target,
1379 std::initializer_list<ValueNode*> inputs) {
1380 DCHECK(Builtins::IsCpp(builtin));
1381 const size_t input_count = inputs.size() + CallCPPBuiltin::kFixedInputCount;
1382 return AddNewNode<CallCPPBuiltin>(
1383 input_count,
1384 [&](CallCPPBuiltin* call_builtin) {
1385 int arg_index = 0;
1386 for (auto* input : inputs) {
1387 call_builtin->set_arg(arg_index++, input);
1388 }
1389 },
1390 builtin, GetTaggedValue(target), GetTaggedValue(new_target),
1391 GetTaggedValue(GetContext()));
1392 }
1393
1394 ReduceResult BuildLoadGlobal(compiler::NameRef name,
1395 compiler::FeedbackSource& feedback_source,
1396 TypeofMode typeof_mode);
1397
1398 ValueNode* BuildToString(ValueNode* value, ToString::ConversionMode mode);
1399
1400 constexpr bool RuntimeFunctionCanThrow(Runtime::FunctionId function_id) {
1401#define BAILOUT(name, ...) \
1402 if (function_id == Runtime::k##name) { \
1403 return true; \
1404 }
1406#undef BAILOUT
1407 return false;
1408 }
1409
1411 std::initializer_list<ValueNode*> inputs) {
1412 CallRuntime* result = AddNewNode<CallRuntime>(
1413 inputs.size() + CallRuntime::kFixedInputCount,
1414 [&](CallRuntime* call_runtime) {
1415 int arg_index = 0;
1416 for (auto* input : inputs) {
1417 call_runtime->set_arg(arg_index++, GetTaggedValue(input));
1418 }
1419 },
1420 function_id, GetContext());
1421
1422 if (RuntimeFunctionCanThrow(function_id)) {
1423 return BuildAbort(AbortReason::kUnexpectedReturnFromThrow);
1424 }
1425 return result;
1426 }
1427
1429 // Create a block rather than calling finish, since we don't yet know the
1430 // next block's offset before the loop skipping the rest of the bytecodes.
1431 FinishBlock<Abort>({}, reason);
1432 return ReduceResult::DoneWithAbort();
1433 }
1434
1435 void Print(const char* str) {
1436 Handle<String> string_handle =
1437 local_isolate()->factory()->NewStringFromAsciiChecked(
1438 str, AllocationType::kOld);
1439 ValueNode* string_node = GetConstant(MakeRefAssumeMemoryFence(
1440 broker(), broker()->CanonicalPersistentHandle(string_handle)));
1441 CHECK(BuildCallRuntime(Runtime::kGlobalPrint, {string_node}).IsDone());
1442 }
1443
1444 void Print(ValueNode* value) {
1445 CHECK(BuildCallRuntime(Runtime::kDebugPrint, {value}).IsDone());
1446 }
1447
1448 void Print(const char* str, ValueNode* value) {
1449 Print(str);
1450 Print(value);
1451 }
1452
1454 return GetConstant(
1455 compilation_unit_->GetTopLevelCompilationUnit()->feedback_cell());
1456 }
1457
1459 return current_interpreter_frame_.get(
1460 interpreter::Register::function_closure());
1461 }
1462
1464 return current_interpreter_frame_.get(
1465 interpreter::Register::current_context());
1466 }
1467
1468 void SetContext(ValueNode* context) {
1469 current_interpreter_frame_.set(interpreter::Register::current_context(),
1470 context);
1471 }
1472
1473 FeedbackSlot GetSlotOperand(int operand_index) const {
1474 return iterator_.GetSlotOperand(operand_index);
1475 }
1476
1477 uint32_t GetFlag8Operand(int operand_index) const {
1478 return iterator_.GetFlag8Operand(operand_index);
1479 }
1480
1481 uint32_t GetFlag16Operand(int operand_index) const {
1482 return iterator_.GetFlag16Operand(operand_index);
1483 }
1484
1485 template <class T>
1487 requires(is_taggable_v<T>)
1488 {
1489 // The BytecodeArray itself was fetched by using a barrier so all reads
1490 // from the constant pool are safe.
1491 return MakeRefAssumeMemoryFence(
1492 broker(), broker()->CanonicalPersistentHandle(
1493 Cast<T>(iterator_.GetConstantForIndexOperand(
1494 operand_index, local_isolate()))));
1495 }
1496
1498 auto it = graph_->external_references().find(reference.address());
1499 if (it == graph_->external_references().end()) {
1500 ExternalConstant* node =
1501 CreateNewConstantNode<ExternalConstant>(0, reference);
1502 graph_->external_references().emplace(reference.address(), node);
1503 return node;
1504 }
1505 return it->second;
1506 }
1507
1509 auto it = graph_->root().find(index);
1510 if (it == graph_->root().end()) {
1511 RootConstant* node = CreateNewConstantNode<RootConstant>(0, index);
1512 graph_->root().emplace(index, node);
1513 return node;
1514 }
1515 return it->second;
1516 }
1517
1519 return GetRootConstant(value ? RootIndex::kTrueValue
1520 : RootIndex::kFalseValue);
1521 }
1522
1523 ValueNode* GetConstant(compiler::ObjectRef ref);
1524
1525 ValueNode* GetTrustedConstant(compiler::HeapObjectRef ref,
1526 IndirectPointerTag tag);
1527
1529 DCHECK(!graph_->register_inputs().has(reg));
1530 graph_->register_inputs().set(reg);
1531 return AddNewNode<RegisterInput>({}, reg);
1532 }
1533
1534#define DEFINE_IS_ROOT_OBJECT(type, name, CamelName) \
1535 bool Is##CamelName(ValueNode* value) const { \
1536 if (RootConstant* constant = value->TryCast<RootConstant>()) { \
1537 return constant->index() == RootIndex::k##CamelName; \
1538 } \
1539 return false; \
1540 }
1542#undef DEFINE_IS_ROOT_OBJECT
1543
1544 // Move an existing ValueNode between two registers. You can pass
1545 // virtual_accumulator as the src or dst to move in or out of the accumulator.
1548 // We shouldn't be moving newly created nodes between registers.
1549 DCHECK(!IsNodeCreatedForThisBytecode(current_interpreter_frame_.get(src)));
1550 DCHECK_NOT_NULL(current_interpreter_frame_.get(src));
1551
1552 current_interpreter_frame_.set(dst, current_interpreter_frame_.get(src));
1553 }
1554
1555 ValueNode* GetTaggedValue(ValueNode* value,
1556 UseReprHintRecording record_use_repr_hint =
1557 UseReprHintRecording::kRecord);
1558 ReduceResult GetSmiValue(ValueNode* value,
1559 UseReprHintRecording record_use_repr_hint =
1560 UseReprHintRecording::kRecord);
1561
1563 UseReprHintRecording record_use_repr_hint =
1564 UseReprHintRecording::kRecord) {
1565 ValueNode* value = current_interpreter_frame_.get(reg);
1566 return GetSmiValue(value, record_use_repr_hint);
1567 }
1568
1570 UseReprHintRecording record_use_repr_hint =
1571 UseReprHintRecording::kRecord) {
1572 ValueNode* value = current_interpreter_frame_.get(reg);
1573 return GetTaggedValue(value, record_use_repr_hint);
1574 }
1575
1576 ValueNode* GetInternalizedString(interpreter::Register reg);
1577
1578 // Get an Int32 representation node whose value is equivalent to the ToInt32
1579 // truncation of the given node (including a ToNumber call). Only trivial
1580 // ToNumber is allowed -- values that are already numeric, and optionally
1581 // oddballs.
1582 //
1583 // Deopts if the ToNumber is non-trivial.
1584 ValueNode* GetTruncatedInt32ForToNumber(
1585 ValueNode* value, NodeType allowed_input_type,
1586 TaggedToFloat64ConversionType conversion_type);
1587
1589 interpreter::Register reg, NodeType allowed_input_type,
1590 TaggedToFloat64ConversionType conversion_type) {
1591 return GetTruncatedInt32ForToNumber(current_interpreter_frame_.get(reg),
1592 allowed_input_type, conversion_type);
1593 }
1594
1595 // Get an Int32 representation node whose value is equivalent to the ToUint8
1596 // truncation of the given node (including a ToNumber call). Only trivial
1597 // ToNumber is allowed -- values that are already numeric, and optionally
1598 // oddballs.
1599 //
1600 // Deopts if the ToNumber is non-trivial.
1601 ValueNode* GetUint8ClampedForToNumber(ValueNode* value);
1602
1604 return GetUint8ClampedForToNumber(current_interpreter_frame_.get(reg));
1605 }
1606
1607 std::optional<int32_t> TryGetInt32Constant(ValueNode* value);
1608 std::optional<uint32_t> TryGetUint32Constant(ValueNode* value);
1609
1610 // Get an Int32 representation node whose value is equivalent to the given
1611 // node.
1612 //
1613 // Deopts if the value is not exactly representable as an Int32.
1614 ValueNode* GetInt32(ValueNode* value, bool can_be_heap_number = false);
1615
1616 void EnsureInt32(ValueNode* value, bool can_be_heap_number = false) {
1617 // Either the value is Int32 already, or we force a conversion to Int32 and
1618 // cache the value in its alternative representation node.
1619 GetInt32(value, can_be_heap_number);
1620 }
1621
1623 EnsureInt32(current_interpreter_frame_.get(reg));
1624 }
1625
1626 std::optional<double> TryGetFloat64Constant(
1627 ValueNode* value, TaggedToFloat64ConversionType conversion_type);
1628
1629 // Get a Float64 representation node whose value is equivalent to the given
1630 // node.
1631 //
1632 // Deopts if the value is not exactly representable as a Float64.
1633 ValueNode* GetFloat64(ValueNode* value);
1634
1636 return GetFloat64(current_interpreter_frame_.get(reg));
1637 }
1638
1639 // Get a Float64 representation node whose value is the result of ToNumber on
1640 // the given node. Only trivial ToNumber is allowed -- values that are already
1641 // numeric, and optionally oddballs.
1642 //
1643 // Deopts if the ToNumber value is not exactly representable as a Float64, or
1644 // the ToNumber is non-trivial.
1645 ValueNode* GetFloat64ForToNumber(
1646 ValueNode* value, NodeType allowed_input_type,
1647 TaggedToFloat64ConversionType conversion_type);
1648
1650 interpreter::Register reg, NodeType allowed_input_type,
1651 TaggedToFloat64ConversionType conversion_type) {
1652 return GetFloat64ForToNumber(current_interpreter_frame_.get(reg),
1653 allowed_input_type, conversion_type);
1654 }
1655
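// Illustrative sketch (hedged; Float64Add and the enum values are examples,
// not prescribed by this header): a numeric bytecode handler might lower its
// operands through these helpers roughly as
//
//   ValueNode* left = GetFloat64ForToNumber(
//       iterator_.GetRegisterOperand(0), NodeType::kNumber,
//       TaggedToFloat64ConversionType::kOnlyNumber);
//   ValueNode* right = GetFloat64ForToNumber(
//       iterator_.GetRegisterOperand(1), NodeType::kNumber,
//       TaggedToFloat64ConversionType::kOnlyNumber);
//   SetAccumulator(AddNewNode<Float64Add>({left, right}));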
1656 ValueNode* GetHoleyFloat64ForToNumber(
1657 ValueNode* value, NodeType allowed_input_type,
1658 TaggedToFloat64ConversionType conversion_type);
1659
1661 interpreter::Register reg, NodeType allowed_input_type,
1662 TaggedToFloat64ConversionType conversion_type) {
1663 return GetHoleyFloat64ForToNumber(current_interpreter_frame_.get(reg),
1664 allowed_input_type, conversion_type);
1665 }
1666
1668 return current_interpreter_frame_.get(
1669 interpreter::Register::virtual_accumulator());
1670 }
1671
1673 UseReprHintRecording record_use_repr_hint =
1674 UseReprHintRecording::kRecord) {
1675 return GetSmiValue(interpreter::Register::virtual_accumulator(),
1676 record_use_repr_hint);
1677 }
1678
1680 NodeType allowed_input_type,
1681 TaggedToFloat64ConversionType conversion_type) {
1682 return GetTruncatedInt32ForToNumber(
1683 interpreter::Register::virtual_accumulator(), allowed_input_type,
1684 conversion_type);
1685 }
1686
1688 return GetUint8ClampedForToNumber(
1689 interpreter::Register::virtual_accumulator());
1690 }
1691
1693 NodeType allowed_input_type,
1694 TaggedToFloat64ConversionType conversion_type) {
1695 return GetHoleyFloat64ForToNumber(
1696 interpreter::Register::virtual_accumulator(), allowed_input_type,
1697 conversion_type);
1698 }
1699
1701 DCHECK_EQ(value->properties().value_representation(),
1702 ValueRepresentation::kFloat64);
1703
1704 // We only need to check for silenced NaN in non-conversion nodes or
1705 // conversion from tagged, since they can't be signalling NaNs.
1706 if (value->properties().is_conversion()) {
1707 // A conversion node should have at least one input.
1708 DCHECK_GE(value->input_count(), 1);
1709 // If the conversion node is tagged, we could be reading a fabricated sNaN
1710 // value (built using an ArrayBuffer, for example).
1711 if (!value->input(0).node()->properties().is_tagged()) {
1712 return value;
1713 }
1714 }
1715
1716 // Special case constants, since we know what they are.
1717 Float64Constant* constant = value->TryCast<Float64Constant>();
1718 if (constant) {
1719 constexpr double quiet_NaN = std::numeric_limits<double>::quiet_NaN();
1720 if (!constant->value().is_nan()) return constant;
1721 return GetFloat64Constant(quiet_NaN);
1722 }
1723
1724 // Silence all other values.
1725 return AddNewNode<HoleyFloat64ToMaybeNanFloat64>({value});
1726 }
1727
1728 bool IsRegisterEqualToAccumulator(int operand_index) {
1729 interpreter::Register source = iterator_.GetRegisterOperand(operand_index);
1730 return current_interpreter_frame_.get(source) ==
1731 current_interpreter_frame_.accumulator();
1732 }
1733
1734 ValueNode* LoadRegister(int operand_index) {
1735 return current_interpreter_frame_.get(
1736 iterator_.GetRegisterOperand(operand_index));
1737 }
1738
1740 int operand_index, NodeType allowed_input_type,
1741 TaggedToFloat64ConversionType conversion_type) {
1742 return GetHoleyFloat64ForToNumber(
1743 iterator_.GetRegisterOperand(operand_index), allowed_input_type,
1744 conversion_type);
1745 }
1746
1747 template <typename NodeT>
1748 void SetAccumulator(NodeT* node) {
1749 // Accumulator stores are equivalent to stores to the virtual accumulator
1750 // register.
1751 StoreRegister(interpreter::Register::virtual_accumulator(), node);
1752 }
1753
1755 DCHECK(interpreter::Bytecodes::ClobbersAccumulator(
1756 iterator_.current_bytecode()));
1757 current_interpreter_frame_.set_accumulator(
1758 GetRootConstant(RootIndex::kOptimizedOut));
1759 }
1760
1762 // GetSecondReturnedValue must be added just after a node that calls a
1763 // builtin that expects 2 returned values. It simply binds kReturnRegister1
1764 // to a value node. Since the previous node must have been a builtin
1765 // call, the register is available in the register allocator. No gap moves
1766 // would be emitted between these two nodes.
1767 if (result->opcode() == Opcode::kCallRuntime) {
1768 DCHECK_EQ(result->Cast<CallRuntime>()->ReturnCount(), 2);
1769 } else if (result->opcode() == Opcode::kCallBuiltin) {
1770 DCHECK_EQ(result->Cast<CallBuiltin>()->ReturnCount(), 2);
1771 } else {
1772 DCHECK_EQ(result->opcode(), Opcode::kForInPrepare);
1773 }
1774 // {result} must be the last node in the current block.
1775 DCHECK_EQ(node_buffer().back(), result);
1776 return AddNewNode<GetSecondReturnedValue>({});
1777 }
1778
1779 template <typename NodeT>
1781 static_assert(std::is_base_of_v<ValueNode, NodeT>);
1782 DCHECK(HasOutputRegister(target));
1783 current_interpreter_frame_.set(target, value);
1784
1785 // Make sure the lazy deopt info of this value, if any, is registered as
1786 // mutating this register.
1787 DCHECK_IMPLIES(value->properties().can_lazy_deopt() &&
1788 IsNodeCreatedForThisBytecode(value),
1789 value->lazy_deopt_info()->IsResultRegister(target));
1790 }
1791
1793 DCHECK_IMPLIES(value->properties().can_lazy_deopt(),
1794 !IsNodeCreatedForThisBytecode(value));
1795 current_interpreter_frame_.set(interpreter::Register::virtual_accumulator(),
1796 value);
1797 }
1798
1799 template <typename NodeT>
1801 std::pair<interpreter::Register, interpreter::Register> target,
1802 NodeT* value) {
1803 const interpreter::Register target0 = target.first;
1804 const interpreter::Register target1 = target.second;
1805
1806 DCHECK_EQ(interpreter::Register(target0.index() + 1), target1);
1807 DCHECK_EQ(value->ReturnCount(), 2);
1808
1809 if (!v8_flags.maglev_cse) {
1810 // TODO(olivf): CSE might deduplicate this value and the one below.
1811 DCHECK_NE(0, new_nodes_.count(value));
1812 }
1813 DCHECK(HasOutputRegister(target0));
1814 current_interpreter_frame_.set(target0, value);
1815
1816 ValueNode* second_value = GetSecondValue(value);
1817 if (!v8_flags.maglev_cse) {
1818 DCHECK_NE(0, new_nodes_.count(second_value));
1819 }
1820 DCHECK(HasOutputRegister(target1));
1821 current_interpreter_frame_.set(target1, second_value);
1822
1823 // Make sure the lazy deopt info of this value, if any, is registered as
1824 // mutating these registers.
1825 DCHECK_IMPLIES(value->properties().can_lazy_deopt() &&
1826 IsNodeCreatedForThisBytecode(value),
1827 value->lazy_deopt_info()->IsResultRegister(target0));
1828 DCHECK_IMPLIES(value->properties().can_lazy_deopt() &&
1829 IsNodeCreatedForThisBytecode(value),
1830 value->lazy_deopt_info()->IsResultRegister(target1));
1831 }
1832
1833 std::pair<interpreter::Register, int> GetResultLocationAndSize() const;
1834#ifdef DEBUG
1835 bool HasOutputRegister(interpreter::Register reg) const;
1836#endif
1837
1838 DeoptFrame* GetCallerDeoptFrame();
1839 DeoptFrame* GetDeoptFrameForEagerCall(const MaglevCompilationUnit* unit,
1840 ValueNode* closure,
1842 DeoptFrame GetDeoptFrameForLazyDeopt(interpreter::Register result_location,
1843 int result_size);
1844 DeoptFrame GetDeoptFrameForLazyDeoptHelper(
1845 interpreter::Register result_location, int result_size,
1846 DeoptFrameScope* scope, bool mark_accumulator_dead);
1847 InterpretedDeoptFrame GetDeoptFrameForEntryStackCheck();
1848
1849 template <typename NodeT>
1851 if constexpr (NodeT::kProperties.can_read() ||
1852 NodeT::kProperties.can_deopt() ||
1853 NodeT::kProperties.can_throw()) {
1854 unobserved_context_slot_stores_.clear();
1855 }
1856
1857 if constexpr (Node::opcode_of<NodeT> != Opcode::kAllocationBlock &&
1858 (NodeT::kProperties.can_deopt() ||
1859 NodeT::kProperties.can_throw() ||
1860 NodeT::kProperties.can_allocate())) {
1861 ClearCurrentAllocationBlock();
1862 }
1863
1864 // Don't do anything for nodes without side effects.
1865 if constexpr (!NodeT::kProperties.can_write()) return;
1866
1867 if (v8_flags.maglev_cse) {
1868 known_node_aspects().increment_effect_epoch();
1869 }
1870
1871 // We only need to clear unstable node aspects on the current builder, not
1872 // the parent, since we'll anyway copy the known_node_aspects to the parent
1873 // once we finish the inlined function.
1874
1875 if constexpr (IsElementsArrayWrite(Node::opcode_of<NodeT>)) {
1876 node->ClearElementsProperties(known_node_aspects());
1877 if (is_loop_effect_tracking()) {
1878 loop_effects_->keys_cleared.insert(
1879 KnownNodeAspects::LoadedPropertyMapKey::Elements());
1880 }
1881 } else if constexpr (!IsSimpleFieldStore(Node::opcode_of<NodeT>) &&
1882 !IsTypedArrayStore(Node::opcode_of<NodeT>)) {
1883 // Don't change known node aspects for simple field stores. The only
1884 // relevant side effect on these is writes to objects which invalidate
1885 // loaded properties and context slots, and we invalidate these already as
1886 // part of emitting the store.
1887 node->ClearUnstableNodeAspects(known_node_aspects());
1888 if (is_loop_effect_tracking()) {
1889 loop_effects_->unstable_aspects_cleared = true;
1890 }
1891 }
1892
1893 // Simple field stores can't possibly change or migrate the map.
1894 static constexpr bool is_possible_map_change =
1895 !IsSimpleFieldStore(Node::opcode_of<NodeT>);
1896
1897 // All user-observable side effects need to clear state that is cached on
1898 // the builder. This reset has to be propagated up through the parents.
1899 // TODO(leszeks): What side effects aren't observable? Maybe migrations?
1900 ResetBuilderCachedState<is_possible_map_change>();
1901 }
1902
1903 template <bool is_possible_map_change = true>
1905 latest_checkpointed_frame_.reset();
1906
1907 // If a map might have changed, then we need to re-check it for for-in.
1908 // TODO(leszeks): Track this on merge states / known node aspects, rather
1909 // than on the graph, so that it can survive control flow.
1910 if constexpr (is_possible_map_change) {
1911 current_for_in_state.receiver_needs_map_check = true;
1912 }
1913 }
1914
1915 int next_offset() const {
1916 return iterator_.current_offset() + iterator_.current_bytecode_size();
1917 }
1919 return GetInLivenessFor(iterator_.current_offset());
1920 }
1922 return bytecode_analysis().GetInLivenessFor(offset);
1923 }
1925 return GetOutLivenessFor(iterator_.current_offset());
1926 }
1928 return bytecode_analysis().GetOutLivenessFor(offset);
1929 }
1930
1931 void StartNewBlock(int offset, BasicBlock* predecessor) {
1932 StartNewBlock(predecessor, merge_states_[offset], jump_targets_[offset]);
1933 }
1934
1935 void StartNewBlock(BasicBlock* predecessor,
1937 BasicBlockRef& refs_to_block) {
1939 current_block_ = zone()->New<BasicBlock>(merge_state, zone());
1940 if (merge_state == nullptr) {
1941 DCHECK_NOT_NULL(predecessor);
1942 current_block_->set_predecessor(predecessor);
1943 } else {
1945 }
1946 refs_to_block.Bind(current_block_);
1947 }
1948
1949 template <UseReprHintRecording hint = UseReprHintRecording::kRecord>
1952 if (repr == expected) return input;
1953 switch (expected) {
1954 case ValueRepresentation::kTagged:
1955 return GetTaggedValue(input, hint);
1956 case ValueRepresentation::kInt32:
1957 return GetInt32(input);
1958 case ValueRepresentation::kFloat64:
1959 case ValueRepresentation::kHoleyFloat64:
1960 return GetFloat64(input);
1961 case ValueRepresentation::kUint32:
1962 case ValueRepresentation::kIntPtr:
1963 // These conversions should be done explicitly beforehand.
1964 UNREACHABLE();
1965 }
1966 }
1967
1968 template <typename NodeT>
1970 // We do not record a Tagged use on Return, since they are never on the hot
1971 // path, and will lead to a maximum of one additional Tagging operation in
1972 // the worst case. This allows loop accumulators to be untagged even if they
1973 // are later returned.
1974 if constexpr (std::is_same_v<NodeT, Return>) {
1975 return UseReprHintRecording::kDoNotRecord;
1976 } else {
1977 return UseReprHintRecording::kRecord;
1978 }
1979 }
1980
1981 template <typename NodeT>
1982 void SetNodeInputs(NodeT* node, std::initializer_list<ValueNode*> inputs) {
1983 // Nodes with zero input count don't have kInputTypes defined.
1984 if constexpr (NodeT::kInputCount > 0) {
1985 constexpr UseReprHintRecording hint = ShouldRecordUseReprHint<NodeT>();
1986 int i = 0;
1987 for (ValueNode* input : inputs) {
1988 DCHECK_NOT_NULL(input);
1989 node->set_input(i, ConvertInputTo<hint>(input, NodeT::kInputTypes[i]));
1990 i++;
1991 }
1992 }
1993 }
1994
1996 ZoneVector<Node*>& nodes = current_block_->nodes();
1997 size_t old_size = nodes.size();
1998 nodes.resize(old_size + node_buffer().size());
1999 std::copy(node_buffer().begin(), node_buffer().end(),
2000 nodes.begin() + old_size);
2001 node_buffer().clear();
2002 }
2003
2004 template <typename ControlNodeT, typename... Args>
2005 BasicBlock* FinishBlock(std::initializer_list<ValueNode*> control_inputs,
2006 Args&&... args) {
2007 ControlNodeT* control_node = NodeBase::New<ControlNodeT>(
2008 zone(), control_inputs.size(), std::forward<Args>(args)...);
2009 SetNodeInputs(control_node, control_inputs);
2010 AttachEagerDeoptInfo(control_node);
2011 AttachDeoptCheckpoint(control_node);
2012 static_assert(!ControlNodeT::kProperties.can_lazy_deopt());
2013 static_assert(!ControlNodeT::kProperties.can_throw());
2014 static_assert(!ControlNodeT::kProperties.can_write());
2015 control_node->set_owner(current_block_);
2016 current_block_->set_control_node(control_node);
2017 // Clear unobserved context slot stores when there is any control flow.
2018 // TODO(olivf): More precision could be achieved by tracking dominating
2019 // stores within known_node_aspects. For this we could use a stack of
2020 // stores, which we push on split and pop on merge.
2021 unobserved_context_slot_stores_.clear();
2022
2023 // TODO(olivf): Support allocation folding across control flow.
2024 ClearCurrentAllocationBlock();
2025
2026 BasicBlock* block = current_block_;
2027 FlushNodesToBlock();
2028 current_block_ = nullptr;
2029
2030 graph()->Add(block);
2031 if (has_graph_labeller()) {
2032 graph_labeller()->RegisterNode(control_node, compilation_unit_,
2033 BytecodeOffset(iterator_.current_offset()),
2034 current_source_position_);
2035 if (v8_flags.trace_maglev_graph_building) {
2036 bool kSkipTargets = true;
2037 std::cout << " " << control_node << " "
2038 << PrintNodeLabel(graph_labeller(), control_node) << ": "
2039 << PrintNode(graph_labeller(), control_node, kSkipTargets)
2040 << std::endl;
2041 }
2042 }
2043 return block;
2044 }
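// Illustrative usage sketch (added; a hedged example, not the verbatim call
// site): a typical caller terminates the current block with a control node
// and binds the result to a jump target, e.g.
//
//   BasicBlock* block =
//       FinishBlock<Jump>({}, &jump_targets_[target_offset]);
//   MergeIntoFrameState(block, target_offset);
//
// Here `target_offset` is a placeholder bytecode offset and Jump is assumed
// to be a control node with no value inputs.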
2045
2046 void StartFallthroughBlock(int next_block_offset, BasicBlock* predecessor) {
2047 // Start a new block for the fallthrough path, unless it's a merge point, in
2048 // which case we merge our state into it. That merge-point could also be a
2049 // loop header, in which case the merge state might not exist yet (if the
2050 // only predecessors are this path and the JumpLoop).
2052
2053 if (predecessor_count(next_block_offset) == 1) {
2054 if (v8_flags.trace_maglev_graph_building) {
2055 std::cout << "== New block (single fallthrough) at "
2056 << *compilation_unit_->shared_function_info().object()
2057 << "==" << std::endl;
2058 PrintVirtualObjects();
2059 }
2060 StartNewBlock(next_block_offset, predecessor);
2061 } else {
2062 MergeIntoFrameState(predecessor, next_block_offset);
2063 }
2064 }
2065
2066 ValueNode* GetValueOrUndefined(ValueNode* maybe_value) {
2067 if (maybe_value == nullptr) {
2068 return GetRootConstant(RootIndex::kUndefinedValue);
2069 }
2070 return maybe_value;
2071 }
2072
2073 ValueNode* GetConvertReceiver(compiler::SharedFunctionInfoRef shared,
2074 const CallArguments& args);
2075 base::Vector<ValueNode*> GetArgumentsAsArrayOfValueNodes(
2077 compiler::SharedFunctionInfoRef shared, const CallArguments& args);
2078 compiler::OptionalHeapObjectRef TryGetConstant(
2079 ValueNode* node, ValueNode** constant_node = nullptr);
2080 std::optional<ValueNode*> TryGetConstantAlternative(ValueNode* node);
2081
2082 template <typename LoadNode>
2083 MaybeReduceResult TryBuildLoadDataView(const CallArguments& args,
2084 ExternalArrayType type);
2085 template <typename StoreNode, typename Function>
2086 MaybeReduceResult TryBuildStoreDataView(const CallArguments& args,
2087 ExternalArrayType type,
2088 Function&& getValue);
2089
2090#ifdef V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA
2091#define CONTINUATION_PRESERVED_EMBEDDER_DATA_LIST(V) \
2092 V(GetContinuationPreservedEmbedderData) \
2093 V(SetContinuationPreservedEmbedderData)
2094#else
2095#define CONTINUATION_PRESERVED_EMBEDDER_DATA_LIST(V)
2096#endif // V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA
2097
2098#define MAGLEV_REDUCED_BUILTIN(V) \
2099 V(ArrayConstructor) \
2100 V(ArrayForEach) \
2101 V(ArrayIsArray) \
2102 V(ArrayIteratorPrototypeNext) \
2103 V(ArrayMap) \
2104 V(ArrayPrototypeEntries) \
2105 V(ArrayPrototypeKeys) \
2106 V(ArrayPrototypeValues) \
2107 V(ArrayPrototypePush) \
2108 V(ArrayPrototypePop) \
2109 V(DataViewPrototypeGetInt8) \
2110 V(DataViewPrototypeSetInt8) \
2111 V(DataViewPrototypeGetInt16) \
2112 V(DataViewPrototypeSetInt16) \
2113 V(DataViewPrototypeGetInt32) \
2114 V(DataViewPrototypeSetInt32) \
2115 V(DataViewPrototypeGetFloat64) \
2116 V(DataViewPrototypeSetFloat64) \
2117 V(FunctionPrototypeApply) \
2118 V(FunctionPrototypeCall) \
2119 V(FunctionPrototypeHasInstance) \
2120 V(MapPrototypeGet) \
2121 V(ObjectPrototypeGetProto) \
2122 V(ObjectGetPrototypeOf) \
2123 V(ReflectGetPrototypeOf) \
2124 V(ObjectPrototypeHasOwnProperty) \
2125 V(NumberParseInt) \
2126 V(MathCeil) \
2127 V(MathFloor) \
2128 V(MathPow) \
2129 V(MathAbs) \
2130 V(MathRound) \
2131 V(SetPrototypeHas) \
2132 V(StringConstructor) \
2133 V(StringFromCharCode) \
2134 V(StringPrototypeCharCodeAt) \
2135 V(StringPrototypeCodePointAt) \
2136 V(StringPrototypeIterator) \
2137 IF_INTL(V, StringPrototypeLocaleCompareIntl) \
2138 CONTINUATION_PRESERVED_EMBEDDER_DATA_LIST(V) \
2139 IEEE_754_UNARY_LIST(V)
2140
2141#define DEFINE_BUILTIN_REDUCER(Name, ...) \
2142 MaybeReduceResult TryReduce##Name(compiler::JSFunctionRef target, \
2143 CallArguments& args);
2144 MAGLEV_REDUCED_BUILTIN(DEFINE_BUILTIN_REDUCER)
2145 #undef DEFINE_BUILTIN_REDUCER
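// Added note: expanding the X-macro list above is purely mechanical; for
// instance, the MathFloor entry in MAGLEV_REDUCED_BUILTIN produces
//
//   MaybeReduceResult TryReduceMathFloor(compiler::JSFunctionRef target,
//                                        CallArguments& args);
//
// and likewise for every other listed builtin.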
2146
2147 using InitialCallback = std::function<ReduceResult(ValueNode*)>;
2148 using ProcessElementCallback = std::function<void(ValueNode*, ValueNode*)>;
2151 ValueNode*, ValueNode*)>;
2152
2153 // Used for reducing Array.prototype.forEach and Array.prototype.map.
2154 // initial_callback will be called to generate code before starting the
2155 // iteration, and process_element_callback will be called to generate code
2156 // for each result element.
2157 MaybeReduceResult TryReduceArrayIteratingBuiltin(
2158 const char* name, compiler::JSFunctionRef target, CallArguments& args,
2159 GetDeoptScopeCallback get_eager_deopt_scope,
2160 GetDeoptScopeCallback get_lazy_deopt_scope,
2161 const std::optional<InitialCallback>& initial_callback = {},
2162 const std::optional<ProcessElementCallback>& process_element_callback =
2163 {});
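// Added commentary (hedged sketch; names are placeholders, not the real call
// site): for a forEach-style reduction both optional callbacks can be left
// empty,
//
//   TryReduceArrayIteratingBuiltin("Array.prototype.forEach", target, args,
//                                  get_eager_deopt_scope,
//                                  get_lazy_deopt_scope);
//
// whereas a map-style reduction would allocate the result array in
// initial_callback and store each mapped value in process_element_callback.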
2164
2165 MaybeReduceResult TryReduceGetProto(ValueNode* node);
2166
2167 template <typename MapKindsT, typename IndexToElementsKindFunc,
2168 typename BuildKindSpecificFunc>
2169 MaybeReduceResult BuildJSArrayBuiltinMapSwitchOnElementsKind(
2170 ValueNode* receiver, const MapKindsT& map_kinds,
2171 MaglevSubGraphBuilder& sub_graph,
2172 std::optional<MaglevSubGraphBuilder::Label>& do_return,
2173 int unique_kind_count, IndexToElementsKindFunc&& index_to_elements_kind,
2174 BuildKindSpecificFunc&& build_kind_specific);
2175
2176 MaybeReduceResult DoTryReduceMathRound(CallArguments& args,
2177 Float64Round::Kind kind);
2178
2179 template <typename CallNode, typename... Args>
2180 CallNode* AddNewCallNode(const CallArguments& args, Args&&... extra_args);
2181
2182 MaybeReduceResult TryReduceGetIterator(ValueNode* receiver, int load_slot,
2183 int call_slot);
2184
2185 ValueNode* BuildCallSelf(ValueNode* context, ValueNode* function,
2186 ValueNode* new_target,
2188 CallArguments& args);
2189 MaybeReduceResult TryReduceBuiltin(
2191 CallArguments& args, const compiler::FeedbackSource& feedback_source);
2192 bool TargetIsCurrentCompilingUnit(compiler::JSFunctionRef target);
2193 CallKnownJSFunction* BuildCallKnownJSFunction(
2194 ValueNode* context, ValueNode* function, ValueNode* new_target,
2195#ifdef V8_ENABLE_LEAPTIERING
2196 JSDispatchHandle dispatch_handle,
2197#endif
2198 compiler::SharedFunctionInfoRef shared,
2199 compiler::FeedbackCellRef feedback_cell, CallArguments& args,
2200 const compiler::FeedbackSource& feedback_source);
2201 CallKnownJSFunction* BuildCallKnownJSFunction(
2202 ValueNode* context, ValueNode* function, ValueNode* new_target,
2203#ifdef V8_ENABLE_LEAPTIERING
2204 JSDispatchHandle dispatch_handle,
2205#endif
2206 compiler::SharedFunctionInfoRef shared,
2207 base::Vector<ValueNode*> arguments);
2208 MaybeReduceResult TryBuildCallKnownJSFunction(
2209 compiler::JSFunctionRef function, ValueNode* new_target,
2210 CallArguments& args, const compiler::FeedbackSource& feedback_source);
2211 MaybeReduceResult TryBuildCallKnownJSFunction(
2212 ValueNode* context, ValueNode* function, ValueNode* new_target,
2213#ifdef V8_ENABLE_LEAPTIERING
2214 JSDispatchHandle dispatch_handle,
2215#endif
2216 compiler::SharedFunctionInfoRef shared,
2217 compiler::FeedbackCellRef feedback_cell, CallArguments& args,
2218 const compiler::FeedbackSource& feedback_source);
2219 bool CanInlineCall(compiler::SharedFunctionInfoRef shared,
2220 float call_frequency);
2221 bool ShouldEagerInlineCall(compiler::SharedFunctionInfoRef shared);
2222 ReduceResult BuildEagerInlineCall(ValueNode* context, ValueNode* function,
2223 ValueNode* new_target,
2224 compiler::SharedFunctionInfoRef shared,
2225 compiler::FeedbackCellRef feedback_cell,
2226 CallArguments& args, float call_frequency);
2227 MaybeReduceResult TryBuildInlineCall(
2228 ValueNode* context, ValueNode* function, ValueNode* new_target,
2229#ifdef V8_ENABLE_LEAPTIERING
2230 JSDispatchHandle dispatch_handle,
2231#endif
2232 compiler::SharedFunctionInfoRef shared,
2233 compiler::FeedbackCellRef feedback_cell, CallArguments& args,
2234 const compiler::FeedbackSource& feedback_source);
2235 ValueNode* BuildGenericCall(ValueNode* target, Call::TargetType target_type,
2236 const CallArguments& args);
2237 MaybeReduceResult TryReduceCallForConstant(
2238 compiler::JSFunctionRef target, CallArguments& args,
2239 const compiler::FeedbackSource& feedback_source =
2240 compiler::FeedbackSource());
2241 MaybeReduceResult TryReduceCallForTarget(
2242 ValueNode* target_node, compiler::JSFunctionRef target,
2243 CallArguments& args, const compiler::FeedbackSource& feedback_source);
2244 MaybeReduceResult TryReduceCallForNewClosure(
2245 ValueNode* target_node, ValueNode* target_context,
2246#ifdef V8_ENABLE_LEAPTIERING
2247 JSDispatchHandle dispatch_handle,
2248#endif
2249 compiler::SharedFunctionInfoRef shared,
2250 compiler::FeedbackCellRef feedback_cell, CallArguments& args,
2251 const compiler::FeedbackSource& feedback_source);
2252 MaybeReduceResult TryBuildCallKnownApiFunction(
2254 CallArguments& args);
2255 compiler::HolderLookupResult TryInferApiHolderValue(
2256 compiler::FunctionTemplateInfoRef function_template_info,
2257 ValueNode* receiver);
2258 MaybeReduceResult TryReduceCallForApiFunction(
2260 compiler::OptionalSharedFunctionInfoRef maybe_shared,
2261 CallArguments& args);
2262 MaybeReduceResult TryReduceFunctionPrototypeApplyCallWithReceiver(
2263 compiler::OptionalHeapObjectRef maybe_receiver, CallArguments& args,
2264 const compiler::FeedbackSource& feedback_source);
2265 ReduceResult ReduceCallWithArrayLikeForArgumentsObject(
2266 ValueNode* target_node, CallArguments& args,
2267 VirtualObject* arguments_object,
2268 const compiler::FeedbackSource& feedback_source);
2269 ReduceResult ReduceCallWithArrayLike(
2270 ValueNode* target_node, CallArguments& args,
2271 const compiler::FeedbackSource& feedback_source);
2272 ReduceResult ReduceCall(ValueNode* target_node, CallArguments& args,
2273 const compiler::FeedbackSource& feedback_source =
2274 compiler::FeedbackSource());
2275 ReduceResult BuildCallWithFeedback(
2276 ValueNode* target_node, CallArguments& args,
2277 const compiler::FeedbackSource& feedback_source);
2278 ReduceResult BuildCallFromRegisterList(ConvertReceiverMode receiver_mode);
2279 ReduceResult BuildCallFromRegisters(int argc_count,
2280 ConvertReceiverMode receiver_mode);
2281
2282 ValueNode* BuildElementsArray(int length);
2283 ReduceResult BuildAndAllocateKeyValueArray(ValueNode* key, ValueNode* value);
2284 ReduceResult BuildAndAllocateJSArray(
2285 compiler::MapRef map, ValueNode* length, ValueNode* elements,
2286 const compiler::SlackTrackingPrediction& slack_tracking_prediction,
2287 AllocationType allocation_type);
2288 ValueNode* BuildAndAllocateJSArrayIterator(ValueNode* array,
2289 IterationKind iteration_kind);
2290
2291 MaybeReduceResult TryBuildAndAllocateJSGeneratorObject(ValueNode* closure,
2292 ValueNode* receiver);
2293
2294 ValueNode* BuildGenericConstruct(
2295 ValueNode* target, ValueNode* new_target, ValueNode* context,
2296 const CallArguments& args,
2297 const compiler::FeedbackSource& feedback_source =
2298 compiler::FeedbackSource());
2299
2300 MaybeReduceResult TryReduceConstructArrayConstructor(
2301 compiler::JSFunctionRef array_function, CallArguments& args,
2302 compiler::OptionalAllocationSiteRef maybe_allocation_site = {});
2303 MaybeReduceResult TryReduceConstructBuiltin(
2304 compiler::JSFunctionRef builtin,
2305 compiler::SharedFunctionInfoRef shared_function_info, ValueNode* target,
2306 CallArguments& args);
2307 MaybeReduceResult TryReduceConstructGeneric(
2308 compiler::JSFunctionRef function,
2309 compiler::SharedFunctionInfoRef shared_function_info, ValueNode* target,
2310 ValueNode* new_target, CallArguments& args,
2311 compiler::FeedbackSource& feedback_source);
2312 MaybeReduceResult TryReduceConstruct(
2313 compiler::HeapObjectRef feedback_target, ValueNode* target,
2314 ValueNode* new_target, CallArguments& args,
2315 compiler::FeedbackSource& feedback_source);
2316 ReduceResult BuildConstruct(ValueNode* target, ValueNode* new_target,
2317 CallArguments& args,
2318 compiler::FeedbackSource& feedback_source);
2319
2320 MaybeReduceResult TryBuildScriptContextStore(
2321 const compiler::GlobalAccessFeedback& global_access_feedback);
2322 MaybeReduceResult TryBuildPropertyCellStore(
2323 const compiler::GlobalAccessFeedback& global_access_feedback);
2324 MaybeReduceResult TryBuildGlobalStore(
2325 const compiler::GlobalAccessFeedback& global_access_feedback);
2326
2327 MaybeReduceResult TryBuildScriptContextConstantLoad(
2328 const compiler::GlobalAccessFeedback& global_access_feedback);
2329 MaybeReduceResult TryBuildScriptContextLoad(
2330 const compiler::GlobalAccessFeedback& global_access_feedback);
2331 MaybeReduceResult TryBuildPropertyCellLoad(
2332 const compiler::GlobalAccessFeedback& global_access_feedback);
2333 MaybeReduceResult TryBuildGlobalLoad(
2334 const compiler::GlobalAccessFeedback& global_access_feedback);
2335
2336 bool TryBuildFindNonDefaultConstructorOrConstruct(
2337 ValueNode* this_function, ValueNode* new_target,
2338 std::pair<interpreter::Register, interpreter::Register> result);
2339
2340 ValueNode* BuildSmiUntag(ValueNode* node);
2341 ValueNode* BuildNumberOrOddballToFloat64(
2342 ValueNode* node, NodeType allowed_input_type,
2343 TaggedToFloat64ConversionType conversion_type);
2344
2345 ReduceResult BuildCheckSmi(ValueNode* object, bool elidable = true);
2346 ReduceResult BuildCheckNumber(ValueNode* object);
2347 ReduceResult BuildCheckHeapObject(ValueNode* object);
2348 ReduceResult BuildCheckJSReceiver(ValueNode* object);
2349 ReduceResult BuildCheckJSReceiverOrNullOrUndefined(ValueNode* object);
2350 ReduceResult BuildCheckString(ValueNode* object);
2351 ReduceResult BuildCheckStringOrStringWrapper(ValueNode* object);
2352 ReduceResult BuildCheckSymbol(ValueNode* object);
2353 ReduceResult BuildCheckMaps(
2354 ValueNode* object, base::Vector<const compiler::MapRef> maps,
2355 std::optional<ValueNode*> map = {},
2356 bool has_deprecated_map_without_migration_target = false);
2357 ReduceResult BuildTransitionElementsKindOrCheckMap(
2358 ValueNode* heap_object, ValueNode* object_map,
2359 const ZoneVector<compiler::MapRef>& transition_sources,
2360 compiler::MapRef transition_target);
2361 ReduceResult BuildCompareMaps(
2362 ValueNode* heap_object, ValueNode* object_map,
2363 base::Vector<const compiler::MapRef> maps,
2364 MaglevSubGraphBuilder* sub_graph,
2365 std::optional<MaglevSubGraphBuilder::Label>& if_not_matched);
2366 ReduceResult BuildTransitionElementsKindAndCompareMaps(
2367 ValueNode* heap_object, ValueNode* object_map,
2368 const ZoneVector<compiler::MapRef>& transition_sources,
2369 compiler::MapRef transition_target, MaglevSubGraphBuilder* sub_graph,
2370 std::optional<MaglevSubGraphBuilder::Label>& if_not_matched);
2371 // Emits an unconditional deopt and returns false if the node is a constant
2372 // that doesn't match the ref.
2373 ReduceResult BuildCheckInternalizedStringValueOrByReference(
2374 ValueNode* node, compiler::HeapObjectRef ref, DeoptimizeReason reason);
2375 ReduceResult BuildCheckNumericalValueOrByReference(ValueNode* node,
2376 compiler::ObjectRef ref,
2377 DeoptimizeReason reason);
2378 ReduceResult BuildCheckValueByReference(ValueNode* node,
2379 compiler::HeapObjectRef ref,
2380 DeoptimizeReason reason);
2381 ReduceResult BuildCheckNumericalValue(ValueNode* node,
2382 compiler::ObjectRef ref,
2383 DeoptimizeReason reason);
2384
2385 ValueNode* BuildConvertHoleToUndefined(ValueNode* node);
2386 ReduceResult BuildCheckNotHole(ValueNode* node);
2387
2388 template <bool flip = false>
2389 ValueNode* BuildToBoolean(ValueNode* node);
2390 ValueNode* BuildLogicalNot(ValueNode* value);
2391 ValueNode* BuildTestUndetectable(ValueNode* value);
2392 ReduceResult BuildToNumberOrToNumeric(Object::Conversion mode);
2393
2394 enum class TrackObjectMode { kLoad, kStore };
2395 bool CanTrackObjectChanges(ValueNode* object, TrackObjectMode mode);
2396 bool CanElideWriteBarrier(ValueNode* object, ValueNode* value);
2397
2398 void BuildInitializeStore(InlinedAllocation* alloc, ValueNode* value,
2399 int offset);
2400 void TryBuildStoreTaggedFieldToAllocation(ValueNode* object, ValueNode* value,
2401 int offset);
2402 template <typename Instruction = LoadTaggedField, typename... Args>
2403 ValueNode* BuildLoadTaggedField(ValueNode* object, uint32_t offset,
2404 Args&&... args) {
2405 if (offset != HeapObject::kMapOffset &&
2406 CanTrackObjectChanges(object, TrackObjectMode::kLoad)) {
2407 VirtualObject* vobject =
2408 GetObjectFromAllocation(object->Cast<InlinedAllocation>());
2409 ValueNode* value;
2410 CHECK_NE(vobject->type(), VirtualObject::kHeapNumber);
2411 if (vobject->type() == VirtualObject::kDefault) {
2412 value = vobject->get(offset);
2413 } else {
2414 DCHECK_EQ(vobject->type(), VirtualObject::kFixedDoubleArray);
2415 // The only offset we're allowed to read from a FixedDoubleArray as a
2416 // tagged field is the length.
2418 value = GetInt32Constant(vobject->double_elements_length());
2419 }
2420 if (v8_flags.trace_maglev_object_tracking) {
2421 std::cout << " * Reusing value in virtual object "
2422 << PrintNodeLabel(graph_labeller(), vobject) << "[" << offset
2423 << "]: " << PrintNode(graph_labeller(), value) << std::endl;
2424 }
2425 return value;
2426 }
2427 return AddNewNode<Instruction>({object}, offset,
2428 std::forward<Args>(args)...);
2429 }
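// Illustrative usage sketch (added): when the object is a tracked inlined
// allocation the load is answered straight from the virtual object;
// otherwise a LoadTaggedField node is emitted. For example, loading the
// elements backing store of a receiver might look like
//
//   ValueNode* elements =
//       BuildLoadTaggedField(receiver, JSObject::kElementsOffset);
//
// (`receiver` is a placeholder; JSObject::kElementsOffset is the regular V8
// field offset.)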
2430
2431 Node* BuildStoreTaggedField(ValueNode* object, ValueNode* value, int offset,
2432 StoreTaggedMode store_mode);
2433 void BuildStoreTaggedFieldNoWriteBarrier(ValueNode* object, ValueNode* value,
2434 int offset,
2435 StoreTaggedMode store_mode);
2436 void BuildStoreTrustedPointerField(ValueNode* object, ValueNode* value,
2437 int offset, IndirectPointerTag tag,
2438 StoreTaggedMode store_mode);
2439
2440 ValueNode* BuildLoadFixedArrayElement(ValueNode* elements, int index);
2441 ValueNode* BuildLoadFixedArrayElement(ValueNode* elements, ValueNode* index);
2442 void BuildStoreFixedArrayElement(ValueNode* elements, ValueNode* index,
2443 ValueNode* value);
2444
2445 ValueNode* BuildLoadFixedDoubleArrayElement(ValueNode* elements, int index);
2446 ValueNode* BuildLoadFixedDoubleArrayElement(ValueNode* elements,
2447 ValueNode* index);
2448 void BuildStoreFixedDoubleArrayElement(ValueNode* elements, ValueNode* index,
2449 ValueNode* value);
2450
2451 ValueNode* BuildLoadHoleyFixedDoubleArrayElement(ValueNode* elements,
2452 ValueNode* index,
2453 bool convert_hole);
2454
2455 ValueNode* GetInt32ElementIndex(interpreter::Register reg) {
2456 ValueNode* index_object = current_interpreter_frame_.get(reg);
2457 return GetInt32ElementIndex(index_object);
2458 }
2459 ValueNode* GetInt32ElementIndex(ValueNode* index_object);
2460
2461 ReduceResult GetUint32ElementIndex(interpreter::Register reg) {
2462 ValueNode* index_object = current_interpreter_frame_.get(reg);
2463 return GetUint32ElementIndex(index_object);
2464 }
2465 ReduceResult GetUint32ElementIndex(ValueNode* index_object);
2466
2467 bool CanTreatHoleAsUndefined(
2468 base::Vector<const compiler::MapRef> const& receiver_maps);
2469
2470 compiler::OptionalObjectRef TryFoldLoadDictPrototypeConstant(
2471 compiler::PropertyAccessInfo const& access_info);
2472 compiler::OptionalJSObjectRef TryGetConstantDataFieldHolder(
2473 compiler::PropertyAccessInfo const& access_info,
2474 ValueNode* lookup_start_object);
2475 compiler::OptionalObjectRef TryFoldLoadConstantDataField(
2476 compiler::JSObjectRef holder,
2477 compiler::PropertyAccessInfo const& access_info);
2478 std::optional<Float64> TryFoldLoadConstantDoubleField(
2479 compiler::JSObjectRef holder,
2480 compiler::PropertyAccessInfo const& access_info);
2481
2482 // Returns the loaded value node but doesn't update the accumulator yet.
2483 ValueNode* BuildLoadField(compiler::PropertyAccessInfo const& access_info,
2484 ValueNode* lookup_start_object,
2485 compiler::NameRef name);
2486 MaybeReduceResult TryBuildStoreField(
2487 compiler::PropertyAccessInfo const& access_info, ValueNode* receiver,
2488 compiler::AccessMode access_mode);
2489 MaybeReduceResult TryBuildPropertyGetterCall(
2490 compiler::PropertyAccessInfo const& access_info, ValueNode* receiver,
2491 ValueNode* lookup_start_object);
2492 MaybeReduceResult TryBuildPropertySetterCall(
2493 compiler::PropertyAccessInfo const& access_info, ValueNode* receiver,
2494 ValueNode* lookup_start_object, ValueNode* value);
2495 MaybeReduceResult TryBuildGetKeyedPropertyWithEnumeratedKey(
2496 ValueNode* object, const compiler::FeedbackSource& feedback_source,
2497 const compiler::ProcessedFeedback& processed_feedback);
2498 ReduceResult BuildGetKeyedProperty(
2499 ValueNode* object, const compiler::FeedbackSource& feedback_source,
2500 const compiler::ProcessedFeedback& processed_feedback);
2501
2502 ValueNode* BuildLoadFixedArrayLength(ValueNode* fixed_array);
2503 ValueNode* BuildLoadJSArrayLength(ValueNode* js_array,
2504 NodeType length_type = NodeType::kSmi);
2505 ValueNode* BuildLoadElements(ValueNode* object);
2506
2507 MaybeReduceResult TryBuildCheckInt32Condition(
2508 ValueNode* lhs, ValueNode* rhs, AssertCondition condition,
2509 DeoptimizeReason reason, bool allow_unconditional_deopt = true);
2510
2511 MaybeReduceResult TryBuildPropertyLoad(
2512 ValueNode* receiver, ValueNode* lookup_start_object,
2513 compiler::NameRef name, compiler::PropertyAccessInfo const& access_info);
2514 MaybeReduceResult TryBuildPropertyStore(
2515 ValueNode* receiver, ValueNode* lookup_start_object,
2516 compiler::NameRef name, compiler::PropertyAccessInfo const& access_info,
2517 compiler::AccessMode access_mode);
2518 MaybeReduceResult TryBuildPropertyAccess(
2519 ValueNode* receiver, ValueNode* lookup_start_object,
2520 compiler::NameRef name, compiler::PropertyAccessInfo const& access_info,
2521 compiler::AccessMode access_mode);
2522 template <typename GenericAccessFunc>
2523 MaybeReduceResult TryBuildNamedAccess(
2524 ValueNode* receiver, ValueNode* lookup_start_object,
2525 compiler::NamedAccessFeedback const& feedback,
2526 compiler::FeedbackSource const& feedback_source,
2527 compiler::AccessMode access_mode,
2528 GenericAccessFunc&& build_generic_access);
2529
2530 template <typename GenericAccessFunc>
2531 MaybeReduceResult TryBuildLoadNamedProperty(
2532 ValueNode* receiver, ValueNode* lookup_start_object,
2533 compiler::NameRef name, compiler::FeedbackSource& feedback_source,
2534 GenericAccessFunc&& build_generic_access);
2535 MaybeReduceResult TryBuildLoadNamedProperty(
2536 ValueNode* receiver, compiler::NameRef name,
2537 compiler::FeedbackSource& feedback_source);
2538
2539 ReduceResult BuildLoadTypedArrayLength(ValueNode* object,
2540 ElementsKind elements_kind);
2541 ValueNode* BuildLoadTypedArrayElement(ValueNode* object, ValueNode* index,
2542 ElementsKind elements_kind);
2543 void BuildStoreTypedArrayElement(ValueNode* object, ValueNode* index,
2544 ElementsKind elements_kind);
2545
2546 MaybeReduceResult TryBuildElementAccessOnString(
2547 ValueNode* object, ValueNode* index,
2548 compiler::KeyedAccessMode const& keyed_mode);
2549 MaybeReduceResult TryBuildElementAccessOnTypedArray(
2550 ValueNode* object, ValueNode* index,
2551 const compiler::ElementAccessInfo& access_info,
2552 compiler::KeyedAccessMode const& keyed_mode);
2553 MaybeReduceResult TryBuildElementLoadOnJSArrayOrJSObject(
2554 ValueNode* object, ValueNode* index,
2556 KeyedAccessLoadMode load_mode);
2557 MaybeReduceResult TryBuildElementStoreOnJSArrayOrJSObject(
2558 ValueNode* object, ValueNode* index_object, ValueNode* value,
2560 const compiler::KeyedAccessMode& keyed_mode);
2561 MaybeReduceResult TryBuildElementAccessOnJSArrayOrJSObject(
2562 ValueNode* object, ValueNode* index,
2563 const compiler::ElementAccessInfo& access_info,
2564 compiler::KeyedAccessMode const& keyed_mode);
2565 template <typename GenericAccessFunc>
2566 MaybeReduceResult TryBuildElementAccess(
2567 ValueNode* object, ValueNode* index,
2568 compiler::ElementAccessFeedback const& feedback,
2569 compiler::FeedbackSource const& feedback_source,
2570 GenericAccessFunc&& build_generic_access);
2571 template <typename GenericAccessFunc>
2572 MaybeReduceResult TryBuildPolymorphicElementAccess(
2573 ValueNode* object, ValueNode* index,
2574 const compiler::KeyedAccessMode& keyed_mode,
2575 const ZoneVector<compiler::ElementAccessInfo>& access_infos,
2576 GenericAccessFunc&& build_generic_access);
2577 template <typename GenericAccessFunc>
2578 MaybeReduceResult TryBuildPolymorphicPropertyAccess(
2579 ValueNode* receiver, ValueNode* lookup_start_object,
2580 compiler::NamedAccessFeedback const& feedback,
2581 compiler::AccessMode access_mode,
2582 const ZoneVector<compiler::PropertyAccessInfo>& access_infos,
2583 GenericAccessFunc&& build_generic_access);
2584
2585 // Load elimination -- when loading or storing a simple property without
2586 // side effects, record its value, and allow that value to be reused on
2587 // subsequent loads.
2588 void RecordKnownProperty(ValueNode* lookup_start_object,
2590 ValueNode* value, bool is_const,
2591 compiler::AccessMode access_mode);
2592 MaybeReduceResult TryReuseKnownPropertyLoad(ValueNode* lookup_start_object,
2593 compiler::NameRef name);
2594 ValueNode* BuildLoadStringLength(ValueNode* string);
2595
2596 // Converts the input node to a representation that's valid to store into an
2597 // array with elements kind |kind|.
2598 ReduceResult ConvertForStoring(ValueNode* node, ElementsKind kind);
2599
2605 InferHasInPrototypeChainResult InferHasInPrototypeChain(
2606 ValueNode* receiver, compiler::HeapObjectRef prototype);
2607 MaybeReduceResult TryBuildFastHasInPrototypeChain(
2608 ValueNode* object, compiler::HeapObjectRef prototype);
2609 ReduceResult BuildHasInPrototypeChain(ValueNode* object,
2610 compiler::HeapObjectRef prototype);
2611 MaybeReduceResult TryBuildFastOrdinaryHasInstance(
2612 ValueNode* object, compiler::JSObjectRef callable,
2613 ValueNode* callable_node);
2614 ReduceResult BuildOrdinaryHasInstance(ValueNode* object,
2615 compiler::JSObjectRef callable,
2616 ValueNode* callable_node);
2617 MaybeReduceResult TryBuildFastInstanceOf(ValueNode* object,
2618 compiler::JSObjectRef callable_ref,
2619 ValueNode* callable_node);
2620 MaybeReduceResult TryBuildFastInstanceOfWithFeedback(
2621 ValueNode* object, ValueNode* callable,
2622 compiler::FeedbackSource feedback_source);
2623
2624 VirtualObject* GetObjectFromAllocation(InlinedAllocation* allocation);
2625 VirtualObject* GetModifiableObjectFromAllocation(
2626 InlinedAllocation* allocation);
2627
2628 VirtualObject* DeepCopyVirtualObject(VirtualObject* vobj);
2629 VirtualObject* CreateVirtualObject(compiler::MapRef map,
2630 uint32_t slot_count_including_map);
2631 VirtualObject* CreateHeapNumber(Float64 value);
2632 VirtualObject* CreateDoubleFixedArray(uint32_t elements_length,
2634 VirtualObject* CreateJSObject(compiler::MapRef map);
2635 VirtualObject* CreateConsString(ValueNode* map, ValueNode* length,
2636 ValueNode* first, ValueNode* second);
2637 ReduceResult CreateJSArray(compiler::MapRef map, int instance_size,
2638 ValueNode* length);
2639 VirtualObject* CreateJSArrayIterator(compiler::MapRef map,
2640 ValueNode* iterated_object,
2641 IterationKind kind);
2642 VirtualObject* CreateJSConstructor(compiler::JSFunctionRef constructor);
2643 VirtualObject* CreateFixedArray(compiler::MapRef map, int length);
2644 VirtualObject* CreateContext(compiler::MapRef map, int length,
2645 compiler::ScopeInfoRef scope_info,
2646 ValueNode* previous_context,
2647 std::optional<ValueNode*> extension = {});
2648 VirtualObject* CreateArgumentsObject(compiler::MapRef map, ValueNode* length,
2649 ValueNode* elements,
2650 std::optional<ValueNode*> callee = {});
2651 VirtualObject* CreateMappedArgumentsElements(compiler::MapRef map,
2652 int mapped_count,
2653 ValueNode* context,
2654 ValueNode* unmapped_elements);
2655 VirtualObject* CreateRegExpLiteralObject(
2656 compiler::MapRef map, compiler::RegExpBoilerplateDescriptionRef literal);
2657 VirtualObject* CreateJSGeneratorObject(compiler::MapRef map,
2658 int instance_size, ValueNode* context,
2659 ValueNode* closure,
2660 ValueNode* receiver,
2661 ValueNode* register_file);
2662 VirtualObject* CreateJSIteratorResult(compiler::MapRef map, ValueNode* value,
2663 ValueNode* done);
2664 VirtualObject* CreateJSStringIterator(compiler::MapRef map,
2665 ValueNode* string);
2666
2667 InlinedAllocation* ExtendOrReallocateCurrentAllocationBlock(
2668 AllocationType allocation_type, VirtualObject* value);
2669
2670 void ClearCurrentAllocationBlock();
2671
2672 inline void AddDeoptUse(ValueNode* node) {
2673 if (node == nullptr) return;
2674 DCHECK(!node->Is<VirtualObject>());
2675 if (InlinedAllocation* alloc = node->TryCast<InlinedAllocation>()) {
2676 VirtualObject* vobject =
2677 current_interpreter_frame_.virtual_objects().FindAllocatedWith(alloc);
2678 if (vobject) {
2679 CHECK_NOT_NULL(vobject);
2680 AddDeoptUse(vobject);
2681 // Add an escaping use for the allocation.
2682 AddNonEscapingUses(alloc, 1);
2683 } else {
2684 DCHECK(alloc->is_returned_value_from_inline_call());
2685 }
2686 alloc->add_use();
2687 } else {
2688 node->add_use();
2689 }
2690 }
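// Added commentary: recording a deopt use keeps the node (and, for inlined
// allocations, the whole virtual object it belongs to) available for frame
// translation. A minimal sketch, assuming `closure` is some tracked value:
//
//   AddDeoptUse(closure);  // bumps the use count and, transitively, the
//                          // uses of the fields of its VirtualObject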
2691 void AddDeoptUse(VirtualObject* alloc);
2692 void AddNonEscapingUses(InlinedAllocation* allocation, int use_count);
2693
2694 std::optional<VirtualObject*> TryGetNonEscapingArgumentsObject(
2695 ValueNode* value);
2696
2697 MaybeReduceResult TryBuildFastCreateObjectOrArrayLiteral(
2698 const compiler::LiteralFeedback& feedback);
2699 std::optional<VirtualObject*> TryReadBoilerplateForFastLiteral(
2700 compiler::JSObjectRef boilerplate, AllocationType allocation,
2701 int max_depth, int* max_properties);
2702
2703 InlinedAllocation* BuildInlinedAllocationForConsString(
2704 VirtualObject* object, AllocationType allocation);
2705 InlinedAllocation* BuildInlinedAllocationForHeapNumber(
2706 VirtualObject* object, AllocationType allocation);
2707 InlinedAllocation* BuildInlinedAllocationForDoubleFixedArray(
2708 VirtualObject* object, AllocationType allocation);
2709 InlinedAllocation* BuildInlinedAllocation(VirtualObject* object,
2710 AllocationType allocation);
2711 ValueNode* BuildInlinedArgumentsElements(int start_index, int length);
2712 ValueNode* BuildInlinedUnmappedArgumentsElements(int mapped_count);
2713
2714 template <CreateArgumentsType type>
2715 VirtualObject* BuildVirtualArgumentsObject();
2716 template <CreateArgumentsType type>
2717 ValueNode* BuildAndAllocateArgumentsObject();
2718
2719 bool CanAllocateSloppyArgumentElements();
2720 bool CanAllocateInlinedArgumentElements();
2721
2722 MaybeReduceResult TryBuildInlinedAllocatedContext(
2723 compiler::MapRef map, compiler::ScopeInfoRef scope, int context_length);
2724
2725 template <Operation kOperation>
2726 void BuildGenericUnaryOperationNode();
2727 template <Operation kOperation>
2728 void BuildGenericBinaryOperationNode();
2729 template <Operation kOperation>
2730 void BuildGenericBinarySmiOperationNode();
2731
2732 template <Operation kOperation>
2733 bool TryReduceCompareEqualAgainstConstant();
2734
2735 template <Operation kOperation>
2736 MaybeReduceResult TryFoldInt32UnaryOperation(ValueNode* value);
2737 template <Operation kOperation>
2738 MaybeReduceResult TryFoldInt32BinaryOperation(ValueNode* left,
2739 ValueNode* right);
2740 template <Operation kOperation>
2741 MaybeReduceResult TryFoldInt32BinaryOperation(ValueNode* left,
2742 int32_t cst_right);
2743
2744 template <Operation kOperation>
2745 ReduceResult BuildInt32UnaryOperationNode();
2746 ReduceResult BuildTruncatingInt32BitwiseNotForToNumber(
2747 NodeType allowed_input_type,
2748 TaggedToFloat64ConversionType conversion_type);
2749 template <Operation kOperation>
2750 ReduceResult BuildInt32BinaryOperationNode();
2751 template <Operation kOperation>
2752 ReduceResult BuildInt32BinarySmiOperationNode();
2753 template <Operation kOperation>
2754 ReduceResult BuildTruncatingInt32BinaryOperationNodeForToNumber(
2755 NodeType allowed_input_type,
2756 TaggedToFloat64ConversionType conversion_type);
2757 template <Operation kOperation>
2758 ReduceResult BuildTruncatingInt32BinarySmiOperationNodeForToNumber(
2759 NodeType allowed_input_type,
2760 TaggedToFloat64ConversionType conversion_type);
2761
2762 template <Operation kOperation>
2763 MaybeReduceResult TryFoldFloat64UnaryOperationForToNumber(
2764 TaggedToFloat64ConversionType conversion_type, ValueNode* value);
2765 template <Operation kOperation>
2766 MaybeReduceResult TryFoldFloat64BinaryOperationForToNumber(
2767 TaggedToFloat64ConversionType conversion_type, ValueNode* left,
2768 ValueNode* right);
2769 template <Operation kOperation>
2770 MaybeReduceResult TryFoldFloat64BinaryOperationForToNumber(
2771 TaggedToFloat64ConversionType conversion_type, ValueNode* left,
2772 double cst_right);
2773
2774 template <Operation kOperation>
2775 ReduceResult BuildFloat64UnaryOperationNodeForToNumber(
2776 NodeType allowed_input_type,
2777 TaggedToFloat64ConversionType conversion_type);
2778 template <Operation kOperation>
2779 ReduceResult BuildFloat64BinaryOperationNodeForToNumber(
2780 NodeType allowed_input_type,
2781 TaggedToFloat64ConversionType conversion_type);
2782 template <Operation kOperation>
2783 ReduceResult BuildFloat64BinarySmiOperationNodeForToNumber(
2784 NodeType allowed_input_type,
2785 TaggedToFloat64ConversionType conversion_type);
2786
2787 template <Operation kOperation>
2788 ReduceResult VisitUnaryOperation();
2789 template <Operation kOperation>
2790 ReduceResult VisitBinaryOperation();
2791 template <Operation kOperation>
2792 ReduceResult VisitBinarySmiOperation();
2793
2794 ValueNode* BuildUnwrapThinString(ValueNode* input);
2795 ValueNode* BuildUnwrapStringWrapper(ValueNode* input);
2796 ReduceResult BuildStringConcat(ValueNode* left, ValueNode* right);
2797 ValueNode* BuildNewConsStringMap(ValueNode* left, ValueNode* right);
2798 size_t StringLengthStaticLowerBound(ValueNode* string, int max_depth = 2);
2799 MaybeReduceResult TryBuildNewConsString(
2800 ValueNode* left, ValueNode* right,
2801 AllocationType allocation_type = AllocationType::kYoung);
2802
2803 template <Operation kOperation>
2804 ReduceResult VisitCompareOperation();
2805
2807 template <typename Function>
2808 MaybeReduceResult TryReduceTypeOf(ValueNode* value,
2809 const Function& GetResult);
2810 MaybeReduceResult TryReduceTypeOf(ValueNode* value);
2811
2812 void BeginLoopEffects(int loop_header);
2813 void EndLoopEffects(int loop_header);
2814 void MergeIntoFrameState(BasicBlock* block, int target);
2815 void MergeDeadIntoFrameState(int target);
2816 void MergeDeadLoopIntoFrameState(int target);
2817 void MergeIntoInlinedReturnFrameState(BasicBlock* block);
2818
2819 bool HasValidInitialMap(compiler::JSFunctionRef new_target,
2820 compiler::JSFunctionRef constructor);
2821
2822 ValueNode* BuildTaggedEqual(ValueNode* lhs, ValueNode* rhs);
2823 ValueNode* BuildTaggedEqual(ValueNode* lhs, RootIndex rhs_index);
2824
2825 class BranchBuilder;
2826
2827 enum class BranchType { kBranchIfTrue, kBranchIfFalse };
2828 enum class BranchSpecializationMode { kDefault, kAlwaysBoolean };
2829 enum class BranchResult {
2830 kDefault,
2831 kAlwaysTrue,
2832 kAlwaysFalse,
2833 };
2834
2835 static inline BranchType NegateBranchType(BranchType jump_type) {
2836 switch (jump_type) {
2837 case BranchType::kBranchIfTrue:
2838 return BranchType::kBranchIfFalse;
2839 case BranchType::kBranchIfFalse:
2840 return BranchType::kBranchIfTrue;
2841 }
2842 }
2843
2844 // This class encapsulates the logic of branch nodes (using the graph builder
2845 // or the sub graph builder).
2847 public:
2852
2854 public:
2856 RootIndex root_index)
2857 : builder_(builder),
2858 node_(node),
2859 root_index_(root_index),
2860 jump_type_(builder.GetCurrentBranchType()) {
2861 if (builder.mode() == kBytecodeJumpTarget) {
2862 builder_.data_.bytecode_target.patch_accumulator_scope = this;
2863 }
2864 }
2865
2867 builder_.data_.bytecode_target.patch_accumulator_scope = nullptr;
2868 }
2869
2870 private:
2875
2876 friend class BranchBuilder;
2877 };
2878
2880 BytecodeJumpTarget(int jump_target_offset, int fallthrough_offset)
2881 : jump_target_offset(jump_target_offset),
2882 fallthrough_offset(fallthrough_offset),
2883 patch_accumulator_scope(nullptr) {}
2887 };
2888
2891 : jump_label(jump_label), fallthrough() {}
2894 };
2895
2896 union Data {
2897 Data(int jump_target_offset, int fallthrough_offset)
2898 : bytecode_target(jump_target_offset, fallthrough_offset) {}
2900 : label_target(jump_label) {}
2903 };
2904
2905 // Creates a branch builder for bytecode offsets.
2906 BranchBuilder(MaglevGraphBuilder* builder, BranchType jump_type)
2907 : builder_(builder),
2908 sub_builder_(nullptr),
2909 jump_type_(jump_type),
2910 data_(builder->iterator_.GetJumpTargetOffset(),
2911 builder->iterator_.next_offset()) {}
2912
2913 // Creates a branch builder for subgraph label.
2914 BranchBuilder(MaglevGraphBuilder* builder,
2915 MaglevSubGraphBuilder* sub_builder, BranchType jump_type,
2916 MaglevSubGraphBuilder::Label* jump_label)
2917 : builder_(builder),
2918 sub_builder_(sub_builder),
2919 jump_type_(jump_type),
2920 data_(jump_label) {}
2921
2922 Mode mode() const {
2923 return sub_builder_ == nullptr ? kBytecodeJumpTarget : kLabelJumpTarget;
2924 }
2925
2926 BranchType GetCurrentBranchType() const { return jump_type_; }
2927
2928 void SetBranchSpecializationMode(BranchSpecializationMode mode) {
2929 branch_specialization_mode_ = mode;
2930 }
2931 void SwapTargets() { jump_type_ = NegateBranchType(jump_type_); }
2932
2933 BasicBlockRef* jump_target();
2934 BasicBlockRef* fallthrough();
2935 BasicBlockRef* true_target();
2936 BasicBlockRef* false_target();
2937
2938 BranchResult FromBool(bool value) const;
2939 BranchResult AlwaysTrue() const { return FromBool(true); }
2940 BranchResult AlwaysFalse() const { return FromBool(false); }
2941
2942 template <typename NodeT, typename... Args>
2943 BranchResult Build(std::initializer_list<ValueNode*> inputs,
2944 Args&&... args);
2945
2946 private:
2947 MaglevGraphBuilder* builder_;
2948 MaglevSubGraphBuilder* sub_builder_;
2949 BranchType jump_type_;
2950 BranchSpecializationMode branch_specialization_mode_ =
2951 BranchSpecializationMode::kDefault;
2952 Data data_;
2953
2954 void StartFallthroughBlock(BasicBlock* predecessor);
2955 void SetAccumulatorInBranch(BranchType jump_type) const;
2956 };
2957
2958 BranchBuilder CreateBranchBuilder(
2959 BranchType jump_type = BranchType::kBranchIfTrue) {
2960 return BranchBuilder(this, jump_type);
2961 }
2962 BranchBuilder CreateBranchBuilder(
2963 MaglevSubGraphBuilder* subgraph, MaglevSubGraphBuilder::Label* jump_label,
2964 BranchType jump_type = BranchType::kBranchIfTrue) {
2965 return BranchBuilder(this, subgraph, jump_type, jump_label);
2966 }
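// Illustrative sketch (added; a hedged example, not the verbatim call site):
// a bytecode-level branch is typically emitted by creating a builder and
// handing it to one of the BuildBranchIf* helpers declared below, e.g.
//
//   BranchBuilder builder = CreateBranchBuilder(BranchType::kBranchIfTrue);
//   BuildBranchIfRootConstant(builder, value, RootIndex::kUndefinedValue);
//
// where `value` stands in for whatever node is being tested.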
2967
2968 BranchResult BuildBranchIfRootConstant(BranchBuilder& builder,
2969 ValueNode* node, RootIndex root_index);
2970 BranchResult BuildBranchIfToBooleanTrue(BranchBuilder& builder,
2971 ValueNode* node);
2972 BranchResult BuildBranchIfInt32ToBooleanTrue(BranchBuilder& builder,
2973 ValueNode* node);
2974 BranchResult BuildBranchIfIntPtrToBooleanTrue(BranchBuilder& builder,
2975 ValueNode* node);
2976 BranchResult BuildBranchIfFloat64ToBooleanTrue(BranchBuilder& builder,
2977 ValueNode* node);
2978 BranchResult BuildBranchIfFloat64IsHole(BranchBuilder& builder,
2979 ValueNode* node);
2980 BranchResult BuildBranchIfReferenceEqual(BranchBuilder& builder,
2981 ValueNode* lhs, ValueNode* rhs);
2982 BranchResult BuildBranchIfInt32Compare(BranchBuilder& builder, Operation op,
2983 ValueNode* lhs, ValueNode* rhs);
2984 BranchResult BuildBranchIfUint32Compare(BranchBuilder& builder, Operation op,
2985 ValueNode* lhs, ValueNode* rhs);
2986 BranchResult BuildBranchIfUndefinedOrNull(BranchBuilder& builder,
2987 ValueNode* node);
2988 BranchResult BuildBranchIfUndetectable(BranchBuilder& builder,
2989 ValueNode* value);
2990 BranchResult BuildBranchIfJSReceiver(BranchBuilder& builder,
2991 ValueNode* value);
2992
2993 BranchResult BuildBranchIfTrue(BranchBuilder& builder, ValueNode* node);
2994 BranchResult BuildBranchIfNull(BranchBuilder& builder, ValueNode* node);
2995 BranchResult BuildBranchIfUndefined(BranchBuilder& builder, ValueNode* node);
2996 BasicBlock* BuildBranchIfReferenceEqual(ValueNode* lhs, ValueNode* rhs,
2997 BasicBlockRef* true_target,
2998 BasicBlockRef* false_target);
2999
3000 template <typename FCond, typename FTrue, typename FFalse>
3001 ValueNode* Select(FCond cond, FTrue if_true, FFalse if_false);
3002
3003 template <typename FCond, typename FTrue, typename FFalse>
3004 MaybeReduceResult SelectReduction(FCond cond, FTrue if_true, FFalse if_false);
3005
3006 void MarkBranchDeadAndJumpIfNeeded(bool is_jump_taken);
3007
3008 void CalculatePredecessorCounts() {
3009 // Add 1 after the end of the bytecode so we can always write to the offset
3010 // after the last bytecode.
3011 uint32_t array_length = bytecode().length() + 1;
3012 predecessor_count_ = zone()->AllocateArray<uint32_t>(array_length);
3013 MemsetUint32(predecessor_count_, 0, entrypoint_);
3014 MemsetUint32(predecessor_count_ + entrypoint_, 1,
3015 array_length - entrypoint_);
3016
3017 const int max_peelings = v8_flags.maglev_optimistic_peeled_loops ? 2 : 1;
3018 // We count jumps from peeled loops to outside of the loop twice.
3019 bool is_loop_peeling_iteration = false;
3020 std::optional<int> peeled_loop_end;
3021 interpreter::BytecodeArrayIterator iterator(bytecode().object());
3022 for (iterator.AdvanceTo(entrypoint_); !iterator.done();
3023 iterator.Advance()) {
3024 interpreter::Bytecode bytecode = iterator.current_bytecode();
3025 if (allow_loop_peeling_ &&
3026 bytecode_analysis().IsLoopHeader(iterator.current_offset())) {
3027 const compiler::LoopInfo& loop_info =
3028 bytecode_analysis().GetLoopInfoFor(iterator.current_offset());
3029 // Generators use irreducible control flow, which makes loop peeling too
3030 // complicated.
3031 int size = loop_info.loop_end() - loop_info.loop_start();
3032 if (loop_info.innermost() && !loop_info.resumable() &&
3033 iterator.next_offset() < loop_info.loop_end() &&
3034 size < v8_flags.maglev_loop_peeling_max_size &&
3035 size + graph_->total_peeled_bytecode_size() <
3036 v8_flags.maglev_loop_peeling_max_size_cumulative) {
3037 DCHECK(!is_loop_peeling_iteration);
3038 graph_->add_peeled_bytecode_size(size);
3039 is_loop_peeling_iteration = true;
3040 loop_headers_to_peel_.Add(iterator.current_offset());
3041 peeled_loop_end = bytecode_analysis().GetLoopEndOffsetForInnermost(
3042 iterator.current_offset());
3043 }
3044 }
3045 if (interpreter::Bytecodes::IsJump(bytecode)) {
3046 if (is_loop_peeling_iteration &&
3047 bytecode == interpreter::Bytecode::kJumpLoop) {
3048 DCHECK_EQ(iterator.next_offset(), peeled_loop_end);
3049 is_loop_peeling_iteration = false;
3050 peeled_loop_end = {};
3051 }
3052 if (iterator.GetJumpTargetOffset() < entrypoint_) {
3053 static_assert(kLoopsMustBeEnteredThroughHeader);
3054 if (predecessor_count(iterator.GetJumpTargetOffset()) == 1) {
3055 // We encountered a JumpLoop whose loop header is not reachable
3056 // otherwise. This loop is either dead or the JumpLoop will bail
3057 // with DeoptimizeReason::kOSREarlyExit.
3058 InitializePredecessorCount(iterator.GetJumpTargetOffset(), 0);
3059 }
3060 } else {
3061 UpdatePredecessorCount(iterator.GetJumpTargetOffset(), 1);
3062 }
3063 if (is_loop_peeling_iteration &&
3064 iterator.GetJumpTargetOffset() >= *peeled_loop_end) {
3065 // Jumps from within the peeled loop to outside need to be counted
3066 // twice, once for the peeled and once for the regular loop body.
3067 UpdatePredecessorCount(iterator.GetJumpTargetOffset(), max_peelings);
3068 }
3069 if (!interpreter::Bytecodes::IsConditionalJump(bytecode)) {
3070 UpdatePredecessorCount(iterator.next_offset(), -1);
3071 }
3072 } else if (interpreter::Bytecodes::IsSwitch(bytecode)) {
3073 for (auto offset : iterator.GetJumpTableTargetOffsets()) {
3074 UpdatePredecessorCount(offset.target_offset, 1);
3075 }
3076 } else if (interpreter::Bytecodes::Returns(bytecode) ||
3077 interpreter::Bytecodes::UnconditionallyThrows(bytecode)) {
3078 UpdatePredecessorCount(iterator.next_offset(), -1);
3079 // Collect inline return jumps in the slot after the last bytecode.
3080 if (is_inline() && interpreter::Bytecodes::Returns(bytecode)) {
3081 UpdatePredecessorCount(array_length - 1, 1);
3082 if (is_loop_peeling_iteration) {
3083 UpdatePredecessorCount(array_length - 1, max_peelings);
3084 }
3085 }
3086 }
3087 // TODO(leszeks): Also consider handler entries (the bytecode analysis)
3088 // will do this automatically I guess if we merge this into that.
3089 }
3090 if (!is_inline()) {
3091 DCHECK_EQ(0, predecessor_count(bytecode().length()));
3092 }
3093 }
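// Added worked example: every offset from the entrypoint on starts with an
// implicit count of 1 (its fall-through predecessor). A conditional
// JumpIfFalse +X then adds 1 to the offset at +X while leaving the next
// offset at its implicit 1; an unconditional Jump +X also decrements the
// next offset to 0, since control never falls through. Returns and
// unconditional throws likewise remove the fall-through edge.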
3094
3095 compiler::FeedbackVectorRef feedback() const {
3096 return compilation_unit_->feedback();
3097 }
3098 const FeedbackNexus FeedbackNexusForOperand(int slot_operand_index) const {
3099 return FeedbackNexus(feedback().object(),
3100 GetSlotOperand(slot_operand_index),
3101 broker()->feedback_nexus_config());
3102 }
3103 const FeedbackNexus FeedbackNexusForSlot(FeedbackSlot slot) const {
3104 return FeedbackNexus(feedback().object(), slot,
3105 broker()->feedback_nexus_config());
3106 }
3107 compiler::BytecodeArrayRef bytecode() const {
3108 return compilation_unit_->bytecode();
3109 }
3110 const compiler::BytecodeAnalysis& bytecode_analysis() const {
3111 return bytecode_analysis_;
3112 }
3113 int parameter_count() const { return compilation_unit_->parameter_count(); }
3114 int parameter_count_without_receiver() const { return parameter_count() - 1; }
3115 int register_count() const { return compilation_unit_->register_count(); }
3116 KnownNodeAspects& known_node_aspects() {
3117 return *current_interpreter_frame_.known_node_aspects();
3118 }
3119
3121 if (!is_inline()) return 1.0f;
3122 return caller_details_->call_frequency;
3123 }
3124
3125 int argument_count() const {
3126 DCHECK(is_inline());
3127 return static_cast<int>(caller_details_->arguments.size());
3128 }
3129 int argument_count_without_receiver() const { return argument_count() - 1; }
3130
3131 bool IsInsideLoop() const {
3132 if (is_inline() && caller_details()->is_inside_loop) return true;
3133 int loop_header_offset =
3134 bytecode_analysis().GetLoopOffsetFor(iterator_.current_offset());
3135 if (loop_header_offset != -1) {
3136 const compiler::LoopInfo& loop_info =
3137 bytecode_analysis().GetLoopInfoFor(loop_header_offset);
3138 if (loop_info.parent_offset() == -1) {
3139 // This is the outermost loop; if we're actually inside the peel, we are
3140 // not really in a loop.
3141 return !in_peeled_iteration() || in_optimistic_peeling_iteration();
3142 }
3143 return true;
3144 }
3145 return false;
3146 }
3147
3148 // The fake offset used as a target for all exits of an inlined function.
3150 DCHECK(is_inline());
3151 return bytecode().length();
3152 }
3153
3157
3158 // Cache the heap broker since we access it a bunch.
3159 compiler::JSHeapBroker* broker_ = compilation_unit_->broker();
3160
3165
3166 // Change the number of predecessors when encountering a dead predecessor.
3167 // In case we are in a peeled iteration the decrement is undone after
3168 // finishing the peel. This is needed since in the next iteration the
3169 // predecessor might not be dead.
3170 void DecrementDeadPredecessorAndAccountForPeeling(uint32_t offset) {
3172 DCHECK_GT(predecessor_count_[offset], 0);
3173 DCHECK_IMPLIES(merge_states_[offset],
3174 merge_states_[offset]->predecessor_count() ==
3175 predecessor_count_[offset] - 1);
3176 predecessor_count_[offset]--;
3177 if (in_peeled_iteration()) {
3178 decremented_predecessor_offsets_.push_back(offset);
3179 } else {
3180 DCHECK(decremented_predecessor_offsets_.empty());
3181 }
3182 }
3183 // Set the number of predecessors initially.
3184 void InitializePredecessorCount(uint32_t offset, int amount) {
3186 DCHECK_NULL(merge_states_[offset]);
3187 predecessor_count_[offset] = amount;
3188 }
3189 void UpdatePredecessorCount(uint32_t offset, int diff) {
3191 DCHECK_LE(0, static_cast<int64_t>(predecessor_count_[offset]) + diff);
3192 DCHECK_IMPLIES(merge_states_[offset],
3193 merge_states_[offset]->predecessor_count() ==
3194 predecessor_count_[offset] + diff);
3195 predecessor_count_[offset] += diff;
3196 }
3197 uint32_t predecessor_count(uint32_t offset) {
3199 DCHECK_IMPLIES(!decremented_predecessor_offsets_.empty(),
3200 in_peeled_iteration());
3201 uint32_t actual = predecessor_count_[offset];
3202 DCHECK_IMPLIES(merge_states_[offset],
3203 merge_states_[offset]->predecessor_count() == actual);
3204 return actual;
3205 }
3207
3208 int peeled_iteration_count_ = 0;
3209 bool any_peeled_loop_ = false;
3211
3212 bool in_peeled_iteration() const {
3213 DCHECK_GE(peeled_iteration_count_, 0);
3214 return peeled_iteration_count_ > 0;
3215 }
3216
3217 // When loop peeling is enabled, the second-to-last peeling iteration is
3218 // the optimistic iteration. At the end we try to compile the JumpLoop and
3219 // only proceed with the fallback iteration 0 if the loop state is
3220 // incompatible with the loop end state.
3222 return v8_flags.maglev_optimistic_peeled_loops &&
3223 peeled_iteration_count_ == 1;
3224 }
3225 bool is_loop_effect_tracking_enabled() {
3226 return v8_flags.maglev_escape_analysis || v8_flags.maglev_licm;
3227 }
3228 bool is_loop_effect_tracking() { return loop_effects_; }
3229 LoopEffects* loop_effects_ = nullptr;
3231
3232 // When processing the peeled iteration of a loop, we need to reset the
3233 // decremented predecessor counts inside of the loop before processing the
3234 // body again. For this, we record offsets where we decremented the
3235 // predecessor count.
3237 // The set of loop headers for which we decided to do loop peeling.
3239
3240 // Current block information.
3241 bool in_prologue_ = true;
3243 std::optional<InterpretedDeoptFrame> entry_stack_check_frame_;
3244 std::optional<DeoptFrame> latest_checkpointed_frame_;
3246 struct ForInState {
3248 ValueNode* cache_type = nullptr;
3249 ValueNode* enum_cache_indices = nullptr;
3250 ValueNode* key = nullptr;
3251 ValueNode* index = nullptr;
3252 bool receiver_needs_map_check = false;
3253 };
3254 // TODO(leszeks): Allow having a stack of these.
3255 ForInState current_for_in_state = ForInState();
3256
3257 AllocationBlock* current_allocation_block_ = nullptr;
3258
3261
3264
3265 ValueNode* inlined_new_target_ = nullptr;
3266
3267 bool is_turbolev_ = false;
3268
3269 // Bytecode offset at which compilation should start.
3272 if (!graph_->is_osr()) return kFunctionEntryBytecodeOffset;
3273 return bytecode_analysis_.osr_bailout_id().ToInt();
3274 }
3275
3276 int inlining_id_ = SourcePosition::kNotInlined;
3277 int next_handler_table_index_ = 0;
3278
3279 DeoptFrameScope* current_deopt_scope_ = nullptr;
3280 LazyDeoptResultLocationScope* lazy_deopt_result_location_scope_ = nullptr;
3281
3283 int end;
3285 };
3287
3288#ifdef DEBUG
3289 bool IsNodeCreatedForThisBytecode(ValueNode* node) const {
3290 return new_nodes_.find(node) != new_nodes_.end();
3291 }
3292
3293 std::unordered_set<Node*> new_nodes_;
3294#endif
3295
3296 // Some helpers for CSE
3297
3298 static size_t fast_hash_combine(size_t seed, size_t h) {
3299 // Implementation from boost. Good enough for GVN.
3300 return h + 0x9e3779b9 + (seed << 6) + (seed >> 2);
3301 }
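// Added note: 0x9e3779b9 is the 32-bit golden-ratio constant used by
// boost::hash_combine; mixing it with shifted copies of the seed spreads
// bits well enough for GVN bucketing, e.g.
//
//   size_t h = fast_hash_combine(base::hash_value(1), base::hash_value(2));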
3302
3303 template <typename T>
3304 static size_t gvn_hash_value(const T& in) {
3305 return base::hash_value(in);
3306 }
3307
3308 static size_t gvn_hash_value(const compiler::MapRef& map) {
3309 return map.hash_value();
3310 }
3311
3312 static size_t gvn_hash_value(const interpreter::Register& reg) {
3313 return base::hash_value(reg.index());
3314 }
3315
3316 static size_t gvn_hash_value(const Representation& rep) {
3317 return base::hash_value(rep.kind());
3318 }
3319
3320 static size_t gvn_hash_value(const ExternalReference& ref) {
3321 return base::hash_value(ref.address());
3322 }
3323
3324 static size_t gvn_hash_value(const PolymorphicAccessInfo& access_info) {
3325 return access_info.hash_value();
3326 }
3327
3328 template <typename T>
3329 static size_t gvn_hash_value(const v8::internal::ZoneCompactSet<T>& vector) {
3330 size_t hash = base::hash_value(vector.size());
3331 for (auto e : vector) {
3332 hash = fast_hash_combine(hash, gvn_hash_value(e));
3333 }
3334 return hash;
3335 }
3336
3337 template <typename T>
3338 static size_t gvn_hash_value(const v8::internal::ZoneVector<T>& vector) {
3339 size_t hash = base::hash_value(vector.size());
3340 for (auto e : vector) {
3341 hash = fast_hash_combine(hash, gvn_hash_value(e));
3342 }
3343 return hash;
3344 }
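// Added commentary: these overloads let GVN hash heterogeneous node
// parameters uniformly. A hedged sketch (placeholder values, not the actual
// GVN code):
//
//   size_t hash = gvn_hash_value(rep);                     // Representation
//   hash = fast_hash_combine(hash, gvn_hash_value(maps));  // ZoneVector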
3345
3346 bool CanSpeculateCall() const {
3347 return current_speculation_feedback_.IsValid();
3348 }
3349
3350 inline void MarkNodeDead(Node* node) {
3351 for (int i = 0; i < node->input_count(); ++i) {
3352 node->input(i).clear();
3353 }
3354 node->OverwriteWith(Opcode::kDead);
3355 }
3356
3359};
3360
3361} // namespace maglev
3362} // namespace internal
3363} // namespace v8
3364
3365#endif // V8_MAGLEV_MAGLEV_GRAPH_BUILDER_H_
TFGraph * graph
#define DECLARE_VISITOR(name,...)
#define BYTECODE_CASE(name,...)
uint8_t data_[MAX_STACK_LENGTH]
int16_t parameter_count
Definition builtins.cc:67
interpreter::Bytecode bytecode
Definition builtins.cc:43
Builtins::Kind kind
Definition builtins.cc:40
#define BYTECODE_LIST(V, V_TSA)
Definition bytecodes.h:479
bool Contains(int i) const
Definition bit-vector.h:180
static V8_EXPORT_PRIVATE bool IsCpp(Builtin builtin)
Definition builtins.cc:496
constexpr int ToInt() const
Definition utils.h:673
V8_EXPORT_PRIVATE Address address() const
constexpr Kind kind() const
static constexpr Tagged< Smi > FromInt(int value)
Definition smi.h:38
static bool constexpr IsValid(T value)
Definition smi.h:67
static Tagged< TaggedIndex > FromIntptr(intptr_t value)
static bool constexpr IsValid(intptr_t value)
void push_back(const T &value)
T * New(Args &&... args)
Definition zone.h:114
ScopeInfoRef OuterScopeInfo(JSHeapBroker *broker) const
IndirectHandle< SharedFunctionInfo > object() const
ScopeInfoRef scope_info(JSHeapBroker *broker) const
static std::ostream & Decode(std::ostream &os, const uint8_t *bytecode_start, bool with_hex=true)
static constexpr bool UnconditionallyThrows(Bytecode bytecode)
Definition bytecodes.h:879
static constexpr bool Returns(Bytecode bytecode)
Definition bytecodes.h:872
static constexpr bool IsForwardJump(Bytecode bytecode)
Definition bytecodes.h:805
static constexpr bool IsSwitch(Bytecode bytecode)
Definition bytecodes.h:819
static constexpr bool IsConditionalJump(Bytecode bytecode)
Definition bytecodes.h:764
static constexpr bool IsJump(Bytecode bytecode)
Definition bytecodes.h:798
static constexpr Register FromParameterIndex(int index)
static constexpr Register invalid_value()
BasicBlock * block_ptr() const
Definition maglev-ir.h:980
void Bind(BasicBlock *block)
Definition maglev-ir.h:970
BasicBlockRef * MoveToRefList(BasicBlockRef *ref_list_head)
Definition maglev-ir.h:960
BasicBlockRef * SetToBlockAndReturnNext(BasicBlock *block)
Definition maglev-ir.h:937
void AddExceptionHandler(ExceptionHandlerInfo *handler)
void set_edge_split_block(BasicBlock *predecessor)
void set_feedback(compiler::FeedbackSource const &feedback, FeedbackSlotType slot_type)
Definition maglev-ir.h:9711
void set_arg(int i, ValueNode *node)
Definition maglev-ir.h:9821
ZoneMap< int, TaggedIndexConstant * > & tagged_index()
void record_scope_info(ValueNode *context, compiler::OptionalScopeInfoRef scope_info)
ZoneMap< uint32_t, Uint32Constant * > & uint32()
ZoneVector< Node * > & node_buffer()
ZoneMap< int32_t, Int32Constant * > & int32()
ZoneVector< InitialValue * > & parameters()
ZoneMap< uint64_t, Float64Constant * > & float64()
ZoneMap< int, SmiConstant * > & smi()
void CopyFrom(const MaglevCompilationUnit &info, MergePointInterpreterFrameState &state, bool preserve_known_node_aspects, Zone *zone)
compiler::SharedFunctionInfoRef shared_function_info() const
PatchAccumulatorInBranchScope(BranchBuilder &builder, ValueNode *node, RootIndex root_index)
MaglevGraphBuilder::MaglevSubGraphBuilder * sub_builder_
BranchBuilder(MaglevGraphBuilder *builder, MaglevSubGraphBuilder *sub_builder, BranchType jump_type, MaglevSubGraphBuilder::Label *jump_label)
BranchResult Build(std::initializer_list< ValueNode * > inputs, Args &&... args)
void SetBranchSpecializationMode(BranchSpecializationMode mode)
BranchBuilder(MaglevGraphBuilder *builder, BranchType jump_type)
void GotoIfTrue(Label *true_target, std::initializer_list< ValueNode * > control_inputs, Args &&... args)
void GotoIfFalse(Label *false_target, std::initializer_list< ValueNode * > control_inputs, Args &&... args)
MaglevSubGraphBuilder(MaglevGraphBuilder *builder, int variable_count)
ReduceResult Branch(std::initializer_list< Variable * > vars, FCond cond, FTrue if_true, FFalse if_false)
LoopLabel BeginLoop(std::initializer_list< Variable * > loop_vars)
void RecordUseReprHint(Phi *phi, UseRepresentationSet reprs)
ValueNode * BuildLoadTaggedField(ValueNode *object, uint32_t offset, Args &&... args)
void DecrementDeadPredecessorAndAccountForPeeling(uint32_t offset)
static compiler::OptionalHeapObjectRef TryGetConstant(compiler::JSHeapBroker *broker, LocalIsolate *isolate, ValueNode *node)
ValueNode * TrySpecializeLoadScriptContextSlot(ValueNode *context, int index)
ValueNode * GetTruncatedInt32ForToNumber(interpreter::Register reg, NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
static BranchType NegateBranchType(BranchType jump_type)
bool HasDisjointType(ValueNode *lhs, NodeType rhs_type)
void SetNodeInputs(NodeT *node, std::initializer_list< ValueNode * > inputs)
const compiler::BytecodeLivenessState * GetInLiveness() const
const FeedbackNexus FeedbackNexusForOperand(int slot_operand_index) const
NodeT * AddNewNode(std::initializer_list< ValueNode * > inputs, Args &&... args)
Uint32Constant * GetUint32Constant(int constant)
void UpdatePredecessorCount(uint32_t offset, int diff)
ValueNode * ConvertInputTo(ValueNode *input, ValueRepresentation expected)
bool HaveDisjointTypes(ValueNode *lhs, ValueNode *rhs)
NodeT * CreateNewConstantNode(Args &&... args) const
compiler::ref_traits< T >::ref_type GetRefOperand(int operand_index)
NodeType CheckTypes(ValueNode *node, std::initializer_list< NodeType > types)
static size_t fast_hash_combine(size_t seed, size_t h)
ReduceResult BuildInlineFunction(SourcePosition call_site_position, ValueNode *context, ValueNode *function, ValueNode *new_target)
std::optional< InterpretedDeoptFrame > entry_stack_check_frame_
BasicBlock * BuildBranchIfReferenceEqual(ValueNode *lhs, ValueNode *rhs, BasicBlockRef *true_target, BasicBlockRef *false_target)
const DeoptFrameScope * current_deopt_scope() const
const InterpreterFrameState & current_interpreter_frame() const
ExternalConstant * GetExternalConstant(ExternalReference reference)
ReduceResult StoreAndCacheContextSlot(ValueNode *context, int index, ValueNode *value, ContextKind context_kind)
void StartFallthroughBlock(int next_block_offset, BasicBlock *predecessor)
void EnsureInt32(ValueNode *value, bool can_be_heap_number=false)
uint32_t GetFlag16Operand(int operand_index) const
static size_t gvn_hash_value(const v8::internal::ZoneVector< T > &vector)
ValueNode * GetHoleyFloat64ForToNumber(interpreter::Register reg, NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
ValueNode * GetContextAtDepth(ValueNode *context, size_t depth)
BasicBlock * CreateEdgeSplitBlock(BasicBlockRef &jump_targets, BasicBlock *predecessor)
const compiler::BytecodeLivenessState * GetInLivenessFor(int offset) const
SourcePositionTableIterator source_position_iterator_
BasicBlock * FinishBlock(std::initializer_list< ValueNode * > control_inputs, Args &&... args)
ValueNode * GetAccumulatorTruncatedInt32ForToNumber(NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
static size_t gvn_hash_value(const interpreter::Register &reg)
NodeT * AddNewNode(size_t input_count, Function &&post_create_input_initializer, Args &&... args)
void StoreRegister(interpreter::Register target, NodeT *value)
uint32_t GetFlag8Operand(int operand_index) const
CallCPPBuiltin * BuildCallCPPBuiltin(Builtin builtin, ValueNode *target, ValueNode *new_target, std::initializer_list< ValueNode * > inputs)
ValueNode * GetTaggedValue(interpreter::Register reg, UseReprHintRecording record_use_repr_hint=UseReprHintRecording::kRecord)
static constexpr UseReprHintRecording ShouldRecordUseReprHint()
ValueNode * GetFloat64(interpreter::Register reg)
static size_t gvn_hash_value(const v8::internal::ZoneCompactSet< T > &vector)
void RegisterPhisWithGraphLabeller(MergePointInterpreterFrameState &merge_state)
ValueNode * GetUint8ClampedForToNumber(interpreter::Register reg)
void ProcessMergePointPredecessors(MergePointInterpreterFrameState &merge_state, BasicBlockRef &jump_targets)
ReduceResult EmitUnconditionalDeopt(DeoptimizeReason reason)
MaglevGraphLabeller * graph_labeller() const
DeoptFrame * AddInlinedArgumentsToDeoptFrame(DeoptFrame *deopt_frame, const MaglevCompilationUnit *unit, ValueNode *closure, base::Vector< ValueNode * > args)
void ProcessMergePoint(int offset, bool preserve_known_node_aspects)
void MinimizeContextChainDepth(ValueNode **context, size_t *depth)
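MinimizeContextChainDepth and TryGetParentContext shorten a context-slot access when parent contexts can already be resolved at graph-build time. An illustrative standalone version of the walk; ContextNode and known_parent are placeholders for the real ValueNode queries:

#include <cstddef>

struct ContextNode {                    // placeholder for a Maglev ValueNode
  ContextNode* known_parent = nullptr;  // non-null if statically known
};

// As long as the parent context is known at compile time, hop to it and
// shrink the remaining depth, so fewer context loads are emitted at runtime.
void MinimizeContextChainDepthSketch(ContextNode** context, size_t* depth) {
  while (*depth > 0 && (*context)->known_parent != nullptr) {
    *context = (*context)->known_parent;
    --*depth;
  }
}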
void MoveNodeBetweenRegisters(interpreter::Register src, interpreter::Register dst)
RootConstant * GetRootConstant(RootIndex index)
BranchBuilder CreateBranchBuilder(BranchType jump_type=BranchType::kBranchIfTrue)
DeoptFrame GetDeoptFrameForLazyDeopt(interpreter::Register result_location, int result_size)
void RecordUseReprHint(Phi *phi, UseRepresentation repr)
BranchBuilder CreateBranchBuilder(MaglevSubGraphBuilder *subgraph, MaglevSubGraphBuilder::Label *jump_label, BranchType jump_type=BranchType::kBranchIfTrue)
bool EnsureType(ValueNode *node, NodeType type, NodeType *old=nullptr)
compiler::FeedbackVectorRef feedback() const
static size_t gvn_hash_value(const ExternalReference &ref)
TaggedIndexConstant * GetTaggedIndexConstant(int constant)
void InitializeRegister(interpreter::Register reg, ValueNode *value)
interpreter::BytecodeArrayIterator iterator_
constexpr bool RuntimeFunctionCanThrow(Runtime::FunctionId function_id)
void BuildStoreMap(ValueNode *object, compiler::MapRef map, StoreMap::Kind kind)
CatchBlockDetails GetTryCatchBlockFromInfo(ExceptionHandlerInfo *info)
void BuildRegisterFrameInitialization(ValueNode *context=nullptr, ValueNode *closure=nullptr, ValueNode *new_target=nullptr)
CallBuiltin * BuildCallBuiltin(std::initializer_list< ValueNode * > inputs, compiler::FeedbackSource const &feedback, CallBuiltin::FeedbackSlotType slot_type=CallBuiltin::kTaggedIndex)
void StartNewBlock(BasicBlock *predecessor, MergePointInterpreterFrameState *merge_state, BasicBlockRef &refs_to_block)
void StartNewBlock(int offset, BasicBlock *predecessor)
void BuildLoadContextSlot(ValueNode *context, size_t depth, int slot_index, ContextSlotMutability slot_mutability, ContextKind context_kind)
compiler::BytecodeArrayRef bytecode() const
const compiler::BytecodeLivenessState * GetOutLiveness() const
void StoreRegisterPair(std::pair< interpreter::Register, interpreter::Register > target, NodeT *value)
ValueNode * LoadAndCacheContextSlot(ValueNode *context, int offset, ContextSlotMutability slot_mutability, ContextKind context_kind)
void SetKnownValue(ValueNode *node, compiler::ObjectRef constant, NodeType new_node_type)
LazyDeoptResultLocationScope * lazy_deopt_result_location_scope_
ValueNode * GetFloat64ForToNumber(interpreter::Register reg, NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
MergePointInterpreterFrameState ** merge_states_
MaglevCallerDetails * caller_details() const
MergePointInterpreterFrameState * GetCatchBlockFrameState()
BasicBlock * FinishInlinedBlockForCaller(ControlNode *control_node, ZoneVector< Node * > rem_nodes_in_call_block)
ReduceResult GetUint32ElementIndex(interpreter::Register reg)
ReduceResult BuildCallRuntime(Runtime::FunctionId function_id, std::initializer_list< ValueNode * > inputs)
Float64Constant * GetFloat64Constant(double constant)
void Print(const char *str, ValueNode *value)
MaybeReduceResult GetSmiValue(interpreter::Register reg, UseReprHintRecording record_use_repr_hint=UseReprHintRecording::kRecord)
const FeedbackNexus FeedbackNexusForSlot(FeedbackSlot slot) const
MaybeReduceResult TrySpecializeStoreScriptContextSlot(ValueNode *context, int index, ValueNode *value, Node **store)
static size_t gvn_hash_value(const compiler::MapRef &map)
void EnsureInt32(interpreter::Register reg)
MaglevGraphBuilder(LocalIsolate *local_isolate, MaglevCompilationUnit *compilation_unit, Graph *graph, MaglevCallerDetails *caller_details=nullptr)
ValueNode * GetAccumulatorHoleyFloat64ForToNumber(NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
ZoneStack< HandlerTableEntry > catch_block_stack_
ValueNode * TryGetParentContext(ValueNode *node)
static size_t gvn_hash_value(const PolymorphicAccessInfo &access_info)
bool ContextMayAlias(ValueNode *context, compiler::OptionalScopeInfoRef scope_info)
ValueNode * LoadRegisterHoleyFloat64ForToNumber(int operand_index, NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
std::pair< interpreter::Register, int > GetResultLocationAndSize() const
MaybeReduceResult GetAccumulatorSmi(UseReprHintRecording record_use_repr_hint=UseReprHintRecording::kRecord)
ReduceResult BuildAbort(AbortReason reason)
ZoneUnorderedMap< KnownNodeAspects::LoadedContextSlotsKey, Node * > unobserved_context_slot_stores_
MaglevCompilationUnit * compilation_unit() const
CallBuiltin * BuildCallBuiltin(std::initializer_list< ValueNode * > inputs)
void InitializePredecessorCount(uint32_t offset, int amount)
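InitializePredecessorCount, UpdatePredecessorCount and DecrementDeadPredecessorAndAccountForPeeling track, per bytecode offset, how many incoming edges a merge point expects. A hypothetical standalone sketch of that bookkeeping, using an unordered_map keyed by offset rather than the builder's internal storage:

#include <cstdint>
#include <unordered_map>

// Each merge-point offset records how many predecessors it expects; the count
// is adjusted (diff may be negative) when control flow turns out to be dead.
class PredecessorCounts {
 public:
  void Initialize(uint32_t offset, int amount) { counts_[offset] = amount; }
  void Update(uint32_t offset, int diff) { counts_.at(offset) += diff; }
  int Get(uint32_t offset) const { return counts_.at(offset); }

 private:
  std::unordered_map<uint32_t, int> counts_;
};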
ReduceResult BuildStoreContextSlot(ValueNode *context, size_t depth, int slot_index, ValueNode *value, ContextKind context_kind)
std::function< DeoptFrameScope( compiler::JSFunctionRef, ValueNode *, ValueNode *, ValueNode *, ValueNode *, ValueNode *, ValueNode *)> GetDeoptScopeCallback
Float64Constant * GetFloat64Constant(Float64 constant)
const compiler::BytecodeAnalysis & bytecode_analysis() const
FeedbackSlot GetSlotOperand(int operand_index) const
const compiler::BytecodeLivenessState * GetOutLivenessFor(int offset) const
ValueNode * LoadRegister(int operand_index)
ValueNode * BuildExtendPropertiesBackingStore(compiler::MapRef map, ValueNode *receiver, ValueNode *property_array)
NodeT * AddNewNodeOrGetEquivalent(std::initializer_list< ValueNode * > raw_inputs, Args &&... args)
ValueNode * GetSecondValue(ValueNode *result)
ValueNode * GetValueOrUndefined(ValueNode *maybe_value)
SmiConstant * GetSmiConstant(int constant) const
std::function< ReduceResult(ValueNode *)> InitialCallback
static size_t gvn_hash_value(const Representation &rep)
ValueNode * GetInt32ElementIndex(interpreter::Register reg)
compiler::JSHeapBroker * broker() const
Int32Constant * GetInt32Constant(int32_t constant)
bool CheckStaticType(ValueNode *node, NodeType type, NodeType *old=nullptr)
std::optional< DeoptFrame > latest_checkpointed_frame_
bool TrySpecializeLoadContextSlotToFunctionContext(ValueNode *context, int slot_index, ContextSlotMutability slot_mutability)
std::function< void(ValueNode *, ValueNode *)> ProcessElementCallback
void RecordUseReprHintIfPhi(ValueNode *node, UseRepresentation repr)
void RegisterNode(const NodeBase *node, const MaglevCompilationUnit *unit, BytecodeOffset bytecode_offset, SourcePosition position)
MaybeReduceResult(base::PointerWithPayload< ValueNode, Kind, 3 > payload)
MaybeReduceResult & operator=(const MaybeReduceResult &) V8_NOEXCEPT=default
base::PointerWithPayload< ValueNode, Kind, 3 > payload_
MaybeReduceResult(const MaybeReduceResult &) V8_NOEXCEPT=default
base::PointerWithPayload< ValueNode, Kind, 3 > GetPayload() const
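GetPayload and payload_ show a result carrying its Kind in the spare low bits of an aligned ValueNode pointer. A standalone sketch of that pointer-tagging idea; this is not V8's base::PointerWithPayload, and it assumes the pointee's alignment frees up kBits low bits:

#include <cassert>
#include <cstdint>

template <typename T, typename Payload, int kBits>
class TaggedPointer {
 public:
  TaggedPointer(T* ptr, Payload payload)
      : bits_(reinterpret_cast<std::uintptr_t>(ptr) |
              static_cast<std::uintptr_t>(payload)) {
    // The pointer must be aligned enough that its low kBits bits are zero.
    assert((reinterpret_cast<std::uintptr_t>(ptr) & kMask) == 0);
  }
  T* pointer() const { return reinterpret_cast<T*>(bits_ & ~kMask); }
  Payload payload() const { return static_cast<Payload>(bits_ & kMask); }

 private:
  static constexpr std::uintptr_t kMask = (std::uintptr_t{1} << kBits) - 1;
  std::uintptr_t bits_;
};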
void MergeDead(const MaglevCompilationUnit &compilation_unit, unsigned num=1)
void Merge(MaglevGraphBuilder *graph_builder, InterpreterFrameState &unmerged, BasicBlock *predecessor)
constexpr bool Is() const
Definition maglev-ir.h:2362
static constexpr Opcode opcode_of
Definition maglev-ir.h:1909
static Derived * New(Zone *zone, std::initializer_list< ValueNode * > inputs, Args &&... args)
Definition maglev-ir.h:1912
constexpr OpProperties properties() const
Definition maglev-ir.h:1940
static constexpr bool needs_epoch_check(Opcode op)
Definition maglev-ir.h:2409
static constexpr bool participate_in_cse(Opcode op)
Definition maglev-ir.h:2401
constexpr ValueRepresentation value_representation() const
Definition maglev-ir.h:1050
static ReduceResult Done(ValueNode *value)
ReduceResult(const MaybeReduceResult &other)
ValueNode * get(uint32_t offset) const
Definition maglev-ir.h:5626
uint32_t double_elements_length() const
Definition maglev-ir.h:5616
JSHeapBroker *const broker_
int start
int end
base::Vector< const DirectHandle< Object > > args
Definition execution.cc:74
DirectHandle< Object > new_target
Definition execution.cc:75
Label label
JSHeapBroker * broker
int32_t offset
#define INTRINSICS_LIST(V)
std::string extension
TNode< Object > receiver
Node * node
double second
DirectHandle< JSReceiver > options
RpoNumber block
ZoneVector< RpoNumber > & result
Builtin builtin
LiftoffRegister reg
FunctionLiteral * literal
Definition liveedit.cc:294
BasicBlock * current_block_
#define DEFINE_BUILTIN_REDUCER(Name,...)
#define MAGLEV_REDUCED_BUILTIN(V)
#define BAILOUT(name,...)
#define BYTECODE_VISITOR(name,...)
#define DEFINE_IS_ROOT_OBJECT(type, name, CamelName)
const int length_
Definition mul-fft.cc:473
V8_INLINE size_t hash_value(unsigned int v)
Definition hashing.h:205
constexpr uint64_t double_to_uint64(double d)
Definition double.h:17
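double_to_uint64 reinterprets a double's bits as an integer, which is how a float64 constant map can be keyed exactly: -0.0 and 0.0 stay distinct, and NaN keys behave sanely, unlike keying on operator==. A portable standalone stand-in for that conversion:

#include <cstdint>
#include <cstring>

// Reinterpret the double's bit pattern as a uint64_t map key.
inline uint64_t DoubleBitsAsUint64(double d) {
  static_assert(sizeof(uint64_t) == sizeof(double), "double must be 64-bit");
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return bits;
}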
V8_INLINE size_t fast_hash_combine()
Definition fast-hash.h:19
constexpr OpProperties StaticPropertiesForOpcode(Opcode opcode)
constexpr bool IsConstantNode(Opcode opcode)
Definition maglev-ir.h:491
NodeType StaticTypeForNode(compiler::JSHeapBroker *broker, LocalIsolate *isolate, ValueNode *node)
constexpr bool IsCommutativeNode(Opcode opcode)
Definition maglev-ir.h:495
constexpr bool IsSimpleFieldStore(Opcode opcode)
Definition maglev-ir.h:547
constexpr bool IsTypedArrayStore(Opcode opcode)
Definition maglev-ir.h:563
NodeTMixin< Node, Derived > NodeT
Definition maglev-ir.h:2858
constexpr bool IsElementsArrayWrite(Opcode opcode)
Definition maglev-ir.h:559
constexpr int kFunctionEntryBytecodeOffset
Definition globals.h:854
V8_EXPORT_PRIVATE FlagValues v8_flags
return value
Definition map-inl.h:893
void MemsetUint32(uint32_t *dest, uint32_t value, size_t counter)
Definition memcopy.h:267
RegExpBuilder builder_
Node * node_
#define ROOT_LIST(V)
Definition roots.h:488
#define FOR_EACH_THROWING_INTRINSIC(F)
Definition runtime.h:865
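FOR_EACH_THROWING_INTRINSIC is a list macro and RuntimeFunctionCanThrow is a constexpr predicate over runtime function ids. A hedged standalone sketch of the X-macro pattern such a pair typically uses; the list and the enum below are made up for illustration, not V8's intrinsic list:

// The list macro expands once per entry; a switch generated from it answers
// "can this intrinsic throw?" at compile time.
#define FOR_EACH_THROWING_EXAMPLE(F) \
  F(Throw)                           \
  F(ThrowTypeError)

enum class ExampleFunctionId { kThrow, kThrowTypeError, kAdd };

constexpr bool ExampleCanThrow(ExampleFunctionId id) {
  switch (id) {
#define CASE(Name)              \
  case ExampleFunctionId::k##Name: \
    return true;
    FOR_EACH_THROWING_EXAMPLE(CASE)
#undef CASE
    default:
      return false;
  }
}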
#define V8_NOEXCEPT
#define UNREACHABLE()
Definition logging.h:67
#define DCHECK_LE(v1, v2)
Definition logging.h:490
#define DCHECK_NULL(val)
Definition logging.h:491
#define CHECK(condition)
Definition logging.h:124
#define DCHECK_NOT_NULL(val)
Definition logging.h:492
#define CHECK_NULL(val)
#define CHECK_NOT_NULL(val)
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define CHECK_NE(lhs, rhs)
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define CHECK_EQ(lhs, rhs)
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define DCHECK_GT(v1, v2)
Definition logging.h:487
ZoneMap< uint32_t, AvailableExpression > available_expressions
NodeInfo * GetOrCreateInfoFor(ValueNode *node, compiler::JSHeapBroker *broker, LocalIsolate *isolate)
ZoneUnorderedMap< KnownNodeAspects::LoadedContextSlotsKey, Node * > unobserved_context_slot_stores
Data(int jump_target_offset, int fallthrough_offset)
#define V8_UNLIKELY(condition)
Definition v8config.h:660
#define V8_NODISCARD
Definition v8config.h:693
std::unique_ptr< ValueMirror > value
std::unique_ptr< ValueMirror > key
TFGraph * graph_
int inlining_id_