v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
maglev-graph-builder.cc
1// Copyright 2022 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
7#include <algorithm>
8#include <limits>
9#include <optional>
10#include <utility>
11
12#include "src/base/bounds.h"
13#include "src/base/ieee754.h"
14#include "src/base/logging.h"
15#include "src/base/vector.h"
21#include "src/common/globals.h"
33#include "src/flags/flags.h"
47#include "src/numbers/ieee754.h"
65#include "src/roots/roots.h"
66#include "src/utils/utils.h"
67#include "src/zone/zone.h"
68
69#ifdef V8_INTL_SUPPORT
71#endif
72
73#define TRACE(...) \
74 if (v8_flags.trace_maglev_graph_building) { \
75 std::cout << __VA_ARGS__ << std::endl; \
76 }
77
78#define FAIL(...) \
79 TRACE("Failed " << __func__ << ":" << __LINE__ << ": " << __VA_ARGS__); \
80 return {};
81
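// A rough usage sketch of the two macros above (the caller shown is assumed,
// not part of this file): in a function whose return type is constructible
// from `{}` (e.g. a MaybeReduceResult-returning reduction),
//   if (something_unsupported) FAIL("unsupported " << detail);
// traces the failure only when v8_flags.trace_maglev_graph_building is set,
// and then returns an empty result.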
82namespace v8::internal::maglev {
83
84namespace {
85
86enum class CpuOperation {
87 kFloat64Round,
88};
89
90// TODO(leszeks): Add a generic mechanism for marking nodes as optionally
91// supported.
92bool IsSupported(CpuOperation op) {
93 switch (op) {
94 case CpuOperation::kFloat64Round:
95#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
97#elif defined(V8_TARGET_ARCH_ARM)
98 return CpuFeatures::IsSupported(ARMv8);
99#elif defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_PPC64) || \
100 defined(V8_TARGET_ARCH_S390X) || defined(V8_TARGET_ARCH_RISCV64) || \
101 defined(V8_TARGET_ARCH_RISCV32) || defined(V8_TARGET_ARCH_LOONG64) || \
102 defined(V8_TARGET_ARCH_MIPS64)
103 return true;
104#else
105#error "V8 does not support this architecture."
106#endif
107 }
108}
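// A minimal sketch of a call site (assumed, not from the original file):
// architecture-dependent lowerings bail out when the operation is
// unsupported, e.g.
//   if (!IsSupported(CpuOperation::kFloat64Round)) return {};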
109
110class FunctionContextSpecialization final : public AllStatic {
111 public:
112 static compiler::OptionalContextRef TryToRef(
113 const MaglevCompilationUnit* unit, ValueNode* context, size_t* depth) {
114 DCHECK(unit->info()->specialize_to_function_context());
115 if (Constant* n = context->TryCast<Constant>()) {
116 return n->ref().AsContext().previous(unit->broker(), depth);
117 }
118 return {};
119 }
120};
121
122} // namespace
123
125 if (CreateFunctionContext* n = node->TryCast<CreateFunctionContext>()) {
126 return n->context().node();
127 }
128
129 if (InlinedAllocation* alloc = node->TryCast<InlinedAllocation>()) {
130 return alloc->object()->get(
132 }
133
134 if (CallRuntime* n = node->TryCast<CallRuntime>()) {
135 switch (n->function_id()) {
136 case Runtime::kPushBlockContext:
137 case Runtime::kPushCatchContext:
138 case Runtime::kNewFunctionContext:
139 return n->context().node();
140 default:
141 break;
142 }
143 }
144
145 return nullptr;
146}
147
148// Attempts to walk up the context chain through the graph in order to reduce
149// depth and thus the number of runtime loads.
151 size_t* depth) {
152 while (*depth > 0) {
153 ValueNode* parent_context = TryGetParentContext(*context);
154 if (parent_context == nullptr) return;
155 *context = parent_context;
156 (*depth)--;
157 }
158}
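// Illustrative example of the walk above: if `context` currently refers to a
// CreateFunctionContext node and the requested depth is 2, TryGetParentContext
// returns that node's context input (its parent context), so the loop advances
// `context` one step and only a depth of 1 is left for runtime context loads.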
159
161 ValueNode* context = GetContext();
162 if (InlinedAllocation* alloc = context->TryCast<InlinedAllocation>()) {
163 alloc->ForceEscaping();
164 }
165}
166
168 public:
174
177 const InterpreterFrameState& frame, Mode mode = kDefault)
179 args_(reglist.register_count()),
180 mode_(mode) {
181 for (int i = 0; i < reglist.register_count(); i++) {
182 args_[i] = frame.get(reglist[i]);
183 }
184 DCHECK_IMPLIES(args_.size() == 0,
186 DCHECK_IMPLIES(mode != kDefault,
188 DCHECK_IMPLIES(mode == kWithArrayLike, args_.size() == 2);
189 }
190
195
204
213
216 return nullptr;
217 }
218 return args_[0];
219 }
220
230
233 DCHECK_GT(count(), 0);
234 return args_[args_.size() - 1];
235 }
236
237 size_t count() const {
239 return args_.size();
240 }
241 return args_.size() - 1;
242 }
243
244 size_t count_with_receiver() const { return count() + 1; }
245
246 ValueNode* operator[](size_t i) const {
248 i++;
249 }
250 if (i >= args_.size()) return nullptr;
251 return args_[i];
252 }
253
254 void set_arg(size_t i, ValueNode* node) {
256 i++;
257 }
258 DCHECK_LT(i, args_.size());
259 DCHECK(!node->properties().is_conversion());
260 args_[i] = node;
261 }
262
263 Mode mode() const { return mode_; }
264
266
269 DCHECK_GT(count(), 0);
270 args_.pop_back();
271 }
272
273 void PopReceiver(ConvertReceiverMode new_receiver_mode) {
276 DCHECK_GT(args_.size(), 0); // We have at least a receiver to pop!
277 // TODO(victorgomes): Do this better!
278 for (size_t i = 0; i < args_.size() - 1; i++) {
279 args_[i] = args_[i + 1];
280 }
281 args_.pop_back();
282
283 // If there is no non-receiver argument to become the new receiver,
284 // consider the new receiver to be known undefined.
286 : new_receiver_mode;
287 }
288
289 private:
293
295#ifdef DEBUG
 296 // Arguments can leak to the interpreter frame if the call is inlined;
297 // conversions should be stored in known_node_aspects/NodeInfo.
298 for (ValueNode* arg : args_) {
299 DCHECK(!arg->properties().is_conversion());
300 }
301#endif // DEBUG
302 }
303};
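// A rough usage sketch (the constructor arguments are assumed from the
// truncated signature above, and the exact receiver handling depends on the
// ConvertReceiverMode): a call site wraps the interpreter's register list and
// then reads the receiver and the arguments that follow it, e.g.
//   CallArguments args(ConvertReceiverMode::kAny, reglist,
//                      current_interpreter_frame_);
//   ValueNode* receiver = args.receiver();
//   ValueNode* first_arg = args[0];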
304
306 public:
308 MaglevGraphBuilder* builder,
310 : builder_(builder) {
311 saved_ = builder_->current_speculation_feedback_;
312 // Only set the current speculation feedback if speculation is allowed.
313 if (IsSpeculationAllowed(builder_->broker(), feedback_source)) {
314 builder->current_speculation_feedback_ = feedback_source;
315 } else {
316 builder->current_speculation_feedback_ = compiler::FeedbackSource();
317 }
318 }
320 builder_->current_speculation_feedback_ = saved_;
321 }
322
323 const compiler::FeedbackSource& value() { return saved_; }
324
325 private:
328
330 compiler::FeedbackSource feedback_source) {
331 if (!feedback_source.IsValid()) return false;
332 compiler::ProcessedFeedback const& processed_feedback =
333 broker->GetFeedbackForCall(feedback_source);
334 if (processed_feedback.IsInsufficient()) return false;
335 return processed_feedback.AsCall().speculation_mode() ==
336 SpeculationMode::kAllowSpeculation;
337 }
338};
339
341 MaglevGraphBuilder* builder, interpreter::Register result_location,
342 int result_size)
343 : builder_(builder),
344 previous_(builder->lazy_deopt_result_location_scope_),
345 result_location_(result_location),
346 result_size_(result_size) {
348}
349
352 builder_->lazy_deopt_result_location_scope_ = previous_;
353}
354
356 public:
358 compiler::OptionalJSFunctionRef maybe_js_target = {})
359 : builder_(builder),
360 parent_(builder->current_deopt_scope_),
361 data_(DeoptFrame::BuiltinContinuationFrameData{
362 continuation, {}, builder->GetContext(), maybe_js_target}) {
363 builder_->current_interpreter_frame().virtual_objects().Snapshot();
364 builder_->current_deopt_scope_ = this;
365 builder_->AddDeoptUse(
366 data_.get<DeoptFrame::BuiltinContinuationFrameData>().context);
367 DCHECK(data_.get<DeoptFrame::BuiltinContinuationFrameData>()
368 .parameters.empty());
369 }
370
372 compiler::OptionalJSFunctionRef maybe_js_target,
374 : builder_(builder),
376 data_(DeoptFrame::BuiltinContinuationFrameData{
377 continuation, builder->zone()->CloneVector(parameters),
378 builder->GetContext(), maybe_js_target}) {
379 builder_->current_interpreter_frame().virtual_objects().Snapshot();
380 builder_->current_deopt_scope_ = this;
381 builder_->AddDeoptUse(
383 if (parameters.size() > 0) {
384 if (InlinedAllocation* receiver =
385 parameters[0]->TryCast<InlinedAllocation>()) {
386 // We escape the first argument, since the builtin continuation call can
387 // trigger a stack iteration, which expects the receiver to be a
 388 // materialized object.
389 receiver->ForceEscaping();
390 }
391 }
392 for (ValueNode* node :
394 builder_->AddDeoptUse(node);
395 }
396 }
397
399 : builder_(builder),
400 parent_(builder->current_deopt_scope_),
401 data_(DeoptFrame::ConstructInvokeStubFrameData{
402 *builder->compilation_unit(), builder->current_source_position_,
403 receiver, builder->GetContext()}) {
404 builder_->current_interpreter_frame().virtual_objects().Snapshot();
405 builder_->current_deopt_scope_ = this;
406 builder_->AddDeoptUse(
408 builder_->AddDeoptUse(
410 }
411
413 builder_->current_deopt_scope_ = parent_;
414 // We might have cached a checkpointed frame which includes this scope;
415 // reset it just in case.
416 builder_->latest_checkpointed_frame_.reset();
417 }
418
419 DeoptFrameScope* parent() const { return parent_; }
420
423 return false;
424 }
426 .builtin_id) {
427 case Builtin::kGetIteratorWithFeedbackLazyDeoptContinuation:
428 case Builtin::kCallIteratorWithFeedbackLazyDeoptContinuation:
429 case Builtin::kArrayForEachLoopLazyDeoptContinuation:
430 case Builtin::kArrayMapLoopLazyDeoptContinuation:
431 case Builtin::kGenericLazyDeoptContinuation:
432 case Builtin::kToBooleanLazyDeoptContinuation:
433 return true;
434 default:
435 return false;
436 }
437 }
438
440 const DeoptFrame::FrameData& data() const { return data_; }
441
442 private:
446};
447
449 public:
450 explicit Variable(int index) : pseudo_register_(index) {}
451
452 private:
454
455 // Variables pretend to be interpreter registers as far as the dummy
456 // compilation unit and merge states are concerned.
458};
459
461 public:
464 liveness_(
465 sub_builder->builder_->zone()->New<compiler::BytecodeLivenessState>(
466 sub_builder->compilation_unit_->register_count(),
467 sub_builder->builder_->zone())) {}
469 std::initializer_list<Variable*> vars)
470 : Label(sub_builder, predecessor_count) {
471 for (Variable* var : vars) {
472 liveness_->MarkRegisterLive(var->pseudo_register_.index());
473 }
474 }
475
476 private:
478 BasicBlock* basic_block)
479 : merge_state_(merge_state), ref_(basic_block) {}
480
482 friend class BranchBuilder;
487};
488
490 public:
491 private:
493 BasicBlock* loop_header)
494 : merge_state_(merge_state), loop_header_(loop_header) {}
495
499};
500
503 public:
505 MaglevSubGraphBuilder* sub_builder)
506 : sub_builder_(sub_builder) {
507 sub_builder_->TakeKnownNodeAspectsAndVOsFromParent();
508 }
510 sub_builder_->MoveKnownNodeAspectsAndVOsToParent();
511 }
512
513 private:
515};
516
518 BasicBlock* predecessor) {
519 switch (mode()) {
520 case kBytecodeJumpTarget: {
521 auto& data = data_.bytecode_target;
522 if (data.patch_accumulator_scope &&
523 (data.patch_accumulator_scope->node_ == builder_->GetAccumulator())) {
525 builder_->MergeIntoFrameState(predecessor, data.jump_target_offset);
527 builder_->StartFallthroughBlock(data.fallthrough_offset, predecessor);
528 } else {
529 builder_->MergeIntoFrameState(predecessor, data.jump_target_offset);
530 builder_->StartFallthroughBlock(data.fallthrough_offset, predecessor);
531 }
532 break;
533 }
534 case kLabelJumpTarget:
535 auto& data = data_.label_target;
536 sub_builder_->MergeIntoLabel(data.jump_label, predecessor);
537 builder_->StartNewBlock(predecessor, nullptr, data.fallthrough);
538 break;
539 }
540}
541
543 BranchType jump_type) const {
544 DCHECK_EQ(mode(), kBytecodeJumpTarget);
545 auto& data = data_.bytecode_target;
546 if (branch_specialization_mode_ == BranchSpecializationMode::kAlwaysBoolean) {
547 builder_->SetAccumulatorInBranch(builder_->GetBooleanConstant(
548 data.patch_accumulator_scope->jump_type_ == jump_type));
549 } else if (data.patch_accumulator_scope->jump_type_ == jump_type) {
550 builder_->SetAccumulatorInBranch(
551 builder_->GetRootConstant(data.patch_accumulator_scope->root_index_));
552 } else {
553 builder_->SetAccumulatorInBranch(data.patch_accumulator_scope->node_);
554 }
555}
556
558 switch (mode()) {
559 case kBytecodeJumpTarget:
560 return &builder_->jump_targets_[data_.bytecode_target.jump_target_offset];
561 case kLabelJumpTarget:
562 return &data_.label_target.jump_label->ref_;
563 }
564}
565
567 switch (mode()) {
568 case kBytecodeJumpTarget:
569 return &builder_->jump_targets_[data_.bytecode_target.fallthrough_offset];
570 case kLabelJumpTarget:
571 return &data_.label_target.fallthrough;
572 }
573}
574
576 return jump_type_ == BranchType::kBranchIfTrue ? jump_target()
577 : fallthrough();
578}
579
581 return jump_type_ == BranchType::kBranchIfFalse ? jump_target()
582 : fallthrough();
583}
584
586 bool value) const {
587 switch (mode()) {
588 case kBytecodeJumpTarget: {
589 BranchType type_if_need_to_jump =
591 builder_->MarkBranchDeadAndJumpIfNeeded(jump_type_ ==
592 type_if_need_to_jump);
594 }
595 case kLabelJumpTarget:
597 }
598}
599
600template <typename ControlNodeT, typename... Args>
602 std::initializer_list<ValueNode*> control_inputs, Args&&... args) {
604 BasicBlock* block = builder_->FinishBlock<ControlNodeT>(
605 control_inputs, std::forward<Args>(args)..., true_target(),
606 false_target());
609}
610
612 MaglevGraphBuilder* builder, int variable_count)
613 : builder_(builder),
615 builder->zone(), builder->compilation_unit(), variable_count, 0, 0)),
616 pseudo_frame_(*compilation_unit_, nullptr, VirtualObjectList()) {
617 // We need to set a context, since this is unconditional in the frame state,
618 // so set it to the real context.
623}
624
627 std::initializer_list<Variable*> loop_vars) {
628 // Create fake liveness and loop info for the loop, with all given loop vars
629 // set to be live and assigned inside the loop.
630 compiler::BytecodeLivenessState* loop_header_liveness =
633 compiler::LoopInfo* loop_info = builder_->zone()->New<compiler::LoopInfo>(
636 for (Variable* var : loop_vars) {
637 loop_header_liveness->MarkRegisterLive(var->pseudo_register_.index());
638 loop_info->assignments().Add(var->pseudo_register_);
639 }
640
641 // Finish the current block, jumping (as a fallthrough) to the loop header.
642 BasicBlockRef loop_header_ref;
643 BasicBlock* loop_predecessor =
644 builder_->FinishBlock<Jump>({}, &loop_header_ref);
645
646 // Create a state for the loop header, with two predecessors (the above jump
647 // and the back edge), and initialise with the current state.
650 pseudo_frame_, *compilation_unit_, 0, 2, loop_header_liveness,
651 loop_info);
652
653 {
655 loop_state->Merge(builder_, *compilation_unit_, pseudo_frame_,
656 loop_predecessor);
657 }
658
659 // Start a new basic block for the loop.
660 DCHECK_NULL(pseudo_frame_.known_node_aspects());
661 pseudo_frame_.CopyFrom(*compilation_unit_, *loop_state);
662 MoveKnownNodeAspectsAndVOsToParent();
663
664 builder_->ProcessMergePointPredecessors(*loop_state, loop_header_ref);
665 builder_->StartNewBlock(nullptr, loop_state, loop_header_ref);
666
667 return LoopLabel{loop_state, loop_header_ref.block_ptr()};
668}
669
670template <typename ControlNodeT, typename... Args>
672 Label* true_target, std::initializer_list<ValueNode*> control_inputs,
673 Args&&... args) {
675
676 BasicBlockRef fallthrough_ref;
677
678 // Pass through to FinishBlock, converting Labels to BasicBlockRefs and the
679 // fallthrough label to the fallthrough ref.
680 BasicBlock* block = builder_->FinishBlock<ControlNodeT>(
681 control_inputs, std::forward<Args>(args)..., &true_target->ref_,
682 &fallthrough_ref);
683
684 MergeIntoLabel(true_target, block);
685
686 builder_->StartNewBlock(block, nullptr, fallthrough_ref);
687}
688
689template <typename ControlNodeT, typename... Args>
691 Label* false_target, std::initializer_list<ValueNode*> control_inputs,
692 Args&&... args) {
694
695 BasicBlockRef fallthrough_ref;
696
697 // Pass through to FinishBlock, converting Labels to BasicBlockRefs and the
698 // fallthrough label to the fallthrough ref.
699 BasicBlock* block = builder_->FinishBlock<ControlNodeT>(
700 control_inputs, std::forward<Args>(args)..., &fallthrough_ref,
701 &false_target->ref_);
702
703 MergeIntoLabel(false_target, block);
704
705 builder_->StartNewBlock(block, nullptr, fallthrough_ref);
706}
707
709 if (builder_->current_block_ == nullptr) {
710 ReducePredecessorCount(label);
711 return;
712 }
713 Goto(label);
714}
715
717 CHECK_NOT_NULL(builder_->current_block_);
718 BasicBlock* block = builder_->FinishBlock<Jump>({}, &label->ref_);
719 MergeIntoLabel(label, block);
720}
721
723 Label* label, unsigned num) {
724 DCHECK_GE(label->predecessor_count_, num);
725 if (num == 0) {
726 return;
727 }
728 label->predecessor_count_ -= num;
729 if (label->merge_state_ != nullptr) {
730 label->merge_state_->MergeDead(*compilation_unit_, num);
731 }
732}
733
735 if (builder_->current_block_ == nullptr) {
737 return;
738 }
739
740 BasicBlock* block =
741 builder_->FinishBlock<JumpLoop>({}, loop_label->loop_header_);
742 {
745 pseudo_frame_, block);
746 }
747 block->set_predecessor_id(loop_label->merge_state_->predecessor_count() - 1);
748}
749
751 Label* label) {
752 int predecessors_so_far = label->merge_state_ == nullptr
753 ? 0
754 : label->merge_state_->predecessors_so_far();
755 DCHECK_LE(predecessors_so_far, label->predecessor_count_);
756 builder_->current_block_ = nullptr;
757 ReducePredecessorCount(label,
758 label->predecessor_count_ - predecessors_so_far);
759 if (predecessors_so_far == 0) return ReduceResult::DoneWithAbort();
760 Bind(label);
761 return ReduceResult::Done();
762}
763
765 DCHECK_NULL(builder_->current_block_);
766
767 DCHECK_NULL(pseudo_frame_.known_node_aspects());
768 pseudo_frame_.CopyFrom(*compilation_unit_, *label->merge_state_);
769 MoveKnownNodeAspectsAndVOsToParent();
770
771 CHECK_EQ(label->merge_state_->predecessors_so_far(),
772 label->predecessor_count_);
773
774 builder_->ProcessMergePointPredecessors(*label->merge_state_, label->ref_);
775 builder_->StartNewBlock(nullptr, label->merge_state_, label->ref_);
776}
777
779 ValueNode* value) {
780 pseudo_frame_.set(var.pseudo_register_, value);
781}
783 const Variable& var) const {
784 return pseudo_frame_.get(var.pseudo_register_);
785}
786
787template <typename FCond, typename FTrue, typename FFalse>
789 std::initializer_list<MaglevSubGraphBuilder::Variable*> vars, FCond cond,
790 FTrue if_true, FFalse if_false) {
791 MaglevSubGraphBuilder::Label else_branch(this, 1);
793 &else_branch);
794 BranchResult branch_result = cond(builder);
795 if (branch_result == BranchResult::kAlwaysTrue) {
796 return if_true();
797 }
798 if (branch_result == BranchResult::kAlwaysFalse) {
799 return if_false();
800 }
801 DCHECK(branch_result == BranchResult::kDefault);
802 MaglevSubGraphBuilder::Label done(this, 2, vars);
803 MaybeReduceResult result_if_true = if_true();
804 CHECK(result_if_true.IsDone());
805 GotoOrTrim(&done);
806 Bind(&else_branch);
807 MaybeReduceResult result_if_false = if_false();
808 CHECK(result_if_false.IsDone());
809 if (result_if_true.IsDoneWithAbort() && result_if_false.IsDoneWithAbort()) {
811 }
812 GotoOrTrim(&done);
813 Bind(&done);
814 return ReduceResult::Done();
815}
816
817template <typename FCond, typename FTrue, typename FFalse>
818ValueNode* MaglevGraphBuilder::Select(FCond cond, FTrue if_true,
819 FFalse if_false) {
820 MaglevSubGraphBuilder subgraph(this, 1);
821 MaglevSubGraphBuilder::Label else_branch(&subgraph, 1);
822 BranchBuilder builder(this, &subgraph, BranchType::kBranchIfFalse,
823 &else_branch);
824 BranchResult branch_result = cond(builder);
825 if (branch_result == BranchResult::kAlwaysTrue) {
826 return if_true();
827 }
828 if (branch_result == BranchResult::kAlwaysFalse) {
829 return if_false();
830 }
831 DCHECK(branch_result == BranchResult::kDefault);
833 MaglevSubGraphBuilder::Label done(&subgraph, 2, {&ret_val});
834 subgraph.set(ret_val, if_true());
835 subgraph.Goto(&done);
836 subgraph.Bind(&else_branch);
837 subgraph.set(ret_val, if_false());
838 subgraph.Goto(&done);
839 subgraph.Bind(&done);
840 return subgraph.get(ret_val);
841}
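// A sketch of how Select builds a value-producing diamond (the condition
// builder and the constants here are purely illustrative):
//   ValueNode* result = Select(
//       [&](BranchBuilder& builder) {
//         return BuildBranchIfToBooleanTrue(builder, condition);
//       },
//       [&] { return GetBooleanConstant(true); },
//       [&] { return GetBooleanConstant(false); });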
842
843template <typename FCond, typename FTrue, typename FFalse>
845 FFalse if_false) {
846 MaglevSubGraphBuilder subgraph(this, 1);
847 MaglevSubGraphBuilder::Label else_branch(&subgraph, 1);
848 BranchBuilder builder(this, &subgraph, BranchType::kBranchIfFalse,
849 &else_branch);
850 BranchResult branch_result = cond(builder);
851 if (branch_result == BranchResult::kAlwaysTrue) {
852 return if_true();
853 }
854 if (branch_result == BranchResult::kAlwaysFalse) {
855 return if_false();
856 }
857 DCHECK(branch_result == BranchResult::kDefault);
859 MaglevSubGraphBuilder::Label done(&subgraph, 2, {&ret_val});
860 MaybeReduceResult result_if_true = if_true();
861 CHECK(result_if_true.IsDone());
862 if (result_if_true.IsDoneWithValue()) {
863 subgraph.set(ret_val, result_if_true.value());
864 }
865 subgraph.GotoOrTrim(&done);
866 subgraph.Bind(&else_branch);
867 MaybeReduceResult result_if_false = if_false();
868 CHECK(result_if_false.IsDone());
869 if (result_if_true.IsDoneWithAbort() && result_if_false.IsDoneWithAbort()) {
871 }
872 if (result_if_false.IsDoneWithValue()) {
873 subgraph.set(ret_val, result_if_false.value());
874 }
875 subgraph.GotoOrTrim(&done);
876 subgraph.Bind(&done);
877 return subgraph.get(ret_val);
878}
879
880// Known node aspects for the pseudo frame are null aside from when merging --
881// before each merge, we should borrow the node aspects from the parent
882// builder, and after each merge point, we should copy the node aspects back
883// to the parent. This is so that the parent graph builder can update its own
884// known node aspects without having to worry about this pseudo frame.
887 DCHECK_NULL(pseudo_frame_.known_node_aspects());
888 DCHECK(pseudo_frame_.virtual_objects().is_empty());
889 pseudo_frame_.set_known_node_aspects(
890 builder_->current_interpreter_frame_.known_node_aspects());
891 pseudo_frame_.set_virtual_objects(
892 builder_->current_interpreter_frame_.virtual_objects());
893}
894
897 DCHECK_NOT_NULL(pseudo_frame_.known_node_aspects());
898 builder_->current_interpreter_frame_.set_known_node_aspects(
899 pseudo_frame_.known_node_aspects());
900 pseudo_frame_.clear_known_node_aspects();
901 builder_->current_interpreter_frame_.set_virtual_objects(
902 pseudo_frame_.virtual_objects());
903 pseudo_frame_.set_virtual_objects(VirtualObjectList());
904}
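// In other words, a merge inside the sub-graph builder is bracketed as
//   TakeKnownNodeAspectsAndVOsFromParent();
//   ... merge into the label's or loop header's state ...
//   MoveKnownNodeAspectsAndVOsToParent();
// which is exactly what the BorrowParentKnownNodeAspectsAndVOs scope above
// does in its constructor and destructor.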
905
907 Label* label, BasicBlock* predecessor) {
909
910 if (label->merge_state_ == nullptr) {
911 // If there's no merge state, allocate a new one.
913 *compilation_unit_, pseudo_frame_, 0, label->predecessor_count_,
914 predecessor, label->liveness_);
915 } else {
916 // If there already is a frame state, merge.
917 label->merge_state_->Merge(builder_, *compilation_unit_, pseudo_frame_,
918 predecessor);
919 }
920}
921
924 Graph* graph,
929 graph_(graph),
930 bytecode_analysis_(bytecode().object(), zone(),
931 compilation_unit->osr_offset(), true),
932 iterator_(bytecode().object()),
934 allow_loop_peeling_(v8_flags.maglev_loop_peeling),
937 loop_headers_to_peel_(bytecode().length(), zone()),
939 // Add an extra jump_target slot for the inline exit if needed.
940 jump_targets_(zone()->AllocateArray<BasicBlockRef>(
941 bytecode().length() + (is_inline() ? 1 : 0))),
942 // Overallocate merge_states_ by one to allow always looking up the
943 // next offset. This overallocated slot can also be used for the inline
944 // exit when needed.
946 bytecode().length() + 1)),
952 is_inline() ? caller_details->deopt_frame->GetVirtualObjects()
956 ? bytecode_analysis_.osr_entry_point()
957 : 0),
960 memset(merge_states_, 0,
961 (bytecode().length() + 1) * sizeof(InterpreterFrameState*));
962 // Default construct basic block refs.
963 // TODO(leszeks): This could be a memset of nullptr to ..._jump_targets_.
964 for (int i = 0; i < bytecode().length(); ++i) {
965 new (&jump_targets_[i]) BasicBlockRef();
966 }
967
968 if (is_inline()) {
971 // The allocation/initialisation logic here relies on inline_exit_offset
972 // being the offset one past the end of the bytecode.
979 }
982 }
983
987 graph_->is_osr());
988 if (compilation_unit_->is_osr()) {
989 CHECK(!is_inline());
990
991 // Make sure that we're at a valid OSR entrypoint.
992 //
993 // This is also a defense-in-depth check to make sure that we're not
994 // compiling invalid bytecode if the OSR offset is wrong (e.g. because it
995 // belongs to different bytecode).
996 //
997 // OSR'ing into the middle of a loop is currently not supported. There
 998 // should not be any issue with OSR'ing outside of loops; we just don't
 999 // do it currently...
1001 it.AdvanceTo(compilation_unit_->osr_offset().ToInt());
1002 CHECK(it.CurrentBytecodeIsValidOSREntry());
1003 CHECK_EQ(entrypoint_, it.GetJumpTargetOffset());
1004
1006
1007 if (v8_flags.trace_maglev_graph_building) {
1008 std::cout << "- Non-standard entrypoint @" << entrypoint_
1009 << " by OSR from @" << compilation_unit_->osr_offset().ToInt()
1010 << std::endl;
1011 }
1012 }
1014
1016}
1017
1021
1023 BasicBlock* first_block;
1024 if (!is_inline() &&
1025 (v8_flags.maglev_hoist_osr_value_phi_untagging && graph_->is_osr())) {
1026 first_block =
1028 } else {
1029 first_block = FinishBlock<Jump>({}, &jump_targets_[entrypoint_]);
1030 }
1031 MergeIntoFrameState(first_block, entrypoint_);
1032 return first_block;
1033}
1034
1039
1045
1051
1057
1059 ValueNode* context, ValueNode* closure, ValueNode* new_target) {
1060 if (closure == nullptr &&
1063 broker(), broker()->CanonicalPersistentHandle(
1065 closure = GetConstant(function);
1066 context = GetConstant(function.context(broker()));
1067 }
1070
1071 interpreter::Register new_target_or_generator_register =
1073
1074 int register_index = 0;
1075
1076 if (compilation_unit_->is_osr()) {
1077 for (; register_index < register_count(); register_index++) {
1078 auto val =
1080 InitializeRegister(interpreter::Register(register_index), val);
1081 graph_->osr_values().push_back(val);
1082 }
1083 return;
1084 }
1085
1086 // TODO(leszeks): Don't emit if not needed.
1087 ValueNode* undefined_value = GetRootConstant(RootIndex::kUndefinedValue);
1088 if (new_target_or_generator_register.is_valid()) {
1089 int new_target_index = new_target_or_generator_register.index();
1090 for (; register_index < new_target_index; register_index++) {
1092 undefined_value);
1093 }
1095 new_target_or_generator_register,
1098 register_index++;
1099 }
1100 for (; register_index < register_count(); register_index++) {
1101 InitializeRegister(interpreter::Register(register_index), undefined_value);
1102 }
1103}
1104
1106 auto offset_and_info = bytecode_analysis().GetLoopInfos().begin();
1107 auto end = bytecode_analysis().GetLoopInfos().end();
1108 while (offset_and_info != end && offset_and_info->first < entrypoint_) {
1109 ++offset_and_info;
1110 }
1111 for (; offset_and_info != end; ++offset_and_info) {
1112 int offset = offset_and_info->first;
1113 const compiler::LoopInfo& loop_info = offset_and_info->second;
1115 // Peeled loops are treated like normal merges at first. We will construct
1116 // the proper loop header merge state when reaching the `JumpLoop` of the
1117 // peeled iteration.
1118 continue;
1119 }
1122 if (v8_flags.trace_maglev_graph_building) {
1123 std::cout << "- Creating loop merge state at @" << offset << std::endl;
1124 }
1127 predecessor_count(offset), liveness, &loop_info);
1128 }
1129
1130 if (bytecode().handler_table_size() > 0) {
1131 HandlerTable table(*bytecode().object());
1132 for (int i = 0; i < table.NumberOfRangeEntries(); i++) {
1133 const int offset = table.GetRangeHandler(i);
1134 const bool was_used = table.HandlerWasUsed(i);
1135 const interpreter::Register context_reg(table.GetRangeData(i));
1136 const compiler::BytecodeLivenessState* liveness =
1140 if (v8_flags.trace_maglev_graph_building) {
1141 std::cout << "- Creating exception merge state at @" << offset
1142 << (was_used ? "" : " (never used)") << ", context register r"
1143 << context_reg.index() << std::endl;
1144 }
1146 *compilation_unit_, liveness, offset, was_used, context_reg, graph_);
1147 }
1148 }
1149}
1150
1151namespace {
1152
1153template <int index, interpreter::OperandType... operands>
1154struct GetResultLocationAndSizeHelper;
1155
1156// Terminal cases
1157template <int index>
1158struct GetResultLocationAndSizeHelper<index> {
1159 static std::pair<interpreter::Register, int> GetResultLocationAndSize(
1160 const interpreter::BytecodeArrayIterator& iterator) {
1161 // TODO(leszeks): This should probably actually be "UNREACHABLE", but we emit
1162 // lazy deopt info for interrupt budget updates at returns; that info is only
1163 // used for stack iteration purposes, not for actual lazy deopts.
1165 }
1166 static bool HasOutputRegisterOperand() { return false; }
1167};
1168
1169template <int index, interpreter::OperandType... operands>
1170struct GetResultLocationAndSizeHelper<index, interpreter::OperandType::kRegOut,
1171 operands...> {
1172 static std::pair<interpreter::Register, int> GetResultLocationAndSize(
1173 const interpreter::BytecodeArrayIterator& iterator) {
1174 // We shouldn't have any other output operands than this one.
1175 return {iterator.GetRegisterOperand(index), 1};
1176 }
1177 static bool HasOutputRegisterOperand() { return true; }
1178};
1179
1180template <int index, interpreter::OperandType... operands>
1181struct GetResultLocationAndSizeHelper<
1182 index, interpreter::OperandType::kRegOutPair, operands...> {
1183 static std::pair<interpreter::Register, int> GetResultLocationAndSize(
1184 const interpreter::BytecodeArrayIterator& iterator) {
1185 // We shouldn't have any other output operands than this one.
1186 return {iterator.GetRegisterOperand(index), 2};
1187 }
1188 static bool HasOutputRegisterOperand() { return true; }
1189};
1190
1191template <int index, interpreter::OperandType... operands>
1192struct GetResultLocationAndSizeHelper<
1193 index, interpreter::OperandType::kRegOutTriple, operands...> {
1194 static std::pair<interpreter::Register, int> GetResultLocationAndSize(
1195 const interpreter::BytecodeArrayIterator& iterator) {
1196 // We shouldn't have any other output operands than this one.
1197 DCHECK(!(GetResultLocationAndSizeHelper<
1198 index + 1, operands...>::HasOutputRegisterOperand()));
1199 return {iterator.GetRegisterOperand(index), 3};
1200 }
1201 static bool HasOutputRegisterOperand() { return true; }
1202};
1203
1204// We don't support RegOutList for lazy deopts.
1205template <int index, interpreter::OperandType... operands>
1206struct GetResultLocationAndSizeHelper<
1207 index, interpreter::OperandType::kRegOutList, operands...> {
1208 static std::pair<interpreter::Register, int> GetResultLocationAndSize(
1209 const interpreter::BytecodeArrayIterator& iterator) {
1210 interpreter::RegisterList list = iterator.GetRegisterListOperand(index);
1211 return {list.first_register(), list.register_count()};
1212 }
1213 static bool HasOutputRegisterOperand() { return true; }
1214};
1215
1216// Induction case.
1217template <int index, interpreter::OperandType operand,
1218 interpreter::OperandType... operands>
1219struct GetResultLocationAndSizeHelper<index, operand, operands...> {
1220 static std::pair<interpreter::Register, int> GetResultLocationAndSize(
1221 const interpreter::BytecodeArrayIterator& iterator) {
1222 return GetResultLocationAndSizeHelper<
1223 index + 1, operands...>::GetResultLocationAndSize(iterator);
1224 }
1225 static bool HasOutputRegisterOperand() {
1226 return GetResultLocationAndSizeHelper<
1227 index + 1, operands...>::HasOutputRegisterOperand();
1228 }
1229};
1230
1233 interpreter::OperandType... operands>
1234std::pair<interpreter::Register, int> GetResultLocationAndSizeForBytecode(
1235 const interpreter::BytecodeArrayIterator& iterator) {
1236 // We don't support output registers for implicit registers.
1239 // If we write the accumulator, we shouldn't also write an output register.
1240 DCHECK(!(GetResultLocationAndSizeHelper<
1241 0, operands...>::HasOutputRegisterOperand()));
1243 }
1244
1245 // Use template magic to output the appropriate GetRegisterOperand call and
1246 // size for this bytecode.
1247 return GetResultLocationAndSizeHelper<
1248 0, operands...>::GetResultLocationAndSize(iterator);
1249}
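// Worked example (operand list assumed): for a bytecode that does not
// implicitly write the accumulator and whose operand types are
// <kReg, kRegOut>, the induction case skips operand 0 and the kRegOut case
// then yields {iterator.GetRegisterOperand(1), 1} as the lazy-deopt result
// location and size.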
1250
1251} // namespace
1252
1253std::pair<interpreter::Register, int>
1255 using Bytecode = interpreter::Bytecode;
1256 using OperandType = interpreter::OperandType;
1257 using ImplicitRegisterUse = interpreter::ImplicitRegisterUse;
1258 Bytecode bytecode = iterator_.current_bytecode();
1259 // TODO(leszeks): Only emit these cases for bytecodes we know can lazy deopt.
1260 switch (bytecode) {
1261#define CASE(Name, ...) \
1262 case Bytecode::k##Name: \
1263 return GetResultLocationAndSizeForBytecode<Bytecode::k##Name, \
1264 __VA_ARGS__>(iterator_);
1266#undef CASE
1267 }
1268 UNREACHABLE();
1269}
1270
1271#ifdef DEBUG
1272bool MaglevGraphBuilder::HasOutputRegister(interpreter::Register reg) const {
1276 }
1277 for (int i = 0; i < interpreter::Bytecodes::NumberOfOperands(bytecode); ++i) {
1281 int operand_range = iterator_.GetRegisterOperandRange(i);
1282 if (base::IsInRange(reg.index(), operand_reg.index(),
1283 operand_reg.index() + operand_range)) {
1284 return true;
1285 }
1286 }
1287 }
1288 return false;
1289}
1290#endif
1291
1293 DeoptFrame* deopt_frame, const MaglevCompilationUnit* unit,
1295 // Only create InlinedArgumentsDeoptFrame if we have a mismatch between
1296 // the formal parameter count and the argument count.
1297 if (static_cast<int>(args.size()) != unit->parameter_count()) {
1298 deopt_frame = zone()->New<InlinedArgumentsDeoptFrame>(
1300 deopt_frame);
1301 AddDeoptUse(closure);
1302 for (ValueNode* arg : deopt_frame->as_inlined_arguments().arguments()) {
1303 AddDeoptUse(arg);
1304 }
1305 }
1306 return deopt_frame;
1307}
1308
1310 const MaglevCompilationUnit* unit, ValueNode* closure,
1312 // The parent resumes after the call, which is roughly equivalent to a lazy
1313 // deopt. Use the helper function directly so that we can mark the
1314 // accumulator as dead (since it'll be overwritten by this function's
1315 // return value anyway).
1316 // TODO(leszeks): This is true for our current set of
1317 // inlinings/continuations, but there might be cases in the future where it
1318 // isn't. We may need to store the relevant overwritten register in
1319 // LazyDeoptFrameScope.
1320 DCHECK(
1324 DeoptFrame* deopt_frame = zone()->New<DeoptFrame>(
1326 current_deopt_scope_, true));
1327 return AddInlinedArgumentsToDeoptFrame(deopt_frame, unit, closure, args);
1328}
1329
1334
1336 if (in_prologue_) {
1338 }
1343 zone()->New<CompactInterpreterFrameState>(
1347
1348 latest_checkpointed_frame_->as_interpreted().frame_state()->ForEachValue(
1350 [&](ValueNode* node, interpreter::Register) { AddDeoptUse(node); });
1351 AddDeoptUse(latest_checkpointed_frame_->as_interpreted().closure());
1352
1353 // Skip lazy deopt builtin continuations.
1354 const DeoptFrameScope* deopt_scope = current_deopt_scope_;
1355 while (deopt_scope != nullptr &&
1356 deopt_scope->IsLazyDeoptContinuationFrame()) {
1357 deopt_scope = deopt_scope->parent();
1358 }
1359
1360 if (deopt_scope != nullptr) {
1361 // Support exactly one eager deopt builtin continuation. This can be
1362 // expanded in the future if necessary.
1363 DCHECK_NULL(deopt_scope->parent());
1364 DCHECK_EQ(deopt_scope->data().tag(),
1366#ifdef DEBUG
1367 if (deopt_scope->data().tag() ==
1371 if (frame.maybe_js_target) {
1372 int stack_parameter_count =
1374 DCHECK_EQ(stack_parameter_count, frame.parameters.length());
1375 } else {
1376 CallInterfaceDescriptor descriptor =
1378 DCHECK_EQ(descriptor.GetParameterCount(), frame.parameters.length());
1379 }
1380 }
1381#endif
1382
1383 // Wrap the above frame in the scope frame.
1385 deopt_scope->data(),
1386 zone()->New<DeoptFrame>(*latest_checkpointed_frame_));
1387 }
1388 }
1390}
1391
1393 interpreter::Register result_location, int result_size) {
1394 return GetDeoptFrameForLazyDeoptHelper(result_location, result_size,
1395 current_deopt_scope_, false);
1396}
1397
1399 interpreter::Register result_location, int result_size,
1400 DeoptFrameScope* scope, bool mark_accumulator_dead) {
1401 if (scope == nullptr) {
1404 // Remove result locations from liveness.
1405 if (result_location == interpreter::Register::virtual_accumulator()) {
1406 DCHECK_EQ(result_size, 1);
1407 liveness->MarkAccumulatorDead();
1408 mark_accumulator_dead = false;
1409 } else {
1410 DCHECK(!result_location.is_parameter());
1411 for (int i = 0; i < result_size; i++) {
1412 liveness->MarkRegisterDead(result_location.index() + i);
1413 }
1414 }
1415 // Explicitly drop the accumulator if needed.
1416 if (mark_accumulator_dead && liveness->AccumulatorIsLive()) {
1417 liveness->MarkAccumulatorDead();
1418 }
1422 zone()->New<CompactInterpreterFrameState>(*compilation_unit_, liveness,
1426 ret.frame_state()->ForEachValue(
1428 // Receiver and closure values have to be materialized, even if
1429 // they don't otherwise escape.
1432 node->add_use();
1433 } else {
1434 AddDeoptUse(node);
1435 }
1436 });
1437 AddDeoptUse(ret.closure());
1438 return ret;
1439 }
1440
1441 // We currently only support builtin continuations for bytecodes that write
1442 // to the accumulator.
1445
1446#ifdef DEBUG
1451 if (frame.maybe_js_target) {
1452 int stack_parameter_count =
1454 // The deopt input value is passed by the deoptimizer, so shouldn't be a
1455 // parameter here.
1456 DCHECK_EQ(stack_parameter_count, frame.parameters.length() + 1);
1457 } else {
1458 CallInterfaceDescriptor descriptor =
1460 // The deopt input value is passed by the deoptimizer, so shouldn't be a
1461 // parameter here.
1462 DCHECK_EQ(descriptor.GetParameterCount(), frame.parameters.length() + 1);
1463 // The deopt input value is passed on the stack.
1464 DCHECK_GT(descriptor.GetStackParameterCount(), 0);
1465 }
1466 }
1467#endif
1468
1469 // Mark the accumulator dead in parent frames since we know that the
1470 // continuation will write it.
1471 return DeoptFrame(scope->data(),
1472 zone()->New<DeoptFrame>(GetDeoptFrameForLazyDeoptHelper(
1473 result_location, result_size, scope->parent(),
1474 scope->data().tag() ==
1476}
1477
1481 DCHECK(!is_inline());
1484 zone()->New<CompactInterpreterFrameState>(
1489 current_source_position_, nullptr);
1490
1491 (*entry_stack_check_frame_)
1492 .frame_state()
1493 ->ForEachValue(
1495 [&](ValueNode* node, interpreter::Register) { AddDeoptUse(node); });
1496 AddDeoptUse((*entry_stack_check_frame_).closure());
1498}
1499
1501 ValueNode* value, UseReprHintRecording record_use_repr_hint) {
1502 if (V8_LIKELY(record_use_repr_hint == UseReprHintRecording::kRecord)) {
1504 }
1505
1506 ValueRepresentation representation =
1508 if (representation == ValueRepresentation::kTagged) return value;
1509
1510 if (Int32Constant* as_int32_constant = value->TryCast<Int32Constant>();
1511 as_int32_constant && Smi::IsValid(as_int32_constant->value())) {
1512 return GetSmiConstant(as_int32_constant->value());
1513 }
1514
1515 NodeInfo* node_info = GetOrCreateInfoFor(value);
1516 auto& alternative = node_info->alternative();
1517
1518 if (ValueNode* alt = alternative.tagged()) {
1519 return alt;
1520 }
1521
1522 switch (representation) {
1524 if (NodeTypeIsSmi(node_info->type())) {
1525 return alternative.set_tagged(AddNewNode<UnsafeSmiTagInt32>({value}));
1526 }
1527 return alternative.set_tagged(AddNewNode<Int32ToNumber>({value}));
1528 }
1530 if (NodeTypeIsSmi(node_info->type())) {
1531 return alternative.set_tagged(AddNewNode<UnsafeSmiTagUint32>({value}));
1532 }
1533 return alternative.set_tagged(AddNewNode<Uint32ToNumber>({value}));
1534 }
1536 return alternative.set_tagged(AddNewNode<Float64ToTagged>(
1538 }
1540 return alternative.set_tagged(AddNewNode<HoleyFloat64ToTagged>(
1542 }
1543
1545 if (NodeTypeIsSmi(node_info->type())) {
1546 return alternative.set_tagged(AddNewNode<UnsafeSmiTagIntPtr>({value}));
1547 }
1548 return alternative.set_tagged(AddNewNode<IntPtrToNumber>({value}));
1549
1551 UNREACHABLE();
1552 }
1553 UNREACHABLE();
1554}
1555
1557 ValueNode* value, UseReprHintRecording record_use_repr_hint) {
1558 if (V8_LIKELY(record_use_repr_hint == UseReprHintRecording::kRecord)) {
1560 }
1561
1562 NodeInfo* node_info = GetOrCreateInfoFor(value);
1563
1564 ValueRepresentation representation =
1565 value->properties().value_representation();
1566 if (representation == ValueRepresentation::kTagged) {
1567 return BuildCheckSmi(value, !value->Is<Phi>());
1568 }
1569
1570 auto& alternative = node_info->alternative();
1571
1572 if (ValueNode* alt = alternative.tagged()) {
1573 // HoleyFloat64ToTagged does not canonicalize Smis by default, since it can
1574 // be expensive. If we are reading a Smi value, we should try to
1575 // canonicalize now.
1576 if (HoleyFloat64ToTagged* conversion_node =
1577 alt->TryCast<HoleyFloat64ToTagged>()) {
1578 conversion_node->SetMode(
1580 }
1581 return BuildCheckSmi(alt, !value->Is<Phi>());
1582 }
1583
1584 switch (representation) {
1586 if (NodeTypeIsSmi(node_info->type())) {
1587 return alternative.set_tagged(AddNewNode<UnsafeSmiTagInt32>({value}));
1588 }
1589 return alternative.set_tagged(AddNewNode<CheckedSmiTagInt32>({value}));
1590 }
1592 if (NodeTypeIsSmi(node_info->type())) {
1593 return alternative.set_tagged(AddNewNode<UnsafeSmiTagUint32>({value}));
1594 }
1595 return alternative.set_tagged(AddNewNode<CheckedSmiTagUint32>({value}));
1596 }
1598 return alternative.set_tagged(AddNewNode<CheckedSmiTagFloat64>({value}));
1599 }
1601 return alternative.set_tagged(AddNewNode<CheckedSmiTagFloat64>({value}));
1602 }
1604 return alternative.set_tagged(AddNewNode<CheckedSmiTagIntPtr>({value}));
1606 UNREACHABLE();
1607 }
1608 UNREACHABLE();
1609}
1610
1611namespace {
1612CheckType GetCheckType(NodeType type) {
1613 return NodeTypeIs(type, NodeType::kAnyHeapObject)
1616}
1617} // namespace
1618
1622 NodeType old_type;
1623 if (CheckType(node, NodeType::kInternalizedString, &old_type)) return node;
1624 NodeInfo* known_info = GetOrCreateInfoFor(node);
1625 if (known_info->alternative().checked_value()) {
1626 node = known_info->alternative().checked_value();
1627 if (CheckType(node, NodeType::kInternalizedString, &old_type)) return node;
1628 }
1629
1630 if (!NodeTypeIs(old_type, NodeType::kString)) {
1631 known_info->CombineType(NodeType::kString);
1632 }
1633
1634 // This node may unwrap ThinStrings.
1635 ValueNode* maybe_unwrapping_node =
1636 AddNewNode<CheckedInternalizedString>({node}, GetCheckType(old_type));
1637 known_info->alternative().set_checked_value(maybe_unwrapping_node);
1638
1639 current_interpreter_frame_.set(reg, maybe_unwrapping_node);
1640 return maybe_unwrapping_node;
1641}
1642
1644 ValueNode* value, NodeType allowed_input_type,
1645 TaggedToFloat64ConversionType conversion_type) {
1647
1648 ValueRepresentation representation =
1649 value->properties().value_representation();
1650 if (representation == ValueRepresentation::kInt32) return value;
1651 if (representation == ValueRepresentation::kUint32) {
1652 // This node is cheap (no code gen, just a bitcast), so don't cache it.
1653 return AddNewNode<TruncateUint32ToInt32>({value});
1654 }
1655
1656 // Process constants first to avoid allocating NodeInfo for them.
1657 switch (value->opcode()) {
1658 case Opcode::kConstant: {
1659 compiler::ObjectRef object = value->Cast<Constant>()->object();
1660 if (!object.IsHeapNumber()) break;
1661 int32_t truncated_value = DoubleToInt32(object.AsHeapNumber().value());
1662 if (!Smi::IsValid(truncated_value)) break;
1663 return GetInt32Constant(truncated_value);
1664 }
1665 case Opcode::kSmiConstant:
1666 return GetInt32Constant(value->Cast<SmiConstant>()->value().value());
1667 case Opcode::kRootConstant: {
1668 Tagged<Object> root_object =
1669 local_isolate_->root(value->Cast<RootConstant>()->index());
1670 if (!IsOddball(root_object, local_isolate_)) break;
1671 int32_t truncated_value =
1672 DoubleToInt32(Cast<Oddball>(root_object)->to_number_raw());
1673 // All oddball ToNumber truncations are valid Smis.
1674 DCHECK(Smi::IsValid(truncated_value));
1675 return GetInt32Constant(truncated_value);
1676 }
1677 case Opcode::kFloat64Constant: {
1678 int32_t truncated_value =
1679 DoubleToInt32(value->Cast<Float64Constant>()->value().get_scalar());
1680 if (!Smi::IsValid(truncated_value)) break;
1681 return GetInt32Constant(truncated_value);
1682 }
1683
1684 // We could emit unconditional eager deopts for other kinds of constant, but
1685 // it's not necessary; the appropriate checking conversion nodes will deopt.
1686 default:
1687 break;
1688 }
1689
1690 NodeInfo* node_info = GetOrCreateInfoFor(value);
1691 auto& alternative = node_info->alternative();
1692
1693 // If there is an int32_alternative, then that works as a truncated value
1694 // too.
1695 if (ValueNode* alt = alternative.int32()) {
1696 return alt;
1697 }
1698 if (ValueNode* alt = alternative.truncated_int32_to_number()) {
1699 return alt;
1700 }
1701
1702 switch (representation) {
1704 NodeType old_type;
1705 EnsureType(value, allowed_input_type, &old_type);
1706 if (NodeTypeIsSmi(old_type)) {
1707 // Smi untagging can be cached as an int32 alternative, not just a
1708 // truncated alternative.
1709 return alternative.set_int32(BuildSmiUntag(value));
1710 }
1711 if (allowed_input_type == NodeType::kSmi) {
1712 return alternative.set_int32(AddNewNode<CheckedSmiUntag>({value}));
1713 }
1714 if (NodeTypeIs(old_type, allowed_input_type)) {
1715 return alternative.set_truncated_int32_to_number(
1717 conversion_type));
1718 }
1719 return alternative.set_truncated_int32_to_number(
1721 conversion_type));
1722 }
1724 // Ignore conversion_type for HoleyFloat64, and treat them like Float64.
1725 // ToNumber of undefined is anyway a NaN, so we'll simply truncate away
1726 // the NaN-ness of the hole, and don't need to do extra oddball checks so
1727 // we can ignore the hint (though we'll miss updating the feedback).
1729 return alternative.set_truncated_int32_to_number(
1731 }
1732
1734 // This is not an efficient implementation, but this only happens in
1735 // corner cases.
1736 ValueNode* value_to_number = AddNewNode<IntPtrToNumber>({value});
1737 return alternative.set_truncated_int32_to_number(
1739 {value_to_number}, TaggedToFloat64ConversionType::kOnlyNumber));
1740 }
1743 UNREACHABLE();
1744 }
1745 UNREACHABLE();
1746}
1747
1749 ValueNode* value) {
1750 switch (value->opcode()) {
1751 case Opcode::kInt32Constant:
1752 return value->Cast<Int32Constant>()->value();
1753 case Opcode::kUint32Constant: {
1754 uint32_t uint32_value = value->Cast<Uint32Constant>()->value();
1755 if (uint32_value <= INT32_MAX) {
1756 return static_cast<int32_t>(uint32_value);
1757 }
1758 return {};
1759 }
1760 case Opcode::kSmiConstant:
1761 return value->Cast<SmiConstant>()->value().value();
1762 case Opcode::kFloat64Constant: {
1763 double double_value =
1764 value->Cast<Float64Constant>()->value().get_scalar();
1765 if (!IsInt32Double(double_value)) return {};
1766 return FastD2I(value->Cast<Float64Constant>()->value().get_scalar());
1767 }
1768 default:
1769 break;
1770 }
1771 if (auto c = TryGetConstantAlternative(value)) {
1772 return TryGetInt32Constant(*c);
1773 }
1774 return {};
1775}
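// For instance, a Uint32Constant holding 0x80000000 does not fit in int32 and
// yields an empty optional here, while a Float64Constant holding exactly 3.0
// yields 3.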
1776
1778 ValueNode* value) {
1779 switch (value->opcode()) {
1780 case Opcode::kInt32Constant: {
1781 int32_t int32_value = value->Cast<Int32Constant>()->value();
1782 if (int32_value >= 0) {
1783 return static_cast<uint32_t>(int32_value);
1784 }
1785 return {};
1786 }
1787 case Opcode::kUint32Constant:
1788 return value->Cast<Uint32Constant>()->value();
1789 case Opcode::kSmiConstant: {
1790 int32_t smi_value = value->Cast<SmiConstant>()->value().value();
1791 if (smi_value >= 0) {
1792 return static_cast<uint32_t>(smi_value);
1793 }
1794 return {};
1795 }
1796 case Opcode::kFloat64Constant: {
1797 double double_value =
1798 value->Cast<Float64Constant>()->value().get_scalar();
1799 if (!IsUint32Double(double_value)) return {};
1800 return FastD2UI(value->Cast<Float64Constant>()->value().get_scalar());
1801 }
1802 default:
1803 break;
1804 }
1805 if (auto c = TryGetConstantAlternative(value)) {
1806 return TryGetUint32Constant(*c);
1807 }
1808 return {};
1809}
1810
1812 bool can_be_heap_number) {
1814
1815 ValueRepresentation representation =
1816 value->properties().value_representation();
1817 if (representation == ValueRepresentation::kInt32) return value;
1818
1819 // Process constants first to avoid allocating NodeInfo for them.
1820 if (auto cst = TryGetInt32Constant(value)) {
1821 return GetInt32Constant(cst.value());
1822 }
1823 // We could emit unconditional eager deopts for other kinds of constant, but
1824 // it's not necessary; the appropriate checking conversion nodes will deopt.
1825
1826 NodeInfo* node_info = GetOrCreateInfoFor(value);
1827 auto& alternative = node_info->alternative();
1828
1829 if (ValueNode* alt = alternative.int32()) {
1830 return alt;
1831 }
1832
1833 switch (representation) {
1835 if (can_be_heap_number && !CheckType(value, NodeType::kSmi)) {
1836 return alternative.set_int32(AddNewNode<CheckedNumberToInt32>({value}));
1837 }
1838 return alternative.set_int32(BuildSmiUntag(value));
1839 }
1841 if (node_info->is_smi()) {
1842 return alternative.set_int32(
1844 }
1845 return alternative.set_int32(AddNewNode<CheckedUint32ToInt32>({value}));
1846 }
1848 // The check here will also work for the hole NaN, so we can treat
1849 // HoleyFloat64 as Float64.
1851 return alternative.set_int32(
1853 }
1854
1856 return alternative.set_int32(AddNewNode<CheckedIntPtrToInt32>({value}));
1857
1859 UNREACHABLE();
1860 }
1861 UNREACHABLE();
1862}
1863
1865 ValueNode* value, TaggedToFloat64ConversionType conversion_type) {
1866 switch (value->opcode()) {
1867 case Opcode::kConstant: {
1868 compiler::ObjectRef object = value->Cast<Constant>()->object();
1869 if (object.IsHeapNumber()) {
1870 return object.AsHeapNumber().value();
1871 }
1872 // Oddballs should be RootConstants.
1873 DCHECK(!IsOddball(*object.object()));
1874 return {};
1875 }
1876 case Opcode::kInt32Constant:
1877 return value->Cast<Int32Constant>()->value();
1878 case Opcode::kSmiConstant:
1879 return value->Cast<SmiConstant>()->value().value();
1880 case Opcode::kFloat64Constant:
1881 return value->Cast<Float64Constant>()->value().get_scalar();
1882 case Opcode::kRootConstant: {
1883 Tagged<Object> root_object =
1884 local_isolate_->root(value->Cast<RootConstant>()->index());
1885 if (conversion_type == TaggedToFloat64ConversionType::kNumberOrBoolean &&
1886 IsBoolean(root_object)) {
1887 return Cast<Oddball>(root_object)->to_number_raw();
1888 }
1889 if (conversion_type == TaggedToFloat64ConversionType::kNumberOrOddball &&
1890 IsOddball(root_object)) {
1891 return Cast<Oddball>(root_object)->to_number_raw();
1892 }
1893 if (IsHeapNumber(root_object)) {
1894 return Cast<HeapNumber>(root_object)->value();
1895 }
1896 return {};
1897 }
1898 default:
1899 break;
1900 }
1901 if (auto c = TryGetConstantAlternative(value)) {
1902 return TryGetFloat64Constant(*c, conversion_type);
1903 }
1904 return {};
1905}
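// For instance, a RootConstant for the boolean `true` yields 1.0 under the
// kNumberOrBoolean (or kNumberOrOddball) conversion type, but an empty
// optional when the conversion type only allows numbers.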
1906
1912
1914 ValueNode* value, NodeType allowed_input_type,
1915 TaggedToFloat64ConversionType conversion_type) {
1916 ValueRepresentation representation =
1918 if (representation == ValueRepresentation::kFloat64) return value;
1919
1920 // Process constants first to avoid allocating NodeInfo for them.
1921 if (auto cst = TryGetFloat64Constant(value, conversion_type)) {
1922 return GetFloat64Constant(cst.value());
1923 }
1924 // We could emit unconditional eager deopts for other kinds of constant, but
1925 // it's not necessary; the appropriate checking conversion nodes will deopt.
1926
1927 NodeInfo* node_info = GetOrCreateInfoFor(value);
1928 auto& alternative = node_info->alternative();
1929
1930 if (ValueNode* alt = alternative.float64()) {
1931 return alt;
1932 }
1933
1934 switch (representation) {
1936 auto combined_type = CombineType(allowed_input_type, node_info->type());
1937 if (NodeTypeIs(combined_type, NodeType::kSmi)) {
1938 // Get the float64 value of a Smi value via its int32 representation.
1939 return GetFloat64(GetInt32(value));
1940 }
1941 if (NodeTypeIs(combined_type, NodeType::kNumber)) {
1942 // Number->Float64 conversions are exact alternatives, so they can
1943 // also become the canonical float64_alternative.
1944 return alternative.set_float64(BuildNumberOrOddballToFloat64(
1945 value, NodeType::kNumber,
1947 }
1948 if (NodeTypeIs(combined_type, NodeType::kNumberOrOddball)) {
1949 // NumberOrOddball->Float64 conversions are not exact alternatives,
1950 // since they lose the information that this is an oddball, so they
1951 // can only become the canonical float64_alternative if they are a
1952 // known number (and therefore not oddball).
1953 return BuildNumberOrOddballToFloat64(value, combined_type,
1954 conversion_type);
1955 }
1956 // The type is impossible. We could generate an unconditional deopt here,
1957 // but it's too invasive. So we just generate a check which will always
1958 // deopt.
1959 return BuildNumberOrOddballToFloat64(value, allowed_input_type,
1960 conversion_type);
1961 }
1963 return alternative.set_float64(AddNewNode<ChangeInt32ToFloat64>({value}));
1965 return alternative.set_float64(
1968 switch (allowed_input_type) {
1969 case NodeType::kSmi:
1970 case NodeType::kNumber:
1971 case NodeType::kNumberOrBoolean:
1972 // Number->Float64 conversions are exact alternatives, so they can
1973 // also become the canonical float64_alternative. The HoleyFloat64
1974 // representation can represent undefined but no other oddballs, so
1975 // booleans cannot occur here and kNumberOrBoolean can be grouped with
1976 // kNumber.
1977 return alternative.set_float64(
1979 case NodeType::kNumberOrOddball:
1980 // NumberOrOddball->Float64 conversions are not exact alternatives,
1981 // since they lose the information that this is an oddball, so they
1982 // cannot become the canonical float64_alternative.
1984 default:
1985 UNREACHABLE();
1986 }
1987 }
1989 return alternative.set_float64(
1992 UNREACHABLE();
1993 }
1994 UNREACHABLE();
1995}
1996
1998 ValueNode* value, NodeType allowed_input_type,
1999 TaggedToFloat64ConversionType conversion_type) {
2001 ValueRepresentation representation =
2002 value->properties().value_representation();
2003 // Ignore the hint for values that are already HoleyFloat64.
2004 if (representation == ValueRepresentation::kHoleyFloat64) return value;
2005 return GetFloat64ForToNumber(value, allowed_input_type, conversion_type);
2006}
2007
2008namespace {
2009int32_t ClampToUint8(int32_t value) {
2010 if (value < 0) return 0;
2011 if (value > 255) return 255;
2012 return value;
2013}
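// For example, ClampToUint8(-5) == 0, ClampToUint8(300) == 255, and
// ClampToUint8(42) == 42.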
2014} // namespace
2015
2017 switch (value->properties().value_representation()) {
2019 // This is not an efficient implementation, but this only happens in
2020 // corner cases.
2022 {AddNewNode<IntPtrToNumber>({value})});
2024 if (SmiConstant* constant = value->TryCast<SmiConstant>()) {
2025 return GetInt32Constant(ClampToUint8(constant->value().value()));
2026 }
2027 NodeInfo* info = known_node_aspects().TryGetInfoFor(value);
2028 if (info && info->alternative().int32()) {
2029 return AddNewNode<Int32ToUint8Clamped>({info->alternative().int32()});
2030 }
2032 }
2033 // HoleyFloat64 is treated like Float64. ToNumber of undefined is anyway a
2034 // NaN, so we'll simply truncate away the NaN-ness of the hole, and don't
2035 // need to do extra oddball checks (though we'll miss updating the
2036 // feedback).
2039 // TODO(leszeks): Handle Float64Constant, which requires the correct
2040 // rounding for clamping.
2041 return AddNewNode<Float64ToUint8Clamped>({value});
2043 if (Int32Constant* constant = value->TryCast<Int32Constant>()) {
2044 return GetInt32Constant(ClampToUint8(constant->value()));
2045 }
2046 return AddNewNode<Int32ToUint8Clamped>({value});
2048 return AddNewNode<Uint32ToUint8Clamped>({value});
2049 }
2050 UNREACHABLE();
2051}
2052
2053namespace {
2054template <Operation kOperation>
2055struct NodeForOperationHelper;
2056
2057#define NODE_FOR_OPERATION_HELPER(Name) \
2058 template <> \
2059 struct NodeForOperationHelper<Operation::k##Name> { \
2060 using generic_type = Generic##Name; \
2061 };
2063#undef NODE_FOR_OPERATION_HELPER
2064
2065template <Operation kOperation>
2066using GenericNodeForOperation =
2067 typename NodeForOperationHelper<kOperation>::generic_type;
2068
2069// Bitwise operations reinterpret the numeric input as Int32 bits for a
2070// bitwise operation, which means we want to do slightly different conversions.
2071template <Operation kOperation>
2072constexpr bool BinaryOperationIsBitwiseInt32() {
2073 switch (kOperation) {
2074 case Operation::kBitwiseNot:
2075 case Operation::kBitwiseAnd:
2076 case Operation::kBitwiseOr:
2077 case Operation::kBitwiseXor:
2078 case Operation::kShiftLeft:
2079 case Operation::kShiftRight:
2080 case Operation::kShiftRightLogical:
2081 return true;
2082 default:
2083 return false;
2084 }
2085}
2086} // namespace
2087
2088// MAP_OPERATION_TO_NODES are tuples with the following format:
2089// - Operation name,
2090// - Int32 operation node,
2091// - Identity of int32 operation (e.g., 0 for add/sub and 1 for mul/div), if it
2092// exists, or otherwise {}.
2093#define MAP_BINARY_OPERATION_TO_INT32_NODE(V) \
2094 V(Add, Int32AddWithOverflow, 0) \
2095 V(Subtract, Int32SubtractWithOverflow, 0) \
2096 V(Multiply, Int32MultiplyWithOverflow, 1) \
2097 V(Divide, Int32DivideWithOverflow, 1) \
2098 V(Modulus, Int32ModulusWithOverflow, {}) \
2099 V(BitwiseAnd, Int32BitwiseAnd, ~0) \
2100 V(BitwiseOr, Int32BitwiseOr, 0) \
2101 V(BitwiseXor, Int32BitwiseXor, 0) \
2102 V(ShiftLeft, Int32ShiftLeft, 0) \
2103 V(ShiftRight, Int32ShiftRight, 0) \
2104 V(ShiftRightLogical, Int32ShiftRightLogical, {})
2105
2106#define MAP_UNARY_OPERATION_TO_INT32_NODE(V) \
2107 V(BitwiseNot, Int32BitwiseNot) \
2108 V(Increment, Int32IncrementWithOverflow) \
2109 V(Decrement, Int32DecrementWithOverflow) \
2110 V(Negate, Int32NegateWithOverflow)
2111
2112// MAP_OPERATION_TO_FLOAT64_NODE entries are tuples with the format:
2113// (Operation name, Float64 operation node).
2114#define MAP_OPERATION_TO_FLOAT64_NODE(V) \
2115 V(Add, Float64Add) \
2116 V(Subtract, Float64Subtract) \
2117 V(Multiply, Float64Multiply) \
2118 V(Divide, Float64Divide) \
2119 V(Modulus, Float64Modulus) \
2120 V(Exponentiate, Float64Exponentiate)
2121
2122template <Operation kOperation>
2123static constexpr std::optional<int> Int32Identity() {
2124 switch (kOperation) {
2125#define CASE(op, _, identity) \
2126 case Operation::k##op: \
2127 return identity;
2129#undef CASE
2130 default:
2131 UNREACHABLE();
2132 }
2133}
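// Illustrative checks, not part of the original source: the identities above
// are the constants that leave the left operand unchanged (e.g. x + 0, x * 1,
// x & ~0), while modulus and logical shift right have no identity.
static_assert(Int32Identity<Operation::kAdd>() == 0);
static_assert(Int32Identity<Operation::kMultiply>() == 1);
static_assert(Int32Identity<Operation::kBitwiseAnd>() == ~0);
static_assert(!Int32Identity<Operation::kModulus>().has_value());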
2134
2135namespace {
2136template <Operation kOperation>
2137struct Int32NodeForHelper;
2138#define SPECIALIZATION(op, OpNode, ...) \
2139 template <> \
2140 struct Int32NodeForHelper<Operation::k##op> { \
2141 using type = OpNode; \
2142 };
2145#undef SPECIALIZATION
2146
2147template <Operation kOperation>
2148using Int32NodeFor = typename Int32NodeForHelper<kOperation>::type;
2149
2150template <Operation kOperation>
2151struct Float64NodeForHelper;
2152#define SPECIALIZATION(op, OpNode) \
2153 template <> \
2154 struct Float64NodeForHelper<Operation::k##op> { \
2155 using type = OpNode; \
2156 };
2158#undef SPECIALIZATION
2159
2160template <Operation kOperation>
2161using Float64NodeFor = typename Float64NodeForHelper<kOperation>::type;
2162} // namespace
2163
2164template <Operation kOperation>
2166 FeedbackSlot slot_index = GetSlotOperand(0);
2167 ValueNode* value = GetAccumulator();
2168 SetAccumulator(AddNewNode<GenericNodeForOperation<kOperation>>(
2169 {value}, compiler::FeedbackSource{feedback(), slot_index}));
2170}
2171
2172template <Operation kOperation>
2174 ValueNode* left = LoadRegister(0);
2175 ValueNode* right = GetAccumulator();
2176 FeedbackSlot slot_index = GetSlotOperand(1);
2177 SetAccumulator(AddNewNode<GenericNodeForOperation<kOperation>>(
2178 {left, right}, compiler::FeedbackSource{feedback(), slot_index}));
2179}
2180
2181template <Operation kOperation>
2183 ValueNode* left = GetAccumulator();
2184 int constant = iterator_.GetImmediateOperand(0);
2185 ValueNode* right = GetSmiConstant(constant);
2186 FeedbackSlot slot_index = GetSlotOperand(1);
2187 SetAccumulator(AddNewNode<GenericNodeForOperation<kOperation>>(
2188 {left, right}, compiler::FeedbackSource{feedback(), slot_index}));
2189}
2190
2191template <Operation kOperation>
2193 ValueNode* node) {
2194 auto cst = TryGetInt32Constant(node);
2195 if (!cst.has_value()) return {};
2196 switch (kOperation) {
2197 case Operation::kBitwiseNot:
2198 return GetInt32Constant(~cst.value());
2199 case Operation::kIncrement:
2200 if (cst.value() < INT32_MAX) {
2201 return GetInt32Constant(cst.value() + 1);
2202 }
2203 return {};
2204 case Operation::kDecrement:
2205 if (cst.value() > INT32_MIN) {
2206 return GetInt32Constant(cst.value() - 1);
2207 }
2208 return {};
2209 case Operation::kNegate:
2210 if (cst.value() == 0) {
2211 return {}; // Negating 0 would produce -0, which is not an int32.
2212 }
2213 if (cst.value() != INT32_MIN) {
2214 return GetInt32Constant(-cst.value());
2215 }
2216 return {}; // Negating INT32_MIN would overflow int32.
2217 default:
2218 UNREACHABLE();
2219 }
2220}
2221
2222template <Operation kOperation>
2224 // Use BuildTruncatingInt32BitwiseNotForToNumber with Smi input hint
2225 // for truncating operations.
2226 static_assert(!BinaryOperationIsBitwiseInt32<kOperation>());
2227 ValueNode* value = GetAccumulator();
2230 using OpNodeT = Int32NodeFor<kOperation>;
2232 return ReduceResult::Done();
2233}
2234
2247
2248template <Operation kOperation>
2250 ValueNode* left, ValueNode* right) {
2251 auto cst_right = TryGetInt32Constant(right);
2252 if (!cst_right.has_value()) return {};
2253 return TryFoldInt32BinaryOperation<kOperation>(left, cst_right.value());
2254}
2255
2256template <Operation kOperation>
2258 ValueNode* left, int32_t cst_right) {
2259 auto cst_left = TryGetInt32Constant(left);
2260 if (!cst_left.has_value()) return {};
2261 switch (kOperation) {
2262 case Operation::kAdd: {
2263 int64_t result = static_cast<int64_t>(cst_left.value()) +
2264 static_cast<int64_t>(cst_right);
2265 if (result >= INT32_MIN && result <= INT32_MAX) {
2266 return GetInt32Constant(static_cast<int32_t>(result));
2267 }
2268 return {};
2269 }
2270 case Operation::kSubtract: {
2271 int64_t result = static_cast<int64_t>(cst_left.value()) -
2272 static_cast<int64_t>(cst_right);
2273 if (result >= INT32_MIN && result <= INT32_MAX) {
2274 return GetInt32Constant(static_cast<int32_t>(result));
2275 }
2276 return {};
2277 }
2278 case Operation::kMultiply: {
2279 int64_t result = static_cast<int64_t>(cst_left.value()) *
2280 static_cast<int64_t>(cst_right);
2281 if (result >= INT32_MIN && result <= INT32_MAX) {
2282 return GetInt32Constant(static_cast<int32_t>(result));
2283 }
2284 return {};
2285 }
2286 case Operation::kModulus:
2287 // TODO(v8:7700): Constant fold mod.
2288 return {};
2289 case Operation::kDivide:
2290 // TODO(v8:7700): Constant fold division.
2291 return {};
2292 case Operation::kBitwiseAnd:
2293 return GetInt32Constant(cst_left.value() & cst_right);
2294 case Operation::kBitwiseOr:
2295 return GetInt32Constant(cst_left.value() | cst_right);
2296 case Operation::kBitwiseXor:
2297 return GetInt32Constant(cst_left.value() ^ cst_right);
2298 case Operation::kShiftLeft:
2299 return GetInt32Constant(cst_left.value()
2300 << (static_cast<uint32_t>(cst_right) % 32));
2301 case Operation::kShiftRight:
2302 return GetInt32Constant(cst_left.value() >>
2303 (static_cast<uint32_t>(cst_right) % 32));
2304 case Operation::kShiftRightLogical:
2305 return GetUint32Constant(static_cast<uint32_t>(cst_left.value()) >>
2306 (static_cast<uint32_t>(cst_right) % 32));
2307 default:
2308 UNREACHABLE();
2309 }
2310}
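// Standalone sketch, not part of the original source (the helper name
// FoldInt32AddSketch is hypothetical): the folding above widens to int64 so
// that int32 overflow is detected before narrowing back, and masks shift
// counts with "% 32" to match JavaScript's shift semantics. It relies only on
// <optional> and the INT32_* limits already used in this file.
namespace {
constexpr std::optional<int32_t> FoldInt32AddSketch(int32_t lhs, int32_t rhs) {
  int64_t result = static_cast<int64_t>(lhs) + static_cast<int64_t>(rhs);
  if (result < INT32_MIN || result > INT32_MAX) return {};
  return static_cast<int32_t>(result);
}
static_assert(FoldInt32AddSketch(1, 2) == 3);
static_assert(!FoldInt32AddSketch(INT32_MAX, 1).has_value());
// Shift counts behave like JS: 1 << 33 folds to 1 << (33 % 32), i.e. 2.
static_assert((int32_t{1} << (33u % 32)) == 2);
}  // namespace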
2311
2312template <Operation kOperation>
2314 // Use BuildTruncatingInt32BinaryOperationNodeForToNumber with Smi input hint
2315 // for truncating operations.
2316 static_assert(!BinaryOperationIsBitwiseInt32<kOperation>());
2317 ValueNode* left = LoadRegister(0);
2318 ValueNode* right = GetAccumulator();
2321 using OpNodeT = Int32NodeFor<kOperation>;
2322 SetAccumulator(AddNewNode<OpNodeT>({left, right}));
2323 return ReduceResult::Done();
2324}
2325
2326template <Operation kOperation>
2329 NodeType allowed_input_type,
2330 TaggedToFloat64ConversionType conversion_type) {
2331 static_assert(BinaryOperationIsBitwiseInt32<kOperation>());
2332 ValueNode* left;
2333 ValueNode* right;
2335 left = right = GetTruncatedInt32ForToNumber(
2337 allowed_input_type, conversion_type);
2338 } else {
2341 allowed_input_type, conversion_type);
2342 right =
2344 allowed_input_type, conversion_type);
2345 }
2348 SetAccumulator(AddNewNode<Int32NodeFor<kOperation>>({left, right}));
2349 return ReduceResult::Done();
2350}
2351
2352template <Operation kOperation>
2354 // Truncating Int32 nodes treat their input as a signed int32 regardless
2355 // of whether it's really signed or not, so we allow Uint32 by loading a
2356 // TruncatedInt32 value.
2357 static_assert(!BinaryOperationIsBitwiseInt32<kOperation>());
2358 ValueNode* left = GetAccumulator();
2359 int32_t constant = iterator_.GetImmediateOperand(0);
2360 if (std::optional<int>(constant) == Int32Identity<kOperation>()) {
2361 // Deopt if {left} is not an Int32.
2362 EnsureInt32(left);
2363 // If the constant is the unit of the operation, it already has the right
2364 // value, so just return.
2365 return ReduceResult::Done();
2366 }
2369 ValueNode* right = GetInt32Constant(constant);
2370 using OpNodeT = Int32NodeFor<kOperation>;
2371 SetAccumulator(AddNewNode<OpNodeT>({left, right}));
2372 return ReduceResult::Done();
2373}
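// Note on the identity fast path above: with SignedSmall feedback, an
// expression such as `x + 0` or `x - 0` only needs to prove that `x` is an
// int32 (EnsureInt32 deopts otherwise) and can then reuse `x` directly, with
// no arithmetic node emitted. The int32 check is still required because the
// identity does not hold for doubles: -0.0 + 0 is +0, not -0.0.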
2374
2375template <Operation kOperation>
2378 NodeType allowed_input_type,
2379 TaggedToFloat64ConversionType conversion_type) {
2380 static_assert(BinaryOperationIsBitwiseInt32<kOperation>());
2381 ValueNode* left =
2383 allowed_input_type, conversion_type);
2384 int32_t constant = iterator_.GetImmediateOperand(0);
2385 if (std::optional<int>(constant) == Int32Identity<kOperation>()) {
2386 // If the constant is the unit of the operation, it already has the right
2387 // value, so use the truncated value (if not just a conversion) and return.
2388 if (!left->properties().is_conversion()) {
2390 }
2391 return ReduceResult::Done();
2392 }
2395 ValueNode* right = GetInt32Constant(constant);
2396 SetAccumulator(AddNewNode<Int32NodeFor<kOperation>>({left, right}));
2397 return ReduceResult::Done();
2398}
2399
2401 if (IsSmiDouble(constant)) {
2402 return GetInt32Constant(FastD2I(constant));
2403 }
2404 return GetFloat64Constant(constant);
2405}
2406
2407template <Operation kOperation>
2409 TaggedToFloat64ConversionType conversion_type, ValueNode* value) {
2410 auto cst = TryGetFloat64Constant(value, conversion_type);
2411 if (!cst.has_value()) return {};
2412 switch (kOperation) {
2413 case Operation::kNegate:
2414 return GetNumberConstant(-cst.value());
2415 case Operation::kIncrement:
2416 return GetNumberConstant(cst.value() + 1);
2417 case Operation::kDecrement:
2418 return GetNumberConstant(cst.value() - 1);
2419 default:
2420 UNREACHABLE();
2421 }
2422}
2423
2424template <Operation kOperation>
2426 TaggedToFloat64ConversionType conversion_type, ValueNode* left,
2427 ValueNode* right) {
2428 auto cst_right = TryGetFloat64Constant(right, conversion_type);
2429 if (!cst_right.has_value()) return {};
2431 conversion_type, left, cst_right.value());
2432}
2433
2434template <Operation kOperation>
2436 TaggedToFloat64ConversionType conversion_type, ValueNode* left,
2437 double cst_right) {
2438 auto cst_left = TryGetFloat64Constant(left, conversion_type);
2439 if (!cst_left.has_value()) return {};
2440 switch (kOperation) {
2441 case Operation::kAdd:
2442 return GetNumberConstant(cst_left.value() + cst_right);
2443 case Operation::kSubtract:
2444 return GetNumberConstant(cst_left.value() - cst_right);
2445 case Operation::kMultiply:
2446 return GetNumberConstant(cst_left.value() * cst_right);
2447 case Operation::kDivide:
2448 return GetNumberConstant(cst_left.value() / cst_right);
2449 case Operation::kModulus:
2450 // TODO(v8:7700): Constant fold mod.
2451 return {};
2452 case Operation::kExponentiate:
2453 return GetNumberConstant(math::pow(cst_left.value(), cst_right));
2454 default:
2455 UNREACHABLE();
2456 }
2457}
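// Worked example for the folding above (illustrative): with both operands
// constant, `2 ** 10` folds via math::pow to exactly 1024, which
// GetNumberConstant then emits as an Int32 constant since it is in Smi range,
// while `1 / 0` folds to Infinity and is emitted as a Float64 constant.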
2458
2459template <Operation kOperation>
2461 NodeType allowed_input_type,
2462 TaggedToFloat64ConversionType conversion_type) {
2463 // TODO(v8:7700): Do constant identity folding. Make sure to normalize
2464 // HoleyFloat64 nodes if folded.
2465 ValueNode* left = GetAccumulatorHoleyFloat64ForToNumber(allowed_input_type,
2466 conversion_type);
2467 double constant = static_cast<double>(iterator_.GetImmediateOperand(0));
2470 left, constant),
2472 ValueNode* right = GetFloat64Constant(constant);
2473 SetAccumulator(AddNewNode<Float64NodeFor<kOperation>>({left, right}));
2474 return ReduceResult::Done();
2475}
2476
2477template <Operation kOperation>
2479 NodeType allowed_input_type,
2480 TaggedToFloat64ConversionType conversion_type) {
2481 // TODO(v8:7700): Do constant identity folding. Make sure to normalize
2482 // HoleyFloat64 nodes if folded.
2483 ValueNode* value = GetAccumulatorHoleyFloat64ForToNumber(allowed_input_type,
2484 conversion_type);
2487 value),
2489 switch (kOperation) {
2490 case Operation::kNegate:
2492 break;
2493 case Operation::kIncrement:
2495 break;
2496 case Operation::kDecrement:
2499 break;
2500 default:
2501 UNREACHABLE();
2502 }
2503 return ReduceResult::Done();
2504}
2505
2506template <Operation kOperation>
2508 NodeType allowed_input_type,
2509 TaggedToFloat64ConversionType conversion_type) {
2510 // TODO(v8:7700): Do constant identity folding. Make sure to normalize
2511 // HoleyFloat64 nodes if folded.
2512 ValueNode* left = LoadRegisterHoleyFloat64ForToNumber(0, allowed_input_type,
2513 conversion_type);
2514 ValueNode* right = GetAccumulatorHoleyFloat64ForToNumber(allowed_input_type,
2515 conversion_type);
2518 left, right),
2520 SetAccumulator(AddNewNode<Float64NodeFor<kOperation>>({left, right}));
2521 return ReduceResult::Done();
2522}
2523
2524namespace {
2525std::tuple<NodeType, TaggedToFloat64ConversionType>
2526BinopHintToNodeTypeAndConversionType(BinaryOperationHint hint) {
2527 switch (hint) {
2529 return std::make_tuple(NodeType::kSmi,
2534 return std::make_tuple(NodeType::kNumber,
2537 return std::make_tuple(NodeType::kNumberOrOddball,
2545 UNREACHABLE();
2546 }
2547}
2548} // namespace
2549
2550template <Operation kOperation>
2553 BinaryOperationHint feedback_hint = nexus.GetBinaryOperationFeedback();
2554 switch (feedback_hint) {
2557 DeoptimizeReason::kInsufficientTypeFeedbackForBinaryOperation);
2563 auto [allowed_input_type, conversion_type] =
2564 BinopHintToNodeTypeAndConversionType(feedback_hint);
2565 if constexpr (BinaryOperationIsBitwiseInt32<kOperation>()) {
2566 static_assert(kOperation == Operation::kBitwiseNot);
2567 return BuildTruncatingInt32BitwiseNotForToNumber(allowed_input_type,
2568 conversion_type);
2569 } else if (feedback_hint == BinaryOperationHint::kSignedSmall) {
2571 }
2573 allowed_input_type, conversion_type);
2574 break;
2575 }
2581 // Fallback to generic node.
2582 break;
2583 }
2585 return ReduceResult::Done();
2586}
2587
2589 ValueNode* right) {
2590 struct Result {
2591 bool static_map;
2592 bool is_two_byte;
2593 // The result map if the other map is known to be one-byte.
2594 ValueNode* result_map;
2595 };
2596 // If either input is a two-byte map, then the result is the
2597 // kConsTwoByteStringMap. If both are non-two-byte maps, then the result is
2598 // the kConsOneByteStringMap.
2599 auto GetIsTwoByteAndMap = [&](ValueNode* input) -> Result {
2600 if (auto maybe_constant =
2601 TryGetConstant(broker(), local_isolate(), input)) {
2602 bool two_byte = maybe_constant->map(broker()).IsTwoByteStringMap();
2603 return {true, two_byte,
2604 GetRootConstant(two_byte ? RootIndex::kConsTwoByteStringMap
2605 : RootIndex::kConsOneByteStringMap)};
2606 }
2607 switch (input->opcode()) {
2608 case Opcode::kNumberToString:
2609 return {true, false, GetRootConstant(RootIndex::kConsOneByteStringMap)};
2610 case Opcode::kInlinedAllocation: {
2611 VirtualObject* cons = input->Cast<InlinedAllocation>()->object();
2612 if (cons->type() == VirtualObject::kConsString) {
2613 ValueNode* map = cons->cons_string().map;
2614 if (auto cons_map = TryGetConstant(broker(), local_isolate(), map)) {
2615 return {true, cons_map->AsMap().IsTwoByteStringMap(), map};
2616 }
2617 return {false, false, map};
2618 }
2619 break;
2620 }
2621 default:
2622 break;
2623 }
2624 return {false, false, nullptr};
2625 };
2626
2627 auto left_info = GetIsTwoByteAndMap(left);
2628 auto right_info = GetIsTwoByteAndMap(right);
2629 if (left_info.static_map) {
2630 if (left_info.is_two_byte) {
2631 return GetRootConstant(RootIndex::kConsTwoByteStringMap);
2632 }
2633 // If left is known non-twobyte, then the result only depends on right.
2634 if (right_info.static_map) {
2635 if (right_info.is_two_byte) {
2636 return GetRootConstant(RootIndex::kConsTwoByteStringMap);
2637 } else {
2638 return GetRootConstant(RootIndex::kConsOneByteStringMap);
2639 }
2640 }
2641 if (right_info.result_map) {
2642 return right_info.result_map;
2643 }
2644 } else if (left_info.result_map) {
2645 // Left is not constant, but we have a value for the map.
2646 // If right is known non-twobyte, then the result only depends on left.
2647 if (right_info.static_map && !right_info.is_two_byte) {
2648 return left_info.result_map;
2649 }
2650 }
2651
2652 // Since ConsStringMap only cares about the two-byte-ness of its inputs, we
2653 // might as well pass the result map instead if we have one.
2654 ValueNode* left_map =
2655 left_info.result_map ? left_info.result_map
2657 ValueNode* right_map =
2658 right_info.result_map
2659 ? right_info.result_map
2661 // Sort inputs for CSE. Move constants to the left since the instruction
2662 // reuses the lhs input.
2663 if (IsConstantNode(right_map->opcode()) ||
2664 (!IsConstantNode(left_map->opcode()) && left > right)) {
2665 std::swap(left, right);
2666 }
2667 // TODO(olivf): Evaluate if using maglev controlflow to select the map could
2668 // be faster here.
2669 return AddNewNode<ConsStringMap>({left_map, right_map});
2670}
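// Summary of the map selection above (illustrative): the cons string result
// is two-byte if either input is two-byte, and one-byte only if both inputs
// are one-byte. When neither side is statically known, a ConsStringMap node
// picks the result map from the two input maps at run time.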
2671
2673 int max_depth) {
2674 if (auto maybe_constant = TryGetConstant(broker(), local_isolate(), string)) {
2675 if (maybe_constant->IsString()) {
2676 return maybe_constant->AsString().length();
2677 }
2678 }
2679 switch (string->opcode()) {
2680 case Opcode::kNumberToString:
2681 return 1;
2682 case Opcode::kInlinedAllocation:
2683 // TODO(olivf): Add a NodeType::kConsString instead of this check.
2684 if (string->Cast<InlinedAllocation>()->object()->type() ==
2687 }
2688 break;
2689 case Opcode::kStringConcat:
2690 if (max_depth == 0) return 0;
2691 return StringLengthStaticLowerBound(string->input(0).node(),
2692 max_depth - 1) +
2693 StringLengthStaticLowerBound(string->input(1).node(),
2694 max_depth - 1);
2695 case Opcode::kPhi: {
2696 // For the builder pattern where the inputs are cons strings, we will see
2697 // a phi from the Select that compares against the empty string. We
2698 // can refine the min_length by checking the phi's string inputs. This might
2699 // help us elide the Select.
2700 if (max_depth == 0) return 0;
2701 auto phi = string->Cast<Phi>();
2702 if (phi->input_count() == 0 ||
2703 (phi->is_loop_phi() && phi->is_unmerged_loop_phi())) {
2704 return 0;
2705 }
2706 size_t overall_min_length =
2707 StringLengthStaticLowerBound(phi->input(0).node(), max_depth - 1);
2708 for (int i = 1; i < phi->input_count(); ++i) {
2709 size_t min =
2710 StringLengthStaticLowerBound(phi->input(i).node(), max_depth - 1);
2711 if (min < overall_min_length) {
2712 overall_min_length = min;
2713 }
2714 }
2715 return overall_min_length;
2716 }
2717 default:
2718 break;
2719 }
2720 return 0;
2721}
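// Worked example for the lower bound above (illustrative): for a concat whose
// left operand is the constant "foo" and whose right operand is a
// NumberToString node, the left side contributes 3 and the conversion at
// least 1, so the StringConcat has a static lower bound of 4; anything the
// analysis cannot see contributes 0.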
2722
2724 ValueNode* left, ValueNode* right, AllocationType allocation_type) {
2725 // This optimization is also done by Turboshaft.
2726 if (is_turbolev()) {
2727 return ReduceResult::Fail();
2728 }
2729 if (!v8_flags.maglev_cons_string_elision) {
2730 return ReduceResult::Fail();
2731 }
2732
2733 DCHECK(NodeTypeIs(GetType(left), NodeType::kString));
2734 DCHECK(NodeTypeIs(GetType(right), NodeType::kString));
2735
2736 size_t left_min_length = StringLengthStaticLowerBound(left);
2737 size_t right_min_length = StringLengthStaticLowerBound(right);
2738 bool result_is_cons_string =
2739 left_min_length + right_min_length >= ConsString::kMinLength;
2740
2741 // TODO(olivf): Support the fast case with a non-cons string fallback.
2742 if (!result_is_cons_string) {
2743 return MaybeReduceResult::Fail();
2744 }
2745
2746 left = BuildUnwrapThinString(left);
2747 right = BuildUnwrapThinString(right);
2748
2749 ValueNode* left_length = BuildLoadStringLength(left);
2750 ValueNode* right_length = BuildLoadStringLength(right);
2751
2752 auto BuildConsString = [&]() {
2754 MaybeReduceResult folded =
2755 TryFoldInt32BinaryOperation<Operation::kAdd>(left_length, right_length);
2756 if (folded.HasValue()) {
2757 new_length = folded.value();
2758 } else {
2759 new_length =
2760 AddNewNode<Int32AddWithOverflow>({left_length, right_length});
2761 }
2762
2763 // TODO(olivf): Add unconditional deopt support to the Select builder
2764 // instead of disabling unconditional deopt here.
2767 AssertCondition::kUnsignedLessThanEqual,
2768 DeoptimizeReason::kStringTooLarge,
2769 /* allow_unconditional_deopt */ false);
2770 CHECK(!too_long.IsDoneWithAbort());
2771
2772 ValueNode* new_map = BuildNewConsStringMap(left, right);
2773 VirtualObject* cons_string =
2774 CreateConsString(new_map, new_length, left, right);
2775 ValueNode* allocation =
2776 BuildInlinedAllocation(cons_string, allocation_type);
2777
2778 return allocation;
2779 };
2780
2781 return Select(
2782 [&](auto& builder) {
2783 if (left_min_length > 0) return BranchResult::kAlwaysFalse;
2784 return BuildBranchIfInt32Compare(builder, Operation::kEqual,
2785 left_length, GetInt32Constant(0));
2786 },
2787 [&] { return right; },
2788 [&] {
2789 return Select(
2790 [&](auto& builder) {
2791 if (right_min_length > 0) return BranchResult::kAlwaysFalse;
2792 return BuildBranchIfInt32Compare(builder, Operation::kEqual,
2793 right_length,
2794 GetInt32Constant(0));
2795 },
2796 [&] { return left; }, [&] { return BuildConsString(); });
2797 });
2798}
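// Shape of the reduction above (illustrative): the result is
//   left_length == 0    ? right
//   : right_length == 0 ? left
//   : a freshly allocated cons string (map, left_length + right_length,
//                                      left, right),
// where the empty-string branches are statically elided whenever the
// lower-bound analysis proves an operand non-empty, and a deopt is emitted if
// the combined length would exceed the maximum string length.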
2799
2801 DCHECK(NodeTypeIs(GetType(input), NodeType::kString));
2802 if (NodeTypeIs(GetType(input), NodeType::kNonThinString)) return input;
2803 return AddNewNode<UnwrapThinString>({input});
2804}
2805
2807 DCHECK(NodeTypeIs(GetType(input), NodeType::kStringOrStringWrapper));
2808 if (NodeTypeIs(GetType(input), NodeType::kString)) return input;
2809 return AddNewNode<UnwrapStringWrapper>({input});
2810}
2811
2813 ValueNode* right) {
2814 if (RootConstant* root_constant = left->TryCast<RootConstant>()) {
2815 if (root_constant->index() == RootIndex::kempty_string) {
2817 SetAccumulator(right);
2818 return ReduceResult::Done();
2819 }
2820 }
2821 if (RootConstant* root_constant = right->TryCast<RootConstant>()) {
2822 if (root_constant->index() == RootIndex::kempty_string) {
2824 SetAccumulator(left);
2825 return ReduceResult::Done();
2826 }
2827 }
2833 return ReduceResult::Done();
2834}
2835
2836template <Operation kOperation>
2839 BinaryOperationHint feedback_hint = nexus.GetBinaryOperationFeedback();
2840 switch (feedback_hint) {
2843 DeoptimizeReason::kInsufficientTypeFeedbackForBinaryOperation);
2849 auto [allowed_input_type, conversion_type] =
2850 BinopHintToNodeTypeAndConversionType(feedback_hint);
2851 if constexpr (BinaryOperationIsBitwiseInt32<kOperation>()) {
2853 allowed_input_type, conversion_type);
2854 } else if (feedback_hint == BinaryOperationHint::kSignedSmall) {
2855 if constexpr (kOperation == Operation::kExponentiate) {
2856 // Exponentiate never updates the feedback to be a Smi.
2857 UNREACHABLE();
2858 } else {
2860 }
2861 } else {
2863 allowed_input_type, conversion_type);
2864 }
2865 break;
2866 }
2868 if constexpr (kOperation == Operation::kAdd) {
2869 ValueNode* left = LoadRegister(0);
2870 ValueNode* right = GetAccumulator();
2871 return BuildStringConcat(left, right);
2872 }
2873 break;
2875 if constexpr (kOperation == Operation::kAdd) {
2876 if (broker()
2877 ->dependencies()
2878 ->DependOnStringWrapperToPrimitiveProtector()) {
2879 ValueNode* left = LoadRegister(0);
2880 ValueNode* right = GetAccumulator();
2883 left = BuildUnwrapStringWrapper(left);
2884 right = BuildUnwrapStringWrapper(right);
2885 return BuildStringConcat(left, right);
2886 }
2887 }
2888 [[fallthrough]];
2892 // Fallback to generic node.
2893 break;
2894 }
2896 return ReduceResult::Done();
2897}
2898
2899template <Operation kOperation>
2902 BinaryOperationHint feedback_hint = nexus.GetBinaryOperationFeedback();
2903 switch (feedback_hint) {
2906 DeoptimizeReason::kInsufficientTypeFeedbackForBinaryOperation);
2912 const auto [allowed_input_type, conversion_type] =
2913 BinopHintToNodeTypeAndConversionType(feedback_hint);
2914 if constexpr (BinaryOperationIsBitwiseInt32<kOperation>()) {
2916 kOperation>(allowed_input_type, conversion_type);
2917 } else if (feedback_hint == BinaryOperationHint::kSignedSmall) {
2918 if constexpr (kOperation == Operation::kExponentiate) {
2919 // Exponentiate never updates the feedback to be a Smi.
2920 UNREACHABLE();
2921 } else {
2923 }
2924 } else {
2926 allowed_input_type, conversion_type);
2927 }
2928 break;
2929 }
2935 // Fallback to generic node.
2936 break;
2937 }
2939 return ReduceResult::Done();
2940}
2941
2942template <Operation kOperation, typename type>
2943bool OperationValue(type left, type right) {
2944 switch (kOperation) {
2945 case Operation::kEqual:
2946 case Operation::kStrictEqual:
2947 return left == right;
2948 case Operation::kLessThan:
2949 return left < right;
2950 case Operation::kLessThanOrEqual:
2951 return left <= right;
2952 case Operation::kGreaterThan:
2953 return left > right;
2954 case Operation::kGreaterThanOrEqual:
2955 return left >= right;
2956 }
2957}
2958
2959// static
2960compiler::OptionalHeapObjectRef MaglevGraphBuilder::TryGetConstant(
2962 if (Constant* c = node->TryCast<Constant>()) {
2963 return c->object();
2964 }
2965 if (RootConstant* c = node->TryCast<RootConstant>()) {
2966 return MakeRef(broker, isolate->root_handle(c->index())).AsHeapObject();
2967 }
2968 return {};
2969}
2970
2971compiler::OptionalHeapObjectRef MaglevGraphBuilder::TryGetConstant(
2972 ValueNode* node, ValueNode** constant_node) {
2973 if (auto result = TryGetConstant(broker(), local_isolate(), node)) {
2974 if (constant_node) *constant_node = node;
2975 return result;
2976 }
2977 if (auto c = TryGetConstantAlternative(node)) {
2978 return TryGetConstant(*c, constant_node);
2979 }
2980 return {};
2981}
2982
2984 ValueNode* node) {
2985 const NodeInfo* info = known_node_aspects().TryGetInfoFor(node);
2986 if (info) {
2987 if (auto c = info->alternative().checked_value()) {
2988 if (IsConstantNode(c->opcode())) {
2989 return c;
2990 }
2991 }
2992 }
2993 return {};
2994}
2995
2996template <Operation kOperation>
2998 if (kOperation != Operation::kStrictEqual && kOperation != Operation::kEqual)
2999 return false;
3000
3001 ValueNode* left = LoadRegister(0);
3002 ValueNode* right = GetAccumulator();
3003
3004 ValueNode* other = right;
3005 compiler::OptionalHeapObjectRef maybe_constant = TryGetConstant(left);
3006 if (!maybe_constant) {
3007 maybe_constant = TryGetConstant(right);
3008 other = left;
3009 }
3010 if (!maybe_constant) return false;
3011
3012 if (CheckType(other, NodeType::kBoolean)) {
3013 auto CompareOtherWith = [&](bool constant) {
3014 compiler::OptionalHeapObjectRef const_other = TryGetConstant(other);
3015 if (const_other) {
3016 auto bool_other = const_other->TryGetBooleanValue(broker());
3017 if (bool_other.has_value()) {
3018 SetAccumulator(GetBooleanConstant(constant == *bool_other));
3019 return;
3020 }
3021 }
3022 if (constant) {
3023 SetAccumulator(other);
3024 } else {
3026 }
3027 };
3028
3029 if (maybe_constant.equals(broker_->true_value())) {
3030 CompareOtherWith(true);
3031 return true;
3032 } else if (maybe_constant.equals(broker_->false_value())) {
3033 CompareOtherWith(false);
3034 return true;
3035 } else if (kOperation == Operation::kEqual) {
3036 // For `bool == num` we can convert the actual comparison `ToNumber(bool)
3037 // == num` into `(num == 1) ? bool : ((num == 0) ? !bool : false)`.
3038 std::optional<double> val = {};
3039 if (maybe_constant.value().IsSmi()) {
3040 val = maybe_constant.value().AsSmi();
3041 } else if (maybe_constant.value().IsHeapNumber()) {
3042 val = maybe_constant.value().AsHeapNumber().value();
3043 }
3044 if (val) {
3045 if (*val == 0) {
3046 CompareOtherWith(false);
3047 } else if (*val == 1) {
3048 CompareOtherWith(true);
3049 } else {
3050 // The constant number is neither equal to `ToNumber(true)` nor
3051 // `ToNumber(false)`.
3053 }
3054 return true;
3055 }
3056 }
3057 }
3058
3059 if (kOperation != Operation::kStrictEqual) return false;
3060
3061 InstanceType type = maybe_constant.value().map(broker()).instance_type();
3062 if (!InstanceTypeChecker::IsReferenceComparable(type)) return false;
3063
3064 // If the constant is the undefined value, we can compare it
3065 // against holey floats.
3066 if (maybe_constant->IsUndefined()) {
3067 ValueNode* holey_float = nullptr;
3068 if (left->properties().value_representation() ==
3070 holey_float = left;
3071 } else if (right->properties().value_representation() ==
3073 holey_float = right;
3074 }
3075 if (holey_float) {
3077 return true;
3078 }
3079 }
3080
3081 if (left->properties().value_representation() !=
3083 right->properties().value_representation() !=
3086 } else {
3087 SetAccumulator(BuildTaggedEqual(left, right));
3088 }
3089 return true;
3090}
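// Worked examples for the reductions above (illustrative): with `b` known to
// be a boolean, `b === true` reduces to `b`, `b === false` reduces to its
// negation, `b == 1` reduces to `b`, `b == 0` reduces to its negation, and
// `b == 2.5` folds to false, since 2.5 matches neither ToNumber(true) nor
// ToNumber(false).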
3091
3092template <Operation kOperation>
3095 return ReduceResult::Done();
3096
3097 // Compare opcodes are not always commutative. We sort the operands of the
3098 // ones that are, for better CSE coverage.
3099 auto SortCommute = [](ValueNode*& left, ValueNode*& right) {
3100 if (!v8_flags.maglev_cse) return;
3101 if (kOperation != Operation::kEqual &&
3102 kOperation != Operation::kStrictEqual) {
3103 return;
3104 }
3105 if (left > right) {
3106 std::swap(left, right);
3107 }
3108 };
3109
3110 auto TryConstantFoldInt32 = [&](ValueNode* left, ValueNode* right) {
3111 if (left->Is<Int32Constant>() && right->Is<Int32Constant>()) {
3112 int left_value = left->Cast<Int32Constant>()->value();
3113 int right_value = right->Cast<Int32Constant>()->value();
3115 OperationValue<kOperation>(left_value, right_value)));
3116 return true;
3117 }
3118 return false;
3119 };
3120
3121 auto TryConstantFoldEqual = [&](ValueNode* left, ValueNode* right) {
3122 if (left == right) {
3124 GetBooleanConstant(kOperation == Operation::kEqual ||
3125 kOperation == Operation::kStrictEqual ||
3126 kOperation == Operation::kLessThanOrEqual ||
3127 kOperation == Operation::kGreaterThanOrEqual));
3128 return true;
3129 }
3130 return false;
3131 };
3132
3133 auto MaybeOddballs = [&]() {
3134 auto MaybeOddball = [&](ValueNode* value) {
3135 ValueRepresentation rep = value->value_representation();
3136 switch (rep) {
3140 return false;
3141 default:
3142 break;
3143 }
3144 return !CheckType(value, NodeType::kNumber);
3145 };
3146 return MaybeOddball(LoadRegister(0)) || MaybeOddball(GetAccumulator());
3147 };
3148
3150 switch (nexus.GetCompareOperationFeedback()) {
3153 DeoptimizeReason::kInsufficientTypeFeedbackForCompareOperation);
3154
3156 // TODO(victorgomes): Add a smart equality operator that compares constants
3157 // in different representations.
3158 ValueNode* left = GetInt32(LoadRegister(0));
3159 ValueNode* right = GetInt32(GetAccumulator());
3160 if (TryConstantFoldEqual(left, right)) return ReduceResult::Done();
3161 if (TryConstantFoldInt32(left, right)) return ReduceResult::Done();
3162 SortCommute(left, right);
3164 return ReduceResult::Done();
3165 }
3167 // TODO(leszeks): we could support all kNumberOrOddball with
3168 // BranchIfFloat64Compare, but we'd need to special case comparing
3169 // oddballs with NaN value (e.g. undefined) against themselves.
3170 if (MaybeOddballs()) {
3171 break;
3172 }
3173 [[fallthrough]];
3175 if (kOperation == Operation::kStrictEqual && MaybeOddballs()) {
3176 break;
3177 }
3178 [[fallthrough]];
3180 ValueNode* left = LoadRegister(0);
3181 ValueNode* right = GetAccumulator();
3183 right->value_representation() == ValueRepresentation::kInt32) {
3184 if (TryConstantFoldEqual(left, right)) return ReduceResult::Done();
3185 if (TryConstantFoldInt32(left, right)) return ReduceResult::Done();
3186 SortCommute(left, right);
3188 return ReduceResult::Done();
3189 }
3190 // In compare operations, booleans should be converted to Float64 but
3191 // non-boolean oddballs shouldn't. Even if the feedback type was
3192 // kNumberOrOddball, we'd still pass
3193 // TaggedToFloat64ConversionType::kNumberOrBoolean.
3194 NodeType allowed_input_type;
3195 TaggedToFloat64ConversionType conversion_type;
3196 if (nexus.GetCompareOperationFeedback() ==
3198 allowed_input_type = NodeType::kNumberOrBoolean;
3200 } else {
3201 allowed_input_type = NodeType::kNumber;
3203 }
3204 left = GetFloat64ForToNumber(left, allowed_input_type, conversion_type);
3205 right = GetFloat64ForToNumber(right, allowed_input_type, conversion_type);
3206 if (left->Is<Float64Constant>() && right->Is<Float64Constant>()) {
3207 double left_value = left->Cast<Float64Constant>()->value().get_scalar();
3208 double right_value =
3209 right->Cast<Float64Constant>()->value().get_scalar();
3211 OperationValue<kOperation>(left_value, right_value)));
3212 return ReduceResult::Done();
3213 }
3214 SortCommute(left, right);
3216 return ReduceResult::Done();
3217 }
3219 DCHECK(kOperation == Operation::kEqual ||
3220 kOperation == Operation::kStrictEqual);
3221 ValueNode *left, *right;
3224 SetAccumulator(GetRootConstant(RootIndex::kTrueValue));
3225 return ReduceResult::Done();
3226 }
3228 right =
3230 if (TryConstantFoldEqual(left, right)) return ReduceResult::Done();
3231 SetAccumulator(BuildTaggedEqual(left, right));
3232 return ReduceResult::Done();
3233 }
3235 DCHECK(kOperation == Operation::kEqual ||
3236 kOperation == Operation::kStrictEqual);
3237
3238 ValueNode* left = LoadRegister(0);
3239 ValueNode* right = GetAccumulator();
3242 if (TryConstantFoldEqual(left, right)) return ReduceResult::Done();
3243 SetAccumulator(BuildTaggedEqual(left, right));
3244 return ReduceResult::Done();
3245 }
3247 ValueNode* left = LoadRegister(0);
3248 ValueNode* right = GetAccumulator();
3251
3253 if (TryConstantFoldEqual(left, right)) return ReduceResult::Done();
3254 ValueNode* tagged_left = GetTaggedValue(left);
3255 ValueNode* tagged_right = GetTaggedValue(right);
3256 switch (kOperation) {
3257 case Operation::kEqual:
3258 case Operation::kStrictEqual:
3259 result = AddNewNode<StringEqual>({tagged_left, tagged_right});
3260 break;
3261 case Operation::kLessThan:
3263 {tagged_left, tagged_right});
3264 break;
3265 case Operation::kLessThanOrEqual:
3267 {tagged_left, tagged_right});
3268 break;
3269 case Operation::kGreaterThan:
3271 {tagged_left, tagged_right});
3272 break;
3273 case Operation::kGreaterThanOrEqual:
3275 {tagged_left, tagged_right});
3276 break;
3277 }
3278
3280 return ReduceResult::Done();
3281 }
3285 break;
3287 if (kOperation == Operation::kEqual) {
3288 break;
3289 }
3290 DCHECK_EQ(kOperation, Operation::kStrictEqual);
3291
3292 ValueNode* left = LoadRegister(0);
3293 ValueNode* right = GetAccumulator();
3296 SetAccumulator(BuildTaggedEqual(left, right));
3297 return ReduceResult::Done();
3298 }
3300 DCHECK(kOperation == Operation::kEqual ||
3301 kOperation == Operation::kStrictEqual);
3302
3303 ValueNode* left = LoadRegister(0);
3304 ValueNode* right = GetAccumulator();
3307 SetAccumulator(BuildTaggedEqual(left, right));
3308 return ReduceResult::Done();
3309 }
3310 }
3311
3313 return ReduceResult::Done();
3314}
3315
3316ReduceResult MaglevGraphBuilder::VisitLdar() {
3319 return ReduceResult::Done();
3320}
3321
3322ReduceResult MaglevGraphBuilder::VisitLdaZero() {
3324 return ReduceResult::Done();
3325}
3326ReduceResult MaglevGraphBuilder::VisitLdaSmi() {
3327 int constant = iterator_.GetImmediateOperand(0);
3328 SetAccumulator(GetSmiConstant(constant));
3329 return ReduceResult::Done();
3330}
3331ReduceResult MaglevGraphBuilder::VisitLdaUndefined() {
3332 SetAccumulator(GetRootConstant(RootIndex::kUndefinedValue));
3333 return ReduceResult::Done();
3334}
3335ReduceResult MaglevGraphBuilder::VisitLdaNull() {
3336 SetAccumulator(GetRootConstant(RootIndex::kNullValue));
3337 return ReduceResult::Done();
3338}
3339ReduceResult MaglevGraphBuilder::VisitLdaTheHole() {
3340 SetAccumulator(GetRootConstant(RootIndex::kTheHoleValue));
3341 return ReduceResult::Done();
3342}
3343ReduceResult MaglevGraphBuilder::VisitLdaTrue() {
3344 SetAccumulator(GetRootConstant(RootIndex::kTrueValue));
3345 return ReduceResult::Done();
3346}
3347ReduceResult MaglevGraphBuilder::VisitLdaFalse() {
3348 SetAccumulator(GetRootConstant(RootIndex::kFalseValue));
3349 return ReduceResult::Done();
3350}
3351ReduceResult MaglevGraphBuilder::VisitLdaConstant() {
3353 return ReduceResult::Done();
3354}
3355
3357 ValueNode* context, int slot_index, ContextSlotMutability slot_mutability) {
3359
3360 if (slot_mutability == kMutable) return false;
3361
3362 auto constant = TryGetConstant(context);
3363 if (!constant) return false;
3364
3365 compiler::ContextRef context_ref = constant.value().AsContext();
3366
3367 compiler::OptionalObjectRef maybe_slot_value =
3368 context_ref.get(broker(), slot_index);
3369 if (!maybe_slot_value.has_value()) return false;
3370
3371 compiler::ObjectRef slot_value = maybe_slot_value.value();
3372 if (slot_value.IsHeapObject()) {
3373 // Even though the context slot is immutable, the context might have escaped
3374 // before the function to which it belongs has initialized the slot. We
3375 // must be conservative and check if the value in the slot is currently the
3376 // hole or undefined. Only if it is neither of these can we be sure that it
3377 // won't change anymore.
3378 //
3379 // See also: JSContextSpecialization::ReduceJSLoadContext.
3380 compiler::OddballType oddball_type =
3381 slot_value.AsHeapObject().map(broker()).oddball_type(broker());
3382 if (oddball_type == compiler::OddballType::kUndefined ||
3383 slot_value.IsTheHole()) {
3384 return false;
3385 }
3386 }
3387
3388 // Fold the load of the immutable slot.
3389
3390 SetAccumulator(GetConstant(slot_value));
3391 return true;
3392}
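// Illustrative example for the fold above: loading an immutable slot (e.g. a
// `const` binding) from a context that is itself a compile-time constant can
// be replaced by the slot's current value, but only once that value is
// neither the hole nor undefined, since the slot may still be awaiting
// initialization if the context escaped early.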
3393
3395 ValueNode* context_node, int index) {
3396 if (!context_node->Is<Constant>()) return {};
3397 compiler::ContextRef context =
3398 context_node->Cast<Constant>()->ref().AsContext();
3399 DCHECK(context.object()->IsScriptContext());
3400 auto maybe_property = context.object()->GetScriptContextSideProperty(index);
3401 if (!maybe_property) return {};
3402 auto property = maybe_property.value();
3404 switch (property) {
3406 compiler::OptionalObjectRef constant = context.get(broker(), index);
3407 if (!constant.has_value()) {
3409 offset);
3410 }
3412 context, index, property, broker());
3413 return GetConstant(*constant);
3414 }
3417 context, index, property, broker());
3419 context_node, offset);
3420 EnsureType(value, NodeType::kSmi);
3421 return value;
3422 }
3424 if (auto mutable_heap_number = context.get(broker(), index)) {
3425 if (!mutable_heap_number->IsHeapNumber()) {
3426 // TODO(victorgomes): In case the tag is out of date by now we could
3427 // retry this reduction.
3428 break;
3429 }
3431 context, index, property, broker());
3432 return AddNewNode<LoadInt32>(
3433 {GetConstant(*mutable_heap_number)},
3434 static_cast<int>(offsetof(HeapNumber, value_)));
3435 }
3437 context, index, property, broker());
3438 return AddNewNode<LoadHeapInt32>({context_node}, offset);
3440 if (auto mutable_heap_number = context.get(broker(), index)) {
3441 if (!mutable_heap_number->IsHeapNumber()) {
3442 // TODO(victorgomes): In case the tag is out of date by now we could
3443 // retry this reduction.
3444 break;
3445 }
3447 context, index, property, broker());
3449 {GetConstant(*mutable_heap_number)},
3450 static_cast<int>(offsetof(HeapNumber, value_)));
3451 }
3453 context, index, property, broker());
3454 return AddNewNode<LoadDoubleField>({context_node}, offset);
3456 break;
3457 default:
3458 UNREACHABLE();
3459 }
3461 offset);
3462}
3463
3465 ValueNode* context, int index, ContextSlotMutability slot_mutability,
3466 ContextKind context_kind) {
3468 ValueNode*& cached_value =
3469 slot_mutability == kMutable
3472 if (cached_value) {
3473 if (v8_flags.trace_maglev_graph_building) {
3474 std::cout << " * Reusing cached context slot "
3475 << PrintNodeLabel(graph_labeller(), context) << "[" << offset
3476 << "]: " << PrintNode(graph_labeller(), cached_value)
3477 << std::endl;
3478 }
3479 return cached_value;
3480 }
3482 if (context_kind == ContextKind::kScriptContext &&
3483 (v8_flags.script_context_mutable_heap_number ||
3484 v8_flags.const_tracking_let) &&
3485 slot_mutability == kMutable) {
3486 // We collect feedback only for mutable context slots.
3487 cached_value = TrySpecializeLoadScriptContextSlot(context, index);
3488 if (cached_value) return cached_value;
3489 return cached_value =
3491 context, index);
3492 }
3494 context, offset);
3495}
3496
3498 ValueNode* context, compiler::OptionalScopeInfoRef scope_info) {
3499 // Distinguishing contexts by their scope info only works if scope infos are
3500 // guaranteed to be unique.
3501 // TODO(crbug.com/401059828): reenable when crashes are gone.
3502 if ((true) || !v8_flags.reuse_scope_infos) return true;
3503 if (!scope_info.has_value()) {
3504 return true;
3505 }
3506 auto other = graph()->TryGetScopeInfo(context, broker());
3507 if (!other.has_value()) {
3508 return true;
3509 }
3510 return scope_info->equals(*other);
3511}
3512
3514 ValueNode* context, int index, ValueNode* value, Node** store) {
3515 DCHECK_NOT_NULL(store);
3516 DCHECK(v8_flags.script_context_mutable_heap_number ||
3517 v8_flags.const_tracking_let);
3518 if (!context->Is<Constant>()) {
3520 {context, value}, index);
3521 return ReduceResult::Done();
3522 }
3523
3524 compiler::ContextRef context_ref =
3525 context->Cast<Constant>()->ref().AsContext();
3526 DCHECK(context_ref.object()->IsScriptContext());
3527 auto maybe_property =
3528 context_ref.object()->GetScriptContextSideProperty(index);
3529 if (!maybe_property) {
3531 {context, value}, index);
3532 return ReduceResult::Done();
3533 }
3534 auto property = maybe_property.value();
3536 if (property == ContextSidePropertyCell::kConst) {
3537 compiler::OptionalObjectRef constant = context_ref.get(broker(), index);
3538 if (!constant.has_value() ||
3539 (constant->IsString() && !constant->IsInternalizedString())) {
3541 {context, value}, index);
3542 return ReduceResult::Done();
3543 }
3545 context_ref, index, property, broker());
3547 value, *constant, DeoptimizeReason::kStoreToConstant);
3548 }
3549
3550 if (!v8_flags.script_context_mutable_heap_number) {
3551 *store = BuildStoreTaggedField(context, value, offset,
3553 return ReduceResult::Done();
3554 }
3555
3556 switch (property) {
3558 UNREACHABLE();
3562 context_ref, index, property, broker());
3563 *store = BuildStoreTaggedField(context, value, offset,
3565 break;
3567 EnsureInt32(value, true);
3568 if (auto mutable_heap_number = context_ref.get(broker(), index)) {
3569 if (!mutable_heap_number->IsHeapNumber()) {
3570 // TODO(victorgomes): In case the tag is out of date by now we could
3571 // retry this reduction.
3572 return {};
3573 }
3574 *store = AddNewNode<StoreInt32>(
3575 {GetConstant(*mutable_heap_number), value},
3576 static_cast<int>(offsetof(HeapNumber, value_)));
3577 } else {
3578 *store = AddNewNode<StoreHeapInt32>({context, value}, offset);
3579 }
3581 context_ref, index, property, broker());
3582 break;
3585 if (auto mutable_heap_number = context_ref.get(broker(), index)) {
3586 if (!mutable_heap_number->IsHeapNumber()) {
3587 // TODO(victorgomes): In case the tag is out of date by now we could
3588 // retry this reduction.
3589 return {};
3590 }
3591 *store = AddNewNode<StoreFloat64>(
3592 {GetConstant(*mutable_heap_number), value},
3593 static_cast<int>(offsetof(HeapNumber, value_)));
3594 } else {
3595 *store = AddNewNode<StoreDoubleField>({context, value}, offset);
3596 }
3598 context_ref, index, property, broker());
3599 break;
3601 *store = BuildStoreTaggedField(context, value, offset,
3603 break;
3604 default:
3605 UNREACHABLE();
3606 }
3607 return ReduceResult::Done();
3608}
3609
3611 ValueNode* context, int index, ValueNode* value, ContextKind context_kind) {
3613 DCHECK_EQ(
3614 known_node_aspects().loaded_context_constants.count({context, offset}),
3615 0);
3616
3617 Node* store = nullptr;
3618 if ((v8_flags.script_context_mutable_heap_number ||
3619 v8_flags.const_tracking_let) &&
3620 context_kind == ContextKind::kScriptContext) {
3622 TrySpecializeStoreScriptContextSlot(context, index, value, &store);
3624 if (!store && result.IsDone()) {
3625 // If we didn't need to emit any store, there is nothing to cache.
3626 return result.Checked();
3627 }
3628 }
3629
3630 if (!store) {
3631 store = BuildStoreTaggedField(context, value, offset,
3633 }
3634
3635 if (v8_flags.trace_maglev_graph_building) {
3636 std::cout << " * Recording context slot store "
3637 << PrintNodeLabel(graph_labeller(), context) << "[" << offset
3638 << "]: " << PrintNode(graph_labeller(), value) << std::endl;
3639 }
3641 KnownNodeAspects::LoadedContextSlots& loaded_context_slots =
3643 if (known_node_aspects().may_have_aliasing_contexts() ==
3645 compiler::OptionalScopeInfoRef scope_info =
3646 graph()->TryGetScopeInfo(context, broker());
3647 for (auto& cache : loaded_context_slots) {
3648 if (std::get<int>(cache.first) == offset &&
3649 std::get<ValueNode*>(cache.first) != context) {
3650 if (ContextMayAlias(std::get<ValueNode*>(cache.first), scope_info) &&
3651 cache.second != value) {
3652 if (v8_flags.trace_maglev_graph_building) {
3653 std::cout << " * Clearing probably aliasing value "
3655 std::get<ValueNode*>(cache.first))
3656 << "[" << offset
3657 << "]: " << PrintNode(graph_labeller(), value)
3658 << std::endl;
3659 }
3660 cache.second = nullptr;
3662 loop_effects_->context_slot_written.insert(cache.first);
3664 }
3665 }
3666 }
3667 }
3668 }
3670 auto updated = loaded_context_slots.emplace(key, value);
3671 if (updated.second) {
3674 }
3676 } else {
3677 if (updated.first->second != value) {
3678 updated.first->second = value;
3681 }
3682 }
3683 if (known_node_aspects().may_have_aliasing_contexts() !=
3685 auto last_store = unobserved_context_slot_stores_.find(key);
3686 if (last_store != unobserved_context_slot_stores_.end()) {
3687 MarkNodeDead(last_store->second);
3688 last_store->second = store;
3689 } else {
3691 }
3692 }
3693 }
3694 return ReduceResult::Done();
3695}
3696
3698 ValueNode* context, size_t depth, int slot_index,
3699 ContextSlotMutability slot_mutability, ContextKind context_kind) {
3700 context = GetContextAtDepth(context, depth);
3703 slot_mutability)) {
3704 return; // Our work here is done.
3705 }
3706
3707 // Always load the slot here as if it were mutable. Immutable slots have a
3708 // narrow range of mutability if the context escapes before the slot is
3709 // initialized, so we can't safely assume that the load can be cached in case
3710 // it's a load before initialization (e.g. var a = a + 42).
3712 LoadAndCacheContextSlot(context, slot_index, kMutable, context_kind));
3713}
3714
3716 ValueNode* context, size_t depth, int slot_index, ValueNode* value,
3717 ContextKind context_kind) {
3718 context = GetContextAtDepth(context, depth);
3719 return StoreAndCacheContextSlot(context, slot_index, value, context_kind);
3720}
3721
3722ReduceResult MaglevGraphBuilder::VisitLdaContextSlot() {
3723 ValueNode* context = LoadRegister(0);
3724 int slot_index = iterator_.GetIndexOperand(1);
3725 size_t depth = iterator_.GetUnsignedImmediateOperand(2);
3726 BuildLoadContextSlot(context, depth, slot_index, kMutable,
3728 return ReduceResult::Done();
3729}
3730ReduceResult MaglevGraphBuilder::VisitLdaScriptContextSlot() {
3731 ValueNode* context = LoadRegister(0);
3732 int slot_index = iterator_.GetIndexOperand(1);
3733 size_t depth = iterator_.GetUnsignedImmediateOperand(2);
3734 BuildLoadContextSlot(context, depth, slot_index, kMutable,
3736 return ReduceResult::Done();
3737}
3738ReduceResult MaglevGraphBuilder::VisitLdaImmutableContextSlot() {
3739 ValueNode* context = LoadRegister(0);
3740 int slot_index = iterator_.GetIndexOperand(1);
3741 size_t depth = iterator_.GetUnsignedImmediateOperand(2);
3742 BuildLoadContextSlot(context, depth, slot_index, kImmutable,
3744 return ReduceResult::Done();
3745}
3746ReduceResult MaglevGraphBuilder::VisitLdaCurrentContextSlot() {
3747 ValueNode* context = GetContext();
3748 int slot_index = iterator_.GetIndexOperand(0);
3749 BuildLoadContextSlot(context, 0, slot_index, kMutable, ContextKind::kDefault);
3750 return ReduceResult::Done();
3751}
3752ReduceResult MaglevGraphBuilder::VisitLdaCurrentScriptContextSlot() {
3753 ValueNode* context = GetContext();
3754 int slot_index = iterator_.GetIndexOperand(0);
3755 BuildLoadContextSlot(context, 0, slot_index, kMutable,
3757 return ReduceResult::Done();
3758}
3759ReduceResult MaglevGraphBuilder::VisitLdaImmutableCurrentContextSlot() {
3760 ValueNode* context = GetContext();
3761 int slot_index = iterator_.GetIndexOperand(0);
3762 BuildLoadContextSlot(context, 0, slot_index, kImmutable,
3764 return ReduceResult::Done();
3765}
3766
3767ReduceResult MaglevGraphBuilder::VisitStaContextSlot() {
3768 ValueNode* context = LoadRegister(0);
3769 int slot_index = iterator_.GetIndexOperand(1);
3770 size_t depth = iterator_.GetUnsignedImmediateOperand(2);
3771 return BuildStoreContextSlot(context, depth, slot_index, GetAccumulator(),
3773}
3774ReduceResult MaglevGraphBuilder::VisitStaCurrentContextSlot() {
3775 ValueNode* context = GetContext();
3776 int slot_index = iterator_.GetIndexOperand(0);
3777 return BuildStoreContextSlot(context, 0, slot_index, GetAccumulator(),
3779}
3780
3781ReduceResult MaglevGraphBuilder::VisitStaScriptContextSlot() {
3782 ValueNode* context = LoadRegister(0);
3783 int slot_index = iterator_.GetIndexOperand(1);
3784 size_t depth = iterator_.GetUnsignedImmediateOperand(2);
3785 return BuildStoreContextSlot(context, depth, slot_index, GetAccumulator(),
3787}
3788
3789ReduceResult MaglevGraphBuilder::VisitStaCurrentScriptContextSlot() {
3790 ValueNode* context = GetContext();
3791 int slot_index = iterator_.GetIndexOperand(0);
3792 return BuildStoreContextSlot(context, 0, slot_index, GetAccumulator(),
3794}
3795
3796ReduceResult MaglevGraphBuilder::VisitStar() {
3799 return ReduceResult::Done();
3800}
3801#define SHORT_STAR_VISITOR(Name, ...) \
3802 ReduceResult MaglevGraphBuilder::Visit##Name() { \
3803 MoveNodeBetweenRegisters( \
3804 interpreter::Register::virtual_accumulator(), \
3805 interpreter::Register::FromShortStar(interpreter::Bytecode::k##Name)); \
3806 return ReduceResult::Done(); \
3807 }
3809#undef SHORT_STAR_VISITOR
3810
3811ReduceResult MaglevGraphBuilder::VisitMov() {
3814 return ReduceResult::Done();
3815}
3816
3817ReduceResult MaglevGraphBuilder::VisitPushContext() {
3821 return ReduceResult::Done();
3822}
3823
3824ReduceResult MaglevGraphBuilder::VisitPopContext() {
3826 return ReduceResult::Done();
3827}
3828
3830 ValueNode* rhs) {
3831 ValueNode* tagged_lhs = GetTaggedValue(lhs);
3832 ValueNode* tagged_rhs = GetTaggedValue(rhs);
3833 if (tagged_lhs == tagged_rhs) {
3834 return GetBooleanConstant(true);
3835 }
3836 if (HaveDisjointTypes(tagged_lhs, tagged_rhs)) {
3837 return GetBooleanConstant(false);
3838 }
3839 // TODO(victorgomes): We could retrieve the HeapObjectRef in Constant and
3840 // compare them.
3841 if (IsConstantNode(tagged_lhs->opcode()) && !tagged_lhs->Is<Constant>() &&
3842 tagged_lhs->opcode() == tagged_rhs->opcode()) {
3843 // Constant nodes are canonicalized, except for the node holding a
3844 // HeapObjectRef, so equal constants should have been handled above.
3845 return GetBooleanConstant(false);
3846 }
3847 return AddNewNode<TaggedEqual>({tagged_lhs, tagged_rhs});
3848}
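// Illustrative examples for the reductions above: comparing a node against
// itself folds to true; comparing values with provably disjoint types (say, a
// known Smi against a known String) folds to false; and two distinct
// canonicalized constants of the same kind (e.g. two different root
// constants) also fold to false, without emitting a TaggedEqual node.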
3849
3851 RootIndex rhs_index) {
3852 return BuildTaggedEqual(lhs, GetRootConstant(rhs_index));
3853}
3854
3855ReduceResult MaglevGraphBuilder::VisitTestReferenceEqual() {
3856 ValueNode* lhs = LoadRegister(0);
3857 ValueNode* rhs = GetAccumulator();
3859 return ReduceResult::Done();
3860}
3861
3863 if (value->properties().value_representation() ==
3865 return AddNewNode<HoleyFloat64IsHole>({value});
3866 } else if (value->properties().value_representation() !=
3868 return GetBooleanConstant(false);
3869 }
3870
3871 if (auto maybe_constant = TryGetConstant(value)) {
3872 auto map = maybe_constant.value().map(broker());
3873 return GetBooleanConstant(map.is_undetectable());
3874 }
3875
3876 NodeType node_type;
3877 if (CheckType(value, NodeType::kSmi, &node_type)) {
3878 return GetBooleanConstant(false);
3879 }
3880
3881 auto it = known_node_aspects().FindInfo(value);
3882 if (known_node_aspects().IsValid(it)) {
3883 NodeInfo& info = it->second;
3884 if (info.possible_maps_are_known()) {
3885 // We check if all the possible maps have the same undetectable bit value.
3886 DCHECK_GT(info.possible_maps().size(), 0);
3887 bool first_is_undetectable = info.possible_maps()[0].is_undetectable();
3888 bool all_the_same_value =
3889 std::all_of(info.possible_maps().begin(), info.possible_maps().end(),
3890 [first_is_undetectable](compiler::MapRef map) {
3891 bool is_undetectable = map.is_undetectable();
3892 return (first_is_undetectable && is_undetectable) ||
3893 (!first_is_undetectable && !is_undetectable);
3894 });
3895 if (all_the_same_value) {
3896 return GetBooleanConstant(first_is_undetectable);
3897 }
3898 }
3899 }
3900
3901 enum CheckType type = GetCheckType(node_type);
3902 return AddNewNode<TestUndetectable>({value}, type);
3903}
3904
3906 BranchBuilder& builder, ValueNode* value) {
3908 switch (result->opcode()) {
3909 case Opcode::kRootConstant:
3910 switch (result->Cast<RootConstant>()->index()) {
3911 case RootIndex::kTrueValue:
3912 case RootIndex::kUndefinedValue:
3913 case RootIndex::kNullValue:
3914 return builder.AlwaysTrue();
3915 default:
3916 return builder.AlwaysFalse();
3917 }
3918 case Opcode::kHoleyFloat64IsHole:
3920 builder, result->Cast<HoleyFloat64IsHole>()->input().node());
3921 case Opcode::kTestUndetectable:
3922 return builder.Build<BranchIfUndetectable>(
3923 {result->Cast<TestUndetectable>()->value().node()},
3924 result->Cast<TestUndetectable>()->check_type());
3925 default:
3926 UNREACHABLE();
3927 }
3928}
3929
3930ReduceResult MaglevGraphBuilder::VisitTestUndetectable() {
3932 return ReduceResult::Done();
3933}
3934
3935ReduceResult MaglevGraphBuilder::VisitTestNull() {
3936 ValueNode* value = GetAccumulator();
3937 SetAccumulator(BuildTaggedEqual(value, RootIndex::kNullValue));
3938 return ReduceResult::Done();
3939}
3940
3941ReduceResult MaglevGraphBuilder::VisitTestUndefined() {
3942 ValueNode* value = GetAccumulator();
3943 SetAccumulator(BuildTaggedEqual(value, RootIndex::kUndefinedValue));
3944 return ReduceResult::Done();
3945}
3946
3947template <typename Function>
3949 ValueNode* value, const Function& GetResult) {
3950 // Similar to TF, we assume that all undetectable receiver objects are also
3951 // callables. In practice, there is only one: document.all.
3952 switch (CheckTypes(
3953 value, {NodeType::kBoolean, NodeType::kNumber, NodeType::kString,
3954 NodeType::kSymbol, NodeType::kCallable, NodeType::kJSArray})) {
3955 case NodeType::kBoolean:
3956 return GetResult(TypeOfLiteralFlag::kBoolean, RootIndex::kboolean_string);
3957 case NodeType::kNumber:
3958 return GetResult(TypeOfLiteralFlag::kNumber, RootIndex::knumber_string);
3959 case NodeType::kString:
3960 return GetResult(TypeOfLiteralFlag::kString, RootIndex::kstring_string);
3961 case NodeType::kSymbol:
3962 return GetResult(TypeOfLiteralFlag::kSymbol, RootIndex::ksymbol_string);
3963 case NodeType::kCallable:
3964 return Select(
3965 [&](auto& builder) {
3966 return BuildBranchIfUndetectable(builder, value);
3967 },
3968 [&] {
3969 return GetResult(TypeOfLiteralFlag::kUndefined,
3970 RootIndex::kundefined_string);
3971 },
3972 [&] {
3973 return GetResult(TypeOfLiteralFlag::kFunction,
3974 RootIndex::kfunction_string);
3975 });
3976 case NodeType::kJSArray:
3977 // TODO(victorgomes): Track JSReceiver, non-callable types in Maglev.
3978 return GetResult(TypeOfLiteralFlag::kObject, RootIndex::kobject_string);
3979 default:
3980 break;
3981 }
3982
3983 if (IsNullValue(value)) {
3984 return GetResult(TypeOfLiteralFlag::kObject, RootIndex::kobject_string);
3985 }
3986 if (IsUndefinedValue(value)) {
3987 return GetResult(TypeOfLiteralFlag::kUndefined,
3988 RootIndex::kundefined_string);
3989 }
3990
3991 return {};
3992}
3993
3995 return TryReduceTypeOf(value,
3996 [&](TypeOfLiteralFlag _, RootIndex idx) -> ValueNode* {
3997 return GetRootConstant(idx);
3998 });
3999}
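// Illustrative examples for the typeof reduction above: a value known to be a
// number folds to the "number" string, a known JSArray folds to "object", and
// a known callable selects between "undefined" (for undetectable receivers
// such as document.all) and "function", folding the branch when
// undetectability is statically known.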
4000
4001ReduceResult MaglevGraphBuilder::VisitTestTypeOf() {
4002 // TODO(v8:7700): Add a branch version of TestTypeOf that does not need to
4003 // materialise the boolean value.
4006 if (literal == TypeOfLiteralFlag::kOther) {
4007 SetAccumulator(GetRootConstant(RootIndex::kFalseValue));
4008 return ReduceResult::Done();
4009 }
4010 ValueNode* value = GetAccumulator();
4011 auto GetResult = [&](TypeOfLiteralFlag expected, RootIndex _) {
4012 return GetRootConstant(literal == expected ? RootIndex::kTrueValue
4013 : RootIndex::kFalseValue);
4014 };
4016
4018 return ReduceResult::Done();
4019}
4020
4022 const compiler::GlobalAccessFeedback& global_access_feedback) {
4023 DCHECK(global_access_feedback.IsScriptContextSlot());
4024 if (global_access_feedback.immutable()) return {};
4025 auto script_context = GetConstant(global_access_feedback.script_context());
4027 script_context, global_access_feedback.slot_index(), GetAccumulator(),
4029}
4030
4032 const compiler::GlobalAccessFeedback& global_access_feedback) {
4033 DCHECK(global_access_feedback.IsPropertyCell());
4034
4035 compiler::PropertyCellRef property_cell =
4036 global_access_feedback.property_cell();
4037 if (!property_cell.Cache(broker())) return {};
4038
4039 compiler::ObjectRef property_cell_value = property_cell.value(broker());
4040 if (property_cell_value.IsPropertyCellHole()) {
4041 // The property cell is no longer valid.
4043 DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
4044 }
4045
4046 PropertyDetails property_details = property_cell.property_details();
4047 DCHECK_EQ(PropertyKind::kData, property_details.kind());
4048
4049 if (property_details.IsReadOnly()) {
4050 // Don't even bother trying to lower stores to read-only data
4051 // properties.
4052 // TODO(neis): We could generate code that checks if the new value
4053 // equals the old one and then does nothing or deopts, respectively.
4054 return {};
4055 }
4056
4057 switch (property_details.cell_type()) {
4058 case PropertyCellType::kUndefined:
4059 return {};
4060 case PropertyCellType::kConstant: {
4061 // Record a code dependency on the cell, and just deoptimize if the new
4062 // value doesn't match the previous value stored inside the cell.
4063 broker()->dependencies()->DependOnGlobalProperty(property_cell);
4064 ValueNode* value = GetAccumulator();
4066 value, property_cell_value, DeoptimizeReason::kStoreToConstant);
4067 }
4068 case PropertyCellType::kConstantType: {
4069 // We rely on stability further below.
4070 if (property_cell_value.IsHeapObject() &&
4071 !property_cell_value.AsHeapObject().map(broker()).is_stable()) {
4072 return {};
4073 }
4074 // Record a code dependency on the cell, and just deoptimize if the new
4075 // value's type doesn't match the type of the previous value in the cell.
4076 broker()->dependencies()->DependOnGlobalProperty(property_cell);
4077 ValueNode* value = GetAccumulator();
4078 if (property_cell_value.IsHeapObject()) {
4079 compiler::MapRef property_cell_value_map =
4080 property_cell_value.AsHeapObject().map(broker());
4081 broker()->dependencies()->DependOnStableMap(property_cell_value_map);
4084 BuildCheckMaps(value, base::VectorOf({property_cell_value_map})));
4085 } else {
4087 }
4088 ValueNode* property_cell_node = GetConstant(property_cell.AsHeapObject());
4089 BuildStoreTaggedField(property_cell_node, value,
4090 PropertyCell::kValueOffset,
4092 break;
4093 }
4094 case PropertyCellType::kMutable: {
4095 // Record a code dependency on the cell, and just deoptimize if the
4096 // property ever becomes read-only.
4097 broker()->dependencies()->DependOnGlobalProperty(property_cell);
4098 ValueNode* property_cell_node = GetConstant(property_cell.AsHeapObject());
4099 BuildStoreTaggedField(property_cell_node, GetAccumulator(),
4100 PropertyCell::kValueOffset,
4102 break;
4103 }
4104 case PropertyCellType::kInTransition:
4105 UNREACHABLE();
4106 }
4107 return ReduceResult::Done();
4108}
4109
4111 const compiler::GlobalAccessFeedback& global_access_feedback) {
4112 DCHECK(global_access_feedback.IsScriptContextSlot());
4113 if (!global_access_feedback.immutable()) return {};
4114 compiler::OptionalObjectRef maybe_slot_value =
4115 global_access_feedback.script_context().get(
4116 broker(), global_access_feedback.slot_index());
4117 if (!maybe_slot_value) return {};
4118 return GetConstant(maybe_slot_value.value());
4119}
4120
4122 const compiler::GlobalAccessFeedback& global_access_feedback) {
4123 DCHECK(global_access_feedback.IsScriptContextSlot());
4124 RETURN_IF_DONE(TryBuildScriptContextConstantLoad(global_access_feedback));
4125 auto script_context = GetConstant(global_access_feedback.script_context());
4126 ContextSlotMutability mutability =
4127 global_access_feedback.immutable() ? kImmutable : kMutable;
4128 return LoadAndCacheContextSlot(script_context,
4129 global_access_feedback.slot_index(),
4130 mutability, ContextKind::kScriptContext);
4131}
4132
4134 const compiler::GlobalAccessFeedback& global_access_feedback) {
4135 // TODO(leszeks): A bunch of this is copied from
4136 // js-native-context-specialization.cc -- I wonder if we can unify it
4137 // somehow.
4138 DCHECK(global_access_feedback.IsPropertyCell());
4139
4140 compiler::PropertyCellRef property_cell =
4141 global_access_feedback.property_cell();
4142 if (!property_cell.Cache(broker())) return {};
4143
4144 compiler::ObjectRef property_cell_value = property_cell.value(broker());
4145 if (property_cell_value.IsPropertyCellHole()) {
4146 // The property cell is no longer valid.
4148 DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
4149 }
4150
4151 PropertyDetails property_details = property_cell.property_details();
4152 PropertyCellType property_cell_type = property_details.cell_type();
4153 DCHECK_EQ(PropertyKind::kData, property_details.kind());
4154
4155 if (!property_details.IsConfigurable() && property_details.IsReadOnly()) {
4156 return GetConstant(property_cell_value);
4157 }
4158
4159 // Record a code dependency on the cell if we can benefit from the
4160 // additional feedback, or the global property is configurable (i.e.
4161 // can be deleted or reconfigured to an accessor property).
4162 if (property_cell_type != PropertyCellType::kMutable ||
4163 property_details.IsConfigurable()) {
4164 broker()->dependencies()->DependOnGlobalProperty(property_cell);
4165 }
4166
4167 // Load from constant/undefined global property can be constant-folded.
4168 if (property_cell_type == PropertyCellType::kConstant ||
4169 property_cell_type == PropertyCellType::kUndefined) {
4170 return GetConstant(property_cell_value);
4171 }
4172
4173 ValueNode* property_cell_node = GetConstant(property_cell.AsHeapObject());
4174 return BuildLoadTaggedField(property_cell_node, PropertyCell::kValueOffset);
4175}
4176
4178 const compiler::GlobalAccessFeedback& global_access_feedback) {
4179 if (global_access_feedback.IsScriptContextSlot()) {
4180 return TryBuildScriptContextStore(global_access_feedback);
4181 } else if (global_access_feedback.IsPropertyCell()) {
4182 return TryBuildPropertyCellStore(global_access_feedback);
4183 } else {
4184 DCHECK(global_access_feedback.IsMegamorphic());
4185 return {};
4186 }
4187}
4188
4190 const compiler::GlobalAccessFeedback& global_access_feedback) {
4191 if (global_access_feedback.IsScriptContextSlot()) {
4192 return TryBuildScriptContextLoad(global_access_feedback);
4193 } else if (global_access_feedback.IsPropertyCell()) {
4194 return TryBuildPropertyCellLoad(global_access_feedback);
4195 } else {
4196 DCHECK(global_access_feedback.IsMegamorphic());
4197 return {};
4198 }
4199}
4200
4201ReduceResult MaglevGraphBuilder::VisitLdaGlobal() {
4202 // LdaGlobal <name_index> <slot>
4203
4204 static const int kNameOperandIndex = 0;
4205 static const int kSlotOperandIndex = 1;
4206
4207 compiler::NameRef name = GetRefOperand<Name>(kNameOperandIndex);
4208 FeedbackSlot slot = GetSlotOperand(kSlotOperandIndex);
4209 compiler::FeedbackSource feedback_source{feedback(), slot};
4210 return BuildLoadGlobal(name, feedback_source, TypeofMode::kNotInside);
4211}
4212
4213ReduceResult MaglevGraphBuilder::VisitLdaGlobalInsideTypeof() {
4214 // LdaGlobalInsideTypeof <name_index> <slot>
4215
4216 static const int kNameOperandIndex = 0;
4217 static const int kSlotOperandIndex = 1;
4218
4219 compiler::NameRef name = GetRefOperand<Name>(kNameOperandIndex);
4220 FeedbackSlot slot = GetSlotOperand(kSlotOperandIndex);
4221 compiler::FeedbackSource feedback_source{feedback(), slot};
4222 return BuildLoadGlobal(name, feedback_source, TypeofMode::kInside);
4223}
4224
4225ReduceResult MaglevGraphBuilder::VisitStaGlobal() {
4226 // StaGlobal <name_index> <slot>
4227 FeedbackSlot slot = GetSlotOperand(1);
4228 compiler::FeedbackSource feedback_source{feedback(), slot};
4229
4230 const compiler::ProcessedFeedback& access_feedback =
4231 broker()->GetFeedbackForGlobalAccess(feedback_source);
4232
4233 if (access_feedback.IsInsufficient()) {
4235 DeoptimizeReason::kInsufficientTypeFeedbackForGenericGlobalAccess);
4236 }
4237
4238 const compiler::GlobalAccessFeedback& global_access_feedback =
4239 access_feedback.AsGlobalAccess();
4240 RETURN_IF_DONE(TryBuildGlobalStore(global_access_feedback));
4241
4242 ValueNode* value = GetAccumulator();
4243 compiler::NameRef name = GetRefOperand<Name>(0);
4244 ValueNode* context = GetContext();
4245 AddNewNode<StoreGlobal>({context, value}, name, feedback_source);
4246 return ReduceResult::Done();
4247}
4248
4249ReduceResult MaglevGraphBuilder::VisitLdaLookupSlot() {
4250 // LdaLookupSlot <name_index>
4251 ValueNode* name = GetConstant(GetRefOperand<Name>(0));
4252 SetAccumulator(BuildCallRuntime(Runtime::kLoadLookupSlot, {name}).value());
4253 return ReduceResult::Done();
4254}
4255
4256ReduceResult MaglevGraphBuilder::VisitLdaLookupContextSlot() {
4257 // LdaLookupContextSlot <name_index> <feedback_slot> <depth>
4258 ValueNode* name = GetConstant(GetRefOperand<Name>(0));
4259 ValueNode* slot = GetTaggedIndexConstant(iterator_.GetIndexOperand(1));
4260 ValueNode* depth =
4264 return ReduceResult::Done();
4265}
4266
4267ReduceResult MaglevGraphBuilder::VisitLdaLookupScriptContextSlot() {
4268 // LdaLookupScriptContextSlot <name_index> <feedback_slot> <depth>
4269 ValueNode* name = GetConstant(GetRefOperand<Name>(0));
4270 ValueNode* slot = GetTaggedIndexConstant(iterator_.GetIndexOperand(1));
4271 ValueNode* depth =
4274 {name, depth, slot}));
4275 return ReduceResult::Done();
4276}
4277
4279 compiler::OptionalScopeInfoRef maybe_scope_info =
4281 if (!maybe_scope_info.has_value()) return false;
4282 compiler::ScopeInfoRef scope_info = maybe_scope_info.value();
4283 for (uint32_t d = 0; d < depth; d++) {
4286 if (scope_info.HasContextExtensionSlot() &&
4287 !broker()->dependencies()->DependOnEmptyContextExtension(scope_info)) {
4288 // Using EmptyContextExtension dependency is not possible for this
4289 // scope_info, so generate dynamic checks.
4290 ValueNode* context = GetContextAtDepth(GetContext(), d);
4291 // Only support known contexts so that we can check that there's no
4292 // extension at compile time. Otherwise we could end up in a deopt loop
4293 // once we do get an extension.
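// Illustrative example (added comment, not from the original source): a
// `with` block or a sloppy-mode direct eval that declares variables, e.g.
//   function f() { eval("var x = 1"); return x; }
// can install a context extension at runtime, which would invalidate a
// compile-time "no extension" assumption.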
4294 compiler::OptionalHeapObjectRef maybe_ref = TryGetConstant(context);
4295 if (!maybe_ref) return false;
4296 compiler::ContextRef context_ref = maybe_ref.value().AsContext();
4297 compiler::OptionalObjectRef extension_ref =
4298 context_ref.get(broker(), Context::EXTENSION_INDEX);
4299 // The extension may be concurrently installed while we're checking the
4300 // context, in which case it may still be uninitialized. This still
4301 // means an extension is about to appear, so we should block this
4302 // optimization.
4303 if (!extension_ref) return false;
4304 if (!extension_ref->IsUndefined()) return false;
4307 AddNewNode<CheckValue>({extension}, broker()->undefined_value(),
4308 DeoptimizeReason::kUnexpectedContextExtension);
4309 }
4310 CHECK_IMPLIES(!scope_info.HasOuterScopeInfo(), d + 1 == depth);
4311 if (scope_info.HasOuterScopeInfo()) {
4312 scope_info = scope_info.OuterScopeInfo(broker());
4313 }
4314 }
4315 return true;
4316}
4317
4318ReduceResult MaglevGraphBuilder::VisitLdaLookupGlobalSlot() {
4319 // LdaLookupGlobalSlot <name_index> <feedback_slot> <depth>
4322 FeedbackSlot slot = GetSlotOperand(1);
4323 compiler::FeedbackSource feedback_source{feedback(), slot};
4324 return BuildLoadGlobal(name, feedback_source, TypeofMode::kNotInside);
4325 } else {
4326 ValueNode* name_node = GetConstant(name);
4327 ValueNode* slot = GetTaggedIndexConstant(iterator_.GetIndexOperand(1));
4328 ValueNode* depth =
4330 ValueNode* result;
4331 if (is_inline()) {
4332 ValueNode* vector = GetConstant(feedback());
4334 {name_node, depth, slot, vector});
4335 } else {
4337 {name_node, depth, slot});
4338 }
4340 return ReduceResult::Done();
4341 }
4342}
4343
4344ReduceResult MaglevGraphBuilder::VisitLdaLookupSlotInsideTypeof() {
4345 // LdaLookupSlotInsideTypeof <name_index>
4346 ValueNode* name = GetConstant(GetRefOperand<Name>(0));
4348 BuildCallRuntime(Runtime::kLoadLookupSlotInsideTypeof, {name}).value());
4349 return ReduceResult::Done();
4350}
4351
4352ReduceResult MaglevGraphBuilder::VisitLdaLookupContextSlotInsideTypeof() {
4353 // LdaLookupContextSlotInsideTypeof <name_index> <context_slot> <depth>
4354 ValueNode* name = GetConstant(GetRefOperand<Name>(0));
4355 ValueNode* slot = GetTaggedIndexConstant(iterator_.GetIndexOperand(1));
4356 ValueNode* depth =
4360 {name, depth, slot}));
4361 return ReduceResult::Done();
4362}
4363
4364ReduceResult MaglevGraphBuilder::VisitLdaLookupScriptContextSlotInsideTypeof() {
4365 // LdaLookupScriptContextSlotInsideTypeof <name_index> <context_slot> <depth>
4366 ValueNode* name = GetConstant(GetRefOperand<Name>(0));
4367 ValueNode* slot = GetTaggedIndexConstant(iterator_.GetIndexOperand(1));
4368 ValueNode* depth =
4372 {name, depth, slot}));
4373 return ReduceResult::Done();
4374}
4375
4376ReduceResult MaglevGraphBuilder::VisitLdaLookupGlobalSlotInsideTypeof() {
4377 // LdaLookupGlobalSlotInsideTypeof <name_index> <feedback_slot> <depth>
4378 ValueNode* name = GetConstant(GetRefOperand<Name>(0));
4379 ValueNode* slot = GetTaggedIndexConstant(iterator_.GetIndexOperand(1));
4380 ValueNode* depth =
4382 ValueNode* result;
4383 if (is_inline()) {
4384 ValueNode* vector = GetConstant(feedback());
4386 {name, depth, slot, vector});
4387 } else {
4389 {name, depth, slot});
4390 }
4392 return ReduceResult::Done();
4393}
4394
4395namespace {
4396Runtime::FunctionId StaLookupSlotFunction(uint8_t sta_lookup_slot_flags) {
4397 using Flags = interpreter::StoreLookupSlotFlags;
4398 switch (Flags::GetLanguageMode(sta_lookup_slot_flags)) {
4400 return Runtime::kStoreLookupSlot_Strict;
4402 if (Flags::IsLookupHoistingMode(sta_lookup_slot_flags)) {
4403 return Runtime::kStoreLookupSlot_SloppyHoisting;
4404 } else {
4405 return Runtime::kStoreLookupSlot_Sloppy;
4406 }
4407 }
4408}
4409} // namespace
4410
4411ReduceResult MaglevGraphBuilder::VisitStaLookupSlot() {
4412 // StaLookupSlot <name_index> <flags>
4413 ValueNode* value = GetAccumulator();
4414 ValueNode* name = GetConstant(GetRefOperand<Name>(0));
4415 uint32_t flags = GetFlag8Operand(1);
4416 EscapeContext();
4418 BuildCallRuntime(StaLookupSlotFunction(flags), {name, value}).value());
4419 return ReduceResult::Done();
4420}
4421
4423 LocalIsolate* isolate, ValueNode* node) {
4424 switch (node->properties().value_representation()) {
4425 case ValueRepresentation::kInt32:
4426 case ValueRepresentation::kUint32:
4427 case ValueRepresentation::kFloat64:
4428 case ValueRepresentation::kIntPtr:
4429 return NodeType::kNumber;
4430 case ValueRepresentation::kHoleyFloat64:
4431 return NodeType::kNumberOrOddball;
4432 case ValueRepresentation::kTagged:
4433 break;
4434 }
4435 switch (node->opcode()) {
4436 case Opcode::kPhi:
4437 return node->Cast<Phi>()->type();
4438 case Opcode::kCheckedSmiTagInt32:
4439 case Opcode::kCheckedSmiTagUint32:
4440 case Opcode::kCheckedSmiTagIntPtr:
4441 case Opcode::kCheckedSmiTagFloat64:
4442 case Opcode::kUnsafeSmiTagInt32:
4443 case Opcode::kUnsafeSmiTagUint32:
4444 case Opcode::kUnsafeSmiTagIntPtr:
4445 case Opcode::kSmiConstant:
4446 return NodeType::kSmi;
4447 case Opcode::kInt32ToNumber:
4448 case Opcode::kUint32ToNumber:
4449 case Opcode::kIntPtrToNumber:
4450 case Opcode::kFloat64ToTagged:
4451 return NodeType::kNumber;
4452 case Opcode::kHoleyFloat64ToTagged:
4453 return NodeType::kNumberOrOddball;
4454 case Opcode::kAllocationBlock:
4455 case Opcode::kInlinedAllocation: {
4456 auto obj = node->Cast<InlinedAllocation>()->object();
4457 if (obj->has_static_map()) {
4458 return StaticTypeForMap(obj->map(), broker);
4459 } else {
4460 switch (obj->type()) {
4462 return NodeType::kNonThinString;
4466 UNREACHABLE();
4467 }
4468 }
4469 }
4470 case Opcode::kRootConstant: {
4471 RootConstant* constant = node->Cast<RootConstant>();
4472 switch (constant->index()) {
4473 case RootIndex::kTrueValue:
4474 case RootIndex::kFalseValue:
4475 return NodeType::kBoolean;
4476 case RootIndex::kUndefinedValue:
4477 case RootIndex::kNullValue:
4478 return NodeType::kOddball;
4479 default:
4480 break;
4481 }
4482 [[fallthrough]];
4483 }
4484 case Opcode::kConstant: {
4486 MaglevGraphBuilder::TryGetConstant(broker, isolate, node).value();
4487 return StaticTypeForConstant(broker, ref);
4488 }
4489 case Opcode::kToNumberOrNumeric:
4490 if (node->Cast<ToNumberOrNumeric>()->mode() ==
4492 return NodeType::kNumber;
4493 }
4494 // TODO(verwaest): Check what we need here.
4495 return NodeType::kUnknown;
4496 case Opcode::kToString:
4497 case Opcode::kNumberToString:
4498 case Opcode::kUnwrapStringWrapper:
4499 return NodeType::kString;
4500 case Opcode::kStringConcat:
4501 case Opcode::kUnwrapThinString:
4502 return NodeType::kNonThinString;
4503 case Opcode::kCheckedInternalizedString:
4504 return NodeType::kInternalizedString;
4505 case Opcode::kToObject:
4506 case Opcode::kCreateObjectLiteral:
4507 case Opcode::kCreateShallowObjectLiteral:
4508 return NodeType::kJSReceiver;
4509 case Opcode::kCreateArrayLiteral:
4510 case Opcode::kCreateShallowArrayLiteral:
4511 return NodeType::kJSArray;
4512 case Opcode::kToName:
4513 return NodeType::kName;
4514 case Opcode::kFastCreateClosure:
4515 case Opcode::kCreateClosure:
4516 return NodeType::kCallable;
4517 case Opcode::kInt32Compare:
4518 case Opcode::kFloat64Compare:
4519 case Opcode::kGenericEqual:
4520 case Opcode::kGenericStrictEqual:
4521 case Opcode::kGenericLessThan:
4522 case Opcode::kGenericLessThanOrEqual:
4523 case Opcode::kGenericGreaterThan:
4524 case Opcode::kGenericGreaterThanOrEqual:
4525 case Opcode::kLogicalNot:
4526 case Opcode::kStringEqual:
4527 case Opcode::kTaggedEqual:
4528 case Opcode::kTaggedNotEqual:
4529 case Opcode::kTestInstanceOf:
4530 case Opcode::kTestTypeOf:
4531 case Opcode::kTestUndetectable:
4532 case Opcode::kToBoolean:
4533 case Opcode::kToBooleanLogicalNot:
4534 case Opcode::kIntPtrToBoolean:
4535 case Opcode::kSetPrototypeHas:
4536 return NodeType::kBoolean;
4537 // Not value nodes:
4538#define GENERATE_CASE(Name) case Opcode::k##Name:
4541#undef GENERATE_CASE
4542 UNREACHABLE();
4543 case Opcode::kCreateFastArrayElements:
4544 case Opcode::kTransitionElementsKind:
4545 // Unsorted value nodes. TODO(maglev): See which of these should return
4546 // something other than kUnknown.
4547 case Opcode::kIdentity:
4548 case Opcode::kArgumentsElements:
4549 case Opcode::kArgumentsLength:
4550 case Opcode::kRestLength:
4551 case Opcode::kCall:
4552 case Opcode::kCallBuiltin:
4553 case Opcode::kCallCPPBuiltin:
4554 case Opcode::kCallForwardVarargs:
4555 case Opcode::kCallRuntime:
4556 case Opcode::kCallWithArrayLike:
4557 case Opcode::kCallWithSpread:
4558 case Opcode::kCallKnownApiFunction:
4559 case Opcode::kCallKnownJSFunction:
4560 case Opcode::kCallSelf:
4561 case Opcode::kConstruct:
4562 case Opcode::kCheckConstructResult:
4563 case Opcode::kCheckDerivedConstructResult:
4564 case Opcode::kConstructWithSpread:
4565 case Opcode::kConvertReceiver:
4566 case Opcode::kConvertHoleToUndefined:
4567 case Opcode::kCreateFunctionContext:
4568 case Opcode::kCreateRegExpLiteral:
4569 case Opcode::kDeleteProperty:
4570 case Opcode::kEnsureWritableFastElements:
4571 case Opcode::kExtendPropertiesBackingStore:
4572 case Opcode::kForInPrepare:
4573 case Opcode::kForInNext:
4574 case Opcode::kGeneratorRestoreRegister:
4575 case Opcode::kGetIterator:
4576 case Opcode::kGetSecondReturnedValue:
4577 case Opcode::kGetTemplateObject:
4578 case Opcode::kHasInPrototypeChain:
4579 case Opcode::kInitialValue:
4580 case Opcode::kLoadTaggedField:
4581 case Opcode::kLoadTaggedFieldForProperty:
4582 case Opcode::kLoadTaggedFieldForContextSlot:
4583 case Opcode::kLoadTaggedFieldForScriptContextSlot:
4584 case Opcode::kLoadDoubleField:
4585 case Opcode::kLoadFloat64:
4586 case Opcode::kLoadInt32:
4587 case Opcode::kLoadHeapInt32:
4588 case Opcode::kLoadTaggedFieldByFieldIndex:
4589 case Opcode::kLoadFixedArrayElement:
4590 case Opcode::kLoadFixedDoubleArrayElement:
4591 case Opcode::kLoadHoleyFixedDoubleArrayElement:
4592 case Opcode::kLoadHoleyFixedDoubleArrayElementCheckedNotHole:
4593 case Opcode::kLoadSignedIntDataViewElement:
4594 case Opcode::kLoadDoubleDataViewElement:
4595 case Opcode::kLoadTypedArrayLength:
4596 case Opcode::kLoadSignedIntTypedArrayElement:
4597 case Opcode::kLoadUnsignedIntTypedArrayElement:
4598 case Opcode::kLoadDoubleTypedArrayElement:
4599 case Opcode::kLoadEnumCacheLength:
4600 case Opcode::kLoadGlobal:
4601 case Opcode::kLoadNamedGeneric:
4602 case Opcode::kLoadNamedFromSuperGeneric:
4603 case Opcode::kMaybeGrowFastElements:
4604 case Opcode::kMigrateMapIfNeeded:
4605 case Opcode::kSetNamedGeneric:
4606 case Opcode::kDefineNamedOwnGeneric:
4607 case Opcode::kStoreInArrayLiteralGeneric:
4608 case Opcode::kStoreGlobal:
4609 case Opcode::kGetKeyedGeneric:
4610 case Opcode::kSetKeyedGeneric:
4611 case Opcode::kDefineKeyedOwnGeneric:
4612 case Opcode::kRegisterInput:
4613 case Opcode::kCheckedSmiSizedInt32:
4614 case Opcode::kCheckedSmiUntag:
4615 case Opcode::kUnsafeSmiUntag:
4616 case Opcode::kCheckedObjectToIndex:
4617 case Opcode::kCheckedTruncateNumberOrOddballToInt32:
4618 case Opcode::kCheckedInt32ToUint32:
4619 case Opcode::kCheckedIntPtrToUint32:
4620 case Opcode::kUnsafeInt32ToUint32:
4621 case Opcode::kCheckedUint32ToInt32:
4622 case Opcode::kCheckedIntPtrToInt32:
4623 case Opcode::kChangeInt32ToFloat64:
4624 case Opcode::kChangeUint32ToFloat64:
4625 case Opcode::kChangeIntPtrToFloat64:
4626 case Opcode::kCheckedTruncateFloat64ToInt32:
4627 case Opcode::kCheckedTruncateFloat64ToUint32:
4628 case Opcode::kTruncateNumberOrOddballToInt32:
4629 case Opcode::kCheckedNumberToInt32:
4630 case Opcode::kTruncateUint32ToInt32:
4631 case Opcode::kTruncateFloat64ToInt32:
4632 case Opcode::kUnsafeTruncateUint32ToInt32:
4633 case Opcode::kUnsafeTruncateFloat64ToInt32:
4634 case Opcode::kInt32ToUint8Clamped:
4635 case Opcode::kUint32ToUint8Clamped:
4636 case Opcode::kFloat64ToUint8Clamped:
4637 case Opcode::kCheckedNumberToUint8Clamped:
4638 case Opcode::kFloat64ToHeapNumberForField:
4639 case Opcode::kCheckedNumberOrOddballToFloat64:
4640 case Opcode::kUncheckedNumberOrOddballToFloat64:
4641 case Opcode::kCheckedNumberOrOddballToHoleyFloat64:
4642 case Opcode::kCheckedHoleyFloat64ToFloat64:
4643 case Opcode::kHoleyFloat64ToMaybeNanFloat64:
4644 case Opcode::kHoleyFloat64IsHole:
4645 case Opcode::kSetPendingMessage:
4646 case Opcode::kStringAt:
4647 case Opcode::kStringLength:
4648 case Opcode::kAllocateElementsArray:
4649 case Opcode::kUpdateJSArrayLength:
4650 case Opcode::kVirtualObject:
4651 case Opcode::kGetContinuationPreservedEmbedderData:
4652 case Opcode::kExternalConstant:
4653 case Opcode::kFloat64Constant:
4654 case Opcode::kInt32Constant:
4655 case Opcode::kUint32Constant:
4656 case Opcode::kTaggedIndexConstant:
4657 case Opcode::kTrustedConstant:
4658 case Opcode::kInt32AbsWithOverflow:
4659 case Opcode::kInt32AddWithOverflow:
4660 case Opcode::kInt32SubtractWithOverflow:
4661 case Opcode::kInt32MultiplyWithOverflow:
4662 case Opcode::kInt32DivideWithOverflow:
4663 case Opcode::kInt32ModulusWithOverflow:
4664 case Opcode::kInt32BitwiseAnd:
4665 case Opcode::kInt32BitwiseOr:
4666 case Opcode::kInt32BitwiseXor:
4667 case Opcode::kInt32ShiftLeft:
4668 case Opcode::kInt32ShiftRight:
4669 case Opcode::kInt32ShiftRightLogical:
4670 case Opcode::kInt32BitwiseNot:
4671 case Opcode::kInt32NegateWithOverflow:
4672 case Opcode::kInt32IncrementWithOverflow:
4673 case Opcode::kInt32DecrementWithOverflow:
4674 case Opcode::kInt32ToBoolean:
4675 case Opcode::kFloat64Abs:
4676 case Opcode::kFloat64Add:
4677 case Opcode::kFloat64Subtract:
4678 case Opcode::kFloat64Multiply:
4679 case Opcode::kFloat64Divide:
4680 case Opcode::kFloat64Exponentiate:
4681 case Opcode::kFloat64Modulus:
4682 case Opcode::kFloat64Negate:
4683 case Opcode::kFloat64Round:
4684 case Opcode::kFloat64ToBoolean:
4685 case Opcode::kFloat64Ieee754Unary:
4686 case Opcode::kCheckedSmiIncrement:
4687 case Opcode::kCheckedSmiDecrement:
4688 case Opcode::kGenericAdd:
4689 case Opcode::kGenericSubtract:
4690 case Opcode::kGenericMultiply:
4691 case Opcode::kGenericDivide:
4692 case Opcode::kGenericModulus:
4693 case Opcode::kGenericExponentiate:
4694 case Opcode::kGenericBitwiseAnd:
4695 case Opcode::kGenericBitwiseOr:
4696 case Opcode::kGenericBitwiseXor:
4697 case Opcode::kGenericShiftLeft:
4698 case Opcode::kGenericShiftRight:
4699 case Opcode::kGenericShiftRightLogical:
4700 case Opcode::kGenericBitwiseNot:
4701 case Opcode::kGenericNegate:
4702 case Opcode::kGenericIncrement:
4703 case Opcode::kGenericDecrement:
4704 case Opcode::kBuiltinStringFromCharCode:
4705 case Opcode::kBuiltinStringPrototypeCharCodeOrCodePointAt:
4706 case Opcode::kConsStringMap:
4707 case Opcode::kMapPrototypeGet:
4708 case Opcode::kMapPrototypeGetInt32Key:
4709 return NodeType::kUnknown;
4710 }
4711}
4712
4714 NodeType* current_type) {
4715 NodeType static_type = StaticTypeForNode(broker(), local_isolate(), node);
4716 if (current_type) *current_type = static_type;
4717 return NodeTypeIs(static_type, type);
4718}
4719
4721 NodeType* old_type) {
4722 if (CheckStaticType(node, type, old_type)) return true;
4723 NodeInfo* known_info = GetOrCreateInfoFor(node);
4724 if (old_type) *old_type = known_info->type();
4725 if (NodeTypeIs(known_info->type(), type)) return true;
4726 known_info->CombineType(type);
4727 if (auto phi = node->TryCast<Phi>()) {
4728 known_info->CombineType(phi->type());
4729 }
4730 return false;
4731}
4732
4733template <typename Function>
4735 Function ensure_new_type) {
4736 if (CheckStaticType(node, type)) return true;
4737 NodeInfo* known_info = GetOrCreateInfoFor(node);
4738 if (NodeTypeIs(known_info->type(), type)) return true;
4739 ensure_new_type(known_info->type());
4740 known_info->CombineType(type);
4741 return false;
4742}
4743
4744void MaglevGraphBuilder::SetKnownValue(ValueNode* node, compiler::ObjectRef ref,
4745 NodeType new_node_type) {
4746 DCHECK(!node->Is<Constant>());
4747 DCHECK(!node->Is<RootConstant>());
4748 NodeInfo* known_info = GetOrCreateInfoFor(node);
4749 // ref type should be compatible with type.
4750 DCHECK(NodeTypeIs(StaticTypeForConstant(broker(), ref), new_node_type));
4751 if (ref.IsHeapObject()) {
4752 DCHECK(IsInstanceOfNodeType(ref.AsHeapObject().map(broker()),
4753 known_info->type(), broker()));
4754 } else {
4755 DCHECK(!NodeTypeIs(known_info->type(), NodeType::kAnyHeapObject));
4756 }
4757 known_info->CombineType(new_node_type);
4758 known_info->alternative().set_checked_value(GetConstant(ref));
4759}
4760
4762 std::initializer_list<NodeType> types) {
4763 auto it = known_node_aspects().FindInfo(node);
4764 bool has_kna = known_node_aspects().IsValid(it);
4765 for (NodeType type : types) {
4766 if (CheckStaticType(node, type)) return type;
4767 if (has_kna) {
4768 if (NodeTypeIs(it->second.type(), type)) return type;
4769 }
4770 }
4771 return NodeType::kUnknown;
4772}
4773
4775 NodeType* current_type) {
4776 if (CheckStaticType(node, type, current_type)) return true;
4777 auto it = known_node_aspects().FindInfo(node);
4778 if (!known_node_aspects().IsValid(it)) return false;
4779 if (current_type) *current_type = it->second.type();
4780 return NodeTypeIs(it->second.type(), type);
4781}
4782
4784 auto it = known_node_aspects().FindInfo(node);
4785 if (!known_node_aspects().IsValid(it)) {
4786 return StaticTypeForNode(broker(), local_isolate(), node);
4787 }
4788 NodeType actual_type = it->second.type();
4789 if (auto phi = node->TryCast<Phi>()) {
4790 actual_type = CombineType(actual_type, phi->type());
4791 }
4792#ifdef DEBUG
4793 NodeType static_type = StaticTypeForNode(broker(), local_isolate(), node);
4794 if (!NodeTypeIs(actual_type, static_type)) {
4795 // In case we needed a numerical alternative of a smi value, the type
4796 // must generalize. In all other cases the node info type should reflect the
4797 // actual type.
4798 DCHECK(static_type == NodeType::kSmi && actual_type == NodeType::kNumber &&
4799 !known_node_aspects().TryGetInfoFor(node)->alternative().has_none());
4800 }
4801#endif // DEBUG
4802 return actual_type;
4803}
4804
4808
4810 NodeType lhs_type = GetType(lhs);
4811 NodeType meet = CombineType(lhs_type, rhs_type);
4812 return IsEmptyNodeType(meet);
4813}
4814
4816 NodeType static_type = StaticTypeForNode(broker(), local_isolate(), node);
4817 if (!NodeTypeMayBeNullOrUndefined(static_type)) return false;
4818 auto it = known_node_aspects().FindInfo(node);
4819 if (!known_node_aspects().IsValid(it)) return true;
4820 return NodeTypeMayBeNullOrUndefined(it->second.type());
4821}
4822
4824 if (EnsureType(node, NodeType::kSmi)) {
4825 if (SmiValuesAre31Bits()) {
4826 if (auto phi = node->TryCast<Phi>()) {
4827 phi->SetUseRequires31BitValue();
4828 }
4829 }
4830 return AddNewNode<UnsafeSmiUntag>({node});
4831 } else {
4832 return AddNewNode<CheckedSmiUntag>({node});
4833 }
4834}
4835
4837 ValueNode* node, NodeType allowed_input_type,
4838 TaggedToFloat64ConversionType conversion_type) {
4839 NodeType old_type;
4840 if (EnsureType(node, allowed_input_type, &old_type)) {
4841 if (old_type == NodeType::kSmi) {
4842 ValueNode* untagged_smi = BuildSmiUntag(node);
4843 return AddNewNode<ChangeInt32ToFloat64>({untagged_smi});
4844 }
4846 conversion_type);
4847 } else {
4848 return AddNewNode<CheckedNumberOrOddballToFloat64>({node}, conversion_type);
4849 }
4850}
4851
4853 bool elidable) {
4854 if (CheckStaticType(object, NodeType::kSmi)) return object;
4855 if (CheckType(object, NodeType::kAnyHeapObject)) {
4856 return EmitUnconditionalDeopt(DeoptimizeReason::kNotASmi);
4857 }
4858 if (EnsureType(object, NodeType::kSmi) && elidable) return object;
4859 switch (object->value_representation()) {
4861 if (!SmiValuesAre32Bits()) {
4863 }
4864 break;
4867 break;
4871 break;
4873 AddNewNode<CheckSmi>({object});
4874 break;
4877 break;
4878 }
4879 return object;
4880}
4881
4883 if (EnsureType(object, NodeType::kAnyHeapObject)) return ReduceResult::Done();
4884 if (IsEmptyNodeType(CombineType(GetType(object), NodeType::kAnyHeapObject))) {
4885 return EmitUnconditionalDeopt(DeoptimizeReason::kSmi);
4886 }
4888 return ReduceResult::Done();
4889}
4890
4892 NodeType known_type;
4893 if (EnsureType(object, NodeType::kString, &known_type))
4894 return ReduceResult::Done();
4895 if (IsEmptyNodeType(CombineType(GetType(object), NodeType::kString))) {
4896 return EmitUnconditionalDeopt(DeoptimizeReason::kNotAString);
4897 }
4898 AddNewNode<CheckString>({object}, GetCheckType(known_type));
4899 return ReduceResult::Done();
4900}
4901
4903 ValueNode* object) {
4904 NodeType known_type;
4905 if (EnsureType(object, NodeType::kStringOrStringWrapper, &known_type))
4906 return ReduceResult::Done();
4907 if (IsEmptyNodeType(
4908 CombineType(GetType(object), NodeType::kStringOrStringWrapper))) {
4909 return EmitUnconditionalDeopt(DeoptimizeReason::kNotAStringOrStringWrapper);
4910 }
4911 AddNewNode<CheckStringOrStringWrapper>({object}, GetCheckType(known_type));
4912 return ReduceResult::Done();
4913}
4914
4916 if (EnsureType(object, NodeType::kNumber)) return ReduceResult::Done();
4917 if (IsEmptyNodeType(CombineType(GetType(object), NodeType::kNumber))) {
4918 return EmitUnconditionalDeopt(DeoptimizeReason::kNotANumber);
4919 }
4921 return ReduceResult::Done();
4922}
4923
4925 NodeType known_type;
4926 if (EnsureType(object, NodeType::kSymbol, &known_type))
4927 return ReduceResult::Done();
4928 if (IsEmptyNodeType(CombineType(GetType(object), NodeType::kSymbol))) {
4929 return EmitUnconditionalDeopt(DeoptimizeReason::kNotASymbol);
4930 }
4931 AddNewNode<CheckSymbol>({object}, GetCheckType(known_type));
4932 return ReduceResult::Done();
4933}
4934
4936 NodeType known_type;
4937 if (EnsureType(object, NodeType::kJSReceiver, &known_type))
4938 return ReduceResult::Done();
4939 if (IsEmptyNodeType(CombineType(GetType(object), NodeType::kJSReceiver))) {
4940 return EmitUnconditionalDeopt(DeoptimizeReason::kWrongInstanceType);
4941 }
4942 AddNewNode<CheckInstanceType>({object}, GetCheckType(known_type),
4943 FIRST_JS_RECEIVER_TYPE, LAST_JS_RECEIVER_TYPE);
4944 return ReduceResult::Done();
4945}
4946
4948 ValueNode* object) {
4949 NodeType known_type;
4950 if (EnsureType(object, NodeType::kJSReceiverOrNullOrUndefined, &known_type)) {
4951 return ReduceResult::Done();
4952 }
4954 NodeType::kJSReceiverOrNullOrUndefined))) {
4956 DeoptimizeReason::kNotAJavaScriptObjectOrNullOrUndefined);
4957 }
4959 GetCheckType(known_type));
4960 return ReduceResult::Done();
4961}
4962
4963namespace {
4964
4965class KnownMapsMerger {
4966 public:
4967 explicit KnownMapsMerger(compiler::JSHeapBroker* broker, Zone* zone,
4969 : broker_(broker), zone_(zone), requested_maps_(requested_maps) {}
4970
4971 void IntersectWithKnownNodeAspects(
4972 ValueNode* object, const KnownNodeAspects& known_node_aspects) {
4973 auto node_info_it = known_node_aspects.FindInfo(object);
4974 bool has_node_info = known_node_aspects.IsValid(node_info_it);
4975 NodeType type =
4976 has_node_info ? node_info_it->second.type() : NodeType::kUnknown;
4977 if (has_node_info && node_info_it->second.possible_maps_are_known()) {
4978 // TODO(v8:7700): Make intersection non-quadratic.
4979 for (compiler::MapRef possible_map :
4980 node_info_it->second.possible_maps()) {
4981 if (std::find(requested_maps_.begin(), requested_maps_.end(),
4982 possible_map) != requested_maps_.end()) {
4983 // No need to add dependencies, we already have them for all known
4984 // possible maps.
4985 // Filter maps which are impossible given this object's type. Since we
4986 // want to prove that an object with map `map` is not an instance of
4987 // `type`, we cannot use `StaticTypeForMap`, as it only provides an
4988 // approximation. This filtering is done to avoid creating
4989 // non-sensical types later (e.g. if we think only a non-string map
4990 // is possible, after a string check).
4991 if (IsInstanceOfNodeType(possible_map, type, broker_)) {
4992 InsertMap(possible_map);
4993 }
4994 } else {
4996 }
4997 }
4998 if (intersect_set_.is_empty()) {
4999 node_type_ = NodeType::kUnknown;
5000 }
5001 } else {
5002 // A missing entry here means the universal set, i.e., we don't know
5003 // anything about the possible maps of the object. Intersect with the
5004 // universal set, which means just insert all requested maps.
5007 for (compiler::MapRef map : requested_maps_) {
5008 InsertMap(map);
5009 }
5010 }
5011 }
5012
5013 void UpdateKnownNodeAspects(ValueNode* object,
5014 KnownNodeAspects& known_node_aspects) {
5015 // Update known maps.
5016 auto node_info = known_node_aspects.GetOrCreateInfoFor(
5017 object, broker_, broker_->local_isolate());
5018 node_info->SetPossibleMaps(intersect_set_, any_map_is_unstable_, node_type_,
5019 broker_);
5020 // Make sure known_node_aspects.any_map_for_any_node_is_unstable is updated
5021 // in case any_map_is_unstable changed to true for this object -- this can
5022 // happen if this was an intersection with the universal set which added new
5023 // possible unstable maps.
5024 if (any_map_is_unstable_) {
5025 known_node_aspects.any_map_for_any_node_is_unstable = true;
5026 }
5027 // At this point, known_node_aspects.any_map_for_any_node_is_unstable may be
5028 // true despite there no longer being any unstable maps for any nodes (if
5029 // this was the only node with unstable maps and this intersection removed
5030 // those). This is ok, because that's at worst just an overestimate -- we
5031 // could track whether this node's any_map_is_unstable flipped from true to
5032 // false, but this is likely overkill.
5033 // Insert stable map dependencies which weren't inserted yet. This is only
5034 // needed if our set of known maps was empty and we created it anew based on
5035 // maps we checked.
5036 if (!existing_known_maps_found_) {
5037 for (compiler::MapRef map : intersect_set_) {
5038 if (map.is_stable()) {
5039 broker_->dependencies()->DependOnStableMap(map);
5040 }
5041 }
5042 } else {
5043 // TODO(victorgomes): Add a DCHECK_SLOW that checks if the maps already
5044 // exist in the CompilationDependencySet.
5045 }
5046 }
5047
5048 bool known_maps_are_subset_of_requested_maps() const {
5050 }
5051 bool emit_check_with_migration() const { return emit_check_with_migration_; }
5052
5053 const compiler::ZoneRefSet<Map>& intersect_set() const {
5054 return intersect_set_;
5055 }
5056
5057 NodeType node_type() const { return node_type_; }
5058
5059 private:
5060 compiler::JSHeapBroker* broker_;
5062 base::Vector<const compiler::MapRef> requested_maps_;
5069
5070 Zone* zone() const { return zone_; }
5071
5072 void InsertMap(compiler::MapRef map) {
5073 if (map.is_migration_target()) {
5075 }
5076 NodeType new_type = StaticTypeForMap(map, broker_);
5077 if (new_type == NodeType::kHeapNumber) {
5078 new_type = IntersectType(new_type, NodeType::kSmi);
5079 }
5080 node_type_ = IntersectType(node_type_, new_type);
5081 if (!map.is_stable()) {
5082 any_map_is_unstable_ = true;
5083 }
5084 intersect_set_.insert(map, zone());
5085 }
5086};
5087
5088} // namespace
5089
5092 std::optional<ValueNode*> map,
5093 bool has_deprecated_map_without_migration_target) {
5094 // TODO(verwaest): Support other objects with possible known stable maps as
5095 // well.
5096 if (compiler::OptionalHeapObjectRef constant = TryGetConstant(object)) {
5097 // For constants with stable maps that match one of the desired maps, we
5098 // don't need to emit a map check, and can use the dependency -- we
5099 // can't do this for unstable maps because the constant could migrate
5100 // during compilation.
5101 compiler::MapRef constant_map = constant.value().map(broker());
5102 if (std::find(maps.begin(), maps.end(), constant_map) != maps.end()) {
5103 if (constant_map.is_stable()) {
5104 broker()->dependencies()->DependOnStableMap(constant_map);
5105 return ReduceResult::Done();
5106 }
5107 // TODO(verwaest): Reduce maps to the constant map.
5108 } else {
5109 // TODO(leszeks): Insert an unconditional deopt if the constant map
5110 // doesn't match the required map.
5111 }
5112 }
5113
5114 NodeInfo* known_info = GetOrCreateInfoFor(object);
5115
5116 // Calculate whether the known maps are a subset of the requested maps, their
5117 // intersection, and whether we should emit a check with migration.
5118 KnownMapsMerger merger(broker(), zone(), maps);
5119 merger.IntersectWithKnownNodeAspects(object, known_node_aspects());
5120
5121 // If the known maps are the subset of the maps to check, we are done.
5122 if (merger.known_maps_are_subset_of_requested_maps()) {
5123 // The node type of known_info can get out of sync with the possible maps.
5124 // For instance after merging with an effectively dead branch (i.e., check
5125 // contradicting all possible maps).
5126 // TODO(olivf) Try to combine node_info and possible maps and ensure that
5127 // narrowing the type also clears impossible possible_maps.
5128 if (!NodeTypeIs(known_info->type(), merger.node_type())) {
5129 known_info->IntersectType(merger.node_type());
5130 }
5131#ifdef DEBUG
5132 // Double check that, for every possible map, it's one of the maps we'd
5133 // want to check.
5134 for (compiler::MapRef possible_map :
5135 known_node_aspects().TryGetInfoFor(object)->possible_maps()) {
5136 DCHECK_NE(std::find(maps.begin(), maps.end(), possible_map), maps.end());
5137 }
5138#endif
5139 return ReduceResult::Done();
5140 }
5141
5142 if (merger.intersect_set().is_empty()) {
5143 return EmitUnconditionalDeopt(DeoptimizeReason::kWrongMap);
5144 }
5145
5146 // TODO(v8:7700): If the size of {maps} - {known_maps} is smaller than that of
5147 // {maps} \intersect {known_maps}, we can emit CheckNotMaps instead.
5148
5149 // Emit checks.
5150 if (merger.emit_check_with_migration()) {
5151 AddNewNode<CheckMapsWithMigration>({object}, merger.intersect_set(),
5152 GetCheckType(known_info->type()));
5153 } else if (has_deprecated_map_without_migration_target) {
5155 {object}, merger.intersect_set(), GetCheckType(known_info->type()));
5156 } else if (map) {
5158 merger.intersect_set());
5159 } else {
5160 AddNewNode<CheckMaps>({object}, merger.intersect_set(),
5161 GetCheckType(known_info->type()));
5162 }
5163
5164 merger.UpdateKnownNodeAspects(object, known_node_aspects());
5165 return ReduceResult::Done();
5166}
5167
5169 ValueNode* heap_object, ValueNode* object_map,
5170 const ZoneVector<compiler::MapRef>& transition_sources,
5171 compiler::MapRef transition_target) {
5172 // TODO(marja): Optimizations based on what we know about the intersection of
5173 // known maps and transition sources or transition target.
5174
5175 // TransitionElementsKind doesn't happen in cases where we'd need to do
5176 // CheckMapsWithMigration instead of CheckMaps.
5177 CHECK(!transition_target.is_migration_target());
5178 for (const compiler::MapRef transition_source : transition_sources) {
5179 CHECK(!transition_source.is_migration_target());
5180 }
5181
5182 NodeInfo* known_info = GetOrCreateInfoFor(heap_object);
5183
5185 {heap_object, object_map}, transition_sources, transition_target);
5186 // After this operation, heap_object's map is transition_target (or we
5187 // deopted).
5188 known_info->SetPossibleMaps(
5189 PossibleMaps{transition_target}, !transition_target.is_stable(),
5190 StaticTypeForMap(transition_target, broker()), broker());
5191 DCHECK(transition_target.IsJSReceiverMap());
5192 if (!transition_target.is_stable()) {
5194 } else {
5195 broker()->dependencies()->DependOnStableMap(transition_target);
5196 }
5197 return ReduceResult::Done();
5198}
5199
5201 ValueNode* heap_object, ValueNode* object_map,
5203 std::optional<MaglevSubGraphBuilder::Label>& if_not_matched) {
5204 GetOrCreateInfoFor(heap_object);
5205 KnownMapsMerger merger(broker(), zone(), maps);
5206 merger.IntersectWithKnownNodeAspects(heap_object, known_node_aspects());
5207
5208 if (merger.intersect_set().is_empty()) {
5210 }
5211
5212 // TODO(pthier): Support map packing.
5214
5215 // TODO(pthier): Handle map migrations.
5216 std::optional<MaglevSubGraphBuilder::Label> map_matched;
5217 const compiler::ZoneRefSet<Map>& relevant_maps = merger.intersect_set();
5218 if (relevant_maps.size() > 1) {
5219 map_matched.emplace(sub_graph, static_cast<int>(relevant_maps.size()));
5220 for (size_t map_index = 1; map_index < relevant_maps.size(); map_index++) {
5222 &*map_matched,
5223 {object_map, GetConstant(relevant_maps.at(map_index))});
5224 }
5225 }
5226 if_not_matched.emplace(sub_graph, 1);
5228 &*if_not_matched, {object_map, GetConstant(relevant_maps.at(0))});
5229 if (map_matched.has_value()) {
5230 sub_graph->Goto(&*map_matched);
5231 sub_graph->Bind(&*map_matched);
5232 }
5233 merger.UpdateKnownNodeAspects(heap_object, known_node_aspects());
5234 return ReduceResult::Done();
5235}
5236
5238 ValueNode* heap_object, ValueNode* object_map,
5239 const ZoneVector<compiler::MapRef>& transition_sources,
5240 compiler::MapRef transition_target, MaglevSubGraphBuilder* sub_graph,
5241 std::optional<MaglevSubGraphBuilder::Label>& if_not_matched) {
5242 DCHECK(!transition_target.is_migration_target());
5243
5244 NodeInfo* known_info = GetOrCreateInfoFor(heap_object);
5245
5246 // TODO(pthier): Calculate and use the intersection of known maps with
5247 // (transition_sources union transition_target).
5248
5250 {heap_object, object_map}, transition_sources, transition_target);
5251
5252 // TODO(pthier): Support map packing.
5254 if_not_matched.emplace(sub_graph, 1);
5256 &*if_not_matched, {new_map, GetConstant(transition_target)});
5257 // After the branch, object's map is transition_target.
5258 DCHECK(transition_target.IsJSReceiverMap());
5259 known_info->SetPossibleMaps(
5260 PossibleMaps{transition_target}, !transition_target.is_stable(),
5261 StaticTypeForMap(transition_target, broker()), broker());
5262 if (!transition_target.is_stable()) {
5264 } else {
5265 broker()->dependencies()->DependOnStableMap(transition_target);
5266 }
5267 return ReduceResult::Done();
5268}
5269
5270namespace {
5271AllocationBlock* GetAllocation(ValueNode* object) {
5272 if (object->Is<InlinedAllocation>()) {
5273 object = object->Cast<InlinedAllocation>()->input(0).node();
5274 }
5275 if (object->Is<AllocationBlock>()) {
5276 return object->Cast<AllocationBlock>();
5277 }
5278 return nullptr;
5279}
5280} // namespace
5281
5283 ValueNode* value) {
5284 if (value->Is<RootConstant>() || value->Is<ConsStringMap>()) return true;
5285 if (CheckType(value, NodeType::kSmi)) {
5287 return true;
5288 }
5289
5290 // No need for a write barrier if both object and value are part of the same
5291 // folded young allocation.
5292 AllocationBlock* allocation = GetAllocation(object);
5293 if (allocation != nullptr && current_allocation_block_ == allocation &&
5294 allocation->allocation_type() == AllocationType::kYoung &&
5295 allocation == GetAllocation(value)) {
5296 allocation->set_elided_write_barriers_depend_on_type();
5297 return true;
5298 }
5299
5300 // If tagged and not a Smi, we cannot elide the write barrier.
5301 if (value->is_tagged()) return false;
5302
5303 // If its alternative conversion node is Smi, {value} will be converted to
5304 // a Smi when tagged.
5305 NodeInfo* node_info = GetOrCreateInfoFor(value);
5306 if (ValueNode* tagged_alt = node_info->alternative().tagged()) {
5307 DCHECK(tagged_alt->properties().is_conversion());
5308 return CheckType(tagged_alt, NodeType::kSmi);
5309 }
5310 return false;
5311}
5312
5314 ValueNode* value, int offset) {
5315 const bool value_is_trusted = value->Is<TrustedConstant>();
5316 DCHECK(value->is_tagged());
5317 if (InlinedAllocation* inlined_value = value->TryCast<InlinedAllocation>()) {
5318 // Add to the escape set.
5319 auto escape_deps = graph()->allocations_escape_map().find(object);
5320 CHECK(escape_deps != graph()->allocations_escape_map().end());
5321 escape_deps->second.push_back(inlined_value);
5322 // Add to the elided set.
5323 auto& elided_map = graph()->allocations_elide_map();
5324 auto elided_deps = elided_map.try_emplace(inlined_value, zone()).first;
5325 elided_deps->second.push_back(object);
5326 inlined_value->AddNonEscapingUses();
5327 }
5328 if (value_is_trusted) {
5330 value->Cast<TrustedConstant>()->tag(),
5332 } else {
5333 BuildStoreTaggedField(object, value, offset,
5335 }
5336}
5337
5338namespace {
5339bool IsEscaping(Graph* graph, InlinedAllocation* alloc) {
5340 if (alloc->IsEscaping()) return true;
5341 auto it = graph->allocations_elide_map().find(alloc);
5342 if (it == graph->allocations_elide_map().end()) return false;
5343 for (InlinedAllocation* inner_alloc : it->second) {
5344 if (IsEscaping(graph, inner_alloc)) {
5345 return true;
5346 }
5347 }
5348 return false;
5349}
5350
5351bool VerifyIsNotEscaping(VirtualObjectList vos, InlinedAllocation* alloc) {
5352 for (VirtualObject* vo : vos) {
5353 if (vo->allocation() == alloc) continue;
5354 bool escaped = false;
5355 vo->ForEachInput([&](ValueNode* nested_value) {
5356 if (escaped) return;
5357 if (!nested_value->Is<InlinedAllocation>()) return;
5358 ValueNode* nested_alloc = nested_value->Cast<InlinedAllocation>();
5359 if (nested_alloc == alloc) {
5360 if (vo->allocation()->IsEscaping() ||
5361 !VerifyIsNotEscaping(vos, vo->allocation())) {
5362 escaped = true;
5363 }
5364 }
5365 });
5366 if (escaped) return false;
5367 }
5368 return true;
5369}
5370} // namespace
5371
5373 TrackObjectMode mode) {
5374 DCHECK(!receiver->Is<VirtualObject>());
5375 if (!v8_flags.maglev_object_tracking) return false;
5376 if (!receiver->Is<InlinedAllocation>()) return false;
5378 if (mode == TrackObjectMode::kStore) {
5379 // If we have two objects A and B, such that A points to B (it contains B in
5380 // one of its fields), we cannot change B without also changing A, even if
5381 // both can be elided. For now, we escape both objects instead.
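// Illustrative example (added comment, not from the original source): for
//   let a = {b: {}}; a.b.x = 1;
// if both `a` and `a.b` are tracked as virtual objects, mutating `a.b` would
// also require updating the copy captured inside `a`'s virtual object, so
// both are conservatively escaped here.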
5382 if (graph_->allocations_elide_map().find(alloc) !=
5383 graph_->allocations_elide_map().end()) {
5384 return false;
5385 }
5386 if (alloc->IsEscaping()) return false;
5387 // Ensure object is escaped if we are within a try-catch block. This is
5388 // crucial because a deoptimization point inside the catch handler could
5389 // re-materialize objects differently, depending on whether the throw
5390 // occurred before or after this store. We could potentially relax this
5391 // requirement by verifying that no throwable nodes have been emitted since
5392 // the try-block started, but for now, err on the side of caution and
5393 // always escape.
5394 if (IsInsideTryBlock()) return false;
5395 } else {
5397 if (IsEscaping(graph_, alloc)) return false;
5398 }
5399 // We don't support loop phis inside VirtualObjects, so any access inside a
5400 // loop should escape the object, except for objects that were created since
5401 // the last loop header.
5402 if (IsInsideLoop()) {
5403 if (!is_loop_effect_tracking() ||
5404 !loop_effects_->allocations.contains(alloc)) {
5405 return false;
5406 }
5407 }
5408 // Iterate all live objects to be sure that the allocation is not escaping.
5410 VerifyIsNotEscaping(current_interpreter_frame_.virtual_objects(), alloc));
5411 return true;
5412}
5413
5415 InlinedAllocation* allocation) {
5416 VirtualObject* vobject = allocation->object();
5417 // If it hasn't been snapshotted yet, it is the latest created version of this
5418 // object, so we don't need to search for it.
5419 if (vobject->IsSnapshot()) {
5420 vobject = current_interpreter_frame_.virtual_objects().FindAllocatedWith(
5421 allocation);
5422 }
5423 return vobject;
5424}
5425
5427 InlinedAllocation* allocation) {
5428 VirtualObject* vobject = allocation->object();
5429 // If it hasn't been snapshotted yet, it is the latest created version of this
5430 // object and we can still modify it, so we don't need to copy it.
5431 if (vobject->IsSnapshot()) {
5432 return DeepCopyVirtualObject(
5433 current_interpreter_frame_.virtual_objects().FindAllocatedWith(
5434 allocation));
5435 }
5436 return vobject;
5437}
5438
5440 ValueNode* value,
5441 int offset) {
5442 if (offset == HeapObject::kMapOffset) return;
5443 if (!CanTrackObjectChanges(object, TrackObjectMode::kStore)) return;
5444 // This avoids creating loops in the object graph.
5445 if (value->Is<InlinedAllocation>()) return;
5446 InlinedAllocation* allocation = object->Cast<InlinedAllocation>();
5447 VirtualObject* vobject = GetModifiableObjectFromAllocation(allocation);
5448 CHECK_NOT_NULL(vobject);
5449 CHECK_EQ(vobject->type(), VirtualObject::kDefault);
5450 vobject->set(offset, value);
5451 AddNonEscapingUses(allocation, 1);
5452 if (v8_flags.trace_maglev_object_tracking) {
5453 std::cout << " * Setting value in virtual object "
5454 << PrintNodeLabel(graph_labeller(), vobject) << "[" << offset
5455 << "]: " << PrintNode(graph_labeller(), value) << std::endl;
5456 }
5457}
5458
5460 ValueNode* value, int offset,
5461 StoreTaggedMode store_mode) {
5462 // The value may be used to initialize a virtual object (VO), which can leak
5463 // to the interpreter frame state (IFS). It should NOT be a conversion node,
5464 // UNLESS it's an initializing value. Initializing values are tagged before
5465 // allocation, since conversion nodes may allocate, and are not used to set a VO.
5467 !value->properties().is_conversion());
5468 if (store_mode != StoreTaggedMode::kInitializing) {
5470 }
5471 if (CanElideWriteBarrier(object, value)) {
5472 return AddNewNode<StoreTaggedFieldNoWriteBarrier>({object, value}, offset,
5473 store_mode);
5474 } else {
5475 // Detect stores that would create old-to-new references and pretenure the
5476 // value.
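// Illustrative note (added comment, not from the original source): storing a
// young-generation value into an old-generation (pretenured) object creates
// an old-to-new reference that needs a remembered-set entry; pretenuring the
// stored allocation as well avoids that.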
5477 if (v8_flags.maglev_pretenure_store_values) {
5478 if (auto alloc = object->TryCast<InlinedAllocation>()) {
5479 if (alloc->allocation_block()->allocation_type() ==
5481 if (auto value_alloc = value->TryCast<InlinedAllocation>()) {
5482 value_alloc->allocation_block()->TryPretenure();
5483 }
5484 }
5485 }
5486 }
5488 store_mode);
5489 }
5490}
5491
5493 ValueNode* object, ValueNode* value, int offset,
5494 StoreTaggedMode store_mode) {
5495 // The value may be used to initialize a virtual object (VO), which can leak
5496 // to the interpreter frame state (IFS). It should NOT be a conversion node,
5497 // UNLESS it's an initializing value. Initializing values are tagged before
5498 // allocation, since conversion nodes may allocate, and are not used to set a VO.
5500 !value->properties().is_conversion());
5501 DCHECK(CanElideWriteBarrier(object, value));
5502 if (store_mode != StoreTaggedMode::kInitializing) {
5504 }
5506 store_mode);
5507}
5508
5510 ValueNode* object, ValueNode* value, int offset, IndirectPointerTag tag,
5511 StoreTaggedMode store_mode) {
5512#ifdef V8_ENABLE_SANDBOX
5514 tag, store_mode);
5515#else
5516 BuildStoreTaggedField(object, value, offset, store_mode);
5517#endif // V8_ENABLE_SANDBOX
5518}
5519
5521 int index) {
5522 compiler::OptionalHeapObjectRef maybe_constant;
5523 if ((maybe_constant = TryGetConstant(elements)) &&
5524 maybe_constant.value().IsFixedArray()) {
5525 compiler::FixedArrayRef fixed_array_ref =
5526 maybe_constant.value().AsFixedArray();
5527 if (index >= 0 && static_cast<uint32_t>(index) < fixed_array_ref.length()) {
5528 compiler::OptionalObjectRef maybe_value =
5529 fixed_array_ref.TryGet(broker(), index);
5530 if (maybe_value) return GetConstant(*maybe_value);
5531 } else {
5532 return GetRootConstant(RootIndex::kTheHoleValue);
5533 }
5534 }
5536 VirtualObject* vobject =
5539 DCHECK(vobject->map().IsFixedArrayMap());
5540 ValueNode* length_node = vobject->get(offsetof(FixedArray, length_));
5541 if (auto length = TryGetInt32Constant(length_node)) {
5542 if (index >= 0 && index < length.value()) {
5543 return vobject->get(FixedArray::OffsetOfElementAt(index));
5544 } else {
5545 return GetRootConstant(RootIndex::kTheHoleValue);
5546 }
5547 }
5548 }
5550 return GetRootConstant(RootIndex::kTheHoleValue);
5551 }
5552 return AddNewNode<LoadTaggedField>({elements},
5554}
5555
5557 ValueNode* index) {
5558 if (auto constant = TryGetInt32Constant(index)) {
5559 return BuildLoadFixedArrayElement(elements, constant.value());
5560 }
5561 return AddNewNode<LoadFixedArrayElement>({elements, index});
5562}
5563
5565 ValueNode* index,
5566 ValueNode* value) {
5567 // TODO(victorgomes): Support storing an element to a virtual object. If we
5568 // modify the elements array, we need to modify the original object to point
5569 // to the new elements array.
5570 if (CanElideWriteBarrier(elements, value)) {
5572 } else {
5574 {elements, index, value});
5575 }
5576}
5577
5579 ValueNode* elements, int index) {
5581 VirtualObject* vobject =
5583 compiler::FixedDoubleArrayRef elements_array = vobject->double_elements();
5584 if (index >= 0 && static_cast<uint32_t>(index) < elements_array.length()) {
5585 Float64 value = elements_array.GetFromImmutableFixedDoubleArray(index);
5586 return GetFloat64Constant(value.get_scalar());
5587 } else {
5588 return GetRootConstant(RootIndex::kTheHoleValue);
5589 }
5590 }
5592 return GetRootConstant(RootIndex::kTheHoleValue);
5593 }
5595 {elements, GetInt32Constant(index)});
5596}
5597
5599 ValueNode* elements, ValueNode* index) {
5600 if (auto constant = TryGetInt32Constant(index)) {
5601 return BuildLoadFixedDoubleArrayElement(elements, constant.value());
5602 }
5603 return AddNewNode<LoadFixedDoubleArrayElement>({elements, index});
5604}
5605
5607 ValueNode* index,
5608 ValueNode* value) {
5609 // TODO(victorgomes): Support storing a double element to a virtual object.
5611}
5612
5614 ValueNode* elements, ValueNode* index, bool convert_hole) {
5615 if (convert_hole) {
5616 return AddNewNode<LoadHoleyFixedDoubleArrayElement>({elements, index});
5617 } else {
5619 {elements, index});
5620 }
5621}
5622
5624 base::Vector<const compiler::MapRef> const& receiver_maps) {
5625 // Check if all {receiver_maps} have one of the initial Array.prototype
5626 // or Object.prototype objects as their prototype (in any of the current
5627 // native contexts, as the global Array protector works isolate-wide).
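// Illustrative note (added comment, not from the original source): this is
// what lets a holey load such as
//   [ , 1 ][0]   // undefined
// be lowered to the undefined constant instead of consulting the prototype
// chain, as long as nobody has installed indexed properties on
// Array.prototype or Object.prototype.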
5628 for (compiler::MapRef receiver_map : receiver_maps) {
5629 compiler::ObjectRef receiver_prototype = receiver_map.prototype(broker());
5630 if (!receiver_prototype.IsJSObject() ||
5631 !broker()->IsArrayOrObjectPrototype(receiver_prototype.AsJSObject())) {
5632 return false;
5633 }
5634 }
5635
5636 // Check if the array prototype chain is intact.
5638}
5639
5640compiler::OptionalObjectRef
5642 compiler::PropertyAccessInfo const& access_info) {
5645 DCHECK(access_info.holder().has_value());
5646
5647 compiler::OptionalObjectRef constant =
5648 access_info.holder()->GetOwnDictionaryProperty(
5649 broker(), access_info.dictionary_index(), broker()->dependencies());
5650 if (!constant.has_value()) return {};
5651
5652 for (compiler::MapRef map : access_info.lookup_start_object_maps()) {
5653 DirectHandle<Map> map_handle = map.object();
5654 // Non-JSReceivers that passed AccessInfoFactory::ComputePropertyAccessInfo
5655 // must have a different lookup start map.
5656 if (!IsJSReceiverMap(*map_handle)) {
5657 // Perform the implicit ToObject for primitives here.
5658 // Implemented according to ES6 section 7.3.2 GetV (V, P).
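// Illustrative note (added comment, not from the original source): e.g. a
// load like "foo".toUpperCase starts at a primitive string, so the lookup
// proceeds on the initial map of the corresponding String wrapper
// constructor instead.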
5659 Tagged<JSFunction> constructor =
5661 *map_handle, *broker()->target_native_context().object())
5662 .value();
5663 // {constructor.initial_map()} is loaded/stored with acquire-release
5664 // semantics for constructors.
5665 map = MakeRefAssumeMemoryFence(broker(), constructor->initial_map());
5666 DCHECK(IsJSObjectMap(*map.object()));
5667 }
5669 map, access_info.name(), constant.value(), PropertyKind::kData);
5670 }
5671
5672 return constant;
5673}
5674
5676 compiler::PropertyAccessInfo const& access_info,
5677 ValueNode* lookup_start_object) {
5678 if (!access_info.IsFastDataConstant()) return {};
5679 if (access_info.holder().has_value()) {
5680 return access_info.holder();
5681 }
5682 if (compiler::OptionalHeapObjectRef c = TryGetConstant(lookup_start_object)) {
5683 if (c.value().IsJSObject()) {
5684 return c.value().AsJSObject();
5685 }
5686 }
5687 return {};
5688}
5689
5691 compiler::JSObjectRef holder,
5692 compiler::PropertyAccessInfo const& access_info) {
5693 DCHECK(!access_info.field_representation().IsDouble());
5694 return holder.GetOwnFastConstantDataProperty(
5695 broker(), access_info.field_representation(), access_info.field_index(),
5696 broker()->dependencies());
5697}
5698
5700 compiler::JSObjectRef holder,
5701 compiler::PropertyAccessInfo const& access_info) {
5702 DCHECK(access_info.field_representation().IsDouble());
5704 broker(), access_info.field_index(), broker()->dependencies());
5705}
5706
5708 compiler::PropertyAccessInfo const& access_info, ValueNode* receiver,
5709 ValueNode* lookup_start_object) {
5710 compiler::ObjectRef constant = access_info.constant().value();
5711
5712 if (access_info.IsDictionaryProtoAccessorConstant()) {
5713 // For fast mode holders we recorded dependencies in BuildPropertyLoad.
5714 for (const compiler::MapRef map : access_info.lookup_start_object_maps()) {
5716 map, access_info.name(), constant, PropertyKind::kAccessor);
5717 }
5718 }
5719
5720 // Introduce the call to the getter function.
5721 if (constant.IsJSFunction()) {
5722 ConvertReceiverMode receiver_mode =
5723 receiver == lookup_start_object
5726 CallArguments args(receiver_mode, {receiver});
5727 return TryReduceCallForConstant(constant.AsJSFunction(), args);
5728 } else {
5729 // Disable optimizations for super ICs using API getters, so that we get
5730 // the correct receiver checks.
5731 if (receiver != lookup_start_object) return {};
5732 compiler::FunctionTemplateInfoRef templ = constant.AsFunctionTemplateInfo();
5734
5735 return TryReduceCallForApiFunction(templ, {}, args);
5736 }
5737}
5738
5740 compiler::PropertyAccessInfo const& access_info, ValueNode* receiver,
5741 ValueNode* lookup_start_object, ValueNode* value) {
5742 // Setting super properties shouldn't end up here.
5743 DCHECK_EQ(receiver, lookup_start_object);
5744 compiler::ObjectRef constant = access_info.constant().value();
5745 if (constant.IsJSFunction()) {
5747 {receiver, value});
5748 RETURN_IF_ABORT(TryReduceCallForConstant(constant.AsJSFunction(), args));
5749 } else {
5750 compiler::FunctionTemplateInfoRef templ = constant.AsFunctionTemplateInfo();
5752 {receiver, value});
5754 }
5755 // Ignore the return value of the setter call.
5756 return ReduceResult::Done();
5757}
5758
5760 compiler::PropertyAccessInfo const& access_info,
5761 ValueNode* lookup_start_object, compiler::NameRef name) {
5762 compiler::OptionalJSObjectRef constant_holder =
5763 TryGetConstantDataFieldHolder(access_info, lookup_start_object);
5764 if (constant_holder) {
5765 if (access_info.field_representation().IsDouble()) {
5766 std::optional<Float64> constant =
5767 TryFoldLoadConstantDoubleField(constant_holder.value(), access_info);
5768 if (constant.has_value()) {
5769 return GetFloat64Constant(constant.value());
5770 }
5771 } else {
5772 compiler::OptionalObjectRef constant =
5773 TryFoldLoadConstantDataField(constant_holder.value(), access_info);
5774 if (constant.has_value()) {
5775 return GetConstant(constant.value());
5776 }
5777 }
5778 }
5779 // Resolve property holder.
5780 ValueNode* load_source;
5781 if (access_info.holder().has_value()) {
5782 load_source = GetConstant(access_info.holder().value());
5783 } else {
5784 load_source = lookup_start_object;
5785 }
5786
5787 FieldIndex field_index = access_info.field_index();
5788 if (!field_index.is_inobject()) {
5789 // The field is in the property array; first load it from there.
5790 load_source =
5791 BuildLoadTaggedField(load_source, JSReceiver::kPropertiesOrHashOffset);
5792 }
5793
5794 // Do the load.
5795 if (field_index.is_double()) {
5796 return AddNewNode<LoadDoubleField>({load_source}, field_index.offset());
5797 }
5799 load_source, field_index.offset(), name);
5800 // Insert stable field information if present.
5801 if (access_info.field_representation().IsSmi()) {
5802 NodeInfo* known_info = GetOrCreateInfoFor(value);
5803 known_info->CombineType(NodeType::kSmi);
5804 } else if (access_info.field_representation().IsHeapObject()) {
5805 NodeInfo* known_info = GetOrCreateInfoFor(value);
5806 if (access_info.field_map().has_value() &&
5807 access_info.field_map().value().is_stable()) {
5808 DCHECK(access_info.field_map().value().IsJSReceiverMap());
5809 auto map = access_info.field_map().value();
5810 known_info->SetPossibleMaps(PossibleMaps{map}, false,
5811 StaticTypeForMap(map, broker()), broker());
5813 } else {
5814 known_info->CombineType(NodeType::kAnyHeapObject);
5815 }
5816 }
5817 return value;
5818}
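// Rough shape of the loads emitted above (a sketch; offsets are symbolic):
//
//   in-object field:      value = LoadTaggedField(lookup_start, offset)
//   out-of-object field:  props = LoadTaggedField(lookup_start,
//                                                 kPropertiesOrHashOffset)
//                         value = LoadTaggedField(props, offset)
//   double field:         value = LoadDoubleField(source, offset)
//
// Known Smi / stable-map information is then attached to the result so later
// accesses can avoid redundant checks.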
5819
5821 ValueNode* fixed_array) {
5822 ValueNode* length =
5823 BuildLoadTaggedField(fixed_array, offsetof(FixedArray, length_));
5824 EnsureType(length, NodeType::kSmi);
5825 return length;
5826}
5827
5829 NodeType length_type) {
5830 // TODO(leszeks): JSArray.length is known to be non-constant, don't bother
5831 // searching the constant values.
5832 MaybeReduceResult known_length =
5833 TryReuseKnownPropertyLoad(js_array, broker()->length_string());
5834 if (known_length.IsDone()) {
5835 DCHECK(known_length.IsDoneWithValue());
5836 return known_length.value();
5837 }
5838
5840 js_array, JSArray::kLengthOffset, broker()->length_string());
5841 GetOrCreateInfoFor(length)->CombineType(length_type);
5842 RecordKnownProperty(js_array, broker()->length_string(), length, false,
5844 return length;
5845}
5846
5849 AddNewNode<StoreMap>({object}, map, kind);
5850 NodeType object_type = StaticTypeForMap(map, broker());
5851 NodeInfo* node_info = GetOrCreateInfoFor(object);
5852 if (map.is_stable()) {
5853 node_info->SetPossibleMaps(PossibleMaps{map}, false, object_type, broker());
5855 } else {
5856 node_info->SetPossibleMaps(PossibleMaps{map}, true, object_type, broker());
5858 }
5859}
5860
5862 compiler::MapRef map, ValueNode* receiver, ValueNode* property_array) {
5863 int length = map.NextFreePropertyIndex() - map.GetInObjectProperties();
5864 // Under normal circumstances, NextFreePropertyIndex() will always be larger
5865 // than GetInObjectProperties(). However, an attacker able to corrupt heap
5866 // memory can break this invariant, in which case we'll get confused here,
5867 // potentially causing a sandbox violation. This CHECK defends against that.
5868 SBXCHECK_GE(length, 0);
5869 return AddNewNode<ExtendPropertiesBackingStore>({property_array, receiver},
5870 length);
5871}
5872
5874 compiler::PropertyAccessInfo const& access_info, ValueNode* receiver,
5875 compiler::AccessMode access_mode) {
5876 FieldIndex field_index = access_info.field_index();
5877 Representation field_representation = access_info.field_representation();
5878
5879 compiler::OptionalMapRef original_map;
5880 if (access_info.HasTransitionMap()) {
5881 compiler::MapRef transition = access_info.transition_map().value();
5882 original_map = transition.GetBackPointer(broker()).AsMap();
5883
5884 if (original_map->UnusedPropertyFields() == 0) {
5885 DCHECK(!field_index.is_inobject());
5886 }
5887 if (!field_index.is_inobject()) {
5888 // If slack tracking ends after this compilation started but before it's
5889 // finished, then {original_map} could be out-of-sync with {transition}.
5890 // In particular, its UnusedPropertyFields could be non-zero, which would
5891 // lead us to not extend the property backing store, while the underlying
5892 // Map actually has zero UnusedPropertyFields. Thus, we install a
5893 // dependency on {original_map} now, so that if such a situation happens,
5894 // we'll throw away the code.
5896 }
5897 } else if (access_info.IsFastDataConstant() &&
5898 access_mode == compiler::AccessMode::kStore) {
5899 return EmitUnconditionalDeopt(DeoptimizeReason::kStoreToConstant);
5900 }
5901
5902 ValueNode* store_target;
5903 if (field_index.is_inobject()) {
5904 store_target = receiver;
5905 } else {
5906 // The field is in the property array; first load it from there.
5907 store_target =
5908 BuildLoadTaggedField(receiver, JSReceiver::kPropertiesOrHashOffset);
5909 if (original_map && original_map->UnusedPropertyFields() == 0) {
5910 store_target = BuildExtendPropertiesBackingStore(*original_map, receiver,
5911 store_target);
5912 }
5913 }
5914
5915 if (field_representation.IsDouble()) {
5916 ValueNode* float64_value = GetAccumulator();
5917 if (access_info.HasTransitionMap()) {
5918 // Allocate the mutable double box owned by the field.
5919 ValueNode* value =
5921 BuildStoreTaggedField(store_target, value, field_index.offset(),
5923 BuildStoreMap(receiver, access_info.transition_map().value(),
5925 } else {
5926 AddNewNode<StoreDoubleField>({store_target, float64_value},
5927 field_index.offset());
5928 }
5929 return ReduceResult::Done();
5930 }
5931
5932 ValueNode* value = GetAccumulator();
5933 if (field_representation.IsSmi()) {
5935 } else {
5936 if (field_representation.IsHeapObject()) {
5937 // Emit a map check for the field type if needed, otherwise just a
5938 // HeapObject check.
5939 if (access_info.field_map().has_value()) {
5941 value, base::VectorOf({access_info.field_map().value()})));
5942 } else {
5944 }
5945 }
5946 }
5947
5948 StoreTaggedMode store_mode = access_info.HasTransitionMap()
5951 if (field_representation.IsSmi()) {
5952 BuildStoreTaggedFieldNoWriteBarrier(store_target, value,
5953 field_index.offset(), store_mode);
5954 } else {
5955 DCHECK(field_representation.IsHeapObject() ||
5956 field_representation.IsTagged());
5957 BuildStoreTaggedField(store_target, value, field_index.offset(),
5958 store_mode);
5959 }
5960 if (access_info.HasTransitionMap()) {
5961 BuildStoreMap(receiver, access_info.transition_map().value(),
5963 }
5964
5965 return ReduceResult::Done();
5966}
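// Sketch of the order used for a transitioning store above: the properties
// backing store is extended first (if the original map had no unused property
// fields), then the field value is written, and the map is stored last, so
// the object only observes its new shape once the field already holds a
// valid value.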
5967
5968namespace {
5969bool AccessInfoGuaranteedConst(
5970 compiler::PropertyAccessInfo const& access_info) {
5971 if (!access_info.IsFastDataConstant() && !access_info.IsStringLength()) {
5972 return false;
5973 }
5974
5975 // Even if we have a constant load, if the map is not stable, we cannot
5976 // guarantee that the load is preserved across side-effecting calls.
5977 // TODO(v8:7700): It might be possible to track it as const if we know
5978 // that we're still on the main transition tree; and if we add a
5979 // dependency on the stable end-maps of the entire tree.
5980 for (auto& map : access_info.lookup_start_object_maps()) {
5981 if (!map.is_stable()) {
5982 return false;
5983 }
5984 }
5985 return true;
5986}
5987} // namespace
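// For illustration: a load is recorded as "guaranteed constant" only when the
// access is a fast data constant (or a string length) *and* every lookup
// start map is stable. E.g. a const-marked field on an object whose map can
// still transition is not treated as constant here, since a side-effecting
// call in between could invalidate the cached value.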
5988
5990 ValueNode* receiver, ValueNode* lookup_start_object, compiler::NameRef name,
5991 compiler::PropertyAccessInfo const& access_info) {
5992 if (access_info.holder().has_value() && !access_info.HasDictionaryHolder()) {
5995 access_info.holder().value());
5996 }
5997
5998 switch (access_info.kind()) {
6000 UNREACHABLE();
6002 return GetRootConstant(RootIndex::kUndefinedValue);
6005 ValueNode* result =
6006 BuildLoadField(access_info, lookup_start_object, name);
6007 RecordKnownProperty(lookup_start_object, name, result,
6008 AccessInfoGuaranteedConst(access_info),
6010 return result;
6011 }
6013 compiler::OptionalObjectRef constant =
6015 if (!constant.has_value()) return {};
6016 return GetConstant(constant.value());
6017 }
6020 return TryBuildPropertyGetterCall(access_info, receiver,
6021 lookup_start_object);
6023 ValueNode* cell = GetConstant(access_info.constant().value().AsCell());
6025 cell, Cell::kValueOffset, name);
6026 }
6028 DCHECK_EQ(receiver, lookup_start_object);
6030 RecordKnownProperty(lookup_start_object, name, result,
6031 AccessInfoGuaranteedConst(access_info),
6033 return result;
6034 }
6036 // TODO(dmercadier): update KnownNodeInfo.
6038 lookup_start_object, JSPrimitiveWrapper::kValueOffset);
6039 return AddNewNode<StringLength>({string});
6040 }
6043 if (receiver != lookup_start_object) {
6044 // We're accessing the TypedArray length via a prototype (a TypedArray
6045 // object in the prototype chain, objects below it not having a "length"
6046 // property, reading via super.length). That will throw a TypeError.
6047 // This should never occur in any realistic code, so we can deopt here
6048 // instead of implementing special handling for it.
6049 return EmitUnconditionalDeopt(DeoptimizeReason::kWrongMap);
6050 }
6051 return BuildLoadTypedArrayLength(lookup_start_object,
6052 access_info.elements_kind());
6053 }
6054 }
6055}
6056
6058 ValueNode* receiver, ValueNode* lookup_start_object, compiler::NameRef name,
6059 compiler::PropertyAccessInfo const& access_info,
6060 compiler::AccessMode access_mode) {
6061 if (access_info.holder().has_value()) {
6064 access_info.holder().value());
6065 }
6066
6067 switch (access_info.kind()) {
6069 return TryBuildPropertySetterCall(access_info, receiver,
6070 lookup_start_object, GetAccumulator());
6071 }
6074 MaybeReduceResult res =
6075 TryBuildStoreField(access_info, receiver, access_mode);
6076 if (res.IsDone()) {
6079 AccessInfoGuaranteedConst(access_info), access_mode);
6080 return res;
6081 }
6082 return {};
6083 }
6092 UNREACHABLE();
6093 }
6094}
6095
6097 ValueNode* receiver, ValueNode* lookup_start_object, compiler::NameRef name,
6098 compiler::PropertyAccessInfo const& access_info,
6099 compiler::AccessMode access_mode) {
6100 switch (access_mode) {
6102 return TryBuildPropertyLoad(receiver, lookup_start_object, name,
6103 access_info);
6107 DCHECK_EQ(receiver, lookup_start_object);
6108 return TryBuildPropertyStore(receiver, lookup_start_object, name,
6109 access_info, access_mode);
6111 // TODO(victorgomes): BuildPropertyTest.
6112 return {};
6113 }
6114}
6115
6116template <typename GenericAccessFunc>
6118 ValueNode* receiver, ValueNode* lookup_start_object,
6119 compiler::NamedAccessFeedback const& feedback,
6120 compiler::FeedbackSource const& feedback_source,
6121 compiler::AccessMode access_mode,
6122 GenericAccessFunc&& build_generic_access) {
6123 compiler::ZoneRefSet<Map> inferred_maps;
6124
6125 bool has_deprecated_map_without_migration_target = false;
6126 if (compiler::OptionalHeapObjectRef c = TryGetConstant(lookup_start_object)) {
6127 compiler::MapRef constant_map = c.value().map(broker());
6128 if (c.value().IsJSFunction() &&
6129 feedback.name().equals(broker()->prototype_string())) {
6130 compiler::JSFunctionRef function = c.value().AsJSFunction();
6131 if (!constant_map.has_prototype_slot() ||
6132 !function.has_instance_prototype(broker()) ||
6133 function.PrototypeRequiresRuntimeLookup(broker()) ||
6134 access_mode != compiler::AccessMode::kLoad) {
6135 return {};
6136 }
6137 compiler::HeapObjectRef prototype =
6139 return GetConstant(prototype);
6140 }
6141 inferred_maps = compiler::ZoneRefSet<Map>(constant_map);
6142 } else if (feedback.maps().empty()) {
6143 // The IC is megamorphic.
6144
6145 // We can't do megamorphic loads for lookups where the lookup start isn't
6146 // the receiver (e.g. load from super).
6147 if (receiver != lookup_start_object) return {};
6148
6149 // Use known possible maps if we have any.
6150 NodeInfo* object_info =
6151 known_node_aspects().TryGetInfoFor(lookup_start_object);
6152 if (object_info && object_info->possible_maps_are_known()) {
6153 inferred_maps = object_info->possible_maps();
6154 } else {
6155 // If we have no known maps, make the access megamorphic.
6156 switch (access_mode) {
6159 {GetTaggedValue(receiver), GetConstant(feedback.name())},
6160 feedback_source);
6163 {GetTaggedValue(receiver), GetConstant(feedback.name()),
6165 feedback_source);
6167 return {};
6170 UNREACHABLE();
6171 }
6172 }
6173 } else {
6174 // TODO(leszeks): This is doing duplicate work with BuildCheckMaps,
6175 // consider passing the merger into it.
6176 KnownMapsMerger merger(broker(), zone(), base::VectorOf(feedback.maps()));
6177 merger.IntersectWithKnownNodeAspects(lookup_start_object,
6179 inferred_maps = merger.intersect_set();
6180 has_deprecated_map_without_migration_target =
6181 feedback.has_deprecated_map_without_migration_target();
6182 }
6183
6184 if (inferred_maps.is_empty()) {
6185 return EmitUnconditionalDeopt(DeoptimizeReason::kWrongMap);
6186 }
6187
6189 ZoneVector<compiler::PropertyAccessInfo> access_infos_for_feedback(zone());
6190
6191 for (compiler::MapRef map : inferred_maps) {
6192 if (map.is_deprecated()) continue;
6193
6194 // TODO(v8:12547): Support writing to objects in shared space, which
6195 // need a write barrier that calls Object::Share to ensure the RHS is
6196 // shared.
6197 if (InstanceTypeChecker::IsAlwaysSharedSpaceJSObject(map.instance_type()) &&
6198 access_mode == compiler::AccessMode::kStore) {
6199 return {};
6200 }
6201
6202 compiler::PropertyAccessInfo access_info =
6203 broker()->GetPropertyAccessInfo(map, feedback.name(), access_mode);
6204 access_infos_for_feedback.push_back(access_info);
6205 }
6206
6207 compiler::AccessInfoFactory access_info_factory(broker(), zone());
6208 if (!access_info_factory.FinalizePropertyAccessInfos(
6209 access_infos_for_feedback, access_mode, &access_infos)) {
6210 return {};
6211 }
6212
6213 // Check for monomorphic case.
6214 if (access_infos.size() == 1) {
6215 compiler::PropertyAccessInfo const& access_info = access_infos.front();
6218 if (HasOnlyStringMaps(maps)) {
6219 // Check for string maps before checking if we need to do an access
6220 // check. Primitive strings always get the prototype from the native
6221 // context in which they're operated on, so they don't need the access check.
6222 RETURN_IF_ABORT(BuildCheckString(lookup_start_object));
6223 } else if (HasOnlyNumberMaps(maps)) {
6224 RETURN_IF_ABORT(BuildCheckNumber(lookup_start_object));
6225 } else {
6227 BuildCheckMaps(lookup_start_object, maps, {},
6228 has_deprecated_map_without_migration_target));
6229 }
6230
6231 // Generate the actual property access.
6232 return TryBuildPropertyAccess(receiver, lookup_start_object,
6233 feedback.name(), access_info, access_mode);
6234 } else {
6235 // TODO(victorgomes): Unify control flow logic with
6236 // TryBuildPolymorphicElementAccess.
6238 receiver, lookup_start_object, feedback, access_mode, access_infos,
6239 build_generic_access);
6240 }
6241}
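// Rough decision order implemented above (a sketch):
//   1. Constant lookup-start object: special-case JSFunction.prototype loads,
//      otherwise use its map as the single inferred map.
//   2. Empty feedback maps (megamorphic IC): fall back to the known possible
//      maps if any, otherwise emit a generic megamorphic load/store.
//   3. Otherwise: intersect the feedback maps with what is already known
//      about the node, then build a monomorphic access (single access info)
//      or a polymorphic dispatch over the remaining access infos.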
6242
6245
6246 switch (object->properties().value_representation()) {
6248 return AddNewNode<CheckedIntPtrToInt32>({object});
6250 NodeType old_type;
6251 if (SmiConstant* constant = object->TryCast<SmiConstant>()) {
6252 return GetInt32Constant(constant->value().value());
6253 } else if (CheckType(object, NodeType::kSmi, &old_type)) {
6254 auto& alternative = GetOrCreateInfoFor(object)->alternative();
6255 return alternative.get_or_set_int32(
6256 [&]() { return BuildSmiUntag(object); });
6257 } else {
6258 // TODO(leszeks): Cache this knowledge/converted value somehow on
6259 // the node info.
6260 return AddNewNode<CheckedObjectToIndex>({object},
6261 GetCheckType(old_type));
6262 }
6264 // Already good.
6265 return object;
6269 return GetInt32(object);
6270 }
6271}
6272
6273// TODO(victorgomes): Consider caching the values and adding an
6274// uint32_alternative in node_info.
6276 // Don't record a Uint32 Phi use here, since the tagged path goes via
6277 // GetInt32ElementIndex, making this an Int32 Phi use.
6278
6279 switch (object->properties().value_representation()) {
6281 return AddNewNode<CheckedIntPtrToUint32>({object});
6283 // TODO(victorgomes): Consider creating a CheckedObjectToUnsignedIndex.
6284 if (SmiConstant* constant = object->TryCast<SmiConstant>()) {
6285 int32_t value = constant->value().value();
6286 if (value < 0) {
6287 return EmitUnconditionalDeopt(DeoptimizeReason::kNotUint32);
6288 }
6289 return GetUint32Constant(value);
6290 }
6293 if (Int32Constant* constant = object->TryCast<Int32Constant>()) {
6294 int32_t value = constant->value();
6295 if (value < 0) {
6296 return EmitUnconditionalDeopt(DeoptimizeReason::kNotUint32);
6297 }
6298 return GetUint32Constant(value);
6299 }
6300 return AddNewNode<CheckedInt32ToUint32>({object});
6302 return object;
6304 if (Float64Constant* constant = object->TryCast<Float64Constant>()) {
6305 double value = constant->value().get_scalar();
6306 uint32_t uint32_value;
6307 if (!DoubleToUint32IfEqualToSelf(value, &uint32_value)) {
6308 return EmitUnconditionalDeopt(DeoptimizeReason::kNotUint32);
6309 }
6310 if (Smi::IsValid(uint32_value)) {
6311 return GetUint32Constant(uint32_value);
6312 }
6313 }
6314 [[fallthrough]];
6316 // CheckedTruncateFloat64ToUint32 will gracefully deopt on holes.
6318 }
6319 }
6320}
6321
6323 ValueNode* object, ValueNode* index_object,
6324 compiler::KeyedAccessMode const& keyed_mode) {
6325 // Strings are immutable and `in` cannot be used on strings
6326 if (keyed_mode.access_mode() != compiler::AccessMode::kLoad) {
6327 return {};
6328 }
6329
6330 // Ensure that {object} is actually a String.
6332
6333 ValueNode* length = BuildLoadStringLength(object);
6334 ValueNode* index = GetInt32ElementIndex(index_object);
6335 auto emit_load = [&] { return AddNewNode<StringAt>({object, index}); };
6336
6337 if (LoadModeHandlesOOB(keyed_mode.load_mode()) &&
6338 broker()->dependencies()->DependOnNoElementsProtector()) {
6339 ValueNode* positive_index;
6340 GET_VALUE_OR_ABORT(positive_index, GetUint32ElementIndex(index));
6341 ValueNode* uint32_length = AddNewNode<UnsafeInt32ToUint32>({length});
6342 return Select(
6343 [&](auto& builder) {
6344 return BuildBranchIfUint32Compare(builder, Operation::kLessThan,
6345 positive_index, uint32_length);
6346 },
6347 emit_load, [&] { return GetRootConstant(RootIndex::kUndefinedValue); });
6348 } else {
6350 index, length, AssertCondition::kUnsignedLessThan,
6351 DeoptimizeReason::kOutOfBounds));
6352 return emit_load();
6353 }
6354}
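// For illustration, assuming the no-elements protector holds: an
// out-of-bounds keyed load on a string folds to `undefined` instead of
// deopting, e.g. roughly
//
//   "ab"[5]  // -> undefined (the index u< length select takes the StringAt
//            //    path only when the index is in bounds)
//
// Without OOB handling, an in-bounds condition is asserted instead and an
// out-of-bounds index triggers a deoptimization.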
6355
6356namespace {
6357MaybeReduceResult TryFindLoadedProperty(
6358 const KnownNodeAspects::LoadedPropertyMap& loaded_properties,
6359 ValueNode* lookup_start_object,
6361 auto props_for_name = loaded_properties.find(name);
6362 if (props_for_name == loaded_properties.end()) return {};
6363
6364 auto it = props_for_name->second.find(lookup_start_object);
6365 if (it == props_for_name->second.end()) return {};
6366
6367 return it->second;
6368}
6369
6370bool CheckConditionIn32(int32_t lhs, int32_t rhs, AssertCondition condition) {
6371 switch (condition) {
6372 case AssertCondition::kEqual:
6373 return lhs == rhs;
6374 case AssertCondition::kNotEqual:
6375 return lhs != rhs;
6376 case AssertCondition::kLessThan:
6377 return lhs < rhs;
6378 case AssertCondition::kLessThanEqual:
6379 return lhs <= rhs;
6380 case AssertCondition::kGreaterThan:
6381 return lhs > rhs;
6382 case AssertCondition::kGreaterThanEqual:
6383 return lhs >= rhs;
6384 case AssertCondition::kUnsignedLessThan:
6385 return static_cast<uint32_t>(lhs) < static_cast<uint32_t>(rhs);
6386 case AssertCondition::kUnsignedLessThanEqual:
6387 return static_cast<uint32_t>(lhs) <= static_cast<uint32_t>(rhs);
6388 case AssertCondition::kUnsignedGreaterThan:
6389 return static_cast<uint32_t>(lhs) > static_cast<uint32_t>(rhs);
6390 case AssertCondition::kUnsignedGreaterThanEqual:
6391 return static_cast<uint32_t>(lhs) >= static_cast<uint32_t>(rhs);
6392 }
6393}
6394
6395bool CompareInt32(int32_t lhs, int32_t rhs, Operation operation) {
6396 switch (operation) {
6397 case Operation::kEqual:
6398 case Operation::kStrictEqual:
6399 return lhs == rhs;
6400 case Operation::kLessThan:
6401 return lhs < rhs;
6402 case Operation::kLessThanOrEqual:
6403 return lhs <= rhs;
6404 case Operation::kGreaterThan:
6405 return lhs > rhs;
6406 case Operation::kGreaterThanOrEqual:
6407 return lhs >= rhs;
6408 default:
6409 UNREACHABLE();
6410 }
6411}
6412
6413bool CompareUint32(uint32_t lhs, uint32_t rhs, Operation operation) {
6414 switch (operation) {
6415 case Operation::kEqual:
6416 case Operation::kStrictEqual:
6417 return lhs == rhs;
6418 case Operation::kLessThan:
6419 return lhs < rhs;
6420 case Operation::kLessThanOrEqual:
6421 return lhs <= rhs;
6422 case Operation::kGreaterThan:
6423 return lhs > rhs;
6424 case Operation::kGreaterThanOrEqual:
6425 return lhs >= rhs;
6426 default:
6427 UNREACHABLE();
6428 }
6429}
6430
6431} // namespace
6432
6435 DeoptimizeReason reason, bool allow_unconditional_deopt) {
6436 auto lhs_const = TryGetInt32Constant(lhs);
6437 if (lhs_const) {
6438 auto rhs_const = TryGetInt32Constant(rhs);
6439 if (rhs_const) {
6440 if (CheckConditionIn32(lhs_const.value(), rhs_const.value(), condition)) {
6441 return ReduceResult::Done();
6442 }
6443 if (allow_unconditional_deopt) {
6444 return EmitUnconditionalDeopt(reason);
6445 }
6446 }
6447 }
6448 AddNewNode<CheckInt32Condition>({lhs, rhs}, condition, reason);
6449 return ReduceResult::Done();
6450}
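// A sketch of the folding above: if both operands are known Int32 constants
// and the condition already holds, no check is emitted at all; if it is known
// to fail and unconditional deopts are allowed, the whole branch is replaced
// by a deopt; otherwise a CheckInt32Condition node performs the check at
// runtime (e.g. index u< length for bounds checks).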
6451
6453 MaybeReduceResult known_elements =
6454 TryFindLoadedProperty(known_node_aspects().loaded_properties, object,
6456 if (known_elements.IsDone()) {
6457 DCHECK(known_elements.IsDoneWithValue());
6458 if (v8_flags.trace_maglev_graph_building) {
6459 std::cout << " * Reusing non-constant [Elements] "
6460 << PrintNodeLabel(graph_labeller(), known_elements.value())
6461 << ": " << PrintNode(graph_labeller(), known_elements.value())
6462 << std::endl;
6463 }
6464 return known_elements.value();
6465 }
6466
6467 DCHECK_EQ(JSObject::kElementsOffset, JSArray::kElementsOffset);
6468 ValueNode* elements = BuildLoadTaggedField(object, JSObject::kElementsOffset);
6469 RecordKnownProperty(object,
6471 elements, false, compiler::AccessMode::kLoad);
6472 return elements;
6473}
6474
6476 ValueNode* object, ElementsKind elements_kind) {
6478 bool is_variable_length = IsRabGsabTypedArrayElementsKind(elements_kind);
6479
6480 if (!is_variable_length) {
6481 // Note: We can't use broker()->length_string() here, because it could
6482 // conflict with redefinitions of the TypedArray length property.
6483 RETURN_IF_DONE(TryFindLoadedProperty(
6484 known_node_aspects().loaded_constant_properties, object,
6486 }
6487
6488 ValueNode* result = AddNewNode<LoadTypedArrayLength>({object}, elements_kind);
6489 if (!is_variable_length) {
6493 }
6494 return result;
6495}
6496
6498 ValueNode* object, ValueNode* index, ElementsKind elements_kind) {
6499#define BUILD_AND_RETURN_LOAD_TYPED_ARRAY(Type) \
6500 return AddNewNode<Load##Type##TypedArrayElement>({object, index}, \
6501 elements_kind);
6502
6503 switch (elements_kind) {
6504 case INT8_ELEMENTS:
6505 case INT16_ELEMENTS:
6506 case INT32_ELEMENTS:
6509 case UINT8_ELEMENTS:
6510 case UINT16_ELEMENTS:
6511 case UINT32_ELEMENTS:
6513 case FLOAT32_ELEMENTS:
6514 case FLOAT64_ELEMENTS:
6516 default:
6517 UNREACHABLE();
6518 }
6519#undef BUILD_AND_RETURN_LOAD_TYPED_ARRAY
6520}
6521
6523 ValueNode* object, ValueNode* index, ElementsKind elements_kind) {
6524#define BUILD_STORE_TYPED_ARRAY(Type, value) \
6525 AddNewNode<Store##Type##TypedArrayElement>({object, index, (value)}, \
6526 elements_kind);
6527
6528 // TODO(leszeks): These operations have a deopt loop when the ToNumber
6529 // conversion sees a type other than number or oddball. Turbofan has the same
6530 // deopt loop, but ideally we'd avoid it.
6531 switch (elements_kind) {
6534 break;
6535 }
6536 case INT8_ELEMENTS:
6537 case INT16_ELEMENTS:
6538 case INT32_ELEMENTS:
6539 case UINT8_ELEMENTS:
6540 case UINT16_ELEMENTS:
6541 case UINT32_ELEMENTS:
6544 NodeType::kNumberOrOddball,
6546 break;
6547 case FLOAT32_ELEMENTS:
6548 case FLOAT64_ELEMENTS:
6551 NodeType::kNumberOrOddball,
6553 break;
6554 default:
6555 UNREACHABLE();
6556 }
6557#undef BUILD_STORE_TYPED_ARRAY
6558}
6559
6561 ValueNode* object, ValueNode* index_object,
6562 const compiler::ElementAccessInfo& access_info,
6563 compiler::KeyedAccessMode const& keyed_mode) {
6566 ElementsKind elements_kind = access_info.elements_kind();
6567 if (elements_kind == FLOAT16_ELEMENTS ||
6568 elements_kind == BIGUINT64_ELEMENTS ||
6569 elements_kind == BIGINT64_ELEMENTS) {
6570 return {};
6571 }
6572 if (keyed_mode.access_mode() == compiler::AccessMode::kLoad &&
6573 LoadModeHandlesOOB(keyed_mode.load_mode())) {
6574 // TODO(victorgomes): Handle OOB mode.
6575 return {};
6576 }
6577 if (keyed_mode.access_mode() == compiler::AccessMode::kStore &&
6579 // TODO(victorgomes): Handle OOB mode.
6580 return {};
6581 }
6582 if (keyed_mode.access_mode() == compiler::AccessMode::kStore &&
6583 elements_kind == UINT8_CLAMPED_ELEMENTS &&
6584 !IsSupported(CpuOperation::kFloat64Round)) {
6585 // TODO(victorgomes): Technically we still support this if the value (in the
6586 // accumulator) is of type int32. It would be nice to have a rollback
6587 // mechanism instead, so that we do not need to check this early.
6588 return {};
6589 }
6590 if (!broker()->dependencies()->DependOnArrayBufferDetachingProtector()) {
6591 // TODO(leszeks): Eliminate this check.
6593 }
6596 GET_VALUE_OR_ABORT(index, GetUint32ElementIndex(index_object));
6597 GET_VALUE_OR_ABORT(length, BuildLoadTypedArrayLength(object, elements_kind));
6599 switch (keyed_mode.access_mode()) {
6601 DCHECK(!LoadModeHandlesOOB(keyed_mode.load_mode()));
6602 return BuildLoadTypedArrayElement(object, index, elements_kind);
6604 DCHECK(StoreModeIsInBounds(keyed_mode.store_mode()));
6605 BuildStoreTypedArrayElement(object, index, elements_kind);
6606 return ReduceResult::Done();
6608 // TODO(victorgomes): Implement has element access.
6609 return {};
6612 UNREACHABLE();
6613 }
6614}
6615
6617 ValueNode* object, ValueNode* index_object,
6619 KeyedAccessLoadMode load_mode) {
6620 DCHECK(IsFastElementsKind(elements_kind));
6621 bool is_jsarray = HasOnlyJSArrayMaps(maps);
6622 DCHECK(is_jsarray || HasOnlyJSObjectMaps(maps));
6623
6624 ValueNode* elements_array = BuildLoadElements(object);
6625 ValueNode* index = GetInt32ElementIndex(index_object);
6626 ValueNode* length = is_jsarray ? GetInt32(BuildLoadJSArrayLength(object))
6627 : BuildLoadFixedArrayLength(elements_array);
6628
6629 auto emit_load = [&]() -> MaybeReduceResult {
6631 if (elements_kind == HOLEY_DOUBLE_ELEMENTS) {
6633 elements_array, index,
6634 CanTreatHoleAsUndefined(maps) && LoadModeHandlesHoles(load_mode));
6635 } else if (elements_kind == PACKED_DOUBLE_ELEMENTS) {
6636 result = BuildLoadFixedDoubleArrayElement(elements_array, index);
6637 } else {
6638 DCHECK(!IsDoubleElementsKind(elements_kind));
6639 result = BuildLoadFixedArrayElement(elements_array, index);
6640 if (IsHoleyElementsKind(elements_kind)) {
6641 if (CanTreatHoleAsUndefined(maps) && LoadModeHandlesHoles(load_mode)) {
6643 } else {
6645 if (IsSmiElementsKind(elements_kind)) {
6646 EnsureType(result, NodeType::kSmi);
6647 }
6648 }
6649 } else if (IsSmiElementsKind(elements_kind)) {
6650 EnsureType(result, NodeType::kSmi);
6651 }
6652 }
6653 return result;
6654 };
6655
6656 if (CanTreatHoleAsUndefined(maps) && LoadModeHandlesOOB(load_mode)) {
6657 ValueNode* positive_index;
6658 GET_VALUE_OR_ABORT(positive_index, GetUint32ElementIndex(index));
6659 ValueNode* uint32_length = AddNewNode<UnsafeInt32ToUint32>({length});
6660 return SelectReduction(
6661 [&](auto& builder) {
6662 return BuildBranchIfUint32Compare(builder, Operation::kLessThan,
6663 positive_index, uint32_length);
6664 },
6665 emit_load, [&] { return GetRootConstant(RootIndex::kUndefinedValue); });
6666 } else {
6668 index, length, AssertCondition::kUnsignedLessThan,
6669 DeoptimizeReason::kOutOfBounds));
6670 return emit_load();
6671 }
6672}
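// Rough shape of a fast keyed load built above (a sketch):
//
//   elements = load the elements backing store of object
//   index    = int32 element index of index_object
//   length   = JSArray length or FixedArray length
//   if hole-as-undefined is allowed and the load mode handles OOB:
//     result = index u< length ? load(elements, index) : undefined
//   else:
//     assert index u< length (deopt on failure); result = load(elements, index)
//
// For holey kinds the hole is either treated as undefined or guarded by a
// deopting hole check, depending on CanTreatHoleAsUndefined.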
6673
6677 // Make sure we do not store signalling NaNs into double arrays.
6678 // TODO(leszeks): Consider making this a bit on StoreFixedDoubleArrayElement
6679 // rather than a separate node.
6680 return GetSilencedNaN(GetFloat64(value));
6681 }
6682 if (IsSmiElementsKind(kind)) return GetSmiValue(value);
6683 return value;
6684}
6685
6687 ValueNode* object, ValueNode* index_object, ValueNode* value,
6689 const compiler::KeyedAccessMode& keyed_mode) {
6690 DCHECK(IsFastElementsKind(elements_kind));
6691
6692 const bool is_jsarray = HasOnlyJSArrayMaps(maps);
6693 DCHECK(is_jsarray || HasOnlyJSObjectMaps(maps));
6694
6695 // Get the elements array.
6696 ValueNode* elements_array = BuildLoadElements(object);
6697 GET_VALUE_OR_ABORT(value, ConvertForStoring(value, elements_kind));
6699
6700 // TODO(verwaest): Loop peeling will turn the first iteration index of spread
6701 // literals into smi constants as well, breaking the assumption that we'll
6702 // have preallocated the space if we see known indices. Turn off this
6703 // optimization if loop peeling is on.
6705 index_object->Is<SmiConstant>() && is_jsarray && !any_peeled_loop_) {
6706 index = GetInt32ElementIndex(index_object);
6707 } else {
6708 // Check boundaries.
6709 ValueNode* elements_array_length = nullptr;
6711 if (is_jsarray) {
6712 length = GetInt32(BuildLoadJSArrayLength(object));
6713 } else {
6714 length = elements_array_length =
6715 BuildLoadFixedArrayLength(elements_array);
6716 }
6717 index = GetInt32ElementIndex(index_object);
6719 if (elements_array_length == nullptr) {
6720 elements_array_length = BuildLoadFixedArrayLength(elements_array);
6721 }
6722
6723 // Validate the {index} depending on holeyness:
6724 //
6725 // For HOLEY_*_ELEMENTS the {index} must not exceed the {elements}
6726 // backing store capacity plus the maximum allowed gap, as otherwise
6727 // the (potential) backing store growth would normalize and thus
6728 // the elements kind of the {receiver} would change to slow mode.
6729 //
6730 // For JSArray PACKED_*_ELEMENTS the {index} must be within the range
6731 // [0,length+1[ to be valid. In case {index} equals {length},
6732 // the {receiver} will be extended, but kept packed.
6733 //
6734 // Non-JSArray PACKED_*_ELEMENTS always grow by adding holes because they
6735 // lack the magical length property, which requires a map transition.
6736 // So we can assume that this did not happen if we did not see this map.
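    // In other words (illustrative bounds; `capacity` is the backing store
    // length loaded above):
    //   HOLEY_*             : index u< capacity + maximum allowed gap
    //   PACKED_* (JSArray)  : index u< length + 1  (index == length grows the
    //                                               array but keeps it packed)
    //   PACKED_* (non-array): index u< capacity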
6737 ValueNode* limit =
6738 IsHoleyElementsKind(elements_kind)
6740 {elements_array_length,
6742 : is_jsarray
6744 : elements_array_length;
6746 index, limit, AssertCondition::kUnsignedLessThan,
6747 DeoptimizeReason::kOutOfBounds));
6748
6749 // Grow backing store if necessary and handle COW.
6750 elements_array = AddNewNode<MaybeGrowFastElements>(
6751 {elements_array, object, index, elements_array_length},
6752 elements_kind);
6753
6754 // If we didn't grow {elements}, it might still be COW, in which case we
6755 // copy it now.
6756 if (IsSmiOrObjectElementsKind(elements_kind)) {
6757 DCHECK_EQ(keyed_mode.store_mode(),
6759 elements_array =
6760 AddNewNode<EnsureWritableFastElements>({elements_array, object});
6761 }
6762
6763 // Update length if necessary.
6764 if (is_jsarray) {
6766 AddNewNode<UpdateJSArrayLength>({length, object, index});
6767 RecordKnownProperty(object, broker()->length_string(), new_length,
6769 }
6770 } else {
6772 index, length, AssertCondition::kUnsignedLessThan,
6773 DeoptimizeReason::kOutOfBounds));
6774
6775 // Handle COW if needed.
6776 if (IsSmiOrObjectElementsKind(elements_kind)) {
6777 if (keyed_mode.store_mode() == KeyedAccessStoreMode::kHandleCOW) {
6778 elements_array =
6779 AddNewNode<EnsureWritableFastElements>({elements_array, object});
6780 } else {
6781 // Ensure that this is not a COW FixedArray.
6783 elements_array, base::VectorOf({broker()->fixed_array_map()})));
6784 }
6785 }
6786 }
6787 }
6788
6789 // Do the store.
6790 if (IsDoubleElementsKind(elements_kind)) {
6791 BuildStoreFixedDoubleArrayElement(elements_array, index, value);
6792 } else {
6793 BuildStoreFixedArrayElement(elements_array, index, value);
6794 }
6795
6796 return ReduceResult::Done();
6797}
6798
6800 ValueNode* object, ValueNode* index_object,
6801 const compiler::ElementAccessInfo& access_info,
6802 compiler::KeyedAccessMode const& keyed_mode) {
6803 if (!IsFastElementsKind(access_info.elements_kind())) {
6804 return {};
6805 }
6806 switch (keyed_mode.access_mode()) {
6809 object, index_object,
6811 access_info.elements_kind(), keyed_mode.load_mode());
6816 ElementsKind elements_kind = access_info.elements_kind();
6817 return TryBuildElementStoreOnJSArrayOrJSObject(object, index_object,
6818 GetAccumulator(), maps,
6819 elements_kind, keyed_mode);
6820 }
6821 default:
6822 // TODO(victorgomes): Implement more access types.
6823 return {};
6824 }
6825}
6826
6827template <typename GenericAccessFunc>
6829 ValueNode* object, ValueNode* index_object,
6830 compiler::ElementAccessFeedback const& feedback,
6831 compiler::FeedbackSource const& feedback_source,
6832 GenericAccessFunc&& build_generic_access) {
6833 const compiler::KeyedAccessMode& keyed_mode = feedback.keyed_mode();
6834 // Check for the megamorphic case.
6835 if (feedback.transition_groups().empty()) {
6836 if (keyed_mode.access_mode() == compiler::AccessMode::kLoad) {
6838 {GetTaggedValue(object), GetTaggedValue(index_object)},
6839 feedback_source);
6840 } else if (keyed_mode.access_mode() == compiler::AccessMode::kStore) {
6842 {GetTaggedValue(object), GetTaggedValue(index_object),
6844 feedback_source);
6845 }
6846 return {};
6847 }
6848
6849 NodeInfo* object_info = known_node_aspects().TryGetInfoFor(object);
6850 compiler::ElementAccessFeedback refined_feedback =
6851 object_info && object_info->possible_maps_are_known()
6852 ? feedback.Refine(broker(), object_info->possible_maps())
6853 : feedback;
6854
6855 if (refined_feedback.HasOnlyStringMaps(broker())) {
6856 return TryBuildElementAccessOnString(object, index_object, keyed_mode);
6857 }
6858
6859 compiler::AccessInfoFactory access_info_factory(broker(), zone());
6861 if (!access_info_factory.ComputeElementAccessInfos(refined_feedback,
6862 &access_infos) ||
6863 access_infos.empty()) {
6864 return {};
6865 }
6866
6867 // TODO(leszeks): This is copied without changes from TurboFan's native
6868 // context specialization. We should figure out a way to share this code.
6869 //
6870 // For holey stores or growing stores, we need to check that the prototype
6871 // chain contains no setters for elements, and we need to guard those checks
6872 // via code dependencies on the relevant prototype maps.
6873 if (keyed_mode.access_mode() == compiler::AccessMode::kStore) {
6874 // TODO(v8:7700): We could have a fast path here, that checks for the
6875 // common case of Array or Object prototype only and therefore avoids
6876 // the zone allocation of this vector.
6877 ZoneVector<compiler::MapRef> prototype_maps(zone());
6878 for (compiler::ElementAccessInfo const& access_info : access_infos) {
6879 for (compiler::MapRef receiver_map :
6880 access_info.lookup_start_object_maps()) {
6881 // If the {receiver_map} has a prototype and its elements backing
6882 // store is either holey, or we have a potentially growing store,
6883 // then we need to check that all prototypes have stable maps with
6884 // with no element accessors and no throwing behavior for elements (and
6885 // we need to guard against changes to that below).
6886 if ((IsHoleyOrDictionaryElementsKind(receiver_map.elements_kind()) ||
6887 StoreModeCanGrow(refined_feedback.keyed_mode().store_mode())) &&
6888 !receiver_map.PrototypesElementsDoNotHaveAccessorsOrThrow(
6889 broker(), &prototype_maps)) {
6890 return {};
6891 }
6892
6893 // TODO(v8:12547): Support writing to objects in shared space, which
6894 // need a write barrier that calls Object::Share to ensure the RHS is
6895 // shared.
6896 if (InstanceTypeChecker::IsAlwaysSharedSpaceJSObject(
6897 receiver_map.instance_type())) {
6898 return {};
6899 }
6900 }
6901 }
6902 for (compiler::MapRef prototype_map : prototype_maps) {
6903 broker()->dependencies()->DependOnStableMap(prototype_map);
6904 }
6905 }
6906
6907 // Check for monomorphic case.
6908 if (access_infos.size() == 1) {
6909 compiler::ElementAccessInfo const& access_info = access_infos.front();
6910 // TODO(victorgomes): Support RAB/GSAB backed typed arrays.
6912 return {};
6913 }
6914
6915 if (!access_info.transition_sources().empty()) {
6916 compiler::MapRef transition_target =
6917 access_info.lookup_start_object_maps().front();
6918 const ZoneVector<compiler::MapRef>& transition_sources =
6919 access_info.transition_sources();
6920
6921 // There are no transitions in heap number maps. If `object` is a Smi, we
6922 // would fail the transition anyway and deopt later.
6923 DCHECK_NE(transition_target.instance_type(),
6924 InstanceType::HEAP_NUMBER_TYPE);
6925#ifdef DEBUG
6926 for (auto& transition_source : transition_sources) {
6927 DCHECK_NE(transition_source.instance_type(),
6928 InstanceType::HEAP_NUMBER_TYPE);
6929 }
6930#endif // DEBUG
6931
6933 ValueNode* object_map =
6935
6937 object, object_map, transition_sources, transition_target));
6938 } else {
6940 object, base::VectorOf(access_info.lookup_start_object_maps())));
6941 }
6942 if (IsTypedArrayElementsKind(access_info.elements_kind())) {
6943 return TryBuildElementAccessOnTypedArray(object, index_object,
6944 access_info, keyed_mode);
6945 }
6946 return TryBuildElementAccessOnJSArrayOrJSObject(object, index_object,
6947 access_info, keyed_mode);
6948 } else {
6949 return TryBuildPolymorphicElementAccess(object, index_object, keyed_mode,
6950 access_infos, build_generic_access);
6951 }
6952}
6953
6954template <typename GenericAccessFunc>
6956 ValueNode* object, ValueNode* index_object,
6957 const compiler::KeyedAccessMode& keyed_mode,
6958 const ZoneVector<compiler::ElementAccessInfo>& access_infos,
6959 GenericAccessFunc&& build_generic_access) {
6960 if (keyed_mode.access_mode() == compiler::AccessMode::kLoad &&
6961 LoadModeHandlesOOB(keyed_mode.load_mode())) {
6962 // TODO(victorgomes): Handle OOB mode.
6963 return {};
6964 }
6965
6966 const bool is_any_store = compiler::IsAnyStore(keyed_mode.access_mode());
6967 const int access_info_count = static_cast<int>(access_infos.size());
6968 // Stores don't return a value, so we don't need a variable for the result.
6969 MaglevSubGraphBuilder sub_graph(this, is_any_store ? 0 : 1);
6970 std::optional<MaglevSubGraphBuilder::Variable> ret_val;
6971 std::optional<MaglevSubGraphBuilder::Label> done;
6972 std::optional<MaglevSubGraphBuilder::Label> generic_access;
6973
6976
6977 // TODO(pthier): We could do better here than just emitting code for each map,
6978 // as many different maps can produce the exact same code (e.g. TypedArray
6979 // access for Uint16/Uint32/Int16/Int32/...).
6980 for (int i = 0; i < access_info_count; i++) {
6981 compiler::ElementAccessInfo const& access_info = access_infos[i];
6982 std::optional<MaglevSubGraphBuilder::Label> check_next_map;
6983 const bool handle_transitions = !access_info.transition_sources().empty();
6984 MaybeReduceResult map_check_result;
6985 if (i == access_info_count - 1) {
6986 if (handle_transitions) {
6987 compiler::MapRef transition_target =
6988 access_info.lookup_start_object_maps().front();
6989 map_check_result = BuildTransitionElementsKindOrCheckMap(
6990 object, object_map, access_info.transition_sources(),
6991 transition_target);
6992 } else {
6993 map_check_result = BuildCheckMaps(
6994 object, base::VectorOf(access_info.lookup_start_object_maps()),
6995 object_map);
6996 }
6997 } else {
6998 if (handle_transitions) {
6999 compiler::MapRef transition_target =
7000 access_info.lookup_start_object_maps().front();
7002 object, object_map, access_info.transition_sources(),
7003 transition_target, &sub_graph, check_next_map);
7004 } else {
7005 map_check_result = BuildCompareMaps(
7006 object, object_map,
7007 base::VectorOf(access_info.lookup_start_object_maps()), &sub_graph,
7008 check_next_map);
7009 }
7010 }
7011 if (map_check_result.IsDoneWithAbort()) {
7012 // We know from known possible maps that this branch is not reachable,
7013 // so don't emit any code for it.
7014 continue;
7015 }
7017 // TODO(victorgomes): Support RAB/GSAB backed typed arrays.
7020 } else if (IsTypedArrayElementsKind(access_info.elements_kind())) {
7021 result = TryBuildElementAccessOnTypedArray(object, index_object,
7022 access_info, keyed_mode);
7023 } else {
7025 object, index_object, access_info, keyed_mode);
7026 }
7027
7028 switch (result.kind()) {
7031 DCHECK_EQ(result.HasValue(), !is_any_store);
7032 if (!done.has_value()) {
7033 // We initialize the label {done} lazily on the first possible path.
7034 // If no possible path exists, it is guaranteed that BuildCheckMaps
7035 // emitted an unconditional deopt and we return DoneWithAbort at the
7036 // end. We need one extra predecessor to jump from the generic case.
7037 const int possible_predecessors = access_info_count - i + 1;
7038 if (is_any_store) {
7039 done.emplace(&sub_graph, possible_predecessors);
7040 } else {
7041 ret_val.emplace(0);
7042 done.emplace(
7043 &sub_graph, possible_predecessors,
7044 std::initializer_list<MaglevSubGraphBuilder::Variable*>{
7045 &*ret_val});
7046 }
7047 }
7048 if (!is_any_store) {
7049 sub_graph.set(*ret_val, result.value());
7050 }
7051 sub_graph.Goto(&*done);
7052 break;
7054 if (!generic_access.has_value()) {
7055 // Conservatively assume that all remaining branches can go into the
7056 // generic path, as we have to initialize the predecessors upfront.
7057 // TODO(pthier): Find a better way to do that.
7058 generic_access.emplace(&sub_graph, access_info_count - i);
7059 }
7060 sub_graph.Goto(&*generic_access);
7061 break;
7063 break;
7064 }
7065 if (check_next_map.has_value()) {
7066 sub_graph.Bind(&*check_next_map);
7067 }
7068 }
7069 if (generic_access.has_value() &&
7070 !sub_graph.TrimPredecessorsAndBind(&*generic_access).IsDoneWithAbort()) {
7071 MaybeReduceResult generic_result = build_generic_access();
7072 DCHECK(generic_result.IsDone());
7073 DCHECK_EQ(generic_result.IsDoneWithValue(), !is_any_store);
7074 if (!done.has_value()) {
7075 return is_any_store ? ReduceResult::Done() : generic_result.value();
7076 }
7077 if (!is_any_store) {
7078 sub_graph.set(*ret_val, generic_result.value());
7079 }
7080 sub_graph.Goto(&*done);
7081 }
7082 if (done.has_value()) {
7083 RETURN_IF_ABORT(sub_graph.TrimPredecessorsAndBind(&*done));
7084 return is_any_store ? ReduceResult::Done() : sub_graph.get(*ret_val);
7085 } else {
7087 }
7088}
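// Control-flow sketch of the polymorphic dispatch built above (the labels are
// the sub-graph labels used in the code, not new nodes):
//
//   for each access info i:
//     compare map (or transition-and-compare); on mismatch -> check_next_map
//     fast element access; on success                      -> goto done
//     if no fast path is possible for this map             -> goto generic_access
//   the last access info uses a deopting map check instead of a compare
//   generic_access: build_generic_access()                 -> goto done
//   done: merge the per-path results (loads only; stores produce no value)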
7089
7090template <typename GenericAccessFunc>
7092 ValueNode* receiver, ValueNode* lookup_start_object,
7093 compiler::NamedAccessFeedback const& feedback,
7094 compiler::AccessMode access_mode,
7095 const ZoneVector<compiler::PropertyAccessInfo>& access_infos,
7096 GenericAccessFunc&& build_generic_access) {
7097 const bool is_any_store = compiler::IsAnyStore(access_mode);
7098 const int access_info_count = static_cast<int>(access_infos.size());
7099 int number_map_index = -1;
7100
7101 bool needs_migration = false;
7102 bool has_deprecated_map_without_migration_target =
7103 feedback.has_deprecated_map_without_migration_target();
7104 for (int i = 0; i < access_info_count; i++) {
7105 compiler::PropertyAccessInfo const& access_info = access_infos[i];
7106 DCHECK(!access_info.IsInvalid());
7107 for (compiler::MapRef map : access_info.lookup_start_object_maps()) {
7108 if (map.is_migration_target()) {
7109 needs_migration = true;
7110 }
7111 if (map.IsHeapNumberMap()) {
7112 GetOrCreateInfoFor(lookup_start_object);
7113 base::SmallVector<compiler::MapRef, 1> known_maps = {map};
7114 KnownMapsMerger merger(broker(), zone(), base::VectorOf(known_maps));
7115 merger.IntersectWithKnownNodeAspects(lookup_start_object,
7117 if (!merger.intersect_set().is_empty()) {
7118 DCHECK_EQ(number_map_index, -1);
7119 number_map_index = i;
7120 }
7121 }
7122 }
7123 }
7124
7125 // Stores don't return a value, so we don't need a variable for the result.
7126 MaglevSubGraphBuilder sub_graph(this, is_any_store ? 0 : 1);
7127 std::optional<MaglevSubGraphBuilder::Variable> ret_val;
7128 std::optional<MaglevSubGraphBuilder::Label> done;
7129 std::optional<MaglevSubGraphBuilder::Label> is_number;
7130 std::optional<MaglevSubGraphBuilder::Label> generic_access;
7131
7132 if (number_map_index >= 0) {
7133 is_number.emplace(&sub_graph, 2);
7134 sub_graph.GotoIfTrue<BranchIfSmi>(&*is_number, {lookup_start_object});
7135 } else {
7136 RETURN_IF_ABORT(BuildCheckHeapObject(lookup_start_object));
7137 }
7138 ValueNode* lookup_start_object_map =
7139 BuildLoadTaggedField(lookup_start_object, HeapObject::kMapOffset);
7140
7141 if (needs_migration &&
7142 !v8_flags.maglev_skip_migration_check_for_polymorphic_access) {
7143 // TODO(marja, v8:7700): Try migrating only if all comparisons failed.
7144 // TODO(marja, v8:7700): Investigate making polymorphic map comparison (with
7145 // migration) a control node (like switch).
7146 lookup_start_object_map = AddNewNode<MigrateMapIfNeeded>(
7147 {lookup_start_object_map, lookup_start_object});
7148 }
7149
7150 for (int i = 0; i < access_info_count; i++) {
7151 compiler::PropertyAccessInfo const& access_info = access_infos[i];
7152 std::optional<MaglevSubGraphBuilder::Label> check_next_map;
7153 MaybeReduceResult map_check_result;
7154 const auto& maps = access_info.lookup_start_object_maps();
7155 if (i == access_info_count - 1) {
7156 map_check_result =
7157 BuildCheckMaps(lookup_start_object, base::VectorOf(maps), {},
7158 has_deprecated_map_without_migration_target);
7159 } else {
7160 map_check_result =
7161 BuildCompareMaps(lookup_start_object, lookup_start_object_map,
7162 base::VectorOf(maps), &sub_graph, check_next_map);
7163 }
7164 if (map_check_result.IsDoneWithAbort()) {
7165 // We know from known possible maps that this branch is not reachable,
7166 // so don't emit any code for it.
7167 continue;
7168 }
7169 if (i == number_map_index) {
7170 DCHECK(is_number.has_value());
7171 sub_graph.Goto(&*is_number);
7172 sub_graph.Bind(&*is_number);
7173 }
7174
7176 if (is_any_store) {
7177 result = TryBuildPropertyStore(receiver, lookup_start_object,
7178 feedback.name(), access_info, access_mode);
7179 } else {
7180 result = TryBuildPropertyLoad(receiver, lookup_start_object,
7181 feedback.name(), access_info);
7182 }
7183
7184 switch (result.kind()) {
7187 DCHECK_EQ(result.HasValue(), !is_any_store);
7188 if (!done.has_value()) {
7189 // We initialize the label {done} lazily on the first possible path.
7190 // If no possible path exists, it is guaranteed that BuildCheckMaps
7191 // emitted an unconditional deopt and we return DoneWithAbort at the
7192 // end. We need one extra predecessor to jump from the generic case.
7193 const int possible_predecessors = access_info_count - i + 1;
7194 if (is_any_store) {
7195 done.emplace(&sub_graph, possible_predecessors);
7196 } else {
7197 ret_val.emplace(0);
7198 done.emplace(
7199 &sub_graph, possible_predecessors,
7200 std::initializer_list<MaglevSubGraphBuilder::Variable*>{
7201 &*ret_val});
7202 }
7203 }
7204
7205 if (!is_any_store) {
7206 sub_graph.set(*ret_val, result.value());
7207 }
7208 sub_graph.Goto(&*done);
7209 break;
7211 break;
7213 if (!generic_access.has_value()) {
7214 // Conservatively assume that all remaining branches can go into the
7215 // generic path, as we have to initialize the predecessors upfront.
7216 // TODO(pthier): Find a better way to do that.
7217 generic_access.emplace(&sub_graph, access_info_count - i);
7218 }
7219 sub_graph.Goto(&*generic_access);
7220 break;
7221 default:
7222 UNREACHABLE();
7223 }
7224
7225 if (check_next_map.has_value()) {
7226 sub_graph.Bind(&*check_next_map);
7227 }
7228 }
7229
7230 if (generic_access.has_value() &&
7231 !sub_graph.TrimPredecessorsAndBind(&*generic_access).IsDoneWithAbort()) {
7232 MaybeReduceResult generic_result = build_generic_access();
7233 DCHECK(generic_result.IsDone());
7234 DCHECK_EQ(generic_result.IsDoneWithValue(), !is_any_store);
7235 if (!done.has_value()) {
7236 return is_any_store ? ReduceResult::Done() : generic_result.value();
7237 }
7238 if (!is_any_store) {
7239 sub_graph.set(*ret_val, generic_result.value());
7240 }
7241 sub_graph.Goto(&*done);
7242 }
7243
7244 if (done.has_value()) {
7245 RETURN_IF_ABORT(sub_graph.TrimPredecessorsAndBind(&*done));
7246 return is_any_store ? ReduceResult::Done() : sub_graph.get(*ret_val);
7247 } else {
7249 }
7250}
7251
7254 ValueNode* value, bool is_const, compiler::AccessMode access_mode) {
7255 DCHECK(!value->properties().is_conversion());
7256 KnownNodeAspects::LoadedPropertyMap& loaded_properties =
7259 // Try to get loaded_properties[key] if it already exists, otherwise
7260 // construct loaded_properties[key] = ZoneMap{zone()}.
7261 auto& props_for_key =
7262 loaded_properties.try_emplace(key, zone()).first->second;
7263
7264 if (!is_const && IsAnyStore(access_mode)) {
7267 }
7268 // We don't do any aliasing analysis, so stores clobber all other cached
7269 // loads of a property with that key. We only need to do this for
7270 // non-constant properties, since constant properties are known not to
7271 // change and therefore can't be clobbered.
7272 // TODO(leszeks): Do some light aliasing analysis here, e.g. checking
7273 // whether there's an intersection of known maps.
7274 if (v8_flags.trace_maglev_graph_building) {
7275 std::cout << " * Removing all non-constant cached ";
7276 switch (key.type()) {
7278 std::cout << "properties with name " << *key.name().object();
7279 break;
7281 std::cout << "Elements";
7282 break;
7284 std::cout << "TypedArray length";
7285 break;
7287 std::cout << "String length";
7288 break;
7289 }
7290 std::cout << std::endl;
7291 }
7292 props_for_key.clear();
7293 }
7294
7295 if (v8_flags.trace_maglev_graph_building) {
7296 std::cout << " * Recording " << (is_const ? "constant" : "non-constant")
7297 << " known property "
7298 << PrintNodeLabel(graph_labeller(), lookup_start_object) << ": "
7299 << PrintNode(graph_labeller(), lookup_start_object) << " [";
7300 switch (key.type()) {
7302 std::cout << *key.name().object();
7303 break;
7305 std::cout << "Elements";
7306 break;
7308 std::cout << "TypedArray length";
7309 break;
7311 std::cout << "String length";
7312 break;
7313 }
7314 std::cout << "] = " << PrintNodeLabel(graph_labeller(), value) << ": "
7315 << PrintNode(graph_labeller(), value) << std::endl;
7316 }
7317
7318 if (IsAnyStore(access_mode) && !is_const && is_loop_effect_tracking()) {
7319 auto updated = props_for_key.emplace(lookup_start_object, value);
7320 if (updated.second) {
7321 loop_effects_->objects_written.insert(lookup_start_object);
7322 } else if (updated.first->second != value) {
7323 updated.first->second = value;
7324 loop_effects_->objects_written.insert(lookup_start_object);
7325 }
7326 } else {
7327 props_for_key[lookup_start_object] = value;
7328 }
7329}
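// For illustration: a non-constant store to key K invalidates every cached
// non-constant load of K on *any* object (no aliasing analysis is done),
// while constant properties survive since they cannot change. Roughly:
//
//   x = a.foo;   // cache: loaded_properties[foo][a] = x
//   b.foo = y;   // clears all cached non-constant "foo" entries, then
//                // records loaded_properties[foo][b] = y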
7330
7332 ValueNode* lookup_start_object, compiler::NameRef name) {
7333 if (MaybeReduceResult result = TryFindLoadedProperty(
7334 known_node_aspects().loaded_properties, lookup_start_object, name);
7335 result.IsDone()) {
7336 if (v8_flags.trace_maglev_graph_building && result.IsDoneWithValue()) {
7337 std::cout << " * Reusing non-constant loaded property "
7338 << PrintNodeLabel(graph_labeller(), result.value()) << ": "
7339 << PrintNode(graph_labeller(), result.value()) << std::endl;
7340 }
7341 return result;
7342 }
7344 TryFindLoadedProperty(known_node_aspects().loaded_constant_properties,
7345 lookup_start_object, name);
7346 result.IsDone()) {
7347 if (v8_flags.trace_maglev_graph_building && result.IsDoneWithValue()) {
7348 std::cout << " * Reusing constant loaded property "
7349 << PrintNodeLabel(graph_labeller(), result.value()) << ": "
7350 << PrintNode(graph_labeller(), result.value()) << std::endl;
7351 }
7352 return result;
7353 }
7354 return {};
7355}
7356
7358 DCHECK(NodeTypeIs(GetType(string), NodeType::kString));
7359 if (auto vo_string = string->TryCast<InlinedAllocation>()) {
7360 if (vo_string->object()->type() == VirtualObject::kConsString) {
7361 return vo_string->object()->string_length();
7362 }
7363 }
7364 if (auto const_string = TryGetConstant(broker(), local_isolate(), string)) {
7365 if (const_string->IsString()) {
7366 return GetInt32Constant(const_string->AsString().length());
7367 }
7368 }
7369 if (auto wrapper = string->TryCast<UnwrapThinString>()) {
7370 ValueNode* input = wrapper->value_input().node();
7371 if (NodeTypeIs(GetType(input), NodeType::kString)) {
7372 return BuildLoadStringLength(input);
7373 }
7374 }
7375 if (MaybeReduceResult result = TryFindLoadedProperty(
7376 known_node_aspects().loaded_constant_properties, string,
7378 result.IsDone()) {
7379 if (v8_flags.trace_maglev_graph_building && result.IsDoneWithValue()) {
7380 std::cout << " * Reusing constant [String length]"
7381 << PrintNodeLabel(graph_labeller(), result.value()) << ": "
7382 << PrintNode(graph_labeller(), result.value()) << std::endl;
7383 }
7384 return result.value();
7385 }
7387 RecordKnownProperty(string,
7390 return result;
7391}
7392
7393template <typename GenericAccessFunc>
7395 ValueNode* receiver, ValueNode* lookup_start_object, compiler::NameRef name,
7396 compiler::FeedbackSource& feedback_source,
7397 GenericAccessFunc&& build_generic_access) {
7398 const compiler::ProcessedFeedback& processed_feedback =
7399 broker()->GetFeedbackForPropertyAccess(feedback_source,
7401 switch (processed_feedback.kind()) {
7404 DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
7406 RETURN_IF_DONE(TryReuseKnownPropertyLoad(lookup_start_object, name));
7407 return TryBuildNamedAccess(
7408 receiver, lookup_start_object, processed_feedback.AsNamedAccess(),
7409 feedback_source, compiler::AccessMode::kLoad, build_generic_access);
7410 }
7411 default:
7412 return {};
7413 }
7414}
7415
7418 compiler::FeedbackSource& feedback_source) {
7419 auto build_generic_access = [this, &receiver, &name, &feedback_source]() {
7420 ValueNode* context = GetContext();
7422 feedback_source);
7423 };
7424 return TryBuildLoadNamedProperty(receiver, receiver, name, feedback_source,
7425 build_generic_access);
7426}
7427
7428ReduceResult MaglevGraphBuilder::VisitGetNamedProperty() {
7429 // GetNamedProperty <object> <name_index> <slot>
7430 ValueNode* object = LoadRegister(0);
7432 FeedbackSlot slot = GetSlotOperand(2);
7433 compiler::FeedbackSource feedback_source{feedback(), slot};
7435 TryBuildLoadNamedProperty(object, name, feedback_source), SetAccumulator);
7436 // Create a generic load in the fallthrough.
7437 ValueNode* context = GetContext();
7439 AddNewNode<LoadNamedGeneric>({context, object}, name, feedback_source));
7440 return ReduceResult::Done();
7441}
7442
7444 if (ref.IsSmi()) return GetSmiConstant(ref.AsSmi());
7445 compiler::HeapObjectRef constant = ref.AsHeapObject();
7446
7447 if (IsThinString(*constant.object())) {
7448 constant = MakeRefAssumeMemoryFence(
7449 broker(), Cast<ThinString>(*constant.object())->actual());
7450 }
7451
7452 auto root_index = broker()->FindRootIndex(constant);
7453 if (root_index.has_value()) {
7454 return GetRootConstant(*root_index);
7455 }
7456
7457 auto it = graph_->constants().find(constant);
7458 if (it == graph_->constants().end()) {
7459 Constant* node = CreateNewConstantNode<Constant>(0, constant);
7460 graph_->constants().emplace(constant, node);
7461 return node;
7462 }
7463 return it->second;
7464}
7465
7467 IndirectPointerTag tag) {
7468#ifdef V8_ENABLE_SANDBOX
7469 auto it = graph_->trusted_constants().find(ref);
7470 if (it == graph_->trusted_constants().end()) {
7472 graph_->trusted_constants().emplace(ref, node);
7473 return node;
7474 }
7475 SBXCHECK_EQ(it->second->tag(), tag);
7476 return it->second;
7477#else
7478 return GetConstant(ref);
7479#endif
7480}
7481
7482ReduceResult MaglevGraphBuilder::VisitGetNamedPropertyFromSuper() {
7483 // GetNamedPropertyFromSuper <receiver> <name_index> <slot>
7485 ValueNode* home_object = GetAccumulator();
7487 FeedbackSlot slot = GetSlotOperand(2);
7488 compiler::FeedbackSource feedback_source{feedback(), slot};
7489 // {home_object} is guaranteed to be a HeapObject.
7490 ValueNode* home_object_map =
7492 ValueNode* lookup_start_object =
7493 BuildLoadTaggedField(home_object_map, Map::kPrototypeOffset);
7494
7495 auto build_generic_access = [this, &receiver, &lookup_start_object, &name,
7496 &feedback_source]() {
7497 ValueNode* context = GetContext();
7499 {context, receiver, lookup_start_object}, name, feedback_source);
7500 };
7501
7503 TryBuildLoadNamedProperty(receiver, lookup_start_object, name,
7504 feedback_source, build_generic_access),
7506 // Create a generic load.
7507 SetAccumulator(build_generic_access());
7508 return ReduceResult::Done();
7509}
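// A rough JS-level sketch of the super-property load above (illustrative,
// assuming standard [[HomeObject]] semantics): for
//
//   class B extends A { m() { return super.x; } }
//
// the lookup_start_object is the home object's map prototype (A.prototype),
// while `receiver` stays the original `this` of m(), so accessors still see
// the right receiver.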
7510
7512 ValueNode* object, const compiler::FeedbackSource& feedback_source,
7513 const compiler::ProcessedFeedback& processed_feedback) {
7514 if (current_for_in_state.index != nullptr &&
7517 bool speculating_receiver_map_matches = false;
7518 if (current_for_in_state.receiver != object) {
7519       // When the feedback is uninitialized, it is either a keyed load that
7520       // always hits the enum cache, or a keyed load that has never been
7521       // reached. In either case, we can check the map of the receiver and use
7522       // the enum cache if the map matches the {cache_type}.
7523 if (processed_feedback.kind() !=
7525 return MaybeReduceResult::Fail();
7526 }
7527 if (BuildCheckHeapObject(object).IsDoneWithAbort()) {
7529 }
7530 speculating_receiver_map_matches = true;
7531 }
7532
7534 speculating_receiver_map_matches) {
7535 auto* receiver_map = BuildLoadTaggedField(object, HeapObject::kMapOffset);
7537 {receiver_map, current_for_in_state.cache_type},
7538 DeoptimizeReason::kWrongMapDynamic);
7539 if (current_for_in_state.receiver == object) {
7541 }
7542 }
7543 // TODO(leszeks): Cache the field index per iteration.
7544 auto* field_index = BuildLoadFixedArrayElement(
7547 AddNewNode<LoadTaggedFieldByFieldIndex>({object, field_index}));
7548 return ReduceResult::Done();
7549 }
7550 return MaybeReduceResult::Fail();
7551}
7552
7554 ValueNode* object, const compiler::FeedbackSource& feedback_source,
7555 const compiler::ProcessedFeedback& processed_feedback) {
7557 object, feedback_source, processed_feedback));
7558
7559 auto build_generic_access = [this, object, &feedback_source]() {
7560 ValueNode* context = GetContext();
7562 return AddNewNode<GetKeyedGeneric>({context, object, key}, feedback_source);
7563 };
7564
7565 switch (processed_feedback.kind()) {
7568 DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
7569
7571 // Get the accumulator without conversion. TryBuildElementAccess
7572 // will try to pick the best representation.
7575 object, index, processed_feedback.AsElementAccess(), feedback_source,
7576 build_generic_access);
7578 break;
7579 }
7580
7583 compiler::NameRef name = processed_feedback.AsNamedAccess().name();
7585 key, name, DeoptimizeReason::kKeyedAccessChanged));
7586
7589
7591 object, object, processed_feedback.AsNamedAccess(), feedback_source,
7592 compiler::AccessMode::kLoad, build_generic_access);
7594 break;
7595 }
7596
7597 default:
7598 break;
7599 }
7600
7601 // Create a generic load in the fallthrough.
7602 SetAccumulator(build_generic_access());
7603 return ReduceResult::Done();
7604}
7605
7606ReduceResult MaglevGraphBuilder::VisitGetKeyedProperty() {
7607 // GetKeyedProperty <object> <slot>
7608 ValueNode* object = LoadRegister(0);
7609 // TODO(leszeks): We don't need to tag the key if it's an Int32 and a simple
7610 // monomorphic element load.
7611 FeedbackSlot slot = GetSlotOperand(1);
7612 compiler::FeedbackSource feedback_source{feedback(), slot};
7613
7614 const compiler::ProcessedFeedback* processed_feedback =
7616 feedback_source, compiler::AccessMode::kLoad, std::nullopt);
7617 if (processed_feedback->kind() ==
7619 processed_feedback->AsElementAccess().transition_groups().empty()) {
7620 if (auto constant = TryGetConstant(GetAccumulator());
7621 constant.has_value() && constant->IsName()) {
7622 compiler::NameRef name = constant->AsName();
7623 if (name.IsUniqueName() && !name.object()->IsArrayIndex()) {
7624 processed_feedback =
7625 &processed_feedback->AsElementAccess().Refine(broker(), name);
7626 }
7627 }
7628 }
7629
7630 return BuildGetKeyedProperty(object, feedback_source, *processed_feedback);
7631}
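// Example of the feedback refinement above (a sketch, not exhaustive): for
// `o[k]` with element-access feedback and no transition groups, if `k` is a
// compile-time constant unique name (e.g. a symbol or an internalized,
// non-array-index string), the feedback is narrowed to named access for that
// name, so the keyed load can be lowered like the equivalent named load.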
7632
7633ReduceResult MaglevGraphBuilder::VisitGetEnumeratedKeyedProperty() {
7634 // GetEnumeratedKeyedProperty <object> <enum_index> <cache_type> <slot>
7635 ValueNode* object = LoadRegister(0);
7636 FeedbackSlot slot = GetSlotOperand(3);
7637 compiler::FeedbackSource feedback_source{feedback(), slot};
7638
7639 const compiler::ProcessedFeedback& processed_feedback =
7641 feedback_source, compiler::AccessMode::kLoad, std::nullopt);
7642
7643 return BuildGetKeyedProperty(object, feedback_source, processed_feedback);
7644}
7645
7646ReduceResult MaglevGraphBuilder::VisitLdaModuleVariable() {
7647 // LdaModuleVariable <cell_index> <depth>
7648 int cell_index = iterator_.GetImmediateOperand(0);
7649 size_t depth = iterator_.GetUnsignedImmediateOperand(1);
7650 ValueNode* context = GetContextAtDepth(GetContext(), depth);
7651
7652 ValueNode* module = LoadAndCacheContextSlot(
7654 ValueNode* exports_or_imports;
7655 if (cell_index > 0) {
7656 exports_or_imports =
7657 BuildLoadTaggedField(module, SourceTextModule::kRegularExportsOffset);
7658 // The actual array index is (cell_index - 1).
7659 cell_index -= 1;
7660 } else {
7661 exports_or_imports =
7662 BuildLoadTaggedField(module, SourceTextModule::kRegularImportsOffset);
7663 // The actual array index is (-cell_index - 1).
7664 cell_index = -cell_index - 1;
7665 }
7666 ValueNode* cell = BuildLoadFixedArrayElement(exports_or_imports, cell_index);
7667 SetAccumulator(BuildLoadTaggedField(cell, Cell::kValueOffset));
7668 return ReduceResult::Done();
7669}
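// Rough mapping for the cell_index encoding handled above (illustrative):
//
//   export let a = 1;       // positive cell_index  -> regular_exports[cell_index - 1]
//   import {b} from "m";    // non-positive index   -> regular_imports[-cell_index - 1]
//
// Each slot holds a Cell, and the binding's current value is read from the
// cell's value field.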
7670
7672 size_t depth) {
7673 MinimizeContextChainDepth(&context, &depth);
7674
7676 compiler::OptionalContextRef maybe_ref =
7677 FunctionContextSpecialization::TryToRef(compilation_unit_, context,
7678 &depth);
7679 if (maybe_ref.has_value()) {
7680 context = GetConstant(maybe_ref.value());
7681 }
7682 }
7683
7684 for (size_t i = 0; i < depth; i++) {
7687 }
7688 return context;
7689}
7690
7691ReduceResult MaglevGraphBuilder::VisitStaModuleVariable() {
7692 // StaModuleVariable <cell_index> <depth>
7693 int cell_index = iterator_.GetImmediateOperand(0);
7694 if (V8_UNLIKELY(cell_index < 0)) {
7695 // TODO(verwaest): Make this fail as well.
7696 return BuildCallRuntime(Runtime::kAbort,
7697 {GetSmiConstant(static_cast<int>(
7698 AbortReason::kUnsupportedModuleOperation))});
7699 }
7700
7701 size_t depth = iterator_.GetUnsignedImmediateOperand(1);
7702 ValueNode* context = GetContextAtDepth(GetContext(), depth);
7703
7704 ValueNode* module = LoadAndCacheContextSlot(
7706 ValueNode* exports =
7707 BuildLoadTaggedField(module, SourceTextModule::kRegularExportsOffset);
7708 // The actual array index is (cell_index - 1).
7709 cell_index -= 1;
7710 ValueNode* cell = BuildLoadFixedArrayElement(exports, cell_index);
7711 BuildStoreTaggedField(cell, GetAccumulator(), Cell::kValueOffset,
7713 return ReduceResult::Done();
7714}
7715
7717 compiler::NameRef name, compiler::FeedbackSource& feedback_source,
7718 TypeofMode typeof_mode) {
7719 const compiler::ProcessedFeedback& access_feedback =
7720 broker()->GetFeedbackForGlobalAccess(feedback_source);
7721
7722 if (access_feedback.IsInsufficient()) {
7724 DeoptimizeReason::kInsufficientTypeFeedbackForGenericGlobalAccess);
7725 }
7726
7727 const compiler::GlobalAccessFeedback& global_access_feedback =
7728 access_feedback.AsGlobalAccess();
7729 PROCESS_AND_RETURN_IF_DONE(TryBuildGlobalLoad(global_access_feedback),
7731
7732 ValueNode* context = GetContext();
7734 AddNewNode<LoadGlobal>({context}, name, feedback_source, typeof_mode));
7735 return ReduceResult::Done();
7736}
7737
7738ReduceResult MaglevGraphBuilder::VisitSetNamedProperty() {
7739 // SetNamedProperty <object> <name_index> <slot>
7740 ValueNode* object = LoadRegister(0);
7742 FeedbackSlot slot = GetSlotOperand(2);
7743 compiler::FeedbackSource feedback_source{feedback(), slot};
7744
7745 const compiler::ProcessedFeedback& processed_feedback =
7747 feedback_source, compiler::AccessMode::kStore, name);
7748
7749 auto build_generic_access = [this, object, &name, &feedback_source]() {
7750 ValueNode* context = GetContext();
7751 ValueNode* value = GetAccumulator();
7752 AddNewNode<SetNamedGeneric>({context, object, value}, name,
7753 feedback_source);
7754 return ReduceResult::Done();
7755 };
7756
7757 switch (processed_feedback.kind()) {
7760 DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
7761
7764 object, object, processed_feedback.AsNamedAccess(), feedback_source,
7765 compiler::AccessMode::kStore, build_generic_access));
7766 break;
7767 default:
7768 break;
7769 }
7770
7771 // Create a generic store in the fallthrough.
7772 return build_generic_access();
7773}
7774
7775ReduceResult MaglevGraphBuilder::VisitDefineNamedOwnProperty() {
7776 // DefineNamedOwnProperty <object> <name_index> <slot>
7777 ValueNode* object = LoadRegister(0);
7778 compiler::NameRef name = GetRefOperand<Name>(1);
7779 FeedbackSlot slot = GetSlotOperand(2);
7780 compiler::FeedbackSource feedback_source{feedback(), slot};
7781
7782 const compiler::ProcessedFeedback& processed_feedback =
7784 feedback_source, compiler::AccessMode::kStore, name);
7785
7786 auto build_generic_access = [this, object, &name, &feedback_source]() {
7787 ValueNode* context = GetContext();
7788 ValueNode* value = GetAccumulator();
7790 feedback_source);
7791 return ReduceResult::Done();
7792 };
7793 switch (processed_feedback.kind()) {
7796 DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
7797
7800 object, object, processed_feedback.AsNamedAccess(), feedback_source,
7801 compiler::AccessMode::kDefine, build_generic_access));
7802 break;
7803
7804 default:
7805 break;
7806 }
7807
7808 // Create a generic store in the fallthrough.
7809 return build_generic_access();
7810}
7811
7812ReduceResult MaglevGraphBuilder::VisitSetKeyedProperty() {
7813 // SetKeyedProperty <object> <key> <slot>
7814 ValueNode* object = LoadRegister(0);
7815 FeedbackSlot slot = GetSlotOperand(2);
7816 compiler::FeedbackSource feedback_source{feedback(), slot};
7817
7818 const compiler::ProcessedFeedback& processed_feedback =
7820 feedback_source, compiler::AccessMode::kStore, std::nullopt);
7821
7822 auto build_generic_access = [this, object, &feedback_source]() {
7823 ValueNode* key = LoadRegister(1);
7824 ValueNode* context = GetContext();
7825 ValueNode* value = GetAccumulator();
7826 AddNewNode<SetKeyedGeneric>({context, object, key, value}, feedback_source);
7827 return ReduceResult::Done();
7828 };
7829
7830 switch (processed_feedback.kind()) {
7833 DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
7834
7836 // Get the key without conversion. TryBuildElementAccess will try to pick
7837 // the best representation.
7838 ValueNode* index =
7841 object, index, processed_feedback.AsElementAccess(), feedback_source,
7842 build_generic_access));
7843 } break;
7844
7845 default:
7846 break;
7847 }
7848
7849 // Create a generic store in the fallthrough.
7850 return build_generic_access();
7851}
7852
7853ReduceResult MaglevGraphBuilder::VisitDefineKeyedOwnProperty() {
7854 // DefineKeyedOwnProperty <object> <key> <flags> <slot>
7855 ValueNode* object = LoadRegister(0);
7856 ValueNode* key = LoadRegister(1);
7857 ValueNode* flags = GetSmiConstant(GetFlag8Operand(2));
7858 FeedbackSlot slot = GetSlotOperand(3);
7859 compiler::FeedbackSource feedback_source{feedback(), slot};
7860
7861 // TODO(victorgomes): Add monomorphic fast path.
7862
7863 // Create a generic store in the fallthrough.
7864 ValueNode* context = GetContext();
7865 ValueNode* value = GetAccumulator();
7867 feedback_source);
7868 return ReduceResult::Done();
7869}
7870
7871ReduceResult MaglevGraphBuilder::VisitStaInArrayLiteral() {
7872 // StaInArrayLiteral <object> <index> <slot>
7873 ValueNode* object = LoadRegister(0);
7874 ValueNode* index = LoadRegister(1);
7875 FeedbackSlot slot = GetSlotOperand(2);
7876 compiler::FeedbackSource feedback_source{feedback(), slot};
7877
7878 const compiler::ProcessedFeedback& processed_feedback =
7880 feedback_source, compiler::AccessMode::kStoreInLiteral, std::nullopt);
7881
7882 auto build_generic_access = [this, object, index, &feedback_source]() {
7883 ValueNode* context = GetContext();
7884 ValueNode* value = GetAccumulator();
7886 feedback_source);
7887 return ReduceResult::Done();
7888 };
7889
7890 switch (processed_feedback.kind()) {
7893 DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
7894
7897 object, index, processed_feedback.AsElementAccess(), feedback_source,
7898 build_generic_access));
7899 break;
7900 }
7901
7902 default:
7903 break;
7904 }
7905
7906 // Create a generic store in the fallthrough.
7907 return build_generic_access();
7908}
7909
7910ReduceResult MaglevGraphBuilder::VisitDefineKeyedOwnPropertyInLiteral() {
7911 ValueNode* object = LoadRegister(0);
7912 ValueNode* name = LoadRegister(1);
7913 ValueNode* value = GetAccumulator();
7914 ValueNode* flags = GetSmiConstant(GetFlag8Operand(2));
7915 ValueNode* slot = GetTaggedIndexConstant(GetSlotOperand(3).ToInt());
7916 ValueNode* feedback_vector = GetConstant(feedback());
7917 return BuildCallRuntime(Runtime::kDefineKeyedOwnPropertyInLiteral,
7918 {object, name, value, flags, feedback_vector, slot});
7919}
7920
7921ReduceResult MaglevGraphBuilder::VisitAdd() {
7923}
7924ReduceResult MaglevGraphBuilder::VisitSub() {
7926}
7927ReduceResult MaglevGraphBuilder::VisitMul() {
7929}
7930ReduceResult MaglevGraphBuilder::VisitDiv() {
7932}
7933ReduceResult MaglevGraphBuilder::VisitMod() {
7935}
7936ReduceResult MaglevGraphBuilder::VisitExp() {
7938}
7939ReduceResult MaglevGraphBuilder::VisitBitwiseOr() {
7941}
7942ReduceResult MaglevGraphBuilder::VisitBitwiseXor() {
7944}
7945ReduceResult MaglevGraphBuilder::VisitBitwiseAnd() {
7947}
7948ReduceResult MaglevGraphBuilder::VisitShiftLeft() {
7950}
7951ReduceResult MaglevGraphBuilder::VisitShiftRight() {
7953}
7954ReduceResult MaglevGraphBuilder::VisitShiftRightLogical() {
7956}
7957
7958ReduceResult MaglevGraphBuilder::VisitAddSmi() {
7960}
7961ReduceResult MaglevGraphBuilder::VisitSubSmi() {
7963}
7964ReduceResult MaglevGraphBuilder::VisitMulSmi() {
7966}
7967ReduceResult MaglevGraphBuilder::VisitDivSmi() {
7969}
7970ReduceResult MaglevGraphBuilder::VisitModSmi() {
7972}
7973ReduceResult MaglevGraphBuilder::VisitExpSmi() {
7975}
7976ReduceResult MaglevGraphBuilder::VisitBitwiseOrSmi() {
7978}
7979ReduceResult MaglevGraphBuilder::VisitBitwiseXorSmi() {
7981}
7982ReduceResult MaglevGraphBuilder::VisitBitwiseAndSmi() {
7984}
7985ReduceResult MaglevGraphBuilder::VisitShiftLeftSmi() {
7987}
7988ReduceResult MaglevGraphBuilder::VisitShiftRightSmi() {
7990}
7991ReduceResult MaglevGraphBuilder::VisitShiftRightLogicalSmi() {
7993}
7994
7995ReduceResult MaglevGraphBuilder::VisitInc() {
7997}
7998ReduceResult MaglevGraphBuilder::VisitDec() {
8000}
8001ReduceResult MaglevGraphBuilder::VisitNegate() {
8003}
8004ReduceResult MaglevGraphBuilder::VisitBitwiseNot() {
8006}
8007
8008ReduceResult MaglevGraphBuilder::VisitToBooleanLogicalNot() {
8010 return ReduceResult::Done();
8011}
8012
8014 // TODO(victorgomes): Use NodeInfo to add more type optimizations here.
8015 switch (value->opcode()) {
8016#define CASE(Name) \
8017 case Opcode::k##Name: { \
8018 return GetBooleanConstant( \
8019 !value->Cast<Name>()->ToBoolean(local_isolate())); \
8020 }
8022#undef CASE
8023 default:
8024 return AddNewNode<LogicalNot>({value});
8025 }
8026}
8027
8028ReduceResult MaglevGraphBuilder::VisitLogicalNot() {
8029 // Invariant: accumulator must already be a boolean value.
8031 return ReduceResult::Done();
8032}
8033
8034ReduceResult MaglevGraphBuilder::VisitTypeOf() {
8035 ValueNode* value = GetAccumulator();
8037
8039 TypeOfFeedback::Result feedback = nexus.GetTypeOfFeedback();
8040 switch (feedback) {
8043 DeoptimizeReason::kInsufficientTypeFeedbackForTypeOf);
8046 SetAccumulator(GetRootConstant(RootIndex::knumber_string));
8047 return ReduceResult::Done();
8050 SetAccumulator(GetRootConstant(RootIndex::kstring_string));
8051 return ReduceResult::Done();
8054 GetCheckType(GetType(value)));
8055 EnsureType(value, NodeType::kCallable);
8056 SetAccumulator(GetRootConstant(RootIndex::kfunction_string));
8057 return ReduceResult::Done();
8058 default:
8059 break;
8060 }
8061
8063 return ReduceResult::Done();
8064}
8065
8066ReduceResult MaglevGraphBuilder::VisitDeletePropertyStrict() {
8067 ValueNode* object = LoadRegister(0);
8068 ValueNode* key = GetAccumulator();
8069 ValueNode* context = GetContext();
8072 return ReduceResult::Done();
8073}
8074
8075ReduceResult MaglevGraphBuilder::VisitDeletePropertySloppy() {
8076 ValueNode* object = LoadRegister(0);
8077 ValueNode* key = GetAccumulator();
8078 ValueNode* context = GetContext();
8081 return ReduceResult::Done();
8082}
8083
8084ReduceResult MaglevGraphBuilder::VisitGetSuperConstructor() {
8085 ValueNode* active_function = GetAccumulator();
8086 // TODO(victorgomes): Maybe BuildLoadTaggedField should support constants
8087 // instead.
8088 if (compiler::OptionalHeapObjectRef constant =
8089 TryGetConstant(active_function)) {
8090 compiler::MapRef map = constant->map(broker());
8091 if (map.is_stable()) {
8093 ValueNode* map_proto = GetConstant(map.prototype(broker()));
8095 return ReduceResult::Done();
8096 }
8097 }
8098 ValueNode* map =
8100 ValueNode* map_proto = BuildLoadTaggedField(map, Map::kPrototypeOffset);
8102 return ReduceResult::Done();
8103}
8104
8107 if (!new_target.map(broker()).has_prototype_slot()) return false;
8108 if (!new_target.has_initial_map(broker())) return false;
8109 compiler::MapRef initial_map = new_target.initial_map(broker());
8110 return initial_map.GetConstructor(broker()).equals(constructor);
8111}
8112
8114 ValueNode* this_function, ValueNode* new_target,
8115 std::pair<interpreter::Register, interpreter::Register> result) {
8116 // See also:
8117 // JSNativeContextSpecialization::ReduceJSFindNonDefaultConstructorOrConstruct
8118
8119 compiler::OptionalHeapObjectRef maybe_constant =
8120 TryGetConstant(this_function);
8121 if (!maybe_constant) return false;
8122
8123 compiler::MapRef function_map = maybe_constant->map(broker());
8124 compiler::HeapObjectRef current = function_map.prototype(broker());
8125
8126 // TODO(v8:13091): Don't produce incomplete stack traces when debug is active.
8127 // We already deopt when a breakpoint is set. But it would be even nicer to
8128 // avoid producing incomplete stack traces when debug is active, even if
8129 // there are no breakpoints - then a user inspecting stack traces via Dev
8130 // Tools would always see the full stack trace.
8131
8132 while (true) {
8133 if (!current.IsJSFunction()) return false;
8134 compiler::JSFunctionRef current_function = current.AsJSFunction();
8135
8136 // If there are class fields, bail out. TODO(v8:13091): Handle them here.
8137 if (current_function.shared(broker())
8139 return false;
8140 }
8141
8142 // If there are private methods, bail out. TODO(v8:13091): Handle them here.
8143 if (current_function.context(broker())
8144 .scope_info(broker())
8145 .ClassScopeHasPrivateBrand()) {
8146 return false;
8147 }
8148
8149 FunctionKind kind = current_function.shared(broker()).kind();
8151       // The hierarchy walk will end here; this is the last chance to bail out
8152 // before creating new nodes.
8153 if (!broker()->dependencies()->DependOnArrayIteratorProtector()) {
8154 return false;
8155 }
8156
8157 compiler::OptionalHeapObjectRef new_target_function =
8160 // Store the result register first, so that a lazy deopt in
8161 // `FastNewObject` writes `true` to this register.
8163
8164 ValueNode* object;
8165 if (new_target_function && new_target_function->IsJSFunction() &&
8166 HasValidInitialMap(new_target_function->AsJSFunction(),
8167 current_function)) {
8168 object = BuildInlinedAllocation(
8169 CreateJSConstructor(new_target_function->AsJSFunction()),
8171 } else {
8172 // We've already stored "true" into result.first, so a deopt here just
8173 // has to store result.second.
8174 LazyDeoptResultLocationScope new_location(this, result.second, 1);
8176 {GetConstant(current_function), GetTaggedValue(new_target)});
8177 }
8178 StoreRegister(result.second, object);
8179 } else {
8181 StoreRegister(result.second, GetConstant(current));
8182 }
8183
8185 function_map, WhereToStart::kStartAtReceiver, current_function);
8186 return true;
8187 }
8188
8189 // Keep walking up the class tree.
8190 current = current_function.map(broker()).prototype(broker());
8191 }
8192}
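// A rough JS-level picture of the walk above (illustrative, assuming the
// usual class setup):
//
//   class A { constructor() { /* ... */ } }
//   class B extends A {}                           // default derived ctor
//   class C extends B { constructor() { super(); } }
//
// For C's super() call the walk can skip B and resolve A, the first
// non-default constructor, at compile time, as long as the prototype chain
// stays stable and no class fields or private brands force a bailout.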
8193
8194ReduceResult MaglevGraphBuilder::VisitFindNonDefaultConstructorOrConstruct() {
8195 ValueNode* this_function = LoadRegister(0);
8197
8198 auto register_pair = iterator_.GetRegisterPairOperand(2);
8199
8201 register_pair)) {
8202 return ReduceResult::Done();
8203 }
8204
8205 CallBuiltin* result =
8207 {GetTaggedValue(this_function), GetTaggedValue(new_target)});
8208 StoreRegisterPair(register_pair, result);
8209 return ReduceResult::Done();
8210}
8211
8212namespace {
8213void ForceEscapeIfAllocation(ValueNode* value) {
8214 if (InlinedAllocation* alloc = value->TryCast<InlinedAllocation>()) {
8215 alloc->ForceEscaping();
8216 }
8217}
8218} // namespace
8219
8221 SourcePosition call_site_position, ValueNode* context, ValueNode* function,
8223 DCHECK(is_inline());
8225
8230
8231 if (v8_flags.maglev_print_inlined &&
8233 (v8_flags.print_maglev_code || v8_flags.print_maglev_graph ||
8234 v8_flags.print_maglev_graphs ||
8235 v8_flags.trace_maglev_inlining_verbose)) {
8236 std::cout << "== Inlining " << Brief(*shared.object()) << std::endl;
8237 BytecodeArray::Disassemble(bytecode.object(), std::cout);
8238 if (v8_flags.maglev_print_feedback) {
8239 i::Print(*feedback.object(), std::cout);
8240 }
8241 } else if (v8_flags.trace_maglev_graph_building) {
8242 std::cout << "== Inlining " << shared.object() << std::endl;
8243 }
8244
8245 graph()->inlined_functions().push_back(
8247 shared.object(), bytecode.object(), call_site_position));
8248 if (feedback.object()->invocation_count_before_stable(kRelaxedLoad) >
8249 v8_flags.invocation_count_for_early_optimization) {
8251 }
8252 inlining_id_ = static_cast<int>(graph()->inlined_functions().size() - 1);
8253
8257
8258 // Manually create the prologue of the inner function graph, so that we
8259 // can manually set up the arguments.
8261
8262 // Set receiver.
8264
8265   // The inlined function could call a builtin that iterates the frame, so the
8266   // receiver needs to have been materialized.
8267   // TODO(victorgomes): Can we relax this requirement? Maybe we can allocate the
8268   // object lazily? This is also only required if the inlined function is not a
8269   // leaf (i.e. it calls other functions).
8270 ForceEscapeIfAllocation(caller_details_->arguments[0]);
8271
8272 // Set remaining arguments.
8273 RootConstant* undefined_constant =
8274 GetRootConstant(RootIndex::kUndefinedValue);
8275 int args_count = static_cast<int>(caller_details_->arguments.size()) - 1;
8276 int formal_parameter_count = compilation_unit_->parameter_count() - 1;
8277 for (int i = 0; i < formal_parameter_count; i++) {
8278 ValueNode* arg_value =
8279 i < args_count ? caller_details_->arguments[i + 1] : undefined_constant;
8280 SetArgument(i + 1, arg_value);
8281 }
8282
8284
8285 BuildRegisterFrameInitialization(context, function, new_target);
8287 EndPrologue();
8288 in_prologue_ = false;
8289
8290 // Build the inlined function body.
8291 BuildBody();
8292
8293 // All returns in the inlined body jump to a merge point one past the bytecode
8294 // length (i.e. at offset bytecode.length()). If there isn't one already,
8295 // create a block at this fake offset and have it jump out of the inlined
8296 // function, into a new block that we create which resumes execution of the
8297 // outer function.
8298 if (!current_block_) {
8299 // If we don't have a merge state at the inline_exit_offset, then there is
8300 // no control flow that reaches the end of the inlined function, either
8301     // because of infinite loops or deopts.
8302 if (merge_states_[inline_exit_offset()] == nullptr) {
8303 if (v8_flags.trace_maglev_graph_building) {
8304 std::cout << "== Finished inlining (abort) " << shared.object()
8305 << std::endl;
8306 }
8308 }
8309
8310 ProcessMergePoint(inline_exit_offset(), /*preserve_kna*/ false);
8311 StartNewBlock(inline_exit_offset(), /*predecessor*/ nullptr);
8312 }
8313
8314 if (v8_flags.trace_maglev_graph_building) {
8315 std::cout << "== Finished inlining " << shared.object() << std::endl;
8316 }
8317
8318 // Pull the returned accumulator value out of the inlined function's final
8319 // merged return state.
8321}
8322
8323#define TRACE_INLINING(...) \
8324 do { \
8325 if (v8_flags.trace_maglev_inlining) \
8326 StdoutStream{} << __VA_ARGS__ << std::endl; \
8327 } while (false)
8328
8329#define TRACE_CANNOT_INLINE(...) \
8330 TRACE_INLINING(" cannot inline " << shared << ": " << __VA_ARGS__)
8331
8333 float call_frequency) {
8334 if (graph()->total_inlined_bytecode_size() >
8337 TRACE_CANNOT_INLINE("maximum inlined bytecode size");
8338 return false;
8339 }
8340 // TODO(olivf): This is a temporary stopgap to prevent infinite recursion when
8341   // inlining, because we currently exempt small functions from some of the
8342 // negative heuristics. We should refactor these heuristics and make sure they
8343 // make sense in the presence of (mutually) recursive inlining. Please do
8344 // *not* return true before this check.
8345 if (inlining_depth() > v8_flags.max_maglev_hard_inline_depth) {
8346 TRACE_CANNOT_INLINE("inlining depth ("
8347 << inlining_depth() << ") >= hard-max-depth ("
8348 << v8_flags.max_maglev_hard_inline_depth << ")");
8349 return false;
8350 }
8351 if (compilation_unit_->shared_function_info().equals(shared)) {
8352 TRACE_CANNOT_INLINE("direct recursion");
8353 return false;
8354 }
8355 SharedFunctionInfo::Inlineability inlineability =
8356 shared.GetInlineability(broker());
8358 TRACE_CANNOT_INLINE(inlineability);
8359 return false;
8360 }
8361 // TODO(victorgomes): Support NewTarget/RegisterInput in inlined functions.
8362 compiler::BytecodeArrayRef bytecode = shared.GetBytecodeArray(broker());
8363 if (bytecode.incoming_new_target_or_generator_register().is_valid()) {
8364 TRACE_CANNOT_INLINE("use unsupported NewTargetOrGenerator register");
8365 return false;
8366 }
8367 if (call_frequency < min_inlining_frequency()) {
8368 TRACE_CANNOT_INLINE("call frequency (" << call_frequency
8369 << ") < minimum threshold ("
8370 << min_inlining_frequency() << ")");
8371 return false;
8372 }
8373 if (bytecode.length() > max_inlined_bytecode_size()) {
8374 TRACE_CANNOT_INLINE("big function, size ("
8375 << bytecode.length() << ") >= max-size ("
8376 << max_inlined_bytecode_size() << ")");
8377 return false;
8378 }
8379 return true;
8380}
8381
8384 ->shared_function_info()
8385 .object()
8386 ->PassesFilter(v8_flags.maglev_print_filter);
8387}
8388
8391 compiler::BytecodeArrayRef bytecode = shared.GetBytecodeArray(broker());
8392 if (bytecode.length() < max_inlined_bytecode_size_small()) {
8393 TRACE_INLINING(" greedy inlining "
8394 << shared << ": small function, skipping max-depth");
8395 return true;
8396 }
8397 return false;
8398}
8399
8401 ValueNode* context, ValueNode* function, ValueNode* new_target,
8402#ifdef V8_ENABLE_LEAPTIERING
8403 JSDispatchHandle dispatch_handle,
8404#endif
8407 const compiler::FeedbackSource& feedback_source) {
8409 if (!feedback_cell.feedback_vector(broker())) {
8410 // TODO(verwaest): Soft deopt instead?
8411 TRACE_CANNOT_INLINE("it has not been compiled/run with feedback yet");
8412 return {};
8413 }
8414
8415 float feedback_frequency = 0.0f;
8416 if (feedback_source.IsValid()) {
8417 compiler::ProcessedFeedback const& feedback =
8418 broker()->GetFeedbackForCall(feedback_source);
8419 feedback_frequency =
8420 feedback.IsInsufficient() ? 0.0f : feedback.AsCall().frequency();
8421 }
8422 float call_frequency = feedback_frequency * GetCurrentCallFrequency();
8423
8424 if (!CanInlineCall(shared, call_frequency)) return {};
8425 if (ShouldEagerInlineCall(shared)) {
8426 return BuildEagerInlineCall(context, function, new_target, shared,
8427 feedback_cell, args, call_frequency);
8428 }
8429
8430   // Should we inline the call?
8431 if (inlining_depth() > v8_flags.max_maglev_inline_depth) {
8432 TRACE_CANNOT_INLINE("inlining depth ("
8433 << inlining_depth() << ") >= max-depth ("
8434 << v8_flags.max_maglev_inline_depth << ")");
8435 return {};
8436 }
8437
8438 compiler::BytecodeArrayRef bytecode = shared.GetBytecodeArray(broker());
8440 graph()->add_inlined_bytecode_size(bytecode.length());
8441 return BuildEagerInlineCall(context, function, new_target, shared,
8442 feedback_cell, args, call_frequency);
8443 }
8444
8445 TRACE_INLINING(" considering " << shared << " for inlining");
8446 auto arguments = GetArgumentsAsArrayOfValueNodes(shared, args);
8447 auto generic_call = BuildCallKnownJSFunction(context, function, new_target,
8448#ifdef V8_ENABLE_LEAPTIERING
8449 dispatch_handle,
8450#endif
8451 shared, arguments);
8452
8453 // Note: We point to the generic call exception handler instead of
8454 // jump_targets_ because the former contains a BasicBlockRef that is
8455 // guaranteed to be updated correctly upon exception block creation.
8456 // BuildLoopForPeeling might reset the BasicBlockRef in jump_targets_. If this
8457 // happens, inlined calls within the peeled loop would incorrectly point to
8458 // the loop's exception handler instead of the original call's.
8459 CatchBlockDetails catch_details =
8460 GetTryCatchBlockFromInfo(generic_call->exception_handler_info());
8461 catch_details.deopt_frame_distance++;
8462 float score = call_frequency / bytecode.length();
8465 arguments, &generic_call->lazy_deopt_info()->top_frame(),
8468 /* is_eager_inline */ false, call_frequency},
8469 generic_call, feedback_cell, score);
8470 graph()->inlineable_calls().push_back(call_site);
8471 return generic_call;
8472}
8473
8475 ValueNode* context, ValueNode* function, ValueNode* new_target,
8478 float call_frequency) {
8480
8481 // Merge catch block state if needed.
8482 CatchBlockDetails catch_block_details = GetCurrentTryCatchBlock();
8483 if (catch_block_details.ref &&
8484 catch_block_details.exception_handler_was_used) {
8485 if (IsInsideTryBlock()) {
8486 // Merge the current state into the handler state.
8488 this, compilation_unit_,
8491 }
8492 catch_block_details.deopt_frame_distance++;
8493 }
8494
8495 // Create a new compilation unit.
8497 zone(), compilation_unit_, shared, feedback_cell);
8498
8499 // Propagate details.
8500 auto arguments_vector = GetArgumentsAsArrayOfValueNodes(shared, args);
8501 DeoptFrame* deopt_frame =
8502 GetDeoptFrameForEagerCall(inner_unit, function, arguments_vector);
8504 arguments_vector, deopt_frame,
8506 unobserved_context_slot_stores_, catch_block_details, IsInsideLoop(),
8507 /* is_eager_inline */ true, call_frequency);
8508
8509 // Create a new graph builder for the inlined function.
8510 MaglevGraphBuilder inner_graph_builder(local_isolate_, inner_unit, graph_,
8512
8513 // Set the inner graph builder to build in the current block.
8514 inner_graph_builder.current_block_ = current_block_;
8515
8516 // Build inline function.
8517 ReduceResult result = inner_graph_builder.BuildInlineFunction(
8518 current_source_position_, context, function, new_target);
8519
8520   // Propagate back (or reset) builder state.
8522 inner_graph_builder.unobserved_context_slot_stores_;
8525
8526 if (result.IsDoneWithAbort()) {
8527 DCHECK_NULL(inner_graph_builder.current_block_);
8528 current_block_ = nullptr;
8530 }
8531
8532 // Propagate frame information back to the caller.
8534 inner_graph_builder.current_interpreter_frame_.known_node_aspects());
8536 inner_graph_builder.current_interpreter_frame_.virtual_objects());
8539
8540 // Resume execution using the final block of the inner builder.
8541 current_block_ = inner_graph_builder.current_block_;
8542
8543 DCHECK(result.IsDoneWithValue());
8544 return result;
8545}
8546
8547namespace {
8548
8549bool CanInlineArrayIteratingBuiltin(compiler::JSHeapBroker* broker,
8550 const PossibleMaps& maps,
8551 ElementsKind* kind_return) {
8552 DCHECK_NE(0, maps.size());
8553 *kind_return = maps.at(0).elements_kind();
8554 for (compiler::MapRef map : maps) {
8555 if (!map.supports_fast_array_iteration(broker) ||
8556 !UnionElementsKindUptoSize(kind_return, map.elements_kind())) {
8557 return false;
8558 }
8559 }
8560 return true;
8561}
8562
8563} // namespace
8564
8565MaybeReduceResult MaglevGraphBuilder::TryReduceArrayIsArray(
8566 compiler::JSFunctionRef target, CallArguments& args) {
8567 if (args.count() == 0) return GetBooleanConstant(false);
8568
8569 ValueNode* node = args[0];
8570
8571 if (CheckType(node, NodeType::kJSArray)) {
8572 return GetBooleanConstant(true);
8573 }
8574
8575 auto node_info = known_node_aspects().TryGetInfoFor(node);
8576 if (node_info && node_info->possible_maps_are_known()) {
8577 bool has_array_map = false;
8578 bool has_proxy_map = false;
8579 bool has_other_map = false;
8580 for (compiler::MapRef map : node_info->possible_maps()) {
8581 InstanceType type = map.instance_type();
8582 if (InstanceTypeChecker::IsJSArray(type)) {
8583 has_array_map = true;
8584 } else if (InstanceTypeChecker::IsJSProxy(type)) {
8585 has_proxy_map = true;
8586 } else {
8587 has_other_map = true;
8588 }
8589 }
8590 if ((has_array_map ^ has_other_map) && !has_proxy_map) {
8591 if (has_array_map) node_info->CombineType(NodeType::kJSArray);
8592 return GetBooleanConstant(has_array_map);
8593 }
8594 }
8595
8596 // TODO(verwaest): Add a node that checks the instance type.
8597 return {};
8598}
8599
8600MaybeReduceResult MaglevGraphBuilder::TryReduceArrayForEach(
8601 compiler::JSFunctionRef target, CallArguments& args) {
8602 if (!CanSpeculateCall()) return {};
8603
8604 ValueNode* receiver = args.receiver();
8605 if (!receiver) return {};
8606
8607 if (args.count() < 1) {
8608 if (v8_flags.trace_maglev_graph_building) {
8609 std::cout << " ! Failed to reduce Array.prototype.forEach - not enough "
8610 "arguments"
8611 << std::endl;
8612 }
8613 return {};
8614 }
8615
8616 auto node_info = known_node_aspects().TryGetInfoFor(receiver);
8617 if (!node_info || !node_info->possible_maps_are_known()) {
8618 if (v8_flags.trace_maglev_graph_building) {
8619 std::cout << " ! Failed to reduce Array.prototype.forEach - receiver "
8620 "map is unknown"
8621 << std::endl;
8622 }
8623 return {};
8624 }
8625
8626 ElementsKind elements_kind;
8627 if (!CanInlineArrayIteratingBuiltin(broker(), node_info->possible_maps(),
8628 &elements_kind)) {
8629 if (v8_flags.trace_maglev_graph_building) {
8630 std::cout << " ! Failed to reduce Array.prototype.forEach - doesn't "
8631 "support fast array iteration or incompatible maps"
8632 << std::endl;
8633 }
8634 return {};
8635 }
8636
8637 // TODO(leszeks): May only be needed for holey elements kinds.
8638 if (!broker()->dependencies()->DependOnNoElementsProtector()) {
8639 if (v8_flags.trace_maglev_graph_building) {
8640 std::cout << " ! Failed to reduce Array.prototype.forEach - invalidated "
8641 "no elements protector"
8642 << std::endl;
8643 }
8644 return {};
8645 }
8646
8647 ValueNode* callback = args[0];
8648 if (!callback->is_tagged()) {
8649 if (v8_flags.trace_maglev_graph_building) {
8650 std::cout << " ! Failed to reduce Array.prototype.forEach - callback is "
8651 "untagged value"
8652 << std::endl;
8653 }
8654 return {};
8655 }
8656
8657 auto get_lazy_deopt_scope =
8658 [this](compiler::JSFunctionRef target, ValueNode* receiver,
8659 ValueNode* callback, ValueNode* this_arg, ValueNode* index_int32,
8660 ValueNode* next_index_int32, ValueNode* original_length) {
8661 return DeoptFrameScope(
8662 this, Builtin::kArrayForEachLoopLazyDeoptContinuation, target,
8664 next_index_int32, original_length}));
8665 };
8666
8667 auto get_eager_deopt_scope =
8668 [this](compiler::JSFunctionRef target, ValueNode* receiver,
8669 ValueNode* callback, ValueNode* this_arg, ValueNode* index_int32,
8670 ValueNode* next_index_int32, ValueNode* original_length) {
8671 return DeoptFrameScope(
8672 this, Builtin::kArrayForEachLoopEagerDeoptContinuation, target,
8674 next_index_int32, original_length}));
8675 };
8676
8677 MaybeReduceResult builtin_result = TryReduceArrayIteratingBuiltin(
8678 "Array.prototype.forEach", target, args, get_eager_deopt_scope,
8679 get_lazy_deopt_scope);
8680 if (builtin_result.IsFail() || builtin_result.IsDoneWithAbort()) {
8681 return builtin_result;
8682 }
8683 DCHECK(builtin_result.IsDoneWithoutValue());
8684 return GetRootConstant(RootIndex::kUndefinedValue);
8685}
8686
8687MaybeReduceResult MaglevGraphBuilder::TryReduceArrayMap(
8688 compiler::JSFunctionRef target, CallArguments& args) {
8689 if (!is_turbolev()) {
8690 return {};
8691 }
8692
8693 if (!broker()->dependencies()->DependOnArraySpeciesProtector()) {
8694 if (v8_flags.trace_maglev_graph_building) {
8695 std::cout << " ! Failed to reduce Array.prototype.map - invalidated "
8696 "array species protector"
8697 << std::endl;
8698 }
8699 return {};
8700 }
8701
8702 compiler::NativeContextRef native_context = broker()->target_native_context();
8703 compiler::MapRef holey_smi_map =
8705 compiler::MapRef holey_map =
8706 native_context.GetInitialJSArrayMap(broker(), HOLEY_ELEMENTS);
8707 compiler::MapRef holey_double_map =
8708 native_context.GetInitialJSArrayMap(broker(), HOLEY_DOUBLE_ELEMENTS);
8709
8710 ValueNode* result_array = nullptr;
8711
8712 // We don't need to check the "array constructor inlining" protector (probably
8713   // Turbofan wouldn't need it either for the Array.p.map case, but it does
8714 // anyway). CanInlineArrayIteratingBuiltin allows only fast mode maps, and if
8715 // at runtime we encounter a dictionary mode map, we will deopt. If the
8716 // feedback contains dictionary mode maps to start with, we won't even lower
8717 // Array.prototype.map here, so there's no risk for a deopt loop.
8718
8719 // We always inline the Array ctor here, even if Turbofan doesn't. Since the
8720 // top frame cannot be deopted because of the allocation, we don't need a
8721 // DeoptFrameScope here.
8722
8723 auto initial_callback = [this, &result_array,
8724 holey_smi_map](ValueNode* length_smi) {
8725 ValueNode* elements = AddNewNode<CreateFastArrayElements>(
8726 {length_smi}, AllocationType::kYoung);
8727 VirtualObject* array;
8729 array, CreateJSArray(holey_smi_map, holey_smi_map.instance_size(),
8730 length_smi));
8731 array->set(JSArray::kElementsOffset, elements);
8732 result_array = BuildInlinedAllocation(array, AllocationType::kYoung);
8733 return ReduceResult::Done();
8734 };
8735
8736 auto process_element_callback = [this, &holey_map, &holey_double_map,
8737 &result_array](ValueNode* index_int32,
8738 ValueNode* element) {
8740 {result_array, index_int32, element}, holey_map, holey_double_map);
8741 };
8742
8743 auto get_lazy_deopt_scope = [this, &result_array](
8744 compiler::JSFunctionRef target,
8745 ValueNode* receiver, ValueNode* callback,
8746 ValueNode* this_arg, ValueNode* index_int32,
8747 ValueNode* next_index_int32,
8748 ValueNode* original_length) {
8749 DCHECK_NOT_NULL(result_array);
8750 return DeoptFrameScope(
8751 this, Builtin::kArrayMapLoopLazyDeoptContinuation, target,
8753 index_int32, original_length}));
8754 };
8755
8756 auto get_eager_deopt_scope = [this, &result_array](
8757 compiler::JSFunctionRef target,
8758 ValueNode* receiver, ValueNode* callback,
8759 ValueNode* this_arg, ValueNode* index_int32,
8760 ValueNode* next_index_int32,
8761 ValueNode* original_length) {
8762 DCHECK_NOT_NULL(result_array);
8763 return DeoptFrameScope(
8764 this, Builtin::kArrayMapLoopEagerDeoptContinuation, target,
8766 next_index_int32, original_length}));
8767 };
8768
8769 MaybeReduceResult builtin_result = TryReduceArrayIteratingBuiltin(
8770 "Array.prototype.map", target, args, get_eager_deopt_scope,
8771 get_lazy_deopt_scope, initial_callback, process_element_callback);
8772 if (builtin_result.IsFail() || builtin_result.IsDoneWithAbort()) {
8773 return builtin_result;
8774 }
8775 DCHECK(builtin_result.IsDoneWithoutValue());
8776
8777 // If the result was not fail or abort, the initial callback has successfully
8778 // created the array which we can return now.
8779 DCHECK(result_array);
8780 return result_array;
8781}
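// Sketch of the Array.prototype.map lowering above (illustrative): a result
// array with HOLEY_SMI_ELEMENTS and the receiver's length is allocated up
// front, and each callback result is stored back with a transitioning store
// (towards HOLEY_ELEMENTS / HOLEY_DOUBLE_ELEMENTS as needed), roughly:
//
//   const result = new Array(receiver.length);
//   for (let i = 0; i < receiver.length; i++) {
//     if (!(i in receiver)) continue;              // holes stay holes
//     result[i] = callback.call(thisArg, receiver[i], i, receiver);
//   }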
8782
8784 const char* name, compiler::JSFunctionRef target, CallArguments& args,
8785 GetDeoptScopeCallback get_eager_deopt_scope,
8786 GetDeoptScopeCallback get_lazy_deopt_scope,
8787 const std::optional<InitialCallback>& initial_callback,
8788 const std::optional<ProcessElementCallback>& process_element_callback) {
8789 DCHECK_EQ(initial_callback.has_value(), process_element_callback.has_value());
8790
8791 if (!CanSpeculateCall()) return {};
8792
8793 ValueNode* receiver = args.receiver();
8794 if (!receiver) return {};
8795
8796 if (args.count() < 1) {
8797 if (v8_flags.trace_maglev_graph_building) {
8798 std::cout << " ! Failed to reduce " << name << " - not enough arguments"
8799 << std::endl;
8800 }
8801 return {};
8802 }
8803
8804 auto node_info = known_node_aspects().TryGetInfoFor(receiver);
8805 if (!node_info || !node_info->possible_maps_are_known()) {
8806 if (v8_flags.trace_maglev_graph_building) {
8807 std::cout << " ! Failed to reduce " << name
8808 << " - receiver map is unknown" << std::endl;
8809 }
8810 return {};
8811 }
8812
8813 ElementsKind elements_kind;
8814 if (!CanInlineArrayIteratingBuiltin(broker(), node_info->possible_maps(),
8815 &elements_kind)) {
8816 if (v8_flags.trace_maglev_graph_building) {
8817 std::cout << " ! Failed to reduce " << name
8818 << " - doesn't support fast array iteration or incompatible"
8819 << " maps" << std::endl;
8820 }
8821 return {};
8822 }
8823
8824 // TODO(leszeks): May only be needed for holey elements kinds.
8825 if (!broker()->dependencies()->DependOnNoElementsProtector()) {
8826 if (v8_flags.trace_maglev_graph_building) {
8827 std::cout << " ! Failed to reduce " << name
8828 << " - invalidated no elements protector" << std::endl;
8829 }
8830 return {};
8831 }
8832
8833 ValueNode* callback = args[0];
8834 if (!callback->is_tagged()) {
8835 if (v8_flags.trace_maglev_graph_building) {
8836 std::cout << " ! Failed to reduce " << name
8837 << " - callback is untagged value" << std::endl;
8838 }
8839 return {};
8840 }
8841
8843 args.count() > 1 ? args[1] : GetRootConstant(RootIndex::kUndefinedValue);
8844
8846
8847 if (initial_callback) {
8848 RETURN_IF_ABORT((*initial_callback)(original_length));
8849 }
8850
8851 // Elide the callable check if the node is known callable.
8852 EnsureType(callback, NodeType::kCallable, [&](NodeType old_type) {
8853 // ThrowIfNotCallable is wrapped in a lazy_deopt_scope to make sure the
8854 // exception has the right call stack.
8855 const DeoptFrameScope& lazy_deopt_scope = get_lazy_deopt_scope(
8859 });
8860
8861 ValueNode* original_length_int32 = GetInt32(original_length);
8862
8863   // Remember the receiver map set before entering the loop and the call.
8864 bool receiver_maps_were_unstable = node_info->possible_maps_are_unstable();
8865 PossibleMaps receiver_maps_before_loop(node_info->possible_maps());
8866
8867 // Create a sub graph builder with two variables (index and length).
8868 MaglevSubGraphBuilder sub_builder(this, 2);
8870 MaglevSubGraphBuilder::Variable var_length(1);
8871
8872 MaglevSubGraphBuilder::Label loop_end(&sub_builder, 1);
8873
8874 // ```
8875 // index = 0
8876 // bind loop_header
8877 // ```
8878 sub_builder.set(var_index, GetSmiConstant(0));
8879 sub_builder.set(var_length, original_length);
8881 sub_builder.BeginLoop({&var_index, &var_length});
8882
8883 // Reset known state that is cleared by BeginLoop, but is known to be true on
8884 // the first iteration, and will be re-checked at the end of the loop.
8885
8886 // Reset the known receiver maps if necessary.
8887 if (receiver_maps_were_unstable) {
8888 node_info->SetPossibleMaps(receiver_maps_before_loop,
8889 receiver_maps_were_unstable,
8890 // Node type is monotonic, no need to reset it.
8891 NodeType::kUnknown, broker());
8893 } else {
8894 DCHECK_EQ(node_info->possible_maps().size(),
8895 receiver_maps_before_loop.size());
8896 }
8897 // Reset the cached loaded array length to the length var.
8898 RecordKnownProperty(receiver, broker()->length_string(),
8899 sub_builder.get(var_length), false,
8901
8902 // ```
8903 // if (index_int32 < length_int32)
8904 // fallthrough
8905 // else
8906 // goto end
8907 // ```
8908 Phi* index_tagged = sub_builder.get(var_index)->Cast<Phi>();
8909 EnsureType(index_tagged, NodeType::kSmi);
8910 ValueNode* index_int32 = GetInt32(index_tagged);
8911
8912 sub_builder.GotoIfFalse<BranchIfInt32Compare>(
8913 &loop_end, {index_int32, original_length_int32}, Operation::kLessThan);
8914
8915 // ```
8916 // next_index = index + 1
8917 // ```
8918 ValueNode* next_index_int32 = nullptr;
8919 {
8920 // Eager deopt scope for index increment overflow.
8921 // TODO(pthier): In practice this increment can never overflow, as the max
8922     // possible array length is less than the int32 max value. Add a new
8923 // Int32Increment that asserts no overflow instead of deopting.
8924 DeoptFrameScope eager_deopt_scope =
8925 get_eager_deopt_scope(target, receiver, callback, this_arg, index_int32,
8926 index_int32, original_length);
8927 next_index_int32 = AddNewNode<Int32IncrementWithOverflow>({index_int32});
8928 EnsureType(next_index_int32, NodeType::kSmi);
8929 }
8930 // TODO(leszeks): Assert Smi.
8931
8932 // ```
8933 // element = array.elements[index]
8934 // ```
8936 ValueNode* element;
8937 if (IsDoubleElementsKind(elements_kind)) {
8938 element = BuildLoadFixedDoubleArrayElement(elements, index_int32);
8939 } else {
8940 element = BuildLoadFixedArrayElement(elements, index_int32);
8941 }
8942
8943 std::optional<MaglevSubGraphBuilder::Label> skip_call;
8944 if (IsHoleyElementsKind(elements_kind)) {
8945 // ```
8946 // if (element is hole) goto skip_call
8947 // ```
8948 skip_call.emplace(
8949 &sub_builder, 2,
8950 std::initializer_list<MaglevSubGraphBuilder::Variable*>{&var_length});
8951 if (elements_kind == HOLEY_DOUBLE_ELEMENTS) {
8952 sub_builder.GotoIfTrue<BranchIfFloat64IsHole>(&*skip_call, {element});
8953 } else {
8954 sub_builder.GotoIfTrue<BranchIfRootConstant>(&*skip_call, {element},
8955 RootIndex::kTheHoleValue);
8956 }
8957 }
8958
8959 // ```
8960 // callback(this_arg, element, array)
8961 // ```
8963 {
8964 const DeoptFrameScope& lazy_deopt_scope =
8965 get_lazy_deopt_scope(target, receiver, callback, this_arg, index_int32,
8966 next_index_int32, original_length);
8967 CallArguments call_args =
8968 args.count() < 2
8970 {element, index_tagged, receiver})
8972 {this_arg, element, index_tagged, receiver});
8973
8974 SaveCallSpeculationScope saved(this);
8975 result = ReduceCall(callback, call_args, saved.value());
8976 }
8977
8978 // ```
8979 // index = next_index
8980 // jump loop_header
8981 // ```
8982 DCHECK_IMPLIES(result.IsDoneWithAbort(), current_block_ == nullptr);
8983
8984 // No need to finish the loop if this code is unreachable.
8985 if (!result.IsDoneWithAbort()) {
8986 if (process_element_callback) {
8987 ValueNode* value = result.value();
8988 (*process_element_callback)(index_int32, value);
8989 }
8990
8991 // If any of the receiver's maps were unstable maps, we have to re-check the
8992 // maps on each iteration, in case the callback changed them. That said, we
8993 // know that the maps are valid on the first iteration, so we can rotate the
8994 // check to _after_ the callback, and then elide it if the receiver maps are
8995 // still known to be valid (i.e. the known maps after the call are contained
8996 // inside the known maps before the call).
8997 bool recheck_maps_after_call = receiver_maps_were_unstable;
8998 if (recheck_maps_after_call) {
8999 // No need to recheck maps if there are known maps...
9000 if (auto receiver_info_after_call =
9001 known_node_aspects().TryGetInfoFor(receiver)) {
9002 // ... and those known maps are equal to, or a subset of, the maps
9003 // before the call.
9004 if (receiver_info_after_call &&
9005 receiver_info_after_call->possible_maps_are_known()) {
9006 recheck_maps_after_call = !receiver_maps_before_loop.contains(
9007 receiver_info_after_call->possible_maps());
9008 }
9009 }
9010 }
9011
9012 // Make sure to finish the loop if we eager deopt in the map check or index
9013 // check.
9014 const DeoptFrameScope& eager_deopt_scope =
9015 get_eager_deopt_scope(target, receiver, callback, this_arg, index_int32,
9016 next_index_int32, original_length);
9017
9018 if (recheck_maps_after_call) {
9019 // Build the CheckMap manually, since we're doing it with already known
9020 // maps rather than feedback, and we don't need to update known node
9021 // aspects or types since we're at the end of the loop anyway.
9022 bool emit_check_with_migration = std::any_of(
9023 receiver_maps_before_loop.begin(), receiver_maps_before_loop.end(),
9024 [](compiler::MapRef map) { return map.is_migration_target(); });
9025 if (emit_check_with_migration) {
9027 receiver_maps_before_loop,
9029 } else {
9030 AddNewNode<CheckMaps>({receiver}, receiver_maps_before_loop,
9032 }
9033 }
9034
9035 // Check if the index is still in bounds, in case the callback changed the
9036 // length.
9037 ValueNode* current_length = BuildLoadJSArrayLength(receiver);
9038 sub_builder.set(var_length, current_length);
9039
9040 // Reference compare the loaded length against the original length. If this
9041 // is the same value node, then we didn't have any side effects and didn't
9042 // clear the cached length.
9043 if (current_length != original_length) {
9045 TryBuildCheckInt32Condition(original_length_int32, current_length,
9046 AssertCondition::kUnsignedLessThanEqual,
9047 DeoptimizeReason::kArrayLengthChanged));
9048 }
9049 }
9050
9051 if (skip_call.has_value()) {
9052 sub_builder.GotoOrTrim(&*skip_call);
9053 sub_builder.Bind(&*skip_call);
9054 }
9055
9056 sub_builder.set(var_index, next_index_int32);
9057 sub_builder.EndLoop(&loop_header);
9058
9059 // ```
9060 // bind end
9061 // ```
9062 sub_builder.Bind(&loop_end);
9063
9064 return ReduceResult::Done();
9065}
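// Roughly, the shared reduction above turns `receiver.builtin(callback,
// thisArg)` into an explicit loop over the fast elements (a sketch of the
// emitted control flow, not the literal graph):
//
//   for (let i = 0; i < originalLength; i++) {
//     const element = elements[i];
//     if (isTheHole(element)) continue;            // holey kinds only
//     callback.call(thisArg, element, i, receiver);
//     // afterwards: re-check the receiver maps if they were unstable,
//     // reload the length, and deopt if the array got shorter.
//   }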
9066
9067MaybeReduceResult MaglevGraphBuilder::TryReduceArrayIteratorPrototypeNext(
9069 if (!CanSpeculateCall()) return {};
9070
9071 ValueNode* receiver = args.receiver();
9072 if (!receiver) return {};
9073
9074 if (!receiver->Is<InlinedAllocation>()) return {};
9075 VirtualObject* iterator = receiver->Cast<InlinedAllocation>()->object();
9076 if (!iterator->map().IsJSArrayIteratorMap()) {
9077 FAIL("iterator is not a JS array iterator object");
9078 }
9079
9080 ValueNode* iterated_object =
9081 iterator->get(JSArrayIterator::kIteratedObjectOffset);
9082 ElementsKind elements_kind;
9083 base::SmallVector<compiler::MapRef, 4> maps;
9084 if (iterated_object->Is<InlinedAllocation>()) {
9085 VirtualObject* array = iterated_object->Cast<InlinedAllocation>()->object();
9086 // TODO(victorgomes): Remove this once we track changes in the inlined
9087 // allocated object.
9088 if (iterated_object->Cast<InlinedAllocation>()->IsEscaping()) {
9089 FAIL("allocation is escaping, map could have been changed");
9090 }
9091     // TODO(victorgomes): This effectively disables the optimization for `for-of`
9092     // loops. We need to figure out a way to re-enable this.
9093 if (IsInsideLoop()) {
9094 FAIL("we're inside a loop, iterated object map could change");
9095 }
9096 auto map = array->map();
9097 if (!map.supports_fast_array_iteration(broker())) {
9098 FAIL("no fast array iteration support");
9099 }
9100 elements_kind = map.elements_kind();
9101 maps.push_back(map);
9102 } else {
9103 auto node_info = known_node_aspects().TryGetInfoFor(iterated_object);
9104 if (!node_info || !node_info->possible_maps_are_known()) {
9105 FAIL("iterated object is unknown");
9106 }
9107 if (!CanInlineArrayIteratingBuiltin(broker(), node_info->possible_maps(),
9108 &elements_kind)) {
9109 FAIL("no fast array iteration support or incompatible maps");
9110 }
9111 for (auto map : node_info->possible_maps()) {
9112 maps.push_back(map);
9113 }
9114 }
9115
9116 // TODO(victorgomes): Support typed arrays.
9117 if (IsTypedArrayElementsKind(elements_kind)) {
9118 FAIL("no typed arrays support");
9119 }
9120
9121 if (IsHoleyElementsKind(elements_kind) &&
9122 !broker()->dependencies()->DependOnNoElementsProtector()) {
9123 FAIL("no elements protector");
9124 }
9125
9126 // Load the [[NextIndex]] from the {iterator}.
9127 // We can assume index and length fit in Uint32.
9128 ValueNode* index =
9129 BuildLoadTaggedField(receiver, JSArrayIterator::kNextIndexOffset);
9130 ValueNode* uint32_index;
9131 GET_VALUE_OR_ABORT(uint32_index, GetUint32ElementIndex(index));
9132 ValueNode* uint32_length;
9133 GET_VALUE_OR_ABORT(uint32_length,
9135 iterated_object, IsFastElementsKind(elements_kind)
9136 ? NodeType::kSmi
9137 : NodeType::kNumber)));
9138
9139 // Check next index is below length
9140 MaglevSubGraphBuilder subgraph(this, 2);
9141 MaglevSubGraphBuilder::Variable is_done(0);
9142 MaglevSubGraphBuilder::Variable ret_value(1);
9143 RETURN_IF_ABORT(subgraph.Branch(
9144 {&is_done, &ret_value},
9145 [&](auto& builder) {
9146 return BuildBranchIfUint32Compare(builder, Operation::kLessThan,
9147 uint32_index, uint32_length);
9148 },
9149 [&] {
9150 ValueNode* int32_index = GetInt32(uint32_index);
9151 subgraph.set(is_done, GetBooleanConstant(false));
9152 DCHECK(
9153 iterator->get(JSArrayIterator::kKindOffset)->Is<Int32Constant>());
9154 IterationKind iteration_kind = static_cast<IterationKind>(
9155 iterator->get(JSArrayIterator::kKindOffset)
9156 ->Cast<Int32Constant>()
9157 ->value());
9158 if (iteration_kind == IterationKind::kKeys) {
9159 subgraph.set(ret_value, index);
9160 } else {
9161 ValueNode* value;
9162 GET_VALUE_OR_ABORT(
9163 value,
9164 TryBuildElementLoadOnJSArrayOrJSObject(
9165 iterated_object, int32_index, base::VectorOf(maps),
9166 elements_kind, KeyedAccessLoadMode::kHandleOOBAndHoles));
9167 if (iteration_kind == IterationKind::kEntries) {
9168 ValueNode* key_value_array;
9169 GET_VALUE_OR_ABORT(key_value_array,
9170 BuildAndAllocateKeyValueArray(index, value));
9171 subgraph.set(ret_value, key_value_array);
9172 } else {
9173 subgraph.set(ret_value, value);
9174 }
9175 }
9176 // Add 1 to index
9177 ValueNode* next_index = AddNewNode<Int32AddWithOverflow>(
9178 {int32_index, GetInt32Constant(1)});
9179 EnsureType(next_index, NodeType::kSmi);
9180 // Update [[NextIndex]]
9181 BuildStoreTaggedFieldNoWriteBarrier(receiver, next_index,
9182 JSArrayIterator::kNextIndexOffset,
9184 return ReduceResult::Done();
9185 },
9186 [&] {
 9187 // Index is greater than or equal to the length.
9188 subgraph.set(is_done, GetBooleanConstant(true));
9189 subgraph.set(ret_value, GetRootConstant(RootIndex::kUndefinedValue));
9190 if (!IsTypedArrayElementsKind(elements_kind)) {
9191 // Mark the {iterator} as exhausted by setting the [[NextIndex]] to a
9192 // value that will never pass the length check again (aka the maximum
9193 // value possible for the specific iterated object). Note that this is
9194 // different from what the specification says, which is changing the
9195 // [[IteratedObject]] field to undefined, but that makes it difficult
9196 // to eliminate the map checks and "length" accesses in for..of loops.
9197 //
 9198 // This is not necessary for JSTypedArrays, since their length cannot
 9199 // change later, and so if we were ever out of bounds for them we will
 9200 // stay out of bounds forever.
9201 BuildStoreTaggedField(receiver, GetFloat64Constant(kMaxUInt32),
9202 JSArrayIterator::kNextIndexOffset,
9204 }
9205 return ReduceResult::Done();
9206 }));
9207
9208 // Allocate result object and return.
9209 compiler::MapRef map =
9210 broker()->target_native_context().iterator_result_map(broker());
9211 VirtualObject* iter_result = CreateJSIteratorResult(
9212 map, subgraph.get(ret_value), subgraph.get(is_done));
9213 ValueNode* allocation =
9214 BuildInlinedAllocation(iter_result, AllocationType::kYoung);
9215 return allocation;
9216}
9217
9218MaybeReduceResult MaglevGraphBuilder::TryReduceArrayPrototypeEntries(
9219 compiler::JSFunctionRef target, CallArguments& args) {
9220 if (!CanSpeculateCall()) return {};
9221 ValueNode* receiver = GetValueOrUndefined(args.receiver());
9222 if (!CheckType(receiver, NodeType::kJSReceiver)) {
9223 return {};
9224 }
9226}
9227
9228MaybeReduceResult MaglevGraphBuilder::TryReduceArrayPrototypeKeys(
9229 compiler::JSFunctionRef target, CallArguments& args) {
9230 if (!CanSpeculateCall()) return {};
9231 ValueNode* receiver = GetValueOrUndefined(args.receiver());
9232 if (!CheckType(receiver, NodeType::kJSReceiver)) {
9233 return {};
9234 }
9236}
9237
9238MaybeReduceResult MaglevGraphBuilder::TryReduceArrayPrototypeValues(
9239 compiler::JSFunctionRef target, CallArguments& args) {
9240 if (!CanSpeculateCall()) return {};
9241 ValueNode* receiver = GetValueOrUndefined(args.receiver());
9242 if (!CheckType(receiver, NodeType::kJSReceiver)) {
9243 return {};
9244 }
9246}
9247
9248MaybeReduceResult MaglevGraphBuilder::TryReduceStringFromCharCode(
9249 compiler::JSFunctionRef target, CallArguments& args) {
9250 if (!CanSpeculateCall()) return {};
9251 if (args.count() != 1) return {};
9253 args[0], NodeType::kNumberOrOddball,
9255}
9256
9257MaybeReduceResult MaglevGraphBuilder::TryReduceStringPrototypeCharCodeAt(
9258 compiler::JSFunctionRef target, CallArguments& args) {
9259 if (!CanSpeculateCall()) return {};
9260 ValueNode* receiver = GetValueOrUndefined(args.receiver());
9261 ValueNode* index;
9262 if (args.count() == 0) {
9263 // Index is the undefined object. ToIntegerOrInfinity(undefined) = 0.
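 // For example, "abc".charCodeAt() behaves like "abc".charCodeAt(0) and
 // returns 97.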
9264 index = GetInt32Constant(0);
9265 } else {
9266 index = GetInt32ElementIndex(args[0]);
9267 }
9268 // Any other argument is ignored.
9269
9270 // Try to constant-fold if receiver and index are constant
9271 if (auto cst = TryGetConstant(receiver)) {
9272 if (cst->IsString() && index->Is<Int32Constant>()) {
9273 compiler::StringRef str = cst->AsString();
9274 int idx = index->Cast<Int32Constant>()->value();
9275 if (idx >= 0 && static_cast<uint32_t>(idx) < str.length()) {
9276 if (std::optional<uint16_t> value = str.GetChar(broker(), idx)) {
9277 return GetSmiConstant(*value);
9278 }
9279 }
9280 }
9281 }
9282
9283 // Ensure that {receiver} is actually a String.
9285 // And index is below length.
9286 ValueNode* length = BuildLoadStringLength(receiver);
9288 index, length, AssertCondition::kUnsignedLessThan,
9289 DeoptimizeReason::kOutOfBounds));
9291 {receiver, index},
9293}
9294
9295MaybeReduceResult MaglevGraphBuilder::TryReduceStringPrototypeCodePointAt(
9296 compiler::JSFunctionRef target, CallArguments& args) {
9297 if (!CanSpeculateCall()) return {};
9298 ValueNode* receiver = GetValueOrUndefined(args.receiver());
9299 ValueNode* index;
9300 if (args.count() == 0) {
9301 // Index is the undefined object. ToIntegerOrInfinity(undefined) = 0.
9302 index = GetInt32Constant(0);
9303 } else {
9304 index = GetInt32ElementIndex(args[0]);
9305 }
9306 // Any other argument is ignored.
9307 // Ensure that {receiver} is actually a String.
9309 // And index is below length.
9310 ValueNode* length = BuildLoadStringLength(receiver);
9312 index, length, AssertCondition::kUnsignedLessThan,
9313 DeoptimizeReason::kOutOfBounds));
9315 {receiver, index},
9317}
9318
9319MaybeReduceResult MaglevGraphBuilder::TryReduceStringPrototypeIterator(
9320 compiler::JSFunctionRef target, CallArguments& args) {
9321 if (!CanSpeculateCall()) return {};
9322 ValueNode* receiver = GetValueOrUndefined(args.receiver());
9323 // Ensure that {receiver} is actually a String.
9325 compiler::MapRef map =
9326 broker()->target_native_context().initial_string_iterator_map(broker());
9327 VirtualObject* string_iterator = CreateJSStringIterator(map, receiver);
9328 ValueNode* allocation =
9330 return allocation;
9331}
9332
9333#ifdef V8_INTL_SUPPORT
9334
9335MaybeReduceResult MaglevGraphBuilder::TryReduceStringPrototypeLocaleCompareIntl(
9336 compiler::JSFunctionRef target, CallArguments& args) {
9337 if (args.count() < 1 || args.count() > 3) return {};
9338
9339 LocalFactory* factory = local_isolate()->factory();
9340 compiler::ObjectRef undefined_ref = broker()->undefined_value();
9341
9342 DirectHandle<Object> locales_handle;
9343 ValueNode* locales_node = nullptr;
9344 if (args.count() > 1) {
9345 compiler::OptionalHeapObjectRef maybe_locales = TryGetConstant(args[1]);
9346 if (!maybe_locales) return {};
9347 compiler::HeapObjectRef locales = maybe_locales.value();
9348 if (locales.equals(undefined_ref)) {
9349 locales_handle = factory->undefined_value();
9350 locales_node = GetRootConstant(RootIndex::kUndefinedValue);
9351 } else {
9352 if (!locales.IsString()) return {};
9353 compiler::StringRef sref = locales.AsString();
9354 std::optional<Handle<String>> maybe_locales_handle =
9355 sref.ObjectIfContentAccessible(broker());
9356 if (!maybe_locales_handle) return {};
9357 locales_handle = *maybe_locales_handle;
9358 locales_node = args[1];
9359 }
9360 } else {
9361 locales_handle = factory->undefined_value();
9362 locales_node = GetRootConstant(RootIndex::kUndefinedValue);
9363 }
9364
9365 if (args.count() > 2) {
9366 compiler::OptionalHeapObjectRef maybe_options = TryGetConstant(args[2]);
9367 if (!maybe_options) return {};
9368 if (!maybe_options.value().equals(undefined_ref)) return {};
9369 }
9370
9371 DCHECK(!locales_handle.is_null());
9372 DCHECK_NOT_NULL(locales_node);
9373
9374 if (Intl::CompareStringsOptionsFor(local_isolate(), locales_handle,
9375 factory->undefined_value()) !=
9377 return {};
9378 }
9380 {GetConstant(target),
9382 GetTaggedValue(args[0]), GetTaggedValue(locales_node)});
9383}
9384
9385#endif // V8_INTL_SUPPORT
9386
9387#ifdef V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA
9388MaybeReduceResult
9389MaglevGraphBuilder::TryReduceGetContinuationPreservedEmbedderData(
9390 compiler::JSFunctionRef target, CallArguments& args) {
9392}
9393
9394MaybeReduceResult
9395MaglevGraphBuilder::TryReduceSetContinuationPreservedEmbedderData(
9396 compiler::JSFunctionRef target, CallArguments& args) {
9397 if (args.count() == 0) return {};
9398
9400 return GetRootConstant(RootIndex::kUndefinedValue);
9401}
9402#endif // V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA
9403
9404template <typename LoadNode>
9405MaybeReduceResult MaglevGraphBuilder::TryBuildLoadDataView(
 9406 const CallArguments& args, ExternalArrayType type) {
9407 if (!CanSpeculateCall()) return {};
9408 if (!broker()->dependencies()->DependOnArrayBufferDetachingProtector()) {
 9409 // TODO(victorgomes): Add checks for whether the array has been detached.
9410 return {};
9411 }
9412 // TODO(victorgomes): Add data view to known types.
9415 JS_DATA_VIEW_TYPE, JS_DATA_VIEW_TYPE);
9416 // TODO(v8:11111): Optimize for JS_RAB_GSAB_DATA_VIEW_TYPE too.
9417 ValueNode* offset =
9420 ValueNode* is_little_endian = args[1] ? args[1] : GetBooleanConstant(false);
9421 return AddNewNode<LoadNode>({receiver, offset, is_little_endian}, type);
9422}
9423
9424template <typename StoreNode, typename Function>
9425MaybeReduceResult MaglevGraphBuilder::TryBuildStoreDataView(
 9426 const CallArguments& args, ExternalArrayType type, Function&& getValue) {
9427 if (!CanSpeculateCall()) return {};
9428 if (!broker()->dependencies()->DependOnArrayBufferDetachingProtector()) {
 9429 // TODO(victorgomes): Add checks for whether the array has been detached.
9430 return {};
9431 }
9432 // TODO(victorgomes): Add data view to known types.
9435 JS_DATA_VIEW_TYPE, JS_DATA_VIEW_TYPE);
9436 // TODO(v8:11111): Optimize for JS_RAB_GSAB_DATA_VIEW_TYPE too.
9437 ValueNode* offset =
9441 ValueNode* value = getValue(args[1]);
9442 ValueNode* is_little_endian = args[2] ? args[2] : GetBooleanConstant(false);
9443 AddNewNode<StoreNode>({receiver, offset, value, is_little_endian}, type);
9444 return GetRootConstant(RootIndex::kUndefinedValue);
9445}
9446
9447MaybeReduceResult MaglevGraphBuilder::TryReduceDataViewPrototypeGetInt8(
9451}
9452MaybeReduceResult MaglevGraphBuilder::TryReduceDataViewPrototypeSetInt8(
9453 compiler::JSFunctionRef target, CallArguments& args) {
9456 [&](ValueNode* value) { return value ? value : GetInt32Constant(0); });
9457}
9458MaybeReduceResult MaglevGraphBuilder::TryReduceDataViewPrototypeGetInt16(
9459 compiler::JSFunctionRef target, CallArguments& args) {
9462}
9463MaybeReduceResult MaglevGraphBuilder::TryReduceDataViewPrototypeSetInt16(
9464 compiler::JSFunctionRef target, CallArguments& args) {
9467 [&](ValueNode* value) { return value ? value : GetInt32Constant(0); });
9468}
9469MaybeReduceResult MaglevGraphBuilder::TryReduceDataViewPrototypeGetInt32(
9470 compiler::JSFunctionRef target, CallArguments& args) {
9473}
9474MaybeReduceResult MaglevGraphBuilder::TryReduceDataViewPrototypeSetInt32(
9475 compiler::JSFunctionRef target, CallArguments& args) {
9478 [&](ValueNode* value) { return value ? value : GetInt32Constant(0); });
9479}
9480MaybeReduceResult MaglevGraphBuilder::TryReduceDataViewPrototypeGetFloat64(
9481 compiler::JSFunctionRef target, CallArguments& args) {
9484}
9485MaybeReduceResult MaglevGraphBuilder::TryReduceDataViewPrototypeSetFloat64(
9486 compiler::JSFunctionRef target, CallArguments& args) {
9488 args, ExternalArrayType::kExternalFloat64Array, [&](ValueNode* value) {
9489 return value ? GetHoleyFloat64ForToNumber(
9490 value, NodeType::kNumberOrOddball,
9493 std::numeric_limits<double>::quiet_NaN());
9494 });
9495}
9496
9497MaybeReduceResult MaglevGraphBuilder::TryReduceFunctionPrototypeCall(
9498 compiler::JSFunctionRef target, CallArguments& args) {
9499 // We can't reduce Function#call when there is no receiver function.
9500 if (args.receiver_mode() == ConvertReceiverMode::kNullOrUndefined) {
9501 return {};
9502 }
9503 ValueNode* receiver = GetValueOrUndefined(args.receiver());
9504 args.PopReceiver(ConvertReceiverMode::kAny);
9505
9506 SaveCallSpeculationScope saved(this);
9507 return ReduceCall(receiver, args, saved.value());
9508}
9509
9510MaybeReduceResult MaglevGraphBuilder::TryReduceFunctionPrototypeApply(
9511 compiler::JSFunctionRef target, CallArguments& args) {
9512 compiler::OptionalHeapObjectRef maybe_receiver;
9514 const compiler::ProcessedFeedback& processed_feedback =
9516 DCHECK_EQ(processed_feedback.kind(), compiler::ProcessedFeedback::kCall);
9517 const compiler::CallFeedback& call_feedback = processed_feedback.AsCall();
9518 if (call_feedback.call_feedback_content() ==
9520 maybe_receiver = call_feedback.target();
9521 }
9522 }
9524 maybe_receiver, args, current_speculation_feedback_);
9525}
9526
9527namespace {
9528
9529template <size_t MaxKindCount, typename KindsToIndexFunc>
9530bool CanInlineArrayResizingBuiltin(
9531 compiler::JSHeapBroker* broker, const PossibleMaps& possible_maps,
9532 std::array<SmallZoneVector<compiler::MapRef, 2>, MaxKindCount>& map_kinds,
9533 KindsToIndexFunc&& elements_kind_to_index, int* unique_kind_count,
9534 bool is_loading) {
9535 uint8_t kind_bitmap = 0;
9536 for (compiler::MapRef map : possible_maps) {
9537 if (!map.supports_fast_array_resize(broker)) {
9538 return false;
9539 }
9540 ElementsKind kind = map.elements_kind();
9541 if (is_loading && kind == HOLEY_DOUBLE_ELEMENTS) {
9542 return false;
9543 }
9544 // Group maps by elements kind, using the provided function to translate
9545 // elements kinds to indices.
9546 // kind_bitmap is used to get the unique kinds (predecessor count for the
9547 // next block).
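 // For example, if the possible maps map to kind indices {0, 2}, kind_bitmap
 // ends up as 0b101 and *unique_kind_count is 2.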
9548 uint8_t kind_index = elements_kind_to_index(kind);
9549 kind_bitmap |= 1 << kind_index;
9550 map_kinds[kind_index].push_back(map);
9551 }
9552
9553 *unique_kind_count = base::bits::CountPopulation(kind_bitmap);
9554 DCHECK_GE(*unique_kind_count, 1);
9555 return true;
9556}
9557
9558} // namespace
9559
9560template <typename MapKindsT, typename IndexToElementsKindFunc,
9561 typename BuildKindSpecificFunc>
9562MaybeReduceResult
9564 ValueNode* receiver, const MapKindsT& map_kinds,
9565 MaglevSubGraphBuilder& sub_graph,
9566 std::optional<MaglevSubGraphBuilder::Label>& do_return,
9567 int unique_kind_count, IndexToElementsKindFunc&& index_to_elements_kind,
9568 BuildKindSpecificFunc&& build_kind_specific) {
9569 // TODO(pthier): Support map packing.
9571 ValueNode* receiver_map =
9573 int emitted_kind_checks = 0;
9574 bool any_successful = false;
9575 for (size_t kind_index = 0; kind_index < map_kinds.size(); kind_index++) {
9576 const auto& maps = map_kinds[kind_index];
9577 // Skip kinds we haven't observed.
9578 if (maps.empty()) continue;
9579 ElementsKind kind = index_to_elements_kind(kind_index);
9580 // Create branches for all but the last elements kind. We don't need
9581 // to check the maps of the last kind, as all possible maps have already
9582 // been checked when the property (builtin name) was loaded.
9583 if (++emitted_kind_checks < unique_kind_count) {
9584 MaglevSubGraphBuilder::Label check_next_map(&sub_graph, 1);
9585 std::optional<MaglevSubGraphBuilder::Label> do_push;
9586 if (maps.size() > 1) {
9587 do_push.emplace(&sub_graph, static_cast<int>(maps.size()));
9588 for (size_t map_index = 1; map_index < maps.size(); map_index++) {
9590 &*do_push, {receiver_map, GetConstant(maps[map_index])});
9591 }
9592 }
9594 &check_next_map, {receiver_map, GetConstant(maps[0])});
9595 if (do_push.has_value()) {
9596 sub_graph.Goto(&*do_push);
9597 sub_graph.Bind(&*do_push);
9598 }
9599 if (!build_kind_specific(kind).IsDoneWithAbort()) {
9600 any_successful = true;
9601 }
9602 DCHECK(do_return.has_value());
9603 sub_graph.GotoOrTrim(&*do_return);
9604 sub_graph.Bind(&check_next_map);
9605 } else {
9606 if (!build_kind_specific(kind).IsDoneWithAbort()) {
9607 any_successful = true;
9608 }
9609 if (do_return.has_value()) {
9610 sub_graph.GotoOrTrim(&*do_return);
9611 }
9612 }
9613 }
9614 DCHECK_IMPLIES(!any_successful, !current_block_);
9615 return any_successful ? ReduceResult::Done() : ReduceResult::DoneWithAbort();
9616}
9617
9618namespace {
9619bool AllOfInstanceTypesUnsafe(const PossibleMaps& maps,
9620 std::function<bool(InstanceType)> f) {
9621 auto instance_type = [f](compiler::MapRef map) {
9622 return f(map.instance_type());
9623 };
9624 return std::all_of(maps.begin(), maps.end(), instance_type);
9625}
9626bool AllOfInstanceTypesAre(const PossibleMaps& maps, InstanceType type) {
9627 CHECK(!InstanceTypeChecker::IsString(type));
9628 return AllOfInstanceTypesUnsafe(
9629 maps, [type](InstanceType other) { return type == other; });
9630}
9631} // namespace
9632
9633MaybeReduceResult MaglevGraphBuilder::TryReduceMapPrototypeGet(
9634 compiler::JSFunctionRef target, CallArguments& args) {
9635 if (!CanSpeculateCall()) return {};
9636 if (!is_turbolev()) {
9637 // TODO(dmercadier): consider also doing this lowering for Maglev. This is
9638 // currently not done, because to be efficient, this lowering would need to
9639 // inline FindOrderedHashMapEntryInt32Key (cf
9640 // turboshaft/machine-lowering-reducer-inl.h), which might be a bit too
9641 // low-level for Maglev.
9642 return {};
9643 }
9644
9645 if (args.receiver_mode() == ConvertReceiverMode::kNullOrUndefined) {
9646 if (v8_flags.trace_maglev_graph_building) {
9647 std::cout << " ! Failed to reduce Map.prototype.Get - no receiver"
9648 << std::endl;
9649 }
9650 return {};
9651 }
9652 if (args.count() != 1) {
9653 if (v8_flags.trace_maglev_graph_building) {
9654 std::cout << " ! Failed to reduce Map.prototype.Get - invalid "
9655 "argument count"
9656 << std::endl;
9657 }
9658 return {};
9659 }
9660
9661 ValueNode* receiver = GetValueOrUndefined(args.receiver());
9662 const NodeInfo* receiver_info = known_node_aspects().TryGetInfoFor(receiver);
9663 // If the map set is not found, then we don't know anything about the map of
9664 // the receiver, so bail.
9665 if (!receiver_info || !receiver_info->possible_maps_are_known()) {
9666 if (v8_flags.trace_maglev_graph_building) {
9667 std::cout
9668 << " ! Failed to reduce Map.prototype.Get - unknown receiver map"
9669 << std::endl;
9670 }
9671 return {};
9672 }
9673
9674 const PossibleMaps& possible_receiver_maps = receiver_info->possible_maps();
9675 // If the set of possible maps is empty, then there's no possible map for this
9676 // receiver, therefore this path is unreachable at runtime. We're unlikely to
 9677 // ever hit this case, as BuildCheckMaps should already unconditionally deopt,
9678 // but check it in case another checking operation fails to statically
9679 // unconditionally deopt.
9680 if (possible_receiver_maps.is_empty()) {
9681 // TODO(leszeks): Add an unreachable assert here.
9683 }
9684
9685 if (!AllOfInstanceTypesAre(possible_receiver_maps, JS_MAP_TYPE)) {
9686 if (v8_flags.trace_maglev_graph_building) {
9687 std::cout
9688 << " ! Failed to reduce Map.prototype.Get - wrong receiver maps "
9689 << std::endl;
9690 }
9691 return {};
9692 }
9693
9694 ValueNode* key = args[0];
9695 ValueNode* table = BuildLoadTaggedField(receiver, JSCollection::kTableOffset);
9696
9697 ValueNode* entry;
9698 auto key_info = known_node_aspects().TryGetInfoFor(key);
9699 if (key_info && key_info->alternative().int32()) {
9701 {table, key_info->alternative().int32()});
9702 } else {
9703 entry = AddNewNode<MapPrototypeGet>({table, key});
9704 }
9705
9706 return entry;
9707}
9708
9709MaybeReduceResult MaglevGraphBuilder::TryReduceSetPrototypeHas(
9710 compiler::JSFunctionRef target, CallArguments& args) {
9711 if (!CanSpeculateCall()) return {};
9712 if (!is_turbolev()) {
9713 // See the comment in TryReduceMapPrototypeGet.
9714 return {};
9715 }
9716
9717 ValueNode* receiver = args.receiver();
9718 if (!receiver) return {};
9719
9720 if (args.count() != 1) {
9721 if (v8_flags.trace_maglev_graph_building) {
9722 std::cout << " ! Failed to reduce Set.prototype.has - invalid "
9723 "argument count"
9724 << std::endl;
9725 }
9726 return {};
9727 }
9728
9729 auto node_info = known_node_aspects().TryGetInfoFor(receiver);
9730 if (!node_info || !node_info->possible_maps_are_known()) {
9731 if (v8_flags.trace_maglev_graph_building) {
9732 std::cout << " ! Failed to reduce Set.prototype.has"
9733 << " - receiver map is unknown" << std::endl;
9734 }
9735 return {};
9736 }
9737
9738 const PossibleMaps& possible_receiver_maps = node_info->possible_maps();
9739 // If the set of possible maps is empty, then there's no possible map for this
9740 // receiver, therefore this path is unreachable at runtime. We're unlikely to
 9741 // ever hit this case, as BuildCheckMaps should already unconditionally deopt,
9742 // but check it in case another checking operation fails to statically
9743 // unconditionally deopt.
9744 if (possible_receiver_maps.is_empty()) {
9745 // TODO(leszeks): Add an unreachable assert here.
9747 }
9748
9749 if (!AllOfInstanceTypesAre(possible_receiver_maps, JS_SET_TYPE)) {
9750 if (v8_flags.trace_maglev_graph_building) {
9751 std::cout
9752 << " ! Failed to reduce Set.prototype.has - wrong receiver maps "
9753 << std::endl;
9754 }
9755 return {};
9756 }
9757
9758 ValueNode* key = args[0];
9759 ValueNode* table = BuildLoadTaggedField(receiver, JSCollection::kTableOffset);
9760
9761 return AddNewNode<SetPrototypeHas>({table, key});
9762}
9763
9764MaybeReduceResult MaglevGraphBuilder::TryReduceArrayPrototypePush(
9765 compiler::JSFunctionRef target, CallArguments& args) {
9766 if (!CanSpeculateCall()) return {};
 9767 // We can't reduce Array.prototype.push when there is no receiver.
9768 if (args.receiver_mode() == ConvertReceiverMode::kNullOrUndefined) {
9769 if (v8_flags.trace_maglev_graph_building) {
9770 std::cout << " ! Failed to reduce Array.prototype.push - no receiver"
9771 << std::endl;
9772 }
9773 return {};
9774 }
9775 // TODO(pthier): Support multiple arguments.
9776 if (args.count() != 1) {
9777 if (v8_flags.trace_maglev_graph_building) {
9778 std::cout << " ! Failed to reduce Array.prototype.push - invalid "
9779 "argument count"
9780 << std::endl;
9781 }
9782 return {};
9783 }
9784 ValueNode* receiver = GetValueOrUndefined(args.receiver());
9785
9786 auto node_info = known_node_aspects().TryGetInfoFor(receiver);
9787 // If the map set is not found, then we don't know anything about the map of
9788 // the receiver, so bail.
9789 if (!node_info || !node_info->possible_maps_are_known()) {
9790 if (v8_flags.trace_maglev_graph_building) {
9791 std::cout
9792 << " ! Failed to reduce Array.prototype.push - unknown receiver map"
9793 << std::endl;
9794 }
9795 return {};
9796 }
9797
9798 const PossibleMaps& possible_maps = node_info->possible_maps();
9799 // If the set of possible maps is empty, then there's no possible map for this
9800 // receiver, therefore this path is unreachable at runtime. We're unlikely to
 9801 // ever hit this case, as BuildCheckMaps should already unconditionally deopt,
9802 // but check it in case another checking operation fails to statically
9803 // unconditionally deopt.
9804 if (possible_maps.is_empty()) {
9805 // TODO(leszeks): Add an unreachable assert here.
9807 }
9808
9809 if (!broker()->dependencies()->DependOnNoElementsProtector()) {
9810 if (v8_flags.trace_maglev_graph_building) {
9811 std::cout << " ! Failed to reduce Array.prototype.push - "
9812 "NoElementsProtector invalidated"
9813 << std::endl;
9814 }
9815 return {};
9816 }
9817
9818 // Check that inlining resizing array builtins is supported and group maps
9819 // by elements kind.
9820 std::array<SmallZoneVector<compiler::MapRef, 2>, 3> map_kinds = {
9821 SmallZoneVector<compiler::MapRef, 2>(zone()),
9822 SmallZoneVector<compiler::MapRef, 2>(zone()),
9823 SmallZoneVector<compiler::MapRef, 2>(zone())};
9824 // Function to group maps by elements kind, ignoring packedness. Packedness
9825 // doesn't matter for push().
9826 // Kinds we care about are all paired in the first 6 values of ElementsKind,
9827 // so we can use integer division to truncate holeyness.
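 // For example (a sketch, assuming the standard fast ElementsKind ordering
 // checked by the static_asserts below):
 //   PACKED_SMI_ELEMENTS (0), HOLEY_SMI_ELEMENTS (1)       -> index 0
 //   PACKED_ELEMENTS (2), HOLEY_ELEMENTS (3)               -> index 1
 //   PACKED_DOUBLE_ELEMENTS (4), HOLEY_DOUBLE_ELEMENTS (5) -> index 2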
9828 auto elements_kind_to_index = [&](ElementsKind kind) {
9829 static_assert(kFastElementsKindCount <= 6);
9830 static_assert(kFastElementsKindPackedToHoley == 1);
9831 return static_cast<uint8_t>(kind) / 2;
9832 };
9833 auto index_to_elements_kind = [&](uint8_t kind_index) {
9834 return static_cast<ElementsKind>(kind_index * 2);
9835 };
9836 int unique_kind_count;
9837 if (!CanInlineArrayResizingBuiltin(broker(), possible_maps, map_kinds,
9838 elements_kind_to_index, &unique_kind_count,
9839 false)) {
9840 if (v8_flags.trace_maglev_graph_building) {
9841 std::cout << " ! Failed to reduce Array.prototype.push - Map doesn't "
9842 "support fast resizing"
9843 << std::endl;
9844 }
9845 return {};
9846 }
9847
9848 MaglevSubGraphBuilder sub_graph(this, 0);
9849
9850 std::optional<MaglevSubGraphBuilder::Label> do_return;
9851 if (unique_kind_count > 1) {
9852 do_return.emplace(&sub_graph, unique_kind_count);
9853 }
9854
9855 ValueNode* old_array_length_smi;
9856 GET_VALUE_OR_ABORT(old_array_length_smi,
9858 ValueNode* old_array_length =
9859 AddNewNode<UnsafeSmiUntag>({old_array_length_smi});
9860 ValueNode* new_array_length_smi =
9861 AddNewNode<CheckedSmiIncrement>({old_array_length_smi});
9862
9863 ValueNode* elements_array = BuildLoadElements(receiver);
9864 ValueNode* elements_array_length = BuildLoadFixedArrayLength(elements_array);
9865
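 // A sketch (in pseudocode) of the per-kind lowering built by build_array_push
 // below:
 // ```
 // new_length = old_length + 1        (deopts on Smi overflow)
 // elements = MaybeGrowFastElements(elements, old_length, capacity)
 // receiver.length = new_length
 // elements[old_length] = value       (double or tagged store, per kind)
 // return new_length
 // ```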
9866 auto build_array_push = [&](ElementsKind kind) {
9867 ValueNode* value;
9869
9870 ValueNode* writable_elements_array = AddNewNode<MaybeGrowFastElements>(
9871 {elements_array, receiver, old_array_length, elements_array_length},
9872 kind);
9873
9875 JSArray::kLengthOffset,
9877
9878 // Do the store
9880 BuildStoreFixedDoubleArrayElement(writable_elements_array,
9881 old_array_length, value);
9882 } else {
9884 BuildStoreFixedArrayElement(writable_elements_array, old_array_length,
9885 value);
9886 }
9887 return ReduceResult::Done();
9888 };
9889
9891 receiver, map_kinds, sub_graph, do_return, unique_kind_count,
9892 index_to_elements_kind, build_array_push));
9893
9894 if (do_return.has_value()) {
9895 sub_graph.Bind(&*do_return);
9896 }
9897 RecordKnownProperty(receiver, broker()->length_string(), new_array_length_smi,
9899 return new_array_length_smi;
9900}
9901
9902MaybeReduceResult MaglevGraphBuilder::TryReduceArrayPrototypePop(
9903 compiler::JSFunctionRef target, CallArguments& args) {
9904 if (!CanSpeculateCall()) return {};
 9905 // We can't reduce Array.prototype.pop when there is no receiver.
9906 if (args.receiver_mode() == ConvertReceiverMode::kNullOrUndefined) {
9907 if (v8_flags.trace_maglev_graph_building) {
9908 std::cout << " ! Failed to reduce Array.prototype.pop - no receiver"
9909 << std::endl;
9910 }
9911 return {};
9912 }
9913
9914 ValueNode* receiver = GetValueOrUndefined(args.receiver());
9915
9916 auto node_info = known_node_aspects().TryGetInfoFor(receiver);
9917 // If the map set is not found, then we don't know anything about the map of
9918 // the receiver, so bail.
9919 if (!node_info || !node_info->possible_maps_are_known()) {
9920 if (v8_flags.trace_maglev_graph_building) {
9921 std::cout
9922 << " ! Failed to reduce Array.prototype.pop - unknown receiver map"
9923 << std::endl;
9924 }
9925 return {};
9926 }
9927
9928 const PossibleMaps& possible_maps = node_info->possible_maps();
9929
9930 // If the set of possible maps is empty, then there's no possible map for this
9931 // receiver, therefore this path is unreachable at runtime. We're unlikely to
 9932 // ever hit this case, as BuildCheckMaps should already unconditionally deopt,
9933 // but check it in case another checking operation fails to statically
9934 // unconditionally deopt.
9935 if (possible_maps.is_empty()) {
9936 // TODO(leszeks): Add an unreachable assert here.
9938 }
9939
9940 if (!broker()->dependencies()->DependOnNoElementsProtector()) {
9941 if (v8_flags.trace_maglev_graph_building) {
9942 std::cout << " ! Failed to reduce Array.prototype.pop - "
9943 "NoElementsProtector invalidated"
9944 << std::endl;
9945 }
9946 return {};
9947 }
9948
9949 constexpr int max_kind_count = 4;
9950 std::array<SmallZoneVector<compiler::MapRef, 2>, max_kind_count> map_kinds = {
9951 SmallZoneVector<compiler::MapRef, 2>(zone()),
9952 SmallZoneVector<compiler::MapRef, 2>(zone()),
9953 SmallZoneVector<compiler::MapRef, 2>(zone()),
9954 SmallZoneVector<compiler::MapRef, 2>(zone())};
9955 // Smi and Object elements kinds are treated as identical for pop, so we can
9956 // group them together without differentiation.
9957 // ElementsKind is mapped to an index in the 4 element array using:
 9958 // - Bit 2 (only set for double kinds in the fast element range) is mapped
 9959 // to bit 1
 9960 // - Bit 0 (packedness) is kept as bit 0
9961 // The complete mapping:
9962 // +-------+----------------------------------------------+
9963 // | Index | ElementsKinds |
9964 // +-------+----------------------------------------------+
9965 // | 0 | PACKED_SMI_ELEMENTS and PACKED_ELEMENTS |
 9966 // | 1 | HOLEY_SMI_ELEMENTS and HOLEY_ELEMENTS |
9967 // | 2 | PACKED_DOUBLE_ELEMENTS |
9968 // | 3 | HOLEY_DOUBLE_ELEMENTS |
9969 // +-------+----------------------------------------------+
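 // Worked example: with the standard fast ElementsKind numbering,
 // HOLEY_DOUBLE_ELEMENTS is 5 (0b101), so elements_kind_to_index yields
 // ((5 & 0x4) >> 1) | (5 & 0x1) == 2 | 1 == 3, and index_to_elements_kind(3)
 // yields ((3 & 0x2) << 1) | (3 & 0x1) == 5 again.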
9970 auto elements_kind_to_index = [&](ElementsKind kind) {
9971 uint8_t kind_int = static_cast<uint8_t>(kind);
9972 uint8_t kind_index = ((kind_int & 0x4) >> 1) | (kind_int & 0x1);
9973 DCHECK_LT(kind_index, max_kind_count);
9974 return kind_index;
9975 };
9976 auto index_to_elements_kind = [&](uint8_t kind_index) {
9977 uint8_t kind_int;
9978 kind_int = ((kind_index & 0x2) << 1) | (kind_index & 0x1);
9979 return static_cast<ElementsKind>(kind_int);
9980 };
9981
9982 int unique_kind_count;
9983 if (!CanInlineArrayResizingBuiltin(broker(), possible_maps, map_kinds,
9984 elements_kind_to_index, &unique_kind_count,
9985 true)) {
9986 if (v8_flags.trace_maglev_graph_building) {
9987 std::cout << " ! Failed to reduce Array.prototype.pop - Map doesn't "
9988 "support fast resizing"
9989 << std::endl;
9990 }
9991 return {};
9992 }
9993
9994 MaglevSubGraphBuilder sub_graph(this, 2);
9995 MaglevSubGraphBuilder::Variable var_value(0);
9996 MaglevSubGraphBuilder::Variable var_new_array_length(1);
9997
9998 std::optional<MaglevSubGraphBuilder::Label> do_return =
9999 std::make_optional<MaglevSubGraphBuilder::Label>(
10000 &sub_graph, unique_kind_count + 1,
10001 std::initializer_list<MaglevSubGraphBuilder::Variable*>{
10002 &var_value, &var_new_array_length});
10003 MaglevSubGraphBuilder::Label empty_array(&sub_graph, 1);
10004
10005 ValueNode* old_array_length_smi;
10006 GET_VALUE_OR_ABORT(old_array_length_smi,
10008
10009 // If the array is empty, skip the pop and return undefined.
10010 sub_graph.GotoIfTrue<BranchIfReferenceEqual>(
10011 &empty_array, {old_array_length_smi, GetSmiConstant(0)});
10012
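 // A sketch (in pseudocode) of the non-empty-array path built below:
 // ```
 // new_length = old_length - 1
 // receiver.length = new_length
 // value = elements[new_length]
 // elements[new_length] = hole        (double or tagged store, per kind)
 // if (holey kind) value = ConvertHoleToUndefined(value)
 // return value                       (undefined when the array was empty)
 // ```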
10013 ValueNode* elements_array = BuildLoadElements(receiver);
10014 ValueNode* new_array_length_smi =
10015 AddNewNode<CheckedSmiDecrement>({old_array_length_smi});
10016 ValueNode* new_array_length =
10017 AddNewNode<UnsafeSmiUntag>({new_array_length_smi});
10018 sub_graph.set(var_new_array_length, new_array_length_smi);
10019
10020 auto build_array_pop = [&](ElementsKind kind) {
10021 // Handle COW if needed.
10022 ValueNode* writable_elements_array =
10025 : elements_array;
10026
10027 // Store new length.
10029 JSArray::kLengthOffset,
10031
 10032 // Load the value and store the hole in its place.
10033 ValueNode* value;
10035 value = BuildLoadFixedDoubleArrayElement(writable_elements_array,
10036 new_array_length);
10038 writable_elements_array, new_array_length,
10039 GetFloat64Constant(Float64::FromBits(kHoleNanInt64)));
10040 } else {
10042 value =
10043 BuildLoadFixedArrayElement(writable_elements_array, new_array_length);
10044 BuildStoreFixedArrayElement(writable_elements_array, new_array_length,
10045 GetRootConstant(RootIndex::kTheHoleValue));
10046 }
10047
10049 value = AddNewNode<ConvertHoleToUndefined>({value});
10050 }
10051 sub_graph.set(var_value, value);
10052 return ReduceResult::Done();
10053 };
10054
10056 receiver, map_kinds, sub_graph, do_return, unique_kind_count,
10057 index_to_elements_kind, build_array_pop));
10058
10059 sub_graph.Bind(&empty_array);
10060 sub_graph.set(var_new_array_length, GetSmiConstant(0));
10061 sub_graph.set(var_value, GetRootConstant(RootIndex::kUndefinedValue));
10062 sub_graph.Goto(&*do_return);
10063
10064 sub_graph.Bind(&*do_return);
10065 RecordKnownProperty(receiver, broker()->length_string(),
10066 sub_graph.get(var_new_array_length), false,
10068 return sub_graph.get(var_value);
10069}
10070
10071MaybeReduceResult MaglevGraphBuilder::TryReduceFunctionPrototypeHasInstance(
10072 compiler::JSFunctionRef target, CallArguments& args) {
10073 // We can't reduce Function#hasInstance when there is no receiver function.
10074 if (args.receiver_mode() == ConvertReceiverMode::kNullOrUndefined) {
10075 return {};
10076 }
10077 if (args.count() != 1) {
10078 return {};
10079 }
10080 compiler::OptionalHeapObjectRef maybe_receiver_constant =
10081 TryGetConstant(args.receiver());
10082 if (!maybe_receiver_constant) {
10083 return {};
10084 }
10085 compiler::HeapObjectRef receiver_object = maybe_receiver_constant.value();
10086 if (!receiver_object.IsJSObject() ||
10087 !receiver_object.map(broker()).is_callable()) {
10088 return {};
10089 }
10090 return BuildOrdinaryHasInstance(args[0], receiver_object.AsJSObject(),
10091 nullptr);
10092}
10093
10094MaybeReduceResult MaglevGraphBuilder::TryReduceObjectPrototypeHasOwnProperty(
10095 compiler::JSFunctionRef target, CallArguments& args) {
10096 if (!CanSpeculateCall()) return {};
10097 if (args.receiver_mode() == ConvertReceiverMode::kNullOrUndefined) {
10098 return {};
10099 }
10100
10101 // We can constant-fold the {receiver.hasOwnProperty(name)} builtin call to
10102 // the {True} node in this case:
10103
10104 // for (name in receiver) {
10105 // if (receiver.hasOwnProperty(name)) {
10106 // ...
10107 // }
10108 // }
10109
10110 if (args.count() != 1 || args[0] != current_for_in_state.key) {
10111 return {};
10112 }
10113 ValueNode* receiver = args.receiver();
10116 auto* receiver_map =
10119 {receiver_map, current_for_in_state.cache_type},
10120 DeoptimizeReason::kWrongMapDynamic);
10122 }
10123 return GetRootConstant(RootIndex::kTrueValue);
10124 }
10125
10126 // We can also optimize for this case below:
10127
10128 // receiver(is a heap constant with fast map)
10129 // ^
10130 // | object(all keys are enumerable)
10131 // | ^
10132 // | |
10133 // | JSForInNext
10134 // | ^
10135 // +----+ |
10136 // | |
10137 // JSCall[hasOwnProperty]
10138
10139 // We can replace the {JSCall} with several internalized string
10140 // comparisons.
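 // A sketch of the replacement, for a hypothetical receiver map whose own
 // descriptor keys are "x" and "y" (the comparisons are reference equalities
 // on internalized strings):
 // ```
 // if (key == "x") return true
 // if (key == "y") return true
 // return false
 // ```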
10141
10142 compiler::OptionalMapRef maybe_receiver_map;
10143 compiler::OptionalHeapObjectRef receiver_ref = TryGetConstant(receiver);
10144 if (receiver_ref.has_value()) {
10145 compiler::HeapObjectRef receiver_object = receiver_ref.value();
10146 compiler::MapRef receiver_map = receiver_object.map(broker());
10147 maybe_receiver_map = receiver_map;
10148 } else {
10149 NodeInfo* known_info = GetOrCreateInfoFor(receiver);
10150 if (known_info->possible_maps_are_known()) {
10151 compiler::ZoneRefSet<Map> possible_maps = known_info->possible_maps();
10152 if (possible_maps.size() == 1) {
10153 compiler::MapRef receiver_map = *(possible_maps.begin());
10154 maybe_receiver_map = receiver_map;
10155 }
10156 }
10157 }
10158 if (!maybe_receiver_map.has_value()) return {};
10159
10160 compiler::MapRef receiver_map = maybe_receiver_map.value();
10161 InstanceType instance_type = receiver_map.instance_type();
10162 int const nof = receiver_map.NumberOfOwnDescriptors();
 10163 // We use a heuristic limit on the number of compare instructions.
10164 if (nof > 4 || IsSpecialReceiverInstanceType(instance_type) ||
10165 receiver_map.is_dictionary_map()) {
10166 return {};
10167 }
10169 // Replace builtin call with several internalized string comparisons.
10170 MaglevSubGraphBuilder sub_graph(this, 1);
10171 MaglevSubGraphBuilder::Variable var_result(0);
10172 MaglevSubGraphBuilder::Label done(
10173 &sub_graph, nof + 1,
10174 std::initializer_list<MaglevSubGraphBuilder::Variable*>{&var_result});
10175 const compiler::DescriptorArrayRef descriptor_array =
10176 receiver_map.instance_descriptors(broker());
10177 for (InternalIndex key_index : InternalIndex::Range(nof)) {
10178 compiler::NameRef receiver_key =
10179 descriptor_array.GetPropertyKey(broker(), key_index);
10180 ValueNode* lhs = GetConstant(receiver_key);
10181 sub_graph.set(var_result, GetRootConstant(RootIndex::kTrueValue));
10182 sub_graph.GotoIfTrue<BranchIfReferenceEqual>(&done, {lhs, args[0]});
10183 }
10184 sub_graph.set(var_result, GetRootConstant(RootIndex::kFalseValue));
10185 sub_graph.Goto(&done);
10186 sub_graph.Bind(&done);
10187 return sub_graph.get(var_result);
10188}
10189
10190MaybeReduceResult MaglevGraphBuilder::TryReduceGetProto(ValueNode* object) {
 10191 NodeInfo* info = known_node_aspects().TryGetInfoFor(object);
10192 if (!info || !info->possible_maps_are_known()) {
10193 return {};
10194 }
10195 auto& possible_maps = info->possible_maps();
10196 if (possible_maps.is_empty()) {
10198 }
10199 auto it = possible_maps.begin();
10200 compiler::MapRef map = *it;
10201 if (IsSpecialReceiverInstanceType(map.instance_type())) {
10202 return {};
10203 }
10204 DCHECK(!map.IsPrimitiveMap() && map.IsJSReceiverMap());
10205 compiler::HeapObjectRef proto = map.prototype(broker());
10206 ++it;
10207 for (; it != possible_maps.end(); ++it) {
10208 map = *it;
10209 if (IsSpecialReceiverInstanceType(map.instance_type()) ||
10210 !proto.equals(map.prototype(broker()))) {
10211 return {};
10212 }
10213 DCHECK(!map.IsPrimitiveMap() && map.IsJSReceiverMap());
10214 }
10215 return GetConstant(proto);
10216}
10217
10218MaybeReduceResult MaglevGraphBuilder::TryReduceObjectPrototypeGetProto(
 10219 compiler::JSFunctionRef target, CallArguments& args) {
 10220 if (args.count() != 0) return {};
10221 return TryReduceGetProto(args.receiver());
10222}
10223
10224MaybeReduceResult MaglevGraphBuilder::TryReduceObjectGetPrototypeOf(
10225 compiler::JSFunctionRef target, CallArguments& args) {
10226 if (args.count() != 1) return {};
10227 return TryReduceGetProto(args[0]);
10228}
10229
10230MaybeReduceResult MaglevGraphBuilder::TryReduceReflectGetPrototypeOf(
10231 compiler::JSFunctionRef target, CallArguments& args) {
10232 return TryReduceObjectGetPrototypeOf(target, args);
10233}
10234
10235MaybeReduceResult MaglevGraphBuilder::TryReduceMathRound(
10236 compiler::JSFunctionRef target, CallArguments& args) {
10238}
10239
10240MaybeReduceResult MaglevGraphBuilder::TryReduceNumberParseInt(
10241 compiler::JSFunctionRef target, CallArguments& args) {
10242 if (args.count() == 0) {
10243 return GetRootConstant(RootIndex::kNanValue);
10244 }
10245 if (args.count() != 1) {
10246 if (RootConstant* root_cst = args[1]->TryCast<RootConstant>()) {
10247 if (root_cst->index() != RootIndex::kUndefinedValue) {
10248 return {};
10249 }
10250 } else if (SmiConstant* smi_cst = args[1]->TryCast<SmiConstant>()) {
10251 if (smi_cst->value().value() != 10 && smi_cst->value().value() != 0) {
10252 return {};
10253 }
10254 } else {
10255 return {};
10256 }
10257 }
10258
10259 ValueNode* arg = args[0];
10260
10261 switch (arg->value_representation()) {
10265 return arg;
10267 switch (CheckTypes(arg, {NodeType::kSmi})) {
10268 case NodeType::kSmi:
10269 return arg;
10270 default:
10271 // TODO(verwaest): Support actually parsing strings, converting
10272 // doubles to ints, ...
10273 return {};
10274 }
10277 return {};
10278 }
10279}
10280
10281MaybeReduceResult MaglevGraphBuilder::TryReduceMathAbs(
10282 compiler::JSFunctionRef target, CallArguments& args) {
10283 if (args.count() == 0) {
10284 return GetRootConstant(RootIndex::kNanValue);
10285 }
10286 ValueNode* arg = args[0];
10287
10288 switch (arg->value_representation()) {
10291 // TODO(388844115): Rename IntPtr to make it clear it's non-negative.
10292 return arg;
10294 if (!CanSpeculateCall()) return {};
10297 switch (CheckTypes(arg, {NodeType::kSmi, NodeType::kNumberOrOddball})) {
10298 case NodeType::kSmi:
10299 if (!CanSpeculateCall()) return {};
10301 case NodeType::kNumberOrOddball:
10303 arg, NodeType::kNumberOrOddball,
10305 // TODO(verwaest): Add support for ToNumberOrNumeric and deopt.
10306 default:
10307 break;
10308 }
10309 break;
10312 return AddNewNode<Float64Abs>({arg});
10313 }
10314 return {};
10315}
10316
10317MaybeReduceResult MaglevGraphBuilder::TryReduceMathFloor(
10318 compiler::JSFunctionRef target, CallArguments& args) {
10320}
10321
10322MaybeReduceResult MaglevGraphBuilder::TryReduceMathCeil(
10323 compiler::JSFunctionRef target, CallArguments& args) {
10325}
10326
10329 if (args.count() == 0) {
10330 return GetRootConstant(RootIndex::kNanValue);
10331 }
10332 ValueNode* arg = args[0];
10333 auto arg_repr = arg->value_representation();
10334 if (arg_repr == ValueRepresentation::kInt32 ||
10335 arg_repr == ValueRepresentation::kUint32 ||
10336 arg_repr == ValueRepresentation::kIntPtr) {
10337 return arg;
10338 }
10339 if (CheckType(arg, NodeType::kSmi)) return arg;
10340 if (!IsSupported(CpuOperation::kFloat64Round)) {
10341 return {};
10342 }
10343 if (arg_repr == ValueRepresentation::kFloat64 ||
10345 return AddNewNode<Float64Round>({arg}, kind);
10346 }
10348 if (CheckType(arg, NodeType::kNumberOrOddball)) {
10351 arg, NodeType::kNumberOrOddball,
10353 kind);
10354 }
10355 if (!CanSpeculateCall()) return {};
10356 DeoptFrameScope continuation_scope(this, Float64Round::continuation(kind));
10357 ToNumberOrNumeric* conversion =
10361 return AddNewNode<Float64Round>({float64_value}, kind);
10362}
10363
10364MaybeReduceResult MaglevGraphBuilder::TryReduceArrayConstructor(
10367}
10368
10369MaybeReduceResult MaglevGraphBuilder::TryReduceStringConstructor(
10370 compiler::JSFunctionRef target, CallArguments& args) {
10371 if (args.count() == 0) {
10372 return GetRootConstant(RootIndex::kempty_string);
10373 }
10374
10376}
10377
10378MaybeReduceResult MaglevGraphBuilder::TryReduceMathPow(
10379 compiler::JSFunctionRef target, CallArguments& args) {
10380 if (args.count() < 2) {
 10381 // For < 2 args, we'll be calculating Math.pow(arg[0], undefined), which is
10382 // ToNumber(arg[0]) ** NaN == NaN. So we can just return NaN.
10383 // However, if there is a single argument and it's tagged, we have to call
10384 // ToNumber on it before returning NaN, for side effects. This call could
10385 // lazy deopt, which would mean we'd need a continuation to actually set
10386 // the NaN return value... it's easier to just bail out, this should be
10387 // an uncommon case anyway.
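 // For example, Math.pow(2) is 2 ** NaN, which is NaN, and Math.pow() is
 // NaN ** NaN, which is also NaN.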
10388 if (args.count() == 1 && args[0]->properties().is_tagged()) {
10389 return {};
10390 }
10391 return GetRootConstant(RootIndex::kNanValue);
10392 }
10393 if (!CanSpeculateCall()) return {};
 10394 // If both arguments are tagged, it is cheaper to call the Math.pow builtin,
10395 // instead of Float64Exponentiate, since we are still making a call and we
10396 // don't need to unbox both inputs. See https://crbug.com/1393643.
10397 if (args[0]->properties().is_tagged() && args[1]->properties().is_tagged()) {
10398 // The Math.pow call will be created in CallKnownJSFunction reduction.
10399 return {};
10400 }
10401 ValueNode* left = GetHoleyFloat64ForToNumber(
10402 args[0], NodeType::kNumber, TaggedToFloat64ConversionType::kOnlyNumber);
10403 ValueNode* right = GetHoleyFloat64ForToNumber(
10404 args[1], NodeType::kNumber, TaggedToFloat64ConversionType::kOnlyNumber);
10405 return AddNewNode<Float64Exponentiate>({left, right});
10406}
10407
10408#define MATH_UNARY_IEEE_BUILTIN_REDUCER(MathName, ExtName, EnumName) \
10409 MaybeReduceResult MaglevGraphBuilder::TryReduce##MathName( \
10410 compiler::JSFunctionRef target, CallArguments& args) { \
10411 if (args.count() < 1) { \
10412 return GetRootConstant(RootIndex::kNanValue); \
10413 } \
10414 if (!CanSpeculateCall()) { \
10415 ValueRepresentation rep = args[0]->properties().value_representation(); \
10416 if (rep == ValueRepresentation::kTagged || \
10417 rep == ValueRepresentation::kHoleyFloat64) { \
10418 return {}; \
10419 } \
10420 } \
10421 ValueNode* value = \
10422 GetFloat64ForToNumber(args[0], NodeType::kNumber, \
10423 TaggedToFloat64ConversionType::kOnlyNumber); \
10424 return AddNewNode<Float64Ieee754Unary>( \
10425 {value}, Float64Ieee754Unary::Ieee754Function::k##EnumName); \
10426 }
10427
10429#undef MATH_UNARY_IEEE_BUILTIN_REDUCER
10430
10433 CallArguments& args, const compiler::FeedbackSource& feedback_source) {
10434 if (args.mode() != CallArguments::kDefault) {
10435 // TODO(victorgomes): Maybe inline the spread stub? Or call known function
10436 // directly if arguments list is an array.
10437 return {};
10438 }
10439 SaveCallSpeculationScope speculate(this, feedback_source);
10440 if (!shared.HasBuiltinId()) return {};
10441 if (v8_flags.trace_maglev_graph_building) {
10442 std::cout << " ! Trying to reduce builtin "
10443 << Builtins::name(shared.builtin_id()) << std::endl;
10444 }
10445 switch (shared.builtin_id()) {
10446#define CASE(Name, ...) \
10447 case Builtin::k##Name: \
10448 return TryReduce##Name(target, args);
10450#undef CASE
10451 default:
10452 // TODO(v8:7700): Inline more builtins.
10453 return {};
10454 }
10455}
10456
10459 if (shared.native() || shared.language_mode() == LanguageMode::kStrict) {
10460 if (args.receiver_mode() == ConvertReceiverMode::kNullOrUndefined) {
10461 return GetRootConstant(RootIndex::kUndefinedValue);
10462 } else {
10463 return args.receiver();
10464 }
10465 }
10466 if (args.receiver_mode() == ConvertReceiverMode::kNullOrUndefined) {
10467 return GetConstant(
10468 broker()->target_native_context().global_proxy_object(broker()));
10469 }
10470 ValueNode* receiver = args.receiver();
10471 if (CheckType(receiver, NodeType::kJSReceiver)) return receiver;
10472 if (compiler::OptionalHeapObjectRef maybe_constant =
10474 compiler::HeapObjectRef constant = maybe_constant.value();
10475 if (constant.IsNullOrUndefined()) {
10476 return GetConstant(
10477 broker()->target_native_context().global_proxy_object(broker()));
10478 }
10479 }
10481 {receiver}, broker()->target_native_context(), args.receiver_mode());
10482}
10483
10486 // TODO(victorgomes): Investigate if we can avoid this copy.
10487 int arg_count = static_cast<int>(args.count());
10488 auto arguments = zone()->AllocateVector<ValueNode*>(arg_count + 1);
10489 arguments[0] = GetConvertReceiver(shared, args);
10490 for (int i = 0; i < arg_count; i++) {
10491 arguments[i + 1] = args[i];
10492 }
10493 return arguments;
10494}
10495
10496template <typename CallNode, typename... Args>
10498 Args&&... extra_args) {
10499 size_t input_count = args.count_with_receiver() + CallNode::kFixedInputCount;
10500 return AddNewNode<CallNode>(
10501 input_count,
10502 [&](CallNode* call) {
10503 int arg_index = 0;
10504 call->set_arg(arg_index++,
10506 for (size_t i = 0; i < args.count(); ++i) {
10507 call->set_arg(arg_index++, GetTaggedValue(args[i]));
10508 }
10509 },
10510 std::forward<Args>(extra_args)...);
10511}
10512
10514 Call::TargetType target_type,
10515 const CallArguments& args) {
10516 // TODO(victorgomes): We do not collect call feedback from optimized/inlined
10517 // calls. In order to be consistent, we don't pass the feedback_source to the
10518 // IR, so that we avoid collecting for generic calls as well. We might want to
10519 // revisit this in the future.
10520 switch (args.mode()) {
10522 return AddNewCallNode<Call>(args, args.receiver_mode(), target_type,
10523 GetTaggedValue(target),
10526 DCHECK_EQ(args.receiver_mode(), ConvertReceiverMode::kAny);
10530 DCHECK_EQ(args.receiver_mode(), ConvertReceiverMode::kAny);
10531 // We don't use AddNewCallNode here, because the number of required
10532 // arguments is known statically.
10534 {target, GetValueOrUndefined(args.receiver()), args[0],
10535 GetContext()});
10536 }
10537}
10538
10540 ValueNode* context, ValueNode* function, ValueNode* new_target,
10543 size_t input_count = args.count() + CallSelf::kFixedInputCount;
10545 DCHECK_EQ(
10547 shared.internal_formal_parameter_count_with_receiver());
10548 return AddNewNode<CallSelf>(
10549 input_count,
10550 [&](CallSelf* call) {
10551 for (int i = 0; i < static_cast<int>(args.count()); i++) {
10552 call->set_arg(i, GetTaggedValue(args[i]));
10553 }
10554 },
10555 compilation_unit_->info()->toplevel_compilation_unit()->parameter_count(),
10556 GetTaggedValue(function), GetTaggedValue(context),
10558}
10559
10561 compiler::JSFunctionRef target) {
10563 return target.object().equals(
10565 }
10566 return target.object()->shared() ==
10568}
10569
10572 compiler::OptionalSharedFunctionInfoRef maybe_shared, CallArguments& args) {
10573 if (args.mode() != CallArguments::kDefault) {
10574 // TODO(victorgomes): Maybe inline the spread stub? Or call known function
10575 // directly if arguments list is an array.
10576 return {};
10577 }
10578 // Check if the function has an associated C++ code to execute.
10579 compiler::OptionalObjectRef maybe_callback_data =
10580 api_callback.callback_data(broker());
10581 if (!maybe_callback_data.has_value()) {
10582 // TODO(ishell): consider generating "return undefined" for empty function
10583 // instead of failing.
10584 return {};
10585 }
10586
10587 size_t input_count = args.count() + CallKnownApiFunction::kFixedInputCount;
10589 if (maybe_shared.has_value()) {
10590 receiver = GetConvertReceiver(maybe_shared.value(), args);
10591 } else {
10592 receiver = args.receiver();
10594 }
10595
10598 ? (v8_flags.maglev_inline_api_calls
10602
10604 input_count,
10605 [&](CallKnownApiFunction* call) {
10606 for (int i = 0; i < static_cast<int>(args.count()); i++) {
10607 call->set_arg(i, GetTaggedValue(args[i]));
10608 }
10609 },
10610 mode, api_callback, GetTaggedValue(GetContext()),
10612}
10613
10617 compiler::OptionalFunctionTemplateInfoRef maybe_function_template_info =
10618 shared.function_template_info(broker());
10619 if (!maybe_function_template_info.has_value()) {
10620 // Not an Api function.
10621 return {};
10622 }
10623
10624 // See if we can optimize this API call.
10625 compiler::FunctionTemplateInfoRef function_template_info =
10626 maybe_function_template_info.value();
10627
10629 if (function_template_info.accept_any_receiver() &&
10630 function_template_info.is_signature_undefined(broker())) {
10631 // We might be able to optimize the API call depending on the
10632 // {function_template_info}.
10633 // If the API function accepts any kind of {receiver}, we only need to
10634 // ensure that the {receiver} is actually a JSReceiver at this point,
10635 // and also pass that as the {holder}. There are two independent bits
10636 // here:
10637 //
10638 // a. When the "accept any receiver" bit is set, it means we don't
10639 // need to perform access checks, even if the {receiver}'s map
10640 // has the "needs access check" bit set.
10641 // b. When the {function_template_info} has no signature, we don't
10642 // need to do the compatible receiver check, since all receivers
10643 // are considered compatible at that point, and the {receiver}
 10644 // will be passed as the {holder}.
10645
10646 api_holder =
10648 } else {
10649 // Try to infer API holder from the known aspects of the {receiver}.
10650 api_holder =
10651 TryInferApiHolderValue(function_template_info, args.receiver());
10652 }
10653
10654 switch (api_holder.lookup) {
10657 return TryReduceCallForApiFunction(function_template_info, shared, args);
10658
10660 break;
10661 }
10662
10663 // We don't have enough information to eliminate the access check
10664 // and/or the compatible receiver check, so use the generic builtin
10665 // that does those checks dynamically. This is still significantly
10666 // faster than the generic call sequence.
10667 Builtin builtin_name;
10668 // TODO(ishell): create no-profiling versions of kCallFunctionTemplate
10669 // builtins and use them here based on DependOnNoProfilingProtector()
10670 // dependency state.
10671 if (function_template_info.accept_any_receiver()) {
10672 DCHECK(!function_template_info.is_signature_undefined(broker()));
10673 builtin_name = Builtin::kCallFunctionTemplate_CheckCompatibleReceiver;
10674 } else if (function_template_info.is_signature_undefined(broker())) {
10675 builtin_name = Builtin::kCallFunctionTemplate_CheckAccess;
10676 } else {
10677 builtin_name =
10678 Builtin::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver;
10679 }
10680
10681 // The CallFunctionTemplate builtin requires the {receiver} to be
10682 // an actual JSReceiver, so make sure we do the proper conversion
10683 // first if necessary.
10685 int kContext = 1;
10686 int kFunctionTemplateInfo = 1;
10687 int kArgc = 1;
10689 kFunctionTemplateInfo + kArgc + kContext + args.count_with_receiver(),
10690 [&](CallBuiltin* call_builtin) {
10691 int arg_index = 0;
10692 call_builtin->set_arg(arg_index++, GetConstant(function_template_info));
10693 call_builtin->set_arg(
10694 arg_index++,
10695 GetInt32Constant(JSParameterCount(static_cast<int>(args.count()))));
10696
10697 call_builtin->set_arg(arg_index++, GetTaggedValue(receiver));
10698 for (int i = 0; i < static_cast<int>(args.count()); i++) {
10699 call_builtin->set_arg(arg_index++, GetTaggedValue(args[i]));
10700 }
10701 },
10702 builtin_name, GetTaggedValue(GetContext()));
10703}
10704
10707 CallArguments& args, const compiler::FeedbackSource& feedback_source) {
10708 // Don't inline CallFunction stub across native contexts.
10709 if (function.native_context(broker()) != broker()->target_native_context()) {
10710 return {};
10711 }
10712 compiler::SharedFunctionInfoRef shared = function.shared(broker());
10714
10715 ValueNode* closure = GetConstant(function);
10716 compiler::ContextRef context = function.context(broker());
10717 ValueNode* context_node = GetConstant(context);
10719 if (MaglevIsTopTier() && TargetIsCurrentCompilingUnit(function) &&
10720 !graph_->is_osr()) {
10721 DCHECK(!shared.HasBuiltinId());
10722 res = BuildCallSelf(context_node, closure, new_target, shared, args);
10723 } else {
10725 context_node, closure, new_target,
10726#ifdef V8_ENABLE_LEAPTIERING
10727 function.dispatch_handle(),
10728#endif
10729 shared, function.raw_feedback_cell(broker()), args, feedback_source);
10730 }
10731 return res;
10732}
10733
10735 ValueNode* context, ValueNode* function, ValueNode* new_target,
10736#ifdef V8_ENABLE_LEAPTIERING
10737 JSDispatchHandle dispatch_handle,
10738#endif
10741 const compiler::FeedbackSource& feedback_source) {
10743 size_t input_count = args.count() + CallKnownJSFunction::kFixedInputCount;
10745 input_count,
10746 [&](CallKnownJSFunction* call) {
10747 for (int i = 0; i < static_cast<int>(args.count()); i++) {
10748 call->set_arg(i, GetTaggedValue(args[i]));
10749 }
10750 },
10751#ifdef V8_ENABLE_LEAPTIERING
10752 dispatch_handle,
10753#endif
10754 shared, GetTaggedValue(function), GetTaggedValue(context),
10756}
10757
10759 ValueNode* context, ValueNode* function, ValueNode* new_target,
10760#ifdef V8_ENABLE_LEAPTIERING
10761 JSDispatchHandle dispatch_handle,
10762#endif
10764 base::Vector<ValueNode*> arguments) {
10765 DCHECK_GT(arguments.size(), 0);
10766 constexpr int kSkipReceiver = 1;
10767 int argcount_without_receiver =
10768 static_cast<int>(arguments.size()) - kSkipReceiver;
10769 size_t input_count =
10770 argcount_without_receiver + CallKnownJSFunction::kFixedInputCount;
10772 input_count,
10773 [&](CallKnownJSFunction* call) {
10774 for (int i = 0; i < argcount_without_receiver; i++) {
10775 call->set_arg(i, GetTaggedValue(arguments[i + kSkipReceiver]));
10776 }
10777 },
10778#ifdef V8_ENABLE_LEAPTIERING
10779 dispatch_handle,
10780#endif
10781 shared, GetTaggedValue(function), GetTaggedValue(context),
10782 GetTaggedValue(arguments[0]), GetTaggedValue(new_target));
10783}
10784
10786 ValueNode* context, ValueNode* function, ValueNode* new_target,
10787#ifdef V8_ENABLE_LEAPTIERING
10788 JSDispatchHandle dispatch_handle,
10789#endif
10792 const compiler::FeedbackSource& feedback_source) {
10793 if (v8_flags.maglev_inlining) {
10794 RETURN_IF_DONE(TryBuildInlineCall(context, function, new_target,
10795#ifdef V8_ENABLE_LEAPTIERING
10796 dispatch_handle,
10797#endif
10798 shared, feedback_cell, args,
10799 feedback_source));
10800 }
10801 return BuildCallKnownJSFunction(context, function, new_target,
10802#ifdef V8_ENABLE_LEAPTIERING
10803 dispatch_handle,
10804#endif
10805 shared, feedback_cell, args, feedback_source);
10806}
10807
10810 DCHECK(!ref.IsSmi());
10811 DCHECK(!ref.IsHeapNumber());
10812
10813 if (!IsInstanceOfNodeType(ref.map(broker()), GetType(node), broker())) {
10814 return EmitUnconditionalDeopt(reason);
10815 }
10816 if (compiler::OptionalHeapObjectRef maybe_constant = TryGetConstant(node)) {
10817 if (maybe_constant.value().equals(ref)) {
10818 return ReduceResult::Done();
10819 }
10820 return EmitUnconditionalDeopt(reason);
10821 }
10822 AddNewNode<CheckValue>({node}, ref, reason);
10823 SetKnownValue(node, ref, StaticTypeForConstant(broker(), ref));
10824
10825 return ReduceResult::Done();
10826}
10827
10829 ValueNode* node, compiler::ObjectRef ref, DeoptimizeReason reason) {
10830 if (ref.IsHeapObject() && !ref.IsHeapNumber()) {
10831 return BuildCheckValueByReference(node, ref.AsHeapObject(), reason);
10832 }
10833 return BuildCheckNumericalValue(node, ref, reason);
10834}
10835
10838 if (!IsConstantNode(node->opcode()) && ref.IsInternalizedString()) {
10839 if (!IsInstanceOfNodeType(ref.map(broker()), GetType(node), broker())) {
10840 return EmitUnconditionalDeopt(reason);
10841 }
10842 AddNewNode<CheckValueEqualsString>({node}, ref.AsInternalizedString(),
10843 reason);
10844 SetKnownValue(node, ref, NodeType::kString);
10845 return ReduceResult::Done();
10846 }
10847 return BuildCheckValueByReference(node, ref, reason);
10848}
10849
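// BuildCheckNumericalValue emits a check that `node` has the given numeric
// value. Smi references are folded against Smi/Int32 constants or checked
// with CheckValueEqualsInt32; HeapNumber references are compared bit-wise as
// Float64 against known constants or checked with CheckFloat64SameValue.
// Statically impossible combinations deopt unconditionally.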
10851 ValueNode* node, compiler::ObjectRef ref, DeoptimizeReason reason) {
10852 DCHECK(ref.IsSmi() || ref.IsHeapNumber());
10853 if (ref.IsSmi()) {
10854 int ref_value = ref.AsSmi();
10855 if (IsConstantNode(node->opcode())) {
10856 if (node->Is<SmiConstant>() &&
10857 node->Cast<SmiConstant>()->value().value() == ref_value) {
10858 return ReduceResult::Done();
10859 }
10860 if (node->Is<Int32Constant>() &&
10861 node->Cast<Int32Constant>()->value() == ref_value) {
10862 return ReduceResult::Done();
10863 }
10864 return EmitUnconditionalDeopt(reason);
10865 }
10866 if (NodeTypeIs(GetType(node), NodeType::kAnyHeapObject)) {
10867 return EmitUnconditionalDeopt(reason);
10868 }
10869 AddNewNode<CheckValueEqualsInt32>({node}, ref_value, reason);
10870 } else {
10871 DCHECK(ref.IsHeapNumber());
10872 Float64 ref_value = Float64::FromBits(ref.AsHeapNumber().value_as_bits());
10873 DCHECK(!ref_value.is_hole_nan());
10874 if (node->Is<Float64Constant>()) {
10875 Float64 f64 = node->Cast<Float64Constant>()->value();
10876 DCHECK(!f64.is_hole_nan());
10877 if (f64 == ref_value) {
10878 return ReduceResult::Done();
10879 }
10880 return EmitUnconditionalDeopt(reason);
10881 } else if (compiler::OptionalHeapObjectRef constant =
10882 TryGetConstant(node)) {
10883 if (constant.value().IsHeapNumber()) {
10884 Float64 f64 =
10885 Float64::FromBits(constant.value().AsHeapNumber().value_as_bits());
10886 DCHECK(!f64.is_hole_nan());
10887 if (f64 == ref_value) {
10888 return ReduceResult::Done();
10889 }
10890 }
10891 return EmitUnconditionalDeopt(reason);
10892 }
10893 if (!NodeTypeIs(NodeType::kNumber, GetType(node))) {
10894 return EmitUnconditionalDeopt(reason);
10895 }
10896 AddNewNode<CheckFloat64SameValue>({node}, ref_value, reason);
10897 }
10898
10899 SetKnownValue(node, ref, NodeType::kNumber);
10900 return ReduceResult::Done();
10901}
10902
10904 if (!node->is_tagged()) return node;
10905 compiler::OptionalHeapObjectRef maybe_constant = TryGetConstant(node);
10906 if (maybe_constant) {
10907 return maybe_constant.value().IsTheHole()
10908 ? GetRootConstant(RootIndex::kUndefinedValue)
10909 : node;
10910 }
10912}
10913
10915 if (!node->is_tagged()) return ReduceResult::Done();
10916 compiler::OptionalHeapObjectRef maybe_constant = TryGetConstant(node);
10917 if (maybe_constant) {
10918 if (maybe_constant.value().IsTheHole()) {
10919 return EmitUnconditionalDeopt(DeoptimizeReason::kHole);
10920 }
10921 return ReduceResult::Done();
10922 }
10924 return ReduceResult::Done();
10925}
10926
10929 const compiler::FeedbackSource& feedback_source) {
10930 if (args.mode() != CallArguments::kDefault) {
10931 // TODO(victorgomes): Maybe inline the spread stub? Or call known function
10932 // directly if arguments list is an array.
10933 return {};
10934 }
10935 compiler::SharedFunctionInfoRef shared = target.shared(broker());
10936 ValueNode* target_node = GetConstant(target);
10937 // Do not reduce calls to functions with break points.
10938 if (!shared.HasBreakInfo(broker())) {
10939 if (IsClassConstructor(shared.kind())) {
10940 // If we have a class constructor, we should raise an exception.
10941 return BuildCallRuntime(Runtime::kThrowConstructorNonCallableError,
10942 {target_node});
10943 }
10944 DCHECK(IsCallable(*target.object()));
10945 RETURN_IF_DONE(TryReduceBuiltin(target, shared, args, feedback_source));
10947 target, GetRootConstant(RootIndex::kUndefinedValue), args,
10948 feedback_source));
10949 }
10951}
10952
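// Infers the API holder for a call to a FunctionTemplateInfo-based function:
// if the receiver's possible maps are all known and they all agree on the
// holder returned by LookupHolderOfExpectedType, the compatible-receiver
// check can be constant-folded; otherwise a dynamic lookup is required.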
10954 compiler::FunctionTemplateInfoRef function_template_info,
10956 const compiler::HolderLookupResult not_found;
10957
10958 auto receiver_info = known_node_aspects().TryGetInfoFor(receiver);
10959 if (!receiver_info || !receiver_info->possible_maps_are_known()) {
10960 // No info about receiver, can't infer API holder.
10961 return not_found;
10962 }
10963 DCHECK(!receiver_info->possible_maps().is_empty());
10964 compiler::MapRef first_receiver_map = receiver_info->possible_maps()[0];
10965
10966 // See if we can constant-fold the compatible receiver checks.
10967 compiler::HolderLookupResult api_holder =
10968 function_template_info.LookupHolderOfExpectedType(broker(),
10969 first_receiver_map);
10970 if (api_holder.lookup == CallOptimization::kHolderNotFound) {
10971 // Can't infer API holder.
10972 return not_found;
10973 }
10974
10975 // Check that all {receiver_maps} are actually JSReceiver maps and
10976 // that the {function_template_info} accepts them without access
10977 // checks (even if "access check needed" is set for {receiver}).
10978 //
 10979 // The API holder might be the receiver's hidden prototype (i.e. the receiver
 10980 // is a global proxy), so in this case the map check or stability dependency
 10981 // on the receiver guards us against detaching the global object from the proxy.
10982 CHECK(first_receiver_map.IsJSReceiverMap());
10983 CHECK(!first_receiver_map.is_access_check_needed() ||
10984 function_template_info.accept_any_receiver());
10985
10986 for (compiler::MapRef receiver_map : receiver_info->possible_maps()) {
10988 function_template_info.LookupHolderOfExpectedType(broker(),
10989 receiver_map);
10990
10991 if (api_holder.lookup != holder_i.lookup) {
 10992 // Different API holders; dynamic lookup is required.
10993 return not_found;
10994 }
10997 if (holder_i.lookup == CallOptimization::kHolderFound) {
10998 DCHECK(api_holder.holder.has_value() && holder_i.holder.has_value());
10999 if (!api_holder.holder->equals(*holder_i.holder)) {
 11000 // Different API holders; dynamic lookup is required.
11001 return not_found;
11002 }
11003 }
11004
11005 CHECK(receiver_map.IsJSReceiverMap());
11006 CHECK(!receiver_map.is_access_check_needed() ||
11007 function_template_info.accept_any_receiver());
11008 }
11009 return api_holder;
11010}
11011
11013 ValueNode* target_node, compiler::JSFunctionRef target, CallArguments& args,
11014 const compiler::FeedbackSource& feedback_source) {
11016 target_node, target, DeoptimizeReason::kWrongCallTarget));
11017 return TryReduceCallForConstant(target, args, feedback_source);
11018}
11019
11021 ValueNode* target_node, ValueNode* target_context,
11022#ifdef V8_ENABLE_LEAPTIERING
11023 JSDispatchHandle dispatch_handle,
11024#endif
11027 const compiler::FeedbackSource& feedback_source) {
11028 // Do not reduce calls to functions with break points.
11029 if (args.mode() != CallArguments::kDefault) {
11030 // TODO(victorgomes): Maybe inline the spread stub? Or call known function
11031 // directly if arguments list is an array.
11032 return {};
11033 }
11034 if (!shared.HasBreakInfo(broker())) {
11035 if (IsClassConstructor(shared.kind())) {
11036 // If we have a class constructor, we should raise an exception.
11037 return BuildCallRuntime(Runtime::kThrowConstructorNonCallableError,
11038 {target_node});
11039 }
11041 target_context, target_node,
11042 GetRootConstant(RootIndex::kUndefinedValue),
11043#ifdef V8_ENABLE_LEAPTIERING
11044 dispatch_handle,
11045#endif
11046 shared, feedback_cell, args, feedback_source));
11047 }
11049}
11050
11053 compiler::OptionalHeapObjectRef maybe_receiver, CallArguments& args,
11054 const compiler::FeedbackSource& feedback_source) {
11055 if (args.mode() != CallArguments::kDefault) return {};
11056
11057 ValueNode* function = GetValueOrUndefined(args.receiver());
11058 if (maybe_receiver.has_value()) {
11060 function, maybe_receiver.value(), DeoptimizeReason::kWrongCallTarget));
11061 function = GetConstant(maybe_receiver.value());
11062 }
11063
11064 SaveCallSpeculationScope saved(this);
11065 if (args.count() == 0) {
11067 return ReduceCall(function, empty_args, feedback_source);
11068 }
11069 auto build_call_only_with_new_receiver = [&] {
11071 return ReduceCall(function, new_args, feedback_source);
11072 };
11073 if (args.count() == 1 || IsNullValue(args[1]) || IsUndefinedValue(args[1])) {
11074 return build_call_only_with_new_receiver();
11075 }
11076 auto build_call_with_array_like = [&] {
11079 return ReduceCallWithArrayLike(function, new_args, feedback_source);
11080 };
11081 if (!MayBeNullOrUndefined(args[1])) {
11082 return build_call_with_array_like();
11083 }
11084 return SelectReduction(
11085 [&](auto& builder) {
11086 return BuildBranchIfUndefinedOrNull(builder, args[1]);
11087 },
11088 build_call_only_with_new_receiver, build_call_with_array_like);
11089}
11090
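// Builds a call guided by CallIC feedback. Insufficient feedback triggers an
// unconditional deopt. When the feedback target is a JSFunction, the call
// target is checked against it; kReceiver feedback means the IC recorded the
// receiver of a Function.prototype.apply call, so the target is checked
// against the native context's Function.prototype.apply and the apply call
// is reduced using the recorded receiver (e.g. a call written as
// f.apply(obj, argsArray)).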
11092 ValueNode* target_node, CallArguments& args,
11093 const compiler::FeedbackSource& feedback_source) {
11094 const compiler::ProcessedFeedback& processed_feedback =
11095 broker()->GetFeedbackForCall(feedback_source);
11096 if (processed_feedback.IsInsufficient()) {
11098 DeoptimizeReason::kInsufficientTypeFeedbackForCall);
11099 }
11100
11101 DCHECK_EQ(processed_feedback.kind(), compiler::ProcessedFeedback::kCall);
11102 const compiler::CallFeedback& call_feedback = processed_feedback.AsCall();
11103
11104 if (call_feedback.target().has_value() &&
11105 call_feedback.target()->IsJSFunction()) {
11106 CallFeedbackContent content = call_feedback.call_feedback_content();
11107 compiler::JSFunctionRef feedback_target =
11108 call_feedback.target()->AsJSFunction();
11109 if (content == CallFeedbackContent::kReceiver) {
11112 compiler::JSFunctionRef apply_function =
11113 native_context.function_prototype_apply(broker());
11115 target_node, apply_function, DeoptimizeReason::kWrongCallTarget));
11118 feedback_source),
11120 feedback_target = apply_function;
11121 } else {
11123 }
11125 target_node, feedback_target, DeoptimizeReason::kWrongCallTarget));
11126 }
11127
11128 PROCESS_AND_RETURN_IF_DONE(ReduceCall(target_node, args, feedback_source),
11130 UNREACHABLE();
11131}
11132
11134 ValueNode* target_node, CallArguments& args,
11135 VirtualObject* arguments_object,
11136 const compiler::FeedbackSource& feedback_source) {
11138 DCHECK(arguments_object->map().IsJSArgumentsObjectMap() ||
11139 arguments_object->map().IsJSArrayMap());
11140 args.PopArrayLikeArgument();
11141 ValueNode* elements_value =
11142 arguments_object->get(JSArgumentsObject::kElementsOffset);
11143 if (elements_value->Is<ArgumentsElements>()) {
11145 // TODO(victorgomes): Add JSFunction node type in KNA and use the info here.
11146 if (compiler::OptionalHeapObjectRef maybe_constant =
11147 TryGetConstant(target_node)) {
11148 if (maybe_constant->IsJSFunction()) {
11150 maybe_constant->AsJSFunction().shared(broker());
11151 if (!IsClassConstructor(shared.kind())) {
11152 target_type = Call::TargetType::kJSFunction;
11153 }
11154 }
11155 }
11156 int start_index = 0;
11157 if (elements_value->Cast<ArgumentsElements>()->type() ==
11159 start_index =
11160 elements_value->Cast<ArgumentsElements>()->formal_parameter_count();
11161 }
11164 start_index, target_type);
11165 }
11166
11167 if (elements_value->Is<RootConstant>()) {
 11168 // It is a RootConstant; the elements can only be the empty fixed array.
11169 DCHECK_EQ(elements_value->Cast<RootConstant>()->index(),
11170 RootIndex::kEmptyFixedArray);
11171 CallArguments new_args(ConvertReceiverMode::kAny, {args.receiver()});
11172 return ReduceCall(target_node, new_args, feedback_source);
11173 }
11174
11175 if (Constant* constant_value = elements_value->TryCast<Constant>()) {
11176 DCHECK(constant_value->object().IsFixedArray());
11177 compiler::FixedArrayRef elements = constant_value->object().AsFixedArray();
11179 DCHECK_NOT_NULL(args.receiver());
11180 arg_list.push_back(args.receiver());
11181 for (int i = 0; i < static_cast<int>(args.count()); i++) {
11182 arg_list.push_back(args[i]);
11183 }
11184 for (uint32_t i = 0; i < elements.length(); i++) {
11185 arg_list.push_back(GetConstant(*elements.TryGet(broker(), i)));
11186 }
11187 CallArguments new_args(ConvertReceiverMode::kAny, std::move(arg_list));
11188 return ReduceCall(target_node, new_args, feedback_source);
11189 }
11190
11191 DCHECK(elements_value->Is<InlinedAllocation>());
11192 InlinedAllocation* allocation = elements_value->Cast<InlinedAllocation>();
11193 VirtualObject* elements = allocation->object();
11194
11196 DCHECK_NOT_NULL(args.receiver());
11197 arg_list.push_back(args.receiver());
11198 for (int i = 0; i < static_cast<int>(args.count()); i++) {
11199 arg_list.push_back(args[i]);
11200 }
11201 DCHECK(elements->get(offsetof(FixedArray, length_))->Is<Int32Constant>());
11202 int length = elements->get(offsetof(FixedArray, length_))
11203 ->Cast<Int32Constant>()
11204 ->value();
11205 for (int i = 0; i < length; i++) {
11206 arg_list.push_back(elements->get(FixedArray::OffsetOfElementAt(i)));
11207 }
11208 CallArguments new_args(ConvertReceiverMode::kAny, std::move(arg_list));
11209 return ReduceCall(target_node, new_args, feedback_source);
11210}
11211
11212namespace {
11213bool IsSloppyMappedArgumentsObject(compiler::JSHeapBroker* broker,
11214 compiler::MapRef map) {
11215 return broker->target_native_context()
11216 .fast_aliased_arguments_map(broker)
11217 .equals(map);
11218}
11219} // namespace
11220
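// Returns the virtual arguments object backing `value` if it can be safely
// forwarded: the allocation must not escape (and must be tracked by loop
// effects when inside a loop), and the object must either be a JSArray whose
// elements come from an ArgumentsElements node (a rest parameter) or an
// arguments object whose map is not the sloppy mapped (fast-aliased)
// arguments map. An example of the kind of pattern this targets:
//   function f(...args) { return g.apply(null, args); }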
11221std::optional<VirtualObject*>
11223 if (!value->Is<InlinedAllocation>()) return {};
11224 InlinedAllocation* alloc = value->Cast<InlinedAllocation>();
 11225 // Although the arguments object has not been changed so far, and it is not
 11226 // escaping, it could still be modified after this bytecode if it is inside a loop.
11227 if (IsInsideLoop()) {
11228 if (!is_loop_effect_tracking() ||
11229 !loop_effects_->allocations.contains(alloc)) {
11230 return {};
11231 }
11232 }
 11233 // TODO(victorgomes): We can probably loosen the IsNotEscaping requirement if
 11234 // we keep track of the changes made to the arguments object so far.
11235 if (alloc->IsEscaping()) return {};
11236 VirtualObject* object = alloc->object();
11237 if (!object->has_static_map()) return {};
11238 // TODO(victorgomes): Support simple JSArray forwarding.
11239 compiler::MapRef map = object->map();
 11240 // It is a rest parameter if it is an array with an ArgumentsElements node as
 11241 // the elements array.
11242 if (map.IsJSArrayMap() && object->get(JSArgumentsObject::kElementsOffset)
11243 ->Is<ArgumentsElements>()) {
11244 return object;
11245 }
11246 // TODO(victorgomes): We can loosen the IsSloppyMappedArgumentsObject
 11247 // requirement if there are no stores to the mapped arguments.
11248 if (map.IsJSArgumentsObjectMap() &&
11249 !IsSloppyMappedArgumentsObject(broker(), map)) {
11250 return object;
11251 }
11252 return {};
11253}
11254
11256 ValueNode* target_node, CallArguments& args,
11257 const compiler::FeedbackSource& feedback_source) {
11259
11260 // TODO(victorgomes): Add the case for JSArrays and Rest parameter.
11261 if (std::optional<VirtualObject*> arguments_object =
11262 TryGetNonEscapingArgumentsObject(args.array_like_argument())) {
11264 target_node, args, *arguments_object, feedback_source));
11265 }
11266
11267 // On fallthrough, create a generic call.
11268 return BuildGenericCall(target_node, Call::TargetType::kAny, args);
11269}
11270
11272 ValueNode* target_node, CallArguments& args,
11273 const compiler::FeedbackSource& feedback_source) {
11274 if (compiler::OptionalHeapObjectRef maybe_constant =
11275 TryGetConstant(target_node)) {
11276 if (maybe_constant->IsJSFunction()) {
11278 target_node, maybe_constant->AsJSFunction(), args, feedback_source);
11280 }
11281 }
11282
11283 // If the implementation here becomes more complex, we could probably
11284 // deduplicate the code for FastCreateClosure and CreateClosure by using
11285 // templates or giving them a shared base class.
11286 if (FastCreateClosure* fast_create_closure =
11287 target_node->TryCast<FastCreateClosure>()) {
11289 fast_create_closure, fast_create_closure->context().node(),
11290#ifdef V8_ENABLE_LEAPTIERING
11291 fast_create_closure->feedback_cell().dispatch_handle(),
11292#endif
11293 fast_create_closure->shared_function_info(),
11294 fast_create_closure->feedback_cell(), args, feedback_source);
11296 } else if (CreateClosure* create_closure =
11297 target_node->TryCast<CreateClosure>()) {
11299 create_closure, create_closure->context().node(),
11300#ifdef V8_ENABLE_LEAPTIERING
11301 create_closure->feedback_cell().dispatch_handle(),
11302#endif
11303 create_closure->shared_function_info(), create_closure->feedback_cell(),
11304 args, feedback_source);
11306 }
11307
11308 // On fallthrough, create a generic call.
11309 return BuildGenericCall(target_node, Call::TargetType::kAny, args);
11310}
11311
11313 ConvertReceiverMode receiver_mode) {
11314 ValueNode* target = LoadRegister(0);
11316 FeedbackSlot slot = GetSlotOperand(3);
11317 compiler::FeedbackSource feedback_source(feedback(), slot);
11318 CallArguments args(receiver_mode, reg_list, current_interpreter_frame_);
11319 return BuildCallWithFeedback(target, args, feedback_source);
11320}
11321
11323 int arg_count, ConvertReceiverMode receiver_mode) {
11324 ValueNode* target = LoadRegister(0);
11325 const int receiver_count =
11326 (receiver_mode == ConvertReceiverMode::kNullOrUndefined) ? 0 : 1;
11327 const int reg_count = arg_count + receiver_count;
11328 FeedbackSlot slot = GetSlotOperand(reg_count + 1);
11329 compiler::FeedbackSource feedback_source(feedback(), slot);
11330 switch (reg_count) {
11331 case 0: {
11333 CallArguments args(receiver_mode);
11334 return BuildCallWithFeedback(target, args, feedback_source);
11335 }
11336 case 1: {
11337 CallArguments args(receiver_mode, {LoadRegister(1)});
11338 return BuildCallWithFeedback(target, args, feedback_source);
11339 }
11340 case 2: {
11341 CallArguments args(receiver_mode, {LoadRegister(1), LoadRegister(2)});
11342 return BuildCallWithFeedback(target, args, feedback_source);
11343 }
11344 case 3: {
11345 CallArguments args(receiver_mode,
11347 return BuildCallWithFeedback(target, args, feedback_source);
11348 }
11349 default:
11350 UNREACHABLE();
11351 }
11352}
11353
11354ReduceResult MaglevGraphBuilder::VisitCallAnyReceiver() {
11356}
11357ReduceResult MaglevGraphBuilder::VisitCallProperty() {
11359}
11360ReduceResult MaglevGraphBuilder::VisitCallProperty0() {
11362}
11363ReduceResult MaglevGraphBuilder::VisitCallProperty1() {
11365}
11366ReduceResult MaglevGraphBuilder::VisitCallProperty2() {
11368}
11369ReduceResult MaglevGraphBuilder::VisitCallUndefinedReceiver() {
11371}
11372ReduceResult MaglevGraphBuilder::VisitCallUndefinedReceiver0() {
11374}
11375ReduceResult MaglevGraphBuilder::VisitCallUndefinedReceiver1() {
11377}
11378ReduceResult MaglevGraphBuilder::VisitCallUndefinedReceiver2() {
11380}
11381
11382ReduceResult MaglevGraphBuilder::VisitCallWithSpread() {
11383 ValueNode* function = LoadRegister(0);
11384 interpreter::RegisterList reglist = iterator_.GetRegisterListOperand(1);
11385 FeedbackSlot slot = GetSlotOperand(3);
11386 compiler::FeedbackSource feedback_source(feedback(), slot);
11387 CallArguments args(ConvertReceiverMode::kAny, reglist,
11389 return BuildCallWithFeedback(function, args, feedback_source);
11390}
11391
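// CallRuntime <function_id> <first_arg> <arg_count>
// Builds a CallRuntime node with the tagged register arguments. For runtime
// functions that can throw (and therefore never return normally), the
// continuation is marked unreachable via an abort.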
11392ReduceResult MaglevGraphBuilder::VisitCallRuntime() {
11394 interpreter::RegisterList args = iterator_.GetRegisterListOperand(1);
11395 ValueNode* context = GetContext();
11396 size_t input_count = args.register_count() + CallRuntime::kFixedInputCount;
11397 CallRuntime* call_runtime = AddNewNode<CallRuntime>(
11398 input_count,
11399 [&](CallRuntime* call_runtime) {
11400 for (int i = 0; i < args.register_count(); ++i) {
11401 call_runtime->set_arg(i, GetTaggedValue(args[i]));
11402 }
11403 },
11404 function_id, context);
11405 SetAccumulator(call_runtime);
11406
11407 if (RuntimeFunctionCanThrow(function_id)) {
11408 return BuildAbort(AbortReason::kUnexpectedReturnFromThrow);
11409 }
11410 return ReduceResult::Done();
11411}
11412
11413ReduceResult MaglevGraphBuilder::VisitCallJSRuntime() {
11414 // Get the function to call from the native context.
11415 compiler::NativeContextRef native_context = broker()->target_native_context();
11416 ValueNode* context = GetConstant(native_context);
11417 uint32_t slot = iterator_.GetNativeContextIndexOperand(0);
11418 ValueNode* callee =
11420 // Call the function.
11421 interpreter::RegisterList reglist = iterator_.GetRegisterListOperand(1);
11422 CallArguments args(ConvertReceiverMode::kNullOrUndefined, reglist,
11425 return ReduceResult::Done();
11426}
11427
11428ReduceResult MaglevGraphBuilder::VisitCallRuntimeForPair() {
11430 interpreter::RegisterList args = iterator_.GetRegisterListOperand(1);
11431 ValueNode* context = GetContext();
11432
11433 size_t input_count = args.register_count() + CallRuntime::kFixedInputCount;
11434 CallRuntime* call_runtime = AddNewNode<CallRuntime>(
11435 input_count,
11436 [&](CallRuntime* call_runtime) {
11437 for (int i = 0; i < args.register_count(); ++i) {
11438 call_runtime->set_arg(i, GetTaggedValue(args[i]));
11439 }
11440 },
11441 function_id, context);
11443 StoreRegisterPair(result, call_runtime);
11444 return ReduceResult::Done();
11445}
11446
11447ReduceResult MaglevGraphBuilder::VisitInvokeIntrinsic() {
11448 // InvokeIntrinsic <function_id> <first_arg> <arg_count>
11450 interpreter::RegisterList args = iterator_.GetRegisterListOperand(1);
11451 switch (intrinsic_id) {
11452#define CASE(Name, _, arg_count) \
11453 case Runtime::kInline##Name: \
11454 DCHECK_IMPLIES(arg_count != -1, arg_count == args.register_count()); \
11455 return VisitIntrinsic##Name(args);
11457#undef CASE
11458 default:
11459 UNREACHABLE();
11460 }
11461}
11462
11463ReduceResult MaglevGraphBuilder::VisitIntrinsicCopyDataProperties(
11464 interpreter::RegisterList args) {
11465 DCHECK_EQ(args.register_count(), 2);
11468 return ReduceResult::Done();
11469}
11470
11471ReduceResult MaglevGraphBuilder::
11472 VisitIntrinsicCopyDataPropertiesWithExcludedPropertiesOnStack(
11473 interpreter::RegisterList args) {
11474 SmiConstant* excluded_property_count =
11475 GetSmiConstant(args.register_count() - 1);
11476 int kContext = 1;
11477 int kExcludedPropertyCount = 1;
11478 CallBuiltin* call_builtin = AddNewNode<CallBuiltin>(
11479 args.register_count() + kContext + kExcludedPropertyCount,
11480 [&](CallBuiltin* call_builtin) {
11481 int arg_index = 0;
11482 call_builtin->set_arg(arg_index++, GetTaggedValue(args[0]));
11483 call_builtin->set_arg(arg_index++, excluded_property_count);
11484 for (int i = 1; i < args.register_count(); i++) {
11485 call_builtin->set_arg(arg_index++, GetTaggedValue(args[i]));
11486 }
11487 },
11488 Builtin::kCopyDataPropertiesWithExcludedProperties,
11489 GetTaggedValue(GetContext()));
11490 SetAccumulator(call_builtin);
11491 return ReduceResult::Done();
11492}
11493
11494ReduceResult MaglevGraphBuilder::VisitIntrinsicCreateIterResultObject(
11495 interpreter::RegisterList args) {
11496 DCHECK_EQ(args.register_count(), 2);
11497 ValueNode* value = current_interpreter_frame_.get(args[0]);
11498 ValueNode* done = current_interpreter_frame_.get(args[1]);
11499 compiler::MapRef map =
11500 broker()->target_native_context().iterator_result_map(broker());
11501 VirtualObject* iter_result = CreateJSIteratorResult(map, value, done);
11502 ValueNode* allocation =
11504 SetAccumulator(allocation);
11505 return ReduceResult::Done();
11506}
11507
11508ReduceResult MaglevGraphBuilder::VisitIntrinsicCreateAsyncFromSyncIterator(
11509 interpreter::RegisterList args) {
11510 DCHECK_EQ(args.register_count(), 1);
11513 {GetTaggedValue(args[0])}));
11514 return ReduceResult::Done();
11515}
11516
11517ReduceResult MaglevGraphBuilder::VisitIntrinsicCreateJSGeneratorObject(
11518 interpreter::RegisterList args) {
11519 DCHECK_EQ(args.register_count(), 2);
11520 ValueNode* closure = current_interpreter_frame_.get(args[0]);
11525 {GetTaggedValue(closure), GetTaggedValue(receiver)}));
11526 return ReduceResult::Done();
11527}
11528
11529ReduceResult MaglevGraphBuilder::VisitIntrinsicGeneratorGetResumeMode(
11530 interpreter::RegisterList args) {
11531 DCHECK_EQ(args.register_count(), 1);
11532 ValueNode* generator = current_interpreter_frame_.get(args[0]);
11534 BuildLoadTaggedField(generator, JSGeneratorObject::kResumeModeOffset));
11535 return ReduceResult::Done();
11536}
11537
11538ReduceResult MaglevGraphBuilder::VisitIntrinsicGeneratorClose(
11539 interpreter::RegisterList args) {
11540 DCHECK_EQ(args.register_count(), 1);
11541 ValueNode* generator = current_interpreter_frame_.get(args[0]);
11543 BuildStoreTaggedFieldNoWriteBarrier(generator, value,
11544 JSGeneratorObject::kContinuationOffset,
11546 SetAccumulator(GetRootConstant(RootIndex::kUndefinedValue));
11547 return ReduceResult::Done();
11548}
11549
11550ReduceResult MaglevGraphBuilder::VisitIntrinsicGetImportMetaObject(
11551 interpreter::RegisterList args) {
11552 DCHECK_EQ(args.register_count(), 0);
11553 SetAccumulator(BuildCallRuntime(Runtime::kGetImportMetaObject, {}).value());
11554 return ReduceResult::Done();
11555}
11556
11557ReduceResult MaglevGraphBuilder::VisitIntrinsicAsyncFunctionAwait(
11558 interpreter::RegisterList args) {
11559 DCHECK_EQ(args.register_count(), 2);
11562 return ReduceResult::Done();
11563}
11564
11565ReduceResult MaglevGraphBuilder::VisitIntrinsicAsyncFunctionEnter(
11566 interpreter::RegisterList args) {
11567 DCHECK_EQ(args.register_count(), 2);
11570 return ReduceResult::Done();
11571}
11572
11573ReduceResult MaglevGraphBuilder::VisitIntrinsicAsyncFunctionReject(
11574 interpreter::RegisterList args) {
11575 DCHECK_EQ(args.register_count(), 2);
11578 return ReduceResult::Done();
11579}
11580
11581ReduceResult MaglevGraphBuilder::VisitIntrinsicAsyncFunctionResolve(
11582 interpreter::RegisterList args) {
11583 DCHECK_EQ(args.register_count(), 2);
11586 return ReduceResult::Done();
11587}
11588
11589ReduceResult MaglevGraphBuilder::VisitIntrinsicAsyncGeneratorAwait(
11590 interpreter::RegisterList args) {
11591 DCHECK_EQ(args.register_count(), 2);
11594 return ReduceResult::Done();
11595}
11596
11597ReduceResult MaglevGraphBuilder::VisitIntrinsicAsyncGeneratorReject(
11598 interpreter::RegisterList args) {
11599 DCHECK_EQ(args.register_count(), 2);
11602 return ReduceResult::Done();
11603}
11604
11605ReduceResult MaglevGraphBuilder::VisitIntrinsicAsyncGeneratorResolve(
11606 interpreter::RegisterList args) {
11607 DCHECK_EQ(args.register_count(), 3);
11610 GetTaggedValue(args[2])}));
11611 return ReduceResult::Done();
11612}
11613
11614ReduceResult MaglevGraphBuilder::VisitIntrinsicAsyncGeneratorYieldWithAwait(
11615 interpreter::RegisterList args) {
11616 DCHECK_EQ(args.register_count(), 2);
11619 return ReduceResult::Done();
11620}
11621
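// Builds a generic Construct node: the receiver slot is filled with
// undefined (the implicit receiver is created on the callee side), followed
// by the tagged arguments, with target, new.target and context as fixed
// inputs.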
11623 ValueNode* target, ValueNode* new_target, ValueNode* context,
11624 const CallArguments& args,
11625 const compiler::FeedbackSource& feedback_source) {
11626 size_t input_count = args.count_with_receiver() + Construct::kFixedInputCount;
11628 return AddNewNode<Construct>(
11629 input_count,
11630 [&](Construct* construct) {
11631 int arg_index = 0;
11632 // Add undefined receiver.
11633 construct->set_arg(arg_index++,
11634 GetRootConstant(RootIndex::kUndefinedValue));
11635 for (size_t i = 0; i < args.count(); i++) {
11636 construct->set_arg(arg_index++, GetTaggedValue(args[i]));
11637 }
11638 },
11639 feedback_source, GetTaggedValue(target), GetTaggedValue(new_target),
11640 GetTaggedValue(context));
11641}
11642
11644 ValueNode* key, ValueNode* value) {
11645 VirtualObject* elements = CreateFixedArray(broker()->fixed_array_map(), 2);
11646 elements->set(FixedArray::OffsetOfElementAt(0), key);
11647 elements->set(FixedArray::OffsetOfElementAt(1), value);
11648 compiler::MapRef map =
11649 broker()->target_native_context().js_array_packed_elements_map(broker());
11650 VirtualObject* array;
11652 array, CreateJSArray(map, map.instance_size(), GetInt32Constant(2)));
11653 array->set(JSArray::kElementsOffset, elements);
11655 return allocation;
11656}
11657
11659 compiler::MapRef map, ValueNode* length, ValueNode* elements,
11660 const compiler::SlackTrackingPrediction& slack_tracking_prediction,
11661 AllocationType allocation_type) {
11662 VirtualObject* array;
11664 array,
11665 CreateJSArray(map, slack_tracking_prediction.instance_size(), length));
11666 array->set(JSArray::kElementsOffset, elements);
11667 for (int i = 0; i < slack_tracking_prediction.inobject_property_count();
11668 i++) {
11669 array->set(map.GetInObjectPropertyOffset(i),
11670 GetRootConstant(RootIndex::kUndefinedValue));
11671 }
11672 array->ClearSlots(map.GetInObjectPropertyOffset(
11673 slack_tracking_prediction.inobject_property_count()),
11674 GetRootConstant(RootIndex::kOnePointerFillerMap));
11675 ValueNode* allocation = BuildInlinedAllocation(array, allocation_type);
11676 return allocation;
11677}
11678
11680 ValueNode* array, IterationKind iteration_kind) {
11681 compiler::MapRef map =
11682 broker()->target_native_context().initial_array_iterator_map(broker());
11683 VirtualObject* iterator = CreateJSArrayIterator(map, array, iteration_kind);
11684 ValueNode* allocation =
11686 return allocation;
11687}
11688
11690 ValueNode* closure, ValueNode* receiver) {
11691 compiler::OptionalHeapObjectRef maybe_constant = TryGetConstant(closure);
11692 if (!maybe_constant.has_value()) return {};
11693 if (!maybe_constant->IsJSFunction()) return {};
11694 compiler::JSFunctionRef function = maybe_constant->AsJSFunction();
11695 if (!function.has_initial_map(broker())) return {};
11696
11697 // Create the register file.
11698 compiler::SharedFunctionInfoRef shared = function.shared(broker());
11699 DCHECK(shared.HasBytecodeArray());
11700 compiler::BytecodeArrayRef bytecode_array = shared.GetBytecodeArray(broker());
11701 int parameter_count_no_receiver = bytecode_array.parameter_count() - 1;
11702 int length = parameter_count_no_receiver + bytecode_array.register_count();
11704 return {};
11705 }
11706 auto undefined = GetRootConstant(RootIndex::kUndefinedValue);
11707 VirtualObject* register_file =
11708 CreateFixedArray(broker()->fixed_array_map(), length);
11709 for (int i = 0; i < length; i++) {
11710 register_file->set(FixedArray::OffsetOfElementAt(i), undefined);
11711 }
11712
11713 // Create the JS[Async]GeneratorObject instance.
11714 compiler::SlackTrackingPrediction slack_tracking_prediction =
11716 function);
11717 compiler::MapRef initial_map = function.initial_map(broker());
11719 initial_map, slack_tracking_prediction.instance_size(), GetContext(),
11720 closure, receiver, register_file);
11721
11722 // Handle in-object properties.
11723 for (int i = 0; i < slack_tracking_prediction.inobject_property_count();
11724 i++) {
11725 generator->set(initial_map.GetInObjectPropertyOffset(i), undefined);
11726 }
11727 generator->ClearSlots(
11728 initial_map.GetInObjectPropertyOffset(
11729 slack_tracking_prediction.inobject_property_count()),
11730 GetRootConstant(RootIndex::kOnePointerFillerMap));
11731
11732 ValueNode* allocation =
11734 return allocation;
11735}
11736
11737namespace {
11738
11739compiler::OptionalMapRef GetArrayConstructorInitialMap(
11741 ElementsKind elements_kind, size_t argc, std::optional<int> maybe_length) {
11742 compiler::MapRef initial_map = array_function.initial_map(broker);
11743 if (argc == 1 && (!maybe_length.has_value() || *maybe_length > 0)) {
 11744 // Constructing an Array via new Array(N), where N is an unsigned
 11745 // integer, always creates a holey backing store.
11746 elements_kind = GetHoleyElementsKind(elements_kind);
11747 }
11748 return initial_map.AsElementsKind(broker, elements_kind);
11749}
11750
11751} // namespace
11752
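// Builds the backing store for an array of the given length: the empty
// fixed array root for length 0, otherwise a virtual FixedArray with every
// slot initialized to the hole.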
11754 if (length == 0) {
11755 return GetRootConstant(RootIndex::kEmptyFixedArray);
11756 }
11757 VirtualObject* elements =
11758 CreateFixedArray(broker()->fixed_array_map(), length);
11759 auto hole = GetRootConstant(RootIndex::kTheHoleValue);
11760 for (int i = 0; i < length; i++) {
11761 elements->set(FixedArray::OffsetOfElementAt(i), hole);
11762 }
11763 return elements;
11764}
11765
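// Attempts to lower `new Array(...)` to an inlined allocation. The elements
// kind and pretenuring decision come from the allocation site (or from the
// Array constructor protector when no site is available). Only zero or one
// argument is handled: a known small length allocates a hole-filled backing
// store directly, while an unknown length is allocated behind a branch that
// throws a RangeError for negative lengths. Double elements are not yet
// supported.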
11768 compiler::OptionalAllocationSiteRef maybe_allocation_site) {
11769 ElementsKind elements_kind =
11770 maybe_allocation_site.has_value()
11771 ? maybe_allocation_site->GetElementsKind()
11772 : array_function.initial_map(broker()).elements_kind();
11773 // TODO(victorgomes): Support double elements array.
11774 if (IsDoubleElementsKind(elements_kind)) return {};
11775 DCHECK(IsFastElementsKind(elements_kind));
11776
11777 std::optional<int> maybe_length;
11778 if (args.count() == 1) {
11779 maybe_length = TryGetInt32Constant(args[0]);
11780 }
11781 compiler::OptionalMapRef maybe_initial_map = GetArrayConstructorInitialMap(
11782 broker(), array_function, elements_kind, args.count(), maybe_length);
11783 if (!maybe_initial_map.has_value()) return {};
11784 compiler::MapRef initial_map = maybe_initial_map.value();
11785 compiler::SlackTrackingPrediction slack_tracking_prediction =
11787 array_function);
11788
11789 // Tells whether we are protected by either the {site} or a
11790 // protector cell to do certain speculative optimizations.
11791 bool can_inline_call = false;
11792 AllocationType allocation_type = AllocationType::kYoung;
11793
11794 if (maybe_allocation_site) {
11795 can_inline_call = maybe_allocation_site->CanInlineCall();
11796 allocation_type =
11797 broker()->dependencies()->DependOnPretenureMode(*maybe_allocation_site);
11798 broker()->dependencies()->DependOnElementsKind(*maybe_allocation_site);
11799 } else {
11800 compiler::PropertyCellRef array_constructor_protector = MakeRef(
11801 broker(), local_isolate()->factory()->array_constructor_protector());
11802 array_constructor_protector.CacheAsProtector(broker());
11803 can_inline_call = array_constructor_protector.value(broker()).AsSmi() ==
11805 }
11806
11807 if (args.count() == 0) {
11809 initial_map, GetSmiConstant(0),
11811 slack_tracking_prediction, allocation_type);
11812 }
11813
11814 if (maybe_length.has_value() && *maybe_length >= 0 &&
11815 *maybe_length < JSArray::kInitialMaxFastElementArray) {
11816 return BuildAndAllocateJSArray(initial_map, GetSmiConstant(*maybe_length),
11817 BuildElementsArray(*maybe_length),
11818 slack_tracking_prediction, allocation_type);
11819 }
11820
11821 // TODO(victorgomes): If we know the argument cannot be a number, we should
11822 // allocate an array with one element.
11823 // We don't know anything about the length, so we rely on the allocation
11824 // site to avoid deopt loops.
11825 if (args.count() == 1 && can_inline_call) {
11826 return SelectReduction(
11827 [&](auto& builder) {
11828 return BuildBranchIfInt32Compare(builder,
11829 Operation::kGreaterThanOrEqual,
11830 args[0], GetInt32Constant(0));
11831 },
11832 [&] {
11833 ValueNode* elements =
11834 AddNewNode<AllocateElementsArray>({args[0]}, allocation_type);
11835 return BuildAndAllocateJSArray(initial_map, args[0], elements,
11836 slack_tracking_prediction,
11837 allocation_type);
11838 },
11839 [&] {
11840 ValueNode* error = GetSmiConstant(
11841 static_cast<int>(MessageTemplate::kInvalidArrayLength));
11842 return BuildCallRuntime(Runtime::kThrowRangeError, {error});
11843 });
11844 }
11845
11846 // TODO(victorgomes): Support the constructor with argument count larger
11847 // than 1.
11848 return {};
11849}
11850
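// Reduces construct calls whose target is a known builtin constructor.
// Currently the Array constructor and the no-argument Object constructor
// are special-cased; everything else falls through to the generic path.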
11853 compiler::SharedFunctionInfoRef shared_function_info, ValueNode* target,
 11855 // TODO(victorgomes): Specialize more builtin targets that are known constants.
11856 switch (shared_function_info.builtin_id()) {
11857 case Builtin::kArrayConstructor: {
11859 break;
11860 }
11861 case Builtin::kObjectConstructor: {
11862 // If no value is passed, we can immediately lower to a simple
11863 // constructor.
11864 if (args.count() == 0) {
11866 target, builtin, DeoptimizeReason::kWrongConstructor));
11869 return result;
11870 }
11871 break;
11872 }
11873 default:
11874 break;
11875 }
11876 return {};
11877}
11878
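// Generic reduction for constructing a known JSFunction. The target is first
// checked against the expected constructor. For derived constructors the
// implicit receiver is the hole and the call result must be a JSReceiver,
// enforced by CheckDerivedConstructResult; for base constructors the
// implicit receiver is allocated up front (inlined when the initial map's
// constructor is the function itself) and the final value is picked by
// CheckConstructResult. The call itself runs inside a DeoptFrameScope that
// carries the implicit receiver.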
11880 compiler::JSFunctionRef function,
11881 compiler::SharedFunctionInfoRef shared_function_info, ValueNode* target,
11883 compiler::FeedbackSource& feedback_source) {
11885 target, function, DeoptimizeReason::kWrongConstructor));
11886
11887 int construct_arg_count = static_cast<int>(args.count());
11888 base::Vector<ValueNode*> construct_arguments_without_receiver =
11889 zone()->AllocateVector<ValueNode*>(construct_arg_count);
11890 for (int i = 0; i < construct_arg_count; i++) {
11891 construct_arguments_without_receiver[i] = args[i];
11892 }
11893
11894 if (IsDerivedConstructor(shared_function_info.kind())) {
11895 ValueNode* implicit_receiver = GetRootConstant(RootIndex::kTheHoleValue);
11896 args.set_receiver(implicit_receiver);
11897 ValueNode* call_result;
11898 {
11899 DeoptFrameScope construct(this, implicit_receiver);
11901 function, new_target, args, feedback_source);
11903 call_result = result.value();
11904 }
11905 if (CheckType(call_result, NodeType::kJSReceiver)) return call_result;
11906 ValueNode* constant_node;
11907 if (compiler::OptionalHeapObjectRef maybe_constant =
11908 TryGetConstant(call_result, &constant_node)) {
11909 compiler::HeapObjectRef constant = maybe_constant.value();
11910 if (constant.IsJSReceiver()) return constant_node;
11911 }
11912 if (!call_result->properties().is_tagged()) {
11913 return BuildCallRuntime(Runtime::kThrowConstructorReturnedNonObject, {});
11914 }
11915 return AddNewNode<CheckDerivedConstructResult>({call_result});
11916 }
11917
11918 // We do not create a construct stub lazy deopt frame, since
11919 // FastNewObject cannot fail if target is a JSFunction.
11920 ValueNode* implicit_receiver = nullptr;
11921 if (function.has_initial_map(broker())) {
11922 compiler::MapRef map = function.initial_map(broker());
11923 if (map.GetConstructor(broker()).equals(function)) {
11924 implicit_receiver = BuildInlinedAllocation(CreateJSConstructor(function),
11926 }
11927 }
11928 if (implicit_receiver == nullptr) {
11931 }
11932 EnsureType(implicit_receiver, NodeType::kJSReceiver);
11933
11934 args.set_receiver(implicit_receiver);
11935 ValueNode* call_result;
11936 {
11937 DeoptFrameScope construct(this, implicit_receiver);
11939 function, new_target, args, feedback_source);
11941 call_result = result.value();
11942 }
11943 if (CheckType(call_result, NodeType::kJSReceiver)) return call_result;
11944 if (!call_result->properties().is_tagged()) return implicit_receiver;
11945 ValueNode* constant_node;
11946 if (compiler::OptionalHeapObjectRef maybe_constant =
11947 TryGetConstant(call_result, &constant_node)) {
11948 compiler::HeapObjectRef constant = maybe_constant.value();
11949 DCHECK(CheckType(implicit_receiver, NodeType::kJSReceiver));
11950 if (constant.IsJSReceiver()) return constant_node;
11951 return implicit_receiver;
11952 }
11953 return AddNewNode<CheckConstructResult>({call_result, implicit_receiver});
11954}
11955
11957 compiler::HeapObjectRef feedback_target, ValueNode* target,
11959 compiler::FeedbackSource& feedback_source) {
11960 DCHECK(!feedback_target.IsAllocationSite());
11961 if (!feedback_target.map(broker()).is_constructor()) {
 11962 // TODO(victorgomes): Deal with the case where the target is not a constructor.
11963 return {};
11964 }
11965
11966 if (target != new_target) return {};
11967
11968 // TODO(v8:7700): Add fast paths for other callables.
11969 if (!feedback_target.IsJSFunction()) return {};
11970 compiler::JSFunctionRef function = feedback_target.AsJSFunction();
11971
11972 // Do not inline constructors with break points.
11973 compiler::SharedFunctionInfoRef shared_function_info =
11974 function.shared(broker());
11975 if (shared_function_info.HasBreakInfo(broker())) {
11976 return {};
11977 }
11978
 11979 // Do not inline across native contexts.
11980 if (function.native_context(broker()) != broker()->target_native_context()) {
11981 return {};
11982 }
11983
11984 if (args.mode() != CallArguments::kDefault) {
11985 // TODO(victorgomes): Maybe inline the spread stub? Or call known
11986 // function directly if arguments list is an array.
11987 return {};
11988 }
11989
11990 if (shared_function_info.HasBuiltinId()) {
11991 RETURN_IF_DONE(TryReduceConstructBuiltin(function, shared_function_info,
11992 target, args));
11993 }
11994
11995 if (shared_function_info.construct_as_builtin()) {
11996 // TODO(victorgomes): Inline JSBuiltinsConstructStub.
11997 return {};
11998 }
11999
12000 return TryReduceConstructGeneric(function, shared_function_info, target,
12001 new_target, args, feedback_source);
12002}
12003
12006 compiler::FeedbackSource& feedback_source) {
12007 compiler::ProcessedFeedback const& processed_feedback =
12008 broker()->GetFeedbackForCall(feedback_source);
12009 if (processed_feedback.IsInsufficient()) {
12011 DeoptimizeReason::kInsufficientTypeFeedbackForConstruct);
12012 }
12013
12014 DCHECK_EQ(processed_feedback.kind(), compiler::ProcessedFeedback::kCall);
12015 compiler::OptionalHeapObjectRef feedback_target =
12016 processed_feedback.AsCall().target();
12017 if (feedback_target.has_value() && feedback_target->IsAllocationSite()) {
12018 // The feedback is an AllocationSite, which means we have called the
12019 // Array function and collected transition (and pretenuring) feedback
12020 // for the resulting arrays.
12021 compiler::JSFunctionRef array_function =
12022 broker()->target_native_context().array_function(broker());
12024 target, array_function, DeoptimizeReason::kWrongConstructor));
12027 feedback_target->AsAllocationSite()),
12029 } else {
12030 if (feedback_target.has_value()) {
12032 TryReduceConstruct(feedback_target.value(), target, new_target, args,
12033 feedback_source),
12035 }
12036 if (compiler::OptionalHeapObjectRef maybe_constant =
12037 TryGetConstant(target)) {
12039 TryReduceConstruct(maybe_constant.value(), target, new_target, args,
12040 feedback_source),
12042 }
12043 }
12044 ValueNode* context = GetContext();
12046 feedback_source));
12047 return ReduceResult::Done();
12048}
12049
12050ReduceResult MaglevGraphBuilder::VisitConstruct() {
12052 ValueNode* target = LoadRegister(0);
12054 FeedbackSlot slot = GetSlotOperand(3);
12055 compiler::FeedbackSource feedback_source{feedback(), slot};
12056 CallArguments args(ConvertReceiverMode::kNullOrUndefined, reg_list,
12058 return BuildConstruct(target, new_target, args, feedback_source);
12059}
12060
12061ReduceResult MaglevGraphBuilder::VisitConstructWithSpread() {
12062 ValueNode* new_target = GetAccumulator();
12063 ValueNode* constructor = LoadRegister(0);
12064 interpreter::RegisterList args = iterator_.GetRegisterListOperand(1);
12065 ValueNode* context = GetContext();
12066 FeedbackSlot slot = GetSlotOperand(3);
12067 compiler::FeedbackSource feedback_source(feedback(), slot);
12068
12069 int kReceiver = 1;
12070 size_t input_count =
12072 ConstructWithSpread* construct = AddNewNode<ConstructWithSpread>(
12073 input_count,
12074 [&](ConstructWithSpread* construct) {
12075 int arg_index = 0;
12076 // Add undefined receiver.
12077 construct->set_arg(arg_index++,
12078 GetRootConstant(RootIndex::kUndefinedValue));
12079 for (int i = 0; i < args.register_count(); i++) {
12080 construct->set_arg(arg_index++, GetTaggedValue(args[i]));
12081 }
12082 },
12083 feedback_source, GetTaggedValue(constructor), GetTaggedValue(new_target),
12084 GetTaggedValue(context));
12085 SetAccumulator(construct);
12086 return ReduceResult::Done();
12087}
12088
12089ReduceResult MaglevGraphBuilder::VisitConstructForwardAllArgs() {
12090 ValueNode* new_target = GetAccumulator();
12091 ValueNode* target = LoadRegister(0);
12092 FeedbackSlot slot = GetSlotOperand(1);
12093 compiler::FeedbackSource feedback_source{feedback(), slot};
12094
12095 if (is_inline()) {
12096 base::SmallVector<ValueNode*, 8> forwarded_args(argument_count());
12097 for (int i = 1 /* skip receiver */; i < argument_count(); ++i) {
12098 forwarded_args[i] = GetInlinedArgument(i);
12099 }
12101 std::move(forwarded_args));
12102 return BuildConstruct(target, new_target, args, feedback_source);
12103 } else {
12104 // TODO(syg): Add ConstructForwardAllArgs reductions and support inlining.
12108 feedback_source));
12109 return ReduceResult::Done();
12110 }
12111}
12112
12113ReduceResult MaglevGraphBuilder::VisitTestEqual() {
12115}
12116ReduceResult MaglevGraphBuilder::VisitTestEqualStrict() {
12118}
12119ReduceResult MaglevGraphBuilder::VisitTestLessThan() {
12121}
12122ReduceResult MaglevGraphBuilder::VisitTestLessThanOrEqual() {
12124}
12125ReduceResult MaglevGraphBuilder::VisitTestGreaterThan() {
12127}
12128ReduceResult MaglevGraphBuilder::VisitTestGreaterThanOrEqual() {
12130}
12131
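// Statically determines whether the receiver has `prototype` somewhere in
// its prototype chain. Requires the receiver's possible maps to be known;
// walks each map's prototype chain and answers kIsInPrototypeChain only if
// every chain contains the prototype, the negative answer only if none does,
// and kMayBeInPrototypeChain when special receivers, unstable maps or
// dictionary-mode prototypes prevent a static answer. Stability dependencies
// on the walked prototype chains are registered.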
12135 auto node_info = known_node_aspects().TryGetInfoFor(receiver);
12136 // If the map set is not found, then we don't know anything about the map of
12137 // the receiver, so bail.
12138 if (!node_info || !node_info->possible_maps_are_known()) {
12140 }
12141
 12142 // If the set of possible maps is empty, then there is no possible map for this
 12143 // receiver and this path is unreachable at runtime. We're unlikely to ever hit
 12144 // this case since BuildCheckMaps should already have deopted unconditionally,
 12145 // but check it anyway in case another checking operation fails to deopt
 12146 // statically and unconditionally.
12147 if (node_info->possible_maps().is_empty()) {
12148 // TODO(leszeks): Add an unreachable assert here.
12150 }
12151
12152 ZoneVector<compiler::MapRef> receiver_map_refs(zone());
12153
12154 // Try to determine either that all of the {receiver_maps} have the given
12155 // {prototype} in their chain, or that none do. If we can't tell, return
12156 // kMayBeInPrototypeChain.
12157 bool all = true;
12158 bool none = true;
12159 for (compiler::MapRef map : node_info->possible_maps()) {
12160 receiver_map_refs.push_back(map);
12161 while (true) {
12162 if (IsSpecialReceiverInstanceType(map.instance_type())) {
12164 }
12165 if (!map.IsJSObjectMap()) {
12166 all = false;
12167 break;
12168 }
12169 compiler::HeapObjectRef map_prototype = map.prototype(broker());
12170 if (map_prototype.equals(prototype)) {
12171 none = false;
12172 break;
12173 }
12174 map = map_prototype.map(broker());
 12175 // TODO(v8:11457) Support dictionary mode prototypes here.
12176 if (!map.is_stable() || map.is_dictionary_map()) {
12178 }
12179 if (map.oddball_type(broker()) == compiler::OddballType::kNull) {
12180 all = false;
12181 break;
12182 }
12183 }
12184 }
12185 DCHECK(!receiver_map_refs.empty());
12186 DCHECK_IMPLIES(all, !none);
12187 if (!all && !none) return kMayBeInPrototypeChain;
12188
12189 {
12190 compiler::OptionalJSObjectRef last_prototype;
12191 if (all) {
12192 // We don't need to protect the full chain if we found the prototype, we
12193 // can stop at {prototype}. In fact we could stop at the one before
12194 // {prototype} but since we're dealing with multiple receiver maps this
12195 // might be a different object each time, so it's much simpler to include
12196 // {prototype}. That does, however, mean that we must check {prototype}'s
12197 // map stability.
12198 if (!prototype.IsJSObject() || !prototype.map(broker()).is_stable()) {
12200 }
12201 last_prototype = prototype.AsJSObject();
12202 }
12204 receiver_map_refs, kStartAtPrototype, last_prototype);
12205 }
12206
12207 DCHECK_EQ(all, !none);
12209}
12210
12212 ValueNode* object, compiler::HeapObjectRef prototype) {
12213 auto in_prototype_chain = InferHasInPrototypeChain(object, prototype);
12214 if (in_prototype_chain == kMayBeInPrototypeChain) return {};
12215
12216 return GetBooleanConstant(in_prototype_chain == kIsInPrototypeChain);
12217}
12218
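// Fast path for OrdinaryHasInstance with a constant callable. Bound
// functions recurse on their bound target function (otherwise falling back
// to a slow path that still uses the bound target constant), and plain
// functions with a known instance prototype that needs no runtime lookup
// reduce to a prototype-chain check.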
12224
12226 ValueNode* object, compiler::JSObjectRef callable,
12227 ValueNode* callable_node_if_not_constant) {
12228 const bool is_constant = callable_node_if_not_constant == nullptr;
12229 if (!is_constant) return {};
12230
12231 if (callable.IsJSBoundFunction()) {
 12232 // OrdinaryHasInstance on bound functions turns into a recursive
 12233 // invocation of the instanceof operator on the bound target function.
12234 compiler::JSBoundFunctionRef function = callable.AsJSBoundFunction();
12235 compiler::JSReceiverRef bound_target_function =
12236 function.bound_target_function(broker());
12237
12238 if (bound_target_function.IsJSObject()) {
12240 object, bound_target_function.AsJSObject(), nullptr));
12241 }
12242
12243 // If we can't build a fast instance-of, build a slow one with the
 12244 // partial optimization of using the bound target function constant.
12246 {GetTaggedValue(object), GetConstant(bound_target_function)});
12247 }
12248
12249 if (callable.IsJSFunction()) {
12250 // Optimize if we currently know the "prototype" property.
12251 compiler::JSFunctionRef function = callable.AsJSFunction();
12252
12253 // TODO(v8:7700): Remove the has_prototype_slot condition once the broker
12254 // is always enabled.
12255 if (!function.map(broker()).has_prototype_slot() ||
12256 !function.has_instance_prototype(broker()) ||
12257 function.PrototypeRequiresRuntimeLookup(broker())) {
12258 return {};
12259 }
12260
12261 compiler::HeapObjectRef prototype =
12263 return BuildHasInPrototypeChain(object, prototype);
12264 }
12265
12266 return {};
12267}
12268
12270 ValueNode* object, compiler::JSObjectRef callable,
12271 ValueNode* callable_node_if_not_constant) {
12273 object, callable, callable_node_if_not_constant));
12274
12276 {callable_node_if_not_constant
12277 ? GetTaggedValue(callable_node_if_not_constant)
12278 : GetConstant(callable),
12279 GetTaggedValue(object)});
12280}
12281
12283 ValueNode* object, compiler::JSObjectRef callable,
12284 ValueNode* callable_node_if_not_constant) {
12285 compiler::MapRef receiver_map = callable.map(broker());
12286 compiler::NameRef name = broker()->has_instance_symbol();
12288 receiver_map, name, compiler::AccessMode::kLoad);
12289
12290 // TODO(v8:11457) Support dictionary mode holders here.
12291 if (access_info.IsInvalid() || access_info.HasDictionaryHolder()) {
12292 return {};
12293 }
12294 access_info.RecordDependencies(broker()->dependencies());
12295
12296 if (access_info.IsNotFound()) {
12297 // If there's no @@hasInstance handler, the OrdinaryHasInstance operation
12298 // takes over, but that requires the constructor to be callable.
12299 if (!receiver_map.is_callable()) return {};
12300
12303
12304 // Monomorphic property access.
12305 if (callable_node_if_not_constant) {
12307 callable_node_if_not_constant,
12309 } else {
12310 // Even if we have a constant receiver, we still have to make sure its
12311 // map is correct, in case it migrates.
12312 if (receiver_map.is_stable()) {
12313 broker()->dependencies()->DependOnStableMap(receiver_map);
12314 } else {
12316 GetConstant(callable),
12318 }
12319 }
12320
12321 return BuildOrdinaryHasInstance(object, callable,
12322 callable_node_if_not_constant);
12323 }
12324
12325 if (access_info.IsFastDataConstant()) {
12326 compiler::OptionalJSObjectRef holder = access_info.holder();
12327 bool found_on_proto = holder.has_value();
12328 compiler::JSObjectRef holder_ref =
12329 found_on_proto ? holder.value() : callable;
12330 if (access_info.field_representation().IsDouble()) return {};
12331 compiler::OptionalObjectRef has_instance_field =
12333 broker(), access_info.field_representation(),
12334 access_info.field_index(), broker()->dependencies());
12335 if (!has_instance_field.has_value() ||
12336 !has_instance_field->IsHeapObject() ||
12337 !has_instance_field->AsHeapObject().map(broker()).is_callable()) {
12338 return {};
12339 }
12340
12341 if (found_on_proto) {
12344 holder.value());
12345 }
12346
12347 ValueNode* callable_node;
12348 if (callable_node_if_not_constant) {
12349 // Check that {callable_node_if_not_constant} is actually {callable}.
12351 BuildCheckValueByReference(callable_node_if_not_constant, callable,
12352 DeoptimizeReason::kWrongValue));
12353 callable_node = callable_node_if_not_constant;
12354 } else {
12355 callable_node = GetConstant(callable);
12356 }
12358 callable_node, base::VectorOf(access_info.lookup_start_object_maps())));
12359
 12360 // Special-case the common case where @@hasInstance is
 12361 // Function.prototype.hasInstance. In this case we don't need to call ToBoolean (or
12362 // use the continuation), since OrdinaryHasInstance is guaranteed to return
12363 // a boolean.
12364 if (has_instance_field->IsJSFunction()) {
12366 has_instance_field->AsJSFunction().shared(broker());
12367 if (shared.HasBuiltinId() &&
12368 shared.builtin_id() == Builtin::kFunctionPrototypeHasInstance) {
12369 return BuildOrdinaryHasInstance(object, callable,
12370 callable_node_if_not_constant);
12371 }
12372 }
12373
12374 // Call @@hasInstance
12376 {callable_node, object});
12377 ValueNode* call_result;
12378 {
12379 // Make sure that a lazy deopt after the @@hasInstance call also performs
12380 // ToBoolean before returning to the interpreter.
12381 DeoptFrameScope continuation_scope(
12382 this, Builtin::kToBooleanLazyDeoptContinuation);
12383
12384 if (has_instance_field->IsJSFunction()) {
12385 SaveCallSpeculationScope saved(this);
12387 call_result,
12388 TryReduceCallForConstant(has_instance_field->AsJSFunction(), args));
12389 } else {
12390 call_result = BuildGenericCall(GetConstant(*has_instance_field),
12392 }
12393 // TODO(victorgomes): Propagate the case if we need to soft deopt.
12394 }
12395
12396 return BuildToBoolean(call_result);
12397 }
12398
12399 return {};
12400}
12401
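// Converts a value to a boolean (negated when `flip` is set). Constants are
// folded; Float64/HoleyFloat64, Int32/Uint32 and IntPtr representations use
// dedicated ToBoolean nodes; otherwise cached int32/float64 alternatives are
// reused, and the tagged fallback dispatches on the known node type
// (JSReceiver, String, Smi, Boolean) before emitting a generic conversion.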
12402template <bool flip>
12404 if (IsConstantNode(value->opcode())) {
12406 flip);
12407 }
12408
12409 switch (value->value_representation()) {
12412 // The ToBoolean of both the_hole and NaN is false, so we can use the
12413 // same operation for HoleyFloat64 and Float64.
12414 return AddNewNode<Float64ToBoolean>({value}, flip);
12415
12417 // Uint32 has the same logic as Int32 when converting ToBoolean, namely
12418 // comparison against zero, so we can cast it and ignore the signedness.
12419 value = AddNewNode<TruncateUint32ToInt32>({value});
12420 [[fallthrough]];
12422 return AddNewNode<Int32ToBoolean>({value}, flip);
12423
12425 return AddNewNode<IntPtrToBoolean>({value}, flip);
12426
12428 break;
12429 }
12430
12431 NodeInfo* node_info = known_node_aspects().TryGetInfoFor(value);
12432 if (node_info) {
12433 if (ValueNode* as_int32 = node_info->alternative().int32()) {
12434 return AddNewNode<Int32ToBoolean>({as_int32}, flip);
12435 }
12436 if (ValueNode* as_float64 = node_info->alternative().float64()) {
12437 return AddNewNode<Float64ToBoolean>({as_float64}, flip);
12438 }
12439 }
12440
12441 NodeType value_type;
12442 if (CheckType(value, NodeType::kJSReceiver, &value_type)) {
 12444 // TODO(victorgomes): Check whether it is worth creating
 12445 // TestUndetectableLogicalNot or removing ToBooleanLogicalNot, since we
12446 // already optimize LogicalNots by swapping the branches.
12447 if constexpr (!flip) {
12449 }
12450 return result;
12451 }
12452 ValueNode* falsy_value = nullptr;
12453 if (CheckType(value, NodeType::kString)) {
12454 falsy_value = GetRootConstant(RootIndex::kempty_string);
12455 } else if (CheckType(value, NodeType::kSmi)) {
12456 falsy_value = GetSmiConstant(0);
12457 }
12458 if (falsy_value != nullptr) {
12460 {value, falsy_value});
12461 }
12462 if (CheckType(value, NodeType::kBoolean)) {
12463 if constexpr (flip) {
12464 value = BuildLogicalNot(value);
12465 }
12466 return value;
12467 }
12469 {value}, GetCheckType(value_type));
12470}
12471
12473 ValueNode* object, ValueNode* callable,
12474 compiler::FeedbackSource feedback_source) {
12475 compiler::ProcessedFeedback const& feedback =
12476 broker()->GetFeedbackForInstanceOf(feedback_source);
12477
12478 if (feedback.IsInsufficient()) {
12480 DeoptimizeReason::kInsufficientTypeFeedbackForInstanceOf);
12481 }
12482
 12483 // Check if the right-hand side is a known receiver, or if
 12484 // we have feedback from the InstanceOfIC.
12485 compiler::OptionalHeapObjectRef maybe_constant;
12486 if ((maybe_constant = TryGetConstant(callable)) &&
12487 maybe_constant.value().IsJSObject()) {
12488 compiler::JSObjectRef callable_ref = maybe_constant.value().AsJSObject();
12489 return TryBuildFastInstanceOf(object, callable_ref, nullptr);
12490 }
12491 if (feedback_source.IsValid()) {
12492 compiler::OptionalJSObjectRef callable_from_feedback =
12493 feedback.AsInstanceOf().value();
12494 if (callable_from_feedback) {
12495 return TryBuildFastInstanceOf(object, *callable_from_feedback, callable);
12496 }
12497 }
12498 return {};
12499}
12500
12501ReduceResult MaglevGraphBuilder::VisitTestInstanceOf() {
12502 // TestInstanceOf <src> <feedback_slot>
12503 ValueNode* object = LoadRegister(0);
12504 ValueNode* callable = GetAccumulator();
12505 FeedbackSlot slot = GetSlotOperand(1);
12506 compiler::FeedbackSource feedback_source{feedback(), slot};
12507
12508 MaybeReduceResult result =
12509 TryBuildFastInstanceOfWithFeedback(object, callable, feedback_source);
12511
12512 ValueNode* context = GetContext();
12514 AddNewNode<TestInstanceOf>({context, object, callable}, feedback_source));
12515 return ReduceResult::Done();
12516}
12517
12518ReduceResult MaglevGraphBuilder::VisitTestIn() {
12519 // TestIn <src> <feedback_slot>
12520 ValueNode* object = GetAccumulator();
12521 ValueNode* name = LoadRegister(0);
12522 FeedbackSlot slot = GetSlotOperand(1);
12523 compiler::FeedbackSource feedback_source{feedback(), slot};
12524
12525 // TODO(victorgomes): Create fast path using feedback.
12526 USE(feedback_source);
12527
12529 {GetTaggedValue(object), GetTaggedValue(name)}, feedback_source));
12530 return ReduceResult::Done();
12531}
12532
12533ReduceResult MaglevGraphBuilder::VisitToName() {
12534 // ToName
12535 if (!CheckType(GetAccumulator(), NodeType::kName)) {
12537 }
12538 return ReduceResult::Done();
12539}
12540
12543 if (CheckType(value, NodeType::kString)) return value;
12544 // TODO(victorgomes): Add fast path for constant primitives.
12545 if (CheckType(value, NodeType::kNumber)) {
12546 // TODO(verwaest): Float64ToString if float.
12547 return AddNewNode<NumberToString>({value});
12548 }
12549 return AddNewNode<ToString>({GetContext(), value}, mode);
12550}
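// Note on the fast paths above: a value already known to be a String is
// returned unchanged and a known Number uses the dedicated NumberToString
// node; only the generic ToString node needs the context, since it can end
// up invoking user-visible toString()/valueOf() via ToPrimitive.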
12551
12553 Object::Conversion mode) {
12554 ValueNode* value = GetAccumulator();
12555 switch (value->value_representation()) {
12560 return ReduceResult::Done();
12561
12564 return ReduceResult::Done();
12565 }
12566
12568 // We'll insert the required checks depending on the feedback.
12569 break;
12570 }
12571
12572 FeedbackSlot slot = GetSlotOperand(0);
12573 switch (broker()->GetFeedbackForBinaryOperation(
12577 break;
12580 UNREACHABLE();
12584 if (mode == Object::Conversion::kToNumber &&
12585 EnsureType(value, NodeType::kNumber)) {
12586 return ReduceResult::Done();
12587 }
12588 AddNewNode<CheckNumber>({value}, mode);
12589 break;
12591 // TODO(leszeks): Faster ToNumber for kNumberOrOddball
12596 if (CheckType(value, NodeType::kNumber)) return ReduceResult::Done();
12598 break;
12599 }
12600 return ReduceResult::Done();
12601}
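// Rough summary (some cases above depend on the binary-operation feedback
// hint): values that are already untagged numbers need no conversion, and
// for tagged values the hint selects the cheapest sufficient check, e.g.
// CheckNumber when the feedback says the input is already a Number.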
12602
12603ReduceResult MaglevGraphBuilder::VisitToNumber() {
12605}
12606ReduceResult MaglevGraphBuilder::VisitToNumeric() {
12608}
12609
12610ReduceResult MaglevGraphBuilder::VisitToObject() {
12611 // ToObject <dst>
12612 ValueNode* value = GetAccumulator();
12613 interpreter::Register destination = iterator_.GetRegisterOperand(0);
12614 NodeType old_type;
12615 if (CheckType(value, NodeType::kJSReceiver, &old_type)) {
12617 destination);
12618 } else {
12620 GetCheckType(old_type)));
12621 }
12622 return ReduceResult::Done();
12623}
12624
12625ReduceResult MaglevGraphBuilder::VisitToString() {
12626 // ToString
12628 return ReduceResult::Done();
12629}
12630
12631ReduceResult MaglevGraphBuilder::VisitToBoolean() {
12633 return ReduceResult::Done();
12634}
12635
12636ReduceResult MaglevGraphBuilder::VisitCreateRegExpLiteral() {
12637 // CreateRegExpLiteral <pattern_idx> <literal_idx> <flags>
12638 compiler::StringRef pattern = GetRefOperand<String>(0);
12639 FeedbackSlot slot = GetSlotOperand(1);
12640 uint32_t flags = GetFlag16Operand(2);
12641 compiler::FeedbackSource feedback_source{feedback(), slot};
12642 compiler::ProcessedFeedback const& processed_feedback =
12643 broker()->GetFeedbackForRegExpLiteral(feedback_source);
12644 if (!processed_feedback.IsInsufficient()) {
12645 compiler::RegExpBoilerplateDescriptionRef literal =
12646 processed_feedback.AsRegExpLiteral().value();
12647 compiler::NativeContextRef native_context =
12649 compiler::MapRef map =
12650 native_context.regexp_function(broker()).initial_map(broker());
12653 return ReduceResult::Done();
12654 }
12655 // Fallback.
12657 AddNewNode<CreateRegExpLiteral>({}, pattern, feedback_source, flags));
12658 return ReduceResult::Done();
12659}
12660
12661ReduceResult MaglevGraphBuilder::VisitCreateArrayLiteral() {
12662 compiler::HeapObjectRef constant_elements = GetRefOperand<HeapObject>(0);
12663 FeedbackSlot slot_index = GetSlotOperand(1);
12664 int bytecode_flags = GetFlag8Operand(2);
12665 int literal_flags =
12667 compiler::FeedbackSource feedback_source(feedback(), slot_index);
12668
12669 compiler::ProcessedFeedback const& processed_feedback =
12670 broker()->GetFeedbackForArrayOrObjectLiteral(feedback_source);
12671
12672 if (processed_feedback.IsInsufficient()) {
12674 DeoptimizeReason::kInsufficientTypeFeedbackForArrayLiteral);
12675 }
12676
12677 MaybeReduceResult result =
12678 TryBuildFastCreateObjectOrArrayLiteral(processed_feedback.AsLiteral());
12680
12682 bytecode_flags)) {
12683 // TODO(victorgomes): CreateShallowArrayLiteral should not need the
12684 // boilerplate descriptor. However, the current builtin checks that the
12685 // feedback exists and falls back to CreateArrayLiteral if it doesn't.
12687 {}, constant_elements, feedback_source, literal_flags));
12688 } else {
12690 {}, constant_elements, feedback_source, literal_flags));
12691 }
12692 return ReduceResult::Done();
12693}
12694
12695ReduceResult MaglevGraphBuilder::VisitCreateArrayFromIterable() {
12696 ValueNode* iterable = GetAccumulator();
12698 {GetTaggedValue(iterable)}));
12699 return ReduceResult::Done();
12700}
12701
12702ReduceResult MaglevGraphBuilder::VisitCreateEmptyArrayLiteral() {
12703 FeedbackSlot slot_index = GetSlotOperand(0);
12704 compiler::FeedbackSource feedback_source(feedback(), slot_index);
12705 compiler::ProcessedFeedback const& processed_feedback =
12706 broker()->GetFeedbackForArrayOrObjectLiteral(feedback_source);
12707 if (processed_feedback.IsInsufficient()) {
12709 DeoptimizeReason::kInsufficientTypeFeedbackForArrayLiteral);
12710 }
12711 compiler::AllocationSiteRef site = processed_feedback.AsLiteral().value();
12712
12714 ElementsKind kind = site.GetElementsKind();
12715
12716 compiler::NativeContextRef native_context = broker()->target_native_context();
12717 compiler::MapRef map = native_context.GetInitialJSArrayMap(broker(), kind);
12718 // Initial JSArray map shouldn't have any in-object properties.
12719 SBXCHECK_EQ(map.GetInObjectProperties(), 0);
12720 VirtualObject* array;
12722 array, CreateJSArray(map, map.instance_size(), GetSmiConstant(0)));
12724 return ReduceResult::Done();
12725}
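// In other words, for `[]` with sufficient feedback no builtin call is
// emitted at all: the initial JSArray map for the site's elements kind backs
// a VirtualObject with empty elements and length 0, which can then be
// materialized as an inlined allocation (or escape-analyzed away entirely).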
12726
12727std::optional<VirtualObject*>
12729 compiler::JSObjectRef boilerplate, AllocationType allocation, int max_depth,
12730 int* max_properties) {
12731 DCHECK_GE(max_depth, 0);
12732 DCHECK_GE(*max_properties, 0);
12733
12734 if (max_depth == 0) return {};
12735
12736 // Prevent concurrent migrations of boilerplate objects.
12738 boilerplate_access_guard(broker());
12739
12740 // Now that we hold the migration lock, get the current map.
12741 compiler::MapRef boilerplate_map = boilerplate.map(broker());
12742 // Protect against concurrent changes to the boilerplate object by checking
12743 // for an identical value at the end of the compilation.
12745 boilerplate, HeapObject::kMapOffset, boilerplate_map);
12746 {
12747 compiler::OptionalMapRef current_boilerplate_map =
12748 boilerplate.map_direct_read(broker());
12749 if (!current_boilerplate_map.has_value() ||
12750 !current_boilerplate_map->equals(boilerplate_map)) {
12751 // TODO(leszeks): Emit an eager deopt for this case, so that we can
12752 // re-learn the boilerplate. This will be easier once we get rid of the
12753 // two-pass approach, since we'll be able to create the eager deopt here
12754 // and return a ReduceResult::DoneWithAbort().
12755 return {};
12756 }
12757 }
12758
12759 // Bail out if the boilerplate map has been deprecated. The map could of
12760 // course be deprecated at some point after the line below, but it's not a
12761 // correctness issue -- it only means the literal won't be created with the
12762 // most up to date map(s).
12763 if (boilerplate_map.is_deprecated()) return {};
12764
12765 // We currently only support in-object properties.
12766 if (boilerplate.map(broker()).elements_kind() == DICTIONARY_ELEMENTS ||
12767 boilerplate.map(broker()).is_dictionary_map() ||
12768 !boilerplate.raw_properties_or_hash(broker()).has_value()) {
12769 return {};
12770 }
12771 {
12772 compiler::ObjectRef properties =
12773 *boilerplate.raw_properties_or_hash(broker());
12774 bool const empty =
12775 properties.IsSmi() ||
12776 properties.equals(MakeRef(
12777 broker(), local_isolate()->factory()->empty_fixed_array())) ||
12778 properties.equals(MakeRef(
12779 broker(),
12780 Cast<Object>(local_isolate()->factory()->empty_property_array())));
12781 if (!empty) return {};
12782 }
12783
12784 compiler::OptionalFixedArrayBaseRef maybe_elements =
12785 boilerplate.elements(broker(), kRelaxedLoad);
12786 if (!maybe_elements.has_value()) return {};
12787 compiler::FixedArrayBaseRef boilerplate_elements = maybe_elements.value();
12789 boilerplate, JSObject::kElementsOffset, boilerplate_elements);
12790 const uint32_t elements_length = boilerplate_elements.length();
12791
12792 VirtualObject* fast_literal;
12793 if (boilerplate_map.IsJSArrayMap()) {
12794 MaybeReduceResult fast_array = CreateJSArray(
12795 boilerplate_map, boilerplate_map.instance_size(),
12796 GetConstant(boilerplate.AsJSArray().GetBoilerplateLength(broker())));
12797 CHECK(fast_array.HasValue());
12798 fast_literal = fast_array.value()->Cast<VirtualObject>();
12799 } else {
12800 fast_literal = CreateJSObject(boilerplate_map);
12801 }
12802
12803 int inobject_properties = boilerplate_map.GetInObjectProperties();
12804
12805 // Compute the in-object properties to store first.
12806 int index = 0;
12807 for (InternalIndex i :
12808 InternalIndex::Range(boilerplate_map.NumberOfOwnDescriptors())) {
12809 PropertyDetails const property_details =
12810 boilerplate_map.GetPropertyDetails(broker(), i);
12811 if (property_details.location() != PropertyLocation::kField) continue;
12812 DCHECK_EQ(PropertyKind::kData, property_details.kind());
12813 if ((*max_properties)-- == 0) return {};
12814
12815 int offset = boilerplate_map.GetInObjectPropertyOffset(index);
12816#ifdef DEBUG
12817 FieldIndex field_index =
12818 FieldIndex::ForDetails(*boilerplate_map.object(), property_details);
12819 DCHECK(field_index.is_inobject());
12820 DCHECK_EQ(index, field_index.property_index());
12821 DCHECK_EQ(field_index.offset(), offset);
12822#endif
12823
12824 // The index is derived from the in-sandbox `NumberOfOwnDescriptors` value,
12825 // but the access is to the out-of-sandbox fast_literal fields.
12826 SBXCHECK_LT(index, inobject_properties);
12827
12828 // Note: the use of RawInobjectPropertyAt (vs. the higher-level
12829 // GetOwnFastConstantDataProperty) here is necessary, since the underlying
12830 // value may be `uninitialized`, which the latter explicitly does not
12831 // support.
12832 compiler::OptionalObjectRef maybe_boilerplate_value =
12833 boilerplate.RawInobjectPropertyAt(
12834 broker(),
12836 if (!maybe_boilerplate_value.has_value()) return {};
12837
12838 // Note: We don't need to take a compilation dependency verifying the value
12839 // of `boilerplate_value`, since boilerplate properties are constant after
12840 // initialization modulo map migration. We protect against concurrent map
12841 // migrations (other than elements kind transitions, which don't affect us)
12842 // via the boilerplate_migration_access lock.
12843 compiler::ObjectRef boilerplate_value = maybe_boilerplate_value.value();
12844
12845 if (boilerplate_value.IsJSObject()) {
12846 compiler::JSObjectRef boilerplate_object = boilerplate_value.AsJSObject();
12847 std::optional<VirtualObject*> maybe_object_value =
12848 TryReadBoilerplateForFastLiteral(boilerplate_object, allocation,
12849 max_depth - 1, max_properties);
12850 if (!maybe_object_value.has_value()) return {};
12851 fast_literal->set(offset, maybe_object_value.value());
12852 } else if (property_details.representation().IsDouble()) {
12853 fast_literal->set(offset,
12854 CreateHeapNumber(Float64::FromBits(
12855 boilerplate_value.AsHeapNumber().value_as_bits())));
12856 } else {
12857 // It's fine to store the 'uninitialized' Oddball into a Smi field since
12858 // it will get overwritten anyway.
12859 DCHECK_IMPLIES(property_details.representation().IsSmi() &&
12860 !boilerplate_value.IsSmi(),
12861 IsUninitialized(*boilerplate_value.object()));
12862 fast_literal->set(offset, GetConstant(boilerplate_value));
12863 }
12864 index++;
12865 }
12866
12867 // Fill slack at the end of the boilerplate object with filler maps.
12868 for (; index < inobject_properties; ++index) {
12870 // TODO(wenyuzhao): Fix incorrect MachineType when V8_MAP_PACKING is
12871 // enabled.
12872 int offset = boilerplate_map.GetInObjectPropertyOffset(index);
12873 fast_literal->set(offset, GetRootConstant(RootIndex::kOnePointerFillerMap));
12874 }
12875
12876 DCHECK_EQ(JSObject::kElementsOffset, JSArray::kElementsOffset);
12877 // Empty or copy-on-write elements just store a constant.
12878 compiler::MapRef elements_map = boilerplate_elements.map(broker());
12879 // Protect against concurrent changes to the boilerplate object by checking
12880 // for an identical value at the end of the compilation.
12882 boilerplate_elements, HeapObject::kMapOffset, elements_map);
12883 if (boilerplate_elements.length() == 0 ||
12884 elements_map.IsFixedCowArrayMap(broker())) {
12885 if (allocation == AllocationType::kOld &&
12886 !boilerplate.IsElementsTenured(boilerplate_elements)) {
12887 return {};
12888 }
12889 fast_literal->set(JSObject::kElementsOffset,
12890 GetConstant(boilerplate_elements));
12891 } else {
12892 // Compute the elements to store first (might have effects).
12893 if (boilerplate_elements.IsFixedDoubleArray()) {
12894 int const size = FixedDoubleArray::SizeFor(elements_length);
12895 if (size > kMaxRegularHeapObjectSize) return {};
12896 fast_literal->set(
12897 JSObject::kElementsOffset,
12898 CreateDoubleFixedArray(elements_length,
12899 boilerplate_elements.AsFixedDoubleArray()));
12900 } else {
12901 int const size = FixedArray::SizeFor(elements_length);
12902 if (size > kMaxRegularHeapObjectSize) return {};
12903 VirtualObject* elements =
12904 CreateFixedArray(broker()->fixed_array_map(), elements_length);
12905 compiler::FixedArrayRef boilerplate_elements_as_fixed_array =
12906 boilerplate_elements.AsFixedArray();
12907 for (uint32_t i = 0; i < elements_length; ++i) {
12908 if ((*max_properties)-- == 0) return {};
12909 compiler::OptionalObjectRef element_value =
12910 boilerplate_elements_as_fixed_array.TryGet(broker(), i);
12911 if (!element_value.has_value()) return {};
12912 if (element_value->IsJSObject()) {
12913 std::optional<VirtualObject*> object =
12914 TryReadBoilerplateForFastLiteral(element_value->AsJSObject(),
12915 allocation, max_depth - 1,
12916 max_properties);
12917 if (!object.has_value()) return {};
12918 elements->set(FixedArray::OffsetOfElementAt(i), *object);
12919 } else {
12921 GetConstant(*element_value));
12922 }
12923 }
12924
12925 fast_literal->set(JSObject::kElementsOffset, elements);
12926 }
12927 }
12928
12929 return fast_literal;
12930}
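// For illustration (a hypothetical literal, not from the V8 sources):
//
//   const p = {x: 1, y: 2.5, nested: [0, 1]};
//
// Its boilerplate object is walked by the function above: in-object fields
// first (recursing into nested objects, double fields becoming HeapNumber
// virtual objects), then the elements backing store, all bounded by
// kMaxFastLiteralDepth and kMaxFastLiteralProperties. The result is a tree
// of VirtualObjects that can later be turned into inlined allocations
// instead of calling the Create*Literal builtins.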
12931
12933 CHECK_EQ(old->type(), VirtualObject::kDefault);
12934 VirtualObject* vobject = old->Clone(NewObjectId(), zone());
12936 old->allocation()->UpdateObject(vobject);
12937 return vobject;
12938}
12939
12941 compiler::MapRef map, uint32_t slot_count_including_map) {
12942 // VirtualObjects are not added to the Maglev graph.
12943 DCHECK_GT(slot_count_including_map, 0);
12944 uint32_t slot_count = slot_count_including_map - 1;
12945 ValueNode** slots = zone()->AllocateArray<ValueNode*>(slot_count);
12947 zone(), 0, map, NewObjectId(), slot_count, slots);
12948 std::fill_n(slots, slot_count,
12949 GetRootConstant(RootIndex::kOnePointerFillerMap));
12950 return vobject;
12951}
12952
12954 // VirtualObjects are not added to the Maglev graph.
12956 zone(), 0, broker()->heap_number_map(), NewObjectId(), value);
12957 return vobject;
12958}
12959
12961 uint32_t elements_length, compiler::FixedDoubleArrayRef elements) {
12962 // VirtualObjects are not added to the Maglev graph.
12964 zone(), 0, broker()->fixed_double_array_map(), NewObjectId(),
12965 elements_length, elements);
12966 return vobject;
12967}
12968
12977
12979 DCHECK(!map.is_dictionary_map());
12980 DCHECK(!map.IsInobjectSlackTrackingInProgress());
12981 int slot_count = map.instance_size() / kTaggedSize;
12982 SBXCHECK_GE(slot_count, 3);
12983 VirtualObject* object = CreateVirtualObject(map, slot_count);
12984 object->set(JSObject::kPropertiesOrHashOffset,
12985 GetRootConstant(RootIndex::kEmptyFixedArray));
12986 object->set(JSObject::kElementsOffset,
12987 GetRootConstant(RootIndex::kEmptyFixedArray));
12988 object->ClearSlots(JSObject::kElementsOffset,
12989 GetRootConstant(RootIndex::kOnePointerFillerMap));
12990 return object;
12991}
12992
12994 int instance_size,
12995 ValueNode* length) {
12996 int slot_count = instance_size / kTaggedSize;
12997 SBXCHECK_GE(slot_count, 4);
12998 VirtualObject* object = CreateVirtualObject(map, slot_count);
12999 object->set(JSArray::kPropertiesOrHashOffset,
13000 GetRootConstant(RootIndex::kEmptyFixedArray));
13001 // Either the value is a Smi already, or we force a conversion to Smi and
13002 // cache the value in its alternative representation node.
13004 object->set(JSArray::kElementsOffset,
13005 GetRootConstant(RootIndex::kEmptyFixedArray));
13006 object->set(JSArray::kLengthOffset, length);
13007 object->ClearSlots(JSArray::kLengthOffset,
13008 GetRootConstant(RootIndex::kOnePointerFillerMap));
13009 return object;
13010}
13011
13013 compiler::MapRef map, ValueNode* iterated_object, IterationKind kind) {
13014 int slot_count = map.instance_size() / kTaggedSize;
13015 SBXCHECK_EQ(slot_count, 6);
13016 VirtualObject* object = CreateVirtualObject(map, slot_count);
13017 object->set(JSArrayIterator::kPropertiesOrHashOffset,
13018 GetRootConstant(RootIndex::kEmptyFixedArray));
13019 object->set(JSArrayIterator::kElementsOffset,
13020 GetRootConstant(RootIndex::kEmptyFixedArray));
13021 object->set(JSArrayIterator::kIteratedObjectOffset, iterated_object);
13022 object->set(JSArrayIterator::kNextIndexOffset, GetInt32Constant(0));
13023 object->set(JSArrayIterator::kKindOffset,
13024 GetInt32Constant(static_cast<int>(kind)));
13025 return object;
13026}
13027
13029 compiler::JSFunctionRef constructor) {
13032 constructor);
13033 int slot_count = prediction.instance_size() / kTaggedSize;
13034 VirtualObject* object =
13035 CreateVirtualObject(constructor.initial_map(broker()), slot_count);
13036 SBXCHECK_GE(slot_count, 3);
13037 object->set(JSObject::kPropertiesOrHashOffset,
13038 GetRootConstant(RootIndex::kEmptyFixedArray));
13039 object->set(JSObject::kElementsOffset,
13040 GetRootConstant(RootIndex::kEmptyFixedArray));
13041 object->ClearSlots(JSObject::kElementsOffset,
13042 GetRootConstant(RootIndex::kOnePointerFillerMap));
13043 return object;
13044}
13045
13047 int length) {
13048 int slot_count = FixedArray::SizeFor(length) / kTaggedSize;
13049 VirtualObject* array = CreateVirtualObject(map, slot_count);
13050 array->set(offsetof(FixedArray, length_), GetInt32Constant(length));
13051 array->ClearSlots(offsetof(FixedArray, length_),
13052 GetRootConstant(RootIndex::kOnePointerFillerMap));
13053 return array;
13054}
13055
13057 compiler::MapRef map, int length, compiler::ScopeInfoRef scope_info,
13058 ValueNode* previous_context, std::optional<ValueNode*> extension) {
13059 int slot_count = FixedArray::SizeFor(length) / kTaggedSize;
13060 VirtualObject* context = CreateVirtualObject(map, slot_count);
13061 context->set(Context::kLengthOffset, GetInt32Constant(length));
13063 GetConstant(scope_info));
13065 previous_context);
13066 int index = Context::PREVIOUS_INDEX + 1;
13067 if (extension.has_value()) {
13069 extension.value());
13070 index++;
13071 }
13072 for (; index < length; index++) {
13073 context->set(Context::OffsetOfElementAt(index),
13074 GetRootConstant(RootIndex::kUndefinedValue));
13075 }
13076 return context;
13077}
13078
13080 compiler::MapRef map, ValueNode* length, ValueNode* elements,
13081 std::optional<ValueNode*> callee) {
13082 DCHECK_EQ(JSSloppyArgumentsObject::kLengthOffset, JSArray::kLengthOffset);
13083 DCHECK_EQ(JSStrictArgumentsObject::kLengthOffset, JSArray::kLengthOffset);
13084 int slot_count = map.instance_size() / kTaggedSize;
13085 SBXCHECK_EQ(slot_count, callee.has_value() ? 5 : 4);
13086 VirtualObject* arguments = CreateVirtualObject(map, slot_count);
13087 arguments->set(JSArray::kPropertiesOrHashOffset,
13088 GetRootConstant(RootIndex::kEmptyFixedArray));
13089 arguments->set(JSArray::kElementsOffset, elements);
13090 CHECK(length->Is<Int32Constant>() || length->Is<ArgumentsLength>() ||
13091 length->Is<RestLength>());
13092 arguments->set(JSArray::kLengthOffset, length);
13093 if (callee.has_value()) {
13094 arguments->set(JSSloppyArgumentsObject::kCalleeOffset, callee.value());
13095 }
13096 DCHECK(arguments->map().IsJSArgumentsObjectMap() ||
13097 arguments->map().IsJSArrayMap());
13098 return arguments;
13099}
13100
13102 compiler::MapRef map, int mapped_count, ValueNode* context,
13103 ValueNode* unmapped_elements) {
13104 int slot_count = SloppyArgumentsElements::SizeFor(mapped_count) / kTaggedSize;
13105 VirtualObject* elements = CreateVirtualObject(map, slot_count);
13106 elements->set(offsetof(SloppyArgumentsElements, length_),
13107 GetInt32Constant(mapped_count));
13108 elements->set(offsetof(SloppyArgumentsElements, context_), context);
13109 elements->set(offsetof(SloppyArgumentsElements, arguments_),
13110 unmapped_elements);
13111 return elements;
13112}
13113
13117 int slot_count = JSRegExp::Size() / kTaggedSize;
13118 VirtualObject* regexp = CreateVirtualObject(map, slot_count);
13119 regexp->set(JSRegExp::kPropertiesOrHashOffset,
13120 GetRootConstant(RootIndex::kEmptyFixedArray));
13121 regexp->set(JSRegExp::kElementsOffset,
13122 GetRootConstant(RootIndex::kEmptyFixedArray));
13123 regexp->set(JSRegExp::kDataOffset,
13125 kRegExpDataIndirectPointerTag));
13126 regexp->set(JSRegExp::kSourceOffset, GetConstant(literal.source(broker())));
13127 regexp->set(JSRegExp::kFlagsOffset, GetInt32Constant(literal.flags()));
13128 regexp->set(JSRegExp::kLastIndexOffset,
13130 return regexp;
13131}
13132
13134 compiler::MapRef map, int instance_size, ValueNode* context,
13135 ValueNode* closure, ValueNode* receiver, ValueNode* register_file) {
13136 int slot_count = instance_size / kTaggedSize;
13137 InstanceType instance_type = map.instance_type();
13138 DCHECK(instance_type == JS_GENERATOR_OBJECT_TYPE ||
13139 instance_type == JS_ASYNC_GENERATOR_OBJECT_TYPE);
13140 SBXCHECK_GE(slot_count, instance_type == JS_GENERATOR_OBJECT_TYPE ? 10 : 12);
13141 VirtualObject* object = CreateVirtualObject(map, slot_count);
13142 object->set(JSGeneratorObject::kPropertiesOrHashOffset,
13143 GetRootConstant(RootIndex::kEmptyFixedArray));
13144 object->set(JSGeneratorObject::kElementsOffset,
13145 GetRootConstant(RootIndex::kEmptyFixedArray));
13146 object->set(JSGeneratorObject::kContextOffset, context);
13147 object->set(JSGeneratorObject::kFunctionOffset, closure);
13148 object->set(JSGeneratorObject::kReceiverOffset, receiver);
13149 object->set(JSGeneratorObject::kInputOrDebugPosOffset,
13150 GetRootConstant(RootIndex::kUndefinedValue));
13151 object->set(JSGeneratorObject::kResumeModeOffset,
13153 object->set(JSGeneratorObject::kContinuationOffset,
13155 object->set(JSGeneratorObject::kParametersAndRegistersOffset, register_file);
13156 if (instance_type == JS_ASYNC_GENERATOR_OBJECT_TYPE) {
13157 object->set(JSAsyncGeneratorObject::kQueueOffset,
13158 GetRootConstant(RootIndex::kUndefinedValue));
13159 object->set(JSAsyncGeneratorObject::kIsAwaitingOffset, GetInt32Constant(0));
13160 }
13161 return object;
13162}
13163
13165 ValueNode* value,
13166 ValueNode* done) {
13167 static_assert(JSIteratorResult::kSize == 5 * kTaggedSize);
13168 int slot_count = JSIteratorResult::kSize / kTaggedSize;
13169 VirtualObject* iter_result = CreateVirtualObject(map, slot_count);
13170 iter_result->set(JSIteratorResult::kPropertiesOrHashOffset,
13171 GetRootConstant(RootIndex::kEmptyFixedArray));
13172 iter_result->set(JSIteratorResult::kElementsOffset,
13173 GetRootConstant(RootIndex::kEmptyFixedArray));
13174 iter_result->set(JSIteratorResult::kValueOffset, value);
13175 iter_result->set(JSIteratorResult::kDoneOffset, done);
13176 return iter_result;
13177}
13178
13180 ValueNode* string) {
13181 static_assert(JSStringIterator::kHeaderSize == 5 * kTaggedSize);
13182 int slot_count = JSStringIterator::kHeaderSize / kTaggedSize;
13183 VirtualObject* string_iter = CreateVirtualObject(map, slot_count);
13184 string_iter->set(JSStringIterator::kPropertiesOrHashOffset,
13185 GetRootConstant(RootIndex::kEmptyFixedArray));
13186 string_iter->set(JSStringIterator::kElementsOffset,
13187 GetRootConstant(RootIndex::kEmptyFixedArray));
13188 string_iter->set(JSStringIterator::kStringOffset, string);
13189 string_iter->set(JSStringIterator::kIndexOffset, GetInt32Constant(0));
13190 return string_iter;
13191}
13192
13194 AllocationType allocation_type, VirtualObject* vobject) {
13196 if (!current_allocation_block_ || v8_flags.maglev_allocation_folding == 0 ||
13197 current_allocation_block_->allocation_type() != allocation_type ||
13198 !v8_flags.inline_new || is_turbolev()) {
13200 AddNewNode<AllocationBlock>({}, allocation_type);
13201 }
13202
13203 int current_size = current_allocation_block_->size();
13204 if (current_size + vobject->size() > kMaxRegularHeapObjectSize) {
13206 AddNewNode<AllocationBlock>({}, allocation_type);
13207 }
13208
13209 DCHECK_GE(current_size, 0);
13210 InlinedAllocation* allocation =
13212 graph()->allocations_escape_map().emplace(allocation, zone());
13213 current_allocation_block_->Add(allocation);
13214 vobject->set_allocation(allocation);
13215 return allocation;
13216}
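// Allocation folding, in short: consecutive inlined allocations that share
// an AllocationType are appended to the same AllocationBlock so codegen can
// reserve their combined size with a single bump-pointer allocation. A fresh
// block is started whenever the allocation type changes, folding is disabled
// (flag off, !inline_new, or turbolev), or adding this object would push the
// block past kMaxRegularHeapObjectSize.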
13217
13221
13223 int use_count) {
13224 if (!v8_flags.maglev_escape_analysis) return;
13225 allocation->AddNonEscapingUses(use_count);
13226}
13227
13229 vobject->ForEachInput([&](ValueNode* value) {
13230 if (InlinedAllocation* nested_allocation =
13231 value->TryCast<InlinedAllocation>()) {
13232 VirtualObject* nested_object =
13233 current_interpreter_frame_.virtual_objects().FindAllocatedWith(
13234 nested_allocation);
13235 CHECK_NOT_NULL(nested_object);
13236 AddDeoptUse(nested_object);
13237 } else if (!IsConstantNode(value->opcode()) &&
13238 value->opcode() != Opcode::kArgumentsElements &&
13239 value->opcode() != Opcode::kArgumentsLength &&
13240 value->opcode() != Opcode::kRestLength) {
13241 AddDeoptUse(value);
13242 }
13243 });
13244}
13245
13247 VirtualObject* vobject, AllocationType allocation_type) {
13248 InlinedAllocation* allocation =
13249 ExtendOrReallocateCurrentAllocationBlock(allocation_type, vobject);
13250 DCHECK_EQ(vobject->size(), sizeof(ConsString));
13251 DCHECK_EQ(vobject->cons_string().length->value_representation(),
13253 AddNonEscapingUses(allocation, 5);
13254 BuildInitializeStore(allocation, vobject->cons_string().map,
13258 static_cast<int>(offsetof(ConsString, raw_hash_field_)));
13259 AddNewNode<StoreInt32>({allocation, vobject->cons_string().length},
13260 static_cast<int>(offsetof(ConsString, length_)));
13261 BuildInitializeStore(allocation, vobject->cons_string().first(),
13262 offsetof(ConsString, first_));
13263 BuildInitializeStore(allocation, vobject->cons_string().second(),
13264 offsetof(ConsString, second_));
13266 loop_effects_->allocations.insert(allocation);
13267 }
13268 return allocation;
13269}
13270
13272 VirtualObject* vobject, AllocationType allocation_type) {
13273 DCHECK(vobject->map().IsHeapNumberMap());
13274 InlinedAllocation* allocation =
13275 ExtendOrReallocateCurrentAllocationBlock(allocation_type, vobject);
13276 AddNonEscapingUses(allocation, 2);
13277 BuildStoreMap(allocation, broker()->heap_number_map(),
13279 AddNewNode<StoreFloat64>({allocation, GetFloat64Constant(vobject->number())},
13280 static_cast<int>(offsetof(HeapNumber, value_)));
13281 return allocation;
13282}
13283
13286 VirtualObject* vobject, AllocationType allocation_type) {
13287 DCHECK(vobject->map().IsFixedDoubleArrayMap());
13288 InlinedAllocation* allocation =
13289 ExtendOrReallocateCurrentAllocationBlock(allocation_type, vobject);
13290 int length = vobject->double_elements_length();
13291 AddNonEscapingUses(allocation, length + 2);
13292 BuildStoreMap(allocation, broker()->fixed_double_array_map(),
13295 {allocation, GetSmiConstant(length)},
13296 static_cast<int>(offsetof(FixedDoubleArray, length_)),
13298 for (int i = 0; i < length; ++i) {
13300 {allocation,
13304 }
13305 return allocation;
13306}
13307
13309 VirtualObject* vobject, AllocationType allocation_type) {
13311 InlinedAllocation* allocation;
13312 switch (vobject->type()) {
13314 allocation =
13315 BuildInlinedAllocationForHeapNumber(vobject, allocation_type);
13316 break;
13318 allocation =
13319 BuildInlinedAllocationForDoubleFixedArray(vobject, allocation_type);
13320 break;
13322 allocation =
13323 BuildInlinedAllocationForConsString(vobject, allocation_type);
13324 break;
13327 vobject->ForEachInput([&](ValueNode*& node) {
13328 ValueNode* value_to_push;
13329 if (node->Is<VirtualObject>()) {
13330 VirtualObject* nested = node->Cast<VirtualObject>();
13331 node = BuildInlinedAllocation(nested, allocation_type);
13332 value_to_push = node;
13333 } else if (node->Is<Float64Constant>()) {
13335 CreateHeapNumber(node->Cast<Float64Constant>()->value()),
13336 allocation_type);
13337 } else {
13338 value_to_push = GetTaggedValue(node);
13339 }
13340 values.push_back(value_to_push);
13341 });
13342 allocation =
13343 ExtendOrReallocateCurrentAllocationBlock(allocation_type, vobject);
13344 AddNonEscapingUses(allocation, static_cast<int>(values.size()));
13345 if (vobject->has_static_map()) {
13346 AddNonEscapingUses(allocation, 1);
13347 BuildStoreMap(allocation, vobject->map(),
13349 }
13350 for (uint32_t i = 0; i < values.size(); i++) {
13351 BuildInitializeStore(allocation, values[i], (i + 1) * kTaggedSize);
13352 }
13354 loop_effects_->allocations.insert(allocation);
13355 }
13356 break;
13357 }
13358 }
13359 if (v8_flags.maglev_allocation_folding < 2) {
13361 }
13362 return allocation;
13363}
13364
13366 int length) {
13367 DCHECK(is_inline());
13368 if (length == 0) {
13369 return GetRootConstant(RootIndex::kEmptyFixedArray);
13370 }
13371 VirtualObject* elements =
13372 CreateFixedArray(broker()->fixed_array_map(), length);
13373 for (int i = 0; i < length; i++) {
13375 caller_details_->arguments[i + start_index + 1]);
13376 }
13377 return elements;
13378}
13379
13381 int mapped_count) {
13382 int length = argument_count_without_receiver();
13383 if (length == 0) {
13384 return GetRootConstant(RootIndex::kEmptyFixedArray);
13385 }
13386 VirtualObject* unmapped_elements =
13387 CreateFixedArray(broker()->fixed_array_map(), length);
13388 int i = 0;
13389 for (; i < mapped_count; i++) {
13390 unmapped_elements->set(FixedArray::OffsetOfElementAt(i),
13391 GetRootConstant(RootIndex::kTheHoleValue));
13392 }
13393 for (; i < length; i++) {
13394 unmapped_elements->set(FixedArray::OffsetOfElementAt(i),
13396 }
13397 return unmapped_elements;
13398}
13399
13400template <CreateArgumentsType type>
13402 switch (type) {
13405 // If there is no aliasing, the arguments object elements are not
12406 // special in any way; we can just return an unmapped backing store.
13407 if (is_inline()) {
13408 int length = argument_count_without_receiver();
13409 ValueNode* elements = BuildInlinedArgumentsElements(0, length);
13410 return CreateArgumentsObject(
13411 broker()->target_native_context().sloppy_arguments_map(broker()),
13412 GetInt32Constant(length), elements, GetClosure());
13413 } else {
13415 EnsureType(length, NodeType::kSmi);
13419 return CreateArgumentsObject(
13420 broker()->target_native_context().sloppy_arguments_map(broker()),
13421 length, elements, GetClosure());
13422 }
13423 } else {
13424 // If the parameter count is zero, we should have used the unmapped
13425 // backing store.
13426 int param_count = parameter_count_without_receiver();
13427 DCHECK_GT(param_count, 0);
13429 int param_idx_in_ctxt = compilation_unit_->shared_function_info()
13431 param_count - 1;
13432 // The {unmapped_elements} correspond to the extra arguments
12433 // (overapplication) that do not need to be "mapped" to the actual
13434 // arguments. Mapped arguments are accessed via the context, whereas
13435 // unmapped arguments are simply accessed via this fixed array. See
12436 // SloppyArgumentsElements in src/objects/arguments.h.
13437 if (is_inline()) {
13438 int length = argument_count_without_receiver();
13439 int mapped_count = std::min(param_count, length);
13440 ValueNode* unmapped_elements =
13443 broker()->sloppy_arguments_elements_map(), mapped_count,
13444 GetContext(), unmapped_elements);
13445 for (int i = 0; i < mapped_count; i++, param_idx_in_ctxt--) {
13447 GetInt32Constant(param_idx_in_ctxt));
13448 }
13449 return CreateArgumentsObject(
13450 broker()->target_native_context().fast_aliased_arguments_map(
13451 broker()),
13452 GetInt32Constant(length), elements, GetClosure());
13453 } else {
13455 EnsureType(length, NodeType::kSmi);
13457 {length}, CreateArgumentsType::kMappedArguments, param_count);
13459 broker()->sloppy_arguments_elements_map(), param_count,
13460 GetContext(), unmapped_elements);
13461 ValueNode* the_hole_value = GetConstant(broker()->the_hole_value());
13462 for (int i = 0; i < param_count; i++, param_idx_in_ctxt--) {
13463 ValueNode* value = Select(
13464 [&](auto& builder) {
13465 return BuildBranchIfInt32Compare(builder,
13466 Operation::kLessThan,
13467 GetInt32Constant(i), length);
13468 },
13469 [&] { return GetSmiConstant(param_idx_in_ctxt); },
13470 [&] { return the_hole_value; });
13471 elements->set(SloppyArgumentsElements::OffsetOfElementAt(i), value);
13472 }
13473 return CreateArgumentsObject(
13474 broker()->target_native_context().fast_aliased_arguments_map(
13475 broker()),
13476 length, elements, GetClosure());
13477 }
13478 }
13480 if (is_inline()) {
13481 int length = argument_count_without_receiver();
13482 ValueNode* elements = BuildInlinedArgumentsElements(0, length);
13483 return CreateArgumentsObject(
13484 broker()->target_native_context().strict_arguments_map(broker()),
13485 GetInt32Constant(length), elements);
13486 } else {
13488 EnsureType(length, NodeType::kSmi);
13492 return CreateArgumentsObject(
13493 broker()->target_native_context().strict_arguments_map(broker()),
13494 length, elements);
13495 }
13497 if (is_inline()) {
13498 int start_index = parameter_count_without_receiver();
13499 int length =
13500 std::max(0, argument_count_without_receiver() - start_index);
13501 ValueNode* elements =
13502 BuildInlinedArgumentsElements(start_index, length);
13503 return CreateArgumentsObject(
13504 broker()->target_native_context().js_array_packed_elements_map(
13505 broker()),
13506 GetInt32Constant(length), elements);
13507 } else {
13509 EnsureType(length, NodeType::kSmi);
13513 RestLength* rest_length =
13515 return CreateArgumentsObject(
13516 broker()->target_native_context().js_array_packed_elements_map(
13517 broker()),
13518 rest_length, elements);
13519 }
13520 }
13521}
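// JS-level sketch of the three argument-object flavours built above
// (illustrative only; the functions are hypothetical):
//
//   function sloppy(a) { arguments[0] = 1; return a; }   // sloppy(0) === 1:
//                              // parameters alias SloppyArgumentsElements.
//   function strict(a) { "use strict"; return arguments; }
//                              // unmapped: plain backing store, no aliasing.
//   function rest(a, ...r) { return r; }
//                              // rest: only arguments past the formals.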
13522
13523template <CreateArgumentsType type>
13525 auto arguments = BuildVirtualArgumentsObject<type>();
13526 ValueNode* allocation =
13528 return allocation;
13529}
13530
13532 const compiler::LiteralFeedback& feedback) {
13533 compiler::AllocationSiteRef site = feedback.value();
13534 if (!site.boilerplate(broker()).has_value()) return {};
13535 AllocationType allocation_type =
13537
13538 // First, try to extract the shape and values of the boilerplate, bailing
13539 // out on complex boilerplates.
13540 int max_properties = compiler::kMaxFastLiteralProperties;
13541 std::optional<VirtualObject*> maybe_value = TryReadBoilerplateForFastLiteral(
13542 *site.boilerplate(broker()), allocation_type,
13543 compiler::kMaxFastLiteralDepth, &max_properties);
13544 if (!maybe_value.has_value()) return {};
13545
13546 // Then, use the collected information to actually create nodes in the graph.
13547 // TODO(leszeks): Add support for unwinding graph modifications, so that we
13548 // can get rid of this two-pass approach.
13551 BuildInlinedAllocation(*maybe_value, allocation_type);
13552 return result;
13553}
13554
13555ReduceResult MaglevGraphBuilder::VisitCreateObjectLiteral() {
13558 FeedbackSlot slot_index = GetSlotOperand(1);
13559 int bytecode_flags = GetFlag8Operand(2);
13560 int literal_flags =
13562 compiler::FeedbackSource feedback_source(feedback(), slot_index);
13563
13564 compiler::ProcessedFeedback const& processed_feedback =
13565 broker()->GetFeedbackForArrayOrObjectLiteral(feedback_source);
13566 if (processed_feedback.IsInsufficient()) {
13568 DeoptimizeReason::kInsufficientTypeFeedbackForObjectLiteral);
13569 }
13570
13571 MaybeReduceResult result =
13574
13576 bytecode_flags)) {
13577 // TODO(victorgomes): CreateShallowObjectLiteral should not need the
13578 // boilerplate descriptor. However, the current builtin checks that the
13579 // feedback exists and falls back to CreateObjectLiteral if it doesn't.
13581 {}, boilerplate_desc, feedback_source, literal_flags));
13582 } else {
13584 {}, boilerplate_desc, feedback_source, literal_flags));
13585 }
13586
13587 return ReduceResult::Done();
13588}
13589
13590ReduceResult MaglevGraphBuilder::VisitCreateEmptyObjectLiteral() {
13591 compiler::NativeContextRef native_context = broker()->target_native_context();
13592 compiler::MapRef map =
13593 native_context.object_function(broker()).initial_map(broker());
13594 DCHECK(!map.is_dictionary_map());
13595 DCHECK(!map.IsInobjectSlackTrackingInProgress());
13598 return ReduceResult::Done();
13599}
13600
13601ReduceResult MaglevGraphBuilder::VisitCloneObject() {
13602 // CloneObject <source_idx> <flags> <feedback_slot>
13603 ValueNode* source = LoadRegister(0);
13604 ValueNode* flags =
13606 GetFlag8Operand(1)));
13607 FeedbackSlot slot = GetSlotOperand(2);
13608 compiler::FeedbackSource feedback_source{feedback(), slot};
13610 {GetTaggedValue(source), flags}, feedback_source));
13611 return ReduceResult::Done();
13612}
13613
13614ReduceResult MaglevGraphBuilder::VisitGetTemplateObject() {
13615 // GetTemplateObject <descriptor_idx> <literal_idx>
13616 compiler::SharedFunctionInfoRef shared_function_info =
13618 ValueNode* description = GetConstant(GetRefOperand<HeapObject>(0));
13619 FeedbackSlot slot = GetSlotOperand(1);
13620 compiler::FeedbackSource feedback_source{feedback(), slot};
13621
13622 const compiler::ProcessedFeedback& feedback =
13623 broker()->GetFeedbackForTemplateObject(feedback_source);
13624 if (feedback.IsInsufficient()) {
13626 {description}, shared_function_info, feedback_source));
13627 return ReduceResult::Done();
13628 }
13629 compiler::JSArrayRef template_object = feedback.AsTemplateObject().value();
13630 SetAccumulator(GetConstant(template_object));
13631 return ReduceResult::Done();
13632}
13633
13634ReduceResult MaglevGraphBuilder::VisitCreateClosure() {
13635 compiler::SharedFunctionInfoRef shared_function_info =
13637 compiler::FeedbackCellRef feedback_cell =
13639 uint32_t flags = GetFlag8Operand(2);
13640
13643 {GetContext()}, shared_function_info, feedback_cell));
13644 } else {
13645 bool pretenured =
13648 {GetContext()}, shared_function_info, feedback_cell, pretenured));
13649 }
13650 return ReduceResult::Done();
13651}
13652
13654 compiler::MapRef map, compiler::ScopeInfoRef scope, int context_length) {
13655 const int kContextAllocationLimit = 16;
13656 if (context_length > kContextAllocationLimit) return {};
13657 DCHECK_GE(context_length, Context::MIN_CONTEXT_SLOTS);
13658 auto context = CreateContext(map, context_length, scope, GetContext());
13660 return result;
13661}
13662
13663ReduceResult MaglevGraphBuilder::VisitCreateBlockContext() {
13664 // CreateBlockContext <scope_info_idx>
13666 compiler::MapRef map =
13667 broker()->target_native_context().block_context_map(broker());
13668
13669 auto done = [&](ValueNode* res) {
13670 graph()->record_scope_info(res, scope_info);
13671 SetAccumulator(res);
13672 };
13673
13675 map, scope_info, scope_info.ContextLength()),
13676 done);
13677 // Fallback.
13678 done(BuildCallRuntime(Runtime::kPushBlockContext, {GetConstant(scope_info)})
13679 .value());
13680 return ReduceResult::Done();
13681}
13682
13683ReduceResult MaglevGraphBuilder::VisitCreateCatchContext() {
13684 // CreateCatchContext <exception> <scope_info_idx>
13685 ValueNode* exception = LoadRegister(0);
13686 compiler::ScopeInfoRef scope_info = GetRefOperand<ScopeInfo>(1);
13687 auto context = CreateContext(
13688 broker()->target_native_context().catch_context_map(broker()),
13689 Context::MIN_CONTEXT_EXTENDED_SLOTS, scope_info, GetContext(), exception);
13691 graph()->record_scope_info(GetAccumulator(), scope_info);
13692 return ReduceResult::Done();
13693}
13694
13695ReduceResult MaglevGraphBuilder::VisitCreateFunctionContext() {
13696 compiler::ScopeInfoRef info = GetRefOperand<ScopeInfo>(0);
13697 uint32_t slot_count = iterator_.GetUnsignedImmediateOperand(1);
13698 compiler::MapRef map =
13699 broker()->target_native_context().function_context_map(broker());
13700
13701 auto done = [&](ValueNode* res) {
13702 graph()->record_scope_info(res, info);
13703 SetAccumulator(res);
13704 };
13705
13708 slot_count + Context::MIN_CONTEXT_SLOTS),
13709 done);
13710 // Fallback.
13713 return ReduceResult::Done();
13714}
13715
13716ReduceResult MaglevGraphBuilder::VisitCreateEvalContext() {
13717 compiler::ScopeInfoRef info = GetRefOperand<ScopeInfo>(0);
13718 uint32_t slot_count = iterator_.GetUnsignedImmediateOperand(1);
13719 compiler::MapRef map =
13720 broker()->target_native_context().eval_context_map(broker());
13721
13722 auto done = [&](ValueNode* res) {
13723 graph()->record_scope_info(res, info);
13724 SetAccumulator(res);
13725 };
13726
13729 slot_count + Context::MIN_CONTEXT_SLOTS),
13730 done);
13731 if (slot_count <= static_cast<uint32_t>(
13735 } else {
13736 done(BuildCallRuntime(Runtime::kNewFunctionContext, {GetConstant(info)})
13737 .value());
13738 }
13739 return ReduceResult::Done();
13740}
13741
13742ReduceResult MaglevGraphBuilder::VisitCreateWithContext() {
13743 // CreateWithContext <register> <scope_info_idx>
13744 ValueNode* object = LoadRegister(0);
13745 compiler::ScopeInfoRef scope_info = GetRefOperand<ScopeInfo>(1);
13746 auto context = CreateContext(
13747 broker()->target_native_context().with_context_map(broker()),
13748 Context::MIN_CONTEXT_EXTENDED_SLOTS, scope_info, GetContext(), object);
13750 graph()->record_scope_info(GetAccumulator(), scope_info);
13751 return ReduceResult::Done();
13752}
13753
13758
13764
13765ReduceResult MaglevGraphBuilder::VisitCreateMappedArguments() {
13768 if (!shared.object()->has_duplicate_parameters()) {
13773 return ReduceResult::Done();
13774 } else if (!is_inline()) {
13777 return ReduceResult::Done();
13778 }
13779 }
13780 // Generic fallback.
13782 BuildCallRuntime(Runtime::kNewSloppyArguments, {GetClosure()}).value());
13783 return ReduceResult::Done();
13784}
13785
13786ReduceResult MaglevGraphBuilder::VisitCreateUnmappedArguments() {
13790 return ReduceResult::Done();
13791 }
13792 // Generic fallback.
13794 BuildCallRuntime(Runtime::kNewStrictArguments, {GetClosure()}).value());
13795 return ReduceResult::Done();
13796}
13797
13798ReduceResult MaglevGraphBuilder::VisitCreateRestParameter() {
13802 return ReduceResult::Done();
13803 }
13804 // Generic fallback.
13806 BuildCallRuntime(Runtime::kNewRestParameter, {GetClosure()}).value());
13807 return ReduceResult::Done();
13808}
13809
13811 int loop_header = iterator_.current_offset();
13814 peeled_iteration_count_ = v8_flags.maglev_optimistic_peeled_loops ? 2 : 1;
13815 any_peeled_loop_ = true;
13816 allow_loop_peeling_ = false;
13817
13818 if (v8_flags.trace_maglev_graph_building) {
13819 std::cout << " * Begin loop peeling...." << std::endl;
13820 }
13821
13822 while (in_peeled_iteration()) {
13824 }
13825 // Emit the actual (not peeled) loop if needed.
13826 if (loop_header == iterator_.current_offset()) {
13828 }
13829 allow_loop_peeling_ = true;
13830}
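// Conceptually, the peeling above rewrites
//
//   loop { body }        =>        body; loop { body }
//
// so the first iteration is built with the types and checks known at loop
// entry; with v8_flags.maglev_optimistic_peeled_loops two iterations are
// peeled and loop effects are tracked across the peeled body.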
13831
13833 ControlNode* control_node, ZoneVector<Node*> rem_nodes_in_call_block) {
13836 rem_nodes_in_call_block.size());
13838 current_block_ = nullptr;
13839 for (Node* n : rem_nodes_in_call_block) {
13840 n->set_owner(result);
13841 result->nodes().push_back(n);
13842 }
13843 control_node->set_owner(result);
13844 CHECK_NULL(result->control_node());
13845 result->set_control_node(control_node);
13846
13847 // Add the final block to the graph.
13848 graph_->Add(result);
13849 return result;
13850}
13851
13853 int loop_header = iterator_.current_offset();
13855
13856 // Since peeled loops do not start with a loop merge state, we need to
13857 // explicitly enter a loop effect tracking scope for the peeled iteration.
13858 bool track_peeled_effects =
13859 v8_flags.maglev_optimistic_peeled_loops && peeled_iteration_count_ == 2;
13860 if (track_peeled_effects) {
13861 BeginLoopEffects(loop_header);
13862 }
13863
13864#ifdef DEBUG
13865 bool was_in_peeled_iteration = in_peeled_iteration();
13866#endif // DEBUG
13867
13868 while (iterator_.current_bytecode() != interpreter::Bytecode::kJumpLoop) {
13872 }
13873
13874 VisitSingleBytecode(); // VisitJumpLoop
13875
13876 DCHECK_EQ(was_in_peeled_iteration, in_peeled_iteration());
13877 if (!in_peeled_iteration()) {
13878 return;
13879 }
13880
13881 // In case the peeled iteration was mergeable (see TryMergeLoop) or the
13882 // JumpLoop was dead, we are done.
13883 if (!current_block_) {
13887 if (track_peeled_effects) {
13888 EndLoopEffects(loop_header);
13889 }
13890 return;
13891 }
13892
13894
13895 // After processing the peeled iteration and reaching the `JumpLoop`, we
13896 // re-process the loop body. For this, we need to reset the graph building
13897 // state roughly as if we hadn't processed it yet.
13898
13899 // Reset position in exception handler table to before the loop.
13900 HandlerTable table(*bytecode().object());
13901 while (next_handler_table_index_ > 0) {
13903 int start = table.GetRangeStart(next_handler_table_index_);
13904 if (start < loop_header) break;
13905 }
13906
13907 // Re-create catch handler merge states.
13908 for (int offset = loop_header; offset <= iterator_.current_offset();
13909 ++offset) {
13910 if (auto& merge_state = merge_states_[offset]) {
13911 if (merge_state->is_exception_handler()) {
13913 *compilation_unit_, merge_state->frame_state().liveness(), offset,
13914 merge_state->exception_handler_was_used(),
13915 merge_state->catch_block_context_register(), graph_);
13916 } else {
13917 // We only peel innermost loops.
13918 DCHECK(!merge_state->is_loop());
13919 merge_state = nullptr;
13920 }
13921 }
13923 }
13924
13925 // Reset predecessors as if the loop body had not been visited.
13927 DCHECK_GE(offset, loop_header);
13928 if (offset <= iterator_.current_offset()) {
13930 }
13931 }
13933
13935 // After resetting, the actual loop header always has exactly 2
13936 // predecessors: the two copies of `JumpLoop`.
13937 InitializePredecessorCount(loop_header, 2);
13940 GetInLivenessFor(loop_header),
13941 &bytecode_analysis_.GetLoopInfoFor(loop_header),
13942 /* has_been_peeled */ true);
13943
13944 BasicBlock* block = FinishBlock<Jump>({}, &jump_targets_[loop_header]);
13945 // If we ever want more peelings, we should ensure that only the last one
13946 // creates a loop header.
13949 v8_flags.maglev_optimistic_peeled_loops);
13950 merge_states_[loop_header]->InitializeLoop(
13953
13954 if (track_peeled_effects) {
13955 EndLoopEffects(loop_header);
13956 }
13957 DCHECK_NE(iterator_.current_offset(), loop_header);
13958 iterator_.SetOffset(loop_header);
13959}
13960
13964
13965 // TODO(olivf) We might want to start collecting known_node_aspects_ here.
13967 iterator_.Advance()) {
13968 switch (iterator_.current_bytecode()) {
13969 case interpreter::Bytecode::kPushContext: {
13971 // Nothing left to analyze...
13972 return;
13973 }
13974 default:
13975 continue;
13976 }
13977 }
13978}
13979
13981 loop_effects_stack_.push_back(zone()->New<LoopEffects>(loop_header, zone()));
13983}
13984
13987 DCHECK_EQ(loop_effects_->loop_header, loop_header);
13988 // TODO(olivf): Update merge states dominated by the loop header with
13989 // information we know to be unaffected by the loop.
13990 if (merge_states_[loop_header] && merge_states_[loop_header]->is_loop()) {
13992 }
13993 if (loop_effects_stack_.size() > 1) {
13994 LoopEffects* inner_effects = loop_effects_;
13995 loop_effects_ = *(loop_effects_stack_.end() - 2);
13996 loop_effects_->Merge(inner_effects);
13997 } else {
13998 loop_effects_ = nullptr;
13999 }
14000 loop_effects_stack_.pop_back();
14001}
14002
14003ReduceResult MaglevGraphBuilder::VisitJumpLoop() {
14004 const uint32_t relative_jump_bytecode_offset =
14006 const int32_t loop_offset = iterator_.GetImmediateOperand(1);
14007 const FeedbackSlot feedback_slot = iterator_.GetSlotOperand(2);
14008 int target = iterator_.GetJumpTargetOffset();
14009
14011 int reduction = relative_jump_bytecode_offset *
14012 v8_flags.osr_from_maglev_interrupt_scale_factor;
14014 reduction > 0 ? reduction : 1);
14015 } else {
14017 }
14018
14021 {GetClosure()}, loop_offset, feedback_slot,
14022 BytecodeOffset(iterator_.current_offset()), compilation_unit_);
14023 }
14024
14025 bool is_peeled_loop = loop_headers_to_peel_.Contains(target);
14026 auto FinishLoopBlock = [&]() {
14028 };
14029 if (is_peeled_loop && in_peeled_iteration()) {
14032 // Let's see if we can finish this loop without peeling it.
14033 if (!merge_states_[target]->TryMergeLoop(this, current_interpreter_frame_,
14034 FinishLoopBlock)) {
14036 }
14038 EndLoopEffects(target);
14039 }
14040 }
14041 } else {
14042 BasicBlock* block = FinishLoopBlock();
14044 block->set_predecessor_id(merge_states_[target]->predecessor_count() - 1);
14045 if (is_peeled_loop) {
14047 }
14049 EndLoopEffects(target);
14050 }
14051 }
14052 return ReduceResult::Done();
14053}
14054ReduceResult MaglevGraphBuilder::VisitJump() {
14055 BasicBlock* block =
14058 DCHECK_EQ(current_block_, nullptr);
14060 return ReduceResult::Done();
14061}
14062ReduceResult MaglevGraphBuilder::VisitJumpConstant() { return VisitJump(); }
14063ReduceResult MaglevGraphBuilder::VisitJumpIfNullConstant() {
14064 return VisitJumpIfNull();
14065}
14066ReduceResult MaglevGraphBuilder::VisitJumpIfNotNullConstant() {
14067 return VisitJumpIfNotNull();
14068}
14069ReduceResult MaglevGraphBuilder::VisitJumpIfUndefinedConstant() {
14070 return VisitJumpIfUndefined();
14071}
14072ReduceResult MaglevGraphBuilder::VisitJumpIfNotUndefinedConstant() {
14073 return VisitJumpIfNotUndefined();
14074}
14075ReduceResult MaglevGraphBuilder::VisitJumpIfUndefinedOrNullConstant() {
14076 return VisitJumpIfUndefinedOrNull();
14077}
14078ReduceResult MaglevGraphBuilder::VisitJumpIfTrueConstant() {
14079 return VisitJumpIfTrue();
14080}
14081ReduceResult MaglevGraphBuilder::VisitJumpIfFalseConstant() {
14082 return VisitJumpIfFalse();
14083}
14084ReduceResult MaglevGraphBuilder::VisitJumpIfJSReceiverConstant() {
14085 return VisitJumpIfJSReceiver();
14086}
14087ReduceResult MaglevGraphBuilder::VisitJumpIfForInDoneConstant() {
14088 return VisitJumpIfForInDone();
14089}
14090ReduceResult MaglevGraphBuilder::VisitJumpIfToBooleanTrueConstant() {
14091 return VisitJumpIfToBooleanTrue();
14092}
14093ReduceResult MaglevGraphBuilder::VisitJumpIfToBooleanFalseConstant() {
14094 return VisitJumpIfToBooleanFalse();
14095}
14096
14098 int target) {
14099 if (merge_states_[target] == nullptr) {
14100 bool jumping_to_peeled_iteration = bytecode_analysis().IsLoopHeader(target);
14101 DCHECK_EQ(jumping_to_peeled_iteration,
14103 const compiler::BytecodeLivenessState* liveness = GetInLivenessFor(target);
14104 if (jumping_to_peeled_iteration) {
14105 // The peeled iteration is missing the backedge.
14107 }
14108 // If there's no target frame state, allocate a new one.
14111 predecessor_count(target), predecessor, liveness);
14112 } else {
14113 // If there already is a frame state, merge.
14115 }
14116}
14117
14119 // If there already is a frame state, merge.
14120 if (merge_states_[target]) {
14122 predecessor_count(target));
14124 // If this merge is the last one which kills a loop merge, remove that
14125 // merge state.
14126 if (merge_states_[target]->is_unmerged_unreachable_loop()) {
14127 if (v8_flags.trace_maglev_graph_building) {
14128 std::cout << "! Killing loop merge state at @" << target << std::endl;
14129 }
14130 merge_states_[target] = nullptr;
14131 }
14132 }
14133 // If there is no merge state yet, don't create one, but just reduce the
14134 // number of possible predecessors to zero.
14136}
14137
14139 // Check if the Loop entry is dead already (e.g. an outer loop from OSR).
14140 if (V8_UNLIKELY(!merge_states_[target]) && predecessor_count(target) == 0) {
14141 static_assert(kLoopsMustBeEnteredThroughHeader);
14142 return;
14143 }
14144 // If there already is a frame state, merge.
14145 if (V8_LIKELY(merge_states_[target])) {
14147 predecessor_count(target));
14149 !merge_states_[target]->is_unmerged_unreachable_loop()) {
14150 EndLoopEffects(target);
14151 }
14153 }
14154 // If there is no merge state yet, don't create one, but just reduce the
14155 // number of possible predecessors to zero.
14157}
14158
14160 BasicBlock* predecessor) {
14161 int target = inline_exit_offset();
14162 if (merge_states_[target] == nullptr) {
14163 // All returns should have the same liveness, which is that only the
14164 // accumulator is live.
14166 DCHECK(liveness->AccumulatorIsLive());
14167 DCHECK_EQ(liveness->live_value_count(), 1);
14168
14169 // If there's no target frame state, allocate a new one.
14172 predecessor_count(target), predecessor, liveness);
14173 } else {
14174 // Again, all returns should have the same liveness, so double check this.
14175 DCHECK(GetInLiveness()->Equals(
14176 *merge_states_[target]->frame_state().liveness()));
14178 }
14179}
14180
14183 ValueNode* lhs,
14184 ValueNode* rhs) {
14185 if (RootConstant* root_constant = rhs->TryCast<RootConstant>()) {
14186 return builder.Build<BranchIfRootConstant>({lhs}, root_constant->index());
14187 }
14188 if (RootConstant* root_constant = lhs->TryCast<RootConstant>()) {
14189 return builder.Build<BranchIfRootConstant>({rhs}, root_constant->index());
14190 }
14191 if (InlinedAllocation* alloc_lhs = lhs->TryCast<InlinedAllocation>()) {
14192 if (InlinedAllocation* alloc_rhs = rhs->TryCast<InlinedAllocation>()) {
14193 return builder.FromBool(alloc_lhs == alloc_rhs);
14194 }
14195 }
14196
14197 return builder.Build<BranchIfReferenceEqual>({lhs, rhs});
14198}
14199
14201 int jump_offset = iterator_.GetJumpTargetOffset();
14202 if (is_jump_taken) {
14203 BasicBlock* block = FinishBlock<Jump>({}, &jump_targets_[jump_offset]);
14205 MergeIntoFrameState(block, jump_offset);
14206 } else {
14207 MergeDeadIntoFrameState(jump_offset);
14208 }
14209}
14210
14211#ifdef DEBUG
14212namespace {
14213bool IsNumberRootConstant(RootIndex root_index) {
14214 switch (root_index) {
14215#define CASE(type, name, label) case RootIndex::k##label:
14218 return true;
14219 default:
14220 return false;
14221 }
14222#undef CASE
14223}
14224} // namespace
14225#endif
14226
14228 BranchBuilder& builder, ValueNode* node, RootIndex root_index) {
14229 // We assume that Maglev never emits a comparison to a root number.
14230 DCHECK(!IsNumberRootConstant(root_index));
14231
14232 // If the node we're checking is in the accumulator, swap it in the branch
14233 // with the checked value. Cache whether we want to swap, since after we've
14234 // swapped the accumulator isn't the original node anymore.
14235 BranchBuilder::PatchAccumulatorInBranchScope scope(builder, node, root_index);
14236
14237 if (node->properties().value_representation() ==
14239 if (root_index == RootIndex::kUndefinedValue) {
14240 return builder.Build<BranchIfFloat64IsHole>({node});
14241 }
14242 return builder.AlwaysFalse();
14243 }
14244
14245 if (CheckType(node, NodeType::kNumber)) {
14246 return builder.AlwaysFalse();
14247 }
14248 CHECK(node->is_tagged());
14249
14250 if (root_index != RootIndex::kTrueValue &&
14251 root_index != RootIndex::kFalseValue &&
14252 CheckType(node, NodeType::kBoolean)) {
14253 return builder.AlwaysFalse();
14254 }
14255
14256 while (LogicalNot* logical_not = node->TryCast<LogicalNot>()) {
14257 // Bypassing logical not(s) on the input and swapping true/false
14258 // destinations.
14259 node = logical_not->value().node();
14260 builder.SwapTargets();
14261 }
14262
14263 if (RootConstant* constant = node->TryCast<RootConstant>()) {
14264 return builder.FromBool(constant->index() == root_index);
14265 }
14266
14267 if (root_index == RootIndex::kUndefinedValue) {
14268 if (Constant* constant = node->TryCast<Constant>()) {
14269 return builder.FromBool(constant->object().IsUndefined());
14270 }
14271 }
14272
14273 if (root_index != RootIndex::kTrueValue &&
14274 root_index != RootIndex::kFalseValue) {
14275 return builder.Build<BranchIfRootConstant>({node}, root_index);
14276 }
14277 if (root_index == RootIndex::kFalseValue) {
14278 builder.SwapTargets();
14279 }
14280 switch (node->opcode()) {
14281 case Opcode::kTaggedEqual:
14283 builder, node->Cast<TaggedEqual>()->lhs().node(),
14284 node->Cast<TaggedEqual>()->rhs().node());
14285 case Opcode::kTaggedNotEqual:
14286 // Swapped true and false targets.
14287 builder.SwapTargets();
14289 builder, node->Cast<TaggedNotEqual>()->lhs().node(),
14290 node->Cast<TaggedNotEqual>()->rhs().node());
14291 case Opcode::kInt32Compare:
14292 return builder.Build<BranchIfInt32Compare>(
14293 {node->Cast<Int32Compare>()->left_input().node(),
14294 node->Cast<Int32Compare>()->right_input().node()},
14295 node->Cast<Int32Compare>()->operation());
14296 case Opcode::kFloat64Compare:
14297 return builder.Build<BranchIfFloat64Compare>(
14298 {node->Cast<Float64Compare>()->left_input().node(),
14299 node->Cast<Float64Compare>()->right_input().node()},
14300 node->Cast<Float64Compare>()->operation());
14301 case Opcode::kInt32ToBoolean:
14302 if (node->Cast<Int32ToBoolean>()->flip()) {
14303 builder.SwapTargets();
14304 }
14305 return builder.Build<BranchIfInt32ToBooleanTrue>(
14306 {node->Cast<Int32ToBoolean>()->value().node()});
14307 case Opcode::kIntPtrToBoolean:
14308 if (node->Cast<IntPtrToBoolean>()->flip()) {
14309 builder.SwapTargets();
14310 }
14311 return builder.Build<BranchIfIntPtrToBooleanTrue>(
14312 {node->Cast<IntPtrToBoolean>()->value().node()});
14313 case Opcode::kFloat64ToBoolean:
14314 if (node->Cast<Float64ToBoolean>()->flip()) {
14315 builder.SwapTargets();
14316 }
14317 return builder.Build<BranchIfFloat64ToBooleanTrue>(
14318 {node->Cast<Float64ToBoolean>()->value().node()});
14319 case Opcode::kTestUndetectable:
14320 return builder.Build<BranchIfUndetectable>(
14321 {node->Cast<TestUndetectable>()->value().node()},
14322 node->Cast<TestUndetectable>()->check_type());
14323 case Opcode::kHoleyFloat64IsHole:
14324 return builder.Build<BranchIfFloat64IsHole>(
14325 {node->Cast<HoleyFloat64IsHole>()->input().node()});
14326 default:
14327 return builder.Build<BranchIfRootConstant>({node}, RootIndex::kTrueValue);
14328 }
14329}
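// Rough example (illustrative): when the value feeding the branch is itself a
// comparison node (e.g. bytecode like that produced for `if (a < b)`), the
// switch above builds the branch directly from the comparison's operands
// (e.g. BranchIfInt32Compare) rather than testing its boolean result, and any
// LogicalNot wrappers are peeled off beforehand simply by swapping the
// true/false targets.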
14330
14336
14338 BranchBuilder& builder, ValueNode* node) {
14339 return BuildBranchIfRootConstant(builder, node, RootIndex::kNullValue);
14340}
14341
14343 BranchBuilder& builder, ValueNode* node) {
14344 return BuildBranchIfRootConstant(builder, node, RootIndex::kUndefinedValue);
14345}
14346
14349 ValueNode* node) {
14350 compiler::OptionalHeapObjectRef maybe_constant = TryGetConstant(node);
14351 if (maybe_constant.has_value()) {
14352 return builder.FromBool(maybe_constant->IsNullOrUndefined());
14353 }
14354 if (!node->is_tagged()) {
14355 if (node->properties().value_representation() ==
14357 return BuildBranchIfFloat64IsHole(builder, node);
14358 }
14359 return builder.AlwaysFalse();
14360 }
14361 if (HasDisjointType(node, NodeType::kOddball)) {
14362 return builder.AlwaysFalse();
14363 }
14364 return builder.Build<BranchIfUndefinedOrNull>({node});
14365}
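// Sketch (illustrative): a constant input folds to a fixed target here; an
// untagged float input reduces to the hole check (the hole becomes undefined
// when tagged), and a value whose known type is disjoint from Oddball can
// never be null or undefined, so the branch becomes AlwaysFalse.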
14366
14368 BranchBuilder& builder, ValueNode* node) {
14369 // If this is a known boolean, use the non-ToBoolean version.
14370 if (CheckType(node, NodeType::kBoolean)) {
14371 return BuildBranchIfTrue(builder, node);
14372 }
14373
14374 // There shouldn't be any LogicalNots here, for swapping true/false, since
14375 // these are known to be boolean and should have gone through the
14376 // non-ToBoolean path.
14377 DCHECK(!node->Is<LogicalNot>());
14378
14379 bool known_to_boolean_value = false;
14380 bool direction_is_true = true;
14381 if (IsConstantNode(node->opcode())) {
14382 known_to_boolean_value = true;
14383 direction_is_true = FromConstantToBool(local_isolate(), node);
14384 } else {
14385 // TODO(victorgomes): Unify this with TestUndetectable?
14386 // JSReceivers are true iff they are not marked as undetectable. Check if
14387 // all maps have the same detectability, and if yes, the boolean value is
14388 // known.
14389 NodeInfo* node_info = known_node_aspects().TryGetInfoFor(node);
14390 if (node_info && NodeTypeIs(node_info->type(), NodeType::kJSReceiver) &&
14391 node_info->possible_maps_are_known()) {
14392 bool all_detectable = true;
14393 bool all_undetectable = true;
14394 for (compiler::MapRef map : node_info->possible_maps()) {
14395 bool is_undetectable = map.is_undetectable();
14396 all_detectable &= !is_undetectable;
14397 all_undetectable &= is_undetectable;
14398 }
14399 if (all_detectable || all_undetectable) {
14400 known_to_boolean_value = true;
14401 direction_is_true = all_detectable;
14402 }
14403 }
14404 }
14405 if (known_to_boolean_value) {
14406 return builder.FromBool(direction_is_true);
14407 }
14408
14409 switch (node->value_representation()) {
14410 // The ToBoolean of both the_hole and NaN is false, so we can use the
14411 // same operation for HoleyFloat64 and Float64.
14414 return BuildBranchIfFloat64ToBooleanTrue(builder, node);
14415
14417 // Uint32 has the same logic as Int32 when converting ToBoolean, namely
14418 // comparison against zero, so we can cast it and ignore the signedness.
14419 node = AddNewNode<TruncateUint32ToInt32>({node});
14420 [[fallthrough]];
14422 return BuildBranchIfInt32ToBooleanTrue(builder, node);
14423
14425 return BuildBranchIfIntPtrToBooleanTrue(builder, node);
14426
14428 break;
14429 }
14430
14431 NodeInfo* node_info = known_node_aspects().TryGetInfoFor(node);
14432 if (node_info) {
14433 if (ValueNode* as_int32 = node_info->alternative().int32()) {
14434 return BuildBranchIfInt32ToBooleanTrue(builder, as_int32);
14435 }
14436 if (ValueNode* as_float64 = node_info->alternative().float64()) {
14437 return BuildBranchIfFloat64ToBooleanTrue(builder, as_float64);
14438 }
14439 }
14440
14441 NodeType old_type;
14442 if (CheckType(node, NodeType::kBoolean, &old_type)) {
14443 return builder.Build<BranchIfRootConstant>({node}, RootIndex::kTrueValue);
14444 }
14445 if (CheckType(node, NodeType::kSmi)) {
14446 builder.SwapTargets();
14447 return builder.Build<BranchIfReferenceEqual>({node, GetSmiConstant(0)});
14448 }
14449 if (CheckType(node, NodeType::kString)) {
14450 builder.SwapTargets();
14451 return builder.Build<BranchIfRootConstant>({node},
14452 RootIndex::kempty_string);
14453 }
14454 // TODO(verwaest): Number or oddball.
14455 return builder.Build<BranchIfToBooleanTrue>({node}, GetCheckType(old_type));
14456}
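// Example (illustrative): the ToBoolean branch specializes on the known type
// of its input. For `if (s)` with {s} known to be a String, the test becomes
// "s != empty_string", built above as a BranchIfRootConstant against
// kempty_string with swapped targets; a known Smi compares against Smi 0 the
// same way, a known Boolean compares against kTrueValue, and only the fully
// generic case falls back to BranchIfToBooleanTrue.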
14457
14460 ValueNode* node) {
14461 // TODO(victorgomes): Optimize.
14462 return builder.Build<BranchIfInt32ToBooleanTrue>({node});
14463}
14464
14467 ValueNode* node) {
14468 // TODO(victorgomes): Optimize.
14469 return builder.Build<BranchIfIntPtrToBooleanTrue>({node});
14470}
14471
14474 ValueNode* node) {
14475 // TODO(victorgomes): Optimize.
14476 return builder.Build<BranchIfFloat64ToBooleanTrue>({node});
14477}
14478
14480 BranchBuilder& builder, ValueNode* node) {
14481 // TODO(victorgomes): Optimize.
14482 return builder.Build<BranchIfFloat64IsHole>({node});
14483}
14484
14485ReduceResult MaglevGraphBuilder::VisitJumpIfToBooleanTrue() {
14486 auto branch_builder = CreateBranchBuilder(BranchType::kBranchIfTrue);
14487 BuildBranchIfToBooleanTrue(branch_builder, GetAccumulator());
14488 return ReduceResult::Done();
14489}
14490ReduceResult MaglevGraphBuilder::VisitJumpIfToBooleanFalse() {
14491 auto branch_builder = CreateBranchBuilder(BranchType::kBranchIfFalse);
14492 BuildBranchIfToBooleanTrue(branch_builder, GetAccumulator());
14493 return ReduceResult::Done();
14494}
14495ReduceResult MaglevGraphBuilder::VisitJumpIfTrue() {
14496 auto branch_builder = CreateBranchBuilder(BranchType::kBranchIfTrue);
14497 BuildBranchIfTrue(branch_builder, GetAccumulator());
14498 return ReduceResult::Done();
14499}
14500ReduceResult MaglevGraphBuilder::VisitJumpIfFalse() {
14501 auto branch_builder = CreateBranchBuilder(BranchType::kBranchIfFalse);
14502 BuildBranchIfTrue(branch_builder, GetAccumulator());
14503 return ReduceResult::Done();
14504}
14505ReduceResult MaglevGraphBuilder::VisitJumpIfNull() {
14506 auto branch_builder = CreateBranchBuilder(BranchType::kBranchIfTrue);
14507 BuildBranchIfNull(branch_builder, GetAccumulator());
14508 return ReduceResult::Done();
14509}
14510ReduceResult MaglevGraphBuilder::VisitJumpIfNotNull() {
14511 auto branch_builder = CreateBranchBuilder(BranchType::kBranchIfFalse);
14512 BuildBranchIfNull(branch_builder, GetAccumulator());
14513 return ReduceResult::Done();
14514}
14515ReduceResult MaglevGraphBuilder::VisitJumpIfUndefined() {
14516 auto branch_builder = CreateBranchBuilder(BranchType::kBranchIfTrue);
14517 BuildBranchIfUndefined(branch_builder, GetAccumulator());
14518 return ReduceResult::Done();
14519}
14520ReduceResult MaglevGraphBuilder::VisitJumpIfNotUndefined() {
14521 auto branch_builder = CreateBranchBuilder(BranchType::kBranchIfFalse);
14522 BuildBranchIfUndefined(branch_builder, GetAccumulator());
14523 return ReduceResult::Done();
14524}
14525ReduceResult MaglevGraphBuilder::VisitJumpIfUndefinedOrNull() {
14526 auto branch_builder = CreateBranchBuilder();
14528 return ReduceResult::Done();
14529}
14530
14532 BranchBuilder& builder, ValueNode* value) {
14533 if (!value->is_tagged() && value->properties().value_representation() !=
14535 return builder.AlwaysFalse();
14536 }
14537 if (CheckType(value, NodeType::kJSReceiver)) {
14538 return builder.AlwaysTrue();
14539 } else if (HasDisjointType(value, NodeType::kJSReceiver)) {
14540 return builder.AlwaysFalse();
14541 }
14542 return builder.Build<BranchIfJSReceiver>({value});
14543}
14544
14546 BranchBuilder& builder, Operation op, ValueNode* lhs, ValueNode* rhs) {
14547 auto lhs_const = TryGetInt32Constant(lhs);
14548 if (lhs_const) {
14549 auto rhs_const = TryGetInt32Constant(rhs);
14550 if (rhs_const) {
14551 return builder.FromBool(
14552 CompareInt32(lhs_const.value(), rhs_const.value(), op));
14553 }
14554 }
14555 return builder.Build<BranchIfInt32Compare>({lhs, rhs}, op);
14556}
14557
14559 BranchBuilder& builder, Operation op, ValueNode* lhs, ValueNode* rhs) {
14560 auto lhs_const = TryGetUint32Constant(lhs);
14561 if (lhs_const) {
14562 auto rhs_const = TryGetUint32Constant(rhs);
14563 if (rhs_const) {
14564 return builder.FromBool(
14565 CompareUint32(lhs_const.value(), rhs_const.value(), op));
14566 }
14567 }
14568 return builder.Build<BranchIfUint32Compare>({lhs, rhs}, op);
14569}
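// Illustrative note: if both compare inputs have known constants, the
// comparison is evaluated during graph building (CompareInt32/CompareUint32)
// and the branch folds to builder.FromBool, so no compare node is emitted at
// all; otherwise a BranchIfInt32Compare or BranchIfUint32Compare is built.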
14570
14571ReduceResult MaglevGraphBuilder::VisitJumpIfJSReceiver() {
14572 auto branch_builder = CreateBranchBuilder();
14573 BuildBranchIfJSReceiver(branch_builder, GetAccumulator());
14574 return ReduceResult::Done();
14575}
14576
14577ReduceResult MaglevGraphBuilder::VisitJumpIfForInDone() {
14578 // JumpIfForInDone <target> <index> <cache_length>
14579 ValueNode* index = LoadRegister(1);
14580 ValueNode* cache_length = LoadRegister(2);
14581 auto branch_builder = CreateBranchBuilder();
14582 BuildBranchIfInt32Compare(branch_builder, Operation::kEqual, index,
14583 cache_length);
14584 return ReduceResult::Done();
14585}
14586
14587ReduceResult MaglevGraphBuilder::VisitSwitchOnSmiNoFeedback() {
14588 // SwitchOnSmiNoFeedback <table_start> <table_length> <case_value_base>
14589 interpreter::JumpTableTargetOffsets offsets =
14591
14592 if (offsets.size() == 0) return ReduceResult::Done();
14593
14594 int case_value_base = (*offsets.begin()).case_value;
14595 BasicBlockRef* targets = zone()->AllocateArray<BasicBlockRef>(offsets.size());
14596 for (interpreter::JumpTableTargetOffset offset : offsets) {
14597 BasicBlockRef* ref = &targets[offset.case_value - case_value_base];
14598 new (ref) BasicBlockRef(&jump_targets_[offset.target_offset]);
14599 }
14600
14601 ValueNode* case_value = GetAccumulator();
14602 BasicBlock* block =
14603 FinishBlock<Switch>({case_value}, case_value_base, targets,
14604 offsets.size(), &jump_targets_[next_offset()]);
14605 for (interpreter::JumpTableTargetOffset offset : offsets) {
14606 MergeIntoFrameState(block, offset.target_offset);
14607 }
14609 return ReduceResult::Done();
14610}
14611
14612ReduceResult MaglevGraphBuilder::VisitForInEnumerate() {
14613 // ForInEnumerate <receiver>
14614 ValueNode* receiver = LoadRegister(0);
14615 // Pass receiver to ForInPrepare.
14619 return ReduceResult::Done();
14620}
14621
14622ReduceResult MaglevGraphBuilder::VisitForInPrepare() {
14623 // ForInPrepare <cache_info_triple>
14624 ValueNode* enumerator = GetAccumulator();
14625 // Catch the receiver value passed from ForInEnumerate.
14627 FeedbackSlot slot = GetSlotOperand(1);
14628 compiler::FeedbackSource feedback_source{feedback(), slot};
14629 // TODO(v8:7700): Use feedback and create fast path.
14630 ValueNode* context = GetContext();
14631 interpreter::Register cache_type_reg = iterator_.GetRegisterOperand(0);
14632 interpreter::Register cache_array_reg{cache_type_reg.index() + 1};
14633 interpreter::Register cache_length_reg{cache_type_reg.index() + 2};
14634
14635 ForInHint hint = broker()->GetFeedbackForForIn(feedback_source);
14636
14637 current_for_in_state = ForInState();
14638 switch (hint) {
14639 case ForInHint::kNone:
14642 // Check that the {enumerator} is a Map.
14643 // The direct IsMap check requires reading an instance type, so in
14644 // order to avoid an additional load, we compare the {enumerator} against
14645 // the receiver's Map instead (by definition, the {enumerator} is either
14646 // the receiver's Map or a FixedArray).
14647 auto* receiver_map =
14649 AddNewNode<CheckDynamicValue>({receiver_map, enumerator},
14650 DeoptimizeReason::kWrongMapDynamic);
14651
14652 auto* descriptor_array =
14653 BuildLoadTaggedField(enumerator, Map::kInstanceDescriptorsOffset);
14654 auto* enum_cache = BuildLoadTaggedField(
14655 descriptor_array, DescriptorArray::kEnumCacheOffset);
14656 auto* cache_array =
14657 BuildLoadTaggedField(enum_cache, EnumCache::kKeysOffset);
14658
14659 auto* cache_length = AddNewNode<LoadEnumCacheLength>({enumerator});
14660
14662 auto* cache_indices =
14663 BuildLoadTaggedField(enum_cache, EnumCache::kIndicesOffset);
14665 AddNewNode<CheckCacheIndicesNotCleared>({cache_indices, cache_length});
14666 } else {
14668 }
14669
14671 cache_type_reg);
14672 StoreRegister(cache_array_reg, cache_array);
14673 StoreRegister(cache_length_reg, cache_length);
14674 break;
14675 }
14676 case ForInHint::kAny: {
14677 // The result of the bytecode is output in registers |cache_info_triple|
14678 // to |cache_info_triple + 2|, with the registers holding cache_type,
14679 // cache_array, and cache_length respectively.
14680 //
14681 // We set the cache type first (to the accumulator value), and write
14682 // the other two with a ForInPrepare builtin call. This can lazy deopt,
14683 // which will write to cache_array and cache_length, with cache_type
14684 // already set on the translation frame.
14685
14686 // This move needs to happen before ForInPrepare to avoid lazy deopt
14687 // extending the lifetime of the {cache_type} register.
14689 cache_type_reg);
14690 ForInPrepare* result =
14691 AddNewNode<ForInPrepare>({context, enumerator}, feedback_source);
14692 StoreRegisterPair({cache_array_reg, cache_length_reg}, result);
14693 // Force a conversion to Int32 for the cache length value.
14694 EnsureInt32(cache_length_reg);
14695 break;
14696 }
14697 }
14698 return ReduceResult::Done();
14699}
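// Example (illustrative): for a loop like `for (let key in obj) { ... }`,
// ForInPrepare fills the register triple (cache_type, cache_array,
// cache_length). With an enum-cache feedback hint, the keys are loaded from
// the enumerator map's descriptor-array enum cache as above; with
// ForInHint::kAny, only cache_type is set eagerly and the ForInPrepare
// builtin fills in the remaining two registers.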
14700
14701ReduceResult MaglevGraphBuilder::VisitForInNext() {
14702 // ForInNext <receiver> <index> <cache_info_pair>
14703 ValueNode* receiver = LoadRegister(0);
14704 interpreter::Register cache_type_reg, cache_array_reg;
14705 std::tie(cache_type_reg, cache_array_reg) =
14707 ValueNode* cache_type = current_interpreter_frame_.get(cache_type_reg);
14708 ValueNode* cache_array = current_interpreter_frame_.get(cache_array_reg);
14709 FeedbackSlot slot = GetSlotOperand(3);
14710 compiler::FeedbackSource feedback_source{feedback(), slot};
14711
14712 ForInHint hint = broker()->GetFeedbackForForIn(feedback_source);
14713
14714 switch (hint) {
14715 case ForInHint::kNone:
14718 ValueNode* index = LoadRegister(1);
14719 // Ensure that the expected map still matches that of the {receiver}.
14720 auto* receiver_map =
14722 AddNewNode<CheckDynamicValue>({receiver_map, cache_type},
14723 DeoptimizeReason::kWrongMapDynamic);
14724 auto* key = BuildLoadFixedArrayElement(cache_array, index);
14725 EnsureType(key, NodeType::kInternalizedString);
14727
14729 if (ToObject* to_object =
14730 current_for_in_state.receiver->TryCast<ToObject>()) {
14731 current_for_in_state.receiver = to_object->value_input().node();
14732 }
14734 current_for_in_state.cache_type = cache_type;
14738 }
14739 // We know that the enum cache entry is not undefined, so skip over the
14740 // next JumpIfUndefined.
14742 interpreter::Bytecode::kJumpIfUndefined ||
14744 interpreter::Bytecode::kJumpIfUndefinedConstant);
14747 break;
14748 }
14749 case ForInHint::kAny: {
14750 ValueNode* index = LoadRegister(1);
14751 ValueNode* context = GetContext();
14753 {context, receiver, cache_array, cache_type, index},
14754 feedback_source));
14755 break;
14756 };
14757 }
14758 return ReduceResult::Done();
14759}
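// Illustrative note: on the enum-cache fast path, ForInNext re-checks that
// the receiver map still equals the cached cache_type, loads the key directly
// out of cache_array at the current index (known to be an internalized
// string), and, since that key cannot be undefined, the bytecode's following
// JumpIfUndefined is skipped entirely.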
14760
14761ReduceResult MaglevGraphBuilder::VisitForInStep() {
14762 interpreter::Register index_reg = iterator_.GetRegisterOperand(0);
14763 ValueNode* index = current_interpreter_frame_.get(index_reg);
14764 StoreRegister(index_reg,
14765 AddNewNode<Int32NodeFor<Operation::kIncrement>>({index}));
14766 if (!in_peeled_iteration()) {
14767 // With loop peeling, only the `ForInStep` in the non-peeled loop body marks
14768 // the end of for-in.
14769 current_for_in_state = ForInState();
14770 }
14771 return ReduceResult::Done();
14772}
14773
14774ReduceResult MaglevGraphBuilder::VisitSetPendingMessage() {
14775 ValueNode* message = GetAccumulator();
14777 return ReduceResult::Done();
14778}
14779
14780ReduceResult MaglevGraphBuilder::VisitThrow() {
14781 ValueNode* exception = GetAccumulator();
14782 return BuildCallRuntime(Runtime::kThrow, {exception});
14783}
14784ReduceResult MaglevGraphBuilder::VisitReThrow() {
14785 ValueNode* exception = GetAccumulator();
14786 return BuildCallRuntime(Runtime::kReThrow, {exception});
14787}
14788
14789ReduceResult MaglevGraphBuilder::VisitReturn() {
14790 // See also: InterpreterAssembler::UpdateInterruptBudgetOnReturn.
14791 const uint32_t relative_jump_bytecode_offset = iterator_.current_offset();
14792 if (ShouldEmitInterruptBudgetChecks() && relative_jump_bytecode_offset > 0) {
14794 relative_jump_bytecode_offset);
14795 }
14796
14797 if (!is_inline()) {
14799 return ReduceResult::Done();
14800 }
14801
14802 // All inlined function returns instead jump to one past the end of the
14803 // bytecode, where we'll later create a final basic block which resumes
14804 // execution of the caller. If there is only one return, at the end of the
14805 // function, we can elide this jump and just continue in the same basic block.
14808 BasicBlock* block =
14810 // The context is dead by now; set it to optimized out to avoid creating
14811 // unnecessary phis.
14812 SetContext(GetRootConstant(RootIndex::kOptimizedOut));
14814 }
14815 return ReduceResult::Done();
14816}
14817
14818ReduceResult MaglevGraphBuilder::VisitThrowReferenceErrorIfHole() {
14819 // ThrowReferenceErrorIfHole <variable_name>
14820 compiler::NameRef name = GetRefOperand<Name>(0);
14821 ValueNode* value = GetAccumulator();
14822
14823 // Avoid the check if we know it is not the hole.
14824 if (IsConstantNode(value->opcode())) {
14825 if (IsTheHoleValue(value)) {
14826 ValueNode* constant = GetConstant(name);
14827 return BuildCallRuntime(Runtime::kThrowAccessedUninitializedVariable,
14828 {constant});
14829 }
14830 return ReduceResult::Done();
14831 }
14832
14833 // Avoid the check if {value}'s representation doesn't allow the hole.
14834 switch (value->value_representation()) {
14840 // Can't be the hole.
14841 // Note that HoleyFloat64 when converted to Tagged becomes Undefined
14842 // rather than the_hole, hence the early return for HoleyFloat64.
14843 return ReduceResult::Done();
14844
14846 // Could be the hole.
14847 break;
14848 }
14849
14850 // Avoid the check if {value} has an alternative whose representation doesn't
14851 // allow the hole.
14852 if (const NodeInfo* info = known_node_aspects().TryGetInfoFor(value)) {
14853 auto& alt = info->alternative();
14854 if (alt.int32() || alt.truncated_int32_to_number() || alt.float64()) {
14855 return ReduceResult::Done();
14856 }
14857 }
14858
14859 DCHECK(value->value_representation() == ValueRepresentation::kTagged);
14861 return ReduceResult::Done();
14862}
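// Sketch (illustrative): ThrowReferenceErrorIfHole implements the TDZ check
// for code like `{ use(x); let x = 1; }` (use() here is just a placeholder).
// The hole check is elided when the value is a non-hole constant, when its
// untagged representation cannot hold the hole, or when a known int32/float64
// alternative exists; only a plain tagged value still needs the runtime
// check.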
14863
14864ReduceResult MaglevGraphBuilder::VisitThrowSuperNotCalledIfHole() {
14865 // ThrowSuperNotCalledIfHole
14866 ValueNode* value = GetAccumulator();
14867 if (CheckType(value, NodeType::kJSReceiver)) return ReduceResult::Done();
14868 // Avoid the check if we know it is not the hole.
14869 if (IsConstantNode(value->opcode())) {
14870 if (IsTheHoleValue(value)) {
14871 return BuildCallRuntime(Runtime::kThrowSuperNotCalled, {});
14872 }
14873 return ReduceResult::Done();
14874 }
14876 return ReduceResult::Done();
14877}
14878ReduceResult MaglevGraphBuilder::VisitThrowSuperAlreadyCalledIfNotHole() {
14879 // ThrowSuperAlreadyCalledIfNotHole
14880 ValueNode* value = GetAccumulator();
14881 // Avoid the check if we know it is the hole.
14882 if (IsConstantNode(value->opcode())) {
14883 if (!IsTheHoleValue(value)) {
14884 return BuildCallRuntime(Runtime::kThrowSuperAlreadyCalledError, {});
14885 }
14886 return ReduceResult::Done();
14887 }
14889 return ReduceResult::Done();
14890}
14891ReduceResult MaglevGraphBuilder::VisitThrowIfNotSuperConstructor() {
14892 // ThrowIfNotSuperConstructor <constructor>
14893 ValueNode* constructor = LoadRegister(0);
14894 ValueNode* function = GetClosure();
14895 AddNewNode<ThrowIfNotSuperConstructor>({constructor, function});
14896 return ReduceResult::Done();
14897}
14898
14899ReduceResult MaglevGraphBuilder::VisitSwitchOnGeneratorState() {
14900 // SwitchOnGeneratorState <generator> <table_start> <table_length>
14901 // It should be the first bytecode in the bytecode array.
14903 int generator_prologue_block_offset = 1;
14904 DCHECK_LT(generator_prologue_block_offset, next_offset());
14905
14906 interpreter::JumpTableTargetOffsets offsets =
14908 // If there are no jump offsets, then this generator is not resumable, which
14909 // means we can skip checking for it and switching on its state.
14910 if (offsets.size() == 0) return ReduceResult::Done();
14911
14913
14914 // We create an initial block that checks if the generator is undefined.
14915 ValueNode* maybe_generator = LoadRegister(0);
14916 // Neither the true nor the false path jumps over any bytecode.
14917 BasicBlock* block_is_generator_undefined = FinishBlock<BranchIfRootConstant>(
14918 {maybe_generator}, RootIndex::kUndefinedValue,
14920 &jump_targets_[generator_prologue_block_offset]);
14921 MergeIntoFrameState(block_is_generator_undefined, next_offset());
14922
14923 // We create the generator prologue block.
14924 StartNewBlock(generator_prologue_block_offset, block_is_generator_undefined);
14925
14926 // Generator prologue.
14927 ValueNode* generator = maybe_generator;
14928 ValueNode* state =
14929 BuildLoadTaggedField(generator, JSGeneratorObject::kContinuationOffset);
14931 BuildStoreTaggedFieldNoWriteBarrier(generator, new_state,
14932 JSGeneratorObject::kContinuationOffset,
14934 ValueNode* context =
14935 BuildLoadTaggedField(generator, JSGeneratorObject::kContextOffset);
14936 graph()->record_scope_info(context, {});
14937 SetContext(context);
14938
14939 // Guarantee that we have something in the accumulator.
14942
14943 // Switch on generator state.
14944 int case_value_base = (*offsets.begin()).case_value;
14945 BasicBlockRef* targets = zone()->AllocateArray<BasicBlockRef>(offsets.size());
14946 for (interpreter::JumpTableTargetOffset offset : offsets) {
14947 BasicBlockRef* ref = &targets[offset.case_value - case_value_base];
14948 new (ref) BasicBlockRef(&jump_targets_[offset.target_offset]);
14949 }
14950 ValueNode* case_value =
14951 state->is_tagged() ? AddNewNode<UnsafeSmiUntag>({state}) : state;
14952 BasicBlock* generator_prologue_block = FinishBlock<Switch>(
14953 {case_value}, case_value_base, targets, offsets.size());
14954 for (interpreter::JumpTableTargetOffset offset : offsets) {
14955 MergeIntoFrameState(generator_prologue_block, offset.target_offset);
14956 }
14957 return ReduceResult::Done();
14958}
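// Illustrative note: the generator dispatch built above first tests the
// generator register against undefined (the initial call falls through to the
// normal function body), then the prologue loads the stored continuation,
// overwrites the continuation slot, restores the saved context, and switches
// on the untagged state so each resume point jumps to its bytecode offset.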
14959
14960ReduceResult MaglevGraphBuilder::VisitSuspendGenerator() {
14961 // SuspendGenerator <generator> <first input register> <register count>
14962 // <suspend_id>
14963 ValueNode* generator = LoadRegister(0);
14964 ValueNode* context = GetContext();
14965 interpreter::RegisterList args = iterator_.GetRegisterListOperand(1);
14966 uint32_t suspend_id = iterator_.GetUnsignedImmediateOperand(3);
14967
14968 int input_count = parameter_count_without_receiver() + args.register_count() +
14970 int debug_pos_offset = iterator_.current_offset() +
14973 input_count,
14974 [&](GeneratorStore* node) {
14975 int arg_index = 0;
14976 for (int i = 1 /* skip receiver */; i < parameter_count(); ++i) {
14977 node->set_parameters_and_registers(arg_index++,
14979 }
14980 const compiler::BytecodeLivenessState* liveness = GetOutLiveness();
14981 for (int i = 0; i < args.register_count(); ++i) {
14982 ValueNode* value = liveness->RegisterIsLive(args[i].index())
14985 node->set_parameters_and_registers(arg_index++, value);
14986 }
14987 },
14988
14989 context, generator, suspend_id, debug_pos_offset);
14990
14992 return ReduceResult::Done();
14993}
14994
14995ReduceResult MaglevGraphBuilder::VisitResumeGenerator() {
14996 // ResumeGenerator <generator> <first output register> <register count>
14997 ValueNode* generator = LoadRegister(0);
14998 ValueNode* array = BuildLoadTaggedField(
14999 generator, JSGeneratorObject::kParametersAndRegistersOffset);
15000 interpreter::RegisterList registers = iterator_.GetRegisterListOperand(1);
15001
15002 if (v8_flags.maglev_assert) {
15003 // Check if register count is invalid, that is, larger than the
15004 // register file length.
15005 ValueNode* array_length = BuildLoadFixedArrayLength(array);
15006 ValueNode* register_size = GetInt32Constant(
15007 parameter_count_without_receiver() + registers.register_count());
15009 {register_size, array_length}, AssertCondition::kLessThanEqual,
15010 AbortReason::kInvalidParametersAndRegistersInGenerator);
15011 }
15012
15013 const compiler::BytecodeLivenessState* liveness =
15015 RootConstant* stale = GetRootConstant(RootIndex::kStaleRegister);
15016 for (int i = 0; i < registers.register_count(); ++i) {
15017 if (liveness->RegisterIsLive(registers[i].index())) {
15018 int array_index = parameter_count_without_receiver() + i;
15020 {array, stale}, array_index));
15021 }
15022 }
15024 generator, JSGeneratorObject::kInputOrDebugPosOffset));
15025 return ReduceResult::Done();
15026}
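// Rough sketch (illustrative): ResumeGenerator restores each register that is
// live at the resume point from the generator's parameters-and-registers
// array, replacing each restored slot with the kStaleRegister sentinel, and
// when maglev_assert is enabled also asserts that the register file fits
// inside that array.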
15027
15029 ValueNode* receiver, int load_slot_index, int call_slot_index) {
15030 // Load iterator method property.
15031 FeedbackSlot load_slot = FeedbackVector::ToSlot(load_slot_index);
15032 compiler::FeedbackSource load_feedback{feedback(), load_slot};
15033 compiler::NameRef iterator_symbol = broker()->iterator_symbol();
15034 ValueNode* iterator_method;
15035 {
15036 DeoptFrameScope deopt_continuation(
15037 this, Builtin::kGetIteratorWithFeedbackLazyDeoptContinuation, {},
15039 GetConstant(feedback())}));
15040 MaybeReduceResult result_load =
15041 TryBuildLoadNamedProperty(receiver, iterator_symbol, load_feedback);
15042 if (result_load.IsDoneWithAbort() || result_load.IsFail()) {
15043 return result_load;
15044 }
15045 DCHECK(result_load.IsDoneWithValue());
15046 iterator_method = result_load.value();
15047 }
15048 auto throw_iterator_error = [&] {
15049 return BuildCallRuntime(Runtime::kThrowIteratorError, {receiver});
15050 };
15051 if (!iterator_method->is_tagged()) {
15052 return throw_iterator_error();
15053 }
15054 auto throw_symbol_iterator_invalid = [&] {
15055 return BuildCallRuntime(Runtime::kThrowSymbolIteratorInvalid, {});
15056 };
15057 auto call_iterator_method = [&] {
15058 DeoptFrameScope deopt_continuation(
15059 this, Builtin::kCallIteratorWithFeedbackLazyDeoptContinuation);
15060
15061 FeedbackSlot call_slot = FeedbackVector::ToSlot(call_slot_index);
15062 compiler::FeedbackSource call_feedback{feedback(), call_slot};
15064 MaybeReduceResult result_call =
15065 ReduceCall(iterator_method, args, call_feedback);
15066
15067 if (result_call.IsDoneWithAbort()) return result_call;
15068 DCHECK(result_call.IsDoneWithValue());
15069 return SelectReduction(
15070 [&](auto& builder) {
15071 return BuildBranchIfJSReceiver(builder, result_call.value());
15072 },
15073 [&] { return result_call; }, throw_symbol_iterator_invalid);
15074 };
15075 // Check if the iterator_method is undefined and call the method otherwise.
15076 return SelectReduction(
15077 [&](auto& builder) {
15078 return BuildBranchIfUndefined(builder, iterator_method);
15079 },
15080 throw_iterator_error, call_iterator_method);
15081}
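// Example (illustrative): for `for (const v of obj)`, the GetIterator fast
// path above loads obj[Symbol.iterator] under a lazy-deopt continuation,
// throws an IteratorError if the method is undefined, and otherwise calls it
// with the call-slot feedback, keeping the result only if it is a JSReceiver
// and throwing SymbolIteratorInvalid otherwise.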
15082
15083ReduceResult MaglevGraphBuilder::VisitGetIterator() {
15084 // GetIterator <object>
15086 int load_slot = iterator_.GetIndexOperand(1);
15087 int call_slot = iterator_.GetIndexOperand(2);
15089 TryReduceGetIterator(receiver, load_slot, call_slot), SetAccumulator);
15090 // Fall back to the builtin.
15091 ValueNode* context = GetContext();
15093 call_slot, feedback()));
15094 return ReduceResult::Done();
15095}
15096
15097ReduceResult MaglevGraphBuilder::VisitDebugger() {
15098 return BuildCallRuntime(Runtime::kHandleDebuggerStatement, {});
15099}
15100
15101ReduceResult MaglevGraphBuilder::VisitIncBlockCounter() {
15102 ValueNode* closure = GetClosure();
15103 ValueNode* coverage_array_slot = GetSmiConstant(iterator_.GetIndexOperand(0));
15105 {GetTaggedValue(closure), coverage_array_slot});
15106 return ReduceResult::Done();
15107}
15108
15109ReduceResult MaglevGraphBuilder::VisitAbort() {
15110 AbortReason reason = static_cast<AbortReason>(GetFlag8Operand(0));
15111 return BuildAbort(reason);
15112}
15113
15114ReduceResult MaglevGraphBuilder::VisitWide() { UNREACHABLE(); }
15115ReduceResult MaglevGraphBuilder::VisitExtraWide() { UNREACHABLE(); }
15116#define DEBUG_BREAK(Name, ...) \
15117 ReduceResult MaglevGraphBuilder::Visit##Name() { UNREACHABLE(); }
15119#undef DEBUG_BREAK
15120ReduceResult MaglevGraphBuilder::VisitIllegal() { UNREACHABLE(); }
15121
15122} // namespace v8::internal::maglev
DeoptFrame GetDeoptFrameForLazyDeopt(interpreter::Register result_location, int result_size)
VirtualObject * CreateConsString(ValueNode *map, ValueNode *length, ValueNode *first, ValueNode *second)
ReduceResult BuildCompareMaps(ValueNode *heap_object, ValueNode *object_map, base::Vector< const compiler::MapRef > maps, MaglevSubGraphBuilder *sub_graph, std::optional< MaglevSubGraphBuilder::Label > &if_not_matched)
ValueNode * GetFloat64ForToNumber(ValueNode *value, NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
bool EnsureType(ValueNode *node, NodeType type, NodeType *old=nullptr)
MaybeReduceResult TryBuildFastOrdinaryHasInstance(ValueNode *object, compiler::JSObjectRef callable, ValueNode *callable_node)
MaybeReduceResult TryBuildInlinedAllocatedContext(compiler::MapRef map, compiler::ScopeInfoRef scope, int context_length)
MaybeReduceResult TryBuildScriptContextStore(const compiler::GlobalAccessFeedback &global_access_feedback)
CallNode * AddNewCallNode(const CallArguments &args, Args &&... extra_args)
CallKnownJSFunction * BuildCallKnownJSFunction(ValueNode *context, ValueNode *function, ValueNode *new_target, compiler::SharedFunctionInfoRef shared, compiler::FeedbackCellRef feedback_cell, CallArguments &args, const compiler::FeedbackSource &feedback_source)
MaybeReduceResult TryBuildScriptContextLoad(const compiler::GlobalAccessFeedback &global_access_feedback)
BranchResult BuildBranchIfUndefinedOrNull(BranchBuilder &builder, ValueNode *node)
compiler::FeedbackVectorRef feedback() const
TaggedIndexConstant * GetTaggedIndexConstant(int constant)
MaybeReduceResult DoTryReduceMathRound(CallArguments &args, Float64Round::Kind kind)
void InitializeRegister(interpreter::Register reg, ValueNode *value)
MaybeReduceResult TryReduceGetIterator(ValueNode *receiver, int load_slot, int call_slot)
MaybeReduceResult TryBuildElementAccess(ValueNode *object, ValueNode *index, compiler::ElementAccessFeedback const &feedback, compiler::FeedbackSource const &feedback_source, GenericAccessFunc &&build_generic_access)
interpreter::BytecodeArrayIterator iterator_
void AddNonEscapingUses(InlinedAllocation *allocation, int use_count)
void BuildStoreTypedArrayElement(ValueNode *object, ValueNode *index, ElementsKind elements_kind)
constexpr bool RuntimeFunctionCanThrow(Runtime::FunctionId function_id)
MaybeReduceResult TryReduceConstructGeneric(compiler::JSFunctionRef function, compiler::SharedFunctionInfoRef shared_function_info, ValueNode *target, ValueNode *new_target, CallArguments &args, compiler::FeedbackSource &feedback_source)
ValueNode * BuildAndAllocateJSArrayIterator(ValueNode *array, IterationKind iteration_kind)
ReduceResult BuildTruncatingInt32BinaryOperationNodeForToNumber(NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
base::Vector< ValueNode * > GetArgumentsAsArrayOfValueNodes(compiler::SharedFunctionInfoRef shared, const CallArguments &args)
BranchResult BuildBranchIfInt32Compare(BranchBuilder &builder, Operation op, ValueNode *lhs, ValueNode *rhs)
MaybeReduceResult TryReduceCallForApiFunction(compiler::FunctionTemplateInfoRef api_callback, compiler::OptionalSharedFunctionInfoRef maybe_shared, CallArguments &args)
ValueNode * BuildGenericCall(ValueNode *target, Call::TargetType target_type, const CallArguments &args)
ValueNode * BuildLoadJSArrayLength(ValueNode *js_array, NodeType length_type=NodeType::kSmi)
void BuildStoreMap(ValueNode *object, compiler::MapRef map, StoreMap::Kind kind)
CatchBlockDetails GetTryCatchBlockFromInfo(ExceptionHandlerInfo *info)
MaybeReduceResult TryReduceGetProto(ValueNode *node)
ReduceResult BuildOrdinaryHasInstance(ValueNode *object, compiler::JSObjectRef callable, ValueNode *callable_node)
VirtualObject * GetObjectFromAllocation(InlinedAllocation *allocation)
void BuildRegisterFrameInitialization(ValueNode *context=nullptr, ValueNode *closure=nullptr, ValueNode *new_target=nullptr)
ValueNode * BuildTaggedEqual(ValueNode *lhs, ValueNode *rhs)
ReduceResult BuildToNumberOrToNumeric(Object::Conversion mode)
void StartNewBlock(int offset, BasicBlock *predecessor)
VirtualObject * GetModifiableObjectFromAllocation(InlinedAllocation *allocation)
ReduceResult BuildCheckNumericalValueOrByReference(ValueNode *node, compiler::ObjectRef ref, DeoptimizeReason reason)
ReduceResult ReduceCallWithArrayLikeForArgumentsObject(ValueNode *target_node, CallArguments &args, VirtualObject *arguments_object, const compiler::FeedbackSource &feedback_source)
void BuildStoreFixedArrayElement(ValueNode *elements, ValueNode *index, ValueNode *value)
VirtualObject * CreateJSIteratorResult(compiler::MapRef map, ValueNode *value, ValueNode *done)
void BuildLoadContextSlot(ValueNode *context, size_t depth, int slot_index, ContextSlotMutability slot_mutability, ContextKind context_kind)
compiler::BytecodeArrayRef bytecode() const
ReduceResult CreateJSArray(compiler::MapRef map, int instance_size, ValueNode *length)
VirtualObject * CreateVirtualObject(compiler::MapRef map, uint32_t slot_count_including_map)
const compiler::BytecodeLivenessState * GetOutLiveness() const
bool ShouldEagerInlineCall(compiler::SharedFunctionInfoRef shared)
void StoreRegisterPair(std::pair< interpreter::Register, interpreter::Register > target, NodeT *value)
DeoptFrame GetDeoptFrameForLazyDeoptHelper(interpreter::Register result_location, int result_size, DeoptFrameScope *scope, bool mark_accumulator_dead)
MaybeReduceResult TryReduceConstructBuiltin(compiler::JSFunctionRef builtin, compiler::SharedFunctionInfoRef shared_function_info, ValueNode *target, CallArguments &args)
ValueNode * LoadAndCacheContextSlot(ValueNode *context, int offset, ContextSlotMutability slot_mutability, ContextKind context_kind)
bool TryBuildFindNonDefaultConstructorOrConstruct(ValueNode *this_function, ValueNode *new_target, std::pair< interpreter::Register, interpreter::Register > result)
ReduceResult BuildCheckNumericalValue(ValueNode *node, compiler::ObjectRef ref, DeoptimizeReason reason)
MaybeReduceResult TryBuildFastHasInPrototypeChain(ValueNode *object, compiler::HeapObjectRef prototype)
void SetKnownValue(ValueNode *node, compiler::ObjectRef constant, NodeType new_node_type)
bool CanInlineCall(compiler::SharedFunctionInfoRef shared, float call_frequency)
ValueNode * GetInternalizedString(interpreter::Register reg)
BranchResult BuildBranchIfJSReceiver(BranchBuilder &builder, ValueNode *value)
bool CanElideWriteBarrier(ValueNode *object, ValueNode *value)
MaybeReduceResult TryReduceConstructArrayConstructor(compiler::JSFunctionRef array_function, CallArguments &args, compiler::OptionalAllocationSiteRef maybe_allocation_site={})
LazyDeoptResultLocationScope * lazy_deopt_result_location_scope_
MaybeReduceResult TryReduceConstruct(compiler::HeapObjectRef feedback_target, ValueNode *target, ValueNode *new_target, CallArguments &args, compiler::FeedbackSource &feedback_source)
MergePointInterpreterFrameState ** merge_states_
MaybeReduceResult TryReduceTypeOf(ValueNode *value, const Function &GetResult)
MaybeReduceResult TryBuildScriptContextConstantLoad(const compiler::GlobalAccessFeedback &global_access_feedback)
std::optional< int32_t > TryGetInt32Constant(ValueNode *value)
MaglevCallerDetails * caller_details() const
MergePointInterpreterFrameState * GetCatchBlockFrameState()
BasicBlock * FinishInlinedBlockForCaller(ControlNode *control_node, ZoneVector< Node * > rem_nodes_in_call_block)
ReduceResult GetUint32ElementIndex(interpreter::Register reg)
ReduceResult BuildCheckJSReceiver(ValueNode *object)
VirtualObject * CreateArgumentsObject(compiler::MapRef map, ValueNode *length, ValueNode *elements, std::optional< ValueNode * > callee={})
ReduceResult BuildCallRuntime(Runtime::FunctionId function_id, std::initializer_list< ValueNode * > inputs)
ReduceResult BuildCheckHeapObject(ValueNode *object)
VirtualObject * CreateJSConstructor(compiler::JSFunctionRef constructor)
Float64Constant * GetFloat64Constant(double constant)
InlinedAllocation * BuildInlinedAllocationForDoubleFixedArray(VirtualObject *object, AllocationType allocation)
MaybeReduceResult TryFoldFloat64UnaryOperationForToNumber(TaggedToFloat64ConversionType conversion_type, ValueNode *value)
void MergeIntoFrameState(BasicBlock *block, int target)
MaybeReduceResult TrySpecializeStoreScriptContextSlot(ValueNode *context, int index, ValueNode *value, Node **store)
ValueNode * GetInt32(ValueNode *value, bool can_be_heap_number=false)
MaglevGraphBuilder(LocalIsolate *local_isolate, MaglevCompilationUnit *compilation_unit, Graph *graph, MaglevCallerDetails *caller_details=nullptr)
VirtualObject * CreateJSObject(compiler::MapRef map)
ValueNode * BuildLoadFixedArrayElement(ValueNode *elements, int index)
ValueNode * BuildLoadHoleyFixedDoubleArrayElement(ValueNode *elements, ValueNode *index, bool convert_hole)
VirtualObject * CreateJSGeneratorObject(compiler::MapRef map, int instance_size, ValueNode *context, ValueNode *closure, ValueNode *receiver, ValueNode *register_file)
compiler::OptionalObjectRef TryFoldLoadConstantDataField(compiler::JSObjectRef holder, compiler::PropertyAccessInfo const &access_info)
ValueNode * GetAccumulatorHoleyFloat64ForToNumber(NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
ReduceResult BuildStringConcat(ValueNode *left, ValueNode *right)
ZoneStack< HandlerTableEntry > catch_block_stack_
BranchResult BuildBranchIfUndetectable(BranchBuilder &builder, ValueNode *value)
MaybeReduceResult TryFoldFloat64BinaryOperationForToNumber(TaggedToFloat64ConversionType conversion_type, ValueNode *left, ValueNode *right)
MaybeReduceResult TryBuildPropertyLoad(ValueNode *receiver, ValueNode *lookup_start_object, compiler::NameRef name, compiler::PropertyAccessInfo const &access_info)
ReduceResult BuildHasInPrototypeChain(ValueNode *object, compiler::HeapObjectRef prototype)
ValueNode * TryGetParentContext(ValueNode *node)
MaybeReduceResult TryBuildPropertySetterCall(compiler::PropertyAccessInfo const &access_info, ValueNode *receiver, ValueNode *lookup_start_object, ValueNode *value)
compiler::HolderLookupResult TryInferApiHolderValue(compiler::FunctionTemplateInfoRef function_template_info, ValueNode *receiver)
ReduceResult BuildCheckNumber(ValueNode *object)
VirtualObject * CreateJSArrayIterator(compiler::MapRef map, ValueNode *iterated_object, IterationKind kind)
ReduceResult BuildCallFromRegisterList(ConvertReceiverMode receiver_mode)
MaybeReduceResult TryBuildElementLoadOnJSArrayOrJSObject(ValueNode *object, ValueNode *index, base::Vector< const compiler::MapRef > maps, ElementsKind kind, KeyedAccessLoadMode load_mode)
ValueNode * GetConvertReceiver(compiler::SharedFunctionInfoRef shared, const CallArguments &args)
BranchResult BuildBranchIfFloat64IsHole(BranchBuilder &builder, ValueNode *node)
MaybeReduceResult TryBuildNewConsString(ValueNode *left, ValueNode *right, AllocationType allocation_type=AllocationType::kYoung)
ReduceResult BuildLoadGlobal(compiler::NameRef name, compiler::FeedbackSource &feedback_source, TypeofMode typeof_mode)
bool ContextMayAlias(ValueNode *context, compiler::OptionalScopeInfoRef scope_info)
ValueNode * LoadRegisterHoleyFloat64ForToNumber(int operand_index, NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
ReduceResult BuildTruncatingInt32BinarySmiOperationNodeForToNumber(NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
void RecordKnownProperty(ValueNode *lookup_start_object, KnownNodeAspects::LoadedPropertyMapKey key, ValueNode *value, bool is_const, compiler::AccessMode access_mode)
std::pair< interpreter::Register, int > GetResultLocationAndSize() const
ReduceResult BuildCheckSymbol(ValueNode *object)
ReduceResult BuildCallWithFeedback(ValueNode *target_node, CallArguments &args, const compiler::FeedbackSource &feedback_source)
MaybeReduceResult TryBuildGlobalLoad(const compiler::GlobalAccessFeedback &global_access_feedback)
BranchResult BuildBranchIfRootConstant(BranchBuilder &builder, ValueNode *node, RootIndex root_index)
MaybeReduceResult GetAccumulatorSmi(UseReprHintRecording record_use_repr_hint=UseReprHintRecording::kRecord)
ValueNode * BuildCallSelf(ValueNode *context, ValueNode *function, ValueNode *new_target, compiler::SharedFunctionInfoRef shared, CallArguments &args)
ReduceResult BuildAbort(AbortReason reason)
ZoneUnorderedMap< KnownNodeAspects::LoadedContextSlotsKey, Node * > unobserved_context_slot_stores_
MaglevCompilationUnit * compilation_unit() const
CallBuiltin * BuildCallBuiltin(std::initializer_list< ValueNode * > inputs)
MaybeReduceResult TryBuildFastInstanceOfWithFeedback(ValueNode *object, ValueNode *callable, compiler::FeedbackSource feedback_source)
ValueNode * GetHoleyFloat64ForToNumber(ValueNode *value, NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
MaybeReduceResult TryBuildInlineCall(ValueNode *context, ValueNode *function, ValueNode *new_target, compiler::SharedFunctionInfoRef shared, compiler::FeedbackCellRef feedback_cell, CallArguments &args, const compiler::FeedbackSource &feedback_source)
void InitializePredecessorCount(uint32_t offset, int amount)
VirtualObject * CreateDoubleFixedArray(uint32_t elements_length, compiler::FixedDoubleArrayRef elements)
ReduceResult BuildStoreContextSlot(ValueNode *context, size_t depth, int slot_index, ValueNode *value, ContextKind context_kind)
std::function< DeoptFrameScope( compiler::JSFunctionRef, ValueNode *, ValueNode *, ValueNode *, ValueNode *, ValueNode *, ValueNode *)> GetDeoptScopeCallback
MaybeReduceResult TryBuildPropertyGetterCall(compiler::PropertyAccessInfo const &access_info, ValueNode *receiver, ValueNode *lookup_start_object)
ReduceResult GetSmiValue(ValueNode *value, UseReprHintRecording record_use_repr_hint=UseReprHintRecording::kRecord)
ValueNode * BuildLoadField(compiler::PropertyAccessInfo const &access_info, ValueNode *lookup_start_object, compiler::NameRef name)
ReduceResult BuildGetKeyedProperty(ValueNode *object, const compiler::FeedbackSource &feedback_source, const compiler::ProcessedFeedback &processed_feedback)
ReduceResult BuildLoadTypedArrayLength(ValueNode *object, ElementsKind elements_kind)
const compiler::BytecodeAnalysis & bytecode_analysis() const
ReduceResult BuildAndAllocateJSArray(compiler::MapRef map, ValueNode *length, ValueNode *elements, const compiler::SlackTrackingPrediction &slack_tracking_prediction, AllocationType allocation_type)
FeedbackSlot GetSlotOperand(int operand_index) const
MaybeReduceResult SelectReduction(FCond cond, FTrue if_true, FFalse if_false)
std::optional< ValueNode * > TryGetConstantAlternative(ValueNode *node)
void TryBuildStoreTaggedFieldToAllocation(ValueNode *object, ValueNode *value, int offset)
ReduceResult BuildCheckJSReceiverOrNullOrUndefined(ValueNode *object)
const compiler::BytecodeLivenessState * GetOutLivenessFor(int offset) const
ValueNode * LoadRegister(int operand_index)
ValueNode * BuildExtendPropertiesBackingStore(compiler::MapRef map, ValueNode *receiver, ValueNode *property_array)
ReduceResult ConvertForStoring(ValueNode *node, ElementsKind kind)
MaybeReduceResult TryBuildElementStoreOnJSArrayOrJSObject(ValueNode *object, ValueNode *index_object, ValueNode *value, base::Vector< const compiler::MapRef > maps, ElementsKind kind, const compiler::KeyedAccessMode &keyed_mode)
MaybeReduceResult TryBuildPolymorphicPropertyAccess(ValueNode *receiver, ValueNode *lookup_start_object, compiler::NamedAccessFeedback const &feedback, compiler::AccessMode access_mode, const ZoneVector< compiler::PropertyAccessInfo > &access_infos, GenericAccessFunc &&build_generic_access)
ValueNode * GetValueOrUndefined(ValueNode *maybe_value)
SmiConstant * GetSmiConstant(int constant) const
InferHasInPrototypeChainResult InferHasInPrototypeChain(ValueNode *receiver, compiler::HeapObjectRef prototype)
ReduceResult BuildConstruct(ValueNode *target, ValueNode *new_target, CallArguments &args, compiler::FeedbackSource &feedback_source)
ValueNode * GetInt32ElementIndex(interpreter::Register reg)
compiler::JSHeapBroker * broker() const
VirtualObject * CreateRegExpLiteralObject(compiler::MapRef map, compiler::RegExpBoilerplateDescriptionRef literal)
MaybeReduceResult TryFoldInt32UnaryOperation(ValueNode *value)
BranchResult BuildBranchIfInt32ToBooleanTrue(BranchBuilder &builder, ValueNode *node)
MaybeReduceResult TryReduceArrayIteratingBuiltin(const char *name, compiler::JSFunctionRef target, CallArguments &args, GetDeoptScopeCallback get_eager_deopt_scope, GetDeoptScopeCallback get_lazy_deopt_scope, const std::optional< InitialCallback > &initial_callback={}, const std::optional< ProcessElementCallback > &process_element_callback={})
Int32Constant * GetInt32Constant(int32_t constant)
MaybeReduceResult TryReduceBuiltin(compiler::JSFunctionRef target, compiler::SharedFunctionInfoRef shared, CallArguments &args, const compiler::FeedbackSource &feedback_source)
bool CheckStaticType(ValueNode *node, NodeType type, NodeType *old=nullptr)
ValueNode * BuildConvertHoleToUndefined(ValueNode *node)
ReduceResult BuildAndAllocateKeyValueArray(ValueNode *key, ValueNode *value)
InlinedAllocation * BuildInlinedAllocationForHeapNumber(VirtualObject *object, AllocationType allocation)
MaybeReduceResult TryBuildStoreDataView(const CallArguments &args, ExternalArrayType type, Function &&getValue)
std::optional< DeoptFrame > latest_checkpointed_frame_
bool TrySpecializeLoadContextSlotToFunctionContext(ValueNode *context, int slot_index, ContextSlotMutability slot_mutability)
ReduceResult BuildFloat64BinarySmiOperationNodeForToNumber(NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
bool HasValidInitialMap(compiler::JSFunctionRef new_target, compiler::JSFunctionRef constructor)
InlinedAllocation * BuildInlinedAllocation(VirtualObject *object, AllocationType allocation)
void RecordUseReprHintIfPhi(ValueNode *node, UseRepresentation repr)
BranchResult BuildBranchIfNull(BranchBuilder &builder, ValueNode *node)
ReduceResult ReduceCallWithArrayLike(ValueNode *target_node, CallArguments &args, const compiler::FeedbackSource &feedback_source)
void BuildStoreFixedDoubleArrayElement(ValueNode *elements, ValueNode *index, ValueNode *value)
MaybeReduceResult TryBuildPropertyAccess(ValueNode *receiver, ValueNode *lookup_start_object, compiler::NameRef name, compiler::PropertyAccessInfo const &access_info, compiler::AccessMode access_mode)
ReduceResult BuildTransitionElementsKindAndCompareMaps(ValueNode *heap_object, ValueNode *object_map, const ZoneVector< compiler::MapRef > &transition_sources, compiler::MapRef transition_target, MaglevSubGraphBuilder *sub_graph, std::optional< MaglevSubGraphBuilder::Label > &if_not_matched)
void MergeDead(const MaglevCompilationUnit &compilation_unit, unsigned num=1)
void Merge(MaglevGraphBuilder *graph_builder, InterpreterFrameState &unmerged, BasicBlock *predecessor)
void MergeLoop(MaglevGraphBuilder *graph_builder, InterpreterFrameState &loop_end_state, BasicBlock *loop_end_block)
static MergePointInterpreterFrameState * NewForLoop(const InterpreterFrameState &start_state, const MaglevCompilationUnit &info, int merge_offset, int predecessor_count, const compiler::BytecodeLivenessState *liveness, const compiler::LoopInfo *loop_info, bool has_been_peeled=false)
void MergeDeadLoop(const MaglevCompilationUnit &compilation_unit)
void MergeThrow(MaglevGraphBuilder *handler_builder, const MaglevCompilationUnit *handler_unit, const KnownNodeAspects &known_node_aspects, const VirtualObjectList virtual_objects)
static MergePointInterpreterFrameState * NewForCatchBlock(const MaglevCompilationUnit &unit, const compiler::BytecodeLivenessState *liveness, int handler_offset, bool was_used, interpreter::Register context_register, Graph *graph)
void InitializeLoop(MaglevGraphBuilder *graph_builder, MaglevCompilationUnit &compilation_unit, InterpreterFrameState &unmerged, BasicBlock *predecessor, bool optimistic_initial_state=false, LoopEffects *loop_effects=nullptr)
void set_owner(BasicBlock *block)
Definition maglev-ir.h:2137
constexpr bool Is() const
Definition maglev-ir.h:2362
static constexpr Opcode opcode_of
Definition maglev-ir.h:1909
static Derived * New(Zone *zone, std::initializer_list< ValueNode * > inputs, Args &&... args)
Definition maglev-ir.h:1912
constexpr Opcode opcode() const
Definition maglev-ir.h:1939
constexpr OpProperties properties() const
Definition maglev-ir.h:1940
void SetPossibleMaps(const PossibleMaps &possible_maps, bool any_map_is_unstable, NodeType possible_type, compiler::JSHeapBroker *broker)
const AlternativeNodes & alternative() const
constexpr ValueRepresentation value_representation() const
Definition maglev-ir.h:1050
constexpr bool is_conversion() const
Definition maglev-ir.h:1056
constexpr bool is_tagged() const
Definition maglev-ir.h:1053
Tagged< Smi > value() const
Definition maglev-ir.h:5200
Object::Conversion mode() const
Definition maglev-ir.h:4815
IndirectPointerTag tag() const
Definition maglev-ir.h:5345
constexpr ValueRepresentation value_representation() const
Definition maglev-ir.h:2577
constexpr bool is_tagged() const
Definition maglev-ir.h:2546
constexpr bool has_static_map() const
Definition maglev-ir.h:5580
void set(uint32_t offset, ValueNode *value)
Definition maglev-ir.h:5634
void ForEachInput(Function &&callback)
Definition maglev-ir.h:5694
compiler::FixedDoubleArrayRef double_elements() const
Definition maglev-ir.h:5621
ValueNode * get(uint32_t offset) const
Definition maglev-ir.h:5626
VirtualObject * Clone(uint32_t new_object_id, Zone *zone, bool empty_clone=false) const
Definition maglev-ir.h:5769
void set_allocation(InlinedAllocation *allocation)
Definition maglev-ir.h:5665
uint32_t double_elements_length() const
Definition maglev-ir.h:5616
const VirtualConsString & cons_string() const
Definition maglev-ir.h:5651
compiler::MapRef map() const
Definition maglev-ir.h:5591
Zone * zone_
Register const value_
#define V8_MAP_PACKING_BOOL
Definition globals.h:93
#define V8_DICT_PROPERTY_CONST_TRACKING_BOOL
Definition globals.h:249
JSHeapBroker *const broker_
int start
Handle< SharedFunctionInfo > info
int end
Handle< Context > context_
base::Vector< const DirectHandle< Object > > args
Definition execution.cc:74
DirectHandle< Object > new_target
Definition execution.cc:75
Label label
JSHeapBroker * broker
int32_t offset
#define INTRINSICS_LIST(V)
std::string extension
TNode< Object > original_length
TNode< Context > context
TNode< Object > target
TNode< Object > this_arg
TNode< Object > receiver
SharedFunctionInfoRef shared
TNode< Object > callback
#define _
std::map< const std::string, const std::string > map
std::string pattern
Node * node
double second
ZoneVector< RpoNumber > & result
LiftoffRegister reg
MovableLabel continuation
FunctionLiteral * literal
Definition liveedit.cc:294
RegListBase< RegisterT > registers
#define BUILD_AND_RETURN_LOAD_TYPED_ARRAY(Type)
#define BUILD_STORE_TYPED_ARRAY(Type, value)
#define MAP_UNARY_OPERATION_TO_INT32_NODE(V)
bool known_maps_are_subset_of_requested_maps_
bool existing_known_maps_found_
#define MAP_BINARY_OPERATION_TO_INT32_NODE(V)
bool any_map_is_unstable_
#define NODE_FOR_OPERATION_HELPER(Name)
compiler::ZoneRefSet< Map > intersect_set_
bool emit_check_with_migration_
#define MAP_OPERATION_TO_FLOAT64_NODE(V)
base::Vector< const compiler::MapRef > requested_maps_
NodeType node_type_
#define SPECIALIZATION(op, OpNode,...)
#define TRACE_CANNOT_INLINE(...)
#define GENERATE_CASE(Name)
#define MATH_UNARY_IEEE_BUILTIN_REDUCER(MathName, ExtName, EnumName)
#define TRACE_INLINING(...)
#define GET_VALUE_OR_ABORT(variable, result)
#define RETURN_IF_ABORT(result)
#define PROCESS_AND_RETURN_IF_DONE(result, value_processor)
#define RETURN_IF_DONE(result)
#define MAGLEV_REDUCED_BUILTIN(V)
#define NON_VALUE_NODE_LIST(V)
Definition maglev-ir.h:321
#define CONTROL_NODE_LIST(V)
Definition maglev-ir.h:421
#define IEEE_754_UNARY_LIST(V)
Definition maglev-ir.h:3366
#define CONSTANT_VALUE_NODE_LIST(V)
Definition maglev-ir.h:138
InstructionOperand destination
const int length_
Definition mul-fft.cc:473
STL namespace.
constexpr unsigned CountPopulation(T value)
Definition bits.h:26
constexpr bool IsInRange(T value, U lower_limit, U higher_limit)
Definition bounds.h:20
constexpr Vector< T > VectorOf(T *start, size_t size)
Definition vector.h:360
V8_INLINE constexpr bool IsReferenceComparable(InstanceType instance_type)
const int kMaxFastLiteralDepth
Definition globals.h:127
bool IsAnyStore(AccessMode mode)
Definition heap-refs.h:65
const int kMaxFastLiteralProperties
Definition globals.h:128
ref_traits< T >::ref_type MakeRefAssumeMemoryFence(JSHeapBroker *broker, Tagged< T > object)
ZoneCompactSet< typename ref_traits< T >::ref_type > ZoneRefSet
Definition heap-refs.h:1301
UINT32_ELEMENTS INT32_ELEMENTS
Definition maglev-ir.h:8384
constexpr NodeType CombineType(NodeType left, NodeType right)
Definition maglev-ir.h:661
static constexpr std::optional< int > Int32Identity()
NodeType StaticTypeForConstant(compiler::JSHeapBroker *broker, compiler::ObjectRef ref)
Definition maglev-ir.h:711
bool OperationValue(type left, type right)
bool HasOnlyStringMaps(base::Vector< const compiler::MapRef > maps)
Definition maglev-ir.h:873
constexpr NodeType EmptyNodeType()
Definition maglev-ir.h:659
UINT32_ELEMENTS INT8_ELEMENTS
Definition maglev-ir.h:8383
bool HasOnlyNumberMaps(base::Vector< const compiler::MapRef > maps)
Definition maglev-ir.h:880
constexpr bool NodeTypeIs(NodeType type, NodeType to_check)
Definition maglev-ir.h:669
constexpr bool IsEmptyNodeType(NodeType type)
Definition maglev-ir.h:706
bool HasOnlyJSTypedArrayMaps(base::Vector< const compiler::MapRef > maps)
Definition maglev-ir.h:852
constexpr NodeType IntersectType(NodeType left, NodeType right)
Definition maglev-ir.h:665
constexpr bool IsConstantNode(Opcode opcode)
Definition maglev-ir.h:491
UINT32_ELEMENTS INT16_ELEMENTS
Definition maglev-ir.h:8383
NodeType StaticTypeForNode(compiler::JSHeapBroker *broker, LocalIsolate *isolate, ValueNode *node)
bool IsInstanceOfNodeType(compiler::MapRef map, NodeType type, compiler::JSHeapBroker *broker)
Definition maglev-ir.h:756
bool FromConstantToBool(LocalIsolate *local_isolate, ValueNode *node)
Definition maglev-ir.cc:364
constexpr bool IsConditionalControlNode(Opcode opcode)
Definition maglev-ir.h:533
NodeType StaticTypeForMap(compiler::MapRef map, compiler::JSHeapBroker *broker)
Definition maglev-ir.h:680
compiler::ZoneRefSet< Map > PossibleMaps
bool NodeTypeMayBeNullOrUndefined(NodeType type)
Definition maglev-ir.h:805
bool HasOnlyJSArrayMaps(base::Vector< const compiler::MapRef > maps)
Definition maglev-ir.h:859
bool HasOnlyJSObjectMaps(base::Vector< const compiler::MapRef > maps)
Definition maglev-ir.h:866
double pow(double x, double y)
Definition ieee754.cc:14
bool IsUint32Double(double value)
constexpr int kFastElementsKindCount
V8_EXPORT_PRIVATE base::Vector< Flag > Flags()
Definition flags.cc:300
bool TryCast(Tagged< From > value, Tagged< To > *out)
Definition casting.h:77
constexpr int kTaggedSize
Definition globals.h:542
bool StoreModeIsInBounds(KeyedAccessStoreMode store_mode)
Definition globals.h:2724
bool StoreModeCanGrow(KeyedAccessStoreMode store_mode)
Definition globals.h:2742
constexpr int kMaxRegularHeapObjectSize
Definition globals.h:680
constexpr bool IsHoleyElementsKind(ElementsKind kind)
bool IsClassConstructor(FunctionKind kind)
bool DoubleToUint32IfEqualToSelf(double value, uint32_t *uint32_value)
bool Is(IndirectHandle< U > value)
Definition handles-inl.h:51
bool IsSmiDouble(double value)
bool IsSpecialReceiverInstanceType(InstanceType instance_type)
unsigned int FastD2UI(double x)
bool IsTypedArrayElementsKind(ElementsKind kind)
bool IsRabGsabTypedArrayElementsKind(ElementsKind kind)
bool IsDerivedConstructor(FunctionKind kind)
bool IsInt32Double(double value)
constexpr bool IsSmiElementsKind(ElementsKind kind)
constexpr int kFastElementsKindPackedToHoley
constexpr uint64_t kHoleNanInt64
Definition globals.h:1960
constexpr bool IsObjectElementsKind(ElementsKind kind)
Flag flags[]
Definition flags.cc:3797
is_undetectable
Definition map-inl.h:113
void Print(Tagged< Object > obj)
Definition objects.h:774
bool IsSmiOrObjectElementsKind(ElementsKind kind)
ElementsKind GetHoleyElementsKind(ElementsKind packed_kind)
HOLEY_ELEMENTS
constexpr bool SmiValuesAre31Bits()
bool IsFastElementsKind(ElementsKind kind)
HOLEY_DOUBLE_ELEMENTS
int32_t DoubleToInt32(double x)
bool IsHoleyOrDictionaryElementsKind(ElementsKind kind)
const int kHeapObjectTag
Definition v8-internal.h:72
int FastD2I(double x)
bool UnionElementsKindUptoSize(ElementsKind *a_out, ElementsKind b)
bool StoreModeIgnoresTypeArrayOOB(KeyedAccessStoreMode store_mode)
Definition globals.h:2738
V8_EXPORT_PRIVATE FlagValues v8_flags
constexpr bool SmiValuesAre32Bits()
@ kExternalFloat64Array
Definition globals.h:2461
@ kExternalInt32Array
Definition globals.h:2457
@ kExternalInt8Array
Definition globals.h:2453
@ kExternalInt16Array
Definition globals.h:2455
base::Vector< T > CloneVector(Zone *zone, base::Vector< const T > other)
Definition zone-utils.h:18
requires_instance_members_initializer
bool IsTypedArrayOrRabGsabTypedArrayElementsKind(ElementsKind kind)
constexpr int kMaxInt
Definition globals.h:374
bool LoadModeHandlesHoles(KeyedAccessLoadMode load_mode)
Definition globals.h:2695
constexpr bool IsDoubleElementsKind(ElementsKind kind)
constexpr uint32_t kMaxUInt32
Definition globals.h:387
bool LoadModeHandlesOOB(KeyedAccessLoadMode load_mode)
Definition globals.h:2689
@ kStartAtPrototype
Definition globals.h:1714
constexpr Register kJavaScriptCallNewTargetRegister
prototype
Definition map-inl.h:69
native_context
Definition map-inl.h:877
Tagged< To > Cast(Tagged< From > value, const v8::SourceLocation &loc=INIT_SOURCE_LOCATION_IN_DEBUG)
Definition casting.h:150
static constexpr RelaxedLoadTag kRelaxedLoad
Definition globals.h:2909
Operation
Definition operation.h:43
#define OPERATION_LIST(V)
Definition operation.h:38
uint32_t equals
BytecodeSequenceNode * parent_
RegExpBuilder builder_
#define STRONG_READ_ONLY_HEAP_NUMBER_ROOT_LIST(V)
Definition roots.h:32
#define SMI_ROOT_LIST(V)
Definition roots.h:418
#define DCHECK_LE(v1, v2)
Definition logging.h:490
#define DCHECK_NULL(val)
Definition logging.h:491
#define CHECK_IMPLIES(lhs, rhs)
#define CHECK(condition)
Definition logging.h:124
#define DCHECK_NOT_NULL(val)
Definition logging.h:492
#define CHECK_NULL(val)
#define CHECK_NOT_NULL(val)
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define CHECK_NE(lhs, rhs)
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define CHECK_EQ(lhs, rhs)
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define DCHECK_GT(v1, v2)
Definition logging.h:487
#define USE(...)
Definition macros.h:293
CallOptimization::HolderLookup lookup
Definition heap-refs.h:951
BytecodeLoopAssignments & assignments()
ZoneMap< std::tuple< ValueNode *, int >, ValueNode * > loaded_context_constants
const NodeInfo * TryGetInfoFor(ValueNode *node) const
NodeInfos::iterator FindInfo(ValueNode *node)
ZoneSet< KnownNodeAspects::LoadedContextSlotsKey > context_slot_written
ZoneSet< KnownNodeAspects::LoadedPropertyMapKey > keys_cleared
ZoneUnorderedMap< KnownNodeAspects::LoadedContextSlotsKey, Node * > unobserved_context_slot_stores
#define V8_LIKELY(condition)
Definition v8config.h:661
#define V8_UNLIKELY(condition)
Definition v8config.h:660
#define V8_NODISCARD
Definition v8config.h:693
wasm::ValueType type