v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
maglev-ir.cc
1// Copyright 2022 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/maglev/maglev-ir.h"
6
7#include <cmath>
8#include <limits>
9#include <optional>
10
11#include "src/base/bounds.h"
12#include "src/base/logging.h"
19#include "src/common/globals.h"
25#include "src/heap/local-heap.h"
35#ifdef V8_ENABLE_MAGLEV
39#endif
44#include "src/roots/roots.h"
45
46namespace v8 {
47namespace internal {
48namespace maglev {
49
50#define __ masm->
51
52const char* OpcodeToString(Opcode opcode) {
53#define DEF_NAME(Name) #Name,
54 static constexpr const char* const names[] = {NODE_BASE_LIST(DEF_NAME)};
55#undef DEF_NAME
56 return names[static_cast<int>(opcode)];
57}
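// e.g. OpcodeToString(Opcode::kPhi) returns "Phi".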
58
62
63namespace {
64
65// Prevent people from accidentally using kScratchRegister here and having their
66// code break on arm64.
67[[maybe_unused]] struct Do_not_use_kScratchRegister_in_arch_independent_code {
68} kScratchRegister;
69[[maybe_unused]] struct
70 Do_not_use_kScratchDoubleRegister_in_arch_independent_code {
71} kScratchDoubleRegister;
72static_assert(!std::is_same_v<decltype(kScratchRegister), Register>);
73static_assert(
74 !std::is_same_v<decltype(kScratchDoubleRegister), DoubleRegister>);
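// The dummy structs above shadow the architecture-specific scratch register
// names within this file, so an accidental use fails on every architecture
// instead of compiling on x64 and breaking only on arm64.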
75
76} // namespace
77
78#ifdef DEBUG
79namespace {
80
81template <size_t InputCount, typename Base, typename Derived>
82int StaticInputCount(FixedInputNodeTMixin<InputCount, Base, Derived>*) {
83 return InputCount;
84}
85
86int StaticInputCount(NodeBase*) { UNREACHABLE(); }
87
88} // namespace
89
90void NodeBase::CheckCanOverwriteWith(Opcode new_opcode,
91 OpProperties new_properties) {
92 if (new_opcode == Opcode::kDead) return;
93
94 DCHECK_IMPLIES(new_properties.can_eager_deopt(),
95 properties().can_eager_deopt());
96 DCHECK_IMPLIES(new_properties.can_lazy_deopt(),
97 properties().can_lazy_deopt());
98 DCHECK_IMPLIES(new_properties.needs_register_snapshot(),
99 properties().needs_register_snapshot());
100
101 int old_input_count = input_count();
102 size_t old_sizeof = -1;
103 switch (opcode()) {
104#define CASE(op) \
105 case Opcode::k##op: \
106 old_sizeof = sizeof(op); \
107 break;
108 NODE_BASE_LIST(CASE)
109#undef CASE
110 }
111
112 switch (new_opcode) {
113#define CASE(op) \
114 case Opcode::k##op: { \
115 DCHECK_EQ(old_input_count, StaticInputCount(static_cast<op*>(this))); \
116 DCHECK_LE(sizeof(op), old_sizeof); \
117 break; \
118 }
119 NODE_BASE_LIST(CASE)
120#undef CASE
121 }
122}
123
124#endif // DEBUG
125
126bool Phi::is_loop_phi() const { return merge_state()->is_loop(); }
127
129bool Phi::is_unmerged_loop_phi() const {
130 return merge_state()->is_unmerged_loop();
131}
132
136 }
137
138 if (!repr_mask.is_subset_of(uses_repr_hint_)) {
139 uses_repr_hint_.Add(repr_mask);
140
141 // Propagate in inputs, ignoring unbounded loop backedges.
142 int bound_inputs = input_count();
143 if (merge_state()->is_unmerged_loop()) --bound_inputs;
144
145 for (int i = 0; i < bound_inputs; i++) {
146 if (Phi* phi_input = input(i).node()->TryCast<Phi>()) {
147 phi_input->RecordUseReprHint(repr_mask);
148 }
149 }
150 }
151}
152
153void Phi::SetUseRequires31BitValue() {
154 if (uses_require_31_bit_value()) return;
156 auto inputs =
157 is_loop_phi() ? merge_state_->predecessors_so_far() : input_count();
158 for (uint32_t i = 0; i < inputs; ++i) {
159 ValueNode* input_node = input(i).node();
160 DCHECK(input_node);
161 if (auto phi = input_node->TryCast<Phi>()) {
162 phi->SetUseRequires31BitValue();
163 }
164 }
165}
166
167InitialValue::InitialValue(uint64_t bitfield, interpreter::Register source)
168 : Base(bitfield), source_(source) {}
169
170namespace {
171
172// ---
173// Print
174// ---
175
176bool IsStoreToNonEscapedObject(const NodeBase* node) {
177 if (CanBeStoreToNonEscapedObject(node->opcode())) {
178 DCHECK_GT(node->input_count(), 0);
179 if (InlinedAllocation* alloc =
180 node->input(0).node()->template TryCast<InlinedAllocation>()) {
181 return alloc->HasBeenAnalysed() && alloc->HasBeenElided();
182 }
183 }
184 return false;
185}
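// Stores into an allocation that escape analysis has elided are dead; the
// printer marks them with a tombstone below.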
186
187void PrintInputs(std::ostream& os, MaglevGraphLabeller* graph_labeller,
188 const NodeBase* node) {
189 if (!node->has_inputs()) return;
190
191 os << " [";
192 for (int i = 0; i < node->input_count(); i++) {
193 if (i != 0) os << ", ";
194 graph_labeller->PrintInput(os, node->input(i));
195 }
196 os << "]";
197}
198
199void PrintResult(std::ostream& os, MaglevGraphLabeller* graph_labeller,
200 const NodeBase* node) {}
201
202void PrintResult(std::ostream& os, MaglevGraphLabeller* graph_labeller,
203 const ValueNode* node) {
204 os << " → " << node->result().operand();
205 if (node->result().operand().IsAllocated() && node->is_spilled() &&
206 node->spill_slot() != node->result().operand()) {
207 os << " (spilled: " << node->spill_slot() << ")";
208 }
209 if (node->has_valid_live_range()) {
210 os << ", live range: [" << node->live_range().start << "-"
211 << node->live_range().end << "]";
212 }
213 if (!node->has_id()) {
214 os << ", " << node->use_count() << " uses";
215 if (const InlinedAllocation* alloc = node->TryCast<InlinedAllocation>()) {
216 os << " (" << alloc->non_escaping_use_count() << " non escaping uses)";
217 if (alloc->HasBeenAnalysed() && alloc->HasBeenElided()) {
218 os << " 🪦";
219 }
220 } else if (!node->is_used()) {
221 if (node->opcode() != Opcode::kAllocationBlock &&
222 node->properties().is_required_when_unused()) {
223 os << ", but required";
224 } else {
225 os << " 🪦";
226 }
227 }
228 }
229}
230
231void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller,
232 const NodeBase* node) {}
233
234void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller,
235 const UnconditionalControlNode* node) {
236 os << " b" << node->target()->id();
237}
238
239void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller,
240 const BranchControlNode* node) {
241 os << " b" << node->if_true()->id() << " b" << node->if_false()->id();
242}
243
244void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller,
245 const Switch* node) {
246 for (int i = 0; i < node->size(); i++) {
247 const BasicBlockRef& target = node->Cast<Switch>()->targets()[i];
248 os << " b" << target.block_ptr()->id();
249 }
250 if (node->Cast<Switch>()->has_fallthrough()) {
251 BasicBlock* fallthrough_target = node->Cast<Switch>()->fallthrough();
252 os << " b" << fallthrough_target->id();
253 }
254}
255
256class MaybeUnparkForPrint {
257 public:
258 MaybeUnparkForPrint() {
259 LocalHeap* local_heap = LocalHeap::Current();
260 if (!local_heap) {
261 local_heap = Isolate::Current()->main_thread_local_heap();
262 }
263 DCHECK_NOT_NULL(local_heap);
264 if (local_heap->IsParked()) {
265 scope_.emplace(local_heap);
266 }
267 }
268
269 private:
270 std::optional<UnparkedScope> scope_;
271};
272
273template <typename NodeT>
274void PrintImpl(std::ostream& os, MaglevGraphLabeller* graph_labeller,
275 const NodeT* node, bool skip_targets) {
276 MaybeUnparkForPrint unpark;
277 os << node->opcode();
278 node->PrintParams(os, graph_labeller);
279 PrintInputs(os, graph_labeller, node);
280 PrintResult(os, graph_labeller, node);
281 if (IsStoreToNonEscapedObject(node)) {
282 os << " 🪦";
283 }
284 if (!skip_targets) {
285 PrintTargets(os, graph_labeller, node);
286 }
287}
288
289bool RootToBoolean(RootIndex index) {
290 switch (index) {
291 case RootIndex::kFalseValue:
292 case RootIndex::kNullValue:
293 case RootIndex::kUndefinedValue:
294 case RootIndex::kNanValue:
295 case RootIndex::kHoleNanValue:
296 case RootIndex::kMinusZeroValue:
297 case RootIndex::kempty_string:
298#ifdef V8_ENABLE_WEBASSEMBLY
299 case RootIndex::kWasmNull:
300#endif
301 return false;
302 default:
303 return true;
304 }
305}
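// This mirrors Object::BooleanValue for read-only roots: e.g. the empty
// string, NaN, and minus zero are falsy, while most other roots are truthy.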
306
307#ifdef DEBUG
308// For all RO roots, check that RootToBoolean returns the same value as
309// BooleanValue on that root.
310bool CheckToBooleanOnAllRoots(LocalIsolate* local_isolate) {
311 ReadOnlyRoots roots(local_isolate);
312 // Use the READ_ONLY_ROOT_LIST macro list rather than a for loop to get nicer
313 // error messages if there is a failure.
314#define DO_CHECK(type, name, CamelName) \
315 /* Ignore 'undefined' roots that are not the undefined value itself. */ \
316 if (roots.name() != roots.undefined_value() || \
317 RootIndex::k##CamelName == RootIndex::kUndefinedValue) { \
318 DCHECK_EQ(Object::BooleanValue(roots.name(), local_isolate), \
319 RootToBoolean(RootIndex::k##CamelName)); \
320 }
321 READ_ONLY_ROOT_LIST(DO_CHECK)
322#undef DO_CHECK
323 return true;
324}
325#endif
326
327} // namespace
328
329void VirtualObjectList::Print(std::ostream& os, const char* prefix,
330 MaglevGraphLabeller* labeller) const {
331 CHECK_NOT_NULL(labeller);
332 os << prefix;
333 for (const VirtualObject* vo : *this) {
334 labeller->PrintNodeLabel(os, vo);
335 os << "; ";
336 }
337 os << std::endl;
338}
339
343 // Initialise locations so that they correctly don't have a next use id.
344 for (size_t i = 0; i < count; ++i) {
346 }
347#ifdef DEBUG
348 input_location_count_ = count;
349#endif // DEBUG
350}
351
352bool RootConstant::ToBoolean(LocalIsolate* local_isolate) const {
353#ifdef DEBUG
354 // (Ab)use static locals to call CheckToBooleanOnAllRoots once, on first
355 // call to this function.
356 static bool check_once = CheckToBooleanOnAllRoots(local_isolate);
357 DCHECK(check_once);
358#endif
359 // ToBoolean is only supported for RO roots.
361 return RootToBoolean(index_);
362}
363
364bool FromConstantToBool(LocalIsolate* local_isolate, ValueNode* node) {
365 DCHECK(IsConstantNode(node->opcode()));
366 switch (node->opcode()) {
367#define CASE(Name) \
368 case Opcode::k##Name: { \
369 return node->Cast<Name>()->ToBoolean(local_isolate); \
370 }
371 CONSTANT_VALUE_NODE_LIST(CASE)
372#undef CASE
373 default:
374 UNREACHABLE();
375 }
376}
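// e.g. for a RootConstant holding kFalseValue this dispatches to
// RootConstant::ToBoolean, which returns false via RootToBoolean above.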
377
379 node_->remove_use();
380 node_ = nullptr;
381}
382
383DeoptInfo::DeoptInfo(Zone* zone, const DeoptFrame top_frame,
384 compiler::FeedbackSource feedback_to_update)
385 : top_frame_(top_frame), feedback_to_update_(feedback_to_update) {}
386
390 }
391 if (V8_LIKELY(result_size() == 1)) {
392 return reg == result_location_;
393 }
394 if (result_size() == 0) {
395 return false;
396 }
398 return reg == result_location_ ||
399 reg == interpreter::Register(result_location_.index() + 1);
400}
401
402bool LazyDeoptInfo::InReturnValues(interpreter::Register reg,
403 interpreter::Register result_location,
404 int result_size) {
405 if (result_size == 0 || !result_location.is_valid()) {
406 return false;
407 }
408 return base::IsInRange(reg.index(), result_location.index(),
409 result_location.index() + result_size - 1);
410}
411
412int InterpretedDeoptFrame::ComputeReturnOffset(
413 interpreter::Register result_location, int result_size) const {
414 // Return offsets are counted from the end of the translation frame,
415 // which is the array [parameters..., locals..., accumulator]. Since
416 // it's the end, we don't need to worry about earlier frames.
417 if (result_location == interpreter::Register::virtual_accumulator()) {
418 return 0;
419 } else if (result_location.is_parameter()) {
420 // This is slightly tricky to reason about because of zero indexing
421 // and fence post errors. As an example, consider a frame with 2
422 // locals and 2 parameters, where we want argument index 1 -- looking
423 // at the array in reverse order we have:
424 // [acc, r1, r0, a1, a0]
425 // ^
426 // and this calculation gives, correctly:
427 // 2 + 2 - 1 = 3
428 return unit().register_count() + unit().parameter_count() -
429 result_location.ToParameterIndex();
430 } else {
431 return unit().register_count() - result_location.index();
432 }
433}
434
435const InterpretedDeoptFrame& LazyDeoptInfo::GetFrameForExceptionHandler(
436 const ExceptionHandlerInfo* handler_info) {
437 const DeoptFrame* target_frame = &top_frame();
438 for (int i = 0;; i++) {
439 while (target_frame->type() != DeoptFrame::FrameType::kInterpretedFrame) {
440 target_frame = target_frame->parent();
441 }
442 if (i == handler_info->depth()) break;
443 target_frame = target_frame->parent();
444 }
445 return target_frame->as_interpreted();
446}
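// The handler's depth counts interpreted frames only: depth 0 is the
// innermost interpreted frame, depth 1 the next interpreted frame out.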
447
448void NodeBase::Print(std::ostream& os, MaglevGraphLabeller* graph_labeller,
449 bool skip_targets) const {
450 switch (opcode()) {
451#define V(Name) \
452 case Opcode::k##Name: \
453 return PrintImpl(os, graph_labeller, this->Cast<Name>(), skip_targets);
454 NODE_BASE_LIST(V)
455#undef V
456 }
457 UNREACHABLE();
458}
459
460void NodeBase::Print() const {
461 MaglevGraphLabeller labeller;
462 Print(std::cout, &labeller);
463 std::cout << std::endl;
464}
465
466void ValueNode::SetHint(compiler::InstructionOperand hint) {
467 if (!hint_.IsInvalid()) return;
468 hint_ = hint;
469 if (result_.operand().IsUnallocated()) {
470 auto operand = compiler::UnallocatedOperand::cast(result_.operand());
471 if (operand.HasSameAsInputPolicy()) {
472 input(operand.input_index()).node()->SetHint(hint);
473 }
474 }
475 if (this->Is<Phi>()) {
476 for (Input& input : *this) {
477 if (input.node()->has_id() && input.node()->id() < this->id()) {
478 input.node()->SetHint(hint);
479 }
480 }
481 }
482}
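// Propagating the hint into same-as-input operands and into earlier phi
// inputs lets the register allocator try to keep those values in the
// register this node will eventually be placed in.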
483
486#ifdef DEBUG
487 state_ = kSpill;
488#endif // DEBUG
490}
491
494#ifdef DEBUG
495 state_ = kSpill;
496#endif // DEBUG
498 compiler::UnallocatedOperand::cast(result().operand())
499 .virtual_register());
500}
501
503 switch (ieee_function_) {
504#define CASE(MathName, ExtName, EnumName) \
505 case Ieee754Function::k##EnumName: \
506 return ExternalReference::ieee754_##ExtName##_function();
507 IEEE_754_UNARY_LIST(CASE)
508#undef CASE
509 }
510}
511
512// ---
513// Check input value representation
514// ---
515
531
532void CheckValueInputIs(const NodeBase* node, int i,
533 ValueRepresentation expected,
534 MaglevGraphLabeller* graph_labeller) {
535 ValueNode* input = node->input(i).node();
536 DCHECK(!input->Is<Identity>());
537 ValueRepresentation got = input->properties().value_representation();
538 // Allow Float64 values to be inputs when HoleyFloat64 is expected.
539 bool valid =
540 (got == expected) || (got == ValueRepresentation::kFloat64 &&
541 expected == ValueRepresentation::kHoleyFloat64);
542 if (!valid) {
543 std::ostringstream str;
544 str << "Type representation error: node ";
545 if (graph_labeller) {
546 str << "#" << graph_labeller->NodeId(node) << " : ";
547 }
548 str << node->opcode() << " (input @" << i << " = " << input->opcode()
549 << ") type " << got << " is not " << expected;
550 FATAL("%s", str.str().c_str());
551 }
552}
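// e.g. a failure prints:
//   Type representation error: node #12 : Float64Add (input @1 =
//   Int32Constant) type Int32 is not Float64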
553
554void CheckValueInputIs(const NodeBase* node, int i, Opcode expected,
555 MaglevGraphLabeller* graph_labeller) {
556 ValueNode* input = node->input(i).node();
557 Opcode got = input->opcode();
558 if (got != expected) {
559 std::ostringstream str;
560 str << "Opcode error: node ";
561 if (graph_labeller) {
562 str << "#" << graph_labeller->NodeId(node) << " : ";
563 }
564 str << node->opcode() << " (input @" << i << " = " << input->opcode()
565 << ") opcode " << got << " is not " << expected;
566 FATAL("%s", str.str().c_str());
567 }
568}
569
571 for (int i = 0; i < input_count(); i++) {
572 CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller);
573 }
574}
575
576void Phi::VerifyInputs(MaglevGraphLabeller* graph_labeller) const {
577 switch (value_representation()) {
578#define CASE_REPR(repr) \
579 case ValueRepresentation::k##repr: \
580 for (int i = 0; i < input_count(); i++) { \
581 CheckValueInputIs(this, i, ValueRepresentation::k##repr, \
582 graph_labeller); \
583 } \
584 break;
585
586 CASE_REPR(Tagged)
587 CASE_REPR(Int32)
588 CASE_REPR(Uint32)
589 CASE_REPR(Float64)
590 CASE_REPR(HoleyFloat64)
591#undef CASE_REPR
592 case ValueRepresentation::kIntPtr:
593 UNREACHABLE();
594 }
595}
596
597void Call::VerifyInputs(MaglevGraphLabeller* graph_labeller) const {
598 for (int i = 0; i < input_count(); i++) {
599 CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller);
600 }
601}
602
603#ifdef V8_COMPRESS_POINTERS
604void Call::MarkTaggedInputsAsDecompressing() {
605 for (int i = 0; i < input_count(); i++) {
606 input(i).node()->SetTaggedResultNeedsDecompress();
607 }
608}
609#endif
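// The call node variants below all follow this same pattern: every input is
// a tagged value, and with pointer compression enabled each tagged input is
// marked as needing decompression before the call.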
610
611void CallForwardVarargs::VerifyInputs(
612 MaglevGraphLabeller* graph_labeller) const {
613 for (int i = 0; i < input_count(); i++) {
614 CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller);
615 }
616}
617
618#ifdef V8_COMPRESS_POINTERS
619void CallForwardVarargs::MarkTaggedInputsAsDecompressing() {
620 for (int i = 0; i < input_count(); i++) {
621 input(i).node()->SetTaggedResultNeedsDecompress();
622 }
623}
624#endif
625
626void CallWithArrayLike::VerifyInputs(
627 MaglevGraphLabeller* graph_labeller) const {
628 for (int i = 0; i < input_count(); i++) {
629 CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller);
630 }
631}
632
633#ifdef V8_COMPRESS_POINTERS
634void CallWithArrayLike::MarkTaggedInputsAsDecompressing() {
635 for (int i = 0; i < input_count(); i++) {
636 input(i).node()->SetTaggedResultNeedsDecompress();
637 }
638}
639#endif
640
641void CallWithSpread::VerifyInputs(MaglevGraphLabeller* graph_labeller) const {
642 for (int i = 0; i < input_count(); i++) {
643 CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller);
644 }
645}
646
647#ifdef V8_COMPRESS_POINTERS
648void CallWithSpread::MarkTaggedInputsAsDecompressing() {
649 for (int i = 0; i < input_count(); i++) {
650 input(i).node()->SetTaggedResultNeedsDecompress();
651 }
652}
653#endif
654
655void CallSelf::VerifyInputs(MaglevGraphLabeller* graph_labeller) const {
656 for (int i = 0; i < input_count(); i++) {
657 CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller);
658 }
659}
660
661#ifdef V8_COMPRESS_POINTERS
662void CallSelf::MarkTaggedInputsAsDecompressing() {
663 for (int i = 0; i < input_count(); i++) {
664 input(i).node()->SetTaggedResultNeedsDecompress();
665 }
666}
667#endif
668
669void CallKnownJSFunction::VerifyInputs(
670 MaglevGraphLabeller* graph_labeller) const {
671 for (int i = 0; i < input_count(); i++) {
672 CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller);
673 }
674}
675
676#ifdef V8_COMPRESS_POINTERS
677void CallKnownJSFunction::MarkTaggedInputsAsDecompressing() {
678 for (int i = 0; i < input_count(); i++) {
679 input(i).node()->SetTaggedResultNeedsDecompress();
680 }
681}
682#endif
683
684void CallKnownApiFunction::VerifyInputs(
685 MaglevGraphLabeller* graph_labeller) const {
686 for (int i = 0; i < input_count(); i++) {
687 CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller);
688 }
689}
690
691#ifdef V8_COMPRESS_POINTERS
692void CallKnownApiFunction::MarkTaggedInputsAsDecompressing() {
693 for (int i = 0; i < input_count(); i++) {
694 input(i).node()->SetTaggedResultNeedsDecompress();
695 }
696}
697#endif
698
699void Construct::VerifyInputs(MaglevGraphLabeller* graph_labeller) const {
700 for (int i = 0; i < input_count(); i++) {
701 CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller);
702 }
703}
704
705#ifdef V8_COMPRESS_POINTERS
706void Construct::MarkTaggedInputsAsDecompressing() {
707 for (int i = 0; i < input_count(); i++) {
708 input(i).node()->SetTaggedResultNeedsDecompress();
709 }
710}
711#endif
712
713void ConstructWithSpread::VerifyInputs(
714 MaglevGraphLabeller* graph_labeller) const {
715 for (int i = 0; i < input_count(); i++) {
716 CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller);
717 }
718}
719
720#ifdef V8_COMPRESS_POINTERS
721void ConstructWithSpread::MarkTaggedInputsAsDecompressing() {
722 for (int i = 0; i < input_count(); i++) {
723 input(i).node()->SetTaggedResultNeedsDecompress();
724 }
725}
726#endif
727
728void CallBuiltin::VerifyInputs(MaglevGraphLabeller* graph_labeller) const {
729 auto descriptor = Builtins::CallInterfaceDescriptorFor(builtin());
730 int count = input_count();
731 // Verify context.
732 if (descriptor.HasContextParameter()) {
733 CheckValueInputIs(this, count - 1, ValueRepresentation::kTagged,
734 graph_labeller);
735 count--;
736 }
737
738// {all_input_count} includes the feedback slot and vector.
739#ifdef DEBUG
740 int all_input_count = count + (has_feedback() ? 2 : 0);
741 if (descriptor.AllowVarArgs()) {
742 DCHECK_GE(all_input_count, descriptor.GetParameterCount());
743 } else {
744 DCHECK_EQ(all_input_count, descriptor.GetParameterCount());
745 }
746#endif
747 int i = 0;
748 // Check the rest of inputs.
749 for (; i < count; ++i) {
750 MachineType type = i < descriptor.GetParameterCount()
751 ? descriptor.GetParameterType(i)
752 : MachineType::AnyTagged();
753 CheckValueInputIs(this, i, ToValueRepresentation(type), graph_labeller);
754 }
755}
756
757#ifdef V8_COMPRESS_POINTERS
758void CallBuiltin::MarkTaggedInputsAsDecompressing() {
759 auto descriptor = Builtins::CallInterfaceDescriptorFor(builtin());
760 int count = input_count();
761 // Set context.
762 if (descriptor.HasContextParameter()) {
763 input(count - 1).node()->SetTaggedResultNeedsDecompress();
764 count--;
765 }
766 int i = 0;
767 // Set the rest of the tagged inputs.
768 for (; i < count; ++i) {
769 MachineType type = i < descriptor.GetParameterCount()
770 ? descriptor.GetParameterType(i)
771 : MachineType::AnyTagged();
772 if (type.IsTagged() && !type.IsTaggedSigned()) {
773 input(i).node()->SetTaggedResultNeedsDecompress();
774 }
775 }
776}
777#endif
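// TaggedSigned inputs (Smis) are skipped: their payload is usable without
// decompressing, so only full tagged pointers need the explicit decompress.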
778
779void CallCPPBuiltin::VerifyInputs(MaglevGraphLabeller* graph_labeller) const {
780 for (int i = 0; i < input_count(); i++) {
781 CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller);
782 }
783}
784
785#ifdef V8_COMPRESS_POINTERS
786void CallCPPBuiltin::MarkTaggedInputsAsDecompressing() {
787 for (int i = 0; i < input_count(); i++) {
788 input(i).node()->SetTaggedResultNeedsDecompress();
789 }
790}
791#endif
792
793void CallRuntime::VerifyInputs(MaglevGraphLabeller* graph_labeller) const {
794 for (int i = 0; i < input_count(); i++) {
795 CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller);
796 }
797}
798
799#ifdef V8_COMPRESS_POINTERS
800void CallRuntime::MarkTaggedInputsAsDecompressing() {
801 for (int i = 0; i < input_count(); i++) {
802 input(i).node()->SetTaggedResultNeedsDecompress();
803 }
804}
805#endif
806
808 MaglevGraphLabeller* graph_labeller) const {
809 Base::VerifyInputs(graph_labeller);
810 if (auto host_alloc =
811 input(kObjectIndex).node()->TryCast<InlinedAllocation>()) {
812 if (input(kValueIndex).node()->Is<InlinedAllocation>()) {
813 CHECK_EQ(host_alloc->allocation_block()->allocation_type(),
815 }
816 }
817}
818
819void InlinedAllocation::VerifyInputs(
820 MaglevGraphLabeller* graph_labeller) const {
821 Base::VerifyInputs(graph_labeller);
822 CheckValueInputIs(this, 0, Opcode::kAllocationBlock, graph_labeller);
824}
825
829
830void AllocationBlock::TryPretenure() {
831 DCHECK(v8_flags.maglev_pretenure_store_values);
832
835 return;
836 }
838
839 // Recurse over my own inputs
840 for (auto alloc : allocation_list_) {
841 alloc->object()->ForEachInput([&](ValueNode* value) {
842 if (auto next_alloc = value->TryCast<InlinedAllocation>()) {
843 next_alloc->allocation_block()->TryPretenure();
844 } else if (auto phi = value->TryCast<Phi>()) {
845 for (int i = 0; i < phi->input_count(); ++i) {
846 if (auto phi_alloc =
847 phi->input(i).node()->TryCast<InlinedAllocation>()) {
848 phi_alloc->allocation_block()->TryPretenure();
849 }
850 }
851 }
852 });
853 }
854}
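// TryPretenure recurses through the values stored into this allocation block
// (including via phis), so allocations reachable from a pretenured object
// are pretenured as well.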
855
856// ---
857// Reify constants
858// ---
859
860Handle<Object> ValueNode::Reify(LocalIsolate* isolate) {
861 switch (opcode()) {
862#define V(Name) \
863 case Opcode::k##Name: \
864 return this->Cast<Name>()->DoReify(isolate);
865 CONSTANT_VALUE_NODE_LIST(V)
866#undef V
867 default:
868 UNREACHABLE();
869 }
870}
871
875
877 return direct_handle(value_, isolate);
878}
879
883
885 return isolate->factory()->NewNumberFromInt<AllocationType::kOld>(value());
886}
887
889 return isolate->factory()->NewNumberFromUint<AllocationType::kOld>(value());
890}
891
893 return isolate->factory()->NewNumber<AllocationType::kOld>(
894 value_.get_scalar());
895}
896
898 return object_.object();
899}
900
902 return object_.object();
903}
904
906 return isolate->root_handle(index());
907}
908
909#ifdef V8_ENABLE_MAGLEV
910
911bool FromConstantToBool(MaglevAssembler* masm, ValueNode* node) {
912 // TODO(leszeks): Getting the main thread local isolate is not what we
913 // actually want here, but it's all we have, and it happens to work because
914 // really all we're using it for is ReadOnlyRoots. We should change ToBoolean
915 // to be able to pass ReadOnlyRoots in directly.
916 return FromConstantToBool(masm->isolate()->AsLocalIsolate(), node);
917}
918
919// ---
920// Load node to registers
921// ---
922
923namespace {
924template <typename NodeT>
925void LoadToRegisterHelper(NodeT* node, MaglevAssembler* masm, Register reg) {
926 if constexpr (!IsDoubleRepresentation(
927 NodeT::kProperties.value_representation())) {
928 return node->DoLoadToRegister(masm, reg);
929 } else {
930 UNREACHABLE();
931 }
932}
933template <typename NodeT>
934void LoadToRegisterHelper(NodeT* node, MaglevAssembler* masm,
935 DoubleRegister reg) {
936 if constexpr (IsDoubleRepresentation(
937 NodeT::kProperties.value_representation())) {
938 return node->DoLoadToRegister(masm, reg);
939 } else {
940 UNREACHABLE();
941 }
942}
943} // namespace
944
945void ValueNode::LoadToRegister(MaglevAssembler* masm, Register reg) {
946 switch (opcode()) {
947#define V(Name) \
948 case Opcode::k##Name: \
949 return LoadToRegisterHelper(this->Cast<Name>(), masm, reg);
950 CONSTANT_VALUE_NODE_LIST(V)
951#undef V
952 default:
953 UNREACHABLE();
954 }
955}
956void ValueNode::LoadToRegister(MaglevAssembler* masm, DoubleRegister reg) {
957 switch (opcode()) {
958#define V(Name) \
959 case Opcode::k##Name: \
960 return LoadToRegisterHelper(this->Cast<Name>(), masm, reg);
961 CONSTANT_VALUE_NODE_LIST(V)
962#undef V
963 default:
964 UNREACHABLE();
965 }
966}
967
968void ValueNode::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
971 __ Move(reg,
972 masm->GetStackSlot(compiler::AllocatedOperand::cast(spill_slot())));
973}
974
975void ValueNode::DoLoadToRegister(MaglevAssembler* masm, DoubleRegister reg) {
978 __ LoadFloat64(
979 reg, masm->GetStackSlot(compiler::AllocatedOperand::cast(spill_slot())));
980}
981
982void ExternalConstant::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
983 __ Move(reg, reference());
984}
985
986void SmiConstant::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
987 __ Move(reg, value());
988}
989
990void TaggedIndexConstant::DoLoadToRegister(MaglevAssembler* masm,
991 Register reg) {
992 __ Move(reg, value());
993}
994
995void Int32Constant::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
996 __ Move(reg, value());
997}
998
999void Uint32Constant::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
1000 __ Move(reg, value());
1001}
1002
1003void Float64Constant::DoLoadToRegister(MaglevAssembler* masm,
1004 DoubleRegister reg) {
1005 __ Move(reg, value());
1006}
1007
1008void Constant::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
1009 __ Move(reg, object_.object());
1010}
1011
1012void RootConstant::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
1013 __ LoadRoot(reg, index());
1014}
1015
1016void TrustedConstant::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
1017 __ Move(reg, object_.object());
1018}
1019
1020// ---
1021// Arch agnostic nodes
1022// ---
1023
1024#define TURBOLEV_UNREACHABLE_NODE(Name) \
1025 void Name::SetValueLocationConstraints() { UNREACHABLE(); } \
1026 void Name::GenerateCode(MaglevAssembler*, const ProcessingState&) { \
1027 UNREACHABLE(); \
1028 }
1029
1030TURBOLEV_VALUE_NODE_LIST(TURBOLEV_UNREACHABLE_NODE)
1031TURBOLEV_NON_VALUE_NODE_LIST(TURBOLEV_UNREACHABLE_NODE)
1032#undef TURBOLEV_UNREACHABLE_NODE
1033
1034void ExternalConstant::SetValueLocationConstraints() { DefineAsConstant(this); }
1035void ExternalConstant::GenerateCode(MaglevAssembler* masm,
1036 const ProcessingState& state) {}
1037
1038void SmiConstant::SetValueLocationConstraints() { DefineAsConstant(this); }
1039void SmiConstant::GenerateCode(MaglevAssembler* masm,
1040 const ProcessingState& state) {}
1041
1042void TaggedIndexConstant::SetValueLocationConstraints() {
1043 DefineAsConstant(this);
1044}
1045void TaggedIndexConstant::GenerateCode(MaglevAssembler* masm,
1046 const ProcessingState& state) {}
1047
1048void Int32Constant::SetValueLocationConstraints() { DefineAsConstant(this); }
1049void Int32Constant::GenerateCode(MaglevAssembler* masm,
1050 const ProcessingState& state) {}
1051
1052void Uint32Constant::SetValueLocationConstraints() { DefineAsConstant(this); }
1053void Uint32Constant::GenerateCode(MaglevAssembler* masm,
1054 const ProcessingState& state) {}
1055
1056void Float64Constant::SetValueLocationConstraints() { DefineAsConstant(this); }
1057void Float64Constant::GenerateCode(MaglevAssembler* masm,
1058 const ProcessingState& state) {}
1059
1060void Constant::SetValueLocationConstraints() { DefineAsConstant(this); }
1061void Constant::GenerateCode(MaglevAssembler* masm,
1062 const ProcessingState& state) {}
1063
1064void TrustedConstant::SetValueLocationConstraints() { DefineAsConstant(this); }
1065void TrustedConstant::GenerateCode(MaglevAssembler* masm,
1066 const ProcessingState& state) {
1067#ifndef V8_ENABLE_SANDBOX
1068 UNREACHABLE();
1069#endif
1070}
1071
1072void RootConstant::SetValueLocationConstraints() { DefineAsConstant(this); }
1073void RootConstant::GenerateCode(MaglevAssembler* masm,
1074 const ProcessingState& state) {}
1075
1076void InitialValue::SetValueLocationConstraints() {
1077 result().SetUnallocated(compiler::UnallocatedOperand::FIXED_SLOT,
1078 stack_slot(), kNoVreg);
1079}
1080void InitialValue::GenerateCode(MaglevAssembler* masm,
1081 const ProcessingState& state) {
1082 // No-op, the value is already in the appropriate slot.
1083}
1084
1085// static
1086uint32_t InitialValue::stack_slot(uint32_t register_index) {
1087 // TODO(leszeks): Make this nicer.
1091 register_index;
1092}
1093
1094uint32_t InitialValue::stack_slot() const {
1095 return stack_slot(source_.index());
1096}
1097
1098int FunctionEntryStackCheck::MaxCallStackArgs() const { return 0; }
1099void FunctionEntryStackCheck::SetValueLocationConstraints() {
1100 set_temporaries_needed(2);
1101 // kReturnRegister0 should not be one of the available temporary registers.
1102 RequireSpecificTemporary(kReturnRegister0);
1103}
1104void FunctionEntryStackCheck::GenerateCode(MaglevAssembler* masm,
1105 const ProcessingState& state) {
1106 // Stack check. This folds the checks for both the interrupt stack limit
1107 // check and the real stack limit into one by just checking for the
1108 // interrupt limit. The interrupt limit is either equal to the real
1109 // stack limit or tighter. By ensuring we have space until that limit
1110 // after building the frame we can quickly precheck both at once.
1111 const int stack_check_offset = masm->code_gen_state()->stack_check_offset();
1112 // Only NewTarget can be live at this point.
1113 DCHECK_LE(register_snapshot().live_registers.Count(), 1);
1114 Builtin builtin =
1115 register_snapshot().live_tagged_registers.has(
1116 kJavaScriptCallNewTargetRegister)
1117 ? Builtin::kMaglevFunctionEntryStackCheck_WithNewTarget
1118 : Builtin::kMaglevFunctionEntryStackCheck_WithoutNewTarget;
1119 ZoneLabelRef done(masm);
1120 Condition cond = __ FunctionEntryStackCheck(stack_check_offset);
1121 if (masm->isolate()->is_short_builtin_calls_enabled()) {
1122 __ JumpIf(cond, *done, Label::kNear);
1123 __ Move(kReturnRegister0, Smi::FromInt(stack_check_offset));
1124 __ CallBuiltin(builtin);
1125 masm->DefineLazyDeoptPoint(lazy_deopt_info());
1126 } else {
1127 __ JumpToDeferredIf(
1128 NegateCondition(cond),
1129 [](MaglevAssembler* masm, ZoneLabelRef done,
1130 FunctionEntryStackCheck* node, Builtin builtin,
1131 int stack_check_offset) {
1132 __ Move(kReturnRegister0, Smi::FromInt(stack_check_offset));
1133 __ CallBuiltin(builtin);
1134 masm->DefineLazyDeoptPoint(node->lazy_deopt_info());
1135 __ Jump(*done);
1136 },
1137 done, this, builtin, stack_check_offset);
1138 }
1139 __ bind(*done);
1140}
1141
1142void RegisterInput::SetValueLocationConstraints() {
1143 DefineAsFixed(this, input());
1144}
1145void RegisterInput::GenerateCode(MaglevAssembler* masm,
1146 const ProcessingState& state) {
1147 // Nothing to be done, the value is already in the register.
1148}
1149
1150void GetSecondReturnedValue::SetValueLocationConstraints() {
1151 DefineAsFixed(this, kReturnRegister1);
1152}
1153void GetSecondReturnedValue::GenerateCode(MaglevAssembler* masm,
1154 const ProcessingState& state) {
1155 // No-op. This is just a hack that binds kReturnRegister1 to a value node.
1156 // kReturnRegister1 is guaranteed to be free in the register allocator, since
1157 // previous node in the basic block is a call.
1158#ifdef DEBUG
1159 // Check if the previous node is call.
1160 Node* previous = nullptr;
1161 for (Node* node : state.block()->nodes()) {
1162 if (node == this) {
1163 break;
1164 }
1165 previous = node;
1166 }
1167 DCHECK_NE(previous, nullptr);
1168 DCHECK(previous->properties().is_call());
1169#endif // DEBUG
1170}
1171
1172void Deopt::SetValueLocationConstraints() {}
1173void Deopt::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
1174 __ EmitEagerDeopt(this, deoptimize_reason());
1175}
1176
1177void Phi::SetValueLocationConstraints() {
1178 for (Input& input : *this) {
1179 UseAny(input);
1180 }
1181
1182 // We have to pass a policy for the result, but it is ignored during register
1183 // allocation. See StraightForwardRegisterAllocator::AllocateRegisters which
1184 // has special handling for Phis.
1185 static const compiler::UnallocatedOperand::ExtendedPolicy kIgnoredPolicy =
1186 compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT;
1187
1188 result().SetUnallocated(kIgnoredPolicy, kNoVreg);
1189}
1190
1191void Phi::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {}
1192
1193void ArgumentsElements::SetValueLocationConstraints() {
1194 using SloppyArgsD =
1195 CallInterfaceDescriptorFor<Builtin::kNewSloppyArgumentsElements>::type;
1196 using StrictArgsD =
1197 CallInterfaceDescriptorFor<Builtin::kNewStrictArgumentsElements>::type;
1198 using RestArgsD =
1199 CallInterfaceDescriptorFor<Builtin::kNewRestArgumentsElements>::type;
1200 static_assert(
1201 SloppyArgsD::GetRegisterParameter(SloppyArgsD::kArgumentCount) ==
1202 StrictArgsD::GetRegisterParameter(StrictArgsD::kArgumentCount));
1203 static_assert(
1204 SloppyArgsD::GetRegisterParameter(SloppyArgsD::kArgumentCount) ==
1205 StrictArgsD::GetRegisterParameter(RestArgsD::kArgumentCount));
1206 UseFixed(arguments_count_input(),
1207 SloppyArgsD::GetRegisterParameter(SloppyArgsD::kArgumentCount));
1208 DefineAsFixed(this, kReturnRegister0);
1209}
1210
1211void ArgumentsElements::GenerateCode(MaglevAssembler* masm,
1212 const ProcessingState& state) {
1213 Register arguments_count = ToRegister(arguments_count_input());
1214 switch (type()) {
1215 case CreateArgumentsType::kMappedArguments:
1216 __ CallBuiltin<Builtin::kNewSloppyArgumentsElements>(
1217 __ GetFramePointer(), formal_parameter_count(), arguments_count);
1218 break;
1219 case CreateArgumentsType::kUnmappedArguments:
1220 __ CallBuiltin<Builtin::kNewStrictArgumentsElements>(
1221 __ GetFramePointer(), formal_parameter_count(), arguments_count);
1222 break;
1223 case CreateArgumentsType::kRestParameter:
1224 __ CallBuiltin<Builtin::kNewRestArgumentsElements>(
1225 __ GetFramePointer(), formal_parameter_count(), arguments_count);
1226 break;
1227 }
1228}
1229
1230void AllocateElementsArray::SetValueLocationConstraints() {
1231 UseAndClobberRegister(length_input());
1232 DefineAsRegister(this);
1233 set_temporaries_needed(1);
1234}
1235void AllocateElementsArray::GenerateCode(MaglevAssembler* masm,
1236 const ProcessingState& state) {
1237 Register length = ToRegister(length_input());
1238 Register elements = ToRegister(result());
1239 Label allocate_elements, done;
1240 MaglevAssembler::TemporaryRegisterScope temps(masm);
1241 Register scratch = temps.Acquire();
1242 // Be sure to save the length in the register snapshot.
1243 RegisterSnapshot snapshot = register_snapshot();
1244 snapshot.live_registers.set(length);
1245
1246 // Return empty fixed array if length equal zero.
1247 __ CompareInt32AndJumpIf(length, 0, kNotEqual, &allocate_elements,
1248 Label::kNear);
1249 __ LoadRoot(elements, RootIndex::kEmptyFixedArray);
1250 __ Jump(&done);
1251
1252 // Allocate a fixed array object.
1253 __ bind(&allocate_elements);
1254 __ CompareInt32AndJumpIf(
1255 length, JSArray::kInitialMaxFastElementArray, kGreaterThanEqual,
1256 __ GetDeoptLabel(this,
1257 DeoptimizeReason::kGreaterThanMaxFastElementArray));
1258 {
1259 Register size_in_bytes = scratch;
1260 __ Move(size_in_bytes, length);
1261 __ ShiftLeft(size_in_bytes, kTaggedSizeLog2);
1262 __ AddInt32(size_in_bytes, OFFSET_OF_DATA_START(FixedArray));
1263 __ Allocate(snapshot, elements, size_in_bytes, allocation_type_);
1264 __ SetMapAsRoot(elements, RootIndex::kFixedArrayMap);
1265 }
1266 {
1267 Register smi_length = scratch;
1268 __ UncheckedSmiTagInt32(smi_length, length);
1269 __ StoreTaggedFieldNoWriteBarrier(elements, offsetof(FixedArray, length_),
1270 smi_length);
1271 }
1272
1273 // Initialize the array with holes.
1274 {
1275 Label loop;
1276 Register the_hole = scratch;
1277 __ LoadTaggedRoot(the_hole, RootIndex::kTheHoleValue);
1278 __ bind(&loop);
1279 __ DecrementInt32(length);
1280 // TODO(victorgomes): This can be done more efficiently by have the root
1281 // (the_hole) as an immediate in the store.
1282 __ StoreFixedArrayElementNoWriteBarrier(elements, length, the_hole);
1283 __ CompareInt32AndJumpIf(length, 0, kGreaterThan, &loop,
1284 Label::kNear);
1285 }
1286 __ bind(&done);
1287}
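// e.g. with length == 4 and 8-byte tagged slots, size_in_bytes becomes
// (4 << kTaggedSizeLog2) + OFFSET_OF_DATA_START(FixedArray) before the
// Allocate call.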
1288
1289namespace {
1290
1291constexpr Builtin BuiltinFor(Operation operation) {
1292 switch (operation) {
1293#define CASE(name) \
1294 case Operation::k##name: \
1295 return Builtin::k##name##_WithFeedback;
1296 OPERATION_LIST(CASE)
1297#undef CASE
1298 }
1299}
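// e.g. BuiltinFor(Operation::kAdd) == Builtin::kAdd_WithFeedback.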
1300
1301} // namespace
1302
1303template <class Derived, Operation kOperation>
1304void UnaryWithFeedbackNode<Derived, kOperation>::SetValueLocationConstraints() {
1305 using D = UnaryOp_WithFeedbackDescriptor;
1306 UseFixed(operand_input(), D::GetRegisterParameter(D::kValue));
1307 DefineAsFixed(this, kReturnRegister0);
1308}
1309
1310template <class Derived, Operation kOperation>
1311void UnaryWithFeedbackNode<Derived, kOperation>::GenerateCode(
1312 MaglevAssembler* masm, const ProcessingState& state) {
1313 __ CallBuiltin<BuiltinFor(kOperation)>(
1314 masm->native_context().object(), // context
1315 operand_input(), // value
1316 feedback().index(), // feedback slot
1317 feedback().vector // feedback vector
1318 );
1319 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
1320}
1321
1322template <class Derived, Operation kOperation>
1323void BinaryWithFeedbackNode<Derived,
1324 kOperation>::SetValueLocationConstraints() {
1325 using D = BinaryOp_WithFeedbackDescriptor;
1326 UseFixed(left_input(), D::GetRegisterParameter(D::kLeft));
1327 UseFixed(right_input(), D::GetRegisterParameter(D::kRight));
1328 DefineAsFixed(this, kReturnRegister0);
1329}
1330
1331template <class Derived, Operation kOperation>
1332void BinaryWithFeedbackNode<Derived, kOperation>::GenerateCode(
1333 MaglevAssembler* masm, const ProcessingState& state) {
1334 __ CallBuiltin<BuiltinFor(kOperation)>(
1335 masm->native_context().object(), // context
1336 left_input(), // left
1337 right_input(), // right
1338 feedback().index(), // feedback slot
1339 feedback().vector // feedback vector
1340 );
1341 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
1342}
1343
1344#define DEF_OPERATION(Name) \
1345 void Name::SetValueLocationConstraints() { \
1346 Base::SetValueLocationConstraints(); \
1347 } \
1348 void Name::GenerateCode(MaglevAssembler* masm, \
1349 const ProcessingState& state) { \
1350 Base::GenerateCode(masm, state); \
1351 }
1352GENERIC_OPERATIONS_NODE_LIST(DEF_OPERATION)
1353#undef DEF_OPERATION
1354
1356
1357namespace {
1358template <typename T>
1359struct GetRegister;
1360template <>
1361struct GetRegister<Register> {
1362 static Register Get(compiler::AllocatedOperand target) {
1363 return target.GetRegister();
1364 }
1365};
1366template <>
1367struct GetRegister<DoubleRegister> {
1368 static DoubleRegister Get(compiler::AllocatedOperand target) {
1369 return target.GetDoubleRegister();
1370 }
1371};
1372} // namespace
1373
1374void ConstantGapMove::GenerateCode(MaglevAssembler* masm,
1375 const ProcessingState& state) {
1376 switch (node_->opcode()) {
1377#define CASE(Name) \
1378 case Opcode::k##Name: \
1379 return node_->Cast<Name>()->DoLoadToRegister( \
1380 masm, GetRegister<Name::OutputRegister>::Get(target()));
1381 CONSTANT_VALUE_NODE_LIST(CASE)
1382#undef CASE
1383 default:
1384 UNREACHABLE();
1385 }
1386}
1387
1389void GapMove::GenerateCode(MaglevAssembler* masm,
1390 const ProcessingState& state) {
1391 DCHECK_EQ(source().representation(), target().representation());
1392 MachineRepresentation repr = source().representation();
1393 if (source().IsRegister()) {
1394 Register source_reg = ToRegister(source());
1395 if (target().IsAnyRegister()) {
1396 DCHECK(target().IsRegister());
1397 __ MoveRepr(repr, ToRegister(target()), source_reg);
1398 } else {
1399 __ MoveRepr(repr, masm->ToMemOperand(target()), source_reg);
1400 }
1401 } else if (source().IsDoubleRegister()) {
1402 DoubleRegister source_reg = ToDoubleRegister(source());
1403 if (target().IsAnyRegister()) {
1404 DCHECK(target().IsDoubleRegister());
1405 __ Move(ToDoubleRegister(target()), source_reg);
1406 } else {
1407 __ StoreFloat64(masm->ToMemOperand(target()), source_reg);
1408 }
1409 } else {
1410 DCHECK(source().IsAnyStackSlot());
1411 MemOperand source_op = masm->ToMemOperand(source());
1412 if (target().IsRegister()) {
1413 __ MoveRepr(repr, ToRegister(target()),
1414 source_op);
1415 } else if (target().IsDoubleRegister()) {
1416 __ LoadFloat64(ToDoubleRegister(target()), source_op);
1417 } else {
1418 DCHECK(target().IsAnyStackSlot());
1420 __ MoveRepr(repr, masm->ToMemOperand(target()), source_op);
1421 }
1422 }
1423}
1424
1425void AssertInt32::SetValueLocationConstraints() {
1426 UseRegister(left_input());
1427 UseRegister(right_input());
1428}
1429void AssertInt32::GenerateCode(MaglevAssembler* masm,
1430 const ProcessingState& state) {
1431 __ CompareInt32AndAssert(ToRegister(left_input()), ToRegister(right_input()),
1432 ToCondition(condition_), reason_);
1433}
1434
1435void CheckUint32IsSmi::SetValueLocationConstraints() { UseRegister(input()); }
1436void CheckUint32IsSmi::GenerateCode(MaglevAssembler* masm,
1437 const ProcessingState& state) {
1438 Register reg = ToRegister(input());
1439 // Perform an unsigned comparison against Smi::kMaxValue.
1440 __ CompareUInt32AndEmitEagerDeoptIf(reg, Smi::kMaxValue, kUnsignedGreaterThan,
1441 DeoptimizeReason::kNotASmi, this);
1442}
1443
1444void CheckedSmiUntag::SetValueLocationConstraints() {
1445 UseRegister(input());
1446 DefineSameAsFirst(this);
1447}
1448
1449void CheckedSmiUntag::GenerateCode(MaglevAssembler* masm,
1450 const ProcessingState& state) {
1451 Register value = ToRegister(input());
1452 // TODO(leszeks): Consider optimizing away this test and using the carry bit
1453 // of the `sarl` for cases where the deopt uses the value from a different
1454 // register.
1455 __ EmitEagerDeoptIfNotSmi(this, value, DeoptimizeReason::kNotASmi);
1456 __ SmiToInt32(value);
1457}
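// Smis carry a zero low tag bit, so the deopt check is a single bit test and
// SmiToInt32 is essentially an arithmetic shift that drops the tag.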
1458
1459void UnsafeSmiUntag::SetValueLocationConstraints() {
1460 UseRegister(input());
1461 DefineSameAsFirst(this);
1462}
1463
1464void UnsafeSmiUntag::GenerateCode(MaglevAssembler* masm,
1465 const ProcessingState& state) {
1466 Register value = ToRegister(input());
1467 __ AssertSmi(value);
1468 __ SmiToInt32(value);
1469}
1470
1471void CheckInt32IsSmi::SetValueLocationConstraints() { UseRegister(input()); }
1472void CheckInt32IsSmi::GenerateCode(MaglevAssembler* masm,
1473 const ProcessingState& state) {
1474 // We shouldn't be emitting this node for 32-bit Smis.
1475 DCHECK(!SmiValuesAre32Bits());
1476
1477 // TODO(leszeks): This basically does a SmiTag and throws the result away.
1478 // Don't throw the result away if we want to actually use it.
1479 Register reg = ToRegister(input());
1480 Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kNotASmi);
1481 __ CheckInt32IsSmi(reg, fail);
1482}
1483
1484void CheckIntPtrIsSmi::SetValueLocationConstraints() { UseRegister(input()); }
1485void CheckIntPtrIsSmi::GenerateCode(MaglevAssembler* masm,
1486 const ProcessingState& state) {
1487 Register reg = ToRegister(input());
1488 Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kNotASmi);
1489 __ CheckIntPtrIsSmi(reg, fail);
1490}
1491
1492void CheckedInt32ToUint32::SetValueLocationConstraints() {
1493 UseRegister(input());
1494 DefineSameAsFirst(this);
1495}
1496void CheckedInt32ToUint32::GenerateCode(MaglevAssembler* masm,
1497 const ProcessingState& state) {
1498 __ CompareInt32AndJumpIf(
1499 ToRegister(input()), 0, kLessThan,
1500 __ GetDeoptLabel(this, DeoptimizeReason::kNotUint32));
1501}
1502
1503void CheckedIntPtrToUint32::SetValueLocationConstraints() {
1504 UseRegister(input());
1505 DefineSameAsFirst(this);
1506 set_temporaries_needed(1);
1507}
1508
1509void CheckedIntPtrToUint32::GenerateCode(MaglevAssembler* masm,
1510 const ProcessingState& state) {
1511 MaglevAssembler::TemporaryRegisterScope temps(masm);
1512 Register scratch = temps.Acquire();
1513 __ Move(scratch, std::numeric_limits<uint32_t>::max());
1514 __ CompareIntPtrAndJumpIf(
1515 ToRegister(input()), scratch, kUnsignedGreaterThan,
1516 __ GetDeoptLabel(this, DeoptimizeReason::kNotUint32));
1517}
1518
1519void UnsafeInt32ToUint32::SetValueLocationConstraints() {
1520 UseRegister(input());
1521 DefineSameAsFirst(this);
1522}
1523void UnsafeInt32ToUint32::GenerateCode(MaglevAssembler* masm,
1524 const ProcessingState& state) {}
1525
1526void CheckHoleyFloat64IsSmi::SetValueLocationConstraints() {
1527 UseRegister(input());
1528 set_temporaries_needed(1);
1529}
1530void CheckHoleyFloat64IsSmi::GenerateCode(MaglevAssembler* masm,
1531 const ProcessingState& state) {
1532 DoubleRegister value = ToDoubleRegister(input());
1533 MaglevAssembler::TemporaryRegisterScope temps(masm);
1534 Register scratch = temps.Acquire();
1535 Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kNotASmi);
1536 __ TryTruncateDoubleToInt32(scratch, value, fail);
1537 if (!SmiValuesAre32Bits()) {
1538 __ CheckInt32IsSmi(scratch, fail, scratch);
1539 }
1540}
1541
1542void CheckedSmiTagInt32::SetValueLocationConstraints() {
1543 UseRegister(input());
1544 DefineSameAsFirst(this);
1545}
1546void CheckedSmiTagInt32::GenerateCode(MaglevAssembler* masm,
1547 const ProcessingState& state) {
1548 Register reg = ToRegister(input());
1549 Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kNotASmi);
1550 // None of the mutated input registers should be a register input into the
1551 // eager deopt info.
1552 DCHECK_REGLIST_EMPTY(RegList{reg} &
1553 GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
1554 __ SmiTagInt32AndJumpIfFail(reg, fail);
1555}
1556
1557void CheckedSmiSizedInt32::SetValueLocationConstraints() {
1558 UseRegister(input());
1559 DefineSameAsFirst(this);
1560}
1561void CheckedSmiSizedInt32::GenerateCode(MaglevAssembler* masm,
1562 const ProcessingState& state) {
1563 // We shouldn't be emitting this node for 32-bit Smis.
1564 DCHECK(!SmiValuesAre32Bits());
1565
1566 Register reg = ToRegister(input());
1567 Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kNotASmi);
1568 __ CheckInt32IsSmi(reg, fail);
1569}
1570
1571void CheckedSmiTagUint32::SetValueLocationConstraints() {
1572 UseRegister(input());
1573 DefineSameAsFirst(this);
1574}
1575void CheckedSmiTagUint32::GenerateCode(MaglevAssembler* masm,
1576 const ProcessingState& state) {
1577 Register reg = ToRegister(input());
1578 Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kNotASmi);
1579 // None of the mutated input registers should be a register input into the
1580 // eager deopt info.
1581 DCHECK_REGLIST_EMPTY(RegList{reg} &
1582 GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
1583 __ SmiTagUint32AndJumpIfFail(reg, fail);
1584}
1585
1586void CheckedSmiTagIntPtr::SetValueLocationConstraints() {
1587 UseRegister(input());
1588 DefineSameAsFirst(this);
1589}
1590void CheckedSmiTagIntPtr::GenerateCode(MaglevAssembler* masm,
1591 const ProcessingState& state) {
1592 Register reg = ToRegister(input());
1593 Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kNotASmi);
1594 // None of the mutated input registers should be a register input into the
1595 // eager deopt info.
1596 DCHECK_REGLIST_EMPTY(RegList{reg} &
1597 GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
1598 __ SmiTagIntPtrAndJumpIfFail(reg, reg, fail);
1599}
1600
1601void UnsafeSmiTagInt32::SetValueLocationConstraints() {
1602 UseRegister(input());
1603 DefineSameAsFirst(this);
1604}
1605void UnsafeSmiTagInt32::GenerateCode(MaglevAssembler* masm,
1606 const ProcessingState& state) {
1607 __ UncheckedSmiTagInt32(ToRegister(input()));
1608}
1609
1610void UnsafeSmiTagUint32::SetValueLocationConstraints() {
1611 UseRegister(input());
1612 DefineSameAsFirst(this);
1613}
1614void UnsafeSmiTagUint32::GenerateCode(MaglevAssembler* masm,
1615 const ProcessingState& state) {
1616 __ UncheckedSmiTagUint32(ToRegister(input()));
1617}
1618
1619void UnsafeSmiTagIntPtr::SetValueLocationConstraints() {
1620 UseRegister(input());
1621 DefineSameAsFirst(this);
1622}
1623void UnsafeSmiTagIntPtr::GenerateCode(MaglevAssembler* masm,
1624 const ProcessingState& state) {
1625 // If the IntPtr is guaranteed to be a SMI, we can treat it as Int32.
1626 // TODO(388844115): Rename IntPtr to make it clear it's non-negative.
1627 __ UncheckedSmiTagInt32(ToRegister(input()));
1628}
1629
1630void CheckedSmiIncrement::SetValueLocationConstraints() {
1631 UseRegister(value_input());
1632 DefineSameAsFirst(this);
1633}
1634
1635void CheckedSmiIncrement::GenerateCode(MaglevAssembler* masm,
1636 const ProcessingState& state) {
1637 Label* deopt_label = __ GetDeoptLabel(this, DeoptimizeReason::kOverflow);
1638 __ SmiAddConstant(ToRegister(value_input()), 1, deopt_label);
1639}
1640
1641void CheckedSmiDecrement::SetValueLocationConstraints() {
1642 UseRegister(value_input());
1643 DefineSameAsFirst(this);
1644}
1645
1646void CheckedSmiDecrement::GenerateCode(MaglevAssembler* masm,
1647 const ProcessingState& state) {
1648 Label* deopt_label = __ GetDeoptLabel(this, DeoptimizeReason::kOverflow);
1649 __ SmiSubConstant(ToRegister(value_input()), 1, deopt_label);
1650}
1651
1652namespace {
1653
1654void JumpToFailIfNotHeapNumberOrOddball(
1655 MaglevAssembler* masm, Register value,
1656 TaggedToFloat64ConversionType conversion_type, Label* fail) {
1657 if (!fail && !v8_flags.debug_code) return;
1658
1659 static_assert(InstanceType::HEAP_NUMBER_TYPE + 1 ==
1660 InstanceType::ODDBALL_TYPE);
1661 switch (conversion_type) {
1662 case TaggedToFloat64ConversionType::kNumberOrBoolean: {
1663 // Check if HeapNumber or Boolean, jump to fail otherwise.
1664 MaglevAssembler::TemporaryRegisterScope temps(masm);
1665 Register map = temps.AcquireScratch();
1666
1667#if V8_STATIC_ROOTS_BOOL
1668 static_assert(StaticReadOnlyRoot::kBooleanMap + Map::kSize ==
1669 StaticReadOnlyRoot::kHeapNumberMap);
1670 __ LoadMapForCompare(map, value);
1671 if (fail) {
1672 __ JumpIfObjectNotInRange(map, StaticReadOnlyRoot::kBooleanMap,
1673 StaticReadOnlyRoot::kHeapNumberMap, fail);
1674 } else {
1675 __ AssertObjectInRange(map, StaticReadOnlyRoot::kBooleanMap,
1676 StaticReadOnlyRoot::kHeapNumberMap,
1677 AbortReason::kUnexpectedValue);
1678 }
1679#else
1680 Label done;
1681 __ LoadMap(map, value);
1682 __ CompareRoot(map, RootIndex::kHeapNumberMap);
1683 __ JumpIf(kEqual, &done);
1684 __ CompareRoot(map, RootIndex::kBooleanMap);
1685 if (fail) {
1686 __ JumpIf(kNotEqual, fail);
1687 } else {
1688 __ Assert(kEqual, AbortReason::kUnexpectedValue);
1689 }
1690 __ bind(&done);
1691#endif
1692 break;
1693 }
1694 case TaggedToFloat64ConversionType::kNumberOrOddball:
1695 // Check if HeapNumber or Oddball, jump to fail otherwise.
1696 if (fail) {
1697 __ JumpIfObjectTypeNotInRange(value, InstanceType::HEAP_NUMBER_TYPE,
1698 InstanceType::ODDBALL_TYPE, fail);
1699 } else {
1700 __ AssertObjectTypeInRange(value, InstanceType::HEAP_NUMBER_TYPE,
1701 InstanceType::ODDBALL_TYPE,
1702 AbortReason::kUnexpectedValue);
1703 }
1704 break;
1705 case TaggedToFloat64ConversionType::kOnlyNumber:
1706 // Check if HeapNumber, jump to fail otherwise.
1707 if (fail) {
1708 __ JumpIfNotObjectType(value, InstanceType::HEAP_NUMBER_TYPE, fail);
1709 } else {
1710 __ AssertObjectType(value, InstanceType::HEAP_NUMBER_TYPE,
1711 AbortReason::kUnexpectedValue);
1712 }
1713 break;
1714 }
1715}
1716
1717void TryUnboxNumberOrOddball(MaglevAssembler* masm, DoubleRegister dst,
1718 Register clobbered_src,
1719 TaggedToFloat64ConversionType conversion_type,
1720 Label* fail) {
1721 Label is_not_smi, done;
1722 // Check if Smi.
1723 __ JumpIfNotSmi(clobbered_src, &is_not_smi, Label::kNear);
1724 // If Smi, convert to Float64.
1725 __ SmiToInt32(clobbered_src);
1726 __ Int32ToDouble(dst, clobbered_src);
1727 __ Jump(&done);
1728 __ bind(&is_not_smi);
1729 JumpToFailIfNotHeapNumberOrOddball(masm, clobbered_src, conversion_type,
1730 fail);
1731 __ LoadHeapNumberOrOddballValue(dst, clobbered_src);
1732 __ bind(&done);
1733}
1734
1735} // namespace
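// TryUnboxNumberOrOddball is shared by the checked and unchecked conversion
// nodes below: passing fail == nullptr turns the heap-object type check into
// a debug-mode assertion only.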
1736template <typename Derived, ValueRepresentation FloatType>
1737 requires(FloatType == ValueRepresentation::kFloat64 ||
1738 FloatType == ValueRepresentation::kHoleyFloat64)
1739void CheckedNumberOrOddballToFloat64OrHoleyFloat64<
1740 Derived, FloatType>::SetValueLocationConstraints() {
1741 UseAndClobberRegister(input());
1742 DefineAsRegister(this);
1743}
1744template <typename Derived, ValueRepresentation FloatType>
1745 requires(FloatType == ValueRepresentation::kFloat64 ||
1747void CheckedNumberOrOddballToFloat64OrHoleyFloat64<
1748 Derived, FloatType>::GenerateCode(MaglevAssembler* masm,
1749 const ProcessingState& state) {
1750 Register value = ToRegister(input());
1751 TryUnboxNumberOrOddball(masm, ToDoubleRegister(result()), value,
1752 conversion_type(),
1753 __ GetDeoptLabel(this, deoptimize_reason()));
1754}
1755
1756void UncheckedNumberOrOddballToFloat64::SetValueLocationConstraints() {
1757 UseAndClobberRegister(input());
1758 DefineAsRegister(this);
1759}
1760void UncheckedNumberOrOddballToFloat64::GenerateCode(
1761 MaglevAssembler* masm, const ProcessingState& state) {
1762 Register value = ToRegister(input());
1763 TryUnboxNumberOrOddball(masm, ToDoubleRegister(result()), value,
1764 conversion_type(), nullptr);
1765}
1766
1767void CheckedNumberToInt32::SetValueLocationConstraints() {
1768 UseRegister(input());
1769 DefineAsRegister(this);
1770 set_double_temporaries_needed(1);
1771}
1772void CheckedNumberToInt32::GenerateCode(MaglevAssembler* masm,
1773 const ProcessingState& state) {
1774 MaglevAssembler::TemporaryRegisterScope temps(masm);
1775 DoubleRegister double_value = temps.AcquireDouble();
1776 Register value = ToRegister(input());
1777 Label is_not_smi, done;
1778 Label* deopt_label = __ GetDeoptLabel(this, DeoptimizeReason::kNotInt32);
1779 // Check if Smi.
1780 __ JumpIfNotSmi(value, &is_not_smi, Label::kNear);
1781 __ SmiToInt32(ToRegister(result()), value);
1782 __ Jump(&done);
1783 __ bind(&is_not_smi);
1784 // Check if Number.
1785 JumpToFailIfNotHeapNumberOrOddball(
1786 masm, value, TaggedToFloat64ConversionType::kOnlyNumber, deopt_label);
1787 __ LoadHeapNumberValue(double_value, value);
1788 __ TryTruncateDoubleToInt32(ToRegister(result()), double_value, deopt_label);
1789 __ bind(&done);
1790}
1791
1792namespace {
1793
1794void EmitTruncateNumberOrOddballToInt32(
1795 MaglevAssembler* masm, Register value, Register result_reg,
1796 TaggedToFloat64ConversionType conversion_type, Label* not_a_number) {
1797 Label is_not_smi, done;
1798 // Check if Smi.
1799 __ JumpIfNotSmi(value, &is_not_smi, Label::kNear);
1800 // If Smi, convert to Int32.
1801 __ SmiToInt32(value);
1802 __ Jump(&done, Label::kNear);
1803 __ bind(&is_not_smi);
1804 JumpToFailIfNotHeapNumberOrOddball(masm, value, conversion_type,
1805 not_a_number);
1806 MaglevAssembler::TemporaryRegisterScope temps(masm);
1807 DoubleRegister double_value = temps.AcquireScratchDouble();
1808 __ LoadHeapNumberOrOddballValue(double_value, value);
1809 __ TruncateDoubleToInt32(result_reg, double_value);
1810 __ bind(&done);
1811}
1812
1813} // namespace
1814
1815void CheckedObjectToIndex::SetValueLocationConstraints() {
1816 UseRegister(object_input());
1817 DefineAsRegister(this);
1818 set_double_temporaries_needed(1);
1819}
1820void CheckedObjectToIndex::GenerateCode(MaglevAssembler* masm,
1821 const ProcessingState& state) {
1822 Register object = ToRegister(object_input());
1823 Register result_reg = ToRegister(result());
1824 ZoneLabelRef done(masm);
1825 __ JumpIfNotSmi(
1826 object,
1827 __ MakeDeferredCode(
1828 [](MaglevAssembler* masm, Register object, Register result_reg,
1829 ZoneLabelRef done, CheckedObjectToIndex* node) {
1830 MaglevAssembler::TemporaryRegisterScope temps(masm);
1831 Register map = temps.AcquireScratch();
1832 Label check_string;
1833 __ LoadMapForCompare(map, object);
1834 __ JumpIfNotRoot(
1835 map, RootIndex::kHeapNumberMap, &check_string,
1836 v8_flags.deopt_every_n_times > 0 ? Label::kFar : Label::kNear);
1837 {
1838 DoubleRegister number_value = temps.AcquireDouble();
1839 __ LoadHeapNumberValue(number_value, object);
1840 __ TryChangeFloat64ToIndex(
1841 result_reg, number_value, *done,
1842 __ GetDeoptLabel(node, DeoptimizeReason::kNotInt32));
1843 }
1844 __ bind(&check_string);
1845 // The IC will go generic if it encounters something other than a
1846 // Number or String key.
1847 __ JumpIfStringMap(
1848 map, __ GetDeoptLabel(node, DeoptimizeReason::kNotInt32),
1849 Label::kFar, false);
1850 // map is clobbered after this call.
1851
1852 {
1853 // TODO(verwaest): Load the cached number from the string hash.
1854 RegisterSnapshot snapshot = node->register_snapshot();
1855 snapshot.live_registers.clear(result_reg);
1856 DCHECK(!snapshot.live_tagged_registers.has(result_reg));
1857 {
1858 SaveRegisterStateForCall save_register_state(masm, snapshot);
1859 AllowExternalCallThatCantCauseGC scope(masm);
1860 __ PrepareCallCFunction(1);
1861 __ Move(kCArgRegs[0], object);
1862 __ CallCFunction(
1863 ExternalReference::string_to_array_index_function(), 1);
1864 // No need for safepoint since this is a fast C call.
1865 __ Move(result_reg, kReturnRegister0);
1866 }
1867 __ CompareInt32AndJumpIf(
1868 result_reg, 0, kLessThan,
1869 __ GetDeoptLabel(node, DeoptimizeReason::kNotInt32));
1870 __ Jump(*done);
1871 }
1872 },
1873 object, result_reg, done, this));
1874
1875 // If we didn't enter the deferred block, we're a Smi.
1876 __ SmiToInt32(result_reg, object);
1877 __ bind(*done);
1878}
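// Fast path: Smi keys convert directly. The deferred path handles HeapNumber
// keys via TryChangeFloat64ToIndex and string keys via the C function
// string_to_array_index_function, deopting for anything else.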
1879
1880void CheckedTruncateNumberOrOddballToInt32::SetValueLocationConstraints() {
1881 UseRegister(input());
1882 DefineSameAsFirst(this);
1883}
1884void CheckedTruncateNumberOrOddballToInt32::GenerateCode(
1885 MaglevAssembler* masm, const ProcessingState& state) {
1886 Register value = ToRegister(input());
1887 Register result_reg = ToRegister(result());
1888 DCHECK_EQ(value, result_reg);
1889 Label* deopt_label =
1890 __ GetDeoptLabel(this, DeoptimizeReason::kNotANumberOrOddball);
1891 EmitTruncateNumberOrOddballToInt32(masm, value, result_reg, conversion_type(),
1892 deopt_label);
1893}
1894
1895void TruncateNumberOrOddballToInt32::SetValueLocationConstraints() {
1896 UseRegister(input());
1897 DefineSameAsFirst(this);
1898}
1899void TruncateNumberOrOddballToInt32::GenerateCode(
1900 MaglevAssembler* masm, const ProcessingState& state) {
1901 Register value = ToRegister(input());
1902 Register result_reg = ToRegister(result());
1903 DCHECK_EQ(value, result_reg);
1904 EmitTruncateNumberOrOddballToInt32(masm, value, result_reg, conversion_type(),
1905 nullptr);
1906}
1907
1908void ChangeInt32ToFloat64::SetValueLocationConstraints() {
1909 UseRegister(input());
1910 DefineAsRegister(this);
1911}
1912void ChangeInt32ToFloat64::GenerateCode(MaglevAssembler* masm,
1913 const ProcessingState& state) {
1914 __ Int32ToDouble(ToDoubleRegister(result()), ToRegister(input()));
1915}
1916
1917void ChangeUint32ToFloat64::SetValueLocationConstraints() {
1918 UseRegister(input());
1919 DefineAsRegister(this);
1920}
1921void ChangeUint32ToFloat64::GenerateCode(MaglevAssembler* masm,
1922 const ProcessingState& state) {
1923 __ Uint32ToDouble(ToDoubleRegister(result()), ToRegister(input()));
1924}
1925
1926void ChangeIntPtrToFloat64::SetValueLocationConstraints() {
1927 UseRegister(input());
1928 DefineAsRegister(this);
1929}
1930void ChangeIntPtrToFloat64::GenerateCode(MaglevAssembler* masm,
1931 const ProcessingState& state) {
1932 __ IntPtrToDouble(ToDoubleRegister(result()), ToRegister(input()));
1933}
1934
1935void CheckMaps::SetValueLocationConstraints() {
1936 UseRegister(receiver_input());
1937 set_temporaries_needed(MapCompare::TemporaryCount(maps_.size()));
1938}
1939
1940void CheckMaps::GenerateCode(MaglevAssembler* masm,
1941 const ProcessingState& state) {
1942 Register object = ToRegister(receiver_input());
1943
1944 // We emit an unconditional deopt if we intersect the map sets and the
1945 // intersection is empty.
1946 DCHECK(!maps().is_empty());
1947
1948 bool maps_include_heap_number = compiler::AnyMapIsHeapNumber(maps());
1949
1950 // Experimentally figured out map limit (with slack) which allows us to use
1951 // near jumps in the code below. If --deopt-every-n-times is on, we generate
1952 // a bit more code, so disable the near jump optimization.
1953 constexpr int kMapCountForNearJumps = kTaggedSize == 4 ? 10 : 5;
1954 Label::Distance jump_distance = (maps().size() <= kMapCountForNearJumps &&
1955 v8_flags.deopt_every_n_times <= 0)
1957 : Label::Distance::kFar;
1958
1959 Label done;
1960 if (check_type() == CheckType::kOmitHeapObjectCheck) {
1961 __ AssertNotSmi(object);
1962 } else {
1963 if (maps_include_heap_number) {
1964 // Smis count as matching the HeapNumber map, so we're done.
1965 __ JumpIfSmi(object, &done, jump_distance);
1966 } else {
1967 __ EmitEagerDeoptIfSmi(this, object, DeoptimizeReason::kWrongMap);
1968 }
1969 }
1970
1971 MapCompare map_compare(masm, object, maps_.size());
1972 size_t map_count = maps().size();
1973 for (size_t i = 0; i < map_count - 1; ++i) {
1974 Handle<Map> map = maps().at(i).object();
1975 map_compare.Generate(map, kEqual, &done, jump_distance);
1976 }
1977 Handle<Map> last_map = maps().at(map_count - 1).object();
1978 Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kWrongMap);
1979 map_compare.Generate(last_map, kNotEqual, fail);
1980 __ bind(&done);
1981}
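// The generated check is a linear chain of map comparisons: each of the
// first N-1 maps jumps to done on equality, and the final comparison deopts
// on inequality.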
1982
1983int CheckMapsWithMigrationAndDeopt::MaxCallStackArgs() const {
1984 DCHECK_EQ(Runtime::FunctionForId(
1985 Runtime::kTryMigrateInstanceAndMarkMapAsMigrationTarget)
1986 ->nargs,
1987 1);
1988 return 1;
1989}
1990
1991void CheckMapsWithMigrationAndDeopt::SetValueLocationConstraints() {
1992 UseRegister(receiver_input());
1993 set_temporaries_needed(MapCompare::TemporaryCount(maps_.size()));
1994}
1995
1996void CheckMapsWithMigrationAndDeopt::GenerateCode(
1997 MaglevAssembler* masm, const ProcessingState& state) {
1998 Register object = ToRegister(receiver_input());
1999
2000 // We emit an unconditional deopt if we intersect the map sets and the
2001 // intersection is empty.
2002 DCHECK(!maps().is_empty());
2003
2004 bool maps_include_heap_number = compiler::AnyMapIsHeapNumber(maps());
2005
2006 // Experimentally figured out map limit (with slack) which allows us to use
2007 // near jumps in the code below. If --deopt-every-n-times is on, we generate
2008 // a bit more code, so disable the near jump optimization.
2009 constexpr int kMapCountForNearJumps = kTaggedSize == 4 ? 10 : 5;
2010 Label::Distance jump_distance = (maps().size() <= kMapCountForNearJumps &&
2011 v8_flags.deopt_every_n_times <= 0)
2013 : Label::Distance::kFar;
2014
2015 Label done;
2016 if (check_type() == CheckType::kOmitHeapObjectCheck) {
2017 __ AssertNotSmi(object);
2018 } else {
2019 if (maps_include_heap_number) {
2020 // Smis count as matching the HeapNumber map, so we're done.
2021 __ JumpIfSmi(object, &done, jump_distance);
2022 } else {
2023 __ EmitEagerDeoptIfSmi(this, object, DeoptimizeReason::kWrongMap);
2024 }
2025 }
2026
2027 MapCompare map_compare(masm, object, maps_.size());
2028 size_t map_count = maps().size();
2029 for (size_t i = 0; i < map_count - 1; ++i) {
2030 Handle<Map> map = maps().at(i).object();
2031 map_compare.Generate(map, kEqual, &done, jump_distance);
2032 }
2033
2034 Handle<Map> last_map = maps().at(map_count - 1).object();
2035 map_compare.Generate(
2036 last_map, kNotEqual,
2037 __ MakeDeferredCode(
2038 [](MaglevAssembler* masm, RegisterSnapshot register_snapshot,
2039 MapCompare map_compare, CheckMapsWithMigrationAndDeopt* node) {
2040 Label* deopt = __ GetDeoptLabel(node, DeoptimizeReason::kWrongMap);
2041 // If the map is not deprecated, we fail the map check.
2042 __ TestInt32AndJumpIfAllClear(
2043 FieldMemOperand(map_compare.GetMap(), Map::kBitField3Offset),
2044 Map::Bits3::IsDeprecatedBit::kMask, deopt);
2045
2046 // Otherwise, try migrating the object.
2047 __ TryMigrateInstanceAndMarkMapAsMigrationTarget(
2048 map_compare.GetObject(), register_snapshot);
2049 // Deopt even if the migration was successful.
2050 __ JumpToDeopt(deopt);
2051 },
2052 register_snapshot(), map_compare, this));
2053 // If the jump to deferred code was not taken, the map was equal to the
2054 // last map.
2055 __ bind(&done);
2056}
2057
2058int CheckMapsWithMigration::MaxCallStackArgs() const {
2059 DCHECK_EQ(Runtime::FunctionForId(Runtime::kTryMigrateInstance)->nargs, 1);
2060 return 1;
2061}
2062
2063void CheckMapsWithMigration::SetValueLocationConstraints() {
2064 UseRegister(receiver_input());
2065 set_temporaries_needed(MapCompare::TemporaryCount(maps_.size()));
2066}
2067
2068void CheckMapsWithMigration::GenerateCode(MaglevAssembler* masm,
2069 const ProcessingState& state) {
2070 // We emit an unconditional deopt if the intersection of the map sets
2071 // is empty.
2072 DCHECK(!maps().is_empty());
2073
2074 MaglevAssembler::TemporaryRegisterScope temps(masm);
2075 Register object = ToRegister(receiver_input());
2076
2077 bool maps_include_heap_number = compiler::AnyMapIsHeapNumber(maps());
2078
2079 ZoneLabelRef map_checks(masm), done(masm);
2080
2081 if (check_type() == CheckType::kOmitHeapObjectCheck) {
2082 __ AssertNotSmi(object);
2083 } else {
2084 if (maps_include_heap_number) {
2085 // Smis count as matching the HeapNumber map, so we're done.
2086 __ JumpIfSmi(object, *done);
2087 } else {
2088 __ EmitEagerDeoptIfSmi(this, object, DeoptimizeReason::kWrongMap);
2089 }
2090 }
2091
2092 // If we jump back here from the deferred code (below), we need to reload
2093 // the map.
2094 __ bind(*map_checks);
2095
2096 RegisterSnapshot save_registers = register_snapshot();
2097 // Make sure that the object register is not clobbered by the
2098 // Runtime::kMigrateInstance runtime call. It's ok to clobber the register
2099 // where the object map is, since the map is reloaded after the runtime call.
2100 save_registers.live_registers.set(object);
2101 save_registers.live_tagged_registers.set(object);
2102
2103 size_t map_count = maps().size();
2104 bool has_migration_targets = false;
2105 MapCompare map_compare(masm, object, maps_.size());
2106 Handle<Map> map_handle;
2107 for (size_t i = 0; i < map_count; ++i) {
2108 map_handle = maps().at(i).object();
2109 const bool last_map = (i == map_count - 1);
2110 if (!last_map) {
2111 map_compare.Generate(map_handle, kEqual, *done);
2112 }
2113 if (map_handle->is_migration_target()) {
2114 has_migration_targets = true;
2115 }
2116 }
2117
2118 if (!has_migration_targets) {
2119 // Emit deopt for the last map.
2120 map_compare.Generate(map_handle, kNotEqual,
2121 __ GetDeoptLabel(this, DeoptimizeReason::kWrongMap));
2122 } else {
2123 map_compare.Generate(
2124 map_handle, kNotEqual,
2125 __ MakeDeferredCode(
2126 [](MaglevAssembler* masm, RegisterSnapshot register_snapshot,
2127 ZoneLabelRef map_checks, MapCompare map_compare,
2128 CheckMapsWithMigration* node) {
2129 Label* deopt =
2130 __ GetDeoptLabel(node, DeoptimizeReason::kWrongMap);
2131 // If the map is not deprecated, we fail the map check.
2132 __ TestInt32AndJumpIfAllClear(
2133 FieldMemOperand(map_compare.GetMap(), Map::kBitField3Offset),
2134 Map::Bits3::IsDeprecatedBit::kMask, deopt);
2135
2136 // Otherwise, try migrating the object.
2137 __ TryMigrateInstance(map_compare.GetObject(), register_snapshot,
2138 deopt);
2139 __ Jump(*map_checks);
2140 // We'll need to reload the map since it might have changed; it's
2141 // done right after the map_checks label.
2142 },
2143 save_registers, map_checks, map_compare, this));
2144 // If the jump to deferred code was not taken, the map was equal to the
2145 // last map.
2146 } // End of the `has_migration_targets` case.
2147 __ bind(*done);
2148}
2149
2150void CheckMapsWithAlreadyLoadedMap::SetValueLocationConstraints() {
2151 UseRegister(map_input());
2153}
2154
2155void CheckMapsWithAlreadyLoadedMap::GenerateCode(MaglevAssembler* masm,
2156 const ProcessingState& state) {
2157 Register map = ToRegister(map_input());
2158
2159 // We emit an unconditional deopt if the intersection of the map sets
2160 // is empty.
2161 DCHECK(!maps().is_empty());
2162
2163 // CheckMapsWithAlreadyLoadedMap can only be used in contexts where SMIs /
2164 // HeapNumbers don't make sense (e.g., if we're loading properties from them).
2165 DCHECK(!compiler::AnyMapIsHeapNumber(maps()));
2166
2167 // Experimentally determined map limit (with slack) that allows us to use
2168 // near jumps in the code below. If --deopt-every-n-times is on, we generate
2169 // a bit more code, so disable the near jump optimization.
2170 constexpr int kMapCountForNearJumps = kTaggedSize == 4 ? 10 : 5;
2171 Label::Distance jump_distance = (maps().size() <= kMapCountForNearJumps &&
2172 v8_flags.deopt_every_n_times <= 0)
2173 ? Label::Distance::kNear
2174 : Label::Distance::kFar;
2175
2176 Label done;
2177 size_t map_count = maps().size();
2178 for (size_t i = 0; i < map_count - 1; ++i) {
2179 Handle<Map> map_at_i = maps().at(i).object();
2180 __ CompareTaggedAndJumpIf(map, map_at_i, kEqual, &done, jump_distance);
2181 }
2182 Handle<Map> last_map = maps().at(map_count - 1).object();
2183 Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kWrongMap);
2184 __ CompareTaggedAndJumpIf(map, last_map, kNotEqual, fail);
2185 __ bind(&done);
2186}
2187
2188int MigrateMapIfNeeded::MaxCallStackArgs() const {
2189 DCHECK_EQ(Runtime::FunctionForId(Runtime::kTryMigrateInstance)->nargs, 1);
2190 return 1;
2191}
2192
2193void MigrateMapIfNeeded::SetValueLocationConstraints() {
2194 UseRegister(map_input());
2195 UseRegister(object_input());
2196 DefineSameAsFirst(this);
2197}
2198
2199void MigrateMapIfNeeded::GenerateCode(MaglevAssembler* masm,
2200 const ProcessingState& state) {
2201 MaglevAssembler::TemporaryRegisterScope temps(masm);
2202 Register object = ToRegister(object_input());
2203 Register map = ToRegister(map_input());
2204 DCHECK_EQ(map, ToRegister(result()));
2205
2206 ZoneLabelRef done(masm);
2207
2208 RegisterSnapshot save_registers = register_snapshot();
2209 // Make sure that the object register is not clobbered by TryMigrateInstance
2210 // (which does a runtime call). We need the object register for reloading the
2211 // map. It's okay to clobber the map register, since we will always reload (or
2212 // deopt) after the runtime call.
2213 save_registers.live_registers.set(object);
2214 save_registers.live_tagged_registers.set(object);
2215
2216 // If the map is deprecated, jump to the deferred code which will migrate it.
2217 __ TestInt32AndJumpIfAnySet(
2218 FieldMemOperand(map, Map::kBitField3Offset),
2219 Map::Bits3::IsDeprecatedBit::kMask,
2220 __ MakeDeferredCode(
2221 [](MaglevAssembler* masm, RegisterSnapshot register_snapshot,
2222 ZoneLabelRef done, Register object, Register map,
2223 MigrateMapIfNeeded* node) {
2224 Label* deopt = __ GetDeoptLabel(node, DeoptimizeReason::kWrongMap);
2225 __ TryMigrateInstance(object, register_snapshot, deopt);
2226 // Reload the map since TryMigrateInstance might have changed it.
2227 __ LoadTaggedField(map, object, HeapObject::kMapOffset);
2228 __ Jump(*done);
2229 },
2230 save_registers, done, object, map, this));
2231
2232 // No migration needed. Return the original map. We already have it in the
2233 // first input register which is the same as the return register.
2234
2235 __ bind(*done);
2236}
2237
2238int DeleteProperty::MaxCallStackArgs() const {
2239 using D = CallInterfaceDescriptorFor<Builtin::kDeleteProperty>::type;
2240 return D::GetStackParameterCount();
2241}
2242void DeleteProperty::SetValueLocationConstraints() {
2243 using D = CallInterfaceDescriptorFor<Builtin::kDeleteProperty>::type;
2244 UseFixed(context(), kContextRegister);
2245 UseFixed(object(), D::GetRegisterParameter(D::kObject));
2246 UseFixed(key(), D::GetRegisterParameter(D::kKey));
2247 DefineAsFixed(this, kReturnRegister0);
2248}
2249void DeleteProperty::GenerateCode(MaglevAssembler* masm,
2250 const ProcessingState& state) {
2251 __ CallBuiltin<Builtin::kDeleteProperty>(
2252 context(), // context
2253 object(), // object
2254 key(), // key
2255 Smi::FromInt(static_cast<int>(mode())) // language mode
2256 );
2257 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
2258}
2259
2260int ForInPrepare::MaxCallStackArgs() const {
2261 using D = CallInterfaceDescriptorFor<Builtin::kForInPrepare>::type;
2262 return D::GetStackParameterCount();
2263}
2264void ForInPrepare::SetValueLocationConstraints() {
2265 using D = CallInterfaceDescriptorFor<Builtin::kForInPrepare>::type;
2266 UseFixed(context(), kContextRegister);
2267 UseFixed(enumerator(), D::GetRegisterParameter(D::kEnumerator));
2268 DefineAsFixed(this, kReturnRegister0);
2269}
2270void ForInPrepare::GenerateCode(MaglevAssembler* masm,
2271 const ProcessingState& state) {
2272 __ CallBuiltin<Builtin::kForInPrepare>(
2273 context(), // context
2274 enumerator(), // enumerator
2275 TaggedIndex::FromIntptr(feedback().index()), // feedback slot
2276 feedback().vector // feedback vector
2277 );
2278}
2279
2280int ForInNext::MaxCallStackArgs() const {
2281 using D = CallInterfaceDescriptorFor<Builtin::kForInNext>::type;
2282 return D::GetStackParameterCount();
2283}
2284void ForInNext::SetValueLocationConstraints() {
2285 using D = CallInterfaceDescriptorFor<Builtin::kForInNext>::type;
2286 UseFixed(context(), kContextRegister);
2287 UseFixed(receiver(), D::GetRegisterParameter(D::kReceiver));
2288 UseFixed(cache_array(), D::GetRegisterParameter(D::kCacheArray));
2289 UseFixed(cache_type(), D::GetRegisterParameter(D::kCacheType));
2290 UseFixed(cache_index(), D::GetRegisterParameter(D::kCacheIndex));
2291 DefineAsFixed(this, kReturnRegister0);
2292}
2293void ForInNext::GenerateCode(MaglevAssembler* masm,
2294 const ProcessingState& state) {
2295 __ CallBuiltin<Builtin::kForInNext>(context(), // context
2296 feedback().index(), // feedback slot
2297 receiver(), // receiver
2298 cache_array(), // cache array
2299 cache_type(), // cache type
2300 cache_index(), // cache index
2301 feedback().vector // feedback vector
2302 );
2303 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
2304}
2305
2306int GetIterator::MaxCallStackArgs() const {
2307 using D = CallInterfaceDescriptorFor<Builtin::kGetIteratorWithFeedback>::type;
2308 return D::GetStackParameterCount();
2309}
2310void GetIterator::SetValueLocationConstraints() {
2311 using D = CallInterfaceDescriptorFor<Builtin::kGetIteratorWithFeedback>::type;
2312 UseFixed(context(), kContextRegister);
2313 UseFixed(receiver(), D::GetRegisterParameter(D::kReceiver));
2314 DefineAsFixed(this, kReturnRegister0);
2315}
2316void GetIterator::GenerateCode(MaglevAssembler* masm,
2317 const ProcessingState& state) {
2318 __ CallBuiltin<Builtin::kGetIteratorWithFeedback>(
2319 context(), // context
2320 receiver(), // receiver
2321 TaggedIndex::FromIntptr(load_slot()), // feedback load slot
2322 TaggedIndex::FromIntptr(call_slot()), // feedback call slot
2323 feedback() // feedback vector
2324 );
2325 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
2326}
2327
2328void Int32Compare::SetValueLocationConstraints() {
2329 UseRegister(left_input());
2330 if (right_input().node()->Is<Int32Constant>()) {
2331 UseAny(right_input());
2332 } else {
2333 UseRegister(right_input());
2334 }
2335 DefineAsRegister(this);
2336}
2337
2338void Int32Compare::GenerateCode(MaglevAssembler* masm,
2339 const ProcessingState& state) {
2340 Register result = ToRegister(this->result());
2341 Label is_true, end;
2342 if (Int32Constant* constant =
2343 right_input().node()->TryCast<Int32Constant>()) {
2344 int32_t right_value = constant->value();
2345 __ CompareInt32AndJumpIf(ToRegister(left_input()), right_value,
2346 ConditionFor(operation()), &is_true,
2347 Label::Distance::kNear);
2348 } else {
2349 __ CompareInt32AndJumpIf(
2350 ToRegister(left_input()), ToRegister(right_input()),
2351 ConditionFor(operation()), &is_true, Label::Distance::kNear);
2352 }
2353 // TODO(leszeks): Investigate loading existing materialisations of roots here,
2354 // if available.
2355 __ LoadRoot(result, RootIndex::kFalseValue);
2356 __ jmp(&end);
2357 {
2358 __ bind(&is_true);
2359 __ LoadRoot(result, RootIndex::kTrueValue);
2360 }
2361 __ bind(&end);
2362}
2363
2364void Int32ToBoolean::SetValueLocationConstraints() {
2365 UseRegister(value());
2366 DefineAsRegister(this);
2367}
2368
2369void Int32ToBoolean::GenerateCode(MaglevAssembler* masm,
2370 const ProcessingState& state) {
2371 Register result = ToRegister(this->result());
2372 Label is_true, end;
2373 __ CompareInt32AndJumpIf(ToRegister(value()), 0, kNotEqual, &is_true,
2374 Label::Distance::kNear);
2375 // TODO(leszeks): Investigate loading existing materialisations of roots here,
2376 // if available.
2377 __ LoadRoot(result, flip() ? RootIndex::kTrueValue : RootIndex::kFalseValue);
2378 __ jmp(&end);
2379 {
2380 __ bind(&is_true);
2381 __ LoadRoot(result,
2382 flip() ? RootIndex::kFalseValue : RootIndex::kTrueValue);
2383 }
2384 __ bind(&end);
2385}
2386
2387void IntPtrToBoolean::SetValueLocationConstraints() {
2388 UseRegister(value());
2389 DefineAsRegister(this);
2390}
2391
2392void IntPtrToBoolean::GenerateCode(MaglevAssembler* masm,
2393 const ProcessingState& state) {
2394 Register result = ToRegister(this->result());
2395 Label is_true, end;
2396 __ CompareIntPtrAndJumpIf(ToRegister(value()), 0, kNotEqual, &is_true,
2397 Label::Distance::kNear);
2398 // TODO(leszeks): Investigate loading existing materialisations of roots here,
2399 // if available.
2400 __ LoadRoot(result, flip() ? RootIndex::kTrueValue : RootIndex::kFalseValue);
2401 __ jmp(&end);
2402 {
2403 __ bind(&is_true);
2404 __ LoadRoot(result,
2405 flip() ? RootIndex::kFalseValue : RootIndex::kTrueValue);
2406 }
2407 __ bind(&end);
2408}
2409
2410void Float64Compare::SetValueLocationConstraints() {
2411 UseRegister(left_input());
2412 UseRegister(right_input());
2413 DefineAsRegister(this);
2414}
2415
2416void Float64Compare::GenerateCode(MaglevAssembler* masm,
2417 const ProcessingState& state) {
2418 DoubleRegister left = ToDoubleRegister(left_input());
2419 DoubleRegister right = ToDoubleRegister(right_input());
2420 Register result = ToRegister(this->result());
2421 Label is_false, end;
2422 __ CompareFloat64AndJumpIf(left, right,
2423 NegateCondition(ConditionForFloat64(operation())),
2424 &is_false, &is_false, Label::Distance::kNear);
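// [Editor's note, assumption not in the original source: the duplicated
// &is_false arguments are presumably the ordinary jump target and the
// NaN/unordered target of CompareFloat64AndJumpIf, so an unordered compare
// also produces the false value.]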
2425 // TODO(leszeks): Investigate loading existing materialisations of roots here,
2426 // if available.
2427 __ LoadRoot(result, RootIndex::kTrueValue);
2428 __ Jump(&end);
2429 {
2430 __ bind(&is_false);
2431 __ LoadRoot(result, RootIndex::kFalseValue);
2432 }
2433 __ bind(&end);
2434}
2435
2436void Float64ToBoolean::SetValueLocationConstraints() {
2437 UseRegister(value());
2438 set_double_temporaries_needed(1);
2439 DefineAsRegister(this);
2440}
2441void Float64ToBoolean::GenerateCode(MaglevAssembler* masm,
2442 const ProcessingState& state) {
2443 MaglevAssembler::TemporaryRegisterScope temps(masm);
2444 DoubleRegister double_scratch = temps.AcquireDouble();
2445 Register result = ToRegister(this->result());
2446 Label is_false, end;
2447
2448 __ Move(double_scratch, 0.0);
2449 __ CompareFloat64AndJumpIf(ToDoubleRegister(value()), double_scratch, kEqual,
2450 &is_false, &is_false, Label::Distance::kNear);
2451
2452 __ LoadRoot(result, flip() ? RootIndex::kFalseValue : RootIndex::kTrueValue);
2453 __ Jump(&end);
2454 {
2455 __ bind(&is_false);
2456 __ LoadRoot(result,
2457 flip() ? RootIndex::kTrueValue : RootIndex::kFalseValue);
2458 }
2459 __ bind(&end);
2460}
2461
2462void CheckedHoleyFloat64ToFloat64::SetValueLocationConstraints() {
2463 UseRegister(input());
2464 DefineSameAsFirst(this);
2465 set_temporaries_needed(1);
2466}
2467void CheckedHoleyFloat64ToFloat64::GenerateCode(MaglevAssembler* masm,
2468 const ProcessingState& state) {
2469 MaglevAssembler::TemporaryRegisterScope temps(masm);
2470 __ JumpIfHoleNan(ToDoubleRegister(input()), temps.Acquire(),
2471 __ GetDeoptLabel(this, DeoptimizeReason::kHole));
2472}
2473
2474void LoadHeapInt32::SetValueLocationConstraints() {
2475 UseRegister(object_input());
2476 DefineAsRegister(this);
2477 set_temporaries_needed(1);
2478}
2479void LoadHeapInt32::GenerateCode(MaglevAssembler* masm,
2480 const ProcessingState& state) {
2481 MaglevAssembler::TemporaryRegisterScope temps(masm);
2482 Register tmp = temps.Acquire();
2483 Register object = ToRegister(object_input());
2484 __ AssertNotSmi(object);
2485 __ LoadTaggedField(tmp, object, offset());
2486 __ AssertNotSmi(tmp);
2487 __ LoadHeapInt32Value(ToRegister(result()), tmp);
2488}
2489
2490void LoadDoubleField::SetValueLocationConstraints() {
2491 UseRegister(object_input());
2492 DefineAsRegister(this);
2493 set_temporaries_needed(1);
2494}
2495void LoadDoubleField::GenerateCode(MaglevAssembler* masm,
2496 const ProcessingState& state) {
2497 MaglevAssembler::TemporaryRegisterScope temps(masm);
2498 Register tmp = temps.Acquire();
2499 Register object = ToRegister(object_input());
2500 __ AssertNotSmi(object);
2501 __ LoadTaggedField(tmp, object, offset());
2502 __ AssertNotSmi(tmp);
2503 __ LoadHeapNumberValue(ToDoubleRegister(result()), tmp);
2504}
2505
2506void LoadFloat64::SetValueLocationConstraints() {
2507 UseRegister(object_input());
2508 DefineAsRegister(this);
2509}
2510void LoadFloat64::GenerateCode(MaglevAssembler* masm,
2511 const ProcessingState& state) {
2512 Register object = ToRegister(object_input());
2513 __ AssertNotSmi(object);
2514 __ LoadFloat64(ToDoubleRegister(result()), FieldMemOperand(object, offset()));
2515}
2516
2517void LoadInt32::SetValueLocationConstraints() {
2518 UseRegister(object_input());
2519 DefineAsRegister(this);
2520}
2521void LoadInt32::GenerateCode(MaglevAssembler* masm,
2522 const ProcessingState& state) {
2523 Register object = ToRegister(object_input());
2524 __ AssertNotSmi(object);
2525 __ LoadInt32(ToRegister(result()), FieldMemOperand(object, offset()));
2526}
2527
2528template <typename T>
2529void AbstractLoadTaggedField<T>::SetValueLocationConstraints() {
2530 UseRegister(object_input());
2531 DefineAsRegister(this);
2532}
2533template <typename T>
2534void AbstractLoadTaggedField<T>::GenerateCode(MaglevAssembler* masm,
2535 const ProcessingState& state) {
2536 Register object = ToRegister(object_input());
2537 __ AssertNotSmi(object);
2538 if (this->decompresses_tagged_result()) {
2539 __ LoadTaggedField(ToRegister(result()), object, offset());
2540 } else {
2541 __ LoadTaggedFieldWithoutDecompressing(ToRegister(result()), object,
2542 offset());
2543 }
2544}
2545
2546void LoadTaggedFieldForScriptContextSlot::SetValueLocationConstraints() {
2547 UseRegister(context());
2548 set_temporaries_needed(2);
2549 set_double_temporaries_needed(1);
2550 DefineAsRegister(this);
2551}
2552
2553void LoadTaggedFieldForScriptContextSlot::GenerateCode(
2554 MaglevAssembler* masm, const ProcessingState& state) {
2555 MaglevAssembler::TemporaryRegisterScope temps(masm);
2556 Register script_context = ToRegister(context());
2557 Register value = ToRegister(result());
2558 Register scratch = temps.Acquire();
2559 ZoneLabelRef done(masm);
2560 __ AssertObjectType(script_context, SCRIPT_CONTEXT_TYPE,
2561 AbortReason::kUnexpectedInstanceType);
2562
2563 // Be sure to not clobber script_context.
2564 if (value == script_context) {
2565 Register tmp = temps.Acquire();
2566 __ Move(tmp, script_context);
2567 script_context = tmp;
2568 }
2569
2570 // Load value from context.
2571 __ LoadTaggedField(value, script_context, offset());
2572
2573 // Check side table if HeapNumber.
2574 __ JumpIfSmi(value, *done);
2575 __ CompareMapWithRoot(value, RootIndex::kHeapNumberMap, scratch);
2576 __ JumpToDeferredIf(
2577 kEqual,
2578 [](MaglevAssembler* masm, Register script_context, Register result_reg,
2579 Register scratch, LoadTaggedFieldForScriptContextSlot* node,
2580 ZoneLabelRef done) {
2581 Label property_loaded;
2582 Label check_heap_number, allocate;
2583 // Load side table.
2584 // TODO(victorgomes): Should we hoist the side_table?
2585 __ LoadTaggedField(scratch, script_context,
2586 Context::OffsetOfElementAt(
2587 Context::CONTEXT_SIDE_TABLE_PROPERTY_INDEX));
2588 __ LoadTaggedField(
2589 scratch, scratch,
2590 FixedArray::OffsetOfElementAt(node->index() -
2591 Context::MIN_CONTEXT_EXTENDED_SLOTS));
2592
2593 __ JumpIfSmi(scratch, &property_loaded);
2594 __ AssertObjectType(scratch, CONTEXT_SIDE_PROPERTY_CELL_TYPE,
2595 AbortReason::kUnexpectedInstanceType);
2596 __ LoadTaggedField(scratch, scratch,
2597 ContextSidePropertyCell::kPropertyDetailsRawOffset);
2598 __ bind(&property_loaded);
2599
2600 MaglevAssembler::TemporaryRegisterScope temps(masm);
2601 DoubleRegister double_value = temps.AcquireDouble();
2602
2603 if (v8_flags.script_context_mutable_heap_int32) {
2604 __ CompareTaggedAndJumpIf(scratch,
2605 ContextSidePropertyCell::MutableInt32(),
2606 kNotEqual, &check_heap_number);
2607 __ LoadHeapInt32Value(scratch, result_reg);
2608 __ Int32ToDouble(double_value, scratch);
2609 __ Jump(&allocate, Label::kNear);
2610 }
2611
2612 __ bind(&check_heap_number);
2613 __ CompareTaggedAndJumpIf(scratch,
2614 ContextSidePropertyCell::MutableHeapNumber(),
2615 kNotEqual, *done);
2616 __ LoadHeapNumberValue(double_value, result_reg);
2617
2618 __ bind(&allocate);
2619 __ AllocateHeapNumber(node->register_snapshot(), result_reg,
2620 double_value);
2621 __ Jump(*done);
2622 },
2623 script_context, value, scratch, this, done);
2624
2625 __ bind(*done);
2626}
2627
2628void LoadTaggedFieldByFieldIndex::SetValueLocationConstraints() {
2629 UseRegister(object_input());
2630 UseAndClobberRegister(index_input());
2631 DefineAsRegister(this);
2632 set_temporaries_needed(1);
2633 set_double_temporaries_needed(1);
2634}
2635void LoadTaggedFieldByFieldIndex::GenerateCode(MaglevAssembler* masm,
2636 const ProcessingState& state) {
2637 Register object = ToRegister(object_input());
2638 Register field_index = ToRegister(index_input());
2639 Register result_reg = ToRegister(result());
2640 __ AssertNotSmi(object);
2641 __ AssertSmi(field_index);
2642
2643 ZoneLabelRef done(masm);
2644
2645 // For in-object properties, the field_index is encoded as:
2646 //
2647 // field_index = array_index | is_double_bit | smi_tag
2648 // = array_index << (1+kSmiTagBits)
2649 // + is_double_bit << kSmiTagBits
2650 //
2651 // The value we want is at the field offset:
2652 //
2653 // (array_index << kTaggedSizeLog2) + JSObject::kHeaderSize
2654 //
2655 // We could recover array_index from field_index by shifting away the double
2656 // bit and smi tag, then shift back up again, but this means shifting
2657 // twice:
2658 //
2659 // ((field_index >> kSmiTagBits >> 1) << kTaggedSizeLog2)
2660 // + JSObject::kHeaderSize
2661 //
2662 // Instead, we can do some rearranging to get the offset with either a single
2663 // small shift, or no shift at all:
2664 //
2665 // (array_index << kTaggedSizeLog2) + JSObject::kHeaderSize
2666 //
2667 // [Split shift to match array_index component of field_index]
2668 // = (
2669 // (array_index << (1+kSmiTagBits)) << (kTaggedSizeLog2-1-kSmiTagBits)
2670 // ) + JSObject::kHeaderSize
2671 //
2672 // [Substitute in field_index]
2673 // = (
2674 // (field_index - is_double_bit << kSmiTagBits)
2675 // << (kTaggedSizeLog2-1-kSmiTagBits)
2676 // ) + JSObject::kHeaderSize
2677 //
2678 // [Fold together the constants]
2679 // = (field_index << (kTaggedSizeLog2-1-kSmiTagBits))
2680 // + (JSObject::kHeaderSize - (is_double_bit << (kTaggedSizeLog2-1)))
2681 //
2682 // Note that this results in:
2683 //
2684 // * No shift when kSmiTagBits == kTaggedSizeLog2 - 1, which is the case
2685 // when pointer compression is on.
2686 // * A shift of 1 when kSmiTagBits == 1 and kTaggedSizeLog2 == 3, which
2687 // is the case when pointer compression is off but Smis are 31 bit.
2688 // * A shift of 2 when kSmiTagBits == 0 and kTaggedSizeLog2 == 3, which
2689 // is the case when pointer compression is off, Smis are 32 bit, and
2690 // the Smi was untagged to int32 already.
2691 //
2692 // These shifts are small enough to encode in the load operand.
2693 //
2694 // For out-of-object properties, the encoding is:
2695 //
2696 // field_index = (-1 - array_index) | is_double_bit | smi_tag
2697 // = (-1 - array_index) << (1+kSmiTagBits)
2698 // + is_double_bit << kSmiTagBits
2699 // = -array_index << (1+kSmiTagBits)
2700 // - 1 << (1+kSmiTagBits) + is_double_bit << kSmiTagBits
2701 // = -array_index << (1+kSmiTagBits)
2702 // - 2 << kSmiTagBits + is_double_bit << kSmiTagBits
2703 // = -array_index << (1+kSmiTagBits)
2704 // + (is_double_bit - 2) << kSmiTagBits
2705 //
2706 // The value we want is in the property array at offset:
2707 //
2708 // (array_index << kTaggedSizeLog2) + OFFSET_OF_DATA_START(FixedArray)
2709 //
2710 // [Split shift to match array_index component of field_index]
2711 // = (array_index << (1+kSmiTagBits)) << (kTaggedSizeLog2-1-kSmiTagBits)
2712 // + OFFSET_OF_DATA_START(FixedArray)
2713 //
2714 // [Substitute in field_index]
2715 // = (-field_index - (is_double_bit - 2) << kSmiTagBits)
2716 // << (kTaggedSizeLog2-1-kSmiTagBits)
2717 // + OFFSET_OF_DATA_START(FixedArray)
2718 //
2719 // [Fold together the constants]
2720 // = -field_index << (kTaggedSizeLog2-1-kSmiTagBits)
2721 // + OFFSET_OF_DATA_START(FixedArray)
2722 // - ((is_double_bit - 2) << (kTaggedSizeLog2-1))
2723 //
2724 // This allows us to simply negate the field_index register and do a load with
2725 // otherwise constant offset and the same scale factor as for in-object
2726 // properties.
2727
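// [Editor's note, worked example not in the original source: with pointer
// compression (kTaggedSizeLog2 == 2, kSmiTagBits == 1) the shift is zero.
// For the in-object case with array_index == 3 and is_double_bit == 1:
//
//   field_index = (3 << 2) + (1 << 1) = 14
//   offset      = (14 << 0) + JSObject::kHeaderSize - (1 << 1)
//               = 12 + JSObject::kHeaderSize
//               = (array_index << kTaggedSizeLog2) + JSObject::kHeaderSize
//
// which is exactly the target field offset.]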
2728 static constexpr int kSmiTagBitsInValue = SmiValuesAre32Bits() ? 0 : 1;
2729 static_assert(kSmiTagBitsInValue == 32 - kSmiValueSize);
2730 if (SmiValuesAre32Bits()) {
2731 __ SmiUntag(field_index);
2732 }
2733
2734 static constexpr int scale = 1 << (kTaggedSizeLog2 - 1 - kSmiTagBitsInValue);
2735
2736 // Check if field is a mutable double field.
2737 static constexpr int32_t kIsDoubleBitMask = 1 << kSmiTagBitsInValue;
2738 __ TestInt32AndJumpIfAnySet(
2739 field_index, kIsDoubleBitMask,
2740 __ MakeDeferredCode(
2741 [](MaglevAssembler* masm, Register object, Register field_index,
2742 Register result_reg, RegisterSnapshot register_snapshot,
2743 ZoneLabelRef done) {
2744 // The field is a Double field, a.k.a. a mutable HeapNumber.
2745 static constexpr int kIsDoubleBit = 1;
2746
2747 // Check if field is in-object or out-of-object. The is_double bit
2748 // value doesn't matter, since negative values will stay negative.
2749 Label if_outofobject, loaded_field;
2750 __ CompareInt32AndJumpIf(field_index, 0, kLessThan,
2751 &if_outofobject);
2752
2753 // The field is located in the {object} itself.
2754 {
2755 // See giant comment above.
2756 if (SmiValuesAre31Bits()) {
2757 // We haven't untagged, so we need to sign extend.
2758 __ SignExtend32To64Bits(field_index, field_index);
2759 }
2760 __ LoadTaggedFieldByIndex(
2761 result_reg, object, field_index, scale,
2762 JSObject::kHeaderSize -
2763 (kIsDoubleBit << (kTaggedSizeLog2 - 1)));
2764 __ Jump(&loaded_field);
2765 }
2766
2767 __ bind(&if_outofobject);
2768 {
2769 MaglevAssembler::TemporaryRegisterScope temps(masm);
2770 Register property_array = temps.Acquire();
2771 // Load the property array.
2772 __ LoadTaggedField(
2773 property_array,
2774 FieldMemOperand(object, JSObject::kPropertiesOrHashOffset));
2775
2776 // See giant comment above. No need to sign extend, negate will
2777 // handle it.
2778 __ NegateInt32(field_index);
2779 __ LoadTaggedFieldByIndex(
2780 result_reg, property_array, field_index, scale,
2781 OFFSET_OF_DATA_START(FixedArray) -
2782 ((2 - kIsDoubleBit) << (kTaggedSizeLog2 - 1)));
2783 __ Jump(&loaded_field);
2784 }
2785
2786 __ bind(&loaded_field);
2787 // We may have transitioned in-place away from double, so check that
2788 // this is a HeapNumber -- otherwise the load is fine and we don't
2789 // need to copy anything anyway.
2790 __ JumpIfSmi(result_reg, *done);
2791 MaglevAssembler::TemporaryRegisterScope temps(masm);
2792 Register map = temps.Acquire();
2793 // Hack: The temporary allocated for `map` might alias the result
2794 // register. If it does, use the field_index register as a temporary
2795 // instead (since it's clobbered anyway).
2796 // TODO(leszeks): Extend the result register's lifetime to overlap
2797 // the temporaries, so that this alias isn't possible.
2798 if (map == result_reg) {
2799 DCHECK_NE(map, field_index);
2800 map = field_index;
2801 }
2802 __ LoadMapForCompare(map, result_reg);
2803 __ JumpIfNotRoot(map, RootIndex::kHeapNumberMap, *done);
2804 DoubleRegister double_value = temps.AcquireDouble();
2805 __ LoadHeapNumberValue(double_value, result_reg);
2806 __ AllocateHeapNumber(register_snapshot, result_reg, double_value);
2807 __ Jump(*done);
2808 },
2809 object, field_index, result_reg, register_snapshot(), done));
2810
2811 // The field is a proper Tagged field on {object}. The {field_index} is
2812 // shifted to the left by one in the code below.
2813 {
2814 static constexpr int kIsDoubleBit = 0;
2815
2816 // Check if field is in-object or out-of-object. The is_double bit value
2817 // doesn't matter, since negative values will stay negative.
2818 Label if_outofobject;
2819 __ CompareInt32AndJumpIf(field_index, 0, kLessThan, &if_outofobject);
2820
2821 // The field is located in the {object} itself.
2822 {
2823 // See giant comment above.
2824 if (SmiValuesAre31Bits()) {
2825 // We haven't untagged, so we need to sign extend.
2826 __ SignExtend32To64Bits(field_index, field_index);
2827 }
2828 __ LoadTaggedFieldByIndex(
2829 result_reg, object, field_index, scale,
2830 JSObject::kHeaderSize - (kIsDoubleBit << (kTaggedSizeLog2 - 1)));
2831 __ Jump(*done);
2832 }
2833
2834 __ bind(&if_outofobject);
2835 {
2836 MaglevAssembler::TemporaryRegisterScope temps(masm);
2837 Register property_array = temps.Acquire();
2838 // Load the property array.
2839 __ LoadTaggedField(
2840 property_array,
2841 FieldMemOperand(object, JSObject::kPropertiesOrHashOffset));
2842
2843 // See giant comment above. No need to sign extend, negate will handle it.
2844 __ NegateInt32(field_index);
2845 __ LoadTaggedFieldByIndex(
2846 result_reg, property_array, field_index, scale,
2847 OFFSET_OF_DATA_START(FixedArray) -
2848 ((2 - kIsDoubleBit) << (kTaggedSizeLog2 - 1)));
2849 // Fallthrough to `done`.
2850 }
2851 }
2852
2853 __ bind(*done);
2854}
2855
2856void LoadFixedArrayElement::SetValueLocationConstraints() {
2857 UseRegister(elements_input());
2858 UseRegister(index_input());
2859 DefineAsRegister(this);
2860}
2861void LoadFixedArrayElement::GenerateCode(MaglevAssembler* masm,
2862 const ProcessingState& state) {
2863 Register elements = ToRegister(elements_input());
2864 Register index = ToRegister(index_input());
2865 Register result_reg = ToRegister(result());
2866 if (this->decompresses_tagged_result()) {
2867 __ LoadFixedArrayElement(result_reg, elements, index);
2868 } else {
2869 __ LoadFixedArrayElementWithoutDecompressing(result_reg, elements, index);
2870 }
2871}
2872
2873void LoadFixedDoubleArrayElement::SetValueLocationConstraints() {
2874 UseRegister(elements_input());
2875 UseRegister(index_input());
2876 DefineAsRegister(this);
2877}
2878void LoadFixedDoubleArrayElement::GenerateCode(MaglevAssembler* masm,
2879 const ProcessingState& state) {
2880 Register elements = ToRegister(elements_input());
2881 Register index = ToRegister(index_input());
2882 DoubleRegister result_reg = ToDoubleRegister(result());
2883 __ LoadFixedDoubleArrayElement(result_reg, elements, index);
2884}
2885
2886void LoadHoleyFixedDoubleArrayElement::SetValueLocationConstraints() {
2887 UseRegister(elements_input());
2888 UseRegister(index_input());
2889 DefineAsRegister(this);
2890}
2891void LoadHoleyFixedDoubleArrayElement::GenerateCode(
2892 MaglevAssembler* masm, const ProcessingState& state) {
2893 Register elements = ToRegister(elements_input());
2894 Register index = ToRegister(index_input());
2895 DoubleRegister result_reg = ToDoubleRegister(result());
2896 __ LoadFixedDoubleArrayElement(result_reg, elements, index);
2897}
2898
2899void LoadHoleyFixedDoubleArrayElementCheckedNotHole::
2900 SetValueLocationConstraints() {
2901 UseRegister(elements_input());
2902 UseRegister(index_input());
2903 DefineAsRegister(this);
2904 set_temporaries_needed(1);
2905}
2906void LoadHoleyFixedDoubleArrayElementCheckedNotHole::GenerateCode(
2907 MaglevAssembler* masm, const ProcessingState& state) {
2908 MaglevAssembler::TemporaryRegisterScope temps(masm);
2909 Register elements = ToRegister(elements_input());
2910 Register index = ToRegister(index_input());
2911 DoubleRegister result_reg = ToDoubleRegister(result());
2912 __ LoadFixedDoubleArrayElement(result_reg, elements, index);
2913 __ JumpIfHoleNan(result_reg, temps.Acquire(),
2914 __ GetDeoptLabel(this, DeoptimizeReason::kHole));
2915}
2916
2917void StoreFixedDoubleArrayElement::SetValueLocationConstraints() {
2918 UseRegister(elements_input());
2919 UseRegister(index_input());
2920 UseRegister(value_input());
2921}
2922void StoreFixedDoubleArrayElement::GenerateCode(MaglevAssembler* masm,
2923 const ProcessingState& state) {
2924 Register elements = ToRegister(elements_input());
2925 Register index = ToRegister(index_input());
2926 DoubleRegister value = ToDoubleRegister(value_input());
2927 if (v8_flags.debug_code) {
2928 __ AssertObjectType(elements, FIXED_DOUBLE_ARRAY_TYPE,
2929 AbortReason::kUnexpectedValue);
2930 __ CompareInt32AndAssert(index, 0, kUnsignedGreaterThanEqual,
2931 AbortReason::kUnexpectedNegativeValue);
2932 }
2933 __ StoreFixedDoubleArrayElement(elements, index, value);
2934}
2935
2936int StoreMap::MaxCallStackArgs() const {
2937 return WriteBarrierDescriptor::GetStackParameterCount();
2938}
2939void StoreMap::SetValueLocationConstraints() {
2940 UseFixed(object_input(), WriteBarrierDescriptor::ObjectRegister());
2941 set_temporaries_needed(1);
2942}
2943void StoreMap::GenerateCode(MaglevAssembler* masm,
2944 const ProcessingState& state) {
2945 MaglevAssembler::TemporaryRegisterScope temps(masm);
2946 // TODO(leszeks): Consider making this an arbitrary register and push/popping
2947 // in the deferred path.
2948 Register object = WriteBarrierDescriptor::ObjectRegister();
2949 DCHECK_EQ(object, ToRegister(object_input()));
2950 Register value = temps.Acquire();
2951 __ MoveTagged(value, map_.object());
2952
2953 switch (kind()) {
2955 case Kind::kInlinedAllocation: {
2956 auto inlined = object_input().node()->Cast<InlinedAllocation>();
2957 if (inlined->allocation_block()->allocation_type() ==
2958 AllocationType::kYoung) {
2959 __ StoreTaggedFieldNoWriteBarrier(object, HeapObject::kMapOffset,
2960 value);
2961 __ AssertElidedWriteBarrier(object, value, register_snapshot());
2962 break;
2963 }
2964 [[fallthrough]];
2965 }
2966 case Kind::kInitializing:
2967 case Kind::kTransitioning: {
2968 __ StoreTaggedFieldWithWriteBarrier(object, HeapObject::kMapOffset, value,
2969 register_snapshot(),
2970 MaglevAssembler::kValueIsCompressed,
2971 MaglevAssembler::kValueCannotBeSmi);
2972 break;
2973 }
2974}
2975
2976int StoreTaggedFieldWithWriteBarrier::MaxCallStackArgs() const {
2977 return WriteBarrierDescriptor::GetStackParameterCount();
2978}
2979void StoreTaggedFieldWithWriteBarrier::SetValueLocationConstraints() {
2980 UseFixed(object_input(), WriteBarrierDescriptor::ObjectRegister());
2981 UseRegister(value_input());
2982}
2983void StoreTaggedFieldWithWriteBarrier::GenerateCode(
2984 MaglevAssembler* masm, const ProcessingState& state) {
2985 // TODO(leszeks): Consider making this an arbitrary register and push/popping
2986 // in the deferred path.
2987 Register object = WriteBarrierDescriptor::ObjectRegister();
2988 DCHECK_EQ(object, ToRegister(object_input()));
2989 Register value = ToRegister(value_input());
2990
2991 __ StoreTaggedFieldWithWriteBarrier(
2992 object, offset(), value, register_snapshot(),
2993 value_input().node()->decompresses_tagged_result()
2994 ? MaglevAssembler::kValueIsDecompressed
2995 : MaglevAssembler::kValueIsCompressed,
2996 MaglevAssembler::kValueCanBeSmi);
2997}
2998
2999int StoreTrustedPointerFieldWithWriteBarrier::MaxCallStackArgs() const {
3000 return WriteBarrierDescriptor::GetStackParameterCount();
3001}
3002void StoreTrustedPointerFieldWithWriteBarrier::SetValueLocationConstraints() {
3003 UseFixed(object_input(), WriteBarrierDescriptor::ObjectRegister());
3004 UseRegister(value_input());
3005}
3006void StoreTrustedPointerFieldWithWriteBarrier::GenerateCode(
3007 MaglevAssembler* masm, const ProcessingState& state) {
3008#ifdef V8_ENABLE_SANDBOX
3009 // TODO(leszeks): Consider making this an arbitrary register and push/popping
3010 // in the deferred path.
3011 Register object = WriteBarrierDescriptor::ObjectRegister();
3012 DCHECK_EQ(object, ToRegister(object_input()));
3013 Register value = ToRegister(value_input());
3014 __ StoreTrustedPointerFieldWithWriteBarrier(object, offset(), value,
3015 register_snapshot(), tag());
3016#else
3017 UNREACHABLE();
3018#endif
3019}
3020
3021void LoadSignedIntDataViewElement::SetValueLocationConstraints() {
3022 UseRegister(object_input());
3023 UseRegister(index_input());
3024 if (is_little_endian_constant() ||
3025 type_ == ExternalArrayType::kExternalInt8Array) {
3026 UseAny(is_little_endian_input());
3027 } else {
3028 UseRegister(is_little_endian_input());
3029 }
3030 DefineAsRegister(this);
3031 set_temporaries_needed(1);
3032}
3033void LoadSignedIntDataViewElement::GenerateCode(MaglevAssembler* masm,
3034 const ProcessingState& state) {
3035 Register object = ToRegister(object_input());
3036 Register index = ToRegister(index_input());
3037 Register result_reg = ToRegister(result());
3038
3039 if (v8_flags.debug_code) {
3040 __ AssertObjectTypeInRange(object,
3041 FIRST_JS_DATA_VIEW_OR_RAB_GSAB_DATA_VIEW_TYPE,
3042 LAST_JS_DATA_VIEW_OR_RAB_GSAB_DATA_VIEW_TYPE,
3043 AbortReason::kUnexpectedValue);
3044 }
3045
3046 int element_size = compiler::ExternalArrayElementSize(type_);
3047
3048 MaglevAssembler::TemporaryRegisterScope temps(masm);
3049 Register data_pointer = temps.Acquire();
3050
3051 // We need to make sure we don't clobber is_little_endian_input by writing to
3052 // the result register.
3053 Register reg_with_result = result_reg;
3054 if (!is_little_endian_constant() &&
3055 type_ != ExternalArrayType::kExternalInt8Array &&
3056 result_reg == ToRegister(is_little_endian_input())) {
3057 reg_with_result = data_pointer;
3058 }
3059
3060 // Load data pointer.
3061 __ LoadExternalPointerField(
3062 data_pointer, FieldMemOperand(object, JSDataView::kDataPointerOffset));
3063 MemOperand element_address = __ DataViewElementOperand(data_pointer, index);
3064 __ LoadSignedField(reg_with_result, element_address, element_size);
3065
3066 // We ignore little endian argument if type is a byte size.
3067 if (element_size > 1) {
3068 if (is_little_endian_constant()) {
3069 if (!V8_TARGET_BIG_ENDIAN_BOOL &&
3070 !FromConstantToBool(masm, is_little_endian_input().node())) {
3071 DCHECK_EQ(reg_with_result, result_reg);
3072 __ ReverseByteOrder(result_reg, element_size);
3073 }
3074 } else {
3075 ZoneLabelRef keep_byte_order(masm), reverse_byte_order(masm);
3076 DCHECK_NE(reg_with_result, ToRegister(is_little_endian_input()));
3077 __ ToBoolean(
3078 ToRegister(is_little_endian_input()), CheckType::kCheckHeapObject,
3079 V8_TARGET_BIG_ENDIAN_BOOL ? reverse_byte_order : keep_byte_order,
3080 V8_TARGET_BIG_ENDIAN_BOOL ? keep_byte_order : reverse_byte_order,
3081 false);
3082 __ bind(*reverse_byte_order);
3083 __ ReverseByteOrder(reg_with_result, element_size);
3084 __ bind(*keep_byte_order);
3085 if (reg_with_result != result_reg) {
3086 __ Move(result_reg, reg_with_result);
3087 }
3088 }
3089 }
3090}
3091
3092void StoreSignedIntDataViewElement::SetValueLocationConstraints() {
3093 UseRegister(object_input());
3094 UseRegister(index_input());
3095 if (compiler::ExternalArrayElementSize(type_) > 1) {
3096 UseAndClobberRegister(value_input());
3097 } else {
3098 UseRegister(value_input());
3099 }
3100 if (is_little_endian_constant() ||
3101 type_ == ExternalArrayType::kExternalInt8Array) {
3102 UseAny(is_little_endian_input());
3103 } else {
3104 UseRegister(is_little_endian_input());
3105 }
3106 set_temporaries_needed(1);
3107}
3108void StoreSignedIntDataViewElement::GenerateCode(MaglevAssembler* masm,
3109 const ProcessingState& state) {
3110 Register object = ToRegister(object_input());
3111 Register index = ToRegister(index_input());
3112 Register value = ToRegister(value_input());
3113
3114 if (v8_flags.debug_code) {
3115 __ AssertObjectTypeInRange(object,
3116 FIRST_JS_DATA_VIEW_OR_RAB_GSAB_DATA_VIEW_TYPE,
3117 LAST_JS_DATA_VIEW_OR_RAB_GSAB_DATA_VIEW_TYPE,
3118 AbortReason::kUnexpectedValue);
3119 }
3120
3121 int element_size = compiler::ExternalArrayElementSize(type_);
3122
3123 // We ignore little endian argument if type is a byte size.
3124 if (element_size > 1) {
3125 if (is_little_endian_constant()) {
3126 if (!V8_TARGET_BIG_ENDIAN_BOOL &&
3127 !FromConstantToBool(masm, is_little_endian_input().node())) {
3128 __ ReverseByteOrder(value, element_size);
3129 }
3130 } else {
3131 ZoneLabelRef keep_byte_order(masm), reverse_byte_order(masm);
3132 __ ToBoolean(
3133 ToRegister(is_little_endian_input()), CheckType::kCheckHeapObject,
3134 V8_TARGET_BIG_ENDIAN_BOOL ? reverse_byte_order : keep_byte_order,
3135 V8_TARGET_BIG_ENDIAN_BOOL ? keep_byte_order : reverse_byte_order,
3136 false);
3137 __ bind(*reverse_byte_order);
3138 __ ReverseByteOrder(value, element_size);
3139 __ bind(*keep_byte_order);
3140 }
3141 }
3142
3143 MaglevAssembler::TemporaryRegisterScope temps(masm);
3144 Register data_pointer = temps.Acquire();
3145 __ LoadExternalPointerField(
3146 data_pointer, FieldMemOperand(object, JSDataView::kDataPointerOffset));
3147 MemOperand element_address = __ DataViewElementOperand(data_pointer, index);
3148 __ StoreField(element_address, value, element_size);
3149}
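// [Editor's note, illustration not in the original source: storing the int32
// value 0x11223344 with is_little_endian matching the target's endianness
// writes the bytes unchanged; with the opposite endianness requested, the
// bytes are reversed to 0x44332211 before the store above.]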
3150
3151void LoadDoubleDataViewElement::SetValueLocationConstraints() {
3152 UseRegister(object_input());
3153 UseRegister(index_input());
3154 if (is_little_endian_constant()) {
3155 UseAny(is_little_endian_input());
3156 } else {
3157 UseRegister(is_little_endian_input());
3158 }
3159 set_temporaries_needed(1);
3160 DefineAsRegister(this);
3161}
3162void LoadDoubleDataViewElement::GenerateCode(MaglevAssembler* masm,
3163 const ProcessingState& state) {
3164 MaglevAssembler::TemporaryRegisterScope temps(masm);
3165 Register object = ToRegister(object_input());
3166 Register index = ToRegister(index_input());
3167 DoubleRegister result_reg = ToDoubleRegister(result());
3168 Register data_pointer = temps.Acquire();
3169
3170 if (v8_flags.debug_code) {
3171 __ AssertObjectTypeInRange(object,
3172 FIRST_JS_DATA_VIEW_OR_RAB_GSAB_DATA_VIEW_TYPE,
3173 LAST_JS_DATA_VIEW_OR_RAB_GSAB_DATA_VIEW_TYPE,
3174 AbortReason::kUnexpectedValue);
3175 }
3176
3177 // Load data pointer.
3178 __ LoadExternalPointerField(
3179 data_pointer, FieldMemOperand(object, JSDataView::kDataPointerOffset));
3180
3181 if (is_little_endian_constant()) {
3182 if (!V8_TARGET_BIG_ENDIAN_BOOL &&
3183 FromConstantToBool(masm, is_little_endian_input().node())) {
3184 __ LoadUnalignedFloat64(result_reg, data_pointer, index);
3185 } else {
3186 __ LoadUnalignedFloat64AndReverseByteOrder(result_reg, data_pointer,
3187 index);
3188 }
3189 } else {
3190 Label done;
3191 ZoneLabelRef keep_byte_order(masm), reverse_byte_order(masm);
3192 // TODO(leszeks): We're likely to be calling this on an existing boolean --
3193 // maybe that's a case we should fast-path here and reuse that boolean
3194 // value?
3195 __ ToBoolean(
3196 ToRegister(is_little_endian_input()), CheckType::kCheckHeapObject,
3197 V8_TARGET_BIG_ENDIAN_BOOL ? reverse_byte_order : keep_byte_order,
3198 V8_TARGET_BIG_ENDIAN_BOOL ? keep_byte_order : reverse_byte_order, true);
3199 __ bind(*keep_byte_order);
3200 __ LoadUnalignedFloat64(result_reg, data_pointer, index);
3201 __ Jump(&done);
3202 // We should swap the bytes if big endian.
3203 __ bind(*reverse_byte_order);
3204 __ LoadUnalignedFloat64AndReverseByteOrder(result_reg, data_pointer, index);
3205 __ bind(&done);
3206 }
3207}
3208
3209void StoreDoubleDataViewElement::SetValueLocationConstraints() {
3210 UseRegister(object_input());
3211 UseRegister(index_input());
3212 UseRegister(value_input());
3213 if (is_little_endian_constant()) {
3214 UseAny(is_little_endian_input());
3215 } else {
3216 UseRegister(is_little_endian_input());
3217 }
3218 set_temporaries_needed(1);
3219}
3220void StoreDoubleDataViewElement::GenerateCode(MaglevAssembler* masm,
3221 const ProcessingState& state) {
3222 Register object = ToRegister(object_input());
3223 Register index = ToRegister(index_input());
3224 DoubleRegister value = ToDoubleRegister(value_input());
3225 MaglevAssembler::TemporaryRegisterScope temps(masm);
3226 Register data_pointer = temps.Acquire();
3227
3228 if (v8_flags.debug_code) {
3229 __ AssertObjectTypeInRange(object,
3230 FIRST_JS_DATA_VIEW_OR_RAB_GSAB_DATA_VIEW_TYPE,
3231 LAST_JS_DATA_VIEW_OR_RAB_GSAB_DATA_VIEW_TYPE,
3232 AbortReason::kUnexpectedValue);
3233 }
3234
3235 // Load data pointer.
3236 __ LoadExternalPointerField(
3237 data_pointer, FieldMemOperand(object, JSDataView::kDataPointerOffset));
3238
3239 if (is_little_endian_constant()) {
3240 if (!V8_TARGET_BIG_ENDIAN_BOOL &&
3241 FromConstantToBool(masm, is_little_endian_input().node())) {
3242 __ StoreUnalignedFloat64(data_pointer, index, value);
3243 } else {
3244 __ ReverseByteOrderAndStoreUnalignedFloat64(data_pointer, index, value);
3245 }
3246 } else {
3247 Label done;
3248 ZoneLabelRef keep_byte_order(masm), reverse_byte_order(masm);
3249 // TODO(leszeks): We're likely to be calling this on an existing boolean --
3250 // maybe that's a case we should fast-path here and reuse that boolean
3251 // value?
3252 __ ToBoolean(
3253 ToRegister(is_little_endian_input()), CheckType::kCheckHeapObject,
3254 V8_TARGET_BIG_ENDIAN_BOOL ? reverse_byte_order : keep_byte_order,
3255 V8_TARGET_BIG_ENDIAN_BOOL ? keep_byte_order : reverse_byte_order, true);
3256 __ bind(*keep_byte_order);
3257 __ StoreUnalignedFloat64(data_pointer, index, value);
3258 __ Jump(&done);
3259 // We should swap the bytes if big endian.
3260 __ bind(*reverse_byte_order);
3261 __ ReverseByteOrderAndStoreUnalignedFloat64(data_pointer, index, value);
3262 __ bind(&done);
3263 }
3264}
3265
3266
3267void LoadEnumCacheLength::SetValueLocationConstraints() {
3268 UseRegister(map_input());
3269 DefineAsRegister(this);
3270}
3271void LoadEnumCacheLength::GenerateCode(MaglevAssembler* masm,
3272 const ProcessingState& state) {
3273 Register map = ToRegister(map_input());
3274 Register result_reg = ToRegister(result());
3275 __ AssertMap(map);
3276 __ LoadBitField<Map::Bits3::EnumLengthBits>(
3277 result_reg, FieldMemOperand(map, Map::kBitField3Offset));
3278}
3279
3280int LoadGlobal::MaxCallStackArgs() const {
3281 if (typeof_mode() == TypeofMode::kNotInside) {
3282 using D = CallInterfaceDescriptorFor<Builtin::kLoadGlobalIC>::type;
3283 return D::GetStackParameterCount();
3284 } else {
3285 using D =
3286 CallInterfaceDescriptorFor<Builtin::kLoadGlobalICInsideTypeof>::type;
3287 return D::GetStackParameterCount();
3288 }
3289}
3290void LoadGlobal::SetValueLocationConstraints() {
3291 UseFixed(context(), kContextRegister);
3292 DefineAsFixed(this, kReturnRegister0);
3293}
3294void LoadGlobal::GenerateCode(MaglevAssembler* masm,
3295 const ProcessingState& state) {
3296 if (typeof_mode() == TypeofMode::kNotInside) {
3297 __ CallBuiltin<Builtin::kLoadGlobalIC>(
3298 context(), // context
3299 name().object(), // name
3300 TaggedIndex::FromIntptr(feedback().index()), // feedback slot
3301 feedback().vector // feedback vector
3302 );
3303 } else {
3304 DCHECK_EQ(typeof_mode(), TypeofMode::kInside);
3305 __ CallBuiltin<Builtin::kLoadGlobalICInsideTypeof>(
3306 context(), // context
3307 name().object(), // name
3308 TaggedIndex::FromIntptr(feedback().index()), // feedback slot
3309 feedback().vector // feedback vector
3310 );
3311 }
3312
3313 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
3314}
3315
3316int StoreGlobal::MaxCallStackArgs() const {
3317 using D = CallInterfaceDescriptorFor<Builtin::kStoreGlobalIC>::type;
3318 return D::GetStackParameterCount();
3319}
3320void StoreGlobal::SetValueLocationConstraints() {
3321 using D = CallInterfaceDescriptorFor<Builtin::kStoreGlobalIC>::type;
3322 UseFixed(context(), kContextRegister);
3323 UseFixed(value(), D::GetRegisterParameter(D::kValue));
3324 DefineAsFixed(this, kReturnRegister0);
3325}
3326void StoreGlobal::GenerateCode(MaglevAssembler* masm,
3327 const ProcessingState& state) {
3328 __ CallBuiltin<Builtin::kStoreGlobalIC>(
3329 context(), // context
3330 name().object(), // name
3331 value(), // value
3332 TaggedIndex::FromIntptr(feedback().index()), // feedback slot
3333 feedback().vector // feedback vector
3334 );
3335 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
3336}
3337
3338void CheckValue::SetValueLocationConstraints() { UseRegister(target_input()); }
3339void CheckValue::GenerateCode(MaglevAssembler* masm,
3340 const ProcessingState& state) {
3341 Register target = ToRegister(target_input());
3342 Label* fail = __ GetDeoptLabel(this, deoptimize_reason());
3343 __ CompareTaggedAndJumpIf(target, value().object(), kNotEqual, fail);
3344}
3345
3346void CheckValueEqualsInt32::SetValueLocationConstraints() {
3347 UseRegister(target_input());
3348}
3349void CheckValueEqualsInt32::GenerateCode(MaglevAssembler* masm,
3350 const ProcessingState& state) {
3351 Register target = ToRegister(target_input());
3352 Label* fail = __ GetDeoptLabel(this, deoptimize_reason());
3353 __ CompareInt32AndJumpIf(target, value(), kNotEqual, fail);
3354}
3355
3356void CheckValueEqualsString::SetValueLocationConstraints() {
3357 using D = CallInterfaceDescriptorFor<Builtin::kStringEqual>::type;
3358 UseFixed(target_input(), D::GetRegisterParameter(D::kLeft));
3359 RequireSpecificTemporary(D::GetRegisterParameter(D::kLength));
3360}
3361void CheckValueEqualsString::GenerateCode(MaglevAssembler* masm,
3362 const ProcessingState& state) {
3363 using D = CallInterfaceDescriptorFor<Builtin::kStringEqual>::type;
3364
3365 ZoneLabelRef end(masm);
3366 DCHECK_EQ(D::GetRegisterParameter(D::kLeft), ToRegister(target_input()));
3367 Register target = D::GetRegisterParameter(D::kLeft);
3368 // Maybe the string is internalized already; do a fast reference check first.
3369 __ CompareTaggedAndJumpIf(target, value().object(), kEqual, *end,
3370 Label::kNear);
3371
3372 __ EmitEagerDeoptIfSmi(this, target, deoptimize_reason());
3373 __ JumpIfString(
3374 target,
3375 __ MakeDeferredCode(
3376 [](MaglevAssembler* masm, CheckValueEqualsString* node,
3377 ZoneLabelRef end, DeoptimizeReason deoptimize_reason) {
3378 Register target = D::GetRegisterParameter(D::kLeft);
3379 Register string_length = D::GetRegisterParameter(D::kLength);
3380 __ StringLength(string_length, target);
3381 Label* fail = __ GetDeoptLabel(node, deoptimize_reason);
3382 __ CompareInt32AndJumpIf(string_length, node->value().length(),
3383 kNotEqual, fail);
3384 RegisterSnapshot snapshot = node->register_snapshot();
3385 {
3386 SaveRegisterStateForCall save_register_state(masm, snapshot);
3387 __ CallBuiltin<Builtin::kStringEqual>(
3388 node->target_input(), // left
3389 node->value().object(), // right
3390 string_length // length
3391 );
3392 save_register_state.DefineSafepoint();
3393 // Compare before restoring registers, so that the deopt below has
3394 // the correct register set.
3395 __ CompareRoot(kReturnRegister0, RootIndex::kTrueValue);
3396 }
3397 __ EmitEagerDeoptIf(kNotEqual, deoptimize_reason, node);
3398 __ Jump(*end);
3399 },
3400 this, end, deoptimize_reason()));
3401
3402 __ EmitEagerDeopt(this, deoptimize_reason());
3403
3404 __ bind(*end);
3405}
3406
3407void CheckDynamicValue::SetValueLocationConstraints() {
3408 UseRegister(first_input());
3409 UseRegister(second_input());
3410}
3411void CheckDynamicValue::GenerateCode(MaglevAssembler* masm,
3412 const ProcessingState& state) {
3413 Register first = ToRegister(first_input());
3414 Register second = ToRegister(second_input());
3415 Label* fail = __ GetDeoptLabel(this, deoptimize_reason());
3416 __ CompareTaggedAndJumpIf(first, second, kNotEqual, fail);
3417}
3418
3419void CheckSmi::SetValueLocationConstraints() { UseRegister(receiver_input()); }
3420void CheckSmi::GenerateCode(MaglevAssembler* masm,
3421 const ProcessingState& state) {
3422 Register object = ToRegister(receiver_input());
3423 __ EmitEagerDeoptIfNotSmi(this, object, DeoptimizeReason::kNotASmi);
3424}
3425
3426void CheckHeapObject::SetValueLocationConstraints() {
3427 UseRegister(receiver_input());
3428}
3429void CheckHeapObject::GenerateCode(MaglevAssembler* masm,
3430 const ProcessingState& state) {
3431 Register object = ToRegister(receiver_input());
3432 __ EmitEagerDeoptIfSmi(this, object, DeoptimizeReason::kSmi);
3433}
3434
3435void CheckSymbol::SetValueLocationConstraints() {
3436 UseRegister(receiver_input());
3437}
3438void CheckSymbol::GenerateCode(MaglevAssembler* masm,
3439 const ProcessingState& state) {
3440 Register object = ToRegister(receiver_input());
3441 if (check_type() == CheckType::kOmitHeapObjectCheck) {
3442 __ AssertNotSmi(object);
3443 } else {
3444 __ EmitEagerDeoptIfSmi(this, object, DeoptimizeReason::kNotASymbol);
3445 }
3446 __ JumpIfNotObjectType(object, SYMBOL_TYPE,
3447 __ GetDeoptLabel(this, DeoptimizeReason::kNotASymbol));
3448}
3449
3450void CheckInstanceType::SetValueLocationConstraints() {
3451 UseRegister(receiver_input());
3452}
3453void CheckInstanceType::GenerateCode(MaglevAssembler* masm,
3454 const ProcessingState& state) {
3455 Register object = ToRegister(receiver_input());
3456 if (check_type() == CheckType::kOmitHeapObjectCheck) {
3457 __ AssertNotSmi(object);
3458 } else {
3459 __ EmitEagerDeoptIfSmi(this, object, DeoptimizeReason::kWrongInstanceType);
3460 }
3461 if (first_instance_type_ == last_instance_type_) {
3462 __ JumpIfNotObjectType(
3463 object, first_instance_type_,
3464 __ GetDeoptLabel(this, DeoptimizeReason::kWrongInstanceType));
3465 } else {
3466 __ JumpIfObjectTypeNotInRange(
3467 object, first_instance_type_, last_instance_type_,
3468 __ GetDeoptLabel(this, DeoptimizeReason::kWrongInstanceType));
3469 }
3470}
3471
3472void CheckCacheIndicesNotCleared::SetValueLocationConstraints() {
3473 UseRegister(indices_input());
3474 UseRegister(length_input());
3475}
3476void CheckCacheIndicesNotCleared::GenerateCode(MaglevAssembler* masm,
3477 const ProcessingState& state) {
3478 Register indices = ToRegister(indices_input());
3479 Register length = ToRegister(length_input());
3480 __ AssertNotSmi(indices);
3481
3482 if (v8_flags.debug_code) {
3483 __ AssertObjectType(indices, FIXED_ARRAY_TYPE,
3484 AbortReason::kOperandIsNotAFixedArray);
3485 }
3486 Label done;
3487 // If the cache length is zero, we don't have any indices, so we know this is
3488 // ok even though the indices are the empty array.
3489 __ CompareInt32AndJumpIf(length, 0, kEqual, &done);
3490 // Otherwise, an empty array with non-zero required length is not valid.
3491 __ JumpIfRoot(indices, RootIndex::kEmptyFixedArray,
3492 __ GetDeoptLabel(this, DeoptimizeReason::kWrongEnumIndices));
3493 __ bind(&done);
3494}
3495
3496void CheckTypedArrayBounds::SetValueLocationConstraints() {
3497 UseRegister(index_input());
3498 UseRegister(length_input());
3499}
3500void CheckTypedArrayBounds::GenerateCode(MaglevAssembler* masm,
3501 const ProcessingState& state) {
3502 Register index = ToRegister(index_input());
3503 Register length = ToRegister(length_input());
3504 // The index must be a zero-extended Uint32 for this to work.
3505#ifdef V8_TARGET_ARCH_RISCV64
3506 // All Word32 values are sign-extended in registers on RISC-V.
3507 __ ZeroExtendWord(index, index);
3508#endif
3509 __ AssertZeroExtended(index);
3510 __ CompareIntPtrAndJumpIf(
3511 index, length, kUnsignedGreaterThanEqual,
3512 __ GetDeoptLabel(this, DeoptimizeReason::kOutOfBounds));
3513}
3514
3515void CheckInt32Condition::SetValueLocationConstraints() {
3516 UseRegister(left_input());
3517 UseRegister(right_input());
3518}
3519void CheckInt32Condition::GenerateCode(MaglevAssembler* masm,
3520 const ProcessingState& state) {
3521 Label* fail = __ GetDeoptLabel(this, deoptimize_reason());
3522 __ CompareInt32AndJumpIf(ToRegister(left_input()), ToRegister(right_input()),
3523 NegateCondition(ToCondition(condition())), fail);
3524}
3525
3526int StoreScriptContextSlotWithWriteBarrier::MaxCallStackArgs() const {
3527 return WriteBarrierDescriptor::GetStackParameterCount();
3528}
3529
3530void StoreScriptContextSlotWithWriteBarrier::SetValueLocationConstraints() {
3531 UseFixed(context_input(), WriteBarrierDescriptor::ObjectRegister());
3532 UseRegister(new_value_input());
3533 set_temporaries_needed(2);
3534 set_double_temporaries_needed(1);
3535}
3536
3537void StoreScriptContextSlotWithWriteBarrier::GenerateCode(
3538 MaglevAssembler* masm, const ProcessingState& state) {
3539 __ RecordComment("StoreScriptContextSlotWithWriteBarrier");
3540 ZoneLabelRef done(masm);
3541 ZoneLabelRef do_normal_store(masm);
3542
3543 // TODO(leszeks): Consider making this an arbitrary register and push/popping
3544 // in the deferred path.
3545 Register context = WriteBarrierDescriptor::ObjectRegister();
3546 Register new_value = ToRegister(new_value_input());
3547 MaglevAssembler::TemporaryRegisterScope temps(masm);
3548 Register scratch = temps.Acquire();
3549 Register old_value = temps.Acquire();
3550
3551 __ AssertObjectType(context, SCRIPT_CONTEXT_TYPE,
3552 AbortReason::kUnexpectedInstanceType);
3553
3554 __ LoadTaggedField(old_value, context, offset());
3555 __ CompareTaggedAndJumpIf(old_value, new_value, kEqual, *done);
3556
3557 // Load property.
3558 // TODO(victorgomes): Should we hoist the side_table?
3559 __ LoadTaggedField(
3560 scratch, context,
3561 Context::OffsetOfElementAt(Context::CONTEXT_SIDE_TABLE_PROPERTY_INDEX));
3562 __ LoadTaggedField(scratch, scratch,
3563 FixedArray::OffsetOfElementAt(
3564 index() - Context::MIN_CONTEXT_EXTENDED_SLOTS));
3565
3566 __ CompareTaggedAndJumpIf(
3567 scratch, ContextSidePropertyCell::Other(), kNotEqual,
3568 __ MakeDeferredCode(
3569 [](MaglevAssembler* masm, Register context, Register old_value,
3570 Register new_value, Register property,
3571 StoreScriptContextSlotWithWriteBarrier* node, ZoneLabelRef done,
3572 ZoneLabelRef do_normal_store) {
3573 Label check_smi, check_mutable_int32, mutable_heap_number;
3574 __ CompareRootAndEmitEagerDeoptIf(
3575 property, RootIndex::kUndefinedValue, kEqual,
3576 DeoptimizeReason::kStoreToConstant, node);
3577 __ JumpIfSmi(property, &check_smi);
3578 __ AssertObjectType(property, CONTEXT_SIDE_PROPERTY_CELL_TYPE,
3579 AbortReason::kUnexpectedInstanceType);
3580 __ LoadTaggedField(
3581 property, property,
3582 ContextSidePropertyCell::kPropertyDetailsRawOffset);
3583 __ bind(&check_smi);
3584
3585 // Check for const case.
3586 __ CompareTaggedAndJumpIf(
3587 property, ContextSidePropertyCell::Const(), kEqual,
3588 __ GetDeoptLabel(node, DeoptimizeReason::kStoreToConstant));
3589
3590 if (v8_flags.script_context_mutable_heap_number) {
3591 // Check for smi case
3592 __ CompareTaggedAndJumpIf(property,
3593 ContextSidePropertyCell::SmiMarker(),
3594 kNotEqual, &check_mutable_int32);
3595 __ EmitEagerDeoptIfNotSmi(node, new_value,
3596 DeoptimizeReason::kStoreToConstant);
3597 __ Jump(*do_normal_store);
3598
3599 MaglevAssembler::TemporaryRegisterScope temps(masm);
3600 DoubleRegister double_scratch = temps.AcquireDouble();
3601
3602 // Check mutable int32 case.
3603 __ bind(&check_mutable_int32);
3604 if (v8_flags.script_context_mutable_heap_int32) {
3605 __ CompareTaggedAndJumpIf(
3606 property, ContextSidePropertyCell::MutableInt32(),
3607 kNotEqual, &mutable_heap_number);
3608 {
3609 Label new_value_is_not_smi;
3610 Register new_value_int32 = property;
3611 __ JumpIfNotSmi(new_value, &new_value_is_not_smi);
3612 __ SmiUntag(new_value_int32, new_value);
3613 __ StoreHeapInt32Value(new_value_int32, old_value);
3614 __ Jump(*done);
3615
3616 __ bind(&new_value_is_not_smi);
3617 __ CompareMapWithRoot(new_value, RootIndex::kHeapNumberMap,
3618 property);
3619 __ EmitEagerDeoptIf(kNotEqual,
3620 DeoptimizeReason::kStoreToConstant, node);
3621
3622 __ LoadHeapNumberValue(double_scratch, new_value);
3623 __ TryTruncateDoubleToInt32(
3624 new_value_int32, double_scratch,
3625 __ GetDeoptLabel(node,
3626 DeoptimizeReason::kStoreToConstant));
3627 __ StoreHeapInt32Value(new_value_int32, old_value);
3628 __ Jump(*done);
3629 }
3630 }
3631
3632 // Check mutable heap number case.
3633 __ bind(&mutable_heap_number);
3634 {
3635 Label new_value_is_not_smi;
3636 Register new_value_int32 = property;
3637 __ JumpIfNotSmi(new_value, &new_value_is_not_smi);
3638 __ SmiUntag(new_value_int32, new_value);
3639 __ Int32ToDouble(double_scratch, new_value_int32);
3640 __ StoreHeapNumberValue(double_scratch, old_value);
3641 __ Jump(*done);
3642
3643 __ bind(&new_value_is_not_smi);
3644 __ CompareMapWithRoot(new_value, RootIndex::kHeapNumberMap,
3645 property);
3646 __ EmitEagerDeoptIf(kNotEqual,
3647 DeoptimizeReason::kStoreToConstant, node);
3648 __ LoadHeapNumberValue(double_scratch, new_value);
3649 __ StoreHeapNumberValue(double_scratch, old_value);
3650 __ Jump(*done);
3651 }
3652 } else {
3653 __ Jump(*do_normal_store);
3654 }
3655 },
3656 context, old_value, new_value, scratch, this, done, do_normal_store));
3657
3658 __ bind(*do_normal_store);
3659 __ StoreTaggedFieldWithWriteBarrier(
3660 context, offset(), new_value, register_snapshot(),
3661 new_value_input().node()->decompresses_tagged_result()
3662 ? MaglevAssembler::kValueIsDecompressed
3663 : MaglevAssembler::kValueIsCompressed,
3664 MaglevAssembler::kValueCanBeSmi);
3665
3666 __ bind(*done);
3667}
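// [Editor's note, assumption not in the original source: the side-table
// values compared above (Const, SmiMarker, MutableInt32, MutableHeapNumber,
// Other) appear to describe progressively more general slot representations;
// the deferred path deopts on stores to const slots, updates mutable int32
// and heap-number slots in place, and otherwise falls back to the normal
// tagged store.]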
3668
3669void CheckString::SetValueLocationConstraints() {
3670 UseRegister(receiver_input());
3671}
3672void CheckString::GenerateCode(MaglevAssembler* masm,
3673 const ProcessingState& state) {
3674 Register object = ToRegister(receiver_input());
3675 if (check_type() == CheckType::kOmitHeapObjectCheck) {
3676 __ AssertNotSmi(object);
3677 } else {
3678 __ EmitEagerDeoptIfSmi(this, object, DeoptimizeReason::kNotAString);
3679 }
3680 __ JumpIfNotString(object,
3681 __ GetDeoptLabel(this, DeoptimizeReason::kNotAString));
3682}
3683
3684void CheckStringOrStringWrapper::SetValueLocationConstraints() {
3685 UseRegister(receiver_input());
3686 set_temporaries_needed(1);
3687}
3688
3689void CheckStringOrStringWrapper::GenerateCode(MaglevAssembler* masm,
3690 const ProcessingState& state) {
3691 Register object = ToRegister(receiver_input());
3692
3693 if (check_type() == CheckType::kOmitHeapObjectCheck) {
3694 __ AssertNotSmi(object);
3695 } else {
3696 __ EmitEagerDeoptIfSmi(this, object,
3697 DeoptimizeReason::kNotAStringOrStringWrapper);
3698 }
3699
3700 auto deopt =
3701 __ GetDeoptLabel(this, DeoptimizeReason::kNotAStringOrStringWrapper);
3702 Label done;
3703
3704 MaglevAssembler::TemporaryRegisterScope temps(masm);
3705 Register scratch = temps.Acquire();
3706
3707 __ JumpIfString(object, &done);
3708 __ JumpIfNotObjectType(object, InstanceType::JS_PRIMITIVE_WRAPPER_TYPE,
3709 deopt);
3710 __ LoadMap(scratch, object);
3711 __ LoadBitField<Map::Bits2::ElementsKindBits>(
3712 scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
3713 static_assert(FAST_STRING_WRAPPER_ELEMENTS + 1 ==
3714 SLOW_STRING_WRAPPER_ELEMENTS);
3715 __ CompareInt32AndJumpIf(scratch, FAST_STRING_WRAPPER_ELEMENTS, kLessThan,
3716 deopt);
3717 __ CompareInt32AndJumpIf(scratch, SLOW_STRING_WRAPPER_ELEMENTS, kGreaterThan,
3718 deopt);
3719 __ Jump(&done);
3720 __ bind(&done);
3721}
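// [Editor's note, not in the original source: the two compares above
// implement a range check -- deopt unless the elements kind lies in
// [FAST_STRING_WRAPPER_ELEMENTS, SLOW_STRING_WRAPPER_ELEMENTS] -- and the
// static_assert guarantees that range is contiguous.]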
3722
3723void CheckDetectableCallable::SetValueLocationConstraints() {
3724 UseRegister(object_input());
3725 set_temporaries_needed(1);
3726}
3727
3728void CheckDetectableCallable::GenerateCode(MaglevAssembler* masm,
3729 const ProcessingState& state) {
3730 Register object = ToRegister(object_input());
3731 MaglevAssembler::TemporaryRegisterScope temps(masm);
3732 Register scratch = temps.Acquire();
3733 auto deopt = __ GetDeoptLabel(this, DeoptimizeReason::kNotDetectableReceiver);
3734 __ JumpIfNotCallable(object, scratch, check_type(), deopt);
3735 __ JumpIfUndetectable(object, scratch, CheckType::kOmitHeapObjectCheck,
3736 deopt);
3737}
3738
3739void CheckJSReceiverOrNullOrUndefined::SetValueLocationConstraints() {
3740 UseRegister(object_input());
3741 set_temporaries_needed(1);
3742}
3743void CheckJSReceiverOrNullOrUndefined::GenerateCode(
3744 MaglevAssembler* masm, const ProcessingState& state) {
3745 Register object = ToRegister(object_input());
3746
3747 MaglevAssembler::TemporaryRegisterScope temps(masm);
3748 Register scratch = temps.Acquire();
3749
3750 if (check_type() == CheckType::kOmitHeapObjectCheck) {
3751 __ AssertNotSmi(object);
3752 } else {
3753 __ EmitEagerDeoptIfSmi(
3754 this, object, DeoptimizeReason::kNotAJavaScriptObjectOrNullOrUndefined);
3755 }
3756
3757 Label done;
3758 // Undetectable (document.all) is a JSReceiverOrNullOrUndefined. We already
3759 // checked for Smis above, so no check needed here.
3760 __ JumpIfUndetectable(object, scratch, CheckType::kOmitHeapObjectCheck,
3761 &done);
3762 __ JumpIfObjectTypeNotInRange(
3763 object, FIRST_JS_RECEIVER_TYPE, LAST_JS_RECEIVER_TYPE,
3764 __ GetDeoptLabel(
3765 this, DeoptimizeReason::kNotAJavaScriptObjectOrNullOrUndefined));
3766 __ Jump(&done);
3767 __ bind(&done);
3768}
3769
3770void CheckNotHole::SetValueLocationConstraints() {
3771 UseRegister(object_input());
3772}
3773void CheckNotHole::GenerateCode(MaglevAssembler* masm,
3774 const ProcessingState& state) {
3775 __ CompareRootAndEmitEagerDeoptIf(ToRegister(object_input()),
3776 RootIndex::kTheHoleValue, kEqual,
3777 DeoptimizeReason::kHole, this);
3778}
3779
3780void CheckHoleyFloat64NotHole::SetValueLocationConstraints() {
3781 UseRegister(float64_input());
3782 set_temporaries_needed(1);
3783}
3784void CheckHoleyFloat64NotHole::GenerateCode(MaglevAssembler* masm,
3785 const ProcessingState& state) {
3786 MaglevAssembler::TemporaryRegisterScope temps(masm);
3787 Register scratch = temps.AcquireScratch();
3788 __ JumpIfHoleNan(ToDoubleRegister(float64_input()), scratch,
3789 __ GetDeoptLabel(this, DeoptimizeReason::kHole),
3790 Label::kFar);
3791}
3792
3793void ConvertHoleToUndefined::SetValueLocationConstraints() {
3794 UseRegister(object_input());
3795 DefineSameAsFirst(this);
3796}
3797void ConvertHoleToUndefined::GenerateCode(MaglevAssembler* masm,
3798 const ProcessingState& state) {
3799 Label done;
3800 DCHECK_EQ(ToRegister(object_input()), ToRegister(result()));
3801 __ JumpIfNotRoot(ToRegister(object_input()), RootIndex::kTheHoleValue, &done);
3802 __ LoadRoot(ToRegister(result()), RootIndex::kUndefinedValue);
3803 __ bind(&done);
3804}
3805
3806int ConvertReceiver::MaxCallStackArgs() const {
3807 using D = CallInterfaceDescriptorFor<Builtin::kToObject>::type;
3808 return D::GetStackParameterCount();
3809}
3810void ConvertReceiver::SetValueLocationConstraints() {
3811 using D = CallInterfaceDescriptorFor<Builtin::kToObject>::type;
3812 static_assert(D::GetRegisterParameter(D::kInput) == kReturnRegister0);
3813 UseFixed(receiver_input(), D::GetRegisterParameter(D::kInput));
3814 DefineAsFixed(this, kReturnRegister0);
3815}
3816void ConvertReceiver::GenerateCode(MaglevAssembler* masm,
3817 const ProcessingState& state) {
3818 Label convert_to_object, done;
3819 Register receiver = ToRegister(receiver_input());
3820 __ JumpIfSmi(
3821 receiver, &convert_to_object,
3822 v8_flags.debug_code ? Label::Distance::kFar : Label::Distance::kNear);
3823
3824 // If {receiver} is not primitive, no need to move it to {result}, since
3825 // they share the same register.
3826 DCHECK_EQ(receiver, ToRegister(result()));
3827 __ JumpIfJSAnyIsNotPrimitive(receiver, &done);
3828
3829 compiler::JSHeapBroker* broker = masm->compilation_info()->broker();
3830 if (mode() != ConvertReceiverMode::kNotNullOrUndefined) {
3831 Label convert_global_proxy;
3832 __ JumpIfRoot(receiver, RootIndex::kUndefinedValue, &convert_global_proxy,
3833 Label::Distance::kNear);
3834 __ JumpIfNotRoot(
3835 receiver, RootIndex::kNullValue, &convert_to_object,
3836 Label::Distance::kNear);
3837 __ bind(&convert_global_proxy);
3838 // Patch receiver to global proxy.
3839 __ Move(ToRegister(result()),
3840 native_context_.global_proxy_object(broker).object());
3841 __ Jump(&done);
3842 }
3843
3844 __ bind(&convert_to_object);
3845 __ CallBuiltin<Builtin::kToObject>(native_context_.object(),
3846 receiver_input());
3847 __ bind(&done);
3848}
3849
3850int CheckDerivedConstructResult::MaxCallStackArgs() const { return 0; }
3851void CheckDerivedConstructResult::SetValueLocationConstraints() {
3852 UseRegister(construct_result_input());
3851void CheckDerivedConstructResult::SetValueLocationConstraints() {
3852 UseRegister(construct_result_input());
3853 DefineSameAsFirst(this);
3854}
3855void CheckDerivedConstructResult::GenerateCode(MaglevAssembler* masm,
3856 const ProcessingState& state) {
3857 Register construct_result = ToRegister(construct_result_input());
3858
3859 DCHECK_EQ(construct_result, ToRegister(result()));
3860
3861 // If the result is an object (in the ECMA sense), we should get rid
3862 // of the receiver and use the result; see ECMA-262 section 13.2.2-7
3863 // on page 74.
3864 Label done, do_throw;
3865
3866 __ CompareRoot(construct_result, RootIndex::kUndefinedValue);
3867 __ Assert(kNotEqual, AbortReason::kUnexpectedValue);
3868
3869 // If the result is a smi, it is *not* an object in the ECMA sense.
3870 __ JumpIfSmi(construct_result, &do_throw, Label::Distance::kNear);
3871
3872 // Check if the type of the result is not an object in the ECMA sense.
3873 __ JumpIfJSAnyIsNotPrimitive(construct_result, &done, Label::Distance::kNear);
3874
3875 // Throw away the result of the constructor invocation and use the
3876 // implicit receiver as the result.
3877 __ bind(&do_throw);
3878 __ Jump(__ MakeDeferredCode(
3879 [](MaglevAssembler* masm, CheckDerivedConstructResult* node) {
3880 __ Move(kContextRegister, masm->native_context().object());
3881 __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
3882 masm->DefineExceptionHandlerAndLazyDeoptPoint(node);
3883 __ Abort(AbortReason::kUnexpectedReturnFromThrow);
3884 },
3885 this));
3886
3887 __ bind(&done);
3888}
3889
3890int CheckConstructResult::MaxCallStackArgs() const { return 0; }
3891void CheckConstructResult::SetValueLocationConstraints() {
3892 UseRegister(construct_result_input());
3893 UseRegister(implicit_receiver_input());
3894 DefineSameAsFirst(this);
3895}
3896void CheckConstructResult::GenerateCode(MaglevAssembler* masm,
3897 const ProcessingState& state) {
3898 Register construct_result = ToRegister(construct_result_input());
3899 Register result_reg = ToRegister(result());
3900
3901 DCHECK_EQ(construct_result, result_reg);
3902
3903 // If the result is an object (in the ECMA sense), we should get rid
3904 // of the receiver and use the result; see ECMA-262 section 13.2.2-7
3905 // on page 74.
3906 Label done, use_receiver;
3907
3908 // If the result is undefined, we'll use the implicit receiver.
3909 __ JumpIfRoot(construct_result, RootIndex::kUndefinedValue, &use_receiver,
3910 Label::Distance::kNear);
3911
3912 // If the result is a smi, it is *not* an object in the ECMA sense.
3913 __ JumpIfSmi(construct_result, &use_receiver, Label::Distance::kNear);
3914
3915 // Check if the type of the result is not an object in the ECMA sense.
3916 __ JumpIfJSAnyIsNotPrimitive(construct_result, &done, Label::Distance::kNear);
3917
3918 // Throw away the result of the constructor invocation and use the
3919 // implicit receiver as the result.
3920 __ bind(&use_receiver);
3921 Register implicit_receiver = ToRegister(implicit_receiver_input());
3922 __ Move(result_reg, implicit_receiver);
3923
3924 __ bind(&done);
3925}
3926
3927int CreateObjectLiteral::MaxCallStackArgs() const {
3928 DCHECK_EQ(Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->nargs, 4);
3929 return 4;
3930}
3931void CreateObjectLiteral::SetValueLocationConstraints() {
3932 DefineAsFixed(this, kReturnRegister0);
3933}
3934void CreateObjectLiteral::GenerateCode(MaglevAssembler* masm,
3935 const ProcessingState& state) {
3936 __ CallBuiltin<Builtin::kCreateObjectFromSlowBoilerplate>(
3937 masm->native_context().object(), // context
3938 feedback().vector, // feedback vector
3939 TaggedIndex::FromIntptr(feedback().index()), // feedback slot
3940 boilerplate_descriptor().object(), // boilerplate descriptor
3941 Smi::FromInt(flags()) // flags
3942 );
3943 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
3944}
3945
3946int CreateShallowArrayLiteral::MaxCallStackArgs() const {
3947 using D =
3948 CallInterfaceDescriptorFor<Builtin::kCreateShallowArrayLiteral>::type;
3949 return D::GetStackParameterCount();
3950}
3951void CreateShallowArrayLiteral::SetValueLocationConstraints() {
3952 DefineAsFixed(this, kReturnRegister0);
3953}
3954void CreateShallowArrayLiteral::GenerateCode(MaglevAssembler* masm,
3955 const ProcessingState& state) {
3956 __ CallBuiltin<Builtin::kCreateShallowArrayLiteral>(
3957 masm->native_context().object(), // context
3958 feedback().vector, // feedback vector
3959 TaggedIndex::FromIntptr(feedback().index()), // feedback slot
3960 constant_elements().object(), // constant elements
3961 Smi::FromInt(flags()) // flags
3962 );
3963 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
3964}
3965
3966int CreateArrayLiteral::MaxCallStackArgs() const {
3967 DCHECK_EQ(Runtime::FunctionForId(Runtime::kCreateArrayLiteral)->nargs, 4);
3968 return 4;
3969}
3970void CreateArrayLiteral::SetValueLocationConstraints() {
3971 DefineAsFixed(this, kReturnRegister0);
3972}
3973void CreateArrayLiteral::GenerateCode(MaglevAssembler* masm,
3974 const ProcessingState& state) {
3975 __ CallBuiltin<Builtin::kCreateArrayFromSlowBoilerplate>(
3976 masm->native_context().object(), // context
3977 feedback().vector, // feedback vector
3978 TaggedIndex::FromIntptr(feedback().index()), // feedback slot
3979 constant_elements().object(), // boilerplate descriptor
3980 Smi::FromInt(flags()) // flags
3981 );
3982 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
3983}
3984
3985int CreateShallowObjectLiteral::MaxCallStackArgs() const {
3986 using D =
3987 CallInterfaceDescriptorFor<Builtin::kCreateShallowObjectLiteral>::type;
3988 return D::GetStackParameterCount();
3989}
3990void CreateShallowObjectLiteral::SetValueLocationConstraints() {
3991 DefineAsFixed(this, kReturnRegister0);
3992}
3993void CreateShallowObjectLiteral::GenerateCode(MaglevAssembler* masm,
3994 const ProcessingState& state) {
3995 __ CallBuiltin<Builtin::kCreateShallowObjectLiteral>(
3996 masm->native_context().object(), // context
3997 feedback().vector, // feedback vector
3998 TaggedIndex::FromIntptr(feedback().index()), // feedback slot
3999 boilerplate_descriptor().object(), // desc
4000 Smi::FromInt(flags()) // flags
4001 );
4002 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
4003}
4004
4005void AllocationBlock::SetValueLocationConstraints() { DefineAsRegister(this); }
4006
4007void AllocationBlock::GenerateCode(MaglevAssembler* masm,
4008 const ProcessingState& state) {
4009 __ Allocate(register_snapshot(), ToRegister(result()), size(),
4010 allocation_type());
4011}
4012
4013int CreateClosure::MaxCallStackArgs() const {
4014 DCHECK_EQ(Runtime::FunctionForId(pretenured() ? Runtime::kNewClosure_Tenured
4015 : Runtime::kNewClosure)
4016 ->nargs,
4017 2);
4018 return 2;
4019}
4020void CreateClosure::SetValueLocationConstraints() {
4021 UseFixed(context(), kContextRegister);
4022 DefineAsFixed(this, kReturnRegister0);
4023}
4024void CreateClosure::GenerateCode(MaglevAssembler* masm,
4025 const ProcessingState& state) {
4026 Runtime::FunctionId function_id =
4027 pretenured() ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure;
4028 __ Push(shared_function_info().object(), feedback_cell().object());
4029 __ CallRuntime(function_id);
4030}
4031
4032int FastCreateClosure::MaxCallStackArgs() const {
4033 using D = CallInterfaceDescriptorFor<Builtin::kFastNewClosure>::type;
4034 return D::GetStackParameterCount();
4035}
4036void FastCreateClosure::SetValueLocationConstraints() {
4037 using D = CallInterfaceDescriptorFor<Builtin::kFastNewClosure>::type;
4038 static_assert(D::HasContextParameter());
4039 UseFixed(context(), D::ContextRegister());
4040 DefineAsFixed(this, kReturnRegister0);
4041}
4042void FastCreateClosure::GenerateCode(MaglevAssembler* masm,
4043 const ProcessingState& state) {
4044 __ CallBuiltin<Builtin::kFastNewClosure>(
4045 context(), // context
4046 shared_function_info().object(), // shared function info
4047 feedback_cell().object() // feedback cell
4048 );
4049 masm->DefineLazyDeoptPoint(lazy_deopt_info());
4050}
4051
4052int CreateFunctionContext::MaxCallStackArgs() const {
4053 if (scope_type() == FUNCTION_SCOPE) {
4054 using D = CallInterfaceDescriptorFor<
4055 Builtin::kFastNewFunctionContextFunction>::type;
4056 return D::GetStackParameterCount();
4057 } else {
4058 using D =
4059 CallInterfaceDescriptorFor<Builtin::kFastNewFunctionContextEval>::type;
4060 return D::GetStackParameterCount();
4061 }
4062}
4063void CreateFunctionContext::SetValueLocationConstraints() {
4064 DCHECK_LE(slot_count(),
4065 static_cast<uint32_t>(
4066 ConstructorBuiltins::MaximumFunctionContextSlots()));
4067 if (scope_type() == FUNCTION_SCOPE) {
4068 using D = CallInterfaceDescriptorFor<
4069 Builtin::kFastNewFunctionContextFunction>::type;
4070 static_assert(D::HasContextParameter());
4071 UseFixed(context(), D::ContextRegister());
4072 } else {
4073 DCHECK_EQ(scope_type(), EVAL_SCOPE);
4074 using D =
4075 CallInterfaceDescriptorFor<Builtin::kFastNewFunctionContextEval>::type;
4076 static_assert(D::HasContextParameter());
4077 UseFixed(context(), D::ContextRegister());
4078 }
4079 DefineAsFixed(this, kReturnRegister0);
4080}
4081void CreateFunctionContext::GenerateCode(MaglevAssembler* masm,
4082 const ProcessingState& state) {
4083 if (scope_type() == FUNCTION_SCOPE) {
4084 __ CallBuiltin<Builtin::kFastNewFunctionContextFunction>(
4085 context(), // context
4086 scope_info().object(), // scope info
4087 slot_count() // slots
4088 );
4089 } else {
4090 __ CallBuiltin<Builtin::kFastNewFunctionContextEval>(
4091 context(), // context
4092 scope_info().object(), // scope info
4093 slot_count() // slots
4094 );
4095 }
4096 masm->DefineLazyDeoptPoint(lazy_deopt_info());
4097}
4098
4099int CreateRegExpLiteral::MaxCallStackArgs() const {
4100 using D = CallInterfaceDescriptorFor<Builtin::kCreateRegExpLiteral>::type;
4101 return D::GetStackParameterCount();
4102}
4103void CreateRegExpLiteral::SetValueLocationConstraints() {
4104 DefineAsFixed(this, kReturnRegister0);
4105}
4106void CreateRegExpLiteral::GenerateCode(MaglevAssembler* masm,
4107 const ProcessingState& state) {
4108 __ CallBuiltin<Builtin::kCreateRegExpLiteral>(
4109 masm->native_context().object(), // context
4110 feedback().vector, // feedback vector
4111 TaggedIndex::FromIntptr(feedback().index()), // feedback slot
4112 pattern().object(), // pattern
4113 Smi::FromInt(flags()) // flags
4114 );
4115 masm->DefineLazyDeoptPoint(lazy_deopt_info());
4116}
4117
4118int GetTemplateObject::MaxCallStackArgs() const {
4119 using D = CallInterfaceDescriptorFor<Builtin::kGetTemplateObject>::type;
4120 return D::GetStackParameterCount();
4121}
4122void GetTemplateObject::SetValueLocationConstraints() {
4123 using D = GetTemplateObjectDescriptor;
4124 UseFixed(description(), D::GetRegisterParameter(D::kDescription));
4125 DefineAsFixed(this, kReturnRegister0);
4126}
4127void GetTemplateObject::GenerateCode(MaglevAssembler* masm,
4128 const ProcessingState& state) {
4129 __ CallBuiltin<Builtin::kGetTemplateObject>(
4130 masm->native_context().object(), // context
4131 shared_function_info_.object(), // shared function info
4132 description(), // description
4133 feedback().index(), // feedback slot
4134 feedback().vector // feedback vector
4135 );
4136 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
4137}
4138
4139int HasInPrototypeChain::MaxCallStackArgs() const {
4140 DCHECK_EQ(2, Runtime::FunctionForId(Runtime::kHasInPrototypeChain)->nargs);
4141 return 2;
4142}
4143void HasInPrototypeChain::SetValueLocationConstraints() {
4144 UseRegister(object());
4145 DefineAsRegister(this);
4146 set_temporaries_needed(2);
4147}
4148void HasInPrototypeChain::GenerateCode(MaglevAssembler* masm,
4149 const ProcessingState& state) {
4150 MaglevAssembler::TemporaryRegisterScope temps(masm);
4151 Register object_reg = ToRegister(object());
4152 Register result_reg = ToRegister(result());
4153
4154 Label return_false, return_true;
4155 ZoneLabelRef done(masm);
4156
4157 __ JumpIfSmi(object_reg, &return_false,
4158 v8_flags.debug_code ? Label::kFar : Label::kNear);
4159
4160 // Loop through the prototype chain looking for the {prototype}.
4161 Register map = temps.Acquire();
4162 __ LoadMap(map, object_reg);
4163 Label loop;
4164 {
4165 __ bind(&loop);
4166 Register scratch = temps.Acquire();
4167 // Check if we can determine the prototype directly from the {object_map}.
4168 ZoneLabelRef if_objectisdirect(masm);
4169 Register instance_type = scratch;
4170 Condition jump_cond = __ CompareInstanceTypeRange(
4171 map, instance_type, FIRST_TYPE, LAST_SPECIAL_RECEIVER_TYPE);
4172 __ JumpToDeferredIf(
4173 jump_cond,
4174 [](MaglevAssembler* masm, RegisterSnapshot snapshot,
4175 Register object_reg, Register map, Register instance_type,
4176 Register result_reg, HasInPrototypeChain* node,
4177 ZoneLabelRef if_objectisdirect, ZoneLabelRef done) {
4178 Label return_runtime;
4179 // The {object_map} is a special receiver map or a primitive map,
4180 // check if we need to use the if_objectisspecial path in the runtime.
4181 __ JumpIfEqual(instance_type, JS_PROXY_TYPE, &return_runtime);
4182
4183 int mask = Map::Bits1::HasNamedInterceptorBit::kMask |
4184 Map::Bits1::IsAccessCheckNeededBit::kMask;
4185 __ TestUint8AndJumpIfAllClear(
4186 FieldMemOperand(map, Map::kBitFieldOffset), mask,
4187 *if_objectisdirect);
4188
4189 __ bind(&return_runtime);
4190 {
4191 snapshot.live_registers.clear(result_reg);
4192 SaveRegisterStateForCall save_register_state(masm, snapshot);
4193 __ Push(object_reg, node->prototype().object());
4194 __ Move(kContextRegister, masm->native_context().object());
4195 __ CallRuntime(Runtime::kHasInPrototypeChain, 2);
4196 masm->DefineExceptionHandlerPoint(node);
4197 save_register_state.DefineSafepointWithLazyDeopt(
4198 node->lazy_deopt_info());
4199 __ Move(result_reg, kReturnRegister0);
4200 }
4201 __ Jump(*done);
4202 },
4203 register_snapshot(), object_reg, map, instance_type, result_reg, this,
4204 if_objectisdirect, done);
4205 instance_type = Register::no_reg();
4206
4207 __ bind(*if_objectisdirect);
4208 // Check the current {object} prototype.
4209 Register object_prototype = scratch;
4210 __ LoadTaggedField(object_prototype, map, Map::kPrototypeOffset);
4211 __ JumpIfRoot(object_prototype, RootIndex::kNullValue, &return_false,
4212 v8_flags.debug_code ? Label::kFar : Label::kNear);
4213 __ CompareTaggedAndJumpIf(object_prototype, prototype().object(), kEqual,
4214 &return_true, Label::kNear);
4215
4216 // Continue with the prototype.
4217 __ AssertNotSmi(object_prototype);
4218 __ LoadMap(map, object_prototype);
4219 __ Jump(&loop);
4220 }
4221
4222 __ bind(&return_true);
4223 __ LoadRoot(result_reg, RootIndex::kTrueValue);
4224 __ Jump(*done, Label::kNear);
4225
4226 __ bind(&return_false);
4227 __ LoadRoot(result_reg, RootIndex::kFalseValue);
4228 __ bind(*done);
4229}
4230
4231void DebugBreak::SetValueLocationConstraints() {}
4232void DebugBreak::GenerateCode(MaglevAssembler* masm,
4233 const ProcessingState& state) {
4234 __ DebugBreak();
4235}
4236
4237int Abort::MaxCallStackArgs() const {
4238 DCHECK_EQ(Runtime::FunctionForId(Runtime::kAbort)->nargs, 1);
4239 return 1;
4240}
4241void Abort::SetValueLocationConstraints() {}
4242void Abort::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
4243 __ Push(Smi::FromInt(static_cast<int>(reason())));
4244 __ CallRuntime(Runtime::kAbort, 1);
4245 __ Trap();
4246}
4247
4248void LogicalNot::SetValueLocationConstraints() {
4249 UseAny(value());
4250 DefineAsRegister(this);
4251}
4252void LogicalNot::GenerateCode(MaglevAssembler* masm,
4253 const ProcessingState& state) {
4254 if (v8_flags.debug_code) {
4255 // LogicalNot expects either TrueValue or FalseValue.
4256 Label next;
4257 __ JumpIf(__ IsRootConstant(value(), RootIndex::kFalseValue), &next);
4258 __ JumpIf(__ IsRootConstant(value(), RootIndex::kTrueValue), &next);
4259 __ Abort(AbortReason::kUnexpectedValue);
4260 __ bind(&next);
4261 }
4262
4263 Label return_false, done;
4264 __ JumpIf(__ IsRootConstant(value(), RootIndex::kTrueValue), &return_false);
4265 __ LoadRoot(ToRegister(result()), RootIndex::kTrueValue);
4266 __ Jump(&done);
4267
4268 __ bind(&return_false);
4269 __ LoadRoot(ToRegister(result()), RootIndex::kFalseValue);
4270
4271 __ bind(&done);
4272}
4273
4274int LoadNamedGeneric::MaxCallStackArgs() const {
4275 return LoadWithVectorDescriptor::GetStackParameterCount();
4276}
4277void LoadNamedGeneric::SetValueLocationConstraints() {
4278 using D = LoadWithVectorDescriptor;
4279 UseFixed(context(), kContextRegister);
4280 UseFixed(object_input(), D::GetRegisterParameter(D::kReceiver));
4281 DefineAsFixed(this, kReturnRegister0);
4282}
4283void LoadNamedGeneric::GenerateCode(MaglevAssembler* masm,
4284 const ProcessingState& state) {
4285 __ CallBuiltin<Builtin::kLoadIC>(
4286 context(), // context
4287 object_input(), // receiver
4288 name().object(), // name
4289 TaggedIndex::FromIntptr(feedback().index()), // feedback slot
4290 feedback().vector // feedback vector
4291 );
4292 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
4293}
4294
4295int LoadNamedFromSuperGeneric::MaxCallStackArgs() const {
4296 return LoadWithReceiverAndVectorDescriptor::GetStackParameterCount();
4297}
4298void LoadNamedFromSuperGeneric::SetValueLocationConstraints() {
4299 using D = LoadWithReceiverAndVectorDescriptor;
4300 UseFixed(context(), kContextRegister);
4301 UseFixed(receiver(), D::GetRegisterParameter(D::kReceiver));
4302 UseFixed(lookup_start_object(),
4303 D::GetRegisterParameter(D::kLookupStartObject));
4304 DefineAsFixed(this, kReturnRegister0);
4305}
4306void LoadNamedFromSuperGeneric::GenerateCode(MaglevAssembler* masm,
4307 const ProcessingState& state) {
4308 __ CallBuiltin<Builtin::kLoadSuperIC>(
4309 context(), // context
4310 receiver(), // receiver
4311 lookup_start_object(), // lookup start object
4312 name().object(), // name
4313 TaggedIndex::FromIntptr(feedback().index()), // feedback slot
4314 feedback().vector // feedback vector
4315 );
4316 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
4317}
4318
4319int SetNamedGeneric::MaxCallStackArgs() const {
4320 using D = CallInterfaceDescriptorFor<Builtin::kStoreIC>::type;
4321 return D::GetStackParameterCount();
4322}
4323void SetNamedGeneric::SetValueLocationConstraints() {
4324 using D = CallInterfaceDescriptorFor<Builtin::kStoreIC>::type;
4325 UseFixed(context(), kContextRegister);
4326 UseFixed(object_input(), D::GetRegisterParameter(D::kReceiver));
4327 UseFixed(value_input(), D::GetRegisterParameter(D::kValue));
4328 DefineAsFixed(this, kReturnRegister0);
4329}
4330void SetNamedGeneric::GenerateCode(MaglevAssembler* masm,
4331 const ProcessingState& state) {
4332 __ CallBuiltin<Builtin::kStoreIC>(
4333 context(), // context
4334 object_input(), // receiver
4335 name().object(), // name
4336 value_input(), // value
4337 TaggedIndex::FromIntptr(feedback().index()), // feedback slot
4338 feedback().vector // feedback vector
4339 );
4340 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
4341}
4342
4343int DefineNamedOwnGeneric::MaxCallStackArgs() const {
4344 using D = CallInterfaceDescriptorFor<Builtin::kDefineNamedOwnIC>::type;
4345 return D::GetStackParameterCount();
4346}
4347void DefineNamedOwnGeneric::SetValueLocationConstraints() {
4348 using D = CallInterfaceDescriptorFor<Builtin::kDefineNamedOwnIC>::type;
4349 UseFixed(context(), kContextRegister);
4350 UseFixed(object_input(), D::GetRegisterParameter(D::kReceiver));
4351 UseFixed(value_input(), D::GetRegisterParameter(D::kValue));
4352 DefineAsFixed(this, kReturnRegister0);
4353}
4354void DefineNamedOwnGeneric::GenerateCode(MaglevAssembler* masm,
4355 const ProcessingState& state) {
4356 __ CallBuiltin<Builtin::kDefineNamedOwnIC>(
4357 context(), // context
4358 object_input(), // receiver
4359 name().object(), // name
4360 value_input(), // value
4361 TaggedIndex::FromIntptr(feedback().index()), // feedback slot
4362 feedback().vector // feedback vector
4363 );
4364 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
4365}
4366
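// UpdateJSArrayLength: after a store to `index`, bumps a JSArray's length to
// index + 1 when the store grew the array; the result is the (possibly
// updated) length, Smi-tagged.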
4367void UpdateJSArrayLength::SetValueLocationConstraints() {
4368 UseRegister(length_input());
4369 UseRegister(object_input());
4370 UseRegister(index_input());
4371 DefineSameAsFirst(this);
4372}
4373
4374void UpdateJSArrayLength::GenerateCode(MaglevAssembler* masm,
4375 const ProcessingState& state) {
4376 Register length = ToRegister(length_input());
4377 Register object = ToRegister(object_input());
4378 Register index = ToRegister(index_input());
4379 DCHECK_EQ(length, ToRegister(result()));
4380
4381 Label done, tag_length;
4382 if (v8_flags.debug_code) {
4383 __ AssertObjectType(object, JS_ARRAY_TYPE, AbortReason::kUnexpectedValue);
4384 static_assert(Smi::IsValid(FixedArray::kMaxLength),
4385 "MaxLength not a Smi");
4386 __ CompareInt32AndAssert(index, FixedArray::kMaxLength, kUnsignedLessThan,
4387 AbortReason::kUnexpectedValue);
4388 }
4389 __ CompareInt32AndJumpIf(index, length, kUnsignedLessThan, &tag_length,
4390 Label::kNear);
4391 __ IncrementInt32(index); // This cannot overflow.
4392 __ SmiTag(length, index);
4393 __ StoreTaggedSignedField(object, JSArray::kLengthOffset, length);
4394 __ Jump(&done, Label::kNear);
4395 __ bind(&tag_length);
4396 __ SmiTag(length);
4397 __ bind(&done);
4398}
4399
4400void EnsureWritableFastElements::SetValueLocationConstraints() {
4401 UseRegister(elements_input());
4402 UseRegister(object_input());
4403 set_temporaries_needed(1);
4404 DefineSameAsFirst(this);
4405}
4406void EnsureWritableFastElements::GenerateCode(MaglevAssembler* masm,
4407 const ProcessingState& state) {
4408 Register object = ToRegister(object_input());
4409 Register elements = ToRegister(elements_input());
4410 DCHECK_EQ(elements, ToRegister(result()));
4411 MaglevAssembler::TemporaryRegisterScope temps(masm);
4412 Register scratch = temps.Acquire();
4413 __ EnsureWritableFastElements(register_snapshot(), elements, object, scratch);
4414}
4415
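// MaybeGrowFastElements: when `index` is at or beyond the current backing
// store capacity, grows the elements store via the GrowFastElements builtins
// in deferred code, and deopts (kCouldNotGrowElements) if the builtin signals
// failure by returning a Smi.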
4416void MaybeGrowFastElements::SetValueLocationConstraints() {
4417 UseRegister(elements_input());
4418 UseRegister(object_input());
4419 UseRegister(index_input());
4420 UseRegister(elements_length_input());
4421 if (IsSmiOrObjectElementsKind(elements_kind())) {
4422 set_temporaries_needed(1);
4423 }
4424 DefineSameAsFirst(this);
4425}
4426void MaybeGrowFastElements::GenerateCode(MaglevAssembler* masm,
4427 const ProcessingState& state) {
4428 Register elements = ToRegister(elements_input());
4429 Register object = ToRegister(object_input());
4430 Register index = ToRegister(index_input());
4431 Register elements_length = ToRegister(elements_length_input());
4432 DCHECK_EQ(elements, ToRegister(result()));
4433
4434 ZoneLabelRef done(masm);
4435
4436 __ CompareInt32AndJumpIf(
4437 index, elements_length, kUnsignedGreaterThanEqual,
4438 __ MakeDeferredCode(
4439 [](MaglevAssembler* masm, ZoneLabelRef done, Register object,
4440 Register index, Register result_reg, MaybeGrowFastElements* node) {
4441 {
4442 RegisterSnapshot snapshot = node->register_snapshot();
4443 snapshot.live_registers.clear(result_reg);
4444 snapshot.live_tagged_registers.clear(result_reg);
4445 SaveRegisterStateForCall save_register_state(masm, snapshot);
4446 using D = GrowArrayElementsDescriptor;
4447 if (index == D::GetRegisterParameter(D::kObject)) {
4448 // That implies that the first parameter move will clobber the
4449 // index value. So we use the result register as temporary.
4450 // TODO(leszeks): Use parallel moves to resolve cases like this.
4451 __ SmiTag(result_reg, index);
4452 index = result_reg;
4453 } else {
4454 __ SmiTag(index);
4455 }
4456 if (IsDoubleElementsKind(node->elements_kind())) {
4457 __ CallBuiltin<Builtin::kGrowFastDoubleElements>(object, index);
4458 } else {
4459 __ CallBuiltin<Builtin::kGrowFastSmiOrObjectElements>(object,
4460 index);
4461 }
4462 save_register_state.DefineSafepoint();
4463 __ Move(result_reg, kReturnRegister0);
4464 }
4465 __ EmitEagerDeoptIfSmi(node, result_reg,
4466 DeoptimizeReason::kCouldNotGrowElements);
4467 __ Jump(*done);
4468 },
4469 done, object, index, elements, this));
4470
4471 __ bind(*done);
4472}
4473
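// ExtendPropertiesBackingStore grows an object's out-of-line PropertyArray by
// JSObject::kFieldsAdded slots: allocate a new array, copy the old entries,
// fill the new slots with undefined, and carry the identity hash over into
// the new length-and-hash field before installing it on the object.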
4474void ExtendPropertiesBackingStore::SetValueLocationConstraints() {
4475 UseRegister(property_array_input());
4476 UseRegister(object_input());
4477 DefineAsRegister(this);
4478 set_temporaries_needed(2);
4479}
4480
4481void ExtendPropertiesBackingStore::GenerateCode(MaglevAssembler* masm,
4482 const ProcessingState& state) {
4483 Register object = ToRegister(object_input());
4484 Register old_property_array = ToRegister(property_array_input());
4485 Register result_reg = ToRegister(result());
4486 MaglevAssembler::TemporaryRegisterScope temps(masm);
4487 Register new_property_array =
4488 result_reg == object || result_reg == old_property_array ? temps.Acquire()
4489 : result_reg;
4490 Register scratch = temps.Acquire();
4491 DCHECK(!AreAliased(object, old_property_array, new_property_array, scratch));
4492
4493 int new_length = old_length_ + JSObject::kFieldsAdded;
4494
4495 // Allocate new PropertyArray.
4496 {
4497 RegisterSnapshot snapshot = register_snapshot();
4498 // old_property_array needs to be live, since we'll read data from it.
4499 // Object needs to be live, since we write the new property array into it.
4500 snapshot.live_registers.set(object);
4501 snapshot.live_registers.set(old_property_array);
4502 snapshot.live_tagged_registers.set(object);
4503 snapshot.live_tagged_registers.set(old_property_array);
4504
4505 Register size_in_bytes = scratch;
4506 __ Move(size_in_bytes, PropertyArray::SizeFor(new_length));
4507 __ Allocate(snapshot, new_property_array, size_in_bytes,
4508 AllocationType::kYoung);
4509 __ SetMapAsRoot(new_property_array, RootIndex::kPropertyArrayMap);
4510 }
4511
4512 // Copy existing properties over.
4513 {
4514 RegisterSnapshot snapshot = register_snapshot();
4515 snapshot.live_registers.set(object);
4516 snapshot.live_registers.set(old_property_array);
4517 snapshot.live_registers.set(new_property_array);
4518 snapshot.live_tagged_registers.set(object);
4519 snapshot.live_tagged_registers.set(old_property_array);
4520 snapshot.live_tagged_registers.set(new_property_array);
4521
4522 for (int i = 0; i < old_length_; ++i) {
4523 __ LoadTaggedFieldWithoutDecompressing(
4524 scratch, old_property_array, PropertyArray::OffsetOfElementAt(i));
4525
4526 __ StoreTaggedFieldWithWriteBarrier(
4527 new_property_array, PropertyArray::OffsetOfElementAt(i), scratch,
4528 snapshot, MaglevAssembler::kValueIsCompressed,
4529 MaglevAssembler::kValueCanBeSmi);
4530 }
4531 }
4532
4533 // Initialize new properties to undefined.
4534 __ LoadRoot(scratch, RootIndex::kUndefinedValue);
4535 for (int i = 0; i < JSObject::kFieldsAdded; ++i) {
4536 __ StoreTaggedFieldNoWriteBarrier(
4537 new_property_array, PropertyArray::OffsetOfElementAt(old_length_ + i),
4538 scratch);
4539 }
4540
4541 // Read the hash.
4542 if (old_length_ == 0) {
4543 // The object might still have a hash, stored in properties_or_hash. If
4544 // properties_or_hash is a Smi, then it's the hash. It can also be an empty
4545 // PropertyArray.
4546 __ LoadTaggedField(scratch, object, JSObject::kPropertiesOrHashOffset);
4547
4548 Label done;
4549 __ JumpIfSmi(scratch, &done);
4550
4551 __ Move(scratch, PropertyArray::kNoHashSentinel);
4552
4553 __ bind(&done);
4554 __ SmiUntag(scratch);
4555 __ ShiftLeft(scratch, PropertyArray::HashField::kShift);
4556 } else {
4557 __ LoadTaggedField(scratch, old_property_array,
4558 PropertyArray::kLengthAndHashOffset);
4559 __ SmiUntag(scratch);
4560 __ AndInt32(scratch, PropertyArray::HashField::kMask);
4561 }
4562
4563 // Add the new length and write the length-and-hash field.
4564 static_assert(PropertyArray::LengthField::kShift == 0);
4565 __ OrInt32(scratch, new_length);
4566
4567 __ UncheckedSmiTagInt32(scratch, scratch);
4568 __ StoreTaggedFieldNoWriteBarrier(
4569 new_property_array, PropertyArray::kLengthAndHashOffset, scratch);
4570
4571 {
4572 RegisterSnapshot snapshot = register_snapshot();
4573 // new_property_array needs to be live since we'll return it.
4574 snapshot.live_registers.set(new_property_array);
4575 snapshot.live_tagged_registers.set(new_property_array);
4576
4577 __ StoreTaggedFieldWithWriteBarrier(
4578 object, JSObject::kPropertiesOrHashOffset, new_property_array, snapshot,
4579 MaglevAssembler::kValueIsDecompressed,
4580 MaglevAssembler::kValueCannotBeSmi);
4581 }
4582 if (result_reg != new_property_array) {
4583 __ Move(result_reg, new_property_array);
4584 }
4585}
4586
4587int SetKeyedGeneric::MaxCallStackArgs() const {
4588 using D = CallInterfaceDescriptorFor<Builtin::kKeyedStoreIC>::type;
4589 return D::GetStackParameterCount();
4590}
4591void SetKeyedGeneric::SetValueLocationConstraints() {
4592 using D = CallInterfaceDescriptorFor<Builtin::kKeyedStoreIC>::type;
4593 UseFixed(context(), kContextRegister);
4594 UseFixed(object_input(), D::GetRegisterParameter(D::kReceiver));
4595 UseFixed(key_input(), D::GetRegisterParameter(D::kName));
4596 UseFixed(value_input(), D::GetRegisterParameter(D::kValue));
4597 DefineAsFixed(this, kReturnRegister0);
4598}
4599void SetKeyedGeneric::GenerateCode(MaglevAssembler* masm,
4600 const ProcessingState& state) {
4601 __ CallBuiltin<Builtin::kKeyedStoreIC>(
4602 context(), // context
4603 object_input(), // receiver
4604 key_input(), // name
4605 value_input(), // value
4606 TaggedIndex::FromIntptr(feedback().index()), // feedback slot
4607 feedback().vector // feedback vector
4608 );
4609 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
4610}
4611
4612int DefineKeyedOwnGeneric::MaxCallStackArgs() const {
4613 using D = CallInterfaceDescriptorFor<Builtin::kDefineKeyedOwnIC>::type;
4614 return D::GetStackParameterCount();
4615}
4616void DefineKeyedOwnGeneric::SetValueLocationConstraints() {
4617 using D = CallInterfaceDescriptorFor<Builtin::kDefineKeyedOwnIC>::type;
4618 UseFixed(context(), kContextRegister);
4619 UseFixed(object_input(), D::GetRegisterParameter(D::kReceiver));
4620 UseFixed(key_input(), D::GetRegisterParameter(D::kName));
4621 UseFixed(value_input(), D::GetRegisterParameter(D::kValue));
4622 UseFixed(flags_input(), D::GetRegisterParameter(D::kFlags));
4623 DefineAsFixed(this, kReturnRegister0);
4624}
4625void DefineKeyedOwnGeneric::GenerateCode(MaglevAssembler* masm,
4626 const ProcessingState& state) {
4627 __ CallBuiltin<Builtin::kDefineKeyedOwnIC>(
4628 context(), // context
4629 object_input(), // receiver
4630 key_input(), // name
4631 value_input(), // value
4632 flags_input(), // flags
4633 TaggedIndex::FromIntptr(feedback().index()), // feedback slot
4634 feedback().vector // feedback vector
4635 );
4636 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
4637}
4638
4639int StoreInArrayLiteralGeneric::MaxCallStackArgs() const {
4640 using D = CallInterfaceDescriptorFor<Builtin::kStoreInArrayLiteralIC>::type;
4641 return D::GetStackParameterCount();
4642}
4643void StoreInArrayLiteralGeneric::SetValueLocationConstraints() {
4644 using D = CallInterfaceDescriptorFor<Builtin::kStoreInArrayLiteralIC>::type;
4645 UseFixed(context(), kContextRegister);
4646 UseFixed(object_input(), D::GetRegisterParameter(D::kReceiver));
4647 UseFixed(name_input(), D::GetRegisterParameter(D::kName));
4648 UseFixed(value_input(), D::GetRegisterParameter(D::kValue));
4649 DefineAsFixed(this, kReturnRegister0);
4650}
4651void StoreInArrayLiteralGeneric::GenerateCode(MaglevAssembler* masm,
4652 const ProcessingState& state) {
4653 __ CallBuiltin<Builtin::kStoreInArrayLiteralIC>(
4654 context(), // context
4655 object_input(), // receiver
4656 name_input(), // name
4657 value_input(), // value
4658 TaggedIndex::FromIntptr(feedback().index()), // feedback slot
4659 feedback().vector // feedback vector
4660 );
4661 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
4662}
4663
4664void GeneratorRestoreRegister::SetValueLocationConstraints() {
4665 UseRegister(array_input());
4666 UseRegister(stale_input());
4667 DefineAsRegister(this);
4668 set_temporaries_needed(1);
4669}
4670void GeneratorRestoreRegister::GenerateCode(MaglevAssembler* masm,
4671 const ProcessingState& state) {
4672 MaglevAssembler::TemporaryRegisterScope temps(masm);
4673 Register temp = temps.Acquire();
4674 Register array = ToRegister(array_input());
4675 Register stale = ToRegister(stale_input());
4676 Register result_reg = ToRegister(result());
4677
4678 // The input and the output can alias; if that happens, we use a temporary
4679 // register and a move at the end.
4680 Register value = (array == result_reg ? temp : result_reg);
4681
4682 // Loads the current value in the generator register file.
4683 __ LoadTaggedField(value, array, FixedArray::OffsetOfElementAt(index()));
4684
4685 // And trashes it with StaleRegisterConstant.
4687 __ StoreTaggedFieldNoWriteBarrier(
4688 array, FixedArray::OffsetOfElementAt(index()), stale);
4689
4690 if (value != result_reg) {
4691 __ Move(result_reg, value);
4692 }
4693}
4694
4695int GeneratorStore::MaxCallStackArgs() const {
4696 return WriteBarrierDescriptor::GetStackParameterCount();
4697}
4698void GeneratorStore::SetValueLocationConstraints() {
4699 UseAny(context_input());
4700 UseRegister(generator_input());
4701 for (int i = 0; i < num_parameters_and_registers(); i++) {
4702 UseAny(parameters_and_registers(i));
4703 }
4704 RequireSpecificTemporary(WriteBarrierDescriptor::ObjectRegister());
4705 RequireSpecificTemporary(WriteBarrierDescriptor::SlotAddressRegister());
4706}
4707void GeneratorStore::GenerateCode(MaglevAssembler* masm,
4708 const ProcessingState& state) {
4709 Register generator = ToRegister(generator_input());
4710 Register array = WriteBarrierDescriptor::ObjectRegister();
4711 __ LoadTaggedField(array, generator,
4712 JSGeneratorObject::kParametersAndRegistersOffset);
4713
4714 RegisterSnapshot register_snapshot_during_store = register_snapshot();
4715 // Include the array and generator registers in the register snapshot while
4716 // storing parameters and registers, to avoid the write barrier clobbering
4717 // them.
4718 register_snapshot_during_store.live_registers.set(array);
4719 register_snapshot_during_store.live_tagged_registers.set(array);
4720 register_snapshot_during_store.live_registers.set(generator);
4721 register_snapshot_during_store.live_tagged_registers.set(generator);
4722 for (int i = 0; i < num_parameters_and_registers(); i++) {
4723 // Use WriteBarrierDescriptor::SlotAddressRegister() as the temporary for
4724 // the value -- it'll be clobbered by StoreTaggedFieldWithWriteBarrier since
4725 // it's not in the register snapshot, but that's ok, and a clobberable value
4726 // register lets the write barrier emit slightly better code.
4727 Input value_input = parameters_and_registers(i);
4728 Register value = __ FromAnyToRegister(
4729 value_input, WriteBarrierDescriptor::SlotAddressRegister());
4730 // Include the value register in the live set, in case it is used by future
4731 // inputs.
4732 register_snapshot_during_store.live_registers.set(value);
4733 register_snapshot_during_store.live_tagged_registers.set(value);
4734 __ StoreTaggedFieldWithWriteBarrier(
4735 array, FixedArray::OffsetOfElementAt(i), value,
4736 register_snapshot_during_store,
4737 value_input.node()->decompresses_tagged_result()
4738 ? MaglevAssembler::kValueIsDecompressed
4739 : MaglevAssembler::kValueIsCompressed,
4740 MaglevAssembler::kValueCanBeSmi);
4741 }
4742
4743 __ StoreTaggedSignedField(generator, JSGeneratorObject::kContinuationOffset,
4744 Smi::FromInt(suspend_id()));
4745 __ StoreTaggedSignedField(generator,
4746 JSGeneratorObject::kInputOrDebugPosOffset,
4747 Smi::FromInt(bytecode_offset()));
4748
4749 // Use WriteBarrierDescriptor::SlotAddressRegister() as the scratch
4750 // register, see comment above. At this point we no longer need to preserve
4751 // the array or generator registers, so use the original register snapshot.
4752 Register context = __ FromAnyToRegister(
4753 context_input(), WriteBarrierDescriptor::SlotAddressRegister());
4754 __ StoreTaggedFieldWithWriteBarrier(
4755 generator, JSGeneratorObject::kContextOffset, context,
4756 register_snapshot(),
4757 context_input().node()->decompresses_tagged_result()
4758 ? MaglevAssembler::kValueIsDecompressed
4759 : MaglevAssembler::kValueIsCompressed,
4760 MaglevAssembler::kValueCannotBeSmi);
4761}
4762
4763int GetKeyedGeneric::MaxCallStackArgs() const {
4764 using D = CallInterfaceDescriptorFor<Builtin::kKeyedLoadIC>::type;
4765 return D::GetStackParameterCount();
4766}
4767void GetKeyedGeneric::SetValueLocationConstraints() {
4768 using D = CallInterfaceDescriptorFor<Builtin::kKeyedLoadIC>::type;
4769 UseFixed(context(), kContextRegister);
4770 UseFixed(object_input(), D::GetRegisterParameter(D::kReceiver));
4771 UseFixed(key_input(), D::GetRegisterParameter(D::kName));
4772 DefineAsFixed(this, kReturnRegister0);
4773}
4774void GetKeyedGeneric::GenerateCode(MaglevAssembler* masm,
4775 const ProcessingState& state) {
4776 __ CallBuiltin<Builtin::kKeyedLoadIC>(
4777 context(), // context
4778 object_input(), // receiver
4779 key_input(), // name
4780 TaggedIndex::FromIntptr(feedback().index()), // feedback slot
4781 feedback().vector // feedback vector
4782 );
4783 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
4784}
4785
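// The following *ToNumber nodes tag an untagged numeric value: they try to
// Smi-tag inline and fall back to allocating a HeapNumber in deferred code
// when the value doesn't fit in a Smi.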
4786void Int32ToNumber::SetValueLocationConstraints() {
4787 UseRegister(input());
4788 DefineAsRegister(this);
4789}
4790void Int32ToNumber::GenerateCode(MaglevAssembler* masm,
4791 const ProcessingState& state) {
4792 Register object = ToRegister(result());
4793 Register value = ToRegister(input());
4794 ZoneLabelRef done(masm);
4795 MaglevAssembler::TemporaryRegisterScope temps(masm);
4796 // Object is not allowed to alias value, because SmiTagInt32AndJumpIfFail will
4797 // clobber `object` even if the tagging fails, and we don't want it to clobber
4798 // `value`.
4799 bool input_output_alias = (object == value);
4800 Register res = object;
4801 if (input_output_alias) {
4802 res = temps.AcquireScratch();
4803 }
4804 __ SmiTagInt32AndJumpIfFail(
4805 res, value,
4806 __ MakeDeferredCode(
4807 [](MaglevAssembler* masm, Register object, Register value,
4808 Register scratch, ZoneLabelRef done, Int32ToNumber* node) {
4809 MaglevAssembler::TemporaryRegisterScope temps(masm);
4810 // AllocateHeapNumber needs a scratch register, and the res scratch
4811 // register isn't needed anymore, so return it to the pool.
4812 if (scratch.is_valid()) {
4813 temps.IncludeScratch(scratch);
4814 }
4815 DoubleRegister double_value = temps.AcquireScratchDouble();
4816 __ Int32ToDouble(double_value, value);
4817 __ AllocateHeapNumber(node->register_snapshot(), object,
4818 double_value);
4819 __ Jump(*done);
4820 },
4821 object, value, input_output_alias ? res : Register::no_reg(), done,
4822 this));
4823 if (input_output_alias) {
4824 __ Move(object, res);
4825 }
4826 __ bind(*done);
4827}
4828
4829void Uint32ToNumber::SetValueLocationConstraints() {
4830 UseRegister(input());
4831#ifdef V8_TARGET_ARCH_X64
4832 // We emit slightly more efficient code if result is the same as input.
4833 DefineSameAsFirst(this);
4834#else
4835 DefineAsRegister(this);
4836#endif
4837}
4838void Uint32ToNumber::GenerateCode(MaglevAssembler* masm,
4839 const ProcessingState& state) {
4840 ZoneLabelRef done(masm);
4841 Register value = ToRegister(input());
4842 Register object = ToRegister(result());
4843 // Unlike Int32ToNumber, object is allowed to alias value here (indeed, the
4844 // code is better if it does). The difference is that Uint32 smi tagging first
4845 // does a range check, and doesn't clobber `object` on failure.
4846 __ SmiTagUint32AndJumpIfFail(
4847 object, value,
4848 __ MakeDeferredCode(
4849 [](MaglevAssembler* masm, Register object, Register value,
4850 ZoneLabelRef done, Uint32ToNumber* node) {
4851 MaglevAssembler::TemporaryRegisterScope temps(masm);
4852 DoubleRegister double_value = temps.AcquireScratchDouble();
4853 __ Uint32ToDouble(double_value, value);
4854 __ AllocateHeapNumber(node->register_snapshot(), object,
4855 double_value);
4856 __ Jump(*done);
4857 },
4858 object, value, done, this));
4859 __ bind(*done);
4860}
4861
4862void IntPtrToNumber::SetValueLocationConstraints() {
4863 UseRegister(input());
4864#ifdef V8_TARGET_ARCH_X64
4865 // We emit slightly more efficient code if result is the same as input.
4866 DefineSameAsFirst(this);
4867#else
4868 DefineAsRegister(this);
4869#endif
4870}
4871
4872void IntPtrToNumber::GenerateCode(MaglevAssembler* masm,
4873 const ProcessingState& state) {
4874 ZoneLabelRef done(masm);
4875 Register value = ToRegister(input());
4876 Register object = ToRegister(result());
4877 // Unlike Int32ToNumber, object is allowed to alias value here (indeed, the
4878 // code is better if it does). The difference is that IntPtr smi tagging first
4879 // does a range check, and doesn't clobber `object` on failure.
4880 __ SmiTagIntPtrAndJumpIfFail(
4881 object, value,
4882 __ MakeDeferredCode(
4883 [](MaglevAssembler* masm, Register object, Register value,
4884 ZoneLabelRef done, IntPtrToNumber* node) {
4885 MaglevAssembler::TemporaryRegisterScope temps(masm);
4886 DoubleRegister double_value = temps.AcquireScratchDouble();
4887 __ IntPtrToDouble(double_value, value);
4888 __ AllocateHeapNumber(node->register_snapshot(), object,
4889 double_value);
4890 __ Jump(*done);
4891 },
4892 object, value, done, this));
4893 __ bind(*done);
4894}
4895
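// Float64ToTagged: if canonicalize_smi() is set, doubles that round-trip
// through int32 become Smis; everything else is boxed in a fresh HeapNumber.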
4896void Float64ToTagged::SetValueLocationConstraints() {
4897 UseRegister(input());
4898 DefineAsRegister(this);
4899}
4900void Float64ToTagged::GenerateCode(MaglevAssembler* masm,
4901 const ProcessingState& state) {
4902 DoubleRegister value = ToDoubleRegister(input());
4903 Register object = ToRegister(result());
4904 Label box, done;
4905 if (canonicalize_smi()) {
4906 __ TryTruncateDoubleToInt32(object, value, &box);
4907 __ SmiTagInt32AndJumpIfFail(object, &box);
4908 __ Jump(&done, Label::kNear);
4909 __ bind(&box);
4910 }
4911 __ AllocateHeapNumber(register_snapshot(), object, value);
4912 if (canonicalize_smi()) {
4913 __ bind(&done);
4914 }
4915}
4916
4917void Float64ToHeapNumberForField::SetValueLocationConstraints() {
4918 UseRegister(input());
4919 DefineAsRegister(this);
4920}
4921void Float64ToHeapNumberForField::GenerateCode(MaglevAssembler* masm,
4922 const ProcessingState& state) {
4923 DoubleRegister value = ToDoubleRegister(input());
4924 Register object = ToRegister(result());
4925 __ AllocateHeapNumber(register_snapshot(), object, value);
4926}
4927
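// HoleyFloat64ToTagged: like Float64ToTagged, but the hole NaN bit pattern is
// converted to undefined instead of being boxed as a HeapNumber.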
4928void HoleyFloat64ToTagged::SetValueLocationConstraints() {
4929 UseRegister(input());
4930 DefineAsRegister(this);
4931}
4932void HoleyFloat64ToTagged::GenerateCode(MaglevAssembler* masm,
4933 const ProcessingState& state) {
4934 ZoneLabelRef done(masm);
4935 DoubleRegister value = ToDoubleRegister(input());
4936 Register object = ToRegister(result());
4937 Label box;
4938 if (canonicalize_smi()) {
4939 __ TryTruncateDoubleToInt32(object, value, &box);
4940 __ SmiTagInt32AndJumpIfFail(object, &box);
4941 __ Jump(*done, Label::kNear);
4942 __ bind(&box);
4943 }
4944 // Using return as scratch register.
4945 __ JumpIfHoleNan(
4946 value, ToRegister(result()),
4947 __ MakeDeferredCode(
4948 [](MaglevAssembler* masm, Register object, ZoneLabelRef done) {
4949 // TODO(leszeks): Evaluate whether this is worth deferring.
4950 __ LoadRoot(object, RootIndex::kUndefinedValue);
4951 __ Jump(*done);
4952 },
4953 object, done));
4954 __ AllocateHeapNumber(register_snapshot(), object, value);
4955 __ bind(*done);
4956}
4957
4958void Float64Round::SetValueLocationConstraints() {
4959 UseRegister(input());
4960 DefineAsRegister(this);
4961 if (kind_ == Kind::kNearest) {
4962 set_double_temporaries_needed(1);
4963 }
4964}
4965
4967 UseRegister(input());
4968 DefineSameAsFirst(this);
4969}
4970
4972 UseRegister(input());
4973 DefineSameAsFirst(this);
4974}
4975
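// CheckedSmiTagFloat64 deopts with kNotASmi unless the double is exactly
// representable as a Smi (both the int32 truncation and the subsequent Smi
// tag must succeed losslessly).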
4976void CheckedSmiTagFloat64::SetValueLocationConstraints() {
4977 UseRegister(input());
4978 DefineAsRegister(this);
4979}
4980void CheckedSmiTagFloat64::GenerateCode(MaglevAssembler* masm,
4981 const ProcessingState& state) {
4982 DoubleRegister value = ToDoubleRegister(input());
4983 Register object = ToRegister(result());
4984
4985 __ TryTruncateDoubleToInt32(
4986 object, value, __ GetDeoptLabel(this, DeoptimizeReason::kNotASmi));
4987 __ SmiTagInt32AndJumpIfFail(
4988 object, __ GetDeoptLabel(this, DeoptimizeReason::kNotASmi));
4989}
4990
4991void StoreFloat64::SetValueLocationConstraints() {
4992 UseRegister(object_input());
4993 UseRegister(value_input());
4994}
4995void StoreFloat64::GenerateCode(MaglevAssembler* masm,
4996 const ProcessingState& state) {
4997 Register object = ToRegister(object_input());
4998 DoubleRegister value = ToDoubleRegister(value_input());
4999
5000 __ AssertNotSmi(object);
5001 __ StoreFloat64(FieldMemOperand(object, offset()), value);
5002}
5003
5004void StoreInt32::SetValueLocationConstraints() {
5005 UseRegister(object_input());
5006 UseRegister(value_input());
5007}
5008void StoreInt32::GenerateCode(MaglevAssembler* masm,
5009 const ProcessingState& state) {
5010 Register object = ToRegister(object_input());
5011 Register value = ToRegister(value_input());
5012
5013 __ AssertNotSmi(object);
5014 __ StoreInt32(FieldMemOperand(object, offset()), value);
5015}
5016
5017void StoreTaggedFieldNoWriteBarrier::SetValueLocationConstraints() {
5018 UseRegister(object_input());
5019 UseRegister(value_input());
5020}
5021void StoreTaggedFieldNoWriteBarrier::GenerateCode(
5022 MaglevAssembler* masm, const ProcessingState& state) {
5023 Register object = ToRegister(object_input());
5024 Register value = ToRegister(value_input());
5025
5026 __ AssertNotSmi(object);
5027
5028 __ StoreTaggedFieldNoWriteBarrier(object, offset(), value);
5029 __ AssertElidedWriteBarrier(object, value, register_snapshot());
5030}
5031
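// StringAt loads the char code at `index` and materializes a one-character
// result string, hitting the cached single-character string table on the
// one-byte fast path.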
5032int StringAt::MaxCallStackArgs() const {
5033 DCHECK_EQ(Runtime::FunctionForId(Runtime::kStringCharCodeAt)->nargs, 2);
5034 return std::max(2, AllocateDescriptor::GetStackParameterCount());
5035}
5036void StringAt::SetValueLocationConstraints() {
5037 UseAndClobberRegister(string_input());
5038 UseRegister(index_input());
5039 DefineAsRegister(this);
5040 set_temporaries_needed(1);
5041}
5042void StringAt::GenerateCode(MaglevAssembler* masm,
5043 const ProcessingState& state) {
5044 MaglevAssembler::TemporaryRegisterScope temps(masm);
5045 Register scratch = temps.Acquire();
5046 Register result_string = ToRegister(result());
5047 Register string = ToRegister(string_input());
5048 Register index = ToRegister(index_input());
5049 Register char_code = string;
5050
5051 ZoneLabelRef done(masm);
5052 Label cached_one_byte_string;
5053
5054 RegisterSnapshot save_registers = register_snapshot();
5055 __ StringCharCodeOrCodePointAt(
5056 BuiltinStringPrototypeCharCodeOrCodePointAt::kCharCodeAt, save_registers,
5057 char_code, string, index, scratch, Register::no_reg(),
5058 &cached_one_byte_string);
5059 __ StringFromCharCode(save_registers, &cached_one_byte_string, result_string,
5060 char_code, scratch,
5062}
5063
5064int BuiltinStringPrototypeCharCodeOrCodePointAt::MaxCallStackArgs() const {
5065 DCHECK_EQ(Runtime::FunctionForId(Runtime::kStringCharCodeAt)->nargs, 2);
5066 return 2;
5067}
5068void BuiltinStringPrototypeCharCodeOrCodePointAt::
5069 SetValueLocationConstraints() {
5070 UseAndClobberRegister(string_input());
5071 UseAndClobberRegister(index_input());
5072 DefineAsRegister(this);
5073 // TODO(victorgomes): Add a mode to the register allocator where we ensure
5074 // input cannot alias with output. We can then remove the second scratch.
5075 set_temporaries_needed(
5076 mode_ == BuiltinStringPrototypeCharCodeOrCodePointAt::kCodePointAt ? 2
5077 : 1);
5078}
5079void BuiltinStringPrototypeCharCodeOrCodePointAt::GenerateCode(
5080 MaglevAssembler* masm, const ProcessingState& state) {
5081 MaglevAssembler::TemporaryRegisterScope temps(masm);
5082 Register scratch1 = temps.Acquire();
5083 Register scratch2 = Register::no_reg();
5084 if (mode_ == BuiltinStringPrototypeCharCodeOrCodePointAt::kCodePointAt) {
5085 scratch2 = temps.Acquire();
5086 }
5087 Register string = ToRegister(string_input());
5088 Register index = ToRegister(index_input());
5089 ZoneLabelRef done(masm);
5090 RegisterSnapshot save_registers = register_snapshot();
5091 __ StringCharCodeOrCodePointAt(mode_, save_registers, ToRegister(result()),
5092 string, index, scratch1, scratch2, *done);
5093 __ bind(*done);
5094}
5095
5096void StringLength::SetValueLocationConstraints() {
5097 UseRegister(object_input());
5098 DefineAsRegister(this);
5099}
5100void StringLength::GenerateCode(MaglevAssembler* masm,
5101 const ProcessingState& state) {
5102 __ StringLength(ToRegister(result()), ToRegister(object_input()));
5103}
5104
5105void StringConcat::SetValueLocationConstraints() {
5106 using D = StringAdd_CheckNoneDescriptor;
5107 UseFixed(lhs(), D::GetRegisterParameter(D::kLeft));
5108 UseFixed(rhs(), D::GetRegisterParameter(D::kRight));
5109 DefineAsFixed(this, kReturnRegister0);
5110}
5111void StringConcat::GenerateCode(MaglevAssembler* masm,
5112 const ProcessingState& state) {
5113 __ CallBuiltin<Builtin::kStringAdd_CheckNone>(
5114 masm->native_context().object(), // context
5115 lhs(), // left
5116 rhs() // right
5117 );
5118 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
5119 DCHECK_EQ(kReturnRegister0, ToRegister(result()));
5120}
5121
5122void ConsStringMap::SetValueLocationConstraints() {
5123 UseRegister(lhs());
5124 UseRegister(rhs());
5125 DefineSameAsFirst(this);
5126}
5127void ConsStringMap::GenerateCode(MaglevAssembler* masm,
5128 const ProcessingState& state) {
5129 Label two_byte, ok;
5130 Register res = ToRegister(result());
5131 Register left = ToRegister(lhs());
5132 Register right = ToRegister(rhs());
5133
5134 // Fast case for when the lhs() (which is identical to the result) happens to
5135 // contain the result for one byte string map inputs. In this case we only
5136 // need to check the rhs() and if it is one byte too, already have the result
5137 // in the correct register.
5138 bool left_contains_one_byte_res_map =
5139 lhs().node()->Is<RootConstant>() &&
5140 lhs().node()->Cast<RootConstant>()->index() ==
5141 RootIndex::kConsOneByteStringMap;
5142
5143#ifdef V8_STATIC_ROOTS
5144 static_assert(InstanceTypeChecker::kOneByteStringMapBit == 0 ||
5145 InstanceTypeChecker::kTwoByteStringMapBit == 0);
5146 auto TestForTwoByte = [&](Register reg, Register second) {
5147 if constexpr (InstanceTypeChecker::kOneByteStringMapBit == 0) {
5148 // Two-byte is represented as 1: Check if either of them have the two-byte
5149 // bit set
5150 if (second != no_reg) {
5151 __ OrInt32(reg, second);
5152 }
5153 __ TestInt32AndJumpIfAnySet(reg,
5154 InstanceTypeChecker::kStringMapEncodingMask,
5155 &two_byte, Label::kNear);
5156 } else {
5157 // One-byte is represented as 1: Check that both of them have the one-byte
5158 // bit set
5159 if (second != no_reg) {
5160 __ AndInt32(reg, second);
5161 }
5162 __ TestInt32AndJumpIfAllClear(reg,
5163 InstanceTypeChecker::kStringMapEncodingMask,
5164 &two_byte, Label::kNear);
5165 }
5166 };
5167 if (left_contains_one_byte_res_map) {
5168 TestForTwoByte(right, no_reg);
5169 } else {
5170 TestForTwoByte(left, right);
5171 }
5172#else
5173 MaglevAssembler::TemporaryRegisterScope temps(masm);
5174 Register scratch = temps.AcquireScratch();
5175 static_assert(kTwoByteStringTag == 0);
5176 if (left_contains_one_byte_res_map) {
5177 __ LoadByte(scratch, FieldMemOperand(right, Map::kInstanceTypeOffset));
5178 __ TestInt32AndJumpIfAllClear(scratch, kStringEncodingMask, &two_byte,
5179 Label::kNear);
5180 } else {
5181 __ LoadByte(left, FieldMemOperand(left, Map::kInstanceTypeOffset));
5182 if (left != right) {
5183 __ LoadByte(scratch, FieldMemOperand(right, Map::kInstanceTypeOffset));
5184 __ AndInt32(scratch, left);
5185 }
5186 __ TestInt32AndJumpIfAllClear(scratch, kStringEncodingMask, &two_byte,
5187 Label::kNear);
5188 }
5189#endif // V8_STATIC_ROOTS
5190 DCHECK_EQ(left, res);
5191 if (!left_contains_one_byte_res_map) {
5192 __ LoadRoot(res, RootIndex::kConsOneByteStringMap);
5193 }
5194 __ Jump(&ok, Label::kNear);
5195
5196 __ bind(&two_byte);
5197 __ LoadRoot(res, RootIndex::kConsTwoByteStringMap);
5198 __ bind(&ok);
5199}
5200
5201void UnwrapStringWrapper::SetValueLocationConstraints() {
5202 UseRegister(value_input());
5203 DefineSameAsFirst(this);
5204}
5205void UnwrapStringWrapper::GenerateCode(MaglevAssembler* masm,
5206 const ProcessingState& state) {
5207 Register input = ToRegister(value_input());
5208 Label done;
5209 __ JumpIfString(input, &done, Label::kNear);
5210 __ LoadTaggedField(input, input, JSPrimitiveWrapper::kValueOffset);
5211 __ bind(&done);
5212}
5213
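// UnwrapThinString: a ThinString is a forwarding wrapper to an internalized
// string; load its actual value so later code sees the canonical string.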
5214void UnwrapThinString::SetValueLocationConstraints() {
5215 UseRegister(value_input());
5216 DefineSameAsFirst(this);
5217}
5218void UnwrapThinString::GenerateCode(MaglevAssembler* masm,
5219 const ProcessingState& state) {
5220 Register input = ToRegister(value_input());
5221 Label ok;
5222 {
5223 MaglevAssembler::TemporaryRegisterScope temps(masm);
5224 Register scratch = temps.AcquireScratch();
5225#ifdef V8_STATIC_ROOTS
5226 __ LoadCompressedMap(scratch, input);
5227 __ JumpIfObjectNotInRange(
5228 scratch,
5229 InstanceTypeChecker::kUniqueMapRangeOfStringType::kThinString.first,
5230 InstanceTypeChecker::kUniqueMapRangeOfStringType::kThinString.second,
5231 &ok, Label::kNear);
5232#else
5233 __ LoadInstanceType(scratch, input);
5234 __ TestInt32AndJumpIfAllClear(scratch, kThinStringTagBit, &ok,
5235 Label::kNear);
5236#endif // V8_STATIC_ROOTS
5237 }
5238 __ LoadThinStringValue(input, input);
5239 __ bind(&ok);
5240}
5241
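// StringEqual fast paths: pointer-identical strings are trivially equal and
// strings of different lengths are trivially unequal; everything else goes to
// the StringEqual builtin, with the shared length already in its |length|
// register.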
5242void StringEqual::SetValueLocationConstraints() {
5243 using D = StringEqualDescriptor;
5244 UseFixed(lhs(), D::GetRegisterParameter(D::kLeft));
5245 UseFixed(rhs(), D::GetRegisterParameter(D::kRight));
5246 set_temporaries_needed(1);
5247 RequireSpecificTemporary(D::GetRegisterParameter(D::kLength));
5248 DefineAsRegister(this);
5249}
5250void StringEqual::GenerateCode(MaglevAssembler* masm,
5251 const ProcessingState& state) {
5252 using D = StringEqualDescriptor;
5253 Label done, if_equal, if_not_equal;
5254 Register left = ToRegister(lhs());
5255 Register right = ToRegister(rhs());
5256 MaglevAssembler::TemporaryRegisterScope temps(masm);
5257 Register left_length = temps.Acquire();
5258 Register right_length = D::GetRegisterParameter(D::kLength);
5259
5260 __ CmpTagged(left, right);
5261 __ JumpIf(kEqual, &if_equal,
5262 // Debug checks in StringLength can make this jump too long for a
5263 // near jump.
5264 v8_flags.debug_code ? Label::kFar : Label::kNear);
5265
5266 __ StringLength(left_length, left);
5267 __ StringLength(right_length, right);
5268 __ CompareInt32AndJumpIf(left_length, right_length, kNotEqual, &if_not_equal,
5269 Label::Distance::kNear);
5270
5271 // The inputs are already in the right registers. The |left| and |right|
5272 // inputs were required to arrive in the builtin's left/right parameter
5273 // registers, and the builtin's |length| input is where we loaded the length
5274 // of the right string (which equals the length of the left string when we
5275 // get here).
5276 DCHECK_EQ(right_length, D::GetRegisterParameter(D::kLength));
5277 __ CallBuiltin<Builtin::kStringEqual>(lhs(), rhs(),
5278 D::GetRegisterParameter(D::kLength));
5279 masm->DefineLazyDeoptPoint(this->lazy_deopt_info());
5280 __ Jump(&done, Label::Distance::kNear);
5281
5282 __ bind(&if_equal);
5283 __ LoadRoot(ToRegister(result()), RootIndex::kTrueValue);
5284 __ Jump(&done, Label::Distance::kNear);
5285
5286 __ bind(&if_not_equal);
5287 __ LoadRoot(ToRegister(result()), RootIndex::kFalseValue);
5288
5289 __ bind(&done);
5290}
5291
5292void TaggedEqual::SetValueLocationConstraints() {
5293 UseRegister(lhs());
5294 UseRegister(rhs());
5295 DefineAsRegister(this);
5296}
5297void TaggedEqual::GenerateCode(MaglevAssembler* masm,
5298 const ProcessingState& state) {
5299 Label done, if_equal;
5300 __ CmpTagged(ToRegister(lhs()), ToRegister(rhs()));
5301 __ JumpIf(kEqual, &if_equal, Label::Distance::kNear);
5302 __ LoadRoot(ToRegister(result()), RootIndex::kFalseValue);
5303 __ Jump(&done);
5304 __ bind(&if_equal);
5305 __ LoadRoot(ToRegister(result()), RootIndex::kTrueValue);
5306 __ bind(&done);
5307}
5308
5309void TaggedNotEqual::SetValueLocationConstraints() {
5310 UseRegister(lhs());
5311 UseRegister(rhs());
5312 DefineAsRegister(this);
5313}
5314void TaggedNotEqual::GenerateCode(MaglevAssembler* masm,
5315 const ProcessingState& state) {
5316 Label done, if_equal;
5317 __ CmpTagged(ToRegister(lhs()), ToRegister(rhs()));
5318 __ JumpIf(kEqual, &if_equal, Label::Distance::kNear);
5319 __ LoadRoot(ToRegister(result()), RootIndex::kTrueValue);
5320 __ Jump(&done);
5321 __ bind(&if_equal);
5322 __ LoadRoot(ToRegister(result()), RootIndex::kFalseValue);
5323 __ bind(&done);
5324}
5325
5326int TestInstanceOf::MaxCallStackArgs() const {
5327 using D = CallInterfaceDescriptorFor<Builtin::kInstanceOf_WithFeedback>::type;
5328 return D::GetStackParameterCount();
5329}
5330void TestInstanceOf::SetValueLocationConstraints() {
5331 using D = CallInterfaceDescriptorFor<Builtin::kInstanceOf_WithFeedback>::type;
5332 UseFixed(context(), kContextRegister);
5333 UseFixed(object(), D::GetRegisterParameter(D::kLeft));
5334 UseFixed(callable(), D::GetRegisterParameter(D::kRight));
5335 DefineAsFixed(this, kReturnRegister0);
5336}
5337void TestInstanceOf::GenerateCode(MaglevAssembler* masm,
5338 const ProcessingState& state) {
5339 __ CallBuiltin<Builtin::kInstanceOf_WithFeedback>(
5340 context(), // context
5341 object(), // left
5342 callable(), // right
5343 feedback().index(), // feedback slot
5344 feedback().vector // feedback vector
5345 );
5346 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
5347}
5348
5349void TestTypeOf::SetValueLocationConstraints() {
5350 UseRegister(value());
5351 DefineAsRegister(this);
5352#ifdef V8_TARGET_ARCH_ARM
5353 set_temporaries_needed(1);
5354#endif
5355}
5356void TestTypeOf::GenerateCode(MaglevAssembler* masm,
5357 const ProcessingState& state) {
5358#ifdef V8_TARGET_ARCH_ARM
5359 // Arm32 needs one extra scratch register for TestTypeOf, so take a maglev
5360 // temporary and allow it to be used as a macro assembler scratch register.
5361 MaglevAssembler::TemporaryRegisterScope temps(masm);
5362 temps.IncludeScratch(temps.Acquire());
5363#endif
5364 Register object = ToRegister(value());
5365 Label is_true, is_false, done;
5366 __ TestTypeOf(object, literal_, &is_true, Label::Distance::kNear, true,
5367 &is_false, Label::Distance::kNear, false);
5368 // Fallthrough into true.
5369 __ bind(&is_true);
5370 __ LoadRoot(ToRegister(result()), RootIndex::kTrueValue);
5371 __ Jump(&done, Label::Distance::kNear);
5372 __ bind(&is_false);
5373 __ LoadRoot(ToRegister(result()), RootIndex::kFalseValue);
5374 __ bind(&done);
5375}
5376
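// ToBoolean materializes the boolean conversion result as an actual
// true/false heap value; fused branches are handled by separate branch nodes
// rather than by this node.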
5377void ToBoolean::SetValueLocationConstraints() {
5378 UseRegister(value());
5379 DefineAsRegister(this);
5380}
5381void ToBoolean::GenerateCode(MaglevAssembler* masm,
5382 const ProcessingState& state) {
5383 Register object = ToRegister(value());
5384 Register return_value = ToRegister(result());
5385 Label done;
5386 ZoneLabelRef object_is_true(masm), object_is_false(masm);
5387 // TODO(leszeks): We're likely to be calling this on an existing boolean --
5388 // maybe that's a case we should fast-path here and reuse that boolean value?
5389 __ ToBoolean(object, check_type(), object_is_true, object_is_false, true);
5390 __ bind(*object_is_true);
5391 __ LoadRoot(return_value, RootIndex::kTrueValue);
5392 __ Jump(&done);
5393 __ bind(*object_is_false);
5394 __ LoadRoot(return_value, RootIndex::kFalseValue);
5395 __ bind(&done);
5396}
5397
5398void ToBooleanLogicalNot::SetValueLocationConstraints() {
5399 UseRegister(value());
5400 DefineAsRegister(this);
5401}
5402void ToBooleanLogicalNot::GenerateCode(MaglevAssembler* masm,
5403 const ProcessingState& state) {
5404 Register object = ToRegister(value());
5405 Register return_value = ToRegister(result());
5406 Label done;
5407 ZoneLabelRef object_is_true(masm), object_is_false(masm);
5408 __ ToBoolean(object, check_type(), object_is_true, object_is_false, true);
5409 __ bind(*object_is_true);
5410 __ LoadRoot(return_value, RootIndex::kFalseValue);
5411 __ Jump(&done);
5412 __ bind(*object_is_false);
5413 __ LoadRoot(return_value, RootIndex::kTrueValue);
5414 __ bind(&done);
5415}
5416
5417int ToName::MaxCallStackArgs() const {
5418 using D = CallInterfaceDescriptorFor<Builtin::kToName>::type;
5419 return D::GetStackParameterCount();
5420}
5421void ToName::SetValueLocationConstraints() {
5422 using D = CallInterfaceDescriptorFor<Builtin::kToName>::type;
5423 UseFixed(context(), kContextRegister);
5424 UseFixed(value_input(), D::GetRegisterParameter(D::kInput));
5425 DefineAsFixed(this, kReturnRegister0);
5426}
5427void ToName::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
5428 __ CallBuiltin<Builtin::kToName>(context(), // context
5429 value_input() // input
5430 );
5431 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
5432}
5433
5434int ToNumberOrNumeric::MaxCallStackArgs() const {
5435 return TypeConversionDescriptor::GetStackParameterCount();
5436}
5437void ToNumberOrNumeric::SetValueLocationConstraints() {
5438 UseRegister(value_input());
5439 set_temporaries_needed(1);
5440 DefineAsRegister(this);
5441}
5442void ToNumberOrNumeric::GenerateCode(MaglevAssembler* masm,
5443 const ProcessingState& state) {
5444 ZoneLabelRef done(masm);
5445 Label move_and_return;
5446 Register object = ToRegister(value_input());
5447 Register result_reg = ToRegister(result());
5448
5449 __ JumpIfSmi(object, &move_and_return, Label::kNear);
5450 MaglevAssembler::TemporaryRegisterScope temps(masm);
5451 Register scratch = temps.Acquire();
5452 __ CompareMapWithRoot(object, RootIndex::kHeapNumberMap, scratch);
5453 __ JumpToDeferredIf(
5454 kNotEqual,
5455 [](MaglevAssembler* masm, Object::Conversion mode, Register object,
5456 Register result_reg, ToNumberOrNumeric* node, ZoneLabelRef done) {
5457 {
5458 RegisterSnapshot snapshot = node->register_snapshot();
5459 snapshot.live_registers.clear(result_reg);
5460 SaveRegisterStateForCall save_register_state(masm, snapshot);
5461 switch (mode) {
5462 case Object::Conversion::kToNumber:
5463 __ CallBuiltin<Builtin::kToNumber>(
5464 masm->native_context().object(), object);
5465 break;
5466 case Object::Conversion::kToNumeric:
5467 __ CallBuiltin<Builtin::kToNumeric>(
5468 masm->native_context().object(), object);
5469 break;
5470 }
5471 masm->DefineExceptionHandlerPoint(node);
5472 save_register_state.DefineSafepointWithLazyDeopt(
5473 node->lazy_deopt_info());
5474 __ Move(result_reg, kReturnRegister0);
5475 }
5476 __ Jump(*done);
5477 },
5478 mode(), object, result_reg, this, done);
5479 __ bind(&move_and_return);
5480 __ Move(result_reg, object);
5481
5482 __ bind(*done);
5483}
5484
5485int ToObject::MaxCallStackArgs() const {
5486 using D = CallInterfaceDescriptorFor<Builtin::kToObject>::type;
5487 return D::GetStackParameterCount();
5488}
5489void ToObject::SetValueLocationConstraints() {
5490 using D = CallInterfaceDescriptorFor<Builtin::kToObject>::type;
5491 UseFixed(context(), kContextRegister);
5492 UseFixed(value_input(), D::GetRegisterParameter(D::kInput));
5493 DefineAsFixed(this, kReturnRegister0);
5494}
5495void ToObject::GenerateCode(MaglevAssembler* masm,
5496 const ProcessingState& state) {
5497 Register value = ToRegister(value_input());
5498 Label call_builtin, done;
5499 // Avoid the builtin call if {value} is a JSReceiver.
5500 if (check_type() == CheckType::kOmitHeapObjectCheck) {
5501 __ AssertNotSmi(value);
5502 } else {
5503 __ JumpIfSmi(value, &call_builtin, Label::Distance::kNear);
5504 }
5505 __ JumpIfJSAnyIsNotPrimitive(value, &done, Label::Distance::kNear);
5506 __ bind(&call_builtin);
5507 __ CallBuiltin<Builtin::kToObject>(context(), // context
5508 value_input() // input
5509 );
5510 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
5511 __ bind(&done);
5512}
5513
5514int ToString::MaxCallStackArgs() const {
5515 using D = CallInterfaceDescriptorFor<Builtin::kToString>::type;
5516 return D::GetStackParameterCount();
5517}
5518void ToString::SetValueLocationConstraints() {
5519 using D = CallInterfaceDescriptorFor<Builtin::kToString>::type;
5520 UseFixed(context(), kContextRegister);
5521 UseFixed(value_input(), D::GetRegisterParameter(D::kO));
5522 DefineAsFixed(this, kReturnRegister0);
5523}
5524void ToString::GenerateCode(MaglevAssembler* masm,
5525 const ProcessingState& state) {
5526 Register value = ToRegister(value_input());
5527 Label call_builtin, done;
5528 // Avoid the builtin call if {value} is a string.
5529 __ JumpIfSmi(value, &call_builtin, Label::Distance::kNear);
5530 __ JumpIfString(value, &done, Label::Distance::kNear);
5531 __ bind(&call_builtin);
5532 if (mode() == kConvertSymbol) {
5533 __ CallBuiltin<Builtin::kToStringConvertSymbol>(context(), // context
5534 value_input()); // input
5535 } else {
5536 __ CallBuiltin<Builtin::kToString>(context(), // context
5537 value_input()); // input
5538 }
5539 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
5540 __ bind(&done);
5541}
5542
5543void NumberToString::SetValueLocationConstraints() {
5544 using D = CallInterfaceDescriptorFor<Builtin::kNumberToString>::type;
5545 UseFixed(value_input(), D::GetRegisterParameter(D::kInput));
5546 DefineAsFixed(this, kReturnRegister0);
5547}
5548void NumberToString::GenerateCode(MaglevAssembler* masm,
5549 const ProcessingState& state) {
5550 __ CallBuiltin<Builtin::kNumberToString>(value_input());
5551 masm->DefineLazyDeoptPoint(this->lazy_deopt_info());
5552}
5553
5554int ThrowReferenceErrorIfHole::MaxCallStackArgs() const { return 1; }
5555void ThrowReferenceErrorIfHole::SetValueLocationConstraints() {
5556 UseAny(value());
5557}
5558void ThrowReferenceErrorIfHole::GenerateCode(MaglevAssembler* masm,
5559 const ProcessingState& state) {
5560 __ JumpToDeferredIf(
5561 __ IsRootConstant(value(), RootIndex::kTheHoleValue),
5562 [](MaglevAssembler* masm, ThrowReferenceErrorIfHole* node) {
5563 __ Push(node->name().object());
5564 __ Move(kContextRegister, masm->native_context().object());
5565 __ CallRuntime(Runtime::kThrowAccessedUninitializedVariable, 1);
5566 masm->DefineExceptionHandlerAndLazyDeoptPoint(node);
5567 __ Abort(AbortReason::kUnexpectedReturnFromThrow);
5568 },
5569 this);
5570}
5571
5572int ThrowSuperNotCalledIfHole::MaxCallStackArgs() const { return 0; }
5573void ThrowSuperNotCalledIfHole::SetValueLocationConstraints() {
5574 UseAny(value());
5575}
5576void ThrowSuperNotCalledIfHole::GenerateCode(MaglevAssembler* masm,
5577 const ProcessingState& state) {
5578 __ JumpToDeferredIf(
5579 __ IsRootConstant(value(), RootIndex::kTheHoleValue),
5580 [](MaglevAssembler* masm, ThrowSuperNotCalledIfHole* node) {
5581 __ Move(kContextRegister, masm->native_context().object());
5582 __ CallRuntime(Runtime::kThrowSuperNotCalled, 0);
5583 masm->DefineExceptionHandlerAndLazyDeoptPoint(node);
5584 __ Abort(AbortReason::kUnexpectedReturnFromThrow);
5585 },
5586 this);
5587}
5588
5589int ThrowSuperAlreadyCalledIfNotHole::MaxCallStackArgs() const { return 0; }
5590void ThrowSuperAlreadyCalledIfNotHole::SetValueLocationConstraints() {
5591 UseAny(value());
5592}
5593void ThrowSuperAlreadyCalledIfNotHole::GenerateCode(
5594 MaglevAssembler* masm, const ProcessingState& state) {
5595 __ JumpToDeferredIf(
5596 NegateCondition(__ IsRootConstant(value(), RootIndex::kTheHoleValue)),
5597 [](MaglevAssembler* masm, ThrowSuperAlreadyCalledIfNotHole* node) {
5598 __ Move(kContextRegister, masm->native_context().object());
5599 __ CallRuntime(Runtime::kThrowSuperAlreadyCalledError, 0);
5600 masm->DefineExceptionHandlerAndLazyDeoptPoint(node);
5601 __ Abort(AbortReason::kUnexpectedReturnFromThrow);
5602 },
5603 this);
5604}
5605
5606int ThrowIfNotCallable::MaxCallStackArgs() const { return 1; }
5607void ThrowIfNotCallable::SetValueLocationConstraints() {
5608 UseRegister(value());
5609 set_temporaries_needed(1);
5610}
5611void ThrowIfNotCallable::GenerateCode(MaglevAssembler* masm,
5612 const ProcessingState& state) {
5613 Label* if_not_callable = __ MakeDeferredCode(
5614 [](MaglevAssembler* masm, ThrowIfNotCallable* node) {
5615 __ Push(node->value());
5616 __ Move(kContextRegister, masm->native_context().object());
5617 __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
5618 masm->DefineExceptionHandlerAndLazyDeoptPoint(node);
5619 __ Abort(AbortReason::kUnexpectedReturnFromThrow);
5620 },
5621 this);
5622
5623 Register value_reg = ToRegister(value());
5624 MaglevAssembler::TemporaryRegisterScope temps(masm);
5625 Register scratch = temps.Acquire();
5626 __ JumpIfNotCallable(value_reg, scratch, CheckType::kCheckHeapObject,
5627 if_not_callable);
5628}
5629
5630int ThrowIfNotSuperConstructor::MaxCallStackArgs() const { return 2; }
5631void ThrowIfNotSuperConstructor::SetValueLocationConstraints() {
5632 UseRegister(constructor());
5633 UseRegister(function());
5634 set_temporaries_needed(1);
5635}
5636void ThrowIfNotSuperConstructor::GenerateCode(MaglevAssembler* masm,
5637 const ProcessingState& state) {
5638 MaglevAssembler::TemporaryRegisterScope temps(masm);
5639 Register scratch = temps.Acquire();
5640 __ LoadMap(scratch, ToRegister(constructor()));
5641 static_assert(Map::kBitFieldOffsetEnd + 1 - Map::kBitFieldOffset == 1);
5642 __ TestUint8AndJumpIfAllClear(
5643 FieldMemOperand(scratch, Map::kBitFieldOffset),
5644 Map::Bits1::IsConstructorBit::kMask,
5645 __ MakeDeferredCode(
5646 [](MaglevAssembler* masm, ThrowIfNotSuperConstructor* node) {
5647 __ Push(ToRegister(node->constructor()),
5648 ToRegister(node->function()));
5649 __ Move(kContextRegister, masm->native_context().object());
5650 __ CallRuntime(Runtime::kThrowNotSuperConstructor, 2);
5651 masm->DefineExceptionHandlerAndLazyDeoptPoint(node);
5652 __ Abort(AbortReason::kUnexpectedReturnFromThrow);
5653 },
5654 this));
5655}
5656
5657void TruncateUint32ToInt32::SetValueLocationConstraints() {
5658 UseRegister(input());
5659 DefineSameAsFirst(this);
5660}
5661void TruncateUint32ToInt32::GenerateCode(MaglevAssembler* masm,
5662 const ProcessingState& state) {
5663 // No code emitted -- as far as the machine is concerned, int32 is uint32.
5664 DCHECK_EQ(ToRegister(input()), ToRegister(result()));
5665}
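// Illustrative sketch (editorial, not from the original source): the
// uint32 -> int32 conversion is a pure reinterpretation of the same 32 bits,
// so no instruction needs to be emitted:
//   uint32_t u = 0xFFFFFFFFu;             // 4294967295
//   int32_t i = static_cast<int32_t>(u);  // -1, identical bit pattern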
5666
5667void TruncateFloat64ToInt32::SetValueLocationConstraints() {
5668 UseRegister(input());
5669 DefineAsRegister(this);
5670}
5671void TruncateFloat64ToInt32::GenerateCode(MaglevAssembler* masm,
5672 const ProcessingState& state) {
5673 __ TruncateDoubleToInt32(ToRegister(result()), ToDoubleRegister(input()));
5674}
5675
5676void CheckedTruncateFloat64ToInt32::SetValueLocationConstraints() {
5677 UseRegister(input());
5678 DefineAsRegister(this);
5679}
5680void CheckedTruncateFloat64ToInt32::GenerateCode(MaglevAssembler* masm,
5681 const ProcessingState& state) {
5682 __ TryTruncateDoubleToInt32(
5683 ToRegister(result()), ToDoubleRegister(input()),
5684 __ GetDeoptLabel(this, DeoptimizeReason::kNotInt32));
5685}
5686
5687void CheckedTruncateFloat64ToUint32::SetValueLocationConstraints() {
5688 UseRegister(input());
5689 DefineAsRegister(this);
5690}
5691void CheckedTruncateFloat64ToUint32::GenerateCode(
5692 MaglevAssembler* masm, const ProcessingState& state) {
5693 __ TryTruncateDoubleToUint32(
5694 ToRegister(result()), ToDoubleRegister(input()),
5695 __ GetDeoptLabel(this, DeoptimizeReason::kNotUint32));
5696}
5697
5698void UnsafeTruncateFloat64ToInt32::SetValueLocationConstraints() {
5699 UseRegister(input());
5700 DefineAsRegister(this);
5701}
5702void UnsafeTruncateFloat64ToInt32::GenerateCode(MaglevAssembler* masm,
5703 const ProcessingState& state) {
5704#ifdef DEBUG
5705 Label fail, start;
5706 __ Jump(&start);
5707 __ bind(&fail);
5708 __ Abort(AbortReason::kFloat64IsNotAInt32);
5709
5710 __ bind(&start);
5711 __ TryTruncateDoubleToInt32(ToRegister(result()), ToDoubleRegister(input()),
5712 &fail);
5713#else
5714 // TODO(dmercadier): TruncateDoubleToInt32 does additional work when the
5715 // double doesn't fit in a 32-bit integer. This is not necessary for
5716 // UnsafeTruncateFloat64ToInt32 (since we statically know that the double
5717 // fits in a 32-bit int) and could be instead just a Cvttsd2si (x64) or Fcvtzs
5718 // (arm64).
5719 __ TruncateDoubleToInt32(ToRegister(result()), ToDoubleRegister(input()));
5720#endif
5721}
5722
5723void CheckedUint32ToInt32::SetValueLocationConstraints() {
5724 UseRegister(input());
5725 DefineSameAsFirst(this);
5726}
5727void CheckedUint32ToInt32::GenerateCode(MaglevAssembler* masm,
5728 const ProcessingState& state) {
5729 Register input_reg = ToRegister(input());
5730 Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kNotInt32);
5731 __ CompareInt32AndJumpIf(input_reg, 0, kLessThan, fail);
5732}
5733
5734void UnsafeTruncateUint32ToInt32::SetValueLocationConstraints() {
5735 UseRegister(input());
5736 DefineSameAsFirst(this);
5737}
5738void UnsafeTruncateUint32ToInt32::GenerateCode(MaglevAssembler* masm,
5739 const ProcessingState& state) {
5740#ifdef DEBUG
5741 Register input_reg = ToRegister(input());
5742 __ CompareInt32AndAssert(input_reg, 0, kGreaterThanEqual,
5743 AbortReason::kUint32IsNotAInt32);
5744#endif
5745 // No code emitted -- as far as the machine is concerned, int32 is uint32.
5746 DCHECK_EQ(ToRegister(input()), ToRegister(result()));
5747}
5748
5749void Int32ToUint8Clamped::SetValueLocationConstraints() {
5750 UseRegister(input());
5751 DefineSameAsFirst(this);
5752}
5753void Int32ToUint8Clamped::GenerateCode(MaglevAssembler* masm,
5754 const ProcessingState& state) {
5755 Register value = ToRegister(input());
5756 Register result_reg = ToRegister(result());
5757 DCHECK_EQ(value, result_reg);
5758 Label min, done;
5759 __ CompareInt32AndJumpIf(value, 0, kLessThanEqual, &min);
5760 __ CompareInt32AndJumpIf(value, 255, kLessThanEqual, &done);
5761 __ Move(result_reg, 255);
5762 __ Jump(&done, Label::Distance::kNear);
5763 __ bind(&min);
5764 __ Move(result_reg, 0);
5765 __ bind(&done);
5766}
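// Worked example (editorial, not from the original source) of the clamp
// implemented above, i.e. result = min(max(value, 0), 255):
//   -5 -> 0, 0 -> 0, 100 -> 100, 255 -> 255, 300 -> 255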
5767
5768void Uint32ToUint8Clamped::SetValueLocationConstraints() {
5769 UseRegister(input());
5770 DefineSameAsFirst(this);
5771}
5772void Uint32ToUint8Clamped::GenerateCode(MaglevAssembler* masm,
5773 const ProcessingState& state) {
5774 Register value = ToRegister(input());
5775 DCHECK_EQ(value, ToRegister(result()));
5776 Label done;
5777 __ CompareInt32AndJumpIf(value, 255, kUnsignedLessThanEqual, &done,
5778 Label::Distance::kNear);
5779 __ Move(value, 255);
5780 __ bind(&done);
5781}
5782
5783void Float64ToUint8Clamped::SetValueLocationConstraints() {
5784 UseRegister(input());
5785 DefineAsRegister(this);
5786}
5787void Float64ToUint8Clamped::GenerateCode(MaglevAssembler* masm,
5788 const ProcessingState& state) {
5789 DoubleRegister value = ToDoubleRegister(input());
5790 Register result_reg = ToRegister(result());
5791 Label min, max, done;
5792 __ ToUint8Clamped(result_reg, value, &min, &max, &done);
5793 __ bind(&min);
5794 __ Move(result_reg, 0);
5795 __ Jump(&done, Label::Distance::kNear);
5796 __ bind(&max);
5797 __ Move(result_reg, 255);
5798 __ bind(&done);
5799}
5800
5801void CheckNumber::SetValueLocationConstraints() {
5802 UseRegister(receiver_input());
5803}
5804void CheckNumber::GenerateCode(MaglevAssembler* masm,
5805 const ProcessingState& state) {
5806 Label done;
5807 MaglevAssembler::TemporaryRegisterScope temps(masm);
5808 Register scratch = temps.AcquireScratch();
5809 Register value = ToRegister(receiver_input());
5810 // If {value} is a Smi or a HeapNumber, we're done.
5811 __ JumpIfSmi(
5812 value, &done,
5813 v8_flags.debug_code ? Label::Distance::kFar : Label::Distance::kNear);
5814 if (mode() == Object::Conversion::kToNumeric) {
5815 __ LoadMapForCompare(scratch, value);
5816 __ CompareTaggedRoot(scratch, RootIndex::kHeapNumberMap);
5817 // Jump to done if it is a HeapNumber.
5818 __ JumpIf(
5819 kEqual, &done,
5820 v8_flags.debug_code ? Label::Distance::kFar : Label::Distance::kNear);
5821 // Check if it is a BigInt.
5822 __ CompareTaggedRootAndEmitEagerDeoptIf(
5823 scratch, RootIndex::kBigIntMap, kNotEqual,
5824 DeoptimizeReason::kNotANumber, this);
5825 } else {
5826 __ CompareMapWithRootAndEmitEagerDeoptIf(
5827 value, RootIndex::kHeapNumberMap, scratch, kNotEqual,
5828 DeoptimizeReason::kNotANumber, this);
5829 }
5830 __ bind(&done);
5831}
5832
5833void CheckedInternalizedString::SetValueLocationConstraints() {
5834 UseRegister(object_input());
5835 DefineSameAsFirst(this);
5836}
5837void CheckedInternalizedString::GenerateCode(MaglevAssembler* masm,
5838 const ProcessingState& state) {
5839 Register object = ToRegister(object_input());
5840 MaglevAssembler::TemporaryRegisterScope temps(masm);
5841 Register instance_type = temps.AcquireScratch();
5842 if (check_type() == CheckType::kOmitHeapObjectCheck) {
5843 __ AssertNotSmi(object);
5844 } else {
5845 __ EmitEagerDeoptIfSmi(this, object, DeoptimizeReason::kWrongMap);
5846 }
5847 __ LoadInstanceType(instance_type, object);
5848 __ RecordComment("Test IsInternalizedString");
5849 // Go to the slow path if this is a non-string, or a non-internalised string.
5850 static_assert((kStringTag | kInternalizedTag) == 0);
5851 ZoneLabelRef done(masm);
5852 __ TestInt32AndJumpIfAnySet(
5854 __ MakeDeferredCode(
5855 [](MaglevAssembler* masm, ZoneLabelRef done,
5856 CheckedInternalizedString* node, Register object,
5857 Register instance_type) {
5858 __ RecordComment("Deferred Test IsThinString");
5859 // Deopt if this isn't a string.
5860 __ TestInt32AndJumpIfAnySet(
5861 instance_type, kIsNotStringMask,
5862 __ GetDeoptLabel(node, DeoptimizeReason::kWrongMap));
5863 // Deopt if this isn't a thin string.
5864 static_assert(base::bits::CountPopulation(kThinStringTagBit) == 1);
5865 __ TestInt32AndJumpIfAllClear(
5866 instance_type, kThinStringTagBit,
5867 __ GetDeoptLabel(node, DeoptimizeReason::kWrongMap));
5868 // Load internalized string from thin string.
5869 __ LoadTaggedField(object, object, offsetof(ThinString, actual_));
5870 if (v8_flags.debug_code) {
5871 __ RecordComment("DCHECK IsInternalizedString");
5872 Label checked;
5873 __ LoadInstanceType(instance_type, object);
5874 __ TestInt32AndJumpIfAllClear(
5875 instance_type, kIsNotStringMask | kIsNotInternalizedMask,
5876 &checked);
5877 __ Abort(AbortReason::kUnexpectedValue);
5878 __ bind(&checked);
5879 }
5880 __ Jump(*done);
5881 },
5882 done, this, object, instance_type));
5883 __ bind(*done);
5884}
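// Background note (editorial, hedged): a ThinString is a small forwarding
// object whose actual_ field points at the internalized copy of the string.
// The deferred path above follows that single indirection, so the node can
// still produce an internalized string without deopting.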
5885
5886void CheckedNumberToUint8Clamped::SetValueLocationConstraints() {
5887 UseRegister(input());
5888 DefineSameAsFirst(this);
5889 set_temporaries_needed(1);
5890 set_double_temporaries_needed(1);
5891}
5892void CheckedNumberToUint8Clamped::GenerateCode(MaglevAssembler* masm,
5893 const ProcessingState& state) {
5894 Register value = ToRegister(input());
5895 Register result_reg = ToRegister(result());
5896 MaglevAssembler::TemporaryRegisterScope temps(masm);
5897 Register scratch = temps.Acquire();
5898 DoubleRegister double_value = temps.AcquireDouble();
5899 Label is_not_smi, min, max, done;
5900 // Check if Smi.
5901 __ JumpIfNotSmi(value, &is_not_smi);
5902 // If Smi, convert to Int32.
5903 __ SmiToInt32(value);
5904 // Clamp.
5905 __ CompareInt32AndJumpIf(value, 0, kLessThanEqual, &min);
5906 __ CompareInt32AndJumpIf(value, 255, kGreaterThanEqual, &max);
5907 __ Jump(&done);
5908 __ bind(&is_not_smi);
5909 // Check if HeapNumber, deopt otherwise.
5910 __ CompareMapWithRootAndEmitEagerDeoptIf(value, RootIndex::kHeapNumberMap,
5911 scratch, kNotEqual,
5912 DeoptimizeReason::kNotANumber, this);
5913 // If heap number, get double value.
5914 __ LoadHeapNumberValue(double_value, value);
5915 // Clamp.
5916 __ ToUint8Clamped(value, double_value, &min, &max, &done);
5917 __ bind(&min);
5918 __ Move(result_reg, 0);
5919 __ Jump(&done, Label::Distance::kNear);
5920 __ bind(&max);
5921 __ Move(result_reg, 255);
5922 __ bind(&done);
5923}
5924
5925void StoreFixedArrayElementWithWriteBarrier::SetValueLocationConstraints() {
5926 UseRegister(elements_input());
5927 UseRegister(index_input());
5928 UseRegister(value_input());
5929 RequireSpecificTemporary(WriteBarrierDescriptor::ObjectRegister());
5930 RequireSpecificTemporary(WriteBarrierDescriptor::SlotAddressRegister());
5931}
5932void StoreFixedArrayElementWithWriteBarrier::GenerateCode(
5933 MaglevAssembler* masm, const ProcessingState& state) {
5934 Register elements = ToRegister(elements_input());
5935 Register index = ToRegister(index_input());
5936 Register value = ToRegister(value_input());
5937 __ StoreFixedArrayElementWithWriteBarrier(elements, index, value,
5938 register_snapshot());
5939}
5940
5941void StoreFixedArrayElementNoWriteBarrier::SetValueLocationConstraints() {
5942 UseRegister(elements_input());
5943 UseRegister(index_input());
5944 UseRegister(value_input());
5945}
5946void StoreFixedArrayElementNoWriteBarrier::GenerateCode(
5947 MaglevAssembler* masm, const ProcessingState& state) {
5948 Register elements = ToRegister(elements_input());
5949 Register index = ToRegister(index_input());
5950 Register value = ToRegister(value_input());
5951 __ StoreFixedArrayElementNoWriteBarrier(elements, index, value);
5952 __ AssertElidedWriteBarrier(elements, value, register_snapshot());
5953}
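// Design note (editorial, hedged): eliding the write barrier is only sound
// when the stored value can never require generational or shared-heap
// tracking (for example, when it is known to be a Smi);
// AssertElidedWriteBarrier re-checks that assumption in debug builds.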
5954
5955// ---
5956// Arch agnostic call nodes
5957// ---
5958
5959int Call::MaxCallStackArgs() const { return num_args(); }
5960void Call::SetValueLocationConstraints() {
5961 using D = CallTrampolineDescriptor;
5962 UseFixed(function(), D::GetRegisterParameter(D::kFunction));
5963 UseAny(arg(0));
5964 for (int i = 1; i < num_args(); i++) {
5965 UseAny(arg(i));
5966 }
5967 UseFixed(context(), kContextRegister);
5968 DefineAsFixed(this, kReturnRegister0);
5969}
5970
5971void Call::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
5972 __ PushReverse(args());
5973
5974 uint32_t arg_count = num_args();
5975 if (target_type_ == TargetType::kAny) {
5976 switch (receiver_mode_) {
5977 case ConvertReceiverMode::kNullOrUndefined:
5978 __ CallBuiltin<Builtin::kCall_ReceiverIsNullOrUndefined>(
5979 context(), function(), arg_count);
5980 break;
5981 case ConvertReceiverMode::kNotNullOrUndefined:
5982 __ CallBuiltin<Builtin::kCall_ReceiverIsNotNullOrUndefined>(
5983 context(), function(), arg_count);
5984 break;
5985 case ConvertReceiverMode::kAny:
5986 __ CallBuiltin<Builtin::kCall_ReceiverIsAny>(context(), function(),
5987 arg_count);
5988 break;
5989 }
5990 } else {
5991 DCHECK_EQ(TargetType::kJSFunction, target_type_);
5992 switch (receiver_mode_) {
5993 case ConvertReceiverMode::kNullOrUndefined:
5994 __ CallBuiltin<Builtin::kCallFunction_ReceiverIsNullOrUndefined>(
5995 context(), function(), arg_count);
5996 break;
5997 case ConvertReceiverMode::kNotNullOrUndefined:
5998 __ CallBuiltin<Builtin::kCallFunction_ReceiverIsNotNullOrUndefined>(
5999 context(), function(), arg_count);
6000 break;
6001 case ConvertReceiverMode::kAny:
6002 __ CallBuiltin<Builtin::kCallFunction_ReceiverIsAny>(
6003 context(), function(), arg_count);
6004 break;
6005 }
6006 }
6007
6008 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
6009}
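// Illustration (assumed semantics, not from this file): the receiver mode
// reflects the call site shape in JS, e.g.
//   o.f(x);  // kNotNullOrUndefined: the receiver is o
//   f(x);    // kNullOrUndefined: receiver is undefined until the Call
//            // builtin patches in the global proxy for sloppy-mode targets
// kAny is used when the graph builder cannot prove either case.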
6010
6011int CallForwardVarargs::MaxCallStackArgs() const { return num_args(); }
6012void CallForwardVarargs::SetValueLocationConstraints() {
6013 using D = CallTrampolineDescriptor;
6014 UseFixed(function(), D::GetRegisterParameter(D::kFunction));
6015 UseAny(arg(0));
6016 for (int i = 1; i < num_args(); i++) {
6017 UseAny(arg(i));
6018 }
6019 UseFixed(context(), kContextRegister);
6020 DefineAsFixed(this, kReturnRegister0);
6021}
6022
6023void CallForwardVarargs::GenerateCode(MaglevAssembler* masm,
6024 const ProcessingState& state) {
6025 __ PushReverse(args());
6026 switch (target_type_) {
6027 case Call::TargetType::kJSFunction:
6028 __ CallBuiltin<Builtin::kCallFunctionForwardVarargs>(
6029 context(), function(), num_args(), start_index_);
6030 break;
6031 case Call::TargetType::kAny:
6032 __ CallBuiltin<Builtin::kCallForwardVarargs>(context(), function(),
6033 num_args(), start_index_);
6034 break;
6035 }
6036 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
6037}
6038
6039int CallSelf::MaxCallStackArgs() const {
6040 int actual_parameter_count = num_args() + 1;
6041 return std::max(expected_parameter_count_, actual_parameter_count);
6042}
6043void CallSelf::SetValueLocationConstraints() {
6044 UseAny(receiver());
6045 for (int i = 0; i < num_args(); i++) {
6046 UseAny(arg(i));
6047 }
6048 UseFixed(closure(), kJavaScriptCallTargetRegister);
6049 UseFixed(new_target(), kJavaScriptCallNewTargetRegister);
6050 UseFixed(context(), kContextRegister);
6051 DefineAsFixed(this, kReturnRegister0);
6052 set_temporaries_needed(1);
6053}
6054
6055void CallSelf::GenerateCode(MaglevAssembler* masm,
6056 const ProcessingState& state) {
6057 MaglevAssembler::TemporaryRegisterScope temps(masm);
6058 Register scratch = temps.Acquire();
6059 int actual_parameter_count = num_args() + 1;
6060 if (actual_parameter_count < expected_parameter_count_) {
6061 int number_of_undefineds =
6062 expected_parameter_count_ - actual_parameter_count;
6063 __ LoadRoot(scratch, RootIndex::kUndefinedValue);
6064 __ PushReverse(receiver(), args(),
6065 RepeatValue(scratch, number_of_undefineds));
6066 } else {
6067 __ PushReverse(receiver(), args());
6068 }
6069 DCHECK_EQ(kContextRegister, ToRegister(context()));
6070 DCHECK_EQ(kJavaScriptCallTargetRegister, ToRegister(closure()));
6071 __ Move(kJavaScriptCallArgCountRegister, actual_parameter_count);
6072 __ CallSelf();
6073 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
6074}
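// Worked example (editorial, not from the original source) of the arity
// padding above: for `function f(a, b, c) {}` called as `f(1)`,
// actual_parameter_count is 2 (receiver plus one argument) while the
// expected count is 4, so two kUndefinedValue slots are pushed before the
// self-call.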
6075
6076int CallKnownJSFunction::MaxCallStackArgs() const {
6077 int actual_parameter_count = num_args() + 1;
6078 return std::max(expected_parameter_count_, actual_parameter_count);
6079}
6080void CallKnownJSFunction::SetValueLocationConstraints() {
6081 UseAny(receiver());
6082 for (int i = 0; i < num_args(); i++) {
6083 UseAny(arg(i));
6084 }
6085 UseFixed(closure(), kJavaScriptCallTargetRegister);
6086 UseFixed(new_target(), kJavaScriptCallNewTargetRegister);
6087 UseFixed(context(), kContextRegister);
6088 DefineAsFixed(this, kReturnRegister0);
6089 set_temporaries_needed(1);
6090}
6091
6092void CallKnownJSFunction::GenerateCode(MaglevAssembler* masm,
6093 const ProcessingState& state) {
6094 MaglevAssembler::TemporaryRegisterScope temps(masm);
6095 Register scratch = temps.Acquire();
6096 int actual_parameter_count = num_args() + 1;
6097 if (actual_parameter_count < expected_parameter_count_) {
6098 int number_of_undefineds =
6099 expected_parameter_count_ - actual_parameter_count;
6100 __ LoadRoot(scratch, RootIndex::kUndefinedValue);
6101 __ PushReverse(receiver(), args(),
6102 RepeatValue(scratch, number_of_undefineds));
6103 } else {
6104 __ PushReverse(receiver(), args());
6105 }
6106 // From here on, we're going to do a call, so all registers are valid temps,
6107 // except for the ones we're going to write. This is needed in case one of the
6108 // helper methods below wants to use a temp and one of these is in the temp
6109 // list (in particular, this can happen on arm64 where cp is a temp register
6110 // by default).
6111 temps.SetAvailable(MaglevAssembler::GetAllocatableRegisters() -
6112 RegList{kContextRegister, kJavaScriptCallCodeStartRegister,
6113 kJavaScriptCallTargetRegister,
6114 kJavaScriptCallNewTargetRegister,
6115 kJavaScriptCallArgCountRegister});
6116 DCHECK_EQ(kContextRegister, ToRegister(context()));
6117 DCHECK_EQ(kJavaScriptCallTargetRegister, ToRegister(closure()));
6118 __ Move(kJavaScriptCallArgCountRegister, actual_parameter_count);
6119 if (shared_function_info().HasBuiltinId()) {
6120 Builtin builtin = shared_function_info().builtin_id();
6121
6122 // This SBXCHECK is a defense-in-depth measure to ensure that we always
6123 // generate valid calls here (with matching signatures).
6126
6127 __ CallBuiltin(builtin);
6128 } else {
6129#if V8_ENABLE_LEAPTIERING
6130 __ CallJSDispatchEntry(dispatch_handle_, expected_parameter_count_);
6131#else
6132 __ CallJSFunction(kJavaScriptCallTargetRegister, expected_parameter_count_);
6133#endif
6134 }
6135 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
6136}
6137
6138int CallKnownApiFunction::MaxCallStackArgs() const {
6139 int actual_parameter_count = num_args() + 1;
6140 return actual_parameter_count;
6141}
6142
6143void CallKnownApiFunction::SetValueLocationConstraints() {
6144 UseAny(receiver());
6145 for (int i = 0; i < num_args(); i++) {
6146 UseAny(arg(i));
6147 }
6148 UseFixed(context(), kContextRegister);
6149
6150 DefineAsFixed(this, kReturnRegister0);
6151
6152 if (inline_builtin()) {
6153 set_temporaries_needed(2);
6154 }
6155}
6156
6157void CallKnownApiFunction::GenerateCode(MaglevAssembler* masm,
6158 const ProcessingState& state) {
6159 MaglevAssembler::TemporaryRegisterScope temps(masm);
6160 __ PushReverse(receiver(), args());
6161
6162 // From here on, we're going to do a call, so all registers are valid temps,
6163 // except for the ones we're going to write. This is needed in case one of the
6164 // helper methods below wants to use a temp and one of these is in the temp
6165 // list (in particular, this can happen on arm64 where cp is a temp register
6166 // by default).
6167 temps.SetAvailable(
6168 MaglevAssembler::GetAllocatableRegisters() -
6169 RegList{
6170 kContextRegister,
6171 CallApiCallbackOptimizedDescriptor::ActualArgumentsCountRegister(),
6172 CallApiCallbackOptimizedDescriptor::FunctionTemplateInfoRegister(),
6173 CallApiCallbackOptimizedDescriptor::ApiFunctionAddressRegister()});
6175
6176 if (inline_builtin()) {
6177 GenerateCallApiCallbackOptimizedInline(masm, state);
6178 return;
6179 }
6180
6181 __ Move(CallApiCallbackOptimizedDescriptor::ActualArgumentsCountRegister(),
6182 num_args()); // not including receiver
6183
6184 __ Move(CallApiCallbackOptimizedDescriptor::FunctionTemplateInfoRegister(),
6185 i::Cast<HeapObject>(function_template_info_.object()));
6186
6187 compiler::JSHeapBroker* broker = masm->compilation_info()->broker();
6188 ApiFunction function(function_template_info_.callback(broker));
6189 ExternalReference reference =
6190 ExternalReference::Create(&function, ExternalReference::DIRECT_API_CALL);
6191 __ Move(CallApiCallbackOptimizedDescriptor::ApiFunctionAddressRegister(),
6192 reference);
6193
6194 switch (mode()) {
6195 case kNoProfiling:
6196 __ CallBuiltin(Builtin::kCallApiCallbackOptimizedNoProfiling);
6197 break;
6198 case kNoProfilingInlined:
6199 UNREACHABLE();
6200 case kGeneric:
6201 __ CallBuiltin(Builtin::kCallApiCallbackOptimized);
6202 break;
6203 }
6204 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
6205}
6206
6207void CallKnownApiFunction::GenerateCallApiCallbackOptimizedInline(
6208 MaglevAssembler* masm, const ProcessingState& state) {
6209 MaglevAssembler::TemporaryRegisterScope temps(masm);
6210 Register scratch = temps.Acquire();
6211 Register scratch2 = temps.Acquire();
6212
6213 using FCA = FunctionCallbackArguments;
6214 using ER = ExternalReference;
6215 using FC = ApiCallbackExitFrameConstants;
6216
6217 static_assert(FCA::kArgsLength == 6);
6218 static_assert(FCA::kNewTargetIndex == 5);
6219 static_assert(FCA::kTargetIndex == 4);
6220 static_assert(FCA::kReturnValueIndex == 3);
6221 static_assert(FCA::kContextIndex == 2);
6222 static_assert(FCA::kIsolateIndex == 1);
6223 static_assert(FCA::kUnusedIndex == 0);
6224
6225 // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
6226 //
6227 // Target state:
6228 // sp[0 * kSystemPointerSize]: kUnused <= FCA::implicit_args_
6229 // sp[1 * kSystemPointerSize]: kIsolate
6230 // sp[2 * kSystemPointerSize]: kContext
6231 // sp[3 * kSystemPointerSize]: undefined (kReturnValue)
6232 // sp[4 * kSystemPointerSize]: kTarget
6233 // sp[5 * kSystemPointerSize]: undefined (kNewTarget)
6234 // Existing state:
6235 // sp[6 * kSystemPointerSize]: <= FCA::values_
6236
6237 __ StoreRootRelative(IsolateData::topmost_script_having_context_offset(),
6238 kContextRegister);
6239
6240 ASM_CODE_COMMENT_STRING(masm, "inlined CallApiCallbackOptimized builtin");
6241 __ LoadRoot(scratch, RootIndex::kUndefinedValue);
6242 // kNewTarget, kTarget, kReturnValue, kContext
6243 __ Push(scratch, i::Cast<HeapObject>(function_template_info_.object()),
6244 scratch, kContextRegister);
6245 __ Move(scratch2, ER::isolate_address());
6246 // kIsolate, kUnused
6247 __ Push(scratch2, scratch);
6248
6249 Register api_function_address =
6250 CallApiCallbackOptimizedDescriptor::ApiFunctionAddressRegister();
6251
6252 compiler::JSHeapBroker* broker = masm->compilation_info()->broker();
6253 ApiFunction function(function_template_info_.callback(broker));
6254 ExternalReference reference =
6255 ExternalReference::Create(&function, ExternalReference::DIRECT_API_CALL);
6256 __ Move(api_function_address, reference);
6257
6258 Label done, call_api_callback_builtin_inline;
6259 __ Call(&call_api_callback_builtin_inline);
6260 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
6261 __ jmp(&done);
6262
6263 //
6264 // Generate a CallApiCallback builtin inline.
6265 //
6266 __ bind(&call_api_callback_builtin_inline);
6267
6268 FrameScope frame_scope(masm, StackFrame::MANUAL);
6269 __ EmitEnterExitFrame(FC::getExtraSlotsCountFrom<ExitFrameConstants>(),
6270 StackFrame::API_CALLBACK_EXIT, api_function_address,
6271 scratch2);
6272
6273 Register fp = __ GetFramePointer();
6274#ifdef V8_TARGET_ARCH_ARM64
6275 // This is a workaround for performance regression observed on Apple Silicon
6276 // (https://crbug.com/347741609): reading argc value after the call via
6277 // MemOperand argc_operand = MemOperand(fp, FC::kFCIArgcOffset);
6278 // is noticeably slower than using sp-based access:
6279 MemOperand argc_operand = ExitFrameStackSlotOperand(FCA::kLengthOffset);
6280#else
6281 // We don't enable this workaround for other configurations because
6282 // a) it's not possible to convert fp-based encoding to sp-based one:
6283 // V8 guarantees stack pointer to be only kSystemPointerSize-aligned,
6284 // while C function might require stack pointer to be 16-byte aligned on
6285 // certain platforms,
6286 // b) local experiments on x64 didn't show improvements.
6287 MemOperand argc_operand = MemOperand(fp, FC::kFCIArgcOffset);
6288#endif // V8_TARGET_ARCH_ARM64
6289 {
6290 ASM_CODE_COMMENT_STRING(masm, "Initialize v8::FunctionCallbackInfo");
6291 // FunctionCallbackInfo::length_.
6292 __ Move(scratch, num_args()); // not including receiver
6293 __ Move(argc_operand, scratch);
6294
6295 // FunctionCallbackInfo::implicit_args_.
6296 __ LoadAddress(scratch, MemOperand(fp, FC::kImplicitArgsArrayOffset));
6297 __ Move(MemOperand(fp, FC::kFCIImplicitArgsOffset), scratch);
6298
6299 // FunctionCallbackInfo::values_ (points at JS arguments on the stack).
6300 __ LoadAddress(scratch, MemOperand(fp, FC::kFirstArgumentOffset));
6301 __ Move(MemOperand(fp, FC::kFCIValuesOffset), scratch);
6302 }
6303
6304 Register function_callback_info_arg = kCArgRegs[0];
6305
6306 __ RecordComment("v8::FunctionCallback's argument.");
6307 __ LoadAddress(function_callback_info_arg,
6308 MemOperand(fp, FC::kFunctionCallbackInfoOffset));
6309
6310 DCHECK(!AreAliased(api_function_address, function_callback_info_arg));
6311
6312 MemOperand return_value_operand = MemOperand(fp, FC::kReturnValueOffset);
6313 const int kSlotsToDropOnReturn =
6314 FC::kFunctionCallbackInfoArgsLength + kJSArgcReceiverSlots + num_args();
6315
6316 const bool with_profiling = false;
6317 ExternalReference no_thunk_ref;
6318 Register no_thunk_arg = no_reg;
6319
6320 CallApiFunctionAndReturn(masm, with_profiling, api_function_address,
6321 no_thunk_ref, no_thunk_arg, kSlotsToDropOnReturn,
6322 nullptr, return_value_operand);
6323 __ RecordComment("end of inlined CallApiCallbackOptimized builtin");
6324
6325 __ bind(&done);
6326}
6327
6328int CallBuiltin::MaxCallStackArgs() const {
6329 auto descriptor = Builtins::CallInterfaceDescriptorFor(builtin());
6330 if (!descriptor.AllowVarArgs()) {
6331 return descriptor.GetStackParameterCount();
6332 } else {
6333 int all_input_count = InputCountWithoutContext() + (has_feedback() ? 2 : 0);
6334 DCHECK_GE(all_input_count, descriptor.GetRegisterParameterCount());
6335 return all_input_count - descriptor.GetRegisterParameterCount();
6336 }
6337}
6338
6339void CallBuiltin::SetValueLocationConstraints() {
6340 auto descriptor = Builtins::CallInterfaceDescriptorFor(builtin());
6341 bool has_context = descriptor.HasContextParameter();
6342 int i = 0;
6343 for (; i < InputsInRegisterCount(); i++) {
6344 UseFixed(input(i), descriptor.GetRegisterParameter(i));
6345 }
6346 for (; i < InputCountWithoutContext(); i++) {
6347 UseAny(input(i));
6348 }
6349 if (has_context) {
6350 UseFixed(input(i), kContextRegister);
6351 }
6351 }
6352 DefineAsFixed(this, kReturnRegister0);
6353}
6354
6355template <typename... Args>
6356void CallBuiltin::PushArguments(MaglevAssembler* masm, Args... extra_args) {
6357 auto descriptor = Builtins::CallInterfaceDescriptorFor(builtin());
6358 if (descriptor.GetStackArgumentOrder() == StackArgumentOrder::kDefault) {
6359 // In Default order we cannot have extra args (feedback).
6360 DCHECK_EQ(sizeof...(extra_args), 0);
6361 __ Push(stack_args());
6362 } else {
6363 DCHECK_EQ(descriptor.GetStackArgumentOrder(), StackArgumentOrder::kJS);
6364 __ PushReverse(extra_args..., stack_args());
6365 }
6366}
6367
6368void CallBuiltin::PassFeedbackSlotInRegister(MaglevAssembler* masm) {
6369 DCHECK(has_feedback());
6370 auto descriptor = Builtins::CallInterfaceDescriptorFor(builtin());
6371 int slot_index = InputCountWithoutContext();
6372 switch (slot_type()) {
6373 case kTaggedIndex:
6374 __ Move(descriptor.GetRegisterParameter(slot_index),
6375 TaggedIndex::FromIntptr(feedback().index()));
6376 break;
6377 case kSmi:
6378 __ Move(descriptor.GetRegisterParameter(slot_index),
6379 Smi::FromInt(feedback().index()));
6380 break;
6381 }
6382}
6383
6384void CallBuiltin::PushFeedbackAndArguments(MaglevAssembler* masm) {
6385 DCHECK(has_feedback());
6386
6387 auto descriptor = Builtins::CallInterfaceDescriptorFor(builtin());
6388 int slot_index = InputCountWithoutContext();
6389 int vector_index = slot_index + 1;
6390
6391 // There are three possibilities:
6392 // 1. Feedback slot and vector are in register.
6393 // 2. Feedback slot is in register and vector is on stack.
6394 // 3. Feedback slot and vector are on stack.
6395 if (vector_index < descriptor.GetRegisterParameterCount()) {
6396 PassFeedbackSlotInRegister(masm);
6397 __ Move(descriptor.GetRegisterParameter(vector_index), feedback().vector);
6398 PushArguments(masm);
6399 } else if (vector_index == descriptor.GetRegisterParameterCount()) {
6400 PassFeedbackSlotInRegister(masm);
6401 DCHECK_EQ(descriptor.GetStackArgumentOrder(), StackArgumentOrder::kJS);
6402 // Ensure that the builtin only expects the feedback vector on the stack and
6403 // potential additional var args are passed through to another builtin.
6404 // This is required to align the stack correctly (e.g. on arm64).
6405 DCHECK_EQ(descriptor.GetStackParameterCount(), 1);
6406 PushArguments(masm);
6407 __ Push(feedback().vector);
6408 } else {
6409 int slot = feedback().index();
6410 Handle<FeedbackVector> vector = feedback().vector;
6411 switch (slot_type()) {
6412 case kTaggedIndex:
6413 PushArguments(masm, TaggedIndex::FromIntptr(slot), vector);
6414 break;
6415 case kSmi:
6416 PushArguments(masm, Smi::FromInt(slot), vector);
6417 break;
6418 }
6419 }
6420}
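// Editorial summary of the three layouts handled above, for a descriptor
// with N register parameters:
//   1. vector_index <  N: slot and vector are both passed in registers.
//   2. vector_index == N: slot goes in the last register and the vector is
//      the single (JS-order) stack parameter.
//   3. vector_index >  N: slot and vector are pushed together with the args.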
6421
6422void CallBuiltin::GenerateCode(MaglevAssembler* masm,
6423 const ProcessingState& state) {
6424 if (has_feedback()) {
6425 PushFeedbackAndArguments(masm);
6426 } else {
6427 PushArguments(masm);
6428 }
6429 __ CallBuiltin(builtin());
6430 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
6431}
6432
6433int CallCPPBuiltin::MaxCallStackArgs() const {
6434 using D = CallInterfaceDescriptorFor<kCEntry_Builtin>::type;
6435 return D::GetStackParameterCount() + num_args();
6436}
6437
6438void CallCPPBuiltin::SetValueLocationConstraints() {
6439 using D = CallInterfaceDescriptorFor<kCEntry_Builtin>::type;
6440 UseAny(target());
6441 UseAny(new_target());
6442 UseFixed(context(), kContextRegister);
6443 for (int i = 0; i < num_args(); i++) {
6444 UseAny(arg(i));
6445 }
6446 DefineAsFixed(this, kReturnRegister0);
6447 set_temporaries_needed(1);
6448 RequireSpecificTemporary(D::GetRegisterParameter(D::kArity));
6449 RequireSpecificTemporary(D::GetRegisterParameter(D::kCFunction));
6450}
6451
6452void CallCPPBuiltin::GenerateCode(MaglevAssembler* masm,
6453 const ProcessingState& state) {
6454 using D = CallInterfaceDescriptorFor<kCEntry_Builtin>::type;
6455 constexpr Register kArityReg = D::GetRegisterParameter(D::kArity);
6456 constexpr Register kCFunctionReg = D::GetRegisterParameter(D::kCFunction);
6457
6458 MaglevAssembler::TemporaryRegisterScope temps(masm);
6459 Register scratch = temps.Acquire();
6460 __ LoadRoot(scratch, RootIndex::kTheHoleValue);
6461
6462 // Push all arguments to the builtin (including the receiver).
6463 static_assert(BuiltinArguments::kReceiverIndex == 4);
6464 __ PushReverse(args());
6465
6466 static_assert(BuiltinArguments::kNumExtraArgs == 4);
6467 static_assert(BuiltinArguments::kNewTargetIndex == 0);
6468 static_assert(BuiltinArguments::kTargetIndex == 1);
6469 static_assert(BuiltinArguments::kArgcIndex == 2);
6470 static_assert(BuiltinArguments::kPaddingIndex == 3);
6471 // Push stack arguments for CEntry.
6472 Tagged<Smi> tagged_argc = Smi::FromInt(BuiltinArguments::kNumExtraArgs +
6473 num_args()); // Includes receiver.
6474 __ Push(scratch /* padding */, tagged_argc, target(), new_target());
6475
6476 // Move values to fixed registers after all arguments are pushed. Registers
6477 // for arguments and CEntry registers might overlap.
6478 __ Move(kArityReg, BuiltinArguments::kNumExtraArgs + num_args());
6479 ExternalReference builtin_address =
6480 ExternalReference::Create(Builtins::CppEntryOf(builtin()));
6481 __ Move(kCFunctionReg, builtin_address);
6482
6484 1);
6485 __ CallBuiltin(Builtin::kCEntry_Return1_ArgvOnStack_BuiltinExit);
6486}
6487
6488int CallRuntime::MaxCallStackArgs() const { return num_args(); }
6489void CallRuntime::SetValueLocationConstraints() {
6490 UseFixed(context(), kContextRegister);
6491 for (int i = 0; i < num_args(); i++) {
6492 UseAny(arg(i));
6493 }
6494 DefineAsFixed(this, kReturnRegister0);
6495}
6496void CallRuntime::GenerateCode(MaglevAssembler* masm,
6497 const ProcessingState& state) {
6498 DCHECK_EQ(ToRegister(context()), kContextRegister);
6499 __ Push(args());
6500 __ CallRuntime(function_id(), num_args());
6501 // TODO(victorgomes): Not sure if this is needed for all runtime calls.
6502 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
6503}
6504
6505int CallWithSpread::MaxCallStackArgs() const {
6506 int argc_no_spread = num_args() - 1;
6507 using D = CallInterfaceDescriptorFor<Builtin::kCallWithSpread>::type;
6508 return argc_no_spread + D::GetStackParameterCount();
6509}
6510void CallWithSpread::SetValueLocationConstraints() {
6511 using D = CallInterfaceDescriptorFor<Builtin::kCallWithSpread>::type;
6512 UseFixed(function(), D::GetRegisterParameter(D::kTarget));
6513 UseFixed(spread(), D::GetRegisterParameter(D::kSpread));
6514 UseFixed(context(), kContextRegister);
6515 for (int i = 0; i < num_args() - 1; i++) {
6516 UseAny(arg(i));
6517 }
6518 DefineAsFixed(this, kReturnRegister0);
6519}
6520void CallWithSpread::GenerateCode(MaglevAssembler* masm,
6521 const ProcessingState& state) {
6522 __ CallBuiltin<Builtin::kCallWithSpread>(
6523 context(), // context
6524 function(), // target
6525 num_args_no_spread(), // arguments count
6526 spread(), // spread
6527 args_no_spread() // pushed args
6528 );
6529
6530 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
6531}
6532
6533int CallWithArrayLike::MaxCallStackArgs() const {
6534 using D = CallInterfaceDescriptorFor<Builtin::kCallWithArrayLike>::type;
6535 return D::GetStackParameterCount();
6536}
6537void CallWithArrayLike::SetValueLocationConstraints() {
6538 using D = CallInterfaceDescriptorFor<Builtin::kCallWithArrayLike>::type;
6539 UseFixed(function(), D::GetRegisterParameter(D::kTarget));
6540 UseAny(receiver());
6541 UseFixed(arguments_list(), D::GetRegisterParameter(D::kArgumentsList));
6542 UseFixed(context(), kContextRegister);
6543 DefineAsFixed(this, kReturnRegister0);
6544}
6545void CallWithArrayLike::GenerateCode(MaglevAssembler* masm,
6546 const ProcessingState& state) {
6547 // CallWithArrayLike is a weird builtin that expects a receiver as top of the
6548 // stack, but doesn't explicitly list it as an extra argument. Push it
6549 // manually, and assert that there are no other stack arguments.
6550 static_assert(
6551 CallInterfaceDescriptorFor<
6552 Builtin::kCallWithArrayLike>::type::GetStackParameterCount() == 0);
6553 __ Push(receiver());
6554 __ CallBuiltin<Builtin::kCallWithArrayLike>(
6555 context(), // context
6556 function(), // target
6557 arguments_list() // arguments list
6558 );
6559 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
6560}
6561
6562// ---
6563// Arch agnostic construct nodes
6564// ---
6565
6566int Construct::MaxCallStackArgs() const {
6567 using D = Construct_WithFeedbackDescriptor;
6568 return num_args() + D::GetStackParameterCount();
6569}
6570void Construct::SetValueLocationConstraints() {
6571 using D = Construct_WithFeedbackDescriptor;
6572 UseFixed(function(), D::GetRegisterParameter(D::kTarget));
6573 UseFixed(new_target(), D::GetRegisterParameter(D::kNewTarget));
6574 UseFixed(context(), kContextRegister);
6575 for (int i = 0; i < num_args(); i++) {
6576 UseAny(arg(i));
6577 }
6578 DefineAsFixed(this, kReturnRegister0);
6579}
6580void Construct::GenerateCode(MaglevAssembler* masm,
6581 const ProcessingState& state) {
6582 __ CallBuiltin<Builtin::kConstruct_WithFeedback>(
6583 context(), // context
6584 function(), // target
6585 new_target(), // new target
6586 num_args(), // actual arguments count
6587 feedback().index(), // feedback slot
6588 feedback().vector, // feedback vector
6589 args() // args
6590 );
6591 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
6592}
6593
6594int ConstructWithSpread::MaxCallStackArgs() const {
6595 int argc_no_spread = num_args() - 1;
6596 using D = CallInterfaceDescriptorFor<
6597 Builtin::kConstructWithSpread_WithFeedback>::type;
6598 return argc_no_spread + D::GetStackParameterCount();
6599}
6600void ConstructWithSpread::SetValueLocationConstraints() {
6601 using D = CallInterfaceDescriptorFor<
6602 Builtin::kConstructWithSpread_WithFeedback>::type;
6603 UseFixed(function(), D::GetRegisterParameter(D::kTarget));
6604 UseFixed(new_target(), D::GetRegisterParameter(D::kNewTarget));
6605 UseFixed(context(), kContextRegister);
6606 for (int i = 0; i < num_args() - 1; i++) {
6607 UseAny(arg(i));
6608 }
6609 UseFixed(spread(), D::GetRegisterParameter(D::kSpread));
6610 DefineAsFixed(this, kReturnRegister0);
6611}
6612void ConstructWithSpread::GenerateCode(MaglevAssembler* masm,
6613 const ProcessingState& state) {
6614 __ CallBuiltin<Builtin::kConstructWithSpread_WithFeedback>(
6615 context(), // context
6616 function(), // target
6617 new_target(), // new target
6618 num_args_no_spread(), // actual arguments count
6619 spread(), // spread
6620 TaggedIndex::FromIntptr(feedback().index()), // feedback slot
6621 feedback().vector, // feedback vector
6622 args_no_spread() // args
6623 );
6624 masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
6625}
6626
6627void SetPendingMessage::SetValueLocationConstraints() {
6628 UseRegister(value());
6629 DefineAsRegister(this);
6630}
6631
6632void SetPendingMessage::GenerateCode(MaglevAssembler* masm,
6633 const ProcessingState& state) {
6634 Register new_message = ToRegister(value());
6635 Register return_value = ToRegister(result());
6636 MaglevAssembler::TemporaryRegisterScope temps(masm);
6637 Register scratch = temps.AcquireScratch();
6638 MemOperand pending_message_operand = __ ExternalReferenceAsOperand(
6639 ExternalReference::address_of_pending_message(masm->isolate()), scratch);
6640 if (new_message != return_value) {
6641 __ Move(return_value, pending_message_operand);
6642 __ Move(pending_message_operand, new_message);
6643 } else {
6644 __ Move(scratch, pending_message_operand);
6645 __ Move(pending_message_operand, new_message);
6646 __ Move(return_value, scratch);
6647 }
6648}
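// Illustrative sketch (editorial, not from the original source) of the
// semantics implemented above:
//   old = isolate->pending_message;
//   isolate->pending_message = new_message;
//   return old;
// The else branch handles new_message and the result sharing one register.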
6649
6650void StoreDoubleField::SetValueLocationConstraints() {
6651 UseRegister(object_input());
6652 UseRegister(value_input());
6653}
6654void StoreDoubleField::GenerateCode(MaglevAssembler* masm,
6655 const ProcessingState& state) {
6656 Register object = ToRegister(object_input());
6657 DoubleRegister value = ToDoubleRegister(value_input());
6658
6659 MaglevAssembler::TemporaryRegisterScope temps(masm);
6660 Register heap_number = temps.AcquireScratch();
6661
6662 __ AssertNotSmi(object);
6663 __ LoadTaggedField(heap_number, object, offset());
6664 __ AssertNotSmi(heap_number);
6665 __ StoreHeapNumberValue(value, heap_number);
6666}
6667
6668void StoreHeapInt32::SetValueLocationConstraints() {
6669 UseRegister(object_input());
6670 UseRegister(value_input());
6671}
6672void StoreHeapInt32::GenerateCode(MaglevAssembler* masm,
6673 const ProcessingState& state) {
6674 Register object = ToRegister(object_input());
6675 Register value = ToRegister(value_input());
6676
6677 MaglevAssembler::TemporaryRegisterScope temps(masm);
6678 Register heap_number = temps.AcquireScratch();
6679
6680 __ AssertNotSmi(object);
6681 __ LoadTaggedField(heap_number, object, offset());
6682 __ AssertNotSmi(heap_number);
6683 __ StoreHeapInt32Value(value, heap_number);
6684}
6685
6686namespace {
6687
6688template <typename NodeT>
6689void GenerateTransitionElementsKind(
6690 MaglevAssembler* masm, NodeT* node, Register object, Register map,
6691 base::Vector<const compiler::MapRef> transition_sources,
6692 const compiler::MapRef transition_target, ZoneLabelRef done,
6693 std::optional<Register> result_opt) {
6694 DCHECK(!compiler::AnyMapIsHeapNumber(transition_sources));
6695 DCHECK(!IsHeapNumberMap(*transition_target.object()));
6696
6697 for (const compiler::MapRef transition_source : transition_sources) {
6698 bool is_simple = IsSimpleMapChangeTransition(
6699 transition_source.elements_kind(), transition_target.elements_kind());
6700
6701 // TODO(leszeks): If there are a lot of transition source maps, move the
6702 // source into a register and share the deferred code between maps.
6703 __ CompareTaggedAndJumpIf(
6704 map, transition_source.object(), kEqual,
6705 __ MakeDeferredCode(
6706 [](MaglevAssembler* masm, Register object, Register map,
6707 RegisterSnapshot register_snapshot,
6708 compiler::MapRef transition_target, bool is_simple,
6709 ZoneLabelRef done, std::optional<Register> result_opt) {
6710 if (is_simple) {
6711 __ MoveTagged(map, transition_target.object());
6712 __ StoreTaggedFieldWithWriteBarrier(
6713 object, HeapObject::kMapOffset, map, register_snapshot,
6714 MaglevAssembler::kValueIsCompressed,
6715 MaglevAssembler::kValueCannotBeSmi);
6716 } else {
6717 SaveRegisterStateForCall save_state(masm, register_snapshot);
6718 __ Push(object, transition_target.object());
6719 __ Move(kContextRegister, masm->native_context().object());
6720 __ CallRuntime(Runtime::kTransitionElementsKind);
6721 save_state.DefineSafepoint();
6722 }
6723 if (result_opt) {
6724 __ MoveTagged(*result_opt, transition_target.object());
6725 }
6726 __ Jump(*done);
6727 },
6728 object, map, node->register_snapshot(), transition_target,
6729 is_simple, done, result_opt));
6730 }
6731}
6732
6733} // namespace
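// Illustration (assumed example, not from this file) of simple vs. non-simple
// transitions handled above:
//   const a = [1, 2, 3]; // PACKED_SMI_ELEMENTS
//   a[0] = "x";          // -> PACKED_ELEMENTS: simple, only the map word
//                        //    changes (Smis are already valid tagged values)
//   a[0] = 1.5;          // -> PACKED_DOUBLE_ELEMENTS: not simple, the backing
//                        //    store must be rewritten via the runtime call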
6734
6735int TransitionElementsKind::MaxCallStackArgs() const {
6736 return std::max(WriteBarrierDescriptor::GetStackParameterCount(), 2);
6737}
6738
6739void TransitionElementsKind::SetValueLocationConstraints() {
6740 UseRegister(object_input());
6741 UseRegister(map_input());
6742 DefineAsRegister(this);
6743}
6744
6745void TransitionElementsKind::GenerateCode(MaglevAssembler* masm,
6746 const ProcessingState& state) {
6747 Register object = ToRegister(object_input());
6748 Register map = ToRegister(map_input());
6749 Register result_register = ToRegister(result());
6750
6751 ZoneLabelRef done(masm);
6752
6753 __ AssertNotSmi(object);
6754 GenerateTransitionElementsKind(masm, this, object, map,
6755 base::VectorOf(transition_sources_),
6756 transition_target_, done, result_register);
6757 // No transition happened, return the original map.
6758 __ Move(result_register, map);
6759 __ Jump(*done);
6760 __ bind(*done);
6761}
6762
6763int TransitionElementsKindOrCheckMap::MaxCallStackArgs() const {
6764 return std::max(WriteBarrierDescriptor::GetStackParameterCount(), 2);
6765}
6766
6767void TransitionElementsKindOrCheckMap::SetValueLocationConstraints() {
6768 UseRegister(object_input());
6769 UseRegister(map_input());
6770}
6771
6772void TransitionElementsKindOrCheckMap::GenerateCode(
6773 MaglevAssembler* masm, const ProcessingState& state) {
6774 Register object = ToRegister(object_input());
6775 Register map = ToRegister(map_input());
6776
6777 ZoneLabelRef done(masm);
6778
6779 __ CompareTaggedAndJumpIf(map, transition_target_.object(), kEqual, *done);
6780
6781 GenerateTransitionElementsKind(masm, this, object, map,
6782 base::VectorOf(transition_sources_),
6783 transition_target_, done, {});
6784 // If we didn't jump to 'done' yet, the transition failed.
6785 __ EmitEagerDeopt(this, DeoptimizeReason::kWrongMap);
6786 __ bind(*done);
6787}
6788
6789void CheckTypedArrayNotDetached::SetValueLocationConstraints() {
6790 UseRegister(object_input());
6791 set_temporaries_needed(1);
6792}
6793
6794void CheckTypedArrayNotDetached::GenerateCode(MaglevAssembler* masm,
6795 const ProcessingState& state) {
6796 MaglevAssembler::TemporaryRegisterScope temps(masm);
6797 Register object = ToRegister(object_input());
6798 Register scratch = temps.Acquire();
6799 __ DeoptIfBufferDetached(object, scratch, this);
6800}
6801
6802void GetContinuationPreservedEmbedderData::SetValueLocationConstraints() {
6803 DefineAsRegister(this);
6804}
6805
6806void GetContinuationPreservedEmbedderData::GenerateCode(
6807 MaglevAssembler* masm, const ProcessingState& state) {
6808 Register result = ToRegister(this->result());
6809 MaglevAssembler::TemporaryRegisterScope temps(masm);
6810 MemOperand reference = __ ExternalReferenceAsOperand(
6811 IsolateFieldId::kContinuationPreservedEmbedderData);
6812 __ Move(result, reference);
6813}
6814
6815void SetContinuationPreservedEmbedderData::SetValueLocationConstraints() {
6816 UseRegister(data_input());
6817}
6818
6819void SetContinuationPreservedEmbedderData::GenerateCode(
6820 MaglevAssembler* masm, const ProcessingState& state) {
6821 Register data = ToRegister(data_input());
6822 MaglevAssembler::TemporaryRegisterScope temps(masm);
6823 MemOperand reference = __ ExternalReferenceAsOperand(
6824 IsolateFieldId::kContinuationPreservedEmbedderData);
6825 __ Move(reference, data);
6826}
6827
6828namespace {
6829
6830template <typename ResultReg, typename NodeT>
6831void GenerateTypedArrayLoad(MaglevAssembler* masm, NodeT* node, Register object,
6832 Register index, ResultReg result_reg,
6833 ElementsKind kind) {
6834 __ AssertNotSmi(object);
6835 if (v8_flags.debug_code) {
6836 MaglevAssembler::TemporaryRegisterScope temps(masm);
6837 __ AssertObjectType(object, JS_TYPED_ARRAY_TYPE,
6838 AbortReason::kUnexpectedValue);
6839 }
6840
6841 MaglevAssembler::TemporaryRegisterScope temps(masm);
6842 Register scratch = temps.Acquire();
6843
6844 Register data_pointer = scratch;
6845 __ BuildTypedArrayDataPointer(data_pointer, object);
6846
6847 int element_size = ElementsKindToByteSize(kind);
6848 MemOperand operand =
6849 __ TypedArrayElementOperand(data_pointer, index, element_size);
6850 if constexpr (std::is_same_v<ResultReg, Register>) {
6851 if (IsSignedIntTypedArrayElementsKind(kind)) {
6852 __ LoadSignedField(result_reg, operand, element_size);
6853 } else {
6854 DCHECK(IsUnsignedIntTypedArrayElementsKind(kind));
6855 __ LoadUnsignedField(result_reg, operand, element_size);
6856 }
6857 } else {
6858#ifdef DEBUG
6859 bool result_reg_is_double = std::is_same_v<ResultReg, DoubleRegister>;
6860 DCHECK(result_reg_is_double);
6861 USE(result_reg_is_double);
6862#endif
6863 switch (kind) {
6864 case FLOAT32_ELEMENTS:
6865 __ LoadFloat32(result_reg, operand);
6866 break;
6867 case FLOAT64_ELEMENTS:
6868 __ LoadFloat64(result_reg, operand);
6869 break;
6870 default:
6871 UNREACHABLE();
6872 }
6873 }
6874}
6875
6876template <typename ValueReg, typename NodeT>
6877void GenerateTypedArrayStore(MaglevAssembler* masm, NodeT* node,
6878 Register object, Register index, ValueReg value,
6879 ElementsKind kind) {
6880 __ AssertNotSmi(object);
6881 if (v8_flags.debug_code) {
6882 MaglevAssembler::TemporaryRegisterScope temps(masm);
6883 __ AssertObjectType(object, JS_TYPED_ARRAY_TYPE,
6884 AbortReason::kUnexpectedValue);
6885 }
6886
6887 MaglevAssembler::TemporaryRegisterScope temps(masm);
6888 Register scratch = temps.Acquire();
6889
6890 Register data_pointer = scratch;
6891 __ BuildTypedArrayDataPointer(data_pointer, object);
6892
6893 int element_size = ElementsKindToByteSize(kind);
6894 MemOperand operand =
6895 __ TypedArrayElementOperand(data_pointer, index, element_size);
6896 if constexpr (std::is_same_v<ValueReg, Register>) {
6897 __ StoreField(operand, value, element_size);
6898 } else {
6899#ifdef DEBUG
6900 bool value_is_double = std::is_same_v<ValueReg, DoubleRegister>;
6901 DCHECK(value_is_double);
6902 USE(value_is_double);
6903#endif
6904 switch (kind) {
6905 case FLOAT32_ELEMENTS:
6906 __ StoreFloat32(operand, value);
6907 break;
6908 case FLOAT64_ELEMENTS:
6909 __ StoreFloat64(operand, value);
6910 break;
6911 default:
6912 UNREACHABLE();
6913 }
6914 }
6915}
6916
6917} // namespace
6918
6919#define DEF_LOAD_TYPED_ARRAY(Name, ResultReg, ToResultReg) \
6920 void Name::SetValueLocationConstraints() { \
6921 UseRegister(object_input()); \
6922 UseRegister(index_input()); \
6923 DefineAsRegister(this); \
6924 set_temporaries_needed(1); \
6925 } \
6926 void Name::GenerateCode(MaglevAssembler* masm, \
6927 const ProcessingState& state) { \
6928 Register object = ToRegister(object_input()); \
6929 Register index = ToRegister(index_input()); \
6930 ResultReg result_reg = ToResultReg(result()); \
6931 \
6932 GenerateTypedArrayLoad(masm, this, object, index, result_reg, \
6933 elements_kind_); \
6934 }
6935
6936DEF_LOAD_TYPED_ARRAY(LoadSignedIntTypedArrayElement, Register, ToRegister)
6937
6938DEF_LOAD_TYPED_ARRAY(LoadUnsignedIntTypedArrayElement, Register, ToRegister)
6939
6940DEF_LOAD_TYPED_ARRAY(LoadDoubleTypedArrayElement, DoubleRegister,
6941 ToDoubleRegister)
6942#undef DEF_LOAD_TYPED_ARRAY
6943
6944#define DEF_STORE_TYPED_ARRAY(Name, ValueReg, ToValueReg) \
6945 void Name::SetValueLocationConstraints() { \
6946 UseRegister(object_input()); \
6947 UseRegister(index_input()); \
6948 UseRegister(value_input()); \
6949 set_temporaries_needed(1); \
6950 } \
6951 void Name::GenerateCode(MaglevAssembler* masm, \
6952 const ProcessingState& state) { \
6953 Register object = ToRegister(object_input()); \
6954 Register index = ToRegister(index_input()); \
6955 ValueReg value = ToValueReg(value_input()); \
6956 \
6957 GenerateTypedArrayStore(masm, this, object, index, value, elements_kind_); \
6958 }
6959
6960DEF_STORE_TYPED_ARRAY(StoreIntTypedArrayElement, Register, ToRegister)
6961
6962DEF_STORE_TYPED_ARRAY(StoreDoubleTypedArrayElement, DoubleRegister,
6963 ToDoubleRegister)
6964#undef DEF_STORE_TYPED_ARRAY
6965
6966// ---
6967// Arch agnostic control nodes
6968// ---
6969
6970void Jump::SetValueLocationConstraints() {}
6971void Jump::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
6972 // Avoid emitting a jump to the next block.
6973 if (target() != state.next_block()) {
6974 __ Jump(target()->label());
6975 }
6976}
6977
6978void CheckpointedJump::SetValueLocationConstraints() {}
6979void CheckpointedJump::GenerateCode(MaglevAssembler* masm,
6980 const ProcessingState& state) {
6981 // Avoid emitting a jump to the next block.
6982 if (target() != state.next_block()) {
6983 __ Jump(target()->label());
6984 }
6985}
6986
6987namespace {
6988
6989void AttemptOnStackReplacement(MaglevAssembler* masm,
6990 ZoneLabelRef no_code_for_osr,
6991 TryOnStackReplacement* node, Register scratch0,
6992 Register scratch1, int32_t loop_depth,
6993 FeedbackSlot feedback_slot,
6994 BytecodeOffset osr_offset) {
6995 // Two cases may cause us to attempt OSR, in the following order:
6996 //
6997 // 1) Presence of cached OSR Turbofan code.
6998 // 2) The OSR urgency exceeds the current loop depth - in that case, call
6999 // into runtime to trigger a Turbofan OSR compilation. A non-zero return
7000 // value means we should deopt into Ignition which will handle all further
7001 // necessary steps (rewriting the stack frame, jumping to OSR'd code).
7002 //
7003 // See also: InterpreterAssembler::OnStackReplacement.
7004
7005 __ AssertFeedbackVector(scratch0, scratch1);
7006
7007 // Case 1).
7008 Label deopt;
7009 Register maybe_target_code = scratch1;
7010 __ TryLoadOptimizedOsrCode(scratch1, CodeKind::TURBOFAN_JS, scratch0,
7011 feedback_slot, &deopt, Label::kFar);
7012
7013 // Case 2).
7014 {
7015 __ LoadByte(scratch0,
7016 FieldMemOperand(scratch0, FeedbackVector::kOsrStateOffset));
7017 __ DecodeField<FeedbackVector::OsrUrgencyBits>(scratch0);
7018 __ JumpIfByte(kUnsignedLessThanEqual, scratch0, loop_depth,
7019 *no_code_for_osr);
7020
7021 // The osr_urgency exceeds the current loop_depth, signaling an OSR
7022 // request. Call into runtime to compile.
7023 {
7024 RegisterSnapshot snapshot = node->register_snapshot();
7025 DCHECK(!snapshot.live_registers.has(maybe_target_code));
7026 SaveRegisterStateForCall save_register_state(masm, snapshot);
7027 if (node->unit()->is_inline()) {
7028 // See comment in
7029 // MaglevGraphBuilder::ShouldEmitOsrInterruptBudgetChecks.
7030 CHECK(!node->unit()->is_osr());
7031 __ Push(Smi::FromInt(osr_offset.ToInt()), node->closure());
7032 __ Move(kContextRegister, masm->native_context().object());
7033 __ CallRuntime(Runtime::kCompileOptimizedOSRFromMaglevInlined, 2);
7034 } else {
7035 __ Push(Smi::FromInt(osr_offset.ToInt()));
7036 __ Move(kContextRegister, masm->native_context().object());
7037 __ CallRuntime(Runtime::kCompileOptimizedOSRFromMaglev, 1);
7038 }
7039 save_register_state.DefineSafepoint();
7040 __ Move(maybe_target_code, kReturnRegister0);
7041 }
7042
7043 // A `0` return value means there is no OSR code available yet. Continue
7044 // execution in Maglev, OSR code will be picked up once it exists and is
7045 // cached on the feedback vector.
7046 __ CompareInt32AndJumpIf(maybe_target_code, 0, kEqual, *no_code_for_osr);
7047 }
7048
7049 __ bind(&deopt);
7050 if (V8_LIKELY(v8_flags.turbofan)) {
7051 // None of the mutated input registers should be a register input into the
7052 // eager deopt info.
7053 DCHECK_REGLIST_EMPTY(
7054 RegList{scratch0, scratch1} &
7055 GetGeneralRegistersUsedAsInputs(node->eager_deopt_info()));
7056 __ EmitEagerDeopt(node, DeoptimizeReason::kPrepareForOnStackReplacement);
7057 } else {
7058 // Continue execution in Maglev. With TF disabled we cannot OSR and thus it
7059 // doesn't make sense to start the process. We do still perform all
7060 // remaining bookkeeping above though, to keep Maglev code behavior roughly
7061 // the same in both configurations.
7062 __ Jump(*no_code_for_osr);
7063 }
7064}
7065
7066} // namespace
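// Editorial note: the fast path in TryOnStackReplacement below keeps the
// per-back-edge cost to a single load/compare/branch on the OSR state byte;
// the deferred AttemptOnStackReplacement above only runs once the urgency
// bits (or a cached-code bit) make OSR look worthwhile.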
7067
7068int TryOnStackReplacement::MaxCallStackArgs() const {
7069 // For the kCompileOptimizedOSRFromMaglev call.
7070 if (unit()->is_inline()) return 2;
7071 return 1;
7072}
7073void TryOnStackReplacement::SetValueLocationConstraints() {
7074 UseAny(closure());
7075 set_temporaries_needed(2);
7076}
7077void TryOnStackReplacement::GenerateCode(MaglevAssembler* masm,
7078 const ProcessingState& state) {
7079 MaglevAssembler::TemporaryRegisterScope temps(masm);
7080 Register scratch0 = temps.Acquire();
7081 Register scratch1 = temps.Acquire();
7082
7083 const Register osr_state = scratch1;
7084 __ Move(scratch0, unit_->feedback().object());
7085 __ AssertFeedbackVector(scratch0, scratch1);
7086 __ LoadByte(osr_state,
7087 FieldMemOperand(scratch0, FeedbackVector::kOsrStateOffset));
7088
7089 ZoneLabelRef no_code_for_osr(masm);
7090
7091 if (v8_flags.maglev_osr) {
7092 // In case we use maglev_osr, we need to explicitly know if there is
7093 // turbofan code waiting for us (i.e., ignore the MaybeHasMaglevOsrCodeBit).
7094 __ DecodeField<
7095 base::BitFieldUnion<FeedbackVector::OsrUrgencyBits,
7096 FeedbackVector::MaybeHasTurbofanOsrCodeBit>>(
7097 osr_state);
7098 }
7099
7100 // The quick initial OSR check. If it passes, we proceed on to more
7101 // expensive OSR logic.
7102 static_assert(FeedbackVector::MaybeHasTurbofanOsrCodeBit::encode(true) >
7103 FeedbackVector::kMaxOsrUrgency);
7104 __ CompareInt32AndJumpIf(
7105 osr_state, loop_depth_, kUnsignedGreaterThan,
7106 __ MakeDeferredCode(AttemptOnStackReplacement, no_code_for_osr, this,
7107 scratch0, scratch1, loop_depth_, feedback_slot_,
7108 osr_offset_));
7109 __ bind(*no_code_for_osr);
7110}
7111
7112void JumpLoop::SetValueLocationConstraints() {}
7113void JumpLoop::GenerateCode(MaglevAssembler* masm,
7114 const ProcessingState& state) {
7115 __ Jump(target()->label());
7116}
7117
7118void BranchIfSmi::SetValueLocationConstraints() {
7119 UseRegister(condition_input());
7120}
7121void BranchIfSmi::GenerateCode(MaglevAssembler* masm,
7122 const ProcessingState& state) {
7123 __ Branch(__ CheckSmi(ToRegister(condition_input())), if_true(), if_false(),
7124 state.next_block());
7125}
7126
7127void BranchIfRootConstant::SetValueLocationConstraints() {
7128 UseRegister(condition_input());
7129}
7130void BranchIfRootConstant::GenerateCode(MaglevAssembler* masm,
7131 const ProcessingState& state) {
7132 __ CompareRoot(ToRegister(condition_input()), root_index());
7133 __ Branch(ConditionFor(Operation::kEqual), if_true(), if_false(),
7134 state.next_block());
7135}
7136
7137void BranchIfToBooleanTrue::SetValueLocationConstraints() {
7138 // TODO(victorgomes): consider using any input instead.
7139 UseRegister(condition_input());
7140}
7141void BranchIfToBooleanTrue::GenerateCode(MaglevAssembler* masm,
7142 const ProcessingState& state) {
7143 // BasicBlocks are zone allocated and so safe to be casted to ZoneLabelRef.
7144 ZoneLabelRef true_label =
7145 ZoneLabelRef::UnsafeFromLabelPointer(if_true()->label());
7146 ZoneLabelRef false_label =
7147 ZoneLabelRef::UnsafeFromLabelPointer(if_false()->label());
7148 bool fallthrough_when_true = (if_true() == state.next_block());
7149 __ ToBoolean(ToRegister(condition_input()), check_type(), true_label,
7150 false_label, fallthrough_when_true);
7151}
7152
7153void BranchIfInt32ToBooleanTrue::SetValueLocationConstraints() {
7154 // TODO(victorgomes): consider using any input instead.
7155 UseRegister(condition_input());
7156}
7157void BranchIfInt32ToBooleanTrue::GenerateCode(MaglevAssembler* masm,
7158 const ProcessingState& state) {
7159 __ CompareInt32AndBranch(ToRegister(condition_input()), 0, kNotEqual,
7160 if_true(), if_false(), state.next_block());
7161}
7162
7163void BranchIfIntPtrToBooleanTrue::SetValueLocationConstraints() {
7164 // TODO(victorgomes): consider using any input instead.
7165 UseRegister(condition_input());
7166}
7167void BranchIfIntPtrToBooleanTrue::GenerateCode(MaglevAssembler* masm,
7168 const ProcessingState& state) {
7169 __ CompareIntPtrAndBranch(ToRegister(condition_input()), 0, kNotEqual,
7170 if_true(), if_false(), state.next_block());
7171}
7172
7173void BranchIfFloat64ToBooleanTrue::SetValueLocationConstraints() {
7174 UseRegister(condition_input());
7175 set_double_temporaries_needed(1);
7176}
7177void BranchIfFloat64ToBooleanTrue::GenerateCode(MaglevAssembler* masm,
7178 const ProcessingState& state) {
7179 MaglevAssembler::TemporaryRegisterScope temps(masm);
7180 DoubleRegister double_scratch = temps.AcquireDouble();
7181
7182 __ Move(double_scratch, 0.0);
7183 __ CompareFloat64AndBranch(ToDoubleRegister(condition_input()),
7184 double_scratch, kEqual, if_false(), if_true(),
7185 state.next_block(), if_false());
7186}
7187
7188void BranchIfFloat64IsHole::SetValueLocationConstraints() {
7189 UseRegister(condition_input());
7190 set_temporaries_needed(1);
7191}
7192void BranchIfFloat64IsHole::GenerateCode(MaglevAssembler* masm,
7193 const ProcessingState& state) {
7194 MaglevAssembler::TemporaryRegisterScope temps(masm);
7195 Register scratch = temps.Acquire();
7196 DoubleRegister input = ToDoubleRegister(condition_input());
7197 // See MaglevAssembler::Branch.
7198 bool fallthrough_when_true = if_true() == state.next_block();
7199 bool fallthrough_when_false = if_false() == state.next_block();
7200 if (fallthrough_when_false) {
7201 if (fallthrough_when_true) {
7202 // If both paths are a fallthrough, do nothing.
7203 DCHECK_EQ(if_true(), if_false());
7204 return;
7205 }
7206 // Jump over the false block if true, otherwise fall through into it.
7207 __ JumpIfHoleNan(input, scratch, if_true()->label(), Label::kFar);
7208 } else {
7209 // Jump to the false block if true.
7210 __ JumpIfNotHoleNan(input, scratch, if_false()->label(), Label::kFar);
7211 // Jump to the true block if it's not the next block.
7212 if (!fallthrough_when_true) {
7213 __ Jump(if_true()->label(), Label::kFar);
7214 }
7215 }
7216}
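// Background note (editorial, hedged): the hole in a holey double array is
// represented by one specific quiet-NaN bit pattern, and NaN never compares
// equal to itself, so JumpIfHoleNan/JumpIfNotHoleNan must inspect the raw
// bits (hence the scratch register) rather than use a float compare.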
7217
7219 UseRegister(input());
7220 DefineAsRegister(this);
7221 set_temporaries_needed(1);
7222}
7223void HoleyFloat64IsHole::GenerateCode(MaglevAssembler* masm,
7224 const ProcessingState& state) {
7225 MaglevAssembler::TemporaryRegisterScope temps(masm);
7226 Register scratch = temps.Acquire();
7227 DoubleRegister value = ToDoubleRegister(input());
7228 Label done, if_not_hole;
7229 __ JumpIfNotHoleNan(value, scratch, &if_not_hole, Label::kNear);
7230 __ LoadRoot(ToRegister(result()), RootIndex::kTrueValue);
7231 __ Jump(&done);
7232 __ bind(&if_not_hole);
7233 __ LoadRoot(ToRegister(result()), RootIndex::kFalseValue);
7234 __ bind(&done);
7235}
7236
7237void BranchIfFloat64Compare::SetValueLocationConstraints() {
7238 UseRegister(left_input());
7239 UseRegister(right_input());
7240}
7241void BranchIfFloat64Compare::GenerateCode(MaglevAssembler* masm,
7242 const ProcessingState& state) {
7243 DoubleRegister left = ToDoubleRegister(left_input());
7244 DoubleRegister right = ToDoubleRegister(right_input());
7245 __ CompareFloat64AndBranch(left, right, ConditionForFloat64(operation_),
7246 if_true(), if_false(), state.next_block(),
7247 if_false());
7248}
7249
7250void BranchIfReferenceEqual::SetValueLocationConstraints() {
7251 UseRegister(left_input());
7252 UseRegister(right_input());
7253}
7254void BranchIfReferenceEqual::GenerateCode(MaglevAssembler* masm,
7255 const ProcessingState& state) {
7256 Register left = ToRegister(left_input());
7257 Register right = ToRegister(right_input());
7258 __ CmpTagged(left, right);
7259 __ Branch(kEqual, if_true(), if_false(), state.next_block());
7260}
7261
7262void BranchIfInt32Compare::SetValueLocationConstraints() {
7263 UseRegister(left_input());
7264 UseRegister(right_input());
7265}
7266void BranchIfInt32Compare::GenerateCode(MaglevAssembler* masm,
7267 const ProcessingState& state) {
7268 Register left = ToRegister(left_input());
7269 Register right = ToRegister(right_input());
7270 __ CompareInt32AndBranch(left, right, ConditionFor(operation_), if_true(),
7271 if_false(), state.next_block());
7272}
7273
7274void BranchIfUint32Compare::SetValueLocationConstraints() {
7275 UseRegister(left_input());
7276 UseRegister(right_input());
7277}
7278void BranchIfUint32Compare::GenerateCode(MaglevAssembler* masm,
7279 const ProcessingState& state) {
7280 Register left = ToRegister(left_input());
7281 Register right = ToRegister(right_input());
7282 __ CompareInt32AndBranch(left, right, UnsignedConditionFor(operation_),
7283 if_true(), if_false(), state.next_block());
7284}
7285
7286void BranchIfUndefinedOrNull::SetValueLocationConstraints() {
7287 UseRegister(condition_input());
7288}
7289void BranchIfUndefinedOrNull::GenerateCode(MaglevAssembler* masm,
7290 const ProcessingState& state) {
7291 Register value = ToRegister(condition_input());
7292 __ JumpIfRoot(value, RootIndex::kUndefinedValue, if_true()->label());
7293 __ JumpIfRoot(value, RootIndex::kNullValue, if_true()->label());
7294 auto* next_block = state.next_block();
7295 if (if_false() != next_block) {
7296 __ Jump(if_false()->label());
7297 }
7298}
7299
7300void BranchIfUndetectable::SetValueLocationConstraints() {
7301 UseRegister(condition_input());
7302 set_temporaries_needed(1);
7303}
7304void BranchIfUndetectable::GenerateCode(MaglevAssembler* masm,
7305 const ProcessingState& state) {
7306 Register value = ToRegister(condition_input());
7307 MaglevAssembler::TemporaryRegisterScope temps(masm);
7308 Register scratch = temps.Acquire();
7309
7310 auto* next_block = state.next_block();
7311 if (next_block == if_true() || next_block != if_false()) {
7312 __ JumpIfNotUndetectable(value, scratch, check_type(), if_false()->label());
7313 if (next_block != if_true()) {
7314 __ Jump(if_true()->label());
7315 }
7316 } else {
7317 __ JumpIfUndetectable(value, scratch, check_type(), if_true()->label());
7318 }
7319}
7320
7322 UseRegister(value());
7323 set_temporaries_needed(1);
7324 DefineAsRegister(this);
7325}
7326void TestUndetectable::GenerateCode(MaglevAssembler* masm,
7327 const ProcessingState& state) {
7328 Register object = ToRegister(value());
7329 Register return_value = ToRegister(result());
7330 MaglevAssembler::TemporaryRegisterScope temps(masm);
7331 Register scratch = temps.Acquire();
7332
7333 Label return_false, done;
7334 __ JumpIfNotUndetectable(object, scratch, check_type(), &return_false,
7335 Label::kNear);
7336
7337 __ LoadRoot(return_value, RootIndex::kTrueValue);
7338 __ Jump(&done, Label::kNear);
7339
7340 __ bind(&return_false);
7341 __ LoadRoot(return_value, RootIndex::kFalseValue);
7342
7343 __ bind(&done);
7344}
7345
7346void BranchIfTypeOf::SetValueLocationConstraints() {
7347 UseRegister(value_input());
7348 // One temporary for TestTypeOf.
7349 set_temporaries_needed(1);
7350}
7351void BranchIfTypeOf::GenerateCode(MaglevAssembler* masm,
7352 const ProcessingState& state) {
7353 Register value = ToRegister(value_input());
7354 __ TestTypeOf(value, literal_, if_true()->label(), Label::kFar,
7355 if_true() == state.next_block(), if_false()->label(),
7356 Label::kFar, if_false() == state.next_block());
7357}
7358
7359void BranchIfJSReceiver::SetValueLocationConstraints() {
7360 UseRegister(condition_input());
7361}
7362void BranchIfJSReceiver::GenerateCode(MaglevAssembler* masm,
7363 const ProcessingState& state) {
7364 Register value = ToRegister(condition_input());
7365 __ JumpIfSmi(value, if_false()->label());
7366 __ JumpIfJSAnyIsNotPrimitive(value, if_true()->label());
7367 __ jmp(if_false()->label());
7368}
7369
7370void Switch::SetValueLocationConstraints() {
7371 UseAndClobberRegister(value());
7372 set_temporaries_needed(1);
7373}
7374void Switch::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
7375 MaglevAssembler::TemporaryRegisterScope temps(masm);
7376 Register scratch = temps.Acquire();
7377 std::unique_ptr<Label*[]> labels = std::make_unique<Label*[]>(size());
7378 for (int i = 0; i < size(); i++) {
7379 BasicBlock* block = (targets())[i].block_ptr();
7380 block->set_start_block_of_switch_case(true);
7381 labels[i] = block->label();
7382 }
7383 Register val = ToRegister(value());
7384 // Switch requires {val} (the switch's condition) to be 64-bit, but maglev
7385 // usually manipulates/creates 32-bit integers. We thus sign-extend {val} to
7386 // 64-bit to have the correct value for negative numbers.
7387 __ SignExtend32To64Bits(val, val);
7388 __ Switch(scratch, val, value_base(), labels.get(), size());
7389 if (has_fallthrough()) {
7390 // If we jump-thread the fallthrough, it's not necessarily the next block.
7391 if (fallthrough() != state.next_block()) {
7392 __ Jump(fallthrough()->label());
7393 }
7394 } else {
7395 __ Trap();
7396 }
7397}
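// Why the sign extension above matters: zero-extending a negative 32-bit
// case value would produce a huge 64-bit index instead of a small negative
// one. A minimal standalone illustration in plain C++ (not Maglev code):
#if 0
#include <cstdint>
static_assert(static_cast<uint64_t>(static_cast<uint32_t>(int32_t{-1})) ==
                  0x00000000FFFFFFFFull,
              "zero extension loses the sign");
static_assert(static_cast<uint64_t>(static_cast<int64_t>(int32_t{-1})) ==
                  0xFFFFFFFFFFFFFFFFull,
              "sign extension preserves -1");
#endif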
7398
7399void HandleNoHeapWritesInterrupt::GenerateCode(MaglevAssembler* masm,
7400 const ProcessingState& state) {
7401 ZoneLabelRef done(masm);
7402 Label* deferred = __ MakeDeferredCode(
7403 [](MaglevAssembler* masm, ZoneLabelRef done, Node* node) {
7404 ASM_CODE_COMMENT_STRING(masm, "HandleNoHeapWritesInterrupt");
7405 {
7406 SaveRegisterStateForCall save_register_state(
7407 masm, node->register_snapshot());
7408 __ Move(kContextRegister, masm->native_context().object());
7409 __ CallRuntime(Runtime::kHandleNoHeapWritesInterrupts, 0);
7410 save_register_state.DefineSafepointWithLazyDeopt(
7411 node->lazy_deopt_info());
7412 }
7413 __ Jump(*done);
7414 },
7415 done, this);
7416
7417 MaglevAssembler::TemporaryRegisterScope temps(masm);
7418 Register scratch = temps.AcquireScratch();
7419 MemOperand check = __ ExternalReferenceAsOperand(
7420 ExternalReference::address_of_no_heap_write_interrupt_request(
7421 masm->isolate()),
7422 scratch);
7423 __ CompareByteAndJumpIf(check, 0, kNotEqual, scratch, deferred, Label::kFar);
7424 __ bind(*done);
7425}
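// The node above is Maglev's usual fast-path/slow-path split: the inline
// code is a single byte load plus compare, and the runtime call lives in
// deferred, out-of-line code that is only reached when the interrupt flag
// is set. A sketch of the shape, with hypothetical helpers (not the real
// assembler API):
#if 0
void EmitInterruptCheck() {
  Label* slow_path = MakeDeferred([] {
    SaveLiveRegisters();         // register snapshot around the call
    CallInterruptHandler();      // runtime call, rarely executed
    JumpBackToFastPath();
  });
  JumpIfFlagByteSet(slow_path);  // fast path: one load + one compare
  // Execution continues here when no interrupt was requested.
}
#endif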
7426
7427#endif // V8_ENABLE_MAGLEV
7428
7429// ---
7430// Print params
7431// ---
7432
7433void ExternalConstant::PrintParams(std::ostream& os,
7434 MaglevGraphLabeller* graph_labeller) const {
7435 os << "(" << reference() << ")";
7436}
7437
7438void SmiConstant::PrintParams(std::ostream& os,
7439 MaglevGraphLabeller* graph_labeller) const {
7440 os << "(" << value() << ")";
7441}
7442
7443void TaggedIndexConstant::PrintParams(
7444 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7445 os << "(" << value() << ")";
7446}
7447
7448void Int32Constant::PrintParams(std::ostream& os,
7449 MaglevGraphLabeller* graph_labeller) const {
7450 os << "(" << value() << ")";
7451}
7452
7453void Uint32Constant::PrintParams(std::ostream& os,
7454 MaglevGraphLabeller* graph_labeller) const {
7455 os << "(" << value() << ")";
7456}
7457
7458void Float64Constant::PrintParams(std::ostream& os,
7459 MaglevGraphLabeller* graph_labeller) const {
7460 if (value().is_nan()) {
7461 os << "(NaN [0x" << std::hex << value().get_bits() << std::dec << "]";
7462 if (value().is_hole_nan()) {
7463 os << ", the hole";
7464 } else if (value().get_bits() ==
7465 base::bit_cast<uint64_t>(
7466 std::numeric_limits<double>::quiet_NaN())) {
7467 os << ", quiet NaN";
7468 }
7469 os << ")";
7470
7471 } else {
7472 os << "(" << value().get_scalar() << ")";
7473 }
7474}
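// NaNs are distinguished by bit pattern rather than by value, since every
// NaN compares unequal to everything (including itself). A minimal sketch
// of the quiet-NaN check in plain C++ (std::bit_cast needs C++20; the
// engine uses its own base::bit_cast):
#if 0
#include <bit>
#include <cstdint>
#include <limits>
bool IsCanonicalQuietNaN(double d) {
  return std::bit_cast<uint64_t>(d) ==
         std::bit_cast<uint64_t>(std::numeric_limits<double>::quiet_NaN());
}
#endif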
7475
7476void Constant::PrintParams(std::ostream& os,
7477 MaglevGraphLabeller* graph_labeller) const {
7478 os << "(" << *object_.object() << ")";
7479}
7480
7481void TrustedConstant::PrintParams(std::ostream& os,
7482 MaglevGraphLabeller* graph_labeller) const {
7483 os << "(" << *object_.object() << ")";
7484}
7485
7486void DeleteProperty::PrintParams(std::ostream& os,
7487 MaglevGraphLabeller* graph_labeller) const {
7488 os << "(" << LanguageMode2String(mode()) << ")";
7489}
7490
7491void InitialValue::PrintParams(std::ostream& os,
7492 MaglevGraphLabeller* graph_labeller) const {
7493 os << "(" << source().ToString() << ")";
7494}
7495
7496void LoadGlobal::PrintParams(std::ostream& os,
7497 MaglevGraphLabeller* graph_labeller) const {
7498 os << "(" << *name().object() << ")";
7499}
7500
7501void StoreGlobal::PrintParams(std::ostream& os,
7502 MaglevGraphLabeller* graph_labeller) const {
7503 os << "(" << *name().object() << ")";
7504}
7505
7506void RegisterInput::PrintParams(std::ostream& os,
7507 MaglevGraphLabeller* graph_labeller) const {
7508 os << "(" << input() << ")";
7509}
7510
7511void RootConstant::PrintParams(std::ostream& os,
7512 MaglevGraphLabeller* graph_labeller) const {
7513 os << "(" << RootsTable::name(index()) << ")";
7514}
7515
7516void CreateFunctionContext::PrintParams(
7517 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7518 os << "(" << *scope_info().object() << ", " << slot_count() << ")";
7519}
7520
7521void FastCreateClosure::PrintParams(std::ostream& os,
7522 MaglevGraphLabeller* graph_labeller) const {
7523 os << "(" << *shared_function_info().object() << ", "
7524 << feedback_cell().object() << ")";
7525}
7526
7527void CreateClosure::PrintParams(std::ostream& os,
7528 MaglevGraphLabeller* graph_labeller) const {
7529 os << "(" << *shared_function_info().object() << ", "
7530 << feedback_cell().object();
7531 if (pretenured()) {
7532 os << " [pretenured]";
7533 }
7534 os << ")";
7535}
7536
7537void AllocationBlock::PrintParams(std::ostream& os,
7538 MaglevGraphLabeller* graph_labeller) const {
7539 os << "(" << allocation_type() << ")";
7540}
7541
7542void InlinedAllocation::PrintParams(std::ostream& os,
7543 MaglevGraphLabeller* graph_labeller) const {
7544 os << "(" << object()->type();
7545 if (object()->has_static_map()) {
7546 os << " " << *object()->map().object();
7547 }
7548 os << ")";
7549}
7550
7551void VirtualObject::PrintParams(std::ostream& os,
7552 MaglevGraphLabeller* graph_labeller) const {
7553 os << "(" << *map().object() << ")";
7554}
7555
7556void Abort::PrintParams(std::ostream& os,
7557 MaglevGraphLabeller* graph_labeller) const {
7558 os << "(" << GetAbortReason(reason()) << ")";
7559}
7560
7561void AssertInt32::PrintParams(std::ostream& os,
7562 MaglevGraphLabeller* graph_labeller) const {
7563 os << "(" << condition_ << ")";
7564}
7565
7566void BuiltinStringPrototypeCharCodeOrCodePointAt::PrintParams(
7567 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7568 switch (mode_) {
7569 case BuiltinStringPrototypeCharCodeOrCodePointAt::kCharCodeAt:
7570 os << "(CharCodeAt)";
7571 break;
7572 case BuiltinStringPrototypeCharCodeOrCodePointAt::kCodePointAt:
7573 os << "(CodePointAt)";
7574 break;
7575 }
7576}
7577
7578void CheckMaps::PrintParams(std::ostream& os,
7579 MaglevGraphLabeller* graph_labeller) const {
7580 os << "(";
7581 bool first = true;
7582 for (compiler::MapRef map : maps()) {
7583 if (first) {
7584 first = false;
7585 } else {
7586 os << ", ";
7587 }
7588 os << *map.object();
7589 }
7590 os << ")";
7591}
7592
7594 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7595 os << "(";
7596 bool first = true;
7597 for (compiler::MapRef map : maps()) {
7598 if (first) {
7599 first = false;
7600 } else {
7601 os << ", ";
7602 }
7603 os << *map.object();
7604 }
7605 os << ")";
7606}
7607
7609 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7610 os << "(";
7611 bool first = true;
7612 for (compiler::MapRef map : maps()) {
7613 if (first) {
7614 first = false;
7615 } else {
7616 os << ", ";
7617 }
7618 os << *map.object();
7619 }
7620 os << ")";
7621}
7622
7624 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7625 os << "(" << Node::input(0).node() << ", [";
7626 os << *transition_target().object();
7627 for (compiler::MapRef source : transition_sources()) {
7628 os << ", " << *source.object();
7629 }
7630 os << "]-->" << *transition_target().object() << ")";
7631}
7632
7634 MaglevGraphLabeller* graph_labeller) const {
7635 os << "(" << DeoptimizeReasonToString(deoptimize_reason()) << ")";
7636}
7637
7638void CheckValue::PrintParams(std::ostream& os,
7639 MaglevGraphLabeller* graph_labeller) const {
7640 os << "(" << *value().object() << ", "
7641 << DeoptimizeReasonToString(deoptimize_reason()) << ")";
7642}
7643
7644void CheckValueEqualsInt32::PrintParams(
7645 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7646 os << "(" << value() << ", " << DeoptimizeReasonToString(deoptimize_reason())
7647 << ")";
7648}
7649
7650void CheckValueEqualsFloat64::PrintParams(
7651 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7652 os << "(" << value().get_scalar() << ", "
7653 << DeoptimizeReasonToString(deoptimize_reason()) << ")";
7654}
7655
7656void CheckValueEqualsString::PrintParams(
7657 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7658 os << "(" << *value().object() << ", "
7659 << DeoptimizeReasonToString(deoptimize_reason()) << ")";
7660}
7661
7662void CheckInstanceType::PrintParams(std::ostream& os,
7663 MaglevGraphLabeller* graph_labeller) const {
7664 os << "(" << first_instance_type_;
7665 if (first_instance_type_ != last_instance_type_) {
7666 os << " - " << last_instance_type_;
7667 }
7668 os << ")";
7669}
7670
7672 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7673 os << "(";
7674 bool first = true;
7675 for (compiler::MapRef map : maps()) {
7676 if (first) {
7677 first = false;
7678 } else {
7679 os << ", ";
7680 }
7681 os << *map.object();
7682 }
7683 os << ")";
7684}
7685
7686void CheckInt32Condition::PrintParams(
7687 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7688 os << "(" << condition() << ", " << deoptimize_reason() << ")";
7689}
7690
7692 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7693 os << "(" << index_ << ")";
7694}
7695
7696template <typename Derived, ValueRepresentation FloatType>
7700 PrintParams(std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7701 os << "(" << conversion_type() << ")";
7702}
7703
7704void UncheckedNumberOrOddballToFloat64::PrintParams(
7705 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7706 os << "(" << conversion_type() << ")";
7707}
7708
7709void CheckedTruncateNumberOrOddballToInt32::PrintParams(
7710 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7711 os << "(" << conversion_type() << ")";
7712}
7713
7714void TruncateNumberOrOddballToInt32::PrintParams(
7715 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7716 os << "(" << conversion_type() << ")";
7717}
7718
7719template <typename T>
7721 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7722 os << "(0x" << std::hex << offset() << std::dec;
7723 // Print compression status only after the result is allocated, since that's
7724 // when we do decompression marking.
7725 if (!result().operand().IsUnallocated()) {
7726 if (decompresses_tagged_result()) {
7727 os << ", decompressed";
7728 } else {
7729 os << ", compressed";
7730 }
7731 }
7732 os << ")";
7733}
7734
7735void LoadTaggedFieldForScriptContextSlot::PrintParams(
7736 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7737 os << "(0x" << std::hex << offset() << std::dec << ")";
7738}
7739
7740void LoadDoubleField::PrintParams(std::ostream& os,
7741 MaglevGraphLabeller* graph_labeller) const {
7742 os << "(0x" << std::hex << offset() << std::dec << ")";
7743}
7744
7745void LoadFloat64::PrintParams(std::ostream& os,
7746 MaglevGraphLabeller* graph_labeller) const {
7747 os << "(0x" << std::hex << offset() << std::dec << ")";
7748}
7749
7750void LoadHeapInt32::PrintParams(std::ostream& os,
7751 MaglevGraphLabeller* graph_labeller) const {
7752 os << "(0x" << std::hex << offset() << std::dec << ")";
7753}
7754
7755void LoadInt32::PrintParams(std::ostream& os,
7756 MaglevGraphLabeller* graph_labeller) const {
7757 os << "(0x" << std::hex << offset() << std::dec << ")";
7758}
7759
7760void LoadFixedArrayElement::PrintParams(
7761 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7762 // Print compression status only after the result is allocated, since that's
7763 // when we do decompression marking.
7764 if (!result().operand().IsUnallocated()) {
7765 if (decompresses_tagged_result()) {
7766 os << "(decompressed)";
7767 } else {
7768 os << "(compressed)";
7769 }
7770 }
7771}
7772
7773void StoreDoubleField::PrintParams(std::ostream& os,
7774 MaglevGraphLabeller* graph_labeller) const {
7775 os << "(0x" << std::hex << offset() << std::dec << ")";
7776}
7777
7778void StoreHeapInt32::PrintParams(std::ostream& os,
7779 MaglevGraphLabeller* graph_labeller) const {
7780 os << "(0x" << std::hex << offset() << std::dec << ")";
7781}
7782
7783void StoreFloat64::PrintParams(std::ostream& os,
7784 MaglevGraphLabeller* graph_labeller) const {
7785 os << "(0x" << std::hex << offset() << std::dec << ")";
7786}
7787
7788void StoreInt32::PrintParams(std::ostream& os,
7789 MaglevGraphLabeller* graph_labeller) const {
7790 os << "(0x" << std::hex << offset() << std::dec << ")";
7791}
7792
7793void StoreTaggedFieldNoWriteBarrier::PrintParams(
7794 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7795 os << "(0x" << std::hex << offset() << std::dec << ")";
7796}
7797
7798std::ostream& operator<<(std::ostream& os, StoreMap::Kind kind) {
7799 switch (kind) {
7800 case StoreMap::Kind::kInitializing:
7801 os << "Initializing";
7802 break;
7803 case StoreMap::Kind::kInlinedAllocation:
7804 os << "InlinedAllocation";
7805 break;
7806 case StoreMap::Kind::kTransitioning:
7807 os << "Transitioning";
7808 break;
7809 }
7810 return os;
7811}
7812
7813void StoreMap::PrintParams(std::ostream& os,
7814 MaglevGraphLabeller* graph_labeller) const {
7815 os << "(" << *map_.object() << ", " << kind() << ")";
7816}
7817
7818void StoreTaggedFieldWithWriteBarrier::PrintParams(
7819 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7820 os << "(0x" << std::hex << offset() << std::dec << ")";
7821}
7822
7823void StoreTrustedPointerFieldWithWriteBarrier::PrintParams(
7824 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7825 os << "(0x" << std::hex << offset() << std::dec << ")";
7826}
7827
7828void LoadNamedGeneric::PrintParams(std::ostream& os,
7829 MaglevGraphLabeller* graph_labeller) const {
7830 os << "(" << *name_.object() << ")";
7831}
7832
7833void LoadNamedFromSuperGeneric::PrintParams(
7834 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7835 os << "(" << *name_.object() << ")";
7836}
7837
7838void SetNamedGeneric::PrintParams(std::ostream& os,
7839 MaglevGraphLabeller* graph_labeller) const {
7840 os << "(" << *name_.object() << ")";
7841}
7842
7843void DefineNamedOwnGeneric::PrintParams(
7844 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7845 os << "(" << *name_.object() << ")";
7846}
7847
7848void HasInPrototypeChain::PrintParams(
7849 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7850 os << "(" << *prototype_.object() << ")";
7851}
7852
7853void GapMove::PrintParams(std::ostream& os,
7854 MaglevGraphLabeller* graph_labeller) const {
7855 os << "(" << source() << " → " << target() << ")";
7856}
7857
7858void ConstantGapMove::PrintParams(std::ostream& os,
7859 MaglevGraphLabeller* graph_labeller) const {
7860 os << "(";
7861 graph_labeller->PrintNodeLabel(os, node_);
7862 os << " → " << target() << ")";
7863}
7864
7865void Float64Compare::PrintParams(std::ostream& os,
7866 MaglevGraphLabeller* graph_labeller) const {
7867 os << "(" << operation() << ")";
7868}
7869
7870void Float64ToBoolean::PrintParams(std::ostream& os,
7871 MaglevGraphLabeller* graph_labeller) const {
7872 if (flip()) {
7873 os << "(flipped)";
7874 }
7875}
7876
7877void Int32Compare::PrintParams(std::ostream& os,
7878 MaglevGraphLabeller* graph_labeller) const {
7879 os << "(" << operation() << ")";
7880}
7881
7882void Int32ToBoolean::PrintParams(std::ostream& os,
7883 MaglevGraphLabeller* graph_labeller) const {
7884 if (flip()) {
7885 os << "(flipped)";
7886 }
7887}
7888
7889void IntPtrToBoolean::PrintParams(std::ostream& os,
7890 MaglevGraphLabeller* graph_labeller) const {
7891 if (flip()) {
7892 os << "(flipped)";
7893 }
7894}
7895
7896void Float64Ieee754Unary::PrintParams(
7897 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7898 switch (ieee_function_) {
7899#define CASE(MathName, ExtName, EnumName) \
7900 case Ieee754Function::k##EnumName: \
7901 os << "(" << #EnumName << ")"; \
7902 break;
7903 IEEE_754_UNARY_LIST(CASE)
7904#undef CASE
7905 }
7906}
7907
7908void Float64Round::PrintParams(std::ostream& os,
7909 MaglevGraphLabeller* graph_labeller) const {
7910 switch (kind_) {
7911 case Kind::kCeil:
7912 os << "(ceil)";
7913 return;
7914 case Kind::kFloor:
7915 os << "(floor)";
7916 return;
7917 case Kind::kNearest:
7918 os << "(nearest)";
7919 return;
7920 }
7921}
7922
7923void Phi::PrintParams(std::ostream& os,
7924 MaglevGraphLabeller* graph_labeller) const {
7925 os << "(" << (owner().is_valid() ? owner().ToString() : "VO") << ")";
7926}
7927
7928void Call::PrintParams(std::ostream& os,
7929 MaglevGraphLabeller* graph_labeller) const {
7930 os << "(" << receiver_mode_ << ", ";
7931 switch (target_type_) {
7932 case TargetType::kJSFunction:
7933 os << "JSFunction";
7934 break;
7935 case TargetType::kAny:
7936 os << "Any";
7937 break;
7938 }
7939 os << ")";
7940}
7941
7942void CallSelf::PrintParams(std::ostream& os,
7943 MaglevGraphLabeller* graph_labeller) const {}
7944
7945void CallKnownJSFunction::PrintParams(
7946 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7947 os << "(" << shared_function_info_.object() << ")";
7948}
7949
7950void CallKnownApiFunction::PrintParams(
7951 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7952 os << "(";
7953 switch (mode()) {
7954 case kNoProfiling:
7955 os << "no profiling, ";
7956 break;
7957 case kNoProfilingInlined:
7958 os << "no profiling inlined, ";
7959 break;
7960 case kGeneric:
7961 break;
7962 }
7963 os << function_template_info_.object() << ")";
7964}
7965
7966void CallBuiltin::PrintParams(std::ostream& os,
7967 MaglevGraphLabeller* graph_labeller) const {
7968 os << "(" << Builtins::name(builtin()) << ")";
7969}
7970
7971void CallCPPBuiltin::PrintParams(std::ostream& os,
7972 MaglevGraphLabeller* graph_labeller) const {
7973 os << "(" << Builtins::name(builtin()) << ")";
7974}
7975
7976void CallForwardVarargs::PrintParams(
7977 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7978 if (start_index_ == 0) return;
7979 os << "(" << start_index_ << ")";
7980}
7981
7982void CallRuntime::PrintParams(std::ostream& os,
7983 MaglevGraphLabeller* graph_labeller) const {
7984 os << "(" << Runtime::FunctionForId(function_id())->name << ")";
7985}
7986
7987void TestTypeOf::PrintParams(std::ostream& os,
7988 MaglevGraphLabeller* graph_labeller) const {
7989 os << "(" << interpreter::TestTypeOfFlags::ToString(literal_) << ")";
7990}
7991
7992void ReduceInterruptBudgetForLoop::PrintParams(
7993 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7994 os << "(" << amount() << ")";
7995}
7996
7997void ReduceInterruptBudgetForReturn::PrintParams(
7998 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
7999 os << "(" << amount() << ")";
8000}
8001
8002void Deopt::PrintParams(std::ostream& os,
8003 MaglevGraphLabeller* graph_labeller) const {
8004 os << "(" << DeoptimizeReasonToString(deoptimize_reason()) << ")";
8005}
8006
8007void BranchIfRootConstant::PrintParams(
8008 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
8009 os << "(" << RootsTable::name(root_index_) << ")";
8010}
8011
8012void BranchIfFloat64Compare::PrintParams(
8013 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
8014 os << "(" << operation_ << ")";
8015}
8016
8017void BranchIfInt32Compare::PrintParams(
8018 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
8019 os << "(" << operation_ << ")";
8020}
8021
8022void BranchIfUint32Compare::PrintParams(
8023 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
8024 os << "(" << operation_ << ")";
8025}
8026
8027void BranchIfTypeOf::PrintParams(std::ostream& os,
8028 MaglevGraphLabeller* graph_labeller) const {
8029 os << "(" << interpreter::TestTypeOfFlags::ToString(literal_) << ")";
8030}
8031
8032void ExtendPropertiesBackingStore::PrintParams(
8033 std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
8034 os << "(" << old_length_ << ")";
8035}
8036
8037// Keeping track of the effects this instruction has on known node aspects.
8038void NodeBase::ClearElementsProperties(KnownNodeAspects& known_node_aspects) {
8039 DCHECK(IsElementsArrayWrite(opcode()));
8040 // Clear Elements cache.
8041 auto elements_properties = known_node_aspects.loaded_properties.find(
8042 KnownNodeAspects::LoadedPropertyMapKey::Elements());
8043 if (elements_properties != known_node_aspects.loaded_properties.end()) {
8044 elements_properties->second.clear();
8045 if (v8_flags.trace_maglev_graph_building) {
8046 std::cout << " * Removing non-constant cached [Elements]";
8047 }
8048 }
8049}
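// An elements-array write invalidates only the cached [Elements] entries;
// other loaded properties stay valid. A minimal sketch of that selective
// invalidation over a generic cache (illustrative types, not the
// KnownNodeAspects API):
#if 0
#include <map>
#include <string>
using PropertyCache = std::map<std::string, std::map<int, int>>;
void ClearElementsEntries(PropertyCache& cache) {
  auto it = cache.find("Elements");
  if (it != cache.end()) it->second.clear();  // Drop only [Elements] facts.
}
#endif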
8050
8051void NodeBase::ClearUnstableNodeAspects(KnownNodeAspects& known_node_aspects) {
8052 DCHECK(properties().can_write());
8053 DCHECK(!IsSimpleFieldStore(opcode()));
8054 DCHECK(!IsElementsArrayWrite(opcode()));
8055 known_node_aspects.ClearUnstableNodeAspects();
8056}
8057
8058void StoreMap::ClearUnstableNodeAspects(KnownNodeAspects& known_node_aspects) {
8059 switch (kind()) {
8060 case Kind::kInitializing:
8061 case Kind::kInlinedAllocation:
8062 return;
8063 case Kind::kTransitioning: {
8064 if (NodeInfo* node_info =
8065 known_node_aspects.TryGetInfoFor(object_input().node())) {
8066 if (node_info->possible_maps_are_known() &&
8067 node_info->possible_maps().size() == 1) {
8068 compiler::MapRef old_map = node_info->possible_maps().at(0);
8069 auto MaybeAliases = [&](compiler::MapRef map) -> bool {
8070 return map.equals(old_map);
8071 };
8072 known_node_aspects.ClearUnstableMapsIfAny(MaybeAliases);
8073 if (v8_flags.trace_maglev_graph_building) {
8074 std::cout << " ! StoreMap: Clearing unstable map "
8075 << Brief(*old_map.object()) << std::endl;
8076 }
8077 return;
8078 }
8079 }
8080 break;
8081 }
8082 }
8083 // TODO(olivf): Only invalidate nodes with the same type.
8084 known_node_aspects.ClearUnstableMaps();
8085 if (v8_flags.trace_maglev_graph_building) {
8086 std::cout << " ! StoreMap: Clearing unstable maps" << std::endl;
8087 }
8088}
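// A transitioning StoreMap only needs to drop cached map facts that could
// alias the receiver's old map; when that map is uniquely known, the
// MaybeAliases predicate above narrows invalidation to exactly one map.
// A sketch of the same idea with illustrative types (std::erase_if on a
// vector needs C++20):
#if 0
#include <functional>
#include <vector>
struct CachedMap { int id; bool is_stable; };
void ClearUnstableMapsIf(std::vector<CachedMap>& cache,
                         const std::function<bool(const CachedMap&)>& aliases) {
  std::erase_if(cache, [&](const CachedMap& m) {
    return !m.is_stable && aliases(m);  // Keep stable and non-aliasing maps.
  });
}
#endif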
8089
8090void CheckMapsWithMigration::ClearUnstableNodeAspects(
8091 KnownNodeAspects& known_node_aspects) {
8092 // This instruction only migrates representations of values, not the values
8093 // themselves, so cached values are still valid.
8094}
8095
8096void MigrateMapIfNeeded::ClearUnstableNodeAspects(
8097 KnownNodeAspects& known_node_aspects) {
8098 // This instruction only migrates representations of values, not the values
8099 // themselves, so cached values are still valid.
8100}
8101
8105
8107 CheckedNumberOrOddballToFloat64, ValueRepresentation::kFloat64>;
8109 CheckedNumberOrOddballToHoleyFloat64, ValueRepresentation::kHoleyFloat64>;
8110
8111std::optional<int32_t> NodeBase::TryGetInt32ConstantInput(int index) {
8112 Node* node = input(index).node();
8113 if (auto smi = node->TryCast<SmiConstant>()) {
8114 return smi->value().value();
8115 }
8116 if (auto i32 = node->TryCast<Int32Constant>()) {
8117 return i32->value();
8118 }
8119 return {};
8120}
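// TryGetInt32ConstantInput lets peephole-style callers special-case known
// constant inputs. A hypothetical usage sketch (not a caller from this
// file):
#if 0
void TryStrengthReduceAdd(NodeBase* add) {
  if (std::optional<int32_t> c = add->TryGetInt32ConstantInput(1)) {
    if (*c == 0) {
      // x + 0: the add could be replaced by its first input.
    }
  }
}
#endif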
8121
8122} // namespace maglev
8123} // namespace internal
8124} // namespace v8
#define Assert(condition)
interpreter::OperandScale scale
Definition builtins.cc:44
Builtins::Kind kind
Definition builtins.cc:40
#define SBXCHECK_EQ(lhs, rhs)
Definition check.h:62
static constexpr U kMask
Definition bit-field.h:41
static constexpr int kShift
Definition bit-field.h:39
constexpr bool is_subset_of(EnumSet set) const
Definition enum-set.h:47
constexpr void Add(E element)
Definition enum-set.h:50
static constexpr int kNewTargetIndex
static constexpr int kPaddingIndex
static constexpr int kArgcIndex
static constexpr int kNumExtraArgs
static constexpr int kTargetIndex
static constexpr int kReceiverIndex
static CallInterfaceDescriptor CallInterfaceDescriptorFor(Builtin builtin)
Definition builtins.cc:189
static Address CppEntryOf(Builtin builtin)
Definition builtins.cc:350
static int GetFormalParameterCount(Builtin builtin)
static V8_EXPORT_PRIVATE const char * name(Builtin builtin)
Definition builtins.cc:226
static DEFINE_PARAMETERS_VARARGS(kApiFunctionAddress, kActualArgumentsCount, kFunctionTemplateInfo) DEFINE_PARAMETER_TYPES(MachineType constexpr Register ActualArgumentsCountRegister()
static Tagged< Smi > MutableHeapNumber()
static V8_INLINE constexpr int OffsetOfElementAt(int index)
Definition contexts.h:512
static V8_EXPORT_PRIVATE ExternalReference address_of_pending_message(LocalIsolate *local_isolate)
static ExternalReference Create(const SCTableReference &table_ref)
static constexpr int kMaxOsrUrgency
static constexpr int kMaxLength
uint64_t get_bits() const
Definition boxed-float.h:80
double get_scalar() const
Definition boxed-float.h:81
static constexpr int kMapOffset
static V8_INLINE constexpr bool IsValidSmi(T value)
static V8_INLINE Isolate * Current()
Definition isolate-inl.h:35
LocalIsolate * AsLocalIsolate()
Definition isolate.h:2188
LocalHeap * main_thread_local_heap()
Definition isolate.cc:7479
static const int kInitialMaxFastElementArray
Definition js-array.h:144
static const int kFieldsAdded
Definition js-objects.h:954
static constexpr MachineType AnyTagged()
void CallBuiltin(Builtin builtin, Condition cond=al)
static constexpr int OffsetOfElementAt(int index)
static constexpr int SizeFor(int length)
static const int kNoHashSentinel
static constexpr Register no_reg()
static const char * name(RootIndex root_index)
Definition roots.h:600
static constexpr bool IsReadOnly(RootIndex root_index)
Definition roots.h:623
static V8_EXPORT_PRIVATE const Function * FunctionForId(FunctionId id)
Definition runtime.cc:350
static constexpr Tagged< Smi > FromInt(int value)
Definition smi.h:38
static constexpr int kMaxValue
Definition smi.h:101
static Tagged< TaggedIndex > FromIntptr(intptr_t value)
T * AllocateArray(size_t length)
Definition zone.h:127
IndirectHandle< FeedbackCell > object() const
IndirectHandle< FeedbackVector > object() const
IndirectHandle< HeapObject > object() const
IndirectHandle< InternalizedString > object() const
MachineRepresentation representation() const
static LocationOperand * cast(InstructionOperand *op)
IndirectHandle< Map > object() const
IndirectHandle< Name > object() const
IndirectHandle< ScopeInfo > object() const
IndirectHandle< SharedFunctionInfo > object() const
static constexpr Register virtual_accumulator()
static const char * ToString(LiteralFlag literal_flag)
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
void GenerateCode(MaglevAssembler *, const ProcessingState &)
AbortReason reason() const
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
InlinedAllocation::List allocation_list_
Definition maglev-ir.h:6174
AllocationType allocation_type() const
Definition maglev-ir.h:6150
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
void GenerateCode(MaglevAssembler *, const ProcessingState &)
CreateArgumentsType type() const
Definition maglev-ir.h:6233
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
interpreter::TestTypeOfFlags::LiteralFlag literal_
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void PushFeedbackAndArguments(MaglevAssembler *)
compiler::FeedbackSource feedback() const
Definition maglev-ir.h:9703
FeedbackSlotType slot_type() const
Definition maglev-ir.h:9707
void PassFeedbackSlotInRegister(MaglevAssembler *)
CallBuiltin(uint64_t bitfield, Builtin builtin)
Definition maglev-ir.h:9684
void PushArguments(MaglevAssembler *masm, Args... extra_args)
void VerifyInputs(MaglevGraphLabeller *graph_labeller) const
Definition maglev-ir.cc:728
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void VerifyInputs(MaglevGraphLabeller *graph_labeller) const
Definition maglev-ir.cc:779
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void VerifyInputs(MaglevGraphLabeller *graph_labeller) const
Definition maglev-ir.cc:611
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCallApiCallbackOptimizedInline(MaglevAssembler *masm, const ProcessingState &state)
const compiler::FunctionTemplateInfoRef function_template_info_
void VerifyInputs(MaglevGraphLabeller *graph_labeller) const
Definition maglev-ir.cc:684
void GenerateCode(MaglevAssembler *, const ProcessingState &)
compiler::SharedFunctionInfoRef shared_function_info() const
void VerifyInputs(MaglevGraphLabeller *graph_labeller) const
Definition maglev-ir.cc:669
void GenerateCode(MaglevAssembler *, const ProcessingState &)
Runtime::FunctionId function_id() const
Definition maglev-ir.h:9917
void GenerateCode(MaglevAssembler *, const ProcessingState &)
CallRuntime(uint64_t bitfield, Runtime::FunctionId function_id, ValueNode *context)
Definition maglev-ir.h:9909
void VerifyInputs(MaglevGraphLabeller *graph_labeller) const
Definition maglev-ir.cc:793
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void VerifyInputs(MaglevGraphLabeller *graph_labeller) const
Definition maglev-ir.cc:655
CallSelf(uint64_t bitfield, int expected_parameter_count, ValueNode *closure, ValueNode *context, ValueNode *receiver, ValueNode *new_target)
void VerifyInputs(MaglevGraphLabeller *graph_labeller) const
Definition maglev-ir.cc:626
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void VerifyInputs(MaglevGraphLabeller *graph_labeller) const
Definition maglev-ir.cc:641
void VerifyInputs(MaglevGraphLabeller *graph_labeller) const
Definition maglev-ir.cc:597
ConvertReceiverMode receiver_mode_
Definition maglev-ir.h:9614
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
const compiler::ZoneRefSet< Map > & maps() const
Definition maglev-ir.h:6568
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
const compiler::ZoneRefSet< Map > maps_
Definition maglev-ir.h:6548
const compiler::ZoneRefSet< Map > & maps() const
Definition maglev-ir.h:6533
CheckMapsWithMigrationAndDeopt(uint64_t bitfield, const compiler::ZoneRefSet< Map > &maps, CheckType check_type)
Definition maglev-ir.h:6515
void GenerateCode(MaglevAssembler *, const ProcessingState &)
CheckMapsWithMigration(uint64_t bitfield, const compiler::ZoneRefSet< Map > &maps, CheckType check_type)
Definition maglev-ir.h:6963
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
const compiler::ZoneRefSet< Map > maps_
Definition maglev-ir.h:6990
const compiler::ZoneRefSet< Map > & maps() const
Definition maglev-ir.h:6975
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
const compiler::ZoneRefSet< Map > maps_
Definition maglev-ir.h:6507
const compiler::ZoneRefSet< Map > & maps() const
Definition maglev-ir.h:6493
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
Object::Conversion mode() const
Definition maglev-ir.h:6781
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
CheckValueEqualsString(uint64_t bitfield, compiler::InternalizedStringRef value, DeoptimizeReason reason)
Definition maglev-ir.h:6681
compiler::InternalizedStringRef value() const
Definition maglev-ir.h:6693
void GenerateCode(MaglevAssembler *, const ProcessingState &)
compiler::HeapObjectRef value() const
Definition maglev-ir.h:6595
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
CheckedInternalizedString(uint64_t bitfield, CheckType check_type)
Definition maglev-ir.h:7227
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
CheckedObjectToIndex(uint64_t bitfield, CheckType check_type)
Definition maglev-ir.h:7256
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
TaggedToFloat64ConversionType conversion_type() const
Definition maglev-ir.h:4547
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void DoLoadToRegister(MaglevAssembler *, OutputRegister)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
const compiler::HeapObjectRef object_
Definition maglev-ir.h:5299
DirectHandle< Object > DoReify(LocalIsolate *isolate) const
Definition maglev-ir.cc:897
compiler::FeedbackSource feedback() const
void VerifyInputs(MaglevGraphLabeller *graph_labeller) const
Definition maglev-ir.cc:713
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void VerifyInputs(MaglevGraphLabeller *graph_labeller) const
Definition maglev-ir.cc:699
compiler::FeedbackSource feedback() const
Definition maglev-ir.h:9661
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
const compiler::NativeContextRef native_context_
void GenerateCode(MaglevAssembler *, const ProcessingState &)
compiler::HeapObjectRef constant_elements()
Definition maglev-ir.h:5368
void GenerateCode(MaglevAssembler *, const ProcessingState &)
compiler::FeedbackSource feedback() const
Definition maglev-ir.h:5369
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
compiler::FeedbackCellRef feedback_cell() const
Definition maglev-ir.h:6390
compiler::SharedFunctionInfoRef shared_function_info() const
Definition maglev-ir.h:6387
compiler::ScopeInfoRef scope_info() const
Definition maglev-ir.h:6285
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
void GenerateCode(MaglevAssembler *, const ProcessingState &)
compiler::FeedbackSource feedback() const
Definition maglev-ir.h:5439
void GenerateCode(MaglevAssembler *, const ProcessingState &)
compiler::ObjectBoilerplateDescriptionRef boilerplate_descriptor()
Definition maglev-ir.h:5436
compiler::FeedbackSource feedback() const
Definition maglev-ir.h:6355
void GenerateCode(MaglevAssembler *, const ProcessingState &)
compiler::FeedbackSource feedback() const
Definition maglev-ir.h:5404
void GenerateCode(MaglevAssembler *, const ProcessingState &)
compiler::ObjectBoilerplateDescriptionRef boilerplate_descriptor()
Definition maglev-ir.h:5475
compiler::FeedbackSource feedback() const
Definition maglev-ir.h:5478
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
DebugBreak(uint64_t bitfield)
Definition maglev-ir.h:7178
compiler::FeedbackSource feedback() const
Definition maglev-ir.h:9303
void GenerateCode(MaglevAssembler *, const ProcessingState &)
compiler::FeedbackSource feedback() const
Definition maglev-ir.h:9167
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
void GenerateCode(MaglevAssembler *, const ProcessingState &)
const InterpretedDeoptFrame & as_interpreted() const
Definition maglev-ir.h:1428
void InitializeInputLocations(Zone *zone, size_t count)
Definition maglev-ir.cc:340
DeoptInfo(Zone *zone, const DeoptFrame top_frame, compiler::FeedbackSource feedback_to_update)
Definition maglev-ir.cc:383
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void DoLoadToRegister(MaglevAssembler *, OutputRegister)
ExternalReference reference() const
Definition maglev-ir.h:5255
void GenerateCode(MaglevAssembler *, const ProcessingState &)
DirectHandle< Object > DoReify(LocalIsolate *isolate) const
Definition maglev-ir.cc:872
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
void GenerateCode(MaglevAssembler *, const ProcessingState &)
compiler::SharedFunctionInfoRef shared_function_info() const
Definition maglev-ir.h:6320
compiler::FeedbackCellRef feedback_cell() const
Definition maglev-ir.h:6323
void VerifyInputs(MaglevGraphLabeller *graph_labeller) const
Definition maglev-ir.h:2813
constexpr uint16_t input_count() const
Definition maglev-ir.h:2808
void GenerateCode(MaglevAssembler *, const ProcessingState &)
constexpr Operation operation() const
Definition maglev-ir.h:3311
DirectHandle< Object > DoReify(LocalIsolate *isolate) const
Definition maglev-ir.cc:892
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
void DoLoadToRegister(MaglevAssembler *, OutputRegister)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
ExternalReference ieee_function_ref() const
Definition maglev-ir.cc:502
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
compiler::FeedbackSource feedback() const
Definition maglev-ir.h:4989
compiler::FeedbackSource feedback() const
Definition maglev-ir.h:4960
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
compiler::AllocatedOperand target() const
Definition maglev-ir.h:9334
void GenerateCode(MaglevAssembler *, const ProcessingState &)
compiler::AllocatedOperand source() const
Definition maglev-ir.h:9333
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void VerifyInputs(MaglevGraphLabeller *graph_labeller) const
Definition maglev-ir.cc:570
void GenerateCode(MaglevAssembler *, const ProcessingState &)
IndirectHandle< FeedbackVector > feedback() const
Definition maglev-ir.h:5026
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
compiler::FeedbackSource feedback() const
Definition maglev-ir.h:9235
void GenerateCode(MaglevAssembler *, const ProcessingState &)
compiler::SharedFunctionInfoRef shared_function_info_
Definition maglev-ir.h:7310
compiler::FeedbackSource feedback() const
Definition maglev-ir.h:7302
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
HasInPrototypeChain(uint64_t bitfield, compiler::HeapObjectRef prototype)
Definition maglev-ir.h:7319
compiler::HeapObjectRef prototype()
Definition maglev-ir.h:7332
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
interpreter::Register source() const
Definition maglev-ir.h:5158
const interpreter::Register source_
Definition maglev-ir.h:5169
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
InitialValue(uint64_t bitfield, interpreter::Register source)
Definition maglev-ir.cc:167
void VerifyInputs(MaglevGraphLabeller *graph_labeller) const
Definition maglev-ir.cc:819
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
ValueNode * node() const
Definition maglev-ir.h:1300
constexpr Operation operation() const
Definition maglev-ir.h:3122
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
DirectHandle< Object > DoReify(LocalIsolate *isolate) const
Definition maglev-ir.cc:884
void DoLoadToRegister(MaglevAssembler *, OutputRegister)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
const MaglevCompilationUnit & unit() const
Definition maglev-ir.h:1406
int ComputeReturnOffset(interpreter::Register result_location, int result_size) const
Definition maglev-ir.cc:412
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
Jump(uint64_t bitfield, BasicBlockRef *target_refs)
interpreter::Register result_location_
Definition maglev-ir.h:1778
interpreter::Register result_location() const
Definition maglev-ir.h:1698
static bool InReturnValues(interpreter::Register reg, interpreter::Register result_location, int result_size)
Definition maglev-ir.cc:402
bool IsResultRegister(interpreter::Register reg) const
Definition maglev-ir.cc:387
const InterpretedDeoptFrame & GetFrameForExceptionHandler(const ExceptionHandlerInfo *handler_info)
Definition maglev-ir.cc:435
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
LoadFloat64(uint64_t bitfield, int offset)
Definition maglev-ir.h:7847
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
compiler::FeedbackSource feedback() const
Definition maglev-ir.h:8819
TypeofMode typeof_mode() const
Definition maglev-ir.h:8820
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
compiler::NameRef name() const
Definition maglev-ir.h:8818
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
LoadInt32(uint64_t bitfield, int offset)
Definition maglev-ir.h:7901
compiler::FeedbackSource feedback() const
Definition maglev-ir.h:8939
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
compiler::NameRef name() const
Definition maglev-ir.h:8905
compiler::FeedbackSource feedback() const
Definition maglev-ir.h:8906
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
LoadTaggedFieldForScriptContextSlot(uint64_t bitfield, const int index)
Definition maglev-ir.h:7786
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
static constexpr RegList GetAllocatableRegisters()
void DefineExceptionHandlerAndLazyDeoptPoint(NodeBase *node)
compiler::FeedbackVectorRef feedback() const
void PrintNodeLabel(std::ostream &os, const NodeBase *node)
static int TemporaryCount(size_t map_count)
MaybeGrowFastElements(uint64_t bitfield, ElementsKind elements_kind)
Definition maglev-ir.h:8037
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void set_temporaries_needed(uint8_t value)
Definition maglev-ir.h:2191
constexpr bool Is() const
Definition maglev-ir.h:2362
void CheckCanOverwriteWith(Opcode new_opcode, OpProperties new_properties)
constexpr Input & input(int index)
Definition maglev-ir.h:1978
constexpr int input_count() const
Definition maglev-ir.h:1973
void set_double_temporaries_needed(uint8_t value)
Definition maglev-ir.h:2196
constexpr Opcode opcode() const
Definition maglev-ir.h:1939
void GenerateCode(MaglevAssembler *, const ProcessingState &)
bool uses_require_31_bit_value() const
Definition maglev-ir.h:9514
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void VerifyInputs(MaglevGraphLabeller *graph_labeller) const
Definition maglev-ir.cc:576
BasicBlock * predecessor_at(int i)
Definition maglev-ir.cc:59
MergePointInterpreterFrameState *const merge_state_
Definition maglev-ir.h:9537
UseRepresentationSet same_loop_uses_repr_hint_
Definition maglev-ir.h:9534
UseRepresentationSet uses_repr_hint_
Definition maglev-ir.h:9533
const MergePointInterpreterFrameState * merge_state() const
Definition maglev-ir.h:9427
bool is_unmerged_loop_phi() const
Definition maglev-ir.cc:128
void RecordUseReprHint(UseRepresentation repr)
Definition maglev-ir.h:9458
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
bool ToBoolean(LocalIsolate *local_isolate) const
Definition maglev-ir.cc:352
void GenerateCode(MaglevAssembler *, const ProcessingState &)
Handle< Object > DoReify(LocalIsolate *isolate) const
Definition maglev-ir.cc:905
void DoLoadToRegister(MaglevAssembler *, OutputRegister)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
compiler::FeedbackSource feedback() const
Definition maglev-ir.h:9267
void GenerateCode(MaglevAssembler *, const ProcessingState &)
compiler::FeedbackSource feedback() const
Definition maglev-ir.h:8973
compiler::NameRef name() const
Definition maglev-ir.h:8972
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void DoLoadToRegister(MaglevAssembler *, OutputRegister)
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
void GenerateCode(MaglevAssembler *, const ProcessingState &)
Tagged< Smi > value() const
Definition maglev-ir.h:5200
DirectHandle< Object > DoReify(LocalIsolate *isolate) const
Definition maglev-ir.cc:876
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
StoreFloat64(uint64_t bitfield, int offset)
Definition maglev-ir.h:8547
void GenerateCode(MaglevAssembler *, const ProcessingState &)
compiler::FeedbackSource feedback() const
Definition maglev-ir.h:8849
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
compiler::NameRef name() const
Definition maglev-ir.h:8848
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
compiler::FeedbackSource feedback() const
Definition maglev-ir.h:9201
StoreInt32(uint64_t bitfield, int offset)
Definition maglev-ir.h:8521
void GenerateCode(MaglevAssembler *, const ProcessingState &)
const compiler::MapRef map_
Definition maglev-ir.h:8666
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
void GenerateCode(MaglevAssembler *, const ProcessingState &)
StoreScriptContextSlotWithWriteBarrier(uint64_t bitfield, int index)
Definition maglev-ir.h:8719
void VerifyInputs(MaglevGraphLabeller *graph_labeller) const
Definition maglev-ir.cc:807
void GenerateCode(MaglevAssembler *, const ProcessingState &)
StoreTaggedFieldNoWriteBarrier(uint64_t bitfield, int offset, StoreTaggedMode store_mode)
Definition maglev-ir.h:8584
void GenerateCode(MaglevAssembler *, const ProcessingState &)
StoreTaggedFieldWithWriteBarrier(uint64_t bitfield, int offset, StoreTaggedMode store_mode)
Definition maglev-ir.h:8675
void GenerateCode(MaglevAssembler *, const ProcessingState &)
StoreTrustedPointerFieldWithWriteBarrier(uint64_t bitfield, int offset, IndirectPointerTag tag, StoreTaggedMode store_mode)
Definition maglev-ir.h:8757
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
Switch(uint64_t bitfield, int value_base, BasicBlockRef *targets, int size)
BasicBlock * fallthrough() const
BasicBlockRef * targets() const
void GenerateCode(MaglevAssembler *, const ProcessingState &)
DirectHandle< Object > DoReify(LocalIsolate *isolate) const
Definition maglev-ir.cc:880
void DoLoadToRegister(MaglevAssembler *, OutputRegister)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
Tagged< TaggedIndex > value() const
Definition maglev-ir.h:5227
void GenerateCode(MaglevAssembler *, const ProcessingState &)
compiler::FeedbackSource feedback() const
Definition maglev-ir.h:4723
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
interpreter::TestTypeOfFlags::LiteralFlag literal_
Definition maglev-ir.h:4779
TestTypeOf(uint64_t bitfield, interpreter::TestTypeOfFlags::LiteralFlag literal)
Definition maglev-ir.h:4761
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
ThrowReferenceErrorIfHole(uint64_t bitfield, const compiler::NameRef name)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
ToBoolean(uint64_t bitfield, CheckType check_type)
Definition maglev-ir.h:4597
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
ToNumberOrNumeric(uint64_t bitfield, Object::Conversion mode)
Definition maglev-ir.h:4806
Object::Conversion mode() const
Definition maglev-ir.h:4815
CheckType check_type() const
Definition maglev-ir.h:5069
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
ConversionMode mode() const
Definition maglev-ir.h:5095
const ZoneVector< compiler::MapRef > & transition_sources() const
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
ZoneVector< compiler::MapRef > transition_sources_
void GenerateCode(MaglevAssembler *, const ProcessingState &)
TaggedToFloat64ConversionType conversion_type() const
Definition maglev-ir.h:4515
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
void DoLoadToRegister(MaglevAssembler *, OutputRegister)
DirectHandle< Object > DoReify(LocalIsolate *isolate) const
Definition maglev-ir.cc:901
void GenerateCode(MaglevAssembler *, const ProcessingState &)
const compiler::HeapObjectRef object_
Definition maglev-ir.h:5351
MaglevCompilationUnit *const unit_
Definition maglev-ir.h:4945
const MaglevCompilationUnit * unit() const
Definition maglev-ir.h:4933
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void DoLoadToRegister(MaglevAssembler *, OutputRegister)
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
DirectHandle< Object > DoReify(LocalIsolate *isolate) const
Definition maglev-ir.cc:888
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
TaggedToFloat64ConversionType conversion_type() const
Definition maglev-ir.h:4428
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
const compiler::InstructionOperand & operand() const
Definition maglev-ir.h:1280
compiler::InstructionOperand hint_
Definition maglev-ir.h:2706
compiler::AllocatedOperand spill_slot() const
Definition maglev-ir.h:2505
DirectHandle< Object > Reify(LocalIsolate *isolate) const
Definition maglev-ir.cc:860
void LoadToRegister(MaglevAssembler *, Register)
compiler::InstructionOperand spill_
Definition maglev-ir.h:2704
void DoLoadToRegister(MaglevAssembler *, Register)
void SetHint(compiler::InstructionOperand hint)
Definition maglev-ir.cc:466
constexpr bool use_double_register() const
Definition maglev-ir.h:2542
const compiler::InstructionOperand & hint() const
Definition maglev-ir.h:2466
void Print(std::ostream &os, const char *prefix, MaglevGraphLabeller *labeller) const
Definition maglev-ir.cc:329
compiler::MapRef map() const
Definition maglev-ir.h:5591
void PrintParams(std::ostream &, MaglevGraphLabeller *) const
static ZoneLabelRef UnsafeFromLabelPointer(Label *label)
#define ASM_CODE_COMMENT_STRING(asm,...)
Definition assembler.h:618
#define DCHECK_REGLIST_EMPTY(...)
#define CASE_REPR(repr)
#define DEF_NAME(Name)
#define TURBOLEV_NON_VALUE_NODE_LIST(V)
Definition maglev-ir.h:159
#define VALUE_NODE_LIST(V)
Definition maglev-ir.h:161
#define IEEE_754_UNARY_LIST(V)
Definition maglev-ir.h:3366
#define TURBOLEV_VALUE_NODE_LIST(V)
Definition maglev-ir.h:153
#define GENERIC_OPERATIONS_NODE_LIST(V)
Definition maglev-ir.h:76
#define CONSTANT_VALUE_NODE_LIST(V)
Definition maglev-ir.h:138
constexpr unsigned CountPopulation(T value)
Definition bits.h:26
void * Allocate(void *address, size_t size, OS::MemoryPermission access)
V8_INLINE Dest bit_cast(Source const &source)
Definition macros.h:95
constexpr bool IsInRange(T value, U lower_limit, U higher_limit)
Definition bounds.h:20
constexpr Vector< T > VectorOf(T *start, size_t size)
Definition vector.h:360
V8_INLINE const Operation & Get(const Graph &graph, OpIndex index)
Definition graph.h:1231
int ExternalArrayElementSize(const ExternalArrayType element_type)
Definition globals.h:156
bool AnyMapIsHeapNumber(const ZoneRefSet< Map > &maps)
Definition heap-refs.h:1303
void DefineAsRegister(Node *node)
constexpr Condition ConditionFor(Operation operation)
DoubleRegister ToDoubleRegister(const compiler::InstructionOperand &operand)
void DefineSameAsFirst(Node *node)
const char * OpcodeToString(Opcode opcode)
Definition maglev-ir.cc:52
Condition ToCondition(AssertCondition cond)
constexpr bool IsConstantNode(Opcode opcode)
Definition maglev-ir.h:491
Register ToRegister(const compiler::InstructionOperand &operand)
void DefineAsFixed(Node *node, Register reg)
void CheckValueInputIs(const NodeBase *node, int i, ValueRepresentation expected, MaglevGraphLabeller *graph_labeller)
Definition maglev-ir.cc:532
void UseAndClobberRegister(Input &input)
constexpr Condition ConditionForFloat64(Operation operation)
void DefineAsConstant(Node *node)
auto RepeatValue(T val, int count)
void UseAny(Input &input)
static constexpr int kNoVreg
bool FromConstantToBool(LocalIsolate *local_isolate, ValueNode *node)
Definition maglev-ir.cc:364
constexpr bool IsDoubleRepresentation(ValueRepresentation repr)
Definition maglev-ir.h:601
constexpr Condition UnsignedConditionFor(Operation operation)
constexpr bool IsSimpleFieldStore(Opcode opcode)
Definition maglev-ir.h:547
ValueRepresentation ToValueRepresentation(MachineType type)
Definition maglev-ir.cc:516
void UseRegister(Input &input)
void UseFixed(Input &input, Register reg)
NodeTMixin< Node, Derived > NodeT
Definition maglev-ir.h:2858
constexpr bool IsElementsArrayWrite(Opcode opcode)
Definition maglev-ir.h:559
constexpr Register no_reg
const uint32_t kStringEncodingMask
bool TryCast(Tagged< From > value, Tagged< To > *out)
Definition casting.h:77
constexpr int kTaggedSize
Definition globals.h:542
bool Is(IndirectHandle< U > value)
Definition handles-inl.h:51
DwVfpRegister DoubleRegister
const uint32_t kTwoByteStringTag
RegListBase< Register > RegList
Definition reglist-arm.h:14
constexpr Register kJavaScriptCallTargetRegister
const uint32_t kThinStringTagBit
char const * DeoptimizeReasonToString(DeoptimizeReason reason)
constexpr Register kJavaScriptCallArgCountRegister
@ SLOW_STRING_WRAPPER_ELEMENTS
@ FAST_STRING_WRAPPER_ELEMENTS
const uint32_t kStringTag
V8_INLINE DirectHandle< T > direct_handle(Tagged< T > object, Isolate *isolate)
static constexpr RegList kAllocatableGeneralRegisters
Definition reglist.h:36
bool IsSimpleMapChangeTransition(ElementsKind from_kind, ElementsKind to_kind)
constexpr int kJSArgcReceiverSlots
Definition globals.h:2778
std::ostream & operator<<(std::ostream &os, AtomicMemoryOrder order)
MemOperand FieldMemOperand(Register object, int offset)
bool IsSmiOrObjectElementsKind(ElementsKind kind)
constexpr int kSystemPointerSize
Definition globals.h:410
const char * LanguageMode2String(LanguageMode mode)
Definition globals.h:759
bool IsSignedIntTypedArrayElementsKind(ElementsKind kind)
constexpr Register kReturnRegister1
const char * GetAbortReason(AbortReason reason)
constexpr int kTaggedSizeLog2
Definition globals.h:543
constexpr Register kReturnRegister0
constexpr bool SmiValuesAre31Bits()
constexpr Register kScratchRegister
Condition NegateCondition(Condition cond)
constexpr Register kContextRegister
V8_EXPORT_PRIVATE bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
const int kSmiValueSize
bool IsFloatTypedArrayElementsKind(ElementsKind kind)
V8_EXPORT_PRIVATE FlagValues v8_flags
constexpr bool SmiValuesAre32Bits()
const uint32_t kInternalizedTag
constexpr Register kJavaScriptCallCodeStartRegister
bool IsUnsignedIntTypedArrayElementsKind(ElementsKind kind)
@ kExternalInt8Array
Definition globals.h:2453
const uint32_t kIsNotInternalizedMask
V8_EXPORT_PRIVATE constexpr int ElementSizeInBytes(MachineRepresentation)
void CallApiFunctionAndReturn(MacroAssembler *masm, bool with_profiling, Register function_address, ExternalReference thunk_ref, Register thunk_arg, int slots_to_drop_on_return, MemOperand *argc_operand, MemOperand return_value_operand)
const uint32_t kIsNotStringMask
constexpr bool IsDoubleElementsKind(ElementsKind kind)
constexpr Register kCArgRegs[]
constexpr int ElementsKindToByteSize(ElementsKind elements_kind)
constexpr Register kJavaScriptCallNewTargetRegister
MemOperand ExitFrameStackSlotOperand(int offset)
Tagged< To > Cast(Tagged< From > value, const v8::SourceLocation &loc=INIT_SOURCE_LOCATION_IN_DEBUG)
Definition casting.h:150
Local< T > Handle
Operation
Definition operation.h:43
#define OPERATION_LIST(V)
Definition operation.h:38
#define READ_ONLY_ROOT_LIST(V)
Definition roots.h:468
#define FATAL(...)
Definition logging.h:47
#define DCHECK_LE(v1, v2)
Definition logging.h:490
#define DCHECK_NULL(val)
Definition logging.h:491
#define CHECK(condition)
Definition logging.h:124
#define CHECK_LE(lhs, rhs)
#define DCHECK_NOT_NULL(val)
Definition logging.h:492
#define CHECK_NOT_NULL(val)
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define CHECK_EQ(lhs, rhs)
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define DCHECK_GT(v1, v2)
Definition logging.h:487
const NodeInfo * TryGetInfoFor(ValueNode *node) const
#define OFFSET_OF_DATA_START(Type)
#define V8_LIKELY(condition)
Definition v8config.h:661
#define V8_TARGET_BIG_ENDIAN_BOOL
Definition v8config.h:1008
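To show how the helpers indexed above fit together, here is a minimal, hypothetical sketch of a Maglev value node. The node name MyInt32Negate and its value_input() accessor are invented for illustration; only the UseRegister/DefineSameAsFirst register-constraint helpers, ToRegister, and the GenerateCode(MaglevAssembler *, const ProcessingState &) hook come from the cross references above. This is not code from maglev-ir.cc.

// Hypothetical node, for illustration only: constraints are declared
// first, then code generation consumes the allocated locations.
void MyInt32Negate::SetValueLocationConstraints() {
  UseRegister(value_input());  // the input must live in a general register
  DefineSameAsFirst(this);     // the result reuses the input's register
}

void MyInt32Negate::GenerateCode(MaglevAssembler* masm,
                                 const ProcessingState& state) {
  Register value = ToRegister(result());  // same register as the input
  // Emit the architecture-specific negation of `value` here; the exact
  // instruction sequence differs per backend, so it is omitted.
}

A real node in this file follows the same two-phase shape: the register allocator consults the constraint method, and GenerateCode then reads its inputs and result through ToRegister/ToDoubleRegister on the assigned operands.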