turbolev-graph-builder.cc
// Copyright 2023 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <limits>
#include <memory>
#include <optional>
#include <type_traits>

#include "src/base/logging.h"
#include "src/base/vector.h"
#include "src/common/globals.h"
#include "src/handles/handles.h"
#include "src/objects/map.h"
#include "src/objects/objects.h"

namespace v8::internal::compiler::turboshaft {

namespace {

MachineType MachineTypeFor(maglev::ValueRepresentation repr) {
  switch (repr) {
    case maglev::ValueRepresentation::kTagged:
      return MachineType::AnyTagged();
    case maglev::ValueRepresentation::kInt32:
      return MachineType::Int32();
    case maglev::ValueRepresentation::kUint32:
      return MachineType::Uint32();
    case maglev::ValueRepresentation::kIntPtr:
      return MachineType::IntPtr();
    case maglev::ValueRepresentation::kFloat64:
      return MachineType::Float64();
    case maglev::ValueRepresentation::kHoleyFloat64:
      return MachineType::Float64();
  }
}

}  // namespace

// This reducer tracks the Maglev origin of the Turboshaft blocks that we build
// during the translation. This is then used when reordering Phi inputs.
template <class Next>
class BlockOriginTrackingReducer : public Next {
 public:
  TURBOSHAFT_REDUCER_BOILERPLATE(BlockOriginTracking)
  void SetMaglevInputBlock(const maglev::BasicBlock* block) {
    maglev_input_block_ = block;
  }
  const maglev::BasicBlock* maglev_input_block() const {
    return maglev_input_block_;
  }
  void Bind(Block* block) {
    Next::Bind(block);
    // The 1st block we bind doesn't exist in Maglev and is meant to hold
    // Constants (which in Maglev are not in any block), and thus
    // {maglev_input_block_} should still be nullptr. In all other cases,
    // {maglev_input_block_} should not be nullptr.
    DCHECK_EQ(maglev_input_block_ == nullptr,
              block == &__ output_graph().StartBlock());
    turboshaft_block_origins_[block->index()] = maglev_input_block_;
  }

  const maglev::BasicBlock* GetMaglevOrigin(const Block* block) {
    return turboshaft_block_origins_[block->index()];
  }

 private:
  const maglev::BasicBlock* maglev_input_block_ = nullptr;
  GrowingBlockSidetable<const maglev::BasicBlock*> turboshaft_block_origins_{
      __ phase_zone()};
};

class GeneratorAnalyzer {
  // A document explaining how generators are handled by the translation can be
  // found here:
  //
  //     https://docs.google.com/document/d/1-iFoVuvpIEjA9dtSsOjmKL5vAzzvf0cKI6f4zaObiV8/edit?usp=sharing
  //
  //
  // Because of generator resumes, Maglev graphs can have edges that bypass
  // loop headers. This actually happens every time a loop contains a `yield`.
  // In Turboshaft, however, the loop header must always dominate every block
  // in the loop, and such edges that bypass the loop header are thus not
  // allowed. For instance,
  //
  //     function* foo() {
  //       for (let i = 0; i < 10; i++) {
  //         if (i % 2 == 0) {
  //           yield i;
  //         }
  //       }
  //     }
  //
  // The corresponding Maglev graph will look something like (this is a little
  // bit simplified since details don't matter much for this high level
  // explanation; the drawing in FindLoopHeaderBypasses below gives a more
  // precise view of what the Maglev graph looks like):
  //
  //                       + 1 ------+
  //                       | Switch  |
  //                       +---------+
  //                        /       \
  //                       /         \    |----------------------|
  //                      /           \   |                      |
  //                     /             v  v                      |
  //                    /            + 2 --------+               |
  //                   /             |   Loop    |               |
  //                  |              +-----------+               |
  //                  |                    |                     |
  //                  |                    |                     |
  //                  v                    v                     |
  //            + 4 ------+     + 3 --------------+              |
  //            | Resume  |     | Branch(i%2==0)  |              |
  //            +---------+     +-----------------+              |
  //                 |               /       \                   |
  //                 |              /         \                  |
  //                 |             /           \                 |
  //                 |     + 5 -------+         |                |
  //                 |     | yield i  |         |                |
  //                 |     +----------+         |                |
  //                 |           |              |                |
  //                 |-----------|              |                |
  //                             |              |                |
  //                             v              v                |
  //                           + 6 ----------+                   |
  //                           |    i++      |                   |
  //                           |  backedge   |                   |
  //                           +-------------+                   |
  //                                  |                          |
  //                                  |--------------------------|
  //
  // In this graph, the edge from block 4 to block 6 bypasses the loop header.
  //
  //
  // Note that it's even possible that the graph contains no forward path from
  // the loop header to the backedge. This happens for instance when the loop
  // body always unconditionally yields. In such cases, the backedge is always
  // reached through the main resume switch. For instance:
  //
  //     function* foo() {
  //       for (let i = 0; i < 10; i++) {
  //         yield i;
  //       }
  //     }
  //
  // will produce the following graph:
  //
  //                       + 1 ------+
  //                       | Switch  |
  //                       +---------+
  //                        /       \
  //                       /         \    |-------------|
  //                      /           \   |             |
  //                     /             v  v             |
  //                    /            + 2 --------+      |
  //                   /             |   Loop    |      |
  //                  |              +-----------+      |
  //                  |                    |            |
  //                  |                    |            |
  //                  v                    v            |
  //            + 4 ------+        + 3 -------+         |
  //            | Resume  |        | yield i  |         |
  //            +---------+        +----------+         |
  //                 |                                   |
  //                 |                                   |
  //                 |-----------------------------------|
  //
  //
  // GeneratorAnalyzer finds the loops in the Maglev graph, and finds the
  // generator resume edges that bypass loop headers. The GraphBuilder then
  // uses this information to re-route such edges to loop headers and insert
  // secondary switches. For instance, the graph right above will be
  // transformed to something like this:
  //
  //                      + 1 ------+
  //                      | Switch  |
  //                      +---------+
  //                        |     |
  //                        |     |
  //                        v     v
  //                      + 2 --------+
  //                      | p1 = phi  |
  //                      +-----------+
  //                            |
  //                            |     |-----------------------|
  //                            |     |                       |
  //                            v     v                       |
  //                      + 3 -----------------+              |
  //                      |       Loop         |              |
  //                      |  p2 = phi(p1,...)  |              |
  //                      +--------------------+              |
  //                                |                         |
  //                                |                         |
  //                                v                         |
  //                        + 4 -----------+                  |
  //                        |  Switch(p2)  |                  |
  //                        +--------------+                  |
  //                           /        \                     |
  //                          /          \                    |
  //                         /            \                   |
  //                        v              v                  |
  //                + 5 --------+    + 6 --------+            |
  //                |  Resume   |    |  yield i  |            |
  //                +-----------+    +-----------+            |
  //                                       |                  |
  //                                       |                  |
  //                                       |------------------|

 public:

  void Analyze(maglev::Graph* graph) {
    for (auto it = graph->rbegin(); it != graph->rend(); ++it) {
      if ((*it)->is_loop()) {
        FindLoopBody(it);
      }
    }
    FindLoopHeaderBypasses(graph);
  }

  bool JumpBypassesHeader(const maglev::BasicBlock* target) {
    return block_to_innermost_bypassed_header_.contains(target);
  }

  const maglev::BasicBlock* GetInnermostBypassedHeader(
      const maglev::BasicBlock* target) {
    DCHECK(JumpBypassesHeader(target));
    return block_to_innermost_bypassed_header_[target];
  }

  bool HeaderIsBypassed(const maglev::BasicBlock* header) {
    DCHECK(header->is_loop());
    return bypassed_headers_.contains(header);
  }

  const maglev::BasicBlock* GetLoopHeader(const maglev::BasicBlock* node) {
    if (block_to_header_.contains(node)) {
      return block_to_header_[node];
    }
    return nullptr;
  }

  bool has_header_bypasses() const { return !bypassed_headers_.empty(); }

 private:
  // We consider that every block in between the loop header and the backedge
  // belongs to the loop. This is a little bit more conservative than necessary
  // and might include blocks that in fact cannot reach the backedge, but it
  // makes dealing with exception blocks easier (because they have no explicit
  // predecessors in Maglev).
  void FindLoopBody(maglev::BlockConstReverseIterator it) {
    const maglev::BasicBlock* header = *it;
    DCHECK(header->is_loop());

    --it;  // Skipping the header, since we consider its loop header to be the
           // header of the outer loop (if any).

    const maglev::BasicBlock* backedge_block = header->backedge_predecessor();
    if (backedge_block == header) {
      // This is a 1-block loop. Since headers are part of the outer loop, we
      // have nothing to mark.
      return;
    }

    block_to_header_[backedge_block] = header;

    for (; *it != backedge_block; --it) {
      const maglev::BasicBlock* curr = *it;
      if (block_to_header_.contains(curr)) {
        // {curr} is part of an inner loop.
        continue;
      }
      block_to_header_[curr] = header;
    }
  }

  void FindLoopHeaderBypasses(maglev::Graph* graph) {
    // As mentioned earlier, Maglev graphs for resumable generator functions
    // always start with a main dispatch switch in the 3rd block:
    //
    //
    //       + 1 -----------------+
    //       |  InitialValues...  |
    //       |       Jump         |
    //       +--------------------+
    //                  |
    //                  |
    //                  v
    //       + 2 --------------------+
    //       | BranchIfRootConstant  |
    //       +-----------------------+
    //            /              \
    //           /                \
    //          /                  \
    //         /                    \
    //        v                      v
    //  + 3 ----------+     + 4 --------------+
    //  | Load state  |     |  Initial setup  |
    //  |   Switch    |     |     return      |
    //  +-------------+     +-----------------+
    //     /    |    \
    //    /     |     \
    //   v      v      v
    //  Resuming in various places
    //
    //
    //
    // In order to find loop header bypasses, we are looking for cases where
    // the destinations of the dispatch switch (= the successors of block 3)
    // are inside a loop.

    constexpr int kGeneratorSwitchBlockIndex = 2;
    maglev::BasicBlock* generator_switch_block =
        graph->blocks()[kGeneratorSwitchBlockIndex];
    DCHECK(generator_switch_block->control_node()->Is<maglev::Switch>());

    for (maglev::BasicBlock* target : generator_switch_block->successors()) {
      const maglev::BasicBlock* innermost_header = GetLoopHeader(target);

      if (innermost_header) {
        // This case bypasses a loop header.
        RecordHeadersForBypass(target, innermost_header);
      }
    }
  }

  void RecordHeadersForBypass(maglev::BasicBlock* initial_target,
                              const maglev::BasicBlock* innermost_header) {
    block_to_innermost_bypassed_header_[initial_target] = innermost_header;
    bypassed_headers_.insert(innermost_header);

    for (const maglev::BasicBlock* outer_header =
             GetLoopHeader(innermost_header);
         outer_header; outer_header = GetLoopHeader(outer_header)) {
      bypassed_headers_.insert(outer_header);
    }
  }

  // Map from blocks inside loops to the header of said loops.
  std::unordered_map<const maglev::BasicBlock*, const maglev::BasicBlock*>
      block_to_header_;

  // Map from jump target to the innermost header they bypass.
  std::unordered_map<const maglev::BasicBlock*, const maglev::BasicBlock*>
      block_to_innermost_bypassed_header_;
  // Set of headers that are bypassed because of generator resumes.
  std::unordered_set<const maglev::BasicBlock*> bypassed_headers_;

  // {visit_queue_} is used in FindLoopBody to store nodes that still need to
  // be visited. It is an instance variable in order to reuse its memory more
  // efficiently.
};
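
// A minimal usage sketch of GeneratorAnalyzer (hypothetical, not part of the
// original file; construction details elided): run Analyze() once on the
// Maglev graph, then query it while translating jumps.
//
//   GeneratorAnalyzer analyzer(/* ... */);
//   analyzer.Analyze(maglev_graph);
//   if (analyzer.has_header_bypasses() &&
//       analyzer.JumpBypassesHeader(jump_target)) {
//     const maglev::BasicBlock* header =
//         analyzer.GetInnermostBypassedHeader(jump_target);
//     // ... re-route the jump through {header} and record a secondary-switch
//     // case, as described in the comments above ...
//   }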

#define GET_FRAME_STATE_MAYBE_ABORT(name, deopt_info)                       \
  V<FrameState> name;                                                       \
  {                                                                         \
    OptionalV<FrameState> _maybe_frame_state = BuildFrameState(deopt_info); \
    if (!_maybe_frame_state.has_value()) {                                  \
      DCHECK(bailout_->has_value());                                        \
      return maglev::ProcessResult::kAbort;                                 \
    }                                                                       \
    name = _maybe_frame_state.value();                                      \
  }
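
// Sketch of how the macro above is used inside the Process() methods below
// (node and method names are illustrative):
//
//   maglev::ProcessResult Process(maglev::SomeThrowingNode* node,
//                                 const maglev::ProcessingState& state) {
//     GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
//     // ... emit Turboshaft operations that take {frame_state} ...
//     return maglev::ProcessResult::kContinue;
//   }
//
// If BuildFrameState fails, the macro early-returns kAbort and the whole
// translation bails out.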

constexpr bool TooManyArgumentsForCall(size_t arguments_count) {
  constexpr int kCalleeCount = 1;
  constexpr int kFrameStateCount = 1;
  return (arguments_count + kCalleeCount + kFrameStateCount) >
         std::numeric_limits<decltype(Operation::input_count)>::max();
}
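
// For instance, assuming Operation::input_count is a uint16_t (so its max is
// 65535), the callee and frame state inputs leave room for 65533 arguments:
//
//   static_assert(!TooManyArgumentsForCall(65533));  // 65533 + 1 + 1 == 65535
//   static_assert(TooManyArgumentsForCall(65534));   // 65534 + 1 + 1 > 65535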

#define BAILOUT_IF_TOO_MANY_ARGUMENTS_FOR_CALL(count) \
  {                                                   \
    if (TooManyArgumentsForCall(count)) {             \
      *bailout_ = BailoutReason::kTooManyArguments;   \
      return maglev::ProcessResult::kAbort;           \
    }                                                 \
  }

#define GENERATE_AND_MAP_BUILTIN_CALL(node, builtin, frame_state, arguments, \
                                      ...)                                   \
  BAILOUT_IF_TOO_MANY_ARGUMENTS_FOR_CALL(arguments.size());                  \
  SetMap(node, GenerateBuiltinCall(node, builtin, frame_state, arguments,    \
                                   ##__VA_ARGS__));

// Turboshaft's MachineOptimizationReducer will sometimes detect that the
// condition for a DeoptimizeIf is always true, and replace it with an
// unconditional Deoptimize. When this happens, the assembler doesn't emit
// anything until the next reachable block is bound, which can lead to some
// Variable or OpIndex being Invalid, which can break some assumptions. To
// avoid this, the RETURN_IF_UNREACHABLE macro can be used to early-return.
#define RETURN_IF_UNREACHABLE()                 \
  if (__ generating_unreachable_operations()) { \
    return maglev::ProcessResult::kContinue;    \
  }
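
// Usage sketch (illustrative): after emitting an operation that the reducer
// stack may turn into an unconditional deopt, cut the visit short before
// defining any Variable or OpIndex in the now-unreachable block:
//
//   __ DeoptimizeIf(condition, frame_state, /* ... */);
//   RETURN_IF_UNREACHABLE();
//   // ... safe to keep emitting operations here ...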

// TODO(dmercadier): LazyDeoptOnThrow is currently not very cleanly dealt with.
// In Maglev, it is a property of the ExceptionHandlerInfo, which is used by
// all throwing nodes and is created in a single place
// (MaglevGraphBuilder::AttachExceptionHandlerInfo). However, during the
// translation, we create different kinds of calls from different places (Call,
// CallBuiltin_XXX, CallRuntime_XXX), and non-call nodes can also
// LazyDeoptOnThrow (such as GenericBinop) and we always have to manually
// remember to pass ShouldLazyDeoptOnThrow, which is easy to forget, which can
// then easily lead to bugs. A few ideas come to mind:
//
//  - Make ShouldLazyDeoptOnThrow non-optional on all throwing nodes. This is a
//    bit verbose, but at least we won't forget it.
//
//  - Make ThrowingScope automatically annotate all throwing nodes that are
//    emitted while the scope is active. The Assembler would be doing most of
//    the work: it would have a "LazyDeoptOnThrowScope" or something similar,
//    and any throwing node emitted during this scope would have the
//    LazyDeoptOnThrow property added as needed. All throwing nodes have a
//    {lazy_deopt_on_throw} field defined by THROWING_OP_BOILERPLATE (except
//    calls, but we could add it), so it shouldn't be very hard for the
//    Assembler to deal with this in a unified way.
//    The downside of this approach is that the interaction between this and
//    {current_catch_block} (in particular with nested scopes) might introduce
//    even more complexity and magic in the assembler.

class GraphBuilder {
 public:

  GraphBuilder(PipelineData* data, Graph& graph, Zone* temp_zone,
               maglev::MaglevCompilationUnit* maglev_compilation_unit,
               std::optional<BailoutReason>* bailout)
      : data_(data),
        assembler_(data, graph, graph, temp_zone),
        maglev_compilation_unit_(maglev_compilation_unit),
        graph_labeller_(maglev_compilation_unit_->graph_labeller()),
        bailout_(bailout) {}

  void PreProcessGraph(maglev::Graph* graph) {
    for (maglev::BasicBlock* block : *graph) {
      block_mapping_[block] =
          block->is_loop() ? __ NewLoopHeader() : __ NewBlock();
    }
    // Constants are not in a block in Maglev but are in Turboshaft. We bind a
    // block now, so that Constants can then be emitted.
    __ Bind(__ NewBlock());

    // Initializing the undefined constant so that we don't need to recreate
    // it too often.
    undefined_value_ = __ HeapConstant(local_factory_->undefined_value());

    if (maglev_compilation_unit_->bytecode()
            .incoming_new_target_or_generator_register()
            .is_valid()) {
      // The Maglev graph might contain a RegisterInput for
      // kJavaScriptCallNewTargetRegister later in the graph, which in
      // Turboshaft is represented as a Parameter. We create this Parameter
      // here, because the Instruction Selector tends to be unhappy when
      // Parameters are defined late in the graph.
      int new_target_index = Linkage::GetJSCallNewTargetParamIndex(
          maglev_compilation_unit_->parameter_count());
      new_target_param_ = __ Parameter(
          new_target_index, RegisterRepresentation::Tagged(), "%new.target");
    }

    if (graph->has_resumable_generator()) {
      generator_analyzer_.Analyze(graph);

      dummy_object_input_ = __ SmiZeroConstant();
      dummy_word32_input_ = __ Word32Constant(0);
      dummy_float64_input_ = __ Float64Constant(0);

      header_switch_input_ = __ NewVariable(RegisterRepresentation::Word32());
      loop_default_generator_value_ = __ Word32Constant(kDefaultSwitchVarValue);
      generator_context_ =
          __ NewLoopInvariantVariable(RegisterRepresentation::Tagged());
      __ SetVariable(generator_context_, __ NoContextConstant());
    }

    // Maglev nodes often don't have the NativeContext as input, but instead
    // rely on the MaglevAssembler to provide it during code generation, unlike
    // Turboshaft nodes, which need the NativeContext as an explicit input if
    // they use it. We thus emit a single NativeContext constant here, which we
    // reuse later to construct Turboshaft nodes.
    native_context_ =
        __ HeapConstant(broker_->target_native_context().object());
  }

  void PostProcessGraph(maglev::Graph* graph) {
    // It can happen that some Maglev loops don't actually loop (the backedge
    // isn't actually reachable). We can't know this when emitting the header
    // in Turboshaft, which means that we still emit the header, but then we
    // never come around to calling FixLoopPhis on it. So, once we've generated
    // the whole Turboshaft graph, we go over all loop headers, and if some
    // turn out to not be headers, we turn them into regular merge blocks (and
    // patch their PendingLoopPhis).
    for (Block& block : __ output_graph().blocks()) {
      if (block.IsLoop() && block.PredecessorCount() == 1) {
        __ output_graph().TurnLoopIntoMerge(&block);
      }
    }
  }
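
  // For example (illustrative): a Turboshaft loop header normally has two
  // predecessors, the forward edge and the backedge. If optimizations removed
  // the backedge, PredecessorCount() is 1 and TurnLoopIntoMerge rewrites
  //
  //     B3 (loop, predecessors: B2, B7)     =>   B3 (merge, predecessors: B2)
  //     p = PendingLoopPhi(x, ...)          =>   p resolved to its first input
  //
  // so the block no longer pretends to be a loop.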

  // The Maglev graph for resumable generator functions always has the main
  // dispatch Switch in the same block.
  bool IsMaglevMainGeneratorSwitchBlock(
      const maglev::BasicBlock* maglev_block) {
    if (!generator_analyzer_.has_header_bypasses()) return false;
    constexpr int kMainSwitchBlockId = 2;
    bool is_main_switch_block = maglev_block->id() == kMainSwitchBlockId;
    DCHECK_IMPLIES(is_main_switch_block,
                   maglev_block->control_node()->Is<maglev::Switch>());
    return is_main_switch_block;
  }

  maglev::BlockProcessResult PreProcessBasicBlock(
      maglev::BasicBlock* maglev_block) {
    // Note that it's important to call SetMaglevInputBlock before calling
    // Bind, so that BlockOriginTrackingReducer::Bind records the correct
    // predecessor for the current block.
    __ SetMaglevInputBlock(maglev_block);

    is_visiting_generator_main_switch_ =
        IsMaglevMainGeneratorSwitchBlock(maglev_block);

    Block* turboshaft_block = Map(maglev_block);

    if (__ current_block() != nullptr) {
      // The first block for Constants doesn't end with a Jump, so we add one
      // now.
      __ Goto(turboshaft_block);
    }

#ifdef DEBUG
    loop_phis_first_input_.clear();
    loop_phis_first_input_index_ = -1;
    catch_block_begin_ = V<Object>::Invalid();
#endif

    if (maglev_block->is_loop() &&
        (loop_single_edge_predecessors_.contains(maglev_block) ||
         pre_loop_generator_blocks_.contains(maglev_block))) {
      EmitLoopSinglePredecessorBlock(maglev_block);
    }

    if (maglev_block->is_exception_handler_block()) {
      StartExceptionBlock(maglev_block);
      return maglev::BlockProcessResult::kContinue;
    }

    // SetMaglevInputBlock should have been called before calling Bind, and
    // the current `maglev_input_block` should thus already be `maglev_block`.
    DCHECK_EQ(__ maglev_input_block(), maglev_block);
    if (!__ Bind(turboshaft_block)) {
      // The current block is not reachable.
      return maglev::BlockProcessResult::kSkip;
    }

    if (maglev_block->is_loop()) {
      // The "permutation" stuff that comes afterwards in this function doesn't
      // apply to loops, since loops always have 2 predecessors in Turboshaft,
      // and in both Turboshaft and Maglev, the backedge is always the last
      // predecessor, so we never need to reorder phi inputs.
      return maglev::BlockProcessResult::kContinue;
    } else if (maglev_block->is_exception_handler_block()) {
      // We need to emit the CatchBlockBegin at the beginning of this block.
      // Note that if this block has multiple predecessors (because multiple
      // throwing operations are caught by the same catch handler), then edge
      // splitting will have already created CatchBlockBegin operations in the
      // predecessors, and calling `__ CatchBlockBegin` now will actually only
      // emit a Phi of the CatchBlockBegin of the predecessors (which is
      // exactly what we want). See the comment above CatchBlockBegin in
      // TurboshaftAssemblerOpInterface.
      catch_block_begin_ = __ CatchBlockBegin();
    }

    // Because of edge splitting in Maglev (which happens on Bind rather than
    // on Goto), predecessors in the Maglev graph are not always ordered by
    // their position in the graph (ie, block 4 could be the second predecessor
    // and block 5 the first one). However, since we're processing the graph
    // "in order" (because that's how the maglev GraphProcessor works),
    // predecessors in the Turboshaft graph will be ordered by their position
    // in the graph. Additionally, optimizations during the translation (like
    // constant folding by MachineOptimizationReducer) could change control
    // flow and remove predecessors (by changing a Branch into a Goto for
    // instance).
    // We thus compute in {predecessor_permutation_} a map from Maglev
    // predecessor index to Turboshaft predecessor index, and we'll use this
    // later when emitting Phis to reorder their inputs.
    predecessor_permutation_.clear();
    if (maglev_block->has_phi() &&
        // We ignore this for exception phis since they have no inputs in
        // Maglev anyways, and in Turboshaft we rely on {regs_to_vars_} to
        // populate their inputs (and also, Maglev exception blocks have no
        // predecessors).
        !maglev_block->is_exception_handler_block()) {
      ComputePredecessorPermutations(maglev_block, turboshaft_block, false,
                                     false);
    }
    return maglev::BlockProcessResult::kContinue;
  }

  void ComputePredecessorPermutations(maglev::BasicBlock* maglev_block,
                                      Block* turboshaft_block,
                                      bool skip_backedge,
                                      bool ignore_last_predecessor) {
    // This function is only called for loops that need a "single block
    // predecessor" (from EmitLoopSinglePredecessorBlock). The backedge should
    // always be skipped in such cases. Additionally, this means that even
    // when {maglev_block} is a loop, {turboshaft_block} shouldn't be a loop
    // header and should instead be the new single forward predecessor of the
    // loop.
    DCHECK_EQ(skip_backedge, maglev_block->is_loop());
    DCHECK(!turboshaft_block->IsLoop());

    DCHECK(maglev_block->has_phi());
    DCHECK(turboshaft_block->IsBound());
    DCHECK_EQ(__ current_block(), turboshaft_block);

    // Collecting the Maglev predecessors.
    base::SmallVector<const maglev::BasicBlock*, 16> maglev_predecessors;
    maglev_predecessors.resize(maglev_block->predecessor_count());
    for (int i = 0; i < maglev_block->predecessor_count() - skip_backedge;
         ++i) {
      maglev_predecessors[i] = maglev_block->predecessor_at(i);
    }

    predecessor_permutation_.resize(maglev_block->predecessor_count(),
                                    Block::kInvalidPredecessorIndex);
    int index = turboshaft_block->PredecessorCount() - 1;
    // Iterating predecessors from the end (because it's simpler and more
    // efficient in Turboshaft).
    for (const Block* pred : turboshaft_block->PredecessorsIterable()) {
      if (ignore_last_predecessor &&
          index == turboshaft_block->PredecessorCount() - 1) {
        // When generator resumes bypass loop headers, we add an additional
        // predecessor to the header's predecessor (called {pred_for_generator}
        // in EmitLoopSinglePredecessorBlock). This block doesn't have a Maglev
        // origin, so we have to skip it here. To compensate,
        // MakePhiMaybePermuteInputs will take an additional input for these
        // cases.
        index--;
        continue;
      }
      // Finding out to which Maglev predecessor {pred} corresponds.
      const maglev::BasicBlock* orig = __ GetMaglevOrigin(pred);
      auto orig_index = *base::index_of(maglev_predecessors, orig);

      predecessor_permutation_[orig_index] = index;
      index--;
    }
    DCHECK_EQ(index, -1);
  }
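
  // Example (illustrative): suppose a merge block has Maglev predecessors
  // [B4, B5] (in that order), but, because of edge splitting, the Turboshaft
  // merge was reached first from B5's translation and then from B4's. Then
  // {predecessor_permutation_} is computed as {0 -> 1, 1 -> 0}, and
  // MakePhiMaybePermuteInputs will emit Phi(value from B5, value from B4)
  // instead of using the Maglev input order.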

  // Exception Phis are a bit special in Maglev: they have no predecessors, and
  // get populated on Throw based on values in the FrameState, which can be raw
  // Int32/Float64. However, they are always Tagged, which means that retagging
  // happens when they are populated. This can lead to exception Phis having a
  // mix of tagged and untagged predecessors (the latter would be automatically
  // retagged). When this happens, we need to manually retag all of the
  // predecessors of the exception Phis. To do so:
  //
  //  - If {block} has a single predecessor, it means that it won't have
  //    exception "phis" per se, but just values that have to be retagged.
  //
  //  - If {block} has multiple predecessors, then we need to do the retagging
  //    in the predecessors. It's a bit annoying because we've already bound
  //    and finalized all of the predecessors by now. So, we create new
  //    predecessor blocks in which we insert the taggings, patch the old
  //    predecessors to point to the new ones, and update the predecessors of
  //    {block}.
  void StartExceptionBlock(maglev::BasicBlock* maglev_catch_handler) {
    Block* turboshaft_catch_handler = Map(maglev_catch_handler);
    if (turboshaft_catch_handler->PredecessorCount() == 0) {
      // Some Assembler optimizations made this catch handler not be actually
      // reachable.
      return;
    }
    if (turboshaft_catch_handler->PredecessorCount() == 1) {
      StartSinglePredecessorExceptionBlock(maglev_catch_handler,
                                           turboshaft_catch_handler);
    } else {
      StartMultiPredecessorExceptionBlock(maglev_catch_handler,
                                          turboshaft_catch_handler);
    }
  }
  void StartSinglePredecessorExceptionBlock(
      maglev::BasicBlock* maglev_catch_handler,
      Block* turboshaft_catch_handler) {
    if (!__ Bind(turboshaft_catch_handler)) return;
    catch_block_begin_ = __ CatchBlockBegin();
    if (!maglev_catch_handler->has_phi()) return;
    InsertTaggingForPhis(maglev_catch_handler);
  }
  // InsertTaggingForPhis makes sure that all of the inputs of the exception
  // phis of {maglev_catch_handler} are tagged. If some aren't tagged, it
  // inserts a tagging node in the current block and updates the corresponding
  // Variable.
  void InsertTaggingForPhis(maglev::BasicBlock* maglev_catch_handler) {
    DCHECK(maglev_catch_handler->has_phi());

    IterCatchHandlerPhis(maglev_catch_handler, [&](interpreter::Register owner,
                                                   Variable var) {
      V<Any> ts_idx = __ GetVariable(var);
      DCHECK(maglev_representations_.contains(ts_idx));
      switch (maglev_representations_[ts_idx]) {
        case maglev::ValueRepresentation::kTagged:
          // Already tagged, nothing to do.
          break;
        case maglev::ValueRepresentation::kInt32:
          __ SetVariable(var, __ ConvertInt32ToNumber(V<Word32>::Cast(ts_idx)));
          break;
        case maglev::ValueRepresentation::kUint32:
          __ SetVariable(var,
                         __ ConvertUint32ToNumber(V<Word32>::Cast(ts_idx)));
          break;
        case maglev::ValueRepresentation::kFloat64:
          __ SetVariable(
              var,
              Float64ToTagged(
                  V<Float64>::Cast(ts_idx),
                  ConversionMode::kCanonicalizeSmi));
          break;
        case maglev::ValueRepresentation::kHoleyFloat64:
          __ SetVariable(
              var,
              HoleyFloat64ToTagged(V<Float64>::Cast(ts_idx),
                                   ConversionMode::kCanonicalizeSmi));
          break;
        case maglev::ValueRepresentation::kIntPtr:
          __ SetVariable(var,
                         __ ConvertIntPtrToNumber(V<WordPtr>::Cast(ts_idx)));
      }
    });
  }
  void StartMultiPredecessorExceptionBlock(
      maglev::BasicBlock* maglev_catch_handler,
      Block* turboshaft_catch_handler) {
    if (!maglev_catch_handler->has_phi()) {
      // The very simple case: the catch handler didn't have any Phis, we don't
      // have to do anything complex.
      if (!__ Bind(turboshaft_catch_handler)) return;
      catch_block_begin_ = __ CatchBlockBegin();
      return;
    }

    // Inserting the tagging in all of the predecessors.
    auto predecessors = turboshaft_catch_handler->Predecessors();
    turboshaft_catch_handler->ResetAllPredecessors();
    base::SmallVector<V<Object>, 16> catch_block_begins;
    for (Block* predecessor : predecessors) {
      // Recording the CatchBlockBegin of this predecessor.
      V<Object> catch_begin = predecessor->begin();
      DCHECK(Asm().Get(catch_begin).template Is<CatchBlockBeginOp>());
      catch_block_begins.push_back(catch_begin);

      TagExceptionPhiInputsForBlock(predecessor, maglev_catch_handler,
                                    turboshaft_catch_handler);
    }

    // Finally binding the catch handler.
    __ Bind(turboshaft_catch_handler);

    // We now need to insert a Phi for the CatchBlockBegins of the
    // predecessors (usually, we would just call `__ CatchBlockBegin`, which
    // takes care of creating a Phi node if necessary, but this won't work
    // here, because this mechanism expects the CatchBlockBegin to be the 1st
    // instruction of the predecessors, and it isn't the case since the
    // predecessors are now the blocks with the tagging).
    catch_block_begin_ = __ Phi(base::VectorOf(catch_block_begins));
  }
  void TagExceptionPhiInputsForBlock(Block* old_block,
                                     maglev::BasicBlock* maglev_catch_handler,
                                     Block* turboshaft_catch_handler) {
    DCHECK(maglev_catch_handler->has_phi());

    // We start by patching in place the final Goto of {old_block} to jump to a
    // new block (in which we'll insert the tagging).
    Block* new_block = __ NewBlock();
    const GotoOp& old_goto =
        old_block->LastOperation(__ output_graph()).Cast<GotoOp>();
    DCHECK_EQ(old_goto.destination, turboshaft_catch_handler);
    __ output_graph().Replace<GotoOp>(__ output_graph().Index(old_goto),
                                      new_block, /* is_backedge */ false);
    __ AddPredecessor(old_block, new_block, false);

    // Now, we bind the new block and insert the taggings.
    __ BindReachable(new_block);
    InsertTaggingForPhis(maglev_catch_handler);

    // Finally, we just go from this block to the catch handler.
    __ Goto(turboshaft_catch_handler);
  }
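
  // The rewiring above, in picture form (illustrative):
  //
  //   Before:   old_block -----------------------------> catch_handler
  //
  //   After:    old_block --> new_block (retaggings) --> catch_handler
  //
  // The CatchBlockBegin of {old_block} is collected by the caller
  // (StartMultiPredecessorExceptionBlock) and merged into a single Phi once
  // the catch handler itself is bound.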

  void EmitLoopSinglePredecessorBlock(maglev::BasicBlock* maglev_loop_header) {
    DCHECK(maglev_loop_header->is_loop());

    bool has_special_generator_handling = false;
    V<Word32> switch_var_first_input;
    if (pre_loop_generator_blocks_.contains(maglev_loop_header)) {
      // This loop header used to be bypassed by generator resume edges. It
      // will now act as a secondary switch for the generator resumes.
      std::vector<GeneratorSplitEdge>& generator_preds =
          pre_loop_generator_blocks_[maglev_loop_header];
      // {generator_preds} contains all of the edges that were bypassing this
      // loop header. Rather than adding that many predecessors to the loop
      // header, we create a single predecessor, {pred_for_generator}, to
      // which all of the edges of {generator_preds} will go.
      Block* pred_for_generator = __ NewBlock();

      for (GeneratorSplitEdge pred : generator_preds) {
        __ Bind(pred.pre_loop_dst);
        __ SetVariable(header_switch_input_,
                       __ Word32Constant(pred.switch_value));
        __ Goto(pred_for_generator);
      }

      __ Bind(pred_for_generator);
      switch_var_first_input = __ GetVariable(header_switch_input_);
      DCHECK(switch_var_first_input.valid());

      BuildJump(maglev_loop_header);

      has_special_generator_handling = true;
    }

    DCHECK(loop_single_edge_predecessors_.contains(maglev_loop_header));
    Block* loop_pred = loop_single_edge_predecessors_[maglev_loop_header];
    __ Bind(loop_pred);

    if (maglev_loop_header->has_phi()) {
      ComputePredecessorPermutations(maglev_loop_header, loop_pred, true,
                                     has_special_generator_handling);

      // Now we need to emit Phis (one per loop phi in {block}, which should
      // contain the same input except for the backedge).
      loop_phis_first_input_.clear();
      loop_phis_first_input_index_ = 0;
      for (maglev::Phi* phi : *maglev_loop_header->phis()) {
        constexpr int kSkipBackedge = 1;
        int input_count = phi->input_count() - kSkipBackedge;

        if (has_special_generator_handling) {
          // Adding an input to the Phis to account for the additional
          // generator-related predecessor.
          V<Any> additional_input;
          switch (phi->value_representation()) {
            case maglev::ValueRepresentation::kTagged:
              additional_input = dummy_object_input_;
              break;
            case maglev::ValueRepresentation::kInt32:
            case maglev::ValueRepresentation::kUint32:
              additional_input = dummy_word32_input_;
              break;
            case maglev::ValueRepresentation::kFloat64:
            case maglev::ValueRepresentation::kHoleyFloat64:
              additional_input = dummy_float64_input_;
              break;
            case maglev::ValueRepresentation::kIntPtr:
              // Maglev doesn't have IntPtr Phis.
              UNREACHABLE();
          }
          loop_phis_first_input_.push_back(
              MakePhiMaybePermuteInputs(phi, input_count, additional_input));
        } else {
          loop_phis_first_input_.push_back(
              MakePhiMaybePermuteInputs(phi, input_count));
        }
      }
    }

    if (has_special_generator_handling) {
      // We now emit the Phi that will be used in the loop's main switch.
      base::SmallVector<OpIndex, 16> inputs;
      constexpr int kSkipGeneratorPredecessor = 1;

      // We insert a default input for all of the non-generator predecessors.
      int input_count_without_generator =
          loop_pred->PredecessorCount() - kSkipGeneratorPredecessor;
      inputs.insert(inputs.begin(), input_count_without_generator,
                    loop_default_generator_value_);

      // And we insert the "true" input for the generator predecessor (which
      // is {pred_for_generator} above).
      DCHECK(switch_var_first_input.valid());
      inputs.push_back(switch_var_first_input);

      __ SetVariable(
          header_switch_input_,
          __ Phi(base::VectorOf(inputs), RegisterRepresentation::Word32()));
    }

    // Actually jumping to the loop.
    __ Goto(Map(maglev_loop_header));
  }

  void PostPhiProcessing() {
    // Loop headers that are bypassed because of generators need to be turned
    // into secondary generator switches (so as to not be bypassed anymore).
    // Concretely, we split the loop headers in half by inserting a Switch
    // right after the loop phis have been emitted. Here is a visual
    // representation of what's happening:
    //
    // Before:
    //
    //                 |   ----------------------------
    //                 |   |                          |
    //                 |   |                          |
    //                 v   v                          |
    //          +------------------------+            |
    //          | phi_1(...)             |            |
    //          | ...                    |            |
    //          | phi_k(...)             |            |
    //          | <some op 1>            |            |
    //          | ...                    |            |
    //          | <some op n>            |            |
    //          | Branch                 |            |
    //          +------------------------+            |
    //                      |                         |
    //                      |                         |
    //                      v                         |
    //
    //
    // After:
    //
    //
    //                 |   -----------------------------------
    //                 |   |                                 |
    //                 |   |                                 |
    //                 v   v                                 |
    //          +------------------------+                   |
    //          | phi_1(...)             |                   |
    //          | ...                    |                   |
    //          | phi_k(...)             |                   |
    //          | Switch                 |                   |
    //          +------------------------+                   |
    //            /      |       |      \                    |
    //           /       |       |       \                   |
    //          /        |       |        \                  |
    //         v         v       v         v                 |
    //                +------------------+                   |
    //                | <some op 1>      |                   |
    //                | ...              |                   |
    //                | <some op n>      |                   |
    //                | Branch           |                   |
    //                +------------------+                   |
    //                        |                              |
    //                        |                              |
    //                        v                              |
    //
    //
    // Since `PostPhiProcessing` is called right after all phis have been
    // emitted, now is thus the time to split the loop header.

    if (on_generator_switch_loop_) {
      const maglev::BasicBlock* maglev_loop_header = __ maglev_input_block();
      DCHECK(maglev_loop_header->is_loop());
      std::vector<GeneratorSplitEdge>& generator_preds =
          pre_loop_generator_blocks_[maglev_loop_header];

      compiler::turboshaft::SwitchOp::Case* cases =
          __ output_graph().graph_zone()
              -> AllocateArray<compiler::turboshaft::SwitchOp::Case>(
                  generator_preds.size());

      for (int i = 0; static_cast<unsigned int>(i) < generator_preds.size();
           i++) {
        GeneratorSplitEdge pred = generator_preds[i];
        cases[i] = {pred.switch_value, pred.inside_loop_target,
                    BranchHint::kNone};
      }
      Block* default_block = __ NewBlock();
      __ Switch(__ GetVariable(header_switch_input_),
                base::VectorOf(cases, generator_preds.size()), default_block);

      // We now bind {default_block}. It will contain the rest of the loop
      // header. The MaglevGraphProcessor will continue to visit the header's
      // body as if nothing happened.
      __ Bind(default_block);
    }
  }

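  // The Process() methods below translate individual Maglev nodes. For
  // constants, the translation is a direct mapping to the corresponding
  // Turboshaft constant operation; SetMap records the resulting value so
  // that later nodes can refer to it via Map().
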
  maglev::ProcessResult Process(maglev::Constant* node,
                                const maglev::ProcessingState& state) {
    SetMap(node, __ HeapConstant(node->object().object()));
    return maglev::ProcessResult::kContinue;
  }
  maglev::ProcessResult Process(maglev::RootConstant* node,
                                const maglev::ProcessingState& state) {
    SetMap(node, __ HeapConstant(MakeRef(broker_, node->DoReify(local_isolate_))
                                     .AsHeapObject()
                                     .object()));
    return maglev::ProcessResult::kContinue;
  }
  maglev::ProcessResult Process(maglev::Int32Constant* node,
                                const maglev::ProcessingState& state) {
    SetMap(node, __ Word32Constant(node->value()));
    return maglev::ProcessResult::kContinue;
  }
  maglev::ProcessResult Process(maglev::Uint32Constant* node,
                                const maglev::ProcessingState& state) {
    SetMap(node, __ Word32SignHintUnsigned(__ Word32Constant(node->value())));
    return maglev::ProcessResult::kContinue;
  }
  maglev::ProcessResult Process(maglev::Float64Constant* node,
                                const maglev::ProcessingState& state) {
    SetMap(node, __ Float64Constant(node->value()));
    return maglev::ProcessResult::kContinue;
  }
  maglev::ProcessResult Process(maglev::SmiConstant* node,
                                const maglev::ProcessingState& state) {
    SetMap(node, __ SmiConstant(node->value()));
    return maglev::ProcessResult::kContinue;
  }
  maglev::ProcessResult Process(maglev::TaggedIndexConstant* node,
                                const maglev::ProcessingState& state) {
    // TODO(dmercadier): should this really be a SmiConstant, or rather a
    // Word32Constant?
    SetMap(node, __ SmiConstant(i::Tagged<Smi>(node->value().ptr())));
    return maglev::ProcessResult::kContinue;
  }
  maglev::ProcessResult Process(maglev::TrustedConstant* node,
                                const maglev::ProcessingState& state) {
    SetMap(node, __ TrustedHeapConstant(node->object().object()));
    return maglev::ProcessResult::kContinue;
  }
  maglev::ProcessResult Process(maglev::InitialValue* node,
                                const maglev::ProcessingState& state) {
    // TODO(dmercadier): InitialValues are much simpler in Maglev because they
    // are mapped directly to interpreter registers, whereas Turbofan changes
    // the indices, making everything more complex. We should try to have the
    // same InitialValues in Turboshaft as in Maglev, in order to simplify
    // things.
#ifdef DEBUG
    // We cannot use strdup or something that simple for {debug_name}, because
    // it has to be zone allocated rather than heap-allocated, since it won't
    // be freed and this would thus cause a leak.
    std::string reg_string_name = node->source().ToString();
    base::Vector<char> debug_name_arr =
        graph_zone()->NewVector<char>(reg_string_name.length() + /* \n */ 1);
    snprintf(debug_name_arr.data(), debug_name_arr.length(), "%s",
             reg_string_name.c_str());
    char* debug_name = debug_name_arr.data();
#else
    char* debug_name = nullptr;
#endif
    interpreter::Register source = node->source();
    V<Object> value;
    if (source.is_function_closure()) {
      // The function closure is a Parameter rather than an OsrValue even when
      // OSR-compiling.
      value = __ Parameter(Linkage::kJSCallClosureParamIndex,
                           RegisterRepresentation::Tagged(), debug_name);
    } else if (maglev_compilation_unit_->is_osr()) {
      int index;
      if (source.is_current_context()) {
        index = Linkage::kOsrContextSpillSlotIndex;
      } else if (source == interpreter::Register::virtual_accumulator()) {
        index = Linkage::kOsrAccumulatorRegisterIndex;
      } else if (source.is_parameter()) {
        index = source.ToParameterIndex();
      } else {
        // For registers, recreate the index computed by FillWithOsrValues in
        // BytecodeGraphBuilder.
        index = source.index() + InterpreterFrameConstants::kExtraSlotCount +
                maglev_compilation_unit_->parameter_count();
      }
      value = __ OsrValue(index);
    } else {
      int index = source.ToParameterIndex();
      if (source.is_current_context()) {
        index = Linkage::GetJSCallContextParamIndex(
            maglev_compilation_unit_->parameter_count());
      } else {
        index = source.ToParameterIndex();
      }
      value = __ Parameter(index, RegisterRepresentation::Tagged(), debug_name);
    }
    SetMap(node, value);
    return maglev::ProcessResult::kContinue;
  }
  maglev::ProcessResult Process(maglev::RegisterInput* node,
                                const maglev::ProcessingState& state) {
    // The only register that reaches the translation via RegisterInput is
    // kJavaScriptCallNewTargetRegister, for which we created
    // {new_target_param_} in PreProcessGraph.
    DCHECK(new_target_param_.valid());
    SetMap(node, new_target_param_);
    return maglev::ProcessResult::kContinue;
  }

  maglev::ProcessResult Process(maglev::FunctionEntryStackCheck* node,
                                const maglev::ProcessingState& state) {
    GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
    __ JSFunctionEntryStackCheck(native_context(), frame_state);
    return maglev::ProcessResult::kContinue;
  }

  maglev::ProcessResult Process(maglev::Phi* node,
                                const maglev::ProcessingState& state) {
    int input_count = node->input_count();
    RegisterRepresentation rep =
        RegisterRepresentationFor(node->value_representation());
    if (node->is_exception_phi()) {
      if (node->owner() == interpreter::Register::virtual_accumulator()) {
        DCHECK(catch_block_begin_.valid());
        SetMap(node, catch_block_begin_);
      } else {
        Variable var = regs_to_vars_[node->owner().index()];
        SetMap(node, __ GetVariable(var));
        // {var} won't be used anymore once we've created the mapping from
        // {node} to its value. We thus reset it, in order to avoid Phis being
        // created for {var} at later merge points.
        __ SetVariable(var, V<Object>::Invalid());
      }
      return maglev::ProcessResult::kContinue;
    }
    if (__ current_block()->IsLoop()) {
      DCHECK(state.block()->is_loop());
      OpIndex first_phi_input;
      if (state.block()->predecessor_count() > 2 ||
          generator_analyzer_.HeaderIsBypassed(state.block())) {
        // This loop has multiple forward edges in Maglev, so we should have
        // created an intermediate block in Turboshaft, which will be the only
        // predecessor of the Turboshaft loop, and from which we'll find the
        // first input for this loop phi.
        DCHECK_EQ(loop_phis_first_input_.size(),
                  static_cast<size_t>(state.block()->phis()->LengthForTest()));
        DCHECK_GE(loop_phis_first_input_index_, 0);
        DCHECK_LT(loop_phis_first_input_index_,
                  static_cast<int>(loop_phis_first_input_.size()));
        DCHECK(loop_single_edge_predecessors_.contains(state.block()));
        DCHECK_EQ(loop_single_edge_predecessors_[state.block()],
                  __ current_block()->LastPredecessor());
        first_phi_input =
            loop_phis_first_input_[loop_phis_first_input_index_++];
      } else {
        DCHECK_EQ(input_count, 2);
        DCHECK_EQ(state.block()->predecessor_count(), 2);
        first_phi_input = Map(node->input(0));
      }
      SetMap(node, __ PendingLoopPhi(first_phi_input, rep));
    } else {
      SetMap(node, MakePhiMaybePermuteInputs(node, input_count));
    }
    return maglev::ProcessResult::kContinue;
  }

  V<Any> MakePhiMaybePermuteInputs(
      maglev::ValueNode* maglev_node, int maglev_input_count,
      OptionalV<Any> additional_input = OptionalV<Any>::Nullopt()) {
    DCHECK(!predecessor_permutation_.empty());

    base::SmallVector<OpIndex, 16> inputs;
    // Note that it's important to use `current_block()->PredecessorCount()` as
    // the size of {inputs}, because some Maglev predecessors could have been
    // dropped by Turboshaft during the translation (and thus, `input_count`
    // might be too large).
    inputs.resize(__ current_block()->PredecessorCount(), {});
    for (int i = 0; i < maglev_input_count; ++i) {
      if (predecessor_permutation_[i] != Block::kInvalidPredecessorIndex) {
        inputs[predecessor_permutation_[i]] =
            MapPhiInput(maglev_node->input(i), predecessor_permutation_[i]);
      }
    }

    if (additional_input.has_value()) {
      // When a loop header was bypassed by a generator resume, we insert an
      // additional predecessor to the loop, and thus need an additional input
      // for the Phis.
      inputs[inputs.size() - 1] = additional_input.value();
    }

    return __ Phi(
        base::VectorOf(inputs),
        RegisterRepresentationFor(maglev_node->value_representation()));
  }

  maglev::ProcessResult Process(maglev::Call* node,
                                const maglev::ProcessingState& state) {
    GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
    V<Object> function = Map(node->function());
    V<Context> context = Map(node->context());

    Builtin builtin;
    switch (node->target_type()) {
      case maglev::Call::TargetType::kAny:
        switch (node->receiver_mode()) {
          case ConvertReceiverMode::kNullOrUndefined:
            builtin = Builtin::kCall_ReceiverIsNullOrUndefined;
            break;
          case ConvertReceiverMode::kNotNullOrUndefined:
            builtin = Builtin::kCall_ReceiverIsNotNullOrUndefined;
            break;
          case ConvertReceiverMode::kAny:
            builtin = Builtin::kCall_ReceiverIsAny;
            break;
        }
        break;
      case maglev::Call::TargetType::kJSFunction:
        switch (node->receiver_mode()) {
          case ConvertReceiverMode::kNullOrUndefined:
            builtin = Builtin::kCallFunction_ReceiverIsNullOrUndefined;
            break;
          case ConvertReceiverMode::kNotNullOrUndefined:
            builtin = Builtin::kCallFunction_ReceiverIsNotNullOrUndefined;
            break;
          case ConvertReceiverMode::kAny:
            builtin = Builtin::kCallFunction_ReceiverIsAny;
            break;
        }
        break;
    }

    base::SmallVector<OpIndex, 16> arguments;
    arguments.push_back(function);
    arguments.push_back(__ Word32Constant(node->num_args()));
    for (auto arg : node->args()) {
      arguments.push_back(Map(arg));
    }
    arguments.push_back(context);

    GENERATE_AND_MAP_BUILTIN_CALL(node, builtin, frame_state,
                                  base::VectorOf(arguments), node->num_args());

    return maglev::ProcessResult::kContinue;
  }
  maglev::ProcessResult Process(maglev::CallKnownJSFunction* node,
                                const maglev::ProcessingState& state) {
    GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
    V<Object> callee = Map(node->closure());
    int actual_parameter_count = JSParameterCount(node->num_args());

    if (node->shared_function_info().HasBuiltinId()) {
      // Note that there is no need for a ThrowingScope here:
      // GenerateBuiltinCall takes care of creating one.
      base::SmallVector<OpIndex, 16> arguments;
      arguments.push_back(callee);
      arguments.push_back(Map(node->new_target()));
      arguments.push_back(__ Word32Constant(actual_parameter_count));
#ifdef V8_JS_LINKAGE_INCLUDES_DISPATCH_HANDLE
      arguments.push_back(
          __ Word32Constant(kPlaceholderDispatchHandle.value()));
#endif
      arguments.push_back(Map(node->receiver()));
      for (int i = 0; i < node->num_args(); i++) {
        arguments.push_back(Map(node->arg(i)));
      }
      // Setting missing arguments to Undefined.
      for (int i = actual_parameter_count; i < node->expected_parameter_count();
           i++) {
        arguments.push_back(undefined_value_);
      }
      arguments.push_back(Map(node->context()));
      GENERATE_AND_MAP_BUILTIN_CALL(
          node, node->shared_function_info().builtin_id(), frame_state,
          base::VectorOf(arguments),
          std::max<int>(actual_parameter_count,
                        node->expected_parameter_count()));
    } else {
      ThrowingScope throwing_scope(this, node);
      base::SmallVector<OpIndex, 16> arguments;
      arguments.push_back(Map(node->receiver()));
      for (int i = 0; i < node->num_args(); i++) {
        arguments.push_back(Map(node->arg(i)));
      }
      // Setting missing arguments to Undefined.
      for (int i = actual_parameter_count; i < node->expected_parameter_count();
           i++) {
        arguments.push_back(undefined_value_);
      }
      arguments.push_back(Map(node->new_target()));
      arguments.push_back(__ Word32Constant(actual_parameter_count));
#ifdef V8_JS_LINKAGE_INCLUDES_DISPATCH_HANDLE
      arguments.push_back(
          __ Word32Constant(kPlaceholderDispatchHandle.value()));
#endif

      // Load the context from {callee}.
      OpIndex context =
          __ LoadField(callee, AccessBuilder::ForJSFunctionContext());
      arguments.push_back(context);

      const CallDescriptor* descriptor = Linkage::GetJSCallDescriptor(
          graph_zone(), false,
          std::max<int>(actual_parameter_count,
                        node->expected_parameter_count()),
          CallDescriptor::kNeedsFrameState);

      LazyDeoptOnThrow lazy_deopt_on_throw = ShouldLazyDeoptOnThrow(node);

      BAILOUT_IF_TOO_MANY_ARGUMENTS_FOR_CALL(arguments.size());
      SetMap(node, __ Call(V<CallTarget>::Cast(callee), frame_state,
                           base::VectorOf(arguments),
                           TSCallDescriptor::Create(descriptor, CanThrow::kYes,
                                                    lazy_deopt_on_throw,
                                                    graph_zone())));
    }

    return maglev::ProcessResult::kContinue;
  }
  maglev::ProcessResult Process(maglev::CallKnownApiFunction* node,
                                const maglev::ProcessingState& state) {
    GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());

    if (node->inline_builtin()) {
      DCHECK(v8_flags.maglev_inline_api_calls);
      // TODO(dmercadier, 40912714, 42203760): The flag maglev_inline_api_calls
      // is currently experimental, and it's not clear at this point if it will
      // even become non-experimental, so we currently don't support it in the
      // Maglev->Turboshaft translation. Note that a quick-fix would be to
      // treat kNoProfilingInlined like kNoProfiling, although this would be
      // slower than desired.
      UNIMPLEMENTED();
    }

    V<Object> target =
        __ HeapConstant(node->function_template_info().AsHeapObject().object());

    ApiFunction function(node->function_template_info().callback(broker_));
    ExternalReference function_ref = ExternalReference::Create(
        &function, ExternalReference::DIRECT_API_CALL);

    base::SmallVector<OpIndex, 16> arguments;
    arguments.push_back(__ ExternalConstant(function_ref));
    arguments.push_back(__ Word32Constant(node->num_args()));
    arguments.push_back(target);
    arguments.push_back(Map(node->receiver()));
    for (maglev::Input arg : node->args()) {
      arguments.push_back(Map(arg));
    }
    arguments.push_back(Map(node->context()));

    Builtin builtin;
    switch (node->mode()) {
      case maglev::CallKnownApiFunction::Mode::kNoProfiling:
        builtin = Builtin::kCallApiCallbackOptimizedNoProfiling;
        break;
      case maglev::CallKnownApiFunction::Mode::kNoProfilingInlined:
        // Handled earlier when checking `node->inline_builtin()`.
        UNREACHABLE();
      case maglev::CallKnownApiFunction::Mode::kGeneric:
        builtin = Builtin::kCallApiCallbackOptimized;
        break;
    }

    int stack_arg_count = node->num_args() + /* implicit receiver */ 1;
    GENERATE_AND_MAP_BUILTIN_CALL(node, builtin, frame_state,
                                  base::VectorOf(arguments), stack_arg_count);

    return maglev::ProcessResult::kContinue;
  }
  V<Any> GenerateBuiltinCall(
      maglev::NodeBase* node, Builtin builtin,
      OptionalV<FrameState> frame_state, base::Vector<const OpIndex> arguments,
      std::optional<int> stack_arg_count = std::nullopt) {
    ThrowingScope throwing_scope(this, node);
    DCHECK(!TooManyArgumentsForCall(arguments.size()));

    Callable callable = Builtins::CallableFor(isolate_, builtin);
    const CallInterfaceDescriptor& descriptor = callable.descriptor();
    CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
        graph_zone(), descriptor,
        stack_arg_count.has_value() ? stack_arg_count.value()
                                    : descriptor.GetStackParameterCount(),
        frame_state.valid() ? CallDescriptor::kNeedsFrameState
                            : CallDescriptor::kNoFlags);
    V<Code> stub_code = __ HeapConstant(callable.code());

    LazyDeoptOnThrow lazy_deopt_on_throw = ShouldLazyDeoptOnThrow(node);

    return __ Call(stub_code, frame_state, base::VectorOf(arguments),
                   TSCallDescriptor::Create(call_descriptor, CanThrow::kYes,
                                            lazy_deopt_on_throw, graph_zone()));
  }
  maglev::ProcessResult Process(maglev::CallBuiltin* node,
                                const maglev::ProcessingState& state) {
    GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());

    base::SmallVector<OpIndex, 16> arguments;
    for (int i = 0; i < node->InputCountWithoutContext(); i++) {
      arguments.push_back(Map(node->input(i)));
    }

    if (node->has_feedback()) {
      V<Any> feedback_slot;
      switch (node->slot_type()) {
        case maglev::CallBuiltin::FeedbackSlotType::kTaggedIndex:
          feedback_slot = __ TaggedIndexConstant(node->feedback().index());
          break;
        case maglev::CallBuiltin::FeedbackSlotType::kIntPtr:
          feedback_slot = __ WordPtrConstant(node->feedback().index());
          break;
      }
      arguments.push_back(feedback_slot);
      arguments.push_back(__ HeapConstant(node->feedback().vector));
    }

    auto descriptor = Builtins::CallInterfaceDescriptorFor(node->builtin());
    if (descriptor.HasContextParameter()) {
      arguments.push_back(Map(node->context_input()));
    }

    int stack_arg_count =
        node->InputCountWithoutContext() - node->InputsInRegisterCount();
    if (node->has_feedback()) {
      // We might need to take the feedback slot and vector into account for
      // {stack_arg_count}. There are three possibilities:
      // 1. Feedback slot and vector are in register.
      // 2. Feedback slot is in register and vector is on stack.
      // 3. Feedback slot and vector are on stack.
      int slot_index = node->InputCountWithoutContext();
      int vector_index = slot_index + 1;
      if (vector_index < descriptor.GetRegisterParameterCount()) {
        // stack_arg_count is already correct.
      } else if (vector_index == descriptor.GetRegisterParameterCount()) {
        // The feedback vector is on the stack.
        stack_arg_count += 1;
      } else {
        // The feedback slot and vector are on the stack.
        stack_arg_count += 2;
      }
    }

    BAILOUT_IF_TOO_MANY_ARGUMENTS_FOR_CALL(arguments.size());
    V<Any> call_idx =
        GenerateBuiltinCall(node, node->builtin(), frame_state,
                            base::VectorOf(arguments), stack_arg_count);
    SetMapMaybeMultiReturn(node, call_idx);

    return maglev::ProcessResult::kContinue;
  }
  maglev::ProcessResult Process(maglev::CallRuntime* node,
                                const maglev::ProcessingState& state) {
    ThrowingScope throwing_scope(this, node);
    LazyDeoptOnThrow lazy_deopt_on_throw = ShouldLazyDeoptOnThrow(node);

    auto c_entry_stub = __ CEntryStubConstant(isolate_, node->ReturnCount());

    CallDescriptor* call_descriptor = Linkage::GetRuntimeCallDescriptor(
        graph_zone(), node->function_id(), node->num_args(),
        Operator::kNoProperties, CallDescriptor::kNeedsFrameState,
        lazy_deopt_on_throw);

    base::SmallVector<OpIndex, 16> arguments;
    for (int i = 0; i < node->num_args(); i++) {
      arguments.push_back(Map(node->arg(i)));
    }

    arguments.push_back(
        __ ExternalConstant(ExternalReference::Create(node->function_id())));
    arguments.push_back(__ Word32Constant(node->num_args()));

    arguments.push_back(Map(node->context()));

    OptionalV<FrameState> frame_state = OptionalV<FrameState>::Nullopt();
    if (call_descriptor->NeedsFrameState()) {
      GET_FRAME_STATE_MAYBE_ABORT(frame_state_value, node->lazy_deopt_info());
      frame_state = frame_state_value;
    }
    DCHECK_IMPLIES(lazy_deopt_on_throw == LazyDeoptOnThrow::kYes,
                   frame_state.has_value());

    BAILOUT_IF_TOO_MANY_ARGUMENTS_FOR_CALL(arguments.size());
    V<Any> call_idx =
        __ Call(c_entry_stub, frame_state, base::VectorOf(arguments),
                TSCallDescriptor::Create(call_descriptor, CanThrow::kYes,
                                         lazy_deopt_on_throw, graph_zone()));
    SetMapMaybeMultiReturn(node, call_idx);

    return maglev::ProcessResult::kContinue;
  }

  maglev::ProcessResult Process(maglev::ThrowReferenceErrorIfHole* node,
                                const maglev::ProcessingState& state) {
    ThrowingScope throwing_scope(this, node);

    IF (UNLIKELY(RootEqual(node->value(), RootIndex::kTheHoleValue))) {
      GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
      __ CallRuntime_ThrowAccessedUninitializedVariable(
          isolate_, frame_state, native_context(), ShouldLazyDeoptOnThrow(node),
          __ HeapConstant(node->name().object()));
      // TODO(dmercadier): use RuntimeAbort here instead of Unreachable.
      // However, before doing so, RuntimeAbort should be changed so that 1)
      // it's a block terminator and 2) it doesn't call the runtime when
      // v8_flags.trap_on_abort is true.
      __ Unreachable();
    }
    return maglev::ProcessResult::kContinue;
  }

  maglev::ProcessResult Process(maglev::ThrowIfNotSuperConstructor* node,
                                const maglev::ProcessingState& state) {
    ThrowingScope throwing_scope(this, node);

    V<HeapObject> constructor = Map(node->constructor());
    V<i::Map> map = __ LoadMapField(constructor);
    static_assert(Map::kBitFieldOffsetEnd + 1 - Map::kBitFieldOffset == 1);
    V<Word32> bitfield =
        __ template LoadField<Word32>(map, AccessBuilder::ForMapBitField());
    IF_NOT (LIKELY(__ Word32BitwiseAnd(bitfield,
                                       Map::Bits1::IsConstructorBit::kMask))) {
      GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
      __ CallRuntime_ThrowNotSuperConstructor(
          isolate_, frame_state, native_context(), ShouldLazyDeoptOnThrow(node),
          constructor, Map(node->function()));
      // TODO(dmercadier): use RuntimeAbort here instead of Unreachable.
      // However, before doing so, RuntimeAbort should be changed so that 1)
      // it's a block terminator and 2) it doesn't call the runtime when
      // v8_flags.trap_on_abort is true.
      __ Unreachable();
    }

    return maglev::ProcessResult::kContinue;
  }

  maglev::ProcessResult Process(maglev::ThrowSuperAlreadyCalledIfNotHole* node,
                                const maglev::ProcessingState& state) {
    ThrowingScope throwing_scope(this, node);

    IF_NOT (LIKELY(__ RootEqual(Map(node->value()), RootIndex::kTheHoleValue,
                                isolate_))) {
      GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
      __ CallRuntime_ThrowSuperAlreadyCalledError(isolate_, frame_state,
                                                  native_context(),
                                                  ShouldLazyDeoptOnThrow(node));
      // TODO(dmercadier): use RuntimeAbort here instead of Unreachable.
      // However, before doing so, RuntimeAbort should be changed so that 1)
      // it's a block terminator and 2) it doesn't call the runtime when
      // v8_flags.trap_on_abort is true.
      __ Unreachable();
    }

    return maglev::ProcessResult::kContinue;
  }

  maglev::ProcessResult Process(maglev::ThrowSuperNotCalledIfHole* node,
                                const maglev::ProcessingState& state) {
    ThrowingScope throwing_scope(this, node);

    IF (UNLIKELY(__ RootEqual(Map(node->value()), RootIndex::kTheHoleValue,
                              isolate_))) {
      GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
      __ CallRuntime_ThrowSuperNotCalled(isolate_, frame_state,
                                         native_context(),
                                         ShouldLazyDeoptOnThrow(node));
      // TODO(dmercadier): use RuntimeAbort here instead of Unreachable.
      // However, before doing so, RuntimeAbort should be changed so that 1)
      // it's a block terminator and 2) it doesn't call the runtime when
      // v8_flags.trap_on_abort is true.
      __ Unreachable();
    }

    return maglev::ProcessResult::kContinue;
  }

  maglev::ProcessResult Process(maglev::ThrowIfNotCallable* node,
                                const maglev::ProcessingState& state) {
    ThrowingScope throwing_scope(this, node);

    V<Object> value = Map(node->value());

    IF_NOT (LIKELY(__ ObjectIsCallable(value))) {
      GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
      __ CallRuntime_ThrowCalledNonCallable(
          isolate_, frame_state, native_context(), ShouldLazyDeoptOnThrow(node),
          value);
      // TODO(dmercadier): use RuntimeAbort here instead of Unreachable.
      // However, before doing so, RuntimeAbort should be changed so that 1)
      // it's a block terminator and 2) it doesn't call the runtime when
      // v8_flags.trap_on_abort is true.
      __ Unreachable();
    }

    return maglev::ProcessResult::kContinue;
  }

  maglev::ProcessResult Process(maglev::CreateFunctionContext* node,
                                const maglev::ProcessingState& state) {
    ThrowingScope throwing_scope(this, node);

    GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
    V<Context> context = Map(node->context());
    V<ScopeInfo> scope_info = __ HeapConstant(node->scope_info().object());
    if (node->scope_type() == FUNCTION_SCOPE) {
      SetMap(node, __ CallBuiltin_FastNewFunctionContextFunction(
                       isolate_, frame_state, context, scope_info,
                       node->slot_count(), ShouldLazyDeoptOnThrow(node)));
    } else {
      DCHECK_EQ(node->scope_type(), EVAL_SCOPE);
      SetMap(node, __ CallBuiltin_FastNewFunctionContextEval(
                       isolate_, frame_state, context, scope_info,
                       node->slot_count(), ShouldLazyDeoptOnThrow(node)));
    }
    return maglev::ProcessResult::kContinue;
  }

  maglev::ProcessResult Process(maglev::FastCreateClosure* node,
                                const maglev::ProcessingState& state) {
    NoThrowingScopeRequired no_throws(node);

    GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
    V<Context> context = Map(node->context());
    V<SharedFunctionInfo> shared_function_info =
        __ HeapConstant(node->shared_function_info().object());
    V<FeedbackCell> feedback_cell =
        __ HeapConstant(node->feedback_cell().object());

    SetMap(node,
           __ CallBuiltin_FastNewClosure(isolate_, frame_state, context,
                                         shared_function_info, feedback_cell));

    return maglev::ProcessResult::kContinue;
  }
  maglev::ProcessResult Process(maglev::CreateClosure* node,
                                const maglev::ProcessingState& state) {
    NoThrowingScopeRequired no_throws(node);

    V<Context> context = Map(node->context());
    V<SharedFunctionInfo> shared_function_info =
        __ HeapConstant(node->shared_function_info().object());
    V<FeedbackCell> feedback_cell =
        __ HeapConstant(node->feedback_cell().object());

    V<JSFunction> closure;
    if (node->pretenured()) {
      closure = __ CallRuntime_NewClosure_Tenured(
          isolate_, context, shared_function_info, feedback_cell);
    } else {
      closure = __ CallRuntime_NewClosure(isolate_, context,
                                          shared_function_info, feedback_cell);
    }

    SetMap(node, closure);

    return maglev::ProcessResult::kContinue;
  }

  maglev::ProcessResult Process(maglev::CallWithArrayLike* node,
                                const maglev::ProcessingState& state) {
    ThrowingScope throwing_scope(this, node);

    GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
    V<Context> context = Map(node->context());
    V<Object> function = Map(node->function());
    V<Object> receiver = Map(node->receiver());
    V<Object> arguments_list = Map(node->arguments_list());

    SetMap(node, __ CallBuiltin_CallWithArrayLike(
                     isolate_, graph_zone(), frame_state, context, receiver,
                     function, arguments_list, ShouldLazyDeoptOnThrow(node)));

    return maglev::ProcessResult::kContinue;
  }

  maglev::ProcessResult Process(maglev::CallWithSpread* node,
                                const maglev::ProcessingState& state) {
    ThrowingScope throwing_scope(this, node);

    GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
    V<Context> context = Map(node->context());
    V<Object> function = Map(node->function());
    V<Object> spread = Map(node->spread());

    base::SmallVector<V<Object>, 16> arguments_no_spread;
    for (auto arg : node->args_no_spread()) {
      arguments_no_spread.push_back(Map(arg));
    }

    SetMap(node, __ CallBuiltin_CallWithSpread(
                     isolate_, graph_zone(), frame_state, context, function,
                     node->num_args_no_spread(), spread,
                     base::VectorOf(arguments_no_spread),
                     ShouldLazyDeoptOnThrow(node)));

    return maglev::ProcessResult::kContinue;
  }

  maglev::ProcessResult Process(maglev::CallForwardVarargs* node,
                                const maglev::ProcessingState& state) {
    ThrowingScope throwing_scope(this, node);

    GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
    V<JSFunction> function = Map(node->function());
    V<Context> context = Map(node->context());

    base::SmallVector<V<Object>, 16> arguments;
    for (auto arg : node->args()) {
      arguments.push_back(Map(arg));
    }
    DCHECK_EQ(node->num_args(), arguments.size());

    Builtin builtin;
    switch (node->target_type()) {
      case maglev::Call::TargetType::kJSFunction:
        builtin = Builtin::kCallFunctionForwardVarargs;
        break;
      case maglev::Call::TargetType::kAny:
        builtin = Builtin::kCallForwardVarargs;
        break;
    }
    V<Object> call = __ CallBuiltin_CallForwardVarargs(
        isolate_, graph_zone(), builtin, frame_state, context, function,
        node->num_args(), node->start_index(), base::VectorOf(arguments),
        ShouldLazyDeoptOnThrow(node));
    SetMap(node, call);
    return maglev::ProcessResult::kContinue;
  }
1781
1783 const maglev::ProcessingState& state) {
1784 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
1786
1787 arguments.push_back(Map(node->function()));
1788 arguments.push_back(Map(node->new_target()));
1789 arguments.push_back(__ Word32Constant(node->num_args()));
1790
1791#ifndef V8_TARGET_ARCH_ARM64
1792 arguments.push_back(__ WordPtrConstant(node->feedback().index()));
1793 arguments.push_back(__ HeapConstant(node->feedback().vector));
1794#endif
1795
1796 for (auto arg : node->args()) {
1797 arguments.push_back(Map(arg));
1798 }
1799
1800 arguments.push_back(Map(node->context()));
1801
1802#ifndef V8_TARGET_ARCH_ARM64
1803 // Construct_WithFeedback can't be called from Turbofan on Arm64, because of
1804 // the stack alignment requirements: the feedback vector is dropped by
1805 // Construct_WithFeedback while the other arguments are passed through to
1806 // Construct. As a result, when the feedback vector is pushed on the stack,
1807 // it should be padded to 16 bytes, but there is no way to express this in
1808 // Turbofan.
1809 // Anyway, long-term we'll want to feedback-specialize Construct in the
1810 // frontend (i.e., probably in Maglev), so we don't really need to adapt
1811 // Turbofan to be able to call Construct_WithFeedback on Arm64.
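// Put differently: on the Construct_WithFeedback path below, the feedback
// vector is counted as one extra stack argument (kFeedbackVector), while
// the Arm64 path calls Builtin::kConstruct with exactly node->num_args()
// stack arguments.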
1812 static constexpr int kFeedbackVector = 1;
1813 int stack_arg_count = node->num_args() + kFeedbackVector;
1814 Builtin builtin = Builtin::kConstruct_WithFeedback;
1815#else
1816 int stack_arg_count = node->num_args();
1817 Builtin builtin = Builtin::kConstruct;
1818#endif
1819
1820 GENERATE_AND_MAP_BUILTIN_CALL(node, builtin, frame_state,
1821 base::VectorOf(arguments), stack_arg_count);
1822
1823 return maglev::ProcessResult::kContinue;
1824 }
1825 maglev::ProcessResult Process(maglev::ConstructWithSpread* node,
1826 const maglev::ProcessingState& state) {
1827 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
1828
1829 base::SmallVector<OpIndex, 16> arguments;
1830 arguments.push_back(Map(node->function()));
1831 arguments.push_back(Map(node->new_target()));
1832 arguments.push_back(__ Word32Constant(node->num_args_no_spread()));
1833 arguments.push_back(Map(node->spread()));
1834
1835 for (auto arg : node->args_no_spread()) {
1836 arguments.push_back(Map(arg));
1837 }
1838
1839 arguments.push_back(Map(node->context()));
1840
1841 GENERATE_AND_MAP_BUILTIN_CALL(node, Builtin::kConstructWithSpread,
1842 frame_state, base::VectorOf(arguments),
1843 node->num_args_no_spread());
1844 return maglev::ProcessResult::kContinue;
1845 }
1846 maglev::ProcessResult Process(maglev::CheckConstructResult* node,
1847 const maglev::ProcessingState& state) {
1848 SetMap(node, __ CheckConstructResult(Map(node->construct_result_input()),
1849 Map(node->implicit_receiver_input())));
1850 return maglev::ProcessResult::kContinue;
1851 }
1852 maglev::ProcessResult Process(maglev::CheckDerivedConstructResult* node,
1853 const maglev::ProcessingState& state) {
1854 ThrowingScope throwing_scope(this, node);
1855 V<Object> construct_result = Map(node->construct_result_input());
1856 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
1857 __ CheckDerivedConstructResult(construct_result, frame_state,
1858 native_context(), ShouldLazyDeoptOnThrow(node));
1860 SetMap(node, construct_result);
1861 return maglev::ProcessResult::kContinue;
1862 }
1863
1865 const maglev::ProcessingState& state) {
1866 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
1867
1868 OpIndex arguments[] = {Map(node->object_input()),
1869 Map(node->key_input()),
1870 Map(node->value_input()),
1871 __ TaggedIndexConstant(node->feedback().index()),
1872 __ HeapConstant(node->feedback().vector),
1873 Map(node->context())};
1874
1875 GENERATE_AND_MAP_BUILTIN_CALL(node, Builtin::kKeyedStoreIC, frame_state,
1876 base::VectorOf(arguments));
1877 return maglev::ProcessResult::kContinue;
1878 }
1880 const maglev::ProcessingState& state) {
1881 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
1882
1883 OpIndex arguments[] = {Map(node->object_input()), Map(node->key_input()),
1884 __ TaggedIndexConstant(node->feedback().index()),
1885 __ HeapConstant(node->feedback().vector),
1886 Map(node->context())};
1887
1888 GENERATE_AND_MAP_BUILTIN_CALL(node, Builtin::kKeyedLoadIC, frame_state,
1889 base::VectorOf(arguments));
1890 return maglev::ProcessResult::kContinue;
1891 }
1892
1894 const maglev::ProcessingState& state) {
1895 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
1896
1897 OpIndex arguments[] = {Map(node->object_input()),
1898 __ HeapConstant(node->name().object()),
1899 Map(node->value_input()),
1900 __ TaggedIndexConstant(node->feedback().index()),
1901 __ HeapConstant(node->feedback().vector),
1902 Map(node->context())};
1903
1904 GENERATE_AND_MAP_BUILTIN_CALL(node, Builtin::kStoreIC, frame_state,
1905 base::VectorOf(arguments));
1906 return maglev::ProcessResult::kContinue;
1907 }
1909 const maglev::ProcessingState& state) {
1910 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
1911
1912 OpIndex arguments[] = {
1913 Map(node->object_input()), __ HeapConstant(node->name().object()),
1914 __ TaggedIndexConstant(node->feedback().index()),
1915 __ HeapConstant(node->feedback().vector), Map(node->context())};
1916
1917 GENERATE_AND_MAP_BUILTIN_CALL(node, Builtin::kLoadIC, frame_state,
1918 base::VectorOf(arguments));
1919 return maglev::ProcessResult::kContinue;
1920 }
1921
1923 const maglev::ProcessingState& state) {
1924 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
1925
1926 OpIndex arguments[] = {Map(node->receiver()),
1927 Map(node->lookup_start_object()),
1928 __ HeapConstant(node->name().object()),
1929 __ TaggedIndexConstant(node->feedback().index()),
1930 __ HeapConstant(node->feedback().vector),
1931 Map(node->context())};
1932
1933 GENERATE_AND_MAP_BUILTIN_CALL(node, Builtin::kLoadSuperIC, frame_state,
1934 base::VectorOf(arguments));
1935 return maglev::ProcessResult::kContinue;
1936 }
1937
1939 const maglev::ProcessingState& state) {
1940 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
1941
1942 OpIndex arguments[] = {__ HeapConstant(node->name().object()),
1943 __ TaggedIndexConstant(node->feedback().index()),
1944 __ HeapConstant(node->feedback().vector),
1945 Map(node->context())};
1946
1947 Builtin builtin;
1948 switch (node->typeof_mode()) {
1949 case TypeofMode::kInside:
1950 builtin = Builtin::kLoadGlobalICInsideTypeof;
1951 break;
1952 case TypeofMode::kNotInside:
1953 builtin = Builtin::kLoadGlobalIC;
1954 break;
1955 }
1956
1957 GENERATE_AND_MAP_BUILTIN_CALL(node, builtin, frame_state,
1958 base::VectorOf(arguments));
1959 return maglev::ProcessResult::kContinue;
1960 }
1961
1963 const maglev::ProcessingState& state) {
1964 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
1965
1966 OpIndex arguments[] = {
1967 __ HeapConstant(node->name().object()), Map(node->value()),
1968 __ TaggedIndexConstant(node->feedback().index()),
1969 __ HeapConstant(node->feedback().vector), Map(node->context())};
1970
1971 GENERATE_AND_MAP_BUILTIN_CALL(node, Builtin::kStoreGlobalIC, frame_state,
1972 base::VectorOf(arguments));
1973 return maglev::ProcessResult::kContinue;
1974 }
1975
1977 const maglev::ProcessingState& state) {
1978 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
1979
1980 OpIndex arguments[] = {Map(node->object_input()),
1981 Map(node->key_input()),
1982 Map(node->value_input()),
1983 Map(node->flags_input()),
1984 __ TaggedIndexConstant(node->feedback().index()),
1985 __ HeapConstant(node->feedback().vector),
1986 Map(node->context())};
1987
1988 GENERATE_AND_MAP_BUILTIN_CALL(node, Builtin::kDefineKeyedOwnIC, frame_state,
1989 base::VectorOf(arguments));
1990 return maglev::ProcessResult::kContinue;
1991 }
1992
1994 const maglev::ProcessingState& state) {
1995 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
1996
1997 OpIndex arguments[] = {Map(node->object_input()),
1998 __ HeapConstant(node->name().object()),
1999 Map(node->value_input()),
2000 __ TaggedIndexConstant(node->feedback().index()),
2001 __ HeapConstant(node->feedback().vector),
2002 Map(node->context())};
2003
2004 GENERATE_AND_MAP_BUILTIN_CALL(node, Builtin::kDefineNamedOwnIC, frame_state,
2005 base::VectorOf(arguments));
2006 return maglev::ProcessResult::kContinue;
2007 }
2008
2010 const maglev::ProcessingState& state) {
2011 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
2012
2013 OpIndex arguments[] = {
2014 Map(node->receiver()), __ TaggedIndexConstant(node->load_slot()),
2015 __ TaggedIndexConstant(node->call_slot()),
2016 __ HeapConstant(node->feedback()), Map(node->context())};
2017
2018 GENERATE_AND_MAP_BUILTIN_CALL(node, Builtin::kGetIteratorWithFeedback,
2019 frame_state, base::VectorOf(arguments));
2020 return maglev::ProcessResult::kContinue;
2021 }
2022
2024 const maglev::ProcessingState& state) {
2025 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
2026
2027 OpIndex arguments[] = {
2028 __ HeapConstant(node->feedback().vector),
2029 __ TaggedIndexConstant(node->feedback().index()),
2030 __ HeapConstant(node->boilerplate_descriptor().object()),
2031 __ SmiConstant(Smi::FromInt(node->flags())), native_context()};
2032
2033 GENERATE_AND_MAP_BUILTIN_CALL(node, Builtin::kCreateShallowObjectLiteral,
2034 frame_state, base::VectorOf(arguments));
2035 return maglev::ProcessResult::kContinue;
2036 }
2037
2039 const maglev::ProcessingState& state) {
2040 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
2041
2042 OpIndex arguments[] = {__ HeapConstant(node->feedback().vector),
2043 __ TaggedIndexConstant(node->feedback().index()),
2044 __ HeapConstant(node->constant_elements().object()),
2045 __ SmiConstant(Smi::FromInt(node->flags())),
2046 native_context()};
2047
2048 GENERATE_AND_MAP_BUILTIN_CALL(node, Builtin::kCreateShallowArrayLiteral,
2049 frame_state, base::VectorOf(arguments));
2050 return maglev::ProcessResult::kContinue;
2051 }
2052
2054 const maglev::ProcessingState& state) {
2055 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
2056
2057 OpIndex arguments[] = {Map(node->object_input()),
2058 Map(node->name_input()),
2059 Map(node->value_input()),
2060 __ TaggedIndexConstant(node->feedback().index()),
2061 __ HeapConstant(node->feedback().vector),
2062 native_context()};
2063
2064 GENERATE_AND_MAP_BUILTIN_CALL(node, Builtin::kStoreInArrayLiteralIC,
2065 frame_state, base::VectorOf(arguments));
2066 return maglev::ProcessResult::kContinue;
2067 }
2068
2070 const maglev::ProcessingState& state) {
2071 V<Object> table = Map(node->table_input());
2072 V<Smi> key = Map(node->key_input());
2073
2074 V<Smi> entry = __ FindOrderedHashMapEntry(table, key);
2075 ScopedVar<Object> result(this, __ HeapConstant(local_factory_->undefined_value()));
2076
2077 IF_NOT (__ TaggedEqual(entry, __ SmiConstant(Smi::FromInt(-1)))) {
2078 result =
2079 __ LoadElement(table, AccessBuilderTS::ForOrderedHashMapEntryValue(),
2080 __ ChangeInt32ToIntPtr(__ UntagSmi(entry)));
2081 }
2082
2083 SetMap(node, result);
2084
2085 return maglev::ProcessResult::kContinue;
2086 }
2087
2089 const maglev::ProcessingState& state) {
2090 V<Object> table = Map(node->table_input());
2091 V<Word32> key = Map(node->key_input());
2092
2093 V<WordPtr> entry = __ FindOrderedHashMapEntryForInt32Key(table, key);
2094 ScopedVar<Object> result(this, __ HeapConstant(local_factory_->undefined_value()));
2095
2096 IF_NOT (__ Word32Equal(__ TruncateWordPtrToWord32(entry), -1)) {
2097 result = __ LoadElement(
2098 table, AccessBuilderTS::ForOrderedHashMapEntryValue(), entry);
2099 }
2100
2101 SetMap(node, result);
2102
2103 return maglev::ProcessResult::kContinue;
2104 }
2105
2107 const maglev::ProcessingState& state) {
2108 V<Object> table = Map(node->table_input());
2109 V<Smi> key = Map(node->key_input());
2110
2111 V<Smi> entry = __ FindOrderedHashSetEntry(table, key);
2112 ScopedVar<Boolean> result(
2113 this, __ HeapConstant(local_factory_->true_value()));
2114
2115 IF (__ TaggedEqual(entry, __ SmiConstant(Smi::FromInt(-1)))) {
2116 result = __ HeapConstant(local_factory_->false_value());
2117 }
2118 SetMap(node, result);
2119
2120 return maglev::ProcessResult::kContinue;
2121 }
2122
2123 maglev::ProcessResult Process(maglev::TestInstanceOf* node,
2124 const maglev::ProcessingState& state) {
2125 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
2126
2127 OpIndex arguments[] = {Map(node->object()), Map(node->callable()),
2128 Map(node->context())};
2129
2130 GENERATE_AND_MAP_BUILTIN_CALL(node, Builtin::kInstanceOf, frame_state,
2131 base::VectorOf(arguments));
2132 return maglev::ProcessResult::kContinue;
2133 }
2134
2135 maglev::ProcessResult Process(maglev::DeleteProperty* node,
2136 const maglev::ProcessingState& state) {
2137 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
2138
2139 OpIndex arguments[] = {
2140 Map(node->object()), Map(node->key()),
2141 __ SmiConstant(Smi::FromInt(static_cast<int>(node->mode()))),
2142 Map(node->context())};
2143
2144 GENERATE_AND_MAP_BUILTIN_CALL(node, Builtin::kDeleteProperty, frame_state,
2145 base::VectorOf(arguments));
2146 return maglev::ProcessResult::kContinue;
2147 }
2148
2149 maglev::ProcessResult Process(maglev::ToName* node,
2150 const maglev::ProcessingState& state) {
2151 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
2152
2153 OpIndex arguments[] = {Map(node->value_input()), Map(node->context())};
2154
2155 GENERATE_AND_MAP_BUILTIN_CALL(node, Builtin::kToName, frame_state,
2156 base::VectorOf(arguments));
2157 return maglev::ProcessResult::kContinue;
2158 }
2159
2160 maglev::ProcessResult Process(maglev::CreateRegExpLiteral* node,
2161 const maglev::ProcessingState& state) {
2162 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
2163
2164 OpIndex arguments[] = {__ HeapConstant(node->feedback().vector),
2165 __ TaggedIndexConstant(node->feedback().index()),
2166 __ HeapConstant(node->pattern().object()),
2167 __ SmiConstant(Smi::FromInt(node->flags())),
2168 native_context()};
2169
2170 GENERATE_AND_MAP_BUILTIN_CALL(node, Builtin::kCreateRegExpLiteral,
2171 frame_state, base::VectorOf(arguments));
2172 return maglev::ProcessResult::kContinue;
2173 }
2174
2175 maglev::ProcessResult Process(maglev::GetTemplateObject* node,
2176 const maglev::ProcessingState& state) {
2177 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
2178
2179 OpIndex arguments[] = {
2180 __ HeapConstant(node->shared_function_info().object()),
2181 Map(node->description()), __ WordPtrConstant(node->feedback().index()),
2182 __ HeapConstant(node->feedback().vector), native_context()};
2183
2184 GENERATE_AND_MAP_BUILTIN_CALL(node, Builtin::kGetTemplateObject,
2185 frame_state, base::VectorOf(arguments));
2186 return maglev::ProcessResult::kContinue;
2187 }
2188
2190 const maglev::ProcessingState& state) {
2191 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
2192
2193 OpIndex arguments[] = {
2194 __ HeapConstant(node->feedback().vector),
2195 __ TaggedIndexConstant(node->feedback().index()),
2196 __ HeapConstant(node->boilerplate_descriptor().object()),
2197 __ SmiConstant(Smi::FromInt(node->flags())), native_context()};
2198
2199 GENERATE_AND_MAP_BUILTIN_CALL(node,
2200 Builtin::kCreateObjectFromSlowBoilerplate,
2201 frame_state, base::VectorOf(arguments));
2202 return maglev::ProcessResult::kContinue;
2203 }
2204
2206 const maglev::ProcessingState& state) {
2207 V<Word32> length = Map(node->length_input());
2208 SetMap(node,
2209 __ NewArray(__ ChangeInt32ToIntPtr(length),
2210 NewArrayOp::Kind::kObject, node->allocation_type()));
2211 return maglev::ProcessResult::kContinue;
2212 }
2213
2215 const maglev::ProcessingState& state) {
2216 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
2217
2218 OpIndex arguments[] = {__ HeapConstant(node->feedback().vector),
2219 __ TaggedIndexConstant(node->feedback().index()),
2220 __ HeapConstant(node->constant_elements().object()),
2221 __ SmiConstant(Smi::FromInt(node->flags())),
2222 native_context()};
2223
2224 GENERATE_AND_MAP_BUILTIN_CALL(node,
2225 Builtin::kCreateArrayFromSlowBoilerplate,
2226 frame_state, base::VectorOf(arguments));
2227 return maglev::ProcessResult::kContinue;
2228 }
2229
2230 maglev::ProcessResult Process(maglev::ForInPrepare* node,
2231 const maglev::ProcessingState& state) {
2232 OpIndex arguments[] = {Map(node->enumerator()),
2233 __ TaggedIndexConstant(node->feedback().index()),
2234 __ HeapConstant(node->feedback().vector),
2235 Map(node->context())};
2236
2237 V<Any> call =
2238 GenerateBuiltinCall(node, Builtin::kForInPrepare,
2240 base::VectorOf(arguments));
2241 SetMap(node, __ Projection(call, 0, RegisterRepresentation::Tagged()));
2243 __ Projection(call, 1, RegisterRepresentation::Tagged()));
2244 return maglev::ProcessResult::kContinue;
2245 }
2246 maglev::ProcessResult Process(maglev::ForInNext* node,
2247 const maglev::ProcessingState& state) {
2248 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
2249
2250 OpIndex arguments[] = {__ WordPtrConstant(node->feedback().index()),
2251 Map(node->receiver()),
2252 Map(node->cache_array()),
2253 Map(node->cache_type()),
2254 Map(node->cache_index()),
2255 __ HeapConstant(node->feedback().vector),
2256 Map(node->context())};
2257
2258 GENERATE_AND_MAP_BUILTIN_CALL(node, Builtin::kForInNext, frame_state,
2259 base::VectorOf(arguments));
2260 return maglev::ProcessResult::kContinue;
2261 }
2262
2263 maglev::ProcessResult Process(maglev::CheckSmi* node,
2264 const maglev::ProcessingState& state) {
2265 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2266 __ DeoptimizeIfNot(__ ObjectIsSmi(Map(node->receiver_input())), frame_state,
2267 DeoptimizeReason::kNotASmi,
2268 node->eager_deopt_info()->feedback_to_update());
2269 return maglev::ProcessResult::kContinue;
2270 }
2272 const maglev::ProcessingState& state) {
2273 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2274 DeoptIfInt32IsNotSmi(node->input(), frame_state,
2275 node->eager_deopt_info()->feedback_to_update());
2276 return maglev::ProcessResult::kContinue;
2277 }
2279 const maglev::ProcessingState& state) {
2280 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2281 __ DeoptimizeIfNot(__ Uint32LessThan(Map(node->input()), Smi::kMaxValue),
2282 frame_state, DeoptimizeReason::kNotASmi,
2283 node->eager_deopt_info()->feedback_to_update());
2284 return maglev::ProcessResult::kContinue;
2285 }
2286
2288 const maglev::ProcessingState& state) {
2289 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2290 __ DeoptimizeIfNot(
2291 __ UintPtrLessThanOrEqual(Map(node->input()), Smi::kMaxValue),
2292 frame_state, DeoptimizeReason::kNotASmi,
2293 node->eager_deopt_info()->feedback_to_update());
2294 // TODO(388844115): Rename the IntPtr in Maglev to make it clear it's
2295 // non-negative.
2296 return maglev::ProcessResult::kContinue;
2297 }
2298
2300 const maglev::ProcessingState& state) {
2301 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2302 V<Word32> w32 = __ ChangeFloat64ToInt32OrDeopt(
2303 Map(node->input()), frame_state,
2304 CheckForMinusZeroMode::kCheckForMinusZero,
2305 node->eager_deopt_info()->feedback_to_update());
2306 if (!SmiValuesAre32Bits()) {
2307 DeoptIfInt32IsNotSmi(w32, frame_state,
2308 node->eager_deopt_info()->feedback_to_update());
2309 }
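// With 31-bit Smis (e.g. when pointer compression is enabled and
// SmiValuesAre32Bits() is false), not every int32 fits in a Smi, so the
// extra DeoptIfInt32IsNotSmi check above is required; with 32-bit Smis,
// any int32 produced by the float64 conversion fits by construction.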
2310 return maglev::ProcessResult::kContinue;
2311 }
2313 const maglev::ProcessingState& state) {
2314 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2315 V<Object> input = Map(node->receiver_input());
2316 V<Word32> check;
2317 if (node->mode() == Object::Conversion::kToNumeric) {
2318 check = __ ObjectIsNumberOrBigInt(input);
2319 } else {
2320 DCHECK_EQ(node->mode(), Object::Conversion::kToNumber);
2321 check = __ ObjectIsNumber(input);
2322 }
2323 __ DeoptimizeIfNot(check, frame_state, DeoptimizeReason::kNotANumber,
2324 node->eager_deopt_info()->feedback_to_update());
2325 return maglev::ProcessResult::kContinue;
2326 }
2328 const maglev::ProcessingState& state) {
2329 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2330 __ DeoptimizeIf(__ ObjectIsSmi(Map(node->receiver_input())), frame_state,
2331 DeoptimizeReason::kSmi,
2332 node->eager_deopt_info()->feedback_to_update());
2333 return maglev::ProcessResult::kContinue;
2334 }
2336 const maglev::ProcessingState& state) {
2337 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2338 V<Object> input = Map(node->input());
2340 IF (__ IsSmi(input)) {
2341 value = __ UntagSmi(V<Smi>::Cast(input));
2342 } ELSE {
2343 __ DeoptimizeIfNot(__ IsHeapNumberMap(__ LoadMapField(input)),
2344 frame_state, DeoptimizeReason::kNotInt32,
2345 node->eager_deopt_info()->feedback_to_update());
2346 value = __ ChangeFloat64ToInt32OrDeopt(
2347 __ LoadHeapNumberValue(V<HeapNumber>::Cast(input)), frame_state,
2348 CheckForMinusZeroMode::kCheckForMinusZero,
2349 node->eager_deopt_info()->feedback_to_update());
2350 }
2351 SetMap(node, value);
2352 return maglev::ProcessResult::kContinue;
2353 }
2354 void CheckMaps(V<Object> receiver_input, V<FrameState> frame_state,
2355 OptionalV<Map> object_map, const FeedbackSource& feedback,
2356 const compiler::ZoneRefSet<Map>& maps, bool check_heap_object,
2357 CheckMapsFlags flags) {
2358 Label<> done(this);
2359 if (check_heap_object) {
2360 OpIndex is_smi = __ IsSmi(receiver_input);
2361 if (AnyMapIsHeapNumber(maps)) {
2362 // Smis count as matching the HeapNumber map, so we're done.
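// (A Smi has no map, but it is still a valid Number; when the map set
// includes the HeapNumber map, this CheckMaps is effectively an "is this
// a Number" check, which a Smi trivially passes.)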
2363 GOTO_IF(is_smi, done);
2364 } else {
2365 __ DeoptimizeIf(is_smi, frame_state, DeoptimizeReason::kWrongMap,
2366 feedback);
2367 }
2368 }
2369
2370#ifdef DEBUG
2371 if (flags & CheckMapsFlag::kTryMigrateInstance) {
2372 bool has_migration_targets = false;
2373 for (MapRef map : maps) {
2374 if (map.object()->is_migration_target()) {
2375 has_migration_targets = true;
2376 break;
2377 }
2378 }
2379 DCHECK(has_migration_targets);
2380 }
2381#endif // DEBUG
2382
2383 __ CheckMaps(V<HeapObject>::Cast(receiver_input), frame_state, object_map,
2384 maps, flags, feedback);
2385
2386 if (done.has_incoming_jump()) {
2387 GOTO(done);
2388 BIND(done);
2389 }
2390 }
2392 const maglev::ProcessingState& state) {
2393 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2394 CheckMaps(Map(node->receiver_input()), frame_state, {},
2395 node->eager_deopt_info()->feedback_to_update(),
2396 node->maps().Clone(graph_zone()),
2397 node->check_type() == maglev::CheckType::kCheckHeapObject,
2398 CheckMapsFlag::kNone);
2399 return maglev::ProcessResult::kContinue;
2400 }
2402 const maglev::ProcessingState& state) {
2403 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2404 CheckMaps(Map(node->object_input()), frame_state, Map(node->map_input()),
2405 node->eager_deopt_info()->feedback_to_update(),
2406 node->maps().Clone(graph_zone()), /*check_heap_object*/ false,
2407 CheckMapsFlag::kNone);
2408 return maglev::ProcessResult::kContinue;
2409 }
2411 const maglev::ProcessingState& state) {
2412 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2413 CheckMaps(Map(node->receiver_input()), frame_state, {},
2414 node->eager_deopt_info()->feedback_to_update(),
2415 node->maps().Clone(graph_zone()),
2416 node->check_type() == maglev::CheckType::kCheckHeapObject,
2417 CheckMapsFlag::kTryMigrateInstance);
2418 return maglev::ProcessResult::kContinue;
2419 }
2421 const maglev::ProcessingState& state) {
2422 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2423 CheckMaps(Map(node->receiver_input()), frame_state, {},
2424 node->eager_deopt_info()->feedback_to_update(),
2425 node->maps().Clone(graph_zone()),
2426 node->check_type() == maglev::CheckType::kCheckHeapObject,
2427 CheckMapsFlag::kTryMigrateInstanceAndDeopt);
2428 return maglev::ProcessResult::kContinue;
2429 }
2431 const maglev::ProcessingState& state) {
2432 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2433 SetMap(node,
2434 __ MigrateMapIfNeeded(
2435 Map(node->object_input()), Map(node->map_input()), frame_state,
2436 node->eager_deopt_info()->feedback_to_update()));
2437 return maglev::ProcessResult::kContinue;
2438 }
2440 const maglev::ProcessingState& state) {
2441 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2442 __ DeoptimizeIfNot(__ TaggedEqual(Map(node->target_input()),
2443 __ HeapConstant(node->value().object())),
2444 frame_state, node->deoptimize_reason(),
2445 node->eager_deopt_info()->feedback_to_update());
2446 return maglev::ProcessResult::kContinue;
2447 }
2449 const maglev::ProcessingState& state) {
2450 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2451 __ DeoptimizeIfNot(__ Word32Equal(Map(node->target_input()), node->value()),
2452 frame_state, node->deoptimize_reason(),
2453 node->eager_deopt_info()->feedback_to_update());
2454 return maglev::ProcessResult::kContinue;
2455 }
2457 const maglev::ProcessingState& state) {
2458 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2459 __ DeoptimizeIfNot(__ Float64SameValue(Map(node->target_input()),
2460 node->value().get_scalar()),
2461 frame_state, node->deoptimize_reason(),
2462 node->eager_deopt_info()->feedback_to_update());
2463 return maglev::ProcessResult::kContinue;
2464 }
2466 const maglev::ProcessingState& state) {
2467 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2468 ObjectIsOp::InputAssumptions input_assumptions =
2469 node->check_type() == maglev::CheckType::kCheckHeapObject
2470 ? ObjectIsOp::InputAssumptions::kNone
2471 : ObjectIsOp::InputAssumptions::kHeapObject;
2472 V<Word32> check = __ ObjectIs(Map(node->receiver_input()),
2473 ObjectIsOp::Kind::kString, input_assumptions);
2474 __ DeoptimizeIfNot(check, frame_state, DeoptimizeReason::kNotAString,
2475 node->eager_deopt_info()->feedback_to_update());
2476 return maglev::ProcessResult::kContinue;
2477 }
2479 const maglev::ProcessingState& state) {
2480 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2481 ObjectIsOp::InputAssumptions input_assumptions =
2482 node->check_type() == maglev::CheckType::kCheckHeapObject
2483 ? ObjectIsOp::InputAssumptions::kNone
2484 : ObjectIsOp::InputAssumptions::kHeapObject;
2485 V<Word32> check = __ ObjectIs(Map(node->receiver_input()),
2486 ObjectIsOp::Kind::kStringOrStringWrapper,
2487 input_assumptions);
2488 __ DeoptimizeIfNot(check, frame_state,
2489 DeoptimizeReason::kNotAStringOrStringWrapper,
2490 node->eager_deopt_info()->feedback_to_update());
2491 return maglev::ProcessResult::kContinue;
2492 }
2494 const maglev::ProcessingState& state) {
2495 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2496 ObjectIsOp::InputAssumptions input_assumptions =
2497 node->check_type() == maglev::CheckType::kCheckHeapObject
2498 ? ObjectIsOp::InputAssumptions::kNone
2499 : ObjectIsOp::InputAssumptions::kHeapObject;
2500 V<Word32> check = __ ObjectIs(Map(node->receiver_input()),
2501 ObjectIsOp::Kind::kSymbol, input_assumptions);
2502 __ DeoptimizeIfNot(check, frame_state, DeoptimizeReason::kNotASymbol,
2503 node->eager_deopt_info()->feedback_to_update());
2504 return maglev::ProcessResult::kContinue;
2505 }
2507 const maglev::ProcessingState& state) {
2508 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2509 __ CheckInstanceType(
2510 Map(node->receiver_input()), frame_state,
2511 node->eager_deopt_info()->feedback_to_update(),
2512 node->first_instance_type(), node->last_instance_type(),
2513 node->check_type() != maglev::CheckType::kOmitHeapObjectCheck);
2514
2515 return maglev::ProcessResult::kContinue;
2516 }
2518 const maglev::ProcessingState& state) {
2519 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2520 __ DeoptimizeIfNot(
2521 __ TaggedEqual(Map(node->first_input()), Map(node->second_input())),
2522 frame_state, node->deoptimize_reason(),
2523 node->eager_deopt_info()->feedback_to_update());
2524 return maglev::ProcessResult::kContinue;
2525 }
2527 const maglev::ProcessingState& state) {
2528 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2529 DeoptIfInt32IsNotSmi(node->input(), frame_state,
2530 node->eager_deopt_info()->feedback_to_update());
2531 SetMap(node, Map(node->input()));
2532 return maglev::ProcessResult::kContinue;
2533 }
2535 const maglev::ProcessingState& state) {
2536 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2537 __ DeoptimizeIf(RootEqual(node->object_input(), RootIndex::kTheHoleValue),
2538 frame_state, DeoptimizeReason::kHole,
2539 node->eager_deopt_info()->feedback_to_update());
2540 return maglev::ProcessResult::kContinue;
2541 }
2543 const maglev::ProcessingState& state) {
2544 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2545 __ DeoptimizeIf(__ Float64IsHole(Map(node->float64_input())), frame_state,
2546 DeoptimizeReason::kHole,
2547 node->eager_deopt_info()->feedback_to_update());
2548 return maglev::ProcessResult::kContinue;
2549 }
2550 maglev::ProcessResult Process(maglev::CheckInt32Condition* node,
2551 const maglev::ProcessingState& state) {
2552 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2553 bool negate_result = false;
2554 V<Word32> cmp = ConvertInt32Compare(node->left_input(), node->right_input(),
2555 node->condition(), &negate_result);
2556 if (negate_result) {
2557 __ DeoptimizeIf(cmp, frame_state, node->deoptimize_reason(),
2558 node->eager_deopt_info()->feedback_to_update());
2559 } else {
2560 __ DeoptimizeIfNot(cmp, frame_state, node->deoptimize_reason(),
2561 node->eager_deopt_info()->feedback_to_update());
2562 }
2563 return maglev::ProcessResult::kContinue;
2564 }
2565
2567 const maglev::ProcessingState& state) {
2568 DCHECK(
2569 node->is_used()); // Should have been dead-code eliminated otherwise.
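// Escape analysis has grouped several inlined allocations into this one
// allocation block: each allocation that still materializes (because it
// escaped or was never analysed) is assigned an offset into a single
// shared allocation whose total size is accumulated here.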
2570 int size = 0;
2571 for (auto alloc : node->allocation_list()) {
2572 if (!alloc->HasBeenAnalysed() || alloc->HasEscaped()) {
2573 alloc->set_offset(size);
2574 size += alloc->size();
2575 }
2576 }
2577 node->set_size(size);
2578 SetMap(node, __ FinishInitialization(
2579 __ Allocate<HeapObject>(size, node->allocation_type())));
2580 return maglev::ProcessResult::kContinue;
2581 }
2583 const maglev::ProcessingState& state) {
2584 DCHECK(node->HasBeenAnalysed() &&
2585 node->HasEscaped()); // Would have been removed otherwise.
2586 V<HeapObject> alloc = Map(node->allocation_block());
2587 SetMap(node, __ BitcastWordPtrToHeapObject(__ WordPtrAdd(
2588 __ BitcastHeapObjectToWordPtr(alloc), node->offset())));
2589 return maglev::ProcessResult::kContinue;
2590 }
2591
2592 maglev::ProcessResult Process(maglev::EnsureWritableFastElements* node,
2593 const maglev::ProcessingState& state) {
2594 SetMap(node, __ EnsureWritableFastElements(Map(node->object_input()),
2595 Map(node->elements_input())));
2596 return maglev::ProcessResult::kContinue;
2597 }
2599 const maglev::ProcessingState& state) {
2600 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2601 GrowFastElementsMode mode =
2602 IsDoubleElementsKind(node->elements_kind())
2603 ? GrowFastElementsMode::kDoubleElements
2604 : GrowFastElementsMode::kSmiOrObjectElements;
2605 SetMap(node, __ MaybeGrowFastElements(
2606 Map(node->object_input()), Map(node->elements_input()),
2607 Map(node->index_input()),
2608 Map(node->elements_length_input()), frame_state, mode,
2609 node->eager_deopt_info()->feedback_to_update()));
2610 return maglev::ProcessResult::kContinue;
2611 }
2612
2613 maglev::ProcessResult Process(maglev::ExtendPropertiesBackingStore* node,
2614 const maglev::ProcessingState& state) {
2615 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2616 SetMap(node, __ ExtendPropertiesBackingStore(
2617 Map(node->property_array_input()),
2618 Map(node->object_input()), node->old_length(), frame_state,
2619 node->eager_deopt_info()->feedback_to_update()));
2620 return maglev::ProcessResult::kContinue;
2621 }
2622
2624 const maglev::ProcessingState& state) {
2625 __ TransitionAndStoreArrayElement(
2626 Map(node->array_input()),
2627 __ ChangeInt32ToIntPtr(Map(node->index_input())),
2628 Map(node->value_input()),
2630 node->fast_map().object(), node->double_map().object());
2631 return maglev::ProcessResult::kContinue;
2632 }
2633
2635 const maglev::ProcessingState& state) {
2636 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2637 ZoneRefSet<i::Map> sources(node->transition_sources().begin(),
2638 node->transition_sources().end(), graph_zone());
2639 __ TransitionElementsKindOrCheckMap(
2640 Map(node->object_input()), Map(node->map_input()), frame_state,
2642 sources, node->transition_target(),
2643 node->eager_deopt_info()->feedback_to_update()));
2644 return maglev::ProcessResult::kContinue;
2645 }
2647 const maglev::ProcessingState& state) {
2648 SetMap(node, __ TransitionMultipleElementsKind(
2649 Map(node->object_input()), Map(node->map_input()),
2650 node->transition_sources(), node->transition_target()));
2651 return maglev::ProcessResult::kContinue;
2652 }
2653
2654 maglev::ProcessResult Process(maglev::HasInPrototypeChain* node,
2655 const maglev::ProcessingState& state) {
2656 ThrowingScope throwing_scope(this, node);
2657
2658 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
2659 SetMap(node, __ HasInPrototypeChain(Map(node->object()), node->prototype(),
2660 frame_state, native_context(),
2661 ShouldLazyDeoptOnThrow(node)));
2662 return maglev::ProcessResult::kContinue;
2663 }
2664
2665 maglev::ProcessResult Process(maglev::UpdateJSArrayLength* node,
2666 const maglev::ProcessingState& state) {
2667 SetMap(node, __ UpdateJSArrayLength(Map(node->length_input()),
2668 Map(node->object_input()),
2669 Map(node->index_input())));
2670 return maglev::ProcessResult::kContinue;
2671 }
2672
2673 maglev::ProcessResult Process(maglev::AllocateElementsArray* node,
2674 const maglev::ProcessingState& state) {
2675 V<Word32> length = Map(node->length_input());
2676 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2677 // Note that {length} cannot be negative (Maglev inserts a check before
2678 // AllocateElementsArray to ensure this).
2679 __ DeoptimizeIfNot(
2680 __ Uint32LessThan(length, JSArray::kInitialMaxFastElementArray),
2681 frame_state, DeoptimizeReason::kGreaterThanMaxFastElementArray,
2682 node->eager_deopt_info()->feedback_to_update());
2684
2685 SetMap(node,
2686 __ NewArray(__ ChangeUint32ToUintPtr(length),
2687 NewArrayOp::Kind::kObject, node->allocation_type()));
2688 return maglev::ProcessResult::kContinue;
2689 }
2690
2691 template <typename Node>
2692 maglev::ProcessResult StringConcatHelper(Node* node, V<String> left,
2693 V<String> right) {
2694 // When coming from Turbofan, StringConcat is always guarded by a check that
2695 // the length is less than String::kMaxLength, which prevents StringConcat
2696 // from ever throwing (and as a consequence of this, it does not need a
2697 // Context input). This is not the case for Maglev. To mimic Turbofan's
2698 // behavior, we thus insert here a length check.
2699 // TODO(dmercadier): I'm not convinced that these checks make a lot of
2700 // sense, since they make the graph bigger, and throwing before the builtin
2701 // call to StringConcat isn't super important since throwing is not supposed
2702 // to be fast. We should consider just calling the builtin and letting it
2703 // throw. With LazyDeoptOnThrow, this is currently a bit verbose to
2704 // implement, so we should first find a way to have this LazyDeoptOnThrow
2705 // without adding a member to all throwing operations (like adding
2706 // LazyDeoptOnThrow in FrameStateOp).
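// Concretely (on a 64-bit build, where String::kMaxLength is just below
// 2**29): concatenating two strings of 2**28 characters each exceeds
// kMaxLength without overflowing int32, so it is the Uint32LessThanOrEqual
// check below, not the overflow bit, that routes to throw_invalid_length.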
2707 ThrowingScope throwing_scope(this, node);
2708
2709 V<Word32> left_len = __ StringLength(left);
2710 V<Word32> right_len = __ StringLength(right);
2711
2712 V<Tuple<Word32, Word32>> len_and_ovf =
2713 __ Int32AddCheckOverflow(left_len, right_len);
2714 V<Word32> len = __ Projection<0>(len_and_ovf);
2715 V<Word32> ovf = __ Projection<1>(len_and_ovf);
2716
2717 Label<> throw_invalid_length(this);
2718 Label<> done(this);
2719
2720 GOTO_IF(UNLIKELY(ovf), throw_invalid_length);
2721 GOTO_IF(LIKELY(__ Uint32LessThanOrEqual(len, String::kMaxLength)), done);
2722
2723 GOTO(throw_invalid_length);
2724 BIND(throw_invalid_length);
2725 {
2726 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
2727 __ CallRuntime_ThrowInvalidStringLength(isolate_, frame_state,
2728 Map(node->context()), ShouldLazyDeoptOnThrow(node));
2730 // We should not return from Throw.
2731 __ Unreachable();
2732 }
2733
2734 BIND(done);
2735 SetMap(node, __ StringConcat(__ TagSmi(len), left, right));
2736 return maglev::ProcessResult::kContinue;
2737 }
2738 maglev::ProcessResult Process(maglev::StringConcat* node,
2739 const maglev::ProcessingState& state) {
2740 V<String> left = Map(node->lhs());
2741 V<String> right = Map(node->rhs());
2742 return StringConcatHelper(node, left, right);
2743 }
2745 const maglev::ProcessingState& state) {
2746 V<HeapObject> string_or_wrapper = Map(node->value_input());
2747
2749 IF (__ ObjectIsString(string_or_wrapper)) {
2750 string = V<String>::Cast(string_or_wrapper);
2751 } ELSE {
2752 string = V<String>::Cast(
2753 __ LoadTaggedField(V<JSPrimitiveWrapper>::Cast(string_or_wrapper),
2754 JSPrimitiveWrapper::kValueOffset));
2755 }
2756 SetMap(node, string);
2757 return maglev::ProcessResult::kContinue;
2758 }
2768 const maglev::ProcessingState& state) {
2769 SetMap(node, __ StringEqual(Map(node->lhs()), Map(node->rhs())));
2770 return maglev::ProcessResult::kContinue;
2771 }
2772 maglev::ProcessResult Process(maglev::StringLength* node,
2773 const maglev::ProcessingState& state) {
2774 SetMap(node, __ StringLength(Map(node->object_input())));
2775 return maglev::ProcessResult::kContinue;
2776 }
2778 const maglev::ProcessingState& state) {
2779 V<Word32> char_code =
2780 __ StringCharCodeAt(Map(node->string_input()),
2781 __ ChangeUint32ToUintPtr(Map(node->index_input())));
2782 SetMap(node, __ ConvertCharCodeToString(char_code));
2783 return maglev::ProcessResult::kContinue;
2784 }
2785 maglev::ProcessResult Process(maglev::CheckedInternalizedString* node,
2786 const maglev::ProcessingState& state) {
2787 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2788 SetMap(node, __ CheckedInternalizedString(
2789 Map(node->object_input()), frame_state,
2790 node->check_type() == maglev::CheckType::kCheckHeapObject,
2791 node->eager_deopt_info()->feedback_to_update()));
2792 return maglev::ProcessResult::kContinue;
2793 }
2794 maglev::ProcessResult Process(maglev::CheckValueEqualsString* node,
2795 const maglev::ProcessingState& state) {
2796 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2797 __ CheckValueEqualsString(Map(node->target_input()), node->value(),
2798 frame_state,
2799 node->eager_deopt_info()->feedback_to_update());
2800 return maglev::ProcessResult::kContinue;
2801 }
2802 maglev::ProcessResult Process(maglev::BuiltinStringFromCharCode* node,
2803 const maglev::ProcessingState& state) {
2804 SetMap(node, __ ConvertCharCodeToString(Map(node->code_input())));
2806 }
2809 const maglev::ProcessingState& state) {
2810 if (node->mode() == maglev::BuiltinStringPrototypeCharCodeOrCodePointAt::
2811 Mode::kCharCodeAt) {
2812 SetMap(node, __ StringCharCodeAt(
2813 Map(node->string_input()),
2814 __ ChangeUint32ToUintPtr(Map(node->index_input()))));
2815 } else {
2816 DCHECK_EQ(node->mode(),
2817 maglev::BuiltinStringPrototypeCharCodeOrCodePointAt::Mode::
2818 kCodePointAt);
2819 SetMap(node, __ StringCodePointAt(
2820 Map(node->string_input()),
2821 __ ChangeUint32ToUintPtr(Map(node->index_input()))));
2822 }
2823 return maglev::ProcessResult::kContinue;
2824 }
2826 const maglev::ProcessingState& state) {
2827 ThrowingScope throwing_scope(this, node);
2828
2829 Label<String> done(this);
2830
2831 V<Object> value = Map(node->value_input());
2832 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
2833
2834 GOTO_IF(__ ObjectIsString(value), done, V<String>::Cast(value));
2835
2836 IF_NOT (__ IsSmi(value)) {
2837 if (node->mode() == maglev::ToString::kConvertSymbol) {
2838 V<i::Map> map = __ LoadMapField(value);
2839 V<Word32> instance_type = __ LoadInstanceTypeField(map);
2840 IF (__ Word32Equal(instance_type, SYMBOL_TYPE)) {
2841 GOTO(done, __ CallRuntime_SymbolDescriptiveString(
2842 isolate_, frame_state, Map(node->context()),
2843 V<Symbol>::Cast(value), ShouldLazyDeoptOnThrow(node)));
2844 }
2845 }
2846 }
2847
2848 GOTO(done,
2849 __ CallBuiltin_ToString(isolate_, frame_state, Map(node->context()),
2850 value, ShouldLazyDeoptOnThrow(node)));
2851
2852 BIND(done, result);
2853 SetMap(node, result);
2854 return maglev::ProcessResult::kContinue;
2855 }
2857 const maglev::ProcessingState& state) {
2858 NoThrowingScopeRequired no_throws(node);
2859
2860 SetMap(node,
2861 __ CallBuiltin_NumberToString(isolate_, Map(node->value_input())));
2862 return maglev::ProcessResult::kContinue;
2863 }
2864
2865 maglev::ProcessResult Process(maglev::ArgumentsLength* node,
2866 const maglev::ProcessingState& state) {
2867 // TODO(dmercadier): ArgumentsLength in Maglev returns a raw Word32, while
2868 // in Turboshaft, it returns a Smi. We thus untag this Smi here to match
2869 // Maglev's behavior, but it would be more efficient to change Turboshaft's
2870 // ArgumentsLength operation to return a raw Word32 as well.
2871 SetMap(node, __ UntagSmi(__ ArgumentsLength()));
2872 return maglev::ProcessResult::kContinue;
2873 }
2874 maglev::ProcessResult Process(maglev::ArgumentsElements* node,
2875 const maglev::ProcessingState& state) {
2876 SetMap(node, __ NewArgumentsElements(Map(node->arguments_count_input()),
2877 node->type(),
2878 node->formal_parameter_count()));
2879 return maglev::ProcessResult::kContinue;
2880 }
2881 maglev::ProcessResult Process(maglev::RestLength* node,
2882 const maglev::ProcessingState& state) {
2883 SetMap(node, __ RestLength(node->formal_parameter_count()));
2884 return maglev::ProcessResult::kContinue;
2885 }
2886
2887 template <typename T>
2889 const maglev::ProcessingState& state) {
2890 V<Object> value =
2891 __ LoadTaggedField(Map(node->object_input()), node->offset());
2892 SetMap(node, value);
2893
2895 maglev_generator_context_node_ == nullptr &&
2896 node->object_input().node()->template Is<maglev::RegisterInput>() &&
2897 node->offset() == JSGeneratorObject::kContextOffset) {
2898 // This is loading the context of a generator for the 1st time. We save it
2899 // in {generator_context_} for later use.
2900 __ SetVariable(generator_context_, value);
2901 maglev_generator_context_node_ = node;
2902 }
2903
2904 return maglev::ProcessResult::kContinue;
2905 }
2908 const maglev::ProcessingState& state) {
2909 V<Context> script_context = V<Context>::Cast(Map(node->context()));
2910 V<Object> value = __ LoadTaggedField(script_context, node->offset());
2911 ScopedVar<Object> result(this, value);
2912 IF_NOT (__ IsSmi(value)) {
2913 V<i::Map> value_map = __ LoadMapField(value);
2914 IF (UNLIKELY(__ TaggedEqual(
2915 value_map, __ HeapConstant(local_factory_->heap_number_map())))) {
2916 V<HeapNumber> heap_number = V<HeapNumber>::Cast(value);
2917 result = __ LoadHeapNumberFromScriptContext(script_context,
2918 node->index(), heap_number);
2919 }
2920 }
2921 SetMap(node, result);
2922 return maglev::ProcessResult::kContinue;
2923 }
2925 const maglev::ProcessingState& state) {
2926 V<HeapNumber> field = __ LoadTaggedField<HeapNumber>(
2927 Map(node->object_input()), node->offset());
2928 SetMap(node, __ LoadHeapNumberValue(field));
2929 return maglev::ProcessResult::kContinue;
2930 }
2932 const maglev::ProcessingState& state) {
2933 SetMap(node, __ Load(Map(node->object_input()), LoadOp::Kind::TaggedBase(),
2934 MemoryRepresentation::Float64(), node->offset()));
2935 return maglev::ProcessResult::kContinue;
2936 }
2938 const maglev::ProcessingState& state) {
2939 SetMap(node, __ Load(Map(node->object_input()), LoadOp::Kind::TaggedBase(),
2940 MemoryRepresentation::Int32(), node->offset()));
2941 return maglev::ProcessResult::kContinue;
2942 }
2944 const maglev::ProcessingState& state) {
2945 V<HeapNumber> field = __ LoadTaggedField<HeapNumber>(
2946 Map(node->object_input()), node->offset());
2947 SetMap(node, __ LoadHeapInt32Value(field));
2948 return maglev::ProcessResult::kContinue;
2949 }
2951 const maglev::ProcessingState& state) {
2952 SetMap(node, __ LoadFixedArrayElement(
2953 Map(node->elements_input()),
2954 __ ChangeInt32ToIntPtr(Map(node->index_input()))));
2955 return maglev::ProcessResult::kContinue;
2956 }
2958 const maglev::ProcessingState& state) {
2959 SetMap(node, __ LoadFixedDoubleArrayElement(
2960 Map(node->elements_input()),
2961 __ ChangeInt32ToIntPtr(Map(node->index_input()))));
2962 return maglev::ProcessResult::kContinue;
2963 }
2965 const maglev::ProcessingState& state) {
2966 SetMap(node, __ LoadFixedDoubleArrayElement(
2967 Map(node->elements_input()),
2968 __ ChangeInt32ToIntPtr(Map(node->index_input()))));
2969 return maglev::ProcessResult::kContinue;
2970 }
2973 const maglev::ProcessingState& state) {
2974 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
2975 V<Float64> result = __ LoadFixedDoubleArrayElement(
2976 Map(node->elements_input()),
2977 __ ChangeInt32ToIntPtr(Map(node->index_input())));
2978 __ DeoptimizeIf(__ Float64IsHole(result), frame_state,
2979 DeoptimizeReason::kHole,
2980 node->eager_deopt_info()->feedback_to_update());
2981 SetMap(node, result);
2982 return maglev::ProcessResult::kContinue;
2983 }
2984
2986 const maglev::ProcessingState& state) {
2987 __ Store(Map(node->object_input()), Map(node->value_input()),
2988 StoreOp::Kind::TaggedBase(), MemoryRepresentation::AnyTagged(),
2989 WriteBarrierKind::kNoWriteBarrier, node->offset(),
2990 node->initializing_or_transitioning());
2991 return maglev::ProcessResult::kContinue;
2992 }
2994 const maglev::ProcessingState& state) {
2995 __ Store(Map(node->object_input()), Map(node->value_input()),
2996 StoreOp::Kind::TaggedBase(), MemoryRepresentation::AnyTagged(),
2997 WriteBarrierKind::kFullWriteBarrier, node->offset(),
2998 node->initializing_or_transitioning());
2999 return maglev::ProcessResult::kContinue;
3000 }
3003 const maglev::ProcessingState& state) {
3004 Label<> done(this);
3005 V<Context> context = V<i::Context>::Cast(Map(node->context_input()));
3006 V<Object> new_value = Map(node->new_value_input());
3007 V<Object> old_value = __ LoadTaggedField(context, node->offset());
3008 IF_NOT (__ TaggedEqual(old_value, new_value)) {
3009 V<Object> side_data =
3010 __ LoadScriptContextSideData(context, node->index());
3011 IF_NOT (UNLIKELY(__ TaggedEqual(
3012 side_data,
3013 __ SmiConstant(ContextSidePropertyCell::Other())))) {
3014 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
3015 __ StoreScriptContextSlowPath(
3016 context, old_value, new_value, side_data, frame_state,
3017 node->eager_deopt_info()->feedback_to_update(), done);
3018 }
3019 __ Store(context, new_value, StoreOp::Kind::TaggedBase(),
3021 WriteBarrierKind::kFullWriteBarrier, node->offset(), false);
3022 }
3023 GOTO(done);
3024 BIND(done);
3025 return maglev::ProcessResult::kContinue;
3026 }
3028 const maglev::ProcessingState& state) {
3029 V<HeapNumber> field = __ LoadTaggedField<HeapNumber>(
3030 Map(node->object_input()), node->offset());
3031 __ StoreField(field, AccessBuilder::ForHeapNumberValue(),
3032 Map(node->value_input()));
3033 return maglev::ProcessResult::kContinue;
3034 }
3036 const maglev::ProcessingState& state) {
3037 V<HeapNumber> field = __ LoadTaggedField<HeapNumber>(
3038 Map(node->object_input()), node->offset());
3039 __ StoreField(field, AccessBuilder::ForHeapInt32Value(),
3040 Map(node->value_input()));
3041 return maglev::ProcessResult::kContinue;
3042 }
3045 const maglev::ProcessingState& state) {
3046 __ Store(Map(node->object_input()), Map(node->value_input()),
3047 StoreOp::Kind::TaggedBase(),
3048 MemoryRepresentation::IndirectPointer(),
3049 WriteBarrierKind::kIndirectPointerWriteBarrier, node->offset(),
3050 node->initializing_or_transitioning(), node->tag());
3051 return maglev::ProcessResult::kContinue;
3052 }
3055 const maglev::ProcessingState& state) {
3056 __ StoreFixedArrayElement(Map(node->elements_input()),
3057 __ ChangeInt32ToIntPtr(Map(node->index_input())),
3058 Map(node->value_input()),
3059 WriteBarrierKind::kNoWriteBarrier);
3060 return maglev::ProcessResult::kContinue;
3061 }
3064 const maglev::ProcessingState& state) {
3065 __ StoreFixedArrayElement(Map(node->elements_input()),
3066 __ ChangeInt32ToIntPtr(Map(node->index_input())),
3067 Map(node->value_input()),
3068 WriteBarrierKind::kFullWriteBarrier);
3069 return maglev::ProcessResult::kContinue;
3070 }
3072 const maglev::ProcessingState& state) {
3073 __ StoreFixedDoubleArrayElement(
3074 Map(node->elements_input()),
3075 __ ChangeInt32ToIntPtr(Map(node->index_input())),
3076 Map(node->value_input()));
3077 return maglev::ProcessResult::kContinue;
3078 }
3080 const maglev::ProcessingState& state) {
3081 __ Store(Map(node->object_input()), __ HeapConstant(node->map().object()),
3082 StoreOp::Kind::TaggedBase(), MemoryRepresentation::TaggedPointer(),
3083 WriteBarrierKind::kMapWriteBarrier, HeapObject::kMapOffset,
3084 /*maybe_initializing_or_transitioning*/ true);
3085 return maglev::ProcessResult::kContinue;
3086 }
3088 const maglev::ProcessingState& state) {
3089 __ Store(Map(node->object_input()), Map(node->value_input()),
3090 StoreOp::Kind::TaggedBase(), MemoryRepresentation::Float64(),
3091 WriteBarrierKind::kNoWriteBarrier, node->offset());
3092 return maglev::ProcessResult::kContinue;
3093 }
3095 const maglev::ProcessingState& state) {
3096 __ Store(Map(node->object_input()), Map(node->value_input()),
3097 StoreOp::Kind::TaggedBase(), MemoryRepresentation::Int32(),
3098 WriteBarrierKind::kNoWriteBarrier, node->offset());
3099 return maglev::ProcessResult::kContinue;
3100 }
3101
3102 // For-in specific operations.
3104 const maglev::ProcessingState& state) {
3105 V<Word32> bitfield3 =
3106 __ LoadField<Word32>(V<i::Map>::Cast(Map(node->map_input())),
3107 AccessBuilder::ForMapBitField3());
3108 V<Word32> length = __ Word32ShiftRightLogical(
3109 __ Word32BitwiseAnd(bitfield3, Map::Bits3::EnumLengthBits::kMask),
3110 Map::Bits3::EnumLengthBits::kShift);
3111 SetMap(node, length);
3112 return maglev::ProcessResult::kContinue;
3113 }
3115 const maglev::ProcessingState& state) {
3116 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
3117 // If the cache length is zero, we don't have any indices, so we know this
3118 // is ok even though the indices are the empty array.
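// For example, a for-in over an object with no own enumerable properties
// has a cache length of 0 and never reads the indices, so the empty fixed
// array is a valid placeholder in that case.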
3119 IF_NOT (__ Word32Equal(Map(node->length_input()), 0)) {
3120 // Otherwise, an empty array with non-zero required length is not valid.
3121 V<Word32> condition =
3122 RootEqual(node->indices_input(), RootIndex::kEmptyFixedArray);
3123 __ DeoptimizeIf(condition, frame_state,
3124 DeoptimizeReason::kWrongEnumIndices,
3125 node->eager_deopt_info()->feedback_to_update());
3126 }
3127 return maglev::ProcessResult::kContinue;
3128 }
3130 const maglev::ProcessingState& state) {
3131 SetMap(node,
3132 __ LoadFieldByIndex(Map(node->object_input()),
3133 __ UntagSmi(Map<Smi>(node->index_input()))));
3134 return maglev::ProcessResult::kContinue;
3135 }
3136
3138 const maglev::ProcessingState& state) {
3139 // TODO(dmercadier): consider loading the raw length instead of the byte
3140 // length. This is not currently done because the raw length field might be
3141 // removed soon.
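// The byte length is scaled down to an element count below: for example,
// FLOAT64_ELEMENTS has a shift size of 3, so a 32-byte typed array yields
// a length of 32 >> 3 == 4 elements.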
3142 V<WordPtr> length =
3143 __ LoadField<WordPtr>(Map<JSTypedArray>(node->receiver_input()),
3144 AccessBuilder::ForJSTypedArrayByteLength());
3145
3146 int shift_size = ElementsKindToShiftSize(node->elements_kind());
3147 if (shift_size > 0) {
3148 DCHECK(shift_size == 1 || shift_size == 2 || shift_size == 3);
3149 length = __ WordPtrShiftRightLogical(length, shift_size);
3150 }
3151 SetMap(node, length);
3152 return maglev::ProcessResult::kContinue;
3153 }
3155 const maglev::ProcessingState& state) {
3156 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
3157 __ DeoptimizeIfNot(
3158 __ UintPtrLessThan(__ ChangeUint32ToUintPtr(Map(node->index_input())),
3159 Map(node->length_input())),
3160 frame_state, DeoptimizeReason::kOutOfBounds,
3161 node->eager_deopt_info()->feedback_to_update());
3162 return maglev::ProcessResult::kContinue;
3163 }
3164
3165 maglev::ProcessResult Process(maglev::LoadUnsignedIntTypedArrayElement* node,
3166 const maglev::ProcessingState& state) {
3167 SetMap(node, BuildTypedArrayLoad(Map<JSTypedArray>(node->object_input()),
3168 Map<Word32>(node->index_input()),
3169 node->elements_kind()));
3170 return maglev::ProcessResult::kContinue;
3171 }
3172 maglev::ProcessResult Process(maglev::LoadSignedIntTypedArrayElement* node,
3173 const maglev::ProcessingState& state) {
3174 SetMap(node, BuildTypedArrayLoad(Map<JSTypedArray>(node->object_input()),
3175 Map<Word32>(node->index_input()),
3176 node->elements_kind()));
3177 return maglev::ProcessResult::kContinue;
3178 }
3179 maglev::ProcessResult Process(maglev::LoadDoubleTypedArrayElement* node,
3180 const maglev::ProcessingState& state) {
3181 DCHECK_EQ(node->elements_kind(),
3182 any_of(FLOAT32_ELEMENTS, FLOAT64_ELEMENTS));
3183 V<Float> value = BuildTypedArrayLoad(
3184 Map<JSTypedArray>(node->object_input()),
3185 Map<Word32>(node->index_input()), node->elements_kind()));
3186 if (node->elements_kind() == FLOAT32_ELEMENTS) {
3187 value = __ ChangeFloat32ToFloat64(V<Float32>::Cast(value));
3188 }
3189 SetMap(node, value);
3190 return maglev::ProcessResult::kContinue;
3191 }
3192
3193 maglev::ProcessResult Process(maglev::StoreIntTypedArrayElement* node,
3194 const maglev::ProcessingState& state) {
3195 BuildTypedArrayStore(Map<JSTypedArray>(node->object_input()),
3196 Map<Word32>(node->index_input()),
3197 Map<Untagged>(node->value_input()),
3198 node->elements_kind());
3199 return maglev::ProcessResult::kContinue;
3200 }
3201 maglev::ProcessResult Process(maglev::StoreDoubleTypedArrayElement* node,
3202 const maglev::ProcessingState& state) {
3203 DCHECK_EQ(node->elements_kind(),
3204 any_of(FLOAT32_ELEMENTS, FLOAT64_ELEMENTS));
3205 V<Float> value = Map<Float>(node->value_input());
3206 if (node->elements_kind() == FLOAT32_ELEMENTS) {
3207 value = __ TruncateFloat64ToFloat32(Map(node->value_input()));
3208 }
3209 BuildTypedArrayStore(Map<JSTypedArray>(node->object_input()),
3210 Map<Word32>(node->index_input()), value,
3211 node->elements_kind());
3212 return maglev::ProcessResult::kContinue;
3213 }
3214
3216 const maglev::ProcessingState& state) {
3217 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
3218 // Normal DataView (backed by AB / SAB) or non-length tracking backed by
3219 // GSAB.
3220 V<WordPtr> byte_length =
3221 __ LoadField<WordPtr>(Map<JSTypedArray>(node->receiver_input()),
3222 AccessBuilder::ForJSDataViewByteLength());
3223
3224 int element_size = ExternalArrayElementSize(node->element_type());
3225 if (element_size > 1) {
3226 // For element_size larger than 1, we need to make sure that {index} is
3227 // less than {byte_length}, but also that {index+element_size} is at most
3228 // {byte_length}. We do this by subtracting {element_size-1} from
3229 // {byte_length}: if the resulting length is greater than 0, then we can
3230 // just treat {element_size} as 1 and check if {index} is less than this
3231 // new {byte_length}.
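// Worked example: element_size == 8 and byte_length == 24 admit start
// indices 0..16 (index + 8 <= 24). After the subtraction below,
// byte_length == 17, and Uint32LessThan(index, 17) accepts exactly 0..16.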
3232 DCHECK(element_size == 2 || element_size == 4 || element_size == 8);
3233 byte_length = __ WordPtrSub(byte_length, element_size - 1);
3234 __ DeoptimizeIf(__ IntPtrLessThan(byte_length, 0), frame_state,
3235 DeoptimizeReason::kOutOfBounds,
3236 node->eager_deopt_info()->feedback_to_update());
3237 }
3238 __ DeoptimizeIfNot(
3239 __ Uint32LessThan(Map<Word32>(node->index_input()),
3240 __ TruncateWordPtrToWord32(byte_length)),
3241 frame_state, DeoptimizeReason::kOutOfBounds,
3242 node->eager_deopt_info()->feedback_to_update());
3243 return maglev::ProcessResult::kContinue;
3244 }
3245
3247 const maglev::ProcessingState& state) {
3248 V<JSDataView> data_view = Map<JSDataView>(node->object_input());
3249 V<WordPtr> storage = __ LoadField<WordPtr>(
3250 data_view, AccessBuilder::ForJSDataViewDataPointer());
3251 V<Word32> is_little_endian =
3252 ToBit(node->is_little_endian_input(),
3254 SetMap(node, __ LoadDataViewElement(
3255 data_view, storage,
3256 __ ChangeUint32ToUintPtr(Map<Word32>(node->index_input())),
3257 is_little_endian, node->type()));
3258 return maglev::ProcessResult::kContinue;
3259 }
3261 const maglev::ProcessingState& state) {
3262 V<JSDataView> data_view = Map<JSDataView>(node->object_input());
3263 V<WordPtr> storage = __ LoadField<WordPtr>(
3264 data_view, AccessBuilder::ForJSDataViewDataPointer());
3265 V<Word32> is_little_endian =
3266 ToBit(node->is_little_endian_input(),
3268 SetMap(node,
3269 __ LoadDataViewElement(
3270 data_view, storage,
3271 __ ChangeUint32ToUintPtr(Map<Word32>(node->index_input())),
3272 is_little_endian, ExternalArrayType::kExternalFloat64Array));
3273 return maglev::ProcessResult::kContinue;
3274 }
3275
3276 maglev::ProcessResult Process(maglev::StoreSignedIntDataViewElement* node,
3277 const maglev::ProcessingState& state) {
3278 V<JSDataView> data_view = Map<JSDataView>(node->object_input());
3279 V<WordPtr> storage = __ LoadField<WordPtr>(
3280 data_view, AccessBuilder::ForJSDataViewDataPointer());
3281 V<Word32> is_little_endian =
3282 ToBit(node->is_little_endian_input(),
3284 __ StoreDataViewElement(
3285 data_view, storage,
3286 __ ChangeUint32ToUintPtr(Map<Word32>(node->index_input())),
3287 Map<Word32>(node->value_input()), is_little_endian, node->type());
3288 return maglev::ProcessResult::kContinue;
3289 }
3291 const maglev::ProcessingState& state) {
3292 V<JSDataView> data_view = Map<JSDataView>(node->object_input());
3293 V<WordPtr> storage = __ LoadField<WordPtr>(
3294 data_view, AccessBuilder::ForJSDataViewDataPointer());
3295 V<Word32> is_little_endian =
3296 ToBit(node->is_little_endian_input(),
3298 __ StoreDataViewElement(
3299 data_view, storage,
3300 __ ChangeUint32ToUintPtr(Map<Word32>(node->index_input())),
3301 Map<Float64>(node->value_input()), is_little_endian,
3302 ExternalArrayType::kExternalFloat64Array);
3303 return maglev::ProcessResult::kContinue;
3304 }
3305
3307 const maglev::ProcessingState& state) {
3308 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
3309 __ DeoptimizeIf(
3310 __ ArrayBufferIsDetached(Map<JSArrayBufferView>(node->object_input())),
3311 frame_state, DeoptimizeReason::kArrayBufferWasDetached,
3312 node->eager_deopt_info()->feedback_to_update());
3313
3314 return maglev::ProcessResult::kContinue;
3315 }
3316
3317 void BuildJump(maglev::BasicBlock* target) {
3318 Block* destination = Map(target);
3319 if (target->is_loop() && (target->predecessor_count() > 2 ||
3321 // This loop has multiple forward edges in Maglev, so we'll create an
3322 // extra block in Turboshaft that will be the only predecessor.
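// For example, two forward edges A->L and B->L into a loop header L are
// redirected to A->P and B->P with a single edge P->L, so that L keeps
// exactly two predecessors: P and its backedge.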
3323 auto it = loop_single_edge_predecessors_.find(target);
3324 if (it != loop_single_edge_predecessors_.end()) {
3325 destination = it->second;
3326 } else {
3327 Block* loop_only_pred = __ NewBlock();
3328 loop_single_edge_predecessors_[target] = loop_only_pred;
3329 destination = loop_only_pred;
3330 }
3331 }
3332 __ Goto(destination);
3333 }
3334
3335 maglev::ProcessResult Process(maglev::Jump* node,
3336 const maglev::ProcessingState& state) {
3337 BuildJump(node->target());
3338 return maglev::ProcessResult::kContinue;
3339 }
3345
3346 maglev::ProcessResult Process(maglev::JumpLoop* node,
3347 const maglev::ProcessingState& state) {
3350 }
3351 __ Goto(Map(node->target()));
3352 FixLoopPhis(node->target());
3353 return maglev::ProcessResult::kContinue;
3354 }
3355
3357 const maglev::ProcessingState& state) {
3358 V<Word32> bool_res =
3359 ConvertCompare<Word32>(node->left_input(), node->right_input(),
3360 node->operation(), Sign::kSigned);
3361 SetMap(node, ConvertWord32ToJSBool(bool_res));
3362 return maglev::ProcessResult::kContinue;
3363 }
3365 const maglev::ProcessingState& state) {
3366 V<Word32> bool_res =
3367 ConvertCompare<Float64>(node->left_input(), node->right_input(),
3368 node->operation(), Sign::kSigned);
3369 SetMap(node, ConvertWord32ToJSBool(bool_res));
3370 return maglev::ProcessResult::kContinue;
3371 }
3372 maglev::ProcessResult Process(maglev::TaggedEqual* node,
3373 const maglev::ProcessingState& state) {
3374 SetMap(node, ConvertWord32ToJSBool(
3375 __ TaggedEqual(Map(node->lhs()), Map(node->rhs()))));
3376 return maglev::ProcessResult::kContinue;
3377 }
3378 maglev::ProcessResult Process(maglev::TaggedNotEqual* node,
3379 const maglev::ProcessingState& state) {
3380 SetMap(node, ConvertWord32ToJSBool(
3381 __ TaggedEqual(Map(node->lhs()), Map(node->rhs())),
3382 /*flip*/ true));
3383 return maglev::ProcessResult::kContinue;
3384 }
3386 const maglev::ProcessingState& state) {
3387 ObjectIsOp::InputAssumptions assumption;
3388 switch (node->check_type()) {
3389 case maglev::CheckType::kCheckHeapObject:
3390 assumption = ObjectIsOp::InputAssumptions::kNone;
3391 break;
3392 case maglev::CheckType::kOmitHeapObjectCheck:
3393 assumption = ObjectIsOp::InputAssumptions::kHeapObject;
3394 break;
3395 }
3396 SetMap(node, ConvertWord32ToJSBool(
3397 __ ObjectIs(Map(node->value()),
3398 ObjectIsOp::Kind::kUndetectable, assumption)));
3399 return maglev::ProcessResult::kContinue;
3400 }
3401 maglev::ProcessResult Process(maglev::TestTypeOf* node,
3402 const maglev::ProcessingState& state) {
3403 V<Object> input = Map(node->value());
3405 switch (node->literal()) {
3406 case interpreter::TestTypeOfFlags::LiteralFlag::kNumber:
3407 result = ConvertWord32ToJSBool(
3408 __ ObjectIs(input, ObjectIsOp::Kind::kNumber,
3409 ObjectIsOp::InputAssumptions::kNone));
3410 break;
3411 case interpreter::TestTypeOfFlags::LiteralFlag::kString:
3412 result = ConvertWord32ToJSBool(
3413 __ ObjectIs(input, ObjectIsOp::Kind::kString,
3414 ObjectIsOp::InputAssumptions::kNone));
3415 break;
3416 case interpreter::TestTypeOfFlags::LiteralFlag::kSymbol:
3417 result = ConvertWord32ToJSBool(
3418 __ ObjectIs(input, ObjectIsOp::Kind::kSymbol,
3419 ObjectIsOp::InputAssumptions::kNone));
3420 break;
3421 case interpreter::TestTypeOfFlags::LiteralFlag::kBigInt:
3422 result = ConvertWord32ToJSBool(
3423 __ ObjectIs(input, ObjectIsOp::Kind::kBigInt,
3424 ObjectIsOp::InputAssumptions::kNone));
3425 break;
3426 case interpreter::TestTypeOfFlags::LiteralFlag::kFunction:
3430 break;
3431 case interpreter::TestTypeOfFlags::LiteralFlag::kBoolean:
3432 result = __ Select(__ RootEqual(input, RootIndex::kTrueValue, isolate_),
3433 __ HeapConstant(local_factory_->true_value()),
3435 input, RootIndex::kFalseValue, isolate_)),
3438 break;
3439 case interpreter::TestTypeOfFlags::LiteralFlag::kUndefined:
3440 result = __ Select(__ RootEqual(input, RootIndex::kNullValue, isolate_),
3441 __ HeapConstant(local_factory_->false_value()),
3442 ConvertWord32ToJSBool(__ ObjectIs(
3447 break;
3448 case interpreter::TestTypeOfFlags::LiteralFlag::kObject:
3449 result = __ Select(__ ObjectIs(input, ObjectIsOp::Kind::kNonCallable,
3451 __ HeapConstant(local_factory_->true_value()),
3453 input, RootIndex::kNullValue, isolate_)),
3456 break;
3457 case interpreter::TestTypeOfFlags::LiteralFlag::kOther:
3458 UNREACHABLE(); // Should never be emitted.
3459 }
3460
3461 SetMap(node, result);
3462 return maglev::ProcessResult::kContinue;
3463 }
3464
3466 const maglev::ProcessingState& state) {
3467 V<Object> receiver = Map(node->receiver_input());
3468 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
3469
3470 ObjectIsOp::InputAssumptions assumptions;
3471 switch (node->check_type()) {
3472 case maglev::CheckType::kCheckHeapObject:
3473 assumptions = ObjectIsOp::InputAssumptions::kNone;
3474 break;
3475 case maglev::CheckType::kOmitHeapObjectCheck:
3476 assumptions = ObjectIsOp::InputAssumptions::kHeapObject;
3477 break;
3478 }
3479
3480 __ DeoptimizeIfNot(
3482 assumptions),
3483 frame_state, DeoptimizeReason::kNotDetectableReceiver,
3484 node->eager_deopt_info()->feedback_to_update());
3485
3486 return maglev::ProcessResult::kContinue;
3487 }
3488
3490 const maglev::ProcessingState& state) {
3491 V<Object> object = Map(node->object_input());
3492 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
3493
3494 ObjectIsOp::InputAssumptions assumptions;
3495 switch (node->check_type()) {
3496 case maglev::CheckType::kCheckHeapObject:
3497 assumptions = ObjectIsOp::InputAssumptions::kNone;
3498 break;
3499 case maglev::CheckType::kOmitHeapObjectCheck:
3500 assumptions = ObjectIsOp::InputAssumptions::kHeapObject;
3501 break;
3502 }
3503
3504 __ DeoptimizeIfNot(
3506 assumptions),
3507 frame_state, DeoptimizeReason::kNotAJavaScriptObjectOrNullOrUndefined,
3508 node->eager_deopt_info()->feedback_to_update());
3509
3510 return maglev::ProcessResult::kContinue;
3511 }
3512
3524 const maglev::ProcessingState& state) {
3525 V<Word32> condition =
3526 ConvertCompare<Word32>(node->left_input(), node->right_input(),
3527 node->operation(), Sign::kSigned);
3528 __ Branch(condition, Map(node->if_true()), Map(node->if_false()));
3529 return maglev::ProcessResult::kContinue;
3530 }
3532 const maglev::ProcessingState& state) {
3533 V<Word32> condition =
3534 ConvertCompare<Word32>(node->left_input(), node->right_input(),
3535 node->operation(), Sign::kUnsigned);
3536 __ Branch(condition, Map(node->if_true()), Map(node->if_false()));
3537 return maglev::ProcessResult::kContinue;
3538 }
3540 const maglev::ProcessingState& state) {
3541 V<Word32> condition =
3542 ConvertCompare<Float64>(node->left_input(), node->right_input(),
3543 node->operation(), Sign::kSigned);
3544 __ Branch(condition, Map(node->if_true()), Map(node->if_false()));
3545 return maglev::ProcessResult::kContinue;
3546 }
3548 const maglev::ProcessingState& state) {
3549 V<Word32> condition = Map(node->condition_input());
3550 __ Branch(condition, Map(node->if_true()), Map(node->if_false()));
3551 return maglev::ProcessResult::kContinue;
3552 }
3553
3555 const maglev::ProcessingState& state) {
3556 V<Word32> condition =
3557 __ Equal(Map(node->condition_input()), __ IntPtrConstant(0),
3558 RegisterRepresentation::WordPtr());
3559 __ Branch(condition, Map(node->if_false()), Map(node->if_true()));
3560 return maglev::ProcessResult::kContinue;
3561 }
3562
3564 const maglev::ProcessingState& state) {
3565 V<Word32> condition = Float64ToBit(Map(node->condition_input()));
3566 __ Branch(condition, Map(node->if_true()), Map(node->if_false()));
3567 return maglev::ProcessResult::kContinue;
3568 }
3570 const maglev::ProcessingState& state) {
3571 V<Word32> condition = __ Float64IsHole(Map(node->condition_input()));
3572 __ Branch(condition, Map(node->if_true()), Map(node->if_false()));
3573 return maglev::ProcessResult::kContinue;
3574 }
3576 const maglev::ProcessingState& state) {
3577 V<Word32> condition =
3578 __ TaggedEqual(Map(node->left_input()), Map(node->right_input()));
3579 __ Branch(condition, Map(node->if_true()), Map(node->if_false()));
3580 return maglev::ProcessResult::kContinue;
3581 }
3583 const maglev::ProcessingState& state) {
3584 V<Word32> condition =
3585 RootEqual(node->condition_input(), node->root_index());
3586 __ Branch(condition, Map(node->if_true()), Map(node->if_false()));
3587 return maglev::ProcessResult::kContinue;
3588 }
3590 const maglev::ProcessingState& state) {
3591 __ GotoIf(RootEqual(node->condition_input(), RootIndex::kUndefinedValue),
3592 Map(node->if_true()));
3593 __ Branch(RootEqual(node->condition_input(), RootIndex::kNullValue),
3594 Map(node->if_true()), Map(node->if_false()));
3595 return maglev::ProcessResult::kContinue;
3596 }
3598 const maglev::ProcessingState& state) {
3599 ObjectIsOp::InputAssumptions assumption;
3600 switch (node->check_type()) {
3601 case maglev::CheckType::kCheckHeapObject:
3602 assumption = ObjectIsOp::InputAssumptions::kNone;
3603 break;
3604 case maglev::CheckType::kOmitHeapObjectCheck:
3605 assumption = ObjectIsOp::InputAssumptions::kHeapObject;
3606 break;
3607 }
3608 __ Branch(__ ObjectIs(Map(node->condition_input()),
3609 ObjectIsOp::Kind::kUndetectable, assumption),
3610 Map(node->if_true()), Map(node->if_false()));
3611 return maglev::ProcessResult::kContinue;
3612 }
3614 const maglev::ProcessingState& state) {
3615 __ Branch(__ IsSmi(Map(node->condition_input())), Map(node->if_true()),
3616 Map(node->if_false()));
3617 return maglev::ProcessResult::kContinue;
3618 }
3620 const maglev::ProcessingState& state) {
3621 __ GotoIf(__ IsSmi(Map(node->condition_input())), Map(node->if_false()));
3622 __ Branch(__ JSAnyIsNotPrimitive(Map(node->condition_input())),
3623 Map(node->if_true()), Map(node->if_false()));
3624 return maglev::ProcessResult::kContinue;
3625 }
3626
3628 const maglev::ProcessingState& state) {
3630 // This is the main resume-switch for a generator, and some of its targets
3631 // bypass loop headers. We need to re-route the destinations to the
3632 // bypassed loop headers, where secondary switches will be inserted.
3633
3634 compiler::turboshaft::SwitchOp::Case* cases =
3635 __ output_graph().graph_zone()
3636 -> AllocateArray<compiler::turboshaft::SwitchOp::Case>(
3637 node->size());
3638
3639 DCHECK_EQ(0, node->value_base());
3640
3641 for (int i = 0; i < node->size(); i++) {
3642 maglev::BasicBlock* target = node->targets()[i].block_ptr();
3644 Block* new_dst = __ NewBlock();
3645
3646 const maglev::BasicBlock* innermost_bypassed_header =
3647 generator_analyzer_.GetInnermostBypassedHeader(target);
3648
3649 pre_loop_generator_blocks_[innermost_bypassed_header].push_back(
3650 {new_dst, Map(target), i});
3651
3652 // {innermost_bypassed_header} is only the innermost bypassed header.
3653 // We also need to record bypasses of outer headers. In the end, we
3654 // want this main Switch to go to before the outermost header, which
3655 // will dispatch to the next inner loop, and so on until the innermost
3656 // loop header and then to the initial destination.
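// For example, a resume target nested inside two bypassed loops gets the
// chain: main switch -> block before the outer header -> block before the
// inner header -> original target, built one bypassed header at a time by
// the loop below.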
3657 for (const maglev::BasicBlock* bypassed_header =
3658 generator_analyzer_.GetLoopHeader(innermost_bypassed_header);
3659 bypassed_header != nullptr;
3660 bypassed_header =
3661 generator_analyzer_.GetLoopHeader(bypassed_header)) {
3662 Block* prev_loop_dst = __ NewBlock();
3663 pre_loop_generator_blocks_[bypassed_header].push_back(
3664 {prev_loop_dst, new_dst, i});
3665 new_dst = prev_loop_dst;
3666 }
3667
3668 cases[i] = {i, new_dst, BranchHint::kNone};
3669
3670 } else {
3671 cases[i] = {i, Map(target), BranchHint::kNone};
3672 }
3673 }
3674
3675 Block* default_block = __ NewBlock();
3676 __ Switch(Map(node->value()), base::VectorOf(cases, node->size()),
3677 default_block);
3678 __ Bind(default_block);
3679 __ Unreachable();
3680
3682 }
3683
3685 __ output_graph().graph_zone()
3686 -> AllocateArray<compiler::turboshaft::SwitchOp::Case>(
3687 node->size());
3688 int case_value_base = node->value_base();
3689 for (int i = 0; i < node->size(); i++) {
3690 cases[i] = {i + case_value_base, Map(node->targets()[i].block_ptr()),
3692 }
3693 Block* default_block;
3694 bool emit_default_block = false;
3695 if (node->has_fallthrough()) {
3696 default_block = Map(state.next_block());
3697 } else {
3698 default_block = __ NewBlock();
3699 emit_default_block = true;
3700 }
3701 __ Switch(Map(node->value()), base::VectorOf(cases, node->size()),
3702 default_block);
3703 if (emit_default_block) {
3704 __ Bind(default_block);
3705 __ Unreachable();
3706 }
3708 }
3709
3711 const maglev::ProcessingState& state) {
3712 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
3713 SetMap(node,
3714 __ CheckedSmiUntag(Map(node->input()), frame_state,
3715 node->eager_deopt_info()->feedback_to_update()));
3717 }
3719 const maglev::ProcessingState& state) {
3720 SetMap(node, __ UntagSmi(Map(node->input())));
3722 }
3724 const maglev::ProcessingState& state) {
3725 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
3726 SetMap(
3727 node,
3728 __ ConvertUntaggedToJSPrimitiveOrDeopt(
3729 Map(node->input()), frame_state,
3733 node->eager_deopt_info()->feedback_to_update()));
3735 }
3737 const maglev::ProcessingState& state) {
3738 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
3739 SetMap(node,
3740 __ ConvertUntaggedToJSPrimitiveOrDeopt(
3741 Map(node->input()), frame_state,
3745 kUnsigned,
3746 node->eager_deopt_info()->feedback_to_update()));
3748 }
3749
3751 const maglev::ProcessingState& state) {
3752 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
3753 SetMap(
3754 node,
3755 __ ConvertUntaggedToJSPrimitiveOrDeopt(
3756 Map(node->input()), frame_state,
3760 node->eager_deopt_info()->feedback_to_update()));
3762 }
3763
3765 const maglev::ProcessingState& state) {
3766 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
3767 V<Word32> as_int32 = __ ChangeFloat64ToInt32OrDeopt(
3768 Map(node->input()), frame_state,
3770 node->eager_deopt_info()->feedback_to_update());
3771 SetMap(
3772 node,
3773 __ ConvertUntaggedToJSPrimitiveOrDeopt(
3774 as_int32, frame_state,
3778 node->eager_deopt_info()->feedback_to_update()));
3780 }
3782 const maglev::ProcessingState& state) {
3783 SetMap(node, __ TagSmi(Map(node->input())));
3785 }
3787 const maglev::ProcessingState& state) {
3788 SetMap(node, __ TagSmi(Map(node->input())));
3790 }
3791
3793 const maglev::ProcessingState& state) {
3794 // TODO(388844115): Rename the IntPtr in Maglev to make it clear it's
3795 // non-negative.
3796 SetMap(node, __ TagSmi(__ TruncateWordPtrToWord32(Map(node->input()))));
3798 }
3799
3800#define PROCESS_BINOP_WITH_OVERFLOW(MaglevName, TurboshaftName, \
3801 minus_zero_mode) \
3802 maglev::ProcessResult Process(maglev::Int32##MaglevName##WithOverflow* node, \
3803 const maglev::ProcessingState& state) { \
3804 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info()); \
3805 SetMap(node, \
3806 __ Word32##TurboshaftName##DeoptOnOverflow( \
3807 Map(node->left_input()), Map(node->right_input()), frame_state, \
3808 node->eager_deopt_info()->feedback_to_update(), \
3809 CheckForMinusZeroMode::k##minus_zero_mode)); \
3810 return maglev::ProcessResult::kContinue; \
3811 }
3812 PROCESS_BINOP_WITH_OVERFLOW(Add, SignedAdd, DontCheckForMinusZero)
3813 PROCESS_BINOP_WITH_OVERFLOW(Subtract, SignedSub, DontCheckForMinusZero)
3814 PROCESS_BINOP_WITH_OVERFLOW(Multiply, SignedMul, CheckForMinusZero)
3815 PROCESS_BINOP_WITH_OVERFLOW(Divide, SignedDiv, CheckForMinusZero)
3816 PROCESS_BINOP_WITH_OVERFLOW(Modulus, SignedMod, CheckForMinusZero)
3817#undef PROCESS_BINOP_WITH_OVERFLOW
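// Note (annotation): only Multiply/Divide/Modulus check for minus zero.
// With Int32 inputs (which cannot themselves be -0), an addition or
// subtraction can never produce -0 under JS number semantics, whereas
// e.g. -5 * 0 must yield -0, which an Int32 result cannot represent and
// thus requires a deopt.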
3818 maglev::ProcessResult Process(maglev::Int32IncrementWithOverflow* node,
3819 const maglev::ProcessingState& state) {
3820 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
3821 // Turboshaft doesn't have a dedicated Increment operation; we use a regular
3822 // addition instead.
3823 SetMap(node, __ Word32SignedAddDeoptOnOverflow(
3824 Map(node->value_input()), 1, frame_state,
3825 node->eager_deopt_info()->feedback_to_update()));
3827 }
3828 maglev::ProcessResult Process(maglev::Int32DecrementWithOverflow* node,
3829 const maglev::ProcessingState& state) {
3830 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
3831 // Turboshaft doesn't have a dedicated Decrement operation; we use a regular
3832 // subtraction instead.
3833 SetMap(node, __ Word32SignedSubDeoptOnOverflow(
3834 Map(node->value_input()), 1, frame_state,
3835 node->eager_deopt_info()->feedback_to_update()));
3837 }
3838 maglev::ProcessResult Process(maglev::Int32NegateWithOverflow* node,
3839 const maglev::ProcessingState& state) {
3840 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
3841 // Turboshaft doesn't have an Int32NegateWithOverflow operation, but
3842 // Turbofan emits multiplications by -1 for this, so we do the same
3843 // here.
3844 SetMap(node, __ Word32SignedMulDeoptOnOverflow(
3845 Map(node->value_input()), -1, frame_state,
3846 node->eager_deopt_info()->feedback_to_update(),
3849 }
3850
3851#define PROCESS_FLOAT64_BINOP(MaglevName, TurboshaftName) \
3852 maglev::ProcessResult Process(maglev::Float64##MaglevName* node, \
3853 const maglev::ProcessingState& state) { \
3854 SetMap(node, __ Float64##TurboshaftName(Map(node->left_input()), \
3855 Map(node->right_input()))); \
3856 return maglev::ProcessResult::kContinue; \
3857 }
3858 PROCESS_FLOAT64_BINOP(Add, Add)
3859 PROCESS_FLOAT64_BINOP(Subtract, Sub)
3860 PROCESS_FLOAT64_BINOP(Multiply, Mul)
3861 PROCESS_FLOAT64_BINOP(Divide, Div)
3862 PROCESS_FLOAT64_BINOP(Modulus, Mod)
3863 PROCESS_FLOAT64_BINOP(Exponentiate, Power)
3864#undef PROCESS_FLOAT64_BINOP
3865
3866#define PROCESS_INT32_BITWISE_BINOP(Name) \
3867 maglev::ProcessResult Process(maglev::Int32Bitwise##Name* node, \
3868 const maglev::ProcessingState& state) { \
3869 SetMap(node, __ Word32Bitwise##Name(Map(node->left_input()), \
3870 Map(node->right_input()))); \
3871 return maglev::ProcessResult::kContinue; \
3872 }
3876#undef PROCESS_INT32_BITWISE_BINOP
3877
3878#define PROCESS_INT32_SHIFT(MaglevName, TurboshaftName) \
3879 maglev::ProcessResult Process(maglev::Int32##MaglevName* node, \
3880 const maglev::ProcessingState& state) { \
3881 V<Word32> right = Map(node->right_input()); \
3882 if (!SupportedOperations::word32_shift_is_safe()) { \
3883 /* JavaScript spec says that the right-hand side of a shift should be \
3884 * taken modulo 32. Some architectures do this automatically, some \
3885 * don't. For those that don't, we do this modulo 32 with a `& 0x1f`. \
3886 */ \
3887 right = __ Word32BitwiseAnd(right, 0x1f); \
3888 } \
3889 SetMap(node, __ Word32##TurboshaftName(Map(node->left_input()), right)); \
3890 return maglev::ProcessResult::kContinue; \
3891 }
3892 PROCESS_INT32_SHIFT(ShiftLeft, ShiftLeft)
3893 PROCESS_INT32_SHIFT(ShiftRight, ShiftRightArithmetic)
3894#undef PROCESS_INT32_SHIFT
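// Illustration (annotation): in JavaScript, (1 << 33) === 2, because the
// shift count is taken modulo 32 and 33 & 0x1f === 1.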
3895
3897 const maglev::ProcessingState& state) {
3898 V<Word32> right = Map(node->right_input());
3899 if (!SupportedOperations::word32_shift_is_safe()) {
3900 // JavaScript spec says that the right-hand side of a shift should be
3901 // taken modulo 32. Some architectures do this automatically, some
3902 // don't. For those that don't, we do this modulo 32 with a `& 0x1f`.
3903 right = __ Word32BitwiseAnd(right, 0x1f);
3904 }
3905 V<Word32> ts_op =
3906 __ Word32ShiftRightLogical(Map(node->left_input()), right);
3907 SetMap(node, __ Word32SignHintUnsigned(ts_op));
3909 }
3910
3912 const maglev::ProcessingState& state) {
3913 // Turboshaft doesn't have a bitwise Not operator; we instead use "^ -1".
3914 SetMap(node, __ Word32BitwiseXor(Map(node->value_input()),
3915 __ Word32Constant(-1)));
3917 }
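// This works because -1 is all ones in two's complement, so x ^ -1 flips
// every bit of x, i.e. computes ~x.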
3919 const maglev::ProcessingState& state) {
3920 V<Word32> input = Map(node->input());
3921 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
3923
3924 IF (__ Int32LessThan(input, 0)) {
3925 V<Tuple<Word32, Word32>> result_with_ovf =
3926 __ Int32MulCheckOverflow(input, -1);
3927 __ DeoptimizeIf(__ Projection<1>(result_with_ovf), frame_state,
3928 DeoptimizeReason::kOverflow,
3929 node->eager_deopt_info()->feedback_to_update());
3930 result = __ Projection<0>(result_with_ovf);
3931 }
3932
3933 SetMap(node, result);
3939 SetMap(node, __ Float64Negate(Map(node->input())));
3941 }
3943 const maglev::ProcessingState& state) {
3944 SetMap(node, __ Float64Abs(Map(node->input())));
3946 }
3948 const maglev::ProcessingState& state) {
3949 if (node->kind() == maglev::Float64Round::Kind::kFloor) {
3950 SetMap(node, __ Float64RoundDown(Map(node->input())));
3951 } else if (node->kind() == maglev::Float64Round::Kind::kCeil) {
3952 SetMap(node, __ Float64RoundUp(Map(node->input())));
3953 } else {
3955 // Nearest rounds to +infinity on ties. We emulate this by rounding up and
3956 // adjusting if the difference exceeds 0.5 (as SimplifiedLowering does
3957 // when lowering Float64Round).
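// Worked example (annotation): for input 0.4, RoundUp gives 1.0, and
// 1.0 - 0.5 = 0.5 > 0.4, so we adjust down to 0.0; for the tie 0.5,
// RoundUp gives 1.0 and 1.0 - 0.5 <= 0.5 holds, so 1.0 is kept, matching
// round-half-towards-+infinity.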
3958 OpIndex input = Map(node->input());
3959 ScopedVar<Float64, AssemblerT> result(this, __ Float64RoundUp(input));
3960 IF_NOT (__ Float64LessThanOrEqual(__ Float64Sub(result, 0.5), input)) {
3961 result = __ Float64Sub(result, 1.0);
3962 }
3963
3964 SetMap(node, result);
3965 }
3967 }
3968
3970 const maglev::ProcessingState& state) {
3972 switch (node->ieee_function()) {
3973#define CASE(MathName, ExpName, EnumName) \
3974 case maglev::Float64Ieee754Unary::Ieee754Function::k##EnumName: \
3975 kind = FloatUnaryOp::Kind::k##EnumName; \
3976 break;
3978#undef CASE
3979 }
3980 SetMap(node, __ Float64Unary(Map(node->input()), kind));
3982 }
3983
3985 const maglev::ProcessingState& state) {
3986 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
3987 V<Smi> result;
3988 if constexpr (SmiValuesAre31Bits()) {
3989 result = __ BitcastWord32ToSmi(__ Word32SignedAddDeoptOnOverflow(
3990 __ BitcastSmiToWord32(Map(node->value_input())),
3991 Smi::FromInt(1).ptr(), frame_state,
3992 node->eager_deopt_info()->feedback_to_update()));
3993 } else {
3994 // Remember that 32-bit Smis are stored in the upper 32 bits of 64-bit
3995 // qwords. We thus perform a 64-bit addition rather than a 32-bit one,
3996 // despite Smis being only 32 bits.
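// Illustration (annotation, assuming V8's full-pointer Smi layout): the
// Smi 5 is the 64-bit word 0x0000'0005'0000'0000 and Smi::FromInt(1).ptr()
// is 1 << 32, so adding the raw words increments the payload directly,
// while the 64-bit overflow check catches results outside the Smi range.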
3997 result = __ BitcastWordPtrToSmi(__ WordPtrSignedAddDeoptOnOverflow(
3998 __ BitcastSmiToWordPtr(Map(node->value_input())),
3999 Smi::FromInt(1).ptr(), frame_state,
4000 node->eager_deopt_info()->feedback_to_update()));
4001 }
4004 }
4006 const maglev::ProcessingState& state) {
4007 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
4008 V<Smi> result;
4009 if constexpr (SmiValuesAre31Bits()) {
4010 result = __ BitcastWord32ToSmi(__ Word32SignedSubDeoptOnOverflow(
4011 __ BitcastSmiToWord32(Map(node->value_input())),
4012 Smi::FromInt(1).ptr(), frame_state,
4013 node->eager_deopt_info()->feedback_to_update()));
4014 } else {
4015 result = __ BitcastWordPtrToSmi(__ WordPtrSignedSubDeoptOnOverflow(
4016 __ BitcastSmiToWordPtr(Map(node->value_input())),
4017 Smi::FromInt(1).ptr(), frame_state,
4018 node->eager_deopt_info()->feedback_to_update()));
4019 }
4020 SetMap(node, result);
4022 }
4023
4024// Note that Maglev collects feedback in the generic binops and unops, so that
4025// Turbofan has a chance to get better feedback. However, once we reach
4026// Turbofan, we stop collecting feedback: we've tried multiple times to keep
4027// collecting feedback in Turbofan, but it never seemed worth it. The latest
4028// such attempt was removed by this CL: https://crrev.com/c/4110858.
4029#define PROCESS_GENERIC_BINOP(Name) \
4030 maglev::ProcessResult Process(maglev::Generic##Name* node, \
4031 const maglev::ProcessingState& state) { \
4032 ThrowingScope throwing_scope(this, node); \
4033 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info()); \
4034 SetMap(node, \
4035 __ Generic##Name(Map(node->left_input()), Map(node->right_input()), \
4036 frame_state, native_context(), \
4037 ShouldLazyDeoptOnThrow(node))); \
4038 return maglev::ProcessResult::kContinue; \
4039 }
4041#undef PROCESS_GENERIC_BINOP
4042
4043#define PROCESS_GENERIC_UNOP(Name) \
4044 maglev::ProcessResult Process(maglev::Generic##Name* node, \
4045 const maglev::ProcessingState& state) { \
4046 ThrowingScope throwing_scope(this, node); \
4047 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info()); \
4048 SetMap(node, \
4049 __ Generic##Name(Map(node->operand_input()), frame_state, \
4050 native_context(), ShouldLazyDeoptOnThrow(node))); \
4051 return maglev::ProcessResult::kContinue; \
4052 }
4054#undef PROCESS_GENERIC_UNOP
4055
4057 const maglev::ProcessingState& state) {
4058 ThrowingScope throwing_scope(this, node);
4059 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
4060 SetMap(node, __ ToNumberOrNumeric(Map(node->value_input()), frame_state,
4061 native_context(), node->mode(),
4062 ShouldLazyDeoptOnThrow(node)));
4064 }
4065
4067 const maglev::ProcessingState& state) {
4068 V<Word32> condition = __ TaggedEqual(
4069 Map(node->value()), __ HeapConstant(local_factory_->true_value()));
4070 SetMap(node, ConvertWord32ToJSBool(condition, /*flip*/ true));
4081 SetMap(node, ConvertWord32ToJSBool(condition, /*flip*/ true));
4083 }
4084
4086 const maglev::ProcessingState& state) {
4088 switch (node->check_type()) {
4090 input_assumptions =
4092 break;
4094 input_assumptions =
4096 break;
4097 }
4098 SetMap(node,
4099 ConvertWord32ToJSBool(ToBit(node->value(), input_assumptions)));
4101 }
4103 const maglev::ProcessingState& state) {
4104 SetMap(node, ConvertWord32ToJSBool(Map(node->value()), node->flip()));
4110 SetMap(node, ConvertWordPtrToJSBool(Map(node->value()), node->flip()));
4122 SetMap(node, __ ConvertInt32ToNumber(Map(node->input())));
4124 }
4126 const maglev::ProcessingState& state) {
4127 SetMap(node, __ ConvertUint32ToNumber(Map(node->input())));
4133 SetMap(node, __ ConvertIntPtrToNumber(Map(node->input())));
4139 SetMap(node, Float64ToTagged(Map(node->input()), node->conversion_mode()));
4141 }
4143 const maglev::ProcessingState& state) {
4144 SetMap(node,
4145 HoleyFloat64ToTagged(Map(node->input()), node->conversion_mode()));
4147 }
4149 const maglev::ProcessingState& state) {
4150 // We don't use ConvertUntaggedToJSPrimitive but instead the lower level
4151 // AllocateHeapNumberWithValue helper, because ConvertUntaggedToJSPrimitive
4152 // can be GVNed, which we don't want for Float64ToHeapNumberForField, since
4153 // it creates a mutable HeapNumber, that will then be owned by an object
4154 // field.
4155 SetMap(node, __ AllocateHeapNumberWithValue(Map(node->input()),
4156 isolate_->factory()));
4162 SetMap(node, ConvertWord32ToJSBool(__ Float64IsHole(Map(node->input()))));
4164 }
4165
4166 template <typename NumberToFloat64Op>
4167 requires(std::is_same_v<NumberToFloat64Op,
4193 node->eager_deopt_info()->feedback_to_update()));
4195 }
4197 const maglev::ProcessingState& state) {
4198 // `node->conversion_type()` doesn't matter here, since for both HeapNumbers
4199 // and Oddballs, the Float64 value is at the same index (and this node never
4200 // deopts, regardless of its input).
4201 SetMap(node, __ ConvertJSPrimitiveToUntagged(
4202 Map(node->input()),
4207 }
4208 maglev::ProcessResult Process(maglev::TruncateUint32ToInt32* node,
4209 const maglev::ProcessingState& state) {
4210 // This doesn't matter in Turboshaft: both Uint32 and Int32 are Word32.
4211 SetMap(node, __ Word32SignHintSigned(Map(node->input())));
4213 }
4215 const maglev::ProcessingState& state) {
4216 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
4217 __ DeoptimizeIf(__ Int32LessThan(Map(node->input()), 0), frame_state,
4218 DeoptimizeReason::kNotUint32,
4219 node->eager_deopt_info()->feedback_to_update());
4220 SetMap(node, __ Word32SignHintUnsigned(Map(node->input())));
4222 }
4223
4225 const maglev::ProcessingState& state) {
4226 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
4227 // TODO(388844115): Rename the IntPtr in Maglev to make it clear it's
4228 // non-negative.
4229 __ DeoptimizeIfNot(
4230 __ UintPtrLessThanOrEqual(Map(node->input()),
4231 std::numeric_limits<uint32_t>::max()),
4232 frame_state, DeoptimizeReason::kNotUint32,
4233 node->eager_deopt_info()->feedback_to_update());
4234 SetMap(node, __ Word32SignHintUnsigned(
4235 __ TruncateWordPtrToWord32(Map(node->input()))));
4237 }
4238
4240 const maglev::ProcessingState& state) {
4241 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
4242 __ DeoptimizeIf(__ Int32LessThan(Map(node->input()), 0), frame_state,
4243 DeoptimizeReason::kNotInt32,
4244 node->eager_deopt_info()->feedback_to_update());
4245 SetMap(node, __ Word32SignHintSigned(Map(node->input())));
4247 }
4248
4250 const maglev::ProcessingState& state) {
4251 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
4252 // TODO(388844115): Rename the IntPtr in Maglev to make it clear it's
4253 // non-negative.
4254 __ DeoptimizeIfNot(
4255 __ UintPtrLessThanOrEqual(Map(node->input()),
4256 std::numeric_limits<int32_t>::max()),
4257 frame_state, DeoptimizeReason::kNotInt32,
4258 node->eager_deopt_info()->feedback_to_update());
4259 SetMap(node, __ Word32SignHintSigned(
4260 __ TruncateWordPtrToWord32(Map(node->input()))));
4266 SetMap(node, __ Word32SignHintUnsigned(Map(node->input())));
4268 }
4270 const maglev::ProcessingState& state) {
4271 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
4272 const FeedbackSource& feedback =
4273 node->eager_deopt_info()->feedback_to_update();
4274 OpIndex result = __ ConvertJSPrimitiveToUntaggedOrDeopt(
4275 Map(node->object_input()), frame_state,
4279 if constexpr (Is64()) {
4280 // ArrayIndex is 32-bit in Maglev, but 64-bit in Turboshaft. This means that
4281 // we have to convert it to 32-bit before the following `SetMap`, and we
4282 // thus have to check that it actually fits in a Uint32.
4283 __ DeoptimizeIfNot(__ Uint64LessThanOrEqual(
4284 result, std::numeric_limits<uint32_t>::max()),
4285 frame_state, DeoptimizeReason::kNotInt32, feedback);
4287 }
4288 SetMap(node, Is64() ? __ TruncateWord64ToWord32(result) : result);
4290 }
4292 const maglev::ProcessingState& state) {
4293 SetMap(node, __ ChangeInt32ToFloat64(Map(node->input())));
4295 }
4297 const maglev::ProcessingState& state) {
4298 SetMap(node, __ ChangeUint32ToFloat64(Map(node->input())));
4304 SetMap(node, __ ChangeIntPtrToFloat64(Map(node->input())));
4306 }
4307
4309 const maglev::ProcessingState& state) {
4310 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
4311 SetMap(node, __ ChangeFloat64ToInt32OrDeopt(
4312 Map(node->input()), frame_state,
4314 node->eager_deopt_info()->feedback_to_update()));
4316 }
4318 const maglev::ProcessingState& state) {
4319 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
4320 SetMap(node, __ ChangeFloat64ToUint32OrDeopt(
4321 Map(node->input()), frame_state,
4323 node->eager_deopt_info()->feedback_to_update()));
4325 }
4328 const maglev::ProcessingState& state) {
4330 switch (node->conversion_type()) {
4332 input_requirement =
4333 TruncateJSPrimitiveToUntaggedOrDeoptOp::InputRequirement::kNumber;
4334 break;
4336 input_requirement = TruncateJSPrimitiveToUntaggedOrDeoptOp::
4337 InputRequirement::kNumberOrBoolean;
4338 break;
4340 input_requirement = TruncateJSPrimitiveToUntaggedOrDeoptOp::
4341 InputRequirement::kNumberOrOddball;
4342 break;
4343 }
4344 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
4345 SetMap(
4346 node,
4347 __ TruncateJSPrimitiveToUntaggedOrDeopt(
4348 Map(node->input()), frame_state,
4350 input_requirement, node->eager_deopt_info()->feedback_to_update()));
4352 }
4354 const maglev::ProcessingState& state) {
4355 // In Maglev, TruncateNumberOrOddballToInt32 does the same thing for both
4356 // NumberOrOddball and Number; except when debug_code is enabled: then,
4357 // Maglev inserts runtime checks ensuring that the input is indeed a Number
4358 // or NumberOrOddball. Turboshaft doesn't typically introduce such runtime
4359 // checks, so we instead just lower both Number and NumberOrOddball to the
4360 // NumberOrOddball variant.
4361 SetMap(node, __ TruncateJSPrimitiveToUntagged(
4362 Map(node->input()),
4367 }
4368 maglev::ProcessResult Process(maglev::TruncateFloat64ToInt32* node,
4369 const maglev::ProcessingState& state) {
4370 SetMap(node, __ JSTruncateFloat64ToWord32(Map(node->input())));
4372 }
4374 const maglev::ProcessingState& state) {
4375 SetMap(node, __ Float64SilenceNaN(Map(node->input())));
4377 }
4379 const maglev::ProcessingState& state) {
4380 V<Float64> input = Map(node->input());
4381 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
4382
4383 __ DeoptimizeIf(__ Float64IsHole(input), frame_state,
4384 DeoptimizeReason::kHole,
4385 node->eager_deopt_info()->feedback_to_update());
4386
4387 SetMap(node, input);
4389 }
4391 const maglev::ProcessingState& state) {
4392 V<Word32> cond = RootEqual(node->object_input(), RootIndex::kTheHoleValue);
4393 SetMap(node,
4394 __ Select(cond, undefined_value_, Map<Object>(node->object_input()),
4398 }
4400 const maglev::ProcessingState& state) {
4401 NoThrowingScopeRequired no_throws(node);
4402
4403 Label<Object> done(this);
4404 Label<> non_js_receiver(this);
4405 V<Object> receiver = Map(node->receiver_input());
4406
4407 GOTO_IF(__ IsSmi(receiver), non_js_receiver);
4408
4409 GOTO_IF(__ JSAnyIsNotPrimitive(V<HeapObject>::Cast(receiver)), done,
4410 receiver);
4411
4412 if (node->mode() != ConvertReceiverMode::kNotNullOrUndefined) {
4413 Label<> convert_global_proxy(this);
4414 GOTO_IF(__ RootEqual(receiver, RootIndex::kUndefinedValue, isolate_),
4415 convert_global_proxy);
4416 GOTO_IF_NOT(__ RootEqual(receiver, RootIndex::kNullValue, isolate_),
4417 non_js_receiver);
4418 GOTO(convert_global_proxy);
4419 BIND(convert_global_proxy);
4420 GOTO(done,
4421 __ HeapConstant(
4422 node->native_context().global_proxy_object(broker_).object()));
4423 } else {
4424 GOTO(non_js_receiver);
4425 }
4426
4427 BIND(non_js_receiver);
4428 GOTO(done, __ CallBuiltin_ToObject(
4429 isolate_, __ HeapConstant(node->native_context().object()),
4431
4432 BIND(done, result);
4433 SetMap(node, result);
4434
4438 static constexpr int kMinClampedUint8 = 0;
4439 static constexpr int kMaxClampedUint8 = 255;
4442 IF (__ Int32LessThan(value, kMinClampedUint8)) {
4443 result = __ Word32Constant(kMinClampedUint8);
4444 } ELSE IF (__ Int32LessThan(value, kMaxClampedUint8)) {
4445 result = value;
4446 } ELSE {
4447 result = __ Word32Constant(kMaxClampedUint8);
4449 return result;
4450 }
4453 IF (__ Float64LessThan(value, kMinClampedUint8)) {
4454 result = __ Word32Constant(kMinClampedUint8);
4455 } ELSE IF (__ Float64LessThan(kMaxClampedUint8, value)) {
4456 result = __ Word32Constant(kMaxClampedUint8);
4457 } ELSE {
4458 // Note that this case handles both values that are in Clamped Uint8 range
4459 // and NaN. The order of the IF/ELSE-IF/ELSE in this function is so that
4460 // we do indeed end up here for NaN.
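// Illustration (annotation): for NaN, both comparisons above are false
// (any comparison involving NaN is false), so we reach this branch, where
// JSTruncateFloat64ToWord32(Float64RoundTiesEven(NaN)) yields 0, the
// expected clamped value for NaN.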
4461 result = __ JSTruncateFloat64ToWord32(__ Float64RoundTiesEven(value));
4462 }
4468 SetMap(node, Int32ToUint8Clamped(Map(node->input())));
4470 }
4472 const maglev::ProcessingState& state) {
4474 V<Word32> value = Map(node->input());
4475 IF (__ Uint32LessThan(value, kMaxClampedUint8)) {
4476 result = value;
4477 } ELSE {
4478 result = __ Word32Constant(kMaxClampedUint8);
4479 }
4485 SetMap(node, Float64ToUint8Clamped(Map(node->input())));
4487 }
4489 const maglev::ProcessingState& state) {
4491 V<Object> value = Map(node->input());
4492 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
4493 IF (__ IsSmi(value)) {
4494 result = Int32ToUint8Clamped(__ UntagSmi(V<Smi>::Cast(value)));
4495 } ELSE {
4496 V<i::Map> map = __ LoadMapField(value);
4497 __ DeoptimizeIfNot(
4498 __ TaggedEqual(map,
4499 __ HeapConstant(local_factory_->heap_number_map())),
4500 frame_state, DeoptimizeReason::kNotAHeapNumber,
4501 node->eager_deopt_info()->feedback_to_update());
4503 __ LoadHeapNumberValue(V<HeapNumber>::Cast(value)));
4504 }
4506 SetMap(node, result);
4508 }
4509
4511 const maglev::ProcessingState& state) {
4512 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
4513 OpIndex arguments[] = {Map(node->value_input()), Map(node->context())};
4514
4515 GENERATE_AND_MAP_BUILTIN_CALL(node, Builtin::kToObject, frame_state,
4516 base::VectorOf(arguments));
4517
4523 __ Return(Map(node->value_input()));
4525 }
4526
4528 const maglev::ProcessingState& state) {
4529 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());
4530 __ Deoptimize(frame_state, node->deoptimize_reason(),
4531 node->eager_deopt_info()->feedback_to_update());
4533 }
4534
4536 const maglev::ProcessingState& state) {
4537 V<WordPtr> message_address = __ ExternalConstant(
4539 V<Object> old_message = __ LoadMessage(message_address);
4540 __ StoreMessage(message_address, Map(node->value()));
4541 SetMap(node, old_message);
4543 }
4544
4546 const maglev::ProcessingState& state) {
4547 base::SmallVector<OpIndex, 32> parameters_and_registers;
4548 int num_parameters_and_registers = node->num_parameters_and_registers();
4549 for (int i = 0; i < num_parameters_and_registers; i++) {
4550 parameters_and_registers.push_back(
4551 Map(node->parameters_and_registers(i)));
4552 }
4553 __ GeneratorStore(Map(node->context_input()), Map(node->generator_input()),
4554 parameters_and_registers, node->suspend_id(),
4555 node->bytecode_offset());
4557 }
4559 const maglev::ProcessingState& state) {
4560 V<FixedArray> array = Map(node->array_input());
4562 __ LoadTaggedField(array, FixedArray::OffsetOfElementAt(node->index()));
4563 __ Store(array, Map(node->stale_input()), StoreOp::Kind::TaggedBase(),
4566 FixedArray::OffsetOfElementAt(node->index()));
4567
4568 SetMap(node, result);
4569
4571 }
4572
4574 const maglev::ProcessingState& state) {
4575 __ RuntimeAbort(node->reason());
4576 // TODO(dmercadier): remove this `Unreachable` once RuntimeAbort is marked
4577 // as a block terminator.
4578 __ Unreachable();
4584 SetMap(node, Map(node->input(0)));
4586 }
4587
4589 // Nothing to do; `Dead` is in Maglev to kill a node when removing it
4590 // directly from the graph is not possible.
4596 __ DebugBreak();
4598 }
4599
4601 const maglev::ProcessingState&) {
4602 // GapMove nodes are created by Maglev's register allocator, which
4603 // doesn't run when using Maglev as a frontend for Turboshaft.
4604 UNREACHABLE();
4605 }
4607 const maglev::ProcessingState&) {
4608 // ConstantGapMove nodes are created by Maglev's register allocator, which
4609 // doesn't run when using Maglev as a frontend for Turboshaft.
4615 // VirtualObjects should never be part of the Maglev graph.
4617 }
4618
4620 const maglev::ProcessingState& state) {
4623
4624#ifdef DEBUG
4626#endif
4627
4633 // Turboshaft is the top tier compiler, so we never need to OSR from it.
4635 }
4637 const maglev::ProcessingState&) {
4638 // No need to update the interrupt budget once we reach Turboshaft.
4640 }
4642 const maglev::ProcessingState&) {
4643 // ReduceInterruptBudgetForLoop nodes are not emitted by Maglev when it is
4644 // used as a frontend for Turboshaft.
4645 UNREACHABLE();
4646 }
4648 const maglev::ProcessingState&) {
4649 GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());
4650 __ JSLoopStackCheck(native_context(), frame_state);
4652 }
4653
4654#ifdef V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA
4657 const maglev::ProcessingState&) {
4658 V<Object> data = __ GetContinuationPreservedEmbedderData();
4659 SetMap(node, data);
4661 }
4662
4665 const maglev::ProcessingState&) {
4666 V<Object> data = Map(node->input(0));
4667 __ SetContinuationPreservedEmbedderData(data);
4670#endif // V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA
4671
4673 const maglev::ProcessingState&) {
4674 bool negate_result = false;
4675 V<Word32> cmp = ConvertInt32Compare(node->left_input(), node->right_input(),
4676 node->condition(), &negate_result);
4677 Label<> abort(this);
4678 Label<> end(this);
4679 if (negate_result) {
4680 GOTO_IF(cmp, abort);
4681 } else {
4682 GOTO_IF_NOT(cmp, abort);
4683 }
4684 GOTO(end);
4685
4686 BIND(abort);
4687 __ RuntimeAbort(node->reason());
4688 __ Unreachable();
4689
4690 BIND(end);
4692 }
4693
4695 const maglev::ProcessingState&) {
4696 // CallSelf nodes are only created when Maglev is the top-tier compiler
4697 // (which can't be the case here, since we're currently compiling for
4698 // Turboshaft).
4699 UNREACHABLE();
4701
4702 // Nodes unused by maglev but still existing.
4709 UNREACHABLE();
4710 }
4711 maglev::ProcessResult Process(maglev::UnsafeTruncateUint32ToInt32*,
4713 UNREACHABLE();
4714 }
4715 maglev::ProcessResult Process(maglev::UnsafeTruncateFloat64ToInt32*,
4720 const maglev::ProcessingState&) {
4724 AssemblerT& Asm() { return assembler_; }
4726 Zone* graph_zone() { return Asm().output_graph().graph_zone(); }
4727
4728 bool IsMapped(const maglev::NodeBase* node) const {
4730 return true;
4731 }
4732 return node_mapping_.count(node);
4734
4735 private:
4737 maglev::EagerDeoptInfo* eager_deopt_info) {
4739 // Eager deopts don't have a result location/size.
4740 const interpreter::Register result_location =
4742 const int result_size = 0;
4743 const maglev::VirtualObjectList virtual_objects =
4744 eager_deopt_info->top_frame().GetVirtualObjects();
4745
4746 switch (eager_deopt_info->top_frame().type()) {
4748 return BuildFrameState(eager_deopt_info->top_frame().as_interpreted(),
4749 virtual_objects, result_location, result_size);
4751 return BuildFrameState(
4752 eager_deopt_info->top_frame().as_builtin_continuation(),
4753 virtual_objects);
4756 UNIMPLEMENTED();
4758 }
4759
4761 maglev::LazyDeoptInfo* lazy_deopt_info) {
4763 const maglev::VirtualObjectList virtual_objects =
4764 lazy_deopt_info->top_frame().GetVirtualObjects();
4765 switch (lazy_deopt_info->top_frame().type()) {
4767 return BuildFrameState(
4768 lazy_deopt_info->top_frame().as_interpreted(), virtual_objects,
4769 lazy_deopt_info->result_location(), lazy_deopt_info->result_size());
4771 return BuildFrameState(lazy_deopt_info->top_frame().as_construct_stub(),
4772 virtual_objects);
4773
4775 return BuildFrameState(
4776 lazy_deopt_info->top_frame().as_builtin_continuation(),
4777 virtual_objects);
4778
4780 UNIMPLEMENTED();
4782 }
4783
4785 maglev::DeoptFrame& frame,
4786 const maglev::VirtualObjectList& virtual_objects) {
4787 // Only the topmost frame should have a valid result_location and
4788 // result_size. One reason for this is that, in Maglev, the PokeAt is not an
4789 // attribute of the DeoptFrame but rather of the LazyDeoptInfo (to which the
4790 // topmost frame is attached).
4791 const interpreter::Register result_location =
4793 const int result_size = 0;
4794
4795 switch (frame.type()) {
4797 return BuildFrameState(frame.as_interpreted(), virtual_objects,
4798 result_location, result_size);
4800 return BuildFrameState(frame.as_construct_stub(), virtual_objects);
4802 return BuildFrameState(frame.as_inlined_arguments(), virtual_objects);
4805 virtual_objects);
4807 }
4808
4811 const maglev::VirtualObjectList& virtual_objects) {
4813 if (frame.parent() != nullptr) {
4814 OptionalV<FrameState> parent_frame =
4815 BuildParentFrameState(*frame.parent(), virtual_objects);
4816 if (!parent_frame.has_value()) return OptionalV<FrameState>::Nullopt();
4817 builder.AddParentFrameState(parent_frame.value());
4818 }
4819
4820 // Closure
4821 // TODO(dmercadier): ConstructInvokeStub frames don't have a Closure input,
4822 // but the instruction selector assumes that they do and that it should be
4823 // skipped. We thus use SmiConstant(0) as a fake Closure input here, but it
4824 // would be nicer to fix the instruction selector to not require this input
4825 // at all for such frames.
4826 V<Any> fake_closure_input = __ SmiZeroConstant();
4827 builder.AddInput(MachineType::AnyTagged(), fake_closure_input);
4828
4829 // Parameters
4830 AddDeoptInput(builder, virtual_objects, frame.receiver());
4831
4832 // Context
4833 AddDeoptInput(builder, virtual_objects, frame.context());
4834
4835 if (builder.Inputs().size() >
4836 std::numeric_limits<decltype(Operation::input_count)>::max() - 1) {
4837 *bailout_ = BailoutReason::kTooManyArguments;
4839 }
4840
4841 const FrameStateInfo* frame_state_info = MakeFrameStateInfo(frame);
4842 return __ FrameState(
4843 builder.Inputs(), builder.inlined(),
4844 builder.AllocateFrameStateData(*frame_state_info, graph_zone()));
4845 }
4846
4849 const maglev::VirtualObjectList& virtual_objects) {
4851 if (frame.parent() != nullptr) {
4852 OptionalV<FrameState> parent_frame =
4853 BuildParentFrameState(*frame.parent(), virtual_objects);
4854 if (!parent_frame.has_value()) return OptionalV<FrameState>::Nullopt();
4855 builder.AddParentFrameState(parent_frame.value());
4856 }
4857
4858 // Closure
4859 AddDeoptInput(builder, virtual_objects, frame.closure());
4860
4861 // Parameters
4862 for (const maglev::ValueNode* arg : frame.arguments()) {
4863 AddDeoptInput(builder, virtual_objects, arg);
4864 }
4865
4866 // Context
4867 // TODO(dmercadier): InlinedExtraArguments frames don't have a Context
4868 // input, but the instruction selector assumes that they do and that it
4869 // should be skipped. We thus use SmiConstant(0) as a fake Context input
4870 // here, but it would be nicer to fix the instruction selector to not
4871 // require this input at all for such frames.
4872 V<Any> fake_context_input = __ SmiZeroConstant();
4873 builder.AddInput(MachineType::AnyTagged(), fake_context_input);
4874
4875 if (builder.Inputs().size() >
4876 std::numeric_limits<decltype(Operation::input_count)>::max() - 1) {
4877 *bailout_ = BailoutReason::kTooManyArguments;
4879 }
4880
4881 const FrameStateInfo* frame_state_info = MakeFrameStateInfo(frame);
4882 return __ FrameState(
4883 builder.Inputs(), builder.inlined(),
4884 builder.AllocateFrameStateData(*frame_state_info, graph_zone()));
4885 }
4886
4889 const maglev::VirtualObjectList& virtual_objects) {
4891 if (frame.parent() != nullptr) {
4892 OptionalV<FrameState> parent_frame =
4893 BuildParentFrameState(*frame.parent(), virtual_objects);
4894 if (!parent_frame.has_value()) return OptionalV<FrameState>::Nullopt();
4895 builder.AddParentFrameState(parent_frame.value());
4896 }
4897
4898 // Closure
4899 if (frame.is_javascript()) {
4901 __ HeapConstant(frame.javascript_target().object()));
4902 } else {
4903 builder.AddUnusedRegister();
4904 }
4905
4906 // Parameters
4907 for (maglev::ValueNode* param : frame.parameters()) {
4908 AddDeoptInput(builder, virtual_objects, param);
4909 }
4910
4911 // Extra fixed JS frame parameters. These are at the end since JS builtins
4912 // push their parameters in reverse order.
4913 constexpr int kExtraFixedJSFrameParameters =
4915 if (frame.is_javascript()) {
4918 kExtraFixedJSFrameParameters);
4919 static_assert(kExtraFixedJSFrameParameters ==
4921 // kJavaScriptCallTargetRegister
4923 __ HeapConstant(frame.javascript_target().object()));
4924 // kJavaScriptCallNewTargetRegister
4926 // kJavaScriptCallArgCountRegister
4927 builder.AddInput(
4929 __ SmiConstant(Smi::FromInt(
4931#ifdef V8_JS_LINKAGE_INCLUDES_DISPATCH_HANDLE
4932 // kJavaScriptCallDispatchHandleRegister
4933 builder.AddInput(
4936#endif
4937 }
4938
4939 // Context
4940 AddDeoptInput(builder, virtual_objects, frame.context());
4941
4942 if (builder.Inputs().size() >
4943 std::numeric_limits<decltype(Operation::input_count)>::max() - 1) {
4944 *bailout_ = BailoutReason::kTooManyArguments;
4946 }
4947
4948 const FrameStateInfo* frame_state_info = MakeFrameStateInfo(frame);
4949 return __ FrameState(
4950 builder.Inputs(), builder.inlined(),
4951 builder.AllocateFrameStateData(*frame_state_info, graph_zone()));
4952 }
4953
4956 const maglev::VirtualObjectList& virtual_objects,
4957 interpreter::Register result_location, int result_size) {
4958 DCHECK_EQ(result_size != 0, result_location.is_valid());
4960
4961 if (frame.parent() != nullptr) {
4962 OptionalV<FrameState> parent_frame =
4963 BuildParentFrameState(*frame.parent(), virtual_objects);
4964 if (!parent_frame.has_value()) return OptionalV<FrameState>::Nullopt();
4965 builder.AddParentFrameState(parent_frame.value());
4966 }
4967
4968 // Closure
4969 AddDeoptInput(builder, virtual_objects, frame.closure());
4970
4971 // Parameters
4972 frame.frame_state()->ForEachParameter(
4973 frame.unit(), [&](maglev::ValueNode* value, interpreter::Register reg) {
4974 AddDeoptInput(builder, virtual_objects, value, reg, result_location,
4975 result_size);
4976 });
4977
4978 // Context
4979 AddDeoptInput(builder, virtual_objects,
4980 frame.frame_state()->context(frame.unit()));
4981
4982 // Locals
4983 // ForEachLocal in Maglev skips over dead registers, but we still need to
4984 // call AddUnusedRegister on the Turboshaft FrameStateData Builder.
4985 // {local_index} is used to keep track of such unused registers.
4986 // Among the variables not included in ForEachLocal is the Accumulator (but
4987 // this is fine since there is an entry in the state specifically for the
4988 // accumulator later).
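// Illustration (annotation): if locals r0 and r2 are live but r1 is dead,
// ForEachLocal visits r0 and r2 only; the while-loop below then emits
// AddUnusedRegister for r1, and the trailing for-loop pads the remaining
// registers up to register_count().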
4989 int local_index = 0;
4990 frame.frame_state()->ForEachLocal(
4991 frame.unit(), [&](maglev::ValueNode* value, interpreter::Register reg) {
4992 while (local_index < reg.index()) {
4993 builder.AddUnusedRegister();
4994 local_index++;
4995 }
4996 AddDeoptInput(builder, virtual_objects, value, reg, result_location,
4997 result_size);
4998 local_index++;
4999 });
5000 for (; local_index < frame.unit().register_count(); local_index++) {
5001 builder.AddUnusedRegister();
5002 }
5003
5004 // Accumulator
5005 if (frame.frame_state()->liveness()->AccumulatorIsLive()) {
5006 AddDeoptInput(builder, virtual_objects,
5007 frame.frame_state()->accumulator(frame.unit()),
5009 result_location, result_size);
5010 } else {
5011 builder.AddUnusedRegister();
5012 }
5013
5014 OutputFrameStateCombine combine =
5015 ComputeCombine(frame, result_location, result_size);
5016
5017 if (builder.Inputs().size() >
5018 std::numeric_limits<decltype(Operation::input_count)>::max() - 1) {
5019 *bailout_ = BailoutReason::kTooManyArguments;
5021 }
5022
5023 const FrameStateInfo* frame_state_info = MakeFrameStateInfo(frame, combine);
5024 return __ FrameState(
5025 builder.Inputs(), builder.inlined(),
5026 builder.AllocateFrameStateData(*frame_state_info, graph_zone()));
5027 }
5028
5029 void AddDeoptInput(FrameStateData::Builder& builder,
5030 const maglev::VirtualObjectList& virtual_objects,
5031 const maglev::ValueNode* node) {
5032 if (const maglev::InlinedAllocation* alloc =
5033 node->TryCast<maglev::InlinedAllocation>()) {
5034 DCHECK(alloc->HasBeenAnalysed());
5035 if (alloc->HasBeenElided()) {
5036 AddVirtualObjectInput(builder, virtual_objects,
5037 virtual_objects.FindAllocatedWith(alloc));
5038 return;
5039 }
5040 }
5041 if (const maglev::Identity* ident_obj = node->TryCast<maglev::Identity>()) {
5042 // The value_representation of Identity nodes is always Tagged rather than
5043 // the actual value_representation of their input. We thus bypass identity
5044 // nodes manually here to get the correct value_representation and thus the
5045 // correct MachineType.
5046 node = ident_obj->input(0).node();
5047 // Identity nodes should not have Identity as input.
5048 DCHECK(!node->Is<maglev::Identity>());
5049 }
5050 builder.AddInput(MachineTypeFor(node->value_representation()), Map(node));
5051 }
5052
5053 void AddDeoptInput(FrameStateData::Builder& builder,
5054 const maglev::VirtualObjectList& virtual_objects,
5056 interpreter::Register result_location, int result_size) {
5057 if (result_location.is_valid() && maglev::LazyDeoptInfo::InReturnValues(
5058 reg, result_location, result_size)) {
5059 builder.AddUnusedRegister();
5060 } else {
5061 AddDeoptInput(builder, virtual_objects, node);
5063 }
5064
5065 void AddVirtualObjectInput(FrameStateData::Builder& builder,
5066 const maglev::VirtualObjectList& virtual_objects,
5067 const maglev::VirtualObject* vobj) {
5069 // We need to add HeapNumbers as dematerialized HeapNumbers (rather than
5070 // simply NumberConstant), because they could be mutable HeapNumber
5071 // fields, in which case we don't want GVN to merge them.
5072 constexpr int kNumberOfField = 2; // map + value
5073 builder.AddDematerializedObject(deduplicator_.CreateFreshId().id,
5074 kNumberOfField);
5076 __ HeapConstant(local_factory_->heap_number_map()));
5078 __ Float64Constant(vobj->number()));
5079 return;
5080 }
5081
5082 Deduplicator::DuplicatedId dup_id = deduplicator_.GetDuplicatedId(vobj);
5083 if (dup_id.duplicated) {
5084 builder.AddDematerializedObjectReference(dup_id.id);
5085 return;
5086 }
5087
5088 switch (vobj->type()) {
5090 // Handled above
5091 UNREACHABLE();
5093 // TODO(olivf): Support elided maglev cons strings in turbolev.
5094 UNREACHABLE();
5096 constexpr int kMapAndLengthFieldCount = 2;
5097 uint32_t length = vobj->double_elements_length();
5098 uint32_t field_count = length + kMapAndLengthFieldCount;
5099 builder.AddDematerializedObject(dup_id.id, field_count);
5100 builder.AddInput(
5102 __ HeapConstantNoHole(local_factory_->fixed_double_array_map()));
5104 __ SmiConstant(Smi::FromInt(length)));
5105 FixedDoubleArrayRef elements = vobj->double_elements();
5106 for (uint32_t i = 0; i < length; i++) {
5107 i::Float64 value = elements.GetFromImmutableFixedDoubleArray(i);
5108 if (value.is_hole_nan()) {
5109 builder.AddInput(
5111 __ HeapConstantHole(local_factory_->the_hole_value()));
5112 } else {
5114 __ NumberConstant(value.get_scalar()));
5115 }
5116 }
5117 return;
5118 }
5120 constexpr int kMapFieldCount = 1;
5121 uint32_t field_count = vobj->slot_count() + kMapFieldCount;
5122 builder.AddDematerializedObject(dup_id.id, field_count);
5124 __ HeapConstantNoHole(vobj->map().object()));
5125 vobj->ForEachInput([&](maglev::ValueNode* value_node) {
5126 AddVirtualObjectNestedValue(builder, virtual_objects, value_node);
5127 });
5128 break;
5130 }
5131
5132 void AddVirtualObjectNestedValue(
5133 FrameStateData::Builder& builder,
5134 const maglev::VirtualObjectList& virtual_objects,
5135 const maglev::ValueNode* value) {
5136 if (maglev::IsConstantNode(value->opcode())) {
5137 switch (value->opcode()) {
5138 case maglev::Opcode::kConstant:
5139 builder.AddInput(
5141 __ HeapConstant(value->Cast<maglev::Constant>()->ref().object()));
5142 break;
5143
5144 case maglev::Opcode::kFloat64Constant:
5145 builder.AddInput(
5148 ->value()
5149 .get_scalar()));
5150 break;
5151
5152 case maglev::Opcode::kInt32Constant:
5153 builder.AddInput(
5155 __ NumberConstant(value->Cast<maglev::Int32Constant>()->value()));
5156 break;
5157
5158 case maglev::Opcode::kUint32Constant:
5161 value->Cast<maglev::Uint32Constant>()->value()));
5162 break;
5163
5164 case maglev::Opcode::kRootConstant:
5165 builder.AddInput(
5167 (__ HeapConstant(Cast<HeapObject>(isolate_->root_handle(
5168 value->Cast<maglev::RootConstant>()->index())))));
5169 break;
5170
5171 case maglev::Opcode::kSmiConstant:
5172 builder.AddInput(
5174 __ SmiConstant(value->Cast<maglev::SmiConstant>()->value()));
5175 break;
5176
5177 case maglev::Opcode::kTrustedConstant:
5178 builder.AddInput(
5180 __ TrustedHeapConstant(
5181 value->Cast<maglev::TrustedConstant>()->object().object()));
5182 break;
5183
5184 case maglev::Opcode::kTaggedIndexConstant:
5185 case maglev::Opcode::kExternalConstant:
5186 default:
5187 UNREACHABLE();
5188 }
5189 return;
5190 }
5191
5192 // Special nodes.
5193 switch (value->opcode()) {
5194 case maglev::Opcode::kArgumentsElements:
5195 builder.AddArgumentsElements(
5196 value->Cast<maglev::ArgumentsElements>()->type());
5197 break;
5198 case maglev::Opcode::kArgumentsLength:
5199 builder.AddArgumentsLength();
5200 break;
5201 case maglev::Opcode::kRestLength:
5202 builder.AddRestLength();
5203 break;
5204 case maglev::Opcode::kVirtualObject:
5205 UNREACHABLE();
5206 default:
5207 AddDeoptInput(builder, virtual_objects, value);
5208 break;
5210 }
5213 public:
5214 struct DuplicatedId {
5215 uint32_t id;
5216 bool duplicated;
5217 };
5218 DuplicatedId GetDuplicatedId(const maglev::VirtualObject* object) {
5219 // TODO(dmercadier): do better than a linear search here.
5220 for (uint32_t idx = 0; idx < object_ids_.size(); idx++) {
5221 if (object_ids_[idx] == object) {
5222 return {idx, true};
5223 }
5224 }
5225 object_ids_.push_back(object);
5226 return {next_id_++, false};
5227 }
5229 DuplicatedId CreateFreshId() { return {next_id_++, false}; }
5230
5231 void Reset() {
5232 object_ids_.clear();
5233 next_id_ = 0;
5234 }
5235
5236 static const uint32_t kNotDuplicated = -1;
5238 private:
5239 std::vector<const maglev::VirtualObject*> object_ids_;
5240 uint32_t next_id_ = 0;
5241 };
5242
5244 interpreter::Register result_location,
5245 int result_size) {
5246 if (result_size == 0) {
5248 }
5250 frame.ComputeReturnOffset(result_location, result_size));
5251 }
5252
5253 const FrameStateInfo* MakeFrameStateInfo(
5254 maglev::InterpretedDeoptFrame& maglev_frame,
5255 OutputFrameStateCombine combine) {
5257 uint16_t parameter_count = maglev_frame.unit().parameter_count();
5258 uint16_t max_arguments = maglev_frame.unit().max_arguments();
5259 int local_count = maglev_frame.unit().register_count();
5260 Handle<SharedFunctionInfo> shared_info =
5261 maglev_frame.unit().shared_function_info().object();
5262 Handle<BytecodeArray> bytecode_array =
5263 maglev_frame.unit().bytecode().object();
5265 type, parameter_count, max_arguments, local_count, shared_info,
5266 bytecode_array);
5267
5268 return graph_zone()->New<FrameStateInfo>(maglev_frame.bytecode_position(),
5269 combine, info);
5270 }
5271
5272 const FrameStateInfo* MakeFrameStateInfo(
5273 maglev::InlinedArgumentsDeoptFrame& maglev_frame) {
5275 uint16_t parameter_count =
5276 static_cast<uint16_t>(maglev_frame.arguments().size());
5277 uint16_t max_arguments = 0;
5278 int local_count = 0;
5279 Handle<SharedFunctionInfo> shared_info =
5280 maglev_frame.unit().shared_function_info().object();
5281 Handle<BytecodeArray> bytecode_array =
5282 maglev_frame.unit().bytecode().object();
5284 type, parameter_count, max_arguments, local_count, shared_info,
5285 bytecode_array);
5286
5287 return graph_zone()->New<FrameStateInfo>(maglev_frame.bytecode_position(),
5289 info);
5290 }
5291
5292 const FrameStateInfo* MakeFrameStateInfo(
5295 Handle<SharedFunctionInfo> shared_info =
5296 maglev_frame.unit().shared_function_info().object();
5297 Handle<BytecodeArray> bytecode_array =
5298 maglev_frame.unit().bytecode().object();
5299 constexpr uint16_t kParameterCount = 1; // Only 1 parameter: the receiver.
5300 constexpr uint16_t kMaxArguments = 0;
5301 constexpr int kLocalCount = 0;
5303 type, kParameterCount, kMaxArguments, kLocalCount, shared_info,
5304 bytecode_array);
5305
5306 return graph_zone()->New<FrameStateInfo>(
5308 }
5309
5310 const FrameStateInfo* MakeFrameStateInfo(
5312 FrameStateType type = maglev_frame.is_javascript()
5315 uint16_t parameter_count =
5316 static_cast<uint16_t>(maglev_frame.parameters().length());
5317 if (maglev_frame.is_javascript()) {
5318 constexpr int kExtraFixedJSFrameParameters =
5322 kExtraFixedJSFrameParameters);
5323 parameter_count += kExtraFixedJSFrameParameters;
5324 }
5325 Handle<SharedFunctionInfo> shared_info =
5326 GetSharedFunctionInfo(maglev_frame).object();
5327 constexpr int kLocalCount = 0;
5328 constexpr uint16_t kMaxArguments = 0;
5330 type, parameter_count, kMaxArguments, kLocalCount, shared_info,
5332
5333 return graph_zone()->New<FrameStateInfo>(
5336 }
5337
5338 SharedFunctionInfoRef GetSharedFunctionInfo(
5339 const maglev::DeoptFrame& deopt_frame) {
5340 switch (deopt_frame.type()) {
5342 return deopt_frame.as_interpreted().unit().shared_function_info();
5344 return deopt_frame.as_inlined_arguments().unit().shared_function_info();
5346 return deopt_frame.as_construct_stub().unit().shared_function_info();
5348 return GetSharedFunctionInfo(*deopt_frame.parent());
5349 }
5351 }
5353 enum class Sign { kSigned, kUnsigned };
5354 template <typename rep>
5355 V<Word32> ConvertCompare(maglev::Input left_input, maglev::Input right_input,
5356 ::Operation operation, Sign sign) {
5358 (std::is_same_v<rep, Float64> || std::is_same_v<rep, Float32>),
5359 sign == Sign::kSigned);
5361 bool swap_inputs = false;
5362 switch (operation) {
5363 case ::Operation::kEqual:
5364 case ::Operation::kStrictEqual:
5366 break;
5367 case ::Operation::kLessThan:
5368 kind = sign == Sign::kSigned ? ComparisonOp::Kind::kSignedLessThan
5370 break;
5371 case ::Operation::kLessThanOrEqual:
5372 kind = sign == Sign::kSigned
5375 break;
5376 case ::Operation::kGreaterThan:
5377 kind = sign == Sign::kSigned ? ComparisonOp::Kind::kSignedLessThan
5379 swap_inputs = true;
5380 break;
5381 case ::Operation::kGreaterThanOrEqual:
5382 kind = sign == Sign::kSigned
5385 swap_inputs = true;
5386 break;
5387 default:
5388 UNREACHABLE();
5389 }
5390 V<rep> left = Map(left_input);
5391 V<rep> right = Map(right_input);
5392 if (swap_inputs) std::swap(left, right);
5393 return __ Comparison(left, right, kind, V<rep>::rep);
5394 }
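// Note (annotation): Turboshaft's ComparisonOp only has Equal, LessThan
// and LessThanOrEqual kinds, so GreaterThan(a, b) is expressed as
// LessThan(b, a) via {swap_inputs} above.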
5395
5396 V<Word32> ConvertInt32Compare(maglev::Input left_input,
5397 maglev::Input right_input,
5399 bool* negate_result) {
5401 bool swap_inputs = false;
5402 switch (condition) {
5403 case maglev::AssertCondition::kEqual:
5405 break;
5406 case maglev::AssertCondition::kNotEqual:
5408 *negate_result = true;
5409 break;
5410 case maglev::AssertCondition::kLessThan:
5412 break;
5413 case maglev::AssertCondition::kLessThanEqual:
5415 break;
5416 case maglev::AssertCondition::kGreaterThan:
5418 swap_inputs = true;
5419 break;
5420 case maglev::AssertCondition::kGreaterThanEqual:
5422 swap_inputs = true;
5423 break;
5424 case maglev::AssertCondition::kUnsignedLessThan:
5426 break;
5427 case maglev::AssertCondition::kUnsignedLessThanEqual:
5429 break;
5430 case maglev::AssertCondition::kUnsignedGreaterThan:
5432 swap_inputs = true;
5433 break;
5434 case maglev::AssertCondition::kUnsignedGreaterThanEqual:
5436 swap_inputs = true;
5437 break;
5438 }
5439 V<Word32> left = Map(left_input);
5440 V<Word32> right = Map(right_input);
5441 if (swap_inputs) std::swap(left, right);
5442 return __ Comparison(left, right, kind, WordRepresentation::Word32());
5443 }
5444
5445 V<Word32> RootEqual(maglev::Input input, RootIndex root) {
5446 return __ RootEqual(Map(input), root, isolate_);
5447 }
5448
5449 void DeoptIfInt32IsNotSmi(maglev::Input maglev_input,
5450 V<FrameState> frame_state,
5451 const compiler::FeedbackSource& feedback) {
5452 return DeoptIfInt32IsNotSmi(Map<Word32>(maglev_input), frame_state,
5453 feedback);
5454 }
5455 void DeoptIfInt32IsNotSmi(V<Word32> input, V<FrameState> frame_state,
5456 const compiler::FeedbackSource& feedback) {
5457 // TODO(dmercadier): is there no higher level way of doing this?
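// The trick (annotation): a Smi payload fits in 31 signed bits, and
// input + input (i.e. input << 1) overflows 32 bits exactly when input
// lies outside [-2^30, 2^30 - 1], which is precisely the 31-bit Smi range.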
5458 V<Tuple<Word32, Word32>> add = __ Int32AddCheckOverflow(input, input);
5459 V<Word32> check = __ template Projection<1>(add);
5460 __ DeoptimizeIf(check, frame_state, DeoptimizeReason::kNotASmi, feedback);
5461 }
5462
5463 std::pair<V<WordPtr>, V<Object>> GetTypedArrayDataAndBasePointers(
5464 V<JSTypedArray> typed_array) {
5465 V<WordPtr> data_pointer = __ LoadField<WordPtr>(
5467 V<Object> base_pointer = __ LoadField<Object>(
5469 return {data_pointer, base_pointer};
5470 }
5471 V<Untagged> BuildTypedArrayLoad(V<JSTypedArray> typed_array, V<Word32> index,
5473 auto [data_pointer, base_pointer] =
5474 GetTypedArrayDataAndBasePointers(typed_array);
5475 return __ LoadTypedElement(typed_array, base_pointer, data_pointer,
5476 __ ChangeUint32ToUintPtr(index),
5478 }
5479 void BuildTypedArrayStore(V<JSTypedArray> typed_array, V<Word32> index,
5480 V<Untagged> value, ElementsKind kind) {
5481 auto [data_pointer, base_pointer] =
5482 GetTypedArrayDataAndBasePointers(typed_array);
5483 __ StoreTypedElement(typed_array, base_pointer, data_pointer,
5484 __ ChangeUint32ToUintPtr(index), value,
5486 }
5487
5488 V<Number> Float64ToTagged(
5489 V<Float64> input,
5491 // Float64ToTagged's conversion mode is used to control whether integer
5492 // floats should be converted to Smis or to HeapNumbers: kCanonicalizeSmi
5493 // means that they can be converted to Smis, and otherwise they should
5494 // remain HeapNumbers.
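// E.g. (annotation): with kCanonicalizeSmi, 42.0 can be tagged as the Smi
// 42, while 42.5 always needs a HeapNumber; without kCanonicalizeSmi, 42.0
// is boxed as a HeapNumber as well.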
5496 conversion_mode ==
5500 return V<Number>::Cast(__ ConvertUntaggedToJSPrimitive(
5504 }
5505
5506 V<NumberOrUndefined> HoleyFloat64ToTagged(
5507 V<Float64> input,
5509 Label<NumberOrUndefined> done(this);
5510 if (conversion_mode ==
5512 // ConvertUntaggedToJSPrimitive cannot at the same time canonicalize smis
5513 // and handle holes. We thus manually insert a smi check when the
5514 // conversion_mode is CanonicalizeSmi.
5515 IF (__ Float64IsSmi(input)) {
5516 V<Word32> as_int32 = __ TruncateFloat64ToInt32OverflowUndefined(input);
5517 V<Smi> as_smi = V<Smi>::Cast(__ ConvertUntaggedToJSPrimitive(
5522 GOTO(done, as_smi);
5523 }
5524 }
5525 V<NumberOrUndefined> as_obj =
5526 V<NumberOrUndefined>::Cast(__ ConvertUntaggedToJSPrimitive(
5527 input,
5529 kHeapNumberOrUndefined,
5533 if (done.has_incoming_jump()) {
5534 GOTO(done, as_obj);
5535 BIND(done, result);
5536 return result;
5537 } else {
5538 // Avoid creating a new block if {as_obj} is the only possible return
5539 // value.
5540 return as_obj;
5542 }
5543
5544 void FixLoopPhis(maglev::BasicBlock* loop) {
5545 DCHECK(loop->is_loop());
5546 if (!loop->has_phi()) return;
5547 for (maglev::Phi* maglev_phi : *loop->phis()) {
5548 // Note that we've already emitted the backedge Goto, which means that
5549 // we're currently not in a block; we thus need to pass
5550 // can_be_invalid=false to `Map`, otherwise it will think that we're
5551 // currently emitting unreachable operations and return
5552 // OpIndex::Invalid().
5553 constexpr bool kIndexCanBeInvalid = false;
5554 OpIndex phi_index = Map(maglev_phi, kIndexCanBeInvalid);
5555 PendingLoopPhiOp& pending_phi =
5556 __ output_graph().Get(phi_index).Cast<PendingLoopPhiOp>();
5557 __ output_graph().Replace<PhiOp>(
5558 phi_index,
5560 {pending_phi.first(),
5561 Map(maglev_phi -> backedge_input(), kIndexCanBeInvalid)}),
5562 pending_phi.rep);
5579 }
5580 }
5581
5582 // TODO(dmercadier): Using a Branch would open more optimization opportunities
5583 // for BranchElimination compared to using a Select. However, in most cases,
5584 // Maglev should avoid materializing JS booleans, so there is a good chance
5585 // that if we actually need to do it, it's because we have to, and
5586 // BranchElimination probably cannot help. Thus, using a Select rather than a
5587 // Branch leads to smaller graphs, which is generally beneficial. Still, once
5588 // the graph builder is finished, we should evaluate whether Select or Branch
5589 // is the best choice here.
5590 V<Boolean> ConvertWord32ToJSBool(V<Word32> b, bool flip = false) {
5591 V<Boolean> true_idx = __ HeapConstant(local_factory_->true_value());
5592 V<Boolean> false_idx = __ HeapConstant(local_factory_->false_value());
5593 if (flip) std::swap(true_idx, false_idx);
5594 return __ Select(b, true_idx, false_idx, RegisterRepresentation::Tagged(),
5596 }
5597
5598 V<Boolean> ConvertWordPtrToJSBool(V<WordPtr> b, bool flip = false) {
5599 V<Boolean> true_idx = __ HeapConstant(local_factory_->true_value());
5600 V<Boolean> false_idx = __ HeapConstant(local_factory_->false_value());
5601 if (flip) std::swap(true_idx, false_idx);
5602 return __ Select(__ WordPtrEqual(b, __ WordPtrConstant(0)), false_idx,
5606
5607 // This function corresponds to MaglevAssembler::ToBoolean.
5608 V<Word32> ToBit(
5609 maglev::Input input,
5611 // TODO(dmercadier): {input} in Maglev is of type Object (i.e., any
5612 // HeapObject or Smi). However, the implementation of ToBoolean in Maglev is
5613 // identical to the lowering of TruncateJSPrimitiveToUntaggedOp(kBit) in
5614 // Turboshaft (which is done in MachineLoweringReducer), so we're using
5615 // TruncateJSPrimitiveToUntaggedOp with a non-JSPrimitive input (but it
5616 // still works). We should avoid doing this to avoid any confusion. Renaming
5617 // TruncateJSPrimitiveToUntagged to TruncateObjectToUntagged might be the
5618 // proper fix, in particular because it seems that the Turbofan input to
5619 // this operation is indeed an Object rather than a JSPrimitive (since
5620 // we use this operation in the regular TF->TS graph builder to translate
5621 // TruncateTaggedToBit and TruncateTaggedPointerToBit).
5622 return V<Word32>::Cast(__ TruncateJSPrimitiveToUntagged(
5624 assumptions));
5625 }
5627 // Converts a Float64 to a Word32 boolean, correctly producing 0 for NaN, by
5628 // relying on the fact that "0.0 < abs(x)" is only false for NaN and 0.
5629 V<Word32> Float64ToBit(V<Float64> input) {
5630 return __ Float64LessThan(0.0, __ Float64Abs(input));
5631 }
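 // Comment-only examples of the trick above:
 //
 //   Float64ToBit(3.5)  => (0.0 < 3.5) => 1
 //   Float64ToBit(-2.0) => (0.0 < 2.0) => 1
 //   Float64ToBit(0.0)  => (0.0 < 0.0) => 0
 //   Float64ToBit(-0.0) => (0.0 < 0.0) => 0  (Float64Abs maps -0.0 to 0.0)
 //   Float64ToBit(NaN)  => (0.0 < NaN) => 0  (comparisons with NaN are false)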
5632
5633 LazyDeoptOnThrow ShouldLazyDeoptOnThrow(maglev::NodeBase* node) {
5634 if (!node->properties().can_throw()) return LazyDeoptOnThrow::kNo;
5635 const maglev::ExceptionHandlerInfo* info = node->exception_handler_info();
5636 if (info->ShouldLazyDeopt()) return LazyDeoptOnThrow::kYes;
5637 return LazyDeoptOnThrow::kNo;
5638 }
5639
5640 class ThrowingScope {
5641 // In Maglev, exception handlers have no predecessors, and their Phis are a
5642 // bit special: they all correspond to interpreter registers, and get
5643 // eventually initialized with the value that their predecessors have for
5644 // the corresponding interpreter registers.
5645
5646 // In Turboshaft, exception handlers have predecessors and contain regular
5647 // phis. Creating a ThrowingScope takes care of recording in Variables
5648 // the current value of interpreter registers (right before emitting a node
5649 // that can throw), and sets the current_catch_block of the Assembler.
5650 // Throwing operations that are emitted while the scope is active will
5651 // automatically be wired to the catch handler. Then, when calling
5652 // Process(Phi) on exception phis (= when processing the catch handler),
5653 // these Phis will be mapped to the Variable corresponding to their owning
5654 // interpreter register.
5655
5656 public:
5657 ThrowingScope(GraphBuildingNodeProcessor* builder,
5658 maglev::NodeBase* throwing_node)
5659 : builder_(*builder) {
5660 DCHECK_EQ(__ current_catch_block(), nullptr);
5661 if (!throwing_node->properties().can_throw()) return;
5662 const maglev::ExceptionHandlerInfo* handler_info =
5663 throwing_node->exception_handler_info();
5664 if (!handler_info->HasExceptionHandler() ||
5665 handler_info->ShouldLazyDeopt()) {
5666 return;
5667 }
5668
5669 catch_block_ = handler_info->catch_block();
5670
5671 __ set_current_catch_block(builder_.Map(catch_block_));
5672
5673 // We now need to prepare recording the inputs for the exception phis of
5674 // the catch handler.
5675
5676 if (!catch_block_->has_phi()) {
5677 // Catch handler doesn't have any Phis, no need to do anything else.
5678 return;
5679 }
5680
5681 const maglev::InterpretedDeoptFrame& interpreted_frame =
5682 throwing_node->lazy_deopt_info()->GetFrameForExceptionHandler(
5683 handler_info);
5684 const maglev::CompactInterpreterFrameState* compact_frame =
5685 interpreted_frame.frame_state();
5686 const maglev::MaglevCompilationUnit& maglev_unit =
5687 interpreted_frame.unit();
5688
5689 builder_.IterCatchHandlerPhis(
5690 catch_block_, [this, compact_frame, maglev_unit](
5691 interpreter::Register owner, Variable var) {
5692 DCHECK_NE(owner, interpreter::Register::virtual_accumulator());
5693
5694 const maglev::ValueNode* maglev_value =
5695 compact_frame->GetValueOf(owner, maglev_unit);
5696 DCHECK_NOT_NULL(maglev_value);
5697
5698 if (const maglev::VirtualObject* vobj =
5699 maglev_value->TryCast<maglev::VirtualObject>()) {
5700 maglev_value = vobj->allocation();
5701 }
5702
5703 V<Any> ts_value = builder_.Map(maglev_value);
5704 __ SetVariable(var, ts_value);
5705 builder_.RecordRepresentation(ts_value,
5706 maglev_value->value_representation());
5707 });
5708 }
5709
5710 ~ThrowingScope() {
5711 // Resetting the catch handler. It is always set on a case-by-case basis
5712 // before emitting a throwing node, so there is no need to "reset the
5713 // previous catch handler" or something like that, since there is no
5714 // previous handler (there is a DCHECK in the ThrowingScope constructor
5715 // checking that the current_catch_block is indeed nullptr when the scope
5716 // is created).
5717 __ set_current_catch_block(nullptr);
5718
5719 if (catch_block_ == nullptr) return;
5720 if (!catch_block_->has_phi()) return;
5721
5722 // We clear the Variables that we've set when initializing the scope, in
5723 // order to avoid creating Phis for such Variables. These are really only
5724 // meant to be used when translating the Phis in the catch handler, and
5725 // when the scope is destroyed, we shouldn't be in the Catch handler yet.
5726 builder_.IterCatchHandlerPhis(
5727 catch_block_, [this](interpreter::Register, Variable var) {
5728 __ SetVariable(var, V<Object>::Invalid());
5729 });
5730 }
5731
5732 private:
5733 auto& Asm() { return builder_.Asm(); }
5734 GraphBuildingNodeProcessor& builder_;
5735 const maglev::BasicBlock* catch_block_ = nullptr;
5736 };
5737
5738 class NoThrowingScopeRequired {
5739 public:
5740 explicit NoThrowingScopeRequired(maglev::NodeBase* node) {
5741 // If this DCHECK fails, then the caller should instead use a
5742 // ThrowingScope. Additionally, all of the calls it contains should
5743 // explicitly pass LazyDeoptOnThrow.
5744 DCHECK(!node->properties().can_throw());
5745 }
5746 };
5747
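 // Hypothetical usage sketch of the two scopes above (the real call sites
 // are the Process() methods of this class; the nodes are just examples):
 //
 //   maglev::ProcessResult Process(maglev::CallRuntime* node, ...) {
 //     ThrowingScope throwing_scope(this, node);
 //     // Throwing operations emitted here are wired to the Turboshaft catch
 //     // block (if any), and the current values of the interpreter registers
 //     // are recorded for the catch handler's exception phis.
 //     ...
 //   }
 //
 //   maglev::ProcessResult Process(maglev::Int32Constant* node, ...) {
 //     NoThrowingScopeRequired no_throws(node);  // DCHECKs it cannot throw.
 //     ...
 //   }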
5748 template <typename Function>
5749 void IterCatchHandlerPhis(const maglev::BasicBlock* catch_block,
5750 Function&& callback) {
5751 DCHECK_NOT_NULL(catch_block);
5752 DCHECK(catch_block->has_phi());
5753 for (auto phi : *catch_block->phis()) {
5754 DCHECK(phi->is_exception_phi());
5755 interpreter::Register owner = phi->owner();
5756 if (owner == interpreter::Register::virtual_accumulator()) {
5757 // The accumulator exception phi corresponds to the exception object
5758 // rather than whatever value the accumulator contained before the
5759 // throwing operation. We don't need to iterate here, since there is
5760 // special handling when processing Phis to use `catch_block_begin_`
5761 // for it instead of a Variable.
5762 continue;
5763 }
5764
5765 auto it = regs_to_vars_.find(owner.index());
5766 Variable var;
5767 if (it == regs_to_vars_.end()) {
5768 // We use a LoopInvariantVariable: if loop phis were needed, then the
5769 // Maglev value would already be a loop Phi, and we wouldn't need
5770 // Turboshaft to automatically insert a loop phi.
5771 var = __ NewLoopInvariantVariable(RegisterRepresentation::Tagged());
5772 regs_to_vars_.insert({owner.index(), var});
5773 } else {
5774 var = it->second;
5775 }
5776
5777 callback(owner, var);
5778 }
5779 }
5780
5781 OpIndex MapPhiInput(const maglev::Input input, int input_index) {
5782 return MapPhiInput(input.node(), input_index);
5783 }
5784 OpIndex MapPhiInput(const maglev::NodeBase* node, int input_index) {
5785 if (V8_UNLIKELY(node == maglev_generator_context_node_)) {
5786 OpIndex generator_context = __ GetVariable(generator_context_);
5787 if (__ current_block()->Contains(generator_context)) {
5788 DCHECK(!__ current_block()->IsLoop());
5789 DCHECK(__ output_graph().Get(generator_context).Is<PhiOp>());
5790 // If {generator_context} is a Phi defined in the current block and it's
5791 // used as input for another Phi, then we need to use its value from
5792 // the correct predecessor, since a Phi can't be an input to another Phi
5793 // in the same block.
5794 return __ GetPredecessorValue(generator_context_, input_index);
5795 }
5796 return generator_context;
5797 }
5798 return Map(node);
5799 }
5800
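 // Comment-only example of the predecessor-value logic above: if the current
 // block has phis p = Phi(a, b) and q = Phi(p, c), then {q} cannot use {p}
 // directly as an input (a Phi cannot be an input of another Phi in the same
 // block); the input of {q} for the first predecessor must instead be {a},
 // the value {p} takes when coming from that predecessor.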
5801 template <typename T>
5802 V<T> Map(const maglev::Input input, bool can_be_invalid = true) {
5803 return V<T>::Cast(Map(input.node(), can_be_invalid));
5804 }
5805 OpIndex Map(const maglev::Input input, bool can_be_invalid = true) {
5806 return Map(input.node(), can_be_invalid);
5807 }
5808 OpIndex Map(const maglev::NodeBase* node, bool can_be_invalid = true) {
5809 // If {can_be_invalid} is true (which it should be in most cases) and we're
5810 // currently in unreachable code, then `OpIndex::Invalid` is returned. The
5811 // only case where `can_be_invalid` is false is FixLoopPhis: this is called
5812 // after having emitted the backedge Goto, which means that we are in
5813 // unreachable code, but we know that the mappings should still exist.
5814 if (can_be_invalid && __ generating_unreachable_operations()) {
5815 return OpIndex::Invalid();
5816 }
5817 if (V8_UNLIKELY(node == maglev_generator_context_node_)) {
5818 return __ GetVariable(generator_context_);
5819 }
5820 DCHECK(node_mapping_[node].valid());
5821 return node_mapping_[node];
5822 }
5823 Block* Map(const maglev::BasicBlock* block) { return block_mapping_[block]; }
5824
5825 void SetMap(maglev::NodeBase* node, V<Any> idx) {
5826 if (__ generating_unreachable_operations()) return;
5827 DCHECK(idx.valid());
5828 DCHECK_EQ(__ output_graph().Get(idx).outputs_rep().size(), 1);
5829 node_mapping_[node] = idx;
5830 }
5831
5832 void SetMapMaybeMultiReturn(maglev::NodeBase* node, V<Any> idx) {
5833 const Operation& op = __ output_graph().Get(idx);
5834 if (const TupleOp* tuple = op.TryCast<TupleOp>()) {
5835 // If the call returned multiple values, then in Maglev, {node} is
5836 // used as the 1st returned value, and a GetSecondReturnedValue node is
5837 // used to access the 2nd value. We thus call `SetMap` with the 1st
5838 // projection of the call, and record the 2nd projection in
5839 // {second_return_value_}, which we'll use when translating
5840 // GetSecondReturnedValue.
5841 DCHECK_EQ(tuple->input_count, 2);
5842 SetMap(node, tuple->input(0));
5843 second_return_value_ = tuple->input<Object>(1);
5844 } else {
5845 SetMap(node, idx);
5846 }
5847 }
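 // Comment-only sketch for a builtin call with two return values:
 //
 //   Maglev:                           this translation:
 //   r1 = CallBuiltin(...)             SetMap(r1, tuple->input(0))
 //   r2 = GetSecondReturnedValue()     second_return_value_ = tuple->input(1)
 //
 // so that translating GetSecondReturnedValue later just reads
 // {second_return_value_}.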
5848
5849 void RecordRepresentation(OpIndex idx, maglev::ValueRepresentation repr) {
5850 DCHECK_IMPLIES(maglev_representations_.contains(idx),
5851 maglev_representations_[idx] == repr);
5852 maglev_representations_[idx] = repr;
5853 }
5854
5855 V<NativeContext> native_context() {
5856 DCHECK(native_context_.valid());
5857 return native_context_;
5858 }
5859
5860 PipelineData* data_;
5861 Zone* temp_zone_;
5862 Isolate* isolate_ = data_->isolate();
5863 LocalIsolate* local_isolate_ = isolate_->AsLocalIsolate();
5864 JSHeapBroker* broker_ = data_->broker();
5865 LocalFactory* local_factory_ = local_isolate_->factory();
5867 maglev::MaglevCompilationUnit* maglev_compilation_unit_;
5868 ZoneUnorderedMap<const maglev::NodeBase*, OpIndex> node_mapping_;
5869 ZoneUnorderedMap<const maglev::BasicBlock*, Block*> block_mapping_;
5870 ZoneUnorderedMap<int, Variable> regs_to_vars_;
5871
5872 V<HeapObject> undefined_value_;
5873
5874 // The {deduplicator_} is used when building frame states containing escaped
5875 // objects. It could be a local object in `BuildFrameState`, but it's instead
5876 // defined here to recycle its memory.
5877 Deduplicator deduplicator_;
5878
5879 // In Turboshaft, exception blocks start with a CatchBlockBegin. In Maglev,
5880 // there is no such operation, and the exception is instead populated into the
5881 // accumulator by the throwing code, and is then loaded in Maglev through an
5882 // exception phi. When emitting a Turboshaft exception block, we thus store
5883 // the CatchBlockBegin in {catch_block_begin_}, which we then use when trying
5884 // to map the exception phi corresponding to the accumulator.
5885 V<Object> catch_block_begin_ = V<Object>::Invalid();
5886
5887 // Maglev loops can have multiple forward edges, while Turboshaft should only
5888 // have a single one. When a Maglev loop has multiple forward edges, we create
5889 // an additional Turboshaft block before (which we record in
5890 // {loop_single_edge_predecessors_}), and jumps to the loop will instead go to
5891 // this additional block, which will become the only forward predecessor of
5892 // the loop.
5893 ZoneUnorderedMap<const maglev::BasicBlock*, Block*>
5894 loop_single_edge_predecessors_;
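 // Comment-only illustration: a Maglev loop with two forward predecessors B1
 // and B2 gets an additional Turboshaft predecessor block P, so that the loop
 // header keeps a single forward edge:
 //
 //      Maglev                  Turboshaft
 //
 //     B1    B2                B1    B2
 //       \   /                   \   /
 //        v v                     v v
 //       loop <---+                P
 //         |      |                |
 //         +------+                v
 //                               loop <---+
 //                                 |      |
 //                                 +------+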
5895 // When we create an additional loop predecessor for loops that have multiple
5896 // forward predecessors, we store the newly created phis in
5897 // {loop_phis_first_input_}, so that we can then use them as the first input
5898 // of the original loop phis. {loop_phis_first_input_index_} is used as an
5899 // index in {loop_phis_first_input_} in VisitPhi so that we know where to find
5900 // the first input for the current loop phi.
5901 base::SmallVector<OpIndex, 16> loop_phis_first_input_;
5902 int loop_phis_first_input_index_ = -1;
5903
5904 // Maglev doesn't have projections. Instead, after nodes that return multiple
5905 // values (currently, only maglev::ForInPrepare and maglev::CallBuiltin for
5906 // some builtins), Maglev inserts a GetSecondReturnedValue node, which
5907 // basically just binds kReturnRegister1 to a ValueNode. In the
5908 // Maglev->Turboshaft translation, when we emit a builtin call with multiple
5909 // return values, we set {second_return_value_} to the 2nd projection, and
5910 // then use it when translating GetSecondReturnedValue.
5911 V<Object> second_return_value_ = V<Object>::Invalid();
5912
5913 // {maglev_representations_} contains a map from Turboshaft OpIndex to
5914 // ValueRepresentation of the corresponding Maglev node. This is used when
5915 // translating exception phis: they might need to be re-tagged, and we need to
5916 // know the Maglev ValueRepresentation to distinguish between Float64 and
5917 // HoleyFloat64 (both of which would have Float64 RegisterRepresentation in
5918 // Turboshaft, but they need to be tagged differently).
5919 ZoneAbslFlatHashMap<OpIndex, maglev::ValueRepresentation>
5920 maglev_representations_;
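 // Comment-only example: a value whose Maglev representation is HoleyFloat64
 // may contain the hole NaN pattern, which must be tagged to undefined (see
 // HoleyFloat64ToTagged above, which converts to kHeapNumberOrUndefined),
 // whereas a plain Float64 NaN is tagged as a regular NaN HeapNumber. The
 // RegisterRepresentation alone (Float64 in both cases) cannot tell these
 // apart, hence this side table.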
5921
5922 GeneratorAnalyzer generator_analyzer_;
5923 static constexpr int kDefaultSwitchVarValue = -1;
5924 // {is_visiting_generator_main_switch_} is true if the function is a resumable
5925 // generator, and the current input block is the main dispatch switch for
5926 // resuming the generator.
5927 bool is_visiting_generator_main_switch_ = false;
5928 // {on_generator_switch_loop_} is true if the current input block is a loop
5929 // that used to be bypassed by generator resumes, and thus that needs a
5930 // secondary generator dispatch switch.
5931 bool on_generator_switch_loop_ = false;
5932 // {header_switch_input_} is the value on which secondary generator switches
5933 // should switch.
5934 Variable header_switch_input_;
5935 // When secondary dispatch switches for generators are created,
5936 // {loop_default_generator_value_} is used as the default inputs for
5937 // {header_switch_input_} for edges that weren't manually inserted in the
5938 // translation for generators.
5939 V<Word32> loop_default_generator_value_ = V<Word32>::Invalid();
5940 // If the main generator switch bypasses some loop headers, we'll need to
5941 // add an additional predecessor to these loop headers to get rid of the
5942 // bypass. If we do so, we'll need a dummy input for the loop Phis, which
5943 // we create here.
5944 V<Object> dummy_object_input_ = V<Object>::Invalid();
5945 V<Word32> dummy_word32_input_ = V<Word32>::Invalid();
5946 V<Float64> dummy_float64_input_ = V<Float64>::Invalid();
5947 // {maglev_generator_context_node_} is the 1st Maglev node that loads the
5948 // context from the generator. Because of the removal of loop header bypasses,
5949 // we can end up using this node in a place that's not dominated by the block
5950 // defining this node. To fix this problem, when loading the context from the
5951 // generator for the 1st time, we set {generator_context_}, and in `Map`, we
5952 // always check whether we're trying to get the generator context (=
5953 // {maglev_generator_context_node_}): if so, then we get the value from
5954 // {generator_context_} instead. Note that {generator_context_} is initialized
5955 // with a dummy value (NoContextConstant) so that valid Phis get inserted
5956 // where needed, but by construction, we'll never actually use this dummy
5957 // value.
5958 maglev::NodeBase* maglev_generator_context_node_ = nullptr;
5959 Variable generator_context_;
5960
5961 struct GeneratorSplitEdge {
5962 Block* pre_loop_dst;
5963 Block* inside_loop_target;
5964 int switch_value;
5965 };
5966 std::unordered_map<const maglev::BasicBlock*, std::vector<GeneratorSplitEdge>>
5967 pre_loop_generator_blocks_;
5970 V<Object> new_target_param_ = V<Object>::Invalid();
5971 base::SmallVector<int, 16> predecessor_permutation_;
5972
5973 std::optional<BailoutReason>* bailout_;
5974};
5975
5976// A wrapper around GraphBuildingNodeProcessor that takes care of
5977// - skipping nodes when we are in Unreachable code.
5978// - recording source positions.
5979class NodeProcessorBase : public GraphBuildingNodeProcessor {
5980 public:
5981 using GraphBuildingNodeProcessor::GraphBuildingNodeProcessor;
5982
5983 NodeProcessorBase(PipelineData* data, Graph& graph, Zone* temp_zone,
5984 maglev::MaglevCompilationUnit* maglev_compilation_unit,
5985 std::optional<BailoutReason>* bailout)
5986 : GraphBuildingNodeProcessor(data, graph, temp_zone,
5987 maglev_compilation_unit, bailout),
5988 graph_(graph),
5989 labeller_(maglev_compilation_unit->graph_labeller()) {}
5990
5991 template <typename NodeT>
5992 maglev::ProcessResult Process(NodeT* node,
5993 const maglev::ProcessingState& state) {
5994 if (GraphBuildingNodeProcessor::Asm().generating_unreachable_operations()) {
5995 // It doesn't matter much whether we return kRemove or kContinue here,
5996 // since we'll be done with the Maglev graph anyway once this phase is
5997 // over. Maglev currently doesn't support kRemove for control nodes, so we
5998 // just return kContinue for simplicity.
5999 return maglev::ProcessResult::kContinue;
6000 }
6001
6002 OpIndex end_index_before = graph_.EndIndex();
6003 maglev::ProcessResult result =
6004 GraphBuildingNodeProcessor::Process(node, state);
6005 DCHECK_IMPLIES(result == maglev::ProcessResult::kContinue &&
6006 !GraphBuildingNodeProcessor::Asm()
6007 .generating_unreachable_operations() &&
6008 maglev::IsValueNode(node->opcode()),
6009 IsMapped(node));
6010
6011 // Recording the SourcePositions of the OpIndex that were just created.
6012 SourcePosition source = labeller_->GetNodeProvenance(node).position;
6013 for (OpIndex idx = end_index_before; idx != graph_.EndIndex();
6014 idx = graph_.NextIndex(idx)) {
6015 graph_.source_positions()[idx] = source;
6016 }
6017
6018 return result;
6019 }
6020
6021 private:
6022 Graph& graph_;
6023 maglev::MaglevGraphLabeller* labeller_;
6024};
6025
6026void PrintBytecode(PipelineData& data,
6027 maglev::MaglevCompilationInfo* compilation_info) {
6028 DCHECK(data.info()->trace_turbo_graph());
6029 maglev::MaglevCompilationUnit* top_level_unit =
6030 compilation_info->toplevel_compilation_unit();
6031 CodeTracer* code_tracer = data.GetCodeTracer();
6032 CodeTracer::StreamScope tracing_scope(code_tracer);
6033 tracing_scope.stream()
6034 << "\n----- Bytecode before MaglevGraphBuilding -----\n"
6035 << std::endl;
6036 tracing_scope.stream() << "Function: "
6037 << Brief(*compilation_info->toplevel_function())
6038 << std::endl;
6039 BytecodeArray::Disassemble(top_level_unit->bytecode().object(),
6040 tracing_scope.stream());
6041 Print(*top_level_unit->feedback().object(), tracing_scope.stream());
6042}
6043
6044void PrintMaglevGraph(PipelineData& data,
6045 maglev::MaglevCompilationInfo* compilation_info,
6046 maglev::Graph* maglev_graph, const char* msg) {
6047 CodeTracer* code_tracer = data.GetCodeTracer();
6048 CodeTracer::StreamScope tracing_scope(code_tracer);
6049 tracing_scope.stream() << "\n----- " << msg << " -----" << std::endl;
6050 maglev::PrintGraph(tracing_scope.stream(), compilation_info, maglev_graph);
6051}
6052
6053// TODO(dmercadier, nicohartmann): consider doing some of these optimizations on
6054// the Turboshaft graph after the Maglev->Turboshaft translation. For instance,
6055// MaglevPhiRepresentationSelector is the Maglev equivalent of Turbofan's
6056// SimplifiedLowering, but is much less powerful (doesn't take truncations into
6057// account, doesn't do proper range analysis, doesn't run a fixpoint
6058// analysis...).
6059void RunMaglevOptimizations(PipelineData* data,
6060 maglev::MaglevCompilationInfo* compilation_info,
6061 maglev::MaglevGraphBuilder& maglev_graph_builder,
6062 maglev::Graph* maglev_graph) {
6063 // Non-eager inlining.
6064 if (v8_flags.turbolev_non_eager_inlining) {
6065 maglev::MaglevInliner inliner(compilation_info, maglev_graph);
6066 inliner.Run(data->info()->trace_turbo_graph());
6067
6068 maglev::GraphProcessor<maglev::SweepIdentityNodes,
6069 /* visit_identity_nodes */ true>
6070 sweep;
6071 sweep.ProcessGraph(maglev_graph);
6072 }
6073
6074 // Phi untagging.
6075 {
6076 maglev::GraphProcessor<maglev::MaglevPhiRepresentationSelector> processor(
6077 &maglev_graph_builder);
6078 processor.ProcessGraph(maglev_graph);
6079 }
6080
6081 if (V8_UNLIKELY(data->info()->trace_turbo_graph())) {
6082 PrintMaglevGraph(*data, compilation_info, maglev_graph,
6083 "After phi untagging");
6084 }
6085
6086 // Escape analysis.
6087 {
6088 maglev::GraphMultiProcessor<maglev::AnyUseMarkingProcessor> processor;
6089 processor.ProcessGraph(maglev_graph);
6090 }
6091
6092#ifdef DEBUG
6093 maglev::GraphProcessor<maglev::MaglevGraphVerifier> verifier(
6094 compilation_info);
6095 verifier.ProcessGraph(maglev_graph);
6096#endif
6097
6098 // Dead nodes elimination (which, amongst other things, cleans up the left
6099 // overs of escape analysis).
6100 {
6101 maglev::GraphMultiProcessor<maglev::DeadNodeSweepingProcessor> processor(
6102 maglev::DeadNodeSweepingProcessor{compilation_info});
6103 processor.ProcessGraph(maglev_graph);
6104 }
6105
6106 if (V8_UNLIKELY(data->info()->trace_turbo_graph())) {
6107 PrintMaglevGraph(*data, compilation_info, maglev_graph,
6108 "After escape analysis and dead node sweeping");
6110}
6111
6112std::optional<BailoutReason> MaglevGraphBuildingPhase::Run(PipelineData* data,
6113 Zone* temp_zone,
6114 Linkage* linkage) {
6115 JSHeapBroker* broker = data->broker();
6116 UnparkedScopeIfNeeded unparked_scope(broker);
6117
6118 std::unique_ptr<maglev::MaglevCompilationInfo> compilation_info =
6119 maglev::MaglevCompilationInfo::NewForTurboshaft(
6120 data->isolate(), broker, data->info()->closure(),
6121 data->info()->osr_offset(),
6122 data->info()->function_context_specializing());
6123
6124 // We need to be certain that the parameter count reported by our output
6125 // Code object matches what the code we compile expects. Otherwise, this
6126 // may lead to effectively signature mismatches during function calls. This
6127 // CHECK is a defense-in-depth measure to ensure this doesn't happen.
6128 SBXCHECK_EQ(compilation_info->toplevel_compilation_unit()->parameter_count(),
6129 linkage->GetIncomingDescriptor()->ParameterSlotCount());
6130
6131 if (V8_UNLIKELY(data->info()->trace_turbo_graph())) {
6132 PrintBytecode(*data, compilation_info.get());
6133 }
6134
6135 LocalIsolate* local_isolate = broker->local_isolate_or_isolate();
6136 maglev::Graph* maglev_graph =
6137 maglev::Graph::New(temp_zone, data->info()->is_osr());
6138
6139 // We always create a MaglevGraphLabeller in order to record source positions.
6140 compilation_info->set_graph_labeller(new maglev::MaglevGraphLabeller());
6141
6142 maglev::MaglevGraphBuilder maglev_graph_builder(
6143 local_isolate, compilation_info->toplevel_compilation_unit(),
6144 maglev_graph);
6145 maglev_graph_builder.Build();
6146
6147 if (V8_UNLIKELY(data->info()->trace_turbo_graph())) {
6148 PrintMaglevGraph(*data, compilation_info.get(), maglev_graph,
6149 "After graph building");
6150 }
6151
6152 RunMaglevOptimizations(data, compilation_info.get(), maglev_graph_builder,
6153 maglev_graph);
6154
6155 // TODO(nicohartmann): Should we have source positions here?
6156 data->InitializeGraphComponent(nullptr);
6157
6158 std::optional<BailoutReason> bailout;
6159 maglev::GraphProcessor<NodeProcessorBase, true> builder(
6160 data, data->graph(), temp_zone,
6161 compilation_info->toplevel_compilation_unit(), &bailout);
6162 builder.ProcessGraph(maglev_graph);
6163
6164 // Copying {inlined_functions} from Maglev to Turboshaft.
6165 for (OptimizedCompilationInfo::InlinedFunctionHolder holder :
6166 maglev_graph->inlined_functions()) {
6167 data->info()->inlined_functions().push_back(holder);
6168 }
6169
6170 if (V8_UNLIKELY(bailout.has_value() &&
6171 (v8_flags.trace_turbo || v8_flags.trace_turbo_graph))) {
6172 // If we've bailed out, then we've probably left the graph in some kind of
6173 // invalid state. We Reset it now, so that --trace-turbo doesn't try to
6174 // print an invalid graph.
6175 data->graph().Reset();
6176 }
6177
6178 return bailout;
6179}
6180
6182
6183} // namespace v8::internal::compiler::turboshaft
#define PROCESS_INT32_BITWISE_BINOP(Name)
#define V8_UNLIKELY(condition)
Definition v8config.h:660
TFGraph * graph_
wasm::ValueType type