v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
assembler.h
Go to the documentation of this file.
1// Copyright 2022 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_COMPILER_TURBOSHAFT_ASSEMBLER_H_
6#define V8_COMPILER_TURBOSHAFT_ASSEMBLER_H_
7
8#include <cstring>
9#include <iomanip>
10#include <iterator>
11#include <limits>
12#include <memory>
13#include <optional>
14#include <type_traits>
15#include <utility>
16
18#include "src/base/logging.h"
19#include "src/base/macros.h"
23#include "src/base/vector.h"
49#include "src/flags/flags.h"
55#include "src/objects/oddball.h"
59#include "src/objects/tagged.h"
61#include "v8-primitive.h"
62
63#ifdef V8_ENABLE_WEBASSEMBLY
65#endif
66
67namespace v8::internal {
68enum class Builtin : int32_t;
69}
70
72
74
75template <class AssemblerT>
76class CatchScopeImpl;
77
78// GotoIf(cond, dst) and GotoIfNot(cond, dst) are not guaranteed to actually
79// generate a Branch with `dst` as one of the destination, because some reducer
80// in the stack could realize that `cond` is statically known and optimize away
81// the Branch. Thus, GotoIf and GotoIfNot return a {ConditionalGotoStatus},
82// which represents whether a GotoIf/GotoIfNot was emitted as a Branch or a Goto
83// (and if a Goto, then to what: `dst` or the fallthrough block).
85 kGotoDestination = 1, // The conditional Goto became an unconditional Goto to
86 // the destination.
87 kGotoEliminated = 2, // The conditional GotoIf/GotoIfNot would never be
88 // executed and only the fallthrough path remains.
89 kBranch = 3 // The conditional Goto became a branch.
90
91 // Some examples of this:
92 // GotoIf(true, dst) ===> kGotoDestination
93 // GotoIf(false, dst) ===> kGotoEliminated
94 // GotoIf(var, dst) ===> kBranch
95 // GotoIfNot(true, dst) ===> kGotoEliminated
96 // GotoIfNot(false, dst) ===> kGotoDestination
97 // GotoIfNot(var, dst) ===> kBranch
98};
102
103template <typename It, typename A>
104concept ForeachIterable = requires(It iterator, A& assembler) {
105 { iterator.Begin(assembler) } -> std::same_as<typename It::iterator_type>;
106 {
107 iterator.IsEnd(assembler, typename It::iterator_type{})
108 } -> std::same_as<OptionalV<Word32>>;
109 {
110 iterator.Advance(assembler, typename It::iterator_type{})
111 } -> std::same_as<typename It::iterator_type>;
112 {
113 iterator.Dereference(assembler, typename It::iterator_type{})
114 } -> std::same_as<typename It::value_type>;
115};
116
117// `Range<T>` implements the `ForeachIterable` concept to iterate over a range
118// of values inside a `FOREACH` loop. The range can be specified with a begin,
119// an (exclusive) end and an optional stride.
120//
121// Example:
122//
123// FOREACH(offset, Range(start, end, 4)) {
124// // Load the value at `offset`.
125// auto value = __ Load(offset, LoadOp::Kind::RawAligned(), ...);
126// // ...
127// }
128//
129template <typename T>
130class Range {
131 public:
134
137
138 template <typename A>
139 iterator_type Begin(A& assembler) const {
140 return assembler.resolve(begin_);
141 }
142
143 template <typename A>
144 OptionalV<Word32> IsEnd(A& assembler, iterator_type current_iterator) const {
145 if constexpr (std::is_same_v<T, Word32>) {
146 return assembler.Uint32LessThanOrEqual(assembler.resolve(end_),
147 current_iterator);
148 } else {
149 static_assert(std::is_same_v<T, Word64>);
150 return assembler.Uint64LessThanOrEqual(assembler.resolve(end_),
151 current_iterator);
152 }
153 }
154
155 template <typename A>
156 iterator_type Advance(A& assembler, iterator_type current_iterator) const {
157 if constexpr (std::is_same_v<T, Word32>) {
158 return assembler.Word32Add(current_iterator, assembler.resolve(stride_));
159 } else {
160 static_assert(std::is_same_v<T, Word64>);
161 return assembler.Word64Add(current_iterator, assembler.resolve(stride_));
162 }
163 }
164
165 template <typename A>
166 value_type Dereference(A& assembler, iterator_type current_iterator) const {
167 return current_iterator;
168 }
169
170 private:
174};
175
176// Deduction guides for `Range`.
177template <typename T>
179template <typename T>
181template <typename T>
184
185// `IndexRange<T>` is a short hand for a Range<T> that iterates the range [0,
186// count) with steps of 1. This is the ideal iterator to generate a `for(int i =
187// 0; i < count; ++i) {}`-style loop.
188//
189// Example:
190//
191// FOREACH(i, IndexRange(count)) { ... }
192//
193template <typename T>
194class IndexRange : public Range<T> {
195 public:
196 using base = Range<T>;
199
200 explicit IndexRange(ConstOrV<T> count) : Range<T>(0, count, 1) {}
201};
202
203// `Sequence<T>` implements the `ForeachIterable` concept to iterate an
 204// unlimited sequence of values inside a `FOREACH` loop. The iteration begins at the
205// given start value and during each iteration the value is incremented by the
206// optional `stride` argument. Note that there is no termination condition, so
207// the end of the loop needs to be terminated in another way. This could be
208// either by a conditional break inside the loop or by combining the `Sequence`
209// iterator with another iterator that provides the termination condition (see
210// Zip below).
211//
212// Example:
213//
214// FOREACH(index, Sequence<WordPtr>(0)) {
215// // ...
216// V<Object> value = __ Load(object, index, LoadOp::Kind::TaggedBase(),
217// offset, field_size);
218// GOTO_IF(__ IsSmi(value), done, index);
219// }
220//
221template <typename T>
222class Sequence : private Range<T> {
223 using base = Range<T>;
224
225 public:
228
229 explicit Sequence(ConstOrV<T> begin, ConstOrV<T> stride = 1)
230 : base(begin, 0, stride) {}
231
232 using base::Advance;
233 using base::Begin;
234 using base::Dereference;
235
236 template <typename A>
238 // Sequence doesn't have a termination condition.
240 }
241};
242
243// Deduction guide for `Sequence`.
244template <typename T>
246template <typename T>
248template <typename T>
250
251// `Zip<T>` implements the `ForeachIterable` concept to iterate multiple
252// iterators at the same time inside a `FOREACH` loop. The loop terminates once
253// any of the zipped iterators signals end of iteration. The number of iteration
254// variables specified in the `FOREACH` loop has to match the number of zipped
255// iterators.
256//
257// Example:
258//
259// FOREACH(offset, index, Zip(Range(start, end, 4),
260// Sequence<Word32>(0)) {
261// // `offset` iterates [start, end) with steps of 4.
262// // `index` counts 0, 1, 2, ...
263// }
264//
265// NOTE: The generated loop is only controlled by the `offset < end` condition
266// as `Sequence` has no upper bound. Hence, the above loop resembles a loop like
267// (assuming start, end and therefore offset are WordPtr):
268//
269// for(auto [offset, index] = {start, 0};
270// offset < end;
271// offset += 4, ++index) {
272// // ...
273// }
274//
275template <typename... Iterables>
276class Zip {
277 public:
278 using value_type = std::tuple<typename Iterables::value_type...>;
279 using iterator_type = std::tuple<typename Iterables::iterator_type...>;
280
281 explicit Zip(Iterables... iterables) : iterables_(std::move(iterables)...) {}
282
283 template <typename A>
284 iterator_type Begin(A& assembler) {
285 return base::tuple_map(
286 iterables_, [&assembler](auto& it) { return it.Begin(assembler); });
287 }
288
289 template <typename A>
290 OptionalV<Word32> IsEnd(A& assembler, iterator_type current_iterator) {
291 // TODO(nicohartmann): Currently we don't short-circuit the disjunction here
292 // because that's slightly more difficult to do with the current `IsEnd`
293 // predicate. We can consider making this more powerful if we see use cases.
294 auto results = base::tuple_map2(iterables_, current_iterator,
295 [&assembler](auto& it, auto current) {
296 return it.IsEnd(assembler, current);
297 });
298 return base::tuple_fold(
300 [&assembler](OptionalV<Word32> acc, OptionalV<Word32> next) {
301 if (!next.has_value()) return acc;
302 if (!acc.has_value()) return next;
303 return OptionalV(
304 assembler.Word32BitwiseOr(acc.value(), next.value()));
305 });
306 }
307
308 template <typename A>
309 iterator_type Advance(A& assembler, iterator_type current_iterator) {
310 return base::tuple_map2(iterables_, current_iterator,
311 [&assembler](auto& it, auto current) {
312 return it.Advance(assembler, current);
313 });
314 }
315
316 template <typename A>
317 value_type Dereference(A& assembler, iterator_type current_iterator) {
318 return base::tuple_map2(iterables_, current_iterator,
319 [&assembler](auto& it, auto current) {
320 return it.Dereference(assembler, current);
321 });
322 }
323
324 private:
325 std::tuple<Iterables...> iterables_;
326};
327
328// Deduction guide for `Zip`.
329template <typename... Iterables>
330Zip(Iterables... iterables) -> Zip<Iterables...>;
331
332class ConditionWithHint final {
333 public:
338
339 template <typename T>
341 T condition,
342 BranchHint hint = BranchHint::kNone) // NOLINT(runtime/explicit)
343 requires(std::is_same_v<T, OpIndex>)
345
346 V<Word32> condition() const { return condition_; }
347 BranchHint hint() const { return hint_; }
348
349 private:
352};
353
354namespace detail {
// Applies `assembler.resolve(...)` to every element of the given tuple and
// returns a tuple of the resolved values.
template <typename A, typename ConstOrValues>
auto ResolveAll(A& assembler, const ConstOrValues& const_or_values) {
  auto resolve_each = [&assembler](const auto&... elements) {
    return std::tuple{assembler.resolve(elements)...};
  };
  return std::apply(resolve_each, const_or_values);
}
361
362template <typename T>
364 using type = OpIndex;
365};
366template <typename T>
367struct IndexTypeFor<std::tuple<T>> {
368 using type = T;
369};
370
371template <typename T>
373
374inline bool SuppressUnusedWarning(bool b) { return b; }
// Unwraps a 1-tuple into its single element; tuples with two or more
// elements are passed through unchanged.
template <typename T>
auto unwrap_unary_tuple(std::tuple<T>&& single) {
  return std::get<0>(std::move(single));
}
template <typename T1, typename T2, typename... Rest>
auto unwrap_unary_tuple(std::tuple<T1, T2, Rest...>&& multiple) {
  return multiple;
}
383} // namespace detail
384
385template <bool loop, typename... Ts>
387 protected:
388 static constexpr size_t size = sizeof...(Ts);
389
390 LabelBase(const LabelBase&) = delete;
391 LabelBase& operator=(const LabelBase&) = delete;
392
393 public:
394 static constexpr bool is_loop = loop;
395 using values_t = std::tuple<V<Ts>...>;
396 using const_or_values_t = std::tuple<maybe_const_or_v_t<Ts>...>;
397 using recorded_values_t = std::tuple<base::SmallVector<V<Ts>, 2>...>;
398
399 Block* block() { return data_.block; }
400
401 bool has_incoming_jump() const { return has_incoming_jump_; }
402
403 template <typename A>
404 void Goto(A& assembler, const values_t& values) {
405 if (assembler.generating_unreachable_operations()) return;
406 has_incoming_jump_ = true;
407 Block* current_block = assembler.current_block();
408 DCHECK_NOT_NULL(current_block);
409 assembler.Goto(data_.block);
410 RecordValues(current_block, data_, values);
411 }
412
413 template <typename A>
414 void GotoIf(A& assembler, OpIndex condition, BranchHint hint,
415 const values_t& values) {
416 if (assembler.generating_unreachable_operations()) return;
417 has_incoming_jump_ = true;
418 Block* current_block = assembler.current_block();
419 DCHECK_NOT_NULL(current_block);
420 if (assembler.GotoIf(condition, data_.block, hint) &
422 RecordValues(current_block, data_, values);
423 }
424 }
425
426 template <typename A>
427 void GotoIfNot(A& assembler, OpIndex condition, BranchHint hint,
428 const values_t& values) {
429 if (assembler.generating_unreachable_operations()) return;
430 has_incoming_jump_ = true;
431 Block* current_block = assembler.current_block();
432 DCHECK_NOT_NULL(current_block);
433 if (assembler.GotoIfNot(condition, data_.block, hint) &
435 RecordValues(current_block, data_, values);
436 }
437 }
438
439 template <typename A>
442 if (!assembler.Bind(data_.block)) {
443 return std::tuple_cat(std::tuple{false}, values_t{});
444 }
445 DCHECK_EQ(data_.block, assembler.current_block());
446 return std::tuple_cat(std::tuple{true}, MaterializePhis(assembler));
447 }
448
449 protected:
457
458 explicit LabelBase(Block* block) : data_(block) {
460 }
461
463 : data_(std::move(other.data_)),
464 has_incoming_jump_(other.has_incoming_jump_) {}
465
466 static void RecordValues(Block* source, BlockData& data,
467 const values_t& values) {
468 DCHECK_NOT_NULL(source);
469 if (data.block->IsBound()) {
470 // Cannot `Goto` to a bound block. If you are trying to construct a
471 // loop, use a `LoopLabel` instead!
472 UNREACHABLE();
473 }
474 RecordValuesImpl(data, source, values, std::make_index_sequence<size>());
475 }
476
477 template <size_t... indices>
478 static void RecordValuesImpl(BlockData& data, Block* source,
479 const values_t& values,
480 std::index_sequence<indices...>) {
481#ifdef DEBUG
482 std::initializer_list<size_t> sizes{
483 std::get<indices>(data.recorded_values).size()...};
484 // There a -1 on the PredecessorCounts below, because we've emitted the
485 // Goto/Branch before calling RecordValues (which we do because the
486 // condition of the Goto might have been constant-folded, resulting in the
487 // destination not actually being reachable).
489 sizes, static_cast<size_t>(data.block->PredecessorCount() - 1)));
490 DCHECK_EQ(data.block->PredecessorCount() - 1, data.predecessors.size());
491#endif
492 (std::get<indices>(data.recorded_values)
493 .push_back(std::get<indices>(values)),
494 ...);
495 data.predecessors.push_back(source);
496 }
497
498 template <typename A>
500 return MaterializePhisImpl(assembler, data_,
501 std::make_index_sequence<size>());
502 }
503
504 template <typename A, size_t... indices>
505 static values_t MaterializePhisImpl(A& assembler, BlockData& data,
506 std::index_sequence<indices...>) {
507 size_t predecessor_count = data.block->PredecessorCount();
508 DCHECK_EQ(data.predecessors.size(), predecessor_count);
509 // If this label has no values, we don't need any Phis.
510 if constexpr (size == 0) return values_t{};
511
512 // If this block does not have any predecessors, we shouldn't call this.
513 DCHECK_LT(0, predecessor_count);
514 // With 1 predecessor, we don't need any Phis.
515 if (predecessor_count == 1) {
516 return values_t{std::get<indices>(data.recorded_values)[0]...};
517 }
518 DCHECK_LT(1, predecessor_count);
519
520 // Construct Phis.
521 return values_t{assembler.Phi(
522 base::VectorOf(std::get<indices>(data.recorded_values)))...};
523 }
524
526 bool has_incoming_jump_ = false;
527};
528
529template <typename... Ts>
530class Label : public LabelBase<false, Ts...> {
531 using super = LabelBase<false, Ts...>;
532
533 Label(const Label&) = delete;
534 Label& operator=(const Label&) = delete;
535
536 public:
537 template <typename Reducer>
538 explicit Label(Reducer* reducer) : super(reducer->Asm().NewBlock()) {}
539
540 Label(Label&& other) V8_NOEXCEPT : super(std::move(other)) {}
541};
542
543template <typename... Ts>
544class LoopLabel : public LabelBase<true, Ts...> {
545 using super = LabelBase<true, Ts...>;
546 using BlockData = typename super::BlockData;
547
548 LoopLabel(const LoopLabel&) = delete;
549 LoopLabel& operator=(const LoopLabel&) = delete;
550
551 public:
552 using values_t = typename super::values_t;
553 template <typename Reducer>
554 explicit LoopLabel(Reducer* reducer)
555 : super(reducer->Asm().NewBlock()),
556 loop_header_data_{reducer->Asm().NewLoopHeader()} {}
557
559 : super(std::move(other)),
560 loop_header_data_(std::move(other.loop_header_data_)),
561 pending_loop_phis_(std::move(other.pending_loop_phis_)) {}
562
563 Block* loop_header() const { return loop_header_data_.block; }
564
565 template <typename A>
566 void Goto(A& assembler, const values_t& values) {
567 if (assembler.generating_unreachable_operations()) return;
568 if (!loop_header_data_.block->IsBound()) {
569 // If the loop header is not bound yet, we have the forward edge to the
570 // loop.
571 DCHECK_EQ(0, loop_header_data_.block->PredecessorCount());
572 Block* current_block = assembler.current_block();
573 DCHECK_NOT_NULL(current_block);
574 assembler.Goto(loop_header_data_.block);
575 super::RecordValues(current_block, loop_header_data_, values);
576 } else {
577 // We have a jump back to the loop header and wire it to the single
578 // backedge block.
579 this->super::Goto(assembler, values);
580 }
581 }
582
583 template <typename A>
584 void GotoIf(A& assembler, OpIndex condition, BranchHint hint,
585 const values_t& values) {
586 if (assembler.generating_unreachable_operations()) return;
587 if (!loop_header_data_.block->IsBound()) {
588 // If the loop header is not bound yet, we have the forward edge to the
589 // loop.
590 DCHECK_EQ(0, loop_header_data_.block->PredecessorCount());
591 Block* current_block = assembler.current_block();
592 DCHECK_NOT_NULL(current_block);
593 if (assembler.GotoIf(condition, loop_header_data_.block, hint) &
595 super::RecordValues(current_block, loop_header_data_, values);
596 }
597 } else {
598 // We have a jump back to the loop header and wire it to the single
599 // backedge block.
600 this->super::GotoIf(assembler, condition, hint, values);
601 }
602 }
603
604 template <typename A>
605 void GotoIfNot(A& assembler, OpIndex condition, BranchHint hint,
606 const values_t& values) {
607 if (assembler.generating_unreachable_operations()) return;
608 if (!loop_header_data_.block->IsBound()) {
609 // If the loop header is not bound yet, we have the forward edge to the
610 // loop.
611 DCHECK_EQ(0, loop_header_data_.block->PredecessorCount());
612 Block* current_block = assembler.current_block();
613 DCHECK_NOT_NULL(current_block);
614 if (assembler.GotoIf(condition, loop_header_data_.block, hint) &
616 super::RecordValues(current_block, loop_header_data_, values);
617 }
618 } else {
619 // We have a jump back to the loop header and wire it to the single
620 // backedge block.
621 this->super::GotoIfNot(assembler, condition, hint, values);
622 }
623 }
624
625 template <typename A>
627 // LoopLabels must not be bound using `Bind`, but with `Loop`.
628 UNREACHABLE();
629 }
630
631 template <typename A>
633 DCHECK(!loop_header_data_.block->IsBound());
634 if (!assembler.Bind(loop_header_data_.block)) {
635 return std::tuple_cat(std::tuple{false}, values_t{});
636 }
637 DCHECK_EQ(loop_header_data_.block, assembler.current_block());
638 values_t pending_loop_phis =
640 pending_loop_phis_ = pending_loop_phis;
641 return std::tuple_cat(std::tuple{true}, pending_loop_phis);
642 }
643
644 template <typename A>
645 void EndLoop(A& assembler) {
646 // First, we need to bind the backedge block.
647 auto bind_result = this->super::Bind(assembler);
648 // `Bind` returns a tuple with a `bool` as first entry that indicates
649 // whether the block was bound. The rest of the tuple contains the phi
650 // values. Check if this block was bound (aka is reachable).
651 if (std::get<0>(bind_result)) {
652 // The block is bound.
653 DCHECK_EQ(assembler.current_block(), this->super::block());
654 // Now we build a jump from this block to the loop header.
655 // Remove the "bound"-flag from the beginning of the tuple.
656 auto values = base::tuple_drop<1>(bind_result);
657 assembler.Goto(loop_header_data_.block);
658 // Finalize Phis in the loop header.
659 FixLoopPhis(assembler, values);
660 }
661 assembler.FinalizeLoop(loop_header_data_.block);
662 }
663
664 private:
665 template <typename A>
666 static values_t MaterializeLoopPhis(A& assembler, BlockData& data) {
667 return MaterializeLoopPhisImpl(assembler, data,
668 std::make_index_sequence<super::size>());
669 }
670
671 template <typename A, size_t... indices>
672 static values_t MaterializeLoopPhisImpl(A& assembler, BlockData& data,
673 std::index_sequence<indices...>) {
674 size_t predecessor_count = data.block->PredecessorCount();
675 USE(predecessor_count);
676 DCHECK_EQ(data.predecessors.size(), predecessor_count);
677 // If this label has no values, we don't need any Phis.
678 if constexpr (super::size == 0) return typename super::values_t{};
679
680 DCHECK_EQ(predecessor_count, 1);
681 auto phis = typename super::values_t{assembler.PendingLoopPhi(
682 std::get<indices>(data.recorded_values)[0])...};
683 return phis;
684 }
685
686 template <typename A>
687 void FixLoopPhis(A& assembler, const typename super::values_t& values) {
688 DCHECK(loop_header_data_.block->IsBound());
689 DCHECK(loop_header_data_.block->IsLoop());
690 DCHECK_LE(1, loop_header_data_.predecessors.size());
691 DCHECK_LE(loop_header_data_.predecessors.size(), 2);
692 FixLoopPhi<0>(assembler, values);
693 }
694
695 template <size_t I, typename A>
696 void FixLoopPhi(A& assembler, const typename super::values_t& values) {
698 OpIndex phi_index = std::get<I>(*pending_loop_phis_);
699 PendingLoopPhiOp& pending_loop_phi =
700 assembler.output_graph()
701 .Get(phi_index)
702 .template Cast<PendingLoopPhiOp>();
703 DCHECK_EQ(pending_loop_phi.first(),
704 std::get<I>(loop_header_data_.recorded_values)[0]);
705 assembler.output_graph().template Replace<PhiOp>(
706 phi_index,
708 {pending_loop_phi.first(), std::get<I>(values)}),
709 pending_loop_phi.rep);
710 FixLoopPhi<I + 1>(assembler, values);
711 }
712 }
713
715 std::optional<values_t> pending_loop_phis_;
716};
717
718namespace detail {
719template <typename T>
721template <typename T>
724};
725template <typename... Ts>
726struct LoopLabelForHelper<std::tuple<V<Ts>...>> {
727 using type = LoopLabel<Ts...>;
728};
729} // namespace detail
730
731template <typename T>
733
735
736template <typename Next>
738
739template <typename T>
741 static_assert(is_subtype_v<T, HeapObject>);
742
743 public:
744 explicit Uninitialized(V<T> object) : object_(object) {}
745
746 private:
747 template <typename Next>
749
750 V<T> object() const {
751 DCHECK(object_.has_value());
752 return *object_;
753 }
754
756 DCHECK(object_.has_value());
757 auto temp = *object_;
758 object_.reset();
759 return temp;
760 }
761
762 std::optional<V<T>> object_;
763};
764
765// Forward declarations
766template <class Assembler>
767class GraphVisitor;
768template <class Next>
770template <class Next>
772
773template <typename Reducers>
776 using ReducerList = Reducers;
778 return *static_cast<Assembler<ReducerList>*>(this);
779 }
780};
781
782template <typename ReducerList>
784 static constexpr size_t length = reducer_list_length<ReducerList>::value;
785 // We assume a TSReducerBase is at the end of the list.
786 static constexpr size_t base_index =
788 static_assert(base_index == length - 1);
789 // Insert a GenericReducerBase before that.
792 // If we have a ValueNumberingReducer in the list, we insert at that index,
793 // otherwise before the reducer_base.
794 static constexpr size_t ep_index =
801 length + 2);
802
805};
806
807template <typename Next>
809
810// TURBOSHAFT_REDUCER_GENERIC_BOILERPLATE should almost never be needed: it
811// should only be used by the IR-specific base class, while other reducers
812// should simply use `TURBOSHAFT_REDUCER_BOILERPLATE`.
813#define TURBOSHAFT_REDUCER_GENERIC_BOILERPLATE(Name) \
814 using ReducerList = typename Next::ReducerList; \
815 using assembler_t = compiler::turboshaft::Assembler<ReducerList>; \
816 assembler_t& Asm() { return *static_cast<assembler_t*>(this); } \
817 template <class T> \
818 using ScopedVar = compiler::turboshaft::ScopedVar<T, assembler_t>; \
819 using CatchScope = compiler::turboshaft::CatchScopeImpl<assembler_t>; \
820 static constexpr auto& ReducerName() { return #Name; }
821
822// Defines a few helpers to use the Assembler and its stack in Reducers.
823#define TURBOSHAFT_REDUCER_BOILERPLATE(Name) \
824 TURBOSHAFT_REDUCER_GENERIC_BOILERPLATE(Name) \
825 using node_t = typename Next::node_t; \
826 using block_t = typename Next::block_t;
827
828template <class T, class Assembler>
829class Var : protected Variable {
831
832 public:
833 template <typename Reducer>
834 explicit Var(Reducer* reducer) : Var(reducer->Asm()) {}
835
836 template <typename Reducer>
837 Var(Reducer* reducer, value_type initial_value) : Var(reducer) {
838 assembler_.SetVariable(*this, assembler_.resolve(initial_value));
839 }
840
841 explicit Var(Assembler& assembler)
842 : Variable(assembler.NewVariable(
843 static_cast<const RegisterRepresentation&>(V<T>::rep))),
844 assembler_(assembler) {}
845
846 Var(const Var&) = delete;
847 Var(Var&&) = delete;
848 Var& operator=(const Var) = delete;
849 Var& operator=(Var&&) = delete;
850 ~Var() = default;
851
852 void Set(value_type new_value) {
853 assembler_.SetVariable(*this, assembler_.resolve(new_value));
854 }
855 V<T> Get() const { return assembler_.GetVariable(*this); }
856
857 void operator=(value_type new_value) { Set(new_value); }
858 template <typename U>
859 operator V<U>() const
860 requires v_traits<U>::template
861 implicitly_constructible_from<T>::value {
862 return Get();
863 }
864 template <typename U>
865 operator OptionalV<U>() const
866 requires v_traits<U>::template
867 implicitly_constructible_from<T>::value {
868 return Get();
869 }
870 template <typename U>
871 operator ConstOrV<U>() const
872 requires(const_or_v_exists_v<U> &&
873 v_traits<U>::template implicitly_constructible_from<T>::value)
874 {
875 return Get();
876 }
877 operator OpIndex() const { return Get(); }
878 operator OptionalOpIndex() const { return Get(); }
879
880 protected:
882};
883
884template <typename T, typename Assembler>
885class ScopedVar : public Var<T, Assembler> {
887
888 public:
889 using Base::Base;
891 // Explicitly mark the variable as invalid to avoid the creation of
892 // unnecessary loop phis.
893 this->assembler_.SetVariable(*this, OpIndex::Invalid());
894 }
895
896 using Base::operator=;
897};
898
899// LABEL_BLOCK is used in Reducers to have a single call forwarding to the next
900// reducer without change. A typical use would be:
901//
902// OpIndex ReduceFoo(OpIndex arg) {
903// LABEL_BLOCK(no_change) return Next::ReduceFoo(arg);
904// ...
905// if (...) goto no_change;
906// ...
907// if (...) goto no_change;
908// ...
909// }
910#define LABEL_BLOCK(label) \
911 for (; false; UNREACHABLE()) \
912 label:
913
914// EmitProjectionReducer ensures that projections are always emitted right after
915// their input operation. To do so, when an operation with multiple outputs is
 916// emitted, it always emits its projections, and returns a Tuple of the
917// projections.
 918// It should be "towards" the bottom of the stack (so that calling Next::ReduceXXX
919// just emits XXX without emitting any operation afterwards, and so that
920// Next::ReduceXXX does indeed emit XXX rather than lower/optimize it to some
921// other subgraph), but it should be before GlobalValueNumbering, so that
922// operations with multiple outputs can still be GVNed.
923template <class Next>
924class EmitProjectionReducer
925 : public UniformReducerAdapter<EmitProjectionReducer, Next> {
926 public:
927 TURBOSHAFT_REDUCER_BOILERPLATE(EmitProjection)
928
930 // CatchBlockBegin have a single output, so they never have projections,
931 // but additionally split-edge can transform CatchBlockBeginOp into PhiOp,
932 // which means that there is no guarantee here that Next::CatchBlockBegin is
933 // indeed a CatchBlockBegin (which means that the .Cast<> of the generic
934 // ReduceOperation could fail on CatchBlockBegin).
935 return Next::ReduceCatchBlockBegin();
936 }
937
938 template <Opcode opcode, typename Continuation, typename... Args>
940 OpIndex new_idx = Continuation{this}.Reduce(args...);
941 const Operation& op = Asm().output_graph().Get(new_idx);
942 if constexpr (MayThrow(opcode)) {
943 // Operations that can throw are lowered to an Op+DidntThrow, and what we
944 // get from Next::Reduce is the DidntThrow.
945 return WrapInTupleIfNeeded(op.Cast<DidntThrowOp>(), new_idx);
946 }
947 return WrapInTupleIfNeeded(op.Cast<typename Continuation::Op>(), new_idx);
948 }
949
950 private:
951 template <class Op>
952 V<Any> WrapInTupleIfNeeded(const Op& op, V<Any> idx) {
953 if (op.outputs_rep().size() > 1) {
954 base::SmallVector<V<Any>, 8> projections;
955 auto reps = op.outputs_rep();
956 for (int i = 0; i < static_cast<int>(reps.size()); i++) {
957 projections.push_back(Asm().Projection(idx, i, reps[i]));
958 }
959 return Asm().Tuple(base::VectorOf(projections));
960 }
961 return idx;
962 }
963};
964
965// This reducer takes care of emitting Turboshaft operations. Ideally, the rest
966// of the Assembler stack would be generic, and only TSReducerBase (and
967// TurboshaftAssemblerOpInterface) would be Turboshaft-specific.
968// TODO(dmercadier): this is currently not quite at the very bottom of the stack
969// but actually before ReducerBase and ReducerBaseForwarder. This doesn't
970// matter, because Emit should be unique on the reducer stack, but still, it
971// would be nice to have the TSReducerBase at the very bottom of the stack.
972template <class Next>
973class TSReducerBase : public Next {
974 public:
975 static constexpr bool kIsBottomOfStack = true;
978 using block_t = Block;
979
980 template <class Op, class... Args>
981 OpIndex Emit(Args... args) {
982 static_assert((std::is_base_of<Operation, Op>::value));
983 static_assert(!(std::is_same<Op, Operation>::value));
984 DCHECK_NOT_NULL(Asm().current_block());
985 OpIndex result = Asm().output_graph().next_operation_index();
986 Op& op = Asm().output_graph().template Add<Op>(args...);
987 Asm().output_graph().operation_origins()[result] =
988 Asm().current_operation_origin();
989#ifdef DEBUG
990 if (v8_flags.turboshaft_trace_intermediate_reductions) {
991 std::cout << std::setw(Asm().intermediate_tracing_depth()) << ' ' << "["
992 << ReducerName() << "]: emitted " << op << "\n";
993 }
994 op_to_block_[result] = Asm().current_block();
995 DCHECK(ValidInputs(result));
996#endif // DEBUG
997 if (op.IsBlockTerminator()) Asm().FinalizeBlock();
998 return result;
999 }
1000
1001 private:
1002#ifdef DEBUG
1003 GrowingOpIndexSidetable<Block*> op_to_block_{Asm().phase_zone(),
1004 &Asm().output_graph()};
1005
1006 bool ValidInputs(OpIndex op_idx) {
1007 const Operation& op = Asm().output_graph().Get(op_idx);
1008 if (auto* phi = op.TryCast<PhiOp>()) {
1009 auto pred_blocks = Asm().current_block()->Predecessors();
1010 for (size_t i = 0; i < phi->input_count; ++i) {
1011 Block* input_block = op_to_block_[phi->input(i)];
1012 Block* pred_block = pred_blocks[i];
1013 if (input_block->GetCommonDominator(pred_block) != input_block) {
1014 std::cerr << "Input #" << phi->input(i).id()
1015 << " does not dominate predecessor B"
1016 << pred_block->index().id() << ".\n";
1017 std::cerr << op_idx.id() << ": " << op << "\n";
1018 return false;
1019 }
1020 }
1021 } else {
1022 for (OpIndex input : op.inputs()) {
1023 Block* input_block = op_to_block_[input];
1024 if (input_block->GetCommonDominator(Asm().current_block()) !=
1025 input_block) {
1026 std::cerr << "Input #" << input.id()
1027 << " does not dominate its use.\n";
1028 std::cerr << op_idx.id() << ": " << op << "\n";
1029 return false;
1030 }
1031 }
1032 }
1033 return true;
1034 }
1035#endif // DEBUG
1036};
1037
1038namespace detail {
1039template <typename T>
1040inline T&& MakeShadowy(T&& value) {
1041 static_assert(!std::is_same_v<std::remove_reference_t<T>, OpIndex>);
1042 return std::forward<T>(value);
1043}
1045 return ShadowyOpIndex{value};
1046}
1047template <typename T>
1049 return ShadowyOpIndex{value};
1050}
1055template <typename T>
1059} // namespace detail
1060
1061// This empty base-class is used to provide default-implementations of plain
1062// methods emitting operations.
template <class Next>
class ReducerBaseForwarder : public Next {
 public:

  // For each operation Name, EMIT_OP defines two forwarding methods:
  //  - ReduceInputGraph##Name: re-assembles {op} from the input graph into
  //    the output graph via the assembler's AssembleOutputGraph##Name.
  //  - Reduce##Name: emits a fresh Name##Op into the output graph. Each
  //    argument goes through detail::MakeShadowy, so OpIndex arguments are
  //    converted before being handed to Emit.
#define EMIT_OP(Name)                                                        \
  OpIndex ReduceInputGraph##Name(OpIndex ig_index, const Name##Op& op) {     \
    return this->Asm().AssembleOutputGraph##Name(op);                        \
  }                                                                          \
  template <class... Args>                                                   \
  OpIndex Reduce##Name(Args... args) {                                       \
    return this->Asm().template Emit<Name##Op>(detail::MakeShadowy(args)...); \
  }

#undef EMIT_OP
};
1079
1080// GenericReducerBase provides default implementations of Branch-related
1081// Operations (Goto, Branch, Switch, CheckException), and takes care of updating
1082// Block predecessors (and calls the Assembler to maintain split-edge form).
1083// ReducerBase is always added by Assembler at the bottom of the reducer stack.
1084template <class Next>
1085class GenericReducerBase : public ReducerBaseForwarder<Next> {
1086 public:
1087 TURBOSHAFT_REDUCER_BOILERPLATE(GenericReducerBase)
1088
1090
  // Hook invoked when {block} is bound; no-op by default. Reducers higher in
  // the stack can override this to observe block binding.
  void Bind(Block* block) {}

  // CanAutoInlineBlocksWithSinglePredecessor is used to control whether the
  // CopyingPhase is allowed to automatically inline blocks with a single
  // predecessor or not.
  bool CanAutoInlineBlocksWithSinglePredecessor() const { return true; }

  // Pre-reduction analysis hook; no-op by default.
  void Analyze() {}

#ifdef DEBUG
  // Debug-only hook for checking an old-index/new-index pair; no-op here.
  void Verify(OpIndex old_index, OpIndex new_index) {}
#endif  // DEBUG

  // Removes the last operation emitted into the output graph.
  // {index_of_last_operation} is not needed by this base implementation; it
  // is part of the hook's signature for reducers that track indices.
  void RemoveLast(OpIndex index_of_last_operation) {
    Asm().output_graph().RemoveLast();
  }
1107
1108 void FixLoopPhi(const PhiOp& input_phi, OpIndex output_index,
1109 Block* output_graph_loop) {
1110 if (!Asm()
1111 .output_graph()
1112 .Get(output_index)
1113 .template Is<PendingLoopPhiOp>()) {
1114 return;
1115 }
1116 DCHECK(output_graph_loop->Contains(output_index));
1117 auto& pending_phi = Asm()
1118 .output_graph()
1119 .Get(output_index)
1120 .template Cast<PendingLoopPhiOp>();
1121#ifdef DEBUG
1122 DCHECK_EQ(pending_phi.rep, input_phi.rep);
1123 // The 1st input of the PendingLoopPhi should be the same as the original
1124 // Phi, except for peeled loops (where it's the same as the 2nd input when
1125 // computed with the VariableReducer Snapshot right before the loop was
1126 // emitted).
1128 pending_phi.first() != Asm().MapToNewGraph(input_phi.input(0)),
1129 output_graph_loop->has_peeled_iteration());
1130#endif
1131 Asm().output_graph().template Replace<PhiOp>(
1132 output_index,
1134 {pending_phi.first(), Asm().MapToNewGraph(input_phi.input(1))}),
1135 input_phi.rep);
1136 }
1137
1140 DCHECK(Asm().current_block()->IsMerge() &&
1141 inputs.size() == Asm().current_block()->Predecessors().size());
1142 return Base::ReducePhi(inputs, rep);
1143 }
1144
1146 DCHECK(Asm().current_block()->IsLoop());
1147 return Base::ReducePendingLoopPhi(first, rep);
1148 }
1149
1150 V<None> REDUCE(Goto)(Block* destination, bool is_backedge) {
1151 // Calling Base::Goto will call Emit<Goto>, which will call FinalizeBlock,
1152 // which will reset {current_block_}. We thus save {current_block_} before
1153 // calling Base::Goto, as we'll need it for AddPredecessor. Note also that
1154 // AddPredecessor might introduce some new blocks/operations if it needs to
1155 // split an edge, which means that it has to run after Base::Goto
1156 // (otherwise, the current Goto could be inserted in the wrong block).
1157 Block* saved_current_block = Asm().current_block();
1158 V<None> new_opindex = Base::ReduceGoto(destination, is_backedge);
1159 Asm().AddPredecessor(saved_current_block, destination, false);
1160 return new_opindex;
1161 }
1162
1164 BranchHint hint) {
1165 // There should never be a good reason to generate a Branch where both the
1166 // {if_true} and {if_false} are the same Block. If we ever decide to lift
1167 // this condition, then AddPredecessor and SplitEdge should be updated
1168 // accordingly.
1169 DCHECK_NE(if_true, if_false);
1170 Block* saved_current_block = Asm().current_block();
1171 V<None> new_opindex =
1172 Base::ReduceBranch(condition, if_true, if_false, hint);
1173 Asm().AddPredecessor(saved_current_block, if_true, true);
1174 Asm().AddPredecessor(saved_current_block, if_false, true);
1175 return new_opindex;
1176 }
1177
1179 Block* current_block = Asm().current_block();
1180 if (current_block->IsBranchTarget()) {
1181 DCHECK_EQ(current_block->PredecessorCount(), 1);
1182 DCHECK_EQ(current_block->LastPredecessor()
1183 ->LastOperation(Asm().output_graph())
1184 .template Cast<CheckExceptionOp>()
1185 .catch_block,
1186 current_block);
1187 return Base::ReduceCatchBlockBegin();
1188 }
1189 // We are trying to emit a CatchBlockBegin into a block that used to be the
1190 // catch_block successor but got edge-splitted into a merge. Therefore, we
1191 // need to emit a phi now and can rely on the predecessors all having a
1192 // ReduceCatchBlockBegin and nothing else.
1193 DCHECK(current_block->IsMerge());
1195 for (Block* predecessor : current_block->Predecessors()) {
1196 V<Object> catch_begin = predecessor->begin();
1197 DCHECK(Asm().Get(catch_begin).template Is<CatchBlockBeginOp>());
1198 phi_inputs.push_back(catch_begin);
1199 }
1200 return Asm().Phi(base::VectorOf(phi_inputs),
1202 }
1203
1205 Block* default_case, BranchHint default_hint) {
1206#ifdef DEBUG
1207 // Making sure that all cases and {default_case} are different. If we ever
1208 // decide to lift this condition, then AddPredecessor and SplitEdge should
1209 // be updated accordingly.
1210 std::unordered_set<Block*> seen;
1211 seen.insert(default_case);
1212 for (auto switch_case : cases) {
1213 DCHECK_EQ(seen.count(switch_case.destination), 0);
1214 seen.insert(switch_case.destination);
1215 }
1216#endif
1217 Block* saved_current_block = Asm().current_block();
1218 V<None> new_opindex =
1219 Base::ReduceSwitch(input, cases, default_case, default_hint);
1220 for (SwitchOp::Case c : cases) {
1221 Asm().AddPredecessor(saved_current_block, c.destination, true);
1222 }
1223 Asm().AddPredecessor(saved_current_block, default_case, true);
1224 return new_opindex;
1225 }
1226
1230 const TSCallDescriptor* descriptor, OpEffects effects) {
1231 V<Any> raw_call =
1232 Base::ReduceCall(callee, frame_state, arguments, descriptor, effects);
1233 bool has_catch_block = false;
1234 if (descriptor->can_throw == CanThrow::kYes) {
1235 // TODO(nicohartmann@): Unfortunately, we have many descriptors where
1236 // effects are not set consistently with {can_throw}. We should fix those
1237 // and reenable this DCHECK.
1238 // DCHECK(effects.is_required_when_unused());
1239 effects = effects.RequiredWhenUnused();
1240 has_catch_block = CatchIfInCatchScope(raw_call);
1241 }
1242 return ReduceDidntThrow(raw_call, has_catch_block, &descriptor->out_reps,
1243 effects);
1244 }
1245
1247 V<FrameState> frame_state, V<Object> data_argument, V<Context> context,
1249 const FastApiCallParameters* parameters,
1251 OpIndex raw_call = Base::ReduceFastApiCall(
1252 frame_state, data_argument, context, arguments, parameters, out_reps);
1253 bool has_catch_block = CatchIfInCatchScope(raw_call);
1254 return ReduceDidntThrow(raw_call, has_catch_block,
1255 &Asm()
1256 .output_graph()
1257 .Get(raw_call)
1258 .template Cast<FastApiCallOp>()
1259 .out_reps,
1260 OpEffects().CanCallAnything());
1261 }
1262
1263#define REDUCE_THROWING_OP(Name) \
1264 template <typename... Args> \
1265 V<Any> Reduce##Name(Args... args) { \
1266 OpIndex raw_op_index = Base::Reduce##Name(args...); \
1267 bool has_catch_block = CatchIfInCatchScope(raw_op_index); \
1268 const Name##Op& raw_op = \
1269 Asm().output_graph().Get(raw_op_index).template Cast<Name##Op>(); \
1270 return ReduceDidntThrow(raw_op_index, has_catch_block, &raw_op.kOutReps, \
1271 raw_op.Effects()); \
1272 }
1274#undef REDUCE_THROWING_OP
1275
1276 private:
1277 // These reduce functions are private, as they should only be emitted
1278 // automatically by `CatchIfInCatchScope` and `DoNotCatch` defined below and
1279 // never explicitly.
1280 using Base::ReduceDidntThrow;
1281 V<None> REDUCE(CheckException)(V<Any> throwing_operation, Block* successor,
1282 Block* catch_block) {
1283 // {successor} and {catch_block} should never be the same. AddPredecessor
1284 // and SplitEdge rely on this.
1285 DCHECK_NE(successor, catch_block);
1286 Block* saved_current_block = Asm().current_block();
1287 V<None> new_opindex =
1288 Base::ReduceCheckException(throwing_operation, successor, catch_block);
1289 Asm().AddPredecessor(saved_current_block, successor, true);
1290 Asm().AddPredecessor(saved_current_block, catch_block, true);
1291 return new_opindex;
1292 }
1293
1294 bool CatchIfInCatchScope(OpIndex throwing_operation) {
1295 if (Asm().current_catch_block()) {
1296 Block* successor = Asm().NewBlock();
1297 ReduceCheckException(throwing_operation, successor,
1298 Asm().current_catch_block());
1299 Asm().BindReachable(successor);
1300 return true;
1301 }
1302 return false;
1303 }
1304};
1305
1306namespace detail {
1307
// Flattens the {current_values} tuple into the result tuple, so the FOREACH
// machinery always receives the loop values as individual tuple elements
// instead of a nested tuple. {Indices} must enumerate the elements of
// {current_values}.
template <typename LoopLabel, typename Iterable, typename Iterator,
          typename ValueTuple, size_t... Indices>
auto BuildResultTupleImpl(bool bound, Iterable&& iterable,
                          LoopLabel&& loop_header, Label<> loop_exit,
                          Iterator current_iterator, ValueTuple current_values,
                          std::index_sequence<Indices...>) {
  return std::make_tuple(bound, std::forward<Iterable>(iterable),
                         std::forward<LoopLabel>(loop_header),
                         std::move(loop_exit), current_iterator,
                         std::get<Indices>(current_values)...);
}
1319
// Overload for a single loop value: packs everything straight into the
// result tuple, no flattening required.
template <typename LoopLabel, typename Iterable, typename Iterator,
          typename Value>
auto BuildResultTuple(bool bound, Iterable&& iterable, LoopLabel&& loop_header,
                      Label<> loop_exit, Iterator current_iterator,
                      Value current_value) {
  return std::make_tuple(bound, std::forward<Iterable>(iterable),
                         std::forward<LoopLabel>(loop_header),
                         std::move(loop_exit), current_iterator, current_value);
}
1329
// Overload for multiple loop values: delegates to BuildResultTupleImpl with
// an index sequence so the values tuple is spliced element-by-element into
// the result. The iterator tuple and the values tuple must have matching
// arity.
template <typename LoopLabel, typename Iterable, typename Iterator,
          typename... Values>
auto BuildResultTuple(bool bound, Iterable&& iterable, LoopLabel&& loop_header,
                      Label<> loop_exit, Iterator current_iterator,
                      std::tuple<Values...> current_values) {
  static_assert(std::tuple_size_v<Iterator> == sizeof...(Values));
  return BuildResultTupleImpl(bound, std::forward<Iterable>(iterable),
                              std::forward<LoopLabel>(loop_header),
                              std::move(loop_exit), std::move(current_iterator),
                              std::move(current_values),
                              std::make_index_sequence<sizeof...(Values)>{});
}
1342
1343} // namespace detail
1344
1345template <typename Assembler>
1347 public:
  // Downcasts to the concrete assembler type (CRTP-style static_cast).
  assembler_t& Asm() { return *static_cast<assembler_t*>(this); }
1351
1352 // These methods are used by the assembler macros (BIND, BIND_LOOP, GOTO,
1353 // GOTO_IF).
1354 template <typename L>
1357 // LoopLabels need to be bound with `BIND_LOOP` instead of `BIND`.
1358 static_assert(!L::is_loop);
1359 return label.Bind(Asm());
1360 }
1361
1362 template <typename L>
1365 // Only LoopLabels can be bound with `BIND_LOOP`. Otherwise use `BIND`.
1366 static_assert(L::is_loop);
1367 return label.BindLoop(Asm());
1368 }
1369
1370 template <typename L>
1372 static_assert(L::is_loop);
1373 label.EndLoop(Asm());
1374 }
1375
1376 template <ForeachIterable<assembler_t> It>
1377 auto ControlFlowHelper_Foreach(It iterable) {
1378 // We need to take ownership over the `iterable` instance as we need to make
1379 // sure that the `ControlFlowHelper_Foreach` and
1380 // `ControlFlowHelper_EndForeachLoop` functions operate on the same object.
1381 // This can potentially involve copying the `iterable` if it is not moved to
1382 // the `FOREACH` macro. `ForeachIterable`s should be cheap to copy and they
1383 // MUST NOT emit any code in their constructors/destructors.
1384#ifdef DEBUG
1385 OpIndex next_index = Asm().output_graph().next_operation_index();
1386 {
1387 It temp_copy = iterable;
1388 USE(temp_copy);
1389 }
1390 // Make sure we have not emitted any operations.
1391 DCHECK_EQ(next_index, Asm().output_graph().next_operation_index());
1392#endif
1393
1395 Label<> loop_exit(this);
1396
1397 typename It::iterator_type begin = iterable.Begin(Asm());
1398
1399 ControlFlowHelper_Goto(loop_header, {begin});
1400
1401 auto bound_and_current_iterator = loop_header.BindLoop(Asm());
1402 auto [bound] = base::tuple_head<1>(bound_and_current_iterator);
1403 auto current_iterator = detail::unwrap_unary_tuple(
1404 base::tuple_drop<1>(bound_and_current_iterator));
1405 OptionalV<Word32> is_end = iterable.IsEnd(Asm(), current_iterator);
1406 if (is_end.has_value()) {
1407 ControlFlowHelper_GotoIf(is_end.value(), loop_exit, {});
1408 }
1409
1410 typename It::value_type current_value =
1411 iterable.Dereference(Asm(), current_iterator);
1412
1414 bound, std::move(iterable), std::move(loop_header),
1415 std::move(loop_exit), current_iterator, current_value);
1416 }
1417
1418 template <ForeachIterable<assembler_t> It>
1420 It iterable, LoopLabelFor<typename It::iterator_type>& header_label,
1421 Label<>& exit_label, typename It::iterator_type current_iterator) {
1422 typename It::iterator_type next_iterator =
1423 iterable.Advance(Asm(), current_iterator);
1424 ControlFlowHelper_Goto(header_label, {next_iterator});
1425 ControlFlowHelper_EndLoop(header_label);
1426 ControlFlowHelper_Bind(exit_label);
1427 }
1428
1429 std::tuple<bool, LoopLabel<>, Label<>> ControlFlowHelper_While(
1430 std::function<V<Word32>()> cond_builder) {
1431 LoopLabel<> loop_header(this);
1432 Label<> loop_exit(this);
1433
1434 ControlFlowHelper_Goto(loop_header, {});
1435
1436 auto [bound] = loop_header.BindLoop(Asm());
1437 V<Word32> cond = cond_builder();
1438 ControlFlowHelper_GotoIfNot(cond, loop_exit, {});
1439
1440 return std::make_tuple(bound, std::move(loop_header), std::move(loop_exit));
1441 }
1442
1443 template <typename L1, typename L2>
1444 void ControlFlowHelper_EndWhileLoop(L1& header_label, L2& exit_label) {
1445 static_assert(L1::is_loop);
1446 static_assert(!L2::is_loop);
1447 ControlFlowHelper_Goto(header_label, {});
1448 ControlFlowHelper_EndLoop(header_label);
1449 ControlFlowHelper_Bind(exit_label);
1450 }
1451
1452 template <typename L>
1454 const typename L::const_or_values_t& values) {
1455 auto resolved_values = detail::ResolveAll(Asm(), values);
1456 label.Goto(Asm(), resolved_values);
1457 }
1458
1459 template <typename L>
1461 const typename L::const_or_values_t& values) {
1462 auto resolved_values = detail::ResolveAll(Asm(), values);
1463 label.GotoIf(Asm(), condition.condition(), condition.hint(),
1464 resolved_values);
1465 }
1466
1467 template <typename L>
1470 const typename L::const_or_values_t& values) {
1471 auto resolved_values = detail::ResolveAll(Asm(), values);
1472 label.GotoIfNot(Asm(), condition.condition(), condition.hint(),
1473 resolved_values);
1474 }
1475
1480
1483 block_t* then_block = Asm().NewBlock();
1484 state->else_block = Asm().NewBlock();
1485 state->end_block = Asm().NewBlock();
1486 Asm().Branch(condition, then_block, state->else_block);
1487 return Asm().Bind(then_block);
1488 }
1489
1492 block_t* then_block = Asm().NewBlock();
1493 state->else_block = Asm().NewBlock();
1494 state->end_block = Asm().NewBlock();
1495 Asm().Branch(condition, state->else_block, then_block);
1496 return Asm().Bind(then_block);
1497 }
1498
1500 block_t* else_block = state->else_block;
1501 state->else_block = nullptr;
1502 return Asm().Bind(else_block);
1503 }
1504
1506 if (Asm().current_block() == nullptr) return;
1507 Asm().Goto(state->end_block);
1508 }
1509
1511 if (state->else_block) {
1512 if (Asm().Bind(state->else_block)) {
1513 Asm().Goto(state->end_block);
1514 }
1515 }
1516 Asm().Bind(state->end_block);
1517 }
1518};
1519
1520template <typename Assembler>
1522 : public GenericAssemblerOpInterface<Assembler> {
1523 public:
1525
1526 template <typename... Args>
1529 matcher_(Asm().output_graph()) {}
1530
1531 const OperationMatcher& matcher() const { return matcher_; }
1532
1533 // Methods to be used by the reducers to reducer operations with the whole
1534 // reducer stack.
1535
1537 return ReduceIfReachableWord32SignHint(input, sign);
1538 }
1539
1546
1550 LazyDeoptOnThrow lazy_deopt_on_throw) {
1551 return ReduceIfReachableGenericBinop(left, right, frame_state, context,
1552 kind, lazy_deopt_on_throw);
1553 }
1554#define DECL_GENERIC_BINOP(Name) \
1555 V<Object> Generic##Name( \
1556 V<Object> left, V<Object> right, V<turboshaft::FrameState> frame_state, \
1557 V<Context> context, LazyDeoptOnThrow lazy_deopt_on_throw) { \
1558 return GenericBinop(left, right, frame_state, context, \
1559 GenericBinopOp::Kind::k##Name, lazy_deopt_on_throw); \
1560 }
1562#undef DECL_GENERIC_BINOP
1563
1566 LazyDeoptOnThrow lazy_deopt_on_throw) {
1567 return ReduceIfReachableGenericUnop(input, frame_state, context, kind,
1568 lazy_deopt_on_throw);
1569 }
1570#define DECL_GENERIC_UNOP(Name) \
1571 V<Object> Generic##Name( \
1572 V<Object> input, V<turboshaft::FrameState> frame_state, \
1573 V<Context> context, LazyDeoptOnThrow lazy_deopt_on_throw) { \
1574 return GenericUnop(input, frame_state, context, \
1575 GenericUnopOp::Kind::k##Name, lazy_deopt_on_throw); \
1576 }
1578#undef DECL_GENERIC_UNOP
1579
1583 LazyDeoptOnThrow lazy_deopt_on_throw) {
1584 return ReduceIfReachableToNumberOrNumeric(input, frame_state, context, kind,
1585 lazy_deopt_on_throw);
1586 }
1588 V<Context> context, LazyDeoptOnThrow lazy_deopt_on_throw) {
1589 return ToNumberOrNumeric(input, frame_state, context,
1591 lazy_deopt_on_throw);
1592 }
1594 V<Context> context,
1595 LazyDeoptOnThrow lazy_deopt_on_throw) {
1596 return ToNumberOrNumeric(input, frame_state, context,
1598 lazy_deopt_on_throw);
1599 }
1600
1601#define DECL_MULTI_REP_BINOP(name, operation, rep_type, kind) \
1602 OpIndex name(OpIndex left, OpIndex right, rep_type rep) { \
1603 return ReduceIfReachable##operation(left, right, \
1604 operation##Op::Kind::k##kind, rep); \
1605 }
1606
1607#define DECL_MULTI_REP_BINOP_V(name, operation, kind, tag) \
1608 V<tag> name(V<tag> left, V<tag> right, v_traits<tag>::rep_type rep) { \
1609 return ReduceIfReachable##operation(left, right, \
1610 operation##Op::Kind::k##kind, rep); \
1611 }
1612
1613#define DECL_SINGLE_REP_BINOP_V(name, operation, kind, tag) \
1614 V<tag> name(ConstOrV<tag> left, ConstOrV<tag> right) { \
1615 return ReduceIfReachable##operation(resolve(left), resolve(right), \
1616 operation##Op::Kind::k##kind, \
1617 V<tag>::rep); \
1618 }
1619 DECL_MULTI_REP_BINOP_V(WordAdd, WordBinop, Add, Word)
1620 DECL_SINGLE_REP_BINOP_V(Word32Add, WordBinop, Add, Word32)
1621 DECL_SINGLE_REP_BINOP_V(Word64Add, WordBinop, Add, Word64)
1622 DECL_SINGLE_REP_BINOP_V(WordPtrAdd, WordBinop, Add, WordPtr)
1623
1624 DECL_MULTI_REP_BINOP_V(WordMul, WordBinop, Mul, Word)
1625 DECL_SINGLE_REP_BINOP_V(Word32Mul, WordBinop, Mul, Word32)
1626 DECL_SINGLE_REP_BINOP_V(Word64Mul, WordBinop, Mul, Word64)
1627 DECL_SINGLE_REP_BINOP_V(WordPtrMul, WordBinop, Mul, WordPtr)
1628
1629 DECL_MULTI_REP_BINOP_V(WordBitwiseAnd, WordBinop, BitwiseAnd, Word)
1630 DECL_SINGLE_REP_BINOP_V(Word32BitwiseAnd, WordBinop, BitwiseAnd, Word32)
1631 DECL_SINGLE_REP_BINOP_V(Word64BitwiseAnd, WordBinop, BitwiseAnd, Word64)
1632 DECL_SINGLE_REP_BINOP_V(WordPtrBitwiseAnd, WordBinop, BitwiseAnd, WordPtr)
1633
1634 DECL_MULTI_REP_BINOP_V(WordBitwiseOr, WordBinop, BitwiseOr, Word)
1635 DECL_SINGLE_REP_BINOP_V(Word32BitwiseOr, WordBinop, BitwiseOr, Word32)
1636 DECL_SINGLE_REP_BINOP_V(Word64BitwiseOr, WordBinop, BitwiseOr, Word64)
1637 DECL_SINGLE_REP_BINOP_V(WordPtrBitwiseOr, WordBinop, BitwiseOr, WordPtr)
1638
1639 DECL_MULTI_REP_BINOP_V(WordBitwiseXor, WordBinop, BitwiseXor, Word)
1640 DECL_SINGLE_REP_BINOP_V(Word32BitwiseXor, WordBinop, BitwiseXor, Word32)
1641 DECL_SINGLE_REP_BINOP_V(Word64BitwiseXor, WordBinop, BitwiseXor, Word64)
1642
1643 DECL_MULTI_REP_BINOP_V(WordSub, WordBinop, Sub, Word)
1644 DECL_SINGLE_REP_BINOP_V(Word32Sub, WordBinop, Sub, Word32)
1645 DECL_SINGLE_REP_BINOP_V(Word64Sub, WordBinop, Sub, Word64)
1646 DECL_SINGLE_REP_BINOP_V(WordPtrSub, WordBinop, Sub, WordPtr)
1647
1648 DECL_MULTI_REP_BINOP_V(IntDiv, WordBinop, SignedDiv, Word)
1649 DECL_SINGLE_REP_BINOP_V(Int32Div, WordBinop, SignedDiv, Word32)
1650 DECL_SINGLE_REP_BINOP_V(Int64Div, WordBinop, SignedDiv, Word64)
1651 DECL_MULTI_REP_BINOP_V(UintDiv, WordBinop, UnsignedDiv, Word)
1652 DECL_SINGLE_REP_BINOP_V(Uint32Div, WordBinop, UnsignedDiv, Word32)
1653 DECL_SINGLE_REP_BINOP_V(Uint64Div, WordBinop, UnsignedDiv, Word64)
1654 DECL_MULTI_REP_BINOP_V(IntMod, WordBinop, SignedMod, Word)
1655 DECL_SINGLE_REP_BINOP_V(Int32Mod, WordBinop, SignedMod, Word32)
1656 DECL_SINGLE_REP_BINOP_V(Int64Mod, WordBinop, SignedMod, Word64)
1657 DECL_MULTI_REP_BINOP_V(UintMod, WordBinop, UnsignedMod, Word)
1658 DECL_SINGLE_REP_BINOP_V(Uint32Mod, WordBinop, UnsignedMod, Word32)
1659 DECL_SINGLE_REP_BINOP_V(Uint64Mod, WordBinop, UnsignedMod, Word64)
1661 Word)
1662 DECL_SINGLE_REP_BINOP_V(Int32MulOverflownBits, WordBinop,
1664 DECL_SINGLE_REP_BINOP_V(Int64MulOverflownBits, WordBinop,
1666 DECL_MULTI_REP_BINOP_V(UintMulOverflownBits, WordBinop,
1668 DECL_SINGLE_REP_BINOP_V(Uint32MulOverflownBits, WordBinop,
1670 DECL_SINGLE_REP_BINOP_V(Uint64MulOverflownBits, WordBinop,
1672
1674 return Word32BitwiseXor(input, static_cast<uint32_t>(-1));
1675 }
1676
1679 return ReduceIfReachableWordBinop(left, right, kind, rep);
1680 }
1684 return ReduceIfReachableOverflowCheckedBinop(left, right, kind, rep);
1685 }
1686
1687#define DECL_MULTI_REP_CHECK_BINOP_V(name, operation, kind, tag) \
1688 V<turboshaft::Tuple<tag, Word32>> name(V<tag> left, V<tag> right, \
1689 v_traits<tag>::rep_type rep) { \
1690 return ReduceIfReachable##operation(left, right, \
1691 operation##Op::Kind::k##kind, rep); \
1692 }
1693#define DECL_SINGLE_REP_CHECK_BINOP_V(name, operation, kind, tag) \
1694 V<turboshaft::Tuple<tag, Word32>> name(ConstOrV<tag> left, \
1695 ConstOrV<tag> right) { \
1696 return ReduceIfReachable##operation(resolve(left), resolve(right), \
1697 operation##Op::Kind::k##kind, \
1698 V<tag>::rep); \
1699 }
1701 SignedAdd, Word)
1713 SignedMul, Word)
1718#undef DECL_MULTI_REP_CHECK_BINOP_V
1719#undef DECL_SINGLE_REP_CHECK_BINOP_V
1720
1721 DECL_MULTI_REP_BINOP_V(FloatAdd, FloatBinop, Add, Float)
1722 DECL_SINGLE_REP_BINOP_V(Float32Add, FloatBinop, Add, Float32)
1723 DECL_SINGLE_REP_BINOP_V(Float64Add, FloatBinop, Add, Float64)
1724 DECL_MULTI_REP_BINOP_V(FloatMul, FloatBinop, Mul, Float)
1725 DECL_SINGLE_REP_BINOP_V(Float32Mul, FloatBinop, Mul, Float32)
1726 DECL_SINGLE_REP_BINOP_V(Float64Mul, FloatBinop, Mul, Float64)
1727 DECL_MULTI_REP_BINOP_V(FloatSub, FloatBinop, Sub, Float)
1728 DECL_SINGLE_REP_BINOP_V(Float32Sub, FloatBinop, Sub, Float32)
1729 DECL_SINGLE_REP_BINOP_V(Float64Sub, FloatBinop, Sub, Float64)
1730 DECL_MULTI_REP_BINOP_V(FloatDiv, FloatBinop, Div, Float)
1731 DECL_SINGLE_REP_BINOP_V(Float32Div, FloatBinop, Div, Float32)
1732 DECL_SINGLE_REP_BINOP_V(Float64Div, FloatBinop, Div, Float64)
1733 DECL_MULTI_REP_BINOP_V(FloatMin, FloatBinop, Min, Float)
1734 DECL_SINGLE_REP_BINOP_V(Float32Min, FloatBinop, Min, Float32)
1735 DECL_SINGLE_REP_BINOP_V(Float64Min, FloatBinop, Min, Float64)
1736 DECL_MULTI_REP_BINOP_V(FloatMax, FloatBinop, Max, Float)
1737 DECL_SINGLE_REP_BINOP_V(Float32Max, FloatBinop, Max, Float32)
1738 DECL_SINGLE_REP_BINOP_V(Float64Max, FloatBinop, Max, Float64)
1739 DECL_SINGLE_REP_BINOP_V(Float64Mod, FloatBinop, Mod, Float64)
1740 DECL_SINGLE_REP_BINOP_V(Float64Power, FloatBinop, Power, Float64)
1741 DECL_SINGLE_REP_BINOP_V(Float64Atan2, FloatBinop, Atan2, Float64)
1742
1743 V<Word> Shift(V<Word> left, V<Word32> right, ShiftOp::Kind kind,
1745 return ReduceIfReachableShift(left, right, kind, rep);
1746 }
1747
1748#define DECL_SINGLE_REP_SHIFT_V(name, kind, tag) \
1749 V<tag> name(ConstOrV<tag> left, ConstOrV<Word32> right) { \
1750 return ReduceIfReachableShift(resolve(left), resolve(right), \
1751 ShiftOp::Kind::k##kind, V<tag>::rep); \
1752 }
1753
1756 DECL_SINGLE_REP_SHIFT_V(Word32ShiftRightArithmeticShiftOutZeros,
1758 DECL_SINGLE_REP_SHIFT_V(Word64ShiftRightArithmeticShiftOutZeros,
1760 DECL_SINGLE_REP_SHIFT_V(WordPtrShiftRightArithmeticShiftOutZeros,
1765 Word32)
1767 Word64)
1768 DECL_SINGLE_REP_SHIFT_V(WordPtrShiftRightArithmetic, ShiftRightArithmetic,
1769 WordPtr)
1772 DECL_SINGLE_REP_SHIFT_V(Word32ShiftRightLogical, ShiftRightLogical, Word32)
1773 DECL_SINGLE_REP_SHIFT_V(Word64ShiftRightLogical, ShiftRightLogical, Word64)
1774 DECL_SINGLE_REP_SHIFT_V(WordPtrShiftRightLogical, ShiftRightLogical, WordPtr)
1776 DECL_SINGLE_REP_SHIFT_V(Word32ShiftLeft, ShiftLeft, Word32)
1777 DECL_SINGLE_REP_SHIFT_V(Word64ShiftLeft, ShiftLeft, Word64)
1778 DECL_SINGLE_REP_SHIFT_V(WordPtrShiftLeft, ShiftLeft, WordPtr)
1779 DECL_MULTI_REP_BINOP(RotateRight, Shift, WordRepresentation, RotateRight)
1780 DECL_SINGLE_REP_SHIFT_V(Word32RotateRight, RotateRight, Word32)
1781 DECL_SINGLE_REP_SHIFT_V(Word64RotateRight, RotateRight, Word64)
1782 DECL_MULTI_REP_BINOP(RotateLeft, Shift, WordRepresentation, RotateLeft)
1783 DECL_SINGLE_REP_SHIFT_V(Word32RotateLeft, RotateLeft, Word32)
1784 DECL_SINGLE_REP_SHIFT_V(Word64RotateLeft, RotateLeft, Word64)
1785
1786 V<Word> ShiftRightLogical(V<Word> left, uint32_t right,
1788 DCHECK_GE(right, 0);
1789 DCHECK_LT(right, rep.bit_width());
1790 return ShiftRightLogical(left, this->Word32Constant(right), rep);
1791 }
1794 DCHECK_GE(right, 0);
1795 DCHECK_LT(right, rep.bit_width());
1796 return ShiftRightArithmetic(left, this->Word32Constant(right), rep);
1797 }
1799 DCHECK_LT(right, rep.bit_width());
1800 return ShiftLeft(left, this->Word32Constant(right), rep);
1801 }
1802
1806
1808 return Equal(left, right, RegisterRepresentation::Tagged());
1809 }
1810
1812 return __ TaggedEqual(
1813 input, __ HeapConstant(Cast<HeapObject>(isolate->root_handle(root))));
1814 }
1815
1816#define DECL_SINGLE_REP_EQUAL_V(name, tag) \
1817 V<Word32> name(ConstOrV<tag> left, ConstOrV<tag> right) { \
1818 return ReduceIfReachableComparison(resolve(left), resolve(right), \
1819 ComparisonOp::Kind::kEqual, \
1820 V<tag>::rep); \
1821 }
1822 DECL_SINGLE_REP_EQUAL_V(Word32Equal, Word32)
1823 DECL_SINGLE_REP_EQUAL_V(Word64Equal, Word64)
1824 DECL_SINGLE_REP_EQUAL_V(WordPtrEqual, WordPtr)
1825 DECL_SINGLE_REP_EQUAL_V(Float32Equal, Float32)
1826 DECL_SINGLE_REP_EQUAL_V(Float64Equal, Float64)
1827#undef DECL_SINGLE_REP_EQUAL_V
1828
1829#define DECL_SINGLE_REP_COMPARISON_V(name, kind, tag) \
1830 V<Word32> name(ConstOrV<tag> left, ConstOrV<tag> right) { \
1831 return ReduceIfReachableComparison(resolve(left), resolve(right), \
1832 ComparisonOp::Kind::k##kind, \
1833 V<tag>::rep); \
1834 }
1835
1837 SignedLessThan)
1838 DECL_SINGLE_REP_COMPARISON_V(Int32LessThan, SignedLessThan, Word32)
1839 DECL_SINGLE_REP_COMPARISON_V(Int64LessThan, SignedLessThan, Word64)
1840 DECL_SINGLE_REP_COMPARISON_V(IntPtrLessThan, SignedLessThan, WordPtr)
1841
1843 UnsignedLessThan)
1844 DECL_SINGLE_REP_COMPARISON_V(Uint32LessThan, UnsignedLessThan, Word32)
1845 DECL_SINGLE_REP_COMPARISON_V(Uint64LessThan, UnsignedLessThan, Word64)
1846 DECL_SINGLE_REP_COMPARISON_V(UintPtrLessThan, UnsignedLessThan, WordPtr)
1848 SignedLessThan)
1849 DECL_SINGLE_REP_COMPARISON_V(Float32LessThan, SignedLessThan, Float32)
1850 DECL_SINGLE_REP_COMPARISON_V(Float64LessThan, SignedLessThan, Float64)
1851
1855 Word32)
1857 Word64)
1859 WordPtr)
1863 Word32)
1865 Word64)
1867 WordPtr)
1871 Float32)
1873 Float64)
1874#undef DECL_SINGLE_REP_COMPARISON_V
1875
1878 return ReduceIfReachableComparison(left, right, kind, rep);
1879 }
1880
1881#undef DECL_SINGLE_REP_BINOP_V
1882#undef DECL_MULTI_REP_BINOP
1883
1886 return ReduceIfReachableFloatUnary(input, kind, rep);
1887 }
1889 return ReduceIfReachableFloatUnary(input, kind,
1891 }
1892
1893#define DECL_MULTI_REP_UNARY(name, operation, rep_type, kind) \
1894 OpIndex name(OpIndex input, rep_type rep) { \
1895 return ReduceIfReachable##operation(input, operation##Op::Kind::k##kind, \
1896 rep); \
1897 }
1898#define DECL_MULTI_REP_UNARY_V(name, operation, rep_type, kind, tag) \
1899 V<tag> name(V<tag> input, rep_type rep) { \
1900 return ReduceIfReachable##operation(input, operation##Op::Kind::k##kind, \
1901 rep); \
1902 }
1903#define DECL_SINGLE_REP_UNARY_V(name, operation, kind, tag) \
1904 V<tag> name(ConstOrV<tag> input) { \
1905 return ReduceIfReachable##operation( \
1906 resolve(input), operation##Op::Kind::k##kind, V<tag>::rep); \
1907 }
1908
1913 Float)
1914 DECL_SINGLE_REP_UNARY_V(Float32Negate, FloatUnary, Negate, Float32)
1915 DECL_SINGLE_REP_UNARY_V(Float64Negate, FloatUnary, Negate, Float64)
1916 DECL_SINGLE_REP_UNARY_V(Float64SilenceNaN, FloatUnary, SilenceNaN, Float64)
1922 Float)
1932 Float32)
1934 Float64)
1935 DECL_SINGLE_REP_UNARY_V(Float64Log, FloatUnary, Log, Float64)
1937 Float)
1940 DECL_SINGLE_REP_UNARY_V(Float64Exp, FloatUnary, Exp, Float64)
1941 DECL_SINGLE_REP_UNARY_V(Float64Expm1, FloatUnary, Expm1, Float64)
1942 DECL_SINGLE_REP_UNARY_V(Float64Sin, FloatUnary, Sin, Float64)
1943 DECL_SINGLE_REP_UNARY_V(Float64Cos, FloatUnary, Cos, Float64)
1944 DECL_SINGLE_REP_UNARY_V(Float64Sinh, FloatUnary, Sinh, Float64)
1945 DECL_SINGLE_REP_UNARY_V(Float64Cosh, FloatUnary, Cosh, Float64)
1946 DECL_SINGLE_REP_UNARY_V(Float64Asin, FloatUnary, Asin, Float64)
1947 DECL_SINGLE_REP_UNARY_V(Float64Acos, FloatUnary, Acos, Float64)
1948 DECL_SINGLE_REP_UNARY_V(Float64Asinh, FloatUnary, Asinh, Float64)
1949 DECL_SINGLE_REP_UNARY_V(Float64Acosh, FloatUnary, Acosh, Float64)
1950 DECL_SINGLE_REP_UNARY_V(Float64Tan, FloatUnary, Tan, Float64)
1951 DECL_SINGLE_REP_UNARY_V(Float64Tanh, FloatUnary, Tanh, Float64)
1952 DECL_SINGLE_REP_UNARY_V(Float64Log2, FloatUnary, Log2, Float64)
1953 DECL_SINGLE_REP_UNARY_V(Float64Log10, FloatUnary, Log10, Float64)
1954 DECL_SINGLE_REP_UNARY_V(Float64Log1p, FloatUnary, Log1p, Float64)
1955 DECL_SINGLE_REP_UNARY_V(Float64Atan, FloatUnary, Atan, Float64)
1956 DECL_SINGLE_REP_UNARY_V(Float64Atanh, FloatUnary, Atanh, Float64)
1957 DECL_SINGLE_REP_UNARY_V(Float64Cbrt, FloatUnary, Cbrt, Float64)
1958
1966 Word32)
1968 Word64)
1971 DECL_SINGLE_REP_UNARY_V(Word32CountTrailingZeros, WordUnary,
1973 DECL_SINGLE_REP_UNARY_V(Word64CountTrailingZeros, WordUnary,
1976 Word)
1980 SignExtend8, Word)
1981 DECL_SINGLE_REP_UNARY_V(Word32SignExtend8, WordUnary, SignExtend8, Word32)
1982 DECL_SINGLE_REP_UNARY_V(Word64SignExtend8, WordUnary, SignExtend8, Word64)
1987
1989 V<Word> input, OverflowCheckedUnaryOp::Kind kind,
1991 return ReduceIfReachableOverflowCheckedUnary(input, kind, rep);
1992 }
1993
1997 Word32)
1999 Word64)
2000
2001#undef DECL_SINGLE_REP_UNARY_V
2002#undef DECL_MULTI_REP_UNARY
2003#undef DECL_MULTI_REP_UNARY_V
2004
2005 V<Word> WordBinopDeoptOnOverflow(V<Word> left, V<Word> right,
2011 return ReduceIfReachableWordBinopDeoptOnOverflow(left, right, frame_state,
2012 kind, rep, feedback, mode);
2013 }
2014#define DECL_SINGLE_REP_BINOP_DEOPT_OVERFLOW(operation, rep_type) \
2015 OpIndex rep_type##operation##DeoptOnOverflow( \
2016 ConstOrV<rep_type> left, ConstOrV<rep_type> right, \
2017 V<turboshaft::FrameState> frame_state, FeedbackSource feedback, \
2018 CheckForMinusZeroMode mode = \
2019 CheckForMinusZeroMode::kDontCheckForMinusZero) { \
2020 return WordBinopDeoptOnOverflow( \
2021 resolve(left), resolve(right), frame_state, \
2022 WordBinopDeoptOnOverflowOp::Kind::k##operation, \
2023 WordRepresentation::rep_type(), feedback, mode); \
2024 }
2025
2043#undef DECL_SINGLE_REP_BINOP_DEOPT_OVERFLOW
2044
2046 ConstOrV<Word32> low_word32) {
2047 return ReduceIfReachableBitcastWord32PairToFloat64(resolve(high_word32),
2048 resolve(low_word32));
2049 }
2050
2053 return ReduceIfReachableTaggedBitcast(input, from, to, kind);
2054 }
2055
2056#define DECL_TAGGED_BITCAST(FromT, ToT, kind) \
2057 V<ToT> Bitcast##FromT##To##ToT(V<FromT> input) { \
2058 return TaggedBitcast(input, V<FromT>::rep, V<ToT>::rep, \
2059 TaggedBitcastOp::Kind::kind); \
2060 }
2067#undef DECL_TAGGED_BITCAST
2072
2077
2083
2085 ObjectIsOp::InputAssumptions input_assumptions) {
2086 return ReduceIfReachableObjectIs(input, kind, input_assumptions);
2087 }
2088#define DECL_OBJECT_IS(kind) \
2089 V<Word32> ObjectIs##kind(V<Object> object) { \
2090 return ObjectIs(object, ObjectIsOp::Kind::k##kind, \
2091 ObjectIsOp::InputAssumptions::kNone); \
2092 }
2093
2094 DECL_OBJECT_IS(ArrayBufferView)
2095 DECL_OBJECT_IS(BigInt)
2096 DECL_OBJECT_IS(BigInt64)
2097 DECL_OBJECT_IS(Callable)
2098 DECL_OBJECT_IS(Constructor)
2099 DECL_OBJECT_IS(DetectableCallable)
2100 DECL_OBJECT_IS(InternalizedString)
2101 DECL_OBJECT_IS(NonCallable)
2103 DECL_OBJECT_IS(NumberFitsInt32)
2104 DECL_OBJECT_IS(NumberOrBigInt)
2105 DECL_OBJECT_IS(Receiver)
2106 DECL_OBJECT_IS(ReceiverOrNullOrUndefined)
2107 DECL_OBJECT_IS(Smi)
2109 DECL_OBJECT_IS(StringOrStringWrapper)
2110 DECL_OBJECT_IS(Symbol)
2111 DECL_OBJECT_IS(Undetectable)
2112#undef DECL_OBJECT_IS
2113
2115 return ReduceIfReachableFloat64Is(input, kind);
2116 }
2118 return Float64Is(input, NumericKind::kNaN);
2119 }
2123 // Float64IsSmi returns true if {input} is an integer in smi range.
2125 return Float64Is(input, NumericKind::kSmi);
2126 }
2127
2129 FloatRepresentation input_rep) {
2130 return ReduceIfReachableObjectIsNumericValue(input, kind, input_rep);
2131 }
2132
2134 return ReduceIfReachableConvert(input, from, to);
2135 }
2152
2155 RegisterRepresentation input_rep,
2157 CheckForMinusZeroMode minus_zero_mode) {
2158 return ReduceIfReachableConvertUntaggedToJSPrimitive(
2159 input, kind, input_rep, input_interpretation, minus_zero_mode);
2160 }
// Declares conversions from an untagged value of `input_rep` (interpreted per
// `input_interpretation`, e.g. Signed/Unsigned/CharCode) to a tagged JS
// primitive of `kind`, without a minus-zero check.
#define CONVERT_PRIMITIVE_TO_OBJECT(name, kind, input_rep,               \
                                    input_interpretation)                \
  V<kind> name(V<input_rep> input) {                                     \
    return V<kind>::Cast(ConvertUntaggedToJSPrimitive(                   \
        input, ConvertUntaggedToJSPrimitiveOp::JSPrimitiveKind::k##kind, \
        RegisterRepresentation::input_rep(),                             \
        ConvertUntaggedToJSPrimitiveOp::InputInterpretation::            \
            k##input_interpretation,                                     \
        CheckForMinusZeroMode::kDontCheckForMinusZero));                 \
  }
2171 CONVERT_PRIMITIVE_TO_OBJECT(ConvertInt32ToNumber, Number, Word32, Signed)
2172 CONVERT_PRIMITIVE_TO_OBJECT(ConvertUint32ToNumber, Number, Word32, Unsigned)
2173 CONVERT_PRIMITIVE_TO_OBJECT(ConvertIntPtrToNumber, Number, WordPtr, Signed)
2174 CONVERT_PRIMITIVE_TO_OBJECT(ConvertWord32ToBoolean, Boolean, Word32, Signed)
2175 CONVERT_PRIMITIVE_TO_OBJECT(ConvertCharCodeToString, String, Word32, CharCode)
2176#undef CONVERT_PRIMITIVE_TO_OBJECT
2185
2189 RegisterRepresentation input_rep,
2191 input_interpretation,
2192 const FeedbackSource& feedback) {
2193 return ReduceIfReachableConvertUntaggedToJSPrimitiveOrDeopt(
2194 input, frame_state, kind, input_rep, input_interpretation, feedback);
2195 }
2196
2198 V<JSPrimitive> primitive,
2201 return ReduceIfReachableConvertJSPrimitiveToUntagged(primitive, kind,
2202 input_assumptions);
2203 }
2204
2209 CheckForMinusZeroMode minus_zero_mode, const FeedbackSource& feedback) {
2210 return ReduceIfReachableConvertJSPrimitiveToUntaggedOrDeopt(
2211 object, frame_state, from_kind, to_kind, minus_zero_mode, feedback);
2212 }
2222
2226 return ReduceIfReachableTruncateJSPrimitiveToUntagged(object, kind,
2227 input_assumptions);
2228 }
2229
2235
2240 input_requirement,
2241 const FeedbackSource& feedback) {
2242 return ReduceIfReachableTruncateJSPrimitiveToUntaggedOrDeopt(
2243 object, frame_state, kind, input_requirement, feedback);
2244 }
2245
2248 V<JSGlobalProxy> global_proxy,
2249 ConvertReceiverMode mode) {
2250 return ReduceIfReachableConvertJSPrimitiveToObject(value, native_context,
2251 global_proxy, mode);
2252 }
2253
2254 V<Word32> Word32Constant(uint32_t value) {
2255 return ReduceIfReachableConstant(ConstantOp::Kind::kWord32,
2256 uint64_t{value});
2257 }
  // Emits a 32-bit word constant; the signed value is stored as its unsigned
  // bit pattern.
  V<Word32> Word32Constant(int32_t value) {
    return Word32Constant(static_cast<uint32_t>(value));
  }
  // Emits a 64-bit word constant.
  V<Word64> Word64Constant(uint64_t value) {
    return ReduceIfReachableConstant(ConstantOp::Kind::kWord64, value);
  }
  // Emits a 64-bit word constant; the signed value is stored as its unsigned
  // bit pattern.
  V<Word64> Word64Constant(int64_t value) {
    return Word64Constant(static_cast<uint64_t>(value));
  }
2271 switch (rep.value()) {
2273 return Word32Constant(static_cast<uint32_t>(value));
2275 return Word64Constant(value);
2276 }
2277 }
  // Emits a pointer-sized word constant from a signed value (stored as its
  // unsigned bit pattern).
  V<WordPtr> IntPtrConstant(intptr_t value) {
    return UintPtrConstant(static_cast<uintptr_t>(value));
  }
2281 V<WordPtr> UintPtrConstant(uintptr_t value) { return WordPtrConstant(value); }
2283 return V<Smi>::Cast(
2284 ReduceIfReachableConstant(ConstantOp::Kind::kSmi, value));
2285 }
2288 return ReduceIfReachableConstant(ConstantOp::Kind::kFloat32, value);
2289 }
2291 // Passing the NaN Hole as input is allowed, but there is no guarantee that
2292 // it will remain a hole (it will remain NaN though).
2293 if (std::isnan(value)) {
2294 return Float32Constant(
2295 i::Float32::FromBits(base::bit_cast<uint32_t>(value)));
2296 } else {
2297 return Float32Constant(i::Float32(value));
2298 }
2299 }
2301 return ReduceIfReachableConstant(ConstantOp::Kind::kFloat64, value);
2302 }
2304 // Passing the NaN Hole as input is allowed, but there is no guarantee that
2305 // it will remain a hole (it will remain NaN though).
2306 if (std::isnan(value)) {
2307 return Float64Constant(
2308 i::Float64::FromBits(base::bit_cast<uint64_t>(value)));
2309 } else {
2310 return Float64Constant(i::Float64(value));
2311 }
2312 }
2314 // Passing the NaN Hole as input is allowed, but there is no guarantee that
2315 // it will remain a hole (it will remain NaN though).
2316 switch (rep.value()) {
2318 return Float32Constant(static_cast<float>(value));
2320 return Float64Constant(value);
2321 }
2322 }
2324 return ReduceIfReachableConstant(ConstantOp::Kind::kNumber, value);
2325 }
2326 OpIndex NumberConstant(double value) {
2327 // Passing the NaN Hole as input is allowed, but there is no guarantee that
2328 // it will remain a hole (it will remain NaN though).
2329 if (std::isnan(value)) {
2330 return NumberConstant(
2331 i::Float64::FromBits(base::bit_cast<uint64_t>(value)));
2332 } else {
2333 return NumberConstant(i::Float64(value));
2334 }
2335 }
2337 return ReduceIfReachableConstant(ConstantOp::Kind::kTaggedIndex,
2338 uint64_t{static_cast<uint32_t>(value)});
2339 }
2340 // TODO(nicohartmann): Maybe we should replace all uses of `HeapConstant` with
2341 // `HeapConstant[No|Maybe]?Hole` version.
2342 template <typename T>
2345 {
2346 return ReduceIfReachableConstant(ConstantOp::Kind::kHeapObject,
2347 ConstantOp::Storage{value});
2348 }
2349 template <typename T>
2352 {
2353 return __ HeapConstant(value);
2354 }
2355 template <typename T>
2358 {
2359 CHECK(!IsAnyHole(*value));
2360 return __ HeapConstant(value);
2361 }
2363 DCHECK(IsAnyHole(*value));
2364 return __ HeapConstant(value);
2365 }
2366 V<Code> BuiltinCode(Builtin builtin, Isolate* isolate) {
2367 return HeapConstant(BuiltinCodeHandle(builtin, isolate));
2368 }
2370 return ReduceIfReachableConstant(ConstantOp::Kind::kHeapObject, value);
2371 }
2373 DCHECK(IsTrustedObject(*value));
2374 return ReduceIfReachableConstant(ConstantOp::Kind::kTrustedHeapObject,
2375 value);
2376 }
2378 return ReduceIfReachableConstant(ConstantOp::Kind::kExternal, value);
2379 }
2385 return ReduceIfReachableConstant(
2386 mode == RelocInfo::WASM_CALL
2389 static_cast<uint64_t>(value));
2390 }
2391
2393 return RelocatableConstant(static_cast<int64_t>(builtin),
2395 }
2396
2398 return ReduceIfReachableConstant(
2400 static_cast<uint64_t>(canonical_id));
2401 }
2402
2404 return ReduceIfReachableConstant(
2406 }
2407
2411
2412 // TODO(nicohartmann@): Might want to get rid of the isolate when supporting
2413 // Wasm.
2414 V<Code> CEntryStubConstant(Isolate* isolate, int result_size,
2415 ArgvMode argv_mode = ArgvMode::kStack,
2416 bool builtin_exit_frame = false) {
2417 if (argv_mode != ArgvMode::kStack) {
2418 return HeapConstant(CodeFactory::CEntry(isolate, result_size, argv_mode,
2419 builtin_exit_frame));
2420 }
2421
2422 DCHECK(result_size >= 1 && result_size <= 3);
2423 DCHECK_IMPLIES(builtin_exit_frame, result_size == 1);
2424 const int index = builtin_exit_frame ? 0 : result_size;
2425 if (cached_centry_stub_constants_[index].is_null()) {
2427 isolate, result_size, argv_mode, builtin_exit_frame);
2428 }
2429 return HeapConstant(cached_centry_stub_constants_[index].ToHandleChecked());
2430 }
2431
// Declares a conversion helper emitting a ChangeOp of `kind` from
// representation `from` to `to`, under the stated `assumption` about the
// input (e.g. kNoOverflow, kReversible).
#define DECL_CHANGE_V(name, kind, assumption, from, to)                  \
  V<to> name(ConstOrV<from> input) {                                     \
    return ReduceIfReachableChange(resolve(input), ChangeOp::Kind::kind, \
                                   ChangeOp::Assumption::assumption,     \
                                   V<from>::rep, V<to>::rep);            \
  }
// Declares a checked conversion helper emitting a TryChangeOp. The result is
// a Tuple<to, Word32>; the Word32 element presumably signals success — TODO
// confirm against TryChangeOp.
#define DECL_TRY_CHANGE_V(name, kind, from, to)                       \
  V<turboshaft::Tuple<to, Word32>> name(V<from> input) {              \
    return ReduceIfReachableTryChange(input, TryChangeOp::Kind::kind, \
                                      V<from>::rep, V<to>::rep);      \
  }
2443
2444 DECL_CHANGE_V(BitcastWord32ToWord64, kBitcast, kNoAssumption, Word32, Word64)
2445 DECL_CHANGE_V(BitcastFloat32ToWord32, kBitcast, kNoAssumption, Float32,
2446 Word32)
2447 DECL_CHANGE_V(BitcastWord32ToFloat32, kBitcast, kNoAssumption, Word32,
2448 Float32)
2449 DECL_CHANGE_V(BitcastFloat64ToWord64, kBitcast, kNoAssumption, Float64,
2450 Word64)
2451 DECL_CHANGE_V(BitcastWord64ToFloat64, kBitcast, kNoAssumption, Word64,
2452 Float64)
2453 DECL_CHANGE_V(ChangeUint32ToUint64, kZeroExtend, kNoAssumption, Word32,
2454 Word64)
2455 DECL_CHANGE_V(ChangeInt32ToInt64, kSignExtend, kNoAssumption, Word32, Word64)
2457 Float64)
2459 Float64)
2461 Float32)
2463 Float32)
2465 Float32)
2467 Float32)
2468 DECL_CHANGE_V(ReversibleInt64ToFloat64, kSignedToFloat, kReversible, Word64,
2469 Float64)
2471 Float64)
2472 DECL_CHANGE_V(ReversibleUint64ToFloat64, kUnsignedToFloat, kReversible,
2475 Float64)
2477 Float64)
2482 DECL_CHANGE_V(JSTruncateFloat64ToWord32, kJSFloatTruncate, kNoAssumption,
2483 Float64, Word32)
2484 DECL_CHANGE_V(TruncateWord64ToWord32, kTruncate, kNoAssumption, Word64,
2485 Word32)
2487 if (rep == WordRepresentation::Word32()) return value;
2489 return ChangeUint32ToUint64(value);
2490 }
2492 if constexpr (Is64()) {
2493 return TruncateWord64ToWord32(input);
2494 } else {
2495 DCHECK_EQ(WordPtr::bits, Word32::bits);
2496 return V<Word32>::Cast(resolve(input));
2497 }
2498 }
2500 if constexpr (Is64()) {
2501 return ChangeInt32ToInt64(input);
2502 } else {
2503 DCHECK_EQ(WordPtr::bits, Word32::bits);
2504 return V<WordPtr>::Cast(input);
2505 }
2506 }
2508 if constexpr (Is64()) {
2509 return ChangeUint32ToUint64(input);
2510 } else {
2511 DCHECK_EQ(WordPtr::bits, Word32::bits);
2512 return V<WordPtr>::Cast(input);
2513 }
2514 }
2515
2517 if constexpr (Is64()) {
2518 DCHECK_EQ(WordPtr::bits, Word64::bits);
2519 return V<Word64>::Cast(input);
2520 } else {
2521 return ChangeInt32ToInt64(input);
2522 }
2523 }
2524
2526 if constexpr (Is64()) {
2527 DCHECK_EQ(WordPtr::bits, Word64::bits);
2528 return V<Word64>::Cast(input);
2529 } else {
2530 return ChangeUint32ToUint64(input);
2531 }
2532 }
2533
2535 if constexpr (COMPRESS_POINTERS_BOOL) {
2536 return Word32Equal(Word32BitwiseAnd(V<Word32>::Cast(object), kSmiTagMask),
2537 kSmiTag);
2538 } else {
2539 return WordPtrEqual(
2540 WordPtrBitwiseAnd(V<WordPtr>::Cast(object), kSmiTagMask), kSmiTag);
2541 }
2542 }
2543
// For each (FloatBits, ResultBits) pair, declares signed float->int
// truncations:
//  - TruncateF..ToI..OverflowUndefined: a ChangeOp with the kNoOverflow
//    assumption (caller guarantees the value fits; behavior on overflow is
//    unspecified).
//  - TryTruncateF..ToI..: a TryChangeOp returning a Tuple<value, Word32>
//    (the Word32 presumably indicating success — TODO confirm).
#define DECL_SIGNED_FLOAT_TRUNCATE(FloatBits, ResultBits)                \
  DECL_CHANGE_V(                                                         \
      TruncateFloat##FloatBits##ToInt##ResultBits##OverflowUndefined,    \
      kSignedFloatTruncateOverflowToMin, kNoOverflow, Float##FloatBits,  \
      Word##ResultBits)                                                  \
  DECL_TRY_CHANGE_V(TryTruncateFloat##FloatBits##ToInt##ResultBits,      \
                    kSignedFloatTruncateOverflowUndefined,               \
                    Float##FloatBits, Word##ResultBits)
2552
2557#undef DECL_SIGNED_FLOAT_TRUNCATE
2558 DECL_CHANGE_V(TruncateFloat64ToInt64OverflowToMin,
2560 Word64)
2561 DECL_CHANGE_V(TruncateFloat32ToInt32OverflowToMin,
2563 Word32)
2564
// For each (FloatBits, ResultBits) pair, declares unsigned float->uint
// truncations:
//  - ...OverflowUndefined: caller asserts no overflow (kNoOverflow).
//  - ...OverflowToMin: no assumption; overflow saturates per the
//    kUnsignedFloatTruncateOverflowToMin kind.
//  - TryTruncate...: a TryChangeOp returning a Tuple<value, Word32>.
#define DECL_UNSIGNED_FLOAT_TRUNCATE(FloatBits, ResultBits)                 \
  DECL_CHANGE_V(                                                            \
      TruncateFloat##FloatBits##ToUint##ResultBits##OverflowUndefined,      \
      kUnsignedFloatTruncateOverflowToMin, kNoOverflow, Float##FloatBits,   \
      Word##ResultBits)                                                     \
  DECL_CHANGE_V(TruncateFloat##FloatBits##ToUint##ResultBits##OverflowToMin, \
                kUnsignedFloatTruncateOverflowToMin, kNoAssumption,         \
                Float##FloatBits, Word##ResultBits)                         \
  DECL_TRY_CHANGE_V(TryTruncateFloat##FloatBits##ToUint##ResultBits,        \
                    kUnsignedFloatTruncateOverflowUndefined,                \
                    Float##FloatBits, Word##ResultBits)
2576
2581#undef DECL_UNSIGNED_FLOAT_TRUNCATE
2582
2591 DECL_CHANGE_V(Float64ExtractLowWord32, kExtractLowHalf, kNoAssumption,
2592 Float64, Word32)
2595#undef DECL_CHANGE_V
2596#undef DECL_TRY_CHANGE_V
2597
2601 CheckForMinusZeroMode minus_zero_mode,
2602 const FeedbackSource& feedback) {
2603 return ReduceIfReachableChangeOrDeopt(input, frame_state, kind,
2604 minus_zero_mode, feedback);
2605 }
2606
2609 CheckForMinusZeroMode minus_zero_mode,
2610 const FeedbackSource& feedback) {
2613 minus_zero_mode, feedback));
2614 }
2617 CheckForMinusZeroMode minus_zero_mode,
2618 const FeedbackSource& feedback) {
2621 minus_zero_mode, feedback));
2622 }
2633 CheckForMinusZeroMode minus_zero_mode,
2634 const FeedbackSource& feedback) {
2637 minus_zero_mode, feedback));
2638 }
2639
2641 constexpr int kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
2642 // Do shift on 32bit values if Smis are stored in the lower word.
2643 if constexpr (Is64() && SmiValuesAre31Bits()) {
2644 V<Word32> shifted = Word32ShiftLeft(resolve(input), kSmiShiftBits);
2645 // In pointer compression, we smi-corrupt. Then, the upper bits are not
2646 // important.
2647 return BitcastWord32ToSmi(shifted);
2648 } else {
2649 return BitcastWordPtrToSmi(
2650 WordPtrShiftLeft(ChangeInt32ToIntPtr(resolve(input)), kSmiShiftBits));
2651 }
2652 }
2653
2655 constexpr int kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
2656 if constexpr (Is64() && SmiValuesAre31Bits()) {
2657 return Word32ShiftRightArithmeticShiftOutZeros(BitcastSmiToWord32(input),
2658 kSmiShiftBits);
2659 }
2660 return TruncateWordPtrToWord32(WordPtrShiftRightArithmeticShiftOutZeros(
2661 BitcastSmiToWordPtr(input), kSmiShiftBits));
2662 }
2663
2665 AtomicRMWOp::BinOp bin_op,
2666 RegisterRepresentation in_out_rep,
2667 MemoryRepresentation memory_rep,
2668 MemoryAccessKind memory_access_kind) {
2669 DCHECK_NE(bin_op, AtomicRMWOp::BinOp::kCompareExchange);
2670 return ReduceIfReachableAtomicRMW(base, index, value, OpIndex::Invalid(),
2671 bin_op, in_out_rep, memory_rep,
2672 memory_access_kind);
2673 }
2674
2676 OpIndex expected, OpIndex new_value,
2677 RegisterRepresentation result_rep,
2678 MemoryRepresentation input_rep,
2679 MemoryAccessKind memory_access_kind) {
2680 return ReduceIfReachableAtomicRMW(
2681 base, index, new_value, expected, AtomicRMWOp::BinOp::kCompareExchange,
2682 result_rep, input_rep, memory_access_kind);
2683 }
2684
2686 OptionalV<Word32> value_low,
2687 OptionalV<Word32> value_high,
2688 OptionalV<Word32> expected_low,
2689 OptionalV<Word32> expected_high,
2690 AtomicWord32PairOp::Kind op_kind, int32_t offset) {
2691 return ReduceIfReachableAtomicWord32Pair(base, index, value_low, value_high,
2692 expected_low, expected_high,
2693 op_kind, offset);
2694 }
2695
2697 int32_t offset) {
2698 return AtomicWord32Pair(base, index, {}, {}, {}, {},
2700 }
2702 V<Word32> value_low, V<Word32> value_high,
2703 int32_t offset) {
2704 return AtomicWord32Pair(base, index, value_low, value_high, {}, {},
2706 }
2708 V<WordPtr> base, OptionalV<WordPtr> index, V<Word32> value_low,
2709 V<Word32> value_high, V<Word32> expected_low, V<Word32> expected_high,
2710 int32_t offset = 0) {
2711 return AtomicWord32Pair(base, index, value_low, value_high, expected_low,
2712 expected_high,
2714 }
2716 V<Word32> value_low, V<Word32> value_high,
2717 AtomicRMWOp::BinOp bin_op, int32_t offset = 0) {
2718 return AtomicWord32Pair(base, index, value_low, value_high, {}, {},
2720 }
2721
2723 return ReduceIfReachableMemoryBarrier(memory_order);
2724 }
2725
2727 MemoryRepresentation loaded_rep,
2728 RegisterRepresentation result_rep, int32_t offset = 0,
2729 uint8_t element_size_log2 = 0) {
2730 return ReduceIfReachableLoad(base, index, kind, loaded_rep, result_rep,
2731 offset, element_size_log2);
2732 }
2733
2735 MemoryRepresentation loaded_rep, int32_t offset = 0,
2736 uint8_t element_size_log2 = 0) {
2737 return Load(base, index, kind, loaded_rep,
2738 loaded_rep.ToRegisterRepresentation(), offset,
2739 element_size_log2);
2740 }
2742 int32_t offset = 0) {
2743 return Load(base, OpIndex::Invalid(), kind, loaded_rep, offset);
2744 }
2746 return LoadOffHeap(address, 0, rep);
2747 }
2750 return Load(address, LoadOp::Kind::RawAligned(), rep, offset);
2751 }
2754 return Load(address, index, LoadOp::Kind::RawAligned(), rep, offset,
2755 rep.SizeInBytesLog2());
2756 }
2757
2758 // Load a protected (trusted -> trusted) pointer field. The read value is
2759 // either a Smi or a TrustedObject.
2761 V<Object> base, OptionalV<WordPtr> index,
2763 int element_size_log2 = kTaggedSizeLog2) {
2764 return Load(base, index, kind,
2768 offset, index.valid() ? element_size_log2 : 0);
2769 }
2770
2771 // Load a protected (trusted -> trusted) pointer field. The read value is
2772 // either a Smi or a TrustedObject.
2777
2778 // Load a trusted (indirect) pointer. Returns Smi or ExposedTrustedObject.
2781 int offset = 0) {
2782#if V8_ENABLE_SANDBOX
2783 static_assert(COMPRESS_POINTERS_BOOL);
2786 V<Word32> table_index =
2787 Word32ShiftRightLogical(handle, kTrustedPointerHandleShift);
2788 V<Word64> table_offset = __ ChangeUint32ToUint64(
2789 Word32ShiftLeft(table_index, kTrustedPointerTableEntrySizeLog2));
2790 V<WordPtr> table =
2793 IsolateData::trusted_pointer_table_offset() +
2795 V<WordPtr> decoded_ptr =
2796 Load(table, table_offset, LoadOp::Kind::RawAligned(),
2798
2799 // Untag the pointer and remove the marking bit in one operation.
2800 decoded_ptr =
2801 __ Word64BitwiseAnd(decoded_ptr, ~(tag | kTrustedPointerTableMarkBit));
2802
2803 // Bitcast to tagged to this gets scanned by the GC properly.
2804 return BitcastWordPtrToTagged(decoded_ptr);
2805#else
2807 offset);
2808#endif // V8_ENABLE_SANDBOX
2809 }
2810
2811 // Load a trusted (indirect) pointer. Returns Smi or ExposedTrustedObject.
2816
2818 ExternalPointerTag tag) {
2819#ifdef V8_ENABLE_SANDBOX
2822 return __ DecodeExternalPointer(handle, tag);
2823#else
2824 return __ Load(object, LoadOp::Kind::TaggedBase(),
2826#endif // V8_ENABLE_SANDBOX
2827 }
2828
2839
2854
2860
2867
  // Emits a store of {value} at an address formed from {base}, the optional
  // {index} (scaled by 2^element_size_log2 — assumed from StoreOp, confirm)
  // and {offset}. {stored_rep} is the memory representation written and
  // {write_barrier} the GC write-barrier kind required.
  // {maybe_initializing_or_transitioning} marks stores that may initialize a
  // field or transition a map; {maybe_indirect_pointer_tag} applies to
  // sandboxed indirect-pointer stores (see StoreOp for tag semantics).
  void Store(
      OpIndex base, OptionalOpIndex index, OpIndex value, StoreOp::Kind kind,
      MemoryRepresentation stored_rep, WriteBarrierKind write_barrier,
      int32_t offset = 0, uint8_t element_size_log2 = 0,
      bool maybe_initializing_or_transitioning = false,
      IndirectPointerTag maybe_indirect_pointer_tag = kIndirectPointerNullTag) {
    ReduceIfReachableStore(base, index, value, kind, stored_rep, write_barrier,
                           offset, element_size_log2,
                           maybe_initializing_or_transitioning,
                           maybe_indirect_pointer_tag);
  }
  // Index-less convenience overload: stores {value} at {base} + {offset}
  // (no index, element_size_log2 = 0).
  void Store(
      OpIndex base, OpIndex value, StoreOp::Kind kind,
      MemoryRepresentation stored_rep, WriteBarrierKind write_barrier,
      int32_t offset = 0, bool maybe_initializing_or_transitioning = false,
      IndirectPointerTag maybe_indirect_pointer_tag = kIndirectPointerNullTag) {
    Store(base, OpIndex::Invalid(), value, kind, stored_rep, write_barrier,
          offset, 0, maybe_initializing_or_transitioning,
          maybe_indirect_pointer_tag);
  }
2888
2889 template <typename T>
2891 MemoryRepresentation stored_rep,
2892 WriteBarrierKind write_barrier, int32_t offset = 0) {
2893 return Store(object.object(), value,
2895 stored_rep, write_barrier, offset, true);
2896 }
2897
2899 int32_t offset = 0) {
2900 Store(address, value, StoreOp::Kind::RawAligned(), rep,
2902 }
  // Raw aligned (untagged) store of {value} to {address} + {offset} with
  // {index} scaled by the representation's byte size; no write barrier.
  void StoreOffHeap(OpIndex address, OptionalOpIndex index, OpIndex value,
                    MemoryRepresentation rep, int32_t offset) {
    Store(address, index, value, StoreOp::Kind::RawAligned(), rep,
          WriteBarrierKind::kNoWriteBarrier, offset, rep.SizeInBytesLog2());
  }
2908
2909 template <typename Rep = Any>
2911 DCHECK_EQ(access.base_is_tagged, BaseTaggedness::kTaggedBase);
2912 return LoadFieldImpl<Rep>(object, access);
2913 }
2914
2915 template <typename Rep = Any>
2917 DCHECK_EQ(access.base_is_tagged, BaseTaggedness::kUntaggedBase);
2918 return LoadFieldImpl<Rep>(raw_base, access);
2919 }
2920
2921 template <typename Obj, typename Class, typename T>
2924 implicitly_constructible_from<Obj>::value {
2925 return LoadFieldImpl<T>(object, field);
2926 }
2927
2928 template <typename Rep>
2929 V<Rep> LoadFieldImpl(OpIndex object, const compiler::FieldAccess& access) {
2930 MachineType machine_type = access.machine_type;
2931 if (machine_type.IsMapWord()) {
2932 machine_type = MachineType::TaggedPointer();
2933#ifdef V8_MAP_PACKING
2934 UNIMPLEMENTED();
2935#endif
2936 }
2939#ifdef V8_ENABLE_SANDBOX
2940 bool is_sandboxed_external =
2941 access.type.Is(compiler::Type::ExternalPointer());
2942 if (is_sandboxed_external) {
2943 // Fields for sandboxed external pointer contain a 32-bit handle, not a
2944 // 64-bit raw pointer.
2946 }
2947#endif // V8_ENABLE_SANDBOX
2948 LoadOp::Kind kind = LoadOp::Kind::Aligned(access.base_is_tagged);
2949 if (access.is_immutable) {
2950 kind = kind.Immutable();
2951 }
2952 V<Rep> value = Load(object, kind, rep, access.offset);
2953#ifdef V8_ENABLE_SANDBOX
2954 if (is_sandboxed_external) {
2955 value = DecodeExternalPointer(value, access.external_pointer_tag);
2956 }
2957 if (access.is_bounded_size_access) {
2958 DCHECK(!is_sandboxed_external);
2960 kBoundedSizeShift,
2962 }
2963#endif // V8_ENABLE_SANDBOX
2964 return value;
2965 }
2966
2967 // Helpers to read the most common fields.
2968 // TODO(nicohartmann@): Strengthen this to `V<HeapObject>`.
2970 return LoadField<Map>(object, AccessBuilder::ForMap());
2971 }
2972
2976
2978 return Word32Equal(LoadInstanceTypeField(LoadMapField(object)),
2979 Word32Constant(instance_type));
2980 }
2981
2984 heap_number, AccessBuilderTS::ForHeapNumberValue());
2985 }
2986
2989 heap_number, AccessBuilderTS::ForHeapInt32Value());
2990 }
2991
2992 template <typename Type = Object>
2993 V<Type> LoadTaggedField(V<Object> object, int field_offset)
2995 {
2996 return Load(object, LoadOp::Kind::TaggedBase(),
2997 MemoryRepresentation::AnyTagged(), field_offset);
2998 }
2999
  // Stores {value} into the field of {object} described by {access},
  // forwarding the access descriptor's initializing/transitioning flag.
  template <typename Base>
  void StoreField(V<Base> object, const FieldAccess& access, V<Any> value) {
    StoreFieldImpl(object, access, value,
                   access.maybe_initializing_or_transitioning_store);
  }
3005
3006 template <typename Object, typename Class, typename T>
3008 const FieldAccessTS<Class, T>& access,
3009 maybe_const_or_v_t<T> value) {
3010 static_assert(is_subtype_v<Object, Class>);
3011 StoreFieldImpl(object.object(), access, resolve(value), true);
3012 }
3013
3014 // TODO(nicohartmann): Remove `InitializeField` once fully transitioned to
3015 // `FieldAccess`.
  // Initializing store into a field of a not-yet-fully-initialized object;
  // always treated as an initializing/transitioning store (last argument
  // hard-coded to true).
  template <typename T>
  void InitializeField(Uninitialized<T>& object, const FieldAccess& access,
                       V<Any> value) {
    StoreFieldImpl(object.object(), access, value, true);
  }
3021
3022 template <typename Base>
3023 void StoreFieldImpl(V<Base> object, const FieldAccess& access, V<Any> value,
3024 bool maybe_initializing_or_transitioning) {
3025 if constexpr (is_taggable_v<Base>) {
3026 DCHECK_EQ(access.base_is_tagged, BaseTaggedness::kTaggedBase);
3027 } else {
3028 static_assert(std::is_same_v<Base, WordPtr>);
3029 DCHECK_EQ(access.base_is_tagged, BaseTaggedness::kUntaggedBase);
3030 }
3031 // External pointer must never be stored by optimized code.
3032 DCHECK(!access.type.Is(compiler::Type::ExternalPointer()) ||
3034 // SandboxedPointers are not currently stored by optimized code.
3035 DCHECK(!access.type.Is(compiler::Type::SandboxedPointer()));
3036
3037#ifdef V8_ENABLE_SANDBOX
3038 if (access.is_bounded_size_access) {
3039 value = ShiftLeft(V<WordPtr>::Cast(value), kBoundedSizeShift,
3041 }
3042#endif // V8_ENABLE_SANDBOX
3043
3044 StoreOp::Kind kind = StoreOp::Kind::Aligned(access.base_is_tagged);
3045 MachineType machine_type = access.machine_type;
3046 if (machine_type.IsMapWord()) {
3047 machine_type = MachineType::TaggedPointer();
3048#ifdef V8_MAP_PACKING
3049 UNIMPLEMENTED();
3050#endif
3051 }
3054 Store(object, value, kind, rep, access.write_barrier_kind, access.offset,
3055 maybe_initializing_or_transitioning);
3056 }
3057
3058 void StoreFixedArrayElement(V<FixedArray> array, int index, V<Object> value,
3059 compiler::WriteBarrierKind write_barrier) {
3060 Store(array, value, LoadOp::Kind::TaggedBase(),
3061 MemoryRepresentation::AnyTagged(), write_barrier,
3063 }
3064
3066 V<Object> value,
3067 compiler::WriteBarrierKind write_barrier) {
3068 Store(array, index, value, LoadOp::Kind::TaggedBase(),
3069 MemoryRepresentation::AnyTagged(), write_barrier,
3071 }
3081
3082 template <typename Class, typename T>
3084 V<WordPtr> index) {
3085 return LoadElement<T>(object, access, index, access.is_array_buffer_load);
3086 }
3087
3088 // TODO(nicohartmann): Remove `LoadArrayBufferElement` once fully transitioned
3089 // to `ElementAccess`.
3090 template <typename T = Any, typename Base>
3092 V<WordPtr> index) {
3093 return LoadElement<T>(object, access, index, true);
3094 }
3095 // TODO(nicohartmann): Remove `LoadNonArrayBufferElement` once fully
3096 // transitioned to `ElementAccess`.
3097 template <typename T = Any, typename Base>
3099 V<WordPtr> index) {
3100 return LoadElement<T>(object, access, index, false);
3101 }
3102 template <typename Base>
3104 const ElementAccess& access) {
3105 return WordPtrAdd(BitcastHeapObjectToWordPtr(object),
3106 access.header_size - access.tag());
3107 }
3108
3109 template <typename Base>
3111 V<WordPtr> index, V<Any> value) {
3112 return StoreElement(object, access, index, value, true);
3113 }
3114 template <typename Base>
3116 V<WordPtr> index, V<Any> value) {
3117 return StoreElement(object, access, index, value, false);
3118 }
3119
3120 template <typename Class, typename T>
3122 ConstOrV<WordPtr> index, V<T> value) {
3123 StoreElement(object, access, index, value, access.is_array_buffer_load);
3124 }
3125
3126 template <typename Class, typename T>
3128 const ElementAccessTS<Class, T>& access,
3129 ConstOrV<WordPtr> index, V<T> value) {
3130 StoreElement(object.object(), access, index, value,
3131 access.is_array_buffer_load);
3132 }
3133
3134 // TODO(nicohartmann): Remove `InitializeArrayBufferElement` once fully
3135 // transitioned to `ElementAccess`.
3136 template <typename Base>
3138 const ElementAccess& access,
3139 V<WordPtr> index, V<Any> value) {
3140 StoreArrayBufferElement(object.object(), access, index, value);
3141 }
3142 // TODO(nicohartmann): Remove `InitializeNoneArrayBufferElement` once fully
3143 // transitioned to `ElementAccess`.
3144 template <typename Base>
3146 const ElementAccess& access,
3147 V<WordPtr> index, V<Any> value) {
3148 StoreNonArrayBufferElement(object.object(), access, index, value);
3149 }
3150
3152 V<HeapObject> buffer = __ template LoadField<HeapObject>(
3154 V<Word32> bitfield = __ template LoadField<Word32>(
3156 return __ Word32BitwiseAnd(bitfield, JSArrayBuffer::WasDetachedBit::kMask);
3157 }
3158
3159 template <typename T = HeapObject>
3161 static_assert(is_subtype_v<T, HeapObject>);
3164 return Uninitialized<T>{ReduceIfReachableAllocate(resolve(size), type)};
3165 }
3166
3167 template <typename T>
3171 return uninitialized.ReleaseObject();
3172 }
3173
3183
3185 return ReduceIfReachableDecodeExternalPointer(handle, tag);
3186 }
3187
3188#if V8_ENABLE_WEBASSEMBLY
  // Emits a WebAssembly stack-check operation of the given kind.
  void WasmStackCheck(WasmStackCheckOp::Kind kind) {
    ReduceIfReachableWasmStackCheck(kind);
  }
3192#endif
3193
3197 ReduceIfReachableJSStackCheck(context, frame_state, kind);
3198 }
3199
3208
3209 void Retain(V<Object> value) { ReduceIfReachableRetain(value); }
3210
3212 return ReduceIfReachableStackPointerGreaterThan(limit, kind);
3213 }
3214
3216 return ReduceIfReachableFrameConstant(
3218 }
3220 return ReduceIfReachableFrameConstant(FrameConstantOp::Kind::kFramePointer);
3221 }
3223 return ReduceIfReachableFrameConstant(
3225 }
3226
  // Allocates a stack slot of {size} bytes with the given {alignment} and
  // returns its address. {is_tagged} marks slots holding tagged values
  // (presumably so the GC visits them — confirm with StackSlotOp).
  V<WordPtr> StackSlot(int size, int alignment, bool is_tagged = false) {
    return ReduceIfReachableStackSlot(size, alignment, is_tagged);
  }
3230
3232#ifdef V8_ENABLE_DIRECT_HANDLE
3233 // With direct locals, the argument can be passed directly.
3234 return BitcastTaggedToWordPtr(argument);
3235#else
3236 // With indirect locals, the argument has to be stored on the stack and the
3237 // slot address is passed.
3238 V<WordPtr> stack_slot =
3239 StackSlot(sizeof(uintptr_t), alignof(uintptr_t), true);
3240 StoreOffHeap(stack_slot, __ BitcastTaggedToWordPtr(argument),
3242 return stack_slot;
3243#endif
3244 }
3245
3246 OpIndex LoadRootRegister() { return ReduceIfReachableLoadRootRegister(); }
3247
3248 template <typename T = Any, typename U = T>
3251 BranchHint hint,
3252 SelectOp::Implementation implem) {
3253 return ReduceIfReachableSelect(resolve(cond), vtrue, vfalse, rep, hint,
3254 implem);
3255 }
3256
3257 // TODO(chromium:331100916): remove this overload once Turboshaft has been
3258 // entirely V<>ified.
3261 SelectOp::Implementation implem) {
3262 return Select(cond, V<Any>::Cast(vtrue), V<Any>::Cast(vfalse), rep, hint,
3263 implem);
3264 }
3265
// Declares Rep##Select(cond, vtrue, vfalse): a branchless conditional select
// of representation Rep, lowered as a conditional move (kCMove), with no
// branch hint.
#define DEF_SELECT(Rep)                                                  \
  V<Rep> Rep##Select(ConstOrV<Word32> cond, ConstOrV<Rep> vtrue,         \
                     ConstOrV<Rep> vfalse) {                             \
    return Select<Rep>(resolve(cond), resolve(vtrue), resolve(vfalse),   \
                       RegisterRepresentation::Rep(), BranchHint::kNone, \
                       SelectOp::Implementation::kCMove);                \
  }
3278#undef DEF_SELECT
3279
3280 template <typename T, typename U>
3282 V<U> vfalse,
3284 return Select(resolve(cond), vtrue, vfalse,
3285 V<std::common_type_t<T, U>>::rep, hint,
3287 }
3290 BranchHint default_hint = BranchHint::kNone) {
3291 ReduceIfReachableSwitch(input, cases, default_case, default_hint);
3292 }
3293 void Unreachable() { ReduceIfReachableUnreachable(); }
3294
3296 const char* debug_name = nullptr) {
3297 // Parameter indices might be negative.
3298 int cache_location = index - kMinParameterIndex;
3299 DCHECK_GE(cache_location, 0);
3300 if (static_cast<size_t>(cache_location) >= cached_parameters_.size()) {
3301 cached_parameters_.resize(cache_location + 1, {});
3302 }
3303 OpIndex& cached_param = cached_parameters_[cache_location];
3304 if (!cached_param.valid()) {
3305 // Note: When in unreachable code, this will return OpIndex::Invalid, so
3306 // the cached state is unchanged.
3307 cached_param = ReduceIfReachableParameter(index, rep, debug_name);
3308 } else {
3309 DCHECK_EQ(Asm().output_graph().Get(cached_param).outputs_rep(),
3310 base::VectorOf({rep}));
3311 }
3312 return cached_param;
3313 }
  // Typed convenience wrapper: loads parameter {index} using T's register
  // representation.
  template <typename T>
  V<T> Parameter(int index, const char* debug_name = nullptr) {
    return Parameter(index, V<T>::rep, debug_name);
  }
3318 V<Object> OsrValue(int index) { return ReduceIfReachableOsrValue(index); }
  // Terminates the function with a Return of {return_values}; {pop_count} is
  // the dynamic number of stack arguments to pop (semantics assumed from
  // ReturnOp — confirm). {spill_caller_frame_slots} is forwarded unchanged.
  void Return(V<Word32> pop_count, base::Vector<const OpIndex> return_values,
              bool spill_caller_frame_slots = false) {
    ReduceIfReachableReturn(pop_count, return_values, spill_caller_frame_slots);
  }
3326
3327 template <typename R = AnyOrNone>
3330 const TSCallDescriptor* descriptor,
3331 OpEffects effects = OpEffects().CanCallAnything()) {
3332 return ReduceIfReachableCall(callee, frame_state, arguments, descriptor,
3333 effects);
3334 }
3335 template <typename R = AnyOrNone>
3336 V<R> Call(V<CallTarget> callee, std::initializer_list<OpIndex> arguments,
3337 const TSCallDescriptor* descriptor,
3338 OpEffects effects = OpEffects().CanCallAnything()) {
3340 base::VectorOf(arguments), descriptor, effects);
3341 }
3342
3343 template <typename Descriptor>
3346 V<Context> context, const typename Descriptor::arguments_t& args,
3347 LazyDeoptOnThrow lazy_deopt_on_throw = LazyDeoptOnThrow::kNo)
3348 requires(Descriptor::kNeedsFrameState && Descriptor::kNeedsContext)
3349 {
3351 if (V8_UNLIKELY(Asm().generating_unreachable_operations())) {
3352 return result_t::Invalid();
3353 }
3354 DCHECK(frame_state.valid());
3355 DCHECK(context.valid());
3356 auto arguments = std::apply(
3357 [context](auto&&... as) {
3358 return base::SmallVector<
3359 OpIndex, std::tuple_size_v<typename Descriptor::arguments_t> + 1>{
3360 std::forward<decltype(as)>(as)..., context};
3361 },
3362 args);
3363 return result_t::Cast(CallBuiltinImpl(
3364 isolate, Descriptor::kFunction, frame_state, base::VectorOf(arguments),
3365 Descriptor::Create(StubCallMode::kCallCodeObject,
3366 Asm().output_graph().graph_zone(),
3367 lazy_deopt_on_throw),
3368 Descriptor::kEffects));
3369 }
3370
3371 template <typename Descriptor>
3373 Isolate* isolate, V<Context> context,
3374 const typename Descriptor::arguments_t& args)
3375 requires(!Descriptor::kNeedsFrameState && Descriptor::kNeedsContext)
3376 {
3378 if (V8_UNLIKELY(Asm().generating_unreachable_operations())) {
3379 return result_t::Invalid();
3380 }
3381 DCHECK(context.valid());
3382 auto arguments = std::apply(
3383 [context](auto&&... as) {
3384 return base::SmallVector<
3385 OpIndex, std::tuple_size_v<typename Descriptor::arguments_t> + 1>{
3386 std::forward<decltype(as)>(as)..., context};
3387 },
3388 args);
3389 return result_t::Cast(CallBuiltinImpl(
3390 isolate, Descriptor::kFunction,
3392 Descriptor::Create(StubCallMode::kCallCodeObject,
3393 Asm().output_graph().graph_zone()),
3394 Descriptor::kEffects));
3395 }
3396 template <typename Descriptor>
3399 const typename Descriptor::arguments_t& args,
3400 LazyDeoptOnThrow lazy_deopt_on_throw = LazyDeoptOnThrow::kNo)
3401 requires(Descriptor::kNeedsFrameState && !Descriptor::kNeedsContext)
3402 {
3404 if (V8_UNLIKELY(Asm().generating_unreachable_operations())) {
3405 return result_t::Invalid();
3406 }
3407 DCHECK(frame_state.valid());
3408 auto arguments = std::apply(
3409 [](auto&&... as) {
3410 return base::SmallVector<OpIndex, std::tuple_size_v<decltype(args)>>{
3411 std::forward<decltype(as)>(as)...};
3412 },
3413 args);
3414 return result_t::Cast(CallBuiltinImpl(
3415 isolate, Descriptor::kFunction, frame_state, base::VectorOf(arguments),
3416 Descriptor::Create(StubCallMode::kCallCodeObject,
3417 Asm().output_graph().graph_zone(),
3418 lazy_deopt_on_throw),
3419 Descriptor::kEffects));
3420 }
3421 template <typename Descriptor>
3423 Isolate* isolate, const typename Descriptor::arguments_t& args)
3424 requires(!Descriptor::kNeedsFrameState && !Descriptor::kNeedsContext)
3425 {
3427 if (V8_UNLIKELY(Asm().generating_unreachable_operations())) {
3428 return result_t::Invalid();
3429 }
3430 auto arguments = std::apply(
3431 [](auto&&... as) {
3432 return base::SmallVector<
3433 OpIndex, std::tuple_size_v<typename Descriptor::arguments_t>>{
3434 std::forward<decltype(as)>(as)...};
3435 },
3436 args);
3437 return result_t::Cast(CallBuiltinImpl(
3438 isolate, Descriptor::kFunction,
3440 Descriptor::Create(StubCallMode::kCallCodeObject,
3441 Asm().output_graph().graph_zone()),
3442 Descriptor::kEffects));
3443 }
3444
3445#if V8_ENABLE_WEBASSEMBLY
3446
3447 template <typename Descriptor>
3449 WasmCallBuiltinThroughJumptable(const typename Descriptor::arguments_t& args)
3450 requires(!Descriptor::kNeedsContext)
3451 {
3452 static_assert(!Descriptor::kNeedsFrameState);
3454 if (V8_UNLIKELY(Asm().generating_unreachable_operations())) {
3455 return result_t::Invalid();
3456 }
3457 auto arguments = std::apply(
3458 [](auto&&... as) {
3459 return base::SmallVector<
3460 OpIndex, std::tuple_size_v<typename Descriptor::arguments_t>>{
3461 std::forward<decltype(as)>(as)...};
3462 },
3463 args);
3464 V<WordPtr> call_target =
3465 RelocatableWasmBuiltinCallTarget(Descriptor::kFunction);
3466 return result_t::Cast(
3468 base::VectorOf(arguments),
3469 Descriptor::Create(StubCallMode::kCallWasmRuntimeStub,
3470 Asm().output_graph().graph_zone()),
3471 Descriptor::kEffects));
3472 }
3473
3474 template <typename Descriptor>
3476 WasmCallBuiltinThroughJumptable(V<Context> context,
3477 const typename Descriptor::arguments_t& args)
3478 requires Descriptor::kNeedsContext
3479 {
3480 static_assert(!Descriptor::kNeedsFrameState);
3482 if (V8_UNLIKELY(Asm().generating_unreachable_operations())) {
3483 return result_t::Invalid();
3484 }
3485 DCHECK(context.valid());
3486 auto arguments = std::apply(
3487 [context](auto&&... as) {
3488 return base::SmallVector<
3489 OpIndex, std::tuple_size_v<typename Descriptor::arguments_t> + 1>{
3490 std::forward<decltype(as)>(as)..., context};
3491 },
3492 args);
3493 V<WordPtr> call_target =
3494 RelocatableWasmBuiltinCallTarget(Descriptor::kFunction);
3495 return result_t::Cast(
3497 base::VectorOf(arguments),
3498 Descriptor::Create(StubCallMode::kCallWasmRuntimeStub,
3499 Asm().output_graph().graph_zone()),
3500 Descriptor::kEffects));
3501 }
3502
3503#endif // V8_ENABLE_WEBASSEMBLY
3504
3508 const TSCallDescriptor* desc, OpEffects effects) {
3509 Callable callable = Builtins::CallableFor(isolate, builtin);
3510 return Call(HeapConstant(callable.code()), frame_state, arguments, desc,
3511 effects);
3512 }
3513
3514#define DECL_GENERIC_BINOP_BUILTIN_CALL(Name) \
3515 V<Object> CallBuiltin_##Name( \
3516 Isolate* isolate, V<turboshaft::FrameState> frame_state, \
3517 V<Context> context, V<Object> lhs, V<Object> rhs, \
3518 LazyDeoptOnThrow lazy_deopt_on_throw) { \
3519 return CallBuiltin<typename BuiltinCallDescriptor::Name>( \
3520 isolate, frame_state, context, {lhs, rhs}, lazy_deopt_on_throw); \
3521 }
3523#undef DECL_GENERIC_BINOP_BUILTIN_CALL
3524
3525#define DECL_GENERIC_UNOP_BUILTIN_CALL(Name) \
3526 V<Object> CallBuiltin_##Name(Isolate* isolate, \
3527 V<turboshaft::FrameState> frame_state, \
3528 V<Context> context, V<Object> input, \
3529 LazyDeoptOnThrow lazy_deopt_on_throw) { \
3530 return CallBuiltin<typename BuiltinCallDescriptor::Name>( \
3531 isolate, frame_state, context, {input}, lazy_deopt_on_throw); \
3532 }
3534#undef DECL_GENERIC_UNOP_BUILTIN_CALL
3535
3538 V<Context> context, V<Object> input,
3539 LazyDeoptOnThrow lazy_deopt_on_throw) {
3541 isolate, frame_state, context, {input}, lazy_deopt_on_throw);
3542 }
3545 V<Context> context, V<Object> input,
3546 LazyDeoptOnThrow lazy_deopt_on_throw) {
3548 isolate, frame_state, context, {input}, lazy_deopt_on_throw);
3549 }
3550
3552 V<Object> object,
3553 V<TurbofanType> allocated_type,
3554 V<Smi> node_id) {
3556 isolate, context, {object, allocated_type, node_id});
3557 }
3565 V<Float64> value) {
3567 isolate, context, {value});
3568 }
3570 V<WordPtr> value) {
3572 isolate, context, {value});
3573 }
3575 V<Context> context,
3576 V<Object> table, V<Smi> key) {
3578 isolate, context, {table, key});
3579 }
3592 V<Object> object,
3593 V<Smi> size) {
3594 return CallBuiltin<
3596 isolate, {object, size});
3597 }
3599 Isolate* isolate, V<WordPtr> frame, V<WordPtr> formal_parameter_count,
3600 V<Smi> arguments_count) {
3601 return CallBuiltin<
3603 isolate, {frame, formal_parameter_count, arguments_count});
3604 }
3606 Isolate* isolate, V<WordPtr> frame, V<WordPtr> formal_parameter_count,
3607 V<Smi> arguments_count) {
3608 return CallBuiltin<
3610 isolate, {frame, formal_parameter_count, arguments_count});
3611 }
3613 Isolate* isolate, V<WordPtr> frame, V<WordPtr> formal_parameter_count,
3614 V<Smi> arguments_count) {
3615 return CallBuiltin<
3617 isolate, {frame, formal_parameter_count, arguments_count});
3618 }
3625 V<Context> context, V<Object> input,
3626 LazyDeoptOnThrow lazy_deopt_on_throw) {
3628 isolate, frame_state, context, {input}, lazy_deopt_on_throw);
3629 }
3636 V<Object> right) {
3638 isolate, {left, right});
3639 }
3646 V<Context> context, V<String> left,
3647 V<String> right) {
3649 isolate, context, {left, right});
3650 }
3652 V<String> right, V<WordPtr> length) {
3654 isolate, {left, right, length});
3655 }
3657 V<String> right) {
3659 isolate, {left, right});
3660 }
3677#ifdef V8_INTL_SUPPORT
3678 V<String> CallBuiltin_StringToLowerCaseIntl(Isolate* isolate,
3679 V<Context> context,
3680 V<String> string) {
3682 isolate, context, {string});
3683 }
3684#endif // V8_INTL_SUPPORT
3699 V<JSPrimitive> object) {
3701 isolate, context, {object});
3702 }
3705 V<Context> context, V<ScopeInfo> scope_info, ConstOrV<Word32> slot_count,
3706 LazyDeoptOnThrow lazy_deopt_on_throw) {
3707 return CallBuiltin<
3709 isolate, frame_state, context, {scope_info, resolve(slot_count)},
3710 lazy_deopt_on_throw);
3711 }
3714 V<Context> context, V<ScopeInfo> scope_info, ConstOrV<Word32> slot_count,
3715 LazyDeoptOnThrow lazy_deopt_on_throw) {
3716 return CallBuiltin<
3718 isolate, frame_state, context, {scope_info, resolve(slot_count)},
3719 lazy_deopt_on_throw);
3720 }
3723 V<Context> context, V<SharedFunctionInfo> shared_function_info,
3724 V<FeedbackCell> feedback_cell) {
3726 isolate, frame_state, context, {shared_function_info, feedback_cell});
3727 }
3730 {object});
3731 }
3732
3734 Builtin builtin,
3736 int num_stack_args,
3737 base::Vector<OpIndex> arguments,
3738 LazyDeoptOnThrow lazy_deopt_on_throw) {
3739 Callable callable = Builtins::CallableFor(isolate, builtin);
3740 const CallInterfaceDescriptor& descriptor = callable.descriptor();
3741 CallDescriptor* call_descriptor =
3742 Linkage::GetStubCallDescriptor(graph_zone, descriptor, num_stack_args,
3744 V<Code> stub_code = __ HeapConstant(callable.code());
3745
3746 return Call<Object>(
3747 stub_code, frame_state, arguments,
3749 lazy_deopt_on_throw, graph_zone));
3750 }
3751
3754 V<Context> context, V<Object> function,
3755 int num_args_no_spread, V<Object> spread,
3756 base::Vector<V<Object>> args_no_spread,
3757 LazyDeoptOnThrow lazy_deopt_on_throw) {
3759 arguments.push_back(function);
3760 arguments.push_back(Word32Constant(num_args_no_spread));
3761 arguments.push_back(spread);
3762 arguments.insert(arguments.end(), args_no_spread.begin(),
3763 args_no_spread.end());
3764
3765 arguments.push_back(context);
3766
3768 isolate, graph_zone, Builtin::kCallWithSpread, frame_state,
3769 num_args_no_spread, base::VectorOf(arguments), lazy_deopt_on_throw);
3770 }
3773 V<Context> context, V<Object> receiver, V<Object> function,
3774 V<Object> arguments_list, LazyDeoptOnThrow lazy_deopt_on_throw) {
3775 // CallWithArrayLike is a weird builtin that expects a receiver as top of
3776 // the stack, but doesn't explicitly list it as an extra argument. We thus
3777 // manually create the call descriptor with 1 stack argument.
3778 constexpr int kNumberOfStackArguments = 1;
3779
3780 OpIndex arguments[] = {function, arguments_list, receiver, context};
3781
3783 isolate, graph_zone, Builtin::kCallWithArrayLike, frame_state,
3784 kNumberOfStackArguments, base::VectorOf(arguments),
3785 lazy_deopt_on_throw);
3786 }
3788 Isolate* isolate, Zone* graph_zone, Builtin builtin,
3790 V<JSFunction> function, int num_args, int start_index,
3791 base::Vector<V<Object>> args, LazyDeoptOnThrow lazy_deopt_on_throw) {
3792 DCHECK(builtin == Builtin::kCallFunctionForwardVarargs ||
3793 builtin == Builtin::kCallForwardVarargs);
3795 arguments.push_back(function);
3796 arguments.push_back(__ Word32Constant(num_args));
3797 arguments.push_back(__ Word32Constant(start_index));
3798 arguments.insert(arguments.end(), args.begin(), args.end());
3799 arguments.push_back(context);
3800
3802 isolate, graph_zone, builtin, frame_state, num_args,
3803 base::VectorOf(arguments), lazy_deopt_on_throw);
3804 }
3805
3806 template <typename Descriptor>
3807 typename Descriptor::result_t CallRuntime(
3809 V<Context> context, LazyDeoptOnThrow lazy_deopt_on_throw,
3810 const typename Descriptor::arguments_t& args)
3811 requires Descriptor::kNeedsFrameState
3812 {
3813 if (V8_UNLIKELY(Asm().generating_unreachable_operations())) {
3814 return OpIndex::Invalid();
3815 }
3816 DCHECK(frame_state.valid());
3817 DCHECK(context.valid());
3819 isolate, Descriptor::kFunction,
3820 Descriptor::Create(Asm().output_graph().graph_zone(),
3821 lazy_deopt_on_throw),
3822 frame_state, context, args);
3823 }
3824 template <typename Descriptor>
3825 typename Descriptor::result_t CallRuntime(
3826 Isolate* isolate, V<Context> context,
3827 const typename Descriptor::arguments_t& args)
3828 requires(!Descriptor::kNeedsFrameState)
3829 {
3830 if (V8_UNLIKELY(Asm().generating_unreachable_operations())) {
3831 return OpIndex::Invalid();
3832 }
3833 DCHECK(context.valid());
3835 isolate, Descriptor::kFunction,
3836 Descriptor::Create(Asm().output_graph().graph_zone(),
3838 {}, context, args);
3839 }
3840
3841 template <typename Ret, typename Args>
3843 const TSCallDescriptor* desc,
3845 const Args& args) {
3846 const int result_size = Runtime::FunctionForId(function)->result_size;
3847 constexpr size_t kMaxNumArgs = 6;
3848 const size_t argc = std::tuple_size_v<Args>;
3849 static_assert(kMaxNumArgs >= argc);
3850 // Convert arguments from `args` tuple into a `SmallVector<OpIndex>`.
3851 using vector_t = base::SmallVector<OpIndex, argc + 4>;
3852 auto inputs = std::apply(
3853 [](auto&&... as) {
3854 return vector_t{std::forward<decltype(as)>(as)...};
3855 },
3856 args);
3857 DCHECK(context.valid());
3858 inputs.push_back(ExternalConstant(ExternalReference::Create(function)));
3859 inputs.push_back(Word32Constant(static_cast<int>(argc)));
3860 inputs.push_back(context);
3861
3862 if constexpr (std::is_same_v<Ret, void>) {
3863 Call(CEntryStubConstant(isolate, result_size), frame_state,
3864 base::VectorOf(inputs), desc);
3865 } else {
3866 return Ret::Cast(Call(CEntryStubConstant(isolate, result_size),
3867 frame_state, base::VectorOf(inputs), desc));
3868 }
3869 }
3870
3871 void CallRuntime_Abort(Isolate* isolate, V<Context> context, V<Smi> reason) {
3873 {reason});
3874 }
3876 V<BigInt> input, ::Operation operation) {
3877 DCHECK_EQ(operation,
3878 any_of(::Operation::kBitwiseNot, ::Operation::kNegate,
3879 ::Operation::kIncrement, ::Operation::kDecrement));
3881 isolate, context, {input, __ SmiConstant(Smi::FromEnum(operation))});
3882 }
3900 context, {});
3901 }
3909 V<String> string, V<Number> index) {
3911 isolate, context, {string, index});
3912 }
3913#ifdef V8_INTL_SUPPORT
3914 V<String> CallRuntime_StringToUpperCaseIntl(Isolate* isolate,
3915 V<Context> context,
3916 V<String> string) {
3918 isolate, context, {string});
3919 }
3920#endif // V8_INTL_SUPPORT
3923 V<Context> context, V<Symbol> symbol,
3924 LazyDeoptOnThrow lazy_deopt_on_throw) {
3926 isolate, frame_state, context, lazy_deopt_on_throw, {symbol});
3927 }
3935 V<Context> context,
3936 V<HeapObject> object,
3937 V<Map> target_map) {
3939 isolate, context, {object, target_map});
3940 }
3942 V<HeapObject> heap_object) {
3944 isolate, context, {heap_object});
3945 }
3947 Isolate* isolate, V<Context> context, V<HeapObject> heap_object) {
3948 return CallRuntime<typename RuntimeCallDescriptor::
3949 TryMigrateInstanceAndMarkMapAsMigrationTarget>(
3950 isolate, context, {heap_object});
3951 }
3954 V<Context> context, LazyDeoptOnThrow lazy_deopt_on_throw,
3955 V<Object> object) {
3958 isolate, frame_state, context, lazy_deopt_on_throw, {object});
3959 }
3969 V<Context> context, LazyDeoptOnThrow lazy_deopt_on_throw,
3970 V<Object> constructor, V<Object> function) {
3972 isolate, frame_state, context, lazy_deopt_on_throw,
3973 {constructor, function});
3974 }
3983 V<Context> context,
3984 LazyDeoptOnThrow lazy_deopt_on_throw) {
3986 isolate, frame_state, context, lazy_deopt_on_throw, {});
3987 }
3990 V<Context> context,
3991 LazyDeoptOnThrow lazy_deopt_on_throw,
3992 V<Object> value) {
3994 isolate, frame_state, context, lazy_deopt_on_throw, {value});
3995 }
3998 V<Context> context, LazyDeoptOnThrow lazy_deopt_on_throw) {
4000 isolate, frame_state, context, lazy_deopt_on_throw, {});
4001 }
4003 Isolate* isolate, V<Context> context,
4004 V<SharedFunctionInfo> shared_function_info,
4005 V<FeedbackCell> feedback_cell) {
4007 isolate, context, {shared_function_info, feedback_cell});
4008 }
4010 Isolate* isolate, V<Context> context,
4011 V<SharedFunctionInfo> shared_function_info,
4012 V<FeedbackCell> feedback_cell) {
4014 isolate, context, {shared_function_info, feedback_cell});
4015 }
4018 V<Context> context, LazyDeoptOnThrow lazy_deopt_on_throw,
4019 V<Object> object, V<HeapObject> prototype) {
4021 isolate, frame_state, context, lazy_deopt_on_throw,
4022 {object, prototype});
4023 }
4024
4026 const TSCallDescriptor* descriptor) {
4027 ReduceIfReachableTailCall(callee, arguments, descriptor);
4028 }
4029
4031 bool inlined,
4032 const FrameStateData* data) {
4033 return ReduceIfReachableFrameState(inputs, inlined, data);
4034 }
4036 const DeoptimizeParameters* parameters) {
4037 ReduceIfReachableDeoptimizeIf(condition, frame_state, false, parameters);
4038 }
4041 const DeoptimizeParameters* parameters) {
4042 ReduceIfReachableDeoptimizeIf(condition, frame_state, true, parameters);
4043 }
4045 DeoptimizeReason reason, const FeedbackSource& feedback) {
4046 if (V8_UNLIKELY(Asm().generating_unreachable_operations())) {
4047 return;
4048 }
4049 Zone* zone = Asm().output_graph().graph_zone();
4050 const DeoptimizeParameters* params =
4051 zone->New<DeoptimizeParameters>(reason, feedback);
4053 }
4056 DeoptimizeReason reason,
4057 const FeedbackSource& feedback) {
4058 if (V8_UNLIKELY(Asm().generating_unreachable_operations())) {
4059 return;
4060 }
4061 Zone* zone = Asm().output_graph().graph_zone();
4062 const DeoptimizeParameters* params =
4063 zone->New<DeoptimizeParameters>(reason, feedback);
4065 }
4067 const DeoptimizeParameters* parameters) {
4068 ReduceIfReachableDeoptimize(frame_state, parameters);
4069 }
4071 DeoptimizeReason reason, const FeedbackSource& feedback) {
4072 if (V8_UNLIKELY(Asm().generating_unreachable_operations())) {
4073 return;
4074 }
4075 Zone* zone = Asm().output_graph().graph_zone();
4076 const DeoptimizeParameters* params =
4077 zone->New<DeoptimizeParameters>(reason, feedback);
4078 Deoptimize(frame_state, params);
4079 }
4080
4081#if V8_ENABLE_WEBASSEMBLY
4082 // TrapIf and TrapIfNot in Wasm code do not pass a frame state.
4083 void TrapIf(ConstOrV<Word32> condition, TrapId trap_id) {
4084 ReduceIfReachableTrapIf(resolve(condition),
4086 trap_id);
4087 }
  // Wasm-only negated trap check without a frame state: traps with `trap_id`
  // when `condition` is false. The empty OptionalV marks the missing frame
  // state and `true` selects the negated (TrapIfNot) form.
  void TrapIfNot(ConstOrV<Word32> condition, TrapId trap_id) {
    ReduceIfReachableTrapIf(resolve(condition),
                            OptionalV<turboshaft::FrameState>{}, true, trap_id);
  }
4092
4093 // TrapIf and TrapIfNot from Wasm inlined into JS pass a frame state.
4094 void TrapIf(ConstOrV<Word32> condition,
4096 ReduceIfReachableTrapIf(resolve(condition), frame_state, false, trap_id);
4097 }
4098 void TrapIfNot(ConstOrV<Word32> condition,
4100 TrapId trap_id) {
4101 ReduceIfReachableTrapIf(resolve(condition), frame_state, true, trap_id);
4102 }
4103#endif // V8_ENABLE_WEBASSEMBLY
4104
  // Emits a StaticAssert operation for `condition`; `source` carries the
  // textual form of the asserted expression for diagnostics.
  void StaticAssert(V<Word32> condition, const char* source) {
    ReduceIfReachableStaticAssert(condition, source);
  }
4108
4110 return ReduceIfReachablePhi(inputs, rep);
4111 }
4112 OpIndex Phi(std::initializer_list<OpIndex> inputs,
4114 return Phi(base::VectorOf(inputs), rep);
4115 }
4116 template <typename T>
4117 V<T> Phi(const base::Vector<V<T>>& inputs) {
4118 if (V8_UNLIKELY(Asm().generating_unreachable_operations())) {
4119 return OpIndex::Invalid();
4120 }
4121 // Downcast from typed `V<T>` wrapper to `OpIndex`.
4122 OpIndex* inputs_begin = inputs.data();
4123 static_assert(sizeof(OpIndex) == sizeof(V<T>));
4124 return Phi(base::VectorOf(inputs_begin, inputs.length()), V<T>::rep);
4125 }
4127 return ReduceIfReachablePendingLoopPhi(first, rep);
4128 }
4129 template <typename T>
4131 return PendingLoopPhi(first, V<T>::rep);
4132 }
4133
4135 return ReduceIfReachableTuple(indices);
4136 }
  // Emits a Tuple operation packing the given `indices` into a single value.
  V<Any> Tuple(std::initializer_list<V<Any>> indices) {
    return ReduceIfReachableTuple(base::VectorOf(indices));
  }
4140 template <typename... Ts>
4141 V<turboshaft::Tuple<Ts...>> Tuple(V<Ts>... indices) {
4142 std::initializer_list<V<Any>> inputs{V<Any>::Cast(indices)...};
4143 return V<turboshaft::Tuple<Ts...>>::Cast(Tuple(base::VectorOf(inputs)));
4144 }
4145 // TODO(chromium:331100916): Remove this overload once everything is properly
4146 // V<>ified.
4151
4153 return ReduceIfReachableProjection(tuple, index, rep);
4154 }
4155 template <uint16_t Index, typename... Ts>
4157 using element_t = base::nth_type_t<Index, Ts...>;
4158 static_assert(v_traits<element_t>::rep != nullrep,
4159 "Representation for Projection cannot be inferred. Use "
4160 "overload with explicit Representation argument.");
4161 return V<element_t>::Cast(Projection(tuple, Index, V<element_t>::rep));
4162 }
4163 template <uint16_t Index, typename... Ts>
4166 using element_t = base::nth_type_t<Index, Ts...>;
4168 return V<element_t>::Cast(Projection(tuple, Index, rep));
4169 }
4171 Type expected_type, bool successful) {
4172 CHECK(v8_flags.turboshaft_enable_debug_features);
4173 return ReduceIfReachableCheckTurboshaftTypeOf(input, rep, expected_type,
4174 successful);
4175 }
4176
4177 // This is currently only usable during graph building on the main thread.
  // Debug-only assertion: in DEBUG builds with --debug-code enabled this emits
  // a Check() that fails with `message` at file:line; in release builds it
  // emits nothing. Only usable during graph building on the main thread.
  void Dcheck(V<Word32> condition, const char* message, const char* file,
              int line, const SourceLocation& loc = SourceLocation::Current()) {
    Isolate* isolate = Asm().data()->isolate();
    USE(isolate);  // Only consumed by the DCHECKs below in DEBUG builds.
    DCHECK_NOT_NULL(isolate);
    DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
#ifdef DEBUG
    if (v8_flags.debug_code) {
      Check(condition, message, file, line, loc);
    }
#endif
  }
4190
4191 // This is currently only usable during graph building on the main thread.
4192 void Check(V<Word32> condition, const char* message, const char* file,
4193 int line, const SourceLocation& loc = SourceLocation::Current()) {
4194 Isolate* isolate = Asm().data()->isolate();
4195 USE(isolate);
4196 DCHECK_NOT_NULL(isolate);
4197 DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
4198
4199 if (message != nullptr) {
4200 CodeComment({"[ Assert: ", loc}, message);
4201 } else {
4202 CodeComment({"[ Assert: ", loc});
4203 }
4204
4206 std::vector<FileAndLine> file_and_line;
4207 if (file != nullptr) {
4208 file_and_line.push_back({file, line});
4209 }
4210 FailAssert(message, file_and_line, loc);
4211 }
4212 CodeComment({"] Assert", SourceLocation()});
4213 }
4214
  // Emits code that aborts with a CSA-check failure: builds a message string
  // from `message` and the given file/line pairs, materializes it as a heap
  // string constant, calls AbortCSADcheck on it, and marks the current block
  // unreachable. Requires the main-thread isolate (it allocates on the heap).
  void FailAssert(const char* message,
                  const std::vector<FileAndLine>& files_and_lines,
                  const SourceLocation& loc) {
    std::stringstream stream;
    if (message) stream << message;
    // Iterate in reverse so the innermost macro location is printed first.
    for (auto it = files_and_lines.rbegin(); it != files_and_lines.rend();
         ++it) {
      if (it->first != nullptr) {
        stream << " [" << it->first << ":" << it->second << "]";
#ifndef DEBUG
        // To limit the size of these strings in release builds, we include only
        // the innermost macro's file name and line number.
        break;
#endif
      }
    }

    Isolate* isolate = Asm().data()->isolate();
    DCHECK_NOT_NULL(isolate);
    DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
    V<String> string_constant =
        __ HeapConstantNoHole(isolate->factory()->NewStringFromAsciiChecked(
            stream.str().c_str(), AllocationType::kOld));

    AbortCSADcheck(string_constant);
    Unreachable();
  }
4242
4244 ReduceIfReachableAbortCSADcheck(message);
4245 }
4246
4247 // CatchBlockBegin should always be the 1st operation of a catch handler, and
4248 // returns the value of the exception that was caught. Because of split-edge
4249 // form, catch handlers cannot have multiple predecessors (since their
4250 // predecessors always end with CheckException, which has 2 successors). As
4251 // such, when multiple CheckException go to the same catch handler,
4252 // Assembler::AddPredecessor and Assembler::SplitEdge take care of introducing
4253 // additional intermediate catch handlers, which are then wired to the
4254 // original catch handler. When calling `__ CatchBlockBegin` at the begining
4255 // of the original catch handler, a Phi of the CatchBlockBegin of the
4256 // predecessors is emitted instead. Here is an example:
4257 //
4258 // Initial graph:
4259 //
4260 // + B1 ----------------+
4261 // | ... |
4262 // | 1: CallOp(...) |
4263 // | 2: CheckException |
4264 // +--------------------+
4265 // / \
4266 // / \
4267 // / \
4268 // + B2 ----------------+ + B3 ----------------+
4269 // | 3: DidntThrow(1) | | 4: CatchBlockBegin |
4270 // | ... | | 5: SomeOp(4) |
4271 // | ... | | ... |
4272 // +--------------------+ +--------------------+
4273 // \ /
4274 // \ /
4275 // \ /
4276 // + B4 ----------------+
4277 // | 6: Phi(3, 4) |
4278 // | ... |
4279 // +--------------------+
4280 //
4281 //
4282 // Let's say that we lower the CallOp to 2 throwing calls. We'll thus get:
4283 //
4284 //
4285 // + B1 ----------------+
4286 // | ... |
4287 // | 1: CallOp(...) |
4288 // | 2: CheckException |
4289 // +--------------------+
4290 // / \
4291 // / \
4292 // / \
4293 // + B2 ----------------+ + B4 ----------------+
4294 // | 3: DidntThrow(1) | | 7: CatchBlockBegin |
4295 // | 4: CallOp(...) | | 8: Goto(B6) |
4296 // | 5: CheckException | +--------------------+
4297 // +--------------------+ \
4298 // / \ \
4299 // / \ \
4300 // / \ \
4301 // + B3 ----------------+ + B5 ----------------+ |
4302 // | 6: DidntThrow(4) | | 9: CatchBlockBegin | |
4303 // | ... | | 10: Goto(B6) | |
4304 // | ... | +--------------------+ |
4305 // +--------------------+ \ |
4306 // \ \ |
4307 // \ \ |
4308 // \ + B6 ----------------+
4309 // \ | 11: Phi(7, 9) |
4310 // \ | 12: SomeOp(11) |
4311 // \ | ... |
4312 // \ +--------------------+
4313 // \ /
4314 // \ /
4315 // \ /
4316 // + B7 ----------------+
4317 // | 6: Phi(6, 11) |
4318 // | ... |
4319 // +--------------------+
4320 //
4321 // Note B6 in the output graph corresponds to B3 in the input graph and that
4322 // `11: Phi(7, 9)` was emitted when calling `CatchBlockBegin` in order to map
4323 // `4: CatchBlockBegin` from the input graph.
4324 //
4325 // Besides AddPredecessor and SplitEdge in Assembler, most of the machinery to
4326 // make this work is in GenericReducerBase (in particular,
4327 // `REDUCE(CatchBlockBegin)`, `REDUCE(Call)`, `REDUCE(CheckException)` and
4328 // `CatchIfInCatchScope`).
4329 V<Object> CatchBlockBegin() { return ReduceIfReachableCatchBlockBegin(); }
4330
4332 bool is_backedge = destination->IsBound();
4333 Goto(destination, is_backedge);
4334 }
  // Emits an unconditional Goto to `destination`. `is_backedge` must be true
  // iff this edge closes a loop; the single-argument overload derives it from
  // destination->IsBound().
  void Goto(Block* destination, bool is_backedge) {
    ReduceIfReachableGoto(destination, is_backedge);
  }
4338 void Branch(V<Word32> condition, Block* if_true, Block* if_false,
4340 ReduceIfReachableBranch(condition, if_true, if_false, hint);
4341 }
  // Convenience overload: unpacks a ConditionWithHint into the separate
  // condition and branch-hint arguments of the explicit Branch().
  void Branch(ConditionWithHint condition, Block* if_true, Block* if_false) {
    return Branch(condition.condition(), if_true, if_false, condition.hint());
  }
4345
4346 // Return `true` if the control flow after the conditional jump is reachable.
4349 if (V8_UNLIKELY(Asm().generating_unreachable_operations())) {
4350 // What we return here should not matter.
4352 }
4353 Block* if_false = Asm().NewBlock();
4354 return BranchAndBind(condition, if_true, if_false, hint, if_false);
4355 }
4357 return GotoIf(condition.condition(), if_true, condition.hint());
4358 }
4359 // Return `true` if the control flow after the conditional jump is reachable.
4362 if (V8_UNLIKELY(Asm().generating_unreachable_operations())) {
4363 // What we return here should not matter.
4365 }
4366 Block* if_true = Asm().NewBlock();
4367 return BranchAndBind(condition, if_true, if_false, hint, if_true);
4368 }
4369
4371 Block* if_false) {
4372 return GotoIfNot(condition.condition(), if_false, condition.hint());
4373 }
4374
4376 base::Vector<OpIndex> arguments, CanThrow can_throw,
4377 Isolate* isolate) {
4378 if (V8_UNLIKELY(Asm().generating_unreachable_operations())) {
4379 return OpIndex::Invalid();
4380 }
4381 Callable const callable = Builtins::CallableFor(isolate, builtin);
4383
4384 const CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
4385 graph_zone, callable.descriptor(),
4388 DCHECK_EQ(call_descriptor->NeedsFrameState(), frame_state.valid());
4389
4390 const TSCallDescriptor* ts_call_descriptor = TSCallDescriptor::Create(
4391 call_descriptor, can_throw, LazyDeoptOnThrow::kNo, graph_zone);
4392
4393 OpIndex callee = Asm().HeapConstant(callable.code());
4394
4395 return Asm().Call(callee, frame_state, arguments, ts_call_descriptor);
4396 }
4397
4399 V<String> second) {
4400 return ReduceIfReachableNewConsString(length, first, second);
4401 }
4403 AllocationType allocation_type) {
4404 return ReduceIfReachableNewArray(length, kind, allocation_type);
4405 }
4407 AllocationType allocation_type) {
4409 NewArray(length, NewArrayOp::Kind::kDouble, allocation_type));
4410 }
4411
4414 return ReduceIfReachableDoubleArrayMinMax(array, kind);
4415 }
4422
4424 return ReduceIfReachableLoadFieldByIndex(object, index);
4425 }
4426
4427 void DebugBreak() { ReduceIfReachableDebugBreak(); }
4428
4429 // TODO(nicohartmann): Maybe this can be unified with Dcheck?
4430 void AssertImpl(V<Word32> condition, const char* condition_string,
4431 const char* file, int line) {
4432#ifdef DEBUG
4433 // We use 256 characters as a buffer size. This can be increased if
4434 // necessary.
4435 static constexpr size_t kMaxAssertCommentLength = 256;
4436 base::Vector<char> buffer =
4437 Asm().data()->compilation_zone()->template AllocateVector<char>(
4438 kMaxAssertCommentLength);
4439 int result = base::SNPrintF(buffer, "Assert: %s [%s:%d]",
4440 condition_string, file, line);
4441 DCHECK_LT(0, result);
4442 Comment(buffer.data());
4444 Comment(buffer.data());
4445 Comment("ASSERT FAILED");
4446 DebugBreak();
4447 }
4448
4449#endif
4450 }
4451
4453 CHECK(v8_flags.turboshaft_enable_debug_features);
4454 ReduceIfReachableDebugPrint(input, rep);
4455 }
4465
4466 void Comment(const char* message) { ReduceIfReachableComment(message); }
4467 void Comment(const std::string& message) {
4468 size_t length = message.length() + 1;
4469 char* zone_buffer =
4470 Asm().data()->compilation_zone()->template AllocateArray<char>(length);
4471 MemCopy(zone_buffer, message.c_str(), length);
4472 Comment(zone_buffer);
4473 }
  // Emits a code comment built from `message.message`, the streamed `args`,
  // and the capture site's source location (when available). No-op unless the
  // --code-comments flag is enabled.
  template <typename... Args>
  void CodeComment(MessageWithSourceLocation message, Args&&... args) {
    if (!v8_flags.code_comments) return;
    std::ostringstream s;
    // USE() evaluates its operands left-to-right, so this streams the message
    // followed by each argument via the pack expansion.
    USE(s << message.message, (s << std::forward<Args>(args))...);
    if (message.loc.FileName()) {
      s << " - " << message.loc.ToString();
    }
    Comment(std::move(s).str());
  }
4485
4489 return ReduceIfReachableBigIntBinop(left, right, frame_state, kind);
4490 }
4491#define BIGINT_BINOP(kind) \
4492 V<BigInt> BigInt##kind(V<BigInt> left, V<BigInt> right, \
4493 V<turboshaft::FrameState> frame_state) { \
4494 return BigIntBinop(left, right, frame_state, \
4495 BigIntBinopOp::Kind::k##kind); \
4496 }
4497 BIGINT_BINOP(Add)
4498 BIGINT_BINOP(Sub)
4499 BIGINT_BINOP(Mul)
4500 BIGINT_BINOP(Div)
4501 BIGINT_BINOP(Mod)
4502 BIGINT_BINOP(BitwiseAnd)
4503 BIGINT_BINOP(BitwiseOr)
4504 BIGINT_BINOP(BitwiseXor)
4507#undef BIGINT_BINOP
4508
4511 return ReduceIfReachableBigIntComparison(left, right, kind);
4512 }
4513#define BIGINT_COMPARE(kind) \
4514 V<Boolean> BigInt##kind(V<BigInt> left, V<BigInt> right) { \
4515 return BigIntComparison(left, right, BigIntComparisonOp::Kind::k##kind); \
4516 }
4518 BIGINT_COMPARE(LessThan)
4519 BIGINT_COMPARE(LessThanOrEqual)
4520#undef BIGINT_COMPARE
4521
4523 return ReduceIfReachableBigIntUnary(input, kind);
4524 }
4528
4530 V<Word32> right_low, V<Word32> right_high,
4532 return ReduceIfReachableWord32PairBinop(left_low, left_high, right_low,
4533 right_high, kind);
4534 }
4535
4538 return ReduceIfReachableStringAt(string, position, kind);
4539 }
4546
4547#ifdef V8_INTL_SUPPORT
  // Emits a StringToCaseIntl operation converting `string` to the case given
  // by `kind`. Only compiled with V8_INTL_SUPPORT.
  V<String> StringToCaseIntl(V<String> string, StringToCaseIntlOp::Kind kind) {
    return ReduceIfReachableStringToCaseIntl(string, kind);
  }
  // Shorthand for the lower-case conversion.
  V<String> StringToLowerCaseIntl(V<String> string) {
    return StringToCaseIntl(string, StringToCaseIntlOp::Kind::kLower);
  }
  // Shorthand for the upper-case conversion.
  V<String> StringToUpperCaseIntl(V<String> string) {
    return StringToCaseIntl(string, StringToCaseIntlOp::Kind::kUpper);
  }
4557#endif // V8_INTL_SUPPORT
4558
4560 return ReduceIfReachableStringLength(string);
4561 }
4562
4564 ElementsKind elements_kind) {
4565 return ReduceIfReachableTypedArrayLength(typed_array, elements_kind);
4566 }
4567
4569 return ReduceIfReachableStringIndexOf(string, search, position);
4570 }
4571
4573 return ReduceIfReachableStringFromCodePointAt(string, index);
4574 }
4575
4577 return ReduceIfReachableStringSubstring(string, start, end);
4578 }
4579
4581 return ReduceIfReachableStringConcat(length, left, right);
4582 }
4583
4586 return ReduceIfReachableStringComparison(left, right, kind);
4587 }
4598
4600 return ReduceIfReachableArgumentsLength(ArgumentsLengthOp::Kind::kArguments,
4601 0);
4602 }
// Returns (as a Smi) the length of the rest-parameter array for a function
// declaring {formal_parameter_count} formal parameters. The count must be
// non-negative (checked in debug builds only).
4603 V<Smi> RestLength(int formal_parameter_count) {
4604 DCHECK_LE(0, formal_parameter_count);
4605 return ReduceIfReachableArgumentsLength(ArgumentsLengthOp::Kind::kRest,
4606 formal_parameter_count);
4607 }
4608
4611 int formal_parameter_count) {
4612 DCHECK_LE(0, formal_parameter_count);
4613 return ReduceIfReachableNewArgumentsElements(arguments_count, type,
4614 formal_parameter_count);
4615 }
4616
4618 V<WordPtr> index, ExternalArrayType array_type) {
4619 return ReduceIfReachableLoadTypedElement(buffer, base, external, index,
4620 array_type);
4621 }
4622
4624 V<WordPtr> index, V<Word32> is_little_endian,
4625 ExternalArrayType element_type) {
4626 return ReduceIfReachableLoadDataViewElement(object, storage, index,
4627 is_little_endian, element_type);
4628 }
4629
4631 return ReduceIfReachableLoadStackArgument(base, index);
4632 }
4633
// Stores {value} into a typed-array element addressed by {base}, {external}
// and {index}, with {array_type} selecting the element representation.
// {buffer} is passed along to the operation as well — presumably to keep the
// backing ArrayBuffer alive across the store; confirm against
// StoreTypedElementOp.
4634 void StoreTypedElement(OpIndex buffer, V<Object> base, V<WordPtr> external,
4635 V<WordPtr> index, OpIndex value,
4636 ExternalArrayType array_type) {
4637 ReduceIfReachableStoreTypedElement(buffer, base, external, index, value,
4638 array_type);
4639 }
4640
4642 V<WordPtr> index, OpIndex value,
4643 ConstOrV<Word32> is_little_endian,
4644 ExternalArrayType element_type) {
4645 ReduceIfReachableStoreDataViewElement(
4646 object, storage, index, value, resolve(is_little_endian), element_type);
4647 }
4648
4650 V<JSArray> array, V<WordPtr> index, V<Any> value,
4652 MaybeHandle<Map> double_map) {
4653 ReduceIfReachableTransitionAndStoreArrayElement(array, index, value, kind,
4654 fast_map, double_map);
4655 }
4656
4663
4665 const ZoneRefSet<Map>& maps) {
4666 return ReduceIfReachableCompareMaps(heap_object, map, maps);
4667 }
4668
4669 void CheckMaps(V<HeapObject> heap_object,
4671 const ZoneRefSet<Map>& maps, CheckMapsFlags flags,
4672 const FeedbackSource& feedback) {
4673 ReduceIfReachableCheckMaps(heap_object, frame_state, map, maps, flags,
4674 feedback);
4675 }
4676
// Records the static assumption that {heap_object}'s map is one of {maps}.
// Unlike CheckMaps above, this takes no frame state, so no deoptimizing
// runtime check is involved — it only feeds later optimizations.
4677 void AssumeMap(V<HeapObject> heap_object, const ZoneRefSet<Map>& maps) {
4678 ReduceIfReachableAssumeMap(heap_object, maps);
4679 }
4680
4683 Handle<FeedbackCell> feedback_cell) {
4684 return ReduceIfReachableCheckedClosure(input, frame_state, feedback_cell);
4685 }
4686
4689 ReduceIfReachableCheckEqualsInternalizedString(expected, value,
4690 frame_state);
4691 }
4692
4698
4704
4706 return ReduceIfReachableLoadMessage(offset);
4707 }
4708
4710 ReduceIfReachableStoreMessage(offset, object);
4711 }
4712
4714 SameValueOp::Mode mode) {
4715 return ReduceIfReachableSameValue(left, right, mode);
4716 }
4717
4719 return ReduceIfReachableFloat64SameValue(resolve(left), resolve(right));
4720 }
4721
4723 V<Object> data_argument, V<Context> context,
4725 const FastApiCallParameters* parameters,
4727 return ReduceIfReachableFastApiCall(frame_state, data_argument, context,
4728 arguments, parameters, out_reps);
4729 }
4730
4732 ReduceIfReachableRuntimeAbort(reason);
4733 }
4734
4736 return ReduceIfReachableEnsureWritableFastElements(object, elements);
4737 }
4738
4740 V<Word32> index, V<Word32> elements_length,
4743 const FeedbackSource& feedback) {
4744 return ReduceIfReachableMaybeGrowFastElements(
4745 object, elements, index, elements_length, frame_state, mode, feedback);
4746 }
4747
4749 const ElementsTransition& transition) {
4750 ReduceIfReachableTransitionElementsKind(object, transition);
4751 }
4754 const ElementsTransitionWithMultipleSources& transition) {
4755 ReduceIfReachableTransitionElementsKindOrCheckMap(object, map, frame_state,
4756 transition);
4757 }
4758
4761 return ReduceIfReachableFindOrderedHashEntry(data_structure, key, kind);
4762 }
4777
4779 Isolate* isolate = __ data() -> isolate();
4780 DCHECK_NOT_NULL(isolate);
4781 if (RootsTable::IsImmortalImmovable(root_index)) {
4782 Handle<Object> root = isolate->root_handle(root_index);
4783 if (i::IsSmi(*root)) {
4784 return __ SmiConstant(Cast<Smi>(*root));
4785 } else {
4787 }
4788 }
4789
4790 // TODO(jgruber): In theory we could generate better code for this by
4791 // letting the macro assembler decide how to load from the roots list. In
4792 // most cases, it would boil down to loading from a fixed kRootRegister
4793 // offset.
4794 OpIndex isolate_root =
4795 __ ExternalConstant(ExternalReference::isolate_root(isolate));
4796 int offset = IsolateData::root_slot_offset(root_index);
4797 return __ LoadOffHeap(isolate_root, offset,
4799 }
4800
4801#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
4802 V<RemoveTagged< \
4803 decltype(std::declval<ReadOnlyRoots>().rootAccessorName())>::type> \
4804 name##Constant() { \
4805 const TurboshaftPipelineKind kind = __ data() -> pipeline_kind(); \
4806 if (V8_UNLIKELY(kind == TurboshaftPipelineKind::kCSA || \
4807 kind == TurboshaftPipelineKind::kTSABuiltin)) { \
4808 DCHECK(RootsTable::IsImmortalImmovable(RootIndex::k##rootIndexName)); \
4809 return V<RemoveTagged< \
4810 decltype(std::declval<ReadOnlyRoots>().rootAccessorName())>::type>:: \
4811 Cast(__ LoadRoot(RootIndex::k##rootIndexName)); \
4812 } else { \
4813 Isolate* isolate = __ data() -> isolate(); \
4814 DCHECK_NOT_NULL(isolate); \
4815 Factory* factory = isolate->factory(); \
4816 DCHECK_NOT_NULL(factory); \
4817 return __ HeapConstant(factory->rootAccessorName()); \
4818 } \
4819 }
4821#undef HEAP_CONSTANT_ACCESSOR
4822
4823#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
4824 V<RemoveTagged<decltype(std::declval<Heap>().rootAccessorName())>::type> \
4825 name##Constant() { \
4826 const TurboshaftPipelineKind kind = __ data() -> pipeline_kind(); \
4827 if (V8_UNLIKELY(kind == TurboshaftPipelineKind::kCSA || \
4828 kind == TurboshaftPipelineKind::kTSABuiltin)) { \
4829 DCHECK(RootsTable::IsImmortalImmovable(RootIndex::k##rootIndexName)); \
4830 return V< \
4831 RemoveTagged<decltype(std::declval<Heap>().rootAccessorName())>:: \
4832 type>::Cast(__ LoadRoot(RootIndex::k##rootIndexName)); \
4833 } else { \
4834 Isolate* isolate = __ data() -> isolate(); \
4835 DCHECK_NOT_NULL(isolate); \
4836 Factory* factory = isolate->factory(); \
4837 DCHECK_NOT_NULL(factory); \
4838 return __ HeapConstant(factory->rootAccessorName()); \
4839 } \
4840 }
4842#undef HEAP_CONSTANT_ACCESSOR
4843
4844#define HEAP_CONSTANT_TEST(rootIndexName, rootAccessorName, name) \
4845 V<Word32> Is##name(V<Object> value) { \
4846 return TaggedEqual(value, name##Constant()); \
4847 } \
4848 V<Word32> IsNot##name(V<Object> value) { \
4849 return TaggedNotEqual(value, name##Constant()); \
4850 }
4852#undef HEAP_CONSTANT_TEST
4853
4854#ifdef V8_ENABLE_WEBASSEMBLY
// Wasm: reads the current value of {global} from {trusted_instance_data}.
4855 V<Any> GlobalGet(V<WasmTrustedInstanceData> trusted_instance_data,
4856 const wasm::WasmGlobal* global) {
4857 return ReduceIfReachableGlobalGet(trusted_instance_data, global);
4858 }
4859
// Wasm: writes {value} into {global} of {trusted_instance_data}.
4860 OpIndex GlobalSet(V<WasmTrustedInstanceData> trusted_instance_data,
4861 V<Any> value, const wasm::WasmGlobal* global) {
4862 return ReduceIfReachableGlobalSet(trusted_instance_data, value, global);
4863 }
4864
// Loads the root object at {index} of the roots table.
4865 V<HeapObject> RootConstant(RootIndex index) {
4866 return ReduceIfReachableRootConstant(index);
4867 }
4868
// Word32 boolean: is {input} the root object at {index}?
4869 V<Word32> IsRootConstant(V<Object> input, RootIndex index) {
4870 return ReduceIfReachableIsRootConstant(input, index);
4871 }
4872
// Produces the null value appropriate for the wasm type {type}.
4873 V<HeapObject> Null(wasm::ValueType type) {
4874 return ReduceIfReachableNull(type);
4875 }
4876
// Word32 boolean: is {input} null with respect to wasm type {type}?
4877 V<Word32> IsNull(V<Object> input, wasm::ValueType type) {
4878 return ReduceIfReachableIsNull(input, type);
4879 }
4880
// Null check that traps with {trap_id} on failure and otherwise yields
// {object} (see AssertNotNullOp for the exact semantics).
4881 V<Object> AssertNotNull(V<Object> object, wasm::ValueType type,
4882 TrapId trap_id) {
4883 return ReduceIfReachableAssertNotNull(object, type, trap_id);
4884 }
4885
// Loads the canonical RTT (a Map) for {type_index} from the {rtts} table.
4886 V<Map> RttCanon(V<FixedArray> rtts, wasm::ModuleTypeIndex type_index) {
4887 return ReduceIfReachableRttCanon(rtts, type_index);
4888 }
4889
// Word32 boolean: does {object} pass the wasm type check described by
// {config}, checked against the optional {rtt}?
4890 V<Word32> WasmTypeCheck(V<Object> object, OptionalV<Map> rtt,
4891 WasmTypeCheckConfig config) {
4892 return ReduceIfReachableWasmTypeCheck(object, rtt, config);
4893 }
4894
// Checked counterpart of WasmTypeCheck: yields {object} on success (see
// WasmTypeCastOp for the failure behavior).
4895 V<Object> WasmTypeCast(V<Object> object, OptionalV<Map> rtt,
4896 WasmTypeCheckConfig config) {
4897 return ReduceIfReachableWasmTypeCast(object, rtt, config);
4898 }
4899
// Conversions between the externref and anyref representations.
4900 V<Object> AnyConvertExtern(V<Object> input) {
4901 return ReduceIfReachableAnyConvertExtern(input);
4902 }
4903
4904 V<Object> ExternConvertAny(V<Object> input) {
4905 return ReduceIfReachableExternConvertAny(input);
4906 }
4907
// Attaches a static wasm type annotation to {value}; the value itself is
// passed through unchanged.
4908 template <typename T>
4909 V<T> AnnotateWasmType(V<T> value, const wasm::ValueType type) {
4910 return ReduceIfReachableWasmTypeAnnotation(value, type);
4911 }
4912
// Reads field {field_index} of a wasm struct, honoring {null_check}.
// {is_signed} presumably selects sign- vs. zero-extension for packed field
// types — confirm against StructGetOp.
4913 V<Any> StructGet(V<WasmStructNullable> object, const wasm::StructType* type,
4914 wasm::ModuleTypeIndex type_index, int field_index,
4915 bool is_signed, CheckForNull null_check) {
4916 return ReduceIfReachableStructGet(object, type, type_index, field_index,
4917 is_signed, null_check);
4918 }
4919
// Writes {value} into field {field_index} of a wasm struct, honoring
// {null_check}.
4920 void StructSet(V<WasmStructNullable> object, V<Any> value,
4921 const wasm::StructType* type, wasm::ModuleTypeIndex type_index,
4922 int field_index, CheckForNull null_check) {
4923 ReduceIfReachableStructSet(object, value, type, type_index, field_index,
4924 null_check);
4925 }
4926
// Reads element {index} of a wasm array. {is_signed} presumably controls
// extension of packed element types — confirm against ArrayGetOp.
4927 V<Any> ArrayGet(V<WasmArrayNullable> array, V<Word32> index,
4928 const wasm::ArrayType* array_type, bool is_signed) {
4929 return ReduceIfReachableArrayGet(array, index, array_type, is_signed);
4930 }
4931
// Writes {value} into element {index} of a wasm array.
4932 void ArraySet(V<WasmArrayNullable> array, V<Word32> index, V<Any> value,
4933 wasm::ValueType element_type) {
4934 ReduceIfReachableArraySet(array, index, value, element_type);
4935 }
4936
// Returns the length of a wasm array, with an optional null check.
4937 V<Word32> ArrayLength(V<WasmArrayNullable> array, CheckForNull null_check) {
4938 return ReduceIfReachableArrayLength(array, null_check);
4939 }
4940
// Allocates a wasm array of {length} elements with the given {rtt} (map).
// {length} may be a compile-time constant or a runtime value (ConstOrV).
4941 V<WasmArray> WasmAllocateArray(V<Map> rtt, ConstOrV<Word32> length,
4942 const wasm::ArrayType* array_type) {
4943 return ReduceIfReachableWasmAllocateArray(rtt, resolve(length), array_type);
4944 }
4945
// Allocates a wasm struct with the given {rtt} (map).
4946 V<WasmStruct> WasmAllocateStruct(V<Map> rtt,
4947 const wasm::StructType* struct_type) {
4948 return ReduceIfReachableWasmAllocateStruct(rtt, struct_type);
4949 }
4950
// Materializes the funcref for {function_index} of {wasm_instance}.
4951 V<WasmFuncRef> WasmRefFunc(V<Object> wasm_instance, uint32_t function_index) {
4952 return ReduceIfReachableWasmRefFunc(wasm_instance, function_index);
4953 }
4954
// Ensures {string} is available in WTF-16 representation.
4955 V<String> StringAsWtf16(V<String> string) {
4956 return ReduceIfReachableStringAsWtf16(string);
4957 }
4958
// Prepares {string} for repeated code-unit access; returns a
// (object, offset, charwidth?) tuple — confirm field meanings against
// StringPrepareForGetCodeUnitOp.
4959 V<turboshaft::Tuple<Object, WordPtr, Word32>> StringPrepareForGetCodeUnit(
4960 V<Object> string) {
4961 return ReduceIfReachableStringPrepareForGetCodeUnit(string);
4962 }
4963
// --- SIMD128 helpers. Each emits the corresponding Simd128* operation
// through the reducer stack; nothing is emitted when the current position
// is unreachable (see ReduceIfReachable*).
4964 V<Simd128> Simd128Constant(const uint8_t value[kSimd128Size]) {
4965 return ReduceIfReachableSimd128Constant(value);
4966 }
4967
4968 V<Simd128> Simd128Binop(V<Simd128> left, V<Simd128> right,
4969 Simd128BinopOp::Kind kind) {
4970 return ReduceIfReachableSimd128Binop(left, right, kind);
4971 }
4972
4973 V<Simd128> Simd128Unary(V<Simd128> input, Simd128UnaryOp::Kind kind) {
4974 return ReduceIfReachableSimd128Unary(input, kind);
4975 }
4976
// Convenience wrapper for the byte-reversal unary kind.
4977 V<Simd128> Simd128ReverseBytes(V<Simd128> input) {
4978 return Simd128Unary(input, Simd128UnaryOp::Kind::kSimd128ReverseBytes);
4979 }
4980
4981 V<Simd128> Simd128Shift(V<Simd128> input, V<Word32> shift,
4982 Simd128ShiftOp::Kind kind) {
4983 return ReduceIfReachableSimd128Shift(input, shift, kind);
4984 }
4985
// Vector test producing a Word32 boolean (kind selects the predicate).
4986 V<Word32> Simd128Test(V<Simd128> input, Simd128TestOp::Kind kind) {
4987 return ReduceIfReachableSimd128Test(input, kind);
4988 }
4989
// Broadcasts the scalar {input} into all lanes.
4990 V<Simd128> Simd128Splat(V<Any> input, Simd128SplatOp::Kind kind) {
4991 return ReduceIfReachableSimd128Splat(input, kind);
4992 }
4993
4994 V<Simd128> Simd128Ternary(V<Simd128> first, V<Simd128> second,
4995 V<Simd128> third, Simd128TernaryOp::Kind kind) {
4996 return ReduceIfReachableSimd128Ternary(first, second, third, kind);
4997 }
4998
4999 V<Any> Simd128ExtractLane(V<Simd128> input, Simd128ExtractLaneOp::Kind kind,
5000 uint8_t lane) {
5001 return ReduceIfReachableSimd128ExtractLane(input, kind, lane);
5002 }
5003
5004 V<Simd128> Simd128Reduce(V<Simd128> input, Simd128ReduceOp::Kind kind) {
5005 return ReduceIfReachableSimd128Reduce(input, kind);
5006 }
5007
// Returns {into} with lane {lane} replaced by {new_lane}.
5008 V<Simd128> Simd128ReplaceLane(V<Simd128> into, V<Any> new_lane,
5009 Simd128ReplaceLaneOp::Kind kind, uint8_t lane) {
5010 return ReduceIfReachableSimd128ReplaceLane(into, new_lane, kind, lane);
5011 }
5012
// Single-lane memory access (load or store, per {mode}) addressed by
// {base}/{index}/{offset}; {lane_kind} and {lane} select the affected lane.
5013 OpIndex Simd128LaneMemory(V<WordPtr> base, V<WordPtr> index, V<WordPtr> value,
5014 Simd128LaneMemoryOp::Mode mode,
5015 Simd128LaneMemoryOp::Kind kind,
5016 Simd128LaneMemoryOp::LaneKind lane_kind,
5017 uint8_t lane, int offset) {
5018 return ReduceIfReachableSimd128LaneMemory(base, index, value, mode, kind,
5019 lane_kind, lane, offset);
5020 }
5021
// Load combined with a transform (e.g. splat/extend variants), as described
// by {load_kind} and {transform_kind}.
5022 V<Simd128> Simd128LoadTransform(
5023 V<WordPtr> base, V<WordPtr> index,
5024 Simd128LoadTransformOp::LoadKind load_kind,
5025 Simd128LoadTransformOp::TransformKind transform_kind, int offset) {
5026 return ReduceIfReachableSimd128LoadTransform(base, index, load_kind,
5027 transform_kind, offset);
5028 }
5029
// Shuffles {left}/{right} according to the 16-entry {shuffle} pattern.
5030 V<Simd128> Simd128Shuffle(V<Simd128> left, V<Simd128> right,
5031 Simd128ShuffleOp::Kind kind,
5032 const uint8_t shuffle[kSimd128Size]) {
5033 return ReduceIfReachableSimd128Shuffle(left, right, kind, shuffle);
5034 }
5035
5036#if V8_ENABLE_WASM_DEINTERLEAVED_MEM_OPS
// Loads a pair of 128-bit vectors and de-interleaves them into a 256-bit
// result (only available with deinterleaved memory ops enabled).
5037 V<Simd256> Simd128LoadPairDeinterleave(
5038 V<WordPtr> base, V<WordPtr> index, LoadOp::Kind load_kind,
5039 Simd128LoadPairDeinterleaveOp::Kind kind) {
5040 return ReduceIfReachableSimd128LoadPairDeinterleave(base, index, load_kind,
5041 kind);
5042 }
5043#endif // V8_ENABLE_WASM_DEINTERLEAVED_MEM_OPS
5044
5045 // SIMD256
5046#if V8_ENABLE_WASM_SIMD256_REVEC
// --- SIMD256 helpers (revectorization support); same delegation pattern as
// the Simd128 helpers above.
5047 V<Simd256> Simd256Constant(const uint8_t value[kSimd256Size]) {
5048 return ReduceIfReachableSimd256Constant(value);
5049 }
5050
// Extracts the 128-bit half {lane} (0 or 1) of {source}.
5051 OpIndex Simd256Extract128Lane(V<Simd256> source, uint8_t lane) {
5052 return ReduceIfReachableSimd256Extract128Lane(source, lane);
5053 }
5054
5055 V<Simd256> Simd256LoadTransform(
5056 V<WordPtr> base, V<WordPtr> index,
5057 Simd256LoadTransformOp::LoadKind load_kind,
5058 Simd256LoadTransformOp::TransformKind transform_kind, int offset) {
5059 return ReduceIfReachableSimd256LoadTransform(base, index, load_kind,
5060 transform_kind, offset);
5061 }
5062
5063 V<Simd256> Simd256Unary(V<Simd256> input, Simd256UnaryOp::Kind kind) {
5064 return ReduceIfReachableSimd256Unary(input, kind);
5065 }
5066
// Overload taking a 128-bit input: only valid for the sign-extension kinds
// (enforced by the DCHECKs below).
5067 V<Simd256> Simd256Unary(V<Simd128> input, Simd256UnaryOp::Kind kind) {
5068 DCHECK_GE(kind, Simd256UnaryOp::Kind::kFirstSignExtensionOp);
5069 DCHECK_LE(kind, Simd256UnaryOp::Kind::kLastSignExtensionOp);
5070 return ReduceIfReachableSimd256Unary(input, kind);
5071 }
5072
5073 V<Simd256> Simd256Binop(V<Simd256> left, V<Simd256> right,
5074 Simd256BinopOp::Kind kind) {
5075 return ReduceIfReachableSimd256Binop(left, right, kind);
5076 }
5077
// Overload taking 128-bit inputs: only valid for the sign-extension kinds
// (enforced by the DCHECKs below).
5078 V<Simd256> Simd256Binop(V<Simd128> left, V<Simd128> right,
5079 Simd256BinopOp::Kind kind) {
5080 DCHECK_GE(kind, Simd256BinopOp::Kind::kFirstSignExtensionOp);
5081 DCHECK_LE(kind, Simd256BinopOp::Kind::kLastSignExtensionOp);
5082 return ReduceIfReachableSimd256Binop(left, right, kind);
5083 }
5084
5085 V<Simd256> Simd256Shift(V<Simd256> input, V<Word32> shift,
5086 Simd256ShiftOp::Kind kind) {
5087 return ReduceIfReachableSimd256Shift(input, shift, kind);
5088 }
5089
5090 V<Simd256> Simd256Ternary(V<Simd256> first, V<Simd256> second,
5091 V<Simd256> third, Simd256TernaryOp::Kind kind) {
5092 return ReduceIfReachableSimd256Ternary(first, second, third, kind);
5093 }
5094
5095 V<Simd256> Simd256Splat(OpIndex input, Simd256SplatOp::Kind kind) {
5096 return ReduceIfReachableSimd256Splat(input, kind);
5097 }
5098
// Packs two 128-bit vectors into one 256-bit vector.
5099 V<Simd256> SimdPack128To256(V<Simd128> left, V<Simd128> right) {
5100 return ReduceIfReachableSimdPack128To256(left, right);
5101 }
5102
5103#ifdef V8_TARGET_ARCH_X64
// x64-specific 256-bit shuffles. {control} is the immediate control byte of
// the underlying instruction (vpshufd / vshufps — confirm encoding against
// the respective Simd256*Op definitions).
5104 V<Simd256> Simd256Shufd(V<Simd256> input, const uint8_t control) {
5105 return ReduceIfReachableSimd256Shufd(input, control);
5106 }
5107
5108 V<Simd256> Simd256Shufps(V<Simd256> left, V<Simd256> right,
5109 const uint8_t control) {
5110 return ReduceIfReachableSimd256Shufps(left, right, control);
5111 }
5112
// Unpack/interleave of {left} and {right}; {kind} selects the variant.
5113 V<Simd256> Simd256Unpack(V<Simd256> left, V<Simd256> right,
5114 Simd256UnpackOp::Kind kind) {
5115 return ReduceIfReachableSimd256Unpack(left, right, kind);
5116 }
5117#endif // V8_TARGET_ARCH_X64
5118#endif // V8_ENABLE_WASM_SIMD256_REVEC
5119
5120 V<WasmTrustedInstanceData> WasmInstanceDataParameter() {
5123 }
5124
// Reads the machine stack pointer.
5125 OpIndex LoadStackPointer() { return ReduceIfReachableLoadStackPointer(); }
5126
// Overwrites the machine stack pointer with {value}.
5127 void SetStackPointer(V<WordPtr> value) {
5128 ReduceIfReachableSetStackPointer(value);
5129 }
5130#endif // V8_ENABLE_WEBASSEMBLY
5131
5132#ifdef V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA
// Accessors for the isolate's continuation-preserved embedder data (only
// compiled in when V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA is set).
5133 V<Object> GetContinuationPreservedEmbedderData() {
5134 return ReduceIfReachableGetContinuationPreservedEmbedderData();
5135 }
5136
5137 void SetContinuationPreservedEmbedderData(V<Object> data) {
5138 ReduceIfReachableSetContinuationPreservedEmbedderData(data);
5139 }
5140#endif // V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA
5141
5142 template <typename Rep>
5144 return v;
5145 }
5147 return v.is_constant() ? Word32Constant(v.constant_value()) : v.value();
5148 }
5150 return v.is_constant() ? Word64Constant(v.constant_value()) : v.value();
5151 }
5153 return v.is_constant() ? Float32Constant(v.constant_value()) : v.value();
5154 }
5156 return v.is_constant() ? Float64Constant(v.constant_value()) : v.value();
5157 }
5158
5159 private:
5160#ifdef DEBUG
5161#define REDUCE_OP(Op) \
5162 template <class... Args> \
5163 V8_INLINE OpIndex ReduceIfReachable##Op(Args... args) { \
5164 if (V8_UNLIKELY(Asm().generating_unreachable_operations())) { \
5165 DCHECK(Asm().conceptually_in_a_block()); \
5166 return OpIndex::Invalid(); \
5167 } \
5168 OpIndex result = Asm().Reduce##Op(args...); \
5169 if constexpr (!IsBlockTerminator(Opcode::k##Op)) { \
5170 if (Asm().current_block() == nullptr) { \
5171 /* The input operation was not a block terminator, but a reducer \
5172 * lowered it into a block terminator. */ \
5173 Asm().set_conceptually_in_a_block(true); \
5174 } \
5175 } \
5176 return result; \
5177 }
5178#else
5179#define REDUCE_OP(Op) \
5180 template <class... Args> \
5181 V8_INLINE OpIndex ReduceIfReachable##Op(Args... args) { \
5182 if (V8_UNLIKELY(Asm().generating_unreachable_operations())) { \
5183 return OpIndex::Invalid(); \
5184 } \
5185 /* With an empty reducer stack, `Asm().Reduce##Op` will just create a */ \
5186 /* new `Op` operation (defined in operations.h). To figure out where */ \
5187 /* this operation is lowered or optimized (if anywhere), search for */ \
5188 /* `REDUCE(<your operation>)`. Then, to know when this lowering */ \
5189 /* actually happens, search for phases that are instantiated with */ \
5190 /* that reducer. You can also look in operation.h where the opcode is */ \
5191 /* declared: operations declared in */ \
5192 /* TURBOSHAFT_SIMPLIFIED_OPERATION_LIST are typically lowered in */ \
5193 /* machine-lowering-reducer-inl.h, and operations in */ \
5194 /* TURBOSHAFT_MACHINE_OPERATION_LIST are typically not lowered before */ \
5195 /* reaching instruction-selector.h. */ \
5196 return Asm().Reduce##Op(args...); \
5197 }
5198#endif
5200#undef REDUCE_OP
5201
5202 // LoadArrayBufferElement and LoadNonArrayBufferElement should be called
5203 // instead of LoadElement.
5204 template <typename T = Any, typename Base>
5205 V<T> LoadElement(V<Base> object, const ElementAccess& access,
5206 V<WordPtr> index, bool is_array_buffer) {
5207 if constexpr (is_taggable_v<Base>) {
5208 DCHECK_EQ(access.base_is_tagged, BaseTaggedness::kTaggedBase);
5209 } else {
5210 static_assert(std::is_same_v<Base, WordPtr>);
5211 DCHECK_EQ(access.base_is_tagged, BaseTaggedness::kUntaggedBase);
5212 }
5213 LoadOp::Kind kind = LoadOp::Kind::Aligned(access.base_is_tagged);
5214 if (is_array_buffer) kind = kind.NotLoadEliminable();
5216 MemoryRepresentation::FromMachineType(access.machine_type);
5217 return Load(object, index, kind, rep, access.header_size,
5218 rep.SizeInBytesLog2());
5219 }
5220
5221 // StoreArrayBufferElement and StoreNonArrayBufferElement should be called
5222 // instead of StoreElement.
5223 template <typename Base>
5224 void StoreElement(V<Base> object, const ElementAccess& access,
5225 ConstOrV<WordPtr> index, V<Any> value,
5226 bool is_array_buffer) {
5227 if constexpr (is_taggable_v<Base>) {
5228 DCHECK_EQ(access.base_is_tagged, BaseTaggedness::kTaggedBase);
5229 } else {
5230 static_assert(std::is_same_v<Base, WordPtr>);
5231 DCHECK_EQ(access.base_is_tagged, BaseTaggedness::kUntaggedBase);
5232 }
5233 LoadOp::Kind kind = LoadOp::Kind::Aligned(access.base_is_tagged);
5234 if (is_array_buffer) kind = kind.NotLoadEliminable();
5236 MemoryRepresentation::FromMachineType(access.machine_type);
5237 Store(object, resolve(index), value, kind, rep, access.write_barrier_kind,
5238 access.header_size, rep.SizeInBytesLog2());
5239 }
5240
5241 // BranchAndBind should be called from GotoIf/GotoIfNot. It will insert a
5242 // Branch, bind {to_bind} (which should correspond to the implicit new block
5243 // following the GotoIf/GotoIfNot) and return a ConditionalGotoStatus
5244 // representing whether the destinations of the Branch are reachable or not.
5246 Block* if_false, BranchHint hint,
5247 Block* to_bind) {
5248 DCHECK_EQ(to_bind, any_of(if_true, if_false));
5249 Block* other = to_bind == if_true ? if_false : if_true;
5250 Block* to_bind_last_pred = to_bind->LastPredecessor();
5251 Block* other_last_pred = other->LastPredecessor();
5252 Asm().Branch(condition, if_true, if_false, hint);
5253 bool to_bind_reachable = to_bind_last_pred != to_bind->LastPredecessor();
5254 bool other_reachable = other_last_pred != other->LastPredecessor();
5255 ConditionalGotoStatus status = static_cast<ConditionalGotoStatus>(
5256 static_cast<int>(other_reachable) | ((to_bind_reachable) << 1));
5257 bool bind_status = Asm().Bind(to_bind);
5258 DCHECK_EQ(bind_status, to_bind_reachable);
5259 USE(bind_status);
5260 return status;
5261 }
5262
5264 // [0] contains the stub with exit frame.
5267
5269};
5270
5271// Some members of Assembler that are used in the constructors of the stack are
5272// extracted to the AssemblerData class, so that they can be initialized before
5273// the rest of the stack, and thus don't need to be passed as argument to all of
5274// the constructors of the stack.
5276 // TODO(dmercadier): consider removing input_graph from this, and only having
5277 // it in GraphVisitor for Stacks that have it.
5288};
5289
5290template <class Reducers>
5291class Assembler : public AssemblerData,
5292 public ReducerStack<Reducers>::type,
5293 public TurboshaftAssemblerOpInterface<Assembler<Reducers>> {
5295 using node_t = typename Stack::node_t;
5296
5297 public:
5303
5304 using Stack::Asm;
5305
// Zone in which the output graph allocates; convenience forwarder.
5310 Zone* graph_zone() const { return output_graph().graph_zone(); }
5311
5312 // When analyzers detect that an operation is dead, they replace its opcode by
5313 // kDead in-place, and thus need to have a non-const input graph.
5315
// Creates a fresh (unbound) block in the output graph.
5317 Block* NewBlock() { return this->output_graph().NewBlock(); }
5318
5319// This condition is true for any compiler except GCC.
5320#if defined(__clang__) || !defined(V8_CC_GNU)
5321 V8_INLINE
5322#endif
5323 bool Bind(Block* block) {
5324#ifdef DEBUG
5325 set_conceptually_in_a_block(true);
5326#endif
5327
5328 if (block->IsLoop() && block->single_loop_predecessor()) {
5329 // {block} is a loop header that had multiple incoming forward edges, and
5330 // for which we've created a "single_predecessor" block. We bind it now,
5331 // and insert a single Goto to the original loop header.
5332 BindReachable(block->single_loop_predecessor());
5333 // We need to go through a raw Emit because calling this->Goto would go
5334 // through AddPredecessor and SplitEdge, which would wrongly try to
5335 // prevent adding more predecessors to the loop header.
5336 this->template Emit<GotoOp>(block, /*is_backedge*/ false);
5337 }
5338
5339 if (!this->output_graph().Add(block)) {
5340 return false;
5341 }
5344 Stack::Bind(block);
5345 return true;
5346 }
5347
5348 // TODO(nicohartmann@): Remove this.
5350 bool bound = Bind(block);
5351 DCHECK(bound);
5352 USE(bound);
5353 }
5354
5355 // Every loop should be finalized once, after it is certain that no backedge
5356 // can be added anymore.
// A loop header that still has a single predecessor at that point never got
// a backedge, so it is not actually a loop: demote it to a plain merge.
5357 void FinalizeLoop(Block* loop_header) {
5358 if (loop_header->IsLoop() && loop_header->PredecessorCount() == 1) {
5359 this->output_graph().TurnLoopIntoMerge(loop_header);
5360 }
5361 }
5362
// Records {operation_origin} as the input-graph origin attributed to
// operations emitted from now on.
5363 void SetCurrentOrigin(OpIndex operation_origin) {
5364 current_operation_origin_ = operation_origin;
5365 }
5366
5367#ifdef DEBUG
5368 void set_conceptually_in_a_block(bool value) {
5370 }
5371 bool conceptually_in_a_block() { return conceptually_in_a_block_; }
5372#endif
5373
5376 return current_block() == nullptr;
5377 }
5381
// Convenience accessor: the operation at {op_idx} in the output graph.
5382 const Operation& Get(OpIndex op_idx) const {
5383 return this->output_graph().Get(op_idx);
5384 }
5385
5387 // CatchScope should be used in most cases to set the current catch block, but
5388 // this is sometimes impractical.
5390
5391#ifdef DEBUG
5392 int& intermediate_tracing_depth() { return intermediate_tracing_depth_; }
5393#endif
5394
5395 // ReduceProjection eliminates projections to tuples and returns the
5396 // corresponding tuple input instead. We do this at the top of the stack to
5397 // avoid passing this Projection around needlessly. This is in particular
5398 // important to ValueNumberingReducer, which assumes that it's at the bottom
5399 // of the stack, and that the BaseReducer will actually emit an Operation. If
5400 // we put this projection-to-tuple-simplification in the BaseReducer, then
5401 // this assumption of the ValueNumberingReducer will break.
5402 V<Any> ReduceProjection(V<Any> tuple, uint16_t index,
5404 if (auto* tuple_op = Asm().matcher().template TryCast<TupleOp>(tuple)) {
5405 return tuple_op->input(index);
5406 }
5407 return Stack::ReduceProjection(tuple, index, rep);
5408 }
5409
5410 // Adds {source} to the predecessors of {destination}.
5411 void AddPredecessor(Block* source, Block* destination, bool branch) {
5412 DCHECK_IMPLIES(branch, source->EndsWithBranchingOp(this->output_graph()));
5413 if (destination->LastPredecessor() == nullptr) {
5414 // {destination} has currently no predecessors.
5415 DCHECK(destination->IsLoopOrMerge());
5416 if (branch && destination->IsLoop()) {
5417 // We always split Branch edges that go to loop headers.
5418 SplitEdge(source, destination);
5419 } else {
5420 destination->AddPredecessor(source);
5421 if (branch) {
5422 DCHECK(!destination->IsLoop());
5424 }
5425 }
5426 return;
5427 } else if (destination->IsBranchTarget()) {
5428 // {destination} used to be a BranchTarget, but branch targets can only
5429 // have one predecessor. We'll thus split its (single) incoming edge, and
5430 // change its type to kMerge.
5431 DCHECK_EQ(destination->PredecessorCount(), 1);
5435 // We have to split `pred` first to preserve order of predecessors.
5436 SplitEdge(pred, destination);
5437 if (branch) {
5438 // A branch always goes to a BranchTarget. We thus split the edge: we'll
5439 // insert a new Block, to which {source} will branch, and which will
5440 // "Goto" to {destination}.
5441 SplitEdge(source, destination);
5442 } else {
5443 // {destination} is a Merge, and {source} just does a Goto; nothing
5444 // special to do.
5445 destination->AddPredecessor(source);
5446 }
5447 return;
5448 }
5449
5450 DCHECK(destination->IsLoopOrMerge());
5451
5452 if (destination->IsLoop() && !destination->IsBound()) {
5453 DCHECK(!branch);
5454 DCHECK_EQ(destination->PredecessorCount(), 1);
5455 // We are trying to add an additional forward edge to this loop, which is
5456 // not allowed (all loops in Turboshaft should have exactly one incoming
5457 // forward edge). Instead, we'll create a new predecessor for the loop,
5458 // where all previous and future forward predecessors will be routed to.
5459 Block* single_predecessor =
5463 AddLoopPredecessor(single_predecessor, source);
5464 return;
5465 }
5466
5467 if (branch) {
5468 // A branch always goes to a BranchTarget. We thus split the edge: we'll
5469 // insert a new Block, to which {source} will branch, and which will
5470 // "Goto" to {destination}.
5471 SplitEdge(source, destination);
5472 } else {
5473 // {destination} is a Merge, and {source} just does a Goto; nothing
5474 // special to do.
5475 destination->AddPredecessor(source);
5476 }
5477 }
5478
5479 private:
5482 current_block_ = nullptr;
5483#ifdef DEBUG
5484 set_conceptually_in_a_block(false);
5485#endif
5486 }
5487
5489 DCHECK(loop_header->IsLoop());
5490 DCHECK(!loop_header->IsBound());
5491 DCHECK_EQ(loop_header->PredecessorCount(), 1);
5492
5493 Block* old_predecessor = loop_header->LastPredecessor();
5494 // Because we always split edges going to loop headers, we know that
5495 // {predecessor} ends with a Goto.
5496 GotoOp& old_predecessor_goto =
5497 old_predecessor->LastOperation(this->output_graph())
5498 .template Cast<GotoOp>();
5499
5500 Block* single_loop_predecessor = NewBlock();
5501 single_loop_predecessor->SetKind(Block::Kind::kMerge);
5502 single_loop_predecessor->SetOrigin(loop_header->OriginForLoopHeader());
5503
5504 // Re-routing the final Goto of {old_predecessor} to go to
5505 // {single_predecessor} instead of {loop_header}.
5506 single_loop_predecessor->AddPredecessor(old_predecessor);
5507 old_predecessor_goto.destination = single_loop_predecessor;
5508
5509 // Resetting the predecessors of {loop_header}: it will now have a single
5510 // predecessor, {old_predecessor}, which isn't bound yet. (and which will be
5511 // bound automatically in Bind)
5512 loop_header->ResetAllPredecessors();
5513 loop_header->AddPredecessor(single_loop_predecessor);
5514 loop_header->SetSingleLoopPredecessor(single_loop_predecessor);
5515
5516 return single_loop_predecessor;
5517 }
5518
// Routes an additional forward edge into a loop through the loop's dedicated
// {single_predecessor} block (see AddPredecessor above): {new_predecessor}'s
// terminal Goto — guaranteed to exist because edges to loop headers are
// always split — is re-targeted to {single_predecessor}, which then records
// {new_predecessor} as one of its predecessors.
5519 void AddLoopPredecessor(Block* single_predecessor, Block* new_predecessor) {
5520 GotoOp& new_predecessor_goto =
5521 new_predecessor->LastOperation(this->output_graph())
5522 .template Cast<GotoOp>();
5523 new_predecessor_goto.destination = single_predecessor;
5524 single_predecessor->AddPredecessor(new_predecessor);
5525 }
5526
5527 // Insert a new Block between {source} and {destination}, in order to maintain
5528 // the split-edge form.
5530 DCHECK(source->EndsWithBranchingOp(this->output_graph()));
5531 // Creating the new intermediate block
5532 Block* intermediate_block = NewBlock();
5533 intermediate_block->SetKind(Block::Kind::kBranchTarget);
5534 // Updating "predecessor" edge of {intermediate_block}. This needs to be
5535 // done before calling Bind, because otherwise Bind will think that this
5536 // block is not reachable.
5537 intermediate_block->AddPredecessor(source);
5538
5539 // Updating {source}'s last Branch/Switch/CheckException. Note that
5540 // this must be done before Binding {intermediate_block}, otherwise,
5541 // Reducer::Bind methods will see an invalid block being bound (because its
5542 // predecessor would be a branch, but none of its targets would be the block
5543 // being bound).
5544 Operation& op = this->output_graph().Get(
5545 this->output_graph().PreviousIndex(source->end()));
5546 switch (op.opcode) {
5547 case Opcode::kBranch: {
5548 BranchOp& branch = op.Cast<BranchOp>();
5549 if (branch.if_true == destination) {
5550 branch.if_true = intermediate_block;
5551 // We enforce that Branches if_false and if_true can never be the same
5552 // (there is a DCHECK in Assembler::Branch enforcing that).
5554 } else {
5556 branch.if_false = intermediate_block;
5557 }
5558 break;
5559 }
5560 case Opcode::kCheckException: {
5561 CheckExceptionOp& catch_exception_op = op.Cast<CheckExceptionOp>();
5562 if (catch_exception_op.didnt_throw_block == destination) {
5563 catch_exception_op.didnt_throw_block = intermediate_block;
5564 // We assume that CheckException's successor and catch_block
5565 // can never be the same (there is a DCHECK in
5566 // CheckExceptionOp::Validate enforcing that).
5567 DCHECK_NE(catch_exception_op.catch_block, destination);
5568 } else {
5569 DCHECK_EQ(catch_exception_op.catch_block, destination);
5570 catch_exception_op.catch_block = intermediate_block;
5571 // A catch block always has to start with a `CatchBlockBeginOp`.
5572 BindReachable(intermediate_block);
5573 intermediate_block->SetOrigin(source->OriginForBlockEnd());
5574 this->CatchBlockBegin();
5575 this->Goto(destination);
5576 return;
5577 }
5578 break;
5579 }
5580 case Opcode::kSwitch: {
5581 SwitchOp& switch_op = op.Cast<SwitchOp>();
5582 bool found = false;
5583 for (auto& case_block : switch_op.cases) {
5584 if (case_block.destination == destination) {
5585 case_block.destination = intermediate_block;
5586 DCHECK(!found);
5587 found = true;
5588#ifndef DEBUG
5589 break;
5590#endif
5591 }
5592 }
5593 DCHECK_IMPLIES(found, switch_op.default_case != destination);
5594 if (!found) {
5596 switch_op.default_case = intermediate_block;
5597 }
5598 break;
5599 }
5600
5601 default:
5602 UNREACHABLE();
5603 }
5604
5605 BindReachable(intermediate_block);
5606 intermediate_block->SetOrigin(source->OriginForBlockEnd());
5607 // Inserting a Goto in {intermediate_block} to {destination}. This will
5608 // create the edge from {intermediate_block} to {destination}. Note that
5609 // this will call AddPredecessor, but we've already removed the possible
5610 // edge of {destination} that need splitting, so no risks of infinite
5611 // recursion here.
5612 this->Goto(destination);
5613 }
5614
5617
5618 // `current_block_` is nullptr after emitting a block terminator and before
5619 // Binding the next block. During this time, emitting an operation doesn't do
5620 // anything (because in which block would it be emitted?). However, we also
5621 // want to prevent silently skipping operations because of a missing Bind.
5622 // Consider for instance a lowering that would do:
5623 //
5624 // __ Add(x, y)
5625 // __ Goto(B)
5626 // __ Add(i, j)
5627 //
5628 // The 2nd Add is unreachable, but this has to be a mistake, since we exitted
5629 // the current block before emitting it, and forgot to Bind a new block.
5630 // On the other hand, consider this:
5631 //
5632 // __ Add(x, y)
5633 // __ Goto(B1)
5634 // __ Bind(B2)
5635 // __ Add(i, j)
5636 //
5637 // It's possible that B2 is not reachable, in which case `Bind(B2)` will set
5638 // the current_block to nullptr.
5639 // Similarly, consider:
5640 //
5641 // __ Add(x, y)
5642 // __ DeoptimizeIf(cond)
5643 // __ Add(i, j)
5644 //
5645 // It's possible that a reducer lowers the `DeoptimizeIf` to an unconditional
5646 // `Deoptimize`.
5647 //
5648 // The 1st case should produce an error (because a Bind was forgotten), but
5649 // the 2nd and 3rd case should not.
5650 //
5651 // The way we achieve this is with the following `conceptually_in_a_block_`
5652 // boolean:
5653 // - when Binding a block (successfully or not), we set
5654 // `conceptually_in_a_block_` to true.
5655 // - when exiting a block (= emitting a block terminator), we set
5656 // `conceptually_in_a_block_` to false.
5657 // - after the AssemblerOpInterface lowers a non-block-terminator which
5658 // makes the current_block_ become nullptr (= the last operation of its
5659 // lowering became a block terminator), we set `conceptually_in_a_block_` to
5660 // true (overriding the "false" that was set when emitting the block
5661 // terminator).
5662 //
5663 // Note that there is one category of errors that this doesn't prevent: if a
5664 // lowering of a non-block terminator creates new control flow and forgets a
5665 // final Bind, we'll set `conceptually_in_a_block_` to true and assume that
5666 // this lowering unconditionally exits the control flow. However, it's hard to
5667 // distinguish between lowerings that voluntarily end with block terminators,
5668 // and those who forgot a Bind.
5670
5671 // TODO(dmercadier,tebbi): remove {current_operation_origin_} and pass instead
5672 // additional parameters to ReduceXXX methods.
5674
5675#ifdef DEBUG
5676 int intermediate_tracing_depth_ = 0;
5677#endif
5678
5679 template <class Next>
5680 friend class TSReducerBase;
5681 template <class AssemblerT>
5682 friend class CatchScopeImpl;
5683};
5684
5685template <class AssemblerT>
5687 public:
5688 CatchScopeImpl(AssemblerT& assembler, Block* catch_block)
5689 : assembler_(assembler),
5690 previous_catch_block_(assembler.current_catch_block_) {
5691 assembler_.current_catch_block_ = catch_block;
5692#ifdef DEBUG
5693 this->catch_block = catch_block;
5694#endif
5695 }
5696
5698 DCHECK_EQ(assembler_.current_catch_block_, catch_block);
5699 assembler_.current_catch_block_ = previous_catch_block_;
5700 }
5701
5706
5707 private:
5708 AssemblerT& assembler_;
5710#ifdef DEBUG
5711 Block* catch_block = nullptr;
5712#endif
5713
5714 template <class Reducers>
5715 friend class Assembler;
5716};
5717
// Convenience wrapper around Assembler: takes a list of reducer templates and
// instantiates Assembler with that list, appending TSReducerBase at the end
// of the reducer_list. Constructors are inherited unchanged.
5718template <template <class> class... Reducers>
5719class TSAssembler : public Assembler<reducer_list<Reducers..., TSReducerBase>> {
5720 public:
5721 using Assembler<reducer_list<Reducers..., TSReducerBase>>::Assembler;
5722};
5723
5725
5726} // namespace v8::internal::compiler::turboshaft
5727
5728#endif // V8_COMPILER_TURBOSHAFT_ASSEMBLER_H_
#define T
#define REDUCE(operation)
#define IF_NOT(...)
#define LIKELY(...)
union v8::internal::@341::BuiltinMetadata::KindSpecificData data
int default_case
static constexpr SourceLocation Current()
constexpr T * data() const
Definition vector.h:100
static V8_EXPORT_PRIVATE Callable CallableFor(Isolate *isolate, Builtin builtin)
Definition builtins.cc:214
Handle< Code > code() const
Definition callable.h:22
CallInterfaceDescriptor descriptor() const
Definition callable.h:23
static Handle< Code > CEntry(Isolate *isolate, int result_size=1, ArgvMode argv_mode=ArgvMode::kStack, bool builtin_exit_frame=false, bool switch_to_central_stack=false)
static const int kNoContext
Definition contexts.h:577
static ExternalReference Create(const SCTableReference &table_ref)
static const int kTrustedPointerTableBasePointerOffset
static constexpr int root_slot_offset(RootIndex root_index)
constexpr bool IsMapWord() const
static constexpr MachineType TaggedPointer()
static constexpr bool IsImmortalImmovable(RootIndex root_index)
Definition roots.h:616
static V8_EXPORT_PRIVATE const Function * FunctionForId(FunctionId id)
Definition runtime.cc:350
static constexpr Tagged< Smi > FromEnum(E value)
Definition smi.h:58
static constexpr Tagged< Smi > zero()
Definition smi.h:99
static ThreadId Current()
Definition thread-id.h:32
T * New(Args &&... args)
Definition zone.h:114
static FieldAccess ForJSArrayBufferViewBuffer()
static FieldAccess ForMap(WriteBarrierKind write_barrier=kMapWriteBarrier)
static FieldAccess ForJSArrayBufferBitField()
static CallDescriptor * GetStubCallDescriptor(Zone *zone, const CallInterfaceDescriptor &descriptor, int stack_parameter_count, CallDescriptor::Flags flags, Operator::Properties properties=Operator::kNoProperties, StubCallMode stub_mode=StubCallMode::kCallCodeObject)
Definition linkage.cc:587
const Operation & Get(OpIndex op_idx) const
Definition assembler.h:5382
void AddPredecessor(Block *source, Block *destination, bool branch)
Definition assembler.h:5411
Block * CreateSinglePredecessorForLoop(Block *loop_header)
Definition assembler.h:5488
void SetCurrentOrigin(OpIndex operation_origin)
Definition assembler.h:5363
typename ReducerStack< Reducers >::type Stack
Definition assembler.h:5294
V< AnyOrNone > current_operation_origin() const
Definition assembler.h:5378
void AddLoopPredecessor(Block *single_predecessor, Block *new_predecessor)
Definition assembler.h:5519
void SplitEdge(Block *source, Block *destination)
Definition assembler.h:5529
V8_INLINE void BindReachable(Block *block)
Definition assembler.h:5349
Assembler(PipelineData *data, Graph &input_graph, Graph &output_graph, Zone *phase_zone)
Definition assembler.h:5298
V< Any > ReduceProjection(V< Any > tuple, uint16_t index, RegisterRepresentation rep)
Definition assembler.h:5402
base::SmallVector< Block *, 8 > Predecessors() const
Definition graph.h:328
bool Contains(OpIndex op_idx) const
Definition graph.h:322
const Operation & LastOperation(const Graph &graph) const
Definition graph.h:1242
void SetOrigin(const Block *origin)
Definition graph.h:420
const Block * OriginForLoopHeader() const
Definition graph.h:432
void SetSingleLoopPredecessor(Block *single_loop_predecessor)
Definition graph.h:410
void AddPredecessor(Block *predecessor)
Definition graph.h:532
CatchScopeImpl & operator=(const CatchScopeImpl &)=delete
CatchScopeImpl(const CatchScopeImpl &)=delete
CatchScopeImpl & operator=(CatchScopeImpl &&)=delete
CatchScopeImpl(AssemblerT &assembler, Block *catch_block)
Definition assembler.h:5688
ConditionWithHint(V< Word32 > condition, BranchHint hint=BranchHint::kNone)
Definition assembler.h:334
ConditionWithHint(T condition, BranchHint hint=BranchHint::kNone)
Definition assembler.h:340
V< Any > WrapInTupleIfNeeded(const Op &op, V< Any > idx)
Definition assembler.h:952
static constexpr FloatRepresentation Float32()
static constexpr FloatRepresentation Float64()
void ControlFlowHelper_GotoIf(ConditionWithHint condition, L &label, const typename L::const_or_values_t &values)
Definition assembler.h:1460
bool ControlFlowHelper_BindIf(ConditionWithHint condition, ControlFlowHelper_IfState *state)
Definition assembler.h:1481
void ControlFlowHelper_Goto(L &label, const typename L::const_or_values_t &values)
Definition assembler.h:1453
bool ControlFlowHelper_BindIfNot(ConditionWithHint condition, ControlFlowHelper_IfState *state)
Definition assembler.h:1490
auto ControlFlowHelper_BindLoop(L &label) -> base::prepend_tuple_type< bool, typename L::values_t >
Definition assembler.h:1363
void ControlFlowHelper_EndIf(ControlFlowHelper_IfState *state)
Definition assembler.h:1510
void ControlFlowHelper_FinishIfBlock(ControlFlowHelper_IfState *state)
Definition assembler.h:1505
void ControlFlowHelper_EndForeachLoop(It iterable, LoopLabelFor< typename It::iterator_type > &header_label, Label<> &exit_label, typename It::iterator_type current_iterator)
Definition assembler.h:1419
void ControlFlowHelper_EndWhileLoop(L1 &header_label, L2 &exit_label)
Definition assembler.h:1444
bool ControlFlowHelper_BindElse(ControlFlowHelper_IfState *state)
Definition assembler.h:1499
auto ControlFlowHelper_Bind(L &label) -> base::prepend_tuple_type< bool, typename L::values_t >
Definition assembler.h:1355
std::tuple< bool, LoopLabel<>, Label<> > ControlFlowHelper_While(std::function< V< Word32 >()> cond_builder)
Definition assembler.h:1429
void ControlFlowHelper_GotoIfNot(ConditionWithHint condition, L &label, const typename L::const_or_values_t &values)
Definition assembler.h:1468
void RemoveLast(OpIndex index_of_last_operation)
Definition assembler.h:1104
V< None > REDUCE Branch(V< Word32 > condition, Block *if_true, Block *if_false, BranchHint hint)
Definition assembler.h:1163
OpIndex REDUCE FastApiCall(V< FrameState > frame_state, V< Object > data_argument, V< Context > context, base::Vector< const OpIndex > arguments, const FastApiCallParameters *parameters, base::Vector< const RegisterRepresentation > out_reps)
Definition assembler.h:1246
V< None > REDUCE CheckException(V< Any > throwing_operation, Block *successor, Block *catch_block)
Definition assembler.h:1281
bool CatchIfInCatchScope(OpIndex throwing_operation)
Definition assembler.h:1294
OpIndex REDUCE Phi(base::Vector< const OpIndex > inputs, RegisterRepresentation rep)
Definition assembler.h:1138
void FixLoopPhi(const PhiOp &input_phi, OpIndex output_index, Block *output_graph_loop)
Definition assembler.h:1108
OpIndex REDUCE PendingLoopPhi(OpIndex first, RegisterRepresentation rep)
Definition assembler.h:1145
V< None > REDUCE Switch(V< Word32 > input, base::Vector< SwitchOp::Case > cases, Block *default_case, BranchHint default_hint)
Definition assembler.h:1204
V< None > REDUCE Goto(Block *destination, bool is_backedge)
Definition assembler.h:1150
V8_INLINE Block * NewLoopHeader(const Block *origin=nullptr)
Definition graph.h:763
V8_INLINE Block * NewBlock(const Block *origin=nullptr)
Definition graph.h:766
V8_INLINE const Operation & Get(OpIndex i) const
Definition graph.h:618
LabelBase(LabelBase &&other) V8_NOEXCEPT
Definition assembler.h:462
void GotoIfNot(A &assembler, OpIndex condition, BranchHint hint, const values_t &values)
Definition assembler.h:427
std::tuple< base::SmallVector< V< Ts >, 2 >... > recorded_values_t
Definition assembler.h:397
base::prepend_tuple_type< bool, values_t > Bind(A &assembler)
Definition assembler.h:440
void Goto(A &assembler, const values_t &values)
Definition assembler.h:404
static void RecordValues(Block *source, BlockData &data, const values_t &values)
Definition assembler.h:466
std::tuple< maybe_const_or_v_t< Ts >... > const_or_values_t
Definition assembler.h:396
static values_t MaterializePhisImpl(A &assembler, BlockData &data, std::index_sequence< indices... >)
Definition assembler.h:505
LabelBase & operator=(const LabelBase &)=delete
static void RecordValuesImpl(BlockData &data, Block *source, const values_t &values, std::index_sequence< indices... >)
Definition assembler.h:478
void GotoIf(A &assembler, OpIndex condition, BranchHint hint, const values_t &values)
Definition assembler.h:414
Label(Label &&other) V8_NOEXCEPT
Definition assembler.h:540
LabelBase< false, Ts... > super
Definition assembler.h:531
Label & operator=(const Label &)=delete
void GotoIf(A &assembler, OpIndex condition, BranchHint hint, const values_t &values)
Definition assembler.h:584
base::prepend_tuple_type< bool, values_t > Bind(A &assembler)
Definition assembler.h:626
LoopLabel & operator=(const LoopLabel &)=delete
void FixLoopPhis(A &assembler, const typename super::values_t &values)
Definition assembler.h:687
static values_t MaterializeLoopPhisImpl(A &assembler, BlockData &data, std::index_sequence< indices... >)
Definition assembler.h:672
void Goto(A &assembler, const values_t &values)
Definition assembler.h:566
static values_t MaterializeLoopPhis(A &assembler, BlockData &data)
Definition assembler.h:666
base::prepend_tuple_type< bool, values_t > BindLoop(A &assembler)
Definition assembler.h:632
std::optional< values_t > pending_loop_phis_
Definition assembler.h:715
void FixLoopPhi(A &assembler, const typename super::values_t &values)
Definition assembler.h:696
void GotoIfNot(A &assembler, OpIndex condition, BranchHint hint, const values_t &values)
Definition assembler.h:605
LoopLabel(LoopLabel &&other) V8_NOEXCEPT
Definition assembler.h:558
static MemoryRepresentation FromMachineType(MachineType type)
static constexpr MemoryRepresentation AnyTagged()
static constexpr MemoryRepresentation Uint32()
static constexpr MemoryRepresentation ProtectedPointer()
static constexpr MemoryRepresentation TaggedPointer()
static constexpr MemoryRepresentation UintPtr()
static constexpr MemoryRepresentation Float64()
static constexpr OpIndex Invalid()
Definition index.h:88
constexpr uint32_t id() const
Definition index.h:61
ZoneWithName< kCompilationZoneName > & compilation_zone()
Definition phase.h:396
Derived * GetCommonDominator(RandomAccessStackDominatorNode< Derived > *other) const
Definition graph.h:1341
iterator_type Advance(A &assembler, iterator_type current_iterator) const
Definition assembler.h:156
OptionalV< Word32 > IsEnd(A &assembler, iterator_type current_iterator) const
Definition assembler.h:144
Range(ConstOrV< T > begin, ConstOrV< T > end, ConstOrV< T > stride=1)
Definition assembler.h:135
value_type Dereference(A &assembler, iterator_type current_iterator) const
Definition assembler.h:166
iterator_type Begin(A &assembler) const
Definition assembler.h:139
static constexpr RegisterRepresentation Float64()
static constexpr RegisterRepresentation WordPtr()
static constexpr RegisterRepresentation Tagged()
OptionalV< Word32 > IsEnd(A &, iterator_type) const
Definition assembler.h:237
Sequence(ConstOrV< T > begin, ConstOrV< T > stride=1)
Definition assembler.h:229
void DebugPrint(OpIndex input, RegisterRepresentation rep)
Definition assembler.h:4452
OpIndex LoadOffHeap(OpIndex address, MemoryRepresentation rep)
Definition assembler.h:2745
V< Object > CallBuiltin_GrowFastSmiOrObjectElements(Isolate *isolate, V< Object > object, V< Smi > size)
Definition assembler.h:3591
V< JSReceiver > CallBuiltin_ToObject(Isolate *isolate, V< Context > context, V< JSPrimitive > object)
Definition assembler.h:3698
V< Float64 > Float64Unary(V< Float64 > input, FloatUnaryOp::Kind kind)
Definition assembler.h:1888
void StoreFixedArrayElement(V< FixedArray > array, int index, V< Object > value, compiler::WriteBarrierKind write_barrier)
Definition assembler.h:3058
V< WordPtr > LoadExternalPointerFromObject(V< Object > object, int offset, ExternalPointerTag tag)
Definition assembler.h:2817
void TailCall(V< CallTarget > callee, base::Vector< const OpIndex > arguments, const TSCallDescriptor *descriptor)
Definition assembler.h:4025
void Branch(V< Word32 > condition, Block *if_true, Block *if_false, BranchHint hint=BranchHint::kNone)
Definition assembler.h:4338
V< FixedArray > NewArgumentsElements(V< Smi > arguments_count, CreateArgumentsType type, int formal_parameter_count)
Definition assembler.h:4609
V< Word32 > StringAt(V< String > string, V< WordPtr > position, StringAtOp::Kind kind)
Definition assembler.h:4536
void CheckMaps(V< HeapObject > heap_object, V< turboshaft::FrameState > frame_state, OptionalV< Map > map, const ZoneRefSet< Map > &maps, CheckMapsFlags flags, const FeedbackSource &feedback)
Definition assembler.h:4669
V< Word32 > CheckedSmiUntag(V< Object > object, V< turboshaft::FrameState > frame_state, const FeedbackSource &feedback)
Definition assembler.h:2213
void InitializeField(Uninitialized< T > &object, const FieldAccess &access, V< Any > value)
Definition assembler.h:3017
OpIndex TaggedBitcast(OpIndex input, RegisterRepresentation from, RegisterRepresentation to, TaggedBitcastOp::Kind kind)
Definition assembler.h:2051
OpIndex AtomicRMW(V< WordPtr > base, V< WordPtr > index, OpIndex value, AtomicRMWOp::BinOp bin_op, RegisterRepresentation in_out_rep, MemoryRepresentation memory_rep, MemoryAccessKind memory_access_kind)
Definition assembler.h:2664
OpIndex AtomicWord32Pair(V< WordPtr > base, OptionalV< WordPtr > index, OptionalV< Word32 > value_low, OptionalV< Word32 > value_high, OptionalV< Word32 > expected_low, OptionalV< Word32 > expected_high, AtomicWord32PairOp::Kind op_kind, int32_t offset)
Definition assembler.h:2685
V< Object > CallBuiltin_CallForwardVarargs(Isolate *isolate, Zone *graph_zone, Builtin builtin, V< turboshaft::FrameState > frame_state, V< Context > context, V< JSFunction > function, int num_args, int start_index, base::Vector< V< Object > > args, LazyDeoptOnThrow lazy_deopt_on_throw)
Definition assembler.h:3787
void JSLoopStackCheck(V< Context > context, V< turboshaft::FrameState > frame_state)
Definition assembler.h:3200
V< Boolean > CallBuiltin_SameValueNumbersOnly(Isolate *isolate, V< Object > left, V< Object > right)
Definition assembler.h:3640
detail::index_type_for_t< typename Descriptor::results_t > CallBuiltin(Isolate *isolate, V< turboshaft::FrameState > frame_state, V< Context > context, const typename Descriptor::arguments_t &args, LazyDeoptOnThrow lazy_deopt_on_throw=LazyDeoptOnThrow::kNo)
Definition assembler.h:3344
OpIndex LoadOffHeap(OpIndex address, OptionalOpIndex index, int32_t offset, MemoryRepresentation rep)
Definition assembler.h:2752
V< Smi > FindOrderedHashMapEntry(V< Object > table, V< Smi > key)
Definition assembler.h:4763
V< Object > MaybeGrowFastElements(V< Object > object, V< Object > elements, V< Word32 > index, V< Word32 > elements_length, V< turboshaft::FrameState > frame_state, GrowFastElementsMode mode, const FeedbackSource &feedback)
Definition assembler.h:4739
void StoreFieldImpl(V< Base > object, const FieldAccess &access, V< Any > value, bool maybe_initializing_or_transitioning)
Definition assembler.h:3023
V< Object > ToNumber(V< Object > input, V< turboshaft::FrameState > frame_state, V< Context > context, LazyDeoptOnThrow lazy_deopt_on_throw)
Definition assembler.h:1587
V< Float > FloatUnary(V< Float > input, FloatUnaryOp::Kind kind, FloatRepresentation rep)
Definition assembler.h:1884
V< Number > CallRuntime_DateCurrentTime(Isolate *isolate, V< Context > context)
Definition assembler.h:3883
void Store(OpIndex base, OpIndex value, StoreOp::Kind kind, MemoryRepresentation stored_rep, WriteBarrierKind write_barrier, int32_t offset=0, bool maybe_initializing_or_transitioning=false, IndirectPointerTag maybe_indirect_pointer_tag=kIndirectPointerNullTag)
Definition assembler.h:2879
V< Object > CallRuntime_TransitionElementsKind(Isolate *isolate, V< Context > context, V< HeapObject > object, V< Map > target_map)
Definition assembler.h:3934
V< Object > LoadProtectedFixedArrayElement(V< ProtectedFixedArray > array, V< WordPtr > index)
Definition assembler.h:2855
V< Boolean > StringLessThan(V< String > left, V< String > right)
Definition assembler.h:4591
V< Object > CallRuntime_TryMigrateInstanceAndMarkMapAsMigrationTarget(Isolate *isolate, V< Context > context, V< HeapObject > heap_object)
Definition assembler.h:3946
Word32 Word64 Word32 V< Untagged > ChangeOrDeopt(V< Untagged > input, V< turboshaft::FrameState > frame_state, ChangeOrDeoptOp::Kind kind, CheckForMinusZeroMode minus_zero_mode, const FeedbackSource &feedback)
Definition assembler.h:2598
void CallRuntime_ThrowConstructorReturnedNonObject(Isolate *isolate, V< turboshaft::FrameState > frame_state, V< Context > context, LazyDeoptOnThrow lazy_deopt_on_throw)
Definition assembler.h:3960
OpIndex Select(ConstOrV< Word32 > cond, OpIndex vtrue, OpIndex vfalse, RegisterRepresentation rep, BranchHint hint, SelectOp::Implementation implem)
Definition assembler.h:3259
void CodeComment(MessageWithSourceLocation message, Args &&... args)
Definition assembler.h:4476
OpIndex AtomicWord32PairLoad(V< WordPtr > base, OptionalV< WordPtr > index, int32_t offset)
Definition assembler.h:2696
V< turboshaft::Tuple< Word, Word32 > > OverflowCheckedBinop(V< Word > left, V< Word > right, OverflowCheckedBinopOp::Kind kind, WordRepresentation rep)
Definition assembler.h:1681
V< Object > EnsureWritableFastElements(V< Object > object, V< Object > elements)
Definition assembler.h:4735
Descriptor::result_t CallRuntime(Isolate *isolate, V< turboshaft::FrameState > frame_state, V< Context > context, LazyDeoptOnThrow lazy_deopt_on_throw, const typename Descriptor::arguments_t &args)
Definition assembler.h:3807
Word32 V< Word > V< turboshaft::FrameState > WordBinopDeoptOnOverflowOp::Kind WordRepresentation FeedbackSource feedback
Definition assembler.h:2009
V< Object > CallRuntime_TerminateExecution(Isolate *isolate, V< turboshaft::FrameState > frame_state, V< Context > context)
Definition assembler.h:3928
V< Number > ConvertFloat64ToNumber(V< Float64 > input, CheckForMinusZeroMode minus_zero_mode)
Definition assembler.h:2177
V< String > StringSubstring(V< String > string, V< Word32 > start, V< Word32 > end)
Definition assembler.h:4576
void DeoptimizeIfNot(V< Word32 > condition, V< turboshaft::FrameState > frame_state, const DeoptimizeParameters *parameters)
Definition assembler.h:4039
V< std::common_type_t< T, U > > Conditional(ConstOrV< Word32 > cond, V< T > vtrue, V< U > vfalse, BranchHint hint=BranchHint::kNone)
Definition assembler.h:3281
auto Projection(V< turboshaft::Tuple< Ts... > > tuple, RegisterRepresentation rep)
Definition assembler.h:4164
void CallRuntime_ThrowInvalidStringLength(Isolate *isolate, V< turboshaft::FrameState > frame_state, V< Context > context, LazyDeoptOnThrow lazy_deopt_on_throw)
Definition assembler.h:3996
void StoreElement(V< Base > object, const ElementAccess &access, ConstOrV< WordPtr > index, V< Any > value, bool is_array_buffer)
Definition assembler.h:5224
V< Rep > LoadField(V< WordPtr > raw_base, const compiler::FieldAccess &access)
Definition assembler.h:2916
detail::index_type_for_t< typename Descriptor::results_t > CallBuiltin(Isolate *isolate, const typename Descriptor::arguments_t &args)
Definition assembler.h:3422
V< Boolean > CallBuiltin_SameValue(Isolate *isolate, V< Object > left, V< Object > right)
Definition assembler.h:3635
void DeoptimizeIf(V< Word32 > condition, V< turboshaft::FrameState > frame_state, const DeoptimizeParameters *parameters)
Definition assembler.h:4035
void AssertImpl(V< Word32 > condition, const char *condition_string, const char *file, int line)
Definition assembler.h:4430
void JSStackCheck(V< Context > context, OptionalV< turboshaft::FrameState > frame_state, JSStackCheckOp::Kind kind)
Definition assembler.h:3194
V< Any > LoadFieldByIndex(V< Object > object, V< Word32 > index)
Definition assembler.h:4423
V< Object > CallBuiltin_GrowFastDoubleElements(Isolate *isolate, V< Object > object, V< Smi > size)
Definition assembler.h:3586
V< String > CallBuiltin_Typeof(Isolate *isolate, V< Object > object)
Definition assembler.h:3728
V< Code > BuiltinCode(Builtin builtin, Isolate *isolate)
Definition assembler.h:2366
Float32 Float64 Float64 Float32 Float32 Float64 Float64 Float64 Float32
Definition assembler.h:2481
V< Word32 > ObjectIsNumericValue(V< Object > input, NumericKind kind, FloatRepresentation input_rep)
Definition assembler.h:2128
V< Word > ShiftRightArithmetic(V< Word > left, uint32_t right, WordRepresentation rep)
Definition assembler.h:1792
DECL_CHANGE_V(ReversibleFloat64ToInt32, kSignedFloatTruncateOverflowToMin, kReversible, Float64, Word32) DECL_CHANGE_V(ReversibleFloat64ToUint32
V< String > StringConcat(V< Smi > length, V< String > left, V< String > right)
Definition assembler.h:4580
OpIndex Load(OpIndex base, LoadOp::Kind kind, MemoryRepresentation loaded_rep, int32_t offset=0)
Definition assembler.h:2741
V< WordPtr > GetElementStartPointer(V< Base > object, const ElementAccess &access)
Definition assembler.h:3103
V< FixedArray > CallBuiltin_NewRestArgumentsElements(Isolate *isolate, V< WordPtr > frame, V< WordPtr > formal_parameter_count, V< Smi > arguments_count)
Definition assembler.h:3612
V< Context > CallBuiltin_FastNewFunctionContextFunction(Isolate *isolate, V< turboshaft::FrameState > frame_state, V< Context > context, V< ScopeInfo > scope_info, ConstOrV< Word32 > slot_count, LazyDeoptOnThrow lazy_deopt_on_throw)
Definition assembler.h:3703
V< Word32 > CompareMaps(V< HeapObject > heap_object, OptionalV< Map > map, const ZoneRefSet< Map > &maps)
Definition assembler.h:4664
void StaticAssert(V< Word32 > condition, const char *source)
Definition assembler.h:4105
V< String > CallBuiltin_StringAdd_CheckNone(Isolate *isolate, V< Context > context, V< String > left, V< String > right)
Definition assembler.h:3645
V< String > CallBuiltin_StringSubstring(Isolate *isolate, V< String > string, V< WordPtr > start, V< WordPtr > end)
Definition assembler.h:3689
void CallRuntime_ThrowSuperAlreadyCalledError(Isolate *isolate, V< turboshaft::FrameState > frame_state, V< Context > context, LazyDeoptOnThrow lazy_deopt_on_throw)
Definition assembler.h:3975
V< T > LoadElement(V< Class > object, const ElementAccessTS< Class, T > &access, V< WordPtr > index)
Definition assembler.h:3083
V< Object > LoadProtectedPointerField(V< Object > base, OptionalV< WordPtr > index, LoadOp::Kind kind=LoadOp::Kind::TaggedBase(), int offset=0, int element_size_log2=kTaggedSizeLog2)
Definition assembler.h:2760
V< Boolean > StringComparison(V< String > left, V< String > right, StringComparisonOp::Kind kind)
Definition assembler.h:4584
void CallRuntime_DebugPrint(Isolate *isolate, V< Object > object)
Definition assembler.h:3887
V< Object > CallBuiltin_CallWithArrayLike(Isolate *isolate, Zone *graph_zone, V< turboshaft::FrameState > frame_state, V< Context > context, V< Object > receiver, V< Object > function, V< Object > arguments_list, LazyDeoptOnThrow lazy_deopt_on_throw)
Definition assembler.h:3771
V< Word32 > TaggedEqual(V< Object > left, V< Object > right)
Definition assembler.h:1807
void InitializeField(Uninitialized< Object > &object, const FieldAccessTS< Class, T > &access, maybe_const_or_v_t< T > value)
Definition assembler.h:3007
OpIndex DecodeExternalPointer(OpIndex handle, ExternalPointerTag tag)
Definition assembler.h:3184
V< turboshaft::FrameState > FrameState(base::Vector< const OpIndex > inputs, bool inlined, const FrameStateData *data)
Definition assembler.h:4030
V< Any > CallBuiltinImpl(Isolate *isolate, Builtin builtin, OptionalV< turboshaft::FrameState > frame_state, base::Vector< const OpIndex > arguments, const TSCallDescriptor *desc, OpEffects effects)
Definition assembler.h:3505
void Store(OpIndex base, OptionalOpIndex index, OpIndex value, StoreOp::Kind kind, MemoryRepresentation stored_rep, WriteBarrierKind write_barrier, int32_t offset=0, uint8_t element_size_log2=0, bool maybe_initializing_or_transitioning=false, IndirectPointerTag maybe_indirect_pointer_tag=kIndirectPointerNullTag)
Definition assembler.h:2868
V< T > Phi(const base::Vector< V< T > > &inputs)
Definition assembler.h:4117
V< Code > CEntryStubConstant(Isolate *isolate, int result_size, ArgvMode argv_mode=ArgvMode::kStack, bool builtin_exit_frame=false)
Definition assembler.h:2414
V< Float64 > LoadHeapNumberValue(V< HeapNumber > heap_number)
Definition assembler.h:2982
OpIndex AtomicWord32PairCompareExchange(V< WordPtr > base, OptionalV< WordPtr > index, V< Word32 > value_low, V< Word32 > value_high, V< Word32 > expected_low, V< Word32 > expected_high, int32_t offset=0)
Definition assembler.h:2707
void InitializeArrayBufferElement(Uninitialized< Base > &object, const ElementAccess &access, V< WordPtr > index, V< Any > value)
Definition assembler.h:3137
DECL_MULTI_REP_UNARY_V(IntAbsCheckOverflow, OverflowCheckedUnary, WordRepresentation, Abs, Word) DECL_SINGLE_REP_UNARY_V(Int32AbsCheckOverflow
Ret CallRuntimeImpl(Isolate *isolate, Runtime::FunctionId function, const TSCallDescriptor *desc, V< turboshaft::FrameState > frame_state, V< Context > context, const Args &args)
Definition assembler.h:3842
void Dcheck(V< Word32 > condition, const char *message, const char *file, int line, const SourceLocation &loc=SourceLocation::Current())
Definition assembler.h:4178
void TransitionAndStoreArrayElement(V< JSArray > array, V< WordPtr > index, V< Any > value, TransitionAndStoreArrayElementOp::Kind kind, MaybeHandle< Map > fast_map, MaybeHandle< Map > double_map)
Definition assembler.h:4649
V< String > StringFromCodePointAt(V< String > string, V< WordPtr > index)
Definition assembler.h:4572
V< T > Parameter(int index, const char *debug_name=nullptr)
Definition assembler.h:3315
OpIndex AtomicWord32PairBinop(V< WordPtr > base, OptionalV< WordPtr > index, V< Word32 > value_low, V< Word32 > value_high, AtomicRMWOp::BinOp bin_op, int32_t offset=0)
Definition assembler.h:2715
V< BigInt > BigIntUnary(V< BigInt > input, BigIntUnaryOp::Kind kind)
Definition assembler.h:4522
V< Word32 > RootEqual(V< Object > input, RootIndex root, Isolate *isolate)
Definition assembler.h:1811
V< Word32 > HasInstanceType(V< Object > object, InstanceType instance_type)
Definition assembler.h:2977
void CallRuntime_ThrowAccessedUninitializedVariable(Isolate *isolate, V< turboshaft::FrameState > frame_state, V< Context > context, LazyDeoptOnThrow lazy_deopt_on_throw, V< Object > object)
Definition assembler.h:3952
detail::index_type_for_t< typename Descriptor::results_t > CallBuiltin(Isolate *isolate, V< turboshaft::FrameState > frame_state, const typename Descriptor::arguments_t &args, LazyDeoptOnThrow lazy_deopt_on_throw=LazyDeoptOnThrow::kNo)
Definition assembler.h:3397
V< Word32 > TruncateWordPtrToWord32(ConstOrV< WordPtr > input)
Definition assembler.h:2491
V< Any > Tuple(base::Vector< const V< Any > > indices)
Definition assembler.h:4134
OpIndex Phi(std::initializer_list< OpIndex > inputs, RegisterRepresentation rep)
Definition assembler.h:4112
V< Word32 > RelocatableWasmCanonicalSignatureId(uint32_t canonical_id)
Definition assembler.h:2397
void StoreOffHeap(OpIndex address, OptionalOpIndex index, OpIndex value, MemoryRepresentation rep, int32_t offset)
Definition assembler.h:2903
V< FixedArray > CallBuiltin_NewStrictArgumentsElements(Isolate *isolate, V< WordPtr > frame, V< WordPtr > formal_parameter_count, V< Smi > arguments_count)
Definition assembler.h:3605
V< Object > ToNumberOrNumeric(V< Object > input, V< turboshaft::FrameState > frame_state, V< Context > context, Object::Conversion kind, LazyDeoptOnThrow lazy_deopt_on_throw)
Definition assembler.h:1580
V< Word32 > Float64SameValue(ConstOrV< Float64 > left, ConstOrV< Float64 > right)
Definition assembler.h:4718
auto Projection(V< turboshaft::Tuple< Ts... > > tuple)
Definition assembler.h:4156
void Deoptimize(V< turboshaft::FrameState > frame_state, const DeoptimizeParameters *parameters)
Definition assembler.h:4066
V< JSFunction > CallRuntime_NewClosure(Isolate *isolate, V< Context > context, V< SharedFunctionInfo > shared_function_info, V< FeedbackCell > feedback_cell)
Definition assembler.h:4002
V< JSPrimitive > ConvertUntaggedToJSPrimitiveOrDeopt(V< Untagged > input, V< turboshaft::FrameState > frame_state, ConvertUntaggedToJSPrimitiveOrDeoptOp::JSPrimitiveKind kind, RegisterRepresentation input_rep, ConvertUntaggedToJSPrimitiveOrDeoptOp::InputInterpretation input_interpretation, const FeedbackSource &feedback)
Definition assembler.h:2186
OpIndex AtomicCompareExchange(V< WordPtr > base, V< WordPtr > index, OpIndex expected, OpIndex new_value, RegisterRepresentation result_rep, MemoryRepresentation input_rep, MemoryAccessKind memory_access_kind)
Definition assembler.h:2675
V< Boolean > CallBuiltin_ToBoolean(Isolate *isolate, V< Object > object)
Definition assembler.h:3694
V< T > LoadNonArrayBufferElement(V< Base > object, const ElementAccess &access, V< WordPtr > index)
Definition assembler.h:3098
DECL_MULTI_REP_BINOP(ShiftRightArithmeticShiftOutZeros, Shift, WordRepresentation, ShiftRightArithmeticShiftOutZeros) DECL_SINGLE_REP_SHIFT_V(Word32ShiftRightArithmeticShiftOutZeros
DECL_CHANGE_V(TruncateFloat64ToInt64OverflowToMin, kSignedFloatTruncateOverflowToMin, kNoAssumption, Float64, Word64) DECL_CHANGE_V(TruncateFloat32ToInt32OverflowToMin
V< Boolean > CallBuiltin_StringLessThanOrEqual(Isolate *isolate, V< String > left, V< String > right)
Definition assembler.h:3661
void StoreDataViewElement(V< Object > object, V< WordPtr > storage, V< WordPtr > index, OpIndex value, ConstOrV< Word32 > is_little_endian, ExternalArrayType element_type)
Definition assembler.h:4641
V< Object > LoadFixedArrayElement(V< FixedArray > array, int index)
Definition assembler.h:2829
V< Float64 > LoadFixedDoubleArrayElement(V< FixedDoubleArray > array, int index)
Definition assembler.h:2840
OpIndex Load(OpIndex base, OptionalOpIndex index, LoadOp::Kind kind, MemoryRepresentation loaded_rep, RegisterRepresentation result_rep, int32_t offset=0, uint8_t element_size_log2=0)
Definition assembler.h:2726
V< Word32 > Word32SignHint(V< Word32 > input, Word32SignHintOp::Sign sign)
Definition assembler.h:1536
V< Object > LoadTrustedPointerField(V< HeapObject > base, LoadOp::Kind kind, IndirectPointerTag tag, int offset=0)
Definition assembler.h:2812
V< String > CallRuntime_SymbolDescriptiveString(Isolate *isolate, V< turboshaft::FrameState > frame_state, V< Context > context, V< Symbol > symbol, LazyDeoptOnThrow lazy_deopt_on_throw)
Definition assembler.h:3921
Float32 Float64 Float64 Float32 Float32 Float64 Float64 Float64 Float64 kTruncate
Definition assembler.h:2484
void DeoptimizeIf(V< Word32 > condition, V< turboshaft::FrameState > frame_state, DeoptimizeReason reason, const FeedbackSource &feedback)
Definition assembler.h:4044
void AssumeMap(V< HeapObject > heap_object, const ZoneRefSet< Map > &maps)
Definition assembler.h:4677
V< Object > CallRuntime_HandleNoHeapWritesInterrupts(Isolate *isolate, V< turboshaft::FrameState > frame_state, V< Context > context)
Definition assembler.h:3891
OpIndex FastApiCall(V< turboshaft::FrameState > frame_state, V< Object > data_argument, V< Context > context, base::Vector< const OpIndex > arguments, const FastApiCallParameters *parameters, base::Vector< const RegisterRepresentation > out_reps)
Definition assembler.h:4722
Word32 Word Word64 Word32 V< Word32 > ShiftOp::Kind WordRepresentation rep
Definition assembler.h:1744
V< Word32 > LoadHeapInt32Value(V< HeapNumber > heap_number)
Definition assembler.h:2987
V< Smi > StringIndexOf(V< String > string, V< String > search, V< Smi > position)
Definition assembler.h:4568
V< Word32 > StringCharCodeAt(V< String > string, V< WordPtr > position)
Definition assembler.h:4540
OpIndex Parameter(int index, RegisterRepresentation rep, const char *debug_name=nullptr)
Definition assembler.h:3295
V< Smi > FindOrderedHashSetEntry(V< Object > table, V< Smi > key)
Definition assembler.h:4767
void CallBuiltin_DebugPrintWordPtr(Isolate *isolate, V< Context > context, V< WordPtr > value)
Definition assembler.h:3569
void Switch(V< Word32 > input, base::Vector< SwitchOp::Case > cases, Block *default_case, BranchHint default_hint=BranchHint::kNone)
Definition assembler.h:3288
V< JSFunction > CallRuntime_NewClosure_Tenured(Isolate *isolate, V< Context > context, V< SharedFunctionInfo > shared_function_info, V< FeedbackCell > feedback_cell)
Definition assembler.h:4009
V< Number > ConvertPlainPrimitiveToNumber(V< PlainPrimitive > input)
Definition assembler.h:2136
void StoreTypedElement(OpIndex buffer, V< Object > base, V< WordPtr > external, V< WordPtr > index, OpIndex value, ExternalArrayType array_type)
Definition assembler.h:4634
void TransitionElementsKindOrCheckMap(V< HeapObject > object, V< Map > map, V< turboshaft::FrameState > frame_state, const ElementsTransitionWithMultipleSources &transition)
Definition assembler.h:4752
void CheckEqualsInternalizedString(V< Object > expected, V< Object > value, V< turboshaft::FrameState > frame_state)
Definition assembler.h:4687
V< Rep > LoadField(V< Object > object, const compiler::FieldAccess &access)
Definition assembler.h:2910
OpIndex CheckTurboshaftTypeOf(OpIndex input, RegisterRepresentation rep, Type expected_type, bool successful)
Definition assembler.h:4170
void Deoptimize(V< turboshaft::FrameState > frame_state, DeoptimizeReason reason, const FeedbackSource &feedback)
Definition assembler.h:4070
V< std::common_type_t< T, U > > Select(ConstOrV< Word32 > cond, V< T > vtrue, V< U > vfalse, RegisterRepresentation rep, BranchHint hint, SelectOp::Implementation implem)
Definition assembler.h:3249
Word32 Word Word64 V< Word32 > Word32BitwiseNot(ConstOrV< Word32 > input)
Definition assembler.h:1673
void StoreOffHeap(OpIndex address, OpIndex value, MemoryRepresentation rep, int32_t offset=0)
Definition assembler.h:2898
V< Number > CallBuiltin_ToNumber(Isolate *isolate, V< turboshaft::FrameState > frame_state, V< Context > context, V< Object > input, LazyDeoptOnThrow lazy_deopt_on_throw)
Definition assembler.h:3536
V< FixedArray > CallBuiltin_NewSloppyArgumentsElements(Isolate *isolate, V< WordPtr > frame, V< WordPtr > formal_parameter_count, V< Smi > arguments_count)
Definition assembler.h:3598
V< Float64 > LoadFixedDoubleArrayElement(V< FixedDoubleArray > array, V< WordPtr > index)
Definition assembler.h:2845
V< Object > CallBuiltin_CallWithSpread(Isolate *isolate, Zone *graph_zone, V< turboshaft::FrameState > frame_state, V< Context > context, V< Object > function, int num_args_no_spread, V< Object > spread, base::Vector< V< Object > > args_no_spread, LazyDeoptOnThrow lazy_deopt_on_throw)
Definition assembler.h:3752
V< Boolean > CallRuntime_HasInPrototypeChain(Isolate *isolate, V< turboshaft::FrameState > frame_state, V< Context > context, LazyDeoptOnThrow lazy_deopt_on_throw, V< Object > object, V< HeapObject > prototype)
Definition assembler.h:4016
V< Word > TruncateJSPrimitiveToUntaggedOrDeopt(V< JSPrimitive > object, V< turboshaft::FrameState > frame_state, TruncateJSPrimitiveToUntaggedOrDeoptOp::UntaggedKind kind, TruncateJSPrimitiveToUntaggedOrDeoptOp::InputRequirement input_requirement, const FeedbackSource &feedback)
Definition assembler.h:2236
OpIndex LoadDataViewElement(V< Object > object, V< WordPtr > storage, V< WordPtr > index, V< Word32 > is_little_endian, ExternalArrayType element_type)
Definition assembler.h:4623
void CallRuntime_ThrowSuperNotCalled(Isolate *isolate, V< turboshaft::FrameState > frame_state, V< Context > context, LazyDeoptOnThrow lazy_deopt_on_throw)
Definition assembler.h:3981
DECL_MULTI_REP_CHECK_BINOP_V(IntAddCheckOverflow, OverflowCheckedBinop, SignedAdd, Word) DECL_SINGLE_REP_CHECK_BINOP_V(Int32AddCheckOverflow
OpIndex FloatConstant(double value, FloatRepresentation rep)
Definition assembler.h:2313
V< Number > CallBuiltin_StringToNumber(Isolate *isolate, V< String > input)
Definition assembler.h:3685
V< Boolean > SameValue(V< Object > left, V< Object > right, SameValueOp::Mode mode)
Definition assembler.h:4713
Word32 V< Word > V< turboshaft::FrameState > frame_state
Definition assembler.h:2006
V< Any > Tuple(std::initializer_list< V< Any > > indices)
Definition assembler.h:4137
V< Object > CheckedClosure(V< Object > input, V< turboshaft::FrameState > frame_state, Handle< FeedbackCell > feedback_cell)
Definition assembler.h:4681
V< Object > LoadTrustedPointerField(V< HeapObject > base, OptionalV< Word32 > index, LoadOp::Kind kind, IndirectPointerTag tag, int offset=0)
Definition assembler.h:2779
V< T > LoadField(V< Obj > object, const FieldAccessTS< Class, T > &field) implicitly_constructible_from< Obj >
Definition assembler.h:2922
Float Float Float32 Float Word Word64 Word32 PopCount
Definition assembler.h:1975
V< Boolean > CallBuiltin_StringLessThan(Isolate *isolate, V< String > left, V< String > right)
Definition assembler.h:3656
V< Any > Projection(V< Any > tuple, uint16_t index, RegisterRepresentation rep)
Definition assembler.h:4152
void Return(V< Word32 > pop_count, base::Vector< const OpIndex > return_values, bool spill_caller_frame_slots=false)
Definition assembler.h:3319
V< Word64 > ChangeFloat64ToInt64OrDeopt(V< Float64 > input, V< turboshaft::FrameState > frame_state, CheckForMinusZeroMode minus_zero_mode, const FeedbackSource &feedback)
Definition assembler.h:2631
V< BigInt > BigIntBinop(V< BigInt > left, V< BigInt > right, V< turboshaft::FrameState > frame_state, BigIntBinopOp::Kind kind)
Definition assembler.h:4486
V< turboshaft::Tuple< Ts... > > Tuple(V< Ts >... indices)
Definition assembler.h:4141
V< Object > GenericBinop(V< Object > left, V< Object > right, V< turboshaft::FrameState > frame_state, V< Context > context, GenericBinopOp::Kind kind, LazyDeoptOnThrow lazy_deopt_on_throw)
Definition assembler.h:1547
void StoreField(V< Base > object, const FieldAccess &access, V< Any > value)
Definition assembler.h:3001
V< WordPtr > FindOrderedHashMapEntryForInt32Key(V< Object > table, V< Word32 > key)
Definition assembler.h:4771
void Initialize(Uninitialized< T > &object, OpIndex value, MemoryRepresentation stored_rep, WriteBarrierKind write_barrier, int32_t offset=0)
Definition assembler.h:2890
V< String > CallBuiltin_ToString(Isolate *isolate, V< turboshaft::FrameState > frame_state, V< Context > context, V< Object > input, LazyDeoptOnThrow lazy_deopt_on_throw)
Definition assembler.h:3623
V< Object > CallRuntime_StackGuardWithGap(Isolate *isolate, V< turboshaft::FrameState > frame_state, V< Context > context, V< Smi > gap)
Definition assembler.h:3902
V< T > FinishInitialization(Uninitialized< T > &&uninitialized)
Definition assembler.h:3168
V< ConsString > NewConsString(V< Word32 > length, V< String > first, V< String > second)
Definition assembler.h:4398
V< T > LoadArrayBufferElement(V< Base > object, const ElementAccess &access, V< WordPtr > index)
Definition assembler.h:3091
Word32 Word Word64 Word32 V< Word32 > ShiftOp::Kind kind
Definition assembler.h:1743
V< Untagged > ConvertJSPrimitiveToUntaggedOrDeopt(V< Object > object, V< turboshaft::FrameState > frame_state, ConvertJSPrimitiveToUntaggedOrDeoptOp::JSPrimitiveKind from_kind, ConvertJSPrimitiveToUntaggedOrDeoptOp::UntaggedKind to_kind, CheckForMinusZeroMode minus_zero_mode, const FeedbackSource &feedback)
Definition assembler.h:2205
V< JSFunction > CallBuiltin_FastNewClosure(Isolate *isolate, V< turboshaft::FrameState > frame_state, V< Context > context, V< SharedFunctionInfo > shared_function_info, V< FeedbackCell > feedback_cell)
Definition assembler.h:3721
V< Object > CallRuntime_StringCharCodeAt(Isolate *isolate, V< Context > context, V< String > string, V< Number > index)
Definition assembler.h:3908
ConditionalGotoStatus BranchAndBind(V< Word32 > condition, Block *if_true, Block *if_false, BranchHint hint, Block *to_bind)
Definition assembler.h:5245
ConditionalGotoStatus GotoIfNot(ConditionWithHint condition, Block *if_false)
Definition assembler.h:4370
V< Object > GenericUnop(V< Object > input, V< turboshaft::FrameState > frame_state, V< Context > context, GenericUnopOp::Kind kind, LazyDeoptOnThrow lazy_deopt_on_throw)
Definition assembler.h:1564
void Branch(ConditionWithHint condition, Block *if_true, Block *if_false)
Definition assembler.h:4342
ConditionalGotoStatus GotoIfNot(V< Word32 > condition, Block *if_false, BranchHint hint=BranchHint::kNone)
Definition assembler.h:4360
void TransitionElementsKind(V< HeapObject > object, const ElementsTransition &transition)
Definition assembler.h:4748
Float Float Float32 Float Word Word64 Word32 Word SignExtend16
Definition assembler.h:1984
void InitializeNonArrayBufferElement(Uninitialized< Base > &object, const ElementAccess &access, V< WordPtr > index, V< Any > value)
Definition assembler.h:3145
V< Word > ShiftLeft(V< Word > left, uint32_t right, WordRepresentation rep)
Definition assembler.h:1798
V< WordPtr > StackSlot(int size, int alignment, bool is_tagged=false)
Definition assembler.h:3227
V< Smi > CallBuiltin_FindOrderedHashSetEntry(Isolate *isolate, V< Context > context, V< Object > set, V< Smi > key)
Definition assembler.h:3580
V< Float32 > resolve(const ConstOrV< Float32 > &v)
Definition assembler.h:5152
void StoreArrayBufferElement(V< Base > object, const ElementAccess &access, V< WordPtr > index, V< Any > value)
Definition assembler.h:3110
Float32 Float64 Float64 Float32 Float32 Float64 Float64 Float64 Float64 Word32 V< Word > ZeroExtendWord32ToRep(V< Word32 > value, WordRepresentation rep)
Definition assembler.h:2486
V< BigInt > CallRuntime_BigIntUnaryOp(Isolate *isolate, V< Context > context, V< BigInt > input, ::Operation operation)
Definition assembler.h:3875
V< Word > TruncateJSPrimitiveToUntagged(V< JSPrimitive > object, TruncateJSPrimitiveToUntaggedOp::UntaggedKind kind, TruncateJSPrimitiveToUntaggedOp::InputAssumptions input_assumptions)
Definition assembler.h:2223
V< String > CallBuiltin_NumberToString(Isolate *isolate, V< Number > input)
Definition assembler.h:3619
V< HeapObject > HeapConstantHole(Handle< HeapObject > value)
Definition assembler.h:2362
V< Word32 > StringCodePointAt(V< String > string, V< WordPtr > position)
Definition assembler.h:4543
V< Object > CallBuiltinWithVarStackArgs(Isolate *isolate, Zone *graph_zone, Builtin builtin, V< turboshaft::FrameState > frame_state, int num_stack_args, base::Vector< OpIndex > arguments, LazyDeoptOnThrow lazy_deopt_on_throw)
Definition assembler.h:3733
V< R > Call(V< CallTarget > callee, std::initializer_list< OpIndex > arguments, const TSCallDescriptor *descriptor, OpEffects effects=OpEffects().CanCallAnything())
Definition assembler.h:3336
void CallBuiltin_CheckTurbofanType(Isolate *isolate, V< Context > context, V< Object > object, V< TurbofanType > allocated_type, V< Smi > node_id)
Definition assembler.h:3551
V< String > CallBuiltin_StringFromCodePointAt(Isolate *isolate, V< String > string, V< WordPtr > index)
Definition assembler.h:3671
V< HeapNumber > AllocateHeapNumberWithValue(V< Float64 > value, Factory *factory)
Definition assembler.h:3174
V< Word32 > ArrayBufferIsDetached(V< JSArrayBufferView > object)
Definition assembler.h:3151
V< FixedDoubleArray > NewDoubleArray(V< WordPtr > length, AllocationType allocation_type)
Definition assembler.h:4406
OpIndex Phi(base::Vector< const OpIndex > inputs, RegisterRepresentation rep)
Definition assembler.h:4109
void InitializeElement(Uninitialized< Class > &object, const ElementAccessTS< Class, T > &access, ConstOrV< WordPtr > index, V< T > value)
Definition assembler.h:3127
void StoreFixedArrayElement(V< FixedArray > array, V< WordPtr > index, V< Object > value, compiler::WriteBarrierKind write_barrier)
Definition assembler.h:3065
V< turboshaft::Tuple< Any, Any > > Tuple(OpIndex left, OpIndex right)
Definition assembler.h:4147
void CallBuiltin_DebugPrintFloat64(Isolate *isolate, V< Context > context, V< Float64 > value)
Definition assembler.h:3564
ConditionalGotoStatus GotoIf(V< Word32 > condition, Block *if_true, BranchHint hint=BranchHint::kNone)
Definition assembler.h:4347
V< Word32 > ChangeFloat64ToUint32OrDeopt(V< Float64 > input, V< turboshaft::FrameState > frame_state, CheckForMinusZeroMode minus_zero_mode, const FeedbackSource &feedback)
Definition assembler.h:2615
V< Object > LoadProtectedFixedArrayElement(V< ProtectedFixedArray > array, int index)
Definition assembler.h:2861
V< AnyFixedArray > NewArray(V< WordPtr > length, NewArrayOp::Kind kind, AllocationType allocation_type)
Definition assembler.h:4402
V< Float64 > BitcastWord32PairToFloat64(ConstOrV< Word32 > high_word32, ConstOrV< Word32 > low_word32)
Definition assembler.h:2045
V< Word32 > StackPointerGreaterThan(V< WordPtr > limit, StackCheckKind kind)
Definition assembler.h:3211
void StoreNonArrayBufferElement(V< Base > object, const ElementAccess &access, V< WordPtr > index, V< Any > value)
Definition assembler.h:3115
V< Object > ConvertJSPrimitiveToObject(V< JSPrimitive > value, V< Context > native_context, V< JSGlobalProxy > global_proxy, ConvertReceiverMode mode)
Definition assembler.h:2246
UnsignedLessThan SignedLessThanOrEqual Word64 UnsignedLessThanOrEqual Word64 SignedLessThanOrEqual Float64 V< Word32 > Comparison(OpIndex left, OpIndex right, ComparisonOp::Kind kind, RegisterRepresentation rep)
Definition assembler.h:1876
void StoreElement(V< Class > object, const ElementAccessTS< Class, T > &access, ConstOrV< WordPtr > index, V< T > value)
Definition assembler.h:3121
detail::index_type_for_t< typename Descriptor::results_t > CallBuiltin(Isolate *isolate, V< Context > context, const typename Descriptor::arguments_t &args)
Definition assembler.h:3372
V< Untagged > ConvertJSPrimitiveToUntagged(V< JSPrimitive > primitive, ConvertJSPrimitiveToUntaggedOp::UntaggedKind kind, ConvertJSPrimitiveToUntaggedOp::InputAssumptions input_assumptions)
Definition assembler.h:2197
Descriptor::result_t CallRuntime(Isolate *isolate, V< Context > context, const typename Descriptor::arguments_t &args)
Definition assembler.h:3825
void FailAssert(const char *message, const std::vector< FileAndLine > &files_and_lines, const SourceLocation &loc)
Definition assembler.h:4215
V< Object > LoadProtectedPointerField(V< Object > base, LoadOp::Kind kind, int32_t offset)
Definition assembler.h:2773
void StoreSignedSmallElement(V< JSArray > array, V< WordPtr > index, V< Word32 > value)
Definition assembler.h:4657
V< Object > LoadStackArgument(V< Object > base, V< WordPtr > index)
Definition assembler.h:4630
Uninitialized< T > Allocate(ConstOrV< WordPtr > size, AllocationType type)
Definition assembler.h:3160
ConditionalGotoStatus GotoIf(ConditionWithHint condition, Block *if_true)
Definition assembler.h:4356
V< Boolean > CallBuiltin_StringEqual(Isolate *isolate, V< String > left, V< String > right, V< WordPtr > length)
Definition assembler.h:3651
Float32 Float64 Float64 Float32 Float32 Float64 Float64 Float64 kFloatConversion
Definition assembler.h:2480
V< Word32 > ObjectIs(V< Object > input, ObjectIsOp::Kind kind, ObjectIsOp::InputAssumptions input_assumptions)
Definition assembler.h:2084
V< JSPrimitive > ConvertUntaggedToJSPrimitive(V< Untagged > input, ConvertUntaggedToJSPrimitiveOp::JSPrimitiveKind kind, RegisterRepresentation input_rep, ConvertUntaggedToJSPrimitiveOp::InputInterpretation input_interpretation, CheckForMinusZeroMode minus_zero_mode)
Definition assembler.h:2153
V< Word32Pair > Word32PairBinop(V< Word32 > left_low, V< Word32 > left_high, V< Word32 > right_low, V< Word32 > right_high, Word32PairBinopOp::Kind kind)
Definition assembler.h:4529
void CallRuntime_ThrowCalledNonCallable(Isolate *isolate, V< turboshaft::FrameState > frame_state, V< Context > context, LazyDeoptOnThrow lazy_deopt_on_throw, V< Object > value)
Definition assembler.h:3988
V< Object > CallRuntime_TryMigrateInstance(Isolate *isolate, V< Context > context, V< HeapObject > heap_object)
Definition assembler.h:3941
V< Type > LoadTaggedField(V< Object > object, int field_offset)
Definition assembler.h:2993
V< WordPtr > RelocatableConstant(int64_t value, RelocInfo::Mode mode)
Definition assembler.h:2383
return ShiftRightLogical(left, this->Word32Constant(right), rep)
V< Word32 > RelocatableWasmIndirectCallTarget(uint32_t function_index)
Definition assembler.h:2403
V< Boolean > StringLessThanOrEqual(V< String > left, V< String > right)
Definition assembler.h:4594
V< Word > WordBinop(V< Word > left, V< Word > right, WordBinopOp::Kind kind, WordRepresentation rep)
Definition assembler.h:1677
V< Word32 > ChangeFloat64ToInt32OrDeopt(V< Float64 > input, V< turboshaft::FrameState > frame_state, CheckForMinusZeroMode minus_zero_mode, const FeedbackSource &feedback)
Definition assembler.h:2607
void DeoptimizeIfNot(V< Word32 > condition, V< turboshaft::FrameState > frame_state, DeoptimizeReason reason, const FeedbackSource &feedback)
Definition assembler.h:4054
V< Object > ToNumeric(V< Object > input, V< turboshaft::FrameState > frame_state, V< Context > context, LazyDeoptOnThrow lazy_deopt_on_throw)
Definition assembler.h:1593
V< WordPtr > TypedArrayLength(V< JSTypedArray > typed_array, ElementsKind elements_kind)
Definition assembler.h:4563
UnsignedLessThan SignedLessThanOrEqual SignedLessThanOrEqual
Definition assembler.h:1856
V< Boolean > BigIntComparison(V< BigInt > left, V< BigInt > right, BigIntComparisonOp::Kind kind)
Definition assembler.h:4509
V< Context > CallBuiltin_FastNewFunctionContextEval(Isolate *isolate, V< turboshaft::FrameState > frame_state, V< Context > context, V< ScopeInfo > scope_info, ConstOrV< Word32 > slot_count, LazyDeoptOnThrow lazy_deopt_on_throw)
Definition assembler.h:3712
V< Boolean > StringEqual(V< String > left, V< String > right)
Definition assembler.h:4588
V< Word64 > ChangeFloat64ToAdditiveSafeIntegerOrDeopt(V< Float64 > input, V< turboshaft::FrameState > frame_state, CheckForMinusZeroMode minus_zero_mode, const FeedbackSource &feedback)
Definition assembler.h:2623
DECL_MULTI_REP_BINOP(IntLessThan, Comparison, RegisterRepresentation, SignedLessThan) DECL_MULTI_REP_BINOP(UintLessThan
V< Object > CallRuntime_StackGuard(Isolate *isolate, V< Context > context)
Definition assembler.h:3898
OpIndex PendingLoopPhi(OpIndex first, RegisterRepresentation rep)
Definition assembler.h:4126
void Check(V< Word32 > condition, const char *message, const char *file, int line, const SourceLocation &loc=SourceLocation::Current())
Definition assembler.h:4192
V< Smi > CallBuiltin_StringIndexOf(Isolate *isolate, V< String > string, V< String > search, V< Smi > position)
Definition assembler.h:3666
void JSFunctionEntryStackCheck(V< Context > context, V< turboshaft::FrameState > frame_state)
Definition assembler.h:3204
V< Word32 > Equal(V< Any > left, V< Any > right, RegisterRepresentation rep)
Definition assembler.h:1803
void StoreFixedDoubleArrayElement(V< FixedDoubleArray > array, V< WordPtr > index, V< Float64 > value)
Definition assembler.h:3072
OpIndex Load(OpIndex base, OptionalOpIndex index, LoadOp::Kind kind, MemoryRepresentation loaded_rep, int32_t offset=0, uint8_t element_size_log2=0)
Definition assembler.h:2734
V< Object > Convert(V< Object > input, ConvertOp::Kind from, ConvertOp::Kind to)
Definition assembler.h:2133
V< Float64 > resolve(const ConstOrV< Float64 > &v)
Definition assembler.h:5155
V< Numeric > CallBuiltin_ToNumeric(Isolate *isolate, V< turboshaft::FrameState > frame_state, V< Context > context, V< Object > input, LazyDeoptOnThrow lazy_deopt_on_throw)
Definition assembler.h:3543
void CallRuntime_Abort(Isolate *isolate, V< Context > context, V< Smi > reason)
Definition assembler.h:3871
UnsignedLessThan SignedLessThanOrEqual Word64 UnsignedLessThanOrEqual UnsignedLessThanOrEqual
Definition assembler.h:1864
OpIndex LoadOffHeap(OpIndex address, int32_t offset, MemoryRepresentation rep)
Definition assembler.h:2748
OpIndex FindOrderedHashEntry(V< Object > data_structure, OpIndex key, FindOrderedHashEntryOp::Kind kind)
Definition assembler.h:4759
V< Smi > CallBuiltin_FindOrderedHashMapEntry(Isolate *isolate, V< Context > context, V< Object > table, V< Smi > key)
Definition assembler.h:3574
void StoreMessage(V< WordPtr > offset, V< Object > object)
Definition assembler.h:4709
V< Word32 > Float64Is(V< Float64 > input, NumericKind kind)
Definition assembler.h:2114
V< T > LoadElement(V< Base > object, const ElementAccess &access, V< WordPtr > index, bool is_array_buffer)
Definition assembler.h:5205
V< Word > WordConstant(uint64_t value, WordRepresentation rep)
Definition assembler.h:2270
V< Number > CallBuiltin_PlainPrimitiveToNumber(Isolate *isolate, V< PlainPrimitive > input)
Definition assembler.h:3630
V< Number > DoubleArrayMinMax(V< JSArray > array, DoubleArrayMinMaxOp::Kind kind)
Definition assembler.h:4412
V< R > Call(V< CallTarget > callee, OptionalV< turboshaft::FrameState > frame_state, base::Vector< const OpIndex > arguments, const TSCallDescriptor *descriptor, OpEffects effects=OpEffects().CanCallAnything())
Definition assembler.h:3328
V< Object > LoadFixedArrayElement(V< FixedArray > array, V< WordPtr > index)
Definition assembler.h:2834
OpIndex AtomicWord32PairStore(V< WordPtr > base, OptionalV< WordPtr > index, V< Word32 > value_low, V< Word32 > value_high, int32_t offset)
Definition assembler.h:2701
Float32 Float64 Float64 Float32 Float32 Float64 Float64 WordPtr
Definition assembler.h:2476
void CallRuntime_ThrowNotSuperConstructor(Isolate *isolate, V< turboshaft::FrameState > frame_state, V< Context > context, LazyDeoptOnThrow lazy_deopt_on_throw, V< Object > constructor, V< Object > function)
Definition assembler.h:3967
V< Object > CallBuiltin_CopyFastSmiOrObjectElements(Isolate *isolate, V< Object > object)
Definition assembler.h:3558
OpIndex CallBuiltin(Builtin builtin, V< turboshaft::FrameState > frame_state, base::Vector< OpIndex > arguments, CanThrow can_throw, Isolate *isolate)
Definition assembler.h:4375
OpIndex LoadTypedElement(OpIndex buffer, V< Object > base, V< WordPtr > external, V< WordPtr > index, ExternalArrayType array_type)
Definition assembler.h:4617
static V< T > Cast(V< U > index)
Definition index.h:632
maybe_const_or_v_t< T > value_type
Definition assembler.h:830
void operator=(value_type new_value)
Definition assembler.h:857
Var(Reducer *reducer, value_type initial_value)
Definition assembler.h:837
void Set(value_type new_value)
Definition assembler.h:852
static constexpr WordRepresentation WordPtr()
std::tuple< typename Iterables::value_type... > value_type
Definition assembler.h:278
value_type Dereference(A &assembler, iterator_type current_iterator)
Definition assembler.h:317
OptionalV< Word32 > IsEnd(A &assembler, iterator_type current_iterator)
Definition assembler.h:290
iterator_type Advance(A &assembler, iterator_type current_iterator)
Definition assembler.h:309
iterator_type Begin(A &assembler)
Definition assembler.h:284
std::tuple< Iterables... > iterables_
Definition assembler.h:325
std::tuple< typename Iterables::iterator_type... > iterator_type
Definition assembler.h:279
#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name)
#define HEAP_CONSTANT_TEST(rootIndexName, rootAccessorName, name)
#define COMPRESS_POINTERS_BOOL
Definition globals.h:99
#define V8_ENABLE_SANDBOX_BOOL
Definition globals.h:160
#define DECL_UNSIGNED_FLOAT_TRUNCATE(FloatBits, ResultBits)
Definition assembler.h:2565
#define TURBOSHAFT_REDUCER_BOILERPLATE(Name)
Definition assembler.h:823
#define DECL_GENERIC_UNOP(Name)
Definition assembler.h:1570
#define REDUCE_OP(Op)
Definition assembler.h:5179
#define DECL_MULTI_REP_BINOP_V(name, operation, kind, tag)
Definition assembler.h:1607
#define DECL_SINGLE_REP_UNARY_V(name, operation, kind, tag)
Definition assembler.h:1903
#define DECL_SIGNED_FLOAT_TRUNCATE(FloatBits, ResultBits)
Definition assembler.h:2544
#define DECL_SINGLE_REP_COMPARISON_V(name, kind, tag)
Definition assembler.h:1829
#define DECL_SINGLE_REP_BINOP_DEOPT_OVERFLOW(operation, rep_type)
Definition assembler.h:2014
#define DECL_MULTI_REP_CHECK_BINOP_V(name, operation, kind, tag)
Definition assembler.h:1687
#define TURBOSHAFT_REDUCER_GENERIC_BOILERPLATE(Name)
Definition assembler.h:813
#define DECL_GENERIC_UNOP_BUILTIN_CALL(Name)
Definition assembler.h:3525
#define REDUCE_THROWING_OP(Name)
Definition assembler.h:1263
#define DECL_GENERIC_BINOP_BUILTIN_CALL(Name)
Definition assembler.h:3514
#define DECL_SINGLE_REP_EQUAL_V(name, tag)
Definition assembler.h:1816
#define DECL_MULTI_REP_UNARY_V(name, operation, rep_type, kind, tag)
Definition assembler.h:1898
#define DEF_SELECT(Rep)
Definition assembler.h:3266
#define DECL_GENERIC_BINOP(Name)
Definition assembler.h:1554
#define DECL_SINGLE_REP_BINOP_V(name, operation, kind, tag)
Definition assembler.h:1613
#define DECL_CHANGE_V(name, kind, assumption, from, to)
Definition assembler.h:2432
#define EMIT_OP(Name)
Definition assembler.h:1068
#define DECL_MULTI_REP_BINOP(name, operation, rep_type, kind)
Definition assembler.h:1601
#define CONVERT_PRIMITIVE_TO_OBJECT(name, kind, input_rep, input_interpretation)
Definition assembler.h:2161
#define DECL_OBJECT_IS(kind)
Definition assembler.h:2088
#define DECL_SINGLE_REP_CHECK_BINOP_V(name, operation, kind, tag)
Definition assembler.h:1693
#define DECL_SINGLE_REP_SHIFT_V(name, kind, tag)
Definition assembler.h:1748
#define DECL_TAGGED_BITCAST(FromT, ToT, kind)
Definition assembler.h:2056
#define BIGINT_COMPARE(kind)
Definition assembler.h:4513
int start
int end
base::Vector< const DirectHandle< Object > > args
Definition execution.cc:74
Label label
Isolate * isolate
AssemblerT assembler
Zone * graph_zone
#define HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V)
#define HEAP_IMMOVABLE_OBJECT_LIST(V)
#define HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(V)
int32_t offset
TNode< Context > context
TNode< Object > receiver
double second
RpoNumber block
ZoneVector< RpoNumber > & result
int position
Definition liveedit.cc:290
InstructionOperand destination
int s
Definition mul-fft.cc:297
STL namespace.
int int32_t
Definition unicode.cc:40
constexpr auto tuple_head(Tuple &&tpl)
int SNPrintF(Vector< char > str, const char *format,...)
Definition strings.cc:20
bool all_equal(const C &container)
constexpr auto tuple_fold(T &&initial, Tuple &&tpl, Function &&function)
decltype(std::tuple_cat( std::declval< std::tuple< T > >(), std::declval< Tuple >())) prepend_tuple_type
constexpr auto tuple_map(Tuple &&tpl, Function &&function)
constexpr auto tuple_map2(TupleV &&tplv, TupleU &&tplu, Function &&function)
V8_INLINE Dest bit_cast(Source const &source)
Definition macros.h:95
constexpr Vector< T > VectorOf(T *start, size_t size)
Definition vector.h:360
typename detail::nth_type< N, T... >::type nth_type_t
constexpr auto tuple_drop(Tuple &&tpl)
auto ResolveAll(A &assembler, const ConstOrValues &const_or_values)
Definition assembler.h:356
auto BuildResultTuple(bool bound, Iterable &&iterable, LoopLabel &&loop_header, Label<> loop_exit, Iterator current_iterator, Value current_value)
Definition assembler.h:1322
auto BuildResultTupleImpl(bool bound, Iterable &&iterable, LoopLabel &&loop_header, Label<> loop_exit, Iterator current_iterator, ValueTuple current_values, std::index_sequence< Indices... >)
Definition assembler.h:1310
auto unwrap_unary_tuple(std::tuple< T > &&tpl)
Definition assembler.h:376
typename IndexTypeFor< T >::type index_type_for_t
Definition assembler.h:372
ConstOrV(V< T >) -> ConstOrV< T >
Zip(Iterables... iterables) -> Zip< Iterables... >
detail::LoopLabelForHelper< T >::type LoopLabelFor
Definition assembler.h:732
constexpr bool const_or_v_exists_v
Definition index.h:794
V8_INLINE const Operation & Get(const Graph &graph, OpIndex index)
Definition graph.h:1231
any_of(const Args &...) -> any_of< Args... >
constexpr bool MayThrow(Opcode opcode)
Definition operations.h:432
constexpr nullrep_t nullrep
Definition index.h:197
std::conditional_t< Is64(), Word64, Word32 > WordPtr
Definition index.h:225
Range(V< T >, V< T >, V< T >) -> Range< T >
UntaggedUnion< Float32, Float64 > Float
Definition index.h:536
OptionalV(V< T >) -> OptionalV< T >
UntaggedUnion< Word32, Word64 > Word
Definition index.h:535
Handle< Code > BuiltinCodeHandle(Builtin builtin, Isolate *isolate)
Definition assembler.cc:12
typename detail::ConstOrVTypeHelper< T >::type maybe_const_or_v_t
Definition index.h:792
Sequence(V< T >, V< T >) -> Sequence< T >
TNode< Float64T > Float64Add(TNode< Float64T > a, TNode< Float64T > b)
Node::Uses::const_iterator begin(const Node::Uses &uses)
Definition node.h:708
constexpr int kWasmInstanceDataParameterIndex
V8_INLINE IndirectHandle< T > handle(Tagged< T > object, Isolate *isolate)
Definition handles-inl.h:72
bool TryCast(Tagged< From > value, Tagged< To > *out)
Definition casting.h:77
constexpr int kSimd128Size
Definition globals.h:706
constexpr int kMinParameterIndex
Definition globals.h:2790
bool Is(IndirectHandle< U > value)
Definition handles-inl.h:51
bool IsAnyHole(Tagged< Object > obj, PtrComprCageBase cage_base)
const int kSmiTagSize
Definition v8-internal.h:87
constexpr int kTrustedPointerTableEntrySizeLog2
V8_INLINE constexpr bool IsSmi(TaggedImpl< kRefType, StorageType > obj)
Definition objects.h:665
constexpr int I
static constexpr bool is_taggable_v
Definition tagged.h:316
static constexpr bool is_subtype_v
Definition tagged.h:121
constexpr int L
constexpr int U
constexpr int kSimd256Size
Definition globals.h:709
constexpr int ElementsKindToShiftSize(ElementsKind elements_kind)
constexpr int kTaggedSizeLog2
Definition globals.h:543
constexpr bool SmiValuesAre31Bits()
T ReverseBytes(T value, int block_bytes_log2)
Definition utils-arm64.h:53
DONT_OVERRIDE DISABLE_ALLOCATION_SITES DISABLE_ALLOCATION_SITES HOLEY_DOUBLE_ELEMENTS
constexpr uint32_t kTrustedPointerHandleShift
const int kSmiShiftSize
V8_EXPORT_PRIVATE FlagValues v8_flags
const intptr_t kSmiTagMask
Definition v8-internal.h:88
return value
Definition map-inl.h:893
constexpr bool Is64()
const int kSmiTag
Definition v8-internal.h:86
constexpr uint64_t kTrustedPointerTableMarkBit
void MemCopy(void *dest, const void *src, size_t size)
Definition memcopy.h:124
constexpr int A
kInstanceDescriptorsOffset kTransitionsOrPrototypeInfoOffset IsNull(value)||IsJSProxy(value)||IsWasmObject(value)||(IsJSObject(value) &&(HeapLayout
Definition map-inl.h:70
bool is_signed(Condition cond)
template const char * string
!IsContextMap !IsContextMap native_context
Definition map-inl.h:877
Tagged< To > Cast(Tagged< From > value, const v8::SourceLocation &loc=INIT_SOURCE_LOCATION_IN_DEBUG)
Definition casting.h:150
#define BIGINT_BINOP(Name)
Operation
Definition operation.h:43
#define GENERIC_BINOP_LIST(V)
#define TURBOSHAFT_OPERATION_LIST(V)
Definition operations.h:362
#define TURBOSHAFT_THROWING_STATIC_OUTPUTS_OPERATIONS_LIST(V)
Definition operations.h:421
#define GENERIC_UNOP_LIST(V)
#define V8_NOEXCEPT
#define DCHECK_LE(v1, v2)
Definition logging.h:490
#define DCHECK_NULL(val)
Definition logging.h:491
#define CHECK(condition)
Definition logging.h:124
#define DCHECK_NOT_NULL(val)
Definition logging.h:492
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define USE(...)
Definition macros.h:293
constexpr T RoundUp(T x, intptr_t m)
Definition macros.h:387
AssemblerData(PipelineData *data, Graph &input_graph, Graph &output_graph, Zone *phase_zone)
Definition assembler.h:5278
static Kind KindFromBinOp(AtomicRMWOp::BinOp bin_op)
static constexpr Kind Aligned(BaseTaggedness base_is_tagged)
base::Vector< const OpIndex > inputs() const
const underlying_operation_t< Op > * TryCast() const
Definition operations.h:990
underlying_operation_t< Op > & Cast()
Definition operations.h:980
reducer_list_to_stack< WithGenericAndEmitProjection, StackBottom< ReducerList > >::type type
Definition assembler.h:803
reducer_list_insert_at< ReducerList, base_index, GenericReducerBase >::type WithGeneric
Definition assembler.h:790
reducer_list_insert_at< WithGeneric, ep_index, EmitProjectionReducer >::type WithGenericAndEmitProjection
Definition assembler.h:797
static const TSCallDescriptor * Create(const CallDescriptor *descriptor, CanThrow can_throw, LazyDeoptOnThrow lazy_deopt_on_throw, Zone *graph_zone, const JSWasmCallParameters *js_wasm_call_parameters=nullptr)
#define OFFSET_OF_DATA_START(Type)
#define T1(name, string, precedence)
Definition token.cc:28
#define T2(name, string, precedence)
Definition token.cc:30
Symbol file
#define V8_INLINE
Definition v8config.h:500
#define V8_UNLIKELY(condition)
Definition v8config.h:660