#ifndef V8_COMPILER_TURBOSHAFT_COPYING_PHASE_H_
#define V8_COMPILER_TURBOSHAFT_COPYING_PHASE_H_
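
// Excerpt from Turboshaft's copying phase. The phase copies the input graph
// into a fresh output graph, feeding every operation through a stack of
// reducers as it is copied; the visitor reducer defined below drives the
// traversal and maintains the old-to-new block and operation mappings.
// Elided regions of the original header are marked with "...".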
// ...

template <typename Next>
// ...
template <typename Next>
// ...

template <typename Derived, typename Base>
// ...
#define FRIEND(op) friend struct op##Op;
  // ...

  template <size_t I, class D>
  // ...

    return derived_this()->template MapToNewGraph<N>(indices);
  // ...

#define ASSEMBLE(operation)                                           \
  OpIndex AssembleOutputGraph##operation(const operation##Op& op) {   \
    /* ... */                                                         \
        [a = assembler()](auto... args) {                             \
          return a->Reduce##operation(args...);                       \
        },                                                            \
    /* ... */
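
// The ASSEMBLE macro above stamps out one AssembleOutputGraph##operation
// method per operation kind; each simply forwards the operation's fields to
// the assembler's matching Reduce##operation so the operation is re-emitted
// into the output graph.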
template <class AfterNext>
// ...
                          VariableReducer<AfterNext>> {
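  // The visitor reducer: it sits directly on top of VariableReducer (so that
  // old-to-new mappings can be expressed as variables where a single OpIndex
  // is not enough) and performs the actual input-to-output graph copy.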
  // ...
    Asm().output_graph().Reset();
  // ...

  template <bool trace_reduction>
  // ...
    for (Block& input_block : Asm().modifiable_input_graph().blocks()) {
      block_mapping_[input_block.index()] = Asm().output_graph().NewBlock(
          /* ... */);
    }
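    // Every input block gets a corresponding output block allocated up front,
    // recorded in block_mapping_, so edges can be remapped even before their
    // target block has been visited.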
    // ...
    for (OpIndex index : Asm().output_graph().AllOperationIndices()) {
      OpIndex origin = Asm().output_graph().operation_origins()[index];
      Asm().output_graph().source_positions()[index] =
          origin.valid() ? Asm().input_graph().source_positions()[origin]
                         : SourcePosition::Unknown();
    }
    // ...
    for (OpIndex index : Asm().output_graph().AllOperationIndices()) {
      OpIndex origin = Asm().output_graph().operation_origins()[index];
      if (origin.valid()) {
        origins->SetNodeOrigin(index.id(), origin.id());
      }
    }
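    // After the copy, operation_origins() maps each output operation back to
    // the input operation it was created from; that mapping is used above to
    // carry over source positions and, when a node-origin table is present,
    // node origins.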
  // ...
    Block* new_block =
        Asm().output_graph().NewBlock(input_block->kind(), input_block);
    // ...
                 Asm().current_block()->OriginForBlockEnd());
    // ...
    Asm().Goto(new_block);
    // ...
    blocks_to_clone_.push_back(
        {input_block, added_block_phi_input, new_block});
  // ...
    if (Asm().generating_unreachable_operations()) return;
    // ...
    DCHECK(!is_in_recursive_inlining_);
    // ...
                 Asm().current_block()->OriginForBlockEnd());
    // ...
                 input_block, added_block_phi_input);
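    // Cloned blocks are not copied inline at this point: a Goto to the fresh
    // block is emitted and the request is queued on blocks_to_clone_, to be
    // processed later.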
  template <bool can_be_invalid = false>
  // ...
    if constexpr (can_be_invalid) {
      if (!var.has_value()) {
        // ...
      }
    }
    // ...
    if (predecessor_index == -1) {
      result = Asm().GetVariable(var.value());
    } else {
      result = Asm().GetPredecessorValue(var.value(), predecessor_index);
    }
    // ...

  template <bool can_be_invalid = false, typename T>
  // ...
        static_cast<OpIndex>(old_index), predecessor_index));
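
  // When an input index cannot be resolved through the plain side table
  // (e.g. inside blocks that were cloned or inlined), the mapping lives in a
  // Variable: with no predecessor index the current value is read via
  // GetVariable, otherwise the value the variable had at that specific
  // predecessor is read via GetPredecessorValue.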
  template <typename FunctionType>
  // ...
    OpIndex ig_index = Asm().input_graph().Index(op);
    if (Asm().current_block()->IsLoop()) {
      // ...
      return Asm().PendingLoopPhi(og_index, rep);
    }
    // ...
    int predecessor_count = Asm().current_block()->PredecessorCount();
    // ...
    int predecessor_index = predecessor_count - 1;
    int old_index = static_cast<int>(old_inputs.size()) - 1;
    // ...
      new_inputs.push_back(map(input, predecessor_index, old_index));
    // ...
    if (new_pred != nullptr) {
      // ...
      for (auto& block : Asm().modifiable_input_graph().blocks()) {
        block.clear_custom_data();
      }
      // ...
      predecessor_index = predecessor_count - 1;
      for (new_pred = Asm().current_block()->LastPredecessor();
           /* ... */) {
        OpIndex input = old_inputs[old_index];
        // ...
        new_inputs.push_back(map(input, predecessor_index, old_index));
      }
    }

    DCHECK_EQ(new_inputs.size(), Asm().current_block()->PredecessorCount());
    if (new_inputs.size() == 1) {
      // ...
      return new_inputs[0];
    }

    std::reverse(new_inputs.begin(), new_inputs.end());
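    // Phi inputs are rebuilt by walking the output block's predecessors from
    // last to first (hence the decrementing indices above), so new_inputs is
    // produced back to front and reversed at the end; a merge that collapsed
    // to a single predecessor just returns its lone input.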
  // ...
                      bool is_loop_after_peeling = false) {
    // ...
    DCHECK(std::is_sorted(sub_graph.begin(), sub_graph.end(),
                          [](/* ... */) {
                            return a->index().id() <= b->index().id();
                          }));
    // ...
    for (auto&& [input_block, old_mapping] : /* ... */) {
      // ...
          Asm().output_graph().NewBlock(kind, input_block);
    }
    // ...
    if (is_loop_after_peeling) start->set_has_peeled_iteration();
    // ...
    for (const Block* block : sub_graph) {
      // ...
    }
    // ...
    for (auto&& [input_block, old_mapping] : /* ... */) {
      // ...
    }
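    // CloneSubGraph: the blocks of the sub-graph (kept sorted by index) first
    // all get fresh output blocks so edges between them can be remapped, and
    // only then are their bodies copied; when the sub-graph is a loop that was
    // just peeled, the cloned header is marked via set_has_peeled_iteration().
    // The two [input_block, old_mapping] loops appear to save and later
    // restore the surrounding block mapping around the clone.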
  template <bool can_be_invalid = false>
  // ...
                      int predecessor_index = -1) {
    // ...
  }

  template <size_t expected_size>
  // ...

  template <bool trace_reduction>
  // ...
    visit_stack.push_back(&Asm().input_graph().StartBlock());
    while (!visit_stack.empty()) {
      // ...
    }
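    // Graph traversal uses an explicit work stack seeded with the input
    // graph's start block rather than recursion.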
  template <bool trace_reduction>
  // ...
    if constexpr (trace_reduction) {
      // ...
                   Asm().output_graph().next_block_index()}
      // ...
    }
    // ...
    if (Asm().Bind(new_block)) {
      // ...
    }
    // ...
    if (final_goto->destination->IsLoop()) {
      if (input_block->index() >= final_goto->destination->index()) {
        // ...
      }
    }
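      // A Goto to a loop header whose index is not larger than the current
      // input block's index is a backedge.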
  template <bool trace_reduction>
  // ...
                      int added_block_phi_input = -1) {
    // ...
    for (OpIndex index : Asm().input_graph().OperationIndices(*input_block)) {
      // ...
      if (Asm().input_graph().Get(index).template Is<PhiOp>()) {
        // ...
            Asm().input_graph().Get(index).input(added_block_phi_input));
        // ...
      }
      // ...
      if (!Asm().current_block()) {
        // ...
      }
    }
    // ...
    bool stopped_early = false;
    // ...
             Asm().input_graph().OperationIndices(*input_block))) {
      const Operation& op = Asm().input_graph().Get(index);
      // ...
        stopped_early = true;
      // ...
    }
    // ...
    if (stopped_early || Asm().current_block() == nullptr) return;
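    // When the block is visited as part of a clone with an extra phi input
    // (added_block_phi_input != -1), its Phis take the input at that
    // predecessor index instead of being re-emitted as full Phis; visiting
    // the remaining operations stops early as soon as a reducer leaves no
    // reachable current block.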
  template <bool trace_reduction>
  // ...
    if (Asm().current_block() == nullptr) return false;
    // ...
    const Operation& op = Asm().input_graph().Get(index);
  template <bool trace_reduction>
  // ...
    Block* current_block = Asm().current_block();
    // ...
    Asm().SetCurrentOrigin(index);
    // ...
    OpIndex first_output_index = Asm().output_graph().next_operation_index();
    USE(first_output_index);
    const Operation& op = Asm().input_graph().Get(index);
    // ...
#define EMIT_INSTR_CASE(Name)                                              \
  case Opcode::k##Name:                                                    \
    if (MayThrow(Opcode::k##Name)) return OpIndex::Invalid();              \
    new_index = Asm().ReduceInputGraph##Name(index, op.Cast<Name##Op>());  \
    /* ... */
    // ...
#undef EMIT_INSTR_CASE
    // ...
    if constexpr (trace_reduction) {
      // ...
    }
    // ...
                   Asm().output_graph().BelongsToThisGraph(new_index));
    // ...
    if (new_index.valid()) {
      const Operation& new_op = Asm().output_graph().Get(new_index);
      // ...
                     Asm().output_graph().IsCreatedFromTurbofan()));
      // ...
      Asm().Verify(index, new_index);
    }
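    // Each input operation is dispatched on its opcode to the corresponding
    // ReduceInputGraph##Name hook of the reducer stack. Operations that may
    // throw bail out here with an invalid index (the throwing operation is
    // re-reduced later, via the switch over throwing_operation.opcode further
    // down), and debug builds check that the result belongs to the output
    // graph before verifying it against the input operation.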
  template <bool trace_reduction>
  // ...
                            const Block* input_block) {
    if (Asm().CanAutoInlineBlocksWithSinglePredecessor() &&
        /* ... */) {
      // ...
    }
    // ...
    OpIndex index = Asm().input_graph().Index(terminator);
    // ...
  }

  template <bool trace_reduction>
  // ...

  template <bool trace_reduction>
  // ...
    if constexpr (trace_reduction) {
      // ...
    }
  template <bool trace_reduction>
  // ...
                    Block* output_block) {
    // ...
    if constexpr (trace_reduction) {
      // ...
      std::cout << "As new "
      // ...
                   Asm().output_graph().next_block_index()}
      // ...
    }
    // ...
    Asm().BindReachable(output_block);
    // ...
                   input_block, added_block_phi_input);
  // ...

    std::cout << "╭── o" << index.id() << ": "
    // ...
    if (new_index < first_output_index) {
      // ...
      std::cout << "╰─> #n" << new_index.id() << "\n";
      // ...
    }
    bool before_arrow = new_index >= first_output_index;
    for (const Operation& op : Asm().output_graph().operations(
             first_output_index, Asm().output_graph().next_operation_index())) {
      OpIndex index = Asm().output_graph().Index(op);
      // ...
      if (index == new_index) {
        // ...
        before_arrow = false;
      } else if (before_arrow) {
        // ...
      }
      // ...
      std::cout << prefix << " n" << index.id() << ": "
      // ...
      if (op.IsBlockTerminator() && Asm().current_block() &&
          Asm().current_block() != current_block) {
        current_block = &Asm().output_graph().Get(
            /* ... */
      }
    }
    // ...
    return Asm().ReduceSwitch(
        /* ... */
    // ...
        [this](OpIndex ind, int predecessor_index, int old_index = 0) {
          // ...
        },
    // ...
    return Asm().ReduceCall(callee, frame_state, base::VectorOf(arguments),
                            /* ... */);
    // ...
    switch (throwing_operation.opcode) {
      // ...
      case Opcode::k##Name:                                                  \
        result = Asm().ReduceInputGraph##Name(                               \
            op.throwing_operation(), throwing_operation.Cast<Name##Op>());   \
      // ...
    }
    // ...
                   &Asm().input_graph());
    // ...
                   &Asm().input_graph());
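    // The switch above re-dispatches the wrapped throwing operation to its
    // specific ReduceInputGraph##Name hook, so reducers still see the
    // original operation even though it was skipped during the regular
    // per-opcode dispatch.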
    // ...
    for (; it != end; ++it) {
      // ...
    }
    // ...
    DCHECK(Asm().input_graph().BelongsToThisGraph(old_index));
    // ...
           Asm().output_graph().BelongsToThisGraph(new_index));
    // ...
      if (!var.has_value()) {
        // ...
            Asm().input_graph().Get(old_index).outputs_rep().size() == 1
        // ...
                Asm().input_graph().Get(old_index).outputs_rep()[0])
        // ...
        var = Asm().NewLoopInvariantVariable(rep);
        // ...
      }
      Asm().SetVariable(*var, new_index);
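    // CreateOldToNewMapping: ordinarily the old-to-new index goes into a
    // plain side table, but for blocks whose mappings must differ per
    // predecessor the new index is stored in a freshly created
    // loop-invariant Variable instead, keyed by the old index.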
    // ...
    for (const Operation& op : Asm().input_graph().operations(
             input_graph_loop->begin(), input_graph_loop->end())) {
      if (auto* input_phi = op.TryCast<PhiOp>()) {
        // ...
        if (!phi_index.valid() || !output_graph_loop->Contains(phi_index)) {
          // ...
        }
        // ...
        Asm().FixLoopPhi(*input_phi, phi_index, output_graph_loop);
      }
    }
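    // FixLoopPhis: loop headers are visited before their backedge exists, so
    // their Phis are first emitted as PendingLoopPhi (see ResolvePhi above);
    // once the backedge has been emitted, each pending phi gets its backedge
    // input patched in via FixLoopPhi.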
  // ...
  bool is_in_recursive_inlining_ = false;
template <template <class> class... Reducers>
// ...

template <template <class> class... Reducers>
// ...
                  bool trace_reductions = false) {
    // ...
    if (trace_reductions) {
      phase.template VisitGraph<true>();
    } else {
      phase.template VisitGraph<false>();
    }
    // ...
    phase.template VisitGraph<false>();
template <template <typename> typename... Reducers>
// ...
    Graph& input_graph = data->graph();
    // ...
                   data->info()->turboshaft_trace_reduction());
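
// Typical instantiation (sketch only; the reducer names are illustrative
// existing Turboshaft reducers, and `data`/`temp_zone` stand for the
// pipeline data and phase zone available inside a pipeline phase):
//
//   CopyingPhase<MachineOptimizationReducer,
//                ValueNumberingReducer>::Run(data, temp_zone);
//
// Each listed reducer is stacked onto the graph visitor and gets to rewrite
// every operation while the graph is copied.

#endif  // V8_COMPILER_TURBOSHAFT_COPYING_PHASE_H_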