v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
type-inference-analysis.h
// Copyright 2023 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_TURBOSHAFT_TYPE_INFERENCE_ANALYSIS_H_
#define V8_COMPILER_TURBOSHAFT_TYPE_INFERENCE_ANALYSIS_H_

#include <limits>
#include <optional>

#include "src/base/logging.h"
#include "src/base/vector.h"
#include "src/compiler/turboshaft/graph.h"
#include "src/compiler/turboshaft/operations.h"
#include "src/compiler/turboshaft/representations.h"
#include "src/compiler/turboshaft/sidetable.h"
#include "src/compiler/turboshaft/snapshot-table.h"
#include "src/compiler/turboshaft/typer.h"
#include "src/compiler/turboshaft/types.h"

namespace v8::internal::compiler::turboshaft {

// This analysis infers types for all operations. It does so by running a
// fixpoint analysis on the input graph in order to properly type PhiOps. The
// analysis visits blocks in order and computes operation types using
// Turboshaft's Typer. For Goto operations, the analysis checks if this is a
// back edge (the Goto's target is a loop block with an index less than the
// index of the current block). If this is the case, the analysis revisits the
// loop block (this is when ProcessBlock<true> is called). During this revisit,
// two things differ from the normal processing of a block:
//
// 1.) PhiOps are handled specially, which means applying proper
// widening/narrowing mechanics to accelerate termination while still
// computing somewhat precise types for Phis.
// 2.) If the type of any of the loop's Phis grows, we reset the index of
// unprocessed blocks to the block after the loop header, such that the
// entire loop body is revisited with the new type information.
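//
// Illustrative sketch (assuming the maximal widening implemented in Widen()
// below): a loop Phi first typed as Word32{0} that receives Word32{1} over
// the back edge is not grown one value at a time; its type is widened towards
// the maximal Word32 range and the loop body is reprocessed with that widened
// type, so the fixpoint is reached after a bounded number of revisits.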
class TypeInferenceAnalysis {
 public:
  explicit TypeInferenceAnalysis(const Graph& graph, Zone* phase_zone)
      : graph_(graph),
        // TODO(nicohartmann@): Might put types back into phase_zone once we
        // don't store them in the graph anymore.
        types_(graph.op_id_count(), Type{}, graph.graph_zone(), &graph),
        table_(phase_zone),
        op_to_key_mapping_(phase_zone, &graph),
        block_to_snapshot_mapping_(graph.block_count(), std::nullopt,
                                   phase_zone),
        predecessors_(phase_zone),
        graph_zone_(graph.graph_zone()) {}

  GrowingOpIndexSidetable<Type> Run(
      GrowingBlockSidetable<std::vector<std::pair<OpIndex, Type>>>*
          block_refinements = nullptr) {
#ifdef DEBUG
    block_refinements_ = block_refinements;
#endif  // DEBUG
    TURBOSHAFT_TRACE_TYPING("=== Running Type Inference Analysis ===\n");
    for (uint32_t unprocessed_index = 0;
         unprocessed_index < graph_.block_count();) {
      BlockIndex block_index = static_cast<BlockIndex>(unprocessed_index);
      ++unprocessed_index;
      const Block& block = graph_.Get(block_index);

#ifdef DEBUG
      if (V8_UNLIKELY(v8_flags.turboshaft_trace_typing)) {
        std::stringstream os;
        os << block.kind() << " " << block.index().id();
        TURBOSHAFT_TRACE_TYPING("=== %s ===\n", os.str().c_str());
      }
#endif  // DEBUG

      ProcessBlock<false>(block, &unprocessed_index);
    }
    TURBOSHAFT_TRACE_TYPING("=== Completed Type Inference Analysis ===\n");

    return std::move(types_);
  }

  template <bool revisit_loop_header>
  void ProcessBlock(const Block& block, uint32_t* unprocessed_index) {
    DCHECK_IMPLIES(revisit_loop_header, block.IsLoop());

    // Seal the current block first.
    if (table_.IsSealed()) {
      DCHECK_NULL(current_block_);
    } else {
      // If we process a new block while the previous one is still unsealed, we
      // finalize it.
      DCHECK_NOT_NULL(current_block_);
      DCHECK(current_block_->index().valid());
      block_to_snapshot_mapping_[current_block_->index()] = table_.Seal();
      current_block_ = nullptr;
    }

    // Collect the snapshots of all predecessors.
    {
      predecessors_.clear();
      for (const Block* pred : block.PredecessorsIterable()) {
        std::optional<table_t::Snapshot> pred_snapshot =
            block_to_snapshot_mapping_[pred->index()];
        if (pred_snapshot.has_value()) {
          predecessors_.push_back(pred_snapshot.value());
        } else {
          // The only case where we might not have a snapshot for the
          // predecessor is when we visit a loop header for the first time.
          DCHECK(block.IsLoop() && pred == block.LastPredecessor() &&
                 !revisit_loop_header);
        }
      }
      std::reverse(predecessors_.begin(), predecessors_.end());
    }

    // Start a new snapshot for this block by merging information from
    // predecessors.
    {
      auto MergeTypes = [&](table_t::Key,
                            base::Vector<const Type> predecessors) -> Type {
        DCHECK_GT(predecessors.size(), 0);
        Type result_type = predecessors[0];
        for (size_t i = 1; i < predecessors.size(); ++i) {
          result_type =
              Type::LeastUpperBound(result_type, predecessors[i], graph_zone_);
        }
        return result_type;
      };

      table_.StartNewSnapshot(base::VectorOf(predecessors_), MergeTypes);
    }

    // Check if the predecessor is a branch that allows us to refine a few
    // types.
    DCHECK_IMPLIES(revisit_loop_header, block.PredecessorCount() == 2);
    if (block.PredecessorCount() == 1) {
      Block* predecessor = block.LastPredecessor();
      const Operation& terminator = predecessor->LastOperation(graph_);
      if (const BranchOp* branch = terminator.TryCast<BranchOp>()) {
        DCHECK(branch->if_true == &block || branch->if_false == &block);
        RefineTypesAfterBranch(branch, &block, branch->if_true == &block);
      }
    }
    current_block_ = &block;

    bool loop_needs_revisit = false;
    auto op_range = graph_.OperationIndices(block);
    for (auto it = op_range.begin(); it != op_range.end(); ++it) {
      OpIndex index = *it;
      const Operation& op = graph_.Get(index);

      switch (op.opcode) {
        case Opcode::kBranch:
        case Opcode::kDeoptimize:
        case Opcode::kDeoptimizeIf:
        case Opcode::kFrameState:
        case Opcode::kReturn:
        case Opcode::kStore:
        case Opcode::kRetain:
        case Opcode::kUnreachable:
        case Opcode::kSwitch:
        case Opcode::kTuple:
        case Opcode::kStaticAssert:
        case Opcode::kDebugBreak:
        case Opcode::kDebugPrint:
#if V8_ENABLE_WEBASSEMBLY
        case Opcode::kGlobalSet:
        case Opcode::kTrapIf:
#endif
        case Opcode::kCheckException:
          // These operations do not produce any output that needs to be typed.
          DCHECK_EQ(0, op.outputs_rep().size());
          break;
        case Opcode::kCheckTurboshaftTypeOf:
          ProcessCheckTurboshaftTypeOf(index,
                                       op.Cast<CheckTurboshaftTypeOfOp>());
          break;
        case Opcode::kComparison:
          ProcessComparison(index, op.Cast<ComparisonOp>());
          break;
        case Opcode::kConstant:
          ProcessConstant(index, op.Cast<ConstantOp>());
          break;
        case Opcode::kFloatBinop:
          ProcessFloatBinop(index, op.Cast<FloatBinopOp>());
          break;
        case Opcode::kOverflowCheckedBinop:
          ProcessOverflowCheckedBinop(index, op.Cast<OverflowCheckedBinopOp>());
          break;
        case Opcode::kProjection:
          ProcessProjection(index, op.Cast<ProjectionOp>());
          break;
        case Opcode::kWordBinop:
          ProcessWordBinop(V<Word>::Cast(index), op.Cast<WordBinopOp>());
          break;
        case Opcode::kWord32PairBinop:
        case Opcode::kAtomicWord32Pair:
        case Opcode::kPendingLoopPhi:
          // Input graph must not contain these op codes.
          UNREACHABLE();
        case Opcode::kPhi:
          if constexpr (revisit_loop_header) {
            loop_needs_revisit =
                ProcessLoopPhi(index, op.Cast<PhiOp>()) || loop_needs_revisit;
          } else {
            ProcessPhi(index, op.Cast<PhiOp>());
          }
          break;
        case Opcode::kGoto: {
          const GotoOp& gto = op.Cast<GotoOp>();
          // Check if this is a backedge.
          if (gto.destination->IsLoop()) {
            if (gto.destination->index() < current_block_->index()) {
              ProcessBlock<true>(*gto.destination, unprocessed_index);
            } else if (gto.destination->index() == current_block_->index()) {
              // This is a single block loop. We must only revisit the current
              // header block if we actually need to, in order to prevent
              // infinite recursion.
              if (!revisit_loop_header || loop_needs_revisit) {
                ProcessBlock<true>(*gto.destination, unprocessed_index);
              }
            }
          }
          break;
        }

        default:
          // TODO(nicohartmann@): Support remaining operations. For now we
          // compute fallback types.
          if (op.outputs_rep().size() > 0) {
            constexpr bool allow_narrowing = false;
            constexpr bool is_fallback_for_unsupported_operation = true;
            SetType(index,
                    Typer::TypeForRepresentation(op.outputs_rep(), graph_zone_),
                    allow_narrowing, is_fallback_for_unsupported_operation);
          }
          break;
        case Opcode::kLoadRootRegister:
          SetType(index,
                  Typer::TypeForRepresentation(op.outputs_rep(), graph_zone_));
          break;
      }
    }

    if constexpr (revisit_loop_header) {
      if (loop_needs_revisit) {
        // This is a loop header and the loop body needs to be revisited. Reset
        // {unprocessed_index} to the loop header's successor.
        *unprocessed_index =
            std::min(*unprocessed_index, block.index().id() + 1);
      }
    }
  }

  void ProcessCheckTurboshaftTypeOf(OpIndex index,
                                    const CheckTurboshaftTypeOfOp& check) {
    Type input_type = GetType(check.input());

    if (input_type.IsSubtypeOf(check.type)) {
      TURBOSHAFT_TRACE_TYPING_OK(
          "CTOF %3d:%-40s\n P: %3d:%-40s ~~> %s\n", index.id(),
          graph_.Get(index).ToString().substr(0, 40).c_str(),
          check.input().id(),
          graph_.Get(check.input()).ToString().substr(0, 40).c_str(),
          input_type.ToString().c_str());
    } else if (check.successful) {
      FATAL(
          "Checking type %s of operation %d:%s failed after it passed in a "
          "previous phase",
          check.type.ToString().c_str(), check.input().id(),
          graph_.Get(check.input()).ToString().c_str());
    } else {
      TURBOSHAFT_TRACE_TYPING_FAIL(
          "CTOF %3d:%-40s\n F: %3d:%-40s ~~> %s\n", index.id(),
          graph_.Get(index).ToString().substr(0, 40).c_str(),
          check.input().id(),
          graph_.Get(check.input()).ToString().substr(0, 40).c_str(),
          input_type.ToString().c_str());
    }
  }

  void ProcessComparison(OpIndex index, const ComparisonOp& comparison) {
    Type left_type = GetType(comparison.left());
    Type right_type = GetType(comparison.right());

    Type result_type = Typer::TypeComparison(
        left_type, right_type, comparison.rep, comparison.kind, graph_zone_);
    SetType(index, result_type);
  }

  void ProcessConstant(OpIndex index, const ConstantOp& constant) {
    if (constant.kind == ConstantOp::Kind::kFloat64 &&
        constant.float64().is_hole_nan()) {
      // TODO(nicohartmann): figure out how to type Float64 NaN holes. Typing
      // them simply as NaN is not always correct and can lead to replacing NaN
      // holes with regular NaNs.
      SetType(index, Type::Any());
      return;
    }
    Type type = Typer::TypeConstant(constant.kind, constant.storage);
    SetType(index, type);
  }

  void ProcessFloatBinop(OpIndex index, const FloatBinopOp& binop) {
    Type left_type = GetType(binop.left());
    Type right_type = GetType(binop.right());

    Type result_type = Typer::TypeFloatBinop(left_type, right_type, binop.kind,
                                             binop.rep, graph_zone_);
    SetType(index, result_type);
  }

  bool ProcessLoopPhi(OpIndex index, const PhiOp& phi) {
    Type old_type = GetTypeAtDefinition(index);
    Type new_type = ComputeTypeForPhi(phi);

    if (old_type.IsInvalid()) {
      SetType(index, new_type);
      return true;
    }

    // If the new type is smaller, we narrow it without revisiting the loop.
    if (new_type.IsSubtypeOf(old_type)) {
      TURBOSHAFT_TRACE_TYPING_OK(
          "LOOP %3d:%-40s (FIXPOINT)\n N: %-40s ~~> %-40s\n", index.id(),
          graph_.Get(index).ToString().substr(0, 40).c_str(),
          old_type.ToString().c_str(), new_type.ToString().c_str());

      constexpr bool allow_narrowing = true;
      SetType(index, new_type, allow_narrowing);
      return false;
    }

    // Otherwise, the new type is larger and we widen and revisit the loop.
    TURBOSHAFT_TRACE_TYPING_OK(
        "LOOP %3d:%-40s (REVISIT)\n W: %-40s ~~> %-40s\n", index.id(),
        graph_.Get(index).ToString().substr(0, 40).c_str(),
        old_type.ToString().c_str(), new_type.ToString().c_str());

    if (!old_type.IsNone()) {
      new_type = Widen(old_type, new_type);
    }
    SetType(index, new_type);
    return true;
  }

  void ProcessOverflowCheckedBinop(OpIndex index,
                                   const OverflowCheckedBinopOp& binop) {
    Type left_type = GetType(binop.left());
    Type right_type = GetType(binop.right());

    Type result_type = Typer::TypeOverflowCheckedBinop(
        left_type, right_type, binop.kind, binop.rep, graph_zone_);
    SetType(index, result_type);
  }

  void ProcessPhi(OpIndex index, const PhiOp& phi) {
    Type result_type = ComputeTypeForPhi(phi);
    SetType(index, result_type);
  }

  void ProcessProjection(OpIndex index, const ProjectionOp& projection) {
    Type input_type = GetType(projection.input());

    Type result_type;
    if (input_type.IsNone()) {
      result_type = Type::None();
    } else if (input_type.IsTuple()) {
      const TupleType& tuple = input_type.AsTuple();
      DCHECK_LT(projection.index, tuple.size());
      result_type = tuple.element(projection.index);
      DCHECK(result_type.IsSubtypeOf(
          Typer::TypeForRepresentation(projection.rep)));
    } else {
      result_type = Typer::TypeForRepresentation(projection.rep);
    }

    SetType(index, result_type);
  }

  void ProcessWordBinop(V<Word> index, const WordBinopOp& binop) {
    Type left_type = GetType(binop.left());
    Type right_type = GetType(binop.right());

    Type result_type = Typer::TypeWordBinop(left_type, right_type, binop.kind,
                                            binop.rep, graph_zone_);
    SetType(index, result_type);
  }

  Type ComputeTypeForPhi(const PhiOp& phi) {
    // Word64 values are truncated to word32 implicitly, we need to handle this
    // here.
    auto MaybeTruncate = [&](Type t) -> Type {
      if (t.IsNone()) return t;
      if (phi.rep == RegisterRepresentation::Word32()) {
        return Typer::TruncateWord32Input(t, true, graph_zone_);
      }
      return t;
    };

    Type result_type =
        MaybeTruncate(GetTypeOrDefault(phi.inputs()[0], Type::None()));
    for (size_t i = 1; i < phi.inputs().size(); ++i) {
      Type input_type =
          MaybeTruncate(GetTypeOrDefault(phi.inputs()[i], Type::None()));
      result_type = Type::LeastUpperBound(result_type, input_type, graph_zone_);
    }
    return result_type;
  }

  void RefineTypesAfterBranch(const BranchOp* branch, const Block* new_block,
                              bool then_branch) {
    TURBOSHAFT_TRACE_TYPING_OK("Br %3d:%-40s\n", graph_.Index(*branch).id(),
                               branch->ToString().substr(0, 40).c_str());

    Typer::BranchRefinements refinements(
        [this](OpIndex index) { return GetType(index); },
        [&](OpIndex index, const Type& refined_type) {
          RefineOperationType(new_block, index, refined_type,
                              then_branch ? 'T' : 'F');
        });

    // Inspect branch condition.
    const Operation& condition = graph_.Get(branch->condition());
    refinements.RefineTypes(condition, then_branch, graph_zone_);
  }

  void RefineOperationType(const Block* new_block, OpIndex op, const Type& type,
                           char case_for_tracing) {
    DCHECK(op.valid());
    DCHECK(!type.IsInvalid());

    TURBOSHAFT_TRACE_TYPING_OK(" %c: %3d:%-40s ~~> %s\n", case_for_tracing,
                               op.id(),
                               graph_.Get(op).ToString().substr(0, 40).c_str(),
                               type.ToString().c_str());

    auto key_opt = op_to_key_mapping_[op];
    DCHECK(key_opt.has_value());
    table_.Set(*key_opt, type);

#ifdef DEBUG
    if (block_refinements_) {
      (*block_refinements_)[new_block->index()].emplace_back(op, type);
    }
#endif

    // TODO(nicohartmann@): One could push the refined type deeper into the
    // operations.
  }

  void SetType(OpIndex index, Type result_type, bool allow_narrowing = false,
               bool is_fallback_for_unsupported_operation = false) {
    DCHECK(!result_type.IsInvalid());

    if (auto key_opt = op_to_key_mapping_[index]) {
      table_.Set(*key_opt, result_type);
      types_[index] = result_type;
    } else {
      auto key = table_.NewKey(Type::None());
      op_to_key_mapping_[index] = key;
      table_.Set(key, result_type);
      types_[index] = result_type;
    }

    if (!is_fallback_for_unsupported_operation) {
      TURBOSHAFT_TRACE_TYPING_OK(
          "Type %3d:%-40s ==> %s\n", index.id(),
          graph_.Get(index).ToString().substr(0, 40).c_str(),
          result_type.ToString().c_str());
    } else {
      // TODO(nicohartmann@): Remove the fallback case once all operations are
      // supported.
      TURBOSHAFT_TRACE_TYPING_FAIL(
          "TODO %3d:%-40s ==> %s\n", index.id(),
          graph_.Get(index).ToString().substr(0, 40).c_str(),
          result_type.ToString().c_str());
    }
  }

  Type GetTypeOrInvalid(OpIndex index) {
    if (auto key = op_to_key_mapping_[index]) return table_.Get(*key);
    return Type::Invalid();
  }

  Type GetTypeOrDefault(OpIndex index, const Type& default_type) {
    Type t = GetTypeOrInvalid(index);
    if (t.IsInvalid()) return default_type;
    return t;
  }

  Type GetType(OpIndex index) {
    Type t = GetTypeOrInvalid(index);
    if (t.IsInvalid()) {
      // TODO(nicohartmann@): This is a fallback mechanism as long as not all
      // operations are properly typed. Remove this once typing is complete.
      const Operation& op = graph_.Get(index);
      return Typer::TypeForRepresentation(op.outputs_rep(), graph_zone_);
    }
    return t;
  }

  Type GetTypeAtDefinition(OpIndex index) const { return types_[index]; }

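  // Widening accelerates termination of the loop fixpoint: instead of letting
  // a loop Phi's type grow a little on every revisit, we jump to a maximal
  // range for its kind, so a Phi's type can only grow a bounded number of
  // times before the analysis stabilizes.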
  Type Widen(const Type& old_type, const Type& new_type) {
    if (new_type.IsAny()) return new_type;
    // We might have to relax this eventually and widen different types.
    DCHECK_EQ(old_type.kind(), new_type.kind());

    switch (old_type.kind()) {
      case Type::Kind::kWord32:
        // TODO(nicohartmann@): Reevaluate whether exponential widening is
        // better here.
        //
        // return WordOperationTyper<32>::WidenExponential(old_type.AsWord32(),
        // new_type.AsWord32(), graph_zone_);
        return WordOperationTyper<32>::WidenMaximal(
            old_type.AsWord32(), new_type.AsWord32(), graph_zone_);
      case Type::Kind::kWord64:
        // TODO(nicohartmann@): Reevaluate whether exponential widening is
        // better here.
        //
        // return WordOperationTyper<64>::WidenExponential(old_type.AsWord64(),
        // new_type.AsWord64(), graph_zone_);
        return WordOperationTyper<64>::WidenMaximal(
            old_type.AsWord64(), new_type.AsWord64(), graph_zone_);
      case Type::Kind::kFloat32:
        // TODO(nicohartmann@): Implement proper widening.
        return Float32Type::Any();
      case Type::Kind::kFloat64:
        // TODO(nicohartmann@): Implement proper widening.
        return Float64Type::Any();
      default:
        // TODO(nicohartmann@): Handle remaining cases.
        UNREACHABLE();
    }
  }

 private:
  const Graph& graph_;
  GrowingOpIndexSidetable<Type> types_;
  using table_t = SnapshotTable<Type>;
  table_t table_;
  const Block* current_block_ = nullptr;
  GrowingOpIndexSidetable<std::optional<table_t::Key>> op_to_key_mapping_;
  GrowingBlockSidetable<std::optional<table_t::Snapshot>>
      block_to_snapshot_mapping_;
  // {predecessors_} is used during merging, but we use an instance variable for
  // it, in order to save memory and not reallocate it for each merge.
  ZoneVector<table_t::Snapshot> predecessors_;
  Zone* graph_zone_;

#ifdef DEBUG
  // {block_refinements_} are only stored for tracing in Debug builds.
  GrowingBlockSidetable<std::vector<std::pair<OpIndex, Type>>>*
      block_refinements_ = nullptr;
#endif
};

}  // namespace v8::internal::compiler::turboshaft

#endif  // V8_COMPILER_TURBOSHAFT_TYPE_INFERENCE_ANALYSIS_H_
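
Below is a minimal usage sketch, not part of the header above: it assumes a Turboshaft Graph and a phase Zone are already available (as they are inside a compiler phase), and op_index stands for a valid operation index in that graph.

  TypeInferenceAnalysis analysis(graph, phase_zone);
  // Run() performs the fixpoint iteration over all blocks and returns a
  // sidetable mapping each operation index to its inferred type.
  GrowingOpIndexSidetable<Type> types = analysis.Run();
  const Type& inferred = types[op_index];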