wasm-gc-typed-optimization-reducer.cc
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
// Copyright 2023 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/turboshaft/wasm-gc-typed-optimization-reducer.h"

#include "src/base/logging.h"
#include "src/compiler/turboshaft/analyzer-iterator.h"
#include "src/compiler/turboshaft/loop-finder.h"

namespace v8::internal::compiler::turboshaft {

#define TRACE(...)                                      \
  do {                                                  \
    if (v8_flags.trace_wasm_typer) PrintF(__VA_ARGS__); \
  } while (false)

void WasmGCTypeAnalyzer::Run() {
  LoopFinder loop_finder(phase_zone_, &graph_);
  AnalyzerIterator iterator(phase_zone_, graph_, loop_finder);
  while (iterator.HasNext()) {
    const Block& block = *iterator.Next();
    ProcessBlock(block);

    // Finish snapshot.
    block_to_snapshot_[block.index()] = MaybeSnapshot(types_table_.Seal());

    // Consider re-processing for loops.
    if (const GotoOp* last = block.LastOperation(graph_).TryCast<GotoOp>()) {
      if (IsReachable(block) && last->destination->IsLoop() &&
          last->destination->LastPredecessor() == &block) {
        TRACE("[b%u] Reprocessing loop header b%u at backedge #%u\n",
              block.index().id(), last->destination->index().id(),
              graph_.Index(block.LastOperation(graph_)).id());
        const Block& loop_header = *last->destination;
        // Create a merged snapshot state for the forward- and backedge and
        // process all operations inside the loop header.
        ProcessBlock(loop_header);
        Snapshot old_snapshot = block_to_snapshot_[loop_header.index()].value();
        Snapshot snapshot = types_table_.Seal();
        // TODO(14108): The snapshot isn't needed at all, we only care about
        // whether the two snapshots are equivalent. Unfortunately, currently
        // this can only be answered by creating a merge snapshot.
        bool needs_revisit =
            CreateMergeSnapshot(base::VectorOf({old_snapshot, snapshot}),
                                base::VectorOf({true, true}));
        types_table_.Seal();  // Discard the snapshot.

        TRACE("[b%u] Loop header b%u reprocessed at backedge #%u: %s\n",
              block.index().id(), last->destination->index().id(),
              graph_.Index(block.LastOperation(graph_)).id(),
              needs_revisit ? "Scheduling loop body revisitation"
                            : "No revisit of loop body needed");

        // TODO(14108): This currently encodes a fixed-point analysis where the
        // analysis is finished once the backedge doesn't provide updated type
        // information any more compared to the previous iteration. This could
        // be stopped in cases where the backedge only refines types (i.e. only
        // defines more precise types than the previous iteration).
        if (needs_revisit) {
          block_to_snapshot_[loop_header.index()] = MaybeSnapshot(snapshot);
          if (block.index() != loop_header.index()) {
            // This will push the successors of the loop header to the iterator
            // stack, so the loop body will be visited in the next iteration.
            iterator.MarkLoopForRevisitSkipHeader();
          } else {
            // A single-block loop doesn't have any successors which would be
            // re-evaluated and which might trigger another re-evaluation of
            // the loop header.
            // TODO(mliedtke): This is not a great design: We don't just
            // schedule revisiting the loop header but afterwards we revisit it
            // once again to evaluate whether we need to revisit it more times,
            // so for single-block loops the revisitation count will always be
            // a multiple of 2. While this is inefficient, single-block loops
            // are rare and are either endless loops or need to trigger an
            // exception (e.g. a wasm trap) to terminate.
            iterator.MarkLoopForRevisit();
          }
        }
      }
    }
  }
}
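
// Illustrative sketch (not part of the V8 sources): the loop handling above is
// a fixed-point iteration -- the backedge state is merged into the loop header
// and the body is re-scheduled until the merged state stops changing. The
// self-contained toy below shows that shape on a lattice of integer upper
// bounds; every name in it (ToyMerge, RunToyLoopAnalysis, ...) is invented for
// this illustration and does not exist in V8.
namespace toy_fixed_point_sketch {

// Merging two states keeps the less precise one, mirroring wasm::Union.
inline int ToyMerge(int a, int b) { return a > b ? a : b; }

// Stand-in for one pass over the loop body; it may widen the header state.
inline int ToyRunBodyOnce(int header_state) { return header_state / 2 + 4; }

// Keep re-evaluating until the backedge no longer changes the header state
// (the analogue of needs_revisit above). The merged value is monotonically
// non-decreasing and bounded, so the loop terminates.
inline int RunToyLoopAnalysis(int forward_edge_state) {
  int header = forward_edge_state;
  while (true) {
    int merged = ToyMerge(header, ToyRunBodyOnce(header));
    bool needs_revisit = merged != header;
    if (!needs_revisit) return header;
    header = merged;
  }
}

}  // namespace toy_fixed_point_sketch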

void WasmGCTypeAnalyzer::ProcessBlock(const Block& block) {
  DCHECK_NULL(current_block_);
  current_block_ = &block;
  StartNewSnapshotFor(block);
  ProcessOperations(block);
  current_block_ = nullptr;
}

void WasmGCTypeAnalyzer::StartNewSnapshotFor(const Block& block) {
  is_first_loop_header_evaluation_ = false;
  // Reset reachability information. This can be outdated in case of loop
  // revisits. Below the reachability is calculated again and potentially
  // re-added.
  bool block_was_previously_reachable = IsReachable(block);
  if (!block_was_previously_reachable) {
    TRACE("[b%u] Removing unreachable flag as block is re-evaluated\n",
          block.index().id());
  }
  block_is_unreachable_.Remove(block.index().id());
  // Start a new snapshot based on predecessor information.
  if (block.HasPredecessors() == 0) {
    // The first block just starts with an empty snapshot.
    DCHECK_EQ(block.index().id(), 0);
    types_table_.StartNewSnapshot();
  } else if (block.IsLoop()) {
    const Block& forward_predecessor =
        *block.LastPredecessor()->NeighboringPredecessor();
    if (!IsReachable(forward_predecessor)) {
      // If a loop isn't reachable through its forward edge, it can't possibly
      // become reachable via the backedge.
      TRACE(
          "[b%uu] Loop unreachable as forward predecessor b%u is unreachable\n",
          block.index().id(), forward_predecessor.index().id());
      block_is_unreachable_.Add(block.index().id());
    }
    MaybeSnapshot back_edge_snap =
        block_to_snapshot_[block.LastPredecessor()->index()];
    if (back_edge_snap.has_value() && block_was_previously_reachable) {
      // The loop was already visited at least once. In this case use the
      // available information from the backedge.
      // Note that we only do this if the loop wasn't marked as unreachable
      // before. This solves an issue where a single-block loop would think the
      // backedge is reachable because we just removed the unreachable flag
      // above. Once the analyzer hits the backedge, it will re-evaluate
      // whether the backedge changes any analysis results and then potentially
      // revisit this loop with forward edge and backedge.
      CreateMergeSnapshot(block);
    } else {
      // The loop wasn't visited yet. There isn't any type information
      // available for the backedge.
      TRACE(
          "[b%u%s] First loop header evaluation: Ignoring all backedges on "
          "phis\n",
          block.index().id(), !IsReachable(*current_block_) ? "u" : "");
      is_first_loop_header_evaluation_ = true;
      Snapshot forward_edge_snap =
          block_to_snapshot_[forward_predecessor.index()].value();
      types_table_.StartNewSnapshot(forward_edge_snap);
    }
  } else if (block.IsBranchTarget()) {
    DCHECK_EQ(block.PredecessorCount(), 1);
    const Block& predecessor = *block.LastPredecessor();
    types_table_.StartNewSnapshot(
        block_to_snapshot_[predecessor.index()].value());
    if (IsReachable(predecessor)) {
      const BranchOp* branch =
          block.Predecessors()[0]->LastOperation(graph_).TryCast<BranchOp>();
      if (branch != nullptr) {
        ProcessBranchOnTarget(*branch, block);
      }
    } else {
      TRACE("[b%uu] Block unreachable as sole predecessor b%u is unreachable\n",
            block.index().id(), predecessor.index().id());
      block_is_unreachable_.Add(block.index().id());
    }
  } else {
    DCHECK_EQ(block.kind(), Block::Kind::kMerge);
    CreateMergeSnapshot(block);
  }
}

void WasmGCTypeAnalyzer::ProcessOperations(const Block& block) {
  for (OpIndex op_idx : graph_.OperationIndices(block)) {
    Operation& op = graph_.Get(op_idx);
    switch (op.opcode) {
      case Opcode::kWasmTypeCast:
        ProcessTypeCast(op.Cast<WasmTypeCastOp>());
        break;
      case Opcode::kWasmTypeCheck:
        ProcessTypeCheck(op.Cast<WasmTypeCheckOp>());
        break;
      case Opcode::kAssertNotNull:
        ProcessAssertNotNull(op.Cast<AssertNotNullOp>());
        break;
      case Opcode::kNull:
        ProcessNull(op.Cast<NullOp>());
        break;
      case Opcode::kIsNull:
        ProcessIsNull(op.Cast<IsNullOp>());
        break;
      case Opcode::kParameter:
        ProcessParameter(op.Cast<ParameterOp>());
        break;
      case Opcode::kStructGet:
        ProcessStructGet(op.Cast<StructGetOp>());
        break;
      case Opcode::kStructSet:
        ProcessStructSet(op.Cast<StructSetOp>());
        break;
      case Opcode::kArrayGet:
        ProcessArrayGet(op.Cast<ArrayGetOp>());
        break;
      case Opcode::kArrayLength:
        ProcessArrayLength(op.Cast<ArrayLengthOp>());
        break;
      case Opcode::kGlobalGet:
        ProcessGlobalGet(op.Cast<GlobalGetOp>());
        break;
      case Opcode::kWasmRefFunc:
        ProcessRefFunc(op.Cast<WasmRefFuncOp>());
        break;
      case Opcode::kWasmAllocateArray:
        ProcessAllocateArray(op.Cast<WasmAllocateArrayOp>());
        break;
      case Opcode::kWasmAllocateStruct:
        ProcessAllocateStruct(op.Cast<WasmAllocateStructOp>());
        break;
      case Opcode::kPhi:
        ProcessPhi(op.Cast<PhiOp>());
        break;
      case Opcode::kWasmTypeAnnotation:
        ProcessTypeAnnotation(op.Cast<WasmTypeAnnotationOp>());
        break;
      case Opcode::kBranch:
        // Branch conditions that imply type information are handled at the
        // beginning of the successor block.
      default:
        break;
    }
  }
}

void WasmGCTypeAnalyzer::ProcessTypeCast(const WasmTypeCastOp& type_cast) {
  V<Object> object = type_cast.object();
  wasm::ValueType target_type = type_cast.config.to;
  wasm::ValueType known_input_type =
      RefineTypeKnowledge(object, target_type, type_cast);
  input_type_map_[graph_.Index(type_cast)] = known_input_type;
}

void WasmGCTypeAnalyzer::ProcessTypeCheck(const WasmTypeCheckOp& type_check) {
  wasm::ValueType type = GetResolvedType(type_check.object());
  input_type_map_[graph_.Index(type_check)] = type;
}

void WasmGCTypeAnalyzer::ProcessAssertNotNull(
    const AssertNotNullOp& assert_not_null) {
  V<Object> object = assert_not_null.object();
  wasm::ValueType new_type = assert_not_null.type.AsNonNull();
  wasm::ValueType known_input_type =
      RefineTypeKnowledge(object, new_type, assert_not_null);
  input_type_map_[graph_.Index(assert_not_null)] = known_input_type;
}

void WasmGCTypeAnalyzer::ProcessIsNull(const IsNullOp& is_null) {
  input_type_map_[graph_.Index(is_null)] = GetResolvedType(is_null.object());
}

void WasmGCTypeAnalyzer::ProcessParameter(const ParameterOp& parameter) {
  if (parameter.parameter_index != wasm::kWasmInstanceDataParameterIndex) {
    RefineTypeKnowledge(graph_.Index(parameter),
                        signature_->GetParam(parameter.parameter_index - 1),
                        parameter);
  }
}

void WasmGCTypeAnalyzer::ProcessStructGet(const StructGetOp& struct_get) {
  // struct.get performs a null check.
  wasm::ValueType type =
      RefineTypeKnowledgeNotNull(struct_get.object(), struct_get);
  input_type_map_[graph_.Index(struct_get)] = type;
  RefineTypeKnowledge(graph_.Index(struct_get),
                      struct_get.type->field(struct_get.field_index).Unpacked(),
                      struct_get);
}

void WasmGCTypeAnalyzer::ProcessStructSet(const StructSetOp& struct_set) {
  // struct.set performs a null check.
  wasm::ValueType type =
      RefineTypeKnowledgeNotNull(struct_set.object(), struct_set);
  input_type_map_[graph_.Index(struct_set)] = type;
}

void WasmGCTypeAnalyzer::ProcessArrayGet(const ArrayGetOp& array_get) {
  // array.get traps on null. (Typically the null check is already covered by
  // the array length access needed for the bounds check.)
  RefineTypeKnowledgeNotNull(array_get.array(), array_get);
  // The result type is at least the static array element type.
  RefineTypeKnowledge(graph_.Index(array_get),
                      array_get.array_type->element_type().Unpacked(),
                      array_get);
}

void WasmGCTypeAnalyzer::ProcessArrayLength(const ArrayLengthOp& array_length) {
  // array.len performs a null check.
  wasm::ValueType type =
      RefineTypeKnowledgeNotNull(array_length.array(), array_length);
  input_type_map_[graph_.Index(array_length)] = type;
}

void WasmGCTypeAnalyzer::ProcessGlobalGet(const GlobalGetOp& global_get) {
  RefineTypeKnowledge(graph_.Index(global_get), global_get.global->type,
                      global_get);
}

void WasmGCTypeAnalyzer::ProcessRefFunc(const WasmRefFuncOp& ref_func) {
  wasm::ModuleTypeIndex sig_index =
      module_->functions[ref_func.function_index].sig_index;
  RefineTypeKnowledge(graph_.Index(ref_func),
                      wasm::ValueType::Ref(module_->heap_type(sig_index)),
                      ref_func);
}

void WasmGCTypeAnalyzer::ProcessAllocateArray(
    const WasmAllocateArrayOp& allocate_array) {
  wasm::ModuleTypeIndex type_index =
      graph_.Get(allocate_array.rtt()).Cast<RttCanonOp>().type_index;
  RefineTypeKnowledge(graph_.Index(allocate_array),
                      wasm::ValueType::Ref(module_->heap_type(type_index)),
                      allocate_array);
}

void WasmGCTypeAnalyzer::ProcessAllocateStruct(
    const WasmAllocateStructOp& allocate_struct) {
  Operation& rtt = graph_.Get(allocate_struct.rtt());
  wasm::ModuleTypeIndex type_index;
  if (RttCanonOp* canon = rtt.TryCast<RttCanonOp>()) {
    type_index = canon->type_index;
  } else if (LoadOp* load = rtt.TryCast<LoadOp>()) {
    DCHECK(load->kind.tagged_base && load->offset == WasmStruct::kHeaderSize);
    OpIndex descriptor = load->base();
    wasm::ValueType desc_type = types_table_.Get(descriptor);
    if (!desc_type.has_index()) {
      // We hope that this happens rarely or never. If there is evidence that
      // we get this case a lot, we should store the original struct.new
      // operation's type index immediate on the {WasmAllocateStructOp} to
      // use it as a better upper bound than "structref" here.
      RefineTypeKnowledge(graph_.Index(allocate_struct), wasm::kWasmStructRef,
                          allocate_struct);
      return;
    }
    const wasm::TypeDefinition& desc_typedef =
        module_->type(desc_type.ref_index());
    DCHECK(desc_typedef.is_descriptor());
    type_index = desc_typedef.describes;
  } else {
    // The graph builder only emits the two patterns above.
    UNREACHABLE();
  }
  RefineTypeKnowledge(graph_.Index(allocate_struct),
                      wasm::ValueType::Ref(module_->heap_type(type_index)),
                      allocate_struct);
}

wasm::ValueType WasmGCTypeAnalyzer::GetTypeForPhiInput(const PhiOp& phi,
                                                       int input_index) {
  OpIndex phi_id = graph_.Index(phi);
  OpIndex input = ResolveAliases(phi.input(input_index));
  // If the input of the phi is in the same block as the phi and appears
  // before the phi, don't use the predecessor value.
  if (current_block_->begin().id() <= input.id() && input.id() < phi_id.id()) {
    // Phi instructions have to be at the beginning of the block, so this can
    // only happen for inputs that are also phis. Furthermore, this is only
    // possible in loop headers and only for the backedge input.
    DCHECK(graph_.Get(input).Is<PhiOp>());
    DCHECK(current_block_->IsLoop());
    DCHECK_EQ(input_index, 1);
    return types_table_.Get(input);
  }
  return types_table_.GetPredecessorValue(input, input_index);
}

void WasmGCTypeAnalyzer::ProcessPhi(const PhiOp& phi) {
  // The result type of a phi is the union of all its input types.
  // If any of the inputs is the default value ValueType(), there isn't any
  // type knowledge inferrable.
  DCHECK_GT(phi.input_count, 0);
  if (is_first_loop_header_evaluation_) {
    // We don't know anything about the backedge yet, so we only use the
    // forward edge. We will revisit the loop header again once the block with
    // the backedge is evaluated.
    RefineTypeKnowledge(graph_.Index(phi), GetResolvedType((phi.input(0))),
                        phi);
    return;
  }
  wasm::ValueType union_type = GetTypeForPhiInput(phi, 0);
  if (union_type == wasm::ValueType()) return;
  for (int i = 1; i < phi.input_count; ++i) {
    wasm::ValueType input_type = GetTypeForPhiInput(phi, i);
    if (input_type == wasm::ValueType()) return;
    // <bottom> types have to be skipped as an unreachable predecessor doesn't
    // change our type knowledge.
    // TODO(mliedtke): Ideally, we'd skip unreachable predecessors here
    // completely, as we might loosen the known type due to an unreachable
    // predecessor.
    if (input_type.is_uninhabited()) continue;
    if (union_type.is_uninhabited()) {
      union_type = input_type;
    } else {
      union_type = wasm::Union(union_type, input_type, module_, module_).type;
    }
  }
  RefineTypeKnowledge(graph_.Index(phi), union_type, phi);
  if (v8_flags.trace_wasm_typer) {
    for (int i = 0; i < phi.input_count; ++i) {
      OpIndex input = phi.input(i);
      wasm::ValueType type = GetResolvedType(input);
      TRACE("- phi input %d: #%u(%s) -> %s\n", i, input.id(),
            OpcodeName(graph_.Get(input).opcode), type.name().c_str());
    }
  }
}
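
// Illustrative sketch (not part of the V8 sources): ProcessPhi above types a
// phi as the union (least common supertype) of its input types. The
// self-contained toy below computes such a union on a tiny hard-coded
// hierarchy; the enum and helpers (ToyHeapType, ToyUnion, TypeToyPhi, ...) are
// invented for illustration and are unrelated to V8's wasm::ValueType
// machinery.
namespace toy_phi_union_sketch {

// A miniature heap-type hierarchy: kAny is the root, kEq sits below it, and
// kI31/kStruct/kArray are leaves below kEq. kBottom is the empty type.
enum ToyHeapType { kBottom, kI31, kStruct, kArray, kEq, kAny };

inline ToyHeapType ToyParent(ToyHeapType t) {
  switch (t) {
    case kI31:
    case kStruct:
    case kArray:
      return kEq;
    case kEq:
      return kAny;
    default:
      return t;  // kBottom and kAny have no distinct parent here.
  }
}

inline bool ToyIsSubtype(ToyHeapType sub, ToyHeapType super) {
  if (sub == kBottom) return true;
  ToyHeapType t = sub;
  while (true) {
    if (t == super) return true;
    ToyHeapType parent = ToyParent(t);
    if (parent == t) return false;  // Reached the root without hitting super.
    t = parent;
  }
}

// Union of two types: the least common supertype.
inline ToyHeapType ToyUnion(ToyHeapType a, ToyHeapType b) {
  if (a == kBottom) return b;
  if (b == kBottom) return a;
  ToyHeapType t = a;
  while (!ToyIsSubtype(b, t)) t = ToyParent(t);
  return t;
}

// The phi rule from above: union all non-bottom inputs.
inline ToyHeapType TypeToyPhi(const ToyHeapType* inputs, int count) {
  ToyHeapType result = kBottom;
  for (int i = 0; i < count; ++i) {
    // Bottom inputs come from unreachable predecessors and are skipped, just
    // like the is_uninhabited() check in ProcessPhi.
    if (inputs[i] == kBottom) continue;
    result = ToyUnion(result, inputs[i]);
  }
  return result;  // E.g., {kI31, kStruct} -> kEq; {kI31, kBottom} -> kI31.
}

}  // namespace toy_phi_union_sketch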

void WasmGCTypeAnalyzer::ProcessTypeAnnotation(
    const WasmTypeAnnotationOp& type_annotation) {
  RefineTypeKnowledge(type_annotation.value(), type_annotation.type,
                      type_annotation);
}

void WasmGCTypeAnalyzer::ProcessBranchOnTarget(const BranchOp& branch,
                                               const Block& target) {
  DCHECK_EQ(current_block_, &target);
  const Operation& condition = graph_.Get(branch.condition());
  switch (condition.opcode) {
    case Opcode::kWasmTypeCheck: {
      const WasmTypeCheckOp& check = condition.Cast<WasmTypeCheckOp>();
      if (branch.if_true == &target) {
        // It is known from now on that the type is at least the checked one.
        RefineTypeKnowledge(check.object(), check.config.to, branch);
      } else {
        DCHECK_EQ(branch.if_false, &target);
        if (wasm::IsSubtypeOf(GetResolvedType(check.object()), check.config.to,
                              module_)) {
          // The type check always succeeds, so the false target cannot be
          // reached.
          DCHECK_EQ(target.PredecessorCount(), 1);
          block_is_unreachable_.Add(target.index().id());
          TRACE(
              "[b%uu] Block unreachable as #%u(%s) used in #%u(%s) is always "
              "true\n",
              target.index().id(), branch.condition().id(),
              OpcodeName(condition.opcode), graph_.Index(branch).id(),
              OpcodeName(branch.opcode));
        }
      }
    } break;
    case Opcode::kIsNull: {
      const IsNullOp& is_null = condition.Cast<IsNullOp>();
      if (branch.if_true == &target) {
        if (GetResolvedType(is_null.object()).is_non_nullable()) {
          // The object is known to be non-null, so the true target cannot be
          // reached.
          DCHECK_EQ(target.PredecessorCount(), 1);
          block_is_unreachable_.Add(target.index().id());
          TRACE(
              "[b%uu] Block unreachable as #%u(%s) used in #%u(%s) is always "
              "false\n",
              target.index().id(), branch.condition().id(),
              OpcodeName(condition.opcode), graph_.Index(branch).id(),
              OpcodeName(branch.opcode));
          return;
        }
        RefineTypeKnowledge(is_null.object(),
                            wasm::ToNullSentinel({is_null.type, module_}),
                            branch);
      } else {
        DCHECK_EQ(branch.if_false, &target);
        RefineTypeKnowledge(is_null.object(), is_null.type.AsNonNull(), branch);
      }
    } break;
    default:
      break;
  }
}
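
// Illustrative sketch (not part of the V8 sources): ProcessBranchOnTarget
// propagates what a branch condition implies into each successor, e.g. an
// is_null check makes the object null on the true edge and non-null on the
// false edge, and a statically decided branch marks the dead successor
// unreachable. The self-contained toy below applies those rules to a minimal
// "nullable reference" type; ToyRefType, RefineOnIsNullBranch and
// SuccessorIsUnreachable are invented names used only for this illustration.
namespace toy_branch_refinement_sketch {

struct ToyRefType {
  bool nullable;       // May the value be null?
  bool is_null_const;  // Is the value known to be exactly null?
};

// Returns the refined type of the tested object inside the given successor.
inline ToyRefType RefineOnIsNullBranch(ToyRefType before, bool on_true_edge) {
  if (on_true_edge) {
    // The true edge: the value is exactly null (cf. wasm::ToNullSentinel).
    before.nullable = true;
    before.is_null_const = true;
  } else {
    // The false edge: the value is definitely not null (cf. AsNonNull()).
    before.nullable = false;
    before.is_null_const = false;
  }
  return before;
}

// A branch whose outcome is statically known makes one successor dead,
// mirroring block_is_unreachable_ above.
inline bool SuccessorIsUnreachable(ToyRefType before, bool on_true_edge) {
  if (!before.nullable && on_true_edge) return true;       // Never null.
  if (before.is_null_const && !on_true_edge) return true;  // Always null.
  return false;
}

}  // namespace toy_branch_refinement_sketch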

void WasmGCTypeAnalyzer::ProcessNull(const NullOp& null) {
  wasm::ValueType null_type = wasm::ToNullSentinel({null.type, module_});
  RefineTypeKnowledge(graph_.Index(null), null_type, null);
}

void WasmGCTypeAnalyzer::CreateMergeSnapshot(const Block& block) {
  base::SmallVector<Snapshot, 8> snapshots;
  // Unreachable predecessors should be ignored when merging but we can't
  // remove them from the predecessors as that would mess up the phi inputs.
  // Therefore the reachability of the predecessors is passed as a separate
  // list.
  base::SmallVector<bool, 8> reachable;
  bool all_predecessors_unreachable = true;
  for (const Block* predecessor : block.PredecessorsIterable()) {
    snapshots.push_back(block_to_snapshot_[predecessor->index()].value());
    bool predecessor_reachable = IsReachable(*predecessor);
    reachable.push_back(predecessor_reachable);
    all_predecessors_unreachable &= !predecessor_reachable;
  }
  if (all_predecessors_unreachable) {
    TRACE("[b%u] Block unreachable as all predecessors are unreachable\n",
          block.index().id());
    block_is_unreachable_.Add(block.index().id());
  } else if (v8_flags.trace_wasm_typer) {
    std::stringstream str;
    size_t i = 0;
    for (const Block* predecessor : block.PredecessorsIterable()) {
      if (i != 0) str << ", ";
      str << 'b' << predecessor->index().id() << (reachable[i] ? "" : "u");
      ++i;
    }
    TRACE("[b%u] Predecessors reachability: %s\n", block.index().id(),
          str.str().c_str());
  }
  // The predecessor snapshots need to be reversed to restore the "original"
  // order of predecessors. (This is used to map phi inputs to their
  // corresponding predecessor.)
  std::reverse(snapshots.begin(), snapshots.end());
  std::reverse(reachable.begin(), reachable.end());
  CreateMergeSnapshot(base::VectorOf(snapshots), base::VectorOf(reachable));
}

bool WasmGCTypeAnalyzer::CreateMergeSnapshot(
    base::Vector<const Snapshot> predecessors,
    base::Vector<const bool> reachable) {
  DCHECK_EQ(predecessors.size(), reachable.size());
  // The merging logic is also used to evaluate if two snapshots are
  // "identical", i.e. the known types for all operations are the same.
  bool types_are_equivalent = true;
  types_table_.StartNewSnapshot(
      predecessors, [this, &types_are_equivalent, reachable](
                        TypeSnapshotTable::Key,
                        base::Vector<const wasm::ValueType> predecessors) {
        DCHECK_GT(predecessors.size(), 1);
        size_t i = 0;
        // Initialize the type based on the first reachable predecessor.
        wasm::ValueType first = wasm::kWasmBottom;
        for (; i < reachable.size(); ++i) {
          // Uninhabited types can only occur in unreachable code, e.g. as a
          // result of an always-failing cast. Still, reachability tracking
          // might in some cases miss that a block becomes unreachable, so we
          // also check for uninhabited types in the if below.
          DCHECK_IMPLIES(reachable[i], !predecessors[i].is_uninhabited());
          if (reachable[i] && !predecessors[i].is_uninhabited()) {
            first = predecessors[i];
            ++i;
            break;
          }
        }

        wasm::ValueType res = first;
        for (; i < reachable.size(); ++i) {
          if (!reachable[i]) continue;  // Skip unreachable predecessors.
          wasm::ValueType type = predecessors[i];
          // Uninhabited types can only occur in unreachable code, e.g. as a
          // result of an always-failing cast. Still, reachability tracking
          // might in some cases miss that a block becomes unreachable, so we
          // also check for uninhabited types in the if below.
          DCHECK(!type.is_uninhabited());
          if (type.is_uninhabited()) continue;
          types_are_equivalent &= first == type;
          if (res == wasm::ValueType() || type == wasm::ValueType()) {
            res = wasm::ValueType();
          } else {
            res = wasm::Union(res, type, module_, module_).type;
          }
        }
        return res;
      });
  return !types_are_equivalent;
}
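
// Illustrative sketch (not part of the V8 sources): the callback above merges,
// per tracked operation, the types seen in all reachable predecessors and
// simultaneously records whether they were all identical -- that single bit is
// what Run() uses as needs_revisit. The self-contained toy below does the same
// for one operation over plain integer "types" merged via max; ToyMergeResult
// and MergeToyEntry are invented names for this illustration only.
namespace toy_merge_snapshot_sketch {

struct ToyMergeResult {
  int merged_type;      // Union over the reachable predecessors.
  bool all_equivalent;  // True if every reachable value was identical.
};

inline ToyMergeResult MergeToyEntry(const int* predecessor_types,
                                    const bool* reachable, int count) {
  ToyMergeResult result{/*merged_type=*/0, /*all_equivalent=*/true};
  bool seen_any = false;
  int first = 0;
  for (int i = 0; i < count; ++i) {
    if (!reachable[i]) continue;  // Unreachable predecessors are ignored.
    if (!seen_any) {
      seen_any = true;
      first = predecessor_types[i];
      result.merged_type = first;
      continue;
    }
    result.all_equivalent &= predecessor_types[i] == first;
    // "Union" on this toy lattice: keep the less precise (larger) bound.
    if (predecessor_types[i] > result.merged_type) {
      result.merged_type = predecessor_types[i];
    }
  }
  return result;
}

}  // namespace toy_merge_snapshot_sketch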

wasm::ValueType WasmGCTypeAnalyzer::RefineTypeKnowledge(
    OpIndex object, wasm::ValueType new_type, const Operation& op) {
  DCHECK_NOT_NULL(current_block_);
  object = ResolveAliases(object);
  wasm::ValueType previous_value = types_table_.Get(object);
  wasm::ValueType intersection_type =
      previous_value == wasm::ValueType()
          ? new_type
          : wasm::Intersection(previous_value, new_type, module_, module_).type;
  if (intersection_type == previous_value) return previous_value;

  TRACE("[b%u%s] #%u(%s): Refine type for object #%u(%s) -> %s%s\n",
        current_block_->index().id(), !IsReachable(*current_block_) ? "u" : "",
        graph_.Index(op).id(), OpcodeName(op.opcode), object.id(),
        OpcodeName(graph_.Get(object).opcode), intersection_type.name().c_str(),
        intersection_type.is_uninhabited() ? " (unreachable!)" : "");

  types_table_.Set(object, intersection_type);
  if (intersection_type.is_uninhabited()) {
    // After this instruction all other instructions in the current block are
    // unreachable.
    block_is_unreachable_.Add(current_block_->index().id());
    // Return bottom to indicate that the operation `op` shall always trap.
    return wasm::kWasmBottom;
  }
  return previous_value;
}
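
// Illustrative sketch (not part of the V8 sources): RefineTypeKnowledge above
// intersects what is already known about an object with a newly learned fact,
// stores the (possibly more precise) result, and reports bottom when the
// intersection is empty -- i.e. the operation can never succeed. The
// self-contained toy below applies the same rule to integer ranges; ToyRange
// and RefineToyRange are invented names for this illustration only.
namespace toy_refinement_sketch {

struct ToyRange {
  int lo;
  int hi;
  bool IsEmpty() const { return lo > hi; }  // Analogue of is_uninhabited().
};

// Intersect the previously known range with a newly learned one. An empty
// result means the code path relying on it is unreachable (cf. returning
// wasm::kWasmBottom above).
inline ToyRange RefineToyRange(ToyRange known, ToyRange learned) {
  return ToyRange{known.lo > learned.lo ? known.lo : learned.lo,
                  known.hi < learned.hi ? known.hi : learned.hi};
}

// Example: knowing x is in [0, 10] and learning x is in [8, 20] refines to
// [8, 10]; learning x is in [20, 30] instead refines to the empty range.
inline bool RefinementProvesUnreachable(ToyRange known, ToyRange learned) {
  return RefineToyRange(known, learned).IsEmpty();
}

}  // namespace toy_refinement_sketch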

wasm::ValueType WasmGCTypeAnalyzer::RefineTypeKnowledgeNotNull(
    OpIndex object, const Operation& op) {
  object = ResolveAliases(object);
  wasm::ValueType previous_value = types_table_.Get(object);
  if (previous_value.is_non_nullable()) return previous_value;

  wasm::ValueType not_null_type = previous_value.AsNonNull();
  TRACE("[b%u%s] #%u(%s): Refine type for object #%u(%s) -> %s%s\n",
        current_block_->index().id(), !IsReachable(*current_block_) ? "u" : "",
        graph_.Index(op).id(), OpcodeName(op.opcode), object.id(),
        OpcodeName(graph_.Get(object).opcode), not_null_type.name().c_str(),
        not_null_type.is_uninhabited() ? " (unreachable!)" : "");

  types_table_.Set(object, not_null_type);
  if (not_null_type.is_uninhabited()) {
    // After this instruction all other instructions in the current block are
    // unreachable.
    block_is_unreachable_.Add(current_block_->index().id());
    // Return bottom to indicate that the operation `op` shall always trap.
    return wasm::kWasmBottom;
  }
  return previous_value;
}

OpIndex WasmGCTypeAnalyzer::ResolveAliases(OpIndex object) const {
  while (true) {
    const Operation* op = &graph_.Get(object);
    switch (op->opcode) {
      case Opcode::kWasmTypeCast:
        object = op->Cast<WasmTypeCastOp>().object();
        break;
      case Opcode::kAssertNotNull:
        object = op->Cast<AssertNotNullOp>().object();
        break;
      case Opcode::kWasmTypeAnnotation:
        object = op->Cast<WasmTypeAnnotationOp>().value();
        break;
      default:
        return object;
    }
  }
}
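
// Illustrative sketch (not part of the V8 sources): ResolveAliases above walks
// through operations that merely forward their input (casts, null assertions,
// type annotations) so that refinements are recorded on the underlying object.
// The self-contained toy below follows the same chain-walking pattern over a
// tiny node array; ToyNode and ResolveToyAlias are invented names for this
// illustration only, and alias chains are assumed to be acyclic.
namespace toy_alias_resolution_sketch {

struct ToyNode {
  bool is_alias;  // Does this node just forward another node's value?
  int input;      // Index of the forwarded node (only valid if is_alias).
};

// Follow alias nodes until a non-alias definition is reached.
inline int ResolveToyAlias(const ToyNode* nodes, int index) {
  while (nodes[index].is_alias) index = nodes[index].input;
  return index;
}

// Example: with nodes[2] = {true, 1}, nodes[1] = {true, 0}, and
// nodes[0] = {false, 0}, ResolveToyAlias(nodes, 2) returns 0, so facts learned
// about node 2 are stored for node 0, mirroring how RefineTypeKnowledge
// records refinements on the resolved object above.

}  // namespace toy_alias_resolution_sketch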

bool WasmGCTypeAnalyzer::IsReachable(const Block& block) const {
  return !block_is_unreachable_.Contains(block.index().id());
}

wasm::ValueType WasmGCTypeAnalyzer::GetResolvedType(OpIndex object) const {
  return types_table_.Get(ResolveAliases(object));
}

#undef TRACE

}  // namespace v8::internal::compiler::turboshaft