v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
late-escape-analysis-reducer.cc
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/turboshaft/late-escape-analysis-reducer.h"

namespace v8::internal::compiler::turboshaft {

void LateEscapeAnalysisAnalyzer::Run() {
  CollectUsesAndAllocations();
  FindRemovableAllocations();
}

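// Records that {use} is an operation that takes {alloc} as one of its inputs.
// Use lists are created lazily in {phase_zone_}, reserving capacity based on
// the allocation's (saturated) use count.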
void LateEscapeAnalysisAnalyzer::RecordAllocateUse(OpIndex alloc, OpIndex use) {
  auto [it, new_entry] = alloc_uses_.try_emplace(alloc, phase_zone_);
  auto& uses = it->second;
  if (new_entry) {
    uses.reserve(graph_.Get(alloc).saturated_use_count.Get());
  }
  uses.push_back(use);
}

// Collects the Allocate Operations and their uses.
void LateEscapeAnalysisAnalyzer::CollectUsesAndAllocations() {
  for (auto& op : graph_.AllOperations()) {
    if (ShouldSkipOperation(op)) continue;
    OpIndex op_index = graph_.Index(op);
    for (OpIndex input : op.inputs()) {
      if (graph_.Get(input).Is<AllocateOp>()) {
        RecordAllocateUse(input, op_index);
      }
    }
    if (op.Is<AllocateOp>()) {
      allocs_.push_back(op_index);
    }
  }
}

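// Drains {allocs_} as a worklist: any allocation that does not escape is
// removed. Removing one allocation can make further allocations removable,
// so entries may be re-queued by MarkToRemove below.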
void LateEscapeAnalysisAnalyzer::FindRemovableAllocations() {
  while (!allocs_.empty()) {
    OpIndex current_alloc = allocs_.back();
    allocs_.pop_back();

    if (ShouldSkipOperation(graph_.Get(current_alloc))) {
      // We are re-visiting an allocation that we've actually already removed.
      continue;
    }

    if (!AllocationIsEscaping(current_alloc)) {
      MarkToRemove(current_alloc);
    }
  }
}

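// Returns true if {alloc} has at least one use that forces it to be emitted.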
bool LateEscapeAnalysisAnalyzer::AllocationIsEscaping(OpIndex alloc) {
  if (alloc_uses_.find(alloc) == alloc_uses_.end()) return false;
  for (OpIndex use : alloc_uses_.at(alloc)) {
    if (EscapesThroughUse(alloc, use)) return true;
  }
  // We haven't found any non-store use
  return false;
}

// Returns true if {using_op_idx} is an operation that forces {alloc} to be
// emitted.
bool LateEscapeAnalysisAnalyzer::EscapesThroughUse(OpIndex alloc,
                                                   OpIndex using_op_idx) {
  if (ShouldSkipOperation(graph_.Get(alloc))) {
    // {using_op_idx} is an Allocate itself, which has been removed.
    return false;
  }
  const Operation& op = graph_.Get(using_op_idx);
  if (const StoreOp* store_op = op.TryCast<StoreOp>()) {
    // A StoreOp only makes {alloc} escape if it uses {alloc} as the {value} or
    // the {index}. Put otherwise, StoreOp makes {alloc} escape if it writes
    // {alloc}, but not if it writes **to** {alloc}.
    return store_op->value() == alloc;
  }
  return true;
}

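// Removes {alloc} together with all of its uses which, since {alloc} does not
// escape, are all stores into the allocation. If one of those stores was
// writing another allocation, that allocation loses a use and is pushed back
// onto {allocs_} for another escape check.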
void LateEscapeAnalysisAnalyzer::MarkToRemove(OpIndex alloc) {
  if (ShouldSkipOptimizationStep()) return;
  graph_.KillOperation(alloc);
  if (alloc_uses_.find(alloc) == alloc_uses_.end()) {
    return;
  }

  // The uses of {alloc} should also be skipped.
  for (OpIndex use : alloc_uses_.at(alloc)) {
    const StoreOp& store = graph_.Get(use).Cast<StoreOp>();
    if (graph_.Get(store.value()).Is<AllocateOp>()) {
      // This store was storing the result of an allocation. Because we now
      // removed this store, we might be able to remove the other allocation
      // as well.
      allocs_.push_back(store.value());
    }
    graph_.KillOperation(use);
  }
}

}  // namespace v8::internal::compiler::turboshaft
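
For readers who want to experiment with the idea in isolation, below is a minimal, self-contained sketch of the same two-phase algorithm. The Op and Kind types and the toy graph are hypothetical stand-ins invented for this example; only the worklist logic and the store-direction test mirror the file above, and details such as Turboshaft's dead-operation handling are simplified.

// late-escape-analysis-sketch.cc -- illustrative only, not part of V8.
#include <cstdio>
#include <unordered_map>
#include <vector>

enum class Kind { kAllocate, kStore, kOther };

struct Op {
  Kind kind;
  std::vector<int> inputs;  // For kStore: inputs[0] = base, inputs[1] = value.
  bool killed = false;
};

int main() {
  // op0: a = Allocate; op1: b = Allocate; op2: Store(b, a), i.e. a is written
  // *into* b. Nothing else uses a or b.
  std::vector<Op> ops = {{Kind::kAllocate, {}},
                         {Kind::kAllocate, {}},
                         {Kind::kStore, {1, 0}}};

  // Phase 1 (cf. CollectUsesAndAllocations): record allocations and uses.
  std::unordered_map<int, std::vector<int>> uses;
  std::vector<int> allocs;
  for (int i = 0; i < static_cast<int>(ops.size()); ++i) {
    for (int input : ops[i].inputs) {
      if (ops[input].kind == Kind::kAllocate) uses[input].push_back(i);
    }
    if (ops[i].kind == Kind::kAllocate) allocs.push_back(i);
  }

  // A store makes {alloc} escape only if it stores {alloc} as the value;
  // storing *into* {alloc} does not (cf. EscapesThroughUse).
  auto escapes_through_use = [&](int alloc, int use) {
    if (ops[use].killed) return false;
    if (ops[use].kind == Kind::kStore) return ops[use].inputs[1] == alloc;
    return true;
  };

  // Phase 2 (cf. FindRemovableAllocations / MarkToRemove): worklist removal.
  while (!allocs.empty()) {
    int alloc = allocs.back();
    allocs.pop_back();
    if (ops[alloc].killed) continue;  // Re-visiting a removed allocation.

    bool escaping = false;
    for (int use : uses[alloc]) {
      if (escapes_through_use(alloc, use)) { escaping = true; break; }
    }
    if (escaping) continue;

    ops[alloc].killed = true;
    for (int use : uses[alloc]) {
      if (ops[use].killed) continue;
      // The killed store may have been the last use keeping the stored
      // allocation alive: re-queue that allocation for another escape check.
      if (ops[ops[use].inputs[1]].kind == Kind::kAllocate) {
        allocs.push_back(ops[use].inputs[1]);
      }
      ops[use].killed = true;
    }
  }

  for (int i = 0; i < static_cast<int>(ops.size()); ++i) {
    std::printf("op%d: %s\n", i, ops[i].killed ? "removed" : "kept");
  }
  return 0;
}

Compiled with a C++17 compiler (e.g. g++ -std=c++17), this prints that all three operations are removed: b never escapes, so b and the store are killed first, and killing the store removes the last use of a as a stored value.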