v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
memory-optimization-reducer.cc
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/turboshaft/memory-optimization-reducer.h"

#include <optional>

#include "src/roots/roots-inl.h"

namespace v8::internal::compiler::turboshaft {

void MemoryAnalyzer::Run() {
  block_states[current_block] = BlockState{};
  BlockIndex end = BlockIndex(input_graph.block_count());
  while (current_block < end) {
    state = *block_states[current_block];
    auto operations_range =
        input_graph.operations(input_graph.Get(current_block));
    // Set the next block index here already, to allow it to be changed if
    // needed.
    current_block = BlockIndex(current_block.id() + 1);
    for (const Operation& op : operations_range) {
      Process(op);
    }
  }
}

void MemoryAnalyzer::Process(const Operation& op) {
  if (ShouldSkipOperation(op)) {
    return;
  }

  if (auto* alloc = op.TryCast<AllocateOp>()) {
    ProcessAllocation(*alloc);
    return;
  }
  if (auto* store = op.TryCast<StoreOp>()) {
    ProcessStore(*store);
    return;
  }
  if (op.Effects().can_allocate) {
    state = BlockState();
  }
  if (op.IsBlockTerminator()) {
    ProcessBlockTerminator(op);
  }
}

// Update the successor block states based on the state of the current block.
// For loop backedges, we need to re-start the analysis from the loop header
// unless the backedge state is unchanged.
void MemoryAnalyzer::ProcessBlockTerminator(const Operation& terminator) {
  if (auto* goto_op = terminator.TryCast<GotoOp>()) {
    if (input_graph.IsLoopBackedge(*goto_op)) {
      std::optional<BlockState>& target_state =
          block_states[goto_op->destination->index()];
      BlockState old_state = *target_state;
      MergeCurrentStateIntoSuccessor(goto_op->destination);
      if (old_state != *target_state) {
        // We can never fold allocations inside of the loop into an
        // allocation before the loop, since this leads to unbounded
        // allocation size. An unknown `reserved_size` will prevent adding
        // allocations inside of the loop.
        target_state->reserved_size = std::nullopt;
        // Redo the analysis from the beginning of the loop.
        current_block = goto_op->destination->index();
      }
      return;
    } else if (goto_op->destination->IsLoop()) {
      // Look ahead to detect allocating loops earlier, avoiding a wrong
      // speculation resulting in processing the loop twice.
      for (const Operation& op :
           input_graph.operations(*goto_op->destination)) {
        if (op.Effects().can_allocate && !ShouldSkipOperation(op)) {
          state = BlockState();
          break;
        }
      }
    }
  }
  for (Block* successor : SuccessorBlocks(terminator)) {
    MergeCurrentStateIntoSuccessor(successor);
  }
}

// We try to merge the new allocation into a previous dominating allocation.
// We also allow folding allocations across blocks, as long as there is a
// dominating relationship.
void MemoryAnalyzer::ProcessAllocation(const AllocateOp& alloc) {
  if (ShouldSkipOptimizationStep()) return;
  std::optional<uint64_t> new_size;
  if (auto* size =
          input_graph.Get(alloc.size()).template TryCast<ConstantOp>()) {
    new_size = size->integral();
  }
  // If the new allocation has a static size and is of the same type, then we
  // can fold it into the previous allocation, unless the folded allocation
  // would exceed `kMaxRegularHeapObjectSize`.
  if (state.last_allocation && new_size.has_value() &&
      state.reserved_size.has_value() &&
      alloc.type == state.last_allocation->type &&
      *new_size <= kMaxRegularHeapObjectSize - *state.reserved_size) {
    state.reserved_size =
        static_cast<uint32_t>(*state.reserved_size + *new_size);
    folded_into[&alloc] = state.last_allocation;
    uint32_t& max_reserved_size = reserved_size[state.last_allocation];
    max_reserved_size = std::max(max_reserved_size, *state.reserved_size);
    return;
  }
  state.last_allocation = &alloc;
  state.reserved_size = std::nullopt;
  if (new_size.has_value() && *new_size <= kMaxRegularHeapObjectSize) {
    state.reserved_size = static_cast<uint32_t>(*new_size);
  }
  // We might be re-visiting the current block. In this case, we need to remove
  // an allocation that can no longer be folded.
  reserved_size.erase(&alloc);
  folded_into.erase(&alloc);
}

void MemoryAnalyzer::ProcessStore(const StoreOp& store) {
  V<None> store_op_index = input_graph.Index(store);
  if (SkipWriteBarrier(store)) {
    skipped_write_barriers.insert(store_op_index);
  } else {
    // We might be re-visiting the current block. In this case, we need to
    // still update the information.
    skipped_write_barriers.erase(store_op_index);
  }
}

void MemoryAnalyzer::MergeCurrentStateIntoSuccessor(const Block* successor) {
  std::optional<BlockState>& target_state = block_states[successor->index()];
  if (!target_state.has_value()) {
    target_state = state;
    return;
  }
  // All predecessors need to have the same last allocation for us to continue
  // folding into it.
  if (target_state->last_allocation != state.last_allocation) {
    target_state = BlockState();
    return;
  }
  // We take the maximum allocation size of all predecessors. If the size is
  // unknown because it is dynamic, we remember the allocation to eliminate
  // write barriers.
  if (target_state->reserved_size.has_value() &&
      state.reserved_size.has_value()) {
    target_state->reserved_size =
        std::max(*target_state->reserved_size, *state.reserved_size);
  } else {
    target_state->reserved_size = std::nullopt;
  }
}

}  // namespace v8::internal::compiler::turboshaft
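
The analysis above boils down to two pieces of per-block bookkeeping: which allocation is the current folding candidate (`last_allocation`) and how many bytes have been reserved for it so far (`reserved_size`). What follows is a minimal, self-contained sketch of that bookkeeping, not V8 code: the types `Alloc` and `BlockState` and the constant `kMaxObjectSize` are simplified stand-ins for `AllocateOp`, the analyzer's block state, and `kMaxRegularHeapObjectSize`.

// Illustrative sketch only; simplified stand-ins, not the V8 types.
#include <algorithm>
#include <cstdint>
#include <map>
#include <optional>

namespace sketch {

struct Alloc {
  int type;                      // stand-in for the allocation type
  std::optional<uint64_t> size;  // only set for constant-size allocations
};

struct BlockState {
  const Alloc* last_allocation = nullptr;
  std::optional<uint32_t> reserved_size;
};

constexpr uint32_t kMaxObjectSize = 1 << 17;  // stand-in size cap

// Folds `alloc` into the dominating candidate when both sizes are statically
// known, the types match, and the combined reservation stays under the cap;
// otherwise `alloc` becomes the new candidate.
void ProcessAllocation(const Alloc& alloc, BlockState& state,
                       std::map<const Alloc*, const Alloc*>& folded_into,
                       std::map<const Alloc*, uint32_t>& reserved_size) {
  if (state.last_allocation && alloc.size.has_value() &&
      state.reserved_size.has_value() &&
      alloc.type == state.last_allocation->type &&
      *alloc.size <= kMaxObjectSize - *state.reserved_size) {
    state.reserved_size =
        static_cast<uint32_t>(*state.reserved_size + *alloc.size);
    folded_into[&alloc] = state.last_allocation;
    uint32_t& max_reserved = reserved_size[state.last_allocation];
    max_reserved = std::max(max_reserved, *state.reserved_size);
    return;
  }
  state.last_allocation = &alloc;
  state.reserved_size = std::nullopt;
  if (alloc.size.has_value() && *alloc.size <= kMaxObjectSize) {
    state.reserved_size = static_cast<uint32_t>(*alloc.size);
  }
}

// Merges a predecessor state into a successor: folding only continues if all
// predecessors share the same candidate, and the merged reservation is the
// maximum (or unknown, if any predecessor's reservation is dynamic).
BlockState Merge(const BlockState& a, const BlockState& b) {
  if (a.last_allocation != b.last_allocation) return BlockState{};
  BlockState merged = a;
  if (a.reserved_size.has_value() && b.reserved_size.has_value()) {
    merged.reserved_size = std::max(*a.reserved_size, *b.reserved_size);
  } else {
    merged.reserved_size = std::nullopt;
  }
  return merged;
}

}  // namespace sketch

In the sketch, `Merge` plays the role of `MergeCurrentStateIntoSuccessor` above; in the real pass, an unchanged merge result on a loop backedge is what lets `Run` avoid re-walking the loop body.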