spill-placer.cc
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/backend/spill-placer.h"

#include "src/base/bits-iterator.h"
#include "src/compiler/backend/register-allocator.h"

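// Overview: SpillPlacer decides where to insert the moves that write a live
// range's value to its stack slot. Values are usually spilled at their
// definition, but for loop-top phi values the spill can instead be placed
// later, on edges into the blocks that actually need the on-stack value
// ("late spilling"). The decision is made by the three dataflow passes below
// (FirstBackwardPass, ForwardPass, SecondBackwardPass), which run whenever
// the 64-entry table fills up and once more when the SpillPlacer is
// destroyed.
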
namespace v8 {
namespace internal {
namespace compiler {

SpillPlacer::SpillPlacer(RegisterAllocationData* data, Zone* zone)
    : data_(data), zone_(zone) {}

SpillPlacer::~SpillPlacer() {
  if (assigned_indices_ > 0) {
    CommitSpills();
  }
}

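// Registers `range` with the placer. Ranges that must be spilled at their
// definition are committed immediately; for the rest, every block that needs
// the value on the stack is marked in the table so that the passes in
// CommitSpills can pick the spill locations later.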
void SpillPlacer::Add(TopLevelLiveRange* range) {
  DCHECK(range->HasGeneralSpillRange());
  InstructionOperand spill_operand = range->GetSpillRangeOperand();
  range->FilterSpillMoves(data(), spill_operand);

  InstructionSequence* code = data()->code();
  InstructionBlock* top_start_block =
      code->GetInstructionBlock(range->Start().ToInstructionIndex());
  RpoNumber top_start_block_number = top_start_block->rpo_number();

  // Check for several cases where spilling at the definition is best.
  // - The value is already moved on-stack somehow so the list of insertion
  //   locations for spilling at the definition is empty.
  // - If the first LiveRange is spilled, then there's no sense in doing
  //   anything other than spilling at the definition.
  // - If the value is defined in a deferred block, then the logic to select
  //   the earliest deferred block as the insertion point would cause
  //   incorrect behavior, so the value must be spilled at the definition.
  // - We haven't seen any indication of performance improvements from seeking
  //   optimal spilling positions except on loop-top phi values, so spill
  //   any value that isn't a loop-top phi at the definition to avoid
  //   increasing the code size for no benefit.
  if (range->GetSpillMoveInsertionLocations(data()) == nullptr ||
      range->spilled() || top_start_block->IsDeferred() ||
      (!v8_flags.stress_turbo_late_spilling && !range->is_loop_phi())) {
    range->CommitSpillMoves(data(), spill_operand);
    return;
  }

  // Iterate through the range and mark every block that needs the value to be
  // spilled.
  for (const LiveRange* child = range; child != nullptr;
       child = child->next()) {
    if (child->spilled()) {
      // Add every block that contains part of this live range.
      for (const UseInterval& interval : child->intervals()) {
        RpoNumber start_block =
            code->GetInstructionBlock(interval.start().ToInstructionIndex())
                ->rpo_number();
        if (start_block == top_start_block_number) {
          // Can't do late spilling if the first spill is within the
          // definition block.
          range->CommitSpillMoves(data(), spill_operand);
          // Verify that we never added any data for this range to the table.
          DCHECK(!IsLatestVreg(range->vreg()));
          return;
        }
        LifetimePosition end = interval.end();
        int end_instruction = end.ToInstructionIndex();
        // The end position is exclusive, so an end position exactly on a block
        // boundary indicates that the range applies only to the prior block.
        if (data()->IsBlockBoundary(end)) {
          --end_instruction;
        }
        RpoNumber end_block =
            code->GetInstructionBlock(end_instruction)->rpo_number();
        while (start_block <= end_block) {
          SetSpillRequired(code->InstructionBlockAt(start_block), range->vreg(),
                           top_start_block_number);
          start_block = start_block.Next();
        }
      }
    } else {
      // Add every block that contains a use which requires the on-stack value.
      for (const UsePosition* pos : child->positions()) {
        if (pos->type() != UsePositionType::kRequiresSlot) continue;
        InstructionBlock* block =
            code->GetInstructionBlock(pos->pos().ToInstructionIndex());
        RpoNumber block_number = block->rpo_number();
        if (block_number == top_start_block_number) {
          // Can't do late spilling if the first spill is within the
          // definition block.
          range->CommitSpillMoves(data(), spill_operand);
          // Verify that we never added any data for this range to the table.
          DCHECK(!IsLatestVreg(range->vreg()));
          return;
        }
        SetSpillRequired(block, range->vreg(), top_start_block_number);
      }
    }
  }

  // If we haven't yet marked anything for this range, then it never needs to
  // spill at all.
  if (!IsLatestVreg(range->vreg())) {
    range->SetLateSpillingSelected(true);
    return;
  }

  SetDefinition(top_start_block_number, range->vreg());
}

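// An Entry tracks the State of up to 64 values (kValueIndicesPerEntry) for a
// single block. Rather than storing an array of states, it keeps three
// parallel bit-planes: bit i of first_bit_, second_bit_, and third_bit_
// together form the 3-bit state of value i. For example, a value in state
// kSpillRequiredInDeferredSuccessor (enum value 3, binary 011) has bit i set
// in first_bit_ and second_bit_ but cleared in third_bit_.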
class SpillPlacer::Entry {
 public:
  // Functions operating on single values (during setup):

  void SetSpillRequiredSingleValue(int value_index) {
    DCHECK_LT(value_index, kValueIndicesPerEntry);
    uint64_t bit = uint64_t{1} << value_index;
    SetSpillRequired(bit);
  }
  void SetDefinitionSingleValue(int value_index) {
    DCHECK_LT(value_index, kValueIndicesPerEntry);
    uint64_t bit = uint64_t{1} << value_index;
    SetDefinition(bit);
  }

  // Functions operating on all values simultaneously, as bitfields:

  uint64_t SpillRequired() const { return GetValuesInState<kSpillRequired>(); }
  void SetSpillRequired(uint64_t mask) {
    UpdateValuesToState<kSpillRequired>(mask);
  }
  uint64_t SpillRequiredInNonDeferredSuccessor() const {
    return GetValuesInState<kSpillRequiredInNonDeferredSuccessor>();
  }
  void SetSpillRequiredInNonDeferredSuccessor(uint64_t mask) {
    UpdateValuesToState<kSpillRequiredInNonDeferredSuccessor>(mask);
  }
  uint64_t SpillRequiredInDeferredSuccessor() const {
    return GetValuesInState<kSpillRequiredInDeferredSuccessor>();
  }
  void SetSpillRequiredInDeferredSuccessor(uint64_t mask) {
    UpdateValuesToState<kSpillRequiredInDeferredSuccessor>(mask);
  }
  uint64_t Definition() const { return GetValuesInState<kDefinition>(); }
  void SetDefinition(uint64_t mask) { UpdateValuesToState<kDefinition>(mask); }

 private:
  // Possible states for every value, at every block.
  enum State {
    // This block is not (yet) known to require the on-stack value.
    kUnmarked,

    // The value must be on the stack in this block.
    kSpillRequired,

    // The value doesn't need to be on-stack in this block, but some
    // non-deferred successor needs it.
    kSpillRequiredInNonDeferredSuccessor,

    // The value doesn't need to be on-stack in this block, but some
    // deferred successor needs it.
    kSpillRequiredInDeferredSuccessor,

    // The value is defined in this block.
    kDefinition,
  };

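  // The two helpers below exploit the bit-plane layout: GetValuesInState
  // selects, in one bitwise expression, every value whose three state bits
  // match `state`, and UpdateValuesToState writes `state` into the bit-planes
  // for every value whose bit is set in `mask`.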
  template <State state>
  uint64_t GetValuesInState() const {
    static_assert(state < 8);
    return ((state & 1) ? first_bit_ : ~first_bit_) &
           ((state & 2) ? second_bit_ : ~second_bit_) &
           ((state & 4) ? third_bit_ : ~third_bit_);
  }

  template <State state>
  void UpdateValuesToState(uint64_t mask) {
    static_assert(state < 8);
    first_bit_ =
        UpdateBitDataWithMask<(state & 1) != 0>(first_bit_, mask);
    second_bit_ =
        UpdateBitDataWithMask<(state & 2) != 0>(second_bit_, mask);
    third_bit_ =
        UpdateBitDataWithMask<(state & 4) != 0>(third_bit_, mask);
  }

  template <bool set_ones>
  static uint64_t UpdateBitDataWithMask(uint64_t data, uint64_t mask) {
    return set_ones ? data | mask : data & ~mask;
  }

  // Storage for the states of up to 64 live ranges.
  uint64_t first_bit_ = 0;
  uint64_t second_bit_ = 0;
  uint64_t third_bit_ = 0;
};

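// Returns the bit index used for `vreg` in the Entry bitfields, assigning the
// next free index if `vreg` is not already the most recently added value. The
// backing arrays are allocated lazily, and when all kValueIndicesPerEntry
// indices are in use, the pending values are committed and the table is
// cleared before the new index is handed out.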
int SpillPlacer::GetOrCreateIndexForLatestVreg(int vreg) {
  DCHECK_LE(assigned_indices_, kValueIndicesPerEntry);
  // If this vreg isn't yet the last one in the list, then add it.
  if (!IsLatestVreg(vreg)) {
    if (vreg_numbers_ == nullptr) {
      DCHECK_EQ(assigned_indices_, 0);
      DCHECK_EQ(entries_, nullptr);
      // We lazily allocate these arrays because many functions don't have any
      // values that use SpillPlacer.
      entries_ = zone_->AllocateArray<Entry>(
          data()->code()->instruction_blocks().size());
      for (size_t i = 0; i < data()->code()->instruction_blocks().size(); ++i) {
        new (&entries_[i]) Entry();
      }
      vreg_numbers_ = zone_->AllocateArray<int>(kValueIndicesPerEntry);
    }

    if (assigned_indices_ == kValueIndicesPerEntry) {
      // The table is full; commit the current set of values and clear it.
      CommitSpills();
      ClearData();
    }

    vreg_numbers_[assigned_indices_] = vreg;
    ++assigned_indices_;
  }
  return assigned_indices_ - 1;
}

void SpillPlacer::CommitSpills() {
  FirstBackwardPass();
  ForwardPass();
  SecondBackwardPass();
}

void SpillPlacer::ClearData() {
  assigned_indices_ = 0;
  for (int i = 0; i < data()->code()->InstructionBlockCount(); ++i) {
    new (&entries_[i]) Entry();
  }
  first_block_ = RpoNumber::Invalid();
  last_block_ = RpoNumber::Invalid();
}

void SpillPlacer::ExpandBoundsToInclude(RpoNumber block) {
  if (!first_block_.IsValid()) {
    DCHECK(!last_block_.IsValid());
    first_block_ = block;
    last_block_ = block;
  } else {
    if (first_block_ > block) {
      first_block_ = block;
    }
    if (last_block_ < block) {
      last_block_ = block;
    }
  }
}

void SpillPlacer::SetSpillRequired(InstructionBlock* block, int vreg,
                                   RpoNumber top_start_block) {
  // Spilling in loops is bad, so if the block is non-deferred and nested
  // within a loop, and the definition is before that loop, then mark the loop
  // top instead. Of course we must find the outermost such loop.
  if (!block->IsDeferred()) {
    while (block->loop_header().IsValid() &&
           block->loop_header() > top_start_block) {
      block = data()->code()->InstructionBlockAt(block->loop_header());
    }
  }

  int value_index = GetOrCreateIndexForLatestVreg(vreg);
  entries_[block->rpo_number().ToSize()].SetSpillRequiredSingleValue(
      value_index);
  ExpandBoundsToInclude(block->rpo_number());
}

void SpillPlacer::SetDefinition(RpoNumber block, int vreg) {
  int value_index = GetOrCreateIndexForLatestVreg(vreg);
  entries_[block.ToSize()].SetDefinitionSingleValue(value_index);
  ExpandBoundsToInclude(block);
}

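// First pass: walk the blocks backward and propagate spill requirements
// upward from successors, recording in each block whether a deferred or a
// non-deferred successor (transitively) needs the value on the stack. A
// block's own Definition or SpillRequired markings are not overwritten.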
void SpillPlacer::FirstBackwardPass() {
  InstructionSequence* code = data()->code();

  for (int i = last_block_.ToInt(); i >= first_block_.ToInt(); --i) {
    RpoNumber block_id = RpoNumber::FromInt(i);
    InstructionBlock* block = code->instruction_blocks()[i];

    Entry& entry = entries_[i];

    // State that will be accumulated from successors.
    uint64_t spill_required_in_non_deferred_successor = 0;
    uint64_t spill_required_in_deferred_successor = 0;

    for (RpoNumber successor_id : block->successors()) {
      // Ignore loop back-edges.
      if (successor_id <= block_id) continue;

      InstructionBlock* successor = code->InstructionBlockAt(successor_id);
      const Entry& successor_entry = entries_[successor_id.ToSize()];
      if (successor->IsDeferred()) {
        spill_required_in_deferred_successor |= successor_entry.SpillRequired();
      } else {
        spill_required_in_non_deferred_successor |=
            successor_entry.SpillRequired();
      }
      spill_required_in_deferred_successor |=
          successor_entry.SpillRequiredInDeferredSuccessor();
      spill_required_in_non_deferred_successor |=
          successor_entry.SpillRequiredInNonDeferredSuccessor();
    }

    // Starting state of the current block.
    uint64_t defs = entry.Definition();
    uint64_t needs_spill = entry.SpillRequired();

    // Info about successors doesn't get to override existing info about
    // definitions and spills required by this block itself.
    spill_required_in_deferred_successor &= ~(defs | needs_spill);
    spill_required_in_non_deferred_successor &= ~(defs | needs_spill);

    entry.SetSpillRequiredInDeferredSuccessor(
        spill_required_in_deferred_successor);
    entry.SetSpillRequiredInNonDeferredSuccessor(
        spill_required_in_non_deferred_successor);
  }
}

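// Second pass: walk the non-deferred blocks forward and mark merge points
// that must spill, so that no control-flow path through non-deferred blocks
// ever has to spill twice.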
void SpillPlacer::ForwardPass() {
  InstructionSequence* code = data()->code();
  for (int i = first_block_.ToInt(); i <= last_block_.ToInt(); ++i) {
    RpoNumber block_id = RpoNumber::FromInt(i);
    InstructionBlock* block = code->instruction_blocks()[i];

    // Deferred blocks don't need to participate in the forward pass, because
    // their spills all get pulled forward to the earliest possible deferred
    // block (where a non-deferred block jumps to a deferred block), and
    // decisions about spill requirements for non-deferred blocks don't take
    // deferred blocks into account.
    if (block->IsDeferred()) continue;

    Entry& entry = entries_[i];

    // State that will be accumulated from predecessors.
    uint64_t spill_required_in_non_deferred_predecessor = 0;
    uint64_t spill_required_in_all_non_deferred_predecessors =
        static_cast<uint64_t>(int64_t{-1});

    for (RpoNumber predecessor_id : block->predecessors()) {
      // Ignore loop back-edges.
      if (predecessor_id >= block_id) continue;

      InstructionBlock* predecessor = code->InstructionBlockAt(predecessor_id);
      if (predecessor->IsDeferred()) continue;
      const Entry& predecessor_entry = entries_[predecessor_id.ToSize()];
      spill_required_in_non_deferred_predecessor |=
          predecessor_entry.SpillRequired();
      spill_required_in_all_non_deferred_predecessors &=
          predecessor_entry.SpillRequired();
    }

    // Starting state of the current block.
    uint64_t spill_required_in_non_deferred_successor =
        entry.SpillRequiredInNonDeferredSuccessor();
    uint64_t spill_required_in_any_successor =
        spill_required_in_non_deferred_successor |
        entry.SpillRequiredInDeferredSuccessor();

    // If all of the predecessors agree that a spill is required, then a
    // spill is required. Note that we don't set anything for values that
    // currently have no markings in this block, to avoid pushing data too
    // far down the graph and confusing the next backward pass.
    entry.SetSpillRequired(spill_required_in_any_successor &
                           spill_required_in_non_deferred_predecessor &
                           spill_required_in_all_non_deferred_predecessors);

    // If only some of the predecessors require a spill, but some successor
    // of this block also requires a spill, then this merge point requires a
    // spill. This ensures that no control-flow path through non-deferred
    // blocks ever has to spill twice.
    entry.SetSpillRequired(spill_required_in_non_deferred_successor &
                           spill_required_in_non_deferred_predecessor);
  }
}

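// Third pass: walk the blocks backward again and make the final decisions:
// spill at the definition if every non-deferred successor requires the value
// on the stack, otherwise insert spill moves on the edges into the earliest
// blocks that do require it.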
void SpillPlacer::SecondBackwardPass() {
  InstructionSequence* code = data()->code();
  for (int i = last_block_.ToInt(); i >= first_block_.ToInt(); --i) {
    RpoNumber block_id = RpoNumber::FromInt(i);
    InstructionBlock* block = code->instruction_blocks()[i];

    Entry& entry = entries_[i];

    // State that will be accumulated from successors.
    uint64_t spill_required_in_non_deferred_successor = 0;
    uint64_t spill_required_in_deferred_successor = 0;
    uint64_t spill_required_in_all_non_deferred_successors =
        static_cast<uint64_t>(int64_t{-1});

    for (RpoNumber successor_id : block->successors()) {
      // Ignore loop back-edges.
      if (successor_id <= block_id) continue;

      InstructionBlock* successor = code->InstructionBlockAt(successor_id);
      const Entry& successor_entry = entries_[successor_id.ToSize()];
      if (successor->IsDeferred()) {
        spill_required_in_deferred_successor |= successor_entry.SpillRequired();
      } else {
        spill_required_in_non_deferred_successor |=
            successor_entry.SpillRequired();
        spill_required_in_all_non_deferred_successors &=
            successor_entry.SpillRequired();
      }
    }

    // Starting state of the current block.
    uint64_t defs = entry.Definition();

    // If all of the successors of a definition need the value to be
    // spilled, then the value should be spilled at the definition.
    uint64_t spill_at_def = defs & spill_required_in_non_deferred_successor &
                            spill_required_in_all_non_deferred_successors;
    for (int index_to_spill : base::bits::IterateBits(spill_at_def)) {
      int vreg_to_spill = vreg_numbers_[index_to_spill];
      TopLevelLiveRange* top = data()->live_ranges()[vreg_to_spill];
      top->CommitSpillMoves(data(), top->GetSpillRangeOperand());
    }

    if (block->IsDeferred()) {
      DCHECK_EQ(defs, 0);
      // Any deferred successor needing a spill is sufficient to make the
      // current block need a spill.
      entry.SetSpillRequired(spill_required_in_deferred_successor);
    }

    // Propagate data upward if there are non-deferred successors and they
    // all need a spill, regardless of whether the current block is
    // deferred.
    entry.SetSpillRequired(~defs & spill_required_in_non_deferred_successor &
                           spill_required_in_all_non_deferred_successors);

    // Iterate the successors again to find out which ones require spills at
    // their beginnings, and insert those spills.
    for (RpoNumber successor_id : block->successors()) {
      // Ignore loop back-edges.
      if (successor_id <= block_id) continue;

      InstructionBlock* successor = code->InstructionBlockAt(successor_id);
      const Entry& successor_entry = entries_[successor_id.ToSize()];
      for (int index_to_spill :
           base::bits::IterateBits(successor_entry.SpillRequired() &
                                   ~entry.SpillRequired() & ~spill_at_def)) {
        CommitSpill(vreg_numbers_[index_to_spill], block, successor);
      }
    }
  }
}

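// Inserts the actual late spill: a gap move at the start of `successor` that
// copies the register assigned to `vreg` at the end of `predecessor` into the
// value's stack slot. `successor` must have exactly one predecessor, so the
// move cannot clobber anything on another control-flow path.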
void SpillPlacer::CommitSpill(int vreg, InstructionBlock* predecessor,
                              InstructionBlock* successor) {
  TopLevelLiveRange* live_range = data()->live_ranges()[vreg];
  LifetimePosition pred_end = LifetimePosition::InstructionFromInstructionIndex(
      predecessor->last_instruction_index());
  LiveRange* child_range = live_range->GetChildCovers(pred_end);
  DCHECK_NOT_NULL(child_range);
  InstructionOperand pred_op = child_range->GetAssignedOperand();
  DCHECK(pred_op.IsAnyRegister());
  DCHECK_EQ(successor->PredecessorCount(), 1);
  data()->AddGapMove(successor->first_instruction_index(),
                     Instruction::GapPosition::START, pred_op,
                     live_range->GetSpillRangeOperand());
  successor->mark_needs_frame();
  live_range->SetLateSpillingSelected(true);
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8