v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
parallel-move.h
// Copyright 2023 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_WASM_BASELINE_PARALLEL_MOVE_H_
#define V8_WASM_BASELINE_PARALLEL_MOVE_H_

#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/wasm/baseline/liftoff-register.h"
#include "src/wasm/wasm-value.h"

namespace v8::internal::wasm {

// ParallelMove is a utility class that encodes multiple moves from registers
// to registers (`RegisterMove`), constants to registers (`RegisterLoad` with
// `LoadKind::kConstant`), or stack slots to registers (other `RegisterLoad`s).
// It can handle cyclic moves, e.g., swaps between registers.
// The moves are typically prepared/encoded into an instance via the high-level
// entry point `Transfer`, which takes two Wasm value stack configurations
// (`VarState`) as input.
// Code is actually emitted to the underlying `LiftoffAssembler` only at the
// end via `Execute` or implicitly in the destructor.
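//
// A minimal usage sketch (a hypothetical call site; `assembler`, `dst_slots`,
// `src_slots` and `count` are illustrative names, not part of this file):
//
//   ParallelMove parallel_move(&assembler);
//   for (uint32_t i = 0; i < count; ++i) {
//     parallel_move.Transfer(dst_slots[i], src_slots[i]);
//   }
//   parallel_move.Execute();  // or implicitly at the end of the scope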
class ParallelMove {
  using VarState = LiftoffAssembler::VarState;

  struct RegisterMove {
    LiftoffRegister src;
    ValueKind kind;
    constexpr RegisterMove(LiftoffRegister src, ValueKind kind)
        : src(src), kind(kind) {}
  };

  struct RegisterLoad {
    enum LoadKind : uint8_t {
      kNop,           // no-op, used for high fp of a fp pair.
      kConstant,      // load a constant value into a register.
      kStack,         // fill a register from a stack slot.
      kLowHalfStack,  // fill a register from the low half of a stack slot.
      kHighHalfStack  // fill a register from the high half of a stack slot.
    };

    LoadKind load_kind;
    ValueKind kind;
    // `value` stores the i32 constant value (sign-extended if `kind == kI64`),
    // or the stack offset, depending on `load_kind`.
    int32_t value;

    // Named constructors.
    static RegisterLoad Const(ValueKind kind, int32_t constant) {
      V8_ASSUME(kind == kI32 || kind == kI64);
      return {kConstant, kind, constant};
    }
    static RegisterLoad Stack(int32_t offset, ValueKind kind) {
      return {kStack, kind, offset};
    }
    static RegisterLoad HalfStack(int32_t offset, RegPairHalf half) {
      return {half == kLowWord ? kLowHalfStack : kHighHalfStack, kI32, offset};
    }
    static RegisterLoad Nop() {
      // ValueKind does not matter.
      return {kNop, kI32, 0};
    }

   private:
    RegisterLoad(LoadKind load_kind, ValueKind kind, int32_t value)
        : load_kind(load_kind), kind(kind), value(value) {}
  };

 public:
  explicit inline ParallelMove(LiftoffAssembler* wasm_asm);
  ParallelMove(const ParallelMove&) = delete;
  ParallelMove& operator=(const ParallelMove&) = delete;
  V8_INLINE ~ParallelMove() { Execute(); }

  V8_INLINE void Execute() {
    // First, execute register moves. Then load constants and stack values into
    // registers.
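    // (Rationale: a load could clobber a register that is still the source of
    // a pending move, so the moves have to be emitted first.)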
    if (!move_dst_regs_.is_empty()) ExecuteMoves();
    DCHECK(move_dst_regs_.is_empty());
    if (!load_dst_regs_.is_empty()) ExecuteLoads();
    DCHECK(load_dst_regs_.is_empty());
    // Tell the compiler that the ParallelMove is empty after this, so it
    // can eliminate a second {Execute} in the destructor.
    bool all_done = move_dst_regs_.is_empty() && load_dst_regs_.is_empty();
    V8_ASSUME(all_done);
  }

  V8_INLINE void Transfer(const VarState& dst, const VarState& src) {
    DCHECK(CompatibleStackSlotTypes(dst.kind(), src.kind()));
    if (dst.is_stack()) {
      if (V8_UNLIKELY(!(src.is_stack() && src.offset() == dst.offset()))) {
        TransferToStack(dst.offset(), src);
      }
    } else if (dst.is_reg()) {
      LoadIntoRegister(dst.reg(), src);
    } else {
      DCHECK(dst.is_const());
      DCHECK_EQ(dst.i32_const(), src.i32_const());
    }
  }

  void TransferToStack(int dst_offset, const VarState& src);

  V8_INLINE void LoadIntoRegister(LiftoffRegister dst, const VarState& src) {
    if (src.is_reg()) {
      DCHECK_EQ(dst.reg_class(), src.reg_class());
      if (dst != src.reg()) MoveRegister(dst, src.reg(), src.kind());
    } else if (src.is_stack()) {
      LoadStackSlot(dst, src.offset(), src.kind());
    } else {
      DCHECK(src.is_const());
      LoadConstant(dst, src.kind(), src.i32_const());
    }
  }

  void LoadI64HalfIntoRegister(LiftoffRegister dst, const VarState& src,
                               RegPairHalf half) {
    // Use CHECK such that the remaining code is statically dead if
    // {kNeedI64RegPair} is false.
    CHECK(kNeedI64RegPair);
    DCHECK_EQ(kI64, src.kind());
    switch (src.loc()) {
      case VarState::kStack:
        LoadI64HalfStackSlot(dst, src.offset(), half);
        break;
      case VarState::kRegister: {
        LiftoffRegister src_half =
            half == kLowWord ? src.reg().low() : src.reg().high();
        if (dst != src_half) MoveRegister(dst, src_half, kI32);
        break;
      }
      case VarState::kIntConst:
        int32_t value = src.i32_const();
        // The high word is the sign extension of the low word.
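        // (E.g. for the constant -5, the high word becomes -5 >> 31 == -1,
        // i.e. 0xffffffff; for non-negative constants it becomes 0.)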
        if (half == kHighWord) value = value >> 31;
        LoadConstant(dst, kI32, value);
        break;
    }
  }

  void MoveRegister(LiftoffRegister dst, LiftoffRegister src, ValueKind kind) {
    DCHECK_NE(dst, src);
    DCHECK_EQ(dst.reg_class(), src.reg_class());
    DCHECK_EQ(reg_class_for(kind), src.reg_class());
    if (src.is_gp_pair()) {
      DCHECK_EQ(kI64, kind);
      if (dst.low() != src.low()) MoveRegister(dst.low(), src.low(), kI32);
      if (dst.high() != src.high()) MoveRegister(dst.high(), src.high(), kI32);
      return;
    }
    if (src.is_fp_pair()) {
      DCHECK_EQ(kS128, kind);
      if (dst.low() != src.low()) {
        MoveRegister(dst.low(), src.low(), kF64);
        MoveRegister(dst.high(), src.high(), kF64);
      }
      return;
    }
    if (move_dst_regs_.has(dst)) {
      DCHECK_EQ(register_move(dst)->src, src);
      // Check for compatible value kinds.
      // - references can occur with mixed kRef / kRefNull kinds.
      // - FP registers can only occur with f32 / f64 / s128 kinds (mixed kinds
      //   only if they hold the initial zero value).
      // - others must match exactly.
      DCHECK_EQ(is_object_reference(kind),
                is_object_reference(register_move(dst)->kind));
      DCHECK_EQ(dst.is_fp(), register_move(dst)->kind == kF32 ||
                                 register_move(dst)->kind == kF64 ||
                                 register_move(dst)->kind == kS128);
      if (!is_object_reference(kind) && !dst.is_fp()) {
        DCHECK_EQ(register_move(dst)->kind, kind);
      }
      // Potentially upgrade an existing `kF32` move to a `kF64` move.
      if (kind == kF64) register_move(dst)->kind = kF64;
      return;
    }
    move_dst_regs_.set(dst);
    ++*src_reg_use_count(src);
    *register_move(dst) = {src, kind};
  }

  // Note: {constant} will be sign-extended if {kind == kI64}.
  void LoadConstant(LiftoffRegister dst, ValueKind kind, int32_t constant) {
    DCHECK(!load_dst_regs_.has(dst));
    load_dst_regs_.set(dst);
    if (dst.is_gp_pair()) {
      DCHECK_EQ(kI64, kind);
      *register_load(dst.low()) = RegisterLoad::Const(kI32, constant);
      // The high word is either 0 or 0xffffffff.
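      // (constant >> 31 is an arithmetic shift on int32_t: 0 for non-negative
      // constants, -1 for negative ones.)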
      *register_load(dst.high()) = RegisterLoad::Const(kI32, constant >> 31);
    } else {
      *register_load(dst) = RegisterLoad::Const(kind, constant);
    }
  }

  void LoadStackSlot(LiftoffRegister dst, int stack_offset, ValueKind kind) {
    V8_ASSUME(stack_offset > 0);
    if (load_dst_regs_.has(dst)) {
      // It can happen that we spilled the same register to different stack
      // slots, and then we reload them later into the same dst register.
      // In that case, it is enough to load one of the stack slots.
      return;
    }
    load_dst_regs_.set(dst);
    // Make sure that we only spill to positions after this stack offset to
    // avoid overwriting the content.
    if (stack_offset > last_spill_offset_) {
      last_spill_offset_ = stack_offset;
    }
    if (dst.is_gp_pair()) {
      DCHECK_EQ(kI64, kind);
      *register_load(dst.low()) =
          RegisterLoad::HalfStack(stack_offset, kLowWord);
      *register_load(dst.high()) =
          RegisterLoad::HalfStack(stack_offset, kHighWord);
    } else if (dst.is_fp_pair()) {
      DCHECK_EQ(kS128, kind);
      // Only need register_load for low_gp since we load 128 bits at one go.
      // Both low and high need to be set in load_dst_regs_ but when iterating
      // over it, both low and high will be cleared, so we won't load twice.
      *register_load(dst.low()) = RegisterLoad::Stack(stack_offset, kind);
      *register_load(dst.high()) = RegisterLoad::Nop();
    } else {
      *register_load(dst) = RegisterLoad::Stack(stack_offset, kind);
    }
  }

  void LoadI64HalfStackSlot(LiftoffRegister dst, int offset, RegPairHalf half) {
    if (load_dst_regs_.has(dst)) {
      // It can happen that we spilled the same register to different stack
      // slots, and then we reload them later into the same dst register.
      // In that case, it is enough to load one of the stack slots.
      return;
    }
    load_dst_regs_.set(dst);
    *register_load(dst) = RegisterLoad::HalfStack(offset, half);
  }

 private:
  alignas(RegisterMove) char register_moves_
      [kAfterMaxLiftoffRegCode * sizeof(RegisterMove)];  // uninitialized
  alignas(RegisterLoad) char register_loads_
      [kAfterMaxLiftoffRegCode * sizeof(RegisterLoad)];  // uninitialized
  int src_reg_use_count_[kAfterMaxLiftoffRegCode] = {0};
  LiftoffRegList move_dst_regs_;
  LiftoffRegList load_dst_regs_;
  LiftoffAssembler* const asm_;
  // Cache the last spill offset in case we need to spill for resolving move
  // cycles.
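  // (A cycle such as {a <- b, b <- a} cannot be resolved by reordering alone;
  // one register is spilled past last_spill_offset_ and reloaded afterwards.)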
  int last_spill_offset_;

  RegisterMove* register_move(LiftoffRegister reg) {
    return reinterpret_cast<RegisterMove*>(register_moves_) +
           reg.liftoff_code();
  }
  RegisterLoad* register_load(LiftoffRegister reg) {
    return reinterpret_cast<RegisterLoad*>(register_loads_) +
           reg.liftoff_code();
  }
  int* src_reg_use_count(LiftoffRegister reg) {
    return src_reg_use_count_ + reg.liftoff_code();
  }

  void ExecuteMove(LiftoffRegister dst) {
    RegisterMove* move = register_move(dst);
    DCHECK_EQ(0, *src_reg_use_count(dst));
    asm_->Move(dst, move->src, move->kind);
    ClearExecutedMove(dst);
  }

  void ClearExecutedMove(LiftoffRegister dst) {
    DCHECK(move_dst_regs_.has(dst));
    move_dst_regs_.clear(dst);
    RegisterMove* move = register_move(dst);
    DCHECK_LT(0, *src_reg_use_count(move->src));
    if (--*src_reg_use_count(move->src)) return;
    // src count dropped to zero. If this is a destination register, execute
    // that move now.
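    // (E.g. with pending moves {a <- b} and {b <- c}: emitting {a <- b} drops
    // the use count of b to zero, so the move into b is emitted right away.)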
    if (!move_dst_regs_.has(move->src)) return;
    ExecuteMove(move->src);
  }

  V8_NOINLINE V8_PRESERVE_MOST void ExecuteMoves();

  V8_NOINLINE V8_PRESERVE_MOST void ExecuteLoads();
};

}  // namespace v8::internal::wasm

#endif  // V8_WASM_BASELINE_PARALLEL_MOVE_H_