v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
register-configuration.cc
Go to the documentation of this file.
1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
10#include "src/common/globals.h"
11
12namespace v8 {
13namespace internal {
14
namespace {

// REGISTER_COUNT expands each register R in an ALLOCATABLE_*_REGISTERS(V)
// list to "1 +", so applying it across a register list (with a trailing
// operand) yields the number of registers in that list at compile time.
#define REGISTER_COUNT(R) 1 +
static const int kMaxAllocatableGeneralRegisterCount =
static const int kMaxAllocatableDoubleRegisterCount =
#if V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_PPC64
// Only RISC-V and PPC64 define a separate allocatable SIMD128 register count.
static const int kMaxAllocatableSIMD128RegisterCount =
#endif
26
// Tables of allocatable register codes, expanded from the per-architecture
// ALLOCATABLE_*_REGISTERS macro lists.
static const int kAllocatableGeneralCodes[] = {
#define REGISTER_CODE(R) kRegCode_##R,
#undef REGISTER_CODE

#define REGISTER_CODE(R) kDoubleCode_##R,
static const int kAllocatableDoubleCodes[] = {
#if V8_TARGET_ARCH_ARM
// Reduced double-register set used on Arm when VFP32DREGS is unavailable.
static const int kAllocatableNoVFP32DoubleCodes[] = {
#endif  // V8_TARGET_ARCH_ARM
#undef REGISTER_CODE

#if V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_PPC64
// Codes for the independent SIMD128 register file: vector registers
// (kVRCode_*) on RISC-V, Simd128 registers (kSimd128Code_*) on PPC64.
static const int kAllocatableSIMD128Codes[] = {
#if V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_RISCV32
#define REGISTER_CODE(R) kVRCode_##R,
#else
#define REGISTER_CODE(R) kSimd128Code_##R,
#endif
#undef REGISTER_CODE
#endif  // V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64 ||
        // V8_TARGET_ARCH_PPC64
52
61#if V8_TARGET_ARCH_X64
64#endif
65
// Number of SIMD128 registers in the target's register file. Non-zero only
// on targets that model an independent SIMD128 file (RISC-V, PPC64); all
// other targets report zero here.
static int get_num_simd128_registers() {
  return
#if V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_PPC64
#else
      0;
#endif  // V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64 ||
        // V8_TARGET_ARCH_PPC64
}
75
// No target currently models a separate SIMD256 register file.
static int get_num_simd256_registers() {
  return 0;
}
77
// Callers on architectures other than Arm expect this to be constant
// between build and runtime. Avoid adding variability on other platforms.
static int get_num_allocatable_double_registers() {
  return
#if V8_TARGET_ARCH_IA32
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_X64
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_ARM
      // Arm is the single runtime-dependent case: with VFP32DREGS the full
      // double-register set is allocatable; otherwise a reduced set is used.
      CpuFeatures::IsSupported(VFP32DREGS)
          ? kMaxAllocatableDoubleRegisterCount
#elif V8_TARGET_ARCH_ARM64
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_MIPS
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_MIPS64
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_LOONG64
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_PPC64
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_S390X
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_RISCV64
      kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_RISCV32
      kMaxAllocatableDoubleRegisterCount;
#else
#error Unsupported target architecture.
#endif
}
110
111#undef REGISTER_COUNT
112
// How many SIMD128 registers the allocator may use. Non-zero only for
// targets with an independent SIMD128 register file.
static int get_num_allocatable_simd128_registers() {
#if V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_PPC64
  return kMaxAllocatableSIMD128RegisterCount;
#else
  return 0;
#endif
}
121
// No target currently exposes allocatable SIMD256 registers.
static int get_num_allocatable_simd256_registers() {
  return 0;
}
123
124// Callers on architectures other than Arm expect this to be be constant
125// between build and runtime. Avoid adding variability on other platforms.
126static const int* get_allocatable_double_codes() {
127 return
128#if V8_TARGET_ARCH_ARM
129 CpuFeatures::IsSupported(VFP32DREGS) ? kAllocatableDoubleCodes
130 : kAllocatableNoVFP32DoubleCodes;
131#else
132 kAllocatableDoubleCodes;
133#endif
134}
135
136static const int* get_allocatable_simd128_codes() {
137 return
138#if V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_PPC64
139 kAllocatableSIMD128Codes;
140#else
141 kAllocatableDoubleCodes;
142#endif
143}
144
// Register configuration describing the build target's default register
// files, forwarding the arch-selected counts and code tables computed above.
class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
 public:
  ArchDefaultRegisterConfiguration()
      : RegisterConfiguration(
            get_num_simd128_registers(), get_num_simd256_registers(),
            kMaxAllocatableGeneralRegisterCount,
            get_num_allocatable_double_registers(),
            get_num_allocatable_simd128_registers(),
            get_num_allocatable_simd256_registers(), kAllocatableGeneralCodes,
            get_allocatable_double_codes(), get_allocatable_simd128_codes()) {}
};
157
// Lazily creates (and intentionally leaks) the process-wide default
// configuration returned by RegisterConfiguration::Default().
DEFINE_LAZY_LEAKY_OBJECT_GETTER(ArchDefaultRegisterConfiguration,
                                GetDefaultRegisterConfiguration)
160
// RestrictedRegisterConfiguration uses a subset of the allocatable general
// registers the architecture supports, which results in generating assembly
// that uses fewer registers. Currently, it's only used by the RecordWrite
// code stub.
class RestrictedRegisterConfiguration : public RegisterConfiguration {
 public:
  // Takes ownership of the code and name arrays. Every supplied code must
  // be one of the default allocatable general-register codes (DCHECKed).
  RestrictedRegisterConfiguration(
      int num_allocatable_general_registers,
      std::unique_ptr<int[]> allocatable_general_register_codes,
      std::unique_ptr<char const*[]> allocatable_general_register_names)
      : RegisterConfiguration(
            get_num_simd128_registers(), get_num_simd256_registers(),
            num_allocatable_general_registers,
            get_num_allocatable_double_registers(),
            get_num_allocatable_simd128_registers(),
            get_num_allocatable_simd256_registers(),
            allocatable_general_register_codes.get(),
            get_allocatable_double_codes(), get_allocatable_simd128_codes()),
        allocatable_general_register_codes_(
            std::move(allocatable_general_register_codes)),
        allocatable_general_register_names_(
            std::move(allocatable_general_register_names)) {
    for (int i = 0; i < num_allocatable_general_registers; ++i) {
      DCHECK(
          IsAllocatableGeneralRegister(allocatable_general_register_codes_[i]));
    }
  }

  // Linear scan over the default allocatable set; fine for the small, fixed
  // number of general registers.
  bool IsAllocatableGeneralRegister(int code) {
    for (int i = 0; i < kMaxAllocatableGeneralRegisterCount; ++i) {
      if (code == kAllocatableGeneralCodes[i]) {
        return true;
      }
    }
    return false;
  }

 private:
  std::unique_ptr<int[]> allocatable_general_register_codes_;
  std::unique_ptr<char const*[]> allocatable_general_register_names_;
};
202
203} // namespace
204
  // The lazily-created default configuration is leaked on purpose, so the
  // returned pointer stays valid for the lifetime of the process.
  return GetDefaultRegisterConfiguration();
}
208
  // Size of the restricted register set.
  int num = registers.Count();
  std::unique_ptr<int[]> codes{new int[num]};
  std::unique_ptr<char const* []> names { new char const*[num] };
  int counter = 0;
  // Keep only the default allocatable general registers that are present in
  // `registers`, preserving the default allocation order.
  for (int i = 0; i < Default()->num_allocatable_general_registers(); ++i) {
    if (registers.has(reg)) {
      DCHECK(counter < num);
      codes[counter] = reg.code();
      // NOTE(review): the name is looked up via from_code(i) while the code
      // comes from `reg` — confirm both always denote the same register.
      names[counter] = RegisterName(Register::from_code(i));
      counter++;
    }
  }

  // The configuration takes ownership of the arrays; the caller owns the
  // returned object.
  return new RestrictedRegisterConfiguration(num, std::move(codes),
                                             std::move(names));
}
228
    AliasingKind fp_aliasing_kind, int num_general_registers,
    int num_double_registers, int num_simd128_registers,
    int num_simd256_registers, int num_allocatable_general_registers,
    int num_allocatable_double_registers, int num_allocatable_simd128_registers,
    int num_allocatable_simd256_registers, const int* allocatable_general_codes,
    const int* allocatable_double_codes,
    const int* independent_allocatable_simd128_codes)
    : num_general_registers_(num_general_registers),
      // Float counts start at zero; they are derived from the double set in
      // the body when the FP registers alias each other.
      num_float_registers_(0),
      num_double_registers_(num_double_registers),
      num_simd128_registers_(num_simd128_registers),
      num_simd256_registers_(num_simd256_registers),
      num_allocatable_general_registers_(num_allocatable_general_registers),
      num_allocatable_float_registers_(0),
      num_allocatable_double_registers_(num_allocatable_double_registers),
      num_allocatable_simd128_registers_(num_allocatable_simd128_registers),
      num_allocatable_simd256_registers_(num_allocatable_simd256_registers),
      // The code masks are filled in from the code arrays below.
      allocatable_general_codes_mask_(0),
      allocatable_float_codes_mask_(0),
      allocatable_double_codes_mask_(0),
      allocatable_simd128_codes_mask_(0),
      allocatable_simd256_codes_mask_(0),
      allocatable_general_codes_(allocatable_general_codes),
      allocatable_double_codes_(allocatable_double_codes),
      fp_aliasing_kind_(fp_aliasing_kind) {
  // Build bit masks of the allocatable general and double register codes.
  for (int i = 0; i < num_allocatable_general_registers_; ++i) {
  }
  for (int i = 0; i < num_allocatable_double_registers_; ++i) {
  }

  // Combined FP aliasing: double register code d overlaps float codes 2*d
  // and 2*d+1, so derive the allocatable float codes/mask from the doubles.
  for (int i = 0; i < num_allocatable_double_registers_; i++) {
    int base_code = allocatable_double_codes_[i] * 2;
    // Skip doubles whose float aliases would fall outside the FP file.
    if (base_code >= kMaxFPRegisters) continue;
        base_code + 1;
    allocatable_float_codes_mask_ |= (0x3 << base_code);
  }
  // A SIMD128 register (code d/2) is allocatable only when both double
  // registers overlapping it are, i.e. when the same d/2 value repeats.
  int last_simd128_code = allocatable_double_codes_[0] / 2;
  for (int i = 1; i < num_allocatable_double_registers_; i++) {
    int next_simd128_code = allocatable_double_codes_[i] / 2;
    // This scheme assumes allocatable_double_codes_ are strictly increasing.
    DCHECK_GE(next_simd128_code, last_simd128_code);
    if (last_simd128_code == next_simd128_code) {
        next_simd128_code;
      allocatable_simd128_codes_mask_ |= (0x1 << next_simd128_code);
    }
    last_simd128_code = next_simd128_code;
  }
  for (int i = 0; i < num_allocatable_float_registers_; ++i) {
#if V8_TARGET_ARCH_X64
#endif
  }
#if V8_TARGET_ARCH_X64
#endif
  } else {
    // Independent FP register files: the SIMD128 codes must be supplied
    // explicitly by the caller.
    DCHECK_NE(independent_allocatable_simd128_codes, nullptr);
    for (int i = 0; i < num_allocatable_float_registers_; ++i) {
    }
    for (int i = 0; i < num_allocatable_simd128_registers; i++) {
      allocatable_simd128_codes_[i] = independent_allocatable_simd128_codes[i];
    }
    for (int i = 0; i < num_allocatable_simd128_registers_; ++i) {
    }
  }
}
327
328// Assert that kFloat32, kFloat64, kSimd128 and kSimd256 are consecutive values.
329static_assert(static_cast<int>(MachineRepresentation::kSimd256) ==
330 static_cast<int>(MachineRepresentation::kSimd128) + 1);
331static_assert(static_cast<int>(MachineRepresentation::kSimd128) ==
332 static_cast<int>(MachineRepresentation::kFloat64) + 1);
333static_assert(static_cast<int>(MachineRepresentation::kFloat64) ==
334 static_cast<int>(MachineRepresentation::kFloat32) + 1);
335
                                   MachineRepresentation other_rep,
                                   int* alias_base_index) const {
  DCHECK(IsFloatingPoint(rep) && IsFloatingPoint(other_rep));
  // Same representation: a register aliases exactly itself.
  if (rep == other_rep) {
    *alias_base_index = index;
    return 1;
  }
  // The static_asserts above guarantee the FP representations are
  // consecutive, so the enum distance equals the log2 of the width ratio.
  int rep_int = static_cast<int>(rep);
  int other_rep_int = static_cast<int>(other_rep);
  if (rep_int > other_rep_int) {
    // rep is wider: it covers 1 << shift narrower registers starting at
    // index << shift.
    int shift = rep_int - other_rep_int;
    int base_index = index << shift;
    if (base_index >= kMaxFPRegisters) {
      // Alias indices would be out of FP register range.
      return 0;
    }
    *alias_base_index = base_index;
    return 1 << shift;
  }
  // rep is narrower: it lies inside exactly one wider register.
  int shift = other_rep_int - rep_int;
  *alias_base_index = index >> shift;
  return 1;
}
361
                                   MachineRepresentation other_rep,
                                   int other_index) const {
  DCHECK(IsFloatingPoint(rep) && IsFloatingPoint(other_rep));
  // Identical representations alias only at the identical index.
  if (rep == other_rep) {
    return index == other_index;
  }
  // Consecutive FP representation values (asserted above) make the enum
  // distance the log2 of the register-width ratio.
  int rep_int = static_cast<int>(rep);
  int other_rep_int = static_cast<int>(other_rep);
  if (rep_int > other_rep_int) {
    // rep is wider: it aliases other_index iff that index maps into it.
    int shift = rep_int - other_rep_int;
    return index == other_index >> shift;
  }
  // rep is narrower: map index up to the wider register file and compare.
  int shift = other_rep_int - rep_int;
  return index >> shift == other_index;
}
379
380} // namespace internal
381} // namespace v8
static bool IsSupported(CpuFeature f)
static const RegisterConfiguration * Default()
static const RegisterConfiguration * RestrictGeneralRegisters(RegList registers)
RegisterConfiguration(AliasingKind fp_aliasing_kind, int num_general_registers, int num_double_registers, int num_simd128_registers, int num_simd256_registers, int num_allocatable_general_registers, int num_allocatable_double_registers, int num_allocatable_simd128_registers, int num_allocatable_simd256_registers, const int *allocatable_general_codes, const int *allocatable_double_codes, const int *independent_allocatable_simd128_codes=nullptr)
int GetAliases(MachineRepresentation rep, int index, MachineRepresentation other_rep, int *alias_base_index) const
bool AreAliases(MachineRepresentation rep, int index, MachineRepresentation other_rep, int other_index) const
static constexpr Register from_code(int code)
#define DEFINE_LAZY_LEAKY_OBJECT_GETTER(T, FunctionName,...)
LiftoffRegister reg
RegListBase< RegisterT > registers
STL namespace.
constexpr AliasingKind kFPAliasing
DwVfpRegister DoubleRegister
constexpr bool IsFloatingPoint(MachineRepresentation rep)
constexpr int kNumRegisters
#define ALLOCATABLE_DOUBLE_REGISTERS(V)
#define REGISTER_CODE(R)
#define ALLOCATABLE_GENERAL_REGISTERS(V)
#define ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(V)
#define REGISTER_COUNT(R)
#define ALLOCATABLE_SIMD128_REGISTERS(V)
#define DCHECK_LE(v1, v2)
Definition logging.h:490
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_EQ(v1, v2)
Definition logging.h:485