v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
canonical-types.cc
Go to the documentation of this file.
1// Copyright 2022 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
7#include "src/base/hashing.h"
10#include "src/heap/heap-inl.h"
11#include "src/init/v8.h"
12#include "src/roots/roots-inl.h"
13#include "src/utils/utils.h"
17
18namespace v8::internal::wasm {
19
23
25
28 V8::FatalProcessOutOfMemory(nullptr, "too many canonicalized types");
29 }
30}
31
// NOTE(review): Doxygen-extracted listing. The signature line is missing from
// this capture; per the member index at the bottom of this page this is
// TypeCanonicalizer::AddRecursiveGroup(WasmModule* module, uint32_t size) —
// confirm against the real source. A few interior lines (76, and the range
// arguments of the std::none_of DCHECKs, 89/92) are also absent; the code
// below is left byte-identical to the capture.
//
// Canonicalizes the last {size} types of {module} as one recursion group:
// builds a candidate CanonicalGroup under the mutex, reuses an existing
// identical group if FindCanonicalGroup matches (restoring the zone snapshot
// to discard the temporary allocations), otherwise registers the group's
// types under fresh canonical indices.
33 if (size == 0) return;
34 // If the caller knows statically that {size == 1}, it should have called
35 // {AddRecursiveSingletonGroup} directly. For cases where this is not
36 // statically determined we add this dispatch here.
37 if (size == 1) return AddRecursiveSingletonGroup(module);
38
39 uint32_t start_index = static_cast<uint32_t>(module->types.size() - size);
40
41 // Multiple threads could try to register recursive groups concurrently.
42 // TODO(manoskouk): Investigate if we can fine-grain the synchronization.
43 base::MutexGuard mutex_guard(&mutex_);
44 // Compute the first canonical index in the recgroup in the case that it does
45 // not already exist.
46 CanonicalTypeIndex first_new_canonical_index{
47 static_cast<uint32_t>(canonical_supertypes_.size())};
48
49 // Create a snapshot of the zone; this will be restored in case we find a
50 // matching recursion group.
51 ZoneSnapshot zone_snapshot = zone_.Snapshot();
52
53 DCHECK_GE(module->types.size(), start_index + size);
54 CanonicalGroup group{&zone_, size, first_new_canonical_index};
55 for (uint32_t i = 0; i < size; i++) {
56 group.types[i] = CanonicalizeTypeDef(
57 module, ModuleTypeIndex{start_index + i}, ModuleTypeIndex{start_index},
58 first_new_canonical_index);
59 }
60 if (CanonicalTypeIndex canonical_index = FindCanonicalGroup(group);
61 canonical_index.valid()) {
62 // Delete zone memory from {CanonicalizeTypeDef} and {CanonicalGroup}.
63 zone_snapshot.Restore(&zone_);
64
65 // Identical group found. Map new types to the old types's canonical
66 // representatives.
67 for (uint32_t i = 0; i < size; i++) {
68 CanonicalTypeIndex existing_type_index =
69 CanonicalTypeIndex{canonical_index.index + i};
70 module->isorecursive_canonical_type_ids[start_index + i] =
71 existing_type_index;
72 }
73 return;
74 }
75 canonical_supertypes_.resize(first_new_canonical_index.index + size);
77 canonical_types_.reserve(first_new_canonical_index.index + size, &zone_);
78 for (uint32_t i = 0; i < size; i++) {
79 CanonicalType& canonical_type = group.types[i];
80 CanonicalTypeIndex canonical_id{first_new_canonical_index.index + i};
81 // {CanonicalGroup} allocates types in the Zone.
82 DCHECK(zone_.Contains(&canonical_type));
83 canonical_types_.set(canonical_id, &canonical_type);
84 canonical_supertypes_[canonical_id.index] = canonical_type.supertype;
85 module->isorecursive_canonical_type_ids[start_index + i] = canonical_id;
86 }
87 // Check that this canonical ID is not used yet.
88 DCHECK(std::none_of(
90 [=](auto& entry) { return entry.index == first_new_canonical_index; }));
91 DCHECK(std::none_of(
93 [=](auto& entry) { return entry.first == first_new_canonical_index; }));
94 canonical_groups_.emplace(group);
95}
96
// NOTE(review): signature line (97) missing from this capture; per the member
// index this is TypeCanonicalizer::AddRecursiveSingletonGroup(WasmModule*) —
// confirm against the real source. Canonicalizes the single most recently
// added type of {module} as a one-element recursion group, deduplicating
// against existing singleton groups.
98 DCHECK(!module->types.empty());
99 uint32_t type_index = static_cast<uint32_t>(module->types.size() - 1);
100 base::MutexGuard guard(&mutex_);
101 CanonicalTypeIndex new_canonical_index{
102 static_cast<uint32_t>(canonical_supertypes_.size())};
103 // Snapshot the zone before allocating the new type; the zone will be reset if
104 // we find an identical type.
105 ZoneSnapshot zone_snapshot = zone_.Snapshot();
106 CanonicalType type =
107 CanonicalizeTypeDef(module, ModuleTypeIndex{type_index},
108 ModuleTypeIndex{type_index}, new_canonical_index);
109 CanonicalSingletonGroup group{type, new_canonical_index};
110 if (CanonicalTypeIndex index = FindCanonicalGroup(group); index.valid()) {
111 zone_snapshot.Restore(&zone_);
112 module->isorecursive_canonical_type_ids[type_index] = index;
113 return;
114 }
115 // Check that the new canonical ID is not used yet.
// NOTE(review): the range arguments of the two std::none_of DCHECKs (lines
// 117/120) are missing from this capture.
116 DCHECK(std::none_of(
118 [=](auto& entry) { return entry.index == new_canonical_index; }));
119 DCHECK(std::none_of(
121 [=](auto& entry) { return entry.first == new_canonical_index; }));
122 // {group.type} is stack-allocated, whereas {canonical_singleton_groups_}
123 // creates a long-lived zone-allocated copy of it.
124 auto stored_group = canonical_singleton_groups_.emplace(group).first;
125 canonical_supertypes_.push_back(type.supertype);
127 canonical_types_.reserve(new_canonical_index.index + 1, &zone_);
128 canonical_types_.set(new_canonical_index, &stored_group->type);
129 module->isorecursive_canonical_type_ids[type_index] = new_canonical_index;
130}
131
// NOTE(review): first signature line (132) missing from this capture; this is
// the overload taking a module-independent FunctionSig and returning its
// CanonicalTypeIndex (the final `return new_canonical_index;` below). Several
// interior lines (146, 148, 154, 174, 177, 183-184) are also absent.
// Fast path: reinterpret the module-independent signature as canonical and
// look it up without copying; slow path: copy the signature into this class's
// zone and register it as a new singleton group.
133 const FunctionSig* sig) {
134// Types in the signature must be module-independent.
135#if DEBUG
136 for (ValueType type : sig->all()) DCHECK(!type.has_index());
137#endif
138 const bool kFinal = true;
139 // Because of the checks above, we can treat the type_def as canonical.
140 // TODO(366180605): It would be nice to not have to rely on a cast here.
141 // Is there a way to avoid it? In the meantime, these asserts provide at
142 // least partial assurances that the cast is safe:
143 static_assert(sizeof(CanonicalValueType) == sizeof(ValueType));
144 static_assert(
145 CanonicalValueType::Primitive(NumericKind::kI32).raw_bit_field() ==
147 CanonicalType canonical{reinterpret_cast<const CanonicalSig*>(sig),
149 base::MutexGuard guard(&mutex_);
150 // Fast path lookup before canonicalizing (== copying into the
151 // TypeCanonicalizer's zone) the function signature.
152 CanonicalTypeIndex new_canonical_index{
153 static_cast<uint32_t>(canonical_supertypes_.size())};
155 CanonicalSingletonGroup{canonical, new_canonical_index});
156 if (index.valid()) return index;
157
158 // Copy into this class's zone to store this as a new canonical function type.
159 CanonicalSig::Builder builder(&zone_, sig->return_count(),
160 sig->parameter_count());
161 for (ValueType ret : sig->returns()) {
162 builder.AddReturn(CanonicalValueType{ret});
163 }
164 for (ValueType param : sig->parameters()) {
165 builder.AddParam(CanonicalValueType{param});
166 }
167 canonical.function_sig = builder.Get();
168
169 CanonicalSingletonGroup group{canonical, new_canonical_index};
170 // Copying the signature shouldn't make a difference: There is no match.
171 DCHECK(!FindCanonicalGroup(group).valid());
172 // Check that the new canonical ID is not used yet.
173 DCHECK(std::none_of(
175 [=](auto& entry) { return entry.index == new_canonical_index; }));
176 DCHECK(std::none_of(
178 [=](auto& entry) { return entry.first == new_canonical_index; }));
179 // {group.type} is stack-allocated, whereas {canonical_singleton_groups_}
180 // creates a long-lived zone-allocated copy of it.
181 const CanonicalSingletonGroup& stored_group =
182 *canonical_singleton_groups_.emplace(group).first;
185 canonical_types_.reserve(new_canonical_index.index + 1, &zone_);
186 canonical_types_.set(new_canonical_index, &stored_group.type);
187 return new_canonical_index;
188}
189
// NOTE(review): first signature line (190) missing; per the member index this
// is LookupFunctionSignature(CanonicalTypeIndex) const. Interior line 193
// (presumably a check on {type}) is also absent from this capture.
191 CanonicalTypeIndex index) const {
192 const CanonicalType* type = canonical_types_[index];
194 return type->function_sig;
195}
196
// NOTE(review): first signature line (197) missing; per the member index this
// is LookupStruct(CanonicalTypeIndex) const. Interior line 200 (presumably a
// check on {type}) is also absent from this capture.
199 CanonicalTypeIndex index) const {
200 const CanonicalType* type = canonical_types_[index];
201 return type->struct_type;
202}
203
// NOTE(review): first signature line (204) missing; per the member index this
// is LookupArray(CanonicalTypeIndex) const. Interior line 207 (presumably a
// check on {type}) is also absent from this capture.
206 CanonicalTypeIndex index) const {
207 const CanonicalType* type = canonical_types_[index];
208 return type->array_type;
209}
210
// NOTE(review): signature line (211) and several interior lines (214-215, 217,
// 224, 231-232 — including the closing brace of the kPredefinedArrayTypes
// initializer, which presumably contains the i16 entry) are missing from this
// capture. Registers the predefined array types (i8 visible below) as the
// first singleton groups so their canonical indices are fixed constants.
212 static constexpr std::pair<CanonicalTypeIndex, CanonicalValueType>
213 kPredefinedArrayTypes[] = {{kPredefinedArrayI8Index, {kWasmI8}},
216 for (auto [index, element_type] : kPredefinedArrayTypes) {
218 DCHECK_EQ(index.index, canonical_singleton_groups_.size());
219 static constexpr bool kMutable = true;
220 static constexpr bool kFinal = true;
221 static constexpr bool kShared = false; // TODO(14616): Fix this.
222 CanonicalArrayType* type =
223 zone_.New<CanonicalArrayType>(element_type, kMutable);
225 .type = CanonicalType(type, CanonicalTypeIndex{kNoSuperType}, kFinal,
226 kShared),
227 .index = index};
228 const CanonicalSingletonGroup& stored_group =
229 *canonical_singleton_groups_.emplace(group).first;
230 canonical_types_.set(index, &stored_group.type);
233 }
234}
235
// NOTE(review): first signature line (236) missing; per the member index this
// is IsCanonicalSubtype(CanonicalTypeIndex sub_index, CanonicalTypeIndex
// super_index). Equal indices short-circuit to true without locking; otherwise
// the supertype chain is walked under the mutex.
237 CanonicalTypeIndex super_index) {
238 // Fast path without synchronization:
239 if (sub_index == super_index) return true;
240
241 // Multiple threads could try to register and access recursive groups
242 // concurrently.
243 // TODO(manoskouk): Investigate if we can improve this synchronization.
244 base::MutexGuard mutex_guard(&mutex_);
245 return IsCanonicalSubtype_Locked(sub_index, super_index);
246}
// NOTE(review): first signature line (247) missing from this capture.
// Walks {canonical_supertypes_} upward from {sub_index} until it hits
// {super_index} (true) or an invalid index terminates the chain (false).
// Caller must hold {mutex_} (see the _Locked suffix and the callers above).
248 CanonicalTypeIndex sub_index, CanonicalTypeIndex super_index) const {
249 while (sub_index.valid()) {
250 if (sub_index == super_index) return true;
251 // TODO(jkummerow): Investigate if replacing this with
252 // `sub_index = canonical_types_[sub_index].supertype;`
253 // has acceptable performance, which would allow us to save the memory
254 // cost of storing {canonical_supertypes_}.
255 sub_index = canonical_supertypes_[sub_index.index];
256 }
257 return false;
258}
259
// NOTE(review): first signature line (260) missing; this is the overload
// taking module-relative type indices. It maps both sides to their canonical
// ids via the owning modules and delegates to the canonical-index overload.
261 ModuleTypeIndex super_index,
262 const WasmModule* sub_module,
263 const WasmModule* super_module) {
264 CanonicalTypeIndex canonical_super =
265 super_module->canonical_type_id(super_index);
266 CanonicalTypeIndex canonical_sub = sub_module->canonical_type_id(sub_index);
267 return IsCanonicalSubtype(canonical_sub, canonical_super);
268}
269
// NOTE(review): first signature line (270) missing; per the member index this
// is IsHeapSubtype(CanonicalTypeIndex sub, CanonicalTypeIndex super) const.
// Precondition: callers must have already handled the {sub == super} case
// (DCHECK_NE below); the chain walk happens under the mutex.
271 CanonicalTypeIndex super) const {
272 DCHECK_NE(sub, super);
273 base::MutexGuard mutex_guard(&mutex_);
274 return IsCanonicalSubtype_Locked(sub, super);
275}
276
// NOTE(review): signature line (277) and interior lines 283, 286, 288 are
// missing from this capture (presumably the clearing of the remaining
// containers, e.g. {canonical_singleton_groups_} / {canonical_types_}).
// Drops all canonicalized state; only safe when no NativeModule can still
// reference the canonical type ids (CHECK below).
278 // Any remaining native modules might reference the types we're about to
279 // clear.
280 CHECK_EQ(GetWasmEngine()->NativeModuleCount(), 0);
281
282 base::MutexGuard mutex_guard(&mutex_);
284 canonical_supertypes_.clear();
285 canonical_groups_.clear();
287 zone_.Reset();
289}
290
// NOTE(review): first signature line (291) missing; per the member index this
// is CanonicalizeTypeDef(const WasmModule*, ModuleTypeIndex module_type_idx,
// ModuleTypeIndex recgroup_start, CanonicalTypeIndex canonical_recgroup_start)
// returning CanonicalType. The switch's case labels (lines 326, 339, 354, 362
// — function/struct/array/continuation kinds, judging by the bodies) and a few
// other lines (319, 350, 357) are absent from this capture.
// Translates one module-relative type definition into a module-independent
// CanonicalType: indices pointing before {recgroup_start} resolve to their
// already-canonical ids, indices inside the recgroup are rebased onto
// {canonical_recgroup_start}.
292 const WasmModule* module, ModuleTypeIndex module_type_idx,
293 ModuleTypeIndex recgroup_start,
294 CanonicalTypeIndex canonical_recgroup_start) {
295 mutex_.AssertHeld(); // The caller must hold the mutex.
296
297 auto CanonicalizeTypeIndex = [=](ModuleTypeIndex type_index) {
298 if (!type_index.valid()) return CanonicalTypeIndex::Invalid();
299 DCHECK(type_index.valid());
300 if (type_index < recgroup_start) {
301 // This references a type from an earlier recgroup; use the
302 // already-canonicalized type index.
303 return module->canonical_type_id(type_index);
304 }
305 // For types within the same recgroup, generate indexes assuming that this
306 // is a new canonical recgroup. To prevent truncation in the
307 // CanonicalValueType's bit field, we must check the range here.
308 uint32_t new_index = canonical_recgroup_start.index +
309 (type_index.index - recgroup_start.index);
310 if (V8_UNLIKELY(new_index >= kMaxCanonicalTypes)) {
311 V8::FatalProcessOutOfMemory(nullptr, "too many canonicalized types");
312 }
313 return CanonicalTypeIndex{new_index};
314 };
315
316 auto CanonicalizeValueType = [=](ValueType type) {
317 if (!type.has_index()) return CanonicalValueType{type};
318 static_assert(kMaxCanonicalTypes <=
320 return type.Canonicalize(CanonicalizeTypeIndex(type.ref_index()));
321 };
322
323 TypeDefinition type = module->type(module_type_idx);
324 CanonicalTypeIndex supertype = CanonicalizeTypeIndex(type.supertype);
325 switch (type.kind) {
327 const FunctionSig* original_sig = type.function_sig;
328 CanonicalSig::Builder builder(&zone_, original_sig->return_count(),
329 original_sig->parameter_count());
330 for (ValueType ret : original_sig->returns()) {
331 builder.AddReturn(CanonicalizeValueType(ret));
332 }
333 for (ValueType param : original_sig->parameters()) {
334 builder.AddParam(CanonicalizeValueType(param));
335 }
336 return CanonicalType(builder.Get(), supertype, type.is_final,
337 type.is_shared);
338 }
340 const StructType* original_type = type.struct_type;
341 CanonicalStructType::Builder builder(&zone_, original_type->field_count(),
342 original_type->is_descriptor());
343 for (uint32_t i = 0; i < original_type->field_count(); i++) {
344 builder.AddField(CanonicalizeValueType(original_type->field(i)),
345 original_type->mutability(i),
346 original_type->field_offset(i));
347 }
348 builder.set_total_fields_size(original_type->total_fields_size());
349 return CanonicalType(
351 supertype, CanonicalizeTypeIndex(type.descriptor),
352 CanonicalizeTypeIndex(type.describes), type.is_final, type.is_shared);
353 }
355 CanonicalValueType element_type =
356 CanonicalizeValueType(type.array_type->element_type());
358 element_type, type.array_type->mutability());
359 return CanonicalType(array_type, supertype, type.is_final,
360 type.is_shared);
361 }
363 CanonicalTypeIndex canonical_index =
364 CanonicalizeTypeIndex(type.cont_type->contfun_typeindex());
365 CanonicalContType* canonical_cont =
366 zone_.New<CanonicalContType>(canonical_index);
367 return CanonicalType(canonical_cont, supertype, type.is_final,
368 type.is_shared);
369 }
370 }
371}
372
373// Returns the index of the canonical representative of the first type in this
374// group if it exists, and `CanonicalTypeIndex::Invalid()` otherwise.
// NOTE(review): the signature line (375) and line 381 (the condition /
// Invalid() branch of the return expression) are missing from this capture;
// only the `: it->first;` half of the conditional survives below.
376 const CanonicalGroup& group) const {
377 // Groups of size 0 do not make sense here; groups of size 1 should use
378 // {CanonicalSingletonGroup} (see below).
379 DCHECK_LT(1, group.types.size());
380 auto it = canonical_groups_.find(group);
382 : it->first;
383}
384
385// Returns the canonical index of the given group if it already exists.
// NOTE(review): the signature line (386) and line 390 (the condition /
// Invalid() branch of the return expression) are missing from this capture.
387 const CanonicalSingletonGroup& group) const {
388 auto it = canonical_singleton_groups_.find(group);
389 static_assert(kMaxCanonicalTypes <= kMaxInt);
391 : it->index;
392}
393
// NOTE(review): signature lines (394-395) and the lines computing {result}
// (399-401, 403) are missing from this capture; only the tracing and return
// tail survives. Appears to report this canonicalizer's off-heap memory use
// (zone-tracked allocations) — confirm against the real source.
396 // The storage of the canonical group's types is accounted for via the
397 // allocator below (which tracks the zone memory).
398 base::MutexGuard mutex_guard(&mutex_);
402 // Note: the allocator also tracks zone allocations of `canonical_types_`.
404 if (v8_flags.trace_wasm_offheap_memory) {
405 PrintF("TypeCanonicalizer: %zu\n", result);
406 }
407 return result;
408}
409
// NOTE(review): signature line (410) missing; per the member index this is
// GetCurrentNumberOfTypes() const. The count of registered canonical types is
// the size of {canonical_supertypes_} (one entry per canonical type).
411 base::MutexGuard mutex_guard(&mutex_);
412 return canonical_supertypes_.size();
413}
414
415// static
// NOTE(review): signature lines (416-417) missing; per the member index this
// is PrepareForCanonicalTypeId(Isolate* isolate, CanonicalTypeIndex id).
// Grows the per-isolate wasm_canonical_rtts / js_to_wasm_wrappers
// WeakFixedArrays so index {id} fits, copying old contents and clearing the
// new tail. Lines 445-447 and 452 (the New() allocations of the replacement
// arrays) are absent from this capture.
418 if (!id.valid()) return;
419 Heap* heap = isolate->heap();
420 // {2 * (id + 1)} needs to fit in an int.
421 CHECK_LE(id.index, kMaxInt / 2 - 1);
422 // Canonical types and wrappers are zero-indexed.
423 const int length = id.index + 1;
424 // The fast path is non-handlified.
425 Tagged<WeakFixedArray> old_rtts_raw = heap->wasm_canonical_rtts();
426 Tagged<WeakFixedArray> old_wrappers_raw = heap->js_to_wasm_wrappers();
427
428 // Fast path: Lengths are sufficient.
429 int old_length = old_rtts_raw->length();
430 DCHECK_EQ(old_length, old_wrappers_raw->length());
431 if (old_length >= length) return;
432
433 // Allocate bigger WeakFixedArrays for rtts and wrappers. Grow them
434 // exponentially.
435 const int new_length = std::max(old_length * 3 / 2, length);
436 CHECK_LT(old_length, new_length);
437
438 // Allocation can invalidate previous unhandled pointers.
439 DirectHandle<WeakFixedArray> old_rtts{old_rtts_raw, isolate};
440 DirectHandle<WeakFixedArray> old_wrappers{old_wrappers_raw, isolate};
441 old_rtts_raw = old_wrappers_raw = {};
442
443 // We allocate the WeakFixedArray filled with undefined values, as we cannot
444 // pass the cleared value in a handle (see https://crbug.com/364591622). We
445 // overwrite the new entries via {MemsetTagged} afterwards.
448 WeakFixedArray::CopyElements(isolate, *new_rtts, 0, *old_rtts, 0, old_length);
449 MemsetTagged(new_rtts->RawFieldOfFirstElement() + old_length,
450 ClearedValue(isolate), new_length - old_length);
451 DirectHandle<WeakFixedArray> new_wrappers =
453 WeakFixedArray::CopyElements(isolate, *new_wrappers, 0, *old_wrappers, 0,
454 old_length);
455 MemsetTagged(new_wrappers->RawFieldOfFirstElement() + old_length,
456 ClearedValue(isolate), new_length - old_length);
457 heap->SetWasmCanonicalRttsAndJSToWasmWrappers(*new_rtts, *new_wrappers);
458}
459
460// static
// NOTE(review): signature line (461) missing; per the member index this is
// ClearWasmCanonicalTypesForTesting(Isolate*). Resets both per-isolate arrays
// to the read-only empty WeakFixedArray.
462 ReadOnlyRoots roots(isolate);
463 isolate->heap()->SetWasmCanonicalRttsAndJSToWasmWrappers(
464 roots.empty_weak_fixed_array(), roots.empty_weak_fixed_array());
465}
466
476
// NOTE(review): first signature line (477) missing; per the member index this
// is FindIndex_Slow(const CanonicalSig*) const returning CanonicalTypeIndex.
// The actual return statement (line 485) is also absent from this capture —
// only the explanatory TODO comments survive below.
478 const CanonicalSig* sig) const {
479 // TODO(397489547): Make this faster. The plan is to allocate an extra
480 // slot in the Zone immediately preceding each CanonicalSig, so we can
481 // get from the sig's address to that slot's address via pointer arithmetic.
482 // For now, just search through all known signatures, which is acceptable
483 // as long as only the type-reflection proposal needs this.
484 // TODO(42210967): Improve this before shipping Type Reflection.
486}
487
488#ifdef DEBUG
// Debug-only helper: reports whether {sig} was allocated in this
// canonicalizer's zone. Takes the mutex because the zone is shared with the
// canonicalization paths above.
489bool TypeCanonicalizer::Contains(const CanonicalSig* sig) const {
490 base::MutexGuard mutex_guard(&mutex_);
491 return zone_.Contains(sig);
492}
493#endif
494
495} // namespace v8::internal::wasm
#define SBXCHECK_EQ(lhs, rhs)
Definition check.h:62
V8_INLINE void AssertHeld() const
Definition mutex.h:58
size_t return_count() const
Definition signature.h:93
base::Vector< const T > returns() const
Definition signature.h:116
base::Vector< const T > parameters() const
Definition signature.h:113
size_t parameter_count() const
Definition signature.h:94
static void CopyElements(Isolate *isolate, Tagged< WeakFixedArray > dst, int dst_index, Tagged< WeakFixedArray > src, int src_index, int len, WriteBarrierMode mode=kDefaultMode)
static V8_EXPORT_PRIVATE void FatalProcessOutOfMemory(Isolate *isolate, const char *location, const OOMDetails &details=kNoOOMDetails)
static Handle< WeakFixedArray > New(IsolateT *isolate, int capacity, AllocationType allocation=AllocationType::kYoung, MaybeDirectHandle< Object > initial_value={})
void Restore(Zone *zone) const
Definition zone.cc:215
V8_WARN_UNUSED_RESULT ZoneSnapshot Snapshot() const
Definition zone.cc:111
T * New(Args &&... args)
Definition zone.h:114
static constexpr CanonicalValueType Primitive(NumericKind kind)
void AddField(ValueTypeSubclass type, bool mutability, uint32_t offset=0)
Subclass * Build(ComputeOffsets compute_offsets=kComputeOffsets)
bool mutability(uint32_t index) const
uint32_t field_offset(uint32_t index) const
ValueType field(uint32_t index) const
const CanonicalTypeIndex FindIndex_Slow(const CanonicalSig *sig) const
void set(CanonicalTypeIndex index, const CanonicalType *type)
V8_EXPORT_PRIVATE bool IsCanonicalSubtype(CanonicalTypeIndex sub_index, CanonicalTypeIndex super_index)
V8_EXPORT_PRIVATE void AddRecursiveSingletonGroup(WasmModule *module)
V8_EXPORT_PRIVATE const CanonicalStructType * LookupStruct(CanonicalTypeIndex index) const
V8_EXPORT_PRIVATE bool IsFunctionSignature(CanonicalTypeIndex index) const
bool IsCanonicalSubtype_Locked(CanonicalTypeIndex sub_index, CanonicalTypeIndex super_index) const
static constexpr uint32_t kNumberOfPredefinedTypes
CanonicalType CanonicalizeTypeDef(const WasmModule *module, ModuleTypeIndex module_type_idx, ModuleTypeIndex recgroup_start, CanonicalTypeIndex canonical_recgroup_start)
CanonicalTypeIndex FindIndex_Slow(const CanonicalSig *sig) const
V8_EXPORT_PRIVATE const CanonicalArrayType * LookupArray(CanonicalTypeIndex index) const
static V8_EXPORT_PRIVATE void ClearWasmCanonicalTypesForTesting(Isolate *isolate)
V8_EXPORT_PRIVATE size_t GetCurrentNumberOfTypes() const
V8_EXPORT_PRIVATE bool IsStruct(CanonicalTypeIndex index) const
static constexpr CanonicalTypeIndex kPredefinedArrayI16Index
CanonicalTypeIndex FindCanonicalGroup(const CanonicalGroup &) const
bool IsHeapSubtype(CanonicalTypeIndex sub, CanonicalTypeIndex super) const
V8_EXPORT_PRIVATE bool IsArray(CanonicalTypeIndex index) const
V8_EXPORT_PRIVATE const CanonicalSig * LookupFunctionSignature(CanonicalTypeIndex index) const
V8_EXPORT_PRIVATE void AddRecursiveGroup(WasmModule *module, uint32_t size)
static V8_EXPORT_PRIVATE void PrepareForCanonicalTypeId(Isolate *isolate, CanonicalTypeIndex id)
std::unordered_set< CanonicalSingletonGroup > canonical_singleton_groups_
std::vector< CanonicalTypeIndex > canonical_supertypes_
static constexpr CanonicalTypeIndex kPredefinedArrayI8Index
std::unordered_set< CanonicalGroup > canonical_groups_
V8_EXPORT_PRIVATE void EmptyStorageForTesting()
constexpr uint32_t raw_bit_field() const
Definition value-type.h:594
static constexpr ValueType Primitive(NumericKind kind)
Definition value-type.h:880
TypeCanonicalizer * type_canonicalizer()
ZoneVector< RpoNumber > & result
static constexpr size_t kMaxCanonicalTypes
constexpr IndependentValueType kWasmI8
TypeCanonicalizer * GetTypeCanonicalizer()
constexpr ModuleTypeIndex kNoSuperType
size_t ContentSize(const std::vector< T > &vector)
static constexpr bool kNotShared
Definition value-type.h:101
WasmEngine * GetWasmEngine()
constexpr IndependentValueType kWasmI16
void PrintF(const char *format,...)
Definition utils.cc:39
void MemsetTagged(Tagged_t *start, Tagged< MaybeObject > value, size_t counter)
Definition slots-inl.h:486
kWasmInternalFunctionIndirectPointerTag kProtectedInstanceDataOffset sig
Tagged< ClearedWeakValue > ClearedValue(PtrComprCageBase cage_base)
V8_EXPORT_PRIVATE FlagValues v8_flags
constexpr int kMaxInt
Definition globals.h:374
#define DCHECK_LE(v1, v2)
Definition logging.h:490
#define CHECK_LT(lhs, rhs)
#define CHECK_LE(lhs, rhs)
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define CHECK_EQ(lhs, rhs)
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define DCHECK_GT(v1, v2)
Definition logging.h:487
#define UPDATE_WHEN_CLASS_CHANGES(classname, size)
static constexpr CanonicalTypeIndex Invalid()
Definition value-type.h:89
constexpr bool valid() const
Definition value-type.h:58
std::vector< TypeDefinition > types
CanonicalTypeIndex canonical_type_id(ModuleTypeIndex index) const
#define V8_UNLIKELY(condition)
Definition v8config.h:660
wasm::ValueType type