random-module-generation.cc
// Copyright 2024 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/wasm/fuzzing/random-module-generation.h"

#include <algorithm>
#include <array>
#include <optional>

// V8-internal includes (elided in the original listing); the ones below are
// assumed from the symbols used in this file.
#include "src/base/small-vector.h"
#include "src/base/utils/random-number-generator.h"
#include "src/base/vector.h"
#include "src/flags/flags.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-opcodes-inl.h"

// This whole compilation unit should only be included in non-official builds
// to reduce binary size (it's a testing-only implementation which lives in
// src/ so that the GenerateRandomWasmModule runtime function can use it). We
// normally disable V8_WASM_RANDOM_FUZZERS in official builds.
#ifndef V8_WASM_RANDOM_FUZZERS
#error Exclude this compilation unit in official builds.
#endif

namespace v8::internal::wasm::fuzzing {
namespace {

constexpr int kMaxArrays = 3;
constexpr int kMaxStructs = 4;
constexpr int kMaxStructFields = 4;
constexpr int kMaxGlobals = 64;
constexpr uint32_t kMaxLocals = 32;
constexpr int kMaxParameters = 15;
constexpr int kMaxReturns = 15;
constexpr int kMaxExceptions = 4;
constexpr int kMaxTables = 4;
constexpr int kMaxMemories = 4;
constexpr int kMaxArraySize = 20;
constexpr int kMaxPassiveDataSegments = 2;
constexpr uint32_t kMaxRecursionDepth = 64;
constexpr int kMaxCatchCases = 6;

int MaxTableSize() {
  return std::min(static_cast<int>(v8_flags.wasm_max_table_size.value()), 32);
}

int MaxNumOfFunctions() {
  return std::min(static_cast<int>(v8_flags.max_wasm_functions.value()), 4);
}

struct StringImports {
  uint32_t cast;
  uint32_t test;
  uint32_t fromCharCode;
  uint32_t fromCodePoint;
  uint32_t charCodeAt;
  uint32_t codePointAt;
  uint32_t length;
  uint32_t concat;
  uint32_t substring;
  uint32_t equals;
  uint32_t compare;

  // These aren't imports, but closely related, so store them here as well:
  ModuleTypeIndex array_i16;
  ModuleTypeIndex array_i8;
};

// Creates an array out of the arguments without hardcoding the exact number
// of arguments.
template <typename... T>
constexpr auto CreateArray(T... elements) {
  std::array result = {elements...};
  return result;
}

// Concatenates arrays into one array at compile time.
template <typename T, size_t... N>
constexpr auto ConcatArrays(std::array<T, N>... array) {
  constexpr size_t kNumArrays = sizeof...(array);
  std::array<T*, kNumArrays> kArrays = {&array[0]...};
  constexpr size_t kLengths[kNumArrays] = {array.size()...};
  constexpr size_t kSumOfLengths = (... + array.size());

  std::array<T, kSumOfLengths> result = {0};
  size_t result_index = 0;
  for (size_t arr = 0; arr < kNumArrays; arr++) {
    for (size_t pos = 0; pos < kLengths[arr]; pos++) {
      result[result_index++] = kArrays[arr][pos];
    }
  }
  return result;
}
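
// Illustrative sketch (not part of the original file): how the two helpers
// above compose. All sizes are deduced, so no array length is hardcoded at
// the call site.
//
//   constexpr auto kFirst = CreateArray(1, 2);    // std::array<int, 2>
//   constexpr auto kSecond = CreateArray(3);      // std::array<int, 1>
//   constexpr auto kAll = ConcatArrays(kFirst, kSecond);
//   static_assert(kAll.size() == 3);              // contains {1, 2, 3}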

class DataRange {
  // data_ is used for general random values for fuzzing.
  base::Vector<const uint8_t> data_;
  // The RNG is used for generating random values (i32.consts etc.) for which
  // the quality of the input is less important.
  base::RandomNumberGenerator rng_;

 public:
  explicit DataRange(base::Vector<const uint8_t> data, int64_t seed = -1)
      : data_(data), rng_(seed == -1 ? get<int64_t>() : seed) {}
  DataRange(const DataRange&) = delete;
  DataRange& operator=(const DataRange&) = delete;

  // Don't accidentally pass DataRange by value. This will reuse bytes and
  // might lead to OOM because the end might not be reached.
  // Define move constructor and move assignment; copy constructor and copy
  // assignment are deleted (above).
  DataRange(DataRange&& other) V8_NOEXCEPT : data_(other.data_),
                                             rng_(other.rng_) {
    other.data_ = {};
  }
  DataRange& operator=(DataRange&& other) V8_NOEXCEPT {
    data_ = other.data_;
    rng_ = other.rng_;
    other.data_ = {};
    return *this;
  }

  size_t size() const { return data_.size(); }

  DataRange split() {
    // As we might split many times, only use 2 bytes if the data size is
    // large.
    uint16_t random_choice = data_.size() > std::numeric_limits<uint8_t>::max()
                                 ? get<uint16_t>()
                                 : get<uint8_t>();
    uint16_t num_bytes = random_choice % std::max(size_t{1}, data_.size());
    int64_t new_seed = rng_.initial_seed() ^ rng_.NextInt64();
    DataRange split(data_.SubVector(0, num_bytes), new_seed);
    data_ += num_bytes;
    return split;
  }

  template <typename T, size_t max_bytes = sizeof(T)>
  T getPseudoRandom() {
    static_assert(!std::is_same_v<T, bool>, "bool needs special handling");
    static_assert(max_bytes <= sizeof(T));
    // Special handling for signed integers: Calling
    // getPseudoRandom<int32_t, 1>() should be equal to
    // getPseudoRandom<int8_t>(). (The NextBytes() below does not achieve that
    // as it depends on endianness and either never generates negative values
    // or fills in the most significant bits, which would be unexpected.)
    if constexpr (std::is_integral_v<T> && std::is_signed_v<T>) {
      switch (max_bytes) {
        case 1:
          return static_cast<int8_t>(getPseudoRandom<uint8_t>());
        case 2:
          return static_cast<int16_t>(getPseudoRandom<uint16_t>());
        case 4:
          return static_cast<int32_t>(getPseudoRandom<uint32_t>());
        default:
          return static_cast<T>(
              getPseudoRandom<std::make_unsigned_t<T>, max_bytes>());
      }
    }

    T result{};
    rng_.NextBytes(&result, max_bytes);
    return result;
  }

  template <typename T>
  T get() {
    // Bool needs special handling (see template specialization below).
    static_assert(!std::is_same_v<T, bool>, "bool needs special handling");

    // We want to support the case where we have less than sizeof(T) bytes
    // remaining in the slice. We'll just use what we have, so we get a bit of
    // randomness when there are still some bytes left. If size == 0, get<T>()
    // returns the type's value-initialized value.
    const size_t num_bytes = std::min(sizeof(T), data_.size());
    T result{};
    memcpy(&result, data_.begin(), num_bytes);
    data_ += num_bytes;
    return result;
  }
};

// Explicit specialization must be defined outside of the class body.
template <>
bool DataRange::get() {
  // The general implementation above is not instantiable for bool, as that
  // would cause undefined behaviour when memcpy'ing random bytes into the
  // bool. This can result in different observable side effects when invoking
  // get<bool> between debug and release versions, which eventually makes the
  // code output different as well as raising various unrecoverable errors at
  // runtime.
  // Hence we specialize get<bool> to consume a full byte and use only the
  // least significant bit (0 == false, 1 == true).
  return get<uint8_t>() % 2;
}
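
// Illustrative sketch (not part of the original file): how a fuzzer input is
// consumed by DataRange, assuming a little-endian machine and the 3-byte
// input {0x01, 0x02, 0x03}:
//
//   DataRange range(base::VectorOf(input));
//   uint16_t a = range.get<uint16_t>();  // consumes 2 bytes -> 0x0201
//   uint32_t b = range.get<uint32_t>();  // only 1 byte left -> 0x00000003
//   bool c = range.get<bool>();          // input exhausted -> false (0 % 2)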

enum IncludeNumericTypes {
  kIncludeNumericTypes = true,
  kExcludeNumericTypes = false
};
enum IncludePackedTypes {
  kIncludePackedTypes = true,
  kExcludePackedTypes = false
};
enum IncludeAllGenerics {
  kIncludeAllGenerics = true,
  kExcludeSomeGenerics = false
};
enum IncludeS128 { kIncludeS128 = true, kExcludeS128 = false };

// Chooses one `ValueType` randomly based on `options` and the enums specified
// above.
ValueType GetValueTypeHelper(WasmModuleGenerationOptions options,
                             DataRange* data, uint32_t num_nullable_types,
                             uint32_t num_non_nullable_types,
                             IncludeNumericTypes include_numeric_types,
                             IncludePackedTypes include_packed_types,
                             IncludeAllGenerics include_all_generics,
                             IncludeS128 include_s128 = kIncludeS128) {
  // Create and fill a vector of potential types to choose from.
  base::SmallVector<ValueType, 32> types;

  // Numeric non-wasmGC types.
  if (include_numeric_types) {
    // Many "general-purpose" instructions return i32, so give that a higher
    // probability (such as 3x).
    types.insert(types.end(),
                 {kWasmI32, kWasmI32, kWasmI32, kWasmI64, kWasmF32, kWasmF64});

    // SIMD type.
    if (options.generate_simd() && include_s128) {
      types.push_back(kWasmS128);
    }
  }

  // The MVP types: apart from numeric types, contains only the non-nullable
  // funcRef. We don't add externRef, because for externRef globals we generate
  // initialiser expressions where we need wasmGC types. Also, externRef is not
  // really useful for the MVP fuzzer, as there is nothing that we could
  // generate.
  types.push_back(kWasmFuncRef);

  // WasmGC types (including user-defined types).
  // Decide if the return type will be nullable or not.
  const bool nullable = options.generate_wasm_gc() ? data->get<bool>() : false;

  if (options.generate_wasm_gc()) {
    types.push_back(kWasmI31Ref);

    if (include_numeric_types && include_packed_types) {
      types.insert(types.end(), {kWasmI8, kWasmI16});
    }

    if (nullable) {
      types.insert(types.end(),
                   {kWasmNullRef, kWasmNullExternRef, kWasmNullFuncRef});
    }
    if (nullable || include_all_generics) {
      types.insert(types.end(), {kWasmStructRef, kWasmArrayRef, kWasmAnyRef,
                                 kWasmEqRef, kWasmExternRef});
    }
  }

  // The last index of user-defined types allowed is different based on the
  // nullability of the output. User-defined types are function signatures or
  // structs and arrays (in case of wasmGC).
  const uint32_t num_user_defined_types =
      nullable ? num_nullable_types : num_non_nullable_types;

  // Conceptually, user-defined types are added to the end of the list. Pick a
  // random one among them.
  uint32_t chosen_id =
      data->get<uint8_t>() % (types.size() + num_user_defined_types);

  Nullability nullability = nullable ? kNullable : kNonNullable;

  if (chosen_id >= types.size()) {
    // Return a user-defined type.
    return ValueType::RefMaybeNull(
        ModuleTypeIndex{chosen_id - static_cast<uint32_t>(types.size())},
        nullability, kNotShared, RefTypeKind::kOther /* unknown */);
  }
  // If returning a reference type, fix its nullability according to
  // {nullable}.
  if (types[chosen_id].is_reference()) {
    return ValueType::RefMaybeNull(types[chosen_id].heap_type(), nullability);
  }
  // Otherwise, just return the picked type.
  return types[chosen_id];
}
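
// Worked example (illustrative): with MVP options and numeric types included,
// `types` holds the 7 entries {i32, i32, i32, i64, f32, f64, funcref}. With,
// say, 3 user-defined types, chosen_id is drawn from [0, 10); a chosen_id of
// 8 selects the user-defined type with module type index 8 - 7 = 1.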

ValueType GetValueType(WasmModuleGenerationOptions options, DataRange* data,
                       uint32_t num_types) {
  return GetValueTypeHelper(options, data, num_types, num_types,
                            kIncludeNumericTypes, kExcludePackedTypes,
                            kIncludeAllGenerics);
}

void GeneratePassiveDataSegment(DataRange* range, WasmModuleBuilder* builder) {
  int length = range->get<uint8_t>() % 65;
  ZoneVector<uint8_t> data(length, builder->zone());
  for (int i = 0; i < length; ++i) {
    data[i] = range->getPseudoRandom<uint8_t>();
  }
  builder->AddPassiveDataSegment(data.data(),
                                 static_cast<uint32_t>(data.size()));
}

uint32_t GenerateRefTypeElementSegment(DataRange* range,
                                       WasmModuleBuilder* builder,
                                       ValueType element_type) {
  DCHECK(element_type.is_object_reference());
  DCHECK(element_type.has_index());
  WasmModuleBuilder::WasmElemSegment segment(
      builder->zone(), element_type, false,
      WasmInitExpr::RefNullConst(element_type.heap_type()));
  size_t element_count = range->get<uint8_t>() % 11;
  for (size_t i = 0; i < element_count; ++i) {
    segment.entries.emplace_back(
        WasmModuleBuilder::WasmElemSegment::Entry::kRefNullEntry,
        element_type.ref_index().index);
  }
  return builder->AddElementSegment(std::move(segment));
}

std::vector<ValueType> GenerateTypes(WasmModuleGenerationOptions options,
                                     DataRange* data, uint32_t num_ref_types) {
  std::vector<ValueType> types;
  int num_params = int{data->get<uint8_t>()} % (kMaxParameters + 1);
  types.reserve(num_params);
  for (int i = 0; i < num_params; ++i) {
    types.push_back(GetValueType(options, data, num_ref_types));
  }
  return types;
}

FunctionSig* CreateSignature(Zone* zone,
                             base::Vector<const ValueType> param_types,
                             base::Vector<const ValueType> return_types) {
  FunctionSig::Builder builder(zone, return_types.size(), param_types.size());
  for (auto& type : param_types) {
    builder.AddParam(type);
  }
  for (auto& type : return_types) {
    builder.AddReturn(type);
  }
  return builder.Get();
}

class BodyGen;
using GenerateFn = void (BodyGen::*)(DataRange*);
using GenerateFnWithHeap = bool (BodyGen::*)(HeapType, DataRange*, Nullability);

// GeneratorAlternativesPerOption allows statically precomputing the generator
// arrays for different {WasmModuleGenerationOptions}.
template <size_t kNumMVP, size_t kAdditionalSimd, size_t kAdditionalWasmGC>
class GeneratorAlternativesPerOption {
  static constexpr size_t kNumSimd = kNumMVP + kAdditionalSimd;
  static constexpr size_t kNumWasmGC = kNumMVP + kAdditionalWasmGC;
  static constexpr size_t kNumAll = kNumSimd + kAdditionalWasmGC;

 public:
  constexpr GeneratorAlternativesPerOption(
      std::array<GenerateFn, kNumMVP> mvp,
      std::array<GenerateFn, kAdditionalSimd> simd,
      std::array<GenerateFn, kAdditionalWasmGC> wasmgc)
      : mvp_(mvp),
        simd_(ConcatArrays(mvp, simd)),
        wasmgc_(ConcatArrays(mvp, wasmgc)),
        all_(ConcatArrays(mvp, ConcatArrays(simd, wasmgc))) {}

  constexpr base::Vector<const GenerateFn> GetAlternatives(
      WasmModuleGenerationOptions options) const {
    switch (options.ToIntegral()) {
      case 0:  // 0
        return base::VectorOf(mvp_);
      case 1 << kGenerateSIMD:  // 1
        return base::VectorOf(simd_);
      case 1 << kGenerateWasmGC:  // 2
        return base::VectorOf(wasmgc_);
      case (1 << kGenerateSIMD) | (1 << kGenerateWasmGC):  // 3
        return base::VectorOf(all_);
    }
    UNREACHABLE();
  }

 private:
  const std::array<GenerateFn, kNumMVP> mvp_;
  const std::array<GenerateFn, kNumSimd> simd_;
  const std::array<GenerateFn, kNumWasmGC> wasmgc_;
  const std::array<GenerateFn, kNumAll> all_;
};

// Deduction guide for the GeneratorAlternativesPerOption template.
template <size_t kNumMVP, size_t kAdditionalSimd, size_t kAdditionalWasmGC>
GeneratorAlternativesPerOption(std::array<GenerateFn, kNumMVP>,
                               std::array<GenerateFn, kAdditionalSimd>,
                               std::array<GenerateFn, kAdditionalWasmGC>)
    -> GeneratorAlternativesPerOption<kNumMVP, kAdditionalSimd,
                                      kAdditionalWasmGC>;
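
// Illustrative sketch (not part of the original file) of how such a table can
// be declared; the real generator tables appear further down in this file and
// are much larger. The member functions named here are defined in BodyGen
// below.
//
//   static constexpr GeneratorAlternativesPerOption kExampleAlternatives{
//       CreateArray(&BodyGen::sequence<kVoid>),  // always available (MVP)
//       CreateArray(&BodyGen::simd_const),       // only with SIMD enabled
//       CreateArray(&BodyGen::drop)};            // only with WasmGC enabled
//
// kExampleAlternatives.GetAlternatives(options) then yields 1, 2, or 3
// alternatives, depending on which features are enabled.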

class BodyGen {
  template <WasmOpcode Op, ValueKind... Args>
  void op(DataRange* data) {
    Generate<Args...>(data);
    builder_->Emit(Op);
  }

  class V8_NODISCARD BlockScope {
   public:
    BlockScope(BodyGen* gen, WasmOpcode block_type,
               base::Vector<const ValueType> param_types,
               base::Vector<const ValueType> result_types,
               base::Vector<const ValueType> br_types, bool emit_end = true)
        : gen_(gen), emit_end_(emit_end) {
      gen->blocks_.emplace_back(br_types.begin(), br_types.end());
      gen->builder_->EmitByte(block_type);

      if (param_types.size() == 0 && result_types.size() == 0) {
        gen->builder_->EmitValueType(kWasmVoid);
        return;
      }
      if (param_types.size() == 0 && result_types.size() == 1) {
        gen->builder_->EmitValueType(result_types[0]);
        return;
      }
      // Multi-value block.
      Zone* zone = gen->builder_->builder()->zone();
      FunctionSig::Builder builder(zone, result_types.size(),
                                   param_types.size());
      for (auto& type : param_types) {
        DCHECK_NE(type, kWasmVoid);
        builder.AddParam(type);
      }
      for (auto& type : result_types) {
        DCHECK_NE(type, kWasmVoid);
        builder.AddReturn(type);
      }
      FunctionSig* sig = builder.Get();
      const bool is_final = true;
      ModuleTypeIndex sig_id =
          gen->builder_->builder()->AddSignature(sig, is_final);
      gen->builder_->EmitI32V(sig_id);
    }

    ~BlockScope() {
      if (emit_end_) gen_->builder_->Emit(kExprEnd);
      gen_->blocks_.pop_back();
    }

   private:
    BodyGen* const gen_;
    const bool emit_end_;
  };

  void block(base::Vector<const ValueType> param_types,
             base::Vector<const ValueType> return_types, DataRange* data) {
    BlockScope block_scope(this, kExprBlock, param_types, return_types,
                           return_types);
    ConsumeAndGenerate(param_types, return_types, data);
  }

  template <ValueKind T>
  void block(DataRange* data) {
    if constexpr (T == kVoid) {
      block({}, {}, data);
    } else {
      block({}, base::VectorOf({ValueType::Primitive(T)}), data);
    }
  }

  void loop(base::Vector<const ValueType> param_types,
            base::Vector<const ValueType> return_types, DataRange* data) {
    BlockScope block_scope(this, kExprLoop, param_types, return_types,
                           param_types);
    ConsumeAndGenerate(param_types, return_types, data);
  }

  template <ValueKind T>
  void loop(DataRange* data) {
    if constexpr (T == kVoid) {
      loop({}, {}, data);
    } else {
      loop({}, base::VectorOf({ValueType::Primitive(T)}), data);
    }
  }

  void finite_loop(base::Vector<const ValueType> param_types,
                   base::Vector<const ValueType> return_types,
                   DataRange* data) {
    // int counter = `kLoopConstant`;
    int kLoopConstant = data->get<uint8_t>() % 8 + 1;
    uint32_t counter = builder_->AddLocal(kWasmI32);
    builder_->EmitI32Const(kLoopConstant);
    builder_->EmitSetLocal(counter);

    // begin loop {
    BlockScope loop_scope(this, kExprLoop, param_types, return_types,
                          param_types);
    // Consume the parameters:
    // Resetting locals in each iteration can create interesting loop-phis.
    // TODO(evih): Iterate through existing locals and try to reuse them
    // instead of creating new locals.
    for (auto it = param_types.rbegin(); it != param_types.rend(); it++) {
      uint32_t local = builder_->AddLocal(*it);
      builder_->EmitSetLocal(local);
    }

    // Loop body.
    Generate(kWasmVoid, data);

    // Decrement the counter.
    builder_->EmitGetLocal(counter);
    builder_->EmitI32Const(1);
    builder_->Emit(kExprI32Sub);
    builder_->EmitTeeLocal(counter);

    // If there is another iteration, generate new parameters for the loop and
    // go to the beginning of the loop.
    {
      BlockScope if_scope(this, kExprIf, {}, {}, {});
      Generate(param_types, data);
      builder_->EmitWithI32V(kExprBr, 1);
    }

    // Otherwise, generate the return types.
    Generate(return_types, data);
    // } end loop
  }

  template <ValueKind T>
  void finite_loop(DataRange* data) {
    if constexpr (T == kVoid) {
      finite_loop({}, {}, data);
    } else {
      finite_loop({}, base::VectorOf({ValueType::Primitive(T)}), data);
    }
  }

  enum IfType { kIf, kIfElse };

  void if_(base::Vector<const ValueType> param_types,
           base::Vector<const ValueType> return_types, IfType type,
           DataRange* data) {
    // A one-armed "if" is only valid if the input and output types are the
    // same.
    DCHECK_IMPLIES(type == kIf, param_types == return_types);
    Generate(kWasmI32, data);
    BlockScope block_scope(this, kExprIf, param_types, return_types,
                           return_types);
    ConsumeAndGenerate(param_types, return_types, data);
    if (type == kIfElse) {
      builder_->Emit(kExprElse);
      ConsumeAndGenerate(param_types, return_types, data);
    }
  }

  template <ValueKind T, IfType type>
  void if_(DataRange* data) {
    static_assert(T == kVoid || type == kIfElse,
                  "if without else cannot produce a value");
    if_({},
        T == kVoid ? base::Vector<ValueType>{}
                   : base::VectorOf({ValueType::Primitive(T)}),
        type, data);
  }

  void try_block_helper(ValueType return_type, DataRange* data) {
    bool has_catch_all = data->get<bool>();
    uint8_t num_catch =
        data->get<uint8_t>() % (builder_->builder()->NumTags() + 1);
    bool is_delegate = num_catch == 0 && !has_catch_all && data->get<bool>();
    // Allow one more target than there are enclosing try blocks, for
    // delegating to the caller.

    base::Vector<const ValueType> return_type_vec =
        return_type.kind() == kVoid ? base::Vector<ValueType>{}
                                    : base::VectorOf(&return_type, 1);
    BlockScope block_scope(this, kExprTry, {}, return_type_vec,
                           return_type_vec, !is_delegate);
    int control_depth = static_cast<int>(blocks_.size()) - 1;
    Generate(return_type, data);
    catch_blocks_.push_back(control_depth);
    for (int i = 0; i < num_catch; ++i) {
      const FunctionSig* exception_type = builder_->builder()->GetTagType(i);
      builder_->EmitWithU32V(kExprCatch, i);
      ConsumeAndGenerate(exception_type->parameters(), return_type_vec, data);
    }
    if (has_catch_all) {
      builder_->Emit(kExprCatchAll);
      Generate(return_type, data);
    }
    if (is_delegate) {
      // The delegate target depth does not include the current try block,
      // because 'delegate' closes this scope. However it is still in the
      // {blocks_} list, so remove one to get the correct size.
      int delegate_depth = data->get<uint8_t>() % (blocks_.size() - 1);
      builder_->EmitWithU32V(kExprDelegate, delegate_depth);
    }
    catch_blocks_.pop_back();
  }

  template <ValueKind T>
  void try_block(DataRange* data) {
    try_block_helper(ValueType::Primitive(T), data);
  }

  struct CatchCase {
    int tag_index;
    CatchKind kind;
  };

  // Generates the i-th nested block for the try-table, and recursively
  // generates the blocks inside it.
  void try_table_rec(base::Vector<const ValueType> param_types,
                     base::Vector<const ValueType> return_types,
                     base::Vector<CatchCase> catch_cases, size_t i,
                     DataRange* data) {
    DCHECK(v8_flags.experimental_wasm_exnref);
    if (i == catch_cases.size()) {
      // Base case: emit the try-table itself.
      builder_->Emit(kExprTryTable);
      blocks_.emplace_back(return_types.begin(), return_types.end());
      const bool is_final = true;
      ModuleTypeIndex try_sig_index = builder_->builder()->AddSignature(
          CreateSignature(builder_->builder()->zone(), param_types,
                          return_types),
          is_final);
      builder_->EmitI32V(try_sig_index);
      builder_->EmitU32V(static_cast<uint32_t>(catch_cases.size()));
      for (size_t j = 0; j < catch_cases.size(); ++j) {
        builder_->EmitByte(catch_cases[j].kind);
        if (catch_cases[j].kind == kCatch || catch_cases[j].kind == kCatchRef) {
          builder_->EmitByte(catch_cases[j].tag_index);
        }
        builder_->EmitByte(catch_cases.size() - j - 1);
      }
      ConsumeAndGenerate(param_types, return_types, data);
      builder_->Emit(kExprEnd);
      blocks_.pop_back();
      builder_->EmitWithI32V(kExprBr,
                             static_cast<int32_t>(catch_cases.size()));
      return;
    }

    // Enter the i-th nested block. The signature of the block is built as
    // follows:
    // - The input types are the same for each block; the operands are
    //   forwarded as-is to the inner try-table.
    // - The output types can be empty, or contain the tag types and/or an
    //   exnref, depending on the catch kind.
    const FunctionSig* type =
        builder_->builder()->GetTagType(catch_cases[i].tag_index);
    int has_tag =
        catch_cases[i].kind == kCatchRef || catch_cases[i].kind == kCatch;
    int has_ref =
        catch_cases[i].kind == kCatchAllRef || catch_cases[i].kind == kCatchRef;
    size_t return_count =
        (has_tag ? type->parameter_count() : 0) + (has_ref ? 1 : 0);
    auto block_returns =
        builder_->builder()->zone()->AllocateVector<ValueType>(return_count);
    if (has_tag) {
      std::copy_n(type->parameters().begin(), type->parameter_count(),
                  block_returns.begin());
    }
    if (has_ref) block_returns.last() = kWasmExnRef;
    {
      BlockScope block(this, kExprBlock, param_types, block_returns,
                       block_returns);
      try_table_rec(param_types, return_types, catch_cases, i + 1, data);
    }
    // Catch label. Consume the unpacked values and exnref (if any), produce
    // values that match the outer scope, and branch to it.
    ConsumeAndGenerate(block_returns, return_types, data);
    builder_->EmitWithU32V(kExprBr, static_cast<uint32_t>(i));
  }
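
  // Illustrative shape of the code emitted by the recursion above for two
  // catch cases (sketch; label names invented for readability):
  //
  //   block $outer                ;; emitted by try_table_block_helper
  //     block $h0                 ;; results for catch_cases[0]
  //       block $h1               ;; results for catch_cases[1]
  //         try_table (catch 0 -> $h0) (catch 1 -> $h1)
  //           <body>
  //         end
  //         br $outer             ;; no exception: skip all handlers
  //       end
  //       <handler for catch_cases[1]>
  //       br $outer
  //     end
  //     <handler for catch_cases[0]>
  //     br $outer                 ;; the final `br i` with i == 0
  //   end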

  void try_table_block_helper(base::Vector<const ValueType> param_types,
                              base::Vector<const ValueType> return_types,
                              DataRange* data) {
    uint8_t num_catch = data->get<uint8_t>() % kMaxCatchCases;
    auto catch_cases =
        builder_->builder()->zone()->AllocateVector<CatchCase>(num_catch);
    for (int i = 0; i < num_catch; ++i) {
      catch_cases[i].tag_index =
          data->get<uint8_t>() % builder_->builder()->NumTags();
      catch_cases[i].kind =
          static_cast<CatchKind>(data->get<uint8_t>() % (kLastCatchKind + 1));
    }

    BlockScope block_scope(this, kExprBlock, param_types, return_types,
                           return_types);
    try_table_rec(param_types, return_types, catch_cases, 0, data);
  }

  template <ValueKind T>
  void try_table_block(DataRange* data) {
    ValueType return_types_arr[1] = {ValueType::Primitive(T)};
    auto return_types = base::VectorOf(return_types_arr, T == kVoid ? 0 : 1);
    if (!v8_flags.experimental_wasm_exnref) {
      // Can't generate a try_table block. Just generate something else.
      any_block({}, return_types, data);
      return;
    }
    try_table_block_helper({}, return_types, data);
  }

  void any_block(base::Vector<const ValueType> param_types,
                 base::Vector<const ValueType> return_types, DataRange* data) {
    uint8_t available_cases = v8_flags.experimental_wasm_exnref ? 6 : 5;
    uint8_t block_type = data->get<uint8_t>() % available_cases;
    switch (block_type) {
      case 0:
        block(param_types, return_types, data);
        return;
      case 1:
        loop(param_types, return_types, data);
        return;
      case 2:
        finite_loop(param_types, return_types, data);
        return;
      case 3:
        if (param_types == return_types) {
          if_({}, {}, kIf, data);
          return;
        }
        [[fallthrough]];
      case 4:
        if_(param_types, return_types, kIfElse, data);
        return;
      case 5:
        try_table_block_helper(param_types, return_types, data);
        return;
    }
  }

  void br(DataRange* data) {
    // There is always at least the block representing the function body.
    DCHECK(!blocks_.empty());
    const uint32_t target_block = data->get<uint8_t>() % blocks_.size();
    const auto break_types = base::VectorOf(blocks_[target_block]);

    Generate(break_types, data);
    builder_->EmitWithI32V(
        kExprBr, static_cast<uint32_t>(blocks_.size()) - 1 - target_block);
  }

  template <ValueKind wanted_kind>
  void br_if(DataRange* data) {
    // There is always at least the block representing the function body.
    DCHECK(!blocks_.empty());
    const uint32_t target_block = data->get<uint8_t>() % blocks_.size();
    const auto break_types = base::VectorOf(blocks_[target_block]);

    Generate(break_types, data);
    Generate(kWasmI32, data);
    builder_->EmitWithI32V(
        kExprBrIf, static_cast<uint32_t>(blocks_.size()) - 1 - target_block);
    ConsumeAndGenerate(
        break_types,
        wanted_kind == kVoid
            ? base::Vector<ValueType>{}
            : base::VectorOf({ValueType::Primitive(wanted_kind)}),
        data);
  }

  template <ValueKind wanted_kind>
  void br_on_null(DataRange* data) {
    DCHECK(!blocks_.empty());
    const uint32_t target_block = data->get<uint8_t>() % blocks_.size();
    const auto break_types = base::VectorOf(blocks_[target_block]);
    Generate(break_types, data);
    GenerateRef(data);
    builder_->EmitWithI32V(
        kExprBrOnNull,
        static_cast<uint32_t>(blocks_.size()) - 1 - target_block);
    builder_->Emit(kExprDrop);
    ConsumeAndGenerate(
        break_types,
        wanted_kind == kVoid
            ? base::Vector<ValueType>{}
            : base::VectorOf({ValueType::Primitive(wanted_kind)}),
        data);
  }

  template <ValueKind wanted_kind>
  void br_on_non_null(DataRange* data) {
    DCHECK(!blocks_.empty());
    const uint32_t target_block = data->get<uint8_t>() % blocks_.size();
    const auto break_types = base::VectorOf(blocks_[target_block]);
    if (break_types.empty() ||
        !break_types[break_types.size() - 1].is_reference()) {
      // Invalid break_types for br_on_non_null.
      Generate<wanted_kind>(data);
      return;
    }
    Generate(break_types, data);
    builder_->EmitWithI32V(
        kExprBrOnNonNull,
        static_cast<uint32_t>(blocks_.size()) - 1 - target_block);
    ConsumeAndGenerate(
        break_types.SubVector(0, break_types.size() - 1),
        wanted_kind == kVoid
            ? base::Vector<ValueType>{}
            : base::VectorOf({ValueType::Primitive(wanted_kind)}),
        data);
  }

  void br_table(ValueType result_type, DataRange* data) {
    const uint8_t block_count = 1 + data->get<uint8_t>() % 8;
    // Generate the block entries.
    uint16_t entry_bits =
        block_count > 4 ? data->get<uint16_t>() : data->get<uint8_t>();
    for (size_t i = 0; i < block_count; ++i) {
      builder_->Emit(kExprBlock);
      builder_->EmitValueType(result_type);
      blocks_.emplace_back();
      if (result_type != kWasmVoid) {
        blocks_.back().push_back(result_type);
      }
      // There can be additional instructions in each block.
      // Only generate them with a 25% chance, as it's otherwise quite
      // unlikely to have enough random bytes left for the br_table
      // instruction.
      if ((entry_bits & 3) == 3) {
        Generate(kWasmVoid, data);
      }
      entry_bits >>= 2;
    }
    // Generate the br_table.
    Generate(result_type, data);
    Generate(kWasmI32, data);
    builder_->Emit(kExprBrTable);
    uint32_t entry_count = 1 + data->get<uint8_t>() % 8;
    builder_->EmitU32V(entry_count);
    for (size_t i = 0; i < entry_count + 1; ++i) {
      builder_->EmitU32V(data->get<uint8_t>() % block_count);
    }
    // Generate the block ends.
    uint8_t exit_bits = result_type == kWasmVoid ? 0 : data->get<uint8_t>();
    for (size_t i = 0; i < block_count; ++i) {
      if (exit_bits & 1) {
        // Drop and generate a new value.
        builder_->Emit(kExprDrop);
        Generate(result_type, data);
      }
      exit_bits >>= 1;
      builder_->Emit(kExprEnd);
      blocks_.pop_back();
    }
  }

  template <ValueKind wanted_kind>
  void br_table(DataRange* data) {
    br_table(
        wanted_kind == kVoid ? kWasmVoid : ValueType::Primitive(wanted_kind),
        data);
  }

  void return_op(DataRange* data) {
    auto returns = builder_->signature()->returns();
    Generate(returns, data);
    builder_->Emit(kExprReturn);
  }

  constexpr static uint8_t max_alignment(WasmOpcode memop) {
    switch (memop) {
      case kExprS128LoadMem:
      case kExprS128StoreMem:
        return 4;
      case kExprI64LoadMem:
      case kExprF64LoadMem:
      case kExprI64StoreMem:
      case kExprF64StoreMem:
      case kExprI64AtomicStore:
      case kExprI64AtomicLoad:
      case kExprI64AtomicAdd:
      case kExprI64AtomicSub:
      case kExprI64AtomicAnd:
      case kExprI64AtomicOr:
      case kExprI64AtomicXor:
      case kExprI64AtomicExchange:
      case kExprI64AtomicCompareExchange:
      case kExprS128Load8x8S:
      case kExprS128Load8x8U:
      case kExprS128Load16x4S:
      case kExprS128Load16x4U:
      case kExprS128Load32x2S:
      case kExprS128Load32x2U:
      case kExprS128Load64Splat:
      case kExprS128Load64Zero:
        return 3;
      case kExprI32LoadMem:
      case kExprI64LoadMem32S:
      case kExprI64LoadMem32U:
      case kExprF32LoadMem:
      case kExprI32StoreMem:
      case kExprI64StoreMem32:
      case kExprF32StoreMem:
      case kExprI32AtomicStore:
      case kExprI64AtomicStore32U:
      case kExprI32AtomicLoad:
      case kExprI64AtomicLoad32U:
      case kExprI32AtomicAdd:
      case kExprI32AtomicSub:
      case kExprI32AtomicAnd:
      case kExprI32AtomicOr:
      case kExprI32AtomicXor:
      case kExprI32AtomicExchange:
      case kExprI32AtomicCompareExchange:
      case kExprI64AtomicAdd32U:
      case kExprI64AtomicSub32U:
      case kExprI64AtomicAnd32U:
      case kExprI64AtomicOr32U:
      case kExprI64AtomicXor32U:
      case kExprI64AtomicExchange32U:
      case kExprI64AtomicCompareExchange32U:
      case kExprS128Load32Splat:
      case kExprS128Load32Zero:
        return 2;
      case kExprI32LoadMem16S:
      case kExprI32LoadMem16U:
      case kExprI64LoadMem16S:
      case kExprI64LoadMem16U:
      case kExprI32StoreMem16:
      case kExprI64StoreMem16:
      case kExprI32AtomicStore16U:
      case kExprI64AtomicStore16U:
      case kExprI32AtomicLoad16U:
      case kExprI64AtomicLoad16U:
      case kExprI32AtomicAdd16U:
      case kExprI32AtomicSub16U:
      case kExprI32AtomicAnd16U:
      case kExprI32AtomicOr16U:
      case kExprI32AtomicXor16U:
      case kExprI32AtomicExchange16U:
      case kExprI32AtomicCompareExchange16U:
      case kExprI64AtomicAdd16U:
      case kExprI64AtomicSub16U:
      case kExprI64AtomicAnd16U:
      case kExprI64AtomicOr16U:
      case kExprI64AtomicXor16U:
      case kExprI64AtomicExchange16U:
      case kExprI64AtomicCompareExchange16U:
      case kExprS128Load16Splat:
        return 1;
      case kExprI32LoadMem8S:
      case kExprI32LoadMem8U:
      case kExprI64LoadMem8S:
      case kExprI64LoadMem8U:
      case kExprI32StoreMem8:
      case kExprI64StoreMem8:
      case kExprI32AtomicStore8U:
      case kExprI64AtomicStore8U:
      case kExprI32AtomicLoad8U:
      case kExprI64AtomicLoad8U:
      case kExprI32AtomicAdd8U:
      case kExprI32AtomicSub8U:
      case kExprI32AtomicAnd8U:
      case kExprI32AtomicOr8U:
      case kExprI32AtomicXor8U:
      case kExprI32AtomicExchange8U:
      case kExprI32AtomicCompareExchange8U:
      case kExprI64AtomicAdd8U:
      case kExprI64AtomicSub8U:
      case kExprI64AtomicAnd8U:
      case kExprI64AtomicOr8U:
      case kExprI64AtomicXor8U:
      case kExprI64AtomicExchange8U:
      case kExprI64AtomicCompareExchange8U:
      case kExprS128Load8Splat:
        return 0;
      default:
        return 0;
    }
  }
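
  // Example (illustrative): the returned value is the base-2 logarithm of the
  // access size in bytes, as used for the alignment immediate in the Wasm
  // binary format. E.g. max_alignment(kExprI64LoadMem) == 3, i.e. 2^3 = 8-byte
  // alignment, while max_alignment(kExprI32LoadMem8S) == 0, i.e. byte-aligned.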

  template <WasmOpcode memory_op, ValueKind... arg_kinds>
  void memop(DataRange* data) {
    // Atomic operations need to be aligned exactly to their max alignment.
    const bool is_atomic = memory_op >> 8 == kAtomicPrefix;
    const uint8_t align = is_atomic ? max_alignment(memory_op)
                                    : data->getPseudoRandom<uint8_t>() %
                                          (max_alignment(memory_op) + 1);

    uint8_t memory_index =
        data->get<uint8_t>() % builder_->builder()->NumMemories();

    uint64_t offset = data->get<uint16_t>();
    // With a 1/256 chance generate potentially very large offsets.
    if ((offset & 0xff) == 0xff) {
      offset = builder_->builder()->IsMemory64(memory_index)
                   ? data->getPseudoRandom<uint64_t>() & 0x1ffffffff
                   : data->getPseudoRandom<uint32_t>();
    }

    // Generate the index and the arguments, if any.
    builder_->builder()->IsMemory64(memory_index)
        ? Generate<kI64, arg_kinds...>(data)
        : Generate<kI32, arg_kinds...>(data);

    // Format of the instruction (supports multi-memory):
    // memory_op (align | 0x40) memory_index offset
    if (WasmOpcodes::IsPrefixOpcode(static_cast<WasmOpcode>(memory_op >> 8))) {
      DCHECK(memory_op >> 8 == kAtomicPrefix || memory_op >> 8 == kSimdPrefix);
      builder_->EmitWithPrefix(memory_op);
    } else {
      builder_->Emit(memory_op);
    }
    builder_->EmitU32V(align | 0x40);
    builder_->EmitU32V(memory_index);
    builder_->EmitU64V(offset);
  }
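
  // Example encoding (illustrative): an i32.load (opcode 0x28) from memory 1
  // with alignment 2 (4 bytes) and offset 16 is emitted as the byte sequence
  //   0x28 0x42 0x01 0x10
  // where 0x42 == (2 | 0x40): the 0x40 bit in the alignment field selects the
  // multi-memory encoding in which an explicit memory index follows.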

  template <WasmOpcode Op, ValueKind... Args>
  void op_with_prefix(DataRange* data) {
    Generate<Args...>(data);
    builder_->EmitWithPrefix(Op);
  }

  void simd_const(DataRange* data) {
    builder_->EmitWithPrefix(kExprS128Const);
    for (int i = 0; i < kSimd128Size; i++) {
      builder_->EmitByte(data->getPseudoRandom<uint8_t>());
    }
  }

  template <WasmOpcode Op, int lanes, ValueKind... Args>
  void simd_lane_op(DataRange* data) {
    Generate<Args...>(data);
    builder_->EmitWithPrefix(Op);
    builder_->EmitByte(data->get<uint8_t>() % lanes);
  }

  template <WasmOpcode Op, int lanes, ValueKind... Args>
  void simd_lane_memop(DataRange* data) {
    // Simd load/store instructions that have a lane immediate.
    memop<Op, Args...>(data);
    builder_->EmitByte(data->get<uint8_t>() % lanes);
  }

  void simd_shuffle(DataRange* data) {
    Generate<kS128, kS128>(data);
    builder_->EmitWithPrefix(kExprI8x16Shuffle);
    for (int i = 0; i < kSimd128Size; i++) {
      builder_->EmitByte(static_cast<uint8_t>(data->get<uint8_t>() % 32));
    }
  }

  void drop(DataRange* data) {
    Generate(
        GetValueType(options_, data,
                     static_cast<uint32_t>(functions_.size() +
                                           structs_.size() + arrays_.size())),
        data);
    builder_->Emit(kExprDrop);
  }

  enum CallKind { kCallDirect, kCallIndirect, kCallRef };

  template <ValueKind wanted_kind>
  void call(DataRange* data) {
    call(data, ValueType::Primitive(wanted_kind), kCallDirect);
  }

  template <ValueKind wanted_kind>
  void call_indirect(DataRange* data) {
    call(data, ValueType::Primitive(wanted_kind), kCallIndirect);
  }

  template <ValueKind wanted_kind>
  void call_ref(DataRange* data) {
    call(data, ValueType::Primitive(wanted_kind), kCallRef);
  }

  void Convert(ValueType src, ValueType dst) {
    auto idx = [](ValueType t) -> int {
      switch (t.kind()) {
        case kI32:
          return 0;
        case kI64:
          return 1;
        case kF32:
          return 2;
        case kF64:
          return 3;
        default:
          UNREACHABLE();
      }
    };
    static constexpr WasmOpcode kConvertOpcodes[] = {
        // {i32, i64, f32, f64} -> i32
        kExprNop, kExprI32ConvertI64, kExprI32SConvertF32, kExprI32SConvertF64,
        // {i32, i64, f32, f64} -> i64
        kExprI64SConvertI32, kExprNop, kExprI64SConvertF32, kExprI64SConvertF64,
        // {i32, i64, f32, f64} -> f32
        kExprF32SConvertI32, kExprF32SConvertI64, kExprNop, kExprF32ConvertF64,
        // {i32, i64, f32, f64} -> f64
        kExprF64SConvertI32, kExprF64SConvertI64, kExprF64ConvertF32, kExprNop};
    int arr_idx = idx(dst) << 2 | idx(src);
    builder_->Emit(kConvertOpcodes[arr_idx]);
  }
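
  // Worked example (illustrative): Convert(kWasmI64, kWasmF32) computes
  // arr_idx = idx(f32) << 2 | idx(i64) = (2 << 2) | 1 = 9, which selects
  // kExprF32SConvertI64 from the "-> f32" row of the table above.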

  int choose_function_table_index(DataRange* data) {
    int table_count = builder_->builder()->NumTables();
    int start = data->get<uint8_t>() % table_count;
    for (int i = 0; i < table_count; ++i) {
      int index = (start + i) % table_count;
      if (builder_->builder()->GetTableType(index).is_reference_to(
              HeapType::kFunc)) {
        return index;
      }
    }
    FATAL("No funcref table found; table index 0 is expected to be funcref");
  }

  void call(DataRange* data, ValueType wanted_kind, CallKind call_kind) {
    uint8_t random_byte = data->get<uint8_t>();
    int func_index = random_byte % functions_.size();
    ModuleTypeIndex sig_index = functions_[func_index];
    const FunctionSig* sig = builder_->builder()->GetSignature(sig_index);
    // Generate arguments.
    for (size_t i = 0; i < sig->parameter_count(); ++i) {
      Generate(sig->GetParam(i), data);
    }
    // Emit call.
    // If the return types of the callee happen to match the return types of
    // the caller, generate a tail call.
    bool use_return_call = random_byte > 127;
    if (use_return_call &&
        std::equal(sig->returns().begin(), sig->returns().end(),
                   builder_->signature()->returns().begin(),
                   builder_->signature()->returns().end())) {
      if (call_kind == kCallDirect) {
        builder_->EmitWithU32V(kExprReturnCall,
                               NumImportedFunctions() + func_index);
      } else if (call_kind == kCallIndirect) {
        // This will not trap because table[func_index] always contains
        // function func_index.
        uint32_t table_index = choose_function_table_index(data);
        builder_->builder()->IsTable64(table_index)
            ? builder_->EmitI64Const(func_index)
            : builder_->EmitI32Const(func_index);
        builder_->EmitWithU32V(kExprReturnCallIndirect, sig_index);
        builder_->EmitByte(table_index);
      } else {
        GenerateRef(
            HeapType::Index(sig_index, kNotShared, RefTypeKind::kFunction),
            data);
        builder_->EmitWithU32V(kExprReturnCallRef, sig_index);
      }
      return;
    } else {
      if (call_kind == kCallDirect) {
        builder_->EmitWithU32V(kExprCallFunction,
                               NumImportedFunctions() + func_index);
      } else if (call_kind == kCallIndirect) {
        // This will not trap because table[func_index] always contains
        // function func_index.
        uint32_t table_index = choose_function_table_index(data);
        builder_->builder()->IsTable64(table_index)
            ? builder_->EmitI64Const(func_index)
            : builder_->EmitI32Const(func_index);
        builder_->EmitWithU32V(kExprCallIndirect, sig_index);
        builder_->EmitByte(table_index);
      } else {
        GenerateRef(
            HeapType::Index(sig_index, kNotShared, RefTypeKind::kFunction),
            data);
        builder_->EmitWithU32V(kExprCallRef, sig_index);
      }
    }
    if (sig->return_count() == 0 && wanted_kind != kWasmVoid) {
      // The call did not generate a value. Thus just generate it here.
      Generate(wanted_kind, data);
      return;
    }
    if (wanted_kind == kWasmVoid) {
      // The call did generate values, but we did not want one.
      for (size_t i = 0; i < sig->return_count(); ++i) {
        builder_->Emit(kExprDrop);
      }
      return;
    }
    auto wanted_types =
        base::VectorOf(&wanted_kind, wanted_kind == kWasmVoid ? 0 : 1);
    ConsumeAndGenerate(sig->returns(), wanted_types, data);
  }

  struct Var {
    uint32_t index;
    ValueType type = kWasmVoid;
    Var() = default;
    Var(uint32_t index, ValueType type) : index(index), type(type) {}
    bool is_valid() const { return type != kWasmVoid; }
  };

  ValueType AsNullable(ValueType original) {
    if (!original.is_ref()) return original;
    return original.AsNullable();
  }

  Var GetRandomLocal(DataRange* data, ValueType type = kWasmTop) {
    const size_t locals_count = all_locals_count();
    if (locals_count == 0) return {};
    uint32_t start_index = data->get<uint8_t>() % locals_count;
    uint32_t index = start_index;
    // TODO(14034): Ideally we would check for subtyping here over type
    // equality, but we don't have a module.
    while (type != kWasmTop && local_type(index) != type &&
           AsNullable(local_type(index)) != type) {
      index = (index + 1) % locals_count;
      if (index == start_index) return {};
    }
    return {index, local_type(index)};
  }

  constexpr static bool is_convertible_kind(ValueKind kind) {
    return kind == kI32 || kind == kI64 || kind == kF32 || kind == kF64;
  }

  template <ValueKind wanted_kind>
  void local_op(DataRange* data, WasmOpcode opcode) {
    static_assert(wanted_kind == kVoid || is_convertible_kind(wanted_kind));
    Var local = GetRandomLocal(data);
    // If there are no locals and no parameters, just generate any value (if a
    // value is needed), or do nothing.
    if (!local.is_valid() || !is_convertible_kind(local.type.kind())) {
      if (wanted_kind == kVoid) return;
      return Generate<wanted_kind>(data);
    }

    if (opcode != kExprLocalGet) Generate(local.type, data);
    builder_->EmitWithU32V(opcode, local.index);
    if (wanted_kind != kVoid && local.type.kind() != wanted_kind) {
      Convert(local.type, ValueType::Primitive(wanted_kind));
    }
  }

  template <ValueKind wanted_kind>
  void get_local(DataRange* data) {
    static_assert(wanted_kind != kVoid, "illegal type");
    local_op<wanted_kind>(data, kExprLocalGet);
  }

  void set_local(DataRange* data) { local_op<kVoid>(data, kExprLocalSet); }

  template <ValueKind wanted_kind>
  void tee_local(DataRange* data) {
    local_op<wanted_kind>(data, kExprLocalTee);
  }

  // Shifts the assigned values of some locals towards the start_local.
  // Assuming 4 locals with indices 0-3 of the same type and a start_local
  // with index 0, this emits e.g.:
  //   local.set 0 (local.get 1)
  //   local.set 1 (local.get 2)
  //   local.set 2 (local.get 3)
  // If this is executed in a loop, the value in local 3 will eventually end
  // up in local 0, but only after multiple iterations of the loop.
  void shift_locals_to(DataRange* data, Var start_local) {
    const uint32_t max_shift = data->get<uint8_t>() % 8 + 2;
    const auto [start_index, type] = start_local;
    const size_t locals_count = all_locals_count();
    uint32_t previous_index = start_index;
    uint32_t index = start_index;
    for (uint32_t i = 0; i < max_shift; ++i) {
      do {
        index = (index + 1) % locals_count;
      } while (local_type(index) != type);
      // Never emit more than one shift over all same-typed locals.
      // (In many cases we might end up with only one local with the same
      // type.)
      if (index == start_index) break;
      builder_->EmitGetLocal(index);
      builder_->EmitSetLocal(previous_index);
      previous_index = index;
    }
  }

  void shift_locals(DataRange* data) {
    const Var local = GetRandomLocal(data);
    if (local.type == kWasmVoid ||
        (local.type.is_non_nullable() && !locals_initialized_)) {
      return;
    }
    shift_locals_to(data, local);
  }

  template <size_t num_bytes>
  void i32_const(DataRange* data) {
    builder_->EmitI32Const(data->getPseudoRandom<int32_t, num_bytes>());
  }

  template <size_t num_bytes>
  void i64_const(DataRange* data) {
    builder_->EmitI64Const(data->getPseudoRandom<int64_t, num_bytes>());
  }

  Var GetRandomGlobal(DataRange* data, bool ensure_mutable) {
    uint32_t index;
    if (ensure_mutable) {
      if (mutable_globals_.empty()) return {};
      index = mutable_globals_[data->get<uint8_t>() % mutable_globals_.size()];
    } else {
      if (globals_.empty()) return {};
      index = data->get<uint8_t>() % globals_.size();
    }
    ValueType type = globals_[index];
    return {index, type};
  }

  template <ValueKind wanted_kind>
  void global_op(DataRange* data) {
    static_assert(wanted_kind == kVoid || is_convertible_kind(wanted_kind));
    constexpr bool is_set = wanted_kind == kVoid;
    Var global = GetRandomGlobal(data, is_set);
    // If there are no globals, just generate any value (if a value is
    // needed), or do nothing.
    if (!global.is_valid() || !is_convertible_kind(global.type.kind())) {
      if (wanted_kind == kVoid) return;
      return Generate<wanted_kind>(data);
    }

    if (is_set) Generate(global.type, data);
    builder_->EmitWithU32V(is_set ? kExprGlobalSet : kExprGlobalGet,
                           global.index);
    if (!is_set && global.type.kind() != wanted_kind) {
      Convert(global.type, ValueType::Primitive(wanted_kind));
    }
  }

  template <ValueKind wanted_kind>
  void get_global(DataRange* data) {
    static_assert(wanted_kind != kVoid, "illegal type");
    global_op<wanted_kind>(data);
  }

  template <ValueKind select_kind>
  void select_with_type(DataRange* data) {
    static_assert(select_kind != kVoid, "illegal kind for select");
    Generate<select_kind, select_kind, kI32>(data);
    // num_types is always 1.
    uint8_t num_types = 1;
    builder_->EmitWithU8U8(kExprSelectWithType, num_types,
                           ValueType::Primitive(select_kind).value_type_code());
  }

  void set_global(DataRange* data) { global_op<kVoid>(data); }

  void throw_or_rethrow(DataRange* data) {
    bool rethrow = data->get<bool>();
    if (rethrow && !catch_blocks_.empty()) {
      int control_depth = static_cast<int>(blocks_.size() - 1);
      int catch_index =
          data->get<uint8_t>() % static_cast<int>(catch_blocks_.size());
      builder_->EmitWithU32V(kExprRethrow,
                             control_depth - catch_blocks_[catch_index]);
    } else {
      int tag = data->get<uint8_t>() % builder_->builder()->NumTags();
      const FunctionSig* exception_sig = builder_->builder()->GetTagType(tag);
      Generate(exception_sig->parameters(), data);
      builder_->EmitWithU32V(kExprThrow, tag);
    }
  }

  template <ValueKind... Types>
  void sequence(DataRange* data) {
    Generate<Types...>(data);
  }

  void memory_size(DataRange* data) {
    uint8_t memory_index =
        data->get<uint8_t>() % builder_->builder()->NumMemories();

    builder_->EmitWithU8(kExprMemorySize, memory_index);
    // `memory_size` is expected to return an I32. However, for memory64,
    // `kExprMemorySize` returns an I64, so we have to convert it.
    if (builder_->builder()->IsMemory64(memory_index)) {
      builder_->Emit(kExprI32ConvertI64);
    }
  }

  void grow_memory(DataRange* data) {
    uint8_t memory_index =
        data->get<uint8_t>() % builder_->builder()->NumMemories();

    // Generate the index and the arguments, if any.
    builder_->builder()->IsMemory64(memory_index) ? Generate<kI64>(data)
                                                  : Generate<kI32>(data);
    builder_->EmitWithU8(kExprMemoryGrow, memory_index);
    // `grow_memory` is expected to return an I32. However, for memory64,
    // `kExprMemoryGrow` returns an I64, so we have to convert it.
    if (builder_->builder()->IsMemory64(memory_index)) {
      builder_->Emit(kExprI32ConvertI64);
    }
  }

  void ref_null(HeapType type, DataRange* data) {
    builder_->Emit(kExprRefNull);
    builder_->EmitHeapType(type);
  }

  bool get_local_ref(HeapType type, DataRange* data, Nullability nullable) {
    Var local = GetRandomLocal(data, ValueType::RefMaybeNull(type, nullable));
    if (local.is_valid() && (local.type.is_nullable() || locals_initialized_)) {
      // With a small chance don't only get the local but first "shift" local
      // values around. This creates more interesting patterns for the typed
      // optimizations.
      if (data->get<uint8_t>() % 8 == 1) {
        shift_locals_to(data, local);
      }
      builder_->EmitWithU32V(kExprLocalGet, local.index);
      return true;
    }

    return false;
  }

  bool new_object(HeapType type, DataRange* data, Nullability nullable) {
    DCHECK(type.is_index());

    ModuleTypeIndex index = type.ref_index();
    bool new_default = data->get<bool>();

    if (builder_->builder()->IsStructType(index)) {
      const StructType* struct_gen = builder_->builder()->GetStructType(index);
      int field_count = struct_gen->field_count();
      bool can_be_defaultable = std::all_of(
          struct_gen->fields().begin(), struct_gen->fields().end(),
          [](ValueType type) -> bool { return type.is_defaultable(); });

      if (new_default && can_be_defaultable) {
        builder_->EmitWithPrefix(kExprStructNewDefault);
        builder_->EmitU32V(index);
      } else {
        for (int i = 0; i < field_count; i++) {
          Generate(struct_gen->field(i).Unpacked(), data);
        }
        builder_->EmitWithPrefix(kExprStructNew);
        builder_->EmitU32V(index);
      }
    } else if (builder_->builder()->IsArrayType(index)) {
      ValueType element_type =
          builder_->builder()->GetArrayType(index)->element_type();
      bool can_be_defaultable = element_type.is_defaultable();
      WasmOpcode array_new_op[] = {
          kExprArrayNew,        kExprArrayNewFixed,
          kExprArrayNewData,    kExprArrayNewElem,
          kExprArrayNewDefault,  // The default op has to be at the end of the
                                 // list.
      };
      size_t op_size = arraysize(array_new_op);
      if (!can_be_defaultable) --op_size;
      switch (array_new_op[data->get<uint8_t>() % op_size]) {
        case kExprArrayNewElem:
        case kExprArrayNewData: {
          // This is more restrictive than it has to be.
          // TODO(14034): Also support nonnullable and non-index reference
          // types.
          if (element_type.is_reference() && element_type.is_nullable() &&
              element_type.has_index()) {
            // Add a new element segment with the corresponding type.
            uint32_t element_segment = GenerateRefTypeElementSegment(
                data, builder_->builder(), element_type);
            // Generate offset, length.
            // TODO(14034): Change the distribution here to make it more
            // likely that the numbers are in range.
            Generate({kWasmI32, kWasmI32}, data);
            // Generate the array.new_elem instruction.
            builder_->EmitWithPrefix(kExprArrayNewElem);
            builder_->EmitU32V(index);
            builder_->EmitU32V(element_segment);
            break;
          } else if (!element_type.is_reference()) {
            // Lazily create a data segment if the module doesn't have one
            // yet.
            if (builder_->builder()->NumDataSegments() == 0) {
              GeneratePassiveDataSegment(data, builder_->builder());
            }
            int data_index =
                data->get<uint8_t>() % builder_->builder()->NumDataSegments();
            // Generate offset, length.
            Generate({kWasmI32, kWasmI32}, data);
            builder_->EmitWithPrefix(kExprArrayNewData);
            builder_->EmitU32V(index);
            builder_->EmitU32V(data_index);
            break;
          }
          [[fallthrough]];  // To array.new.
        }
        case kExprArrayNew:
          Generate(element_type.Unpacked(), data);
          Generate(kWasmI32, data);
          builder_->EmitI32Const(kMaxArraySize);
          builder_->Emit(kExprI32RemS);
          builder_->EmitWithPrefix(kExprArrayNew);
          builder_->EmitU32V(index);
          break;
        case kExprArrayNewFixed: {
          size_t element_count =
              std::min(static_cast<size_t>(data->get<uint8_t>()), data->size());
          for (size_t i = 0; i < element_count; ++i) {
            Generate(element_type.Unpacked(), data);
          }
          builder_->EmitWithPrefix(kExprArrayNewFixed);
          builder_->EmitU32V(index);
          builder_->EmitU32V(static_cast<uint32_t>(element_count));
          break;
        }
        case kExprArrayNewDefault:
          Generate(kWasmI32, data);
          builder_->EmitI32Const(kMaxArraySize);
          builder_->Emit(kExprI32RemS);
          builder_->EmitWithPrefix(kExprArrayNewDefault);
          builder_->EmitU32V(index);
          break;
        default:
          FATAL("Unimplemented opcode");
      }
    } else {
      CHECK(builder_->builder()->IsSignature(index));
      // Map the type index to a function index.
      // TODO(11954, 7748): Once we have type canonicalization, choose a
      // random function from among those matching the signature (consider
      // function subtyping?).
      uint32_t declared_func_index =
          index.index - static_cast<uint32_t>(arrays_.size() + structs_.size());
      size_t num_functions = builder_->builder()->NumDeclaredFunctions();
      const FunctionSig* sig = builder_->builder()->GetSignature(index);
      for (size_t i = 0; i < num_functions; ++i) {
        if (sig == builder_->builder()
                       ->GetFunction(declared_func_index)
                       ->signature()) {
          uint32_t absolute_func_index =
              NumImportedFunctions() + declared_func_index;
          builder_->EmitWithU32V(kExprRefFunc, absolute_func_index);
          return true;
        }
        declared_func_index = (declared_func_index + 1) % num_functions;
      }
      // We did not find a function matching the requested signature.
      builder_->EmitWithI32V(kExprRefNull, index.index);
      if (!nullable) {
        builder_->Emit(kExprRefAsNonNull);
      }
    }

    return true;
  }

  void table_op(uint32_t index, std::vector<ValueType> types, DataRange* data,
                WasmOpcode opcode) {
    DCHECK(opcode == kExprTableSet || opcode == kExprTableSize ||
           opcode == kExprTableGrow || opcode == kExprTableFill);
    for (size_t i = 0; i < types.size(); i++) {
      // Callers pass kWasmFuncRef as a placeholder for the ref type; replace
      // it here with the actual type of the chosen table.
      if (types[i] == kWasmFuncRef) {
        types[i] = builder_->builder()->GetTableType(index);
      }
    }
    Generate(base::VectorOf(types), data);
    if (opcode == kExprTableSet) {
      builder_->Emit(opcode);
    } else {
      builder_->EmitWithPrefix(opcode);
    }
    builder_->EmitU32V(index);

    // `table_size` and `table_grow` should return an I32. However, the Wasm
    // instructions for table64 return an I64, so the result has to be
    // converted.
    if ((opcode == kExprTableSize || opcode == kExprTableGrow) &&
        builder_->builder()->IsTable64(index)) {
      builder_->Emit(kExprI32ConvertI64);
    }
  }
1578
1579 ValueType table_address_type(int table_index) {
1580 return builder_->builder()->IsTable64(table_index) ? kWasmI64 : kWasmI32;
1581 }
1582
1583 std::pair<int, ValueType> select_random_table(DataRange* data) {
1584 int num_tables = builder_->builder()->NumTables();
1585 DCHECK_GT(num_tables, 0);
1586 int index = data->get<uint8_t>() % num_tables;
1587 ValueType address_type = table_address_type(index);
1588
1589 return {index, address_type};
1590 }
1591
1592 bool table_get(HeapType type, DataRange* data, Nullability nullable) {
1593 ValueType needed_type = ValueType::RefMaybeNull(type, nullable);
1594 int table_count = builder_->builder()->NumTables();
1595 DCHECK_GT(table_count, 0);
1596 ZoneVector<uint32_t> table(builder_->builder()->zone());
1597 for (int i = 0; i < table_count; i++) {
1598 if (builder_->builder()->GetTableType(i) == needed_type) {
1599 table.push_back(i);
1600 }
1601 }
1602 if (table.empty()) {
1603 return false;
1604 }
1605 int table_index =
1606 table[data->get<uint8_t>() % static_cast<int>(table.size())];
1607 ValueType address_type = table_address_type(table_index);
1608 Generate(address_type, data);
1609 builder_->Emit(kExprTableGet);
1610 builder_->EmitU32V(table_index);
1611 return true;
1612 }
1613
1614 void table_set(DataRange* data) {
1615 auto [table_index, address_type] = select_random_table(data);
1616 table_op(table_index, {address_type, kWasmFuncRef}, data, kExprTableSet);
1617 }
1618
1619 void table_size(DataRange* data) {
1620 auto [table_index, _] = select_random_table(data);
1621 table_op(table_index, {}, data, kExprTableSize);
1622 }
1623
1624 void table_grow(DataRange* data) {
1625 auto [table_index, address_type] = select_random_table(data);
1626 table_op(table_index, {kWasmFuncRef, address_type}, data, kExprTableGrow);
1627 }
1628
1629 void table_fill(DataRange* data) {
1630 auto [table_index, address_type] = select_random_table(data);
1631 table_op(table_index, {address_type, kWasmFuncRef, address_type}, data,
1632 kExprTableFill);
1633 }
1634
1635 void table_copy(DataRange* data) {
1636 ValueType needed_type = data->get<bool>() ? kWasmFuncRef : kWasmExternRef;
1637 int table_count = builder_->builder()->NumTables();
1638 ZoneVector<uint32_t> table(builder_->builder()->zone());
1639 for (int i = 0; i < table_count; i++) {
1640 if (builder_->builder()->GetTableType(i) == needed_type) {
1641 table.push_back(i);
1642 }
1643 }
1644 if (table.empty()) {
1645 return;
1646 }
1647 int first_index = data->get<uint8_t>() % static_cast<int>(table.size());
1648 int second_index = data->get<uint8_t>() % static_cast<int>(table.size());
1649 ValueType first_addrtype = table_address_type(table[first_index]);
1650 ValueType second_addrtype = table_address_type(table[second_index]);
1651 ValueType result_addrtype =
1652 first_addrtype == kWasmI32 ? kWasmI32 : second_addrtype;
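// The length operand of table.copy uses the narrower of the two address
// types, i.e. i32 unless both tables are table64.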
1653 Generate(first_addrtype, data);
1654 Generate(second_addrtype, data);
1655 Generate(result_addrtype, data);
1656 builder_->EmitWithPrefix(kExprTableCopy);
1657 builder_->EmitU32V(table[first_index]);
1658 builder_->EmitU32V(table[second_index]);
1659 }
1660
1661 bool array_get_helper(ValueType value_type, DataRange* data) {
1662 WasmModuleBuilder* builder = builder_->builder();
1663 ZoneVector<ModuleTypeIndex> array_indices(builder->zone());
1664
1665 for (ModuleTypeIndex i : arrays_) {
1666 DCHECK(builder->IsArrayType(i));
1667 if (builder->GetArrayType(i)->element_type().Unpacked() == value_type) {
1668 array_indices.push_back(i);
1669 }
1670 }
1671
1672 if (!array_indices.empty()) {
1673 int index = data->get<uint8_t>() % static_cast<int>(array_indices.size());
1674 GenerateRef(HeapType::Index(array_indices[index], kNotShared,
1675 RefTypeKind::kArray),
1676 data, kNullable);
1677 Generate(kWasmI32, data);
1678 if (builder->GetArrayType(array_indices[index])
1679 ->element_type()
1680 .is_packed()) {
1681 builder_->EmitWithPrefix(data->get<bool>() ? kExprArrayGetS
1682 : kExprArrayGetU);
1683
1684 } else {
1685 builder_->EmitWithPrefix(kExprArrayGet);
1686 }
1687 builder_->EmitU32V(array_indices[index]);
1688 return true;
1689 }
1690
1691 return false;
1692 }
1693
1694 template <ValueKind wanted_kind>
1695 void array_get(DataRange* data) {
1696 bool got_array_value =
1697 array_get_helper(ValueType::Primitive(wanted_kind), data);
1698 if (!got_array_value) {
1699 Generate<wanted_kind>(data);
1700 }
1701 }
1702 bool array_get_ref(HeapType type, DataRange* data, Nullability nullable) {
1703 ValueType needed_type = ValueType::RefMaybeNull(type, nullable);
1704 return array_get_helper(needed_type, data);
1705 }
1706
1707 void i31_get(DataRange* data) {
1708 GenerateRef(kWasmI31Ref, data);
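// i31.get_s sign-extends the 31-bit payload to i32; i31.get_u
// zero-extends it. Either yields a valid i32 here.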
1709 if (data->get<bool>()) {
1710 builder_->EmitWithPrefix(kExprI31GetS);
1711 } else {
1712 builder_->EmitWithPrefix(kExprI31GetU);
1713 }
1714 }
1715
1716 void array_len(DataRange* data) {
1717 DCHECK_NE(0, arrays_.size()); // We always emit at least one array type.
1718 GenerateRef(kWasmArrayRef, data);
1719 builder_->EmitWithPrefix(kExprArrayLen);
1720 }
1721
1722 void array_copy(DataRange* data) {
1723 DCHECK_NE(0, arrays_.size()); // We always emit at least one array type.
1724 // TODO(14034): The source element type only has to be a subtype of the
1725 // destination element type. Currently this only generates copies between
1726 // arrays of the same type.
1727 ModuleTypeIndex array_index =
1728 arrays_[data->get<uint8_t>() % arrays_.size()];
1729 DCHECK(builder_->builder()->IsArrayType(array_index));
1730 GenerateRef(HeapType::Index(array_index, kNotShared, RefTypeKind::kArray),
1731 data); // destination
1732 Generate(kWasmI32, data); // destination index
1733 GenerateRef(HeapType::Index(array_index, kNotShared, RefTypeKind::kArray),
1734 data); // source
1735 Generate(kWasmI32, data); // source index
1736 Generate(kWasmI32, data); // length
1737 builder_->EmitWithPrefix(kExprArrayCopy);
1738 builder_->EmitU32V(array_index); // destination array type index
1739 builder_->EmitU32V(array_index); // source array type index
1740 }
1741
1742 void array_fill(DataRange* data) {
1743 DCHECK_NE(0, arrays_.size()); // We always emit at least one array type.
1744 ModuleTypeIndex array_index =
1745 arrays_[data->get<uint8_t>() % arrays_.size()];
1746 DCHECK(builder_->builder()->IsArrayType(array_index));
1747 ValueType element_type = builder_->builder()
1748 ->GetArrayType(array_index)
1749 ->element_type()
1750 .Unpacked();
1751 GenerateRef(HeapType::Index(array_index, kNotShared, RefTypeKind::kArray),
1752 data); // array
1753 Generate(kWasmI32, data); // offset
1754 Generate(element_type, data); // value
1755 Generate(kWasmI32, data); // length
1756 builder_->EmitWithPrefix(kExprArrayFill);
1757 builder_->EmitU32V(array_index);
1758 }
1759
1760 void array_init_data(DataRange* data) {
1761 DCHECK_NE(0, arrays_.size()); // We always emit at least one array type.
1762 ModuleTypeIndex array_index =
1763 arrays_[data->get<uint8_t>() % arrays_.size()];
1764 DCHECK(builder_->builder()->IsArrayType(array_index));
1765 const ArrayType* array_type =
1766 builder_->builder()->GetArrayType(array_index);
1767 DCHECK(array_type->mutability());
1768 ValueType element_type = array_type->element_type().Unpacked();
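// array.init_data only applies to arrays with numeric (non-reference)
// element types, since data segments hold raw bytes.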
1769 if (element_type.is_reference()) {
1770 return;
1771 }
1772 if (builder_->builder()->NumDataSegments() == 0) {
1773 GeneratePassiveDataSegment(data, builder_->builder());
1774 }
1775
1776 int data_index =
1777 data->get<uint8_t>() % builder_->builder()->NumDataSegments();
1778 // Generate array, index, data_offset, length.
1779 Generate({ValueType::RefNull(array_index, kNotShared, RefTypeKind::kArray),
1780 kWasmI32, kWasmI32, kWasmI32},
1781 data);
1782 builder_->EmitWithPrefix(kExprArrayInitData);
1783 builder_->EmitU32V(array_index);
1784 builder_->EmitU32V(data_index);
1785 }
1786
1787 void array_init_elem(DataRange* data) {
1788 DCHECK_NE(0, arrays_.size()); // We always emit at least one array type.
1789 ModuleTypeIndex array_index =
1790 arrays_[data->get<uint8_t>() % arrays_.size()];
1791 DCHECK(builder_->builder()->IsArrayType(array_index));
1792 const ArrayType* array_type =
1793 builder_->builder()->GetArrayType(array_index);
1794 DCHECK(array_type->mutability());
1795 ValueType element_type = array_type->element_type().Unpacked();
1796 // This is more restrictive than it has to be.
1797 // TODO(14034): Also support nonnullable and non-index reference
1798 // types.
1799 if (!element_type.is_reference() || element_type.is_non_nullable() ||
1800 !element_type.has_index()) {
1801 return;
1802 }
1803 // Add a new element segment with the corresponding type.
1804 uint32_t element_segment =
1805 GenerateRefTypeElementSegment(data, builder_->builder(), element_type);
1806 // Generate array, index, elem_offset, length.
1807 // TODO(14034): Change the distribution here to make it more likely
1808 // that the numbers are in range.
1809 Generate({ValueType::RefNull(array_index, kNotShared, RefTypeKind::kArray),
1810 kWasmI32, kWasmI32, kWasmI32},
1811 data);
1812 // Generate array.new_elem instruction.
1813 builder_->EmitWithPrefix(kExprArrayInitElem);
1814 builder_->EmitU32V(array_index);
1815 builder_->EmitU32V(element_segment);
1816 }
1817
1818 void array_set(DataRange* data) {
1819 WasmModuleBuilder* builder = builder_->builder();
1820 ZoneVector<ModuleTypeIndex> array_indices(builder->zone());
1821 for (ModuleTypeIndex i : arrays_) {
1822 DCHECK(builder->IsArrayType(i));
1823 if (builder->GetArrayType(i)->mutability()) {
1824 array_indices.push_back(i);
1825 }
1826 }
1827
1828 if (array_indices.empty()) {
1829 return;
1830 }
1831
1832 int index = data->get<uint8_t>() % static_cast<int>(array_indices.size());
1833 GenerateRef(
1834 HeapType::Index(array_indices[index], kNotShared, RefTypeKind::kArray),
1835 data);
1836 Generate(kWasmI32, data);
1837 Generate(
1838 builder->GetArrayType(array_indices[index])->element_type().Unpacked(),
1839 data);
1840 builder_->EmitWithPrefix(kExprArraySet);
1841 builder_->EmitU32V(array_indices[index]);
1842 }
1843
1844 bool struct_get_helper(ValueType value_type, DataRange* data) {
1845 WasmModuleBuilder* builder = builder_->builder();
1846 ZoneVector<uint32_t> field_index(builder->zone());
1847 ZoneVector<ModuleTypeIndex> struct_index(builder->zone());
1848 for (ModuleTypeIndex i : structs_) {
1849 DCHECK(builder->IsStructType(i));
1850 int field_count = builder->GetStructType(i)->field_count();
1851 for (int index = 0; index < field_count; index++) {
1852 // TODO(14034): This should be a subtype check!
1853 if (builder->GetStructType(i)->field(index) == value_type) {
1854 field_index.push_back(index);
1855 struct_index.push_back(i);
1856 }
1857 }
1858 }
1859 if (!field_index.empty()) {
1860 int index = data->get<uint8_t>() % static_cast<int>(field_index.size());
1861 GenerateRef(HeapType::Index(struct_index[index], kNotShared,
1862 RefTypeKind::kStruct),
1863 data, kNullable);
1864 if (builder->GetStructType(struct_index[index])
1865 ->field(field_index[index])
1866 .is_packed()) {
1867 builder_->EmitWithPrefix(data->get<bool>() ? kExprStructGetS
1868 : kExprStructGetU);
1869 } else {
1870 builder_->EmitWithPrefix(kExprStructGet);
1871 }
1872 builder_->EmitU32V(struct_index[index]);
1873 builder_->EmitU32V(field_index[index]);
1874 return true;
1875 }
1876 return false;
1877 }
1878
1879 template <ValueKind wanted_kind>
1880 void struct_get(DataRange* data) {
1881 bool got_struct_value =
1882 struct_get_helper(ValueType::Primitive(wanted_kind), data);
1883 if (!got_struct_value) {
1884 Generate<wanted_kind>(data);
1885 }
1886 }
1887
1888 bool struct_get_ref(HeapType type, DataRange* data, Nullability nullable) {
1889 ValueType needed_type = ValueType::RefMaybeNull(type, nullable);
1890 return struct_get_helper(needed_type, data);
1891 }
1892
1893 bool ref_cast(HeapType type, DataRange* data, Nullability nullable) {
1894 HeapType input_type = top_type(type);
1895 GenerateRef(input_type, data);
1896 builder_->EmitWithPrefix(nullable ? kExprRefCastNull : kExprRefCast);
1897 builder_->EmitHeapType(type);
1898 return true; // It always produces the desired result type.
1899 }
1900
1901 HeapType top_type(HeapType type) {
1902 switch (type.representation()) {
1903 case HeapType::kAny:
1904 case HeapType::kEq:
1905 case HeapType::kArray:
1906 case HeapType::kStruct:
1907 case HeapType::kI31:
1908 case HeapType::kNone:
1909 return kWasmAnyRef;
1910 case HeapType::kExtern:
1911 case HeapType::kNoExtern:
1912 return kWasmExternRef;
1913 case HeapType::kExn:
1914 case HeapType::kNoExn:
1915 return kWasmExnRef;
1916 case HeapType::kFunc:
1917 case HeapType::kNoFunc:
1918 return kWasmFuncRef;
1919 default:
1920 DCHECK(type.is_index());
1921 if (builder_->builder()->IsSignature(type.ref_index())) {
1922 return kWasmFuncRef;
1923 }
1924 DCHECK(builder_->builder()->IsStructType(type.ref_index()) ||
1925 builder_->builder()->IsArrayType(type.ref_index()));
1926 return kWasmAnyRef;
1927 }
1928 }
1929
1930 HeapType choose_sub_type(HeapType type, DataRange* data) {
1931 switch (type.representation()) {
1932 case HeapType::kAny: {
1933 constexpr GenericKind generic_types[] = {
1934 GenericKind::kAny, GenericKind::kEq, GenericKind::kArray,
1935 GenericKind::kStruct, GenericKind::kI31, GenericKind::kNone,
1936 };
1937 size_t choice =
1938 data->get<uint8_t>() %
1939 (arrays_.size() + structs_.size() + arraysize(generic_types));
1940
1941 if (choice < arrays_.size()) {
1942 return HeapType::Index(arrays_[choice], kNotShared,
1943 RefTypeKind::kArray);
1944 }
1945 choice -= arrays_.size();
1946 if (choice < structs_.size()) {
1947 return HeapType::Index(structs_[choice], kNotShared,
1948 RefTypeKind::kStruct);
1949 }
1950 choice -= structs_.size();
1951 return HeapType::Generic(generic_types[choice], kNotShared);
1952 }
1953 case HeapType::kEq: {
1954 constexpr GenericKind generic_types[] = {
1955 GenericKind::kEq, GenericKind::kArray, GenericKind::kStruct,
1956 GenericKind::kI31, GenericKind::kNone,
1957 };
1958 size_t choice =
1959 data->get<uint8_t>() %
1960 (arrays_.size() + structs_.size() + arraysize(generic_types));
1961
1962 if (choice < arrays_.size()) {
1963 return HeapType::Index(arrays_[choice], kNotShared,
1964 RefTypeKind::kArray);
1965 }
1966 choice -= arrays_.size();
1967 if (choice < structs_.size()) {
1968 return HeapType::Index(structs_[choice], kNotShared,
1969 RefTypeKind::kStruct);
1970 }
1971 choice -= structs_.size();
1972 return HeapType::Generic(generic_types[choice], kNotShared);
1973 }
1974 case HeapType::kStruct: {
1975 constexpr GenericKind generic_types[] = {
1976 GenericKind::kStruct,
1977 GenericKind::kNone,
1978 };
1979 const size_t type_count = structs_.size();
1980 const size_t choice =
1981 data->get<uint8_t>() % (type_count + arraysize(generic_types));
1982 return choice >= type_count
1983 ? HeapType::Generic(generic_types[choice - type_count],
1984 kNotShared)
1985 : HeapType::Index(structs_[choice], kNotShared,
1986 RefTypeKind::kStruct);
1987 }
1988 case HeapType::kArray: {
1989 constexpr GenericKind generic_types[] = {
1990 GenericKind::kArray,
1991 GenericKind::kNone,
1992 };
1993 const size_t type_count = arrays_.size();
1994 const size_t choice =
1995 data->get<uint8_t>() % (type_count + arraysize(generic_types));
1996 return choice >= type_count
1997 ? HeapType::Generic(generic_types[choice - type_count],
1998 kNotShared)
1999 : HeapType::Index(arrays_[choice], kNotShared,
2000 RefTypeKind::kArray);
2001 }
2002 case HeapType::kFunc: {
2003 constexpr GenericKind generic_types[] = {GenericKind::kFunc,
2004 GenericKind::kNoFunc};
2005 const size_t type_count = functions_.size();
2006 const size_t choice =
2007 data->get<uint8_t>() % (type_count + arraysize(generic_types));
2008 return choice >= type_count
2009 ? HeapType::Generic(generic_types[choice - type_count],
2010 kNotShared)
2011 : HeapType::Index(functions_[choice], kNotShared,
2012 RefTypeKind::kFunction);
2013 }
2014 case HeapType::kExtern:
2015 // About 10% of chosen subtypes will be kNoExtern.
2016 return HeapType::Generic(data->get<uint8_t>() > 25
2017 ? GenericKind::kExtern
2018 : GenericKind::kNoExtern,
2019 kNotShared);
2020 default:
2021 if (!type.is_index()) {
2022 // No logic implemented to find a sub-type.
2023 return type;
2024 }
2025 // Collect all (direct) subtypes.
2026 // TODO(14034): Also collect indirect subtypes.
2027 std::vector<ModuleTypeIndex> subtypes;
2028 uint32_t type_count = builder_->builder()->NumTypes();
2029 for (uint32_t i = 0; i < type_count; ++i) {
2030 if (builder_->builder()->GetSuperType(i) == type.ref_index()) {
2031 subtypes.push_back(ModuleTypeIndex{i});
2032 }
2033 }
2034 if (subtypes.empty()) return type; // No downcast possible.
2035 return HeapType::Index(subtypes[data->get<uint8_t>() % subtypes.size()],
2036 kNotShared, type.ref_type_kind());
2037 }
2038 }
2039
2040 bool br_on_cast(HeapType type, DataRange* data, Nullability nullable) {
2041 DCHECK(!blocks_.empty());
2042 const uint32_t target_block = data->get<uint8_t>() % blocks_.size();
2043 const uint32_t block_index =
2044 static_cast<uint32_t>(blocks_.size()) - 1 - target_block;
2045 const auto break_types = base::VectorOf(blocks_[target_block]);
2046 if (break_types.empty()) {
2047 return false;
2048 }
2049 ValueType break_type = break_types.last();
2050 if (!break_type.is_reference()) {
2051 return false;
2052 }
2053
2054 Generate(break_types.SubVector(0, break_types.size() - 1), data);
2055 if (data->get<bool>()) {
2056 // br_on_cast
2057 HeapType source_type = top_type(break_type.heap_type());
2058 const bool source_is_nullable = data->get<bool>();
2059 GenerateRef(source_type, data,
2060 source_is_nullable ? kNullable : kNonNullable);
2061 const bool target_is_nullable =
2062 source_is_nullable && break_type.is_nullable() && data->get<bool>();
2063 builder_->EmitWithPrefix(kExprBrOnCast);
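// The flags immediate encodes nullability: bit 0 = source nullable,
// bit 1 = target nullable (same encoding for br_on_cast_fail below).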
2064 builder_->EmitU32V(source_is_nullable + (target_is_nullable << 1));
2065 builder_->EmitU32V(block_index);
2066 builder_->EmitHeapType(source_type); // source type
2067 builder_->EmitHeapType(break_type.heap_type()); // target type
2068 // Fallthrough: The type has been up-cast to the source type of the
2069 // br_on_cast instruction! (If the type on the stack was more specific,
2070 // this loses type information.)
2071 base::SmallVector<ValueType, 32> fallthrough_types(break_types);
2072 fallthrough_types.back() = ValueType::RefMaybeNull(
2073 source_type, source_is_nullable ? kNullable : kNonNullable);
2074 ConsumeAndGenerate(base::VectorOf(fallthrough_types), {}, data);
2075 // Generate the actually desired ref type.
2076 GenerateRef(type, data, nullable);
2077 } else {
2078 // br_on_cast_fail
2079 HeapType source_type = break_type.heap_type();
2080 const bool source_is_nullable = data->get<bool>();
2081 GenerateRef(source_type, data,
2082 source_is_nullable ? kNullable : kNonNullable);
2083 const bool target_is_nullable =
2084 source_is_nullable &&
2085 (!break_type.is_nullable() || data->get<bool>());
2086 HeapType target_type = choose_sub_type(source_type, data);
2087
2088 builder_->EmitWithPrefix(kExprBrOnCastFail);
2089 builder_->EmitU32V(source_is_nullable + (target_is_nullable << 1));
2090 builder_->EmitU32V(block_index);
2091 builder_->EmitHeapType(source_type);
2092 builder_->EmitHeapType(target_type);
2093 // Fallthrough: The type has been cast to the target type.
2094 base::SmallVector<ValueType, 32> fallthrough_types(break_types);
2095 fallthrough_types.back() = ValueType::RefMaybeNull(
2096 target_type, target_is_nullable ? kNullable : kNonNullable);
2097 ConsumeAndGenerate(base::VectorOf(fallthrough_types), {}, data);
2098 // Generate the actually desired ref type.
2099 GenerateRef(type, data, nullable);
2100 }
2101 return true;
2102 }
2103
2104 bool any_convert_extern(HeapType type, DataRange* data,
2105 Nullability nullable) {
2106 if (type.representation() != HeapType::kAny) {
2107 return false;
2108 }
2109 GenerateRef(kWasmExternRef, data);
2110 builder_->EmitWithPrefix(kExprAnyConvertExtern);
2111 if (nullable == kNonNullable) {
2112 builder_->Emit(kExprRefAsNonNull);
2113 }
2114 return true;
2115 }
2116
2117 bool ref_as_non_null(HeapType type, DataRange* data, Nullability nullable) {
2118 GenerateRef(type, data, kNullable);
2119 builder_->Emit(kExprRefAsNonNull);
2120 return true;
2121 }
2122
2123 void struct_set(DataRange* data) {
2124 WasmModuleBuilder* builder = builder_->builder();
2125 DCHECK_NE(0, structs_.size()); // We always emit at least one struct type.
2126 ModuleTypeIndex struct_index =
2127 structs_[data->get<uint8_t>() % structs_.size()];
2128 DCHECK(builder->IsStructType(struct_index));
2129 const StructType* struct_type = builder->GetStructType(struct_index);
2130 ZoneVector<uint32_t> field_indices(builder->zone());
2131 for (uint32_t i = 0; i < struct_type->field_count(); i++) {
2132 if (struct_type->mutability(i)) {
2133 field_indices.push_back(i);
2134 }
2135 }
2136 if (field_indices.empty()) {
2137 return;
2138 }
2139 int field_index =
2140 field_indices[data->get<uint8_t>() % field_indices.size()];
2141 GenerateRef(HeapType::Index(struct_index, kNotShared, RefTypeKind::kStruct),
2142 data);
2143 Generate(struct_type->field(field_index).Unpacked(), data);
2144 builder_->EmitWithPrefix(kExprStructSet);
2145 builder_->EmitU32V(struct_index);
2146 builder_->EmitU32V(field_index);
2147 }
2148
2149 void ref_is_null(DataRange* data) {
2150 GenerateRef(kWasmAnyRef, data);
2151 builder_->Emit(kExprRefIsNull);
2152 }
2153
2154 template <WasmOpcode opcode>
2155 void ref_test(DataRange* data) {
2156 GenerateRef(kWasmAnyRef, data);
2157 constexpr int generic_types[] = {kAnyRefCode, kEqRefCode, kArrayRefCode,
2158 kStructRefCode, kI31RefCode, kNoneCode};
2159 size_t num_types = structs_.size() + arrays_.size();
2160 size_t num_all_types = num_types + arraysize(generic_types);
2161 size_t type_choice = data->get<uint8_t>() % num_all_types;
2162 builder_->EmitWithPrefix(opcode);
2163 if (type_choice < structs_.size()) {
2164 builder_->EmitU32V(structs_[type_choice]);
2165 return;
2166 }
2167 type_choice -= structs_.size();
2168 if (type_choice < arrays_.size()) {
2169 builder_->EmitU32V(arrays_[type_choice]);
2170 return;
2171 }
2172 type_choice -= arrays_.size();
2173 builder_->EmitU32V(generic_types[type_choice]);
2174 }
2175
2176 void ref_eq(DataRange* data) {
2177 GenerateRef(kWasmEqRef, data);
2178 GenerateRef(kWasmEqRef, data);
2179 builder_->Emit(kExprRefEq);
2180 }
2181
2182 void call_string_import(uint32_t index) {
2183 builder_->EmitWithU32V(kExprCallFunction, index);
2184 }
2185
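// Each string_* helper below pushes the operands its builtin expects and
// then calls the matching imported function from `string_imports_`.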
2186 void string_cast(DataRange* data) {
2187 GenerateRef(kWasmExternRef, data);
2188 call_string_import(string_imports_.cast);
2189 }
2190
2191 void string_test(DataRange* data) {
2192 GenerateRef(kWasmExternRef, data);
2193 call_string_import(string_imports_.test);
2194 }
2195
2196 void string_fromcharcode(DataRange* data) {
2197 Generate(kWasmI32, data);
2198 call_string_import(string_imports_.fromCharCode);
2199 }
2200
2201 void string_fromcodepoint(DataRange* data) {
2202 Generate(kWasmI32, data);
2203 call_string_import(string_imports_.fromCodePoint);
2204 }
2205
2206 void string_charcodeat(DataRange* data) {
2207 GenerateRef(kWasmExternRef, data);
2208 Generate(kWasmI32, data);
2209 call_string_import(string_imports_.charCodeAt);
2210 }
2211
2212 void string_codepointat(DataRange* data) {
2213 GenerateRef(kWasmExternRef, data);
2214 Generate(kWasmI32, data);
2215 call_string_import(string_imports_.codePointAt);
2216 }
2217
2218 void string_length(DataRange* data) {
2219 GenerateRef(kWasmExternRef, data);
2220 call_string_import(string_imports_.length);
2221 }
2222
2223 void string_concat(DataRange* data) {
2224 GenerateRef(kWasmExternRef, data);
2225 GenerateRef(kWasmExternRef, data);
2226 call_string_import(string_imports_.concat);
2227 }
2228
2229 void string_substring(DataRange* data) {
2230 GenerateRef(kWasmExternRef, data);
2231 Generate(kWasmI32, data);
2232 Generate(kWasmI32, data);
2233 call_string_import(string_imports_.substring);
2234 }
2235
2236 void string_equals(DataRange* data) {
2237 GenerateRef(kWasmExternRef, data);
2238 GenerateRef(kWasmExternRef, data);
2239 call_string_import(string_imports_.equals);
2240 }
2241
2242 void string_compare(DataRange* data) {
2243 GenerateRef(kWasmExternRef, data);
2244 GenerateRef(kWasmExternRef, data);
2245 call_string_import(string_imports_.compare);
2246 }
2247
2248 void string_fromcharcodearray(DataRange* data) {
2249 GenerateRef(HeapType::Index(string_imports_.array_i16, kNotShared,
2250 RefTypeKind::kArray),
2251 data);
2252 Generate(kWasmI32, data);
2253 Generate(kWasmI32, data);
2254 call_string_import(string_imports_.fromCharCodeArray);
2255 }
2256
2257 void string_intocharcodearray(DataRange* data) {
2258 GenerateRef(kWasmExternRef, data);
2259 GenerateRef(HeapType::Index(string_imports_.array_i16, kNotShared,
2260 RefTypeKind::kArray),
2261 data);
2262 Generate(kWasmI32, data);
2263 call_string_import(string_imports_.intoCharCodeArray);
2264 }
2265
2266 void string_measureutf8(DataRange* data) {
2267 GenerateRef(kWasmExternRef, data);
2268 call_string_import(string_imports_.measureStringAsUTF8);
2269 }
2270
2271 void string_intoutf8array(DataRange* data) {
2272 GenerateRef(kWasmExternRef, data);
2273 GenerateRef(HeapType::Index(string_imports_.array_i8, kNotShared,
2274 RefTypeKind::kArray),
2275 data);
2276 Generate(kWasmI32, data);
2277 call_string_import(string_imports_.encodeStringIntoUTF8Array);
2278 }
2279
2280 void string_toutf8array(DataRange* data) {
2281 GenerateRef(kWasmExternRef, data);
2282 call_string_import(string_imports_.encodeStringToUTF8Array);
2283 }
2284
2285 void string_fromutf8array(DataRange* data) {
2286 GenerateRef(HeapType::Index(string_imports_.array_i8, kNotShared,
2287 RefTypeKind::kArray),
2288 data);
2289 Generate(kWasmI32, data);
2290 Generate(kWasmI32, data);
2291 call_string_import(string_imports_.decodeStringFromUTF8Array);
2292 }
2293
2294 template <typename Arr>
2295 requires requires(const Arr& arr) {
2296 { arr.size() } -> std::convertible_to<std::size_t>;
2297 { arr.data()[0] } -> std::convertible_to<GenerateFn>;
2298 }
2299 void GenerateOneOf(const Arr& alternatives, DataRange* data) {
2300 DCHECK_LT(alternatives.size(), std::numeric_limits<uint8_t>::max());
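// A single input byte selects the alternative; the DCHECK guarantees the
// byte's range covers every entry (keeping the choice roughly uniform).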
2301 const auto which = data->get<uint8_t>();
2302
2303 GenerateFn alternate = alternatives[which % alternatives.size()];
2304 (this->*alternate)(data);
2305 }
2306
2307 template <size_t... kAlternativesSizes>
2308 void GenerateOneOf(const GeneratorAlternativesPerOption<
2309 kAlternativesSizes...>& alternatives_per_option,
2310 DataRange* data) {
2311 return GenerateOneOf(alternatives_per_option.GetAlternatives(options_),
2312 data);
2313 }
2314
2315 // Returns true if it successfully generated a randomly chosen expression
2316 // from the `alternatives`.
2317 template <typename Arr>
2318 requires requires(const Arr& arr) {
2319 { arr.size() } -> std::convertible_to<std::size_t>;
2320 { arr.data()[0] } -> std::convertible_to<GenerateFnWithHeap>;
2321 }
2322 bool GenerateOneOf(const Arr& alternatives, HeapType type, DataRange* data,
2323 Nullability nullability) {
2324 DCHECK_LT(alternatives.size(), std::numeric_limits<uint8_t>::max());
2325
2326 size_t index = data->get<uint8_t>() % (alternatives.size() + 1);
2327
2328 if (nullability && index == alternatives.size()) {
2329 ref_null(type, data);
2330 return true;
2331 }
2332
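// Otherwise try the alternatives in round-robin order, starting at the
// randomly chosen index; the first generator that succeeds wins.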
2333 for (size_t i = index; i < alternatives.size(); i++) {
2334 if ((this->*alternatives[i])(type, data, nullability)) {
2335 return true;
2336 }
2337 }
2338
2339 for (size_t i = 0; i < index; i++) {
2340 if ((this->*alternatives[i])(type, data, nullability)) {
2341 return true;
2342 }
2343 }
2344
2345 if (nullability == kNullable) {
2346 ref_null(type, data);
2347 return true;
2348 }
2349
2350 return false;
2351 }
2352
2353 struct GeneratorRecursionScope {
2354 explicit GeneratorRecursionScope(BodyGen* gen) : gen(gen) {
2355 ++gen->recursion_depth;
2356 DCHECK_LE(gen->recursion_depth, kMaxRecursionDepth);
2357 }
2358 ~GeneratorRecursionScope() {
2359 DCHECK_GT(gen->recursion_depth, 0);
2360 --gen->recursion_depth;
2361 }
2362 BodyGen* gen;
2363 };
2364
2365 public:
2366 BodyGen(WasmModuleGenerationOptions options, WasmFunctionBuilder* fn,
2367 const std::vector<ModuleTypeIndex>& functions,
2368 const std::vector<ValueType>& globals,
2369 const std::vector<uint8_t>& mutable_globals,
2370 const std::vector<ModuleTypeIndex>& structs,
2371 const std::vector<ModuleTypeIndex>& arrays,
2372 const StringImports& strings, DataRange* data)
2373 : options_(options),
2374 builder_(fn),
2375 functions_(functions),
2376 globals_(globals),
2377 mutable_globals_(mutable_globals),
2378 structs_(structs),
2379 arrays_(arrays),
2380 string_imports_(strings) {
2381 const FunctionSig* sig = fn->signature();
2382 blocks_.emplace_back();
2383 for (size_t i = 0; i < sig->return_count(); ++i) {
2384 blocks_.back().push_back(sig->GetReturn(i));
2385 }
2386 locals_.resize(data->get<uint8_t>() % kMaxLocals);
2387 uint32_t num_types = static_cast<uint32_t>(
2388 functions_.size() + structs_.size() + arrays_.size());
2389 for (ValueType& local : locals_) {
2390 local = GetValueType(options, data, num_types);
2391 fn->AddLocal(local);
2392 }
2393 }
2394
2395 int NumImportedFunctions() {
2396 return builder_->builder()->NumImportedFunctions();
2397 }
2398
2399 // Returns the number of locals including parameters.
2400 size_t all_locals_count() const {
2401 return builder_->signature()->parameter_count() + locals_.size();
2402 }
2403
2404 // Returns the type of the local with the given index.
2405 ValueType local_type(uint32_t index) const {
2406 size_t num_params = builder_->signature()->parameter_count();
2407 return index < num_params ? builder_->signature()->GetParam(index)
2408 : locals_[index - num_params];
2409 }
2410
2411 // Generator functions.
2412 // Implementation detail: We define non-template Generate*TYPE*() functions
2413 // instead of templatized Generate<TYPE>(). This is because we cannot define
2414 // the templatized Generate<TYPE>() functions:
2415 // - outside of the class body without specializing the template of the
2416 // `BodyGen` (results in partial template specialization error);
2417 // - inside of the class body (gcc complains about explicit specialization in
2418 // non-namespace scope).
2419
2420 void GenerateVoid(DataRange* data) {
2421 GeneratorRecursionScope rec_scope(this);
2422 if (recursion_limit_reached() || data->size() == 0) return;
2423
2424 static constexpr auto kMvpAlternatives =
2425 CreateArray(&BodyGen::sequence<kVoid, kVoid>,
2426 &BodyGen::sequence<kVoid, kVoid, kVoid, kVoid>,
2427 &BodyGen::sequence<kVoid, kVoid, kVoid, kVoid, kVoid, kVoid,
2428 kVoid, kVoid>,
2429 &BodyGen::block<kVoid>, //
2430 &BodyGen::loop<kVoid>, //
2431 &BodyGen::finite_loop<kVoid>, //
2432 &BodyGen::if_<kVoid, kIf>, //
2433 &BodyGen::if_<kVoid, kIfElse>, //
2434 &BodyGen::br, //
2435 &BodyGen::br_if<kVoid>, //
2436 &BodyGen::br_on_null<kVoid>, //
2437 &BodyGen::br_on_non_null<kVoid>, //
2438 &BodyGen::br_table<kVoid>, //
2439 &BodyGen::try_table_block<kVoid>, //
2440 &BodyGen::return_op, //
2441
2442 &BodyGen::memop<kExprI32StoreMem, kI32>,
2443 &BodyGen::memop<kExprI32StoreMem8, kI32>,
2444 &BodyGen::memop<kExprI32StoreMem16, kI32>,
2445 &BodyGen::memop<kExprI64StoreMem, kI64>,
2446 &BodyGen::memop<kExprI64StoreMem8, kI64>,
2447 &BodyGen::memop<kExprI64StoreMem16, kI64>,
2448 &BodyGen::memop<kExprI64StoreMem32, kI64>,
2449 &BodyGen::memop<kExprF32StoreMem, kF32>,
2450 &BodyGen::memop<kExprF64StoreMem, kF64>,
2451 &BodyGen::memop<kExprI32AtomicStore, kI32>,
2452 &BodyGen::memop<kExprI32AtomicStore8U, kI32>,
2453 &BodyGen::memop<kExprI32AtomicStore16U, kI32>,
2454 &BodyGen::memop<kExprI64AtomicStore, kI64>,
2455 &BodyGen::memop<kExprI64AtomicStore8U, kI64>,
2456 &BodyGen::memop<kExprI64AtomicStore16U, kI64>,
2457 &BodyGen::memop<kExprI64AtomicStore32U, kI64>,
2458
2459 &BodyGen::drop,
2460
2461 &BodyGen::call<kVoid>, //
2462 &BodyGen::call_indirect<kVoid>, //
2463 &BodyGen::call_ref<kVoid>, //
2464
2465 &BodyGen::set_local, //
2466 &BodyGen::set_global, //
2467 &BodyGen::throw_or_rethrow, //
2468 &BodyGen::try_block<kVoid>, //
2469
2470 &BodyGen::shift_locals, //
2471
2472 &BodyGen::table_set, //
2473 &BodyGen::table_fill, //
2474 &BodyGen::table_copy);
2475
2476 static constexpr auto kSimdAlternatives =
2477 CreateArray(&BodyGen::memop<kExprS128StoreMem, kS128>,
2478 &BodyGen::simd_lane_memop<kExprS128Store8Lane, 16, kS128>,
2479 &BodyGen::simd_lane_memop<kExprS128Store16Lane, 8, kS128>,
2480 &BodyGen::simd_lane_memop<kExprS128Store32Lane, 4, kS128>,
2481 &BodyGen::simd_lane_memop<kExprS128Store64Lane, 2, kS128>);
2482
2483 static constexpr auto kWasmGCAlternatives =
2484 CreateArray(&BodyGen::struct_set, //
2485 &BodyGen::array_set, //
2486 &BodyGen::array_copy, //
2487 &BodyGen::array_fill, //
2488 &BodyGen::array_init_data, //
2489 &BodyGen::array_init_elem);
2490
2491 static constexpr GeneratorAlternativesPerOption kAlternativesPerOptions{
2492 kMvpAlternatives, kSimdAlternatives, kWasmGCAlternatives};
2493
2494 GenerateOneOf(kAlternativesPerOptions, data);
2495 }
2496
2497 void GenerateI32(DataRange* data) {
2498 GeneratorRecursionScope rec_scope(this);
2499 if (recursion_limit_reached() || data->size() <= 1) {
2500 // Rather than evenly distributing values across the full 32-bit range,
2501 // distribute them evenly over the possible bit lengths. In particular,
2502 // for values used as indices into something else, smaller values are
2503 // more likely to be useful.
2504 uint8_t size = 1 + (data->getPseudoRandom<uint8_t>() & 31);
2505 uint32_t mask = kMaxUInt32 >> (32 - size);
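// E.g. size == 5 gives mask == 0x1f, so the constant is drawn from
// [0, 31]. Each bit length from 1 to 32 is equally likely, biasing
// the constants toward small values that are useful as indices.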
2506 builder_->EmitI32Const(data->getPseudoRandom<uint32_t>() & mask);
2507 return;
2508 }
2509
2510 static constexpr auto kMvpAlternatives = CreateArray(
2511 &BodyGen::i32_const<1>, //
2512 &BodyGen::i32_const<2>, //
2513 &BodyGen::i32_const<3>, //
2514 &BodyGen::i32_const<4>, //
2515
2516 &BodyGen::sequence<kI32, kVoid>, //
2517 &BodyGen::sequence<kVoid, kI32>, //
2518 &BodyGen::sequence<kVoid, kI32, kVoid>, //
2519
2520 &BodyGen::op<kExprI32Eqz, kI32>, //
2521 &BodyGen::op<kExprI32Eq, kI32, kI32>, //
2522 &BodyGen::op<kExprI32Ne, kI32, kI32>, //
2523 &BodyGen::op<kExprI32LtS, kI32, kI32>, //
2524 &BodyGen::op<kExprI32LtU, kI32, kI32>, //
2525 &BodyGen::op<kExprI32GeS, kI32, kI32>, //
2526 &BodyGen::op<kExprI32GeU, kI32, kI32>, //
2527
2528 &BodyGen::op<kExprI64Eqz, kI64>, //
2529 &BodyGen::op<kExprI64Eq, kI64, kI64>, //
2530 &BodyGen::op<kExprI64Ne, kI64, kI64>, //
2531 &BodyGen::op<kExprI64LtS, kI64, kI64>, //
2532 &BodyGen::op<kExprI64LtU, kI64, kI64>, //
2533 &BodyGen::op<kExprI64GeS, kI64, kI64>, //
2534 &BodyGen::op<kExprI64GeU, kI64, kI64>, //
2535
2536 &BodyGen::op<kExprF32Eq, kF32, kF32>,
2537 &BodyGen::op<kExprF32Ne, kF32, kF32>,
2538 &BodyGen::op<kExprF32Lt, kF32, kF32>,
2539 &BodyGen::op<kExprF32Ge, kF32, kF32>,
2540
2541 &BodyGen::op<kExprF64Eq, kF64, kF64>,
2542 &BodyGen::op<kExprF64Ne, kF64, kF64>,
2543 &BodyGen::op<kExprF64Lt, kF64, kF64>,
2544 &BodyGen::op<kExprF64Ge, kF64, kF64>,
2545
2546 &BodyGen::op<kExprI32Add, kI32, kI32>,
2547 &BodyGen::op<kExprI32Sub, kI32, kI32>,
2548 &BodyGen::op<kExprI32Mul, kI32, kI32>,
2549
2550 &BodyGen::op<kExprI32DivS, kI32, kI32>,
2551 &BodyGen::op<kExprI32DivU, kI32, kI32>,
2552 &BodyGen::op<kExprI32RemS, kI32, kI32>,
2553 &BodyGen::op<kExprI32RemU, kI32, kI32>,
2554
2555 &BodyGen::op<kExprI32And, kI32, kI32>,
2556 &BodyGen::op<kExprI32Ior, kI32, kI32>,
2557 &BodyGen::op<kExprI32Xor, kI32, kI32>,
2558 &BodyGen::op<kExprI32Shl, kI32, kI32>,
2559 &BodyGen::op<kExprI32ShrU, kI32, kI32>,
2560 &BodyGen::op<kExprI32ShrS, kI32, kI32>,
2561 &BodyGen::op<kExprI32Ror, kI32, kI32>,
2562 &BodyGen::op<kExprI32Rol, kI32, kI32>,
2563
2564 &BodyGen::op<kExprI32Clz, kI32>, //
2565 &BodyGen::op<kExprI32Ctz, kI32>, //
2566 &BodyGen::op<kExprI32Popcnt, kI32>, //
2567
2568 &BodyGen::op<kExprI32ConvertI64, kI64>,
2569 &BodyGen::op<kExprI32SConvertF32, kF32>,
2570 &BodyGen::op<kExprI32UConvertF32, kF32>,
2571 &BodyGen::op<kExprI32SConvertF64, kF64>,
2572 &BodyGen::op<kExprI32UConvertF64, kF64>,
2573 &BodyGen::op<kExprI32ReinterpretF32, kF32>,
2574
2575 &BodyGen::op_with_prefix<kExprI32SConvertSatF32, kF32>,
2576 &BodyGen::op_with_prefix<kExprI32UConvertSatF32, kF32>,
2577 &BodyGen::op_with_prefix<kExprI32SConvertSatF64, kF64>,
2578 &BodyGen::op_with_prefix<kExprI32UConvertSatF64, kF64>,
2579
2580 &BodyGen::block<kI32>, //
2581 &BodyGen::loop<kI32>, //
2582 &BodyGen::finite_loop<kI32>, //
2583 &BodyGen::if_<kI32, kIfElse>, //
2584 &BodyGen::br_if<kI32>, //
2585 &BodyGen::br_on_null<kI32>, //
2586 &BodyGen::br_on_non_null<kI32>, //
2587 &BodyGen::br_table<kI32>, //
2588 &BodyGen::try_table_block<kI32>, //
2589
2590 &BodyGen::memop<kExprI32LoadMem>, //
2591 &BodyGen::memop<kExprI32LoadMem8S>, //
2592 &BodyGen::memop<kExprI32LoadMem8U>, //
2593 &BodyGen::memop<kExprI32LoadMem16S>, //
2594 &BodyGen::memop<kExprI32LoadMem16U>, //
2595 //
2596 &BodyGen::memop<kExprI32AtomicLoad>, //
2597 &BodyGen::memop<kExprI32AtomicLoad8U>, //
2598 &BodyGen::memop<kExprI32AtomicLoad16U>, //
2599 &BodyGen::memop<kExprI32AtomicAdd, kI32>, //
2600 &BodyGen::memop<kExprI32AtomicSub, kI32>, //
2601 &BodyGen::memop<kExprI32AtomicAnd, kI32>, //
2602 &BodyGen::memop<kExprI32AtomicOr, kI32>, //
2603 &BodyGen::memop<kExprI32AtomicXor, kI32>, //
2604 &BodyGen::memop<kExprI32AtomicExchange, kI32>, //
2605 &BodyGen::memop<kExprI32AtomicCompareExchange, kI32, kI32>, //
2606 &BodyGen::memop<kExprI32AtomicAdd8U, kI32>, //
2607 &BodyGen::memop<kExprI32AtomicSub8U, kI32>, //
2608 &BodyGen::memop<kExprI32AtomicAnd8U, kI32>, //
2609 &BodyGen::memop<kExprI32AtomicOr8U, kI32>, //
2610 &BodyGen::memop<kExprI32AtomicXor8U, kI32>, //
2611 &BodyGen::memop<kExprI32AtomicExchange8U, kI32>, //
2612 &BodyGen::memop<kExprI32AtomicCompareExchange8U, kI32, kI32>, //
2613 &BodyGen::memop<kExprI32AtomicAdd16U, kI32>, //
2614 &BodyGen::memop<kExprI32AtomicSub16U, kI32>, //
2615 &BodyGen::memop<kExprI32AtomicAnd16U, kI32>, //
2616 &BodyGen::memop<kExprI32AtomicOr16U, kI32>, //
2617 &BodyGen::memop<kExprI32AtomicXor16U, kI32>, //
2618 &BodyGen::memop<kExprI32AtomicExchange16U, kI32>, //
2619 &BodyGen::memop<kExprI32AtomicCompareExchange16U, kI32, kI32>, //
2620
2621 &BodyGen::memory_size, //
2622 &BodyGen::grow_memory, //
2623
2624 &BodyGen::get_local<kI32>, //
2625 &BodyGen::tee_local<kI32>, //
2626 &BodyGen::get_global<kI32>, //
2627 &BodyGen::op<kExprSelect, kI32, kI32, kI32>, //
2628 &BodyGen::select_with_type<kI32>, //
2629
2630 &BodyGen::call<kI32>, //
2631 &BodyGen::call_indirect<kI32>, //
2632 &BodyGen::call_ref<kI32>, //
2633 &BodyGen::try_block<kI32>, //
2634
2635 &BodyGen::table_size, //
2636 &BodyGen::table_grow);
2637
2638 static constexpr auto kSimdAlternatives =
2639 CreateArray(&BodyGen::op_with_prefix<kExprV128AnyTrue, kS128>,
2640 &BodyGen::op_with_prefix<kExprI8x16AllTrue, kS128>,
2641 &BodyGen::op_with_prefix<kExprI8x16BitMask, kS128>,
2642 &BodyGen::op_with_prefix<kExprI16x8AllTrue, kS128>,
2643 &BodyGen::op_with_prefix<kExprI16x8BitMask, kS128>,
2644 &BodyGen::op_with_prefix<kExprI32x4AllTrue, kS128>,
2645 &BodyGen::op_with_prefix<kExprI32x4BitMask, kS128>,
2646 &BodyGen::op_with_prefix<kExprI64x2AllTrue, kS128>,
2647 &BodyGen::op_with_prefix<kExprI64x2BitMask, kS128>,
2648 &BodyGen::simd_lane_op<kExprI8x16ExtractLaneS, 16, kS128>,
2649 &BodyGen::simd_lane_op<kExprI8x16ExtractLaneU, 16, kS128>,
2650 &BodyGen::simd_lane_op<kExprI16x8ExtractLaneS, 8, kS128>,
2651 &BodyGen::simd_lane_op<kExprI16x8ExtractLaneU, 8, kS128>,
2652 &BodyGen::simd_lane_op<kExprI32x4ExtractLane, 4, kS128>);
2653
2654 static constexpr auto kWasmGCAlternatives =
2655 CreateArray(&BodyGen::i31_get, //
2656 //
2657 &BodyGen::struct_get<kI32>, //
2658 &BodyGen::array_get<kI32>, //
2659 &BodyGen::array_len, //
2660 //
2661 &BodyGen::ref_is_null, //
2662 &BodyGen::ref_eq, //
2663 &BodyGen::ref_test<kExprRefTest>, //
2664 &BodyGen::ref_test<kExprRefTestNull>, //
2665 //
2666 &BodyGen::string_test, //
2667 &BodyGen::string_charcodeat, //
2668 &BodyGen::string_codepointat, //
2669 &BodyGen::string_length, //
2670 &BodyGen::string_equals, //
2671 &BodyGen::string_compare, //
2672 &BodyGen::string_intocharcodearray, //
2673 &BodyGen::string_intoutf8array, //
2674 &BodyGen::string_measureutf8);
2675
2676 static constexpr GeneratorAlternativesPerOption kAlternativesPerOptions{
2677 kMvpAlternatives, kSimdAlternatives, kWasmGCAlternatives};
2678
2679 GenerateOneOf(kAlternativesPerOptions, data);
2680 }
2681
2682 void GenerateI64(DataRange* data) {
2683 GeneratorRecursionScope rec_scope(this);
2684 if (recursion_limit_reached() || data->size() <= 1) {
2685 builder_->EmitI64Const(data->getPseudoRandom<int64_t>());
2686 return;
2687 }
2688
2689 static constexpr auto kMvpAlternatives = CreateArray(
2690 &BodyGen::i64_const<1>, //
2691 &BodyGen::i64_const<2>, //
2692 &BodyGen::i64_const<3>, //
2693 &BodyGen::i64_const<4>, //
2694 &BodyGen::i64_const<5>, //
2695 &BodyGen::i64_const<6>, //
2696 &BodyGen::i64_const<7>, //
2697 &BodyGen::i64_const<8>, //
2698
2699 &BodyGen::sequence<kI64, kVoid>, //
2700 &BodyGen::sequence<kVoid, kI64>, //
2701 &BodyGen::sequence<kVoid, kI64, kVoid>, //
2702
2703 &BodyGen::op<kExprI64Add, kI64, kI64>,
2704 &BodyGen::op<kExprI64Sub, kI64, kI64>,
2705 &BodyGen::op<kExprI64Mul, kI64, kI64>,
2706
2707 &BodyGen::op<kExprI64DivS, kI64, kI64>,
2708 &BodyGen::op<kExprI64DivU, kI64, kI64>,
2709 &BodyGen::op<kExprI64RemS, kI64, kI64>,
2710 &BodyGen::op<kExprI64RemU, kI64, kI64>,
2711
2712 &BodyGen::op<kExprI64And, kI64, kI64>,
2713 &BodyGen::op<kExprI64Ior, kI64, kI64>,
2714 &BodyGen::op<kExprI64Xor, kI64, kI64>,
2715 &BodyGen::op<kExprI64Shl, kI64, kI64>,
2716 &BodyGen::op<kExprI64ShrU, kI64, kI64>,
2717 &BodyGen::op<kExprI64ShrS, kI64, kI64>,
2718 &BodyGen::op<kExprI64Ror, kI64, kI64>,
2719 &BodyGen::op<kExprI64Rol, kI64, kI64>,
2720
2721 &BodyGen::op<kExprI64Clz, kI64>, //
2722 &BodyGen::op<kExprI64Ctz, kI64>, //
2723 &BodyGen::op<kExprI64Popcnt, kI64>, //
2724
2725 &BodyGen::op_with_prefix<kExprI64SConvertSatF32, kF32>,
2726 &BodyGen::op_with_prefix<kExprI64UConvertSatF32, kF32>,
2727 &BodyGen::op_with_prefix<kExprI64SConvertSatF64, kF64>,
2728 &BodyGen::op_with_prefix<kExprI64UConvertSatF64, kF64>,
2729
2730 &BodyGen::block<kI64>, //
2731 &BodyGen::loop<kI64>, //
2732 &BodyGen::finite_loop<kI64>, //
2733 &BodyGen::if_<kI64, kIfElse>, //
2734 &BodyGen::br_if<kI64>, //
2735 &BodyGen::br_on_null<kI64>, //
2736 &BodyGen::br_on_non_null<kI64>, //
2737 &BodyGen::br_table<kI64>, //
2738 &BodyGen::try_table_block<kI64>, //
2739
2740 &BodyGen::memop<kExprI64LoadMem>, //
2741 &BodyGen::memop<kExprI64LoadMem8S>, //
2742 &BodyGen::memop<kExprI64LoadMem8U>, //
2743 &BodyGen::memop<kExprI64LoadMem16S>, //
2744 &BodyGen::memop<kExprI64LoadMem16U>, //
2745 &BodyGen::memop<kExprI64LoadMem32S>, //
2746 &BodyGen::memop<kExprI64LoadMem32U>, //
2747 //
2748 &BodyGen::memop<kExprI64AtomicLoad>, //
2749 &BodyGen::memop<kExprI64AtomicLoad8U>, //
2750 &BodyGen::memop<kExprI64AtomicLoad16U>, //
2751 &BodyGen::memop<kExprI64AtomicLoad32U>, //
2752 &BodyGen::memop<kExprI64AtomicAdd, kI64>, //
2753 &BodyGen::memop<kExprI64AtomicSub, kI64>, //
2754 &BodyGen::memop<kExprI64AtomicAnd, kI64>, //
2755 &BodyGen::memop<kExprI64AtomicOr, kI64>, //
2756 &BodyGen::memop<kExprI64AtomicXor, kI64>, //
2757 &BodyGen::memop<kExprI64AtomicExchange, kI64>, //
2758 &BodyGen::memop<kExprI64AtomicCompareExchange, kI64, kI64>, //
2759 &BodyGen::memop<kExprI64AtomicAdd8U, kI64>, //
2760 &BodyGen::memop<kExprI64AtomicSub8U, kI64>, //
2761 &BodyGen::memop<kExprI64AtomicAnd8U, kI64>, //
2762 &BodyGen::memop<kExprI64AtomicOr8U, kI64>, //
2763 &BodyGen::memop<kExprI64AtomicXor8U, kI64>, //
2764 &BodyGen::memop<kExprI64AtomicExchange8U, kI64>, //
2765 &BodyGen::memop<kExprI64AtomicCompareExchange8U, kI64, kI64>, //
2766 &BodyGen::memop<kExprI64AtomicAdd16U, kI64>, //
2767 &BodyGen::memop<kExprI64AtomicSub16U, kI64>, //
2768 &BodyGen::memop<kExprI64AtomicAnd16U, kI64>, //
2769 &BodyGen::memop<kExprI64AtomicOr16U, kI64>, //
2770 &BodyGen::memop<kExprI64AtomicXor16U, kI64>, //
2771 &BodyGen::memop<kExprI64AtomicExchange16U, kI64>, //
2772 &BodyGen::memop<kExprI64AtomicCompareExchange16U, kI64, kI64>, //
2773 &BodyGen::memop<kExprI64AtomicAdd32U, kI64>, //
2774 &BodyGen::memop<kExprI64AtomicSub32U, kI64>, //
2775 &BodyGen::memop<kExprI64AtomicAnd32U, kI64>, //
2776 &BodyGen::memop<kExprI64AtomicOr32U, kI64>, //
2777 &BodyGen::memop<kExprI64AtomicXor32U, kI64>, //
2778 &BodyGen::memop<kExprI64AtomicExchange32U, kI64>, //
2779 &BodyGen::memop<kExprI64AtomicCompareExchange32U, kI64, kI64>, //
2780
2781 &BodyGen::get_local<kI64>, //
2782 &BodyGen::tee_local<kI64>, //
2783 &BodyGen::get_global<kI64>, //
2784 &BodyGen::op<kExprSelect, kI64, kI64, kI32>, //
2785 &BodyGen::select_with_type<kI64>, //
2786
2787 &BodyGen::call<kI64>, //
2788 &BodyGen::call_indirect<kI64>, //
2789 &BodyGen::call_ref<kI64>, //
2790 &BodyGen::try_block<kI64>);
2791
2792 static constexpr auto kSimdAlternatives =
2793 CreateArray(&BodyGen::simd_lane_op<kExprI64x2ExtractLane, 2, kS128>);
2794
2795 static constexpr auto kWasmGCAlternatives =
2796 CreateArray(&BodyGen::struct_get<kI64>, //
2797 &BodyGen::array_get<kI64>);
2798
2799 static constexpr GeneratorAlternativesPerOption kAlternativesPerOptions{
2800 kMvpAlternatives, kSimdAlternatives, kWasmGCAlternatives};
2801
2802 GenerateOneOf(kAlternativesPerOptions, data);
2803 }
2804
2805 void GenerateF32(DataRange* data) {
2806 GeneratorRecursionScope rec_scope(this);
2807 if (recursion_limit_reached() || data->size() <= sizeof(float)) {
2808 builder_->EmitF32Const(data->getPseudoRandom<float>());
2809 return;
2810 }
2811
2812 static constexpr auto kMvpAlternatives = CreateArray(
2813 &BodyGen::sequence<kF32, kVoid>, &BodyGen::sequence<kVoid, kF32>,
2814 &BodyGen::sequence<kVoid, kF32, kVoid>,
2815
2816 &BodyGen::op<kExprF32Abs, kF32>, //
2817 &BodyGen::op<kExprF32Neg, kF32>, //
2818 &BodyGen::op<kExprF32Ceil, kF32>, //
2819 &BodyGen::op<kExprF32Floor, kF32>, //
2820 &BodyGen::op<kExprF32Trunc, kF32>, //
2821 &BodyGen::op<kExprF32NearestInt, kF32>, //
2822 &BodyGen::op<kExprF32Sqrt, kF32>, //
2823 &BodyGen::op<kExprF32Add, kF32, kF32>, //
2824 &BodyGen::op<kExprF32Sub, kF32, kF32>, //
2825 &BodyGen::op<kExprF32Mul, kF32, kF32>, //
2826 &BodyGen::op<kExprF32Div, kF32, kF32>, //
2827 &BodyGen::op<kExprF32Min, kF32, kF32>, //
2828 &BodyGen::op<kExprF32Max, kF32, kF32>, //
2829 &BodyGen::op<kExprF32CopySign, kF32, kF32>, //
2830
2831 &BodyGen::op<kExprF32SConvertI32, kI32>,
2832 &BodyGen::op<kExprF32UConvertI32, kI32>,
2833 &BodyGen::op<kExprF32SConvertI64, kI64>,
2834 &BodyGen::op<kExprF32UConvertI64, kI64>,
2835 &BodyGen::op<kExprF32ConvertF64, kF64>,
2836 &BodyGen::op<kExprF32ReinterpretI32, kI32>,
2837
2838 &BodyGen::block<kF32>, //
2839 &BodyGen::loop<kF32>, //
2840 &BodyGen::finite_loop<kF32>, //
2841 &BodyGen::if_<kF32, kIfElse>, //
2842 &BodyGen::br_if<kF32>, //
2843 &BodyGen::br_on_null<kF32>, //
2844 &BodyGen::br_on_non_null<kF32>, //
2845 &BodyGen::br_table<kF32>, //
2846 &BodyGen::try_table_block<kF32>, //
2847
2848 &BodyGen::memop<kExprF32LoadMem>,
2849
2850 &BodyGen::get_local<kF32>, //
2851 &BodyGen::tee_local<kF32>, //
2852 &BodyGen::get_global<kF32>, //
2853 &BodyGen::op<kExprSelect, kF32, kF32, kI32>, //
2854 &BodyGen::select_with_type<kF32>, //
2855
2856 &BodyGen::call<kF32>, //
2857 &BodyGen::call_indirect<kF32>, //
2858 &BodyGen::call_ref<kF32>, //
2859 &BodyGen::try_block<kF32>);
2860
2861 static constexpr auto kSimdAlternatives =
2862 CreateArray(&BodyGen::simd_lane_op<kExprF32x4ExtractLane, 4, kS128>);
2863
2864 static constexpr auto kWasmGCAlternatives =
2865 CreateArray(&BodyGen::struct_get<kF32>, //
2866 &BodyGen::array_get<kF32>);
2867
2868 static constexpr GeneratorAlternativesPerOption kAlternativesPerOptions{
2869 kMvpAlternatives, kSimdAlternatives, kWasmGCAlternatives};
2870
2871 GenerateOneOf(kAlternativesPerOptions, data);
2872 }
2873
2874 void GenerateF64(DataRange* data) {
2875 GeneratorRecursionScope rec_scope(this);
2876 if (recursion_limit_reached() || data->size() <= sizeof(double)) {
2877 builder_->EmitF64Const(data->getPseudoRandom<double>());
2878 return;
2879 }
2880
2881 static constexpr auto kMvpAlternatives = CreateArray(
2882 &BodyGen::sequence<kF64, kVoid>, &BodyGen::sequence<kVoid, kF64>,
2883 &BodyGen::sequence<kVoid, kF64, kVoid>,
2884
2885 &BodyGen::op<kExprF64Abs, kF64>, //
2886 &BodyGen::op<kExprF64Neg, kF64>, //
2887 &BodyGen::op<kExprF64Ceil, kF64>, //
2888 &BodyGen::op<kExprF64Floor, kF64>, //
2889 &BodyGen::op<kExprF64Trunc, kF64>, //
2890 &BodyGen::op<kExprF64NearestInt, kF64>, //
2891 &BodyGen::op<kExprF64Sqrt, kF64>, //
2892 &BodyGen::op<kExprF64Add, kF64, kF64>, //
2893 &BodyGen::op<kExprF64Sub, kF64, kF64>, //
2894 &BodyGen::op<kExprF64Mul, kF64, kF64>, //
2895 &BodyGen::op<kExprF64Div, kF64, kF64>, //
2896 &BodyGen::op<kExprF64Min, kF64, kF64>, //
2897 &BodyGen::op<kExprF64Max, kF64, kF64>, //
2898 &BodyGen::op<kExprF64CopySign, kF64, kF64>, //
2899
2900 &BodyGen::op<kExprF64SConvertI32, kI32>,
2901 &BodyGen::op<kExprF64UConvertI32, kI32>,
2902 &BodyGen::op<kExprF64SConvertI64, kI64>,
2903 &BodyGen::op<kExprF64UConvertI64, kI64>,
2904 &BodyGen::op<kExprF64ConvertF32, kF32>,
2905 &BodyGen::op<kExprF64ReinterpretI64, kI64>,
2906
2907 &BodyGen::block<kF64>, //
2908 &BodyGen::loop<kF64>, //
2909 &BodyGen::finite_loop<kF64>, //
2910 &BodyGen::if_<kF64, kIfElse>, //
2911 &BodyGen::br_if<kF64>, //
2912 &BodyGen::br_on_null<kF64>, //
2913 &BodyGen::br_on_non_null<kF64>, //
2914 &BodyGen::br_table<kF64>, //
2915 &BodyGen::try_table_block<kF64>, //
2916
2917 &BodyGen::memop<kExprF64LoadMem>,
2918
2919 &BodyGen::get_local<kF64>, //
2920 &BodyGen::tee_local<kF64>, //
2921 &BodyGen::get_global<kF64>, //
2922 &BodyGen::op<kExprSelect, kF64, kF64, kI32>, //
2923 &BodyGen::select_with_type<kF64>, //
2924
2925 &BodyGen::call<kF64>, //
2926 &BodyGen::call_indirect<kF64>, //
2927 &BodyGen::call_ref<kF64>, //
2928 &BodyGen::try_block<kF64>);
2929
2930 static constexpr auto kSimdAlternatives =
2931 CreateArray(&BodyGen::simd_lane_op<kExprF64x2ExtractLane, 2, kS128>);
2932
2933 static constexpr auto kWasmGCAlternatives =
2934 CreateArray(&BodyGen::struct_get<kF64>, //
2935 &BodyGen::array_get<kF64>);
2936
2937 static constexpr GeneratorAlternativesPerOption kAlternativesPerOptions{
2938 kMvpAlternatives, kSimdAlternatives, kWasmGCAlternatives};
2939
2940 GenerateOneOf(kAlternativesPerOptions, data);
2941 }
2942
2943 void GenerateS128(DataRange* data) {
2944 CHECK(options_.generate_simd());
2945 GeneratorRecursionScope rec_scope(this);
2946 if (recursion_limit_reached() || data->size() <= sizeof(int32_t)) {
2947 // TODO(v8:8460): v128.const is not implemented yet, and we need a way to
2948 // "bottom-out", so use a splat to generate this.
2949 builder_->EmitI32Const(0);
2950 builder_->EmitWithPrefix(kExprI8x16Splat);
2951 return;
2952 }
2953
2954 constexpr auto alternatives = CreateArray(
2955 &BodyGen::simd_const,
2956 &BodyGen::simd_lane_op<kExprI8x16ReplaceLane, 16, kS128, kI32>,
2957 &BodyGen::simd_lane_op<kExprI16x8ReplaceLane, 8, kS128, kI32>,
2958 &BodyGen::simd_lane_op<kExprI32x4ReplaceLane, 4, kS128, kI32>,
2959 &BodyGen::simd_lane_op<kExprI64x2ReplaceLane, 2, kS128, kI64>,
2960 &BodyGen::simd_lane_op<kExprF32x4ReplaceLane, 4, kS128, kF32>,
2961 &BodyGen::simd_lane_op<kExprF64x2ReplaceLane, 2, kS128, kF64>,
2962
2963 &BodyGen::op_with_prefix<kExprI8x16Splat, kI32>,
2964 &BodyGen::op_with_prefix<kExprI8x16Eq, kS128, kS128>,
2965 &BodyGen::op_with_prefix<kExprI8x16Ne, kS128, kS128>,
2966 &BodyGen::op_with_prefix<kExprI8x16LtS, kS128, kS128>,
2967 &BodyGen::op_with_prefix<kExprI8x16LtU, kS128, kS128>,
2968 &BodyGen::op_with_prefix<kExprI8x16GtS, kS128, kS128>,
2969 &BodyGen::op_with_prefix<kExprI8x16GtU, kS128, kS128>,
2970 &BodyGen::op_with_prefix<kExprI8x16LeS, kS128, kS128>,
2971 &BodyGen::op_with_prefix<kExprI8x16LeU, kS128, kS128>,
2972 &BodyGen::op_with_prefix<kExprI8x16GeS, kS128, kS128>,
2973 &BodyGen::op_with_prefix<kExprI8x16GeU, kS128, kS128>,
2974 &BodyGen::op_with_prefix<kExprI8x16Abs, kS128>,
2975 &BodyGen::op_with_prefix<kExprI8x16Neg, kS128>,
2976 &BodyGen::op_with_prefix<kExprI8x16Shl, kS128, kI32>,
2977 &BodyGen::op_with_prefix<kExprI8x16ShrS, kS128, kI32>,
2978 &BodyGen::op_with_prefix<kExprI8x16ShrU, kS128, kI32>,
2979 &BodyGen::op_with_prefix<kExprI8x16Add, kS128, kS128>,
2980 &BodyGen::op_with_prefix<kExprI8x16AddSatS, kS128, kS128>,
2981 &BodyGen::op_with_prefix<kExprI8x16AddSatU, kS128, kS128>,
2982 &BodyGen::op_with_prefix<kExprI8x16Sub, kS128, kS128>,
2983 &BodyGen::op_with_prefix<kExprI8x16SubSatS, kS128, kS128>,
2984 &BodyGen::op_with_prefix<kExprI8x16SubSatU, kS128, kS128>,
2985 &BodyGen::op_with_prefix<kExprI8x16MinS, kS128, kS128>,
2986 &BodyGen::op_with_prefix<kExprI8x16MinU, kS128, kS128>,
2987 &BodyGen::op_with_prefix<kExprI8x16MaxS, kS128, kS128>,
2988 &BodyGen::op_with_prefix<kExprI8x16MaxU, kS128, kS128>,
2989 &BodyGen::op_with_prefix<kExprI8x16RoundingAverageU, kS128, kS128>,
2990 &BodyGen::op_with_prefix<kExprI8x16Popcnt, kS128>,
2991
2992 &BodyGen::op_with_prefix<kExprI16x8Splat, kI32>,
2993 &BodyGen::op_with_prefix<kExprI16x8Eq, kS128, kS128>,
2994 &BodyGen::op_with_prefix<kExprI16x8Ne, kS128, kS128>,
2995 &BodyGen::op_with_prefix<kExprI16x8LtS, kS128, kS128>,
2996 &BodyGen::op_with_prefix<kExprI16x8LtU, kS128, kS128>,
2997 &BodyGen::op_with_prefix<kExprI16x8GtS, kS128, kS128>,
2998 &BodyGen::op_with_prefix<kExprI16x8GtU, kS128, kS128>,
2999 &BodyGen::op_with_prefix<kExprI16x8LeS, kS128, kS128>,
3000 &BodyGen::op_with_prefix<kExprI16x8LeU, kS128, kS128>,
3001 &BodyGen::op_with_prefix<kExprI16x8GeS, kS128, kS128>,
3002 &BodyGen::op_with_prefix<kExprI16x8GeU, kS128, kS128>,
3003 &BodyGen::op_with_prefix<kExprI16x8Abs, kS128>,
3004 &BodyGen::op_with_prefix<kExprI16x8Neg, kS128>,
3005 &BodyGen::op_with_prefix<kExprI16x8Shl, kS128, kI32>,
3006 &BodyGen::op_with_prefix<kExprI16x8ShrS, kS128, kI32>,
3007 &BodyGen::op_with_prefix<kExprI16x8ShrU, kS128, kI32>,
3008 &BodyGen::op_with_prefix<kExprI16x8Add, kS128, kS128>,
3009 &BodyGen::op_with_prefix<kExprI16x8AddSatS, kS128, kS128>,
3010 &BodyGen::op_with_prefix<kExprI16x8AddSatU, kS128, kS128>,
3011 &BodyGen::op_with_prefix<kExprI16x8Sub, kS128, kS128>,
3012 &BodyGen::op_with_prefix<kExprI16x8SubSatS, kS128, kS128>,
3013 &BodyGen::op_with_prefix<kExprI16x8SubSatU, kS128, kS128>,
3014 &BodyGen::op_with_prefix<kExprI16x8Mul, kS128, kS128>,
3015 &BodyGen::op_with_prefix<kExprI16x8MinS, kS128, kS128>,
3016 &BodyGen::op_with_prefix<kExprI16x8MinU, kS128, kS128>,
3017 &BodyGen::op_with_prefix<kExprI16x8MaxS, kS128, kS128>,
3018 &BodyGen::op_with_prefix<kExprI16x8MaxU, kS128, kS128>,
3019 &BodyGen::op_with_prefix<kExprI16x8RoundingAverageU, kS128, kS128>,
3020 &BodyGen::op_with_prefix<kExprI16x8ExtMulLowI8x16S, kS128, kS128>,
3021 &BodyGen::op_with_prefix<kExprI16x8ExtMulLowI8x16U, kS128, kS128>,
3022 &BodyGen::op_with_prefix<kExprI16x8ExtMulHighI8x16S, kS128, kS128>,
3023 &BodyGen::op_with_prefix<kExprI16x8ExtMulHighI8x16U, kS128, kS128>,
3024 &BodyGen::op_with_prefix<kExprI16x8Q15MulRSatS, kS128, kS128>,
3025 &BodyGen::op_with_prefix<kExprI16x8ExtAddPairwiseI8x16S, kS128>,
3026 &BodyGen::op_with_prefix<kExprI16x8ExtAddPairwiseI8x16U, kS128>,
3027
3028 &BodyGen::op_with_prefix<kExprI32x4Splat, kI32>,
3029 &BodyGen::op_with_prefix<kExprI32x4Eq, kS128, kS128>,
3030 &BodyGen::op_with_prefix<kExprI32x4Ne, kS128, kS128>,
3031 &BodyGen::op_with_prefix<kExprI32x4LtS, kS128, kS128>,
3032 &BodyGen::op_with_prefix<kExprI32x4LtU, kS128, kS128>,
3033 &BodyGen::op_with_prefix<kExprI32x4GtS, kS128, kS128>,
3034 &BodyGen::op_with_prefix<kExprI32x4GtU, kS128, kS128>,
3035 &BodyGen::op_with_prefix<kExprI32x4LeS, kS128, kS128>,
3036 &BodyGen::op_with_prefix<kExprI32x4LeU, kS128, kS128>,
3037 &BodyGen::op_with_prefix<kExprI32x4GeS, kS128, kS128>,
3038 &BodyGen::op_with_prefix<kExprI32x4GeU, kS128, kS128>,
3039 &BodyGen::op_with_prefix<kExprI32x4Abs, kS128>,
3040 &BodyGen::op_with_prefix<kExprI32x4Neg, kS128>,
3041 &BodyGen::op_with_prefix<kExprI32x4Shl, kS128, kI32>,
3042 &BodyGen::op_with_prefix<kExprI32x4ShrS, kS128, kI32>,
3043 &BodyGen::op_with_prefix<kExprI32x4ShrU, kS128, kI32>,
3044 &BodyGen::op_with_prefix<kExprI32x4Add, kS128, kS128>,
3045 &BodyGen::op_with_prefix<kExprI32x4Sub, kS128, kS128>,
3046 &BodyGen::op_with_prefix<kExprI32x4Mul, kS128, kS128>,
3047 &BodyGen::op_with_prefix<kExprI32x4MinS, kS128, kS128>,
3048 &BodyGen::op_with_prefix<kExprI32x4MinU, kS128, kS128>,
3049 &BodyGen::op_with_prefix<kExprI32x4MaxS, kS128, kS128>,
3050 &BodyGen::op_with_prefix<kExprI32x4MaxU, kS128, kS128>,
3051 &BodyGen::op_with_prefix<kExprI32x4DotI16x8S, kS128, kS128>,
3052 &BodyGen::op_with_prefix<kExprI32x4ExtMulLowI16x8S, kS128, kS128>,
3053 &BodyGen::op_with_prefix<kExprI32x4ExtMulLowI16x8U, kS128, kS128>,
3054 &BodyGen::op_with_prefix<kExprI32x4ExtMulHighI16x8S, kS128, kS128>,
3055 &BodyGen::op_with_prefix<kExprI32x4ExtMulHighI16x8U, kS128, kS128>,
3056 &BodyGen::op_with_prefix<kExprI32x4ExtAddPairwiseI16x8S, kS128>,
3057 &BodyGen::op_with_prefix<kExprI32x4ExtAddPairwiseI16x8U, kS128>,
3058
3059 &BodyGen::op_with_prefix<kExprI64x2Splat, kI64>,
3060 &BodyGen::op_with_prefix<kExprI64x2Eq, kS128, kS128>,
3061 &BodyGen::op_with_prefix<kExprI64x2Ne, kS128, kS128>,
3062 &BodyGen::op_with_prefix<kExprI64x2LtS, kS128, kS128>,
3063 &BodyGen::op_with_prefix<kExprI64x2GtS, kS128, kS128>,
3064 &BodyGen::op_with_prefix<kExprI64x2LeS, kS128, kS128>,
3065 &BodyGen::op_with_prefix<kExprI64x2GeS, kS128, kS128>,
3066 &BodyGen::op_with_prefix<kExprI64x2Abs, kS128>,
3067 &BodyGen::op_with_prefix<kExprI64x2Neg, kS128>,
3068 &BodyGen::op_with_prefix<kExprI64x2Shl, kS128, kI32>,
3069 &BodyGen::op_with_prefix<kExprI64x2ShrS, kS128, kI32>,
3070 &BodyGen::op_with_prefix<kExprI64x2ShrU, kS128, kI32>,
3071 &BodyGen::op_with_prefix<kExprI64x2Add, kS128, kS128>,
3072 &BodyGen::op_with_prefix<kExprI64x2Sub, kS128, kS128>,
3073 &BodyGen::op_with_prefix<kExprI64x2Mul, kS128, kS128>,
3074 &BodyGen::op_with_prefix<kExprI64x2ExtMulLowI32x4S, kS128, kS128>,
3075 &BodyGen::op_with_prefix<kExprI64x2ExtMulLowI32x4U, kS128, kS128>,
3076 &BodyGen::op_with_prefix<kExprI64x2ExtMulHighI32x4S, kS128, kS128>,
3077 &BodyGen::op_with_prefix<kExprI64x2ExtMulHighI32x4U, kS128, kS128>,
3078
3079 &BodyGen::op_with_prefix<kExprF32x4Splat, kF32>,
3080 &BodyGen::op_with_prefix<kExprF32x4Eq, kS128, kS128>,
3081 &BodyGen::op_with_prefix<kExprF32x4Ne, kS128, kS128>,
3082 &BodyGen::op_with_prefix<kExprF32x4Lt, kS128, kS128>,
3083 &BodyGen::op_with_prefix<kExprF32x4Gt, kS128, kS128>,
3084 &BodyGen::op_with_prefix<kExprF32x4Le, kS128, kS128>,
3085 &BodyGen::op_with_prefix<kExprF32x4Ge, kS128, kS128>,
3086 &BodyGen::op_with_prefix<kExprF32x4Abs, kS128>,
3087 &BodyGen::op_with_prefix<kExprF32x4Neg, kS128>,
3088 &BodyGen::op_with_prefix<kExprF32x4Sqrt, kS128>,
3089 &BodyGen::op_with_prefix<kExprF32x4Add, kS128, kS128>,
3090 &BodyGen::op_with_prefix<kExprF32x4Sub, kS128, kS128>,
3091 &BodyGen::op_with_prefix<kExprF32x4Mul, kS128, kS128>,
3092 &BodyGen::op_with_prefix<kExprF32x4Div, kS128, kS128>,
3093 &BodyGen::op_with_prefix<kExprF32x4Min, kS128, kS128>,
3094 &BodyGen::op_with_prefix<kExprF32x4Max, kS128, kS128>,
3095 &BodyGen::op_with_prefix<kExprF32x4Pmin, kS128, kS128>,
3096 &BodyGen::op_with_prefix<kExprF32x4Pmax, kS128, kS128>,
3097 &BodyGen::op_with_prefix<kExprF32x4Ceil, kS128>,
3098 &BodyGen::op_with_prefix<kExprF32x4Floor, kS128>,
3099 &BodyGen::op_with_prefix<kExprF32x4Trunc, kS128>,
3100 &BodyGen::op_with_prefix<kExprF32x4NearestInt, kS128>,
3101
3102 &BodyGen::op_with_prefix<kExprF64x2Splat, kF64>,
3103 &BodyGen::op_with_prefix<kExprF64x2Eq, kS128, kS128>,
3104 &BodyGen::op_with_prefix<kExprF64x2Ne, kS128, kS128>,
3105 &BodyGen::op_with_prefix<kExprF64x2Lt, kS128, kS128>,
3106 &BodyGen::op_with_prefix<kExprF64x2Gt, kS128, kS128>,
3107 &BodyGen::op_with_prefix<kExprF64x2Le, kS128, kS128>,
3108 &BodyGen::op_with_prefix<kExprF64x2Ge, kS128, kS128>,
3109 &BodyGen::op_with_prefix<kExprF64x2Abs, kS128>,
3110 &BodyGen::op_with_prefix<kExprF64x2Neg, kS128>,
3111 &BodyGen::op_with_prefix<kExprF64x2Sqrt, kS128>,
3112 &BodyGen::op_with_prefix<kExprF64x2Add, kS128, kS128>,
3113 &BodyGen::op_with_prefix<kExprF64x2Sub, kS128, kS128>,
3114 &BodyGen::op_with_prefix<kExprF64x2Mul, kS128, kS128>,
3115 &BodyGen::op_with_prefix<kExprF64x2Div, kS128, kS128>,
3116 &BodyGen::op_with_prefix<kExprF64x2Min, kS128, kS128>,
3117 &BodyGen::op_with_prefix<kExprF64x2Max, kS128, kS128>,
3118 &BodyGen::op_with_prefix<kExprF64x2Pmin, kS128, kS128>,
3119 &BodyGen::op_with_prefix<kExprF64x2Pmax, kS128, kS128>,
3120 &BodyGen::op_with_prefix<kExprF64x2Ceil, kS128>,
3121 &BodyGen::op_with_prefix<kExprF64x2Floor, kS128>,
3122 &BodyGen::op_with_prefix<kExprF64x2Trunc, kS128>,
3123 &BodyGen::op_with_prefix<kExprF64x2NearestInt, kS128>,
3124
3125 &BodyGen::op_with_prefix<kExprF64x2PromoteLowF32x4, kS128>,
3126 &BodyGen::op_with_prefix<kExprF64x2ConvertLowI32x4S, kS128>,
3127 &BodyGen::op_with_prefix<kExprF64x2ConvertLowI32x4U, kS128>,
3128 &BodyGen::op_with_prefix<kExprF32x4DemoteF64x2Zero, kS128>,
3129 &BodyGen::op_with_prefix<kExprI32x4TruncSatF64x2SZero, kS128>,
3130 &BodyGen::op_with_prefix<kExprI32x4TruncSatF64x2UZero, kS128>,
3131
3132 &BodyGen::op_with_prefix<kExprI64x2SConvertI32x4Low, kS128>,
3133 &BodyGen::op_with_prefix<kExprI64x2SConvertI32x4High, kS128>,
3134 &BodyGen::op_with_prefix<kExprI64x2UConvertI32x4Low, kS128>,
3135 &BodyGen::op_with_prefix<kExprI64x2UConvertI32x4High, kS128>,
3136
3137 &BodyGen::op_with_prefix<kExprI32x4SConvertF32x4, kS128>,
3138 &BodyGen::op_with_prefix<kExprI32x4UConvertF32x4, kS128>,
3139 &BodyGen::op_with_prefix<kExprF32x4SConvertI32x4, kS128>,
3140 &BodyGen::op_with_prefix<kExprF32x4UConvertI32x4, kS128>,
3141
3142 &BodyGen::op_with_prefix<kExprI8x16SConvertI16x8, kS128, kS128>,
3143 &BodyGen::op_with_prefix<kExprI8x16UConvertI16x8, kS128, kS128>,
3144 &BodyGen::op_with_prefix<kExprI16x8SConvertI32x4, kS128, kS128>,
3145 &BodyGen::op_with_prefix<kExprI16x8UConvertI32x4, kS128, kS128>,
3146
3147 &BodyGen::op_with_prefix<kExprI16x8SConvertI8x16Low, kS128>,
3148 &BodyGen::op_with_prefix<kExprI16x8SConvertI8x16High, kS128>,
3149 &BodyGen::op_with_prefix<kExprI16x8UConvertI8x16Low, kS128>,
3150 &BodyGen::op_with_prefix<kExprI16x8UConvertI8x16High, kS128>,
3151 &BodyGen::op_with_prefix<kExprI32x4SConvertI16x8Low, kS128>,
3152 &BodyGen::op_with_prefix<kExprI32x4SConvertI16x8High, kS128>,
3153 &BodyGen::op_with_prefix<kExprI32x4UConvertI16x8Low, kS128>,
3154 &BodyGen::op_with_prefix<kExprI32x4UConvertI16x8High, kS128>,
3155
3156 &BodyGen::op_with_prefix<kExprS128Not, kS128>,
3157 &BodyGen::op_with_prefix<kExprS128And, kS128, kS128>,
3158 &BodyGen::op_with_prefix<kExprS128AndNot, kS128, kS128>,
3159 &BodyGen::op_with_prefix<kExprS128Or, kS128, kS128>,
3160 &BodyGen::op_with_prefix<kExprS128Xor, kS128, kS128>,
3161 &BodyGen::op_with_prefix<kExprS128Select, kS128, kS128, kS128>,
3162
3163 &BodyGen::simd_shuffle,
3164 &BodyGen::op_with_prefix<kExprI8x16Swizzle, kS128, kS128>,
3165
3166 &BodyGen::memop<kExprS128LoadMem>, //
3167 &BodyGen::memop<kExprS128Load8x8S>, //
3168 &BodyGen::memop<kExprS128Load8x8U>, //
3169 &BodyGen::memop<kExprS128Load16x4S>, //
3170 &BodyGen::memop<kExprS128Load16x4U>, //
3171 &BodyGen::memop<kExprS128Load32x2S>, //
3172 &BodyGen::memop<kExprS128Load32x2U>, //
3173 &BodyGen::memop<kExprS128Load8Splat>, //
3174 &BodyGen::memop<kExprS128Load16Splat>, //
3175 &BodyGen::memop<kExprS128Load32Splat>, //
3176 &BodyGen::memop<kExprS128Load64Splat>, //
3177 &BodyGen::memop<kExprS128Load32Zero>, //
3178 &BodyGen::memop<kExprS128Load64Zero>, //
3179 &BodyGen::simd_lane_memop<kExprS128Load8Lane, 16, kS128>, //
3180 &BodyGen::simd_lane_memop<kExprS128Load16Lane, 8, kS128>, //
3181 &BodyGen::simd_lane_memop<kExprS128Load32Lane, 4, kS128>, //
3182 &BodyGen::simd_lane_memop<kExprS128Load64Lane, 2, kS128>, //
3183
3184 &BodyGen::op_with_prefix<kExprI8x16RelaxedSwizzle, kS128, kS128>,
3185 &BodyGen::op_with_prefix<kExprI8x16RelaxedLaneSelect, kS128, kS128,
3186 kS128>,
3187 &BodyGen::op_with_prefix<kExprI16x8RelaxedLaneSelect, kS128, kS128,
3188 kS128>,
3189 &BodyGen::op_with_prefix<kExprI32x4RelaxedLaneSelect, kS128, kS128,
3190 kS128>,
3191 &BodyGen::op_with_prefix<kExprI64x2RelaxedLaneSelect, kS128, kS128,
3192 kS128>,
3193 &BodyGen::op_with_prefix<kExprF32x4Qfma, kS128, kS128, kS128>,
3194 &BodyGen::op_with_prefix<kExprF32x4Qfms, kS128, kS128, kS128>,
3195 &BodyGen::op_with_prefix<kExprF64x2Qfma, kS128, kS128, kS128>,
3196 &BodyGen::op_with_prefix<kExprF64x2Qfms, kS128, kS128, kS128>,
3197 &BodyGen::op_with_prefix<kExprF32x4RelaxedMin, kS128, kS128>,
3198 &BodyGen::op_with_prefix<kExprF32x4RelaxedMax, kS128, kS128>,
3199 &BodyGen::op_with_prefix<kExprF64x2RelaxedMin, kS128, kS128>,
3200 &BodyGen::op_with_prefix<kExprF64x2RelaxedMax, kS128, kS128>,
3201 &BodyGen::op_with_prefix<kExprI32x4RelaxedTruncF32x4S, kS128>,
3202 &BodyGen::op_with_prefix<kExprI32x4RelaxedTruncF32x4U, kS128>,
3203 &BodyGen::op_with_prefix<kExprI32x4RelaxedTruncF64x2SZero, kS128>,
3204 &BodyGen::op_with_prefix<kExprI32x4RelaxedTruncF64x2UZero, kS128>,
3205 &BodyGen::op_with_prefix<kExprI16x8DotI8x16I7x16S, kS128, kS128>,
3206 &BodyGen::op_with_prefix<kExprI32x4DotI8x16I7x16AddS, kS128, kS128,
3207 kS128>);
3208
3209 GenerateOneOf(alternatives, data);
3210 }
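// Illustration: each entry in the table above is a pointer-to-member. An
// entry such as
//   &BodyGen::op_with_prefix<kExprI32x4Add, kS128, kS128>
// first generates the two kS128 operands recursively and then emits the
// prefixed SIMD opcode, so GenerateOneOf produces exactly one S128 value per
// call.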
3211
3212 void Generate(ValueType type, DataRange* data) {
3213 switch (type.kind()) {
3214 case kVoid:
3215 return GenerateVoid(data);
3216 case kI32:
3217 return GenerateI32(data);
3218 case kI64:
3219 return GenerateI64(data);
3220 case kF32:
3221 return GenerateF32(data);
3222 case kF64:
3223 return GenerateF64(data);
3224 case kS128:
3225 return GenerateS128(data);
3226 case kRefNull:
3227 return GenerateRef(type.heap_type(), data, kNullable);
3228 case kRef:
3229 return GenerateRef(type.heap_type(), data, kNonNullable);
3230 default:
3231 UNREACHABLE();
3232 }
3233 }
3234
3235 template <ValueKind kind>
3236 constexpr void Generate(DataRange* data) {
3237 switch (kind) {
3238 case kVoid:
3239 return GenerateVoid(data);
3240 case kI32:
3241 return GenerateI32(data);
3242 case kI64:
3243 return GenerateI64(data);
3244 case kF32:
3245 return GenerateF32(data);
3246 case kF64:
3247 return GenerateF64(data);
3248 case kS128:
3249 return GenerateS128(data);
3250 default:
3251 // For kRefNull and kRef we need the HeapType which we can get from the
3252 // ValueType.
3253 UNREACHABLE();
3254 }
3255 }
3256
3257 template <ValueKind T1, ValueKind T2, ValueKind... Ts>
3258 void Generate(DataRange* data) {
3259 // TODO(clemensb): Implement a more even split.
3260 auto first_data = data->split();
3261 Generate<T1>(&first_data);
3262 Generate<T2, Ts...>(data);
3263 }
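// Illustrative expansion of the variadic case above: a call like
//   Generate<kI32, kI64, kF32>(data);
// splits off a fresh DataRange for the first type and recurses on the rest,
//   Generate<kI32>(&first_data); Generate<kI64, kF32>(data);
// until the single-type overload handles the last element.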
3264
3265 void GenerateRef(HeapType type, DataRange* data,
3266 Nullability nullability = kNullable) {
3267 std::optional<GeneratorRecursionScope> rec_scope;
3268 if (nullability) {
3269 rec_scope.emplace(this);
3270 }
3271
3272 if (recursion_limit_reached() || data->size() == 0) {
3273 if (nullability == kNullable) {
3274 ref_null(type, data);
3275 return;
3276 }
3277 // It is ok not to return here because the non-nullable types are not
3278 // recursive by construction, so the depth is limited already.
3279 }
3280
3281 constexpr auto alternatives_indexed_type =
3282 CreateArray(&BodyGen::new_object, //
3283 &BodyGen::get_local_ref, //
3284 &BodyGen::array_get_ref, //
3285 &BodyGen::struct_get_ref, //
3286 &BodyGen::ref_cast, //
3287 &BodyGen::ref_as_non_null, //
3288 &BodyGen::br_on_cast); //
3289
3290 constexpr auto alternatives_func_any =
3291 CreateArray(&BodyGen::table_get, //
3292 &BodyGen::get_local_ref, //
3293 &BodyGen::array_get_ref, //
3294 &BodyGen::struct_get_ref, //
3295 &BodyGen::ref_cast, //
3296 &BodyGen::any_convert_extern, //
3297 &BodyGen::ref_as_non_null, //
3298 &BodyGen::br_on_cast); //
3299
3300 constexpr auto alternatives_other =
3301 CreateArray(&BodyGen::array_get_ref, //
3302 &BodyGen::get_local_ref, //
3303 &BodyGen::struct_get_ref, //
3304 &BodyGen::ref_cast, //
3305 &BodyGen::ref_as_non_null, //
3306 &BodyGen::br_on_cast); //
3307
3308 switch (type.representation()) {
3309 // For abstract types, sometimes generate one of their subtypes.
3310 case HeapType::kAny: {
3311 // Weighted according to the types in the module:
3312 // If there are D data types and F function types, the relative
3313 // frequency for dataref is D, for funcref it is F, and for i31ref and
3314 // the fallback to anyref it is 2 each.
3315 const uint8_t num_data_types =
3316 static_cast<uint8_t>(structs_.size() + arrays_.size());
3317 const uint8_t emit_i31ref = 2;
3318 const uint8_t fallback_to_anyref = 2;
3319 uint8_t random = data->get<uint8_t>() %
3320 (num_data_types + emit_i31ref + fallback_to_anyref);
3321 // We have to compute the random value first so that, in case
3322 // GenerateOneOf fails, we can continue with a fallback alternative
3323 // that is guaranteed to generate a value of the wanted type.
3324 // To know which alternative to fall back to after a failed
3325 // GenerateOneOf, the random variable is recomputed.
3326 if (random >= num_data_types + emit_i31ref) {
3327 if (GenerateOneOf(alternatives_func_any, type, data, nullability)) {
3328 return;
3329 }
3330 random = data->get<uint8_t>() % (num_data_types + emit_i31ref);
3331 }
3332 if (random < structs_.size()) {
3333 GenerateRef(kWasmStructRef, data, nullability);
3334 } else if (random < num_data_types) {
3335 GenerateRef(kWasmArrayRef, data, nullability);
3336 } else {
3337 GenerateRef(kWasmI31Ref, data, nullability);
3338 }
3339 return;
3340 }
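// Worked example of the weighting above (illustrative numbers): with 4
// structs and 3 arrays, num_data_types = 7 and the draw space is
// 7 + 2 + 2 = 11. A draw in [0,4) yields structref, [4,7) arrayref,
// [7,9) i31ref, and [9,11) first tries the generic anyref alternatives,
// redrawing over [0,9) if GenerateOneOf fails.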
3341 case HeapType::kArray: {
3342 constexpr uint8_t fallback_to_dataref = 1;
3343 uint8_t random =
3344 data->get<uint8_t>() % (arrays_.size() + fallback_to_dataref);
3345 // Try generating one of the alternatives and continue to the rest of
3346 // the methods in case it fails.
3347 if (random >= arrays_.size()) {
3348 if (GenerateOneOf(alternatives_other, type, data, nullability))
3349 return;
3350 random = data->get<uint8_t>() % arrays_.size();
3351 }
3352 ModuleTypeIndex index = arrays_[random];
3353 DCHECK(builder_->builder()->IsArrayType(index));
3354 GenerateRef(HeapType::Index(index, kNotShared, RefTypeKind::kArray),
3355 data, nullability);
3356 return;
3357 }
3358 case HeapType::kStruct: {
3359 constexpr uint8_t fallback_to_dataref = 2;
3360 uint8_t random =
3361 data->get<uint8_t>() % (structs_.size() + fallback_to_dataref);
3362 // Try generating one of the alternatives
3363 // and continue to the rest of the methods in case it fails.
3364 if (random >= structs_.size()) {
3365 if (GenerateOneOf(alternatives_other, type, data, nullability)) {
3366 return;
3367 }
3368 random = data->get<uint8_t>() % structs_.size();
3369 }
3370 ModuleTypeIndex index = structs_[random];
3371 DCHECK(builder_->builder()->IsStructType(index));
3372 GenerateRef(HeapType::Index(index, kNotShared, RefTypeKind::kStruct),
3373 data, nullability);
3374 return;
3375 }
3376 case HeapType::kEq: {
3377 const uint8_t num_types = arrays_.size() + structs_.size();
3378 const uint8_t emit_i31ref = 2;
3379 constexpr uint8_t fallback_to_eqref = 1;
3380 uint8_t random = data->get<uint8_t>() %
3381 (num_types + emit_i31ref + fallback_to_eqref);
3382 // Try generating one of the alternatives
3383 // and continue to the rest of the methods in case it fails.
3384 if (random >= num_types + emit_i31ref) {
3385 if (GenerateOneOf(alternatives_other, type, data, nullability)) {
3386 return;
3387 }
3388 random = data->get<uint8_t>() % (num_types + emit_i31ref);
3389 }
3390 if (random < num_types) {
3391 // Using `HeapType(random)` here relies on the assumption that struct
3392 // and array types come before signatures.
3393 RefTypeKind kind = RefTypeKind::kOther;
3394 if (builder_->builder()->IsArrayType(random)) {
3395 kind = RefTypeKind::kArray;
3396 } else if (builder_->builder()->IsStructType(random)) {
3397 kind = RefTypeKind::kStruct;
3398 } else {
3399 UNREACHABLE();
3400 }
3401 GenerateRef(
3402 HeapType::Index(ModuleTypeIndex{random}, kNotShared, kind), data,
3403 nullability);
3404 } else {
3405 GenerateRef(kWasmI31Ref, data, nullability);
3406 }
3407 return;
3408 }
3409 case HeapType::kFunc: {
3410 uint32_t random = data->get<uint8_t>() % (functions_.size() + 1);
3411 // Try generating one of the alternatives
3412 // and continue to the rest of the methods in case it fails.
3413 if (random >= functions_.size()) {
3414 if (GenerateOneOf(alternatives_func_any, type, data, nullability)) {
3415 return;
3416 }
3417 random = data->get<uint8_t>() % functions_.size();
3418 }
3419 ModuleTypeIndex signature_index = functions_[random];
3420 DCHECK(builder_->builder()->IsSignature(signature_index));
3421 GenerateRef(HeapType::Index(signature_index, kNotShared,
3422 RefTypeKind::kFunction),
3423 data, nullability);
3424 return;
3425 }
3426 case HeapType::kI31: {
3427 // Try generating one of the alternatives
3428 // and continue to the rest of the methods in case it fails.
3429 if (data->get<bool>() &&
3430 GenerateOneOf(alternatives_other, type, data, nullability)) {
3431 return;
3432 }
3433 Generate(kWasmI32, data);
3434 builder_->EmitWithPrefix(kExprRefI31);
3435 return;
3436 }
3437 case HeapType::kExn: {
3438 // TODO(manoskouk): Can we somehow come up with a nontrivial exnref?
3439 ref_null(type, data);
3440 if (nullability == kNonNullable) {
3441 builder_->Emit(kExprRefAsNonNull);
3442 }
3443 return;
3444 }
3445 case HeapType::kExtern: {
3446 uint8_t choice = data->get<uint8_t>();
3447 if (choice < 25) {
3448 // ~10% chance of extern.convert_any.
3449 GenerateRef(kWasmAnyRef, data);
3450 builder_->EmitWithPrefix(kExprExternConvertAny);
3451 if (nullability == kNonNullable) {
3452 builder_->Emit(kExprRefAsNonNull);
3453 }
3454 return;
3455 }
3456 // ~80% chance of string.
3457 if (choice < 230 && options_.generate_wasm_gc()) {
3458 uint8_t subchoice = choice % 7;
3459 switch (subchoice) {
3460 case 0:
3461 return string_cast(data);
3462 case 1:
3463 return string_fromcharcode(data);
3464 case 2:
3465 return string_fromcodepoint(data);
3466 case 3:
3467 return string_concat(data);
3468 case 4:
3469 return string_substring(data);
3470 case 5:
3471 return string_fromcharcodearray(data);
3472 case 6:
3473 return string_fromutf8array(data);
3474 }
3475 }
3476 // ~10% chance of fallthrough.
3477 [[fallthrough]];
3478 }
3479 case HeapType::kNoExtern:
3480 case HeapType::kNoFunc:
3481 case HeapType::kNone:
3482 case HeapType::kNoExn:
3483 ref_null(type, data);
3484 if (nullability == kNonNullable) {
3485 builder_->Emit(kExprRefAsNonNull);
3486 }
3487 return;
3488 default:
3489 // Indexed type (i.e. user-defined type).
3490 DCHECK(type.is_index());
3491 if (options_.generate_wasm_gc() &&
3492 type.ref_index() == string_imports_.array_i8 &&
3493 data->get<uint8_t>() < 32) {
3494 // 1/8th chance, fits the number of remaining alternatives (7) well.
3495 return string_toutf8array(data);
3496 }
3497 GenerateOneOf(alternatives_indexed_type, type, data, nullability);
3498 return;
3499 }
3500 UNREACHABLE();
3501 }
3502
3503 void GenerateRef(DataRange* data) {
3504 constexpr HeapType top_types[] = {
3505 kWasmAnyRef,
3506 kWasmFuncRef,
3507 kWasmExternRef,
3508 };
3509 HeapType type = top_types[data->get<uint8_t>() % arraysize(top_types)];
3510 GenerateRef(type, data);
3511 }
3512
3513 std::vector<ValueType> GenerateTypes(DataRange* data) {
3514 return fuzzing::GenerateTypes(
3515 options_, data,
3516 static_cast<uint32_t>(functions_.size() + structs_.size() +
3517 arrays_.size()));
3518 }
3519
3520 void Generate(base::Vector<const ValueType> types, DataRange* data) {
3521 // Maybe emit a multi-value block with the expected return type. Compare
3522 // against a non-default (non-zero) value so that exhausted input data,
3523 // which reads as zeros, does not recurse into ever more blocks.
3524 bool generate_block = data->get<uint8_t>() % 32 == 1;
3525 if (generate_block) {
3526 GeneratorRecursionScope rec_scope(this);
3527 if (!recursion_limit_reached()) {
3528 const auto param_types = GenerateTypes(data);
3529 Generate(base::VectorOf(param_types), data);
3530 any_block(base::VectorOf(param_types), types, data);
3531 return;
3532 }
3533 }
3534
3535 if (types.size() == 0) {
3536 Generate(kWasmVoid, data);
3537 return;
3538 }
3539 if (types.size() == 1) {
3540 Generate(types[0], data);
3541 return;
3542 }
3543
3544 // Split the types in two halves and recursively generate each half.
3545 // Each half is non empty to ensure termination.
3546 size_t split_index = data->get<uint8_t>() % (types.size() - 1) + 1;
3547 base::Vector<const ValueType> lower_half = types.SubVector(0, split_index);
3548 base::Vector<const ValueType> upper_half =
3549 types.SubVector(split_index, types.size());
3550 DataRange first_range = data->split();
3551 Generate(lower_half, &first_range);
3552 Generate(upper_half, data);
3553 }
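// Illustration: for types = {i32, i64, f32, f64}, a split_index of 2 leads to
// Generate({i32, i64}) on a split-off DataRange and Generate({f32, f64}) on
// the remaining one; both halves are guaranteed non-empty, which bounds the
// recursion depth by the number of types.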
3554 void Generate(std::initializer_list<ValueTypeBase> types, DataRange* data) {
3555 base::Vector<const ValueType> cast_types = base::VectorOf<const ValueType>(
3556 static_cast<const ValueType*>(types.begin()), types.size());
3557 return Generate(cast_types, data);
3558 }
3559
3560 void Consume(ValueType type) {
3561 // Try to store the value in a local if there is a local with the same
3562 // type. TODO(14034): For reference types a local with a super type
3563 // would also be fine.
3564 size_t num_params = builder_->signature()->parameter_count();
3565 for (uint32_t local_offset = 0; local_offset < locals_.size();
3566 ++local_offset) {
3567 if (locals_[local_offset] == type) {
3568 uint32_t local_index = static_cast<uint32_t>(local_offset + num_params);
3569 builder_->EmitWithU32V(kExprLocalSet, local_index);
3570 return;
3571 }
3572 }
3573 for (uint32_t param_index = 0; param_index < num_params; ++param_index) {
3574 if (builder_->signature()->GetParam(param_index) == type) {
3575 builder_->EmitWithU32V(kExprLocalSet, param_index);
3576 return;
3577 }
3578 }
3579 // No opportunity found to use the value, so just drop it.
3580 builder_->Emit(kExprDrop);
3581 }
3582
3583 // Emit code to match an arbitrary signature.
3584 // TODO(11954): Add the missing reference type conversion/upcasting.
3585 void ConsumeAndGenerate(base::Vector<const ValueType> param_types,
3586 base::Vector<const ValueType> return_types,
3587 DataRange* data) {
3588 // This numeric conversion logic picks exactly one index among the
3589 // parameter values on the stack, consumes all values above it, and
3590 // converts the value at that index to the first wanted return type.
3591 // If no suitable value exists, the returns are generated from scratch.
3592 auto primitive = [](ValueType t) -> bool {
3593 switch (t.kind()) {
3594 case kI32:
3595 case kI64:
3596 case kF32:
3597 case kF64:
3598 return true;
3599 default:
3600 return false;
3601 }
3602 };
3603
3604 if (return_types.size() == 0 || param_types.size() == 0 ||
3605 !primitive(return_types[0])) {
3606 for (auto iter = param_types.rbegin(); iter != param_types.rend();
3607 ++iter) {
3608 Consume(*iter);
3609 }
3610 Generate(return_types, data);
3611 return;
3612 }
3613
3614 int bottom_primitives = 0;
3615
3616 while (static_cast<int>(param_types.size()) > bottom_primitives &&
3617 primitive(param_types[bottom_primitives])) {
3618 bottom_primitives++;
3619 }
3620 int return_index =
3621 bottom_primitives > 0 ? (data->get<uint8_t>() % bottom_primitives) : -1;
3622 for (int i = static_cast<int>(param_types.size() - 1); i > return_index;
3623 --i) {
3624 Consume(param_types[i]);
3625 }
3626 for (int i = return_index; i > 0; --i) {
3627 Convert(param_types[i], param_types[i - 1]);
3628 builder_->EmitI32Const(0);
3629 builder_->Emit(kExprSelect);
3630 }
3631 DCHECK(!return_types.empty());
3632 if (return_index >= 0) {
3633 Convert(param_types[0], return_types[0]);
3634 Generate(return_types + 1, data);
3635 } else {
3636 Generate(return_types, data);
3637 }
3638 }
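// Worked example (illustrative): params (i32, f64) on the stack, returns
// (i64), return_index = 1. The loop converts the topmost f64 to i32 (the
// type below it), then i32.const 0 plus select collapses the two values into
// one, which is finally converted to the i64 return; any further return
// values would be generated from scratch.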
3639
3640 void InitializeNonDefaultableLocals(DataRange* data) {
3641 for (uint32_t i = 0; i < locals_.size(); i++) {
3642 if (!locals_[i].is_defaultable()) {
3643 GenerateRef(locals_[i].heap_type(), data, kNonNullable);
3644 builder_->EmitWithU32V(
3645 kExprLocalSet, i + static_cast<uint32_t>(
3646 builder_->signature()->parameter_count()));
3647 }
3648 }
3649 locals_initialized_ = true;
3650 }
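// Illustration: a local of non-defaultable type such as (ref $struct0) gets
// an explicit value here, e.g. via struct.new, followed by local.set on its
// absolute index (parameters precede locals in the index space).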
3651
3652 private:
3653 bool recursion_limit_reached() {
3654 return recursion_depth >= kMaxRecursionDepth;
3655 }
3656
3657 const WasmModuleGenerationOptions options_;
3658 WasmFunctionBuilder* const builder_;
3659 std::vector<std::vector<ValueType>> blocks_;
3660 const std::vector<ModuleTypeIndex>& functions_;
3661 std::vector<ValueType> locals_;
3662 std::vector<ValueType> globals_;
3663 std::vector<uint8_t> mutable_globals_; // indexes into {globals_}.
3664 uint32_t recursion_depth = 0;
3665 std::vector<int> catch_blocks_;
3666 const std::vector<ModuleTypeIndex>& structs_;
3667 const std::vector<ModuleTypeIndex>& arrays_;
3668 const StringImports& string_imports_;
3669 bool locals_initialized_;
3670};
3671
3672WasmInitExpr GenerateInitExpr(Zone* zone, DataRange& range,
3673 WasmModuleBuilder* builder, ValueType type,
3674 const std::vector<ModuleTypeIndex>& structs,
3675 const std::vector<ModuleTypeIndex>& arrays,
3676 uint32_t recursion_depth);
3677
3678class ModuleGen {
3679 public:
3680 explicit ModuleGen(Zone* zone, WasmModuleGenerationOptions options,
3681 WasmModuleBuilder* fn, DataRange* module_range,
3682 uint8_t num_functions, uint8_t num_structs,
3683 uint8_t num_arrays, uint8_t num_signatures)
3684 : zone_(zone),
3685 options_(options),
3686 builder_(fn),
3687 module_range_(module_range),
3688 num_functions_(num_functions),
3689 num_structs_(num_structs),
3690 num_arrays_(num_arrays),
3691 num_types_(num_signatures + num_structs + num_arrays) {}
3692
3693 // Generates and adds a random number of memories.
3694 void GenerateRandomMemories() {
3695 int num_memories = 1 + (module_range_->get<uint8_t>() % kMaxMemories);
3696 for (int i = 0; i < num_memories; i++) {
3697 uint8_t random_byte = module_range_->get<uint8_t>();
3698 bool mem64 = random_byte & 1;
3699 bool has_maximum = random_byte & 2;
3700 static_assert(kV8MaxWasmMemory64Pages <= kMaxUInt32);
3701 uint32_t max_supported_pages =
3702 mem64 ? max_mem64_pages() : max_mem32_pages();
3703 uint32_t min_pages =
3704 module_range_->get<uint32_t>() % (max_supported_pages + 1);
3705 if (has_maximum) {
3706 uint32_t max_pages =
3707 std::max(min_pages, module_range_->get<uint32_t>() %
3708 (max_supported_pages + 1));
3709 if (mem64) {
3710 builder_->AddMemory64(min_pages, max_pages);
3711 } else {
3712 builder_->AddMemory(min_pages, max_pages);
3713 }
3714 } else {
3715 if (mem64) {
3716 builder_->AddMemory64(min_pages);
3717 } else {
3718 builder_->AddMemory(min_pages);
3719 }
3720 }
3721 }
3722 }
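// Sketch of the per-memory bit decoding above (illustrative values):
//   random_byte = 0b01 -> mem64, no maximum  => AddMemory64(min_pages)
//   random_byte = 0b10 -> mem32 with maximum => AddMemory(min_pages, max_pages)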
3723
3724 // Puts the types into random recursive groups.
3725 std::map<uint8_t, uint8_t> GenerateRandomRecursiveGroups(
3726 uint8_t kNumDefaultArrayTypes) {
3727 // (Type_index -> end of explicit rec group).
3728 std::map<uint8_t, uint8_t> explicit_rec_groups;
3729 uint8_t current_type_index = 0;
3730
3731 // The default array types are each in their own recgroup.
3732 for (uint8_t i = 0; i < kNumDefaultArrayTypes; i++) {
3733 explicit_rec_groups.emplace(current_type_index, current_type_index);
3734 builder_->AddRecursiveTypeGroup(current_type_index++, 1);
3735 }
3736
3737 while (current_type_index < num_types_) {
3738 // First, pick a random start for the next group. We allow it to be
3739 // beyond the end of types (i.e., we add no further recursive groups).
3740 uint8_t group_start = module_range_->get<uint8_t>() %
3741 (num_types_ - current_type_index + 1) +
3742 current_type_index;
3743 DCHECK_GE(group_start, current_type_index);
3744 current_type_index = group_start;
3745 if (group_start < num_types_) {
3746 // If we did not reach the end of the types, pick a random group size.
3747 uint8_t group_size =
3748 module_range_->get<uint8_t>() % (num_types_ - group_start) + 1;
3749 DCHECK_LE(group_start + group_size, num_types_);
3750 for (uint8_t i = group_start; i < group_start + group_size; i++) {
3751 explicit_rec_groups.emplace(i, group_start + group_size - 1);
3752 }
3753 builder_->AddRecursiveTypeGroup(group_start, group_size);
3754 current_type_index += group_size;
3755 }
3756 }
3757 return explicit_rec_groups;
3758 }
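// Example outcome (illustrative): with 2 default array types and 6 types
// total, a possible map is {0->0, 1->1, 3->5, 4->5, 5->5}: singleton groups
// for the default arrays, type 2 outside any explicit group, and types 3..5
// forming one rec group that ends at index 5.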
3759
3760 // Generates and adds random struct types.
3761 void GenerateRandomStructs(
3762 const std::map<uint8_t, uint8_t>& explicit_rec_groups,
3763 std::vector<ModuleTypeIndex>& struct_types, uint8_t& current_type_index,
3764 uint8_t kNumDefaultArrayTypes) {
3765 uint8_t last_struct_type_index = current_type_index + num_structs_;
3766 for (; current_type_index < last_struct_type_index; current_type_index++) {
3767 auto rec_group = explicit_rec_groups.find(current_type_index);
3768 uint8_t current_rec_group_end = rec_group != explicit_rec_groups.end()
3769 ? rec_group->second
3770 : current_type_index;
3771
3772 ModuleTypeIndex supertype = kNoSuperType;
3773 uint8_t num_fields =
3774 module_range_->get<uint8_t>() % (kMaxStructFields + 1);
3775
3776 uint32_t existing_struct_types =
3777 current_type_index - kNumDefaultArrayTypes;
3778 if (existing_struct_types > 0 && module_range_->get<bool>()) {
3779 supertype = ModuleTypeIndex{module_range_->get<uint8_t>() %
3780 existing_struct_types +
3781 kNumDefaultArrayTypes};
3782 num_fields += builder_->GetStructType(supertype)->field_count();
3783 }
3784 StructType::Builder struct_builder(zone_, num_fields, false);
3785
3786 // Add all fields from super type.
3787 uint32_t field_index = 0;
3788 if (supertype != kNoSuperType) {
3789 const StructType* parent = builder_->GetStructType(supertype);
3790 for (; field_index < parent->field_count(); ++field_index) {
3791 // TODO(14034): This could also be any sub type of the supertype's
3792 // element type.
3793 struct_builder.AddField(parent->field(field_index),
3794 parent->mutability(field_index));
3795 }
3796 }
3797 for (; field_index < num_fields; field_index++) {
3798 // Notes:
3799 // - We allow a type to only have non-nullable fields of types that
3800 // are defined earlier. This way we avoid infinite non-nullable
3801 // constructions. Also relevant for arrays and functions.
3802 // - On the other hand, nullable fields can be picked up to the end of
3803 // the current recursive group.
3804 // - We exclude the non-nullable generic types arrayref, anyref,
3805 // structref, eqref and externref from the fields of structs and
3806 // arrays. This is so that GenerateInitExpr has a way to break a
3807 // recursion between a struct/array field and those types
3808 // ((ref extern) gets materialized through (ref any)).
3809 ValueType type = GetValueTypeHelper(
3810 options_, module_range_, current_rec_group_end + 1,
3811 current_type_index, kIncludeNumericTypes, kIncludePackedTypes,
3812 kExcludeSomeGenerics);
3813
3814 bool mutability = module_range_->get<bool>();
3815 struct_builder.AddField(type, mutability);
3816 }
3817 StructType* struct_fuz = struct_builder.Build();
3818 // TODO(14034): Generate some final types too.
3819 ModuleTypeIndex index =
3820 builder_->AddStructType(struct_fuz, false, supertype);
3821 struct_types.push_back(index);
3822 }
3823 }
3824
3825 // Creates and adds random array types.
3826 void GenerateRandomArrays(
3827 const std::map<uint8_t, uint8_t>& explicit_rec_groups,
3828 std::vector<ModuleTypeIndex>& array_types, uint8_t& current_type_index) {
3829 uint32_t last_struct_type_index = current_type_index + num_structs_;
3830 for (; current_type_index < num_structs_ + num_arrays_;
3831 current_type_index++) {
3832 auto rec_group = explicit_rec_groups.find(current_type_index);
3833 uint8_t current_rec_group_end = rec_group != explicit_rec_groups.end()
3834 ? rec_group->second
3835 : current_type_index;
3836 ValueType type =
3837 GetValueTypeHelper(options_, module_range_, current_rec_group_end + 1,
3838 current_type_index, kIncludeNumericTypes,
3839 kIncludePackedTypes, kExcludeSomeGenerics);
3840 ModuleTypeIndex supertype = kNoSuperType;
3841 if (current_type_index > last_struct_type_index &&
3842 module_range_->get<bool>()) {
3843 // Do not include the default array types, because they are final.
3844 uint8_t existing_array_types =
3845 current_type_index - last_struct_type_index;
3846 supertype = ModuleTypeIndex{
3847 last_struct_type_index +
3848 (module_range_->get<uint8_t>() % existing_array_types)};
3849 // TODO(14034): This could also be any sub type of the supertype's
3850 // element type.
3851 type = builder_->GetArrayType(supertype)->element_type();
3852 }
3853 ArrayType* array_fuz = zone_->New<ArrayType>(type, true);
3854 // TODO(14034): Generate some final types too.
3855 ModuleTypeIndex index =
3856 builder_->AddArrayType(array_fuz, false, supertype);
3857 array_types.push_back(index);
3858 }
3859 }
3860
3861 enum SigKind { kFunctionSig, kExceptionSig };
3862
3863 FunctionSig* GenerateSig(SigKind sig_kind, int num_types) {
3864 // Generate enough parameters to spill some to the stack.
3865 int num_params = int{module_range_->get<uint8_t>()} % (kMaxParameters + 1);
3866 int num_returns =
3867 sig_kind == kFunctionSig
3868 ? int{module_range_->get<uint8_t>()} % (kMaxReturns + 1)
3869 : 0;
3870
3871 FunctionSig::Builder builder(zone_, num_returns, num_params);
3872 for (int i = 0; i < num_returns; ++i) {
3873 builder.AddReturn(GetValueType(options_, module_range_, num_types));
3874 }
3875 for (int i = 0; i < num_params; ++i) {
3876 builder.AddParam(GetValueType(options_, module_range_, num_types));
3877 }
3878 return builder.Get();
3879 }
3880
3881 // Creates and adds random function signatures.
3882 void GenerateRandomFunctionSigs(
3883 const std::map<uint8_t, uint8_t>& explicit_rec_groups,
3884 std::vector<ModuleTypeIndex>& function_signatures,
3885 uint8_t& current_type_index, bool kIsFinal) {
3886 // Recursive groups consist of recursive types that came with the WasmGC
3887 // proposal.
3888 DCHECK_IMPLIES(!options_.generate_wasm_gc(), explicit_rec_groups.empty());
3889
3890 for (; current_type_index < num_types_; current_type_index++) {
3891 auto rec_group = explicit_rec_groups.find(current_type_index);
3892 uint8_t current_rec_group_end = rec_group != explicit_rec_groups.end()
3893 ? rec_group->second
3894 : current_type_index;
3895 FunctionSig* sig = GenerateSig(kFunctionSig, current_rec_group_end + 1);
3896 ModuleTypeIndex signature_index =
3897 builder_->ForceAddSignature(sig, kIsFinal);
3898 function_signatures.push_back(signature_index);
3899 }
3900 }
3901
3902 void GenerateRandomExceptions(uint8_t num_exceptions) {
3903 for (int i = 0; i < num_exceptions; ++i) {
3904 FunctionSig* sig = GenerateSig(kExceptionSig, num_types_);
3905 builder_->AddTag(sig);
3906 }
3907 }
3908
3909 // Adds the "wasm:js-string" (plus text-decoder/text-encoder) imports to the module.
3910 StringImports AddImportedStringImports() {
3911 static constexpr ModuleTypeIndex kArrayI8{0};
3912 static constexpr ModuleTypeIndex kArrayI16{1};
3913 StringImports strings;
3914 strings.array_i8 = kArrayI8;
3915 strings.array_i16 = kArrayI16;
3916 static constexpr ValueType kRefExtern = kWasmRefExtern;
3917 static constexpr ValueType kExternRef = kWasmExternRef;
3918 static constexpr ValueType kI32 = kWasmI32;
3919 static constexpr ValueType kRefA8 =
3920 ValueType::Ref(kArrayI8, kNotShared, RefTypeKind::kArray);
3921 static constexpr ValueType kRefNullA8 =
3922 ValueType::RefNull(kArrayI8, kNotShared, RefTypeKind::kArray);
3923 static constexpr ValueType kRefNullA16 =
3924 ValueType::RefNull(kArrayI16, kNotShared, RefTypeKind::kArray);
3925
3926 // Shorthands: "r" = nullable "externref",
3927 // "e" = non-nullable "ref extern".
3928 static constexpr ValueType kReps_e_i[] = {kRefExtern, kI32};
3929 static constexpr ValueType kReps_e_rr[] = {kRefExtern, kExternRef,
3930 kExternRef};
3931 static constexpr ValueType kReps_e_rii[] = {kRefExtern, kExternRef, kI32,
3932 kI32};
3933 static constexpr ValueType kReps_i_ri[] = {kI32, kExternRef, kI32};
3934 static constexpr ValueType kReps_i_rr[] = {kI32, kExternRef, kExternRef};
3935 static constexpr ValueType kReps_from_a16[] = {kRefExtern, kRefNullA16,
3936 kI32, kI32};
3937 static constexpr ValueType kReps_from_a8[] = {kRefExtern, kRefNullA8, kI32,
3938 kI32};
3939 static constexpr ValueType kReps_into_a16[] = {kI32, kExternRef,
3940 kRefNullA16, kI32};
3941 static constexpr ValueType kReps_into_a8[] = {kI32, kExternRef, kRefNullA8,
3942 kI32};
3943 static constexpr ValueType kReps_to_a8[] = {kRefA8, kExternRef};
3944
3945 static constexpr FunctionSig kSig_e_i(1, 1, kReps_e_i);
3946 static constexpr FunctionSig kSig_e_r(1, 1, kReps_e_rr);
3947 static constexpr FunctionSig kSig_e_rr(1, 2, kReps_e_rr);
3948 static constexpr FunctionSig kSig_e_rii(1, 3, kReps_e_rii);
3949
3950 static constexpr FunctionSig kSig_i_r(1, 1, kReps_i_ri);
3951 static constexpr FunctionSig kSig_i_ri(1, 2, kReps_i_ri);
3952 static constexpr FunctionSig kSig_i_rr(1, 2, kReps_i_rr);
3953 static constexpr FunctionSig kSig_from_a16(1, 3, kReps_from_a16);
3954 static constexpr FunctionSig kSig_from_a8(1, 3, kReps_from_a8);
3955 static constexpr FunctionSig kSig_into_a16(1, 3, kReps_into_a16);
3956 static constexpr FunctionSig kSig_into_a8(1, 3, kReps_into_a8);
3957 static constexpr FunctionSig kSig_to_a8(1, 1, kReps_to_a8);
3958
3959 static constexpr base::Vector<const char> kJsString =
3960 base::StaticCharVector("wasm:js-string");
3961 static constexpr base::Vector<const char> kTextDecoder =
3962 base::StaticCharVector("wasm:text-decoder");
3963 static constexpr base::Vector<const char> kTextEncoder =
3964 base::StaticCharVector("wasm:text-encoder");
3965
3966#define STRINGFUNC(name, sig, group) \
3967 strings.name = builder_->AddImport(base::CStrVector(#name), &sig, group)
3968
3969 STRINGFUNC(cast, kSig_e_r, kJsString);
3970 STRINGFUNC(test, kSig_i_r, kJsString);
3971 STRINGFUNC(fromCharCode, kSig_e_i, kJsString);
3972 STRINGFUNC(fromCodePoint, kSig_e_i, kJsString);
3973 STRINGFUNC(charCodeAt, kSig_i_ri, kJsString);
3974 STRINGFUNC(codePointAt, kSig_i_ri, kJsString);
3975 STRINGFUNC(length, kSig_i_r, kJsString);
3976 STRINGFUNC(concat, kSig_e_rr, kJsString);
3977 STRINGFUNC(substring, kSig_e_rii, kJsString);
3978 STRINGFUNC(equals, kSig_i_rr, kJsString);
3979 STRINGFUNC(compare, kSig_i_rr, kJsString);
3980 STRINGFUNC(fromCharCodeArray, kSig_from_a16, kJsString);
3981 STRINGFUNC(intoCharCodeArray, kSig_into_a16, kJsString);
3982 STRINGFUNC(measureStringAsUTF8, kSig_i_r, kTextEncoder);
3983 STRINGFUNC(encodeStringIntoUTF8Array, kSig_into_a8, kTextEncoder);
3984 STRINGFUNC(encodeStringToUTF8Array, kSig_to_a8, kTextEncoder);
3985 STRINGFUNC(decodeStringFromUTF8Array, kSig_from_a8, kTextDecoder);
3986
3987#undef STRINGFUNC
3988
3989 return strings;
3990 }
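// For example (expansion of the macro above), the "concat" line becomes
//   strings.concat = builder_->AddImport(base::CStrVector("concat"),
//                                        &kSig_e_rr, kJsString);
// i.e. an import from module "wasm:js-string" with signature
// (externref, externref) -> (ref extern).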
3991
3992 // Creates and adds random tables.
3993 void GenerateRandomTables(const std::vector<ModuleTypeIndex>& array_types,
3994 const std::vector<ModuleTypeIndex>& struct_types) {
3995 int num_tables = module_range_->get<uint8_t>() % kMaxTables + 1;
3996 int are_table64 = module_range_->get<uint8_t>();
3997 static_assert(
3998 kMaxTables <= 8,
3999 "Too many tables. Use more random bits to choose their address type.");
4000 const int max_table_size = MaxTableSize();
4001 for (int i = 0; i < num_tables; i++) {
4002 uint32_t min_size = i == 0
4003 ? num_functions_
4004 : module_range_->get<uint8_t>() % max_table_size;
4005 uint32_t max_size =
4006 module_range_->get<uint8_t>() % (max_table_size - min_size) +
4007 min_size;
4008 // Table 0 is always funcref. This guarantees that
4009 // - call_indirect has at least one funcref table to work with,
4010 // - we have a place to reference all functions in the program, so they
4011 // count as "declared" for ref.func.
4012 bool force_funcref = i == 0;
4013 ValueType type =
4014 force_funcref
4015 ? kWasmFuncRef
4016 : GetValueTypeHelper(options_, module_range_, num_types_,
4017 num_types_, kExcludeNumericTypes,
4018 kExcludePackedTypes, kIncludeAllGenerics);
4019 bool use_initializer =
4020 !type.is_defaultable() || module_range_->get<bool>();
4021
4022 bool use_table64 = are_table64 & 1;
4023 are_table64 >>= 1;
4024 AddressType address_type =
4025 use_table64 ? AddressType::kI64 : AddressType::kI32;
4026 uint32_t table_index =
4027 use_initializer
4028 ? builder_->AddTable(
4029 type, min_size, max_size,
4030 GenerateInitExpr(zone_, *module_range_, builder_, type,
4031 struct_types, array_types, 0),
4032 address_type)
4033 : builder_->AddTable(type, min_size, max_size, address_type);
4034 if (type.is_reference_to(HeapType::kFunc)) {
4035 // For function tables, initialize them with functions from the program.
4036 // Currently, the fuzzer assumes that every funcref/(ref func) table
4037 // contains the functions in the program in the order they are defined.
4038 // TODO(11954): Consider generalizing this.
4039 WasmInitExpr init_expr = builder_->IsTable64(table_index)
4040 ? WasmInitExpr(static_cast<int64_t>(0))
4041 : WasmInitExpr(static_cast<int32_t>(0));
4042 WasmModuleBuilder::WasmElemSegment segment(zone_, type, table_index,
4043 init_expr);
4044 for (int entry_index = 0; entry_index < static_cast<int>(min_size);
4045 entry_index++) {
4046 segment.entries.emplace_back(
4047 WasmModuleBuilder::WasmElemSegment::Entry::kRefFuncEntry,
4048 builder_->NumImportedFunctions() +
4049 (entry_index % num_functions_));
4050 }
4051 builder_->AddElementSegment(std::move(segment));
4052 }
4053 }
4054 }
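// Illustration: a funcref table 0 with min_size 5 in a module with 3
// declared functions gets an element segment whose entries reference
// functions 0, 1, 2, 0, 1 (each offset by NumImportedFunctions()).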
4055
4056 // Creates and adds random globals.
4057 std::tuple<std::vector<ValueType>, std::vector<uint8_t>>
4058 GenerateRandomGlobals(const std::vector<ModuleTypeIndex>& array_types,
4059 const std::vector<ModuleTypeIndex>& struct_types) {
4060 int num_globals = module_range_->get<uint8_t>() % (kMaxGlobals + 1);
4061 std::vector<ValueType> globals;
4062 std::vector<uint8_t> mutable_globals;
4063 globals.reserve(num_globals);
4064 mutable_globals.reserve(num_globals);
4065
4066 for (int i = 0; i < num_globals; ++i) {
4067 ValueType type = GetValueType(options_, module_range_, num_types_);
4068 // 1/8 of globals are immutable.
4069 const bool mutability = (module_range_->get<uint8_t>() % 8) != 0;
4070 builder_->AddGlobal(type, mutability,
4071 GenerateInitExpr(zone_, *module_range_, builder_,
4072 type, struct_types, array_types, 0));
4073 globals.push_back(type);
4074 if (mutability) mutable_globals.push_back(static_cast<uint8_t>(i));
4075 }
4076
4077 return {globals, mutable_globals};
4078 }
4079
4080 private:
4081 Zone* const zone_;
4082 const WasmModuleGenerationOptions options_;
4083 WasmModuleBuilder* const builder_;
4084 DataRange* const module_range_;
4085 const uint8_t num_functions_;
4086 const uint8_t num_structs_;
4087 const uint8_t num_arrays_;
4088 const uint16_t num_types_;
4089};
4090
4091WasmInitExpr GenerateStructNewInitExpr(
4092 Zone* zone, DataRange& range, WasmModuleBuilder* builder,
4093 ModuleTypeIndex index, const std::vector<ModuleTypeIndex>& structs,
4094 const std::vector<ModuleTypeIndex>& arrays, uint32_t recursion_depth) {
4095 const StructType* struct_type = builder->GetStructType(index);
4096 bool use_new_default =
4097 std::all_of(struct_type->fields().begin(), struct_type->fields().end(),
4098 [](ValueType type) { return type.is_defaultable(); }) &&
4099 range.get<bool>();
4100
4101 if (use_new_default) {
4102 return WasmInitExpr::StructNewDefault(index);
4103 } else {
4104 ZoneVector<WasmInitExpr>* elements =
4105 zone->New<ZoneVector<WasmInitExpr>>(zone);
4106 int field_count = struct_type->field_count();
4107 for (int field_index = 0; field_index < field_count; field_index++) {
4108 elements->push_back(GenerateInitExpr(
4109 zone, range, builder, struct_type->field(field_index), structs,
4110 arrays, recursion_depth + 1));
4111 }
4112 return WasmInitExpr::StructNew(index, elements);
4113 }
4114}
4115
4116WasmInitExpr GenerateArrayInitExpr(Zone* zone, DataRange& range,
4117 WasmModuleBuilder* builder,
4118 ModuleTypeIndex index,
4119 const std::vector<ModuleTypeIndex>& structs,
4120 const std::vector<ModuleTypeIndex>& arrays,
4121 uint32_t recursion_depth) {
4122 constexpr int kMaxArrayLength = 20;
4123 uint8_t choice = range.get<uint8_t>() % 3;
4124 ValueType element_type = builder->GetArrayType(index)->element_type();
4125 if (choice == 0) {
4126 size_t element_count = range.get<uint8_t>() % kMaxArrayLength;
4127 if (!element_type.is_defaultable()) {
4128 // If the element type is not defaultable, limit the size to 0 or 1
4129 // to prevent having to create too many such elements on the value
4130 // stack. (With multiple non-nullable references, this can explode
4131 // in size very quickly.)
4132 element_count %= 2;
4133 }
4134 ZoneVector<WasmInitExpr>* elements =
4135 zone->New<ZoneVector<WasmInitExpr>>(zone);
4136 for (size_t i = 0; i < element_count; i++) {
4137 elements->push_back(GenerateInitExpr(zone, range, builder, element_type,
4138 structs, arrays,
4139 recursion_depth + 1));
4140 }
4141 return WasmInitExpr::ArrayNewFixed(index, elements);
4142 } else if (choice == 1 || !element_type.is_defaultable()) {
4143 // TODO(14034): Add other int expressions to length (same below).
4144 WasmInitExpr length = WasmInitExpr(range.get<uint8_t>() % kMaxArrayLength);
4145 WasmInitExpr init = GenerateInitExpr(zone, range, builder, element_type,
4146 structs, arrays, recursion_depth + 1);
4147 return WasmInitExpr::ArrayNew(zone, index, init, length);
4148 } else {
4149 WasmInitExpr length = WasmInitExpr(range.get<uint8_t>() % kMaxArrayLength);
4150 return WasmInitExpr::ArrayNewDefault(zone, index, length);
4151 }
4152}
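// Summary of the three branches above: choice 0 emits array.new_fixed with
// individually generated elements, choice 1 (or any non-defaultable element
// type) emits array.new from one init value plus a length, and choice 2
// emits array.new_default with just a length.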
4153
4154WasmInitExpr GenerateInitExpr(Zone* zone, DataRange& range,
4155 WasmModuleBuilder* builder, ValueType type,
4156 const std::vector<ModuleTypeIndex>& structs,
4157 const std::vector<ModuleTypeIndex>& arrays,
4158 uint32_t recursion_depth) {
4159 switch (type.kind()) {
4160 case kI8:
4161 case kI16:
4162 case kI32: {
4163 if (range.size() == 0 || recursion_depth >= kMaxRecursionDepth) {
4164 return WasmInitExpr(int32_t{0});
4165 }
4166 // 50% to generate a constant, 50% to generate a binary operator.
4167 uint8_t choice = range.get<uint8_t>() % 6;
4168 switch (choice) {
4169 case 0:
4170 case 1:
4171 case 2:
4172 if (choice % 2 == 0 && builder->NumGlobals()) {
4173 // Search for a matching global to emit a global.get.
4174 int num_globals = builder->NumGlobals();
4175 int start_index = range.get<uint8_t>() % num_globals;
4176 for (int i = 0; i < num_globals; ++i) {
4177 int index = (start_index + i) % num_globals;
4178 if (builder->GetGlobalType(index) == type &&
4179 !builder->IsMutableGlobal(index)) {
4180 return WasmInitExpr::GlobalGet(index);
4181 }
4182 }
4183 // Fall back to constant if no matching global was found.
4184 }
4185 return WasmInitExpr(range.getPseudoRandom<int32_t>());
4186 default:
4187 WasmInitExpr::Operator op = choice == 3 ? WasmInitExpr::kI32Add
4188 : choice == 4 ? WasmInitExpr::kI32Sub
4189 : WasmInitExpr::kI32Mul;
4190 return WasmInitExpr::Binop(
4191 zone, op,
4192 GenerateInitExpr(zone, range, builder, kWasmI32, structs, arrays,
4193 recursion_depth + 1),
4194 GenerateInitExpr(zone, range, builder, kWasmI32, structs, arrays,
4195 recursion_depth + 1));
4196 }
4197 }
4198 case kI64: {
4199 if (range.size() == 0 || recursion_depth >= kMaxRecursionDepth) {
4200 return WasmInitExpr(int64_t{0});
4201 }
4202 // 50% to generate a constant, 50% to generate a binary operator.
4203 uint8_t choice = range.get<uint8_t>() % 6;
4204 switch (choice) {
4205 case 0:
4206 case 1:
4207 case 2:
4208 return WasmInitExpr(range.get<int64_t>());
4209 default:
4210 WasmInitExpr::Operator op = choice == 3 ? WasmInitExpr::kI64Add
4211 : choice == 4 ? WasmInitExpr::kI64Sub
4212 : WasmInitExpr::kI64Mul;
4213 return WasmInitExpr::Binop(
4214 zone, op,
4215 GenerateInitExpr(zone, range, builder, kWasmI64, structs, arrays,
4216 recursion_depth + 1),
4217 GenerateInitExpr(zone, range, builder, kWasmI64, structs, arrays,
4218 recursion_depth + 1));
4219 }
4220 }
4221 case kF16:
4222 case kF32:
4223 return WasmInitExpr(0.0f);
4224 case kF64:
4225 return WasmInitExpr(0.0);
4226 case kS128: {
4227 uint8_t s128_const[kSimd128Size] = {0};
4228 return WasmInitExpr(s128_const);
4229 }
4230 case kRefNull: {
4231 bool null_only = false;
4232 switch (type.heap_representation()) {
4233 case HeapType::kNone:
4234 case HeapType::kNoFunc:
4235 case HeapType::kNoExtern:
4236 null_only = true;
4237 break;
4238 default:
4239 break;
4240 }
4241 if (range.size() == 0 || recursion_depth >= kMaxRecursionDepth ||
4242 null_only || (range.get<uint8_t>() % 4 == 0)) {
4243 return WasmInitExpr::RefNullConst(type.heap_type());
4244 }
4245 [[fallthrough]];
4246 }
4247 case kRef: {
4248 switch (type.heap_representation()) {
4249 case HeapType::kStruct: {
4250 ModuleTypeIndex index =
4251 structs[range.get<uint8_t>() % structs.size()];
4252 return GenerateStructNewInitExpr(zone, range, builder, index, structs,
4253 arrays, recursion_depth);
4254 }
4255 case HeapType::kAny: {
4256 // Do not use 0 as the determining value here, otherwise an exhausted
4257 // {range} will generate an infinite recursion with the {kExtern}
4258 // case.
4259 if (recursion_depth < kMaxRecursionDepth && range.size() > 0 &&
4260 range.get<uint8_t>() % 4 == 3) {
4261 return WasmInitExpr::AnyConvertExtern(
4262 zone, GenerateInitExpr(zone, range, builder,
4263 ValueType::RefMaybeNull(
4264 kWasmExternRef, type.nullability()),
4265 structs, arrays, recursion_depth + 1));
4266 }
4267 [[fallthrough]];
4268 }
4269 case HeapType::kEq: {
4270 uint8_t choice = range.get<uint8_t>() % 3;
4271 HeapType subtype = choice == 0 ? kWasmI31Ref
4272 : choice == 1 ? kWasmArrayRef
4273 : kWasmStructRef;
4274
4275 return GenerateInitExpr(
4276 zone, range, builder,
4277 ValueType::RefMaybeNull(subtype, type.nullability()), structs,
4278 arrays, recursion_depth);
4279 }
4280 case HeapType::kFunc: {
4281 uint32_t index =
4282 range.get<uint32_t>() % (builder->NumDeclaredFunctions() +
4283 builder->NumImportedFunctions());
4284 return WasmInitExpr::RefFuncConst(index);
4285 }
4286 case HeapType::kExtern:
4287 return WasmInitExpr::ExternConvertAny(
4288 zone, GenerateInitExpr(zone, range, builder,
4289 ValueType::RefMaybeNull(
4290 kWasmAnyRef, type.nullability()),
4291 structs, arrays, recursion_depth + 1));
4292 case HeapType::kI31:
4293 return WasmInitExpr::RefI31(
4294 zone, GenerateInitExpr(zone, range, builder, kWasmI32, structs,
4295 arrays, recursion_depth + 1));
4296 case HeapType::kArray: {
4297 ModuleTypeIndex index = arrays[range.get<uint8_t>() % arrays.size()];
4298 return GenerateArrayInitExpr(zone, range, builder, index, structs,
4299 arrays, recursion_depth);
4300 }
4301 case HeapType::kNone:
4302 case HeapType::kNoFunc:
4303 case HeapType::kNoExtern:
4304 UNREACHABLE();
4305 default: {
4306 ModuleTypeIndex index = type.ref_index();
4307 if (builder->IsStructType(index)) {
4308 return GenerateStructNewInitExpr(zone, range, builder, index,
4309 structs, arrays, recursion_depth);
4310 } else if (builder->IsArrayType(index)) {
4311 return GenerateArrayInitExpr(zone, range, builder, index, structs,
4312 arrays, recursion_depth);
4313 } else {
4314 DCHECK(builder->IsSignature(index));
4315 for (int i = 0; i < builder->NumDeclaredFunctions(); ++i) {
4316 if (builder->GetFunction(i)->sig_index() == index) {
4317 return WasmInitExpr::RefFuncConst(
4318 builder->NumImportedFunctions() + i);
4319 }
4320 }
4321 // There has to be at least one function per signature, otherwise
4322 // the init expression is unable to generate a non-nullable
4323 // reference with the correct type.
4324 UNREACHABLE();
4325 }
4326 UNREACHABLE();
4327 }
4328 }
4329 }
4330 case kVoid:
4331 case kTop:
4332 case kBottom:
4333 UNREACHABLE();
4334 }
4335}
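// Example shape (illustrative) of a generated i32 init expression:
//   (i32.add (global.get $g) (i32.mul (i32.const 7) (i32.const 3)))
// constants, immutable global.gets and the three binops above compose
// recursively, bounded by kMaxRecursionDepth and the remaining input size.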
4336
4337} // namespace
4338
4339 base::Vector<uint8_t> GenerateRandomWasmModule(
4340 Zone* zone, WasmModuleGenerationOptions options,
4341 base::Vector<const uint8_t> data) {
4342 WasmModuleBuilder builder(zone);
4343
4344 // Split input data in two parts:
4345 // - One for the "module" (types, globals, ..)
4346 // - One for all the function bodies
4347 // This prevents spending too large a portion on the module, which would
4348 // result in uninteresting function bodies.
4349 DataRange module_range(data);
4350 DataRange functions_range = module_range.split();
4351 std::vector<ModuleTypeIndex> function_signatures;
4352
4353 // At least 1 function is needed.
4354 int max_num_functions = MaxNumOfFunctions();
4355 CHECK_GE(max_num_functions, 1);
4356 uint8_t num_functions = 1 + (module_range.get<uint8_t>() % max_num_functions);
4357
4358 // In case of WasmGC expressions:
4359 // Add struct and array types first so that we get a chance to generate
4360 // these types in function signatures.
4361 // Currently, `BodyGen` assumes this order for struct/array/signature
4362 // definitions.
4363 // Without WasmGC, structs and arrays cannot be used at all.
4364 uint8_t num_structs = 0;
4365 uint8_t num_arrays = 0;
4366 std::vector<ModuleTypeIndex> array_types;
4367 std::vector<ModuleTypeIndex> struct_types;
4368
4369 // In case of WasmGC expressions:
4370 // We always add two default array types with mutable i8 and i16 elements,
4371 // respectively.
4372 constexpr uint8_t kNumDefaultArrayTypesForWasmGC = 2;
4373 if (options.generate_wasm_gc()) {
4374 // We need at least one struct and one array in order to support
4375 // WasmInitExpr for abstract types.
4376 num_structs = 1 + module_range.get<uint8_t>() % kMaxStructs;
4377 num_arrays = kNumDefaultArrayTypesForWasmGC +
4378 module_range.get<uint8_t>() % kMaxArrays;
4379 }
4380
4381 uint8_t num_signatures = num_functions;
4382 ModuleGen gen_module(zone, options, &builder, &module_range, num_functions,
4383 num_structs, num_arrays, num_signatures);
4384
4385 // Add random number of memories.
4386 // TODO(v8:14674): Add a mode without declaring any memory or memory
4387 // instructions.
4388 gen_module.GenerateRandomMemories();
4389
4390 uint8_t current_type_index = 0;
4391 // In case of WasmGC expressions, we create recursive groups for the recursive
4392 // types.
4393 std::map<uint8_t, uint8_t> explicit_rec_groups;
4394 if (options.generate_wasm_gc()) {
4395 // Put the types into random recursive groups.
4396 explicit_rec_groups = gen_module.GenerateRandomRecursiveGroups(
4397 kNumDefaultArrayTypesForWasmGC);
4398
4399 // Add default array types.
4400 static constexpr ModuleTypeIndex kArrayI8{0};
4401 static constexpr ModuleTypeIndex kArrayI16{1};
4402 {
4403 ArrayType* a8 = zone->New<ArrayType>(kWasmI8, 1);
4404 CHECK_EQ(kArrayI8, builder.AddArrayType(a8, true, kNoSuperType));
4405 array_types.push_back(kArrayI8);
4406 ArrayType* a16 = zone->New<ArrayType>(kWasmI16, 1);
4407 CHECK_EQ(kArrayI16, builder.AddArrayType(a16, true, kNoSuperType));
4408 array_types.push_back(kArrayI16);
4409 }
4410 static_assert(kNumDefaultArrayTypesForWasmGC == kArrayI16.index + 1);
4411 current_type_index = kNumDefaultArrayTypesForWasmGC;
4412
4413 // Add randomly generated structs.
4414 gen_module.GenerateRandomStructs(explicit_rec_groups, struct_types,
4415 current_type_index,
4416 kNumDefaultArrayTypesForWasmGC);
4417 DCHECK_EQ(current_type_index, kNumDefaultArrayTypesForWasmGC + num_structs);
4418
4419 // Add randomly generated arrays.
4420 gen_module.GenerateRandomArrays(explicit_rec_groups, array_types,
4421 current_type_index);
4422 DCHECK_EQ(current_type_index, num_structs + num_arrays);
4423 }
4424
4425 // We keep the signature for the first (main) function constant.
4426 constexpr bool kIsFinal = true;
4427 auto kMainFnSig = FixedSizeSignature<ValueType>::Returns(kWasmI32).Params(
4428 kWasmI32, kWasmI32, kWasmI32);
4429 function_signatures.push_back(
4430 builder.ForceAddSignature(&kMainFnSig, kIsFinal));
4431 current_type_index++;
4432
4433 // Add randomly generated signatures.
4434 gen_module.GenerateRandomFunctionSigs(
4435 explicit_rec_groups, function_signatures, current_type_index, kIsFinal);
4436 DCHECK_EQ(current_type_index, num_functions + num_structs + num_arrays);
4437
4438 // Add exceptions.
4439 int num_exceptions = 1 + (module_range.get<uint8_t>() % kMaxExceptions);
4440 gen_module.GenerateRandomExceptions(num_exceptions);
4441
4442 // In case of WasmGC expressions:
4443 // Add the "wasm:js-string" imports to the module. They may or may not be
4444 // used later, but they'll always be available.
4445 StringImports strings = options.generate_wasm_gc()
4446 ? gen_module.AddImportedStringImports()
4447 : StringImports();
4448
4449 // Generate function declarations before tables. This will be needed once we
4450 // have typed-function tables.
4451 std::vector<WasmFunctionBuilder*> functions;
4452 functions.reserve(num_functions);
4453 for (uint8_t i = 0; i < num_functions; i++) {
4454 // If we are using wasm-gc, we cannot allow signature normalization
4455 // performed by adding a function by {FunctionSig}, because we emit
4456 // everything in one recursive group which blocks signature
4457 // canonicalization.
4458 // TODO(14034): Relax this when we implement proper recursive-group
4459 // support.
4460 functions.push_back(builder.AddFunction(function_signatures[i]));
4461 }
4462
4463 // Generate tables before function bodies, so they are available for table
4464 // operations. Generate tables before the globals, so tables don't
4465 // accidentally use globals in their initializer expressions.
4466 // Always generate at least one table for call_indirect.
4467 gen_module.GenerateRandomTables(array_types, struct_types);
4468
4469 // Add globals.
4470 auto [globals, mutable_globals] =
4471 gen_module.GenerateRandomGlobals(array_types, struct_types);
4472
4473 // Add passive data segments.
4474 int num_data_segments = module_range.get<uint8_t>() % kMaxPassiveDataSegments;
4475 for (int i = 0; i < num_data_segments; i++) {
4476 GeneratePassiveDataSegment(&module_range, &builder);
4477 }
4478
4479 // Generate function bodies.
4480 for (int i = 0; i < num_functions; ++i) {
4481 WasmFunctionBuilder* f = functions[i];
4482 // On the last function don't split the DataRange but just use the
4483 // existing DataRange.
4484 DataRange function_range = i != num_functions - 1
4485 ? functions_range.split()
4486 : std::move(functions_range);
4487 BodyGen gen_body(options, f, function_signatures, globals, mutable_globals,
4488 struct_types, array_types, strings, &function_range);
4489 const FunctionSig* sig = f->signature();
4490 base::Vector<const ValueType> return_types(sig->returns().begin(),
4491 sig->return_count());
4492 gen_body.InitializeNonDefaultableLocals(&function_range);
4493 gen_body.Generate(return_types, &function_range);
4494 f->Emit(kExprEnd);
4495 if (i == 0) builder.AddExport(base::CStrVector("main"), f);
4496 }
4497
4498 ZoneBuffer buffer{zone};
4499 builder.WriteTo(&buffer);
4500 return base::VectorOf(buffer);
4501}
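// A minimal usage sketch (assumed caller-side code, e.g. fuzzer glue; the
// zone and input names are placeholders, not part of this file):
//   Zone zone(&allocator, ZONE_NAME);
//   base::Vector<uint8_t> wire_bytes = GenerateRandomWasmModule(
//       &zone, WasmModuleGenerationOptions::All(), fuzz_input);
//   // wire_bytes holds a complete module exporting "main" with signature
//   // (i32, i32, i32) -> i32.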
4502
4503// Used by the initializer expression fuzzer.
4504 base::Vector<uint8_t> GenerateWasmModuleForInitExpressions(
4505 Zone* zone, base::Vector<const uint8_t> data, size_t* count) {
4506 // Don't limit expressions for the initializer expression fuzzer.
4507 constexpr WasmModuleGenerationOptions options =
4508 WasmModuleGenerationOptions::All();
4509 WasmModuleBuilder builder(zone);
4510
4511 DataRange module_range(data);
4512 std::vector<ModuleTypeIndex> function_signatures;
4513 std::vector<ModuleTypeIndex> array_types;
4514 std::vector<ModuleTypeIndex> struct_types;
4515
4516 int num_globals = 1 + module_range.get<uint8_t>() % (kMaxGlobals + 1);
4517
4518 uint8_t num_functions = num_globals;
4519 *count = num_functions;
4520
4521 // We need at least one struct and one array in order to support
4522 // WasmInitExpr for abstract types.
4523 uint8_t num_structs = 1 + module_range.get<uint8_t>() % kMaxStructs;
4524 uint8_t num_arrays = 1 + module_range.get<uint8_t>() % kMaxArrays;
4525 uint16_t num_types = num_functions + num_structs + num_arrays;
4526
4527 uint8_t current_type_index = 0;
4528
4529 // Add randomly generated types.
4530 uint8_t last_struct_type = current_type_index + num_structs;
4531 for (; current_type_index < last_struct_type; current_type_index++) {
4532 ModuleTypeIndex supertype = kNoSuperType;
4533 uint8_t num_fields = module_range.get<uint8_t>() % (kMaxStructFields + 1);
4534
4535 uint32_t existing_struct_types = current_type_index;
4536 if (existing_struct_types > 0 && module_range.get<bool>()) {
4537 supertype =
4538 ModuleTypeIndex{module_range.get<uint8_t>() % existing_struct_types};
4539 num_fields += builder.GetStructType(supertype)->field_count();
4540 }
4541 // TODO(403372470): Add support for custom descriptors.
4542 StructType::Builder struct_builder(zone, num_fields, false);
4543
4544 // Add all fields from the supertype.
4545 uint32_t field_index = 0;
4546 if (supertype != kNoSuperType) {
4547 const StructType* parent = builder.GetStructType(supertype);
4548 for (; field_index < parent->field_count(); ++field_index) {
4549 struct_builder.AddField(parent->field(field_index),
4550 parent->mutability(field_index));
4551 }
4552 }
4553 for (; field_index < num_fields; field_index++) {
4554 ValueType type = GetValueTypeHelper(
4555 options, &module_range, current_type_index, current_type_index,
4556 kIncludeNumericTypes, kIncludePackedTypes, kExcludeSomeGenerics);
4557 // To prevent huge initializer expressions, limit the existence of
4558 // non-nullable references to the first 2 struct types (for non-inherited
4559 // fields).
4560 if (current_type_index >= 2 && type.is_non_nullable()) {
4561 type = type.AsNullable();
4562 }
4563
4564 bool mutability = module_range.get<bool>();
4565 struct_builder.AddField(type, mutability);
4566 }
4567 StructType* struct_fuz = struct_builder.Build();
4568 ModuleTypeIndex index = builder.AddStructType(struct_fuz, false, supertype);
4569 struct_types.push_back(index);
4570 }
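 // [Editorial note] The loop above realizes wasm-gc width subtyping: a
 // subtype first repeats all of its supertype's fields with identical types
 // and mutability (the copy loop), and only then appends its own fields.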
4571
4572 for (; current_type_index < num_structs + num_arrays; current_type_index++) {
4573 ValueType type = GetValueTypeHelper(
4574 options, &module_range, current_type_index, current_type_index,
4575 kIncludeNumericTypes, kIncludePackedTypes, kExcludeSomeGenerics);
4576 ModuleTypeIndex supertype = kNoSuperType;
4577 if (current_type_index > last_struct_type && module_range.get<bool>()) {
4578 uint32_t existing_array_types = current_type_index - last_struct_type;
4579 supertype =
4580 ModuleTypeIndex{last_struct_type +
4581 (module_range.get<uint8_t>() % existing_array_types)};
4582 type = builder.GetArrayType(supertype)->element_type();
4583 }
4584 ArrayType* array_fuz = zone->New<ArrayType>(type, true);
4585 ModuleTypeIndex index = builder.AddArrayType(array_fuz, false, supertype);
4586 array_types.push_back(index);
4587 }
4588
4589 // Choose global types and create function signatures.
4590 constexpr bool kIsFinal = true;
4591 std::vector<ValueType> globals;
4592 for (; current_type_index < num_types; current_type_index++) {
4593 ValueType return_type = GetValueTypeHelper(
4594 options, &module_range, num_types - num_globals,
4595 num_types - num_globals, kIncludeNumericTypes, kExcludePackedTypes,
4596 kIncludeAllGenerics, kExcludeS128);
4597 globals.push_back(return_type);
4598 // Create a new function signature for each global. These functions will be
4599 // used to compare against the initializer value of the global.
4600 FunctionSig::Builder sig_builder(zone, 1, 0);
4601 sig_builder.AddReturn(return_type);
4602 ModuleTypeIndex signature_index =
4603 builder.ForceAddSignature(sig_builder.Get(), kIsFinal);
4604 function_signatures.push_back(signature_index);
4605 }
4606
4607 std::vector<WasmFunctionBuilder*> functions;
4608 functions.reserve(num_functions);
4609 for (uint8_t i = 0; i < num_functions; i++) {
4610 functions.push_back(builder.AddFunction(function_signatures[i]));
4611 }
4612
4613 // Create globals.
4614 std::vector<uint8_t> mutable_globals;
4615 std::vector<WasmInitExpr> init_exprs;
4616 init_exprs.reserve(num_globals);
4617 mutable_globals.reserve(num_globals);
4618 CHECK_EQ(globals.size(), num_globals);
4619 uint64_t mutabilities = module_range.get<uint64_t>();
4620 for (int i = 0; i < num_globals; ++i) {
4621 ValueType type = globals[i];
4622 // 50% of globals are immutable.
4623 const bool mutability = mutabilities & 1;
4624 mutabilities >>= 1;
4625 WasmInitExpr init_expr = GenerateInitExpr(
4626 zone, module_range, &builder, type, struct_types, array_types, 0);
4627 init_exprs.push_back(init_expr);
4628 auto buffer = zone->AllocateVector<char>(8);
4629 size_t len = base::SNPrintF(buffer, "g%i", i);
4630 builder.AddExportedGlobal(type, mutability, init_expr,
4631 {buffer.begin(), len});
4632 if (mutability) mutable_globals.push_back(static_cast<uint8_t>(i));
4633 }
4634
4635 // Create one function per global, whose body is that global's
4636 // initializer expression.
4637 for (int i = 0; i < num_functions; ++i) {
4638 WasmFunctionBuilder* f = functions[i];
4639 f->EmitFromInitializerExpression(init_exprs[i]);
4640 auto buffer = zone->AllocateVector<char>(8);
4641 size_t len = base::SNPrintF(buffer, "f%i", i);
4642 builder.AddExport({buffer.begin(), len}, f);
4643 }
4644
4645 ZoneBuffer buffer{zone};
4646 builder.WriteTo(&buffer);
4647 return base::VectorOf(buffer);
4648}
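// ---------------------------------------------------------------------------
// [Editorial note, not part of the original file.] The module built above
// pairs each exported global "g<i>" with an exported function "f<i>" whose
// body is the same initializer expression. For every i < *count, a harness
// can therefore call f<i> and compare the result against the value of g<i>.
// The generated shape is roughly (types and initializers vary per input):
//
//   (global (export "g0") i32 (i32.const 42))
//   (func (export "f0") (result i32) i32.const 42)
// ---------------------------------------------------------------------------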
4649
4650namespace {
4651
4652bool HasSameReturns(const FunctionSig* a, const FunctionSig* b) {
4653 if (a->return_count() != b->return_count()) return false;
4654 for (size_t i = 0; i < a->return_count(); ++i) {
4655 if (a->GetReturn(i) != b->GetReturn(i)) return false;
4656 }
4657 return true;
4658}
4659
4660void EmitDeoptAndReturnValues(BodyGen gen_body, WasmFunctionBuilder* f,
4661 const FunctionSig* target_sig,
4662 ModuleTypeIndex target_sig_index,
4663 uint32_t global_index, uint32_t table_index,
4664 bool use_table64, DataRange* data) {
4665 base::Vector<const ValueType> return_types = f->signature()->returns();
4666 // Split the return types randomly and generate some values before the
4667 // deopting call and some afterwards. (This makes sure that we have deopts
4668 // where there are values on the wasm value stack which are not used by the
4669 // deopting call itself.)
4670 uint32_t returns_split = data->get<uint8_t>() % (return_types.size() + 1);
4671 if (returns_split) {
4672 gen_body.Generate(return_types.SubVector(0, returns_split), data);
4673 }
4674 gen_body.Generate(target_sig->parameters(), data);
4675 f->EmitWithU32V(kExprGlobalGet, global_index);
4676 if (use_table64) {
4677 f->Emit(kExprI64UConvertI32);
4678 }
4679 // Tail calls can only be emitted if the return types match.
4680 bool same_returns = HasSameReturns(target_sig, f->signature());
4681 size_t option_count = (same_returns + 1) * 2;
4682 switch (data->get<uint8_t>() % option_count) {
4683 case 0:
4684 // Emit call_ref.
4685 f->Emit(kExprTableGet);
4686 f->EmitU32V(table_index);
4687 f->EmitWithPrefix(kExprRefCast);
4688 f->EmitI32V(target_sig_index);
4689 f->EmitWithU32V(kExprCallRef, target_sig_index);
4690 break;
4691 case 1:
4692 // Emit call_indirect.
4693 f->EmitWithU32V(kExprCallIndirect, target_sig_index);
4694 f->EmitByte(table_index);
4695 break;
4696 case 2:
4697 // Emit return_call_ref.
4698 f->Emit(kExprTableGet);
4699 f->EmitU32V(table_index);
4700 f->EmitWithPrefix(kExprRefCast);
4701 f->EmitI32V(target_sig_index);
4702 f->EmitWithU32V(kExprReturnCallRef, target_sig_index);
4703 break;
4704 case 3:
4705 // Emit return_call_indirect.
4706 f->EmitWithU32V(kExprReturnCallIndirect, target_sig_index);
4707 f->EmitByte(table_index);
4708 break;
4709 default:
4710 UNREACHABLE();
4711 }
4712 gen_body.ConsumeAndGenerate(target_sig->returns(),
4713 return_types.SubVectorFrom(returns_split), data);
4714}
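// [Editorial note, not part of the original file.] For case 0 above, the
// emitted fragment corresponds to roughly this WAT (assuming an i32-indexed
// table):
//
//   global.get $call_target_index ;; which table slot to call
//   table.get $table              ;; funcref of the current call target
//   ref.cast (ref $target_sig)
//   call_ref $target_sig
//
// Rewriting the global between runs changes the call target, which is what
// lets the deopt fuzzer invalidate previously collected call-target feedback.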
4715
4716void EmitCallAndReturnValues(BodyGen gen_body, WasmFunctionBuilder* f,
4717 WasmFunctionBuilder* callee, uint32_t table_index,
4718 bool use_table64, DataRange* data) {
4719 const FunctionSig* callee_sig = callee->signature();
4720 uint32_t callee_index =
4721 callee->func_index() + gen_body.NumImportedFunctions();
4722
4723 base::Vector<const ValueType> return_types = f->signature()->returns();
4724 // Split the return types randomly and generate some values before the
4725 // call and some afterwards to create more interesting test cases.
4726 uint32_t returns_split = data->get<uint8_t>() % (return_types.size() + 1);
4727 if (returns_split) {
4728 gen_body.Generate(return_types.SubVector(0, returns_split), data);
4729 }
4730 gen_body.Generate(callee_sig->parameters(), data);
4731 // Tail calls can only be emitted if the return types match.
4732 bool same_returns = HasSameReturns(callee_sig, f->signature());
4733 size_t option_count = (same_returns + 1) * 3;
4734 switch (data->get<uint8_t>() % option_count) {
4735 case 0:
4736 f->EmitWithU32V(kExprCallFunction, callee_index);
4737 break;
4738 case 1:
4739 f->EmitWithU32V(kExprRefFunc, callee_index);
4740 f->EmitWithU32V(kExprCallRef, callee->sig_index());
4741 break;
4742 case 2:
4743 // Note that this assumes that the declared function index is the same as
4744 // the index of the function in the table.
4745 use_table64 ? f->EmitI64Const(callee->func_index())
4746 : f->EmitI32Const(callee->func_index());
4747 f->EmitWithU32V(kExprCallIndirect, callee->sig_index());
4748 f->EmitByte(table_index);
4749 break;
4750 case 3:
4751 f->EmitWithU32V(kExprReturnCall, callee_index);
4752 break;
4753 case 4:
4754 f->EmitWithU32V(kExprRefFunc, callee_index);
4755 f->EmitWithU32V(kExprReturnCallRef, callee->sig_index());
4756 break;
4757 case 5:
4758 // Note that this assumes that the declared function index is the same as
4759 // the index of the function in the table.
4760 use_table64 ? f->EmitI64Const(callee->func_index())
4761 : f->EmitI32Const(callee->func_index());
4762 f->EmitWithU32V(kExprReturnCallIndirect, callee->sig_index());
4763 f->EmitByte(table_index);
4764 break;
4765 default:
4766 UNREACHABLE();
4767 }
4768 gen_body.ConsumeAndGenerate(callee_sig->returns(),
4769 return_types.SubVectorFrom(returns_split), data);
4770}
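// [Editorial note, not part of the original file.] Cases 2 and 5 above show
// the table64 subtlety: a 64-bit table needs its index pushed as an i64
// (EmitI64Const), whereas EmitDeoptAndReturnValues widens the i32 global via
// kExprI64UConvertI32 (i64.extend_i32_u) before the indirect call.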
4771} // anonymous namespace
4772
4773base::Vector<uint8_t> GenerateWasmModuleForDeopt(
4774 Zone* zone, base::Vector<const uint8_t> data,
4775 std::vector<std::string>& callees, std::vector<std::string>& inlinees) {
4776 // Don't limit the features for the deopt fuzzer.
4777 constexpr WasmModuleGenerationOptions options =
4778 WasmModuleGenerationOptions::All();
4779 WasmModuleBuilder builder(zone);
4780
4781 DataRange range(data);
4782 std::vector<ModuleTypeIndex> function_signatures;
4783 std::vector<ModuleTypeIndex> array_types;
4784 std::vector<ModuleTypeIndex> struct_types;
4785
4786 const int kMaxCallTargets = 5;
4787 const int kMaxInlinees = 3;
4788
4789 // We need at least 2 call targets to be able to trigger a deopt.
4790 const int num_call_targets = 2 + range.get<uint8_t>() % (kMaxCallTargets - 1);
4791 const int num_inlinees = range.get<uint8_t>() % (kMaxInlinees + 1);
4792
4793 // 1 main function + x inlinees + x callees.
4794 uint8_t num_functions = 1 + num_inlinees + num_call_targets;
4795 // 1 signature for all the callees, 1 signature for the main function +
4796 // 1 signature per inlinee.
4797 uint8_t num_signatures = 2 + num_inlinees;
4798
4799 uint8_t num_structs = 1 + range.get<uint8_t>() % kMaxStructs;
4800 // For WasmGC:
4801 // We always add two default array types with mutable i8 and i16 elements,
4802 // respectively.
4803 constexpr uint8_t kNumDefaultArrayTypesForWasmGC = 2;
4804 uint8_t num_arrays =
4805 range.get<uint8_t>() % kMaxArrays + kNumDefaultArrayTypesForWasmGC;
4806 // Function signatures are intentionally not counted in num_types here.
4807 uint16_t num_types = num_structs + num_arrays;
4808
4809 uint8_t current_type_index = kNumDefaultArrayTypesForWasmGC;
4810
4811 // Add randomly generated types.
4812 ModuleGen gen_module(zone, options, &builder, &range, num_functions,
4813 num_structs, num_arrays, num_signatures);
4814
4815 gen_module.GenerateRandomMemories();
4816 std::map<uint8_t, uint8_t> explicit_rec_groups =
4817 gen_module.GenerateRandomRecursiveGroups(kNumDefaultArrayTypesForWasmGC);
4818 // Add default array types.
4819 static constexpr ModuleTypeIndex kArrayI8{0};
4820 static constexpr ModuleTypeIndex kArrayI16{1};
4821 {
4822 ArrayType* a8 = zone->New<ArrayType>(kWasmI8, true);
4823 CHECK_EQ(kArrayI8, builder.AddArrayType(a8, true, kNoSuperType));
4824 array_types.push_back(kArrayI8);
4825 ArrayType* a16 = zone->New<ArrayType>(kWasmI16, true);
4826 CHECK_EQ(kArrayI16, builder.AddArrayType(a16, true, kNoSuperType));
4827 array_types.push_back(kArrayI16);
4828 }
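 // [Editorial note] These two canonical array types (mutable i8 and i16
 // arrays at indices 0 and 1) are the types the "wasm:js-string" imports
 // operate on; compare the array_i8 / array_i16 members of StringImports.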
4829 static_assert(kNumDefaultArrayTypesForWasmGC == kArrayI16.index + 1);
4830 gen_module.GenerateRandomStructs(explicit_rec_groups, struct_types,
4831 current_type_index,
4832 kNumDefaultArrayTypesForWasmGC);
4833 DCHECK_EQ(current_type_index, kNumDefaultArrayTypesForWasmGC + num_structs);
4834 gen_module.GenerateRandomArrays(explicit_rec_groups, array_types,
4835 current_type_index);
4836 DCHECK_EQ(current_type_index, num_structs + num_arrays);
4837
4838 // Create signature for call target.
4839 std::vector<ValueType> return_types =
4840 GenerateTypes(options, &range, num_types);
4841 constexpr bool kIsFinal = true;
4842 const FunctionSig* target_sig = CreateSignature(
4843 builder.zone(), base::VectorOf(GenerateTypes(options, &range, num_types)),
4844 base::VectorOf(return_types));
4845 ModuleTypeIndex target_sig_index =
4846 builder.ForceAddSignature(target_sig, kIsFinal);
4847
4848 function_signatures.reserve(num_call_targets);
4849 for (int i = 0; i < num_call_targets; ++i) {
4850 // Simplification: All call targets of a call_ref / call_indirect have the
4851 // same signature.
4852 function_signatures.push_back(target_sig_index);
4853 }
4854
4855 // Create signatures for inlinees.
4856 // Reuse the same return types with a certain chance. Matching return
4857 // types increase the chance of emitting return calls.
4858 uint8_t use_same_return = range.get<uint8_t>();
4859 for (int i = 0; i < num_inlinees; ++i) {
4860 if ((use_same_return & (1 << i)) == 0) {
4861 return_types = GenerateTypes(options, &range, num_types);
4862 }
4863 const FunctionSig* inlinee_sig = CreateSignature(
4864 builder.zone(),
4865 base::VectorOf(GenerateTypes(options, &range, num_types)),
4866 base::VectorOf(return_types));
4867 function_signatures.push_back(
4868 builder.ForceAddSignature(inlinee_sig, kIsFinal));
4869 }
4870
4871 // Create signature for main function.
4872 const FunctionSig* main_sig =
4873 CreateSignature(builder.zone(), base::VectorOf({ValueType{kWasmI32}}),
4874 base::VectorOf({ValueType{kWasmI32}}));
4875 function_signatures.push_back(builder.ForceAddSignature(main_sig, kIsFinal));
4876
4877 DCHECK_EQ(function_signatures.back().index,
4878 num_structs + num_arrays + num_signatures - 1);
4879
4880 // This needs to be done after the signatures are added.
4881 int num_exceptions = 1 + range.get<uint8_t>() % kMaxExceptions;
4882 gen_module.GenerateRandomExceptions(num_exceptions);
4883 StringImports strings = gen_module.AddImportedStringImports();
4884
4885 // Add functions to module.
4886 std::vector<WasmFunctionBuilder*> functions;
4887 DCHECK_EQ(num_functions, function_signatures.size());
4888 functions.reserve(num_functions);
4889 for (uint8_t i = 0; i < num_functions; i++) {
4890 functions.push_back(builder.AddFunction(function_signatures[i]));
4891 }
4892
4893 uint32_t num_entries = num_call_targets + num_inlinees;
4894 bool use_table64 = range.get<bool>();
4895 AddressType address_type =
4896 use_table64 ? AddressType::kI64 : AddressType::kI32;
4897 uint32_t table_index =
4898 builder.AddTable(kWasmFuncRef, num_entries, num_entries, address_type);
4899 WasmModuleBuilder::WasmElemSegment segment(
4900 zone, kWasmFuncRef, table_index,
4901 use_table64 ? WasmInitExpr(int64_t{0}) : WasmInitExpr(0));
4902 for (uint32_t i = 0; i < num_entries; i++) {
4903 segment.entries.emplace_back(
4904 WasmModuleBuilder::WasmElemSegment::Entry::kRefFuncEntry,
4905 builder.NumImportedFunctions() + i);
4906 }
4907 builder.AddElementSegment(std::move(segment));
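 // [Editorial note] At this point the table layout is fixed: slots
 // [0, num_call_targets) hold the call targets, followed by the inlinees in
 // declaration order ("main" is not in the table). This is the "declared
 // function index == table slot" assumption that EmitCallAndReturnValues and
 // EmitDeoptAndReturnValues rely on.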
4908
4909 gen_module.GenerateRandomTables(array_types, struct_types);
4910
4911 // Create global for call target index.
4912 // Simplification: This global is used to specify the call target at the deopt
4913 // point instead of passing the call target around dynamically.
4914 uint32_t global_index =
4915 builder.AddExportedGlobal(kWasmI32, true, WasmInitExpr(0),
4916 base::StaticCharVector("call_target_index"));
4917
4918 // Create inlinee bodies.
4919 for (int i = 0; i < num_inlinees; ++i) {
4920 uint32_t declared_func_index = i + num_call_targets;
4921 WasmFunctionBuilder* f = functions[declared_func_index];
4922 DataRange function_range = range.split();
4923 BodyGen gen_body(options, f, function_signatures, {}, {}, struct_types,
4924 array_types, strings, &function_range);
4925 gen_body.InitializeNonDefaultableLocals(&function_range);
4926 if (i == 0) {
4927 // For the innermost inlinee, emit the deopt point (e.g. a call_ref).
4928 EmitDeoptAndReturnValues(gen_body, f, target_sig, target_sig_index,
4929 global_index, table_index, use_table64,
4930 &function_range);
4931 } else {
4932 // All other inlinees call the previous inlinee.
4933 uint32_t callee_declared_index = declared_func_index - 1;
4934 EmitCallAndReturnValues(gen_body, f, functions[callee_declared_index],
4935 table_index, use_table64, &function_range);
4936 }
4937 f->Emit(kExprEnd);
4938 auto buffer = zone->AllocateVector<char>(32);
4939 size_t len = base::SNPrintF(buffer, "inlinee_%i", i);
4940 builder.AddExport({buffer.begin(), len}, f);
4941 inlinees.emplace_back(buffer.begin(), len);
4942 }
4943
4944 // Create main function body.
4945 {
4946 uint32_t declared_func_index = num_functions - 1;
4947 WasmFunctionBuilder* f = functions[declared_func_index];
4948 DataRange function_range = range.split();
4949 BodyGen gen_body(options, f, function_signatures, {}, {}, struct_types,
4950 array_types, strings, &function_range);
4951 gen_body.InitializeNonDefaultableLocals(&function_range);
4952 // Store the call target (parameter 0) in the call_target_index global.
4953 f->EmitWithU32V(kExprLocalGet, 0);
4954 f->EmitWithU32V(kExprGlobalSet, 0);
4955 // Call inlinee or emit deopt.
4956 if (num_inlinees == 0) {
4957 // If we don't have any inlinees, directly emit the deopt point.
4958 EmitDeoptAndReturnValues(gen_body, f, target_sig, target_sig_index,
4959 global_index, table_index, use_table64,
4960 &function_range);
4961 } else {
4962 // Otherwise call the outermost inlinee.
4963 uint32_t callee_declared_index = declared_func_index - 1;
4964 EmitCallAndReturnValues(gen_body, f, functions[callee_declared_index],
4965 table_index, use_table64, &function_range);
4966 }
4967
4968 f->Emit(kExprEnd);
4969 builder.AddExport(base::StaticCharVector("main"), f);
4970 }
4971
4972 // Create call target bodies.
4973 // This is done last as we care much less about the content of these
4974 // functions, so it's less of an issue if there aren't (m)any random bytes
4975 // left.
4976 for (int i = 0; i < num_call_targets; ++i) {
4977 WasmFunctionBuilder* f = functions[i];
4978 DataRange function_range = range.split();
4979 BodyGen gen_body(options, f, function_signatures, {}, {}, struct_types,
4980 array_types, strings, &function_range);
4981 const FunctionSig* sig = f->signature();
4982 base::Vector<const ValueType> target_return_types(sig->returns().begin(),
4983 sig->return_count());
4984 gen_body.InitializeNonDefaultableLocals(&function_range);
4985 gen_body.Generate(target_return_types, &function_range);
4986
4987 f->Emit(kExprEnd);
4988 auto buffer = zone->AllocateVector<char>(32);
4989 size_t len = base::SNPrintF(buffer, "callee_%i", i);
4990 builder.AddExport({buffer.begin(), len}, f);
4991 callees.emplace_back(buffer.begin(), len);
4992 }
4993
4994 ZoneBuffer buffer{zone};
4995 builder.WriteTo(&buffer);
4996 return base::VectorOf(buffer);
4997}
4998
4999} // namespace v8::internal::wasm::fuzzing
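// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the original file.] Expected harness flow
// for GenerateWasmModuleForDeopt; RunMain is a hypothetical helper that
// instantiates the module and invokes the exported "main" with the given
// call-target index:
//
//   std::vector<std::string> callees, inlinees;
//   base::Vector<uint8_t> wire_bytes =
//       GenerateWasmModuleForDeopt(&zone, fuzz_input, callees, inlinees);
//   // 1. RunMain(0) in unoptimized code: collects call-target feedback.
//   // 2. Tier up, then RunMain(0) again: the optimizer inlines/speculates
//   //    on target 0 at the deopt point.
//   // 3. RunMain(1): the global now selects a different target, so the
//   //    speculation fails and execution deoptimizes mid-function.
//
// Comparing all runs against a reference execution flags miscompiles around
// the deopt point.
// ---------------------------------------------------------------------------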