v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
wasm-serialization.cc
Go to the documentation of this file.
1// Copyright 2017 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
9#include "src/debug/debug.h"
10#include "src/runtime/runtime.h"
12#include "src/utils/ostreams.h"
13#include "src/utils/version.h"
25
26namespace v8::internal::wasm {
27
28namespace {
29constexpr uint8_t kLazyFunction = 2;
30constexpr uint8_t kEagerFunction = 3;
31constexpr uint8_t kTurboFanFunction = 4;
32
33// TODO(bbudge) Try to unify the various implementations of readers and writers
34// in Wasm, e.g. StreamProcessor and ZoneBuffer, with these.
35class Writer {
36 public:
37 explicit Writer(base::Vector<uint8_t> buffer)
38 : start_(buffer.begin()), end_(buffer.end()), pos_(buffer.begin()) {}
39
40 size_t bytes_written() const { return pos_ - start_; }
41 uint8_t* current_location() const { return pos_; }
42 size_t current_size() const { return end_ - pos_; }
43 base::Vector<uint8_t> current_buffer() const {
44 return {current_location(), current_size()};
45 }
46
47 template <typename T>
48 void Write(const T& value) {
49 DCHECK_GE(current_size(), sizeof(T));
50 WriteUnalignedValue(reinterpret_cast<Address>(current_location()), value);
51 pos_ += sizeof(T);
52 if (v8_flags.trace_wasm_serialization) {
53 StdoutStream{} << "wrote: " << static_cast<size_t>(value)
54 << " sized: " << sizeof(T) << std::endl;
55 }
56 }
57
58 template <typename T>
59 void WriteVector(const base::Vector<T> v) {
60 base::Vector<const uint8_t> bytes = base::Vector<const uint8_t>::cast(v);
61 DCHECK_GE(current_size(), bytes.size());
62 if (!bytes.empty()) {
63 memcpy(current_location(), bytes.begin(), bytes.size());
64 pos_ += bytes.size();
65 }
66 if (v8_flags.trace_wasm_serialization) {
67 StdoutStream{} << "wrote vector of " << v.size()
68 << " elements (total size " << bytes.size() << " bytes)"
69 << std::endl;
70 }
71 }
72
73 void Skip(size_t size) { pos_ += size; }
74
75 private:
76 uint8_t* const start_;
77 uint8_t* const end_;
78 uint8_t* pos_;
79};
80
81class Reader {
82 public:
83 explicit Reader(base::Vector<const uint8_t> buffer)
84 : start_(buffer.begin()), end_(buffer.end()), pos_(buffer.begin()) {}
85
86 size_t bytes_read() const { return pos_ - start_; }
87 const uint8_t* current_location() const { return pos_; }
88 size_t current_size() const { return end_ - pos_; }
89 base::Vector<const uint8_t> current_buffer() const {
90 return {current_location(), current_size()};
91 }
92
93 template <typename T>
94 T Read() {
95 DCHECK_GE(current_size(), sizeof(T));
96 T value =
97 ReadUnalignedValue<T>(reinterpret_cast<Address>(current_location()));
98 pos_ += sizeof(T);
99 if (v8_flags.trace_wasm_serialization) {
100 StdoutStream{} << "read: " << static_cast<size_t>(value)
101 << " sized: " << sizeof(T) << std::endl;
102 }
103 return value;
104 }
105
106 template <typename T>
107 base::Vector<const T> ReadVector(size_t size) {
108 DCHECK_GE(current_size(), size);
109 base::Vector<const uint8_t> bytes{pos_, size * sizeof(T)};
110 pos_ += size * sizeof(T);
111 if (v8_flags.trace_wasm_serialization) {
112 StdoutStream{} << "read vector of " << size << " elements of size "
113 << sizeof(T) << " (total size " << size * sizeof(T) << ")"
114 << std::endl;
115 }
116 return base::Vector<const T>::cast(bytes);
117 }
118
119 void Skip(size_t size) { pos_ += size; }
120
121 private:
122 const uint8_t* const start_;
123 const uint8_t* const end_;
124 const uint8_t* pos_;
125};
126
// Emits the version-sensitive module header. The five fields (magic number,
// V8 version hash, supported CPU features, flag hash, enabled WASM feature
// set) together must total exactly WasmSerializer::kHeaderSize bytes, as
// asserted below. The write order here is the wire format; do not reorder.
127void WriteHeader(Writer* writer, WasmEnabledFeatures enabled_features) {
128 DCHECK_EQ(0, writer->bytes_written());
129 writer->Write(SerializedData::kMagicNumber);
130 writer->Write(Version::Hash());
131 writer->Write(static_cast<uint32_t>(CpuFeatures::SupportedFeatures()));
132 writer->Write(FlagList::Hash());
133 writer->Write(enabled_features.ToIntegral());
134 DCHECK_EQ(WasmSerializer::kHeaderSize, writer->bytes_written());
135}
136
137// On Intel, call sites are encoded as a displacement. For linking and for
138// serialization/deserialization, we want to store/retrieve a tag (the function
139// index). On Intel, that means accessing the raw displacement.
140// On ARM64, call sites are encoded as either a literal load or a direct branch.
141// Other platforms simply require accessing the target address.
// Stores a 32-bit {tag} (e.g. a function index) into the call site described
// by {rinfo}, in an architecture-specific, reversible way; GetWasmCalleeTag
// below is the inverse.
// NOTE(review): listing lines 145 and 169 are missing from this extraction;
// the preprocessor branches below may omit a statement each — verify against
// upstream before editing.
142void SetWasmCalleeTag(WritableRelocInfo* rinfo, uint32_t tag) {
143#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
144 DCHECK(rinfo->HasTargetAddressAddress());
// Intel: the call's 4-byte displacement directly holds the tag.
146 WriteUnalignedValue(rinfo->target_address_address(), tag);
147#elif V8_TARGET_ARCH_ARM64
148 Instruction* instr = reinterpret_cast<Instruction*>(rinfo->pc());
149 if (instr->IsLdrLiteralX()) {
// Literal-load call: park the tag in the constant pool entry.
150 WriteUnalignedValue(rinfo->constant_pool_entry_address(),
151 static_cast<Address>(tag));
152 } else {
153 DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
// Direct branch: encode the tag as a branch offset of tag instructions,
// recovered later via ImmPCOffset() / kInstrSize in GetWasmCalleeTag.
154 instr->SetBranchImmTarget<UncondBranchType>(
155 reinterpret_cast<Instruction*>(rinfo->pc() + tag * kInstrSize));
156 }
157#elif V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_RISCV32
158 Instruction* instr = reinterpret_cast<Instruction*>(rinfo->pc());
159 if (instr->IsAUIPC()) {
// Long-branch pair (auipc + jalr): patch the combined 32-bit offset.
160 Instr auipc = instr->InstructionBits();
161 Instr jalr = reinterpret_cast<Instruction*>(rinfo->pc() + 1 * kInstrSize)
162 ->InstructionBits();
// The +0x800 accounts for the rounding in the auipc/jalr hi/lo split.
163 DCHECK(is_int32(tag + 0x800));
164 Assembler::PatchBranchlongOffset(rinfo->pc(), auipc, jalr, (int32_t)tag,
165 nullptr);
166 } else {
167 Assembler::set_target_address_at(rinfo->pc(), rinfo->constant_pool(),
168 static_cast<Address>(tag), nullptr,
// NOTE(review): the final argument(s) of this call (listing line 169)
// are missing from this extraction.
170 }
171#else
// Generic: store the tag as the target address via the rmode-specific
// setter; no icache flush since this buffer is never executed.
172 Address addr = static_cast<Address>(tag);
173 if (rinfo->rmode() == RelocInfo::EXTERNAL_REFERENCE) {
174 rinfo->set_target_external_reference(addr, SKIP_ICACHE_FLUSH);
175 } else if (rinfo->rmode() == RelocInfo::WASM_STUB_CALL) {
176 rinfo->set_wasm_stub_call_address(addr);
177 } else {
178 rinfo->set_target_address(addr, SKIP_ICACHE_FLUSH);
179 }
180#endif
181}
182
// Inverse of SetWasmCalleeTag: recovers the 32-bit tag previously stored at
// the call site described by {rinfo}.
// NOTE(review): listing line 185 is missing from this extraction (likely a
// DCHECK mirroring the one in SetWasmCalleeTag).
183uint32_t GetWasmCalleeTag(RelocInfo* rinfo) {
184#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
// Intel: read the tag straight out of the call displacement.
186 return ReadUnalignedValue<uint32_t>(rinfo->target_address_address());
187#elif V8_TARGET_ARCH_ARM64
188 Instruction* instr = reinterpret_cast<Instruction*>(rinfo->pc());
189 if (instr->IsLdrLiteralX()) {
190 return ReadUnalignedValue<uint32_t>(rinfo->constant_pool_entry_address());
191 } else {
192 DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
// Undo SetWasmCalleeTag's encoding of the tag as a pc-relative branch
// offset measured in instructions.
193 return static_cast<uint32_t>(instr->ImmPCOffset() / kInstrSize);
194 }
195#elif V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_RISCV32
196 Instruction* instr = reinterpret_cast<Instruction*>(rinfo->pc());
197 if (instr->IsAUIPC()) {
198 Instr auipc = instr->InstructionBits();
199 Instr jalr = reinterpret_cast<Instruction*>(rinfo->pc() + 1 * kInstrSize)
200 ->InstructionBits();
// ("Brachlong" is the upstream API spelling; do not "fix" it here.)
201 return Assembler::BrachlongOffset(auipc, jalr);
202 } else {
203 return static_cast<uint32_t>(rinfo->target_address());
204 }
205#else
206 Address addr;
207 if (rinfo->rmode() == RelocInfo::EXTERNAL_REFERENCE) {
208 addr = rinfo->target_external_reference();
209 } else if (rinfo->rmode() == RelocInfo::WASM_STUB_CALL) {
210 addr = rinfo->wasm_stub_call_address();
211 } else {
212 addr = rinfo->target_address();
213 }
214 return static_cast<uint32_t>(addr);
215#endif
216}
217
// Size of the per-function header emitted for each serialized TurboFan
// function. The field order below mirrors the sequence of writer->Write()
// calls in WriteCode and the reader->Read() calls in ReadCode; keep all
// three in sync.
218constexpr size_t kCodeHeaderSize = sizeof(uint8_t) + // code kind
219 sizeof(int) + // offset of constant pool
220 sizeof(int) + // offset of safepoint table
221 sizeof(int) + // offset of handler table
222 sizeof(int) + // offset of code comments
223 sizeof(int) + // unpadded binary size
224 sizeof(int) + // stack slots
225 sizeof(int) + // ool slots
226 sizeof(int) + // tagged parameter slots
227 sizeof(int) + // code size
228 sizeof(int) + // reloc size
229 sizeof(int) + // source positions size
230 sizeof(int) + // inlining positions size
231 sizeof(int) + // deopt data size
232 sizeof(int) + // protected instructions size
233 sizeof(WasmCode::Kind) + // code kind
234 sizeof(ExecutionTier); // tier
235
236// A List of all isolate-independent external references. This is used to create
237// a tag from the Address of an external reference and vice versa.
// NOTE(review): this class is partially garbled by the extraction — the
// constructor body (listing line 270), one lambda body (273), the macro-list
// expansions for the counts (281, 283, 285) and the member array declarations
// (288, 290, 294, 297) are missing. Comments below describe only what is
// visible.
238class ExternalReferenceList {
239 public:
240 ExternalReferenceList(const ExternalReferenceList&) = delete;
241 ExternalReferenceList& operator=(const ExternalReferenceList&) = delete;
242
// Maps an external-reference address back to its dense tag via binary
// search over tags_ordered_by_address_ (sorted in the constructor).
243 uint32_t tag_from_address(Address ext_ref_address) const {
244 auto tag_addr_less_than = [this](uint32_t tag, Address searched_addr) {
245 return external_reference_by_tag_[tag] < searched_addr;
246 };
247 auto it = std::lower_bound(std::begin(tags_ordered_by_address_),
248 std::end(tags_ordered_by_address_),
249 ext_ref_address, tag_addr_less_than);
250 DCHECK_NE(std::end(tags_ordered_by_address_), it);
251 uint32_t tag = *it;
// Round-trip check: the tag must map back to the exact same address.
252 DCHECK_EQ(address_from_tag(tag), ext_ref_address);
253 return tag;
254 }
255
// Inverse mapping: dense tag -> external-reference address.
256 Address address_from_tag(uint32_t tag) const {
257 DCHECK_GT(kNumExternalReferences, tag);
258 return external_reference_by_tag_[tag];
259 }
260
261 static const ExternalReferenceList& Get() {
262 static ExternalReferenceList list; // Lazily initialized.
263 return list;
264 }
265
266 private:
267 // Private constructor. There will only be a single instance of this object.
268 ExternalReferenceList() {
// NOTE(review): the loop body (listing line 270) is missing; presumably it
// initializes tags_ordered_by_address_[i] = i — confirm against upstream.
269 for (uint32_t i = 0; i < kNumExternalReferences; ++i) {
271 }
// NOTE(review): the comparator body (listing line 273) is missing.
272 auto addr_by_tag_less_than = [this](uint32_t a, uint32_t b) {
274 };
275 std::sort(std::begin(tags_ordered_by_address_),
276 std::end(tags_ordered_by_address_), addr_by_tag_less_than);
277 }
278
279#define COUNT_EXTERNAL_REFERENCE(name, ...) +1
280 static constexpr uint32_t kNumExternalReferencesList =
282 static constexpr uint32_t kNumExternalReferencesIntrinsics =
284 static constexpr uint32_t kNumExternalReferences =
286#undef COUNT_EXTERNAL_REFERENCE
287
289#define EXT_REF_ADDR(name, desc) ExternalReference::name().address(),
291#undef EXT_REF_ADDR
292#define RUNTIME_ADDR(name, ...) \
293 ExternalReference::Create(Runtime::k##name).address(),
295#undef RUNTIME_ADDR
296 };
298};
299
// Guarantees the lazily-initialized singleton above needs no static
// destructor (V8 disallows them).
300static_assert(std::is_trivially_destructible_v<ExternalReferenceList>,
301 "static destructors not allowed");
302
303} // namespace
304
306 public:
311
312 size_t Measure() const;
313 bool Write(Writer* writer);
314
315 private:
316 size_t MeasureCode(const WasmCode*) const;
317 void WriteHeader(Writer*, size_t total_code_size);
318 void WriteCode(const WasmCode*, Writer*,
320 void WriteTieringBudget(Writer* writer);
321
322 uint32_t CanonicalSigIdToModuleLocalTypeId(uint32_t canonical_sig_id);
323
327 // Map back canonical signature IDs to module-local IDs. Initialized lazily.
328 std::unordered_map<uint32_t, uint32_t> canonical_sig_ids_to_module_local_ids_;
329 bool write_called_ = false;
330 size_t total_written_code_ = 0;
331 int num_turbofan_functions_ = 0;
332};
333
335 const NativeModule* module, base::Vector<WasmCode* const> code_table,
337 : native_module_(module),
338 code_table_(code_table),
339 import_statuses_(import_statuses) {
341 // TODO(mtrofin): persist the export wrappers. Ideally, we'd only persist
342 // the unique ones, i.e. the cache.
343}
344
346 if (code == nullptr) return sizeof(uint8_t);
347 DCHECK_EQ(WasmCode::kWasmFunction, code->kind());
348 if (code->tier() != ExecutionTier::kTurbofan) {
349 return sizeof(uint8_t);
350 }
351 return kCodeHeaderSize + code->instructions().size() +
352 code->reloc_info().size() + code->source_positions().size() +
353 code->inlining_positions().size() +
354 code->protected_instructions_data().size() + code->deopt_data().size();
355}
356
358 // From {WriteHeader}:
359 size_t size = sizeof(WasmDetectedFeatures::StorageType) +
360 sizeof(size_t) + // total code size
361 sizeof(bool) + // all functions validated
362 sizeof(typename CompileTimeImportFlags::StorageType) +
363 sizeof(uint32_t) + // length of constants_module.
364 native_module_->compile_imports().constants_module().size() +
365 import_statuses_.size() * sizeof(WellKnownImport);
366
367 // From {WriteCode}, called repeatedly.
368 for (WasmCode* code : code_table_) {
369 size += MeasureCode(code);
370 }
371
372 // Tiering budget, wrote in {Write} directly.
373 size += native_module_->module()->num_declared_functions * sizeof(uint32_t);
374
375 return size;
376}
377
379 size_t total_code_size) {
380 // TODO(eholk): We need to properly preserve the flag whether the trap
381 // handler was used or not when serializing.
382
383 // Serialize the set of detected features; this contains
384 // - all features detected during module decoding,
385 // - all features detected during function body decoding (if lazy validation
386 // is disabled), and
387 // - some features detected during compilation; some might still be missing
388 // because installing code and publishing detected features is not atomic.
389 writer->Write(
390 native_module_->compilation_state()->detected_features().ToIntegral());
391
392 writer->Write(total_code_size);
393
394 // We do not ship lazy validation, so in most cases all functions will be
395 // validated. Thus only write out a single bit instead of serializing the
396 // information per function.
397 const bool fully_validated = !v8_flags.wasm_lazy_validation;
398 writer->Write(fully_validated);
399#ifdef DEBUG
400 if (fully_validated) {
401 const WasmModule* module = native_module_->module();
402 for (auto& function : module->declared_functions()) {
403 DCHECK(module->function_was_validated(function.func_index));
404 }
405 }
406#endif
407
408 const CompileTimeImports& compile_imports = native_module_->compile_imports();
409 const std::string& constants_module = compile_imports.constants_module();
410 writer->Write(compile_imports.flags().ToIntegral());
411 writer->Write(static_cast<uint32_t>(constants_module.size()));
412 writer->WriteVector(base::VectorOf(constants_module));
413 writer->WriteVector(base::VectorOf(import_statuses_));
414}
415
417 const WasmCode* code, Writer* writer,
418 const NativeModule::CallIndirectTargetMap& function_index_map) {
419 if (code == nullptr) {
420 writer->Write(kLazyFunction);
421 return;
422 }
423
424 DCHECK_EQ(WasmCode::kWasmFunction, code->kind());
425 // Only serialize TurboFan code, as Liftoff code can contain breakpoints or
426 // non-relocatable constants.
427 if (code->tier() != ExecutionTier::kTurbofan) {
428 // We check if the function has been executed already. If so, we serialize
429 // it as {kEagerFunction} so that upon deserialization the function will
430 // get eagerly compiled with Liftoff (if enabled). If the function has not
431 // been executed yet, we serialize it as {kLazyFunction}, and the function
432 // will not get compiled upon deserialization.
433 NativeModule* native_module = code->native_module();
434 uint32_t budget = native_module
436 native_module->module(), code->index())]
437 .load(std::memory_order_relaxed);
438 writer->Write(budget == static_cast<uint32_t>(v8_flags.wasm_tiering_budget)
439 ? kLazyFunction
440 : kEagerFunction);
441 return;
442 }
443
445 writer->Write(kTurboFanFunction);
446 // Write the size of the entire code section, followed by the code header.
447 writer->Write(code->constant_pool_offset());
448 writer->Write(code->safepoint_table_offset());
449 writer->Write(code->handler_table_offset());
450 writer->Write(code->code_comments_offset());
451 writer->Write(code->unpadded_binary_size());
452 writer->Write(code->stack_slots());
453 writer->Write(code->ool_spills());
454 writer->Write(code->raw_tagged_parameter_slots_for_serialization());
455 writer->Write(code->instructions().length());
456 writer->Write(code->reloc_info().length());
457 writer->Write(code->source_positions().length());
458 writer->Write(code->inlining_positions().length());
459 writer->Write(code->deopt_data().length());
460 writer->Write(code->protected_instructions_data().length());
461 writer->Write(code->kind());
462 writer->Write(code->tier());
463
464 // Get a pointer to the destination buffer, to hold relocated code.
465 uint8_t* serialized_code_start = writer->current_buffer().begin();
466 uint8_t* code_start = serialized_code_start;
467 size_t code_size = code->instructions().size();
468 writer->Skip(code_size);
469 // Write the reloc info, source positions, inlining positions and protected
470 // code.
471 writer->WriteVector(code->reloc_info());
472 writer->WriteVector(code->source_positions());
473 writer->WriteVector(code->inlining_positions());
474 writer->WriteVector(code->deopt_data());
475 writer->WriteVector(code->protected_instructions_data());
476#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_PPC64 || \
477 V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
478 // On platforms that don't support misaligned word stores, copy to an aligned
479 // buffer if necessary so we can relocate the serialized code.
480 std::unique_ptr<uint8_t[]> aligned_buffer;
481 if (!IsAligned(reinterpret_cast<Address>(serialized_code_start),
483 // 'uint8_t' does not guarantee an alignment but seems to work well enough
484 // in practice.
485 aligned_buffer.reset(new uint8_t[code_size]);
486 code_start = aligned_buffer.get();
487 }
488#endif
489 memcpy(code_start, code->instructions().begin(), code_size);
490 // Relocate the code.
491 constexpr int kMask =
499 RelocIterator orig_iter(code->instructions(), code->reloc_info(),
500 code->constant_pool(), kMask);
501
502 WritableJitAllocation jit_allocation =
504 reinterpret_cast<Address>(code_start), code->instructions().size(),
506 for (WritableRelocIterator iter(
507 jit_allocation, {code_start, code->instructions().size()},
508 code->reloc_info(),
509 reinterpret_cast<Address>(code_start) + code->constant_pool_offset(),
510 kMask);
511 !iter.done(); iter.next(), orig_iter.next()) {
512 RelocInfo::Mode mode = orig_iter.rinfo()->rmode();
513 switch (mode) {
515 Address orig_target = orig_iter.rinfo()->wasm_call_address();
516 uint32_t tag =
517 native_module_->GetFunctionIndexFromJumpTableSlot(orig_target);
518 SetWasmCalleeTag(iter.rinfo(), tag);
519 } break;
521 Address target = orig_iter.rinfo()->wasm_stub_call_address();
522 uint32_t tag = static_cast<uint32_t>(
523 native_module_->GetBuiltinInJumptableSlot(target));
524 SetWasmCalleeTag(iter.rinfo(), tag);
525 } break;
527 uint32_t canonical_sig_id = orig_iter.rinfo()->wasm_canonical_sig_id();
528 uint32_t module_local_sig_id =
529 CanonicalSigIdToModuleLocalTypeId(canonical_sig_id);
530 iter.rinfo()->set_wasm_canonical_sig_id(module_local_sig_id);
531 } break;
533 WasmCodePointer target =
535 uint32_t function_index = function_index_map.at(target);
536 iter.rinfo()->set_wasm_code_pointer_table_entry(
537 WasmCodePointer{function_index}, SKIP_ICACHE_FLUSH);
538 } break;
540 Address orig_target = orig_iter.rinfo()->target_external_reference();
541 uint32_t ext_ref_tag =
542 ExternalReferenceList::Get().tag_from_address(orig_target);
543 SetWasmCalleeTag(iter.rinfo(), ext_ref_tag);
544 } break;
547 Address orig_target = orig_iter.rinfo()->target_internal_reference();
548 Address offset = orig_target - code->instruction_start();
550 iter.rinfo()->pc(), offset, jit_allocation, mode);
551 } break;
552 default:
553 UNREACHABLE();
554 }
555 }
556 // If we copied to an aligned buffer, copy code into serialized buffer.
557 if (code_start != serialized_code_start) {
558 memcpy(serialized_code_start, code_start, code_size);
559 }
560 total_written_code_ += code_size;
561}
562
564 for (size_t i = 0; i < native_module_->module()->num_declared_functions;
565 ++i) {
566 writer->Write(native_module_->tiering_budget_array()[i].load(
567 std::memory_order_relaxed));
568 }
569}
570
572 uint32_t canonical_sig_id) {
574 const WasmModule* module = native_module_->module();
576 size_t num_types = module->types.size();
577 DCHECK_EQ(num_types, module->isorecursive_canonical_type_ids.size());
578 for (uint32_t local_id = 0; local_id < num_types; ++local_id) {
579 // Only add function signatures.
580 if (!module->has_signature(ModuleTypeIndex{local_id})) continue;
581 CanonicalTypeIndex canonical_id =
582 module->canonical_sig_id(ModuleTypeIndex{local_id});
583 // Try to emplace, skip if an entry exists already. It does not matter
584 // which local type ID we use if multiple types got canonicalized to the
585 // same ID.
587 std::make_pair(canonical_id.index, local_id));
588 }
589 }
590 auto it = canonical_sig_ids_to_module_local_ids_.find(canonical_sig_id);
592 return it->second;
593}
594
// Top-level serialization driver: header, then one entry per function in the
// code table, then the tiering budget array. Returns false if nothing useful
// could be serialized (no TurboFan functions, unless jitless).
// NOTE(review): listing line 596 is missing from this extraction (likely
// DCHECK(!write_called_) guarding single use — confirm against upstream).
595bool NativeModuleSerializer::Write(Writer* writer) {
597 write_called_ = true;
598
// First pass: sum TurboFan code sizes so the header can record the total,
// which deserialization uses to pre-allocate code space.
599 size_t total_code_size = 0;
600 for (WasmCode* code : code_table_) {
601 if (code && code->tier() == ExecutionTier::kTurbofan) {
602 DCHECK(IsAligned(code->instructions().size(), kCodeAlignment));
603 total_code_size += code->instructions().size();
604 }
605 }
606 WriteHeader(writer, total_code_size);
607
// Needed by WriteCode to turn indirect-call targets back into function
// indices.
608 NativeModule::CallIndirectTargetMap function_index_map =
609 native_module_->CreateIndirectCallTargetToFunctionIndexMap();
610 for (WasmCode* code : code_table_) {
611 WriteCode(code, writer, function_index_map);
612 }
613 // No TurboFan-compiled functions in jitless mode.
614 if (!v8_flags.wasm_jitless) {
615 // If not a single function was written, serialization was not successful.
616 if (num_turbofan_functions_ == 0) return false;
617 }
618
619 // Make sure that the serialized total code size was correct.
620 CHECK_EQ(total_written_code_, total_code_size);
621
622 WriteTieringBudget(writer);
623 return true;
624}
625
627 : native_module_(native_module) {
628 std::tie(code_table_, import_statuses_) = native_module->SnapshotCodeTable();
629}
630
636
640 size_t measured_size = kHeaderSize + serializer.Measure();
641 if (buffer.size() < measured_size) return false;
642
643 Writer writer(buffer);
645
646 if (!serializer.Write(&writer)) return false;
647 DCHECK_EQ(measured_size, writer.bytes_written());
648 return true;
649}
650
656
// Mutex-protected FIFO of deserialization-unit batches, shared between the
// main deserialization loop and the background copy/relocate/publish tasks.
// NOTE(review): the class header (listing line 657) and the mutex member
// declaration (line 692, presumably "mutable base::Mutex mutex_;") are
// missing from this extraction.
658 public:
// Enqueues one non-empty batch.
659 void Add(std::vector<DeserializationUnit> batch) {
660 DCHECK(!batch.empty());
661 base::MutexGuard guard(&mutex_);
662 queue_.emplace(std::move(batch));
663 }
664
// Dequeues a single batch; returns an empty vector when the queue is empty.
665 std::vector<DeserializationUnit> Pop() {
666 base::MutexGuard guard(&mutex_);
667 if (queue_.empty()) return {};
668 auto batch = std::move(queue_.front());
669 queue_.pop();
670 return batch;
671 }
672
// Drains the whole queue into one flat vector (empty if nothing queued).
673 std::vector<DeserializationUnit> PopAll() {
674 base::MutexGuard guard(&mutex_);
675 if (queue_.empty()) return {};
676 auto units = std::move(queue_.front());
677 queue_.pop();
678 while (!queue_.empty()) {
679 units.insert(units.end(), std::make_move_iterator(queue_.front().begin()),
680 std::make_move_iterator(queue_.front().end()));
681 queue_.pop();
682 }
683 return units;
684 }
685
// Number of pending batches; used for job concurrency estimates, so the
// value may be stale by the time the caller acts on it.
686 size_t NumBatches() const {
687 base::MutexGuard guard(&mutex_);
688 return queue_.size();
689 }
690
691 private:
693 std::queue<std::vector<DeserializationUnit>> queue_;
694};
695
697 public:
701
702 bool Read(Reader* reader);
703
705 return base::VectorOf(lazy_functions_);
706 }
707
709 return base::VectorOf(eager_functions_);
710 }
711
712 private:
714
715 void ReadHeader(Reader* reader);
716 DeserializationUnit ReadCode(int fn_index, Reader* reader);
717 void ReadTieringBudget(Reader* reader);
718 void CopyAndRelocate(const DeserializationUnit& unit);
719 void Publish(std::vector<DeserializationUnit> batch);
720
722#ifdef DEBUG
723 bool read_called_ = false;
724#endif
725
726 // Updated in {ReadCode}.
727 size_t remaining_code_size_ = 0;
728 bool all_functions_validated_ = false;
732 std::vector<int> lazy_functions_;
733 std::vector<int> eager_functions_;
734};
735
// Background job that drains {reloc_queue_}: copies/relocates each unit,
// forwards finished batches to {publish_queue_}, and opportunistically takes
// the (single-threaded) publisher role.
// NOTE(review): the class header (listing lines 736-738), the loop body at
// line 751 (presumably deserializer_->CopyAndRelocate(unit)), part of the
// condition at line 762, and the member declarations at lines 791-793 are
// missing from this extraction.
737 public:
739 DeserializationQueue* reloc_queue)
740 : deserializer_(deserializer), reloc_queue_(reloc_queue) {}
741
742 void Run(JobDelegate* delegate) override {
743 bool finished = false;
744 while (!finished) {
745 // Repeatedly publish everything that was copied already.
746 finished = TryPublishing(delegate);
747
748 auto batch = reloc_queue_->Pop();
749 if (batch.empty()) break;
750 for (const auto& unit : batch) {
752 }
753 publish_queue_.Add(std::move(batch));
// New work is available for other workers (the batch we just queued).
754 delegate->NotifyConcurrencyIncrease();
755 }
756 }
757
758 size_t GetMaxConcurrency(size_t /* worker_count */) const override {
759 // Number of copy&reloc batches, plus 1 if there is also something to
760 // publish.
761 bool publish = publishing_.load(std::memory_order_relaxed) == false &&
763 return reloc_queue_->NumBatches() + (publish ? 1 : 0);
764 }
765
766 private:
767 bool TryPublishing(JobDelegate* delegate) {
768 // Publishing is sequential, so only start publishing if no one else is.
769 if (publishing_.exchange(true, std::memory_order_relaxed)) return false;
770
771 WasmCodeRefScope code_scope;
772 while (true) {
773 bool yield = false;
774 while (!yield) {
775 auto to_publish = publish_queue_.PopAll();
776 if (to_publish.empty()) break;
777 deserializer_->Publish(std::move(to_publish));
// Honor the scheduler between batches so publishing cannot starve
// other jobs.
778 yield = delegate->ShouldYield();
779 }
780 publishing_.store(false, std::memory_order_relaxed);
781 if (yield) return true;
782 // After finishing publishing, check again if new work arrived in the mean
783 // time. If so, continue publishing.
784 if (publish_queue_.NumBatches() == 0) break;
785 if (publishing_.exchange(true, std::memory_order_relaxed)) break;
786 // We successfully reset {publishing_} from {false} to {true}.
787 }
788 return false;
789 }
790
794 std::atomic<bool> publishing_{false};
795};
796
799
// Top-level deserialization driver: reads the header, then streams function
// entries into batches that a background job copies/relocates/publishes
// concurrently, then reads the tiering budget. Returns true iff the entire
// input was consumed.
// NOTE(review): listing lines 807, 814, 825, 837 and 849-852 are missing
// from this extraction (early-out condition, validation guard, job priority
// argument, the ReadCode call, and the end-of-read checks, respectively).
800bool NativeModuleDeserializer::Read(Reader* reader) {
801 DCHECK(!read_called_);
802#ifdef DEBUG
803 read_called_ = true;
804#endif
805
806 ReadHeader(reader);
808 return false;
809 }
810
811 uint32_t total_fns = native_module_->num_functions();
812 uint32_t first_wasm_fn = native_module_->num_imported_functions();
813
815 native_module_->module()->set_all_functions_validated();
816 }
817
818 WasmCodeRefScope wasm_code_ref_scope;
819
820 DeserializationQueue reloc_queue;
821
822 // Create a new job without any workers; those are spawned on
823 // {NotifyConcurrencyIncrease}.
824 std::unique_ptr<JobHandle> job_handle = V8::GetCurrentPlatform()->CreateJob(
826 std::make_unique<DeserializeCodeTask>(this, &reloc_queue));
827
828 // Choose a batch size such that we do not create too small batches (>=100k
829 // code bytes), but also not too many (<=100 batches).
830 constexpr size_t kMinBatchSizeInBytes = 100000;
831 size_t batch_limit =
832 std::max(kMinBatchSizeInBytes, remaining_code_size_ / 100);
833
834 std::vector<DeserializationUnit> batch;
835 size_t batch_size = 0;
836 for (uint32_t i = first_wasm_fn; i < total_fns; ++i) {
838 if (!unit.code) continue;
839 batch_size += unit.code->instructions().size();
840 batch.emplace_back(std::move(unit));
841 if (batch_size >= batch_limit) {
842 reloc_queue.Add(std::move(batch));
// A moved-from vector is guaranteed empty here; reuse it for the next
// batch.
843 DCHECK(batch.empty());
844 batch_size = 0;
845 job_handle->NotifyConcurrencyIncrease();
846 }
847 }
848
849 // We should have read the expected amount of code now, and should have fully
850 // utilized the allocated code space.
853
// Flush the final (partial) batch, if any.
854 if (!batch.empty()) {
855 reloc_queue.Add(std::move(batch));
856 job_handle->NotifyConcurrencyIncrease();
857 }
858
859 // Wait for all tasks to finish, while participating in their work.
860 job_handle->Join();
861
862 ReadTieringBudget(reader);
// Success only if every serialized byte was consumed.
863 return reader->current_size() == 0;
864}
865
868 reader->Read<WasmDetectedFeatures::StorageType>());
869 // Ignore the return value of UpdateDetectedFeatures; all features will be
870 // published after deserialization anyway.
872 detected_features));
873
874 remaining_code_size_ = reader->Read<size_t>();
875
876 all_functions_validated_ = reader->Read<bool>();
877
878 auto compile_imports_flags =
880 uint32_t constants_module_size = reader->Read<uint32_t>();
881 base::Vector<const char> constants_module_data =
882 reader->ReadVector<char>(constants_module_size);
884 constants_module_data);
885
886 uint32_t imported = native_module_->module()->num_imported_functions;
887 if (imported > 0) {
888 base::Vector<const WellKnownImport> well_known_imports =
889 reader->ReadVector<WellKnownImport>(imported);
890 native_module_->module()->type_feedback.well_known_imports.Initialize(
891 well_known_imports);
892 }
893}
894
896 Reader* reader) {
897 uint8_t code_kind = reader->Read<uint8_t>();
898 if (code_kind == kLazyFunction) {
899 lazy_functions_.push_back(fn_index);
900 return {};
901 }
902 if (code_kind == kEagerFunction) {
903 eager_functions_.push_back(fn_index);
904 return {};
905 }
906
907 int constant_pool_offset = reader->Read<int>();
908 int safepoint_table_offset = reader->Read<int>();
909 int handler_table_offset = reader->Read<int>();
910 int code_comment_offset = reader->Read<int>();
911 int unpadded_binary_size = reader->Read<int>();
912 int stack_slot_count = reader->Read<int>();
913 int ool_spill_count = reader->Read<int>();
914 uint32_t tagged_parameter_slots = reader->Read<uint32_t>();
915 int code_size = reader->Read<int>();
916 int reloc_size = reader->Read<int>();
917 int source_position_size = reader->Read<int>();
918 int inlining_position_size = reader->Read<int>();
919 int deopt_data_size = reader->Read<int>();
920 // TODO(mliedtke): protected_instructions_data is the first part of the
921 // meta_data_ array. Ideally the sizes would be in the same order...
922 int protected_instructions_size = reader->Read<int>();
923 WasmCode::Kind kind = reader->Read<WasmCode::Kind>();
924 ExecutionTier tier = reader->Read<ExecutionTier>();
925
926 DCHECK(IsAligned(code_size, kCodeAlignment));
928 if (current_code_space_.size() < static_cast<size_t>(code_size)) {
929 // Allocate the next code space. Don't allocate more than 90% of
930 // {kMaxCodeSpaceSize}, to leave some space for jump tables.
931 size_t max_reservation = RoundUp<kCodeAlignment>(
932 v8_flags.wasm_max_code_space_size_mb * MB * 9 / 10);
933 size_t code_space_size = std::min(max_reservation, remaining_code_size_);
936 DCHECK_EQ(current_code_space_.size(), code_space_size);
938 }
939
941 unit.src_code_buffer = reader->ReadVector<uint8_t>(code_size);
942 auto reloc_info = reader->ReadVector<uint8_t>(reloc_size);
943 auto source_pos = reader->ReadVector<uint8_t>(source_position_size);
944 auto inlining_pos = reader->ReadVector<uint8_t>(inlining_position_size);
945 auto deopt_data = reader->ReadVector<uint8_t>(deopt_data_size);
946 auto protected_instructions =
947 reader->ReadVector<uint8_t>(protected_instructions_size);
948
949 base::Vector<uint8_t> instructions =
950 current_code_space_.SubVector(0, code_size);
951 current_code_space_ += code_size;
952 remaining_code_size_ -= code_size;
953
955 fn_index, instructions, stack_slot_count, ool_spill_count,
956 tagged_parameter_slots, safepoint_table_offset, handler_table_offset,
957 constant_pool_offset, code_comment_offset, unpadded_binary_size,
958 protected_instructions, reloc_info, source_pos, inlining_pos, deopt_data,
959 kind, tier);
960 unit.jump_tables = current_jump_tables_;
961 return unit;
962}
963
// NativeModuleDeserializer::CopyAndRelocate: copies one deserialization
// unit's serialized machine code into its reserved slot in the module's code
// space and patches every position-dependent relocation for the new address.
// NOTE(review): this fragment is a Doxygen extraction with missing lines —
// the signature opener (orig. line 964) and the `case RelocInfo::...:` labels
// of the switch below did not survive; only the case bodies are visible.
// Consult the original wasm-serialization.cc before editing.
965 const DeserializationUnit& unit) {
967 reinterpret_cast<Address>(unit.code->instructions().begin()),
968 unit.code->instructions().size(),
970
// Copy the raw serialized instruction bytes into the writable JIT allocation.
971 jit_allocation.CopyCode(0, unit.src_code_buffer.begin(),
972 unit.src_code_buffer.size());
973
974 // Relocate the code.
// Walk all relocation entries selected by kMask (mask construction is on
// lines missing from this extraction) and rewrite each one.
982 for (WritableRelocIterator iter(jit_allocation, unit.code->instructions(),
983 unit.code->reloc_info(),
984 unit.code->constant_pool(), kMask);
985 !iter.done(); iter.next()) {
986 RelocInfo::Mode mode = iter.rinfo()->rmode();
987 switch (mode) {
// Case body (label missing): direct Wasm call — the serialized callee tag
// is translated back into a call target (target computation line 991 is
// missing from this fragment) and patched in.
989 uint32_t tag = GetWasmCalleeTag(iter.rinfo());
990 Address target =
992 iter.rinfo()->set_wasm_call_address(target);
993 break;
994 }
// Case body (label missing): Wasm stub call — resolve the serialized tag as
// a Builtin via the unit's jump tables (resolution line 997 missing here).
996 uint32_t tag = GetWasmCalleeTag(iter.rinfo());
998 static_cast<Builtin>(tag), unit.jump_tables);
999 iter.rinfo()->set_wasm_stub_call_address(target);
1000 break;
1001 }
// Case body (label missing): embedded signature ID, see comment below.
1003 // This is intentional: in serialized code, we patched embedded
1004 // canonical signature IDs with their module-specific equivalents,
1005 // so although the accessor is called "wasm_canonical_sig_id()", what
1006 // we get back is actually a module-specific signature ID, which we
1007 // now need to translate back to a canonical ID.
1008 ModuleTypeIndex module_local_sig_id{
1009 iter.rinfo()->wasm_canonical_sig_id()};
1010 CanonicalTypeIndex canonical_sig_id =
1011 native_module_->module()->canonical_sig_id(module_local_sig_id);
1012 iter.rinfo()->set_wasm_canonical_sig_id(canonical_sig_id.index);
1013 } break;
// Case body (label missing): code-pointer-table entry — the serialized
// value holds the function index; the lookup producing `target` (orig. line
// 1017, presumably GetCodePointerHandle) is missing from this fragment.
1015 Address function_index =
1016 iter.rinfo()->wasm_code_pointer_table_entry().value();
1018 base::checked_cast<uint32_t>(function_index));
1019 iter.rinfo()->set_wasm_code_pointer_table_entry(target,
1021 } break;
// Case body (label missing): external reference — map the serialized tag
// back to this process's address for that reference. Icache flushing is
// skipped here because the whole region is flushed once at the end.
1023 uint32_t tag = GetWasmCalleeTag(iter.rinfo());
1024 Address address = ExternalReferenceList::Get().address_from_tag(tag);
1025 iter.rinfo()->set_target_external_reference(address, SKIP_ICACHE_FLUSH);
1026 break;
1027 }
// Case body (label missing): internal reference — serialized as an offset
// from the code start; rebase it onto the new instruction_start().
1030 Address offset = iter.rinfo()->target_internal_reference();
1031 Address target = unit.code->instruction_start() + offset;
1033 iter.rinfo()->pc(), target, jit_allocation, mode);
1034 break;
1035 }
1036 default:
1037 UNREACHABLE();
1038 }
1039 }
1040
1041 // Finally, flush the icache for that code.
1042 FlushInstructionCache(unit.code->instructions().begin(),
1043 unit.code->instructions().size());
1044}
1045
// NativeModuleDeserializer::ReadTieringBudget (signature line, orig. 1046,
// missing from this extraction): restores the per-function tiering budget
// counters — one uint32_t entry per declared function — from the tail of the
// serialized stream into the native module's tiering budget array.
1047 size_t size_of_tiering_budget =
1048 native_module_->module()->num_declared_functions * sizeof(uint32_t);
// Silent early-out if the stream is too short: restoring the budget is
// treated as best-effort, not a deserialization error (presumably older
// snapshots may lack this section — confirm against the serializer).
1049 if (size_of_tiering_budget > reader->current_size()) {
1050 return;
1051 }
1052 base::Vector<const uint8_t> serialized_budget =
1053 reader->ReadVector<const uint8_t>(size_of_tiering_budget);
1054
// Bulk-copy the raw counters into place.
1055 memcpy(native_module_->tiering_budget_array(), serialized_budget.begin(),
1056 size_of_tiering_budget);
1057}
1058
// Publishes one batch of deserialized code units to the NativeModule, then
// prints (if tracing) and validates each published WasmCode.
// NOTE(review): one original line (orig. 1069, which closes the
// UnpublishedWasmCode initializer — presumably with kNoAssumptions) is
// missing from this extraction.
1059void NativeModuleDeserializer::Publish(std::vector<DeserializationUnit> batch) {
1060 DCHECK(!batch.empty());
1061 std::vector<UnpublishedWasmCode> codes;
// Reserve up front: one published entry per deserialization unit.
1062 codes.reserve(batch.size());
1063 for (auto& unit : batch) {
1064 // We serialized the code assumptions for well-known imports (see
1065 // {WasmSerializer::import_statuses_}, so when publishing the deserialized
1066 // code here we do not need to pass any assumptions.
1067 codes.emplace_back(UnpublishedWasmCode{
1068 std::move(unit).code, std::unique_ptr<AssumptionsJournal>{
1070 }
1071 auto published_codes = native_module_->PublishCode(base::VectorOf(codes));
1072 for (auto* wasm_code : published_codes) {
1073 wasm_code->MaybePrint();
1074 wasm_code->Validate();
1075 }
1076}
1077
// IsSupportedVersion (opening signature line, orig. 1078, missing from this
// extraction; per the visible code it takes a `header` byte vector plus the
// enabled features): rebuilds the serialization header this build would
// write and byte-compares it against the provided header. Any mismatch —
// version, flags, or a truncated header — rejects the serialized module.
1079 WasmEnabledFeatures enabled_features) {
1080 if (header.size() < WasmSerializer::kHeaderSize) return false;
// Write the expected header into a stack buffer using the same Writer path
// as serialization, then compare byte-for-byte.
1081 uint8_t current_version[WasmSerializer::kHeaderSize];
1082 Writer writer({current_version, WasmSerializer::kHeaderSize});
1083 WriteHeader(&writer, enabled_features);
1084 return memcmp(header.begin(), current_version, WasmSerializer::kHeaderSize) ==
1085 0;
1086}
1087
// DeserializeNativeModule (opening signature line, orig. 1088, missing from
// this extraction): reconstructs a WasmModuleObject from a serialized code
// blob (`data`) plus the original wire bytes, reusing a cached NativeModule
// when one exists for the same wire bytes and compile-time imports.
// Returns an empty handle on any validation/decoding/deserialization error.
1089 Isolate* isolate, base::Vector<const uint8_t> data,
1090 base::Vector<const uint8_t> wire_bytes_vec,
1091 const CompileTimeImports& compile_imports,
1092 base::Vector<const char> source_url) {
// Initializer of enabled_features (orig. line 1094) is missing from this
// extraction — presumably WasmEnabledFeatures::FromIsolate(isolate).
1093 WasmEnabledFeatures enabled_features =
// Reject early if codegen is disallowed in this context or the serialized
// header does not match this build (see IsSupportedVersion).
1095 if (!IsWasmCodegenAllowed(isolate, isolate->native_context())) return {};
1096 if (!IsSupportedVersion(data, enabled_features)) return {};
1097
1098 // Make the copy of the wire bytes early, so we use the same memory for
1099 // decoding, lookup in the native module cache, and insertion into the cache.
1100 base::OwnedVector<const uint8_t> owned_wire_bytes =
1101 base::OwnedCopyOf(wire_bytes_vec);
1102
// Decode the module structure from the wire bytes. Function bodies are not
// validated here (third argument is false) since the code comes from the
// serialized blob, not from compilation.
1103 WasmDetectedFeatures detected_features;
1104 ModuleResult decode_result = DecodeWasmModule(
1105 enabled_features, owned_wire_bytes.as_vector(), false,
1106 i::wasm::kWasmOrigin, isolate->counters(), isolate->metrics_recorder(),
1107 isolate->GetOrRegisterRecorderContextId(isolate->native_context()),
1108 DecodingMethod::kDeserialize, &detected_features);
1109 if (decode_result.failed()) return {};
1110 std::shared_ptr<WasmModule> module = std::move(decode_result).value();
1111 CHECK_NOT_NULL(module);
1112
// Check the engine-wide cache first; only build and deserialize a fresh
// NativeModule on a cache miss.
1113 WasmEngine* wasm_engine = GetWasmEngine();
1114 auto shared_native_module = wasm_engine->MaybeGetNativeModule(
1115 module->origin, owned_wire_bytes.as_vector(), compile_imports, isolate);
1116 if (shared_native_module == nullptr) {
// Initializer of code_size_estimate (orig. line 1118) is missing from this
// extraction — presumably WasmCodeManager::EstimateNativeModuleCodeSize.
1117 size_t code_size_estimate =
1119 shared_native_module = wasm_engine->NewNativeModule(
1120 isolate, enabled_features, detected_features, compile_imports,
1121 std::move(module), code_size_estimate);
1122 // We have to assign a compilation ID here, as it is required for a
1123 // potential re-compilation, e.g. triggered by
1124 // {EnterDebuggingForIsolate}. The value is -2 so that it is different
1125 // than the compilation ID of actual compilations, and also different than
1126 // the sentinel value of the CompilationState.
1127 shared_native_module->compilation_state()->set_compilation_id(-2);
1128 shared_native_module->SetWireBytes(std::move(owned_wire_bytes));
1129
// Deserialize the machine code that follows the header. On failure, still
// notify the cache (with has_error == true) so waiters are unblocked, then
// bail with an empty handle.
1130 NativeModuleDeserializer deserializer(shared_native_module.get());
1131 Reader reader(data + WasmSerializer::kHeaderSize);
1132 bool error = !deserializer.Read(&reader);
1133 if (error) {
1134 wasm_engine->UpdateNativeModuleCache(
1135 error, std::move(shared_native_module), isolate);
1136 return {};
1137 }
// Tell the compilation state which functions were deserialized lazily vs.
// eagerly, then insert the finished module into the engine cache.
1138 shared_native_module->compilation_state()->InitializeAfterDeserialization(
1139 deserializer.lazy_functions(), deserializer.eager_functions());
1140 wasm_engine->UpdateNativeModuleCache(error, shared_native_module, isolate);
1141 // Now publish the full set of detected features (read during
1142 // deserialization, so potentially more than from DecodeWasmModule above).
1143 detected_features =
1144 shared_native_module->compilation_state()->detected_features();
1145 PublishDetectedFeatures(detected_features, isolate, true);
1146 }
1147
// Wrap the (possibly cached) NativeModule in a Script + WasmModuleObject.
1148 DirectHandle<Script> script =
1149 wasm_engine->GetOrCreateScript(isolate, shared_native_module, source_url);
1150 DirectHandle<WasmModuleObject> module_object =
1151 WasmModuleObject::New(isolate, shared_native_module, script);
1152
1153 // Finish the Wasm script now and make it public to the debugger.
1154 isolate->debug()->OnAfterCompile(script);
1155
1156 // Log the code within the generated module for profiling.
1157 shared_native_module->LogWasmCodes(isolate, *script);
1158
1159 return module_object;
1160}
1161
1162} // namespace v8::internal::wasm
int pos_
#define T
Builtins::Kind kind
Definition builtins.cc:40
virtual void NotifyConcurrencyIncrease()=0
virtual bool ShouldYield()=0
std::unique_ptr< JobHandle > CreateJob(TaskPriority priority, std::unique_ptr< JobTask > job_task, const SourceLocation &location=SourceLocation::Current())
static constexpr EnumSet FromIntegral(int bits)
Definition enum-set.h:91
constexpr T ToIntegral() const
Definition enum-set.h:56
Vector< T > as_vector() const
Definition vector.h:276
Vector< T > SubVector(size_t from, size_t to) const
Definition vector.h:41
constexpr size_t size() const
Definition vector.h:70
constexpr T * begin() const
Definition vector.h:96
static Vector< T > cast(Vector< S > input)
Definition vector.h:157
static void deserialization_set_target_internal_reference_at(Address pc, Address target, WritableJitAllocation &jit_allocation, RelocInfo::Mode mode=RelocInfo::INTERNAL_REFERENCE)
static V8_INLINE void set_target_address_at(Address pc, Address constant_pool, Address target, WritableJitAllocation *jit_allocation, ICacheFlushMode icache_flush_mode=FLUSH_ICACHE_IF_NEEDED)
static int PatchBranchlongOffset(Address pc, Instr auipc, Instr instr_I, int32_t offset, WritableJitAllocation *jit_allocation=nullptr)
static int BrachlongOffset(Instr auipc, Instr jalr)
static unsigned SupportedFeatures()
static uint32_t Hash()
Definition flags.cc:1196
V8_INLINE Address target_internal_reference()
static constexpr int ModeMask(Mode mode)
Definition reloc-info.h:272
static constexpr bool IsCompressedEmbeddedObject(Mode mode)
Definition reloc-info.h:206
V8_EXPORT_PRIVATE uint32_t wasm_canonical_sig_id() const
V8_INLINE WasmCodePointer wasm_code_pointer_table_entry() const
V8_INLINE Address target_external_reference()
Address wasm_call_address() const
Address wasm_stub_call_address() const
static constexpr uint32_t kMagicNumber
static WritableJitAllocation RegisterJitAllocation(Address addr, size_t size, JitAllocationType type, bool enforce_write_api=false)
static V8_EXPORT_PRIVATE v8::Platform * GetCurrentPlatform()
Definition v8.cc:282
static uint32_t Hash()
Definition version.h:30
static V8_EXPORT_PRIVATE DirectHandle< WasmModuleObject > New(Isolate *isolate, std::shared_ptr< wasm::NativeModule > native_module, DirectHandle< Script > script)
static V8_INLINE WritableJitAllocation ForNonExecutableMemory(Address addr, size_t size, ThreadIsolation::JitAllocationType type)
V8_INLINE void CopyCode(size_t dst_offset, const uint8_t *src, size_t num_bytes)
V8_WARN_UNUSED_RESULT WasmDetectedFeatures UpdateDetectedFeatures(WasmDetectedFeatures)
CompileTimeImportFlags flags() const
static CompileTimeImports FromSerialized(CompileTimeImportFlags::StorageType flags, base::Vector< const char > constants_module)
int compare(const CompileTimeImports &other) const
std::vector< DeserializationUnit > PopAll()
std::queue< std::vector< DeserializationUnit > > queue_
void Add(std::vector< DeserializationUnit > batch)
std::vector< DeserializationUnit > Pop()
DeserializeCodeTask(NativeModuleDeserializer *deserializer, DeserializationQueue *reloc_queue)
NativeModuleDeserializer *const deserializer_
void Run(JobDelegate *delegate) override
size_t GetMaxConcurrency(size_t) const override
DeserializationUnit ReadCode(int fn_index, Reader *reader)
NativeModuleDeserializer(const NativeModuleDeserializer &)=delete
void CopyAndRelocate(const DeserializationUnit &unit)
NativeModuleDeserializer & operator=(const NativeModuleDeserializer &)=delete
void Publish(std::vector< DeserializationUnit > batch)
uint32_t CanonicalSigIdToModuleLocalTypeId(uint32_t canonical_sig_id)
void WriteHeader(Writer *, size_t total_code_size)
NativeModuleSerializer(const NativeModule *, base::Vector< WasmCode *const >, base::Vector< WellKnownImport const >)
NativeModuleSerializer & operator=(const NativeModuleSerializer &)=delete
NativeModuleSerializer(const NativeModuleSerializer &)=delete
const base::Vector< WellKnownImport const > import_statuses_
std::unordered_map< uint32_t, uint32_t > canonical_sig_ids_to_module_local_ids_
const base::Vector< WasmCode *const > code_table_
void WriteCode(const WasmCode *, Writer *, const NativeModule::CallIndirectTargetMap &)
CompilationState * compilation_state() const
Address GetNearCallTargetForFunction(uint32_t func_index, const JumpTablesRef &) const
std::atomic< uint32_t > * tiering_budget_array() const
std::pair< std::vector< WasmCode * >, std::vector< WellKnownImport > > SnapshotCodeTable() const
const WasmModule * module() const
std::pair< base::Vector< uint8_t >, JumpTablesRef > AllocateForDeserializedCode(size_t total_code_size)
WasmCodePointer GetCodePointerHandle(int index) const
Address GetJumpTableEntryForBuiltin(Builtin builtin, const JumpTablesRef &) const
std::unique_ptr< WasmCode > AddDeserializedCode(int index, base::Vector< uint8_t > instructions, int stack_slots, int ool_spills, uint32_t tagged_parameter_slots, int safepoint_table_offset, int handler_table_offset, int constant_pool_offset, int code_comments_offset, int unpadded_binary_size, base::Vector< const uint8_t > protected_instructions_data, base::Vector< const uint8_t > reloc_info, base::Vector< const uint8_t > source_position_table, base::Vector< const uint8_t > inlining_positions, base::Vector< const uint8_t > deopt_data, WasmCode::Kind kind, ExecutionTier tier)
WasmCode * PublishCode(UnpublishedWasmCode)
absl::flat_hash_map< WasmCodePointer, uint32_t > CallIndirectTargetMap
WasmEnabledFeatures enabled_features() const
const CompileTimeImports & compile_imports() const
static size_t EstimateNativeModuleCodeSize(const WasmModule *)
static V8_EXPORT_PRIVATE WasmEnabledFeatures FromIsolate(Isolate *)
std::shared_ptr< NativeModule > MaybeGetNativeModule(ModuleOrigin origin, base::Vector< const uint8_t > wire_bytes, const CompileTimeImports &compile_imports, Isolate *isolate)
DirectHandle< Script > GetOrCreateScript(Isolate *, const std::shared_ptr< NativeModule > &, base::Vector< const char > source_url)
std::shared_ptr< NativeModule > NewNativeModule(Isolate *isolate, WasmEnabledFeatures enabled_features, WasmDetectedFeatures detected_features, CompileTimeImports compile_imports, std::shared_ptr< const WasmModule > module, size_t code_size_estimate)
std::shared_ptr< NativeModule > UpdateNativeModuleCache(bool has_error, std::shared_ptr< NativeModule > native_module, Isolate *isolate)
std::vector< WasmCode * > code_table_
std::vector< WellKnownImport > import_statuses_
bool SerializeNativeModule(base::Vector< uint8_t > buffer) const
WasmSerializer(NativeModule *native_module)
uint8_t *const start_
Definition assembler.cc:131
const v8::base::TimeTicks end_
Definition sweeper.cc:54
int end
#define COUNT_EXTERNAL_REFERENCE(name, desc)
#define EXTERNAL_REFERENCE_LIST(V)
int32_t offset
std::optional< TNode< JSArray > > a
Instruction * instr
std::shared_ptr< NativeModule > native_module_
std::priority_queue< BigUnit > units[CompilationTier::kNumTiers]
constexpr Vector< T > VectorOf(T *start, size_t size)
Definition vector.h:360
OwnedVector< T > OwnedCopyOf(const T *data, size_t size)
Definition vector.h:383
Node::Uses::const_iterator begin(const Node::Uses &uses)
Definition node.h:708
ModuleResult DecodeWasmModule(WasmEnabledFeatures enabled_features, base::Vector< const uint8_t > wire_bytes, bool validate_functions, ModuleOrigin origin, Counters *counters, std::shared_ptr< metrics::Recorder > metrics_recorder, v8::metrics::Recorder::ContextId context_id, DecodingMethod decoding_method, WasmDetectedFeatures *detected_features)
MaybeDirectHandle< WasmModuleObject > DeserializeNativeModule(Isolate *isolate, base::Vector< const uint8_t > data, base::Vector< const uint8_t > wire_bytes_vec, const CompileTimeImports &compile_imports, base::Vector< const char > source_url)
WireBytesRef Get(const NameMap &map, uint32_t index)
WasmEngine * GetWasmEngine()
int declared_function_index(const WasmModule *module, int func_index)
void PublishDetectedFeatures(WasmDetectedFeatures detected_features, Isolate *isolate, bool is_initial_compilation)
bool IsWasmCodegenAllowed(Isolate *isolate, DirectHandle< NativeContext > context)
bool IsSupportedVersion(base::Vector< const uint8_t > header, WasmEnabledFeatures enabled_features)
static void WriteHeader(std::ostream &os, const char *compiler)
void FlushInstructionCache(void *start, size_t size)
constexpr intptr_t kCodeAlignment
Definition globals.h:964
constexpr int kSystemPointerSize
Definition globals.h:410
V8_EXPORT_PRIVATE FlagValues v8_flags
return value
Definition map-inl.h:893
constexpr uint8_t kInstrSize
constexpr uint32_t kMaxUInt32
Definition globals.h:387
#define FOR_EACH_INTRINSIC(F)
Definition runtime.h:884
#define CHECK(condition)
Definition logging.h:124
#define DCHECK_NOT_NULL(val)
Definition logging.h:492
#define CHECK_NOT_NULL(val)
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define CHECK_EQ(lhs, rhs)
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define DCHECK_GT(v1, v2)
Definition logging.h:487
#define USE(...)
Definition macros.h:293
constexpr T RoundUp(T x, intptr_t m)
Definition macros.h:387
#define V8_EXPORT_PRIVATE
Definition macros.h:460
constexpr bool IsAligned(T value, U alignment)
Definition macros.h:403
base::Vector< const uint8_t > src_code_buffer
static constexpr AssumptionsJournal * kNoAssumptions
base::Vector< const WasmFunction > declared_functions() const
std::vector< CanonicalTypeIndex > isorecursive_canonical_type_ids
bool has_signature(ModuleTypeIndex index) const
bool function_was_validated(int func_index) const
#define EXT_REF_ADDR(name, desc)
uint32_t tags_ordered_by_address_[kNumExternalReferences]
static constexpr uint32_t kNumExternalReferences
static constexpr uint32_t kNumExternalReferencesList
static constexpr uint32_t kNumExternalReferencesIntrinsics
Address external_reference_by_tag_[kNumExternalReferences]
#define RUNTIME_ADDR(name,...)