V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
wasm-code-manager.h
1// Copyright 2017 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_WASM_WASM_CODE_MANAGER_H_
6#define V8_WASM_WASM_CODE_MANAGER_H_
7
8#if !V8_ENABLE_WEBASSEMBLY
9#error This header should only be included if WebAssembly is enabled.
10#endif // !V8_ENABLE_WEBASSEMBLY
11
12#include <atomic>
13#include <map>
14#include <memory>
15#include <set>
16#include <utility>
17#include <vector>
18
19#include "absl/container/flat_hash_map.h"
21#include "src/base/bit-field.h"
22#include "src/base/macros.h"
23#include "src/base/vector.h"
27#include "src/handles/handles.h"
35#include "src/wasm/wasm-tier.h"
36
37namespace v8 {
38class CFunctionInfo;
39namespace internal {
40
41class InstructionStream;
42class CodeDesc;
43class Isolate;
44
45namespace wasm {
46
47class AssumptionsJournal;
48class DebugInfo;
49class NamesProvider;
50class NativeModule;
51struct WasmCompilationResult;
52class WasmEngine;
53class WasmImportWrapperCache;
54struct WasmModule;
55enum class WellKnownImport : uint8_t;
56
57// Sorted, disjoint and non-overlapping memory regions. A region is of the
58// form [start, end). So there's no [start, end), [end, other_end),
59// because that should have been reduced to [start, other_end).
60class V8_EXPORT_PRIVATE DisjointAllocationPool final {
61 public:
62 MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(DisjointAllocationPool);
63 explicit DisjointAllocationPool(base::AddressRegion region)
64 : regions_({region}) {}
65
66 // Merge the parameter region into this object. The assumption is that the
67 // passed parameter does not intersect this object - for example, it was
68 // obtained from a previous Allocate. Returns the merged region.
69 base::AddressRegion Merge(base::AddressRegion);
70
71 // Allocate a contiguous region of size {size}. Return an empty region on
72 // failure.
73 base::AddressRegion Allocate(size_t size);
74
75 // Allocate a contiguous region of size {size} within {region}. Return an
76 // empty region on failure.
77 base::AddressRegion AllocateInRegion(size_t size, base::AddressRegion);
78
79 bool IsEmpty() const { return regions_.empty(); }
80
81 const auto& regions() const { return regions_; }
82
83 private:
84 std::set<base::AddressRegion, base::AddressRegion::StartAddressLess> regions_;
85};
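// Usage sketch (illustrative; the concrete start address and sizes are
// hypothetical): the pool tracks free address space as sorted, disjoint
// [start, end) regions, assuming {base::AddressRegion} is constructed from a
// start address and a size.
//
//   DisjointAllocationPool pool(base::AddressRegion{kSomeStart, 0x10000});
//   base::AddressRegion r = pool.Allocate(0x1000);  // empty region on failure
//   if (!r.is_empty()) {
//     // ... hand out the memory, later return it to the pool:
//     pool.Merge(r);
//   }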
86
89
90class V8_EXPORT_PRIVATE WasmCode final {
91 public:
92 enum Kind {
96#if V8_ENABLE_DRUMBRAKE
97 kInterpreterEntry,
98#endif // V8_ENABLE_DRUMBRAKE
99 kJumpTable
100 };
101
102 static constexpr Builtin GetRecordWriteBuiltin(SaveFPRegsMode fp_mode) {
103 switch (fp_mode) {
104 case SaveFPRegsMode::kIgnore:
105 return Builtin::kRecordWriteIgnoreFP;
106 case SaveFPRegsMode::kSave:
107 return Builtin::kRecordWriteSaveFP;
108 }
109 }
110
111#ifdef V8_IS_TSAN
112 static Builtin GetTSANStoreBuiltin(SaveFPRegsMode fp_mode, int size,
113 std::memory_order order) {
114 if (order == std::memory_order_relaxed) {
115 if (size == kInt8Size) {
116 return fp_mode == SaveFPRegsMode::kIgnore
117 ? Builtin::kTSANRelaxedStore8IgnoreFP
118 : Builtin::kTSANRelaxedStore8SaveFP;
119 } else if (size == kInt16Size) {
120 return fp_mode == SaveFPRegsMode::kIgnore
121 ? Builtin::kTSANRelaxedStore16IgnoreFP
122 : Builtin::kTSANRelaxedStore16SaveFP;
123 } else if (size == kInt32Size) {
124 return fp_mode == SaveFPRegsMode::kIgnore
125 ? Builtin::kTSANRelaxedStore32IgnoreFP
126 : Builtin::kTSANRelaxedStore32SaveFP;
127 } else {
128 CHECK_EQ(size, kInt64Size);
129 return fp_mode == SaveFPRegsMode::kIgnore
130 ? Builtin::kTSANRelaxedStore64IgnoreFP
131 : Builtin::kTSANRelaxedStore64SaveFP;
132 }
133 } else {
134 DCHECK_EQ(order, std::memory_order_seq_cst);
135 if (size == kInt8Size) {
136 return fp_mode == SaveFPRegsMode::kIgnore
137 ? Builtin::kTSANSeqCstStore8IgnoreFP
138 : Builtin::kTSANSeqCstStore8SaveFP;
139 } else if (size == kInt16Size) {
140 return fp_mode == SaveFPRegsMode::kIgnore
141 ? Builtin::kTSANSeqCstStore16IgnoreFP
142 : Builtin::kTSANSeqCstStore16SaveFP;
143 } else if (size == kInt32Size) {
144 return fp_mode == SaveFPRegsMode::kIgnore
145 ? Builtin::kTSANSeqCstStore32IgnoreFP
146 : Builtin::kTSANSeqCstStore32SaveFP;
147 } else {
148 CHECK_EQ(size, kInt64Size);
149 return fp_mode == SaveFPRegsMode::kIgnore
150 ? Builtin::kTSANSeqCstStore64IgnoreFP
151 : Builtin::kTSANSeqCstStore64SaveFP;
152 }
153 }
154 }
155
156 static Builtin GetTSANRelaxedLoadBuiltin(SaveFPRegsMode fp_mode, int size) {
157 if (size == kInt32Size) {
158 return fp_mode == SaveFPRegsMode::kIgnore
159 ? Builtin::kTSANRelaxedLoad32IgnoreFP
160 : Builtin::kTSANRelaxedLoad32SaveFP;
161 } else {
162 CHECK_EQ(size, kInt64Size);
163 return fp_mode == SaveFPRegsMode::kIgnore
164 ? Builtin::kTSANRelaxedLoad64IgnoreFP
165 : Builtin::kTSANRelaxedLoad64SaveFP;
166 }
167 }
168#endif // V8_IS_TSAN
169
170 base::Vector<uint8_t> instructions() const {
171 return base::VectorOf(instructions_,
172 static_cast<size_t>(instructions_size_));
173 }
174 Address instruction_start() const {
175 return reinterpret_cast<Address>(instructions_);
176 }
177 size_t instructions_size() const {
178 return static_cast<size_t>(instructions_size_);
179 }
180 base::Vector<const uint8_t> reloc_info() const {
181 return {protected_instructions_data().end(),
182 static_cast<size_t>(reloc_info_size_)};
183 }
184 base::Vector<const uint8_t> source_positions() const {
185 return {reloc_info().end(), static_cast<size_t>(source_positions_size_)};
186 }
187 base::Vector<const uint8_t> inlining_positions() const {
188 return {source_positions().end(),
189 static_cast<size_t>(inlining_positions_size_)};
190 }
191 base::Vector<const uint8_t> deopt_data() const {
192 return {inlining_positions().end(), static_cast<size_t>(deopt_data_size_)};
193 }
194
195 int index() const { return index_; }
196 // Anonymous functions are functions that don't carry an index.
197 bool IsAnonymous() const { return index_ == kAnonymousFuncIndex; }
198 Kind kind() const { return KindField::decode(flags_); }
200 ExecutionTier tier() const { return ExecutionTierField::decode(flags_); }
201 Address constant_pool() const;
202 Address handler_table() const;
203 int handler_table_size() const;
204 Address code_comments() const;
205 int code_comments_size() const;
206 int constant_pool_offset() const { return constant_pool_offset_; }
207 int safepoint_table_offset() const { return safepoint_table_offset_; }
209 int code_comments_offset() const { return code_comments_offset_; }
210 int unpadded_binary_size() const { return unpadded_binary_size_; }
211 int stack_slots() const { return stack_slots_; }
212 int ool_spills() const { return ool_spills_; }
213 uint64_t signature_hash() const { return signature_hash_; }
214 uint16_t first_tagged_parameter_slot() const {
215 return tagged_parameter_slots_ >> 16;
216 }
217 uint16_t num_tagged_parameter_slots() const {
218 return tagged_parameter_slots_ & 0xFFFF;
219 }
220 uint32_t raw_tagged_parameter_slots_for_serialization() const {
221 return tagged_parameter_slots_;
222 }
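// Packing example (illustrative, hypothetical values): if the first tagged
// stack parameter slot is 5 and there are 2 tagged parameter slots, then
// {tagged_parameter_slots_} is (5 << 16) | 2, so
// {first_tagged_parameter_slot()} returns 5 and
// {num_tagged_parameter_slots()} returns 2. The encoding (high 16 bits: first
// slot, low 16 bits: count) follows the accessors above.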
223
224 bool is_liftoff() const { return tier() == ExecutionTier::kLiftoff; }
225
226 bool is_turbofan() const { return tier() == ExecutionTier::kTurbofan; }
227
228 bool contains(Address pc) const {
229 return reinterpret_cast<Address>(instructions_) <= pc &&
230 pc < reinterpret_cast<Address>(instructions_ + instructions_size_);
231 }
232
233 // Only Liftoff code that was generated for debugging can be inspected
234 // (otherwise debug side table positions would not match up).
235 bool is_inspectable() const { return is_liftoff() && for_debugging(); }
236
237 base::Vector<const uint8_t> protected_instructions_data() const {
238 return {meta_data_.get(),
239 static_cast<size_t>(protected_instructions_size_)};
240 }
241
242 base::Vector<const trap_handler::ProtectedInstructionData> protected_instructions()
243 const {
244 return base::Vector<const trap_handler::ProtectedInstructionData>::cast(
245 protected_instructions_data());
246 }
247
248 bool IsProtectedInstruction(Address pc);
249
250 void Validate() const;
251 void Print(const char* name = nullptr) const;
252 void MaybePrint() const;
253 void Disassemble(const char* name, std::ostream& os,
254 Address current_pc = kNullAddress) const;
255
256 static bool ShouldBeLogged(Isolate* isolate);
257 void LogCode(Isolate* isolate, const char* source_url, int script_id) const;
258
259 WasmCode(const WasmCode&) = delete;
260 WasmCode& operator=(const WasmCode&) = delete;
261 ~WasmCode();
262
263 void IncRef() {
264 [[maybe_unused]] int old_val =
265 ref_count_.fetch_add(1, std::memory_order_acq_rel);
266 DCHECK_LE(1, old_val);
267 DCHECK_GT(kMaxInt, old_val);
268 }
269
270 // Decrement the ref count. Returns whether this code becomes dead and needs
271 // to be freed.
272 V8_WARN_UNUSED_RESULT bool DecRef() {
273 int old_count = ref_count_.load(std::memory_order_acquire);
274 while (true) {
275 DCHECK_LE(1, old_count);
276 if (V8_UNLIKELY(old_count == 1)) {
277 if (is_dying()) {
278 // The code was already on the path to deletion, only temporary
279 // C++ references to it are left. Decrement the refcount, and
280 // return true if it drops to zero.
281 return DecRefOnDeadCode();
282 }
283 // Otherwise, the code enters the path to destruction now.
284 mark_as_dying();
285 old_count = ref_count_.load(std::memory_order_acquire);
286 if (V8_LIKELY(old_count == 1)) {
287 // No other thread got in the way. Commit to the decision.
288 DecRefOnPotentiallyDeadCode();
289 return false;
290 }
291 // Another thread managed to increment the refcount again, just
292 // before we set the "dying" bit. So undo that, and resume the
293 // loop to evaluate again what needs to be done.
294 undo_mark_as_dying();
295 }
296 DCHECK_LT(1, old_count);
297 if (ref_count_.compare_exchange_weak(old_count, old_count - 1,
298 std::memory_order_acq_rel)) {
299 return false;
300 }
301 }
302 }
303
304 // Decrement the ref count on code that is known to be in use (i.e. the ref
305 // count cannot drop to zero here).
307 [[maybe_unused]] int old_count =
308 ref_count_.fetch_sub(1, std::memory_order_acq_rel);
309 DCHECK_LE(2, old_count);
310 }
311
312 // Decrement the ref count on code that is known to be dead, even though there
313 // might still be C++ references. Returns whether this drops the last
314 // reference and the code needs to be freed.
315 V8_WARN_UNUSED_RESULT bool DecRefOnDeadCode() {
316 return ref_count_.fetch_sub(1, std::memory_order_acq_rel) == 1;
317 }
318
319 // Decrement the ref count on a set of {WasmCode} objects, potentially
320 // belonging to different {NativeModule}s. Dead code will be deleted.
321 static void DecrementRefCount(base::Vector<WasmCode* const>);
322
323 // Called by the WasmEngine when it shuts down, for code it thinks is
324 // probably dead (i.e. is in the "potentially_dead_code_" set). Wrapped
325 // in a method only because {ref_count_} is private.
327 DCHECK_EQ(1, ref_count_.load(std::memory_order_acquire));
328 }
329
330 // Returns the last source position before {code_offset}.
331 SourcePosition GetSourcePositionBefore(int code_offset);
332 int GetSourceOffsetBefore(int code_offset);
333
334 std::tuple<int, bool, SourcePosition> GetInliningPosition(
335 int inlining_id) const;
336
337 // Returns whether this code was generated for debugging. If this returns
338 // {kForDebugging}, but {tier()} is not {kLiftoff}, then Liftoff compilation
339 // bailed out.
340 ForDebugging for_debugging() const {
341 return ForDebuggingField::decode(flags_);
342 }
343
344 bool is_dying() const { return dying_.load(std::memory_order_acquire); }
345
346 // Returns {true} for Liftoff code that sets up a feedback vector slot in its
347 // stack frame.
348 // TODO(jkummerow): This can be dropped when we ship Wasm inlining.
350 return FrameHasFeedbackSlotField::decode(flags_);
351 }
352
353 enum FlushICache : bool { kFlushICache = true, kNoFlushICache = false };
354
355 size_t EstimateCurrentMemoryConsumption() const;
356
357 // Tries to get a reasonable name. Lazily looks up the name section, and falls
358 // back to the function index. Return value is guaranteed to not be empty.
359 std::string DebugName() const;
360
361 private:
362 friend class NativeModule;
364
365 WasmCode(NativeModule* native_module, int index,
366 base::Vector<uint8_t> instructions, int stack_slots, int ool_spills,
367 uint32_t tagged_parameter_slots, int safepoint_table_offset,
368 int handler_table_offset, int constant_pool_offset,
369 int code_comments_offset, int unpadded_binary_size,
370 base::Vector<const uint8_t> protected_instructions_data,
372 base::Vector<const uint8_t> source_position_table,
373 base::Vector<const uint8_t> inlining_positions,
375 ExecutionTier tier, ForDebugging for_debugging,
376 uint64_t signature_hash, bool frame_has_feedback_slot = false)
377 : native_module_(native_module),
378 instructions_(instructions.begin()),
379 signature_hash_(signature_hash),
380 meta_data_(ConcatenateBytes({protected_instructions_data, reloc_info,
381 source_position_table, inlining_positions,
382 deopt_data})),
383 instructions_size_(instructions.length()),
384 reloc_info_size_(reloc_info.length()),
385 source_positions_size_(source_position_table.length()),
386 inlining_positions_size_(inlining_positions.length()),
387 deopt_data_size_(deopt_data.length()),
388 protected_instructions_size_(protected_instructions_data.length()),
389 index_(index),
390 constant_pool_offset_(constant_pool_offset),
391 stack_slots_(stack_slots),
392 ool_spills_(ool_spills),
393 tagged_parameter_slots_(tagged_parameter_slots),
394 safepoint_table_offset_(safepoint_table_offset),
395 handler_table_offset_(handler_table_offset),
396 code_comments_offset_(code_comments_offset),
397 unpadded_binary_size_(unpadded_binary_size),
398 flags_(KindField::encode(kind) | ExecutionTierField::encode(tier) |
399 ForDebuggingField::encode(for_debugging) |
400 FrameHasFeedbackSlotField::encode(frame_has_feedback_slot)) {
401 DCHECK_LE(safepoint_table_offset, unpadded_binary_size);
402 DCHECK_LE(handler_table_offset, unpadded_binary_size);
403 DCHECK_LE(code_comments_offset, unpadded_binary_size);
404 DCHECK_LE(constant_pool_offset, unpadded_binary_size);
405 }
406
407 std::unique_ptr<const uint8_t[]> ConcatenateBytes(
408 std::initializer_list<base::Vector<const uint8_t>>);
409
410 // Code objects that have been registered with the global trap
411 // handler within this process will have a {trap_handler_index} associated
412 // with them.
413 int trap_handler_index() const {
414 CHECK(has_trap_handler_index());
415 return trap_handler_index_;
416 }
417 void set_trap_handler_index(int value) {
418 CHECK(!has_trap_handler_index());
419 trap_handler_index_ = value;
420 }
421 bool has_trap_handler_index() const { return trap_handler_index_ >= 0; }
422
423 // Register protected instruction information with the trap handler. Sets
424 // trap_handler_index.
425 void RegisterTrapHandlerData();
426
427 // Slow path for {DecRef}: The code becomes potentially dead. Schedule it
428 // for consideration in the next Code GC cycle.
429 V8_NOINLINE void DecRefOnPotentiallyDeadCode();
430
431 void mark_as_dying() { dying_.store(true, std::memory_order_release); }
432 // This is only rarely necessary, to mitigate a race condition. See the comment
433 // at its (only) call site.
434 void undo_mark_as_dying() { dying_.store(false, std::memory_order_release); }
435
436 NativeModule* const native_module_ = nullptr;
437 uint8_t* const instructions_;
438 const uint64_t signature_hash_;
439 // {meta_data_} contains several byte vectors concatenated into one:
440 // - protected instructions data of size {protected_instructions_size_}
441 // - relocation info of size {reloc_info_size_}
442 // - source positions of size {source_positions_size_}
443 // - inlining positions of size {inlining_positions_size_} and deopt data of size {deopt_data_size_}
444 // Note that the protected instructions come first to ensure alignment.
445 std::unique_ptr<const uint8_t[]> meta_data_;
452 const int index_; // The wasm function-index within the module.
454 const int stack_slots_;
455 const int ool_spills_;
456 // Number and position of tagged parameters passed to this function via the
457 // stack, packed into a single uint32. These values are used by the stack
458 // walker (e.g. GC) to find references.
460 // We care about safepoint data for wasm-to-js functions, since there may be
461 // stack/register tagged values for large number conversions.
466 int trap_handler_index_ = -1;
467
468 const uint8_t flags_; // Bit field, see below.
469 // Bits encoded in {flags_}:
470#if !V8_ENABLE_DRUMBRAKE
472#else // !V8_ENABLE_DRUMBRAKE
473 // We have an additional kind: Wasm interpreter.
475#endif // !V8_ENABLE_DRUMBRAKE
479
480 // Will be set to {true} the first time this code object is considered
481 // "potentially dead" (to be confirmed by the next Wasm Code GC cycle).
482 std::atomic<bool> dying_{false};
483
484 // WasmCode is ref counted. Counters are held by:
485 // 1) The jump table / code table.
486 // 2) {WasmCodeRefScope}s.
487 // 3) The set of potentially dead code in the {WasmEngine}.
488 // If a decrement of (1) would drop the ref count to 0, that code becomes a
489 // candidate for garbage collection. At that point, we add a ref count for (3)
490 // *before* decrementing the counter to ensure the code stays alive as long as
491 // it's being used. Once the ref count drops to zero (i.e. after being removed
492 // from (3) and all (2)), the code object is deleted and the memory for the
493 // machine code is freed.
494 std::atomic<int> ref_count_{1};
495};
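// Ref-counting sketch (illustrative): per the comments in {WasmCode}, a code
// object starts with ref count 1 (held by the code/jump table). Temporary C++
// references are usually managed via {WasmCodeRefScope} (declared further
// below), but the raw contract looks like this; {UseCode} is a hypothetical
// placeholder:
//
//   WasmCode* code = ...;  // e.g. obtained from a NativeModule lookup
//   code->IncRef();        // hold a temporary C++ reference
//   UseCode(code);
//   if (code->DecRef()) {
//     // This dropped the last reference: the code is dead and must be freed
//     // (in practice this is handled by the Wasm code GC in the WasmEngine).
//   }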
496
497WasmCode::Kind GetCodeKind(const WasmCompilationResult& result);
498
499// Return a textual description of the kind.
500const char* GetWasmCodeKindAsString(WasmCode::Kind kind);
501
502// Unpublished code is still tied to the assumptions made when generating this
503// code; those will be checked right before publishing.
504struct UnpublishedWasmCode {
505 std::unique_ptr<WasmCode> code;
506 std::unique_ptr<AssumptionsJournal> assumptions;
507
508 static constexpr AssumptionsJournal* kNoAssumptions = nullptr;
509};
510
511// Manages the code reservations and allocations of a single {NativeModule}.
512class WasmCodeAllocator {
513 public:
514 explicit WasmCodeAllocator(std::shared_ptr<Counters> async_counters);
516
517 // Call before use, after the {NativeModule} is set up completely.
518 void Init(VirtualMemory code_space);
519
520 // Call on newly allocated code ranges, to write platform-specific headers.
521 void InitializeCodeRange(NativeModule* native_module,
522 base::AddressRegion region);
523
524 size_t committed_code_space() const {
525 return committed_code_space_.load(std::memory_order_acquire);
526 }
527 size_t generated_code_size() const {
528 return generated_code_size_.load(std::memory_order_acquire);
529 }
530 size_t freed_code_size() const {
531 return freed_code_size_.load(std::memory_order_acquire);
532 }
533
534 // Allocate code space. Returns a valid buffer or fails with OOM (crash).
535 // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
536 base::Vector<uint8_t> AllocateForCode(NativeModule*, size_t size);
537 // Same, but for wrappers (which are shared across NativeModules).
538 base::Vector<uint8_t> AllocateForWrapper(size_t size);
539
540 // Allocate code space within a specific region. Returns a valid buffer or
541 // fails with OOM (crash).
542 // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
543 base::Vector<uint8_t> AllocateForCodeInRegion(NativeModule*, size_t size,
544 base::AddressRegion);
545
546 // Free memory pages of all given code objects. Used for wasm code GC.
547 // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
548 void FreeCode(base::Vector<WasmCode* const>);
549
550 // Retrieve the number of separately reserved code spaces.
551 // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
552 size_t GetNumCodeSpaces() const;
553
554 Counters* counters() const { return async_counters_.get(); }
555
556 private:
558 // These fields are protected by the mutex in {NativeModule}.
559
560 // Code space that was reserved and is available for allocations
561 // (subset of {owned_code_space_}).
563 // Code space that was allocated before but is dead now. Full
564 // pages within this region are discarded. It's still a subset of
565 // {owned_code_space_}.
567 std::vector<VirtualMemory> owned_code_space_;
568
569 // End of fields protected by {mutex_}.
571
572 std::atomic<size_t> committed_code_space_{0};
573 std::atomic<size_t> generated_code_size_{0};
574 std::atomic<size_t> freed_code_size_{0};
575
576 std::shared_ptr<Counters> async_counters_;
577};
578
579class V8_EXPORT_PRIVATE NativeModule final {
580 public:
581 static constexpr ExternalPointerTag kManagedTag = kWasmNativeModuleTag;
582
583#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_ARM64 || \
584 V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_LOONG64 || \
585 V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64
586 static constexpr bool kNeedsFarJumpsBetweenCodeSpaces = true;
587#else
588 static constexpr bool kNeedsFarJumpsBetweenCodeSpaces = false;
589#endif
590
591 NativeModule(const NativeModule&) = delete;
592 NativeModule& operator=(const NativeModule&) = delete;
594
595 // {AddCode} is thread safe w.r.t. other calls to {AddCode} or methods adding
596 // code below, i.e. it can be called concurrently from background threads.
597 // The returned code still needs to be published via {PublishCode}.
598 std::unique_ptr<WasmCode> AddCode(
599 int index, const CodeDesc& desc, int stack_slots, int ool_spill_count,
600 uint32_t tagged_parameter_slots,
601 base::Vector<const uint8_t> protected_instructions,
602 base::Vector<const uint8_t> source_position_table,
603 base::Vector<const uint8_t> inlining_positions,
605 ExecutionTier tier, ForDebugging for_debugging);
606
607 // {PublishCode} makes the code available to the system by entering it into
608 // the code table and patching the jump table. It returns a raw pointer to the
609 // {WasmCode} object in the argument. Ownership is transferred to the
610 // {NativeModule}. Returns {nullptr} if the {AssumptionsJournal} in the
611 // argument is non-nullptr and contains invalid assumptions.
612 WasmCode* PublishCode(UnpublishedWasmCode);
613 std::vector<WasmCode*> PublishCode(base::Vector<UnpublishedWasmCode>);
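// Publishing sketch (illustrative): code returned by {AddCode} is not yet
// reachable; it becomes callable only after being wrapped into an
// {UnpublishedWasmCode} and handed to {PublishCode}, which enters it into the
// code table and patches the jump table. The {AddCode} arguments are elided
// here, and the brace-initialization is an assumption based on the struct's
// two members declared earlier in this file ({nullptr} stands in for
// {UnpublishedWasmCode::kNoAssumptions}):
//
//   std::unique_ptr<WasmCode> code = AddCode(/* compilation outputs */);
//   UnpublishedWasmCode unpublished{std::move(code),
//                                   /* assumptions */ nullptr};
//   WasmCode* published = PublishCode(std::move(unpublished));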
614
615 // Clears outdated code as necessary when a new instantiation's imports
616 // conflict with previously seen well-known imports.
617 void UpdateWellKnownImports(base::Vector<WellKnownImport> entries);
618
619 // ReinstallDebugCode does a subset of PublishCode: It installs the code in
620 // the code table and patches the jump table. The given code must be debug
621 // code (with breakpoints) and must be owned by this {NativeModule} already.
622 // This method is used to re-instantiate code that was removed from the code
623 // table and jump table via another {PublishCode}.
624 void ReinstallDebugCode(WasmCode*);
625
627 Address jump_table_start = kNullAddress;
628 Address far_jump_table_start = kNullAddress;
629
630 bool is_valid() const { return far_jump_table_start != kNullAddress; }
631 };
632
633 std::pair<base::Vector<uint8_t>, JumpTablesRef> AllocateForDeserializedCode(
634 size_t total_code_size);
635
636 std::unique_ptr<WasmCode> AddDeserializedCode(
637 int index, base::Vector<uint8_t> instructions, int stack_slots,
638 int ool_spills, uint32_t tagged_parameter_slots,
639 int safepoint_table_offset, int handler_table_offset,
640 int constant_pool_offset, int code_comments_offset,
641 int unpadded_binary_size,
642 base::Vector<const uint8_t> protected_instructions_data,
644 base::Vector<const uint8_t> source_position_table,
645 base::Vector<const uint8_t> inlining_positions,
647 ExecutionTier tier);
648
649 // Adds anonymous code for testing purposes.
650 WasmCode* AddCodeForTesting(DirectHandle<Code> code, uint64_t signature_hash);
651
652 // Allocates and initializes the {lazy_compile_table_} and initializes the
653 // first jump table with jumps to the {lazy_compile_table_}.
654 void InitializeJumpTableForLazyCompilation(uint32_t num_wasm_functions);
655
656 // Initialize/Free the code pointer table handles for declared functions.
657 void InitializeCodePointerTableHandles(uint32_t num_wasm_functions);
658 void FreeCodePointerTableHandles();
659
660 // Use {UseLazyStubLocked} to set up lazy compilation per function. It will use
661 // the existing {WasmCode::kWasmCompileLazy} runtime stub and populate the
662 // jump table with trampolines accordingly.
663 void UseLazyStubLocked(uint32_t func_index);
664
665 // Creates a snapshot of the current state of the code table, along with the
666 // current import statuses that these code objects depend on. This is useful
667 // to get a consistent view of the table (e.g. used by the serializer).
668 std::pair<std::vector<WasmCode*>, std::vector<WellKnownImport>>
669 SnapshotCodeTable() const;
670 // Creates a snapshot of all {owned_code_}; this will transfer new code (if
671 // any) to {owned_code_} first.
672 std::vector<WasmCode*> SnapshotAllOwnedCode() const;
673
674 WasmCode* GetCode(uint32_t index) const;
675 bool HasCode(uint32_t index) const;
676 bool HasCodeWithTier(uint32_t index, ExecutionTier tier) const;
677
678 void SetWasmSourceMap(std::unique_ptr<WasmModuleSourceMap> source_map);
679 WasmModuleSourceMap* GetWasmSourceMap() const;
680
681 Address jump_table_start() const {
682 return main_jump_table_ ? main_jump_table_->instruction_start()
683 : kNullAddress;
684 }
685
686 // Get the call target in the jump table previously looked up via
687 // {FindJumpTablesForRegionLocked}.
688 Address GetNearCallTargetForFunction(uint32_t func_index,
689 const JumpTablesRef&) const;
690
691 // Get the slot offset in the far jump table that jumps to the given builtin.
692 Address GetJumpTableEntryForBuiltin(Builtin builtin,
693 const JumpTablesRef&) const;
694
695 // Reverse lookup from a given call target (which must be a jump table slot)
696 // to a function index.
697 uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const;
698
699 using CallIndirectTargetMap = absl::flat_hash_map<WasmCodePointer, uint32_t>;
700 CallIndirectTargetMap CreateIndirectCallTargetToFunctionIndexMap() const;
701
702 // Log all owned code in the given isolate, using the given script as the
703 // containing script. Use this after transferring the module to a new isolate
704 // or when enabling a component that needs all code to be logged (profiler).
705 void LogWasmCodes(Isolate*, Tagged<Script>);
706
707 CompilationState* compilation_state() const {
708 return compilation_state_.get();
709 }
710
711 uint32_t num_functions() const {
712 return module_->num_declared_functions + module_->num_imported_functions;
713 }
714 uint32_t num_imported_functions() const {
715 return module_->num_imported_functions;
716 }
717 uint32_t num_declared_functions() const {
718 return module_->num_declared_functions;
719 }
720 void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; }
721 bool lazy_compile_frozen() const { return lazy_compile_frozen_; }
722 base::Vector<const uint8_t> wire_bytes() const {
723 return std::atomic_load(&wire_bytes_)->as_vector();
724 }
725 const WasmModule* module() const { return module_.get(); }
726 std::shared_ptr<const WasmModule> shared_module() const { return module_; }
727 size_t committed_code_space() const {
728 return code_allocator_.committed_code_space();
729 }
730 size_t generated_code_size() const {
731 return code_allocator_.generated_code_size();
732 }
733 size_t liftoff_bailout_count() const {
734 return liftoff_bailout_count_.load(std::memory_order_relaxed);
735 }
736 size_t liftoff_code_size() const {
737 return liftoff_code_size_.load(std::memory_order_relaxed);
738 }
739 size_t turbofan_code_size() const {
740 return turbofan_code_size_.load(std::memory_order_relaxed);
741 }
742
743 void AddLazyCompilationTimeSample(int64_t sample);
744
746 return num_lazy_compilations_.load(std::memory_order_relaxed);
747 }
748
750 return sum_lazy_compilation_time_in_micro_sec_.load(
751 std::memory_order_relaxed) /
752 1000;
753 }
754
756 return max_lazy_compilation_time_in_micro_sec_.load(
757 std::memory_order_relaxed) /
758 1000;
759 }
760
761 // To avoid double-reporting, only the first instantiation should report lazy
762 // compilation performance metrics.
764 return should_metrics_be_reported_.exchange(false,
765 std::memory_order_relaxed);
766 }
767
768 // Similar to above, scheduling a repeated task to write out PGO data is only
769 // needed once per module, not per instantiation.
771 return should_pgo_data_be_written_.exchange(false,
772 std::memory_order_relaxed);
773 }
774
775 bool HasWireBytes() const {
776 auto wire_bytes = std::atomic_load(&wire_bytes_);
777 return wire_bytes && !wire_bytes->empty();
778 }
779 void SetWireBytes(base::OwnedVector<const uint8_t> wire_bytes);
780
782 liftoff_bailout_count_.fetch_add(1, std::memory_order_relaxed);
783 }
784
785 WasmCode* Lookup(Address) const;
786
788 const CompileTimeImports& compile_imports() const { return compile_imports_; }
789
790 // Returns the builtin that corresponds to the given address (which
791 // must be a far jump table slot). Returns {kNoBuiltinId} on failure.
792 Builtin GetBuiltinInJumptableSlot(Address target) const;
793
794 // Sample the current code size of this module into the given counters.
795 void SampleCodeSize(Counters*) const;
796
798 AddCompiledCode(WasmCompilationResult&);
799 V8_WARN_UNUSED_RESULT std::vector<UnpublishedWasmCode> AddCompiledCode(
801
802 // Set a new debugging state, but don't trigger any recompilation;
803 // recompilation happens lazily.
804 void SetDebugState(DebugState);
805
806 // Check whether this module is in debug state.
808 base::RecursiveMutexGuard lock(&allocation_mutex_);
809 return debug_state_;
810 }
811
812 enum class RemoveFilter {
813 kRemoveDebugCode,
814 kRemoveNonDebugCode,
815 kRemoveLiftoffCode,
816 kRemoveTurbofanCode,
817 kRemoveAllCode,
818 };
819 // Remove all compiled code based on the `filter` from the {NativeModule},
820 // replace it with {CompileLazy} builtins and return the sizes of the removed
821 // (executable) code and the removed metadata.
822 std::pair<size_t, size_t> RemoveCompiledCode(RemoveFilter filter);
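// Example (illustrative; the variable names are placeholders): dropping all
// Liftoff code after tier-up could look like
//   auto [code_size, meta_size] =
//       RemoveCompiledCode(RemoveFilter::kRemoveLiftoffCode);
// where the removed functions fall back to the {CompileLazy} builtin as
// described above.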
823
824 // Returns the code size of all Liftoff compiled functions.
825 size_t SumLiftoffCodeSizeForTesting() const;
826
827 // Free a set of functions of this module. Uncommits whole pages if possible.
828 // The given vector must be ordered by the instruction start address, and all
829 // {WasmCode} objects must not be used any more.
830 // Should only be called via {WasmEngine::FreeDeadCode}, so the engine can do
831 // its accounting.
832 void FreeCode(base::Vector<WasmCode* const>);
833
834 // Retrieve the number of separately reserved code spaces for this module.
835 size_t GetNumberOfCodeSpacesForTesting() const;
836
837 // Check whether there is DebugInfo for this NativeModule.
838 bool HasDebugInfo() const;
839
840 // Get or create the debug info for this NativeModule.
841 DebugInfo* GetDebugInfo();
842
843 // Get or create the NamesProvider. Requires {HasWireBytes()}.
844 NamesProvider* GetNamesProvider();
845
846 std::atomic<uint32_t>* tiering_budget_array() const {
847 return tiering_budgets_.get();
848 }
849
850 Counters* counters() const { return code_allocator_.counters(); }
851
852 // Returns an approximation of current off-heap memory used by this module.
853 size_t EstimateCurrentMemoryConsumption() const;
854 // Print the current memory consumption estimate to standard output.
855 void PrintCurrentMemoryConsumptionEstimate() const;
856
857 bool log_code() const { return log_code_.load(std::memory_order_relaxed); }
858
859 void EnableCodeLogging() { log_code_.store(true, std::memory_order_relaxed); }
860
862 log_code_.store(false, std::memory_order_relaxed);
863 }
864
865 enum class JumpTableType {
866 kJumpTable,
867 kFarJumpTable,
868 kLazyCompileTable,
869 };
870
871 // This function tries to set the fast API call target of function import
872 // `func_index`. If the call target has been set before with a different value,
873 // then this function returns false, and this import will be marked as not
874 // suitable for well-known imports, i.e. all existing compiled code of the
875 // module gets flushed, and future calls to this import will not use fast API
876 // calls.
877 bool TrySetFastApiCallTarget(int func_index, Address target) {
878 Address old_val =
879 fast_api_targets_[func_index].load(std::memory_order_relaxed);
880 if (old_val == target) {
881 return true;
882 }
883 if (old_val != kNullAddress) {
884 // If a different target is already stored, then there are conflicting
885 // targets and fast API calls are not possible. In that case the import
886 // will be marked as not suitable for well-known imports, and the
887 // `fast_api_target` of this import will never be used anymore in the
888 // future.
889 return false;
890 }
891 if (fast_api_targets_[func_index].compare_exchange_strong(
892 old_val, target, std::memory_order_relaxed)) {
893 return true;
894 }
895 // If a concurrent call to `TrySetFastApiCallTarget` set the call target to
896 // the same value as this call, we also consider this call successful.
897 return old_val == target;
898 }
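// Example (illustrative; {target_a} and {target_b} are placeholder
// addresses): concurrent callers that agree on the target all succeed, while
// a conflicting target permanently disables the fast path for this import:
//   TrySetFastApiCallTarget(0, target_a);  // true
//   TrySetFastApiCallTarget(0, target_a);  // true, same value
//   TrySetFastApiCallTarget(0, target_b);  // false, conflicting target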
899
900 std::atomic<Address>* fast_api_targets() const {
901 return fast_api_targets_.get();
902 }
903
904 // Stores the signature of the C++ call target of an imported web API
905 // function. The signature was copied from the `FunctionTemplateInfo` object
906 // of the web API function into the `signature_zone` of the `WasmModule` so
907 // that it stays alive as long as the `WasmModule` exists.
908 void set_fast_api_signature(int func_index, const MachineSignature* sig) {
909 fast_api_signatures_[func_index] = sig;
910 }
911
912 bool has_fast_api_signature(int index) {
913 return fast_api_signatures_[index] != nullptr;
914 }
915
916 std::atomic<const MachineSignature*>* fast_api_signatures() const {
917 return fast_api_signatures_.get();
918 }
919
920 WasmCodePointer GetCodePointerHandle(int index) const;
921
922 private:
923 friend class WasmCode;
924 friend class WasmCodeAllocator;
925 friend class WasmCodeManager;
927
933
934 // Private constructor, called via {WasmCodeManager::NewNativeModule()}.
935 NativeModule(WasmEnabledFeatures enabled_features,
936 WasmDetectedFeatures detected_features,
937 CompileTimeImports compile_imports, VirtualMemory code_space,
938 std::shared_ptr<const WasmModule> module,
939 std::shared_ptr<Counters> async_counters,
940 std::shared_ptr<NativeModule>* shared_this);
941
942 std::unique_ptr<WasmCode> AddCodeWithCodeSpace(
943 int index, const CodeDesc& desc, int stack_slots, int ool_spill_count,
944 uint32_t tagged_parameter_slots,
945 base::Vector<const uint8_t> protected_instructions_data,
946 base::Vector<const uint8_t> source_position_table,
947 base::Vector<const uint8_t> inlining_positions,
949 ExecutionTier tier, ForDebugging for_debugging,
950 bool frame_has_feedback_slot, base::Vector<uint8_t> code_space,
951 const JumpTablesRef& jump_tables_ref);
952
953 WasmCode* CreateEmptyJumpTableLocked(int jump_table_size, JumpTableType type);
954
955 WasmCode* CreateEmptyJumpTableInRegionLocked(int jump_table_size,
957 JumpTableType type);
958
959 // Finds the jump tables that should be used for a given code region. This
960 // information is then passed to {GetNearCallTargetForFunction} and
961 // {GetJumpTableEntryForBuiltin} to avoid the overhead of looking this information
962 // up there. Return an empty struct if no suitable jump tables exist.
963 JumpTablesRef FindJumpTablesForRegionLocked(base::AddressRegion) const;
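// Lookup sketch (illustrative): as described above, callers resolve the jump
// tables for a code region once and then reuse the result for individual
// targets, with {allocation_mutex_} held (as the "Locked" suffix indicates).
// {code_region}, {func_index} and the chosen builtin are placeholders:
//
//   JumpTablesRef ref = FindJumpTablesForRegionLocked(code_region);
//   if (ref.is_valid()) {
//     Address callee = GetNearCallTargetForFunction(func_index, ref);
//     Address stub = GetJumpTableEntryForBuiltin(Builtin::kWasmCompileLazy, ref);
//   }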
964
965 void UpdateCodeSize(size_t, ExecutionTier, ForDebugging);
966
967 // Hold the {allocation_mutex_} when calling one of these methods.
968 // {slot_index} is the index in the declared functions, i.e. function index
969 // minus the number of imported functions.
970 // The {code_pointer_table_target} will be used to update the code pointer
971 // table. It should usually be the same as target, except for jumps to the lazy
972 // compile table, which doesn't have the bti instruction on ARM and is thus not
973 // a valid target for indirect branches.
974 void PatchJumpTablesLocked(uint32_t slot_index, Address target,
975 Address code_pointer_table_target,
976 uint64_t signature_hash);
977 void PatchJumpTableLocked(WritableJumpTablePair& jump_table_pair,
978 const CodeSpaceData&, uint32_t slot_index,
979 Address target);
980
981 // Called by the {WasmCodeAllocator} to register a new code space.
982 void AddCodeSpaceLocked(base::AddressRegion);
983
984 // Hold the {allocation_mutex_} when calling {PublishCodeLocked}.
985 // This takes the code by value because ownership will be transferred to the
986 // {NativeModule}. The {AssumptionsJournal} (if provided) will be checked
987 // before publishing the code, but should only be deallocated by the caller
988 // after releasing the lock, to keep the critical section small.
989 WasmCode* PublishCodeLocked(std::unique_ptr<WasmCode>, AssumptionsJournal*);
990
991 // Transfer owned code from {new_owned_code_} to {owned_code_}.
992 void TransferNewOwnedCodeLocked() const;
993
994 bool should_update_code_table(WasmCode* new_code, WasmCode* prior_code) const;
995
996 // -- Fields of {NativeModule} start here.
997
998 // Keep the engine alive as long as this NativeModule is alive. In its
999 // destructor, the NativeModule still communicates with the WasmCodeManager,
1000 // owned by the engine. This field comes before other fields which also still
1001 // access the engine (like the code allocator), so that its destructor runs
1002 // last.
1003 OperationsBarrier::Token engine_scope_;
1004
1005 // {WasmCodeAllocator} manages all code reservations and allocations for this
1006 // {NativeModule}.
1007 WasmCodeAllocator code_allocator_;
1008
1009 // Features enabled for this module. We keep a copy of the features that
1010 // were enabled at the time of the creation of this native module,
1011 // to be consistent across asynchronous compilations later.
1012 const WasmEnabledFeatures enabled_features_;
1013
1014 // Compile-time imports requested for this module.
1015 const CompileTimeImports compile_imports_;
1016
1017 // The decoded module, stored in a shared_ptr such that background compile
1018 // tasks can keep this alive.
1019 std::shared_ptr<const WasmModule> module_;
1020
1021 std::unique_ptr<WasmModuleSourceMap> source_map_;
1022
1023 // Wire bytes, held in a shared_ptr so they can be kept alive by the
1024 // {WireBytesStorage}, held by background compile tasks.
1025 std::shared_ptr<base::OwnedVector<const uint8_t>> wire_bytes_;
1026
1027 // The first allocated jump table. Always used by external calls (from JS).
1028 // Wasm calls might use one of the other jump tables stored in
1029 // {code_space_data_}.
1030 WasmCode* main_jump_table_ = nullptr;
1031
1032 // The first allocated far jump table.
1033 WasmCode* main_far_jump_table_ = nullptr;
1034
1035 // Lazy compile stub table, containing entries to jump to the
1036 // {WasmCompileLazy} builtin, passing the function index.
1037 WasmCode* lazy_compile_table_ = nullptr;
1038
1039 // The compilation state keeps track of compilation tasks for this module.
1040 // Note that its destructor blocks until all tasks are finished/aborted and
1041 // hence needs to be destructed first when this native module dies.
1042 std::unique_ptr<CompilationState> compilation_state_;
1043
1044 // Array to handle number of function calls.
1045 std::unique_ptr<std::atomic<uint32_t>[]> tiering_budgets_;
1046
1047 // This mutex protects concurrent calls to {AddCode} and friends.
1048 // TODO(dlehmann): Revert this to a regular {Mutex} again.
1049 // This needs to be a {RecursiveMutex} only because of {CodeSpaceWriteScope}
1050 // usages, which are (1) either at places that already hold the
1051 // {allocation_mutex_} or (2) because of multiple open {CodeSpaceWriteScope}s
1052 // in the call hierarchy. Both are fixable.
1053 mutable base::RecursiveMutex allocation_mutex_;
1054
1056 // Protected by {allocation_mutex_}:
1057
1058 // Holds allocated code objects for fast lookup and deletion. For lookup based
1059 // on pc, the key is the instruction start address of the value. Filled lazily
1060 // from {new_owned_code_} (below).
1061 mutable std::map<Address, std::unique_ptr<WasmCode>> owned_code_;
1062
1063 // Holds owned code which is not inserted into {owned_code_} yet. It will be
1064 // inserted on demand. This has much better performance than inserting
1065 // individual code objects.
1066 mutable std::vector<std::unique_ptr<WasmCode>> new_owned_code_;
1067
1068 // Table of the latest code object per function, updated on initial
1069 // compilation and tier up. The number of entries is
1070 // {WasmModule::num_declared_functions}, i.e. there are no entries for
1071 // imported functions.
1072 std::unique_ptr<WasmCode*[]> code_table_;
1073
1074 // CodePointerTable handles for all declared functions. The entries are
1075 // initialized to point to the lazy compile table and will later be updated to
1076 // point to the compiled code.
1077 std::unique_ptr<WasmCodePointer[]> code_pointer_handles_;
1078 // The size will usually be num_declared_functions, except that we sometimes
1079 // allocate larger arrays for testing.
1080 size_t code_pointer_handles_size_ = 0;
1081
1082 // Data (especially jump table) per code space.
1083 std::vector<CodeSpaceData> code_space_data_;
1084
1085 // Debug information for this module. You only need to hold the allocation
1086 // mutex while getting the {DebugInfo} pointer, or initializing this field.
1087 // Further accesses to the {DebugInfo} do not need to be protected by the
1088 // mutex.
1089 std::unique_ptr<DebugInfo> debug_info_;
1090
1091 std::unique_ptr<NamesProvider> names_provider_;
1092
1094
1095 // End of fields protected by {allocation_mutex_}.
1097
1098 bool lazy_compile_frozen_ = false;
1099 std::atomic<size_t> liftoff_bailout_count_{0};
1100 std::atomic<size_t> liftoff_code_size_{0};
1101 std::atomic<size_t> turbofan_code_size_{0};
1102
1103 // Metrics for lazy compilation.
1104 std::atomic<int> num_lazy_compilations_{0};
1105 std::atomic<int64_t> sum_lazy_compilation_time_in_micro_sec_{0};
1106 std::atomic<int64_t> max_lazy_compilation_time_in_micro_sec_{0};
1107 std::atomic<bool> should_metrics_be_reported_{true};
1108
1109 // Whether the next instantiation should trigger repeated output of PGO data
1110 // (if --experimental-wasm-pgo-to-file is enabled).
1111 std::atomic<bool> should_pgo_data_be_written_{true};
1112
1113 // A lock-free quick-access flag to indicate whether code for this
1114 // NativeModule might need to be logged in any isolate. This is updated by the
1115 // {WasmEngine}, which keeps the source of truth. After checking this flag,
1116 // you would typically call into {WasmEngine::LogCode} which then checks
1117 // (under a mutex) which isolate needs logging.
1118 std::atomic<bool> log_code_{false};
1119
1120 std::unique_ptr<std::atomic<Address>[]> fast_api_targets_;
1121 std::unique_ptr<std::atomic<const MachineSignature*>[]> fast_api_signatures_;
1122};
1123
1124class V8_EXPORT_PRIVATE WasmCodeManager final {
1125 public:
1129
1131
1132#if defined(V8_OS_WIN64)
1133 static bool CanRegisterUnwindInfoForNonABICompliantCodeRange();
1134#endif // V8_OS_WIN64
1135
1136 NativeModule* LookupNativeModule(Address pc) const;
1137 // Returns the Wasm code that contains the given address. The result
1138 // is cached. There is one cache per isolate for performance reasons
1139 // (to avoid locking and reference counting). Note that the returned
1140 // value is not reference counted. This should not be an issue since
1141 // we expect that the code is currently being executed. If 'isolate'
1142 // is nullptr, no caching occurs.
1143 WasmCode* LookupCode(Isolate* isolate, Address pc) const;
1144 std::pair<WasmCode*, SafepointEntry> LookupCodeAndSafepoint(Isolate* isolate,
1145 Address pc);
1146 void FlushCodeLookupCache(Isolate* isolate);
1147 size_t committed_code_space() const {
1148 return total_committed_code_space_.load();
1149 }
1150
1151 // Estimate the needed code space for a Liftoff function based on the size of
1152 // the function body (wasm byte code).
1153 static size_t EstimateLiftoffCodeSize(int body_size);
1154 // Estimate the needed code space from a completely decoded module.
1155 static size_t EstimateNativeModuleCodeSize(const WasmModule*);
1156 // Estimate the needed code space from the number of functions and total code
1157 // section length.
1158 static size_t EstimateNativeModuleCodeSize(int num_functions,
1159 int code_section_length);
1160 // Estimate the size of metadata needed for the NativeModule, excluding
1161 // generated code. This data is stored on the C++ heap.
1162 static size_t EstimateNativeModuleMetaDataSize(const WasmModule*);
1163
1164 // Returns true if there is hardware support for PKU. Use
1165 // {MemoryProtectionKeysEnabled} to also check if PKU usage is enabled via
1166 // flags.
1167 static bool HasMemoryProtectionKeySupport();
1168
1169 // Returns true if PKU should be used.
1170 static bool MemoryProtectionKeysEnabled();
1171
1172 // Returns {true} if the memory protection key is write-enabled for the
1173 // current thread.
1174 // Can only be called if {HasMemoryProtectionKeySupport()} is {true}.
1175 static bool MemoryProtectionKeyWritable();
1176
1177 private:
1178 friend class WasmCodeAllocator;
1180 friend class WasmEngine;
1182
1183 std::shared_ptr<NativeModule> NewNativeModule(
1184 Isolate* isolate, WasmEnabledFeatures enabled_features,
1185 WasmDetectedFeatures detected_features,
1186 CompileTimeImports compile_imports, size_t code_size_estimate,
1187 std::shared_ptr<const WasmModule> module);
1188
1189 V8_WARN_UNUSED_RESULT VirtualMemory TryAllocate(size_t size);
1190 void Commit(base::AddressRegion);
1191 void Decommit(base::AddressRegion);
1192
1193 void FreeNativeModule(base::Vector<VirtualMemory> owned_code,
1194 size_t committed_size);
1195
1196 void AssignRange(base::AddressRegion, NativeModule*);
1197
1198 WasmCode* LookupCode(Address pc) const;
1199
1201
1202 std::atomic<size_t> total_committed_code_space_{0};
1203 // If the committed code space exceeds {critical_committed_code_space_}, then
1204 // we trigger a GC before creating the next module. This value is set to the
1205 // currently committed space plus 50% of the available code space on creation
1206 // and updated after each GC.
1207 std::atomic<size_t> critical_committed_code_space_;
1208
1210
1212 // Protected by {native_modules_mutex_}:
1213
1214 std::map<Address, std::pair<Address, NativeModule*>> lookup_map_;
1215
1216 // End of fields protected by {native_modules_mutex_}.
1218
1219 // We remember the end address of the last allocated code space and use that
1220 // as a hint for the next code space. As the WasmCodeManager is shared by the
1221 // whole process, this ensures that Wasm code spaces are allocated next to each
1222 // other with a high likelihood. This improves the performance of cross-module
1223 // calls as the branch predictor can only predict indirect call targets within
1224 // a certain range around the call instruction.
1225 std::atomic<Address> next_code_space_hint_;
1226};
1227
1228// {WasmCodeRefScope}s form a perfect stack. New {WasmCode} pointers generated
1229// by e.g. creating new code or looking up code by its address are added to the
1230// top-most {WasmCodeRefScope}.
1232 public:
1237
1238 // Register a {WasmCode} reference in the current {WasmCodeRefScope}. Fails if
1239 // there is no current scope.
1240 static void AddRef(WasmCode*);
1241
1242 private:
1244 std::vector<WasmCode*> code_ptrs_;
1245};
1246
1249
1250 public:
1252
1255
1256 private:
1263
1264 void Flush();
1265 CacheEntry* GetCacheEntry(Address pc);
1266
1267 static const int kWasmCodeLookupCacheSize = 1024;
1268 CacheEntry cache_[kWasmCodeLookupCacheSize];
1269};
1270
1271} // namespace wasm
1272} // namespace internal
1273} // namespace v8
1274
1275#endif // V8_WASM_WASM_CODE_MANAGER_H_