v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
wasm-import-wrapper-cache.cc
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/wasm/wasm-import-wrapper-cache.h"

#include <vector>

namespace v8::internal::wasm {

// The wrapper cache is shared per-process, but it is initialized on demand
// by some isolate, so we use that isolate for error reporting and for
// running GCs if required.
void WasmImportWrapperCache::LazyInitialize(Isolate* triggering_isolate) {
  base::RecursiveMutexGuard lock(&mutex_);
  if (code_allocator_.get() != nullptr) return;  // Already initialized.
  // Most wrappers are small (200-300 bytes), and most modules don't need
  // many; 32K is enough for ~100 wrappers.
  static constexpr size_t kInitialReservationSize = 1 << 15;
  // See {NewNativeModule} for reasoning.
  static constexpr int kAllocationRetries = 2;
  VirtualMemory code_space;
  for (int retries = 0;; ++retries) {
    code_space = GetWasmCodeManager()->TryAllocate(kInitialReservationSize);
    if (code_space.IsReserved()) break;
    if (retries == kAllocationRetries) {
      V8::FatalProcessOutOfMemory(
          triggering_isolate,
          "Failed to allocate code space for import wrappers");
      UNREACHABLE();
    }
    triggering_isolate->heap()->MemoryPressureNotification(
        MemoryPressureLevel::kCritical, true);
  }
  code_allocator_.reset(
      new WasmCodeAllocator(triggering_isolate->async_counters()));
  base::AddressRegion initial_region = code_space.region();
  code_allocator_->Init(std::move(code_space));
  code_allocator_->InitializeCodeRange(nullptr, initial_region);
}
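The allocation loop above is a retry-under-pressure pattern: try the reservation; on failure, report critical memory pressure (which triggers GCs that may release code space) and retry; fail fatally only after a fixed number of retries. Below is a minimal standalone sketch of the same control flow, where the hypothetical TryReserve() and ShedMemory() stand in for WasmCodeManager::TryAllocate() and Heap::MemoryPressureNotification().

// Hedged sketch; TryReserve() and ShedMemory() are illustrative stand-ins,
// not V8 API.
#include <cstddef>
#include <cstdio>
#include <cstdlib>

static int budget = 0;  // Reservations fail until memory is shed.

bool TryReserve(std::size_t bytes) { return budget-- > 0; }
void ShedMemory() { budget = 1; }  // E.g. run a GC, drop caches.

int main() {
  constexpr int kAllocationRetries = 2;
  for (int retries = 0;; ++retries) {
    if (TryReserve(1 << 15)) break;       // Success: stop retrying.
    if (retries == kAllocationRetries) {  // Out of retries: fail hard.
      std::fprintf(stderr, "out of code space\n");
      std::abort();
    }
    ShedMemory();  // Release memory, then retry the reservation.
  }
  std::puts("reservation succeeded");
}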

WasmCode* WasmImportWrapperCache::ModificationScope::AddWrapper(
    const CacheKey& key, WasmCompilationResult result, WasmCode::Kind kind,
    uint64_t signature_hash) {
  cache_->mutex_.AssertHeld();
  // Equivalent of NativeModule::AddCode().
  const CodeDesc& desc = result.code_desc;
  base::Vector<uint8_t> code_space =
      cache_->code_allocator_->AllocateForWrapper(desc.instr_size);

  // Equivalent of NativeModule::AddCodeWithCodeSpace().
  base::Vector<uint8_t> reloc_info{
      desc.buffer + desc.buffer_size - desc.reloc_size,
      static_cast<size_t>(desc.reloc_size)};
  // Unlike in a NativeModule, we don't track code size here, because we
  // have no source to attribute it to.
  const int safepoint_table_offset =
      desc.safepoint_table_size == 0 ? 0 : desc.safepoint_table_offset;
  const int handler_table_offset = desc.handler_table_offset;
  const int constant_pool_offset = desc.constant_pool_offset;
  const int code_comments_offset = desc.code_comments_offset;
  const int instr_size = desc.instr_size;
  {
    WritableJitAllocation jit_allocation =
        ThreadIsolation::RegisterJitAllocation(
            reinterpret_cast<Address>(code_space.begin()), code_space.size(),
            ThreadIsolation::JitAllocationType::kWasmCode);
    jit_allocation.CopyCode(0, desc.buffer, desc.instr_size);

    intptr_t delta = code_space.begin() - desc.buffer;
    Address code_start = reinterpret_cast<Address>(code_space.begin());
    Address constant_pool_start = code_start + constant_pool_offset;
    for (WritableRelocIterator it(jit_allocation, code_space, reloc_info,
                                  constant_pool_start, RelocInfo::kApplyMask);
         !it.done(); it.next()) {
      // Wrappers should contain no direct calls to Wasm functions.
      DCHECK(!RelocInfo::IsWasmCall(it.rinfo()->rmode()));
      // Wrappers should not call builtins via a Wasm jump table.
      DCHECK(!RelocInfo::IsWasmStubCall(it.rinfo()->rmode()));
      it.rinfo()->apply(delta);
    }
  }
  FlushInstructionCache(code_space.begin(), code_space.size());
  const int frame_slot_count = result.frame_slot_count;
  const int ool_spill_count = result.ool_spill_count;
  constexpr bool frame_has_feedback_slot = false;
  WasmCode* code = new WasmCode{nullptr /* no NativeModule */,
                                kAnonymousFuncIndex,
                                code_space,
                                frame_slot_count,
                                ool_spill_count,
                                result.tagged_parameter_slots,
                                safepoint_table_offset,
                                handler_table_offset,
                                constant_pool_offset,
                                code_comments_offset,
                                instr_size,
                                result.protected_instructions_data.as_vector(),
                                reloc_info,
                                result.source_positions.as_vector(),
                                result.inlining_positions.as_vector(),
                                result.deopt_data.as_vector(),
                                kind,
                                ExecutionTier::kNone,
                                kNotForDebugging,
                                signature_hash,
                                frame_has_feedback_slot};
  // The refcount of a WasmCode is initialized to 1. For wrappers, we track
  // all refcounts explicitly, i.e. there will be a call to {IncRef()} that
  // doesn't distinguish between newly compiled and older cached wrappers.
  // So at this point, we lower the refcount to zero (reflecting the fact that
  // there are no references yet), while using a WasmCodeRefScope to make sure
  // that this doesn't cause the WasmCode to be freed immediately.
  WasmCodeRefScope::AddRef(code);
  code->DecRefOnLiveCode();

  code->Validate();
  cache_->entry_map_[key] = code;
  // As an optimization, we assume that wrappers are allocated at increasing
  // memory addresses, so inserting with a hint at {codes.end()} is O(1).
  std::map<Address, WasmCode*>& codes = cache_->codes_;
  codes.emplace_hint(codes.end(), code->instruction_start(), code);
  return code;
}
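At its core, AddWrapper() copies the compiled bytes to their final location and relocates them: every absolute address embedded in the instructions is shifted by the delta between the new code start and the temporary buffer the compiler wrote into. Here is a toy sketch of that delta-patching idea, with one fake "embedded address" patched by hand instead of through V8's RelocInfo machinery:

// Toy sketch of delta-based relocation; not V8's RelocInfo machinery.
#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  // A fake "code object" with one absolute address embedded at offset 0,
  // pointing at offset 8 of the same buffer.
  uint8_t buffer[16] = {};
  uintptr_t target = reinterpret_cast<uintptr_t>(buffer) + 8;
  std::memcpy(buffer, &target, sizeof(target));

  // Copy the code to its final location (AllocateForWrapper + CopyCode).
  uint8_t code_space[16];
  std::memcpy(code_space, buffer, sizeof(buffer));

  // Relocate: shift the embedded address by the move distance
  // (what it.rinfo()->apply(delta) does above).
  intptr_t delta = reinterpret_cast<intptr_t>(code_space) -
                   reinterpret_cast<intptr_t>(buffer);
  uintptr_t patched;
  std::memcpy(&patched, code_space, sizeof(patched));
  patched += delta;
  std::memcpy(code_space, &patched, sizeof(patched));

  // The embedded address now points at offset 8 of the new location.
  assert(patched == reinterpret_cast<uintptr_t>(code_space) + 8);
}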

WasmCode* WasmImportWrapperCache::FindWrapper(WasmCodePointer call_target) {
  if (call_target == kInvalidWasmCodePointer) return nullptr;
  base::RecursiveMutexGuard lock(&mutex_);
  auto iter = codes_.find(
      GetProcessWideWasmCodePointerTable()->GetEntrypointWithoutSignatureCheck(
          call_target));
  if (iter == codes_.end()) return nullptr;
  WasmCodeRefScope::AddRef(iter->second);
  if (iter->second->is_dying()) return nullptr;
  return iter->second;
}
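FindWrapper() performs a two-step lookup: it first resolves the WasmCodePointer, a stable handle into the process-wide code pointer table, to an entrypoint address, and only then consults the address-keyed {codes_} map. A minimal sketch of that handle-then-address indirection, using illustrative names rather than V8's API:

// Illustrative sketch of the handle -> entrypoint -> wrapper lookup.
#include <cassert>
#include <cstdint>
#include <map>
#include <vector>

using Handle = uint32_t;
constexpr Handle kInvalidHandle = ~0u;

std::vector<uintptr_t> entrypoint_table;       // Handle -> code address.
std::map<uintptr_t, int> wrappers_by_address;  // Code address -> wrapper id.

const int* FindWrapper(Handle h) {
  if (h == kInvalidHandle) return nullptr;
  uintptr_t entrypoint = entrypoint_table[h];  // Resolve the handle first.
  auto it = wrappers_by_address.find(entrypoint);
  return it == wrappers_by_address.end() ? nullptr : &it->second;
}

int main() {
  entrypoint_table.push_back(0x1000);  // Handle 0 -> address 0x1000.
  wrappers_by_address[0x1000] = 42;
  assert(FindWrapper(0) != nullptr && *FindWrapper(0) == 42);
  assert(FindWrapper(kInvalidHandle) == nullptr);
}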

WasmCode* WasmImportWrapperCache::CompileWasmImportCallWrapper(
    Isolate* isolate, ImportCallKind kind, const CanonicalSig* sig,
    CanonicalTypeIndex sig_index, bool source_positions, int expected_arity,
    Suspend suspend) {
  WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
      kind, sig, source_positions, expected_arity, suspend);
  WasmCode* wasm_code;
  {
    ModificationScope cache_scope(this);
    CacheKey key(kind, sig_index, expected_arity, suspend);
    // Now that we have the lock (in the form of the cache_scope), check
    // again whether another thread has just created the wrapper.
    wasm_code = cache_scope[key];
    if (wasm_code) {
      WasmCodeRefScope::AddRef(wasm_code);
      if (!wasm_code->is_dying()) return wasm_code;
    }

    wasm_code = cache_scope.AddWrapper(key, std::move(result),
                                       WasmCode::kWasmToJsWrapper,
                                       sig->signature_hash());
  }

  // To avoid lock order inversion, code printing must happen after the
  // end of the {cache_scope}.
  wasm_code->MaybePrint();
  isolate->counters()->wasm_generated_code_size()->Increment(
      wasm_code->instructions().length());
  isolate->counters()->wasm_reloc_size()->Increment(
      wasm_code->reloc_info().length());
  if (GetWasmEngine()->LogWrapperCode(wasm_code)) {
    // Log the code immediately in the current isolate.
    GetWasmEngine()->LogOutstandingCodesForIsolate(isolate);
  }
  return wasm_code;
}
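Note the concurrency pattern in CompileWasmImportCallWrapper(): compilation runs outside the lock, and the cache is re-checked once the ModificationScope is held, so two racing threads may both compile the same wrapper but only one result is published. A self-contained sketch of this compile-then-recheck idiom, with illustrative names (not V8 API):

// Sketch of compile-outside-the-lock, publish-under-the-lock caching.
#include <map>
#include <mutex>
#include <string>

std::mutex cache_mutex;
std::map<int, std::string> cache;

std::string ExpensiveCompile(int key) { return "code-" + std::to_string(key); }

const std::string& GetOrCompile(int key) {
  // Expensive work without the lock held; a racing thread may duplicate it.
  std::string result = ExpensiveCompile(key);
  std::lock_guard<std::mutex> lock(cache_mutex);
  // Re-check under the lock: another thread may have won the race.
  auto it = cache.find(key);
  if (it != cache.end()) return it->second;  // Discard our duplicate.
  return cache.emplace(key, std::move(result)).first->second;
}

int main() { return GetOrCompile(7) == "code-7" ? 0 : 1; }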

void WasmImportWrapperCache::LogForIsolate(Isolate* isolate) {
  base::RecursiveMutexGuard lock(&mutex_);
  for (const auto& entry : codes_) {
    entry.second->LogCode(isolate, "", -1);  // No source URL, no ScriptId.
  }
}

void WasmImportWrapperCache::Free(std::vector<WasmCode*>& wrappers) {
  base::RecursiveMutexGuard lock(&mutex_);
  if (codes_.empty() || wrappers.empty()) return;
  // {WasmCodeAllocator::FreeCode()} wants code objects to be sorted.
  std::sort(wrappers.begin(), wrappers.end(), [](WasmCode* a, WasmCode* b) {
    return a->instruction_start() < b->instruction_start();
  });
  // Possible future optimization: if {wrappers} is very small, don't
  // allocate the set; use a linear scan instead.
  std::unordered_set<WasmCode*> fastset;
  for (WasmCode* wrapper : wrappers) {
    fastset.insert(wrapper);
    codes_.erase(wrapper->instruction_start());
  }
  for (auto it = entry_map_.begin(); it != entry_map_.end();) {
    if (fastset.contains(it->second)) {
      it = entry_map_.erase(it);
    } else {
      ++it;
    }
  }
  code_allocator_->FreeCode(base::VectorOf(wrappers));
  for (WasmCode* wrapper : wrappers) {
    // TODO(407003348): Drop these checks if they don't trigger in the wild.
    CHECK(wrapper->is_dying());
    CHECK_EQ(wrapper->ref_count_.load(std::memory_order_acquire), 0);
    delete wrapper;
  }
  // Make sure nobody tries to access stale pointers.
  wrappers.clear();
}
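The {entry_map_} cleanup above uses the standard erase-while-iterating idiom: advancing through the iterator returned by erase() keeps the loop valid, since erase() invalidates only the erased element's iterator. A standalone sketch:

// Standalone sketch of the erase-while-iterating idiom (C++20 for
// unordered_set::contains, as used in the file above).
#include <cassert>
#include <string>
#include <unordered_map>
#include <unordered_set>

int main() {
  std::unordered_map<std::string, int> entries{
      {"a", 1}, {"b", 2}, {"c", 3}, {"d", 2}};
  std::unordered_set<int> doomed{2};  // Values to drop.
  for (auto it = entries.begin(); it != entries.end();) {
    if (doomed.contains(it->second)) {
      it = entries.erase(it);  // erase() returns the next valid iterator.
    } else {
      ++it;
    }
  }
  assert(entries.size() == 2);  // Only "a" and "c" remain.
}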

WasmCode* WasmImportWrapperCache::MaybeGet(ImportCallKind kind,
                                           CanonicalTypeIndex type_index,
                                           int expected_arity,
                                           Suspend suspend) const {
  base::RecursiveMutexGuard lock(&mutex_);

  auto it = entry_map_.find({kind, type_index, expected_arity, suspend});
  if (it == entry_map_.end()) return nullptr;
  WasmCodeRefScope::AddRef(it->second);
  if (it->second->is_dying()) return nullptr;
  return it->second;
}

WasmCode* WasmImportWrapperCache::Lookup(Address pc) const {
  // This can be called from the disassembler via `code->MaybePrint()` in
  // `AddWrapper()` above, so we need a recursive mutex.
  base::RecursiveMutexGuard lock(&mutex_);
  auto iter = codes_.upper_bound(pc);
  if (iter == codes_.begin()) return nullptr;
  --iter;
  WasmCode* candidate = iter->second;
  DCHECK_EQ(candidate->instruction_start(), iter->first);
  if (!candidate->contains(pc)) return nullptr;
  WasmCodeRefScope::AddRef(candidate);
  // Note: this function is used for iterating the stack, where dying
  // code objects can still have their last few activations, so we
  // must return {candidate} even if {candidate->is_dying()}.
  return candidate;
}
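Lookup() uses the classic ordered-map containment query: {codes_} is keyed by start address, so upper_bound(pc) returns the first entry starting strictly after pc, and stepping back one entry gives the only candidate whose region could contain pc. A standalone sketch with a hypothetical Region type:

// Standalone sketch of range lookup via upper_bound() in a std::map.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <map>

struct Region {
  uintptr_t start;
  std::size_t size;
};

const Region* FindContaining(const std::map<uintptr_t, Region>& regions,
                             uintptr_t pc) {
  auto iter = regions.upper_bound(pc);  // First region starting after pc.
  if (iter == regions.begin()) return nullptr;  // pc precedes all regions.
  --iter;  // Last region starting at or before pc.
  const Region& candidate = iter->second;
  if (pc >= candidate.start + candidate.size) return nullptr;  // Past its end.
  return &candidate;
}

int main() {
  std::map<uintptr_t, Region> regions;
  regions[0x1000] = {0x1000, 0x100};
  regions[0x2000] = {0x2000, 0x100};
  assert(FindContaining(regions, 0x1080) != nullptr);  // Inside a region.
  assert(FindContaining(regions, 0x1fff) == nullptr);  // In the gap.
  assert(FindContaining(regions, 0x0fff) == nullptr);  // Before everything.
}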

}  // namespace v8::internal::wasm