#if V8_ENABLE_WEBASSEMBLY
#define TRACE_BS(...)                                        \
  do {                                                       \
    if (v8_flags.trace_backing_store) PrintF(__VA_ARGS__);   \
  } while (false)
#if V8_ENABLE_WEBASSEMBLY && V8_TARGET_ARCH_64_BIT
constexpr size_t kFullGuardSize32 = uint64_t{8} * GB;
#endif  // V8_ENABLE_WEBASSEMBLY && V8_TARGET_ARCH_64_BIT

std::atomic<uint32_t> next_backing_store_id_{1};
enum class AllocationStatus {
  kSuccess,
  kSuccessAfterRetry,
  kAddressSpaceLimitReachedFailure,
  kOtherFailure
};
size_t GetReservationSize(bool has_guard_regions, size_t byte_capacity,
                          bool is_wasm_memory64) {
#if V8_TARGET_ARCH_64_BIT && V8_ENABLE_WEBASSEMBLY
  DCHECK_IMPLIES(is_wasm_memory64 && has_guard_regions,
                 v8_flags.wasm_memory64_trap_handling);
  if (has_guard_regions) {
    if (is_wasm_memory64) {
      // ... (memory64 guard-region size elided in this excerpt) ...
    } else {
      static_assert(kFullGuardSize32 >= size_t{4} * GB);
      return kFullGuardSize32;
    }
  }
#else
  DCHECK(!has_guard_regions);
#endif

  return byte_capacity;
}
base::AddressRegion GetReservedRegion(bool has_guard_regions,
                                      bool is_wasm_memory64,
                                      void* buffer_start,
                                      size_t byte_capacity) {
  return base::AddressRegion(
      reinterpret_cast<Address>(buffer_start),
      GetReservationSize(has_guard_regions, byte_capacity, is_wasm_memory64));
}
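// Illustrative sketch (not part of backing-store.cc): how the reservation size
// arithmetic above plays out for guarded 32-bit Wasm memories. The constants
// mirror kFullGuardSize32; the helper name and values are hypothetical and
// only show that a guarded reservation is independent of the capacity.
#include <cstddef>

constexpr size_t kGiB = size_t{1} << 30;

// Either reserve the full guarded window (4 GiB of addressable memory32 space
// plus slack for base+offset accesses) or just the capacity itself.
constexpr size_t ExampleReservationSize(bool has_guard_regions,
                                        size_t byte_capacity) {
  constexpr size_t kFullGuardSize = 8 * kGiB;
  return has_guard_regions ? kFullGuardSize : byte_capacity;
}

static_assert(ExampleReservationSize(true, 16 * 1024 * 1024) == 8 * kGiB,
              "guarded memory32 reserves the full 8 GiB window");
static_assert(ExampleReservationSize(false, 1 * kGiB) == 1 * kGiB,
              "unguarded buffers reserve only their capacity");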
void RecordStatus(Isolate* isolate, AllocationStatus status) {
  isolate->counters()->wasm_memory_allocation_result()->AddSample(
      static_cast<int>(status));
}
BackingStore::BackingStore(PageAllocator* page_allocator, void* buffer_start,
                           size_t byte_length, size_t max_byte_length,
                           size_t byte_capacity, SharedFlag shared,
                           ResizableFlag resizable, bool is_wasm_memory,
                           bool is_wasm_memory64, bool has_guard_regions,
                           bool custom_deleter, bool empty_deleter)
    : buffer_start_(buffer_start),
      byte_length_(byte_length),
      max_byte_length_(max_byte_length),
      byte_capacity_(byte_capacity),
      id_(next_backing_store_id_.fetch_add(1)),
      page_allocator_(page_allocator) {
  // ... (flag computation and consistency DCHECKs elided in this excerpt) ...
  flags_.store(flags, std::memory_order_relaxed);
}
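// Illustrative sketch (not part of backing-store.cc): keeping several boolean
// properties in one atomic bit set, the same idea as the EnumSet-backed
// `flags_` member stored above. All names here are hypothetical.
#include <atomic>
#include <cstdint>

enum ExampleFlag : uint16_t {
  kIsShared = 1 << 0,
  kIsWasmMemory = 1 << 1,
  kHasGuardRegions = 1 << 2,
};

class ExampleFlags {
 public:
  void set(ExampleFlag f) { bits_.fetch_or(f, std::memory_order_relaxed); }
  void clear(ExampleFlag f) {
    bits_.fetch_and(static_cast<uint16_t>(~f), std::memory_order_relaxed);
  }
  bool has(ExampleFlag f) const {
    return (bits_.load(std::memory_order_relaxed) & f) != 0;
  }

 private:
  std::atomic<uint16_t> bits_{0};
};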
BackingStore::~BackingStore() {
  struct ClearSharedAllocator {
    BackingStore* const bs;

    ~ClearSharedAllocator() {
      if (!bs->holds_shared_ptr_to_allocator()) return;
      bs->type_specific_data_.v8_api_array_buffer_allocator_shared
          .std::shared_ptr<v8::ArrayBuffer::Allocator>::~shared_ptr();
    }
  } clear_shared_allocator{this};

  auto FreeResizableMemory = [this] {
    auto region = GetReservedRegion(has_guard_regions(), is_wasm_memory64(),
                                    buffer_start(), byte_capacity());
    if (!region.is_empty()) {
      FreePages(page_allocator_, reinterpret_cast<void*>(region.begin()),
                region.size());
    }
  };

#if V8_ENABLE_WEBASSEMBLY
  if (is_wasm_memory()) {
    size_t reservation_size = GetReservationSize(
        has_guard_regions(), byte_capacity(), is_wasm_memory64());
    TRACE_BS(
        "BSw:free bs=%p mem=%p (length=%zu, capacity=%zu, reservation=%zu)\n",
        this, buffer_start(), byte_length(), byte_capacity(), reservation_size);
    // ... (shared memory data cleanup elided in this excerpt) ...
    FreeResizableMemory();
    return;
  }
#endif  // V8_ENABLE_WEBASSEMBLY

  if (is_resizable_by_js()) {
    FreeResizableMemory();
    return;
  }

  if (custom_deleter()) {
    TRACE_BS("BS:custom deleter bs=%p mem=%p (length=%zu, capacity=%zu)\n",
             this, buffer_start(), byte_length(), byte_capacity());
    // ... (deleter invocation elided in this excerpt) ...
    return;
  }

  TRACE_BS("BS:free bs=%p mem=%p (length=%zu, capacity=%zu)\n", this,
           buffer_start(), byte_length(), byte_capacity());
  // ... (release via the array buffer allocator elided in this excerpt) ...
}
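// Illustrative sketch (not part of backing-store.cc): a union that can hold a
// non-trivial std::shared_ptr must be populated with placement new and torn
// down with an explicit destructor call, which is the job the
// ClearSharedAllocator helper above performs. Names are hypothetical.
#include <memory>
#include <new>
#include <utility>

struct ExampleHolder {
  union Storage {
    Storage() : raw(nullptr) {}
    ~Storage() {}  // Members are destroyed manually by the owner.
    void* raw;
    std::shared_ptr<int> shared;
  } storage;
  bool holds_shared = false;

  // Single-use: switches the union from the raw member to the shared_ptr.
  void adopt(std::shared_ptr<int> p) {
    new (&storage.shared) std::shared_ptr<int>(std::move(p));
    holds_shared = true;
  }

  ~ExampleHolder() {
    // Only the active member may be destroyed.
    if (holds_shared) storage.shared.~shared_ptr();
  }
};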
std::unique_ptr<BackingStore> BackingStore::Allocate(
    Isolate* isolate, size_t byte_length, SharedFlag shared,
    InitializedFlag initialized) {
  auto allocator = isolate->array_buffer_allocator();
  CHECK_NOT_NULL(allocator);
  if (byte_length > allocator->MaxAllocationSize()) return {};

  auto counters = isolate->counters();
  int mb_length = static_cast<int>(byte_length / MB);
  // ...
  counters->array_buffer_big_allocations()->AddSample(mb_length);
  // ...
  counters->shared_array_allocations()->AddSample(mb_length);

  auto allocate_buffer = [allocator, initialized](size_t byte_length) {
    if (initialized == InitializedFlag::kUninitialized) {
      return allocator->AllocateUninitialized(byte_length);
    }
    return allocator->Allocate(byte_length);
  };

  void* buffer_start = isolate->heap()->AllocateExternalBackingStore(
      allocate_buffer, byte_length);
  if (buffer_start == nullptr) {
    // Allocation failed.
    counters->array_buffer_new_size_failures()->AddSample(mb_length);
    return {};
  }

#ifdef V8_ENABLE_SANDBOX
  CHECK_WITH_MSG(/* sandbox containment check elided in this excerpt */,
                 "When the V8 Sandbox is enabled, ArrayBuffer backing stores "
                 "must be allocated inside the sandbox address space. Please "
                 "use an appropriate ArrayBuffer::Allocator to allocate "
                 "these buffers, or disable the sandbox.");
#endif  // V8_ENABLE_SANDBOX

  auto result = new BackingStore(
      isolate->isolate_group()->GetBackingStorePageAllocator()
      // ... (remaining constructor arguments elided in this excerpt) ...
  );
  result->SetAllocatorFromIsolate(isolate);
  return std::unique_ptr<BackingStore>(result);
}
void BackingStore::SetAllocatorFromIsolate(Isolate* isolate) {
  if (auto allocator_shared = isolate->array_buffer_allocator_shared()) {
    set_flag(Flag::kHoldsSharedPtrToAllocater);
    new (&type_specific_data_.v8_api_array_buffer_allocator_shared)
        std::shared_ptr<v8::ArrayBuffer::Allocator>(
            std::move(allocator_shared));
  } else {
    type_specific_data_.v8_api_array_buffer_allocator =
        isolate->array_buffer_allocator();
  }
}
std::unique_ptr<BackingStore>
BackingStore::TryAllocateAndPartiallyCommitMemory(
    Isolate* isolate, size_t byte_length, size_t max_byte_length,
    size_t page_size, size_t initial_pages, size_t maximum_pages,
    WasmMemoryFlag wasm_memory, SharedFlag shared, bool has_guard_regions) {
  // Enforce engine limitation on the maximum number of pages.
  if (maximum_pages > std::numeric_limits<size_t>::max() / page_size) {
    return {};
  }

  // Cannot reserve 0 pages on some OSes.
  if (maximum_pages == 0) maximum_pages = 1;

  TRACE_BS("BSw:try %zu pages, %zu max\n", initial_pages, maximum_pages);

#if V8_ENABLE_WEBASSEMBLY
  // ... (Wasm-specific setup elided in this excerpt) ...
#endif  // V8_ENABLE_WEBASSEMBLY

  bool did_retry = false;

  // Retry allocations up to three times, requesting a GC in between attempts
  // to free up external memory.
  auto gc_retry = [&](const std::function<bool()>& fn) {
    for (int i = 0; i < 3; i++) {
      if (fn()) return true;
      // Collect garbage and retry.
      did_retry = true;
      if (isolate != nullptr) {
        isolate->heap()->MemoryPressureNotification(
            MemoryPressureLevel::kCritical, true);
      }
    }
    return false;
  };

  size_t reservation_size =
      GetReservationSize(has_guard_regions, maximum_pages * page_size,
                         wasm_memory == WasmMemoryFlag::kWasmMemory64);

  //--------------------------------------------------------------------------
  // 1. Allocate pages (inaccessible by default).
  //--------------------------------------------------------------------------
  void* allocation_base = nullptr;
#ifdef V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
#ifdef V8_ENABLE_SANDBOX
  CHECK_WITH_MSG(isolate != nullptr,
                 "One must enter a v8::Isolate before allocating resizable "
                 "array backing stores");
#endif  // V8_ENABLE_SANDBOX
#endif  // V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
  v8::PageAllocator* page_allocator =
      isolate ? isolate->isolate_group()->GetBackingStorePageAllocator()
              : GetArrayBufferPageAllocator();

  auto allocate_pages = [&] {
    allocation_base = AllocatePages(page_allocator, nullptr, reservation_size,
                                    page_size, PageAllocator::kNoAccess);
    return allocation_base != nullptr;
  };
  if (!gc_retry(allocate_pages)) {
    // Page allocator could not reserve enough pages.
    if (isolate != nullptr) {
      RecordStatus(isolate, AllocationStatus::kOtherFailure);
    }
    TRACE_BS("BSw:try failed to allocate pages\n");
    return {};
  }

  uint8_t* buffer_start = reinterpret_cast<uint8_t*>(allocation_base);

  //--------------------------------------------------------------------------
  // 2. Commit the initial pages (allow read/write).
  //--------------------------------------------------------------------------
  size_t committed_byte_length = initial_pages * page_size;
  auto commit_memory = [&] {
    return committed_byte_length == 0 ||
           SetPermissions(page_allocator, buffer_start, committed_byte_length,
                          PageAllocator::kReadWrite);
  };
  if (!gc_retry(commit_memory)) {
    TRACE_BS("BSw:try failed to set permissions (%p, %zu)\n", buffer_start,
             committed_byte_length);
    FreePages(page_allocator, allocation_base, reservation_size);
    // ...
    return {};
  }

  if (isolate != nullptr) {
    RecordStatus(isolate, did_retry ? AllocationStatus::kSuccessAfterRetry
                                    : AllocationStatus::kSuccess);
  }

  // ... (construction of the BackingStore `result` elided in this excerpt) ...
  TRACE_BS(
      "BSw:alloc bs=%p mem=%p (length=%zu, capacity=%zu, reservation=%zu)\n",
      result, result->buffer_start(), byte_length, result->byte_capacity(),
      reservation_size);
  return std::unique_ptr<BackingStore>(result);
}
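// Illustrative sketch (not part of backing-store.cc): the retry-under-memory-
// pressure pattern used by gc_retry above, written against plain std types.
// The relieve_pressure callback stands in for a GC trigger such as
// Heap::MemoryPressureNotification and is hypothetical.
#include <functional>

// Runs `attempt` up to three times; between failed attempts, gives the caller
// a chance to release memory (e.g. trigger a GC) via `relieve_pressure`.
inline bool RetryWithPressureRelief(const std::function<bool()>& attempt,
                                    const std::function<void()>& relieve_pressure,
                                    bool* did_retry) {
  for (int i = 0; i < 3; i++) {
    if (attempt()) return true;
    *did_retry = true;
    if (relieve_pressure) relieve_pressure();
  }
  return false;
}

// Usage: bool retried = false;
//        bool ok = RetryWithPressureRelief(allocate_pages, run_gc, &retried);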
#if V8_ENABLE_WEBASSEMBLY
std::unique_ptr<BackingStore> BackingStore::AllocateWasmMemory(
    Isolate* isolate, size_t initial_pages, size_t maximum_pages,
    WasmMemoryFlag wasm_memory, SharedFlag shared) {
  // ... (consistency DCHECKs elided in this excerpt) ...

  auto TryAllocate = [isolate, initial_pages, wasm_memory,
                      shared](size_t maximum_pages) {
    // Allocate via TryAllocateAndPartiallyCommitMemory (guard-region setup
    // elided in this excerpt).
    auto result = TryAllocateAndPartiallyCommitMemory(
        isolate, initial_pages * wasm::kWasmPageSize,
        maximum_pages * wasm::kWasmPageSize, wasm::kWasmPageSize,
        initial_pages, maximum_pages, wasm_memory, shared);
    if (result && shared == SharedFlag::kShared) {
      result->type_specific_data_.shared_wasm_memory_data =
          new SharedWasmMemoryData();
    }
    return result;
  };
  auto backing_store = TryAllocate(maximum_pages);

  if (!backing_store &&
      // ...
      maximum_pages - initial_pages >= 4) {
    // Retry with successively smaller maximums: split the difference between
    // the initial and maximum number of pages into four steps.
    auto delta = (maximum_pages - initial_pages) / 4;
    size_t sizes[] = {maximum_pages - delta, maximum_pages - 2 * delta,
                      maximum_pages - 3 * delta, initial_pages};

    for (size_t reduced_maximum_pages : sizes) {
      backing_store = TryAllocate(reduced_maximum_pages);
      if (backing_store) break;
    }
  }
  return backing_store;
}
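// Illustrative sketch (not part of backing-store.cc): the shrinking-maximum
// backoff used above, which quarters the gap between the requested maximum and
// the required initial size on each retry. The allocator callback and the
// function name are hypothetical; it assumes initial_pages <= maximum_pages.
#include <cstddef>
#include <functional>
#include <memory>

template <typename T>
std::unique_ptr<T> AllocateWithBackoff(
    size_t initial_pages, size_t maximum_pages,
    const std::function<std::unique_ptr<T>(size_t max_pages)>& try_allocate) {
  auto result = try_allocate(maximum_pages);
  // Only back off if the first attempt failed and there is room to shrink.
  if (result || maximum_pages - initial_pages < 4) return result;

  size_t delta = (maximum_pages - initial_pages) / 4;
  const size_t candidates[] = {maximum_pages - delta, maximum_pages - 2 * delta,
                               maximum_pages - 3 * delta, initial_pages};
  for (size_t reduced_maximum : candidates) {
    result = try_allocate(reduced_maximum);
    if (result) break;
  }
  return result;
}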
std::unique_ptr<BackingStore> BackingStore::CopyWasmMemory(
    Isolate* isolate, size_t new_pages, size_t max_pages,
    WasmMemoryFlag wasm_memory) {
  auto new_backing_store = BackingStore::AllocateWasmMemory(
      isolate, new_pages, max_pages, wasm_memory,
      is_shared() ? SharedFlag::kShared : SharedFlag::kNotShared);

  if (!new_backing_store ||
      new_backing_store->has_guard_regions() != has_guard_regions()) {
    return {};
  }

  // ... (copy of the old contents into the new memory elided in this
  // excerpt) ...

  return new_backing_store;
}
std::optional<size_t> BackingStore::GrowWasmMemoryInPlace(Isolate* isolate,
                                                          size_t delta_pages,
                                                          size_t max_pages) {
  // Growing can be performed by multiple threads concurrently; the new byte
  // length is published with a compare-and-swap on {byte_length_}.
  size_t old_length = byte_length_.load(std::memory_order_relaxed);

  if (delta_pages == 0)
    return {old_length / wasm::kWasmPageSize};  // Degenerate grow.
  if (delta_pages > max_pages) return {};       // Would never work.

  // ...
  size_t current_pages = old_length / wasm::kWasmPageSize;
  if (current_pages > (max_pages - delta_pages)) return {};  // Exceeds maximum.

  size_t new_length = (current_pages + delta_pages) * wasm::kWasmPageSize;
  // ... (commitment of the additional pages elided in this excerpt) ...
  if (byte_length_.compare_exchange_weak(old_length, new_length,
                                         std::memory_order_acq_rel)) {
    // ... (successful grow; report the old page count) ...
  }
  // ...
}
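// Illustrative sketch (not part of backing-store.cc): growing an atomically
// published byte length with compare_exchange_weak, as in the loop above.
// Concurrent growers race; the loser re-reads the new value and retries on top
// of it. The class name and kPageSize constant are hypothetical.
#include <atomic>
#include <cstddef>
#include <optional>

class ExampleGrowableLength {
 public:
  // Returns the old length in pages on success, std::nullopt if the grow
  // would exceed max_pages.
  std::optional<size_t> GrowInPlace(size_t delta_pages, size_t max_pages) {
    static constexpr size_t kPageSize = 65536;  // One Wasm page.
    size_t old_length = byte_length_.load(std::memory_order_relaxed);
    while (true) {
      size_t old_pages = old_length / kPageSize;
      if (delta_pages > max_pages || old_pages > max_pages - delta_pages) {
        return std::nullopt;  // Would exceed the maximum.
      }
      size_t new_length = (old_pages + delta_pages) * kPageSize;
      // On failure, compare_exchange_weak reloads old_length for the retry.
      if (byte_length_.compare_exchange_weak(old_length, new_length,
                                             std::memory_order_acq_rel)) {
        return old_pages;
      }
    }
  }

 private:
  std::atomic<size_t> byte_length_{0};
};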
void BackingStore::AttachSharedWasmMemoryObject(
    Isolate* isolate, DirectHandle<WasmMemoryObject> memory_object) {
  DCHECK(is_wasm_memory());
  // Registration takes the global registry lock.
  GlobalBackingStoreRegistry::AddSharedWasmMemoryObject(isolate, this,
                                                        memory_object);
}

void BackingStore::BroadcastSharedWasmMemoryGrow(Isolate* isolate) const {
  GlobalBackingStoreRegistry::BroadcastSharedWasmMemoryGrow(isolate, this);
}

void BackingStore::RemoveSharedWasmMemoryObjects(Isolate* isolate) {
  GlobalBackingStoreRegistry::Purge(isolate);
}

void BackingStore::UpdateSharedWasmMemoryObjects(Isolate* isolate) {
  GlobalBackingStoreRegistry::UpdateSharedWasmMemoryObjects(isolate);
}

void BackingStore::MakeWasmMemoryResizableByJS(bool resizable) {
  // ... (flag update elided in this excerpt) ...
}
BackingStore::ResizeOrGrowResult BackingStore::ResizeInPlace(
    Isolate* isolate, size_t new_byte_length) {
  size_t page_size = AllocatePageSize();
  size_t new_committed_pages;
  bool round_return_value =
      RoundUpToPageSize(new_byte_length, page_size, kMaxByteLength,
                        &new_committed_pages);
  CHECK(round_return_value);

  size_t new_committed_length = new_committed_pages * page_size;
  DCHECK_LE(new_byte_length, new_committed_length);

  // ...
  // Zero the tail beyond the new byte length so that a later grow observes
  // zero-initialized memory.
  memset(reinterpret_cast<uint8_t*>(buffer_start_) + new_byte_length, 0,
         /* length of the tail to clear, elided in this excerpt */);

  size_t old_committed_pages;
  // ... (rounding of the current committed length elided in this excerpt) ...
  CHECK(round_return_value);
  DCHECK_LE(new_committed_pages, old_committed_pages);

  if (new_committed_pages < old_committed_pages) {
    size_t old_committed_length = old_committed_pages * page_size;
    // Decommit the pages that are no longer needed.
    if (!SetPermissions(
            page_allocator_,
            reinterpret_cast<uint8_t*>(buffer_start_) + new_committed_length,
            old_committed_length - new_committed_length,
            PageAllocator::kNoAccess)) {
      // ...
    }
  }

  // ...
}
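// Illustrative sketch (not part of backing-store.cc): the page rounding used
// when resizing, i.e. how a byte length maps to a committed page count and how
// many pages a shrink can decommit. The helper is a hypothetical stand-in for
// RoundUpToPageSize and returns 0 on overflow rather than a bool.
#include <cstddef>

// Rounds `byte_length` up to whole pages; returns the page count, or 0 if the
// rounded size would overflow or exceed `max_allowed`.
constexpr size_t ExamplePagesFor(size_t byte_length, size_t page_size,
                                 size_t max_allowed) {
  size_t rounded = ((byte_length + page_size - 1) / page_size) * page_size;
  if (rounded < byte_length || rounded > max_allowed) return 0;
  return rounded / page_size;
}

// 100 KiB of requested length on 64 KiB pages commits 2 pages (128 KiB);
// shrinking from 10 committed pages would therefore decommit 8 pages.
static_assert(ExamplePagesFor(100 * 1024, 64 * 1024, size_t{1} << 30) == 2,
              "100 KiB rounds up to two 64 KiB pages");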
BackingStore::ResizeOrGrowResult BackingStore::GrowInPlace(
    Isolate* isolate, size_t new_byte_length) {
  size_t page_size = AllocatePageSize();
  size_t new_committed_pages;
  bool round_return_value =
      RoundUpToPageSize(new_byte_length, page_size, kMaxByteLength,
                        &new_committed_pages);
  CHECK(round_return_value);

  size_t new_committed_length = new_committed_pages * page_size;
  DCHECK_LE(new_byte_length, new_committed_length);

  // ... (commitment of the additional pages elided in this excerpt) ...

  // Other threads may be growing the same buffer concurrently; publish the new
  // length with a compare-and-swap, retrying as needed.
  size_t old_byte_length = byte_length_.load(std::memory_order_seq_cst);
  // ...
  if (new_byte_length < old_byte_length) {
    // The requested length is below the current length (e.g. another thread
    // already grew the buffer further).
    // ...
  }
  if (new_byte_length == old_byte_length) {
    // The buffer already has the requested length.
    // ...
  }
  // ...
  if (byte_length_.compare_exchange_weak(old_byte_length, new_byte_length,
                                         std::memory_order_seq_cst)) {
    // ...
  }
  // ...
}
std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
    void* allocation_base, size_t allocation_length,
    v8::BackingStore::DeleterCallback deleter, void* deleter_data,
    SharedFlag shared) {
  // ... (construction of `result` elided in this excerpt) ...
  result->type_specific_data_.deleter = {deleter, deleter_data};
  return std::unique_ptr<BackingStore>(result);
}

std::unique_ptr<BackingStore> BackingStore::EmptyBackingStore(
    SharedFlag shared) {
  // ... (construction of the empty backing store elided in this excerpt) ...
  return std::unique_ptr<BackingStore>(result);
}
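// Illustrative sketch (not part of backing-store.cc): wrapping caller-owned
// memory so that a user-supplied deleter callback runs exactly once when the
// wrapper dies, mirroring the DeleterInfo stored by WrapAllocation above. The
// class and alias names are hypothetical; the callback signature matches
// v8::BackingStore::DeleterCallback.
#include <cstddef>

using ExampleDeleterCallback = void (*)(void* data, size_t length,
                                        void* deleter_data);

class ExampleWrappedAllocation {
 public:
  ExampleWrappedAllocation(void* data, size_t length,
                           ExampleDeleterCallback deleter, void* deleter_data)
      : data_(data),
        length_(length),
        deleter_(deleter),
        deleter_data_(deleter_data) {}

  ExampleWrappedAllocation(const ExampleWrappedAllocation&) = delete;
  ExampleWrappedAllocation& operator=(const ExampleWrappedAllocation&) = delete;

  ~ExampleWrappedAllocation() {
    // Hand the memory back to its owner via the supplied callback.
    if (deleter_) deleter_(data_, length_, deleter_data_);
  }

 private:
  void* data_;
  size_t length_;
  ExampleDeleterCallback deleter_;
  void* deleter_data_;
};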
v8::ArrayBuffer::Allocator* BackingStore::get_v8_api_array_buffer_allocator() {
  auto array_buffer_allocator =
      holds_shared_ptr_to_allocator()
          ? type_specific_data_.v8_api_array_buffer_allocator_shared.get()
          : type_specific_data_.v8_api_array_buffer_allocator;
  CHECK_NOT_NULL(array_buffer_allocator);
  return array_buffer_allocator;
}
SharedWasmMemoryData* BackingStore::get_shared_wasm_memory_data() const {
  auto shared_wasm_memory_data = type_specific_data_.shared_wasm_memory_data;
  CHECK(shared_wasm_memory_data);
  return shared_wasm_memory_data;
}
struct GlobalBackingStoreRegistryImpl {
  GlobalBackingStoreRegistryImpl() = default;
  base::Mutex mutex_;
  std::unordered_map<const void*, std::weak_ptr<BackingStore>> map_;
};

DEFINE_LAZY_LEAKY_OBJECT_GETTER(GlobalBackingStoreRegistryImpl,
                                GetGlobalBackingStoreRegistryImpl)
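// Illustrative sketch (not part of backing-store.cc): a lazily created, never
// destroyed ("leaky") singleton of the kind DEFINE_LAZY_LEAKY_OBJECT_GETTER
// expands to. Leaking avoids destruction-order problems at process exit.
// All names here are hypothetical.
#include <mutex>
#include <unordered_map>

struct ExampleRegistryImpl {
  std::mutex mutex;
  std::unordered_map<const void*, int> map;
};

inline ExampleRegistryImpl* GetExampleRegistryImpl() {
  // Constructed thread-safely on first use, intentionally never destroyed.
  static ExampleRegistryImpl* instance = new ExampleRegistryImpl();
  return instance;
}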
void GlobalBackingStoreRegistry::Register(
    std::shared_ptr<BackingStore> backing_store) {
  if (!backing_store || !backing_store->buffer_start()) return;

  // Only Wasm memory backing stores need to be registered globally.
  CHECK(backing_store->is_wasm_memory());

  GlobalBackingStoreRegistryImpl* impl = GetGlobalBackingStoreRegistryImpl();
  base::MutexGuard scope_lock(&impl->mutex_);
  if (backing_store->globally_registered()) return;
  TRACE_BS("BS:reg bs=%p mem=%p (length=%zu, capacity=%zu)\n",
           backing_store.get(), backing_store->buffer_start(),
           backing_store->byte_length(), backing_store->byte_capacity());
  std::weak_ptr<BackingStore> weak = backing_store;
  auto result = impl->map_.insert({backing_store->buffer_start(), weak});
  CHECK(result.second);
  // ... (mark the backing store as globally registered; elided in this
  // excerpt) ...
}
void GlobalBackingStoreRegistry::Unregister(BackingStore* backing_store) {
  if (!backing_store->globally_registered()) return;

  GlobalBackingStoreRegistryImpl* impl = GetGlobalBackingStoreRegistryImpl();
  base::MutexGuard scope_lock(&impl->mutex_);
  const auto& result = impl->map_.find(backing_store->buffer_start());
  if (result != impl->map_.end()) {
    impl->map_.erase(result);
  }
  // ...
}
void GlobalBackingStoreRegistry::Purge(Isolate* isolate) {
  // Keep a strong reference to every backing store visited below; otherwise
  // the temporary reference created in the loop could be the last one, and the
  // destructor would try to re-take the registry lock to unregister itself.
  std::vector<std::shared_ptr<BackingStore>> prevent_destruction_under_lock;
  GlobalBackingStoreRegistryImpl* impl = GetGlobalBackingStoreRegistryImpl();
  base::MutexGuard scope_lock(&impl->mutex_);
  // Purge all entries in the map that refer to the given isolate.
  for (auto& entry : impl->map_) {
    auto backing_store = entry.second.lock();
    prevent_destruction_under_lock.emplace_back(backing_store);
    if (!backing_store) continue;  // The backing store is already gone.
    CHECK(backing_store->is_wasm_memory());
    if (!backing_store->is_shared()) continue;
    SharedWasmMemoryData* shared_data =
        backing_store->get_shared_wasm_memory_data();
    // Remove this isolate from the list of isolates sharing the memory.
    std::vector<Isolate*>& isolates = shared_data->isolates_;
    auto isolates_it = std::find(isolates.begin(), isolates.end(), isolate);
    if (isolates_it != isolates.end()) {
      *isolates_it = isolates.back();
      isolates.pop_back();
    }
    DCHECK_EQ(isolates.end(),
              std::find(isolates.begin(), isolates.end(), isolate));
  }
}
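// Illustrative sketch (not part of backing-store.cc): sweeping a registry of
// std::weak_ptr entries while keeping every still-alive object pinned by a
// local shared_ptr, so that no entry's destructor can run (and try to re-enter
// the registry lock) while the lock is held; this mirrors the
// prevent_destruction_under_lock trick above. Types are hypothetical.
#include <memory>
#include <mutex>
#include <unordered_map>
#include <vector>

struct ExampleStore {};

inline void PurgeExpired(
    std::mutex& mutex,
    std::unordered_map<const void*, std::weak_ptr<ExampleStore>>& map) {
  // Declared before the lock guard so it is destroyed (and releases its
  // references) only after the mutex has been unlocked.
  std::vector<std::shared_ptr<ExampleStore>> keep_alive_under_lock;
  std::lock_guard<std::mutex> guard(mutex);
  for (auto it = map.begin(); it != map.end();) {
    auto strong = it->second.lock();
    if (!strong) {
      it = map.erase(it);  // The entry's object is already gone.
      continue;
    }
    keep_alive_under_lock.push_back(std::move(strong));
    ++it;
  }
}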
#if V8_ENABLE_WEBASSEMBLY
void GlobalBackingStoreRegistry::AddSharedWasmMemoryObject(
    Isolate* isolate, BackingStore* backing_store,
    DirectHandle<WasmMemoryObject> memory_object) {
  // Add to the weak array list of shared memory objects in the isolate.
  isolate->AddSharedWasmMemory(memory_object);

  // Add the isolate to the list of isolates sharing this backing store.
  GlobalBackingStoreRegistryImpl* impl = GetGlobalBackingStoreRegistryImpl();
  base::MutexGuard scope_lock(&impl->mutex_);
  SharedWasmMemoryData* shared_data =
      backing_store->get_shared_wasm_memory_data();
  std::vector<Isolate*>& isolates = shared_data->isolates_;
  int free_entry = -1;
  for (size_t i = 0; i < isolates.size(); i++) {
    if (isolates[i] == isolate) return;
    if (isolates[i] == nullptr) free_entry = static_cast<int>(i);
  }
  if (free_entry != -1) {
    isolates[free_entry] = isolate;
  } else {
    isolates.push_back(isolate);
  }
}
void GlobalBackingStoreRegistry::BroadcastSharedWasmMemoryGrow(
    Isolate* isolate, const BackingStore* backing_store) {
  GlobalBackingStoreRegistryImpl* impl = GetGlobalBackingStoreRegistryImpl();
  base::MutexGuard scope_lock(&impl->mutex_);
  SharedWasmMemoryData* shared_data =
      backing_store->get_shared_wasm_memory_data();
  // Ask every other isolate sharing this memory to update its memory objects
  // at the next stack guard check.
  for (Isolate* other : shared_data->isolates_) {
    if (other == isolate) continue;
    other->stack_guard()->RequestGrowSharedMemory();
  }
}
void GlobalBackingStoreRegistry::UpdateSharedWasmMemoryObjects(
    Isolate* isolate) {
  HandleScope scope(isolate);
  DirectHandle<WeakArrayList> shared_wasm_memories =
      isolate->factory()->shared_wasm_memories();

  for (int i = 0, e = shared_wasm_memories->length(); i < e; ++i) {
    Tagged<HeapObject> obj;
    if (!shared_wasm_memories->Get(i).GetHeapObject(&obj)) continue;

    DirectHandle<WasmMemoryObject> memory_object(Cast<WasmMemoryObject>(obj),
                                                 isolate);
    if (memory_object->array_buffer()->is_resizable_by_js()) {
      // ... (handling of resizable shared buffers elided in this excerpt) ...
    }
    // ...
    memory_object->UpdateInstances(isolate);
  }
}
#endif  // V8_ENABLE_WEBASSEMBLY

#undef TRACE_BS