v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
backing-store.cc
Go to the documentation of this file.
1// Copyright 2019 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/objects/backing-store.h"
6
7#include <cstring>
8#include <optional>
9
10#include "src/base/bits.h"
14#include "src/sandbox/sandbox.h"
15
16#if V8_ENABLE_WEBASSEMBLY
23#endif // V8_ENABLE_WEBASSEMBLY
24
25#define TRACE_BS(...) \
26 do { \
27 if (v8_flags.trace_backing_store) PrintF(__VA_ARGS__); \
28 } while (false)
29
30namespace v8::internal {
31
32namespace {
33
34#if V8_ENABLE_WEBASSEMBLY && V8_TARGET_ARCH_64_BIT
35constexpr size_t kFullGuardSize32 = uint64_t{8} * GB;
36#endif
37
38std::atomic<uint32_t> next_backing_store_id_{1};
39
40// Allocation results are reported to UMA
41//
42// See wasm_memory_allocation_result in counters-definitions.h
43enum class AllocationStatus {
44 kSuccess, // Succeeded on the first try
45
46 kSuccessAfterRetry, // Succeeded after garbage collection
47
48 kAddressSpaceLimitReachedFailure, // Failed because Wasm is at its address
49 // space limit
50
51 kOtherFailure // Failed for an unknown reason
52};
53
54size_t GetReservationSize(bool has_guard_regions, size_t byte_capacity,
55 bool is_wasm_memory64) {
56#if V8_TARGET_ARCH_64_BIT && V8_ENABLE_WEBASSEMBLY
57 DCHECK_IMPLIES(is_wasm_memory64 && has_guard_regions,
58 v8_flags.wasm_memory64_trap_handling);
59 if (has_guard_regions) {
60 if (is_wasm_memory64) {
61 DCHECK_LE(byte_capacity, wasm::kMaxMemory64Size);
63 } else {
64 static_assert(kFullGuardSize32 >= size_t{4} * GB);
65 DCHECK_LE(byte_capacity, size_t{4} * GB);
66 return kFullGuardSize32;
67 }
68 }
69#else
70 DCHECK(!has_guard_regions);
71#endif
72
73 return byte_capacity;
74}
75
76base::AddressRegion GetReservedRegion(bool has_guard_regions,
77 bool is_wasm_memory64, void* buffer_start,
78 size_t byte_capacity) {
79 return base::AddressRegion(
80 reinterpret_cast<Address>(buffer_start),
81 GetReservationSize(has_guard_regions, byte_capacity, is_wasm_memory64));
82}
83
84void RecordStatus(Isolate* isolate, AllocationStatus status) {
85 isolate->counters()->wasm_memory_allocation_result()->AddSample(
86 static_cast<int>(status));
87}
88
89} // namespace
90
91// The backing store for a Wasm shared memory remembers all the isolates
92// with which it has been shared.
93struct SharedWasmMemoryData {
94 std::vector<Isolate*> isolates_;
95};
96
97BackingStore::BackingStore(PageAllocator* page_allocator, void* buffer_start,
98 size_t byte_length, size_t max_byte_length,
99 size_t byte_capacity, SharedFlag shared,
100 ResizableFlag resizable, bool is_wasm_memory,
101 bool is_wasm_memory64, bool has_guard_regions,
102 bool custom_deleter, bool empty_deleter)
103 : buffer_start_(buffer_start),
104 byte_length_(byte_length),
105 max_byte_length_(max_byte_length),
106 byte_capacity_(byte_capacity),
107 id_(next_backing_store_id_.fetch_add(1)),
108 page_allocator_(page_allocator) {
109 // TODO(v8:11111): RAB / GSAB - Wasm integration.
118 // TODO(1445003): Demote to a DCHECK once we have found the issue.
119 // Wasm memory should never be empty (== zero capacity). Otherwise
120 // {JSArrayBuffer::Attach} would replace it with the {EmptyBackingStore} and we
121 // would lose information.
122 // This is particularly important for shared Wasm memory.
123 CHECK_IMPLIES(is_wasm_memory, byte_capacity_ != 0);
124
125 base::EnumSet<Flag, uint16_t> flags;
126 if (shared == SharedFlag::kShared) flags.Add(kIsShared);
127 if (resizable == ResizableFlag::kResizable) flags.Add(kIsResizableByJs);
128 if (is_wasm_memory) flags.Add(kIsWasmMemory);
129 if (is_wasm_memory64) flags.Add(kIsWasmMemory64);
130 if (has_guard_regions) flags.Add(kHasGuardRegions);
131 if (custom_deleter) flags.Add(kCustomDeleter);
132 if (empty_deleter) flags.Add(kEmptyDeleter);
133 flags_.store(flags, std::memory_order_relaxed);
134}
135
136BackingStore::~BackingStore() {
137 GlobalBackingStoreRegistry::Unregister(this);
138
139 struct ClearSharedAllocator {
140 BackingStore* const bs;
141
142 ~ClearSharedAllocator() {
143 if (!bs->holds_shared_ptr_to_allocator()) return;
144 bs->type_specific_data_.v8_api_array_buffer_allocator_shared
145 .std::shared_ptr<v8::ArrayBuffer::Allocator>::~shared_ptr();
146 }
147 } clear_shared_allocator{this};
148
149 if (buffer_start_ == nullptr) return;
150
151 auto FreeResizableMemory = [this] {
154 auto region = GetReservedRegion(has_guard_regions(), is_wasm_memory64(),
155 buffer_start_, byte_capacity_);
156 if (!region.is_empty()) {
157 FreePages(page_allocator_, reinterpret_cast<void*>(region.begin()),
158 region.size());
159 }
160 };
161
162#if V8_ENABLE_WEBASSEMBLY
163 if (is_wasm_memory()) {
164 size_t reservation_size = GetReservationSize(
165 has_guard_regions(), byte_capacity_, is_wasm_memory64());
166 TRACE_BS(
167 "BSw:free bs=%p mem=%p (length=%zu, capacity=%zu, reservation=%zu)\n",
168 this, buffer_start_, byte_length(), byte_capacity_, reservation_size);
169 if (is_shared()) {
170 // Deallocate the list of attached memory objects.
171 SharedWasmMemoryData* shared_data = get_shared_wasm_memory_data();
172 delete shared_data;
173 }
174 // Wasm memories are always allocated through the page allocator.
175 FreeResizableMemory();
176 return;
177 }
178#endif // V8_ENABLE_WEBASSEMBLY
179
180 if (is_resizable_by_js()) {
181 FreeResizableMemory();
182 return;
183 }
184
185 if (custom_deleter()) {
186 TRACE_BS("BS:custom deleter bs=%p mem=%p (length=%zu, capacity=%zu)\n",
187 this, buffer_start_, byte_length(), byte_capacity_);
188 type_specific_data_.deleter.callback(buffer_start_, byte_length_,
189 type_specific_data_.deleter.data);
190 return;
191 }
192
193 // JSArrayBuffer backing store. Deallocate through the embedder's allocator.
194 auto allocator = get_v8_api_array_buffer_allocator();
195 TRACE_BS("BS:free bs=%p mem=%p (length=%zu, capacity=%zu)\n", this,
196 buffer_start_, byte_length(), byte_capacity_);
197 allocator->Free(buffer_start_, byte_length_);
198}
199
200// Allocate a backing store using the array buffer allocator from the embedder.
201std::unique_ptr<BackingStore> BackingStore::Allocate(
202 Isolate* isolate, size_t byte_length, SharedFlag shared,
203 InitializedFlag initialized) {
204 void* buffer_start = nullptr;
205 auto allocator = isolate->array_buffer_allocator();
206 CHECK_NOT_NULL(allocator);
207 if (byte_length > allocator->MaxAllocationSize()) return {};
208 if (byte_length != 0) {
209 auto counters = isolate->counters();
210 int mb_length = static_cast<int>(byte_length / MB);
211 if (mb_length > 0) {
212 counters->array_buffer_big_allocations()->AddSample(mb_length);
213 }
214 if (shared == SharedFlag::kShared) {
215 counters->shared_array_allocations()->AddSample(mb_length);
216 }
217 auto allocate_buffer = [allocator, initialized](size_t byte_length) {
218 if (initialized == InitializedFlag::kUninitialized) {
219 return allocator->AllocateUninitialized(byte_length);
220 }
221 return allocator->Allocate(byte_length);
222 };
223
224 buffer_start = isolate->heap()->AllocateExternalBackingStore(
225 allocate_buffer, byte_length);
226
227 if (buffer_start == nullptr) {
228 // Allocation failed.
229 counters->array_buffer_new_size_failures()->AddSample(mb_length);
230 return {};
231 }
232#ifdef V8_ENABLE_SANDBOX
233 // Check to catch use of a non-sandbox-compatible ArrayBufferAllocator.
234 CHECK_WITH_MSG(isolate->isolate_group()->sandbox()->Contains(buffer_start),
235 "When the V8 Sandbox is enabled, ArrayBuffer backing stores "
236 "must be allocated inside the sandbox address space. Please "
237 "use an appropriate ArrayBuffer::Allocator to allocate "
238 "these buffers, or disable the sandbox.");
239#endif
240 }
241
242 PageAllocator* page_allocator =
243 isolate->isolate_group()->GetBackingStorePageAllocator();
244 auto result = new BackingStore(page_allocator,
245 buffer_start, // start
246 byte_length, // length
247 byte_length, // max length
248 byte_length, // capacity
249 shared, // shared
250 ResizableFlag::kNotResizable, // resizable
251 false, // is_wasm_memory
252 false, // is_wasm_memory64
253 false, // has_guard_regions
254 false, // custom_deleter
255 false); // empty_deleter
256
257 TRACE_BS("BS:alloc bs=%p mem=%p (length=%zu)\n", result,
258 result->buffer_start(), byte_length);
259 result->SetAllocatorFromIsolate(isolate);
260 return std::unique_ptr<BackingStore>(result);
261}
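For context, the path above is what the public embedder API reaches when a plain ArrayBuffer is created. A minimal sketch (not part of this file; it assumes an entered v8::Isolate whose ArrayBuffer::Allocator was supplied at isolate creation):

#include <v8.h>

v8::Local<v8::ArrayBuffer> MakeBuffer(v8::Isolate* isolate) {
  // Allocates zero-initialized memory through the embedder's
  // v8::ArrayBuffer::Allocator, i.e. the BackingStore::Allocate path above.
  std::unique_ptr<v8::BackingStore> store =
      v8::ArrayBuffer::NewBackingStore(isolate, 1024);
  return v8::ArrayBuffer::New(isolate, std::move(store));
}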
262
263void BackingStore::SetAllocatorFromIsolate(Isolate* isolate) {
264 if (auto allocator_shared = isolate->array_buffer_allocator_shared()) {
266 new (&type_specific_data_.v8_api_array_buffer_allocator_shared)
267 std::shared_ptr<v8::ArrayBuffer::Allocator>(
268 std::move(allocator_shared));
269 } else {
270 type_specific_data_.v8_api_array_buffer_allocator =
271 isolate->array_buffer_allocator();
272 }
273}
274
275std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
276 Isolate* isolate, size_t byte_length, size_t max_byte_length,
277 size_t page_size, size_t initial_pages, size_t maximum_pages,
278 WasmMemoryFlag wasm_memory, SharedFlag shared, bool has_guard_regions) {
279 // Enforce engine limitation on the maximum number of pages.
280 if (maximum_pages > std::numeric_limits<size_t>::max() / page_size) {
281 return nullptr;
282 }
283
284 // Cannot reserve 0 pages on some OSes.
285 if (maximum_pages == 0) maximum_pages = 1;
286
287 TRACE_BS("BSw:try %zu pages, %zu max\n", initial_pages, maximum_pages);
288
289#if V8_ENABLE_WEBASSEMBLY
290 bool is_wasm_memory = wasm_memory != WasmMemoryFlag::kNotWasm;
291 bool is_wasm_memory64 = wasm_memory == WasmMemoryFlag::kWasmMemory64;
292#else
294 constexpr bool is_wasm_memory = false;
295 constexpr bool is_wasm_memory64 = false;
296#endif // V8_ENABLE_WEBASSEMBLY
298
299 // For accounting purposes, whether a GC was necessary.
300 bool did_retry = false;
301
302 // A helper to try running a function up to 3 times, executing a GC
303 // if the first and second attempts failed.
304 auto gc_retry = [&](const std::function<bool()>& fn) {
305 for (int i = 0; i < 3; i++) {
306 if (fn()) return true;
307 // Collect garbage and retry.
308 did_retry = true;
309 if (isolate != nullptr) {
310 isolate->heap()->MemoryPressureNotification(
311 MemoryPressureLevel::kCritical, true);
312 }
313 }
314 return false;
315 };
316
317 size_t byte_capacity = maximum_pages * page_size;
318 size_t reservation_size =
319 GetReservationSize(has_guard_regions, byte_capacity, is_wasm_memory64);
320
321 //--------------------------------------------------------------------------
322 // Allocate pages (inaccessible by default).
323 //--------------------------------------------------------------------------
324 void* allocation_base = nullptr;
325#ifdef V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
326#ifdef V8_ENABLE_SANDBOX
327 CHECK_WITH_MSG(isolate || Sandbox::current(),
328#else
329 CHECK_WITH_MSG(isolate,
330#endif
331 "One must enter a v8::Isolate before allocating resizable "
332 "array backing stores");
333#endif
334 PageAllocator* page_allocator =
335 isolate ? isolate->isolate_group()->GetBackingStorePageAllocator()
336 : GetArrayBufferPageAllocator();
337 auto allocate_pages = [&] {
338 allocation_base = AllocatePages(page_allocator, nullptr, reservation_size,
339 page_size, PageAllocator::kNoAccess);
340 return allocation_base != nullptr;
341 };
342 if (!gc_retry(allocate_pages)) {
343 // Page allocator could not reserve enough pages.
344 if (isolate != nullptr) {
345 RecordStatus(isolate, AllocationStatus::kOtherFailure);
346 }
347 TRACE_BS("BSw:try failed to allocate pages\n");
348 return {};
349 }
350
351 uint8_t* buffer_start = reinterpret_cast<uint8_t*>(allocation_base);
352
353 //--------------------------------------------------------------------------
354 // Commit the initial pages (allow read/write).
355 //--------------------------------------------------------------------------
356 size_t committed_byte_length = initial_pages * page_size;
357 auto commit_memory = [&] {
358 return committed_byte_length == 0 ||
359 SetPermissions(page_allocator, buffer_start, committed_byte_length,
360 PageAllocator::kReadWrite);
361 };
362 if (!gc_retry(commit_memory)) {
363 TRACE_BS("BSw:try failed to set permissions (%p, %zu)\n", buffer_start,
364 committed_byte_length);
365 FreePages(page_allocator, allocation_base, reservation_size);
366 // SetPermissions put us over the process memory limit.
367 // We return an empty result so that the caller can throw an exception.
368 return {};
369 }
370
371 if (isolate != nullptr) {
372 RecordStatus(isolate, did_retry ? AllocationStatus::kSuccessAfterRetry
373 : AllocationStatus::kSuccess);
374 }
375
376 ResizableFlag resizable =
377 is_wasm_memory ? ResizableFlag::kNotResizable : ResizableFlag::kResizable;
378
379 auto result = new BackingStore(page_allocator,
380 buffer_start, // start
381 byte_length, // length
382 max_byte_length, // max_byte_length
383 byte_capacity, // capacity
384 shared, // shared
385 resizable, // resizable
386 is_wasm_memory, // is_wasm_memory
387 is_wasm_memory64, // is_wasm_memory64
388 has_guard_regions, // has_guard_regions
389 false, // custom_deleter
390 false); // empty_deleter
391 TRACE_BS(
392 "BSw:alloc bs=%p mem=%p (length=%zu, capacity=%zu, reservation=%zu)\n",
393 result, result->buffer_start(), byte_length, byte_capacity,
394 reservation_size);
395
396 return std::unique_ptr<BackingStore>(result);
397}
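The reserve-then-commit pattern implemented above can be illustrated standalone with POSIX primitives in place of V8's PageAllocator (a sketch under that assumption; error handling trimmed):

#include <sys/mman.h>
#include <cstddef>

// Reserve a large, inaccessible region up front, then commit (make
// read/write) only the initially needed prefix, mirroring AllocatePages
// with kNoAccess followed by SetPermissions above.
void* ReserveAndCommit(size_t reservation_size, size_t committed_size) {
  void* base = mmap(nullptr, reservation_size, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) return nullptr;
  if (committed_size != 0 &&
      mprotect(base, committed_size, PROT_READ | PROT_WRITE) != 0) {
    munmap(base, reservation_size);
    return nullptr;
  }
  return base;
}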
398
399#if V8_ENABLE_WEBASSEMBLY
400// Allocate a backing store for a Wasm memory. Always use the page allocator
401// and add guard regions.
402std::unique_ptr<BackingStore> BackingStore::AllocateWasmMemory(
403 Isolate* isolate, size_t initial_pages, size_t maximum_pages,
404 WasmMemoryFlag wasm_memory, SharedFlag shared) {
405 // Wasm pages must be a multiple of the allocation page size.
407 DCHECK_LE(initial_pages, maximum_pages);
408 DCHECK_LE(maximum_pages, wasm_memory == WasmMemoryFlag::kWasmMemory32
409 ? wasm::kV8MaxWasmMemory32Pages
410 : wasm::kV8MaxWasmMemory64Pages);
411
412 DCHECK(wasm_memory == WasmMemoryFlag::kWasmMemory32 ||
413 wasm_memory == WasmMemoryFlag::kWasmMemory64);
414
415 bool is_wasm_memory64 = wasm_memory == WasmMemoryFlag::kWasmMemory64;
416 bool has_guard_regions =
417 trap_handler::IsTrapHandlerEnabled() &&
418 (wasm_memory == WasmMemoryFlag::kWasmMemory32 ||
419 (is_wasm_memory64 && v8_flags.wasm_memory64_trap_handling));
420
421 auto TryAllocate = [isolate, initial_pages, wasm_memory, shared,
422 has_guard_regions](size_t maximum_pages) {
423 auto result = TryAllocateAndPartiallyCommitMemory(
424 isolate, initial_pages * wasm::kWasmPageSize,
425 maximum_pages * wasm::kWasmPageSize, wasm::kWasmPageSize, initial_pages,
426 maximum_pages, wasm_memory, shared, has_guard_regions);
427 if (result && shared == SharedFlag::kShared) {
428 result->type_specific_data_.shared_wasm_memory_data =
429 new SharedWasmMemoryData();
430 }
431 return result;
432 };
433 auto backing_store = TryAllocate(maximum_pages);
434
435 if (!backing_store &&
436 !has_guard_regions && // With guard regions we always reserved a fixed
437 // number of pages.
438 maximum_pages - initial_pages >= 4) {
439 // Retry with a smaller maximum on each attempt.
440 auto delta = (maximum_pages - initial_pages) / 4;
441 size_t sizes[] = {maximum_pages - delta, maximum_pages - 2 * delta,
442 maximum_pages - 3 * delta, initial_pages};
443
444 for (size_t reduced_maximum_pages : sizes) {
445 backing_store = TryAllocate(reduced_maximum_pages);
446 if (backing_store) break;
447 }
448 }
449 return backing_store;
450}
451
452std::unique_ptr<BackingStore> BackingStore::CopyWasmMemory(
453 Isolate* isolate, size_t new_pages, size_t max_pages,
454 WasmMemoryFlag wasm_memory) {
455 // Note that we could allocate uninitialized to save initialization cost here,
456 // but since Wasm memories are allocated by the page allocator, the zeroing
457 // cost is already built-in.
458 auto new_backing_store = BackingStore::AllocateWasmMemory(
459 isolate, new_pages, max_pages, wasm_memory,
460 is_shared() ? SharedFlag::kShared : SharedFlag::kNotShared);
461
462 if (!new_backing_store ||
463 new_backing_store->has_guard_regions() != has_guard_regions()) {
464 return {};
465 }
466
467 if (byte_length_ > 0) {
468 // If the allocation was successful, then the new buffer must be at least
469 // as big as the old one.
471 memcpy(new_backing_store->buffer_start(), buffer_start_, byte_length_);
472 }
473
474 return new_backing_store;
475}
476
477// Try to grow the size of a wasm memory in place, without realloc + copy.
478std::optional<size_t> BackingStore::GrowWasmMemoryInPlace(Isolate* isolate,
479 size_t delta_pages,
480 size_t max_pages) {
481 // This function grows wasm memory by
482 // * changing the permissions of an additional {delta_pages} pages to kReadWrite;
483 // * incrementing {byte_length_}.
484 //
485 // As this code is executed concurrently, the following steps are executed:
486 // 1) Read the current value of {byte_length_};
487 // 2) Change the permission of all pages from {buffer_start_} to
488 // {byte_length_} + {delta_pages} * {page_size} to kReadWrite;
489 // * This operation may race with concurrent grows; the OS takes care of
490 // synchronizing the permission changes.
491 // 3) Try to update {byte_length_} with a compare_exchange;
492 // 4) Repeat 1) to 3) until the compare_exchange in 3) succeeds;
493 //
494 // The result of this function is the {byte_length_} before growing, in pages.
495 // It behaves like the result of an RMW update on {byte_length_}, i.e. two
496 // concurrent calls to this function will return different values if
497 // {delta_pages} != 0.
498 //
499 // Invariants:
500 // * Permissions are always set incrementally, i.e. for any page {b} with
501 // kReadWrite permission, all pages between the first page {a} and page {b}
502 // also have kReadWrite permission.
503 // * {byte_length_} is always less than or equal to the amount of memory with
504 // permissions set to kReadWrite;
505 // * This is guaranteed by incrementing {byte_length_} with a
506 // compare_exchange after changing the permissions.
507 // * This invariant is the reason why we cannot use a fetch_add.
509 max_pages = std::min(max_pages, byte_capacity_ / wasm::kWasmPageSize);
510
511 // Do a compare-exchange loop, because we also need to adjust page
512 // permissions. Note that multiple racing grows both try to set page
513 // permissions for the entire range (to be RW), so the operating system
514 // should deal with that raciness. We know we succeeded when we can
515 // compare/swap the old length with the new length.
516 size_t old_length = byte_length_.load(std::memory_order_relaxed);
517
518 if (delta_pages == 0)
519 return {old_length / wasm::kWasmPageSize}; // degenerate grow.
520 if (delta_pages > max_pages) return {}; // would never work.
521
522 size_t new_length = 0;
523 while (true) {
524 size_t current_pages = old_length / wasm::kWasmPageSize;
525
526 // Check if we have exceeded the supplied maximum.
527 if (current_pages > (max_pages - delta_pages)) return {};
528
529 new_length = (current_pages + delta_pages) * wasm::kWasmPageSize;
530
531 // Try to adjust the permissions on the memory.
532 if (!SetPermissions(page_allocator_, buffer_start_, new_length,
533 PageAllocator::kReadWrite)) {
534 // This is a nondeterministic failure; mark as such in the WasmEngine (for
535 // differential fuzzing).
537 return {};
538 }
539 if (byte_length_.compare_exchange_weak(old_length, new_length,
540 std::memory_order_acq_rel)) {
541 // Successfully updated both the length and permissions.
542 break;
543 }
544 }
545
546 return {old_length / wasm::kWasmPageSize};
547}
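Reduced to its essentials, the protocol above widens page permissions first and only then publishes the new length via compare-exchange, so the visible length never exceeds the accessible range. A standalone sketch (not V8 code; set_read_write stands in for SetPermissions):

#include <atomic>
#include <cstddef>

bool GrowLength(std::atomic<size_t>& length, size_t delta, size_t capacity,
                bool (*set_read_write)(size_t new_length)) {
  size_t old_length = length.load(std::memory_order_relaxed);
  while (true) {
    size_t new_length = old_length + delta;
    if (new_length < old_length || new_length > capacity) return false;
    // Racing growers may both request permissions for the same range; the OS
    // serializes the permission changes, so this is harmless.
    if (!set_read_write(new_length)) return false;
    if (length.compare_exchange_weak(old_length, new_length,
                                     std::memory_order_acq_rel)) {
      return true;  // Published only after the range became accessible.
    }
    // compare_exchange_weak reloaded old_length; retry with the fresh value.
  }
}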
548
549void BackingStore::AttachSharedWasmMemoryObject(
550 Isolate* isolate, DirectHandle<WasmMemoryObject> memory_object) {
551 DCHECK(is_wasm_memory());
552 DCHECK(is_shared());
553 // We need to take the global registry lock for this operation.
554 GlobalBackingStoreRegistry::AddSharedWasmMemoryObject(isolate, this,
555 memory_object);
556}
557
558void BackingStore::BroadcastSharedWasmMemoryGrow(Isolate* isolate) const {
559 GlobalBackingStoreRegistry::BroadcastSharedWasmMemoryGrow(isolate, this);
560}
561
562void BackingStore::RemoveSharedWasmMemoryObjects(Isolate* isolate) {
563 GlobalBackingStoreRegistry::Purge(isolate);
564}
565
566void BackingStore::UpdateSharedWasmMemoryObjects(Isolate* isolate) {
567 GlobalBackingStoreRegistry::UpdateSharedWasmMemoryObjects(isolate);
568}
569
570void BackingStore::MakeWasmMemoryResizableByJS(bool resizable) {
572 DCHECK(!is_shared());
573 if (resizable) {
575 } else {
577 }
578}
579#endif // V8_ENABLE_WEBASSEMBLY
580
581// Commit already reserved memory (for RAB backing stores (not shared)).
582BackingStore::ResizeOrGrowResult BackingStore::ResizeInPlace(
583 Isolate* isolate, size_t new_byte_length) {
584 size_t page_size = AllocatePageSize();
585 size_t new_committed_pages;
586 bool round_return_value =
587 RoundUpToPageSize(new_byte_length, page_size,
588 JSArrayBuffer::kMaxByteLength, &new_committed_pages);
589 CHECK(round_return_value);
590
591 size_t new_committed_length = new_committed_pages * page_size;
592 DCHECK_LE(new_byte_length, new_committed_length);
593 DCHECK(!is_shared());
594
595 if (new_byte_length < byte_length_) {
596 // Zero the memory so that in case the buffer is grown later, we have
597 // zeroed the contents already. This is especially needed for the portion of
598 // the memory we're not going to decommit below (since it belongs to a
599 // committed page). In addition, we don't rely on all platforms always
600 // zeroing decommitted-then-recommitted memory, but zero the memory
601 // explicitly here.
602 memset(reinterpret_cast<uint8_t*>(buffer_start_) + new_byte_length, 0,
603 byte_length_ - new_byte_length);
604
605 // Check if we can un-commit some pages.
606 size_t old_committed_pages;
607 round_return_value =
608 RoundUpToPageSize(byte_length_, page_size,
609 JSArrayBuffer::kMaxByteLength, &old_committed_pages);
610 CHECK(round_return_value);
611 DCHECK_LE(new_committed_pages, old_committed_pages);
612
613 if (new_committed_pages < old_committed_pages) {
614 size_t old_committed_length = old_committed_pages * page_size;
617 reinterpret_cast<uint8_t*>(buffer_start_) + new_committed_length,
618 old_committed_length - new_committed_length,
620 return kFailure;
621 }
622 }
623
624 // Changing the byte length isn't strictly necessary, since the
625 // JSArrayBuffer already stores the updated length. This is done to keep
626 // the BackingStore and the JSArrayBuffer in sync.
627 byte_length_ = new_byte_length;
628 return kSuccess;
629 }
630 if (new_byte_length == byte_length_) {
631 // i::SetPermissions with size 0 fails on some platforms, so special
632 // handling for the case byte_length_ == new_byte_length == 0 is required.
633 return kSuccess;
634 }
635
636 // Try to adjust the permissions on the memory.
637 if (!SetPermissions(page_allocator_, buffer_start_,
638 new_committed_length, PageAllocator::kReadWrite)) {
639 return kFailure;
640 }
641
642 byte_length_ = new_byte_length;
643 return kSuccess;
644}
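For reference, resizable ArrayBuffer backing stores that end up in ResizeInPlace above are created through the embedder API; a minimal sketch, assuming the v8::ArrayBuffer::NewResizableBackingStore entry point of current V8 headers:

#include <v8.h>

v8::Local<v8::ArrayBuffer> MakeResizable(v8::Isolate* isolate) {
  // max_byte_length is reserved up front; only byte_length is committed.
  std::unique_ptr<v8::BackingStore> store =
      v8::ArrayBuffer::NewResizableBackingStore(/*byte_length=*/4096,
                                                /*max_byte_length=*/1 << 20);
  return v8::ArrayBuffer::New(isolate, std::move(store));
}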
645
646// Commit already reserved memory (for GSAB backing stores (shared)).
647BackingStore::ResizeOrGrowResult BackingStore::GrowInPlace(
648 Isolate* isolate, size_t new_byte_length) {
649 size_t page_size = AllocatePageSize();
650 size_t new_committed_pages;
651 bool round_return_value =
652 RoundUpToPageSize(new_byte_length, page_size,
653 JSArrayBuffer::kMaxByteLength, &new_committed_pages);
654 CHECK(round_return_value);
655
656 size_t new_committed_length = new_committed_pages * page_size;
657 DCHECK_LE(new_byte_length, new_committed_length);
658 DCHECK(is_shared());
659 // See comment in GrowWasmMemoryInPlace.
660 // GrowableSharedArrayBuffer.prototype.grow can be called from several
661 // threads. If two threads try to grow() in a racy way, the spec allows the
662 // larger grow to throw also if the smaller grow succeeds first. The
663 // implementation below doesn't throw in that case - instead, it retries and
664 // succeeds. If the larger grow finishes first though, the smaller grow must
665 // throw.
666 size_t old_byte_length = byte_length_.load(std::memory_order_seq_cst);
667 while (true) {
668 if (new_byte_length < old_byte_length) {
669 // The caller checks for the new_byte_length < old_byte_length case. This
670 // can only happen if another thread grew the memory after that check.
671 return kRace;
672 }
673 if (new_byte_length == old_byte_length) {
674 // i::SetPermissions with size 0 fails on some platforms, so special
675 // handling for the case old_byte_length == new_byte_length == 0 is
676 // required.
677 return kSuccess;
678 }
679
680 // Try to adjust the permissions on the memory.
681 if (!SetPermissions(page_allocator_, buffer_start_,
682 new_committed_length, PageAllocator::kReadWrite)) {
683 return kFailure;
684 }
685
686 // compare_exchange_weak updates old_byte_length.
687 if (byte_length_.compare_exchange_weak(old_byte_length, new_byte_length,
688 std::memory_order_seq_cst)) {
689 // Successfully updated both the length and permissions.
690 break;
691 }
692 }
693 return kSuccess;
694}
695
696std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
697 void* allocation_base, size_t allocation_length,
698 v8::BackingStore::DeleterCallback deleter, void* deleter_data,
699 SharedFlag shared) {
700 bool is_empty_deleter = (deleter == v8::BackingStore::EmptyDeleter);
701 auto result = new BackingStore(nullptr,
702 allocation_base, // start
703 allocation_length, // length
704 allocation_length, // max length
705 allocation_length, // capacity
706 shared, // shared
707 ResizableFlag::kNotResizable, // resizable
708 false, // is_wasm_memory
709 false, // is_wasm_memory64
710 false, // has_guard_regions
711 true, // custom_deleter
712 is_empty_deleter); // empty_deleter
713 result->type_specific_data_.deleter = {deleter, deleter_data};
714 TRACE_BS("BS:wrap bs=%p mem=%p (length=%zu)\n", result,
715 result->buffer_start(), result->byte_length());
716 return std::unique_ptr<BackingStore>(result);
717}
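WrapAllocation is reached through the public API when an embedder hands V8 memory it already owns; a sketch using the deleter-taking NewBackingStore overload (the free here matches the malloc; a real embedder would pass its own releaser):

#include <v8.h>
#include <cstdlib>

std::unique_ptr<v8::BackingStore> WrapMalloc(size_t length) {
  void* data = std::malloc(length);
  if (data == nullptr) return nullptr;
  // The callback runs when the last JS/API reference to the store is gone.
  return v8::ArrayBuffer::NewBackingStore(
      data, length,
      [](void* data, size_t, void*) { std::free(data); },
      /*deleter_data=*/nullptr);
}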
718
719std::unique_ptr<BackingStore> BackingStore::EmptyBackingStore(
720 SharedFlag shared) {
721 auto result = new BackingStore(nullptr,
722 nullptr, // start
723 0, // length
724 0, // max length
725 0, // capacity
726 shared, // shared
727 ResizableFlag::kNotResizable, // resizable
728 false, // is_wasm_memory
729 false, // is_wasm_memory64
730 false, // has_guard_regions
731 false, // custom_deleter
732 false); // empty_deleter
733
734 return std::unique_ptr<BackingStore>(result);
735}
736
746
747SharedWasmMemoryData* BackingStore::get_shared_wasm_memory_data() const {
749 auto shared_wasm_memory_data = type_specific_data_.shared_wasm_memory_data;
750 CHECK(shared_wasm_memory_data);
751 return shared_wasm_memory_data;
752}
753
754namespace {
755// Implementation details of GlobalBackingStoreRegistry.
756struct GlobalBackingStoreRegistryImpl {
757 GlobalBackingStoreRegistryImpl() = default;
758 base::Mutex mutex_;
759 std::unordered_map<const void*, std::weak_ptr<BackingStore>> map_;
760};
761
762DEFINE_LAZY_LEAKY_OBJECT_GETTER(GlobalBackingStoreRegistryImpl,
763 GetGlobalBackingStoreRegistryImpl)
764} // namespace
765
766void GlobalBackingStoreRegistry::Register(
767 std::shared_ptr<BackingStore> backing_store) {
768 if (!backing_store || !backing_store->buffer_start()) return;
769 // Only wasm memory backing stores need to be registered globally.
770 CHECK(backing_store->is_wasm_memory());
771
772 GlobalBackingStoreRegistryImpl* impl = GetGlobalBackingStoreRegistryImpl();
773 base::MutexGuard scope_lock(&impl->mutex_);
774 if (backing_store->globally_registered()) return;
775 TRACE_BS("BS:reg bs=%p mem=%p (length=%zu, capacity=%zu)\n",
776 backing_store.get(), backing_store->buffer_start(),
777 backing_store->byte_length(), backing_store->byte_capacity());
778 std::weak_ptr<BackingStore> weak = backing_store;
779 auto result = impl->map_.insert({backing_store->buffer_start(), weak});
780 CHECK(result.second);
781 backing_store->set_flag(BackingStore::kGloballyRegistered);
782}
783
784void GlobalBackingStoreRegistry::Unregister(BackingStore* backing_store) {
785 if (!backing_store->globally_registered()) return;
786
787 CHECK(backing_store->is_wasm_memory());
788
789 DCHECK_NOT_NULL(backing_store->buffer_start());
790
791 GlobalBackingStoreRegistryImpl* impl = GetGlobalBackingStoreRegistryImpl();
792 base::MutexGuard scope_lock(&impl->mutex_);
793 const auto& result = impl->map_.find(backing_store->buffer_start());
794 if (result != impl->map_.end()) {
795 DCHECK(!result->second.lock());
796 impl->map_.erase(result);
797 }
799}
800
801void GlobalBackingStoreRegistry::Purge(Isolate* isolate) {
802 // We need to keep a reference to all backing stores that are inspected
803 // in the purging loop below. Otherwise, we might get a deadlock
804 // if the temporary backing store reference created in the loop is
805 // the last reference. In that case the destructor of the backing store
806 // may try to take the &impl->mutex_ in order to unregister itself.
807 std::vector<std::shared_ptr<BackingStore>> prevent_destruction_under_lock;
808 GlobalBackingStoreRegistryImpl* impl = GetGlobalBackingStoreRegistryImpl();
809 base::MutexGuard scope_lock(&impl->mutex_);
810 // Purge all entries in the map that refer to the given isolate.
811 for (auto& entry : impl->map_) {
812 auto backing_store = entry.second.lock();
813 prevent_destruction_under_lock.emplace_back(backing_store);
814 if (!backing_store) continue; // skip entries where weak ptr is null
815 CHECK(backing_store->is_wasm_memory());
816 if (!backing_store->is_shared()) continue; // skip non-shared memory
817 SharedWasmMemoryData* shared_data =
818 backing_store->get_shared_wasm_memory_data();
819 // Remove this isolate from the isolates list.
820 std::vector<Isolate*>& isolates = shared_data->isolates_;
821 auto isolates_it = std::find(isolates.begin(), isolates.end(), isolate);
822 if (isolates_it != isolates.end()) {
823 *isolates_it = isolates.back();
824 isolates.pop_back();
825 }
826 DCHECK_EQ(isolates.end(),
827 std::find(isolates.begin(), isolates.end(), isolate));
828 }
829}
830
831#if V8_ENABLE_WEBASSEMBLY
832void GlobalBackingStoreRegistry::AddSharedWasmMemoryObject(
833 Isolate* isolate, BackingStore* backing_store,
834 DirectHandle<WasmMemoryObject> memory_object) {
835 // Add to the weak array list of shared memory objects in the isolate.
836 isolate->AddSharedWasmMemory(memory_object);
837
838 // Add the isolate to the list of isolates sharing this backing store.
839 GlobalBackingStoreRegistryImpl* impl = GetGlobalBackingStoreRegistryImpl();
840 base::MutexGuard scope_lock(&impl->mutex_);
841 SharedWasmMemoryData* shared_data =
842 backing_store->get_shared_wasm_memory_data();
843 auto& isolates = shared_data->isolates_;
844 int free_entry = -1;
845 for (size_t i = 0; i < isolates.size(); i++) {
846 if (isolates[i] == isolate) return;
847 if (isolates[i] == nullptr) free_entry = static_cast<int>(i);
848 }
849 if (free_entry >= 0)
850 isolates[free_entry] = isolate;
851 else
852 isolates.push_back(isolate);
853}
854
855void GlobalBackingStoreRegistry::BroadcastSharedWasmMemoryGrow(
856 Isolate* isolate, const BackingStore* backing_store) {
857 {
858 GlobalBackingStoreRegistryImpl* impl = GetGlobalBackingStoreRegistryImpl();
859 // The global lock protects the list of isolates per backing store.
860 base::MutexGuard scope_lock(&impl->mutex_);
861 SharedWasmMemoryData* shared_data =
862 backing_store->get_shared_wasm_memory_data();
863 for (Isolate* other : shared_data->isolates_) {
864 if (other == isolate) continue;
865 other->stack_guard()->RequestGrowSharedMemory();
866 }
867 }
868 // Update memory objects in this isolate.
869 UpdateSharedWasmMemoryObjects(isolate);
870}
871
872void GlobalBackingStoreRegistry::UpdateSharedWasmMemoryObjects(
873 Isolate* isolate) {
874
875 HandleScope scope(isolate);
876 DirectHandle<WeakArrayList> shared_wasm_memories =
877 isolate->factory()->shared_wasm_memories();
878
879 for (int i = 0, e = shared_wasm_memories->length(); i < e; ++i) {
880 Tagged<HeapObject> obj;
881 if (!shared_wasm_memories->Get(i).GetHeapObject(&obj)) continue;
882
883 DirectHandle<WasmMemoryObject> memory_object(Cast<WasmMemoryObject>(obj),
884 isolate);
885 if (memory_object->array_buffer()->is_resizable_by_js()) {
886 // If the SharedArrayBuffer is exposed as growable already, there's no
887 // need to refresh it, but instances still need to be updated with the new
888 // length.
889 memory_object->UpdateInstances(isolate);
890 } else {
891 WasmMemoryObject::RefreshSharedBuffer(isolate, memory_object,
892 ResizableFlag::kNotResizable);
893 }
894 }
895}
896#endif // V8_ENABLE_WEBASSEMBLY
897
898} // namespace v8::internal
899
900#undef TRACE_BS