// (Include list reconstructed from the symbols used below; the exact set in
// the original file may differ.)
#include "src/sandbox/sandbox.h"

#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/base/emulated-virtual-address-subspace.h"
#include "src/base/lazy-instance.h"
#include "src/base/sys-info.h"
#include "src/base/utils/random-number-generator.h"
#include "src/base/virtual-address-space-page-allocator.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/init/v8.h"

#if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED
#include "src/trap-handler/trap-handler.h"
#endif  // V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED

namespace v8 {
namespace internal {

#ifdef V8_ENABLE_SANDBOX
bool Sandbox::first_four_gb_of_address_space_are_reserved_ = false;
#ifdef V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
thread_local Sandbox* Sandbox::current_ = nullptr;

Sandbox* Sandbox::current_non_inlined() { return current_; }

void Sandbox::set_current_non_inlined(Sandbox* sandbox) { current_ = sandbox; }
#endif  // V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
Sandbox* Sandbox::default_sandbox_ = nullptr;
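// Best-effort estimate of the size of the virtual address space that this
// process can address. The result is used below to size the sandbox
// reservation and to pick candidate base addresses for it.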
static Address DetermineAddressSpaceLimit() {
#ifndef V8_TARGET_ARCH_64_BIT
#error Unsupported target architecture.
#endif
  // Assume 48 virtual address bits by default, as that is the most common
  // configuration.
  constexpr unsigned kDefaultVirtualAddressBits = 48;
  // 36 bits is realistically the lowest value that can be encountered.
  constexpr unsigned kMinVirtualAddressBits = 36;
  constexpr unsigned kMaxVirtualAddressBits = 64;
  unsigned hardware_virtual_address_bits = kDefaultVirtualAddressBits;
#if defined(V8_TARGET_ARCH_X64)
  // On x64, the CPU can report the number of virtual address bits it
  // supports.
  base::CPU cpu;
  if (cpu.exposes_num_virtual_address_bits()) {
    hardware_virtual_address_bits = cpu.num_virtual_address_bits();
  }
#endif  // V8_TARGET_ARCH_X64
#if defined(V8_TARGET_ARCH_ARM64) && defined(V8_TARGET_OS_ANDROID)
  // On Arm64 Android, assume a 40-bit virtual address space, as that appears
  // to be the most common configuration and there is no easy way to query the
  // actual number of virtual address bits from userspace.
  hardware_virtual_address_bits = 40;
#endif
  // Assume the virtual address space is split evenly between userspace and
  // the kernel.
  hardware_virtual_address_bits -= 1;
  // The usable address space may additionally be limited in software.
  // (Reconstructed: the software limit is assumed to come from
  // base::SysInfo::AddressSpaceEnd(), rounded up to the next power of two.)
  Address software_limit = base::SysInfo::AddressSpaceEnd();
  unsigned software_virtual_address_bits =
      64 - base::bits::CountLeadingZeros(software_limit - 1);
  unsigned virtual_address_bits =
      std::min(hardware_virtual_address_bits, software_virtual_address_bits);
  // Guard against nonsensical values.
  if (virtual_address_bits < kMinVirtualAddressBits ||
      virtual_address_bits > kMaxVirtualAddressBits) {
    virtual_address_bits = kDefaultVirtualAddressBits;
  }

  return 1ULL << virtual_address_bits;
}
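// Sets up the sandbox for this Sandbox instance. This first tries to reserve
// the full sandbox address space (with guard regions); if the usable address
// space is too small or the reservation fails, it falls back to a partially
// reserved sandbox with progressively smaller reservations.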
void Sandbox::Initialize(v8::VirtualAddressSpace* vas) {
  Address address_space_limit = DetermineAddressSpaceLimit();
  // Conservatively use at most a quarter of the usable address space for the
  // sandbox, leaving the remainder to the embedder.
  size_t max_reservation_size = address_space_limit / 4;

  CHECK_LT(kSandboxSize, address_space_limit);

  // Without support for virtual memory subspaces, only a minimal partial
  // reservation is possible. (Reconstructed condition; the original guard for
  // this assignment was not preserved.)
  if (!vas->CanAllocateSubspaces()) {
    max_reservation_size = kSandboxMinimumReservationSize;
  }
  bool success = false;
  size_t reservation_size = std::min(kSandboxSize, max_reservation_size);
  if (reservation_size < kSandboxSize) {
    // The full sandbox cannot be reserved, so create a partially reserved
    // sandbox instead.
    DCHECK_GE(max_reservation_size, kSandboxMinimumReservationSize);
    success = InitializeAsPartiallyReservedSandbox(vas, kSandboxSize,
                                                   reservation_size);
  } else {
    DCHECK_EQ(kSandboxSize, reservation_size);
    constexpr bool use_guard_regions = true;
    success = Initialize(vas, kSandboxSize, use_guard_regions);
  }
  // If that failed, fall back to a smaller partially reserved sandbox,
  // halving the reservation size until the reservation succeeds.
  while (!success && reservation_size > kSandboxMinimumReservationSize) {
    static_assert(kFallbackToPartiallyReservedSandboxAllowed);
    reservation_size /= 2;
    DCHECK_GE(reservation_size, kSandboxMinimumReservationSize);
    success = InitializeAsPartiallyReservedSandbox(vas, kSandboxSize,
                                                   reservation_size);
  }
150 "Failed to reserve the virtual address space for the V8 sandbox");
#if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED
  // Register the sandbox with the trap handler so that out-of-bounds accesses
  // inside the sandbox region can be recognized and handled.
  if (trap_handler::RegisterV8Sandbox(base(), size())) {
    trap_handler_initialized_ = true;
  } else {
    V8::FatalProcessOutOfMemory(
        nullptr, "Failed to allocate sandbox record for trap handling.");
  }
#endif  // V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED
}
bool Sandbox::Initialize(v8::VirtualAddressSpace* vas, size_t size,
                         bool use_guard_regions) {
  CHECK(!initialized_);
  CHECK(base::bits::IsPowerOfTwo(size));
  CHECK(vas->CanAllocateSubspaces());
  size_t reservation_size = size;
  size_t true_reservation_size = size;
#if defined(V8_TARGET_OS_ANDROID)
  // No additional trailing guard region on Android, where such large address
  // space reservations would be problematic.
  const size_t kAdditionalTrailingGuardRegionSize = 0;
#else
  // Use an enlarged trailing guard region (260GB in total) to also catch
  // out-of-bounds accesses computed from a corrupted 32-bit offset scaled by
  // an element size of up to 8 bytes.
  const size_t kTotalTrailingGuardRegionSize = 260ULL * GB;
  const size_t kAdditionalTrailingGuardRegionSize =
      kTotalTrailingGuardRegionSize - kSandboxGuardRegionSize;
#endif  // V8_TARGET_OS_ANDROID
  if (use_guard_regions) {
    reservation_size += 2 * kSandboxGuardRegionSize;
    true_reservation_size =
        reservation_size + kAdditionalTrailingGuardRegionSize;
  }
  // Reserve the address space at a random, properly aligned address.
  // (Reconstructed call; the maximum page permissions passed here are an
  // assumption.)
  Address hint = RoundDown(vas->RandomPageAddress(), kSandboxAlignment);
  address_space_ = vas->AllocateSubspace(hint, true_reservation_size,
                                         kSandboxAlignment,
                                         PagePermissions::kReadWrite);

  if (!address_space_) return false;
  reservation_base_ = address_space_->base();
  base_ = reservation_base_ + (use_guard_regions ? kSandboxGuardRegionSize : 0);
  size_ = size;
  end_ = base_ + size_;
  reservation_size_ = reservation_size;
  sandbox_page_allocator_ =
      std::make_unique<base::VirtualAddressSpacePageAllocator>(
          address_space_.get());
  if (use_guard_regions) {
    Address front = reservation_base_;
    Address back = end_;
    // These allocations must succeed: nothing else can have been allocated in
    // the subspace yet.
    CHECK(address_space_->AllocateGuardRegion(front, kSandboxGuardRegionSize));
    CHECK(address_space_->AllocateGuardRegion(
        back, kSandboxGuardRegionSize + kAdditionalTrailingGuardRegionSize));
  }
  // Also attempt to reserve the first four gigabytes of the process' address
  // space, so that a corrupted 32-bit value interpreted as a raw pointer is
  // likely to fault. The loop retries at slightly higher start addresses in
  // case the lowest pages are already in use.
  if (!first_four_gb_of_address_space_are_reserved_) {
    Address end = 4UL * GB;
    size_t step = address_space_->allocation_granularity();
    for (Address start = 0; start <= 1UL * MB; start += step) {
      if (vas->AllocateGuardRegion(start, end - start)) {
        first_four_gb_of_address_space_are_reserved_ = true;
        break;
      }
    }
  }
  initialized_ = true;

  FinishInitialization();

  DCHECK(!is_partially_reserved());
  return true;
}
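// Creates a partially reserved sandbox: only size_to_reserve bytes at the
// start of the sandbox are actually reserved, guard regions are not used, and
// the remainder of the address range is merely emulated (see the
// EmulatedVirtualAddressSubspace created below). Unrelated mappings may
// therefore end up inside the sandbox, so this mode provides weaker security
// guarantees.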
251 size_t size_to_reserve) {
252 CHECK(!initialized_);
  // Use a custom random number generator here so that candidate addresses are
  // uniformly distributed and reproducible when a fixed seed is given.
  base::RandomNumberGenerator rng;
  if (v8_flags.random_seed != 0) rng.SetSeed(v8_flags.random_seed);
  // Keep the reservation inside the lower half of the usable address space so
  // that the unreserved tail of the sandbox remains (mostly) addressable.
  Address address_space_end = DetermineAddressSpaceLimit();
  Address highest_allowed_address = address_space_end / 2;
  constexpr int kMaxAttempts = 10;
  for (int i = 1; i <= kMaxAttempts; i++) {
    Address hint = rng.NextInt64() % highest_allowed_address;
    hint = RoundDown(hint, kSandboxAlignment);

    // Reserve size_to_reserve bytes of inaccessible memory at the hint.
    // (Reconstructed call; the page permissions are an assumption.)
    reservation_base_ = vas->AllocatePages(hint, size_to_reserve,
                                           kSandboxAlignment,
                                           PagePermissions::kNoAccess);
    if (!reservation_base_) return false;
    // Take this base if it meets the requirements or if this is the last
    // attempt.
    if (reservation_base_ <= highest_allowed_address || i == kMaxAttempts)
      break;
290 vas->
FreePages(reservation_base_, size_to_reserve);
293 DCHECK(reservation_base_);
  base_ = reservation_base_;
  size_ = size;
  end_ = base_ + size_;
  reservation_size_ = size_to_reserve;
  initialized_ = true;
  address_space_ = std::make_unique<base::EmulatedVirtualAddressSubspace>(
      vas, reservation_base_, reservation_size_, size_);
  sandbox_page_allocator_ =
      std::make_unique<base::VirtualAddressSpacePageAllocator>(
          address_space_.get());
  FinishInitialization();

  DCHECK(is_partially_reserved());
  return true;
}
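// Initialization steps shared by fully and partially reserved sandboxes.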
void Sandbox::FinishInitialization() {
  // Reserve the last page in the sandbox. Inaccessible "objects" such as the
  // empty backing store buffer can then be placed there with the guarantee
  // that any access faults.
  size_t allocation_granularity = address_space_->allocation_granularity();
  bool success = address_space_->AllocateGuardRegion(
      end_ - allocation_granularity, allocation_granularity);
  // In a partially reserved sandbox, the last page may lie outside the actual
  // reservation, in which case this allocation is allowed to fail.
  CHECK(success || is_partially_reserved());

  InitializeConstants();
}
void Sandbox::InitializeConstants() {
  // Place the empty backing store buffer at the very end of the sandbox,
  // inside the guard page reserved in FinishInitialization().
  constants_.set_empty_backing_store_buffer(end_ - 1);
}
void Sandbox::TearDown() {
  if (initialized_) {
#if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED
    if (trap_handler_initialized_) {
      trap_handler::UnregisterV8Sandbox(base(), size());
      trap_handler_initialized_ = false;
    }
#endif  // V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED
    // Destroying the address space also frees the underlying reservation.
    address_space_.reset();
    sandbox_page_allocator_.reset();
    base_ = kNullAddress;
    end_ = kNullAddress;
    size_ = 0;
    reservation_base_ = kNullAddress;
    reservation_size_ = 0;
    initialized_ = false;
  }
}
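// The default, process-wide sandbox. It is deliberately leaked (LeakyObject)
// so that its address space reservation stays valid for the entire lifetime
// of the process.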
// static
void Sandbox::InitializeDefaultOncePerProcess(v8::VirtualAddressSpace* vas) {
  static base::LeakyObject<Sandbox> default_sandbox;
  default_sandbox_ = default_sandbox.get();
#ifdef V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
  set_current(default_sandbox_);
#endif
  default_sandbox_->Initialize(vas);
}
// static
void Sandbox::TearDownDefault() {
  GetDefault()->TearDown();

#ifdef V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
  set_current(nullptr);
#endif
}
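// Creates an additional sandbox beyond the default one. This is only possible
// when every sandbox has its own pointer compression cage, i.e. when
// V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES is enabled at build time.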
383 "Creation of new sandboxes requires enabling "
384 "multiple pointer compression cages at build-time");
386 Sandbox* sandbox =
new Sandbox;
387 sandbox->Initialize(vas);