v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
sandbox.cc
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/sandbox/sandbox.h"

#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/base/sys-info.h"
#include "src/flags/flags.h"

namespace v8 {
namespace internal {

#ifdef V8_ENABLE_SANDBOX

bool Sandbox::first_four_gb_of_address_space_are_reserved_ = false;

#ifdef V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
thread_local Sandbox* Sandbox::current_ = nullptr;
// static
Sandbox* Sandbox::current_non_inlined() { return current_; }
// static
void Sandbox::set_current_non_inlined(Sandbox* sandbox) { current_ = sandbox; }
#endif  // V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES

Sandbox* Sandbox::default_sandbox_ = nullptr;

// Best-effort function to determine the approximate size of the virtual
// address space that can be addressed by this process. Used to determine
// appropriate sandbox size and placement.
// The value returned by this function will always be a power of two.
static Address DetermineAddressSpaceLimit() {
#ifndef V8_TARGET_ARCH_64_BIT
#error Unsupported target architecture.
#endif

  // Assume 48 bits by default, which seems to be the most common configuration.
  constexpr unsigned kDefaultVirtualAddressBits = 48;
  // 36 bits should realistically be the lowest value we could ever see.
  constexpr unsigned kMinVirtualAddressBits = 36;
  constexpr unsigned kMaxVirtualAddressBits = 64;

  unsigned hardware_virtual_address_bits = kDefaultVirtualAddressBits;
#if defined(V8_TARGET_ARCH_X64)
  base::CPU cpu;
  if (cpu.exposes_num_virtual_address_bits()) {
    hardware_virtual_address_bits = cpu.num_virtual_address_bits();
  }
#endif  // V8_TARGET_ARCH_X64

#if defined(V8_TARGET_ARCH_ARM64) && defined(V8_TARGET_OS_ANDROID)
  // On Arm64 Android assume a 40-bit virtual address space (39 bits for
  // userspace and kernel each) as that appears to be the most common
  // configuration and there seems to be no easy way to retrieve the actual
  // number of virtual address bits from the CPU in userspace.
  hardware_virtual_address_bits = 40;
#endif

  // Assume virtual address space is split 50/50 between userspace and kernel.
  hardware_virtual_address_bits -= 1;

  // Check if there is a software-imposed limit on the size of the address
  // space. For example, older Windows versions limit the address space to 8TB:
  // https://learn.microsoft.com/en-us/windows/win32/memory/memory-limits-for-windows-releases.
  Address software_limit = base::SysInfo::AddressSpaceEnd();
  // Compute the next power of two that is larger or equal to the limit.
  unsigned software_virtual_address_bits =
      64 - base::bits::CountLeadingZeros(software_limit - 1);

  // The available address space is the smaller of the two limits.
  unsigned virtual_address_bits =
      std::min(hardware_virtual_address_bits, software_virtual_address_bits);

  // Guard against nonsensical values.
  if (virtual_address_bits < kMinVirtualAddressBits ||
      virtual_address_bits > kMaxVirtualAddressBits) {
    virtual_address_bits = kDefaultVirtualAddressBits;
  }

  return 1ULL << virtual_address_bits;
}
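The limit computation above can be reproduced in isolation. The sketch below is not part of the V8 source; std::countl_zero stands in for base::bits::CountLeadingZeros, and it works through the 8TB Windows example from the comment on a CPU reporting 48 virtual address bits:

// Standalone illustration of the address-bit computation above (C++20).
#include <algorithm>
#include <bit>
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t software_limit = 8ULL << 40;  // 8TB, the older-Windows cap.
  unsigned software_bits = 64 - std::countl_zero(software_limit - 1);  // 43
  unsigned hardware_bits = 48 - 1;  // 48-bit CPU, split 50/50 with the kernel.
  unsigned bits = std::min(hardware_bits, software_bits);
  std::printf("virtual address bits: %u, limit: 0x%llx\n", bits,
              static_cast<unsigned long long>(1ULL << bits));  // 43, 0x80000000000
}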

void Sandbox::Initialize(v8::VirtualAddressSpace* vas) {
  // Take the size of the virtual address space into account when determining
  // the size of the address space reservation backing the sandbox. For
  // example, if we only have a 40-bit address space, split evenly between
  // userspace and kernel, then userspace can only address 512GB and so we use
  // a quarter of that, 128GB, as maximum reservation size.
  Address address_space_limit = DetermineAddressSpaceLimit();
  // Note: this is technically the maximum reservation size excluding the guard
  // regions (which are not created for partially-reserved sandboxes).
  size_t max_reservation_size = address_space_limit / 4;

  // In any case, the sandbox should be smaller than our address space since we
  // otherwise wouldn't always be able to allocate objects inside of it.
  CHECK_LT(kSandboxSize, address_space_limit);

  if (!vas->CanAllocateSubspaces()) {
    // If we cannot create virtual memory subspaces, we fall back to creating a
    // partially reserved sandbox. This will happen for example on older
    // Windows versions (before Windows 10) where the necessary memory
    // management APIs, in particular, VirtualAlloc2, are not available.
    // Since reserving virtual memory is an expensive operation on Windows
    // before version 8.1 (reserving 1TB of address space will increase private
    // memory usage by around 2GB), we only reserve the minimal amount of
    // address space here. This way, we don't incur the cost of reserving
    // virtual memory, but also don't get the desired security properties as
    // unrelated mappings may end up inside the sandbox.
    max_reservation_size = kSandboxMinimumReservationSize;
  }

  // If the maximum reservation size is less than the size of the sandbox, we
  // can only create a partially-reserved sandbox.
  bool success;
  size_t reservation_size = std::min(kSandboxSize, max_reservation_size);
  DCHECK(base::bits::IsPowerOfTwo(reservation_size));
  if (reservation_size < kSandboxSize) {
    DCHECK_GE(max_reservation_size, kSandboxMinimumReservationSize);
    success = InitializeAsPartiallyReservedSandbox(vas, kSandboxSize,
                                                   reservation_size);
  } else {
    DCHECK_EQ(kSandboxSize, reservation_size);
    constexpr bool use_guard_regions = true;
    success = Initialize(vas, kSandboxSize, use_guard_regions);
  }

  // Fall back to creating a (smaller) partially reserved sandbox.
  while (!success && reservation_size > kSandboxMinimumReservationSize) {
    static_assert(kFallbackToPartiallyReservedSandboxAllowed);
    reservation_size /= 2;
    DCHECK_GE(reservation_size, kSandboxMinimumReservationSize);
    success = InitializeAsPartiallyReservedSandbox(vas, kSandboxSize,
                                                   reservation_size);
  }

  if (!success) {
    V8::FatalProcessOutOfMemory(
        nullptr,
        "Failed to reserve the virtual address space for the V8 sandbox");
  }

#if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED
  if (trap_handler::RegisterV8Sandbox(base(), size())) {
    trap_handler_initialized_ = true;
  } else {
    V8::FatalProcessOutOfMemory(
        nullptr, "Failed to allocate sandbox record for trap handling.");
  }
#endif  // V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED

  DCHECK(initialized_);
}
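When the full reservation cannot be obtained, the loop above halves the reservation size until the minimum is reached. The following standalone sketch is not V8 code; the 1TB sandbox size and 32MB minimum reservation are assumptions purely for illustration (the real constants live in the V8 headers). It prints the ladder of sizes that would be attempted on a machine whose usable address space caps the initial reservation at 128GB:

// Illustrative sketch of the reservation-size fallback ladder (assumed constants).
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kMB = 1ULL << 20;
  const uint64_t kAssumedSandboxSize = 1ULL << 40;         // assumption: 1TB
  const uint64_t kAssumedMinimumReservation = 32 * kMB;    // assumption: 32MB
  const uint64_t max_reservation_size = (1ULL << 39) / 4;  // 128GB example
  uint64_t reservation = std::min(kAssumedSandboxSize, max_reservation_size);
  // Mirrors the halving loop in Sandbox::Initialize(vas): each failed attempt
  // retries with half the previous partially-reserved region.
  for (; reservation >= kAssumedMinimumReservation; reservation /= 2) {
    std::printf("would try a partially-reserved sandbox with %llu MB reserved\n",
                static_cast<unsigned long long>(reservation / kMB));
  }
}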

bool Sandbox::Initialize(v8::VirtualAddressSpace* vas, size_t size,
                         bool use_guard_regions) {
  CHECK(!initialized_);

  size_t reservation_size = size;
  // As a temporary workaround for crbug.com/40070746 we use larger guard
  // regions at the end of the sandbox.
  // TODO(40070746): remove this workaround again once we have a proper fix.
  size_t true_reservation_size = size;
#if defined(V8_TARGET_OS_ANDROID)
  // On Android, we often won't have sufficient virtual address space available.
  const size_t kAdditionalTrailingGuardRegionSize = 0;
#else
  // Worst-case, we currently need 8 (max element size) * 32GB (max ArrayBuffer
  // size) + 4GB (additional offset for TypedArray access).
  const size_t kTotalTrailingGuardRegionSize = 260ULL * GB;
  const size_t kAdditionalTrailingGuardRegionSize =
      kTotalTrailingGuardRegionSize - kSandboxGuardRegionSize;
#endif
  if (use_guard_regions) {
    reservation_size += 2 * kSandboxGuardRegionSize;
    true_reservation_size =
        reservation_size + kAdditionalTrailingGuardRegionSize;
  }

  Address hint = RoundDown(vas->RandomPageAddress(), kSandboxAlignment);

  // There should be no executable pages mapped inside the sandbox since
  // those could be corrupted by an attacker and therefore pose a security
  // risk. Furthermore, allowing executable mappings in the sandbox requires
  // MAP_JIT on macOS, which causes fork() to become excessively slow
  // (multiple seconds or even minutes for a 1TB sandbox on macOS 12.X), in
  // turn causing tests to time out. As such, the maximum page permission
  // inside the sandbox should be read + write.
  address_space_ =
      vas->AllocateSubspace(hint, true_reservation_size, kSandboxAlignment,
                            PagePermissions::kReadWrite);

  if (!address_space_) return false;

  reservation_base_ = address_space_->base();
  base_ = reservation_base_ + (use_guard_regions ? kSandboxGuardRegionSize : 0);
  size_ = size;
  end_ = base_ + size_;
  reservation_size_ = reservation_size;
  sandbox_page_allocator_ =
      std::make_unique<base::VirtualAddressSpacePageAllocator>(
          address_space_.get());

  if (use_guard_regions) {
    Address front = reservation_base_;
    Address back = end_;
    // These must succeed since nothing was allocated in the subspace yet.
    CHECK(address_space_->AllocateGuardRegion(front, kSandboxGuardRegionSize));
    CHECK(address_space_->AllocateGuardRegion(
        back, kSandboxGuardRegionSize + kAdditionalTrailingGuardRegionSize));
  }

  // Also try to reserve the first 4GB of the process' address space. This
  // mitigates Smi<->HeapObject confusion bugs in which we end up treating a
  // Smi value as a pointer.
  if (!first_four_gb_of_address_space_are_reserved_) {
    Address end = 4UL * GB;
    size_t step = address_space_->allocation_granularity();
    for (Address start = 0; start <= 1 * MB; start += step) {
      if (vas->AllocateGuardRegion(start, end - start)) {
        first_four_gb_of_address_space_are_reserved_ = true;
        break;
      }
    }
  }

  initialized_ = true;

  FinishInitialization();

  DCHECK(!is_partially_reserved());
  return true;
}
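The guard-region sizing above can be sanity-checked with a little arithmetic. This standalone sketch is not V8 code; the 1TB sandbox size and 32GB regular guard-region size are assumptions for illustration (the actual constants are defined elsewhere in the V8 headers). It computes the layout that the reservation would have when guard regions are in use:

// Sanity check of the reservation-layout arithmetic (assumed constants).
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kGB = 1ULL << 30;
  const uint64_t kAssumedSandboxSize = 1024 * kGB;            // assumption: 1TB
  const uint64_t kAssumedGuardRegionSize = 32 * kGB;          // assumption: 32GB
  const uint64_t kTotalTrailingGuardRegionSize = 260 * kGB;   // 8 * 32GB + 4GB
  uint64_t reservation = kAssumedSandboxSize + 2 * kAssumedGuardRegionSize;
  uint64_t true_reservation =
      reservation + (kTotalTrailingGuardRegionSize - kAssumedGuardRegionSize);
  // Layout: [32GB guard][1TB sandbox][260GB guard] == 1316GB in total.
  std::printf("reservation: %llu GB, true reservation: %llu GB\n",
              static_cast<unsigned long long>(reservation / kGB),
              static_cast<unsigned long long>(true_reservation / kGB));
}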

bool Sandbox::InitializeAsPartiallyReservedSandbox(v8::VirtualAddressSpace* vas,
                                                   size_t size,
                                                   size_t size_to_reserve) {
  CHECK(!initialized_);
  CHECK(base::bits::IsPowerOfTwo(size_to_reserve));
  CHECK_LT(size_to_reserve, size);

  // Use a custom random number generator here to ensure that we get uniformly
  // distributed random numbers. We figure out the available address space
  // ourselves, and so are potentially better positioned to determine a good
  // base address for the sandbox than the embedder.
  base::RandomNumberGenerator rng;
  if (v8_flags.random_seed != 0) {
    rng.SetSeed(v8_flags.random_seed);
  }

  // We try to ensure that base + size is still (mostly) within the process'
  // address space, even though we only reserve a fraction of the memory. For
  // that, we attempt to map the sandbox into the first half of the usable
  // address space. This keeps the implementation simple and should, in any
  // realistic scenario, leave plenty of space after the actual reservation.
  Address address_space_end = DetermineAddressSpaceLimit();
  Address highest_allowed_address = address_space_end / 2;
  DCHECK(base::bits::IsPowerOfTwo(highest_allowed_address));
  constexpr int kMaxAttempts = 10;
  for (int i = 1; i <= kMaxAttempts; i++) {
    Address hint = rng.NextInt64() % highest_allowed_address;
    hint = RoundDown(hint, kSandboxAlignment);

    reservation_base_ = vas->AllocatePages(
        hint, size_to_reserve, kSandboxAlignment, PagePermissions::kNoAccess);

    if (!reservation_base_) return false;

    // Take this base if it meets the requirements or if this is the last
    // attempt.
    if (reservation_base_ <= highest_allowed_address || i == kMaxAttempts)
      break;

    // Can't use this base, so free the reservation and try again
    vas->FreePages(reservation_base_, size_to_reserve);
    reservation_base_ = kNullAddress;
  }
  DCHECK(reservation_base_);

  base_ = reservation_base_;
  size_ = size;
  end_ = base_ + size_;
  reservation_size_ = size_to_reserve;
  initialized_ = true;
  address_space_ = std::make_unique<base::EmulatedVirtualAddressSubspace>(
      vas, reservation_base_, reservation_size_, size_);
  sandbox_page_allocator_ =
      std::make_unique<base::VirtualAddressSpacePageAllocator>(
          address_space_.get());

  FinishInitialization();

  DCHECK(is_partially_reserved());
  return true;
}
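For the partially-reserved case, the placement hint is a uniformly distributed, alignment-rounded address in the lower half of the usable address space, so that base + size most likely remains addressable. A minimal sketch of that selection, using the C++ standard library instead of base::RandomNumberGenerator and an assumed 4GB sandbox alignment (the real alignment is defined in the V8 headers):

// Minimal sketch of the random placement-hint selection (assumed values).
#include <cstdint>
#include <cstdio>
#include <random>

int main() {
  const uint64_t kAssumedAlignment = 4ULL << 30;   // assumption: 4GB alignment
  const uint64_t address_space_end = 1ULL << 47;   // e.g. 48-bit hardware, 50/50 split
  const uint64_t highest_allowed_address = address_space_end / 2;
  std::mt19937_64 rng(std::random_device{}());
  uint64_t hint = rng() % highest_allowed_address;
  hint &= ~(kAssumedAlignment - 1);  // RoundDown to the sandbox alignment.
  std::printf("placement hint: 0x%llx\n", static_cast<unsigned long long>(hint));
}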

void Sandbox::FinishInitialization() {
  // Reserve the last page in the sandbox. This way, we can place inaccessible
  // "objects" (e.g. the empty backing store buffer) there that are guaranteed
  // to cause a fault on any accidental access.
  // Further, this also prevents the accidental construction of invalid
  // SandboxedPointers: if an ArrayBuffer is placed right at the end of the
  // sandbox, an ArrayBufferView could be constructed with byteLength=0 and
  // offset=buffer.byteLength, which would lead to a pointer that points just
  // outside of the sandbox.
  size_t allocation_granularity = address_space_->allocation_granularity();
  bool success = address_space_->AllocateGuardRegion(
      end_ - allocation_granularity, allocation_granularity);
  // If the sandbox is partially-reserved, this operation may fail, for example
  // if the last page is outside of the mappable address space of the process.
  CHECK(success || is_partially_reserved());

  InitializeConstants();
}

void Sandbox::InitializeConstants() {
  // Place the empty backing store buffer at the end of the sandbox, so that any
  // accidental access to it will most likely hit a guard page.
  constants_.set_empty_backing_store_buffer(end_ - 1);
}
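The constant set here lands inside the page that FinishInitialization() just reserved: for any allocation granularity, end_ - 1 falls within the final, inaccessible page of the sandbox. A tiny standalone check, not V8 code; the base address, 1TB size, and 4KB granularity are assumed values for illustration:

// Sketch: the empty backing store pointer lands in the reserved last page.
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t base = 0x200000000000;          // assumed sandbox base
  const uint64_t size = 1ULL << 40;              // assumption: 1TB sandbox
  const uint64_t granularity = 4096;             // assumption: 4KB granularity
  const uint64_t end = base + size;
  const uint64_t guard_page = end - granularity; // reserved in FinishInitialization()
  const uint64_t empty_backing_store = end - 1;  // set in InitializeConstants()
  std::printf("inside guard page: %s\n",
              (empty_backing_store >= guard_page && empty_backing_store < end)
                  ? "yes"
                  : "no");
}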

void Sandbox::TearDown() {
  if (initialized_) {
#if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED
    if (trap_handler_initialized_) {
      trap_handler::UnregisterV8Sandbox(base(), size());
      trap_handler_initialized_ = false;
    }
#endif  // V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED

    // This destroys the subspace and frees the underlying reservation.
    address_space_.reset();
    sandbox_page_allocator_.reset();
    base_ = kNullAddress;
    end_ = kNullAddress;
    size_ = 0;
    reservation_base_ = kNullAddress;
    reservation_size_ = 0;
    initialized_ = false;
    constants_.Reset();
  }
}

// static
void Sandbox::InitializeDefaultOncePerProcess(v8::VirtualAddressSpace* vas) {
  static base::LeakyObject<Sandbox> default_sandbox;
  default_sandbox_ = default_sandbox.get();

#ifdef V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
  set_current(default_sandbox_);
#endif
  default_sandbox_->Initialize(vas);
}

// static
void Sandbox::TearDownDefault() {
  GetDefault()->TearDown();

#ifdef V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
  set_current(nullptr);
#endif
}

// static
Sandbox* Sandbox::New(v8::VirtualAddressSpace* vas) {
  if (!COMPRESS_POINTERS_IN_MULTIPLE_CAGES_BOOL) {
    FATAL(
        "Creation of new sandboxes requires enabling "
        "multiple pointer compression cages at build-time");
  }
  Sandbox* sandbox = new Sandbox;
  sandbox->Initialize(vas);
  CHECK(!v8_flags.sandbox_testing && !v8_flags.sandbox_fuzzing);
  return sandbox;
}

#endif  // V8_ENABLE_SANDBOX

}  // namespace internal
}  // namespace v8