v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
trusted-range.cc
Go to the documentation of this file.
1// Copyright 2023 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
#include "src/heap/trusted-range.h"

#include "src/base/once.h"
#include "src/heap/heap-inl.h"
11
12namespace v8 {
13namespace internal {
14
15#ifdef V8_ENABLE_SANDBOX
16
17bool TrustedRange::InitReservation(size_t requested) {
18 DCHECK_LE(requested, kMaximalTrustedRangeSize);
19 DCHECK_GE(requested, kMinimumTrustedRangeSize);
20
21 auto page_allocator = GetPlatformPageAllocator();
22
24 CHECK(IsAligned(kPageSize, page_allocator->AllocatePageSize()));
25
26 // We want the trusted range to be allocated above 4GB, for a few reasons:
27 // 1. Certain (sandbox) bugs allow access to (only) the first 4GB of the
28 // address space, so we don't want sensitive objects to live there.
29 // 2. When pointers to trusted objects have the upper 32 bits cleared, they
30 // may look like compressed pointers to some code in V8. For example, the
31 // stack spill slot visiting logic (VisitSpillSlot in frames.cc)
32 // currently assumes that when the top 32-bits are zero, then it's
33 // dealing with a compressed pointer and will attempt to decompress them
34 // with the main cage base, which in this case would break.
35 //
36 // To achieve this, we simply require 4GB alignment of the allocation and
37 // assume that we can never map the zeroth page.
38 const size_t base_alignment = size_t{4} * GB;
39
40 const Address requested_start_hint =
41 RoundDown(reinterpret_cast<Address>(page_allocator->GetRandomMmapAddr()),
42 base_alignment);
43
44 VirtualMemoryCage::ReservationParams params;
45 params.page_allocator = page_allocator;
46 params.reservation_size = requested;
47 params.page_size = kPageSize;
48 params.base_alignment = base_alignment;
49 params.requested_start_hint = requested_start_hint;
50 params.permissions = PageAllocator::Permission::kNoAccess;
51 params.page_initialization_mode =
53 params.page_freeing_mode = base::PageFreeingMode::kMakeInaccessible;
54 bool success = VirtualMemoryCage::InitReservation(params);
55
56 if (success) {
57 // Reserve the null page to mitigate (compressed) nullptr dereference bugs.
58 //
59 // We typically use Smi::zero()/nullptr for protected pointer fields
60 // (compressed pointers in trusted space) if the field is empty.
61 // As such, we can have the equivalent of nullptr deref bugs if either some
62 // code doesn't handle empty fields or if objects aren't correctly
63 // initialized and fields are left empty. To mitigate these, we make the
64 // first pages of trusted space inaccessible so that any access is
65 // guaranteed to crash safely.
66 size_t guard_region_size = 1 * MB;
67 DCHECK(IsAligned(guard_region_size, page_allocator_->AllocatePageSize()));
68 CHECK(page_allocator_->AllocatePagesAt(base(), guard_region_size,
70 }
71
72 return success;
73}
74
75namespace {
76
77TrustedRange* process_wide_trusted_range_ = nullptr;
78
79V8_DECLARE_ONCE(init_trusted_range_once);
80void InitProcessWideTrustedRange(size_t requested_size) {
81 TrustedRange* trusted_range = new TrustedRange();
82 if (!trusted_range->InitReservation(requested_size)) {
84 nullptr, "Failed to reserve virtual memory for TrustedRange");
85 }
86 process_wide_trusted_range_ = trusted_range;
87
88 TrustedSpaceCompressionScheme::InitBase(trusted_range->base());
89}
90} // namespace
91
92// static
93TrustedRange* TrustedRange::EnsureProcessWideTrustedRange(
94 size_t requested_size) {
95 base::CallOnce(&init_trusted_range_once, InitProcessWideTrustedRange,
96 requested_size);
97 return process_wide_trusted_range_;
98}
99
100// static
101TrustedRange* TrustedRange::GetProcessWideTrustedRange() {
102 return process_wide_trusted_range_;
103}
104
105#endif // V8_ENABLE_SANDBOX
106
107} // namespace internal
108} // namespace v8
virtual size_t AllocatePageSize()=0
static V8_INLINE void InitBase(Address base)
static V8_EXPORT_PRIVATE void FatalProcessOutOfMemory(Isolate *isolate, const char *location, const OOMDetails &details=kNoOOMDetails)
bool InitReservation(const ReservationParams &params, base::AddressRegion existing_reservation=base::AddressRegion())
cppgc::PageAllocator * page_allocator_
Definition cpp-heap.cc:194
constexpr size_t kPageSize
Definition globals.h:42
void CallOnce(OnceType *once, std::function< void()> init_func)
Definition once.h:90
v8::PageAllocator * GetPlatformPageAllocator()
Definition allocation.cc:66
constexpr int GB
Definition v8-internal.h:57
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage * MB
Definition flags.cc:2197
#define V8_DECLARE_ONCE(NAME)
Definition once.h:72
#define DCHECK_LE(v1, v2)
Definition logging.h:490
#define CHECK(condition)
Definition logging.h:124
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define DCHECK(condition)
Definition logging.h:482
constexpr T RoundDown(T x, intptr_t m)
Definition macros.h:371
constexpr bool IsAligned(T value, U alignment)
Definition macros.h:403