virtual-address-space.cc

V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/virtual-address-space.h"

#include <optional>

#include "include/v8-platform.h"
#include "src/base/bits.h"
#include "src/base/platform/platform.h"

namespace v8 {
namespace base {

#define STATIC_ASSERT_ENUM(a, b)                            \
  static_assert(static_cast<int>(a) == static_cast<int>(b), \
                "mismatching enum: " #a)

STATIC_ASSERT_ENUM(PagePermissions::kNoAccess, OS::MemoryPermission::kNoAccess);
STATIC_ASSERT_ENUM(PagePermissions::kRead, OS::MemoryPermission::kRead);
STATIC_ASSERT_ENUM(PagePermissions::kReadWrite,
                   OS::MemoryPermission::kReadWrite);
STATIC_ASSERT_ENUM(PagePermissions::kReadWriteExecute,
                   OS::MemoryPermission::kReadWriteExecute);
STATIC_ASSERT_ENUM(PagePermissions::kReadExecute,
                   OS::MemoryPermission::kReadExecute);

#undef STATIC_ASSERT_ENUM

namespace {
uint8_t PagePermissionsToBitset(PagePermissions permissions) {
  switch (permissions) {
    case PagePermissions::kNoAccess:
      return 0b000;
    case PagePermissions::kRead:
      return 0b100;
    case PagePermissions::kReadWrite:
      return 0b110;
    case PagePermissions::kReadWriteExecute:
      return 0b111;
    case PagePermissions::kReadExecute:
      return 0b101;
  }
}
}  // namespace

bool IsSubset(PagePermissions lhs, PagePermissions rhs) {
  uint8_t lhs_bits = PagePermissionsToBitset(lhs);
  uint8_t rhs_bits = PagePermissionsToBitset(rhs);
  return (lhs_bits & rhs_bits) == lhs_bits;
}
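
// Example: kRead maps to 0b100 and kReadWrite to 0b110, so
// IsSubset(kRead, kReadWrite) holds because 0b100 & 0b110 == 0b100. Conversely,
// IsSubset(kReadWrite, kReadExecute) is false: 0b110 & 0b101 == 0b100 != 0b110.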

VirtualAddressSpace::VirtualAddressSpace()
    : VirtualAddressSpaceBase(OS::CommitPageSize(), OS::AllocatePageSize(),
                              kNullAddress,
                              std::numeric_limits<uintptr_t>::max(),
                              PagePermissions::kReadWriteExecute) {
#if V8_OS_WIN
  // On Windows, this additional step is required to lookup the VirtualAlloc2
  // and friends functions.
  OS::EnsureWin32MemoryAPILoaded();
#endif  // V8_OS_WIN
  DCHECK(bits::IsPowerOfTwo(page_size()));
  DCHECK(bits::IsPowerOfTwo(allocation_granularity()));
  DCHECK_GE(allocation_granularity(), page_size());
  DCHECK(IsAligned(allocation_granularity(), page_size()));
}

void VirtualAddressSpace::SetRandomSeed(int64_t seed) {
  OS::SetRandomMmapSeed(seed);
}

Address VirtualAddressSpace::RandomPageAddress() {
  return reinterpret_cast<Address>(OS::GetRandomMmapAddr());
}

Address VirtualAddressSpace::AllocatePages(Address hint, size_t size,
                                           size_t alignment,
                                           PagePermissions permissions) {
  DCHECK(IsAligned(alignment, allocation_granularity()));
  DCHECK(IsAligned(hint, alignment));
  DCHECK(IsAligned(size, allocation_granularity()));

  return reinterpret_cast<Address>(
      OS::Allocate(reinterpret_cast<void*>(hint), size, alignment,
                   static_cast<OS::MemoryPermission>(permissions)));
}

void VirtualAddressSpace::FreePages(Address address, size_t size) {
  DCHECK(IsAligned(address, allocation_granularity()));
  DCHECK(IsAligned(size, allocation_granularity()));

  OS::Free(reinterpret_cast<void*>(address), size);
}

bool VirtualAddressSpace::SetPagePermissions(Address address, size_t size,
                                             PagePermissions permissions) {
  DCHECK(IsAligned(address, page_size()));
  DCHECK(IsAligned(size, page_size()));

  return OS::SetPermissions(reinterpret_cast<void*>(address), size,
                            static_cast<OS::MemoryPermission>(permissions));
}

bool VirtualAddressSpace::AllocateGuardRegion(Address address, size_t size) {
  DCHECK(IsAligned(address, allocation_granularity()));
  DCHECK(IsAligned(size, allocation_granularity()));

  void* hint = reinterpret_cast<void*>(address);
  void* result = OS::Allocate(hint, size, allocation_granularity(),
                              OS::MemoryPermission::kNoAccess);
  if (result && result != hint) {
    OS::Free(result, size);
  }
  return result == hint;
}
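
// Since the root space does not reserve its address range up front, a guard
// region is emulated here by mapping the exact range with kNoAccess
// permissions. If the OS places the mapping anywhere else, it is released
// again and the call reports failure.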

void VirtualAddressSpace::FreeGuardRegion(Address address, size_t size) {
  DCHECK(IsAligned(address, allocation_granularity()));
  DCHECK(IsAligned(size, allocation_granularity()));

  OS::Free(reinterpret_cast<void*>(address), size);
}

bool VirtualAddressSpace::CanAllocateSubspaces() {
  return OS::CanReserveAddressSpace();
}

Address VirtualAddressSpace::AllocateSharedPages(
    Address hint, size_t size, PagePermissions permissions,
    PlatformSharedMemoryHandle handle, uint64_t offset) {
  DCHECK(IsAligned(hint, allocation_granularity()));
  DCHECK(IsAligned(size, allocation_granularity()));
  DCHECK(IsAligned(offset, allocation_granularity()));

  return reinterpret_cast<Address>(OS::AllocateShared(
      reinterpret_cast<void*>(hint), size,
      static_cast<OS::MemoryPermission>(permissions), handle, offset));
}

void VirtualAddressSpace::FreeSharedPages(Address address, size_t size) {
  DCHECK(IsAligned(address, allocation_granularity()));
  DCHECK(IsAligned(size, allocation_granularity()));

  OS::FreeShared(reinterpret_cast<void*>(address), size);
}

std::unique_ptr<v8::VirtualAddressSpace> VirtualAddressSpace::AllocateSubspace(
    Address hint, size_t size, size_t alignment,
    PagePermissions max_page_permissions) {
  DCHECK(IsAligned(alignment, allocation_granularity()));
  DCHECK(IsAligned(hint, alignment));
  DCHECK(IsAligned(size, allocation_granularity()));

  std::optional<AddressSpaceReservation> reservation =
      OS::CreateAddressSpaceReservation(
          reinterpret_cast<void*>(hint), size, alignment,
          static_cast<OS::MemoryPermission>(max_page_permissions));
  if (!reservation.has_value())
    return std::unique_ptr<v8::VirtualAddressSpace>();
  return std::unique_ptr<v8::VirtualAddressSpace>(
      new VirtualAddressSubspace(*reservation, this, max_page_permissions));
}
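
// Each subspace handed out above is backed by an OS-level
// AddressSpaceReservation and managed by the VirtualAddressSubspace class
// below, which tracks allocations inside the reservation with a
// RegionAllocator.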

bool VirtualAddressSpace::RecommitPages(Address address, size_t size,
                                        PagePermissions permissions) {
  DCHECK(IsAligned(address, page_size()));
  DCHECK(IsAligned(size, page_size()));

  return OS::RecommitPages(reinterpret_cast<void*>(address), size,
                           static_cast<OS::MemoryPermission>(permissions));
}

bool VirtualAddressSpace::DiscardSystemPages(Address address, size_t size) {
  DCHECK(IsAligned(address, page_size()));
  DCHECK(IsAligned(size, page_size()));

  return OS::DiscardSystemPages(reinterpret_cast<void*>(address), size);
}

bool VirtualAddressSpace::DecommitPages(Address address, size_t size) {
  DCHECK(IsAligned(address, page_size()));
  DCHECK(IsAligned(size, page_size()));

  return OS::DecommitPages(reinterpret_cast<void*>(address), size);
}

void VirtualAddressSpace::FreeSubspace(VirtualAddressSubspace* subspace) {
  OS::FreeAddressSpaceReservation(subspace->reservation_);
}

VirtualAddressSubspace::VirtualAddressSubspace(
    AddressSpaceReservation reservation, VirtualAddressSpaceBase* parent_space,
    PagePermissions max_page_permissions)
    : VirtualAddressSpaceBase(parent_space->page_size(),
                              parent_space->allocation_granularity(),
                              reinterpret_cast<Address>(reservation.base()),
                              reservation.size(), max_page_permissions),
      reservation_(reservation),
      region_allocator_(reinterpret_cast<Address>(reservation.base()),
                        reservation.size(),
                        parent_space->allocation_granularity()),
      parent_space_(parent_space) {
#if V8_OS_WIN
  // On Windows, the address space reservation needs to be split and merged at
  // the OS level as well.
  region_allocator_.set_on_split_callback([this](Address start, size_t size) {
    DCHECK(IsAligned(start, allocation_granularity()));
    CHECK(reservation_.SplitPlaceholder(reinterpret_cast<void*>(start), size));
  });
  region_allocator_.set_on_merge_callback([this](Address start, size_t size) {
    DCHECK(IsAligned(start, allocation_granularity()));
    CHECK(reservation_.MergePlaceholders(reinterpret_cast<void*>(start), size));
  });
#endif  // V8_OS_WIN
}

VirtualAddressSubspace::~VirtualAddressSubspace() {
  // TODO(chromium:1218005) here or in the RegionAllocator destructor we should
  // assert that all allocations have been freed. Otherwise we may end up
  // leaking memory on Windows because VirtualFree(subspace_base, 0) will then
  // only free the first allocation in the subspace, not the entire subspace.
  parent_space_->FreeSubspace(this);
}

void VirtualAddressSubspace::SetRandomSeed(int64_t seed) {
  MutexGuard guard(&mutex_);
  rng_.SetSeed(seed);
}

Address VirtualAddressSubspace::RandomPageAddress() {
  MutexGuard guard(&mutex_);
  // Note: the random numbers generated here aren't uniformly distributed if
  // the size isn't a power of two.
  Address addr = base() + (static_cast<uint64_t>(rng_.NextInt64()) % size());
  return RoundDown(addr, allocation_granularity());
}
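
// The rounding ensures the returned address is aligned to the allocation
// granularity, which is the minimum alignment expected of hints passed to
// AllocatePages below.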

Address VirtualAddressSubspace::AllocatePages(Address hint, size_t size,
                                              size_t alignment,
                                              PagePermissions permissions) {
  DCHECK(IsAligned(alignment, allocation_granularity()));
  DCHECK(IsAligned(hint, alignment));
  DCHECK(IsAligned(size, allocation_granularity()));
  DCHECK(IsSubset(permissions, max_page_permissions()));

  MutexGuard guard(&mutex_);

  Address address = region_allocator_.AllocateRegion(hint, size, alignment);
  if (address == RegionAllocator::kAllocationFailure) return kNullAddress;

  if (!reservation_.Allocate(reinterpret_cast<void*>(address), size,
                             static_cast<OS::MemoryPermission>(permissions))) {
    // This most likely means that we ran out of memory.
    CHECK_EQ(size, region_allocator_.FreeRegion(address));
    return kNullAddress;
  }

  return address;
}
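
// Allocation in a subspace is two-phase: the RegionAllocator reserves a range
// inside the existing reservation purely as bookkeeping, and reservation_ then
// makes that range accessible at the OS level. If the second step fails, the
// bookkeeping is rolled back so the range can be reused.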

void VirtualAddressSubspace::FreePages(Address address, size_t size) {
  DCHECK(IsAligned(address, allocation_granularity()));
  DCHECK(IsAligned(size, allocation_granularity()));

  MutexGuard guard(&mutex_);
  // The order here is important: on Windows, the allocation first has to be
  // freed to a placeholder before the placeholder can be merged (during the
  // merge_callback) with any surrounding placeholder mappings.
  if (!reservation_.Free(reinterpret_cast<void*>(address), size)) {
    // This can happen due to an out-of-memory condition, such as running out
    // of available VMAs for the process.
    FatalOOM(OOMType::kProcess, "VirtualAddressSubspace::FreePages");
  }
  CHECK_EQ(size, region_allocator_.FreeRegion(address));
}

bool VirtualAddressSubspace::SetPagePermissions(Address address, size_t size,
                                                PagePermissions permissions) {
  DCHECK(IsAligned(address, page_size()));
  DCHECK(IsAligned(size, page_size()));
  DCHECK(IsSubset(permissions, max_page_permissions()));

  return reservation_.SetPermissions(
      reinterpret_cast<void*>(address), size,
      static_cast<OS::MemoryPermission>(permissions));
}

bool VirtualAddressSubspace::AllocateGuardRegion(Address address, size_t size) {
  DCHECK(IsAligned(address, allocation_granularity()));
  DCHECK(IsAligned(size, allocation_granularity()));

  MutexGuard guard(&mutex_);

  // It is guaranteed that reserved address space is inaccessible, so we just
  // need to mark the region as in-use in the region allocator.
  return region_allocator_.AllocateRegionAt(address, size);
}

void VirtualAddressSubspace::FreeGuardRegion(Address address, size_t size) {
  DCHECK(IsAligned(address, allocation_granularity()));
  DCHECK(IsAligned(size, allocation_granularity()));

  MutexGuard guard(&mutex_);
  CHECK_EQ(size, region_allocator_.FreeRegion(address));
}

Address VirtualAddressSubspace::AllocateSharedPages(
    Address hint, size_t size, PagePermissions permissions,
    PlatformSharedMemoryHandle handle, uint64_t offset) {
  DCHECK(IsAligned(hint, allocation_granularity()));
  DCHECK(IsAligned(size, allocation_granularity()));
  DCHECK(IsAligned(offset, allocation_granularity()));

  MutexGuard guard(&mutex_);

  Address address =
      region_allocator_.AllocateRegion(hint, size, allocation_granularity());
  if (address == RegionAllocator::kAllocationFailure) return kNullAddress;

  if (!reservation_.AllocateShared(
          reinterpret_cast<void*>(address), size,
          static_cast<OS::MemoryPermission>(permissions), handle, offset)) {
    CHECK_EQ(size, region_allocator_.FreeRegion(address));
    return kNullAddress;
  }

  return address;
}

void VirtualAddressSubspace::FreeSharedPages(Address address, size_t size) {
  DCHECK(IsAligned(address, allocation_granularity()));
  DCHECK(IsAligned(size, allocation_granularity()));

  MutexGuard guard(&mutex_);
  // The order here is important: on Windows, the allocation first has to be
  // freed to a placeholder before the placeholder can be merged (during the
  // merge_callback) with any surrounding placeholder mappings.
  CHECK(reservation_.FreeShared(reinterpret_cast<void*>(address), size));
  CHECK_EQ(size, region_allocator_.FreeRegion(address));
}

std::unique_ptr<v8::VirtualAddressSpace>
VirtualAddressSubspace::AllocateSubspace(Address hint, size_t size,
                                         size_t alignment,
                                         PagePermissions max_page_permissions) {
  DCHECK(IsAligned(alignment, allocation_granularity()));
  DCHECK(IsAligned(hint, alignment));
  DCHECK(IsAligned(size, allocation_granularity()));
  DCHECK(IsSubset(max_page_permissions, this->max_page_permissions()));

  MutexGuard guard(&mutex_);

  Address address = region_allocator_.AllocateRegion(hint, size, alignment);
  if (address == RegionAllocator::kAllocationFailure) {
    return std::unique_ptr<v8::VirtualAddressSpace>();
  }

  std::optional<AddressSpaceReservation> reservation =
      reservation_.CreateSubReservation(
          reinterpret_cast<void*>(address), size,
          static_cast<OS::MemoryPermission>(max_page_permissions));
  if (!reservation.has_value()) {
    CHECK_EQ(size, region_allocator_.FreeRegion(address));
    return nullptr;
  }
  return std::unique_ptr<v8::VirtualAddressSpace>(
      new VirtualAddressSubspace(*reservation, this, max_page_permissions));
}

bool VirtualAddressSubspace::RecommitPages(Address address, size_t size,
                                           PagePermissions permissions) {
  DCHECK(IsAligned(address, page_size()));
  DCHECK(IsAligned(size, page_size()));
  DCHECK(IsSubset(permissions, max_page_permissions()));

  return reservation_.RecommitPages(
      reinterpret_cast<void*>(address), size,
      static_cast<OS::MemoryPermission>(permissions));
}

bool VirtualAddressSubspace::DiscardSystemPages(Address address, size_t size) {
  DCHECK(IsAligned(address, page_size()));
  DCHECK(IsAligned(size, page_size()));

  return reservation_.DiscardSystemPages(reinterpret_cast<void*>(address),
                                         size);
}

bool VirtualAddressSubspace::DecommitPages(Address address, size_t size) {
  DCHECK(IsAligned(address, page_size()));
  DCHECK(IsAligned(size, page_size()));

  return reservation_.DecommitPages(reinterpret_cast<void*>(address), size);
}

void VirtualAddressSubspace::FreeSubspace(VirtualAddressSubspace* subspace) {
  MutexGuard guard(&mutex_);

  AddressSpaceReservation reservation = subspace->reservation_;
  Address base = reinterpret_cast<Address>(reservation.base());
  CHECK_EQ(reservation.size(), region_allocator_.FreeRegion(base));
  CHECK(reservation_.FreeSubReservation(reservation));
}

}  // namespace base
}  // namespace v8
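
Below is a minimal usage sketch. It is not part of virtual-address-space.cc: the Example() function, the chosen sizes, and the assumption that the caller is a V8-internal translation unit able to include the private header are illustrative only.

#include <cstdint>
#include <memory>

#include "src/base/virtual-address-space.h"

void Example() {
  // The root space forwards directly to the OS page allocator.
  v8::base::VirtualAddressSpace root;
  if (!root.CanAllocateSubspaces()) return;

  // Reserve a contiguous region that is then managed by a
  // VirtualAddressSubspace on top of a RegionAllocator.
  const size_t granularity = root.allocation_granularity();
  std::unique_ptr<v8::VirtualAddressSpace> subspace = root.AllocateSubspace(
      /*hint=*/0, /*size=*/granularity * 1024, /*alignment=*/granularity,
      /*max_page_permissions=*/v8::PagePermissions::kReadWrite);
  if (!subspace) return;

  // Allocate pages inside the subspace, tighten their permissions, then
  // return them to the subspace.
  uintptr_t pages = subspace->AllocatePages(
      /*hint=*/0, granularity * 4, granularity,
      v8::PagePermissions::kReadWrite);
  if (pages != 0) {
    // The pages are committed and writable at this point.
    *reinterpret_cast<int*>(pages) = 42;
    if (subspace->SetPagePermissions(pages, granularity * 4,
                                     v8::PagePermissions::kRead)) {
      // Reads remain valid; writes would now fault.
    }
    subspace->FreePages(pages, granularity * 4);
  }
}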