v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
allocation.cc
Go to the documentation of this file.
1// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
7#include <stdlib.h> // For free, malloc.
8
9#include "src/base/bits.h"
12#include "src/base/logging.h"
18#include "src/flags/flags.h"
19#include "src/init/v8.h"
20#include "src/sandbox/sandbox.h"
21#include "src/utils/memcopy.h"
22
23#if V8_LIBC_BIONIC
24#include <malloc.h>
25#endif
26
27namespace v8 {
28namespace internal {
29
30namespace {
31
32class PageAllocatorInitializer {
33 public:
34 PageAllocatorInitializer() {
36 if (page_allocator_ == nullptr) {
37 static base::LeakyObject<base::PageAllocator> default_page_allocator;
38 page_allocator_ = default_page_allocator.get();
39 }
40#if defined(LEAK_SANITIZER)
41 static base::LeakyObject<base::LsanPageAllocator> lsan_allocator(
42 page_allocator_);
43 page_allocator_ = lsan_allocator.get();
44#endif
45 }
46
47 PageAllocator* page_allocator() const { return page_allocator_; }
48
49 void SetPageAllocatorForTesting(PageAllocator* allocator) {
50 page_allocator_ = allocator;
51 }
52
53 private:
54 PageAllocator* page_allocator_;
55};
56
57DEFINE_LAZY_LEAKY_OBJECT_GETTER(PageAllocatorInitializer,
58 GetPageAllocatorInitializer)
59
60// We will attempt allocation this many times. After each failure, we call
61// OnCriticalMemoryPressure to try to free some memory.
62const int kAllocationTries = 2;
63
64} // namespace
65
67 DCHECK_NOT_NULL(GetPageAllocatorInitializer()->page_allocator());
68 return GetPageAllocatorInitializer()->page_allocator();
69}
70
72#if defined(LEAK_SANITIZER)
74 std::make_unique<base::VirtualAddressSpace>());
75#else
77#endif
78 return vas.get();
79}
80
#ifdef V8_ENABLE_SANDBOX
// Returns the page allocator of the currently active sandbox. The sandbox
// must already be initialized when this is called.
v8::PageAllocator* GetSandboxPageAllocator() {
  Sandbox* sandbox = Sandbox::current();
  CHECK(sandbox->is_initialized());
  return sandbox->page_allocator();
}
#endif
87
89 v8::PageAllocator* new_page_allocator) {
90 v8::PageAllocator* old_page_allocator = GetPlatformPageAllocator();
91 GetPageAllocatorInitializer()->SetPageAllocatorForTesting(new_page_allocator);
92 return old_page_allocator;
93}
94
95void* Malloced::operator new(size_t size) {
96 void* result = AllocWithRetry(size);
97 if (V8_UNLIKELY(result == nullptr)) {
98 V8::FatalProcessOutOfMemory(nullptr, "Malloced operator new");
99 }
100 return result;
101}
102
103void Malloced::operator delete(void* p) { base::Free(p); }
104
105char* StrDup(const char* str) {
106 size_t length = strlen(str);
107 char* result = NewArray<char>(length + 1);
108 MemCopy(result, str, length);
109 result[length] = '\0';
110 return result;
111}
112
113char* StrNDup(const char* str, size_t n) {
114 size_t length = strlen(str);
115 if (n < length) length = n;
116 char* result = NewArray<char>(length + 1);
117 MemCopy(result, str, length);
118 result[length] = '\0';
119 return result;
120}
121
122void* AllocWithRetry(size_t size, MallocFn malloc_fn) {
123 void* result = nullptr;
124 for (int i = 0; i < kAllocationTries; ++i) {
125 result = malloc_fn(size);
126 if (V8_LIKELY(result != nullptr)) break;
128 }
129 return result;
130}
131
134 for (int i = 0; i < kAllocationTries; ++i) {
136 if (V8_LIKELY(result.ptr != nullptr)) break;
138 }
139 return {result.ptr, result.count};
140}
141
142void* AlignedAllocWithRetry(size_t size, size_t alignment) {
143 void* result = nullptr;
144 for (int i = 0; i < kAllocationTries; ++i) {
145 result = base::AlignedAlloc(size, alignment);
146 if (V8_LIKELY(result != nullptr)) return result;
148 }
149 V8::FatalProcessOutOfMemory(nullptr, "AlignedAlloc");
150}
151
152void AlignedFree(void* ptr) { base::AlignedFree(ptr); }
153
157
159
163
164void* AllocatePages(v8::PageAllocator* page_allocator, void* hint, size_t size,
165 size_t alignment, PageAllocator::Permission access) {
166 DCHECK_NOT_NULL(page_allocator);
167 DCHECK(IsAligned(reinterpret_cast<Address>(hint), alignment));
168 DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
169 if (!hint && v8_flags.randomize_all_allocations) {
170 hint = AlignedAddress(page_allocator->GetRandomMmapAddr(), alignment);
171 }
172 void* result = nullptr;
173 for (int i = 0; i < kAllocationTries; ++i) {
174 result = page_allocator->AllocatePages(hint, size, alignment, access);
175 if (V8_LIKELY(result != nullptr)) break;
177 }
178 return result;
179}
180
181void FreePages(v8::PageAllocator* page_allocator, void* address,
182 const size_t size) {
183 DCHECK_NOT_NULL(page_allocator);
184 DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
185 if (!page_allocator->FreePages(address, size)) {
186 V8::FatalProcessOutOfMemory(nullptr, "FreePages");
187 }
188}
189
190void ReleasePages(v8::PageAllocator* page_allocator, void* address, size_t size,
191 size_t new_size) {
192 DCHECK_NOT_NULL(page_allocator);
193 DCHECK_LT(new_size, size);
194 DCHECK(IsAligned(new_size, page_allocator->CommitPageSize()));
195 CHECK(page_allocator->ReleasePages(address, size, new_size));
196}
197
198bool SetPermissions(v8::PageAllocator* page_allocator, void* address,
199 size_t size, PageAllocator::Permission access) {
200 DCHECK_NOT_NULL(page_allocator);
201 return page_allocator->SetPermissions(address, size, access);
202}
203
207
209
211 void* hint, size_t alignment,
212 PageAllocator::Permission permissions)
213 : page_allocator_(page_allocator) {
216 size_t page_size = page_allocator_->AllocatePageSize();
217 alignment = RoundUp(alignment, page_size);
218 Address address = reinterpret_cast<Address>(AllocatePages(
219 page_allocator_, hint, RoundUp(size, page_size), alignment, permissions));
220 if (address != kNullAddress) {
221 DCHECK(IsAligned(address, alignment));
223 }
224}
225
227 if (IsReserved()) {
228 Free();
229 }
230}
231
236
237bool VirtualMemory::SetPermissions(Address address, size_t size,
239 CHECK(InVM(address, size));
241 reinterpret_cast<void*>(address), size, access);
242 return result;
243}
244
245bool VirtualMemory::RecommitPages(Address address, size_t size,
247 CHECK(InVM(address, size));
248 bool result = page_allocator_->RecommitPages(reinterpret_cast<void*>(address),
249 size, access);
250 return result;
251}
252
253bool VirtualMemory::DiscardSystemPages(Address address, size_t size) {
254 CHECK(InVM(address, size));
256 reinterpret_cast<void*>(address), size);
257 DCHECK(result);
258 return result;
259}
260
261size_t VirtualMemory::Release(Address free_start) {
264 // Notice: Order is important here. The VirtualMemory object might live
265 // inside the allocated region.
266
267 const size_t old_size = region_.size();
268 const size_t free_size = old_size - (free_start - region_.begin());
269 CHECK(InVM(free_start, free_size));
270 region_.set_size(old_size - free_size);
271 ReleasePages(page_allocator_, reinterpret_cast<void*>(region_.begin()),
272 old_size, region_.size());
273 return free_size;
274}
275
278 // Notice: Order is important here. The VirtualMemory object might live
279 // inside the allocated region.
282 Reset();
283 // FreePages expects size to be aligned to allocation granularity however
284 // ReleasePages may leave size at only commit granularity. Align it here.
285 FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
286 RoundUp(region.size(), page_allocator->AllocatePageSize()));
287}
288
290
292
294 *this = std::move(other);
295}
296
299 base_ = other.base_;
300 size_ = other.size_;
301 page_allocator_ = std::move(other.page_allocator_);
302 reservation_ = std::move(other.reservation_);
303 other.base_ = kNullAddress;
304 other.size_ = 0;
305 return *this;
306}
307
309 const ReservationParams& params, base::AddressRegion existing_reservation) {
311
312 const size_t allocate_page_size = params.page_allocator->AllocatePageSize();
313 CHECK(IsAligned(params.reservation_size, allocate_page_size));
314 CHECK(params.base_alignment == ReservationParams::kAnyBaseAlignment ||
315 IsAligned(params.base_alignment, allocate_page_size));
316
317 if (!existing_reservation.is_empty()) {
318 CHECK_EQ(existing_reservation.size(), params.reservation_size);
319 CHECK(params.base_alignment == ReservationParams::kAnyBaseAlignment ||
320 IsAligned(existing_reservation.begin(), params.base_alignment));
322 VirtualMemory(params.page_allocator, existing_reservation.begin(),
323 existing_reservation.size());
325 } else {
326 Address hint = params.requested_start_hint;
327 // Require the hint to be properly aligned because here it's not clear
328 // anymore whether it should be rounded up or down.
329 CHECK(IsAligned(hint, params.base_alignment));
330 VirtualMemory reservation(params.page_allocator, params.reservation_size,
331 reinterpret_cast<void*>(hint),
332 params.base_alignment, params.permissions);
333 // The virtual memory reservation fails only due to OOM.
334 if (!reservation.IsReserved()) return false;
335
336 reservation_ = std::move(reservation);
338 CHECK_EQ(reservation_.size(), params.reservation_size);
339 }
341 CHECK(IsAligned(base_, params.base_alignment));
342
343 const Address allocatable_base = RoundUp(base_, params.page_size);
344 const size_t allocatable_size = RoundDown(
345 params.reservation_size - (allocatable_base - base_), params.page_size);
346 size_ = allocatable_base + allocatable_size - base_;
347
348 page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
349 params.page_allocator, allocatable_base, allocatable_size,
350 params.page_size, params.page_initialization_mode,
351 params.page_freeing_mode);
352 return true;
353}
354
356 if (IsReserved()) {
358 size_ = 0;
359 page_allocator_.reset();
361 }
362}
363
364} // namespace internal
365} // namespace v8
virtual void * GetRandomMmapAddr()=0
virtual bool ReleasePages(void *address, size_t length, size_t new_length)=0
virtual size_t AllocatePageSize()=0
virtual void * AllocatePages(void *address, size_t length, size_t alignment, Permission permissions)=0
virtual bool RecommitPages(void *address, size_t length, Permission permissions)
virtual bool SetPermissions(void *address, size_t length, Permission permissions)=0
virtual bool FreePages(void *address, size_t length)=0
virtual bool DiscardSystemPages(void *address, size_t size)
virtual size_t CommitPageSize()=0
virtual PageAllocator * GetPageAllocator()
virtual void OnCriticalMemoryPressure()
void set_size(size_t size)
static V8_EXPORT_PRIVATE v8::Platform * GetCurrentPlatform()
Definition v8.cc:282
static V8_EXPORT_PRIVATE void FatalProcessOutOfMemory(Isolate *isolate, const char *location, const OOMDetails &details=kNoOOMDetails)
bool InitReservation(const ReservationParams &params, base::AddressRegion existing_reservation=base::AddressRegion())
std::unique_ptr< base::BoundedPageAllocator > page_allocator_
Definition allocation.h:412
VirtualMemory * reservation()
Definition allocation.h:373
VirtualMemoryCage & operator=(VirtualMemoryCage &)=delete
v8::PageAllocator * page_allocator_
Definition allocation.h:296
V8_EXPORT_PRIVATE ~VirtualMemory()
V8_EXPORT_PRIVATE bool DiscardSystemPages(Address address, size_t size)
V8_EXPORT_PRIVATE VirtualMemory()
bool InVM(Address address, size_t size) const
Definition allocation.h:290
v8::PageAllocator * page_allocator()
Definition allocation.h:241
base::AddressRegion region_
Definition allocation.h:297
V8_EXPORT_PRIVATE void Free()
V8_EXPORT_PRIVATE void Reset()
V8_EXPORT_PRIVATE size_t Release(Address free_start)
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT bool SetPermissions(Address address, size_t size, PageAllocator::Permission access)
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT bool RecommitPages(Address address, size_t size, PageAllocator::Permission access)
const int size_
Definition assembler.cc:132
cppgc::PageAllocator * page_allocator_
Definition cpp-heap.cc:194
ZoneVector< RpoNumber > & result
#define DEFINE_LAZY_LEAKY_OBJECT_GETTER(T, FunctionName,...)
int n
Definition mul-fft.cc:296
v8::PageAllocator PageAllocator
Definition platform.h:22
V8_NODISCARD AllocationResult< T * > AllocateAtLeast(size_t n)
Definition memory.h:144
void AlignedFree(void *ptr)
Definition memory.h:102
void * AlignedAlloc(size_t size, size_t alignment)
Definition memory.h:84
void Free(void *memory)
Definition memory.h:63
bool SetPermissions(v8::PageAllocator *page_allocator, void *address, size_t size, PageAllocator::Permission access)
v8::PageAllocator * GetPlatformPageAllocator()
Definition allocation.cc:66
void * AllocatePages(v8::PageAllocator *page_allocator, void *hint, size_t size, size_t alignment, PageAllocator::Permission access)
v8::PageAllocator * SetPlatformPageAllocatorForTesting(v8::PageAllocator *new_page_allocator)
Definition allocation.cc:88
void * AlignedAllocWithRetry(size_t size, size_t alignment)
void OnCriticalMemoryPressure()
void * GetRandomMmapAddr()
size_t CommitPageSize()
char * StrDup(const char *str)
void *(*)(size_t) MallocFn
Definition allocation.h:95
v8::VirtualAddressSpace * GetPlatformVirtualAddressSpace()
Definition allocation.cc:71
size_t AllocatePageSize()
V8_EXPORT_PRIVATE FlagValues v8_flags
void * AllocWithRetry(size_t size, MallocFn malloc_fn)
void AlignedFree(void *ptr)
base::AllocationResult< void * > AllocAtLeastWithRetry(size_t size)
char * StrNDup(const char *str, size_t n)
static constexpr Address kNullAddress
Definition v8-internal.h:53
void ReleasePages(v8::PageAllocator *page_allocator, void *address, size_t size, size_t new_size)
void MemCopy(void *dest, const void *src, size_t size)
Definition memcopy.h:124
T * NewArray(size_t size)
Definition allocation.h:43
void FreePages(v8::PageAllocator *page_allocator, void *address, const size_t size)
#define V8_NOEXCEPT
#define CHECK(condition)
Definition logging.h:124
#define DCHECK_NOT_NULL(val)
Definition logging.h:492
#define CHECK_NE(lhs, rhs)
#define CHECK_EQ(lhs, rhs)
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
constexpr T RoundUp(T x, intptr_t m)
Definition macros.h:387
void * AlignedAddress(void *address, size_t alignment)
Definition macros.h:407
constexpr T RoundDown(T x, intptr_t m)
Definition macros.h:371
constexpr bool IsAligned(T value, U alignment)
Definition macros.h:403
#define V8_LIKELY(condition)
Definition v8config.h:661
#define V8_UNLIKELY(condition)
Definition v8config.h:660