v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
bounded-page-allocator.cc
Implementation of v8::base::BoundedPageAllocator, a v8::PageAllocator that serves all page allocations from within a single pre-reserved virtual address region, using a RegionAllocator to track sub-regions.
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/bounded-page-allocator.h"

namespace v8 {
namespace base {

BoundedPageAllocator::BoundedPageAllocator(
    v8::PageAllocator* page_allocator, Address start, size_t size,
    size_t allocate_page_size, PageInitializationMode page_initialization_mode,
    PageFreeingMode page_freeing_mode)
    : allocate_page_size_(allocate_page_size),
      commit_page_size_(page_allocator->CommitPageSize()),
      page_allocator_(page_allocator),
      region_allocator_(start, size, allocate_page_size_),
      page_initialization_mode_(page_initialization_mode),
      page_freeing_mode_(page_freeing_mode) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK(IsAligned(allocate_page_size, page_allocator->AllocatePageSize()));
  DCHECK(IsAligned(allocate_page_size_, commit_page_size_));
}

BoundedPageAllocator::Address BoundedPageAllocator::begin() const {
  return region_allocator_.begin();
}

size_t BoundedPageAllocator::size() const { return region_allocator_.size(); }

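// AllocatePages() tries the caller-provided hint first, then falls back to a
// search for any suitably aligned free region, and only then commits the
// pages with the requested permissions, releasing the region again if the
// commit step fails.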
void* BoundedPageAllocator::AllocatePages(void* hint, size_t size,
                                          size_t alignment,
                                          PageAllocator::Permission access) {
  MutexGuard guard(&mutex_);
  DCHECK(IsAligned(alignment, region_allocator_.page_size()));
  DCHECK(IsAligned(alignment, allocate_page_size_));

  Address address = RegionAllocator::kAllocationFailure;

  Address hint_address = reinterpret_cast<Address>(hint);
  if (hint_address && IsAligned(hint_address, alignment) &&
      region_allocator_.contains(hint_address, size)) {
    if (region_allocator_.AllocateRegionAt(hint_address, size)) {
      address = hint_address;
    }
  }

  if (address == RegionAllocator::kAllocationFailure) {
    if (alignment <= allocate_page_size_) {
      // TODO(ishell): Consider using randomized version here.
      address = region_allocator_.AllocateRegion(size);
    } else {
      address = region_allocator_.AllocateAlignedRegion(size, alignment);
    }
  }

  if (address == RegionAllocator::kAllocationFailure) {
    allocation_status_ = AllocationStatus::kRanOutOfReservation;
    return nullptr;
  }

  void* ptr = reinterpret_cast<void*>(address);
  // It's assumed that free regions are in kNoAccess/kNoAccessWillJitLater
  // state.
  if (access == PageAllocator::kNoAccess ||
      access == PageAllocator::kNoAccessWillJitLater) {
    allocation_status_ = AllocationStatus::kSuccess;
    return ptr;
  }

  if (page_initialization_mode_ == PageInitializationMode::kRecommitOnly) {
    if (page_allocator_->RecommitPages(ptr, size, access)) {
      allocation_status_ = AllocationStatus::kSuccess;
      return ptr;
    }
  } else {
    if (page_allocator_->SetPermissions(ptr, size, access)) {
      allocation_status_ = AllocationStatus::kSuccess;
      return ptr;
    }
  }

  // This most likely means that we ran out of memory.
  CHECK_EQ(region_allocator_.FreeRegion(address), size);
  allocation_status_ = AllocationStatus::kFailedToCommit;
  return nullptr;
}

bool BoundedPageAllocator::AllocatePagesAt(Address address, size_t size,
                                           PageAllocator::Permission access) {
  MutexGuard guard(&mutex_);

  DCHECK(IsAligned(address, allocate_page_size_));
  DCHECK(IsAligned(size, allocate_page_size_));

  DCHECK(region_allocator_.contains(address, size));

  if (!region_allocator_.AllocateRegionAt(address, size)) {
    allocation_status_ = AllocationStatus::kHintedAddressTakenOrNotFound;
    return false;
  }

  void* ptr = reinterpret_cast<void*>(address);
  if (!page_allocator_->SetPermissions(ptr, size, access)) {
    // This most likely means that we ran out of memory.
    CHECK_EQ(region_allocator_.FreeRegion(address), size);
    allocation_status_ = AllocationStatus::kFailedToCommit;
    return false;
  }

  allocation_status_ = AllocationStatus::kSuccess;
  return true;
}

bool BoundedPageAllocator::ReserveForSharedMemoryMapping(void* ptr,
                                                         size_t size) {
  MutexGuard guard(&mutex_);

  Address address = reinterpret_cast<Address>(ptr);
  DCHECK(IsAligned(address, allocate_page_size_));
  DCHECK(IsAligned(size, commit_page_size_));

  DCHECK(region_allocator_.contains(address, size));

  // Region allocator requires page size rather than commit size so just over-
  // allocate there since any extra space couldn't be used anyway.
  size_t region_size = RoundUp(size, allocate_page_size_);
  if (!region_allocator_.AllocateRegionAt(
          address, region_size, RegionAllocator::RegionState::kExcluded)) {
    allocation_status_ = AllocationStatus::kHintedAddressTakenOrNotFound;
    return false;
  }

  const bool success = page_allocator_->SetPermissions(
      ptr, size, PageAllocator::Permission::kNoAccess);
  if (success) {
    allocation_status_ = AllocationStatus::kSuccess;
  } else {
    allocation_status_ = AllocationStatus::kFailedToCommit;
  }
  return success;
}

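// How freed memory is returned to the system depends on the modes chosen at
// construction time:
//   - kAllocatedPagesMustBeZeroInitialized: DecommitPages(), so the OS hands
//     back zeroed pages if the range is ever touched again.
//   - kMakeInaccessible: SetPermissions(kNoAccess); the memory stays
//     committed but any access traps.
//   - kDiscard: DiscardSystemPages(); physical pages are dropped while the
//     reservation itself is kept.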
bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) {
  // Careful: we are not locked here, do not touch BoundedPageAllocator
  // metadata.
  bool success;
  Address address = reinterpret_cast<Address>(raw_address);

  // The operations below can be expensive, don't hold the lock while they
  // happen. There is still potentially contention in the kernel, but at least
  // we don't need to hold the V8-side lock.
  if (page_initialization_mode_ ==
      PageInitializationMode::kAllocatedPagesMustBeZeroInitialized) {
    DCHECK_NE(page_freeing_mode_, PageFreeingMode::kDiscard);
    // When we are required to return zero-initialized pages, we decommit the
    // pages here, which will cause any wired pages to be removed by the OS.
    success = page_allocator_->DecommitPages(raw_address, size);
  } else {
    switch (page_freeing_mode_) {
      case PageFreeingMode::kMakeInaccessible:
        DCHECK_EQ(page_initialization_mode_,
                  PageInitializationMode::kAllocatedPagesCanBeUninitialized);
        success = page_allocator_->SetPermissions(raw_address, size,
                                                  PageAllocator::kNoAccess);
        break;

      case PageFreeingMode::kDiscard:
        success = page_allocator_->DiscardSystemPages(raw_address, size);
        break;
    }
  }

  MutexGuard guard(&mutex_);
  CHECK_EQ(size, region_allocator_.FreeRegion(address));

  return success;
}

bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size,
                                        size_t new_size) {
  Address address = reinterpret_cast<Address>(raw_address);
  DCHECK(IsAligned(address, allocate_page_size_));

  DCHECK_LT(new_size, size);
  DCHECK(IsAligned(size - new_size, commit_page_size_));

  // This must be held until the page permissions are updated.
  MutexGuard guard(&mutex_);

  // Check if we freed any allocatable pages by this release.
  size_t allocated_size = RoundUp(size, allocate_page_size_);
  size_t new_allocated_size = RoundUp(new_size, allocate_page_size_);

#ifdef DEBUG
  {
    // There must be an allocated region at the given |address| of a size not
    // smaller than |size|.
    DCHECK_EQ(allocated_size, region_allocator_.CheckRegion(address));
  }
#endif

  if (new_allocated_size < allocated_size) {
    region_allocator_.TrimRegion(address, new_allocated_size);
  }

  // Keep the region in "used" state, just uncommit some pages.
  void* free_address = reinterpret_cast<void*>(address + new_size);
  size_t free_size = size - new_size;
  if (page_initialization_mode_ ==
      PageInitializationMode::kAllocatedPagesMustBeZeroInitialized) {
    DCHECK_NE(page_freeing_mode_, PageFreeingMode::kDiscard);
    // See comment in FreePages().
    return page_allocator_->DecommitPages(free_address, free_size);
  }
  if (page_freeing_mode_ == PageFreeingMode::kMakeInaccessible) {
    DCHECK_EQ(page_initialization_mode_,
              PageInitializationMode::kAllocatedPagesCanBeUninitialized);
    return page_allocator_->SetPermissions(free_address, free_size,
                                           PageAllocator::kNoAccess);
  }
  CHECK_EQ(page_freeing_mode_, PageFreeingMode::kDiscard);
  return page_allocator_->DiscardSystemPages(free_address, free_size);
}

bool BoundedPageAllocator::SetPermissions(void* address, size_t size,
                                          PageAllocator::Permission access) {
  DCHECK(IsAligned(reinterpret_cast<Address>(address), commit_page_size_));
  DCHECK(IsAligned(size, commit_page_size_));
  DCHECK(region_allocator_.contains(reinterpret_cast<Address>(address), size));
  const bool success = page_allocator_->SetPermissions(address, size, access);
  if (!success) {
    allocation_status_ = AllocationStatus::kFailedToCommit;
  }
  return success;
}

bool BoundedPageAllocator::RecommitPages(void* address, size_t size,
                                         PageAllocator::Permission access) {
  DCHECK(IsAligned(reinterpret_cast<Address>(address), commit_page_size_));
  DCHECK(IsAligned(size, commit_page_size_));
  DCHECK(region_allocator_.contains(reinterpret_cast<Address>(address), size));
  const bool success = page_allocator_->RecommitPages(address, size, access);
  if (!success) {
    allocation_status_ = AllocationStatus::kFailedToCommit;
  }
  return success;
}

bool BoundedPageAllocator::DiscardSystemPages(void* address, size_t size) {
  return page_allocator_->DiscardSystemPages(address, size);
}

bool BoundedPageAllocator::DecommitPages(void* address, size_t size) {
  return page_allocator_->DecommitPages(address, size);
}

bool BoundedPageAllocator::SealPages(void* address, size_t size) {
  return page_allocator_->SealPages(address, size);
}

const char* BoundedPageAllocator::AllocationStatusToString(
    AllocationStatus allocation_status) {
  switch (allocation_status) {
    case AllocationStatus::kSuccess:
      return "Success";
    case AllocationStatus::kFailedToCommit:
      return "Failed to commit";
    case AllocationStatus::kRanOutOfReservation:
      return "Ran out of reservation";
    case AllocationStatus::kHintedAddressTakenOrNotFound:
      return "Hinted address was taken or not found";
  }
}

}  // namespace base
}  // namespace v8
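
For orientation, the following is a minimal usage sketch rather than code from the V8 tree: the helper name UseBoundedAllocator, the platform_allocator parameter, and the 1 MiB arena size are all illustrative assumptions. It reserves an inaccessible arena from a backing v8::PageAllocator, constructs a BoundedPageAllocator over it, and then allocates and frees pages strictly within that bound.

#include <cstddef>
#include <cstdint>

#include "src/base/bounded-page-allocator.h"

// Hypothetical embedder-side helper; `platform_allocator` stands in for
// whatever concrete v8::PageAllocator implementation is available.
void UseBoundedAllocator(v8::PageAllocator* platform_allocator) {
  const size_t page_size = platform_allocator->AllocatePageSize();
  // Assumed to be a multiple of both the allocate and commit page sizes.
  const size_t arena_size = 1 * 1024 * 1024;

  // Reserve (but do not commit) the arena from the platform allocator.
  void* arena = platform_allocator->AllocatePages(
      nullptr, arena_size, page_size, v8::PageAllocator::kNoAccess);
  if (arena == nullptr) return;

  {
    v8::base::BoundedPageAllocator bounded(
        platform_allocator, reinterpret_cast<uintptr_t>(arena), arena_size,
        page_size,
        v8::base::PageInitializationMode::kAllocatedPagesCanBeUninitialized,
        v8::base::PageFreeingMode::kMakeInaccessible);

    // Carve two pages out of [arena, arena + arena_size); the backing
    // allocator commits them read-write.
    void* pages = bounded.AllocatePages(nullptr, 2 * page_size, page_size,
                                        v8::PageAllocator::kReadWrite);
    if (pages != nullptr) {
      // ... use the memory ...
      // With kMakeInaccessible, freeing flips the pages back to kNoAccess.
      bounded.FreePages(pages, 2 * page_size);
    }
  }

  // Return the whole reservation to the OS.
  platform_allocator->FreePages(arena, arena_size);
}

Note that BoundedPageAllocator only tracks regions and delegates the actual mapping work, so the backing allocator must outlive it.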