v8
V8 is Google's open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
memory-allocator.h
Go to the documentation of this file.
1// Copyright 2020 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_HEAP_MEMORY_ALLOCATOR_H_
6#define V8_HEAP_MEMORY_ALLOCATOR_H_
7
8#include <atomic>
9#include <memory>
10#include <optional>
11#include <set>
12#include <unordered_set>
13#include <utility>
14
15#include "include/v8-platform.h"
18#include "src/base/hashing.h"
19#include "src/base/macros.h"
22#include "src/common/globals.h"
23#include "src/heap/code-range.h"
26#include "src/heap/spaces.h"
29
30namespace v8 {
31namespace internal {
32
33namespace heap {
34class TestMemoryAllocatorScope;
35} // namespace heap
36
37class Heap;
38class Isolate;
39class ReadOnlyPageMetadata;
40class PagePool;
41
42// ----------------------------------------------------------------------------
43// A space acquires chunks of memory from the operating system. The memory
44// allocator allocates and deallocates pages for the paged heap spaces and large
45// pages for large object space.
// NOTE(review): This file is a Doxygen listing export. The leading integers on
// each line are the original source line numbers; gaps in the numbering mark
// lines the generator elided. The `class MemoryAllocator {` opener (original
// line 46) is elided just above this point.
47 public:
// Allocation strategy for AllocatePage(). The enumerator names at original
// lines 50 and 53 are elided by the export; only their doc comments remain.
// NOTE(review): recover the actual names from the real header before editing.
48 enum class AllocationMode {
49 // Regular allocation path. Does not use pool.
51
52 // Uses the pool for allocation first.
54 };
55
// Freeing strategy for Free() (see index entry for
// `Free(MemoryAllocator::FreeMode, MutablePageMetadata*)` at the bottom of
// this extract). Enumerator names at original lines 58 and 63 are elided;
// only kPool is visible.
56 enum class FreeMode {
57 // Frees page immediately on the main thread.
59
60 // Postpone freeing, until MemoryAllocator::ReleaseQueuedPages() is called.
61 // This is used in the major GC to allow the pointer-update phase to touch
62 // dead memory.
64
65 // Pool page.
66 kPool,
67 };
68
69 // Initialize page sizes field in V8::Initialize.
70 static void InitializeOncePerProcess();
71
// Returns the cached commit page size. Original line 73 (between 72 and 74)
// is elided — presumably a DCHECK on commit_page_size_; confirm against the
// real header.
72 V8_INLINE static intptr_t GetCommitPageSize() {
74 return commit_page_size_;
75 }
76
// NOTE(review): original lines 77-84 are elided here. Per the index at the
// bottom of this extract they include the constructor:
//   MemoryAllocator(Isolate*, v8::PageAllocator* code_page_allocator,
//                   v8::PageAllocator* trusted_page_allocator,
//                   size_t max_capacity)
// of which only the trailing parameter line survives below.
81
85 size_t max_capacity);
86
// Elided line 87: V8_EXPORT_PRIVATE void TearDown() (per index).
88
89 // Allocates a Page from the allocator. AllocationMode is used to indicate
90 // whether pooled allocation, which only works for MemoryChunk::kPageSize,
91 // should be tried first.
// Elided line 92 carries the start of the AllocatePage declaration
// (returns PageMetadata*, per the index entry).
93 MemoryAllocator::AllocationMode alloc_mode, Space* space,
94 Executability executable);
95
// Elided line 96: start of AllocateLargePage (returns LargePageMetadata*,
// per index).
97 LargeObjectSpace* space, size_t object_size, Executability executable);
98
// Elided line 99: start of AllocateReadOnlyPage (returns
// ReadOnlyPageMetadata*, takes ReadOnlySpace*, per index).
100 Address hint = kNullAddress);
101
102 std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping> RemapSharedPage(
103 ::v8::PageAllocator::SharedMemory* shared_memory, Address new_address);
104
// Elided line 105: start of Free(MemoryAllocator::FreeMode mode, ...)
// (per index); FreeReadOnlyPage follows at elided line 107.
106 MutablePageMetadata* chunk);
108
109 // Returns allocated spaces in bytes.
// size_ is a std::atomic<size_t> data member (declared at original line 389).
110 size_t Size() const { return size_; }
111
112 // Returns allocated executable spaces in bytes.
113 size_t SizeExecutable() const { return size_executable_; }
114
115 // Returns the maximum available bytes of heaps.
// Clamps at zero rather than underflowing when allocated size already
// exceeds capacity_ (both are unsigned).
116 size_t Available() const {
117 const size_t size = Size();
118 return capacity_ < size ? 0 : capacity_ - size;
119 }
120
121 // Returns an indication of whether a pointer is in a space that has
122 // been allocated by this MemoryAllocator. It is conservative, allowing
123 // false negatives (i.e., if a pointer is outside the allocated space, it may
124 // return false) but not false positives (i.e., if a pointer is inside the
125 // allocated space, it will definitely return false).
// NOTE(review): original lines 126-130 are elided; per the index they hold
// the single-argument IsOutsideAllocatedSpace(Address) overload and the
// start of this two-argument overload's signature.
131 Executability executable) const {
132 switch (executable) {
133 case NOT_EXECUTABLE:
// The elided continuation lines 135 and 138 presumably complete each
// disjunction with the matching `highest_*_ever_allocated_` bound — the
// member comment (original lines 393-399) documents the tracked range as
// [lowest, highest[. Confirm against the real header.
134 return address < lowest_not_executable_ever_allocated_ ||
136 case EXECUTABLE:
137 return address < lowest_executable_ever_allocated_ ||
139 }
140 }
141
142 // Partially release |bytes_to_free| bytes starting at |start_free|. Note that
143 // internally memory is freed from |start_free| to the end of the reservation.
144 // Additional memory beyond the page is not accounted though, so
145 // |bytes_to_free| is computed by the caller.
146 void PartialFreeMemory(MemoryChunkMetadata* chunk, Address start_free,
147 size_t bytes_to_free, Address new_area_end);
148
149#ifdef DEBUG
150 // Checks if an allocated MemoryChunk was intended to be used for executable
151 // memory.
// Debug-only membership test against the executable_memory_ set; takes
// executable_memory_mutex_ because registration can race with this check.
152 bool IsMemoryChunkExecutable(MutablePageMetadata* chunk) {
153 base::MutexGuard guard(&executable_memory_mutex_);
154 return executable_memory_.find(chunk) != executable_memory_.end();
155 }
156#endif // DEBUG
157
158 // Page allocator instance for allocating non-executable pages.
159 // Guaranteed to be a valid pointer.
// Elided line 160: data_page_allocator() accessor definition (per index).
161
162 // Page allocator instance for allocating executable pages.
163 // Guaranteed to be a valid pointer.
// Elided line 164: code_page_allocator() accessor definition (per index).
165
166 // Page allocator instance for allocating "trusted" pages. When the sandbox is
167 // enabled, these pages are guaranteed to be allocated outside of the sandbox,
168 // so their content cannot be corrupted by an attacker.
169 // Guaranteed to be a valid pointer.
// Elided lines 170-172: trusted_page_allocator() accessor definition
// (per index).
173
174 // Returns page allocator suitable for allocating pages for the given space.
// Elided line 175 holds this function's signature:
//   v8::PageAllocator* page_allocator(AllocationSpace space)  (per index).
// The elided lines inside the switch (179, 181, 183-184, 186) presumably
// hold the return statements selecting among code_page_allocator_,
// trusted_page_allocator_ and data_page_allocator_ — confirm against the
// real header.
176 switch (space) {
177 case CODE_SPACE:
178 case CODE_LO_SPACE:
180 case TRUSTED_SPACE:
182 case TRUSTED_LO_SPACE:
185 default:
187 }
188 }
189
// Accessor for the page pool; pool_ is declared among the (elided) data
// members near the end of the class.
190 PagePool* pool() { return pool_; }
191
// Elided lines 192-195: per the index, HandleAllocationFailure(Executability)
// and the UnregisterReadOnlyPage declaration live in this region.
193
195
196 // Return the normal or large page that contains this address, if it is owned
197 // by this heap, otherwise a nullptr.
// Elided line 198: start of LookupChunkContainingAddress (returns
// const MemoryChunk*, per index).
199 Address addr) const;
200 // This version can be used when all threads are either parked or in a
201 // safepoint. In that case we can skip taking a mutex.
// Elided line 202: start of LookupChunkContainingAddressInSafepoint
// (per index).
203 Address addr) const;
204
205 // Insert and remove normal and large pages that are owned by this heap.
// These maintain normal_pages_ / large_pages_ (declared at original lines
// 421-423), which back conservative stack scanning.
206 void RecordMemoryChunkCreated(const MemoryChunk* chunk);
207 void RecordMemoryChunkDestroyed(const MemoryChunk* chunk);
208
209 // We postpone page freeing until the pointer-update phase is done (updating
210 // slots may happen for dead objects which point to dead memory).
211 void ReleaseQueuedPages();
212
213 // Returns the number of cached chunks for this isolate.
// Elided line 214: GetPooledChunksCount() declaration (per index).
215
216 // Returns the number of shared cached chunks.
// Elided line 217: GetSharedPooledChunksCount() declaration (per index).
218
219 // Returns the number of total cached chunks (including cached pages of other
220 // isolates).
// Elided line 221: GetTotalPooledChunksCount() declaration (per index).
222
223 // Releases all pooled chunks for this isolate immediately.
// Elided line 224: ReleasePooledChunksImmediately() declaration (per index).
225
226 static void DeleteMemoryChunk(MutablePageMetadata* metadata);
227
228 private:
229 // Used to store all data about MemoryChunk allocation, e.g. in
230 // AllocateUninitializedChunk.
// Elided line 231: `struct MemoryChunkAllocationResult {` opener — the
// fields below belong to it. Further fields at elided lines 235, 237 and
// 239 are not visible in this extract.
232 void* chunk;
233 // If we reuse a pooled chunk return the metadata allocation here to be
234 // reused.
236 size_t size;
238 size_t area_end;
240 };
241
242 // Computes the size of a MemoryChunk from the size of the object_area.
243 static size_t ComputeChunkSize(size_t area_size, AllocationSpace space);
244
245 // Internal allocation method for all pages/memory chunks. Returns data about
246 // the uninitialized memory region.
// Convenience overload: delegates to AllocateUninitializedChunkAt with a
// null placement hint.
247 V8_WARN_UNUSED_RESULT std::optional<MemoryChunkAllocationResult>
248 AllocateUninitializedChunk(BaseSpace* space, size_t area_size,
249 Executability executable, PageSize page_size) {
250 return AllocateUninitializedChunkAt(space, area_size, executable,
251 kNullAddress, page_size);
252 }
253 V8_WARN_UNUSED_RESULT std::optional<MemoryChunkAllocationResult>
254 AllocateUninitializedChunkAt(BaseSpace* space, size_t area_size,
255 Executability executable, Address hint,
256 PageSize page_size);
257
258 // Internal raw allocation method that allocates an aligned MemoryChunk and
259 // sets the right memory permissions.
260 Address AllocateAlignedMemory(size_t chunk_size, size_t area_size,
261 size_t alignment, AllocationSpace space,
262 Executability executable, void* hint,
263 VirtualMemory* controller);
264
265 // Commit memory region owned by given reservation object. Returns true if
266 // it succeeded and false otherwise.
267 bool CommitMemory(VirtualMemory* reservation, Executability executable);
268
269 // Sets memory permissions on executable memory chunks. This entails page
270 // header (RW), guard pages (no access) and the object area (code modification
271 // permissions).
// Elided line 272: start of SetPermissionsOnExecutableMemoryChunk
// (V8_WARN_UNUSED_RESULT bool, per index).
273 VirtualMemory* vm, Address start, size_t reserved_size);
274
275 // Disallows any access on memory region owned by given reservation object.
276 // Returns true if it succeeded and false otherwise.
277 bool UncommitMemory(VirtualMemory* reservation);
278
279 // Frees the given memory region.
// Elided line 280: start of FreeMemoryRegion(v8::PageAllocator*, Address,
// size_t) (per index).
281 size_t size);
282
283 // PreFreeMemory logically frees the object, i.e., it unregisters the
284 // memory, logs a delete event and adds the chunk to remembered unmapped
285 // pages.
// Elided line 286: PreFreeMemory(MutablePageMetadata*) declaration
// (per index).
287
288 // PerformFreeMemory can be called concurrently when PreFree was executed
289 // before.
// Elided line 290: PerformFreeMemory(MutablePageMetadata*) declaration
// (per index).
291
292 // See AllocatePage for public interface. Note that currently we only
293 // support pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
294 std::optional<MemoryChunkAllocationResult> AllocateUninitializedPageFromPool(
295 Space* space);
296
297 // Initializes pages in a chunk. Returns the first page address.
298 // This function and GetChunkId() are provided for the mark-compact
299 // collector to rebuild page headers in the from space, which is
300 // used as a marking stack and its page headers are destroyed.
301 PageMetadata* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
302 PagedSpace* space);
303
// Widens the tracked [lowest, highest[ address range for the given
// executability class (see member comment at original lines 393-399).
// Elided line 304 holds the signature:
//   void UpdateAllocatedSpaceLimits(Address low, Address high,
//                                   Executability executable)  (per index).
305 Executability executable) {
306 // The use of atomic primitives does not guarantee correctness (wrt.
307 // desired semantics) by default. The loop here ensures that we update the
308 // values only if they did not change in between.
309 Address ptr;
310 switch (executable) {
311 case NOT_EXECUTABLE:
// Elided line 312 presumably loads lowest_not_executable_ever_allocated_
// into ptr (it parallels the EXECUTABLE branch at line 326) — confirm.
// Each CAS loop retries only while this call's bound is strictly better;
// compare_exchange_weak refreshes ptr with the current value on failure.
313 std::memory_order_relaxed);
314 while ((low < ptr) &&
315 !lowest_not_executable_ever_allocated_.compare_exchange_weak(
316 ptr, low, std::memory_order_acq_rel)) {
317 }
// Elided line 318 presumably loads highest_not_executable_ever_allocated_
// into ptr (paralleling lines 331-332) — confirm.
319 std::memory_order_relaxed);
320 while ((high > ptr) &&
321 !highest_not_executable_ever_allocated_.compare_exchange_weak(
322 ptr, high, std::memory_order_acq_rel)) {
323 }
324 break;
325 case EXECUTABLE:
326 ptr = lowest_executable_ever_allocated_.load(std::memory_order_relaxed);
327 while ((low < ptr) &&
328 !lowest_executable_ever_allocated_.compare_exchange_weak(
329 ptr, low, std::memory_order_acq_rel)) {
330 }
331 ptr =
332 highest_executable_ever_allocated_.load(std::memory_order_relaxed);
333 while ((high > ptr) &&
334 !highest_executable_ever_allocated_.compare_exchange_weak(
335 ptr, high, std::memory_order_acq_rel)) {
336 }
337 break;
338 }
339 }
340
341 // Performs all necessary bookkeeping to free the memory, but does not free
342 // it.
// Elided lines 343-345: start of the UnregisterMemoryChunk declaration
// (takes MemoryChunkMetadata*, per index); related UnregisterSharedMemoryChunk
// and UnregisterMutableMemoryChunk declarations (per index) sit at elided
// line 348 below.
346 Executability executable = NOT_EXECUTABLE);
347
349
350#ifdef DEBUG
// Debug-only bookkeeping mirroring IsMemoryChunkExecutable(); insertion
// asserts the chunk is not already present. Elided line 353 presumably holds
// an additional DCHECK — confirm against the real header.
351 void RegisterExecutableMemoryChunk(MutablePageMetadata* chunk) {
352 base::MutexGuard guard(&executable_memory_mutex_);
354 DCHECK_EQ(executable_memory_.find(chunk), executable_memory_.end());
355 executable_memory_.insert(chunk);
356 }
357
// Removal asserts the chunk was previously registered.
358 void UnregisterExecutableMemoryChunk(MutablePageMetadata* chunk) {
359 base::MutexGuard guard(&executable_memory_mutex_);
360 DCHECK_NE(executable_memory_.find(chunk), executable_memory_.end());
361 executable_memory_.erase(chunk);
362 }
363#endif // DEBUG
364
366
367 // Page allocator used for allocating data pages. Depending on the
368 // configuration it may be a page allocator instance provided by v8::Platform
369 // or a BoundedPageAllocator (when pointer compression is enabled).
371
372 // Page allocator used for allocating code pages. Depending on the
373 // configuration it may be a page allocator instance provided by v8::Platform
374 // or a BoundedPageAllocator from Heap::code_range_ (when pointer compression
375 // is enabled or on those 64-bit architectures where pc-relative 32-bit
376 // displacement can be used for call and jump instructions).
378
379 // Page allocator used for allocating trusted pages. When the sandbox is
380 // enabled, trusted pages are allocated outside of the sandbox so that their
381 // content cannot be corrupted by an attacker. When the sandbox is disabled,
382 // this is the same as data_page_allocator_.
384
385 // Maximum space size in bytes.
386 size_t capacity_;
387
388 // Allocated space size in bytes.
389 std::atomic<size_t> size_ = 0;
390 // Allocated executable space size in bytes.
391 std::atomic<size_t> size_executable_ = 0;
392
393 // We keep the lowest and highest addresses allocated as a quick way
394 // of determining that pointers are outside the heap. The estimate is
395 // conservative, i.e. not all addresses in 'allocated' space are allocated
396 // to our heap. The range is [lowest, highest[, inclusive on the low end
397 // and exclusive on the high end. Addresses are distinguished between
398 // executable and not-executable, as they may generally be placed in distinct
399 // areas of the heap.
401 static_cast<Address>(-1ll)};
404 static_cast<Address>(-1ll)};
406
407 std::optional<VirtualMemory> reserved_chunk_at_virtual_memory_limit_;
409 std::vector<MutablePageMetadata*> queued_pages_to_be_freed_;
410
411#ifdef DEBUG
412 // Data structure to remember allocated executable memory chunks.
413 // This data structure is used only in DCHECKs.
414 std::unordered_set<MutablePageMetadata*, base::hash<MutablePageMetadata*>>
415 executable_memory_;
416 base::Mutex executable_memory_mutex_;
417#endif // DEBUG
418
419 // Allocated normal and large pages are stored here, to be used during
420 // conservative stack scanning.
421 std::unordered_set<const MemoryChunk*, base::hash<const MemoryChunk*>>
423 std::set<const MemoryChunk*> large_pages_;
424
426
429
432
434};
435
436} // namespace internal
437} // namespace v8
438
439#endif // V8_HEAP_MEMORY_ALLOCATOR_H_
std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping > RemapSharedPage(::v8::PageAllocator::SharedMemory *shared_memory, Address new_address)
void UnregisterMemoryChunk(MemoryChunkMetadata *chunk, Executability executable=NOT_EXECUTABLE)
void RegisterReadOnlyMemory(ReadOnlyPageMetadata *page)
void UnregisterReadOnlyPage(ReadOnlyPageMetadata *page)
static V8_EXPORT_PRIVATE size_t commit_page_size_bits_
void FreeMemoryRegion(v8::PageAllocator *page_allocator, Address addr, size_t size)
V8_EXPORT_PRIVATE const MemoryChunk * LookupChunkContainingAddress(Address addr) const
v8::PageAllocator * code_page_allocator_
V8_INLINE bool IsOutsideAllocatedSpace(Address address) const
static V8_INLINE intptr_t GetCommitPageSizeBits()
V8_EXPORT_PRIVATE void TearDown()
V8_EXPORT_PRIVATE void Free(MemoryAllocator::FreeMode mode, MutablePageMetadata *chunk)
std::set< const MemoryChunk * > large_pages_
bool UncommitMemory(VirtualMemory *reservation)
v8::PageAllocator * trusted_page_allocator_
friend class heap::TestCodePageAllocatorScope
v8::PageAllocator * page_allocator(AllocationSpace space)
v8::PageAllocator * data_page_allocator_
std::atomic< Address > highest_executable_ever_allocated_
static V8_INLINE intptr_t GetCommitPageSize()
std::atomic< Address > highest_not_executable_ever_allocated_
V8_WARN_UNUSED_RESULT std::optional< MemoryChunkAllocationResult > AllocateUninitializedChunk(BaseSpace *space, size_t area_size, Executability executable, PageSize page_size)
void UnregisterSharedMemoryChunk(MemoryChunkMetadata *chunk)
V8_EXPORT_PRIVATE size_t GetSharedPooledChunksCount()
PageMetadata * InitializePagesInChunk(int chunk_id, int pages_in_chunk, PagedSpace *space)
v8::PageAllocator * trusted_page_allocator()
std::atomic< size_t > size_executable_
ReadOnlyPageMetadata * AllocateReadOnlyPage(ReadOnlySpace *space, Address hint=kNullAddress)
V8_EXPORT_PRIVATE LargePageMetadata * AllocateLargePage(LargeObjectSpace *space, size_t object_size, Executability executable)
static void DeleteMemoryChunk(MutablePageMetadata *metadata)
friend class heap::TestMemoryAllocatorScope
void PerformFreeMemory(MutablePageMetadata *chunk)
V8_WARN_UNUSED_RESULT std::optional< MemoryChunkAllocationResult > AllocateUninitializedChunkAt(BaseSpace *space, size_t area_size, Executability executable, Address hint, PageSize page_size)
v8::PageAllocator * code_page_allocator()
std::optional< VirtualMemory > reserved_chunk_at_virtual_memory_limit_
V8_EXPORT_PRIVATE size_t GetPooledChunksCount()
std::atomic< Address > lowest_not_executable_ever_allocated_
V8_EXPORT_PRIVATE PageMetadata * AllocatePage(MemoryAllocator::AllocationMode alloc_mode, Space *space, Executability executable)
void RecordMemoryChunkCreated(const MemoryChunk *chunk)
std::unordered_set< const MemoryChunk *, base::hash< const MemoryChunk * > > normal_pages_
V8_EXPORT_PRIVATE MemoryAllocator(Isolate *isolate, v8::PageAllocator *code_page_allocator, v8::PageAllocator *trusted_page_allocator, size_t max_capacity)
std::optional< MemoryChunkAllocationResult > AllocateUninitializedPageFromPool(Space *space)
std::atomic< Address > lowest_executable_ever_allocated_
V8_WARN_UNUSED_RESULT bool SetPermissionsOnExecutableMemoryChunk(VirtualMemory *vm, Address start, size_t reserved_size)
std::vector< MutablePageMetadata * > queued_pages_to_be_freed_
void PartialFreeMemory(MemoryChunkMetadata *chunk, Address start_free, size_t bytes_to_free, Address new_area_end)
static V8_EXPORT_PRIVATE size_t commit_page_size_
Address AllocateAlignedMemory(size_t chunk_size, size_t area_size, size_t alignment, AllocationSpace space, Executability executable, void *hint, VirtualMemory *controller)
bool CommitMemory(VirtualMemory *reservation, Executability executable)
void PreFreeMemory(MutablePageMetadata *chunk)
void UpdateAllocatedSpaceLimits(Address low, Address high, Executability executable)
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator)
static size_t ComputeChunkSize(size_t area_size, AllocationSpace space)
v8::PageAllocator * data_page_allocator()
void UnregisterMutableMemoryChunk(MutablePageMetadata *chunk)
void RecordMemoryChunkDestroyed(const MemoryChunk *chunk)
V8_INLINE bool IsOutsideAllocatedSpace(Address address, Executability executable) const
V8_EXPORT_PRIVATE size_t GetTotalPooledChunksCount()
void FreeReadOnlyPage(ReadOnlyPageMetadata *chunk)
V8_EXPORT_PRIVATE const MemoryChunk * LookupChunkContainingAddressInSafepoint(Address addr) const
V8_EXPORT_PRIVATE void ReleasePooledChunksImmediately()
Address HandleAllocationFailure(Executability executable)
V8_INLINE bool IsFlagSet(Flag flag) const
int start
@ SHARED_TRUSTED_LO_SPACE
Definition globals.h:1319
@ SHARED_TRUSTED_SPACE
Definition globals.h:1314
static constexpr Address kNullAddress
Definition v8-internal.h:53
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define V8_EXPORT_PRIVATE
Definition macros.h:460
#define V8_INLINE
Definition v8config.h:500
#define V8_WARN_UNUSED_RESULT
Definition v8config.h:671