v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
accounting-allocator.cc
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/zone/accounting-allocator.h"

#include <memory>

#include "src/base/bounded-page-allocator.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/utils/allocation.h"
#include "src/zone/zone-compression.h"
#include "src/zone/zone-segment.h"

namespace v8 {
namespace internal {

// These definitions are here in order to please the linker, which in debug
// mode sometimes requires static constants to be defined in .cc files.
const size_t ZoneCompression::kReservationSize;
const size_t ZoneCompression::kReservationAlignment;

namespace {

static constexpr size_t kZonePageSize = 256 * KB;

VirtualMemory ReserveAddressSpace(v8::PageAllocator* platform_allocator) {
  DCHECK(IsAligned(ZoneCompression::kReservationSize,
                   platform_allocator->AllocatePageSize()));

  void* hint = reinterpret_cast<void*>(RoundDown(
      reinterpret_cast<uintptr_t>(platform_allocator->GetRandomMmapAddr()),
      ZoneCompression::kReservationAlignment));

  VirtualMemory memory(platform_allocator, ZoneCompression::kReservationSize,
                       hint, ZoneCompression::kReservationAlignment);
  if (memory.IsReserved()) {
    CHECK(IsAligned(memory.address(), ZoneCompression::kReservationAlignment));
    return memory;
  }

  base::FatalOOM(base::OOMType::kProcess,
                 "Failed to reserve memory for compressed zones");
  UNREACHABLE();
}

std::unique_ptr<v8::base::BoundedPageAllocator> CreateBoundedAllocator(
    v8::PageAllocator* platform_allocator, Address reservation_start) {
  CHECK(reservation_start);
  CHECK(IsAligned(reservation_start, ZoneCompression::kReservationAlignment));

  auto allocator = std::make_unique<v8::base::BoundedPageAllocator>(
      platform_allocator, reservation_start, ZoneCompression::kReservationSize,
      kZonePageSize,
      base::PageInitializationMode::kAllocatedPagesCanBeUninitialized,
      base::PageFreeingMode::kMakeInaccessible);

  // Exclude first page from allocation to ensure that accesses through
  // decompressed null pointer will seg-fault.
  allocator->AllocatePagesAt(reservation_start, kZonePageSize,
                             PageAllocator::kNoAccess);
  return allocator;
}

}  // namespace

AccountingAllocator::AccountingAllocator() {
  if (COMPRESS_ZONES_BOOL) {
    v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();
    VirtualMemory memory = ReserveAddressSpace(platform_page_allocator);
    reserved_area_ = std::make_unique<VirtualMemory>(std::move(memory));
    bounded_page_allocator_ = CreateBoundedAllocator(platform_page_allocator,
                                                     reserved_area_->address());
  }
}

AccountingAllocator::~AccountingAllocator() = default;

Segment* AccountingAllocator::AllocateSegment(size_t bytes,
                                              bool supports_compression) {
  void* memory;
  if (COMPRESS_ZONES_BOOL && supports_compression) {
    bytes = RoundUp(bytes, kZonePageSize);
    memory = AllocatePages(bounded_page_allocator_.get(), nullptr, bytes,
                           kZonePageSize, PageAllocator::kReadWrite);

  } else {
    auto result = AllocAtLeastWithRetry(bytes);
    memory = result.ptr;
    bytes = result.count;
  }
  if (memory == nullptr) return nullptr;

  size_t current =
      current_memory_usage_.fetch_add(bytes, std::memory_order_relaxed) + bytes;
  size_t max = max_memory_usage_.load(std::memory_order_relaxed);
  while (current > max && !max_memory_usage_.compare_exchange_weak(
                              max, current, std::memory_order_relaxed)) {
    // {max} was updated by {compare_exchange_weak}; retry.
  }
  DCHECK_LE(sizeof(Segment), bytes);
  return new (memory) Segment(bytes);
}

void AccountingAllocator::ReturnSegment(Segment* segment,
                                        bool supports_compression) {
  segment->ZapContents();
  size_t segment_size = segment->total_size();
  current_memory_usage_.fetch_sub(segment_size, std::memory_order_relaxed);
  segment->ZapHeader();
  if (COMPRESS_ZONES_BOOL && supports_compression) {
    FreePages(bounded_page_allocator_.get(), segment, segment_size);
  } else {
    free(segment);
  }
}

}  // namespace internal
}  // namespace v8
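
The guard-page idea in CreateBoundedAllocator above (the first page of the reservation stays inaccessible so that a "decompressed" null zone pointer faults immediately instead of reading unrelated memory) can be illustrated outside of V8 with plain POSIX calls. The following is only a minimal sketch of that pattern, assuming a Linux or macOS environment; the reservation size, page size, and all names are illustrative stand-ins, not V8's PageAllocator API.

// Sketch of the guard-page pattern: reserve a region with no access, open up
// everything except the first page, and leave that page inaccessible so
// offset-0 accesses fault. Not V8 code; kReservation is an arbitrary stand-in
// for the zone cage size.
#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t kReservation = 1 << 20;  // illustrative cage size (1 MiB)
  const size_t kPage = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  void* base = mmap(nullptr, kReservation, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);  // reserve, no access
  if (base == MAP_FAILED) return 1;
  // Make everything after the first page usable; the first page keeps
  // PROT_NONE, so dereferencing {base + 0} seg-faults immediately.
  mprotect(static_cast<char*>(base) + kPage, kReservation - kPage,
           PROT_READ | PROT_WRITE);
  std::printf("reservation at %p, first %zu bytes are a guard page\n", base,
              kPage);
  munmap(base, kReservation);
  return 0;
}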
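
AllocateSegment's bookkeeping combines a relaxed fetch_add for the current usage with a compare_exchange_weak loop that publishes a new high-water mark only when the current value exceeds it, then constructs the Segment header in-place over the raw block. The standalone sketch below is not V8 code: SegmentHeader, current_usage, and max_usage are illustrative stand-ins for Segment, current_memory_usage_, and max_memory_usage_, and malloc stands in for the page allocator.

// Standalone sketch of the accounting pattern used in AllocateSegment /
// ReturnSegment: relaxed counters plus a CAS loop for the peak, and
// placement-new of a header at the start of the allocation.
#include <atomic>
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <new>

struct SegmentHeader {
  explicit SegmentHeader(size_t size) : total_size(size) {}
  size_t total_size;
};

std::atomic<size_t> current_usage{0};  // stand-in for current_memory_usage_
std::atomic<size_t> max_usage{0};      // stand-in for max_memory_usage_

SegmentHeader* AllocateSegment(size_t bytes) {
  void* memory = std::malloc(bytes);
  if (memory == nullptr) return nullptr;

  // "current" is the usage including this block.
  size_t current =
      current_usage.fetch_add(bytes, std::memory_order_relaxed) + bytes;
  size_t max = max_usage.load(std::memory_order_relaxed);
  // Retry until either another thread published a larger peak, or our CAS
  // wins; on failure, compare_exchange_weak reloads {max} for the re-check.
  while (current > max &&
         !max_usage.compare_exchange_weak(max, current,
                                          std::memory_order_relaxed)) {
  }
  // Construct the header in-place at the start of the raw block.
  return new (memory) SegmentHeader(bytes);
}

void ReturnSegment(SegmentHeader* segment) {
  current_usage.fetch_sub(segment->total_size, std::memory_order_relaxed);
  segment->~SegmentHeader();
  std::free(segment);
}

int main() {
  SegmentHeader* s = AllocateSegment(4096);
  std::printf("current=%zu max=%zu\n", current_usage.load(), max_usage.load());
  ReturnSegment(s);
  std::printf("current=%zu max=%zu\n", current_usage.load(), max_usage.load());
  return 0;
}

The loop deliberately tolerates spurious compare_exchange_weak failures: the peak only ever grows, so re-checking `current > max` after each failed exchange is enough to keep the high-water mark correct without any lock.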