v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
code-range.h
Go to the documentation of this file.
1// Copyright 2021 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_HEAP_CODE_RANGE_H_
6#define V8_HEAP_CODE_RANGE_H_
7
#include <atomic>
#include <unordered_map>
#include <vector>

#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/utils/allocation.h"
#include "v8-internal.h"
15
16namespace v8 {
17namespace internal {
18
19// The process-wide singleton that keeps track of code range regions with the
20// intention to reuse free code range regions as a workaround for CFG memory
21// leaks (see crbug.com/870054).
23 public:
24 // When near code range is enabled, an address within
25 // kMaxPCRelativeCodeRangeInMB to the embedded blob is returned if
26 // there is enough space. Otherwise a random address is returned.
27 // When near code range is disabled, returns the most recently freed code
28 // range start address for the given size. If there is no such entry, then a
29 // random address is returned.
30 V8_EXPORT_PRIVATE Address GetAddressHint(size_t code_range_size,
31 size_t alignment);
32
34 size_t code_range_size);
35
36 private:
38 // A map from code range size to an array of recently freed code range
39 // addresses. There should be O(1) different code range sizes.
40 // The length of each array is limited by the peak number of code ranges,
41 // which should be also O(1).
42 std::unordered_map<size_t, std::vector<Address>> recently_freed_;
43};
44
45// A code range is a virtual memory cage that may contain executable code. It
46// has the following layout.
47//
48// +---------+---------+----------------- ~~~ -+
49// | RW | ... | ... |
50// +---------+---------+------------------ ~~~ -+
51// ^ ^
52// base allocatable base
53//
54// <------------------><------------------------->
55// non-allocatable allocatable region
56// region
57// <-------->
58// reserved
59// <--------------------------------------------->
60// CodeRange
61//
62// The start of the reservation may include reserved page with read-write access
63// as required by some platforms (Win64) followed by an unmapped region which
64// make allocatable base MemoryChunk::kAlignment-aligned. The cage's page
65// allocator explicitly marks the optional reserved page as occupied, so it's
66// excluded from further allocations.
67//
68// The following conditions hold:
69// 1) |reservation()->region()| == [base(), base() + size()[,
70// 2) |base| is OS page size aligned,
71// 3) |allocatable base| is MemoryChunk::kAlignment-aligned,
72// 4) non-allocatable region might be empty (if |base| == |allocatable base|),
73// 5) if optional RW pages are necessary and they don't fit into non-allocatable
74// region, then the first page is excluded from allocatable area.
75class CodeRange final : public VirtualMemoryCage {
76 public:
78
79 // Returns the size of the initial area of a code range, which is marked
80 // writable and reserved to contain unwind information.
81 static size_t GetWritableReservedAreaSize();
82
83 uint8_t* embedded_blob_code_copy() const {
84 // remap_embedded_builtins_mutex_ is designed to protect write contention to
85 // embedded_blob_code_copy_. It is safe to be read without taking the
86 // mutex. It is read to check if short builtins ought to be enabled because
87 // a shared CodeRange has already remapped builtins and to find where the
88 // instruction stream for a builtin is.
89 //
90 // For the first, this racing with an Isolate calling RemapEmbeddedBuiltins
91 // may result in disabling short builtins, which is not a correctness issue.
92 //
93 // For the second, this racing with an Isolate calling RemapEmbeddedBuiltins
94 // may result in an already running Isolate that did not have short builtins
95 // enabled (due to max old generation size) to switch over to using remapped
96 // builtins, which is also not a correctness issue as the remapped builtins
97 // are byte-equivalent.
98 //
99 // Both these scenarios should be rare. The initial Isolate is usually
100 // created by itself, i.e. without contention. Additionally, the first
101 // Isolate usually remaps builtins on machines with enough memory, not
102 // subsequent Isolates in the same process.
103 return embedded_blob_code_copy_.load(std::memory_order_acquire);
104 }
105
106 // Initialize the address space reservation for the code range. The immutable
107 // flag specifies if the reservation will live until the end of the process
108 // and can be sealed.
109 bool InitReservation(v8::PageAllocator* page_allocator, size_t requested,
110 bool immutable);
111
112 V8_EXPORT_PRIVATE void Free();
113
114 // Remap and copy the embedded builtins into this CodeRange. This method is
115 // idempotent and only performs the copy once. This property is so that this
116 // method can be used uniformly regardless of whether there is a single global
117 // pointer address space or multiple pointer cages. Returns the address of
118 // the copy.
119 //
120 // The builtins code region will be freed with the code range at tear down.
121 //
122 // When ENABLE_SLOW_DCHECKS is on, the contents of the embedded_blob_code are
123 // compared against the already copied version.
124 uint8_t* RemapEmbeddedBuiltins(Isolate* isolate,
125 const uint8_t* embedded_blob_code,
126 size_t embedded_blob_code_size);
127
128 private:
129 static base::AddressRegion GetPreferredRegion(size_t radius_in_megabytes,
130 size_t allocate_page_size);
131
132 // Used when short builtin calls are enabled, where embedded builtins are
133 // copied into the CodeRange so calls can be nearer.
134 std::atomic<uint8_t*> embedded_blob_code_copy_{nullptr};
135
136 // When sharing a CodeRange among Isolates, calls to RemapEmbeddedBuiltins may
137 // race during Isolate::Init.
139
140#if !defined(V8_OS_WIN) && !defined(V8_OS_IOS) && defined(DEBUG)
141 bool immutable_ = false;
142#endif
143};
144
145} // namespace internal
146} // namespace v8
147
148#endif // V8_HEAP_CODE_RANGE_H_
std::unordered_map< size_t, std::vector< Address > > recently_freed_
Definition code-range.h:42
V8_EXPORT_PRIVATE void NotifyFreedCodeRange(Address code_range_start, size_t code_range_size)
Definition code-range.cc:54
V8_EXPORT_PRIVATE Address GetAddressHint(size_t code_range_size, size_t alignment)
Definition code-range.cc:35
bool InitReservation(v8::PageAllocator *page_allocator, size_t requested, bool immutable)
Definition code-range.cc:70
V8_EXPORT_PRIVATE ~CodeRange() override
Definition code-range.cc:60
uint8_t * embedded_blob_code_copy() const
Definition code-range.h:83
static size_t GetWritableReservedAreaSize()
Definition code-range.cc:63
V8_EXPORT_PRIVATE void Free()
base::Mutex remap_embedded_builtins_mutex_
Definition code-range.h:138
uint8_t * RemapEmbeddedBuiltins(Isolate *isolate, const uint8_t *embedded_blob_code, size_t embedded_blob_code_size)
std::atomic< uint8_t * > embedded_blob_code_copy_
Definition code-range.h:134
static base::AddressRegion GetPreferredRegion(size_t radius_in_megabytes, size_t allocate_page_size)
base::BoundedPageAllocator * page_allocator() const
Definition allocation.h:369
#define V8_EXPORT_PRIVATE
Definition macros.h:460