isolate-group.h
// Copyright 2024 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_INIT_ISOLATE_GROUP_H_
#define V8_INIT_ISOLATE_GROUP_H_

#include <memory>

#include "absl/container/flat_hash_set.h"
#include "src/base/logging.h"
#include "src/base/once.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"

#ifdef V8_ENABLE_LEAPTIERING
#include "src/sandbox/js-dispatch-table.h"
#endif  // V8_ENABLE_LEAPTIERING

#ifdef V8_ENABLE_SANDBOX
#include "src/sandbox/code-pointer-table.h"
#endif

namespace v8 {

namespace base {
template <typename T>
class LeakyObject;
}  // namespace base

namespace internal {

class PagePool;

#ifdef V8_ENABLE_SANDBOX
class MemoryChunkMetadata;
class Sandbox;

// Backend allocator shared by all ArrayBufferAllocator instances inside one
// sandbox. This way, a single region of virtual address space is reserved
// inside the sandbox from which all ArrayBufferAllocators allocate their
// memory, instead of each allocator creating its own region, which could
// cause address space exhaustion inside the sandbox.
// TODO(chromium:1340224): replace this with a more efficient allocator.
class SandboxedArrayBufferAllocator {
 public:
  SandboxedArrayBufferAllocator() = default;
  SandboxedArrayBufferAllocator(const SandboxedArrayBufferAllocator&) = delete;
  SandboxedArrayBufferAllocator& operator=(
      const SandboxedArrayBufferAllocator&) = delete;

  void LazyInitialize(Sandbox* sandbox);

  bool is_initialized() const { return !!sandbox_; }

  // Returns the page allocator that should be used for allocating pages for
  // the V8 heap. When pointer compression is enabled, it allocates pages
  // within the pointer compression cage.
  v8::PageAllocator* page_allocator();

  ~SandboxedArrayBufferAllocator();

  void* Allocate(size_t length);

  void Free(void* data);

 private:
  // Use a region allocator with a "page size" of 128 bytes as a reasonable
  // compromise between the number of regions it has to manage and the amount
  // of memory wasted due to rounding allocation sizes up to the page size.
  static constexpr size_t kAllocationGranularity = 128;
  // The backing memory's accessible region is grown in chunks of this size.
  static constexpr size_t kChunkSize = 1 * MB;
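  // Illustrative arithmetic (not part of the original header): with 128-byte
  // granularity, a 1000-byte request is rounded up to 8 * 128 = 1024 bytes,
  // so at most 127 bytes are wasted per allocation, while tracking a 1 MB
  // chunk takes only 8192 region-allocator pages rather than one per byte.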

  std::unique_ptr<base::RegionAllocator> region_alloc_;
  size_t end_of_accessible_region_ = 0;
  Sandbox* sandbox_ = nullptr;
};
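
// Usage sketch (illustrative only; not part of the original header). The
// accessors used here are declared on IsolateGroup further down in this file;
// |byte_length| is an assumed variable:
//
//   SandboxedArrayBufferAllocator* allocator =
//       IsolateGroup::current()->GetSandboxedArrayBufferAllocator();
//   void* data = allocator->Allocate(byte_length);
//   ...
//   allocator->Free(data);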
#endif

class CodeRange;
class Isolate;
class OptimizingCompileTaskExecutor;
class ReadOnlyHeap;
class ReadOnlyArtifacts;
class SnapshotData;

// An IsolateGroup allows an API user to control which isolates get allocated
// together in a shared pointer cage.
//
// The standard configuration of V8 is to enable pointer compression and to
// allocate all isolates in a single shared pointer cage
// (V8_COMPRESS_POINTERS_IN_SHARED_CAGE). This also enables the sandbox
// (V8_ENABLE_SANDBOX), of which there can currently be only one per process,
// as it requires a large part of the virtual address space.
//
// The standard configuration comes with a limitation, in that the total size
// of the compressed pointer cage is limited to 4 GB. Some API users would like
// pointer compression but also want to avoid the 4 GB limit of the shared
// pointer cage. Isolate groups allow users to declare which isolates should be
// co-located in a single pointer cage.
//
// Isolate groups are useful only if pointer compression is enabled. Otherwise,
// the isolate could just allocate pages from the global system allocator;
// there's no need to stay within any particular address range. If pointer
// compression is disabled, there is just one global isolate group.
//
// Note that JavaScript objects can only be passed between isolates of the same
// group. Ensuring this invariant is the responsibility of the API user.
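//
// Usage sketch (illustrative only; not part of the original header):
//
//   IsolateGroup* group = IsolateGroup::CanCreateNewGroups()
//                             ? IsolateGroup::New()
//                             : IsolateGroup::AcquireDefault();
//   // ... create isolates in |group|; objects may only be shared inside it.
//   group->Release();  // The group's pointer cage is freed once the refcount
//                      // drops to zero.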
class V8_EXPORT_PRIVATE IsolateGroup final {
 public:
  // InitializeOncePerProcess should be called early on to initialize the
  // process-wide group.
  static IsolateGroup* AcquireDefault() { return GetDefault()->Acquire(); }

  // Return true if we can create additional isolate groups: only the case if
  // multiple pointer cages were configured in at build time.
  static constexpr bool CanCreateNewGroups() {
    return COMPRESS_POINTERS_IN_MULTIPLE_CAGES_BOOL;
  }

  // Create a new isolate group, allocating a fresh pointer cage if pointer
  // compression is enabled. If new groups cannot be created in this build
  // configuration, abort.
  //
  // The pointer cage for isolates in this group will be released when the
  // group's refcount drops to zero. The group's initial refcount is 1.
  static IsolateGroup* New();

  static void InitializeOncePerProcess();
  static void TearDownOncePerProcess();

  // Obtain a fresh reference on the isolate group.
  IsolateGroup* Acquire() {
    DCHECK_LT(0, reference_count_.load());
    reference_count_++;
    return this;
  }

  // Release a reference on an isolate group, possibly freeing any shared
  // memory resources.
  void Release();

  v8::PageAllocator* page_allocator() const { return page_allocator_; }

#ifdef V8_COMPRESS_POINTERS
  VirtualMemoryCage* GetPtrComprCage() const {
    return pointer_compression_cage_;
  }
  VirtualMemoryCage* GetTrustedPtrComprCage() const {
    return trusted_pointer_compression_cage_;
  }
  Address GetPtrComprCageBase() const { return GetPtrComprCage()->base(); }
  Address GetTrustedPtrComprCageBase() const {
    return GetTrustedPtrComprCage()->base();
  }
#endif  // V8_COMPRESS_POINTERS

  CodeRange* EnsureCodeRange(size_t requested_size);
  CodeRange* GetCodeRange() const { return code_range_.get(); }

#ifdef V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
#ifdef USING_V8_SHARED_PRIVATE
  static IsolateGroup* current() { return current_non_inlined(); }
  static void set_current(IsolateGroup* group) {
    set_current_non_inlined(group);
  }
#else   // !USING_V8_SHARED_PRIVATE
  static IsolateGroup* current() { return current_; }
  static void set_current(IsolateGroup* group) { current_ = group; }
#endif  // USING_V8_SHARED_PRIVATE
#else   // !V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
  static IsolateGroup* current() { return GetDefault(); }
#endif  // V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES

  MemorySpan<Address> external_ref_table() { return external_ref_table_; }

  bool has_shared_space_isolate() const {
    return shared_space_isolate_ != nullptr;
  }

  Isolate* shared_space_isolate() const {
    return shared_space_isolate_;
  }

  void init_shared_space_isolate(Isolate* isolate) {
    DCHECK(!has_shared_space_isolate());
    shared_space_isolate_ = isolate;
  }

  OptimizingCompileTaskExecutor* optimizing_compile_task_executor();

  ReadOnlyHeap* shared_read_only_heap() const { return shared_read_only_heap_; }
  void set_shared_read_only_heap(ReadOnlyHeap* heap) {
    shared_read_only_heap_ = heap;
  }

  base::Mutex* mutex() { return &mutex_; }

  ReadOnlyArtifacts* read_only_artifacts() {
    return read_only_artifacts_.get();
  }

  ReadOnlyArtifacts* InitializeReadOnlyArtifacts();

  // Unlike page_allocator(), this one is meant to be used for allocating
  // memory for array backing stores or Wasm memory. When pointer compression
  // is enabled, it allocates memory outside of the pointer compression cage.
  // When the sandbox is enabled, it allocates memory within the sandbox.
  PageAllocator* GetBackingStorePageAllocator();
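  // Illustrative sketch (not part of the original header): given an
  // IsolateGroup* |group|, a backing store region can be reserved through the
  // standard v8::PageAllocator interface. |byte_length| and the use of
  // RoundUp() are assumptions made for the example.
  //
  //   PageAllocator* allocator = group->GetBackingStorePageAllocator();
  //   size_t page_size = allocator->AllocatePageSize();
  //   size_t size = RoundUp(byte_length, page_size);
  //   void* memory = allocator->AllocatePages(nullptr, size, page_size,
  //                                           PageAllocator::kReadWrite);
  //   ...
  //   allocator->FreePages(memory, size);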

#ifdef V8_ENABLE_SANDBOX
  Sandbox* sandbox() { return sandbox_; }

  CodePointerTable* code_pointer_table() { return &code_pointer_table_; }

  MemoryChunkMetadata** metadata_pointer_table() {
    return metadata_pointer_table_;
  }

  SandboxedArrayBufferAllocator* GetSandboxedArrayBufferAllocator();
#endif  // V8_ENABLE_SANDBOX

#ifdef V8_ENABLE_LEAPTIERING
  JSDispatchTable* js_dispatch_table() { return &js_dispatch_table_; }
#endif  // V8_ENABLE_LEAPTIERING

  void SetupReadOnlyHeap(Isolate* isolate,
                         SnapshotData* read_only_snapshot_data,
                         bool can_rehash);
  void AddIsolate(Isolate* isolate);
  void RemoveIsolate(Isolate* isolate);

  PagePool* page_pool() const {
    DCHECK(page_pool_);
    return page_pool_.get();
  }

  template <typename Callback>
  bool FindAnotherIsolateLocked(Isolate* isolate, Callback callback) {
    // Holding this mutex while invoking the callback prevents the isolate from
    // being torn down in the meantime.
    base::MutexGuard group_guard(mutex_);
    Isolate* target_isolate = nullptr;
    DCHECK_NOT_NULL(main_isolate_);

    if (main_isolate_ != isolate) {
      target_isolate = main_isolate_;
    } else {
      for (Isolate* entry : isolates_) {
        if (entry != isolate) {
          target_isolate = entry;
          break;
        }
      }
    }

    if (target_isolate) {
      callback(target_isolate);
      return true;
    }

    return false;
  }
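  // Usage sketch (illustrative; not part of the original header). |group| and
  // |current_isolate| are assumed variables:
  //
  //   bool found = group->FindAnotherIsolateLocked(
  //       current_isolate, [](Isolate* other) {
  //         // |other| cannot tear down while the group mutex is held.
  //       });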

  V8_INLINE static IsolateGroup* GetDefault() { return default_isolate_group_; }

 private:
  friend class base::LeakyObject<IsolateGroup>;
  friend class PoolTest;
  friend class PagePool;

  // Unless you manually create a new isolate group, all isolates in a process
  // are in the same isolate group and share process-wide resources from
  // that default group.
  static IsolateGroup* default_isolate_group_;

  IsolateGroup() = default;
  IsolateGroup(const IsolateGroup&) = delete;
  IsolateGroup& operator=(const IsolateGroup&) = delete;

  // Only used for testing.
  static void ReleaseDefault();

#ifdef V8_ENABLE_SANDBOX
  void Initialize(bool process_wide, Sandbox* sandbox);
#else   // V8_ENABLE_SANDBOX
  void Initialize(bool process_wide);
#endif  // V8_ENABLE_SANDBOX

#ifdef V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
  static IsolateGroup* current_non_inlined();
  static void set_current_non_inlined(IsolateGroup* group);
#endif

  std::atomic<int> reference_count_{1};
  int isolate_count_{0};
  v8::PageAllocator* page_allocator_ = nullptr;

#ifdef V8_COMPRESS_POINTERS
  VirtualMemoryCage* trusted_pointer_compression_cage_ = nullptr;
  VirtualMemoryCage* pointer_compression_cage_ = nullptr;
  VirtualMemoryCage reservation_;
#endif  // V8_COMPRESS_POINTERS

#ifdef V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
  thread_local static IsolateGroup* current_;
#endif  // V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES

  std::unique_ptr<PagePool> page_pool_;

  base::OnceType init_code_range_ = V8_ONCE_INIT;
  std::unique_ptr<CodeRange> code_range_;
  Address external_ref_table_[ExternalReferenceTable::kSizeIsolateIndependent] =
      {0};

  // Mutex used to synchronize adding isolates to and removing them from this
  // group. It is also used to ensure that ReadOnlyArtifacts creation is only
  // done once.
  base::Mutex mutex_;
  std::unique_ptr<ReadOnlyArtifacts> read_only_artifacts_;
  ReadOnlyHeap* shared_read_only_heap_ = nullptr;
  Isolate* shared_space_isolate_ = nullptr;
  std::unique_ptr<OptimizingCompileTaskExecutor>
      optimizing_compile_task_executor_;

  // Set of isolates currently in the IsolateGroup. Guarded by mutex_.
  absl::flat_hash_set<Isolate*> isolates_;

  // The first isolate to join the group. It is replaced by another isolate if
  // it tears down before all other isolates have left.
  Isolate* main_isolate_ = nullptr;

#ifdef V8_ENABLE_SANDBOX
  Sandbox* sandbox_ = nullptr;
  CodePointerTable code_pointer_table_;
  MemoryChunkMetadata*
      metadata_pointer_table_[MemoryChunkConstants::kMetadataPointerTableSize] =
          {nullptr};
  SandboxedArrayBufferAllocator backend_allocator_;
#endif  // V8_ENABLE_SANDBOX

#ifdef V8_ENABLE_LEAPTIERING
  JSDispatchTable js_dispatch_table_;
#endif  // V8_ENABLE_LEAPTIERING
};

}  // namespace internal
}  // namespace v8

#endif  // V8_INIT_ISOLATE_GROUP_H_