#ifndef V8_INIT_ISOLATE_GROUP_H_
#define V8_INIT_ISOLATE_GROUP_H_

#include "absl/container/flat_hash_set.h"

#ifdef V8_ENABLE_LEAPTIERING
#include "src/sandbox/js-dispatch-table.h"
#endif  // V8_ENABLE_LEAPTIERING

#ifdef V8_ENABLE_SANDBOX
#include "src/sandbox/code-pointer-table.h"
#endif  // V8_ENABLE_SANDBOX

#ifdef V8_ENABLE_SANDBOX
class MemoryChunkMetadata;
class Sandbox;
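
// Allocator for ArrayBuffer backing stores that must live inside the sandbox.
// Backing memory is obtained lazily from the owning Sandbox and parcelled out
// with a base::RegionAllocator (see the members below).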
class SandboxedArrayBufferAllocator {
 public:
  SandboxedArrayBufferAllocator() = default;
  SandboxedArrayBufferAllocator(const SandboxedArrayBufferAllocator&) = delete;
  SandboxedArrayBufferAllocator& operator=(
      const SandboxedArrayBufferAllocator&) = delete;

  void LazyInitialize(Sandbox* sandbox);

  bool is_initialized() const { return !!sandbox_; }

  ~SandboxedArrayBufferAllocator();

  void* Allocate(size_t length);

  void Free(void* data);

 private:
  // Granularity at which backing memory is handed out; presumably the page
  // size of the RegionAllocator below.
  static constexpr size_t kAllocationGranularity = 128;
  // Step size by which the accessible part of the backing region is grown.
  static constexpr size_t kChunkSize = 1 * MB;

  std::unique_ptr<base::RegionAllocator> region_alloc_;
  size_t end_of_accessible_region_ = 0;
  Sandbox* sandbox_ = nullptr;
};
#endif  // V8_ENABLE_SANDBOX

class OptimizingCompileTaskExecutor;
class ReadOnlyArtifacts;
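
// An IsolateGroup owns the resources shared by every Isolate placed in the
// group: the pointer-compression cages, the code range, the shared read-only
// heap and artifacts, and (when the sandbox is enabled) the sandbox together
// with its pointer tables.
class V8_EXPORT_PRIVATE IsolateGroup {
 public: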
  static void InitializeOncePerProcess();
  static void TearDownOncePerProcess();

#ifdef V8_COMPRESS_POINTERS
  VirtualMemoryCage* GetPtrComprCage() const {
    return pointer_compression_cage_;
  }
  VirtualMemoryCage* GetTrustedPtrComprCage() const {
    return trusted_pointer_compression_cage_;
  }
  Address GetPtrComprCageBase() const { return GetPtrComprCage()->base(); }
  Address GetTrustedPtrComprCageBase() const {
    return GetTrustedPtrComprCage()->base();
  }
#endif  // V8_COMPRESS_POINTERS
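
  // Lazily creates (if necessary) and returns a code range of at least
  // |requested_size| for this group.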
  CodeRange* EnsureCodeRange(size_t requested_size);

#ifdef V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
#ifdef USING_V8_SHARED_PRIVATE
  static void set_current(IsolateGroup* group) {
    set_current_non_inlined(group);
  }
#else
  static void set_current(IsolateGroup* group) { current_ = group; }
#endif  // USING_V8_SHARED_PRIVATE
#endif  // V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES

  bool has_shared_space_isolate() const {
    return shared_space_isolate_ != nullptr;
  }

  Isolate* shared_space_isolate() const { return shared_space_isolate_; }

  void init_shared_space_isolate(Isolate* isolate) {
    DCHECK(!has_shared_space_isolate());
    shared_space_isolate_ = isolate;
  }

  void set_shared_read_only_heap(ReadOnlyHeap* heap) {
    shared_read_only_heap_ = heap;
  }

  ReadOnlyArtifacts* read_only_artifacts() {
    return read_only_artifacts_.get();
  }
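
  // Sandbox state shared by all isolates in this group.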
#ifdef V8_ENABLE_SANDBOX
  Sandbox* sandbox() { return sandbox_; }

  CodePointerTable* code_pointer_table() { return &code_pointer_table_; }

  MemoryChunkMetadata** metadata_pointer_table() {
    return metadata_pointer_table_;
  }

  SandboxedArrayBufferAllocator* GetSandboxedArrayBufferAllocator();
#endif  // V8_ENABLE_SANDBOX

#ifdef V8_ENABLE_LEAPTIERING
  JSDispatchTable* js_dispatch_table() { return &js_dispatch_table_; }
#endif  // V8_ENABLE_LEAPTIERING

  void SetupReadOnlyHeap(Isolate* isolate,
                         SnapshotData* read_only_snapshot_data,
                         bool can_rehash);

  PagePool* page_pool() const { return page_pool_.get(); }
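
  // Invokes |callback| with some other isolate of this group, preferring the
  // group's main isolate, and reports whether such an isolate was found.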
  template <typename Callback>
  bool FindAnotherIsolateLocked(Isolate* isolate, Callback callback) {
    Isolate* target_isolate = nullptr;
    if (main_isolate_ != isolate) {
      target_isolate = main_isolate_;
    } else {
      for (Isolate* entry : isolates_) {
        if (entry != isolate) {
          target_isolate = entry;
          break;
        }
      }
    }
    if (target_isolate) callback(target_isolate);
    return target_isolate != nullptr;
  }
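
  // Example use (hypothetical caller):
  //   group->FindAnotherIsolateLocked(isolate, [](Isolate* other) {
  //     // Inspect |other| here.
  //   });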

 private:
  friend class PoolTest;

  static void ReleaseDefault();

#ifdef V8_ENABLE_SANDBOX
  void Initialize(bool process_wide, Sandbox* sandbox);
#else
  void Initialize(bool process_wide);
#endif  // V8_ENABLE_SANDBOX

#ifdef V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
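  // Out-of-line variant of set_current(), used by the USING_V8_SHARED_PRIVATE
  // configuration of the public setter above.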
  static void set_current_non_inlined(IsolateGroup* group);
#endif  // V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES

  std::atomic<int> reference_count_{1};
  int isolate_count_{0};

#ifdef V8_COMPRESS_POINTERS
  VirtualMemoryCage* pointer_compression_cage_ = nullptr;
  VirtualMemoryCage* trusted_pointer_compression_cage_ = nullptr;
#endif  // V8_COMPRESS_POINTERS

#ifdef V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
  static thread_local IsolateGroup* current_;
#endif  // V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES

  Address external_ref_table_[ExternalReferenceTable::kSizeIsolateIndependent] =
      {};

  std::unique_ptr<ReadOnlyArtifacts> read_only_artifacts_;
  std::unique_ptr<PagePool> page_pool_;
  std::unique_ptr<OptimizingCompileTaskExecutor>
      optimizing_compile_task_executor_;

  Isolate* shared_space_isolate_ = nullptr;
  ReadOnlyHeap* shared_read_only_heap_ = nullptr;
  Isolate* main_isolate_ = nullptr;
  absl::flat_hash_set<Isolate*> isolates_;

#ifdef V8_ENABLE_SANDBOX
  Sandbox* sandbox_ = nullptr;
  CodePointerTable code_pointer_table_;
  MemoryChunkMetadata*
      metadata_pointer_table_[MemoryChunkConstants::kMetadataPointerTableSize] =
          {};
  SandboxedArrayBufferAllocator backend_allocator_;
#endif  // V8_ENABLE_SANDBOX

#ifdef V8_ENABLE_LEAPTIERING
  JSDispatchTable js_dispatch_table_;
#endif  // V8_ENABLE_LEAPTIERING
};

#endif  // V8_INIT_ISOLATE_GROUP_H_