v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
isolate-group.cc
1// Copyright 2018 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
7#include <memory>
8
15#include "src/heap/code-range.h"
16#include "src/heap/page-pool.h"
21#include "src/sandbox/sandbox.h"
22#include "src/utils/memcopy.h"
23#include "src/utils/utils.h"
24
25namespace v8 {
26namespace internal {
27
28#ifdef V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
29thread_local IsolateGroup* IsolateGroup::current_ = nullptr;
30
31// static
32IsolateGroup* IsolateGroup::current_non_inlined() { return current_; }
33// static
34void IsolateGroup::set_current_non_inlined(IsolateGroup* group) {
35 current_ = group;
36}
37
38class IsolateGroupAccessScope final {
39 public:
40 explicit IsolateGroupAccessScope(IsolateGroup* group)
41 : previous_(IsolateGroup::current()) {
42 IsolateGroup::set_current(group);
43 }
44
45 ~IsolateGroupAccessScope() { IsolateGroup::set_current(previous_); }
46
47 private:
48 IsolateGroup* previous_;
49};
50#else
57#endif // V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
58
59IsolateGroup* IsolateGroup::default_isolate_group_ = nullptr;
60
61#ifdef V8_COMPRESS_POINTERS
62struct PtrComprCageReservationParams
63 : public VirtualMemoryCage::ReservationParams {
64 PtrComprCageReservationParams() {
65 page_allocator = GetPlatformPageAllocator();
66
67 reservation_size = kPtrComprCageReservationSize;
68 base_alignment = kPtrComprCageBaseAlignment;
69
70 // Simplify BoundedPageAllocator's life by configuring it to use the same
71 // page size as the Heap will use (MemoryChunk::kPageSize).
72 page_size =
73 RoundUp(size_t{1} << kPageSizeBits, page_allocator->AllocatePageSize());
74 requested_start_hint = RoundDown(
75 reinterpret_cast<Address>(page_allocator->GetRandomMmapAddr()),
76 base_alignment);
77
78#if V8_OS_FUCHSIA && !V8_EXTERNAL_CODE_SPACE
79 // If external code space is not enabled then executable pages (e.g. copied
80 // builtins, and JIT pages) will fall under the pointer compression range.
81 // Under Fuchsia that means the entire range must be allocated as JITtable.
82 permissions = PageAllocator::Permission::kNoAccessWillJitLater;
83#else
84 permissions = PageAllocator::Permission::kNoAccess;
85#endif
86 page_initialization_mode =
87 base::PageInitializationMode::kAllocatedPagesCanBeUninitialized;
88 page_freeing_mode = base::PageFreeingMode::kMakeInaccessible;
89 }
90};
91#endif // V8_COMPRESS_POINTERS
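// Illustrative sketch (annotation, not part of isolate-group.cc): the
// reservation parameters above combine two alignment helpers. RoundUp picks a
// cage page size that is a multiple of both the heap page size and the
// platform allocation granularity; RoundDown aligns the random placement hint
// to the cage base alignment. The same arithmetic in self-contained form,
// with hypothetical sizes standing in for V8's constants:

#include <cstdint>

namespace cage_math_sketch {

constexpr uint64_t RoundUpTo(uint64_t x, uint64_t m) { return (x + m - 1) / m * m; }
constexpr uint64_t RoundDownTo(uint64_t x, uint64_t m) { return x / m * m; }

// 256 KiB heap pages on a platform whose allocation granularity is 64 KiB:
static_assert(RoundUpTo(256 * 1024, 64 * 1024) == 256 * 1024, "already aligned");

// A random mmap hint aligned down to a hypothetical 4 GiB cage alignment:
static_assert(RoundDownTo(0x1'2345'6789'ABCDULL, uint64_t{4} << 30) ==
                  0x1'2345'0000'0000ULL,
              "hint is rounded down to the cage alignment");

}  // namespace cage_math_sketch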
92
93IsolateGroup::~IsolateGroup() {
94 DCHECK_EQ(reference_count_.load(), 0);
96 DCHECK(isolates_.empty());
98
99 page_pool_->TearDown();
100
101#ifdef V8_ENABLE_LEAPTIERING
102 js_dispatch_table_.TearDown();
103#endif // V8_ENABLE_LEAPTIERING
104
105#ifdef V8_ENABLE_SANDBOX
106 code_pointer_table_.TearDown();
107#endif // V8_ENABLE_SANDBOX
108
109 // Reset the code range before `reservation_` is freed; this matters when
110 // pointer compression is enabled but the external code space is not.
111 code_range_.reset();
112
113#ifdef V8_COMPRESS_POINTERS
114 DCHECK(reservation_.IsReserved());
115 reservation_.Free();
116#endif // V8_COMPRESS_POINTERS
117
118#ifdef V8_ENABLE_SANDBOX
119 sandbox_->TearDown();
120#endif // V8_ENABLE_SANDBOX
121}
122
123#ifdef V8_ENABLE_SANDBOX
124void IsolateGroup::Initialize(bool process_wide, Sandbox* sandbox) {
125 DCHECK(!reservation_.IsReserved());
126 CHECK(sandbox->is_initialized());
127 process_wide_ = process_wide;
128 PtrComprCageReservationParams params;
129 Address base = sandbox->address_space()->AllocatePages(
130 sandbox->base(), params.reservation_size, params.base_alignment,
131 PagePermissions::kNoAccess);
132 CHECK_EQ(sandbox->base(), base);
133 base::AddressRegion existing_reservation(base, params.reservation_size);
134 params.page_allocator = sandbox->page_allocator();
135 if (!reservation_.InitReservation(params, existing_reservation)) {
136 V8::FatalProcessOutOfMemory(
137 nullptr,
138 "Failed to reserve virtual memory for process-wide V8 "
139 "pointer compression cage");
140 }
141 page_allocator_ = reservation_.page_allocator();
142 pointer_compression_cage_ = &reservation_;
143 trusted_pointer_compression_cage_ =
144 TrustedRange::EnsureProcessWideTrustedRange(kMaximalTrustedRangeSize);
145 sandbox_ = sandbox;
146
147 code_pointer_table()->Initialize();
148 optimizing_compile_task_executor_ =
149 std::make_unique<OptimizingCompileTaskExecutor>();
150 page_pool_ = std::make_unique<PagePool>();
151
152#ifdef V8_ENABLE_LEAPTIERING
153 js_dispatch_table()->Initialize();
154#endif // V8_ENABLE_LEAPTIERING
155}
156#elif defined(V8_COMPRESS_POINTERS)
157void IsolateGroup::Initialize(bool process_wide) {
158 DCHECK(!reservation_.IsReserved());
159 process_wide_ = process_wide;
160 PtrComprCageReservationParams params;
161 if (!reservation_.InitReservation(params)) {
162 V8::FatalProcessOutOfMemory(
163 nullptr,
164 "Failed to reserve virtual memory for process-wide V8 "
165 "pointer compression cage");
166 }
167 page_allocator_ = reservation_.page_allocator();
168 pointer_compression_cage_ = &reservation_;
169 trusted_pointer_compression_cage_ = &reservation_;
170 optimizing_compile_task_executor_ =
171 std::make_unique<OptimizingCompileTaskExecutor>();
172 page_pool_ = std::make_unique<PagePool>();
173#ifdef V8_ENABLE_LEAPTIERING
174 js_dispatch_table()->Initialize();
175#endif // V8_ENABLE_LEAPTIERING
176}
177#else // !V8_COMPRESS_POINTERS
178void IsolateGroup::Initialize(bool process_wide) {
179 process_wide_ = process_wide;
181 optimizing_compile_task_executor_ =
182 std::make_unique<OptimizingCompileTaskExecutor>();
183 page_pool_ = std::make_unique<PagePool>();
184#ifdef V8_ENABLE_LEAPTIERING
185 js_dispatch_table()->Initialize();
186#endif // V8_ENABLE_LEAPTIERING
187}
188#endif // V8_ENABLE_SANDBOX
189
190// static
191void IsolateGroup::InitializeOncePerProcess() {
194 IsolateGroup* group = GetDefault();
195
197#ifdef V8_ENABLE_SANDBOX
198 group->Initialize(true, Sandbox::GetDefault());
199#else
200 group->Initialize(true);
201#endif
203
204#ifdef V8_COMPRESS_POINTERS
205 V8HeapCompressionScheme::InitBase(group->GetPtrComprCageBase());
206#endif // V8_COMPRESS_POINTERS
207#ifdef V8_EXTERNAL_CODE_SPACE
208 // Speculatively set the code cage base to the same value in case jitless
209 // mode will be used. Once the process-wide CodeRange instance is created
210 // the code cage base will be set accordingly.
211 ExternalCodeCompressionScheme::InitBase(V8HeapCompressionScheme::base());
212#endif // V8_EXTERNAL_CODE_SPACE
213#ifdef V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
214 IsolateGroup::set_current(group);
215#endif
216}
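// Illustrative sketch (annotation, not part of isolate-group.cc): the cage
// base installed via V8HeapCompressionScheme::InitBase() is what makes
// pointer compression work. Conceptually, a 64-bit pointer into the cage is
// stored as a 32-bit offset and reconstructed by adding the cage base back.
// A simplified stand-in, with hypothetical types and no V8 internals:

#include <cstdint>

namespace ptr_compr_sketch {

struct Cage {
  uintptr_t base;  // 4 GiB-aligned reservation base, analogous to the cage base.

  uint32_t Compress(uintptr_t full_ptr) const {
    // The pointer must lie inside the cage, so the difference fits in 32 bits.
    return static_cast<uint32_t>(full_ptr - base);
  }

  uintptr_t Decompress(uint32_t compressed) const {
    // Re-attach the cage base to recover the full pointer.
    return base + compressed;
  }
};

}  // namespace ptr_compr_sketch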
217
218// static
220
221void IsolateGroup::Release() {
222 DCHECK_LT(0, reference_count_.load());
223
224 if (--reference_count_ == 0) {
225 delete this;
226 }
227}
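// Illustrative sketch (annotation, not part of isolate-group.cc): Release()
// implements a simple intrusive reference count, where the group deletes
// itself once the last reference is dropped. The same pattern in isolation,
// with a hypothetical Acquire() counterpart:

#include <atomic>

namespace refcount_sketch {

class Shared {
 public:
  void Acquire() { reference_count_.fetch_add(1, std::memory_order_relaxed); }

  void Release() {
    // fetch_sub returns the previous value; the caller that drops the count
    // to zero destroys the object.
    if (reference_count_.fetch_sub(1, std::memory_order_acq_rel) == 1) {
      delete this;
    }
  }

 private:
  ~Shared() = default;  // Only Release() may destroy the object.
  std::atomic<int> reference_count_{1};
};

}  // namespace refcount_sketch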
228
229namespace {
230void InitCodeRangeOnce(std::unique_ptr<CodeRange>* code_range_member,
231 v8::PageAllocator* page_allocator, size_t requested_size,
232 bool immutable) {
233 CodeRange* code_range = new CodeRange();
234 if (!code_range->InitReservation(page_allocator, requested_size, immutable)) {
235 V8::FatalProcessOutOfMemory(
236 nullptr, "Failed to reserve virtual memory for CodeRange");
237 }
238 code_range_member->reset(code_range);
239#ifdef V8_EXTERNAL_CODE_SPACE
240#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
241 ExternalCodeCompressionScheme::InitBase(
242 ExternalCodeCompressionScheme::PrepareCageBaseAddress(
243 code_range->base()));
244#endif // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
245#endif // V8_EXTERNAL_CODE_SPACE
246}
247} // namespace
248
249CodeRange* IsolateGroup::EnsureCodeRange(size_t requested_size) {
250 base::CallOnce(&init_code_range_, InitCodeRangeOnce, &code_range_,
251 page_allocator_, requested_size, process_wide_);
252 return code_range_.get();
253}
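// Illustrative sketch (annotation, not part of isolate-group.cc):
// EnsureCodeRange() relies on a call-once primitive so the code range is
// reserved exactly once no matter how many callers race to request it. The
// standard-library equivalent of that idiom, with a hypothetical resource:

#include <cstddef>
#include <memory>
#include <mutex>

namespace call_once_sketch {

struct Resource {
  explicit Resource(size_t size) : size(size) {}
  size_t size;
};

class Holder {
 public:
  Resource* EnsureResource(size_t requested_size) {
    // All callers pass through here, but the lambda runs only once; later
    // callers simply observe the already-initialized pointer.
    std::call_once(once_, [&] {
      resource_ = std::make_unique<Resource>(requested_size);
    });
    return resource_.get();
  }

 private:
  std::once_flag once_;
  std::unique_ptr<Resource> resource_;
};

}  // namespace call_once_sketch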
254
261
262PageAllocator* IsolateGroup::GetBackingStorePageAllocator() {
263#ifdef V8_ENABLE_SANDBOX
264 return sandbox()->page_allocator();
265#else
266 return GetPlatformPageAllocator();
267#endif
268}
269
270void IsolateGroup::SetupReadOnlyHeap(Isolate* isolate,
271 SnapshotData* read_only_snapshot_data,
272 bool can_rehash) {
273 DCHECK_EQ(isolate->isolate_group(), this);
274 base::MutexGuard guard(&mutex_);
275 ReadOnlyHeap::SetUp(isolate, read_only_snapshot_data, can_rehash);
276}
277
278void IsolateGroup::AddIsolate(Isolate* isolate) {
279 DCHECK_EQ(isolate->isolate_group(), this);
280 base::MutexGuard guard(&mutex_);
281 isolate_count_++;
282
283 const bool inserted = isolates_.insert(isolate).second;
284 CHECK(inserted);
285
286 if (!main_isolate_) {
287 main_isolate_ = isolate;
288 }
289
290 optimizing_compile_task_executor_->EnsureInitialized();
291
292 if (v8_flags.shared_heap) {
293 if (has_shared_space_isolate()) {
294 isolate->owns_shareable_data_ = false;
295 } else {
296 init_shared_space_isolate(isolate);
297 isolate->is_shared_space_isolate_ = true;
298 DCHECK(isolate->owns_shareable_data_);
299 }
300 }
301}
302
303void IsolateGroup::RemoveIsolate(Isolate* isolate) {
304 base::MutexGuard guard(&mutex_);
305
306 if (--isolate_count_ == 0) {
307 read_only_artifacts_.reset();
308
309 // We are removing the last isolate from the group. If this group has a
310 // shared heap, the last isolate has to be the shared space isolate.
311 DCHECK_EQ(has_shared_space_isolate(), isolate->is_shared_space_isolate());
312
313 if (isolate->is_shared_space_isolate()) {
315 shared_space_isolate_ = nullptr;
316 }
317 } else {
318 // The shared space isolate needs to be removed last.
319 DCHECK(!isolate->is_shared_space_isolate());
320 }
321
322 CHECK_EQ(isolates_.erase(isolate), 1);
323
324 if (main_isolate_ == isolate) {
325 if (isolates_.empty()) {
326 main_isolate_ = nullptr;
327 } else {
328 main_isolate_ = *isolates_.begin();
329 }
330 }
331}
332
333// static
334IsolateGroup* IsolateGroup::New() {
335 if (!CanCreateNewGroups()) {
336 FATAL(
337 "Creation of new isolate groups requires enabling "
338 "multiple pointer compression cages at build-time");
339 }
340
341 IsolateGroup* group = new IsolateGroup;
342#ifdef V8_ENABLE_SANDBOX
343 Sandbox* sandbox = Sandbox::New(GetPlatformVirtualAddressSpace());
344 group->Initialize(false, sandbox);
345#else
346 group->Initialize(false);
347#endif
349
350 // We need to set this early, because it is needed while initializing the
351 // external reference table, e.g. in the js_dispatch_table_address and
352 // code_pointer_table_address functions. This is also done in
353 // IsolateGroup::InitializeOncePerProcess for the single-IsolateGroup
354 // configurations.
355 IsolateGroupAccessScope group_access_scope(group);
356 ExternalReferenceTable::InitializeOncePerIsolateGroup(
357 group->external_ref_table());
358 return group;
359}
360
361// static
362void IsolateGroup::TearDownOncePerProcess() {
363 IsolateGroup* group = GetDefault();
364 CHECK_EQ(group->reference_count_.load(), 1);
366 group->Release();
367 default_isolate_group_ = nullptr;
368}
369
370#ifdef V8_ENABLE_SANDBOX
371void SandboxedArrayBufferAllocator::LazyInitialize(Sandbox* sandbox) {
372 base::MutexGuard guard(&mutex_);
373 if (is_initialized()) {
374 return;
375 }
376 CHECK(sandbox->is_initialized());
377 sandbox_ = sandbox;
378 constexpr size_t max_backing_memory_size = 8ULL * GB;
379 constexpr size_t min_backing_memory_size = 1ULL * GB;
380 size_t backing_memory_size = max_backing_memory_size;
381 Address backing_memory_base = 0;
382 while (!backing_memory_base &&
383 backing_memory_size >= min_backing_memory_size) {
384 backing_memory_base = sandbox_->address_space()->AllocatePages(
385 VirtualAddressSpace::kNoHint, backing_memory_size, kChunkSize,
386 PagePermissions::kNoAccess);
387 if (!backing_memory_base) {
388 backing_memory_size /= 2;
389 }
390 }
391 if (!backing_memory_base) {
392 V8::FatalProcessOutOfMemory(
393 nullptr, "Could not reserve backing memory for ArrayBufferAllocators");
394 }
395 DCHECK(IsAligned(backing_memory_base, kChunkSize));
396
397 region_alloc_ = std::make_unique<base::RegionAllocator>(
398 backing_memory_base, backing_memory_size, kAllocationGranularity);
399 end_of_accessible_region_ = region_alloc_->begin();
400
401 // Install an on-merge callback to discard or decommit unused pages.
402 region_alloc_->set_on_merge_callback([this](Address start, size_t size) {
403 mutex_.AssertHeld();
404 Address end = start + size;
405 if (end == region_alloc_->end() &&
406 start <= end_of_accessible_region_ - kChunkSize) {
407 // Can shrink the accessible region.
408 Address new_end_of_accessible_region = RoundUp(start, kChunkSize);
409 size_t size_to_decommit =
410 end_of_accessible_region_ - new_end_of_accessible_region;
411 if (!sandbox_->address_space()->DecommitPages(
412 new_end_of_accessible_region, size_to_decommit)) {
413 V8::FatalProcessOutOfMemory(nullptr, "SandboxedArrayBufferAllocator()");
414 }
415 end_of_accessible_region_ = new_end_of_accessible_region;
416 } else if (size >= 2 * kChunkSize) {
417 // Can discard pages. The pages stay accessible, so the size of the
418 // accessible region doesn't change.
419 Address chunk_start = RoundUp(start, kChunkSize);
420 Address chunk_end = RoundDown(start + size, kChunkSize);
421 if (!sandbox_->address_space()->DiscardSystemPages(
422 chunk_start, chunk_end - chunk_start)) {
423 V8::FatalProcessOutOfMemory(nullptr, "SandboxedArrayBufferAllocator()");
424 }
425 }
426 });
427}
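// Illustrative sketch (annotation, not part of isolate-group.cc): the
// reservation loop above asks for 8 GB of backing memory and halves the
// request on failure until it would drop below 1 GB. The same back-off
// strategy in self-contained form, with a hypothetical TryReserve() standing
// in for the sandbox's address space:

#include <cstddef>
#include <cstdint>

namespace backoff_sketch {

constexpr size_t kGB = size_t{1} << 30;

// Hypothetical reservation primitive: returns 0 on failure.
uintptr_t TryReserve(size_t size);

uintptr_t ReserveWithBackoff(size_t max_size = 8 * kGB, size_t min_size = 1 * kGB) {
  size_t size = max_size;
  uintptr_t base = 0;
  // Halve the requested size until the reservation succeeds or the request
  // falls below the minimum we are willing to accept.
  while (!base && size >= min_size) {
    base = TryReserve(size);
    if (!base) size /= 2;
  }
  return base;  // 0 means even the minimum reservation failed.
}

}  // namespace backoff_sketch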
428
429SandboxedArrayBufferAllocator::~SandboxedArrayBufferAllocator() {
430 // The sandbox may already have been torn down, in which case there's no
431 // need to free any memory.
432 if (is_initialized() && sandbox_->is_initialized()) {
433 sandbox_->address_space()->FreePages(region_alloc_->begin(),
434 region_alloc_->size());
435 }
436}
437
438void* SandboxedArrayBufferAllocator::Allocate(size_t length) {
439 base::MutexGuard guard(&mutex_);
440
441 length = RoundUp(length, kAllocationGranularity);
442 Address region = region_alloc_->AllocateRegion(length);
443 if (region == base::RegionAllocator::kAllocationFailure) return nullptr;
444
445 // Check if the memory is inside the accessible region. If not, grow it.
446 Address end = region + length;
447 size_t length_to_memset = length;
448 if (end > end_of_accessible_region_) {
449 Address new_end_of_accessible_region = RoundUp(end, kChunkSize);
450 size_t size = new_end_of_accessible_region - end_of_accessible_region_;
451 if (!sandbox_->address_space()->SetPagePermissions(
452 end_of_accessible_region_, size, PagePermissions::kReadWrite)) {
453 if (!region_alloc_->FreeRegion(region)) {
454 V8::FatalProcessOutOfMemory(
455 nullptr, "SandboxedArrayBufferAllocator::Allocate()");
456 }
457 return nullptr;
458 }
459
460 // The pages that were inaccessible are guaranteed to be zeroed, so only
461 // memset until the previous end of the accessible region.
462 length_to_memset = end_of_accessible_region_ - region;
463 end_of_accessible_region_ = new_end_of_accessible_region;
464 }
465
466 void* mem = reinterpret_cast<void*>(region);
467 memset(mem, 0, length_to_memset);
468 return mem;
469}
470
471void SandboxedArrayBufferAllocator::Free(void* data) {
472 base::MutexGuard guard(&mutex_);
473 region_alloc_->FreeRegion(reinterpret_cast<Address>(data));
474}
475
476PageAllocator* SandboxedArrayBufferAllocator::page_allocator() {
477 return sandbox_->page_allocator();
478}
479
480SandboxedArrayBufferAllocator*
481IsolateGroup::GetSandboxedArrayBufferAllocator() {
482 // TODO(342905186): Consider initializing it during IsolateGroup
483 // initialization instead of doing it lazily.
484 backend_allocator_.LazyInitialize(sandbox());
485 return &backend_allocator_;
486}
487
488#endif // V8_ENABLE_SANDBOX
489
490OptimizingCompileTaskExecutor*
491IsolateGroup::optimizing_compile_task_executor() {
492 return optimizing_compile_task_executor_.get();
493}
494
495} // namespace internal
496} // namespace v8