V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
read-only-spaces.cc
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/read-only-spaces.h"

#include <memory>

#include "include/v8-platform.h"
#include "src/base/logging.h"
#include "src/common/globals.h"
#include "src/heap/heap-inl.h"

namespace v8 {
namespace internal {

ReadOnlyArtifacts::~ReadOnlyArtifacts() {
  // SharedReadOnlySpace is aware that it doesn't actually own the pages.
  shared_read_only_space_->TearDown(nullptr);

  for (ReadOnlyPageMetadata* metadata : pages_) {
    void* chunk_address = reinterpret_cast<void*>(metadata->ChunkAddress());
    size_t size =
        RoundUp(metadata->size(), page_allocator_->AllocatePageSize());
    CHECK(page_allocator_->FreePages(chunk_address, size));
    delete metadata;
  }
}

void ReadOnlyArtifacts::Initialize(Isolate* isolate,
                                   std::vector<ReadOnlyPageMetadata*>&& pages,
                                   const AllocationStats& stats) {
  page_allocator_ = isolate->isolate_group()->page_allocator();
  pages_ = std::move(pages);
  stats_ = stats;
  shared_read_only_space_ =
      std::make_unique<SharedReadOnlySpace>(isolate->heap(), this);
}

void ReadOnlyArtifacts::ReinstallReadOnlySpace(Isolate* isolate) {
  isolate->heap()->ReplaceReadOnlySpace(shared_read_only_space());
}

void ReadOnlyArtifacts::VerifyHeapAndSpaceRelationships(Isolate* isolate) {
  DCHECK_EQ(read_only_heap()->read_only_space(), shared_read_only_space());

  // Confirm the Isolate is using the shared ReadOnlyHeap and ReadOnlySpace.
  DCHECK_EQ(read_only_heap(), isolate->read_only_heap());
  DCHECK_EQ(shared_read_only_space(), isolate->heap()->read_only_space());
}

void ReadOnlyArtifacts::set_read_only_heap(
    std::unique_ptr<ReadOnlyHeap> read_only_heap) {
  read_only_heap_ = std::move(read_only_heap);
}

void ReadOnlyArtifacts::InitializeChecksum(
    SnapshotData* read_only_snapshot_data) {
#ifdef DEBUG
  read_only_blob_checksum_ = Checksum(read_only_snapshot_data->Payload());
#endif  // DEBUG
}

void ReadOnlyArtifacts::VerifyChecksum(SnapshotData* read_only_snapshot_data,
                                       bool read_only_heap_created) {
#ifdef DEBUG
  if (read_only_blob_checksum_) {
    // The read-only heap was set up from a snapshot. Make sure it's always
    // the same snapshot.
    uint32_t snapshot_checksum = Checksum(read_only_snapshot_data->Payload());
    CHECK_WITH_MSG(snapshot_checksum,
                   "Attempt to create the read-only heap after already "
                   "creating from a snapshot.");
    if (!v8_flags.stress_snapshot) {
      // --stress-snapshot is only intended to check how well the
      // serializer/deserializer copes with unexpected objects, and is not
      // intended to test whether the newly deserialized Isolate would actually
      // work since it serializes a currently running Isolate, which is not
      // supported. As a result, it's possible that it will create a new
      // read-only snapshot that is not compatible with the original one (for
      // instance due to the string table being re-ordered). Since we won't
      // actually use that new Isolate, we're ok with any potential corruption.
      // See crbug.com/1043058.
      CHECK_EQ(read_only_blob_checksum_, snapshot_checksum);
    }
  } else {
    // If there's no checksum, then that means the read-only heap objects are
    // being created.
    CHECK(read_only_heap_created);
  }
#endif  // DEBUG
}

// -----------------------------------------------------------------------------
// ReadOnlySpace implementation

ReadOnlySpace::ReadOnlySpace(Heap* heap) : BaseSpace(heap, RO_SPACE) {}

// Needs to be defined in the cc file to force the vtable to be emitted in
// component builds.
ReadOnlySpace::~ReadOnlySpace() = default;

void SharedReadOnlySpace::TearDown(MemoryAllocator* memory_allocator) {
  // SharedReadOnlySpaces do not tear down their own pages since they are
  // freed by the ReadOnlyArtifacts that contains them.
  pages_.clear();
  accounting_stats_.Clear();
}

void ReadOnlySpace::TearDown(MemoryAllocator* memory_allocator) {
  for (ReadOnlyPageMetadata* chunk : pages_) {
    memory_allocator->FreeReadOnlyPage(chunk);
  }
  pages_.clear();
  accounting_stats_.Clear();
}

void ReadOnlySpace::DetachPagesAndAddToArtifacts(ReadOnlyArtifacts* artifacts) {
  Heap* heap = ReadOnlySpace::heap();
  // ReadOnlySpace pages are directly shared between all heaps in
  // the isolate group and so must be unregistered from
  // their originating allocator.
  Seal(SealMode::kDetachFromHeapAndUnregisterMemory);
  artifacts->Initialize(heap->isolate(), std::move(pages_), accounting_stats_);
}

ReadOnlyPageMetadata::ReadOnlyPageMetadata(Heap* heap, BaseSpace* space,
                                           size_t chunk_size,
                                           Address area_start, Address area_end,
                                           VirtualMemory reservation)
    : MemoryChunkMetadata(heap, space, chunk_size, area_start, area_end,
                          std::move(reservation)) {
  allocated_bytes_ = 0;
}

void ReadOnlyPageMetadata::MakeHeaderRelocatable() {
  heap_ = nullptr;
  owner_ = nullptr;
  reservation_.Reset();
}

void ReadOnlySpace::SetPermissionsForPages(MemoryAllocator* memory_allocator,
                                           PageAllocator::Permission access) {
  for (MemoryChunkMetadata* chunk : pages_) {
    // Read-only pages don't have a valid reservation object, so get the
    // page allocator manually.
    v8::PageAllocator* page_allocator =
        memory_allocator->page_allocator(RO_SPACE);
    CHECK(SetPermissions(page_allocator, chunk->ChunkAddress(), chunk->size(),
                         access));
  }
}

// After we have booted, we have created a map which represents free space
// on the heap. If there was already a free list then the elements on it
// were created with the wrong FreeSpaceMap (normally nullptr), so we need to
// fix them.
void ReadOnlySpace::RepairFreeSpacesAfterDeserialization() {
  MemoryChunkMetadata::UpdateHighWaterMark(top_);
  // Each page may have a small free space that is not tracked by a free list.
  // Those free spaces still contain null as their map pointer.
  // Overwrite them with new fillers.
  for (MemoryChunkMetadata* chunk : pages_) {
    Address start = chunk->HighWaterMark();
    Address end = chunk->area_end();
    // Put a filler object in the gap between the end of the allocated objects
    // and the end of the allocatable area.
    if (start < end) {
      heap()->CreateFillerObjectAt(start, static_cast<int>(end - start));
    }
  }
}

void ReadOnlySpace::Seal(SealMode ro_mode) {
  DCHECK(!is_marked_read_only_);

  FreeLinearAllocationArea();
  is_marked_read_only_ = true;
  auto* memory_allocator = heap()->memory_allocator();

  if (ro_mode != SealMode::kDoNotDetachFromHeap) {
    heap_ = nullptr;
    for (ReadOnlyPageMetadata* p : pages_) {
      if (ro_mode == SealMode::kDetachFromHeapAndUnregisterMemory) {
        memory_allocator->UnregisterReadOnlyPage(p);
      }
      p->MakeHeaderRelocatable();
    }
  }

  SetPermissionsForPages(memory_allocator, PageAllocator::kRead);
}

bool ReadOnlySpace::ContainsSlow(Address addr) const {
  MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
  for (MemoryChunkMetadata* metadata : pages_) {
    if (metadata->Chunk() == chunk) return true;
  }
  return false;
}

namespace {
// Only iterates over a single chunk as the chunk iteration is done externally.
class ReadOnlySpaceObjectIterator : public ObjectIterator {
 public:
  ReadOnlySpaceObjectIterator(const Heap* heap, const ReadOnlySpace* space,
                              MemoryChunkMetadata* chunk)
      : cur_addr_(chunk->area_start()),
        cur_end_(chunk->area_end()),
        space_(space) {}

  // Advance to the next object, skipping free spaces and other fillers and
  // skipping the special garbage section of which there is one per space.
  // Returns a null object when the iteration has ended.
  Tagged<HeapObject> Next() override {
    while (cur_addr_ != cur_end_) {
      if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
        cur_addr_ = space_->limit();
        continue;
      }
      Tagged<HeapObject> obj = HeapObject::FromAddress(cur_addr_);
      const int obj_size = obj->Size();
      cur_addr_ += ALIGN_TO_ALLOCATION_ALIGNMENT(obj_size);
      DCHECK_LE(cur_addr_, cur_end_);
      if (!IsFreeSpaceOrFiller(obj)) {
        DCHECK_OBJECT_SIZE(obj_size);
        return obj;
      }
    }
    return HeapObject();
  }

  Address cur_addr_;  // Current iteration point.
  Address cur_end_;   // End iteration point.
  const ReadOnlySpace* const space_;
};
}  // namespace

#ifdef VERIFY_HEAP
void ReadOnlySpace::Verify(Isolate* isolate,
                           SpaceVerificationVisitor* visitor) const {
  bool allocation_pointer_found_in_space = top_ == limit_;

  for (MemoryChunkMetadata* page : pages_) {
    CHECK_NULL(page->owner());

    visitor->VerifyPage(page);

    if (page == PageMetadata::FromAllocationAreaAddress(top_)) {
      allocation_pointer_found_in_space = true;
    }
    ReadOnlySpaceObjectIterator it(isolate->heap(), this, page);
    Address end_of_previous_object = page->area_start();
    Address top = page->area_end();

    for (Tagged<HeapObject> object = it.Next(); !object.is_null();
         object = it.Next()) {
      CHECK(end_of_previous_object <= object.address());

      visitor->VerifyObject(object);

      // All the interior pointers should be contained in the heap.
      int size = object->Size();
      CHECK(object.address() + size <= top);
      end_of_previous_object = object.address() + size;
    }

    visitor->VerifyPageDone(page);
  }
  CHECK(allocation_pointer_found_in_space);

#ifdef DEBUG
  VerifyCounters(isolate->heap());
#endif
}

#ifdef DEBUG
void ReadOnlySpace::VerifyCounters(Heap* heap) const {
  size_t total_capacity = 0;
  size_t total_allocated = 0;
  for (MemoryChunkMetadata* page : pages_) {
    total_capacity += page->area_size();
    ReadOnlySpaceObjectIterator it(heap, this, page);
    size_t real_allocated = 0;
    for (Tagged<HeapObject> object = it.Next(); !object.is_null();
         object = it.Next()) {
      if (!IsFreeSpaceOrFiller(object)) {
        real_allocated += object->Size();
      }
    }
    total_allocated += page->allocated_bytes();
    // The real size can be smaller than the accounted size if array trimming
    // or object slack tracking happened after sweeping.
    CHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
    CHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
  }
  CHECK_EQ(total_capacity, accounting_stats_.Capacity());
  CHECK_EQ(total_allocated, accounting_stats_.Size());
}
#endif  // DEBUG
#endif  // VERIFY_HEAP

size_t ReadOnlySpace::CommittedPhysicalMemory() const {
  if (!base::OS::HasLazyCommits()) return CommittedMemory();
  MemoryChunkMetadata::UpdateHighWaterMark(top_);
  size_t size = 0;
  for (auto* chunk : pages_) {
    size += chunk->size();
  }

  return size;
}

void ReadOnlySpace::FreeLinearAllocationArea() {
  // Mark the old linear allocation area with a free space map so it can be
  // skipped when scanning the heap.
  if (top_ == kNullAddress) {
    DCHECK_EQ(kNullAddress, limit_);
    return;
  }

  heap()->CreateFillerObjectAt(top_, static_cast<int>(limit_ - top_));

  MemoryChunkMetadata::UpdateHighWaterMark(top_);

  top_ = kNullAddress;
  limit_ = kNullAddress;
}

void ReadOnlySpace::EnsurePage() {
  if (pages_.empty()) {
    EnsureSpaceForAllocation(1);
  }
  CHECK(!pages_.empty());
  // For all configurations where static roots are supported the read only roots
  // are currently allocated in the first page of the cage.
  CHECK_IMPLIES(V8_STATIC_ROOTS_BOOL,
                heap_->isolate()->cage_base() == pages_.back()->ChunkAddress());
}

namespace {

constexpr inline int ReadOnlyAreaSize() {
  return static_cast<int>(
      MemoryChunkLayout::AllocatableMemoryInMemoryChunk(RO_SPACE));
}

}  // namespace

void ReadOnlySpace::EnsureSpaceForAllocation(int size_in_bytes) {
  if (top_ + size_in_bytes <= limit_) {
    return;
  }

  DCHECK_GE(size_in_bytes, 0);

  FreeLinearAllocationArea();

  ReadOnlyPageMetadata* metadata =
      heap()->memory_allocator()->AllocateReadOnlyPage(this);
  CHECK_NOT_NULL(metadata);

  capacity_ += ReadOnlyAreaSize();

  accounting_stats_.IncreaseCapacity(metadata->area_size());
  AccountCommitted(metadata->size());
  pages_.push_back(metadata);

  heap()->CreateFillerObjectAt(metadata->area_start(),
                               static_cast<int>(metadata->area_size()));

  top_ = metadata->area_start();
  limit_ = metadata->area_end();
}

Tagged<HeapObject> ReadOnlySpace::TryAllocateLinearlyAligned(
    int size_in_bytes, AllocationAlignment alignment) {
  size_in_bytes = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);
  Address current_top = top_;
  int filler_size = Heap::GetFillToAlign(current_top, alignment);

  Address new_top = current_top + filler_size + size_in_bytes;
  if (new_top > limit_) return HeapObject();

  // Allocation always occurs in the last chunk for RO_SPACE.
  MemoryChunkMetadata* chunk = pages_.back();
  int allocated_size = filler_size + size_in_bytes;
  accounting_stats_.IncreaseAllocatedBytes(allocated_size, chunk);
  chunk->IncreaseAllocatedBytes(allocated_size);

  top_ = new_top;
  if (filler_size > 0) {
    return heap()->PrecedeWithFiller(HeapObject::FromAddress(current_top),
                                     filler_size);
  }

  return HeapObject::FromAddress(current_top);
}

AllocationResult ReadOnlySpace::AllocateRawAligned(
    int size_in_bytes, AllocationAlignment alignment) {
  DCHECK(!IsDetached());
  size_in_bytes = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);
  int allocation_size = size_in_bytes;

  Tagged<HeapObject> object =
      TryAllocateLinearlyAligned(allocation_size, alignment);
  if (object.is_null()) {
    // We don't know exactly how much filler we need to align until space is
    // allocated, so assume the worst case. (See the standalone sketch after
    // this listing.)
    EnsureSpaceForAllocation(allocation_size +
                             Heap::GetMaximumFillToAlign(alignment));
    allocation_size = size_in_bytes;
    object = TryAllocateLinearlyAligned(size_in_bytes, alignment);
    CHECK(!object.is_null());
  }
  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);

  return AllocationResult::FromObject(object);
}

AllocationResult ReadOnlySpace::AllocateRawUnaligned(int size_in_bytes) {
  DCHECK(!IsDetached());
  size_in_bytes = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);
  EnsureSpaceForAllocation(size_in_bytes);
  Address current_top = top_;
  Address new_top = current_top + size_in_bytes;
  DCHECK_LE(new_top, limit_);
  top_ = new_top;
  Tagged<HeapObject> object = HeapObject::FromAddress(current_top);

  DCHECK(!object.is_null());
  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);

  // Allocation always occurs in the last chunk for RO_SPACE.
  MemoryChunkMetadata* chunk = pages_.back();
  accounting_stats_.IncreaseAllocatedBytes(size_in_bytes, chunk);
  chunk->IncreaseAllocatedBytes(size_in_bytes);

  return AllocationResult::FromObject(object);
}

AllocationResult ReadOnlySpace::AllocateRaw(int size_in_bytes,
                                            AllocationAlignment alignment) {
  return USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned
             ? AllocateRawAligned(size_in_bytes, alignment)
             : AllocateRawUnaligned(size_in_bytes);
}

size_t ReadOnlyPageMetadata::ShrinkToHighWaterMark() {
  // Shrink pages to high water mark. The water mark points either to a filler
  // or the area_end. (The rounding below is illustrated in a standalone sketch
  // after this listing.)
  Tagged<HeapObject> filler = HeapObject::FromAddress(HighWaterMark());
  if (filler.address() == area_end()) return 0;
  CHECK(IsFreeSpaceOrFiller(filler));
  DCHECK_EQ(filler.address() + filler->Size(), area_end());

  size_t unused = RoundDown(static_cast<size_t>(area_end() - filler.address()),
                            MemoryAllocator::GetCommitPageSize());
  if (unused > 0) {
    DCHECK_EQ(0u, unused % MemoryAllocator::GetCommitPageSize());
    if (v8_flags.trace_gc_verbose) {
      PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
                   reinterpret_cast<void*>(this),
                   reinterpret_cast<void*>(area_end()),
                   reinterpret_cast<void*>(area_end() - unused));
    }
    heap()->CreateFillerObjectAt(
        filler.address(),
        static_cast<int>(area_end() - filler.address() - unused));
    heap()->memory_allocator()->PartialFreeMemory(
        this, ChunkAddress() + size() - unused, unused, area_end() - unused);
    if (filler.address() != area_end()) {
      CHECK(IsFreeSpaceOrFiller(filler));
      CHECK_EQ(filler.address() + filler->Size(), area_end());
    }
  }
  return unused;
}

void ReadOnlySpace::ShrinkPages() {
  MemoryChunkMetadata::UpdateHighWaterMark(top_);
  heap()->CreateFillerObjectAt(top_, static_cast<int>(limit_ - top_));

  for (ReadOnlyPageMetadata* page : pages_) {
    DCHECK(page->Chunk()->IsFlagSet(MemoryChunk::NEVER_EVACUATE));
    size_t unused = page->ShrinkToHighWaterMark();
    capacity_ -= unused;
    accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
    AccountUncommitted(unused);
  }
  limit_ = pages_.back()->area_end();
}

SharedReadOnlySpace::SharedReadOnlySpace(Heap* heap,
                                         ReadOnlyArtifacts* artifacts)
    : SharedReadOnlySpace(heap) {
  accounting_stats_ = artifacts->accounting_stats();
  pages_ = artifacts->pages();
}

size_t ReadOnlySpace::IndexOf(const MemoryChunkMetadata* chunk) const {
  for (size_t i = 0; i < pages_.size(); i++) {
    if (chunk == pages_[i]) return i;
  }
  UNREACHABLE();
}

size_t ReadOnlySpace::AllocateNextPage() {
  ReadOnlyPageMetadata* page =
      heap_->memory_allocator()->AllocateReadOnlyPage(this);
  capacity_ += ReadOnlyAreaSize();
  AccountCommitted(page->size());
  pages_.push_back(page);
  return pages_.size() - 1;
}

size_t ReadOnlySpace::AllocateNextPageAt(Address pos) {
  CHECK(IsAligned(pos, kRegularPageSize));
  ReadOnlyPageMetadata* page =
      heap_->memory_allocator()->AllocateReadOnlyPage(this, pos);
  if (!page) {
    heap_->FatalProcessOutOfMemory("ReadOnly allocation failure");
  }
  // If this fails we got a wrong page. This means something allocated a page in
  // the shared cage before us, stealing our required page (i.e.,
  // ReadOnlyHeap::SetUp was called too late).
  CHECK_EQ(pos, page->ChunkAddress());
  capacity_ += ReadOnlyAreaSize();
  AccountCommitted(page->size());
  pages_.push_back(page);
  return pages_.size() - 1;
}

void ReadOnlySpace::InitializePageForDeserialization(
    ReadOnlyPageMetadata* page, size_t area_size_in_bytes) {
  page->IncreaseAllocatedBytes(area_size_in_bytes);
  limit_ = top_ = page->area_start() + area_size_in_bytes;
  page->high_water_mark_ = page->Offset(top_);
}

void ReadOnlySpace::FinalizeSpaceForDeserialization() {
  // The ReadOnlyRoots table is now initialized. Create fillers, shrink pages,
  // and update accounting stats.
  for (ReadOnlyPageMetadata* page : pages_) {
    Address top = page->ChunkAddress() + page->high_water_mark_;
    heap()->CreateFillerObjectAt(top, static_cast<int>(page->area_end() - top));
    page->ShrinkToHighWaterMark();
    accounting_stats_.IncreaseCapacity(page->area_size());
    accounting_stats_.IncreaseAllocatedBytes(page->allocated_bytes(), page);
  }
}

}  // namespace internal
}  // namespace v8
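
The aligned allocation path above (TryAllocateLinearlyAligned and AllocateRawAligned) bumps top_ past a filler placed in front of the object, and when the current page cannot hold the request it reserves Heap::GetMaximumFillToAlign(alignment) extra bytes, because the exact filler is unknown until the new page's start address is fixed. The following standalone sketch mirrors that arithmetic; it is illustrative only, with simplified GetFillToAlign/GetMaximumFillToAlign helpers that take a plain byte alignment rather than V8's AllocationAlignment enum, and made-up addresses.

// Standalone sketch (not part of read-only-spaces.cc): bump-pointer allocation
// with an alignment filler in front of the object.
#include <cassert>
#include <cstdint>
#include <iostream>

using Address = uintptr_t;

// Bytes of filler needed so that `top + filler` is `alignment`-aligned.
int GetFillToAlign(Address top, int alignment) {
  return static_cast<int>((alignment - (top % alignment)) % alignment);
}

// Worst-case filler: one alignment step minus one byte. The allocator reserves
// this much extra before retrying an aligned allocation on a fresh page.
int GetMaximumFillToAlign(int alignment) { return alignment - 1; }

int main() {
  Address top = 0x1004;    // current allocation pointer (invented)
  Address limit = 0x2000;  // end of the linear allocation area (invented)
  const int alignment = 16;
  const int size_in_bytes = 40;

  int filler_size = GetFillToAlign(top, alignment);
  Address object_address = top + filler_size;  // filler precedes the object
  Address new_top = object_address + size_in_bytes;
  assert(new_top <= limit);  // otherwise a new page would be needed

  std::cout << "filler=" << filler_size << ", object at 0x" << std::hex
            << object_address << ", new top 0x" << new_top << "\n";
  return 0;
}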
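
ReadOnlyPageMetadata::ShrinkToHighWaterMark() above returns the tail of a page past its high water mark to the OS, but only in whole commit pages; whatever is left before the next page boundary stays committed and is covered by a shortened filler object. A minimal sketch of that rounding, assuming a 4 KiB commit page size and invented offsets:

// Standalone sketch (not part of read-only-spaces.cc): how much of a page's
// tail can be freed when shrinking to the high water mark.
#include <cstddef>
#include <iostream>

constexpr size_t RoundDown(size_t x, size_t m) { return x - (x % m); }

int main() {
  const size_t kCommitPageSize = 4096;            // assumed commit granularity
  const size_t area_end = 256 * 1024;             // end offset of the area
  const size_t high_water_mark = 100 * 1024 + 8;  // first unused byte (filler)

  // Free only whole commit pages; the partial page up to the next boundary
  // stays committed under a shortened filler.
  size_t unused = RoundDown(area_end - high_water_mark, kCommitPageSize);
  size_t new_area_end = area_end - unused;

  std::cout << "freeing " << unused << " bytes; filler now spans ["
            << high_water_mark << ", " << new_area_end << ")\n";
  return 0;
}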