   34    void* chunk_address = reinterpret_cast<void*>(metadata->ChunkAddress());
 
 
   43                                   std::vector<ReadOnlyPageMetadata*>&& pages,
 
   49      std::make_unique<SharedReadOnlySpace>(isolate->heap(), this);
 
 
   65    std::unique_ptr<ReadOnlyHeap> read_only_heap) {
 
 
   72  read_only_blob_checksum_ = Checksum(read_only_snapshot_data->Payload());
 
 
   77                                       bool read_only_heap_created) {
 
   79  if (read_only_blob_checksum_) {
 
   82    uint32_t snapshot_checksum = Checksum(read_only_snapshot_data->Payload());
 
   84                   "Attempt to create the read-only heap after already " 
   85                   "creating from a snapshot.");
 
   96      CHECK_EQ(read_only_blob_checksum_, snapshot_checksum);
 
  101    CHECK(read_only_heap_created);
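
The checksum handling excerpted above (lines 72-101) reduces to a simple contract: remember the checksum of the snapshot the read-only heap was built from, and require that any later isolate either reuses that heap or deserializes from a payload with the same checksum. The following standalone sketch illustrates that contract; it is not V8's code, and SnapshotPayload, SimpleChecksum and ReadOnlyArtifactsSketch are invented names for the example.

  #include <cassert>
  #include <cstdint>
  #include <optional>
  #include <vector>

  // Hypothetical stand-in for SnapshotData::Payload().
  using SnapshotPayload = std::vector<uint8_t>;

  // FNV-1a, purely for illustration; V8 uses its own Checksum() helper.
  uint32_t SimpleChecksum(const SnapshotPayload& payload) {
    uint32_t hash = 2166136261u;
    for (uint8_t byte : payload) hash = (hash ^ byte) * 16777619u;
    return hash;
  }

  class ReadOnlyArtifactsSketch {
   public:
    // Mirrors InitializeChecksum(): record the checksum of the snapshot the
    // read-only heap was created from.
    void InitializeChecksum(const SnapshotPayload& payload) {
      read_only_blob_checksum_ = SimpleChecksum(payload);
    }

    // Mirrors VerifyChecksum(): a later isolate must present a snapshot with
    // the same checksum, unless the read-only heap was created without one.
    void VerifyChecksum(const SnapshotPayload& payload,
                        bool read_only_heap_created) const {
      if (read_only_blob_checksum_) {
        assert(*read_only_blob_checksum_ == SimpleChecksum(payload) &&
               "snapshot checksum mismatch across isolates");
      } else {
        // No checksum recorded, so the heap must already have been created.
        assert(read_only_heap_created);
      }
    }

   private:
    std::optional<uint32_t> read_only_blob_checksum_;
  };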
 
 
  144                          std::move(reservation)) {
 
 
  202        memory_allocator->UnregisterReadOnlyPage(p);
 
  204      p->MakeHeaderRelocatable();
 
 
  214    if (metadata->Chunk() == chunk) return true;
 
 
  233    while (cur_addr_ != cur_end_) {
 
  234      if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
 
  239      const int obj_size = obj->Size();
 
  242      if (!IsFreeSpaceOrFiller(obj)) {
 
  257 void ReadOnlySpace::Verify(Isolate* isolate,
  258                            SpaceVerificationVisitor* visitor) const {
 
  259  bool allocation_pointer_found_in_space = top_ == limit_;
 
  261  for (MemoryChunkMetadata* page : pages_) {
 
  264    visitor->VerifyPage(page);
 
  267      allocation_pointer_found_in_space = true;
 
  269    ReadOnlySpaceObjectIterator it(isolate->heap(), this, page);
 
  270    Address end_of_previous_object = page->area_start();
 
  271    Address top = page->area_end();
 
  274         object = it.Next()) {
 
  275      CHECK(end_of_previous_object <= object.address());
 
  277      visitor->VerifyObject(object);
 
  280      int size = object->Size();
 
  281      CHECK(object.address() + size <= top);
 
  282      end_of_previous_object = object.address() + size;
 
  285    visitor->VerifyPageDone(page);
 
  287  CHECK(allocation_pointer_found_in_space);
 
  290  VerifyCounters(isolate->heap());
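
The Verify() excerpts above (lines 257-290) follow a common layout-verification pattern: walk every page of the space, then walk the objects on each page in address order, checking that each object starts at or after the end of the previous one and ends inside the page's usable area. A simplified, self-contained version of that pattern, with invented FakeObject/FakePage types standing in for V8's heap objects and page metadata, might look like:

  #include <cassert>
  #include <cstddef>
  #include <cstdint>
  #include <vector>

  struct FakeObject { uintptr_t address; size_t size; };
  struct FakePage {
    uintptr_t area_start;
    uintptr_t area_end;
    std::vector<FakeObject> objects;  // assumed sorted by address
  };

  // Mirrors the bounds checks in ReadOnlySpace::Verify(): objects never
  // overlap and never cross the end of the page's area.
  void VerifyPages(const std::vector<FakePage>& pages) {
    for (const FakePage& page : pages) {
      uintptr_t end_of_previous_object = page.area_start;
      for (const FakeObject& object : page.objects) {
        assert(end_of_previous_object <= object.address);
        assert(object.address + object.size <= page.area_end);
        end_of_previous_object = object.address + object.size;
      }
    }
  }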
 
  295 void ReadOnlySpace::VerifyCounters(Heap* heap) const {
 
  296  size_t total_capacity = 0;
 
  297  size_t total_allocated = 0;
 
  298  for (MemoryChunkMetadata* page : pages_) {
 
  299    total_capacity += page->area_size();
 
  300    ReadOnlySpaceObjectIterator it(heap, this, page);
 
  301    size_t real_allocated = 0;
 
  303         object = it.Next()) {
 
  305        real_allocated += object->Size();
 
  308    total_allocated += page->allocated_bytes();
 
  324  for (auto* chunk : pages_) {
 
  325    size += chunk->size();
 
 
  360 constexpr inline int ReadOnlyAreaSize() {
 
  361  return static_cast<int>(
 
  384  pages_.push_back(metadata);
 
  387                               static_cast<int>(metadata->area_size()));
 
 
  397  int filler_size = Heap::GetFillToAlign(current_top, alignment);
 
  399  Address new_top = current_top + filler_size + size_in_bytes;
 
  404  int allocated_size = filler_size + size_in_bytes;
 
  409  if (filler_size > 0) {
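
The excerpts at lines 397-409 show the core of aligned bump-pointer allocation: compute how many filler bytes are needed to bring the current top up to the requested alignment, advance the top past filler plus object, and account for both. The sketch below captures that arithmetic in isolation; it is an illustration under simplified assumptions (no filler objects are actually written, and LinearArea/GetFillToAlign are invented names), not V8's implementation.

  #include <cstddef>
  #include <cstdint>
  #include <optional>

  // Bytes needed to move `top` up to the next `alignment` boundary.
  constexpr size_t GetFillToAlign(uintptr_t top, size_t alignment) {
    return (alignment - (top % alignment)) % alignment;
  }

  struct LinearArea {
    uintptr_t top;
    uintptr_t limit;

    // Try to carve `size_in_bytes` aligned bytes out of [top, limit).
    // Returns the object address, or nullopt if the remaining area is too
    // small (the caller would then grow the area and retry).
    std::optional<uintptr_t> TryAllocateAligned(size_t size_in_bytes,
                                                size_t alignment) {
      size_t filler_size = GetFillToAlign(top, alignment);
      uintptr_t new_top = top + filler_size + size_in_bytes;
      if (new_top > limit) return std::nullopt;
      uintptr_t object_address = top + filler_size;  // filler precedes object
      top = new_top;
      return object_address;
    }
  };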
 
 
  421  int allocation_size = size_in_bytes;
 
  425  if (object.is_null()) {
 
  429                             Heap::GetMaximumFillToAlign(alignment));
 
  430    allocation_size = size_in_bytes;
 
  432    CHECK(!object.is_null());
 
 
  444  Address new_top = current_top + size_in_bytes;
 
  449  DCHECK(!object.is_null());
 
 
  472  CHECK(IsFreeSpaceOrFiller(filler));
 
  481                   reinterpret_cast<void*>(this),
  482                   reinterpret_cast<void*>(area_end()),
  483                   reinterpret_cast<void*>(area_end() - unused));
 
  491      CHECK(IsFreeSpaceOrFiller(filler));
 
 
  504    size_t unused = page->ShrinkToHighWaterMark();
 
 
  554  page->IncreaseAllocatedBytes(area_size_in_bytes);
 
  555  limit_ = top_ = page->area_start() + area_size_in_bytes;
 
  556  page->high_water_mark_ = page->Offset(top_);
 
 
  563    Address top = page->ChunkAddress() + page->high_water_mark_;
 
  565    page->ShrinkToHighWaterMark();
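
The ShrinkToHighWaterMark() calls excerpted above release the unused tail of each read-only page once allocation in the space is finished. In outline: round the page's high water mark up to the commit page size, free everything between that boundary and the page's current end, and shrink the recorded area accordingly. The sketch below shows that outline with invented helpers (SketchPage, RoundUpTo); it is not V8's code and omits the filler object and the actual call into the memory allocator.

  #include <cstddef>
  #include <cstdint>

  constexpr uintptr_t RoundUpTo(uintptr_t value, size_t granularity) {
    return (value + granularity - 1) / granularity * granularity;
  }

  // Invented page representation for the example.
  struct SketchPage {
    uintptr_t area_start;
    uintptr_t area_end;         // current end of the usable area
    uintptr_t high_water_mark;  // highest address ever allocated to
  };

  // Returns how many bytes could be handed back to the memory allocator.
  size_t ShrinkToHighWaterMark(SketchPage* page, size_t commit_page_size) {
    uintptr_t new_area_end = RoundUpTo(page->high_water_mark, commit_page_size);
    if (new_area_end >= page->area_end) return 0;  // nothing to release
    size_t unused = page->area_end - new_area_end;
    // A real implementation would also place a filler object between the high
    // water mark and new_area_end, and ask the memory allocator to actually
    // unmap [new_area_end, area_end).
    page->area_end = new_area_end;
    return unused;
  }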
 
 