v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
memory-chunk-metadata.h
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MEMORY_CHUNK_METADATA_H_
#define V8_HEAP_MEMORY_CHUNK_METADATA_H_

#include <bit>
#include <type_traits>
#include <unordered_map>

#include "src/base/flags.h"
#include "src/base/hashing.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/marking.h"
#include "src/heap/memory-chunk.h"
#include "src/objects/heap-object.h"
#include "src/utils/allocation.h"

namespace v8 {
namespace internal {

namespace debug_helper_internal {
class ReadStringVisitor;
}  // namespace debug_helper_internal

class BaseSpace;

class MemoryChunkMetadata {
 public:
  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
  V8_INLINE static MemoryChunkMetadata* FromAddress(Address a);

  // Only works if the object is in the first kPageSize of the MemoryChunk.
  V8_INLINE static MemoryChunkMetadata* FromHeapObject(Tagged<HeapObject> o);

  // Only works if the object is in the first kPageSize of the MemoryChunk.
  V8_INLINE static MemoryChunkMetadata* FromHeapObject(
      const HeapObjectLayout* o);

  V8_INLINE static void UpdateHighWaterMark(Address mark);

  MemoryChunkMetadata(Heap* heap, BaseSpace* space, size_t chunk_size,
                      Address area_start, Address area_end,
                      VirtualMemory reservation);
  ~MemoryChunkMetadata();

  Address ChunkAddress() const { return Chunk()->address(); }
  Address MetadataAddress() const { return reinterpret_cast<Address>(this); }

  // Returns the offset of a given address to this page.
  inline size_t Offset(Address a) const { return Chunk()->Offset(a); }

  size_t size() const { return size_; }
  void set_size(size_t size) { size_ = size; }

  Address area_start() const { return area_start_; }

  Address area_end() const { return area_end_; }
  void set_area_end(Address area_end) { area_end_ = area_end; }

  size_t area_size() const {
    return static_cast<size_t>(area_end() - area_start());
  }

  Heap* heap() const {
    DCHECK_NOT_NULL(heap_);
    return heap_;
  }

  // Gets the chunk's owner or null if the space has been detached.
  BaseSpace* owner() const { return owner_; }
  void set_owner(BaseSpace* space) { owner_ = space; }

  bool InSharedSpace() const;
  bool InTrustedSpace() const;

  bool IsWritable() const {
    // If this is a read-only space chunk but heap_ is non-null, it has not yet
    // been sealed and can be written to.
    return !Chunk()->InReadOnlySpace() || heap_ != nullptr;
  }

  bool IsMutablePageMetadata() const { return owner() != nullptr; }

  bool Contains(Address addr) const {
    return addr >= area_start() && addr < area_end();
  }

  // Checks whether |addr| can be a limit of addresses in this page. It's a
  // limit if it's in the page, or if it's just after the last byte of the page.
  bool ContainsLimit(Address addr) const {
    return addr >= area_start() && addr <= area_end();
  }

  size_t wasted_memory() const { return wasted_memory_; }
  void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
  size_t allocated_bytes() const { return allocated_bytes_; }

  Address HighWaterMark() const { return ChunkAddress() + high_water_mark_; }

  VirtualMemory* reserved_memory() { return &reservation_; }

  void ResetAllocationStatistics() {
    allocated_bytes_ = area_size();
    wasted_memory_ = 0;
  }

  void IncreaseAllocatedBytes(size_t bytes) {
    DCHECK_LE(bytes, area_size());
    allocated_bytes_ += bytes;
  }

  void DecreaseAllocatedBytes(size_t bytes) {
    DCHECK_LE(bytes, area_size());
    DCHECK_GE(allocated_bytes(), bytes);
    allocated_bytes_ -= bytes;
  }

  MemoryChunk* Chunk() { return MemoryChunk::FromAddress(area_start()); }
  const MemoryChunk* Chunk() const {
    return MemoryChunk::FromAddress(area_start());
  }

 protected:
#ifdef THREAD_SANITIZER
  // Perform a dummy acquire load to tell TSAN that there is no data race in
  // mark-bit initialization. See MutablePageMetadata::Initialize for the
  // corresponding release store.
  void SynchronizedHeapLoad() const;
  void SynchronizedHeapStore();
  friend class MemoryChunk;
#endif

  // If the chunk needs to remember its memory reservation, it is stored here.
  VirtualMemory reservation_;

  // Bytes allocated on the page, which include all objects on the page and the
  // linear allocation area.
  size_t allocated_bytes_;
  // Freed memory that was not added to the free list.
  size_t wasted_memory_ = 0;

  // Assuming the initial allocation on a page is sequential, counts the
  // highest number of bytes ever allocated on the page.
  std::atomic<intptr_t> high_water_mark_;

  // Overall size of the chunk, including the header and guards.
  size_t size_;

  Address area_end_;

  // The most accessed fields start at heap_ and end at
  // MutablePageMetadata::slot_set_. See
  // MutablePageMetadata::MutablePageMetadata() for details.

  // The heap this chunk belongs to. May be null for read-only chunks.
  Heap* heap_;

  // Start and end of allocatable memory on this chunk.
  Address area_start_;

  // The space owning this memory chunk.
  std::atomic<BaseSpace*> owner_;

 private:
  static constexpr intptr_t HeapOffset() {
    return offsetof(MemoryChunkMetadata, heap_);
  }

  static constexpr intptr_t AreaStartOffset() {
    return offsetof(MemoryChunkMetadata, area_start_);
  }

  // For HeapOffset().
  friend class debug_helper_internal::ReadStringVisitor;
  // For AreaStartOffset().
  friend class CodeStubAssembler;
  friend class MacroAssembler;
};

}  // namespace internal

namespace base {

// Define special hash function for chunk pointers, to be used with std data
// structures, e.g.
// std::unordered_set<MemoryChunkMetadata*, base::hash<MemoryChunkMetadata*>>.
// This hash function discards the trailing zero bits (chunk alignment).
// Notice that, when pointer compression is enabled, it also discards the
// cage base.
template <>
struct hash<const i::MemoryChunkMetadata*> {
  V8_INLINE size_t
  operator()(const i::MemoryChunkMetadata* chunk_metadata) const {
    return hash<const i::MemoryChunk*>()(chunk_metadata->Chunk());
  }
};

template <>
struct hash<i::MemoryChunkMetadata*> {
  V8_INLINE size_t operator()(i::MemoryChunkMetadata* chunk_metadata) const {
    return hash<const i::MemoryChunkMetadata*>()(chunk_metadata);
  }
};

}  // namespace base
}  // namespace v8

#endif  // V8_HEAP_MEMORY_CHUNK_METADATA_H_
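The comment above the base::hash specializations points at their intended use: keying standard containers by chunk-metadata pointers. The following is a minimal usage sketch of that idea, not part of memory-chunk-metadata.h; it assumes the surrounding V8 source tree, and the MetadataSet alias and RecordPage helper are hypothetical names introduced only for illustration.

// Hypothetical usage sketch (not part of this header): a set of pages keyed
// by the base::hash<MemoryChunkMetadata*> specialization defined above.
#include <unordered_set>

#include "src/heap/memory-chunk-metadata.h"

namespace v8 {
namespace internal {

using MetadataSet =
    std::unordered_set<MemoryChunkMetadata*, base::hash<MemoryChunkMetadata*>>;

// Deduplicates pages: all objects on the same chunk map to the same metadata
// object, so inserting per object yields one entry per page.
inline void RecordPage(MetadataSet& pages, Tagged<HeapObject> object) {
  // Only valid if |object| lies in the first kPageSize of its MemoryChunk, as
  // documented on FromHeapObject() above.
  pages.insert(MemoryChunkMetadata::FromHeapObject(object));
}

}  // namespace internal
}  // namespace v8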