v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
memory-chunk.cc
// Copyright 2023 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/memory-chunk.h"

#include "src/common/code-memory-access-inl.h"
#include "src/heap/memory-chunk-metadata.h"
#include "src/heap/trusted-range.h"
namespace v8 {
namespace internal {

// This check is here to ensure that the lower 32 bits of any real heap object
// can't overlap with the lower 32 bits of the cleared weak reference value and
// therefore it's enough to compare only the lower 32 bits of a
// Tagged<MaybeObject> in order to figure out if it's a cleared weak reference
// or not.
static_assert(kClearedWeakHeapObjectLower32 > 0);
static_assert(kClearedWeakHeapObjectLower32 < sizeof(MemoryChunk));

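The idea behind these asserts: chunk alignment divides 2^32, so an object whose address has lower 32 bits below sizeof(MemoryChunk) would have to start inside a chunk's header, which never happens; the cleared-weak-reference sentinel deliberately lives in that forbidden range. A minimal sketch of the resulting 32-bit test (the helper name is illustrative, not V8 API; it relies on the file's kClearedWeakHeapObjectLower32 constant):

// Sketch only, not part of memory-chunk.cc: per the static_asserts above, a
// cleared weak reference is recognizable from its lower 32 bits alone, since
// no live heap object's pointer can have lower 32 bits in the header range.
inline bool IsClearedWeakReference(uint32_t lower32_of_maybe_object) {
  return lower32_of_maybe_object == kClearedWeakHeapObjectLower32;
}
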
// static
constexpr MemoryChunk::MainThreadFlags MemoryChunk::kAllFlagsMask;
// static
constexpr MemoryChunk::MainThreadFlags
    MemoryChunk::kPointersToHereAreInterestingMask;
// static
constexpr MemoryChunk::MainThreadFlags
    MemoryChunk::kPointersFromHereAreInterestingMask;
// static
constexpr MemoryChunk::MainThreadFlags MemoryChunk::kEvacuationCandidateMask;
// static
constexpr MemoryChunk::MainThreadFlags MemoryChunk::kIsInYoungGenerationMask;
// static
constexpr MemoryChunk::MainThreadFlags MemoryChunk::kIsLargePageMask;
// static
constexpr MemoryChunk::MainThreadFlags
    MemoryChunk::kSkipEvacuationSlotsRecordingMask;

MemoryChunk::MemoryChunk(MainThreadFlags flags, MemoryChunkMetadata* metadata)
    : main_thread_flags_(flags)
#ifndef V8_ENABLE_SANDBOX
      ,
      metadata_(metadata)
#endif
{
#ifdef V8_ENABLE_SANDBOX
  auto metadata_index = MetadataTableIndex(address());
  MemoryChunkMetadata** metadata_pointer_table = MetadataTableAddress();
  DCHECK_IMPLIES(metadata_pointer_table[metadata_index] != nullptr,
                 metadata_pointer_table[metadata_index] == metadata);
  metadata_pointer_table[metadata_index] = metadata;
  metadata_index_ = metadata_index;
#endif
}

#ifdef V8_ENABLE_SANDBOX
// static
void MemoryChunk::ClearMetadataPointer(MemoryChunkMetadata* metadata) {
  uint32_t metadata_index = MetadataTableIndex(metadata->ChunkAddress());
  MemoryChunkMetadata** metadata_pointer_table = MetadataTableAddress();
  DCHECK_EQ(metadata_pointer_table[metadata_index], metadata);
  metadata_pointer_table[metadata_index] = nullptr;
}

// static
uint32_t MemoryChunk::MetadataTableIndex(Address chunk_address) {
  uint32_t index;
  if (V8HeapCompressionScheme::GetPtrComprCageBaseAddress(chunk_address) ==
      V8HeapCompressionScheme::base()) {
    static_assert(kPtrComprCageReservationSize == kPtrComprCageBaseAlignment);
    Tagged_t offset = V8HeapCompressionScheme::CompressAny(chunk_address);
    DCHECK_LT(offset >> kPageSizeBits, MemoryChunkConstants::kPagesInMainCage);
    index = MemoryChunkConstants::kMainCageMetadataOffset +
            (offset >> kPageSizeBits);
  } else if (TrustedRange::GetProcessWideTrustedRange()->region().contains(
                 chunk_address)) {
    Tagged_t offset = TrustedSpaceCompressionScheme::CompressAny(chunk_address);
    DCHECK_LT(offset >> kPageSizeBits,
              MemoryChunkConstants::kPagesInTrustedCage);
    index = MemoryChunkConstants::kTrustedSpaceMetadataOffset +
            (offset >> kPageSizeBits);
  } else {
    CodeRange* code_range = IsolateGroup::current()->GetCodeRange();
    DCHECK(code_range->region().contains(chunk_address));
    uint32_t offset = static_cast<uint32_t>(chunk_address - code_range->base());
    DCHECK_LT(offset >> kPageSizeBits, MemoryChunkConstants::kPagesInCodeCage);
    index = MemoryChunkConstants::kCodeRangeMetadataOffset +
            (offset >> kPageSizeBits);
  }
  DCHECK_LT(index, MemoryChunkConstants::kMetadataPointerTableSize);
  return index;
}

#endif  // V8_ENABLE_SANDBOX
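In outline, each cage (main pointer-compression cage, trusted cage, code range) owns a fixed region of the metadata pointer table, and a chunk's slot is its region's base offset plus the chunk's page number within its cage. A simplified, self-contained model of one branch of this computation (the constants and the single-region layout are assumptions for illustration, not V8's values):

#include <cstdint>

// Simplified model of MetadataTableIndex(): the chunk's page number within
// its cage, shifted into that cage's region of the table. Constants assumed.
constexpr uint32_t kSketchPageSizeBits = 18;     // 256 KiB pages (assumed)
constexpr uint32_t kSketchRegionOffset = 0;      // region base slot (assumed)

uint32_t SketchMetadataIndex(uintptr_t cage_base, uintptr_t chunk_address) {
  uint32_t offset = static_cast<uint32_t>(chunk_address - cage_base);
  return kSketchRegionOffset + (offset >> kSketchPageSizeBits);
}
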

void MemoryChunk::InitializationMemoryFence() {
  base::SeqCst_MemoryFence();

#ifdef THREAD_SANITIZER
  // Since TSAN does not process memory fences, we use the following annotation
  // to tell TSAN that there is no data race when emitting an
  // InitializationMemoryFence. Note that the other thread still needs to
  // perform MutablePageMetadata::synchronized_heap().
  Metadata()->SynchronizedHeapStore();
#ifndef V8_ENABLE_SANDBOX
  base::Release_Store(reinterpret_cast<base::AtomicWord*>(&metadata_),
                      reinterpret_cast<base::AtomicWord>(metadata_));
#else
  MemoryChunkMetadata** metadata_pointer_table = MetadataTableAddress();
  static_assert(sizeof(base::AtomicWord) == sizeof(metadata_pointer_table[0]));
  static_assert(sizeof(base::Atomic32) == sizeof(metadata_index_));
  base::Release_Store(reinterpret_cast<base::AtomicWord*>(
                          &metadata_pointer_table[metadata_index_]),
                      reinterpret_cast<base::AtomicWord>(
                          metadata_pointer_table[metadata_index_]));
  base::Release_Store(reinterpret_cast<base::Atomic32*>(&metadata_index_),
                      metadata_index_);
#endif
#endif
}

#ifdef THREAD_SANITIZER

void MemoryChunk::SynchronizedLoad() const {
#ifndef V8_ENABLE_SANDBOX
  MemoryChunkMetadata* metadata = reinterpret_cast<MemoryChunkMetadata*>(
      base::Acquire_Load(reinterpret_cast<base::AtomicWord*>(
          &(const_cast<MemoryChunk*>(this)->metadata_))));
#else
  MemoryChunkMetadata** metadata_pointer_table = MetadataTableAddress();
  static_assert(sizeof(base::AtomicWord) == sizeof(metadata_pointer_table[0]));
  static_assert(sizeof(base::Atomic32) == sizeof(metadata_index_));
  uint32_t metadata_index =
      base::Acquire_Load(reinterpret_cast<base::Atomic32*>(
          &(const_cast<MemoryChunk*>(this)->metadata_index_)));
  MemoryChunkMetadata* metadata = reinterpret_cast<MemoryChunkMetadata*>(
      base::Acquire_Load(reinterpret_cast<base::AtomicWord*>(
          &metadata_pointer_table[metadata_index])));
#endif
  metadata->SynchronizedHeapLoad();
}

bool MemoryChunk::InReadOnlySpace() const {
  // This is needed because TSAN does not process the memory fence
  // emitted after page initialization.
  SynchronizedLoad();
  return IsFlagSet(READ_ONLY_HEAP);
}

#endif  // THREAD_SANITIZER
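The store/load pairs above are the standard release/acquire publication pattern, spelled out for TSAN because it does not model V8's explicit fence. A generic sketch of the same pattern using std::atomic rather than V8's base atomics:

#include <atomic>

// Generic publication pattern mirrored by the TSAN annotations above: the
// initializing thread release-stores a pointer after filling in the object;
// readers acquire-load it before dereferencing, which orders the accesses.
struct SketchMetadata { int payload = 0; };
std::atomic<SketchMetadata*> g_published{nullptr};

void Publish(SketchMetadata* m) {
  m->payload = 42;                                  // initialize first
  g_published.store(m, std::memory_order_release);  // then publish
}

SketchMetadata* TryConsume() {
  // Returns nullptr until published; afterwards payload is guaranteed visible.
  return g_published.load(std::memory_order_acquire);
}
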

#ifdef DEBUG

bool MemoryChunk::IsTrusted() const {
  bool is_trusted = IsFlagSet(IS_TRUSTED);
#if DEBUG
  AllocationSpace id = Metadata()->owner()->identity();
  DCHECK_EQ(is_trusted, IsAnyTrustedSpace(id) || IsAnyCodeSpace(id));
#endif
  return is_trusted;
}

size_t MemoryChunk::Offset(Address addr) const {
  DCHECK_GE(addr, Metadata()->area_start());
  DCHECK_LE(addr, address() + Metadata()->size());
  return addr - address();
}

size_t MemoryChunk::OffsetMaybeOutOfRange(Address addr) const {
  DCHECK_GE(addr, Metadata()->area_start());
  return addr - address();
}

#endif  // DEBUG
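Offset() can be a plain subtraction because address() is the chunk's base and chunks are allocated at a power-of-two alignment, so the base of the chunk owning any interior address is recoverable by masking. A sketch under that assumption (the alignment constant here is illustrative, not V8's):

#include <cstddef>
#include <cstdint>

// Sketch: recover the owning chunk's base by masking the low bits; the
// offset is then the remainder. kSketchChunkAlignment is an assumed value.
constexpr uintptr_t kSketchChunkAlignment = uintptr_t{1} << 18;

uintptr_t SketchChunkBase(uintptr_t addr) {
  return addr & ~(kSketchChunkAlignment - 1);
}

size_t SketchOffset(uintptr_t addr) { return addr - SketchChunkBase(addr); }
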

void MemoryChunk::SetFlagSlow(Flag flag) {
  if (executable()) {
    RwxMemoryWriteScope scope("Set a MemoryChunk flag in executable memory.");
    SetFlagUnlocked(flag);
  } else {
    SetFlagNonExecutable(flag);
  }
}

void MemoryChunk::ClearFlagSlow(Flag flag) {
  if (executable()) {
    RwxMemoryWriteScope scope("Clear a MemoryChunk flag in executable memory.");
    ClearFlagUnlocked(flag);
  } else {
    ClearFlagNonExecutable(flag);
  }
}

// static
MemoryChunk::MainThreadFlags MemoryChunk::OldGenerationPageFlags(
    MarkingMode marking_mode, AllocationSpace space) {
  MainThreadFlags flags_to_set = NO_FLAGS;

  if (!v8_flags.sticky_mark_bits || (space != OLD_SPACE)) {
    flags_to_set |= MemoryChunk::CONTAINS_ONLY_OLD;
  }

  if (marking_mode == MarkingMode::kMajorMarking) {
    flags_to_set |= MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING |
                    MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING |
                    MemoryChunk::INCREMENTAL_MARKING |
                    MemoryChunk::IS_MAJOR_GC_IN_PROGRESS;
  } else if (IsAnySharedSpace(space)) {
    // We need to track pointers into the SHARED_SPACE for OLD_TO_SHARED.
    flags_to_set |= MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING;
  } else {
    flags_to_set |= MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING;
    if (marking_mode == MarkingMode::kMinorMarking) {
      flags_to_set |= MemoryChunk::INCREMENTAL_MARKING;
    }
  }

  return flags_to_set;
}

// static
MemoryChunk::MainThreadFlags MemoryChunk::YoungGenerationPageFlags(
    MarkingMode marking_mode) {
  MainThreadFlags flags = MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING |
                          MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING;
  if (marking_mode == MarkingMode::kMinorMarking) {
    flags |= MemoryChunk::INCREMENTAL_MARKING;
  } else if (marking_mode == MarkingMode::kMajorMarking) {
    flags |= MemoryChunk::INCREMENTAL_MARKING |
             MemoryChunk::IS_MAJOR_GC_IN_PROGRESS;
  }
  return flags;
}

void MemoryChunk::SetOldGenerationPageFlags(MarkingMode marking_mode,
                                            AllocationSpace space) {
  MainThreadFlags flags_to_set = OldGenerationPageFlags(marking_mode, space);
  MainThreadFlags flags_to_clear = NO_FLAGS;

  if (marking_mode != MarkingMode::kMajorMarking) {
    if (IsAnySharedSpace(space)) {
      // No need to track OLD_TO_NEW or OLD_TO_SHARED within the shared space.
      flags_to_clear |= MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING |
                        MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING;
    } else {
      flags_to_clear |= MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING;
      if (marking_mode != MarkingMode::kMinorMarking) {
        flags_to_clear |= MemoryChunk::INCREMENTAL_MARKING;
      }
    }
  }

  SetFlagsUnlocked(flags_to_set, flags_to_set);
  ClearFlagsUnlocked(flags_to_clear);
}
260
262 MainThreadFlags flags_to_set = YoungGenerationPageFlags(marking_mode);
263 MainThreadFlags flags_to_clear = NO_FLAGS;
264
265 if (marking_mode == MarkingMode::kNoMarking) {
267 flags_to_clear |= MemoryChunk::INCREMENTAL_MARKING;
268 }
269
270 SetFlagsNonExecutable(flags_to_set, flags_to_set);
271 ClearFlagsNonExecutable(flags_to_clear);
272}
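All of these Set/Clear paths reduce to the same idiom: MainThreadFlags is a type-safe bitmask (base::Flags over an enum), so each function builds one mask of flags to set and one to clear, then applies both in a single pass. A stripped-down sketch of that idiom with a plain enum (the flag names here are abbreviations, not V8's full set):

#include <cstdint>

// Stripped-down version of the set-mask/clear-mask idiom used above.
enum SketchFlag : uint32_t {
  SKETCH_NO_FLAGS = 0,
  SKETCH_INCREMENTAL_MARKING = 1u << 0,
  SKETCH_POINTERS_TO_HERE = 1u << 1,
  SKETCH_POINTERS_FROM_HERE = 1u << 2,
};

uint32_t ApplyFlags(uint32_t current, uint32_t to_set, uint32_t to_clear) {
  return (current | to_set) & ~to_clear;  // set first, then clear
}

// Usage: stop marking on a page in one update.
// flags = ApplyFlags(flags, SKETCH_NO_FLAGS, SKETCH_INCREMENTAL_MARKING);
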

#ifdef V8_ENABLE_SANDBOX
bool MemoryChunk::SandboxSafeInReadOnlySpace() const {
  // For the sandbox, only flags from writable pages can be corrupted, so we
  // can use the flag check as a fast path in this case.
  // It also helps make TSAN happy, since it doesn't like the way we
  // initialize the MemoryChunks.
  // (See MemoryChunkMetadata::SynchronizedHeapLoad.)
  if (!InReadOnlySpace()) {
    return false;
  }

  // When the sandbox is enabled, only the ReadOnlyPageMetadata are stored
  // inline in the MemoryChunk.
  // ReadOnlyPageMetadata::ChunkAddress() is a special version that boils down
  // to `metadata_address - kMemoryChunkHeaderSize`.
  MemoryChunkMetadata** metadata_pointer_table = MetadataTableAddress();
  MemoryChunkMetadata* metadata = metadata_pointer_table
      [metadata_index_ & MemoryChunkConstants::kMetadataPointerTableSizeMask];
  SBXCHECK_EQ(
      static_cast<const ReadOnlyPageMetadata*>(metadata)->ChunkAddress(),
      address());

  return true;
}
#endif  // V8_ENABLE_SANDBOX
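Note the defensive masking of metadata_index_ with kMetadataPointerTableSizeMask: under the sandbox threat model the stored index may be attacker-controlled, and clamping it to the power-of-two table size keeps the load in bounds before SBXCHECK_EQ validates the result. The same pattern in isolation (the table size here is an assumption for illustration):

#include <cstdint>

// Defensive clamp for an untrusted index into a power-of-two-sized table, as
// done above with kMetadataPointerTableSizeMask. Size is assumed, not V8's.
constexpr uint32_t kSketchTableSize = 1u << 10;
constexpr uint32_t kSketchTableMask = kSketchTableSize - 1;

int ReadClamped(const int (&table)[kSketchTableSize], uint32_t untrusted) {
  return table[untrusted & kSketchTableMask];  // access is always in bounds
}
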

}  // namespace internal
}  // namespace v8