V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.

marking-barrier.cc
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/marking-barrier.h"

#include <memory>

#include "src/base/logging.h"
#include "src/common/globals.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/safepoint.h"

namespace v8 {
namespace internal {

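// The marking barrier is created per LocalHeap. It caches the major and minor
// collectors, the incremental marking state, and the shared-heap configuration
// of its isolate so that barrier slow paths can avoid re-querying the Heap.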
MarkingBarrier::MarkingBarrier(LocalHeap* local_heap)
    : heap_(local_heap->heap()),
      major_collector_(heap_->mark_compact_collector()),
      minor_collector_(heap_->minor_mark_sweep_collector()),
      incremental_marking_(heap_->incremental_marking()),
      marking_state_(isolate()),
      is_main_thread_barrier_(local_heap->is_main_thread()),
      uses_shared_heap_(isolate()->has_shared_space()),
      is_shared_space_isolate_(isolate()->is_shared_space_isolate()) {}

MarkingBarrier::~MarkingBarrier() { DCHECK(typed_slots_map_.empty()); }

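// Barrier slow path for indirect pointer slots (sandbox builds only): marks
// the pointee loaded from the slot, routing shared-trusted-space targets to
// the shared heap worklists and everything else to the local worklists.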
void MarkingBarrier::Write(Tagged<HeapObject> host, IndirectPointerSlot slot) {
#ifdef V8_ENABLE_SANDBOX
  DCHECK(IsCurrentMarkingBarrier(host));
  DCHECK(is_activated_ || shared_heap_worklists_.has_value());
  DCHECK(MemoryChunk::FromHeapObject(host)->IsMarking());

  // An indirect pointer slot can only contain a Smi if it is uninitialized (in
  // which case the value will be Smi::zero()). However, at this point the slot
  // must have been initialized because it was just written to.
  Tagged<HeapObject> value = Cast<HeapObject>(slot.load(isolate()));

  // If the host is in shared space, the target must be in the shared trusted
  // space. No other indirect pointer edges are currently possible in shared
  // space.
  DCHECK_IMPLIES(
      HeapLayout::InWritableSharedSpace(host),
      MemoryChunk::FromHeapObject(value)->Metadata()->owner()->identity() ==
          SHARED_TRUSTED_SPACE);

  if (HeapLayout::InReadOnlySpace(value)) return;

  if (V8_UNLIKELY(uses_shared_heap_) && !is_shared_space_isolate_) {
    if (HeapLayout::InWritableSharedSpace(value)) {
      // References to the shared trusted space may only originate from the
      // shared space.
      CHECK(HeapLayout::InWritableSharedSpace(host));
      DCHECK(MemoryChunk::FromHeapObject(value)->IsTrusted());
      MarkValueShared(value);
    } else {
      MarkValueLocal(value);
    }
  } else {
    MarkValueLocal(value);
  }

  // We don't need to record a slot here because the entries in the pointer
  // tables are not compacted and because the pointers stored in the table
  // entries are updated after compacting GC.
  static_assert(!CodePointerTable::kSupportsCompaction);
  static_assert(!TrustedPointerTable::kSupportsCompaction);
#else
  UNREACHABLE();
#endif
}

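// Barrier slow path for call sites that have no host object: only the value
// itself is marked locally, and shared-space values are ignored on client
// isolates.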
void MarkingBarrier::WriteWithoutHost(Tagged<HeapObject> value) {
  DCHECK(is_main_thread_barrier_);
  DCHECK(is_activated_);

  // Without a shared heap and on the shared space isolate (= main isolate) all
  // objects are considered local.
  if (V8_UNLIKELY(uses_shared_heap_) && !is_shared_space_isolate_) {
    // On client isolates (= worker isolates) shared values can be ignored.
    if (HeapLayout::InWritableSharedSpace(value)) {
      return;
    }
  }
  if (HeapLayout::InReadOnlySpace(value)) return;
  MarkValueLocal(value);
}

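// Barrier slow path for code targets referenced from RelocInfo: marks the
// target and, while compacting, records the typed slot so it can be updated
// after evacuation.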
void MarkingBarrier::Write(Tagged<InstructionStream> host,
                           RelocInfo* reloc_info, Tagged<HeapObject> value) {
  DCHECK(IsCurrentMarkingBarrier(host));
  DCHECK(!HeapLayout::InWritableSharedSpace(host));
  DCHECK(is_activated_ || shared_heap_worklists_.has_value());
  DCHECK(MemoryChunk::FromHeapObject(host)->IsMarking());

  MarkValue(host, value);

  if (is_compacting_) {
    DCHECK(is_major());
    if (is_main_thread_barrier_) {
      // An optimization to avoid allocating additional typed slots for the
      // main thread.
      major_collector_->RecordRelocSlot(host, reloc_info, value);
    } else {
      RecordRelocSlot(host, reloc_info, value);
    }
  }
}

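// Barrier slow path for JSArrayBuffer extensions: the extension is marked
// directly (young or full marking depending on the active collector) rather
// than through a heap slot.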
void MarkingBarrier::Write(Tagged<JSArrayBuffer> host,
                           ArrayBufferExtension* extension) {
  DCHECK(IsCurrentMarkingBarrier(host));
  DCHECK(!HeapLayout::InWritableSharedSpace(host));
  DCHECK(MemoryChunk::FromHeapObject(host)->IsMarking());

  if (is_minor()) {
    if (HeapLayout::InYoungGeneration(host)) {
      extension->YoungMark();
    }
  } else {
    extension->Mark();
  }
}

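// Barrier slow path for descriptor arrays. Descriptor arrays use a custom
// liveness encoding during major GC: instead of marking the whole array, the
// barrier publishes the number of descriptors that must be visited.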
void MarkingBarrier::Write(Tagged<DescriptorArray> descriptor_array,
                           int number_of_own_descriptors) {
  DCHECK(IsCurrentMarkingBarrier(descriptor_array));
  DCHECK(HeapLayout::InReadOnlySpace(descriptor_array->map()));
  DCHECK(MemoryChunk::FromHeapObject(descriptor_array)->IsMarking());

  // Only major GC uses custom liveness.
  if (is_minor() || IsStrongDescriptorArray(descriptor_array)) {
    MarkValueLocal(descriptor_array);
    return;
  }

  unsigned gc_epoch;
  MarkingWorklists::Local* worklist;
  if (V8_UNLIKELY(uses_shared_heap_) &&
      HeapLayout::InWritableSharedSpace(descriptor_array) &&
      !is_shared_space_isolate_) {
    gc_epoch = isolate()
                   ->shared_space_isolate()
                   ->heap()
                   ->mark_compact_collector()
                   ->epoch();
    DCHECK(shared_heap_worklists_.has_value());
    worklist = &*shared_heap_worklists_;
  } else {
#ifdef DEBUG
    if (const auto target_worklist =
            MarkingHelper::ShouldMarkObject(heap_, descriptor_array)) {
      DCHECK_EQ(target_worklist.value(),
                MarkingHelper::WorklistTarget::kRegular);
    } else {
      DCHECK(HeapLayout::InBlackAllocatedPage(descriptor_array));
    }
#endif  // DEBUG
    gc_epoch = major_collector_->epoch();
    worklist = current_worklists_.get();
  }

  if (v8_flags.black_allocated_pages) {
    // Make sure to only mark the descriptor array for non-black-allocated
    // pages. The atomic pause will fix it afterwards.
    if (MarkingHelper::ShouldMarkObject(heap_, descriptor_array)) {
      marking_state_.TryMark(descriptor_array);
    }
  } else {
    marking_state_.TryMark(descriptor_array);
  }

  // `TryUpdateIndicesToMark()` acts as a barrier that publishes the slots'
  // values corresponding to `number_of_own_descriptors`.
  if (DescriptorArrayMarkingState::TryUpdateIndicesToMark(
          gc_epoch, descriptor_array, number_of_own_descriptors)) {
    worklist->Push(descriptor_array);
  }
}

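// Records a typed slot for a RelocInfo target on a background thread. The
// slots are buffered per page in typed_slots_map_ and merged into the
// remembered set when the barrier publishes.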
void MarkingBarrier::RecordRelocSlot(Tagged<InstructionStream> host,
                                     RelocInfo* rinfo,
                                     Tagged<HeapObject> target) {
  DCHECK(IsCurrentMarkingBarrier(host));
  if (!MarkCompactCollector::ShouldRecordRelocSlot(host, rinfo, target)) return;

  MarkCompactCollector::RecordRelocSlotInfo info =
      MarkCompactCollector::ProcessRelocInfo(host, rinfo, target);

  auto& typed_slots = typed_slots_map_[info.page_metadata];
  if (!typed_slots) {
    typed_slots.reset(new TypedSlots());
  }
  typed_slots->Insert(info.slot_type, info.offset);
}

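// Helpers for toggling the marking page flags on all spaces of a heap. The
// old/new generation distinction is resolved at compile time based on the
// space type.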
namespace {
template <typename Space>
void SetGenerationPageFlags(Space* space, MarkingMode marking_mode) {
  if constexpr (std::is_same_v<Space, OldSpace> ||
                std::is_same_v<Space, SharedSpace> ||
                std::is_same_v<Space, TrustedSpace> ||
                std::is_same_v<Space, CodeSpace>) {
    for (auto* p : *space) {
      p->SetOldGenerationPageFlags(marking_mode);
    }
  } else if constexpr (std::is_same_v<Space, OldLargeObjectSpace> ||
                       std::is_same_v<Space, SharedLargeObjectSpace> ||
                       std::is_same_v<Space, TrustedLargeObjectSpace> ||
                       std::is_same_v<Space, CodeLargeObjectSpace>) {
    for (auto* p : *space) {
      DCHECK(p->Chunk()->IsLargePage());
      p->SetOldGenerationPageFlags(marking_mode);
    }
  } else if constexpr (std::is_same_v<Space, NewSpace>) {
    for (auto* p : *space) {
      p->SetYoungGenerationPageFlags(marking_mode);
    }
  } else {
    static_assert(std::is_same_v<Space, NewLargeObjectSpace>);
    for (auto* p : *space) {
      DCHECK(p->Chunk()->IsLargePage());
      p->SetYoungGenerationPageFlags(marking_mode);
    }
  }
}

template <typename Space>
void ActivateSpace(Space* space, MarkingMode marking_mode) {
  SetGenerationPageFlags(space, marking_mode);
}

template <typename Space>
void DeactivateSpace(Space* space) {
  SetGenerationPageFlags(space, MarkingMode::kNoMarking);
}

void ActivateSpaces(Heap* heap, MarkingMode marking_mode) {
  ActivateSpace(heap->old_space(), marking_mode);
  ActivateSpace(heap->lo_space(), marking_mode);
  if (heap->new_space()) {
    DCHECK(!v8_flags.sticky_mark_bits);
    ActivateSpace(heap->new_space(), marking_mode);
  }
  ActivateSpace(heap->new_lo_space(), marking_mode);
  {
    RwxMemoryWriteScope scope("For writing flags.");
    ActivateSpace(heap->code_space(), marking_mode);
    ActivateSpace(heap->code_lo_space(), marking_mode);
  }

  if (marking_mode == MarkingMode::kMajorMarking) {
    if (heap->shared_space()) {
      ActivateSpace(heap->shared_space(), marking_mode);
    }
    if (heap->shared_lo_space()) {
      ActivateSpace(heap->shared_lo_space(), marking_mode);
    }
  }

  ActivateSpace(heap->trusted_space(), marking_mode);
  ActivateSpace(heap->trusted_lo_space(), marking_mode);
}

void DeactivateSpaces(Heap* heap, MarkingMode marking_mode) {
  DeactivateSpace(heap->old_space());
  DeactivateSpace(heap->lo_space());
  if (heap->new_space()) {
    DCHECK(!v8_flags.sticky_mark_bits);
    DeactivateSpace(heap->new_space());
  }
  DeactivateSpace(heap->new_lo_space());
  {
    RwxMemoryWriteScope scope("For writing flags.");
    DeactivateSpace(heap->code_space());
    DeactivateSpace(heap->code_lo_space());
  }

  if (marking_mode == MarkingMode::kMajorMarking) {
    if (heap->shared_space()) {
      DeactivateSpace(heap->shared_space());
    }
    if (heap->shared_lo_space()) {
      DeactivateSpace(heap->shared_lo_space());
    }
  }

  DeactivateSpace(heap->trusted_space());
  DeactivateSpace(heap->trusted_lo_space());
}
}  // namespace

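// Activation and deactivation are performed in a safepoint for every local
// heap; for major GC the shared heap and all client isolates are included as
// well.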
// static
void MarkingBarrier::ActivateAll(Heap* heap, bool is_compacting) {
  ActivateSpaces(heap, MarkingMode::kMajorMarking);

  heap->safepoint()->IterateLocalHeaps([is_compacting](LocalHeap* local_heap) {
    local_heap->marking_barrier()->Activate(is_compacting,
                                            MarkingMode::kMajorMarking);
  });

  if (heap->isolate()->is_shared_space_isolate()) {
    heap->isolate()
        ->shared_space_isolate()
        ->global_safepoint()
        ->IterateClientIsolates([](Isolate* client) {
          // Force the RecordWrite builtin into the incremental marking code
          // path.
          client->heap()->SetIsMarkingFlag(true);
          client->heap()->safepoint()->IterateLocalHeaps(
              [](LocalHeap* local_heap) {
                local_heap->marking_barrier()->ActivateShared();
              });
        });
  }
}

// static
void MarkingBarrier::ActivateYoung(Heap* heap) {
  ActivateSpaces(heap, MarkingMode::kMinorMarking);

  heap->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
    local_heap->marking_barrier()->Activate(false, MarkingMode::kMinorMarking);
  });
}

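// Per-LocalHeap activation: selects the worklists of the active collector and
// remembers whether this cycle compacts.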
void MarkingBarrier::Activate(bool is_compacting, MarkingMode marking_mode) {
  DCHECK(!is_activated_);
  is_compacting_ = is_compacting;
  marking_mode_ = marking_mode;
  current_worklists_ = std::make_unique<MarkingWorklists::Local>(
      is_minor() ? minor_collector_->marking_worklists()
                 : major_collector_->marking_worklists());
  is_activated_ = true;
}

void MarkingBarrier::ActivateShared() {
  DCHECK(!shared_heap_worklists_.has_value());
  Isolate* shared_isolate = isolate()->shared_space_isolate();
  shared_heap_worklists_.emplace(
      shared_isolate->heap()->mark_compact_collector()->marking_worklists());
}

// static
void MarkingBarrier::DeactivateAll(Heap* heap) {
  DeactivateSpaces(heap, MarkingMode::kMajorMarking);

  heap->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
    local_heap->marking_barrier()->Deactivate();
  });

  if (heap->isolate()->is_shared_space_isolate()) {
    heap->isolate()
        ->shared_space_isolate()
        ->global_safepoint()
        ->IterateClientIsolates([](Isolate* client) {
          // We can't simply disable the marking barrier for all clients. A
          // client may still need it to be set for incremental marking in the
          // local heap.
          const bool is_marking =
              client->heap()->incremental_marking()->IsMarking();
          client->heap()->SetIsMarkingFlag(is_marking);
          client->heap()->safepoint()->IterateLocalHeaps(
              [](LocalHeap* local_heap) {
                local_heap->marking_barrier()->DeactivateShared();
              });
        });
  }
}

// static
void MarkingBarrier::DeactivateYoung(Heap* heap) {
  DeactivateSpaces(heap, MarkingMode::kMinorMarking);

  heap->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
    local_heap->marking_barrier()->Deactivate();
  });
}

void MarkingBarrier::Deactivate() {
  DCHECK(is_activated_);
  is_activated_ = false;
  is_compacting_ = false;
  marking_mode_ = MarkingMode::kNoMarking;
  DCHECK(typed_slots_map_.empty());
  DCHECK(current_worklists_->IsEmpty());
  current_worklists_.reset();
}

void MarkingBarrier::DeactivateShared() {
  DCHECK(shared_heap_worklists_->IsEmpty());
  shared_heap_worklists_.reset();
}

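// Publishing pushes locally buffered marking work (worklists and typed slots)
// to the global data structures so the collector can see it.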
// static
void MarkingBarrier::PublishAll(Heap* heap) {
  heap->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
    local_heap->marking_barrier()->PublishIfNeeded();
  });

  if (heap->isolate()->is_shared_space_isolate()) {
    heap->isolate()
        ->shared_space_isolate()
        ->global_safepoint()
        ->IterateClientIsolates([](Isolate* client) {
          client->heap()->safepoint()->IterateLocalHeaps(
              [](LocalHeap* local_heap) {
                local_heap->marking_barrier()->PublishSharedIfNeeded();
              });
        });
  }
}

// static
void MarkingBarrier::PublishYoung(Heap* heap) {
  heap->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
    local_heap->marking_barrier()->PublishIfNeeded();
  });
}

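// Publishes this barrier's local worklist and merges any buffered typed slots
// into their pages under the page mutex.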
void MarkingBarrier::PublishIfNeeded() {
  if (is_activated_) {
    current_worklists_->Publish();
    for (auto& it : typed_slots_map_) {
      MutablePageMetadata* memory_chunk = it.first;
      // Access to TypedSlots needs to be protected, since LocalHeaps might
      // publish code in the background thread.
      base::MutexGuard guard(memory_chunk->mutex());
      std::unique_ptr<TypedSlots>& typed_slots = it.second;
      RememberedSet<OLD_TO_OLD>::MergeTyped(memory_chunk,
                                            std::move(typed_slots));
    }
    typed_slots_map_.clear();
  }
}

void MarkingBarrier::PublishSharedIfNeeded() {
  if (shared_heap_worklists_) {
    shared_heap_worklists_->Publish();
  }
}

bool MarkingBarrier::IsCurrentMarkingBarrier(
    Tagged<HeapObject> verification_candidate) {
  return WriteBarrier::CurrentMarkingBarrier(verification_candidate) == this;
}

Isolate* MarkingBarrier::isolate() const { return heap_->isolate(); }

#if DEBUG
void MarkingBarrier::AssertMarkingIsActivated() const { DCHECK(is_activated_); }

void MarkingBarrier::AssertSharedMarkingIsActivated() const {
  DCHECK(shared_heap_worklists_.has_value());
}
bool MarkingBarrier::IsMarked(const Tagged<HeapObject> value) const {
  return marking_state_.IsMarked(value);
}
#endif  // DEBUG

}  // namespace internal
}  // namespace v8