v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
marker.h
Go to the documentation of this file.
1// Copyright 2020 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_HEAP_CPPGC_MARKER_H_
6#define V8_HEAP_CPPGC_MARKER_H_
7
8#include <memory>
9
10#include "include/cppgc/heap.h"
13#include "src/base/macros.h"
25
26namespace cppgc {
27namespace internal {
28
29class HeapBase;
30
31// Marking algorithm. Example of a valid call sequence for the marking
32// phase:
33// 1. StartMarking()
34// 2. AdvanceMarkingWithLimits() [Optional, depending on environment.]
35// 3. EnterAtomicPause()
36// 4. AdvanceMarkingWithLimits() [Optional]
37// 5. EnterProcessGlobalAtomicPause()
38// 6. AdvanceMarkingWithLimits()
39// 7. LeaveAtomicPause()
40//
41// Alternatively, FinishMarking() combines steps 3.-7.
42//
43// The marker protects cross-thread roots from being created between 5.-7. This
44// currently requires entering a process-global atomic pause.
45class V8_EXPORT_PRIVATE MarkerBase {
46 public:
48
49 enum class WriteBarrierType {
50 kDijkstra,
51 kSteele,
52 };
53
54 // Pauses concurrent marking if running while this scope is active.
55 class V8_EXPORT_PRIVATE PauseConcurrentMarkingScope final {
56 public:
57 explicit PauseConcurrentMarkingScope(MarkerBase&);
58 ~PauseConcurrentMarkingScope();
59
60 private:
61 MarkerBase& marker_;
62 const bool resume_on_exit_;
63 };
64
65 virtual ~MarkerBase();
66
67 MarkerBase(const MarkerBase&) = delete;
68 MarkerBase& operator=(const MarkerBase&) = delete;
69
70 template <typename Class>
71 Class& To() {
72 return *static_cast<Class*>(this);
73 }
74
75 // Signals entering the atomic marking pause. The method
76 // - stops incremental/concurrent marking;
77 // - flushes back any in-construction worklists if needed;
78 // - updates the MarkingConfig if the stack state has changed;
79 // - marks local roots.
80 void EnterAtomicPause(StackState);
81
82 // Enters the process-global pause. The phase marks cross-thread roots and
83 // acquires a lock that prevents any cross-thread references from being
84 // created.
85 //
86 // The phase is ended with `LeaveAtomicPause()`.
87 void EnterProcessGlobalAtomicPause();
88
89 // Re-enables concurrent marking, assuming it is not yet enabled in the current GC cycle.
90 void ReEnableConcurrentMarking();
91
92 // Makes marking progress. A `marked_bytes_limit` of 0 means that the limit
93 // is determined by the internal marking scheduler.
94 //
95 // TODO(chromium:1056170): Remove TimeDelta argument when unified heap no
96 // longer uses it.
97 bool AdvanceMarkingWithLimits(
98 v8::base::TimeDelta = kMaximumIncrementalStepDuration,
99 size_t marked_bytes_limit = 0);
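 // Usage sketch (illustrative only; not part of the original marker.h):
 // assuming a MarkerBase& `marker` with marking already started, one bounded
 // incremental step could be driven as follows; a zero byte limit defers to
 // the internal marking scheduler, while the TimeDelta caps the step duration:
 //
 //   marker.AdvanceMarkingWithLimits(
 //       v8::base::TimeDelta::FromMilliseconds(2),  // bound this step's duration
 //       /*marked_bytes_limit=*/0);                 // 0: scheduler picks the budget
 //   size_t bytes = marker.last_bytes_marked();     // progress made by this step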
100
101 // Returns the number of bytes marked in the last invocation of
102 // `AdvanceMarkingWithLimits()`.
103 size_t last_bytes_marked() const { return last_bytes_marked_; }
104
105 // Signals leaving the atomic marking pause. This method expects no more
106 // objects to be marked and merely updates marking states if needed.
107 void LeaveAtomicPause();
108
109 // Initializes marking according to the given config. This method will
110 // trigger incremental/concurrent marking if needed.
111 void StartMarking();
112
113 // Combines:
114 // - EnterAtomicPause()
115 // - EnterProcessGlobalAtomicPause()
116 // - AdvanceMarkingWithLimits()
117 // - ProcessWeakness()
118 // - LeaveAtomicPause()
119 void FinishMarking(StackState);
120
121 void ProcessCrossThreadWeaknessIfNeeded();
122 void ProcessWeakness();
123
124 bool JoinConcurrentMarkingIfNeeded();
125 void NotifyConcurrentMarkingOfWorkIfNeeded(cppgc::TaskPriority);
126
127 template <WriteBarrierType type>
128 inline void WriteBarrierForObject(HeapObjectHeader&);
129
130 HeapBase& heap() { return heap_; }
131
132 cppgc::Visitor& Visitor() { return visitor(); }
133
134 bool IsMarking() const { return is_marking_; }
135
136 void SetMainThreadMarkingDisabledForTesting(bool);
137 void WaitForConcurrentMarkingForTesting();
138 void ClearAllWorklistsForTesting();
139 bool IncrementalMarkingStepForTesting(StackState);
140
141 MarkingWorklists& MarkingWorklistsForTesting() { return marking_worklists_; }
142 MutatorMarkingState& MutatorMarkingStateForTesting() {
143 return mutator_marking_state_;
144 }
145
146 protected:
149 public:
150 static constexpr size_t kMinAllocatedBytesPerStep = 256 * kKB;
151
153
154 void AllocatedObjectSizeIncreased(size_t delta) final;
155
156 private:
158 size_t current_allocated_size_ = 0;
159 };
160
162
163 static constexpr v8::base::TimeDelta kMaximumIncrementalStepDuration =
165
167
168 virtual cppgc::Visitor& visitor() = 0;
169 virtual ConservativeTracingVisitor& conservative_visitor() = 0;
170 virtual heap::base::StackVisitor& stack_visitor() = 0;
171 virtual ConcurrentMarkerBase& concurrent_marker() = 0;
172 virtual heap::base::IncrementalMarkingSchedule& schedule() = 0;
173
174 // Processes the worklists with given deadlines. The deadlines are only
175 // checked every few objects.
176 // - `marked_bytes_deadline`: Only process this many bytes. Ignored for
177 // processing concurrent bailout objects.
178 // - `time_deadline`: Time deadline that is always respected.
179 bool ProcessWorklistsWithDeadline(size_t marked_bytes_deadline,
180 v8::base::TimeTicks time_deadline);
181 void AdvanceMarkingWithLimitsEpilogue();
182
183 void VisitLocalRoots(StackState);
184 void VisitCrossThreadRoots();
185
186 void MarkNotFullyConstructedObjects();
187
188 virtual void ScheduleIncrementalMarkingTask();
189
190 bool IncrementalMarkingStep(StackState);
191
192 void AdvanceMarkingOnAllocation();
193 virtual void AdvanceMarkingOnAllocationImpl();
194
195 void HandleNotFullyConstructedObjects();
196 void MarkStrongCrossThreadRoots();
197
199 MarkingConfig config_ = MarkingConfig::Default();
200 cppgc::Platform* platform_;
201 std::shared_ptr<cppgc::TaskRunner> foreground_task_runner_;
202 IncrementalMarkingTaskHandle incremental_marking_handle_;
203 IncrementalMarkingAllocationObserver incremental_marking_allocation_observer_;
204 MarkingWorklists marking_worklists_;
205 MutatorMarkingState mutator_marking_state_;
206 size_t last_bytes_marked_ = 0;
207 bool is_marking_{false};
208 bool main_marking_disabled_for_testing_{false};
209 bool visited_cross_thread_persistents_in_atomic_pause_{false};
210 bool processed_cross_thread_weakness_{false};
211};
212
213class V8_EXPORT_PRIVATE Marker final : public MarkerBase {
214 public:
215 Marker(HeapBase&, cppgc::Platform*, MarkingConfig = MarkingConfig::Default());
216
217 protected:
218 cppgc::Visitor& visitor() final { return marking_visitor_; }
219
220 ConservativeTracingVisitor& conservative_visitor() final {
221 return conservative_marking_visitor_;
222 }
223
224 heap::base::StackVisitor& stack_visitor() final {
225 return conservative_marking_visitor_;
226 }
227
228 ConcurrentMarkerBase& concurrent_marker() final { return concurrent_marker_; }
229
230 heap::base::IncrementalMarkingSchedule& schedule() final {
231 return *schedule_.get();
232 }
233
234 private:
235 MutatorMarkingVisitor marking_visitor_;
236 ConservativeMarkingVisitor conservative_marking_visitor_;
237 std::unique_ptr<heap::base::IncrementalMarkingSchedule> schedule_;
238 ConcurrentMarker concurrent_marker_;
239};
240
241template <MarkerBase::WriteBarrierType type>
242void MarkerBase::WriteBarrierForObject(HeapObjectHeader& header) {
243 // The barrier optimizes for the bailout cases:
244 // - kDijkstra: Marked objects.
245 // - kSteele: Unmarked objects.
246 switch (type) {
247 case WriteBarrierType::kDijkstra:
248 if (!header.TryMarkAtomic()) {
249 return;
250 }
251 break;
252 case WriteBarrierType::kSteele:
253 if (!header.IsMarked<AccessMode::kAtomic>()) {
254 return;
255 }
256 break;
257 }
258
259 // The barrier fired. Filter out in-construction objects here. This possibly
260 // requires unmarking the object again.
261 if (V8_UNLIKELY(header.IsInConstruction<AccessMode::kAtomic>())) {
262 // In-construction objects are traced only if they are unmarked. If marking
263 // reaches this object again once it is fully constructed, it will re-mark
264 // it, and tracing it as a previously not-fully-constructed object then
265 // knows to bail out.
266 header.Unmark<AccessMode::kAtomic>();
267 mutator_marking_state_.not_fully_constructed_worklist()
268 .Push<AccessMode::kAtomic>(&header);
269 return;
270 }
271
272 switch (type) {
273 case WriteBarrierType::kDijkstra:
274 mutator_marking_state_.write_barrier_worklist().Push(&header);
275 break;
276 case WriteBarrierType::kSteele:
277 mutator_marking_state_.retrace_marked_objects_worklist().Push(&header);
278 break;
279 }
280}
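 // Usage sketch (illustrative only; not part of the original marker.h): the
 // barrier flavor is selected at compile time. Assuming references `marker`
 // (a MarkerBase&) and `header`, a Dijkstra-style barrier marks still-unmarked
 // targets, while a Steele-style barrier re-traces already marked ones:
 //
 //   marker.WriteBarrierForObject<MarkerBase::WriteBarrierType::kDijkstra>(header);
 //   marker.WriteBarrierForObject<MarkerBase::WriteBarrierType::kSteele>(header);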
281
282} // namespace internal
283} // namespace cppgc
284
285#endif // V8_HEAP_CPPGC_MARKER_H_
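For orientation, a minimal sketch of a driver stepping through the phases documented in the class comment above (illustrative only; `marker` is assumed to be a constructed Marker, the stack state value is an example, and scheduling and weakness-processing details are omitted):

  // Incremental cycle: start, make optional incremental progress, then
  // finalize within the atomic pause.
  marker.StartMarking();
  marker.AdvanceMarkingWithLimits();                      // optional steps
  marker.EnterAtomicPause(StackState::kNoHeapPointers);
  marker.AdvanceMarkingWithLimits();                      // optional
  marker.EnterProcessGlobalAtomicPause();
  marker.AdvanceMarkingWithLimits();
  marker.LeaveAtomicPause();

  // Alternatively, FinishMarking() combines the atomic-pause steps and also
  // runs ProcessWeakness():
  marker.StartMarking();
  marker.AdvanceMarkingWithLimits();                      // optional steps
  marker.FinishMarking(StackState::kNoHeapPointers);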