v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
concurrent-marker.cc
Go to the documentation of this file.
1// Copyright 2020 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
13
14namespace cppgc {
15namespace internal {
16
17namespace {
18
// Fraction of the overall estimated marking duration after which, if
// concurrent tasks have reported no progress, their priority is increased
// (see the priority-bump logic further down in this file).
static constexpr double kMarkingScheduleRatioBeforeConcurrentPriorityIncrease =
    0.5;

// Number of worklist items processed between yield/deadline checks while
// draining a worklist.
static constexpr size_t kDefaultDeadlineCheckInterval = 750u;
23
24template <StatsCollector::ConcurrentScopeId scope_id,
25 size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
26 typename WorklistLocal, typename Callback>
27bool DrainWorklistWithYielding(JobDelegate* job_delegate,
28 StatsCollector* stats_collector,
29 ConcurrentMarkingState& marking_state,
30 ConcurrentMarkerBase& concurrent_marker,
31 WorklistLocal& worklist_local,
32 Callback callback) {
34 [&concurrent_marker, &marking_state, job_delegate]() {
35 concurrent_marker.AddConcurrentlyMarkedBytes(
36 marking_state.RecentlyMarkedBytes());
37 return job_delegate->ShouldYield();
38 },
39 [stats_collector]() {
40 return StatsCollector::DisabledConcurrentScope(stats_collector,
41 scope_id);
42 },
43 worklist_local, callback);
44}
45
46size_t WorkSizeForConcurrentMarking(MarkingWorklists& marking_worklists) {
47 return marking_worklists.marking_worklist()->Size() +
48 marking_worklists.write_barrier_worklist()->Size() +
49 marking_worklists.previously_not_fully_constructed_worklist()->Size();
50}
51
52// Checks whether worklists' global pools hold any segment a concurrent marker
53// can steal. This is called before the concurrent marker holds any Locals, so
54// no need to check local segments.
55bool HasWorkForConcurrentMarking(MarkingWorklists& marking_worklists) {
56 return !marking_worklists.marking_worklist()->IsEmpty() ||
57 !marking_worklists.write_barrier_worklist()->IsEmpty() ||
58 !marking_worklists.previously_not_fully_constructed_worklist()
59 ->IsEmpty();
60}
61
// Job task that drains the marking worklists on background threads. Each
// invocation of Run() sets up its own marking state and visitor.
class ConcurrentMarkingTask final : public v8::JobTask {
 public:
  explicit ConcurrentMarkingTask(ConcurrentMarkerBase&);

  // v8::JobTask implementation.
  void Run(JobDelegate* delegate) final;

  // Desired concurrency: remaining global work plus active workers.
  size_t GetMaxConcurrency(size_t) const final;

 private:
  // Drains all worklists in order until empty or the delegate yields.
  void ProcessWorklists(JobDelegate*, ConcurrentMarkingState&, Visitor&);

  // NOTE(review): assumes the marker outlives the posted job — confirm.
  ConcurrentMarkerBase& concurrent_marker_;
};
75
// Only wires up the back-reference to the marker; no work happens until Run().
ConcurrentMarkingTask::ConcurrentMarkingTask(
    ConcurrentMarkerBase& concurrent_marker)
    : concurrent_marker_(concurrent_marker) {}
79
// Entry point for one background marking worker. Sets up a per-worker marking
// state and visitor, drains the worklists, and publishes results.
void ConcurrentMarkingTask::Run(JobDelegate* job_delegate) {
  StatsCollector::EnabledConcurrentScope stats_scope(
      concurrent_marker_.heap().stats_collector(),
      StatsCollector::kConcurrentMark);
  // Bail out early when the global pools hold no stealable work; avoids
  // constructing marking state and a visitor for nothing.
  if (!HasWorkForConcurrentMarking(concurrent_marker_.marking_worklists()))
    return;
  ConcurrentMarkingState concurrent_marking_state(
      concurrent_marker_.heap(), concurrent_marker_.marking_worklists(),
      concurrent_marker_.heap().compactor().compaction_worklists());
  std::unique_ptr<Visitor> concurrent_marking_visitor =
      concurrent_marker_.CreateConcurrentMarkingVisitor(
          concurrent_marking_state);
  ProcessWorklists(job_delegate, concurrent_marking_state,
                   *concurrent_marking_visitor);
  // Flush bytes marked since the last in-loop report, then publish local
  // worklist segments back to the global pools before the worker exits.
  concurrent_marker_.AddConcurrentlyMarkedBytes(
      concurrent_marking_state.RecentlyMarkedBytes());
  concurrent_marking_state.Publish();
}
98
99size_t ConcurrentMarkingTask::GetMaxConcurrency(
100 size_t current_worker_count) const {
101 return WorkSizeForConcurrentMarking(concurrent_marker_.marking_worklists()) +
102 current_worker_count;
103}
104
105void ConcurrentMarkingTask::ProcessWorklists(
106 JobDelegate* job_delegate, ConcurrentMarkingState& concurrent_marking_state,
107 Visitor& concurrent_marking_visitor) {
108 StatsCollector* stats_collector = concurrent_marker_.heap().stats_collector();
109 do {
110 if (!DrainWorklistWithYielding<
111 StatsCollector::kConcurrentMarkProcessNotFullyconstructedWorklist>(
112 job_delegate, stats_collector, concurrent_marking_state,
114 concurrent_marking_state
115 .previously_not_fully_constructed_worklist(),
116 [&concurrent_marking_state,
117 &concurrent_marking_visitor](HeapObjectHeader* header) {
118 BasePage::FromPayload(header)->SynchronizedLoad();
119 concurrent_marking_state.AccountMarkedBytes(*header);
120 DynamicallyTraceMarkedObject<AccessMode::kAtomic>(
121 concurrent_marking_visitor, *header);
122 })) {
123 return;
124 }
125 if (!DrainWorklistWithYielding<
126 StatsCollector::kConcurrentMarkProcessMarkingWorklist>(
127 job_delegate, stats_collector, concurrent_marking_state,
128 concurrent_marker_, concurrent_marking_state.marking_worklist(),
129 [&concurrent_marking_state, &concurrent_marking_visitor](
130 const MarkingWorklists::MarkingItem& item) {
131 BasePage::FromPayload(item.base_object_payload)
132 ->SynchronizedLoad();
133 const HeapObjectHeader& header =
134 HeapObjectHeader::FromObject(item.base_object_payload);
135 DCHECK(!header.IsInConstruction<AccessMode::kAtomic>());
136 DCHECK(header.IsMarked<AccessMode::kAtomic>());
137 concurrent_marking_state.AccountMarkedBytes(header);
138 item.callback(&concurrent_marking_visitor,
139 item.base_object_payload);
140 })) {
141 return;
142 }
143 if (!DrainWorklistWithYielding<
144 StatsCollector::kConcurrentMarkProcessWriteBarrierWorklist>(
145 job_delegate, stats_collector, concurrent_marking_state,
147 concurrent_marking_state.write_barrier_worklist(),
148 [&concurrent_marking_state,
149 &concurrent_marking_visitor](HeapObjectHeader* header) {
150 BasePage::FromPayload(header)->SynchronizedLoad();
151 concurrent_marking_state.AccountMarkedBytes(*header);
152 DynamicallyTraceMarkedObject<AccessMode::kAtomic>(
153 concurrent_marking_visitor, *header);
154 })) {
155 return;
156 }
157 if (!DrainWorklistWithYielding<
158 StatsCollector::kConcurrentMarkProcessEphemeronWorklist>(
159 job_delegate, stats_collector, concurrent_marking_state,
161 concurrent_marking_state.ephemeron_pairs_for_processing_worklist(),
162 [&concurrent_marking_state, &concurrent_marking_visitor](
163 const MarkingWorklists::EphemeronPairItem& item) {
164 concurrent_marking_state.ProcessEphemeron(
165 item.key, item.value, item.value_desc,
166 concurrent_marking_visitor);
167 })) {
168 return;
169 }
170 } while (
171 !concurrent_marking_state.marking_worklist().IsLocalAndGlobalEmpty());
172}
173
174} // namespace
175
// Only wires up references to the heap, worklists, schedule, and platform; the
// constructor body is empty, so no concurrent job is posted here.
ConcurrentMarkerBase::ConcurrentMarkerBase(
    HeapBase& heap, MarkingWorklists& marking_worklists,
    heap::base::IncrementalMarkingSchedule& incremental_marking_schedule,
    cppgc::Platform* platform)
    : heap_(heap),
      marking_worklists_(marking_worklists),
      incremental_marking_schedule_(incremental_marking_schedule),
      platform_(platform) {}
184
192
195 return false;
196
198 return true;
199}
200
203 return false;
204
206 return true;
207}
208
212
214 concurrently_marked_bytes_.fetch_add(marked_bytes, std::memory_order_relaxed);
216}
217
222
225 if (HasWorkForConcurrentMarking(marking_worklists_)) {
226 // Notifies the scheduler that max concurrency might have increased.
227 // This will adjust the number of markers if necessary.
229 concurrent_marking_handle_->NotifyConcurrencyIncrease();
230 }
231}
232
234 if (HasWorkForConcurrentMarking(marking_worklists_)) {
235 concurrent_marking_handle_->UpdatePriority(priority);
236 concurrent_marking_handle_->NotifyConcurrencyIncrease();
237 }
238}
239
241 if (!concurrent_marking_handle_->UpdatePriorityEnabled() ||
243 return;
244 }
245 // If concurrent tasks aren't executed, it might delay GC finalization. As
246 // long as GC is active so is the write barrier, which incurs a performance
247 // cost. Marking is estimated to take overall
248 // |MarkingSchedulingOracle::kEstimatedMarkingTime|. If concurrent marking
249 // tasks have not reported any progress (i.e. the concurrently marked bytes
250 // count as not changed) in over
251 // |kMarkingScheduleRatioBeforeConcurrentPriorityIncrease| of that expected
252 // duration, we increase the concurrent task priority for the duration of the
253 // current GC. This is meant to prevent the GC from exceeding it's expected
254 // end time.
255 const auto time_delta =
257 if (!time_delta.IsZero() &&
258 (time_delta.InMillisecondsF() >
260 .InMillisecondsF() *
261 kMarkingScheduleRatioBeforeConcurrentPriorityIncrease))) {
262 concurrent_marking_handle_->UpdatePriority(
263 cppgc::TaskPriority::kUserBlocking);
265 }
266}
267
269 ConcurrentMarkingState& marking_state) const {
270 return std::make_unique<ConcurrentMarkingVisitor>(heap(), marking_state);
271}
272
273} // namespace internal
274} // namespace cppgc
virtual std::unique_ptr< JobHandle > PostJob(TaskPriority priority, std::unique_ptr< JobTask > job_task)
Definition platform.h:126
heap::base::IncrementalMarkingSchedule & incremental_marking_schedule_
void AddConcurrentlyMarkedBytes(size_t marked_bytes)
std::atomic< size_t > concurrently_marked_bytes_
std::unique_ptr< JobHandle > concurrent_marking_handle_
void NotifyOfWorkIfNeeded(cppgc::TaskPriority priority)
heap::base::IncrementalMarkingSchedule & incremental_marking_schedule() const
std::unique_ptr< Visitor > CreateConcurrentMarkingVisitor(ConcurrentMarkingState &) const final
InternalScope< kDisabled, kConcurrentThread > DisabledConcurrentScope
static constexpr v8::base::TimeDelta kEstimatedMarkingTime
double InMillisecondsF() const
Definition time.cc:226
ConcurrentMarkerBase & concurrent_marker_
v8::Platform * platform_
Definition cpp-heap.cc:193
TNode< Object > callback
size_t priority
bool DrainWorklistWithPredicate(Predicate ShouldYield, CreateStatsScopeCallback CreateStatsScope, WorklistLocal &worklist_local, ProcessWorklistItemCallback ProcessWorklistItem)
v8::JobDelegate JobDelegate
Definition platform.h:20
TaskPriority
Definition v8-platform.h:24
MarkingWorklists marking_worklists_
#define CHECK_IMPLIES(lhs, rhs)
#define DCHECK(condition)
Definition logging.h:482
Heap * heap_