v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.

maglev-concurrent-dispatcher.cc
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/maglev/maglev-concurrent-dispatcher.h"

// (The listing elided most of the include lines; the set below is
// reconstructed from the identifiers this file uses.)
#include "src/base/fpu.h"
#include "src/codegen/compiler.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-heap-broker.h"
#include "src/execution/isolate.h"
#include "src/execution/local-isolate-inl.h"
#include "src/flags/flags.h"
#include "src/handles/handles-inl.h"
#include "src/handles/persistent-handles.h"
#include "src/heap/local-heap-inl.h"
#include "src/heap/parked-scope.h"
#include "src/maglev/maglev-code-generator.h"
#include "src/maglev/maglev-compilation-info.h"
#include "src/maglev/maglev-compiler.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-pipeline-statistics.h"
#include "src/objects/js-function.h"
#include "src/utils/identity-map.h"
#include "src/utils/locked-queue-inl.h"
namespace v8 {
namespace internal {

namespace compiler {

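// These two hooks hand the compilation info's persistent handles over to the
// background thread's LocalHeap for the duration of a Maglev compile, and
// hand them back when the broker detaches. This is what allows the background
// thread to dereference main-thread handles safely.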
void JSHeapBroker::AttachLocalIsolateForMaglev(
    maglev::MaglevCompilationInfo* info, LocalIsolate* local_isolate) {
  DCHECK_NULL(local_isolate_);
  local_isolate_ = local_isolate;
  DCHECK_NOT_NULL(local_isolate_);
  local_isolate_->heap()->AttachPersistentHandles(
      info->DetachPersistentHandles());
}

void JSHeapBroker::DetachLocalIsolateForMaglev(
    maglev::MaglevCompilationInfo* info) {
  DCHECK_NULL(ph_);
  DCHECK_NOT_NULL(local_isolate_);
  std::unique_ptr<PersistentHandles> ph =
      local_isolate_->heap()->DetachPersistentHandles();
  local_isolate_ = nullptr;
  info->set_persistent_handles(std::move(ph));
}

}  // namespace compiler

namespace maglev {

namespace {

constexpr char kMaglevCompilerName[] = "Maglev";

// LocalIsolateScope encapsulates the phase where persistent handles are
// attached to the LocalHeap inside {local_isolate}.
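// The destructor detaches them again, so placing one of these on the stack
// scopes the background thread's heap access to the compilation (see
// ExecuteJobImpl below).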
class V8_NODISCARD LocalIsolateScope final {
 public:
  explicit LocalIsolateScope(MaglevCompilationInfo* info,
                             LocalIsolate* local_isolate)
      : info_(info) {
    info_->broker()->AttachLocalIsolateForMaglev(info_, local_isolate);
  }

  ~LocalIsolateScope() { info_->broker()->DetachLocalIsolateForMaglev(info_); }

 private:
  MaglevCompilationInfo* const info_;
};

}  // namespace

Zone* ExportedMaglevCompilationInfo::zone() const { return info_->zone(); }

void ExportedMaglevCompilationInfo::set_canonical_handles(
    std::unique_ptr<CanonicalHandlesMap>&& canonical_handles) {
  info_->set_canonical_handles(std::move(canonical_handles));
}

// static
std::unique_ptr<MaglevCompilationJob> MaglevCompilationJob::New(
    Isolate* isolate, Handle<JSFunction> function, BytecodeOffset osr_offset) {
  auto info = maglev::MaglevCompilationInfo::New(isolate, function, osr_offset);
  return std::unique_ptr<MaglevCompilationJob>(
      new MaglevCompilationJob(isolate, std::move(info)));
}

namespace {

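// Pipeline statistics are only allocated when someone is listening: either
// the disabled-by-default "v8.compile" tracing category is enabled, or one of
// the --maglev-stats flags is set. Otherwise the job carries a nullptr and
// the phase bookkeeping below becomes a no-op.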
MaglevPipelineStatistics* CreatePipelineStatistics(
    Isolate* isolate, MaglevCompilationInfo* compilation_info,
    compiler::ZoneStats* zone_stats) {
  MaglevPipelineStatistics* pipeline_stats = nullptr;
  bool tracing_enabled;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
                                     &tracing_enabled);
  if (tracing_enabled || v8_flags.maglev_stats || v8_flags.maglev_stats_nvp) {
    pipeline_stats = new MaglevPipelineStatistics(
        compilation_info, isolate->GetMaglevStatistics(), zone_stats);
  }
  return pipeline_stats;
}

}  // namespace

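// A MaglevCompilationJob goes through the usual OptimizedCompilationJob
// phases: Prepare on the main thread, Execute (the compile itself) typically
// on a background worker, and Finalize back on the main thread.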
MaglevCompilationJob::MaglevCompilationJob(
    Isolate* isolate, std::unique_ptr<MaglevCompilationInfo>&& info)
    : OptimizedCompilationJob(kMaglevCompilerName, State::kReadyToPrepare),
      info_(std::move(info)),
      zone_stats_(isolate->allocator()),
      pipeline_statistics_(
          CreatePipelineStatistics(isolate, info_.get(), &zone_stats_)) {
  DCHECK(maglev::IsMaglevEnabled());
}

MaglevCompilationJob::~MaglevCompilationJob() = default;

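// Prepare runs on the main thread. If source positions are needed (e.g. for
// profiling), they must be materialized here, while the isolate's heap is
// still directly accessible, before the job migrates to a background thread.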
CompilationJob::Status MaglevCompilationJob::PrepareJobImpl(Isolate* isolate) {
  BeginPhaseKind("V8.MaglevPrepareJob");
  if (info()->collect_source_positions()) {
    SharedFunctionInfo::EnsureSourcePositionsAvailable(
        isolate,
        info()->toplevel_compilation_unit()->shared_function_info().object());
  }
  EndPhaseKind();
  // TODO(v8:7700): Actual return codes.
  return CompilationJob::SUCCEEDED;
}

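// Execute may run on a background thread. The LocalIsolateScope attaches the
// compilation info's persistent handles to {local_isolate} for the duration
// of the compile and detaches them on scope exit, making the handle
// dereferences inside the compiler safe off the main thread.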
CompilationJob::Status MaglevCompilationJob::ExecuteJobImpl(
    RuntimeCallStats* stats, LocalIsolate* local_isolate) {
  BeginPhaseKind("V8.MaglevExecuteJob");
  LocalIsolateScope scope{info(), local_isolate};
  if (!maglev::MaglevCompiler::Compile(local_isolate, info())) {
    EndPhaseKind();
    bailout_reason_ = BailoutReason::kGraphBuildingFailed;
    return CompilationJob::FAILED;
  }
  EndPhaseKind();
  // TODO(v8:7700): Actual return codes.
  return CompilationJob::SUCCEEDED;
}

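// Finalize runs on the main thread again: it materializes the Code object,
// records a tiering decision for profile-guided optimization, and registers
// the maps the code embeds so the code can be deoptimized if they die.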
CompilationJob::Status MaglevCompilationJob::FinalizeJobImpl(Isolate* isolate) {
  BeginPhaseKind("V8.MaglevFinalizeJob");
  Handle<Code> code;
  auto [maybe_code, bailout_reason] =
      maglev::MaglevCompiler::GenerateCode(isolate, info());
  if (!maybe_code.ToHandle(&code)) {
    EndPhaseKind();
    bailout_reason_ = bailout_reason;
    return CompilationJob::FAILED;
  }
  // Functions with many inline candidates are sensitive to correct call
  // frequency feedback and should therefore not be tiered up early.
  if (v8_flags.profile_guided_optimization &&
      info()->could_not_inline_all_candidates() &&
      info()->toplevel_function()->shared()->cached_tiering_decision() !=
          CachedTieringDecision::kDelayMaglev) {
    info()->toplevel_function()->shared()->set_cached_tiering_decision(
        CachedTieringDecision::kNormal);
  }
  info()->set_code(code);
  GlobalHandleVector<Map> maps = CollectRetainedMaps(isolate, code);
  RegisterWeakObjectsInOptimizedCode(
      isolate, info()->broker()->target_native_context().object(), code,
      std::move(maps));
  EndPhaseKind();
  return CompilationJob::SUCCEEDED;
}

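// When code objects are assembled off the main thread
// (--maglev-build-code-on-background), the retained maps are taken from the
// code generator's bookkeeping instead of being collected from the finished
// Code object.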
GlobalHandleVector<Map> MaglevCompilationJob::CollectRetainedMaps(
    Isolate* isolate, DirectHandle<Code> code) {
  if (v8_flags.maglev_build_code_on_background) {
    return info()->code_generator()->RetainedMaps(isolate);
  }
  return OptimizedCompilationJob::CollectRetainedMaps(isolate, code);
}

void MaglevCompilationJob::DisposeOnMainThread(Isolate* isolate) {
  // Drop canonical handles on the main thread, to avoid (in the case of
  // background job destruction) needing to unpark the local isolate on the
  // background thread for unregistering the identity map's strong roots.
  DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
  info()->DetachCanonicalHandles()->Clear();
}

MaybeIndirectHandle<Code> MaglevCompilationJob::code() const {
  return info_->get_code();
}

IndirectHandle<JSFunction> MaglevCompilationJob::function() const {
  return info_->toplevel_function();
}

BytecodeOffset MaglevCompilationJob::osr_offset() const {
  return info_->toplevel_osr_offset();
}

bool MaglevCompilationJob::is_osr() const { return info_->toplevel_is_osr(); }

bool MaglevCompilationJob::specialize_to_function_context() const {
  return info_->specialize_to_function_context();
}

void MaglevCompilationJob::RecordCompilationStats(Isolate* isolate) const {
  // Don't record samples from machines without high-resolution timers,
  // as that can cause serious reporting issues. See the thread at
  // http://g/chrome-metrics-team/NwwJEyL8odU/discussion for more details.
  if (base::TimeTicks::IsHighResolution()) {
    Counters* const counters = isolate->counters();
    counters->maglev_optimize_prepare()->AddSample(
        static_cast<int>(time_taken_to_prepare_.InMicroseconds()));
    counters->maglev_optimize_execute()->AddSample(
        static_cast<int>(time_taken_to_execute_.InMicroseconds()));
    counters->maglev_optimize_finalize()->AddSample(
        static_cast<int>(time_taken_to_finalize_.InMicroseconds()));
    counters->maglev_optimize_total_time()->AddSample(
        static_cast<int>(ElapsedTime().InMicroseconds()));
  }
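  // With --trace-opt-stats, accumulate process-wide totals in function-local
  // statics so that every line reports a running total, e.g. (illustrative
  // numbers):
  //   [maglev] Compiled: 42 functions with 123456 byte source size in 97.3ms.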
  if (v8_flags.trace_opt_stats) {
    static double compilation_time = 0.0;
    static int compiled_functions = 0;
    static int code_size = 0;

    compilation_time += (time_taken_to_prepare_.InMillisecondsF() +
                         time_taken_to_execute_.InMillisecondsF() +
                         time_taken_to_finalize_.InMillisecondsF());
    compiled_functions++;
    code_size += function()->shared()->SourceSize();
    PrintF(
        "[maglev] Compiled: %d functions with %d byte source size in %fms.\n",
        compiled_functions, code_size, compilation_time);
  }
}

uint64_t MaglevCompilationJob::trace_id() const {
  // Xor together the this pointer, the info pointer, and the top level
  // function's Handle address, to try to make the id more unique on platforms
  // where just the `this` pointer is likely to be reused.
  return reinterpret_cast<uint64_t>(this) ^
         reinterpret_cast<uint64_t>(info_.get()) ^
         info_->toplevel_function().address() ^
         info_->toplevel_function()->shared()->function_literal_id();
}

void MaglevCompilationJob::BeginPhaseKind(const char* name) {
  if (V8_UNLIKELY(pipeline_statistics_ != nullptr)) {
    pipeline_statistics_->BeginPhaseKind(name);
  }
}

void MaglevCompilationJob::EndPhaseKind() {
  if (V8_UNLIKELY(pipeline_statistics_ != nullptr)) {
    pipeline_statistics_->EndPhaseKind();
  }
}

// The JobTask is posted to V8::GetCurrentPlatform(). It's responsible for
// processing the incoming queue on a worker thread.
class MaglevConcurrentDispatcher::JobTask final : public v8::JobTask {
 public:
  explicit JobTask(MaglevConcurrentDispatcher* dispatcher)
      : dispatcher_(dispatcher) {}

  void Run(JobDelegate* delegate) override {
    if (incoming_queue()->IsEmpty() && destruction_queue()->IsEmpty()) {
      return;
    }
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.MaglevTask");
    base::FlushDenormalsScope flush_denormals_scope(
        isolate()->flush_denormals());
    LocalIsolate local_isolate(isolate(), ThreadKind::kBackground);
    DCHECK(local_isolate.heap()->IsParked());

    std::unique_ptr<MaglevCompilationJob> job_to_destruct;
    while (!delegate->ShouldYield()) {
      std::unique_ptr<MaglevCompilationJob> job;
      if (incoming_queue()->Dequeue(&job)) {
        DCHECK_NOT_NULL(job);
        TRACE_EVENT_WITH_FLOW0(
            TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.MaglevBackground",
            job->trace_id(),
            TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);
        RCS_SCOPE(&local_isolate,
                  RuntimeCallCounterId::kOptimizeBackgroundMaglev);
        CompilationJob::Status status =
            job->ExecuteJob(local_isolate.runtime_call_stats(), &local_isolate);
        if (status == CompilationJob::SUCCEEDED) {
          outgoing_queue()->Enqueue(std::move(job));
          isolate()->stack_guard()->RequestInstallMaglevCode();
        }
      } else if (destruction_queue()->Dequeue(&job)) {
        // Maglev jobs aren't cheap to destruct, so destroy them here in the
        // background thread rather than on the main thread.
        DCHECK_NOT_NULL(job);
        TRACE_EVENT_WITH_FLOW0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
                               "V8.MaglevDestructBackground", job->trace_id(),
                               TRACE_EVENT_FLAG_FLOW_IN);
        UnparkedScope unparked_scope(&local_isolate);
        job.reset();
      } else {
        break;
      }
    }
  }

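  // Report how many workers could be usefully busy right now: one per queued
  // compilation or destruction, plus the workers that are already running,
  // optionally capped by --concurrent-maglev-max-threads.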
  size_t GetMaxConcurrency(size_t worker_count) const override {
    size_t num_tasks =
        incoming_queue()->size() + destruction_queue()->size() + worker_count;
    size_t max_threads = v8_flags.concurrent_maglev_max_threads;
    if (max_threads > 0) {
      return std::min(max_threads, num_tasks);
    }
    return num_tasks;
  }

 private:
  Isolate* isolate() const { return dispatcher_->isolate_; }
  QueueT* incoming_queue() const { return &dispatcher_->incoming_queue_; }
  QueueT* outgoing_queue() const { return &dispatcher_->outgoing_queue_; }
  QueueT* destruction_queue() const { return &dispatcher_->destruction_queue_; }

  MaglevConcurrentDispatcher* const dispatcher_;
};

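// Concurrent compilation is turned off whenever one of the Maglev tracing or
// printing flags is set; output from several compiler threads would otherwise
// interleave and become unreadable.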
MaglevConcurrentDispatcher::MaglevConcurrentDispatcher(Isolate* isolate)
    : isolate_(isolate) {
  bool enable = v8_flags.concurrent_recompilation && maglev::IsMaglevEnabled();
  if (enable) {
    bool is_tracing =
        v8_flags.print_maglev_code || v8_flags.trace_maglev_graph_building ||
        v8_flags.trace_maglev_inlining || v8_flags.print_maglev_deopt_verbose ||
        v8_flags.print_maglev_graph || v8_flags.print_maglev_graphs ||
        v8_flags.trace_maglev_phi_untagging || v8_flags.trace_maglev_regalloc;

    if (is_tracing) {
      PrintF("Concurrent maglev has been disabled for tracing.\n");
      enable = false;
    }
  }
  if (enable) {
    TaskPriority priority = v8_flags.concurrent_maglev_high_priority_threads
                                ? TaskPriority::kUserBlocking
                                : TaskPriority::kUserVisible;
    job_handle_ = V8::GetCurrentPlatform()->PostJob(
        priority, std::make_unique<JobTask>(this));
    DCHECK(is_enabled());
  } else {
    DCHECK(!is_enabled());
  }
}

MaglevConcurrentDispatcher::~MaglevConcurrentDispatcher() {
  if (is_enabled() && job_handle_->IsValid()) {
    // Wait for the job handle to complete, so that we know the queue
    // pointers are safe.
    job_handle_->Cancel();
  }
}

void MaglevConcurrentDispatcher::EnqueueJob(
    std::unique_ptr<MaglevCompilationJob>&& job) {
  DCHECK(is_enabled());
  incoming_queue_.Enqueue(std::move(job));
  job_handle_->NotifyConcurrencyIncrease();
}

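// Runs on the main thread, typically in response to the stack-guard interrupt
// that the background thread requests via RequestInstallMaglevCode() after a
// job succeeds.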
void MaglevConcurrentDispatcher::FinalizeFinishedJobs() {
  HandleScope handle_scope(isolate_);
  while (!outgoing_queue_.IsEmpty()) {
    std::unique_ptr<MaglevCompilationJob> job;
    outgoing_queue_.Dequeue(&job);
    TRACE_EVENT_WITH_FLOW0(
        TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.MaglevConcurrentFinalize",
        job->trace_id(), TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);
    RCS_SCOPE(isolate_,
              RuntimeCallCounterId::kOptimizeConcurrentFinalizeMaglev);
    Compiler::FinalizeMaglevCompilationJob(job.get(), isolate_);
    job->DisposeOnMainThread(isolate_);
    if (v8_flags.maglev_destroy_on_background) {
      // Maglev jobs aren't cheap to destruct, so re-enqueue them for
      // destruction on a background thread.
      destruction_queue_.Enqueue(std::move(job));
      job_handle_->NotifyConcurrencyIncrease();
    } else {
      TRACE_EVENT_WITH_FLOW0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
                             "V8.MaglevDestruct", job->trace_id(),
                             TRACE_EVENT_FLAG_FLOW_IN);
      job.reset();
    }
  }
}

void MaglevConcurrentDispatcher::AwaitCompileJobs() {
  // Use Join to wait until there are no more queued or running jobs.
  {
    AllowGarbageCollection allow_before_parking;
    isolate_->main_thread_local_isolate()->ExecuteMainThreadWhileParked(
        [this]() { job_handle_->Join(); });
  }
  // Join kills the job handle, so drop it and post a new one.
  TaskPriority priority = v8_flags.concurrent_maglev_high_priority_threads
                              ? TaskPriority::kUserBlocking
                              : TaskPriority::kUserVisible;
  job_handle_ = V8::GetCurrentPlatform()->PostJob(
      priority, std::make_unique<JobTask>(this));
  DCHECK(incoming_queue_.IsEmpty());
}

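// Flush drops all pending work: jobs that never started and queued
// destructions are disposed immediately, in-flight jobs are awaited when
// {behavior} asks for blocking, and jobs that finished compiling but were
// never installed are disposed as well.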
void MaglevConcurrentDispatcher::Flush(BlockingBehavior behavior) {
  while (!incoming_queue_.IsEmpty()) {
    std::unique_ptr<MaglevCompilationJob> job;
    if (incoming_queue_.Dequeue(&job)) {
      Compiler::DisposeMaglevCompilationJob(job.get(), isolate_);
    }
  }
  while (!destruction_queue_.IsEmpty()) {
    std::unique_ptr<MaglevCompilationJob> job;
    destruction_queue_.Dequeue(&job);
  }
  if (behavior == BlockingBehavior::kBlock && job_handle_->IsValid()) {
    AwaitCompileJobs();
  }
  while (!outgoing_queue_.IsEmpty()) {
    std::unique_ptr<MaglevCompilationJob> job;
    if (outgoing_queue_.Dequeue(&job)) {
      Compiler::DisposeMaglevCompilationJob(job.get(), isolate_);
    }
  }
}

}  // namespace maglev
}  // namespace internal
}  // namespace v8