v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
optimizing-compile-dispatcher.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"

#include "src/base/fpu.h"
#include "src/base/logging.h"
#include "src/base/vector.h"
#include "src/init/v8.h"
#include "src/logging/log.h"

namespace v8 {
namespace internal {
class OptimizingCompileTaskExecutor::CompileTask : public v8::JobTask {
 public:
  explicit CompileTask(OptimizingCompileTaskExecutor* task_executor)
      : task_executor_(task_executor) {}

  void Run(JobDelegate* delegate) override {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.TurbofanTask");
    DCHECK_LT(delegate->GetTaskId(), task_executor_->task_states_.size());
    OptimizingCompileTaskState& task_state =
        task_executor_->task_states_[delegate->GetTaskId()];
    bool should_yield = delegate->ShouldYield();

    while (!should_yield) {
      // NextInput() sets the isolate for task_state to job->isolate() while
      // holding the lock.
      TurbofanCompilationJob* job = task_executor_->NextInput(task_state);
      if (!job) break;

      Isolate* const isolate = job->isolate();

      {
        base::FlushDenormalsScope flush_denormals_scope(
            isolate->flush_denormals());

        // Note that LocalIsolate's lifetime is shorter than the isolate value
        // in task_state which is only cleared after this LocalIsolate instance
        // was destroyed.
        LocalIsolate local_isolate(isolate, ThreadKind::kBackground);
        DCHECK(local_isolate.heap()->IsParked());

        do {
          task_executor_->RunCompilationJob(task_state, isolate, local_isolate,
                                            job);

          should_yield = delegate->ShouldYield();
          if (should_yield) break;

          // Reuse the LocalIsolate if the next worklist item has the same
          // isolate.
          job = task_executor_->NextInputIfIsolateMatches(task_state);
        } while (job);
      }

      // Reset the isolate in the task state to nullptr. Only do this after the
      // LocalIsolate was destroyed. This invariant is used by
      // WaitUntilTasksStoppedForIsolate() to ensure all tasks are stopped for
      // an isolate.
      task_executor_->ClearTaskState(task_state);
    }

    // Here we are allowed to read the isolate without holding a lock because
    // only this thread here will ever change this field and the main thread
    // will only ever read it.
    DCHECK_NULL(task_state.isolate);
  }

  size_t GetMaxConcurrency(size_t worker_count) const override {
    size_t num_tasks = task_executor_->input_queue_.Length() + worker_count;
    return std::min(num_tasks, task_executor_->task_states_.size());
  }

 private:
  OptimizingCompileTaskExecutor* task_executor_;
};

OptimizingCompileTaskExecutor::OptimizingCompileTaskExecutor()
    : input_queue_(v8_flags.concurrent_recompilation_queue_length),
      recompilation_delay_(v8_flags.concurrent_recompilation_delay) {}

OptimizingCompileTaskExecutor::~OptimizingCompileTaskExecutor() {
  if (job_handle_) {
    DCHECK(job_handle_->IsValid());

    // Wait for the job handle to complete, so that we know the queue
    // pointers are safe.
    job_handle_->Cancel();
  }
}

void OptimizingCompileTaskExecutor::EnsureInitialized() {
  if (is_initialized_) return;
  is_initialized_ = true;

  if (v8_flags.concurrent_recompilation ||
      v8_flags.concurrent_builtin_generation) {
    int max_tasks;

    if (v8_flags.concurrent_turbofan_max_threads == 0) {
      max_tasks = V8::GetCurrentPlatform()->NumberOfWorkerThreads();
    } else {
      max_tasks = v8_flags.concurrent_turbofan_max_threads;
    }

    task_states_ =
        base::OwnedVector<OptimizingCompileTaskState>::New(max_tasks);
    job_handle_ = V8::GetCurrentPlatform()->PostJob(
        kTaskPriority, std::make_unique<CompileTask>(this));
  }
}

TurbofanCompilationJob* OptimizingCompileTaskExecutor::NextInput(
    OptimizingCompileTaskState& task_state) {
  return input_queue_.Dequeue(task_state);
}

TurbofanCompilationJob*
OptimizingCompileTaskExecutor::NextInputIfIsolateMatches(
    OptimizingCompileTaskState& task_state) {
  return input_queue_.DequeueIfIsolateMatches(task_state);
}

void OptimizingCompileTaskExecutor::RunCompilationJob(
    OptimizingCompileTaskState& task_state, Isolate* isolate,
    LocalIsolate& local_isolate, TurbofanCompilationJob* job) {
  TRACE_EVENT_WITH_FLOW0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
                         "V8.OptimizeBackground", job->trace_id(),
                         TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);

  if (recompilation_delay_ != 0) {
    base::OS::Sleep(base::TimeDelta::FromMilliseconds(recompilation_delay_));
  }

  RCS_SCOPE(&local_isolate, RuntimeCallCounterId::kOptimizeBackgroundTurbofan);

  // The function may have already been optimized by OSR. Simply continue.
  CompilationJob::Status status =
      job->ExecuteJob(local_isolate.runtime_call_stats(), &local_isolate);
  USE(status);  // Prevent an unused-variable error.

  // Remove the job first from task_state before adding it to the output queue.
  // As soon as the job is in the output queue it could be deleted any moment.
  ResetJob(task_state);

  isolate->optimizing_compile_dispatcher()->QueueFinishedJob(job);
}

bool OptimizingCompileTaskExecutor::IsTaskRunningForIsolate(Isolate* isolate) {
  for (auto& task_state : task_states_) {
    if (task_state.isolate == isolate) {
      return true;
    }
  }

  return false;
}


void OptimizingCompileTaskExecutor::ResetJob(
    OptimizingCompileTaskState& task_state) {
  DCHECK_NOT_NULL(task_state.isolate);
  DCHECK_NOT_NULL(task_state.job);
  task_state.job = nullptr;
}
203
205 std::unique_ptr<TurbofanCompilationJob>& job) {
206 Isolate* isolate = job->isolate();
207 DCHECK_NOT_NULL(isolate);
208
209 if (input_queue_.Enqueue(job)) {
210 if (job_handle_->UpdatePriorityEnabled()) {
211 job_handle_->UpdatePriority(isolate->EfficiencyModeEnabledForTiering()
213 : kTaskPriority);
214 }
215 job_handle_->NotifyConcurrencyIncrease();
216 return true;
217 } else {
218 return false;
219 }
220}

void OptimizingCompileTaskExecutor::WaitUntilTasksStoppedForIsolate(
    Isolate* isolate) {
  // Once we have ensured that no task is working on the given isolate, we also
  // know that there are no more LocalHeaps for this isolate from CompileTask.
  // This is because CompileTask::Run() only updates the isolate once the
  // LocalIsolate/LocalHeap for it was destroyed.
  while (input_queue_.HasJobForIsolate(isolate) ||
         IsTaskRunningForIsolate(isolate)) {
  }
}

void OptimizingCompileTaskExecutor::CancelCompilationJobsForIsolate(
    Isolate* isolate) {
  for (auto& task_state : task_states_) {
    if (task_state.isolate == isolate && task_state.job) {
      task_state.job->Cancel();
    }
  }
}

OptimizingCompileDispatcher::OptimizingCompileDispatcher(
    Isolate* isolate, OptimizingCompileTaskExecutor* task_executor)
    : isolate_(isolate), task_executor_(task_executor) {}

void OptimizingCompileDispatcher::FlushOutputQueue() {
  for (;;) {
    std::unique_ptr<TurbofanCompilationJob> job = output_queue_.Dequeue();
    if (!job) break;
    Compiler::DisposeTurbofanCompilationJob(isolate_, job.get());
  }
}

void OptimizingCompileDispatcher::Flush(BlockingBehavior blocking_behavior) {
  HandleScope handle_scope(isolate_);
  FlushQueues(blocking_behavior);
  if (v8_flags.trace_concurrent_recompilation) {
    PrintF(" ** Flushed concurrent recompilation queues. (mode: %s)\n",
           (blocking_behavior == BlockingBehavior::kBlock) ? "blocking"
                                                           : "non blocking");
  }
}

void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
  HandleScope handle_scope(isolate_);

  for (;;) {
    std::unique_ptr<TurbofanCompilationJob> job = output_queue_.Dequeue();
    if (!job) break;

    OptimizedCompilationInfo* info = job->compilation_info();
    DirectHandle<JSFunction> function(*info->closure(), isolate_);

    // If another racing task has already finished compiling and installing the
    // requested code kind on the function, throw out the current job.
    if (!info->is_osr() &&
        function->HasAvailableCodeKind(isolate_, info->code_kind())) {
      if (v8_flags.trace_concurrent_recompilation) {
        PrintF(" ** Aborting compilation for ");
        ShortPrint(*function);
        PrintF(" as it has already been optimized.\n");
      }
      Compiler::DisposeTurbofanCompilationJob(isolate_, job.get());
      continue;
    }
    // Discard code compiled for a discarded native context without
    // finalization.
    if (function->native_context()->global_object()->IsDetached()) {
      Compiler::DisposeTurbofanCompilationJob(isolate_, job.get());
      continue;
    }

    Compiler::FinalizeTurbofanCompilationJob(job.get(), isolate_);
  }
}

bool OptimizingCompileDispatcher::TryQueueForOptimization(
    std::unique_ptr<TurbofanCompilationJob>& job) {
  return task_executor_->TryQueueForOptimization(job);
}

void OptimizingCompileInputQueue::Prioritize(
    Isolate* isolate, Tagged<SharedFunctionInfo> function) {
  // Ensure that we only run this method on the main thread. This makes sure
  // that we never dereference handles during a safepoint.
  DCHECK_EQ(isolate->thread_id(), ThreadId::Current());
  base::MutexGuard access(&mutex_);
  auto it =
      std::find_if(queue_.begin(), queue_.end(),
                   [isolate, function](TurbofanCompilationJob* job) {
                     // Early bailout to avoid dereferencing handles from other
                     // isolates. The other isolate could be in a safepoint/GC
                     // and dereferencing the handle is therefore invalid.
                     if (job->isolate() != isolate) return false;
                     return *job->compilation_info()->shared_info() == function;
                   });

  if (it != queue_.end()) {
    auto first_for_isolate = std::find_if(
        queue_.begin(), queue_.end(), [isolate](TurbofanCompilationJob* job) {
          return job->isolate() == isolate;
        });
    DCHECK_NE(first_for_isolate, queue_.end());
    std::iter_swap(it, first_for_isolate);
  }
}

void OptimizingCompileInputQueue::FlushJobsForIsolate(Isolate* isolate) {
  base::MutexGuard access(&mutex_);
  std::erase_if(queue_, [isolate](TurbofanCompilationJob* job) {
    if (job->isolate() != isolate) return false;
    delete job;
    return true;
  });
}

bool OptimizingCompileInputQueue::HasJobForIsolate(Isolate* isolate) {
  return std::find_if(queue_.begin(), queue_.end(),
                      [isolate](TurbofanCompilationJob* job) {
                        return job->isolate() == isolate;
                      }) != queue_.end();
}

TurbofanCompilationJob* OptimizingCompileInputQueue::Dequeue(
    OptimizingCompileTaskState& task_state) {
  base::MutexGuard access(&mutex_);
  DCHECK_NULL(task_state.isolate);
  if (queue_.empty()) return nullptr;
  TurbofanCompilationJob* job = queue_.front();
  queue_.pop_front();
  DCHECK_NOT_NULL(job);
  task_state.isolate = job->isolate();
  task_state.job = job;
  return job;
}

TurbofanCompilationJob* OptimizingCompileInputQueue::DequeueIfIsolateMatches(
    OptimizingCompileTaskState& task_state) {
  base::MutexGuard access(&mutex_);
  if (queue_.empty()) return nullptr;
  TurbofanCompilationJob* job = queue_.front();
  DCHECK_NOT_NULL(job);
  if (job->isolate() != task_state.isolate) return nullptr;
  DCHECK_NULL(task_state.job);
  task_state.job = job;
  queue_.pop_front();
  return job;
}

bool OptimizingCompileInputQueue::Enqueue(
    std::unique_ptr<TurbofanCompilationJob>& job) {
  base::MutexGuard access(&mutex_);
  if (queue_.size() < capacity_) {
    queue_.push_back(job.release());
    return true;
  } else {
    return false;
  }
}


std::unique_ptr<TurbofanCompilationJob>
OptimizingCompileOutputQueue::Dequeue() {
  base::MutexGuard guard(&mutex_);
  if (queue_.empty()) return {};
  std::unique_ptr<TurbofanCompilationJob> job(queue_.front());
  queue_.pop_front();
  return job;
}

size_t OptimizingCompileOutputQueue::size() const { return queue_.size(); }

bool OptimizingCompileOutputQueue::empty() const { return queue_.empty(); }

int OptimizingCompileOutputQueue::InstallGeneratedBuiltins(
    Isolate* isolate, int installed_count) {
  // Builtin generation needs to be deterministic, meaning heap allocations must
  // happen in a deterministic order. To ensure determinism with concurrent
  // compilation, only finalize contiguous builtins in ascending order of their
  // finalization order, which is set at job creation time.

  CHECK(isolate->IsGeneratingEmbeddedBuiltins());

  base::MutexGuard guard(&mutex_);

  std::sort(queue_.begin(), queue_.end(),
            [](const TurbofanCompilationJob* job1,
               const TurbofanCompilationJob* job2) {
              return job1->FinalizeOrder() < job2->FinalizeOrder();
            });

  while (!queue_.empty()) {
    int current = queue_.front()->FinalizeOrder();
    CHECK_EQ(installed_count, current);
    std::unique_ptr<TurbofanCompilationJob> job(queue_.front());
    queue_.pop_front();
    CHECK_EQ(CompilationJob::SUCCEEDED, job->FinalizeJob(isolate));
    installed_count = current + 1;
  }
  return installed_count;
}

}  // namespace internal
}  // namespace v8
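
CompileTask above is an implementation of the public v8::JobTask interface from include/v8-platform.h. The standalone sketch below (not part of this file; WorkQueue, DrainQueueTask, and kMaxTasks are hypothetical names) shows the same pattern in isolation: Run() drains work until the scheduler asks the worker to yield, and GetMaxConcurrency() reports pending work plus the workers already running, capped by a fixed task limit, mirroring CompileTask::GetMaxConcurrency().

// Minimal sketch of the v8::JobTask pattern; illustrative only, not V8-internal code.
#include <algorithm>
#include <deque>
#include <memory>
#include <mutex>

#include "v8-platform.h"

struct WorkQueue {
  std::mutex mutex;
  std::deque<int> items;

  size_t Length() {
    std::lock_guard<std::mutex> lock(mutex);
    return items.size();
  }

  bool Pop(int* out) {
    std::lock_guard<std::mutex> lock(mutex);
    if (items.empty()) return false;
    *out = items.front();
    items.pop_front();
    return true;
  }
};

class DrainQueueTask : public v8::JobTask {
 public:
  explicit DrainQueueTask(WorkQueue* queue) : queue_(queue) {}

  void Run(v8::JobDelegate* delegate) override {
    // Like CompileTask::Run(): keep taking work until the queue is empty or
    // the scheduler asks this worker to yield.
    int item;
    while (!delegate->ShouldYield() && queue_->Pop(&item)) {
      Process(item);
    }
  }

  size_t GetMaxConcurrency(size_t worker_count) const override {
    // Same shape as CompileTask::GetMaxConcurrency(): pending work plus the
    // workers already running, capped by a fixed number of task slots.
    return std::min(queue_->Length() + worker_count, kMaxTasks);
  }

 private:
  static constexpr size_t kMaxTasks = 4;
  void Process(int) {}
  WorkQueue* queue_;
};

// Usage (given a v8::Platform* from the embedder's initialization):
//   std::unique_ptr<v8::JobHandle> handle = platform->PostJob(
//       v8::TaskPriority::kUserVisible,
//       std::make_unique<DrainQueueTask>(&queue));
//   ... enqueue more work ...
//   handle->NotifyConcurrencyIncrease();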
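
The ordering rule enforced by OptimizingCompileOutputQueue::InstallGeneratedBuiltins() can be restated as a small self-contained program (FakeJob and InstallContiguous below are illustrative names, not V8 code): concurrent workers may finish builtin jobs out of order, so finished jobs are sorted by their pre-assigned finalization index and then installed strictly in ascending order starting at installed_count; as in the real code's CHECK_EQ, a gap in the sequence is treated as a caller bug rather than skipped.

// Illustrative only; mirrors the ordering rule, not the V8 implementation.
#include <algorithm>
#include <cassert>
#include <deque>

struct FakeJob {
  int finalize_order;  // fixed when the job is created, like FinalizeOrder()
};

// Install every queued result, requiring that after sorting they form a
// contiguous run starting at installed_count.
int InstallContiguous(std::deque<FakeJob>& ready, int installed_count) {
  std::sort(ready.begin(), ready.end(),
            [](const FakeJob& a, const FakeJob& b) {
              return a.finalize_order < b.finalize_order;
            });
  while (!ready.empty()) {
    int current = ready.front().finalize_order;
    assert(current == installed_count);  // a gap would be a bug in the caller
    // ... FinalizeJob(isolate) would run here ...
    ready.pop_front();
    installed_count = current + 1;
  }
  return installed_count;
}

int main() {
  // Jobs completed out of order by background workers.
  std::deque<FakeJob> ready = {{2}, {0}, {1}};
  int installed_count = InstallContiguous(ready, 0);
  assert(installed_count == 3);  // builtins 0, 1, 2 installed in ascending order
  return 0;
}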