constexpr size_t kMaxWorkersPerJob = 32;  // Capped so task_ids fit in a 32-bit bitfield.
21 "kInvalidTaskId must be outside of the range of valid task_ids "
22 "[0, kMaxWorkersPerJob)");
if (task_id_ == kInvalidTaskId) task_id_ = outer_->AcquireTaskId();
DefaultJobState::DefaultJobState(Platform* platform,
                                 std::unique_ptr<JobTask> job_task,
                                 TaskPriority priority,
                                 size_t num_worker_threads)
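// NotifyConcurrencyIncrease(): return early if the job was canceled, otherwise post enough
// additional worker tasks to reach the current max concurrency.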
if (is_canceled_.load(std::memory_order_relaxed)) return;
size_t num_tasks_to_post = 0;
for (size_t i = 0; i < num_tasks_to_post; ++i) {
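// AcquireTaskId(): claim the lowest free bit of the assigned_task_ids_ bitfield
// (located with CountTrailingZeros32 on the inverted mask) using a CAS loop.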
66 "TaskId bitfield isn't big enough to fit kMaxWorkersPerJob.");
uint32_t assigned_task_ids =
    assigned_task_ids_.load(std::memory_order_relaxed);
uint32_t new_assigned_task_ids = 0;
new_assigned_task_ids = assigned_task_ids | (uint32_t(1) << task_id);
} while (!assigned_task_ids_.compare_exchange_weak(
    assigned_task_ids, new_assigned_task_ids, std::memory_order_acquire,
    std::memory_order_relaxed));
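// ReleaseTaskId(): clear the task's bit with release ordering, pairing with the
// acquire on the successful CAS above.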
uint32_t previous_task_ids = assigned_task_ids_.fetch_and(
    ~(uint32_t(1) << task_id), std::memory_order_release);
DCHECK(previous_task_ids & (uint32_t(1) << task_id));
USE(previous_task_ids);  // Only read by the DCHECK above.
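// Join(): the calling thread participates in the job until GetMaxConcurrency() drops to
// zero. The helper below blocks until a participation slot becomes available.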
auto WaitForParticipationOpportunity = [this]() -> size_t {
if (max_concurrency != 0) return max_concurrency;
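// A result of 0 signals that the job is finished and the joining thread should stop.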
size_t num_tasks_to_post = 0;
size_t max_concurrency = WaitForParticipationOpportunity();
if (max_concurrency == 0) return;
for (size_t i = 0; i < num_tasks_to_post; ++i) {
  CallOnWorkerThread(TaskPriority::kUserBlocking,
                     std::make_unique<DefaultJobWorker>(shared_from_this(),
                                                        job_task_.get()));
}
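// The joining thread then runs the job itself in a loop, returning once
// WaitForParticipationOpportunity() reports that no work is left.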
if (WaitForParticipationOpportunity() == 0) return;
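// CanRunFirstTask(): a freshly posted worker refuses to start if the job has been
// canceled in the meantime.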
if (is_canceled_.load(std::memory_order_relaxed)) return false;
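// DidRunTask(): after each run, decide whether this worker keeps going and whether
// additional worker tasks should be posted.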
size_t num_tasks_to_post = 0;
for (size_t i = 0; i < num_tasks_to_post; ++i) {
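// CappedMaxConcurrency(): the concurrency reported by the JobTask is capped by the
// number of available worker threads.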
return std::min(job_task_->GetMaxConcurrency(worker_count),
                num_worker_threads_);
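// CallOnWorkerThread(): forward the worker task to the platform at the requested priority.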
void DefaultJobState::CallOnWorkerThread(TaskPriority priority,
                                         std::unique_ptr<Task> task) {
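// DefaultJobHandle::CancelAndDetach(): mark the job canceled without waiting for
// active workers to finish.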
state_->CancelAndDetach();