cpu-profiler.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/profiler/cpu-profiler.h"

#include <unordered_map>
#include <utility>

#include "include/v8-locker.h"
#include "src/debug/debug.h"
#include "src/logging/log.h"

#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-engine.h"
#endif  // V8_ENABLE_WEBASSEMBLY

namespace v8 {
namespace internal {

static const int kProfilerStackSize = 256 * KB;

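// CpuSampler hooks into the platform sampling mechanism: SampleStack() is
// invoked asynchronously with the interrupted thread's register state and
// records a TickSample into the SamplingEventsProcessor's buffer, bailing out
// (and dropping the sample) when the isolate is not usable from this thread.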
class CpuSampler : public sampler::Sampler {
 public:
  CpuSampler(Isolate* isolate, SamplingEventsProcessor* processor)
      : sampler::Sampler(reinterpret_cast<v8::Isolate*>(isolate)),
        processor_(processor),
        perThreadData_(isolate->FindPerThreadDataForThisThread()) {}

  void SampleStack(const v8::RegisterState& regs) override {
    Isolate* isolate = reinterpret_cast<Isolate*>(this->isolate());
    if (isolate->was_locker_ever_used() &&
        (!isolate->thread_manager()->IsLockedByThread(
             perThreadData_->thread_id()) ||
         perThreadData_->thread_state() != nullptr)) {
      ProfilerStats::Instance()->AddReason(
          ProfilerStats::Reason::kIsolateNotLocked);
      return;
    }
#if V8_HEAP_USE_PKU_JIT_WRITE_PROTECT
    i::RwxMemoryWriteScope::SetDefaultPermissionsForSignalHandler();
#endif
    TickSample* sample = processor_->StartTickSample();
    if (sample == nullptr) {
      ProfilerStats::Instance()->AddReason(
          ProfilerStats::Reason::kTickBufferFull);
      return;
    }
    // Every bailout up until here resulted in a dropped sample. From now on,
    // the sample is created in the buffer.
    sample->Init(isolate, regs, TickSample::kIncludeCEntryFrame,
                 /* update_stats */ true,
                 /* use_simulator_reg_state */ true, processor_->period());
    if (is_counting_samples_ && !sample->timestamp.IsNull()) {
      if (sample->state == JS) ++js_sample_count_;
      if (sample->state == EXTERNAL) ++external_sample_count_;
    }
    processor_->FinishTickSample();
  }

 private:
  SamplingEventsProcessor* processor_;
  Isolate::PerIsolateThreadData* perThreadData_;
};

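// ProfilingScope keeps the isolate in "profiling" mode for the lifetime of at
// least one CPU profiler: it bumps the per-isolate profiler count, attaches
// the ProfilerListener to the logger, and replays already-compiled code so the
// code map starts out populated.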
ProfilingScope::ProfilingScope(Isolate* isolate, ProfilerListener* listener)
    : isolate_(isolate), listener_(listener) {
  size_t profiler_count = isolate_->num_cpu_profilers();
  profiler_count++;
  isolate_->set_num_cpu_profilers(profiler_count);
  isolate_->SetIsProfiling(true);
#if V8_ENABLE_WEBASSEMBLY
  wasm::GetWasmEngine()->EnableCodeLogging(isolate_);
#endif  // V8_ENABLE_WEBASSEMBLY

  CHECK(isolate_->logger()->AddListener(listener_));
  V8FileLogger* file_logger = isolate_->v8_file_logger();
  // Populate the ProfilerCodeObserver with the initial functions and
  // callbacks on the heap.
  DCHECK(isolate_->heap()->HasBeenSetUp());

  if (!v8_flags.prof_browser_mode) {
    file_logger->LogCodeObjects();
  }
  file_logger->LogCompiledFunctions();
  file_logger->LogAccessorCallbacks();
}

ProfilingScope::~ProfilingScope() {
  CHECK(isolate_->logger()->RemoveListener(listener_));

  size_t profiler_count = isolate_->num_cpu_profilers();
  DCHECK_GT(profiler_count, 0);
  profiler_count--;
  isolate_->set_num_cpu_profilers(profiler_count);
  if (profiler_count == 0) isolate_->SetIsProfiling(false);
}

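// ProfilerEventsProcessor is a dedicated thread that consumes code events and
// tick samples; SamplingEventsProcessor additionally owns the CpuSampler and
// drives periodic sampling at the configured interval.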
ProfilerEventsProcessor::ProfilerEventsProcessor(
    Isolate* isolate, Symbolizer* symbolizer,
    ProfilerCodeObserver* code_observer, CpuProfilesCollection* profiles)
    : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
      symbolizer_(symbolizer),
      code_observer_(code_observer),
      profiles_(profiles),
      last_code_event_id_(0),
      last_processed_code_event_id_(0),
      isolate_(isolate) {
  DCHECK(!code_observer_->processor());
  code_observer_->set_processor(this);
}

SamplingEventsProcessor::SamplingEventsProcessor(
    Isolate* isolate, Symbolizer* symbolizer,
    ProfilerCodeObserver* code_observer, CpuProfilesCollection* profiles,
    base::TimeDelta period, bool use_precise_sampling)
    : ProfilerEventsProcessor(isolate, symbolizer, code_observer, profiles),
      sampler_(new CpuSampler(isolate, this)),
      period_(period),
      use_precise_sampling_(use_precise_sampling) {
#if V8_OS_WIN
  precise_sleep_timer_.TryInit();
#endif  // V8_OS_WIN

  sampler_->Start();
}

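// Code events arrive via Enqueue() on the locked events buffer, while stacks
// captured synchronously on the VM thread (deopts, explicit sample requests)
// go through ticks_from_vm_buffer_, tagged with the id of the last enqueued
// code event so samples and code events can be replayed in order.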
void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) {
  event.generic.order = ++last_code_event_id_;
  events_buffer_.Enqueue(event);
}

void ProfilerEventsProcessor::AddDeoptStack(Address from, int fp_to_sp_delta) {
  TickSampleEventRecord record(last_code_event_id_);
  RegisterState regs;
  Address fp = isolate_->c_entry_fp(isolate_->thread_local_top());
  regs.sp = reinterpret_cast<void*>(fp - fp_to_sp_delta);
  regs.fp = reinterpret_cast<void*>(fp);
  regs.pc = reinterpret_cast<void*>(from);
  record.sample.Init(isolate_, regs, TickSample::kSkipCEntryFrame, false,
                     false);
  ticks_from_vm_buffer_.Enqueue(record);
}

void ProfilerEventsProcessor::AddCurrentStack(
    bool update_stats, const std::optional<uint64_t> trace_id) {
  TickSampleEventRecord record(last_code_event_id_);
  RegisterState regs;
  StackFrameIterator it(isolate_, isolate_->thread_local_top());
  if (!it.done()) {
    StackFrame* frame = it.frame();
    regs.sp = reinterpret_cast<void*>(frame->sp());
    regs.fp = reinterpret_cast<void*>(frame->fp());
    regs.pc = reinterpret_cast<void*>(frame->pc());
  }
  record.sample.Init(isolate_, regs, TickSample::kSkipCEntryFrame, update_stats,
                     false, base::TimeDelta(), trace_id);
  ticks_from_vm_buffer_.Enqueue(record);
}

void ProfilerEventsProcessor::StopSynchronously() {
  bool expected = true;
  if (!running_.compare_exchange_strong(expected, false,
                                        std::memory_order_relaxed))
    return;
  {
    base::MutexGuard guard(&running_mutex_);
    running_cond_.NotifyOne();
  }
  Join();
}


bool ProfilerEventsProcessor::ProcessCodeEvent() {
  CodeEventsContainer record;
  if (events_buffer_.Dequeue(&record)) {
    if (record.generic.type == CodeEventRecord::Type::kNativeContextMove) {
      NativeContextMoveEventRecord& nc_record =
          record.NativeContextMoveEventRecord_;
      profiles_->UpdateNativeContextAddressForCurrentProfiles(
          nc_record.from_address, nc_record.to_address);
    } else {
      code_observer_->CodeEventHandlerInternal(record);
    }
    last_processed_code_event_id_ = record.generic.order;
    return true;
  }
  return false;
}

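// Routes incoming code events: most are simply enqueued for the processor
// thread, but deopt events also capture the current stack immediately so the
// deopted frame can be attributed correctly.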
void ProfilerEventsProcessor::CodeEventHandler(
    const CodeEventsContainer& evt_rec) {
  switch (evt_rec.generic.type) {
    case CodeEventRecord::Type::kCodeCreation:
    case CodeEventRecord::Type::kCodeMove:
    case CodeEventRecord::Type::kCodeDisableOpt:
    case CodeEventRecord::Type::kCodeDelete:
    case CodeEventRecord::Type::kNativeContextMove:
      Enqueue(evt_rec);
      break;
    case CodeEventRecord::Type::kCodeDeopt: {
      const CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
      Address pc = rec->pc;
      int fp_to_sp_delta = rec->fp_to_sp_delta;
      Enqueue(evt_rec);
      AddDeoptStack(pc, fp_to_sp_delta);
      break;
    }
    case CodeEventRecord::Type::kNoEvent:
    case CodeEventRecord::Type::kReportBuiltin:
      UNREACHABLE();
  }
}

void SamplingEventsProcessor::SymbolizeAndAddToProfiles(
    const TickSampleEventRecord* record) {
  const TickSample& tick_sample = record->sample;
  Symbolizer::SymbolizedSample symbolized =
      symbolizer_->SymbolizeTickSample(tick_sample);
  profiles_->AddPathToCurrentProfiles(
      tick_sample.timestamp, symbolized.stack_trace, symbolized.src_line,
      tick_sample.update_stats_, tick_sample.sampling_interval_,
      tick_sample.state, tick_sample.embedder_state,
      reinterpret_cast<Address>(tick_sample.context),
      reinterpret_cast<Address>(tick_sample.embedder_context),
      tick_sample.trace_id_);
}

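// The sampling thread's main loop: drain already-collected samples and code
// events, wait out the remainder of the sampling period (with a high
// resolution timer on Windows when available), then request the next sample.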

void SamplingEventsProcessor::Run() {
  // Set the current isolate such that trusted pointer tables etc are available.
  SetCurrentIsolateScope isolate_scope(isolate_);
  base::MutexGuard guard(&running_mutex_);
  while (running_.load(std::memory_order_relaxed)) {
    base::TimeTicks nextSampleTime = base::TimeTicks::Now() + period_;
    base::TimeTicks now;
    SampleProcessingResult result;
    // Keep processing existing events until we need to do next sample
    // or the ticks buffer is empty.
    do {
      result = ProcessOneSample();
      if (result == FoundSampleForNextCodeEvent) {
        // All ticks of the current last_processed_code_event_id_ are
        // processed, proceed to the next code event.
        ProcessCodeEvent();
      }
      now = base::TimeTicks::Now();
    } while (result != NoSamplesInQueue && now < nextSampleTime);

    if (nextSampleTime > now) {
#if V8_OS_WIN
      if (use_precise_sampling_ &&
          nextSampleTime - now < base::TimeDelta::FromMilliseconds(100)) {
        if (precise_sleep_timer_.IsInitialized()) {
          precise_sleep_timer_.Sleep(nextSampleTime - now);
        } else {
          // Do not use Sleep on Windows as it is very imprecise, with up to
          // 16ms jitter, which is unacceptable for short profile intervals.
          while (base::TimeTicks::Now() < nextSampleTime) {
          }
        }
      } else  // NOLINT
#else
      USE(use_precise_sampling_);
#endif  // V8_OS_WIN
      {
        // Allow another thread to interrupt the delay between samples in the
        // event of profiler shutdown.
        while (now < nextSampleTime &&
               running_cond_.WaitFor(&running_mutex_, nextSampleTime - now)) {
          // If true was returned, we got interrupted before the timeout
          // elapsed. If this was not due to a change in running state, a
          // spurious wakeup occurred (thus we should continue to wait).
          if (!running_.load(std::memory_order_relaxed)) {
            break;
          }
          now = base::TimeTicks::Now();
        }
      }
    }

    // Schedule next sample.
    sampler_->DoSample();
  }

  // Process remaining tick events.
  do {
    SampleProcessingResult result;
    do {
      result = ProcessOneSample();
    } while (result == OneSampleProcessed);
  } while (ProcessCodeEvent());
}

void SamplingEventsProcessor::SetSamplingInterval(base::TimeDelta period) {
  if (period_ == period) return;
  StopSynchronously();

  period_ = period;
  running_.store(true, std::memory_order_relaxed);

  CHECK(StartSynchronously());
}

void* SamplingEventsProcessor::operator new(size_t size) {
  return AlignedAllocWithRetry(size, alignof(SamplingEventsProcessor));
}

void SamplingEventsProcessor::operator delete(void* ptr) { AlignedFree(ptr); }

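// ProfilerCodeObserver owns the code entries and the instruction-stream code
// map. While a processor thread is attached it forwards code events to it;
// otherwise (eager logging with no active profile) it applies them to the code
// map directly.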
ProfilerCodeObserver::ProfilerCodeObserver(Isolate* isolate,
                                           CodeEntryStorage& storage)
    : isolate_(isolate),
      code_entries_(storage),
      code_map_(storage),
      weak_code_registry_(isolate),
      processor_(nullptr) {
  CreateEntriesForRuntimeCallStats();
  LogBuiltins();
}

void ProfilerCodeObserver::CodeEventHandler(
    const CodeEventsContainer& evt_rec) {
  if (processor_) {
    processor_->CodeEventHandler(evt_rec);
    return;
  }
  CodeEventHandlerInternal(evt_rec);
}

size_t ProfilerCodeObserver::GetEstimatedMemoryUsage() const {
  // To avoid a race on the code map, for now only compute the estimate in
  // kEagerLogging mode (i.e. when no processor thread is attached).
  if (!processor_) {
    return sizeof(*this) + code_map_.GetEstimatedMemoryUsage() +
           code_entries_.strings().GetStringSize();
  }
  return 0;
}

void ProfilerCodeObserver::CodeEventHandlerInternal(
    const CodeEventsContainer& evt_rec) {
  CodeEventsContainer record = evt_rec;
  switch (evt_rec.generic.type) {
#define PROFILER_TYPE_CASE(type, clss)        \
  case CodeEventRecord::Type::type:           \
    record.clss##_.UpdateCodeMap(&code_map_); \
    break;

    CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE)

#undef PROFILER_TYPE_CASE
    default:
      break;
  }
}

void ProfilerCodeObserver::CreateEntriesForRuntimeCallStats() {
#ifdef V8_RUNTIME_CALL_STATS
  RuntimeCallStats* rcs = isolate_->counters()->runtime_call_stats();
  for (int i = 0; i < RuntimeCallStats::kNumberOfCounters; ++i) {
    RuntimeCallCounter* counter = rcs->GetCounter(i);
    DCHECK(counter->name());
    auto entry = code_entries_.Create(LogEventListener::CodeTag::kFunction,
                                      counter->name(), "native V8Runtime");
    code_map_.AddCode(reinterpret_cast<Address>(counter), entry, 1);
  }
#endif  // V8_RUNTIME_CALL_STATS
}

void ProfilerCodeObserver::LogBuiltins() {
  Builtins* builtins = isolate_->builtins();
  DCHECK(builtins->is_initialized());
  for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
       ++builtin) {
    CodeEventsContainer evt_rec(CodeEventRecord::Type::kReportBuiltin);
    ReportBuiltinEventRecord* rec = &evt_rec.ReportBuiltinEventRecord_;
    Tagged<Code> code = builtins->code(builtin);
    rec->instruction_start = code->instruction_start();
    rec->instruction_size = code->instruction_size();
    rec->builtin = builtin;
    CodeEventHandlerInternal(evt_rec);
  }
}

int CpuProfiler::GetProfilesCount() {
  // The count of profiles doesn't depend on a security token.
  return static_cast<int>(profiles_->profiles()->size());
}


CpuProfile* CpuProfiler::GetProfile(int index) {
  return profiles_->profiles()->at(index).get();
}


void CpuProfiler::DeleteProfile(CpuProfile* profile) {
  profiles_->RemoveProfile(profile);
  if (profiles_->profiles()->empty() && !is_profiling_) {
    // If this was the last profile, clean up all accessory data as well.
    ResetProfiles();
  }
}

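// Process-wide registry of active CpuProfilers, keyed by isolate. It lets the
// static entry points (CollectSample, GetAllProfilersMemorySize) fan out to
// every profiler attached to a given isolate.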
namespace {

class CpuProfilersManager {
 public:
  void AddProfiler(Isolate* isolate, CpuProfiler* profiler) {
    base::MutexGuard lock(&mutex_);
    profilers_.emplace(isolate, profiler);
  }

  void RemoveProfiler(Isolate* isolate, CpuProfiler* profiler) {
    base::MutexGuard lock(&mutex_);
    auto range = profilers_.equal_range(isolate);
    for (auto it = range.first; it != range.second; ++it) {
      if (it->second != profiler) continue;
      profilers_.erase(it);
      return;
    }
    UNREACHABLE();
  }

  void CallCollectSample(Isolate* isolate,
                         const std::optional<uint64_t> trace_id) {
    base::MutexGuard lock(&mutex_);
    auto range = profilers_.equal_range(isolate);
    for (auto it = range.first; it != range.second; ++it) {
      it->second->CollectSample(trace_id);
    }
  }

  size_t GetAllProfilersMemorySize(Isolate* isolate) {
    base::MutexGuard lock(&mutex_);
    size_t estimated_memory = 0;
    auto range = profilers_.equal_range(isolate);
    for (auto it = range.first; it != range.second; ++it) {
      estimated_memory += it->second->GetEstimatedMemoryUsage();
    }
    return estimated_memory;
  }

 private:
  std::unordered_multimap<Isolate*, CpuProfiler*> profilers_;
  base::Mutex mutex_;
};

DEFINE_LAZY_LEAKY_OBJECT_GETTER(CpuProfilersManager, GetProfilersManager)

}  // namespace

CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilingNamingMode naming_mode,
                         CpuProfilingLoggingMode logging_mode)
    : CpuProfiler(isolate, naming_mode, logging_mode,
                  new CpuProfilesCollection(isolate), nullptr, nullptr,
                  new ProfilerCodeObserver(isolate, code_entries_)) {}

CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilingNamingMode naming_mode,
                         CpuProfilingLoggingMode logging_mode,
                         CpuProfilesCollection* test_profiles,
                         Symbolizer* test_symbolizer,
                         ProfilerEventsProcessor* test_processor,
                         ProfilerCodeObserver* test_code_observer)
    : isolate_(isolate),
      naming_mode_(naming_mode),
      logging_mode_(logging_mode),
      base_sampling_interval_(base::TimeDelta::FromMicroseconds(
          v8_flags.cpu_profiler_sampling_interval)),
      code_observer_(test_code_observer),
      profiles_(test_profiles),
      symbolizer_(test_symbolizer),
      processor_(test_processor),
      is_profiling_(false) {
  profiles_->set_cpu_profiler(this);
  GetProfilersManager()->AddProfiler(isolate, this);

  if (logging_mode == kEagerLogging) EnableLogging();
}

CpuProfiler::~CpuProfiler() {
  DCHECK(!is_profiling_);
  GetProfilersManager()->RemoveProfiler(isolate_, this);

  DisableLogging();
  profiles_.reset();

  // We don't currently expect any references to refcounted strings to be
  // maintained with zero profiles after the code map is cleared.
  CHECK(code_entries_.strings().empty());
}



void CpuProfiler::ResetProfiles() {
  profiles_.reset(new CpuProfilesCollection(isolate_));
  profiles_->set_cpu_profiler(this);
}

void CpuProfiler::EnableLogging() {
  if (profiling_scope_) return;

  if (!profiler_listener_) {
    profiler_listener_.reset(new ProfilerListener(
        isolate_, code_observer_.get(), *code_observer_->code_entries(),
        *code_observer_->weak_code_registry(), naming_mode_));
  }
  profiling_scope_.reset(
      new ProfilingScope(isolate_, profiler_listener_.get()));
}

void CpuProfiler::DisableLogging() {
  if (!profiling_scope_) return;

  DCHECK(profiler_listener_);
  profiling_scope_.reset();
  profiler_listener_.reset();
  code_observer_->ClearCodeMap();
}

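// The effective sampling interval is recomputed from the set of currently
// running profiles (each may request its own interval) and pushed to the
// processor whenever a profile starts or stops.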
base::TimeDelta CpuProfiler::ComputeSamplingInterval() {
  return profiles_->GetCommonSamplingInterval();
}

void CpuProfiler::AdjustSamplingInterval() {
  if (!processor_) return;

  base::TimeDelta base_interval = ComputeSamplingInterval();
  processor_->SetSamplingInterval(base_interval);
}

// static
// |trace_id| is an optional identifier stored in the sample record used
// to associate the sample with a trace event.
void CpuProfiler::CollectSample(Isolate* isolate,
                                const std::optional<uint64_t> trace_id) {
  GetProfilersManager()->CallCollectSample(isolate, trace_id);
}

void CpuProfiler::CollectSample(const std::optional<uint64_t> trace_id) {
  if (processor_) {
    processor_->AddCurrentStack(false, trace_id);
  }
}

// static
size_t CpuProfiler::GetAllProfilersMemorySize(Isolate* isolate) {
  return GetProfilersManager()->GetAllProfilersMemorySize(isolate);
}

size_t CpuProfiler::GetEstimatedMemoryUsage() const {
  return code_observer_->GetEstimatedMemoryUsage();
}

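// Starting a profile registers it with the CpuProfilesCollection, adjusts the
// sampling interval, and spins up the sampling processor thread if it is not
// running yet. Optionally a script "rundown" is emitted to the tracing
// categories used by DevTools.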
CpuProfilingResult CpuProfiler::StartProfiling(
    CpuProfilingOptions options,
    std::unique_ptr<DiscardedSamplesDelegate> delegate) {
  return StartProfiling(nullptr, std::move(options), std::move(delegate));
}

CpuProfilingResult CpuProfiler::StartProfiling(
    const char* title, CpuProfilingOptions options,
    std::unique_ptr<DiscardedSamplesDelegate> delegate) {
  CpuProfilingResult result =
      profiles_->StartProfiling(title, std::move(options), std::move(delegate));

  // TODO(nicodubus): Revisit logic for if we want to do anything different for
  // kAlreadyStarted
  if (result.status == CpuProfilingStatus::kStarted ||
      result.status == CpuProfilingStatus::kAlreadyStarted) {
    TRACE_EVENT0("v8", "CpuProfiler::StartProfiling");
    AdjustSamplingInterval();
    StartProcessorIfNotStarted();

    // Collect script rundown at the start of profiling if trace category is
    // turned on
    bool source_rundown_trace_enabled;
    bool source_rundown_sources_trace_enabled;
    TRACE_EVENT_CATEGORY_GROUP_ENABLED(
        TRACE_DISABLED_BY_DEFAULT("devtools.v8-source-rundown"),
        &source_rundown_trace_enabled);
    TRACE_EVENT_CATEGORY_GROUP_ENABLED(
        TRACE_DISABLED_BY_DEFAULT("devtools.v8-source-rundown-sources"),
        &source_rundown_sources_trace_enabled);
    if (source_rundown_trace_enabled || source_rundown_sources_trace_enabled) {
      Handle<WeakArrayList> script_objects = isolate_->factory()->script_list();
      for (int i = 0; i < script_objects->length(); i++) {
        if (Tagged<HeapObject> script_object;
            script_objects->get(i).GetHeapObjectIfWeak(&script_object)) {
          Tagged<Script> script(Cast<Script>(script_object));
          if (source_rundown_trace_enabled) {
            script->TraceScriptRundown();
          }
          if (source_rundown_sources_trace_enabled) {
            script->TraceScriptRundownSources();
          }
        }
      }
    }
  }
  return result;
}

CpuProfilingResult CpuProfiler::StartProfiling(
    Tagged<String> title, CpuProfilingOptions options,
    std::unique_ptr<DiscardedSamplesDelegate> delegate) {
  return StartProfiling(profiles_->GetName(title), std::move(options),
                        std::move(delegate));
}

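// Lazily sets up everything sampling needs: under kLazyLogging the listener
// and code map are only created here, then a Symbolizer and a
// SamplingEventsProcessor thread are constructed and started.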
void CpuProfiler::StartProcessorIfNotStarted() {
  if (processor_) {
    processor_->AddCurrentStack();
    return;
  }

  if (!profiling_scope_) {
    DCHECK_EQ(logging_mode_, kLazyLogging);
    EnableLogging();
  }

  if (!symbolizer_) {
    symbolizer_ =
        std::make_unique<Symbolizer>(code_observer_->instruction_stream_map());
  }

  base::TimeDelta sampling_interval = ComputeSamplingInterval();
  processor_.reset(new SamplingEventsProcessor(
      isolate_, symbolizer_.get(), code_observer_.get(), profiles_.get(),
      sampling_interval, use_precise_sampling_));
  is_profiling_ = true;

  // Enable stack sampling.
  processor_->AddCurrentStack();
  CHECK(processor_->StartSynchronously());
}

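// Stopping the last running profile shuts the processor thread down first so
// no further samples are recorded, then (under kLazyLogging) releases the
// listener and code map.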
CpuProfile* CpuProfiler::StopProfiling(const char* title) {
  CpuProfile* profile = profiles_->Lookup(title);
  if (profile) {
    return StopProfiling(profile->id());
  }
  return nullptr;
}

CpuProfile* CpuProfiler::StopProfiling(ProfilerId id) {
  if (!is_profiling_) return nullptr;
  const bool last_profile = profiles_->IsLastProfileLeft(id);
  if (last_profile) StopProcessor();

  CpuProfile* profile = profiles_->StopProfiling(id);

  AdjustSamplingInterval();

  DCHECK(profiling_scope_);
  if (last_profile && logging_mode_ == kLazyLogging) {
    DisableLogging();
  }

  return profile;
}

void CpuProfiler::StopProcessor() {
  is_profiling_ = false;
  processor_->StopSynchronously();
  processor_.reset();
}
}  // namespace internal
}  // namespace v8