#include <unordered_map>
#if V8_ENABLE_WEBASSEMBLY
      (!isolate->thread_manager()->IsLockedByThread(
           perThreadData_->thread_id()))
#if V8_HEAP_USE_PKU_JIT_WRITE_PROTECT
  i::RwxMemoryWriteScope::SetDefaultPermissionsForSignalHandler();
#endif  // V8_HEAP_USE_PKU_JIT_WRITE_PROTECT
  if (sample == nullptr) {
    return;  // Sampling queue full; drop this tick.
  }
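  // Note: StartTickSample() presumably hands out a slot from the sampling
  // circular queue and returns nullptr when that queue is full, so the tick
  // is dropped rather than blocking inside the signal handler.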
ProfilingScope::ProfilingScope(Isolate* isolate, ProfilerListener* listener)
    : isolate_(isolate), listener_(listener) {
  size_t profiler_count = isolate_->num_cpu_profilers();
  profiler_count++;
  isolate_->set_num_cpu_profilers(profiler_count);
#if V8_ENABLE_WEBASSEMBLY
  size_t profiler_count = isolate_->num_cpu_profilers();
  DCHECK_GT(profiler_count, 0);
  profiler_count--;
  isolate_->set_num_cpu_profilers(profiler_count);
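  // The isolate appears to track how many profilers are attached so that
  // code-event logging (and wasm code logging under V8_ENABLE_WEBASSEMBLY)
  // can be switched on when the first ProfilingScope is created and off
  // again when the last one is destroyed.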
      symbolizer_(symbolizer),
      code_observer_(code_observer),
      last_code_event_id_(0),
      last_processed_code_event_id_(0),
      use_precise_sampling_(use_precise_sampling) {
  precise_sleep_timer_.TryInit();
  regs.sp = reinterpret_cast<void*>(fp - fp_to_sp_delta);
  regs.fp = reinterpret_cast<void*>(fp);
  regs.pc = reinterpret_cast<void*>(from);
void ProfilerEventsProcessor::AddCurrentStack(
    bool update_stats, const std::optional<uint64_t> trace_id) {
    regs.sp = reinterpret_cast<void*>(frame->sp());
    regs.fp = reinterpret_cast<void*>(frame->fp());
    regs.pc = reinterpret_cast<void*>(frame->pc());
  bool expected = true;
  if (!running_.compare_exchange_strong(expected, false,
                                        std::memory_order_relaxed)) {
    return;  // Another thread already requested the stop.
  }
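// A minimal standalone sketch (hypothetical names, not part of this file) of
// the stop idiom above: compare_exchange_strong flips the flag from true to
// false at most once, so a concurrent second stop request sees the exchange
// fail and becomes a no-op. Assumes <atomic> is available.
namespace stop_idiom_sketch {
inline std::atomic_bool running{true};
inline bool StopOnce() {
  bool expected = true;
  return running.compare_exchange_strong(expected, false,
                                         std::memory_order_relaxed);
}
}  // namespace stop_idiom_sketch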
  if (record.generic.type == CodeEventRecord::Type::kNativeContextMove) {
    NativeContextMoveEventRecord& nc_record =
        record.NativeContextMoveEventRecord_;
    case CodeEventRecord::Type::kCodeCreation:
    case CodeEventRecord::Type::kCodeMove:
    case CodeEventRecord::Type::kCodeDisableOpt:
    case CodeEventRecord::Type::kCodeDelete:
    case CodeEventRecord::Type::kNativeContextMove:
      Enqueue(evt_rec);
      break;
    case CodeEventRecord::Type::kCodeDeopt: {
      const CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
      Enqueue(evt_rec);
      AddDeoptStack(rec->pc, rec->fp_to_sp_delta);
      break;
    }
    case CodeEventRecord::Type::kReportBuiltin:
  while (running_.load(std::memory_order_relaxed)) {
    if (nextSampleTime > now) {
      if (precise_sleep_timer_.IsInitialized()) {
        precise_sleep_timer_.Sleep(nextSampleTime - now);
      } else {
        // A stop request signals running_cond_ to interrupt the wait.
        while (now < nextSampleTime &&
               running_cond_.WaitFor(&running_mutex_, nextSampleTime - now)) {
          now = base::TimeTicks::Now();
        }
    if (!running_.load(std::memory_order_relaxed)) {
  running_.store(true, std::memory_order_relaxed);
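  // Relaxed ordering is presumably sufficient for running_: the flag only
  // gates the sampler loop, while the tick and code-event queues provide
  // their own synchronization for the data they carry.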
void* SamplingEventsProcessor::operator new(size_t size) {
  return AlignedAllocWithRetry(size, alignof(SamplingEventsProcessor));
}

void SamplingEventsProcessor::operator delete(void* ptr) { AlignedFree(ptr); }
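// operator new/delete are overridden because, presumably, the embedded
// SamplingCircularQueue (ticks_buffer_) requires alignment stricter than the
// default allocator guarantees; AlignedAllocWithRetry/AlignedFree keep
// allocation and deallocation symmetric.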
      code_entries_(storage),
      weak_code_registry_(isolate),
#define PROFILER_TYPE_CASE(type, clss)        \
  case CodeEventRecord::Type::type:           \
    record.clss##_.UpdateCodeMap(&code_map_); \
    break;

    CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE)
#undef PROFILER_TYPE_CASE
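// A self-contained sketch (hypothetical names, not V8's CODE_EVENTS_TYPE_LIST)
// of the X-macro dispatch pattern used above: the list macro applies a given
// macro once per entry, generating one switch case per event type.
namespace xmacro_sketch {
#define SKETCH_EVENT_LIST(V) \
  V(kCreate)                 \
  V(kMove)                   \
  V(kDelete)
enum class Tag { kCreate, kMove, kDelete };
inline const char* TagName(Tag tag) {
  switch (tag) {
#define SKETCH_CASE(name) \
  case Tag::name:         \
    return #name;
    SKETCH_EVENT_LIST(SKETCH_CASE)
#undef SKETCH_CASE
  }
  return "";
}
}  // namespace xmacro_sketch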
#ifdef V8_RUNTIME_CALL_STATS
  // Each runtime-call-stats counter is registered as a synthetic code entry
  // keyed by the counter's own address (size 1), so samples attributed to a
  // counter address symbolize to a "native V8Runtime" frame.
  RuntimeCallStats* rcs = isolate_->counters()->runtime_call_stats();
  for (int i = 0; i < RuntimeCallStats::kNumberOfCounters; ++i) {
    RuntimeCallCounter* counter = rcs->GetCounter(i);
    CodeEntry* entry = code_entries_.Create(
        LogEventListener::CodeTag::kFunction, counter->name(),
        "native V8Runtime");
    code_map_.AddCode(reinterpret_cast<Address>(counter), entry, 1);
  }
#endif  // V8_RUNTIME_CALL_STATS
  DCHECK(builtins->is_initialized());
  return profiles_->profiles()->at(index).get();
class CpuProfilersManager {
    for (auto it = range.first; it != range.second; ++it) {
      if (it->second != profiler) continue;
  void CallCollectSample(Isolate* isolate,
                         const std::optional<uint64_t> trace_id) {
    base::MutexGuard lock(&mutex_);
    auto range = profilers_.equal_range(isolate);
    for (auto it = range.first; it != range.second; ++it) {
      it->second->CollectSample(trace_id);
    }
  }
  size_t GetAllProfilersMemorySize(Isolate* isolate) {
    base::MutexGuard lock(&mutex_);
    size_t estimated_memory = 0;
    auto range = profilers_.equal_range(isolate);
    for (auto it = range.first; it != range.second; ++it) {
      estimated_memory += it->second->GetEstimatedMemoryUsage();
    }
    return estimated_memory;
  }
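  // profilers_ is an unordered_multimap keyed by Isolate*, so several
  // CpuProfiler instances can be attached to one isolate; the manager
  // aggregates over equal_range() both here and when forwarding
  // CollectSample requests.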
      naming_mode_(naming_mode),
      logging_mode_(logging_mode),
      base_sampling_interval_(base::TimeDelta::FromMicroseconds(
          v8_flags.cpu_profiler_sampling_interval)),
      code_observer_(test_code_observer),
      profiles_(test_profiles),
      symbolizer_(test_symbolizer),
      is_profiling_(false) {
  GetProfilersManager()->AddProfiler(isolate, this);
  GetProfilersManager()->RemoveProfiler(isolate_, this);
  return profiles_->GetCommonSamplingInterval();
  processor_->SetSamplingInterval(base_interval);
void CpuProfiler::CollectSample(Isolate* isolate,
                                const std::optional<uint64_t> trace_id) {
  GetProfilersManager()->CallCollectSample(isolate, trace_id);
}
  return GetProfilersManager()->GetAllProfilersMemorySize(isolate);
CpuProfilingResult CpuProfiler::StartProfiling(
    CpuProfilingOptions options,
    std::unique_ptr<DiscardedSamplesDelegate> delegate) {
  return StartProfiling(nullptr, std::move(options), std::move(delegate));
}
    std::unique_ptr<DiscardedSamplesDelegate> delegate) {
  CpuProfilingResult result =
      profiles_->StartProfiling(title, std::move(options), std::move(delegate));
  bool source_rundown_trace_enabled;
  bool source_rundown_sources_trace_enabled;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(
      TRACE_DISABLED_BY_DEFAULT("devtools.v8-source-rundown"),
      &source_rundown_trace_enabled);
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(
      TRACE_DISABLED_BY_DEFAULT("devtools.v8-source-rundown-sources"),
      &source_rundown_sources_trace_enabled);
  if (source_rundown_trace_enabled || source_rundown_sources_trace_enabled) {
    Handle<WeakArrayList> script_objects = isolate_->factory()->script_list();
    for (int i = 0; i < script_objects->length(); i++) {
      if (Tagged<HeapObject> script_object; script_objects->get(i)
              .GetHeapObjectIfWeak(&script_object)) {
        Tagged<Script> script = Cast<Script>(script_object);
        if (source_rundown_trace_enabled) {
          script->TraceScriptRundown();
        }
        if (source_rundown_sources_trace_enabled) {
          script->TraceScriptRundownSources();
        }
      }
    }
  }
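  // When either rundown category is enabled, the isolate's weak script list
  // is walked and a trace event is emitted per live script (plus its source
  // text for the -sources category), presumably so tooling such as DevTools
  // can symbolize the profile without re-fetching scripts.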
    std::unique_ptr<DiscardedSamplesDelegate> delegate) {
  return StartProfiling(profiles_->GetName(title), std::move(options),
                        std::move(delegate));
    symbolizer_ = std::make_unique<Symbolizer>(
        code_observer_->instruction_stream_map());
  const bool last_profile = profiles_->IsLastProfileLeft(id);