#ifndef V8_PROFILER_CPU_PROFILER_H_
#define V8_PROFILER_CPU_PROFILER_H_

class InstructionStreamMap;
class CpuProfilesCollection;
 
#define CODE_EVENTS_TYPE_LIST(V)                \
  V(kCodeCreation, CodeCreateEventRecord)       \
  V(kCodeMove, CodeMoveEventRecord)             \
  V(kCodeDisableOpt, CodeDisableOptEventRecord) \
  V(kCodeDeopt, CodeDeoptEventRecord)           \
  V(kReportBuiltin, ReportBuiltinEventRecord)   \
  V(kCodeDelete, CodeDeleteEventRecord)

#define VM_EVENTS_TYPE_LIST(V) \
  CODE_EVENTS_TYPE_LIST(V)     \
  V(kNativeContextMove, NativeContextMoveEventRecord)
 
#define DECLARE_TYPE(type, ignore) type,
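// Sketch of how DECLARE_TYPE is consumed (mirroring CodeEventRecord in the
// V8 sources): expanding the X-macro list yields one enumerator per event
// record kind, so the type enum is generated rather than written by hand:
//   enum class Type { kNoEvent = 0, VM_EVENTS_TYPE_LIST(DECLARE_TYPE) };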
 
#define DECLARE_CLASS(ignore, type) type type##_;
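// Counterpart sketch for DECLARE_CLASS (as used by CodeEventsContainer):
// each list entry becomes a member named after its record type, e.g.
// V(kCodeMove, CodeMoveEventRecord) expands to
//   CodeMoveEventRecord CodeMoveEventRecord_;
// giving the container one field per event record kind.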
 
  void StopSynchronously();
  bool running() { return running_.load(std::memory_order_relaxed); }
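  // Note: a relaxed load suffices here because running() only gates loop
  // continuation; the actual shutdown handshake happens in StopSynchronously()
  // (which, in the V8 sources, signals the processor and joins its thread).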
 
  void AddCurrentStack(bool update_stats = false,
                       const std::optional<uint64_t> trace_id = std::nullopt);
  void AddDeoptStack(Address from, int fp_to_sp_delta);
 
  bool ProcessCodeEvent();

  std::atomic_bool running_{true};
 
 
  void* operator new(size_t size);
  void operator delete(void* ptr);
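  // Likely rationale (per the surrounding V8 code): the processor embeds a
  // SamplingCircularQueue with stricter alignment requirements than default
  // operator new guarantees, so allocation is overridden to return suitably
  // aligned storage.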
 
  inline void FinishTickSample();
  SampleProcessingResult ProcessOneSample() override;
 
  static const size_t kTickSampleBufferSize = 512 * KB;
  static const size_t kTickSampleQueueLength =
      kTickSampleBufferSize / sizeof(TickSampleEventRecord);
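  // Worked example (sizes are illustrative, not normative): with a 512 KB
  // buffer and a TickSampleEventRecord on the order of 2 KB on a 64-bit build
  // (dominated by the sample's fixed-size frame array), the circular queue
  // holds roughly 256 in-flight samples between the sampler and the
  // processing thread.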
 
 
  size_t GetEstimatedMemoryUsage() const;
 
  void CreateEntriesForRuntimeCallStats();
 
 
  static void CollectSample(Isolate* isolate,
                            std::optional<uint64_t> trace_id = std::nullopt);
  static size_t GetAllProfilersMemorySize(Isolate* isolate);
 
  void set_use_precise_sampling(bool);
  void CollectSample(const std::optional<uint64_t> trace_id = std::nullopt);
  size_t GetEstimatedMemoryUsage() const;
 
  CpuProfilingResult StartProfiling(
      CpuProfilingOptions options = {},
      std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);
  CpuProfilingResult StartProfiling(
      const char* title, CpuProfilingOptions options = {},
      std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);
  CpuProfilingResult StartProfiling(
      Tagged<String> title, CpuProfilingOptions options = {},
      std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);
 
  CpuProfile* StopProfiling(const char* title);
  CpuProfile* StopProfiling(Tagged<String> title);
  CpuProfile* StopProfiling(ProfilerId id);

  int GetProfilesCount();
  CpuProfile* GetProfile(int index);
  void DeleteAllProfiles();
  void DeleteProfile(CpuProfile* profile);
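  // Usage sketch (hypothetical call site; `isolate` is assumed to be an
  // initialized v8::internal::Isolate*):
  //   CpuProfiler profiler(isolate);
  //   profiler.StartProfiling("startup");
  //   ... run the code under measurement ...
  //   CpuProfile* profile = profiler.StopProfiling("startup");
  //   ... inspect the profile ...
  //   profiler.DeleteProfile(profile);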
 
  ProfilerListener* profiler_listener_for_test() const {
    return profiler_listener_.get();
  }
 
 
  InstructionStreamMap* code_map_for_test() {
    return code_observer_->instruction_stream_map();
  }
 
 
  void StartProcessorIfNotStarted();
  void StopProcessor();
  void ResetProfiles();

  void EnableLogging();
  void DisableLogging();

  void AdjustSamplingInterval();
 
  bool use_precise_sampling_ = true;
 
 
#endif  // V8_PROFILER_CPU_PROFILER_H_