v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
sampling-heap-profiler.h
Go to the documentation of this file.
1// Copyright 2015 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_PROFILER_SAMPLING_HEAP_PROFILER_H_
6#define V8_PROFILER_SAMPLING_HEAP_PROFILER_H_
7
8#include <deque>
9#include <map>
10#include <memory>
11#include <unordered_map>
12
13#include "include/v8-profiler.h"
14#include "src/heap/heap.h"
16
17namespace v8 {
18
19namespace base {
20class RandomNumberGenerator;
21} // namespace base
22
23namespace internal {
24
26 public:
27 AllocationProfile() = default;
30
32 return nodes_.size() == 0 ? nullptr : &nodes_.front();
33 }
34
  // Returns all samples recorded for this profile. The returned vector is
  // owned by this AllocationProfile (samples_ member) and remains valid for
  // the profile's lifetime.
35 const std::vector<v8::AllocationProfile::Sample>& GetSamples() override {
36 return samples_;
37 }
38
39 private:
40 std::deque<v8::AllocationProfile::Node> nodes_;
41 std::vector<v8::AllocationProfile::Sample> samples_;
42
44};
45
47 public:
49 public:
50 using FunctionId = uint64_t;
  // Builds one node of the allocation call tree. `parent` is the calling
  // function's node, `script_id`/`start_position` locate the function in its
  // script, and `id` is a profiler-assigned node id.
  // NOTE(review): `name` is stored as a raw pointer — presumably interned in
  // StringsStorage; confirm it outlives this node.
51 AllocationNode(AllocationNode* parent, const char* name, int script_id,
52 int start_position, uint32_t id)
53 : parent_(parent),
54 script_id_(script_id),
55 script_position_(start_position),
56 name_(name),
57 id_(id) {}
60
62 auto it = children_.find(id);
63 return it != children_.end() ? it->second.get() : nullptr;
64 }
65
67 std::unique_ptr<AllocationNode> node) {
68 return children_.emplace(id, std::move(node)).first->second.get();
69 }
70
71 static FunctionId function_id(int script_id, int start_position,
72 const char* name) {
73 // script_id == kNoScriptId case:
74 // Use function name pointer as an id. Names derived from VM state
75 // must not collide with the builtin names. The least significant bit
76 // of the id is set to 1.
77 if (script_id == v8::UnboundScript::kNoScriptId) {
78 return reinterpret_cast<intptr_t>(name) | 1;
79 }
80 // script_id != kNoScriptId case:
81 // Use script_id, start_position pair to uniquelly identify the node.
82 // The least significant bit of the id is set to 0.
83 DCHECK(static_cast<unsigned>(start_position) < (1u << 31));
84 return (static_cast<uint64_t>(script_id) << 32) + (start_position << 1);
85 }
86
87 private:
88 // TODO(alph): make use of unordered_map's here. Pay attention to
89 // iterator invalidation during TranslateAllocationNode.
90 std::map<size_t, unsigned int> allocations_;
91 std::map<FunctionId, std::unique_ptr<AllocationNode>> children_;
93 const int script_id_;
95 const char* const name_;
96 uint32_t id_;
97 bool pinned_ = false;
98
100 };
101
102 struct Sample {
104 SamplingHeapProfiler* profiler_, uint64_t sample_id)
105 : size(size_),
106 owner(owner_),
107 global(reinterpret_cast<v8::Isolate*>(profiler_->isolate_), local_),
108 profiler(profiler_),
110 Sample(const Sample&) = delete;
111 Sample& operator=(const Sample&) = delete;
112 const size_t size;
116 const uint64_t sample_id;
117 };
118
119 SamplingHeapProfiler(Heap* heap, StringsStorage* names, uint64_t rate,
120 int stack_depth, v8::HeapProfiler::SamplingFlags flags);
124
126 StringsStorage* names() const { return names_; }
127
128 private:
130 public:
131 Observer(Heap* heap, intptr_t step_size, uint64_t rate,
132 SamplingHeapProfiler* profiler,
134 : AllocationObserver(step_size),
135 profiler_(profiler),
136 heap_(heap),
137 random_(random),
138 rate_(rate) {}
139
140 protected:
141 void Step(int bytes_allocated, Address soon_object, size_t size) override {
142 USE(heap_);
143 DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
144 if (soon_object) {
145 // TODO(ofrobots): it would be better to sample the next object rather
146 // than skipping this sample epoch if soon_object happens to be null.
147 profiler_->SampleObject(soon_object, size);
148 }
149 }
150
  // AllocationObserver override: byte count until the next sample, computed
  // from the configured sampling rate (see GetNextSampleInterval).
151 intptr_t GetNextStepSize() override { return GetNextSampleInterval(rate_); }
152
153 private:
154 intptr_t GetNextSampleInterval(uint64_t rate);
156 Heap* const heap_;
158 uint64_t const rate_;
159 };
160
161 void SampleObject(Address soon_object, size_t size);
162
163 const std::vector<v8::AllocationProfile::Sample> BuildSamples() const;
164
165 AllocationNode* FindOrAddChildNode(AllocationNode* parent, const char* name,
166 int script_id, int start_position);
167 static void OnWeakCallback(const WeakCallbackInfo<Sample>& data);
168
  // Id generators. Both counters start at 0 and pre-increment, so the first
  // id handed out is 1 and 0 is never a valid node/sample id.
169 uint32_t next_node_id() { return ++last_node_id_; }
170 uint64_t next_sample_id() { return ++last_sample_id_; }
171
172 // Methods that construct v8::AllocationProfile.
173
174 // Translates the provided AllocationNode *node* returning an equivalent
175 // AllocationProfile::Node. The newly created AllocationProfile::Node is added
176 // to the provided AllocationProfile *profile*. Line numbers, column numbers,
177 // and script names are resolved using *scripts* which maps all currently
178 // loaded scripts keyed by their script id.
181 const std::map<int, Handle<Script>>& scripts);
183 unsigned int count) const;
184 AllocationNode* AddStack();
185
187 Heap* const heap_;
188 uint64_t last_sample_id_ = 0;
189 uint32_t last_node_id_ = 0;
193 std::unordered_map<Sample*, std::unique_ptr<Sample>> samples_;
194 const int stack_depth_;
195 const uint64_t rate_;
197};
198
199} // namespace internal
200} // namespace v8
201
202#endif // V8_PROFILER_SAMPLING_HEAP_PROFILER_H_
static const int kNoScriptId
Definition v8-script.h:91
std::vector< v8::AllocationProfile::Sample > samples_
AllocationProfile(const AllocationProfile &)=delete
std::deque< v8::AllocationProfile::Node > nodes_
AllocationProfile & operator=(const AllocationProfile &)=delete
const std::vector< v8::AllocationProfile::Sample > & GetSamples() override
v8::AllocationProfile::Node * GetRootNode() override
HeapState gc_state() const
Definition heap.h:521
AllocationNode & operator=(const AllocationNode &)=delete
AllocationNode(const AllocationNode &)=delete
AllocationNode(AllocationNode *parent, const char *name, int script_id, int start_position, uint32_t id)
std::map< FunctionId, std::unique_ptr< AllocationNode > > children_
static FunctionId function_id(int script_id, int start_position, const char *name)
AllocationNode * AddChildNode(FunctionId id, std::unique_ptr< AllocationNode > node)
Observer(Heap *heap, intptr_t step_size, uint64_t rate, SamplingHeapProfiler *profiler, base::RandomNumberGenerator *random)
void Step(int bytes_allocated, Address soon_object, size_t size) override
v8::HeapProfiler::SamplingFlags flags_
const std::vector< v8::AllocationProfile::Sample > BuildSamples() const
v8::AllocationProfile::Allocation ScaleSample(size_t size, unsigned int count) const
void SampleObject(Address soon_object, size_t size)
SamplingHeapProfiler & operator=(const SamplingHeapProfiler &)=delete
v8::AllocationProfile * GetAllocationProfile()
static void OnWeakCallback(const WeakCallbackInfo< Sample > &data)
SamplingHeapProfiler(Heap *heap, StringsStorage *names, uint64_t rate, int stack_depth, v8::HeapProfiler::SamplingFlags flags)
std::unordered_map< Sample *, std::unique_ptr< Sample > > samples_
AllocationNode * FindOrAddChildNode(AllocationNode *parent, const char *name, int script_id, int start_position)
v8::AllocationProfile::Node * TranslateAllocationNode(AllocationProfile *profile, SamplingHeapProfiler::AllocationNode *node, const std::map< int, Handle< Script > > &scripts)
SamplingHeapProfiler(const SamplingHeapProfiler &)=delete
const int size_
Definition assembler.cc:132
const MapRef owner_
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in name
Definition flags.cc:2086
refactor address components for immediate indexing make OptimizeMaglevOnNextCall optimize to turbofan instead of maglev filter for tracing turbofan compilation nullptr
Definition flags.cc:1263
#define DCHECK(condition)
Definition logging.h:482
#define USE(...)
Definition macros.h:293
Sample & operator=(const Sample &)=delete
Sample(size_t size_, AllocationNode *owner_, Local< Value > local_, SamplingHeapProfiler *profiler_, uint64_t sample_id)