v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
basic-block-instrumentor.cc
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/basic-block-instrumentor.h"

#include <sstream>

#include "src/compiler/node.h"

namespace v8 {
namespace internal {
namespace compiler {

// Find the first place to insert new nodes in a block that's already been
// scheduled that won't upset the register allocator.
static NodeVector::iterator FindInsertionPoint(BasicBlock* block) {
  NodeVector::iterator i = block->begin();
  for (; i != block->end(); ++i) {
    const Operator* op = (*i)->op();
    if (OperatorProperties::IsBasicBlockBegin(op)) continue;
    switch (op->opcode()) {
      case IrOpcode::kParameter:
      case IrOpcode::kPhi:
      case IrOpcode::kEffectPhi:
        continue;
    }
    break;
  }
  return i;
}

static const Operator* IntPtrConstant(CommonOperatorBuilder* common,
                                      intptr_t value) {
  return kSystemPointerSize == 8
             ? common->Int64Constant(value)
             : common->Int32Constant(static_cast<int32_t>(value));
}

// TODO(dcarney): need to mark code as non-serializable.
static const Operator* PointerConstant(CommonOperatorBuilder* common,
                                       const void* ptr) {
  intptr_t ptr_as_int = reinterpret_cast<intptr_t>(ptr);
  return IntPtrConstant(common, ptr_as_int);
}

BasicBlockProfilerData* BasicBlockInstrumentor::Instrument(
    OptimizedCompilationInfo* info, TFGraph* graph, Schedule* schedule,
    Isolate* isolate) {
  // Basic block profiling disables concurrent compilation, so handle deref is
  // fine.
  AllowHandleDereference allow_handle_dereference;
  // Skip the exit block in profiles, since the register allocator can't handle
  // it and entry into it means falling off the end of the function anyway.
  size_t n_blocks = schedule->RpoBlockCount();
  BasicBlockProfilerData* data = BasicBlockProfiler::Get()->NewData(n_blocks);
  // Set the function name.
  data->SetFunctionName(info->GetDebugName());
  // Capture the schedule string before instrumentation.
  if (v8_flags.turbo_profiling_verbose) {
    std::ostringstream os;
    os << *schedule;
    data->SetSchedule(os);
  }
  // Check whether we should write counts to a JS heap object or to the
  // BasicBlockProfilerData directly. The JS heap object is only used for
  // builtins.
  bool on_heap_counters = isolate && isolate->IsGeneratingEmbeddedBuiltins();
  // Add the increment instructions to the start of every block.
  CommonOperatorBuilder common(graph->zone());
  MachineOperatorBuilder machine(graph->zone());
  Node* counters_array = nullptr;
  if (on_heap_counters) {
    // Allocation is disallowed here, so rather than referring to an actual
    // counters array, create a reference to a special marker object. This
    // object will get fixed up later in the constants table (see
    // PatchBasicBlockCountersReference). An important and subtle point: we
    // cannot use the root handle basic_block_counters_marker_handle() and must
    // create a new separate handle. Otherwise
    // MacroAssemblerBase::IndirectLoadConstant would helpfully emit a
    // root-relative load rather than putting this value in the constants table
    // where we expect it to be for patching.
    counters_array = graph->NewNode(common.HeapConstant(Handle<HeapObject>::New(
        ReadOnlyRoots(isolate).basic_block_counters_marker(), isolate)));
  } else {
    counters_array = graph->NewNode(PointerConstant(&common, data->counts()));
  }
  Node* zero = graph->NewNode(common.Int32Constant(0));
  Node* one = graph->NewNode(common.Int32Constant(1));
  BasicBlockVector* blocks = schedule->rpo_order();
  size_t block_number = 0;
  for (BasicBlockVector::iterator it = blocks->begin(); block_number < n_blocks;
       ++it, ++block_number) {
    BasicBlock* block = (*it);
    if (block == schedule->end()) continue;
    // Iteration is already in reverse post-order.
    DCHECK_EQ(block->rpo_number(), block_number);
    data->SetBlockId(block_number, block->id().ToInt());
    // It is unnecessary to wire effect and control deps for load and store
    // since this happens after scheduling.
    // Construct increment operation.
    int offset_to_counter_value = static_cast<int>(block_number) * kInt32Size;
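    // When counters live on the heap, counters_array is a tagged object, so
    // the offset below must additionally skip the object header and strip the
    // heap-object tag to reach the first 32-bit counter slot.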
    if (on_heap_counters) {
      offset_to_counter_value +=
          OFFSET_OF_DATA_START(ByteArray) - kHeapObjectTag;
    }
    Node* offset_to_counter =
        graph->NewNode(IntPtrConstant(&common, offset_to_counter_value));
    Node* load =
        graph->NewNode(machine.Load(MachineType::Uint32()), counters_array,
                       offset_to_counter, graph->start(), graph->start());
    Node* inc = graph->NewNode(machine.Int32Add(), load, one);

    // Branchless saturation, because we've already run the scheduler, so
    // introducing extra control flow here would be surprising.
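    // Uint32LessThan(inc, load) is 1 exactly when the increment wrapped
    // around; 0 - 1 then yields an all-ones mask, and OR-ing it in pins the
    // counter at UINT32_MAX instead of rolling over to zero.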
    Node* overflow = graph->NewNode(machine.Uint32LessThan(), inc, load);
    Node* overflow_mask = graph->NewNode(machine.Int32Sub(), zero, overflow);
    Node* saturated_inc =
        graph->NewNode(machine.Word32Or(), inc, overflow_mask);

    Node* store =
        graph->NewNode(machine.Store(StoreRepresentation(
                           MachineRepresentation::kWord32, kNoWriteBarrier)),
                       counters_array, offset_to_counter, saturated_inc,
                       graph->start(), graph->start());
    // Insert the new nodes.
    static const int kArraySize = 10;
    Node* to_insert[kArraySize] = {
        counters_array, zero, one,      offset_to_counter,
        load,           inc,  overflow, overflow_mask,
        saturated_inc,  store};
    // The first three Nodes are constant across all blocks.
    int insertion_start = block_number == 0 ? 0 : 3;
    NodeVector::iterator insertion_point = FindInsertionPoint(block);
    block->InsertNodes(insertion_point, &to_insert[insertion_start],
                       &to_insert[kArraySize]);
    // Tell the scheduler about the new nodes.
    for (int i = insertion_start; i < kArraySize; ++i) {
      schedule->SetBlockForNode(block, to_insert[i]);
    }
    // The exit block is not instrumented and so we must ignore that block
    // count.
    if (block->control() == BasicBlock::kBranch &&
        block->successors()[0] != schedule->end() &&
        block->successors()[1] != schedule->end()) {
      data->AddBranch(block->successors()[0]->id().ToInt(),
                      block->successors()[1]->id().ToInt());
    }
  }
  return data;
}

namespace {

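// Records an edge in the builtins call graph when |n| is a kCall or kTailCall
// node whose callee is a HeapConstant holding builtin Code.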
void StoreBuiltinCallForNode(Node* n, Builtin builtin, int block_id,
                             BuiltinsCallGraph* bcc_profiler) {
  if (n == nullptr) return;
  IrOpcode::Value opcode = n->opcode();
  if (opcode == IrOpcode::kCall || opcode == IrOpcode::kTailCall) {
    const CallDescriptor* des = CallDescriptorOf(n->op());
    if (des->kind() == CallDescriptor::kCallCodeObject) {
      Node* callee = n->InputAt(0);
      Operator* op = const_cast<Operator*>(callee->op());
      if (op->opcode() == IrOpcode::kHeapConstant) {
        IndirectHandle<HeapObject> para =
            OpParameter<IndirectHandle<HeapObject>>(op);
        if (IsCode(*para)) {
          DirectHandle<Code> code = Cast<Code>(para);
          if (code->is_builtin()) {
            bcc_profiler->AddBuiltinCall(builtin, code->builtin_id(), block_id);
            return;
          }
        }
      }
    }
  }
}

}  // namespace

void BasicBlockCallGraphProfiler::StoreCallGraph(OptimizedCompilationInfo* info,
                                                 Schedule* schedule) {
  CHECK(Builtins::IsBuiltinId(info->builtin()));
  BasicBlockVector* blocks = schedule->rpo_order();
  size_t block_number = 0;
  size_t n_blocks = schedule->RpoBlockCount();
  for (BasicBlockVector::iterator it = blocks->begin(); block_number < n_blocks;
       ++it, ++block_number) {
    BasicBlock* block = (*it);
    if (block == schedule->end()) continue;
    // Iteration is already in reverse post-order.
    DCHECK_EQ(block->rpo_number(), block_number);
    int block_id = block->id().ToInt();

    BuiltinsCallGraph* profiler = BuiltinsCallGraph::Get();

    for (Node* node : *block) {
      StoreBuiltinCallForNode(node, info->builtin(), block_id, profiler);
    }

    BasicBlock::Control control = block->control();
    if (control != BasicBlock::kNone) {
      Node* cnt_node = block->control_input();
      StoreBuiltinCallForNode(cnt_node, info->builtin(), block_id, profiler);
    }
  }
}

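// Returns true and stores the callee in |*called_builtin| when |op| is a
// turboshaft Call or TailCall to a code object whose target is a heap-constant
// builtin.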
bool IsBuiltinCall(const turboshaft::Operation& op,
                   const turboshaft::Graph& graph, Builtin* called_builtin) {
  using namespace turboshaft;  // NOLINT(build/namespaces)
  DCHECK_NOT_NULL(called_builtin);
  const TSCallDescriptor* ts_descriptor;
  V<CallTarget> callee_index;
  if (const auto* call_op = op.TryCast<CallOp>()) {
    ts_descriptor = call_op->descriptor;
    callee_index = call_op->callee();
  } else if (const auto* tail_call_op = op.TryCast<TailCallOp>()) {
    ts_descriptor = tail_call_op->descriptor;
    callee_index = tail_call_op->callee();
  } else {
    return false;
  }

  DCHECK_NOT_NULL(ts_descriptor);
  if (ts_descriptor->descriptor->kind() != CallDescriptor::kCallCodeObject) {
    return false;
  }

  OperationMatcher matcher(graph);
  Handle<HeapObject> heap_constant;
  if (!matcher.MatchHeapConstant(callee_index, &heap_constant)) return false;
  if (!IsCode(*heap_constant)) return false;
  DirectHandle<Code> code = Cast<Code>(heap_constant);
  if (!code->is_builtin()) return false;

  *called_builtin = code->builtin_id();
  return true;
}

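// Turboshaft variant of StoreCallGraph: walks every operation in every block
// and records one call-graph edge per builtin call found.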
void BasicBlockCallGraphProfiler::StoreCallGraph(
    OptimizedCompilationInfo* info, const turboshaft::Graph& graph) {
  using namespace turboshaft;  // NOLINT(build/namespaces)
  CHECK(Builtins::IsBuiltinId(info->builtin()));
  BuiltinsCallGraph* profiler = BuiltinsCallGraph::Get();

  for (const Block* block : graph.blocks_vector()) {
    const int block_id = block->index().id();
    for (const auto& op : graph.operations(*block)) {
      Builtin called_builtin;
      if (IsBuiltinCall(op, graph, &called_builtin)) {
        profiler->AddBuiltinCall(info->builtin(), called_builtin, block_id);
      }
    }
  }
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8