v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
profile-generator.cc
Go to the documentation of this file.
1// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
7#include <algorithm>
8#include <vector>
9
10#include "include/v8-profiler.h"
20
21namespace v8 {
22namespace internal {
23
// SourcePositionTable::SetPosition (signature truncated in this capture):
// records that |pc_offset| maps to 1-based source |line| within inline frame
// |inlining_id|. Callers append in ascending pc_offset order, so the table
// stays sorted for the binary searches in GetSourceLineNumber/GetInliningId.
25 int inlining_id) {
27 DCHECK_GT(line, 0); // The 1-based number of the source line.
28 // It's possible that we map multiple source positions to a pc_offset in
29 // optimized code. Usually these map to the same line, so there is no
30 // difference here as we only store line number and not line/col in the form
31 // of a script offset. Ignore any subsequent sets to the same offset.
32 if (!pc_offsets_to_lines_.empty() &&
33 pc_offsets_to_lines_.back().pc_offset == pc_offset) {
34 return;
35 }
36 // Check that we are inserting in ascending order, so that the vector remains
37 // sorted.
// NOTE(review): the DCHECK head enforcing the ascending-order invariant
// (original line 38) is missing from this capture; the condition below is
// its second operand.
39 pc_offsets_to_lines_.back().pc_offset < pc_offset);
// Coalesce runs: only append when (line, inlining_id) differs from the
// previous tuple.
40 if (pc_offsets_to_lines_.empty() ||
41 pc_offsets_to_lines_.back().line_number != line ||
42 pc_offsets_to_lines_.back().inlining_id != inlining_id) {
43 pc_offsets_to_lines_.push_back({pc_offset, line, inlining_id});
44 }
45}
46
48 if (pc_offsets_to_lines_.empty()) {
50 }
51 auto it = std::lower_bound(
53 SourcePositionTuple{pc_offset, 0, SourcePosition::kNotInlined});
54 if (it != pc_offsets_to_lines_.begin()) --it;
55 return it->line_number;
56}
57
59 if (pc_offsets_to_lines_.empty()) {
61 }
62 auto it = std::lower_bound(
64 SourcePositionTuple{pc_offset, 0, SourcePosition::kNotInlined});
65 if (it != pc_offsets_to_lines_.begin()) --it;
66 return it->inlining_id;
67}
68
70 return sizeof(*this) + pc_offsets_to_lines_.capacity() *
71 sizeof(decltype(pc_offsets_to_lines_)::value_type);
72}
73
75 base::OS::Print(" - source position table at %p\n", this);
76 for (const SourcePositionTuple& pos_info : pc_offsets_to_lines_) {
77 base::OS::Print(" %d --> line_number: %d inlining_id: %d\n",
78 pos_info.pc_offset, pos_info.line_number,
79 pos_info.inlining_id);
80 }
81}
82
// Shared sentinel strings; callers compare against these by pointer identity
// (see e.g. the bailout_reason check in ProfileNode::Print below).
83const char* const CodeEntry::kEmptyResourceName = "";
84const char* const CodeEntry::kEmptyBailoutReason = "";
85const char* const CodeEntry::kNoDeoptReason = "";
86
// Display names for the synthetic entries surfaced in CPU profiles.
87const char* const CodeEntry::kProgramEntryName = "(program)";
88const char* const CodeEntry::kIdleEntryName = "(idle)";
89const char* const CodeEntry::kGarbageCollectorEntryName = "(garbage collector)";
90const char* const CodeEntry::kUnresolvedFunctionName = "(unresolved function)";
91const char* const CodeEntry::kRootEntryName = "(root)";
92
93// static
102
103// static
112
113// static
123
124// static
133
134// static
143
144uint32_t CodeEntry::GetHash() const {
145 uint32_t hash = 0;
147 hash ^= ComputeUnseededHash(static_cast<uint32_t>(script_id_));
148 hash ^= ComputeUnseededHash(static_cast<uint32_t>(position_));
149 } else {
150 hash ^= ComputeUnseededHash(
151 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)));
152 hash ^= ComputeUnseededHash(
153 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)));
155 }
156 return hash;
157}
158
159bool CodeEntry::IsSameFunctionAs(const CodeEntry* entry) const {
160 if (this == entry) return true;
162 return script_id_ == entry->script_id_ && position_ == entry->position_;
163 }
164 return name_ == entry->name_ && resource_name_ == entry->resource_name_ &&
165 line_number_ == entry->line_number_;
166}
167
169 bit_field_ =
170 CodeTagField::update(bit_field_, LogEventListener::CodeTag::kBuiltin);
172}
173
175 if (line_info_) return line_info_->GetSourceLineNumber(pc_offset);
177}
178
180 std::unordered_set<CodeEntry*, Hasher, Equals> inline_entries,
181 std::unordered_map<int, std::vector<CodeEntryAndLineNumber>>
182 inline_stacks) {
183 EnsureRareData()->inline_entries_ = std::move(inline_entries);
184 rare_data_->inline_stacks_ = std::move(inline_stacks);
185}
186
// Returns the stack of inlined frames active at |pc_offset|, or nullptr if
// there is no line info or the pc belongs to the outermost (non-inlined)
// frame. The returned vector is owned by this entry's rare data.
187const std::vector<CodeEntryAndLineNumber>* CodeEntry::GetInlineStack(
188 int pc_offset) const {
189 if (!line_info_) return nullptr;
190
// kNotInlined marks pcs in the outermost function, which has no inline stack.
191 int inlining_id = line_info_->GetInliningId(pc_offset);
192 if (inlining_id == SourcePosition::kNotInlined) return nullptr;
194
// Look up the stack registered via SetInlineStacks().
195 auto it = rare_data_->inline_stacks_.find(inlining_id);
196 return it != rare_data_->inline_stacks_.end() ? &it->second : nullptr;
197}
198
200 const char* deopt_reason, int deopt_id,
201 std::vector<CpuProfileDeoptFrame> inlined_frames) {
202 RareData* rare_data = EnsureRareData();
203 rare_data->deopt_reason_ = deopt_reason;
204 rare_data->deopt_id_ = deopt_id;
205 rare_data->deopt_inlined_frames_ = std::move(inlined_frames);
206}
207
209 if (!IsScript(shared->script())) return;
210 Tagged<Script> script = Cast<Script>(shared->script());
211 set_script_id(script->id());
212 set_position(shared->StartPosition());
213 if (shared->optimization_disabled()) {
215 GetBailoutReason(shared->disabled_optimization_reason()));
216 }
217}
218
// CodeEntry::EstimatedSize (signature truncated in this capture): estimates
// the heap footprint of this entry, including optional rare data and the
// source position table.
220 size_t estimated_size = 0;
221 if (rare_data_) {
// NOTE(review): sizeof(rare_data_.get()) is the size of a *pointer*, not of
// the pointed-to RareData object; sizeof(RareData) looks intended — confirm.
222 estimated_size += sizeof(rare_data_.get());
223
// Recursively include inline entries owned by this entry.
224 for (const auto& inline_entry : rare_data_->inline_entries_) {
225 estimated_size += inline_entry->EstimatedSize();
226 }
// Plus the set's own per-element storage.
227 estimated_size += rare_data_->inline_entries_.size() *
228 sizeof(decltype(rare_data_->inline_entries_)::value_type);
229
// Inline stacks: the per-vector elements, then the map's key/value storage.
230 for (const auto& inline_stack_pair : rare_data_->inline_stacks_) {
231 estimated_size += inline_stack_pair.second.size() *
232 sizeof(decltype(inline_stack_pair.second)::value_type);
233 }
234 estimated_size +=
235 rare_data_->inline_stacks_.size() *
236 (sizeof(decltype(rare_data_->inline_stacks_)::key_type) +
237 sizeof(decltype(rare_data_->inline_stacks_)::value_type));
238
// Deopt frames: capacity() is used since that reflects what is allocated.
239 estimated_size +=
240 rare_data_->deopt_inlined_frames_.capacity() *
241 sizeof(decltype(rare_data_->deopt_inlined_frames_)::value_type);
242 }
243
244 if (line_info_) {
245 estimated_size += line_info_->Size();
246 }
247 return sizeof(*this) + estimated_size;
248}
249
252
254 info.deopt_reason = rare_data_->deopt_reason_;
256 if (rare_data_->deopt_inlined_frames_.empty()) {
257 info.stack.push_back(CpuProfileDeoptFrame(
258 {script_id_, static_cast<size_t>(std::max(0, position()))}));
259 } else {
260 info.stack = rare_data_->deopt_inlined_frames_;
261 }
262 return info;
263}
264
266 if (!rare_data_) {
267 rare_data_.reset(new RareData());
268 }
269 return rare_data_.get();
270}
271
273 DCHECK_EQ(ref_count_, 0UL);
274
275 if (name_) {
276 strings.Release(name_);
277 name_ = nullptr;
278 }
279 if (resource_name_) {
280 strings.Release(resource_name_);
281 resource_name_ = nullptr;
282 }
283}
284
285void CodeEntry::print() const {
286 base::OS::Print("CodeEntry: at %p\n", this);
287
288 base::OS::Print(" - name: %s\n", name_);
289 base::OS::Print(" - resource_name: %s\n", resource_name_);
290 base::OS::Print(" - line_number: %d\n", line_number_);
291 base::OS::Print(" - column_number: %d\n", column_number_);
292 base::OS::Print(" - script_id: %d\n", script_id_);
293 base::OS::Print(" - position: %d\n", position_);
294
295 if (line_info_) {
296 line_info_->print();
297 }
298
299 if (rare_data_) {
300 base::OS::Print(" - deopt_reason: %s\n", rare_data_->deopt_reason_);
301 base::OS::Print(" - bailout_reason: %s\n", rare_data_->bailout_reason_);
302 base::OS::Print(" - deopt_id: %d\n", rare_data_->deopt_id_);
303
304 if (!rare_data_->inline_stacks_.empty()) {
305 base::OS::Print(" - inline stacks:\n");
306 for (auto it = rare_data_->inline_stacks_.begin();
307 it != rare_data_->inline_stacks_.end(); it++) {
308 base::OS::Print(" inlining_id: [%d]\n", it->first);
309 for (const auto& e : it->second) {
310 base::OS::Print(" %s --> %d\n", e.code_entry->name(),
311 e.line_number);
312 }
313 }
314 } else {
315 base::OS::Print(" - inline stacks: (empty)\n");
316 }
317
318 if (!rare_data_->deopt_inlined_frames_.empty()) {
319 base::OS::Print(" - deopt inlined frames:\n");
320 for (const CpuProfileDeoptFrame& frame :
321 rare_data_->deopt_inlined_frames_) {
322 base::OS::Print("script_id: %d position: %zu\n", frame.script_id,
323 frame.position);
324 }
325 } else {
326 base::OS::Print(" - deopt inlined frames: (empty)\n");
327 }
328 }
329 base::OS::Print("\n");
330}
331
335
337 // Handle metadata and VM state code entry types.
342 }
345
346 // Otherwise, resolve based on logger tag.
347 switch (entry_->code_tag()) {
348 case LogEventListener::CodeTag::kEval:
349 case LogEventListener::CodeTag::kScript:
350 case LogEventListener::CodeTag::kFunction:
352 case LogEventListener::CodeTag::kBuiltin:
353 case LogEventListener::CodeTag::kHandler:
354 case LogEventListener::CodeTag::kBytecodeHandler:
355 case LogEventListener::CodeTag::kNativeFunction:
356 case LogEventListener::CodeTag::kNativeScript:
358 case LogEventListener::CodeTag::kCallback:
360 case LogEventListener::CodeTag::kRegExp:
361 case LogEventListener::CodeTag::kStub:
362 case LogEventListener::CodeTag::kLength:
364 }
366 UNREACHABLE();
367}
368
373
375 auto map_entry = children_.find({entry, line_number});
376 return map_entry != children_.end() ? map_entry->second : nullptr;
377}
378
380 auto map_entry = children_.find({entry, line_number});
381 if (map_entry == children_.end()) {
382 ProfileNode* node = new ProfileNode(tree_, entry, this, line_number);
384 children_list_.push_back(node);
385 return node;
386 } else {
387 return map_entry->second;
388 }
389}
390
391
393 if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) return;
394 // Increment a hit counter of a certain source line.
395 // Add a new source line if not found.
396 auto map_entry = line_ticks_.find(src_line);
397 if (map_entry == line_ticks_.end()) {
398 line_ticks_[src_line] = 1;
399 } else {
400 line_ticks_[src_line]++;
401 }
402}
403
404
406 unsigned int length) const {
407 if (entries == nullptr || length == 0) return false;
408
409 unsigned line_count = static_cast<unsigned>(line_ticks_.size());
410
411 if (line_count == 0) return true;
412 if (length < line_count) return false;
413
415
416 for (auto p = line_ticks_.begin(); p != line_ticks_.end(); p++, entry++) {
417 entry->line = p->first;
418 entry->hit_count = p->second;
419 }
420
421 return true;
422}
423
424void ProfileNode::Print(int indent) const {
426 base::OS::Print("%5u %*s %s:%d %d %d #%d", self_ticks_, indent, "",
428 entry_->script_id(), id());
429 if (entry_->resource_name()[0] != '\0')
430 base::OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
431 base::OS::Print("\n");
432 for (const CpuProfileDeoptInfo& info : deopt_infos_) {
433 base::OS::Print(
434 "%*s;;; deopted at script_id: %d position: %zu with reason '%s'.\n",
435 indent + 10, "", info.stack[0].script_id, info.stack[0].position,
436 info.deopt_reason);
437 for (size_t index = 1; index < info.stack.size(); ++index) {
438 base::OS::Print("%*s;;; Inline point: script_id %d position: %zu.\n",
439 indent + 10, "", info.stack[index].script_id,
440 info.stack[index].position);
441 }
442 }
443 const char* bailout_reason = entry_->bailout_reason();
444 if (bailout_reason != GetBailoutReason(BailoutReason::kNoReason) &&
445 bailout_reason != CodeEntry::kEmptyBailoutReason) {
446 base::OS::Print("%*s bailed out due to '%s'\n", indent + 10, "",
447 bailout_reason);
448 }
449 for (auto child : children_) {
450 child.second->Print(indent + 2);
451 }
452}
453
462
464 : next_node_id_(1),
465 isolate_(isolate),
466 code_entries_(storage),
467 root_(new ProfileNode(this, CodeEntry::root_entry(), nullptr)) {}
468
473
474ProfileNode* ProfileTree::AddPathFromEnd(const std::vector<CodeEntry*>& path,
475 int src_line, bool update_stats) {
476 ProfileNode* node = root_;
477 CodeEntry* last_entry = nullptr;
478 for (auto it = path.rbegin(); it != path.rend(); ++it) {
479 if (*it == nullptr) continue;
480 last_entry = *it;
481 node = node->FindOrAddChild(*it, v8::CpuProfileNode::kNoLineNumberInfo);
482 }
483 if (last_entry && last_entry->has_deopt_info()) {
484 node->CollectDeoptInfo(last_entry);
485 }
486 if (update_stats) {
487 node->IncrementSelfTicks();
489 node->IncrementLineTicks(src_line);
490 }
491 }
492 return node;
493}
494
496 int src_line, bool update_stats,
497 ProfilingMode mode) {
498 ProfileNode* node = root_;
499 CodeEntry* last_entry = nullptr;
500 int parent_line_number = v8::CpuProfileNode::kNoLineNumberInfo;
501 for (auto it = path.rbegin(); it != path.rend(); ++it) {
502 if (it->code_entry == nullptr) continue;
503 last_entry = it->code_entry;
504 node = node->FindOrAddChild(it->code_entry, parent_line_number);
505 parent_line_number = mode == ProfilingMode::kCallerLineNumbers
506 ? it->line_number
508 }
509 if (last_entry && last_entry->has_deopt_info()) {
510 node->CollectDeoptInfo(last_entry);
511 }
512 if (update_stats) {
513 node->IncrementSelfTicks();
515 node->IncrementLineTicks(src_line);
516 }
517 }
518 return node;
519}
520
521class Position {
522 public:
523 explicit Position(ProfileNode* node)
524 : node(node), child_idx_(0) { }
526 return node->children()->at(child_idx_);
527 }
529 return child_idx_ < static_cast<int>(node->children()->size());
530 }
532
534 private:
536};
537
538
539// Non-recursive implementation of a depth-first post-order tree traversal.
540template <typename Callback>
// (Signature line truncated in this capture.) Uses an explicit stack of
// Position cursors instead of recursion; each Position tracks which child
// of its node is visited next.
542 std::vector<Position> stack;
543 stack.emplace_back(root_);
544 while (!stack.empty()) {
545 Position& current = stack.back();
546 if (current.has_current_child()) {
// Descend: notify the callback, then make the child the new top of stack.
547 callback->BeforeTraversingChild(current.node, current.current_child());
548 stack.emplace_back(current.current_child());
549 } else {
// All children visited: post-order callback for the node itself.
550 callback->AfterAllChildrenTraversed(current.node);
551 if (stack.size() > 1) {
552 Position& parent = stack[stack.size() - 2];
553 callback->AfterChildTraversed(parent.node, current.node);
// Advance the parent's cursor past this child before popping it.
554 parent.next_child();
555 }
556 // Remove child from the stack.
557 stack.pop_back();
558 }
559 }
560}
561
562void ContextFilter::OnMoveEvent(Address from_address, Address to_address) {
563 if (native_context_address() != from_address) return;
564
565 set_native_context_address(to_address);
566}
567
569
570std::atomic<ProfilerId> CpuProfilesCollection::last_id_{0};
571
572CpuProfile::CpuProfile(CpuProfiler* profiler, ProfilerId id, const char* title,
573 CpuProfilingOptions options,
574 std::unique_ptr<DiscardedSamplesDelegate> delegate)
575 : title_(title),
576 options_(std::move(options)),
577 delegate_(std::move(delegate)),
578 start_time_(base::TimeTicks::Now()),
579 top_down_(profiler->isolate(), profiler->code_entries()),
580 profiler_(profiler),
581 streaming_next_sample_(0),
582 id_(id) {
583 // The startTime timestamp is not converted to Perfetto's clock domain and
584 // will get out of sync with other timestamps Perfetto knows about, including
585 // the automatic trace event "ts" timestamp. startTime is included for
586 // backward compatibility with the tracing protocol but the value of "ts"
587 // should be used instead (it is recorded nearly immediately after).
588 auto value = TracedValue::Create();
589 value->SetDouble("startTime", start_time_.since_origin().InMicroseconds());
591 "Profile", id_, "data", std::move(value));
592
594 if (delegate_) {
595 delegate_->SetId(id_);
596 }
597 if (options_.has_filter_context()) {
598 i::Address raw_filter_context =
599 reinterpret_cast<i::Address>(options_.raw_filter_context());
601 }
602}
603
604bool CpuProfile::CheckSubsample(base::TimeDelta source_sampling_interval) {
605 DCHECK_GE(source_sampling_interval, base::TimeDelta());
606
607 // If the sampling source's sampling interval is 0, record as many samples
608 // are possible irrespective of the profile's sampling interval. Manually
609 // taken samples (via CollectSample) fall into this case as well.
610 if (source_sampling_interval.IsZero()) return true;
611
612 next_sample_delta_ -= source_sampling_interval;
615 base::TimeDelta::FromMicroseconds(options_.sampling_interval_us());
616 return true;
617 }
618 return false;
619}
620
622 const ProfileStackTrace& path, int src_line,
623 bool update_stats, base::TimeDelta sampling_interval,
624 StateTag state_tag,
625 EmbedderStateTag embedder_state_tag,
626 const std::optional<uint64_t> trace_id) {
627 if (!CheckSubsample(sampling_interval)) return;
628 ProfileNode* top_frame_node =
629 top_down_.AddPathFromEnd(path, src_line, update_stats, options_.mode());
630
631 bool is_buffer_full =
633 samples_.size() >= options_.max_samples();
634 bool should_record_sample =
635 !timestamp.IsNull() && timestamp >= start_time_ && !is_buffer_full;
636
637 if (should_record_sample) {
638 samples_.push_back({top_frame_node, timestamp, src_line, state_tag,
639 embedder_state_tag, trace_id});
640 } else if (is_buffer_full && delegate_ != nullptr) {
641 const auto task_runner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
642 reinterpret_cast<v8::Isolate*>(profiler_->isolate()));
643
644 task_runner->PostTask(std::make_unique<CpuProfileMaxSamplesCallbackTask>(
645 std::move(delegate_)));
646 // std::move ensures that the delegate_ will be null on the next sample,
647 // so we don't post a task multiple times.
648 }
649
650 const int kSamplesFlushCount = 100;
651 const int kNodesFlushCount = 10;
652 if (samples_.size() - streaming_next_sample_ >= kSamplesFlushCount ||
653 top_down_.pending_nodes_count() >= kNodesFlushCount) {
655 }
656}
657
658namespace {
659
660void BuildNodeValue(const ProfileNode* node, TracedValue* value) {
661 const CodeEntry* entry = node->entry();
662 value->BeginDictionary("callFrame");
663 value->SetString("functionName", entry->name());
664 if (*entry->resource_name()) {
665 value->SetString("url", entry->resource_name());
666 }
667 value->SetInteger("scriptId", entry->script_id());
668 if (entry->line_number()) {
669 value->SetInteger("lineNumber", entry->line_number() - 1);
670 }
671 if (entry->column_number()) {
672 value->SetInteger("columnNumber", entry->column_number() - 1);
673 }
674 value->SetString("codeType", entry->code_type_string());
675 value->EndDictionary();
676 value->SetInteger("id", node->id());
677 if (node->parent()) {
678 value->SetInteger("parent", node->parent()->id());
679 }
680 const char* deopt_reason = entry->bailout_reason();
681 if (deopt_reason && deopt_reason[0] && strcmp(deopt_reason, "no reason")) {
682 value->SetString("deoptReason", deopt_reason);
683 }
684}
685
686} // namespace
687
689 std::vector<const ProfileNode*> pending_nodes = top_down_.TakePendingNodes();
690 if (pending_nodes.empty() && samples_.empty()) return;
691 auto value = TracedValue::Create();
692
693 if (!pending_nodes.empty() || streaming_next_sample_ != samples_.size()) {
694 value->BeginDictionary("cpuProfile");
695 if (!pending_nodes.empty()) {
696 value->BeginArray("nodes");
697 for (auto node : pending_nodes) {
698 value->BeginDictionary();
699 BuildNodeValue(node, value.get());
700 value->EndDictionary();
701 }
702 value->EndArray();
703 }
704 if (streaming_next_sample_ != samples_.size()) {
705 value->BeginArray("samples");
706 for (size_t i = streaming_next_sample_; i < samples_.size(); ++i) {
707 value->AppendInteger(samples_[i].node->id());
708 }
709 value->EndArray();
710 value->BeginDictionary("trace_ids");
711 for (size_t i = streaming_next_sample_; i < samples_.size(); ++i) {
712 if (!samples_[i].trace_id.has_value()) {
713 continue;
714 }
715 value->SetUnsignedInteger(
716 std::to_string(samples_[i].trace_id.value()).c_str(),
717 samples_[i].node->id());
718 }
719 value->EndDictionary();
720 }
721 value->EndDictionary();
722 }
723 if (streaming_next_sample_ != samples_.size()) {
724 // timeDeltas are computed within CLOCK_MONOTONIC. However, trace event
725 // "ts" timestamps are converted to CLOCK_BOOTTIME by Perfetto. To get
726 // absolute timestamps in CLOCK_BOOTTIME from timeDeltas, add them to
727 // the "ts" timestamp from the initial "Profile" trace event sent by
728 // CpuProfile::CpuProfile().
729 //
730 // Note that if the system is suspended and resumed while samples_ is
731 // captured, timeDeltas derived after resume will not be convertible to
732 // correct CLOCK_BOOTTIME time values (for instance, producing
733 // CLOCK_BOOTTIME time values in the middle of the suspended period).
734 value->BeginArray("timeDeltas");
735 base::TimeTicks lastTimestamp =
737 : start_time();
738 for (size_t i = streaming_next_sample_; i < samples_.size(); ++i) {
739 value->AppendInteger(static_cast<int>(
740 (samples_[i].timestamp - lastTimestamp).InMicroseconds()));
741 lastTimestamp = samples_[i].timestamp;
742 }
743 value->EndArray();
744 bool has_non_zero_lines =
745 std::any_of(samples_.begin() + streaming_next_sample_, samples_.end(),
746 [](const SampleInfo& sample) { return sample.line != 0; });
747 if (has_non_zero_lines) {
748 value->BeginArray("lines");
749 for (size_t i = streaming_next_sample_; i < samples_.size(); ++i) {
750 value->AppendInteger(samples_[i].line);
751 }
752 value->EndArray();
753 }
755 }
756
758 "ProfileChunk", id_, "data", std::move(value));
759}
760
763 // Stop tracking context movements after profiling stops.
766 auto value = TracedValue::Create();
767 // The endTime timestamp is not converted to Perfetto's clock domain and will
768 // get out of sync with other timestamps Perfetto knows about, including the
769 // automatic trace event "ts" timestamp. endTime is included for backward
770 // compatibility with the tracing protocol: its presence in "data" is used by
771 // devtools to identify the last ProfileChunk but the value of "ts" should be
772 // used instead (it is recorded nearly immediately after).
773 value->SetDouble("endTime", end_time_.since_origin().InMicroseconds());
775 "ProfileChunk", id_, "data", std::move(value));
776}
777
778namespace {
779
780void FlattenNodesTree(const v8::CpuProfileNode* node,
781 std::vector<const v8::CpuProfileNode*>* nodes) {
782 nodes->emplace_back(node);
783 const int childrenCount = node->GetChildrenCount();
784 for (int i = 0; i < childrenCount; i++)
785 FlattenNodesTree(node->GetChild(i), nodes);
786}
787
788} // namespace
789
792 writer_ = new OutputStreamWriter(stream);
794 delete writer_;
795 writer_ = nullptr;
796}
797
799 const v8::CpuProfileNode* node, int lineCount) {
800 std::vector<v8::CpuProfileNode::LineTick> entries(lineCount);
801 if (node->GetLineTicks(&entries[0], lineCount)) {
802 for (int i = 0; i < lineCount; i++) {
803 writer_->AddCharacter('{');
804 writer_->AddString("\"line\":");
805 writer_->AddNumber(entries[i].line);
806 writer_->AddString(",\"ticks\":");
807 writer_->AddNumber(entries[i].hit_count);
808 writer_->AddCharacter('}');
809 if (i != (lineCount - 1)) writer_->AddCharacter(',');
810 }
811 }
812}
813
815 const v8::CpuProfileNode* node) {
816 writer_->AddString("\"functionName\":\"");
817 writer_->AddString(node->GetFunctionNameStr());
818 writer_->AddString("\",\"lineNumber\":");
819 writer_->AddNumber(node->GetLineNumber() - 1);
820 writer_->AddString(",\"columnNumber\":");
821 writer_->AddNumber(node->GetColumnNumber() - 1);
822 writer_->AddString(",\"scriptId\":");
823 writer_->AddNumber(node->GetScriptId());
824 writer_->AddString(",\"url\":\"");
825 writer_->AddString(node->GetScriptResourceNameStr());
826 writer_->AddCharacter('"');
827}
828
830 int childrenCount) {
831 for (int i = 0; i < childrenCount; i++) {
832 writer_->AddNumber(node->GetChild(i)->GetNodeId());
833 if (i != (childrenCount - 1)) writer_->AddCharacter(',');
834 }
835}
836
838 writer_->AddCharacter('{');
839 writer_->AddString("\"id\":");
840 writer_->AddNumber(node->GetNodeId());
841
842 writer_->AddString(",\"hitCount\":");
843 writer_->AddNumber(node->GetHitCount());
844
845 writer_->AddString(",\"callFrame\":{");
846 SerializeCallFrame(node);
847 writer_->AddCharacter('}');
848
849 const int childrenCount = node->GetChildrenCount();
850 if (childrenCount) {
851 writer_->AddString(",\"children\":[");
852 SerializeChildren(node, childrenCount);
853 writer_->AddCharacter(']');
854 }
855
856 const char* deoptReason = node->GetBailoutReason();
857 if (deoptReason && deoptReason[0] && strcmp(deoptReason, "no reason")) {
858 writer_->AddString(",\"deoptReason\":\"");
859 writer_->AddString(deoptReason);
860 writer_->AddCharacter('"');
861 }
862
863 unsigned lineCount = node->GetHitLineCount();
864 if (lineCount) {
865 writer_->AddString(",\"positionTicks\":[");
866 SerializePositionTicks(node, lineCount);
867 writer_->AddCharacter(']');
868 }
869 writer_->AddCharacter('}');
870}
871
873 std::vector<const v8::CpuProfileNode*> nodes;
874 FlattenNodesTree(
875 reinterpret_cast<const v8::CpuProfileNode*>(profile_->top_down()->root()),
876 &nodes);
877
878 for (size_t i = 0; i < nodes.size(); i++) {
879 SerializeNode(nodes.at(i));
880 if (writer_->aborted()) return;
881 if (i != (nodes.size() - 1)) writer_->AddCharacter(',');
882 }
883}
884
887 uint64_t lastTime = profile_->start_time().since_origin().InMicroseconds();
888 for (int i = 0; i < count; i++) {
889 uint64_t ts = profile_->sample(i).timestamp.since_origin().InMicroseconds();
890 writer_->AddNumber(static_cast<int>(ts - lastTime));
891 if (i != (count - 1)) writer_->AddString(",");
892 lastTime = ts;
893 }
894}
895
898 for (int i = 0; i < count; i++) {
899 writer_->AddNumber(profile_->sample(i).node->id());
900 if (i != (count - 1)) writer_->AddString(",");
901 }
902}
903
905 writer_->AddCharacter('{');
906 writer_->AddString("\"nodes\":[");
908 writer_->AddString("]");
909
910 writer_->AddString(",\"startTime\":");
911 writer_->AddNumber(static_cast<unsigned>(
913
914 writer_->AddString(",\"endTime\":");
915 writer_->AddNumber(static_cast<unsigned>(
917
918 writer_->AddString(",\"samples\":[");
920 if (writer_->aborted()) return;
921 writer_->AddCharacter(']');
922
923 writer_->AddString(",\"timeDeltas\":[");
925 if (writer_->aborted()) return;
926 writer_->AddString("]");
927
928 writer_->AddCharacter('}');
929 writer_->Finalize();
930}
931
932void CpuProfile::Print() const {
933 base::OS::Print("[Top down]:\n");
937}
938
940 if (entry->is_ref_counted()) entry->AddRef();
941}
942
944 if (entry->is_ref_counted() && entry->DecRef() == 0) {
945 if (entry->rare_data_) {
946 for (auto* inline_entry : entry->rare_data_->inline_entries_) {
947 DecRef(inline_entry);
948 }
949 }
951 delete entry;
952 }
953}
954
956 : code_entries_(storage) {}
957
959
961 for (auto& slot : code_map_) {
962 if (CodeEntry* entry = slot.second.entry) {
963 code_entries_.DecRef(entry);
964 } else {
965 // We expect all entries in the code mapping to contain a CodeEntry.
966 UNREACHABLE();
967 }
968 }
969
970 code_map_.clear();
971}
972
974 unsigned size) {
975 code_map_.emplace(addr, CodeEntryMapInfo{entry, size});
976 entry->set_instruction_start(addr);
977}
978
980 auto range = code_map_.equal_range(entry->instruction_start());
981 for (auto i = range.first; i != range.second; ++i) {
982 if (i->second.entry == entry) {
983 code_entries_.DecRef(entry);
984 code_map_.erase(i);
985 return true;
986 }
987 }
988 return false;
989}
990
992 auto left = code_map_.upper_bound(start);
993 if (left != code_map_.begin()) {
994 --left;
995 if (left->first + left->second.size <= start) ++left;
996 }
997 auto right = left;
998 for (; right != code_map_.end() && right->first < end; ++right) {
999 code_entries_.DecRef(right->second.entry);
1000 }
1001 code_map_.erase(left, right);
1002}
1003
// InstructionStreamMap::FindEntry (signature truncated in this capture):
// returns the CodeEntry whose [start, start + size) range contains |addr|,
// or nullptr; optionally reports the code object's start address through
// |out_instruction_start|.
1005 Address* out_instruction_start) {
1006 // Note that an address may correspond to multiple CodeEntry objects. An
1007 // arbitrary selection is made (as per multimap spec) in the event of a
1008 // collision.
// upper_bound yields the first entry starting strictly after addr; the only
// candidate that can contain addr is the entry immediately before it.
1009 auto it = code_map_.upper_bound(addr);
1010 if (it == code_map_.begin()) return nullptr;
1011 --it;
1012 Address start_address = it->first;
1013 Address end_address = start_address + it->second.size;
// Reject if addr falls past the candidate's code range.
1014 CodeEntry* ret = addr < end_address ? it->second.entry : nullptr;
1015 DCHECK(!ret || (addr >= start_address && addr < end_address));
1016 if (ret && out_instruction_start) *out_instruction_start = start_address;
1017 return ret;
1018}
1019
// InstructionStreamMap::MoveCode (signature truncated in this capture):
// re-keys every entry registered at address |from| to address |to| and
// updates each entry's instruction start accordingly.
1021 if (from == to) return;
1022
1023 auto range = code_map_.equal_range(from);
1024 // Instead of iterating until |range.second|, iterate the number of elements.
1025 // This is because the |range.second| may no longer be the element past the
1026 // end of the equal elements range after insertions.
1027 size_t distance = std::distance(range.first, range.second);
1028 auto it = range.first;
1029 while (distance--) {
1030 CodeEntryMapInfo& info = it->second;
1031 DCHECK(info.entry);
1032 DCHECK_EQ(info.entry->instruction_start(), from);
1033 info.entry->set_instruction_start(to);
1034
// The destination range must not overlap the source range, otherwise the
// freshly inserted entries could collide with the ones being moved.
1035 DCHECK(from + info.size <= to || to + info.size <= from);
1036 code_map_.emplace(to, info);
1037 it++;
1038 }
1039
// Drop the stale entries that were keyed at |from|.
1040 code_map_.erase(range.first, it);
1041}
1042
1044 for (const auto& pair : code_map_) {
1045 base::OS::Print("%p %5d %s\n", reinterpret_cast<void*>(pair.first),
1046 pair.second.size, pair.second.entry->name());
1047 }
1048}
1049
1051 size_t map_size = 0;
1052 for (const auto& pair : code_map_) {
1053 map_size += sizeof(pair.first) + sizeof(pair.second) +
1054 pair.second.entry->EstimatedSize();
1055 }
1056 return sizeof(*this) + map_size;
1057}
1058
1060 : profiler_(nullptr), current_profiles_mutex_(), isolate_(isolate) {
1061 USE(isolate_);
1062}
1063
1068
1070 const char* title, CpuProfilingOptions options,
1071 std::unique_ptr<DiscardedSamplesDelegate> delegate) {
1072 return StartProfiling(++last_id_, title, std::move(options),
1073 std::move(delegate));
1074}
1075
1077 ProfilerId id, const char* title, CpuProfilingOptions options,
1078 std::unique_ptr<DiscardedSamplesDelegate> delegate) {
1080 if (static_cast<int>(current_profiles_.size()) >= kMaxSimultaneousProfiles) {
1081 return {
1082 0,
1084 };
1085 }
1086
1087 for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
1088 if ((profile->title() != nullptr && title != nullptr &&
1089 strcmp(profile->title(), title) == 0) ||
1090 profile->id() == id) {
1091 // Ignore attempts to start profile with the same title or id
1092 // ... though return kAlreadyStarted to force it collect a sample.
1093 return {
1094 profile->id(),
1096 };
1097 }
1098 }
1099
1100 CpuProfile* profile = new CpuProfile(profiler_, id, title, std::move(options),
1101 std::move(delegate));
1102 current_profiles_.emplace_back(profile);
1103
1104 return {
1105 profile->id(),
1107 };
1108}
1109
1112 CpuProfile* profile = nullptr;
1113
1114 auto it = std::find_if(
1115 current_profiles_.rbegin(), current_profiles_.rend(),
1116 [=](const std::unique_ptr<CpuProfile>& p) { return id == p->id(); });
1117
1118 if (it != current_profiles_.rend()) {
1119 (*it)->FinishProfile();
1120 profile = it->get();
1121 finished_profiles_.push_back(std::move(*it));
1122 // Convert reverse iterator to matching forward iterator.
1123 current_profiles_.erase(--(it.base()));
1124 }
1125 return profile;
1126}
1127
1129 if (title == nullptr) return nullptr;
1130 // http://crbug/51594, edge case console.profile may provide an empty title
1131 // and must not crash
1132 const bool empty_title = title[0] == '\0';
1134 auto it = std::find_if(
1135 current_profiles_.rbegin(), current_profiles_.rend(),
1136 [&](const std::unique_ptr<CpuProfile>& p) {
1137 return (empty_title ||
1138 (p->title() != nullptr && strcmp(p->title(), title) == 0));
1139 });
1140 if (it != current_profiles_.rend()) return it->get();
1141 return nullptr;
1142}
1143
1146 if (current_profiles_.size() != 1) return false;
1147 return id == current_profiles_[0]->id();
1148}
1149
1151 // Called from VM thread for a completed profile.
1153 auto pos =
1154 std::find_if(finished_profiles_.begin(), finished_profiles_.end(),
1155 [&](const std::unique_ptr<CpuProfile>& finished_profile) {
1156 return finished_profile.get() == profile;
1157 });
1158 DCHECK(pos != finished_profiles_.end());
1159 finished_profiles_.erase(pos);
1160}
1161
namespace {

// Euclid's algorithm, iterative form. GreatestCommonDivisor(x, 0) == x,
// matching the recursive definition gcd(a, b) = b ? gcd(b, a % b) : a.
int64_t GreatestCommonDivisor(int64_t a, int64_t b) {
  while (b != 0) {
    const int64_t remainder = a % b;
    a = b;
    b = remainder;
  }
  return a;
}

}  // namespace
1169
1172
1173 int64_t base_sampling_interval_us =
1175 if (base_sampling_interval_us == 0) return base::TimeDelta();
1176
1177 int64_t interval_us = 0;
1178 {
1180 for (const auto& profile : current_profiles_) {
1181 // Snap the profile's requested sampling interval to the next multiple of
1182 // the base sampling interval.
1183 int64_t profile_interval_us =
1184 std::max<int64_t>((profile->sampling_interval_us() +
1185 base_sampling_interval_us - 1) /
1186 base_sampling_interval_us,
1187 1) *
1188 base_sampling_interval_us;
1189 interval_us = GreatestCommonDivisor(interval_us, profile_interval_us);
1190 }
1191 }
1192 return base::TimeDelta::FromMicroseconds(interval_us);
1193}
1194
1196 base::TimeTicks timestamp, const ProfileStackTrace& path, int src_line,
1197 bool update_stats, base::TimeDelta sampling_interval, StateTag state,
1198 EmbedderStateTag embedder_state_tag, Address native_context_address,
1199 Address embedder_native_context_address,
1200 const std::optional<uint64_t> trace_id) {
1201 // As starting / stopping profiles is rare relatively to this
1202 // method, we don't bother minimizing the duration of lock holding,
1203 // e.g. copying contents of the list to a local vector.
1204 const ProfileStackTrace empty_path;
1206 for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
1207 ContextFilter& context_filter = profile->context_filter();
1208 // If the context filter check failed, omit the contents of the stack.
1209 bool accepts_context = context_filter.Accept(native_context_address);
1210 bool accepts_embedder_context =
1211 context_filter.Accept(embedder_native_context_address);
1212
1213 // if FilterContext is set, do not propagate StateTag if not accepted.
1214 // GC (and LOGGING when during GC) is the exception, because native context
1215 // address can be empty but we still want to know that this is GC.
1216 if (!accepts_context && state != StateTag::GC &&
1217 state != StateTag::LOGGING) {
1218 state = StateTag::IDLE;
1219 }
1220 profile->AddPath(
1221 timestamp, accepts_context ? path : empty_path, src_line, update_stats,
1222 sampling_interval, state,
1223 accepts_embedder_context ? embedder_state_tag : EmbedderStateTag::EMPTY,
1224 trace_id);
1225 }
1226}
1227
1229 Address from, Address to) {
1231 for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
1232 profile->context_filter().OnMoveEvent(from, to);
1233 }
1234}
1235
1236} // namespace internal
1237} // namespace v8
Isolate * isolate_
SourcePosition pos
static const int kNoColumnNumberInfo
static const int kNoLineNumberInfo
static const unsigned kNoSampleLimit
std::shared_ptr< v8::TaskRunner > GetForegroundTaskRunner(Isolate *isolate)
void PostTask(std::unique_ptr< Task > task, const SourceLocation &location=SourceLocation::Current())
Definition v8-platform.h:82
static const int kNoScriptId
Definition v8-script.h:91
static V8_NODISCARD constexpr U update(U previous, T value)
Definition bit-field.h:61
static constexpr TimeDelta FromMicroseconds(int64_t microseconds)
Definition time.h:87
constexpr bool IsZero() const
Definition time.h:113
int64_t InMicroseconds() const
Definition time.cc:251
static TimeTicks Now()
Definition time.cc:736
constexpr bool IsNull() const
Definition time.h:265
constexpr TimeDelta since_origin() const
Definition time.h:295
static const char *const kNoDeoptReason
LogEventListener::CodeTag code_tag() const
static V8_EXPORT_PRIVATE const char *const kUnresolvedFunctionName
bool IsSameFunctionAs(const CodeEntry *entry) const
std::atomic< std::size_t > ref_count_
void FillFunctionInfo(Tagged< SharedFunctionInfo > shared)
const char * bailout_reason() const
void set_script_id(int script_id)
std::unique_ptr< RareData > rare_data_
static V8_EXPORT_PRIVATE const char *const kIdleEntryName
void SetInlineStacks(std::unordered_set< CodeEntry *, Hasher, Equals > inline_entries, std::unordered_map< int, std::vector< CodeEntryAndLineNumber > > inline_stacks)
static V8_EXPORT_PRIVATE CodeEntry * gc_entry()
static V8_EXPORT_PRIVATE const char *const kGarbageCollectorEntryName
void set_bailout_reason(const char *bailout_reason)
CpuProfileDeoptInfo GetDeoptInfo()
const char * name() const
static V8_EXPORT_PRIVATE CodeEntry * root_entry()
const std::vector< CodeEntryAndLineNumber > * GetInlineStack(int pc_offset) const
std::unique_ptr< SourcePositionTable > line_info_
void set_position(int position)
void ReleaseStrings(StringsStorage &strings)
static V8_EXPORT_PRIVATE const char *const kEmptyResourceName
static V8_EXPORT_PRIVATE CodeEntry * program_entry()
const char * code_type_string() const
static const char *const kEmptyBailoutReason
void set_instruction_start(Address address)
static V8_EXPORT_PRIVATE CodeEntry * idle_entry()
int GetSourceLine(int pc_offset) const
static V8_EXPORT_PRIVATE const char *const kRootEntryName
void set_deopt_info(const char *deopt_reason, int deopt_id, std::vector< CpuProfileDeoptFrame > inlined_frames)
static V8_EXPORT_PRIVATE CodeEntry * unresolved_entry()
const char * resource_name() const
static V8_EXPORT_PRIVATE const char *const kProgramEntryName
Address instruction_start() const
void set_native_context_address(Address address)
bool Accept(Address native_context_address) const
void OnMoveEvent(Address from_address, Address to_address)
void SerializeNode(const v8::CpuProfileNode *node)
void SerializeChildren(const v8::CpuProfileNode *node, int childrenCount)
void Serialize(v8::OutputStream *stream)
void SerializeCallFrame(const v8::CpuProfileNode *node)
void SerializePositionTicks(const v8::CpuProfileNode *node, int lineCount)
const ProfileTree * top_down() const
std::unique_ptr< DiscardedSamplesDelegate > delegate_
const CpuProfilingOptions options_
base::TimeTicks start_time() const
void AddPath(base::TimeTicks timestamp, const ProfileStackTrace &path, int src_line, bool update_stats, base::TimeDelta sampling_interval, StateTag state, EmbedderStateTag embedder_state, const std::optional< uint64_t > trace_id=std::nullopt)
std::deque< SampleInfo > samples_
V8_EXPORT_PRIVATE bool CheckSubsample(base::TimeDelta sampling_interval)
base::TimeTicks end_time() const
const SampleInfo & sample(int index) const
V8_EXPORT_PRIVATE CpuProfile(CpuProfiler *profiler, ProfilerId id, const char *title, CpuProfilingOptions options, std::unique_ptr< DiscardedSamplesDelegate > delegate=nullptr)
V8_EXPORT_PRIVATE void Print() const
base::TimeDelta next_sample_delta_
CpuProfiler *const profiler_
Isolate * isolate() const
base::TimeDelta sampling_interval() const
CpuProfilingResult StartProfilingForTesting(ProfilerId id)
void RemoveProfile(CpuProfile *profile)
CpuProfile * StopProfiling(ProfilerId id)
std::vector< std::unique_ptr< CpuProfile > > finished_profiles_
void UpdateNativeContextAddressForCurrentProfiles(Address from, Address to)
CpuProfile * Lookup(const char *title)
CpuProfilingResult StartProfiling(const char *title=nullptr, CpuProfilingOptions options={}, std::unique_ptr< DiscardedSamplesDelegate > delegate=nullptr)
static std::atomic< ProfilerId > last_id_
std::vector< std::unique_ptr< CpuProfile > > current_profiles_
void AddPathToCurrentProfiles(base::TimeTicks timestamp, const ProfileStackTrace &path, int src_line, bool update_stats, base::TimeDelta sampling_interval, StateTag state, EmbedderStateTag embedder_state_tag, Address native_context_address=kNullAddress, Address native_embedder_context_address=kNullAddress, const std::optional< uint64_t > trace_id=std::nullopt)
void AfterChildTraversed(ProfileNode *, ProfileNode *)
void AfterAllChildrenTraversed(ProfileNode *node)
void BeforeTraversingChild(ProfileNode *, ProfileNode *)
void MoveCode(Address from, Address to)
std::multimap< Address, CodeEntryMapInfo > code_map_
void AddCode(Address addr, CodeEntry *entry, unsigned size)
InstructionStreamMap(CodeEntryStorage &storage)
CodeEntry * FindEntry(Address addr, Address *out_instruction_start=nullptr)
void ClearCodesInRange(Address start, Address end)
ThreadId thread_id() const
Definition isolate.h:821
V8_INLINE ProfileNode * current_child()
Position(ProfileNode *node)
V8_INLINE bool has_current_child()
std::vector< CpuProfileDeoptInfo > deopt_infos_
void Print(int indent) const
std::unordered_map< int, int > line_ticks_
const std::vector< ProfileNode * > * children() const
ProfileNode * FindOrAddChild(CodeEntry *entry, int line_number=0)
void IncrementLineTicks(int src_line)
ProfileNode(ProfileTree *tree, CodeEntry *entry, ProfileNode *parent, int line_number=0)
CpuProfileNode::SourceType source_type() const
bool GetLineTicks(v8::CpuProfileNode::LineTick *entries, unsigned int length) const
ProfileNode * FindChild(CodeEntry *entry, int line_number=v8::CpuProfileNode::kNoLineNumberInfo)
std::unordered_map< CodeEntryAndLineNumber, ProfileNode *, Hasher, Equals > children_
std::vector< ProfileNode * > children_list_
void CollectDeoptInfo(CodeEntry *entry)
CodeEntryStorage * code_entries()
ProfileNode * AddPathFromEnd(const std::vector< CodeEntry * > &path, int src_line=v8::CpuProfileNode::kNoLineNumberInfo, bool update_stats=true)
void TraverseDepthFirst(Callback *callback)
std::vector< const ProfileNode * > TakePendingNodes()
ProfileTree(Isolate *isolate, CodeEntryStorage *storage=nullptr)
static ProfilerStats * Instance()
int GetInliningId(int pc_offset) const
int GetSourceLineNumber(int pc_offset) const
void SetPosition(int pc_offset, int line, int inlining_id)
std::vector< SourcePositionTuple > pc_offsets_to_lines_
static ThreadId Current()
Definition thread-id.h:32
static V8_EXPORT_PRIVATE v8::Platform * GetCurrentPlatform()
Definition v8.cc:282
static std::unique_ptr< TracedValue > Create()
int start
uint32_t count
Handle< SharedFunctionInfo > info
int end
GCOptions options_
TNode< Object > callback
Node * node
ZoneStack< RpoNumber > & stack
ZoneVector< Entry > entries
int pc_offset
base::TimeTicks start_time_
STL namespace.
uint32_t ComputeUnseededHash(uint32_t key)
Definition utils.h:271
const char * GetBailoutReason(BailoutReason reason)
static constexpr Address kNullAddress
Definition v8-internal.h:53
std::vector< CodeEntryAndLineNumber > ProfileStackTrace
constexpr int kNoDeoptimizationId
Definition globals.h:861
Tagged< To > Cast(Tagged< From > value, const v8::SourceLocation &loc=INIT_SOURCE_LOCATION_IN_DEBUG)
Definition casting.h:150
CpuProfilingMode
uint32_t ProfilerId
Definition v8-profiler.h:32
StateTag
Definition v8-unwinder.h:36
@ GC
Definition v8-unwinder.h:38
@ LOGGING
Definition v8-unwinder.h:46
@ IDLE
Definition v8-unwinder.h:45
#define DCHECK_NULL(val)
Definition logging.h:491
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define DCHECK_GT(v1, v2)
Definition logging.h:487
#define USE(...)
Definition macros.h:293
const char * deopt_reason
Definition v8-profiler.h:53
std::vector< CpuProfileDeoptFrame > deopt_inlined_frames_
std::unordered_set< CodeEntry *, Hasher, Equals > inline_entries_
#define TRACE_DISABLED_BY_DEFAULT(name)
#define TRACE_EVENT_SAMPLE_WITH_ID1(category_group, name, id, arg1_name, arg1_val)
#define V8_INLINE
Definition v8config.h:500