v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
sampler.cc
Go to the documentation of this file.
1// Copyright 2016 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
10
11#ifdef USE_SIGNALS
12
13#include <errno.h>
14#include <pthread.h>
15#include <signal.h>
16#include <sys/time.h>
17#include <atomic>
18
19#if !V8_OS_QNX && !V8_OS_AIX && !V8_OS_ZOS
20#include <sys/syscall.h>
21#endif
22
23#if V8_OS_AIX || V8_TARGET_ARCH_S390X
24
26
27#elif V8_OS_DARWIN
28#include <mach/mach.h>
29// OpenBSD doesn't have <ucontext.h>. ucontext_t lives in <signal.h>
30// and is a typedef for struct sigcontext. There is no uc_mcontext.
31#elif !V8_OS_OPENBSD
32#include <ucontext.h>
33#endif
34
35#include <unistd.h>
36
37#elif V8_OS_WIN || V8_OS_CYGWIN
38
39#include <windows.h>
40
41#elif V8_OS_FUCHSIA
42
43#include <zircon/process.h>
44#include <zircon/syscalls.h>
45#include <zircon/syscalls/debug.h>
46#include <zircon/types.h>
47
48// TODO(wez): Remove this once the Fuchsia SDK has rolled.
49#if defined(ZX_THREAD_STATE_REGSET0)
50#define ZX_THREAD_STATE_GENERAL_REGS ZX_THREAD_STATE_REGSET0
// Compatibility shim for pre-roll Fuchsia SDKs: adapts the modern 4-argument
// zx_thread_read_state signature to the legacy 5-argument SDK entry point
// (same name, extra out-length parameter), discarding the reported length.
zx_status_t zx_thread_read_state(zx_handle_t h, uint32_t k, void* b, size_t l) {
  uint32_t dummy_out_len = 0;
  // Resolves to the SDK's 5-argument overload; out-length is not needed here.
  return zx_thread_read_state(h, k, b, static_cast<uint32_t>(l),
                              &dummy_out_len);
}
56#if defined(__x86_64__)
57using zx_thread_state_general_regs_t = zx_x86_64_general_regs_t;
58#else
59using zx_thread_state_general_regs_t = zx_arm64_general_regs_t;
60#endif
61#endif // !defined(ZX_THREAD_STATE_GENERAL_REGS)
62
63#endif
64
65#include <algorithm>
66#include <vector>
67
71
72#if V8_OS_ZOS
73// Header from zoslib, for __mcontext_t_:
74#include "edcwccwi.h"
75#endif
76
77#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
78
79// Not all versions of Android's C library provide ucontext_t.
80// Detect this and provide custom but compatible definitions. Note that these
81// follow the GLibc naming convention to access register values from
82// mcontext_t.
83//
84// See http://code.google.com/p/android/issues/detail?id=34784
85
#if defined(__arm__)

// ARM: bionic's struct sigcontext doubles as the machine context.
using mcontext_t = struct sigcontext;

struct ucontext_t {
  uint32_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  // Other fields are not used by V8, don't define them here.
};

#elif defined(__aarch64__)

// AArch64: bionic's struct sigcontext doubles as the machine context.
using mcontext_t = struct sigcontext;

struct ucontext_t {
  uint64_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  // Other fields are not used by V8, don't define them here.
};

#elif defined(__mips__)
// MIPS version of sigcontext, for Android bionic.
struct mcontext_t {
  uint32_t regmask;
  uint32_t status;
  uint64_t pc;
  uint64_t gregs[32];
  uint64_t fpregs[32];
  uint32_t acx;
  uint32_t fpc_csr;
  uint32_t fpc_eir;
  uint32_t used_math;
  uint32_t dsp;
  uint64_t mdhi;
  uint64_t mdlo;
  uint32_t hi1;
  uint32_t lo1;
  uint32_t hi2;
  uint32_t lo2;
  uint32_t hi3;
  uint32_t lo3;
};

struct ucontext_t {
  uint32_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  // Other fields are not used by V8, don't define them here.
};

#elif defined(__i386__)
// x86 version for Android.
struct mcontext_t {
  uint32_t gregs[19];
  void* fpregs;
  uint32_t oldmask;
  uint32_t cr2;
};

using kernel_sigset_t = uint32_t[2];  // x86 kernel uses 64-bit signal masks
struct ucontext_t {
  uint32_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  // Other fields are not used by V8, don't define them here.
};
// Indices into mcontext_t::gregs, following the GLibc naming convention
// (see the comment above this compat block).
enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };

#elif defined(__x86_64__)
// x64 version for Android.
struct mcontext_t {
  uint64_t gregs[23];
  void* fpregs;
  uint64_t __reserved1[8];
};

struct ucontext_t {
  uint64_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  // Other fields are not used by V8, don't define them here.
};
// Indices into mcontext_t::gregs, following the GLibc naming convention.
enum { REG_RBP = 10, REG_RSP = 15, REG_RIP = 16 };
#endif
177
178#endif // V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
179
180namespace v8 {
181namespace sampler {
182
183#if defined(USE_SIGNALS)
184
185AtomicGuard::AtomicGuard(AtomicMutex* atomic, bool is_blocking)
186 : atomic_(atomic), is_success_(false) {
187 do {
188 bool expected = false;
189 // We have to use the strong version here for the case where is_blocking
190 // is false, and we will only attempt the exchange once.
191 is_success_ = atomic->compare_exchange_strong(expected, true);
192 } while (is_blocking && !is_success_);
193}
194
195AtomicGuard::~AtomicGuard() {
196 if (!is_success_) return;
197 atomic_->store(false);
198}
199
200bool AtomicGuard::is_success() const { return is_success_; }
201
// POSIX per-sampler data: the identity of the thread to be profiled, captured
// at construction time. vm_tid() keys SamplerManager's map; vm_tself() is the
// pthread handle used as the pthread_kill target in Sampler::DoSample.
class Sampler::PlatformData {
 public:
  PlatformData()
      : vm_tid_(base::OS::GetCurrentThreadId()), vm_tself_(pthread_self()) {}
  int vm_tid() const { return vm_tid_; }
  pthread_t vm_tself() const { return vm_tself_; }

 private:
  int vm_tid_;
  pthread_t vm_tself_;
};
213
214void SamplerManager::AddSampler(Sampler* sampler) {
215 AtomicGuard atomic_guard(&samplers_access_counter_);
216 DCHECK(sampler->IsActive());
217 int thread_id = sampler->platform_data()->vm_tid();
218 auto it = sampler_map_.find(thread_id);
219 if (it == sampler_map_.end()) {
220 SamplerList samplers;
221 samplers.push_back(sampler);
222 sampler_map_.emplace(thread_id, std::move(samplers));
223 } else {
224 SamplerList& samplers = it->second;
225 auto sampler_it = std::find(samplers.begin(), samplers.end(), sampler);
226 if (sampler_it == samplers.end()) samplers.push_back(sampler);
227 }
228}
229
// Unregisters `sampler` from its thread's list, dropping the map entry when
// the list becomes empty. Runs under the same spin guard as AddSampler and
// DoSample.
void SamplerManager::RemoveSampler(Sampler* sampler) {
  AtomicGuard atomic_guard(&samplers_access_counter_);
  DCHECK(sampler->IsActive());
  int thread_id = sampler->platform_data()->vm_tid();
  auto it = sampler_map_.find(thread_id);
  DCHECK_NE(it, sampler_map_.end());
  SamplerList& samplers = it->second;
  // Erase-remove idiom; AddSampler guarantees at most one occurrence.
  samplers.erase(std::remove(samplers.begin(), samplers.end(), sampler),
                 samplers.end());
  if (samplers.empty()) {
    sampler_map_.erase(it);
  }
}
243
// Invoked from the SIGPROF handler on the sampled thread: forwards `state`
// to every eligible sampler registered for this thread. The guard is
// non-blocking — if another thread holds the samplers lock we bail out
// rather than spin inside a signal handler.
void SamplerManager::DoSample(const v8::RegisterState& state) {
  AtomicGuard atomic_guard(&samplers_access_counter_, false);
  // TODO(petermarshall): Add stat counters for the bailouts here.
  if (!atomic_guard.is_success()) return;
  int thread_id = base::OS::GetCurrentThreadId();
  auto it = sampler_map_.find(thread_id);
  if (it == sampler_map_.end()) return;
  SamplerList& samplers = it->second;

  for (Sampler* sampler : samplers) {
    // Skip samplers that have not been armed for a sample.
    if (!sampler->ShouldRecordSample()) continue;
    Isolate* isolate = sampler->isolate();
    // We require a fully initialized and entered isolate.
    if (isolate == nullptr || !isolate->IsInUse()) continue;
    sampler->SampleStack(state);
  }
}
261
// Process-wide singleton; per base::LeakyObject it is constructed on first
// use and intentionally never destroyed.
SamplerManager* SamplerManager::instance() {
  static base::LeakyObject<SamplerManager> instance;
  return instance.get();
}
266
267#elif V8_OS_WIN || V8_OS_CYGWIN
268
269// ----------------------------------------------------------------------------
270// Win32 profiler support. On Cygwin we use the same sampler implementation as
271// on Win32.
272
// Win32/Cygwin per-sampler data: a duplicated handle to the profiled thread,
// with the access rights DoSample needs (suspend/resume and context capture).
class Sampler::PlatformData {
 public:
  // Get a handle to the calling thread. This is the thread that we are
  // going to profile. We need to make a copy of the handle because we are
  // going to use it in the sampler thread.
  PlatformData() {
    HANDLE current_process = GetCurrentProcess();
    BOOL result = DuplicateHandle(
        current_process, GetCurrentThread(), current_process, &profiled_thread_,
        THREAD_GET_CONTEXT | THREAD_SUSPEND_RESUME | THREAD_QUERY_INFORMATION,
        FALSE, 0);
    DCHECK(result);
    USE(result);
  }

  ~PlatformData() {
    if (profiled_thread_ != nullptr) {
      CloseHandle(profiled_thread_);
      profiled_thread_ = nullptr;
    }
  }

  HANDLE profiled_thread() { return profiled_thread_; }

 private:
  HANDLE profiled_thread_;
};
300
301#elif V8_OS_FUCHSIA
302
// Fuchsia per-sampler data: a duplicated handle to the profiled thread so it
// can be suspended and its registers read from the sampler thread.
class Sampler::PlatformData {
 public:
  PlatformData() {
    // NOTE(review): the zx_handle_duplicate status is ignored; presumably on
    // failure profiled_thread_ stays ZX_HANDLE_INVALID and DoSample bails
    // out — confirm against zx_handle_duplicate's failure semantics.
    zx_handle_duplicate(zx_thread_self(), ZX_RIGHT_SAME_RIGHTS,
                        &profiled_thread_);
  }
  ~PlatformData() {
    if (profiled_thread_ != ZX_HANDLE_INVALID) {
      zx_handle_close(profiled_thread_);
      profiled_thread_ = ZX_HANDLE_INVALID;
    }
  }

  zx_handle_t profiled_thread() { return profiled_thread_; }

 private:
  zx_handle_t profiled_thread_ = ZX_HANDLE_INVALID;
};
321
322#endif // USE_SIGNALS
323
324#if defined(USE_SIGNALS)
// Process-wide management of the SIGPROF handler shared by all samplers.
// Installation is reference counted: the first client installs the handler,
// the last one restores the previously installed disposition.
class SignalHandler {
 public:
  static void IncreaseSamplerCount() {
    base::RecursiveMutexGuard lock_guard(mutex_.Pointer());
    if (++client_count_ == 1) Install();
  }

  static void DecreaseSamplerCount() {
    base::RecursiveMutexGuard lock_guard(mutex_.Pointer());
    if (--client_count_ == 0) Restore();
  }

  static bool Installed() {
    // mutex_ will also be used in Sampler::DoSample to guard the state below.
    base::RecursiveMutexGuard lock_guard(mutex_.Pointer());
    return signal_handler_installed_;
  }

  static v8::base::RecursiveMutex* mutex() { return mutex_.Pointer(); }

 private:
  // Installs HandleProfilerSignal for SIGPROF, saving the previous
  // disposition in old_signal_handler_ for Restore().
  static void Install() {
    struct sigaction sa;
    sa.sa_sigaction = &HandleProfilerSignal;
    sigemptyset(&sa.sa_mask);
#if V8_OS_QNX
    sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
#else
    sa.sa_flags = SA_RESTART | SA_SIGINFO | SA_ONSTACK;
#endif
    signal_handler_installed_ =
        (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
  }

  static void Restore() {
    if (signal_handler_installed_) {
      signal_handler_installed_ = false;
#if V8_OS_AIX || V8_TARGET_ARCH_S390X
      // On Aix, IBMi & zLinux SIGPROF can sometimes arrive after the
      // default signal handler is restored, resulting in intermittent test
      // failure when profiling is enabled (https://crbug.com/v8/12952)
      base::OS::Sleep(base::TimeDelta::FromMicroseconds(10));
#endif
      sigaction(SIGPROF, &old_signal_handler_, nullptr);
    }
  }

  static void FillRegisterState(void* context, RegisterState* regs);
  static void HandleProfilerSignal(int signal, siginfo_t* info, void* context);

  // Protects the process wide state below.
  // NOTE(review): the declaration of mutex_ (referenced throughout this class
  // and defined below as a base::LazyRecursiveMutex) appears to have been
  // dropped from this listing — confirm against the original file.
  static int client_count_;
  static bool signal_handler_installed_;
  static struct sigaction old_signal_handler_;
};
381
382base::LazyRecursiveMutex SignalHandler::mutex_ =
384
// Definitions of SignalHandler's process-wide static state: no clients and
// no installed handler until IncreaseSamplerCount() is first called.
int SignalHandler::client_count_ = 0;
struct sigaction SignalHandler::old_signal_handler_;
bool SignalHandler::signal_handler_installed_ = false;
388
// SIGPROF entry point: runs on the interrupted (sampled) thread. Extracts
// the register state from `context` and hands it to SamplerManager.
void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
                                         void* context) {
  USE(info);
  if (signal != SIGPROF) return;
  // NOTE(review): the declaration of `state` (a v8::RegisterState, original
  // line 394) appears to have been dropped from this listing.
  FillRegisterState(context, &state);
  SamplerManager::instance()->DoSample(state);
}
398
// Translates the OS/architecture-specific signal context into V8's
// RegisterState: pc/sp/fp everywhere, plus lr on link-register ABIs
// (ARM, PPC, s390x, RISC-V, AIX, z/OS). Runs inside the SIGPROF handler,
// so it only reads fields from the context structure.
void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
  // Extracting the sample from the context is extremely machine dependent.
  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
#if !(V8_OS_OPENBSD || V8_OS_ZOS || \
      (V8_OS_LINUX && (V8_HOST_ARCH_S390X || V8_HOST_ARCH_PPC64)))
  // Most platforms expose the register file via uc_mcontext. Note the Darwin
  // branches below use mcontext-> : there mcontext_t is itself a pointer type.
  mcontext_t& mcontext = ucontext->uc_mcontext;
#elif V8_OS_ZOS
  __mcontext_t_* mcontext = reinterpret_cast<__mcontext_t_*>(context);
#endif
#if V8_OS_LINUX
#if V8_HOST_ARCH_IA32
  state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_EIP]);
  state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_ESP]);
  state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_EBP]);
#elif V8_HOST_ARCH_X64
  state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_RIP]);
  state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_RSP]);
  state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_RBP]);
#elif V8_HOST_ARCH_ARM
#if V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
  // Old GLibc ARM versions used a gregs[] array to access the register
  // values from mcontext_t.
  state->pc = reinterpret_cast<void*>(mcontext.gregs[R15]);
  state->sp = reinterpret_cast<void*>(mcontext.gregs[R13]);
  state->fp = reinterpret_cast<void*>(mcontext.gregs[R11]);
  state->lr = reinterpret_cast<void*>(mcontext.gregs[R14]);
#else
  state->pc = reinterpret_cast<void*>(mcontext.arm_pc);
  state->sp = reinterpret_cast<void*>(mcontext.arm_sp);
  state->fp = reinterpret_cast<void*>(mcontext.arm_fp);
  state->lr = reinterpret_cast<void*>(mcontext.arm_lr);
#endif  // V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
#elif V8_HOST_ARCH_ARM64
  state->pc = reinterpret_cast<void*>(mcontext.pc);
  state->sp = reinterpret_cast<void*>(mcontext.sp);
  // FP is an alias for x29.
  state->fp = reinterpret_cast<void*>(mcontext.regs[29]);
  // LR is an alias for x30.
  state->lr = reinterpret_cast<void*>(mcontext.regs[30]);
#elif V8_HOST_ARCH_MIPS64
  state->pc = reinterpret_cast<void*>(mcontext.pc);
  state->sp = reinterpret_cast<void*>(mcontext.gregs[29]);
  state->fp = reinterpret_cast<void*>(mcontext.gregs[30]);
#elif V8_HOST_ARCH_LOONG64
  state->pc = reinterpret_cast<void*>(mcontext.__pc);
  state->sp = reinterpret_cast<void*>(mcontext.__gregs[3]);
  state->fp = reinterpret_cast<void*>(mcontext.__gregs[22]);
#elif V8_HOST_ARCH_PPC64
#if V8_LIBC_GLIBC
  state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.regs->nip);
  state->sp = reinterpret_cast<void*>(ucontext->uc_mcontext.regs->gpr[PT_R1]);
  state->fp = reinterpret_cast<void*>(ucontext->uc_mcontext.regs->gpr[PT_R31]);
  state->lr = reinterpret_cast<void*>(ucontext->uc_mcontext.regs->link);
#else
  // Some C libraries, notably Musl, define the regs member as a void pointer
  state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.gp_regs[32]);
  state->sp = reinterpret_cast<void*>(ucontext->uc_mcontext.gp_regs[1]);
  state->fp = reinterpret_cast<void*>(ucontext->uc_mcontext.gp_regs[31]);
  state->lr = reinterpret_cast<void*>(ucontext->uc_mcontext.gp_regs[36]);
#endif
#elif V8_HOST_ARCH_S390X
  state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.psw.addr);
  state->sp = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[15]);
  state->fp = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[11]);
  state->lr = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[14]);
#elif V8_HOST_ARCH_RISCV64 || V8_HOST_ARCH_RISCV32
  // Spec CH.25 RISC-V Assembly Programmer’s Handbook
  state->pc = reinterpret_cast<void*>(mcontext.__gregs[REG_PC]);
  state->sp = reinterpret_cast<void*>(mcontext.__gregs[REG_SP]);
  state->fp = reinterpret_cast<void*>(mcontext.__gregs[REG_S0]);
  state->lr = reinterpret_cast<void*>(mcontext.__gregs[REG_RA]);
#endif  // V8_HOST_ARCH_*

#elif V8_OS_ZOS
  state->pc = reinterpret_cast<void*>(mcontext->__mc_psw);
  state->sp = reinterpret_cast<void*>(mcontext->__mc_gr[15]);
  state->fp = reinterpret_cast<void*>(mcontext->__mc_gr[11]);
  state->lr = reinterpret_cast<void*>(mcontext->__mc_gr[14]);
#elif V8_OS_IOS

#if V8_TARGET_ARCH_ARM64
  // Building for the iOS device.
  state->pc = reinterpret_cast<void*>(mcontext->__ss.__pc);
  state->sp = reinterpret_cast<void*>(mcontext->__ss.__sp);
  state->fp = reinterpret_cast<void*>(mcontext->__ss.__fp);
#elif V8_TARGET_ARCH_X64
  // Building for the iOS simulator.
  state->pc = reinterpret_cast<void*>(mcontext->__ss.__rip);
  state->sp = reinterpret_cast<void*>(mcontext->__ss.__rsp);
  state->fp = reinterpret_cast<void*>(mcontext->__ss.__rbp);
#else
#error Unexpected iOS target architecture.
#endif  // V8_TARGET_ARCH_ARM64

#elif V8_OS_DARWIN
#if V8_HOST_ARCH_X64
  state->pc = reinterpret_cast<void*>(mcontext->__ss.__rip);
  state->sp = reinterpret_cast<void*>(mcontext->__ss.__rsp);
  state->fp = reinterpret_cast<void*>(mcontext->__ss.__rbp);
#elif V8_HOST_ARCH_IA32
  state->pc = reinterpret_cast<void*>(mcontext->__ss.__eip);
  state->sp = reinterpret_cast<void*>(mcontext->__ss.__esp);
  state->fp = reinterpret_cast<void*>(mcontext->__ss.__ebp);
#elif V8_HOST_ARCH_ARM64
  // Registers are read through the arm_thread_state64_get_* accessors rather
  // than direct field access on this platform.
  state->pc =
      reinterpret_cast<void*>(arm_thread_state64_get_pc(mcontext->__ss));
  state->sp =
      reinterpret_cast<void*>(arm_thread_state64_get_sp(mcontext->__ss));
  state->fp =
      reinterpret_cast<void*>(arm_thread_state64_get_fp(mcontext->__ss));
#endif  // V8_HOST_ARCH_*
#elif V8_OS_FREEBSD
#if V8_HOST_ARCH_IA32
  state->pc = reinterpret_cast<void*>(mcontext.mc_eip);
  state->sp = reinterpret_cast<void*>(mcontext.mc_esp);
  state->fp = reinterpret_cast<void*>(mcontext.mc_ebp);
#elif V8_HOST_ARCH_X64
  state->pc = reinterpret_cast<void*>(mcontext.mc_rip);
  state->sp = reinterpret_cast<void*>(mcontext.mc_rsp);
  state->fp = reinterpret_cast<void*>(mcontext.mc_rbp);
#elif V8_HOST_ARCH_ARM
  state->pc = reinterpret_cast<void*>(mcontext.__gregs[_REG_PC]);
  state->sp = reinterpret_cast<void*>(mcontext.__gregs[_REG_SP]);
  state->fp = reinterpret_cast<void*>(mcontext.__gregs[_REG_FP]);
#endif  // V8_HOST_ARCH_*
#elif V8_OS_NETBSD
#if V8_HOST_ARCH_IA32
  state->pc = reinterpret_cast<void*>(mcontext.__gregs[_REG_EIP]);
  state->sp = reinterpret_cast<void*>(mcontext.__gregs[_REG_ESP]);
  state->fp = reinterpret_cast<void*>(mcontext.__gregs[_REG_EBP]);
#elif V8_HOST_ARCH_X64
  state->pc = reinterpret_cast<void*>(mcontext.__gregs[_REG_RIP]);
  state->sp = reinterpret_cast<void*>(mcontext.__gregs[_REG_RSP]);
  state->fp = reinterpret_cast<void*>(mcontext.__gregs[_REG_RBP]);
#endif  // V8_HOST_ARCH_*
#elif V8_OS_OPENBSD
  // OpenBSD has no uc_mcontext: ucontext_t is a typedef for struct sigcontext
  // (see the include comment near the top of this file), so registers are
  // read directly from ucontext.
#if V8_HOST_ARCH_IA32
  state->pc = reinterpret_cast<void*>(ucontext->sc_eip);
  state->sp = reinterpret_cast<void*>(ucontext->sc_esp);
  state->fp = reinterpret_cast<void*>(ucontext->sc_ebp);
#elif V8_HOST_ARCH_X64
  state->pc = reinterpret_cast<void*>(ucontext->sc_rip);
  state->sp = reinterpret_cast<void*>(ucontext->sc_rsp);
  state->fp = reinterpret_cast<void*>(ucontext->sc_rbp);
#endif  // V8_HOST_ARCH_*
#elif V8_OS_SOLARIS
  state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_PC]);
  state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_SP]);
  state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_FP]);
#elif V8_OS_QNX
#if V8_HOST_ARCH_IA32
  state->pc = reinterpret_cast<void*>(mcontext.cpu.eip);
  state->sp = reinterpret_cast<void*>(mcontext.cpu.esp);
  state->fp = reinterpret_cast<void*>(mcontext.cpu.ebp);
#elif V8_HOST_ARCH_ARM
  state->pc = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_PC]);
  state->sp = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_SP]);
  state->fp = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_FP]);
#endif  // V8_HOST_ARCH_*
#elif V8_OS_AIX
  state->pc = reinterpret_cast<void*>(mcontext.jmp_context.iar);
  state->sp = reinterpret_cast<void*>(mcontext.jmp_context.gpr[1]);
  state->fp = reinterpret_cast<void*>(mcontext.jmp_context.gpr[31]);
  state->lr = reinterpret_cast<void*>(mcontext.jmp_context.lr);
#endif  // V8_OS_AIX
}
565
566#endif // USE_SIGNALS
567
569 : isolate_(isolate), data_(std::make_unique<PlatformData>()) {
570 // Abseil's deadlock detection uses locks. If we end up taking a sample absl
571 // internally holds this lock, we can end up deadlocking.
572 SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kIgnore);
573}
574
576
578 DCHECK(!IsActive());
579 SetActive(true);
580#if defined(USE_SIGNALS)
581 SignalHandler::IncreaseSamplerCount();
582 SamplerManager::instance()->AddSampler(this);
583#endif
584}
585
587#if defined(USE_SIGNALS)
588 SamplerManager::instance()->RemoveSampler(this);
589 SignalHandler::DecreaseSamplerCount();
590#endif
591 DCHECK(IsActive());
592 SetActive(false);
593}
594
595#if defined(USE_SIGNALS)
596
// POSIX: request a sample by sending SIGPROF to this sampler's thread; the
// actual register capture happens in SignalHandler::HandleProfilerSignal on
// that thread. Holding SignalHandler::mutex() keeps the handler installed
// for the duration of the kill.
void Sampler::DoSample() {
  base::RecursiveMutexGuard lock_guard(SignalHandler::mutex());
  if (!SignalHandler::Installed()) return;
  // NOTE(review): a statement (original line 600 — per the cross-references
  // likely SetShouldRecordSample()) appears to have been dropped from this
  // listing.
  pthread_kill(platform_data()->vm_tself(), SIGPROF);
}
603
604#elif V8_OS_WIN || V8_OS_CYGWIN
605
// Win32/Cygwin: suspend the profiled thread, capture its registers with
// GetThreadContext, sample the stack, then resume it. Bails out silently if
// the thread handle is missing or suspension fails.
void Sampler::DoSample() {
  HANDLE profiled_thread = platform_data()->profiled_thread();
  if (profiled_thread == nullptr) return;

  const DWORD kSuspendFailed = static_cast<DWORD>(-1);
  if (SuspendThread(profiled_thread) == kSuspendFailed) return;

  // Context used for sampling the register state of the profiled thread.
  CONTEXT context;
  memset(&context, 0, sizeof(context));
  context.ContextFlags = CONTEXT_FULL;
  if (GetThreadContext(profiled_thread, &context) != 0) {
    // NOTE(review): the declaration of `state` (a v8::RegisterState, original
    // line 618) appears to have been dropped from this listing.
#if V8_HOST_ARCH_X64
    state.pc = reinterpret_cast<void*>(context.Rip);
    state.sp = reinterpret_cast<void*>(context.Rsp);
    state.fp = reinterpret_cast<void*>(context.Rbp);
#elif V8_HOST_ARCH_ARM64
    state.pc = reinterpret_cast<void*>(context.Pc);
    state.sp = reinterpret_cast<void*>(context.Sp);
    state.fp = reinterpret_cast<void*>(context.Fp);
#else
    state.pc = reinterpret_cast<void*>(context.Eip);
    state.sp = reinterpret_cast<void*>(context.Esp);
    state.fp = reinterpret_cast<void*>(context.Ebp);
#endif
    SampleStack(state);
  }
  ResumeThread(profiled_thread);
}
636
637#elif V8_OS_FUCHSIA
638
// Fuchsia: suspend the profiled thread via a suspend token, wait (bounded)
// for it to actually reach the suspended state, read its general registers,
// sample the stack, then release the token to resume the thread.
void Sampler::DoSample() {
  zx_handle_t profiled_thread = platform_data()->profiled_thread();
  if (profiled_thread == ZX_HANDLE_INVALID) return;

  zx_handle_t suspend_token = ZX_HANDLE_INVALID;
  if (zx_task_suspend_token(profiled_thread, &suspend_token) != ZX_OK) return;

  // Wait for the target thread to become suspended, or to exit.
  // TODO(wez): There is currently no suspension count for threads, so there
  // is a risk that some other caller resumes the thread in-between our suspend
  // and wait calls, causing us to miss the SUSPENDED signal. We apply a 100ms
  // deadline to protect against hanging the sampler thread in this case.
  zx_signals_t signals = 0;
  zx_status_t suspended = zx_object_wait_one(
      profiled_thread, ZX_THREAD_SUSPENDED | ZX_THREAD_TERMINATED,
      zx_deadline_after(ZX_MSEC(100)), &signals);
  if (suspended != ZX_OK || (signals & ZX_THREAD_SUSPENDED) == 0) {
    zx_handle_close(suspend_token);
    return;
  }

  // Fetch a copy of its "general register" states.
  zx_thread_state_general_regs_t thread_state = {};
  if (zx_thread_read_state(profiled_thread, ZX_THREAD_STATE_GENERAL_REGS,
                           &thread_state, sizeof(thread_state)) == ZX_OK) {
    // NOTE(review): the declaration of `state` (a v8::RegisterState, original
    // line 664) appears to have been dropped from this listing.
#if V8_HOST_ARCH_X64
    state.pc = reinterpret_cast<void*>(thread_state.rip);
    state.sp = reinterpret_cast<void*>(thread_state.rsp);
    state.fp = reinterpret_cast<void*>(thread_state.rbp);
#elif V8_HOST_ARCH_ARM64
    state.pc = reinterpret_cast<void*>(thread_state.pc);
    state.sp = reinterpret_cast<void*>(thread_state.sp);
    // FP is an alias for x29.
    state.fp = reinterpret_cast<void*>(thread_state.r[29]);
#endif
    SampleStack(state);
  }

  // Closing the suspend token releases the suspension.
  zx_handle_close(suspend_token);
}
679
680// TODO(wez): Remove this once the Fuchsia SDK has rolled.
681#if defined(ZX_THREAD_STATE_REGSET0)
682#undef ZX_THREAD_STATE_GENERAL_REGS
683#endif
684
685#endif // USE_SIGNALS
686
687} // namespace sampler
688} // namespace v8
Isolate * isolate_
uint8_t data_[MAX_STACK_LENGTH]
static void SetDefaultPermissionsForSignalHandler()
Definition v8.cc:309
static int GetCurrentThreadId()
Definition platform.cc:29
bool IsActive() const
Definition sampler.h:55
Isolate * isolate() const
Definition sampler.h:43
Sampler(Isolate *isolate)
Definition sampler.cc:568
void SetShouldRecordSample()
Definition sampler.h:87
void SetActive(bool value)
Definition sampler.h:83
virtual void SampleStack(const v8::RegisterState &regs)=0
bool ShouldRecordSample()
Definition sampler.h:59
PlatformData * platform_data() const
Definition sampler.h:75
base::Mutex & mutex_
TNode< Context > context
ZoneVector< RpoNumber > & result
LiftoffAssembler::CacheState state
base::Mutex mutex
#define LAZY_RECURSIVE_MUTEX_INITIALIZER
Definition mutex.h:180
STL namespace.
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define DCHECK(condition)
Definition logging.h:482
#define USE(...)
Definition macros.h:293
int BOOL
void * HANDLE
unsigned long DWORD