v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
time.cc
1// Copyright 2013 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/base/platform/time.h"
6
7#if V8_OS_POSIX
8#include <fcntl.h> // for O_RDONLY
9#include <sys/time.h>
10#include <unistd.h>
11#endif
12
13#if V8_OS_DARWIN
14#include <mach/mach.h>
15#include <mach/mach_time.h>
16#include <pthread.h>
17#endif
18
19#if V8_OS_FUCHSIA
20#include <threads.h>
21#include <zircon/syscalls.h>
22#include <zircon/threads.h>
23#endif
24
25#if V8_OS_STARBOARD
26#include <sys/time.h>
27#endif // V8_OS_STARBOARD
28
29#include <cstring>
30#include <ostream>
31
32#if V8_OS_WIN
33#include <windows.h>
34
35// This has to come after windows.h.
36#include <mmsystem.h> // For timeGetTime().
37
38#include <atomic>
39
40#include "src/base/lazy-instance.h"
41#endif
42#include "src/base/cpu.h"
43#include "src/base/logging.h"
46
47#if V8_OS_STARBOARD
48#include "starboard/common/time.h"
49#endif
50
51namespace {
52
53#if V8_OS_DARWIN
54int64_t ComputeThreadTicks() {
55 mach_msg_type_number_t thread_info_count = THREAD_BASIC_INFO_COUNT;
56 thread_basic_info_data_t thread_info_data;
57 kern_return_t kr = thread_info(
58 pthread_mach_thread_np(pthread_self()),
59 THREAD_BASIC_INFO,
60 reinterpret_cast<thread_info_t>(&thread_info_data),
61 &thread_info_count);
62 CHECK_EQ(kr, KERN_SUCCESS);
63
64 // We can add the seconds into an {int64_t} without overflow.
65 CHECK_LE(thread_info_data.user_time.seconds,
66 std::numeric_limits<int64_t>::max() -
67 thread_info_data.system_time.seconds);
68 int64_t seconds =
69 thread_info_data.user_time.seconds + thread_info_data.system_time.seconds;
70 // Multiplying the seconds by {kMicrosecondsPerSecond}, and adding something
71 // in [0, 2 * kMicrosecondsPerSecond) must result in a valid {int64_t}.
72 static constexpr int64_t kSecondsLimit =
73 (std::numeric_limits<int64_t>::max() /
74 v8::base::Time::kMicrosecondsPerSecond) -
75 2;
76 CHECK_GT(kSecondsLimit, seconds);
77 int64_t micros = seconds * v8::base::Time::kMicrosecondsPerSecond;
78 micros += (thread_info_data.user_time.microseconds +
79 thread_info_data.system_time.microseconds);
80 return micros;
81}
82#elif V8_OS_FUCHSIA
83V8_INLINE int64_t GetFuchsiaThreadTicks() {
84 zx_info_thread_stats_t info;
85 zx_status_t status = zx_object_get_info(thrd_get_zx_handle(thrd_current()),
86 ZX_INFO_THREAD_STATS, &info,
87 sizeof(info), nullptr, nullptr);
88 CHECK_EQ(status, ZX_OK);
89 return info.total_runtime / v8::base::Time::kNanosecondsPerMicrosecond;
90}
91#elif V8_OS_POSIX
92// Helper function to get results from clock_gettime() and convert to a
93// microsecond timebase. The minimum requirement is that MONOTONIC_CLOCK be
94// supported on the system. FreeBSD 6 has CLOCK_MONOTONIC but defines
95// _POSIX_MONOTONIC_CLOCK to -1.
96V8_INLINE int64_t ClockNow(clockid_t clk_id) {
97#if (defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0) || \
98 defined(V8_OS_BSD) || defined(V8_OS_ANDROID) || defined(V8_OS_ZOS)
99#if defined(V8_OS_AIX)
100 // On AIX clock_gettime for CLOCK_THREAD_CPUTIME_ID outputs time with
101 // resolution of 10ms. thread_cputime API provides the time in ns.
102 if (clk_id == CLOCK_THREAD_CPUTIME_ID) {
103#if defined(__PASE__) // CLOCK_THREAD_CPUTIME_ID clock not supported on IBMi
104 return 0;
105#else
106 thread_cputime_t tc;
107 if (thread_cputime(-1, &tc) != 0) {
108 UNREACHABLE();
109 }
110 return (tc.stime / v8::base::Time::kNanosecondsPerMicrosecond)
111 + (tc.utime / v8::base::Time::kNanosecondsPerMicrosecond);
112#endif // defined(__PASE__)
113 }
114#endif // defined(V8_OS_AIX)
115 struct timespec ts;
116 if (clock_gettime(clk_id, &ts) != 0) {
117 UNREACHABLE();
118 }
119 // Multiplying the seconds by {kMicrosecondsPerSecond}, and adding something
120 // in [0, kMicrosecondsPerSecond) must result in a valid {int64_t}.
121 static constexpr int64_t kSecondsLimit =
122 (std::numeric_limits<int64_t>::max() /
123 v8::base::Time::kMicrosecondsPerSecond) -
124 1;
125 CHECK_GT(kSecondsLimit, ts.tv_sec);
126 int64_t result = int64_t{ts.tv_sec} * v8::base::Time::kMicrosecondsPerSecond;
127 result += (ts.tv_nsec / v8::base::Time::kNanosecondsPerMicrosecond);
128 return result;
129#else // Monotonic clock not supported.
130 return 0;
131#endif
132}
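As a standalone illustration of the conversion ClockNow() performs (not V8 code; TimespecToMicroseconds is a made-up name and the constants are spelled out rather than taken from Time):

#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <ctime>
#include <limits>

// Convert a timespec to a microsecond count, guarding against int64_t
// overflow the same way the CHECK_GT(kSecondsLimit, ts.tv_sec) above does.
int64_t TimespecToMicroseconds(const std::timespec& ts) {
  constexpr int64_t kMicrosecondsPerSecond = 1000 * 1000;
  constexpr int64_t kNanosecondsPerMicrosecond = 1000;
  constexpr int64_t kSecondsLimit =
      std::numeric_limits<int64_t>::max() / kMicrosecondsPerSecond - 1;
  if (ts.tv_sec >= kSecondsLimit) return std::numeric_limits<int64_t>::max();
  return int64_t{ts.tv_sec} * kMicrosecondsPerSecond +
         ts.tv_nsec / kNanosecondsPerMicrosecond;
}

int main() {
  std::timespec ts{};
  std::timespec_get(&ts, TIME_UTC);  // portable C++17 wall-clock read
  std::printf("%" PRId64 " us since the epoch\n", TimespecToMicroseconds(ts));
}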
133
134V8_INLINE int64_t NanosecondsNow() {
135 struct timespec ts;
136 clock_gettime(CLOCK_MONOTONIC, &ts);
137 return int64_t{ts.tv_sec} * v8::base::Time::kNanosecondsPerSecond +
138 ts.tv_nsec;
139}
140
141inline bool IsHighResolutionTimer(clockid_t clk_id) {
142 // Currently this is only needed for CLOCK_MONOTONIC. If other clocks need
143 // to be checked, care must be taken to support all platforms correctly;
144 // see ClockNow() above for precedent.
145 DCHECK_EQ(clk_id, CLOCK_MONOTONIC);
146 int64_t previous = NanosecondsNow();
147 // There should be enough attempts to make the loop run for more than one
148 // microsecond if the early return is not taken -- the elapsed time can't
149 // be measured in that situation, so we have to estimate it offline.
150 constexpr int kAttempts = 100;
151 for (int i = 0; i < kAttempts; i++) {
152 int64_t next = NanosecondsNow();
153 int64_t delta = next - previous;
154 if (delta == 0) continue;
155 // We expect most systems to take this branch on the first iteration.
156 if (delta <= 1000) {
157 return true;
158 }
159 previous = next;
160 }
161 // As of 2022, we expect that the loop above has taken at least 2 μs (on
162 // a fast desktop). If we still haven't seen a non-zero clock increment
163 // in sub-microsecond range, assume a low resolution timer.
164 return false;
165}
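The same probing idea, sketched with std::chrono::steady_clock instead of clock_gettime() so it compiles anywhere (LooksHighResolution is an illustrative name, not V8's API):

#include <chrono>
#include <cstdio>

// Sample the clock repeatedly and report whether it ever advances by a
// non-zero amount of at most one microsecond.
bool LooksHighResolution() {
  using Clock = std::chrono::steady_clock;
  auto previous = Clock::now();
  for (int i = 0; i < 100; ++i) {
    auto next = Clock::now();
    auto delta =
        std::chrono::duration_cast<std::chrono::nanoseconds>(next - previous);
    if (delta.count() == 0) continue;
    if (delta.count() <= 1000) return true;  // advanced by <= 1 us
    previous = next;
  }
  return false;
}

int main() { std::printf("high resolution: %d\n", LooksHighResolution()); }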
166
167#elif V8_OS_WIN
168// Returns the current value of the performance counter.
169V8_INLINE uint64_t QPCNowRaw() {
170 LARGE_INTEGER perf_counter_now = {};
171 // According to the MSDN documentation for QueryPerformanceCounter(), this
172 // will never fail on systems that run XP or later.
173 // https://msdn.microsoft.com/library/windows/desktop/ms644904.aspx
174 BOOL result = ::QueryPerformanceCounter(&perf_counter_now);
175 DCHECK(result);
176 USE(result);
177 return perf_counter_now.QuadPart;
178}
179#endif // V8_OS_DARWIN
180
181} // namespace
182
183namespace v8 {
184namespace base {
185
186int TimeDelta::InDays() const {
187 if (IsMax()) {
188 // Preserve max to prevent overflow.
189 return std::numeric_limits<int>::max();
190 }
191 return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
192}
193
194int TimeDelta::InHours() const {
195 if (IsMax()) {
196 // Preserve max to prevent overflow.
197 return std::numeric_limits<int>::max();
198 }
199 return static_cast<int>(delta_ / Time::kMicrosecondsPerHour);
200}
201
202int TimeDelta::InMinutes() const {
203 if (IsMax()) {
204 // Preserve max to prevent overflow.
205 return std::numeric_limits<int>::max();
206 }
207 return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
208}
209
210double TimeDelta::InSecondsF() const {
211 if (IsMax()) {
212 // Preserve max to prevent overflow.
213 return std::numeric_limits<double>::infinity();
214 }
215 return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
216}
217
218int64_t TimeDelta::InSeconds() const {
219 if (IsMax()) {
220 // Preserve max to prevent overflow.
221 return std::numeric_limits<int64_t>::max();
222 }
223 return delta_ / Time::kMicrosecondsPerSecond;
224}
225
226double TimeDelta::InMillisecondsF() const {
227 if (IsMax()) {
228 // Preserve max to prevent overflow.
229 return std::numeric_limits<double>::infinity();
230 }
231 return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
232}
233
234int64_t TimeDelta::InMilliseconds() const {
235 if (IsMax()) {
236 // Preserve max to prevent overflow.
237 return std::numeric_limits<int64_t>::max();
238 }
239 return delta_ / Time::kMicrosecondsPerMillisecond;
240}
241
242int64_t TimeDelta::InMillisecondsRoundedUp() const {
243 if (IsMax()) {
244 // Preserve max to prevent overflow.
245 return std::numeric_limits<int64_t>::max();
246 }
247 return (delta_ + Time::kMicrosecondsPerMillisecond - 1) /
248 Time::kMicrosecondsPerMillisecond;
249}
250
251int64_t TimeDelta::InMicroseconds() const {
252 if (IsMax()) {
253 // Preserve max to prevent overflow.
254 return std::numeric_limits<int64_t>::max();
255 }
256 return delta_;
257}
258
259int64_t TimeDelta::InNanoseconds() const {
260 if (IsMax()) {
261 // Preserve max to prevent overflow.
262 return std::numeric_limits<int64_t>::max();
263 }
264 return delta_ * Time::kNanosecondsPerMicrosecond;
265}
266
267#if V8_OS_DARWIN
268
269TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) {
270 DCHECK_GE(ts.tv_nsec, 0);
271 DCHECK_LT(ts.tv_nsec,
272 static_cast<long>(Time::kNanosecondsPerSecond)); // NOLINT
273 return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
274 ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
275}
276
277
278struct mach_timespec TimeDelta::ToMachTimespec() const {
279 struct mach_timespec ts;
280 DCHECK_GE(delta_, 0);
281 ts.tv_sec = static_cast<unsigned>(delta_ / Time::kMicrosecondsPerSecond);
282 ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
283 Time::kNanosecondsPerMicrosecond;
284 return ts;
285}
286
287#endif // V8_OS_DARWIN
288
289#if V8_OS_POSIX
290
291TimeDelta TimeDelta::FromTimespec(struct timespec ts) {
292 DCHECK_GE(ts.tv_nsec, 0);
293 DCHECK_LT(ts.tv_nsec,
294 static_cast<long>(Time::kNanosecondsPerSecond)); // NOLINT
295 return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
296 ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
297}
298
299
300struct timespec TimeDelta::ToTimespec() const {
301 struct timespec ts;
302 ts.tv_sec = static_cast<time_t>(delta_ / Time::kMicrosecondsPerSecond);
303 ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
304 Time::kNanosecondsPerMicrosecond;
305 return ts;
306}
307
308#endif // V8_OS_POSIX
309
310
311#if V8_OS_WIN
312
313// We implement time using the high-resolution timers so that we can get
314// timeouts which are smaller than 10-15ms. To avoid any drift, we
315// periodically resync the internal clock to the system clock.
316class Clock final {
317 public:
318 Clock() : initial_ticks_(GetSystemTicks()), initial_time_(GetSystemTime()) {}
319
320 Time Now() {
321 // Time between resampling the un-granular clock for this API (1 minute).
322 const TimeDelta kMaxElapsedTime = TimeDelta::FromMinutes(1);
323
324 MutexGuard lock_guard(&mutex_);
325
326 // Determine current time and ticks.
327 TimeTicks ticks = GetSystemTicks();
328 Time time = GetSystemTime();
329
330 // Check if we need to synchronize with the system clock due to a backwards
331 // time change or the amount of time elapsed.
332 TimeDelta elapsed = ticks - initial_ticks_;
333 if (time < initial_time_ || elapsed > kMaxElapsedTime) {
334 initial_ticks_ = ticks;
335 initial_time_ = time;
336 return time;
337 }
338
339 return initial_time_ + elapsed;
340 }
341
342 Time NowFromSystemTime() {
343 MutexGuard lock_guard(&mutex_);
344 initial_ticks_ = GetSystemTicks();
345 initial_time_ = GetSystemTime();
346 return initial_time_;
347 }
348
349 private:
350 static TimeTicks GetSystemTicks() {
351 return TimeTicks::Now();
352 }
353
354 static Time GetSystemTime() {
355 FILETIME ft;
356 ::GetSystemTimeAsFileTime(&ft);
357 return Time::FromFiletime(ft);
358 }
359
360 TimeTicks initial_ticks_;
361 Time initial_time_;
362 Mutex mutex_;
363};
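The resync strategy the comment above describes, as a portable sketch built on std::chrono (HybridClock is an illustrative stand-in, not the class above; the real code anchors GetSystemTimeAsFileTime() to TimeTicks::Now()):

#include <chrono>
#include <mutex>

// Remember a (wall time, monotonic ticks) anchor pair, answer Now() as
// anchor_time + elapsed ticks, and re-anchor if the wall clock went backwards
// or the anchor is older than one minute.
class HybridClock {
 public:
  std::chrono::system_clock::time_point Now() {
    constexpr auto kMaxElapsed = std::chrono::minutes(1);
    std::lock_guard<std::mutex> guard(mutex_);
    auto ticks = std::chrono::steady_clock::now();
    auto time = std::chrono::system_clock::now();
    auto elapsed = ticks - initial_ticks_;
    if (time < initial_time_ || elapsed > kMaxElapsed) {
      initial_ticks_ = ticks;
      initial_time_ = time;
      return time;
    }
    return initial_time_ +
           std::chrono::duration_cast<std::chrono::system_clock::duration>(
               elapsed);
  }

 private:
  std::chrono::steady_clock::time_point initial_ticks_ =
      std::chrono::steady_clock::now();
  std::chrono::system_clock::time_point initial_time_ =
      std::chrono::system_clock::now();
  std::mutex mutex_;
};

int main() {
  HybridClock clock;
  auto t = clock.Now();
  (void)t;
}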
364
365namespace {
366DEFINE_LAZY_LEAKY_OBJECT_GETTER(Clock, GetClock)
367} // namespace
368
369Time Time::Now() { return GetClock()->Now(); }
370
371Time Time::NowFromSystemTime() { return GetClock()->NowFromSystemTime(); }
372
373// Time between windows epoch and standard epoch.
374static const int64_t kTimeToEpochInMicroseconds = int64_t{11644473600000000};
375
376Time Time::FromFiletime(FILETIME ft) {
377 if (ft.dwLowDateTime == 0 && ft.dwHighDateTime == 0) {
378 return Time();
379 }
380 if (ft.dwLowDateTime == std::numeric_limits<DWORD>::max() &&
381 ft.dwHighDateTime == std::numeric_limits<DWORD>::max()) {
382 return Max();
383 }
384 int64_t us = (static_cast<uint64_t>(ft.dwLowDateTime) +
385 (static_cast<uint64_t>(ft.dwHighDateTime) << 32)) / 10;
386 return Time(us - kTimeToEpochInMicroseconds);
387}
388
389
390FILETIME Time::ToFiletime() const {
391 DCHECK_GE(us_, 0);
392 FILETIME ft;
393 if (IsNull()) {
394 ft.dwLowDateTime = 0;
395 ft.dwHighDateTime = 0;
396 return ft;
397 }
398 if (IsMax()) {
399 ft.dwLowDateTime = std::numeric_limits<DWORD>::max();
400 ft.dwHighDateTime = std::numeric_limits<DWORD>::max();
401 return ft;
402 }
403 uint64_t us = static_cast<uint64_t>(us_ + kTimeToEpochInMicroseconds) * 10;
404 ft.dwLowDateTime = static_cast<DWORD>(us);
405 ft.dwHighDateTime = static_cast<DWORD>(us >> 32);
406 return ft;
407}
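The epoch arithmetic in the two conversions above, as plain integer math with no Windows headers (FiletimeTicksToUnixMicros is an illustrative helper, not part of V8):

#include <cstdint>
#include <cstdio>

// A FILETIME is a 64-bit count of 100 ns intervals since 1601-01-01, so
// dividing by 10 gives microseconds, and subtracting the 1601->1970 offset
// gives Unix time.
constexpr int64_t kTimeToEpochInMicroseconds = 11644473600000000;

int64_t FiletimeTicksToUnixMicros(uint64_t hundred_ns_since_1601) {
  return static_cast<int64_t>(hundred_ns_since_1601 / 10) -
         kTimeToEpochInMicroseconds;
}

int main() {
  // 2009-02-13 23:31:30 UTC (Unix time 1234567890) expressed as FILETIME ticks.
  uint64_t ticks = (1234567890ULL * 1000 * 1000 + 11644473600000000ULL) * 10;
  std::printf("%lld\n",
              static_cast<long long>(FiletimeTicksToUnixMicros(ticks)) /
                  1000000);  // prints 1234567890
}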
408
409#elif V8_OS_POSIX || V8_OS_STARBOARD
410
411Time Time::Now() {
412 struct timeval tv;
413 int result = gettimeofday(&tv, nullptr);
414 DCHECK_EQ(0, result);
415 USE(result);
416 return FromTimeval(tv);
417}
418
419
420Time Time::NowFromSystemTime() {
421 return Now();
422}
423
424
425Time Time::FromTimespec(struct timespec ts) {
426 DCHECK_GE(ts.tv_nsec, 0);
427 DCHECK_LT(ts.tv_nsec, kNanosecondsPerSecond);
428 if (ts.tv_nsec == 0 && ts.tv_sec == 0) {
429 return Time();
430 }
431 if (ts.tv_nsec == static_cast<long>(kNanosecondsPerSecond - 1) && // NOLINT
432 ts.tv_sec == std::numeric_limits<time_t>::max()) {
433 return Max();
434 }
435 return Time(ts.tv_sec * kMicrosecondsPerSecond +
436 ts.tv_nsec / kNanosecondsPerMicrosecond);
437}
438
439
440struct timespec Time::ToTimespec() const {
441 struct timespec ts;
442 if (IsNull()) {
443 ts.tv_sec = 0;
444 ts.tv_nsec = 0;
445 return ts;
446 }
447 if (IsMax()) {
448 ts.tv_sec = std::numeric_limits<time_t>::max();
449 ts.tv_nsec = static_cast<long>(kNanosecondsPerSecond - 1); // NOLINT
450 return ts;
451 }
452 ts.tv_sec = static_cast<time_t>(us_ / kMicrosecondsPerSecond);
453 ts.tv_nsec = (us_ % kMicrosecondsPerSecond) * kNanosecondsPerMicrosecond;
454 return ts;
455}
456
457
458Time Time::FromTimeval(struct timeval tv) {
459 DCHECK_GE(tv.tv_usec, 0);
460 DCHECK(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond));
461 if (tv.tv_usec == 0 && tv.tv_sec == 0) {
462 return Time();
463 }
464 if (tv.tv_usec == static_cast<suseconds_t>(kMicrosecondsPerSecond - 1) &&
465 tv.tv_sec == std::numeric_limits<time_t>::max()) {
466 return Max();
467 }
468 return Time(tv.tv_sec * kMicrosecondsPerSecond + tv.tv_usec);
469}
470
471
472struct timeval Time::ToTimeval() const {
473 struct timeval tv;
474 if (IsNull()) {
475 tv.tv_sec = 0;
476 tv.tv_usec = 0;
477 return tv;
478 }
479 if (IsMax()) {
480 tv.tv_sec = std::numeric_limits<time_t>::max();
481 tv.tv_usec = static_cast<suseconds_t>(kMicrosecondsPerSecond - 1);
482 return tv;
483 }
484 tv.tv_sec = static_cast<time_t>(us_ / kMicrosecondsPerSecond);
485 tv.tv_usec = us_ % kMicrosecondsPerSecond;
486 return tv;
487}
488
489#endif // V8_OS_POSIX || V8_OS_STARBOARD
490
491Time Time::FromJsTime(double ms_since_epoch) {
492 // The epoch is a valid time, so this constructor doesn't interpret
493 // 0 as the null time.
494 if (ms_since_epoch == std::numeric_limits<double>::max()) {
495 return Max();
496 }
497 return Time(
498 static_cast<int64_t>(ms_since_epoch * kMicrosecondsPerMillisecond));
499}
500
501
502double Time::ToJsTime() const {
503 if (IsNull()) {
504 // Preserve 0 so the invalid result doesn't depend on the platform.
505 return 0;
506 }
507 if (IsMax()) {
508 // Preserve max without offset to prevent overflow.
509 return std::numeric_limits<double>::max();
510 }
511 return static_cast<double>(us_) / kMicrosecondsPerMillisecond;
512}
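A minimal round trip of the JS-time representation (helper names are made up; V8's versions also handle the null and max sentinels shown above):

#include <cstdint>
#include <cstdio>

// JavaScript dates carry milliseconds-since-epoch as a double; the internal
// representation here is microseconds as int64_t.
int64_t MicrosFromJsTime(double ms) { return static_cast<int64_t>(ms * 1000.0); }
double JsTimeFromMicros(int64_t us) { return static_cast<double>(us) / 1000.0; }

int main() {
  double js_ms = 1234567890123.0;  // some millisecond timestamp
  int64_t us = MicrosFromJsTime(js_ms);
  std::printf("%.1f -> %lld -> %.1f\n", js_ms, static_cast<long long>(us),
              JsTimeFromMicros(us));
}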
513
514
515std::ostream& operator<<(std::ostream& os, const Time& time) {
516 return os << time.ToJsTime();
517}
518
519
520#if V8_OS_WIN
521
522namespace {
523
524// We define a wrapper to adapt between the __stdcall and __cdecl call of the
525// mock function, and to avoid a static constructor. Assigning an import to a
526// function pointer directly would require setup code to fetch from the IAT.
527DWORD timeGetTimeWrapper() { return timeGetTime(); }
528
529DWORD (*g_tick_function)(void) = &timeGetTimeWrapper;
530
531// A structure holding the most significant bits of "last seen" and a
532// "rollover" counter.
533union LastTimeAndRolloversState {
534 // The state as a single 32-bit opaque value.
535 int32_t as_opaque_32;
536
537 // The state as usable values.
538 struct {
539 // The top 8-bits of the "last" time. This is enough to check for rollovers
540 // and the small bit-size means fewer CompareAndSwap operations to store
541 // changes in state, which in turn makes for fewer retries.
542 uint8_t last_8;
543 // A count of the number of detected rollovers. Using this as bits 47-32
544 // of the upper half of a 64-bit value results in a 48-bit tick counter.
545 // This extends the total rollover period from about 49 days to about 8800
546 // years while still allowing it to be stored with last_8 in a single
547 // 32-bit value.
548 uint16_t rollovers;
549 } as_values;
550};
551std::atomic<int32_t> g_last_time_and_rollovers{0};
552static_assert(sizeof(LastTimeAndRolloversState) <=
553 sizeof(g_last_time_and_rollovers),
554 "LastTimeAndRolloversState does not fit in a single atomic word");
555
556// We use timeGetTime() to implement TimeTicks::Now(). This can be problematic
557// because it returns the number of milliseconds since Windows has started,
558// which will roll over the 32-bit value every ~49 days. We try to track
559// rollover ourselves, which works if TimeTicks::Now() is called at least every
560// 48.8 days (not 49 days because only changes in the top 8 bits get noticed).
561TimeTicks RolloverProtectedNow() {
562 LastTimeAndRolloversState state;
563 DWORD now; // DWORD is always unsigned 32 bits.
564
565 // Fetch the "now" and "last" tick values, updating "last" with "now" and
566 // incrementing the "rollovers" counter if the tick-value has wrapped back
567 // around. Atomic operations ensure that both "last" and "rollovers" are
568 // always updated together.
569 int32_t original = g_last_time_and_rollovers.load(std::memory_order_acquire);
570 while (true) {
571 state.as_opaque_32 = original;
572 now = g_tick_function();
573 uint8_t now_8 = static_cast<uint8_t>(now >> 24);
574 if (now_8 < state.as_values.last_8) ++state.as_values.rollovers;
575 state.as_values.last_8 = now_8;
576
577 // If the state hasn't changed, exit the loop.
578 if (state.as_opaque_32 == original) break;
579
580 // Save the changed state. If the existing value is unchanged from the
581 // original, exit the loop.
582 if (g_last_time_and_rollovers.compare_exchange_weak(
583 original, state.as_opaque_32, std::memory_order_acq_rel)) {
584 break;
585 }
586
587 // Another thread has done something in between so retry from the top.
588 // {original} has been updated by the {compare_exchange_weak}.
589 }
590
591 return TimeTicks() +
592 TimeDelta::FromMilliseconds(
593 now + (static_cast<uint64_t>(state.as_values.rollovers) << 32));
594}
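The rollover bookkeeping in isolation, single-threaded and with made-up names (the atomic compare-exchange loop above is what makes the real version safe to call from multiple threads):

#include <cstdint>
#include <cstdio>

// Watch the top 8 bits of a wrapping 32-bit millisecond counter and bump a
// rollover count whenever they jump backwards, yielding a 48-bit tick value.
struct RolloverTracker {
  uint8_t last_top8 = 0;
  uint16_t rollovers = 0;

  uint64_t Extend(uint32_t now_ms) {
    uint8_t top8 = static_cast<uint8_t>(now_ms >> 24);
    if (top8 < last_top8) ++rollovers;
    last_top8 = top8;
    return now_ms + (static_cast<uint64_t>(rollovers) << 32);
  }
};

int main() {
  RolloverTracker t;
  std::printf("%llu\n", static_cast<unsigned long long>(t.Extend(0xFFFFFF00u)));
  // Counter wrapped back to a small value: the extended result keeps growing.
  std::printf("%llu\n", static_cast<unsigned long long>(t.Extend(0x00000010u)));
}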
595
596// Discussion of tick counter options on Windows:
597//
598// (1) CPU cycle counter. (Retrieved via RDTSC)
599// The CPU counter provides the highest resolution time stamp and is the least
600// expensive to retrieve. However, on older CPUs, two issues can affect its
601// reliability: First it is maintained per processor and not synchronized
602// between processors. Also, the counters will change frequency due to thermal
603// and power changes, and stop in some states.
604//
605// (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
606// resolution (<1 microsecond) time stamp. On most hardware running today, it
607// auto-detects and uses the constant-rate RDTSC counter to provide extremely
608// efficient and reliable time stamps.
609//
610// On older CPUs where RDTSC is unreliable, it falls back to using more
611// expensive (20X to 40X more costly) alternate clocks, such as HPET or the ACPI
612// PM timer, and can involve system calls; and all this is up to the HAL (with
613// some help from ACPI). According to
614// http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx, in the
615// worst case, it gets the counter from the rollover interrupt on the
616// programmable interrupt timer. In best cases, the HAL may conclude that the
617// RDTSC counter runs at a constant frequency, then it uses that instead. On
618// multiprocessor machines, it will try to verify the values returned from
619// RDTSC on each processor are consistent with each other, and apply a handful
620// of workarounds for known buggy hardware. In other words, QPC is supposed to
621// give consistent results on a multiprocessor computer, but for older CPUs it
622// can be unreliable due to bugs in BIOS or HAL.
623//
624// (3) System time. The system time provides a low-resolution (from ~1 to ~15.6
625// milliseconds) time stamp but is comparatively less expensive to retrieve and
626// more reliable. Time::EnableHighResolutionTimer() and
627// Time::ActivateHighResolutionTimer() can be called to alter the resolution of
628// this timer; and also other Windows applications can alter it, affecting this
629// one.
630
631TimeTicks InitialTimeTicksNowFunction();
632
633// See "threading notes" in InitializeTimeTicksNowFunctionPointer() for details
634// on how concurrent reads/writes to these globals have been made safe.
635using TimeTicksNowFunction = decltype(&TimeTicks::Now);
636TimeTicksNowFunction g_time_ticks_now_function = &InitialTimeTicksNowFunction;
637int64_t g_qpc_ticks_per_second = 0;
638
639TimeDelta QPCValueToTimeDelta(LONGLONG qpc_value) {
640 // Ensure that the assignment to |g_qpc_ticks_per_second|, made in
641 // InitializeTimeTicksNowFunctionPointer(), has happened by this point.
642 std::atomic_thread_fence(std::memory_order_acquire);
643
644 DCHECK_GT(g_qpc_ticks_per_second, 0);
645
646 // If the QPC Value is below the overflow threshold, we proceed with
647 // simple multiply and divide.
648 if (qpc_value < TimeTicks::kQPCOverflowThreshold) {
649 return TimeDelta::FromMicroseconds(
650 qpc_value * TimeTicks::kMicrosecondsPerSecond / g_qpc_ticks_per_second);
651 }
652 // Otherwise, calculate microseconds in a roundabout manner to avoid
653 // overflow and precision issues.
654 int64_t whole_seconds = qpc_value / g_qpc_ticks_per_second;
655 int64_t leftover_ticks = qpc_value - (whole_seconds * g_qpc_ticks_per_second);
656 return TimeDelta::FromMicroseconds(
657 (whole_seconds * TimeTicks::kMicrosecondsPerSecond) +
658 ((leftover_ticks * TimeTicks::kMicrosecondsPerSecond) /
659 g_qpc_ticks_per_second));
660}
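The overflow-avoiding split in isolation (TicksToMicroseconds and the 10 MHz frequency are illustrative, not taken from the file):

#include <cstdint>
#include <cstdio>

// Converting ticks to microseconds as ticks * 1'000'000 / frequency overflows
// int64_t for large tick counts, so split the value into whole seconds and
// leftover ticks first.
constexpr int64_t kMicrosecondsPerSecond = 1000 * 1000;

int64_t TicksToMicroseconds(int64_t ticks, int64_t ticks_per_second) {
  int64_t whole_seconds = ticks / ticks_per_second;
  int64_t leftover_ticks = ticks % ticks_per_second;
  return whole_seconds * kMicrosecondsPerSecond +
         leftover_ticks * kMicrosecondsPerSecond / ticks_per_second;
}

int main() {
  // Example frequency of 10 MHz (a common QPC frequency on recent Windows).
  constexpr int64_t kFrequency = 10 * 1000 * 1000;
  // A tick count that would overflow if multiplied by 1'000'000 directly.
  int64_t ticks = int64_t{1} << 50;
  std::printf("%lld us\n",
              static_cast<long long>(TicksToMicroseconds(ticks, kFrequency)));
}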
661
662TimeTicks QPCNow() { return TimeTicks() + QPCValueToTimeDelta(QPCNowRaw()); }
663
664void InitializeTimeTicksNowFunctionPointer() {
665 LARGE_INTEGER ticks_per_sec = {};
666 if (!QueryPerformanceFrequency(&ticks_per_sec)) ticks_per_sec.QuadPart = 0;
667
668 // If Windows cannot provide a QPC implementation, TimeTicks::Now() must use
669 // the low-resolution clock.
670 //
671 // If the QPC implementation is expensive and/or unreliable, TimeTicks::Now()
672 // will still use the low-resolution clock. A CPU lacking a non-stop time
673 // counter will cause Windows to provide an alternate QPC implementation that
674 // works, but is expensive to use. Certain Athlon CPUs are known to make the
675 // QPC implementation unreliable.
676 //
677 // Otherwise, Now uses the high-resolution QPC clock. As of 21 August 2015,
678 // ~72% of users fall within this category.
679 TimeTicksNowFunction now_function;
680 CPU cpu;
681 if (ticks_per_sec.QuadPart <= 0 || !cpu.has_non_stop_time_stamp_counter()) {
682 now_function = &RolloverProtectedNow;
683 } else {
684 now_function = &QPCNow;
685 }
686
687 // Threading note 1: In an unlikely race condition, it's possible for two or
688 // more threads to enter InitializeTimeTicksNowFunctionPointer() in parallel. This is
689 // not a problem since all threads should end up writing out the same values
690 // to the global variables.
691 //
692 // Threading note 2: A release fence is placed here to ensure, from the
693 // perspective of other threads using the function pointers, that the
694 // assignment to |g_qpc_ticks_per_second| happens before the function pointers
695 // are changed.
696 g_qpc_ticks_per_second = ticks_per_sec.QuadPart;
697 std::atomic_thread_fence(std::memory_order_release);
698 g_time_ticks_now_function = now_function;
699}
700
701TimeTicks InitialTimeTicksNowFunction() {
702 InitializeTimeTicksNowFunctionPointer();
703 return g_time_ticks_now_function();
704}
705
706#if V8_HOST_ARCH_ARM64
707// From MSDN, FILETIME "Contains a 64-bit value representing the number of
708// 100-nanosecond intervals since January 1, 1601 (UTC)."
709int64_t FileTimeToMicroseconds(const FILETIME& ft) {
710 // Need to bit_cast to fix alignment, then divide by 10 to convert
711 // 100-nanoseconds to microseconds. This only works on little-endian
712 // machines.
713 return bit_cast<int64_t, FILETIME>(ft) / 10;
714}
715#endif
716
717} // namespace
718
719// static
720TimeTicks TimeTicks::Now() {
721 // Make sure we never return 0 here.
722 TimeTicks ticks(g_time_ticks_now_function());
723 DCHECK(!ticks.IsNull());
724 return ticks;
725}
726
727// static
728bool TimeTicks::IsHighResolution() {
729 if (g_time_ticks_now_function == &InitialTimeTicksNowFunction)
730 InitializeTimeTicksNowFunctionPointer();
731 return g_time_ticks_now_function == &QPCNow;
732}
733
734#else // V8_OS_WIN
735
736TimeTicks TimeTicks::Now() {
737 int64_t ticks;
738#if V8_OS_DARWIN
739 static struct mach_timebase_info info;
740 if (info.denom == 0) {
741 kern_return_t result = mach_timebase_info(&info);
742 DCHECK_EQ(KERN_SUCCESS, result);
743 USE(result);
744 }
745 ticks = (mach_absolute_time() / Time::kNanosecondsPerMicrosecond *
746 info.numer / info.denom);
747#elif V8_OS_SOLARIS
748 ticks = (gethrtime() / Time::kNanosecondsPerMicrosecond);
749#elif V8_OS_FUCHSIA
750 ticks = zx_clock_get_monotonic() / Time::kNanosecondsPerMicrosecond;
751#elif V8_OS_POSIX
752 ticks = ClockNow(CLOCK_MONOTONIC);
753#elif V8_OS_STARBOARD
754 ticks = starboard::CurrentMonotonicTime();
755#else
756#error platform does not implement TimeTicks::Now.
757#endif // V8_OS_DARWIN
758 // Make sure we never return 0 here.
759 return TimeTicks(ticks + 1);
760}
761
762// static
763bool TimeTicks::IsHighResolution() {
764#if V8_OS_DARWIN
765 return true;
766#elif V8_OS_FUCHSIA
767 return true;
768#elif V8_OS_POSIX
769 static const bool is_high_resolution = IsHighResolutionTimer(CLOCK_MONOTONIC);
770 return is_high_resolution;
771#else
772 return true;
773#endif
774}
775
776#endif // V8_OS_WIN
777
778
779bool ThreadTicks::IsSupported() {
780#if V8_OS_STARBOARD
781 return starboard::CurrentMonotonicThreadTime() != 0;
782#elif defined(__PASE__)
783 // Thread CPU time accounting is unavailable in PASE
784 return false;
785#elif (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
786 defined(V8_OS_DARWIN) || defined(V8_OS_ANDROID) || \
787 defined(V8_OS_SOLARIS) || defined(V8_OS_ZOS)
788 return true;
789#elif defined(V8_OS_WIN)
790 return IsSupportedWin();
791#else
792 return false;
793#endif
794}
795
796
797ThreadTicks ThreadTicks::Now() {
798#if V8_OS_STARBOARD
799 const int64_t now = starboard::CurrentMonotonicThreadTime();
800 if (now != 0)
801 return ThreadTicks(now);
802 UNREACHABLE();
803#elif V8_OS_DARWIN
804 return ThreadTicks(ComputeThreadTicks());
805#elif V8_OS_FUCHSIA
806 return ThreadTicks(GetFuchsiaThreadTicks());
807#elif (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
808 defined(V8_OS_ANDROID) || defined(V8_OS_ZOS)
809 return ThreadTicks(ClockNow(CLOCK_THREAD_CPUTIME_ID));
810#elif V8_OS_SOLARIS
811 return ThreadTicks(gethrvtime() / Time::kNanosecondsPerMicrosecond);
812#elif V8_OS_WIN
813 return ThreadTicks::GetForThread(::GetCurrentThread());
814#else
815 UNREACHABLE();
816#endif
817}
818
819
820#if V8_OS_WIN
821ThreadTicks ThreadTicks::GetForThread(const HANDLE& thread_handle) {
822 DCHECK(IsSupported());
823
824#if V8_HOST_ARCH_ARM64
825 // QueryThreadCycleTime versus TSCTicksPerSecond doesn't have much relation to
826 // actual elapsed time on Windows on Arm, because QueryThreadCycleTime is
827 // backed by the actual number of CPU cycles executed, rather than a
828 // constant-rate timer like Intel. To work around this, use GetThreadTimes
829 // (which isn't as accurate but is meaningful as a measure of elapsed
830 // per-thread time).
831 FILETIME creation_time, exit_time, kernel_time, user_time;
832 ::GetThreadTimes(thread_handle, &creation_time, &exit_time, &kernel_time,
833 &user_time);
834
835 int64_t us = FileTimeToMicroseconds(user_time);
836 return ThreadTicks(us);
837#else
838 // Get the number of TSC ticks used by the current thread.
839 ULONG64 thread_cycle_time = 0;
840 ::QueryThreadCycleTime(thread_handle, &thread_cycle_time);
841
842 // Get the frequency of the TSC.
843 double tsc_ticks_per_second = TSCTicksPerSecond();
844 if (tsc_ticks_per_second == 0)
845 return ThreadTicks();
846
847 // Return the CPU time of the current thread.
848 double thread_time_seconds = thread_cycle_time / tsc_ticks_per_second;
849 return ThreadTicks(
850 static_cast<int64_t>(thread_time_seconds * Time::kMicrosecondsPerSecond));
851#endif
852}
853
854// static
855bool ThreadTicks::IsSupportedWin() {
856 static bool is_supported = base::CPU().has_non_stop_time_stamp_counter();
857 return is_supported;
858}
859
860// static
861void ThreadTicks::WaitUntilInitializedWin() {
862#ifndef V8_HOST_ARCH_ARM64
863 while (TSCTicksPerSecond() == 0) ::Sleep(10);
864#endif
865}
866
867#ifndef V8_HOST_ARCH_ARM64
868double ThreadTicks::TSCTicksPerSecond() {
869 DCHECK(IsSupported());
870
871 // The value returned by QueryPerformanceFrequency() cannot be used as the TSC
872 // frequency, because there is no guarantee that the TSC frequency is equal to
873 // the performance counter frequency.
874
875 // The TSC frequency is cached in a static variable because it takes some time
876 // to compute it.
877 static double tsc_ticks_per_second = 0;
878 if (tsc_ticks_per_second != 0)
879 return tsc_ticks_per_second;
880
881 // Increase the thread priority to reduce the chances of having a context
882 // switch during a reading of the TSC and the performance counter.
883 int previous_priority = ::GetThreadPriority(::GetCurrentThread());
884 ::SetThreadPriority(::GetCurrentThread(), THREAD_PRIORITY_HIGHEST);
885
886 // The first time that this function is called, make an initial reading of the
887 // TSC and the performance counter.
888 static const uint64_t tsc_initial = __rdtsc();
889 static const uint64_t perf_counter_initial = QPCNowRaw();
890
891 // Make another reading of the TSC and the performance counter every time
892 // this function is called.
893 uint64_t tsc_now = __rdtsc();
894 uint64_t perf_counter_now = QPCNowRaw();
895
896 // Reset the thread priority.
897 ::SetThreadPriority(::GetCurrentThread(), previous_priority);
898
899 // Make sure that at least 50 ms elapsed between the 2 readings. The first
900 // time that this function is called, we don't expect this to be the case.
901 // Note: The longer the elapsed time between the 2 readings is, the more
902 // accurate the computed TSC frequency will be. The 50 ms value was
903 // chosen because local benchmarks show that it allows us to get a
904 // stddev of less than 1 tick/us between multiple runs.
905 // Note: According to the MSDN documentation for QueryPerformanceFrequency(),
906 // this will never fail on systems that run XP or later.
907 // https://msdn.microsoft.com/library/windows/desktop/ms644905.aspx
908 LARGE_INTEGER perf_counter_frequency = {};
909 ::QueryPerformanceFrequency(&perf_counter_frequency);
910 DCHECK_GE(perf_counter_now, perf_counter_initial);
911 uint64_t perf_counter_ticks = perf_counter_now - perf_counter_initial;
912 double elapsed_time_seconds =
913 perf_counter_ticks / static_cast<double>(perf_counter_frequency.QuadPart);
914
915 const double kMinimumEvaluationPeriodSeconds = 0.05;
916 if (elapsed_time_seconds < kMinimumEvaluationPeriodSeconds)
917 return 0;
918
919 // Compute the frequency of the TSC.
920 DCHECK_GE(tsc_now, tsc_initial);
921 uint64_t tsc_ticks = tsc_now - tsc_initial;
922 tsc_ticks_per_second = tsc_ticks / elapsed_time_seconds;
923
924 return tsc_ticks_per_second;
925}
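The calibration idea in a portable sketch: two (cycle counter, elapsed time) samples taken far enough apart, then a division. ReadCycleCounter() here is a stand-in for __rdtsc()/QPCNowRaw() so the sketch runs anywhere:

#include <chrono>
#include <cstdint>
#include <cstdio>
#include <thread>

// Hypothetical stand-in for a raw cycle counter; reuses steady_clock so the
// sketch is self-contained and portable.
uint64_t ReadCycleCounter() {
  return static_cast<uint64_t>(
      std::chrono::steady_clock::now().time_since_epoch().count());
}

// Sample (cycles, wall time) twice, require a minimum elapsed period so the
// quotient is stable, then divide.
double EstimateTicksPerSecond() {
  auto t0 = std::chrono::steady_clock::now();
  uint64_t c0 = ReadCycleCounter();
  std::this_thread::sleep_for(std::chrono::milliseconds(50));  // >= 50 ms apart
  auto t1 = std::chrono::steady_clock::now();
  uint64_t c1 = ReadCycleCounter();
  double elapsed_seconds = std::chrono::duration<double>(t1 - t0).count();
  if (elapsed_seconds < 0.05) return 0;  // not enough data yet
  return static_cast<double>(c1 - c0) / elapsed_seconds;
}

int main() { std::printf("%.0f ticks/s\n", EstimateTicksPerSecond()); }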
926#endif // !defined(V8_HOST_ARCH_ARM64)
927#endif // V8_OS_WIN
928
929} // namespace base
930} // namespace v8