#ifndef V8_UTILS_UTILS_H_
#define V8_UTILS_UTILS_H_

#if defined(V8_USE_SIPHASH)
#include "third_party/siphash/halfsiphash.h"
#endif

#if defined(V8_TARGET_ARCH_ARM64) && \
    (defined(__ARM_NEON) || defined(__ARM_NEON__))
#define V8_OPTIMIZE_WITH_NEON
#endif
// Simulates a true arithmetic right shift: plain `>>` on negative signed
// values is implementation-defined, so the leading sign bits are OR-ed back
// in explicitly.
template <typename T>
static T ArithmeticShiftRight(T x, int shift) {
  DCHECK_LE(0, shift);
  if (x < 0) {
    using UnsignedT = typename std::make_unsigned<T>::type;
    UnsignedT mask = ~(static_cast<UnsignedT>(~0) >> shift);
    return (static_cast<UnsignedT>(x) >> shift) | mask;
  }
  return x >> shift;
}
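// Illustrative values (not part of the original header):
//
//   ArithmeticShiftRight<int32_t>(-8, 1)  // == -4 (sign bits preserved)
//   ArithmeticShiftRight<int32_t>(8, 1)   // == 4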
// Returns the maximum of the two parameters according to JavaScript
// semantics: NaN propagates, and +0 is considered larger than -0.
template <typename T>
T JSMax(T x, T y) {
  if (std::isnan(x)) return x;
  if (std::isnan(y)) return y;
  if (std::signbit(x) < std::signbit(y)) return x;
  return x > y ? x : y;
}

// Returns the minimum of the two parameters according to JavaScript
// semantics: NaN propagates, and -0 is considered smaller than +0.
template <typename T>
T JSMin(T x, T y) {
  if (std::isnan(x)) return x;
  if (std::isnan(y)) return y;
  if (std::signbit(x) < std::signbit(y)) return y;
  return x > y ? y : x;
}
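// Illustrative values (not part of the original header):
//
//   JSMax(-0.0, 0.0)  // == +0.0
//   JSMin(-0.0, 0.0)  // == -0.0
//   JSMin(1.0, std::numeric_limits<double>::quiet_NaN())  // NaN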
// Branch-free absolute value (see Hacker's Delight, chapter 2); avoids the
// undefined behavior of negating the most negative value.
template <typename T>
typename std::make_unsigned<T>::type Abs(T a)
  requires std::is_signed<T>::value
{
  using unsignedT = typename std::make_unsigned<T>::type;
  unsignedT x = static_cast<unsignedT>(a);
  unsignedT y = static_cast<unsignedT>(a >> (sizeof(T) * 8 - 1));
  return (x ^ y) - y;
}
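// Illustrative values (not part of the original header): because the result
// type is unsigned, even the most negative input has a representable
// absolute value.
//
//   Abs(int8_t{-5})    // == uint8_t{5}
//   Abs(int8_t{-128})  // == uint8_t{128}, no signed overflow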
inline double Modulo(double x, double y) {
#if defined(V8_OS_WIN)
  // Workaround for MS fmod bugs. ECMA-262 says:
  // dividend is finite and divisor is an infinity => result equals dividend
  // dividend is a zero and divisor is nonzero finite => result equals dividend
  if (!(std::isfinite(x) && (!std::isfinite(y) && !std::isnan(y))) &&
      !(x == 0 && (y != 0 && std::isfinite(y)))) {
    double result = fmod(x, y);
    // Some MS CRT versions return +0.0 where -0.0 is required; fix the sign.
    if (x < 0 && result == 0) result = -0.0;
    x = result;
  }
  return x;
#elif defined(V8_OS_AIX)
  // AIX raises an underflow exception for (Number.MIN_VALUE % Number.MAX_VALUE).
  feclearexcept(FE_ALL_EXCEPT);
  double result = std::fmod(x, y);
  int exception = fetestexcept(FE_UNDERFLOW);
  return (exception ? x : result);
#else
  return std::fmod(x, y);
#endif
}
// Saturating addition: results that would overflow clamp to the type's
// minimum or maximum value.
template <typename T>
T SaturateAdd(T a, T b) {
  if (std::is_signed<T>::value) {
    if (a > 0 && b > 0) {
      if (a > std::numeric_limits<T>::max() - b) {
        return std::numeric_limits<T>::max();
      }
    } else if (a < 0 && b < 0) {
      if (a < std::numeric_limits<T>::min() - b) {
        return std::numeric_limits<T>::min();
      }
    }
  } else {
    CHECK(std::is_unsigned<T>::value);
    if (a > std::numeric_limits<T>::max() - b) {
      return std::numeric_limits<T>::max();
    }
  }
  return a + b;
}

// Saturating subtraction: results that would overflow clamp to the type's
// minimum or maximum value (or to zero for unsigned types).
template <typename T>
T SaturateSub(T a, T b) {
  if (std::is_signed<T>::value) {
    if (a >= 0 && b < 0) {
      if (a > std::numeric_limits<T>::max() + b) {
        return std::numeric_limits<T>::max();
      }
    } else if (a < 0 && b > 0) {
      if (a < std::numeric_limits<T>::min() + b) {
        return std::numeric_limits<T>::min();
      }
    }
  } else {
    CHECK(std::is_unsigned<T>::value);
    if (a < b) {
      return static_cast<T>(0);
    }
  }
  return a - b;
}
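// Illustrative values (not part of the original header) for the saturating
// helpers above, using int8_t with range [-128, 127]:
//
//   SaturateAdd<int8_t>(120, 10)   // == 127 (clamped to max)
//   SaturateSub<int8_t>(-120, 10)  // == -128 (clamped to min)
//   SaturateSub<uint8_t>(3, 5)     // == 0 (unsigned underflow clamps to 0)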
// Saturating rounding multiplication in Q format (as used by Arm/Arm64
// SQRDMULH-style instructions): multiply, add the rounding constant, shift
// back into range, and saturate.
template <typename T>
T SaturateRoundingQMul(T a, T b) {
  static_assert(std::is_integral<T>::value, "only integral types");

  constexpr int size_in_bits = sizeof(T) * 8;
  int round_const = 1 << (size_in_bits - 2);
  int64_t product = a * b;
  product += round_const;
  product >>= (size_in_bits - 1);
  return base::saturated_cast<T>(product);
}
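// Illustrative value (not part of the original header): in Q15 format,
// 0x4000 represents 0.5, so
//
//   SaturateRoundingQMul<int16_t>(0x4000, 0x4000)  // == 0x2000 (0.25)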
// Multiplies two Narrow values in a result type that is twice as wide, so
// the product cannot overflow.
template <typename Wide, typename Narrow>
Wide MultiplyLong(Narrow a, Narrow b) {
  static_assert(
      std::is_integral<Narrow>::value && std::is_integral<Wide>::value,
      "only integral types");
  static_assert(std::is_signed<Narrow>::value == std::is_signed<Wide>::value,
                "both must have same signedness");
  static_assert(sizeof(Narrow) * 2 == sizeof(Wide), "only twice as long");

  return static_cast<Wide>(a) * static_cast<Wide>(b);
}
// Adds two Narrow values in a result type that is twice as wide, so the sum
// cannot overflow.
template <typename Wide, typename Narrow>
Wide AddLong(Narrow a, Narrow b) {
  static_assert(
      std::is_integral<Narrow>::value && std::is_integral<Wide>::value,
      "only integral types");
  static_assert(std::is_signed<Narrow>::value == std::is_signed<Wide>::value,
                "both must have same signedness");
  static_assert(sizeof(Narrow) * 2 == sizeof(Wide), "only twice as long");

  return static_cast<Wide>(a) + static_cast<Wide>(b);
}
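// Illustrative values (not part of the original header): the widened result
// type makes overflow impossible.
//
//   MultiplyLong<int64_t>(int32_t{1 << 30}, int32_t{4})  // == int64_t{1} << 32
//   AddLong<uint64_t>(~uint32_t{0}, ~uint32_t{0})        // == 0x1FFFFFFFE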
// Rounding average of two unsigned values, computed in 64 bits to avoid
// intermediate overflow.
template <typename T>
T RoundingAverageUnsigned(T a, T b) {
  static_assert(std::is_unsigned<T>::value, "Only for unsigned types");
  static_assert(sizeof(T) < sizeof(uint64_t), "Must be smaller than uint64_t");
  return (static_cast<uint64_t>(a) + static_cast<uint64_t>(b) + 1) >> 1;
}
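// Illustrative values (not part of the original header):
//
//   RoundingAverageUnsigned<uint8_t>(1, 2)      // == 2 (rounds up on ties)
//   RoundingAverageUnsigned<uint8_t>(255, 255)  // == 255, no overflow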
// Helper macros for defining a contiguous sequence of field offset constants
// as an enum. Every field gets a Name and a Name##End constant; FIELD_SIZE
// recovers the field size from the pair.
#define DEFINE_ONE_FIELD_OFFSET(Name, Size, ...) \
  Name, Name##End = Name + (Size)-1,

#define DEFINE_FIELD_OFFSET_CONSTANTS(StartOffset, LIST_MACRO) \
  enum {                                                       \
    LIST_MACRO##_StartOffset = StartOffset - 1,                \
    LIST_MACRO(DEFINE_ONE_FIELD_OFFSET)                        \
  };

#define DEFINE_ONE_FIELD_OFFSET_PURE_NAME(CamelName, Size, ...) \
  k##CamelName##Offset,                                         \
  k##CamelName##OffsetEnd = k##CamelName##Offset + (Size)-1,

#define DEFINE_FIELD_OFFSET_CONSTANTS_WITH_PURE_NAME(StartOffset, LIST_MACRO) \
  enum {                                                                      \
    LIST_MACRO##_StartOffset = StartOffset - 1,                               \
    LIST_MACRO(DEFINE_ONE_FIELD_OFFSET_PURE_NAME)                             \
  };

// Size of the field defined by DEFINE_FIELD_OFFSET_CONSTANTS.
#define FIELD_SIZE(Name) (Name##End + 1 - Name)

#define STATIC_ASSERT_FIELD_OFFSETS_EQUAL(Offset1, Offset2) \
  static_assert(static_cast<int>(Offset1) == Offset2)
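// Illustrative usage (not part of the original header; the field list is
// hypothetical): every V(...) entry becomes an offset constant plus a
// matching *End constant, so FIELD_SIZE can recover the declared size.
//
//   #define EXAMPLE_FIELDS(V)      \
//     V(kFooOffset, kTaggedSize)   \
//     V(kBarOffset, kIntSize)      \
//     V(kSize, 0)
//
//   DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, EXAMPLE_FIELDS)
//   static_assert(FIELD_SIZE(kFooOffset) == kTaggedSize);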
// Thomas Wang, Integer Hash Functions.
inline uint32_t ComputeUnseededHash(uint32_t key) {
  uint32_t hash = key;
  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1;
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;  // hash = (hash + (hash << 3)) + (hash << 11);
  hash = hash ^ (hash >> 16);
  return hash & 0x3fffffff;
}

inline uint32_t ComputeLongHash(uint64_t key) {
  uint64_t hash = key;
  hash = ~hash + (hash << 18);  // hash = (hash << 18) - hash - 1;
  hash = hash ^ (hash >> 31);
  hash = hash * 21;  // hash = (hash + (hash << 2)) + (hash << 4);
  hash = hash ^ (hash >> 11);
  hash = hash + (hash << 6);
  hash = hash ^ (hash >> 22);
  return static_cast<uint32_t>(hash & 0x3fffffff);
}

inline uint32_t ComputeSeededHash(uint32_t key, uint64_t seed) {
#ifdef V8_USE_SIPHASH
  return halfsiphash(key, seed);
#else
  return ComputeLongHash(static_cast<uint64_t>(key) ^ seed);
#endif  // V8_USE_SIPHASH
}

inline uint32_t ComputePointerHash(void* ptr) {
  return ComputeUnseededHash(
      static_cast<uint32_t>(reinterpret_cast<intptr_t>(ptr)));
}
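// Illustrative usage (not part of the original header): every helper above
// masks its result with 0x3fffffff, so the hash always fits in 30 bits.
//
//   uint32_t h = ComputeUnseededHash(42);  // h <= 0x3fffffff
//   uint32_t p = ComputePointerHash(&h);   // hashes the pointer bits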
// Memory offsets (in bytes) of the lower and upper halves of an int64 value,
// depending on the target endianness.
#if defined(V8_TARGET_LITTLE_ENDIAN)
static const int kInt64LowerHalfMemoryOffset = 0;
static const int kInt64UpperHalfMemoryOffset = 4;
#elif defined(V8_TARGET_BIG_ENDIAN)
static const int kInt64LowerHalfMemoryOffset = 4;
static const int kInt64UpperHalfMemoryOffset = 0;
#endif  // V8_TARGET_LITTLE_ENDIAN
#ifdef __SSE3__
// SIMD-accelerated byte-wise equality for single-byte characters. The
// overlapping head/tail loads require count >= sizeof(__m128i).
template <typename Char>
V8_INLINE bool SimdMemEqual(const Char* lhs, const Char* rhs, size_t count) {
  static_assert(sizeof(Char) == 1);
  static constexpr uint16_t kSIMDMatched16Mask = UINT16_MAX;
  static constexpr uint32_t kSIMDMatched32Mask = UINT32_MAX;
  if (count <= 2 * sizeof(__m128i)) {
    // Short case: compare the first and the last 16 bytes; the two loads may
    // overlap, which is fine for an equality check.
    const __m128i lhs128_start =
        _mm_lddqu_si128(reinterpret_cast<const __m128i*>(lhs));
    const __m128i lhs128_end = _mm_lddqu_si128(
        reinterpret_cast<const __m128i*>(lhs + count - sizeof(__m128i)));
    const __m128i rhs128_start =
        _mm_lddqu_si128(reinterpret_cast<const __m128i*>(rhs));
    const __m128i rhs128_end = _mm_lddqu_si128(
        reinterpret_cast<const __m128i*>(rhs + count - sizeof(__m128i)));
    const __m128i res_start = _mm_cmpeq_epi8(lhs128_start, rhs128_start);
    const __m128i res_end = _mm_cmpeq_epi8(lhs128_end, rhs128_end);
    const uint32_t res =
        _mm_movemask_epi8(res_start) << 16 | _mm_movemask_epi8(res_end);
    return res == kSIMDMatched32Mask;
  }
  // Long case: compare the first 16 bytes, then loop over 16-byte blocks
  // aligned so that the last block ends exactly at lhs + count.
  const __m128i lhs128_unrolled =
      _mm_lddqu_si128(reinterpret_cast<const __m128i*>(lhs));
  const __m128i rhs128_unrolled =
      _mm_lddqu_si128(reinterpret_cast<const __m128i*>(rhs));
  const __m128i res_unrolled = _mm_cmpeq_epi8(lhs128_unrolled, rhs128_unrolled);
  const uint16_t res_unrolled_mask = _mm_movemask_epi8(res_unrolled);
  if (res_unrolled_mask != kSIMDMatched16Mask) return false;
  for (size_t i = count % sizeof(__m128i); i < count; i += sizeof(__m128i)) {
    const __m128i lhs128 =
        _mm_lddqu_si128(reinterpret_cast<const __m128i*>(lhs + i));
    const __m128i rhs128 =
        _mm_lddqu_si128(reinterpret_cast<const __m128i*>(rhs + i));
    const __m128i res = _mm_cmpeq_epi8(lhs128, rhs128);
    const uint16_t res_mask = _mm_movemask_epi8(res);
    if (res_mask != kSIMDMatched16Mask) return false;
  }
  return true;
}
#elif defined(V8_OPTIMIZE_WITH_NEON)
// NEON variant of SimdMemEqual: XOR the operands and check whether any lane
// is non-zero via a pairwise max. Requires count >= sizeof(uint8x16_t).
template <typename Char>
V8_INLINE bool SimdMemEqual(const Char* lhs, const Char* rhs, size_t count) {
  static_assert(sizeof(Char) == 1);
  if (count <= 2 * sizeof(uint8x16_t)) {
    // Short case: compare the first and the last 16 bytes with possibly
    // overlapping loads.
    const auto lhs0 = vld1q_u8(lhs);
    const auto lhs1 = vld1q_u8(lhs + count - sizeof(uint8x16_t));
    const auto rhs0 = vld1q_u8(rhs);
    const auto rhs1 = vld1q_u8(rhs + count - sizeof(uint8x16_t));
    const auto xored0 = veorq_u8(lhs0, rhs0);
    const auto xored1 = veorq_u8(lhs1, rhs1);
    const auto ored = vorrq_u8(xored0, xored1);
    return !static_cast<bool>(
        vgetq_lane_u64(vreinterpretq_u64_u8(vpmaxq_u8(ored, ored)), 0));
  }
  // Long case: compare the first 16 bytes, then loop over 16-byte blocks
  // aligned so that the last block ends exactly at lhs + count.
  const auto first_lhs0 = vld1q_u8(lhs);
  const auto first_rhs0 = vld1q_u8(rhs);
  const auto first_xored = veorq_u8(first_lhs0, first_rhs0);
  if (static_cast<bool>(vgetq_lane_u64(
          vreinterpretq_u64_u8(vpmaxq_u8(first_xored, first_xored)), 0))) {
    return false;
  }
  for (size_t i = count % sizeof(uint8x16_t); i < count;
       i += sizeof(uint8x16_t)) {
    const auto lhs0 = vld1q_u8(lhs + i);
    const auto rhs0 = vld1q_u8(rhs + i);
    const auto xored = veorq_u8(lhs0, rhs0);
    if (static_cast<bool>(
            vgetq_lane_u64(vreinterpretq_u64_u8(vpmaxq_u8(xored, xored)), 0)))
      return false;
  }
  return true;
}
#endif  // __SSE3__ / V8_OPTIMIZE_WITH_NEON
// Compares the leading and trailing sizeof(IntType) bytes of both buffers
// with two possibly-overlapping scalar loads; together they cover all
// `count` bytes when count <= 2 * sizeof(IntType).
template <typename IntType, typename Char>
V8_INLINE bool OverlappingCompare(const Char* lhs, const Char* rhs,
                                  size_t count) {
  static_assert(sizeof(Char) == 1);
  return *reinterpret_cast<const IntType*>(lhs) ==
             *reinterpret_cast<const IntType*>(rhs) &&
         *reinterpret_cast<const IntType*>(lhs + count - sizeof(IntType)) ==
             *reinterpret_cast<const IntType*>(rhs + count - sizeof(IntType));
}
// Scalar fast path for short single-byte inputs, dispatched before the SIMD
// routines above; the two-character case loads each side as one uint16_t and
// compares directly.
template <typename Char>
  static_assert(sizeof(Char) == 1);
      return *reinterpret_cast<const uint16_t*>(lhs) ==
             *reinterpret_cast<const uint16_t*>(rhs);
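// Illustrative note (not part of the original header): OverlappingCompare
// relies on the head and tail loads overlapping when count is not an exact
// multiple of sizeof(IntType). With IntType = uint32_t and count == 6, the
// two loads cover bytes [0, 4) and [2, 6), so every byte is checked at least
// once, which is sufficient for an equality test.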
// Compares 8-bit/16-bit character sequences for equality.
template <typename lchar, typename rchar>
inline bool CompareCharsEqualUnsigned(const lchar* lhs, const rchar* rhs,
                                      size_t chars) {
  static_assert(std::is_unsigned<lchar>::value);
  static_assert(std::is_unsigned<rchar>::value);
  if constexpr (sizeof(*lhs) == sizeof(*rhs)) {
#if defined(__SSE3__) || defined(V8_OPTIMIZE_WITH_NEON)
    if constexpr (sizeof(*lhs) == 1) {
      return SimdMemEqual(lhs, rhs, chars);
    }
#endif
    // memcmp compares byte-by-byte, but for equality it does not matter
    // whether a two-byte comparison is little- or big-endian.
    return memcmp(lhs, rhs, chars * sizeof(*lhs)) == 0;
  }
  for (const lchar* limit = lhs + chars; lhs < limit; ++lhs, ++rhs) {
    if (*lhs != *rhs) return false;
  }
  return true;
}
template <typename lchar, typename rchar>
inline bool CompareCharsEqual(const lchar* lhs, const rchar* rhs,
                              size_t chars) {
  using ulchar = typename std::make_unsigned<lchar>::type;
  using urchar = typename std::make_unsigned<rchar>::type;
  return CompareCharsEqualUnsigned(reinterpret_cast<const ulchar*>(lhs),
                                   reinterpret_cast<const urchar*>(rhs), chars);
}
// Three-way comparison of 8-bit/16-bit character sequences.
template <typename lchar, typename rchar>
inline int CompareCharsUnsigned(const lchar* lhs, const rchar* rhs,
                                size_t chars) {
  static_assert(std::is_unsigned<lchar>::value);
  static_assert(std::is_unsigned<rchar>::value);
  if (sizeof(*lhs) == sizeof(char) && sizeof(*rhs) == sizeof(char)) {
    // memcmp compares byte-by-byte, which gives the right ordering for
    // one-byte characters.
    return memcmp(lhs, rhs, chars);
  }
  for (const lchar* limit = lhs + chars; lhs < limit; ++lhs, ++rhs) {
    int r = static_cast<int>(*lhs) - static_cast<int>(*rhs);
    if (r != 0) return r;
  }
  return 0;
}
template <typename lchar, typename rchar>
inline int CompareChars(const lchar* lhs, const rchar* rhs, size_t chars) {
  using ulchar = typename std::make_unsigned<lchar>::type;
  using urchar = typename std::make_unsigned<rchar>::type;
  return CompareCharsUnsigned(reinterpret_cast<const ulchar*>(lhs),
                              reinterpret_cast<const urchar*>(rhs), chars);
}
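// Illustrative usage (not part of the original header): the helpers accept
// mixed character widths, e.g. a Latin-1 buffer against a UTF-16 buffer.
//
//   const uint8_t one_byte[] = {'a', 'b', 'c'};
//   const uint16_t two_byte[] = {'a', 'b', 'c'};
//   CompareCharsEqual(one_byte, two_byte, 3);  // true
//   CompareChars(one_byte, two_byte, 3);       // 0 (equal)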
// Returns 10 raised to the given exponent, computed in 64 bits.
inline constexpr uint64_t TenToThe(uint32_t exponent) {
  uint64_t answer = 1;
  for (uint32_t i = 0; i < exponent; i++) answer *= 10;
  return answer;
}
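// Illustrative compile-time checks (not part of the original header):
static_assert(TenToThe(0) == 1);
static_assert(TenToThe(3) == 1000);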
// Extracts the bit field [msb:lsb] (inclusive) from x.
inline constexpr uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x) {
  return (x >> lsb) & ((1 << (1 + msb - lsb)) - 1);
}

inline constexpr uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x) {
  return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1);
}

// Extracts the bit field [msb:lsb] and sign-extends it.
inline constexpr int32_t signed_bitextract_32(int msb, int lsb, uint32_t x) {
  return static_cast<int32_t>(x << (31 - msb)) >> (lsb + 31 - msb);
}
// Checks whether x fits into a signed n-bit integer.
inline constexpr bool is_intn(int64_t x, unsigned n) {
  DCHECK((0 < n) && (n < 64));
  int64_t limit = static_cast<int64_t>(1) << (n - 1);
  return (-limit <= x) && (x < limit);
}

// Truncates x to its lowest n bits.
template <class T>
inline constexpr T truncate_to_intn(T x, unsigned n) {
  DCHECK((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
  return (x & ((static_cast<T>(1) << n) - 1));
}
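// Illustrative values (not part of the original header): an n-bit signed
// field covers [-2^(n-1), 2^(n-1)), so for n == 8:
//
//   is_intn(127, 8)               // true
//   is_intn(128, 8)               // false
//   is_intn(-128, 8)              // true
//   truncate_to_intn(0x1234, 8)   // == 0x34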
#define INT_1_TO_63_LIST(V)                                    \
  V(1)  V(2)  V(3)  V(4)  V(5)  V(6)  V(7)  V(8)  V(9)  V(10)  \
  V(11) V(12) V(13) V(14) V(15) V(16) V(17) V(18) V(19) V(20)  \
  V(21) V(22) V(23) V(24) V(25) V(26) V(27) V(28) V(29) V(30)  \
  V(31) V(32) V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40)  \
  V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) V(49) V(50)  \
  V(51) V(52) V(53) V(54) V(55) V(56) V(57) V(58) V(59) V(60)  \
  V(61) V(62) V(63)

#define DECLARE_IS_INT_N(N) \
  inline constexpr bool is_int##N(int64_t x) { return is_intn(x, N); }
#define DECLARE_IS_UINT_N(N)              \
  template <class T>                      \
  inline constexpr bool is_uint##N(T x) { \
    return is_uintn(x, N);                \
  }
#define DECLARE_TRUNCATE_TO_INT_N(N)           \
  template <class T>                           \
  inline constexpr T truncate_to_int##N(T x) { \
    return truncate_to_intn(x, N);             \
  }
#define DECLARE_CHECKED_TRUNCATE_TO_INT_N(N)           \
  template <class T>                                   \
  inline constexpr T checked_truncate_to_int##N(T x) { \
    CHECK(is_int##N(x));                               \
    return truncate_to_intn(x, N);                     \
  }

INT_1_TO_63_LIST(DECLARE_IS_INT_N)
INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N)
INT_1_TO_63_LIST(DECLARE_CHECKED_TRUNCATE_TO_INT_N)

#undef DECLARE_IS_INT_N
#undef DECLARE_IS_UINT_N
#undef DECLARE_TRUNCATE_TO_INT_N
#undef DECLARE_CHECKED_TRUNCATE_TO_INT_N
#undef INT_1_TO_63_LIST
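// Illustrative expansion (not part of the original header): the list above
// stamps out one helper per bit width, e.g.
//
//   is_int8(127)   // true
//   is_uint5(31)   // true
//   is_uint5(32)   // false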
#define INT_0_TO_127_LIST(V)                                           \
V(0)   V(1)   V(2)   V(3)   V(4)   V(5)   V(6)   V(7)   V(8)   V(9)    \
V(10)  V(11)  V(12)  V(13)  V(14)  V(15)  V(16)  V(17)  V(18)  V(19)   \
V(20)  V(21)  V(22)  V(23)  V(24)  V(25)  V(26)  V(27)  V(28)  V(29)   \
V(30)  V(31)  V(32)  V(33)  V(34)  V(35)  V(36)  V(37)  V(38)  V(39)   \
V(40)  V(41)  V(42)  V(43)  V(44)  V(45)  V(46)  V(47)  V(48)  V(49)   \
V(50)  V(51)  V(52)  V(53)  V(54)  V(55)  V(56)  V(57)  V(58)  V(59)   \
V(60)  V(61)  V(62)  V(63)  V(64)  V(65)  V(66)  V(67)  V(68)  V(69)   \
V(70)  V(71)  V(72)  V(73)  V(74)  V(75)  V(76)  V(77)  V(78)  V(79)   \
V(80)  V(81)  V(82)  V(83)  V(84)  V(85)  V(86)  V(87)  V(88)  V(89)   \
V(90)  V(91)  V(92)  V(93)  V(94)  V(95)  V(96)  V(97)  V(98)  V(99)   \
V(100) V(101) V(102) V(103) V(104) V(105) V(106) V(107) V(108) V(109)  \
V(110) V(111) V(112) V(113) V(114) V(115) V(116) V(117) V(118) V(119)  \
V(120) V(121) V(122) V(123) V(124) V(125) V(126) V(127)
// BytecodeOffset equality operators.
bool operator==(const BytecodeOffset& other) const { return id_ == other.id_; }
bool operator!=(const BytecodeOffset& other) const { return id_ != other.id_; }
// Our version of printf(), writing to the given FILE stream.
V8_EXPORT_PRIVATE void PRINTF_FORMAT(2, 3)
    PrintF(FILE* out, const char* format, ...);

// Write size chars/bytes from the buffer to the file given by filename.
// The file is overwritten; returns the number of chars/bytes written.
int WriteChars(const char* filename, const char* str, int size,
               bool verbose = true);
int WriteBytes(const char* filename, const uint8_t* bytes, int size,
               bool verbose = true);

// Reads a file into a std::string; *exists reports whether the file existed.
V8_EXPORT_PRIVATE std::string ReadFile(const char* filename, bool* exists,
                                       bool verbose = true);
V8_EXPORT_PRIVATE std::string ReadFile(FILE* file, bool* exists,
                                       bool verbose = true);
template <typename Char>
bool TryAddIndexChar(uint32_t* index, Char c);

// {index_t} is meant to be {uint32_t} or {size_t}.
template <typename Stream, typename index_t,
          enum ToIndexMode mode = kToArrayIndex>
bool StringToIndex(Stream* stream, index_t* index);
static inline uint16_t ByteReverse16(uint16_t value) {
#if V8_HAS_BUILTIN_BSWAP16
  return __builtin_bswap16(value);
#else
  return value << 8 | (value >> 8 & 0x00FF);
#endif
}

static inline uint32_t ByteReverse32(uint32_t value) {
#if V8_HAS_BUILTIN_BSWAP32
  return __builtin_bswap32(value);
#else
  return value << 24 | ((value << 8) & 0x00FF0000) |
         ((value >> 8) & 0x0000FF00) | ((value >> 24) & 0x00000FF);
#endif
}

static inline uint64_t ByteReverse64(uint64_t value) {
#if V8_HAS_BUILTIN_BSWAP64
  return __builtin_bswap64(value);
#else
  size_t bits_of_v = sizeof(value) * kBitsPerByte;
  return value << (bits_of_v - 8) |
         ((value << (bits_of_v - 24)) & 0x00FF000000000000) |
         ((value << (bits_of_v - 40)) & 0x0000FF0000000000) |
         ((value << (bits_of_v - 56)) & 0x000000FF00000000) |
         ((value >> (bits_of_v - 56)) & 0x00000000FF000000) |
         ((value >> (bits_of_v - 40)) & 0x0000000000FF0000) |
         ((value >> (bits_of_v - 24)) & 0x000000000000FF00) |
         ((value >> (bits_of_v - 8)) & 0x00000000000000FF);
#endif
}
// Dispatches to the fixed-width byte-reversal helpers based on sizeof(V).
template <typename V>
static inline V ByteReverse(V value) {
  size_t size_of_v = sizeof(value);
  switch (size_of_v) {
    case 1:
      return value;
    case 2:
      return static_cast<V>(ByteReverse16(static_cast<uint16_t>(value)));
    case 4:
      return static_cast<V>(ByteReverse32(static_cast<uint32_t>(value)));
    case 8:
      return static_cast<V>(ByteReverse64(static_cast<uint64_t>(value)));
    default:
      UNREACHABLE();
  }
}
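// Illustrative values (not part of the original header):
//
//   ByteReverse32(0x11223344u)     // == 0x44332211
//   ByteReverse(uint16_t{0x1234})  // == 0x3412, dispatched on sizeof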
// Restores the sign of a zero result that some platforms drop: if the input
// was negative and the operation produced +0.0, return -0.0 instead.
template <typename T>
T FpOpWorkaround(T input, T value) {
  if (std::signbit(input) && value == 0.0 && !std::signbit(value)) {
    return -value;
  }
  return value;
}

inline Float16 FpOpWorkaround(Float16 input, Float16 value) {
  float result = FpOpWorkaround(input.ToFloat32(), value.ToFloat32());
  return Float16::FromFloat32(result);
}
bool PassesFilter(base::Vector<const char> name,
                  base::Vector<const char> filter);
// Zaps a memory region with 0xCC (the x86 int3 opcode) so that stale code is
// easy to spot and traps if executed.
V8_INLINE void ZapCode(Address addr, size_t size_in_bytes) {
  static constexpr int kZapByte = 0xCC;
  std::memset(reinterpret_cast<void*>(addr), kZapByte, size_in_bytes);
}
// Rounds byte_length up to a multiple of page_size and stores the resulting
// page count in *pages. Returns false if the rounded size would exceed
// max_allowed_byte_length.
inline bool RoundUpToPageSize(size_t byte_length, size_t page_size,
                              size_t max_allowed_byte_length, size_t* pages) {
  if (byte_length > max_allowed_byte_length) {
    return false;
  }
  size_t bytes_wanted = RoundUp(byte_length, page_size);
  if (bytes_wanted > max_allowed_byte_length) {
    return false;
  }
  *pages = bytes_wanted / page_size;
  return true;
}
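// Illustrative usage (not part of the original header): with 4 KiB pages, a
// 10000-byte request rounds up to 3 pages.
//
//   size_t pages = 0;
//   bool ok = RoundUpToPageSize(10000, 4096, size_t{1} << 20, &pages);
//   // ok == true, pages == 3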