#ifdef V8_HOST_ARCH_ARM64
#define NEON64
#include <arm_neon.h>
#endif  // V8_HOST_ARCH_ARM64

enum class SimdKinds { kSSE, kNeon, kAVX2, kNone };

inline SimdKinds get_vectorization_kind() {
#ifdef __SSE3__
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
  bool has_avx2 = CpuFeatures::IsSupported(AVX2);
#else
  bool has_avx2 = false;
#endif
  if (has_avx2) return SimdKinds::kAVX2;
  // x86/x64 CPUs without SSE3 are not supported, so no runtime check needed.
  return SimdKinds::kSSE;
#elif defined(NEON64)
  return SimdKinds::kNeon;
#else
  return SimdKinds::kNone;
#endif
}

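// Searches for |search_element| in |array| with a plain scalar loop. Used
// when no SIMD is available and to finish off the tail of the array that the
// vectorized loops below cannot process.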
template <typename T>
inline uintptr_t slow_search(T* array, uintptr_t array_len, uintptr_t index,
                             T search_element) {
  for (; index < array_len; index++) {
    if (array[index] == search_element) return index;
  }
  return -1;
}

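// With MSVC (but not clang-cl), uint32x4_t and uint64x2_t are unions whose
// first member is uint64_t[2], and a union can only be brace-initialized
// through its first member, so the 32-bit lanes have to be packed into two
// 64-bit values on that compiler.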
#ifdef NEON64
#if defined(_MSC_VER) && !defined(__clang__)
#define PACK32x4(w, x, y, z) \
  { ((w) + (uint64_t(x) << 32)), ((y) + (uint64_t(z) << 32)) }
#else
#define PACK32x4(w, x, y, z) \
  { (w), (x), (y), (z) }
#endif

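// extract_first_nonzero_index_* returns the index of the first non-zero lane
// of |v|, where |v| is a vceqq-style mask whose lanes are either all-ones or
// zero. AND-ing with {4, 3, 2, 1} (resp. the packed {2, 0, 1, 0}) turns each
// matching lane into "lane count - lane index", so "lane count - max"
// recovers the index of the first match; this is the Neon equivalent of
// movemask + tzcnt on x86.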
V8_ALLOW_UNUSED inline int extract_first_nonzero_index_uint32x4_t(
    uint32x4_t v) {
  uint32x4_t mask = PACK32x4(4, 3, 2, 1);
  mask = vandq_u32(mask, v);
  return 4 - vmaxvq_u32(mask);
}

inline int extract_first_nonzero_index_uint64x2_t(uint64x2_t v) {
  uint32x4_t mask = PACK32x4(2, 0, 1, 0);
  mask = vandq_u32(mask, vreinterpretq_u32_u64(v));
  return 2 - vmaxvq_u32(mask);
}

inline int32_t reinterpret_vmaxvq_u64(uint64x2_t v) {
  return vmaxvq_u32(vreinterpretq_u32_u64(v));
}
#endif  // NEON64

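// VECTORIZED_LOOP_Neon and VECTORIZED_LOOP_x86 expand to the main search
// loops: broadcast |search_element| into a vector register, compare a full
// register of array elements per iteration, and return the index of the
// first matching lane as soon as one comparison hits.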
#define VECTORIZED_LOOP_Neon(type_load, type_eq, set1, cmp, movemask)  \
  {                                                                    \
    constexpr int elems_in_vector = sizeof(type_load) / sizeof(T);     \
    type_load search_element_vec = set1(search_element);               \
    for (; index + elems_in_vector <= array_len;                       \
         index += elems_in_vector) {                                   \
      type_load vector = *reinterpret_cast<type_load*>(&array[index]); \
      type_eq eq = cmp(vector, search_element_vec);                    \
      if (movemask(eq)) {                                              \
        return index + extract_first_nonzero_index_##type_eq(eq);      \
      }                                                                \
    }                                                                  \
  }

#define VECTORIZED_LOOP_x86(type_load, type_eq, set1, cmp, movemask, extract) \
  {                                                                           \
    constexpr int elems_in_vector = sizeof(type_load) / sizeof(T);            \
    type_load search_element_vec = set1(search_element);                      \
    for (; index + elems_in_vector <= array_len; index += elems_in_vector) {  \
      type_load vector = *reinterpret_cast<type_load*>(&array[index]);        \
      type_eq eq = cmp(vector, search_element_vec);                           \
      int eq_mask = movemask(eq);                                             \
      if (eq_mask) {                                                          \
        return index + extract(eq_mask);                                      \
      }                                                                       \
    }                                                                         \
  }

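// _mm_cmpeq_epi64 requires SSE4.1, so the 64-bit equality comparison used by
// the SSE search path is emulated with SSE2 instructions below.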
#ifdef __SSE3__
__m128i _mm_cmpeq_epi64_nosse4_2(__m128i a, __m128i b) {
  __m128i res = _mm_cmpeq_epi32(a, b);
  // Swap the 32-bit halves of each 64-bit lane and AND them together, so a
  // lane compares equal only if both of its halves compared equal.
  __m128i res_swapped = _mm_shuffle_epi32(res, _MM_SHUFFLE(2, 3, 0, 1));
  return _mm_and_si128(res, res_swapped);
}
#endif  // __SSE3__

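// Vectorized search loop. Meant for large-ish arrays; nothing breaks for
// short inputs, they are simply not faster than the scalar loop.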
template <typename T>
inline uintptr_t fast_search_noavx(T* array, uintptr_t array_len,
                                   uintptr_t index, T search_element) {
  static constexpr bool is_uint32 =
      sizeof(T) == sizeof(uint32_t) && std::is_integral_v<T>;
  static constexpr bool is_uint64 =
      sizeof(T) == sizeof(uint64_t) && std::is_integral_v<T>;
  static constexpr bool is_double =
      sizeof(T) == sizeof(double) && std::is_floating_point_v<T>;

  static_assert(is_uint32 || is_uint64 || is_double);

#if !(defined(__SSE3__) || defined(NEON64))
  return slow_search(array, array_len, index, search_element);
#endif

#ifdef __SSE3__
  const int target_align = 16;
#elif defined(NEON64)
  const int target_align = 16;
#else
  const int target_align = 4;
#endif

  // Scalar loop until the desired alignment is reached.
  for (; index < array_len &&
         (reinterpret_cast<std::uintptr_t>(&(array[index])) % target_align) !=
             0;
       index++) {
    if (array[index] == search_element) {
      return index;
    }
  }

#ifdef __SSE3__
  if constexpr (is_uint32) {
#define MOVEMASK(x) _mm_movemask_ps(_mm_castsi128_ps(x))
#define EXTRACT(x) base::bits::CountTrailingZeros32(x)
    VECTORIZED_LOOP_x86(__m128i, __m128i, _mm_set1_epi32, _mm_cmpeq_epi32,
                        MOVEMASK, EXTRACT)
#undef MOVEMASK
#undef EXTRACT
  } else if constexpr (is_uint64) {
#define MOVEMASK(x) _mm_movemask_ps(_mm_castsi128_ps(x))
    // _mm_cmpeq_epi64_nosse4_2 yields the movemask patterns 0b0011, 0b1100 or
    // 0b1111, so testing the least significant bit is enough to tell whether
    // the first or the second 64-bit lane matched.
#define EXTRACT(x) (((x) & 1) ? 0 : 1)
    VECTORIZED_LOOP_x86(__m128i, __m128i, _mm_set1_epi64x,
                        _mm_cmpeq_epi64_nosse4_2, MOVEMASK, EXTRACT)
#undef MOVEMASK
#undef EXTRACT
  } else if constexpr (is_double) {
#define EXTRACT(x) base::bits::CountTrailingZeros32(x)
    VECTORIZED_LOOP_x86(__m128d, __m128d, _mm_set1_pd, _mm_cmpeq_pd,
                        _mm_movemask_pd, EXTRACT)
#undef EXTRACT
  }
#elif defined(NEON64)
  if constexpr (is_uint32) {
    VECTORIZED_LOOP_Neon(uint32x4_t, uint32x4_t, vdupq_n_u32, vceqq_u32,
                         vmaxvq_u32)
  } else if constexpr (is_uint64) {
    VECTORIZED_LOOP_Neon(uint64x2_t, uint64x2_t, vdupq_n_u64, vceqq_u64,
                         reinterpret_vmaxvq_u64)
  } else if constexpr (is_double) {
    VECTORIZED_LOOP_Neon(float64x2_t, uint64x2_t, vdupq_n_f64, vceqq_f64,
                         reinterpret_vmaxvq_u64)
  }
#endif

  // slow_search takes care of the tail that does not fill a whole vector.
  return slow_search(array, array_len, index, search_element);
}

#if defined(_MSC_VER) && defined(__clang__)
// Used below to exclude the AVX code paths when building with clang-cl.
#define IS_CLANG_WIN 1
#endif

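// The file is not compiled with -mavx2, so __AVX2__ is never defined. When
// SSE3 is available, the AVX2 variant below is still compiled via the
// function-level "target" attribute, and search() decides at runtime, based
// on get_vectorization_kind(), whether it may be called.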
#if defined(__SSE3__) && !defined(_M_IX86) && !defined(IS_CLANG_WIN)
#define TARGET_AVX2 __attribute__((target("avx2")))

template <typename T>
TARGET_AVX2
inline uintptr_t fast_search_avx(T* array, uintptr_t array_len,
                                 uintptr_t index, T search_element) {
  static constexpr bool is_uint32 =
      sizeof(T) == sizeof(uint32_t) && std::is_integral_v<T>;
  static constexpr bool is_uint64 =
      sizeof(T) == sizeof(uint64_t) && std::is_integral_v<T>;
  static constexpr bool is_double =
      sizeof(T) == sizeof(double) && std::is_floating_point_v<T>;

  static_assert(is_uint32 || is_uint64 || is_double);

  const int target_align = 32;
  // Scalar loop until 32-byte alignment is reached.
  for (; index < array_len &&
         (reinterpret_cast<std::uintptr_t>(&(array[index])) % target_align) !=
             0;
       index++) {
    if (array[index] == search_element) {
      return index;
    }
  }

  if constexpr (is_uint32) {
#define MOVEMASK(x) _mm256_movemask_ps(_mm256_castsi256_ps(x))
#define EXTRACT(x) base::bits::CountTrailingZeros32(x)
    VECTORIZED_LOOP_x86(__m256i, __m256i, _mm256_set1_epi32, _mm256_cmpeq_epi32,
                        MOVEMASK, EXTRACT)
#undef MOVEMASK
#undef EXTRACT
  } else if constexpr (is_uint64) {
#define MOVEMASK(x) _mm256_movemask_pd(_mm256_castsi256_pd(x))
#define EXTRACT(x) base::bits::CountTrailingZeros32(x)
    VECTORIZED_LOOP_x86(__m256i, __m256i, _mm256_set1_epi64x,
                        _mm256_cmpeq_epi64, MOVEMASK, EXTRACT)
#undef MOVEMASK
#undef EXTRACT
  } else if constexpr (is_double) {
#define CMP(a, b) _mm256_cmp_pd(a, b, _CMP_EQ_OQ)
#define EXTRACT(x) base::bits::CountTrailingZeros32(x)
    VECTORIZED_LOOP_x86(__m256d, __m256d, _mm256_set1_pd, CMP,
                        _mm256_movemask_pd, EXTRACT)
#undef CMP
#undef EXTRACT
  }

  // slow_search takes care of the tail that does not fill a whole vector.
  return slow_search(array, array_len, index, search_element);
}

#undef TARGET_AVX2
#elif defined(IS_CLANG_WIN)
template <typename T>
inline uintptr_t fast_search_avx(T* array, uintptr_t array_len, uintptr_t index,
                                 T search_element) {
  // Fall back to the SSE version.
  return fast_search_noavx(array, array_len, index, search_element);
}
#else
template <typename T>
uintptr_t fast_search_avx(T* array, uintptr_t array_len, uintptr_t index,
                          T search_element) {
  UNREACHABLE();
}
#endif

#undef VECTORIZED_LOOP_Neon
#undef VECTORIZED_LOOP_x86

template <typename T>
inline uintptr_t search(T* array, uintptr_t array_len, uintptr_t index,
                        T search_element) {
  if (get_vectorization_kind() == SimdKinds::kAVX2) {
    return fast_search_avx(array, array_len, index, search_element);
  }
  return fast_search_noavx(array, array_len, index, search_element);
}

enum class ArrayIndexOfIncludesKind { DOUBLE, OBJECTORSMI };

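// ArrayIndexOfIncludes only covers the cases that vectorize well: a Smi or
// double searched in a FixedDoubleArray, and a Smi or object searched (by raw
// tagged value) in a FixedArray. Every other case is expected to be handled
// by the caller before reaching this helper.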
template <ArrayIndexOfIncludesKind kind>
Address ArrayIndexOfIncludes(Address array_start, uintptr_t array_len,
                             uintptr_t from_index, Address search_element) {
  if (array_len == 0) {
    return Smi::FromInt(-1).ptr();
  }

  if constexpr (kind == ArrayIndexOfIncludesKind::DOUBLE) {
    Tagged<FixedDoubleArray> fixed_array =
        Cast<FixedDoubleArray>(Tagged<Object>(array_start));
    UnalignedDoubleMember* unaligned_array = fixed_array->begin();
    static_assert(sizeof(UnalignedDoubleMember) == sizeof(double));
    double* array = reinterpret_cast<double*>(unaligned_array);

    double search_num;
    if (IsSmi(Tagged<Object>(search_element))) {
      search_num = Tagged<Object>(search_element).ToSmi().value();
    } else {
      DCHECK(IsHeapNumber(Tagged<Object>(search_element)));
      search_num = Cast<HeapNumber>(Tagged<Object>(search_element))->value();
    }
    DCHECK(!std::isnan(search_num));

    if (reinterpret_cast<uintptr_t>(array) % sizeof(double) != 0) {
      // Slow scalar search for an unaligned double array. |search_num| cannot
      // be NaN, so holes are simply skipped.
      for (; from_index < array_len; from_index++) {
        if (fixed_array->is_the_hole(static_cast<int>(from_index))) {
          continue;
        }
        if (fixed_array->get_scalar(static_cast<int>(from_index)) ==
            search_num) {
          return from_index;
        }
      }
      return Smi::FromInt(-1).ptr();
    }

    return search<double>(array, array_len, from_index, search_num);
  }

  if constexpr (kind == ArrayIndexOfIncludesKind::OBJECTORSMI) {
    Tagged<FixedArray> fixed_array =
        Cast<FixedArray>(Tagged<Object>(array_start));
    Tagged_t* array = static_cast<Tagged_t*>(
        fixed_array->RawFieldOfFirstElement().ToVoidPtr());

    return search<Tagged_t>(array, array_len, from_index,
                            static_cast<Tagged_t>(search_element));
  }
}

uintptr_t ArrayIndexOfIncludesSmiOrObject(Address array_start,
                                          uintptr_t array_len,
                                          uintptr_t from_index,
                                          Address search_element) {
  return ArrayIndexOfIncludes<ArrayIndexOfIncludesKind::OBJECTORSMI>(
      array_start, array_len, from_index, search_element);
}

uintptr_t ArrayIndexOfIncludesDouble(Address array_start, uintptr_t array_len,
                                     uintptr_t from_index,
                                     Address search_element) {
  return ArrayIndexOfIncludes<ArrayIndexOfIncludesKind::DOUBLE>(
      array_start, array_len, from_index, search_element);
}

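// Branchless conversion of a 4-bit nibble to its lowercase ASCII hex digit:
// start from '0' + nibble, then add ('a' - '0' - 10) when the nibble is >= 10
// so that 10..15 map to 'a'..'f'. The ">= 10" test is computed without a
// branch: 128 - 10 + nibble has its top bit set exactly when nibble >= 10,
// and msb - (msb >> 7) turns that bit into a 0x7F/0x00 mask, which is enough
// to keep or drop the small correction constant.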
char NibbleToHex(uint8_t nibble) {
  const char correction = 'a' - '0' - 10;
  const char c = nibble + '0';
  uint8_t temp = 128 - 10 + nibble;
  uint8_t msb = temp & 0x80;
  uint8_t mask = msb - (msb >> 7);
  return c + (mask & correction);
}

void Uint8ArrayToHexSlow(const char* bytes, size_t length,
                         DirectHandle<SeqOneByteString> string_output) {
  int index = 0;
  for (size_t i = 0; i < length; i++) {
    uint8_t byte = bytes[i];
    uint8_t high = byte >> 4;
    uint8_t low = byte & 0x0F;

    string_output->SeqOneByteStringSet(index++, NibbleToHex(high));
    string_output->SeqOneByteStringSet(index++, NibbleToHex(low));
  }
}

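// SWAR variant of NibbleToHex: converts both nibbles of |byte| into their two
// ASCII hex characters inside one uint16_t, laid out so that storing the
// uint16_t writes the high nibble's character first in memory.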
inline uint16_t ByteToHex(uint8_t byte) {
  const uint16_t correction = (('a' - '0' - 10) << 8) + ('a' - '0' - 10);
#if V8_TARGET_BIG_ENDIAN
  const uint16_t nibbles = ((byte & 0xF0) << 4) + (byte & 0xF);
#else
  const uint16_t nibbles = ((byte & 0xF) << 8) + (byte >> 4);
#endif
  const uint16_t chars = nibbles + 0x3030;
  const uint16_t temp = 0x8080 - 0x0A0A + nibbles;
  const uint16_t msb = temp & 0x8080;
  const uint16_t mask = msb - (msb >> 7);
  return chars + (mask & correction);
}

V8_ALLOW_UNUSED void HandleRemainingNibbles(const char* bytes, uint8_t* output,
                                            size_t length, size_t i) {
  // Convert the remaining (at most 7) bytes with the scalar ByteToHex.
  uint16_t* output_pairs = reinterpret_cast<uint16_t*>(output) + i;
  bytes += i;
  size_t rest = length & 0x7;
  for (i = 0; i < rest; i++) {
    *(output_pairs++) = ByteToHex(*bytes++);
  }
}

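// Vectorized Uint8Array.prototype.toHex: each iteration splits 8 input bytes
// into 16 nibbles and converts all of them to ASCII hex characters with one
// vector add, compare and correction, writing 16 output characters at once.
// The remainder (length % 8) is handled by HandleRemainingNibbles.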
void Uint8ArrayToHexFastWithSSE(const char* bytes, uint8_t* output,
                                size_t length) {
  size_t i = 0;
  for (; i + 8 <= length; i += 8) {
    // Split the next 8 input bytes into 16 nibbles.
    alignas(16) uint8_t nibbles_buffer[16];
    int index = 0;
    for (size_t j = i; j < i + 8; j++) {
      nibbles_buffer[index++] = bytes[j] >> 4;
      nibbles_buffer[index++] = bytes[j] & 0x0F;
    }

    __m128i nibbles =
        _mm_load_si128(reinterpret_cast<__m128i*>(nibbles_buffer));
    __m128i nine = _mm_set1_epi8(9);
    __m128i ascii_0 = _mm_set1_epi8('0');
    __m128i correction = _mm_set1_epi8('a' - 10 - '0');

    // '0' + nibble for every lane...
    __m128i ascii_result = _mm_add_epi8(nibbles, ascii_0);
    // ...plus the 'a'-correction for the lanes whose nibble is > 9.
    __m128i mask = _mm_cmpgt_epi8(nibbles, nine);
    __m128i corrected_result = _mm_and_si128(mask, correction);
    corrected_result = _mm_add_epi8(ascii_result, corrected_result);

    _mm_storeu_si128(reinterpret_cast<__m128i*>(&output[i * 2]),
                     corrected_result);
  }
  HandleRemainingNibbles(bytes, output, length, i);
}

void Uint8ArrayToHexFastWithNeon(const char* bytes, uint8_t* output,
                                 size_t length) {
  size_t i = 0;
  for (; i + 8 <= length; i += 8) {
    // Split the next 8 input bytes into 16 nibbles.
    alignas(16) uint8_t nibbles_buffer[16];
    int index = 0;
    for (size_t j = i; j < i + 8; j++) {
      nibbles_buffer[index++] = bytes[j] >> 4;
      nibbles_buffer[index++] = bytes[j] & 0x0F;
    }

    uint8x16_t nibbles = vld1q_u8(nibbles_buffer);
    uint8x16_t nine = vdupq_n_u8(9);
    uint8x16_t ascii0 = vdupq_n_u8('0');
    uint8x16_t correction = vdupq_n_u8('a' - 10 - '0');

    // '0' + nibble for every lane...
    uint8x16_t ascii_result = vaddq_u8(nibbles, ascii0);
    // ...plus the 'a'-correction for the lanes whose nibble is > 9.
    uint8x16_t mask = vcgtq_u8(nibbles, nine);
    uint8x16_t corrected_result = vandq_u8(mask, correction);
    corrected_result = vaddq_u8(ascii_result, corrected_result);

    vst1q_u8(&output[i * 2], corrected_result);
  }
  HandleRemainingNibbles(bytes, output, length, i);
}

Tagged<Object> Uint8ArrayToHex(const char* bytes, size_t length,
                               DirectHandle<SeqOneByteString> string_output) {
  if (get_vectorization_kind() == SimdKinds::kAVX2 ||
      get_vectorization_kind() == SimdKinds::kSSE) {
    DisallowGarbageCollection no_gc;
    Uint8ArrayToHexFastWithSSE(bytes, string_output->GetChars(no_gc), length);
    return *string_output;
  }

  if (get_vectorization_kind() == SimdKinds::kNeon) {
    DisallowGarbageCollection no_gc;
    Uint8ArrayToHexFastWithNeon(bytes, string_output->GetChars(no_gc),
                                length);
    return *string_output;
  }

  Uint8ArrayToHexSlow(bytes, length, string_output);
  return *string_output;
}

template <typename T>
Maybe<uint8_t> HexToUint8(T hex) {
  if (hex >= '0' && hex <= '9') {
    return Just<uint8_t>(hex - '0');
  } else if (hex >= 'a' && hex <= 'f') {
    return Just<uint8_t>(hex - 'a' + 10);
  } else if (hex >= 'A' && hex <= 'F') {
    return Just<uint8_t>(hex - 'A' + 10);
  }
  return Nothing<uint8_t>();
}

template <typename T>
std::optional<uint8_t> HandleRemainingHexValues(base::Vector<T>& input_vector,
                                                size_t i) {
  T higher = input_vector[i];
  T lower = input_vector[i + 1];

  uint8_t result_high = 0;
  Maybe<uint8_t> maybe_result_high = HexToUint8(higher);
  if (!maybe_result_high.To(&result_high)) {
    return std::nullopt;
  }

  uint8_t result_low = 0;
  Maybe<uint8_t> maybe_result_low = HexToUint8(lower);
  if (!maybe_result_low.To(&result_low)) {
    return std::nullopt;
  }

  // The first character encodes the high nibble of the byte.
  result_high <<= 4;
  uint8_t result = result_high + result_low;
  return result;
}

const __m128i char_0 = _mm_set1_epi8('0');

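// Vectorized hex-digit decoding: converts 16 ASCII characters to their 4-bit
// values at once. Each of the ranges '0'-'9', 'a'-'f' and 'A'-'F' is decoded
// separately with a subtract and a range check, and the three range masks are
// combined to detect any character that is not a hex digit.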
inline std::optional<__m128i> HexToUint8FastWithSSE(__m128i nibbles) {
  static const __m128i char_a = _mm_set1_epi8('a');
  static const __m128i char_A = _mm_set1_epi8('A');
  static const __m128i all_10 = _mm_set1_epi8(10);
  static const __m128i all_6 = _mm_set1_epi8(6);

  // '0'..'9': value is nibbles - '0' wherever that difference is below 10.
  __m128i nibbles_09 = _mm_sub_epi8(nibbles, char_0);
  __m128i mask_09 =
      _mm_cmplt_epi8(nibbles_09, _mm_max_epu8(nibbles_09, all_10));
  nibbles_09 = _mm_and_si128(nibbles_09, mask_09);

  // 'a'..'f': value is nibbles - 'a' + 10 wherever nibbles - 'a' is below 6.
  __m128i nibbles_af = _mm_sub_epi8(nibbles, char_a);
  __m128i mask_af = _mm_cmplt_epi8(nibbles_af, _mm_max_epu8(nibbles_af, all_6));
  nibbles_af = _mm_and_si128(_mm_add_epi8(nibbles_af, all_10), mask_af);

  // 'A'..'F': decoded the same way.
  __m128i nibbles_AF = _mm_sub_epi8(nibbles, char_A);
  __m128i mask_AF = _mm_cmplt_epi8(nibbles_AF, _mm_max_epu8(nibbles_AF, all_6));
  nibbles_AF = _mm_and_si128(_mm_add_epi8(nibbles_AF, all_10), mask_AF);

  // Every byte must fall into one of the three ranges; otherwise the input
  // contains a character that is not a hex digit.
  __m128i combined_mask = _mm_or_si128(_mm_or_si128(mask_af, mask_AF), mask_09);
  if (_mm_movemask_epi8(_mm_cmpeq_epi8(
          combined_mask, _mm_set1_epi64x(0xffffffffffffffff))) != 0xFFFF) {
    return std::nullopt;
  }

  return _mm_or_si128(_mm_or_si128(nibbles_af, nibbles_AF), nibbles_09);
}

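// Vectorized Uint8Array.fromHex: consumes 32 input characters (16 output
// bytes) per iteration, separates the characters encoding the high and low
// nibbles, decodes both halves with HexToUint8FastWithSSE and recombines them
// into bytes. Returns false as soon as an invalid character is encountered.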
template <typename T>
bool Uint8ArrayFromHexWithSSE(base::Vector<T>& input_vector,
                              DirectHandle<JSArrayBuffer> buffer,
                              size_t output_length) {
  CHECK_EQ(buffer->GetByteLength(), output_length);
  size_t i = 0;

  for (i = 0; i + 32 <= output_length * 2; i += 32) {
    // Load 32 input characters in two batches. For two-byte (uc16) input,
    // load twice as much and narrow each batch to one byte per character.
    __m128i first_batch =
        _mm_loadu_si128(reinterpret_cast<const __m128i*>(&input_vector[i]));
    if constexpr (std::is_same_v<T, const base::uc16>) {
      __m128i second_part_first_batch = _mm_loadu_si128(
          reinterpret_cast<const __m128i*>(&input_vector[i + 8]));
      first_batch = _mm_packus_epi16(first_batch, second_part_first_batch);
    }

    __m128i second_batch = _mm_loadu_si128(
        reinterpret_cast<const __m128i*>(&input_vector[i + 16]));
    if constexpr (std::is_same_v<T, const base::uc16>) {
      __m128i second_part_second_batch = _mm_loadu_si128(
          reinterpret_cast<const __m128i*>(&input_vector[i + 24]));
      second_batch = _mm_packus_epi16(second_batch, second_part_second_batch);
    }

    // Split each character pair into the character of the high nibble (first
    // in memory) and the character of the low nibble (second in memory).
    __m128i mask = _mm_set1_epi64((__m64)0x00ff00ff00ff00ff);
    __m128i first_batch_lo_nibbles = _mm_srli_epi16(first_batch, 8);
    __m128i first_batch_hi_nibbles = _mm_and_si128(first_batch, mask);
    __m128i second_batch_lo_nibbles = _mm_srli_epi16(second_batch, 8);
    __m128i second_batch_hi_nibbles = _mm_and_si128(second_batch, mask);

    __m128i lo_nibbles =
        _mm_packus_epi16(first_batch_lo_nibbles, second_batch_lo_nibbles);
    __m128i hi_nibbles =
        _mm_packus_epi16(first_batch_hi_nibbles, second_batch_hi_nibbles);

    std::optional<__m128i> maybe_uint8_low_nibbles =
        HexToUint8FastWithSSE(lo_nibbles);
    if (!maybe_uint8_low_nibbles.has_value()) {
      return false;
    }
    __m128i uint8_low_nibbles = maybe_uint8_low_nibbles.value();

    std::optional<__m128i> maybe_uint8_high_nibbles =
        HexToUint8FastWithSSE(hi_nibbles);
    if (!maybe_uint8_high_nibbles.has_value()) {
      return false;
    }
    __m128i uint8_high_nibbles = maybe_uint8_high_nibbles.value();

    // byte = (high nibble << 4) | low nibble, 16 bytes at a time.
    __m128i uint8_shifted_high_nibbles = _mm_slli_epi64(uint8_high_nibbles, 4);
    __m128i final_result =
        _mm_or_si128(uint8_shifted_high_nibbles, uint8_low_nibbles);

    _mm_storeu_si128(reinterpret_cast<__m128i*>(&(static_cast<uint8_t*>(
                         buffer->backing_store())[i / 2])),
                     final_result);
  }

  // Decode the remaining character pairs one byte at a time.
  std::optional<uint8_t> result = 0;
  for (size_t j = i; j < output_length * 2; j += 2) {
    result = HandleRemainingHexValues(input_vector, j);
    if (result.has_value()) {
      static_cast<uint8_t*>(buffer->backing_store())[j / 2] = result.value();
    } else {
      return false;
    }
  }
  return true;
}

inline std::optional<uint8x16_t> HexToUint8FastWithNeon(uint8x16_t nibbles) {
  uint8x16_t char_0 = vdupq_n_u8('0');
  uint8x16_t char_a = vdupq_n_u8('a');
  uint8x16_t char_A = vdupq_n_u8('A');
  uint8x16_t all_10 = vdupq_n_u8(10);
  uint8x16_t all_6 = vdupq_n_u8(6);

  uint8x16_t nibbles_09 = vsubq_u8(nibbles, char_0);
  uint8x16_t mask_09 = vcgtq_u8(all_10, nibbles_09);
  nibbles_09 = vandq_u8(nibbles_09, mask_09);

  uint8x16_t nibbles_af = vsubq_u8(nibbles, char_a);
  uint8x16_t mask_af = vcgtq_u8(all_6, nibbles_af);
  nibbles_af = vandq_u8(vaddq_u8(nibbles_af, all_10), mask_af);

  uint8x16_t nibbles_AF = vsubq_u8(nibbles, char_A);
  uint8x16_t mask_AF = vcgtq_u8(all_6, nibbles_AF);
  nibbles_AF = vandq_u8(vaddq_u8(nibbles_AF, all_10), mask_AF);

  uint8x16_t combined_mask = vorrq_u8(vorrq_u8(mask_af, mask_AF), mask_09);
  if (vminvq_u8(combined_mask) != 0xFF) return {};

  return vorrq_u8(vorrq_u8(nibbles_af, nibbles_AF), nibbles_09);
}

template <typename T>
bool Uint8ArrayFromHexWithNeon(base::Vector<T>& input_vector,
                               DirectHandle<JSArrayBuffer> buffer,
                               size_t output_length) {
  CHECK_EQ(buffer->GetByteLength(), output_length);
  size_t i = 0;

  for (i = 0; i + 32 <= output_length * 2; i += 32) {
    // Load 32 input characters in two batches. For two-byte (uc16) input,
    // load twice as much and narrow each batch to one byte per character.
    uint8x16_t first_batch =
        vld1q_u8(reinterpret_cast<const uint8_t*>(&input_vector[i]));
    if constexpr (std::is_same_v<T, const base::uc16>) {
      uint8x16_t second_part_first_batch =
          vld1q_u8(reinterpret_cast<const uint8_t*>(&input_vector[i + 8]));
      first_batch =
          vmovn_high_u16(vmovn_u16(first_batch), second_part_first_batch);
    }

    uint8x16_t second_batch =
        vld1q_u8(reinterpret_cast<const uint8_t*>(&input_vector[i + 16]));
    if constexpr (std::is_same_v<T, const base::uc16>) {
      uint8x16_t second_part_second_batch =
          vld1q_u8(reinterpret_cast<const uint8_t*>(&input_vector[i + 24]));
      second_batch =
          vmovn_high_u16(vmovn_u16(second_batch), second_part_second_batch);
    }

    // Separate the characters of the low and high nibbles.
    uint8x16_t first_batch_lo_nibbles = vreinterpretq_u8_u16(
        vshrq_n_u16(vreinterpretq_u16_u8(first_batch), 8));
    uint8x16_t second_batch_lo_nibbles = vreinterpretq_u8_u16(
        vshrq_n_u16(vreinterpretq_u16_u8(second_batch), 8));

    uint8x16_t lo_nibbles = vmovn_high_u16(vmovn_u16(first_batch_lo_nibbles),
                                           second_batch_lo_nibbles);
    uint8x16_t hi_nibbles =
        vmovn_high_u16(vmovn_u16(first_batch), second_batch);

    std::optional<uint8x16_t> maybe_uint8_low_nibbles =
        HexToUint8FastWithNeon(lo_nibbles);
    if (!maybe_uint8_low_nibbles.has_value()) {
      return false;
    }
    uint8x16_t uint8_low_nibbles = maybe_uint8_low_nibbles.value();

    std::optional<uint8x16_t> maybe_uint8_high_nibbles =
        HexToUint8FastWithNeon(hi_nibbles);
    if (!maybe_uint8_high_nibbles.has_value()) {
      return false;
    }
    uint8x16_t uint8_high_nibbles = maybe_uint8_high_nibbles.value();

    // byte = (high nibble << 4) | low nibble, 16 bytes at a time.
    uint8x16_t uint8_shifted_high_nibbles =
        vshlq_n_u64(vreinterpretq_u64_u8(uint8_high_nibbles), 4);
    uint8x16_t final_result =
        vorrq_u8(uint8_shifted_high_nibbles, uint8_low_nibbles);

    vst1q_u8(reinterpret_cast<uint8_t*>(buffer->backing_store()) + i / 2,
             final_result);
  }

  // Decode the remaining character pairs one byte at a time.
  std::optional<uint8_t> result = 0;
  for (size_t j = i; j < output_length * 2; j += 2) {
    result = HandleRemainingHexValues(input_vector, j);
    if (result.has_value()) {
      static_cast<uint8_t*>(buffer->backing_store())[j / 2] = result.value();
    } else {
      return false;
    }
  }
  return true;
}

template <typename T>
bool ArrayBufferFromHex(base::Vector<T>& input_vector,
                        DirectHandle<JSArrayBuffer> buffer,
                        size_t output_length) {
  size_t input_length = input_vector.size();
  DCHECK_EQ(output_length, input_length / 2);

  if (get_vectorization_kind() == SimdKinds::kAVX2 ||
      get_vectorization_kind() == SimdKinds::kSSE) {
    return Uint8ArrayFromHexWithSSE(input_vector, buffer, output_length);
  }

  if (get_vectorization_kind() == SimdKinds::kNeon) {
    return Uint8ArrayFromHexWithNeon(input_vector, buffer, output_length);
  }

  // Scalar fallback.
  size_t index = 0;
  std::optional<uint8_t> result = 0;
  for (uint32_t i = 0; i < input_length; i += 2) {
    result = HandleRemainingHexValues(input_vector, i);
    if (!result.has_value()) {
      return false;
    }
    reinterpret_cast<uint8_t*>(buffer->backing_store())[index++] =
        result.value();
  }
  return true;
}

template bool ArrayBufferFromHex(base::Vector<const char>& input_vector,
                                 DirectHandle<JSArrayBuffer> buffer,
                                 size_t output_length);
template bool ArrayBufferFromHex(base::Vector<const base::uc16>& input_vector,
                                 DirectHandle<JSArrayBuffer> buffer,
                                 size_t output_length);